content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Append html dependencies to a tag object without clobbering the
# dependencies it already carries.  A single `html_dependency` is wrapped
# in a list so it can be concatenated with the existing ones.
appendDependencies <- function(x, value) {
  if (inherits(value, "html_dependency")) {
    value <- list(value)
  }
  existing <- attr(x, "html_dependencies", exact = TRUE)
  htmltools::htmlDependencies(x) <- c(existing, value)
  x
}
# Attach the framework7 CSS bundle (and the package's extra stylesheets)
# to a tag object.
addCSSDeps <- function(x) {
  # Stylesheets shipped under inst/framework7-5.1.1; kept in a single
  # vector, in load order: framework bundle, icon fonts, app styles and
  # the card/grid/swiper extras.
  stylesheets <- c(
    "framework7.bundle.min.css",
    "material-icons.css",
    "my-app.css",
    "framework7-icons.css",
    "social-cards.css",
    "card-img.css",
    "grid-extra.css",
    "swiper.css"
  )
  f7Deps <- list(
    htmltools::htmlDependency(
      name = "framework7",
      version = "5.1.1",
      src = c(file = system.file("framework7-5.1.1", package = "shinyMobile")),
      script = NULL,
      stylesheet = stylesheets
    )
  )
  # A list is used even though there is currently only one dependency,
  # so more can be appended later without changing callers.
  appendDependencies(x, f7Deps)
}
# Add JS dependencies to a tag list.
# framework7's JS must be located at the end of the body, which an
# htmlDependency cannot guarantee, so plain tags$script elements are
# generated instead.
addJSDeps <- function() {
  deps_path <- "framework7-5.1.1/"
  # Load order matters: the framework bundle must come before the
  # app-specific scripts.
  script_files <- c(
    "framework7.bundle.min.js",
    "my-app.js",
    "fullscreen.js"
  )
  script_tags <- lapply(paste0(deps_path, script_files), function(path) {
    shiny::singleton(shiny::tags$script(src = path))
  })
  do.call(shiny::tagList, script_tags)
}
#' HTML dependency bundling the shiny input bindings shipped with the
#' package.
#'
#' @return An \code{htmltools::htmlDependency} pointing at the
#'   \code{input-bindings} directory of the installed package.
#' @importFrom utils packageVersion
#' @importFrom htmltools htmlDependency
f7InputsDeps <- function() {
  # NOTE: `src["file"]` is already an absolute path obtained via
  # system.file("...", package = "shinyMobile").  The former
  # `package = "shinyF7"` argument made htmlDependency re-resolve that
  # path relative to a different (nonexistent) package, breaking local
  # file resolution, so it has been dropped.
  htmltools::htmlDependency(
    name = "framework7-bindings",
    version = as.character(packageVersion("shinyMobile")),
    src = c(
      file = system.file("framework7-5.1.1/input-bindings", package = "shinyMobile"),
      href = "framework7-5.1.1/input-bindings"
    ),
    script = c(
      "sliderInputBinding.js",
      "stepperInputBinding.js",
      "toggleInputBinding.js",
      "datePickerInputBinding.js",
      "pickerInputBinding.js",
      "colorPickerInputBinding.js",
      "tabsInputBinding.js",
      "dateInputBinding.js",
      "panelInputBinding.js",
      "collapsibleInputBinding.js",
      "sheetInputBinding.js",
      "cardInputBinding.js",
      "autoCompleteInputBinding.js",
      "actionSheetInputBinding.js"
    )
  )
}
| /R/deps.R | no_license | kavetinaveen/shinyMobile | R | false | false | 2,939 | r | # Add an html dependency, without overwriting existing ones
appendDependencies <- function(x, value) {
if (inherits(value, "html_dependency"))
value <- list(value)
old <- attr(x, "html_dependencies", TRUE)
htmltools::htmlDependencies(x) <- c(old, value)
x
}
# Add CSS dependencies to a tag object
addCSSDeps <- function(x) {
# CSS
framework7_css <- "framework7.bundle.min.css"
framework7_icons_css <- "framework7-icons.css"
custom_css <- "my-app.css"
# card extra elements
social_cards_css <- "social-cards.css"
card_img_css <- "card-img.css"
# swiper css
swiper_css <- "swiper.css"
# grid extra css
grid_css <- "grid-extra.css"
# material icons
material_icons_css <- "material-icons.css"
f7Deps <- list(
# deps
htmltools::htmlDependency(
name = "framework7",
version = "5.1.1",
src = c(file = system.file("framework7-5.1.1", package = "shinyMobile")),
script = NULL,
stylesheet = c(
framework7_css,
material_icons_css,
custom_css,
framework7_icons_css,
social_cards_css,
card_img_css,
grid_css,
swiper_css
)
)
)
# currently, this piece is a bit useless since
# there is only 1 dependency. However, we never
# what will happen later!
appendDependencies(x, f7Deps)
}
# Add JS dependencies to a tag object
# for framework 7 htmldependency is not
# what we want in order to include js files
# we need the crapy tags$script function.
# Indeed, framework7 js deps MUUUUUUST be
# located at the end of the body.
addJSDeps <- function() {
depsPath <- "framework7-5.1.1/"
# JS
framework7_js <- paste0(depsPath, "framework7.bundle.min.js")
custom_js <- paste0(depsPath, "my-app.js")
fullScreen_js <- paste0(depsPath, "fullscreen.js")
shiny::tagList(
shiny::singleton(
shiny::tags$script(src = framework7_js)
),
shiny::singleton(
shiny::tags$script(src = custom_js)
),
shiny::singleton(
shiny::tags$script(src = fullScreen_js)
)
)
}
#' @importFrom utils packageVersion
#' @importFrom htmltools htmlDependency
f7InputsDeps <- function() {
htmltools::htmlDependency(
name = "framework7-bindings",
version = as.character(packageVersion("shinyMobile")),
src = c(
file = system.file("framework7-5.1.1/input-bindings", package = "shinyMobile"),
href = "framework7-5.1.1/input-bindings"
),
package = "shinyF7",
script = c(
"sliderInputBinding.js",
"stepperInputBinding.js",
"toggleInputBinding.js",
"datePickerInputBinding.js",
"pickerInputBinding.js",
"colorPickerInputBinding.js",
"tabsInputBinding.js",
"dateInputBinding.js",
"panelInputBinding.js",
"collapsibleInputBinding.js",
"sheetInputBinding.js",
"cardInputBinding.js",
"autoCompleteInputBinding.js",
"actionSheetInputBinding.js"
)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R6-classes.R
\docType{class}
\name{HpdsAttribList}
\alias{HpdsAttribList}
\title{R6 class used as base class for all query parameter lists - DO NOT CREATE THIS OBJECT DIRECTLY!}
\format{
\code{\link{HpdsAttribList}} object.
}
\value{
Object of \code{\link{R6Class}} used as the base class for HPDS query parameter list objects.
}
\description{
R6 class used as base class for all query parameter lists - DO NOT CREATE THIS OBJECT DIRECTLY!
R6 class used as base class for all query parameter lists - DO NOT CREATE THIS OBJECT DIRECTLY!
}
\section{Methods}{
\describe{
\item{Documentation}{For full documentation of each method go to https://github.com/hms-dbmi/pic-sure-r-adapter-hpds}
\item{\code{new(connection)}}{This method is used to create new object of this class. DO NOT CREATE THIS OBJECT DIRECTLY!}
\item{\code{add()}}{This method adds one or more entries to the query parameter list.}
\item{\code{delete(keys)}}{This method deletes one or more entries from the query parameter list.}
\item{\code{clear()}}{This method clears all entries from the query parameter list.}
\item{\code{show()}}{This method displays the entries of the query parameter list.}
\item{\code{getQueryValues()}}{This is an internally used method that returns the entries for use by the parent query object.}}
}
\keyword{data}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{HpdsAttribList$new()}}
\item \href{#method-add}{\code{HpdsAttribList$add()}}
\item \href{#method-delete}{\code{HpdsAttribList$delete()}}
\item \href{#method-show}{\code{HpdsAttribList$show()}}
\item \href{#method-clear}{\code{HpdsAttribList$clear()}}
\item \href{#method-getQueryValues}{\code{HpdsAttribList$getQueryValues()}}
\item \href{#method-normalize_VariantSpec}{\code{HpdsAttribList$normalize_VariantSpec()}}
\item \href{#method-is_VariantSpec}{\code{HpdsAttribList$is_VariantSpec()}}
\item \href{#method-clone}{\code{HpdsAttribList$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$new(
inst_list = FALSE,
help_text = "",
allow_variants = TRUE,
dictionary_obj = FALSE
)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-add"></a>}}
\if{latex}{\out{\hypertarget{method-add}{}}}
\subsection{Method \code{add()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$add(keys = FALSE, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-delete"></a>}}
\if{latex}{\out{\hypertarget{method-delete}{}}}
\subsection{Method \code{delete()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$delete(keys, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-show"></a>}}
\if{latex}{\out{\hypertarget{method-show}{}}}
\subsection{Method \code{show()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$show()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clear"></a>}}
\if{latex}{\out{\hypertarget{method-clear}{}}}
\subsection{Method \code{clear()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$clear()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getQueryValues"></a>}}
\if{latex}{\out{\hypertarget{method-getQueryValues}{}}}
\subsection{Method \code{getQueryValues()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$getQueryValues()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-normalize_VariantSpec"></a>}}
\if{latex}{\out{\hypertarget{method-normalize_VariantSpec}{}}}
\subsection{Method \code{normalize_VariantSpec()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$normalize_VariantSpec(teststr)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-is_VariantSpec"></a>}}
\if{latex}{\out{\hypertarget{method-is_VariantSpec}{}}}
\subsection{Method \code{is_VariantSpec()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$is_VariantSpec(teststr)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/HpdsAttribList.Rd | permissive | hms-dbmi/pic-sure-r-adapter-hpds | R | false | true | 4,914 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R6-classes.R
\docType{class}
\name{HpdsAttribList}
\alias{HpdsAttribList}
\title{R6 class used as base class for all query parameter lists - DO NOT CREATE THIS OBJECT DIRECTLY!}
\format{
\code{\link{HpdsAttribList}} object.
}
\value{
Object of \code{\link{R6Class}} used to access a HPDS-hosted resource's data dictionary.
}
\description{
R6 class used as base class for all query parameter lists - DO NOT CREATE THIS OBJECT DIRECTLY!
R6 class used as base class for all query parameter lists - DO NOT CREATE THIS OBJECT DIRECTLY!
}
\section{Methods}{
\describe{
\item{Documentation}{For full documentation of each method go to https://github.com/hms-dbmi/pic-sure-r-adapter-hpds}
\item{\code{new(connection)}}{This method is used to create new object of this class. DO NOT CREATE THIS OBJECT DIRECTLY!}
\item{\code{add()}}{This method adds one or more entries to the query parameter list.}
\item{\code{delete(keys)}}{This method deletes one or more entries from the query parameter list.}
\item{\code{clear()}}{This method clears all entries from the query parameter list.}
\item{\code{show()}}{This method displays the entries of the query parameter list.}
\item{\code{getQueryValues()}}{This is an internally used method that returns the entries for use by the parent query object.}}
}
\keyword{data}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{HpdsAttribList$new()}}
\item \href{#method-add}{\code{HpdsAttribList$add()}}
\item \href{#method-delete}{\code{HpdsAttribList$delete()}}
\item \href{#method-show}{\code{HpdsAttribList$show()}}
\item \href{#method-clear}{\code{HpdsAttribList$clear()}}
\item \href{#method-getQueryValues}{\code{HpdsAttribList$getQueryValues()}}
\item \href{#method-normalize_VariantSpec}{\code{HpdsAttribList$normalize_VariantSpec()}}
\item \href{#method-is_VariantSpec}{\code{HpdsAttribList$is_VariantSpec()}}
\item \href{#method-clone}{\code{HpdsAttribList$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$new(
inst_list = FALSE,
help_text = "",
allow_variants = TRUE,
dictionary_obj = FALSE
)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-add"></a>}}
\if{latex}{\out{\hypertarget{method-add}{}}}
\subsection{Method \code{add()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$add(keys = FALSE, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-delete"></a>}}
\if{latex}{\out{\hypertarget{method-delete}{}}}
\subsection{Method \code{delete()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$delete(keys, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-show"></a>}}
\if{latex}{\out{\hypertarget{method-show}{}}}
\subsection{Method \code{show()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$show()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clear"></a>}}
\if{latex}{\out{\hypertarget{method-clear}{}}}
\subsection{Method \code{clear()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$clear()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-getQueryValues"></a>}}
\if{latex}{\out{\hypertarget{method-getQueryValues}{}}}
\subsection{Method \code{getQueryValues()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$getQueryValues()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-normalize_VariantSpec"></a>}}
\if{latex}{\out{\hypertarget{method-normalize_VariantSpec}{}}}
\subsection{Method \code{normalize_VariantSpec()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$normalize_VariantSpec(teststr)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-is_VariantSpec"></a>}}
\if{latex}{\out{\hypertarget{method-is_VariantSpec}{}}}
\subsection{Method \code{is_VariantSpec()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$is_VariantSpec(teststr)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{HpdsAttribList$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
#' Estimated generalized least squares (EGLS) fit of a restricted VAR model.
#'
#' The coefficient vector is constrained to the column space of the
#' restriction matrix \code{R} (\code{beta <- R %*% gam} below).  The
#' result is packed into a list of class \code{"ar"} with the same layout
#' as a \code{stats::ar} fit, so existing methods apply.
#' NOTE(review): relies on helpers defined elsewhere in the package
#' (\code{sync}, \code{ginv}, \code{id}, \code{vec}, \code{unvec}).
ar.egls <-
function (x, R, order.max, na.action = na.fail, series = NULL,
    ...)
{
    R <- sync(R)
    # Fixed settings: fit exactly order.max (no AIC order search), demean
    # the series, and include no intercept.  TRUE/FALSE are spelled out
    # (T/F are ordinary variables and can be reassigned).
    aic <- FALSE
    demean <- TRUE
    intercept <- FALSE
    n <- nrow(x)
    k <- ncol(x)
    if (is.null(series))
        series <- deparse(substitute(x))
    rescale <- TRUE
    ists <- is.ts(x)
    x <- na.action(as.ts(x))
    xfreq <- frequency(x)
    if (any(is.na(x)))
        stop("NAs in x")
    if (ists)
        xtsp <- tsp(x)
    x <- as.matrix(x)
    if (!is.numeric(x))
        stop("`x' must be numeric")
    n.used <- nrow(x)
    nser <- ncol(x)
    # Scale every series to unit variance; undone on all returned
    # quantities at the end.
    if (rescale) {
        sc <- sqrt(drop(apply(x, 2, var)))
        x <- x/rep(sc, rep(n.used, nser))
    }
    else sc <- rep(1, nser)
    order.max <- if (is.null(order.max))
        floor(10 * log10(n.used))
    else round(order.max)
    if (order.max < 0)
        stop("order.max must be >= 0")
    if (aic)
        order.min <- 0
    else order.min <- order.max
    A <- vector("list", order.max - order.min + 1)
    varE <- vector("list", order.max - order.min + 1)
    seA <- vector("list", order.max - order.min + 1)
    aic <- rep(Inf, order.max - order.min + 1)
    # Determinant via the QR decomposition; deliberately masks base::det
    # inside this function.
    det <- function(x) {
        prod(diag(qr(x)$qr)) * (-1)^(ncol(x) - 1)
    }
    if (demean) {
        xm <- colMeans(x)
        x <- sweep(x, 2, xm)
    }
    else xm <- rep(0, nser)
    # Stage 1: unrestricted least-squares fit for each candidate order
    # (a single pass when aic is FALSE, i.e. order.min == order.max).
    for (m in order.min:order.max) {
        y <- embed(x, m + 1)
        if (intercept) {
            if (m > 0)
                X <- cbind(rep(1, nrow(y)), y[, (nser + 1):ncol(y)])
            else X <- as.matrix(rep(1, nrow(y)))
        }
        else {
            if (m > 0)
                X <- y[, (nser + 1):ncol(y)]
            else X <- matrix(0, nrow(y), 0)
        }
        Y <- t(y[, 1:nser])
        N <- ncol(Y)
        XX <- t(X) %*% X
        rank <- qr(XX)$rank
        if (rank != nrow(XX)) {
            warning(paste("Model order", m))
            warning("Singularities in the computation of the projection matrix")
            warning(paste("Results are only valid up to model order",
                m - 1))
            break
        }
        P <- if (ncol(XX) > 0)
            solve(XX)
        else XX
        A[[m - order.min + 1]] <- Y %*% X %*% P
        YH <- A[[m - order.min + 1]] %*% t(X)
        E <- (Y - YH)
        varE[[m - order.min + 1]] <- E %*% t(E)/N
        aic[m - order.min + 1] <- n.used * log(det(varE[[m -
            order.min + 1]])) + 2 * nser * (nser * m + intercept)
    }
    m <- which(aic == min(aic)) + order.min - 1
    y <- embed(x, m + 1)
    # Stage 2: two EGLS iterations, each re-estimating the coefficient
    # vector within the column space of R from the current residuals.
    sigmau <- (1/(n - k * order.max - 1)) * E %*% t(E)
    for (i in 1:2) {
        T1 <- ginv(t(R) %*% (kronecker((XX), ginv(sigmau)) %*%
            R))
        T2 <- t(R) %*% (kronecker(id(k * order.max), ginv(sigmau)))
        gam <- vec(A[[m - order.min + 1]]) + T1 %*% T2 %*% vec(E %*%
            (X))
        beta <- R %*% gam
        AA <- unvec(beta, k, order.max)
        YH <- AA %*% t(X)
        E <- (Y - YH)
    }
    sig <- E %*% t(E)/N
    # Asymptotic covariance of the restricted coefficient estimates.
    varA <- R %*% ginv(t(R) %*% (kronecker((XX/n), ginv(sig))) %*%
        R) %*% t(R)/n
    seA[[m - order.min + 1]] <- if (ncol(varA) > 0)
        sqrt(diag(varA))
    else numeric(0)
    if (intercept) {
        xint <- AA[, 1]
        ar <- AA[, -1]
        if (m > 0)
            X <- cbind(rep(1, nrow(y)), y[, (nser + 1):ncol(y)])
        else X <- as.matrix(rep(1, nrow(y)))
    }
    else {
        if (m > 0)
            X <- y[, (nser + 1):ncol(y)]
        else X <- matrix(0, nrow(y), 0)
        xint <- NULL
        ar <- AA
    }
    Y <- t(y[, 1:nser, drop = FALSE])
    YH <- AA %*% t(X)
    # Residuals, padded with NA for the first m observations.
    E <- drop(rbind(matrix(NA, m, nser), t(Y - YH)))
    aic <- aic - min(aic)
    names(aic) <- order.min:order.max
    dim(ar) <- c(nser, nser, m)
    ar <- aperm(ar, c(3, 1, 2))
    ses <- seA[[m - order.min + 1]]
    if (intercept) {
        sem <- ses[1:nser]
        ses <- ses[-(1:nser)]
    }
    else sem <- rep(0, nser)
    dim(ses) <- c(nser, nser, m)
    ses <- aperm(ses, c(3, 1, 2))
    var.pred <- varE[[m - order.min + 1]]
    if (nser > 1) {
        snames <- colnames(x)
        dimnames(ses) <- dimnames(ar) <- list(seq(length = m),
            snames, snames)
        dimnames(var.pred) <- list(snames, snames)
        names(sem) <- colnames(E)
        snames <- colnames(E)
    }
    if (ists) {
        attr(E, "tsp") <- xtsp
        attr(E, "class") <- "ts"
    }
    # Undo the unit-variance rescaling on every returned quantity.
    if (rescale) {
        xm <- xm * sc
        if (!is.null(xint))
            xint <- xint * sc
        aa <- outer(sc, 1/sc)
        if (nser > 1 && m > 0)
            for (i in 1:m) ar[i, , ] <- ar[i, , ] * aa
        var.pred <- var.pred * outer(sc, sc)
        E <- E * rep(sc, rep(NROW(E), nser))
        sem <- sem * sc
        if (m > 0)
            for (i in 1:m) ses[i, , ] <- ses[i, , ] * aa
    }
    res <- list(order = m, ar = ar, var.pred = var.pred, x.mean = xm,
        x.intercept = xint, aic = aic, n.used = n.used, order.max = order.max,
        partialacf = NULL, resid = E, method = "EGLS", series = series,
        frequency = xfreq, call = match.call(), asy.se.coef = list(x.mean = sem,
            ar = drop(ses)))
    class(res) <- "ar"
    res
}
| /R/ar.egls.R | no_license | cran/LPM | R | false | false | 5,328 | r | ar.egls <-
function (x, R, order.max, na.action = na.fail, series = NULL,
...)
{
R = sync(R)
aic = F
demean = T
intercept = F
n <- nrow(x)
k <- ncol(x)
if (is.null(series))
series <- deparse(substitute(x))
rescale <- TRUE
ists <- is.ts(x)
x <- na.action(as.ts(x))
xfreq <- frequency(x)
if (any(is.na(x)))
stop("NAs in x")
if (ists)
xtsp <- tsp(x)
x <- as.matrix(x)
if (!is.numeric(x))
stop("`x' must be numeric")
n.used <- nrow(x)
nser <- ncol(x)
if (rescale) {
sc <- sqrt(drop(apply(x, 2, var)))
x <- x/rep(sc, rep(n.used, nser))
}
else sc <- rep(1, nser)
order.max <- if (is.null(order.max))
floor(10 * log10(n.used))
else round(order.max)
if (order.max < 0)
stop("order.max must be >= 0")
if (aic)
order.min <- 0
else order.min <- order.max
A <- vector("list", order.max - order.min + 1)
varE <- vector("list", order.max - order.min + 1)
seA <- vector("list", order.max - order.min + 1)
aic <- rep(Inf, order.max - order.min + 1)
det <- function(x) {
prod(diag(qr(x)$qr)) * (-1)^(ncol(x) - 1)
}
if (demean) {
xm <- colMeans(x)
x <- sweep(x, 2, xm)
}
else xm <- rep(0, nser)
for (m in order.min:order.max) {
y <- embed(x, m + 1)
if (intercept) {
if (m > 0)
X <- cbind(rep(1, nrow(y)), y[, (nser + 1):ncol(y)])
else X <- as.matrix(rep(1, nrow(y)))
}
else {
if (m > 0)
X <- y[, (nser + 1):ncol(y)]
else X <- matrix(0, nrow(y), 0)
}
Y <- t(y[, 1:nser])
N <- ncol(Y)
XX <- t(X) %*% X
rank <- qr(XX)$rank
if (rank != nrow(XX)) {
warning(paste("Model order", m))
warning("Singularities in the computation of the projection matrix")
warning(paste("Results are only valid up to model order",
m - 1))
break
}
P <- if (ncol(XX) > 0)
solve(XX)
else XX
A[[m - order.min + 1]] <- Y %*% X %*% P
YH <- A[[m - order.min + 1]] %*% t(X)
E <- (Y - YH)
varE[[m - order.min + 1]] <- E %*% t(E)/N
aic[m - order.min + 1] <- n.used * log(det(varE[[m -
order.min + 1]])) + 2 * nser * (nser * m + intercept)
}
m <- which(aic == min(aic)) + order.min - 1
y <- embed(x, m + 1)
sigmau <- (1/(n - k * order.max - 1)) * E %*% t(E)
for (i in 1:2) {
T1 <- ginv(t(R) %*% (kronecker((XX), ginv(sigmau)) %*%
R))
T2 <- t(R) %*% (kronecker(id(k * order.max), ginv(sigmau)))
gam <- vec(A[[m - order.min + 1]]) + T1 %*% T2 %*% vec(E %*%
(X))
beta <- R %*% gam
AA <- unvec(beta, k, order.max)
YH <- AA %*% t(X)
E <- (Y - YH)
}
sig <- E %*% t(E)/N
varA <- R %*% ginv(t(R) %*% (kronecker((XX/n), ginv(sig))) %*%
R) %*% t(R)/n
seA[[m - order.min + 1]] <- if (ncol(varA) > 0)
sqrt(diag(varA))
else numeric(0)
if (intercept) {
xint <- AA[, 1]
ar <- AA[, -1]
if (m > 0)
X <- cbind(rep(1, nrow(y)), y[, (nser + 1):ncol(y)])
else X <- as.matrix(rep(1, nrow(y)))
}
else {
if (m > 0)
X <- y[, (nser + 1):ncol(y)]
else X <- matrix(0, nrow(y), 0)
xint <- NULL
ar <- AA
}
Y <- t(y[, 1:nser, drop = FALSE])
YH <- AA %*% t(X)
E <- drop(rbind(matrix(NA, m, nser), t(Y - YH)))
aic <- aic - min(aic)
names(aic) <- order.min:order.max
dim(ar) <- c(nser, nser, m)
ar <- aperm(ar, c(3, 1, 2))
ses <- seA[[m - order.min + 1]]
if (intercept) {
sem <- ses[1:nser]
ses <- ses[-(1:nser)]
}
else sem <- rep(0, nser)
dim(ses) <- c(nser, nser, m)
ses <- aperm(ses, c(3, 1, 2))
var.pred <- varE[[m - order.min + 1]]
if (nser > 1) {
snames <- colnames(x)
dimnames(ses) <- dimnames(ar) <- list(seq(length = m),
snames, snames)
dimnames(var.pred) <- list(snames, snames)
names(sem) <- colnames(E)
snames <- colnames(E)
}
if (ists) {
attr(E, "tsp") <- xtsp
attr(E, "class") <- "ts"
}
if (rescale) {
xm <- xm * sc
if (!is.null(xint))
xint <- xint * sc
aa <- outer(sc, 1/sc)
if (nser > 1 && m > 0)
for (i in 1:m) ar[i, , ] <- ar[i, , ] * aa
var.pred <- var.pred * outer(sc, sc)
E <- E * rep(sc, rep(NROW(E), nser))
sem <- sem * sc
if (m > 0)
for (i in 1:m) ses[i, , ] <- ses[i, , ] * aa
}
res <- list(order = m, ar = ar, var.pred = var.pred, x.mean = xm,
x.intercept = xint, aic = aic, n.used = n.used, order.max = order.max,
partialacf = NULL, resid = E, method = "EGLS", series = series,
frequency = xfreq, call = match.call(), asy.se.coef = list(x.mean = sem,
ar = drop(ses)))
class(res) <- "ar"
res
}
|
###################################################
### code chunk number 35: Cs410_conf-int
###################################################
pred_kfas_t1 <- predict(fit_kfas$model,
interval = "prediction",
se.fit = TRUE, filtered = TRUE
)
head(pred_kfas_t1)
| /inst/userguide/figures/KFAS--Cs410_conf-int.R | permissive | nwfsc-timeseries/MARSS | R | false | false | 268 | r | ###################################################
### code chunk number 35: Cs410_conf-int
###################################################
pred_kfas_t1 <- predict(fit_kfas$model,
interval = "prediction",
se.fit = TRUE, filtered = TRUE
)
head(pred_kfas_t1)
|
/lecture.R | no_license | ahmedjoubest/windows-server | R | false | false | 4,281 | r | ||
#' @title Parallel processing of raster stack objects
#' @name apply_stack_parallel
#' @author Victor Maus, \email{vwmaus1@@gmail.com}
#'
#' @param x raster stack
#'
#' @param fun is a function to be applied to pixel location of the raster stack object and can
#' be defined by the user. The function must have two arguments: x is numeric vector and
#' args.list is a list of other arguments used in the processing. The function must return
#' a vector lenght equal to \code{nl}, number of layers in the output stack.
#'
#' @param args.list a list of arguments to pass to \code{fun}
#'
#' @param nl integer > 0. How many layers should the output RasterBrick have?
#' Default is \code{nlayers(x)}
#'
#' @param progress a character. See \code{\link[raster]{pbCreate}}. Default is \code{'text'}
#'
#' @param filename a character with the file name. Optional
#'
#' @param ... other arguments to pass to \code{\link[raster]{beginCluster}} and
#' \code{\link[raster]{writeStart}}
#'
#' @description This function performs the parallel processing of raster stack objects.
#'
#' @noRd
# Apply `fun` to every pixel of a raster stack in parallel and collect the
# per-pixel results into a RasterBrick with `nl` layers.  `fun` receives a
# pixel's numeric vector plus `args.list` (with `nl` appended) and must
# return `nl` values per pixel.
apply_stack_parallel <- function(x, fun, args.list = list(), nl = nlayers(x), progress = 'text', filename = "", ...) {
  # Create the (empty) output raster with the requested layer count
  out <- raster::brick(x, nl = nl, values = FALSE)
  #names(out) <- names(x)
  args.list <- c(args.list, nl = nl)
  # Create cluster
  raster::beginCluster(...)
  cl <- raster::getCluster()
  nodes <- length(cl)
  # Split the raster into row-chunks (at least 4 per node) and precompute
  # where each chunk lands in the flat output matrix
  bs <- raster::blockSize(x, minblocks = nodes * 4)
  bs$array_rows <- cumsum(c(1, bs$nrows * out@ncols))
  pb <- raster::pbCreate(bs$n, progress = progress)
  if(bs$n < nodes)
    nodes <- bs$n
  # Worker: read chunk k, apply `fun` pixel-wise, return an nl-column
  # matrix with one row per pixel
  cl_fun <- function(k, x, bs, fun, args.list){
    v <- raster::getValues(x, bs$row[k], bs$nrows[k])
    res <- matrix(apply(v, 1, fun, args.list), ncol = args.list$nl, byrow = TRUE)
    return(res)
  }
  # Seed every node with one chunk; the tag identifies the chunk
  for (k in 1:nodes) {
    snow::sendCall(cl[[k]], cl_fun, list(k, x, bs, fun, args.list), tag = k)
  }
  # If the result does not fit in memory, fall back to a temp file.
  # (Fix: scalar `if` condition now uses `&&` instead of vectorized `&`.)
  filename <- raster::trim(filename)
  if (!raster::canProcessInMemory(out) && filename == "") {
    filename <- raster::rasterTmpFile()
  }
  # Start writing the output (file-backed) or hold it in a matrix
  if (filename != "") {
    out <- raster::writeStart(out, filename = filename, ... )
  } else {
    vv <- matrix(out, ncol = nlayers(out))
  }
  # Collect chunk results as they arrive (arrival order is arbitrary)
  for (k in 1:bs$n) {
    # receive results from a node
    d <- snow::recvOneData(cl)
    # error?
    if (! d$value$success) {
      print(d$value)
      stop('cluster error')
    }
    # which block is this?
    b <- d$value$tag
    # Write chunk results to output
    if (filename != "") {
      out <- raster::writeValues(out, d$value$value, bs$row[b])
    } else {
      rows <- seq(from = bs$array_rows[b], by = 1, length.out = bs$nrows[b]*out@ncols)
      vv[rows,] <- d$value$value
    }
    # need to send more data?
    ni <- nodes + k
    if (ni <= bs$n) {
      snow::sendCall(cl[[d$node]], cl_fun, list(ni, x, bs, fun, args.list), tag = ni)
    }
    # Progress bar
    raster::pbStep(pb, k)
  }
  # Finalize the output raster
  if (filename != "") {
    out <- raster::writeStop(out)
  } else {
    out <- raster::setValues(out, as.vector(vv))
  }
  # Close cluster
  raster::pbClose(pb)
  # Assign layers names
  #names(out) <- names(x)
  raster::endCluster()
  return(out)
}
| /scripts/apply_stack_parallel.R | no_license | rkstan/GE712 | R | false | false | 3,739 | r | #' @title Parallel processing of raster stack objects
#' @name apply_stack_parallel
#' @author Victor Maus, \email{vwmaus1@@gmail.com}
#'
#' @param x raster stack
#'
#' @param fun is a function to be applied to pixel location of the raster stack object and can
#' be defined by the user. The function must have two arguments: x is numeric vector and
#' args.list is a list of other arguments used in the processing. The function must return
#' a vector lenght equal to \code{nl}, number of layers in the output stack.
#'
#' @param args.list a list of arguments to pass to \code{fun}
#'
#' @param nl integer > 0. How many layers should the output RasterBrick have?
#' Default is \code{nlayers(x)}
#'
#' @param progress a character. See \code{\link[raster]{pbCreate}}. Default is \code{'text'}
#'
#' @param filename a character with the file name. Optional
#'
#' @param ... other arguments to pass to \code{\link[raster]{beginCluster}} and
#' \code{\link[raster]{writeStart}}
#'
#' @description This function performs the parallel processing of raster stack objects.
#'
#' @noRd
apply_stack_parallel <- function(x, fun, args.list = list(), nl = nlayers(x), progress = 'text', filename = "", ...) {
# Create output raster
out <- raster::brick(x, nl = nl, values = FALSE)
#names(out) <- names(x)
args.list <- c(args.list, nl = nl)
# Create cluster
raster::beginCluster(...)
cl <- raster::getCluster()
nodes <- length(cl)
# Compute raster tiles
bs <- raster::blockSize(x, minblocks = nodes * 4)
bs$array_rows <- cumsum(c(1, bs$nrows * out@ncols))
pb <- raster::pbCreate(bs$n, progress = progress)
if(bs$n < nodes)
nodes <- bs$n
# Creat cluster function
cl_fun <- function(k, x, bs, fun, args.list){
v <- raster::getValues(x, bs$row[k], bs$nrows[k])
res <- matrix(apply(v, 1, fun, args.list), ncol = args.list$nl, byrow = TRUE)
return(res)
}
# Get all nodes going
for (k in 1:nodes) {
snow::sendCall(cl[[k]], cl_fun, list(k, x, bs, fun, args.list), tag = k)
}
# If needed create raster files
filename <- raster::trim(filename)
if (!raster::canProcessInMemory(out) & filename == "") {
filename <- raster::rasterTmpFile()
}
# Start writing the output
if (filename != "") {
out <- raster::writeStart(out, filename = filename, ... )
} else {
vv <- matrix(out, ncol = nlayers(out))
}
# Process raster tiles
for (k in 1:bs$n) {
# receive results from a node
d <- snow::recvOneData(cl)
# error?
if (! d$value$success) {
print(d$value)
stop('cluster error')
}
# which block is this?
b <- d$value$tag
# Write chunk results to output
if (filename != "") {
out <- raster::writeValues(out, d$value$value, bs$row[b])
} else {
rows <- seq(from = bs$array_rows[b], by = 1, length.out = bs$nrows[b]*out@ncols)
vv[rows,] <- d$value$value
}
# need to send more data?
ni <- nodes + k
if (ni <= bs$n) {
snow::sendCall(cl[[d$node]], cl_fun, list(ni, x, bs, fun, args.list), tag = ni)
}
# Progess bar
raster::pbStep(pb, k)
}
# Create output raster
if (filename != "") {
out <- raster::writeStop(out)
} else {
out <- raster::setValues(out, as.vector(vv))
}
# Close cluster
raster::pbClose(pb)
# Assign layers names
#names(out) <- names(x)
raster::endCluster()
return(out)
}
|
# Example script for nparLD::f2.ld.f1 — nonparametric tests for the
# F2-LD-F1 design (two whole-plot factors, one repeated-measures factor).
library(nparLD)
### Name: f2.ld.f1
### Title: Nonparametric Tests for the F2-LD-F1 Design
### Aliases: f2.ld.f1
### Keywords: htest
### ** Examples
## Example with the "Shoulder tip pain study" data ##
data(shoulder)
# NOTE(review): attach() puts the data columns on the search path so the
# bare names below (resp, time, group1, ...) resolve; it is never
# detach()ed here — fine for a throwaway example, avoid in package code.
attach(shoulder)
# Fit the model: response over 6 time points, grouped by treatment (Y/N)
# and gender (F/M).  The explicit *.order arguments pin the level
# ordering so the printed level checks below match.
ex.f2f1<-f2.ld.f1(y=resp, time=time, group1=group1, group2=group2,
subject=subject, time.name="Time", group1.name="Treatment",
group2.name="Gender", description=FALSE, time.order=c(1,2,3,4,5,6),
group1.order=c("Y","N"), group2.order=c("F","M"))
# Expected console output of the call above:
# F2 LD F1 Model
# -----------------------
# Check that the order of the time, group1, and group2 levels are correct.
# Time level: 1 2 3 4 5 6
# Group1 level: Y N
# Group2 level: F M
# If the order is not correct, specify the correct order in time.order,
# group1.order, or group2.order.
#
#
# Warning(s):
# The covariance matrix is singular.
## Wald-type statistic
ex.f2f1$Wald.test
# Statistic df p-value
#Treatment 16.40129021 1 5.125033e-05
#Gender 0.04628558 1 8.296575e-01
#Time 16.34274332 5 5.930698e-03
#Treatment:Gender 0.03583558 1 8.498554e-01
#Treatment:Time 27.51450085 5 4.527996e-05
#Gender:Time 12.37903186 5 2.994753e-02
#Treatment:Gender:Time 5.11864769 5 4.015727e-01
## ANOVA-type statistic
ex.f2f1$ANOVA.test
# Statistic df p-value
#Treatment 16.40129021 1.000000 5.125033e-05
#Gender 0.04628558 1.000000 8.296575e-01
#Time 3.38218704 2.700754 2.120366e-02
#Treatment:Gender 0.03583558 1.000000 8.498554e-01
#Treatment:Time 3.71077200 2.700754 1.398190e-02
#Gender:Time 1.14434841 2.700754 3.272967e-01
#Treatment:Gender:Time 0.43755394 2.700754 7.054255e-01
## ANOVA-type statistic for the whole-plot factors and
## their interaction
ex.f2f1$ANOVA.test.mod.Box
# Statistic df1 df2 p-value
#Treatment 16.40129021 1 21.86453 0.0005395379
#Gender 0.04628558 1 21.86453 0.8316516274
#Treatment:Gender 0.03583558 1 21.86453 0.8516017168
| /data/genthat_extracted_code/nparLD/examples/f2.ld.f1.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,128 | r | library(nparLD)
### Name: f2.ld.f1
### Title: Nonparametric Tests for the F2-LD-F1 Design
### Aliases: f2.ld.f1
### Keywords: htest
### ** Examples
## Example with the "Shoulder tip pain study" data ##
data(shoulder)
attach(shoulder)
ex.f2f1<-f2.ld.f1(y=resp, time=time, group1=group1, group2=group2,
subject=subject, time.name="Time", group1.name="Treatment",
group2.name="Gender", description=FALSE, time.order=c(1,2,3,4,5,6),
group1.order=c("Y","N"), group2.order=c("F","M"))
# F2 LD F1 Model
# -----------------------
# Check that the order of the time, group1, and group2 levels are correct.
# Time level: 1 2 3 4 5 6
# Group1 level: Y N
# Group2 level: F M
# If the order is not correct, specify the correct order in time.order,
# group1.order, or group2.order.
#
#
# Warning(s):
# The covariance matrix is singular.
## Wald-type statistic
ex.f2f1$Wald.test
# Statistic df p-value
#Treatment 16.40129021 1 5.125033e-05
#Gender 0.04628558 1 8.296575e-01
#Time 16.34274332 5 5.930698e-03
#Treatment:Gender 0.03583558 1 8.498554e-01
#Treatment:Time 27.51450085 5 4.527996e-05
#Gender:Time 12.37903186 5 2.994753e-02
#Treatment:Gender:Time 5.11864769 5 4.015727e-01
## ANOVA-type statistic
ex.f2f1$ANOVA.test
# Statistic df p-value
#Treatment 16.40129021 1.000000 5.125033e-05
#Gender 0.04628558 1.000000 8.296575e-01
#Time 3.38218704 2.700754 2.120366e-02
#Treatment:Gender 0.03583558 1.000000 8.498554e-01
#Treatment:Time 3.71077200 2.700754 1.398190e-02
#Gender:Time 1.14434841 2.700754 3.272967e-01
#Treatment:Gender:Time 0.43755394 2.700754 7.054255e-01
## ANOVA-type statistic for the whole-plot factors and
## their interaction
ex.f2f1$ANOVA.test.mod.Box
# Statistic df1 df2 p-value
#Treatment 16.40129021 1 21.86453 0.0005395379
#Gender 0.04628558 1 21.86453 0.8316516274
#Treatment:Gender 0.03583558 1 21.86453 0.8516017168
|
################################################################################
##### Directed Acyclid Graph ##################################################
################################################################################
## For browser-based:
dag {
"LDL-C" [pos="0.296,0.447"]
AF [pos="0.653,-0.006"]
Age [pos="-0.013,0.458"]
Antidiabetic_drugs [pos="0.609,1.003"]
Antihypertensive_drugs [pos="0.475,0.019"]
BMI [pos="0.022,0.083"]
HbA1c [pos="0.232,0.255"]
MACE [outcome,pos="0.897,0.596"]
Microalbuminuria [pos="0.424,0.772"]
Protein [exposure,pos="0.643,0.774"]
SBP [pos="0.173,-0.052"]
Sex [pos="-0.019,0.259"]
Smoking [pos="0.100,0.019"]
Statin [pos="0.145,0.518"]
T2D_duration [pos="0.419,1.001"]
Total_cholesterol [pos="0.156,0.736"]
eGFR [pos="0.292,0.591"]
previous_CVD [pos="0.471,0.462"]
"LDL-C" -> HbA1c
"LDL-C" -> MACE
AF -> MACE
AF -> Protein
AF -> previous_CVD
Age -> HbA1c
Age -> Microalbuminuria [pos="-0.006,1.030"]
Age -> Protein
Age -> Statin
Age -> T2D_duration [pos="0.011,1.032"]
Age -> eGFR [pos="0.190,0.362"]
Antidiabetic_drugs -> MACE [pos="0.786,0.894"]
Antihypertensive_drugs -> MACE
Antihypertensive_drugs -> previous_CVD
BMI -> "LDL-C"
BMI -> HbA1c
BMI -> Protein
BMI -> T2D_duration [pos="-0.023,0.950"]
BMI -> Total_cholesterol [pos="0.032,0.498"]
BMI -> eGFR [pos="0.068,0.271"]
HbA1c -> MACE [pos="0.351,0.233"]
HbA1c -> previous_CVD
Microalbuminuria -> MACE
Microalbuminuria -> Protein
Protein -> MACE
SBP -> Antihypertensive_drugs
SBP -> MACE
SBP -> Protein [pos="0.465,0.177"]
SBP -> eGFR [pos="0.071,0.206"]
SBP -> previous_CVD
Sex -> HbA1c
Sex -> MACE [pos="0.256,-0.052"]
Sex -> Microalbuminuria [pos="0.463,0.344"]
Sex -> Protein
Sex -> eGFR
Sex -> previous_CVD
Smoking -> MACE
Smoking -> Protein [pos="0.514,0.173"]
Smoking -> SBP
Smoking -> eGFR [pos="0.084,0.299"]
Smoking -> previous_CVD
Statin -> "LDL-C"
Statin -> T2D_duration
Statin -> Total_cholesterol
T2D_duration -> Antidiabetic_drugs
T2D_duration -> MACE [pos="0.774,0.832"]
T2D_duration -> Protein
Total_cholesterol -> MACE
eGFR -> MACE
eGFR -> Microalbuminuria
eGFR -> previous_CVD [pos="0.383,0.462"]
previous_CVD -> MACE
previous_CVD -> Protein
}
## for R:
testImplications <- function( covariance.matrix, sample.size ){
library(ggm)
tst <- function(i){ pcor.test( pcor(i,covariance.matrix), length(i)-2, sample.size )$pvalue }
tos <- function(i){ paste(i,collapse=" ") }
implications <- list(c("MACE","Age","BMI","T2D_duration","Sex","Microalbuminuria","Smoking","SBP","LDL-C","HbA1c","Protein","Statin","eGFR","previous_CVD","AF"),
c("MACE","Age","T2D_duration","Total_cholesterol","LDL-C","SBP","eGFR","HbA1c","Protein","Sex","Smoking","Microalbuminuria","previous_CVD","AF"),
c("MACE","BMI","T2D_duration","Sex","Microalbuminuria","Smoking","SBP","LDL-C","HbA1c","Protein","eGFR","previous_CVD","AF","Total_cholesterol"),
c("MACE","Statin","BMI","T2D_duration","Total_cholesterol","LDL-C","Age"),
c("MACE","Statin","T2D_duration","Sex","Microalbuminuria","Smoking","SBP","LDL-C","HbA1c","Protein","eGFR","previous_CVD","AF","Total_cholesterol"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","LDL-C"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","Statin"),
c("Protein","HbA1c","BMI","T2D_duration","Age","AF","SBP","Smoking","Sex","eGFR","previous_CVD"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","LDL-C"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","previous_CVD","BMI","Age","Statin","Microalbuminuria"),
c("Protein","HbA1c","BMI","T2D_duration","Age","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria"),
c("Protein","LDL-C","HbA1c","Statin","BMI","Sex","Age"),
c("Protein","LDL-C","BMI","T2D_duration","Age","HbA1c","Sex"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","Statin"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","T2D_duration","Age"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","Statin"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","T2D_duration"),
c("Protein","Total_cholesterol","Statin","BMI"),
c("Protein","Total_cholesterol","BMI","T2D_duration","LDL-C","Age"),
c("Protein","Total_cholesterol","HbA1c","BMI","Sex","Age","T2D_duration"),
c("Protein","Total_cholesterol","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","T2D_duration"),
c("Protein","Total_cholesterol","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","T2D_duration"),
c("Protein","Antihypertensive_drugs","AF","Smoking","Sex","HbA1c","eGFR","previous_CVD","SBP"),
c("Protein","Antihypertensive_drugs","eGFR","previous_CVD","AF","SBP","Smoking","Sex","BMI","Age","LDL-C"),
c("Protein","Antihypertensive_drugs","BMI","Sex","Age","Statin","eGFR","previous_CVD","AF","SBP","Smoking"),
c("Protein","Antihypertensive_drugs","Age","BMI","T2D_duration","Sex","eGFR","previous_CVD","AF","SBP","Smoking"),
c("Protein","Antihypertensive_drugs","previous_CVD","Microalbuminuria","AF","SBP","Smoking","Sex","HbA1c","Age","BMI"),
c("Protein","Antihypertensive_drugs","previous_CVD","Microalbuminuria","AF","SBP","Smoking","Sex","Age","BMI","LDL-C"),
c("Protein","Antihypertensive_drugs","BMI","Sex","Age","Statin","previous_CVD","Microalbuminuria","AF","SBP","Smoking"),
c("Protein","Antihypertensive_drugs","Age","BMI","T2D_duration","Sex","previous_CVD","Microalbuminuria","AF","SBP","Smoking"),
c("Protein","Antidiabetic_drugs","T2D_duration"),
c("Protein","Statin","BMI","T2D_duration","LDL-C","Age"),
c("Protein","Statin","HbA1c","BMI","Sex","Age","T2D_duration"),
c("Protein","Statin","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","T2D_duration"),
c("Protein","Statin","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","T2D_duration"),
c("Protein","eGFR","AF","SBP","Smoking","Sex","HbA1c","previous_CVD","Microalbuminuria","Age","BMI"),
c("Protein","eGFR","previous_CVD","AF","SBP","Smoking","Sex","BMI","Age","LDL-C","Microalbuminuria"),
c("Protein","eGFR","BMI","Sex","Age","Statin","previous_CVD","AF","SBP","Smoking","Microalbuminuria"),
c("Protein","eGFR","Age","BMI","T2D_duration","Sex","previous_CVD","AF","SBP","Smoking","Microalbuminuria"),
c("Sex","Age"),
c("Sex","BMI"),
c("Sex","Smoking"),
c("Sex","SBP"),
c("Sex","T2D_duration"),
c("Sex","LDL-C"),
c("Sex","Total_cholesterol"),
c("Sex","Antihypertensive_drugs"),
c("Sex","Antidiabetic_drugs"),
c("Sex","Statin"),
c("Sex","AF"),
c("Age","BMI"),
c("Age","Smoking"),
c("Age","previous_CVD","SBP","Smoking","Sex","HbA1c","eGFR"),
c("Age","SBP"),
c("Age","LDL-C","Statin"),
c("Age","Total_cholesterol","Statin"),
c("Age","Antihypertensive_drugs"),
c("Age","Antidiabetic_drugs","T2D_duration"),
c("Age","AF"),
c("HbA1c","Smoking"),
c("HbA1c","Microalbuminuria","eGFR","Age","Sex"),
c("HbA1c","Microalbuminuria","Sex","Age","BMI"),
c("HbA1c","SBP"),
c("HbA1c","T2D_duration","Statin","Age","BMI"),
c("HbA1c","T2D_duration","Age","BMI","LDL-C"),
c("HbA1c","Total_cholesterol","Statin","BMI"),
c("HbA1c","Total_cholesterol","BMI","LDL-C","Age"),
c("HbA1c","Antihypertensive_drugs"),
c("HbA1c","Antidiabetic_drugs","T2D_duration"),
c("HbA1c","Antidiabetic_drugs","Statin","Age","BMI"),
c("HbA1c","Antidiabetic_drugs","Age","BMI","LDL-C"),
c("HbA1c","Statin","BMI","LDL-C","Age"),
c("HbA1c","eGFR","Sex","Age","BMI"),
c("HbA1c","AF"),
c("BMI","Smoking"),
c("BMI","Microalbuminuria","eGFR","Age","Sex"),
c("BMI","previous_CVD","SBP","Smoking","Sex","HbA1c","eGFR"),
c("BMI","SBP"),
c("BMI","Antihypertensive_drugs"),
c("BMI","Antidiabetic_drugs","T2D_duration"),
c("BMI","Statin"),
c("BMI","AF"),
c("Smoking","Microalbuminuria","eGFR","Age","Sex"),
c("Smoking","T2D_duration"),
c("Smoking","LDL-C"),
c("Smoking","Total_cholesterol"),
c("Smoking","Antihypertensive_drugs","SBP"),
c("Smoking","Antidiabetic_drugs"),
c("Smoking","Statin"),
c("Smoking","AF"),
c("Microalbuminuria","previous_CVD","SBP","Smoking","Sex","HbA1c","eGFR"),
c("Microalbuminuria","previous_CVD","Age","Sex","eGFR"),
c("Microalbuminuria","SBP","eGFR","Sex","Age"),
c("Microalbuminuria","T2D_duration","Age","BMI"),
c("Microalbuminuria","T2D_duration","eGFR","Sex","Age"),
c("Microalbuminuria","LDL-C","Statin","BMI"),
c("Microalbuminuria","LDL-C","BMI","Age"),
c("Microalbuminuria","LDL-C","eGFR","Sex","Age"),
c("Microalbuminuria","Total_cholesterol","Statin","BMI"),
c("Microalbuminuria","Total_cholesterol","BMI","Age"),
c("Microalbuminuria","Total_cholesterol","eGFR","Sex","Age"),
c("Microalbuminuria","Antihypertensive_drugs","SBP"),
c("Microalbuminuria","Antihypertensive_drugs","eGFR","Sex","Age"),
c("Microalbuminuria","Antidiabetic_drugs","T2D_duration"),
c("Microalbuminuria","Antidiabetic_drugs","Age","BMI"),
c("Microalbuminuria","Antidiabetic_drugs","eGFR","Sex","Age"),
c("Microalbuminuria","Statin","Age"),
c("Microalbuminuria","AF"),
c("previous_CVD","T2D_duration","Statin","Age","BMI"),
c("previous_CVD","T2D_duration","Age","BMI","LDL-C"),
c("previous_CVD","T2D_duration","HbA1c","BMI","Sex","Age"),
c("previous_CVD","T2D_duration","HbA1c","Sex","SBP","eGFR","Smoking"),
c("previous_CVD","LDL-C","BMI","Age","HbA1c","Sex"),
c("previous_CVD","LDL-C","Smoking","SBP","eGFR","HbA1c","Sex"),
c("previous_CVD","Total_cholesterol","Statin","BMI"),
c("previous_CVD","Total_cholesterol","BMI","LDL-C","Age"),
c("previous_CVD","Total_cholesterol","HbA1c","BMI","Sex","Age"),
c("previous_CVD","Total_cholesterol","Smoking","SBP","eGFR","HbA1c","Sex"),
c("previous_CVD","Antidiabetic_drugs","T2D_duration"),
c("previous_CVD","Antidiabetic_drugs","Statin","Age","BMI"),
c("previous_CVD","Antidiabetic_drugs","Age","BMI","LDL-C"),
c("previous_CVD","Antidiabetic_drugs","HbA1c","BMI","Sex","Age"),
c("previous_CVD","Antidiabetic_drugs","HbA1c","Sex","SBP","eGFR","Smoking"),
c("previous_CVD","Statin","BMI","LDL-C","Age"),
c("previous_CVD","Statin","HbA1c","BMI","Sex","Age"),
c("previous_CVD","Statin","Smoking","SBP","eGFR","HbA1c","Sex"),
c("SBP","T2D_duration"),
c("SBP","LDL-C"),
c("SBP","Total_cholesterol"),
c("SBP","Antidiabetic_drugs"),
c("SBP","Statin"),
c("SBP","AF"),
c("T2D_duration","LDL-C","Statin","BMI"),
c("T2D_duration","Total_cholesterol","Statin","BMI"),
c("T2D_duration","Antihypertensive_drugs"),
c("T2D_duration","eGFR","Age","BMI"),
c("T2D_duration","AF"),
c("LDL-C","Total_cholesterol","Statin","BMI"),
c("LDL-C","Antihypertensive_drugs"),
c("LDL-C","Antidiabetic_drugs","T2D_duration"),
c("LDL-C","Antidiabetic_drugs","BMI","Statin"),
c("LDL-C","eGFR","Age","BMI"),
c("LDL-C","eGFR","BMI","Statin"),
c("LDL-C","AF"),
c("Total_cholesterol","Antihypertensive_drugs"),
c("Total_cholesterol","Antidiabetic_drugs","T2D_duration"),
c("Total_cholesterol","Antidiabetic_drugs","BMI","Statin"),
c("Total_cholesterol","eGFR","Age","BMI"),
c("Total_cholesterol","eGFR","BMI","Statin"),
c("Total_cholesterol","AF"),
c("Antihypertensive_drugs","Antidiabetic_drugs"),
c("Antihypertensive_drugs","Statin"),
c("Antihypertensive_drugs","eGFR","SBP"),
c("Antihypertensive_drugs","AF"),
c("Antidiabetic_drugs","Statin","T2D_duration"),
c("Antidiabetic_drugs","eGFR","Age","BMI"),
c("Antidiabetic_drugs","eGFR","T2D_duration"),
c("Antidiabetic_drugs","AF"),
c("Statin","eGFR","Age"),
c("Statin","AF"),
c("eGFR","AF"))
data.frame( implication=unlist(lapply(implications,tos)),
pvalue=unlist( lapply( implications, tst ) ) )
} | /MACE/DAG.R | no_license | chrnowak/MACE | R | false | false | 15,303 | r | ################################################################################
##### Directed Acyclid Graph ##################################################
################################################################################
## For browser-based:
dag {
"LDL-C" [pos="0.296,0.447"]
AF [pos="0.653,-0.006"]
Age [pos="-0.013,0.458"]
Antidiabetic_drugs [pos="0.609,1.003"]
Antihypertensive_drugs [pos="0.475,0.019"]
BMI [pos="0.022,0.083"]
HbA1c [pos="0.232,0.255"]
MACE [outcome,pos="0.897,0.596"]
Microalbuminuria [pos="0.424,0.772"]
Protein [exposure,pos="0.643,0.774"]
SBP [pos="0.173,-0.052"]
Sex [pos="-0.019,0.259"]
Smoking [pos="0.100,0.019"]
Statin [pos="0.145,0.518"]
T2D_duration [pos="0.419,1.001"]
Total_cholesterol [pos="0.156,0.736"]
eGFR [pos="0.292,0.591"]
previous_CVD [pos="0.471,0.462"]
"LDL-C" -> HbA1c
"LDL-C" -> MACE
AF -> MACE
AF -> Protein
AF -> previous_CVD
Age -> HbA1c
Age -> Microalbuminuria [pos="-0.006,1.030"]
Age -> Protein
Age -> Statin
Age -> T2D_duration [pos="0.011,1.032"]
Age -> eGFR [pos="0.190,0.362"]
Antidiabetic_drugs -> MACE [pos="0.786,0.894"]
Antihypertensive_drugs -> MACE
Antihypertensive_drugs -> previous_CVD
BMI -> "LDL-C"
BMI -> HbA1c
BMI -> Protein
BMI -> T2D_duration [pos="-0.023,0.950"]
BMI -> Total_cholesterol [pos="0.032,0.498"]
BMI -> eGFR [pos="0.068,0.271"]
HbA1c -> MACE [pos="0.351,0.233"]
HbA1c -> previous_CVD
Microalbuminuria -> MACE
Microalbuminuria -> Protein
Protein -> MACE
SBP -> Antihypertensive_drugs
SBP -> MACE
SBP -> Protein [pos="0.465,0.177"]
SBP -> eGFR [pos="0.071,0.206"]
SBP -> previous_CVD
Sex -> HbA1c
Sex -> MACE [pos="0.256,-0.052"]
Sex -> Microalbuminuria [pos="0.463,0.344"]
Sex -> Protein
Sex -> eGFR
Sex -> previous_CVD
Smoking -> MACE
Smoking -> Protein [pos="0.514,0.173"]
Smoking -> SBP
Smoking -> eGFR [pos="0.084,0.299"]
Smoking -> previous_CVD
Statin -> "LDL-C"
Statin -> T2D_duration
Statin -> Total_cholesterol
T2D_duration -> Antidiabetic_drugs
T2D_duration -> MACE [pos="0.774,0.832"]
T2D_duration -> Protein
Total_cholesterol -> MACE
eGFR -> MACE
eGFR -> Microalbuminuria
eGFR -> previous_CVD [pos="0.383,0.462"]
previous_CVD -> MACE
previous_CVD -> Protein
}
## for R:
testImplications <- function( covariance.matrix, sample.size ){
library(ggm)
tst <- function(i){ pcor.test( pcor(i,covariance.matrix), length(i)-2, sample.size )$pvalue }
tos <- function(i){ paste(i,collapse=" ") }
implications <- list(c("MACE","Age","BMI","T2D_duration","Sex","Microalbuminuria","Smoking","SBP","LDL-C","HbA1c","Protein","Statin","eGFR","previous_CVD","AF"),
c("MACE","Age","T2D_duration","Total_cholesterol","LDL-C","SBP","eGFR","HbA1c","Protein","Sex","Smoking","Microalbuminuria","previous_CVD","AF"),
c("MACE","BMI","T2D_duration","Sex","Microalbuminuria","Smoking","SBP","LDL-C","HbA1c","Protein","eGFR","previous_CVD","AF","Total_cholesterol"),
c("MACE","Statin","BMI","T2D_duration","Total_cholesterol","LDL-C","Age"),
c("MACE","Statin","T2D_duration","Sex","Microalbuminuria","Smoking","SBP","LDL-C","HbA1c","Protein","eGFR","previous_CVD","AF","Total_cholesterol"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","LDL-C"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","Statin"),
c("Protein","HbA1c","BMI","T2D_duration","Age","AF","SBP","Smoking","Sex","eGFR","previous_CVD"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","LDL-C"),
c("Protein","HbA1c","AF","SBP","Smoking","Sex","previous_CVD","BMI","Age","Statin","Microalbuminuria"),
c("Protein","HbA1c","BMI","T2D_duration","Age","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria"),
c("Protein","LDL-C","HbA1c","Statin","BMI","Sex","Age"),
c("Protein","LDL-C","BMI","T2D_duration","Age","HbA1c","Sex"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","Statin"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","T2D_duration","Age"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","Statin"),
c("Protein","LDL-C","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","T2D_duration"),
c("Protein","Total_cholesterol","Statin","BMI"),
c("Protein","Total_cholesterol","BMI","T2D_duration","LDL-C","Age"),
c("Protein","Total_cholesterol","HbA1c","BMI","Sex","Age","T2D_duration"),
c("Protein","Total_cholesterol","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","T2D_duration"),
c("Protein","Total_cholesterol","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","T2D_duration"),
c("Protein","Antihypertensive_drugs","AF","Smoking","Sex","HbA1c","eGFR","previous_CVD","SBP"),
c("Protein","Antihypertensive_drugs","eGFR","previous_CVD","AF","SBP","Smoking","Sex","BMI","Age","LDL-C"),
c("Protein","Antihypertensive_drugs","BMI","Sex","Age","Statin","eGFR","previous_CVD","AF","SBP","Smoking"),
c("Protein","Antihypertensive_drugs","Age","BMI","T2D_duration","Sex","eGFR","previous_CVD","AF","SBP","Smoking"),
c("Protein","Antihypertensive_drugs","previous_CVD","Microalbuminuria","AF","SBP","Smoking","Sex","HbA1c","Age","BMI"),
c("Protein","Antihypertensive_drugs","previous_CVD","Microalbuminuria","AF","SBP","Smoking","Sex","Age","BMI","LDL-C"),
c("Protein","Antihypertensive_drugs","BMI","Sex","Age","Statin","previous_CVD","Microalbuminuria","AF","SBP","Smoking"),
c("Protein","Antihypertensive_drugs","Age","BMI","T2D_duration","Sex","previous_CVD","Microalbuminuria","AF","SBP","Smoking"),
c("Protein","Antidiabetic_drugs","T2D_duration"),
c("Protein","Statin","BMI","T2D_duration","LDL-C","Age"),
c("Protein","Statin","HbA1c","BMI","Sex","Age","T2D_duration"),
c("Protein","Statin","AF","SBP","Smoking","Sex","eGFR","previous_CVD","BMI","Age","T2D_duration"),
c("Protein","Statin","AF","SBP","Smoking","Sex","previous_CVD","Microalbuminuria","Age","BMI","T2D_duration"),
c("Protein","eGFR","AF","SBP","Smoking","Sex","HbA1c","previous_CVD","Microalbuminuria","Age","BMI"),
c("Protein","eGFR","previous_CVD","AF","SBP","Smoking","Sex","BMI","Age","LDL-C","Microalbuminuria"),
c("Protein","eGFR","BMI","Sex","Age","Statin","previous_CVD","AF","SBP","Smoking","Microalbuminuria"),
c("Protein","eGFR","Age","BMI","T2D_duration","Sex","previous_CVD","AF","SBP","Smoking","Microalbuminuria"),
c("Sex","Age"),
c("Sex","BMI"),
c("Sex","Smoking"),
c("Sex","SBP"),
c("Sex","T2D_duration"),
c("Sex","LDL-C"),
c("Sex","Total_cholesterol"),
c("Sex","Antihypertensive_drugs"),
c("Sex","Antidiabetic_drugs"),
c("Sex","Statin"),
c("Sex","AF"),
c("Age","BMI"),
c("Age","Smoking"),
c("Age","previous_CVD","SBP","Smoking","Sex","HbA1c","eGFR"),
c("Age","SBP"),
c("Age","LDL-C","Statin"),
c("Age","Total_cholesterol","Statin"),
c("Age","Antihypertensive_drugs"),
c("Age","Antidiabetic_drugs","T2D_duration"),
c("Age","AF"),
c("HbA1c","Smoking"),
c("HbA1c","Microalbuminuria","eGFR","Age","Sex"),
c("HbA1c","Microalbuminuria","Sex","Age","BMI"),
c("HbA1c","SBP"),
c("HbA1c","T2D_duration","Statin","Age","BMI"),
c("HbA1c","T2D_duration","Age","BMI","LDL-C"),
c("HbA1c","Total_cholesterol","Statin","BMI"),
c("HbA1c","Total_cholesterol","BMI","LDL-C","Age"),
c("HbA1c","Antihypertensive_drugs"),
c("HbA1c","Antidiabetic_drugs","T2D_duration"),
c("HbA1c","Antidiabetic_drugs","Statin","Age","BMI"),
c("HbA1c","Antidiabetic_drugs","Age","BMI","LDL-C"),
c("HbA1c","Statin","BMI","LDL-C","Age"),
c("HbA1c","eGFR","Sex","Age","BMI"),
c("HbA1c","AF"),
c("BMI","Smoking"),
c("BMI","Microalbuminuria","eGFR","Age","Sex"),
c("BMI","previous_CVD","SBP","Smoking","Sex","HbA1c","eGFR"),
c("BMI","SBP"),
c("BMI","Antihypertensive_drugs"),
c("BMI","Antidiabetic_drugs","T2D_duration"),
c("BMI","Statin"),
c("BMI","AF"),
c("Smoking","Microalbuminuria","eGFR","Age","Sex"),
c("Smoking","T2D_duration"),
c("Smoking","LDL-C"),
c("Smoking","Total_cholesterol"),
c("Smoking","Antihypertensive_drugs","SBP"),
c("Smoking","Antidiabetic_drugs"),
c("Smoking","Statin"),
c("Smoking","AF"),
c("Microalbuminuria","previous_CVD","SBP","Smoking","Sex","HbA1c","eGFR"),
c("Microalbuminuria","previous_CVD","Age","Sex","eGFR"),
c("Microalbuminuria","SBP","eGFR","Sex","Age"),
c("Microalbuminuria","T2D_duration","Age","BMI"),
c("Microalbuminuria","T2D_duration","eGFR","Sex","Age"),
c("Microalbuminuria","LDL-C","Statin","BMI"),
c("Microalbuminuria","LDL-C","BMI","Age"),
c("Microalbuminuria","LDL-C","eGFR","Sex","Age"),
c("Microalbuminuria","Total_cholesterol","Statin","BMI"),
c("Microalbuminuria","Total_cholesterol","BMI","Age"),
c("Microalbuminuria","Total_cholesterol","eGFR","Sex","Age"),
c("Microalbuminuria","Antihypertensive_drugs","SBP"),
c("Microalbuminuria","Antihypertensive_drugs","eGFR","Sex","Age"),
c("Microalbuminuria","Antidiabetic_drugs","T2D_duration"),
c("Microalbuminuria","Antidiabetic_drugs","Age","BMI"),
c("Microalbuminuria","Antidiabetic_drugs","eGFR","Sex","Age"),
c("Microalbuminuria","Statin","Age"),
c("Microalbuminuria","AF"),
c("previous_CVD","T2D_duration","Statin","Age","BMI"),
c("previous_CVD","T2D_duration","Age","BMI","LDL-C"),
c("previous_CVD","T2D_duration","HbA1c","BMI","Sex","Age"),
c("previous_CVD","T2D_duration","HbA1c","Sex","SBP","eGFR","Smoking"),
c("previous_CVD","LDL-C","BMI","Age","HbA1c","Sex"),
c("previous_CVD","LDL-C","Smoking","SBP","eGFR","HbA1c","Sex"),
c("previous_CVD","Total_cholesterol","Statin","BMI"),
c("previous_CVD","Total_cholesterol","BMI","LDL-C","Age"),
c("previous_CVD","Total_cholesterol","HbA1c","BMI","Sex","Age"),
c("previous_CVD","Total_cholesterol","Smoking","SBP","eGFR","HbA1c","Sex"),
c("previous_CVD","Antidiabetic_drugs","T2D_duration"),
c("previous_CVD","Antidiabetic_drugs","Statin","Age","BMI"),
c("previous_CVD","Antidiabetic_drugs","Age","BMI","LDL-C"),
c("previous_CVD","Antidiabetic_drugs","HbA1c","BMI","Sex","Age"),
c("previous_CVD","Antidiabetic_drugs","HbA1c","Sex","SBP","eGFR","Smoking"),
c("previous_CVD","Statin","BMI","LDL-C","Age"),
c("previous_CVD","Statin","HbA1c","BMI","Sex","Age"),
c("previous_CVD","Statin","Smoking","SBP","eGFR","HbA1c","Sex"),
c("SBP","T2D_duration"),
c("SBP","LDL-C"),
c("SBP","Total_cholesterol"),
c("SBP","Antidiabetic_drugs"),
c("SBP","Statin"),
c("SBP","AF"),
c("T2D_duration","LDL-C","Statin","BMI"),
c("T2D_duration","Total_cholesterol","Statin","BMI"),
c("T2D_duration","Antihypertensive_drugs"),
c("T2D_duration","eGFR","Age","BMI"),
c("T2D_duration","AF"),
c("LDL-C","Total_cholesterol","Statin","BMI"),
c("LDL-C","Antihypertensive_drugs"),
c("LDL-C","Antidiabetic_drugs","T2D_duration"),
c("LDL-C","Antidiabetic_drugs","BMI","Statin"),
c("LDL-C","eGFR","Age","BMI"),
c("LDL-C","eGFR","BMI","Statin"),
c("LDL-C","AF"),
c("Total_cholesterol","Antihypertensive_drugs"),
c("Total_cholesterol","Antidiabetic_drugs","T2D_duration"),
c("Total_cholesterol","Antidiabetic_drugs","BMI","Statin"),
c("Total_cholesterol","eGFR","Age","BMI"),
c("Total_cholesterol","eGFR","BMI","Statin"),
c("Total_cholesterol","AF"),
c("Antihypertensive_drugs","Antidiabetic_drugs"),
c("Antihypertensive_drugs","Statin"),
c("Antihypertensive_drugs","eGFR","SBP"),
c("Antihypertensive_drugs","AF"),
c("Antidiabetic_drugs","Statin","T2D_duration"),
c("Antidiabetic_drugs","eGFR","Age","BMI"),
c("Antidiabetic_drugs","eGFR","T2D_duration"),
c("Antidiabetic_drugs","AF"),
c("Statin","eGFR","Age"),
c("Statin","AF"),
c("eGFR","AF"))
data.frame( implication=unlist(lapply(implications,tos)),
pvalue=unlist( lapply( implications, tst ) ) )
} |
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962340043e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_omitMatrix/AFL_cpp_omitMatrix/cpp_omitMatrix_valgrind_files/1615845735-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,090 | r | testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962340043e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) |
---
title: "Time_Series_Template_in_R"
author: "Sharath"
date: "07/08/2020"
output: html_document
editor_options:
chunk_output_type: console
---
library(kernlab)
library(kknn)
library(caret)
library(ggplot2)
library(dplyr)
library(lubridate)
library(MLmetrics)
library(forecast)
library(GeneCycle)
###################################################################################
#Setting the seed to retain the randomness and reading the temps file
###################################################################################
set.seed(1245)
data = read.delim("303_Train.csv", sep = ",")
###################################################################################
#Convert the data into timeseries
###################################################################################
#Train from 2017 January to 2019 November
data.full = ts(data,start=c(2017,1),end=c(2019,365),frequency=365)
cat(data.full)
data.ts = ts(data,start=c(2017,1),end=c(2019,334),frequency=365)
cat(data.ts)
#Test on 2019 December
data.test = window(data.full,start=c(2019,335),end=c(2019,365),frequency=365)
cat(data.test)
###################################################################################
#Preliminary data analysis
###################################################################################
autoplot(data.ts) +
ggtitle("Bookings - Actuals from 2017 to 2019") +
ylab("Booking counts per day")
###################################################################################
#Difference of data
###################################################################################
diff.ts = diff(data.ts)
###################################################################################
#Preliminary analysis of differenced data
###################################################################################
autoplot(diff.ts) +
ggtitle("Bookings - Actuals from 2017 to 2019") +
ylab("Booking counts per day")
###################################################################################
#Seasonality analysis
###################################################################################
ggseasonplot(diff.ts) +
ggtitle("Seasonal plot - bookings") +
ylab("Booking counts per day")
###################################################################################
## Fit an exponential smoothing (ETS) state-space model to the training series
###################################################################################
es_fit <- ets(data.ts)
print(summary(es_fit))
checkresiduals(es_fit) #20.19
es_fit$fitted
###################################################################################
# ES training accuracy metrics (computed on the in-sample fitted values)
###################################################################################
# NOTE: RMSE()/MAPE() come from MLmetrics; MAPE() returns a fraction, so it is
# scaled by 100 below to report a percentage, consistent with the other sections.
rmse <- RMSE(es_fit$fitted, data.ts)
cat("RMSE for ES model is found to be : ", rmse)
mape <- MAPE(es_fit$fitted, data.ts)
cat("MAPE for ES model is found to be : ", round(mape*100, 2))
###################################################################################
# Exponential smoothing forecast for the next 31 days (December test window)
###################################################################################
es.fcst <- forecast(es_fit, h=31)
###################################################################################
# ES testing RMSE and MAPE (forecast vs held-out December actuals)
###################################################################################
rmse <- RMSE(es.fcst$mean, data.test)
cat("RMSE for ES model is found to be : ", rmse)
mape <- MAPE(es.fcst$mean, data.test)
cat("MAPE for ES model is found to be : ", round(mape*100, 2))
###################################################################################
# Plot the ES forecast and compare with actuals
###################################################################################
autoplot(es.fcst,PI = FALSE) +
  autolayer(data.test) +
  scale_x_continuous(limits = c(2019.6,2020.1)) +
  autolayer(es.fcst$mean, series="Forecasts")
#autolayer(es.fcst$mean, series="Forecasts")
###################################################################################
# Fit an ARIMA model (exhaustive search; d = D = 1 forces one regular and one
# seasonal difference instead of letting auto.arima() choose)
###################################################################################
ar_fit <- auto.arima(data.ts, d=1, D=1, stepwise=FALSE, approximation = FALSE, trace = TRUE)
#ar_fit <- auto.arima(data.ts)
print(summary(ar_fit))
checkresiduals(ar_fit) #20.19
###################################################################################
# ARIMA training RMSE and MAPE
###################################################################################
rmse <- RMSE(ar_fit$fitted, data.ts)
cat("RMSE for ARIMA model is found to be : ", rmse)
mape <- MAPE(ar_fit$fitted, data.ts)
cat("MAPE for ARIMA model is found to be : ", round(mape*100, 2))
###################################################################################
# ARIMA forecast for the next 31 days
###################################################################################
ar.fcst <- forecast(ar_fit, h=31)
###################################################################################
# ARIMA testing RMSE and MAPE
###################################################################################
rmse <- RMSE(ar.fcst$mean, data.test)
cat("RMSE for ARIMA model is found to be : ", rmse)
mape <- MAPE(ar.fcst$mean, data.test)
cat("MAPE for ARIMA model is found to be : ", round(mape*100, 2))
###################################################################################
# Write the ARIMA forecasts into a csv file
###################################################################################
write.csv(ar.fcst$mean, "303_Arima_actual_forecast.csv")
###################################################################################
# Plot the ARIMA forecast vs actuals
###################################################################################
autoplot(ar.fcst,PI = FALSE) +
  autolayer(data.test) +
  scale_x_continuous(limits = c(2019.6,2020.1)) +
  # Fixed: overlay the ARIMA forecast here (the original layered es.fcst$mean,
  # an apparent copy-paste slip from the ES plotting section above).
  autolayer(ar.fcst$mean, series="Forecasts")
#coord_cartesian(xlim = c(2019.6, 2020.1), ylim = c(0, 150))
#coord_cartesian(xlim = c(2019.6, 2020.1), ylim = c(0, 150))
#autolayer(es.fcst$mean, series="Forecasts")
| /R/TimeSeriesinR.R | no_license | sharathnatraj/TimeSeriesForecastinginR | R | false | false | 6,054 | r | ---
title: "Time_Series_Template_in_R"
author: "Sharath"
date: "07/08/2020"
output: html_document
editor_options:
chunk_output_type: console
---
# Package dependencies: forecast (ets/auto.arima/ggseasonplot), MLmetrics
# (RMSE/MAPE) and ggplot2 (autoplot) are used below; the remaining libraries
# are loaded for the wider template.
library(kernlab)
library(kknn)
library(caret)
library(ggplot2)
library(dplyr)
library(lubridate)
library(MLmetrics)
library(forecast)
library(GeneCycle)
###################################################################################
# Set the seed for reproducibility and read the training data
###################################################################################
set.seed(1245)
data = read.delim("303_Train.csv", sep = ",")
###################################################################################
# Convert the data into a daily time series (frequency = 365)
###################################################################################
# Train from 2017 January to 2019 November
data.full = ts(data,start=c(2017,1),end=c(2019,365),frequency=365)
cat(data.full)
data.ts = ts(data,start=c(2017,1),end=c(2019,334),frequency=365)
cat(data.ts)
# Hold out December 2019 (days 335-365) as the test window
data.test = window(data.full,start=c(2019,335),end=c(2019,365),frequency=365)
cat(data.test)
###################################################################################
# Preliminary data analysis: plot the raw training series
###################################################################################
autoplot(data.ts) +
  ggtitle("Bookings - Actuals from 2017 to 2019") +
  ylab("Booking counts per day")
###################################################################################
# First difference of the data (removes trend before inspecting seasonality)
###################################################################################
diff.ts = diff(data.ts)
###################################################################################
# Preliminary analysis of the differenced data
###################################################################################
autoplot(diff.ts) +
  ggtitle("Bookings - Actuals from 2017 to 2019") +
  ylab("Booking counts per day")
###################################################################################
# Seasonality analysis of the differenced series
###################################################################################
ggseasonplot(diff.ts) +
  ggtitle("Seasonal plot - bookings") +
  ylab("Booking counts per day")
###################################################################################
## Fit an exponential smoothing (ETS) state-space model to the training series
###################################################################################
es_fit <- ets(data.ts)
print(summary(es_fit))
checkresiduals(es_fit) #20.19
es_fit$fitted
###################################################################################
# ES training accuracy metrics (computed on the in-sample fitted values)
###################################################################################
# NOTE: RMSE()/MAPE() come from MLmetrics; MAPE() returns a fraction, so it is
# scaled by 100 below to report a percentage, consistent with the other sections.
rmse <- RMSE(es_fit$fitted, data.ts)
cat("RMSE for ES model is found to be : ", rmse)
mape <- MAPE(es_fit$fitted, data.ts)
cat("MAPE for ES model is found to be : ", round(mape*100, 2))
###################################################################################
# Exponential smoothing forecast for the next 31 days (December test window)
###################################################################################
es.fcst <- forecast(es_fit, h=31)
###################################################################################
# ES testing RMSE and MAPE (forecast vs held-out December actuals)
###################################################################################
rmse <- RMSE(es.fcst$mean, data.test)
cat("RMSE for ES model is found to be : ", rmse)
mape <- MAPE(es.fcst$mean, data.test)
cat("MAPE for ES model is found to be : ", round(mape*100, 2))
###################################################################################
# Plot the ES forecast and compare with actuals
###################################################################################
autoplot(es.fcst,PI = FALSE) +
  autolayer(data.test) +
  scale_x_continuous(limits = c(2019.6,2020.1)) +
  autolayer(es.fcst$mean, series="Forecasts")
#autolayer(es.fcst$mean, series="Forecasts")
###################################################################################
# Fit an ARIMA model (exhaustive search; d = D = 1 forces one regular and one
# seasonal difference instead of letting auto.arima() choose)
###################################################################################
ar_fit <- auto.arima(data.ts, d=1, D=1, stepwise=FALSE, approximation = FALSE, trace = TRUE)
#ar_fit <- auto.arima(data.ts)
print(summary(ar_fit))
checkresiduals(ar_fit) #20.19
###################################################################################
# ARIMA training RMSE and MAPE
###################################################################################
rmse <- RMSE(ar_fit$fitted, data.ts)
cat("RMSE for ARIMA model is found to be : ", rmse)
mape <- MAPE(ar_fit$fitted, data.ts)
cat("MAPE for ARIMA model is found to be : ", round(mape*100, 2))
###################################################################################
# ARIMA forecast for the next 31 days
###################################################################################
ar.fcst <- forecast(ar_fit, h=31)
###################################################################################
# ARIMA testing RMSE and MAPE
###################################################################################
rmse <- RMSE(ar.fcst$mean, data.test)
cat("RMSE for ARIMA model is found to be : ", rmse)
mape <- MAPE(ar.fcst$mean, data.test)
cat("MAPE for ARIMA model is found to be : ", round(mape*100, 2))
###################################################################################
# Write the ARIMA forecasts into a csv file
###################################################################################
write.csv(ar.fcst$mean, "303_Arima_actual_forecast.csv")
###################################################################################
# Plot the ARIMA forecast vs actuals
###################################################################################
autoplot(ar.fcst,PI = FALSE) +
  autolayer(data.test) +
  scale_x_continuous(limits = c(2019.6,2020.1)) +
  # Fixed: overlay the ARIMA forecast here (the original layered es.fcst$mean,
  # an apparent copy-paste slip from the ES plotting section above).
  autolayer(ar.fcst$mean, series="Forecasts")
#coord_cartesian(xlim = c(2019.6, 2020.1), ylim = c(0, 150))
#coord_cartesian(xlim = c(2019.6, 2020.1), ylim = c(0, 150))
#autolayer(es.fcst$mean, series="Forecasts")
|
# "HCPC" tab: interactive hierarchical clustering on principal components.
# Left column (width 3) holds the control, right column (width 8) the plots.
tabPanel("HCPC",
         column(3,
         tags$h4(""),
         # Number of components kept before clustering (2..10)
         sliderInput(inputId = "sliderhcpc",label = "Select number of components", min=2, max=10, step = 1, value = 2)
         ),
         column(8,
         tags$h3("Interactive hierarchical cluster on PCA"),
         # Placeholder for server-side alert messages (shinyBS)
         bsAlert("hcpcmessage"),
         # Side-by-side: HCPC factor map and cluster plot
         fluidRow(
         column(width = 6,
         plotOutput("hcpc")
         ),
         column(width=6,
         plotOutput("clusterhcpc"))
         ),
         br(),
         # Full-width dendrogram underneath
         fluidRow(
         plotOutput("dendrohcpc")
         )
         )
) #tabpanel hierachicalplot | /ui-hcpc.R | no_license | fpsanz/pcaExplorer | R | false | false | 717 | r | tabPanel("HCPC",
column(3,
tags$h4(""),
sliderInput(inputId = "sliderhcpc",label = "Select number of components", min=2, max=10, step = 1, value = 2)
),
column(8,
tags$h3("Interactive hierarchical cluster on PCA"),
bsAlert("hcpcmessage"),
fluidRow(
column(width = 6,
plotOutput("hcpc")
),
column(width=6,
plotOutput("clusterhcpc"))
),
br(),
fluidRow(
plotOutput("dendrohcpc")
)
)
) #tabpanel hierachicalplot |
#' Attempts to install a package directly from github.
#'
#' This function is vectorised so you can install multiple packages in
#' a single command.
#'
#' @param username Github username
#' @param repo Repo name
#' @param ref Desired git reference. Could be a commit, tag, or branch
#' name. Defaults to \code{"master"}.
#' @param pull Desired pull request. A pull request refers to a branch,
#' so you can't specify both \code{ref} and \code{pull}; one of
#' them must be \code{NULL} (since \code{ref} defaults to "master",
#' pass \code{ref = NULL} when using \code{pull}).
#' @param subdir subdirectory within repo that contains the R package.
#' @param branch Deprecated. Use \code{ref} instead.
#' @param auth_user your github username if you're attempting to install
#' a package hosted in a private repository (and your username is different
#' to \code{username})
#' @param password your github password
#' @param ... Other arguments passed on to \code{\link{install}}.
#' @export
#' @family package installation
#' @examples
#' \dontrun{
#' install_github("roxygen")
#' }
#' @importFrom httr authenticate
install_github <- function(repo, username = getOption("github.user"),
  ref = "master", pull = NULL, subdir = NULL, branch = NULL, auth_user = NULL, password = NULL, ...) {
  # Backwards compatibility: 'branch' was renamed to 'ref'.
  if (!is.null(branch)) {
    warning("'branch' is deprecated. In the future, please use 'ref' instead.")
    ref <- branch
  }
  # Exactly one of 'ref' / 'pull' may be supplied; 'ref' defaults to
  # "master", so callers must pass ref = NULL when requesting a pull.
  if (!xor(is.null(pull), is.null(ref))) {
    stop("Must specify either a ref or a pull request, not both. ",
      "Perhaps you want to use 'ref=NULL'?")
  }
  # A pull request implicitly names the source fork (username) and its
  # branch (ref); resolve them via the GitHub API.
  if (!is.null(pull)) {
    pullinfo <- github_pull_info(repo, username, pull)
    username <- pullinfo$username
    ref <- pullinfo$ref
  }
  # Basic auth is only needed for private repositories.
  if (!is.null(password)) {
    auth <- authenticate(
      user = auth_user %||% username,
      password = password,
      type = "basic")
  } else {
    auth <- list()
  }
  message("Installing github repo(s) ",
    paste(repo, ref, sep = "/", collapse = ", "),
    " from ",
    paste(username, collapse = ", "))
  url <- paste0("https://github.com/", username, "/", repo,
    "/archive/", ref, ".zip")
  # If there are slashes in the ref, the URL will have extra slashes, but the
  # downloaded file shouldn't have them.
  # install_github("shiny", "rstudio", "v/0/2/1")
  # URL: https://github.com/rstudio/shiny/archive/v/0/2/1.zip
  # Output file: shiny.zip
  install_url(url, name = paste0(repo, ".zip"), subdir = subdir,
    config = auth, ...)
}
# Retrieve the username and ref for a pull request
#' @importFrom httr parsed_content
github_pull_info <- function(repo, username, pull) {
  # Query the GitHub API for the pull request:
  #   GET /repos/:user/:repo/pulls/:number
  api_host <- "https://api.github.com"
  pull_path <- paste("repos", username, repo, "pulls", pull, sep = "/")
  resp <- GET(api_host, path = pull_path)
  stop_for_status(resp)
  # The PR's "head" identifies the source fork and branch to install from.
  pr_head <- parsed_content(resp)$head
  list(
    repo = pr_head$repo$name,
    username = pr_head$repo$owner$login,
    ref = pr_head$ref
  )
}
| /devtoolsVersion/devtools 13/R/install-github.r | no_license | connectthefuture/devtools-R-Forge | R | false | false | 2,918 | r | #' Attempts to install a package directly from github.
#'
#' This function is vectorised so you can install multiple packages in
#' a single command.
#'
#' @param username Github username
#' @param repo Repo name
#' @param ref Desired git reference. Could be a commit, tag, or branch
#' name. Defaults to \code{"master"}.
#' @param pull Desired pull request. A pull request refers to a branch,
#' so you can't specify both \code{ref} and \code{pull}; one of
#' them must be \code{NULL} (since \code{ref} defaults to "master",
#' pass \code{ref = NULL} when using \code{pull}).
#' @param subdir subdirectory within repo that contains the R package.
#' @param branch Deprecated. Use \code{ref} instead.
#' @param auth_user your github username if you're attempting to install
#' a package hosted in a private repository (and your username is different
#' to \code{username})
#' @param password your github password
#' @param ... Other arguments passed on to \code{\link{install}}.
#' @export
#' @family package installation
#' @examples
#' \dontrun{
#' install_github("roxygen")
#' }
#' @importFrom httr authenticate
install_github <- function(repo, username = getOption("github.user"),
  ref = "master", pull = NULL, subdir = NULL, branch = NULL, auth_user = NULL, password = NULL, ...) {
  # Backwards compatibility: 'branch' was renamed to 'ref'.
  if (!is.null(branch)) {
    warning("'branch' is deprecated. In the future, please use 'ref' instead.")
    ref <- branch
  }
  # Exactly one of 'ref' / 'pull' may be supplied; 'ref' defaults to
  # "master", so callers must pass ref = NULL when requesting a pull.
  if (!xor(is.null(pull), is.null(ref))) {
    stop("Must specify either a ref or a pull request, not both. ",
      "Perhaps you want to use 'ref=NULL'?")
  }
  # A pull request implicitly names the source fork (username) and its
  # branch (ref); resolve them via the GitHub API.
  if (!is.null(pull)) {
    pullinfo <- github_pull_info(repo, username, pull)
    username <- pullinfo$username
    ref <- pullinfo$ref
  }
  # Basic auth is only needed for private repositories.
  if (!is.null(password)) {
    auth <- authenticate(
      user = auth_user %||% username,
      password = password,
      type = "basic")
  } else {
    auth <- list()
  }
  message("Installing github repo(s) ",
    paste(repo, ref, sep = "/", collapse = ", "),
    " from ",
    paste(username, collapse = ", "))
  url <- paste0("https://github.com/", username, "/", repo,
    "/archive/", ref, ".zip")
  # If there are slashes in the ref, the URL will have extra slashes, but the
  # downloaded file shouldn't have them.
  # install_github("shiny", "rstudio", "v/0/2/1")
  # URL: https://github.com/rstudio/shiny/archive/v/0/2/1.zip
  # Output file: shiny.zip
  install_url(url, name = paste0(repo, ".zip"), subdir = subdir,
    config = auth, ...)
}
# Retrieve the username and ref for a pull request
#' @importFrom httr parsed_content
github_pull_info <- function(repo, username, pull) {
  # Query the GitHub API for the pull request:
  #   GET /repos/:user/:repo/pulls/:number
  api_host <- "https://api.github.com"
  pull_path <- paste("repos", username, repo, "pulls", pull, sep = "/")
  resp <- GET(api_host, path = pull_path)
  stop_for_status(resp)
  # The PR's "head" identifies the source fork and branch to install from.
  pr_head <- parsed_content(resp)$head
  list(
    repo = pr_head$repo$name,
    username = pr_head$repo$owner$login,
    ref = pr_head$ref
  )
}
|
## These functions create a cached matrix inversion object with helper
## methods to get/set the inverse and values on the object.
## The matrix inverse is cached after the first calculation so that it
## need not be recalculated on subsequent inversion calculation requests.
## The make cache matrix function creates a list object that has several
## embedded helper functions to set/get the value and inverse.
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of four accessor closures sharing this call's environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inv) cached_inverse <<- inv,
    getinverse = function() cached_inverse
  )
}
## This function solves the inverse of a matrix object that has been created
## with the makeCacheMatrix function.
## The inverse is cached once it has been calculated so it need not be
## recomputed on subsequent inversion requests.
## Return the inverse of a "cache matrix" created by makeCacheMatrix(),
## computing it at most once. On a cache hit the stored inverse is returned
## (after a message); on a miss the inverse is computed with solve(), stored
## via x$setinverse(), and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, memoise, then return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | ellisgiles/ProgrammingAssignment2 | R | false | false | 1,672 | r | ## These functions create a cached matrix inversion object with helper
## methods to get/set the inverse and values on the object.
## The matrix inverse is cached after the first calculation so that it
## need not be recalculated on subsequent inversion calculation requests.
## The make cache matrix function creates a list object that has several
## embedded helper functions to set/get the value and inverse.
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of four accessor closures sharing this call's environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inv) cached_inverse <<- inv,
    getinverse = function() cached_inverse
  )
}
## This function solves the inverse of a matrix object that has been created
## with the makeCacheMatrix function.
## The inverse is cached once it has been calculated so it need not be
## recomputed on subsequent inversion requests.
## Return the inverse of a "cache matrix" created by makeCacheMatrix(),
## computing it at most once. On a cache hit the stored inverse is returned
## (after a message); on a miss the inverse is computed with solve(), stored
## via x$setinverse(), and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, memoise, then return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
saveRDS(data.frame(), file = "data/dennys.rds")
| /parse_dennys.R | no_license | nicolegu/hw4 | R | false | false | 49 | r |
saveRDS(data.frame(), file = "data/dennys.rds")
|
# Exploratory plot 4: 2x2 panel of household power measurements for
# 2007-02-01/02 (UCI "Individual household electric power consumption" data).
# Download and read the data file ("?" encodes missing values)
url<-'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
path<-file.path(getwd(),"hpc.zip")
download.file(url,path)
household_power_consumption<-read.csv( unz(path, "household_power_consumption.txt"), sep=";", quote="",na="?")
dim(household_power_consumption) #2075259 9
# Subset to the two days of interest (dates are stored as d/m/Y strings)
hpc<-household_power_consumption[household_power_consumption$Date %in% c('1/2/2007','2/2/2007'),]
dim(hpc) #2880 9
# Build plot 4: combine Date + Time into a timestamp for the x-axis
hpc$DateTime<-paste(hpc$Date,hpc$Time)
hpc$DateTime<-strptime(hpc$DateTime,"%d/%m/%Y %H:%M:%S")
png(filename="plot4.png", width=480, height=480, units="px", bg="transparent")
par(mfrow=c(2,2)) # 2x2 grid, filled row-wise
# Top-left: global active power over time
plot(hpc$DateTime, hpc$Global_active_power, type="l",xlab="",ylab="Global Active Power")
# Top-right: voltage over time
plot(hpc$DateTime, hpc$Voltage, type="l",xlab="datetime",ylab="Voltage")
# Bottom-left: the three sub-metering series overlaid, with a legend
plot(hpc$DateTime, hpc$Sub_metering_1, type="l",col="black",xlab="", ylab="Enegry sub metering")
lines(hpc$DateTime, hpc$Sub_metering_2, col="red")
lines(hpc$DateTime, hpc$Sub_metering_3, col="blue")
legend("topright",bty="n",col=c("black","red","blue"), c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1)
# Bottom-right: global reactive power over time
plot(hpc$DateTime,hpc$Global_reactive_power, type="l",col="black",xlab="datetime",ylab=colnames(hpc)[4])
dev.off() # close the PNG device so the file is written
| /plot4.R | no_license | IgorOnyshchenko/Exploratory_data_analysis_in_R_v1 | R | false | false | 1,312 | r | #Download data file
# Exploratory plot 4: 2x2 panel of household power measurements for
# 2007-02-01/02. Download and read the data file ("?" encodes missing values).
url<-'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
path<-file.path(getwd(),"hpc.zip")
download.file(url,path)
household_power_consumption<-read.csv( unz(path, "household_power_consumption.txt"), sep=";", quote="",na="?")
dim(household_power_consumption) #2075259 9
# Subset to the two days of interest (dates are stored as d/m/Y strings)
hpc<-household_power_consumption[household_power_consumption$Date %in% c('1/2/2007','2/2/2007'),]
dim(hpc) #2880 9
# Build plot 4: combine Date + Time into a timestamp for the x-axis
hpc$DateTime<-paste(hpc$Date,hpc$Time)
hpc$DateTime<-strptime(hpc$DateTime,"%d/%m/%Y %H:%M:%S")
png(filename="plot4.png", width=480, height=480, units="px", bg="transparent")
par(mfrow=c(2,2)) # 2x2 grid, filled row-wise
# Top-left: global active power over time
plot(hpc$DateTime, hpc$Global_active_power, type="l",xlab="",ylab="Global Active Power")
# Top-right: voltage over time
plot(hpc$DateTime, hpc$Voltage, type="l",xlab="datetime",ylab="Voltage")
# Bottom-left: the three sub-metering series overlaid, with a legend
plot(hpc$DateTime, hpc$Sub_metering_1, type="l",col="black",xlab="", ylab="Enegry sub metering")
lines(hpc$DateTime, hpc$Sub_metering_2, col="red")
lines(hpc$DateTime, hpc$Sub_metering_3, col="blue")
legend("topright",bty="n",col=c("black","red","blue"), c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1)
# Bottom-right: global reactive power over time
plot(hpc$DateTime,hpc$Global_reactive_power, type="l",col="black",xlab="datetime",ylab=colnames(hpc)[4])
dev.off() # close the PNG device so the file is written
|
# Split the raw bike `model` string into engineered features:
#   model_base -- the product family (e.g. "Supersix Evo", "Fat CAAD")
#   model_tier -- the remaining trim/spec text after removing the base
#   plus 0/1 flags for common spec keywords (black, hi-mod, team, ...).
# `keep_model_column` controls whether the original `model` column is kept;
# `append = FALSE` drops every input column except `model` before splitting.
separate_bike_model <-
  function(data, keep_model_column = TRUE, append = TRUE) {
    if (!append) {
      data <- data %>% select(model)
    }
    # Fix known typos in the source data before any splitting
    output_tbl <- data %>%
      mutate(model = case_when(
        model == "CAAD Disc Ultegra" ~ "CAAD12 Disc Ultegra",
        model == "Syapse Carbon Tiagra" ~ "Synapse Carbon Tiagra",
        model == "Supersix Evo Hi-Mod Utegra" ~ "Supersix Evo Hi-Mod Ultegra",
        TRUE ~ model
      )) %>%
      # separate using spaces (up to 7 tokens; shorter names padded with NA)
      separate(col = model,
               into = str_c("model_", 1:7),
               sep = " ",
               remove = FALSE,
               fill = "right") %>%
      # creating a "base" feature; the case_when branches are order-dependent:
      # multi-word families must match before the single-token catch-all
      mutate(model_base = case_when(
        # Fix Supersix Evo
        str_detect(str_to_lower(model_1), "supersix") ~ str_c(model_1, model_2, sep = " "),
        # Fix Fat CAAD bikes
        str_detect(str_to_lower(model_1), "fat") ~ str_c(model_1, model_2, sep = " "),
        # Fix Beast of the East
        str_detect(str_to_lower(model_1), "beast") ~ str_c(model_1, model_2, model_3, model_4, sep = " "),
        # Fix Bad Habit
        str_detect(str_to_lower(model_1), "bad") ~ str_c(model_1, model_2, sep = " "),
        # Fix Scalpel 29
        str_detect(str_to_lower(model_2), "29") ~ str_c(model_1, model_2, sep = " "),
        # catch all: the first token is the base
        TRUE ~ model_1)
      ) %>%
      # Get "tier" feature: whatever remains after stripping the base name
      mutate(model_tier = model %>% str_replace(model_base, replacement = "") %>% str_trim()) %>%
      # Remove the intermediate model_1..model_7 token columns
      select(-matches("model_[0-9]")) %>%
      # Create 0/1 keyword flags from the tier text
      mutate(
        black = model_tier %>% str_to_lower() %>% str_detect("black") %>% as.numeric(),
        hi_mod = model_tier %>% str_to_lower() %>% str_detect("hi-mod") %>% as.numeric(),
        team = model_tier %>% str_to_lower() %>% str_detect("team") %>% as.numeric(),
        red = model_tier %>% str_to_lower() %>% str_detect("red") %>% as.numeric(),
        ultegra = model_tier %>% str_to_lower() %>% str_detect("ultegra") %>% as.numeric(),
        dura_ace = model_tier %>% str_to_lower() %>% str_detect("dura ace") %>% as.numeric(),
        disc = model_tier %>% str_to_lower() %>% str_detect("disc") %>% as.numeric()
      )
    if (!keep_model_column) output_tbl <- output_tbl %>% select(-model)
    return(output_tbl)
  }
# Split the `description` column ("cat1 - cat2 - frame") into three columns
# (category_1, category_2, frame_material). `description` itself is kept
# unless keep_description_column = FALSE; append = FALSE first drops every
# input column except `description`.
separate_bike_description <-
  function(data, keep_description_column = TRUE, append = TRUE) {
    if (!append) {
      data <- data %>% select(description)
    }
    output_tbl <- data %>%
      separate(
        description,
        into = c("category_1", "category_2", "frame_material"),
        sep = " - ",
        remove = FALSE
      )
    if (!keep_description_column) {
      output_tbl <- output_tbl %>% select(-description)
    }
    output_tbl
  }
| /price_prediction2/02_process_data.R | no_license | dukebird/prueba | R | false | false | 3,139 | r | separate_bike_model <-
function(data, keep_model_column = TRUE, append = TRUE) {
if (!append) {
data <- data %>% select(model)
}
# Fix typo
output_tbl <- data %>%
mutate(model = case_when(
model == "CAAD Disc Ultegra" ~ "CAAD12 Disc Ultegra",
model == "Syapse Carbon Tiagra" ~ "Synapse Carbon Tiagra",
model == "Supersix Evo Hi-Mod Utegra" ~ "Supersix Evo Hi-Mod Ultegra",
TRUE ~ model
)) %>%
# separate using spaces
separate(col = model,
into = str_c("model_", 1:7),
sep = " ",
remove = FALSE,
fill = "right") %>%
# creating a "base" feature
mutate(model_base = case_when(
# Fix Supersix Evo
str_detect(str_to_lower(model_1), "supersix") ~ str_c(model_1, model_2, sep = " "),
# Fix Fat CAAD bikes
str_detect(str_to_lower(model_1), "fat") ~ str_c(model_1, model_2, sep = " "),
# Fix Beast of the East
str_detect(str_to_lower(model_1), "beast") ~ str_c(model_1, model_2, model_3, model_4, sep = " "),
# Fix Bad Habit
str_detect(str_to_lower(model_1), "bad") ~ str_c(model_1, model_2, sep = " "),
# Fix Scalpel 29
str_detect(str_to_lower(model_2), "29") ~ str_c(model_1, model_2, sep = " "),
# catch all
TRUE ~ model_1)
) %>%
# Get "tier" feature
mutate(model_tier = model %>% str_replace(model_base, replacement = "") %>% str_trim()) %>%
# Remove unnecessary columns
select(-matches("model_[0-9]")) %>%
# Create Flags
mutate(
black = model_tier %>% str_to_lower() %>% str_detect("black") %>% as.numeric(),
hi_mod = model_tier %>% str_to_lower() %>% str_detect("hi-mod") %>% as.numeric(),
team = model_tier %>% str_to_lower() %>% str_detect("team") %>% as.numeric(),
red = model_tier %>% str_to_lower() %>% str_detect("red") %>% as.numeric(),
ultegra = model_tier %>% str_to_lower() %>% str_detect("ultegra") %>% as.numeric(),
dura_ace = model_tier %>% str_to_lower() %>% str_detect("dura ace") %>% as.numeric(),
disc = model_tier %>% str_to_lower() %>% str_detect("disc") %>% as.numeric()
)
if (!keep_model_column) output_tbl <- output_tbl %>% select(-model)
return(output_tbl)
}
# Split the `description` column ("cat1 - cat2 - frame") into three columns
# (category_1, category_2, frame_material). `description` itself is kept
# unless keep_description_column = FALSE; append = FALSE first drops every
# input column except `description`.
separate_bike_description <-
  function(data, keep_description_column = TRUE, append = TRUE) {
    if (!append) {
      data <- data %>% select(description)
    }
    output_tbl <- data %>%
      separate(
        description,
        into = c("category_1", "category_2", "frame_material"),
        sep = " - ",
        remove = FALSE
      )
    if (!keep_description_column) {
      output_tbl <- output_tbl %>% select(-description)
    }
    output_tbl
  }
|
#' @title Read table from Citavi database (via SQL)
#'
#' @param path Path to the local Citavi project file (.ctv6).
#' @param CitDBTableName Name of the table to be read from the connected Citavi database (via \code{DBI::dbReadTable()}).
#' Set to "Reference" by default. Shows all table names when set to NULL (via \code{DBI::dbListTables}).
#'
#' @details
#' `r lifecycle::badge("experimental")` \cr
#' The underlying core functions are \code{DBI::dbConnect()} \code{RSQLite::SQLite()}, \code{DBI::dbReadTable()} and \code{DBI::dbListTables}.
#' The database connection is closed on exit, even when reading fails.
#'
#' @examples
#' # example Citavi project
#' example_path <- example_file("3dupsin5refs/3dupsin5refs.ctv6")
#'
#' # import reference (=default) table
#' CitDat <- read_Citavi_ctv6(example_path)
#' CitDat %>% dplyr::select(Title, Year, Abstract, DOI)
#'
#' # show table names
#' read_Citavi_ctv6(example_path, CitDBTableName = NULL)
#'
#' @return A tibble
#' @importFrom RSQLite SQLite
#' @importFrom DBI dbConnect
#' @importFrom DBI dbDisconnect
#' @importFrom DBI dbListTables
#' @importFrom DBI dbReadTable
#' @importFrom dplyr as_tibble
#' @export
read_Citavi_ctv6 <- function(path = NULL, CitDBTableName = "Reference") {
  stopifnot(is.character(path)) # path required
  # Open the connection once and guarantee it is released even when
  # dbReadTable()/dbListTables() errors (the original only reached
  # dbDisconnect() on success, leaking the handle on failure).
  Citcon <- DBI::dbConnect(RSQLite::SQLite(), path)
  on.exit(DBI::dbDisconnect(Citcon), add = TRUE)
  if (is.null(CitDBTableName)) {
    # No table requested: return the vector of available table names.
    DBI::dbListTables(conn = Citcon)
  } else {
    # Read the requested table and return it as a tibble.
    CitDBTable <- DBI::dbReadTable(conn = Citcon, name = CitDBTableName)
    dplyr::as_tibble(CitDBTable)
  }
}
| /R/read_Citavi_ctv6.R | no_license | dl0s/CitaviR | R | false | false | 2,060 | r | #' @title Read table from Citavi database (via SQL)
#'
#' @param path Path to the local Citavi project file (.ctv6).
#' @param CitDBTableName Name of the table to be read from the connected Citavi database (via \code{DBI::dbReadTable()}).
#' Set to "Reference" by default. Shows all table names when set to NULL (via \code{DBI::dbListTables}).
#'
#' @details
#' `r lifecycle::badge("experimental")` \cr
#' The underlying core functions are \code{DBI::dbConnect()} \code{RSQLite::SQLite()}, \code{DBI::dbReadTable()} and \code{DBI::dbListTables}.
#'
#' @examples
#' # example Citavi project
#' example_path <- example_file("3dupsin5refs/3dupsin5refs.ctv6")
#'
#' # import reference (=default) table
#' CitDat <- read_Citavi_ctv6(example_path)
#' CitDat %>% dplyr::select(Title, Year, Abstract, DOI)
#'
#' # show table names
#' read_Citavi_ctv6(example_path, CitDBTableName = NULL)
#'
#' @return A tibble
#' @importFrom RSQLite SQLite
#' @importFrom DBI dbConnect
#' @importFrom DBI dbDisconnect
#' @importFrom DBI dbListTables
#' @importFrom DBI dbReadTable
#' @importFrom dplyr as_tibble
#' @export
read_Citavi_ctv6 <- function(path = NULL, CitDBTableName = "Reference") {
  stopifnot(is.character(path)) # path required
  # Open the connection once and guarantee it is released even when
  # dbReadTable()/dbListTables() errors (the original only reached
  # dbDisconnect() on success, leaking the handle on failure).
  Citcon <- DBI::dbConnect(RSQLite::SQLite(), path)
  on.exit(DBI::dbDisconnect(Citcon), add = TRUE)
  if (is.null(CitDBTableName)) {
    # No table requested: return the vector of available table names.
    DBI::dbListTables(conn = Citcon)
  } else {
    # Read the requested table and return it as a tibble.
    CitDBTable <- DBI::dbReadTable(conn = Citcon, name = CitDBTableName)
    dplyr::as_tibble(CitDBTable)
  }
}
|
# Extracted example from the scpm package help page ('scpm-package').
library(scpm)
### Name: scpm-package
### Title: 'An R Package for Spatial Smoothing'
### Aliases: scpm-package scpm
### ** Examples
require(geoR)
data(landim1, package = "geoR")
# Build an 'sss' spatial object: columns 1:2 are coordinates, 3:4 are data.
d <- as.sss(landim1, coords = NULL, coords.col = 1:2, data.col = 3:4)
##fitting spatial linear model with response A and covariate B
##Gneiting covariance function in the errors
#m0 <- scp(A ~ linear(~ B), data = d, model = "RMgneiting")
##adding a bivariate cubic spline based on the coordinates
#m1 <- scp(A ~ linear(~ B) + s2D(penalty = "cs"), data = d, model = "RMgneiting")
##plotting observed and estimated field from each model
#par(mfrow=c(2,2))
#plot(m0, what = "obs", type = "persp", main = "Model null - y")
#plot(m0, what = "fit", type = "persp", main = "Model null - fit")
#plot(m1, what = "obs", type = "persp", main = "Model alternative - y")
#plot(m1, what = "fit", type = "persp", main = "Model alternative - fit")
##plotting the estimated semivariogram from each model
#par(mfrow=c(1,2))
#Variogram(m0,main="Semivariogram - model null", ylim = c(0,0.7))
#Variogram(m1,main="Semivariogram - model alternative", ylim = c(0,0.7))
##summary of the estimated coefficients
#summary(m0)
#summary(m1)
##some information criteria
#AIC(m0)
#AIC(m1)
#AICm(m0)
#AICm(m1)
#AICc(m0)
#AICc(m1)
#BIC(m0)
#BIC(m1)
| /data/genthat_extracted_code/scpm/examples/scpm-package.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,300 | r | library(scpm)
# Extracted example from the scpm package help page ('scpm-package').
### Name: scpm-package
### Title: 'An R Package for Spatial Smoothing'
### Aliases: scpm-package scpm
### ** Examples
require(geoR)
data(landim1, package = "geoR")
# Build an 'sss' spatial object: columns 1:2 are coordinates, 3:4 are data.
d <- as.sss(landim1, coords = NULL, coords.col = 1:2, data.col = 3:4)
##fitting spatial linear model with response A and covariate B
##Gneiting covariance function in the errors
#m0 <- scp(A ~ linear(~ B), data = d, model = "RMgneiting")
##adding a bivariate cubic spline based on the coordinates
#m1 <- scp(A ~ linear(~ B) + s2D(penalty = "cs"), data = d, model = "RMgneiting")
##plotting observed and estimated field from each model
#par(mfrow=c(2,2))
#plot(m0, what = "obs", type = "persp", main = "Model null - y")
#plot(m0, what = "fit", type = "persp", main = "Model null - fit")
#plot(m1, what = "obs", type = "persp", main = "Model alternative - y")
#plot(m1, what = "fit", type = "persp", main = "Model alternative - fit")
##plotting the estimated semivariogram from each model
#par(mfrow=c(1,2))
#Variogram(m0,main="Semivariogram - model null", ylim = c(0,0.7))
#Variogram(m1,main="Semivariogram - model alternative", ylim = c(0,0.7))
##summary of the estimated coefficients
#summary(m0)
#summary(m1)
##some information criteria
#AIC(m0)
#AIC(m1)
#AICm(m0)
#AICm(m1)
#AICc(m0)
#AICc(m1)
#BIC(m0)
#BIC(m1)
|
# Predict classes for new samples with a trained random-forest classifier
# plus a calibration model, then save all prediction objects to .RData.
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is discouraged
library(randomForest)
library(glmnet)
source(file.path("code","R","train-pred_per_Samp.R"))  # provides predictPerSample()
# Load classifier (provides rf.pred)
mod <- "Capper" ### Change RF model accordingly
load(file.path("Results",paste0("rf.pred_",mod,".RData")))
probes_rf <- rownames(rf.pred$importance)  # probes the RF was trained on
# Load calibrating model (provides cv.calfit)
load(file.path("Results","CV",mod,"calfit.100.RData"))
# Example with training data set (GSE90496); loaded file must provide `new_samp`
set <- "training"
load(file.path("Results","betas_ba.RData")) ### Change file accordingly
predictions <- predictPerSample(new_samp=new_samp, rf_model=rf.pred, cal_model=cv.calfit)
rf.scores <- predictions$rf.scores
probs <- predictions$probs
# BUG FIX: pred.class and pred.class.cal were passed to save() below but were
# never defined, so save() failed with "object 'pred.class' not found".
# Extract them from the predictPerSample() result like the other components.
# TODO(review): confirm these element names against train-pred_per_Samp.R.
pred.class <- predictions$pred.class
pred.class.cal <- predictions$pred.class.cal
dir.create(file.path("Results","predictions",paste0("Capper_",mod,"_",set)), recursive = TRUE)
### Change file/directory name accordingly (RF used, samples predicted)
save(rf.scores, pred.class, probs, pred.class.cal, file=file.path("Results","predictions",
paste0("Capper_",mod,"_",set),"predictionNewSamples.RData"))
| /code/R/predictionNewSamples.R | no_license | cjthernandez/CNS_RF_Class | R | false | false | 1,063 | r | # Predicting classes for new samples
rm(list=ls())
library(randomForest)
library(glmnet)
source(file.path("code","R","train-pred_per_Samp.R"))
# Load classifier
mod <- "Capper" ### Change RF model accordingly
load(file.path("Results",paste0("rf.pred_",mod,".RData")))
probes_rf <- rownames(rf.pred$importance)
# Load calibrating model
load(file.path("Results","CV",mod,"calfit.100.RData"))
# Example with training data set (GSE90496)
set <- "training"
load(file.path("Results","betas_ba.RData")) ### Change file accordingly
predictions <- predictPerSample(new_samp=new_samp, rf_model=rf.pred, cal_model=cv.calfit)
rf.scores <- predictions$rf.scores
probs <- predictions$probs
dir.create(file.path("Results","predictions",paste0("Capper_",mod,"_",set)), recursive = T)
### Change file/directory name accordingly (RF used, samples predicted)
save(rf.scores, pred.class, probs, pred.class.cal, file=file.path("Results","predictions",
paste0("Capper_",mod,"_",set),"predictionNewSamples.RData"))
|
### =========================================================================
### Comparing and ordering the elements in one or more XRawList objects
### -------------------------------------------------------------------------
###
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### pcompare()
###
### pcompare() delegates to C code; per the "==" and "<=" methods below,
### the result is an integer vector whose sign encodes the parallel
### comparison (0 means equal, <= 0 means x <= y).
setMethod("pcompare", c("XRawList", "XRawList"),
    function(x, y)
        .Call2("XRawList_pcompare", x, y, PACKAGE="XVector")
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Element-wise (aka "parallel") comparison of 2 XRawList objects.
###
### We only need to implement "==" and "<=" methods. The other comparison
### binary operators (!=, >=, <, >) will then work out-of-the-box on
### XRawList objects thanks to the methods for Vector objects.
###
setMethod("==", c("XRawList", "XRawList"),
    function(e1, e2) pcompare(e1, e2) == 0L
)
setMethod("<=", c("XRawList", "XRawList"),
    function(e1, e2) pcompare(e1, e2) <= 0L
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### match() and duplicated()
###
### Look up each element of 'x' in 'table' via a C-level hash
### ("XRawList_match_hash"); mirrors the base::match() contract.
setMethod("match", c("XRawList", "XRawList"),
    function(x, table, nomatch=NA_integer_, incomparables=NULL)
    {
        ## 'nomatch' must be a single number; coerce doubles (e.g. NA_real_)
        ## to integer before passing down to the C code.
        if (!is.numeric(nomatch) || length(nomatch) != 1L)
            stop("'nomatch' must be a single integer value")
        if (!is.integer(nomatch))
            nomatch <- as.integer(nomatch)
        ## 'incomparables' is accepted for signature compatibility with
        ## base::match() but is not supported.
        if (!is.null(incomparables))
            stop("\"match\" method for XRawList objects ",
                 "only accepts 'incomparables=NULL'")
        .Call2("XRawList_match_hash", x, table, nomatch, PACKAGE="XVector")
    }
)
.selfmatchXRawList <- function(x)
{
.Call2("XRawList_selfmatch_hash", x, PACKAGE="XVector")
}
## Workhorse behind duplicated() for XRawList objects: an element is a
## duplicate iff its self-match index points to an earlier element.
.duplicated.XRawList <- function(x, incomparables=FALSE)
{
    ## 'incomparables' is accepted for signature compatibility with
    ## base::duplicated() but is not supported.
    if (!identical(incomparables, FALSE))
        stop("\"duplicated\" method for XRawList objects ",
             "only accepts 'incomparables=FALSE'")
    sm <- .selfmatchXRawList(x)
    ## seq_along(sm) is the idiomatic (and 0-length-safe) spelling of
    ## seq_len(length(sm)).
    sm != seq_along(sm)
}
### S3/S4 combo for duplicated.XRawList: the S3 method covers contexts that
### dispatch on S3 classes, and setMethod() registers the same
### implementation for S4 dispatch.
duplicated.XRawList <- function(x, incomparables=FALSE, ...)
    .duplicated.XRawList(x, incomparables=incomparables, ...)
setMethod("duplicated", "XRawList", duplicated.XRawList)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### order() and related methods
###
### XRawList objects cannot hold NAs, so 'na.rm' is meaningless here and is
### ignored (with a warning when the caller sets it to anything but FALSE).
setMethod("is.unsorted", "XRawList",
    function(x, na.rm=FALSE, strictly=FALSE)
    {
        if (!identical(na.rm, FALSE))
            warning("\"is.unsorted\" method for XRawList objects ",
                    "ignores the 'na.rm' argument")
        if (!isTRUEorFALSE(strictly))
            stop("'strictly' must be TRUE or FALSE")
        ## The C routine does the actual scan; 'strictly' selects whether
        ## ties count as unsorted.
        .Call2("XRawList_is_unsorted", x, strictly, PACKAGE="XVector")
    }
)
### 'na.last' is pointless (XRawList objects don't contain NAs) so is ignored.
### 'method' is also ignored at the moment.
setMethod("order", "XRawList",
    function(..., na.last=TRUE, decreasing=FALSE,
             method=c("auto", "shell", "radix"))
    {
        ## Turn off this warning for now since it triggers spurious warnings
        ## when calling sort() on an XRawList derivative. The root of the
        ## problem is inconsistent defaults for 'na.last' between order()
        ## and sort(), as reported here:
        ## https://stat.ethz.ch/pipermail/r-devel/2015-November/072012.html
        #if (!identical(na.last, TRUE))
        #    warning("\"order\" method for XRawList objects ",
        #            "ignores the 'na.last' argument")
        if (!isTRUEorFALSE(decreasing))
            stop("'decreasing' must be TRUE or FALSE")
        ## All arguments in '...' are guaranteed to be XRawList objects.
        args <- list(...)
        if (length(args) == 1L) {
            x <- args[[1L]]
            return(.Call2("XRawList_order", x, decreasing, PACKAGE="XVector"))
        }
        ## Tie-breaking across several XRawList objects is not implemented
        ## at the C level yet, hence the restriction to a single argument.
        stop("\"order\" method for XRawList objects ",
             "only takes 1 XRawList object for now, sorry")
    }
)
### Ranks are computed at the C level. Only the deterministic tie methods
### ("first" and "min") are supported; 'na.last' is ignored (with a warning)
### because XRawList objects cannot hold NAs.
setMethod("rank", "XRawList",
    function(x, na.last=TRUE,
             ties.method=c("average", "first", "random", "max", "min"))
    {
        if (!identical(na.last, TRUE))
            warning("\"rank\" method for XRawList objects ",
                    "ignores the 'na.last' argument")
        ## The full ties.method choice set is kept in the signature for
        ## compatibility with base::rank(), but most options are rejected.
        ties.method <- match.arg(ties.method)
        if (!(ties.method %in% c("first", "min")))
            stop("\"rank\" method for XRawList objects supports ",
                 "only 'ties.method=\"first\"' and 'ties.method=\"min\"'")
        .Call2("XRawList_rank", x, ties.method, PACKAGE="XVector")
    }
)
| /R/XRawList-comparison.R | no_license | kiwiroy/XVector | R | false | false | 4,638 | r | ### =========================================================================
### Comparing and ordering the elements in one or more XRawList objects
### -------------------------------------------------------------------------
###
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### pcompare()
###
setMethod("pcompare", c("XRawList", "XRawList"),
function(x, y)
.Call2("XRawList_pcompare", x, y, PACKAGE="XVector")
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Element-wise (aka "parallel") comparison of 2 XRawList objects.
###
### We only need to implement "==" and "<=" methods. The other comparison
### binary operators (!=, >=, <, >) will then work out-of-the-box on
### XRawList objects thanks to the methods for Vector objects.
###
setMethod("==", c("XRawList", "XRawList"),
function(e1, e2) pcompare(e1, e2) == 0L
)
setMethod("<=", c("XRawList", "XRawList"),
function(e1, e2) pcompare(e1, e2) <= 0L
)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### match() and duplicated()
###
setMethod("match", c("XRawList", "XRawList"),
function(x, table, nomatch=NA_integer_, incomparables=NULL)
{
if (!is.numeric(nomatch) || length(nomatch) != 1L)
stop("'nomatch' must be a single integer value")
if (!is.integer(nomatch))
nomatch <- as.integer(nomatch)
if (!is.null(incomparables))
stop("\"match\" method for XRawList objects ",
"only accepts 'incomparables=NULL'")
.Call2("XRawList_match_hash", x, table, nomatch, PACKAGE="XVector")
}
)
.selfmatchXRawList <- function(x)
{
.Call2("XRawList_selfmatch_hash", x, PACKAGE="XVector")
}
## Workhorse behind duplicated() for XRawList objects: an element is a
## duplicate iff its self-match index points to an earlier element.
.duplicated.XRawList <- function(x, incomparables=FALSE)
{
    ## 'incomparables' is accepted for signature compatibility with
    ## base::duplicated() but is not supported.
    if (!identical(incomparables, FALSE))
        stop("\"duplicated\" method for XRawList objects ",
             "only accepts 'incomparables=FALSE'")
    sm <- .selfmatchXRawList(x)
    ## seq_along(sm) is the idiomatic (and 0-length-safe) spelling of
    ## seq_len(length(sm)).
    sm != seq_along(sm)
}
### S3/S4 combo for duplicated.XRawList
duplicated.XRawList <- function(x, incomparables=FALSE, ...)
.duplicated.XRawList(x, incomparables=incomparables, ...)
setMethod("duplicated", "XRawList", duplicated.XRawList)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### order() and related methods
###
setMethod("is.unsorted", "XRawList",
function(x, na.rm=FALSE, strictly=FALSE)
{
if (!identical(na.rm, FALSE))
warning("\"is.unsorted\" method for XRawList objects ",
"ignores the 'na.rm' argument")
if (!isTRUEorFALSE(strictly))
stop("'strictly' must be TRUE or FALSE")
.Call2("XRawList_is_unsorted", x, strictly, PACKAGE="XVector")
}
)
### 'na.last' is pointless (XRawList objects don't contain NAs) so is ignored.
### 'method' is also ignored at the moment.
setMethod("order", "XRawList",
function(..., na.last=TRUE, decreasing=FALSE,
method=c("auto", "shell", "radix"))
{
## Turn off this warning for now since it triggers spurious warnings
## when calling sort() on an XRawList derivative. The root of the
## problem is inconsistent defaults for 'na.last' between order()
## and sort(), as reported here:
## https://stat.ethz.ch/pipermail/r-devel/2015-November/072012.html
#if (!identical(na.last, TRUE))
# warning("\"order\" method for XRawList objects ",
# "ignores the 'na.last' argument")
if (!isTRUEorFALSE(decreasing))
stop("'decreasing' must be TRUE or FALSE")
## All arguments in '...' are guaranteed to be XRawList objects.
args <- list(...)
if (length(args) == 1L) {
x <- args[[1L]]
return(.Call2("XRawList_order", x, decreasing, PACKAGE="XVector"))
}
stop("\"order\" method for XRawList objects ",
"only takes 1 XRawList object for now, sorry")
}
)
setMethod("rank", "XRawList",
function(x, na.last=TRUE,
ties.method=c("average", "first", "random", "max", "min"))
{
if (!identical(na.last, TRUE))
warning("\"rank\" method for XRawList objects ",
"ignores the 'na.last' argument")
ties.method <- match.arg(ties.method)
if (!(ties.method %in% c("first", "min")))
stop("\"rank\" method for XRawList objects supports ",
"only 'ties.method=\"first\"' and 'ties.method=\"min\"'")
.Call2("XRawList_rank", x, ties.method, PACKAGE="XVector")
}
)
|
# This script reads the "Individual household electric power consumption"
# data set and plots a time series of Global Active Power for 2007-02-01/02.
#-----------------------START: OPEN AND CLEAN DATA------------------------------------#
# Read the dataset into 'electric' (NA values in the file are given as "?").
electric<-read.table("C:/Users/jroberti/JohnsHopkinsDataScience/exploratoryDataAnalysis/household_power_consumption.txt", header=TRUE, sep=";", colClasses="character", na.strings = "?")
# Paste $Date and $Time together into a single timestamp string:
electric$timeStamp<-paste(electric$Date,electric$Time,sep=" ")
# Subset the dataset to only include data from 2007-02-01 through 2007-02-02:
electricFEB<-subset(electric,electric$Date=="1/2/2007"|electric$Date=="2/2/2007")
# Coerce $timeStamp from "character" to date-time within the subset:
electricFEB$timeStamp1<-strptime(electricFEB$timeStamp,format="%d/%m/%Y %H:%M:%S")
# BUG FIX: every column was read with colClasses="character", so the power
# readings are still character strings; convert them to numeric so plot()
# receives numeric y values.
electricFEB$Global_active_power<-as.numeric(electricFEB$Global_active_power)
#-------------------------END: OPEN AND CLEAN DATA------------------------------------#
#-------------------------START: PLOT DATA TO PNG-------------------------------------#
# Open the PNG device, draw the time series, then close the device:
png(file="C:\\Users\\jroberti\\JohnsHopkinsDataScience\\exploratoryDataAnalysis\\plot2.png",width=480,height=480,units="px")
plot(electricFEB$timeStamp1,electricFEB$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off()
#---------------------------END: PLOT DATA TO PNG-------------------------------------# | /plot2.R | no_license | jaroberti/ExData_Plotting1 | R | false | false | 1,498 | r | #This script takes data from the Individual household electric power consumption Data Set and plots a time series of Global Active Power
#-----------------------START: OPEN AND CLEAN DATA------------------------------------#
#read the dataset into variable called 'electric' (NA values in dataset are given as "?")
electric<-read.table("C:/Users/jroberti/JohnsHopkinsDataScience/exploratoryDataAnalysis/household_power_consumption.txt", header=TRUE, sep=";", colClasses="character", na.strings = "?")
#paste $Date and $Time together:
electric$timeStamp<-paste(electric$Date,electric$Time,sep=" ")
#suset the dataset to only include data from 2007-02-01 through 2007-02-02:
electricFEB<-subset(electric,electric$Date=="1/2/2007"|electric$Date=="2/2/2007")
#coerce the $timeStamp from "character" to "Date" within the subset of data:
electricFEB$timeStamp1<-strptime(electricFEB$timeStamp,format="%d/%m/%Y %H:%M:%S")
#-------------------------END: OPEN AND CLEAN DATA------------------------------------#
#-------------------------START: PLOT DATA TO PNG-------------------------------------#
#plot of global active power:
png(file="C:\\Users\\jroberti\\JohnsHopkinsDataScience\\exploratoryDataAnalysis\\plot2.png",width=480,height=480,units="px")
#make plot
plot(electricFEB$timeStamp1,electricFEB$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
#close graphic device:
dev.off()
#---------------------------END: PLOT DATA TO PNG-------------------------------------# |
# This program reads data about pollutant emissions in the years 1999, 2002,
# 2005, and 2008, then plots yearly totals and the per-year spread.
#
library(dplyr)     # provides the %>% pipe used below
library(ggplot2)
library(gridExtra)
setwd("C:/dados/coursera/dscoursera/eda")  # NOTE(review): hard-coded path makes the script machine-specific
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
fileName <- "FNEI_data.zip"
directory <- "data"
# Download files if not available
if(!file.exists(fileName)){
download.file(fileURL,fileName)
}
if(!file.exists(directory)){
unzip(fileName, files = NULL, exdir="./data")
}
# Read the files; year and type are converted to factors for plotting
NEI <- readRDS("./data/summarySCC_PM25.rds") %>%
transform(year = factor(year)) %>%
transform(type = factor(type))
SCC <- readRDS("./data/Source_Classification_Code.rds")  # classification codes (not used further in this block)
# Calculating the total of emission per year (named vector indexed by year)
sum_emi <- tapply(NEI$Emissions, NEI$year, sum)
val_emi <- c(sum_emi[[1]],sum_emi[[2]],sum_emi[[3]], sum_emi[[4]])
# Two side-by-side panels: totals per year, and per-year spread (outliers hidden)
par(mfrow = c(1,2))
plot(c(1999, 2002, 2005, 2008),val_emi, xlab = "Year", ylab = "Sum of Emission (tons)"
, main = "Total of emission in USA", pch=19)
boxplot(Emissions ~ year, NEI, xlab = "Year" , ylab = "Emissions"
, outline = FALSE , main = "Spread of Emissions in USA")
dev.copy(png,"plot1.png")  # copy the on-screen figure to a PNG file
dev.off() | /plot1.R | no_license | bergojr/eda | R | false | false | 1,204 | r | # This program read data about pollutant emission in the years of 1999, 2002, 2005, and 2008
#
library(dplyr)
library(ggplot2)
library(gridExtra)
setwd("C:/dados/coursera/dscoursera/eda")
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
fileName <- "FNEI_data.zip"
directory <- "data"
# Download files if not available
if(!file.exists(fileName)){
download.file(fileURL,fileName)
}
if(!file.exists(directory)){
unzip(fileName, files = NULL, exdir="./data")
}
# Read the files
NEI <- readRDS("./data/summarySCC_PM25.rds") %>%
transform(year = factor(year)) %>%
transform(type = factor(type))
SCC <- readRDS("./data/Source_Classification_Code.rds")
# Calculating the total of emission per year
sum_emi <- tapply(NEI$Emissions, NEI$year, sum)
val_emi <- c(sum_emi[[1]],sum_emi[[2]],sum_emi[[3]], sum_emi[[4]])
par(mfrow = c(1,2))
plot(c(1999, 2002, 2005, 2008),val_emi, xlab = "Year", ylab = "Sum of Emission (tons)"
, main = "Total of emission in USA", pch=19)
boxplot(Emissions ~ year, NEI, xlab = "Year" , ylab = "Emissions"
, outline = FALSE , main = "Spread of Emissions in USA")
dev.copy(png,"plot1.png")
dev.off() |
# Quick interactive exploration of two built-in datasets.
mtcars                    # print the mtcars data set
plot(women)               # scatterplot of the women data set
install.packages('car')   # NOTE(review): installing packages inside a script is a heavy side effect
?women                    # open the help page for the women data set
x <- 1:100                # use <- for assignment instead of =
# BUG FIX: y was printed before it was defined, which raised
# "object 'y' not found"; define it before printing.
y <- seq(1, 100, by = 2)
y
x
| /file2.R | no_license | ishita9327/analytics1- | R | false | false | 82 | r | mtcars
# Quick interactive exploration of built-in datasets.
plot(women)               # scatterplot of the women data set
install.packages('car')   # NOTE(review): installing packages inside a script is a heavy side effect
?women                    # open the help page for the women data set
x <- 1:100                # use <- for assignment instead of =
# BUG FIX: y was printed before it was defined, which raised
# "object 'y' not found"; define it before printing.
y <- seq(1, 100, by = 2)
y
x
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.secretsmanager_operations.R
\name{tag_resource}
\alias{tag_resource}
\title{Attaches one or more tags, each consisting of a key name and a value, to the specified secret}
\usage{
tag_resource(SecretId, Tags)
}
\arguments{
\item{SecretId}{[required] The identifier for the secret that you want to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.
If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too---for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.}
\item{Tags}{[required] The tags to attach to the secret. Each element in the list consists of a \code{Key} and a \code{Value}.
This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see \href{http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json}{Using JSON for Parameters} in the \emph{AWS CLI User Guide}. For the AWS CLI, you can also use the syntax: \code{--Tags Key="Key1",Value="Value1",Key="Key2",Value="Value2"[,…]}}
}
\description{
Attaches one or more tags, each consisting of a key name and a value, to the specified secret. Tags are part of the secret's overall metadata, and are not associated with any specific version of the secret. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.
}
\details{
The following basic restrictions apply to tags:
\itemize{
\item Maximum number of tags per secret---50
\item Maximum key length---127 Unicode characters in UTF-8
\item Maximum value length---255 Unicode characters in UTF-8
\item Tag keys and values are case sensitive.
\item Do not use the \code{aws:} prefix in your tag names or values because it is reserved for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.
\item If your tagging schema will be used across multiple services and resources, remember that other services might have restrictions on allowed characters. Generally allowed characters are: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . \_ : / @.
}
If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.
\strong{Minimum permissions}
To run this command, you must have the following permissions:
\itemize{
\item secretsmanager:TagResource
}
\strong{Related operations}
\itemize{
\item To remove one or more tags from the collection attached to a secret, use UntagResource.
\item To view the list of tags attached to a secret, use DescribeSecret.
}
}
\section{Accepted Parameters}{
\preformatted{tag_resource(
SecretId = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\examples{
# The following example shows how to attach two tags each with a Key and
# Value to a secret. There is no output from this API. To see the result,
# use the DescribeSecret operation.
\donttest{tag_resource(
SecretId = "MyExampleSecret",
Tags = list(
list(
Key = "FirstTag",
Value = "SomeValue"
),
list(
Key = "SecondTag",
Value = "AnotherValue"
)
)
)}
}
| /service/paws.secretsmanager/man/tag_resource.Rd | permissive | CR-Mercado/paws | R | false | true | 4,221 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.secretsmanager_operations.R
\name{tag_resource}
\alias{tag_resource}
\title{Attaches one or more tags, each consisting of a key name and a value, to the specified secret}
\usage{
tag_resource(SecretId, Tags)
}
\arguments{
\item{SecretId}{[required] The identifier for the secret that you want to attach tags to. You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.
If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too---for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.}
\item{Tags}{[required] The tags to attach to the secret. Each element in the list consists of a \code{Key} and a \code{Value}.
This parameter to the API requires a JSON text string argument. For information on how to format a JSON parameter for the various command line tool environments, see \href{http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json}{Using JSON for Parameters} in the \emph{AWS CLI User Guide}. For the AWS CLI, you can also use the syntax: \code{--Tags Key="Key1",Value="Value1",Key="Key2",Value="Value2"[,…]}}
}
\description{
Attaches one or more tags, each consisting of a key name and a value, to the specified secret. Tags are part of the secret's overall metadata, and are not associated with any specific version of the secret. This operation only appends tags to the existing list of tags. To remove tags, you must use UntagResource.
}
\details{
The following basic restrictions apply to tags:
\itemize{
\item Maximum number of tags per secret---50
\item Maximum key length---127 Unicode characters in UTF-8
\item Maximum value length---255 Unicode characters in UTF-8
\item Tag keys and values are case sensitive.
\item Do not use the \code{aws:} prefix in your tag names or values because it is reserved for AWS use. You can't edit or delete tag names or values with this prefix. Tags with this prefix do not count against your tags per secret limit.
\item If your tagging schema will be used across multiple services and resources, remember that other services might have restrictions on allowed characters. Generally allowed characters are: letters, spaces, and numbers representable in UTF-8, plus the following special characters: + - = . \_ : / @.
}
If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.
\strong{Minimum permissions}
To run this command, you must have the following permissions:
\itemize{
\item secretsmanager:TagResource
}
\strong{Related operations}
\itemize{
\item To remove one or more tags from the collection attached to a secret, use UntagResource.
\item To view the list of tags attached to a secret, use DescribeSecret.
}
}
\section{Accepted Parameters}{
\preformatted{tag_resource(
SecretId = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\examples{
# The following example shows how to attach two tags each with a Key and
# Value to a secret. There is no output from this API. To see the result,
# use the DescribeSecret operation.
\donttest{tag_resource(
SecretId = "MyExampleSecret",
Tags = list(
list(
Key = "FirstTag",
Value = "SomeValue"
),
list(
Key = "SecondTag",
Value = "AnotherValue"
)
)
)}
}
|
#Poors properties.R
#
# Copyright © 2018:Arin Shahbazian
# Licence: GPL-3
#
rm(list=ls())
starttime <- proc.time()
library(yaml)
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(stringr)
library(data.table)
library(sm)
library(ggplot2)
library(xlsx)
library(haven)
# ---- helpers -------------------------------------------------------------

# Weighted share of observations where dt[[var]] == value, one row per
# `by_col` group, weighted by the sampling Weight column.
# NOTE(review): as in the original, an NA in `var` propagates into its
# group's share (no na.rm on the indicator) -- confirm that is intended.
weighted_share <- function(dt, var, value, out_name, by_col) {
  dt[, setNames(.(weighted.mean(ifelse(get(var) == value, 1, 0), Weight)),
                out_name),
     by = by_col]
}

# One-row-per-group table of weighted shares for several category values of
# `var`.  `specs` is a named list mapping output column name -> category
# value.  Tables are merged left-to-right, so column order follows `specs`
# (this reproduces the original sheet layouts exactly).
share_table <- function(dt, var, specs, by_col) {
  Reduce(function(a, b) merge(a, b, by = by_col, all.x = TRUE),
         Map(function(nm, v) weighted_share(dt, var, v, nm, by_col),
             names(specs), specs))
}

# Build a named list: names `nms`, values `vals`.
spec <- function(nms, vals) setNames(as.list(vals), nms)

for (year in (Settings$startyear:Settings$endyear)) {
  cat(paste0("\nYear:", year, "\t"))

  # Load this year's poverty flags (defines NewFinalPoor)
  load(file = paste0(Settings$HEISProcessedPath, "Y", year, "NewFinalPoor.rda"))
  UNewFinalPoor <- NewFinalPoor[Region == "Urban"]
  RNewFinalPoor <- NewFinalPoor[Region == "Rural"]

  # Add house properties.
  # NOTE(review): these survey files are hard-coded to the 95 wave and an
  # absolute D:/R path, so every `year` iteration re-reads the same data --
  # presumably they should depend on `year` / Settings; confirm with the
  # data pipeline before changing.
  T95P2 <- read_dta("D:/R/T95P2.dta")
  R95P2 <- read_dta("D:/R/R95P2.dta")
  U95P2 <- read_dta("D:/R/U95P2.dta")

  CBN95     <- merge(NewFinalPoor,  T95P2, by = c("HHID"), all.x = TRUE)
  CBN_Urban <- merge(UNewFinalPoor, U95P2, by = c("HHID"), all.x = TRUE)
  CBN_Rural <- merge(RNewFinalPoor, R95P2, by = c("HHID"), all.x = TRUE)

  CBN_Poor95    <- CBN95[FinalPoor == 1]
  CBN_NonPoor95 <- rbind(CBN_Urban[FinalPoor == 0], CBN_Rural[FinalPoor == 0])

  ##### HouseOwn #####
  # Tenure-code (1..6) shares by NewArea: non-poor households fill columns
  # HouseOwn_Poors1..6, poor households fill HouseOwn_Poors7..12.
  HouseOwn1 <- merge(
    share_table(CBN_NonPoor95, "HouseOwn",
                spec(paste0("HouseOwn_Poors", 1:6), 1:6), "NewArea"),
    share_table(CBN_Poor95, "HouseOwn",
                spec(paste0("HouseOwn_Poors", 7:12), 1:6), "NewArea"),
    by = "NewArea", all.x = TRUE)
  write.xlsx(HouseOwn1, "D:/R/Poors_House2.xlsx", sheetName = "HouseOwn1")

  # Tenure-code shares by poverty status over the whole sample
  HouseOwn2 <- share_table(CBN95, "HouseOwn",
                           spec(paste0("HouseOwn", 1:6), 1:6), "FinalPoor")
  write.xlsx(HouseOwn2, "D:/R/Poors_House2.xlsx",
             sheetName = "HouseOwn2", append = TRUE)

  ##### skeleton #####
  # Building-skeleton codes: 1..6 plus the extra codes 7, 8, 20, 10.
  # Non-poor shares fill columns skeleton_Poors1..6 and 13..16; poor shares
  # fill skeleton_Poors7..12 and 17..20, in the original merge order.
  # BUGFIX: the original computed skeleton_Poors17..20 from skeleton == 6
  # four times (copy-paste error); they now use codes 7, 8, 20, 10 to
  # mirror the non-poor columns skeleton_Poors13..16.
  sk_vals <- c(1:6, 7, 8, 20, 10)
  skeleton1 <- merge(
    share_table(CBN_NonPoor95, "skeleton",
                spec(paste0("skeleton_Poors", c(1:6, 13:16)), sk_vals),
                "NewArea"),
    share_table(CBN_Poor95, "skeleton",
                spec(paste0("skeleton_Poors", c(7:12, 17:20)), sk_vals),
                "NewArea"),
    by = "NewArea", all.x = TRUE)
  write.xlsx(skeleton1, "D:/R/Poors_House2.xlsx",
             sheetName = "skeleton1", append = TRUE)

  # Skeleton-code shares by poverty status over the whole sample
  sk2_vals <- c(1:8, 10, 20)
  skeleton2 <- share_table(CBN95, "skeleton",
                           spec(paste0("skeleton", sk2_vals), sk2_vals),
                           "FinalPoor")
  write.xlsx(skeleton2, "D:/R/Poors_House2.xlsx",
             sheetName = "skeleton2", append = TRUE)
}
endtime <- proc.time()
# Report elapsed wall-clock time.  The original cat()ed the whole 5-element
# proc_time difference with no labels, units, or trailing newline; print the
# labeled elapsed seconds instead.
cat("\n\n============================\nIt took",
    round((endtime - starttime)["elapsed"], 1), "seconds\n")
#
# Computes weighted shares of house-tenure (HouseOwn) and building-skeleton
# codes for poor vs. non-poor households and writes them to Poors_House2.xlsx.
#
# Copyright © 2018:Arin Shahbazian
# Licence: GPL-3
#
# NOTE(review): the original began with rm(list = ls()), which silently
# destroys the caller's workspace and is an anti-pattern in scripts; it has
# been removed -- run this script in a fresh R session instead.
starttime <- proc.time()

# Settings.yaml supplies startyear/endyear and the HEIS data path.
library(yaml)
Settings <- yaml.load_file("Settings.yaml")

library(readxl)
library(stringr)
library(data.table)
library(sm)
library(ggplot2)
library(xlsx)
library(haven)
for(year in (Settings$startyear:Settings$endyear)){
cat(paste0("\nYear:",year,"\t"))
#Load Data
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"NewFinalPoor.rda"))
UNewFinalPoor<-NewFinalPoor[Region=="Urban"]
RNewFinalPoor<-NewFinalPoor[Region=="Rural"]
#Add house properties
T95P2 <- read_dta("D:/R/T95P2.dta")
R95P2 <- read_dta("D:/R/R95P2.dta")
U95P2 <- read_dta("D:/R/U95P2.dta")
CBN95<-merge(NewFinalPoor,T95P2,by =c("HHID"),all.x=TRUE)
CBN_Urban<-merge(UNewFinalPoor,U95P2,by =c("HHID"),all.x=TRUE)
CBN_Rural<-merge(RNewFinalPoor,R95P2,by =c("HHID"),all.x=TRUE)
CBN_Poor95<-CBN95[FinalPoor==1]
CBN_Poor_Urban<-CBN_Urban[FinalPoor==1]
CBN_Poor_Rural<-CBN_Rural[FinalPoor==1]
CBN_NonPoor_Urban<-CBN_Urban[FinalPoor==0]
CBN_NonPoor_Rural<-CBN_Rural[FinalPoor==0]
CBN_NonPoor95<-rbind(CBN_NonPoor_Urban,CBN_NonPoor_Rural)
##### HouseOwn #####
HouseOwn<-CBN_NonPoor95[,HouseOwn_Poors1:=weighted.mean(ifelse(HouseOwn==1,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors1,NewArea,Weight)]
HouseOwn1<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn1[,Weight:=NULL]
HouseOwn<-CBN_NonPoor95[,HouseOwn_Poors2:=weighted.mean(ifelse(HouseOwn==2,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors2,NewArea,Weight)]
HouseOwn2<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_NonPoor95[,HouseOwn_Poors3:=weighted.mean(ifelse(HouseOwn==3,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors3,NewArea,Weight)]
HouseOwn3<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_NonPoor95[,HouseOwn_Poors4:=weighted.mean(ifelse(HouseOwn==4,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors4,NewArea,Weight)]
HouseOwn4<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_NonPoor95[,HouseOwn_Poors5:=weighted.mean(ifelse(HouseOwn==5,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors5,NewArea,Weight)]
HouseOwn5<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_NonPoor95[,HouseOwn_Poors6:=weighted.mean(ifelse(HouseOwn==6,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors6,NewArea,Weight)]
HouseOwn6<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_Poor95[,HouseOwn_Poors7:=weighted.mean(ifelse(HouseOwn==1,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors7,NewArea,Weight)]
HouseOwn7<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn7[,Weight:=NULL]
HouseOwn<-CBN_Poor95[,HouseOwn_Poors8:=weighted.mean(ifelse(HouseOwn==2,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors8,NewArea,Weight)]
HouseOwn8<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_Poor95[,HouseOwn_Poors9:=weighted.mean(ifelse(HouseOwn==3,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors9,NewArea,Weight)]
HouseOwn9<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_Poor95[,HouseOwn_Poors10:=weighted.mean(ifelse(HouseOwn==4,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors10,NewArea,Weight)]
HouseOwn10<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_Poor95[,HouseOwn_Poors11:=weighted.mean(ifelse(HouseOwn==5,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors11,NewArea,Weight)]
HouseOwn11<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn<-CBN_Poor95[,HouseOwn_Poors12:=weighted.mean(ifelse(HouseOwn==6,1,0),Weight),by=NewArea][order(NewArea)]
HouseOwn<-HouseOwn[,.(HouseOwn_Poors12,NewArea,Weight)]
HouseOwn12<-HouseOwn[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
HouseOwn1<-merge(HouseOwn1,HouseOwn2,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn3,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn4,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn5,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn6,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn7,by =c("NewArea"),all.x=TRUE)
HouseOwn1<-merge(HouseOwn1,HouseOwn8,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn9,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn10,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn11,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
HouseOwn1<-merge(HouseOwn1,HouseOwn12,by =c("NewArea"),all.x=TRUE)
HouseOwn1[,Weight:=NULL]
write.xlsx(HouseOwn1, "D:/R/Poors_House2.xlsx",sheetName = "HouseOwn1")
HouseOwn13<-CBN95[,HouseOwn1:=weighted.mean(ifelse(HouseOwn==1,1,0),Weight),by=FinalPoor][order(FinalPoor)]
HouseOwn13<-HouseOwn13[,.(HouseOwn1,FinalPoor,Weight)]
HouseOwn13<-HouseOwn13[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
HouseOwn13[,Weight:=NULL]
HouseOwn14<-CBN95[,HouseOwn2:=weighted.mean(ifelse(HouseOwn==2,1,0),Weight),by=FinalPoor][order(FinalPoor)]
HouseOwn14<-HouseOwn14[,.(HouseOwn2,FinalPoor,Weight)]
HouseOwn14<-HouseOwn14[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
HouseOwn14[,Weight:=NULL]
HouseOwn15<-CBN95[,HouseOwn3:=weighted.mean(ifelse(HouseOwn==3,1,0),Weight),by=FinalPoor][order(FinalPoor)]
HouseOwn15<-HouseOwn15[,.(HouseOwn3,FinalPoor,Weight)]
HouseOwn15<-HouseOwn15[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
HouseOwn15[,Weight:=NULL]
HouseOwn16<-CBN95[,HouseOwn4:=weighted.mean(ifelse(HouseOwn==4,1,0),Weight),by=FinalPoor][order(FinalPoor)]
HouseOwn16<-HouseOwn16[,.(HouseOwn4,FinalPoor,Weight)]
HouseOwn16<-HouseOwn16[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
HouseOwn16[,Weight:=NULL]
HouseOwn17<-CBN95[,HouseOwn5:=weighted.mean(ifelse(HouseOwn==5,1,0),Weight),by=FinalPoor][order(FinalPoor)]
HouseOwn17<-HouseOwn17[,.(HouseOwn5,FinalPoor,Weight)]
HouseOwn17<-HouseOwn17[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
HouseOwn17[,Weight:=NULL]
HouseOwn18<-CBN95[,HouseOwn6:=weighted.mean(ifelse(HouseOwn==6,1,0),Weight),by=FinalPoor][order(FinalPoor)]
HouseOwn18<-HouseOwn18[,.(HouseOwn6,FinalPoor,Weight)]
HouseOwn18<-HouseOwn18[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
HouseOwn18[,Weight:=NULL]
HouseOwn2<-merge(HouseOwn13,HouseOwn14,by =c("FinalPoor"),all.x=TRUE)
HouseOwn2<-merge(HouseOwn2,HouseOwn15,by =c("FinalPoor"),all.x=TRUE)
HouseOwn2<-merge(HouseOwn2,HouseOwn16,by =c("FinalPoor"),all.x=TRUE)
HouseOwn2<-merge(HouseOwn2,HouseOwn17,by =c("FinalPoor"),all.x=TRUE)
HouseOwn2<-merge(HouseOwn2,HouseOwn18,by =c("FinalPoor"),all.x=TRUE)
write.xlsx(HouseOwn2, "D:/R/Poors_House2.xlsx",sheetName = "HouseOwn2",append = TRUE)
##### skeleton #####
skeleton<-CBN_NonPoor95[,skeleton_Poors1:=weighted.mean(ifelse(skeleton==1,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors1,NewArea,Weight)]
skeleton1<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton1[,Weight:=NULL]
skeleton<-CBN_NonPoor95[,skeleton_Poors2:=weighted.mean(ifelse(skeleton==2,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors2,NewArea,Weight)]
skeleton2<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors3:=weighted.mean(ifelse(skeleton==3,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors3,NewArea,Weight)]
skeleton3<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors4:=weighted.mean(ifelse(skeleton==4,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors4,NewArea,Weight)]
skeleton4<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors5:=weighted.mean(ifelse(skeleton==5,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors5,NewArea,Weight)]
skeleton5<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors6:=weighted.mean(ifelse(skeleton==6,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors6,NewArea,Weight)]
skeleton6<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors13:=weighted.mean(ifelse(skeleton==7,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors13,NewArea,Weight)]
skeleton13<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors14:=weighted.mean(ifelse(skeleton==8,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors14,NewArea,Weight)]
skeleton14<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors15:=weighted.mean(ifelse(skeleton==20,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors15,NewArea,Weight)]
skeleton15<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_NonPoor95[,skeleton_Poors16:=weighted.mean(ifelse(skeleton==10,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors16,NewArea,Weight)]
skeleton16<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors7:=weighted.mean(ifelse(skeleton==1,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors7,NewArea,Weight)]
skeleton7<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton7[,Weight:=NULL]
skeleton<-CBN_Poor95[,skeleton_Poors8:=weighted.mean(ifelse(skeleton==2,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors8,NewArea,Weight)]
skeleton8<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors9:=weighted.mean(ifelse(skeleton==3,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors9,NewArea,Weight)]
skeleton9<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors10:=weighted.mean(ifelse(skeleton==4,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors10,NewArea,Weight)]
skeleton10<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors11:=weighted.mean(ifelse(skeleton==5,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors11,NewArea,Weight)]
skeleton11<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors12:=weighted.mean(ifelse(skeleton==6,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors12,NewArea,Weight)]
skeleton12<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors17:=weighted.mean(ifelse(skeleton==6,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors17,NewArea,Weight)]
skeleton17<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors18:=weighted.mean(ifelse(skeleton==6,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors18,NewArea,Weight)]
skeleton18<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors19:=weighted.mean(ifelse(skeleton==6,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors19,NewArea,Weight)]
skeleton19<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton<-CBN_Poor95[,skeleton_Poors20:=weighted.mean(ifelse(skeleton==6,1,0),Weight),by=NewArea][order(NewArea)]
skeleton<-skeleton[,.(skeleton_Poors20,NewArea,Weight)]
skeleton20<-skeleton[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(NewArea)]
skeleton1<-merge(skeleton1,skeleton2,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton3,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton4,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton5,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton6,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton13,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton14,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton15,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton16,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton7,by =c("NewArea"),all.x=TRUE)
skeleton1<-merge(skeleton1,skeleton8,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton9,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton10,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton11,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton12,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton17,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton18,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton19,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
skeleton1<-merge(skeleton1,skeleton20,by =c("NewArea"),all.x=TRUE)
skeleton1[,Weight:=NULL]
write.xlsx(skeleton1, "D:/R/Poors_House2.xlsx",sheetName = "skeleton1",append = TRUE)
# Poverty-status-level shares: for each skeleton category, compute the
# weighted share of households in that category within each FinalPoor group.
# NOTE(review): `CBN95[, col := ...]` adds each indicator column to CBN95 BY
# REFERENCE, so every skeletonNN below initially aliases the full CBN95 table
# before being cut down to three columns -- confirm this side effect on CBN95
# is intended.  Also note skeleton20 here OVERWRITES the NewArea-level
# skeleton20 defined above.
skeleton13<-CBN95[,skeleton1:=weighted.mean(ifelse(skeleton==1,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton13<-skeleton13[,.(skeleton1,FinalPoor,Weight)]
skeleton13<-skeleton13[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton13[,Weight:=NULL]
skeleton14<-CBN95[,skeleton2:=weighted.mean(ifelse(skeleton==2,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton14<-skeleton14[,.(skeleton2,FinalPoor,Weight)]
skeleton14<-skeleton14[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton14[,Weight:=NULL]
skeleton15<-CBN95[,skeleton3:=weighted.mean(ifelse(skeleton==3,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton15<-skeleton15[,.(skeleton3,FinalPoor,Weight)]
skeleton15<-skeleton15[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton15[,Weight:=NULL]
skeleton16<-CBN95[,skeleton4:=weighted.mean(ifelse(skeleton==4,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton16<-skeleton16[,.(skeleton4,FinalPoor,Weight)]
skeleton16<-skeleton16[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton16[,Weight:=NULL]
skeleton17<-CBN95[,skeleton5:=weighted.mean(ifelse(skeleton==5,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton17<-skeleton17[,.(skeleton5,FinalPoor,Weight)]
skeleton17<-skeleton17[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton17[,Weight:=NULL]
skeleton18<-CBN95[,skeleton6:=weighted.mean(ifelse(skeleton==6,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton18<-skeleton18[,.(skeleton6,FinalPoor,Weight)]
skeleton18<-skeleton18[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton18[,Weight:=NULL]
skeleton19<-CBN95[,skeleton7:=weighted.mean(ifelse(skeleton==7,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton19<-skeleton19[,.(skeleton7,FinalPoor,Weight)]
skeleton19<-skeleton19[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton19[,Weight:=NULL]
skeleton20<-CBN95[,skeleton8:=weighted.mean(ifelse(skeleton==8,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton20<-skeleton20[,.(skeleton8,FinalPoor,Weight)]
skeleton20<-skeleton20[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton20[,Weight:=NULL]
skeleton21<-CBN95[,skeleton10:=weighted.mean(ifelse(skeleton==10,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton21<-skeleton21[,.(skeleton10,FinalPoor,Weight)]
skeleton21<-skeleton21[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton21[,Weight:=NULL]
skeleton22<-CBN95[,skeleton20:=weighted.mean(ifelse(skeleton==20,1,0),Weight),by=FinalPoor][order(FinalPoor)]
skeleton22<-skeleton22[,.(skeleton20,FinalPoor,Weight)]
skeleton22<-skeleton22[,lapply(.SD,weighted.mean,w=Weight,na.rm = TRUE),by=.(FinalPoor)]
skeleton22[,Weight:=NULL]
# Assemble the per-status table (Weight already dropped above, so these
# merges need no cleanup) and export it.
skeleton2<-merge(skeleton13,skeleton14,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton15,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton16,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton17,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton18,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton19,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton20,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton21,by =c("FinalPoor"),all.x=TRUE)
skeleton2<-merge(skeleton2,skeleton22,by =c("FinalPoor"),all.x=TRUE)
write.xlsx(skeleton2, "D:/R/Poors_House2.xlsx",sheetName = "skeleton2",append = TRUE)
}
# Report elapsed time for the run (relies on `starttime <- proc.time()`
# captured earlier in the script).
endtime <- proc.time()
cat("\n\n============================\nIt took ")
# FIX: removed a stray trailing "|" artifact that made this expression
# parse as a binary `|` continuing onto the next line.
cat(endtime-starttime)
# Dev scratch script: render dendrograms with highcharter.
# NOTE(review): rm(list = ls()) wipes the session -- fine for a devscript,
# never for reusable code.
rm(list = ls())
library("highcharter")
library("ggdendro")
library("dplyr")
# Build dendrograms from hierarchical clustering (the iris one is
# immediately overwritten by the mtcars one).
x <- iris[, -5] %>% dist %>% hclust %>% as.dendrogram()
x <- mtcars %>% dist %>% hclust %>% as.dendrogram()
hchart(as.dendrogram(hclust(dist(mtcars))))
# Force the plain "dendrogram" class so dispatch hits the method below.
attr(x, "class") <- "dendrogram"
class(x)
plot(x)
hchart(x)
highcharter:::hchart.dendrogram(x)
hc <- hchart(x)
#' @importFrom ggdendro dendro_data
# hchart method for "dendrogram" objects: convert the tree into segment
# coordinates via ggdendro::dendro_data(), then add each segment as its own
# non-interactive scatter series on a shared categorical x axis.
# NOTE(review): purrr::by_row() was moved out of purrr (to purrrlyr) --
# confirm the installed purrr version still exports it.
hchart.dendrogram <- function(x, ...) {
  dddata <- dendro_data(x)
  # Run .f on every row of .d and keep only the resulting list column.
  by_row2 <- function(.d, .f, ...) {
    purrr::by_row(.d, .f, ..., .to = "out")[["out"]]
  }
  # Shift to 0-based x positions and turn each segment into its two
  # endpoint coordinate lists (highcharts point format).
  dsseg <- dddata$segments %>%
    mutate(x = x - 1, xend = xend - 1) %>%
    by_row2(function(x){
      list(list(x = x$x, y = x$y), list(x = x$xend, y = x$yend))
    })
  hc <- highchart() %>%
    hc_plotOptions(
      series = list(
        lineWidth = 2,
        showInLegend = FALSE,
        marker = list(radius = 0),
        enableMouseTracking = FALSE
      )
    ) %>%
    hc_xAxis(categories = dddata$labels$label,
             tickmarkPlacement = "on") %>%
    hc_colors(list(hex_to_rgba("#606060")))
  # One series per segment so individual branches render independently.
  for (i in seq_along(dsseg)) {
    hc <- hc %>% hc_add_series(data = dsseg[[i]], type = "scatter")
  }
  hc
}
# Try several chart orientations for the dendrogram.
hc %>%
  hc_chart(type = "column")
hc %>%
  hc_chart(type = "bar") %>%
  hc_xAxis(tickLength = 0)
hc %>% hc_chart(type = "bar") %>%
  hc_yAxis(reversed = TRUE) %>%
  hc_xAxis(opposite = TRUE, tickLength = 0)
# FIX: "Shc" was a typo for the existing object "hc" (Shc is undefined).
hc %>% hc_chart(polar = TRUE) %>%
  hc_yAxis(reversed = TRUE, visible = TRUE) %>%
  hc_xAxis(gridLineWidth = 0, lineWidth = 0)
# ---- dataset separator: /devscripts/dendro.R | no_license | APKBridget/highcharter ----
rm(list = ls())
# (Duplicate copy of the dendrogram devscript preamble.)
library("highcharter")
library("ggdendro")
library("dplyr")
# Build dendrograms; the iris one is immediately overwritten by mtcars.
x <- iris[, -5] %>% dist %>% hclust %>% as.dendrogram()
x <- mtcars %>% dist %>% hclust %>% as.dendrogram()
hchart(as.dendrogram(hclust(dist(mtcars))))
# Force the plain "dendrogram" class so dispatch hits the method below.
attr(x, "class") <- "dendrogram"
class(x)
plot(x)
hchart(x)
hc <- hchart(x)
#' @importFrom ggdendro dendro_data
# hchart method for "dendrogram" objects (duplicate copy): convert the tree
# to segment coordinates with ggdendro::dendro_data() and plot each segment
# as a separate non-interactive scatter series.
# NOTE(review): purrr::by_row() now lives in purrrlyr -- verify availability.
hchart.dendrogram <- function(x, ...) {
  dddata <- dendro_data(x)
  # Apply .f to each row of .d; return only the resulting list column.
  by_row2 <- function(.d, .f, ...) {
    purrr::by_row(.d, .f, ..., .to = "out")[["out"]]
  }
  # 0-based x positions; each segment becomes its two endpoint coordinates.
  dsseg <- dddata$segments %>%
    mutate(x = x - 1, xend = xend - 1) %>%
    by_row2(function(x){
      list(list(x = x$x, y = x$y), list(x = x$xend, y = x$yend))
    })
  hc <- highchart() %>%
    hc_plotOptions(
      series = list(
        lineWidth = 2,
        showInLegend = FALSE,
        marker = list(radius = 0),
        enableMouseTracking = FALSE
      )
    ) %>%
    hc_xAxis(categories = dddata$labels$label,
             tickmarkPlacement = "on") %>%
    hc_colors(list(hex_to_rgba("#606060")))
  # One series per segment.
  for (i in seq_along(dsseg)) {
    hc <- hc %>% hc_add_series(data = dsseg[[i]], type = "scatter")
  }
  hc
}
# Try several chart orientations for the dendrogram.
hc %>%
  hc_chart(type = "column")
hc %>%
  hc_chart(type = "bar") %>%
  hc_xAxis(tickLength = 0)
hc %>% hc_chart(type = "bar") %>%
  hc_yAxis(reversed = TRUE) %>%
  hc_xAxis(opposite = TRUE, tickLength = 0)
# FIX: "Shc" was a typo for the existing object "hc" (Shc is undefined).
hc %>% hc_chart(polar = TRUE) %>%
  hc_yAxis(reversed = TRUE, visible = TRUE) %>%
  hc_xAxis(gridLineWidth = 0, lineWidth = 0)
# ---- dataset separator ----
# Bootstrap the pacman package manager, then load all modelling packages.
if (!require("pacman")) {
  install.packages("pacman")
}
pacman::p_load(
  tidyverse,
  lubridate,
  zeallot,
  RcppRoll,
  caret,
  rsample,
  xgboost,
  mlrMBO,
  hydroGOF,
  cowplot,
  ggthemes
)
# Data --------------------------------------------------------------------
load("./data/WS/ready_for_training.Rda")
load("./data/WS/cv_folds.Rda")
# Sanity check: number of training record ids in each of the 5 CV folds.
sapply(1:5, function(iter) analysis(cv_folds$splits[[iter]])$record_id %>% unlist() %>% length())
# observational SHAP ------------------------------------------------------
# Compute per-feature SHAP contributions for one CV fold.
#   iter   -- fold index (used in the fitted-model file names)
#   option -- model-variant tag (used in the file names)
# Reads the train+test feature csv and the fitted xgboost booster from
# ./data/WS/model_fits/, requests per-observation SHAP values
# (predcontrib = T), and attaches record ids so rows trace back to events.
# NOTE(review): assumes column 1 of the csv is the response and that rows
# are ordered train-then-test matching the rsample split -- confirm.
compute_ob_SHAP <- function(iter, option){
  data_all <-
    read_csv(paste0(
      "./data/WS/model_fits/iter=",
      iter,
      "opt=",
      option,
      "_train_test.csv"
    ))
  # load model
  model <- xgboost::xgb.load(
    paste0(
      "./data/WS/model_fits/gof_iter=",
      iter,
      "opt=",
      option,
      ".model"
    )
  )
  # predict SHAP (one contribution column per feature, plus bias)
  SHAP <- predict(model, data_all[-1] %>% data.matrix(), predcontrib = T) %>%
    as_tibble()
  # join record index: analysis() = training rows, assessment() = test rows
  train_event_index <- analysis(cv_folds$splits[[iter]])$record_id %>% unlist()
  test_event_index <- assessment(cv_folds$splits[[iter]])$record_id %>% unlist()
  SHAP %>%
    mutate(record_id = c(train_event_index, test_event_index)) %>%
    select(record_id, everything())
}
# Collect observational SHAP tables for all five CV folds, tagging each
# table with its fold index, then persist the list.
ob_SHAPs <- lapply(1:5, function(fold) {
  compute_ob_SHAP(fold, option = 1) %>%
    mutate(iter = fold)
})
save(ob_SHAPs, file = "./data/WS/model_fits/ob_SHAPs.Rda")
# ---- dataset separator: /WS_compute_SHAP.R | permissive | stsfk/ExplainableML_SuDS ----
if (!require("pacman")) {
install.packages("pacman")
}
# Load all modelling packages (duplicate copy of the SHAP script preamble).
pacman::p_load(
  tidyverse,
  lubridate,
  zeallot,
  RcppRoll,
  caret,
  rsample,
  xgboost,
  mlrMBO,
  hydroGOF,
  cowplot,
  ggthemes
)
# Data --------------------------------------------------------------------
load("./data/WS/ready_for_training.Rda")
load("./data/WS/cv_folds.Rda")
# Sanity check: training record count per CV fold.
sapply(1:5, function(iter) analysis(cv_folds$splits[[iter]])$record_id %>% unlist() %>% length())
# observational SHAP ------------------------------------------------------
# (Duplicate copy.) Per-feature SHAP contributions for one CV fold; reads
# the fold's feature csv and fitted booster, predicts SHAP values
# (predcontrib = T), and re-attaches record ids (train rows then test rows).
# NOTE(review): assumes csv column 1 is the response -- confirm.
compute_ob_SHAP <- function(iter, option){
  data_all <-
    read_csv(paste0(
      "./data/WS/model_fits/iter=",
      iter,
      "opt=",
      option,
      "_train_test.csv"
    ))
  # load model
  model <- xgboost::xgb.load(
    paste0(
      "./data/WS/model_fits/gof_iter=",
      iter,
      "opt=",
      option,
      ".model"
    )
  )
  # predict SHAP
  SHAP <- predict(model, data_all[-1] %>% data.matrix(), predcontrib = T) %>%
    as_tibble()
  # join record index
  train_event_index <- analysis(cv_folds$splits[[iter]])$record_id %>% unlist()
  test_event_index <- assessment(cv_folds$splits[[iter]])$record_id %>% unlist()
  SHAP %>%
    mutate(record_id = c(train_event_index, test_event_index)) %>%
    select(record_id, everything())
}
# Run the SHAP computation for each of the five folds and persist the
# resulting list (each element tagged with its fold index).
ob_SHAPs <- vector("list", 5)
for (iter in 1:5){
  ob_SHAPs[[iter]] <- compute_ob_SHAP(iter, option = 1) %>%
    mutate(iter = iter)
}
save(ob_SHAPs, file = "./data/WS/model_fits/ob_SHAPs.Rda")
# ---- dataset separator ----
#### Chapter 1: polynomial regression and the overfitting phenomenon ####
# Fit polynomial models of degree 1..maxdeg of y on x.  Each fit also
# stores its leave-one-out cross-validated predictions.  Returns a
# "polyreg" object: components 1..maxdeg are the lm fits, plus $x and $y.
polyfit<-function(y,x,maxdeg){
  pwrs<-powers(x,maxdeg)# matrix of x, x^2, ..., x^maxdeg
  lmout<-list()
  class(lmout)<-"polyreg"# create a new S3 class
  for(i in 1:maxdeg){
    lmo<-lm(y~pwrs[,1:i])
    # leave-one-out CV predictions for this degree
    lmo$fitted.cvvalues<-lvoneout(y,pwrs[,1:i,drop=FALSE])
    lmout[[i]]<-lmo
  }
  lmout$x<-x
  lmout$y<-y
  return(lmout)
}
# Print method for "polyreg": tabulate, per polynomial degree, the
# cross-validated mean squared prediction error (MSPE) and the in-sample
# training error; prints and invisibly-returns the table.
print.polyreg<-function(fits){
  maxdeg<-length(fits)-2# number of fitted models ($x and $y are extra)
  n<-length(fits$y)
  tbl<-matrix(nrow = maxdeg,ncol=2)
  cat("mean squared prediction errors,by degree\n")
  colnames(tbl)<-c("MSPE","TRAIN")
  for(i in 1:maxdeg){
    fi<-fits[[i]]
    # CV residuals (held-out predictions vs observed)
    errs<-fits$y-fi$fitted.cvvalues
    spe<-sum(errs^2)
    tbl[i,1]<-spe/n
    tbl[i,2]<-sum(fi$residuals^2)/n
  }
  print(tbl)
  return(tbl)
}
# Interactive plot method for "polyreg": scatter the data, then let the
# user step through fitted curves by degree (RETURN = next, number = jump,
# q = quit).  Cycles through three line colours.
plot.polyreg<-function(fits){
  plot(fits$x,fits$y,xlab="X",ylab="Y")
  maxdg<-length(fits)-2
  cols<-c("red","green","blue")
  dg<-curvecount<-1
  while(dg<maxdg){
    prompt<-paste("RETURN for CV fit for degree",dg,"or type degree",
                  "or q for quit:")
    rl<-readline(prompt)
    dg<-if(rl=="") dg else if(rl!="q") as.integer(rl) else break
    lines(fits$x,fits[[dg]]$fitted.values,col=cols[curvecount%%3+1])
    dg<-dg+1
    curvecount<-curvecount+1
  }
}
# Build a length(x) x dg matrix whose j-th column is x^j.
powers<-function(x,dg){
  pw<-matrix(x,nrow = length(x))
  prod<-x
  # FIX: the original `for(i in 2:dg)` ran backwards (c(2, 1)) when dg == 1,
  # appending two spurious columns; seq_len(dg - 1L) is empty in that case.
  for(i in seq_len(dg - 1L)){
    prod<-prod*x
    pw<-cbind(pw,prod)
  }
  return(pw)
}
# Leave-one-out cross-validated predictions for a linear model of y on the
# columns of xmat: for each i, fit lm() on the other n-1 rows and predict
# the held-out y[i].  Returns the n held-out predictions.
lvoneout<-function(y,xmat){
  n<-length(y)
  predy<-numeric(n)  # preallocate as numeric (was a logical vector())
  for(i in seq_len(n)){  # seq_len: safe for degenerate n, same otherwise
    lmo<-lm(y[-i]~xmat[-i,])
    betahat<-as.vector(coef(lmo))  # coef() accessor instead of $coef
    # prepend 1 for the intercept term
    predy[i]<-as.vector(betahat%*%c(1,xmat[i,]))
  }
  return(predy)
}
#### Example 1.1: polynomial simulation ####
# Simulate y = sin(3*pi*x/2) + x^2 + noise and fit degrees 1..12.
n<-60
x<-(1:n)/n
y<-vector(length = n)
for(i in 1:n) y[i]<-sin((3*pi/2)*x[i])+x[i]^2+rnorm(1,0,.5)
dg<-12
lmo<-polyfit(y,x,dg);lmo
error<-print(lmo)
plot(lmo)
#### Plot MSPE vs TRAIN error against model complexity ####
# (text(locator(), ...) waits for interactive mouse clicks to place labels)
plot(error[,"TRAIN"],ylab="error",xlab="complex",
     ylim=c(min(error),max(error)),col="blue",type="b")
points(error[,"MSPE"],type="b",col="red")
abline(v=4,lty=3)
text(locator(),"MSPE",col="red")
text(locator(),"TRAIN",col="blue")
#### Chapter 2: the perceptron ####
#### Primal-form perceptron for linearly separable data ####
#### Also tolerates non-separable data via a tolerance endure > 0 ####
#### Learning function linePercept() ####
# cls/atr: response / feature column names in `data`; aita: learning rate;
# endure: tolerance (0 = strict separation); w0/b0: initial weights/bias.
# Returns a "linePercept" object with the weight history and misclassified
# sample trace.
linePercept<-function(cls="y",atr=c("x1","x2"),data=NULL,aita=1,
                      endure=0,maxiter=1000,w0=rep(0,length(atr)),b0=0){
  datause<-data;datause$xb<-1# augmented data: constant column absorbs b
  wmat<-matrix(c(w0,b0),nrow=length(atr)+1,ncol=1)# columns = weight history
  iterk<-0
  misssample<-vector()
  while(iterk>=0){
    # y_i * (w . x_i + b) for every sample; negative entries are misclassified
    sign_mat<-as.matrix(datause[,c(atr,"xb"),drop=F])%*%wmat[,iterk+1,drop=F]%*%
      t(as.matrix(datause[,cls,drop=F]))# convert data.frame to matrix first
    sign_vec<-diag(sign_mat)
    minlab<-which.min(sign_vec)# worst-misclassified point
    if(endure==0){
      if(sign_vec[minlab]>endure){
        cat("The Final sign_min is : ",sign_vec[minlab],"\n")
        break
      }
    } else if(endure>0){# abs(sign_vec[minlab]) is the largest violation
      if(all(w0==0)&&b0==0) stop("w0 and b0 must not all be 0 when endure>0.")
      if(all(sign_vec>0)) break
      if(abs(sign_vec[minlab])<endure){
        cat("The Final sign_min is : ",abs(sign_vec[minlab]),"\n")
        break
      }
    } else stop("The endure must not be smaller than 0. ")
    if(iterk>maxiter) break # stop once the iteration budget is exceeded
    # perceptron update: w <- w + aita * y_i * x_i for the worst point
    wchange<-wmat[,iterk+1,drop=F]+
      aita*datause[,cls][minlab]*t(as.matrix(datause[minlab,c(atr,"xb"),drop=F]))
    wmat<-cbind(wmat,wchange)
    misssample[iterk+1]<-minlab
    iterk<-iterk+1
  }
  rownames(wmat)<-c(atr,"b");colnames(wmat)<-paste0("iter",0:iterk)
  Percept<-list(Finalweight=t(wmat[,ncol(wmat)]),weight=wmat,
                iteration=iterk,miss=misssample,origindata=data,
                atrdata=data[,atr,drop=F],clsdata=data[,cls,drop=F],
                endure=endure,aita=aita,w0=w0,b0=b0)
  class(Percept)<-"linePercept"
  return(Percept)
}
#### Plot method ####
# Scatter the two features, draw the learned separating line, and label
# each point with its class.  Only meaningful for 2-D feature data.
# NOTE(review): the "-+abs(...)" in the xlim expression is an odd double
# sign (evaluates as minus) -- presumably just margin padding; confirm.
plot.linePercept<-function(obj){# only useful for 2-D data
  plot(obj$atrdata[,1],obj$atrdata[,2],
       xlim=c(min(obj$atrdata[,1])-+abs(max(obj$atrdata[,1]))/3,
              max(obj$atrdata[,1])+abs(max(obj$atrdata[,1]))/3),
       ylim=c(min(obj$atrdata[,2])-abs(max(obj$atrdata[,2]))/3,
              max(obj$atrdata[,2])+abs(max(obj$atrdata[,2]))/3),
       col=2*abs(obj$clsdata[,1])+obj$clsdata[,1],pch=19,
       xlab=colnames(obj$atrdata)[1],ylab=colnames(obj$atrdata)[2])
  # decision boundary: w1*x1 + w2*x2 + b = 0 -> slope -w1/w2, intercept -b/w2
  abline(b=-obj$Finalweight[1,1]/obj$Finalweight[1,2],
         a=-obj$Finalweight[1,3]/obj$Finalweight[1,2],
         col="red",lwd=1.25)
  text(obj$atrdata[,1],obj$atrdata[,2],obj$clsdata[,1])
}
#### Print method for "linePercept" objects ####
# Shows only the final weights (component 1) and the iteration count
# (component 3).  Single-bracket subsetting drops the class, so
# print.default renders them as a plain list.
print.linePercept <- function(obj) {
  shown <- obj[c(1, 3)]
  print.default(shown)
}
#### Prediction helpers ####
#### preClinePercept() classifies a single instance ####
# Score the augmented feature vector c(atr_value, 1) against the learned
# weights; a positive score maps to the larger numeric class label, any
# other score to the smaller one.  (cls is kept for interface parity.)
preClinePercept <- function(lPobj, cls = "y", atr = c("x1", "x2"), atr_value = c(0, 1)) {
  n_feat <- length(atr)
  class_levels <- unique(lPobj$clsdata[, 1])
  class_nums <- as.numeric(class_levels)
  feature_col <- matrix(c(atr_value, 1), nrow = n_feat + 1, ncol = 1)
  score <- as.vector(lPobj$Finalweight %*% feature_col)
  if (score > 0) max(class_nums) else min(class_nums)
}
#### predict.linePercept() predicts several points at once ####
## For 2-D features it also draws the predictions on the training plot ####
# atr_value: data.frame of feature rows.  Returns atr_value with a `cls`
# column of predicted labels appended.
predict.linePercept<-function(lPobj,cls="y",atr=c("x1","x2"),atr_value=NULL){
  # classify each query row with the single-instance helper
  predvalue<-apply(atr_value,1,preClinePercept,lPobj=lPobj,atr=atr,cls=cls)
  out_pre<-atr_value
  out_pre[,cls]<-predvalue
  if(length(atr)==2){
    # overlay predictions (red diamonds + labels) on the training plot
    plot(lPobj);points(out_pre[,atr[1]],out_pre[,atr[2]],pch=23,col="red",cex=2.5,lwd=2)
    text(out_pre[,atr[1]],out_pre[,atr[2]],predvalue,col="red")
  }
  return(out_pre)
}
#### Example 2.1: linearly separable data; defaults w0=0, b0=0, endure=0,
#### aita=1 (aita is the learning rate) ####
## Training ##
# NOTE(review): dataB2.1 is not defined anywhere in this file -- presumably
# created in a session or an earlier script; confirm before running.
percept<-linePercept(data=dataB2.1,cls="y",atr=c("x1","x2"))
is(percept$Finalweight)
plot(percept)
## Prediction ##
data_atr<-data.frame(x1=c(0,2,1,3),x2=c(1,1,3,2))
predict(percept,cls = "y",atr = c("x1","x2"),atr_value = data_atr)
#### Dual-form perceptron algorithm ####
#### DualPercept() works only on linearly separable data ####
# Learns the dual coefficients alpha_i (one per sample) and bias b.  The
# Gram matrix is computed once; updates bump alpha of the worst-violating
# sample.  Final primal weights are recovered as sum_i alpha_i y_i x_i.
DualPercept<-function(cls="y",atr=c("x1","x2"),data=NULL,aita=1,
                      maxiter=1000,alpha0=rep(0,nrow(data)),b0=0){
  datause<-as.matrix(data)# matrix form for fast algebra
  sample_num<-nrow(datause)# number of samples
  clsdata<-datause[,cls,drop=F];atrdata<-datause[,atr,drop=F]
  Gram<-atrdata%*%t(atrdata)# Gram matrix, computed once
  alphaMat<-matrix(c(alpha0,b0),nrow=sample_num+1,ncol=1)# parameter history
  iterk<-0
  misssample<-vector()
  while(iterk>=0){
    alpha_vec<-alphaMat[1:sample_num,iterk+1]# vector of alphas
    b<-alphaMat[sample_num+1,iterk+1]# scalar bias
    alpha_cls<-matrix(alpha_vec*clsdata[,1],nrow = sample_num,ncol=1)
    # y_i * (sum_j alpha_j y_j <x_j, x_i> + b) for every sample i
    signMat<-(Gram%*%alpha_cls+b)%*%t(clsdata)# decision matrix
    sign_vec<-diag(signMat)# per-sample margins
    minlab<-which.min(sign_vec)# worst-misclassified point
    if(sign_vec[minlab]>0) break
    # dual update: bump alpha of the violating point, shift b by aita*y
    alphaChange<-alpha_vec
    alphaChange[minlab]<-alphaChange[minlab]+aita
    bChange<-b+aita*clsdata[,1][minlab]
    AllChange<-matrix(c(alphaChange,bChange),sample_num+1,1)
    alphaMat<-cbind(alphaMat,AllChange)
    misssample[iterk+1]<-minlab
    iterk<-iterk+1
  }
  rownames(alphaMat)<-c(paste0("alpha",1:sample_num),"b")
  colnames(alphaMat)<-paste0("iter",0:iterk)
  Finalalpha<-t(alphaMat[,ncol(alphaMat)])# vector
  Finalweight<-rep(0,length(atr))
  for(i in 1:sample_num){# recover primal weights from the duals
    weight<-clsdata[,1][i]*Finalalpha[i]*atrdata[i,]
    Finalweight<-Finalweight+weight
  }
  Finalweight<-c(Finalweight,Finalalpha[sample_num+1])
  Finalweight<-matrix(Finalweight,nrow=1);colnames(Finalweight)<-c(atr,"b")
  # inherits from "linePercept" so plot/predict methods are reused
  PerceptDual<-list(Finalweight=Finalweight,Finalalpha=Finalalpha,
                    iteration=iterk,Alpha=alphaMat,miss=misssample,
                    atrdata=atrdata,clsdata=clsdata,aita=aita,
                    alpha0=alpha0,b0=b0)
  class(PerceptDual)<-c("DualPercept","linePercept")
  return(PerceptDual)
}
### Example 2.2 in R ###
### Demonstrates S3 inheritance: "DualPercept" reuses "linePercept" methods ###
# NOTE(review): dataB2.1 / data_atr come from the Example 2.1 block above.
perpectdual<-DualPercept(cls="y",atr = c("x1","x2"),data=dataB2.1)
plot(perpectdual)
perpectdual
class(perpectdual)
names(perpectdual)
perpectdual[1:5]
predict(perpectdual,cls="y",atr = c("x1","x2"),atr_value = data_atr)
#### Chapter 3: k-nearest neighbours ####
#### Balanced 2-D kd-tree in R ####
### kd_tie() computes the splitting nodes of a 2-D kd-tree level by level ###
### tielist: the split values; odd-indexed levels split on x1 (dim 1),     ###
### even-indexed levels on x2; list order = iteration order                ###
### x_1, x_2: the matched sub-sample sets at each level                    ###
# NOTE(review): the median is round()ed, so splits land on (near-)integer
# coordinates -- presumably intentional for the textbook examples; confirm.
kd_tie<-function(x1,x2){
  x_1<-list(list(x1));x_2<-list(list(x2));l<-length(x1)
  timelab<-1;tielist<-list()
  while(timelab<=l){
    if(!timelab%%2==0){# odd levels use dim 1, even levels use dim 2
      x<-x_1[[timelab]]# grab the list of subsets at this level
    } else {x<-x_2[[timelab]]}
    tie<-sapply(x,median)
    tie<-round(tie)
    tielist[[timelab]]<-tie
    x_1_new<-list()
    x_2_new<-list()
    lstx<-length(x)
    for(j in 1:lstx){
      # partition subset j strictly left/right of its (rounded) median;
      # points equal to the median become the node itself
      xj<-x[[j]]
      x_left<-which(xj<round(median(xj)))
      x_right<-which(xj>round(median(xj)))
      x_1_new[[2*j-1]]<-x_1[[timelab]][[j]][x_left]
      x_1_new[[2*j]]<-x_1[[timelab]][[j]][x_right]
      x_2_new[[2*j-1]]<-x_2[[timelab]][[j]][x_left]
      x_2_new[[2*j]]<-x_2[[timelab]][[j]][x_right]
    }
    x_1[[timelab+1]]<-x_1_new
    x_2[[timelab+1]]<-x_2_new
    # stop once any subset has <= 1 point, emitting the final level of ties
    lbreak<-sapply(x_1_new,length)
    if(any(lbreak<=1)){
      end_timetab<-timelab+1
      if(!end_timetab%%2==0){
        end_tie<-sapply(x_1_new,median)
        tielist[[end_timetab]]<-round(end_tie)
      } else {
        end_tie<-sapply(x_2_new,median)
        tielist[[end_timetab]]<-round(end_tie)
      }
      break
    }
    timelab<-timelab+1
  }
  list(tielist=tielist,x_1=x_1,x_2=x_2)
}
# Demo inputs (the first pair is immediately overwritten by the second).
x1<-c(2,4,5,7,8,9,12,13,11,1,3,14);x2<-c(3,7,4,2,1,6,9,5,12,11,13,15)
x1<-c(2,4,5,7,8,9);x2<-c(3,7,4,2,1,6)
kd_tie(x1,x2)
# Plot a balanced 2-D kd-tree: scatter the points, then draw dotted red
# partition lines level by level -- vertical lines for x1-splits, horizontal
# for x2-splits, each clipped at its parent's split.  Returns kd_tie()'s
# result invisibly-by-value.
kd_plot<-function(x1,x2){
  m1<-max(x1);m2<-max(x2);s1<-min(x1);s2<-min(x2);l<-length(x1)
  kd2_out<-kd_tie(x1=x1,x2=x2)
  tie_kd2<-kd2_out$tielist# extract the split-node list
  ltie<-length(tie_kd2)
  plot(x1,x2,xlim = c(s1,m1),ylim = c(s2,m2),type = "n",xlab ="x(1)",ylab="x(2)",
       main="Balance kd2 tree plot")
  points(x1,x2,pch=19)
  xkd<-tie_kd2[[1]][1]
  abline(v=xkd,col="red",lty=3)# root split (vertical)
  for(i in 2:ltie){
    plt<-tie_kd2[[i]]
    lplt<-length(plt)
    lsep<-seq(1,lplt,by=2)# splits come in left/right pairs
    if(i%%2==0){
      # even level: horizontal lines, clipped at the parent's vertical split
      for(j in lsep){
        lines(c(s1-1,xkd[(j+1)/2]),c(plt[j],plt[j]),col="red",lty=3)
        lines(c(xkd[(j+1)/2],m1+1),c(plt[j+1],plt[j+1]),col="red",lty=3)
      }
    }else{
      # odd level: vertical lines, clipped at the parent's horizontal split
      for(j in lsep){
        lines(c(plt[j],plt[j]),c(s2-1,xkd[(j+1)/2]),col="red",lty=3)
        lines(c(plt[j+1],plt[j+1]),c(xkd[(j+1)/2],m2+1),col="red",lty=3)
      }
    }
    xkd<-tie_kd2[[i]]
  }
  return(tiekd2=kd2_out)
}
# Demo: kd-tree plot with negative coordinates included.
x1<-c(-2,4,5,7,8,9,-5);x2<-c(3,7,-4,2,1,6,8)
kd_plot(x1=x1,x2=x2)
#### Simulation: monotone decrease of the Lp norm in p ####
# Plot the Lp "norm" sum(x^p)^(1/p) of a non-negative vector for p = 1..maxp
# and return the max element, the smallest Lp value, and the full curve.
LpSim.Plot <- function(number, maxp = 1) {
  if (any(number < 0)) stop("The number must not smaller than 0.")
  biggest <- max(number)
  lp_values <- vapply(seq_len(maxp),
                      function(p) sum(number^p)^(1 / p),
                      numeric(1))
  # point-and-line markers are only readable for small maxp
  plot_type <- if (maxp <= 20) "b" else "l"
  plot_col  <- if (maxp <= 20) "blue" else "red"
  plot(seq_len(maxp), lp_values, type = plot_type, ylab = "LpValue", xlab = "p",
       col = plot_col, main = "Simulate Plot of Lp")
  list(maxnumber = biggest, minLp = min(lp_values), LpValue = lp_values)
}
# Demos: the Lp value decreases toward max(number) as p grows.
LpSim.Plot(number = 1:5,maxp=15)
LpSim.Plot(number = 1:5,maxp=150)
LpSim.Plot(number = sample(1:50,10),maxp=15)
#### Linear-scan KNN implementation: lineKnn() support ####
# Lp distance from one test instance to every training sample.
#   dataTest  -- named numeric vector of the test instance's features
#   atr       -- feature column names; dataTrain -- features-only data.frame
#   p         -- order of the Lp norm (default Euclidean)
# Returns a numeric vector of length nrow(dataTrain).
LpCalculate<-function(dataTest,atr=c("x1","x2","x3"),dataTrain=NULL,p=2){
  datause<-as.matrix(dataTrain)
  n<-nrow(datause)
  # vapply over seq_len(n): preallocated, type-stable, and safe for n == 0
  # (the original `for(i in 1:n)` with a logical vector() was not)
  LpVec<-vapply(seq_len(n),
                function(i) (sum(abs(dataTest-datause[i,atr])^p))^(1/p),
                numeric(1))
  return(LpVec)
}
# Linear-scan k-nearest-neighbour classifier.
#   dataTrain: training data (features + class); dataTest: query features;
#   k: neighbour count; p: Lp-distance order.
# Returns a "lineKnn" object with predictions, the distance matrix, and the
# k-nearest class labels per query.  Requires the dprep package for moda().
# NOTE(review): options(warn=-1) is set but never restored, and library()/
# detach() inside a function is fragile -- flag for cleanup.
lineKnn<-function(cls="y",atr=c("x1","x2","x3"),dataTrain=NULL,
                  dataTest=NULL,k=3,p=2){
  data_use<-dataTrain# beware: mixed-type frames coerce everything to character
  atrdata<-data_use[,atr,drop=F];clsdata<-data_use[,cls]#vector
  dataTest<-dataTest[,atr]
  # row i of LpMat = distances from test instance i to every training sample
  LpMat<-t(apply(dataTest,1,LpCalculate,atr=atr,dataTrain=atrdata,p=p))
  options(warn=-1)
  library(dprep)
  if(k==1){
    clsMatk<-apply(LpMat,1,function(x) clsdata[order(x)[1:k]])
    kPredict<-clsMatk#vector
  } else{
    clsMatk<-t(apply(LpMat,1,function(x) clsdata[order(x)[1:k]]))# labels of the k nearest only
    # majority vote; ties broken uniformly at random via sample()
    kPredict<-apply(clsMatk,1,function(x) sample(moda(x),1))
  }# moda() works on character vectors
  detach("package:dprep")
  outPredict<-dataTest;outPredict[,cls]<-kPredict
  df<-list(FinalPredict=kPredict,PredictMat=outPredict,clsMatk=clsMatk,
           LpMat=LpMat,dataTrain=dataTrain,dataTest=dataTest,atr=atr,cls=cls,k=k,p=p)
  class(df)<-"lineKnn"
  return(df)
}
# Print method for "lineKnn": show only the FinalPredict component.
# Single-bracket subsetting drops the class, so print() uses list printing.
print.lineKnn <- function(Knnobj) {
  first_component <- Knnobj[1]
  print(first_component)
}
# Plot method for "lineKnn": for 2-D feature data, scatter the training
# points (colour/symbol by class) and overlay the test points in blue with
# symbols matching their predicted class.  Does nothing for other dims.
plot.lineKnn<-function(Knnobj){
  Train<-Knnobj$dataTrain
  Test<-Knnobj$dataTest
  atr<-Knnobj$atr;cls<-Knnobj$cls
  latr<-length(atr)
  if(latr==2){
    plot(Train[,atr[1]],Train[,atr[2]],xlab=atr[1],ylab=atr[2],
         col=as.numeric(as.factor(Train[,cls])),pch=abs(as.numeric(as.factor(Train[,cls]))),
         main="Predict Plot of Knn")
    points(Test[,atr[1]],Test[,atr[2]],col="blue",
           pch=abs(as.numeric(as.factor(Knnobj$FinalPredict))),cex=2)
  }
}
#### Demo on the iris data set ####
# Random 130/20 train/test split, then 3-NN with an L10 distance on the
# two sepal features; finally compare predictions to the true species.
lab<-sample(1:150,130)
iris[,1:2]
dataKnn_iris<-iris[lab,]
dataKnn_iris_test<-iris[-lab,]
dataKnn_iris_atr<-iris[-lab,-5]
Knn_iris<-lineKnn(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
                  dataTrain = dataKnn_iris,dataTest = dataKnn_iris_atr,k=3,p=10)
names(Knn_iris)
sapply(Knn_iris,is)
# side-by-side: true species vs predicted species
cbind(as.character(dataKnn_iris_test[,5]),as.character(Knn_iris$FinalPredict))
Knn_iris$PredictMat
Knn_iris$LpMat
Knn_iris$clsMatk
#### Chapter 4: naive Bayes ####
#### navieBayes(): naive Bayes via maximum likelihood / Bayesian estimation
#### (discrete features only) ####
# lmada = 0 gives maximum-likelihood estimates; lmada > 0 gives Bayesian
# (Laplace-smoothed) estimates.  Returns a "navieBayes" object holding the
# class prior table and one conditional-probability table per feature.
# NOTE(review): apply(data, 2, as.factor) coerces every column through
# character, so numeric features are treated as discrete levels -- intended
# for this chapter's examples.
navieBayes<-function(cls="Y",atr=c("X1","X2"),data=NULL,lmada=0){
  if(!is.data.frame(data)) stop("Please enter a data.frame.")
  if(lmada<0) stop("lmada must be greater than or equal to ZERO.")
  d<-as.data.frame(apply(data,2,as.factor))
  n<-nrow(d)
  prodvar_lst<-list()# collects the estimated probability tables
  prec_var<-d[cls][,1];levelprec<-levels(prec_var);lprec<-length(levelprec)
  prec_p<-data.frame(level=levelprec,prob=NA)
  for(i in 1:lprec){
    # smoothed class prior: (count + lambda) / (n + K * lambda)
    prec_p[i,2]<-(sum(prec_var==levelprec[i])+lmada)/(n+lprec*lmada)# prior P(Y)
  }
  prodvar_lst[[cls]]<-prec_p
  lvar=length(atr)# number of features
  for(i in 1:lvar){# conditional probabilities P(X_j | Y), column-normalised
    xvar<-d[atr[i]][,1]
    txy<-table(xvar,prec_var)+lmada
    ptxy<-prop.table(txy,2)
    prodvar_lst[[atr[i]]]<-ptxy
  }
  prodvar_lst$lmada<-lmada
  prodvar_lst$response<-cls
  prodvar_lst$variables<-atr
  class(prodvar_lst)<-"navieBayes" # S3 class so generic methods dispatch
  return(prodvar_lst)
}
# quick demo call (requires dataB4.1 defined below)
navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 1)
#### Print method: print.navieBayes() ####
# Shows the response name, the smoothing parameter, the feature names, and
# then all estimated probability tables (prior + one table per feature).
print.navieBayes<-function(obj){
  cat("response = prec_var: ",obj$response,";","lmada = ",obj$lmada,"\n","\n")
  cat("The variables are : ",obj$variables,"\n","\n")
  # first lobj components are the probability tables; the rest is metadata
  lobj<-length(c(obj$response,obj$variables))
  print.default(obj[1:lobj])
}
#### Prediction helpers ####
#### preCnavieBayes() scores a single instance ####
# For each class level, compute the (unnormalised) posterior
# P(Y = level) * prod_j P(X_j = x_j | Y = level) by looking the values up
# in the stored tables.  Returns a data.frame with one row per class level
# and a post_p column of posterior scores.
preCnavieBayes<-function(NBobj,cls=NULL,atr=NULL,atr_value=NULL){
  level<-NBobj[[cls]][,1];ncls<-length(level)
  latr<-length(NBobj)-4# number of features (4 trailing metadata entries)
  start_atr<-2
  end_atr<-latr+1
  predict_df<-data.frame(matrix(NA,ncls,latr+2))# result holder
  colnames(predict_df)<-c(atr,"level","post_p")
  for(l in 1:latr){
    predict_df[1:ncls,l]<-atr_value[l]
  }
  predict_df$level<-level
  for(i in 1:ncls){
    xvec<-NULL
    for(j in start_atr:end_atr){
      # row = observed feature value, column = candidate class level
      xwhich<-which(rownames(NBobj[[atr[j-1]]])==as.character(atr_value[j-1]))
      ywhich<-which(colnames(NBobj[[atr[j-1]]])==as.character(predict_df$level[i]))
      px<-NBobj[[atr[j-1]]][xwhich,ywhich]
      xvec<-c(xvec,px)
    }
    ypre<-NBobj[[1]][,2][i]
    # posterior score = prior * product of conditionals
    predict_df[i,4]<-ypre*prod(xvec)
  }
  return(predict_df)
}
#### Generic predict.navieBayes(): score several query rows at once ####
# atr_value: data.frame of feature rows.  Returns the stacked per-row
# posterior tables produced by preCnavieBayes().
predict.navieBayes<-function(NBobj,cls=NULL,atr=NULL,atr_value=NULL){
  if(!is.data.frame(atr_value)) stop("atr_value must be a data.frame!")
  # one posterior data.frame per query row (apply passes rows as character,
  # matching the factor levels stored by navieBayes())
  post_lst<-apply(atr_value,1,preCnavieBayes,NBobj=NBobj,atr=atr,cls=cls)
  # bind all per-row tables at once instead of growing a data.frame
  # with rbind inside a loop (quadratic copying in the original)
  post_df<-do.call(rbind,post_lst)
  cat("The response : ",cls,"\n")
  return(post_df)
}
#### Example 4.1 ####
# Textbook data: X1 in {1,2,3}, X2 in {S,M,L}, class Y in {-1, 1}.
X1<-c(1,1,1,1,1,2,2,2,2,2,3,3,3,3,3)
X2<-c("S","M","M","S","S","S","M","M","L","L","L","M","M","L","L")
Y<-c(-1,-1,1,1,-1,-1,-1,1,1,1,1,1,1,1,-1)
dataB4.1<-data.frame(X1=X1,X2=X2,Y=Y)
## lmada = 0: maximum-likelihood estimation ##
plist<-navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 0)
pred_var<-data.frame(X1=c(2,1,1,3,3),X2=c("S","L","S","M","L"))
predict(plist,cls="Y",atr=c("X1","X2"),atr_value =pred_var)
## Example 4.2: lmada = 1, Bayesian estimation (Laplace smoothing) ##
plist1<-navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 1)
pred_var<-data.frame(X1=c(2,1),X2=c("S","L"))
predict(plist1,cls="Y",atr=c("X1","X2"),atr_value =pred_var)
## lmada = 3 ###
plist3<-navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 3)
pred_var<-data.frame(X1=c(2,1),X2=c("S","L"))
predict(plist3,cls="Y",atr=c("X1","X2"),atr_value =pred_var)
print.default(plist3)
# Inspect the fitted object (interactive scratch lines below).
plist
class(plist)
names(plist)
plist$lmada
plist$variables
sapply(plist,class)
?str
1/15
1/45
#### Chapter 5: decision trees ####
#### Entropy curve H(p) of a Bernoulli(p) distribution ####
# H(p) = -p*log2(p) - (1-p)*log2(1-p), maximised at p = 0.5.
p<-pretty(c(0.01,0.99),100)
HpVec<-vector(length = length(p))
for(i in 1:length(p)) HpVec[i]<--p[i]*log(p[i],2)-(1-p[i])*log(1-p[i],2)
plot(p,HpVec,type="l",col="red");abline(v=0.5,lty=3)
#### InfoGain(): information gain and information gain ratio ####
# For each feature in `atr`, compute either the information gain
# (method = "info": H(D) - H(D|A)) or the gain ratio
# (method = "inforate": gain / H_A(D)) against the class column `cls`.
# Requires Hatr() (defined below) for the conditional entropies.
InfoGain<-function(cls=NULL,atr=NULL,method=c("info","inforate"),data=NULL){
  # FIX: resolve the method choice up front.  With the length-2 default,
  # the original `if(method=="info")` compared a vector and errors on
  # R >= 4.2; match.arg() picks "info" by default and validates inputs.
  method<-match.arg(method)
  # empirical entropy of a label vector (0*log(0) treated as 0)
  HDfunc<-function(atrcls){# atrcls is a vector
    l<-length(atrcls)
    tatrcls<-table(atrcls)
    atrclspvec<-as.vector(tatrcls)/l
    logatrclspvec<-ifelse(atrclspvec==0,0,log(atrclspvec,2))
    HD<--as.vector(atrclspvec%*%logatrclspvec)
    return(HD)
  }
  HDcls<-HDfunc(atrcls = data[,cls])       # H(D)
  HDatr<-apply(data[,atr],2,HDfunc)        # H_A(D), split entropy per feature
  HatrVec<-apply(data[,atr],2,Hatr,clsvec=data[,cls])  # H(D|A) per feature
  if(method=="info"){
    infogain<-HDcls-HatrVec
  } else if(method=="inforate"){
    infogain<-(HDcls-HatrVec)/HDatr
  } else stop("Please choose a useable method.")
  names(infogain)<-atr
  list(infogain=infogain,HDcls=HDcls,HatrVec=HatrVec,HDatr=HDatr)
}
# Empirical conditional entropy H(class | feature), in bits.
#   atrvec -- feature values; clsvec -- class labels (same length).
# H(D|A) = sum_a P(A=a) * H(class | A=a), with 0*log(0) treated as 0.
Hatr <- function(atrvec = NULL, clsvec = NULL) {
  n_obs <- length(atrvec)
  feat_probs <- as.vector(table(atrvec)) / n_obs
  cond_probs <- prop.table(table(atrvec, clsvec), 1)
  log_cond <- ifelse(cond_probs == 0, 0, log(cond_probs, 2))
  row_entropy <- apply(cond_probs * log_cond, 1, sum)
  -as.vector(feat_probs %*% row_entropy)
}
#### Example 5.2 in R ####
# Loan-application toy data (values kept in Chinese as in the textbook):
# A1 age group, A2 has job, A3 owns house, A4 credit rating, Y approved.
A1<-rep(c("青年","中年","老年"),each=5)
A2<-c("否","否","是","是","否","否","否","是","否","否","否","否","是","是","否")
A3<-c("否","否","否","是","否","否","否","是","是","是","是","是","否","否","否")
A4<-c("一般","好","好","一般","一般","一般","好","好","非常好","非常好","非常好","好",
      "好","非常好","一般")
Y<-c("否","否","是","是","否","否","否","是","是","是","是","是","是","是","否")
dataB5.1<-data.frame(A1,A2,A3,A4,Y);dataB5.1
Hatr(atrvec = dataB5.1$A1,clsvec = dataB5.1$Y)
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="info",data=dataB5.1)
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="inforate",data=dataB5.1)
#### Recompute information gain on modified data ####
# A1 and A4 now have 5 levels each, to show how gain favours many-valued
# features while the gain ratio corrects for it.
A1<-rep(c("少年","青年","中年","老年","晚年"),each=3)# now 5 levels
A2<-c("否","否","是","是","否","否","否","是","否","否","否","否","是","是","否")
A3<-c("否","否","否","是","否","否","否","是","是","是","是","是","否","否","否")
A4<-c("坏","好","好","坏","一般","一般","好","好","非常好","非常好","非常好","好",
      "好","极好","一般")# now 5 levels
Y<-c("否","否","是","是","否","否","否","是","是","是","是","是","是","是","否")
dataB5.1<-data.frame(A1,A2,A3,A4,Y);dataB5.1
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="info",data=dataB5.1)
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="inforate",data=dataB5.1)
#### Example 5.3 in R ####
# One splitting step of ID3/C4.5.  Returns either a leaf (single class, or
# max gain below ept: components origindata/single/infoatr) or one child
# data.frame per value of the best feature, plus the remaining feature set
# (newatr) and the chosen feature (infoatr).  Uses dprep::moda() for the
# majority class.
# NOTE(review): demo calls below pass dataB5.2, which is not defined in
# this file -- presumably dataB5.1 was intended; confirm.
library(dprep)
subTree<-function(cls="Y",atr=c("A1","A2","A3","A4"),method=c("info","inforate"),
                  data=NULL,ept=0.1){
  atrcl<-atr;clscl<-cls;datacl<-data
  clsclvalue<-unique(datacl[,clscl])
  infoCalcul<-InfoGain(cls=clscl,atr=atrcl,data=datacl,method = method)# gains for this node
  subtree<-list()
  if(length(clsclvalue)==1){
    # pure node -> leaf labelled with the single class
    subtree[["origindata"]]<-datacl
    subtree[["single"]]<-clsclvalue
    subtree[["infoatr"]]<-"None"
    return(subtree)
  } else if(length(atrcl)==0||max(infoCalcul$infogain)<ept){
    # no features left, or gain below threshold -> majority-class leaf
    lab<-moda(datacl[,clscl])
    subtree[["origindata"]]<-datacl
    if(length(lab)==1) subtree[["single"]]<-lab
    if(length(lab)>=2) subtree[["single"]]<-sample(lab,1)
    subtree[["infoatr"]]<-"None"
    return(subtree)
  }
  atrlab<-which.max(infoCalcul$infogain);
  atrchs<-datacl[,atrcl[atrlab]]# split on the highest-gain feature
  unqatrchs<-unique(atrchs);lunq<-length(unqatrchs)
  for(i in 1:lunq){
    subtree[[i]]<-datacl[which(atrchs==unqatrchs[i]),-atrlab]# each child is a data.frame
  }
  names(subtree)<-paste0(atrcl[atrlab],"=",unqatrchs)
  subtree[["newatr"]]<-atrcl[-atrlab]
  subtree[["infoatr"]]<-atrcl[atrlab]
  return(subtree)
}
#### Example 5.3: ID3 (information gain) ####
stree<-subTree(cls="Y",atr = c("A1","A2","A3","A4"),method="info",data=dataB5.2);stree
stree2<-lapply(stree[1:2],subTree,cls="Y",atr=c("A1","A2","A4"),method="info")
#### Example 5.3: C4.5 (information gain ratio) ####
stree<-subTree(cls="Y",atr = c("A1","A2","A3","A4"),method="inforate",data=dataB5.2)
stree2<-lapply(stree[1:2],subTree,cls="Y",atr=c("A1","A2","A4"),method="inforate")
#### Decision-tree pruning support: cutTree() ####
# Extract the leaf-node data subsets from a two-level list of subTree()
# results.  Each outer component is assumed to carry exactly 2 trailing
# metadata entries (newatr, infoatr) after its children, hence length - 2.
Extree<-function(obj){# obj must be a two-level list of subTree() outputs
  lobj<-length(obj)
  lvec<-sapply(obj,length)-2
  newlst<-list()
  st<-0
  for(i in 1:lobj){
    for(j in 1:lvec[i]){
      newlst[[st+j]]<-obj[[i]][[j]]
    }
    st<-st+lvec[i]
  }
  return(newlst)
}
#### Empirical entropy of a label vector ####
# H(D) = -sum_k p_k * log2(p_k), with 0*log(0) treated as 0.
HDfunc <- function(atrcls) {
  n_obs <- length(atrcls)
  probs <- as.vector(table(atrcls)) / n_obs
  log_probs <- ifelse(probs == 0, 0, log(probs, 2))
  -as.vector(probs %*% log_probs)
}
#### Pruning loss C_alpha(T) over a set of leaves ####
#   data  -- list of leaf data.frames (output of Extree())
#   cls   -- name of the label column; alpha -- complexity penalty
# Loss = sum_t N_t * H_t(leaf) + alpha * |leaves|, returned as a 1x1
# matrix (crossproduct result), matching the original.  Uses HDfunc().
cutTree <- function(cls = "Y", data = NULL, alpha = 1) {
  n_leaves <- length(data)
  leaf_labels <- lapply(data, function(leaf) leaf[, cls])
  leaf_entropy <- sapply(leaf_labels, HDfunc)
  leaf_size <- sapply(leaf_labels, length)
  leaf_entropy %*% leaf_size + alpha * n_leaves
}
# Compare the pruning loss of the one-level tree (le1) vs the two-level
# tree (le2), then show how alpha shifts the preference toward simplicity.
stree1<-subTree(cls="Y",atr = c("A1","A2","A3","A4"),method="info",data=dataB5.2)
stree2<-lapply(stree1[1:2],subTree,cls="Y",atr=c("A1","A2","A4"),method="info")
le1<-Extree(list(stree1))
le2<-Extree(stree2)
alp=1
cutTree(cls="Y",data=le1,alpha=alp)
cutTree(cls="Y",data=le2,alpha=alp)
#### Simulate the effect of alpha on pruning ####
# (text(locator(), ...) waits for interactive clicks to place labels)
ysimple<-vector(length = 20);ycomplex<-vector(length = 20)
for(i in 1:20){
  ysimple[i]<-as.vector(cutTree(cls="Y",data=le1,alpha=i))
  ycomplex[i]<-as.vector(cutTree(cls="Y",data=le2,alpha=i))
}
plot(1:20,ycomplex,type="b",col="red",xlab="alpha",ylab = "loss",
     main="Loss Plot of Alpha")
points(1:20,ysimple,type="b",col="blue")
text(locator(),"SimpleTree",col="blue")
text(locator(),"ComplexTree",col="red")
#### Simulation: relation between Gini(p) and H(p)/2 ####
# p is a probability vector (all elements sum to 1).

# Shannon entropy, base 2 (0*log(0) treated as 0).
# Returns a 1x1 matrix (crossproduct result), matching the original.
HpSim <- function(p) {
  log_p <- ifelse(p == 0, 0, log(p, 2))
  -(p %*% log_p)
}

# Gini impurity sum_k p_k * (1 - p_k); returns a 1x1 matrix.
GpSim <- function(p) {
  one_minus_p <- 1 - p
  p %*% one_minus_p
}

# Draw a random discrete probability distribution of length l by sampling
# l distinct integers from 0:chs and normalising.
pCreate <- function(l = 10, chs = 1000) {
  draws <- sample(0:chs, l)
  draws / sum(draws)
}
# Simulate 100 random distributions of random length/scale, then plot
# entropy, half-entropy and Gini impurity against the number of classes K.
# (text(locator(), ...) waits for interactive clicks to place labels.)
plist<-list()
lst<-vector(length = 100)
chlst<-vector(length = 100)
for(i in 1:100){
  l<-sample(2:20,1)
  chs<-sample(30:1000,1)
  lst[i]<-l
  chlst[i]<-chs
  plist[[i]]<-pCreate(l=l,chs=chs)
}
all(sapply(plist,sum)==1)# check every distribution sums to 1
hpvec<-sapply(plist,HpSim)
gpvec<-sapply(plist,GpSim)
dataHG<-data.frame(K=lst,halfHp=hpvec/2,
                   Gp=gpvec,Hp=hpvec)
datahg<-dataHG[order(dataHG[,1]),]
plot(datahg$K,datahg$Hp,type="b",col="black",xlab = "K",
     ylab="uncertainty",main="The Plot of Hp and Gp")
points(datahg$K,datahg$halfHp,type = "b",col="red")
points(datahg$K,datahg$Gp,type = "b",col="blue")
text(locator(),"Gp",col="blue")
text(locator(),"halfHp",col="red")
text(locator(),"Hp",col="black")
#### Figure 5.7 in R ####
# Binary case: compare H(p)/2, Gini 2p(1-p) and classification error
# min(p, 1-p) on a common axis; all peak at p = 0.5.
p<-pretty(c(0.01,0.99),100)
HpVec<-vector(length = length(p))
GpVec<-vector(length = length(p))
for(i in 1:length(p)){
  HpVec[i]<--p[i]*log(p[i],2)-(1-p[i])*log(1-p[i],2)
  GpVec[i]<-2*p[i]*(1-p[i])
}
error<-ifelse(p<.5,p,1-p)
plot(p,HpVec/2,type="l",col="red",xlab="p",ylab="value")
lines(p,GpVec,type="l",col="blue")
lines(p,error,type = "l",col="black")
abline(v=0.5,lty=3)
text(locator(),"Gp",col="blue")
text(locator(),"halfHp",col="red")
text(locator(),"error",col="black")
#### Gini index in R; used to solve Example 5.4 ####
# Gini impurity of a probability vector p: sum_k p_k * (1 - p_k).
# Returns a 1x1 matrix (crossproduct result), matching the original.
# (Redefinition of the GpSim above, kept for the CART section.)
GpSim <- function(p) {
  one_minus_p <- 1 - p
  p %*% one_minus_p
}
# Gini index of the class after a binary split on each value of one
# feature: for value a, Gini(D,A=a) = |D1|/|D| * Gini(D1) + |D2|/|D| *
# Gini(D2), where D1 = {A == a}, D2 = the rest.  Returns a named vector,
# one entry per feature value (names follow table() row order).
GiniSingle<-function(atrvec=NULL,clsvec=NULL){# inputs: feature vector and class vector
  D<-length(clsvec)
  txy<-table(atrvec,clsvec)
  nam<-rownames(txy)
  unqatr<-unique(atrvec)
  lunq<-length(unqatr)
  giniatr<-vector(length = lunq)
  for(i in 1:lunq){
    # D1: the i-th table row; D2: all remaining rows collapsed
    t1<-txy[i,];st1<-sum(t1)
    t2<-txy[-i,,drop=F];st2<-sum(t2)
    p1<-t1/st1;p2<-apply(t2,2,sum)/st2
    giniatr[i]<-(st1/D)*GpSim(p1)+(st2/D)*GpSim(p2)
  }
  names(giniatr)<-nam
  return(giniatr)
}
# Demos on the Example 5.2 data (A1..A4, Y defined above).
GiniSingle(A1,Y)
GiniSingle(A2,Y)
GiniSingle(A3,Y)
GiniSingle(A4,Y)
# One CART split: evaluate GiniSingle() for every feature, pick the
# (feature, value) pair with the smallest Gini index, and return the chosen
# split plus the two child data.frames (matching rows, remaining rows).
# A pure node returns Finalabel = "None" with the data unchanged.
GiniCART<-function(cls=NULL,atr=NULL,data=NULL){
  if(length(unique(data[,cls]))==1) return(list(Finalabel="None",D=data))
  ginilst<-apply(data[,atr],2,GiniSingle,clsvec=data[,cls])#list
  nlst<-names(ginilst)
  # row 1: index of the best value per feature; row 2: its Gini index
  outgini<-sapply(ginilst,function(x) rbind(which.min(x),min(x)))
  nvec<-vector(length = length(atr))
  for(i in 1:length(atr)){
    ns<-names(ginilst[[i]])
    nvec[i]<-ns[outgini[1,i]]
  }
  minlab<-which.min(outgini[2,])
  atrlab<-outgini[1,minlab];atrchs<-names(ginilst[[minlab]])[atrlab]
  lab<-which(data[,nlst[minlab]]==atrchs)
  list(Finalabel=c(nlst[minlab],atrchs),FinalGini=outgini[2,minlab],
       GiniMat=outgini,Ginilst=ginilst,data[lab,-minlab],data[-lab,-minlab])
}
# Demos (dataB5.2 comes from the ID3/C4.5 section above).
cart1<-GiniCART(cls="Y",atr=c("A1","A2","A3","A4"),data=dataB5.2);cart1[1:4]
cart2<-lapply(cart1[5:6],GiniCART,cls="Y",atr=c("A1","A2","A4"));cart2[[1]]
da1<-cart1[[5]]
table(da1$A2,da1$Y)
#### Chapter 6: logistic regression and maximum entropy models ####
# Binary logistic regression fitted by batch gradient descent.
#   cls/atr -- response / feature column names; scale -- standardise features
#   w0 -- initial augmented weights (features + bias); aita -- step size
#   ept -- convergence tolerance; maxiter -- iteration budget
# Minimises the negative log-likelihood; stops when the gradient norm, the
# likelihood change, or the weight change drops below ept, or at maxiter.
# Returns a "gradLogistic" object.
gradLogistic<-function(cls=NULL,atr=NULL,data=NULL,scale=TRUE,
                       w0=rep(0,length(atr)+1),aita=1,ept=1e-5,maxiter=100000){
  if(!is.data.frame(data)) stop("data must be a data.frame.")
  datause<-data;datause$xb<-1# augmented matrix: constant column absorbs b
  atrdata<-datause[,c(atr,"xb"),drop=F];atrdata<-as.matrix(atrdata)
  if(scale){# standardise predictors
    for(i in 1:length(atr)){
      atrdata[,i]<-scale(atrdata[,i])# centre to mean 0, sd 1
    }
  }
  clsdata<-datause[,cls,drop=F];clsdata<-as.matrix(clsdata)
  N<-nrow(datause)
  # negative log-likelihood; for overflowing exp(w.x), log(1+e^z) ~ z
  MinusLog<-function(wuse,y=clsdata[,1],x=atrdata){
    n<-nrow(atrdata)
    MLog<-vector(length = n)
    for(i in 1:n){
      ep<-as.vector(wuse%*%x[i,])
      epe<-exp(ep)
      if(is.infinite(epe)){
        MLog[i]<-ep-y[i]*ep
      } else{
        MLog[i]<-log(1+epe)-y[i]*ep
      }
    }
    return(sum(MLog))
  }
  # sigmoid P(y=1|x) under the current weights w; clamps overflow to 1
  calpi<-function(x){
    ex<-exp(w%*%x)
    if(is.infinite(ex)){
      px<-1
    } else{
      px<-ex/(1+ex)
    }
    return(px)
  }
  w<-w0# initialise weights (vector)
  iterk<-1
  while(iterk>=1){
    pi<-apply(atrdata,1,calpi)# pi(k): fitted probabilities, vector
    piMinusy<-matrix(pi-clsdata[,1],nrow = N,ncol=1)# N x 1 matrix
    gradf<-t(atrdata)%*%piMinusy# gradient via matrix multiplication
    gradfvec<-gradf[,1]
    #print(sqrt(sum(gradfvec^2)))
    if(sqrt(sum(gradfvec^2))<=ept){
      stoprule<-'sqrt(sum(gradfvec^2))<=ept'
      break
    }
    wbefore<-w
    #print(w)
    w<-w-aita*gradfvec
    MinusLogBtw<-MinusLog(wuse=w)-MinusLog(wuse=wbefore)
    wBtw<-w-wbefore
    if(abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept){
      stoprule<-'abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept'
      break
    }
    if(iterk>=maxiter){
      stoprule<-'iterk>=maxiter'
      break
    }
    iterk<-iterk+1
    #print(iterk)
  }
  names(w)<-c(atr,"b")
  outlst<-list(weight=w,minusLogkplus1=MinusLog(wuse=w),
               minusLogk=MinusLog(wuse=wbefore),variable=atr,
               response=cls,origindata=data,iteration=iterk,
               formula=paste(cls,"~",paste(atr,collapse = "+")),
               stoprule=stoprule)
  class(outlst)<-"gradLogistic"
  return(outlst)
}
# Print method for "gradLogistic" objects: stopping rule, iteration count,
# model formula, then weights and objective values at 9 significant digits.
# The `digits` option is now restored via on.exit() so it survives an error
# inside print() (the original restored it only on the success path).
print.gradLogistic<-function(obj){
  cat("The stoprule is : ",obj$stoprule,"\n")
  cat("iteration : ",obj$iteration,"\n")
  cat("formula : ",obj$formula,"\n")
  oldlst<-options(digits = 9)          # options() returns the previous value
  on.exit(options(oldlst), add = TRUE) # guaranteed restore
  print(obj[1:3])
}
# Predict method for "gradLogistic": scores new observations with the fitted
# weights and thresholds P(y = 1) at 0.5.
# Note: the predictors are re-standardized using the statistics of
# `atr_value` itself, mirroring the training routine.
predict.gradLogistic<-function(obj,atr=NULL,atr_value=NULL){
  coefs<-obj$weight
  atr_value$b<-1                        # intercept column, appended last
  for(k in 1:length(atr)){
    atr_value[,k]<-scale(atr_value[,k])
  }
  # Probability of class 1 for one augmented row; saturates just below 1
  # when exp() overflows.
  scoreRow<-function(row,wts){
    lin<-exp(wts%*%row)
    if(is.infinite(lin)) return(1-0.001)
    lin/(1+lin)
  }
  P1<-apply(atr_value,1,scoreRow,wts=coefs)
  P0<-1-P1
  predvec<-ifelse(P1>=0.5,1,0)
  pMatdf<-data.frame(P1=P1,P0=P0,predict=predvec)
  list(FinalPredict=predvec,PredictMat=pMatdf)
}
#### Testing on the mtcars data set ####
#### Test model 1 ####
dataB6.1<-mtcars#training set
dataB6.1_pred<-mtcars[,c("mpg","cyl","disp","hp")]#predictors for the in-sample backtest
gradlog1<-gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"))#fit the model
predLog1<-predict(gradlog1,atr=c("mpg","cyl","disp","hp"),
atr_value = dataB6.1_pred)#in-sample backtest
miss<-data.frame(newG=predLog1$FinalPredict,G=mtcars$am)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy
dataB6.1_pred2<-dataB6.1[,c("mpg","cyl","disp","hp","drat","wt","qsec")]
gradlog2<-gradLogistic(data=dataB6.1,cls="vs",
atr=c("mpg","cyl","disp","hp","drat","wt","qsec"))
predLog2<-predict(gradlog2,atr=c("mpg","cyl","disp","hp","drat","wt","qsec"),
atr_value = dataB6.1_pred2)
miss2<-data.frame(newG=predLog2$FinalPredict,G=mtcars$vs)
tbl2<-table(miss2$newG,miss2$G);tbl2
sum(diag(tbl2))/sum(tbl2)#accuracy
dataB6.1_pred3<-dataB6.1[,c("mpg","cyl","drat","wt")]
gradlog3<-gradLogistic(data=dataB6.1,cls="vs",
atr=c("mpg","cyl","drat","wt"))
predLog3<-predict(gradlog3,atr=c("mpg","cyl","drat","wt"),
atr_value = dataB6.1_pred3)
miss3<-data.frame(newG=predLog3$FinalPredict,G=mtcars$vs)
tbl3<-table(miss3$newG,miss3$G);tbl3
sum(diag(tbl3))/sum(tbl3)#accuracy
#### Iteration study: 50 / 100 / 1000 / 10k / 100k / 1M iterations ####
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 50)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 100)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 1000)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 10000)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 100000)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 1000000)
#### DFP算法的实现 ####
# Logistic regression fitted with the DFP (Davidon-Fletcher-Powell)
# quasi-Newton method.
# cls/atr/data: response column name, predictor column names, data.frame.
# scale: standardize predictors; ept: convergence tolerance;
# G0: initial positive-definite approximation of the inverse Hessian;
# MoreStopRule: also stop on objective/step stagnation;
# w0: initial weights; aita: step size; maxiter: iteration cap;
# SearchAita: run a one-dimensional line search for the step size;
# maxsearch: cap on line-search iterations.
# NOTE(review): the Inf/NaN recovery branches draw from sample(), so results
# are not reproducible without setting the RNG seed beforehand.
# NOTE(review): like gradLogistic, `wbefore` is only assigned inside the loop;
# a break on the very first gradient test would leave it undefined -- confirm.
DFPLogistic<-function(cls=NULL,atr=NULL,data=NULL,scale=TRUE,ept=1e-5,
G0=diag(rep(1,length(atr)+1)),MoreStopRule=FALSE,
w0=rep(0,length(atr)+1),aita=.1,maxiter=100000,
SearchAita=F,maxsearch=1000){
if(!is.data.frame(data)) stop("data must be a data.frame.")
datause<-data;datause$xb<-1#augment with an intercept column
atrdata<-datause[,c(atr,"xb"),drop=F];atrdata<-as.matrix(atrdata)
if(scale){#standardize the predictors
for(i in 1:length(atr)){
atrdata[,i]<-scale(atrdata[,i])#mean-0 / sd-1 scaling
}
}
clsdata<-datause[,cls,drop=F];clsdata<-as.matrix(clsdata)
N<-nrow(datause)
MinusLog<-function(wuse,y=clsdata[,1],x=atrdata){#negative log-likelihood g(w)
n<-nrow(atrdata)
MLog<-vector(length = n)
for(i in 1:n){
ep<-as.vector(wuse%*%x[i,])
epe<-exp(ep)
if(is.infinite(epe)){
# log(1 + exp(ep)) ~= ep when exp(ep) overflows
MLog[i]<-ep-y[i]*ep
} else{
MLog[i]<-log(1+epe)-y[i]*ep
}
}
return(sum(MLog))
}
calpi<-function(x,wx){#P(y = 1 | x); random jitter keeps it strictly inside (0, 1)
oldex<-wx%*%x
ex<-exp(oldex)
if(is.infinite(ex)){
px<-1+sample(c(-ept*10/3,-ept*6,-ept*5/4,-ept/2),1)
} else if(ex==0){
px<-sample(c(ept*10/3,ept*15,ept*9/4,ept*17/2),1)
} else{
px<-ex/(1+ex)
}
return(px)
}
calgrad<-function(dfatr,dfcls,Nl,wc){#gradient of the negative log-likelihood
pi<-apply(dfatr,1,calpi,wx=wc)#fitted probabilities pi(k), vector
piMinusy<-matrix(pi-dfcls[,1],nrow = Nl,ncol=1)#N x 1 matrix
gradfCal<-t(dfatr)%*%piMinusy#matrix product, (n+1) x 1 gradient
return(gradfCal)
}
findAita<-function(dataatr,datacls,wkk,dtakk,ata_ept=1e-1,#one-dimensional (Newton) search for the step size
ata0=1,maxatak=maxsearch){#wkk, dtakk are vectors
# First-derivative term of the objective along the search direction.
expata1<-function(wk,dtak,ati,x){
exaita1<-as.vector((wk-ati*dtak)%*%x)
expcal1<-exp(exaita1)
if(is.infinite(expcal1)){
pi1<-1+sample(c(-ata_ept/3,-ata_ept,-ata_ept/4,-ata_ept/2),1)
} else if(expcal1==0){
pi1<-sample(c(ata_ept/3,ata_ept,ata_ept/4,ata_ept/2),1)
} else{
pi1<-expcal1/(1+expcal1)
}
pi1
}
# Second-derivative term (logistic variance) along the search direction.
expata2<-function(wk,dtak,ati,x){
exaita2<-as.vector((wk-ati*dtak)%*%x)
expcal2<-exp(exaita2)
if(is.infinite(expcal2)){
pi2<-sample(c(ata_ept/3,ata_ept,ata_ept/4,ata_ept/2),1)
} else if(expcal2==0){
pi2<-sample(c(ata_ept/3,ata_ept,ata_ept/4,ata_ept/2),1)
} else{
pi2<-expcal2/(1+expcal2)^2
}
pi2
}
ata<-ata0
iteratak<-1
while(iteratak>=1){
# Newton step on the step size: ata <- ata - H1/H2.
p1<-apply(dataatr,1,expata1,wk=wkk,dtak=dtakk,ati=ata)
p2<-apply(dataatr,1,expata2,wk=wkk,dtak=dtakk,ati=ata)
ppi<-p1-datacls[,1]
dtkM<-matrix(dtakk,nrow=length(dtakk),ncol=1)
dtkx<-as.vector(dataatr%*%dtkM)
H1<-as.vector(ppi%*%dtkx)
H2<-as.vector(p2%*%(dtkx^2))
ataold<-ata
atanew<-ata-H1/H2
ata<-atanew
if(abs(atanew-ataold)<ata_ept) break
if(iteratak>=maxatak) break
iteratak<-iteratak+1
}
return(ata)
}
w<-w0#initial weights, vector
G<-G0#initial positive-definite matrix
changeG<-0
changeW<-0
changeAita<-0
iterk<-1
while(iterk>=1){
#if(iterk>=4) browser()
if(any(is.infinite(w))||any(is.nan(w))){
w<-sample(seq(-2,2,length=200),length(w0))
changeW<-changeW+1
}
#If w went Inf/-Inf/NaN, re-draw w and keep iterating
gradf<-calgrad(dfatr=atrdata,dfcls=clsdata,Nl=N,wc=w)#(n+1) x 1 gradient
gradfvec<-gradf[,1]#gradient as a vector
#print(sqrt(sum(gradfvec^2)))
if(sqrt(sum(gradfvec^2))<=ept){
stoprule<-'sqrt(sum(gradfvec^2))<=ept'
break
}
wbefore<-w
#print(w)
if(any(is.infinite(G))||any(is.nan(G))){
G<-G0*sample(seq(-1,1,length=1000),1)
w<-sample(seq(-10,10,length=200),length(w0))
aita<-sample(seq(0.01,2,length=100),1)
changeW<-changeW+1
changeG<-changeG+1
changeAita<-changeAita+1
}
#If G went Inf/-Inf/NaN, re-draw a (hopefully) usable G / w / aita and restart
## line search for the best step size ##
olddelta<-G%*%gradf
if(SearchAita){
aita<-findAita(dataatr = atrdata,datacls = clsdata,wkk = wbefore,
dtakk = as.vector(olddelta),ata0 = aita)
aita<-max(0.1,aita)
}
deltak<--aita*olddelta#(n+1) x 1 matrix
wnew<-wbefore+as.vector(deltak)#update w
w<-wnew
gradfnew<-calgrad(dfatr=atrdata,dfcls=clsdata,Nl=N,wc=wnew)#w has been updated
yk<-gradfnew-gradf#(n+1) x 1 matrix
G<-G+(deltak%*%t(deltak))/as.vector(t(deltak)%*%yk)-
(G%*%yk%*%t(yk)%*%G)/as.vector(t(yk)%*%G%*%yk)#DFP update of G
if(MoreStopRule){
MinusLogBtw<-MinusLog(wuse=w)-MinusLog(wuse=wbefore)
wBtw<-w-wbefore
if(abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept){
stoprule<-'abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept'
break
}
}
if(iterk>=maxiter){
stoprule<-'iterk>=maxiter'
break
}
iterk<-iterk+1
#print(iterk)
}
names(w)<-c(atr,"b")
outlst<-list(weight=w,minusLogkplus1=MinusLog(wuse=w),
minusLogk=MinusLog(wuse=wbefore),LpGradf=sqrt(sum(gradfvec^2)),
changW=changeW,changeG=changeG,changeAita=changeAita,
variable=atr,response=cls,
origindata=data,iteration=iterk,
formula=paste(cls,"~",paste(atr,collapse = "+")),
stoprule=stoprule)
class(outlst)<-"DFPLogistic"
return(outlst)
}
# Print method for "DFPLogistic" objects: stopping rule, iteration count,
# formula, then the first seven fit components at 9 significant digits.
# The `digits` option is now restored via on.exit() so it survives an error
# inside print() (the original restored it only on the success path).
print.DFPLogistic<-function(obj){
  cat("The stoprule is : ",obj$stoprule,"\n")
  cat("iteration : ",obj$iteration,"\n")
  cat("formula : ",obj$formula,"\n")
  oldlst<-options(digits = 9)          # options() returns the previous value
  on.exit(options(oldlst), add = TRUE) # guaranteed restore
  print(obj[1:7])
}
# Predict method for "DFPLogistic" objects: same contract as
# predict.gradLogistic -- re-standardizes `atr_value` with its own
# statistics, appends an intercept column, and thresholds the fitted
# probability at 0.5.
predict.DFPLogistic<-function(obj,atr=NULL,atr_value=NULL){
  wts<-obj$weight
  atr_value$b<-1
  for(i in 1:length(atr)){
    atr_value[,i]<-scale(atr_value[,i])
  }
  # Class-1 probability for one augmented row; caps near 1 on exp overflow.
  prob1<-function(v,w){
    num<-exp(w%*%v)
    if(is.infinite(num)) 1-0.001 else num/(1+num)
  }
  p_one<-apply(atr_value,1,prob1,w=wts)
  p_zero<-1-p_one
  labels<-ifelse(p_one>=0.5,1,0)
  list(FinalPredict=labels,
       PredictMat=data.frame(P1=p_one,P0=p_zero,predict=labels))
}
#### Tests of the DFP algorithm ####
#### mtcars model 1: multiple stopping rules ####
dataDFPLog<-mtcars
dataDFPLog_pred<-mtcars[,c("mpg","cyl","disp","hp")]#predictors for the in-sample backtest
DFPLog1<-DFPLogistic(data=dataDFPLog,cls="am",
atr=c("mpg","cyl","disp","hp"),ept=1e-3,
maxiter = 10000,MoreStopRule = T);DFPLog1#fit the model
predDFPLog1<-predict(DFPLog1,atr=c("mpg","cyl","disp","hp"),
atr_value = dataDFPLog_pred)#in-sample backtest
miss<-data.frame(newG=predDFPLog1$FinalPredict,G=mtcars$am)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy
#### mtcars model 2: different response, single stopping rule, with line search ####
DFPLog2<-DFPLogistic(data=dataDFPLog,cls="vs",
atr=c("mpg","cyl","disp","hp"),ept=1e-3,
maxiter = 100000,MoreStopRule = F,SearchAita = T);DFPLog2
#### Comparison: without the one-dimensional line search ####
DFPLogistic(data=dataDFPLog,cls="vs",
atr=c("mpg","cyl","disp","hp"),ept=1e-3,
maxiter = 100000,MoreStopRule = F,SearchAita = F)#no line search
#### mtcars model 3: different response and predictors, single stopping rule ####
DFPLog3<-DFPLogistic(data=dataDFPLog,cls="vs",
atr=c("mpg","hp","wt","qsec"),ept=1e-3,
maxiter = 10000,MoreStopRule = F);DFPLog3
#### iris data preparation ####
dataDFPLog_iris<-iris[1:100,]
dataDFPLog_iris$Species<-ifelse(dataDFPLog_iris$Species=="setosa",1,0)
trainlab<-sample(100,50)
dataDFPiris_train<-dataDFPLog_iris[trainlab,]
dataDFPiris_test<-dataDFPLog_iris[-trainlab,]
dataDFPiris_test_atr<-dataDFPiris_test[,-5]#test feature set
#### iris model 1 ####
DFPLogiris1<-DFPLogistic(cls="Species",data=dataDFPiris_train,
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris1
predDFPLogiris1<-predict(DFPLogiris1,atr_value = dataDFPiris_test_atr,#model prediction
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"))
miss<-data.frame(newG=predDFPLogiris1$FinalPredict,G=dataDFPiris_test$Species)
tbl<-table(miss$newG,miss$G);tbl
sum(diag(tbl))/sum(tbl)#prediction accuracy
#### iris model 2 ####
DFPLogiris2<-DFPLogistic(cls="Species",data=dataDFPiris_train,
atr=c("Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris2
predDFPLogiris2<-predict(DFPLogiris2,atr=c("Petal.Length","Petal.Width"),
atr_value = dataDFPiris_test_atr[,-c(1,2)])#in-sample backtest
miss<-data.frame(newG=predDFPLogiris2$FinalPredict,G=dataDFPiris_test$Species)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy
#### iris model 3: the other two classes ####
dataDFPLog_iris2<-iris[51:150,]
dataDFPLog_iris2$Species<-ifelse(dataDFPLog_iris2$Species=="versicolor",1,0)
trainlab2<-sample(1:100,50)
dataDFPiris_train2<-dataDFPLog_iris2[trainlab2,]
dataDFPiris_test2<-dataDFPLog_iris2[-trainlab2,]
dataDFPiris_test_atr2<-dataDFPiris_test2[,-5]#test feature set
DFPLogiris2_1<-DFPLogistic(cls="Species",data=dataDFPiris_train2,
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris2_1
predDFPLogiris2_1<-predict(DFPLogiris2_1,
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"),
atr_value = dataDFPiris_test_atr2)#backtest on held-out data
miss<-data.frame(newG=predDFPLogiris2_1$FinalPredict,G=dataDFPiris_test2$Species)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy 0.92
# NOTE(review): the next fit trains on dataDFPiris_train (the setosa task)
# but is evaluated on the task-2 test set -- probably should be
# dataDFPiris_train2; confirm before relying on the reported accuracy.
DFPLogiris2_2<-DFPLogistic(cls="Species",data=dataDFPiris_train,
atr=c("Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris2_2
predDFPLogiris2_2<-predict(DFPLogiris2_2,atr=c("Petal.Length","Petal.Width"),
atr_value = dataDFPiris_test_atr2[,-c(1,2)])#backtest on held-out data
miss<-data.frame(newG=predDFPLogiris2_2$FinalPredict,G=dataDFPiris_test2$Species)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy 0.88
#### 第七章 支持向量机 ####
#### 基于SMO算法的SVM实现 线性及非线性 ####
## 编写核函数,输入为一个非扩充的特征矩阵,行数为样本id,列为特征;输出为核函数的Gram矩阵 ####
# Linear kernel: Gram matrix of pairwise inner products, K(x, z) = <x, z>.
# data: data.frame (or matrix) of features, one sample per row.
lineKernel<-function(data){
  m<-as.matrix(data)
  tcrossprod(m)  # equivalent to m %*% t(m)
}
# Polynomial kernel: K(x, z) = (<x, z> + 1)^p, degree p (default 2).
polyKernel<-function(data,p=2){
  linGram<-tcrossprod(as.matrix(data))  # pairwise inner products
  (linGram+1)^p
}
# Gaussian (RBF) kernel built on an Lp distance:
#   K(i, j) = exp(-d_Lp(x_i, x_j)^2 / (2 * lmada^2))
# lmada: bandwidth, default sqrt(#features)/sqrt(2); Lp: the norm used for
# the pairwise distance (2 = Euclidean).
gaussiKernel<-function(data,lmada=sqrt(ncol(data))/sqrt(2),Lp=2){
  m<-as.matrix(data)
  n<-nrow(data)
  sqDist<-matrix(NA,nrow=n,ncol=n)  # squared Lp distances
  for(r in 1:n){
    for(s in 1:n){
      lpd<-(sum(abs(m[r,]-m[s,])^Lp))^(1/Lp)
      sqDist[r,s]<-lpd^2
    }
  }
  exp(sqDist/(-2*(lmada^2)))
}
#计算g(x)
# SMO helper: decision values g(x_i) = sum_j alpha_j * y_j * K(x_i, x_j) + b
# for every training sample i.
# clsvec: labels; gram: kernel Gram matrix; alphak: current alphas; bk: bias.
gfunc<-function(clsvec,gram,alphak,bk){
  ncls<-length(clsvec)
  ay<-alphak*clsvec                 # alpha_j * y_j, reused for every row
  gvec<-vector(length = ncls)
  for(i in 1:ncls){
    gvec[i]<-sum(ay*gram[i,])+bk
  }
  return(gvec)
}
# Quick sanity checks of the kernel helpers (testx is an example feature
# matrix defined elsewhere in this script); is() reports the class.
# Fixed: the named argument must be `gram` -- R argument matching is
# case-sensitive, so `Gram=` raised an "unused argument" error.
is(gfunc(clsvec = c(1,-1,1),gram=testx,alphak = c(1,2,3),bk=3))
is(lineKernel(data=testx))
is(polyKernel(data=testx))
is(gaussiKernel(data=testx))
#### 选择工作集的方法 ####
# Choose the first alpha to optimize in SMO: scan all samples for violations
# of the KKT conditions (within tolerance ept) and return the worst violator.
# alphak: current alphas; gveck: decision values g(x_i); C: box constraint;
# clsvec: labels. Support vectors (0 < alpha < C) are checked first.
# Returns list(alllab = all violators, outlab = the pool searched,
# labmax = index of the worst violator or NULL).
findAlpha<-function(alphak,gveck,ept,C,clsvec){#alphak, gveck are vectors
ygk<-clsvec*gveck
lab1<-which(alphak==0)#samples expected on/outside the margin (y*g >= 1)
lab2<-which(alphak==C)#samples at the upper bound (y*g <= 1)
lab3<-which(alphak>0&alphak<C)#support vectors (y*g == 1)
alllab<-NULL
if(length(lab1)>=1){
ygkright<-ygk[lab1]
yuselab1<-which(ygkright<(1-ept))#KKT violated
alllab<-c(alllab,lab1[yuselab1])
}
if(length(lab2)>=1){
ygkerror<-ygk[lab2]
yuselab2<-which(ygkerror>(1+ept))#KKT violated
alllab<-c(alllab,lab2[yuselab2])
}
if(length(lab3)>=1){
ygksupport<-ygk[lab3]
ygkuse<-abs(ygksupport-1)
yuselab3<-which(ygkuse>ept)#KKT violated
alllab<-c(alllab,lab3[yuselab3])
}
##check the support vectors first
# NOTE(review): exists("yuselab3") also searches enclosing environments, so
# a stale `yuselab3` in the global environment could satisfy this check even
# when lab3 was empty -- confirm this is acceptable.
if(exists("yuselab3")&&length(yuselab3)>=1){
outlab<-lab3[yuselab3]
} else{#otherwise fall back to all violators
outlab<-alllab
}
if(!is.null(outlab)){
# Pick the sample with the largest |y*g - 1| (worst violation).
ygkMinus1<-abs(ygk-1)[outlab]
maxerrlab<-outlab[which.max(ygkMinus1)]
labmax<-maxerrlab
} else{
labmax<-NULL
}
outlst<-list(alllab=alllab,outlab=outlab,labmax=labmax)
return(outlst)
}
# Leftover interactive debugging session for findAlpha (debug()/undebug()
# are side-effecting; kept exactly as in the original script).
debug(findAlpha)
findAlpha(alphak = c(0,0,0,0,0),gveck = c(0,0,0,0,0),ept=1e-2,C=4,clsvec = c(1,1,-1,-1,1))
undebug(findAlpha)
#### 另一种工作集选取算法 ####
# Alternative SMO working-set selection (maximal-violating-pair rule):
# returns the most violating pair -- chsIup from I_up and chsIlow from I_low
# -- together with their selection scores m (max) and M (min).
# gram: kernel Gram matrix; clsdata: n x 1 label matrix; C: box constraint;
# alphak: current alphas.
SelectFunc<-function(gram,clsdata,C,alphak){
  y<-clsdata[,1]
  nA<-length(alphak)
  # Gradient of the dual objective: (y y^T * K) alpha - 1.
  Q<-(clsdata%*%t(clsdata))*gram
  gradk<-as.vector(Q%*%matrix(alphak,nrow=nA,ncol=1))-rep(1,nA)
  score<--y*gradk
  upSet<-which((alphak<C&y==1)|(alphak>0&y==-1))
  lowSet<-which((alphak<C&y==-1)|(alphak>0&y==1))
  iUp<-upSet[which.max(score[upSet])]
  iLow<-lowSet[which.min(score[lowSet])]
  list(chsIup=iUp,chsIlow=iLow,chsm=score[iUp],chsM=score[iLow])
}
# Support vector machine trained with the SMO algorithm.
# cls/atr/data: response column (labels +1/-1), predictor columns, data.frame.
# Kernel: "line", "poly" or "gaussi"; C: box constraint; ept: KKT tolerance;
# alpha0/b0: starting multipliers and threshold; p: polynomial degree;
# Lp/lmada: norm and bandwidth of the Gaussian kernel; maxiter: iteration cap;
# Change: TRUE uses the maximal-violating-pair rule (SelectFunc), FALSE the
# KKT-violation scan (findAlpha).
# NOTE(review): Kernel is compared with == and defaults to a length-3 vector;
# callers must pass a single kernel name (no match.arg() here).
# NOTE(review): one degenerate branch draws alpha2 from sample(), so runs are
# not reproducible without a fixed RNG seed.
smoSVM<-function(cls,atr,data,Kernel=c("line","poly","gaussi"),scale=T,
C=10,ept=1e-2,alpha0=rep(0,nrow(data)),b0=0,p=2,Lp=2,
lmada=sqrt(ncol(data))/sqrt(2),maxiter=10000,Change=T){
if(!is.data.frame(data)) stop("data must be a data.frame.")
datause<-data
N<-nrow(datause)
lN<-1:N
atrdata<-datause[,atr,drop=F];atrdata<-as.matrix(atrdata)
clsdata<-datause[,cls,drop=F];clsdata<-as.matrix(clsdata)
clsVec<-clsdata[,1]
if(scale){#standardize the predictors
for(i in 1:length(atr)){
atrdata[,i]<-scale(atrdata[,i])#mean-0 / sd-1 scaling
}
}
## Compute the Gram matrix of the chosen kernel
if(Kernel=="line") Gram<-lineKernel(data=atrdata)#matrix
if(Kernel=="poly") Gram<-polyKernel(data=atrdata,p=p)#matrix
if(Kernel=="gaussi") Gram<-gaussiKernel(data=atrdata,lmada = lmada,Lp=Lp)#matrix
alpha<-alpha0
b<-b0
iterk<-1
while(iterk>=1){
#if(iterk>=9) browser()
gk<-gfunc(clsvec = clsVec,gram=Gram,alphak = alpha,bk=b)#decision values, vector
Ek<-gk-clsVec#prediction errors, vector
#pick the index of the first alpha to optimize
if(Change){
Clst<-SelectFunc(gram=Gram,clsdata=clsdata,C=C,alphak = alpha)
alp1<-Clst$chsIup;Ekalp1<-Ek[alp1]
alp2<-Clst$chsIlow;Ekalp2<-Ek[alp2]
malp<-Clst$chsm;Malp<-Clst$chsM
y1k<-clsVec[alp1];y2k<-clsVec[alp2]
#stopping condition: the maximal violating pair is within tolerance
if((malp-Malp)<=ept){
stoprule<-"(malp-Malp)<=ept"
break
}
} else{
lst<-findAlpha(alphak = alpha,gveck = gk,ept=ept,C=C,clsvec = clsVec)
alp1<-lst$labmax;alllab<-lst$alllab;outlab<-lst$outlab
## stopping condition: no (or too few) KKT violators left
if(is.null(alp1)||(length(alllab)/N)<ept||(length(outlab)/N)<ept){
stoprule<-"is.null(alp1)||(length(alllab)/N)<ept||(length(outlab)/N)<ept"
break
}
Ekalp1<-Ek[alp1]
y1k<-clsVec[alp1]
# Second alpha: the one maximizing |E1 - E2|.
chooselN<-lN[-alp1];chooseEk<-Ek[-alp1]
alp2<-ifelse(Ekalp1>0,chooselN[which.min(chooseEk)],chooselN[which.max(chooseEk)])
y2k<-clsVec[alp2]
Ekalp2<-Ek[alp2]
}
alp2old<-alpha[alp2];alp1old<-alpha[alp1]
#clipping bounds H/L for alpha2 and the curvature nk
Hk<-ifelse(y1k==y2k,min(C,alp2old+alp1old),min(C,C+alp2old-alp1old))
Lk<-ifelse(y1k==y2k,max(0,alp2old+alp1old-C),max(0,alp2old-alp1old))
k11<-Gram[alp1,alp1];k22<-Gram[alp2,alp2];k12<-Gram[alp1,alp2];k21<-k12
nk<-k11+k22-2*k12
#update the selected pair of alphas: unconstrained step, then clip to [L, H]
alp2kplus1_unc<-alp2old+y2k*(Ekalp1-Ekalp2)/nk
if(alp2kplus1_unc>Hk){
alpha[alp2]<-Hk
} else if(alp2kplus1_unc<Lk){
if(Lk!=0){
alpha[alp2]<-Lk
} else if(Lk==0&&alp2old!=0){
alpha[alp2]<-Lk
} else{#degenerate case (Lk == 0 and alp2old == 0): jitter alpha2 so the update cannot get stuck
alpha[alp2]<-sample(c(.15,.1,ept/6,ept*10,ept),1)
}
} else{
alpha[alp2]<-alp2kplus1_unc
}
alpha[alp1]<-alp1old+y1k*y2k*(alp2old-alpha[alp2])
## alpha has been updated
alp1new<-alpha[alp1];alp2new<-alpha[alp2]
##check whether alp2new differs from alp2old; special case: when
#alp2kplus1_unc < Lk and Lk == 0, alp2new == alp2old and later iterations
#would not progress either; in extreme cases Hk can also be 0
## next, update the threshold b_k to b_{k+1}
b_old<-b
b1kplus1<--Ekalp1-y1k*k11*(alp1new-alp1old)-y2k*k21*(alp2new-alp2old)+b_old
b2kplus1<--Ekalp2-y1k*k12*(alp1new-alp1old)-y2k*k22*(alp2new-alp2old)+b_old
if(alp1new>0&&alp1new<C){
b<-b1kplus1
} else if(alp2new>0&&alp2new<C){
b<-b2kplus1
} else if((alp1new==0||alp1new==C)&&(alp2new==0||alp2new==C)){
b<-(b2kplus1+b1kplus1)/2
} else if((alp1new>0&&alp1new<C)&&(alp2new>0&&alp2new<C)){
b<-b1kplus1
}
#b has been updated
if(iterk>=maxiter){
stoprule<-"iterk>=maxiter"
break
}
iterk<-iterk+1
}
# Post-processing: support vectors are the samples with nonzero alpha.
nonzero<-which(alpha!=0);lnz<-length(nonzero)
nonZeroAlpha<-alpha[nonzero]
names(nonZeroAlpha)<-nonzero
alpy<-alpha[nonzero]*clsVec[nonzero]
if(Kernel=="line"){
# Explicit weight vector exists only for the linear kernel.
w<-rep(0,length(atr))
for(i in 1:lnz){
w<-w+alpy[i]*atrdata[nonzero[i],]
}
} else{
w<-NULL
}
# Average the per-support-vector threshold estimates.
bvec<-vector(length = lnz)
for(j in 1:lnz){
gramvec<-as.vector(Gram[nonzero,j])
bvec[j]<-clsVec[nonzero[j]]-as.vector(alpy%*%gramvec)
}
outlst<-list(nonZeroAlpha=nonZeroAlpha,bMean=mean(bvec),support=nonzero,w=w,
stoprule=stoprule,formula=paste(cls,"~",paste0(atr,collapse = "+")),
variables=atr,response=cls,iteration=iterk,clsvec=clsVec,
ScaleAtr=atrdata,Gram=Gram,Kernel=Kernel,data=data,p=p,Lp=Lp,lmada=lmada)
class(outlst)<-"smoSVM"
return(outlst)
}
# Print method for "smoSVM" objects: stopping rule, iteration count, formula,
# then the key fit components (the explicit weight vector is shown only for
# the linear kernel, where it is defined).
# The original captured options() into an unused local and never restored it;
# that dead assignment has been removed.
print.smoSVM<-function(obj){
  Kernel<-obj$Kernel
  cat("The stoprule is : ",obj$stoprule,"\n")
  cat("iteration : ",obj$iteration,"\n")
  cat("formula : ",obj$formula,"\n")
  if(Kernel=="line"){
    print(obj[1:4])
  } else{
    print(obj[1:3])
  }
}
# Predict method for "smoSVM" objects: classify each row of `atr_value`
# as +1 / -1 with the fitted support vectors.
# SVMobj: fitted smoSVM object; cls/atr: response/predictor names;
# atr_value: data.frame of new predictor values; scale: standardize first.
# NOTE(review): the new data are standardized with their own mean/sd rather
# than the training statistics -- consistent with the training code, but
# confirm this is intended before using it on small test sets.
predict.smoSVM<-function(SVMobj,cls,atr,atr_value,scale=T){
testdata<-as.matrix(atr_value)
if(scale){#standardize the test predictors
for(i in 1:length(atr)){
testdata[,i]<-scale(testdata[,i])
}
}
usealpha<-SVMobj$nonZeroAlpha
support<-SVMobj$support
b<-SVMobj$bMean
#Gram<-SVMobj$Gram
Kernel<-SVMobj$Kernel
traincls<-SVMobj$clsvec
if(scale){
trainatr<-SVMobj$ScaleAtr
} else{
trainatr<-SVMobj$data[,atr]
trainatr<-as.matrix(trainatr)
}
## Prediction
usecls<-traincls[support]
usetrainAtr<-trainatr[support,,drop=F]
alpy<-usealpha*usecls
# Score one test row: rebuild the kernel between the row and all support
# vectors (row 1 of the small Gram matrix), then sign the decision function.
predoneSVM<-function(x){
newAtr<-rbind(x,usetrainAtr)
if(Kernel=="line") gram<-lineKernel(data=newAtr)
if(Kernel=="poly") gram<-polyKernel(data=newAtr,p=SVMobj$p)
if(Kernel=="gaussi") gram<-gaussiKernel(data=newAtr,lmada=SVMobj$lmada,Lp=SVMobj$Lp)
kvec<-gram[1,2:(length(support)+1)]
sgn<-sign(alpy%*%kvec+b)
sgn<-ifelse(sgn>0,1,-1)#map sign 0 to -1 as well
return(sgn)
}
predcls<-apply(testdata,1,predoneSVM)
predcls
}
#### Function tests ####
#### mtcars data set ####
datasvm_mtcars<-mtcars[,c(1,3,4,5,6,7,9)]#training set
rownames(datasvm_mtcars)<-NULL
datasvm_mtcars$am<-ifelse(datasvm_mtcars$am==0,-1,1)
datasvm_mtcars_pred<-datasvm_mtcars[,c("mpg","disp","hp","qsec","drat")]
#### mtcars model 1: linear kernel ####
svm1<-smoSVM(cls="am",atr=c("mpg","disp","hp","qsec","drat"),data=datasvm_mtcars,
Kernel = "line",maxiter = 100000,ept=1e-2,C=10,
scale = T,Change = T);svm1#fit the model
pmt1<-predict(svm1,atr=c("mpg","disp","hp","qsec","drat"),cls="am",
atr_value =datasvm_mtcars_pred,scale = TRUE );pmt1#in-sample backtest
tblmt1<-table(datasvm_mtcars$am,pmt1);tblmt1
sum(diag(tblmt1))/sum(tblmt1)#backtest accuracy, 0.875
#### mtcars model 2: Gaussian RBF kernel ####
svm2<-smoSVM(cls="am",atr=c("mpg","disp","hp","qsec","drat"),data=datasvm_mtcars,
Kernel = "gaussi",maxiter = 100000,ept=1e-2,C=10,
scale = T,Change = T);svm2#fit the model
pmt2<-predict(svm2,atr=c("mpg","disp","hp","qsec","drat"),cls="am",
atr_value =datasvm_mtcars_pred,scale = TRUE );pmt2#in-sample backtest
tblmt2<-table(datasvm_mtcars$am,pmt2);tblmt2
sum(diag(tblmt2))/sum(tblmt2)#backtest accuracy, 0.96875
#### mtcars model 3: polynomial kernel ####
svm3<-smoSVM(cls="am",atr=c("mpg","disp","hp","qsec","drat"),data=datasvm_mtcars,
Kernel = "poly",maxiter = 100000,ept=1e-2,C=10,p=3,
scale = T,Change = T);svm3#fit the model
pmt3<-predict(svm3,atr=c("mpg","disp","hp","qsec","drat"),cls="am",
atr_value =datasvm_mtcars_pred,scale = TRUE );pmt3#in-sample backtest
tblmt3<-table(datasvm_mtcars$am,pmt3);tblmt3
sum(diag(tblmt3))/sum(tblmt3)#backtest accuracy, 0.90625
#### iris data set ####
datasvmiris<-iris[1:100,]
datasvmiris$Species<-ifelse(datasvmiris$Species=="setosa",1,-1)
trainlab<-sample(1:100,70)
datasvmiris_train<-datasvmiris[trainlab,]
datasvmiris_test<-datasvmiris[-trainlab,]
datasvmiris_test_atr<-datasvmiris_test[,-5]
#### iris model 1: linear kernel ####
svmiris1<-smoSVM(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
data=datasvmiris,Kernel = "line",maxiter = 10000,
ept=1e-2,C=5,scale = T,Change = TRUE);svmiris1
piris1<-predict(svmiris1,atr=c("Sepal.Length","Sepal.Width"),cls="Species",
atr_value =datasvmiris_test_atr[,1:2],scale = TRUE );piris1#model prediction
tbliris1<-table(datasvmiris_test$Species,piris1);tbliris1
sum(diag(tbliris1))/sum(tbliris1)#prediction accuracy, 0.9333333
#### iris model 2: Gaussian RBF kernel ####
svmiris2<-smoSVM(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
data=datasvmiris,Kernel = "gaussi",maxiter = 10000,
ept=1e-2,C=5,scale = T,Change = TRUE,Lp=3);svmiris2
piris2<-predict(svmiris2,atr=c("Sepal.Length","Sepal.Width"),cls="Species",
atr_value =datasvmiris_test_atr[,1:2],scale = TRUE );piris2#model prediction
tbliris2<-table(datasvmiris_test$Species,piris2);tbliris2
sum(diag(tbliris2))/sum(tbliris2)#prediction accuracy, 0.8666667
#### iris model 3: polynomial kernel ####
svmiris3<-smoSVM(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
data=datasvmiris,Kernel = "poly",maxiter = 10000,
ept=1e-2,C=5,scale = T,Change = TRUE,p=3);svmiris3
piris3<-predict(svmiris3,atr=c("Sepal.Length","Sepal.Width"),cls="Species",
atr_value =datasvmiris_test_atr[,1:2],scale = TRUE );piris3#model prediction
tbliris3<-table(datasvmiris_test$Species,piris3);tbliris3
sum(diag(tbliris3))/sum(tbliris3)#prediction accuracy, 0.9333333
#### 第八章 提升方法 ####
#### 例8.1的R实现 ####
## 计算模型Gm的系数alpham
# AdaBoost coefficient of a base classifier with weighted error rate `em`:
#   alpha_m = (1/2) * log((1 - em) / em)
# Degenerate cases: a perfect classifier (em == 0) gets Inf and a totally
# wrong one (em == 1) gets -Inf, matching the limit of the formula.
# Improvements vs. the original: invalid `em` now raises a clear error
# instead of "object 'alpham' not found", and the range check uses scalar
# logical operators.
AdaboostAlpha<-function(em){
  stopifnot(is.numeric(em), length(em) == 1L, !is.na(em))
  if(em < 0 || em > 1) stop("em must be an error rate in [0, 1].")
  if(em == 0) return(Inf)
  if(em == 1) return(-Inf)
  (1/2)*log((1-em)/em)  # same value as the original three-step computation
}
## 计算样本权重m+1次迭代,Dm+1
# AdaBoost weight update D_{m+1}: multiply each sample weight by
# exp(-alpha_m * y_i * G_m(x_i)) and renormalize to sum to 1.
# alpham must be finite (not Inf/-Inf/NaN) for the result to be well-defined.
AdaboostWeight<-function(weightm,alpham,clsvec,preclsvec){
  unnormalized<-weightm*exp(-alpham*clsvec*preclsvec)
  unnormalized/sum(unnormalized)
}
## 计算带权重的em
# Weighted misclassification error: total weight of the samples whose
# predicted label disagrees with the true label (all inputs are vectors).
AdaboostError<-function(clsvec,preclsvec,weightm){
  wrong<-which(clsvec!=preclsvec)
  sum(weightm[wrong])
}
## 编写简单的树桩决策树:一次搜索
# Decision stump learned by exhaustive search.
# Candidate thresholds are atrvec + sep (last element dropped); for each
# threshold both orientations ("<= is +1" and "<= is -1") are scored with
# the weighted error AdaboostError(), and the best stump is returned.
# NOTE(review): the threshold grid assumes `atrvec` is sorted ascending --
# confirm before using on unsorted data.
SearchOneTree<-function(atr,cls,weightm,data,sep=0.5){
atrvec<-data[,atr];clsvec<-data[,cls]
latr<-length(atrvec)
searchx<-atrvec+sep
searchx<-searchx[-latr]
emveclow<-vector(length = latr-1)
emvecup<-vector(length = latr-1)
for(i in 1:(latr-1)){
sch<-searchx[i]
clslow<-ifelse(atrvec<=sch,1,-1)#orientation "<= threshold is +1"
clsup<-ifelse(atrvec<=sch,-1,1)#orientation "<= threshold is -1"
emveclow[i]<-AdaboostError(weightm = weightm,clsvec=clsvec,preclsvec = clslow)
emvecup[i]<-AdaboostError(weightm = weightm,clsvec=clsvec,preclsvec = clsup)
}
lowmin<-which.min(emveclow);upmin<-which.min(emvecup)
if(emveclow[lowmin]!=emvecup[upmin]){
error<-min(emveclow[lowmin],emvecup[upmin])
finalab<-ifelse(emveclow[lowmin]<emvecup[upmin],lowmin,upmin)
} else{#tie: prefer the "low" orientation
error<-emveclow[lowmin]
finalab<-lowmin
}
# NOTE(review): the orientation is reconstructed from the *index value*; if
# upmin happens to equal lowmin numerically while the "up" orientation won,
# this branch silently picks the "low" stump -- confirm.
if(finalab==lowmin){
ModelFinal<-paste("Model:: ",atr,"<=",searchx[lowmin]," is ","1"," else"," -1.",sep = "")
preclsvec<-ifelse(atrvec<=searchx[lowmin],1,-1)
} else{
ModelFinal<-paste("Model:: ",atr,">",searchx[upmin]," is ","1"," else"," -1.",sep = "")
preclsvec<-ifelse(atrvec<=searchx[upmin],-1,1)
}
list(error=error,ModelFinal=ModelFinal,preclsvec=preclsvec)
}
#### 例8.1 一步到位 ####
AdaboostTreeStool<-function(atr,cls,data,weight0=rep(1/length(clsvec),length(clsvec)),
ept=0,maxiter=10000,sep=.5){
atrvec<-data[,atr];clsvec<-data[,cls]
weight<-weight0
f<-rep(0,length(clsvec))
Gmodel<-NULL
Galpha<-NULL
Gerror<-NULL
iterk<-1
while(iterk>=1){
G<-SearchOneTree(atr=atr,cls=cls,data=data,weightm = weight,sep = sep)
err<-G$error;pcls<-G$preclsvec
if(err==0||err==1){
stoprule<-"err==0||err==1"
outlst<-list(stoprule=stoprule,Model=G$ModelFinal,error=err)
break
}
ModelG<-G$ModelFinal
Gmodel<-c(Gmodel,ModelG)
Gerror<-c(Gerror,err)
alpha<-AdaboostAlpha(err)
Galpha<-c(Galpha,alpha)
D<-AdaboostWeight(weightm = weight,alpham = alpha,clsvec = clsvec,preclsvec = pcls)
weight<-D
f<-f+alpha*pcls;sgnf<-sign(f);sgnf<-ifelse(sgnf==1,1,-1)
errf<-1-sum(sgnf==clsvec)/length(clsvec)#f的误分率
if(errf<=ept){
stoprule<-"errf<=ept"
outlst<-list(stoprule=stoprule,errf=errf,iteration=iterk,AdaboostModel=Gmodel,
AdaboostAlpha=Galpha,AdaboostError=Gerror,AdaboostPredict=sgnf)
break
}
if(iterk>=maxiter){
stoprule<-"iterk>=maxiter"
outlst<-list(stoprule=stoprule,errf=errf,iteration=iterk,AdaboostModel=Gmodel,
AdaboostAlpha=Galpha,AdaboostError=Gerror,AdaboostPredict=sgnf)
break
}
iterk<-iterk+1
}
return(outlst)
}
# Example 8.1: the classic 10-point AdaBoost walk-through, step by step.
x<-0:9
y<-c(1,1,1,-1,-1,-1,1,1,1,-1)
dataxy<-data.frame(x=x,y=y);dataxy
## Solve for the first base model
D1<-rep(1/10,10)
G1<-SearchOneTree(atr="x",cls = "y",data=dataxy,weightm =D1);G1
## Coefficient alpha1 of G1 and the ensemble f1
alpha1<-AdaboostAlpha(G1$error);alpha1
f1<-alpha1*G1$preclsvec;f1
## Update the sample weight distribution D1 -> D2
D2<-AdaboostWeight(weightm = D1,alpham = alpha1,clsvec = y,preclsvec = G1$preclsvec);D2
## Solve for the second base model
G2<-SearchOneTree(atr="x",cls = "y",data=dataxy,weightm = D2);G2
## Coefficient alpha2 of G2 and the ensemble f2
alpha2<-AdaboostAlpha(G2$error);alpha2
f2<-alpha2*G2$preclsvec+f1;f2
## Update the sample weight distribution D2 -> D3
D3<-AdaboostWeight(weightm = D2,alpham = alpha2,clsvec = y,preclsvec = G2$preclsvec);D3
## Solve for the third base model
G3<-SearchOneTree(atr="x",cls = "y",data=dataxy,weightm = D3);G3
## Coefficient alpha3 of G3 and the ensemble f3
alpha3<-AdaboostAlpha(G3$error);alpha3
f3<-alpha3*G3$preclsvec+f2;f3
## Update the sample weight distribution D3 -> D4
D4<-AdaboostWeight(weightm = D3,alpham = alpha3,clsvec = y,preclsvec = G3$preclsvec);D4
## Misclassification rate of the combined model f3
tblf3<-table(sign(f3),y);tblf3
1-sum(diag(tblf3))/sum(tblf3)
## One-shot run with AdaboostTreeStool()
AdaboostTreeStool(atr="x",cls = "y",data=dataxy)
## Test AdaboostTreeStool() on the mtcars / iris data sets ##
datamtcars1<-mtcars[,c("mpg","am")]
datamtcars1$am<-ifelse(datamtcars1$am==1,1,-1)
AdaboostTreeStool(atr="mpg",cls = "am",data=datamtcars1,ept=.1,sep=0)
datamtcars2<-mtcars[,c("mpg","vs")]
datamtcars2$vs<-ifelse(datamtcars2$vs==1,1,-1)
AdaboostTreeStool(atr="mpg",cls = "vs",data=datamtcars2,ept=.1,sep=0)
datairis<-iris[1:100,c(3,5)]
datairis$Species<-ifelse(datairis$Species=="setosa",1,-1)
AdaboostTreeStool(atr="Petal.Length",cls = "Species",data=datairis)
#### 第九章 EM算法 ####
#### 高斯混合模型参数估计的EM算法 ####
# EM algorithm for a K-component Gaussian mixture over the observations
# `clsvec`.
# mean0/var0/alpha0: starting means, variances and mixing weights;
# ept: convergence tolerance on the Lp distance between successive
# (mean, var) parameter vectors; maxiter: iteration cap.
# Relies on gaussiResponse() (E-step) and gaussiParameter() (M-step)
# defined below in this file.
gaussiEM<-function(clsvec,K=2,mean0=rep(0,K),var0=rep(1,K),alpha0=rep(1/K,K),
ept=1e-1,maxiter=10000,Lp=2){
lN<-length(clsvec)
mean<-mean0
var<-var0
alpha<-alpha0
iterk<-1
while(iterk>=1){
parameterold<-c(mean,var)
rjkMat<-gaussiResponse(clsvec = clsvec,K=K,Ml=mean,Vl=var,Alpl=alpha)#E-step
paralst<-gaussiParameter(rMat = rjkMat,clsvec = clsvec,K=K)#M-step
mean<-paralst$M
var<-paralst$V
alpha<-paralst$A
parameternew<-c(mean,var)
# Lp distance between successive parameter vectors.
# NOTE(review): (new - old)^Lp is signed for odd Lp; with the default
# Lp = 2 this is a proper distance -- confirm if other Lp values are used.
pnewMinusoldLp<-(parameternew-parameterold)^Lp
Lpvalue<-sum(pnewMinusoldLp)^(1/Lp)
if(Lpvalue<=ept){
stoprule<-"Lpvalue<=ept"
break
}
if(iterk>=maxiter){
stoprule<-"iterk>=maxiter"
break
}
#print(mean);print(var);print(alpha)
iterk<-iterk+1
}
outlst<-list(stoprule=stoprule,iteration=iterk,Mean=mean,
Var=var,Alpha=alpha,K=K)
class(outlst)<-"gaussiEM"
return(outlst)
}
## 计算当前分模型对观测数据yj的响应度
# E-step responsibilities of a Gaussian mixture: entry (j, k) is the
# posterior probability that observation j came from component k.
# Ml/Vl/Alpl: per-component means, variances and mixing weights.
# A zero density is replaced by a tiny random positive value to avoid 0/0.
gaussiResponse<-function(clsvec,K=2,Ml=rep(0,K),Vl=rep(1,K),Alpl=rep(1/K,K)){
  nObs<-length(clsvec)
  out<-matrix(0,nrow=nObs,ncol=K)
  for(obs in 1:nObs){
    dens<-vector(length = K)
    for(comp in 1:K){
      d<-Alpl[comp]*dnorm(clsvec[obs],mean=Ml[comp],sd=sqrt(Vl[comp]))
      if(d==0) d<-sample(c(1e-20,1e-30,1e-40,1e-100,1e-65),1)
      dens[comp]<-d
    }
    out[obs,]<-dens/sum(dens)  # normalize across components
  }
  return(out)
}
## 更新计算各个参数
# M-step of the Gaussian-mixture EM: given the responsibility matrix rMat
# (rows = observations, columns = components), re-estimate each component's
# mean (M), variance (V) and mixing weight (A).
gaussiParameter<-function(rMat,clsvec,K=2){
  nObs<-length(clsvec)
  means<-vector(length = K)
  vars<-vector(length = K)
  mixws<-vector(length = K)
  for(k in 1:K){
    resp<-rMat[,k];tot<-sum(resp)
    mu<-as.vector(resp%*%clsvec)/tot                 # responsibility-weighted mean
    vars[k]<-as.vector(resp%*%((clsvec-mu)^2))/tot   # weighted variance
    means[k]<-mu
    mixws[k]<-tot/nObs                               # updated mixing weight
  }
  list(M=means,V=vars,A=mixws)
}
## 编写打印函数
# Print method for "gaussiEM" objects: stopping rule, iteration count,
# number of mixture components, then the fitted Mean / Var / Alpha values.
print.gaussiEM<-function(obj){
cat("Stoprule : ",obj$stoprule,"\n")
cat("iteration : ",obj$iteration,"\n")
cat("the number of gaussi is : ",obj$K,"\n")
print(obj[3:5])
}
## 测试1:生成混合正态分布数据 ##
comp <- sample(c(0, 1), size = 10000, prob = c(0.7, 0.3), replace = T)
sim1<-rnorm(10000, mean = ifelse(comp == 0, 0, 1), sd = ifelse(comp == 0, 1, 2))
g1<-gaussiEM(clsvec = sim1,ept=1e-3,mean0 = c(0,.5),K=2);g1
## 测试2 ##
comp2 <- sample(c(0,1,2), size = 100000, prob = c(0.5,0.3,0.2), replace = T)
sim2<-rnorm(100000, mean =comp2, sd = ifelse(comp == 0, 1,ifelse(comp2==1,4,2)))
g2<-gaussiEM(clsvec = sim2,ept=1e-3,mean0 = c(0,.8,.9),var0 = c(.6,7.8,2.3),K=3);g2
y<-c(-67,-48,6,8,14,16,23,24,28,29,41,49,56,60,75)
gaussiEM(y)
#### 第十章 隐马尔可夫模型 ####
#### 隐马尔可夫模型观测序列的生成 ####
#A是转移概率矩阵,B是观测概率矩阵,PI是初始概率向量,Lth是输出的观测序列长度
#size为想要生成的观测序列的组数,size,Lth可以是vector
ObjectHMM<-function(size=1,Lth=5,A,B,PI,StateLabel=1:nrow(A),
ObjectLabel=1:ncol(B),seed=NULL){
stlab<-StateLabel#各个状态值采用的标记
objlab<-ObjectLabel#各个观测值采用的标记
lsi<-size
if(length(Lth)==1) Lth<-rep(Lth,size)
stlst<-list()
objlst<-list()
if(!is.null(seed)) set.seed(seed=seed)
for(i in 1:lsi){
lt<-Lth[i]
stvec<-vector(length = lt)
objvec<-vector(length = lt)
stvec[1]<-sample(stlab,1,prob = PI)#确定初始状态
st1<-which(stlab==stvec[1])#在B中对应的行数,即状态
objvec[1]<-sample(objlab,1,prob = B[st1,])#确定初始观测
for(j in 2:lt){
st<-which(stlab==stvec[j-1])#确定当前状态
stvec[j]<-sample(stlab,1,prob =A[st,])#确定下一个状态
stnew<-which(stlab==stvec[j])#下一个状态在B中对应的行数
objvec[j]<-sample(objlab,1,prob = B[stnew,])#确定下一个观测
}
stlst[[i]]<-stvec
objlst[[i]]<-objvec
}
outlst<-list(obs=objlst,state=stlst)
return(outlst)
}
## Test: Example 10.1 -- the box-and-ball HMM
a10.1<-matrix(c(0,1,0,0,
.4,0,.6,0,
0,.4,0,.6,
0,0,.5,.5),nrow = 4,byrow = T);a10.1
pi10.1<-rep(.25,4);pi10.1
b10.1<-matrix(c(.5,.5,
.3,.7,
.6,.4,
.8,.2),nrow = 4,byrow = T);b10.1
# Observation labels are the Chinese words for "red" / "white".
ObjectHMM(size=2,Lth = 5,A=a10.1,B=b10.1,PI=pi10.1,
ObjectLabel = c("红","白"))
ObjectHMM(size=2,Lth = 5,A=a10.1,B=b10.1,PI=pi10.1,
ObjectLabel = c("红","白"),seed=66)
ObjectHMM(size=3,Lth = c(3,4,5),A=a10.1,B=b10.1,
PI=pi10.1,ObjectLabel = c("红","白"))
#### HMM前向算法实现 ####
# Forward algorithm for an HMM: computes P(obs | lambda) together with the
# full matrix of forward probabilities alpha_t(i).
# obs: observation sequence (values drawn from ObjectLabel); A: transition
# matrix; B: emission matrix; PI: initial state distribution.
forwardHMM<-function(obs,A,B,PI,StateLabel=as.character(1:nrow(A)),
                     ObjectLabel=as.character(1:ncol(B))){
  Tn<-length(obs)  # length of the observation sequence
  Bmat<-B;colnames(Bmat)<-ObjectLabel;rownames(Bmat)<-StateLabel
  Amat<-A;colnames(Amat)<-StateLabel;rownames(Amat)<-StateLabel
  # One transition step: alpha %*% (column of A) for each target state.
  stepA<-function(alpcal,Acol) alpcal%*%Acol
  AlpMat<-matrix(nrow=Tn,ncol = nrow(Amat))
  colnames(AlpMat)<-paste0("st:",StateLabel)
  rownames(AlpMat)<-paste0("T:",1:Tn)
  # Initialization: alpha_1(i) = pi_i * b_i(o_1).
  alpha<-PI*Bmat[,which(ObjectLabel==obs[1])]
  AlpMat[1,]<-alpha
  tt<-2
  while(tt<=Tn){
    # Induction: alpha_t(i) = (sum_j alpha_{t-1}(j) a_ji) * b_i(o_t).
    propagated<-apply(Amat,2,stepA,alpcal=alpha)
    alpha<-propagated*Bmat[,which(ObjectLabel==obs[tt])]
    AlpMat[tt,]<-alpha
    tt<-tt+1
  }
  list(FinalProb=sum(alpha),AlpMat=AlpMat)
}
## Solving Example 10.2 with the forward algorithm
A10.2<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
forwardHMM(obs=c("红","白","红"),A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
## Combine with ObjectHMM(): score generated sequences under the model
hmm1<-ObjectHMM(size=3,Lth = c(3,4,5),A=a10.1,B=b10.1,
PI=pi10.1,ObjectLabel = c("红","白"));hmm1
hmm1_obs<-hmm1$obs;hmm1_obs
lapply(hmm1_obs,forwardHMM,A=a10.1,B=b10.1,
PI=pi10.1,ObjectLabel=c("红","白"))
#### HMM模型后向算法的实现 ####
# Backward algorithm for an HMM: computes P(obs | lambda) via backward
# probabilities beta_t(i).
# Note: BtMat stores the beta vectors in the order they are computed, i.e.
# its first row is beta_T (all ones) and its last row is beta_1 (row names
# run T:lT down to T:1).
backwardHMM<-function(obs,A,B,PI,StateLabel=as.character(1:nrow(A)),
                      ObjectLabel=as.character(1:ncol(B))){
  Tn<-length(obs)    # length of the observation sequence
  nStates<-nrow(A)   # number of hidden states
  Bmat<-B;colnames(Bmat)<-ObjectLabel;rownames(Bmat)<-StateLabel
  Amat<-A;colnames(Amat)<-StateLabel;rownames(Amat)<-StateLabel
  # One backward step for state i: sum_j a_ij * b_j(o_{t+1}) * beta_{t+1}(j).
  stepB<-function(Arow,Bcol,btcal) sum(Arow*Bcol*btcal)
  beta<-rep(1,nStates)  # beta_T(i) = 1
  BtMat<-matrix(nrow=Tn,ncol = nStates)
  colnames(BtMat)<-paste0("st:",StateLabel)
  rownames(BtMat)<-paste0("T:",Tn:1)
  BtMat[1,]<-beta
  k<-1
  while(k<=(Tn-1)){  # walk backwards from the last observation
    emis<-Bmat[,which(ObjectLabel==obs[Tn-k+1])]
    beta<-apply(Amat,1,stepB,Bcol=emis,btcal=beta)
    BtMat[k+1,]<-beta
    k<-k+1
  }
  # Termination: P = sum_i pi_i * b_i(o_1) * beta_1(i).
  emis1<-Bmat[,which(ObjectLabel==obs[1])]
  finalprob<-sum(PI*emis1*beta)
  list(FinalProb=finalprob,BtMat=BtMat)
}
## Solve Example 10.2 with the backward algorithm
A10.2<-matrix(c(0.5,0.2,0.3,
                0.3,0.5,0.2,
                0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
                0.4,0.6,
                0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
backwardHMM(obs=c("红","白","红"),A=A10.2,
            B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
# forward and backward probabilities of the same sequences must agree
hmm<-ObjectHMM(size=2,Lth = c(3,4),A=A10.2,B=B10.2,
               PI=pi10.2,ObjectLabel = c("红","白"))
hmm_obs<-hmm$obs;hmm_obs
lapply(hmm_obs,forwardHMM,A=A10.2,B=B10.2,PI=pi10.2,ObjectLabel=c("红","白"))
lapply(hmm_obs,backwardHMM,A=A10.2,B=B10.2, PI=pi10.2,ObjectLabel=c("红","白"))
#### Computation of some probabilities and expectations ####
stijHMM<-function(obs,sti=NULL,stij=NULL,time=NULL,A,B,PI,
                  StateLabel=as.character(1:nrow(A)),
                  ObjectLabel=as.character(1:ncol(B)),
                  if.sti=F,if.stij=F){
  # Smoothed HMM probabilities from the forward/backward passes:
  #   gamma_t(i) = P(state_t = i | obs)                  -> PstiMat
  #   xi_t(i,j)  = P(state_t = i, state_{t+1} = j | obs) -> Pstijvec
  # sti:  state label(s) whose gamma values to extract (paired with `time`,
  #       which may be a vector of the same length).
  # stij: a length-2 vector c(i, j) of state labels for xi.
  # if.sti = TRUE skips the xi computation; if.stij = TRUE skips the gamma
  # computation; both TRUE is an error.
  # NOTE(review): with the defaults (both FALSE), `stij` must be supplied,
  # otherwise the xi block runs with empty indices — callers either pass
  # stij or set if.sti = TRUE.
  lT<-length(obs)
  Alplst<-forwardHMM(obs=obs,A=A,B=B,PI=PI,StateLabel = StateLabel,
                     ObjectLabel = ObjectLabel)
  Btlst<-backwardHMM(obs=obs,A=A,B=B,PI=PI,StateLabel = StateLabel,
                     ObjectLabel = ObjectLabel)
  AlpMat<-Alplst$AlpMat
  BtMat<-Btlst$BtMat
  # backwardHMM() stores beta in reverse time order; flip to t = 1..T
  btmat<-BtMat[lT:1,,drop=F]
  Probs<-Alplst$FinalProb# P(obs): the normalising constant
  if(!if.stij){
    PstiMat<-AlpMat*btmat/Probs# gamma_t(i) = alpha_t(i) beta_t(i) / P(obs)
    rownames(PstiMat)<-rownames(AlpMat);colnames(PstiMat)<-colnames(AlpMat)
    si<-which(StateLabel==sti)
    if(!is.null(sti)&&!is.null(time)) psti<-PstiMat[time,si] else psti<-NULL
  }
  if(!if.sti){
    fbj<-function(j,x,BM) BM[j,which(ObjectLabel==x)]
    wj<-which(StateLabel==stij[2]);wi<-which(StateLabel==stij[1])
    bjDf<-data.frame(jobs=obs[-1])
    bjvec<-apply(bjDf,1,fbj,j=wj,BM=B)# b_j(o_{t+1}) for t = 1..T-1
    # xi_t(i,j) = alpha_t(i) a_ij b_j(o_{t+1}) beta_{t+1}(j) / P(obs)
    Pstijvec<-A[wi,wj]*AlpMat[1:(lT-1),wi]*bjvec*btmat[-1,wj]/Probs# length lT-1
    if(!is.null(time)&&!is.null(stij)) pstij<-Pstijvec[time] else pstij<-NULL
  }
  # assemble the output according to which pieces were computed
  if(!if.sti&&!if.stij){
    outlst<-list(Probs=Probs,psti=psti,pstij=pstij,PstiMat=PstiMat,Pstijvec=Pstijvec,
                 AlpMat=AlpMat,BtMat=btmat,sti=sti,stij=stij,time=time)
  } else if(!if.stij&&if.sti){
    outlst<-list(Probs=Probs,psti=psti,PstiMat=PstiMat,AlpMat=AlpMat,
                 BtMat=btmat,sti=sti,stij=stij,time=time)
  } else if(if.stij&&!if.sti){
    outlst<-list(Probs=Probs,pstij=pstij,Pstijvec=Pstijvec,AlpMat=AlpMat,
                 BtMat=btmat,sti=sti,stij=stij,time=time)
  } else{
    stop("if.sti and if.stij can not both TRUE.")
  }
  return(outlst)
}
## Sanity checks for stijHMM()
stij1<-stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","1"),time=1,A=A10.2,
               B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
stij2<-stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","2"),time=1,A=A10.2,
               B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
stij3<-stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","3"),time=1,A=A10.2,
               B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
apply(stij1$PstiMat,1,sum)# each row of gamma should sum to 1
apply(stij1$PstiMat,2,sum)
sum(c(stij1$Pstijvec,stij2$Pstijvec,stij3$Pstijvec))
sum(stij1$PstiMat[1:2,1])
names(stij1)
# gamma only (xi skipped)
stijHMM(obs=c("红","白","红"),sti=NULL,
        stij=NULL,time=NULL,A=A10.2,
        B=B10.2,PI=pi10.2,ObjectLabel=c("红","白")
        ,if.sti = TRUE)
# xi only (gamma skipped)
stijHMM(obs=c("红","白","红"),sti="1",
        stij=c("1","1"),time=NULL,A=A10.2,
        B=B10.2,PI=pi10.2,ObjectLabel=c("红","白"),
        if.stij=TRUE)
# errors by design: both flags TRUE is rejected
stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","1"),time=NULL,A=A10.2,
        B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"), if.sti = TRUE ,if.stij=TRUE)
#### Supervised learning of HMM parameters: R implementation ####
superviseHMM<-function(obsMat,stMat,StateLabel=NULL,ObjectLabel=NULL){
  # Supervised (counting) estimator of HMM parameters.
  # obsMat: T x S matrix of observations — each column is one observation
  #         sequence, rows are times t = 1..T.
  # stMat:  T x S matrix of the corresponding hidden-state sequences.
  # StateLabel / ObjectLabel: character labels of the state/observation sets.
  # Returns a c("superviseHMM","HMM") object with the estimated initial
  # distribution (pi), transition matrix (aijMat) and emission matrix (bjkMat).
  lT<-nrow(obsMat)# length of the time axis
  S<-ncol(obsMat)# number of sequences (sample size)
  stMatchs<-stMat[-lT,]# states at times 1..T-1 (transition origins)
  lst<-length(StateLabel)# size of the state set
  lobs<-length(ObjectLabel)# size of the observation set
  obsvec<-as.vector(obsMat);stvec<-as.vector(stMat)
  aijMat<-matrix(nrow=lst,ncol=lst)## transition probability matrix
  colnames(aijMat)<-StateLabel;rownames(aijMat)<-StateLabel
  bjkMat<-matrix(nrow = lst,ncol=lobs)## emission probability matrix
  colnames(bjkMat)<-ObjectLabel;rownames(bjkMat)<-StateLabel
  pivec<-vector(length=lst)## initial state probability vector
  findaij<-function(ichr,jchr){# one transition probability a_ij
    # count transitions i -> j over t = 1..T-1 across all sequences,
    # normalised by the number of visits to state i before time T
    SAij<-length(which(stMatchs==ichr))
    Aij<-0
    for(t in 1:(lT-1)){
      tvec<-stMat[t,];tplus1vec<-stMat[t+1,]
      tlab<-which(tvec==ichr);tplus1st<-tplus1vec[tlab]
      sj<-sum(tplus1st==jchr)
      Aij<-Aij+sj
    }
    return(Aij/SAij)
  }
  findbjk<-function(jchr,kchr){# one emission probability b_j(k)
    jlab<-which(stvec==jchr)
    kvec<-obsvec[jlab]
    sum(kvec==kchr)/length(jlab)
  }
  # fill the transition probability matrix
  for(i in 1:lst){
    for(j in 1:lst){
      aijMat[i,j]<-findaij(ichr=StateLabel[i],jchr = StateLabel[j])
    }
  }
  # fill the emission probability matrix
  for(j in 1:lst){
    for(k in 1:lobs){
      bjkMat[j,k]<-findbjk(jchr=StateLabel[j],kchr = ObjectLabel[k])
    }
  }
  # estimate the initial distribution from the states at t = 1.
  # BUG FIX: the original assigned into `pi`, silently shadowing the base
  # constant and leaving the preallocated pivec unused.
  first<-stMat[1,]
  for(i in 1:lst){
    pivec[i]<-length(which(first==StateLabel[i]))/S
  }
  outlst<-list(pi=pivec,aijMat=aijMat,bjkMat=bjkMat,
               StateLabel=StateLabel,ObjectLabel=ObjectLabel)
  class(outlst)<-c("superviseHMM","HMM")
  return(outlst)
}
print.superviseHMM<-function(x,...){
  # Print method for "superviseHMM": label sets, then the estimated
  # parameters (pi, aijMat, bjkMat).
  # IDIOM FIX: first argument renamed to `x` (with ...) to match the
  # print() generic; returns the object invisibly as print methods should.
  cat("State::",x$StateLabel,"; ","Observation::",x$ObjectLabel,"\n")
  print(x[1:3])
  invisible(x)
}
## Test 1: generate data with ObjectHMM() and recover the parameters
A10.2<-matrix(c(0.5,0.2,0.3,
                0.3,0.5,0.2,
                0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
                0.4,0.6,
                0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## simulate HMM observation and state sequences
test1<-ObjectHMM(size=2000,Lth = 5,A=A10.2,B=B10.2,PI=pi10.2,
                 StateLabel = as.character(1:3),ObjectLabel = c("红","白"))
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix
stmat<-do.call(cbind,st1)# state matrix
shmm1<-superviseHMM(obsMat = obsmat,stMat = stmat,StateLabel =as.character(1:3),
                    ObjectLabel =c("红","白") )# fit the model
shmm1
# compare the estimates against the true parameters
pi10.2
A10.2
B10.2
#### Test 2: recover the parameters of a second, different model
A1<-matrix(c(0.5,0.3,0.2,
             0.4,0.4,0.2,
             0.1,0.2,0.7),nrow=3,byrow = TRUE);A1
B1<-matrix(c(0.6,0.4,
             0.3,0.7,
             0.4,0.6),nrow = 3,byrow = TRUE);B1
pi1<-c(0.3,0.3,0.4);pi1
## simulate observation/state sequences from (A1, B1, pi1).
## BUG FIX: the original sampled from A10/B10/pi10, which are not defined
## at this point in the script, and echoed A10.2/B10.2 after assigning
## A1/B1; use the freshly defined A1/B1/pi1 throughout.
test2<-ObjectHMM(size=2000,Lth = 5,A=A1,B=B1,PI=pi1,
                 StateLabel = as.character(1:3),ObjectLabel = c("红","白"))
obs2<-test2$obs
st2<-test2$state
obsmat2<-do.call(cbind,obs2)# observation matrix
stmat2<-do.call(cbind,st2)# state matrix
shmm2<-superviseHMM(obsMat = obsmat2,stMat = stmat2,
                    StateLabel =as.character(1:3),ObjectLabel =c("红","白") )# fit
shmm2
# compare the estimates against the true parameters
pi1
A1
B1
#### HMM模型Buam-Welch无监督学习算法的R实现 ####
BuamWelchHMM<-function(obs,A0,B0,PI0,StateLabel=as.character(1:nrow(A0)),
                       ObjectLabel=as.character(1:ncol(B0)),ept=1e-2,
                       maxiter=10000,Lp=2){
  # Baum-Welch (EM) estimation of HMM parameters from ONE observation
  # sequence.
  # obs: observation vector; A0/B0/PI0: initial parameter guesses;
  # ept: convergence tolerance on the Lp norm of the parameter change;
  # maxiter: iteration cap; Lp: order of the norm used for stopping.
  # Returns a c("BuamWelchHMM","HMM") object with the fitted parameters,
  # the iteration count and which stopping rule fired.
  lT<-length(obs);lst<-length(StateLabel);lobs<-length(ObjectLabel)
  Abw<-A0;Bbw<-B0;PIbw<-PI0
  iterk<-1
  while(iterk>=1){# effectively `repeat`; exits only through break
    # keep the old parameters to measure the size of this EM step
    Abwold<-Abw;Bbwold<-Bbw;PIbwold<-PIbw
    # update the initial distribution: pi_i <- gamma_1(i)
    pilst<-stijHMM(obs=obs,A=Abw,B=Bbw,PI=PIbw,if.sti = TRUE,
                   StateLabel = StateLabel,ObjectLabel = ObjectLabel)
    PIbw<-pilst$PstiMat[1,]
    # update the transition and emission matrices row by row
    for(i in 1:lst){
      ir<-StateLabel[i]
      # a_ij <- sum_t xi_t(i,j) / sum_t gamma_t(i), t = 1..T-1
      for(j in 1:lst){
        jr<-StateLabel[j]# state labels for this (i, j) pair
        calij<-stijHMM(obs=obs,stij=c(ir,jr),A=Abw,B=Bbw,PI=PIbw,
                       StateLabel = StateLabel,ObjectLabel=ObjectLabel)
        pstiMat<-calij$PstiMat;pstijvec<-calij$Pstijvec
        Abw[i,j]<-sum(pstijvec)/sum(pstiMat[1:(lT-1),i])
      }
      # b_i(k) <- sum over times with o_t = k of gamma_t(i) / sum_t gamma_t(i)
      for(k in 1:lobs){
        klab<-which(obs==ObjectLabel[k])
        cali<-stijHMM(obs=obs,A=Abw,B=Bbw,PI=PIbw,StateLabel = StateLabel,
                      ObjectLabel=ObjectLabel,if.sti=TRUE)
        piMat<-cali$PstiMat
        pstivec<-piMat[,i];pkvec<-pstivec[klab]
        Bbw[i,k]<-sum(pkvec)/sum(pstivec)
      }
    }
    # stopping rules based on the Lp norm of the parameter change.
    # BUG FIX: the original computed sum(|x|^Lp) * (1/Lp), which is NOT
    # the Lp norm; the sum must be raised to the power 1/Lp.
    Abwoldvec<-as.vector(Abwold);Abwvec<-as.vector(Abw)
    Bbwoldvec<-as.vector(Bbwold);Bbwvec<-as.vector(Bbw)
    abw<-Abwvec-Abwoldvec;Lpabw<-sum(abs(abw)^Lp)^(1/Lp)
    bbw<-Bbwvec-Bbwoldvec;Lpbbw<-sum(abs(bbw)^Lp)^(1/Lp)
    pibw<-PIbw-PIbwold;Lppibw<-sum(abs(pibw)^Lp)^(1/Lp)
    allbw<-c(abw,bbw,pibw);Lpallbw<-sum(abs(allbw)^Lp)^(1/Lp)
    if(Lpabw<=ept&&Lpbbw<=ept&&Lppibw<=ept){
      stoprule<-"Lpabw<=ept&&Lpbbw<=ept&&Lppibw<=ept"
      break
    }
    if(Lpallbw<=ept){
      stoprule<-"Lpallbw<=ept"
      break
    }
    if(iterk>=maxiter){
      stoprule<-"iterk>=maxiter"
      break
    }
    iterk<-iterk+1
  }
  outlst<-list(pi=PIbw,aijMat=Abw,bjkMat=Bbw,iteration=iterk,
               stoprule=stoprule,StateLabel=StateLabel,ObjectLabel=ObjectLabel)
  class(outlst)<-c("BuamWelchHMM","HMM")
  return(outlst)
}
MoreBuamWelchHMM<-function(obsMat,A0,B0,PI0,StateLabel=as.character(1:nrow(A0)),
                           ObjectLabel=as.character(1:ncol(B0)),ept=1e-2,
                           maxiter=10000,Lp=2){
  # Baum-Welch over several observation sequences: fit one model per
  # column of obsMat and average the resulting parameter estimates.
  # Returns a c("BuamWelchHMM","HMM") object with the averaged parameters.
  n_seq <- ncol(obsMat)
  fits <- lapply(seq_len(n_seq), function(k) {
    BuamWelchHMM(obs = obsMat[, k], A0 = A0, B0 = B0, PI0 = PI0,
                 StateLabel = StateLabel, ObjectLabel = ObjectLabel,
                 maxiter = maxiter, ept = ept, Lp = Lp)
  })
  # element-wise average of each parameter across the per-sequence fits
  avg_A <- Reduce(`+`, lapply(fits, `[[`, "aijMat")) / n_seq
  avg_B <- Reduce(`+`, lapply(fits, `[[`, "bjkMat")) / n_seq
  avg_pi <- Reduce(`+`, lapply(fits, `[[`, "pi")) / n_seq
  outlst <- list(pi = avg_pi, aijMat = avg_A, bjkMat = avg_B,
                 StateLabel = StateLabel, ObjectLabel = ObjectLabel)
  class(outlst) <- c("BuamWelchHMM", "HMM")
  return(outlst)
}
print.BuamWelchHMM<-function(x,...){
  # Print method for "BuamWelchHMM": label sets, then the fitted
  # parameters (pi, aijMat, bjkMat).
  # IDIOM FIX: first argument renamed to `x` (with ...) to match the
  # print() generic; returns the object invisibly.
  cat("State::",x$StateLabel,"; ","Observation::",x$ObjectLabel,"\n")
  print(x[1:3])
  invisible(x)
}
#### Test: Baum-Welch on simulated data
## initial parameter guesses.
## BUG FIX: the original echoed A10.2/B10.2 immediately after assigning
## A1/B1; echo the objects that were just created.
A1<-matrix(c(0.5,0.3,0.2,
             0.4,0.4,0.2,
             0.1,0.2,0.7),nrow=3,byrow = TRUE);A1
B1<-matrix(c(0.6,0.4,
             0.3,0.7,
             0.4,0.6),nrow = 3,byrow = TRUE);B1
pi1<-c(0.3,0.3,0.4);pi1
## true model used to generate the data
A10.2<-matrix(c(0.5,0.2,0.3,
                0.3,0.5,0.2,
                0.2,0.3,0.5),nrow=3,byrow = TRUE);A10.2
B10.2<-matrix(c(0.5,0.5,
                0.4,0.6,
                0.7,0.3),nrow = 3,byrow = TRUE);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## simulate observation and state sequences
test1<-ObjectHMM(size=2000,Lth = 5,A=A10.2,B=B10.2,PI=pi10.2,
                 StateLabel = as.character(1:3),ObjectLabel = c("红","白"))
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix
stmat<-do.call(cbind,st1)# state matrix
## fit from the initial guesses on single sequences and on all sequences
BuamWelchHMM(obs=obsmat[,1],A0=A1,B0=B1,PI0=pi1,
             ObjectLabel = c("红","白"),ept = 1e-2)
BuamWelchHMM(obs=obsmat[,10],A0=A1,B0=B1,PI0=pi1,
             ObjectLabel = c("红","白"),ept = 1e-2)
MoreBuamWelchHMM(obsMat = obsmat,A0=A1,B0=B1,PI0=pi1,
                 ObjectLabel = c("红","白"),ept = 1e-2)
#### 预测算法:近似算法的R实现 ####
approxHMM<-function(obsMat,A,B,PI,StateLabel=as.character(1:nrow(A)),
                    ObjectLabel=as.character(1:ncol(B))){
  # Approximate (pointwise) HMM decoding: for every time step pick the
  # individually most probable hidden state, using the smoothed state
  # probabilities gamma_t(i) from stijHMM().
  decode_one <- function(sequence) {
    smoothed <- stijHMM(obs = sequence, A = A, B = B, PI = PI,
                        StateLabel = StateLabel,
                        ObjectLabel = ObjectLabel, if.sti = TRUE)
    # PstiMat: row = time, column = state; take each row's arg-max state
    best <- apply(smoothed$PstiMat, 1, which.max)
    StateLabel[best]
  }
  # accept a single sequence given as a plain vector
  if (is.vector(obsMat)) obsMat <- matrix(obsMat, nrow = length(obsMat))
  apply(obsMat, 2, decode_one)
}
#### Test 1: approximate decoding, 3-state model
A10.2<-matrix(c(0.5,0.2,0.3,
                0.3,0.5,0.2,
                0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
                0.4,0.6,
                0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## simulate HMM observation and state sequences
test1<-ObjectHMM(size=100,Lth = 100,A=A10.2,B=B10.2,PI=pi10.2,
                 StateLabel = as.character(1:3),ObjectLabel = c("红","白"),seed=100)
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix
stmat<-do.call(cbind,st1)# state matrix
t1<-approxHMM(obsMat = obsmat,A=A10.2,B=B10.2,
              PI=pi10.2,ObjectLabel = c("红","白"))
sum(t1==stmat)/10000# accuracy over 100 x 100 decoded states
#### Test 2: approximate decoding, 5-state model
A10<-matrix(c(0.3,0.2,0.2,0,0.3,
              0.1,0.2,0.3,0.3,0.1,
              0.2,0.2,0.3,0.15,0.15,
              0.2,0.1,0.1,0.3,0.4,
              0.1,0.2,0.3,0.3,0.1),nrow=5,byrow = T);A10
B10<-matrix(c(0.5,0.5,
              0.4,0.6,
              0.7,0.3,
              0.4,0.6,
              0.45,0.55),nrow = 5,byrow = T);B10
pi10<-c(0.2,0.1,0.25,0.2,0.25);pi10
## simulate HMM observation and state sequences
test2<-ObjectHMM(size=100,Lth = 100,A=A10,B=B10,PI=pi10,
                 StateLabel = as.character(1:5),ObjectLabel = c("红","白"),seed=888)
obs2<-test2$obs
st2<-test2$state
obsmat2<-do.call(cbind,obs2)# observation matrix
stmat2<-do.call(cbind,st2)# state matrix
t2<-approxHMM(obsMat = obsmat2,A=A10,B=B10,
              PI=pi10,ObjectLabel = c("红","白"))
sum(t2==stmat2)/10000# decoding accuracy
#### Test 3: approximate decoding, two-state model
a10<-matrix(c(0.4,0.6,
              0.7,0.3),nrow = 2,byrow = TRUE);a10
b10<-matrix(c(0.5,0.5,
              0.3,0.7),nrow = 2,byrow = TRUE);b10
p10<-c(0.3,0.7);p10
## simulate sequences.
## BUG FIX: the original passed B=a10 (the transition matrix) as the
## emission matrix, both here and in the approxHMM() call below.
test3<-ObjectHMM(size=100,Lth = 100,A=a10,B=b10,PI=p10,
                 StateLabel = as.character(1:2),ObjectLabel = c("红","白"),seed=666)
obs3<-test3$obs
st3<-test3$state
obsmat3<-do.call(cbind,obs3)# observation matrix
stmat3<-do.call(cbind,st3)# state matrix
t3<-approxHMM(obsMat = obsmat3,A=a10,B=b10,
              PI=p10,ObjectLabel = c("红","白"))
sum(t3==stmat3)/10000# decoding accuracy
#### 预测算法 维特比算法的R实现 ####
ViterbiHMM<-function(obs,A,B,PI,StateLabel=as.character(1:nrow(A)),
                     ObjectLabel=as.character(1:ncol(B)),if.show=TRUE){
  # Viterbi decoding: most probable hidden-state path for one observation
  # sequence.
  # if.show = TRUE returns the path plus the delta/psi matrices; FALSE
  # returns just the decoded path (a named character vector).
  n_state <- length(StateLabel)
  n_time <- length(obs)
  emis_col <- function(t) B[, which(ObjectLabel == obs[t])]
  # delta_t(i): highest probability of any path ending in state i at t.
  # psi_t(i):   the arg-max predecessor used to reach state i at time t.
  delta <- PI * emis_col(1)
  delta_mat <- matrix(nrow = n_time, ncol = n_state)
  psi_mat <- matrix(nrow = n_time, ncol = n_state)
  delta_mat[1, ] <- delta
  psi_mat[1, ] <- rep(0, n_state)
  t_idx <- 2
  while (t_idx <= n_time) {
    # cand[i, j] = delta_{t-1}(j) * a_{ji}: probability of reaching state
    # i through predecessor j (delta recycles down the columns of A).
    cand <- t(delta * A)
    psi <- apply(cand, 1, which.max)
    # pick each row's winning predecessor via matrix indexing
    delta <- cand[cbind(seq_len(n_state), psi)] * emis_col(t_idx)
    delta_mat[t_idx, ] <- delta
    psi_mat[t_idx, ] <- psi
    t_idx <- t_idx + 1
  }
  # termination: most probable final state, then backtrack through psi
  path <- vector(length = n_time)
  path[n_time] <- which.max(delta)
  for (t in (n_time - 1):1) {
    path[t] <- psi_mat[t + 1, path[t + 1]]
  }
  dimnames(delta_mat) <- list(paste0("T:", 1:n_time), paste0("st:", StateLabel))
  dimnames(psi_mat) <- list(paste0("T:", 1:n_time), paste0("st:", StateLabel))
  pred <- StateLabel[path]
  names(pred) <- paste0("T:", 1:n_time)
  if (if.show) {
    out <- list(FinalState = pred, deltaMat = delta_mat, pasiMat = psi_mat)
  } else {
    out <- pred
  }
  return(out)
}
#### 批量预测
MoreViterbiHMM<-function(obsMat,A,B,PI,StateLabel=as.character(1:nrow(A)),
                         ObjectLabel=as.character(1:ncol(B))){
  # Batch Viterbi decoding: each column of obsMat is one observation
  # sequence; returns one decoded state path per column.
  seqs <- if (is.vector(obsMat)) {
    matrix(obsMat, nrow = length(obsMat))  # single sequence as a vector
  } else {
    obsMat
  }
  apply(seqs, 2, ViterbiHMM, A = A, B = B, PI = PI,
        StateLabel = StateLabel, ObjectLabel = ObjectLabel, if.show = FALSE)
}
#### Solve Example 10.3
A10.3<-matrix(c(0.5,0.2,0.3,
                0.3,0.5,0.2,
                0.2,0.3,0.5),nrow=3,byrow = T);A10.3
B10.3<-matrix(c(0.5,0.5,
                0.4,0.6,
                0.7,0.3),nrow = 3,byrow = T);B10.3
pi10.3<-c(0.2,0.4,0.4);pi10.3
ViterbiHMM(obs=c("红","白","红"),A=A10.3,B=B10.3,PI=pi10.3,
           ObjectLabel =c("红","白") )
# NOTE(review): `obs=` only partially matches approxHMM's `obsMat` argument
approxHMM(obs=c("红","白","红"),A=A10.3,B=B10.3,PI=pi10.3,
          ObjectLabel =c("红","白") )
#### Test 1: Viterbi decoding, 3-state model
A10.2<-matrix(c(0.5,0.2,0.3,
                0.3,0.5,0.2,
                0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
                0.4,0.6,
                0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## simulate HMM observation and state sequences
test1<-ObjectHMM(size=100,Lth = 5,A=A10.2,B=B10.2,PI=pi10.2,
                 StateLabel = as.character(1:3),ObjectLabel = c("红","白"),seed=666)
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix
stmat<-do.call(cbind,st1)# state matrix
t1<-MoreViterbiHMM(obsMat = obsmat,A=A10.2,B=B10.2,
                   PI=pi10.2,ObjectLabel = c("红","白"))
sum(t1==stmat)/500# accuracy over 100 x 5 decoded states
#### Test 2: Viterbi decoding, 5-state model
A10<-matrix(c(0.3,0.2,0.2,0,0.3,
              0.1,0.2,0.3,0.3,0.1,
              0.2,0.2,0.3,0.15,0.15,
              0.2,0.1,0.1,0.3,0.4,
              0.1,0.2,0.3,0.3,0.1),nrow=5,byrow = T);A10
B10<-matrix(c(0.5,0.5,
              0.4,0.6,
              0.7,0.3,
              0.4,0.6,
              0.45,0.55),nrow = 5,byrow = T);B10
pi10<-c(0.2,0.1,0.25,0.2,0.25);pi10
## simulate HMM observation and state sequences
test2<-ObjectHMM(size=100,Lth = 5,A=A10,B=B10,PI=pi10,
                 StateLabel = as.character(1:5),ObjectLabel = c("红","白"),seed=888)
obs2<-test2$obs
st2<-test2$state
obsmat2<-do.call(cbind,obs2)# observation matrix
stmat2<-do.call(cbind,st2)# state matrix
t2<-MoreViterbiHMM(obsMat = obsmat2,A=A10,B=B10,
                   PI=pi10,ObjectLabel = c("红","白"))
sum(t2==stmat2)/500# decoding accuracy
#### Test 3: Viterbi decoding, two-state model
a10<-matrix(c(0.4,0.6,
              0.7,0.3),nrow = 2,byrow = TRUE);a10
b10<-matrix(c(0.5,0.5,
              0.3,0.7),nrow = 2,byrow = TRUE);b10
p10<-c(0.3,0.7);p10
## simulate sequences.
## BUG FIX: the original passed B=a10 (the transition matrix) as the
## emission matrix, both here and in the MoreViterbiHMM() call below.
test3<-ObjectHMM(size=100,Lth = 5,A=a10,B=b10,PI=p10,
                 StateLabel = as.character(1:2),ObjectLabel = c("红","白"),seed=666)
obs3<-test3$obs
st3<-test3$state
obsmat3<-do.call(cbind,obs3)# observation matrix
stmat3<-do.call(cbind,st3)# state matrix
t3<-MoreViterbiHMM(obsMat = obsmat3,A=a10,B=b10,
                   PI=p10,ObjectLabel = c("红","白"))
sum(t3==stmat3)/500# decoding accuracy
| /李航统计学习方法笔记代码.R | no_license | BlankSeraph/machine-learning | R | false | false | 89,852 | r | #### 第一章 多项式回归的实现 过拟合现象 ####
polyfit<-function(y,x,maxdeg){
  # Fit polynomial regressions of y on x for every degree 1..maxdeg.
  # Each fit also carries its leave-one-out cross-validated predictions
  # (see lvoneout()). Returns a "polyreg" object: elements 1..maxdeg are
  # the lm fits, plus the raw x and y.
  pwrs<-powers(x,maxdeg)# matrix of x, x^2, ..., x^maxdeg
  lmout<-list()
  class(lmout)<-"polyreg"# create a new S3 class
  for(i in 1:maxdeg){
    lmo<-lm(y~pwrs[,1:i])
    # attach the cross-validated fitted values to the lm object
    lmo$fitted.cvvalues<-lvoneout(y,pwrs[,1:i,drop=FALSE])
    lmout[[i]]<-lmo
  }
  lmout$x<-x
  lmout$y<-y
  return(lmout)
}
print.polyreg<-function(fits){
  # Print (and return) a table of mean squared prediction error (MSPE,
  # cross-validated) and training error (TRAIN) for each degree.
  # NOTE(review): callers rely on the returned table
  # (e.g. error <- print(lmo)), so the explicit return is intentional.
  maxdeg<-length(fits)-2# the last two elements are x and y, not fits
  n<-length(fits$y)
  tbl<-matrix(nrow = maxdeg,ncol=2)
  cat("mean squared prediction errors,by degree\n")
  colnames(tbl)<-c("MSPE","TRAIN")
  for(i in 1:maxdeg){
    fi<-fits[[i]]
    errs<-fits$y-fi$fitted.cvvalues# cross-validated residuals
    spe<-sum(errs^2)
    tbl[i,1]<-spe/n
    tbl[i,2]<-sum(fi$residuals^2)/n
  }
  print(tbl)
  return(tbl)
}
plot.polyreg<-function(fits){
  # Interactive plot: scatter of (x, y), then fitted curves added one
  # degree at a time, driven by the user through readline().
  plot(fits$x,fits$y,xlab="X",ylab="Y")
  maxdg<-length(fits)-2
  cols<-c("red","green","blue")# cycle through three curve colours
  dg<-curvecount<-1
  while(dg<maxdg){
    prompt<-paste("RETURN for CV fit for degree",dg,"or type degree",
                  "or q for quit:")
    rl<-readline(prompt)
    # empty input keeps the current degree; "q" quits; a number jumps there
    dg<-if(rl=="") dg else if(rl!="q") as.integer(rl) else break
    lines(fits$x,fits[[dg]]$fitted.values,col=cols[curvecount%%3+1])
    dg<-dg+1
    curvecount<-curvecount+1
  }
}
powers<-function(x,dg){
  # Build the design matrix [x, x^2, ..., x^dg], one power per column.
  # x: numeric vector; dg: highest degree (>= 1).
  # BUG FIX: the original looped `for (i in 2:dg)`, which for dg == 1
  # evaluates 2:1 = c(2, 1) and appends two spurious columns; guard the
  # loop so dg == 1 yields a single-column matrix.
  pw<-matrix(x,nrow = length(x))
  prod<-x
  if (dg >= 2) {
    for(i in 2:dg){
      prod<-prod*x
      pw<-cbind(pw,prod)
    }
  }
  return(pw)
}
lvoneout<-function(y,xmat){
  # Leave-one-out cross-validation for a linear fit of y on the columns
  # of xmat: refit with observation i held out, then predict y[i].
  # Returns the vector of held-out predictions, one per observation.
  loo_pred <- function(i) {
    fit <- lm(y[-i] ~ xmat[-i, ])
    coefs <- as.vector(fit$coef)
    # prepend 1 to the held-out row for the intercept term
    as.vector(coefs %*% c(1, xmat[i, ]))
  }
  vapply(seq_along(y), loo_pred, numeric(1))
}
#### Example 1.1: polynomial simulation (overfitting demonstration) ####
n<-60
x<-(1:n)/n
y<-vector(length = n)
# true curve sin(3*pi*x/2) + x^2 plus Gaussian noise
for(i in 1:n) y[i]<-sin((3*pi/2)*x[i])+x[i]^2+rnorm(1,0,.5)
dg<-12
lmo<-polyfit(y,x,dg);lmo
error<-print(lmo)
plot(lmo)
#### Plot MSPE / TRAIN error against model complexity ####
plot(error[,"TRAIN"],ylab="error",xlab="complex",
     ylim=c(min(error),max(error)),col="blue",type="b")
points(error[,"MSPE"],type="b",col="red")
abline(v=4,lty=3)
# interactive label placement via locator()
text(locator(),"MSPE",col="red")
text(locator(),"TRAIN",col="blue")
#### Chapter 2: the perceptron ####
#### Primal form of the linear perceptron ####
#### Also handles non-separable data via a tolerance endure > 0 ####
#### Learner: linePercept() ####
linePercept<-function(cls="y",atr=c("x1","x2"),data=NULL,aita=1,
                      endure=0,maxiter=1000,w0=rep(0,length(atr)),b0=0){
  # Primal perceptron learner.
  # cls/atr: names of the label and feature columns in `data`;
  # aita: learning rate; endure: tolerance (0 = strictly separable case);
  # w0/b0: initial weights and bias. Returns a "linePercept" object.
  datause<-data;datause$xb<-1# augment with a constant column so b joins w
  wmat<-matrix(c(w0,b0),nrow=length(atr)+1,ncol=1)# augmented weights, one column per iteration
  iterk<-0
  misssample<-vector()
  while(iterk>=0){
    # y_i * (w . x_i + b) for every sample; negative = misclassified
    sign_mat<-as.matrix(datause[,c(atr,"xb"),drop=F])%*%wmat[,iterk+1,drop=F]%*%
      t(as.matrix(datause[,cls,drop=F]))# convert data.frames to matrices before %*%
    sign_vec<-diag(sign_mat)
    minlab<-which.min(sign_vec)# the most badly misclassified point
    if(endure==0){
      if(sign_vec[minlab]>endure){
        cat("The Final sign_min is : ",sign_vec[minlab],"\n")
        break
      }
    } else if(endure>0){# abs(sign_vec[minlab]) is the largest margin violation
      if(all(w0==0)&&b0==0) stop("w0 and b0 must not all be 0 when endure>0.")
      if(all(sign_vec>0)) break
      if(abs(sign_vec[minlab])<endure){
        cat("The Final sign_min is : ",abs(sign_vec[minlab]),"\n")
        break
      }
    } else stop("The endure must not be smaller than 0. ")
    if(iterk>maxiter) break # give up after maxiter iterations
    # gradient step on the worst misclassified point
    wchange<-wmat[,iterk+1,drop=F]+
      aita*datause[,cls][minlab]*t(as.matrix(datause[minlab,c(atr,"xb"),drop=F]))
    wmat<-cbind(wmat,wchange)
    misssample[iterk+1]<-minlab
    iterk<-iterk+1
  }
  rownames(wmat)<-c(atr,"b");colnames(wmat)<-paste0("iter",0:iterk)
  Percept<-list(Finalweight=t(wmat[,ncol(wmat)]),weight=wmat,
                iteration=iterk,miss=misssample,origindata=data,
                atrdata=data[,atr,drop=F],clsdata=data[,cls,drop=F],
                endure=endure,aita=aita,w0=w0,b0=b0)
  class(Percept)<-"linePercept"
  return(Percept)
}
#### Plot method ####
plot.linePercept<-function(obj){# only meaningful for 2-D feature data
  # Scatter the training points (coloured by class) and draw the learned
  # separating line w1*x1 + w2*x2 + b = 0.
  plot(obj$atrdata[,1],obj$atrdata[,2],
       xlim=c(min(obj$atrdata[,1])-+abs(max(obj$atrdata[,1]))/3,
              max(obj$atrdata[,1])+abs(max(obj$atrdata[,1]))/3),
       ylim=c(min(obj$atrdata[,2])-abs(max(obj$atrdata[,2]))/3,
              max(obj$atrdata[,2])+abs(max(obj$atrdata[,2]))/3),
       col=2*abs(obj$clsdata[,1])+obj$clsdata[,1],pch=19,
       xlab=colnames(obj$atrdata)[1],ylab=colnames(obj$atrdata)[2])
  # line in slope/intercept form: x2 = -(w1/w2) x1 - b/w2
  abline(b=-obj$Finalweight[1,1]/obj$Finalweight[1,2],
         a=-obj$Finalweight[1,3]/obj$Finalweight[1,2],
         col="red",lwd=1.25)
  text(obj$atrdata[,1],obj$atrdata[,2],obj$clsdata[,1])
}
#### 打印函数 ####
print.linePercept<-function(x,...){
  # Print method for "linePercept": show the final weights and iteration
  # count (elements 1 and 3) only.
  # IDIOM FIX: first argument renamed to `x` (with ...) to match the
  # print() generic; returns the object invisibly.
  print.default(x[c(1,3)])
  invisible(x)
}
#### Prediction functions ####
#### preClinePercept() predicts a single instance ####
preClinePercept<-function(lPobj,cls="y",atr=c("x1","x2"),atr_value=c(0,1)){
  # Classify one feature vector with a fitted "linePercept" object:
  # sign(w . x + b) mapped onto the two numeric class labels.
  latr<-length(atr)# number of features
  levelcls<-unique(lPobj$clsdata[,1])
  numcls<-as.numeric(levelcls)
  # chrcls<-as.character(levelcls)
  atrmat<-matrix(c(atr_value,1),nrow=latr+1,ncol=1)# augmented feature vector
  sgn<-ifelse(sign(lPobj$Finalweight%*%atrmat)>0,max(numcls),min(numcls))
  return(as.vector(sgn))
}
#### predict.linePercept() predicts several points at once ####
## for 2-D features it also draws the prediction plot ####
predict.linePercept<-function(lPobj,cls="y",atr=c("x1","x2"),atr_value=NULL){
  # Batch prediction: one row of atr_value per instance. Returns the
  # instances with the predicted class appended as column `cls`.
  predvalue<-apply(atr_value,1,preClinePercept,lPobj=lPobj,atr=atr,cls=cls)
  out_pre<-atr_value
  out_pre[,cls]<-predvalue
  if(length(atr)==2){
    # overlay the predicted points on the training plot
    plot(lPobj);points(out_pre[,atr[1]],out_pre[,atr[2]],pch=23,col="red",cex=2.5,lwd=2)
    text(out_pre[,atr[1]],out_pre[,atr[2]],predvalue,col="red")
  }
  return(out_pre)
}
#### Example 2.1: linearly separable; defaults w0=0, b0=0, endure=0, aita=1 (learning rate) ####
## learn ##
# NOTE(review): dataB2.1 is not defined in this part of the file — it must
# exist in the workspace before running this section.
percept<-linePercept(data=dataB2.1,cls="y",atr=c("x1","x2"))
is(percept$Finalweight)
plot(percept)
## predict ##
data_atr<-data.frame(x1=c(0,2,1,3),x2=c(1,1,3,2))
predict(percept,cls = "y",atr = c("x1","x2"),atr_value = data_atr)
#### Dual form of the linear perceptron ####
#### DualPercept() only works on linearly separable data ####
DualPercept<-function(cls="y",atr=c("x1","x2"),data=NULL,aita=1,
                      maxiter=1000,alpha0=rep(0,nrow(data)),b0=0){
  # Dual perceptron learner: represents w as a weighted sum of training
  # points and precomputes the Gram matrix of inner products.
  # Returns a c("DualPercept","linePercept") object (inherits the plot /
  # predict methods of "linePercept").
  datause<-as.matrix(data)# matrix form for the linear algebra below
  sample_num<-nrow(datause)# number of samples
  clsdata<-datause[,cls,drop=F];atrdata<-datause[,atr,drop=F]
  Gram<-atrdata%*%t(atrdata)# Gram matrix of the training features
  alphaMat<-matrix(c(alpha0,b0),nrow=sample_num+1,ncol=1)# augmented (alpha, b), one column per iteration
  iterk<-0
  misssample<-vector()
  while(iterk>=0){
    alpha_vec<-alphaMat[1:sample_num,iterk+1]# current alphas (vector)
    b<-alphaMat[sample_num+1,iterk+1]# current bias (scalar)
    alpha_cls<-matrix(alpha_vec*clsdata[,1],nrow = sample_num,ncol=1)
    # y_i * (sum_j alpha_j y_j <x_j, x_i> + b) for every sample i
    signMat<-(Gram%*%alpha_cls+b)%*%t(clsdata)
    sign_vec<-diag(signMat)
    minlab<-which.min(sign_vec)# the most badly misclassified point
    if(sign_vec[minlab]>0) break
    # dual update: bump alpha of the misclassified point, shift b
    alphaChange<-alpha_vec
    alphaChange[minlab]<-alphaChange[minlab]+aita
    bChange<-b+aita*clsdata[,1][minlab]
    AllChange<-matrix(c(alphaChange,bChange),sample_num+1,1)
    alphaMat<-cbind(alphaMat,AllChange)
    misssample[iterk+1]<-minlab
    iterk<-iterk+1
  }
  rownames(alphaMat)<-c(paste0("alpha",1:sample_num),"b")
  colnames(alphaMat)<-paste0("iter",0:iterk)
  Finalalpha<-t(alphaMat[,ncol(alphaMat)])# final (alpha, b) as a row
  Finalweight<-rep(0,length(atr))
  for(i in 1:sample_num){# recover w = sum_i alpha_i y_i x_i
    weight<-clsdata[,1][i]*Finalalpha[i]*atrdata[i,]
    Finalweight<-Finalweight+weight
  }
  Finalweight<-c(Finalweight,Finalalpha[sample_num+1])
  Finalweight<-matrix(Finalweight,nrow=1);colnames(Finalweight)<-c(atr,"b")
  PerceptDual<-list(Finalweight=Finalweight,Finalalpha=Finalalpha,
                    iteration=iterk,Alpha=alphaMat,miss=misssample,
                    atrdata=atrdata,clsdata=clsdata,aita=aita,
                    alpha0=alpha0,b0=b0)
  class(PerceptDual)<-c("DualPercept","linePercept")
  return(PerceptDual)
}
### Example 2.2 in R ###
### relies on S3 class inheritance ("DualPercept" inherits "linePercept") ###
perpectdual<-DualPercept(cls="y",atr = c("x1","x2"),data=dataB2.1)
plot(perpectdual)
perpectdual
class(perpectdual)
names(perpectdual)
perpectdual[1:5]
predict(perpectdual,cls="y",atr = c("x1","x2"),atr_value = data_atr)
#### Chapter 3: K nearest neighbours ####
#### Balanced 2-D kd-tree in R ####
### kd_tie() computes the split nodes of a 2-D kd-tree, iteration by iteration ###
### tielist holds the nodes: odd list entries split on x1 (dimension 1),
### even entries on x2; list order follows the iteration order ###
### x_1, x_2 hold the matched sub-sample sets of each iteration ###
kd_tie<-function(x1,x2){
  # Build the split values of a balanced 2-D kd-tree over points (x1, x2).
  # Returns list(tielist = split values per level, x_1/x_2 = the point
  # subsets produced at each level).
  x_1<-list(list(x1));x_2<-list(list(x2));l<-length(x1)
  timelab<-1;tielist<-list()
  while(timelab<=l){
    if(!timelab%%2==0){# odd levels split on dimension 1, even on dimension 2
      x<-x_1[[timelab]]# fetch the list of current subsets
    } else {x<-x_2[[timelab]]}
    # split value of every subset: the (rounded) median
    tie<-sapply(x,median)
    tie<-round(tie)
    tielist[[timelab]]<-tie
    x_1_new<-list()
    x_2_new<-list()
    lstx<-length(x)
    for(j in 1:lstx){
      # partition subset j into points left/right of its median
      xj<-x[[j]]
      x_left<-which(xj<round(median(xj)))
      x_right<-which(xj>round(median(xj)))
      x_1_new[[2*j-1]]<-x_1[[timelab]][[j]][x_left]
      x_1_new[[2*j]]<-x_1[[timelab]][[j]][x_right]
      x_2_new[[2*j-1]]<-x_2[[timelab]][[j]][x_left]
      x_2_new[[2*j]]<-x_2[[timelab]][[j]][x_right]
    }
    x_1[[timelab+1]]<-x_1_new
    x_2[[timelab+1]]<-x_2_new
    # stop once some subset has at most one point; record the last level
    lbreak<-sapply(x_1_new,length)
    if(any(lbreak<=1)){
      end_timetab<-timelab+1
      if(!end_timetab%%2==0){
        end_tie<-sapply(x_1_new,median)
        tielist[[end_timetab]]<-round(end_tie)
      } else {
        end_tie<-sapply(x_2_new,median)
        tielist[[end_timetab]]<-round(end_tie)
      }
      break
    }
    timelab<-timelab+1
  }
  list(tielist=tielist,x_1=x_1,x_2=x_2)
}
# demo data for the kd-tree
x1<-c(2,4,5,7,8,9,12,13,11,1,3,14);x2<-c(3,7,4,2,1,6,9,5,12,11,13,15)
x1<-c(2,4,5,7,8,9);x2<-c(3,7,4,2,1,6)# note: overwrites the vectors above
kd_tie(x1,x2)
kd_plot<-function(x1,x2){
  # Draw the balanced 2-D kd-tree partition of the points (x1, x2):
  # vertical lines for x1-splits, horizontal lines for x2-splits.
  # Returns the kd_tie() result invisibly used to draw the plot.
  m1<-max(x1);m2<-max(x2);s1<-min(x1);s2<-min(x2);l<-length(x1)
  kd2_out<-kd_tie(x1=x1,x2=x2)
  tie_kd2<-kd2_out$tielist# the node (split value) information
  ltie<-length(tie_kd2)
  plot(x1,x2,xlim = c(s1,m1),ylim = c(s2,m2),type = "n",xlab ="x(1)",ylab="x(2)",
       main="Balance kd2 tree plot")
  points(x1,x2,pch=19)
  xkd<-tie_kd2[[1]][1]
  abline(v=xkd,col="red",lty=3)# root split on x(1)
  for(i in 2:ltie){
    plt<-tie_kd2[[i]]
    lplt<-length(plt)
    lsep<-seq(1,lplt,by=2)
    if(i%%2==0){
      # even levels: horizontal split segments bounded by the parent split
      for(j in lsep){
        lines(c(s1-1,xkd[(j+1)/2]),c(plt[j],plt[j]),col="red",lty=3)
        lines(c(xkd[(j+1)/2],m1+1),c(plt[j+1],plt[j+1]),col="red",lty=3)
      }
    }else{
      # odd levels: vertical split segments bounded by the parent split
      for(j in lsep){
        lines(c(plt[j],plt[j]),c(s2-1,xkd[(j+1)/2]),col="red",lty=3)
        lines(c(plt[j+1],plt[j+1]),c(xkd[(j+1)/2],m2+1),col="red",lty=3)
      }
    }
    xkd<-tie_kd2[[i]]
  }
  return(tiekd2=kd2_out)
}
# demo: kd-tree partition plot with negative coordinates
x1<-c(-2,4,5,7,8,9,-5);x2<-c(3,7,-4,2,1,6,8)
kd_plot(x1=x1,x2=x2)
#### Lp范数递减性模拟 ####
LpSim.Plot<-function(number,maxp=1){
  # Illustrates that the Lp norm of a fixed nonnegative vector is
  # non-increasing in p and approaches the maximum element.
  # number: nonnegative values; maxp: largest norm order p to evaluate.
  # Returns the max element, the smallest norm observed, and all norms.
  if(any(number<0)) stop("The number must not smaller than 0.")
  biggest <- max(number)
  norms <- vapply(seq_len(maxp),
                  function(p) (sum(number^p))^(1 / p),
                  numeric(1))
  # show points+lines only when there are few enough p values to read them
  plot_type <- if (maxp <= 20) "b" else "l"
  plot_col <- if (maxp <= 20) "blue" else "red"
  plot(1:maxp, norms, type = plot_type, ylab = "LpValue", xlab = "p",
       col = plot_col, main = "Simulate Plot of Lp")
  list(maxnumber = biggest, minLp = min(norms), LpValue = norms)
}
# demo: Lp norm shrinks towards max(number) as p grows
LpSim.Plot(number = 1:5,maxp=15)
LpSim.Plot(number = 1:5,maxp=150)
LpSim.Plot(number = sample(1:50,10),maxp=15)
#### 基于线性扫描的KNN实现 lineKnn()####
# Lp distance between one test instance and every training sample
# (dataTrain holds feature columns only).
LpCalculate<-function(dataTest,atr=c("x1","x2","x3"),dataTrain=NULL,p=2){
  # dataTest: numeric feature vector of a single instance; atr: feature
  # (column) names to compare on; p: order of the Lp norm.
  # Returns one distance per training row.
  # IMPROVED: vapply over seq_len() handles an empty training set
  # gracefully (returns numeric(0)); the original `for (i in 1:n)` loop
  # failed with a subscript error when nrow(dataTrain) == 0.
  train<-as.matrix(dataTrain)
  vapply(seq_len(nrow(train)),
         function(i) (sum(abs(dataTest-train[i,atr])^p))^(1/p),
         numeric(1))
}
lineKnn<-function(cls="y",atr=c("x1","x2","x3"),dataTrain=NULL,
                  dataTest=NULL,k=3,p=2){
  # k-nearest-neighbour classification by linear scan.
  # cls/atr: label and feature column names; k: neighbourhood size;
  # p: order of the Lp distance. Returns a "lineKnn" object with the
  # predicted labels, the per-instance neighbour classes and distances.
  data_use<-dataTrain# beware: a character column coerces a whole matrix to character
  atrdata<-data_use[,atr,drop=F];clsdata<-data_use[,cls]# vector of labels
  dataTest<-dataTest[,atr]
  # distance from every test instance (row) to every training sample
  LpMat<-t(apply(dataTest,1,LpCalculate,atr=atr,dataTrain=atrdata,p=p))
  # NOTE(review): warnings are suppressed globally here and never
  # restored — a session-wide side effect; dprep is a non-CRAN package
  # providing moda() (the mode of a character vector).
  options(warn=-1)
  library(dprep)
  if(k==1){
    clsMatk<-apply(LpMat,1,function(x) clsdata[order(x)[1:k]])
    kPredict<-clsMatk# vector
  } else{
    clsMatk<-t(apply(LpMat,1,function(x) clsdata[order(x)[1:k]]))# classes of the k nearest only
    # majority vote; ties broken at random among the modes
    kPredict<-apply(clsMatk,1,function(x) sample(moda(x),1))
  }# moda() works on character vectors
  detach("package:dprep")
  outPredict<-dataTest;outPredict[,cls]<-kPredict
  df<-list(FinalPredict=kPredict,PredictMat=outPredict,clsMatk=clsMatk,
           LpMat=LpMat,dataTrain=dataTrain,dataTest=dataTest,atr=atr,cls=cls,k=k,p=p)
  class(df)<-"lineKnn"
  return(df)
}
print.lineKnn<-function(x,...){
  # Print method for "lineKnn": show only the final predictions.
  # IDIOM FIX: first argument renamed to `x` (with ...) to match the
  # print() generic; returns the object invisibly.
  print(x[1])
  invisible(x)
}
plot.lineKnn<-function(Knnobj){
  # Prediction plot for 2-D features: training points coloured/shaped by
  # true class, test points overlaid in blue with their predicted class
  # shape. Does nothing for other feature dimensions.
  Train<-Knnobj$dataTrain
  Test<-Knnobj$dataTest
  atr<-Knnobj$atr;cls<-Knnobj$cls
  latr<-length(atr)
  if(latr==2){
    plot(Train[,atr[1]],Train[,atr[2]],xlab=atr[1],ylab=atr[2],
         col=as.numeric(as.factor(Train[,cls])),pch=abs(as.numeric(as.factor(Train[,cls]))),
         main="Predict Plot of Knn")
    points(Test[,atr[1]],Test[,atr[2]],col="blue",
           pch=abs(as.numeric(as.factor(Knnobj$FinalPredict))),cex=2)
  }
}
#### Demo: the iris data set ####
lab<-sample(1:150,130)# random train/test split (130 train, 20 test)
iris[,1:2]
dataKnn_iris<-iris[lab,]
dataKnn_iris_test<-iris[-lab,]
dataKnn_iris_atr<-iris[-lab,-5]# test features only (drop Species)
Knn_iris<-lineKnn(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
                  dataTrain = dataKnn_iris,dataTest = dataKnn_iris_atr,k=3,p=10)
names(Knn_iris)
sapply(Knn_iris,is)
# compare the true labels with the predictions
cbind(as.character(dataKnn_iris_test[,5]),as.character(Knn_iris$FinalPredict))
Knn_iris$PredictMat
Knn_iris$LpMat
Knn_iris$clsMatk
#### 第四章 朴素贝叶斯法的实现 ####
#### navieBayes() 基于极大似然估计及贝叶斯估计的朴素贝叶斯法实现(离散特征情形)####
navieBayes<-function(cls="Y",atr=c("X1","X2"),data=NULL,lmada=0){
  # Naive Bayes for discrete features with optional Bayesian (Laplace)
  # smoothing.
  # cls: response column name; atr: feature column names; data: a
  # data.frame; lmada: smoothing constant (0 = maximum likelihood,
  # 1 = Laplace smoothing).
  # Returns a "navieBayes" object holding the class prior table and one
  # conditional probability table P(X_i | Y) per feature.
  if(!is.data.frame(data)) stop("Please enter a data.frame.")
  if(lmada<0) stop("lmada must be greater than or equal to ZERO.")
  # BUG FIX: as.data.frame(apply(data, 2, as.factor)) leaves CHARACTER
  # columns under R >= 4.0 (stringsAsFactors = FALSE), so levels()
  # returned NULL and the prior loop broke. Coerce each column to a
  # factor explicitly.
  d<-as.data.frame(lapply(data,as.factor))
  n<-nrow(d)
  prodvar_lst<-list()# accumulates all probability tables
  prec_var<-d[cls][,1];levelprec<-levels(prec_var);lprec<-length(levelprec)
  prec_p<-data.frame(level=levelprec,prob=NA)
  for(i in 1:lprec){
    # smoothed class prior P(Y = level_i)
    prec_p[i,2]<-(sum(prec_var==levelprec[i])+lmada)/(n+lprec*lmada)
  }
  prodvar_lst[[cls]]<-prec_p
  lvar<-length(atr)# number of features
  for(i in 1:lvar){
    # smoothed conditional table P(X_i | Y): columns normalised per class
    xvar<-d[atr[i]][,1]
    txy<-table(xvar,prec_var)+lmada
    ptxy<-prop.table(txy,2)
    prodvar_lst[[atr[i]]]<-ptxy
  }
  prodvar_lst$lmada<-lmada
  prodvar_lst$response<-cls
  prodvar_lst$variables<-atr
  class(prodvar_lst)<-"navieBayes" # S3 class for print/predict dispatch
  return(prodvar_lst)
}
# quick smoke test (NOTE(review): dataB4.1 is only defined further below,
# so this line fails when the file is run top-to-bottom)
navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 1)
#### 编写打印函数:print.navieBayes() ####
print.navieBayes<-function(x,...){
  # Print method for "navieBayes": a header with the response name and
  # smoothing constant, the feature list, then the prior and per-feature
  # probability tables.
  # IDIOM FIX: first argument renamed to `x` (with ...) to match the
  # print() generic; returns the object invisibly.
  cat("response = prec_var: ",x$response,";","lmada = ",x$lmada,"\n","\n")
  cat("The variables are : ",x$variables,"\n","\n")
  lobj<-length(c(x$response,x$variables))
  print.default(x[1:lobj])
  invisible(x)
}
#### 编写预测函数: predict.navieBayes ####
#### preCnavieBayes() 只能进行一个实例的预测 ####
preCnavieBayes<-function(NBobj,cls=NULL,atr=NULL,atr_value=NULL){
  # Score ONE instance with a fitted "navieBayes" object: for every class
  # level, compute prior * product of conditional feature probabilities
  # (the unnormalised posterior).
  # atr_value: the instance's feature values, in the order of `atr`.
  # Returns a data.frame with one row per class level and a post_p column.
  level<-NBobj[[cls]][,1];ncls<-length(level)
  latr<-length(NBobj)-4# number of features (4 bookkeeping elements at the end)
  start_atr<-2
  end_atr<-latr+1
  predict_df<-data.frame(matrix(NA,ncls,latr+2))# result holder
  colnames(predict_df)<-c(atr,"level","post_p")
  # repeat the instance's feature values on every class row
  for(l in 1:latr){
    predict_df[1:ncls,l]<-atr_value[l]
  }
  predict_df$level<-level
  for(i in 1:ncls){
    xvec<-NULL
    for(j in start_atr:end_atr){
      # look up P(X_j = value | Y = level_i) in the stored table
      xwhich<-which(rownames(NBobj[[atr[j-1]]])==as.character(atr_value[j-1]))
      ywhich<-which(colnames(NBobj[[atr[j-1]]])==as.character(predict_df$level[i]))
      px<-NBobj[[atr[j-1]]][xwhich,ywhich]
      xvec<-c(xvec,px)
    }
    ypre<-NBobj[[1]][,2][i]# the class prior
    predict_df[i,4]<-ypre*prod(xvec)
  }
  return(predict_df)
}
#### 泛函predict.navieBayes()针对类“navieBayes”,可一次进行多个样本实例的预测 ####
predict.navieBayes<-function(NBobj,cls=NULL,atr=NULL,atr_value=NULL){
  # Predict unnormalised posterior class scores for one or more instances.
  # atr_value: data.frame with one row per instance (columns follow `atr`).
  # Returns the stacked per-instance score tables from preCnavieBayes().
  if(!is.data.frame(atr_value)) stop("atr_value must be a data.frame!")
  post_lst<-apply(atr_value,1,preCnavieBayes,NBobj=NBobj,atr=atr,cls=cls)
  # IMPROVED: bind all per-instance results in one call instead of growing
  # a data.frame with rbind() inside a loop (quadratic copying).
  post_df<-do.call(rbind,post_lst)
  cat("The response : ",cls,"\n")
  return(post_df)
}
#### Example 4.1 ####
X1<-c(1,1,1,1,1,2,2,2,2,2,3,3,3,3,3)
X2<-c("S","M","M","S","S","S","M","M","L","L","L","M","M","L","L")
Y<-c(-1,-1,1,1,-1,-1,-1,1,1,1,1,1,1,1,-1)
dataB4.1<-data.frame(X1=X1,X2=X2,Y=Y)
## lmada=0: maximum likelihood estimation ##
plist<-navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 0)
pred_var<-data.frame(X1=c(2,1,1,3,3),X2=c("S","L","S","M","L"))
predict(plist,cls="Y",atr=c("X1","X2"),atr_value =pred_var)
## Example 4.2: lmada=1, Bayesian estimation with Laplace smoothing ##
plist1<-navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 1)
pred_var<-data.frame(X1=c(2,1),X2=c("S","L"))
predict(plist1,cls="Y",atr=c("X1","X2"),atr_value =pred_var)
## lmada=3 ###
plist3<-navieBayes(cls="Y",atr=c("X1","X2"),data=dataB4.1,lmada = 3)
pred_var<-data.frame(X1=c(2,1),X2=c("S","L"))
predict(plist3,cls="Y",atr=c("X1","X2"),atr_value =pred_var)
print.default(plist3)
# inspect the fitted object
plist
class(plist)
names(plist)
plist$lmada
plist$variables
sapply(plist,class)
?str
1/15
1/45
#### Chapter 5: decision trees ####
#### Entropy curve H(p) of the Bernoulli(p) distribution ####
p<-pretty(c(0.01,0.99),100)
HpVec<-vector(length = length(p))
# H(p) = -p log2 p - (1-p) log2 (1-p); maximal at p = 0.5
for(i in 1:length(p)) HpVec[i]<--p[i]*log(p[i],2)-(1-p[i])*log(1-p[i],2)
plot(p,HpVec,type="l",col="red");abline(v=0.5,lty=3)
#### Information gain and information gain ratio: InfoGain() ####
InfoGain<-function(cls=NULL,atr=NULL,method=c("info","inforate"),data=NULL){
  # Information gain (method = "info") or gain ratio (method = "inforate")
  # of every feature in `atr` with respect to the class column `cls`.
  # Returns the gains plus the entropies they were built from.
  HDfunc<-function(atrcls){# empirical entropy of one vector (base 2)
    l<-length(atrcls)
    tatrcls<-table(atrcls)
    atrclspvec<-as.vector(tatrcls)/l
    # define 0 * log(0) = 0 for entropy purposes
    logatrclspvec<-ifelse(atrclspvec==0,0,log(atrclspvec,2))
    HD<--as.vector(atrclspvec%*%logatrclspvec)
    return(HD)
  }
  HDcls<-HDfunc(atrcls = data[,cls])# H(D): entropy of the class
  HDatr<-apply(data[,atr],2,HDfunc)# H_A(D): entropy of each feature
  HatrVec<-apply(data[,atr],2,Hatr,clsvec=data[,cls])# H(D|A) per feature
  if(method=="info"){
    infogain<-HDcls-HatrVec# g(D, A) = H(D) - H(D|A)
  } else if(method=="inforate"){
    infogain<-(HDcls-HatrVec)/HDatr# g_R(D, A) = g(D, A) / H_A(D)
  } else stop("Please choose a useable method.")
  names(infogain)<-atr
  list(infogain=infogain,HDcls=HDcls,HatrVec=HatrVec,HDatr=HDatr)
}
Hatr<-function(atrvec=NULL,clsvec=NULL){
  # Empirical conditional entropy H(class | attribute) in bits.
  # atrvec / clsvec: attribute values and class labels of equal length.
  n <- length(atrvec)
  weight <- as.vector(table(atrvec)) / n          # P(A = a) per level
  cond <- prop.table(table(atrvec, clsvec), 1)    # P(Y = y | A = a)
  logcond <- ifelse(cond == 0, 0, log(cond, 2))   # 0*log(0) treated as 0
  rowterm <- apply(cond * logcond, 1, sum)        # -H(Y | A = a) per level
  -as.vector(weight %*% rowterm)
}
#### Example 5.2 implementation ####
# Loan-application toy data; attribute values are Chinese strings
# (A1 age group, A2 has job, A3 owns house, A4 credit rating, Y approved)
# and are DATA, so they are left untranslated.
A1<-rep(c("青年","中年","老年"),each=5)
A2<-c("否","否","是","是","否","否","否","是","否","否","否","否","是","是","否")
A3<-c("否","否","否","是","否","否","否","是","是","是","是","是","否","否","否")
A4<-c("一般","好","好","一般","一般","一般","好","好","非常好","非常好","非常好","好",
"好","非常好","一般")
Y<-c("否","否","是","是","否","否","否","是","是","是","是","是","是","是","否")
dataB5.1<-data.frame(A1,A2,A3,A4,Y);dataB5.1
Hatr(atrvec = dataB5.1$A1,clsvec = dataB5.1$Y)
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="info",data=dataB5.1)
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="inforate",data=dataB5.1)
#### Information gains after modifying the sample data ####
A1<-rep(c("少年","青年","中年","老年","晚年"),each=3)#now 5 categories
A2<-c("否","否","是","是","否","否","否","是","否","否","否","否","是","是","否")
A3<-c("否","否","否","是","否","否","否","是","是","是","是","是","否","否","否")
A4<-c("坏","好","好","坏","一般","一般","好","好","非常好","非常好","非常好","好",
"好","极好","一般")#now 5 categories
Y<-c("否","否","是","是","否","否","否","是","是","是","是","是","是","是","否")
dataB5.1<-data.frame(A1,A2,A3,A4,Y);dataB5.1
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="info",data=dataB5.1)
InfoGain(cls="Y",atr=c("A1","A2","A3","A4"),method="inforate",data=dataB5.1)
#### 例5.3的R实现 ####
library(dprep)
subTree<-function(cls="Y",atr=c("A1","A2","A3","A4"),method=c("info","inforate"),
                  data=NULL,ept=0.1){
  # One splitting step of an ID3/C4.5-style decision tree.
  # Returns either a leaf (origindata / single / infoatr) or a list with one
  # child data.frame per value of the best attribute, plus newatr (remaining
  # attributes) and infoatr (the attribute chosen for the split).
  atrcl<-atr;clscl<-cls;datacl<-data
  clsclvalue<-unique(datacl[,clscl])
  infoCalcul<-InfoGain(cls=clscl,atr=atrcl,data=datacl,method = method)
  subtree<-list()
  if(length(clsclvalue)==1){
    # All samples share one class -> pure leaf.
    subtree[["origindata"]]<-datacl
    subtree[["single"]]<-clsclvalue
    subtree[["infoatr"]]<-"None"
    return(subtree)
  } else if(length(atrcl)==0||max(infoCalcul$infogain)<ept){
    # No attribute left, or best gain below threshold -> majority-vote leaf.
    lab<-moda(datacl[,clscl])   # moda() from package dprep (mode of a vector)
    subtree[["origindata"]]<-datacl
    if(length(lab)==1) subtree[["single"]]<-lab
    if(length(lab)>=2) subtree[["single"]]<-sample(lab,1)  # random tie-break
    subtree[["infoatr"]]<-"None"
    return(subtree)
  }
  atrlab<-which.max(infoCalcul$infogain)
  atrchs<-datacl[,atrcl[atrlab]]   # values of the highest-gain attribute
  unqatrchs<-unique(atrchs);lunq<-length(unqatrchs)
  # FIX: drop the chosen attribute BY NAME. The old positional drop
  # datacl[, -atrlab] silently removed the wrong column whenever the
  # attribute columns were not the first columns of `data` in `atr` order.
  keep<-setdiff(names(datacl),atrcl[atrlab])
  for(i in seq_len(lunq)){
    subtree[[i]]<-datacl[which(atrchs==unqatrchs[i]),keep,drop=FALSE]
  }
  names(subtree)<-paste0(atrcl[atrlab],"=",unqatrchs)
  subtree[["newatr"]]<-atrcl[-atrlab]
  subtree[["infoatr"]]<-atrcl[atrlab]
  return(subtree)
}
#### Example 5.3: ID3 (information gain) ####
# NOTE(review): dataB5.2 is not defined in this part of the file -- it is
# presumably created elsewhere in the session; verify before sourcing.
stree<-subTree(cls="Y",atr = c("A1","A2","A3","A4"),method="info",data=dataB5.2);stree
stree2<-lapply(stree[1:2],subTree,cls="Y",atr=c("A1","A2","A4"),method="info")
#### Example 5.3: C4.5 (information gain ratio) ####
stree<-subTree(cls="Y",atr = c("A1","A2","A3","A4"),method="inforate",data=dataB5.2)
stree2<-lapply(stree[1:2],subTree,cls="Y",atr=c("A1","A2","A4"),method="inforate")
#### 决策树剪枝cutTree() ####
Extree<-function(obj){
  # Collect the leaf data.frames from a two-level list of subTree() results.
  # Each element of `obj` is one subTree() output whose LAST TWO components
  # (newatr / infoatr) are bookkeeping, so only the first length-2
  # components are leaves.
  lobj<-length(obj)
  lvec<-sapply(obj,length)-2
  newlst<-list()
  st<-0
  for(i in seq_len(lobj)){
    # FIX: seq_len() instead of 1:lvec[i]. The old form evaluated 1:0 when a
    # node had no leaves, ran with j = 1, 0 and crashed on obj[[i]][[0]].
    for(j in seq_len(lvec[i])){
      newlst[[st+j]]<-obj[[i]][[j]]
    }
    st<-st+lvec[i]
  }
  return(newlst)
}
#### 编写函数计算经验熵 ####
HDfunc<-function(atrcls){
  # Empirical entropy (in bits) of the discrete values in `atrcls`.
  probs <- as.vector(table(atrcls)) / length(atrcls)
  # Guard 0 * log(0): unused factor levels would yield zero counts.
  logs <- ifelse(probs == 0, 0, log(probs, 2))
  -as.vector(probs %*% logs)
}
#### 计算损失函数 ####
cutTree<-function(cls="Y",data=NULL,alpha=1){
  # Cost-complexity loss C_alpha(T) = sum_t N_t * H_t + alpha * |T| of a
  # pruned tree; `data` is a list of leaf data.frames (Extree() output).
  leaf_cls <- lapply(data, function(leaf) leaf[, cls])
  leaf_entropy <- sapply(leaf_cls, HDfunc)  # empirical entropy per leaf
  leaf_size <- sapply(leaf_cls, length)     # sample count per leaf
  leaf_entropy %*% leaf_size + alpha * length(data)
}
# Compare the loss of the one-level and two-level trees from example 5.3.
stree1<-subTree(cls="Y",atr = c("A1","A2","A3","A4"),method="info",data=dataB5.2)
stree2<-lapply(stree1[1:2],subTree,cls="Y",atr=c("A1","A2","A4"),method="info")
le1<-Extree(list(stree1))
le2<-Extree(stree2)
alp=1
cutTree(cls="Y",data=le1,alpha=alp)
cutTree(cls="Y",data=le2,alpha=alp)
#### Effect of alpha on pruning ####
ysimple<-vector(length = 20);ycomplex<-vector(length = 20)
for(i in 1:20){
ysimple[i]<-as.vector(cutTree(cls="Y",data=le1,alpha=i))
ycomplex[i]<-as.vector(cutTree(cls="Y",data=le2,alpha=i))
}
plot(1:20,ycomplex,type="b",col="red",xlab="alpha",ylab = "loss",
main="Loss Plot of Alpha")
points(1:20,ysimple,type="b",col="blue")
# locator() waits for interactive mouse clicks to place the labels.
text(locator(),"SimpleTree",col="blue")
text(locator(),"ComplexTree",col="red")
#### Gini(p)与1/2H(p)关系模拟 ####
#p是一个概率向量,及满足所有元素的和为1
HpSim<-function(p){
  # Shannon entropy (bits) of a probability vector; 0*log(0) counted as 0.
  # Returns a 1x1 matrix (result of %*%), like the original.
  logs <- ifelse(p == 0, 0, log(p, 2))
  -(p %*% logs)
}
GpSim<-function(p){
  # Gini index of a probability vector: sum_k p_k * (1 - p_k).
  # Returns a 1x1 matrix (result of %*%), like the original.
  p %*% (1 - p)
}
#随机生成概率分布p#
pCreate<-function(l=10,chs=1000){
  # Random discrete probability distribution of length l: draw l DISTINCT
  # integers from 0..chs and normalise them by their sum.
  draws <- sample(0:chs, l)
  draws / sum(draws)
}
# Simulate 100 random distributions with random support size (2..20) and
# compare entropy Hp, half-entropy and Gini index Gp as uncertainty measures.
plist<-list()
lst<-vector(length = 100)
chlst<-vector(length = 100)
for(i in 1:100){
l<-sample(2:20,1)
chs<-sample(30:1000,1)
lst[i]<-l
chlst[i]<-chs
plist[[i]]<-pCreate(l=l,chs=chs)
}
all(sapply(plist,sum)==1)# check each sums to 1 (exact == on floats; all.equal would be safer)
hpvec<-sapply(plist,HpSim)
gpvec<-sapply(plist,GpSim)
dataHG<-data.frame(K=lst,halfHp=hpvec/2,
Gp=gpvec,Hp=hpvec)
datahg<-dataHG[order(dataHG[,1]),]
plot(datahg$K,datahg$Hp,type="b",col="black",xlab = "K",
ylab="uncertainty",main="The Plot of Hp and Gp")
points(datahg$K,datahg$halfHp,type = "b",col="red")
points(datahg$K,datahg$Gp,type = "b",col="blue")
# locator() waits for interactive mouse clicks to place the labels.
text(locator(),"Gp",col="blue")
text(locator(),"halfHp",col="red")
text(locator(),"Hp",col="black")
#### Figure 5.7 ####
# Compare half-entropy, Gini index and misclassification rate for Bernoulli(p).
p<-pretty(c(0.01,0.99),100)
HpVec<-vector(length = length(p))
GpVec<-vector(length = length(p))
for(i in 1:length(p)){
HpVec[i]<--p[i]*log(p[i],2)-(1-p[i])*log(1-p[i],2)
GpVec[i]<-2*p[i]*(1-p[i])
}
error<-ifelse(p<.5,p,1-p)# misclassification rate of the majority rule
plot(p,HpVec/2,type="l",col="red",xlab="p",ylab="value")
lines(p,GpVec,type="l",col="blue")
lines(p,error,type = "l",col="black")
abline(v=0.5,lty=3)
# locator() waits for interactive mouse clicks to place the labels.
text(locator(),"Gp",col="blue")
text(locator(),"halfHp",col="red")
text(locator(),"error",col="black")
#### 基尼系数的R实现、例5.4的程序求解 ####
GpSim<-function(p){
  # Gini index of a probability vector: sum_k p_k * (1 - p_k).
  # (Re-definition, identical to the earlier one; returns a 1x1 matrix.)
  p %*% (1 - p)
}
GiniSingle<-function(atrvec=NULL,clsvec=NULL){
  # Gini index of the binary split "A == a" vs "A != a" for every value a of
  # the attribute. Returns a named vector (names = attribute values).
  D<-length(clsvec)
  txy<-table(atrvec,clsvec)
  nlev<-nrow(txy)                        # one split candidate per value
  giniatr<-vector(length = nlev)
  for(i in seq_len(nlev)){
    t1<-txy[i,];st1<-sum(t1)             # class counts with A == a
    t2<-txy[-i,,drop=FALSE];st2<-sum(t2) # class counts with A != a
    p1<-t1/st1;p2<-colSums(t2)/st2       # class distributions in each half
    # NOTE(review): with a single attribute level st2 == 0 and the result is
    # NaN, exactly as in the original implementation.
    giniatr[i]<-(st1/D)*GpSim(p1)+(st2/D)*GpSim(p2)
  }
  names(giniatr)<-rownames(txy)
  return(giniatr)
}
# Per-value Gini indices for each attribute of the example data.
GiniSingle(A1,Y)
GiniSingle(A2,Y)
GiniSingle(A3,Y)
GiniSingle(A4,Y)
GiniCART<-function(cls=NULL,atr=NULL,data=NULL){
  # One CART split: pick the (attribute, value) pair with the smallest Gini
  # index and partition `data` into matching / non-matching halves.
  # Returns Finalabel, FinalGini, GiniMat, Ginilst and the two child frames.
  if(length(unique(data[,cls]))==1) return(list(Finalabel="None",D=data))
  # FIX: lapply keeps each column's type and ALWAYS yields a list; apply()
  # coerced the frame to a character matrix and only returned a list when the
  # per-column results happened to be ragged.
  ginilst<-lapply(data[,atr,drop=FALSE],GiniSingle,clsvec=data[,cls])
  nlst<-names(ginilst)
  # 2 x k matrix: row 1 = index of best value, row 2 = its Gini index.
  outgini<-sapply(ginilst,function(x) rbind(which.min(x),min(x)))
  minlab<-which.min(outgini[2,])
  atrlab<-outgini[1,minlab];atrchs<-names(ginilst[[minlab]])[atrlab]
  lab<-which(data[,nlst[minlab]]==atrchs)
  # FIX: drop the chosen attribute BY NAME. The positional data[, -minlab]
  # removed the wrong column whenever `atr` order differed from column order.
  keep<-setdiff(names(data),nlst[minlab])
  list(Finalabel=c(nlst[minlab],atrchs),FinalGini=outgini[2,minlab],
       GiniMat=outgini,Ginilst=ginilst,
       data[lab,keep,drop=FALSE],data[-lab,keep,drop=FALSE])
}
# Example 5.4: grow the CART root split, then split the two children.
cart1<-GiniCART(cls="Y",atr=c("A1","A2","A3","A4"),data=dataB5.2);cart1[1:4]
cart2<-lapply(cart1[5:6],GiniCART,cls="Y",atr=c("A1","A2","A4"));cart2[[1]]
da1<-cart1[[5]]
table(da1$A2,da1$Y)
#### 第六章 逻辑斯蒂回归于最大熵模型 ####
gradLogistic<-function(cls=NULL,atr=NULL,data=NULL,scale=TRUE,
                       w0=rep(0,length(atr)+1),aita=1,ept=1e-5,maxiter=100000){
  # Binary logistic regression fitted by plain gradient descent.
  # cls: name of the 0/1 response column; atr: predictor column names;
  # aita: learning rate; ept: convergence tolerance; w0: start weights
  # (predictors first, intercept last). Returns a "gradLogistic" object.
  if(!is.data.frame(data)) stop("data must be a data.frame.")
  datause<-data;datause$xb<-1 # augment with an intercept column
  atrdata<-datause[,c(atr,"xb"),drop=F];atrdata<-as.matrix(atrdata)
  if(scale){# standardise predictors (mean 0, sd 1); intercept untouched
    for(i in seq_along(atr)){
      atrdata[,i]<-scale(atrdata[,i])
    }
  }
  clsdata<-datause[,cls,drop=F];clsdata<-as.matrix(clsdata)
  N<-nrow(datause)
  MinusLog<-function(wuse,y=clsdata[,1],x=atrdata){
    # Negative log-likelihood; when exp() overflows, log(1+e^z) ~ z.
    n<-nrow(atrdata)
    MLog<-vector(length = n)
    for(i in seq_len(n)){
      ep<-as.vector(wuse%*%x[i,])
      epe<-exp(ep)
      if(is.infinite(epe)){
        MLog[i]<-ep-y[i]*ep
      } else{
        MLog[i]<-log(1+epe)-y[i]*ep
      }
    }
    return(sum(MLog))
  }
  calpi<-function(x){
    # P(y = 1 | x) under the current weights; 1 when exp() overflows.
    ex<-exp(w%*%x)
    if(is.infinite(ex)){
      px<-1
    } else{
      px<-ex/(1+ex)
    }
    return(px)
  }
  w<-w0# current weights, vector
  # FIX: initialise wbefore so the post-loop summary works even when the very
  # first gradient already satisfies the stopping rule (previously this
  # raised "object 'wbefore' not found").
  wbefore<-w0
  iterk<-1
  while(iterk>=1){
    pi<-apply(atrdata,1,calpi)# fitted probabilities, vector
    piMinusy<-matrix(pi-clsdata[,1],nrow = N,ncol=1)# N x 1 residuals
    gradf<-t(atrdata)%*%piMinusy# gradient of the negative log-likelihood
    gradfvec<-gradf[,1]
    if(sqrt(sum(gradfvec^2))<=ept){
      stoprule<-'sqrt(sum(gradfvec^2))<=ept'
      break
    }
    wbefore<-w
    w<-w-aita*gradfvec# gradient-descent step
    MinusLogBtw<-MinusLog(wuse=w)-MinusLog(wuse=wbefore)
    wBtw<-w-wbefore
    if(abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept){
      stoprule<-'abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept'
      break
    }
    if(iterk>=maxiter){
      stoprule<-'iterk>=maxiter'
      break
    }
    iterk<-iterk+1
  }
  names(w)<-c(atr,"b")
  outlst<-list(weight=w,minusLogkplus1=MinusLog(wuse=w),
               minusLogk=MinusLog(wuse=wbefore),variable=atr,
               response=cls,origindata=data,iteration=iterk,
               formula=paste(cls,"~",paste(atr,collapse = "+")),
               stoprule=stoprule)
  class(outlst)<-"gradLogistic"
  return(outlst)
}
print.gradLogistic<-function(obj){
  # Print method: stopping rule, iteration count, formula, then the weights
  # and the final / previous negative log-likelihoods at 9 digits.
  cat("The stoprule is : ",obj$stoprule,"\n")
  cat("iteration : ",obj$iteration,"\n")
  cat("formula : ",obj$formula,"\n")
  oldlst<-options(digits = 9)
  # FIX: restore the caller's options even if print() fails part-way.
  on.exit(options(oldlst), add = TRUE)
  print(obj[1:3])
  invisible(obj)
}
predict.gradLogistic<-function(obj,atr=NULL,atr_value=NULL){
  # Predict class probabilities / 0-1 labels for new data.
  # NOTE(review): columns are re-standardised with the TEST set's own
  # mean/sd, mirroring the training code; fine for in-sample back-testing,
  # questionable for genuine out-of-sample data -- confirm before reuse.
  weight<-obj$weight
  atr_value$b<-1                     # intercept column
  for(i in seq_along(atr)){
    # FIX: scale BY NAME. The old positional atr_value[,i] silently scaled
    # the wrong column when atr_value's column order differed from `atr`.
    atr_value[,atr[i]]<-scale(atr_value[,atr[i]])
  }
  predone<-function(x,w){# x, w are vectors; returns P(y = 1 | x)
    ep1<-exp(w%*%x)
    if(is.infinite(ep1)){
      p1<-1-0.001                    # cap when exp() overflows
    } else{
      p1<-ep1/(1+ep1)
    }
    return(p1)
  }
  # FIX: enforce the (atr..., b) column order expected by `weight`.
  P1<-apply(atr_value[,c(atr,"b"),drop=FALSE],1,predone,w=weight)
  P0<-1-P1
  predvec<-ifelse(P1>=0.5,1,0)
  pMatdf<-data.frame(P1=P1,P0=P0,predict=predvec)
  list(FinalPredict=predvec,PredictMat=pMatdf)
}
#### Testing with the mtcars data set ####
#### Test model 1 ####
dataB6.1<-mtcars#training set
dataB6.1_pred<-mtcars[,c("mpg","cyl","disp","hp")]#predictor set for back-testing
gradlog1<-gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"))#fit the model
predLog1<-predict(gradlog1,atr=c("mpg","cyl","disp","hp"),
atr_value = dataB6.1_pred)#back-test on the training data
miss<-data.frame(newG=predLog1$FinalPredict,G=mtcars$am)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy
dataB6.1_pred2<-dataB6.1[,c("mpg","cyl","disp","hp","drat","wt","qsec")]
gradlog2<-gradLogistic(data=dataB6.1,cls="vs",
atr=c("mpg","cyl","disp","hp","drat","wt","qsec"))
predLog2<-predict(gradlog2,atr=c("mpg","cyl","disp","hp","drat","wt","qsec"),
atr_value = dataB6.1_pred2)
miss2<-data.frame(newG=predLog2$FinalPredict,G=mtcars$vs)
tbl2<-table(miss2$newG,miss2$G);tbl2
sum(diag(tbl2))/sum(tbl2)#accuracy
dataB6.1_pred3<-dataB6.1[,c("mpg","cyl","drat","wt")]
gradlog3<-gradLogistic(data=dataB6.1,cls="vs",
atr=c("mpg","cyl","drat","wt"))
predLog3<-predict(gradlog3,atr=c("mpg","cyl","drat","wt"),
atr_value = dataB6.1_pred3)
miss3<-data.frame(newG=predLog3$FinalPredict,G=mtcars$vs)
tbl3<-table(miss3$newG,miss3$G);tbl3
sum(diag(tbl3))/sum(tbl3)#accuracy
#### Iteration experiment: 50 / 100 / 1000 / 10k / 100k / 1M iterations ####
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 50)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 100)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 1000)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 10000)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 100000)
gradLogistic(data=dataB6.1,cls="am",
atr=c("mpg","cyl","disp","hp"),ept = 1e-10,maxiter = 1000000)
#### DFP算法的实现 ####
DFPLogistic<-function(cls=NULL,atr=NULL,data=NULL,scale=TRUE,ept=1e-5,
                      G0=diag(rep(1,length(atr)+1)),MoreStopRule=FALSE,
                      w0=rep(0,length(atr)+1),aita=.1,maxiter=100000,
                      SearchAita=F,maxsearch=1000){
  # Binary logistic regression fitted with a DFP quasi-Newton method.
  # cls/atr: response / predictor column names; G0: initial positive-definite
  # matrix; aita: step size (optionally line-searched when SearchAita=TRUE);
  # MoreStopRule: also stop on small likelihood / weight changes.
  # Uses random restarts when Inf/NaN appear, so results are not fully
  # deterministic in degenerate cases.
  if(!is.data.frame(data)) stop("data must be a data.frame.")
  datause<-data;datause$xb<-1# augment with an intercept column
  atrdata<-datause[,c(atr,"xb"),drop=F];atrdata<-as.matrix(atrdata)
  if(scale){# standardise predictors (mean 0, sd 1); intercept untouched
    for(i in seq_along(atr)){
      atrdata[,i]<-scale(atrdata[,i])
    }
  }
  clsdata<-datause[,cls,drop=F];clsdata<-as.matrix(clsdata)
  N<-nrow(datause)
  MinusLog<-function(wuse,y=clsdata[,1],x=atrdata){
    # Negative log-likelihood; when exp() overflows, log(1+e^z) ~ z.
    n<-nrow(atrdata)
    MLog<-vector(length = n)
    for(i in seq_len(n)){
      ep<-as.vector(wuse%*%x[i,])
      epe<-exp(ep)
      if(is.infinite(epe)){
        MLog[i]<-ep-y[i]*ep
      } else{
        MLog[i]<-log(1+epe)-y[i]*ep
      }
    }
    return(sum(MLog))
  }
  calpi<-function(x,wx){
    # P(y = 1 | x); randomly nudged off exact 0/1 to avoid saturation.
    oldex<-wx%*%x
    ex<-exp(oldex)
    if(is.infinite(ex)){
      px<-1+sample(c(-ept*10/3,-ept*6,-ept*5/4,-ept/2),1)
    } else if(ex==0){
      px<-sample(c(ept*10/3,ept*15,ept*9/4,ept*17/2),1)
    } else{
      px<-ex/(1+ex)
    }
    return(px)
  }
  calgrad<-function(dfatr,dfcls,Nl,wc){
    # Gradient of the negative log-likelihood, as an (n+1) x 1 matrix.
    pi<-apply(dfatr,1,calpi,wx=wc)
    piMinusy<-matrix(pi-dfcls[,1],nrow = Nl,ncol=1)
    gradfCal<-t(dfatr)%*%piMinusy
    return(gradfCal)
  }
  findAita<-function(dataatr,datacls,wkk,dtakk,ata_ept=1e-1,# 1-D Newton line search
                     ata0=1,maxatak=maxsearch){# wkk, dtakk are vectors
    expata1<-function(wk,dtak,ati,x){
      # sigmoid of (w - eta*d) . x, nudged away from exact 0/1
      exaita1<-as.vector((wk-ati*dtak)%*%x)
      expcal1<-exp(exaita1)
      if(is.infinite(expcal1)){
        pi1<-1+sample(c(-ata_ept/3,-ata_ept,-ata_ept/4,-ata_ept/2),1)
      } else if(expcal1==0){
        pi1<-sample(c(ata_ept/3,ata_ept,ata_ept/4,ata_ept/2),1)
      } else{
        pi1<-expcal1/(1+expcal1)
      }
      pi1
    }
    expata2<-function(wk,dtak,ati,x){
      # derivative term sigma * (1 - sigma), guarded the same way
      exaita2<-as.vector((wk-ati*dtak)%*%x)
      expcal2<-exp(exaita2)
      if(is.infinite(expcal2)){
        pi2<-sample(c(ata_ept/3,ata_ept,ata_ept/4,ata_ept/2),1)
      } else if(expcal2==0){
        pi2<-sample(c(ata_ept/3,ata_ept,ata_ept/4,ata_ept/2),1)
      } else{
        pi2<-expcal2/(1+expcal2)^2
      }
      pi2
    }
    ata<-ata0
    iteratak<-1
    while(iteratak>=1){
      p1<-apply(dataatr,1,expata1,wk=wkk,dtak=dtakk,ati=ata)
      p2<-apply(dataatr,1,expata2,wk=wkk,dtak=dtakk,ati=ata)
      ppi<-p1-datacls[,1]
      dtkM<-matrix(dtakk,nrow=length(dtakk),ncol=1)
      dtkx<-as.vector(dataatr%*%dtkM)
      H1<-as.vector(ppi%*%dtkx)        # first derivative w.r.t. eta
      H2<-as.vector(p2%*%(dtkx^2))     # second derivative w.r.t. eta
      ataold<-ata
      atanew<-ata-H1/H2                # Newton step on the step size
      ata<-atanew
      if(abs(atanew-ataold)<ata_ept) break
      if(iteratak>=maxatak) break
      iteratak<-iteratak+1
    }
    return(ata)
  }
  w<-w0# current weights, vector
  # FIX: initialise wbefore so the post-loop summary works even when the very
  # first gradient already satisfies the stopping rule (previously this
  # raised "object 'wbefore' not found").
  wbefore<-w0
  G<-G0# current approximation of the inverse Hessian
  changeG<-0
  changeW<-0
  changeAita<-0
  iterk<-1
  while(iterk>=1){
    # Restart with random weights if the iterate diverged to Inf/NaN.
    if(any(is.infinite(w))||any(is.nan(w))){
      w<-sample(seq(-2,2,length=200),length(w0))
      changeW<-changeW+1
    }
    gradf<-calgrad(dfatr=atrdata,dfcls=clsdata,Nl=N,wc=w)# (n+1) x 1 gradient
    gradfvec<-gradf[,1]
    if(sqrt(sum(gradfvec^2))<=ept){
      stoprule<-'sqrt(sum(gradfvec^2))<=ept'
      break
    }
    wbefore<-w
    # Restart G / w / aita if the DFP matrix picked up Inf/NaN entries.
    if(any(is.infinite(G))||any(is.nan(G))){
      G<-G0*sample(seq(-1,1,length=1000),1)
      w<-sample(seq(-10,10,length=200),length(w0))
      aita<-sample(seq(0.01,2,length=100),1)
      changeW<-changeW+1
      changeG<-changeG+1
      changeAita<-changeAita+1
    }
    ## optional 1-D search for the step size
    olddelta<-G%*%gradf
    if(SearchAita){
      aita<-findAita(dataatr = atrdata,datacls = clsdata,wkk = wbefore,
                     dtakk = as.vector(olddelta),ata0 = aita)
      aita<-max(0.1,aita)   # keep the step size bounded away from 0
    }
    deltak<--aita*olddelta# (n+1) x 1 step
    wnew<-wbefore+as.vector(deltak)# update w
    w<-wnew
    gradfnew<-calgrad(dfatr=atrdata,dfcls=clsdata,Nl=N,wc=wnew)
    yk<-gradfnew-gradf# gradient difference, (n+1) x 1
    # DFP update of the inverse-Hessian approximation.
    G<-G+(deltak%*%t(deltak))/as.vector(t(deltak)%*%yk)-
      (G%*%yk%*%t(yk)%*%G)/as.vector(t(yk)%*%G%*%yk)
    if(MoreStopRule){
      MinusLogBtw<-MinusLog(wuse=w)-MinusLog(wuse=wbefore)
      wBtw<-w-wbefore
      if(abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept){
        stoprule<-'abs(MinusLogBtw)<ept||sqrt(sum(wBtw^2))<ept'
        break
      }
    }
    if(iterk>=maxiter){
      stoprule<-'iterk>=maxiter'
      break
    }
    iterk<-iterk+1
  }
  names(w)<-c(atr,"b")
  # NOTE(review): the element name "changW" (sic) is kept for backward
  # compatibility with existing consumers.
  outlst<-list(weight=w,minusLogkplus1=MinusLog(wuse=w),
               minusLogk=MinusLog(wuse=wbefore),LpGradf=sqrt(sum(gradfvec^2)),
               changW=changeW,changeG=changeG,changeAita=changeAita,
               variable=atr,response=cls,
               origindata=data,iteration=iterk,
               formula=paste(cls,"~",paste(atr,collapse = "+")),
               stoprule=stoprule)
  class(outlst)<-"DFPLogistic"
  return(outlst)
}
print.DFPLogistic<-function(obj){
  # Print method: stopping rule, iteration count, formula, then the first
  # seven result components (weights, likelihoods, restart counters).
  cat("The stoprule is : ",obj$stoprule,"\n")
  cat("iteration : ",obj$iteration,"\n")
  cat("formula : ",obj$formula,"\n")
  oldlst<-options(digits = 9)
  # FIX: restore the caller's options even if print() fails part-way.
  on.exit(options(oldlst), add = TRUE)
  print(obj[1:7])
  invisible(obj)
}
predict.DFPLogistic<-function(obj,atr=NULL,atr_value=NULL){
  # Predict class probabilities / 0-1 labels for new data.
  # NOTE(review): columns are re-standardised with the TEST set's own
  # mean/sd, mirroring the training code; fine for in-sample back-testing,
  # questionable for genuine out-of-sample data -- confirm before reuse.
  weight<-obj$weight
  atr_value$b<-1                     # intercept column
  for(i in seq_along(atr)){
    # FIX: scale BY NAME. The old positional atr_value[,i] silently scaled
    # the wrong column when atr_value's column order differed from `atr`.
    atr_value[,atr[i]]<-scale(atr_value[,atr[i]])
  }
  predone<-function(x,w){# x, w are vectors; returns P(y = 1 | x)
    ep1<-exp(w%*%x)
    if(is.infinite(ep1)){
      p1<-1-0.001                    # cap when exp() overflows
    } else{
      p1<-ep1/(1+ep1)
    }
    return(p1)
  }
  # FIX: enforce the (atr..., b) column order expected by `weight`.
  P1<-apply(atr_value[,c(atr,"b"),drop=FALSE],1,predone,w=weight)
  P0<-1-P1
  predvec<-ifelse(P1>=0.5,1,0)
  pMatdf<-data.frame(P1=P1,P0=P0,predict=predvec)
  list(FinalPredict=predvec,PredictMat=pMatdf)
}
#### DFP algorithm tests ####
#### mtcars model 1: multiple stopping rules ####
dataDFPLog<-mtcars
dataDFPLog_pred<-mtcars[,c("mpg","cyl","disp","hp")]#predictor set for back-testing
DFPLog1<-DFPLogistic(data=dataDFPLog,cls="am",
atr=c("mpg","cyl","disp","hp"),ept=1e-3,
maxiter = 10000,MoreStopRule = T);DFPLog1#fit the model
predDFPLog1<-predict(DFPLog1,atr=c("mpg","cyl","disp","hp"),
atr_value = dataDFPLog_pred)#back-test on the training data
miss<-data.frame(newG=predDFPLog1$FinalPredict,G=mtcars$am)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy
#### mtcars model 2: different response, single stopping rule, with line search ####
DFPLog2<-DFPLogistic(data=dataDFPLog,cls="vs",
atr=c("mpg","cyl","disp","hp"),ept=1e-3,
maxiter = 100000,MoreStopRule = F,SearchAita = T);DFPLog2
#### Comparison: without the 1-D line search ####
DFPLogistic(data=dataDFPLog,cls="vs",
atr=c("mpg","cyl","disp","hp"),ept=1e-3,
maxiter = 100000,MoreStopRule = F,SearchAita = F)#no 1-D line search
#### mtcars model 3: different response and predictors, single stopping rule ####
DFPLog3<-DFPLogistic(data=dataDFPLog,cls="vs",
atr=c("mpg","hp","wt","qsec"),ept=1e-3,
maxiter = 10000,MoreStopRule = F);DFPLog3
#### iris data set: preparation ####
dataDFPLog_iris<-iris[1:100,]
dataDFPLog_iris$Species<-ifelse(dataDFPLog_iris$Species=="setosa",1,0)
trainlab<-sample(100,50)
dataDFPiris_train<-dataDFPLog_iris[trainlab,]
dataDFPiris_test<-dataDFPLog_iris[-trainlab,]
dataDFPiris_test_atr<-dataDFPiris_test[,-5]#test feature set
#### iris model 1 ####
DFPLogiris1<-DFPLogistic(cls="Species",data=dataDFPiris_train,
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris1
predDFPLogiris1<-predict(DFPLogiris1,atr_value = dataDFPiris_test_atr,#prediction
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"))
miss<-data.frame(newG=predDFPLogiris1$FinalPredict,G=dataDFPiris_test$Species)
tbl<-table(miss$newG,miss$G);tbl
sum(diag(tbl))/sum(tbl)#prediction accuracy
#### iris model 2 ####
DFPLogiris2<-DFPLogistic(cls="Species",data=dataDFPiris_train,
atr=c("Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris2
predDFPLogiris2<-predict(DFPLogiris2,atr=c("Petal.Length","Petal.Width"),
atr_value = dataDFPiris_test_atr[,-c(1,2)])#back-test
miss<-data.frame(newG=predDFPLogiris2$FinalPredict,G=dataDFPiris_test$Species)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy
#### iris model 3: the other two species ####
dataDFPLog_iris2<-iris[51:150,]
dataDFPLog_iris2$Species<-ifelse(dataDFPLog_iris2$Species=="versicolor",1,0)
trainlab2<-sample(1:100,50)
dataDFPiris_train2<-dataDFPLog_iris2[trainlab2,]
dataDFPiris_test2<-dataDFPLog_iris2[-trainlab2,]
dataDFPiris_test_atr2<-dataDFPiris_test2[,-5]#test feature set
DFPLogiris2_1<-DFPLogistic(cls="Species",data=dataDFPiris_train2,
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris2_1
predDFPLogiris2_1<-predict(DFPLogiris2_1,
atr=c("Sepal.Length","Sepal.Width","Petal.Length","Petal.Width"),
atr_value = dataDFPiris_test_atr2)#back-test
miss<-data.frame(newG=predDFPLogiris2_1$FinalPredict,G=dataDFPiris_test2$Species)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy 0.92
# NOTE(review): the next model is fitted on dataDFPiris_train (the model-1
# split) but evaluated on the model-3 test set -- probably meant
# dataDFPiris_train2; confirm before relying on the reported accuracy.
DFPLogiris2_2<-DFPLogistic(cls="Species",data=dataDFPiris_train,
atr=c("Petal.Length","Petal.Width"),
ept=1e-3,MoreStopRule = F,maxiter = 10000);DFPLogiris2_2
predDFPLogiris2_2<-predict(DFPLogiris2_2,atr=c("Petal.Length","Petal.Width"),
atr_value = dataDFPiris_test_atr2[,-c(1,2)])#back-test
miss<-data.frame(newG=predDFPLogiris2_2$FinalPredict,G=dataDFPiris_test2$Species)
tbl1<-table(miss$newG,miss$G);tbl1
sum(diag(tbl1))/sum(tbl1)#accuracy 0.88
#### 第七章 支持向量机 ####
#### 基于SMO算法的SVM实现 线性及非线性 ####
## 编写核函数,输入为一个非扩充的特征矩阵,行数为样本id,列为特征;输出为核函数的Gram矩阵 ####
lineKernel<-function(data){
  # Linear-kernel Gram matrix: G[i, j] = <x_i, x_j> over the rows of `data`
  # (a data.frame or matrix of features).
  x <- as.matrix(data)
  tcrossprod(x)   # == x %*% t(x)
}
polyKernel<-function(data,p=2){
  # Polynomial-kernel Gram matrix: (<x_i, x_j> + 1)^p over the rows of `data`.
  x <- as.matrix(data)
  (tcrossprod(x) + 1)^p
}
gaussiKernel<-function(data,lmada=sqrt(ncol(data))/sqrt(2),Lp=2){
  # Gaussian (RBF-style) Gram matrix based on the Lp distance between rows:
  # G[i, j] = exp(-d_Lp(x_i, x_j)^2 / (2 * lmada^2)).
  # lmada: kernel width (defaults to sqrt(#features / 2)); Lp: norm order.
  dMat<-as.matrix(data)
  ln<-nrow(dMat)
  Gram1<-matrix(NA,nrow=ln,ncol=ln)
  # FIX: seq_len() instead of 1:ln, so an empty input yields an empty Gram
  # matrix rather than indexing with 1:0.
  for(i in seq_len(ln)){# rows
    for(j in seq_len(ln)){# columns
      dij<-dMat[i,]-dMat[j,]
      Lpdij<-(sum(abs(dij)^Lp))^(1/Lp)  # Lp distance between the two rows
      Gram1[i,j]<-Lpdij^2
    }
  }
  exp(Gram1/(-2*(lmada^2)))
}
#计算g(x)
gfunc<-function(clsvec,gram,alphak,bk){
  # Decision values g(x_i) = sum_j alpha_j * y_j * K(x_i, x_j) + b for every
  # training sample. clsvec / alphak: label and multiplier vectors;
  # gram: kernel Gram matrix; bk: threshold. Returns a numeric vector.
  ncls <- length(clsvec)
  vapply(seq_len(ncls),
         function(i) sum(alphak * clsvec * gram[i, ]) + bk,
         numeric(1))
}
# Quick type checks of the kernel helpers (requires a sample feature matrix
# `testx` defined earlier in the session).
# FIX: R argument matching is case-sensitive, so the original `Gram=` did not
# match gfunc()'s `gram` parameter and raised "unused argument".
is(gfunc(clsvec = c(1,-1,1),gram=testx,alphak = c(1,2,3),bk=3))
is(lineKernel(data=testx))
is(polyKernel(data=testx))
is(gaussiKernel(data=testx))
#### 选择工作工作集的方法 ####
findAlpha<-function(alphak,gveck,ept,C,clsvec){
  # Pick the training sample that most violates the KKT conditions.
  # alphak: current multipliers; gveck: g(x_i) values; C: box constraint;
  # ept: tolerance. Returns list(alllab = all violators,
  # outlab = candidate pool, labmax = worst violator or NULL).
  ygk<-clsvec*gveck
  lab1<-which(alphak==0)
  lab2<-which(alphak==C)
  lab3<-which(alphak>0&alphak<C)
  alllab<-NULL
  if(length(lab1)>=1){
    ygkright<-ygk[lab1]
    yuselab1<-which(ygkright<(1-ept))# KKT requires y*g >= 1 here
    alllab<-c(alllab,lab1[yuselab1])
  }
  if(length(lab2)>=1){
    ygkerror<-ygk[lab2]
    yuselab2<-which(ygkerror>(1+ept))# KKT requires y*g <= 1 here
    alllab<-c(alllab,lab2[yuselab2])
  }
  if(length(lab3)>=1){
    ygksupport<-ygk[lab3]
    ygkuse<-abs(ygksupport-1)
    yuselab3<-which(ygkuse>ept)# support vectors need y*g == 1
    alllab<-c(alllab,lab3[yuselab3])
  }
  ## check the support vectors first
  # FIX: inherits = FALSE -- the original bare exists() could find a stale
  # `yuselab3` in an enclosing environment whenever lab3 was empty.
  if(exists("yuselab3", inherits = FALSE)&&length(yuselab3)>=1){
    outlab<-lab3[yuselab3]
  } else{# then fall back to all other violators
    outlab<-alllab
  }
  if(!is.null(outlab)){
    ygkMinus1<-abs(ygk-1)[outlab]
    labmax<-outlab[which.max(ygkMinus1)]
  } else{
    labmax<-NULL
  }
  list(alllab=alllab,outlab=outlab,labmax=labmax)
}
# Step through findAlpha() interactively on a toy all-zero input.
debug(findAlpha)
findAlpha(alphak = c(0,0,0,0,0),gveck = c(0,0,0,0,0),ept=1e-2,C=4,clsvec = c(1,1,-1,-1,1))
undebug(findAlpha)
#### 另一种工作集选取算法 ####
SelectFunc<-function(gram,clsdata,C,alphak){
  # SMO working-set selection: find the most-violating pair via the gradient
  # of the dual objective. gram: Gram matrix; clsdata: N x 1 label matrix;
  # C: box constraint; alphak: current multipliers.
  y <- clsdata[,1]
  n <- length(alphak)
  qmat <- (clsdata %*% t(clsdata)) * gram          # Q[i,j] = y_i y_j K_ij
  dual_grad <- as.vector(qmat %*% matrix(alphak, nrow = n, ncol = 1)) - rep(1, n)
  score <- -y * dual_grad
  up_set <- which((alphak < C & y == 1) | (alphak > 0 & y == -1))
  low_set <- which((alphak < C & y == -1) | (alphak > 0 & y == 1))
  i_up <- up_set[which.max(score[up_set])]
  i_low <- low_set[which.min(score[low_set])]
  list(chsIup = i_up, chsIlow = i_low, chsm = score[i_up], chsM = score[i_low])
}
smoSVM<-function(cls,atr,data,Kernel=c("line","poly","gaussi"),scale=T,
                 C=10,ept=1e-2,alpha0=rep(0,nrow(data)),b0=0,p=2,Lp=2,
                 lmada=sqrt(ncol(data))/sqrt(2),maxiter=10000,Change=T){
  # Soft-margin SVM trained with a simplified SMO algorithm.
  # Kernel: "line", "poly" (degree p) or "gaussi" (width lmada, Lp norm);
  # C: box constraint; Change = TRUE uses most-violating-pair selection
  # (SelectFunc), FALSE uses the KKT-violation heuristic (findAlpha).
  if(!is.data.frame(data)) stop("data must be a data.frame.")
  # FIX: the old `Kernel=="line"` tests received the length-3 default vector
  # when `Kernel` was omitted, which is an error in modern R; match.arg()
  # defaults to "line" and rejects unknown kernels.
  Kernel<-match.arg(Kernel)
  datause<-data
  N<-nrow(datause)
  lN<-1:N
  atrdata<-datause[,atr,drop=F];atrdata<-as.matrix(atrdata)
  clsdata<-datause[,cls,drop=F];clsdata<-as.matrix(clsdata)
  clsVec<-clsdata[,1]
  if(scale){# standardise predictors (mean 0, sd 1)
    for(i in seq_along(atr)){
      atrdata[,i]<-scale(atrdata[,i])
    }
  }
  ## Gram matrix of the chosen kernel
  if(Kernel=="line") Gram<-lineKernel(data=atrdata)
  if(Kernel=="poly") Gram<-polyKernel(data=atrdata,p=p)
  if(Kernel=="gaussi") Gram<-gaussiKernel(data=atrdata,lmada = lmada,Lp=Lp)
  alpha<-alpha0
  b<-b0
  iterk<-1
  while(iterk>=1){
    gk<-gfunc(clsvec = clsVec,gram=Gram,alphak = alpha,bk=b)# decision values
    Ek<-gk-clsVec# prediction errors, vector
    # choose the working pair (alp1, alp2)
    if(Change){
      Clst<-SelectFunc(gram=Gram,clsdata=clsdata,C=C,alphak = alpha)
      alp1<-Clst$chsIup;Ekalp1<-Ek[alp1]
      alp2<-Clst$chsIlow;Ekalp2<-Ek[alp2]
      malp<-Clst$chsm;Malp<-Clst$chsM
      y1k<-clsVec[alp1];y2k<-clsVec[alp2]
      # stop when the most-violating pair meets the optimality gap
      if((malp-Malp)<=ept){
        stoprule<-"(malp-Malp)<=ept"
        break
      }
    } else{
      lst<-findAlpha(alphak = alpha,gveck = gk,ept=ept,C=C,clsvec = clsVec)
      alp1<-lst$labmax;alllab<-lst$alllab;outlab<-lst$outlab
      ## stop when (almost) no sample violates the KKT conditions
      if(is.null(alp1)||(length(alllab)/N)<ept||(length(outlab)/N)<ept){
        stoprule<-"is.null(alp1)||(length(alllab)/N)<ept||(length(outlab)/N)<ept"
        break
      }
      Ekalp1<-Ek[alp1]
      y1k<-clsVec[alp1]
      chooselN<-lN[-alp1];chooseEk<-Ek[-alp1]
      # second index: maximise |E1 - E2|
      alp2<-ifelse(Ekalp1>0,chooselN[which.min(chooseEk)],chooselN[which.max(chooseEk)])
      y2k<-clsVec[alp2]
      Ekalp2<-Ek[alp2]
    }
    alp2old<-alpha[alp2];alp1old<-alpha[alp1]
    # clipping bounds and the curvature term nk
    Hk<-ifelse(y1k==y2k,min(C,alp2old+alp1old),min(C,C+alp2old-alp1old))
    Lk<-ifelse(y1k==y2k,max(0,alp2old+alp1old-C),max(0,alp2old-alp1old))
    k11<-Gram[alp1,alp1];k22<-Gram[alp2,alp2];k12<-Gram[alp1,alp2];k21<-k12
    nk<-k11+k22-2*k12
    # unconstrained update of alpha2, then clip to [Lk, Hk]
    alp2kplus1_unc<-alp2old+y2k*(Ekalp1-Ekalp2)/nk
    if(alp2kplus1_unc>Hk){
      alpha[alp2]<-Hk
    } else if(alp2kplus1_unc<Lk){
      if(Lk!=0){
        alpha[alp2]<-Lk
      } else if(Lk==0&&alp2old!=0){
        alpha[alp2]<-Lk
      } else{# Lk == 0 and alpha2 already 0: random restart to avoid stalling
        alpha[alp2]<-sample(c(.15,.1,ept/6,ept*10,ept),1)
      }
    } else{
      alpha[alp2]<-alp2kplus1_unc
    }
    alpha[alp1]<-alp1old+y1k*y2k*(alp2old-alpha[alp2])
    ## both multipliers updated
    alp1new<-alpha[alp1];alp2new<-alpha[alp2]
    ## update the threshold b
    b_old<-b
    b1kplus1<--Ekalp1-y1k*k11*(alp1new-alp1old)-y2k*k21*(alp2new-alp2old)+b_old
    b2kplus1<--Ekalp2-y1k*k12*(alp1new-alp1old)-y2k*k22*(alp2new-alp2old)+b_old
    if(alp1new>0&&alp1new<C){
      b<-b1kplus1
    } else if(alp2new>0&&alp2new<C){
      b<-b2kplus1
    } else if((alp1new==0||alp1new==C)&&(alp2new==0||alp2new==C)){
      b<-(b2kplus1+b1kplus1)/2
    } else if((alp1new>0&&alp1new<C)&&(alp2new>0&&alp2new<C)){
      b<-b1kplus1
    }
    if(iterk>=maxiter){
      stoprule<-"iterk>=maxiter"
      break
    }
    iterk<-iterk+1
  }
  nonzero<-which(alpha!=0);lnz<-length(nonzero)
  nonZeroAlpha<-alpha[nonzero]
  names(nonZeroAlpha)<-nonzero
  alpy<-alpha[nonzero]*clsVec[nonzero]
  if(Kernel=="line"){
    w<-rep(0,length(atr))
    # FIX: seq_len -- the old 1:lnz crashed when no alpha was non-zero.
    for(i in seq_len(lnz)){
      w<-w+alpy[i]*atrdata[nonzero[i],]
    }
  } else{
    w<-NULL
  }
  bvec<-vector(length = lnz)
  for(j in seq_len(lnz)){
    # FIX: the j-th support vector lives in column nonzero[j] of the Gram
    # matrix; the old Gram[nonzero, j] used the loop index as a column and
    # computed b from the wrong kernel values whenever the support vectors
    # were not the first lnz samples.
    gramvec<-as.vector(Gram[nonzero,nonzero[j]])
    bvec[j]<-clsVec[nonzero[j]]-as.vector(alpy%*%gramvec)
  }
  outlst<-list(nonZeroAlpha=nonZeroAlpha,bMean=mean(bvec),support=nonzero,w=w,
               stoprule=stoprule,formula=paste(cls,"~",paste0(atr,collapse = "+")),
               variables=atr,response=cls,iteration=iterk,clsvec=clsVec,
               ScaleAtr=atrdata,Gram=Gram,Kernel=Kernel,data=data,p=p,Lp=Lp,lmada=lmada)
  class(outlst)<-"smoSVM"
  return(outlst)
}
print.smoSVM<-function(obj){
  # Print method: stopping rule, iterations, formula, then the main results
  # (alpha / b / support indices, plus the weight vector for a linear kernel).
  cat("The stoprule is : ",obj$stoprule,"\n")
  cat("iteration : ",obj$iteration,"\n")
  cat("formula : ",obj$formula,"\n")
  # FIX: removed the dead `oldlst<-options()` -- the options were never
  # changed nor restored, so the assignment had no effect.
  if(obj$Kernel=="line"){
    print(obj[1:4])
  } else{
    print(obj[1:3])
  }
  invisible(obj)
}
predict.smoSVM<-function(SVMobj,cls,atr,atr_value,scale=T){
  # Predict +1 / -1 labels for new samples with a fitted smoSVM model.
  # NOTE(review): with scale = TRUE the test columns are standardised with
  # the TEST set's own mean/sd (training stats are not stored); fine for
  # back-testing on the training data, confirm before true out-of-sample use.
  testdata<-as.matrix(atr_value)
  if(scale){
    for(i in seq_along(atr)){
      # FIX: scale BY NAME so a different column order in atr_value cannot
      # silently standardise the wrong column.
      testdata[,atr[i]]<-scale(testdata[,atr[i]])
    }
  }
  testdata<-testdata[,atr,drop=FALSE]  # align column order with training
  usealpha<-SVMobj$nonZeroAlpha
  support<-SVMobj$support
  b<-SVMobj$bMean
  Kernel<-SVMobj$Kernel
  traincls<-SVMobj$clsvec
  if(scale){
    trainatr<-SVMobj$ScaleAtr          # standardised training features
  } else{
    trainatr<-as.matrix(SVMobj$data[,atr])
  }
  ## prediction
  usecls<-traincls[support]
  usetrainAtr<-trainatr[support,,drop=F]
  alpy<-usealpha*usecls
  predoneSVM<-function(x){
    # kernel values between x and the support vectors, then sign(g(x));
    # ties (sign == 0) are mapped to -1, as in the original.
    newAtr<-rbind(x,usetrainAtr)
    if(Kernel=="line") gram<-lineKernel(data=newAtr)
    if(Kernel=="poly") gram<-polyKernel(data=newAtr,p=SVMobj$p)
    if(Kernel=="gaussi") gram<-gaussiKernel(data=newAtr,lmada=SVMobj$lmada,Lp=SVMobj$Lp)
    kvec<-gram[1,2:(length(support)+1)]
    sgn<-sign(alpy%*%kvec+b)
    ifelse(sgn>0,1,-1)
  }
  apply(testdata,1,predoneSVM)
}
#### Function tests ####
#### mtcars data set ####
datasvm_mtcars<-mtcars[,c(1,3,4,5,6,7,9)]#training set
rownames(datasvm_mtcars)<-NULL
datasvm_mtcars$am<-ifelse(datasvm_mtcars$am==0,-1,1)
datasvm_mtcars_pred<-datasvm_mtcars[,c("mpg","disp","hp","qsec","drat")]
#### mtcars model 1: linear kernel ####
svm1<-smoSVM(cls="am",atr=c("mpg","disp","hp","qsec","drat"),data=datasvm_mtcars,
Kernel = "line",maxiter = 100000,ept=1e-2,C=10,
scale = T,Change = T);svm1#fit the model
pmt1<-predict(svm1,atr=c("mpg","disp","hp","qsec","drat"),cls="am",
atr_value =datasvm_mtcars_pred,scale = TRUE );pmt1#back-test
tblmt1<-table(datasvm_mtcars$am,pmt1);tblmt1
sum(diag(tblmt1))/sum(tblmt1)#back-test accuracy, 0.875
#### mtcars model 2: Gaussian radial-basis kernel ####
svm2<-smoSVM(cls="am",atr=c("mpg","disp","hp","qsec","drat"),data=datasvm_mtcars,
Kernel = "gaussi",maxiter = 100000,ept=1e-2,C=10,
scale = T,Change = T);svm2#fit the model
pmt2<-predict(svm2,atr=c("mpg","disp","hp","qsec","drat"),cls="am",
atr_value =datasvm_mtcars_pred,scale = TRUE );pmt2#back-test
tblmt2<-table(datasvm_mtcars$am,pmt2);tblmt2
sum(diag(tblmt2))/sum(tblmt2)#back-test accuracy, 0.96875
#### mtcars model 3: polynomial kernel ####
svm3<-smoSVM(cls="am",atr=c("mpg","disp","hp","qsec","drat"),data=datasvm_mtcars,
Kernel = "poly",maxiter = 100000,ept=1e-2,C=10,p=3,
scale = T,Change = T);svm3#fit the model
pmt3<-predict(svm3,atr=c("mpg","disp","hp","qsec","drat"),cls="am",
atr_value =datasvm_mtcars_pred,scale = TRUE );pmt3#back-test
tblmt3<-table(datasvm_mtcars$am,pmt3);tblmt3
sum(diag(tblmt3))/sum(tblmt3)#back-test accuracy, 0.90625
#### iris data set ####
datasvmiris<-iris[1:100,]
datasvmiris$Species<-ifelse(datasvmiris$Species=="setosa",1,-1)
trainlab<-sample(1:100,70)
datasvmiris_train<-datasvmiris[trainlab,]
datasvmiris_test<-datasvmiris[-trainlab,]
datasvmiris_test_atr<-datasvmiris_test[,-5]
#### iris model 1: linear kernel ####
# NOTE(review): these iris models are fitted on the FULL datasvmiris rather
# than datasvmiris_train, so the "prediction" overlaps the training data.
svmiris1<-smoSVM(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
data=datasvmiris,Kernel = "line",maxiter = 10000,
ept=1e-2,C=5,scale = T,Change = TRUE);svmiris1
piris1<-predict(svmiris1,atr=c("Sepal.Length","Sepal.Width"),cls="Species",
atr_value =datasvmiris_test_atr[,1:2],scale = TRUE );piris1#prediction
tbliris1<-table(datasvmiris_test$Species,piris1);tbliris1
sum(diag(tbliris1))/sum(tbliris1)#prediction accuracy, 0.9333333
#### iris model 2: Gaussian radial-basis kernel ####
svmiris2<-smoSVM(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
data=datasvmiris,Kernel = "gaussi",maxiter = 10000,
ept=1e-2,C=5,scale = T,Change = TRUE,Lp=3);svmiris2
piris2<-predict(svmiris2,atr=c("Sepal.Length","Sepal.Width"),cls="Species",
atr_value =datasvmiris_test_atr[,1:2],scale = TRUE );piris2#prediction
tbliris2<-table(datasvmiris_test$Species,piris2);tbliris2
sum(diag(tbliris2))/sum(tbliris2)#prediction accuracy, 0.8666667
#### iris model 3: polynomial kernel ####
svmiris3<-smoSVM(cls="Species",atr=c("Sepal.Length","Sepal.Width"),
data=datasvmiris,Kernel = "poly",maxiter = 10000,
ept=1e-2,C=5,scale = T,Change = TRUE,p=3);svmiris3
piris3<-predict(svmiris3,atr=c("Sepal.Length","Sepal.Width"),cls="Species",
atr_value =datasvmiris_test_atr[,1:2],scale = TRUE );piris3#prediction
tbliris3<-table(datasvmiris_test$Species,piris3);tbliris3
sum(diag(tbliris3))/sum(tbliris3)#prediction accuracy, 0.9333333
#### 第八章 提升方法 ####
#### 例8.1的R实现 ####
## 计算模型Gm的系数alpham
AdaboostAlpha<-function(em){
  # AdaBoost model coefficient alpha_m = (1/2) * log((1 - em) / em) from the
  # weighted error rate em in [0, 1].
  # em == 0 -> Inf (perfect weak learner); em == 1 -> -Inf.
  if(em==0) return(Inf)
  if(em==1) return(-Inf)
  # && is the correct scalar operator for an if() condition (the original
  # vectorised & happened to work but is the wrong idiom).
  if(em>0&&em<1) return((1/2)*log((1-em)/em))
  # FIX: the original left `alpham` undefined for em outside [0, 1] and
  # failed with an obscure "object not found" error.
  stop("em must be in [0, 1].")
}
## 计算样本权重m+1次迭代,Dm+1
AdaboostWeight<-function(weightm,alpham,clsvec,preclsvec){
  # Updated sample weights D_{m+1}: w_i * exp(-alpha_m * y_i * G_m(x_i)),
  # renormalised to sum to 1. alpham must be finite (not Inf / -Inf / NaN).
  unnorm <- weightm * exp(-alpham * clsvec * preclsvec)
  unnorm / sum(unnorm)
}
## Weighted misclassification rate e_m: the total weight of the samples
## whose predicted label differs from the true label (all inputs vectors).
AdaboostError<-function(clsvec,preclsvec,weightm){
  miss<-which(clsvec!=preclsvec)
  sum(weightm[miss])
}
## A simple decision-stump learner: one exhaustive threshold search.
## Candidate thresholds are atrvec + sep (last one dropped); for each
## threshold both orientations are scored with the weighted error
## AdaboostError() (defined earlier in this file):
##   "low": x <= s -> +1, else -1        "up": x <= s -> -1, else +1
## Returns the minimal weighted error, a printable model string and the
## fitted labels on the training data.
## BUG FIX: the original decided the orientation with `finalab==lowmin`,
## comparing indices; when both orientations attain their minimum at the
## same index but the "up" orientation is strictly better (e.g. 2 points
## with labels c(-1, 1)), the "low" model was wrongly returned while the
## reported error belonged to the "up" model. The winning branch is now
## tracked explicitly; ties still favour the "low" model as before.
SearchOneTree<-function(atr,cls,weightm,data,sep=0.5){
  atrvec<-data[,atr];clsvec<-data[,cls]
  latr<-length(atrvec)
  searchx<-(atrvec+sep)[-latr]
  emveclow<-vector(length = latr-1)
  emvecup<-vector(length = latr-1)
  for(i in 1:(latr-1)){
    sch<-searchx[i]
    clslow<-ifelse(atrvec<=sch,1,-1)
    clsup<-ifelse(atrvec<=sch,-1,1)
    emveclow[i]<-AdaboostError(weightm = weightm,clsvec=clsvec,preclsvec = clslow)
    emvecup[i]<-AdaboostError(weightm = weightm,clsvec=clsvec,preclsvec = clsup)
  }
  lowmin<-which.min(emveclow);upmin<-which.min(emvecup)
  # choose the orientation by error value, never by index identity
  use_low<-emveclow[lowmin]<=emvecup[upmin]
  error<-if(use_low) emveclow[lowmin] else emvecup[upmin]
  if(use_low){
    ModelFinal<-paste("Model:: ",atr,"<=",searchx[lowmin]," is ","1"," else"," -1.",sep = "")
    preclsvec<-ifelse(atrvec<=searchx[lowmin],1,-1)
  } else{
    ModelFinal<-paste("Model:: ",atr,">",searchx[upmin]," is ","1"," else"," -1.",sep = "")
    preclsvec<-ifelse(atrvec<=searchx[upmin],-1,1)
  }
  list(error=error,ModelFinal=ModelFinal,preclsvec=preclsvec)
}
#### Example 8.1 in one call ####
## AdaBoost with decision-stump base learners on a single attribute.
## atr/cls name the attribute and the +1/-1 class column in `data`;
## weight0 is the initial sample-weight distribution (uniform by default
## — the default references clsvec, which works only because R evaluates
## it lazily at the first use of `weight`, after clsvec is defined in the
## body); ept is the target training error of the combined model; sep is
## the threshold offset forwarded to SearchOneTree().
AdaboostTreeStool<-function(atr,cls,data,weight0=rep(1/length(clsvec),length(clsvec)),
ept=0,maxiter=10000,sep=.5){
atrvec<-data[,atr];clsvec<-data[,cls]
weight<-weight0
f<-rep(0,length(clsvec))  # additive model f_m = sum_m alpha_m * G_m
Gmodel<-NULL
Galpha<-NULL
Gerror<-NULL
iterk<-1
while(iterk>=1){
# fit the next weighted stump
G<-SearchOneTree(atr=atr,cls=cls,data=data,weightm = weight,sep = sep)
err<-G$error;pcls<-G$preclsvec
if(err==0||err==1){
# degenerate stump: alpha would be +/-Inf, so stop immediately
stoprule<-"err==0||err==1"
outlst<-list(stoprule=stoprule,Model=G$ModelFinal,error=err)
break
}
ModelG<-G$ModelFinal
Gmodel<-c(Gmodel,ModelG)
Gerror<-c(Gerror,err)
alpha<-AdaboostAlpha(err)
Galpha<-c(Galpha,alpha)
D<-AdaboostWeight(weightm = weight,alpham = alpha,clsvec = clsvec,preclsvec = pcls)
weight<-D
# NOTE(review): sign(f) == 0 is deliberately mapped to the -1 class here
f<-f+alpha*pcls;sgnf<-sign(f);sgnf<-ifelse(sgnf==1,1,-1)
errf<-1-sum(sgnf==clsvec)/length(clsvec)# misclassification rate of f
if(errf<=ept){
stoprule<-"errf<=ept"
outlst<-list(stoprule=stoprule,errf=errf,iteration=iterk,AdaboostModel=Gmodel,
AdaboostAlpha=Galpha,AdaboostError=Gerror,AdaboostPredict=sgnf)
break
}
if(iterk>=maxiter){
stoprule<-"iterk>=maxiter"
outlst<-list(stoprule=stoprule,errf=errf,iteration=iterk,AdaboostModel=Gmodel,
AdaboostAlpha=Galpha,AdaboostError=Gerror,AdaboostPredict=sgnf)
break
}
iterk<-iterk+1
}
return(outlst)
}
## Example 8.1 data: x = 0..9 with the book's +1/-1 labels
x<-0:9
y<-c(1,1,1,-1,-1,-1,1,1,1,-1)
dataxy<-data.frame(x=x,y=y);dataxy
## Solve for the first stump model (uniform weights)
D1<-rep(1/10,10)
G1<-SearchOneTree(atr="x",cls = "y",data=dataxy,weightm =D1);G1
## Coefficient alpha1 of model G1 and the partial sum f1
alpha1<-AdaboostAlpha(G1$error);alpha1
f1<-alpha1*G1$preclsvec;f1
## Update the training-sample weight distribution D1 -> D2
D2<-AdaboostWeight(weightm = D1,alpham = alpha1,clsvec = y,preclsvec = G1$preclsvec);D2
## Solve for the second stump model
G2<-SearchOneTree(atr="x",cls = "y",data=dataxy,weightm = D2);G2
## Coefficient alpha2 of model G2 and the partial sum f2
alpha2<-AdaboostAlpha(G2$error);alpha2
f2<-alpha2*G2$preclsvec+f1;f2
## Update the weight distribution D2 -> D3
D3<-AdaboostWeight(weightm = D2,alpham = alpha2,clsvec = y,preclsvec = G2$preclsvec);D3
## Solve for the third stump model
G3<-SearchOneTree(atr="x",cls = "y",data=dataxy,weightm = D3);G3
## Coefficient alpha3 of model G3 and the partial sum f3
alpha3<-AdaboostAlpha(G3$error);alpha3
f3<-alpha3*G3$preclsvec+f2;f3
## Update the weight distribution D3 -> D4
D4<-AdaboostWeight(weightm = D3,alpham = alpha3,clsvec = y,preclsvec = G3$preclsvec);D4
## Misclassification rate of the combined model f3
tblf3<-table(sign(f3),y);tblf3
1-sum(diag(tblf3))/sum(tblf3)
## The same result in one call via AdaboostTreeStool()
AdaboostTreeStool(atr="x",cls = "y",data=dataxy)
## Exercise AdaboostTreeStool() on the mtcars/iris data sets ##
datamtcars1<-mtcars[,c("mpg","am")]
datamtcars1$am<-ifelse(datamtcars1$am==1,1,-1)
AdaboostTreeStool(atr="mpg",cls = "am",data=datamtcars1,ept=.1,sep=0)
datamtcars2<-mtcars[,c("mpg","vs")]
datamtcars2$vs<-ifelse(datamtcars2$vs==1,1,-1)
AdaboostTreeStool(atr="mpg",cls = "vs",data=datamtcars2,ept=.1,sep=0)
datairis<-iris[1:100,c(3,5)]
datairis$Species<-ifelse(datairis$Species=="setosa",1,-1)
AdaboostTreeStool(atr="Petal.Length",cls = "Species",data=datairis)
#### Chapter 9: the EM algorithm ####
#### EM parameter estimation for a Gaussian mixture model ####
## clsvec : observed data; K : number of mixture components;
## mean0/var0/alpha0 : initial means, variances and mixing weights.
## Iterates E steps (gaussiResponse(), defined below) and M steps
## (gaussiParameter(), defined below) until the Lp norm of the change in
## (mean, var) drops to ept or maxiter is reached.
gaussiEM<-function(clsvec,K=2,mean0=rep(0,K),var0=rep(1,K),alpha0=rep(1/K,K),
ept=1e-1,maxiter=10000,Lp=2){
lN<-length(clsvec)
mean<-mean0
var<-var0
alpha<-alpha0
iterk<-1
while(iterk>=1){
parameterold<-c(mean,var)
rjkMat<-gaussiResponse(clsvec = clsvec,K=K,Ml=mean,Vl=var,Alpl=alpha)  # E step
paralst<-gaussiParameter(rMat = rjkMat,clsvec = clsvec,K=K)            # M step
mean<-paralst$M
var<-paralst$V
alpha<-paralst$A
parameternew<-c(mean,var)
# BUG FIX: take |new - old| before raising to the Lp power; without
# abs() an odd Lp can make the "norm" negative or NaN. Unchanged for
# the default Lp = 2, and consistent with the norms used elsewhere.
pnewMinusoldLp<-abs(parameternew-parameterold)^Lp
Lpvalue<-sum(pnewMinusoldLp)^(1/Lp)
if(Lpvalue<=ept){
stoprule<-"Lpvalue<=ept"
break
}
if(iterk>=maxiter){
stoprule<-"iterk>=maxiter"
break
}
#print(mean);print(var);print(alpha)
iterk<-iterk+1
}
outlst<-list(stoprule=stoprule,iteration=iterk,Mean=mean,
Var=var,Alpha=alpha,K=K)
class(outlst)<-"gaussiEM"
return(outlst)
}
## E-step of the Gaussian-mixture EM: responsibility matrix with
## r[j, k] proportional to alpha_k * N(y_j | mu_k, sigma2_k), each row
## normalised to sum 1. A zero density is replaced by a randomly drawn
## tiny positive value so the row normalisation never divides by zero
## (same safeguard as the original).
gaussiResponse<-function(clsvec,K=2,Ml=rep(0,K),Vl=rep(1,K),Alpl=rep(1/K,K)){
  nobs<-length(clsvec)
  resp<-matrix(0,nrow=nobs,ncol=K)
  for(j in seq_len(nobs)){
    dens<-vector(length = K)
    for(k in seq_len(K)){
      dk<-Alpl[k]*dnorm(clsvec[j],mean=Ml[k],sd=sqrt(Vl[k]))
      if(dk==0) dk<-sample(c(1e-20,1e-30,1e-40,1e-100,1e-65),1)
      dens[k]<-dk
    }
    resp[j,]<-dens/sum(dens)
  }
  return(resp)
}
## M-step of the Gaussian-mixture EM: given the responsibility matrix
## rMat (rows = observations, columns = components), recompute each
## component's mean (M), variance (V) and mixing weight (A).
gaussiParameter<-function(rMat,clsvec,K=2){
  N<-length(clsvec)
  M<-vector(length = K)
  V<-vector(length = K)
  A<-vector(length = K)
  for(k in 1:K){
    resp<-rMat[,k];tot<-sum(resp)
    mu<-sum(resp*clsvec)/tot                 # responsibility-weighted mean
    M[k]<-mu
    V[k]<-sum(resp*(clsvec-mu)^2)/tot        # weighted variance about mu
    A[k]<-tot/N                              # mixing proportion
  }
  list(M=M,V=V,A=A)
}
## S3 print method for "gaussiEM" fits: shows the stopping rule, the
## iteration count, the number of mixture components, then the estimated
## Mean/Var/Alpha (elements 3:5 of the result list).
## NOTE(review): the S3 convention is print(x, ...); `obj` still
## dispatches correctly because the argument is matched positionally.
print.gaussiEM<-function(obj){
cat("Stoprule : ",obj$stoprule,"\n")
cat("iteration : ",obj$iteration,"\n")
cat("the number of gaussi is : ",obj$K,"\n")
print(obj[3:5])
}
## Test 1: data from a two-component Gaussian mixture ##
comp <- sample(c(0, 1), size = 10000, prob = c(0.7, 0.3), replace = T)
sim1<-rnorm(10000, mean = ifelse(comp == 0, 0, 1), sd = ifelse(comp == 0, 1, 2))
g1<-gaussiEM(clsvec = sim1,ept=1e-3,mean0 = c(0,.5),K=2);g1
## Test 2: three-component mixture ##
comp2 <- sample(c(0,1,2), size = 100000, prob = c(0.5,0.3,0.2), replace = T)
# BUG FIX: the sd= expression mixed `comp` (length 10000, left over from
# test 1) into a length-100000 computation, silently recycling the wrong
# memberships; every component test must use comp2.
sim2<-rnorm(100000, mean =comp2, sd = ifelse(comp2 == 0, 1,ifelse(comp2==1,4,2)))
g2<-gaussiEM(clsvec = sim2,ept=1e-3,mean0 = c(0,.8,.9),var0 = c(.6,7.8,2.3),K=3);g2
## Example 9.1 data from the book
y<-c(-67,-48,6,8,14,16,23,24,28,29,41,49,56,60,75)
gaussiEM(y)
#### Chapter 10: hidden Markov models ####
#### Generating observation sequences from an HMM ####
## A  : state-transition matrix, B : emission (observation) matrix,
## PI : initial state distribution.
## Lth : length of each generated sequence (scalar, or a vector of
## length `size`); size : number of sequences to generate.
## StateLabel/ObjectLabel give the printable symbols; `seed` (optional)
## makes the simulation reproducible.
## Returns list(obs = <list of observation vectors>,
##              state = <list of state vectors>).
ObjectHMM<-function(size=1,Lth=5,A,B,PI,StateLabel=1:nrow(A),
ObjectLabel=1:ncol(B),seed=NULL){
stlab<-StateLabel# symbols used for the hidden states
objlab<-ObjectLabel# symbols used for the observations
lsi<-size
if(length(Lth)==1) Lth<-rep(Lth,size)
stlst<-list()
objlst<-list()
if(!is.null(seed)) set.seed(seed=seed)
for(i in 1:lsi){
lt<-Lth[i]
stvec<-vector(length = lt)
objvec<-vector(length = lt)
stvec[1]<-sample(stlab,1,prob = PI)# draw the initial state
st1<-which(stlab==stvec[1])# its row index in B
objvec[1]<-sample(objlab,1,prob = B[st1,])# initial observation
# BUG FIX: guard the recursion — with lt == 1 the original `2:lt`
# counted backwards (2, 1) and crashed on an empty state lookup.
if(lt>1) for(j in 2:lt){
st<-which(stlab==stvec[j-1])# current state index
stvec[j]<-sample(stlab,1,prob =A[st,])# draw the next state
stnew<-which(stlab==stvec[j])# its row index in B
objvec[j]<-sample(objlab,1,prob = B[stnew,])# draw the next observation
}
stlst[[i]]<-stvec
objlst[[i]]<-objvec
}
outlst<-list(obs=objlst,state=stlst)
return(outlst)
}
## Test: Example 10.1 — transition matrix, emission matrix and initial
## distribution for the 4-state urn model
a10.1<-matrix(c(0,1,0,0,
.4,0,.6,0,
0,.4,0,.6,
0,0,.5,.5),nrow = 4,byrow = T);a10.1
pi10.1<-rep(.25,4);pi10.1
b10.1<-matrix(c(.5,.5,
.3,.7,
.6,.4,
.8,.2),nrow = 4,byrow = T);b10.1
# two sequences of length 5; the observation symbols are red/white balls
ObjectHMM(size=2,Lth = 5,A=a10.1,B=b10.1,PI=pi10.1,
ObjectLabel = c("红","白"))
# same, but reproducible with a fixed seed
ObjectHMM(size=2,Lth = 5,A=a10.1,B=b10.1,PI=pi10.1,
ObjectLabel = c("红","白"),seed=66)
# per-sequence lengths via a vector Lth
ObjectHMM(size=3,Lth = c(3,4,5),A=a10.1,B=b10.1,
PI=pi10.1,ObjectLabel = c("红","白"))
#### Forward algorithm for HMMs ####
## Computes P(O | lambda) for the observation sequence `obs` together
## with the full matrix of forward probabilities alpha_t(i)
## (rows = time steps, columns = states).
forwardHMM<-function(obs,A,B,PI,StateLabel=as.character(1:nrow(A)),
                     ObjectLabel=as.character(1:ncol(B))){
  nT<-length(obs)
  nst<-nrow(A)
  Bmat<-B;colnames(Bmat)<-ObjectLabel;rownames(Bmat)<-StateLabel
  Amat<-A;colnames(Amat)<-StateLabel;rownames(Amat)<-StateLabel
  AlpMat<-matrix(nrow=nT,ncol = nst)
  colnames(AlpMat)<-paste0("st:",StateLabel)
  rownames(AlpMat)<-paste0("T:",1:nT)
  # initialisation: alpha_1(i) = pi_i * b_i(o_1)
  alpha<-PI*Bmat[,which(ObjectLabel==obs[1])]
  AlpMat[1,]<-alpha
  for(t in seq_len(nT)[-1]){
    # recursion: alpha_t(i) = (sum_j alpha_{t-1}(j) a_ji) * b_i(o_t)
    alpha<-as.vector(alpha%*%Amat)*Bmat[,which(ObjectLabel==obs[t])]
    AlpMat[t,]<-alpha
  }
  # termination: P(O | lambda) = sum_i alpha_T(i)
  list(FinalProb=sum(alpha),AlpMat=AlpMat)
}
## Program solution of Example 10.2 (forward algorithm)
A10.2<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
# expected P(O | lambda) for (red, white, red) is 0.130218
forwardHMM(obs=c("红","白","红"),A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
## Combine with ObjectHMM() for a quick sanity check on simulated data
hmm1<-ObjectHMM(size=3,Lth = c(3,4,5),A=a10.1,B=b10.1,
PI=pi10.1,ObjectLabel = c("红","白"));hmm1
hmm1_obs<-hmm1$obs;hmm1_obs
lapply(hmm1_obs,forwardHMM,A=a10.1,B=b10.1,
PI=pi10.1,ObjectLabel=c("红","白"))
#### Backward algorithm for HMMs ####
## Computes P(O | lambda) and the matrix of backward probabilities.
## NOTE: BtMat is stored in reverse time order — its first row is beta at
## time T (all ones) and its last row is beta at time 1, matching the
## "T:<t>" rownames.
backwardHMM<-function(obs,A,B,PI,StateLabel=as.character(1:nrow(A)),
                      ObjectLabel=as.character(1:ncol(B))){
  nT<-length(obs)
  nst<-nrow(A)
  Bmat<-B;colnames(Bmat)<-ObjectLabel;rownames(Bmat)<-StateLabel
  Amat<-A;colnames(Amat)<-StateLabel;rownames(Amat)<-StateLabel
  beta<-rep(1,nst)                    # beta_T(i) = 1 by definition
  BtMat<-matrix(nrow=nT,ncol = nst)
  colnames(BtMat)<-paste0("st:",StateLabel)
  rownames(BtMat)<-paste0("T:",nT:1)
  BtMat[1,]<-beta
  for(step in seq_len(nT-1)){
    # recursion backwards in time:
    # beta_t(i) = sum_j a_ij * b_j(o_{t+1}) * beta_{t+1}(j)
    emit<-Bmat[,which(ObjectLabel==obs[nT-step+1])]
    beta<-as.vector(Amat%*%(emit*beta))
    BtMat[step+1,]<-beta
  }
  # termination: P(O | lambda) = sum_i pi_i * b_i(o_1) * beta_1(i)
  bo1<-Bmat[,which(ObjectLabel==obs[1])]
  list(FinalProb=sum(PI*bo1*beta),BtMat=BtMat)
}
## Program solution of Example 10.2 with the backward algorithm
A10.2<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
backwardHMM(obs=c("红","白","红"),A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
# forward and backward must agree on P(O | lambda) for each sequence
hmm<-ObjectHMM(size=2,Lth = c(3,4),A=A10.2,B=B10.2,
PI=pi10.2,ObjectLabel = c("红","白"))
hmm_obs<-hmm$obs;hmm_obs
lapply(hmm_obs,forwardHMM,A=A10.2,B=B10.2,PI=pi10.2,ObjectLabel=c("红","白"))
lapply(hmm_obs,backwardHMM,A=A10.2,B=B10.2, PI=pi10.2,ObjectLabel=c("红","白"))
#### Some probability and expectation computations ####
## Combines the forward and backward probabilities of `obs` to obtain
##   gamma_t(i) = P(state_t = i | O)                  -> PstiMat
##   xi_t(i,j)  = P(state_t = i, state_{t+1} = j | O) -> Pstijvec
## sti  : a single state label; with `time` it selects gamma_time(sti).
## stij : a length-2 vector c(i, j) of state labels for the xi terms.
## time : time index (may be a vector) used to extract from the tables.
## if.sti = TRUE skips the xi computation, if.stij = TRUE skips the
## gamma computation; setting both TRUE raises an error.
stijHMM<-function(obs,sti=NULL,stij=NULL,time=NULL,A,B,PI,
StateLabel=as.character(1:nrow(A)),
ObjectLabel=as.character(1:ncol(B)),
if.sti=F,if.stij=F){
lT<-length(obs)
Alplst<-forwardHMM(obs=obs,A=A,B=B,PI=PI,StateLabel = StateLabel,
ObjectLabel = ObjectLabel)
Btlst<-backwardHMM(obs=obs,A=A,B=B,PI=PI,StateLabel = StateLabel,
ObjectLabel = ObjectLabel)
AlpMat<-Alplst$AlpMat
BtMat<-Btlst$BtMat
# BtMat comes back in reverse time order; flip it to forward order
btmat<-BtMat[lT:1,,drop=F]
Probs<-Alplst$FinalProb
if(!if.stij){
PstiMat<-AlpMat*btmat/Probs# gamma matrix: P(state_t = i | O)
rownames(PstiMat)<-rownames(AlpMat);colnames(PstiMat)<-colnames(AlpMat)
si<-which(StateLabel==sti)# integer(0) when sti is NULL — safe below
if(!is.null(sti)&&!is.null(time)) psti<-PstiMat[time,si] else psti<-NULL
}
if(!if.sti){
fbj<-function(j,x,BM) BM[j,which(ObjectLabel==x)]
wj<-which(StateLabel==stij[2]);wi<-which(StateLabel==stij[1])
bjDf<-data.frame(jobs=obs[-1])
bjvec<-apply(bjDf,1,fbj,j=wj,BM=B)# b_j(o_{t+1}) for t = 1..lT-1
# xi_t(i,j) = alpha_t(i) a_ij b_j(o_{t+1}) beta_{t+1}(j) / P(O); length lT-1
Pstijvec<-A[wi,wj]*AlpMat[1:(lT-1),wi]*bjvec*btmat[-1,wj]/Probs
if(!is.null(time)&&!is.null(stij)) pstij<-Pstijvec[time] else pstij<-NULL
}
if(!if.sti&&!if.stij){
outlst<-list(Probs=Probs,psti=psti,pstij=pstij,PstiMat=PstiMat,Pstijvec=Pstijvec,
AlpMat=AlpMat,BtMat=btmat,sti=sti,stij=stij,time=time)
} else if(!if.stij&&if.sti){
outlst<-list(Probs=Probs,psti=psti,PstiMat=PstiMat,AlpMat=AlpMat,
BtMat=btmat,sti=sti,stij=stij,time=time)
} else if(if.stij&&!if.sti){
outlst<-list(Probs=Probs,pstij=pstij,Pstijvec=Pstijvec,AlpMat=AlpMat,
BtMat=btmat,sti=sti,stij=stij,time=time)
} else{
stop("if.sti and if.stij can not both TRUE.")
}
return(outlst)
}
## Correctness checks: gamma rows sum to 1 and the xi terms aggregate
## consistently with gamma
stij1<-stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","1"),time=1,A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
stij2<-stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","2"),time=1,A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
stij3<-stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","3"),time=1,A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"))
apply(stij1$PstiMat,1,sum)# each row of gamma should be exactly 1
apply(stij1$PstiMat,2,sum)
# sum over j of xi(1, j) should equal gamma at the matching times
sum(c(stij1$Pstijvec,stij2$Pstijvec,stij3$Pstijvec))
sum(stij1$PstiMat[1:2,1])
names(stij1)
# gamma only (skip the xi computation)
stijHMM(obs=c("红","白","红"),sti=NULL,
stij=NULL,time=NULL,A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel=c("红","白")
,if.sti = TRUE)
# xi only (skip the gamma computation)
stijHMM(obs=c("红","白","红"),sti="1",
stij=c("1","1"),time=NULL,A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel=c("红","白"),
if.stij=TRUE)
# both flags TRUE is rejected with an error
stijHMM(obs=c("红","白","红"),sti="1",stij=c("1","1"),time=NULL,A=A10.2,
B=B10.2,PI=pi10.2,ObjectLabel = c("红","白"), if.sti = TRUE ,if.stij=TRUE)
#### Supervised learning of HMM parameters ####
## Maximum-likelihood estimation from fully observed sequence pairs.
## obsMat : T x S matrix of observation symbols — one column per
##          sequence, rows are the time steps t = 1..T.
## stMat  : T x S matrix of the corresponding hidden states.
## StateLabel / ObjectLabel : the state and observation alphabets.
## Returns a classed list with the estimated initial distribution (pi),
## transition matrix (aijMat) and emission matrix (bjkMat).
superviseHMM<-function(obsMat,stMat,StateLabel=NULL,ObjectLabel=NULL){
lT<-nrow(obsMat)# sequence length
S<-ncol(obsMat)# number of sequences
stMatchs<-stMat[-lT,]# states at times 1..T-1 (transition sources)
lst<-length(StateLabel)# number of states
lobs<-length(ObjectLabel)# number of observation symbols
obsvec<-as.vector(obsMat);stvec<-as.vector(stMat)
aijMat<-matrix(nrow=lst,ncol=lst)## transition probability matrix
colnames(aijMat)<-StateLabel;rownames(aijMat)<-StateLabel
bjkMat<-matrix(nrow = lst,ncol=lobs)## emission probability matrix
colnames(bjkMat)<-ObjectLabel;rownames(bjkMat)<-StateLabel
pivec<-vector(length=lst)## initial probability vector
findaij<-function(ichr,jchr){# one transition probability a_ij
# count i -> j transitions over t = 1..T-1 across all sequences
SAij<-length(which(stMatchs==ichr))
Aij<-0
for(t in 1:(lT-1)){
tvec<-stMat[t,];tplus1vec<-stMat[t+1,]
tlab<-which(tvec==ichr);tplus1st<-tplus1vec[tlab]
sj<-sum(tplus1st==jchr)
Aij<-Aij+sj
}
return(Aij/SAij)
}
findbjk<-function(jchr,kchr){# one emission probability b_j(k)
jlab<-which(stvec==jchr)
kvec<-obsvec[jlab]
sum(kvec==kchr)/length(jlab)
}
# fill the transition probability matrix
for(i in 1:lst){
for(j in 1:lst){
aijMat[i,j]<-findaij(ichr=StateLabel[i],jchr = StateLabel[j])
}
}
# fill the emission probability matrix
for(j in 1:lst){
for(k in 1:lobs){
bjkMat[j,k]<-findbjk(jchr=StateLabel[j],kchr = ObjectLabel[k])
}
}
# initial distribution from the states at t = 1.
# BUG FIX: the original assigned into `pi[i]`, silently copying and
# shadowing base::pi instead of filling the preallocated pivec.
first<-stMat[1,]
for(i in 1:lst){
pivec[i]<-length(which(first==StateLabel[i]))/S
}
outlst<-list(pi=pivec,aijMat=aijMat,bjkMat=bjkMat,
StateLabel=StateLabel,ObjectLabel=ObjectLabel)
class(outlst)<-c("superviseHMM","HMM")
return(outlst)
}
## S3 print method for "superviseHMM" fits: prints the two alphabets,
## then the estimated pi, transition and emission tables (elements 1:3).
print.superviseHMM<-function(obj){
cat("State::",obj$StateLabel,"; ","Observation::",obj$ObjectLabel,"\n")
print(obj[1:3])
}
## Test 1: generate training data with ObjectHMM()
A10.2<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## Generate hidden-Markov observation and state sequences
test1<-ObjectHMM(size=2000,Lth = 5,A=A10.2,B=B10.2,PI=pi10.2,
StateLabel = as.character(1:3),ObjectLabel = c("红","白"))
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix (5 x 2000)
stmat<-do.call(cbind,st1)# state matrix
shmm1<-superviseHMM(obsMat = obsmat,stMat = stmat,StateLabel =as.character(1:3),
ObjectLabel =c("红","白") )# fit the model
shmm1
# compare the estimates with the true parameters
pi10.2
A10.2
B10.2
## Test 2: true parameters A1/B1/pi1
# BUG FIX: this block used to sample from (and later print) A10/B10/pi10,
# which are not defined until much later in the file, and the trailing
# auto-prints showed the stale A10.2/B10.2; the sequences are meant to
# come from the A1/B1/pi1 parameters defined right here.
A1<-matrix(c(0.5,0.3,0.2,
0.4,0.4,0.2,
0.1,0.2,0.7),nrow=3,byrow = T);A1
B1<-matrix(c(0.6,0.4,
0.3,0.7,
0.4,0.6),nrow = 3,byrow = T);B1
pi1<-c(0.3,0.3,0.4);pi1
## Generate hidden-Markov observation and state sequences
test2<-ObjectHMM(size=2000,Lth = 5,A=A1,B=B1,PI=pi1,
StateLabel = as.character(1:3),ObjectLabel = c("红","白"))
obs2<-test2$obs
st2<-test2$state
obsmat2<-do.call(cbind,obs2)# observation matrix
stmat2<-do.call(cbind,st2)# state matrix
shmm2<-superviseHMM(obsMat = obsmat2,stMat = stmat2,
StateLabel =as.character(1:3),ObjectLabel =c("红","白") )# fit the model
shmm2
# compare the estimates with the true parameters
pi1
A1
B1
#### Baum-Welch (unsupervised EM) learning of HMM parameters ####
## obs : one observation sequence (vector); A0/B0/PI0 : initial guesses.
## Iterates the standard Baum-Welch updates — stijHMM() (defined above)
## supplies the gamma and xi probabilities — until the Lp norm of the
## parameter change drops below ept or maxiter is reached.
## NOTE: the name keeps the file's original "BuamWelch" spelling because
## every caller in this file uses it.
BuamWelchHMM<-function(obs,A0,B0,PI0,StateLabel=as.character(1:nrow(A0)),
ObjectLabel=as.character(1:ncol(B0)),ept=1e-2,
maxiter=10000,Lp=2){
lT<-length(obs);lst<-length(StateLabel);lobs<-length(ObjectLabel)
Abw<-A0;Bbw<-B0;PIbw<-PI0
iterk<-1
while(iterk>=1){
# keep the previous parameters for the convergence test
Abwold<-Abw;Bbwold<-Bbw;PIbwold<-PIbw
# update the initial distribution: pi_i = gamma_1(i)
pilst<-stijHMM(obs=obs,A=Abw,B=Bbw,PI=PIbw,if.sti = TRUE,
StateLabel = StateLabel,ObjectLabel = ObjectLabel)
PIbw<-pilst$PstiMat[1,]
# update the transition and emission matrices
for(i in 1:lst){# row i
ir<-StateLabel[i]
# transition row i: a_ij = sum_t xi_t(i,j) / sum_t gamma_t(i)
for(j in 1:lst){
jr<-StateLabel[j]# state labels for the (i, j) pair
calij<-stijHMM(obs=obs,stij=c(ir,jr),A=Abw,B=Bbw,PI=PIbw,
StateLabel = StateLabel,ObjectLabel=ObjectLabel)
pstiMat<-calij$PstiMat;pstijvec<-calij$Pstijvec
Abw[i,j]<-sum(pstijvec)/sum(pstiMat[1:(lT-1),i])
}
# emission row i: b_i(k) = sum_{t: o_t = k} gamma_t(i) / sum_t gamma_t(i)
for(k in 1:lobs){
klab<-which(obs==ObjectLabel[k])
cali<-stijHMM(obs=obs,A=Abw,B=Bbw,PI=PIbw,StateLabel = StateLabel,
ObjectLabel=ObjectLabel,if.sti=TRUE)
piMat<-cali$PstiMat
pstivec<-piMat[,i];pkvec<-pstivec[klab]
Bbw[i,k]<-sum(pkvec)/sum(pstivec)
}
}
# stopping criterion on the Lp norm of the parameter change.
# BUG FIX: the Lp norm is (sum |d|^Lp)^(1/Lp); the original multiplied
# the sum by 1/Lp instead of raising it to the 1/Lp power (compare the
# correct form used in gaussiEM() earlier in this file).
Abwoldvec<-as.vector(Abwold);Abwvec<-as.vector(Abw)
Bbwoldvec<-as.vector(Bbwold);Bbwvec<-as.vector(Bbw)
abw<-Abwvec-Abwoldvec;Lpabw<-sum(abs(abw)^Lp)^(1/Lp)
bbw<-Bbwvec-Bbwoldvec;Lpbbw<-sum(abs(bbw)^Lp)^(1/Lp)
pibw<-PIbw-PIbwold;Lppibw<-sum(abs(pibw)^Lp)^(1/Lp)
allbw<-c(abw,bbw,pibw);Lpallbw<-sum(abs(allbw)^Lp)^(1/Lp)
if(Lpabw<=ept&&Lpbbw<=ept&&Lppibw<=ept){
stoprule<-"Lpabw<=ept&&Lpbbw<=ept&&Lppibw<=ept"
break
}
if(Lpallbw<=ept){
stoprule<-"Lpallbw<=ept"
break
}
if(iterk>=maxiter){
stoprule<-"iterk>=maxiter"
break
}
iterk<-iterk+1
}
outlst<-list(pi=PIbw,aijMat=Abw,bjkMat=Bbw,iteration=iterk,
stoprule=stoprule,StateLabel=StateLabel,ObjectLabel=ObjectLabel)
class(outlst)<-c("BuamWelchHMM","HMM")
return(outlst)
}
## Run the Baum-Welch estimator independently on every column (sequence)
## of obsMat and return the element-wise average of the fitted pi,
## transition and emission parameters.
MoreBuamWelchHMM<-function(obsMat,A0,B0,PI0,StateLabel=as.character(1:nrow(A0)),
                           ObjectLabel=as.character(1:ncol(B0)),ept=1e-2,
                           maxiter=10000,Lp=2){
  nseq<-ncol(obsMat)
  nst<-length(StateLabel)
  nob<-length(ObjectLabel)
  Asum<-matrix(0,nrow=nst,ncol=nst)
  Bsum<-matrix(0,nrow = nst,ncol=nob)
  PIsum<-rep(0,nst)
  for(s in seq_len(nseq)){
    fit<-BuamWelchHMM(obs=obsMat[,s],A0=A0,B0=B0,PI0=PI0,StateLabel = StateLabel,
                      ObjectLabel = ObjectLabel,maxiter = maxiter,ept = ept,Lp=Lp)
    Asum<-Asum+fit$aijMat
    Bsum<-Bsum+fit$bjkMat
    PIsum<-PIsum+fit$pi
  }
  outlst<-list(pi=PIsum/nseq,aijMat=Asum/nseq,bjkMat=Bsum/nseq,
               StateLabel=StateLabel,ObjectLabel=ObjectLabel)
  class(outlst)<-c("BuamWelchHMM","HMM")
  return(outlst)
}
## S3 print method for "BuamWelchHMM" fits: prints the two alphabets,
## then the estimated pi, transition and emission tables (elements 1:3).
print.BuamWelchHMM<-function(obj){
cat("State::",obj$StateLabel,"; ","Observation::",obj$ObjectLabel,"\n")
print(obj[1:3])
}
#### Tests for the Baum-Welch estimator
## Initial parameter guesses
# BUG FIX: the auto-prints after A1 and B1 used to show the stale
# A10.2/B10.2 objects instead of the values just assigned.
A1<-matrix(c(0.5,0.3,0.2,
0.4,0.4,0.2,
0.1,0.2,0.7),nrow=3,byrow = T);A1
B1<-matrix(c(0.6,0.4,
0.3,0.7,
0.4,0.6),nrow = 3,byrow = T);B1
pi1<-c(0.3,0.3,0.4);pi1
## True parameters used to generate the training sequences
A10.2<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## Generate hidden-Markov observation and state sequences
test1<-ObjectHMM(size=2000,Lth = 5,A=A10.2,B=B10.2,PI=pi10.2,
StateLabel = as.character(1:3),ObjectLabel = c("红","白"))
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix
stmat<-do.call(cbind,st1)# state matrix
# fit from a single sequence, then from all sequences averaged
BuamWelchHMM(obs=obsmat[,1],A0=A1,B0=B1,PI0=pi1,
ObjectLabel = c("红","白"),ept = 1e-2)
BuamWelchHMM(obs=obsmat[,10],A0=A1,B0=B1,PI0=pi1,
ObjectLabel = c("红","白"),ept = 1e-2)
MoreBuamWelchHMM(obsMat = obsmat,A0=A1,B0=B1,PI0=pi1,
ObjectLabel = c("红","白"),ept = 1e-2)
#### Prediction: the approximate (per-step) decoding algorithm ####
## For every observation sequence (column of obsMat), pick at each time
## step the individually most probable hidden state, using the gamma
## probabilities produced by stijHMM() from the forward/backward passes.
approxHMM<-function(obsMat,A,B,PI,StateLabel=as.character(1:nrow(A)),
                    ObjectLabel=as.character(1:ncol(B))){
  decode_one<-function(seqobs){
    probs<-stijHMM(obs = seqobs,A=A,B=B,PI=PI,StateLabel = StateLabel,
                   ObjectLabel = ObjectLabel,if.sti = TRUE)
    # row t of PstiMat is P(state = i | O) at time t; take the argmax
    best<-apply(probs$PstiMat,1,which.max)
    StateLabel[best]
  }
  if(is.vector(obsMat)) obsMat<-matrix(obsMat,nrow=length(obsMat))
  apply(obsMat,2,decode_one)
}
#### Test 1 (approximate decoding, 3-state model)
A10.2<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## Generate hidden-Markov observation and state sequences
test1<-ObjectHMM(size=100,Lth = 100,A=A10.2,B=B10.2,PI=pi10.2,
StateLabel = as.character(1:3),ObjectLabel = c("红","白"),seed=100)
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix (100 x 100)
stmat<-do.call(cbind,st1)# state matrix
t1<-approxHMM(obsMat = obsmat,A=A10.2,B=B10.2,
PI=pi10.2,ObjectLabel = c("红","白"))
sum(t1==stmat)/10000# accuracy over all 100*100 decoded states
#### Test 2 (approximate decoding, 5-state model)
A10<-matrix(c(0.3,0.2,0.2,0,0.3,
0.1,0.2,0.3,0.3,0.1,
0.2,0.2,0.3,0.15,0.15,
0.2,0.1,0.1,0.3,0.4,
0.1,0.2,0.3,0.3,0.1),nrow=5,byrow = T);A10
B10<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3,
0.4,0.6,
0.45,0.55),nrow = 5,byrow = T);B10
pi10<-c(0.2,0.1,0.25,0.2,0.25);pi10
## Generate hidden-Markov observation and state sequences
test2<-ObjectHMM(size=100,Lth = 100,A=A10,B=B10,PI=pi10,
StateLabel = as.character(1:5),ObjectLabel = c("红","白"),seed=888)
obs2<-test2$obs
st2<-test2$state
obsmat2<-do.call(cbind,obs2)# observation matrix
stmat2<-do.call(cbind,st2)# state matrix
t2<-approxHMM(obsMat = obsmat2,A=A10,B=B10,
PI=pi10,ObjectLabel = c("红","白"))
sum(t2==stmat2)/10000# prediction accuracy over 100*100 states
#### Test 3 (approximate decoding, 2-state model)
a10<-matrix(c(0.4,0.6,
0.7,0.3),nrow = 2,byrow = T);a10
b10<-matrix(c(0.5,0.5,
0.3,0.7),nrow = 2,byrow = T);b10
p10<-c(0.3,0.7);p10
## Generate hidden-Markov observation and state sequences
# BUG FIX: both calls below passed the transition matrix a10 as the
# emission matrix B; the emission matrix b10 defined above is intended.
test3<-ObjectHMM(size=100,Lth = 100,A=a10,B=b10,PI=p10,
StateLabel = as.character(1:2),ObjectLabel = c("红","白"),seed=666)
obs3<-test3$obs
st3<-test3$state
obsmat3<-do.call(cbind,obs3)# observation matrix
stmat3<-do.call(cbind,st3)# state matrix
t3<-approxHMM(obsMat = obsmat3,A=a10,B=b10,
PI=p10,ObjectLabel = c("红","白"))
sum(t3==stmat3)/10000# accuracy over all 100*100 decoded states
#### Prediction: the Viterbi algorithm ####
## Most probable hidden-state path for an observation sequence.
## With if.show = TRUE returns the decoded path plus the delta (maximal
## path probability) and psi (back-pointer) matrices; with
## if.show = FALSE only the named path vector is returned.
ViterbiHMM<-function(obs,A,B,PI,StateLabel=as.character(1:nrow(A)),
ObjectLabel=as.character(1:ncol(B)),if.show=TRUE){
lst<-length(StateLabel)
lT<-length(obs)
obs1<-which(ObjectLabel==obs[1])
delta<-PI*B[,obs1]# initialisation: delta_1(i) = pi_i * b_i(o_1)
pasi<-rep(0,lst)# psi_1 is all zeros (no predecessor)
deltaMat<-matrix(nrow=lT,ncol = lst)# rows = time, cols = states
pasiMat<-matrix(nrow=lT,ncol = lst)
deltaMat[1,]<-delta;pasiMat[1,]<-pasi
# recursion over t = 2..lT
iterk<-2
while(iterk<=lT){
obslab<-which(ObjectLabel==obs[iterk])
bitvec<-B[,obslab]
# scores[j, i] = delta_{t-1}(j) * a_ji (delta recycles down columns)
scores<-delta*A
pasi<-apply(scores,2,which.max)# best predecessor for each state
delta<-scores[cbind(pasi,seq_len(lst))]*bitvec
deltaMat[iterk,]<-delta
pasiMat[iterk,]<-pasi
iterk<-iterk+1
}
# termination: probability and end point of the optimal path
statelab<-vector(length = lT)
finalstatelab<-which.max(delta)
finalprob<-delta[finalstatelab]
statelab[lT]<-finalstatelab
# backtracking through the psi matrix.
# BUG FIX: rev(seq_len(lT - 1)) is empty when lT == 1 — the original
# (lT-1):1 loop ran 0:1 and crashed on single-observation sequences.
for(j in rev(seq_len(lT-1))){
statelab[j]<-pasiMat[j+1,statelab[j+1]]
}
rownames(deltaMat)<-paste0("T:",1:lT)
colnames(deltaMat)<-paste0("st:",StateLabel)
rownames(pasiMat)<-paste0("T:",1:lT)
colnames(pasiMat)<-paste0("st:",StateLabel)
predvec<-StateLabel[statelab];names(predvec)<-paste0("T:",1:lT)
if(if.show){
out<-list(FinalState=predvec,deltaMat=deltaMat,pasiMat=pasiMat)
} else{
out<-predvec
}
return(out)
}
#### Batch Viterbi decoding
## One sequence per column of obsMat; returns the decoded state paths
## column-wise (a plain vector input is treated as a single sequence).
MoreViterbiHMM<-function(obsMat,A,B,PI,StateLabel=as.character(1:nrow(A)),
                         ObjectLabel=as.character(1:ncol(B))){
  if(is.vector(obsMat)){
    obsMat<-matrix(obsMat,nrow=length(obsMat))
  }
  decode<-function(seqobs) ViterbiHMM(obs=seqobs,A=A,B=B,PI=PI,
                                      StateLabel=StateLabel,
                                      ObjectLabel=ObjectLabel,if.show=FALSE)
  apply(obsMat,2,decode)
}
#### Solve Example 10.3 — expected optimal path is (3, 3, 3)
A10.3<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.3
B10.3<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.3
pi10.3<-c(0.2,0.4,0.4);pi10.3
ViterbiHMM(obs=c("红","白","红"),A=A10.3,B=B10.3,PI=pi10.3,
ObjectLabel =c("红","白") )
# compare with the per-step approximate decoding
approxHMM(obs=c("红","白","红"),A=A10.3,B=B10.3,PI=pi10.3,
ObjectLabel =c("红","白") )
#### Test 1 (Viterbi decoding accuracy, 3-state model)
A10.2<-matrix(c(0.5,0.2,0.3,
0.3,0.5,0.2,
0.2,0.3,0.5),nrow=3,byrow = T);A10.2
B10.2<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3),nrow = 3,byrow = T);B10.2
pi10.2<-c(0.2,0.4,0.4);pi10.2
## Generate hidden-Markov observation and state sequences
test1<-ObjectHMM(size=100,Lth = 5,A=A10.2,B=B10.2,PI=pi10.2,
StateLabel = as.character(1:3),ObjectLabel = c("红","白"),seed=666)
obs1<-test1$obs
st1<-test1$state
obsmat<-do.call(cbind,obs1)# observation matrix (5 x 100)
stmat<-do.call(cbind,st1)# state matrix
t1<-MoreViterbiHMM(obsMat = obsmat,A=A10.2,B=B10.2,
PI=pi10.2,ObjectLabel = c("红","白"))
sum(t1==stmat)/500# accuracy over all 100*5 decoded states
#### Test 2 (Viterbi decoding accuracy, 5-state model)
A10<-matrix(c(0.3,0.2,0.2,0,0.3,
0.1,0.2,0.3,0.3,0.1,
0.2,0.2,0.3,0.15,0.15,
0.2,0.1,0.1,0.3,0.4,
0.1,0.2,0.3,0.3,0.1),nrow=5,byrow = T);A10
B10<-matrix(c(0.5,0.5,
0.4,0.6,
0.7,0.3,
0.4,0.6,
0.45,0.55),nrow = 5,byrow = T);B10
pi10<-c(0.2,0.1,0.25,0.2,0.25);pi10
## Generate hidden-Markov observation and state sequences
test2<-ObjectHMM(size=100,Lth = 5,A=A10,B=B10,PI=pi10,
StateLabel = as.character(1:5),ObjectLabel = c("红","白"),seed=888)
obs2<-test2$obs
st2<-test2$state
obsmat2<-do.call(cbind,obs2)# observation matrix
stmat2<-do.call(cbind,st2)# state matrix
t2<-MoreViterbiHMM(obsMat = obsmat2,A=A10,B=B10,
PI=pi10,ObjectLabel = c("红","白"))
sum(t2==stmat2)/500# prediction accuracy over 100*5 states
#### Test 3 (Viterbi decoding accuracy, 2-state model)
a10<-matrix(c(0.4,0.6,
0.7,0.3),nrow = 2,byrow = T);a10
b10<-matrix(c(0.5,0.5,
0.3,0.7),nrow = 2,byrow = T);b10
p10<-c(0.3,0.7);p10
## Generate hidden-Markov observation and state sequences
# BUG FIX: both calls below passed the transition matrix a10 as the
# emission matrix B; the emission matrix b10 defined above is intended.
test3<-ObjectHMM(size=100,Lth = 5,A=a10,B=b10,PI=p10,
StateLabel = as.character(1:2),ObjectLabel = c("红","白"),seed=666)
obs3<-test3$obs
st3<-test3$state
obsmat3<-do.call(cbind,obs3)# observation matrix
stmat3<-do.call(cbind,st3)# state matrix
t3<-MoreViterbiHMM(obsMat = obsmat3,A=a10,B=b10,
PI=p10,ObjectLabel = c("红","白"))
sum(t3==stmat3)/500# accuracy over all 100*5 decoded states
# ---- end of the statistical-learning examples script (stray '|' was an extraction artifact) ----
# Development log
# version 6
# What is new
# Added BEFdata for central Russia species
# Added LAI calculation according to the Beer-Lambert law
# Corrected variable filtering: previously selected by position, now filtered by name
# Started considering a switch to dtplyr
# Switched from cat to special logging function
# Switching to foreach %dopar%
# Added function for correcting wrong time extrapolation according to other results
# Added function for finding correct dates according to pattern matching of tair
# Moisture added
# PRI updated according to its definition - compare wavelengths around green (550); in classical PRI these are 530 and 570,
# in our case they are 500 and 600. The previous version, which used the asymmetrical interval 550-570, is kept as PRI2
# Luca's spectral calibration added
# TODO prepare third version of results without this variables
# RRR, E2, G2, C2, ff20, DBH, DTT, installation_height, X1, Site, minTd, meanTd, maxTd, step_time, corr_time, serv_cor, imported, rn, wrong_server,zone, SiteIndex, na.rm, datetime, rec_num, PRI,PRI2, MCARI,recharge_flag, charge , cor_dt, MTCI,CIg,CIr,CRI1,CRI2,SIPI,PSSR,PSND,PSRI,CARI,EVI2,VARI, b_O_600,b_Y_570,b_G_550,b_B_500,b_V_450, b_R_610,source,id_rec, gx, gy, gz, serv_datetime, wrong_time,X,incr, type, b_R_650,b_W_860,b_V_810,b_U_760,b_T_730,b_S_680, NDVI,EVI,Nr,Rr,Br,Gr
# Load the project's plotting helpers and all required packages.
# NOTE(review): library(plotly) is attached twice (see below), and plyr
# is attached after tidyverse, so plyr masks several dplyr verbs —
# confirm this ordering is intentional.
source("code//TT_graphs.R")
library(dtplyr)
library(data.table)
library(tidyverse)
library(plyr)
library(lubridate)
library(DescTools)
library(plotly)
library(tsibble)
library(openxlsx)
library(stringr)
library(ggsci)
library(corrplot)
library(Hmisc)
library(plotly)   # duplicate of the earlier library(plotly) call
library(foreach)
#Logging function to log all activities into memory, to console or in file=====
message_log = "Starting data log"
# fun_log: route a log `message` (character scalar or vector) according
# to verboseFlag — "con" prints to the console, "mem" appends one joined
# line to the global `message_log` vector, "file" appends to a dated log
# file under logs/. Any other flag silently discards the message.
fun_log = function(verboseFlag = "mem", message){
  if (verboseFlag == "con"){
    cat(message)
  }
  if (verboseFlag == "mem"){
    # BUG FIX: the original swapped assign()'s name/value arguments —
    # it assigned the vector c("message_log", msg) to whatever name the
    # *value* of message_log held, so the in-memory log never grew.
    assign("message_log", c(message_log, paste(message, collapse = " ")),
           envir = globalenv())
  }
  if (verboseFlag == "file"){
    # BUG FIX: paste(..., collapse = "") does not join separate
    # arguments (collapse only joins vector elements), so the file name
    # contained stray spaces; paste0() concatenates them correctly.
    file_name = paste0("logs//", Sys.Date(), "-calc-loging.txt")
    write(paste(message, collapse = ""), file = file_name, append = TRUE)
  }
}
#Basic calculations=============================================================
# TTBasicCalc: convert a raw TreeTalker record table `tdt` (raw columns
# X1, X2, ... as imported from the server dump) into named physical
# variables. Record types (column X3): 45 = main climate/accelerometer
# record, 46/49 = spectrometer record, 40 = legacy 4-band device.
# Type-45 rows are joined with the matching type-46 rows (record counter
# offset by one) into a single wide table, which is returned with the
# raw .x/.y join columns dropped. Progress is reported via fun_log().
TTBasicCalc = function(tdt, verboseFlag){
fun_log(verboseFlag = verboseFlag, c("\n",
"Basic calculation started, adding not algorithmic variables to table ",
dim(tdt)[1],"x",dim(tdt)[2],"\n"))
#Clearing unclear
tdt[tdt == -Inf] = NA
tdt[tdt == Inf] = NA
#Separate different types of devices
tdt2 = tdt %>% filter(X3 == 46 | X3 == 49)
tdt = tdt %>% filter(X3 == 45 )
# NOTE(review): tdt has just been restricted to X3 == 45, so this filter
# can never match — tdt_40 is always empty here and is recomputed from
# tdt_a further below; type-40 rows are effectively dropped. Confirm
# whether tdt_40 should be taken from the original table.
tdt_40 = tdt %>% filter(X3 == 40)
# Basic variables for type 45
tdt$incr = 1:length(tdt$X1)
# X4 is the device epoch timestamp; X1 starts with the server receive
# time string and ends with the device id (see str_trunc uses below)
tdt$datetime = tdt$X4 %>% as_datetime( tz="Europe/Moscow",
origin = "1970-01-01 00:00:00")
tdt$rec_num = tdt$X2 %>% HexToDec
# NOTE(review): the leading empty argument in as.POSIXct(, ...) relies
# on the pipe inserting the value as the first argument — confirm this
# parses as intended on the deployed magrittr version.
tdt$serv_datetime = tdt$X1 %>% str_trunc(17,"right", ellipsis = "") %>%
dmy_hms( tz="Europe/Rome") %>% as.integer %>%
as.POSIXct(,origin = "1970-01-01 00:00:00", tz="Europe/Moscow")
# flag device clocks that are before epoch 157680000 s (~1975) or ahead
# of the server receive time (POSIXct compares as epoch seconds here)
tdt$wrong_time=((tdt$datetime<157680000)|((tdt$datetime)>tdt$serv_datetime))
tdt$id = tdt$X1 %>% as.character %>%
str_trunc(8,"left", ellipsis = "")
tdt$type = tdt$X3 %>% as.double
fun_log(verboseFlag = verboseFlag, c("Voltage and proximity sensor \n"))
#Voltage and proximity sensor
tdt$volt = (1.1*131072 / tdt$X8)+0.65
tdt$dist = 4.6728*(tdt$X7*1.1/tdt$X8)^(-1.274)
tdt$pulses = tdt$X7
tdt$Hz = tdt$X20
#Climate
tdt$rh = tdt$X10
tdt$tair = tdt$X11/10
tdt$VPD = 0.6108*exp((17.27*tdt$tair)/(tdt$tair+265.5))*(1 - tdt$rh/100)
#Accelerometer
tdt$gx = tdt$X12/4096
tdt$gx2 = tdt$X13/16777216
tdt$gy = tdt$X14/4096
tdt$gy2 = tdt$X15/16777216
tdt$gz = tdt$X16/4096
tdt$gz2 = tdt$X17/16777216
tdt$accel = (tdt$gx^2 + tdt$gy^2 + tdt$gz^2)^0.5
# per-axis tilt angles in degrees
tdt$theta = atan(tdt$gx/(tdt$gy^2+tdt$gz^2)^0.5) /pi * 180
tdt$psi = atan(tdt$gy/(tdt$gx^2+tdt$gz^2)^0.5) /pi * 180
tdt$phi = atan(tdt$gz/(tdt$gy^2+tdt$gx^2)^0.5) /pi * 180
#Temperature probes
tdt$t1 = tdt$X5 /10
tdt$nt1 = tdt$X6 /10
tdt$t2 = tdt$X18/10
tdt$nt2 = tdt$X19/10
# moisture: quartic polynomial in mx (a temperature/frequency-corrected
# signal); coefficients presumably from a lab calibration — TODO confirm
mx= 119.639 - (0.0420 * (tdt$t1)) - (0.00761 * tdt$X20)
y0=-209.9931
a=37.1602
b=-2.2091
c=0.0557
d=-0.0005
tdt$moist = y0+a*mx+b*mx^2+c*mx^3+d*mx^4
if (any(names(tdt)=="s")){
tdt$source = tdt$s
}
# Removing duplicates only according to data variables - slow but robust
# filtering only type 45 since left alignment - 46 type rows will be added
# only to rows of 45 with the same rec_num
#print(names(tdt))
duplicated_data_row = tdt[,c("id","dist","rh","tair","VPD","gx","gx2","gy","gy2")] %>% duplicated %>% which
fun_log(verboseFlag = verboseFlag, c("\n","Table had ",dim(tdt)[1]," rows, found ",
duplicated_data_row %>% length, " rows to be removed.","\n" ))
if(duplicated_data_row %>% length > 0){
tdt = tdt[-duplicated_data_row,]
}
# Basic variables for type 46
tdt2$datetime = tdt2$X4 %>% as_datetime( tz="Europe/Moscow",
origin = "1970-01-01 00:00:00")
tdt2$serv_datetime = tdt2$X1 %>% str_trunc(17,"right", ellipsis = "") %>%
dmy_hms( tz="Europe/Rome") %>% as.integer %>%
as.POSIXct(,origin = "1970-01-01 00:00:00", tz="Europe/Moscow")
tdt2$rec_num = tdt2$X2 %>% HexToDec
tdt2$id = tdt2$X1 %>% as.character %>%
str_trunc(8,"left", ellipsis = "")
# Calculating type 46 variables - spectrometer raw bands (nm in names)
tdt2$b_R_650 = tdt2$X16
tdt2$b_O_600 = tdt2$X15
tdt2$b_Y_570 = tdt2$X14
tdt2$b_G_550 = tdt2$X13
tdt2$b_B_500 = tdt2$X12
tdt2$b_V_450 = tdt2$X11
tdt2$b_W_860 = tdt2$X10
tdt2$b_V_810 = tdt2$X9
tdt2$b_U_760 = tdt2$X8
tdt2$b_T_730 = tdt2$X7
tdt2$b_S_680 = tdt2$X6
tdt2$b_R_610 = tdt2$X5
# linearly calibrated bands ("c" suffix); slope/intercept pairs are the
# spectral calibration mentioned in the development log above
tdt2$b_R_650c = tdt2$b_R_650*0.7829+202.77
tdt2$b_O_600c = tdt2$b_O_600*0.8654-328.08
tdt2$b_Y_570c = tdt2$b_Y_570*1.0462-666.72
tdt2$b_G_550c = tdt2$b_G_550*1.0546-842.1
tdt2$b_B_500c = tdt2$b_B_500*0.6257-232.13
tdt2$b_V_450c = tdt2$b_V_450*0.4562-212.62
tdt2$b_W_860c = tdt2$b_W_860*0.5319+334.88
tdt2$b_V_810c = tdt2$b_V_810*0.8414+91.58
tdt2$b_U_760c = tdt2$b_U_760*1.4549-1012.5
tdt2$b_T_730c = tdt2$b_T_730*1.6209-1511.2
tdt2$b_S_680c = tdt2$b_S_680*1.5199-561.56
tdt2$b_R_610c = tdt2$b_R_610*1.6699-312.45
# vegetation indices from the raw bands
tdt2 = tdt2 %>% mutate( NDVI = (b_V_810 - b_S_680)/(b_V_810 + b_S_680))
tdt2 = tdt2 %>% mutate( EVI = 2.5*(b_V_810-b_S_680)/
(b_V_810 + 6*b_S_680 - 7.5*b_B_500 + 1))
tdt2 = tdt2 %>% mutate( EVI2 = 2.4*(b_V_810 - b_S_680)/
(b_V_810 + b_S_680 + 1))
tdt2 = tdt2 %>% mutate( VARI = (b_G_550-b_S_680)/(b_V_810+b_S_680-b_B_500))
tdt2 = tdt2 %>% mutate( Nr = (b_V_810)/
(b_V_810 + b_R_610 + b_B_500 + b_G_550))
tdt2 = tdt2 %>% mutate( Rr = (b_R_610 )/
(b_V_810 + b_R_610 + b_B_500 + b_G_550))
tdt2 = tdt2 %>% mutate( Br = ( b_B_500)/
(b_V_810 + b_R_610 + b_B_500 + b_G_550))
tdt2 = tdt2 %>% mutate( Gr = (b_G_550)/
(b_V_810 + b_R_610 + b_B_500 + b_G_550))
tdt2 = tdt2 %>% mutate( SIPI = (b_V_810 - b_V_450)/(b_V_810 -b_S_680))
tdt2 = tdt2 %>% mutate( PSSR = (b_V_810 / b_R_650))
tdt2 = tdt2 %>% mutate( PSND = (b_V_810 - b_R_650)/(b_V_810 + b_R_650))
tdt2 = tdt2 %>% mutate( PSRI = (b_S_680 - b_B_500)/b_U_760)
tdt2 = tdt2 %>% mutate( CARI= ((b_T_730-b_S_680)-0.2*(b_T_730-b_G_550)))
tdt2 = tdt2 %>% mutate( MCARI= ((b_T_730-b_S_680)-0.2*(b_T_730-b_G_550))*
(b_T_730/b_S_680))
tdt2 = tdt2 %>% mutate( MTCI = ((b_U_760 - b_T_730) /(b_T_730 - b_S_680)))
tdt2 = tdt2 %>% mutate( CIg = ((b_W_860 - b_Y_570)/b_Y_570))
tdt2 = tdt2 %>% mutate( CIr = ((b_W_860 - b_T_730)/b_T_730))
tdt2 = tdt2 %>% mutate( CRI1 = (1/ b_B_500)-(1/b_G_550))
tdt2 = tdt2 %>% mutate( CRI2 = (1/ b_B_500)-(1/b_T_730))
# PRI here uses 500/600 around green; PRI2 keeps the older 550/570 form
tdt2 = tdt2 %>% mutate( PRI = (b_B_500 - b_O_600 )/(b_B_500 + b_O_600))
tdt2 = tdt2 %>% mutate( PRI2 = (b_G_550- b_Y_570 )/(b_G_550 + b_Y_570))
# same indices computed from the calibrated bands ("c" suffix)
tdt2 = tdt2 %>% mutate( NDVIc = (b_V_810c - b_S_680c)/(b_V_810c + b_S_680c))
tdt2 = tdt2 %>% mutate( EVIc = 2.5*(b_V_810c-b_S_680c)/
(b_V_810c + 6*b_S_680c - 7.5*b_B_500c + 1))
tdt2 = tdt2 %>% mutate( EVI2c = 2.4*(b_V_810c - b_S_680c)/
(b_V_810c + b_S_680c + 1))
tdt2 = tdt2 %>% mutate( VARIc = (b_G_550c-b_S_680c)/(b_V_810c+b_S_680c-b_B_500c))
tdt2 = tdt2 %>% mutate( Nrc = (b_V_810c)/
(b_V_810c + b_R_610c + b_B_500c + b_G_550c))
tdt2 = tdt2 %>% mutate( Rrc = (b_R_610c )/
(b_V_810c + b_R_610c + b_B_500c + b_G_550c))
tdt2 = tdt2 %>% mutate( Brc = ( b_B_500c)/
(b_V_810c + b_R_610c + b_B_500c + b_G_550c))
tdt2 = tdt2 %>% mutate( Grc = (b_G_550c)/
(b_V_810c + b_R_610c + b_B_500c + b_G_550c))
tdt2 = tdt2 %>% mutate( SIPIc = (b_V_810c - b_V_450c)/(b_V_810c - b_S_680c))
tdt2 = tdt2 %>% mutate( PSSRc = (b_V_810c / b_R_650c))
tdt2 = tdt2 %>% mutate( PSNDc = (b_V_810c - b_R_650c)/(b_V_810c + b_R_650c))
tdt2 = tdt2 %>% mutate( PSRIc = (b_S_680c - b_B_500c)/b_U_760c)
tdt2 = tdt2 %>% mutate( CARIc = ((b_T_730c - b_S_680c)-0.2*(b_T_730c - b_G_550c)))
tdt2 = tdt2 %>% mutate( MCARIc = ((b_T_730c - b_S_680c)-0.2*(b_T_730c - b_G_550c))*
(b_T_730c/b_S_680c))
tdt2 = tdt2 %>% mutate( MTCIc = ((b_U_760c - b_T_730c) /(b_T_730c - b_S_680c)))
tdt2 = tdt2 %>% mutate( CIgc = ((b_W_860c - b_Y_570c)/b_Y_570c))
tdt2 = tdt2 %>% mutate( CIrc = ((b_W_860c - b_T_730c)/b_T_730c))
tdt2 = tdt2 %>% mutate( CRI1c = (1/ b_B_500c)-(1/b_G_550c))
tdt2 = tdt2 %>% mutate( CRI2c = (1/ b_B_500c)-(1/b_T_730c))
tdt2 = tdt2 %>% mutate( PRIc = (b_B_500c - b_O_600c )/(b_B_500c + b_O_600c))
tdt2 = tdt2 %>% mutate( PRI2c = (b_G_550c - b_Y_570c )/(b_G_550c + b_Y_570c))
# To align type 45 and 46 producing correct unique index variable:
# a type-46 record carries the counter of the following type-45 record,
# hence the rec_num - 1 offset on the tdt2 side of the join key
tdt = tdt %>% mutate(id_rec = paste(id,rec_num,sep=""))
tdt2 = tdt2 %>% mutate(id_rec = paste(id,rec_num-1,sep=""))
# Uniting rows of type 45 and 46 (left join keeps every type-45 row)
tdt_a = left_join(tdt,tdt2, by="id_rec", copy=F)
names(tdt_a)[names(tdt_a) == "datetime.x"] = "datetime"
names(tdt_a)[names(tdt_a) == "rec_num.x"] = "rec_num"
names(tdt_a)[names(tdt_a) == "serv_datetime.x"] = "serv_datetime"
names(tdt_a)[names(tdt_a) == "id.x"] = "id"
#Recalculating bands and indexes for type 40 since it has only 4 bands
# NOTE(review): tdt (and therefore tdt_a) only contains type == 45 rows
# after the filters above, so this branch can never execute; together
# with the commented-out merge at the end, type-40 results are never
# produced — confirm whether this is intentional.
if(any(tdt_a$type == 40)){
tdt_40 = tdt_a %>% filter(type == 40)
tdt_40$b_R_650 = tdt_40$X24.x
tdt_40$b_G_550 = tdt_40$X23.x
tdt_40$b_B_500 = tdt_40$X22.x
tdt_40$b_V_810 = tdt_40$X21.x
tdt_40 = tdt_40 %>% mutate( NDVI = (b_V_810 - b_R_650)/
(b_V_810 + b_R_650) )
tdt_40 = tdt_40 %>% mutate( EVI = 2.5*(b_V_810-b_R_650)/
(b_V_810+6*b_R_650-7.5*b_B_500+1))
tdt_40 = tdt_40 %>% mutate( VARI = (b_G_550-b_R_650)/
(b_V_810+b_R_650-b_B_500))
tdt_40 = tdt_40 %>% mutate( Nr = (b_V_810)/
(b_V_810+b_R_650+b_B_500+b_G_550))
tdt_40 = tdt_40 %>% mutate( Rr = (b_R_650 )/
(b_V_810+b_R_650+b_B_500+b_G_550))
tdt_40 = tdt_40 %>% mutate( Br = ( b_B_500)/
(b_V_810+b_R_650+b_B_500+b_G_550))
# NOTE(review): the Gr denominator below uses b_R_610 (a joined
# type-46 band) while the other ratios use b_R_650 — confirm intended
tdt_40 = tdt_40 %>% mutate( Gr = ( b_G_550)/
(b_V_810+b_R_610+b_B_500+b_G_550))
tdt_40 = tdt_40 %>% mutate( SIPI = (b_V_810 - b_B_500)/
(b_V_810 -b_R_650) )
tdt_40 = tdt_40 %>% mutate( PSSR = (b_V_810 / b_R_650) )
tdt_40 = tdt_40 %>% mutate( PSND = (b_V_810 - b_R_650)/
(b_V_810 + b_R_650) )
tdt_40 = tdt_40 %>% mutate( PSRI = (b_R_650 - b_B_500)/b_V_810)
tdt_40 = tdt_40 %>% mutate( CARI = ((b_V_810-b_R_650)-
0.2*(b_V_810-b_G_550)))
tdt_40 = tdt_40 %>% mutate( MCARI = ((b_V_810-b_R_650)-
0.2*(b_V_810-b_G_550))
*(b_V_810/b_R_650))
tdt_40 = tdt_40 %>% mutate( CRI1 = (1/ b_B_500)-(1/b_G_550))
tdt_40 = tdt_40 %>% mutate( CRI2 = (1/ b_B_500)-(1/b_V_810))
#tdt_a[tdt_a$type==40,] = tdt_40
}
#Removing unconverted variables (raw .x/.y columns from the join)
tt_data = tdt_a %>% select(.,-ends_with(".x")) %>% select(., -ends_with(".y"))
return(tt_data)
}
#Example input
#server = c("http://naturetalkers.altervista.org/C18A0031/ttcloud.txt",
#"http://naturetalkers.altervista.org/C18A0025/ttcloud.txt")
#installation_start = 1556845000
#import_folder_name = "./RUDN"
#Calculating charges============================================================
# Flag battery-recharge events: TRUE where the voltage jumps by more than
# 0.5 V relative to the previous record.
# data - data frame (or list) with a numeric `volt` column.
# Returns a logical vector of the same length as `volt` (first element is
# always FALSE), or a single FALSE when fewer than 3 readings exist.
ischarged= function(data){
readings = data$volt
if (length(readings) <= 2) {
return(FALSE)
}
c(FALSE, diff(readings) > 0.5)
}
#Dates correction and extrapolation - new, not used=============================
# Extrapolate per-record timestamps from trusted anchors (correct device
# datetimes in `cor_dt`, falling back to correct server times in
# `serv_cor`), stepping by `timestep` seconds per record.
# NOTE(review): the section header says "new, not used". Cases 1 and 4
# below appear broken (see inline notes) - do not enable this function
# without fixing them.
# data     - one charge-cycle of one TT (needs datetime, serv_datetime,
#            cor_dt, serv_cor columns)
# timestep - assumed measurement period in seconds
# Returns `data` with a `time` column added/updated.
extrapolate_dates = function(data, timestep){
#print(data)
data$time = data$datetime
if(any(data$cor_dt))
if(length(data$cor_dt %>% na.omit)>0){
if(any(data$cor_dt) & length(data$cor_dt)>1 ){
# First record with a trusted device datetime is the anchor
start = which(data$cor_dt)[1]
if(start > 1){
#print("case 1")
data$time = data$datetime[start]
plus_time = (1:(length(data$cor_dt)-start-1))*timestep
minus_time = (1:(start-1))*(-timestep)
# NOTE(review): the next two assignments replace data$time with vectors
# SHORTER than the column - they look like they were meant to be
# subscripted assignments (data$time[(start+1):...] = ...). Left as-is
# because this function is unused.
data$time = data$time[(start+1):length(data$time)]+plus_time
data$time = data$time[1:(start-1)]+minus_time
}else{
#print("case 2")
# Anchor is the first record: fill every later slot forward
data$time = data$datetime[start]
plus_time = (1:(length(data$time)-start))*timestep
data$time[(start+1):length(data$time)] =
data$time[(start+1):length(data$time)]+plus_time
#print("error?")
}
} else {
# No trusted device datetime: fall back to the first correct server time
if(any(data$serv_cor) & length(data$cor_dt)>1){
start = which(data$serv_cor)[1]
if(start > 1){
#print("case 4")
plus_time = (1:(length(data$cor_dt)-start)*timestep)
minus_time = (1:(start-1))*(-timestep)
data$time = data$datetime[start]
# NOTE(review): same truncating-assignment problem as case 1 above.
data$time = data$time[(start+1):length(data$time)]+plus_time
data$time = data$time[1:(start-1)]+minus_time
}else{
#print("case 5")
plus_time = (1:(length(data$cor_dt)-start)*timestep)
data$time = data$serv_datetime[start]
data$time[(start+1):length(data$time)] =
data$time[(start+1):length(data$time)]+plus_time
}
}
}
}
return(data)
}
#Flagging continuous correct server time========================================
# Keep only *runs* of consecutive correct server timestamps: an isolated
# correct server time (no correct neighbours within 1 row on either side)
# is re-flagged as wrong.
# data - per-TT data frame with a logical `wrong_server` column.
# Returns `data` with `wrong_server` updated (but see NOTE below).
mark_continious_serv_time = function(data){
#is.null(data) %>% print
# Row indexes currently considered to have a correct server time
wst = which(!data$wrong_server)
if (length(wst)>2){
# TRUE for interior correct indexes whose previous and next correct
# indexes are at most 1 row away (i.e. part of a continuous run)
wst_cont = (wst[2:length(wst)]-wst[1:(length(wst)-1)]<2)[-1] &
(wst[2:(length(wst)-1)]-wst[3:(length(wst))]>-2)
data$wrong_server = T
data$wrong_server[wst[c(F,wst_cont,F)]] = F
}
# NOTE(review): this unconditional overwrite discards everything computed
# above and marks EVERY row as wrong_server = T, effectively disabling the
# function - confirm whether this is a deliberate kill-switch before
# removing it.
data$wrong_server = T
return(data)
}
#Check measure period (step time) per one tt per charge=========================
# Estimate the measurement period (seconds between consecutive records)
# for one TT within a single charge cycle.
# The slope of a linear fit of the known timestamps against their row
# indexes gives the mean spacing; the estimate is then snapped to one of
# the two hardware step times (3600 s or 5400 s).
# temp_data   - data frame with a `time` column (NA where unknown)
# verboseFlag - passed through to fun_log for progress reporting
# Returns a single numeric step time in seconds (default 3600 when fewer
# than two timestamps are known).
check_measure_period = function(temp_data, verboseFlag){
known_idx = which(!is.na(temp_data$time))
if (length(known_idx) > 1) {
known_times = temp_data$time[known_idx]
rti = known_idx
# Slope of time ~ row-index is the average record spacing
fit = lm(known_times ~ rti)
step_time = plyr::round_any(summary(fit)[[4]][2, 1], 100)
fun_log(verboseFlag = verboseFlag, c("Estimated step time is ",step_time))
# Snap to the nearest supported hardware period
step_time = if (step_time < 4000) 3600 else 5400
fun_log(verboseFlag = verboseFlag, c(" will be used step time ", step_time, "\n"))
} else {
step_time = 3600
}
return(step_time)
}
#Time correction - good old=====================================================
# Reconstruct the `time` column for one TT ("good old" time correction):
# 1. split records into charge cycles (a voltage jump > 0.3 V = recharge);
# 2. per cycle, seed `time` from trusted device datetimes, else correct
#    server times;
# 3. estimate the record period and fill the gaps by stepping forward /
#    backward from the trusted anchors.
# Side columns written: charge, corr_time (1=unknown, 2=device time,
# 3=server time, 4=pre-1975 datetime i.e. lost connection), lost_connect,
# step_time.
# NOTE(review): the foreach(...) %dopar% loops below rely on assignments
# to temp_data / tt_one inside the loop body being visible afterwards;
# with a true parallel backend those assignments happen in worker copies
# and would be lost. Presumably run with a sequential backend - confirm.
# tt_one      - all records of one TT
# verboseFlag - passed through to fun_log
# Returns tt_one with `time` as POSIXct (Europe/Moscow).
extrapolate_tt_date = function(tt_one, verboseFlag){
fun_log(verboseFlag = verboseFlag, c("Starting data extrapolation \n"))
if(tt_one %>% nrow <2 ){
#print(tt_one)
}
if(tt_one$volt %>% length > 2){
fun_log(verboseFlag = verboseFlag, c("Starting time correction for ", tt_one$id %>% unique,"\n"))
# TRUE where battery voltage rose > 0.3 V vs previous record (recharge)
bat_grow = c(F,
tt_one$volt[2:(nrow(tt_one))] - tt_one$volt[1:(nrow(tt_one)-1)] > 0.3)
fun_log(verboseFlag = verboseFlag, c("Found ",which(bat_grow) %>% length,"recharges","\n"))
# Cumulative recharge count gives a charge-cycle label per record
tt_one$charge = cumsum(bat_grow)+1
fun_log(verboseFlag = verboseFlag, c("So, charges states are", tt_one$charge %>% unique(),"\n"))
tt_one$time = NA
tt_one$corr_time = 1
foreach (ci = tt_one$charge %>% unique) %dopar% {
#temp_data = tt_one[tt_one$charge == ci,]
temp_data = tt_one %>% filter(charge == ci)
# Seed `time` from device datetimes that are not flagged wrong
temp_data$time[!temp_data$wrong_time] =
temp_data$datetime[!temp_data$wrong_time]
fun_log(verboseFlag = verboseFlag, c("Correct dates ",which(!temp_data$wrong_time) %>% length,
"correct times ",which(!is.na(temp_data$time)) %>% length,"\n"))
temp_data$corr_time[!temp_data$wrong_time] = 2
check_serv_time_too_high_according_it_record = function(x, dt){
# Check if record number of record with current serv_time is
# bigger of any record number of measurements with correct unix time
# which is less than current serv_time
# Index of element in vector of correct unix time which are
# less than current serv_time
corr_unix_time_index = which(dt$serv_datetime[x] >
dt$datetime[!dt$wrong_time])
#Correct unix time which are less than current serv_time
corr_unix_time = dt$datetime[!dt$wrong_time[corr_unix_time_index]]
# Indexes of corr_unix_time elements in whole datetime variable
datetime_index = which(dt$datetime %in% corr_unix_time)
# If any record numbers of measurement with correct unix time, which
# is less than the current serv_time, is bigger of record number of
# current serv_time than this serv_time is INCORRECT
return (any(dt$rec_num[datetime_index] > dt$rec_num[x]))
}
#if (any(!temp_data$wrong_server)){
#  cor_serv_records = which(!temp_data$wrong_server)
#  wrong_serv_records = cor_serv_records[cor_serv_records %>%
#    map_lgl(check_serv_time_too_high_according_it_record, dt = tt_one)]
#  temp_data$wrong_server[wrong_serv_records] = T
#}
if(length(temp_data$serv_datetime[temp_data$wrong_server == F])>0){
# if there is any correct date in charge period don't use serv_date
# but for the case when datetime is wrong use correct server time
if (!any(!temp_data$time%>%is.na)) {
only_server_time_ok = temp_data$wrong_time ==
T & temp_data$wrong_server == F
temp_data$time[only_server_time_ok] =
temp_data$serv_datetime[only_server_time_ok]
temp_data$corr_time[only_server_time_ok] = 3
}
}
# datetime < 157680000 (~1975) means the TT never got a time fix:
# mark as lost connection
temp_data$corr_time[temp_data$datetime <
157680000 & temp_data$time %>% is.na] = 4
temp_data$lost_connect = F
temp_data$lost_connect[temp_data$datetime <
157680000 & temp_data$time %>% is.na] = T
real_time = which(!is.na(temp_data$time))
step_time = check_measure_period(temp_data, verboseFlag)
tt_one$step_time = step_time
fun_log(verboseFlag = verboseFlag, c("Charge:",ci," step time:",step_time,
", correct dates found",real_time %>% length,"\n"))
#If there are any correct time inside this data
if (real_time %>% length > 1){
#If last element does not have correct time - set it as correct
if (real_time[length(real_time)] != length(temp_data$time)) {
real_time = c(real_time,length(temp_data$time))
}
#Extrapolating back from the first element with correct time
foreach( i = (real_time[1]-1):1) %dopar% {
temp_data$time[i] = temp_data$time[i+1]-step_time
}
fun_log(verboseFlag = verboseFlag, c("Found measurements with correct timestemp:",
length(real_time)-1, "\n"))
# Fill each gap between consecutive anchors by stepping forward
for( i in 1:(length(real_time)-1)) {
if(real_time[i+1] == real_time[i]+1){
next(i)
}
foreach( j = real_time[i]:(real_time[i+1]-2)) %dopar% {
temp_data$time[j+1] = temp_data$time[j]+step_time
}
}
tt_one = tt_one %>% as.data.frame()
temp_data = temp_data %>% as.data.frame()
# Write the corrected cycle back into the full per-TT frame
tt_one[tt_one$charge == ci,] = temp_data
fun_log(verboseFlag = verboseFlag, c(tt_one$id %>% unique(), " charge ", ci,"was filled","\n"))
}
}
tt_one$time = tt_one$time %>% as.POSIXct(origin="1970-01-01 00:00:00",
tz="Europe/Moscow") }
else {
# Too few records to do anything: single pseudo-cycle, unknown times
tt_one$charge=1
tt_one$time = NA
}
return(tt_one)
}
# Fix bad time extrapolations for one TT: find places where `time` goes
# BACKWARD as the row number grows, invalidate those timestamps (and every
# later timestamp still below the pre-drop time), then redo the gap-filling
# per charge cycle using the previously estimated step_time.
# NOTE(review): the foreach %dopar% loops mutate local variables
# (real_bad_extrap_index, temp_data) and rely on those mutations being
# visible after the loop - only true with a sequential backend; confirm.
# tt_one      - all records of one TT (needs time, charge, step_time)
# verboseFlag - passed through to fun_log
# Returns tt_one with `time` re-filled as POSIXct (Europe/Moscow).
correct_extrap_date = function(tt_one, verboseFlag){
fun_log(verboseFlag = verboseFlag, c("TT ", tt_one$id %>% unique, " fixing wrong corrections","\n"))
#Check if date goes back in time with growing row number
ends = nrow(tt_one)
bad_extrap_index =c(F,(tt_one$time[2:ends] - tt_one$time[1:(ends-1)])<0)
# If tt data is very small in size the previous step can generate
# bad_extrap_index longer than the tt data itself, so NA produced
# lets remove them
bad_extrap_index = na.exclude(bad_extrap_index)
real_bad_extrap_index = which(bad_extrap_index)
foreach (bi = which(bad_extrap_index)) %dopar% {
before_problem = bi - 1
time_before_drop = tt_one$time[before_problem]
# Also invalidate every later record whose time is still before the
# pre-drop time (author-flagged as "#wrong" - verify this logic)
real_problem = which(c(rep(F, bi),tt_one$time[bi:nrow(tt_one)] < time_before_drop))#wrong
real_bad_extrap_index = c(real_bad_extrap_index,real_problem)
}
bad_extrap_index = rep(F,length(tt_one$time))
bad_extrap_index[real_bad_extrap_index] = T
bad_extrap_index = bad_extrap_index[1:nrow(tt_one)]
#Set this times and datetimes to 0
fun_log(verboseFlag = verboseFlag, c("Found ",which(bad_extrap_index)%>% length," bad extrapolation, fixing","\n" ))
#print(bad_extrap_index)
tt_one$time[bad_extrap_index] = NA
tt_one$wrong_time[bad_extrap_index] = TRUE
tt_one$wrong_server[bad_extrap_index] = TRUE
#tt_one$datetime[bad_extrap_index] = as.POSIXct(0, origin="1970-01-01 00:00:00", tz="Europe/Moscow")
#tt_one$serv_datetime[bad_extrap_index] = as.POSIXct(0, origin="1970-01-01 00:00:00", tz="Europe/Moscow")
fun_log(verboseFlag = verboseFlag, c("Removed incorrect time indexes","\n"))
#Restart extrapolation
for (ci in tt_one$charge %>% unique){
temp_data = tt_one %>% filter(charge == ci)
if(!any(is.na(temp_data$time))) {
fun_log(verboseFlag = verboseFlag, c("Nothing to correct in ",ci," charge, skipping","\n"))
next(ci)
}
real_time = which(!is.na(temp_data$time))
# step_time was estimated earlier by check_measure_period()
step_time = temp_data$step_time[1]
fun_log(verboseFlag = verboseFlag, c("Charge:",ci," step time:",step_time,
", correct dates found",real_time %>% length,"\n"))
#If there are any correct time inside this data
if (real_time %>% length > 1){
#If last element does not have correct time - set it as correct
if (real_time[length(real_time)] != length(temp_data$time)) {
real_time = c(real_time,length(temp_data$time))
}
if (real_time[1]>2){
#Extrapolating back from the first element with correct time
fun_log(verboseFlag = verboseFlag, c("Filling backward\n"))
for( i in (real_time[1]-1):1) {
temp_data$time[i] = temp_data$time[i+1]-step_time
}
}
fun_log(verboseFlag = verboseFlag, c("Found measurements with correct timestamp:",
length(real_time)-1, "\n"))
# Fill each gap between consecutive anchors by stepping forward
for( i in 1:(length(real_time)-1)) {
if(real_time[i+1] == real_time[i]+1){
next(i)
}
foreach( j = real_time[i]:(real_time[i+1]-2)) %dopar% {
temp_data$time[j+1] = temp_data$time[j]+step_time
}
}
tt_one[tt_one$charge == ci,] = temp_data
fun_log(verboseFlag = verboseFlag, c(tt_one$id %>% unique(), " charge ", ci,"was filled","\n"))
}
}
tt_one$time = tt_one$time %>% as.POSIXct(origin="1970-01-01 00:00:00",tz="Europe/Moscow")
return(tt_one)
}
# Recover timestamps for TTs whose whole charge cycle has no usable time,
# by cross-correlating their air-temperature series against other TTs on
# the same site: the lag of the best |CCF| match gives the record-number
# shift, and the matched TT's timestamps are copied over with that shift.
# NOTE(review): lag_table is grown inside foreach(...) %dopar%; with a
# parallel backend those rbind() assignments happen in worker copies and
# lag_table would stay empty - presumably run sequentially; confirm.
# NOTE(review): charges are hard-coded to 1:6 - TTs with more recharge
# cycles are not processed; confirm this bound.
# data        - site-wide TT data (needs id, charge, tair, time, rn)
# verboseFlag - passed through to fun_log
# Returns the data with recovered times, re-run through correct_extrap_date.
correct_time_ts_shift_matching = function(data, verboseFlag){
#data = tt_data_ec
fun_log(verboseFlag = verboseFlag, c("Starting correct_time_ts_shift_matching \n"))
# Best absolute cross-correlation between two series and its lag
abs.max.ccf <- function(a,b) {
d <- ccf(a, b, plot=FALSE, lag.max=length(a)-5)
cor <- d$acf[,,1]
abscor <- abs(d$acf[,,1])
lag <- d$lag[,,1]
abs.cor.max <- abscor[which.max(abscor)]
abs.cor.max.lag <- lag[which.max(abscor)]
return(c( abs.cor.max, abs.cor.max.lag))
}
lag_table = data.frame()
foreach(icharge = 1:6) %dopar% {
for(iid in (data$id %>% unique)) {
ttsi = data[data$id == iid & data$charge == icharge,]
# Series shorter than 64 records give unreliable CCF - skip
if(nrow(ttsi)<64){ next()}
for(jid in (data$id %>% unique)) {
if(jid == iid){next()}
ttsj = data %>% filter(id == jid, charge == icharge)
print(nrow(ttsj))
if(nrow(ttsj)<64){ next() }
print(all(is.na(ttsj$time)))
# Donor TT must itself have some known times
if(all(is.na(ttsj$time))) { next()}
cor_time = which(!is.na(ttsj$time)) %>% length
tsi = ts(ttsi$tair, start=1, end = nrow(ttsi), frequency = 1)
tsj = ts(ttsj$tair, start=1, end = nrow(ttsj), frequency = 1)
lag = abs.max.ccf(tsi,tsj)
lag_row = data.frame(iid, jid, icharge, cor=lag[1], lag = lag[2], cor_time)
print("lag_row")
print(lag_row)
lag_table = rbind(lag_table, lag_row)
print(names(lag_table))
}
}
}
print("end")
print(lag_table)
# Keep, per (TT, charge), the donor with the highest correlation
result_table = lag_table %>% group_by(iid, icharge) %>% summarise(
MaxCor = max(cor), jid = jid[which.max(cor)], lag = lag[which.max(cor)],
cor_time = cor_time[which.max(cor)]
) %>% as.data.frame()
datac = data
foreach(i = 1:nrow(result_table)) %do% {
iid = result_table$iid[i]
jid = result_table$jid[i]
lag = result_table$lag[i]
icharge = result_table$icharge[i]
# Only fill cycles with NO known time at all
if(all(is.na(
datac%>%filter(id == iid, charge == icharge) %>% select(time)))){
subj = datac%>%filter(id == jid & charge == icharge)
subi = datac%>%filter(id == iid & charge == icharge)
# Shift donor row numbers by the CCF lag and copy matching times
time_index = match(subi$rn,subj$rn+lag)
subi$time = subj$time[time_index]
datac[datac$id == iid & datac$charge == icharge,] = subi
}
}
fun_log(verboseFlag = verboseFlag, c("Starting correct_extrap_date inside correct_time_ts_shift_matching \n"))
dataec = datac %>% group_by(id) %>%
do(correct_extrap_date(., verboseFlag)) %>% as.data.frame
fun_log(verboseFlag = verboseFlag, c("Stopped correct_time_ts_shift_matching \n"))
return(dataec)
# ggplot()+
#   geom_point(data = filter(data, id=="218A0060", charge == 1), aes(x= rn, y=tair), color = 1)+
#   geom_point(data = filter(data, id=="218A0178", charge == 1), aes(x= rn-23, y=tair), color = 2)
# ggplot(data = dataec)+
#   geom_point(aes(x=rn, y=tair, color=charge,shape=is.na(time)),size=.1)+
#   facet_wrap(~id)
}
#Calculating everything for one site - returns two tables=======================
# Calculate everything for one site: download/read cloud data, merge with
# locally backed-up TT exports, deduplicate, correct timestamps, join the
# site description table, and derive sap-flux / spectral variables.
# server      - vector of cloud ttcloud.txt URLs for the site
# installation_start - unix time of installation (currently unused here)
# import_folder_name - folder with TT* backup files, or NULL for none
# first_imported_dates_reconstructed - flag (currently unused here)
# descr_file  - CSV with the site description table (must contain id, Site)
# sitename    - site name used for caching and filtering, or NULL
# verboseFlag - logging mode for fun_log ("mem" also returns message_log)
# Returns list(tt_imported, SITEdata) - raw merged data and derived data -
# plus message_log when verboseFlag == "mem".
TTcalc_site = function(server,
installation_start,
import_folder_name,
first_imported_dates_reconstructed,
descr_file,
sitename,
verboseFlag){
# Example of possible input
# server = c("http://naturetalkers.altervista.org/C18A0031/ttcloud.txt",
# "http://naturetalkers.altervista.org/C18A0025/ttcloud.txt")
# installation_start = 1556845000
# import_folder_name="RUDN"
# first_imported_dates_reconstructed = F
# descr_file = "rudn_desc.csv"
temp_serv_dataset = data.frame()
for (serv_url in server) {
i = which(server %in% serv_url)
# One cache file per server per day: skip download if already synced today
dest_file_name = paste("db_sync/dbsync-",sitename,"-",i,"-",Sys.Date(),".txt", sep="")
if(!any(str_c("db_sync/",dir("db_sync/")) == dest_file_name) ){
fun_log(verboseFlag = verboseFlag, c("No sync today - downloading file \n"))
download.file(url = serv_url, destfile = dest_file_name, method = "curl",mode="character")
} else {
fun_log(verboseFlag = verboseFlag, c("Sync was done today, no download \n"))
}
temp_read = suppressWarnings(suppressMessages(
read_delim(dest_file_name, col_names = FALSE, comment = "#",delim=";", col_types = "ccdddddddddddddddddd")))
# `s` marks which server/cloud the row came from
temp_read$s = i
temp_serv_dataset = rbind( temp_serv_dataset,temp_read)
fun_log(verboseFlag = verboseFlag, c(as.character(serv_url), "data read.","\n"))
}
tt_data = TTBasicCalc(temp_serv_dataset, verboseFlag)
#tt_basic_data = lazy_dt(tt_data)
#If there are duplicates - remove
# FIX: the previous one-liner tt_data[-(... %>% which), ] dropped ALL rows
# when no duplicates were found, because df[-integer(0), ] selects zero
# rows. Only subset when duplicates actually exist.
dup_rows = tt_data[,c("volt","dist","pulses","Hz","rh","tair",
"VPD","gx","gx2","gy","gy2")] %>% duplicated %>% which
if (length(dup_rows) > 0) {
tt_data = tt_data[-dup_rows, ]
}
#Removing rows with corrupted id
corrupted_id = unique(tt_data$id)[str_which(unique(tt_data$id),":")]
if(corrupted_id %>% length() > 0){
fun_log(verboseFlag = verboseFlag, c("Found corrupted id: ", corrupted_id, " removing \n"))
tt_data = tt_data %>% filter(!(id %in% corrupted_id))
}
#If data for one TT came from different clouds, we will try to give it some
#order according to appearance on server since row order and record numbers
#on different clouds will be different.
tt_data = tt_data %>% group_by(id) %>%
arrange(serv_datetime) %>% as.data.frame
#Adding flagging variable showing that this data was obtained from server
tt_data$imported = F
# TT sends record to cloud one by one, and clouds do this in same way,
# so on server records should appear in correct order.Between them could be
# gaps, but we can detect them by battery charge discontinuties
#tt_data = tt_data %>% group_by(id) %>% mutate(nrec_num = 1:length(rec_num))
fun_log(verboseFlag = verboseFlag, c("Checking if there are some back up data data? \n"))
if(!is.null(import_folder_name)){
fun_log(verboseFlag = verboseFlag, c("Yep, there are some \n"))
data_bc = data.frame()
import_folder_name = paste("",import_folder_name,sep="")
for(file in list.files(import_folder_name,pattern="TT*")){
temp = suppressWarnings(suppressMessages(
read_delim(paste(import_folder_name,file, sep="/"),
col_names = FALSE, delim=";", skip=0)
))
fun_log(verboseFlag = verboseFlag, c("Dimmension of the file", file,":",dim(temp)[1]," - rows, ",
dim(temp)[2]," - columns.","\n"))
data_bc = rbind(data_bc, temp)
fun_log(verboseFlag = verboseFlag, c("Binded array size", dim(data_bc)))
}
fun_log(verboseFlag = verboseFlag, c("Binded array size", dim(data_bc)))
# Backup files have no server timestamp column - prepend a dummy one so
# TTBasicCalc sees the same layout as cloud data
first_column = paste("10.05.20 12:55:19",data_bc$X1, sep=",")
#imported data have no server time,so we should add some
data_bc$X1 = first_column
#source variable will be created from that
# NOTE(review): `i` here is the last value left over from the server loop
# above - fragile, but kept as-is
data_bc$s=i+1
#print(as.data.frame(data_bc))
tt_bc = TTBasicCalc(data_bc , verboseFlag = verboseFlag)
timestep = 3600
#Adding flagging variable showing that this data was obtained from impport
tt_bc$imported = T
fun_log(verboseFlag = verboseFlag, c("Basic calculated array size", dim(tt_bc),"\n"))
#Inserting exported(backuped) from TT data into server data,
#Very slow and stupid way, but there were no time for elegance
#
tt_imported = data.frame()
# logging cases into file or memory
log_imp = data.frame(id = "first", starts = NA, ends = NA,
var = 0, tt = 0,bc = 0,stringsAsFactors = FALSE)
print(names(tt_bc))
# Per TT: splice the backup (bc) rows into the server (tt) rows at the
# region where the two overlap (matched on measurement columns)
for (ids in unique(tt_data$id)){
bc = tt_bc %>% filter(id == ids) %>% as.data.frame
tt = tt_data %>% filter(id == ids) %>% as.data.frame
fun_log(verboseFlag = verboseFlag, c("Data from the TT ", ids," imported has ",
dim(bc)[1]," - rows, ",dim(bc)[2]," - columns.","\n" ))
fun_log(verboseFlag = verboseFlag, c("Data from the TT ", ids," on server has ",
dim(tt)[1]," - rows, ",dim(tt)[2]," - columns.","\n" ))
fun_log(verboseFlag = verboseFlag, c("Starting import of directly extracted from TT data","\n"))
u = tt
#if(dim(tt_imported)[1]>0){
if(length(tt$volt)>0){
#print(ids)
# Columns excluded from row matching (times, ids and derived indexes
# differ between server and backup copies of the same record)
badnames = c("datetime","wrong_time","id","type","incr","rec_num",
"serv_datetime","dist","source","id_rec",
"b_O_600","b_Y_570","b_W_860","b_V_810","b_U_760","b_R_610",
"NDVI","EVI","EVI2","Nr","Rr","Br","Gr","PSSR","PSND",
"MTCI","PRI","b_R_650","b_G_550","b_B_500","b_V_450",
"b_T_730","b_S_680","VARI","SIPI","PSRI","CARI","MCARI",
"CIg","CIr","CRI1","CRI2","imported")
names_for_compare = names(tt)[!(names(tt) %in% badnames)]
frank=data.frame()
logs=data.frame()
if(dim(bc)[1]>10){ #if imported data from tt is to small in most cases it's crappy and better just to skip it
matches = match(do.call("paste",tt[, names_for_compare]),
do.call("paste", bc[, names_for_compare]))
msize = length(matches)
# edges: -1 where server rows stop matching backup, +1 where they resume
edges = c((is.na(matches[2:msize]) - is.na(matches[1:(msize-1)])),0)
if(any(edges == -1) & any(edges == 1) ) {
starts = which(edges == -1)
ends = which(edges == 1)
subset1 = 1:starts
subset2 = ends:msize
frank = rbind(tt[subset1,], bc, tt[subset2,])
#logs = data.frame(id = as.character(ids), starts = starts, ends = ends,
#                 var = 1, tt = dim(tt)[1],bc = dim(bc)[1],stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 1 intersect, start is ", starts," end is ",ends,"bc size ",dim(bc)[1]," tt size ",dim(tt)[1],"\n"))
} else {
if(any(edges == -1)){
starts = which(edges==-1)
frank = rbind(tt[1:starts,], bc)
#logs = data.frame(id = as.character(ids), starts = starts, ends = NA,
#                 var = 2, tt = dim(tt)[1],bc = dim(bc)[1],stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 2 add to end, start is ", starts,"bc size ",dim(bc)[1]," tt size ",dim(tt)[1],"\n"))
}
if(any(edges == 1)){
ends = which(edges==1)
frank = rbind( bc, tt[ends:msize,])
#logs = data.frame(id = as.character(ids), starts = NA, ends = ends,
#                 var = 3, tt = dim(tt)[1],bc = dim(bc)[1],stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 3 add to begin, end is ",ends,"bc size ",dim(bc)[1]," tt size ",dim(tt)[1],"\n"))
}
}
} else {
frank = tt
#logs = data.frame(id = as.character(ids), starts = NA, ends = NA,
#                 var = 0, tt = dim(tt)[1],bc = dim(bc)[1], stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 0 no backup","\n"))
}
u=frank
#matches %>% print
#fun_log(verboseFlag = verboseFlag, c("Found ", matches %>% length,
#     " matches between importing and server data for TT ", ids,"\n")
} else {
u = bc
#logs = c(id = ids, starts = NA, ends = NA, var = 4)
fun_log(verboseFlag = verboseFlag, c("Case 4, only backup result is ", dim(u)[1]))
}
tt_imported = rbind(tt_imported,u)
#log_imp = rbind(log_imp,logs)
} # end of cycling through ids instruction
# if no import was done just take tt_data
#print(log_imp)
} else {
fun_log(verboseFlag = verboseFlag, c("Nope, nothing to import \n"))
tt_imported = tt_data
}
# rn: continuous per-TT row number used as record index downstream
tt_imported = tt_imported %>% group_by(id) %>% mutate(rn = 1:length(volt))
#DONE Add flagging variable to mark source of data - imported from backup or
#got directly from server
#TODO Check what is wrong with Timiryazev
#TODO Add to time extrapolation option to understand measurement time
########NOT USING NOW
#Esoterical way to flag data before and after reseting -
#if rec_num falls more than 1000 records - it means that this TT was reseted
#(its magic because cloud was not reseted, but record numbers in
# cloud still drops)
#tt_data$before_reset = c(T, cumsum(tt_data$rec_num[2:length(tt_data$rec_num)]
#-tt_data$rec_num[1:(length(tt_data$rec_num)-1)] < -1000) < 1 )
#Finding last record number before reset and constructing new variable with
#continuous record number
#d = tt_data %>% filter(before_reset == T) %>% group_by(id)
#%>% summarise (max = max(rec_num))
#tt_data = left_join(tt_data,d, by="id")
#tt_data = tt_data %>% group_by(id) %>%
#mutate(nrec_num = if_else(before_reset, rec_num, rec_num+max))
#Function to detect recharge of battery - find rise on more than 0.5 volts
tt_imported = as.data.frame(tt_imported)
#print(head(tt_imported))
# Marking server time wrong if there are more than one measurement per hour
tt_imported$years = year(tt_imported$serv_datetime)
tt_imported$doys = yday(tt_imported$serv_datetime)
tt_imported$hours = hour(tt_imported$serv_datetime)
#tt_imported = lazy_dt(tt_imported)
tt_imported = tt_imported %>% group_by(id,years,doys, hours) %>%
mutate(wrong_server = length(doys)>1) %>% as.data.frame
#Marking recharge
#tt_imported = lazy_dt(tt_imported)
tt_imported = tt_imported %>% group_by(id) %>% arrange(rn) %>%
mutate(recharge_flag =ischarged(.data))
#Calculating charge cycles
tt_imported = tt_imported %>% group_by(id) %>% arrange(rn) %>%
mutate(charge = cumsum(recharge_flag))
#Simple detect of clearly wrong datetimes
tt_imported = tt_imported %>% group_by(id) %>% arrange(rn) %>%
mutate(cor_dt = (datetime > min(serv_datetime)) & (datetime <
max(serv_datetime)))
#Detecting correct server time - first assumption -
#it should one measurement per hour
tt_imported = tt_imported %>% group_by(id, years, doys,hours) %>%
mutate(serv_cor = length(serv_datetime) < 2)
timestep = 3600
#tt_imported = tt_imported %>% group_by(id, charge) %>%
#  do(extrapolate_dates(., timestep))%>% as.data.frame
# NOTE(review): mark_continious_serv_time is applied twice below -
# presumably a leftover duplication; it is idempotent so kept as-is
tt_imported = tt_imported %>% group_by(id) %>%
do(mark_continious_serv_time(.)) %>% as.data.frame
#tt_imported = lazy_dt(tt_imported)
tt_imported = tt_imported %>% group_by(id) %>%
do(mark_continious_serv_time(.)) %>% as.data.frame
SITE_list = suppressWarnings(suppressMessages(
read_delim(descr_file,col_names = T, delim=",")))
if (!is.null(sitename)){
SITE_list = SITE_list%>%filter(Site == sitename)
# FIX: was `SITE_list %>% length < 1` - length() of a data frame is its
# column count, so the warning never fired; nrow matches the parallel
# check on tt_imported below
if(SITE_list %>% nrow <1) {
fun_log(verboseFlag = verboseFlag, c(
"Looks like you have error in site name.\n"
))
}
}
tt_imported = tt_imported %>% filter(id %in% SITE_list$id)
if(tt_imported %>% nrow <1) {
fun_log(verboseFlag = verboseFlag, c(
"Looks like you have error in site name.\n"
))
}
fun_log(verboseFlag = verboseFlag, c("Starting extrapolation of dates \n"))
tt_data_e = tt_imported %>% group_by(id) %>%
do(extrapolate_tt_date(., verboseFlag)) %>% as.data.frame
fun_log(verboseFlag = verboseFlag, c("Starting correction of extrapolated dates \n"))
tt_data_ec = tt_data_e %>% group_by(id) %>%
do(correct_extrap_date(., verboseFlag)) %>% as.data.frame
fun_log(verboseFlag = verboseFlag, c("Starting correct_time_ts_shift_matching of extrapolated dates \n"))
#tt_data_ec = correct_time_ts_shift_matching(tt_data_ec, verboseFlag)
#tt_data_e = tt_imported
tt_data_e = tt_data_ec %>% select(-c(years,doys,hours))
# Calendar variables derived from the corrected time
tt_data_e$year = year(tt_data_e$time)
tt_data_e$week = week(tt_data_e$time)
tt_data_e$doy = yday(tt_data_e$time)
tt_data_e$hour = hour(tt_data_e$time)
tt_data_e$min = minute(tt_data_e$time)
# Daily per-TT sap-flow variables (Granier-style pulse velocity u)
tt_data_e = tt_data_e %>% group_by(id,year,doy) %>%
mutate(dT = nt2 - t2, na.rm = T) %>%
mutate(dTa = nt2 - t2 - nt1 + t1, na.rm = T) %>%
mutate(dTm = max(dT, na.rm=T)) %>%
mutate(dTam = max(dTa, na.rm=T)) %>%
mutate(maxTd = max(dist), na.rm = T) %>%
mutate(meanTd = mean(dist), na.rm = T) %>%
mutate(minTd = min(dist), na.rm = T) %>%
mutate(u = 119*(10^-3)*(dTm/dT - 1)^1.231, na.rm = T) #l m-2 s-1
SITEdata = left_join(tt_data_e,SITE_list,by="id")
SITEdata = SITEdata %>% mutate(diam = DBH / pi)
SITEdata = SITEdata %>%
mutate(Flux = u*3600*(diam^1.8777)*0.755/10000, na.rm = T)
#SITEdata = BEFadd(SITEdata, verboseFlag)
#Spectrometer calibration
SITEdata = TTR_add(SITEdata, verboseFlag)
if(verboseFlag =="mem"){
return(list(tt_imported, SITEdata,message_log))
}else {
return(list(tt_imported, SITEdata))
}
}
#Adding Biomass calculation data ==============================================
# Biomass calculated based on IPCC 2006 formula C = [V * D * BEF] * (1 + R) * CF
# BCEF = BEF * D is taken from paper doi:10.3390/f9060312 Dmitry Schepaschenko
# Improved Estimates of Biomass Expansion Factors for Russian Forests
# Big table of data from this paper is used in this function
# Add biomass-expansion-factor (BEF) columns for biomass growth calculation.
# Biomass follows the IPCC 2006 formula C = [V * D * BEF] * (1 + R) * CF;
# the BCEF = BEF * D table comes from doi:10.3390/f9060312 (Schepaschenko
# et al., "Improved Estimates of Biomass Expansion Factors for Russian
# Forests") and is read from data/BEF.csv.
# data        - site data frame with Species, age_group_index and zone.
# verboseFlag - passed through to fun_log for progress reporting.
# Returns `data` with BEF columns joined on (Genum, zone, age_group_indexes).
BEFadd = function(data, verboseFlag){
fun_log(verboseFlag = verboseFlag, c("Adding BEF data for biomass growth calculation \n"))
# FIX: removed leftover debug line `data = AllData`, which overwrote the
# `data` argument with a global object and made the function ignore its
# input entirely.
BEFdata = read_delim("data/BEF.csv", delim = ";")
# Genus = first word of the binomial species name
data = data %>% mutate(genus = str_split(Species, " ", simplify = T)[,1])
# The BEF table stops at age group IV: collapse V and VI into IV
data = data %>% mutate(age_group_indexes = recode(age_group_index, V = "IV", VI = "IV"))
# Map genera absent from the BEF table onto its aggregate classes:
# Other hard deciduous: Fraxinus, Acer; Other soft deciduous: Salix, Tilia
data = data %>% mutate(Genum = recode(genus, Fraxinus = "Other hard deciduous", Acer = "Other hard deciduous",
Salix = "Other soft deciduous", Tilia = "Other soft deciduous"))
data = left_join(data, BEFdata, by =c("Genum","zone","age_group_indexes"))
# Drop the helper columns used only for the join
data = data %>% select(-genus,-age_group_indexes )
return(data)
}
#Adding TTR connected variables=================================================
#IMPORTANT - we are assuming that one site is a group of TTs installed on trees
#which could be assumed to be in a same conditions and that there is one TTR per site
# Add TTR (reference TreeTalker) variables to site data.
# IMPORTANT - we are assuming that one site is a group of TTs installed on
# trees in comparable conditions and that there is ONE TTR per site. The
# TTR's hourly mean band values are joined back onto every row, and LAI is
# derived from the canopy/reference light ratio via the Beer-Lambert law.
# data        - site data frame (Species, spectral bands, Site/doy/hour).
# verboseFlag - passed through to fun_log.
# Returns `data` with TT* reference columns and LAInir/LAIb added, or the
# unchanged input when the site has no TTR rows.
TTR_add = function(data, verboseFlag){
fun_log(verboseFlag = verboseFlag, c("Starting site TTR data calculation \n"))
# na.rm = TRUE: a single NA in Species would otherwise make any() return
# NA and crash the if()
if(any(data$Species == "TTR", na.rm = TRUE)){
names(data)[names(data) == "id.x"] = "id"
# Clamp negative (physically impossible) calibrated band values to zero.
# NA entries are left untouched by the logical-subscript assignment.
band_cols = c("b_R_650c","b_O_600c","b_Y_570c","b_G_550c","b_B_500c","b_V_450c",
"b_W_860c","b_V_810c","b_U_760c","b_T_730c","b_S_680c","b_R_610c")
for (bc in band_cols) {
data[[bc]][data[[bc]] < 0] = 0
}
# Hourly site-level means of the reference (TTR) readings
TTRdatasum = data %>%filter(Species == "TTR") %>% group_by(Site,doy,hour) %>% summarise(
TTair = mean(tair,na.rm = T),
TTrh = mean(rh, na.rm = T),
TTR_650c = mean(b_R_650c,na.rm = T),
TTR_600c = mean(b_O_600c,na.rm = T),
TTR_570c = mean(b_Y_570c,na.rm = T),
TTR_550c = mean(b_G_550c,na.rm = T),
TTR_500c = mean(b_B_500c,na.rm = T),
TTR_450c = mean(b_V_450c,na.rm = T),
TTR_860c = mean(b_W_860c,na.rm = T),
TTR_810c = mean(b_V_810c,na.rm = T),
TTR_760c = mean(b_U_760c,na.rm = T),
TTR_730c = mean(b_T_730c,na.rm = T),
TTR_680c = mean(b_S_680c,na.rm = T),
TTR_610c = mean(b_R_610c,na.rm = T),
TTR_650 = mean(b_R_650,na.rm = T),
TTR_600 = mean(b_O_600,na.rm = T),
TTR_570 = mean(b_Y_570,na.rm = T),
TTR_550 = mean(b_G_550,na.rm = T),
TTR_500 = mean(b_B_500,na.rm = T),
TTR_450 = mean(b_V_450,na.rm = T),
TTR_860 = mean(b_W_860,na.rm = T),
TTR_810 = mean(b_V_810,na.rm = T),
TTR_760 = mean(b_U_760,na.rm = T),
TTR_730 = mean(b_T_730,na.rm = T),
TTR_680 = mean(b_S_680,na.rm = T),
TTR_610 = mean(b_R_610,na.rm = T))
data = data %>% left_join(TTRdatasum, by=c("Site","doy","hour"))
#LAI according to Beer-Law and light extinction coefficient, look into papers LAI folder
K = 5.2 # light extinction coefficient
data = data %>% mutate(LAInir = -log((b_V_810c+b_W_860c)/(TTR_860c+TTR_810c))/K)
data = data %>% mutate(LAIb = -log((b_V_450c+b_B_500c)/(TTR_450c+TTR_500c))/K)
return(data)
} else {
fun_log(verboseFlag = verboseFlag, c("Looks like your site dont have TTR, returning data without change \n"))
return(data)
}
}
#Exporting site data to excel===================================================
# Export every site in AllData to its own .xlsx workbook (one sheet per TT),
# keeping only the columns in var_list. Files are named "<SiteIndex>.xlsx"
# in the working directory.
# NOTE(review): the foreach %dopar% here only performs side effects
# (write.xlsx) inside each iteration, so it works with a parallel backend;
# the loop's return value is unused.
export_all_to_excel = function(AllData) {
# Columns copied into each sheet
var_list = c("time","id","Species","d","VTA_score","rec_num","tair","rh","VPD",
"theta","psi","phi","gz2","nt1","NDVIc","EVIc","VARIc","PRIc","NDVI","EVI","VARI","PRI","Rr",
"Br","Gr","Flux", "TTair","TTrh","LAIb","LAInir")
# Total squared acceleration
AllData = AllData %>% mutate(g2 = gz2+gy2+gx2)
# NOTE(review): ungrouped mutate - W is a single dataset-wide mean, not a
# per-TT value; also g2 and W are not in var_list, so neither is exported.
# Confirm this is intended.
AllData = AllData %>% mutate(W = mean((46000-Hz)/(Hz+46000)*50, na.rm=T))
foreach (site = AllData$SiteIndex %>% unique()) %dopar% {
list_of_datasets = list()
SITEdata = AllData%>%filter(SiteIndex == site)
# One sheet per TT id
for (i in SITEdata$id %>% unique) {
index = which(SITEdata$id %>% unique == i)
TT = SITEdata %>% filter(id == i)
TT = TT[,var_list]
list_of_datasets[[i]] = TT
names(list_of_datasets)[index] = i
}
sitename = site
write.xlsx(list_of_datasets, file = paste(sitename,".xlsx",sep=""))
# dat = loadWorkbook( file = paste(sitename,".xlsx",sep=""))
# desc = readWorkbook(insert_file, sheet=1)
# addWorksheet(dat, "Пояснения")
# writeData(dat,"Пояснения",desc)
# saveWorkbook(dat, paste(sitename,".xlsx",sep=""), overwrite = TRUE)
}
}
# Export one site object to "<sitename>.xlsx": one sheet per TT (columns
# restricted to var_list) plus a description sheet copied from insert_file.
# site_object - list(raw_data, derived_data) as returned by TTcalc_site;
#               element [[2]] (derived data) is exported.
# sitename    - base name of the output workbook.
# insert_file - xlsx whose first sheet is appended as the description page.
export_site_to_excel = function(site_object,sitename="site",
insert_file="RUDN_descr.xlsx") {
# Columns copied into each sheet
var_list = c("time","id","Species","d","VTA_score","rec_num","tair","rh","VPD",
"theta","psi","phi","gz2","nt1","NDVIc","EVIc","VARIc","PRIc","NDVI","EVI","VARI","PRI","Rr",
"Br","Gr","Flux", "TTair","TTrh","LAIb","LAInir")
list_of_datasets = list()
SITEdata = site_object[[2]]
# FIX: this loop used foreach(...) %dopar% while accumulating into
# list_of_datasets; with a parallel backend those assignments happen in
# worker copies and are lost, producing an empty workbook. A sequential
# loop builds the list correctly (and assigning by name makes the extra
# names<- bookkeeping unnecessary).
for (i in SITEdata$id %>% unique) {
TT = SITEdata %>% filter(id == i)
TT = TT[,var_list]
list_of_datasets[[i]] = TT
}
write.xlsx(list_of_datasets, file = paste(sitename,".xlsx",sep=""))
# Append the human-readable description sheet
dat = loadWorkbook( file = paste(sitename,".xlsx",sep=""))
desc = readWorkbook(insert_file, sheet=1)
addWorksheet(dat, "Пояснения")
writeData(dat,"Пояснения",desc)
saveWorkbook(dat, paste(sitename,".xlsx",sep=""), overwrite = TRUE)
}
#Exporting site data to csv folder\=============================================
export_site_to_csv_folder = function(site_object, export_folder = "csv_export") {
  # Dump raw (site_object[[1]]) and processed (site_object[[2]]) tables to CSV,
  # one file per tree id; files from the raw table get a "raw_" prefix.
  # Fixes: paths are built with file.path() instead of setwd()/setwd("..")
  # (which left the process stranded inside export_folder on any error), and
  # the sep=/dec= arguments to write.csv — which write.csv ignores with a
  # warning — are dropped (output is unchanged: comma-separated, "." decimal).
  if (!dir.exists(export_folder)) {
    dir.create(export_folder, recursive = TRUE)
  }
  for (i in 1:2) {
    SITEdata = as.data.frame(site_object[[i]])
    for (t in unique(SITEdata$id)) {
      TT = SITEdata[SITEdata$id == t, , drop = FALSE]
      filename = paste0(as.character(t), ".csv")
      if (i == 1) {
        filename = paste0("raw_", filename)
      }
      write.csv(TT, file = file.path(export_folder, filename))
    }
  }
  return(NULL)
}
#========================================================================================================================
three_sigma_weekly_flagging = function(dt, var_name){
  # Flag values of var_name lying outside mean +/- 3*sd within each
  # (id, year, week) group. Adds a logical column "<var>_f" (TRUE = outlier).
  # Fixes: na.rm = TRUE so a single NA no longer turns the whole group's
  # flags into NA, and the stray sep= argument to paste0 is removed
  # (paste0 has no sep parameter; it was silently pasted as an extra string).
  var_name = enquo(var_name)
  var_name_f = paste0(quo_name(var_name), "_f")
  dtg = dt %>%
    group_by(id, year, week) %>%
    mutate(!!var_name_f :=
             !!var_name > mean(!!var_name, na.rm = TRUE) + 3*sd(!!var_name, na.rm = TRUE) |
             !!var_name < mean(!!var_name, na.rm = TRUE) - 3*sd(!!var_name, na.rm = TRUE)) %>%
    ungroup()
  return(as.data.frame(dtg))
}
#========================================================================================================================
two_sigma_weekly_flagging = function(dt, var_name){
  # Flag values of var_name lying outside mean +/- 2*sd within each
  # (id, year, week) group. Adds a logical column "<var>_f" (TRUE = outlier).
  # Same fixes as three_sigma_weekly_flagging: na.rm = TRUE in mean()/sd()
  # (one NA no longer blanks the whole group) and the meaningless sep=
  # argument to paste0 dropped.
  var_name = enquo(var_name)
  var_name_f = paste0(quo_name(var_name), "_f")
  dtg = dt %>%
    group_by(id, year, week) %>%
    mutate(!!var_name_f :=
             !!var_name > mean(!!var_name, na.rm = TRUE) + 2*sd(!!var_name, na.rm = TRUE) |
             !!var_name < mean(!!var_name, na.rm = TRUE) - 2*sd(!!var_name, na.rm = TRUE)) %>%
    ungroup()
  return(as.data.frame(dtg))
}
#===========Two sigma grouped flagging
two_sigma_grouped_flagging = function(dt, var_name, group_var1, group_var2, suffix){
  # Flag values of var_name outside mean +/- 2*sd within groups defined by two
  # caller-chosen grouping columns; the new logical column is named
  # "<var><suffix>" (TRUE = outlier).
  # Fixes mirror the weekly variants: na.rm = TRUE so one NA does not blank a
  # whole group, and the sep= argument to paste0 (which paste0 does not have)
  # is dropped.
  group_var1 = enquo(group_var1)
  group_var2 = enquo(group_var2)
  var_name = enquo(var_name)
  var_name_f = paste0(quo_name(var_name), suffix)
  dtg = dt %>%
    group_by(!!group_var1, !!group_var2) %>%
    mutate(!!var_name_f :=
             !!var_name > mean(!!var_name, na.rm = TRUE) + 2*sd(!!var_name, na.rm = TRUE) |
             !!var_name < mean(!!var_name, na.rm = TRUE) - 2*sd(!!var_name, na.rm = TRUE)) %>%
    ungroup()
  return(as.data.frame(dtg))
}
out_of_two_sigma = function(var){
  # Return a logical vector: TRUE where var lies outside mean +/- 2*sd
  # (both computed with NAs removed), NA where var is NA. Infinities are
  # treated as missing so they cannot distort the mean/sd.
  var[is.infinite(var)] = NA
  # Hoist the bounds out of the comparison: the original recomputed
  # mean()/sd() four times over the same vector.
  m = mean(var, na.rm = TRUE)
  s = sd(var, na.rm = TRUE)
  return(var > m + 2*s | var < m - 2*s)
}
#===============================================================================
flagged = function(dt){
  # Keep only rows whose every "*_f" outlier flag is FALSE; rows where a flag
  # is TRUE or NA are dropped (matching dplyr::filter's NA-dropping behavior).
  # BUG FIX: the original did filter(!!var == FALSE) with var a character
  # string. Unquoting a string literal compares "name_f" == FALSE — FALSE for
  # every row — so ALL rows were removed regardless of the flags. The flag
  # column must be looked up by name instead; base subsetting (with base
  # endsWith instead of stringr::str_ends) keeps this function dependency-free.
  flag_cols = names(dt)[endsWith(names(dt), "_f")]
  for (var in flag_cols) {
    keep = !is.na(dt[[var]]) & dt[[var]] == FALSE
    dt = dt[keep, , drop = FALSE]
  }
  return(dt)
}
#===============================================================================
# Add a logical rad_flag column to dt: TRUE for records within +/- 4 hours of
# the (weekly-averaged) hour of daily maximum total spectrometer radiation,
# per tree (id). Intended to select midday records for spectral indices.
# NOTE(review): the grouped which()/ifelse() chain below is order-sensitive
# (relies on %>% binding tighter than `<`), so it is documented but not
# restructured.
radiation_flagging = function(dt){
  tt_data_e = dt
  # Total radiation = sum of all 12 spectrometer bands
  dtg = tt_data_e %>% mutate(radiation = b_V_810+b_B_500+b_G_550+b_R_650+b_R_610+
                               b_S_680+b_T_730+b_U_760+b_W_860+b_V_450+b_Y_570+b_O_600)
  # Daily (id, year, doy) maximum of total radiation
  dtg = dtg %>% group_by(id,year,doy) %>%
    mutate(rad_max = max(radiation, na.rm = T))
  dtg = dtg %>% mutate(is_max_hour = (radiation == rad_max)) %>% as.data.frame
  dtg$is_max_hour[is.na(dtg$is_max_hour)] = F
  # Position (0-based row offset within the day) of the daily maximum;
  # NA when the day has no maximum flagged
  dtg = dtg %>%group_by(id,year,doy) %>%
    mutate(max_hour = ifelse(is_max_hour %>% which %>% length > 0,
                             is_max_hour %>% which - 1, NA)) %>% as.data.frame
  # Weekly mean of the daily max-hour, NA when the whole week is NA.
  # (%>% binds tighter than `<`, so the condition compares the two lengths.)
  dtg = dtg %>% group_by(id,year,week) %>%
    mutate(mean_max_hour =
             ifelse(max_hour %>% is.na %>% which %>% length < max_hour %>% length,
                    mean(max_hour, na.rm=T) %>% round(0) , NA))%>% as.data.frame
  # Flag records within an open +/- 4 h window around the weekly mean max hour
  dtg = dtg %>% group_by(id,year,doy) %>%
    mutate(rad_flag = hour > mean_max_hour - 4 & hour < mean_max_hour+4) %>%
    as.data.frame
  # Only the flag is copied back; all helper columns stay internal
  dt$rad_flag = dtg$rad_flag
  return(dt)
}
###########################TESTING$#############################################
#TODO remove record number calculate solid angle from 3
#TODO angles +180
#TODO collect gz2 for all tree-talkers - build a frequency distribution
#TODO and for given number calculate percentile
#TODO describe nt1 as stem temperature
#TODO the same index for whole day
#TODO add yellowness
#TODO Flux
#TODO quantity absolute values
#TODO quality curve
#TODO stomata closure
#TODO plot VPD vs FLUX
#TODO time of maximum flow
#TODO Fmax=gmaxVPD
#TODO Cumulative min max for all species
#TODO gmax = stomata conductance R2
#TODO frequency distribution of growth to temperature
#TODO average to same temp per week
#TODO LUT for distance T-rh-day-night
#TODO gap fill - lut
#TODO compare TTR with TT data
##
##
##### Report preparation function - gapfilling and stuff
# Stub for the site report preparation routine (gap filling and summaries).
# The body is not implemented yet; calling it returns NULL for any input.
TTsite_repport = function(data){
  # TODO: implement report preparation (gap-filling and summary statistics).
}
| /code/__TTcalc_site_6.R | no_license | yaroslavtsevam/TT- | R | false | false | 52,980 | r | # Developmen log
# version 6
# What is new
# Added BEFdata for central Russia species
# Added LAI calculation according to the Beer-Lambert law
# Corrected variable filtering according to it position, now filtered by name
# Starting thinking to switch to dtplyr
# Switched from cat to special logging function
# Switching to foreach %dopar%
# Added function for correcting wrong time extrapolation according to other results
# Added function for finding correct dates according to pattern matching of tair
# Moisture added
# PRI updated according to its logic - compare wavelengths around green(550) in classical PRI its 530 and 570
# in our case it is 500 and 600. Previous version which used asimetrical interval 550-570 - called PRI2
# Luca's spectral calibration added
# TODO prepare third version of results without this variables
# RRR, E2, G2, C2, ff20, DBH, DTT, installation_height, X1, Site, minTd, meanTd, maxTd, step_time, corr_time, serv_cor, imported, rn, wrong_server,zone, SiteIndex, na.rm, datetime, rec_num, PRI,PRI2, MCARI,recharge_flag, charge , cor_dt, MTCI,CIg,CIr,CRI1,CRI2,SIPI,PSSR,PSND,PSRI,CARI,EVI2,VARI, b_O_600,b_Y_570,b_G_550,b_B_500,b_V_450, b_R_610,source,id_rec, gx, gy, gz, serv_datetime, wrong_time,X,incr, type, b_R_650,b_W_860,b_V_810,b_U_760,b_T_730,b_S_680, NDVI,EVI,Nr,Rr,Br,Gr
source("code//TT_graphs.R")
library(dtplyr)
library(data.table)
library(tidyverse)
library(plyr)
library(lubridate)
library(DescTools)
library(plotly)
library(tsibble)
library(openxlsx)
library(stringr)
library(ggsci)
library(corrplot)
library(Hmisc)
library(plotly)
library(foreach)
#Logging function to log all activities into memory, to console or in file=====
# Global in-memory log buffer; fun_log(verboseFlag = "mem", ...) appends to it.
message_log = "Starting data log"
# Minimal logging helper.
#   verboseFlag: "con"  - print the message to the console,
#                "mem"  - append it to the global message_log vector,
#                "file" - append it to logs/<date>-calc-loging.txt.
#   message: character vector; elements are joined before logging.
fun_log = function(verboseFlag = "mem", message){
  if (verboseFlag == "con") {
    cat(message)
  }
  if (verboseFlag == "mem") {
    # BUG FIX: assign() takes the *name* of the target as a string; the
    # original passed the value of message_log, which created a variable
    # literally named "Starting data log" and never grew the real log.
    # get0() keeps this working even if the buffer was never initialised.
    assign("message_log",
           c(get0("message_log", envir = globalenv(), ifnotfound = character(0)),
             paste(message, collapse = " ")),
           envir = globalenv())
  }
  if (verboseFlag == "file") {
    # BUG FIX: paste(..., collapse = "") does not join separate arguments
    # (collapse only flattens a single vector); sep = "" is required,
    # otherwise the file name contained spaces between its parts.
    file_name = paste("logs//", Sys.Date(), "-calc-loging.txt", sep = "")
    write(paste(message, collapse = ""), file = file_name, append = TRUE)
  }
}
#Basic calculations=============================================================
TTBasicCalc = function(tdt, verboseFlag){
  # Convert a raw TreeTalker cloud table (columns X1..X20 as read from the
  # ttcloud dump) into physical variables.
  #   type 45    : main sensor packet (clock, battery, dendrometer, climate,
  #                accelerometer, temperature probes, stem moisture)
  #   type 46/49 : 12-band spectrometer packet, joined onto the matching
  #                type-45 row via device id + record number
  #   type 40    : legacy 4-band device, recomputed in its own branch below
  # Returns one wide data frame, one row per (deduplicated) type-45 record.
  fun_log(verboseFlag = verboseFlag, c("\n",
      "Basic calculation started, adding not algorithmic variables to table ",
      dim(tdt)[1],"x",dim(tdt)[2],"\n"))
  # Clearing unclear: infinities would poison later aggregations
  tdt[tdt == -Inf] = NA
  tdt[tdt == Inf] = NA
  # Separate different types of devices.
  # BUG FIX: tdt_40 used to be filtered from the table *already* reduced to
  # X3 == 45, so it was always empty; extract all subsets from the original.
  tdt_all = tdt
  tdt2 = tdt_all %>% filter(X3 == 46 | X3 == 49)
  tdt_40 = tdt_all %>% filter(X3 == 40)
  tdt = tdt_all %>% filter(X3 == 45)
  # Basic variables for type 45
  tdt$incr = 1:length(tdt$X1)
  tdt$datetime = tdt$X4 %>% as_datetime(tz = "Europe/Moscow",
                                        origin = "1970-01-01 00:00:00")
  tdt$rec_num = tdt$X2 %>% HexToDec
  # Server receive stamp: first 17 chars of X1 (dd.mm.yy hh:mm:ss, Rome time),
  # re-expressed in Moscow time so it can be compared with the device clock.
  # (A stray empty argument to as.POSIXct was removed.)
  tdt$serv_datetime = tdt$X1 %>% str_trunc(17, "right", ellipsis = "") %>%
    dmy_hms(tz = "Europe/Rome") %>% as.integer %>%
    as.POSIXct(origin = "1970-01-01 00:00:00", tz = "Europe/Moscow")
  # Device clock is wrong when it predates unix 157680000 (~1975) or claims
  # to be later than the server receive time
  tdt$wrong_time = ((tdt$datetime < 157680000) | ((tdt$datetime) > tdt$serv_datetime))
  # Device id = last 8 characters of X1
  tdt$id = tdt$X1 %>% as.character %>%
    str_trunc(8, "left", ellipsis = "")
  tdt$type = tdt$X3 %>% as.double
  fun_log(verboseFlag = verboseFlag, c("Voltage and proximity sensor \n"))
  # Voltage and proximity (dendrometer) sensor calibration curves
  tdt$volt = (1.1*131072 / tdt$X8) + 0.65
  tdt$dist = 4.6728*(tdt$X7*1.1/tdt$X8)^(-1.274)
  tdt$pulses = tdt$X7
  tdt$Hz = tdt$X20
  # Climate: relative humidity, air temperature (raw is 0.1 C units) and
  # vapour pressure deficit (Tetens-type formula)
  tdt$rh = tdt$X10
  tdt$tair = tdt$X11/10
  tdt$VPD = 0.6108*exp((17.27*tdt$tair)/(tdt$tair + 265.5))*(1 - tdt$rh/100)
  # Accelerometer: axis readings, their squares, total magnitude and the
  # three tilt angles in degrees
  tdt$gx = tdt$X12/4096
  tdt$gx2 = tdt$X13/16777216
  tdt$gy = tdt$X14/4096
  tdt$gy2 = tdt$X15/16777216
  tdt$gz = tdt$X16/4096
  tdt$gz2 = tdt$X17/16777216
  tdt$accel = (tdt$gx^2 + tdt$gy^2 + tdt$gz^2)^0.5
  tdt$theta = atan(tdt$gx/(tdt$gy^2 + tdt$gz^2)^0.5)/pi*180
  tdt$psi = atan(tdt$gy/(tdt$gx^2 + tdt$gz^2)^0.5)/pi*180
  tdt$phi = atan(tdt$gz/(tdt$gy^2 + tdt$gx^2)^0.5)/pi*180
  # Temperature probes (raw is 0.1 C units)
  tdt$t1 = tdt$X5/10
  tdt$nt1 = tdt$X6/10
  tdt$t2 = tdt$X18/10
  tdt$nt2 = tdt$X19/10
  # Stem moisture: 4th-degree calibration polynomial of the temperature- and
  # frequency-corrected 46 kHz response
  mx = 119.639 - (0.0420*(tdt$t1)) - (0.00761*tdt$X20)
  y0 = -209.9931
  a = 37.1602
  b = -2.2091
  c = 0.0557
  d = -0.0005
  tdt$moist = y0 + a*mx + b*mx^2 + c*mx^3 + d*mx^4
  # Cloud index ("s") recorded by the caller becomes the source column
  if (any(names(tdt) == "s")){
    tdt$source = tdt$s
  }
  # Removing duplicates only according to data variables - slow but robust.
  # Only type 45 is deduplicated: the left join below attaches type-46 rows
  # to the surviving type-45 rows with the same rec_num.
  duplicated_data_row = tdt[, c("id","dist","rh","tair","VPD","gx","gx2","gy","gy2")] %>%
    duplicated %>% which
  fun_log(verboseFlag = verboseFlag, c("\n","Table had ",dim(tdt)[1]," rows, found ",
      duplicated_data_row %>% length, " rows to be removed.","\n" ))
  if (duplicated_data_row %>% length > 0){
    tdt = tdt[-duplicated_data_row, ]
  }
  # Basic variables for type 46
  tdt2$datetime = tdt2$X4 %>% as_datetime(tz = "Europe/Moscow",
                                          origin = "1970-01-01 00:00:00")
  tdt2$serv_datetime = tdt2$X1 %>% str_trunc(17, "right", ellipsis = "") %>%
    dmy_hms(tz = "Europe/Rome") %>% as.integer %>%
    as.POSIXct(origin = "1970-01-01 00:00:00", tz = "Europe/Moscow")
  tdt2$rec_num = tdt2$X2 %>% HexToDec
  tdt2$id = tdt2$X1 %>% as.character %>%
    str_trunc(8, "left", ellipsis = "")
  # Raw spectrometer bands (name = band_<colour>_<wavelength nm>)
  tdt2$b_R_650 = tdt2$X16
  tdt2$b_O_600 = tdt2$X15
  tdt2$b_Y_570 = tdt2$X14
  tdt2$b_G_550 = tdt2$X13
  tdt2$b_B_500 = tdt2$X12
  tdt2$b_V_450 = tdt2$X11
  tdt2$b_W_860 = tdt2$X10
  tdt2$b_V_810 = tdt2$X9
  tdt2$b_U_760 = tdt2$X8
  tdt2$b_T_730 = tdt2$X7
  tdt2$b_S_680 = tdt2$X6
  tdt2$b_R_610 = tdt2$X5
  # Linearly calibrated bands ("c" suffix; lab calibration coefficients)
  tdt2$b_R_650c = tdt2$b_R_650*0.7829+202.77
  tdt2$b_O_600c = tdt2$b_O_600*0.8654-328.08
  tdt2$b_Y_570c = tdt2$b_Y_570*1.0462-666.72
  tdt2$b_G_550c = tdt2$b_G_550*1.0546-842.1
  tdt2$b_B_500c = tdt2$b_B_500*0.6257-232.13
  tdt2$b_V_450c = tdt2$b_V_450*0.4562-212.62
  tdt2$b_W_860c = tdt2$b_W_860*0.5319+334.88
  tdt2$b_V_810c = tdt2$b_V_810*0.8414+91.58
  tdt2$b_U_760c = tdt2$b_U_760*1.4549-1012.5
  tdt2$b_T_730c = tdt2$b_T_730*1.6209-1511.2
  tdt2$b_S_680c = tdt2$b_S_680*1.5199-561.56
  tdt2$b_R_610c = tdt2$b_R_610*1.6699-312.45
  # Vegetation indices from the raw bands
  tdt2 = tdt2 %>% mutate( NDVI = (b_V_810 - b_S_680)/(b_V_810 + b_S_680))
  tdt2 = tdt2 %>% mutate( EVI = 2.5*(b_V_810-b_S_680)/
                            (b_V_810 + 6*b_S_680 - 7.5*b_B_500 + 1))
  tdt2 = tdt2 %>% mutate( EVI2 = 2.4*(b_V_810 - b_S_680)/
                            (b_V_810 + b_S_680 + 1))
  tdt2 = tdt2 %>% mutate( VARI = (b_G_550-b_S_680)/(b_V_810+b_S_680-b_B_500))
  tdt2 = tdt2 %>% mutate( Nr = (b_V_810)/
                            (b_V_810 + b_R_610 + b_B_500 + b_G_550))
  tdt2 = tdt2 %>% mutate( Rr = (b_R_610 )/
                            (b_V_810 + b_R_610 + b_B_500 + b_G_550))
  tdt2 = tdt2 %>% mutate( Br = ( b_B_500)/
                            (b_V_810 + b_R_610 + b_B_500 + b_G_550))
  tdt2 = tdt2 %>% mutate( Gr = (b_G_550)/
                            (b_V_810 + b_R_610 + b_B_500 + b_G_550))
  tdt2 = tdt2 %>% mutate( SIPI = (b_V_810 - b_V_450)/(b_V_810 -b_S_680))
  tdt2 = tdt2 %>% mutate( PSSR = (b_V_810 / b_R_650))
  tdt2 = tdt2 %>% mutate( PSND = (b_V_810 - b_R_650)/(b_V_810 + b_R_650))
  tdt2 = tdt2 %>% mutate( PSRI = (b_S_680 - b_B_500)/b_U_760)
  tdt2 = tdt2 %>% mutate( CARI= ((b_T_730-b_S_680)-0.2*(b_T_730-b_G_550)))
  tdt2 = tdt2 %>% mutate( MCARI= ((b_T_730-b_S_680)-0.2*(b_T_730-b_G_550))*
                            (b_T_730/b_S_680))
  tdt2 = tdt2 %>% mutate( MTCI = ((b_U_760 - b_T_730) /(b_T_730 - b_S_680)))
  tdt2 = tdt2 %>% mutate( CIg = ((b_W_860 - b_Y_570)/b_Y_570))
  tdt2 = tdt2 %>% mutate( CIr = ((b_W_860 - b_T_730)/b_T_730))
  tdt2 = tdt2 %>% mutate( CRI1 = (1/ b_B_500)-(1/b_G_550))
  tdt2 = tdt2 %>% mutate( CRI2 = (1/ b_B_500)-(1/b_T_730))
  # PRI here compares the bands around green (500 vs 600 nm); PRI2 keeps the
  # older asymmetric 550/570 variant for comparison
  tdt2 = tdt2 %>% mutate( PRI = (b_B_500 - b_O_600 )/(b_B_500 + b_O_600))
  tdt2 = tdt2 %>% mutate( PRI2 = (b_G_550- b_Y_570 )/(b_G_550 + b_Y_570))
  # Same indices from the calibrated bands ("c" suffix)
  tdt2 = tdt2 %>% mutate( NDVIc = (b_V_810c - b_S_680c)/(b_V_810c + b_S_680c))
  tdt2 = tdt2 %>% mutate( EVIc = 2.5*(b_V_810c-b_S_680c)/
                            (b_V_810c + 6*b_S_680c - 7.5*b_B_500c + 1))
  tdt2 = tdt2 %>% mutate( EVI2c = 2.4*(b_V_810c - b_S_680c)/
                            (b_V_810c + b_S_680c + 1))
  tdt2 = tdt2 %>% mutate( VARIc = (b_G_550c-b_S_680c)/(b_V_810c+b_S_680c-b_B_500c))
  tdt2 = tdt2 %>% mutate( Nrc = (b_V_810c)/
                            (b_V_810c + b_R_610c + b_B_500c + b_G_550c))
  tdt2 = tdt2 %>% mutate( Rrc = (b_R_610c )/
                            (b_V_810c + b_R_610c + b_B_500c + b_G_550c))
  tdt2 = tdt2 %>% mutate( Brc = ( b_B_500c)/
                            (b_V_810c + b_R_610c + b_B_500c + b_G_550c))
  tdt2 = tdt2 %>% mutate( Grc = (b_G_550c)/
                            (b_V_810c + b_R_610c + b_B_500c + b_G_550c))
  tdt2 = tdt2 %>% mutate( SIPIc = (b_V_810c - b_V_450c)/(b_V_810c - b_S_680c))
  tdt2 = tdt2 %>% mutate( PSSRc = (b_V_810c / b_R_650c))
  tdt2 = tdt2 %>% mutate( PSNDc = (b_V_810c - b_R_650c)/(b_V_810c + b_R_650c))
  tdt2 = tdt2 %>% mutate( PSRIc = (b_S_680c - b_B_500c)/b_U_760c)
  tdt2 = tdt2 %>% mutate( CARIc = ((b_T_730c - b_S_680c)-0.2*(b_T_730c - b_G_550c)))
  tdt2 = tdt2 %>% mutate( MCARIc = ((b_T_730c - b_S_680c)-0.2*(b_T_730c - b_G_550c))*
                            (b_T_730c/b_S_680c))
  tdt2 = tdt2 %>% mutate( MTCIc = ((b_U_760c - b_T_730c) /(b_T_730c - b_S_680c)))
  tdt2 = tdt2 %>% mutate( CIgc = ((b_W_860c - b_Y_570c)/b_Y_570c))
  tdt2 = tdt2 %>% mutate( CIrc = ((b_W_860c - b_T_730c)/b_T_730c))
  tdt2 = tdt2 %>% mutate( CRI1c = (1/ b_B_500c)-(1/b_G_550c))
  tdt2 = tdt2 %>% mutate( CRI2c = (1/ b_B_500c)-(1/b_T_730c))
  tdt2 = tdt2 %>% mutate( PRIc = (b_B_500c - b_O_600c )/(b_B_500c + b_O_600c))
  tdt2 = tdt2 %>% mutate( PRI2c = (b_G_550c - b_Y_570c )/(b_G_550c + b_Y_570c))
  # Align type 45 and 46: the spectral packet is stored under rec_num + 1,
  # so shift it by one to build a shared join key
  tdt = tdt %>% mutate(id_rec = paste(id, rec_num, sep = ""))
  tdt2 = tdt2 %>% mutate(id_rec = paste(id, rec_num - 1, sep = ""))
  # Unite rows of type 45 and 46 (type 45 on the left keeps every record)
  tdt_a = left_join(tdt, tdt2, by = "id_rec", copy = FALSE)
  names(tdt_a)[names(tdt_a) == "datetime.x"] = "datetime"
  names(tdt_a)[names(tdt_a) == "rec_num.x"] = "rec_num"
  names(tdt_a)[names(tdt_a) == "serv_datetime.x"] = "serv_datetime"
  names(tdt_a)[names(tdt_a) == "id.x"] = "id"
  # Recalculate bands and indexes for type 40, which has only 4 bands
  if (any(tdt_a$type == 40)){
    tdt_40 = tdt_a %>% filter(type == 40)
    tdt_40$b_R_650 = tdt_40$X24.x
    tdt_40$b_G_550 = tdt_40$X23.x
    tdt_40$b_B_500 = tdt_40$X22.x
    tdt_40$b_V_810 = tdt_40$X21.x
    tdt_40 = tdt_40 %>% mutate( NDVI = (b_V_810 - b_R_650)/
                                  (b_V_810 + b_R_650) )
    tdt_40 = tdt_40 %>% mutate( EVI = 2.5*(b_V_810-b_R_650)/
                                  (b_V_810+6*b_R_650-7.5*b_B_500+1))
    tdt_40 = tdt_40 %>% mutate( VARI = (b_G_550-b_R_650)/
                                  (b_V_810+b_R_650-b_B_500))
    tdt_40 = tdt_40 %>% mutate( Nr = (b_V_810)/
                                  (b_V_810+b_R_650+b_B_500+b_G_550))
    tdt_40 = tdt_40 %>% mutate( Rr = (b_R_650 )/
                                  (b_V_810+b_R_650+b_B_500+b_G_550))
    tdt_40 = tdt_40 %>% mutate( Br = ( b_B_500)/
                                  (b_V_810+b_R_650+b_B_500+b_G_550))
    # BUG FIX: Gr previously divided by a sum containing b_R_610, a band the
    # 4-band device does not measure; use b_R_650 like the other ratios.
    tdt_40 = tdt_40 %>% mutate( Gr = ( b_G_550)/
                                  (b_V_810+b_R_650+b_B_500+b_G_550))
    tdt_40 = tdt_40 %>% mutate( SIPI = (b_V_810 - b_B_500)/
                                  (b_V_810 -b_R_650) )
    tdt_40 = tdt_40 %>% mutate( PSSR = (b_V_810 / b_R_650) )
    tdt_40 = tdt_40 %>% mutate( PSND = (b_V_810 - b_R_650)/
                                  (b_V_810 + b_R_650) )
    tdt_40 = tdt_40 %>% mutate( PSRI = (b_R_650 - b_B_500)/b_V_810)
    tdt_40 = tdt_40 %>% mutate( CARI = ((b_V_810-b_R_650)-
                                          0.2*(b_V_810-b_G_550)))
    tdt_40 = tdt_40 %>% mutate( MCARI = ((b_V_810-b_R_650)-
                                           0.2*(b_V_810-b_G_550))
                                *(b_V_810/b_R_650))
    tdt_40 = tdt_40 %>% mutate( CRI1 = (1/ b_B_500)-(1/b_G_550))
    tdt_40 = tdt_40 %>% mutate( CRI2 = (1/ b_B_500)-(1/b_V_810))
    # NOTE(review): the recomputed tdt_40 is still not merged back into tdt_a
    # (the original assignment was commented out); confirm intended behavior
    # before enabling it:
    #tdt_a[tdt_a$type==40,] = tdt_40
  }
  # Drop the raw unconverted .x/.y columns left over from the join
  tt_data = tdt_a %>% select(., -ends_with(".x")) %>% select(., -ends_with(".y"))
  return(tt_data)
}
#Example input
#server = c("http://naturetalkers.altervista.org/C18A0031/ttcloud.txt",
#"http://naturetalkers.altervista.org/C18A0025/ttcloud.txt")
#installation_start = 1556845000
#import_folder_name = "./RUDN"
#Calculating charges============================================================
ischarged = function(data){
  # TRUE at every record whose battery voltage jumped by more than 0.5 V
  # relative to the previous record (i.e. a recharge happened); the first
  # element is always FALSE. Series with fewer than 3 records cannot be
  # assessed and return a single FALSE, as before.
  if (length(data$volt) > 2) {
    # diff() is the idiomatic replacement for the manual lagged subtraction
    charged = c(FALSE, diff(data$volt) > 0.5)
  } else {
    charged = FALSE
  }
  charged
}
#Dates correction and extrapolation - new, not used=============================
# Newer, currently UNUSED variant of timestamp extrapolation (see file header
# comment): fills data$time from the first trusted clock value (cor_dt) or
# trusted server stamp (serv_cor), stepping by `timestep` seconds.
# NOTE(review): cases 1 and 4 assign `data$time` a vector SHORTER than the
# data (the slice `data$time[(start+1):length(data$time)] + plus_time`
# replaces the whole column), which looks broken — confirm before using.
extrapolate_dates = function(data, timestep){
  #print(data)
  data$time = data$datetime
  if(any(data$cor_dt))
    if(length(data$cor_dt %>% na.omit)>0){
      if(any(data$cor_dt) & length(data$cor_dt)>1 ){
        # Anchor on the first record with a trusted device clock
        start = which(data$cor_dt)[1]
        if(start > 1){
          #print("case 1")
          data$time = data$datetime[start]
          plus_time = (1:(length(data$cor_dt)-start-1))*timestep
          minus_time = (1:(start-1))*(-timestep)
          # NOTE(review): these two assignments overwrite the whole column
          # with truncated slices — likely a bug (see header note)
          data$time = data$time[(start+1):length(data$time)]+plus_time
          data$time = data$time[1:(start-1)]+minus_time
        }else{
          #print("case 2")
          # Anchor is the first row: extrapolate forward only
          data$time = data$datetime[start]
          plus_time = (1:(length(data$time)-start))*timestep
          data$time[(start+1):length(data$time)] =
            data$time[(start+1):length(data$time)]+plus_time
          #print("error?")
        }
      } else {
        # No trusted device clock: fall back to the first trusted server stamp
        if(any(data$serv_cor) & length(data$cor_dt)>1){
          start = which(data$serv_cor)[1]
          if(start > 1){
            #print("case 4")
            plus_time = (1:(length(data$cor_dt)-start)*timestep)
            minus_time = (1:(start-1))*(-timestep)
            data$time = data$datetime[start]
            # NOTE(review): same truncating-assignment pattern as case 1
            data$time = data$time[(start+1):length(data$time)]+plus_time
            data$time = data$time[1:(start-1)]+minus_time
          }else{
            #print("case 5")
            plus_time = (1:(length(data$cor_dt)-start)*timestep)
            data$time = data$serv_datetime[start]
            data$time[(start+1):length(data$time)] =
              data$time[(start+1):length(data$time)]+plus_time
          }
        }
      }
    }
  return(data)
}
#Flagging continious correct server time========================================
# Flag server timestamps that are NOT part of a continuous (gap-free) run:
# a serv time is trusted only when its neighbours in the "correct server
# time" index list are adjacent rows on both sides.
# NOTE(review): the unconditional `data$wrong_server = T` before return()
# overrides the computed flags and marks EVERY record as wrong — this looks
# like a deliberate kill-switch (server-time use is also commented out in
# extrapolate_tt_date) or leftover debug code; confirm intent before removal.
mark_continious_serv_time = function(data){
  #is.null(data) %>% print
  wst = which(!data$wrong_server)
  if (length(wst)>2){
    # TRUE where the previous and next trusted indices are both within 1 row
    wst_cont = (wst[2:length(wst)]-wst[1:(length(wst)-1)]<2)[-1] &
      (wst[2:(length(wst)-1)]-wst[3:(length(wst))]>-2)
    data$wrong_server = T
    data$wrong_server[wst[c(F,wst_cont,F)]] = F
  }
  data$wrong_server = T
  return(data)
}
#Check measure pediod (step time) per one tt per charge=========================
check_measure_period = function(temp_data, verboseFlag){
  # Estimate the measurement period (seconds) of one charge segment.
  # The slope of a linear fit of the known timestamps against their row
  # indices gives the average record spacing; it is then snapped to one of
  # the two firmware sampling periods (3600 s or 5400 s). Falls back to
  # 3600 s when fewer than two timestamps are known.
  known = which(!is.na(temp_data$time))
  if (length(known) < 2) {
    return(3600)
  }
  fit = lm(stamp ~ row,
           data = data.frame(stamp = temp_data$time[known], row = known))
  # Slope of the fit, rounded to the nearest 100 s
  raw_step = plyr::round_any(coef(fit)[[2]], 100)
  fun_log(verboseFlag = verboseFlag, c("Estimated step time is ", raw_step))
  step_time = if (raw_step < 4000) 3600 else 5400
  fun_log(verboseFlag = verboseFlag, c(" will be used step time ", step_time, "\n"))
  return(step_time)
}
#Time correction - good old=====================================================
# Reconstruct timestamps for one tree-talker (tt_one): split the series into
# battery-charge segments (a >0.3 V jump marks a recharge), trust the device
# clock where wrong_time is FALSE, optionally fall back to the server stamp,
# and extrapolate the remaining rows forward/backward by the estimated
# sampling period. Adds columns: charge, time (POSIXct), corr_time
# (1 = unfilled, 2 = device clock, 3 = server clock, 4 = unrecoverable),
# step_time, lost_connect.
# NOTE(review): the foreach %dopar% loops below mutate temp_data/tt_one by
# assignment; with a parallel backend those writes happen in worker copies
# and are lost — this only behaves as intended when foreach runs
# sequentially. Confirm and consider plain for loops.
extrapolate_tt_date = function(tt_one, verboseFlag){
  fun_log(verboseFlag = verboseFlag, c("Starting data extrapolation \n"))
  if(tt_one %>% nrow <2 ){
    #print(tt_one)
  }
  if(tt_one$volt %>% length > 2){
    fun_log(verboseFlag = verboseFlag, c("Starting time correction for ", tt_one$id %>% unique,"\n"))
    # A voltage jump > 0.3 V between consecutive records marks a recharge
    bat_grow = c(F,
                 tt_one$volt[2:(nrow(tt_one))] - tt_one$volt[1:(nrow(tt_one)-1)] > 0.3)
    fun_log(verboseFlag = verboseFlag, c("Found ",which(bat_grow) %>% length,"recharges","\n"))
    # Cumulative recharge count = charge-segment id (1-based)
    tt_one$charge = cumsum(bat_grow)+1
    fun_log(verboseFlag = verboseFlag, c("So, charges states are", tt_one$charge %>% unique(),"\n"))
    tt_one$time = NA
    tt_one$corr_time = 1
    foreach (ci = tt_one$charge %>% unique) %dopar% {
      #temp_data = tt_one[tt_one$charge == ci,]
      temp_data = tt_one %>% filter(charge == ci)
      # Records with a sane device clock keep their own timestamp
      temp_data$time[!temp_data$wrong_time] =
        temp_data$datetime[!temp_data$wrong_time]
      fun_log(verboseFlag = verboseFlag, c("Correct dates ",which(!temp_data$wrong_time) %>% length,
          "correct times ",which(!is.na(temp_data$time)) %>% length,"\n"))
      temp_data$corr_time[!temp_data$wrong_time] = 2
      # Helper (currently unused, see commented block below): a server stamp
      # is implausible if an earlier-stamped record has a LARGER record number
      check_serv_time_too_high_according_it_record = function(x, dt){
        # Check if record number of record with current serv_time is
        # bigger of any record number of measurements with correct unix time
        # which is less than current serv_time
        # Index of elemnt in vector of correct unix time whcih are
        # less than current serv_time
        corr_unix_time_index = which(dt$serv_datetime[x] >
                                       dt$datetime[!dt$wrong_time])
        #Correct unix time which are less than current serv_time
        corr_unix_time = dt$datetime[!dt$wrong_time[corr_unix_time_index]]
        # Indexes of corr_unix_time elemnts in whole datetime variable
        datetime_index = which(dt$datetime %in% corr_unix_time)
        # If any record numbers of measurement with correct unix time, which
        # is less than the current serv_time, is bigger of record number of
        # current serv_time than this serv_time is INCORRECT
        return (any(dt$rec_num[datetime_index] > dt$rec_num[x]))
      }
      #if (any(!temp_data$wrong_server)){
      #  cor_serv_records = which(!temp_data$wrong_server)
      #  wrong_serv_records = cor_serv_records[cor_serv_records %>%
      #      map_lgl(check_serv_time_too_high_according_it_record, dt = tt_one)]
      #  temp_data$wrong_server[wrong_serv_records] = T
      #}
      if(length(temp_data$serv_datetime[temp_data$wrong_server == F])>0){
        # if there is any correct date in charge period don't use serv_date
        # but for the case when datetime is wrong use correct server time
        if (!any(!temp_data$time%>%is.na)) {
          only_server_time_ok = temp_data$wrong_time ==
            T & temp_data$wrong_server == F
          temp_data$time[only_server_time_ok] =
            temp_data$serv_datetime[only_server_time_ok]
          temp_data$corr_time[only_server_time_ok] = 3
        }
      }
      # Records with an absurd clock and no usable stamp at all
      temp_data$corr_time[temp_data$datetime <
                            157680000 & temp_data$time %>% is.na] = 4
      temp_data$lost_connect = F
      temp_data$lost_connect[temp_data$datetime <
                               157680000 & temp_data$time %>% is.na] = T
      real_time = which(!is.na(temp_data$time))
      step_time = check_measure_period(temp_data, verboseFlag)
      tt_one$step_time = step_time
      fun_log(verboseFlag = verboseFlag, c("Charge:",ci," step time:",step_time,
          ", correct dates found",real_time %>% length,"\n"))
      #If there are any correct time inside this data
      if (real_time %>% length > 1){
        #If last elemnt do not have correct time - set it as correct
        if (real_time[length(real_time)] != length(temp_data$time)) {
          real_time = c(real_time,length(temp_data$time))
        }
        # Extrapolate backward from the first record with a correct time.
        # NOTE(review): %dopar% side-effect assignment, see header note
        foreach( i = (real_time[1]-1):1) %dopar% {
          temp_data$time[i] = temp_data$time[i+1]-step_time
        }
        fun_log(verboseFlag = verboseFlag, c("Found measurements with correct timestemp:",
            length(real_time)-1, "\n"))
        # Fill forward between consecutive anchors, one step per row
        for( i in 1:(length(real_time)-1)) {
          if(real_time[i+1] == real_time[i]+1){
            next(i)
          }
          foreach( j = real_time[i]:(real_time[i+1]-2)) %dopar% {
            temp_data$time[j+1] = temp_data$time[j]+step_time
          }
        }
        tt_one = tt_one %>% as.data.frame()
        temp_data = temp_data %>% as.data.frame()
        tt_one[tt_one$charge == ci,] = temp_data
        fun_log(verboseFlag = verboseFlag, c(tt_one$id %>% unique(), " charge ", ci,"was filled","\n"))
      }
    }
    tt_one$time = tt_one$time %>% as.POSIXct(origin="1970-01-01 00:00:00",
                                             tz="Europe/Moscow") }
  else {
    # Too few records to segment: single pseudo-charge, no timestamps
    tt_one$charge=1
    tt_one$time = NA
  }
  return(tt_one)
}
# Repair extrapolation errors for one tree-talker: find places where the
# reconstructed time goes BACKWARD with growing row number, invalidate those
# timestamps (time = NA, wrong_time/wrong_server = TRUE), then redo the
# per-charge forward/backward extrapolation with the stored step_time.
# NOTE(review): the first foreach %dopar% both mutates
# real_bad_extrap_index by assignment (lost under a parallel backend) and is
# marked "#wrong" by the author on the rep(F, bi) prefix — confirm.
correct_extrap_date = function(tt_one, verboseFlag){
  fun_log(verboseFlag = verboseFlag, c("TT ", tt_one$id %>% unique, " fixing wrong corrections","\n"))
  # Check whether the date goes back in time with growing row number
  ends = nrow(tt_one)
  bad_extrap_index =c(F,(tt_one$time[2:ends] - tt_one$time[1:(ends-1)])<0)
  # If the tt data is very small, the previous step can generate a
  # bad_extrap_index longer than the data itself (producing NAs) —
  # remove them
  bad_extrap_index = na.exclude(bad_extrap_index)
  real_bad_extrap_index = which(bad_extrap_index)
  # Extend each backward jump to every later row still before the jump point
  foreach (bi = which(bad_extrap_index)) %dopar% {
    before_problem = bi - 1
    time_before_drop = tt_one$time[before_problem]
    real_problem = which(c(rep(F, bi),tt_one$time[bi:nrow(tt_one)] < time_before_drop))#wrong
    real_bad_extrap_index = c(real_bad_extrap_index,real_problem)
  }
  bad_extrap_index = rep(F,length(tt_one$time))
  bad_extrap_index[real_bad_extrap_index] = T
  bad_extrap_index = bad_extrap_index[1:nrow(tt_one)]
  # Invalidate the affected timestamps so they get re-extrapolated below
  fun_log(verboseFlag = verboseFlag, c("Found ",which(bad_extrap_index)%>% length," bad extrapolation, fixing","\n" ))
  #print(bad_extrap_index)
  tt_one$time[bad_extrap_index] = NA
  tt_one$wrong_time[bad_extrap_index] = TRUE
  tt_one$wrong_server[bad_extrap_index] = TRUE
  #tt_one$datetime[bad_extrap_index] = as.POSIXct(0, origin="1970-01-01 00:00:00", tz="Europe/Moscow")
  #tt_one$serv_datetime[bad_extrap_index] = as.POSIXct(0, origin="1970-01-01 00:00:00", tz="Europe/Moscow")
  fun_log(verboseFlag = verboseFlag, c("Removed incorrect time indexes","\n"))
  # Restart the extrapolation per charge segment (same scheme as
  # extrapolate_tt_date, but reusing the previously estimated step_time)
  for (ci in tt_one$charge %>% unique){
    temp_data = tt_one %>% filter(charge == ci)
    if(!any(is.na(temp_data$time))) {
      fun_log(verboseFlag = verboseFlag, c("Nothing to correct in ",ci," charge, skipping","\n"))
      next(ci)
    }
    real_time = which(!is.na(temp_data$time))
    step_time = temp_data$step_time[1]
    fun_log(verboseFlag = verboseFlag, c("Charge:",ci," step time:",step_time,
        ", correct dates found",real_time %>% length,"\n"))
    # If there is any correct time inside this data
    if (real_time %>% length > 1){
      # If the last element has no correct time, treat it as an anchor
      if (real_time[length(real_time)] != length(temp_data$time)) {
        real_time = c(real_time,length(temp_data$time))
      }
      if (real_time[1]>2){
        # Extrapolate backward from the first element with correct time
        fun_log(verboseFlag = verboseFlag, c("Filling backward\n"))
        for( i in (real_time[1]-1):1) {
          temp_data$time[i] = temp_data$time[i+1]-step_time
        }
      }
      fun_log(verboseFlag = verboseFlag, c("Found measurements with correct timestamp:",
          length(real_time)-1, "\n"))
      # Fill forward between consecutive anchors.
      # NOTE(review): %dopar% side-effect assignment, see header note
      for( i in 1:(length(real_time)-1)) {
        if(real_time[i+1] == real_time[i]+1){
          next(i)
        }
        foreach( j = real_time[i]:(real_time[i+1]-2)) %dopar% {
          temp_data$time[j+1] = temp_data$time[j]+step_time
        }
      }
      tt_one[tt_one$charge == ci,] = temp_data
      fun_log(verboseFlag = verboseFlag, c(tt_one$id %>% unique(), " charge ", ci,"was filled","\n"))
    }
  }
  tt_one$time = tt_one$time %>% as.POSIXct(origin="1970-01-01 00:00:00",tz="Europe/Moscow")
  return(tt_one)
}
# For devices whose clock is entirely unknown within a charge segment, borrow
# timestamps from the best-correlated neighbouring device: cross-correlate
# air-temperature series of every device pair per charge (lag of maximum
# |ccf|), pick the partner with the highest correlation, shift its record
# numbers by that lag and copy its times, then rerun correct_extrap_date.
# NOTE(review): the outer foreach %dopar% grows lag_table by assignment,
# which is lost under a parallel backend — only works sequentially; confirm.
correct_time_ts_shift_matching = function(data, verboseFlag){
  #data = tt_data_ec
  fun_log(verboseFlag = verboseFlag, c("Starting correct_time_ts_shift_matching \n"))
  # Lag of the maximum absolute cross-correlation between two series
  abs.max.ccf <- function(a,b) {
    d <- ccf(a, b, plot=FALSE, lag.max=length(a)-5)
    cor <- d$acf[,,1]
    abscor <- abs(d$acf[,,1])
    lag <- d$lag[,,1]
    abs.cor.max <- abscor[which.max(abscor)]
    abs.cor.max.lag <- lag[which.max(abscor)]
    return(c( abs.cor.max, abs.cor.max.lag))
  }
  lag_table = data.frame()
  # Pairwise ccf per charge; pairs need at least 64 records on both sides
  foreach(icharge = 1:6) %dopar% {
    for(iid in (data$id %>% unique)) {
      ttsi = data[data$id == iid & data$charge == icharge,]
      if(nrow(ttsi)<64){ next()}
      for(jid in (data$id %>% unique)) {
        if(jid == iid){next()}
        ttsj = data %>% filter(id == jid, charge == icharge)
        print(nrow(ttsj))
        if(nrow(ttsj)<64){ next() }
        print(all(is.na(ttsj$time)))
        # A donor with no timestamps at all cannot help
        if(all(is.na(ttsj$time))) { next()}
        cor_time = which(!is.na(ttsj$time)) %>% length
        tsi = ts(ttsi$tair, start=1, end = nrow(ttsi), frequency = 1)
        tsj = ts(ttsj$tair, start=1, end = nrow(ttsj), frequency = 1)
        lag = abs.max.ccf(tsi,tsj)
        lag_row = data.frame(iid, jid, icharge, cor=lag[1], lag = lag[2], cor_time)
        print("lag_row")
        print(lag_row)
        lag_table = rbind(lag_table, lag_row)
        print(names(lag_table))
      }
    }
  }
  print("end")
  print(lag_table)
  # Best-correlated donor per (receiver id, charge)
  result_table = lag_table %>% group_by(iid, icharge) %>% summarise(
    MaxCor = max(cor), jid = jid[which.max(cor)], lag = lag[which.max(cor)],
    cor_time = cor_time[which.max(cor)]
  ) %>% as.data.frame()
  datac = data
  # Copy donor times (shifted by the found lag, matched on rn) into receivers
  # that still have no timestamps; %do% runs sequentially so the writes stick
  foreach(i = 1:nrow(result_table)) %do% {
    iid = result_table$iid[i]
    jid = result_table$jid[i]
    lag = result_table$lag[i]
    icharge = result_table$icharge[i]
    if(all(is.na(
      datac%>%filter(id == iid, charge == icharge) %>% select(time)))){
      subj = datac%>%filter(id == jid & charge == icharge)
      subi = datac%>%filter(id == iid & charge == icharge)
      time_index = match(subi$rn,subj$rn+lag)
      subi$time = subj$time[time_index]
      datac[datac$id == iid & datac$charge == icharge,] = subi
    }
  }
  fun_log(verboseFlag = verboseFlag, c("Starting correct_extrap_date inside correct_time_ts_shift_matching \n"))
  dataec = datac %>% group_by(id) %>%
    do(correct_extrap_date(., verboseFlag)) %>% as.data.frame
  fun_log(verboseFlag = verboseFlag, c("Stopped correct_time_ts_shift_matching \n"))
  return(dataec)
  # ggplot()+
  #   geom_point(data = filter(data, id=="218A0060", charge == 1), aes(x= rn, y=tair), color = 1)+
  #   geom_point(data = filter(data, id=="218A0178", charge == 1), aes(x= rn-23, y=tair), color = 2)
  # ggplot(data = dataec)+
  #   geom_point(aes(x=rn, y=tair, color=charge,shape=is.na(time)),size=.1)+
  #   facet_wrap(~id)
}
#Calculating everything for one site - returns two tables=======================
TTcalc_site = function(server,
installation_start,
import_folder_name,
first_imported_dates_reconstructed,
descr_file,
sitename,
verboseFlag){
# Example of possible intput
# server = c("http://naturetalkers.altervista.org/C18A0031/ttcloud.txt",
# "http://naturetalkers.altervista.org/C18A0025/ttcloud.txt")
# installation_start = 1556845000
# import_folder_name="RUDN"
# first_imported_dates_reconstructed = F
# descr_file = "rudn_desc.csv"
temp_serv_dataset = data.frame()
for (serv_url in server) {
i = which(server %in% serv_url)
dest_file_name = paste("db_sync/dbsync-",sitename,"-",i,"-",Sys.Date(),".txt", sep="")
if(!any(str_c("db_sync/",dir("db_sync/")) == dest_file_name) ){
fun_log(verboseFlag = verboseFlag, c("No sync today - downloading file \n"))
download.file(url = serv_url, destfile = dest_file_name, method = "curl",mode="character")
} else {
fun_log(verboseFlag = verboseFlag, c("Sync was done today, no download \n"))
}
temp_read = suppressWarnings(suppressMessages(
read_delim(dest_file_name, col_names = FALSE, comment = "#",delim=";", col_types = "ccdddddddddddddddddd")))
temp_read$s = i
temp_serv_dataset = rbind( temp_serv_dataset,temp_read)
fun_log(verboseFlag = verboseFlag, c(as.character(serv_url), "data read.","\n"))
}
tt_data = TTBasicCalc(temp_serv_dataset, verboseFlag)
#tt_basic_data = lazy_dt(tt_data)
#If there are duplicates - remove
tt_data = tt_data[
-(tt_data[,c("volt","dist","pulses","Hz","rh","tair",
"VPD","gx","gx2","gy","gy2")]
%>% duplicated
%>% which), ]
#Removing rows with corrupted id
corrupted_id = unique(tt_data$id)[str_which(unique(tt_data$id),":")]
if(corrupted_id %>% length() > 0){
fun_log(verboseFlag = verboseFlag, c("Found corrupted id: ", corrupted_id, " removing \n"))
tt_data = tt_data %>% filter(!(id %in% corrupted_id))
}
#If data for one TT came from different clouds, we will try to give it some
#order according to appearance on server since row order and record numbers
#on different clouds will be different.
tt_data = tt_data %>% group_by(id) %>%
arrange(serv_datetime) %>% as.data.frame
#Adding flagging variable showing that this data was obtained from server
tt_data$imported = F
# TT sends record to cloud one by one, and clouds do this in same way,
# so on server records should appear in correct order.Between them could be
# gaps, but we can detect them by battery charge discontinuties
#tt_data = tt_data %>% group_by(id) %>% mutate(nrec_num = 1:length(rec_num))
fun_log(verboseFlag = verboseFlag, c("Checking if there are some back up data data? \n"))
if(!is.null(import_folder_name)){
fun_log(verboseFlag = verboseFlag, c("Yep, there are some \n"))
data_bc = data.frame()
import_folder_name = paste("",import_folder_name,sep="")
for(file in list.files(import_folder_name,pattern="TT*")){
temp = suppressWarnings(suppressMessages(
read_delim(paste(import_folder_name,file, sep="/"),
col_names = FALSE, delim=";", skip=0)
))
fun_log(verboseFlag = verboseFlag, c("Dimmension of the file", file,":",dim(temp)[1]," - rows, ",
dim(temp)[2]," - columns.","\n"))
data_bc = rbind(data_bc, temp)
fun_log(verboseFlag = verboseFlag, c("Binded array size", dim(data_bc)))
}
fun_log(verboseFlag = verboseFlag, c("Binded array size", dim(data_bc)))
first_column = paste("10.05.20 12:55:19",data_bc$X1, sep=",")
#imported data have no server time,so we should add some
data_bc$X1 = first_column
#source variable will be created from that
data_bc$s=i+1
#print(as.data.frame(data_bc))
tt_bc = TTBasicCalc(data_bc , verboseFlag = verboseFlag)
timestep = 3600
#Adding flagging variable showing that this data was obtained from impport
tt_bc$imported = T
fun_log(verboseFlag = verboseFlag, c("Basic calculated array size", dim(tt_bc),"\n"))
#Inserting exported(backuped) from TT data into server data,
#Very slow and stupid way, but there were no time for elegance
#
tt_imported = data.frame()
# logging cases into file or memory
log_imp = data.frame(id = "first", starts = NA, ends = NA,
var = 0, tt = 0,bc = 0,stringsAsFactors = FALSE)
print(names(tt_bc))
for (ids in unique(tt_data$id)){
bc = tt_bc %>% filter(id == ids) %>% as.data.frame
tt = tt_data %>% filter(id == ids) %>% as.data.frame
fun_log(verboseFlag = verboseFlag, c("Data from the TT ", ids," imported has ",
dim(bc)[1]," - rows, ",dim(bc)[2]," - columns.","\n" ))
fun_log(verboseFlag = verboseFlag, c("Data from the TT ", ids," on server has ",
dim(tt)[1]," - rows, ",dim(tt)[2]," - columns.","\n" ))
fun_log(verboseFlag = verboseFlag, c("Starting import of directly extracted from TT data","\n"))
u = tt
#if(dim(tt_imported)[1]>0){
if(length(tt$volt)>0){
#print(ids)
badnames = c("datetime","wrong_time","id","type","incr","rec_num",
"serv_datetime","dist","source","id_rec",
"b_O_600","b_Y_570","b_W_860","b_V_810","b_U_760","b_R_610",
"NDVI","EVI","EVI2","Nr","Rr","Br","Gr","PSSR","PSND",
"MTCI","PRI","b_R_650","b_G_550","b_B_500","b_V_450",
"b_T_730","b_S_680","VARI","SIPI","PSRI","CARI","MCARI",
"CIg","CIr","CRI1","CRI2","imported")
names_for_compare = names(tt)[!(names(tt) %in% badnames)]
frank=data.frame()
logs=data.frame()
if(dim(bc)[1]>10){ #if imported data from tt is to small in most cases it's crappy and better just to skip it
matches = match(do.call("paste",tt[, names_for_compare]),
do.call("paste", bc[, names_for_compare]))
msize = length(matches)
edges = c((is.na(matches[2:msize]) - is.na(matches[1:(msize-1)])),0)
if(any(edges == -1) & any(edges == 1) ) {
starts = which(edges == -1)
ends = which(edges == 1)
subset1 = 1:starts
subset2 = ends:msize
frank = rbind(tt[subset1,], bc, tt[subset2,])
#logs = data.frame(id = as.character(ids), starts = starts, ends = ends,
# var = 1, tt = dim(tt)[1],bc = dim(bc)[1],stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 1 intersect, start is ", starts," end is ",ends,"bc size ",dim(bc)[1]," tt size ",dim(tt)[1],"\n"))
} else {
if(any(edges == -1)){
starts = which(edges==-1)
frank = rbind(tt[1:starts,], bc)
#logs = data.frame(id = as.character(ids), starts = starts, ends = NA,
# var = 2, tt = dim(tt)[1],bc = dim(bc)[1],stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 2 add to end, start is ", starts,"bc size ",dim(bc)[1]," tt size ",dim(tt)[1],"\n"))
}
if(any(edges == 1)){
ends = which(edges==1)
frank = rbind( bc, tt[ends:msize,])
#logs = data.frame(id = as.character(ids), starts = NA, ends = ends,
# var = 3, tt = dim(tt)[1],bc = dim(bc)[1],stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 3 add to begin, end is ",ends,"bc size ",dim(bc)[1]," tt size ",dim(tt)[1],"\n"))
}
}
} else {
frank = tt
#logs = data.frame(id = as.character(ids), starts = NA, ends = NA,
# var = 0, tt = dim(tt)[1],bc = dim(bc)[1], stringsAsFactors = FALSE)
fun_log(verboseFlag = verboseFlag, c("Case 0 no backup","\n"))
}
u=frank
#matches %>% print
#fun_log(verboseFlag = verboseFlag, c("Found ", matches %>% length,
# " matches between importing and server data for TT ", ids,"\n")
} else {
u = bc
#logs = c(id = ids, starts = NA, ends = NA, var = 4)
fun_log(verboseFlag = verboseFlag, c("Case 4, only backup result is ", dim(u)[1]))
}
tt_imported = rbind(tt_imported,u)
#log_imp = rbind(log_imp,logs)
} # end of cycling through ids instruction
# if no import was done just take tt_data
#print(log_imp)
} else {
fun_log(verboseFlag = verboseFlag, c("Nope, nothing to import \n"))
tt_imported = tt_data
}
tt_imported = tt_imported %>% group_by(id) %>% mutate(rn = 1:length(volt))
#DONE Add flagging variable to mark source of data - imported from backup or
#got directly from server
#TODO Check what is wrong with Timiryazev
#TODO Add to time extrapolation option to understand measurement time
########NOT USING NOW
#Esoterical way to flag data before and after reseting -
#if rec_num falls more than 1000 records - it means that this TT was reseted
#(its magic because cloud was not reseted, but record numbers in
# cloud still drops)
#tt_data$before_reset = c(T, cumsum(tt_data$rec_num[2:length(tt_data$rec_num)]
#-tt_data$rec_num[1:(length(tt_data$rec_num)-1)] < -1000) < 1 )
#Finding last record number before reset and constructing new variable with
#continuous record number
#d = tt_data %>% filter(before_reset == T) %>% group_by(id)
#%>% summarise (max = max(rec_num))
#tt_data = left_join(tt_data,d, by="id")
#tt_data = tt_data %>% group_by(id) %>%
#mutate(nrec_num = if_else(before_reset, rec_num, rec_num+max))
#Function to detect recharge of battery - find rise on more than 0.5 volts
tt_imported = as.data.frame(tt_imported)
#print(head(tt_imported))
# Marking server time wrong if there are more than one measurement per hour
tt_imported$years = year(tt_imported$serv_datetime)
tt_imported$doys = yday(tt_imported$serv_datetime)
tt_imported$hours = hour(tt_imported$serv_datetime)
#tt_imported = lazy_dt(tt_imported)
tt_imported = tt_imported %>% group_by(id,years,doys, hours) %>%
mutate(wrong_server = length(doys)>1) %>% as.data.frame
#Marking recharge
#tt_imported = lazy_dt(tt_imported)
tt_imported = tt_imported %>% group_by(id) %>% arrange(rn) %>%
mutate(recharge_flag =ischarged(.data))
#Calculating charge cycles
tt_imported = tt_imported %>% group_by(id) %>% arrange(rn) %>%
mutate(charge = cumsum(recharge_flag))
#Simple detect of clearly wrong datetimes
tt_imported = tt_imported %>% group_by(id) %>% arrange(rn) %>%
mutate(cor_dt = (datetime > min(serv_datetime)) & (datetime <
max(serv_datetime)))
#Detecting correct server time - first assumption -
#it should one measurement per hour
tt_imported = tt_imported %>% group_by(id, years, doys,hours) %>%
mutate(serv_cor = length(serv_datetime) < 2)
timestep = 3600
#tt_imported = tt_imported %>% group_by(id, charge) %>%
# do(extrapolate_dates(., timestep))%>% as.data.frame
tt_imported = tt_imported %>% group_by(id) %>%
do(mark_continious_serv_time(.)) %>% as.data.frame
#tt_imported = lazy_dt(tt_imported)
tt_imported = tt_imported %>% group_by(id) %>%
do(mark_continious_serv_time(.)) %>% as.data.frame
SITE_list = suppressWarnings(suppressMessages(
read_delim(descr_file,col_names = T, delim=",")))
if (!is.null(sitename)){
SITE_list = SITE_list%>%filter(Site == sitename)
if(SITE_list %>% length <1) {
fun_log(verboseFlag = verboseFlag, c(
"Looks like you have error in site name.\n"
))
}
}
tt_imported = tt_imported %>% filter(id %in% SITE_list$id)
if(tt_imported %>% nrow <1) {
fun_log(verboseFlag = verboseFlag, c(
"Looks like you have error in site name.\n"
))
}
fun_log(verboseFlag = verboseFlag, c("Starting extrapolation of dates \n"))
tt_data_e = tt_imported %>% group_by(id) %>%
do(extrapolate_tt_date(., verboseFlag)) %>% as.data.frame
fun_log(verboseFlag = verboseFlag, c("Starting correction of extrapolated dates \n"))
tt_data_ec = tt_data_e %>% group_by(id) %>%
do(correct_extrap_date(., verboseFlag)) %>% as.data.frame
fun_log(verboseFlag = verboseFlag, c("Starting correct_time_ts_shift_matching of extrapolated dates \n"))
#tt_data_ec = correct_time_ts_shift_matching(tt_data_ec, verboseFlag)
#tt_data_e = tt_imported
tt_data_e = tt_data_ec %>% select(-c(years,doys,hours))
tt_data_e$year = year(tt_data_e$time)
tt_data_e$week = week(tt_data_e$time)
tt_data_e$doy = yday(tt_data_e$time)
tt_data_e$hour = hour(tt_data_e$time)
tt_data_e$min = minute(tt_data_e$time)
tt_data_e = tt_data_e %>% group_by(id,year,doy) %>%
mutate(dT = nt2 - t2, na.rm = T) %>%
mutate(dTa = nt2 - t2 - nt1 + t1, na.rm = T) %>%
mutate(dTm = max(dT, na.rm=T)) %>%
mutate(dTam = max(dTa, na.rm=T)) %>%
mutate(maxTd = max(dist), na.rm = T) %>%
mutate(meanTd = mean(dist), na.rm = T) %>%
mutate(minTd = min(dist), na.rm = T) %>%
mutate(u = 119*(10^-3)*(dTm/dT - 1)^1.231, na.rm = T) #l m-2 s-1
SITEdata = left_join(tt_data_e,SITE_list,by="id")
SITEdata = SITEdata %>% mutate(diam = DBH / pi)
SITEdata = SITEdata %>%
mutate(Flux = u*3600*(diam^1.8777)*0.755/10000, na.rm = T)
#SITEdata = BEFadd(SITEdata, verboseFlag)
#Spectrometer calibration
SITEdata = TTR_add(SITEdata, verboseFlag)
if(verboseFlag =="mem"){
return(list(tt_imported, SITEdata,message_log))
}else {
return(list(tt_imported, SITEdata))
}
}
#Adding Biomass calculation data ==============================================
# Biomass calculated based on IPCC 2006 formula C = [V * D * BEF] * (1 + R) * CF
# BCEF = BEF * D is taken from paper doi:10.3390/f9060312 Dmitry Schepaschenko
# Improved Estimates of Biomass Expansion Factors for Russian Forests
# Big table of data from this paper is used in this function
# Attach Biomass Expansion Factor (BEF) rows to the measurement table.
#
# @param data data.frame of TT measurements; must contain `Species`,
#   `age_group_index` and `zone` columns (assumed from the join keys —
#   TODO confirm against data/BEF.csv).
# @param verboseFlag logging mode passed through to fun_log().
# @return `data` with BEF columns joined on (Genum, zone, age_group_indexes);
#   helper columns `genus` and `age_group_indexes` are dropped again.
BEFadd = function(data, verboseFlag){
  fun_log(verboseFlag = verboseFlag, c("Adding BEF data for biomass growth calculation \n"))
  # BUG FIX: the original had `data = AllData` here — a leftover debugging
  # line that silently discarded the caller's argument and read a global
  # instead. Removed so the function operates on its input.
  BEFdata = read_delim("data/BEF.csv", delim = ";")
  # Genus = first word of the species binomial
  data = data %>% mutate(genus = str_split(Species, " ", simplify = T)[,1])
  # Age classes V and VI are folded into IV (BEF table granularity)
  data = data %>% mutate(age_group_indexes = recode(age_group_index, V = "IV", VI = "IV"))
  # Genera absent from the BEF table are mapped onto the aggregate classes:
  #   Other hard deciduous: Fraxinus, Acer
  #   Other soft deciduous: Salix, Tilia
  data = data %>% mutate(Genum = recode(genus, Fraxinus = "Other hard deciduous", Acer = "Other hard deciduous",
                                        Salix = "Other soft deciduous", Tilia = "Other soft deciduous"))
  data = left_join(data, BEFdata, by = c("Genum","zone","age_group_indexes"))
  data = data %>% select(-genus, -age_group_indexes)
  return(data)
}
#Adding TTR connected variables=================================================
#IMPORTANT - we are assuming that one site is a group of TTs installed on trees
#which could be assumed to be in a same conditions and that there is one TTR per site
# Add TTR (reference TreeTalker) derived variables to site data.
#
# IMPORTANT - assumes one TTR per site; TTR rows are identified by
# Species == "TTR" and averaged per (Site, doy, hour), then joined back
# so every tree row carries its site's reference-sensor readings.
#
# @param data joined site data frame (output of the main import pipeline).
# @param verboseFlag logging mode passed through to fun_log().
# @return `data` with TT* reference columns and LAI estimates added, or
#   unchanged when the site has no TTR rows.
TTR_add = function(data, verboseFlag){
  fun_log(verboseFlag = verboseFlag, c("Starting site TTR data calculation \n"))
  if(any(data$Species == "TTR")){
    names(data)[names(data) == "id.x"] = "id"
    # Clamp negative calibrated band readings to zero before averaging —
    # negative reflectance values are sensor noise. (Replaces twelve
    # copy-pasted assignment lines with a loop over the band columns;
    # `data[[col]]` is equivalent to the original `data$col` accesses.)
    band_cols = c("b_R_650c", "b_O_600c", "b_Y_570c", "b_G_550c", "b_B_500c",
                  "b_V_450c", "b_W_860c", "b_V_810c", "b_U_760c", "b_T_730c",
                  "b_S_680c", "b_R_610c")
    for (band in band_cols) {
      data[[band]][data[[band]] < 0] = 0
    }
    # Per-site hourly means of the reference sensor (calibrated and raw bands)
    TTRdatasum = data %>% filter(Species == "TTR") %>% group_by(Site,doy,hour) %>% summarise(
      TTair = mean(tair,na.rm = T),
      TTrh = mean(rh, na.rm = T),
      TTR_650c = mean(b_R_650c,na.rm = T),
      TTR_600c = mean(b_O_600c,na.rm = T),
      TTR_570c = mean(b_Y_570c,na.rm = T),
      TTR_550c = mean(b_G_550c,na.rm = T),
      TTR_500c = mean(b_B_500c,na.rm = T),
      TTR_450c = mean(b_V_450c,na.rm = T),
      TTR_860c = mean(b_W_860c,na.rm = T),
      TTR_810c = mean(b_V_810c,na.rm = T),
      TTR_760c = mean(b_U_760c,na.rm = T),
      TTR_730c = mean(b_T_730c,na.rm = T),
      TTR_680c = mean(b_S_680c,na.rm = T),
      TTR_610c = mean(b_R_610c,na.rm = T),
      TTR_650 = mean(b_R_650,na.rm = T),
      TTR_600 = mean(b_O_600,na.rm = T),
      TTR_570 = mean(b_Y_570,na.rm = T),
      TTR_550 = mean(b_G_550,na.rm = T),
      TTR_500 = mean(b_B_500,na.rm = T),
      TTR_450 = mean(b_V_450,na.rm = T),
      TTR_860 = mean(b_W_860,na.rm = T),
      TTR_810 = mean(b_V_810,na.rm = T),
      TTR_760 = mean(b_U_760,na.rm = T),
      TTR_730 = mean(b_T_730,na.rm = T),
      TTR_680 = mean(b_S_680,na.rm = T),
      TTR_610 = mean(b_R_610,na.rm = T))
    data = data %>% left_join(TTRdatasum, by=c("Site","doy","hour"))
    # LAI according to Beer-Law and light extinction coefficient,
    # look into papers LAI folder
    K = 5.2 # light extinction coefficient
    data = data %>% mutate(LAInir = -log((b_V_810c+b_W_860c)/(TTR_860c+TTR_810c))/K)
    data = data %>% mutate(LAIb = -log((b_V_450c+b_B_500c)/(TTR_450c+TTR_500c))/K)
    return(data)
  } else {
    fun_log(verboseFlag = verboseFlag, c("Looks like your site dont have TTR, returning data without change \n"))
    return(data)
  }
}
#Exporting site data to excel===================================================
# Export every site in AllData to its own .xlsx workbook (one sheet per TT id).
# Runs one site per foreach worker; each worker builds its own list of
# per-TT data frames and writes "<SiteIndex>.xlsx" into the working directory.
# NOTE(review): requires a registered parallel backend (doParallel etc.) and
# the openxlsx `write.xlsx` — neither is visible from this chunk; confirm.
export_all_to_excel = function(AllData) {
  # Columns exported to each per-TT worksheet
  var_list = c("time","id","Species","d","VTA_score","rec_num","tair","rh","VPD",
  "theta","psi","phi","gz2","nt1","NDVIc","EVIc","VARIc","PRIc","NDVI","EVI","VARI","PRI","Rr",
  "Br","Gr","Flux", "TTair","TTrh","LAIb","LAInir")
  # NOTE(review): g2 and W are computed but neither appears in var_list,
  # so they never reach the workbook — dead work unless AllData is reused.
  AllData = AllData %>% mutate(g2 = gz2+gy2+gx2)
  # NOTE(review): mean() here is over the WHOLE (ungrouped) column, so W is a
  # single value recycled to every row — presumably intended per-group; verify.
  AllData = AllData %>% mutate(W = mean((46000-Hz)/(Hz+46000)*50, na.rm=T))
  foreach (site = AllData$SiteIndex %>% unique()) %dopar% {
    # Assignments below stay local to the worker, which is fine here because
    # write.xlsx also happens inside the same loop body.
    list_of_datasets = list()
    SITEdata = AllData%>%filter(SiteIndex == site)
    for (i in SITEdata$id %>% unique) {
      index = which(SITEdata$id %>% unique == i)
      TT = SITEdata %>% filter(id == i)
      TT = TT[,var_list]
      list_of_datasets[[i]] = TT
      names(list_of_datasets)[index] = i
    }
    sitename = site
    write.xlsx(list_of_datasets, file = paste(sitename,".xlsx",sep=""))
    # dat = loadWorkbook( file = paste(sitename,".xlsx",sep=""))
    # desc = readWorkbook(insert_file, sheet=1)
    # addWorksheet(dat, "Пояснения")
    # writeData(dat,"Пояснения",desc)
    # saveWorkbook(dat, paste(sitename,".xlsx",sep=""), overwrite = TRUE)
  }
}
# Export one site object to "<sitename>.xlsx" (one sheet per TT id) and
# append a description sheet ("Пояснения") read from `insert_file`.
#
# @param site_object list as returned by the import pipeline;
#   element [[2]] holds the processed site data frame.
# @param sitename base name of the output workbook.
# @param insert_file xlsx file whose first sheet becomes the description tab.
export_site_to_excel = function(site_object,sitename="site",
                                insert_file="RUDN_descr.xlsx") {
  # Columns exported to each per-TT worksheet
  var_list = c("time","id","Species","d","VTA_score","rec_num","tair","rh","VPD",
  "theta","psi","phi","gz2","nt1","NDVIc","EVIc","VARIc","PRIc","NDVI","EVI","VARI","PRI","Rr",
  "Br","Gr","Flux", "TTair","TTrh","LAIb","LAInir")
  SITEdata = site_object[[2]]
  ids = SITEdata$id %>% unique
  # BUG FIX: the original assigned into a `list_of_datasets` created OUTSIDE
  # the %dopar% loop. With foreach each iteration evaluates in its own worker
  # environment, so those assignments were lost and an EMPTY list was written
  # to the workbook. foreach's return value (the last expression of each
  # iteration, combined into a list) is the correct way to collect results.
  list_of_datasets = foreach (i = ids) %dopar% {
    TT = SITEdata %>% filter(id == i)
    TT[, var_list]
  }
  names(list_of_datasets) = ids
  write.xlsx(list_of_datasets, file = paste(sitename,".xlsx",sep=""))
  dat = loadWorkbook( file = paste(sitename,".xlsx",sep=""))
  desc = readWorkbook(insert_file, sheet=1)
  addWorksheet(dat, "Пояснения")
  writeData(dat,"Пояснения",desc)
  saveWorkbook(dat, paste(sitename,".xlsx",sep=""), overwrite = TRUE)
}
#Exporting site data to csv folder\=============================================
# Export both elements of a site object to per-TT CSV files inside
# `export_folder`. Element [[1]] (raw data) gets a "raw_" filename prefix,
# element [[2]] (processed data) is written as "<id>.csv".
#
# @param site_object list of two data frames, each with an `id` column.
# @param export_folder output directory, created if missing.
# @return NULL (invisible side effect: files on disk).
export_site_to_csv_folder = function(site_object, export_folder="csv_export") {
  if(!dir.exists(export_folder)){
    dir.create(export_folder)
  }
  for (i in 1:2){
    SITEdata = site_object[[i]]
    for (t in SITEdata$id %>% unique) {
      TT = SITEdata %>% filter(id == t)
      filename = paste0(as.character(t), ".csv")
      if (i == 1) {
        # first list element is the raw (un-processed) table
        filename = paste0("raw_", filename)
      }
      # Build the full path instead of setwd()-ing into the folder, so the
      # caller's working directory is never touched (the original setwd("..")
      # would also land in the wrong place if an error occurred mid-loop).
      # The original also passed sep=";"/dec="." to write.csv, which write.csv
      # ignores with a warning — dropped; output is byte-identical.
      write.csv(TT, file = file.path(export_folder, filename))
    }
  }
  return(NULL)
}
#========================================================================================================================
# Flag values of `var_name` lying more than 3 standard deviations from the
# per-(id, year, week) mean. Adds a logical column "<var>_f" (NA where the
# group mean/sd is NA) and returns a plain data.frame.
three_sigma_weekly_flagging = function(dt, var_name){
  var_quo = enquo(var_name)
  flag_col = paste0(quo_name(var_quo), "_f")
  dt %>%
    group_by(id, year, week) %>%
    # |x - mean| > 3*sd is the same test as the original two-sided comparison
    mutate(!!flag_col := abs(!!var_quo - mean(!!var_quo)) > 3 * sd(!!var_quo)) %>%
    as.data.frame
}
#========================================================================================================================
# Flag values of `var_name` lying more than 2 standard deviations from the
# per-(id, year, week) mean. Adds a logical column "<var>_f" (NA where the
# group mean/sd is NA) and returns a plain data.frame.
two_sigma_weekly_flagging = function(dt, var_name){
  var_quo = enquo(var_name)
  flag_col = paste0(quo_name(var_quo), "_f")
  dt %>%
    group_by(id, year, week) %>%
    # |x - mean| > 2*sd is the same test as the original two-sided comparison
    mutate(!!flag_col := abs(!!var_quo - mean(!!var_quo)) > 2 * sd(!!var_quo)) %>%
    as.data.frame
}
#===========Two sigma grouped flagging
# Generic 2-sigma flagging over an arbitrary pair of grouping columns.
# Adds a logical column named paste0(<var>, suffix) that is TRUE where the
# value lies more than 2 sd from its group mean; returns a plain data.frame.
two_sigma_grouped_flagging = function(dt, var_name, group_var1, group_var2, suffix){
  var_quo = enquo(var_name)
  g1_quo = enquo(group_var1)
  g2_quo = enquo(group_var2)
  flag_col = paste0(quo_name(var_quo), suffix)
  dt %>%
    group_by(!!g1_quo, !!g2_quo) %>%
    # |x - mean| > 2*sd is the same test as the original two-sided comparison
    mutate(!!flag_col := abs(!!var_quo - mean(!!var_quo)) > 2 * sd(!!var_quo)) %>%
    as.data.frame
}
# Return a logical vector marking elements of `var` that fall outside
# mean +/- 2*sd (statistics computed with NAs removed). Infinite inputs are
# treated as missing; NA inputs yield NA in the result.
out_of_two_sigma = function(var){
  var[is.infinite(var)] = NA
  center = mean(var, na.rm = TRUE)
  spread = sd(var, na.rm = TRUE)
  # equivalent to: var > center + 2*spread | var < center - 2*spread
  abs(var - center) > 2 * spread
}
#===============================================================================
# Drop every row marked by any flag column (columns whose names end in "_f").
# Rows where a flag is NA are also dropped (NA == FALSE is NA, which filter
# removes), matching the intent of keeping only explicitly-unflagged rows.
flagged = function(dt){
  vars = names(dt)[names(dt) %>% str_ends("_f")]
  for(var in vars){
    # BUG FIX: the original used filter(!!var == FALSE). `var` is a character
    # string, so !!var unquoted the STRING ITSELF — the filter compared the
    # constant "x_f" to FALSE and dropped every row. .data[[var]] looks the
    # column up by its string name, which is what was intended.
    dt = dt %>% filter(.data[[var]] == FALSE)
  }
  return(dt)
}
#===============================================================================
# Flag hours lying within +/-4 h of the (weekly mean) daily radiation peak.
# Adds a logical `rad_flag` column to `dt`; all intermediate columns stay in
# the local copy `dtg` and are not returned.
radiation_flagging = function(dt){
  tt_data_e = dt
  # Total broadband "radiation" proxy: sum of the twelve raw spectral bands
  dtg = tt_data_e %>% mutate(radiation = b_V_810+b_B_500+b_G_550+b_R_650+b_R_610+
  b_S_680+b_T_730+b_U_760+b_W_860+b_V_450+b_Y_570+b_O_600)
  # Daily maximum radiation per tree
  dtg = dtg %>% group_by(id,year,doy) %>%
  mutate(rad_max = max(radiation, na.rm = T))
  dtg = dtg %>% mutate(is_max_hour = (radiation == rad_max)) %>% as.data.frame
  dtg$is_max_hour[is.na(dtg$is_max_hour)] = F
  # Row position of the daily maximum minus 1. NOTE(review): ifelse() with a
  # length-1 condition returns a length-1 result that mutate recycles over the
  # group — presumably the intended "hour index of the peak"; also relies on
  # %>% binding tighter than `-` and `>`. Confirm against expected output.
  dtg = dtg %>%group_by(id,year,doy) %>%
  mutate(max_hour = ifelse(is_max_hour %>% which %>% length > 0,
  is_max_hour %>% which - 1, NA)) %>% as.data.frame
  # Weekly mean peak hour (NA when the whole week has no detected peak)
  dtg = dtg %>% group_by(id,year,week) %>%
  mutate(mean_max_hour =
  ifelse(max_hour %>% is.na %>% which %>% length < max_hour %>% length,
  mean(max_hour, na.rm=T) %>% round(0) , NA))%>% as.data.frame
  # TRUE for hours within an open +/-4 h window around the weekly peak hour
  dtg = dtg %>% group_by(id,year,doy) %>%
  mutate(rad_flag = hour > mean_max_hour - 4 & hour < mean_max_hour+4) %>%
  as.data.frame
  # Only the flag is copied back onto the caller's data frame
  dt$rad_flag = dtg$rad_flag
  return(dt)
}
###########################TESTING$#############################################
#TODO remove record number calculate solid angle from 3
#TODO angles +180
#TODO collect gz2 for all treetalkers - build frequency distribution
#TODO and for given number calculate percentile
#TODO describe nt1 as stem temperature
#TODO the same index for whole day
#TODO add yellowness
#TODO Flux
#TODO quantity absolute values
#TODO quality curve
#TODO stomata closure
#TODO plot VPD vs FLUX
#TODO time of maximum flow
#TODO Fmax=gmaxVPD
#TODO Cumulative min max for all species
#TODO gmax = stomata conductance R2
#TODO frequency distribution of growth to temperature
#TODO average to same temp per week
#TODO LUT for distance T-rh-day-night
#TODO gap fill - lut
#TODO compare TTR with TT data
##
##
##### Report preparation function - gapfilling and stuff
# Placeholder for the site report preparation function (gap-filling etc.).
# Currently unimplemented: the empty body returns NULL for any input.
TTsite_repport = function(data){
  # TODO: implement gap filling and report assembly
}
|
#' @title HTTP client
#' @description Create and execute HTTP requests
#'
#' @export
#' @template args
#' @param path URL path, appended to the base URL
#' @param query query terms, as a named list
#' @param body body as an R list
#' @param encode one of form, multipart, json, or raw
#' @param disk a path to write to. if NULL (default), memory used.
#' See [curl::curl_fetch_disk()] for help.
#' @param stream an R function to determine how to stream data. if
#' NULL (default), memory used. See [curl::curl_fetch_stream()]
#' for help
#' @param ... For `retry`, the options to be passed on to the method
#' implementing the requested verb, including curl options. Otherwise,
#' curl options, only those in the acceptable set from [curl::curl_options()]
#' except the following: httpget, httppost, post, postfields, postfieldsize,
#' and customrequest
#' @return an [HttpResponse] object
#' @section handles:
#' curl handles are re-used on the level of the connection object, that is,
#' each `HttpClient` object is separate from one another so as to better
#' separate connections.
#'
#' If you don't pass in a curl handle to the `handle` parameter,
#' it gets created when a HTTP verb is called. Thus, if you try to get `handle`
#' after creating a `HttpClient` object only passing `url` parameter, `handle`
#' will be `NULL`. If you pass a curl handle to the `handle` parameter, then
#' you can get the handle from the `HttpClient` object. The response from a
#' http verb request does have the handle in the `handle` slot.
#'
#' @note A little quirk about `crul` is that because user agent string can
#' be passed as either a header or a curl option (both lead to a `User-Agent`
#' header being passed in the HTTP request), we return the user agent
#' string in the `request_headers` list of the response even if you
#' pass in a `useragent` string as a curl option. Note that whether you pass
#' in as a header like `User-Agent` or as a curl option like `useragent`,
#' it is returned as `request_headers$User-Agent` so at least accessing
#' it in the request headers is consistent.
#'
#' @seealso [post-requests], [delete-requests], [http-headers],
#' [writing-options], [cookies], [hooks]
#'
#' @examples \dontrun{
#' # set your own handle
#' (h <- handle("https://httpbin.org"))
#' (x <- HttpClient$new(handle = h))
#' x$handle
#' x$url
#' (out <- x$get("get"))
#' x$handle
#' x$url
#' class(out)
#' out$handle
#' out$request_headers
#' out$response_headers
#' out$response_headers_all
#'
#' # if you just pass a url, we create a handle for you
#' # this is how most people will use HttpClient
#' (x <- HttpClient$new(url = "https://httpbin.org"))
#' x$url
#' x$handle # is empty, it gets created when a HTTP verb is called
#' (r1 <- x$get('get'))
#' x$url
#' x$handle
#' r1$url
#' r1$handle
#' r1$content
#' r1$response_headers
#' r1$parse()
#'
#' (res_get2 <- x$get('get', query = list(hello = "world")))
#' res_get2$parse()
#' library("jsonlite")
#' jsonlite::fromJSON(res_get2$parse())
#'
#' # post request
#' (res_post <- x$post('post', body = list(hello = "world")))
#'
#' ## empty body request
#' x$post('post')
#'
#' # put request
#' (res_put <- x$put('put'))
#'
#' # delete request
#' (res_delete <- x$delete('delete'))
#'
#' # patch request
#' (res_patch <- x$patch('patch'))
#'
#' # head request
#' (res_head <- x$head())
#'
#' # query params are URL encoded for you, so DO NOT do it yourself
#' ## if you url encode yourself, it gets double encoded, and that's bad
#' (x <- HttpClient$new(url = "https://httpbin.org"))
#' res <- x$get("get", query = list(a = 'hello world'))
#'
#' # access intermediate headers in response_headers_all
#' x <- HttpClient$new("https://doi.org/10.1007/978-3-642-40455-9_52-1")
#' bb <- x$get()
#' bb$response_headers_all
#' }
HttpClient <- R6::R6Class(
  'HttpClient',
  public = list(
    #' @field url (character) a url
    url = NULL,
    #' @field opts (list) named list of curl options
    opts = list(),
    #' @field proxies a [proxy()] object
    proxies = list(),
    #' @field auth an [auth()] object
    auth = list(),
    #' @field headers (list) named list of headers, see [http-headers]
    headers = list(),
    #' @field handle a [handle()]
    handle = NULL,
    #' @field progress only supports `httr::progress()`, see [progress]
    progress = NULL,
    #' @field hooks a named list, see [hooks]
    hooks = list(),

    #' @description print method for `HttpClient` objects
    #' @param x self
    #' @param ... ignored
    print = function(x, ...) {
      cat("<crul connection> ", sep = "\n")
      cat(paste0("  url: ",
                 if (is.null(self$url)) self$handle$url else self$url),
          sep = "\n")
      cat("  curl options: ", sep = "\n")
      for (i in seq_along(self$opts)) {
        cat(sprintf("    %s: %s", names(self$opts)[i],
                    self$opts[[i]]), sep = "\n")
      }
      cat("  proxies: ", sep = "\n")
      if (length(self$proxies)) cat(paste("    -", purl(self$proxies)),
                                    sep = "\n")
      cat("  auth: ", sep = "\n")
      if (length(self$auth$userpwd)) {
        cat(paste("    -", self$auth$userpwd), sep = "\n")
        cat(paste("    - type: ", self$auth$httpauth), sep = "\n")
      }
      cat("  headers: ", sep = "\n")
      for (i in seq_along(self$headers)) {
        cat(sprintf("    %s: %s", names(self$headers)[i],
                    self$headers[[i]]), sep = "\n")
      }
      cat(paste0("  progress: ", !is.null(self$progress)), sep = "\n")
      cat("  hooks: ", sep = "\n")
      if (length(self$hooks) > 0) {
        for (i in seq_along(self$hooks)) {
          cat(sprintf("    %s: see $hooks", names(self$hooks)[i]), sep = "\n")
        }
      }
      invisible(self)
    },

    #' @description Create a new HttpClient object
    #' @param url (character) base URL for the connection
    #' @param opts any curl options
    #' @param proxies a [proxy()] object
    #' @param auth an [auth()] object
    #' @param headers named list of headers, see [http-headers]
    #' @param handle a [handle()]
    #' @param progress only supports `httr::progress()`, see [progress]
    #' @param hooks a named list, see [hooks]
    #' @return A new `HttpClient` object
    initialize = function(url, opts, proxies, auth, headers, handle,
                          progress, hooks) {
      private$crul_h_pool <- new.env(hash = TRUE, parent = emptyenv())
      if (!missing(url)) self$url <- url
      # curl options: check for set_opts first
      if (!is.null(crul_opts$opts)) self$opts <- crul_opts$opts
      if (!missing(opts)) self$opts <- opts
      # proxy: check for set_proxy first
      if (!is.null(crul_opts$proxies)) self$proxies <- crul_opts$proxies
      if (!missing(proxies)) {
        if (!inherits(proxies, "proxy")) {
          stop("proxies input must be of class proxy", call. = FALSE)
        }
        self$proxies <- proxies
      }
      # auth: check for set_auth first
      if (!is.null(crul_opts$auth)) self$auth <- crul_opts$auth
      if (!missing(auth)) self$auth <- auth
      # progress
      if (!missing(progress)) {
        assert(progress, "request")
        self$progress <- progress$options
      }
      # headers: check for set_headers first
      if (!is.null(crul_opts$headers)) self$headers <- crul_opts$headers
      if (!missing(headers)) self$headers <- headers
      if (!missing(handle)) {
        assert(handle, "list")
        stopifnot(all(c("url", "handle") %in% names(handle)))
        self$handle <- handle
      }
      if (is.null(self$url) && is.null(self$handle)) {
        stop("need one of url or handle", call. = FALSE)
      }
      # hooks
      if (!missing(hooks)) {
        assert(hooks, "list")
        if (!all(has_name(hooks))) stop("'hooks' must be a named list",
                                        call. = FALSE)
        if (!all(names(hooks) %in% c("request", "response")))
          stop("unsupported names in 'hooks' list: only request, ",
               "response supported", call. = FALSE)
        invisible(lapply(hooks, function(z) {
          if (!inherits(z, "function"))
            stop("hooks must be functions", call. = FALSE)
        }))
        self$hooks <- hooks
      }
    },

    #' @description Make a GET request
    get = function(path = NULL, query = list(), disk = NULL,
                   stream = NULL, ...) {
      curl_opts_check(...)
      url <- private$make_url(self$url, self$handle, path, query)
      rr <- list(
        url = url,
        method = "get",
        options = ccp(list(httpget = TRUE)),
        headers = def_head()
      )
      rr$headers <- norm_headers(rr$headers, self$headers)
      # Only set a default user agent if none was supplied either as a curl
      # option or as a header.
      # BUG FIX: the original tested `"useragent" %in% self$opts`, which
      # checks the option VALUES; the option NAMES are what identify a
      # user-set useragent, so check names(self$opts).
      if (
        !"useragent" %in% names(self$opts) &&
        !"user-agent" %in% tolower(names(rr$headers))
      ) {
        rr$options$useragent <- make_ua()
      }
      rr$options <- utils::modifyList(
        rr$options, c(self$opts, self$proxies, self$auth, self$progress, ...))
      rr$options <- curl_opts_fil(rr$options)
      rr$disk <- disk
      rr$stream <- stream
      private$make_request(rr)
    },

    #' @description Make a POST request
    post = function(path = NULL, query = list(), body = NULL, disk = NULL,
                    stream = NULL, encode = "multipart", ...) {
      curl_opts_check(...)
      url <- private$make_url(self$url, self$handle, path, query)
      opts <- prep_body(body, encode)
      rr <- prep_opts("post", url, self, opts, ...)
      rr$disk <- disk
      rr$stream <- stream
      private$make_request(rr)
    },

    #' @description Make a PUT request
    put = function(path = NULL, query = list(), body = NULL, disk = NULL,
                   stream = NULL, encode = "multipart", ...) {
      curl_opts_check(...)
      url <- private$make_url(self$url, self$handle, path, query)
      opts <- prep_body(body, encode)
      rr <- prep_opts("put", url, self, opts, ...)
      rr$disk <- disk
      rr$stream <- stream
      private$make_request(rr)
    },

    #' @description Make a PATCH request
    patch = function(path = NULL, query = list(), body = NULL, disk = NULL,
                     stream = NULL, encode = "multipart", ...) {
      curl_opts_check(...)
      url <- private$make_url(self$url, self$handle, path, query)
      opts <- prep_body(body, encode)
      rr <- prep_opts("patch", url, self, opts, ...)
      rr$disk <- disk
      rr$stream <- stream
      private$make_request(rr)
    },

    #' @description Make a DELETE request
    delete = function(path = NULL, query = list(), body = NULL, disk = NULL,
                      stream = NULL, encode = "multipart", ...) {
      curl_opts_check(...)
      url <- private$make_url(self$url, self$handle, path, query)
      opts <- prep_body(body, encode)
      rr <- prep_opts("delete", url, self, opts, ...)
      rr$disk <- disk
      rr$stream <- stream
      private$make_request(rr)
    },

    #' @description Make a HEAD request
    head = function(path = NULL, query = list(), ...) {
      curl_opts_check(...)
      url <- private$make_url(self$url, self$handle, path, query)
      opts <- list(customrequest = "HEAD", nobody = TRUE)
      rr <- list(
        url = url,
        method = "head",
        options = ccp(opts),
        headers = self$headers
      )
      # BUG FIX: same names(self$opts) fix as in get() above
      if (
        !"useragent" %in% names(self$opts) &&
        !"user-agent" %in% tolower(names(rr$headers))
      ) {
        rr$options$useragent <- make_ua()
      }
      rr$options <- utils::modifyList(
        rr$options,
        c(self$opts, self$proxies, self$auth, ...))
      private$make_request(rr)
    },

    #' @description Use an arbitrary HTTP verb supported on this class
    #' Supported verbs: get, post, put, patch, delete, head. Also
    #' supports retry
    #' @param verb an HTTP verb supported on this class: get,
    #' post, put, patch, delete, head. Also supports retry.
    #' @examples \dontrun{
    #' (x <- HttpClient$new(url = "https://httpbin.org"))
    #' x$verb('get')
    #' x$verb('GET')
    #' x$verb('GET', query = list(foo = "bar"))
    #' x$verb('retry', 'GET', path = "status/400")
    #' }
    verb = function(verb, ...) {
      stopifnot(is.character(verb), length(verb) > 0)
      verbs <- c("get", "post", "put", "patch",
                 "delete", "head", "retry")
      if (!tolower(verb) %in% verbs)
        stop("'verb' must be one of: ", paste0(verbs, collapse = ", "))
      verb_func <- self[[tolower(verb)]]
      stopifnot(is.function(verb_func))
      verb_func(...)
    },

    #' @description Retry a request
    #' @details Retries the request given by `verb` until successful
    #' (HTTP response status < 400), or a condition for giving up is met.
    #' Automatically recognizes `Retry-After` and `X-RateLimit-Reset` headers
    #' in the response for rate-limited remote APIs.
    #' @param verb an HTTP verb supported on this class: get,
    #' post, put, patch, delete, head. Also supports retry.
    #' @param times the maximum number of times to retry. Set to `Inf` to
    #' not stop retrying due to exhausting the number of attempts.
    #' @param pause_base,pause_cap,pause_min basis, maximum, and minimum for
    #' calculating wait time for retry. Wait time is calculated according to the
    #' exponential backoff with full jitter algorithm. Specifically, wait time is
    #' chosen randomly between `pause_min` and the lesser of `pause_base * 2` and
    #' `pause_cap`, with `pause_base` doubling on each subsequent retry attempt.
    #' Use `pause_cap = Inf` to not terminate retrying due to cap of wait time
    #' reached.
    #' @param terminate_on,retry_only_on a vector of HTTP status codes. For
    #' `terminate_on`, the status codes for which to terminate retrying, and for
    #' `retry_only_on`, the status codes for which to retry the request.
    #' @param onwait a callback function if the request will be retried and
    #' a wait time is being applied. The function will be passed two parameters,
    #' the response object from the failed request, and the wait time in seconds.
    #' Note that the time spent in the function effectively adds to the wait time,
    #' so it should be kept simple.
    #' @examples \dontrun{
    #' x <- HttpClient$new(url = "https://httpbin.org")
    #'
    #' # retry, by default at most 3 times
    #' (res_get <- x$retry("GET", path = "status/400"))
    #'
    #' # retry, but not for 404 NOT FOUND
    #' (res_get <- x$retry("GET", path = "status/404", terminate_on = c(404)))
    #'
    #' # retry, but only for exceeding rate limit (note that e.g. Github uses 403)
    #' (res_get <- x$retry("GET", path = "status/429", retry_only_on = c(403, 429)))
    #' }
    retry = function(verb, ...,
                     pause_base = 1, pause_cap = 60, pause_min = 1, times = 3,
                     terminate_on = NULL, retry_only_on = NULL,
                     onwait = NULL) {
      stopifnot(is.character(verb), length(verb) > 0)
      stopifnot(is.null(onwait) || is.function(onwait))
      verb_func <- self[[tolower(verb)]]
      stopifnot(is.function(verb_func))
      resp <- verb_func(...)
      if ((resp$status_code >= 400) &&
          (! resp$status_code %in% terminate_on) &&
          (is.null(retry_only_on) || resp$status_code %in% retry_only_on) &&
          (times > 0) &&
          (pause_base < pause_cap)) {
        rh <- resp$response_headers
        if (! is.null(rh[["retry-after"]])) {
          # server told us exactly how long to wait
          wait_time <- as.numeric(rh[["retry-after"]])
        } else if (identical(rh[["x-ratelimit-remaining"]], "0") &&
                   ! is.null(rh[["x-ratelimit-reset"]])) {
          # rate limited: wait until the reset epoch
          wait_time <- max(0, as.numeric(rh[["x-ratelimit-reset"]]) -
                              as.numeric(Sys.time()))
        } else {
          if (is.null(pause_min)) pause_min <- pause_base
          # exponential backoff with full jitter
          wait_time <- stats::runif(1,
                                    min = pause_min,
                                    max = min(pause_base * 2, pause_cap))
        }
        if (! (wait_time > pause_cap)) {
          if (is.function(onwait)) onwait(resp, wait_time)
          Sys.sleep(wait_time)
          resp <- self$retry(verb = verb, ...,
                             pause_base = pause_base * 2,
                             pause_cap = pause_cap,
                             pause_min = pause_min,
                             times = times - 1,
                             terminate_on = terminate_on,
                             retry_only_on = retry_only_on,
                             onwait = onwait)
        }
      }
      resp
    },

    #' @description reset your curl handle
    handle_pop = function() {
      name <- handle_make(self$url)
      if (exists(name, envir = private$crul_h_pool)) {
        rm(list = name, envir = private$crul_h_pool)
      }
    },

    #' @description get the URL that would be sent (i.e., before executing
    #' the request) the only things that change the URL are path and query
    #' parameters; body and any curl options don't change the URL
    #' @return URL (character)
    #' @examples
    #' x <- HttpClient$new(url = "https://httpbin.org")
    #' x$url_fetch()
    #' x$url_fetch('get')
    #' x$url_fetch('post')
    #' x$url_fetch('get', query = list(foo = "bar"))
    url_fetch = function(path = NULL, query = list()) {
      private$make_url(self$url, path = path, query = query)$url
    }
  ),

  private = list(
    request = NULL,
    crul_h_pool = NULL,
    # Look up (or lazily create and pool) the curl handle for a url
    handle_find = function(x) {
      z <- handle_make(x)
      if (exists(z, private$crul_h_pool)) {
        handle <- private$crul_h_pool[[z]]
      } else {
        handle <- handle(z)
        private$crul_h_pool[[z]] <- handle
      }
      return(handle)
    },
    # Build the final request url (path + encoded query) and pick the handle
    make_url = function(url = NULL, handle = NULL, path, query) {
      if (!is.null(handle)) {
        url <- handle$url
      } else {
        handle <- private$handle_find(url)
        url <- handle$url
      }
      if (!is.null(path)) {
        urltools::path(url) <- path
      }
      url <- gsub("\\s", "%20", url)
      url <- add_query(query, url)
      return(list(url = url, handle = handle$handle))
    },
    # Configure the curl handle from the prepared request list and execute it
    make_request = function(opts) {
      # BUG FIX: the original wrapped this check in
      #   if (xor(!is.null(opts$disk), !is.null(opts$stream)))
      # xor() is TRUE only when EXACTLY ONE of the two is set, so the inner
      # "both set" stop() was unreachable and the error never fired.
      if (!is.null(opts$disk) && !is.null(opts$stream)) {
        stop("disk and stream can not be used together", call. = FALSE)
      }
      curl::handle_setopt(opts$url$handle, .list = opts$options)
      if (!is.null(opts$fields)) {
        curl::handle_setform(opts$url$handle, .list = opts$fields)
      }
      curl::handle_setheaders(opts$url$handle, .list = opts$headers)
      on.exit(curl::handle_reset(opts$url$handle), add = TRUE)
      if ("request" %in% names(self$hooks)) self$hooks$request(opts)
      if (crul_opts$mock) {
        check_for_package("webmockr")
        adap <- webmockr::CrulAdapter$new()
        return(adap$handle_request(opts))
      } else {
        resp <- crul_fetch(opts)
      }
      if ("response" %in% names(self$hooks)) self$hooks$response(resp)
      # prep headers
      if (grepl("^ftp://", resp$url)) {
        headers <- list()
      } else {
        hh <- rawToChar(resp$headers %||% raw(0))
        if (is.null(hh) || nchar(hh) == 0) {
          headers <- list()
        } else {
          headers <- lapply(curl::parse_headers(hh, multiple = TRUE),
                            headers_parse)
        }
      }
      # build response
      HttpResponse$new(
        method = opts$method,
        url = resp$url,
        status_code = resp$status_code,
        request_headers =
          c("User-Agent" = opts$options$useragent, opts$headers),
        response_headers = last(headers),
        response_headers_all = headers,
        modified = resp$modified,
        times = resp$times,
        content = resp$content,
        handle = opts$url$handle,
        request = opts
      )
    }
  )
)
| /R/client.R | permissive | aaronwolen/crul | R | false | false | 19,836 | r | #' @title HTTP client
#' @description Create and execute HTTP requests
#'
#' @export
#' @template args
#' @param path URL path, appended to the base URL
#' @param query query terms, as a named list
#' @param body body as an R list
#' @param encode one of form, multipart, json, or raw
#' @param disk a path to write to. if NULL (default), memory used.
#' See [curl::curl_fetch_disk()] for help.
#' @param stream an R function to determine how to stream data. if
#' NULL (default), memory used. See [curl::curl_fetch_stream()]
#' for help
#' @param ... For `retry`, the options to be passed on to the method
#' implementing the requested verb, including curl options. Otherwise,
#' curl options, only those in the acceptable set from [curl::curl_options()]
#' except the following: httpget, httppost, post, postfields, postfieldsize,
#' and customrequest
#' @return an [HttpResponse] object
#' @section handles:
#' curl handles are re-used on the level of the connection object, that is,
#' each `HttpClient` object is separate from one another so as to better
#' separate connections.
#'
#' If you don't pass in a curl handle to the `handle` parameter,
#' it gets created when a HTTP verb is called. Thus, if you try to get `handle`
#' after creating a `HttpClient` object only passing `url` parameter, `handle`
#' will be `NULL`. If you pass a curl handle to the `handle` parameter, then
#' you can get the handle from the `HttpClient` object. The response from a
#' http verb request does have the handle in the `handle` slot.
#'
#' @note A little quirk about `crul` is that because user agent string can
#' be passed as either a header or a curl option (both lead to a `User-Agent`
#' header being passed in the HTTP request), we return the user agent
#' string in the `request_headers` list of the response even if you
#' pass in a `useragent` string as a curl option. Note that whether you pass
#' in as a header like `User-Agent` or as a curl option like `useragent`,
#' it is returned as `request_headers$User-Agent` so at least accessing
#' it in the request headers is consistent.
#'
#' @seealso [post-requests], [delete-requests], [http-headers],
#' [writing-options], [cookies], [hooks]
#'
#' @examples \dontrun{
#' # set your own handle
#' (h <- handle("https://httpbin.org"))
#' (x <- HttpClient$new(handle = h))
#' x$handle
#' x$url
#' (out <- x$get("get"))
#' x$handle
#' x$url
#' class(out)
#' out$handle
#' out$request_headers
#' out$response_headers
#' out$response_headers_all
#'
#' # if you just pass a url, we create a handle for you
#' # this is how most people will use HttpClient
#' (x <- HttpClient$new(url = "https://httpbin.org"))
#' x$url
#' x$handle # is empty, it gets created when a HTTP verb is called
#' (r1 <- x$get('get'))
#' x$url
#' x$handle
#' r1$url
#' r1$handle
#' r1$content
#' r1$response_headers
#' r1$parse()
#'
#' (res_get2 <- x$get('get', query = list(hello = "world")))
#' res_get2$parse()
#' library("jsonlite")
#' jsonlite::fromJSON(res_get2$parse())
#'
#' # post request
#' (res_post <- x$post('post', body = list(hello = "world")))
#'
#' ## empty body request
#' x$post('post')
#'
#' # put request
#' (res_put <- x$put('put'))
#'
#' # delete request
#' (res_delete <- x$delete('delete'))
#'
#' # patch request
#' (res_patch <- x$patch('patch'))
#'
#' # head request
#' (res_head <- x$head())
#'
#' # query params are URL encoded for you, so DO NOT do it yourself
#' ## if you url encode yourself, it gets double encoded, and that's bad
#' (x <- HttpClient$new(url = "https://httpbin.org"))
#' res <- x$get("get", query = list(a = 'hello world'))
#'
#' # access intermediate headers in response_headers_all
#' x <- HttpClient$new("https://doi.org/10.1007/978-3-642-40455-9_52-1")
#' bb <- x$get()
#' bb$response_headers_all
#' }
HttpClient <- R6::R6Class(
'HttpClient',
public = list(
#' @field url (character) a url
url = NULL,
#' @field opts (list) named list of curl options
opts = list(),
#' @field proxies a [proxy()] object
proxies = list(),
#' @field auth an [auth()] object
auth = list(),
#' @field headers (list) named list of headers, see [http-headers]
headers = list(),
#' @field handle a [handle()]
handle = NULL,
#' @field progress only supports `httr::progress()`, see [progress]
progress = NULL,
#' @field hooks a named list, see [hooks]
hooks = list(),
    #' @description print method for `HttpClient` objects
    #' @param x self
    #' @param ... ignored
    #' @return self, invisibly (called for its side effect of printing)
    print = function(x, ...) {
      cat("<crul connection> ", sep = "\n")
      # prefer the explicit url; fall back to the handle's url
      cat(paste0(" url: ",
        if (is.null(self$url)) self$handle$url else self$url),
        sep = "\n")
      cat(" curl options: ", sep = "\n")
      for (i in seq_along(self$opts)) {
        cat(sprintf(" %s: %s", names(self$opts)[i],
          self$opts[[i]]), sep = "\n")
      }
      cat(" proxies: ", sep = "\n")
      if (length(self$proxies)) cat(paste(" -", purl(self$proxies)),
        sep = "\n")
      cat(" auth: ", sep = "\n")
      if (length(self$auth$userpwd)) {
        cat(paste(" -", self$auth$userpwd), sep = "\n")
        cat(paste(" - type: ", self$auth$httpauth), sep = "\n")
      }
      cat(" headers: ", sep = "\n")
      for (i in seq_along(self$headers)) {
        cat(sprintf(" %s: %s", names(self$headers)[i],
          self$headers[[i]]), sep = "\n")
      }
      cat(paste0(" progress: ", !is.null(self$progress)), sep = "\n")
      # hook bodies are not printed, just their names
      cat(" hooks: ", sep = "\n")
      if (length(self$hooks) > 0) {
        for (i in seq_along(self$hooks)) {
          cat(sprintf(" %s: see $hooks", names(self$hooks)[i]), sep = "\n")
        }
      }
      invisible(self)
    },
    #' @description Create a new HttpClient object
    #' @param url (character) base URL for the connection
    #' @param opts any curl options
    #' @param proxies a [proxy()] object
    #' @param auth an [auth()] object
    #' @param headers named list of headers, see [http-headers]
    #' @param handle a [handle()]
    #' @param progress only supports `httr::progress()`, see [progress]
    #' @param hooks a named list, see [hooks]
    #' @return A new `HttpClient` object
    initialize = function(url, opts, proxies, auth, headers, handle,
      progress, hooks) {
      # per-client handle pool; handles are created lazily per URL
      private$crul_h_pool <- new.env(hash = TRUE, parent = emptyenv())
      if (!missing(url)) self$url <- url
      # curl options: check for set_opts first
      if (!is.null(crul_opts$opts)) self$opts <- crul_opts$opts
      if (!missing(opts)) self$opts <- opts
      # proxy: check for set_proxy first
      if (!is.null(crul_opts$proxies)) self$proxies <- crul_opts$proxies
      if (!missing(proxies)) {
        if (!inherits(proxies, "proxy")) {
          stop("proxies input must be of class proxy", call. = FALSE)
        }
        self$proxies <- proxies
      }
      # auth: check for set_auth first
      if (!is.null(crul_opts$auth)) self$auth <- crul_opts$auth
      if (!missing(auth)) self$auth <- auth
      # progress
      if (!missing(progress)) {
        assert(progress, "request")
        self$progress <- progress$options
      }
      # headers: check for set_headers first
      if (!is.null(crul_opts$headers)) self$headers <- crul_opts$headers
      if (!missing(headers)) self$headers <- headers
      # a user-supplied handle must carry both its url and the raw handle
      if (!missing(handle)) {
        assert(handle, "list")
        stopifnot(all(c("url", "handle") %in% names(handle)))
        self$handle <- handle
      }
      if (is.null(self$url) && is.null(self$handle)) {
        stop("need one of url or handle", call. = FALSE)
      }
      # hooks: only 'request' and 'response' hooks are supported
      if (!missing(hooks)) {
        assert(hooks, "list")
        if (!all(has_name(hooks))) stop("'hooks' must be a named list",
          call. = FALSE)
        if (!all(names(hooks) %in% c("request", "response")))
          stop("unsupported names in 'hooks' list: only request, ",
            "response supported", call. = FALSE)
        invisible(lapply(hooks, function(z) {
          if (!inherits(z, "function"))
            stop("hooks must be functions", call. = FALSE)
        }))
        self$hooks <- hooks
      }
    },
#' @description Make a GET request
get = function(path = NULL, query = list(), disk = NULL,
stream = NULL, ...) {
curl_opts_check(...)
url <- private$make_url(self$url, self$handle, path, query)
rr <- list(
url = url,
method = "get",
options = ccp(list(httpget = TRUE)),
headers = def_head()
)
rr$headers <- norm_headers(rr$headers, self$headers)
if (
!"useragent" %in% self$opts &&
!"user-agent" %in% tolower(names(rr$headers))
) {
rr$options$useragent <- make_ua()
}
rr$options <- utils::modifyList(
rr$options, c(self$opts, self$proxies, self$auth, self$progress, ...))
rr$options <- curl_opts_fil(rr$options)
rr$disk <- disk
rr$stream <- stream
private$make_request(rr)
},
#' @description Make a POST request
post = function(path = NULL, query = list(), body = NULL, disk = NULL,
stream = NULL, encode = "multipart", ...) {
curl_opts_check(...)
url <- private$make_url(self$url, self$handle, path, query)
opts <- prep_body(body, encode)
rr <- prep_opts("post", url, self, opts, ...)
rr$disk <- disk
rr$stream <- stream
private$make_request(rr)
},
#' @description Make a PUT request
put = function(path = NULL, query = list(), body = NULL, disk = NULL,
stream = NULL, encode = "multipart", ...) {
curl_opts_check(...)
url <- private$make_url(self$url, self$handle, path, query)
opts <- prep_body(body, encode)
rr <- prep_opts("put", url, self, opts, ...)
rr$disk <- disk
rr$stream <- stream
private$make_request(rr)
},
#' @description Make a PATCH request
patch = function(path = NULL, query = list(), body = NULL, disk = NULL,
stream = NULL, encode = "multipart", ...) {
curl_opts_check(...)
url <- private$make_url(self$url, self$handle, path, query)
opts <- prep_body(body, encode)
rr <- prep_opts("patch", url, self, opts, ...)
rr$disk <- disk
rr$stream <- stream
private$make_request(rr)
},
#' @description Make a DELETE request
delete = function(path = NULL, query = list(), body = NULL, disk = NULL,
stream = NULL, encode = "multipart", ...) {
curl_opts_check(...)
url <- private$make_url(self$url, self$handle, path, query)
opts <- prep_body(body, encode)
rr <- prep_opts("delete", url, self, opts, ...)
rr$disk <- disk
rr$stream <- stream
private$make_request(rr)
},
#' @description Make a HEAD request
head = function(path = NULL, query = list(), ...) {
curl_opts_check(...)
url <- private$make_url(self$url, self$handle, path, query)
opts <- list(customrequest = "HEAD", nobody = TRUE)
rr <- list(
url = url,
method = "head",
options = ccp(opts),
headers = self$headers
)
if (
!"useragent" %in% self$opts &&
!"user-agent" %in% tolower(names(rr$headers))
) {
rr$options$useragent <- make_ua()
}
rr$options <- utils::modifyList(
rr$options,
c(self$opts, self$proxies, self$auth, ...))
private$make_request(rr)
},
#' @description Use an arbitrary HTTP verb supported on this class
#' Supported verbs: get, post, put, patch, delete, head. Also
#' supports retry
#' @param verb an HTTP verb supported on this class: get,
#' post, put, patch, delete, head. Also supports retry.
#' @examples \dontrun{
#' (x <- HttpClient$new(url = "https://httpbin.org"))
#' x$verb('get')
#' x$verb('GET')
#' x$verb('GET', query = list(foo = "bar"))
#' x$verb('retry', 'GET', path = "status/400")
#' }
verb = function(verb, ...) {
stopifnot(is.character(verb), length(verb) > 0)
verbs <- c("get", "post", "put", "patch",
"delete", "head", "retry")
if (!tolower(verb) %in% verbs)
stop("'verb' must be one of: ", paste0(verbs, collapse = ", "))
verb_func <- self[[tolower(verb)]]
stopifnot(is.function(verb_func))
verb_func(...)
},
#' @description Retry a request
#' @details Retries the request given by `verb` until successful
#' (HTTP response status < 400), or a condition for giving up is met.
#' Automatically recognizes `Retry-After` and `X-RateLimit-Reset` headers
#' in the response for rate-limited remote APIs.
#' @param verb an HTTP verb supported on this class: get,
#' post, put, patch, delete, head. Also supports retry.
#' @param times the maximum number of times to retry. Set to `Inf` to
#' not stop retrying due to exhausting the number of attempts.
#' @param pause_base,pause_cap,pause_min basis, maximum, and minimum for
#' calculating wait time for retry. Wait time is calculated according to the
#' exponential backoff with full jitter algorithm. Specifically, wait time is
#' chosen randomly between `pause_min` and the lesser of `pause_base * 2` and
#' `pause_cap`, with `pause_base` doubling on each subsequent retry attempt.
#' Use `pause_cap = Inf` to not terminate retrying due to cap of wait time
#' reached.
#' @param terminate_on,retry_only_on a vector of HTTP status codes. For
#' `terminate_on`, the status codes for which to terminate retrying, and for
#' `retry_only_on`, the status codes for which to retry the request.
#' @param onwait a callback function if the request will be retried and
#' a wait time is being applied. The function will be passed two parameters,
#' the response object from the failed request, and the wait time in seconds.
#' Note that the time spent in the function effectively adds to the wait time,
#' so it should be kept simple.
#' @examples \dontrun{
#' x <- HttpClient$new(url = "https://httpbin.org")
#'
#' # retry, by default at most 3 times
#' (res_get <- x$retry("GET", path = "status/400"))
#'
#' # retry, but not for 404 NOT FOUND
#' (res_get <- x$retry("GET", path = "status/404", terminate_on = c(404)))
#'
#' # retry, but only for exceeding rate limit (note that e.g. Github uses 403)
#' (res_get <- x$retry("GET", path = "status/429", retry_only_on = c(403, 429)))
#' }
    retry = function(verb, ...,
      pause_base = 1, pause_cap = 60, pause_min = 1, times = 3,
      terminate_on = NULL, retry_only_on = NULL,
      onwait = NULL) {
      stopifnot(is.character(verb), length(verb) > 0)
      stopifnot(is.null(onwait) || is.function(onwait))
      verb_func <- self[[tolower(verb)]]
      stopifnot(is.function(verb_func))
      resp <- verb_func(...)
      # retry only on error statuses not excluded by terminate_on /
      # retry_only_on, while attempts remain and the (doubling)
      # backoff base has not reached the cap
      if ((resp$status_code >= 400) &&
        (! resp$status_code %in% terminate_on) &&
        (is.null(retry_only_on) || resp$status_code %in% retry_only_on) &&
        (times > 0) &&
        (pause_base < pause_cap)) {
        rh <- resp$response_headers
        if (! is.null(rh[["retry-after"]])) {
          # server-instructed wait takes precedence
          wait_time <- as.numeric(rh[["retry-after"]])
        } else if (identical(rh[["x-ratelimit-remaining"]], "0") &&
          ! is.null(rh[["x-ratelimit-reset"]])) {
          # rate limited: wait until the advertised reset epoch time
          wait_time <- max(0, as.numeric(rh[["x-ratelimit-reset"]]) -
            as.numeric(Sys.time()))
        } else {
          if (is.null(pause_min)) pause_min <- pause_base
          # exponential backoff with full jitter
          wait_time <- stats::runif(1,
            min = pause_min,
            max = min(pause_base * 2, pause_cap))
        }
        # give up rather than wait longer than the cap allows
        if (! (wait_time > pause_cap)) {
          if (is.function(onwait)) onwait(resp, wait_time)
          Sys.sleep(wait_time)
          # recurse with doubled backoff base and one fewer attempt
          resp <- self$retry(verb = verb, ...,
            pause_base = pause_base * 2,
            pause_cap = pause_cap,
            pause_min = pause_min,
            times = times - 1,
            terminate_on = terminate_on,
            retry_only_on = retry_only_on,
            onwait = onwait)
        }
      }
      resp
    },
#' @description reset your curl handle
handle_pop = function() {
name <- handle_make(self$url)
if (exists(name, envir = private$crul_h_pool)) {
rm(list = name, envir = private$crul_h_pool)
}
},
#' @description get the URL that would be sent (i.e., before executing
#' the request) the only things that change the URL are path and query
#' parameters; body and any curl options don't change the URL
#' @return URL (character)
#' @examples
#' x <- HttpClient$new(url = "https://httpbin.org")
#' x$url_fetch()
#' x$url_fetch('get')
#' x$url_fetch('post')
#' x$url_fetch('get', query = list(foo = "bar"))
url_fetch = function(path = NULL, query = list()) {
private$make_url(self$url, path = path, query = query)$url
}
),
private = list(
request = NULL,
crul_h_pool = NULL,
handle_find = function(x) {
z <- handle_make(x)
if (exists(z, private$crul_h_pool)) {
handle <- private$crul_h_pool[[z]]
} else {
handle <- handle(z)
private$crul_h_pool[[z]] <- handle
}
return(handle)
},
make_url = function(url = NULL, handle = NULL, path, query) {
if (!is.null(handle)) {
url <- handle$url
} else {
handle <- private$handle_find(url)
url <- handle$url
}
if (!is.null(path)) {
urltools::path(url) <- path
}
url <- gsub("\\s", "%20", url)
url <- add_query(query, url)
return(list(url = url, handle = handle$handle))
},
make_request = function(opts) {
if (xor(!is.null(opts$disk), !is.null(opts$stream))) {
if (!is.null(opts$disk) && !is.null(opts$stream)) {
stop("disk and stream can not be used together", call. = FALSE)
}
}
curl::handle_setopt(opts$url$handle, .list = opts$options)
if (!is.null(opts$fields)) {
curl::handle_setform(opts$url$handle, .list = opts$fields)
}
curl::handle_setheaders(opts$url$handle, .list = opts$headers)
on.exit(curl::handle_reset(opts$url$handle), add = TRUE)
if ("request" %in% names(self$hooks)) self$hooks$request(opts)
if (crul_opts$mock) {
check_for_package("webmockr")
adap <- webmockr::CrulAdapter$new()
return(adap$handle_request(opts))
} else {
resp <- crul_fetch(opts)
}
if ("response" %in% names(self$hooks)) self$hooks$response(resp)
# prep headers
if (grepl("^ftp://", resp$url)) {
headers <- list()
} else {
hh <- rawToChar(resp$headers %||% raw(0))
if (is.null(hh) || nchar(hh) == 0) {
headers <- list()
} else {
headers <- lapply(curl::parse_headers(hh, multiple = TRUE),
headers_parse)
}
}
# build response
HttpResponse$new(
method = opts$method,
url = resp$url,
status_code = resp$status_code,
request_headers =
c("User-Agent" = opts$options$useragent, opts$headers),
response_headers = last(headers),
response_headers_all = headers,
modified = resp$modified,
times = resp$times,
content = resp$content,
handle = opts$url$handle,
request = opts
)
}
)
)
|
# Auto-generated fuzzing/valgrind regression input: replays extreme
# numeric edge cases (-Inf, NaN, denormals) against the internal
# meteor:::ET0_ThornthwaiteWilmott evapotranspiration routine.
testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.5134579018881e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615832638-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 420 | r | testlist <- list(doy = c(-Inf, 0), latitude = c(-6.93132091139805e-107, 1.86807199752012e+112, -Inf, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, NaN, -1.5134579018881e+21, 1.44942408802595e-285, -1.72131968218895e+83, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
library(boot)
library(reshape2)
library(lme4)
# Input data: PNC demographics and the participant list for this analysis.
# NOTE(review): paths are hard-coded to the home directory.
demo<-read.csv('~/pnc_demo.csv')
subjects<-read.csv('~/participants.txt',header = F)
# assemble age/sex/scanid/bblid into one frame
ageSex<-data.frame(demo$ageAtScan1,as.factor(demo$sex),demo$scanid,demo$bblid)
colnames(ageSex)[4]<-'bblid'
colnames(ageSex)[1]<-'Age'
# per-task in-scanner motion QA summaries
Rest_Motion_Data <- read.csv("~/n1601_RestQAData_20170714.csv")
NBack_Motion_Data <- read.csv("~/n1601_NBACKQAData_20181001.csv")
Idemo_Motion_Data <- read.csv("~/n1601_idemo_FinalQA_092817.csv")
motmerge<-merge(Rest_Motion_Data,NBack_Motion_Data,by='bblid')
motmerge<-merge(motmerge,Idemo_Motion_Data,by='bblid')
# average relative RMS motion across the three tasks
motmerge$Motion <- (motmerge$restRelMeanRMSMotion + motmerge$nbackRelMeanRMSMotion + motmerge$idemoRelMeanRMSMotion)/3;
motiondf<-data.frame(motmerge$bblid,motmerge$Motion)
colnames(motiondf)<-c('bblid','Motion')
colnames(subjects)<-c("scanid")
colnames(ageSex)<-c("Age","Sex","scanid","bblid")
# keep only listed participants, then attach the motion covariate
df<-merge(subjects,ageSex,by="scanid")
df<-merge(df,motiondf,by='bblid')
# 3 x 6 container: fixed-effect estimates plus bootstrap CI bounds
outputcolnames=c('Age','logScale','AgeXlogScale','Motion','Sex','ageInt')
outputrownames=c('Coef','BoStrCI_Low','BoStr_CI_Upp')
outarray=matrix(1,nrow=3,ncol=6)
colnames(outarray)=outputcolnames
rownames(outarray)=outputrownames
# 9/10/20 section to write out subject level intercepts and slopes
subjoutputcolnames=c('bblid','Intercept')
subjoutarray=matrix(1,nrow=693,ncol=2)
colnames(subjoutarray)=subjoutputcolnames
# 9/21/20 - bootstrap section added
# Statistic for boot(): refit the mixed model on the bootstrap sample
# selected by row indices `i` and return its fixed-effect coefficients.
# NOTE(review): resampling rows ignores the bblid grouping structure —
# confirm a row-level (rather than cluster-level) bootstrap is intended.
returnLmCoefs<-function(d,i){
  resampled <- d[i,]
  fit <- glmer(value~Motion+Sex+log(Scale)*Age+(1|bblid),data=resampled)
  fixef(fit)
}
# Per-vertex loop: fit a mixed-effects model of vertex value across
# scales, bootstrap CIs for the fixed effects, and write one CSV per
# vertex. 17734 is hard-coded — presumably the vertex count; confirm.
for (v in 1:17734){
print(v)
# wrangle data, get variables into approp. var type
vFP=paste('~/mixedEffectModels/v',v,'_bwVals_overScales.csv',sep='')
verts=read.csv(vFP)
# column 30 is assumed to hold the subject id — TODO confirm layout
colnames(verts)[30]<-'bblid'
df_verts<-merge(df,verts,by="bblid")
# wide -> long: id columns 1-5, remaining columns are per-scale values
mdf_verts<-melt(df_verts,id=c(1,2,3,4,5))
mdf_verts$bblid<-as.factor(mdf_verts$bblid)
colnames(mdf_verts)[6]<-c('Scale')
mdf_verts$Scale<-as.integer(mdf_verts$Scale)
mdf_verts$Sex<-as.factor(mdf_verts$Sex)
mdf_verts$Age<-as.numeric(mdf_verts$Age)
# model for this vertex: random intercept per subject
model=glmer(value~Motion+Sex+log(Scale)*Age+(1|bblid),data=mdf_verts)
fe=fixef(model)
re=ranef(model)
re=re$bblid
# pull out coefficients
fMot=fe['Motion']
# NOTE(review): for a factor, fixef() usually names this term like
# 'Sex2'/'Sexmale'; fe['Sex'] may index nothing — verify
fSex=fe['Sex']
fAge=fe['Age']
flSc=fe['log(Scale)']
fIntrxn=fe['log(Scale):Age']
Intercept=fe['(Intercept)']
# intercept correlated with Age?
# NOTE(review): assumes ranef rows align with df row order — verify
ageInt=cor.test(re$`(Intercept)`,df$Age,method='spearman')$estimate
# coefficients into first row, along with age x Intercept corr.
outarray[1,1]=fAge
outarray[1,2]=flSc
outarray[1,3]=fIntrxn
outarray[1,4]=fMot
outarray[1,5]=fSex
outarray[1,6]=ageInt
# hang on to yer bootstraps (1000 resamples; rows 2-3 get CI bounds)
results <- boot(data=mdf_verts, statistic=returnLmCoefs, R=1000)
# index 1 is intercept (no CI stored for it)
# index 2 is Motion
outarray[2,4]=boot.ci(results,type="norm",index=2)$normal[2]
outarray[3,4]=boot.ci(results,type="norm",index=2)$normal[3]
# index 3 is Sex
outarray[2,5]=boot.ci(results,type="norm",index=3)$normal[2]
outarray[3,5]=boot.ci(results,type="norm",index=3)$normal[3]
# index 4 is logScale
outarray[2,2]=boot.ci(results,type="norm",index=4)$normal[2]
outarray[3,2]=boot.ci(results,type="norm",index=4)$normal[3]
# index 5 is Age
outarray[2,1]=boot.ci(results,type="norm",index=5)$normal[2]
outarray[3,1]=boot.ci(results,type="norm",index=5)$normal[3]
# index 6 is age*logScale
outarray[2,3]=boot.ci(results,type="norm",index=6)$normal[2]
outarray[3,3]=boot.ci(results,type="norm",index=6)$normal[3]
# no bootstrap equiv for int age correlation at the moment
write.table(outarray,file=paste('~/mixedEffectModels/Modeled_fSex_fMot_fAgexScale_raInt_v',v,'_bwVals_BoStr_overScales.csv',sep=''),sep=',',row.names=F,quote=F)
# 9/10/20 section to write out subject level intercepts and slopes
#subjoutarray[,1]=df_verts$bblid
#subjoutarray[,2]=re$`(Intercept)`
#subjoutarray[,3]=re$`log(Scale)`
#write.table(subjoutarray,file=paste('~/mixedEffectModels/Scales10thru20_subj_level_Modeled_fSca_fAge_fScaxAge_raScaS_raScaI_fM',v,'_bwVals_overScales.csv',sep=''),sep=',',row.names=F,quote=F)
}
| /scripts/derive_mixedModels/mixedEf_vert_looper_pmacs921_BoStr.R | no_license | weiwei-wch/multiscale | R | false | false | 4,184 | r |
library(boot)
library(reshape2)
library(lme4)
# Input data: PNC demographics and the participant list for this analysis.
# NOTE(review): paths are hard-coded to the home directory.
demo<-read.csv('~/pnc_demo.csv')
subjects<-read.csv('~/participants.txt',header = F)
# assemble age/sex/scanid/bblid into one frame
ageSex<-data.frame(demo$ageAtScan1,as.factor(demo$sex),demo$scanid,demo$bblid)
colnames(ageSex)[4]<-'bblid'
colnames(ageSex)[1]<-'Age'
# per-task in-scanner motion QA summaries
Rest_Motion_Data <- read.csv("~/n1601_RestQAData_20170714.csv")
NBack_Motion_Data <- read.csv("~/n1601_NBACKQAData_20181001.csv")
Idemo_Motion_Data <- read.csv("~/n1601_idemo_FinalQA_092817.csv")
motmerge<-merge(Rest_Motion_Data,NBack_Motion_Data,by='bblid')
motmerge<-merge(motmerge,Idemo_Motion_Data,by='bblid')
# average relative RMS motion across the three tasks
motmerge$Motion <- (motmerge$restRelMeanRMSMotion + motmerge$nbackRelMeanRMSMotion + motmerge$idemoRelMeanRMSMotion)/3;
motiondf<-data.frame(motmerge$bblid,motmerge$Motion)
colnames(motiondf)<-c('bblid','Motion')
colnames(subjects)<-c("scanid")
colnames(ageSex)<-c("Age","Sex","scanid","bblid")
# keep only listed participants, then attach the motion covariate
df<-merge(subjects,ageSex,by="scanid")
df<-merge(df,motiondf,by='bblid')
# 3 x 6 container: fixed-effect estimates plus bootstrap CI bounds
outputcolnames=c('Age','logScale','AgeXlogScale','Motion','Sex','ageInt')
outputrownames=c('Coef','BoStrCI_Low','BoStr_CI_Upp')
outarray=matrix(1,nrow=3,ncol=6)
colnames(outarray)=outputcolnames
rownames(outarray)=outputrownames
# 9/10/20 section to write out subject level intercepts and slopes
subjoutputcolnames=c('bblid','Intercept')
subjoutarray=matrix(1,nrow=693,ncol=2)
colnames(subjoutarray)=subjoutputcolnames
# 9/21/20 - bootstrap section added
# Statistic for boot(): refit the mixed model on the bootstrap sample
# selected by row indices `i` and return its fixed-effect coefficients.
# NOTE(review): resampling rows ignores the bblid grouping structure —
# confirm a row-level (rather than cluster-level) bootstrap is intended.
returnLmCoefs<-function(d,i){
  resampled <- d[i,]
  fit <- glmer(value~Motion+Sex+log(Scale)*Age+(1|bblid),data=resampled)
  fixef(fit)
}
# Per-vertex loop: fit a mixed-effects model of vertex value across
# scales, bootstrap CIs for the fixed effects, and write one CSV per
# vertex. 17734 is hard-coded — presumably the vertex count; confirm.
for (v in 1:17734){
print(v)
# wrangle data, get variables into approp. var type
vFP=paste('~/mixedEffectModels/v',v,'_bwVals_overScales.csv',sep='')
verts=read.csv(vFP)
# column 30 is assumed to hold the subject id — TODO confirm layout
colnames(verts)[30]<-'bblid'
df_verts<-merge(df,verts,by="bblid")
# wide -> long: id columns 1-5, remaining columns are per-scale values
mdf_verts<-melt(df_verts,id=c(1,2,3,4,5))
mdf_verts$bblid<-as.factor(mdf_verts$bblid)
colnames(mdf_verts)[6]<-c('Scale')
mdf_verts$Scale<-as.integer(mdf_verts$Scale)
mdf_verts$Sex<-as.factor(mdf_verts$Sex)
mdf_verts$Age<-as.numeric(mdf_verts$Age)
# model for this vertex: random intercept per subject
model=glmer(value~Motion+Sex+log(Scale)*Age+(1|bblid),data=mdf_verts)
fe=fixef(model)
re=ranef(model)
re=re$bblid
# pull out coefficients
fMot=fe['Motion']
# NOTE(review): for a factor, fixef() usually names this term like
# 'Sex2'/'Sexmale'; fe['Sex'] may index nothing — verify
fSex=fe['Sex']
fAge=fe['Age']
flSc=fe['log(Scale)']
fIntrxn=fe['log(Scale):Age']
Intercept=fe['(Intercept)']
# intercept correlated with Age?
# NOTE(review): assumes ranef rows align with df row order — verify
ageInt=cor.test(re$`(Intercept)`,df$Age,method='spearman')$estimate
# coefficients into first row, along with age x Intercept corr.
outarray[1,1]=fAge
outarray[1,2]=flSc
outarray[1,3]=fIntrxn
outarray[1,4]=fMot
outarray[1,5]=fSex
outarray[1,6]=ageInt
# hang on to yer bootstraps (1000 resamples; rows 2-3 get CI bounds)
results <- boot(data=mdf_verts, statistic=returnLmCoefs, R=1000)
# index 1 is intercept (no CI stored for it)
# index 2 is Motion
outarray[2,4]=boot.ci(results,type="norm",index=2)$normal[2]
outarray[3,4]=boot.ci(results,type="norm",index=2)$normal[3]
# index 3 is Sex
outarray[2,5]=boot.ci(results,type="norm",index=3)$normal[2]
outarray[3,5]=boot.ci(results,type="norm",index=3)$normal[3]
# index 4 is logScale
outarray[2,2]=boot.ci(results,type="norm",index=4)$normal[2]
outarray[3,2]=boot.ci(results,type="norm",index=4)$normal[3]
# index 5 is Age
outarray[2,1]=boot.ci(results,type="norm",index=5)$normal[2]
outarray[3,1]=boot.ci(results,type="norm",index=5)$normal[3]
# index 6 is age*logScale
outarray[2,3]=boot.ci(results,type="norm",index=6)$normal[2]
outarray[3,3]=boot.ci(results,type="norm",index=6)$normal[3]
# no bootstrap equiv for int age correlation at the moment
write.table(outarray,file=paste('~/mixedEffectModels/Modeled_fSex_fMot_fAgexScale_raInt_v',v,'_bwVals_BoStr_overScales.csv',sep=''),sep=',',row.names=F,quote=F)
# 9/10/20 section to write out subject level intercepts and slopes
#subjoutarray[,1]=df_verts$bblid
#subjoutarray[,2]=re$`(Intercept)`
#subjoutarray[,3]=re$`log(Scale)`
#write.table(subjoutarray,file=paste('~/mixedEffectModels/Scales10thru20_subj_level_Modeled_fSca_fAge_fScaxAge_raScaS_raScaI_fM',v,'_bwVals_overScales.csv',sep=''),sep=',',row.names=F,quote=F)
}
|
#########################################
## structable
## Generic for flat contingency tables: dispatch on the class of `x`.
structable <- function(x, ...) {
  UseMethod("structable")
}
## Formula method for structable().
## Builds a flat contingency table from a formula plus data.  Two
## interfaces are supported, mirroring ftable():
##   * one-sided formula (~ a + b): xtabs()-style tabulation;
##   * two-sided formula (a ~ b): lhs variables become column variables,
##     rhs variables become row variables.
## `direction` ("v"/"h" per variable) or `split_vertical` control which
## variables split vertically; `direction` wins when both are given.
structable.formula <- function(formula, data = NULL, direction = NULL,
split_vertical = NULL, ..., subset, na.action) {
if (missing(formula) || !inherits(formula, "formula"))
stop("formula is incorrect or missing")
## keep the unexpanded call so it can be rewritten into a
## model.frame()/xtabs() call further below
m <- match.call(expand.dots = FALSE)
edata <- eval(m$data, parent.frame())
## direction overrides split_vertical ("v" = split vertically)
if (!is.null(direction))
split_vertical <- direction == "v"
## a structable input brings its own splitting information along
if (is.structable(data)) {
split_vertical <- attr(data, "split_vertical")
data <- as.table(data)
}
if (is.null(split_vertical))
split_vertical <- FALSE
## drop a literal `Freq' lhs: frequencies are handled implicitly
if (length(formula) == 3 && formula[[2]] == "Freq")
formula[[2]] = NULL
## only rhs present without `.' in lhs => xtabs-interface
if (length(formula) != 3) {
if (formula[[1]] == "~") {
if (inherits(edata, "ftable") || inherits(edata, "table") ||
length(dim(edata)) > 2) {
## tabular input: marginalize over the requested variables
data <- as.table(data)
varnames <- attr(terms(formula, allowDotAsName = TRUE), "term.labels")
dnames <- names(dimnames(data))
di <- match(varnames, dnames)
if (any(is.na(di)))
stop("incorrect variable names in formula")
if (all(varnames != "."))
data <- margin.table(data, di)
return(structable(data, split_vertical = split_vertical, ...))
}
else if (is.data.frame(data)) {
## data frame input: tabulate via xtabs(), honouring a `Freq' column
if ("Freq" %in% colnames(data))
return(structable(xtabs(formula(paste("Freq", deparse(formula))),
data = data),
split_vertical = split_vertical, ...))
else
return(structable(xtabs(formula, data), split_vertical = split_vertical, ...))
} else {
## anything else: rewrite the call into model.frame() and tabulate
if (is.matrix(edata))
m$data <- as.data.frame(data)
m$... <- m$split_vertical <- m$direction <- NULL
m[[1]] <- as.name("model.frame")
mf <- eval(m, parent.frame())
return(structable(table(mf), split_vertical = split_vertical, ...))
}
} else
stop("formula must have both left and right hand sides")
}
## `ftable' behavior
if (any(attr(terms(formula, allowDotAsName = TRUE), "order") > 1))
stop("interactions are not allowed")
## row variables from the rhs, column variables from the lhs
rvars <- attr(terms(formula[-2], allowDotAsName = TRUE), "term.labels")
cvars <- attr(terms(formula[-3], allowDotAsName = TRUE), "term.labels")
rhs.has.dot <- any(rvars == ".")
lhs.has.dot <- any(cvars == ".")
if (lhs.has.dot && rhs.has.dot)
stop(paste("formula has", sQuote("."), "in both left and right hand side"))
if (inherits(edata, "ftable") || inherits(edata, "table") ||
length(dim(edata)) > 2) {
## tabular input: resolve names, expand `.', and marginalize
if (inherits(edata, "ftable"))
data <- as.table(data)
dnames <- names(dimnames(data))
rvars <- pmatch(rvars, dnames)
cvars <- pmatch(cvars, dnames)
if (rhs.has.dot)
rvars <- seq_along(dnames)[-cvars]
else if (any(is.na(rvars)))
stop("incorrect variable names in rhs of formula")
if (lhs.has.dot)
cvars <- seq_along(dnames)[-rvars]
else if (any(is.na(cvars)))
stop("incorrect variable names in lhs of formula")
## row variables split horizontally (FALSE), column ones vertically (TRUE)
split_vertical <- c(rep(FALSE, length(rvars)), rep(TRUE, length(cvars)))
structable(margin.table(data, c(rvars, cvars)), split_vertical = split_vertical, ...)
} else {
## raw data: rewrite the call into xtabs() and tabulate
if (is.matrix(edata))
m$data <- as.data.frame(data)
m$... <- m$split_vertical <- m$direction <- NULL
if (!is.null(data) && is.environment(data)) {
## NOTE(review): rvars/cvars are still *names* in this branch, yet the
## negative indexing below assumes numeric positions -- verify against
## how the `.'-in-formula + environment case is actually reached.
dnames <- names(data)
if (rhs.has.dot)
rvars <- seq_along(dnames)[-cvars]
if (lhs.has.dot)
cvars <- seq_along(dnames)[-rvars]
}
else {
if (lhs.has.dot || rhs.has.dot)
stop("cannot use dots in formula with given data")
}
if ("Freq" %in% colnames(m$data))
m$formula <- formula(paste("Freq~", paste(c(rvars, cvars), collapse = "+")))
else
m$formula <- formula(paste("~", paste(c(rvars, cvars), collapse = "+")))
m[[1]] <- as.name("xtabs")
mf <- eval(m, parent.frame())
split_vertical <- c(rep(FALSE, length(rvars)), rep(TRUE, length(cvars)))
structable(mf, split_vertical = split_vertical, ...)
}
}
## Default method: build a structable from raw factors, a list, a
## table, an ftable, or a multi-way array.
structable.default <- function(..., direction = NULL, split_vertical = FALSE) {
## several checks & transformations for arguments
args <- list(...)
if (length(args) == 0)
stop("Nothing to tabulate")
x <- args[[1]]
## coerce the first argument to a table; bare vectors are tabulated
## together via table() on the *unevaluated* arguments so that the
## original expressions become the variable names
x <- if (is.list(x))
table(x)
else if (inherits(x, "ftable"))
as.table(x)
else if (!(is.array(x) && length(dim(x)) > 1 || inherits(x, "table")))
do.call("table", as.list(substitute(list(...)))[-1])
else
x
## invent default level names (a, b, ...) and variable names (A, B, ...)
## where missing or empty
if (is.null(dimnames(x)))
dimnames(x) <- lapply(dim(x), function(i) letters[seq_len(i)])
if (is.null(names(dimnames(x))))
names(dimnames(x)) <- LETTERS[seq_along(dim(x))]
idx <- sapply(names(dimnames(x)), nchar) < 1
if(any(idx))
names(dimnames(x))[idx] <- LETTERS[seq_len(sum(idx))]
## splitting argument
dl <- length(dim(x))
if (!is.null(direction))
split_vertical <- direction == "v"
## a single value alternates v/h across dimensions; shorter vectors
## are recycled to the number of dimensions
if (length(split_vertical) == 1)
split_vertical <- rep(c(split_vertical, !split_vertical), length.out = dl)
if (length(split_vertical) < dl)
split_vertical <- rep(split_vertical, length.out = dl)
## permute & reshape: horizontally split dims first (reversed), then
## vertically split dims, flattened into a 2-way matrix
ret <- base::aperm(x, c(rev(which(!split_vertical)), rev(which(split_vertical))))
dn <- dimnames(x)
## NOTE(review): vertically split variables are stored as "col.vars",
## horizontally split ones as "row.vars" (ftable convention).
rv <- dn[split_vertical]
cv <- dn[!split_vertical]
rl <- if (length(rv)) sapply(rv, length) else 1
cl <- if (length(cv)) sapply(cv, length) else 1
dim(ret) <- c(prod(cl), prod(rl))
## add dimnames
attr(ret, "dnames") <- dn
attr(ret, "split_vertical") <- split_vertical
## add dimension attributes in ftable-format
attr(ret, "col.vars") <- rv
attr(ret, "row.vars") <- cv
class(ret) <- c("structable", "ftable")
ret
}
"[[.structable" <- function(x, ...) {
if(nargs() > 3)
stop("Incorrect number of dimensions (max: 2).")
args <- if (nargs() < 3)
list(..1)
else
.massage_args(...)
args <- lapply(args, function(x) if (is.logical(x)) which(x) else x)
## handle one-arg cases
if (nargs() < 3)
if (length(args[[1]]) > 1)
## resolve calls like x[[c(1,2)]]
return(x[[ args[[1]][1] ]] [[ args[[1]][-1] ]])
else
## resolve x[[foo]]
return(if (attr(x, "split_vertical")[1]) x[[,args[[1]] ]] else x[[args[[1]],]])
## handle calls like x[[c(1,2), c(3,4)]]
if (length(args[[1]]) > 1 && length(args[[2]]) > 1)
return(x[[ args[[1]][1], args[[2]][1] ]] [[ args[[1]][-1], args[[2]][-1] ]])
## handle calls like x[[c(1,2), 3]]
if (length(args[[1]]) > 1)
return(x[[ args[[1]][1], args[[2]] ]] [[ args[[1]][-1], ]])
## handle calls like x[[1, c(1,3)]]
if (length(args[[2]]) > 1)
return(x[[ args[[1]], args[[2]][1] ]] [[ , args[[2]][-1] ]])
## final cases like x[[1,2]] or x[[1,]] or x[[,1]]
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)[lind, rind, drop = FALSE]
if (!lsym) {
i <- which(!split)[1]
split <- split[-i]
dnames <- dnames[-i]
}
if (!rsym) {
i <- which(split)[1]
split <- split[-i]
dnames <- dnames[-i]
}
attr(ret, "split_vertical") <- split
attr(ret, "dnames") <- dnames
## add dimension attributes in ftable-format
attr(ret, "col.vars") <- dnames[split]
attr(ret, "row.vars") <- dnames[!split]
class(ret) <- class(x)
ret
}
"[[<-.structable" <- function(x, ..., value) {
args <- if (nargs() < 4)
list(..1)
else
.massage_args(...)
## handle one-arg cases
if (nargs() < 4)
return(if (length(args[[1]]) > 1)
## resolve calls like x[[c(1,2)]]<-value
Recall(x, args[[1]][1],
value = Recall(x[[ args[[1]][1] ]], args[[1]][-1], value = value))
else
## resolve x[[foo]]<-value
if (attr(x, "split_vertical")[1])
Recall(x,,args[[1]], value = value)
else
Recall(x,args[[1]],, value = value)
)
## handle calls like x[[c(1,2), c(3,4)]]<-value
if (length(args[[1]]) > 1 && length(args[[2]]) > 1)
return(Recall(x, args[[1]][1], args[[2]][1],
value = Recall(x[[ args[[1]][1], args[[2]][1] ]],
args[[1]][-1], args[[2]][-1], value = value)))
## handle calls like x[[c(1,2), 3]]<-value
if (length(args[[1]]) > 1)
return(Recall(x, args[[1]][1], args[[2]],
value = Recall(x[[ args[[1]][1], args[[2]] ]],
args[[1]][-1], ,value = value)))
## handle calls like x[[1, c(1,3)]]<-value
if (length(args[[2]]) > 1)
return(Recall(x, args[[1]], args[[2]][1],
value = Recall(x[[ args[[1]], args[[2]][1] ]],,
args[[2]][-1], value = value)))
## final cases like x[[1,2]]<-value or x[[1,]]<-value or x[[,1]]<-value
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)
ret[lind, rind] <- value
class(ret) <- class(x)
ret
}
"[.structable" <- function(x, ...) {
if(nargs() > 3)
stop("Incorrect number of dimensions (max: 2).")
args <- if (nargs() < 3)
list(..1)
else
.massage_args(...)
args <- lapply(args, function(x) if (is.logical(x)) which(x) else x)
## handle one-arg cases
if (nargs() < 3)
return(if (attr(x, "split_vertical")[1]) x[,args[[1]] ] else x[args[[1]],])
## handle calls like x[c(1,2), foo]
if (length(args[[1]]) > 1)
return(do.call(rbind, lapply(args[[1]], function(i) x[i, args[[2]]])))
## handle calls like x[foo, c(1,3)]
if (length(args[[2]]) > 1)
return(do.call(cbind, lapply(args[[2]], function(i) x[args[[1]], i])))
## final cases like x[1,2] or x[1,] or x[,1]
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)[lind, rind, drop = FALSE]
if (!lsym) {
i <- which(!split)[1]
dnames[[i]] <- dnames[[i]][args[[1]]]
}
if (!rsym) {
i <- which(split)[1]
dnames[[i]] <- dnames[[i]][args[[2]]]
}
attr(ret, "split_vertical") <- split
attr(ret, "dnames") <- dnames
## add dimension attributes in ftable-format
attr(ret, "col.vars") <- dnames[split]
attr(ret, "row.vars") <- dnames[!split]
class(ret) <- class(x)
ret
}
"[<-.structable" <- function(x, ..., value) {
args <- if (nargs() < 4)
list(..1)
else
.massage_args(...)
## handle one-arg cases
if (nargs() < 4)
return(## resolve x[foo]
if (attr(x, "split_vertical")[1])
Recall(x,,args[[1]], value = value)
else
Recall(x,args[[1]],, value = value)
)
## handle calls like x[c(1,2), 3]
if (length(args[[1]]) > 1) {
for (i in seq_along(args[[1]]))
x[ args[[1]][i], args[[2]] ] <- value[i,]
return(x)
}
## handle calls like x[1, c(2,3)]
if (length(args[[2]]) > 1) {
for (i in seq_along(args[[2]]))
x[ args[[1]], args[[2]][i] ] <- value[,i]
return(x)
}
## final cases like x[1,2] or x[1,] or x[,1]
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)
ret[lind, rind] <- value
class(ret) <- class(x)
ret
}
## Bind structables side by side.  The underlying flat matrices are
## column-bound and the level sets of the first column variable are
## concatenated; all other metadata is taken from the first argument.
## With more than two inputs, pairs are merged left to right.
cbind.structable <- function(..., deparse.level = 1) {
  ## merge exactly two structables column-wise
  join_pair <- function(left, right) {
    merged <- cbind(unclass(left), unclass(right))
    class(merged) <- class(left)
    attr(merged, "split_vertical") <- attr(left, "split_vertical")
    attr(merged, "dnames") <- attr(left, "dnames")
    attr(merged, "row.vars") <- attr(left, "row.vars")
    attr(merged, "col.vars") <- attr(left, "col.vars")
    combined_levels <- c(attr(left, "col.vars")[[1]], attr(right, "col.vars")[[1]])
    attr(merged, "col.vars")[[1]] <- combined_levels
    ## merged levels must stay a valid factor level set
    if (anyDuplicated(combined_levels))
      stop("Levels of factor(s) to be merged must be unique.")
    attr(merged, "dnames")[names(attr(merged, "col.vars"))] <- attr(merged, "col.vars")
    merged
  }
  tabs <- list(...)
  if (length(tabs) < 2)
    return(tabs[[1]])
  merged <- join_pair(tabs[[1]], tabs[[2]])
  if (length(tabs) > 2)
    do.call(cbind, c(list(merged), tabs[-(1:2)]))
  else
    merged
}
## Bind structables on top of each other.  The underlying flat matrices
## are row-bound and the level sets of the first row variable are
## concatenated; all other metadata is taken from the first argument.
## With more than two inputs, pairs are merged left to right.
rbind.structable <- function(..., deparse.level = 1) {
  ## merge exactly two structables row-wise
  join_pair <- function(top, bottom) {
    merged <- rbind(unclass(top), unclass(bottom))
    class(merged) <- class(top)
    attr(merged, "split_vertical") <- attr(top, "split_vertical")
    attr(merged, "dnames") <- attr(top, "dnames")
    attr(merged, "row.vars") <- attr(top, "row.vars")
    attr(merged, "col.vars") <- attr(top, "col.vars")
    combined_levels <- c(attr(top, "row.vars")[[1]], attr(bottom, "row.vars")[[1]])
    attr(merged, "row.vars")[[1]] <- combined_levels
    ## merged levels must stay a valid factor level set
    if (anyDuplicated(combined_levels))
      stop("Levels of factor(s) to be merged must be unique.")
    attr(merged, "dnames")[names(attr(merged, "row.vars"))] <- attr(merged, "row.vars")
    merged
  }
  tabs <- list(...)
  if (length(tabs) < 2)
    return(tabs[[1]])
  merged <- join_pair(tabs[[1]], tabs[[2]])
  if (length(tabs) > 2)
    do.call(rbind, c(list(merged), tabs[-(1:2)]))
  else
    merged
}
## Convert a structable back into a plain multi-way table, restoring
## the original variable order recorded in the "dnames" attribute.
as.table.structable <- function(x, ...) {
## dispatch to the ftable method for the actual conversion
class(x) <- "ftable"
ret <- NextMethod("as.table", object = x)
## reorder dimensions back to the order of the original input
structure(base::aperm(ret, match(names(attr(x, "dnames")),
names(dimnames(ret)))),
class = "table")
}
## Plotting a structable draws a mosaic display of it.
plot.structable <- function(x, ...) {
  mosaic(x, ...)
}
## Transpose a structable: swap the roles of row and column variables.
t.structable <- function(x) {
ret <- t.default(x)
## NOTE(review): this relies on t.default() carrying the structable's
## extra attributes over to the transposed result -- confirm against
## the R version in use.  The split flags are inverted and the
## ftable-style row/col metadata swapped.
attr(ret, "split_vertical") <- !attr(ret, "split_vertical")
hold <- attr(ret, "row.vars")
attr(ret, "row.vars") = attr(ret, "col.vars")
attr(ret, "col.vars") = hold
ret
}
## TRUE iff `x` carries the "structable" S3 class.
is.structable <- function(x) {
  inherits(x, "structable")
}
## The dimensions of a structable are the level counts of each stored
## variable (one entry per variable in "dnames"), as an unnamed
## integer vector.
dim.structable <- function(x) {
  level_counts <- vapply(attr(x, "dnames"), length, integer(1L))
  as.integer(level_counts)
}
## Print via the ftable method: the flat layout is identical.
print.structable <- function(x, ...) {
class(x) <- "ftable"
NextMethod("print", object = x)
}
## Dimnames of a structable live in the "dnames" attribute, not in the
## dimnames of the underlying flat matrix.
dimnames.structable <- function(x) {
  attr(x, "dnames")
}
## Flatten a structable by first rebuilding the full table, then
## vectorizing that.
as.vector.structable <- function(x, ...) {
  full_table <- as.table(x)
  as.vector(full_table, ...)
}
## FIXME: copy as.matrix.ftable, committed to R-devel on 2014/1/12
## replace by call to as.matrix.ftable when this becomes stable
## Collapse an ftable into a plain matrix whose dimnames are built by
## pasting the levels of the row/column variables together with `sep`
## (a private copy of as.matrix.ftable; see the FIXME note above).
as_matrix_ftable <- function(x, sep = "_", ...)
{
  if (!inherits(x, "ftable"))
    stop("'x' must be an \"ftable\" object")
  ## build one combined dimnames entry from a named list of level vectors
  collapse_names <- function(vars) {
    level_grid <- rev(expand.grid(rev(vars)))
    labels <- do.call(paste, c(level_grid, list(sep = sep)))
    structure(list(labels), names = paste(collapse = sep, names(vars)))
  }
  structure(unclass(x),
            dimnames = c(collapse_names(attr(x, "row.vars")),
                         collapse_names(attr(x, "col.vars"))),
            row.vars = NULL, col.vars = NULL)
}
## Convert to a plain matrix via the ftable converter, then strip the
## structable-specific attributes.
as.matrix.structable <- function(x, sep = "_", ...) {
  flat <- as_matrix_ftable(x, sep, ...)
  structure(flat, dnames = NULL, split_vertical = NULL)
}
## The length of a structable is the level count of its first variable
## (first entry of dim()), mirroring the list-like [[ indexing.
length.structable <- function(x) {
  dim(x)[1L]
}
## Element-wise NA indicator: entry i is TRUE iff the sub-table x[i]
## (level i of the first variable) contains any missing cell.
## Fixed: the original tested is.na(sub) on the loop *index* itself,
## which is never NA, so the result was always all-FALSE.  We now test
## the underlying cell values of each slice; unclass() avoids
## re-dispatching is.na() on the structable slice (which would recurse).
## vapply() guarantees a logical vector even for zero-length input.
is.na.structable <- function(x)
  vapply(seq_along(x), function(sub) any(is.na(unclass(x[sub]))), logical(1L))
## Show the raw underlying structure without triggering any structable
## methods.
str.structable <- function(object, ...) {
  str(unclass(object), ...)
}
## For each element of vec1, find its position(s) in vec2; the result
## is the permutation mapping vec1 onto vec2.
find.perm <- function(vec1, vec2) {
  positions <- Map(function(elt) which(elt == vec2), vec1)
  unlist(positions)
}
## Permute the variables of a structable: convert to a full table,
## permute that, and rebuild with the split flags reordered to match.
aperm.structable <- function(a, perm, resize = TRUE, ...) {
  permuted <- aperm(as.table(a), perm = perm, resize = resize, ...)
  ## for a character `perm`, recover the numeric permutation from the names
  if (!is.numeric(perm))
    perm <- find.perm(names(dimnames(permuted)), names(dimnames(a)))
  structable(permuted, split_vertical = attr(a, "split_vertical")[perm])
}
############# helper function
## Normalize the two index slots of a `[`/`[[` call: an empty slot is
## replaced by the placeholder symbol `grrr` so downstream code can
## detect "missing" dimensions via is.symbol().
.massage_args <- function(...) {
  first <- if (missing(..1)) as.symbol("grrr") else ..1
  second <- if (missing(..2)) as.symbol("grrr") else ..2
  list(first, second)
}
| /pkgs/vcd/R/structable.R | no_license | vaguiar/EDAV_Project_2017 | R | false | false | 18,364 | r | #########################################
## structable
structable <- function(x, ...)
UseMethod("structable")
structable.formula <- function(formula, data = NULL, direction = NULL,
split_vertical = NULL, ..., subset, na.action) {
if (missing(formula) || !inherits(formula, "formula"))
stop("formula is incorrect or missing")
m <- match.call(expand.dots = FALSE)
edata <- eval(m$data, parent.frame())
if (!is.null(direction))
split_vertical <- direction == "v"
if (is.structable(data)) {
split_vertical <- attr(data, "split_vertical")
data <- as.table(data)
}
if (is.null(split_vertical))
split_vertical <- FALSE
if (length(formula) == 3 && formula[[2]] == "Freq")
formula[[2]] = NULL
## only rhs present without `.' in lhs => xtabs-interface
if (length(formula) != 3) {
if (formula[[1]] == "~") {
if (inherits(edata, "ftable") || inherits(edata, "table") ||
length(dim(edata)) > 2) {
data <- as.table(data)
varnames <- attr(terms(formula, allowDotAsName = TRUE), "term.labels")
dnames <- names(dimnames(data))
di <- match(varnames, dnames)
if (any(is.na(di)))
stop("incorrect variable names in formula")
if (all(varnames != "."))
data <- margin.table(data, di)
return(structable(data, split_vertical = split_vertical, ...))
}
else if (is.data.frame(data)) {
if ("Freq" %in% colnames(data))
return(structable(xtabs(formula(paste("Freq", deparse(formula))),
data = data),
split_vertical = split_vertical, ...))
else
return(structable(xtabs(formula, data), split_vertical = split_vertical, ...))
} else {
if (is.matrix(edata))
m$data <- as.data.frame(data)
m$... <- m$split_vertical <- m$direction <- NULL
m[[1]] <- as.name("model.frame")
mf <- eval(m, parent.frame())
return(structable(table(mf), split_vertical = split_vertical, ...))
}
} else
stop("formula must have both left and right hand sides")
}
## `ftable' behavior
if (any(attr(terms(formula, allowDotAsName = TRUE), "order") > 1))
stop("interactions are not allowed")
rvars <- attr(terms(formula[-2], allowDotAsName = TRUE), "term.labels")
cvars <- attr(terms(formula[-3], allowDotAsName = TRUE), "term.labels")
rhs.has.dot <- any(rvars == ".")
lhs.has.dot <- any(cvars == ".")
if (lhs.has.dot && rhs.has.dot)
stop(paste("formula has", sQuote("."), "in both left and right hand side"))
if (inherits(edata, "ftable") || inherits(edata, "table") ||
length(dim(edata)) > 2) {
if (inherits(edata, "ftable"))
data <- as.table(data)
dnames <- names(dimnames(data))
rvars <- pmatch(rvars, dnames)
cvars <- pmatch(cvars, dnames)
if (rhs.has.dot)
rvars <- seq_along(dnames)[-cvars]
else if (any(is.na(rvars)))
stop("incorrect variable names in rhs of formula")
if (lhs.has.dot)
cvars <- seq_along(dnames)[-rvars]
else if (any(is.na(cvars)))
stop("incorrect variable names in lhs of formula")
split_vertical <- c(rep(FALSE, length(rvars)), rep(TRUE, length(cvars)))
structable(margin.table(data, c(rvars, cvars)), split_vertical = split_vertical, ...)
} else {
if (is.matrix(edata))
m$data <- as.data.frame(data)
m$... <- m$split_vertical <- m$direction <- NULL
if (!is.null(data) && is.environment(data)) {
dnames <- names(data)
if (rhs.has.dot)
rvars <- seq_along(dnames)[-cvars]
if (lhs.has.dot)
cvars <- seq_along(dnames)[-rvars]
}
else {
if (lhs.has.dot || rhs.has.dot)
stop("cannot use dots in formula with given data")
}
if ("Freq" %in% colnames(m$data))
m$formula <- formula(paste("Freq~", paste(c(rvars, cvars), collapse = "+")))
else
m$formula <- formula(paste("~", paste(c(rvars, cvars), collapse = "+")))
m[[1]] <- as.name("xtabs")
mf <- eval(m, parent.frame())
split_vertical <- c(rep(FALSE, length(rvars)), rep(TRUE, length(cvars)))
structable(mf, split_vertical = split_vertical, ...)
}
}
structable.default <- function(..., direction = NULL, split_vertical = FALSE) {
## several checks & transformations for arguments
args <- list(...)
if (length(args) == 0)
stop("Nothing to tabulate")
x <- args[[1]]
x <- if (is.list(x))
table(x)
else if (inherits(x, "ftable"))
as.table(x)
else if (!(is.array(x) && length(dim(x)) > 1 || inherits(x, "table")))
do.call("table", as.list(substitute(list(...)))[-1])
else
x
if (is.null(dimnames(x)))
dimnames(x) <- lapply(dim(x), function(i) letters[seq_len(i)])
if (is.null(names(dimnames(x))))
names(dimnames(x)) <- LETTERS[seq_along(dim(x))]
idx <- sapply(names(dimnames(x)), nchar) < 1
if(any(idx))
names(dimnames(x))[idx] <- LETTERS[seq_len(sum(idx))]
## splitting argument
dl <- length(dim(x))
if (!is.null(direction))
split_vertical <- direction == "v"
if (length(split_vertical) == 1)
split_vertical <- rep(c(split_vertical, !split_vertical), length.out = dl)
if (length(split_vertical) < dl)
split_vertical <- rep(split_vertical, length.out = dl)
## permute & reshape
ret <- base::aperm(x, c(rev(which(!split_vertical)), rev(which(split_vertical))))
dn <- dimnames(x)
rv <- dn[split_vertical]
cv <- dn[!split_vertical]
rl <- if (length(rv)) sapply(rv, length) else 1
cl <- if (length(cv)) sapply(cv, length) else 1
dim(ret) <- c(prod(cl), prod(rl))
## add dimnames
attr(ret, "dnames") <- dn
attr(ret, "split_vertical") <- split_vertical
## add dimension attributes in ftable-format
attr(ret, "col.vars") <- rv
attr(ret, "row.vars") <- cv
class(ret) <- c("structable", "ftable")
ret
}
"[[.structable" <- function(x, ...) {
if(nargs() > 3)
stop("Incorrect number of dimensions (max: 2).")
args <- if (nargs() < 3)
list(..1)
else
.massage_args(...)
args <- lapply(args, function(x) if (is.logical(x)) which(x) else x)
## handle one-arg cases
if (nargs() < 3)
if (length(args[[1]]) > 1)
## resolve calls like x[[c(1,2)]]
return(x[[ args[[1]][1] ]] [[ args[[1]][-1] ]])
else
## resolve x[[foo]]
return(if (attr(x, "split_vertical")[1]) x[[,args[[1]] ]] else x[[args[[1]],]])
## handle calls like x[[c(1,2), c(3,4)]]
if (length(args[[1]]) > 1 && length(args[[2]]) > 1)
return(x[[ args[[1]][1], args[[2]][1] ]] [[ args[[1]][-1], args[[2]][-1] ]])
## handle calls like x[[c(1,2), 3]]
if (length(args[[1]]) > 1)
return(x[[ args[[1]][1], args[[2]] ]] [[ args[[1]][-1], ]])
## handle calls like x[[1, c(1,3)]]
if (length(args[[2]]) > 1)
return(x[[ args[[1]], args[[2]][1] ]] [[ , args[[2]][-1] ]])
## final cases like x[[1,2]] or x[[1,]] or x[[,1]]
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)[lind, rind, drop = FALSE]
if (!lsym) {
i <- which(!split)[1]
split <- split[-i]
dnames <- dnames[-i]
}
if (!rsym) {
i <- which(split)[1]
split <- split[-i]
dnames <- dnames[-i]
}
attr(ret, "split_vertical") <- split
attr(ret, "dnames") <- dnames
## add dimension attributes in ftable-format
attr(ret, "col.vars") <- dnames[split]
attr(ret, "row.vars") <- dnames[!split]
class(ret) <- class(x)
ret
}
"[[<-.structable" <- function(x, ..., value) {
args <- if (nargs() < 4)
list(..1)
else
.massage_args(...)
## handle one-arg cases
if (nargs() < 4)
return(if (length(args[[1]]) > 1)
## resolve calls like x[[c(1,2)]]<-value
Recall(x, args[[1]][1],
value = Recall(x[[ args[[1]][1] ]], args[[1]][-1], value = value))
else
## resolve x[[foo]]<-value
if (attr(x, "split_vertical")[1])
Recall(x,,args[[1]], value = value)
else
Recall(x,args[[1]],, value = value)
)
## handle calls like x[[c(1,2), c(3,4)]]<-value
if (length(args[[1]]) > 1 && length(args[[2]]) > 1)
return(Recall(x, args[[1]][1], args[[2]][1],
value = Recall(x[[ args[[1]][1], args[[2]][1] ]],
args[[1]][-1], args[[2]][-1], value = value)))
## handle calls like x[[c(1,2), 3]]<-value
if (length(args[[1]]) > 1)
return(Recall(x, args[[1]][1], args[[2]],
value = Recall(x[[ args[[1]][1], args[[2]] ]],
args[[1]][-1], ,value = value)))
## handle calls like x[[1, c(1,3)]]<-value
if (length(args[[2]]) > 1)
return(Recall(x, args[[1]], args[[2]][1],
value = Recall(x[[ args[[1]], args[[2]][1] ]],,
args[[2]][-1], value = value)))
## final cases like x[[1,2]]<-value or x[[1,]]<-value or x[[,1]]<-value
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)
ret[lind, rind] <- value
class(ret) <- class(x)
ret
}
"[.structable" <- function(x, ...) {
if(nargs() > 3)
stop("Incorrect number of dimensions (max: 2).")
args <- if (nargs() < 3)
list(..1)
else
.massage_args(...)
args <- lapply(args, function(x) if (is.logical(x)) which(x) else x)
## handle one-arg cases
if (nargs() < 3)
return(if (attr(x, "split_vertical")[1]) x[,args[[1]] ] else x[args[[1]],])
## handle calls like x[c(1,2), foo]
if (length(args[[1]]) > 1)
return(do.call(rbind, lapply(args[[1]], function(i) x[i, args[[2]]])))
## handle calls like x[foo, c(1,3)]
if (length(args[[2]]) > 1)
return(do.call(cbind, lapply(args[[2]], function(i) x[args[[1]], i])))
## final cases like x[1,2] or x[1,] or x[,1]
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)[lind, rind, drop = FALSE]
if (!lsym) {
i <- which(!split)[1]
dnames[[i]] <- dnames[[i]][args[[1]]]
}
if (!rsym) {
i <- which(split)[1]
dnames[[i]] <- dnames[[i]][args[[2]]]
}
attr(ret, "split_vertical") <- split
attr(ret, "dnames") <- dnames
## add dimension attributes in ftable-format
attr(ret, "col.vars") <- dnames[split]
attr(ret, "row.vars") <- dnames[!split]
class(ret) <- class(x)
ret
}
"[<-.structable" <- function(x, ..., value) {
args <- if (nargs() < 4)
list(..1)
else
.massage_args(...)
## handle one-arg cases
if (nargs() < 4)
return(## resolve x[foo]
if (attr(x, "split_vertical")[1])
Recall(x,,args[[1]], value = value)
else
Recall(x,args[[1]],, value = value)
)
## handle calls like x[c(1,2), 3]
if (length(args[[1]]) > 1) {
for (i in seq_along(args[[1]]))
x[ args[[1]][i], args[[2]] ] <- value[i,]
return(x)
}
## handle calls like x[1, c(2,3)]
if (length(args[[2]]) > 1) {
for (i in seq_along(args[[2]]))
x[ args[[1]], args[[2]][i] ] <- value[,i]
return(x)
}
## final cases like x[1,2] or x[1,] or x[,1]
dnames <- attr(x, "dnames")
split <- attr(x, "split_vertical")
rv <- dnames[!split]
cv <- dnames[split]
lsym <- is.symbol(args[[1]])
rsym <- is.symbol(args[[2]])
if (!lsym) {
rstep <- dim(unclass(x))[1] / length(rv[[1]])
if (is.character(args[[1]]))
args[[1]] <- match(args[[1]], rv[[1]])
}
if (!rsym) {
cstep <- dim(unclass(x))[2] / length(cv[[1]])
if (is.character(args[[2]]))
args[[2]] <- match(args[[2]], cv[[1]])
}
lind <- if (!lsym)
(1 + (args[[1]] - 1) * rstep) : (args[[1]] * rstep)
else
1:nrow(unclass(x))
rind <- if (!rsym)
(1 + (args[[2]] - 1) * cstep) : (args[[2]] * cstep)
else
1:ncol(unclass(x))
ret <- unclass(x)
ret[lind, rind] <- value
class(ret) <- class(x)
ret
}
cbind.structable <- function(..., deparse.level = 1) {
mergetables <- function(t1, t2) {
ret <- cbind(unclass(t1),unclass(t2))
class(ret) <- class(t1)
attr(ret, "split_vertical") <- attr(t1, "split_vertical")
attr(ret, "dnames") <- attr(t1, "dnames")
attr(ret, "row.vars") <- attr(t1, "row.vars")
attr(ret, "col.vars") <- attr(t1, "col.vars")
attr(ret, "col.vars")[[1]] <- c(attr(t1, "col.vars")[[1]],attr(t2, "col.vars")[[1]])
if (length(unique(attr(ret, "col.vars")[[1]])) != length(attr(ret, "col.vars")[[1]]))
stop("Levels of factor(s) to be merged must be unique.")
attr(ret, "dnames")[names(attr(ret, "col.vars"))] <- attr(ret, "col.vars")
ret
}
args <- list(...)
if (length(args) < 2)
return(args[[1]])
ret <- mergetables(args[[1]], args[[2]])
if (length(args) > 2)
do.call(cbind, c(list(ret), args[-(1:2)]))
else
ret
}
rbind.structable <- function(..., deparse.level = 1) {
mergetables <- function(t1, t2) {
ret <- rbind(unclass(t1),unclass(t2))
class(ret) <- class(t1)
attr(ret, "split_vertical") <- attr(t1, "split_vertical")
attr(ret, "dnames") <- attr(t1, "dnames")
attr(ret, "row.vars") <- attr(t1, "row.vars")
attr(ret, "col.vars") <- attr(t1, "col.vars")
attr(ret, "row.vars")[[1]] <- c(attr(t1, "row.vars")[[1]],attr(t2, "row.vars")[[1]])
if (length(unique(attr(ret, "row.vars")[[1]])) != length(attr(ret, "row.vars")[[1]]))
stop("Levels of factor(s) to be merged must be unique.")
attr(ret, "dnames")[names(attr(ret, "row.vars"))] <- attr(ret, "row.vars")
ret
}
args <- list(...)
if (length(args) < 2)
return(args[[1]])
ret <- mergetables(args[[1]], args[[2]])
if (length(args) > 2)
do.call(rbind, c(list(ret), args[-(1:2)]))
else
ret
}
as.table.structable <- function(x, ...) {
class(x) <- "ftable"
ret <- NextMethod("as.table", object = x)
structure(base::aperm(ret, match(names(attr(x, "dnames")),
names(dimnames(ret)))),
class = "table")
}
plot.structable <- function(x, ...)
mosaic(x, ...)
t.structable <- function(x) {
ret <- t.default(x)
attr(ret, "split_vertical") <- !attr(ret, "split_vertical")
hold <- attr(ret, "row.vars")
attr(ret, "row.vars") = attr(ret, "col.vars")
attr(ret, "col.vars") = hold
ret
}
is.structable <- function(x)
inherits(x, "structable")
dim.structable <- function(x)
as.integer(sapply(attr(x, "dnames"), length))
print.structable <- function(x, ...) {
class(x) <- "ftable"
NextMethod("print", object = x)
}
dimnames.structable <- function(x) attr(x,"dnames")
as.vector.structable <- function(x, ...)
as.vector(as.table(x), ...)
## FIXME: copy as.matrix.ftable, committed to R-devel on 2014/1/12
## replace by call to as.matrix.ftable when this becomes stable
# Collapse an "ftable" into a plain matrix. The combinations of
# row-variable (resp. column-variable) levels are pasted together with
# `sep` to form the matrix dimnames, and the ftable-specific
# row.vars/col.vars attributes are removed.
# NOTE: kept as a verbatim copy of stats::as.matrix.ftable (see FIXME
# above) — do not restyle, so diffs against upstream stay trivial.
as_matrix_ftable <-
function (x, sep = "_", ...)
{
if (!inherits(x, "ftable"))
stop("'x' must be an \"ftable\" object")
# One composite dimname per margin: expand.grid() enumerates the level
# combinations (reversed twice so the first variable varies slowest),
# and the dimension name is the variable names joined by `sep`.
make_dimnames <- function(vars) {
structure(list(do.call(paste, c(rev(expand.grid(rev(vars))),
list(sep = sep)))), names = paste(collapse = sep,
names(vars)))
}
structure(unclass(x), dimnames = c(make_dimnames(attr(x,
"row.vars")), make_dimnames(attr(x, "col.vars"))), row.vars = NULL,
col.vars = NULL)
}
## Matrix representation of a structable: reuse the ftable-to-matrix
## conversion (a structable is laid out like an ftable) and strip the
## structable-specific "dnames" and "split_vertical" attributes.
as.matrix.structable <- function(x, sep = "_", ...) {
  out <- as_matrix_ftable(x, sep, ...)
  attr(out, "dnames") <- NULL
  attr(out, "split_vertical") <- NULL
  out
}
length.structable <- function(x) dim(x)[1]
# Element-wise missingness test for a structable: one logical per
# element index, TRUE when that slice of `x` contains any NA.
# BUG FIX: the previous body evaluated any(is.na(sub)) where `sub` was
# the integer index produced by seq_along(), so the result was always
# all-FALSE regardless of the data. The index must be used to subset
# `x` itself. vapply() replaces sapply() for a type-stable logical
# result (including logical(0) on empty input).
is.na.structable <- function(x)
  vapply(seq_along(x), function(sub) any(is.na(x[sub])), logical(1))
## Compact structure display for a structable: show the underlying
## (unclassed) object so str() reports the raw array and attributes.
str.structable <- function(object, ...) {
  str(unclass(object), ...)
}
## For each element of vec1, find the position(s) at which it occurs in
## vec2, concatenated into one (possibly named) integer vector.
## Unlike match(), this keeps *all* matching positions and silently
## drops elements of vec1 that do not occur in vec2.
find.perm <- function(vec1, vec2) {
  hits <- mapply(function(v) which(v == vec2), vec1, SIMPLIFY = FALSE)
  unlist(hits)
}
## Permute the dimensions of a structable: apply the permutation to the
## underlying table, translate a character `perm` into numeric positions
## via find.perm(), and rebuild the structable with the split directions
## reordered accordingly.
aperm.structable <- function(a, perm, resize = TRUE, ...) {
  permuted <- aperm(as.table(a), perm = perm, resize = resize, ...)
  if (!is.numeric(perm))
    perm <- find.perm(names(dimnames(permuted)), names(dimnames(a)))
  structable(permuted, split_vertical = attr(a, "split_vertical")[perm])
}
############# helper function
# Normalise the first two `...` arguments of the caller into a
# 2-element list. A missing argument is replaced by the placeholder
# symbol `grrr`, so downstream code can detect "not supplied" without
# triggering the missing-argument error that evaluating ..1/..2
# directly would raise.
.massage_args <- function(...) {
args <- vector("list", 2)
args[[1]] <- if(missing(..1)) as.symbol("grrr") else ..1
args[[2]] <- if(missing(..2)) as.symbol("grrr") else ..2
args
}
|
# Build the "Plot CDT Dataset" left command panel (Tcl/Tk GUI).
# Tab 1: choose a CDT dataset index file (*.rds) and an optional ESRI
# shapefile overlay (attribute selection, attribute-table viewer, map
# preview). Tab 2: configure the time-series graph (plot type/options,
# lon/lat of the point to extract, and a +/- padding around it).
# All state is exchanged through the global .cdtData / .cdtEnv
# environments shared across CDT; the function returns invisibly.
PlotCDTDatasetCmd <- function(){
listOpenFiles <- openFile_ttkcomboList()
# Widget widths depend on the platform (Windows vs. others).
if(WindowsOS()){
largeur0 <- .cdtEnv$tcl$fun$w.widgets(33)
largeur1 <- .cdtEnv$tcl$fun$w.widgets(31)
largeur2 <- 23
largeur3 <- 12
largeur4 <- 10
}else{
largeur0 <- .cdtEnv$tcl$fun$w.widgets(23)
largeur1 <- .cdtEnv$tcl$fun$w.widgets(22)
largeur2 <- 14
largeur3 <- 8
largeur4 <- 6
}
###################
# Default graph options for the two supported time-series plot types
# (bar chart and line plot); editable later via the Options button.
.cdtData$EnvData$TSGraphOp <- list(
bar = list(
xlim = list(is.min = FALSE, min = "1981-1-1", is.max = FALSE, max = "2017-12-3"),
ylim = list(is.min = FALSE, min = 0, is.max = FALSE, max = 200),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
colors = list(col = "darkblue")
),
line = list(
xlim = list(is.min = FALSE, min = "1981-1-1", is.max = FALSE, max = "2017-12-3"),
ylim = list(is.min = FALSE, min = 0, is.max = FALSE, max = 200),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
plot = list(type = 'both',
col = list(line = "red", points = "blue"),
lwd = 2, cex = 1.4),
legend = NULL)
)
.cdtData$EnvData$plot.maps$data.type <- 'cdtdataset'
###################
# Load the localized labels/messages for this panel.
xml.dlg <- file.path(.cdtDir$dirLocal, "languages", "cdtPlot_CDTDataset_leftCmd.xml")
lang.dlg <- cdtLanguageParse(xml.dlg, .cdtData$Config$lang.iso)
.cdtData$EnvData$message <- lang.dlg[['message']]
###################
# Two-tab notebook hosted in the left panel of the main window.
.cdtEnv$tcl$main$cmd.frame <- tkframe(.cdtEnv$tcl$main$panel.left)
tknote.cmd <- bwNoteBook(.cdtEnv$tcl$main$cmd.frame)
cmd.tab1 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['1']])
cmd.tab2 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['2']])
bwRaiseTab(tknote.cmd, cmd.tab1)
tkgrid.columnconfigure(cmd.tab1, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab2, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab1, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab2, 0, weight = 1)
#######################################################################################################
#Tab1
subfr1 <- bwTabScrollableFrame(cmd.tab1)
#######################
# Dataset frame: entry + browse button for the CDT dataset index (*.rds).
frameData <- ttklabelframe(subfr1, text = lang.dlg[['label']][['1']], relief = 'groove')
file.index.data <- tclVar()
txt.cdtdata <- tklabel(frameData, text = lang.dlg[['label']][['2']], anchor = 'w', justify = 'left')
en.cdtdata <- tkentry(frameData, textvariable = file.index.data, width = largeur0)
bt.cdtdata <- tkbutton(frameData, text = "...")
tkconfigure(bt.cdtdata, command = function(){
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), filetypes = .cdtEnv$tcl$data$filetypes6))
tclvalue(file.index.data) <- if(path.rds %in% c("", "NA") | is.na(path.rds)) "" else path.rds
# Eagerly (re)load the dataset index as soon as a file is chosen.
ret <- try(get.CDT.dataset.Idx(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
tkgrid(txt.cdtdata, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.cdtdata, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.cdtdata, row = 1, column = 5, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
##############################################
# Shapefile frame: choose an opened shapefile, pick the attribute used
# for labelling, view its attribute table, or display the overlay map.
frameSHP <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
shpFile <- tclVar()
shpAttr <- tclVar()
txt.addshp <- tklabel(frameSHP, text = lang.dlg[['label']][['3']], anchor = 'w', justify = 'left')
cb.addshp <- ttkcombobox(frameSHP, values = unlist(listOpenFiles), textvariable = shpFile, width = largeur1)
bt.addshp <- tkbutton(frameSHP, text = "...")
txt.attrshp <- tklabel(frameSHP, text = lang.dlg[['label']][['4']], anchor = 'w', justify = 'left')
cb.attrshp <- ttkcombobox(frameSHP, values = "", textvariable = shpAttr, width = largeur1)
bt.TableAttr <- ttkbutton(frameSHP, text = lang.dlg[['button']][['1']])
bt.MapPixel <- ttkbutton(frameSHP, text = lang.dlg[['button']][['2']])
########
tkgrid(txt.addshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
tkgrid(cb.addshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.addshp, row = 1, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1)
tkgrid(txt.attrshp, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
tkgrid(cb.attrshp, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.TableAttr, row = 4, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1)
tkgrid(bt.MapPixel, row = 4, column = 4, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
########
# Browse for a new shapefile: register it among the open files, refresh
# the combo boxes, and cache the shapefile data/boundaries globally.
tkconfigure(bt.addshp, command = function(){
shp.opfiles <- getOpenShp(.cdtEnv$tcl$main$win)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(shpFile) <- shp.opfiles[[1]]
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
tkconfigure(cb.addshp, values = unlist(listOpenFiles))
shpf <- getShpOpenData(shpFile)
if(is.null(shpf)){
.cdtData$EnvData$shp$data <- NULL
.cdtData$EnvData$shp$ocrds <- NULL
return(NULL)
}
AttrTable <- names(shpf[[2]]@data)
tkconfigure(cb.attrshp, values = AttrTable)
tclvalue(shpAttr) <- AttrTable[1]
.cdtData$EnvData$shp$data <- shpf
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpf[[2]])
.cdtData$EnvData$plot.maps$shp$display <- TRUE
.cdtData$EnvData$plot.maps$shp$shp <- shpf[[2]]
# NOTE(review): this stores the combobox *widget*, not the selected
# attribute value — confirm downstream readers expect the widget.
.cdtData$EnvData$plot.maps$shp$field <- cb.attrshp
}else return(NULL)
ret <- try(get.CDT.dataset.Idx(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
########
# Show the shapefile attribute table in a (single, reused) notebook tab.
.cdtData$EnvData$tab$TableAttr <- NULL
tkconfigure(bt.TableAttr, command = function(){
shpf <- .cdtData$EnvData$shp$data
if(!is.null(shpf))
.cdtData$EnvData$tab$TableAttr <- tableNotebookTab_unik(shpf[[2]]@data, .cdtData$EnvData$tab$TableAttr, shpf[[1]], 10)
})
########
# Display the dataset/shapefile map when either layer is available.
.cdtData$EnvData$tab$MapSelect <- NULL
tkconfigure(bt.MapPixel, command = function(){
if(!is.null(.cdtData$EnvData$map) |
!is.null(.cdtData$EnvData$shp$ocrds))
CDTdataset.Display.Map()
})
########
# Selecting an already-open shapefile from the combo: same caching and
# attribute refresh as the browse handler above.
tkbind(cb.addshp, "<<ComboboxSelected>>", function(){
shpf <- getShpOpenData(shpFile)
if(is.null(shpf)){
.cdtData$EnvData$shp$data <- NULL
.cdtData$EnvData$shp$ocrds <- NULL
return(NULL)
}
AttrTable <- names(shpf[[2]]@data)
tkconfigure(cb.attrshp, values = AttrTable)
tclvalue(shpAttr) <- AttrTable[1]
.cdtData$EnvData$shp$data <- shpf
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpf[[2]])
.cdtData$EnvData$plot.maps$shp$display <- TRUE
.cdtData$EnvData$plot.maps$shp$shp <- shpf[[2]]
.cdtData$EnvData$plot.maps$shp$field <- cb.attrshp
ret <- try(get.CDT.dataset.Idx(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
############################################
tkgrid(frameData, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameSHP, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab2
subfr2 <- bwTabScrollableFrame(cmd.tab2)
##############################################
frameGraph <- ttklabelframe(subfr2, text = lang.dlg[['label']][['5']], relief = 'groove')
#################
# Row 1: plot-type selector + "Options" and "Plot" buttons.
frGph1 <- tkframe(frameGraph)
typeTSPLOT <- c("Line", "Barplot")
.cdtData$EnvData$plot.maps$typeTSp <- tclVar("Line")
cb.typeTSp <- ttkcombobox(frGph1, values = typeTSPLOT, textvariable = .cdtData$EnvData$plot.maps$typeTSp, width = largeur2)
bt.TsGraph.plot <- ttkbutton(frGph1, text = .cdtEnv$tcl$lang$global[['button']][['3']], width = 7)
bt.TSGraphOpt <- ttkbutton(frGph1, text = .cdtEnv$tcl$lang$global[['button']][['4']], width = 8)
tkgrid(cb.typeTSp, row = 0, column = 0, sticky = 'we', pady = 1, columnspan = 1)
tkgrid(bt.TSGraphOpt, row = 0, column = 1, sticky = 'we', padx = 4, pady = 1, columnspan = 1)
tkgrid(bt.TsGraph.plot, row = 0, column = 2, sticky = 'we', pady = 1, columnspan = 1)
#########
# Open the per-type options dialog (MapGraph.GraphOptions.Bar / .Line),
# looked up dynamically from the selected plot type.
tkconfigure(bt.TSGraphOpt, command = function(){
suffix.fun <- switch(str_trim(tclvalue(.cdtData$EnvData$plot.maps$typeTSp)),
"Barplot" = "Bar",
"Line" = "Line")
plot.fun <- get(paste0("MapGraph.GraphOptions.", suffix.fun), mode = "function")
.cdtData$EnvData$TSGraphOp <- plot.fun(.cdtData$EnvData$TSGraphOp)
})
#########
# Render the time-series graph into a (single, reused) notebook tab;
# only possible once a dataset index has been loaded.
.cdtData$EnvData$tab$dataGraph <- NULL
tkconfigure(bt.TsGraph.plot, command = function(){
if(!is.null(.cdtData$EnvData$cdtdataset)){
imgContainer <- CDT.Display.Graph(CDTdataset.Plot.Graph, .cdtData$EnvData$tab$dataGraph, "CDT Dataset - TS")
.cdtData$EnvData$tab$dataGraph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$dataGraph)
}
})
#################
# Row 2: lon/lat entries for the point whose series is plotted.
frGph2 <- tkframe(frameGraph)
.cdtData$EnvData$plot.maps$lonLOC <- tclVar()
.cdtData$EnvData$plot.maps$latLOC <- tclVar()
txt.crdSel <- tklabel(frGph2, text = lang.dlg[['label']][['6']], anchor = 'w', justify = 'left')
txt.lonLoc <- tklabel(frGph2, text = lang.dlg[['label']][['7']], anchor = 'e', justify = 'right')
en.lonLoc <- tkentry(frGph2, textvariable = .cdtData$EnvData$plot.maps$lonLOC, width = largeur3)
txt.latLoc <- tklabel(frGph2, text = lang.dlg[['label']][['8']], anchor = 'e', justify = 'right')
en.latLoc <- tkentry(frGph2, textvariable = .cdtData$EnvData$plot.maps$latLOC, width = largeur3)
tkgrid(txt.crdSel, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.lonLoc, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.lonLoc, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.latLoc, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.latLoc, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
#################
# Row 3: +/- padding (degrees) applied around the selected coordinates.
frGph3 <- tkframe(frameGraph)
.cdtData$EnvData$plot.maps$lonPAD <- tclVar('0.0')
.cdtData$EnvData$plot.maps$latPAD <- tclVar('0.0')
txt.spPAD <- tklabel(frGph3, text = lang.dlg[['label']][['9']], anchor = 'w', justify = 'left')
txt.lonPAD <- tklabel(frGph3, text = paste(lang.dlg[['label']][['7']], "\u00B1"), anchor = 'e', justify = 'right')
en.lonPAD <- tkentry(frGph3, textvariable = .cdtData$EnvData$plot.maps$lonPAD, width = largeur4)
txt.latPAD <- tklabel(frGph3, text = paste(lang.dlg[['label']][['8']], "\u00B1"), anchor = 'e', justify = 'right')
en.latPAD <- tkentry(frGph3, textvariable = .cdtData$EnvData$plot.maps$latPAD, width = largeur4)
tkgrid(txt.spPAD, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.lonPAD, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.lonPAD, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.latPAD, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.latPAD, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
helpWidget(en.lonPAD, lang.dlg[['tooltip']][['1']], lang.dlg[['status']][['1']])
helpWidget(en.latPAD, lang.dlg[['tooltip']][['2']], lang.dlg[['status']][['2']])
#################
tkgrid(frGph1, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frGph2, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frGph3, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
############################################
tkgrid(frameGraph, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
#######################################################################################################
# Load (or reuse) the CDT dataset index pointed to by the entry field.
# Returns NULL on any failure, 0 on success; caches the index and the
# first date's map in .cdtData$EnvData, and shows a busy cursor while
# reading. Re-reading is skipped when the same index is already loaded.
get.CDT.dataset.Idx <- function(){
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
on.exit({
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
})
file.CDT.Idx <- str_trim(tclvalue(file.index.data))
if(file.CDT.Idx == "") return(NULL)
read.cdt.dataIdx <- TRUE
if(!is.null(.cdtData$EnvData$cdtdataset))
if(!is.null(.cdtData$EnvData$file.CDT.Idx))
if(.cdtData$EnvData$file.CDT.Idx == file.CDT.Idx) read.cdt.dataIdx <- FALSE
if(read.cdt.dataIdx){
if(file.exists(file.CDT.Idx)){
OutIndexdata <- try(readRDS(file.CDT.Idx), silent = TRUE)
if(inherits(OutIndexdata, "try-error")){
Insert.Messages.Out(lang.dlg[['message']][['1']], format = TRUE)
Insert.Messages.Out(gsub('[\r\n]', '', OutIndexdata[1]), format = TRUE)
.cdtData$EnvData$cdtdataset <- NULL
return(NULL)
}
.cdtData$EnvData$cdtdataset <- OutIndexdata
.cdtData$EnvData$cdtdataset$fileInfo <- file.CDT.Idx
.cdtData$EnvData$file.CDT.Idx <- file.CDT.Idx
####
# Pre-load the map of the first available date for display.
cdtParallelCond <- .cdtData$Config[c('dopar', 'detect.cores', 'nb.cores')]
.cdtData$EnvData$map <- readCdtDatasetChunk.multi.dates.order(file.CDT.Idx, OutIndexdata$dateInfo$date[1], cdtParallelCond, onedate = TRUE)
}
}
return(0)
}
#######################################################################################################
# Final layout of the notebook inside the left panel.
tkgrid(tknote.cmd, sticky = 'nwes')
tkgrid.columnconfigure(tknote.cmd, 0, weight = 1)
tkgrid.rowconfigure(tknote.cmd, 0, weight = 1)
tcl('update')
tkgrid(.cdtEnv$tcl$main$cmd.frame, sticky = 'nwes', pady = 1)
tkgrid.columnconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
tkgrid.rowconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
invisible()
}
| /R/cdtPlot_CDTDataset_leftCmd.R | no_license | YabOusmane/CDT | R | false | false | 16,355 | r |
# Build the "Plot CDT Dataset" left command panel (Tcl/Tk GUI).
# NOTE(review): this is a byte-for-byte duplicate of the definition
# earlier in the file; the later definition wins when sourced — the
# duplicate should probably be removed.
# Tab 1: choose a CDT dataset index file (*.rds) and an optional ESRI
# shapefile overlay. Tab 2: configure the time-series graph. All state
# is exchanged through the global .cdtData / .cdtEnv environments.
PlotCDTDatasetCmd <- function(){
listOpenFiles <- openFile_ttkcomboList()
# Widget widths depend on the platform (Windows vs. others).
if(WindowsOS()){
largeur0 <- .cdtEnv$tcl$fun$w.widgets(33)
largeur1 <- .cdtEnv$tcl$fun$w.widgets(31)
largeur2 <- 23
largeur3 <- 12
largeur4 <- 10
}else{
largeur0 <- .cdtEnv$tcl$fun$w.widgets(23)
largeur1 <- .cdtEnv$tcl$fun$w.widgets(22)
largeur2 <- 14
largeur3 <- 8
largeur4 <- 6
}
###################
# Default graph options for the two supported time-series plot types.
.cdtData$EnvData$TSGraphOp <- list(
bar = list(
xlim = list(is.min = FALSE, min = "1981-1-1", is.max = FALSE, max = "2017-12-3"),
ylim = list(is.min = FALSE, min = 0, is.max = FALSE, max = 200),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
colors = list(col = "darkblue")
),
line = list(
xlim = list(is.min = FALSE, min = "1981-1-1", is.max = FALSE, max = "2017-12-3"),
ylim = list(is.min = FALSE, min = 0, is.max = FALSE, max = 200),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
plot = list(type = 'both',
col = list(line = "red", points = "blue"),
lwd = 2, cex = 1.4),
legend = NULL)
)
.cdtData$EnvData$plot.maps$data.type <- 'cdtdataset'
###################
# Load the localized labels/messages for this panel.
xml.dlg <- file.path(.cdtDir$dirLocal, "languages", "cdtPlot_CDTDataset_leftCmd.xml")
lang.dlg <- cdtLanguageParse(xml.dlg, .cdtData$Config$lang.iso)
.cdtData$EnvData$message <- lang.dlg[['message']]
###################
# Two-tab notebook hosted in the left panel of the main window.
.cdtEnv$tcl$main$cmd.frame <- tkframe(.cdtEnv$tcl$main$panel.left)
tknote.cmd <- bwNoteBook(.cdtEnv$tcl$main$cmd.frame)
cmd.tab1 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['1']])
cmd.tab2 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['2']])
bwRaiseTab(tknote.cmd, cmd.tab1)
tkgrid.columnconfigure(cmd.tab1, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab2, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab1, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab2, 0, weight = 1)
#######################################################################################################
#Tab1
subfr1 <- bwTabScrollableFrame(cmd.tab1)
#######################
# Dataset frame: entry + browse button for the CDT dataset index (*.rds).
frameData <- ttklabelframe(subfr1, text = lang.dlg[['label']][['1']], relief = 'groove')
file.index.data <- tclVar()
txt.cdtdata <- tklabel(frameData, text = lang.dlg[['label']][['2']], anchor = 'w', justify = 'left')
en.cdtdata <- tkentry(frameData, textvariable = file.index.data, width = largeur0)
bt.cdtdata <- tkbutton(frameData, text = "...")
tkconfigure(bt.cdtdata, command = function(){
path.rds <- tclvalue(tkgetOpenFile(initialdir = getwd(), filetypes = .cdtEnv$tcl$data$filetypes6))
tclvalue(file.index.data) <- if(path.rds %in% c("", "NA") | is.na(path.rds)) "" else path.rds
# Eagerly (re)load the dataset index as soon as a file is chosen.
ret <- try(get.CDT.dataset.Idx(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
tkgrid(txt.cdtdata, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.cdtdata, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.cdtdata, row = 1, column = 5, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
##############################################
# Shapefile frame: choose an opened shapefile, pick the attribute used
# for labelling, view its attribute table, or display the overlay map.
frameSHP <- tkframe(subfr1, relief = 'groove', borderwidth = 2)
shpFile <- tclVar()
shpAttr <- tclVar()
txt.addshp <- tklabel(frameSHP, text = lang.dlg[['label']][['3']], anchor = 'w', justify = 'left')
cb.addshp <- ttkcombobox(frameSHP, values = unlist(listOpenFiles), textvariable = shpFile, width = largeur1)
bt.addshp <- tkbutton(frameSHP, text = "...")
txt.attrshp <- tklabel(frameSHP, text = lang.dlg[['label']][['4']], anchor = 'w', justify = 'left')
cb.attrshp <- ttkcombobox(frameSHP, values = "", textvariable = shpAttr, width = largeur1)
bt.TableAttr <- ttkbutton(frameSHP, text = lang.dlg[['button']][['1']])
bt.MapPixel <- ttkbutton(frameSHP, text = lang.dlg[['button']][['2']])
########
tkgrid(txt.addshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
tkgrid(cb.addshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.addshp, row = 1, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1)
tkgrid(txt.attrshp, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
tkgrid(cb.attrshp, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.TableAttr, row = 4, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1)
tkgrid(bt.MapPixel, row = 4, column = 4, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
########
# Browse for a new shapefile: register it among the open files, refresh
# the combo boxes, and cache the shapefile data/boundaries globally.
tkconfigure(bt.addshp, command = function(){
shp.opfiles <- getOpenShp(.cdtEnv$tcl$main$win)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(shpFile) <- shp.opfiles[[1]]
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
tkconfigure(cb.addshp, values = unlist(listOpenFiles))
shpf <- getShpOpenData(shpFile)
if(is.null(shpf)){
.cdtData$EnvData$shp$data <- NULL
.cdtData$EnvData$shp$ocrds <- NULL
return(NULL)
}
AttrTable <- names(shpf[[2]]@data)
tkconfigure(cb.attrshp, values = AttrTable)
tclvalue(shpAttr) <- AttrTable[1]
.cdtData$EnvData$shp$data <- shpf
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpf[[2]])
.cdtData$EnvData$plot.maps$shp$display <- TRUE
.cdtData$EnvData$plot.maps$shp$shp <- shpf[[2]]
# NOTE(review): this stores the combobox *widget*, not the selected
# attribute value — confirm downstream readers expect the widget.
.cdtData$EnvData$plot.maps$shp$field <- cb.attrshp
}else return(NULL)
ret <- try(get.CDT.dataset.Idx(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
########
# Show the shapefile attribute table in a (single, reused) notebook tab.
.cdtData$EnvData$tab$TableAttr <- NULL
tkconfigure(bt.TableAttr, command = function(){
shpf <- .cdtData$EnvData$shp$data
if(!is.null(shpf))
.cdtData$EnvData$tab$TableAttr <- tableNotebookTab_unik(shpf[[2]]@data, .cdtData$EnvData$tab$TableAttr, shpf[[1]], 10)
})
########
# Display the dataset/shapefile map when either layer is available.
.cdtData$EnvData$tab$MapSelect <- NULL
tkconfigure(bt.MapPixel, command = function(){
if(!is.null(.cdtData$EnvData$map) |
!is.null(.cdtData$EnvData$shp$ocrds))
CDTdataset.Display.Map()
})
########
# Selecting an already-open shapefile from the combo: same caching and
# attribute refresh as the browse handler above.
tkbind(cb.addshp, "<<ComboboxSelected>>", function(){
shpf <- getShpOpenData(shpFile)
if(is.null(shpf)){
.cdtData$EnvData$shp$data <- NULL
.cdtData$EnvData$shp$ocrds <- NULL
return(NULL)
}
AttrTable <- names(shpf[[2]]@data)
tkconfigure(cb.attrshp, values = AttrTable)
tclvalue(shpAttr) <- AttrTable[1]
.cdtData$EnvData$shp$data <- shpf
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpf[[2]])
.cdtData$EnvData$plot.maps$shp$display <- TRUE
.cdtData$EnvData$plot.maps$shp$shp <- shpf[[2]]
.cdtData$EnvData$plot.maps$shp$field <- cb.attrshp
ret <- try(get.CDT.dataset.Idx(), silent = TRUE)
if(inherits(ret, "try-error") | is.null(ret)) return(NULL)
})
############################################
tkgrid(frameData, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frameSHP, row = 1, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab2
subfr2 <- bwTabScrollableFrame(cmd.tab2)
##############################################
frameGraph <- ttklabelframe(subfr2, text = lang.dlg[['label']][['5']], relief = 'groove')
#################
# Row 1: plot-type selector + "Options" and "Plot" buttons.
frGph1 <- tkframe(frameGraph)
typeTSPLOT <- c("Line", "Barplot")
.cdtData$EnvData$plot.maps$typeTSp <- tclVar("Line")
cb.typeTSp <- ttkcombobox(frGph1, values = typeTSPLOT, textvariable = .cdtData$EnvData$plot.maps$typeTSp, width = largeur2)
bt.TsGraph.plot <- ttkbutton(frGph1, text = .cdtEnv$tcl$lang$global[['button']][['3']], width = 7)
bt.TSGraphOpt <- ttkbutton(frGph1, text = .cdtEnv$tcl$lang$global[['button']][['4']], width = 8)
tkgrid(cb.typeTSp, row = 0, column = 0, sticky = 'we', pady = 1, columnspan = 1)
tkgrid(bt.TSGraphOpt, row = 0, column = 1, sticky = 'we', padx = 4, pady = 1, columnspan = 1)
tkgrid(bt.TsGraph.plot, row = 0, column = 2, sticky = 'we', pady = 1, columnspan = 1)
#########
# Open the per-type options dialog (MapGraph.GraphOptions.Bar / .Line),
# looked up dynamically from the selected plot type.
tkconfigure(bt.TSGraphOpt, command = function(){
suffix.fun <- switch(str_trim(tclvalue(.cdtData$EnvData$plot.maps$typeTSp)),
"Barplot" = "Bar",
"Line" = "Line")
plot.fun <- get(paste0("MapGraph.GraphOptions.", suffix.fun), mode = "function")
.cdtData$EnvData$TSGraphOp <- plot.fun(.cdtData$EnvData$TSGraphOp)
})
#########
# Render the time-series graph into a (single, reused) notebook tab;
# only possible once a dataset index has been loaded.
.cdtData$EnvData$tab$dataGraph <- NULL
tkconfigure(bt.TsGraph.plot, command = function(){
if(!is.null(.cdtData$EnvData$cdtdataset)){
imgContainer <- CDT.Display.Graph(CDTdataset.Plot.Graph, .cdtData$EnvData$tab$dataGraph, "CDT Dataset - TS")
.cdtData$EnvData$tab$dataGraph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$dataGraph)
}
})
#################
# Row 2: lon/lat entries for the point whose series is plotted.
frGph2 <- tkframe(frameGraph)
.cdtData$EnvData$plot.maps$lonLOC <- tclVar()
.cdtData$EnvData$plot.maps$latLOC <- tclVar()
txt.crdSel <- tklabel(frGph2, text = lang.dlg[['label']][['6']], anchor = 'w', justify = 'left')
txt.lonLoc <- tklabel(frGph2, text = lang.dlg[['label']][['7']], anchor = 'e', justify = 'right')
en.lonLoc <- tkentry(frGph2, textvariable = .cdtData$EnvData$plot.maps$lonLOC, width = largeur3)
txt.latLoc <- tklabel(frGph2, text = lang.dlg[['label']][['8']], anchor = 'e', justify = 'right')
en.latLoc <- tkentry(frGph2, textvariable = .cdtData$EnvData$plot.maps$latLOC, width = largeur3)
tkgrid(txt.crdSel, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.lonLoc, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.lonLoc, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.latLoc, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.latLoc, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
#################
# Row 3: +/- padding (degrees) applied around the selected coordinates.
frGph3 <- tkframe(frameGraph)
.cdtData$EnvData$plot.maps$lonPAD <- tclVar('0.0')
.cdtData$EnvData$plot.maps$latPAD <- tclVar('0.0')
txt.spPAD <- tklabel(frGph3, text = lang.dlg[['label']][['9']], anchor = 'w', justify = 'left')
txt.lonPAD <- tklabel(frGph3, text = paste(lang.dlg[['label']][['7']], "\u00B1"), anchor = 'e', justify = 'right')
en.lonPAD <- tkentry(frGph3, textvariable = .cdtData$EnvData$plot.maps$lonPAD, width = largeur4)
txt.latPAD <- tklabel(frGph3, text = paste(lang.dlg[['label']][['8']], "\u00B1"), anchor = 'e', justify = 'right')
en.latPAD <- tkentry(frGph3, textvariable = .cdtData$EnvData$plot.maps$latPAD, width = largeur4)
tkgrid(txt.spPAD, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.lonPAD, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.lonPAD, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.latPAD, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.latPAD, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
helpWidget(en.lonPAD, lang.dlg[['tooltip']][['1']], lang.dlg[['status']][['1']])
helpWidget(en.latPAD, lang.dlg[['tooltip']][['2']], lang.dlg[['status']][['2']])
#################
tkgrid(frGph1, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frGph2, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(frGph3, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
############################################
tkgrid(frameGraph, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
#######################################################################################################
# Load (or reuse) the CDT dataset index pointed to by the entry field.
# Returns NULL on any failure, 0 on success; caches the index and the
# first date's map in .cdtData$EnvData, and shows a busy cursor while
# reading. Re-reading is skipped when the same index is already loaded.
get.CDT.dataset.Idx <- function(){
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
on.exit({
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
})
file.CDT.Idx <- str_trim(tclvalue(file.index.data))
if(file.CDT.Idx == "") return(NULL)
read.cdt.dataIdx <- TRUE
if(!is.null(.cdtData$EnvData$cdtdataset))
if(!is.null(.cdtData$EnvData$file.CDT.Idx))
if(.cdtData$EnvData$file.CDT.Idx == file.CDT.Idx) read.cdt.dataIdx <- FALSE
if(read.cdt.dataIdx){
if(file.exists(file.CDT.Idx)){
OutIndexdata <- try(readRDS(file.CDT.Idx), silent = TRUE)
if(inherits(OutIndexdata, "try-error")){
Insert.Messages.Out(lang.dlg[['message']][['1']], format = TRUE)
Insert.Messages.Out(gsub('[\r\n]', '', OutIndexdata[1]), format = TRUE)
.cdtData$EnvData$cdtdataset <- NULL
return(NULL)
}
.cdtData$EnvData$cdtdataset <- OutIndexdata
.cdtData$EnvData$cdtdataset$fileInfo <- file.CDT.Idx
.cdtData$EnvData$file.CDT.Idx <- file.CDT.Idx
####
# Pre-load the map of the first available date for display.
cdtParallelCond <- .cdtData$Config[c('dopar', 'detect.cores', 'nb.cores')]
.cdtData$EnvData$map <- readCdtDatasetChunk.multi.dates.order(file.CDT.Idx, OutIndexdata$dateInfo$date[1], cdtParallelCond, onedate = TRUE)
}
}
return(0)
}
#######################################################################################################
# Final layout of the notebook inside the left panel.
tkgrid(tknote.cmd, sticky = 'nwes')
tkgrid.columnconfigure(tknote.cmd, 0, weight = 1)
tkgrid.rowconfigure(tknote.cmd, 0, weight = 1)
tcl('update')
tkgrid(.cdtEnv$tcl$main$cmd.frame, sticky = 'nwes', pady = 1)
tkgrid.columnconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
tkgrid.rowconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
invisible()
}
|
# Extracted Rd example for rwty::tree.dist.matrix — computes the
# pairwise tree-distance (Robinson-Foulds style) matrix between the
# posterior trees of one MCMC run from the bundled `fungus` data set.
library(rwty)
### Name: tree.dist.matrix
### Title: Tree distance matrix calculation
### Aliases: tree.dist.matrix
### Keywords: distance, robinson-foulds tree treespace,
### ** Examples
data(fungus)
tree.dist.matrix(fungus$Fungus.Run1$trees)
| /data/genthat_extracted_code/rwty/examples/tree.dist.matrix.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 250 | r | library(rwty)
# Duplicate of the rwty::tree.dist.matrix Rd example above (its
# `library(rwty)` line was lost in extraction); assumes rwty is
# already attached.
### Name: tree.dist.matrix
### Title: Tree distance matrix calculation
### Aliases: tree.dist.matrix
### Keywords: distance, robinson-foulds tree treespace,
### ** Examples
data(fungus)
tree.dist.matrix(fungus$Fungus.Run1$trees)
|
library(shiny)
options(shiny.maxRequestSize = 3000*1024^2)
suppressWarnings(suppressMessages(library(data.table)))
suppressWarnings(suppressMessages(library(plyr)))
suppressWarnings(suppressMessages(library(dplyr)))
suppressWarnings(suppressMessages(library(reshape2)))
suppressWarnings(suppressMessages(library(GenomicFeatures)))
suppressWarnings(suppressMessages(library(GenomicRanges)))
suppressWarnings(suppressMessages(library(GenomeInfoDb)))
suppressWarnings(suppressMessages(library(Rsamtools)))
suppressWarnings(suppressMessages(library(GenomicAlignments)))
suppressWarnings(suppressMessages(library(ggplot2)))
suppressWarnings(suppressMessages(library(gplots)))
suppressWarnings(suppressMessages(library(RColorBrewer)))
suppressWarnings(suppressMessages(library(extrafont)))
suppressWarnings(suppressMessages(library(grid)))
suppressWarnings(suppressMessages(library(gridExtra)))
suppressWarnings(suppressMessages(library(reporttools)))
suppressWarnings(suppressMessages(library(rtracklayer)))
# suppressWarnings(suppressMessages(library(MVN)))
suppressWarnings(suppressMessages(library(ggrepel)))
suppressWarnings(suppressMessages(library(plyr)))
suppressWarnings(suppressMessages(library(dplyr)))
suppressWarnings(suppressMessages(library(ggpubr)))
suppressWarnings(suppressMessages(library(plotly)))
# suppressWarnings(suppressMessages(library(idr)))
suppressWarnings(suppressMessages(library(htmlwidgets)))
# suppressWarnings(suppressMessages(library(ggfortify)))
# suppressWarnings(suppressMessages(library(mclust)))
source(paste0("ggCDPbamv1.R"))
library(shinythemes)
# Shiny UI: a single page collecting the analysis inputs — bam files
# for WT / treatment / PAR-CLIP, a GTF annotation, and the processing
# parameters — followed by status text and the output results table.
# NOTE(review): "Input Paramters" is a typo in the displayed title;
# fixing it would change visible UI text, so it is left as-is here.
ui <- fluidPage(theme = shinytheme("cerulean"),
titlePanel("Input Paramters"),
# Row 1: file inputs (WT and treatment accept multiple bam files).
fluidRow(
column(3,
fileInput("myWTpaths", h6("WT bam files"), multiple = TRUE)),
column(3,
fileInput("myTreatmentpaths", h6("Treatment bam files"), multiple = TRUE)),
column(3,
fileInput("myPARCLIPpath", h6("PAR-CLIP bam file"))),
column(3,
fileInput("gtfpath", h6("GTF file")))
),
# Row 2: read-orientation / library-type / normalization parameters.
fluidRow(
column(2,
h6("Reverse RNAseq orientation"),
checkboxInput("ReverseBamFileStrand", "Reverse", value = TRUE)),
column(2,
h6("Paired-End"),
checkboxInput("ispairedendread", "Paired-end", value = FALSE)),
column(2,
numericInput("readlength",
h6("Approx. RNAseq read Length"),
value = 50)),
column(2,
numericInput("ThreeutrExtension",
h6("Length of 3utr extention"),
value = 1000)),
column(2,
h6("TPM/RPKM"),
checkboxInput("RNAseqLengthNormalize", "RPKM", value = FALSE))
),
# Row 3: PAR-CLIP target tolerance, chromosome filter, and Run button.
fluidRow(
column(3,
numericInput("TargetTolerance",
h6("PAR-CLIP gene target tolerance"),value = 50)),
column(3,
h6("Filter Chromosomes"),
checkboxInput("Only24chromosomes", "Only 24 Chromosomes", value = TRUE)),
column(2,
h6("Run"),
actionButton("submit","Submit"))
),
# Status lines and the master results table produced by the server.
fluidRow(
textOutput("MasterTableName"),
textOutput("MasterTableReplicatesName")
),
fluidRow(
column(12,
tableOutput("MT"))
)
)
# Define server logic ----
# Define server logic ----
# Runs the ggCDPbamv1 processing pipeline when the user presses the "Submit"
# action button, and renders the resulting master table plus output file names.
server <- function(input, output) {
  # BUG FIX: the action button is declared with inputId "submit" in the UI,
  # but this observer previously listened on input$do (which does not exist),
  # so pressing Submit never triggered the pipeline.
  observeEvent(input$submit, {
    output$MT <- renderTable({
      inFile <- input$gtfpath$datapath
      # Guard: a GTF file is required before anything can be processed.
      if (is.null(inFile))
        return("NA")
      # PARAMETERS AND PROCESSING: run the pipeline sourced from ggCDPbamv1.R.
      ProcessedTables <- ggCDPbamv1(
        gtfpath = input$gtfpath$datapath,
        myWTpaths = input$myWTpaths$datapath,
        myTreatmentpaths = input$myTreatmentpaths$datapath,
        myPARCLIPpath = input$myPARCLIPpath$datapath,
        ReverseBamFileStrand = input$ReverseBamFileStrand, # TRUE: reverse strand orientation of RNAseq bam files; FALSE: leave unchanged.
        ispairedendread = input$ispairedendread,           # TRUE: paired-end bam; FALSE: single-end bam.
        readlength = input$readlength,                     # usually 150 for paired-end and 50 for single-end
        Only24chromosomes = input$Only24chromosomes,       # TRUE: keep only the 24 chromosomes; FALSE: include all scaffolds from the GTF.
        minTPM = 0.01,
        maxTPM = Inf,
        minexon = 5,
        minintron = 5,
        minExtensionRatio = 0.1,
        absminTPM = 0.01,
        absminexon = 1,
        absminintron = 1,
        absminExtensionRatio = 0.05,
        minTxSize = 500,
        minExonSize = 500,
        minIntronSize = 200,
        Extension = input$ThreeutrExtension,
        RNAseqLengthNormalize = input$RNAseqLengthNormalize,
        TargetTolerance = input$TargetTolerance,
        ignoreParclipStrand = FALSE
      )
      MT <- ProcessedTables[[1]]
      MTreplicates <- ProcessedTables[[2]]
      # Preview only the first 20 rows in the UI.
      MT[1:20, ]
    })
    # Helper: build the output file-name stem from the uploaded files.
    # Previously this string was built twice with identical code.
    # NOTE(review): $datapath is the server-side temporary path, so the
    # displayed name reflects temp file names; input$...$name may have been
    # intended -- confirm with the author before changing behavior.
    makeTestName <- function() {
      paste0("PARCLIP_",
             basename(gsub(".bam", "", input$myPARCLIPpath$datapath)),
             "__WT_",
             basename(gsub("Aligned.sortedByCoord.out.bam", "", input$myWTpaths$datapath[1])),
             "__Treatment_",
             basename(gsub("Aligned.sortedByCoord.out.bam", "", input$myTreatmentpaths$datapath[1])))
    }
    output$MasterTableName <- renderText({
      paste0("Output MasterTable name is:", makeTestName(), "_MasterTable.csv")
    })
    output$MasterTableReplicatesName <- renderText({
      paste0("Output MasterTableReplicates name is:", makeTestName(), "_MasterTablereplicates.csv")
    })
  })
}
# Run the app ----
# Launch the Shiny application with the `ui` and `server` objects defined above.
shinyApp(ui = ui, server = server)
| /processapp.R | no_license | paulahsan/CDP | R | false | false | 7,159 | r |
library(shiny)
# Allow uploads up to ~3 GB (bam files are large).
options(shiny.maxRequestSize = 3000*1024^2)
# Load the genomics / plotting stack quietly; commented-out packages below
# were used at some point and are kept for reference.
suppressWarnings(suppressMessages(library(data.table)))
suppressWarnings(suppressMessages(library(plyr)))
suppressWarnings(suppressMessages(library(dplyr)))
suppressWarnings(suppressMessages(library(reshape2)))
suppressWarnings(suppressMessages(library(GenomicFeatures)))
suppressWarnings(suppressMessages(library(GenomicRanges)))
suppressWarnings(suppressMessages(library(GenomeInfoDb)))
suppressWarnings(suppressMessages(library(Rsamtools)))
suppressWarnings(suppressMessages(library(GenomicAlignments)))
suppressWarnings(suppressMessages(library(ggplot2)))
suppressWarnings(suppressMessages(library(gplots)))
suppressWarnings(suppressMessages(library(RColorBrewer)))
suppressWarnings(suppressMessages(library(extrafont)))
suppressWarnings(suppressMessages(library(grid)))
suppressWarnings(suppressMessages(library(gridExtra)))
suppressWarnings(suppressMessages(library(reporttools)))
suppressWarnings(suppressMessages(library(rtracklayer)))
# suppressWarnings(suppressMessages(library(MVN)))
suppressWarnings(suppressMessages(library(ggrepel)))
suppressWarnings(suppressMessages(library(plyr)))
suppressWarnings(suppressMessages(library(dplyr)))
suppressWarnings(suppressMessages(library(ggpubr)))
suppressWarnings(suppressMessages(library(plotly)))
# suppressWarnings(suppressMessages(library(idr)))
suppressWarnings(suppressMessages(library(htmlwidgets)))
# suppressWarnings(suppressMessages(library(ggfortify)))
# suppressWarnings(suppressMessages(library(mclust)))
# Load the processing pipeline (defines ggCDPbamv1) from the working directory.
source(paste0("ggCDPbamv1.R"))
library(shinythemes)
# UI definition ----
# Input widgets for the bam/GTF uploads and the pipeline parameters, plus
# output slots for the computed master-table preview and its file names.
ui <- fluidPage(
  theme = shinytheme("cerulean"),
  # FIX: corrected user-facing typo "Paramters" -> "Parameters".
  titlePanel("Input Parameters"),
  # Row 1: input files (WT/Treatment bam files, PAR-CLIP bam, GTF annotation).
  fluidRow(
    column(3,
           fileInput("myWTpaths", h6("WT bam files"), multiple = TRUE)),
    column(3,
           fileInput("myTreatmentpaths", h6("Treatment bam files"), multiple = TRUE)),
    column(3,
           fileInput("myPARCLIPpath", h6("PAR-CLIP bam file"))),
    column(3,
           fileInput("gtfpath", h6("GTF file")))
  ),
  # Row 2: RNAseq processing options.
  fluidRow(
    column(2,
           h6("Reverse RNAseq orientation"),
           checkboxInput("ReverseBamFileStrand", "Reverse", value = TRUE)),
    column(2,
           h6("Paired-End"),
           checkboxInput("ispairedendread", "Paired-end", value = FALSE)),
    column(2,
           numericInput("readlength",
                        h6("Approx. RNAseq read Length"),
                        value = 50)),
    column(2,
           # FIX: corrected user-facing typo "extention" -> "extension".
           numericInput("ThreeutrExtension",
                        h6("Length of 3utr extension"),
                        value = 1000)),
    column(2,
           h6("TPM/RPKM"),
           checkboxInput("RNAseqLengthNormalize", "RPKM", value = FALSE))
  ),
  # Row 3: PAR-CLIP options and the Run button (inputId "submit" is what the
  # server's observeEvent must listen on).
  fluidRow(
    column(3,
           numericInput("TargetTolerance",
                        h6("PAR-CLIP gene target tolerance"), value = 50)),
    column(3,
           h6("Filter Chromosomes"),
           checkboxInput("Only24chromosomes", "Only 24 Chromosomes", value = TRUE)),
    column(2,
           h6("Run"),
           actionButton("submit", "Submit"))
  ),
  # Outputs: names of the files the pipeline would save, then the table preview.
  fluidRow(
    textOutput("MasterTableName"),
    textOutput("MasterTableReplicatesName")
  ),
  fluidRow(
    column(12,
           tableOutput("MT"))
  )
)
# Define server logic ----
# Define server logic ----
# Runs the ggCDPbamv1 processing pipeline when the user presses the "Submit"
# action button, and renders the resulting master table plus output file names.
server <- function(input, output) {
  # BUG FIX: the action button is declared with inputId "submit" in the UI,
  # but this observer previously listened on input$do (which does not exist),
  # so pressing Submit never triggered the pipeline.
  observeEvent(input$submit, {
    output$MT <- renderTable({
      inFile <- input$gtfpath$datapath
      # Guard: a GTF file is required before anything can be processed.
      if (is.null(inFile))
        return("NA")
      # PARAMETERS AND PROCESSING: run the pipeline sourced from ggCDPbamv1.R.
      ProcessedTables <- ggCDPbamv1(
        gtfpath = input$gtfpath$datapath,
        myWTpaths = input$myWTpaths$datapath,
        myTreatmentpaths = input$myTreatmentpaths$datapath,
        myPARCLIPpath = input$myPARCLIPpath$datapath,
        ReverseBamFileStrand = input$ReverseBamFileStrand, # TRUE: reverse strand orientation of RNAseq bam files; FALSE: leave unchanged.
        ispairedendread = input$ispairedendread,           # TRUE: paired-end bam; FALSE: single-end bam.
        readlength = input$readlength,                     # usually 150 for paired-end and 50 for single-end
        Only24chromosomes = input$Only24chromosomes,       # TRUE: keep only the 24 chromosomes; FALSE: include all scaffolds from the GTF.
        minTPM = 0.01,
        maxTPM = Inf,
        minexon = 5,
        minintron = 5,
        minExtensionRatio = 0.1,
        absminTPM = 0.01,
        absminexon = 1,
        absminintron = 1,
        absminExtensionRatio = 0.05,
        minTxSize = 500,
        minExonSize = 500,
        minIntronSize = 200,
        Extension = input$ThreeutrExtension,
        RNAseqLengthNormalize = input$RNAseqLengthNormalize,
        TargetTolerance = input$TargetTolerance,
        ignoreParclipStrand = FALSE
      )
      MT <- ProcessedTables[[1]]
      MTreplicates <- ProcessedTables[[2]]
      # Preview only the first 20 rows in the UI.
      MT[1:20, ]
    })
    # Helper: build the output file-name stem from the uploaded files.
    # Previously this string was built twice with identical code.
    # NOTE(review): $datapath is the server-side temporary path, so the
    # displayed name reflects temp file names; input$...$name may have been
    # intended -- confirm with the author before changing behavior.
    makeTestName <- function() {
      paste0("PARCLIP_",
             basename(gsub(".bam", "", input$myPARCLIPpath$datapath)),
             "__WT_",
             basename(gsub("Aligned.sortedByCoord.out.bam", "", input$myWTpaths$datapath[1])),
             "__Treatment_",
             basename(gsub("Aligned.sortedByCoord.out.bam", "", input$myTreatmentpaths$datapath[1])))
    }
    output$MasterTableName <- renderText({
      paste0("Output MasterTable name is:", makeTestName(), "_MasterTable.csv")
    })
    output$MasterTableReplicatesName <- renderText({
      paste0("Output MasterTableReplicates name is:", makeTestName(), "_MasterTablereplicates.csv")
    })
  })
}
# Run the app ----
# Launch the Shiny application with the `ui` and `server` objects defined above.
shinyApp(ui = ui, server = server)
|
#'CreateConceptSetDatasets
#'
#' The function CreateConceptSetDatasets inspects a set of input tables of data and creates a group of datasets, each corresponding to a concept set. Each dataset contains the records of the input tables that match the corresponding concept set and is named after it.
#'
#'
#' @param dataset a 2-level list containing, for each domain, the names of the corresponding input tables of data
#' @param codvar a 3-level list containing, for each input table of data and each domain, the name(s) of the column(s) containing the codes of interest
#' @param datevar (optional): a 2-level list containing, for each input table of data, the name(s) of the column(s) containing dates (only if extension=”csv”), to be saved as dates in the output
#' @param numericvar (optional): a 2-level list containing, for each input table of data, the name(s) of the column(s) containing numbers (only if extension=”csv”), to be saved as a number in the output
#' @param EAVtables (optional): a 2-level list specifying, for each domain, tables in an Entity-Attribute-Value structure; each table is listed with the name of two columns: the one containing attributes and the one containing values
#' @param EAVattributes (optional): a 3-level list specifying, for each domain and table in a Entity-Attribute-Value structure, the attributes whose values should be browsed to retrieve codes belonging to that domain; each attribute is listed along with its coding system
#' @param dateformat (optional): a string containing the format of the dates in the input tables of data (only if -datevar- is indicated); the string must be in one of the following:
# YYYYDDMM...
#' @param rename_col (optional) this is a list of 3-level lists; each 3-level list contains a column name for each input table of data (associated to a data domain) to be renamed in the output (for instance: the personal identifier, or the date); in the output all the columns will be renamed with the name of the list.
#' @param concept_set_domains a 2-level list containing, for each concept set, the corresponding domain
#' @param concept_set_codes a 3-level list containing, for each concept set, for each coding system, the list of the corresponding codes to be used as inclusion criteria for records: records must be included if their code(s) start with at least one string in this list; the match is executed ignoring dots
#' @param concept_set_codes_excl (optional) a 3-level list containing, for each concept set, for each coding system, the list of the corresponding codes to be used as exclusion criteria for records: records must be excluded if their code(s) start with at least one string in this list; the match is executed ignoring dots
#' @param concept_set_names (optional) a vector containing the names of the concept sets to be processed; if this is missing, all the concept sets included in the previous lists are processed
#' @param vocabulary (optional) a 3-level list containing, for each table of data and data domain, the name of the column containing the vocabulary of the column(s) -codvar-
#' @param addtabcol a logical parameter, by default set to TRUE: if so, the columns "Table_cdm" and "Col" are added to the output, indicating respectively from which original table and column the code is taken.
#' @param verbose a logical parameter, by default set to FALSE. If it is TRUE additional intermediate output datasets will be shown in the R environment
#' @param discard_from_environment (optional) a logical parameter, by default set to FALSE. If it is TRUE, the output datasets are removed from the global environment
#' @param dirinput (optional) the directory where the input tables of data are stored. If not provided the working directory is considered.
#' @param diroutput (optional) the directory where the output concept sets datasets will be saved. If not provided the working directory is considered.
#' @param extension the extension of the input tables of data (csv and dta are supported)
#'
#' @details
#'
#' A concept set is a set of medical concepts (eg the concept set "DIABETES" may contain the concepts "type 2 diabets" and "type 1 diabetes") that may be recorded in the tables of data in some coding systems (for instance, "ICD10", or "ATC"). Each concept set is associated to a data domain (eg "diagnosis" or "medication") which is the topic of one or more tables of data. When calling CreateConceptSetDatasets, the concept sets, their domains and the associated codes are listed as input in the format of multi-level lists.
#'
#' @seealso
#'
#' We open the table, add a column named "general" initially set to 0. For each concept set linked to the domain, we create a column named "Filter_conceptset" that takes the value 1 for each row that match the concept set codes. After checking for each concept set, the column general is updated and only the rows for which general=1 are kept. The dataset is saved locally as "FILTERED_table" (you will have these datasets in the global environment only if verbose=T).
#' We split each of the new FILTERED_table relying on the column "Filter_conceptset" and we create one dataset for each concept set and each dataset. (you will have these datasets in output only if verbose=T).
#' Finally we put together all the datasets related to the same concept set and we save it in the -dirtemp- given as input with the extension .RData.
#'
#'#'CHECK VOCABULARY
CreateConceptSetDatasets <- function(dataset,codvar,datevar,EAVtables,EAVattributes,dateformat, rename_col,
concept_set_domains,concept_set_codes,concept_set_codes_excl,concept_set_names,vocabulary,
addtabcol=T, verbose=F,discard_from_environment=F,
dirinput,diroutput,extension,vocabularies_with_dot_wildcard) {
# ---- Dependencies ----------------------------------------------------------
# NOTE(review): installing packages at call time is a heavy side effect;
# left unchanged, but declared package dependencies would be preferred.
if (!require("haven")) install.packages("haven")
library(haven)
if (!require("stringr")) install.packages("stringr")
library(stringr)
if (!require("purrr")) install.packages("purrr") #flatten
library(purrr)
if (!require("readr")) install.packages("readr")
library(readr)
if (!require("data.table")) install.packages("data.table")
library(data.table)
if (!require("lubridate")) install.packages("lubridate")
library(lubridate)
# Negated %in% helper.
'%!in%' <- function(x,y)!('%in%'(x,y))
# Default the output directory to the working directory and create it if absent.
if (missing(diroutput)) diroutput<-getwd()
#Check that output folder exist otherwise create it
suppressWarnings( if (!(file.exists(diroutput))){
dir.create(file.path( diroutput))
})
# If no subset of concept sets was requested, process all of them; otherwise
# restrict both the domain mapping and the input tables to what is needed.
if(missing(concept_set_names)){
concept_set_names=unique(names(concept_set_domains))
}
if(!missing(concept_set_names)){
concept_set_domains<-concept_set_domains[names(concept_set_domains) %in% concept_set_names]
dataset<-dataset[names(dataset) %in% unique(flatten_chr(concept_set_domains))]
}
used_domains<-unique(concept_set_domains)
# Invert concept_set_domains: concept_set_dom[[domain]] becomes the vector of
# concept-set names belonging to that domain.
concept_set_dom <- vector(mode = "list",length = length(used_domains))
names(concept_set_dom) = unique(flatten_chr(concept_set_domains))
for (i in 1:length(concept_set_dom)) {
for (j in 1:length(concept_set_domains))
if (names(concept_set_dom[i]) == concept_set_domains[j])
concept_set_dom[[i]] = append(flatten_chr(concept_set_dom[i]),names(concept_set_domains[j]))
}
# dataset1 extends -dataset- with the EAV tables declared for each domain, so
# that the per-table loop below visits both kinds of tables.
dataset1<-list()
for (dom in used_domains) {
if (!missing(EAVtables) & !missing(EAVattributes)){
if (dom %in% names(EAVtables)){
dataset1[[dom]]<-dataset[[dom]]
for (f in 1:length(EAVtables[[dom]])){
dataset1[[dom]]<-append(dataset1[[dom]],EAVtables[[dom]][[f]][[1]][[1]])
}
}else{dataset1[[dom]]<-dataset[[dom]]}
}else{dataset1[[dom]]<-dataset[[dom]]}
print(paste("I'm analysing domain",dom))
for (df2 in dataset1[[dom]]) {
print(paste0("I'm analysing table ",df2," [for domain ",dom,"]"))
if (missing(dirinput)) dirinput<-getwd()
# ---- Load the input table; dta, csv and RData extensions are supported ----
if (extension == "dta") {
used_df <- as.data.table(read_dta(paste0(dirinput,"/",df2,".",extension)))
} else if (extension == "csv") {
options(readr.num_columns = 0)
used_df <- fread(paste0(dirinput,"/",df2,".",extension))
}
else if (extension == "RData") {
assign('used_df', get(load(paste0(dirinput,"/",df2,".",extension))))
}
# ---- Parse the declared date columns according to -dateformat- ------------
# Three m/M characters mean a literal month name (parsed as %d%b%Y);
# otherwise the leading letter selects year-first (ymd) or day-first (dmy).
if (!missing(dateformat)){
for (n in 1:length(datevar[[dom]][[df2]])) {
if(str_count(dateformat, "m")==3 |str_count(dateformat, "M")==3) {
used_df[,datevar[[dom]][[df2]][[n]]]<-as.Date(used_df[,get(datevar[[dom]][[df2]][[n]])],"%d%b%Y")
} else if (substring(dateformat, 1,1)=="Y" | substring(dateformat, 1,1)=="y" ) {
used_df[,datevar[[dom]][[df2]][[n]]]<-ymd(used_df[,get(datevar[[dom]][[df2]][[n]])])
}else if (substring(dateformat, 1,1)=="D" | substring(dateformat, 1,1)=="d" ) {
used_df[,datevar[[dom]][[df2]][[n]]]<-dmy(used_df[,get(datevar[[dom]][[df2]][[n]])])
}
}
}
# General == 1 will mark rows matched by at least one concept set.
used_df[, General:=0]
# NOTE(review): used_df0 appears unused below -- candidate for removal.
used_df0<-as.data.table(data.frame(matrix(ncol = 0, nrow = 0)))
#for each dataset search for the codes in all concept sets
for (concept in concept_set_dom[[dom]]) {
if (concept %in% concept_set_names) {
print(paste("concept set",concept))
# ---- EAV tables: keep only rows whose attribute(/value) pair matches one
# of the attributes declared for this domain and table. ---------------------
if ( !missing(EAVtables)){
for (p in 1:length(EAVtables[[dom]])){
if (df2 %in% EAVtables[[dom]][[p]][[1]][[1]]){
used_dfAEV<-data.table()
for (elem1 in names(EAVattributes[[concept_set_domains[[concept]]]][[df2]])) {
for (i in 1:length(EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]])) {
if (length(EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[1]])>=2){
used_dfAEV<-rbind(used_dfAEV,used_df[get(EAVtables[[concept_set_domains[[concept]]]][[p]][[1]][[2]])==EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[i]][[1]] & get(EAVtables[[concept_set_domains[[concept]]]][[p]][[1]][[3]])==EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[i]][[2]],],fill=T)
}else{
used_dfAEV<-rbind(used_dfAEV,used_df[get(EAVtables[[concept_set_domains[[concept]]]][[p]][[2]])==EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[i]][[1]],])
}
}
}
used_df<-used_dfAEV
}
}
}
# ---- Decide which coding systems apply to this table ----------------------
# If a vocabulary column is declared, intersect the vocabularies present in
# the data with those of the concept set; otherwise use all of them.
if ((!missing(vocabulary))) {
if (dom %in% names(vocabulary)) {
if (df2 %in% names(vocabulary[[dom]])) {
cod_system_indataset1<-unique(used_df[,get(vocabulary[[dom]][[df2]])])
cod_system_indataset<-intersect(cod_system_indataset1,names(concept_set_codes[[concept]]))
}else{
cod_system_indataset<-names(concept_set_codes[[concept]])
}
}else{
cod_system_indataset<-names(concept_set_codes[[concept]])
}
}else{
cod_system_indataset<-names(concept_set_codes[[concept]])
}
if (length(cod_system_indataset)==0) {
used_df[,c("Filter", paste0("Col_", concept)) := list(0, NA)]
}else{
# ---- Inclusion codes: match each declared code column -----------------
# Codes are prefix-matched ignoring dots (the "_tmp" copy strips them);
# "*" in a code becomes "." in the regex (single-character wildcard);
# the special code "ALL_CODES" flags every row of the table.
for (col in codvar[[concept_set_domains[[concept]]]][[df2]]) {
used_df<-used_df[, paste0(col, "_tmp") := gsub("\\.", "", get(col))]
for (type_cod in cod_system_indataset) {
stop = FALSE
codes_rev <- concept_set_codes[[concept]][[type_cod]]
for (single_cod in concept_set_codes[[concept]][[type_cod]]) {
if (single_cod == "ALL_CODES") {
print("allcodes")
used_df[,Filter:=1]
used_df[,paste0("Col_",concept):=codvar[[dom]][[df2]][1]]
stop = TRUE
break
}
}
if (stop == TRUE) {
next
}
# For vocabularies listed in -vocabularies_with_dot_wildcard- the raw
# column (dots kept) is matched instead of the dot-stripped copy.
if ((!missing(vocabulary))) {################### IF I GIVE VOCABULARY IN INPUT
if (df2 %in% dataset[[dom]]) {
if (dom %in% names(vocabulary)) {
if (!missing(vocabularies_with_dot_wildcard)) {
if (type_cod %in% vocabularies_with_dot_wildcard) {
used_df[(str_detect(get(col), paste(paste0("^", codes_rev), collapse = "|"))) & get(vocabulary[[dom]][[df2]]) == type_cod, c("Filter", paste0("Col_", concept)) := list(1, col)]
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))) & get(vocabulary[[dom]][[df2]]) == type_cod, c("Filter", paste0("Col_", concept)) := list(1, col)]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, col)]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, col)]
}
} else {
# EAV table: Col_<concept> stores the attribute/value pair instead of
# the plain column name.
for (p in 1:length(EAVtables[[dom]])) {
if (df2 %in% EAVtables[[dom]][[p]][[1]][[1]]) {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, list(c(get(EAVtables[[dom]][[p]][[1]][[2]]), get(EAVtables[[dom]][[p]][[1]][[3]]))))]
}
}
}
} else {
if (df2 %!in% dataset[[dom]]) {
for (p in 1:length(EAVtables[[dom]])) {
if (df2 %in% EAVtables[[dom]][[p]][[1]][[1]]) {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, list(c(get(EAVtables[[dom]][[p]][[1]][[2]]), get(EAVtables[[dom]][[p]][[1]][[3]]))))]
}
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, col)]
}
}
}
}
#if we have codes to exclude
# ---- Exclusion codes: reset Filter to 0 on matching rows ------------------
# Mirrors the inclusion logic above, using concept_set_codes_excl.
if (!missing(concept_set_codes_excl)){
if ((!missing(vocabulary))) {
if (dom %in% names(vocabulary)) {
if (df2 %in% names(vocabulary[[dom]])) {
cod_system_indataset1_excl<-unique(used_df[,get(vocabulary[[dom]][[df2]])])
cod_system_indataset_excl<-Reduce(intersect, list(cod_system_indataset1_excl,names(concept_set_codes_excl[[concept]])))
}else{
cod_system_indataset_excl<-names(concept_set_codes_excl[[concept]])
}
}else{
cod_system_indataset_excl<-names(concept_set_codes_excl[[concept]])
}
}else{
cod_system_indataset_excl<-names(concept_set_codes_excl[[concept]])
}
for (type_cod_2 in cod_system_indataset_excl) {
codes_rev <- concept_set_codes_excl[[concept]][[type_cod_2]]
if ((!missing(vocabulary))) {
if (df2 %in% dataset[[dom]]) {
if (dom %in% names(vocabulary)) {
if (!missing(vocabularies_with_dot_wildcard)) {
if (type_cod_2 %in% vocabularies_with_dot_wildcard) {
used_df[(str_detect(get(col), paste(paste0("^", codes_rev), collapse = "|"))) & get(vocabulary[[dom]][[df2]]) == type_cod_2, Filter := 0]
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))) & get(vocabulary[[dom]][[df2]]) == type_cod_2, Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
}
}
# Drop the dot-stripped working copy of this code column.
used_df[, paste0(col, "_tmp") := NULL]
}
# Fold this concept set's matches into General and keep its Filter column
# under a concept-specific name for the split step below.
if ("Filter" %in% colnames(used_df)) {
used_df[Filter == 1,General:=1]
Newfilter1 <- paste0("Filter_",concept)
setnames(used_df,old = "Filter",new = Newfilter1)
}
}
}
# Standardise the code column name to "codvar".
for (col in names(used_df)) {
if (col == codvar[[dom]][[df2]]) {
setnames(used_df, col, "codvar" )
}
}
if(!missing(rename_col)){
###################RENAME THE COLUMNS ID AND DATE
# NOTE(review): eval(parse(text=elem)) looks up the renaming list by its
# name in the calling environment; rename_col[[elem]] would be the usual
# alternative -- confirm the intended calling convention before changing.
for (elem in names(rename_col)) {
data<-eval(parse(text=elem))
for (col in names(used_df)) {
if (col == data[[dom]][[df2]]) {
setnames(used_df, col, elem )
}
}
}
}
#keep only the rows that have matched codes
filtered_df <- used_df[General == 1,][,Table_cdm:=df2]
# With verbose=T the intermediate FILTERED_<table> dataset is exposed in the
# caller's environment; otherwise it stays local to this call.
if (verbose == F) {
assign(paste0("FILTERED","_",df2),filtered_df)
} else {
assign(paste0(dom,"_","FILTERED","_",df2),filtered_df,envir = parent.frame())
}
#split the dataset with respect to the concept set
# One dataset <concept>_<table> per concept set; if the table matched no row
# for this concept, an empty (all-NA) one-row template is produced instead.
for (concept in concept_set_dom[[dom]]) {
if (concept %in% concept_set_names) {
if (paste0("Filter_",concept) %in% colnames(filtered_df)) {
setnames(filtered_df,unique(names(filtered_df[,grepl(paste0("\\b","Filter_",concept,"\\b"),colnames(filtered_df)), with = F])),"Filter")
filtered_df2 <- filtered_df[Filter == 1,] [,"General":=NULL]
filtered_df2 <- filtered_df2[,!grep("^Filter",names(filtered_df2)),with = F]
if (paste0("Col_",concept) %in% colnames(filtered_df2)) {
setnames(filtered_df2,unique(names(filtered_df2[,grepl(paste0("\\b","Col_",concept,"\\b"),colnames(filtered_df2)), with = F])),"Col")
filtered_df2 <- filtered_df2[,!grep("^Col_",names(filtered_df2)),with = F]
}
Newfilter2 <- paste0("Filter_",concept)
setnames(filtered_df,old = "Filter",new = Newfilter2)
}else{filtered_df2<-used_df[1,!grep("^Filter",names(used_df)),with = F] [,"General":=NULL]
filtered_df2[,]<-NA
filtered_df2<-filtered_df2[,!grep("^Col",names(filtered_df2)),with = F]
}
if (verbose == F) {
assign(paste0(concept,"_",df2),filtered_df2)
}else{
assign(paste0(concept,"_",df2),filtered_df2,envir = parent.frame())
}
}
}
}
###########append all the datasets related to the same concept
# Stack the per-table datasets of each concept set, drop all-NA columns,
# optionally drop the provenance columns, and save the result as .RData.
for (concept in concept_set_dom[[dom]]) {
if (concept %in% concept_set_names) {
export_df <- as.data.table(data.frame(matrix(ncol = 0, nrow = 0)))
for (df2 in dataset1[[dom]]) {
if (exists(paste0(concept,"_",df2))){
export_df = suppressWarnings( rbind(export_df, eval(parse(text = paste0(concept,"_",df2))),fill = T) )
}
}
export_df<-export_df[, .SD[!all(is.na(.SD))]]
if (addtabcol == F) export_df<-export_df[,c("Table_cdm","Col"):=NULL]
if (discard_from_environment==T) {
assign(concept, export_df)
}else{ assign(concept, export_df, envir = parent.frame())}
# NOTE(review): save() is given both the variable `concept` (a character
# string) and list=concept (the dataset named by that string), so the
# .RData file also contains the name string itself -- likely unintended;
# save(list = concept, file = ...) alone may be what was meant.
save(concept, file = paste0(diroutput,"/",concept,".RData"),list = concept)
}
}
}
print(paste("Concept set datasets saved in",diroutput))
}
| /p_macro/CreateConceptSetDatasets_v14.R | no_license | ARS-toscana/CONSIGN | R | false | false | 20,825 | r | #'CreateConceptSetDatasets
#'
#' The function CreateConceptSetDatasets inspects a set of input tables af data and creates a group of datasets, each corresponding to a concept set. Each dataset contains the records of the input tables that match the corresponding concept set and is named out of it.
#'
#'
#' @param dataset a 2-level list containing, for each domain, the names of the corresponding input tables of data
#' @param codvar a 3-level list containing, for each input table of data and each domain, the name(s) of the column(s) containing the codes of interest
#' @param datevar (optional): a 2-level list containing, for each input table of data, the name(s) of the column(s) containing dates (only if extension=”csv”), to be saved as dates in the output
#' @param numericvar (optional): a 2-level list containing, for each input table of data, the name(s) of the column(s) containing numbers (only if extension=”csv”), to be saved as a number in the output
#' @param EAVtables (optional): a 2-level list specifying, for each domain, tables in a Entity-Attribute-Value structure; each table is listed with the name of two columns: the one contaning attributes and the one containing values
#' @param EAVattributes (optional): a 3-level list specifying, for each domain and table in a Entity-Attribute-Value structure, the attributes whose values should be browsed to retrieve codes belonging to that domain; each attribute is listed along with its coding system
#' @param dateformat (optional): a string containing the format of the dates in the input tables of data (only if -datevar- is indicated); the string must be in one of the following:
# YYYYDDMM...
#' @param rename_col (optional) this is a list of 3-level lists; each 3-level list contains a column name for each input table of data (associated to a data domain) to be renamed in the output (for instance: the personal identifier, or the date); in the output all the columns will be renamed with the name of the list.
#' @param concept_set_domains a 2-level list containing, for each concept set, the corresponding domain
#' @param concept_set_codes a 3-level list containing, for each concept set, for each coding system, the list of the corresponding codes to be used as inclusion criteria for records: records must be included if the their code(s) starts with at least one string in this list; the match is executed ignoring points
#' @param concept_set_codes_excl (optional) a 3-level list containing, for each concept set, for each coding system, the list of the corresponding codes to be used as exclusion criteria for records: records must be excluded if the their code(s) starts with at least one string in this list; the match is executed ignoring points
#' @param concept_set_names (optional) a vector containing the names of the concept sets to be processed; if this is missing, all the concept sets included in the previous lists are processed
#' @param vocabulary (optional) a 3-level list containing, for each table of data and data domain, the name of the column containing the vocabulary of the column(s) -codvar-
#' @param addtabcol a logical parameter, by default set to TRUE: if so, the columns "Table_cdm" and "Col" are added to the output, indicating respectively from which original table and column the code is taken.
#' @param verbose a logical parameter, by default set to FALSE. If it is TRUE additional intermediate output datasets will be shown in the R environment
#' @param discard_from_environment (optional) a logical parameter, by default set to FALSE. If it is TRUE, the output datasets are removed from the global environment
#' @param dirinput (optional) the directory where the input tables of data are stored. If not provided the working directory is considered.
#' @param diroutput (optional) the directory where the output concept sets datasets will be saved. If not provided the working directory is considered.
#' @param extension the extension of the input tables of data (csv and dta are supported)
#'
#' @details
#'
#' A concept set is a set of medical concepts (eg the concept set "DIABETES" may contain the concepts "type 2 diabets" and "type 1 diabetes") that may be recorded in the tables of data in some coding systems (for instance, "ICD10", or "ATC"). Each concept set is associated to a data domain (eg "diagnosis" or "medication") which is the topic of one or more tables of data. When calling CreateConceptSetDatasets, the concept sets, their domains and the associated codes are listed as input in the format of multi-level lists.
#'
#' @seealso
#'
#' We open the table, add a column named "general" initially set to 0. For each concept set linked to the domain, we create a column named "Filter_conceptset" that takes the value 1 for each row that match the concept set codes. After checking for each concept set, the column general is updated and only the rows for which general=1 are kept. The dataset is saved locally as "FILTERED_table" (you will have these datasets in the global environment only if verbose=T).
#' We split each of the new FILTERED_table relying on the column "Filter_conceptset" and we create one dataset for each concept set and each dataset. (you will have these datasets in output only if verbose=T).
#' Finally we put together all the datasets related to the same concept set and we save it in the -dirtemp- given as input with the extenstion .R .
#'
#'#'CHECK VOCABULARY
CreateConceptSetDatasets <- function(dataset,codvar,datevar,EAVtables,EAVattributes,dateformat, rename_col,
concept_set_domains,concept_set_codes,concept_set_codes_excl,concept_set_names,vocabulary,
addtabcol=T, verbose=F,discard_from_environment=F,
dirinput,diroutput,extension,vocabularies_with_dot_wildcard) {
if (!require("haven")) install.packages("haven")
library(haven)
if (!require("stringr")) install.packages("stringr")
library(stringr)
if (!require("purrr")) install.packages("purrr") #flatten
library(purrr)
if (!require("readr")) install.packages("readr")
library(readr)
if (!require("data.table")) install.packages("data.table")
library(data.table)
if (!require("lubridate")) install.packages("lubridate")
library(lubridate)
'%!in%' <- function(x,y)!('%in%'(x,y))
if (missing(diroutput)) diroutput<-getwd()
#Check that output folder exist otherwise create it
suppressWarnings( if (!(file.exists(diroutput))){
dir.create(file.path( diroutput))
})
if(missing(concept_set_names)){
concept_set_names=unique(names(concept_set_domains))
}
if(!missing(concept_set_names)){
concept_set_domains<-concept_set_domains[names(concept_set_domains) %in% concept_set_names]
dataset<-dataset[names(dataset) %in% unique(flatten_chr(concept_set_domains))]
}
used_domains<-unique(concept_set_domains)
concept_set_dom <- vector(mode = "list",length = length(used_domains))
names(concept_set_dom) = unique(flatten_chr(concept_set_domains))
for (i in 1:length(concept_set_dom)) {
for (j in 1:length(concept_set_domains))
if (names(concept_set_dom[i]) == concept_set_domains[j])
concept_set_dom[[i]] = append(flatten_chr(concept_set_dom[i]),names(concept_set_domains[j]))
}
dataset1<-list()
for (dom in used_domains) {
if (!missing(EAVtables) & !missing(EAVattributes)){
if (dom %in% names(EAVtables)){
dataset1[[dom]]<-dataset[[dom]]
for (f in 1:length(EAVtables[[dom]])){
dataset1[[dom]]<-append(dataset1[[dom]],EAVtables[[dom]][[f]][[1]][[1]])
}
}else{dataset1[[dom]]<-dataset[[dom]]}
}else{dataset1[[dom]]<-dataset[[dom]]}
print(paste("I'm analysing domain",dom))
for (df2 in dataset1[[dom]]) {
print(paste0("I'm analysing table ",df2," [for domain ",dom,"]"))
if (missing(dirinput)) dirinput<-getwd()
if (extension == "dta") {
used_df <- as.data.table(read_dta(paste0(dirinput,"/",df2,".",extension)))
} else if (extension == "csv") {
options(readr.num_columns = 0)
used_df <- fread(paste0(dirinput,"/",df2,".",extension))
}
else if (extension == "RData") {
assign('used_df', get(load(paste0(dirinput,"/",df2,".",extension))))
}
if (!missing(dateformat)){
for (n in 1:length(datevar[[dom]][[df2]])) {
if(str_count(dateformat, "m")==3 |str_count(dateformat, "M")==3) {
used_df[,datevar[[dom]][[df2]][[n]]]<-as.Date(used_df[,get(datevar[[dom]][[df2]][[n]])],"%d%b%Y")
} else if (substring(dateformat, 1,1)=="Y" | substring(dateformat, 1,1)=="y" ) {
used_df[,datevar[[dom]][[df2]][[n]]]<-ymd(used_df[,get(datevar[[dom]][[df2]][[n]])])
}else if (substring(dateformat, 1,1)=="D" | substring(dateformat, 1,1)=="d" ) {
used_df[,datevar[[dom]][[df2]][[n]]]<-dmy(used_df[,get(datevar[[dom]][[df2]][[n]])])
}
}
}
used_df[, General:=0]
used_df0<-as.data.table(data.frame(matrix(ncol = 0, nrow = 0)))
#for each dataset search for the codes in all concept sets
for (concept in concept_set_dom[[dom]]) {
if (concept %in% concept_set_names) {
print(paste("concept set",concept))
if ( !missing(EAVtables)){
for (p in 1:length(EAVtables[[dom]])){
if (df2 %in% EAVtables[[dom]][[p]][[1]][[1]]){
used_dfAEV<-data.table()
for (elem1 in names(EAVattributes[[concept_set_domains[[concept]]]][[df2]])) {
for (i in 1:length(EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]])) {
if (length(EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[1]])>=2){
used_dfAEV<-rbind(used_dfAEV,used_df[get(EAVtables[[concept_set_domains[[concept]]]][[p]][[1]][[2]])==EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[i]][[1]] & get(EAVtables[[concept_set_domains[[concept]]]][[p]][[1]][[3]])==EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[i]][[2]],],fill=T)
}else{
used_dfAEV<-rbind(used_dfAEV,used_df[get(EAVtables[[concept_set_domains[[concept]]]][[p]][[2]])==EAVattributes[[concept_set_domains[[concept]]]][[df2]][[elem1]][[i]][[1]],])
}
}
}
used_df<-used_dfAEV
}
}
}
if ((!missing(vocabulary))) {
if (dom %in% names(vocabulary)) {
if (df2 %in% names(vocabulary[[dom]])) {
cod_system_indataset1<-unique(used_df[,get(vocabulary[[dom]][[df2]])])
cod_system_indataset<-intersect(cod_system_indataset1,names(concept_set_codes[[concept]]))
}else{
cod_system_indataset<-names(concept_set_codes[[concept]])
}
}else{
cod_system_indataset<-names(concept_set_codes[[concept]])
}
}else{
cod_system_indataset<-names(concept_set_codes[[concept]])
}
if (length(cod_system_indataset)==0) {
used_df[,c("Filter", paste0("Col_", concept)) := list(0, NA)]
}else{
for (col in codvar[[concept_set_domains[[concept]]]][[df2]]) {
used_df<-used_df[, paste0(col, "_tmp") := gsub("\\.", "", get(col))]
for (type_cod in cod_system_indataset) {
stop = FALSE
codes_rev <- concept_set_codes[[concept]][[type_cod]]
for (single_cod in concept_set_codes[[concept]][[type_cod]]) {
if (single_cod == "ALL_CODES") {
print("allcodes")
used_df[,Filter:=1]
used_df[,paste0("Col_",concept):=codvar[[dom]][[df2]][1]]
stop = TRUE
break
}
}
if (stop == TRUE) {
next
}
if ((!missing(vocabulary))) {################### IF I GIVE VOCABULARY IN INPUT
if (df2 %in% dataset[[dom]]) {
if (dom %in% names(vocabulary)) {
if (!missing(vocabularies_with_dot_wildcard)) {
if (type_cod %in% vocabularies_with_dot_wildcard) {
used_df[(str_detect(get(col), paste(paste0("^", codes_rev), collapse = "|"))) & get(vocabulary[[dom]][[df2]]) == type_cod, c("Filter", paste0("Col_", concept)) := list(1, col)]
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))) & get(vocabulary[[dom]][[df2]]) == type_cod, c("Filter", paste0("Col_", concept)) := list(1, col)]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, col)]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, col)]
}
} else {
for (p in 1:length(EAVtables[[dom]])) {
if (df2 %in% EAVtables[[dom]][[p]][[1]][[1]]) {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, list(c(get(EAVtables[[dom]][[p]][[1]][[2]]), get(EAVtables[[dom]][[p]][[1]][[3]]))))]
}
}
}
} else {
if (df2 %!in% dataset[[dom]]) {
for (p in 1:length(EAVtables[[dom]])) {
if (df2 %in% EAVtables[[dom]][[p]][[1]][[1]]) {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, list(c(get(EAVtables[[dom]][[p]][[1]][[2]]), get(EAVtables[[dom]][[p]][[1]][[3]]))))]
}
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), c("Filter", paste0("Col_", concept)) := list(1, col)]
}
}
}
}
#if we have codes to exclude
if (!missing(concept_set_codes_excl)){
if ((!missing(vocabulary))) {
if (dom %in% names(vocabulary)) {
if (df2 %in% names(vocabulary[[dom]])) {
cod_system_indataset1_excl<-unique(used_df[,get(vocabulary[[dom]][[df2]])])
cod_system_indataset_excl<-Reduce(intersect, list(cod_system_indataset1_excl,names(concept_set_codes_excl[[concept]])))
}else{
cod_system_indataset_excl<-names(concept_set_codes_excl[[concept]])
}
}else{
cod_system_indataset_excl<-names(concept_set_codes_excl[[concept]])
}
}else{
cod_system_indataset_excl<-names(concept_set_codes_excl[[concept]])
}
for (type_cod_2 in cod_system_indataset_excl) {
codes_rev <- concept_set_codes_excl[[concept]][[type_cod_2]]
if ((!missing(vocabulary))) {
if (df2 %in% dataset[[dom]]) {
if (dom %in% names(vocabulary)) {
if (!missing(vocabularies_with_dot_wildcard)) {
if (type_cod_2 %in% vocabularies_with_dot_wildcard) {
used_df[(str_detect(get(col), paste(paste0("^", codes_rev), collapse = "|"))) & get(vocabulary[[dom]][[df2]]) == type_cod_2, Filter := 0]
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))) & get(vocabulary[[dom]][[df2]]) == type_cod_2, Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
} else {
used_df[(str_detect(get(paste0(col, "_tmp")), gsub("\\*", ".", paste(gsub("\\.", "", paste0("^", codes_rev)), collapse = "|")))), Filter := 0]
}
}
}
used_df[, paste0(col, "_tmp") := NULL]
}
if ("Filter" %in% colnames(used_df)) {
used_df[Filter == 1,General:=1]
Newfilter1 <- paste0("Filter_",concept)
setnames(used_df,old = "Filter",new = Newfilter1)
}
}
}
for (col in names(used_df)) {
if (col == codvar[[dom]][[df2]]) {
setnames(used_df, col, "codvar" )
}
}
if(!missing(rename_col)){
###################RENAME THE COLUMNS ID AND DATE
for (elem in names(rename_col)) {
data<-eval(parse(text=elem))
for (col in names(used_df)) {
if (col == data[[dom]][[df2]]) {
setnames(used_df, col, elem )
}
}
}
}
#keep only the rows that have matched codes
filtered_df <- used_df[General == 1,][,Table_cdm:=df2]
if (verbose == F) {
assign(paste0("FILTERED","_",df2),filtered_df)
} else {
assign(paste0(dom,"_","FILTERED","_",df2),filtered_df,envir = parent.frame())
}
#split the dataset with respect to the concept set
for (concept in concept_set_dom[[dom]]) {
if (concept %in% concept_set_names) {
if (paste0("Filter_",concept) %in% colnames(filtered_df)) {
setnames(filtered_df,unique(names(filtered_df[,grepl(paste0("\\b","Filter_",concept,"\\b"),colnames(filtered_df)), with = F])),"Filter")
filtered_df2 <- filtered_df[Filter == 1,] [,"General":=NULL]
filtered_df2 <- filtered_df2[,!grep("^Filter",names(filtered_df2)),with = F]
if (paste0("Col_",concept) %in% colnames(filtered_df2)) {
setnames(filtered_df2,unique(names(filtered_df2[,grepl(paste0("\\b","Col_",concept,"\\b"),colnames(filtered_df2)), with = F])),"Col")
filtered_df2 <- filtered_df2[,!grep("^Col_",names(filtered_df2)),with = F]
}
Newfilter2 <- paste0("Filter_",concept)
setnames(filtered_df,old = "Filter",new = Newfilter2)
}else{filtered_df2<-used_df[1,!grep("^Filter",names(used_df)),with = F] [,"General":=NULL]
filtered_df2[,]<-NA
filtered_df2<-filtered_df2[,!grep("^Col",names(filtered_df2)),with = F]
}
if (verbose == F) {
assign(paste0(concept,"_",df2),filtered_df2)
}else{
assign(paste0(concept,"_",df2),filtered_df2,envir = parent.frame())
}
}
}
}
###########append all the datasets related to the same concept
for (concept in concept_set_dom[[dom]]) {
if (concept %in% concept_set_names) {
export_df <- as.data.table(data.frame(matrix(ncol = 0, nrow = 0)))
for (df2 in dataset1[[dom]]) {
if (exists(paste0(concept,"_",df2))){
export_df = suppressWarnings( rbind(export_df, eval(parse(text = paste0(concept,"_",df2))),fill = T) )
}
}
export_df<-export_df[, .SD[!all(is.na(.SD))]]
if (addtabcol == F) export_df<-export_df[,c("Table_cdm","Col"):=NULL]
if (discard_from_environment==T) {
assign(concept, export_df)
}else{ assign(concept, export_df, envir = parent.frame())}
save(concept, file = paste0(diroutput,"/",concept,".RData"),list = concept)
}
}
}
print(paste("Concept set datasets saved in",diroutput))
}
|
#' dimsum__check_countfile
#'
#' Check whether user-specified count file correctly formatted.
#'
#' @param dimsum_meta an experiment metadata object (required)
#' @param input_dt input data.table (required)
#'
#' @return Reformatted data.table
#' @export
#' @import data.table
dimsum__check_countfile <- function(
  dimsum_meta,
  input_dt
  ){
  # Validate a user-supplied variant count table and rename its count columns
  # to the internal "<sample>_e<exp>_s<sel>_b<rep>_tNA_count" convention.
  # Note: input_dt is a data.table and is modified by reference (:=), so the
  # caller's object is also lower-cased/renamed.

  ### Nucleotide sequence checks (nt_seq column)
  #Check if mandatory columns present
  mandatory_cols <- c("nt_seq")
  if(!all(mandatory_cols %in% colnames(input_dt))){
    stop("One or more mandatory columns missing from file specified by countPath ('nt_seq')", call. = FALSE)
  }
  #Check nucleotide sequence column is of type character
  if(sapply(input_dt, typeof)["nt_seq"]!="character"){
    stop("One or more invalid 'nt_seq' values in variant count file specified by 'countPath'. Only valid nucleotide sequences allowed (A/C/T/G).", call. = FALSE)
  }
  #Set nucleotide sequence to lower case (by reference)
  input_dt[, nt_seq := tolower(nt_seq)]
  #Check nucleotide sequences are valid (ACGT characters only)
  if(!all(input_dt[,unique(unlist(strsplit(nt_seq, "")))] %in% c('a', 'c', 'g', 't'))){
    stop("One or more invalid 'nt_seq' values in variant count file specified by 'countPath'. Only valid nucleotide sequences allowed (A/C/T/G).", call. = FALSE)
  }

  ### Count column checks
  #Check if sample name columns present
  mandatory_cols <- unique(dimsum_meta[["exp_design"]][,"sample_name"])
  if(!all(mandatory_cols %in% colnames(input_dt))){
    stop("One or more sample names in experimentDesign file missing from column names in variant count file specified by 'countPath'", call. = FALSE)
  }
  #All non-sequence columns are sample counts
  count_cols <- names(input_dt)[names(input_dt)!="nt_seq"]
  #Check all count columns are of type integer
  typeof_cols <- sapply(input_dt[, .SD, .SDcols = count_cols], typeof)
  if(any(typeof_cols!="integer")){
    stop("Invalid type of sample count column in variant count file specified by 'countPath'. Only positive integers allowed (zero inclusive).", call. = FALSE)
  }
  #Check all count columns positive integer zero inclusive
  if(input_dt[, min(.SD, na.rm = TRUE), .SDcols = count_cols]<0){
    stop("Invalid type of sample count column in variant count file specified by 'countPath'. Only positive integers allowed (zero inclusive).", call. = FALSE)
  }

  ### Duplicated variants check
  if(input_dt[,sum(duplicated(nt_seq))]!=0){
    stop("Duplicated 'nt_seq' values not allowed in variant count file specified by 'countPath'.", call. = FALSE)
  }

  #Internal sample names (the 'technical_replicate' slot is fixed to NA)
  sample_names <- as.list(paste0(
    dimsum_meta[["exp_design"]][,"sample_name"], '_e',
    dimsum_meta[["exp_design"]][,"experiment"], '_s',
    dimsum_meta[["exp_design"]][,"selection_id"], '_b',
    dimsum_meta[["exp_design"]][,"biological_replicate"], '_tNA_count'))
  names(sample_names) <- dimsum_meta[["exp_design"]][,"sample_name"]
  #Reformat count column names (lookup by original sample name)
  names(input_dt)[names(input_dt)!="nt_seq"] <- unlist(sample_names[names(input_dt)[names(input_dt)!="nt_seq"]])
  return(input_dt)
}
| /R/dimsum__check_countfile.R | permissive | hemantkiitg/DiMSum | R | false | false | 3,196 | r |
#' dimsum__check_countfile
#'
#' Check whether user-specified count file correctly formatted.
#'
#' @param dimsum_meta an experiment metadata object (required)
#' @param input_dt input data.table (required)
#'
#' @return Reformatted data.table
#' @export
#' @import data.table
dimsum__check_countfile <- function(
  dimsum_meta,
  input_dt
  ){
  # Validates a user-supplied variant count table and renames its count
  # columns to the internal "<sample>_e<exp>_s<sel>_b<rep>_tNA_count" naming.
  # input_dt is a data.table; := modifies it by reference.
  ### Nucleotide sequence checks (nt_seq column)
  #Check if mandatory columns present
  mandatory_cols <- c("nt_seq")
  if(sum(unlist(lapply(mandatory_cols, "%in%", colnames(input_dt)))==FALSE)!=0){
    stop(paste0("One or more mandatory columns missing from file specified by countPath ('nt_seq')"), call. = FALSE)
  }
  #Check nucleotide sequence column is of type character
  if(sapply(input_dt, typeof)["nt_seq"]!="character"){
    stop("One or more invalid 'nt_seq' values in variant count file specified by 'countPath'. Only valid nucleotide sequences allowed (A/C/T/G).", call. = FALSE)
  }
  #Set nucleotide sequence to lower case
  input_dt[, nt_seq := tolower(nt_seq)]
  #Check nucleotide sequences are valid (ACGT characters only)
  if(sum(!input_dt[,unique(unlist(strsplit(nt_seq, "")))] %in% c('a', 'c', 'g', 't'))!=0){
    stop("One or more invalid 'nt_seq' values in variant count file specified by 'countPath'. Only valid nucleotide sequences allowed (A/C/T/G).", call. = FALSE)
  }
  ### Count column checks
  #Check if sample name columns present
  mandatory_cols <- unique(dimsum_meta[["exp_design"]][,"sample_name"])
  if(sum(unlist(lapply(mandatory_cols, "%in%", colnames(input_dt)))==FALSE)!=0){
    stop(paste0("One or more sample names in experimentDesign file missing from column names in variant count file specified by 'countPath'"), call. = FALSE)
  }
  #Check all count columns are of type integer
  typeof_cols <- sapply(input_dt[,.SD,,.SDcols = names(input_dt)[names(input_dt)!="nt_seq"]], typeof)
  if(sum(typeof_cols!="integer")!=0){
    stop(paste0("Invalid type of sample count column in variant count file specified by 'countPath'. Only positive integers allowed (zero inclusive)."), call. = FALSE)
  }
  #Check all count columns positive integer zero inclusive
  # NOTE(review): na.rm = T should be spelled na.rm = TRUE (T is reassignable).
  if(input_dt[,min(.SD, na.rm = T),,.SDcols = names(input_dt)[names(input_dt)!="nt_seq"]]<0){
    stop(paste0("Invalid type of sample count column in variant count file specified by 'countPath'. Only positive integers allowed (zero inclusive)."), call. = FALSE)
  }
  ### Duplicated variants check
  if(input_dt[,sum(duplicated(nt_seq))]!=0){
    stop(paste0("Duplicated 'nt_seq' values not allowed in variant count file specified by 'countPath'."), call. = FALSE)
  }
  #Sample names (ignore 'technical_replicate' column)
  # NOTE(review): paste0() has no 'sep' formal; sep = "" here is absorbed into
  # '...' and pasted as an extra empty string (a harmless no-op).
  sample_names <- as.list(paste0(
    dimsum_meta[["exp_design"]][,"sample_name"], '_e',
    dimsum_meta[["exp_design"]][,"experiment"], '_s',
    dimsum_meta[["exp_design"]][,"selection_id"], '_b',
    dimsum_meta[["exp_design"]][,"biological_replicate"], '_tNA_count', sep = ""))
  names(sample_names) <- dimsum_meta[["exp_design"]][,"sample_name"]
  #Reformat count column names
  names(input_dt)[names(input_dt)!="nt_seq"] <- unlist(sample_names[names(input_dt)[names(input_dt)!="nt_seq"]])
  return(input_dt)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{primate.dat}
\alias{primate.dat}
\title{Primate line transect survey data.}
\format{A list with elements x (perpendicular distance) and y (forward distance).}
\source{
We are grateful to Matthew Nowak from the Sumatran Orangutan Conservation
Programme (SOCP) for allowing us to use the primate survey data from the Jantho
Reintroduction Station. The initial survey was developed by Matthew Nowak and Serge
Wich (Liverpool John Moores University) and then undertaken by the SOCP with funding
from Chester Zoo.
}
\usage{
primate.dat
}
\description{
Locations relative to the observer of 127 detections of primates from a
visual survey conducted by three sets of trained observers walking previously cut line
transects in primary tropical rainforest.
}
\examples{
data(primate.dat)
}
| /LT2D/man/primate.dat.Rd | no_license | calliste-fagard-jenkin/LT2D-work | R | false | true | 894 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{primate.dat}
\alias{primate.dat}
\title{Primate line transect survey data.}
\format{A list with elements x (perpendicular distance) and y (forward distance).}
\source{
We are grateful to Matthew Nowak from the Sumatran Orangutan Conservation
Programme (SOCP) for allowing us to use the primate survey data from the Jantho
Reintroduction Station. The initial survey was developed by Matthew Nowak and Serge
Wich (Liverpool John Moores University) and then undertaken by the SOCP with funding
from Chester Zoo.
}
\usage{
primate.dat
}
\description{
Locations relative to the observer of 127 detections of primates from a
visual survey conducted by three sets of trained observers walking previously cut line
transects in primary tropical rainforest.
}
\examples{
data(primate.dat)
}
|
\name{RandomPortfolios}
\alias{RandomPortfolios}
\title{Some Title}
\usage{
RandomPortfolios(strategy, pars, n = 100, parallel = T)
}
\description{
Some Title
}
| /man/RandomPortfolios.Rd | no_license | vzs/strategery | R | false | false | 166 | rd | \name{RandomPortfolios}
\alias{RandomPortfolios}
\title{Some Title}
\usage{
RandomPortfolios(strategy, pars, n = 100, parallel = T)
}
\description{
Some Title
}
|
getCCC2 <- function(F, model="weibull") {
  ## Critical correlation coefficient squared (CCC^2) for a probability plot
  ## with F failures. Correlation developed by David Silkworth (Oct 2013);
  ## table values back-converted from the ansi C implementation in
  ## abremPivotals (abpv.c, May 2014).
  ##
  ## For 3-25 failures CCC^2 is tabulated directly. For 26-150 and 151-1400
  ## failures the tables hold quantiles q with CCC^2 = 1 - 1/exp(q), tabulated
  ## every 5 (resp. 50) failures; intermediate counts are interpolated
  ## linearly on log(F). Above 1400 failures NA is returned with a warning.

  # Direct CCC^2 values for F = 3..25 (weibull).
  direct_w <- c(0.792235,0.7990604,0.8076126,0.8204102,0.832331,0.8425375,0.8514909,0.8593213,0.8662665,
                0.8724075,0.8779149,0.8828711,0.887337,0.8914107,0.8951199,0.8985272,0.9016913,0.9045873,
                0.9073225,0.9098415,0.9121919,0.9144172,0.9164957)
  # Quantile table for F = 25, 30, ..., 150 (step 5, weibull).
  mid_w <- c(2.482857,2.593721,2.689116,2.773029,2.847969,2.915728,2.977454,3.034384,3.087205,3.136568,
             3.182761,3.226223,3.267092,3.306225,3.343156,3.378561,3.412309,3.444714,3.475747,3.505642,
             3.534433,3.562236,3.588777,3.614618,3.639701,3.663851)
  # Quantile table for F = 150, 200, ..., 1500 (step 50, weibull).
  large_w <- c(3.663851,3.872307,4.037513,4.17472,4.292113,4.394901,4.486445,4.568626,4.643578,4.712588,
               4.776053,4.83515,4.890495,4.942337,4.991377,5.037647,5.081707,5.12344,5.163087,5.2011,
               5.237467,5.272338,5.305946,5.338348,5.369513,5.399487,5.428722,5.456727)
  # Same three tables for the lognormal model.
  direct_l <- c(0.7938923,0.7992166,0.8143357,0.8286594,0.8416131,0.8531055,0.863076,0.8717764,0.8794219,
                0.8862083,0.8921895,0.8975986,0.9024265,0.9068011,0.9107908,0.9144347,0.9177708,0.9208458,
                0.9236726,0.9262948,0.9287454,0.931017,0.9331573)
  mid_l <- c(2.705413,2.847212,2.969813,3.077389,3.173427,3.260117,3.339296,3.412094,3.479407,3.542081,
             3.600777,3.655789,3.707801,3.756996,3.803559,3.847988,3.890183,3.93063,3.969467,4.006691,
             4.042462,4.076862,4.109992,4.142034,4.173005,4.202877)
  large_l <- c(4.202877,4.458735,4.659025,4.823861,4.963904,5.085855,5.193705,5.290567,5.378335,5.45858,
               5.532547,5.601127,5.6651,5.725016,5.781398,5.834598,5.884945,5.932743,5.978272,6.0218,
               6.06339,6.103111,6.141497,6.178083,6.213509,6.2477,6.280481,6.312331)

  # Select the table set for the requested model (anything other than
  # "weibull" uses the lognormal tables, as in the original correlation).
  if (model == "weibull") {
    direct <- direct_w
    midq <- mid_w
    largeq <- large_w
  } else {
    direct <- direct_l
    midq <- mid_l
    largeq <- large_l
  }

  # Small samples: tabulated directly (F = 3 maps to the first entry).
  if (F < 26) {
    return(direct[F - 2])
  }

  # Choose the quantile table, its step and its index offset.
  if (F < 151) {
    step <- 5
    offset <- 4
    qtab <- midq
  } else if (F < 1401) {
    step <- 50
    offset <- 2
    qtab <- largeq
  } else {
    warning(paste0("Quantity ",F," failures has not been correlated to CCC2"))
    return(NA)
  }

  if (F %% step == 0) {
    # Exact table entry.
    qccc2 <- qtab[F / step - offset]
  } else {
    # Interpolate linearly on log(F) between the bracketing table entries.
    f_lo <- step * as.integer(F / step)
    f_hi <- f_lo + step
    q_lo <- qtab[f_lo / step - offset]
    q_hi <- qtab[f_hi / step - offset]
    qccc2 <- q_lo + ((log(F) - log(f_lo)) / (log(f_hi) - log(f_lo)) * (q_hi - q_lo))
  }
  1 - 1 / exp(qccc2)
}
| /R/getCCC2.r | no_license | Weibull-R/WeibullR | R | false | false | 3,617 | r | getCCC2<-function(F, model="weibull") {
# getCCC2_r.r
# Correlation developed by David Silkworth in Oct 2013
# Back-converted in Nov 2019 from ansi c code in abpv.c from abremPivotals latest modification (May 2014)
getCCC2_r<-function(nF, dist="weibull") {
T1w<-c( 0.792235,0.7990604,0.8076126,0.8204102,0.832331,0.8425375,0.8514909,0.8593213,0.8662665,
0.8724075,0.8779149,0.8828711,0.887337,0.8914107,0.8951199,0.8985272,0.9016913,0.9045873,
0.9073225,0.9098415,0.9121919,0.9144172,0.9164957)
T2w<-c( 2.482857,2.593721,2.689116,2.773029,2.847969,2.915728,2.977454,3.034384,3.087205,3.136568,
3.182761,3.226223,3.267092,3.306225,3.343156,3.378561,3.412309,3.444714,3.475747,3.505642,
3.534433,3.562236,3.588777,3.614618,3.639701,3.663851)
T3w<-c( 3.663851,3.872307,4.037513,4.17472,4.292113,4.394901,4.486445,4.568626,4.643578,4.712588,
4.776053,4.83515,4.890495,4.942337,4.991377,5.037647,5.081707,5.12344,5.163087,5.2011,
5.237467,5.272338,5.305946,5.338348,5.369513,5.399487,5.428722,5.456727)
T1l<-c(0.7938923,0.7992166,0.8143357,0.8286594,0.8416131,0.8531055,0.863076,0.8717764,0.8794219,
0.8862083,0.8921895,0.8975986,0.9024265,0.9068011,0.9107908,0.9144347,0.9177708,0.9208458,
0.9236726,0.9262948,0.9287454,0.931017,0.9331573)
T2l<-c(2.705413,2.847212,2.969813,3.077389,3.173427,3.260117,3.339296,3.412094,3.479407,3.542081,
3.600777,3.655789,3.707801,3.756996,3.803559,3.847988,3.890183,3.93063,3.969467,4.006691,
4.042462,4.076862,4.109992,4.142034,4.173005,4.202877)
T3l<-c(4.202877,4.458735,4.659025,4.823861,4.963904,5.085855,5.193705,5.290567,5.378335,5.45858,
5.532547,5.601127,5.6651,5.725016,5.781398,5.834598,5.884945,5.932743,5.978272,6.0218,
6.06339,6.103111,6.141497,6.178083,6.213509,6.2477,6.280481,6.312331)
if(dist=="weibull") {
T1<-T1w
T2<-T2w
T3<-T3w
}else{
T1<-T1l
T2<-T2l
T3<-T3l
}
if(nF<26) {
CCC2<-T1[nF-2]
}else{
if(nF<151) {
i=5
if(nF%%i==0) {
## The qweibull of CCC2 can be taken directly from T2
## Offset value is 25/i-1 (for R) Will be 25/i for C++
CCC2<-1-1/exp(T2[nF/i-4])
}else{
## The qweibull of CCC2 will have to be interpolated from T2
## establish nF and qweibull bounds
nFbl<-i*as.integer(nF/i)
nFbu<-nFbl+i
qwl<-T2[nFbl/i-4]
qwu<-T2[nFbu/i-4]
## Then interpolate using log(F) and log(Fbounds)
qwccc2<-qwl+((log(nF)-log(nFbl))/(log(nFbu)-log(nFbl))*(qwu-qwl))
CCC2<-1-1/exp(qwccc2)
}
}else{
if(nF<1401) {
i=50
if(nF%%i==0) {
## The qweibull of CCC2 can be taken directly from T3
## Note there is a difference in the F/i offset for element selection!!!
## In this case the offset = 150/i-1 (for R), will be 150/i for C++
CCC2<-1-1/exp(T3[nF/i-2])
}else{
## The qweibull of CCC2 will have to be interpolated from T3
## establish nF and qweibull bounds
nFbl<-i*as.integer(nF/i)
nFbu<-nFbl+i
qwl<-T3[nFbl/i-2]
qwu<-T3[nFbu/i-2]
## Then interpolate using log(nF) and log(nFbounds)
qwccc2<-qwl+((log(nF)-log(nFbl))/(log(nFbu)-log(nFbl))*(qwu-qwl))
CCC2<-1-1/exp(qwccc2)
}
}else{
warning(paste0("Quantity ",nF," failures has not been correlated to CCC2"))
CCC2<-NA
}
}
}
CCC2
}
return(getCCC2_r(F, model))
}
|
#################################
### Linear regression example ###
### with simulated data 1     ###
#################################

# Set seed (so that simulations may be redone)
# Reproducibility depends on the exact order of random draws below
# (runif first, then rnorm), so do not reorder the simulation lines.
set.seed(100)

# Number of data points
n <- 20

# Intercept, slope, and std. deviation for simulations
beta0 <- 50
beta1 <- 200
sigma <- 90

# Simulated data points
x <- runif(n, -2, 4)
y <- beta0 + beta1 * x + rnorm(n, mean = 0, sd = sigma)

# Scatter plot of x and y
plot(x, y)

# Add 'true' line to the plot
lines(x, beta0 + beta1*x, col = 2)


#################################
### Linear regression example ###
### with simulated data 2     ###
#################################

# Set seed (so that simulations may be redone)
set.seed(100)

# Generate x
x <- runif(n = 20, min = -2, max = 4)

# Simulate y
beta0 <- 50; beta1 <- 200; sigma <- 90
y <- beta0 + beta1 * x + rnorm(n = length(x), mean = 0, sd = sigma)

# From here: like for the analysis of 'real data', we have data in x and y:

# Scatter plot of y against x
plot(x, y)

# Find the least squares estimates, use Theorem 5.4
# (wrapping an assignment in parentheses also prints the assigned value)
(beta1hat <- sum( (y - mean(y))*(x-mean(x)) ) / sum( (x-mean(x))^2 ))
# NOTE(review): 'bet0hat' is presumably a typo for 'beta0hat'.
(bet0hat <- mean(y) - beta1hat*mean(x))

# Use lm() to find the estimates
lm(y ~ x)

# Plot the fitted line
abline(lm(y ~ x), col="red")


################################################
### Distribution of estimators of regression ###
### coefficients by simulation               ###
################################################

# Number of repetitions
nRepeat <- 1000

# Two vectors to save the estimates in (preallocated)
Beta0Hat <- numeric(nRepeat)
Beta1Hat <- numeric(nRepeat)

# Repeat the simulation and estimation nRepeat times
for(i in 1:nRepeat){
  # Generate x
  x <- runif(n = 20, min = -2, max = 4)
  # Simulate from the linear regression model
  beta0 = 50; beta1 = 200; sigma = 90
  y <- beta0 + beta1 * x + rnorm(n = length(x), mean = 0, sd = sigma)
  # Use lm() to find the estimates
  fit <- lm(y ~ x)
  # Save the estimates (coefficients[1] = intercept, [2] = slope)
  Beta0Hat[i] <- fit$coefficients[1]
  Beta1Hat[i] <- fit$coefficients[2]
}

# See empirical distributions of the estimates
hist(Beta0Hat, probability = TRUE)
hist(Beta1Hat, probability = TRUE)


###########################################
### Linear regression: Hypothesis tests ###
### Example: Height-Weight data         ###
###########################################

# Read data into R
x <- c(168, 161, 167, 179, 184, 166, 198, 187, 191, 179)
y <- c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)

# Fit model to data
fit <- lm(y ~ x)

# Look at model summary to find Tobs-values and p-values
summary(fit)


##################################################
### Example: Illustration of CIs by simulation ###
##################################################

# Number of repetitions (here: CIs)
nRepeat <- 1000

# Empty logical vector of length nRepeat
TrueValInCI <- logical(nRepeat)

# Repeat the simulation and estimation nRepeat times:
for(i in 1:nRepeat){
  # Generate x
  x <- runif(n = 20, min = -2, max = 4)
  # Simulate y
  beta0 = 50; beta1 = 200; sigma = 90
  y <- beta0 + beta1 * x + rnorm(n = length(x), mean = 0, sd = sigma)
  # Use lm() to fit model
  fit <- lm(y ~ x)
  # Use confint() to compute 95% CI for intercept
  ci <- confint(fit, "(Intercept)", level=0.95)
  # Was the 'true' intercept included in the interval? (covered)
  (TrueValInCI[i] <- ci[1] < beta0 & beta0 < ci[2])
}

# How often was the true intercept included in the CI?
# (should be close to the nominal coverage 0.95)
sum(TrueValInCI) / nRepeat


##################################################
### Example: Confidence intervals for the line ###
##################################################

# Generate x
x <- runif(n = 20, min = -2, max = 4)

# Simulate y
beta0 = 50; beta1 = 200; sigma = 90
y <- beta0 + beta1 * x + rnorm(n = length(x), sd = sigma)

# Use lm() to fit model
fit <- lm(y ~ x)

# Make a sequence of 100 x-values
xval <- seq(from = -2, to = 6, length.out = 100)

# Use the predict function (returns a matrix with columns fit/lwr/upr)
CI <- predict(fit, newdata = data.frame(x = xval),
              interval = "confidence",
              level = 0.95)

# Check what we got
head(CI)

# Plot the data, model fit and intervals
plot(x, y, pch = 20)
abline(fit)
lines(xval, CI[, "lwr"], lty=2, col = "red", lwd = 2)
lines(xval, CI[, "upr"], lty=2, col = "red", lwd = 2)
##################################################
### Example: Prediction intervals for the line ###
##################################################

# Generate x (no seed set here, so results vary between runs)
x <- runif(n = 20, min = -2, max = 4)

# Simulate y from the 'true' line plus Gaussian noise
beta0 = 50; beta1 = 200; sigma = 90
y <- beta0 + beta1 * x + rnorm(n = length(x), sd = sigma)

# Use lm() to fit model
fit <- lm(y ~ x)

# Make a sequence of 100 x-values at which to predict
xval <- seq(from = -2, to = 6, length.out = 100)

# Use the predict function to get 95% prediction intervals
PI <- predict(fit, newdata = data.frame(x = xval),
              interval = "prediction",
              level = 0.95)

# Check what we got (fixed: inspect PI, not the CI object left over
# from the previous confidence-interval example)
head(PI)

# Plot the data, model fit and prediction intervals
plot(x, y, pch = 20)
abline(fit)
lines(xval, PI[, "lwr"], lty = 2, col = "blue", lwd = 2)
lines(xval, PI[, "upr"], lty = 2, col = "blue", lwd = 2)
##############################################
### Linear regression: Correlation and R^2 ###
### Example: Height-Weight data            ###
##############################################

# Read data into R
x <- c(168, 161, 167, 179, 184, 166, 198, 187, 191, 179)
y <- c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)

# Fit model to data
fit <- lm(y ~ x)

# Scatter plot of data with fitted line
plot(x,y, xlab = "Height", ylab = "Weight")
abline(fit, col="red")

# See summary
summary(fit)

# Correlation between x and y
cor(x,y)

# Squared correlation is the "Multiple R-squared" from summary(fit)
# (this equivalence holds for simple linear regression with one predictor)
cor(x,y)^2


###########################################
### Linear regression: Model validation ###
### Example: Height-Weight data         ###
###########################################

# Read data into R
x <- c(168, 161, 167, 179, 184, 166, 198, 187, 191, 179)
y <- c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)

# Fit model to data
fit <- lm(y ~ x)

# QQ-plot of residuals (checks the normality assumption)
qqnorm(fit$residuals, main = "") # or "Wally plot" of residuals

# Plots of residuals against fitted values (checks variance homogeneity)
plot(fit$fitted, fit$residuals)
| /introstat/slides02402/week8.R | no_license | Collinnn/Aflevering1 | R | false | false | 6,278 | r | #################################
### Linear regression example ###
### with simulated data 1 ###
#################################
# Set seed (so that simulations may be redone)
set.seed(100)
# Number of data points
n <- 20
# Intercept, slope, and std. deviation for simulations
beta0 <- 50
beta1 <- 200
sigma <- 90
# Simulated data points
x <- runif(n, -2, 4)
y <- beta0 + beta1 * x + rnorm(n, mean = 0, sd = sigma)
# Scatter plot of x and y
plot(x, y)
# Add 'true' line to the plot
lines(x, beta0 + beta1*x, col = 2)
#################################
### Linear regression example ###
### with simulated data 2 ###
#################################
# Set seed (so that simulations may be redone)
set.seed(100)
# Generate x
x <- runif(n = 20, min = -2, max = 4)
# Simulate y
beta0 <- 50; beta1 <- 200; sigma <- 90
y <- beta0 + beta1 * x + rnorm(n = length(x), mean = 0, sd = sigma)
# From here: like for the analysis of 'real data', we have data in x and y:
# Scatter plot of y against x
plot(x, y)
# Find the least squares estimates, use Theorem 5.4
# (outer parentheses make the assignment auto-print the value)
(beta1hat <- sum( (y - mean(y))*(x-mean(x)) ) / sum( (x-mean(x))^2 ))
# Fixed typo: 'bet0hat' renamed 'beta0hat' for consistency with beta1hat
(beta0hat <- mean(y) - beta1hat*mean(x))
# Use lm() to find the estimates (should agree with the formulas above)
lm(y ~ x)
# Plot the fitted line
abline(lm(y ~ x), col="red")
################################################
### Distribution of estimators of regression ###
### coefficients by simulation ###
################################################
# Number of repetitions
nRepeat <- 1000
# Two vectors to save the estimates in (preallocated)
Beta0Hat <- numeric(nRepeat)
Beta1Hat <- numeric(nRepeat)
# 'True' model parameters (loop-invariant, so defined once;
# also fixed: '=' replaced by '<-' for assignment)
beta0 <- 50; beta1 <- 200; sigma <- 90
# Repeat the simulation and estimation nRepeat times
for (i in seq_len(nRepeat)) {
  # Generate x
  x <- runif(n = 20, min = -2, max = 4)
  # Simulate from the linear regression model
  y <- beta0 + beta1 * x + rnorm(n = length(x), mean = 0, sd = sigma)
  # Use lm() to find the estimates
  fit <- lm(y ~ x)
  # Save the estimates
  Beta0Hat[i] <- fit$coefficients[1]
  Beta1Hat[i] <- fit$coefficients[2]
}
# See empirical distributions of the estimates
hist(Beta0Hat, probability = TRUE)
hist(Beta1Hat, probability = TRUE)
###########################################
### Linear regression: Hypothesis tests ###
### Example: Height-Weight data ###
###########################################
# Read data into R
# x: heights and y: weights of 10 subjects
x <- c(168, 161, 167, 179, 184, 166, 198, 187, 191, 179)
y <- c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
# Fit model to data
fit <- lm(y ~ x)
# Look at model summary to find Tobs-values and p-values
# (t-tests of H0: beta_i = 0 for the intercept and the slope)
summary(fit)
##################################################
### Example: Illustration of CIs by simulation ###
##################################################
# Number of repetitions (here: CIs)
nRepeat <- 1000
# Empty logical vector of length nRepeat (preallocated)
TrueValInCI <- logical(nRepeat)
# 'True' model parameters (loop-invariant; '=' replaced by '<-')
beta0 <- 50; beta1 <- 200; sigma <- 90
# Repeat the simulation and estimation nRepeat times:
for (i in seq_len(nRepeat)) {
  # Generate x
  x <- runif(n = 20, min = -2, max = 4)
  # Simulate y
  y <- beta0 + beta1 * x + rnorm(n = length(x), mean = 0, sd = sigma)
  # Use lm() to fit model
  fit <- lm(y ~ x)
  # Use confint() to compute 95% CI for intercept
  ci <- confint(fit, "(Intercept)", level = 0.95)
  # Was the 'true' intercept included in the interval? (covered)
  # (removed pointless auto-print parentheses -- they print nothing
  # inside a for loop anyway)
  TrueValInCI[i] <- ci[1] < beta0 & beta0 < ci[2]
}
# How often was the true intercept included in the CI?
# (should be close to the nominal 95%)
sum(TrueValInCI) / nRepeat
##################################################
### Example: Confidence intervals for the line ###
##################################################
# Generate x
x <- runif(n = 20, min = -2, max = 4)
# Simulate y (rnorm mean defaults to 0; '=' replaced by '<-')
beta0 <- 50; beta1 <- 200; sigma <- 90
y <- beta0 + beta1 * x + rnorm(n = length(x), sd = sigma)
# Use lm() to fit model
fit <- lm(y ~ x)
# Make a sequence of 100 x-values to predict at
xval <- seq(from = -2, to = 6, length.out = 100)
# Use the predict function to get 95% confidence intervals
CI <- predict(fit, newdata = data.frame(x = xval),
              interval = "confidence",
              level = 0.95)
# Check what we got
head(CI)
# Plot the data, model fit and intervals
plot(x, y, pch = 20)
abline(fit)
lines(xval, CI[, "lwr"], lty = 2, col = "red", lwd = 2)
lines(xval, CI[, "upr"], lty = 2, col = "red", lwd = 2)
##################################################
### Example: Prediction intervals for the line ###
##################################################
# Generate x
x <- runif(n = 20, min = -2, max = 4)
# Simulate y from the 'true' line plus normal noise (rnorm mean defaults to 0)
beta0 <- 50; beta1 <- 200; sigma <- 90
y <- beta0 + beta1 * x + rnorm(n = length(x), sd = sigma)
# Use lm() to fit model
fit <- lm(y ~ x)
# Make a sequence of 100 x-values to predict at
xval <- seq(from = -2, to = 6, length.out = 100)
# Use the predict function to get 95% prediction intervals
PI <- predict(fit, newdata = data.frame(x = xval),
              interval = "prediction",
              level = 0.95)
# Check what we got
# (fixed: inspect PI -- the original printed the unrelated CI object)
head(PI)
# Plot the data, model fit and intervals
plot(x, y, pch = 20)
abline(fit)
lines(xval, PI[, "lwr"], lty = 2, col = "blue", lwd = 2)
lines(xval, PI[, "upr"], lty = 2, col = "blue", lwd = 2)
##############################################
### Linear regression: Correlation and R^2 ###
### Example: Height-Weight data ###
##############################################
# Read data into R
# x: heights and y: weights of 10 subjects -- presumably cm and kg;
# verify units against the course material
x <- c(168, 161, 167, 179, 184, 166, 198, 187, 191, 179)
y <- c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
# Fit model to data
fit <- lm(y ~ x)
# Scatter plot of data with fitted line
plot(x,y, xlab = "Height", ylab = "Weight")
abline(fit, col="red")
# See summary
summary(fit)
# Correlation between x and y
cor(x,y)
# Squared correlation is the "Multiple R-squared" from summary(fit)
# (this identity holds for simple linear regression with one predictor)
cor(x,y)^2
###########################################
### Linear regression: Model validation ###
### Example: Height-Weight data ###
###########################################
# Read data into R (same Height-Weight data as above)
x <- c(168, 161, 167, 179, 184, 166, 198, 187, 191, 179)
y <- c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
# Fit model to data
fit <- lm(y ~ x)
# QQ-plot of residuals: points near a straight line support the
# normality assumption for the error term
qqnorm(fit$residuals, main = "") # or "Wally plot" of residuals
# Plots of residuals against fitted values
# (look for non-constant variance or unmodelled structure)
plot(fit$fitted, fit$residuals)
|
# Pipeline driver: smooth counts within each patient, then average the
# smoothed values across patients (post-partum samples are split off
# first and handled separately by interpatient_avg).
avg_wrapper = function(counts, interval, isGA) {
  library(dplyr)
  # Post-partum samples have a positive time-since-delivery (dCD > 0)
  postpartum = counts %>% filter(dCD > 0)
  smoothed = smooth_intrapatient_avg(counts, interval, isGA)
  interpatient_avg(smoothed, postpartum, interval, isGA)
}
# Average samples within each patient over fixed-width time bins.
#
# counts: data frame with one row per sample; must contain a 'patient'
#   column plus numeric columns including 'ga' (gestational age, weeks)
#   and 'dCD' (weeks relative to delivery; > 0 means post-partum).
# interval: bin width in weeks.
# isGA: if TRUE bin on gestational age ('ga'), otherwise on 'dCD'.
# Returns one row per patient per bin with the per-patient mean of every
# numeric column.
smooth_intrapatient_avg = function(counts, interval, isGA) {
  counts = counts %>% filter(dCD <= 0) #Remove post-partum samples
  #Take average for every interval weeks either for GA or dCD
  if (isGA) {
    weeks = seq(4, 44, by=interval)
  } else {
    weeks = seq(floor(min(counts$dCD)), 1, by = interval)
  }
  avg = data.frame()
  #Take intrapatient average either for GA or dCD
  # Bins are half-open: (weeks[idx-1], weeks[idx]]
  for (idx in 2:length(weeks)) {
    if (isGA) {
      # Fixed: funs() is deprecated since dplyr 0.8 -- use lambda notation
      temp_avg = counts %>% group_by(patient) %>% filter(ga>weeks[idx-1] & ga<= weeks[idx]) %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE))
    } else {
      temp_avg = counts %>% group_by(patient) %>% filter(dCD>weeks[idx-1] & dCD<= weeks[idx]) %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE))
    }
    avg = rbind(avg, temp_avg)
  }
  return(avg)
}
# Average across patients per time bin and attach the SEM.
#
# counts: per-patient, per-bin averages (output of smooth_intrapatient_avg).
# pp: raw post-partum samples (dCD > 0), collapsed into one extra bin.
# interval: bin width in weeks; isGA: bin on 'ga' if TRUE, else on 'dCD'.
# Returns a long-format data frame (one row per variable x bin) with
# columns ga, dCD, delivery, variable, value and sd (the SEM).
interpatient_avg = function(counts, pp, interval, isGA) {
  library(reshape2)
  all_patient_avg = data.frame()
  all_patient_sd = data.frame()
  sem_n = c() #the n for every month's average to calculate SEM
  #Address post-partum separately
  # Fixed: funs() is deprecated since dplyr 0.8 -- use lambda notation
  pp_mean = pp %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE))
  pp_sd = pp %>% summarize_if(is.numeric, ~ sd(.x, na.rm = TRUE))
  pp_sd = pp_sd/sqrt(nrow(pp)) #sem
  #Take average for every interval weeks either for GA or dCD
  if (isGA) {
    weeks = seq(4, 44, by=interval)
  } else {
    weeks = seq(floor(min(counts$dCD)), 1, by = interval)
  }
  # Bins are half-open: (weeks[idx-1], weeks[idx]]
  for (idx in 2:length(weeks)) {
    if (isGA) {
      temp = counts %>% filter(ga>weeks[idx-1] & ga<= weeks[idx])
    } else {
      temp = counts %>% filter(dCD>weeks[idx-1] & dCD<= weeks[idx])
    }
    sem_n = c(sem_n, nrow(temp))
    all_patient_avg = rbind(all_patient_avg, temp %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE)))
    all_patient_sd = rbind(all_patient_sd, temp %>% summarize_if(is.numeric, ~ sd(.x, na.rm = TRUE)))
  }
  # Convert each bin's SD to SEM by scaling with 1/sqrt(n)
  sem_n = 1/sqrt(sem_n)
  pp_mean$ga = round(pp_mean$ga)
  pp_mean$dCD = round(pp_mean$dCD)
  all_patient_sd = sweep(all_patient_sd %>% select(-starts_with('ga'), -starts_with('dCD'), -starts_with('delivery')), 1, sem_n, '*')
  all_patient_avg = rbind(all_patient_avg, pp_mean)
  all_patient_sd = rbind(all_patient_sd, pp_sd %>% select(-starts_with('ga'), -starts_with('dCD'), -starts_with('delivery'), -starts_with('GAPDH')))
  #Add SEM column to Avg dataframe
  # NOTE(review): relies on melt() emitting rows in the same order for
  # both data frames after the GAPDH exclusion -- confirm columns match
  all_patient_avg = melt(all_patient_avg %>% select(-starts_with('GAPDH')), id = c('ga', 'dCD', 'delivery'))
  all_patient_sd = melt(all_patient_sd)
  all_patient_avg$sd = all_patient_sd$value
  return(all_patient_avg)
}
# Plot one facet per variable in 'panel' from the long-format averages,
# with error bars of +/- one SEM, and write the result to a PDF.
#
# counts: long-format data frame with columns ga, dCD, variable, value, sd
#   (output of interpatient_avg).
# panel: character vector of variable names; also fixes the facet order.
# isGA: if TRUE plot against gestational age, else against time to delivery.
# plot_name: output PDF file path.
# NOTE(review): uses ggplot2 and theme_pub() without loading/defining them
# here -- assumes the caller has them attached; confirm before reuse.
plot_panel = function(counts, panel, isGA, plot_name) {
  toPlot = counts[counts$variable %in% panel,]
  # Re-level so facets appear in the order given by 'panel'
  toPlot$variable = factor(toPlot$variable, levels = panel)
  # Up to 7 facets per row
  row_n = ceiling(length(panel)/7)
  if (isGA) {
    plot = ggplot(data = toPlot, aes(x=ga, y= value)) +
      geom_line() +
      geom_point(size = 2.5) +
      geom_errorbar(aes(x=ga, ymin = value-sd, ymax = value+sd)) +
      theme_pub() +
      theme(
        strip.background = element_blank(),
        strip.text = element_text(face = 'bold')) +
      coord_cartesian(xlim = c(0, 45)) +
      expand_limits(y=0) +
      labs(x='Gestational age (weeks)', y=expression(over('Estimated transcript count', 'mL plasma'))) +
      facet_wrap(~variable, nrow = row_n, scales = "free")
  } else {
    plot = ggplot(data = toPlot, aes(x=dCD, y= value)) +
      geom_line() +
      geom_point(size = 2.5) +
      geom_errorbar(aes(x=dCD, ymin = value-sd, ymax = value+sd)) +
      theme_pub() +
      theme(
        strip.background = element_blank(),
        strip.text = element_text(face = 'bold')) +
      coord_cartesian(xlim = c(-40, 5)) +
      expand_limits(y=0) +
      labs(x='Time to delivery (weeks)', y=expression(over('Estimated transcript count', 'mL plasma'))) +
      facet_wrap(~variable, nrow = row_n, scales = "free")
  }
  # Write the plot; dev.off() closes the PDF device
  pdf(plot_name, useDingbats = FALSE, width = 20, height = 10)
  print(plot)
  dev.off()
}
| /fig_2/a/fig_2a_helper.R | no_license | jianglst/pregnancy_cfRNA | R | false | false | 4,218 | r | avg_wrapper = function(counts, interval, isGA) {
library(dplyr)
pp = counts %>% filter(dCD>0)
counts = smooth_intrapatient_avg(counts, interval, isGA)
counts = interpatient_avg(counts, pp, interval, isGA)
return(counts)
}
# Average samples within each patient over fixed-width time bins.
#
# counts: data frame with one row per sample; must contain a 'patient'
#   column plus numeric columns including 'ga' (gestational age, weeks)
#   and 'dCD' (weeks relative to delivery; > 0 means post-partum).
# interval: bin width in weeks.
# isGA: if TRUE bin on gestational age ('ga'), otherwise on 'dCD'.
# Returns one row per patient per bin with the per-patient mean of every
# numeric column.
smooth_intrapatient_avg = function(counts, interval, isGA) {
  counts = counts %>% filter(dCD <= 0) #Remove post-partum samples
  #Take average for every interval weeks either for GA or dCD
  if (isGA) {
    weeks = seq(4, 44, by=interval)
  } else {
    weeks = seq(floor(min(counts$dCD)), 1, by = interval)
  }
  avg = data.frame()
  #Take intrapatient average either for GA or dCD
  # Bins are half-open: (weeks[idx-1], weeks[idx]]
  for (idx in 2:length(weeks)) {
    if (isGA) {
      # Fixed: funs() is deprecated since dplyr 0.8 -- use lambda notation
      temp_avg = counts %>% group_by(patient) %>% filter(ga>weeks[idx-1] & ga<= weeks[idx]) %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE))
    } else {
      temp_avg = counts %>% group_by(patient) %>% filter(dCD>weeks[idx-1] & dCD<= weeks[idx]) %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE))
    }
    avg = rbind(avg, temp_avg)
  }
  return(avg)
}
# Average across patients per time bin and attach the SEM.
#
# counts: per-patient, per-bin averages (output of smooth_intrapatient_avg).
# pp: raw post-partum samples (dCD > 0), collapsed into one extra bin.
# interval: bin width in weeks; isGA: bin on 'ga' if TRUE, else on 'dCD'.
# Returns a long-format data frame (one row per variable x bin) with
# columns ga, dCD, delivery, variable, value and sd (the SEM).
interpatient_avg = function(counts, pp, interval, isGA) {
  library(reshape2)
  all_patient_avg = data.frame()
  all_patient_sd = data.frame()
  sem_n = c() #the n for every month's average to calculate SEM
  #Address post-partum separately
  # Fixed: funs() is deprecated since dplyr 0.8 -- use lambda notation
  pp_mean = pp %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE))
  pp_sd = pp %>% summarize_if(is.numeric, ~ sd(.x, na.rm = TRUE))
  pp_sd = pp_sd/sqrt(nrow(pp)) #sem
  #Take average for every interval weeks either for GA or dCD
  if (isGA) {
    weeks = seq(4, 44, by=interval)
  } else {
    weeks = seq(floor(min(counts$dCD)), 1, by = interval)
  }
  # Bins are half-open: (weeks[idx-1], weeks[idx]]
  for (idx in 2:length(weeks)) {
    if (isGA) {
      temp = counts %>% filter(ga>weeks[idx-1] & ga<= weeks[idx])
    } else {
      temp = counts %>% filter(dCD>weeks[idx-1] & dCD<= weeks[idx])
    }
    sem_n = c(sem_n, nrow(temp))
    all_patient_avg = rbind(all_patient_avg, temp %>% summarize_if(is.numeric, ~ mean(.x, na.rm = TRUE)))
    all_patient_sd = rbind(all_patient_sd, temp %>% summarize_if(is.numeric, ~ sd(.x, na.rm = TRUE)))
  }
  # Convert each bin's SD to SEM by scaling with 1/sqrt(n)
  sem_n = 1/sqrt(sem_n)
  pp_mean$ga = round(pp_mean$ga)
  pp_mean$dCD = round(pp_mean$dCD)
  all_patient_sd = sweep(all_patient_sd %>% select(-starts_with('ga'), -starts_with('dCD'), -starts_with('delivery')), 1, sem_n, '*')
  all_patient_avg = rbind(all_patient_avg, pp_mean)
  all_patient_sd = rbind(all_patient_sd, pp_sd %>% select(-starts_with('ga'), -starts_with('dCD'), -starts_with('delivery'), -starts_with('GAPDH')))
  #Add SEM column to Avg dataframe
  # NOTE(review): relies on melt() emitting rows in the same order for
  # both data frames after the GAPDH exclusion -- confirm columns match
  all_patient_avg = melt(all_patient_avg %>% select(-starts_with('GAPDH')), id = c('ga', 'dCD', 'delivery'))
  all_patient_sd = melt(all_patient_sd)
  all_patient_avg$sd = all_patient_sd$value
  return(all_patient_avg)
}
# Plot one facet per variable in 'panel' from the long-format averages,
# with error bars of +/- one SEM, and write the result to a PDF.
#
# counts: long-format data frame with columns ga, dCD, variable, value, sd
#   (output of interpatient_avg).
# panel: character vector of variable names; also fixes the facet order.
# isGA: if TRUE plot against gestational age, else against time to delivery.
# plot_name: output PDF file path.
# NOTE(review): uses ggplot2 and theme_pub() without loading/defining them
# here -- assumes the caller has them attached; confirm before reuse.
plot_panel = function(counts, panel, isGA, plot_name) {
  toPlot = counts[counts$variable %in% panel,]
  # Re-level so facets appear in the order given by 'panel'
  toPlot$variable = factor(toPlot$variable, levels = panel)
  # Up to 7 facets per row
  row_n = ceiling(length(panel)/7)
  if (isGA) {
    plot = ggplot(data = toPlot, aes(x=ga, y= value)) +
      geom_line() +
      geom_point(size = 2.5) +
      geom_errorbar(aes(x=ga, ymin = value-sd, ymax = value+sd)) +
      theme_pub() +
      theme(
        strip.background = element_blank(),
        strip.text = element_text(face = 'bold')) +
      coord_cartesian(xlim = c(0, 45)) +
      expand_limits(y=0) +
      labs(x='Gestational age (weeks)', y=expression(over('Estimated transcript count', 'mL plasma'))) +
      facet_wrap(~variable, nrow = row_n, scales = "free")
  } else {
    plot = ggplot(data = toPlot, aes(x=dCD, y= value)) +
      geom_line() +
      geom_point(size = 2.5) +
      geom_errorbar(aes(x=dCD, ymin = value-sd, ymax = value+sd)) +
      theme_pub() +
      theme(
        strip.background = element_blank(),
        strip.text = element_text(face = 'bold')) +
      coord_cartesian(xlim = c(-40, 5)) +
      expand_limits(y=0) +
      labs(x='Time to delivery (weeks)', y=expression(over('Estimated transcript count', 'mL plasma'))) +
      facet_wrap(~variable, nrow = row_n, scales = "free")
  }
  # Write the plot; dev.off() closes the PDF device
  pdf(plot_name, useDingbats = FALSE, width = 20, height = 10)
  print(plot)
  dev.off()
}
|
## Put comments here that give an overall description of what your
## functions do
## Creates a matrix that will cache its inverse, a costly operation
# Construct a cache-aware matrix wrapper: a list of four accessor
# closures sharing 'x' (the matrix) and 'inv' (its cached inverse).
# Setting a new matrix invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # matrix changed, so drop the stale inverse
  }
  get <- function() x
  setInv <- function(new_inv) inv <<- new_inv
  getInv <- function() inv
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
# Find the inverse of the passed matrix and return it and cache it in the passed param
# note that x must be a value returned from 'makeCachedMatrix'
# The following function calculates the inverse of the special matrix
# created with 'makeCachedMatrix' However, it first checks to see if the
# inverse has already been calculated. If so, it gets the inverse from the
# cache and skips the computation. Otherwise, it calculates the inverse of
# the matrix and sets the value using the setInv function on x
# Return the inverse of 'x', computing it at most once.
#
# x: a cache matrix created by makeCacheMatrix (a list with get/getInv/
#    setInv closures).
# ...: further arguments forwarded to solve() (e.g. tol).
#      Fixed: the original declared '...' but never passed it on.
# Returns the inverse of x$get(), from cache when available.
cacheSolve <- function(x, ...) {
  inv <- x$getInv()
  if (!is.null(inv)) {
    # Cache hit: skip the costly solve()
    return(inv)
  }
  inv <- solve(x$get(), ...)
  x$setInv(inv)
  inv
}
| /cachematrix.R | no_license | abrady/ProgrammingAssignment2 | R | false | false | 1,046 | r | ## Put comments here that give an overall description of what your
## functions do
## Creates a matrix that will cache its inverse, a costly operation
# Construct a cache-aware matrix wrapper: a list of four accessor
# closures sharing 'x' (the matrix) and 'inv' (its cached inverse).
# Setting a new matrix invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # matrix changed, so drop the stale inverse
  }
  get <- function() x
  setInv <- function(new_inv) inv <<- new_inv
  getInv <- function() inv
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
# Find the inverse of the passed matrix and return it and cache it in the passed param
# note that x must be a value returned from 'makeCachedMatrix'
# The following function calculates the inverse of the special matrix
# created with 'makeCachedMatrix' However, it first checks to see if the
# inverse has already been calculated. If so, it gets the inverse from the
# cache and skips the computation. Otherwise, it calculates the inverse of
# the matrix and sets the value using the setInv function on x
# Return the inverse of 'x', computing it at most once.
#
# x: a cache matrix created by makeCacheMatrix (a list with get/getInv/
#    setInv closures).
# ...: further arguments forwarded to solve() (e.g. tol).
#      Fixed: the original declared '...' but never passed it on.
# Returns the inverse of x$get(), from cache when available.
cacheSolve <- function(x, ...) {
  inv <- x$getInv()
  if (!is.null(inv)) {
    # Cache hit: skip the costly solve()
    return(inv)
  }
  inv <- solve(x$get(), ...)
  x$setInv(inv)
  inv
}
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include amplify_service.R
NULL
#' Creates a new Amplify app
#'
#' @description
#' Creates a new Amplify app.
#'
#' @usage
#' amplify_create_app(name, description, repository, platform,
#' iamServiceRoleArn, oauthToken, accessToken, environmentVariables,
#' enableBranchAutoBuild, enableBranchAutoDeletion, enableBasicAuth,
#' basicAuthCredentials, customRules, tags, buildSpec, customHeaders,
#' enableAutoBranchCreation, autoBranchCreationPatterns,
#' autoBranchCreationConfig)
#'
#' @param name [required] The name for an Amplify app.
#' @param description The description for an Amplify app.
#' @param repository The repository for an Amplify app.
#' @param platform The platform or framework for an Amplify app.
#' @param iamServiceRoleArn The AWS Identity and Access Management (IAM) service role for an Amplify
#' app.
#' @param oauthToken The OAuth token for a third-party source control system for an Amplify
#' app. The OAuth token is used to create a webhook and a read-only deploy
#' key. The OAuth token is not stored.
#' @param accessToken The personal access token for a third-party source control system for an
#' Amplify app. The personal access token is used to create a webhook and a
#' read-only deploy key. The token is not stored.
#' @param environmentVariables The environment variables map for an Amplify app.
#' @param enableBranchAutoBuild Enables the auto building of branches for an Amplify app.
#' @param enableBranchAutoDeletion Automatically disconnects a branch in the Amplify Console when you
#' delete a branch from your Git repository.
#' @param enableBasicAuth Enables basic authorization for an Amplify app. This will apply to all
#' branches that are part of this app.
#' @param basicAuthCredentials The credentials for basic authorization for an Amplify app.
#' @param customRules The custom rewrite and redirect rules for an Amplify app.
#' @param tags The tag for an Amplify app.
#' @param buildSpec The build specification (build spec) for an Amplify app.
#' @param customHeaders The custom HTTP headers for an Amplify app.
#' @param enableAutoBranchCreation Enables automated branch creation for an Amplify app.
#' @param autoBranchCreationPatterns The automated branch creation glob patterns for an Amplify app.
#' @param autoBranchCreationConfig The automated branch creation configuration for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$create_app(
#' name = "string",
#' description = "string",
#' repository = "string",
#' platform = "WEB",
#' iamServiceRoleArn = "string",
#' oauthToken = "string",
#' accessToken = "string",
#' environmentVariables = list(
#' "string"
#' ),
#' enableBranchAutoBuild = TRUE|FALSE,
#' enableBranchAutoDeletion = TRUE|FALSE,
#' enableBasicAuth = TRUE|FALSE,
#' basicAuthCredentials = "string",
#' customRules = list(
#' list(
#' source = "string",
#' target = "string",
#' status = "string",
#' condition = "string"
#' )
#' ),
#' tags = list(
#' "string"
#' ),
#' buildSpec = "string",
#' customHeaders = "string",
#' enableAutoBranchCreation = TRUE|FALSE,
#' autoBranchCreationPatterns = list(
#' "string"
#' ),
#' autoBranchCreationConfig = list(
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' framework = "string",
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' buildSpec = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_app
# Generated operation wrapper (make.paws): issues the Amplify CreateApp call.
amplify_create_app <- function(name, description = NULL, repository = NULL, platform = NULL, iamServiceRoleArn = NULL, oauthToken = NULL, accessToken = NULL, environmentVariables = NULL, enableBranchAutoBuild = NULL, enableBranchAutoDeletion = NULL, enableBasicAuth = NULL, basicAuthCredentials = NULL, customRules = NULL, tags = NULL, buildSpec = NULL, customHeaders = NULL, enableAutoBranchCreation = NULL, autoBranchCreationPatterns = NULL, autoBranchCreationConfig = NULL) {
  # Describe the HTTP operation (verb + path) for this API call
  op <- new_operation(
    name = "CreateApp",
    http_method = "POST",
    http_path = "/apps",
    paginator = list()
  )
  # Marshal the R arguments into the request shape, build and send the
  # signed request, and return the parsed response
  input <- .amplify$create_app_input(name = name, description = description, repository = repository, platform = platform, iamServiceRoleArn = iamServiceRoleArn, oauthToken = oauthToken, accessToken = accessToken, environmentVariables = environmentVariables, enableBranchAutoBuild = enableBranchAutoBuild, enableBranchAutoDeletion = enableBranchAutoDeletion, enableBasicAuth = enableBasicAuth, basicAuthCredentials = basicAuthCredentials, customRules = customRules, tags = tags, buildSpec = buildSpec, customHeaders = customHeaders, enableAutoBranchCreation = enableAutoBranchCreation, autoBranchCreationPatterns = autoBranchCreationPatterns, autoBranchCreationConfig = autoBranchCreationConfig)
  output <- .amplify$create_app_output()
  config <- get_config()
  svc <- .amplify$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.amplify$operations$create_app <- amplify_create_app
#' Creates a new backend environment for an Amplify app
#'
#' @description
#' Creates a new backend environment for an Amplify app.
#'
#' @usage
#' amplify_create_backend_environment(appId, environmentName, stackName,
#' deploymentArtifacts)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param environmentName [required] The name for the backend environment.
#' @param stackName The AWS CloudFormation stack name of a backend environment.
#' @param deploymentArtifacts The name of deployment artifacts.
#'
#' @section Request syntax:
#' ```
#' svc$create_backend_environment(
#' appId = "string",
#' environmentName = "string",
#' stackName = "string",
#' deploymentArtifacts = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_backend_environment
# Generated operation wrapper (make.paws): issues the Amplify
# CreateBackendEnvironment call.
amplify_create_backend_environment <- function(appId, environmentName, stackName = NULL, deploymentArtifacts = NULL) {
  # Describe the HTTP operation (verb + path) for this API call
  op <- new_operation(
    name = "CreateBackendEnvironment",
    http_method = "POST",
    http_path = "/apps/{appId}/backendenvironments",
    paginator = list()
  )
  # Marshal arguments, build and send the signed request, return the response
  input <- .amplify$create_backend_environment_input(appId = appId, environmentName = environmentName, stackName = stackName, deploymentArtifacts = deploymentArtifacts)
  output <- .amplify$create_backend_environment_output()
  config <- get_config()
  svc <- .amplify$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.amplify$operations$create_backend_environment <- amplify_create_backend_environment
#' Creates a new branch for an Amplify app
#'
#' @description
#' Creates a new branch for an Amplify app.
#'
#' @usage
#' amplify_create_branch(appId, branchName, description, stage, framework,
#' enableNotification, enableAutoBuild, environmentVariables,
#' basicAuthCredentials, enableBasicAuth, enablePerformanceMode, tags,
#' buildSpec, ttl, displayName, enablePullRequestPreview,
#' pullRequestEnvironmentName, backendEnvironmentArn)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#' @param description The description for the branch.
#' @param stage Describes the current stage for the branch.
#' @param framework The framework for the branch.
#' @param enableNotification Enables notifications for the branch.
#' @param enableAutoBuild Enables auto building for the branch.
#' @param environmentVariables The environment variables for the branch.
#' @param basicAuthCredentials The basic authorization credentials for the branch.
#' @param enableBasicAuth Enables basic authorization for the branch.
#' @param enablePerformanceMode Enables performance mode for the branch.
#'
#' Performance mode optimizes for faster hosting performance by keeping
#' content cached at the edge for a longer interval. When performance mode
#' is enabled, hosting configuration or code changes can take up to 10
#' minutes to roll out.
#' @param tags The tag for the branch.
#' @param buildSpec The build specification (build spec) for the branch.
#' @param ttl The content Time To Live (TTL) for the website in seconds.
#' @param displayName The display name for a branch. This is used as the default domain
#' prefix.
#' @param enablePullRequestPreview Enables pull request previews for this branch.
#' @param pullRequestEnvironmentName The Amplify environment name for the pull request.
#' @param backendEnvironmentArn The Amazon Resource Name (ARN) for a backend environment that is part of
#' an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$create_branch(
#' appId = "string",
#' branchName = "string",
#' description = "string",
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' framework = "string",
#' enableNotification = TRUE|FALSE,
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' tags = list(
#' "string"
#' ),
#' buildSpec = "string",
#' ttl = "string",
#' displayName = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string",
#' backendEnvironmentArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_branch
# Generated operation wrapper (make.paws): issues the Amplify CreateBranch call.
amplify_create_branch <- function(appId, branchName, description = NULL, stage = NULL, framework = NULL, enableNotification = NULL, enableAutoBuild = NULL, environmentVariables = NULL, basicAuthCredentials = NULL, enableBasicAuth = NULL, enablePerformanceMode = NULL, tags = NULL, buildSpec = NULL, ttl = NULL, displayName = NULL, enablePullRequestPreview = NULL, pullRequestEnvironmentName = NULL, backendEnvironmentArn = NULL) {
  # Describe the HTTP operation (verb + path) for this API call
  op <- new_operation(
    name = "CreateBranch",
    http_method = "POST",
    http_path = "/apps/{appId}/branches",
    paginator = list()
  )
  # Marshal arguments, build and send the signed request, return the response
  input <- .amplify$create_branch_input(appId = appId, branchName = branchName, description = description, stage = stage, framework = framework, enableNotification = enableNotification, enableAutoBuild = enableAutoBuild, environmentVariables = environmentVariables, basicAuthCredentials = basicAuthCredentials, enableBasicAuth = enableBasicAuth, enablePerformanceMode = enablePerformanceMode, tags = tags, buildSpec = buildSpec, ttl = ttl, displayName = displayName, enablePullRequestPreview = enablePullRequestPreview, pullRequestEnvironmentName = pullRequestEnvironmentName, backendEnvironmentArn = backendEnvironmentArn)
  output <- .amplify$create_branch_output()
  config <- get_config()
  svc <- .amplify$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.amplify$operations$create_branch <- amplify_create_branch
#' Creates a deployment for a manually deployed Amplify app
#'
#' @description
#' Creates a deployment for a manually deployed Amplify app. Manually
#' deployed apps are not connected to a repository.
#'
#' @usage
#' amplify_create_deployment(appId, branchName, fileMap)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param fileMap An optional file map that contains the file name as the key and the file
#' content md5 hash as the value. If this argument is provided, the service
#' will generate a unique upload URL per file. Otherwise, the service will
#' only generate a single upload URL for the zipped files.
#'
#' @section Request syntax:
#' ```
#' svc$create_deployment(
#' appId = "string",
#' branchName = "string",
#' fileMap = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_deployment
# Generated operation wrapper (make.paws): issues the Amplify
# CreateDeployment call (for manually deployed apps).
amplify_create_deployment <- function(appId, branchName, fileMap = NULL) {
  # Describe the HTTP operation (verb + path) for this API call
  op <- new_operation(
    name = "CreateDeployment",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}/deployments",
    paginator = list()
  )
  # Marshal arguments, build and send the signed request, return the response
  input <- .amplify$create_deployment_input(appId = appId, branchName = branchName, fileMap = fileMap)
  output <- .amplify$create_deployment_output()
  config <- get_config()
  svc <- .amplify$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.amplify$operations$create_deployment <- amplify_create_deployment
#' Creates a new domain association for an Amplify app
#'
#' @description
#' Creates a new domain association for an Amplify app. This action
#' associates a custom domain with the Amplify app
#'
#' @usage
#' amplify_create_domain_association(appId, domainName,
#' enableAutoSubDomain, subDomainSettings, autoSubDomainCreationPatterns,
#' autoSubDomainIAMRole)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param domainName [required] The domain name for the domain association.
#' @param enableAutoSubDomain Enables the automated creation of subdomains for branches.
#' @param subDomainSettings [required] The setting for the subdomain.
#' @param autoSubDomainCreationPatterns Sets the branch patterns for automatic subdomain creation.
#' @param autoSubDomainIAMRole The required AWS Identity and Access Management (IAM) service role for
#' the Amazon Resource Name (ARN) for automatically creating subdomains.
#'
#' @section Request syntax:
#' ```
#' svc$create_domain_association(
#' appId = "string",
#' domainName = "string",
#' enableAutoSubDomain = TRUE|FALSE,
#' subDomainSettings = list(
#' list(
#' prefix = "string",
#' branchName = "string"
#' )
#' ),
#' autoSubDomainCreationPatterns = list(
#' "string"
#' ),
#' autoSubDomainIAMRole = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_domain_association
# Generated operation wrapper (make.paws): issues the Amplify
# CreateDomainAssociation call.
amplify_create_domain_association <- function(appId, domainName, enableAutoSubDomain = NULL, subDomainSettings, autoSubDomainCreationPatterns = NULL, autoSubDomainIAMRole = NULL) {
  # Describe the HTTP operation (verb + path) for this API call
  op <- new_operation(
    name = "CreateDomainAssociation",
    http_method = "POST",
    http_path = "/apps/{appId}/domains",
    paginator = list()
  )
  # Marshal arguments, build and send the signed request, return the response
  input <- .amplify$create_domain_association_input(appId = appId, domainName = domainName, enableAutoSubDomain = enableAutoSubDomain, subDomainSettings = subDomainSettings, autoSubDomainCreationPatterns = autoSubDomainCreationPatterns, autoSubDomainIAMRole = autoSubDomainIAMRole)
  output <- .amplify$create_domain_association_output()
  config <- get_config()
  svc <- .amplify$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.amplify$operations$create_domain_association <- amplify_create_domain_association
#' Creates a new webhook on an Amplify app
#'
#' @description
#' Creates a new webhook on an Amplify app.
#'
#' @usage
#' amplify_create_webhook(appId, branchName, description)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for a branch that is part of an Amplify app.
#' @param description The description for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$create_webhook(
#' appId = "string",
#' branchName = "string",
#' description = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_webhook
# Generated operation wrapper (make.paws): issues the Amplify CreateWebhook call.
amplify_create_webhook <- function(appId, branchName, description = NULL) {
  # Describe the HTTP operation (verb + path) for this API call
  op <- new_operation(
    name = "CreateWebhook",
    http_method = "POST",
    http_path = "/apps/{appId}/webhooks",
    paginator = list()
  )
  # Marshal arguments, build and send the signed request, return the response
  input <- .amplify$create_webhook_input(appId = appId, branchName = branchName, description = description)
  output <- .amplify$create_webhook_output()
  config <- get_config()
  svc <- .amplify$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.amplify$operations$create_webhook <- amplify_create_webhook
#' Deletes an existing Amplify app specified by an app ID
#'
#' @description
#' Deletes an existing Amplify app specified by an app ID.
#'
#' @usage
#' amplify_delete_app(appId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$delete_app(
#' appId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_app
amplify_delete_app <- function(appId) {
  # Describe the DeleteApp API call (DELETE /apps/{appId}).
  operation <- new_operation(
    name = "DeleteApp",
    http_method = "DELETE",
    http_path = "/apps/{appId}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$delete_app_input(appId = appId)
  req_output <- .amplify$delete_app_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$delete_app <- amplify_delete_app
#' Deletes a backend environment for an Amplify app
#'
#' @description
#' Deletes a backend environment for an Amplify app.
#'
#' @usage
#' amplify_delete_backend_environment(appId, environmentName)
#'
#' @param appId [required] The unique ID of an Amplify app.
#' @param environmentName [required] The name of a backend environment of an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$delete_backend_environment(
#' appId = "string",
#' environmentName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_backend_environment
amplify_delete_backend_environment <- function(appId, environmentName) {
  # Describe the DeleteBackendEnvironment API call.
  operation <- new_operation(
    name = "DeleteBackendEnvironment",
    http_method = "DELETE",
    http_path = "/apps/{appId}/backendenvironments/{environmentName}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$delete_backend_environment_input(appId = appId, environmentName = environmentName)
  req_output <- .amplify$delete_backend_environment_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$delete_backend_environment <- amplify_delete_backend_environment
#' Deletes a branch for an Amplify app
#'
#' @description
#' Deletes a branch for an Amplify app.
#'
#' @usage
#' amplify_delete_branch(appId, branchName)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#'
#' @section Request syntax:
#' ```
#' svc$delete_branch(
#' appId = "string",
#' branchName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_branch
amplify_delete_branch <- function(appId, branchName) {
  # Describe the DeleteBranch API call.
  operation <- new_operation(
    name = "DeleteBranch",
    http_method = "DELETE",
    http_path = "/apps/{appId}/branches/{branchName}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$delete_branch_input(appId = appId, branchName = branchName)
  req_output <- .amplify$delete_branch_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$delete_branch <- amplify_delete_branch
#' Deletes a domain association for an Amplify app
#'
#' @description
#' Deletes a domain association for an Amplify app.
#'
#' @usage
#' amplify_delete_domain_association(appId, domainName)
#'
#' @param appId [required] The unique id for an Amplify app.
#' @param domainName [required] The name of the domain.
#'
#' @section Request syntax:
#' ```
#' svc$delete_domain_association(
#' appId = "string",
#' domainName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_domain_association
amplify_delete_domain_association <- function(appId, domainName) {
  # Describe the DeleteDomainAssociation API call.
  operation <- new_operation(
    name = "DeleteDomainAssociation",
    http_method = "DELETE",
    http_path = "/apps/{appId}/domains/{domainName}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$delete_domain_association_input(appId = appId, domainName = domainName)
  req_output <- .amplify$delete_domain_association_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$delete_domain_association <- amplify_delete_domain_association
#' Deletes a job for a branch of an Amplify app
#'
#' @description
#' Deletes a job for a branch of an Amplify app.
#'
#' @usage
#' amplify_delete_job(appId, branchName, jobId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param jobId [required] The unique ID for the job.
#'
#' @section Request syntax:
#' ```
#' svc$delete_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_job
amplify_delete_job <- function(appId, branchName, jobId) {
  # Describe the DeleteJob API call.
  operation <- new_operation(
    name = "DeleteJob",
    http_method = "DELETE",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$delete_job_input(appId = appId, branchName = branchName, jobId = jobId)
  req_output <- .amplify$delete_job_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$delete_job <- amplify_delete_job
#' Deletes a webhook
#'
#' @description
#' Deletes a webhook.
#'
#' @usage
#' amplify_delete_webhook(webhookId)
#'
#' @param webhookId [required] The unique ID for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$delete_webhook(
#' webhookId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_webhook
amplify_delete_webhook <- function(webhookId) {
  # Describe the DeleteWebhook API call (DELETE /webhooks/{webhookId}).
  operation <- new_operation(
    name = "DeleteWebhook",
    http_method = "DELETE",
    http_path = "/webhooks/{webhookId}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$delete_webhook_input(webhookId = webhookId)
  req_output <- .amplify$delete_webhook_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$delete_webhook <- amplify_delete_webhook
#' Returns the website access logs for a specific time range using a
#' presigned URL
#'
#' @description
#' Returns the website access logs for a specific time range using a
#' presigned URL.
#'
#' @usage
#' amplify_generate_access_logs(startTime, endTime, domainName, appId)
#'
#' @param startTime The time at which the logs should start. The time range specified is
#' inclusive of the start time.
#' @param endTime The time at which the logs should end. The time range specified is
#' inclusive of the end time.
#' @param domainName [required] The name of the domain.
#' @param appId [required] The unique ID for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$generate_access_logs(
#' startTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' endTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' domainName = "string",
#' appId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_generate_access_logs
amplify_generate_access_logs <- function(startTime = NULL, endTime = NULL, domainName, appId) {
  # Describe the GenerateAccessLogs API call (POST /apps/{appId}/accesslogs).
  operation <- new_operation(
    name = "GenerateAccessLogs",
    http_method = "POST",
    http_path = "/apps/{appId}/accesslogs",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$generate_access_logs_input(startTime = startTime, endTime = endTime, domainName = domainName, appId = appId)
  req_output <- .amplify$generate_access_logs_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$generate_access_logs <- amplify_generate_access_logs
#' Returns an existing Amplify app by appID
#'
#' @description
#' Returns an existing Amplify app by appID.
#'
#' @usage
#' amplify_get_app(appId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$get_app(
#' appId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_app
amplify_get_app <- function(appId) {
  # Describe the GetApp API call (GET /apps/{appId}).
  operation <- new_operation(
    name = "GetApp",
    http_method = "GET",
    http_path = "/apps/{appId}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$get_app_input(appId = appId)
  req_output <- .amplify$get_app_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$get_app <- amplify_get_app
#' Returns the artifact info that corresponds to an artifact id
#'
#' @description
#' Returns the artifact info that corresponds to an artifact id.
#'
#' @usage
#' amplify_get_artifact_url(artifactId)
#'
#' @param artifactId [required] The unique ID for an artifact.
#'
#' @section Request syntax:
#' ```
#' svc$get_artifact_url(
#' artifactId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_artifact_url
amplify_get_artifact_url <- function(artifactId) {
  # Describe the GetArtifactUrl API call (GET /artifacts/{artifactId}).
  operation <- new_operation(
    name = "GetArtifactUrl",
    http_method = "GET",
    http_path = "/artifacts/{artifactId}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$get_artifact_url_input(artifactId = artifactId)
  req_output <- .amplify$get_artifact_url_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$get_artifact_url <- amplify_get_artifact_url
#' Returns a backend environment for an Amplify app
#'
#' @description
#' Returns a backend environment for an Amplify app.
#'
#' @usage
#' amplify_get_backend_environment(appId, environmentName)
#'
#' @param appId [required] The unique id for an Amplify app.
#' @param environmentName [required] The name for the backend environment.
#'
#' @section Request syntax:
#' ```
#' svc$get_backend_environment(
#' appId = "string",
#' environmentName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_backend_environment
amplify_get_backend_environment <- function(appId, environmentName) {
  # Describe the GetBackendEnvironment API call.
  operation <- new_operation(
    name = "GetBackendEnvironment",
    http_method = "GET",
    http_path = "/apps/{appId}/backendenvironments/{environmentName}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$get_backend_environment_input(appId = appId, environmentName = environmentName)
  req_output <- .amplify$get_backend_environment_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$get_backend_environment <- amplify_get_backend_environment
#' Returns a branch for an Amplify app
#'
#' @description
#' Returns a branch for an Amplify app.
#'
#' @usage
#' amplify_get_branch(appId, branchName)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#'
#' @section Request syntax:
#' ```
#' svc$get_branch(
#' appId = "string",
#' branchName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_branch
amplify_get_branch <- function(appId, branchName) {
  # Describe the GetBranch API call.
  operation <- new_operation(
    name = "GetBranch",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$get_branch_input(appId = appId, branchName = branchName)
  req_output <- .amplify$get_branch_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$get_branch <- amplify_get_branch
#' Returns the domain information for an Amplify app
#'
#' @description
#' Returns the domain information for an Amplify app.
#'
#' @usage
#' amplify_get_domain_association(appId, domainName)
#'
#' @param appId [required] The unique id for an Amplify app.
#' @param domainName [required] The name of the domain.
#'
#' @section Request syntax:
#' ```
#' svc$get_domain_association(
#' appId = "string",
#' domainName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_domain_association
amplify_get_domain_association <- function(appId, domainName) {
  # Describe the GetDomainAssociation API call.
  operation <- new_operation(
    name = "GetDomainAssociation",
    http_method = "GET",
    http_path = "/apps/{appId}/domains/{domainName}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$get_domain_association_input(appId = appId, domainName = domainName)
  req_output <- .amplify$get_domain_association_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$get_domain_association <- amplify_get_domain_association
#' Returns a job for a branch of an Amplify app
#'
#' @description
#' Returns a job for a branch of an Amplify app.
#'
#' @usage
#' amplify_get_job(appId, branchName, jobId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The branch name for the job.
#' @param jobId [required] The unique ID for the job.
#'
#' @section Request syntax:
#' ```
#' svc$get_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_job
amplify_get_job <- function(appId, branchName, jobId) {
  # Describe the GetJob API call.
  operation <- new_operation(
    name = "GetJob",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$get_job_input(appId = appId, branchName = branchName, jobId = jobId)
  req_output <- .amplify$get_job_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$get_job <- amplify_get_job
#' Returns the webhook information that corresponds to a specified webhook
#' ID
#'
#' @description
#' Returns the webhook information that corresponds to a specified webhook
#' ID.
#'
#' @usage
#' amplify_get_webhook(webhookId)
#'
#' @param webhookId [required] The unique ID for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$get_webhook(
#' webhookId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_webhook
amplify_get_webhook <- function(webhookId) {
  # Describe the GetWebhook API call (GET /webhooks/{webhookId}).
  operation <- new_operation(
    name = "GetWebhook",
    http_method = "GET",
    http_path = "/webhooks/{webhookId}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$get_webhook_input(webhookId = webhookId)
  req_output <- .amplify$get_webhook_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$get_webhook <- amplify_get_webhook
#' Returns a list of the existing Amplify apps
#'
#' @description
#' Returns a list of the existing Amplify apps.
#'
#' @usage
#' amplify_list_apps(nextToken, maxResults)
#'
#' @param nextToken A pagination token. If non-null, the pagination token is returned in a
#' result. Pass its value in another request to retrieve more entries.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_apps(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_apps
amplify_list_apps <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListApps API call (GET /apps).
  operation <- new_operation(
    name = "ListApps",
    http_method = "GET",
    http_path = "/apps",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_apps_input(nextToken = nextToken, maxResults = maxResults)
  req_output <- .amplify$list_apps_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_apps <- amplify_list_apps
#' Returns a list of artifacts for a specified app, branch, and job
#'
#' @description
#' Returns a list of artifacts for a specified app, branch, and job.
#'
#' @usage
#' amplify_list_artifacts(appId, branchName, jobId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name of a branch that is part of an Amplify app.
#' @param jobId [required] The unique ID for a job.
#' @param nextToken A pagination token. Set to null to start listing artifacts from start.
#' If a non-null pagination token is returned in a result, pass its value
#' in here to list more artifacts.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_artifacts(
#' appId = "string",
#' branchName = "string",
#' jobId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_artifacts
amplify_list_artifacts <- function(appId, branchName, jobId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListArtifacts API call.
  operation <- new_operation(
    name = "ListArtifacts",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}/artifacts",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_artifacts_input(appId = appId, branchName = branchName, jobId = jobId, nextToken = nextToken, maxResults = maxResults)
  req_output <- .amplify$list_artifacts_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_artifacts <- amplify_list_artifacts
#' Lists the backend environments for an Amplify app
#'
#' @description
#' Lists the backend environments for an Amplify app.
#'
#' @usage
#' amplify_list_backend_environments(appId, environmentName, nextToken,
#' maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param environmentName The name of the backend environment.
#' @param nextToken A pagination token. Set to null to start listing backend environments
#' from the start. If a non-null pagination token is returned in a result,
#' pass its value in here to list more backend environments.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_backend_environments(
#' appId = "string",
#' environmentName = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_backend_environments
amplify_list_backend_environments <- function(appId, environmentName = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ListBackendEnvironments API call.
  operation <- new_operation(
    name = "ListBackendEnvironments",
    http_method = "GET",
    http_path = "/apps/{appId}/backendenvironments",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_backend_environments_input(appId = appId, environmentName = environmentName, nextToken = nextToken, maxResults = maxResults)
  req_output <- .amplify$list_backend_environments_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_backend_environments <- amplify_list_backend_environments
#' Lists the branches of an Amplify app
#'
#' @description
#' Lists the branches of an Amplify app.
#'
#' @usage
#' amplify_list_branches(appId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param nextToken A pagination token. Set to null to start listing branches from the
#' start. If a non-null pagination token is returned in a result, pass its
#' value in here to list more branches.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_branches(
#' appId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_branches
amplify_list_branches <- function(appId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListBranches API call (GET /apps/{appId}/branches).
  operation <- new_operation(
    name = "ListBranches",
    http_method = "GET",
    http_path = "/apps/{appId}/branches",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_branches_input(appId = appId, nextToken = nextToken, maxResults = maxResults)
  req_output <- .amplify$list_branches_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_branches <- amplify_list_branches
#' Returns the domain associations for an Amplify app
#'
#' @description
#' Returns the domain associations for an Amplify app.
#'
#' @usage
#' amplify_list_domain_associations(appId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param nextToken A pagination token. Set to null to start listing apps from the start. If
#' non-null, a pagination token is returned in a result. Pass its value in
#' here to list more projects.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_domain_associations(
#' appId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_domain_associations
amplify_list_domain_associations <- function(appId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListDomainAssociations API call (GET /apps/{appId}/domains).
  operation <- new_operation(
    name = "ListDomainAssociations",
    http_method = "GET",
    http_path = "/apps/{appId}/domains",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_domain_associations_input(appId = appId, nextToken = nextToken, maxResults = maxResults)
  req_output <- .amplify$list_domain_associations_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_domain_associations <- amplify_list_domain_associations
#' Lists the jobs for a branch of an Amplify app
#'
#' @description
#' Lists the jobs for a branch of an Amplify app.
#'
#' @usage
#' amplify_list_jobs(appId, branchName, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for a branch.
#' @param nextToken A pagination token. Set to null to start listing steps from the start.
#' If a non-null pagination token is returned in a result, pass its value
#' in here to list more steps.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_jobs(
#' appId = "string",
#' branchName = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_jobs
amplify_list_jobs <- function(appId, branchName, nextToken = NULL, maxResults = NULL) {
  # Describe the ListJobs API call.
  operation <- new_operation(
    name = "ListJobs",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}/jobs",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_jobs_input(appId = appId, branchName = branchName, nextToken = nextToken, maxResults = maxResults)
  req_output <- .amplify$list_jobs_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_jobs <- amplify_list_jobs
#' Returns a list of tags for a specified Amazon Resource Name (ARN)
#'
#' @description
#' Returns a list of tags for a specified Amazon Resource Name (ARN).
#'
#' @usage
#' amplify_list_tags_for_resource(resourceArn)
#'
#' @param resourceArn [required] The Amazon Resource Name (ARN) to use to list tags.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' resourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_tags_for_resource
amplify_list_tags_for_resource <- function(resourceArn) {
  # Describe the ListTagsForResource API call (GET /tags/{resourceArn}).
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags/{resourceArn}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_tags_for_resource_input(resourceArn = resourceArn)
  req_output <- .amplify$list_tags_for_resource_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_tags_for_resource <- amplify_list_tags_for_resource
#' Returns a list of webhooks for an Amplify app
#'
#' @description
#' Returns a list of webhooks for an Amplify app.
#'
#' @usage
#' amplify_list_webhooks(appId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param nextToken A pagination token. Set to null to start listing webhooks from the
#' start. If non-null, the pagination token is returned in a result. Pass
#' its value in here to list more webhooks.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_webhooks(
#' appId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_webhooks
amplify_list_webhooks <- function(appId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListWebhooks API call (GET /apps/{appId}/webhooks).
  operation <- new_operation(
    name = "ListWebhooks",
    http_method = "GET",
    http_path = "/apps/{appId}/webhooks",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$list_webhooks_input(appId = appId, nextToken = nextToken, maxResults = maxResults)
  req_output <- .amplify$list_webhooks_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$list_webhooks <- amplify_list_webhooks
#' Starts a deployment for a manually deployed app
#'
#' @description
#' Starts a deployment for a manually deployed app. Manually deployed apps
#' are not connected to a repository.
#'
#' @usage
#' amplify_start_deployment(appId, branchName, jobId, sourceUrl)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param jobId The job ID for this deployment, generated by the create deployment
#' request.
#' @param sourceUrl The source URL for this deployment, used when calling start deployment
#' without create deployment. The source URL can be any HTTP GET URL that
#' is publicly accessible and downloads a single .zip file.
#'
#' @section Request syntax:
#' ```
#' svc$start_deployment(
#' appId = "string",
#' branchName = "string",
#' jobId = "string",
#' sourceUrl = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_start_deployment
amplify_start_deployment <- function(appId, branchName, jobId = NULL, sourceUrl = NULL) {
  # Describe the StartDeployment API call.
  operation <- new_operation(
    name = "StartDeployment",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}/deployments/start",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$start_deployment_input(appId = appId, branchName = branchName, jobId = jobId, sourceUrl = sourceUrl)
  req_output <- .amplify$start_deployment_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$start_deployment <- amplify_start_deployment
#' Starts a new job for a branch of an Amplify app
#'
#' @description
#' Starts a new job for a branch of an Amplify app.
#'
#' @usage
#' amplify_start_job(appId, branchName, jobId, jobType, jobReason,
#' commitId, commitMessage, commitTime)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The branch name for the job.
#' @param jobId The unique ID for an existing job. This is required if the value of
#' `jobType` is `RETRY`.
#' @param jobType [required] Describes the type for the job. The job type `RELEASE` starts a new job
#' with the latest change from the specified branch. This value is
#' available only for apps that are connected to a repository. The job type
#' `RETRY` retries an existing job. If the job type value is `RETRY`, the
#' `jobId` is also required.
#' @param jobReason A descriptive reason for starting this job.
#' @param commitId The commit ID from a third-party repository provider for the job.
#' @param commitMessage The commit message from a third-party repository provider for the job.
#' @param commitTime The commit date and time for the job.
#'
#' @section Request syntax:
#' ```
#' svc$start_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string",
#' jobType = "RELEASE"|"RETRY"|"MANUAL"|"WEB_HOOK",
#' jobReason = "string",
#' commitId = "string",
#' commitMessage = "string",
#' commitTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_start_job
amplify_start_job <- function(appId, branchName, jobId = NULL, jobType, jobReason = NULL, commitId = NULL, commitMessage = NULL, commitTime = NULL) {
  # Describe the StartJob API call.
  operation <- new_operation(
    name = "StartJob",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}/jobs",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$start_job_input(appId = appId, branchName = branchName, jobId = jobId, jobType = jobType, jobReason = jobReason, commitId = commitId, commitMessage = commitMessage, commitTime = commitTime)
  req_output <- .amplify$start_job_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$start_job <- amplify_start_job
#' Stops a job that is in progress for a branch of an Amplify app
#'
#' @description
#' Stops a job that is in progress for a branch of an Amplify app.
#'
#' @usage
#' amplify_stop_job(appId, branchName, jobId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param jobId [required] The unique id for the job.
#'
#' @section Request syntax:
#' ```
#' svc$stop_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_stop_job
amplify_stop_job <- function(appId, branchName, jobId) {
  # Describe the StopJob API call.
  operation <- new_operation(
    name = "StopJob",
    http_method = "DELETE",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}/stop",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$stop_job_input(appId = appId, branchName = branchName, jobId = jobId)
  req_output <- .amplify$stop_job_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$stop_job <- amplify_stop_job
#' Tags the resource with a tag key and value
#'
#' @description
#' Tags the resource with a tag key and value.
#'
#' @usage
#' amplify_tag_resource(resourceArn, tags)
#'
#' @param resourceArn [required] The Amazon Resource Name (ARN) to use to tag a resource.
#' @param tags [required] The tags used to tag the resource.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' resourceArn = "string",
#' tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_tag_resource
amplify_tag_resource <- function(resourceArn, tags) {
  # Describe the TagResource API call (POST /tags/{resourceArn}).
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags/{resourceArn}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$tag_resource_input(resourceArn = resourceArn, tags = tags)
  req_output <- .amplify$tag_resource_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$tag_resource <- amplify_tag_resource
#' Untags a resource with a specified Amazon Resource Name (ARN)
#'
#' @description
#' Untags a resource with a specified Amazon Resource Name (ARN).
#'
#' @usage
#' amplify_untag_resource(resourceArn, tagKeys)
#'
#' @param resourceArn [required] The Amazon Resource Name (ARN) to use to untag a resource.
#' @param tagKeys [required] The tag keys to use to untag a resource.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' resourceArn = "string",
#' tagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_untag_resource
amplify_untag_resource <- function(resourceArn, tagKeys) {
  # Describe the UntagResource API call (DELETE /tags/{resourceArn}).
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags/{resourceArn}",
    paginator = list()
  )
  # Serialize the request, dispatch it, and return the parsed response.
  req_input <- .amplify$untag_resource_input(resourceArn = resourceArn, tagKeys = tagKeys)
  req_output <- .amplify$untag_resource_output()
  svc <- .amplify$service(get_config())
  send_request(new_request(svc, operation, req_input, req_output))
}
.amplify$operations$untag_resource <- amplify_untag_resource
#' Updates an existing Amplify app
#'
#' @description
#' Updates an existing Amplify app.
#'
#' @usage
#' amplify_update_app(appId, name, description, platform,
#' iamServiceRoleArn, environmentVariables, enableBranchAutoBuild,
#' enableBranchAutoDeletion, enableBasicAuth, basicAuthCredentials,
#' customRules, buildSpec, customHeaders, enableAutoBranchCreation,
#' autoBranchCreationPatterns, autoBranchCreationConfig, repository,
#' oauthToken, accessToken)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param name The name for an Amplify app.
#' @param description The description for an Amplify app.
#' @param platform The platform for an Amplify app.
#' @param iamServiceRoleArn The AWS Identity and Access Management (IAM) service role for an Amplify
#' app.
#' @param environmentVariables The environment variables for an Amplify app.
#' @param enableBranchAutoBuild Enables branch auto-building for an Amplify app.
#' @param enableBranchAutoDeletion Automatically disconnects a branch in the Amplify Console when you
#' delete a branch from your Git repository.
#' @param enableBasicAuth Enables basic authorization for an Amplify app.
#' @param basicAuthCredentials The basic authorization credentials for an Amplify app.
#' @param customRules The custom redirect and rewrite rules for an Amplify app.
#' @param buildSpec The build specification (build spec) for an Amplify app.
#' @param customHeaders The custom HTTP headers for an Amplify app.
#' @param enableAutoBranchCreation Enables automated branch creation for an Amplify app.
#' @param autoBranchCreationPatterns Describes the automated branch creation glob patterns for an Amplify
#' app.
#' @param autoBranchCreationConfig The automated branch creation configuration for an Amplify app.
#' @param repository The name of the repository for an Amplify app
#' @param oauthToken The OAuth token for a third-party source control system for an Amplify
#' app. The token is used to create a webhook and a read-only deploy key.
#' The OAuth token is not stored.
#' @param accessToken The personal access token for a third-party source control system for an
#' Amplify app. The token is used to create webhook and a read-only deploy
#' key. The token is not stored.
#'
#' @section Request syntax:
#' ```
#' svc$update_app(
#' appId = "string",
#' name = "string",
#' description = "string",
#' platform = "WEB",
#' iamServiceRoleArn = "string",
#' environmentVariables = list(
#' "string"
#' ),
#' enableBranchAutoBuild = TRUE|FALSE,
#' enableBranchAutoDeletion = TRUE|FALSE,
#' enableBasicAuth = TRUE|FALSE,
#' basicAuthCredentials = "string",
#' customRules = list(
#' list(
#' source = "string",
#' target = "string",
#' status = "string",
#' condition = "string"
#' )
#' ),
#' buildSpec = "string",
#' customHeaders = "string",
#' enableAutoBranchCreation = TRUE|FALSE,
#' autoBranchCreationPatterns = list(
#' "string"
#' ),
#' autoBranchCreationConfig = list(
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' framework = "string",
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' buildSpec = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string"
#' ),
#' repository = "string",
#' oauthToken = "string",
#' accessToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_app
amplify_update_app <- function(appId, name = NULL, description = NULL, platform = NULL, iamServiceRoleArn = NULL, environmentVariables = NULL, enableBranchAutoBuild = NULL, enableBranchAutoDeletion = NULL, enableBasicAuth = NULL, basicAuthCredentials = NULL, customRules = NULL, buildSpec = NULL, customHeaders = NULL, enableAutoBranchCreation = NULL, autoBranchCreationPatterns = NULL, autoBranchCreationConfig = NULL, repository = NULL, oauthToken = NULL, accessToken = NULL) {
  # Describe the UpdateApp HTTP operation.
  operation <- new_operation(
    name = "UpdateApp",
    http_method = "POST",
    http_path = "/apps/{appId}",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$update_app_input(appId = appId, name = name, description = description, platform = platform, iamServiceRoleArn = iamServiceRoleArn, environmentVariables = environmentVariables, enableBranchAutoBuild = enableBranchAutoBuild, enableBranchAutoDeletion = enableBranchAutoDeletion, enableBasicAuth = enableBasicAuth, basicAuthCredentials = basicAuthCredentials, customRules = customRules, buildSpec = buildSpec, customHeaders = customHeaders, enableAutoBranchCreation = enableAutoBranchCreation, autoBranchCreationPatterns = autoBranchCreationPatterns, autoBranchCreationConfig = autoBranchCreationConfig, repository = repository, oauthToken = oauthToken, accessToken = accessToken)
  response_shape <- .amplify$update_app_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$update_app <- amplify_update_app
#' Updates a branch for an Amplify app
#'
#' @description
#' Updates a branch for an Amplify app.
#'
#' @usage
#' amplify_update_branch(appId, branchName, description, framework, stage,
#' enableNotification, enableAutoBuild, environmentVariables,
#' basicAuthCredentials, enableBasicAuth, enablePerformanceMode, buildSpec,
#' ttl, displayName, enablePullRequestPreview, pullRequestEnvironmentName,
#' backendEnvironmentArn)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#' @param description The description for the branch.
#' @param framework The framework for the branch.
#' @param stage Describes the current stage for the branch.
#' @param enableNotification Enables notifications for the branch.
#' @param enableAutoBuild Enables auto building for the branch.
#' @param environmentVariables The environment variables for the branch.
#' @param basicAuthCredentials The basic authorization credentials for the branch.
#' @param enableBasicAuth Enables basic authorization for the branch.
#' @param enablePerformanceMode Enables performance mode for the branch.
#'
#' Performance mode optimizes for faster hosting performance by keeping
#' content cached at the edge for a longer interval. When performance mode
#' is enabled, hosting configuration or code changes can take up to 10
#' minutes to roll out.
#' @param buildSpec The build specification (build spec) for the branch.
#' @param ttl The content Time to Live (TTL) for the website in seconds.
#' @param displayName The display name for a branch. This is used as the default domain
#' prefix.
#' @param enablePullRequestPreview Enables pull request previews for this branch.
#' @param pullRequestEnvironmentName The Amplify environment name for the pull request.
#' @param backendEnvironmentArn The Amazon Resource Name (ARN) for a backend environment that is part of
#' an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$update_branch(
#' appId = "string",
#' branchName = "string",
#' description = "string",
#' framework = "string",
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' enableNotification = TRUE|FALSE,
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' buildSpec = "string",
#' ttl = "string",
#' displayName = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string",
#' backendEnvironmentArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_branch
amplify_update_branch <- function(appId, branchName, description = NULL, framework = NULL, stage = NULL, enableNotification = NULL, enableAutoBuild = NULL, environmentVariables = NULL, basicAuthCredentials = NULL, enableBasicAuth = NULL, enablePerformanceMode = NULL, buildSpec = NULL, ttl = NULL, displayName = NULL, enablePullRequestPreview = NULL, pullRequestEnvironmentName = NULL, backendEnvironmentArn = NULL) {
  # Describe the UpdateBranch HTTP operation.
  operation <- new_operation(
    name = "UpdateBranch",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$update_branch_input(appId = appId, branchName = branchName, description = description, framework = framework, stage = stage, enableNotification = enableNotification, enableAutoBuild = enableAutoBuild, environmentVariables = environmentVariables, basicAuthCredentials = basicAuthCredentials, enableBasicAuth = enableBasicAuth, enablePerformanceMode = enablePerformanceMode, buildSpec = buildSpec, ttl = ttl, displayName = displayName, enablePullRequestPreview = enablePullRequestPreview, pullRequestEnvironmentName = pullRequestEnvironmentName, backendEnvironmentArn = backendEnvironmentArn)
  response_shape <- .amplify$update_branch_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$update_branch <- amplify_update_branch
#' Updates a domain association for an Amplify app
#'
#' @description
#' Updates a domain association for an Amplify app.
#'
#' @usage
#' amplify_update_domain_association(appId, domainName,
#' enableAutoSubDomain, subDomainSettings, autoSubDomainCreationPatterns,
#' autoSubDomainIAMRole)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param domainName [required] The name of the domain.
#' @param enableAutoSubDomain Enables the automated creation of subdomains for branches.
#' @param subDomainSettings [required] Describes the settings for the subdomain.
#' @param autoSubDomainCreationPatterns Sets the branch patterns for automatic subdomain creation.
#' @param autoSubDomainIAMRole The required AWS Identity and Access Management (IAM) service role for
#' the Amazon Resource Name (ARN) for automatically creating subdomains.
#'
#' @section Request syntax:
#' ```
#' svc$update_domain_association(
#' appId = "string",
#' domainName = "string",
#' enableAutoSubDomain = TRUE|FALSE,
#' subDomainSettings = list(
#' list(
#' prefix = "string",
#' branchName = "string"
#' )
#' ),
#' autoSubDomainCreationPatterns = list(
#' "string"
#' ),
#' autoSubDomainIAMRole = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_domain_association
amplify_update_domain_association <- function(appId, domainName, enableAutoSubDomain = NULL, subDomainSettings, autoSubDomainCreationPatterns = NULL, autoSubDomainIAMRole = NULL) {
  # Describe the UpdateDomainAssociation HTTP operation.
  operation <- new_operation(
    name = "UpdateDomainAssociation",
    http_method = "POST",
    http_path = "/apps/{appId}/domains/{domainName}",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$update_domain_association_input(appId = appId, domainName = domainName, enableAutoSubDomain = enableAutoSubDomain, subDomainSettings = subDomainSettings, autoSubDomainCreationPatterns = autoSubDomainCreationPatterns, autoSubDomainIAMRole = autoSubDomainIAMRole)
  response_shape <- .amplify$update_domain_association_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$update_domain_association <- amplify_update_domain_association
#' Updates a webhook
#'
#' @description
#' Updates a webhook.
#'
#' @usage
#' amplify_update_webhook(webhookId, branchName, description)
#'
#' @param webhookId [required] The unique ID for a webhook.
#' @param branchName The name for a branch that is part of an Amplify app.
#' @param description The description for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$update_webhook(
#' webhookId = "string",
#' branchName = "string",
#' description = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_webhook
amplify_update_webhook <- function(webhookId, branchName = NULL, description = NULL) {
  # Describe the UpdateWebhook HTTP operation.
  operation <- new_operation(
    name = "UpdateWebhook",
    http_method = "POST",
    http_path = "/webhooks/{webhookId}",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$update_webhook_input(webhookId = webhookId, branchName = branchName, description = description)
  response_shape <- .amplify$update_webhook_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$update_webhook <- amplify_update_webhook
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include amplify_service.R
NULL
#' Creates a new Amplify app
#'
#' @description
#' Creates a new Amplify app.
#'
#' @usage
#' amplify_create_app(name, description, repository, platform,
#' iamServiceRoleArn, oauthToken, accessToken, environmentVariables,
#' enableBranchAutoBuild, enableBranchAutoDeletion, enableBasicAuth,
#' basicAuthCredentials, customRules, tags, buildSpec, customHeaders,
#' enableAutoBranchCreation, autoBranchCreationPatterns,
#' autoBranchCreationConfig)
#'
#' @param name [required] The name for an Amplify app.
#' @param description The description for an Amplify app.
#' @param repository The repository for an Amplify app.
#' @param platform The platform or framework for an Amplify app.
#' @param iamServiceRoleArn The AWS Identity and Access Management (IAM) service role for an Amplify
#' app.
#' @param oauthToken The OAuth token for a third-party source control system for an Amplify
#' app. The OAuth token is used to create a webhook and a read-only deploy
#' key. The OAuth token is not stored.
#' @param accessToken The personal access token for a third-party source control system for an
#' Amplify app. The personal access token is used to create a webhook and a
#' read-only deploy key. The token is not stored.
#' @param environmentVariables The environment variables map for an Amplify app.
#' @param enableBranchAutoBuild Enables the auto building of branches for an Amplify app.
#' @param enableBranchAutoDeletion Automatically disconnects a branch in the Amplify Console when you
#' delete a branch from your Git repository.
#' @param enableBasicAuth Enables basic authorization for an Amplify app. This will apply to all
#' branches that are part of this app.
#' @param basicAuthCredentials The credentials for basic authorization for an Amplify app.
#' @param customRules The custom rewrite and redirect rules for an Amplify app.
#' @param tags The tag for an Amplify app.
#' @param buildSpec The build specification (build spec) for an Amplify app.
#' @param customHeaders The custom HTTP headers for an Amplify app.
#' @param enableAutoBranchCreation Enables automated branch creation for an Amplify app.
#' @param autoBranchCreationPatterns The automated branch creation glob patterns for an Amplify app.
#' @param autoBranchCreationConfig The automated branch creation configuration for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$create_app(
#' name = "string",
#' description = "string",
#' repository = "string",
#' platform = "WEB",
#' iamServiceRoleArn = "string",
#' oauthToken = "string",
#' accessToken = "string",
#' environmentVariables = list(
#' "string"
#' ),
#' enableBranchAutoBuild = TRUE|FALSE,
#' enableBranchAutoDeletion = TRUE|FALSE,
#' enableBasicAuth = TRUE|FALSE,
#' basicAuthCredentials = "string",
#' customRules = list(
#' list(
#' source = "string",
#' target = "string",
#' status = "string",
#' condition = "string"
#' )
#' ),
#' tags = list(
#' "string"
#' ),
#' buildSpec = "string",
#' customHeaders = "string",
#' enableAutoBranchCreation = TRUE|FALSE,
#' autoBranchCreationPatterns = list(
#' "string"
#' ),
#' autoBranchCreationConfig = list(
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' framework = "string",
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' buildSpec = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_app
amplify_create_app <- function(name, description = NULL, repository = NULL, platform = NULL, iamServiceRoleArn = NULL, oauthToken = NULL, accessToken = NULL, environmentVariables = NULL, enableBranchAutoBuild = NULL, enableBranchAutoDeletion = NULL, enableBasicAuth = NULL, basicAuthCredentials = NULL, customRules = NULL, tags = NULL, buildSpec = NULL, customHeaders = NULL, enableAutoBranchCreation = NULL, autoBranchCreationPatterns = NULL, autoBranchCreationConfig = NULL) {
  # Describe the CreateApp HTTP operation.
  operation <- new_operation(
    name = "CreateApp",
    http_method = "POST",
    http_path = "/apps",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$create_app_input(name = name, description = description, repository = repository, platform = platform, iamServiceRoleArn = iamServiceRoleArn, oauthToken = oauthToken, accessToken = accessToken, environmentVariables = environmentVariables, enableBranchAutoBuild = enableBranchAutoBuild, enableBranchAutoDeletion = enableBranchAutoDeletion, enableBasicAuth = enableBasicAuth, basicAuthCredentials = basicAuthCredentials, customRules = customRules, tags = tags, buildSpec = buildSpec, customHeaders = customHeaders, enableAutoBranchCreation = enableAutoBranchCreation, autoBranchCreationPatterns = autoBranchCreationPatterns, autoBranchCreationConfig = autoBranchCreationConfig)
  response_shape <- .amplify$create_app_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$create_app <- amplify_create_app
#' Creates a new backend environment for an Amplify app
#'
#' @description
#' Creates a new backend environment for an Amplify app.
#'
#' @usage
#' amplify_create_backend_environment(appId, environmentName, stackName,
#' deploymentArtifacts)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param environmentName [required] The name for the backend environment.
#' @param stackName The AWS CloudFormation stack name of a backend environment.
#' @param deploymentArtifacts The name of deployment artifacts.
#'
#' @section Request syntax:
#' ```
#' svc$create_backend_environment(
#' appId = "string",
#' environmentName = "string",
#' stackName = "string",
#' deploymentArtifacts = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_backend_environment
amplify_create_backend_environment <- function(appId, environmentName, stackName = NULL, deploymentArtifacts = NULL) {
  # Describe the CreateBackendEnvironment HTTP operation.
  operation <- new_operation(
    name = "CreateBackendEnvironment",
    http_method = "POST",
    http_path = "/apps/{appId}/backendenvironments",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$create_backend_environment_input(appId = appId, environmentName = environmentName, stackName = stackName, deploymentArtifacts = deploymentArtifacts)
  response_shape <- .amplify$create_backend_environment_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$create_backend_environment <- amplify_create_backend_environment
#' Creates a new branch for an Amplify app
#'
#' @description
#' Creates a new branch for an Amplify app.
#'
#' @usage
#' amplify_create_branch(appId, branchName, description, stage, framework,
#' enableNotification, enableAutoBuild, environmentVariables,
#' basicAuthCredentials, enableBasicAuth, enablePerformanceMode, tags,
#' buildSpec, ttl, displayName, enablePullRequestPreview,
#' pullRequestEnvironmentName, backendEnvironmentArn)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#' @param description The description for the branch.
#' @param stage Describes the current stage for the branch.
#' @param framework The framework for the branch.
#' @param enableNotification Enables notifications for the branch.
#' @param enableAutoBuild Enables auto building for the branch.
#' @param environmentVariables The environment variables for the branch.
#' @param basicAuthCredentials The basic authorization credentials for the branch.
#' @param enableBasicAuth Enables basic authorization for the branch.
#' @param enablePerformanceMode Enables performance mode for the branch.
#'
#' Performance mode optimizes for faster hosting performance by keeping
#' content cached at the edge for a longer interval. When performance mode
#' is enabled, hosting configuration or code changes can take up to 10
#' minutes to roll out.
#' @param tags The tag for the branch.
#' @param buildSpec The build specification (build spec) for the branch.
#' @param ttl The content Time To Live (TTL) for the website in seconds.
#' @param displayName The display name for a branch. This is used as the default domain
#' prefix.
#' @param enablePullRequestPreview Enables pull request previews for this branch.
#' @param pullRequestEnvironmentName The Amplify environment name for the pull request.
#' @param backendEnvironmentArn The Amazon Resource Name (ARN) for a backend environment that is part of
#' an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$create_branch(
#' appId = "string",
#' branchName = "string",
#' description = "string",
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' framework = "string",
#' enableNotification = TRUE|FALSE,
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' tags = list(
#' "string"
#' ),
#' buildSpec = "string",
#' ttl = "string",
#' displayName = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string",
#' backendEnvironmentArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_branch
amplify_create_branch <- function(appId, branchName, description = NULL, stage = NULL, framework = NULL, enableNotification = NULL, enableAutoBuild = NULL, environmentVariables = NULL, basicAuthCredentials = NULL, enableBasicAuth = NULL, enablePerformanceMode = NULL, tags = NULL, buildSpec = NULL, ttl = NULL, displayName = NULL, enablePullRequestPreview = NULL, pullRequestEnvironmentName = NULL, backendEnvironmentArn = NULL) {
  # Describe the CreateBranch HTTP operation.
  operation <- new_operation(
    name = "CreateBranch",
    http_method = "POST",
    http_path = "/apps/{appId}/branches",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$create_branch_input(appId = appId, branchName = branchName, description = description, stage = stage, framework = framework, enableNotification = enableNotification, enableAutoBuild = enableAutoBuild, environmentVariables = environmentVariables, basicAuthCredentials = basicAuthCredentials, enableBasicAuth = enableBasicAuth, enablePerformanceMode = enablePerformanceMode, tags = tags, buildSpec = buildSpec, ttl = ttl, displayName = displayName, enablePullRequestPreview = enablePullRequestPreview, pullRequestEnvironmentName = pullRequestEnvironmentName, backendEnvironmentArn = backendEnvironmentArn)
  response_shape <- .amplify$create_branch_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$create_branch <- amplify_create_branch
#' Creates a deployment for a manually deployed Amplify app
#'
#' @description
#' Creates a deployment for a manually deployed Amplify app. Manually
#' deployed apps are not connected to a repository.
#'
#' @usage
#' amplify_create_deployment(appId, branchName, fileMap)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param fileMap An optional file map that contains the file name as the key and the file
#' content md5 hash as the value. If this argument is provided, the service
#' will generate a unique upload URL per file. Otherwise, the service will
#' only generate a single upload URL for the zipped files.
#'
#' @section Request syntax:
#' ```
#' svc$create_deployment(
#' appId = "string",
#' branchName = "string",
#' fileMap = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_deployment
amplify_create_deployment <- function(appId, branchName, fileMap = NULL) {
  # Describe the CreateDeployment HTTP operation.
  operation <- new_operation(
    name = "CreateDeployment",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}/deployments",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$create_deployment_input(appId = appId, branchName = branchName, fileMap = fileMap)
  response_shape <- .amplify$create_deployment_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$create_deployment <- amplify_create_deployment
#' Creates a new domain association for an Amplify app
#'
#' @description
#' Creates a new domain association for an Amplify app. This action
#' associates a custom domain with the Amplify app
#'
#' @usage
#' amplify_create_domain_association(appId, domainName,
#' enableAutoSubDomain, subDomainSettings, autoSubDomainCreationPatterns,
#' autoSubDomainIAMRole)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param domainName [required] The domain name for the domain association.
#' @param enableAutoSubDomain Enables the automated creation of subdomains for branches.
#' @param subDomainSettings [required] The setting for the subdomain.
#' @param autoSubDomainCreationPatterns Sets the branch patterns for automatic subdomain creation.
#' @param autoSubDomainIAMRole The required AWS Identity and Access Management (IAM) service role for
#' the Amazon Resource Name (ARN) for automatically creating subdomains.
#'
#' @section Request syntax:
#' ```
#' svc$create_domain_association(
#' appId = "string",
#' domainName = "string",
#' enableAutoSubDomain = TRUE|FALSE,
#' subDomainSettings = list(
#' list(
#' prefix = "string",
#' branchName = "string"
#' )
#' ),
#' autoSubDomainCreationPatterns = list(
#' "string"
#' ),
#' autoSubDomainIAMRole = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_domain_association
amplify_create_domain_association <- function(appId, domainName, enableAutoSubDomain = NULL, subDomainSettings, autoSubDomainCreationPatterns = NULL, autoSubDomainIAMRole = NULL) {
  # Describe the CreateDomainAssociation HTTP operation.
  operation <- new_operation(
    name = "CreateDomainAssociation",
    http_method = "POST",
    http_path = "/apps/{appId}/domains",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$create_domain_association_input(appId = appId, domainName = domainName, enableAutoSubDomain = enableAutoSubDomain, subDomainSettings = subDomainSettings, autoSubDomainCreationPatterns = autoSubDomainCreationPatterns, autoSubDomainIAMRole = autoSubDomainIAMRole)
  response_shape <- .amplify$create_domain_association_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$create_domain_association <- amplify_create_domain_association
#' Creates a new webhook on an Amplify app
#'
#' @description
#' Creates a new webhook on an Amplify app.
#'
#' @usage
#' amplify_create_webhook(appId, branchName, description)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for a branch that is part of an Amplify app.
#' @param description The description for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$create_webhook(
#' appId = "string",
#' branchName = "string",
#' description = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_create_webhook
amplify_create_webhook <- function(appId, branchName, description = NULL) {
  # Describe the CreateWebhook HTTP operation.
  operation <- new_operation(
    name = "CreateWebhook",
    http_method = "POST",
    http_path = "/apps/{appId}/webhooks",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$create_webhook_input(appId = appId, branchName = branchName, description = description)
  response_shape <- .amplify$create_webhook_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$create_webhook <- amplify_create_webhook
#' Deletes an existing Amplify app specified by an app ID
#'
#' @description
#' Deletes an existing Amplify app specified by an app ID.
#'
#' @usage
#' amplify_delete_app(appId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$delete_app(
#' appId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_app
amplify_delete_app <- function(appId) {
  # Describe the DeleteApp HTTP operation.
  operation <- new_operation(
    name = "DeleteApp",
    http_method = "DELETE",
    http_path = "/apps/{appId}",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$delete_app_input(appId = appId)
  response_shape <- .amplify$delete_app_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$delete_app <- amplify_delete_app
#' Deletes a backend environment for an Amplify app
#'
#' @description
#' Deletes a backend environment for an Amplify app.
#'
#' @usage
#' amplify_delete_backend_environment(appId, environmentName)
#'
#' @param appId [required] The unique ID of an Amplify app.
#' @param environmentName [required] The name of a backend environment of an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$delete_backend_environment(
#' appId = "string",
#' environmentName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_backend_environment
amplify_delete_backend_environment <- function(appId, environmentName) {
  # Describe the DeleteBackendEnvironment HTTP operation.
  operation <- new_operation(
    name = "DeleteBackendEnvironment",
    http_method = "DELETE",
    http_path = "/apps/{appId}/backendenvironments/{environmentName}",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$delete_backend_environment_input(appId = appId, environmentName = environmentName)
  response_shape <- .amplify$delete_backend_environment_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$delete_backend_environment <- amplify_delete_backend_environment
#' Deletes a branch for an Amplify app
#'
#' @description
#' Deletes a branch for an Amplify app.
#'
#' @usage
#' amplify_delete_branch(appId, branchName)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#'
#' @section Request syntax:
#' ```
#' svc$delete_branch(
#' appId = "string",
#' branchName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_branch
amplify_delete_branch <- function(appId, branchName) {
  # Describe the DeleteBranch HTTP operation.
  operation <- new_operation(
    name = "DeleteBranch",
    http_method = "DELETE",
    http_path = "/apps/{appId}/branches/{branchName}",
    paginator = list()
  )
  # Build the request payload and the expected response shape.
  request_body <- .amplify$delete_branch_input(appId = appId, branchName = branchName)
  response_shape <- .amplify$delete_branch_output()
  # Issue the call against a client built from the current configuration.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, request_body, response_shape))
}
.amplify$operations$delete_branch <- amplify_delete_branch
#' Deletes a domain association for an Amplify app
#'
#' @description
#' Deletes a domain association for an Amplify app.
#'
#' @usage
#' amplify_delete_domain_association(appId, domainName)
#'
#' @param appId [required] The unique id for an Amplify app.
#' @param domainName [required] The name of the domain.
#'
#' @section Request syntax:
#' ```
#' svc$delete_domain_association(
#' appId = "string",
#' domainName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_domain_association
amplify_delete_domain_association <- function(appId, domainName) {
  # Describe the DeleteDomainAssociation REST call.
  operation <- new_operation(
    name = "DeleteDomainAssociation",
    http_method = "DELETE",
    http_path = "/apps/{appId}/domains/{domainName}",
    paginator = list()
  )
  request_body <- .amplify$delete_domain_association_input(
    appId = appId,
    domainName = domainName
  )
  response_shape <- .amplify$delete_domain_association_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$delete_domain_association <- amplify_delete_domain_association
#' Deletes a job for a branch of an Amplify app
#'
#' @description
#' Deletes a job for a branch of an Amplify app.
#'
#' @usage
#' amplify_delete_job(appId, branchName, jobId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param jobId [required] The unique ID for the job.
#'
#' @section Request syntax:
#' ```
#' svc$delete_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_job
amplify_delete_job <- function(appId, branchName, jobId) {
  # Describe the DeleteJob REST call.
  operation <- new_operation(
    name = "DeleteJob",
    http_method = "DELETE",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}",
    paginator = list()
  )
  request_body <- .amplify$delete_job_input(
    appId = appId,
    branchName = branchName,
    jobId = jobId
  )
  response_shape <- .amplify$delete_job_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$delete_job <- amplify_delete_job
#' Deletes a webhook
#'
#' @description
#' Deletes a webhook.
#'
#' @usage
#' amplify_delete_webhook(webhookId)
#'
#' @param webhookId [required] The unique ID for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$delete_webhook(
#' webhookId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_delete_webhook
amplify_delete_webhook <- function(webhookId) {
  # Describe the DeleteWebhook REST call.
  operation <- new_operation(
    name = "DeleteWebhook",
    http_method = "DELETE",
    http_path = "/webhooks/{webhookId}",
    paginator = list()
  )
  request_body <- .amplify$delete_webhook_input(webhookId = webhookId)
  response_shape <- .amplify$delete_webhook_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$delete_webhook <- amplify_delete_webhook
#' Returns the website access logs for a specific time range using a
#' presigned URL
#'
#' @description
#' Returns the website access logs for a specific time range using a
#' presigned URL.
#'
#' @usage
#' amplify_generate_access_logs(startTime, endTime, domainName, appId)
#'
#' @param startTime The time at which the logs should start. The time range specified is
#' inclusive of the start time.
#' @param endTime The time at which the logs should end. The time range specified is
#' inclusive of the end time.
#' @param domainName [required] The name of the domain.
#' @param appId [required] The unique ID for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$generate_access_logs(
#' startTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' endTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' domainName = "string",
#' appId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_generate_access_logs
amplify_generate_access_logs <- function(startTime = NULL, endTime = NULL, domainName, appId) {
  # Describe the GenerateAccessLogs REST call.
  operation <- new_operation(
    name = "GenerateAccessLogs",
    http_method = "POST",
    http_path = "/apps/{appId}/accesslogs",
    paginator = list()
  )
  request_body <- .amplify$generate_access_logs_input(
    startTime = startTime,
    endTime = endTime,
    domainName = domainName,
    appId = appId
  )
  response_shape <- .amplify$generate_access_logs_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$generate_access_logs <- amplify_generate_access_logs
#' Returns an existing Amplify app by appID
#'
#' @description
#' Returns an existing Amplify app by appID.
#'
#' @usage
#' amplify_get_app(appId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$get_app(
#' appId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_app
amplify_get_app <- function(appId) {
  # Describe the GetApp REST call.
  operation <- new_operation(
    name = "GetApp",
    http_method = "GET",
    http_path = "/apps/{appId}",
    paginator = list()
  )
  request_body <- .amplify$get_app_input(appId = appId)
  response_shape <- .amplify$get_app_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$get_app <- amplify_get_app
#' Returns the artifact info that corresponds to an artifact id
#'
#' @description
#' Returns the artifact info that corresponds to an artifact id.
#'
#' @usage
#' amplify_get_artifact_url(artifactId)
#'
#' @param artifactId [required] The unique ID for an artifact.
#'
#' @section Request syntax:
#' ```
#' svc$get_artifact_url(
#' artifactId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_artifact_url
amplify_get_artifact_url <- function(artifactId) {
  # Describe the GetArtifactUrl REST call.
  operation <- new_operation(
    name = "GetArtifactUrl",
    http_method = "GET",
    http_path = "/artifacts/{artifactId}",
    paginator = list()
  )
  request_body <- .amplify$get_artifact_url_input(artifactId = artifactId)
  response_shape <- .amplify$get_artifact_url_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$get_artifact_url <- amplify_get_artifact_url
#' Returns a backend environment for an Amplify app
#'
#' @description
#' Returns a backend environment for an Amplify app.
#'
#' @usage
#' amplify_get_backend_environment(appId, environmentName)
#'
#' @param appId [required] The unique id for an Amplify app.
#' @param environmentName [required] The name for the backend environment.
#'
#' @section Request syntax:
#' ```
#' svc$get_backend_environment(
#' appId = "string",
#' environmentName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_backend_environment
amplify_get_backend_environment <- function(appId, environmentName) {
  # Describe the GetBackendEnvironment REST call.
  operation <- new_operation(
    name = "GetBackendEnvironment",
    http_method = "GET",
    http_path = "/apps/{appId}/backendenvironments/{environmentName}",
    paginator = list()
  )
  request_body <- .amplify$get_backend_environment_input(
    appId = appId,
    environmentName = environmentName
  )
  response_shape <- .amplify$get_backend_environment_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$get_backend_environment <- amplify_get_backend_environment
#' Returns a branch for an Amplify app
#'
#' @description
#' Returns a branch for an Amplify app.
#'
#' @usage
#' amplify_get_branch(appId, branchName)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#'
#' @section Request syntax:
#' ```
#' svc$get_branch(
#' appId = "string",
#' branchName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_branch
amplify_get_branch <- function(appId, branchName) {
  # Describe the GetBranch REST call.
  operation <- new_operation(
    name = "GetBranch",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}",
    paginator = list()
  )
  request_body <- .amplify$get_branch_input(
    appId = appId,
    branchName = branchName
  )
  response_shape <- .amplify$get_branch_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$get_branch <- amplify_get_branch
#' Returns the domain information for an Amplify app
#'
#' @description
#' Returns the domain information for an Amplify app.
#'
#' @usage
#' amplify_get_domain_association(appId, domainName)
#'
#' @param appId [required] The unique id for an Amplify app.
#' @param domainName [required] The name of the domain.
#'
#' @section Request syntax:
#' ```
#' svc$get_domain_association(
#' appId = "string",
#' domainName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_domain_association
amplify_get_domain_association <- function(appId, domainName) {
  # Describe the GetDomainAssociation REST call.
  operation <- new_operation(
    name = "GetDomainAssociation",
    http_method = "GET",
    http_path = "/apps/{appId}/domains/{domainName}",
    paginator = list()
  )
  request_body <- .amplify$get_domain_association_input(
    appId = appId,
    domainName = domainName
  )
  response_shape <- .amplify$get_domain_association_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$get_domain_association <- amplify_get_domain_association
#' Returns a job for a branch of an Amplify app
#'
#' @description
#' Returns a job for a branch of an Amplify app.
#'
#' @usage
#' amplify_get_job(appId, branchName, jobId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The branch name for the job.
#' @param jobId [required] The unique ID for the job.
#'
#' @section Request syntax:
#' ```
#' svc$get_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_job
amplify_get_job <- function(appId, branchName, jobId) {
  # Describe the GetJob REST call.
  operation <- new_operation(
    name = "GetJob",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}",
    paginator = list()
  )
  request_body <- .amplify$get_job_input(
    appId = appId,
    branchName = branchName,
    jobId = jobId
  )
  response_shape <- .amplify$get_job_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$get_job <- amplify_get_job
#' Returns the webhook information that corresponds to a specified webhook
#' ID
#'
#' @description
#' Returns the webhook information that corresponds to a specified webhook
#' ID.
#'
#' @usage
#' amplify_get_webhook(webhookId)
#'
#' @param webhookId [required] The unique ID for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$get_webhook(
#' webhookId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_get_webhook
amplify_get_webhook <- function(webhookId) {
  # Describe the GetWebhook REST call.
  operation <- new_operation(
    name = "GetWebhook",
    http_method = "GET",
    http_path = "/webhooks/{webhookId}",
    paginator = list()
  )
  request_body <- .amplify$get_webhook_input(webhookId = webhookId)
  response_shape <- .amplify$get_webhook_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$get_webhook <- amplify_get_webhook
#' Returns a list of the existing Amplify apps
#'
#' @description
#' Returns a list of the existing Amplify apps.
#'
#' @usage
#' amplify_list_apps(nextToken, maxResults)
#'
#' @param nextToken A pagination token. If non-null, the pagination token is returned in a
#' result. Pass its value in another request to retrieve more entries.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_apps(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_apps
amplify_list_apps <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListApps REST call.
  operation <- new_operation(
    name = "ListApps",
    http_method = "GET",
    http_path = "/apps",
    paginator = list()
  )
  request_body <- .amplify$list_apps_input(
    nextToken = nextToken,
    maxResults = maxResults
  )
  response_shape <- .amplify$list_apps_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_apps <- amplify_list_apps
#' Returns a list of artifacts for a specified app, branch, and job
#'
#' @description
#' Returns a list of artifacts for a specified app, branch, and job.
#'
#' @usage
#' amplify_list_artifacts(appId, branchName, jobId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name of a branch that is part of an Amplify app.
#' @param jobId [required] The unique ID for a job.
#' @param nextToken A pagination token. Set to null to start listing artifacts from start.
#' If a non-null pagination token is returned in a result, pass its value
#' in here to list more artifacts.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_artifacts(
#' appId = "string",
#' branchName = "string",
#' jobId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_artifacts
amplify_list_artifacts <- function(appId, branchName, jobId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListArtifacts REST call.
  operation <- new_operation(
    name = "ListArtifacts",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}/artifacts",
    paginator = list()
  )
  request_body <- .amplify$list_artifacts_input(
    appId = appId,
    branchName = branchName,
    jobId = jobId,
    nextToken = nextToken,
    maxResults = maxResults
  )
  response_shape <- .amplify$list_artifacts_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_artifacts <- amplify_list_artifacts
#' Lists the backend environments for an Amplify app
#'
#' @description
#' Lists the backend environments for an Amplify app.
#'
#' @usage
#' amplify_list_backend_environments(appId, environmentName, nextToken,
#' maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param environmentName The name of the backend environment.
#' @param nextToken A pagination token. Set to null to start listing backend environments
#' from the start. If a non-null pagination token is returned in a result,
#' pass its value in here to list more backend environments.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_backend_environments(
#' appId = "string",
#' environmentName = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_backend_environments
amplify_list_backend_environments <- function(appId, environmentName = NULL, nextToken = NULL, maxResults = NULL) {
  # Describe the ListBackendEnvironments REST call.
  operation <- new_operation(
    name = "ListBackendEnvironments",
    http_method = "GET",
    http_path = "/apps/{appId}/backendenvironments",
    paginator = list()
  )
  request_body <- .amplify$list_backend_environments_input(
    appId = appId,
    environmentName = environmentName,
    nextToken = nextToken,
    maxResults = maxResults
  )
  response_shape <- .amplify$list_backend_environments_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_backend_environments <- amplify_list_backend_environments
#' Lists the branches of an Amplify app
#'
#' @description
#' Lists the branches of an Amplify app.
#'
#' @usage
#' amplify_list_branches(appId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param nextToken A pagination token. Set to null to start listing branches from the
#' start. If a non-null pagination token is returned in a result, pass its
#' value in here to list more branches.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_branches(
#' appId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_branches
amplify_list_branches <- function(appId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListBranches REST call.
  operation <- new_operation(
    name = "ListBranches",
    http_method = "GET",
    http_path = "/apps/{appId}/branches",
    paginator = list()
  )
  request_body <- .amplify$list_branches_input(
    appId = appId,
    nextToken = nextToken,
    maxResults = maxResults
  )
  response_shape <- .amplify$list_branches_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_branches <- amplify_list_branches
#' Returns the domain associations for an Amplify app
#'
#' @description
#' Returns the domain associations for an Amplify app.
#'
#' @usage
#' amplify_list_domain_associations(appId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param nextToken A pagination token. Set to null to start listing apps from the start. If
#' non-null, a pagination token is returned in a result. Pass its value in
#' here to list more projects.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_domain_associations(
#' appId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_domain_associations
amplify_list_domain_associations <- function(appId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListDomainAssociations REST call.
  operation <- new_operation(
    name = "ListDomainAssociations",
    http_method = "GET",
    http_path = "/apps/{appId}/domains",
    paginator = list()
  )
  request_body <- .amplify$list_domain_associations_input(
    appId = appId,
    nextToken = nextToken,
    maxResults = maxResults
  )
  response_shape <- .amplify$list_domain_associations_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_domain_associations <- amplify_list_domain_associations
#' Lists the jobs for a branch of an Amplify app
#'
#' @description
#' Lists the jobs for a branch of an Amplify app.
#'
#' @usage
#' amplify_list_jobs(appId, branchName, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for a branch.
#' @param nextToken A pagination token. Set to null to start listing steps from the start.
#' If a non-null pagination token is returned in a result, pass its value
#' in here to list more steps.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_jobs(
#' appId = "string",
#' branchName = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_jobs
amplify_list_jobs <- function(appId, branchName, nextToken = NULL, maxResults = NULL) {
  # Describe the ListJobs REST call.
  operation <- new_operation(
    name = "ListJobs",
    http_method = "GET",
    http_path = "/apps/{appId}/branches/{branchName}/jobs",
    paginator = list()
  )
  request_body <- .amplify$list_jobs_input(
    appId = appId,
    branchName = branchName,
    nextToken = nextToken,
    maxResults = maxResults
  )
  response_shape <- .amplify$list_jobs_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_jobs <- amplify_list_jobs
#' Returns a list of tags for a specified Amazon Resource Name (ARN)
#'
#' @description
#' Returns a list of tags for a specified Amazon Resource Name (ARN).
#'
#' @usage
#' amplify_list_tags_for_resource(resourceArn)
#'
#' @param resourceArn [required] The Amazon Resource Name (ARN) to use to list tags.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' resourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_tags_for_resource
amplify_list_tags_for_resource <- function(resourceArn) {
  # Describe the ListTagsForResource REST call.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags/{resourceArn}",
    paginator = list()
  )
  request_body <- .amplify$list_tags_for_resource_input(resourceArn = resourceArn)
  response_shape <- .amplify$list_tags_for_resource_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_tags_for_resource <- amplify_list_tags_for_resource
#' Returns a list of webhooks for an Amplify app
#'
#' @description
#' Returns a list of webhooks for an Amplify app.
#'
#' @usage
#' amplify_list_webhooks(appId, nextToken, maxResults)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param nextToken A pagination token. Set to null to start listing webhooks from the
#' start. If non-null, the pagination token is returned in a result. Pass
#' its value in here to list more webhooks.
#' @param maxResults The maximum number of records to list in a single response.
#'
#' @section Request syntax:
#' ```
#' svc$list_webhooks(
#' appId = "string",
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_list_webhooks
amplify_list_webhooks <- function(appId, nextToken = NULL, maxResults = NULL) {
  # Describe the ListWebhooks REST call.
  operation <- new_operation(
    name = "ListWebhooks",
    http_method = "GET",
    http_path = "/apps/{appId}/webhooks",
    paginator = list()
  )
  request_body <- .amplify$list_webhooks_input(
    appId = appId,
    nextToken = nextToken,
    maxResults = maxResults
  )
  response_shape <- .amplify$list_webhooks_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$list_webhooks <- amplify_list_webhooks
#' Starts a deployment for a manually deployed app
#'
#' @description
#' Starts a deployment for a manually deployed app. Manually deployed apps
#' are not connected to a repository.
#'
#' @usage
#' amplify_start_deployment(appId, branchName, jobId, sourceUrl)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param jobId The job ID for this deployment, generated by the create deployment
#' request.
#' @param sourceUrl The source URL for this deployment, used when calling start deployment
#' without create deployment. The source URL can be any HTTP GET URL that
#' is publicly accessible and downloads a single .zip file.
#'
#' @section Request syntax:
#' ```
#' svc$start_deployment(
#' appId = "string",
#' branchName = "string",
#' jobId = "string",
#' sourceUrl = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_start_deployment
amplify_start_deployment <- function(appId, branchName, jobId = NULL, sourceUrl = NULL) {
  # Describe the StartDeployment REST call.
  operation <- new_operation(
    name = "StartDeployment",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}/deployments/start",
    paginator = list()
  )
  request_body <- .amplify$start_deployment_input(
    appId = appId,
    branchName = branchName,
    jobId = jobId,
    sourceUrl = sourceUrl
  )
  response_shape <- .amplify$start_deployment_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$start_deployment <- amplify_start_deployment
#' Starts a new job for a branch of an Amplify app
#'
#' @description
#' Starts a new job for a branch of an Amplify app.
#'
#' @usage
#' amplify_start_job(appId, branchName, jobId, jobType, jobReason,
#' commitId, commitMessage, commitTime)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The branch name for the job.
#' @param jobId The unique ID for an existing job. This is required if the value of
#' `jobType` is `RETRY`.
#' @param jobType [required] Describes the type for the job. The job type `RELEASE` starts a new job
#' with the latest change from the specified branch. This value is
#' available only for apps that are connected to a repository. The job type
#' `RETRY` retries an existing job. If the job type value is `RETRY`, the
#' `jobId` is also required.
#' @param jobReason A descriptive reason for starting this job.
#' @param commitId The commit ID from a third-party repository provider for the job.
#' @param commitMessage The commit message from a third-party repository provider for the job.
#' @param commitTime The commit date and time for the job.
#'
#' @section Request syntax:
#' ```
#' svc$start_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string",
#' jobType = "RELEASE"|"RETRY"|"MANUAL"|"WEB_HOOK",
#' jobReason = "string",
#' commitId = "string",
#' commitMessage = "string",
#' commitTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_start_job
amplify_start_job <- function(appId, branchName, jobId = NULL, jobType, jobReason = NULL, commitId = NULL, commitMessage = NULL, commitTime = NULL) {
  # Describe the StartJob REST call.
  operation <- new_operation(
    name = "StartJob",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}/jobs",
    paginator = list()
  )
  request_body <- .amplify$start_job_input(
    appId = appId,
    branchName = branchName,
    jobId = jobId,
    jobType = jobType,
    jobReason = jobReason,
    commitId = commitId,
    commitMessage = commitMessage,
    commitTime = commitTime
  )
  response_shape <- .amplify$start_job_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$start_job <- amplify_start_job
#' Stops a job that is in progress for a branch of an Amplify app
#'
#' @description
#' Stops a job that is in progress for a branch of an Amplify app.
#'
#' @usage
#' amplify_stop_job(appId, branchName, jobId)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch, for the job.
#' @param jobId [required] The unique id for the job.
#'
#' @section Request syntax:
#' ```
#' svc$stop_job(
#' appId = "string",
#' branchName = "string",
#' jobId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_stop_job
amplify_stop_job <- function(appId, branchName, jobId) {
  # Describe the StopJob REST call.
  operation <- new_operation(
    name = "StopJob",
    http_method = "DELETE",
    http_path = "/apps/{appId}/branches/{branchName}/jobs/{jobId}/stop",
    paginator = list()
  )
  request_body <- .amplify$stop_job_input(
    appId = appId,
    branchName = branchName,
    jobId = jobId
  )
  response_shape <- .amplify$stop_job_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$stop_job <- amplify_stop_job
#' Tags the resource with a tag key and value
#'
#' @description
#' Tags the resource with a tag key and value.
#'
#' @usage
#' amplify_tag_resource(resourceArn, tags)
#'
#' @param resourceArn [required] The Amazon Resource Name (ARN) to use to tag a resource.
#' @param tags [required] The tags used to tag the resource.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' resourceArn = "string",
#' tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_tag_resource
amplify_tag_resource <- function(resourceArn, tags) {
  # Describe the TagResource REST call.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags/{resourceArn}",
    paginator = list()
  )
  request_body <- .amplify$tag_resource_input(
    resourceArn = resourceArn,
    tags = tags
  )
  response_shape <- .amplify$tag_resource_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$tag_resource <- amplify_tag_resource
#' Untags a resource with a specified Amazon Resource Name (ARN)
#'
#' @description
#' Untags a resource with a specified Amazon Resource Name (ARN).
#'
#' @usage
#' amplify_untag_resource(resourceArn, tagKeys)
#'
#' @param resourceArn [required] The Amazon Resource Name (ARN) to use to untag a resource.
#' @param tagKeys [required] The tag keys to use to untag a resource.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' resourceArn = "string",
#' tagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_untag_resource
amplify_untag_resource <- function(resourceArn, tagKeys) {
  # Describe the UntagResource REST call.
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags/{resourceArn}",
    paginator = list()
  )
  request_body <- .amplify$untag_resource_input(
    resourceArn = resourceArn,
    tagKeys = tagKeys
  )
  response_shape <- .amplify$untag_resource_output()
  # Build a service client from the active configuration and dispatch.
  service <- .amplify$service(get_config())
  resp <- send_request(new_request(service, operation, request_body, response_shape))
  resp
}
.amplify$operations$untag_resource <- amplify_untag_resource
#' Updates an existing Amplify app
#'
#' @description
#' Updates an existing Amplify app.
#'
#' @usage
#' amplify_update_app(appId, name, description, platform,
#' iamServiceRoleArn, environmentVariables, enableBranchAutoBuild,
#' enableBranchAutoDeletion, enableBasicAuth, basicAuthCredentials,
#' customRules, buildSpec, customHeaders, enableAutoBranchCreation,
#' autoBranchCreationPatterns, autoBranchCreationConfig, repository,
#' oauthToken, accessToken)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param name The name for an Amplify app.
#' @param description The description for an Amplify app.
#' @param platform The platform for an Amplify app.
#' @param iamServiceRoleArn The AWS Identity and Access Management (IAM) service role for an Amplify
#' app.
#' @param environmentVariables The environment variables for an Amplify app.
#' @param enableBranchAutoBuild Enables branch auto-building for an Amplify app.
#' @param enableBranchAutoDeletion Automatically disconnects a branch in the Amplify Console when you
#' delete a branch from your Git repository.
#' @param enableBasicAuth Enables basic authorization for an Amplify app.
#' @param basicAuthCredentials The basic authorization credentials for an Amplify app.
#' @param customRules The custom redirect and rewrite rules for an Amplify app.
#' @param buildSpec The build specification (build spec) for an Amplify app.
#' @param customHeaders The custom HTTP headers for an Amplify app.
#' @param enableAutoBranchCreation Enables automated branch creation for an Amplify app.
#' @param autoBranchCreationPatterns Describes the automated branch creation glob patterns for an Amplify
#' app.
#' @param autoBranchCreationConfig The automated branch creation configuration for an Amplify app.
#' @param repository The name of the repository for an Amplify app.
#' @param oauthToken The OAuth token for a third-party source control system for an Amplify
#' app. The token is used to create a webhook and a read-only deploy key.
#' The OAuth token is not stored.
#' @param accessToken The personal access token for a third-party source control system for an
#' Amplify app. The token is used to create webhook and a read-only deploy
#' key. The token is not stored.
#'
#' @section Request syntax:
#' ```
#' svc$update_app(
#' appId = "string",
#' name = "string",
#' description = "string",
#' platform = "WEB",
#' iamServiceRoleArn = "string",
#' environmentVariables = list(
#' "string"
#' ),
#' enableBranchAutoBuild = TRUE|FALSE,
#' enableBranchAutoDeletion = TRUE|FALSE,
#' enableBasicAuth = TRUE|FALSE,
#' basicAuthCredentials = "string",
#' customRules = list(
#' list(
#' source = "string",
#' target = "string",
#' status = "string",
#' condition = "string"
#' )
#' ),
#' buildSpec = "string",
#' customHeaders = "string",
#' enableAutoBranchCreation = TRUE|FALSE,
#' autoBranchCreationPatterns = list(
#' "string"
#' ),
#' autoBranchCreationConfig = list(
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' framework = "string",
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' buildSpec = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string"
#' ),
#' repository = "string",
#' oauthToken = "string",
#' accessToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_app
amplify_update_app <- function(appId, name = NULL, description = NULL, platform = NULL, iamServiceRoleArn = NULL, environmentVariables = NULL, enableBranchAutoBuild = NULL, enableBranchAutoDeletion = NULL, enableBasicAuth = NULL, basicAuthCredentials = NULL, customRules = NULL, buildSpec = NULL, customHeaders = NULL, enableAutoBranchCreation = NULL, autoBranchCreationPatterns = NULL, autoBranchCreationConfig = NULL, repository = NULL, oauthToken = NULL, accessToken = NULL) {
  # Describe the UpdateApp REST operation exposed by the Amplify service.
  operation <- new_operation(
    name = "UpdateApp",
    http_method = "POST",
    http_path = "/apps/{appId}",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the operation's input shape.
  op_input <- .amplify$update_app_input(
    appId = appId, name = name, description = description, platform = platform,
    iamServiceRoleArn = iamServiceRoleArn,
    environmentVariables = environmentVariables,
    enableBranchAutoBuild = enableBranchAutoBuild,
    enableBranchAutoDeletion = enableBranchAutoDeletion,
    enableBasicAuth = enableBasicAuth,
    basicAuthCredentials = basicAuthCredentials,
    customRules = customRules, buildSpec = buildSpec,
    customHeaders = customHeaders,
    enableAutoBranchCreation = enableAutoBranchCreation,
    autoBranchCreationPatterns = autoBranchCreationPatterns,
    autoBranchCreationConfig = autoBranchCreationConfig,
    repository = repository, oauthToken = oauthToken, accessToken = accessToken
  )
  op_output <- .amplify$update_app_output()
  # Build a service client from the active configuration and issue the call;
  # the parsed response is the function's value.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.amplify$operations$update_app <- amplify_update_app
#' Updates a branch for an Amplify app
#'
#' @description
#' Updates a branch for an Amplify app.
#'
#' @usage
#' amplify_update_branch(appId, branchName, description, framework, stage,
#' enableNotification, enableAutoBuild, environmentVariables,
#' basicAuthCredentials, enableBasicAuth, enablePerformanceMode, buildSpec,
#' ttl, displayName, enablePullRequestPreview, pullRequestEnvironmentName,
#' backendEnvironmentArn)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param branchName [required] The name for the branch.
#' @param description The description for the branch.
#' @param framework The framework for the branch.
#' @param stage Describes the current stage for the branch.
#' @param enableNotification Enables notifications for the branch.
#' @param enableAutoBuild Enables auto building for the branch.
#' @param environmentVariables The environment variables for the branch.
#' @param basicAuthCredentials The basic authorization credentials for the branch.
#' @param enableBasicAuth Enables basic authorization for the branch.
#' @param enablePerformanceMode Enables performance mode for the branch.
#'
#' Performance mode optimizes for faster hosting performance by keeping
#' content cached at the edge for a longer interval. When performance mode
#' is enabled, hosting configuration or code changes can take up to 10
#' minutes to roll out.
#' @param buildSpec The build specification (build spec) for the branch.
#' @param ttl The content Time to Live (TTL) for the website in seconds.
#' @param displayName The display name for a branch. This is used as the default domain
#' prefix.
#' @param enablePullRequestPreview Enables pull request previews for this branch.
#' @param pullRequestEnvironmentName The Amplify environment name for the pull request.
#' @param backendEnvironmentArn The Amazon Resource Name (ARN) for a backend environment that is part of
#' an Amplify app.
#'
#' @section Request syntax:
#' ```
#' svc$update_branch(
#' appId = "string",
#' branchName = "string",
#' description = "string",
#' framework = "string",
#' stage = "PRODUCTION"|"BETA"|"DEVELOPMENT"|"EXPERIMENTAL"|"PULL_REQUEST",
#' enableNotification = TRUE|FALSE,
#' enableAutoBuild = TRUE|FALSE,
#' environmentVariables = list(
#' "string"
#' ),
#' basicAuthCredentials = "string",
#' enableBasicAuth = TRUE|FALSE,
#' enablePerformanceMode = TRUE|FALSE,
#' buildSpec = "string",
#' ttl = "string",
#' displayName = "string",
#' enablePullRequestPreview = TRUE|FALSE,
#' pullRequestEnvironmentName = "string",
#' backendEnvironmentArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_branch
amplify_update_branch <- function(appId, branchName, description = NULL, framework = NULL, stage = NULL, enableNotification = NULL, enableAutoBuild = NULL, environmentVariables = NULL, basicAuthCredentials = NULL, enableBasicAuth = NULL, enablePerformanceMode = NULL, buildSpec = NULL, ttl = NULL, displayName = NULL, enablePullRequestPreview = NULL, pullRequestEnvironmentName = NULL, backendEnvironmentArn = NULL) {
  # Describe the UpdateBranch REST operation exposed by the Amplify service.
  operation <- new_operation(
    name = "UpdateBranch",
    http_method = "POST",
    http_path = "/apps/{appId}/branches/{branchName}",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the operation's input shape.
  op_input <- .amplify$update_branch_input(
    appId = appId, branchName = branchName, description = description,
    framework = framework, stage = stage,
    enableNotification = enableNotification,
    enableAutoBuild = enableAutoBuild,
    environmentVariables = environmentVariables,
    basicAuthCredentials = basicAuthCredentials,
    enableBasicAuth = enableBasicAuth,
    enablePerformanceMode = enablePerformanceMode,
    buildSpec = buildSpec, ttl = ttl, displayName = displayName,
    enablePullRequestPreview = enablePullRequestPreview,
    pullRequestEnvironmentName = pullRequestEnvironmentName,
    backendEnvironmentArn = backendEnvironmentArn
  )
  op_output <- .amplify$update_branch_output()
  # Build a service client from the active configuration and issue the call;
  # the parsed response is the function's value.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.amplify$operations$update_branch <- amplify_update_branch
#' Creates a new domain association for an Amplify app
#'
#' @description
#' Creates a new domain association for an Amplify app.
#'
#' @usage
#' amplify_update_domain_association(appId, domainName,
#' enableAutoSubDomain, subDomainSettings, autoSubDomainCreationPatterns,
#' autoSubDomainIAMRole)
#'
#' @param appId [required] The unique ID for an Amplify app.
#' @param domainName [required] The name of the domain.
#' @param enableAutoSubDomain Enables the automated creation of subdomains for branches.
#' @param subDomainSettings [required] Describes the settings for the subdomain.
#' @param autoSubDomainCreationPatterns Sets the branch patterns for automatic subdomain creation.
#' @param autoSubDomainIAMRole The required AWS Identity and Access Management (IAM) service role for
#' the Amazon Resource Name (ARN) for automatically creating subdomains.
#'
#' @section Request syntax:
#' ```
#' svc$update_domain_association(
#' appId = "string",
#' domainName = "string",
#' enableAutoSubDomain = TRUE|FALSE,
#' subDomainSettings = list(
#' list(
#' prefix = "string",
#' branchName = "string"
#' )
#' ),
#' autoSubDomainCreationPatterns = list(
#' "string"
#' ),
#' autoSubDomainIAMRole = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_domain_association
amplify_update_domain_association <- function(appId, domainName, enableAutoSubDomain = NULL, subDomainSettings, autoSubDomainCreationPatterns = NULL, autoSubDomainIAMRole = NULL) {
  # Describe the UpdateDomainAssociation REST operation exposed by the
  # Amplify service.
  operation <- new_operation(
    name = "UpdateDomainAssociation",
    http_method = "POST",
    http_path = "/apps/{appId}/domains/{domainName}",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the operation's input shape.
  op_input <- .amplify$update_domain_association_input(
    appId = appId, domainName = domainName,
    enableAutoSubDomain = enableAutoSubDomain,
    subDomainSettings = subDomainSettings,
    autoSubDomainCreationPatterns = autoSubDomainCreationPatterns,
    autoSubDomainIAMRole = autoSubDomainIAMRole
  )
  op_output <- .amplify$update_domain_association_output()
  # Build a service client from the active configuration and issue the call;
  # the parsed response is the function's value.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.amplify$operations$update_domain_association <- amplify_update_domain_association
#' Updates a webhook
#'
#' @description
#' Updates a webhook.
#'
#' @usage
#' amplify_update_webhook(webhookId, branchName, description)
#'
#' @param webhookId [required] The unique ID for a webhook.
#' @param branchName The name for a branch that is part of an Amplify app.
#' @param description The description for a webhook.
#'
#' @section Request syntax:
#' ```
#' svc$update_webhook(
#' webhookId = "string",
#' branchName = "string",
#' description = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname amplify_update_webhook
amplify_update_webhook <- function(webhookId, branchName = NULL, description = NULL) {
  # Describe the UpdateWebhook REST operation exposed by the Amplify service.
  operation <- new_operation(
    name = "UpdateWebhook",
    http_method = "POST",
    http_path = "/webhooks/{webhookId}",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the operation's input shape.
  op_input <- .amplify$update_webhook_input(
    webhookId = webhookId,
    branchName = branchName,
    description = description
  )
  op_output <- .amplify$update_webhook_output()
  # Build a service client from the active configuration and issue the call;
  # the parsed response is the function's value.
  client <- .amplify$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.amplify$operations$update_webhook <- amplify_update_webhook
|
\name{plot_offense_counts}
\alias{plot_offense_counts}
\title{Plot Offense Counts of Crime in DC}
\usage{
plot_offense_counts(crime_data)
}
\arguments{
\item{crime_data}{data.frame to plot}
}
\description{
Plots counts of crimes from 2006-2012.
}
\examples{
plot_offense_counts(crime_data=dc_crime_data_sample)
}
| /man/plot_offense_counts.Rd | no_license | DaveDCGIS/dc_crime_analysis | R | false | false | 320 | rd | \name{plot_offense_counts}
\alias{plot_offense_counts}
\title{Plot Offense Counts of Crime in DC}
\usage{
plot_offense_counts(crime_data)
}
\arguments{
\item{crime_data}{data.frame to plot}
}
\description{
Plots counts of crimes from 2006-2012.
}
\examples{
plot_offense_counts(crime_data=dc_crime_data_sample)
}
|
# Extracted example script for proftools::plotProfileCallGraph.
library(proftools)
### Name: plotProfileCallGraph
### Title: Plot Call Graph for Rprof Profile Data
### Aliases: plotProfileCallGraph
### Keywords: programming utilities
### ** Examples
# Load the sample Rprof output bundled with the proftools package.
pd <- readProfileData(system.file("samples", "glmEx.out", package="proftools"))
# Default rendering of the call graph for the profile data.
plotProfileCallGraph(pd)
# Same graph without node scoring/coloring.
plotProfileCallGraph(pd, score = "none")
# Plain style, with nodes scored by total time.
plotProfileCallGraph(pd, style = plain.style, score = "total")
| /data/genthat_extracted_code/proftools/examples/plotProfileCallGraph.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 402 | r | library(proftools)
### Name: plotProfileCallGraph
### Title: Plot Call Graph for Rprof Profile Data
### Aliases: plotProfileCallGraph
### Keywords: programming utilities
### ** Examples
pd <- readProfileData(system.file("samples", "glmEx.out", package="proftools"))
plotProfileCallGraph(pd)
plotProfileCallGraph(pd, score = "none")
plotProfileCallGraph(pd, style = plain.style, score = "total")
|
#' Interactive artifact-distribution meta-analysis correcting for Case II direct range restriction and measurement error
#'
#' @param x List of bare-bones meta-analytic data, artifact-distribution objects for X and Y, and other meta-analysis options.
#'
#' @return A meta-analysis class object containing all results.
#' @export
#'
#' @references
#' Schmidt, F. L., & Hunter, J. E. (2015).
#' \emph{Methods of meta-analysis: Correcting error and bias in research findings} (3rd ed.).
#' Thousand Oaks, CA: Sage. \url{https://doi.org/10/b6mg}. Chapter 4.
#'
#' Law, K. S., Schmidt, F. L., & Hunter, J. E. (1994).
#' Nonlinearity of range corrections in meta-analysis: Test of an improved procedure.
#' \emph{Journal of Applied Psychology, 79}(3), 425–438. \url{https://doi.org/10.1037/0021-9010.79.3.425}
#'
#' Raju, N. S., & Burke, M. J. (1983).
#' Two new procedures for studying validity generalization.
#' \emph{Journal of Applied Psychology, 68}(3), 382–395. \url{https://doi.org/10.1037/0021-9010.68.3.382}
#'
#' @keywords internal
"ma_r_ad.int_rbAdj" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
decimals <- x$decimals
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
ad_obj_x <- prepare_ad_int(ad_obj = ad_obj_x, residual_ads = residual_ads, decimals = decimals)
ad_obj_y <- prepare_ad_int(ad_obj = ad_obj_y, residual_ads = residual_ads, decimals = decimals)
if(!correct_rxx) ad_obj_x$qxa_irr <- ad_obj_x$qxi_irr <- ad_obj_x$qxa_drr <- ad_obj_x$qxi_drr <- data.frame(Value = 1, Weight = 1, stringsAsFactors = FALSE)
if(!correct_ryy) ad_obj_y$qxa_irr <- ad_obj_y$qxi_irr <- ad_obj_y$qxa_drr <- ad_obj_y$qxi_drr <- data.frame(Value = 1, Weight = 1, stringsAsFactors = FALSE)
## flip_xy switches the internal designations of x and y and switches them back at the end of the function
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_qxa <- wt_mean(x = .ad_obj_x$qxa_drr$Value, wt = .ad_obj_x$qxa_drr$Weight)
.mean_ux <- wt_mean(x = .ad_obj_x$ux$Value, wt = .ad_obj_x$ux$Weight)
.ad_obj_y$qxi_irr$Value <- estimate_ryya(ryyi = .ad_obj_y$qxi_irr$Value^2, rxyi = mean_rxyi, ux = .mean_ux)^.5
.mean_qya <- wt_mean(x = .ad_obj_y$qxi_irr$Value, wt = .ad_obj_y$qxi_irr$Weight)
ad_list <- list(.qxa = .ad_obj_x$qxa_drr,
.qya = .ad_obj_y$qxi_irr,
.ux = .ad_obj_x$ux)
art_grid <- create_ad_array(ad_list = ad_list, name_vec = names(ad_list))
.qxa <- art_grid$.qxa
.qya <- art_grid$.qya
.ux <- art_grid$.ux
wt_vec <- art_grid$wt
mean_rtpa <- .correct_r_rb(rxyi = mean_rxyi, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
ci_tp <- .correct_r_rb(rxyi = ci_xy_i, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
var_art <- apply(t(mean_rtpa), 2, function(x){
wt_var(x = .attenuate_r_rb(rtpa = x, qx = .qxa, qy = .qya, ux = .ux), wt = wt_vec, unbiased = var_unbiased)
})
var_pre <- var_e + var_art
var_res <- var_r - var_pre
var_rho_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_res)
.mean_rxpa <- mean_rtpa * .mean_qxa
.ci_xp <- ci_tp * .mean_qxa
.var_rho_xp <- var_rho_tp * .mean_qxa^2
.mean_rtya <- mean_rtpa * .mean_qya
.ci_ty <- ci_tp * .mean_qya
.var_rho_ty <- var_rho_tp * .mean_qya^2
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
## New variances
var_r_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_r)
var_e_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_e)
var_art_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_art)
var_pre_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_pre)
se_r_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = se_r^2)^.5
.var_r_xp <- var_r_tp * .mean_qxa^2
.var_e_xp <- var_e_tp * .mean_qxa^2
.var_art_xp <- var_art_tp * .mean_qxa^2
.var_pre_xp <- var_pre_tp * .mean_qxa^2
.se_r_xp <- se_r_tp * .mean_qxa
.var_r_ty <- var_r_tp * .mean_qya^2
.var_e_ty <- var_e_tp * .mean_qya^2
.var_art_ty <- var_art_tp * .mean_qya^2
.var_pre_ty <- var_pre_tp * .mean_qya^2
.se_r_ty <- se_r_tp * .mean_qya
##
if(flip_xy){
correct_meas_y <- !(all(.qxa == 1))
correct_meas_x <- !(all(.qya == 1))
correct_drr <- !(all(.ux == 1))
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_x <- !(all(.qxa == 1))
correct_meas_y <- !(all(.qya == 1))
correct_drr <- !(all(.ux == 1))
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
#' Taylor series approximation artifact-distribution meta-analysis correcting for Raju and Burke's case 1 direct range restriction and measurement error
#'
#' @param x List of bare-bones meta-analytic data, artifact-distribution objects for X and Y, and other meta-analysis options.
#'
#' @return A list of artifact-distribution meta-analysis results to be returned to the ma_r_ad function.
#'
#' @references
#' Raju, N. S., & Burke, M. J. (1983).
#' Two new procedures for studying validity generalization.
#' \emph{Journal of Applied Psychology, 68}(3), 382–395. \url{https://doi.org/10.1037/0021-9010.68.3.382}
#'
#' @keywords internal
"ma_r_ad.tsa_rb1Adj" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
if(!correct_rxx){
ad_obj_x[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"mean"] <- 1
ad_obj_x[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var"] <- 0
ad_obj_x[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var_res"] <- 0
}
if(!correct_ryy){
ad_obj_y[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"mean"] <- 1
ad_obj_y[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var"] <- 0
ad_obj_y[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var_res"] <- 0
}
var_label <- ifelse(residual_ads, "var_res", "var")
## flip_xy switches the internal designations of x and y and switches them back at the end of the function
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_rxxa <- .ad_obj_x["rxxa_drr", "mean"]
.var_rxxa <- .ad_obj_x["rxxa_drr", var_label]
.mean_ryyi <- .ad_obj_y["rxxi_irr", "mean"]
.var_ryyi <- .ad_obj_y["rxxi_irr", var_label]
.mean_ux <- .ad_obj_x["ux", "mean"]
.var_ux <- .ad_obj_x["ux", var_label]
.mean_ryya <- estimate_ryya(ryyi = .mean_ryyi, rxyi = mean_rxyi, ux = .mean_ux)
.var_ryya <- estimate_var_ryya(ryyi = .mean_ryya, var_ryyi = .var_ryyi, rxyi = mean_rxyi, ux = .mean_ux)
mean_rtpa <- .correct_r_rb(rxyi = mean_rxyi, qx = .mean_rxxa^.5, qy = .mean_ryya^.5, ux = .mean_ux)
ci_tp <- .correct_r_rb(rxyi = ci_xy_i, qx = .mean_rxxa^.5, qy = .mean_ryya^.5, ux = .mean_ux)
var_mat_tp <- estimate_var_rho_tsa_rb1(mean_rtpa = mean_rtpa, var_rxyi = var_r, var_e = var_e,
mean_ux = .mean_ux, mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
var_ux = .var_ux, var_rxx = .var_rxxa, var_ryy = .var_ryya, show_variance_warnings = FALSE)
.mean_rxpa <- mean_rtpa * .mean_rxxa^.5
.ci_xp <- ci_tp * .mean_rxxa^.5
.mean_rtya <- mean_rtpa * .mean_ryya^.5
.ci_ty <- ci_tp * .mean_ryya^.5
var_art <- var_mat_tp$var_art
var_pre <- var_mat_tp$var_pre
var_res <- var_mat_tp$var_res
var_rho_tp <- var_mat_tp$var_rho
.var_rho_xp <- var_rho_tp * .mean_rxxa
.var_rho_ty <- var_rho_tp * .mean_ryya
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
## New variances
var_r_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_r)
var_e_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_e)
var_art_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_art)
var_pre_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_pre)
se_r_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = se_r^2)^.5
.var_r_xp <- var_r_tp * .mean_rxxa
.var_e_xp <- var_e_tp * .mean_rxxa
.var_art_xp <- var_art_tp * .mean_rxxa
.var_pre_xp <- var_pre_tp * .mean_rxxa
.se_r_xp <- se_r_tp * .mean_rxxa^.5
.var_r_ty <- var_r_tp * .mean_ryya
.var_e_ty <- var_e_tp * .mean_ryya
.var_art_ty <- var_art_tp * .mean_ryya
.var_pre_ty <- var_pre_tp * .mean_ryya
.se_r_ty <- se_r_tp * .mean_ryya^.5
##
if(flip_xy){
correct_meas_x <- .mean_rxxa != 1
correct_meas_y <- .mean_ryyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_y <- .mean_rxxa != 1
correct_meas_x <- .mean_ryyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
#' Taylor series approximation artifact-distribution meta-analysis correcting for Raju and Burke's case 2 direct range restriction and measurement error
#'
#' @param x List of bare-bones meta-analytic data, artifact-distribution objects for X and Y, and other meta-analysis options.
#'
#' @return A list of artifact-distribution meta-analysis results to be returned to the ma_r_ad function.
#'
#' @references
#' Raju, N. S., & Burke, M. J. (1983).
#' Two new procedures for studying validity generalization.
#' \emph{Journal of Applied Psychology, 68}(3), 382–395. \url{https://doi.org/10.1037/0021-9010.68.3.382}
#' @keywords internal
"ma_r_ad.tsa_rb2Adj" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
if(!correct_rxx){
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"mean"] <- 1
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var"] <- 0
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var_res"] <- 0
}
if(!correct_ryy){
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"mean"] <- 1
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var"] <- 0
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var_res"] <- 0
}
var_label <- ifelse(residual_ads, "var_res", "var")
## flip_xy switches the internal designations of x and y and switches them back at the end of the function
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_qxa <- .ad_obj_x["qxa_drr", "mean"]
.var_qxa <- .ad_obj_x["qxa_drr", var_label]
.mean_qyi <- .ad_obj_y["qxi_irr", "mean"]
.var_qyi <- .ad_obj_y["qxi_irr", var_label]
.mean_ux <- .ad_obj_x["ux", "mean"]
.var_ux <- .ad_obj_x["ux", var_label]
.mean_qya <- estimate_ryya(ryyi = .mean_qyi^2, rxyi = mean_rxyi, ux = .mean_ux)^.5
.var_qya <- estimate_var_qya(qyi = .mean_qyi, var_qyi = .var_qyi, rxyi = mean_rxyi, ux = .mean_ux)
mean_rtpa <- .correct_r_rb(rxyi = mean_rxyi, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
ci_tp <- .correct_r_rb(rxyi = ci_xy_i, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
var_mat_tp <- estimate_var_rho_tsa_rb2(mean_rtpa = mean_rtpa, var_rxyi = var_r, var_e = var_e,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya,
var_ux = .var_ux, var_qx = .var_qxa, var_qy = .var_qya, show_variance_warnings = FALSE)
.mean_rxpa <- mean_rtpa * .mean_qxa
.ci_xp <- ci_tp * .mean_qxa
.mean_rtya <- mean_rtpa * .mean_qya
.ci_ty <- ci_tp * .mean_qya
var_art <- var_mat_tp$var_art
var_pre <- var_mat_tp$var_pre
var_res <- var_mat_tp$var_res
var_rho_tp <- var_mat_tp$var_rho
.var_rho_xp <- var_rho_tp * .mean_qxa^2
.var_rho_ty <- var_rho_tp * .mean_qya^2
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
## New variances
var_r_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_r,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
var_e_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_e,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
var_art_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_art,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
var_pre_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_pre,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
se_r_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = se_r^2,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)^.5
.var_r_xp <- var_r_tp * .mean_qxa^2
.var_e_xp <- var_e_tp * .mean_qxa^2
.var_art_xp <- var_art_tp * .mean_qxa^2
.var_pre_xp <- var_pre_tp * .mean_qxa^2
.se_r_xp <- se_r_tp * .mean_qxa
.var_r_ty <- var_r_tp * .mean_qya^2
.var_e_ty <- var_e_tp * .mean_qya^2
.var_art_ty <- var_art_tp * .mean_qya^2
.var_pre_ty <- var_pre_tp * .mean_qya^2
.se_r_ty <- se_r_tp * .mean_qya
##
if(flip_xy){
correct_meas_x <- .mean_qxa != 1
correct_meas_y <- .mean_qyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_y <- .mean_qxa != 1
correct_meas_x <- .mean_qyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
| /R/ma_r_ad_rb_adj.R | no_license | Meta-Repository/psychmeta | R | false | false | 21,872 | r | #' Interactive artifact-distribution meta-analysis correcting for Case II direct range restriction and measurement error
#'
#' @param x List of bare-bones meta-analytic data, artifact-distribution objects for X and Y, and other meta-analysis options.
#'
#' @return A meta-analysis class object containing all results.
#' @export
#'
#' @references
#' Schmidt, F. L., & Hunter, J. E. (2015).
#' \emph{Methods of meta-analysis: Correcting error and bias in research findings} (3rd ed.).
#' Thousand Oaks, CA: Sage. \url{https://doi.org/10/b6mg}. Chapter 4.
#'
#' Law, K. S., Schmidt, F. L., & Hunter, J. E. (1994).
#' Nonlinearity of range corrections in meta-analysis: Test of an improved procedure.
#' \emph{Journal of Applied Psychology, 79}(3), 425–438. \url{https://doi.org/10.1037/0021-9010.79.3.425}
#'
#' Raju, N. S., & Burke, M. J. (1983).
#' Two new procedures for studying validity generalization.
#' \emph{Journal of Applied Psychology, 68}(3), 382–395. \url{https://doi.org/10.1037/0021-9010.68.3.382}
#'
#' @keywords internal
"ma_r_ad.int_rbAdj" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
decimals <- x$decimals
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
ad_obj_x <- prepare_ad_int(ad_obj = ad_obj_x, residual_ads = residual_ads, decimals = decimals)
ad_obj_y <- prepare_ad_int(ad_obj = ad_obj_y, residual_ads = residual_ads, decimals = decimals)
if(!correct_rxx) ad_obj_x$qxa_irr <- ad_obj_x$qxi_irr <- ad_obj_x$qxa_drr <- ad_obj_x$qxi_drr <- data.frame(Value = 1, Weight = 1, stringsAsFactors = FALSE)
if(!correct_ryy) ad_obj_y$qxa_irr <- ad_obj_y$qxi_irr <- ad_obj_y$qxa_drr <- ad_obj_y$qxi_drr <- data.frame(Value = 1, Weight = 1, stringsAsFactors = FALSE)
## flip_xy switches the internal designations of x and y and switches them back at the end of the function
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_qxa <- wt_mean(x = .ad_obj_x$qxa_drr$Value, wt = .ad_obj_x$qxa_drr$Weight)
.mean_ux <- wt_mean(x = .ad_obj_x$ux$Value, wt = .ad_obj_x$ux$Weight)
.ad_obj_y$qxi_irr$Value <- estimate_ryya(ryyi = .ad_obj_y$qxi_irr$Value^2, rxyi = mean_rxyi, ux = .mean_ux)^.5
.mean_qya <- wt_mean(x = .ad_obj_y$qxi_irr$Value, wt = .ad_obj_y$qxi_irr$Weight)
ad_list <- list(.qxa = .ad_obj_x$qxa_drr,
.qya = .ad_obj_y$qxi_irr,
.ux = .ad_obj_x$ux)
art_grid <- create_ad_array(ad_list = ad_list, name_vec = names(ad_list))
.qxa <- art_grid$.qxa
.qya <- art_grid$.qya
.ux <- art_grid$.ux
wt_vec <- art_grid$wt
mean_rtpa <- .correct_r_rb(rxyi = mean_rxyi, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
ci_tp <- .correct_r_rb(rxyi = ci_xy_i, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
var_art <- apply(t(mean_rtpa), 2, function(x){
wt_var(x = .attenuate_r_rb(rtpa = x, qx = .qxa, qy = .qya, ux = .ux), wt = wt_vec, unbiased = var_unbiased)
})
var_pre <- var_e + var_art
var_res <- var_r - var_pre
var_rho_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_res)
.mean_rxpa <- mean_rtpa * .mean_qxa
.ci_xp <- ci_tp * .mean_qxa
.var_rho_xp <- var_rho_tp * .mean_qxa^2
.mean_rtya <- mean_rtpa * .mean_qya
.ci_ty <- ci_tp * .mean_qya
.var_rho_ty <- var_rho_tp * .mean_qya^2
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
## New variances
var_r_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_r)
var_e_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_e)
var_art_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_art)
var_pre_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = var_pre)
se_r_tp <- estimate_var_rho_int_rb(mean_rxyi = mean_rxyi, mean_rtpa = mean_rtpa,
mean_qx = .mean_qxa, mean_qy = .mean_qya,
mean_ux = .mean_ux, var_res = se_r^2)^.5
.var_r_xp <- var_r_tp * .mean_qxa^2
.var_e_xp <- var_e_tp * .mean_qxa^2
.var_art_xp <- var_art_tp * .mean_qxa^2
.var_pre_xp <- var_pre_tp * .mean_qxa^2
.se_r_xp <- se_r_tp * .mean_qxa
.var_r_ty <- var_r_tp * .mean_qya^2
.var_e_ty <- var_e_tp * .mean_qya^2
.var_art_ty <- var_art_tp * .mean_qya^2
.var_pre_ty <- var_pre_tp * .mean_qya^2
.se_r_ty <- se_r_tp * .mean_qya
##
if(flip_xy){
correct_meas_y <- !(all(.qxa == 1))
correct_meas_x <- !(all(.qya == 1))
correct_drr <- !(all(.ux == 1))
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_x <- !(all(.qxa == 1))
correct_meas_y <- !(all(.qya == 1))
correct_drr <- !(all(.ux == 1))
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
#' Taylor series approximation artifact-distribution meta-analysis correcting for Raju and Burke's case 1 direct range restriction and measurement error
#'
#' @param x List of bare-bones meta-analytic data, artifact-distribution objects for X and Y, and other meta-analysis options.
#'
#' @return A list of artifact-distribution meta-analysis results to be returned to the ma_r_ad function.
#'
#' @references
#' Raju, N. S., & Burke, M. J. (1983).
#' Two new procedures for studying validity generalization.
#' \emph{Journal of Applied Psychology, 68}(3), 382–395. \url{https://doi.org/10.1037/0021-9010.68.3.382}
#'
#' @keywords internal
"ma_r_ad.tsa_rb1Adj" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
if(!correct_rxx){
ad_obj_x[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"mean"] <- 1
ad_obj_x[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var"] <- 0
ad_obj_x[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var_res"] <- 0
}
if(!correct_ryy){
ad_obj_y[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"mean"] <- 1
ad_obj_y[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var"] <- 0
ad_obj_y[c("rxxi_irr", "rxxi_drr", "rxxa_irr", "rxxa_drr"),"var_res"] <- 0
}
var_label <- ifelse(residual_ads, "var_res", "var")
## flip_xy switches the internal designations of x and y and switches them back at the end of the function
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_rxxa <- .ad_obj_x["rxxa_drr", "mean"]
.var_rxxa <- .ad_obj_x["rxxa_drr", var_label]
.mean_ryyi <- .ad_obj_y["rxxi_irr", "mean"]
.var_ryyi <- .ad_obj_y["rxxi_irr", var_label]
.mean_ux <- .ad_obj_x["ux", "mean"]
.var_ux <- .ad_obj_x["ux", var_label]
.mean_ryya <- estimate_ryya(ryyi = .mean_ryyi, rxyi = mean_rxyi, ux = .mean_ux)
.var_ryya <- estimate_var_ryya(ryyi = .mean_ryya, var_ryyi = .var_ryyi, rxyi = mean_rxyi, ux = .mean_ux)
mean_rtpa <- .correct_r_rb(rxyi = mean_rxyi, qx = .mean_rxxa^.5, qy = .mean_ryya^.5, ux = .mean_ux)
ci_tp <- .correct_r_rb(rxyi = ci_xy_i, qx = .mean_rxxa^.5, qy = .mean_ryya^.5, ux = .mean_ux)
var_mat_tp <- estimate_var_rho_tsa_rb1(mean_rtpa = mean_rtpa, var_rxyi = var_r, var_e = var_e,
mean_ux = .mean_ux, mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
var_ux = .var_ux, var_rxx = .var_rxxa, var_ryy = .var_ryya, show_variance_warnings = FALSE)
.mean_rxpa <- mean_rtpa * .mean_rxxa^.5
.ci_xp <- ci_tp * .mean_rxxa^.5
.mean_rtya <- mean_rtpa * .mean_ryya^.5
.ci_ty <- ci_tp * .mean_ryya^.5
var_art <- var_mat_tp$var_art
var_pre <- var_mat_tp$var_pre
var_res <- var_mat_tp$var_res
var_rho_tp <- var_mat_tp$var_rho
.var_rho_xp <- var_rho_tp * .mean_rxxa
.var_rho_ty <- var_rho_tp * .mean_ryya
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
## New variances
var_r_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_r)
var_e_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_e)
var_art_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_art)
var_pre_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = var_pre)
se_r_tp <- estimate_var_tsa_rb1(mean_rtpa = mean_rtpa,
mean_rxx = .mean_rxxa, mean_ryy = .mean_ryya,
mean_ux = .mean_ux, var_res = se_r^2)^.5
.var_r_xp <- var_r_tp * .mean_rxxa
.var_e_xp <- var_e_tp * .mean_rxxa
.var_art_xp <- var_art_tp * .mean_rxxa
.var_pre_xp <- var_pre_tp * .mean_rxxa
.se_r_xp <- se_r_tp * .mean_rxxa^.5
.var_r_ty <- var_r_tp * .mean_ryya
.var_e_ty <- var_e_tp * .mean_ryya
.var_art_ty <- var_art_tp * .mean_ryya
.var_pre_ty <- var_pre_tp * .mean_ryya
.se_r_ty <- se_r_tp * .mean_ryya^.5
##
if(flip_xy){
correct_meas_x <- .mean_rxxa != 1
correct_meas_y <- .mean_ryyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_y <- .mean_rxxa != 1
correct_meas_x <- .mean_ryyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
#' Taylor series approximation artifact-distribution meta-analysis correcting for Raju and Burke's case 2 direct range restriction and measurement error
#'
#' @param x List of bare-bones meta-analytic data, artifact-distribution objects for X and Y, and other meta-analysis options.
#'
#' @return A list of artifact-distribution meta-analysis results to be returned to the ma_r_ad function.
#'
#' @references
#' Raju, N. S., & Burke, M. J. (1983).
#' Two new procedures for studying validity generalization.
#' \emph{Journal of Applied Psychology, 68}(3), 382–395. \url{https://doi.org/10.1037/0021-9010.68.3.382}
#' @keywords internal
"ma_r_ad.tsa_rb2Adj" <- function(x){
barebones <- x$barebones
ad_obj_x <- x$ad_obj_x
ad_obj_y <- x$ad_obj_y
correct_rxx <- x$correct_rxx
correct_ryy <- x$correct_ryy
residual_ads <- x$residual_ads
cred_level <- x$cred_level
cred_method <- x$cred_method
var_unbiased <- x$var_unbiased
flip_xy <- x$flip_xy
k <- barebones[,"k"]
N <- barebones[,"N"]
mean_rxyi <- barebones[,"mean_r"]
var_r <- barebones[,"var_r"]
var_e <- barebones[,"var_e"]
ci_xy_i <- barebones[,grepl(x = colnames(barebones), pattern = "CI")]
se_r <- barebones[,"se_r"]
if(!correct_rxx){
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"mean"] <- 1
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var"] <- 0
ad_obj_x[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var_res"] <- 0
}
if(!correct_ryy){
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"mean"] <- 1
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var"] <- 0
ad_obj_y[c("qxi_irr", "qxi_drr", "qxa_irr", "qxa_drr"),"var_res"] <- 0
}
var_label <- ifelse(residual_ads, "var_res", "var")
## flip_xy switches the internal designations of x and y and switches them back at the end of the function
if(flip_xy){
.ad_obj_x <- ad_obj_y
.ad_obj_y <- ad_obj_x
}else{
.ad_obj_x <- ad_obj_x
.ad_obj_y <- ad_obj_y
}
.mean_qxa <- .ad_obj_x["qxa_drr", "mean"]
.var_qxa <- .ad_obj_x["qxa_drr", var_label]
.mean_qyi <- .ad_obj_y["qxi_irr", "mean"]
.var_qyi <- .ad_obj_y["qxi_irr", var_label]
.mean_ux <- .ad_obj_x["ux", "mean"]
.var_ux <- .ad_obj_x["ux", var_label]
.mean_qya <- estimate_ryya(ryyi = .mean_qyi^2, rxyi = mean_rxyi, ux = .mean_ux)^.5
.var_qya <- estimate_var_qya(qyi = .mean_qyi, var_qyi = .var_qyi, rxyi = mean_rxyi, ux = .mean_ux)
mean_rtpa <- .correct_r_rb(rxyi = mean_rxyi, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
ci_tp <- .correct_r_rb(rxyi = ci_xy_i, qx = .mean_qxa, qy = .mean_qya, ux = .mean_ux)
var_mat_tp <- estimate_var_rho_tsa_rb2(mean_rtpa = mean_rtpa, var_rxyi = var_r, var_e = var_e,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya,
var_ux = .var_ux, var_qx = .var_qxa, var_qy = .var_qya, show_variance_warnings = FALSE)
.mean_rxpa <- mean_rtpa * .mean_qxa
.ci_xp <- ci_tp * .mean_qxa
.mean_rtya <- mean_rtpa * .mean_qya
.ci_ty <- ci_tp * .mean_qya
var_art <- var_mat_tp$var_art
var_pre <- var_mat_tp$var_pre
var_res <- var_mat_tp$var_res
var_rho_tp <- var_mat_tp$var_rho
.var_rho_xp <- var_rho_tp * .mean_qxa^2
.var_rho_ty <- var_rho_tp * .mean_qya^2
sd_r <- var_r^.5
sd_e <- var_e^.5
sd_art <- var_art^.5
sd_pre <- var_pre^.5
sd_res <- var_res^.5
sd_rho_tp <- var_rho_tp^.5
## New variances
var_r_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_r,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
var_e_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_e,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
var_art_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_art,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
var_pre_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = var_pre,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)
se_r_tp <- estimate_var_tsa_rb2(mean_rtpa = mean_rtpa, var = se_r^2,
mean_ux = .mean_ux, mean_qx = .mean_qxa, mean_qy = .mean_qya)^.5
.var_r_xp <- var_r_tp * .mean_qxa^2
.var_e_xp <- var_e_tp * .mean_qxa^2
.var_art_xp <- var_art_tp * .mean_qxa^2
.var_pre_xp <- var_pre_tp * .mean_qxa^2
.se_r_xp <- se_r_tp * .mean_qxa
.var_r_ty <- var_r_tp * .mean_qya^2
.var_e_ty <- var_e_tp * .mean_qya^2
.var_art_ty <- var_art_tp * .mean_qya^2
.var_pre_ty <- var_pre_tp * .mean_qya^2
.se_r_ty <- se_r_tp * .mean_qya
##
if(flip_xy){
correct_meas_x <- .mean_qxa != 1
correct_meas_y <- .mean_qyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rtya
ci_xp <- .ci_ty
var_rho_xp <- .var_rho_ty
mean_rtya <- .mean_rxpa
ci_ty <- .ci_xp
var_rho_ty <- .var_rho_xp
var_r_xp <- .var_r_ty
var_e_xp <- .var_e_ty
var_art_xp <- .var_art_ty
var_pre_xp <- .var_pre_ty
se_r_xp <- .se_r_ty
var_r_ty <- .var_r_xp
var_e_ty <- .var_e_xp
var_art_ty <- .var_art_xp
var_pre_ty <- .var_pre_xp
se_r_ty <- .se_r_xp
}else{
correct_meas_y <- .mean_qxa != 1
correct_meas_x <- .mean_qyi != 1
correct_drr <- .mean_ux != 1
mean_rxpa <- .mean_rxpa
ci_xp <- .ci_xp
var_rho_xp <- .var_rho_xp
mean_rtya <- .mean_rtya
ci_ty <- .ci_ty
var_rho_ty <- .var_rho_ty
var_r_xp <- .var_r_xp
var_e_xp <- .var_e_xp
var_art_xp <- .var_art_xp
var_pre_xp <- .var_pre_xp
se_r_xp <- .se_r_xp
var_r_ty <- .var_r_ty
var_e_ty <- .var_e_ty
var_art_ty <- .var_art_ty
var_pre_ty <- .var_pre_ty
se_r_ty <- .se_r_ty
}
sd_rho_xp <- var_rho_xp^.5
sd_rho_ty <- var_rho_ty^.5
sd_r_tp <- var_r_tp^.5
sd_r_xp <- var_r_xp^.5
sd_r_ty <- var_r_ty^.5
sd_e_tp <- var_e_tp^.5
sd_e_xp <- var_e_xp^.5
sd_e_ty <- var_e_ty^.5
sd_art_tp <- var_art_tp^.5
sd_art_xp <- var_art_xp^.5
sd_art_ty <- var_art_ty^.5
sd_pre_tp <- var_pre_tp^.5
sd_pre_xp <- var_pre_xp^.5
sd_pre_ty <- var_pre_ty^.5
out <- as.list(environment())
class(out) <- class(x)
out
}
|
binary.file <- "file.Rdata"
if (file.exists(binary.file)) {
load(binary.file)
} else {
## do commands
} | /loadfile.R | no_license | adbecker/snips | R | false | false | 113 | r | binary.file <- "file.Rdata"
# Simple caching pattern: if the serialized results file (binary.file, defined
# above) already exists, restore its objects; otherwise this is where the
# expensive commands would run and their results would be saved for next time.
if (file.exists(binary.file)) {
# load() restores every object stored in the .Rdata file into this environment
load(binary.file)
} else {
## do commands
}
#Logan Thayer and Diane Lee
#getting every odd row from a dataframe
# Return every odd-numbered row (1st, 3rd, 5th, ...) of a data frame.
#
# Args:
#   x: a data.frame (or other object indexable by rows via nrow()).
# Returns:
#   The subset of x containing only its odd-numbered rows; x itself is
#   returned unchanged when it has no rows.
every.odd <- function(x){
  # seq(from = 1, to = 0, by = 2) errors ("wrong sign in 'by' argument"),
  # so guard the zero-row case explicitly instead of letting seq() fail
  if (nrow(x) == 0) {
    return(x)
  }
  odd.rows <- x[seq(from = 1, to = nrow(x), by = 2), ]
  return(odd.rows)
}
| /ex7_script1.R | no_license | dlee24/ICB2019_Exercise07 | R | false | false | 171 | r | #Logan Thayer and Diane Lee
#getting every odd row from a dataframe
# Subset a data frame to its odd-numbered rows (1st, 3rd, 5th, ...).
every.odd <- function(x){
  # seq(from = 1, to = nrow(x), 2) generates the odd row indices.
  # NOTE(review): seq(from = 1, to = 0, by = 2) errors, so this fails on a
  # zero-row data frame; callers should guard against empty input.
  odd.rows = x[seq(from = 1, to = nrow(x), 2), ]
  return(odd.rows)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeNA.R
\name{makeNA}
\alias{makeNA}
\title{makeNA}
\usage{
makeNA(data, n_missing = 10, value_NA = NA)
}
\arguments{
\item{data}{an object of class data.frame to apply missing values.}
\item{n_missing}{an integer specifying how many randomly chosen values to replace with missing values in the returned data frame. Default is 10.}
\item{value_NA}{the value used to mark missing entries in the returned data frame. Default is NA.}
}
\value{
data.frame object
}
\description{
\code{makeNA} takes a dataframe and converts n_missing random values into the default value of value_NA.
}
\examples{
# data("mtcars")
# makeNA(mtcars)
}
| /man/makeNA.Rd | permissive | phillip-wong/seeNA | R | false | true | 650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeNA.R
\name{makeNA}
\alias{makeNA}
\title{makeNA}
\usage{
makeNA(data, n_missing = 10, value_NA = NA)
}
\arguments{
\item{data}{an object of class data.frame to apply missing values.}
\item{n_missing}{an integer specifying how many randomly chosen values to replace with missing values in the returned data frame. Default is 10.}
\item{value_NA}{the value used to mark missing entries in the returned data frame. Default is NA.}
}
\value{
data.frame object
}
\description{
\code{makeNA} takes a dataframe and converts n_missing random values into the default value of value_NA.
}
\examples{
# data("mtcars")
# makeNA(mtcars)
}
|
# Package dependencies (FIX: RColorBrewer was previously attached twice;
# attaching it once is sufficient)
library(ggplot2)
library(plotly)
library(RColorBrewer)
library(ggthemes)
library(maps)
library(leaflet)
library(htmltools)

# Load the prepared CMS data; this script is expected to define tidydata,
# tidydistrict_counts, tidydistrict_prcnts, and data_district_added
source("data_prep2.R")

# Global lookup values shared by the UI and the server
max_count <- max(tidydistrict_counts$Count)   # largest single-group count, fixes count-axis limit
schools <- unique(tidydata$School)
groups <- unique(tidydata$Group)
types <- c("All", unique(tidydata$Type))
communities <- c("All", unique(tidydata$LearningCommunity))
min_year <- min(tidydata$Year)
max_year <- max(tidydata$Year)
# UI layout: a shared, animatable year slider on top of a five-tab panel
# (district overview, map, individual schools, district-resemblance table, about)
ui <- fluidPage(
  # Enlarge and reposition the slider's tick labels
  tags$head(
    tags$style(
      type = "text/css",
      ".irs-grid-text {font-size: 12pt !important; transform: translate(0px,40px);"
    )),
  fluidRow(
    column(1),
    column(10,
           br(),
           # Year slider shared by every tab; the play button animates through years
           sliderInput("year", "Select year (or click play button)",
                       min=min_year, max=max_year, value=min_year,
                       animate = animationOptions(interval = 650,
                                                  loop = FALSE),
                       step = 1,
                       sep='',
                       width="100%"),
           HTML('<br>')
    ),
    column(1)
  ),
  br(),
  br(),
  br(),
  fluidRow(
    tabsetPanel(
      id = "tab_being_displayed", # will set input$tab_being_displayed
      # ---- Tab 1: district-wide bar chart (counts or percentages) ----
      tabPanel("CMS District Overview",
               sidebarLayout(
                 sidebarPanel(
                   h4("CMS District Diversity"),
                   p("\n The CMS school district appears to be relatively diverse.
                     For example, in the year 2020 no single group makes up more than 36% of the student
                     population in the district."),
                   p("\n But do the individual schools reflect that diversity? Explore the tabs in this app
                     to draw your own conclusions."),
                   hr(),
                   radioButtons("district_method", "What would you like to display?",
                                choices = c("Counts", "Percentages")),
                   hr(),
                   p("\n NOTE: At any point you can select a different year on the slider at the top of
                     the screen, or click the play button to cycle through all of the years."),
                   hr(),
                   img(src = "CMS_logo_blue.png", width="45%", height="45%", alt = "CMS Logo")
                 ),
                 mainPanel(
                   fluidRow(plotOutput("district", height = "600px", width = "600px")),
                   fluidRow(
                     column(2),
                     column(10,
                            br(),
                            p('\n All race/ethnicity category names are presented here as defined by CMS.'),
                            p('(For example, "AMIN" in the CMS data refers to "American Indian")')
                     )
                   )
                 )
               )
      ),
      # ---- Tab 2: leaflet map of schools, shaded by selected group percentages ----
      tabPanel("Map",
               sidebarLayout(
                 sidebarPanel(
                   h4("Map of All CMS Schools"),
                   p("Select one or more race/ethnicity from the dropdown menu below"),
                   hr(),
                   selectInput("Race", "Race/Ethnicity (select one or more)", choices = groups,
                               selected = "Black",
                               multiple = TRUE),
                   p("NOTE: You can also delete selections from the dropdown menu above using the backspace key"),
                   hr(),
                   p("ADDITIONAL FILTERS:"),
                   selectInput("Type", "School Grade Levels (select one)", choices = types,
                               selected = "All"),
                   selectInput("Community", "Learning Communities (select one)", choices = communities,
                               selected = "All"),
                   # p("ADDITIONAL INSTRUCTIONS: "),
                   # HTML('<ul>
                   #      <li></li>
                   #      <li>Hover mouse over the center of a dot on the map to see the School and Percentage</li>
                   #      </ul>'),
                   hr(),
                   img(src = "CMS_logo_blue.png", width="40%", height="40%", alt = "CMS Logo")
                 ),
                 mainPanel(
                   fluidRow(
                     column(9,
                            HTML('<br>'),
                            textOutput("shading")
                     ),
                     column(3)
                   ),
                   fluidRow(leafletOutput("map", height = "650px", width = "800px")),
                   br(),
                   fluidRow(
                     column(1),
                     column(8,
                            p('("Pacific Islander" and "Two or More" did not exist as categories in the data until 2011.)')
                     ),
                     column(3)
                   )
                 )
               )
      ),
      # ---- Tab 3: per-school bar chart with optional district overlay ----
      tabPanel("Individual Schools",
               sidebarLayout(
                 sidebarPanel(
                   h4("Individual School Diversity"),
                   hr(),
                   selectInput("school", "Select the school", choices = schools),
                   checkboxInput("district_overlay", "Show district percentages", value=FALSE),
                   hr(),
                   p("NOTE: Some of these schools did not exist during all years."),
                   hr(),
                   img(src = "CMS_logo_blue.png", width="40%", height="40%", alt = "CMS Logo")
                 ),
                 mainPanel(
                   plotOutput("barchart", height = "600px", width = "600px")
                 )
               )
      ),
      # ---- Tab 4: table of schools whose makeup resembles the district's ----
      tabPanel("Schools Resembling District",
               sidebarLayout(
                 sidebarPanel(
                   h4("Schools Similar to District Makeup"),
                   p("With respect to the three largest groups in the district (Black, White, and Hispanic),
                     do any individual schools have percentages similar to that of the district
                     as a whole?"),
                   hr(),
                   radioButtons("level", "Specificy Level of Resemblance:",
                                choices = c("Very Closely Resembles District (within 10%)",
                                            "Closely Resembles District (within 20%)",
                                            "Somewhat Resembles District (within 30%)")),
                   hr(),
                   h5("CMS District Breakdown"),
                   tableOutput("district_table"),
                   hr(),
                   img(src = "CMS_logo_blue.png", width="40%", height="40%", alt = "CMS Logo")
                 ),
                 mainPanel(
                   fluidRow(
                     column(11,
                            hr(),
                            p("This is a table of all schools whose population percentage of Black, White,
                              and Hispanic students only differs from the districts percentage by the level you specified."),
                            em('\nFor example, if in a given year the district was 40% Black, 30% White, and 20% Hispanic,
                               then at a 10% ("Very Closely Resembles District") level the table will only contain schools that
                               are 36-44% Black and 27-33% White and 18-22% Hispanic. And the
                               20% ("Closely Resembles District") level would increase the width of those ranges.'),
                            hr(),
                            h4("LIST OF SCHOOLS")
                     ),
                     column(1)
                   ),
                   fluidRow(
                     column(11, DT::DTOutput("table")),
                     column(1)
                   )
                 )
               )
      ),
      # ---- Tab 5: credits ----
      tabPanel("About",
               br(),
               column(1),
               column(8,
                      h5('This app was developed by Chase Romano, Kirk Mason, and Nityanand Kore.'),
                      p("It was the result of Ryan Wesslen's Visual Analytics course at the University of
                        North Carolina Charlotte through the Data Science and Business Analytics MS program."),
                      br(),
                      HTML('<a href="https://github.com/chasearomano/CMS-Diversity" style="color: #e36209">View Code on GitHub</a>')
               ),
               column(3)
      )
    )
  )
)
server <- function(input, output) {
  # Reactive wrapper around the global year slider; shared by every tab
  selected_year <- reactive({ input$year })
  # ------------------------------ CMS DISTRICT Bar Chart ------------------------------
  # District-level data for the selected year, in the mode the user chose
  data_district <- reactive({
    if(input$district_method == "Percentages") {
      tidydistrict_prcnts %>% filter(Year == selected_year())
    } else if(input$district_method == "Counts") {
      tidydistrict_counts %>% filter(Year == selected_year())
    }
  })
  # Base ggplot for the district chart; axes and labels differ between modes
  gg_district <- reactive({
    if(input$district_method == "Percentages") {
      gg <- tidydistrict_prcnts %>%
        filter(Year == selected_year()) %>%
        ggplot(aes(x = factor(Group, levels = rev(levels(Group))),
                   y = Percent_of_District / 100)) +
        labs(title = "CMS School District Breakdown",
             subtitle = "Percent of District's Student Population",
             y='Percentage of All Students in the District', x ='') +
        scale_y_continuous(limits = c(0, 1),
                           breaks = seq(0,1,by = .10),
                           labels = scales::percent_format(accuracy = 1))
    } else if(input$district_method == "Counts") {
      gg <- tidydistrict_counts %>%
        filter(Year == selected_year()) %>%
        ggplot(aes(x = factor(Group, levels = rev(levels(Group))),
                   y = Count)) +
        labs(title = "CMS School District Breakdown",
             subtitle = "Number of Students in the District",
             y='Number of Students', x ='') +
        # Fixed upper limit keeps the axis stable while animating through years
        scale_y_continuous(limits = c(0, max_count))
    }
    gg <- gg +
      scale_color_brewer(palette="Dark2") +
      scale_fill_brewer(palette="Dark2")
    return(gg)
  })
  output$district <- renderPlot({
    p <- gg_district() +
      geom_bar(stat="identity",
               aes(color = Group, fill = Group),
               show.legend = FALSE) +
      theme_bw() +
      theme(title = element_text(size = 17),
            axis.text.x = element_text(size = 15),
            axis.text.y = element_text(size = 15, face = "bold")) +
      coord_flip()
    p
  })
  # ------------------------------------- MAP -------------------------------------
  # All rows for the currently selected race/ethnicity groups
  df_groups <- reactive({
    tidydata %>% filter(Group %in% input$Race)
  })
  # Apply the optional grade-level and learning-community filters, then
  # collapse to one summed percentage per school for the selected year
  df_groups_year <- reactive({
    if(input$Type == "All"){
      df <- df_groups()
    } else {
      df <- df_groups() %>%
        filter(Type==input$Type)
    }
    if(input$Community == "All") {
      df <- df
    } else {
      df <- df %>%
        filter(LearningCommunity==input$Community)
    }
    df <- df %>%
      filter(Year == selected_year()) %>%
      group_by(School, Year, lat, lon) %>%
      summarise(Percentage = sum(Percentage)) %>%
      ungroup()
    return(df)
  })
  # Sentence above the map describing what the dot shading encodes; splices
  # the selected groups into the "_________" placeholder when any are chosen
  output$shading <- renderText({
    text <- paste0("The shading of each point on the map is ",
                   "currently conveying the percent of students in each school in ",
                   selected_year(),
                   " who were _________.")
    if(!is.null(input$Race)) {
      text <- substr(text, 0, nchar(text)-10)
      for (race in input$Race){
        text <- paste0(text, race, " or ")
      }
      text <- substr(text, 0, nchar(text)-4)
      text <- paste0(text, ".")
    }
    return(text)
  })
  # IMPROVED LEAFLET VERSION OF THE MAP...
  # Continuous palette over the filtered percentage range
  mypal <- reactive({
    colorNumeric(palette = c("navy", "lightcoral"), domain = df_groups_year()$Percentage, reverse = TRUE)
  })
  # Base map drawn once; markers are managed through leafletProxy below
  output$map <- renderLeaflet({
    leaflet() %>%
      addTiles() %>%
      fitBounds(min(tidydata$lon), min(tidydata$lat), max(tidydata$lon), max(tidydata$lat))
  })
  # Redraw the circle markers and legend whenever the Map tab is visible
  # and any relevant input changes
  observe({
    req(input$tab_being_displayed == "Map") # Only display if tab is 'Map'
    df <- df_groups_year()
    labels <- sprintf(
      "<strong>%s</strong><br/>%g%%",
      df$School, df$Percentage
    ) %>% lapply(htmltools::HTML)
    if(nrow(df) == 0){
      leafletProxy("map", data = df) %>%
        clearMarkers()
    }
    else{
      leafletProxy("map", data = df) %>%
        clearMarkers() %>%
        clearPopups()%>%
        removeControl("legend") %>%
        addCircleMarkers(radius = 8,
                         weight = 1,
                         color = ~mypal()(df$Percentage),
                         stroke = FALSE,
                         fillOpacity = 0.95,
                         label =labels,
                         labelOptions = labelOptions(noHide = F, offset=c(0,-12))) %>%
        addLegend("bottomleft",
                  title = "Percentage",
                  pal = mypal(),
                  values = df$Percentage,
                  layerId = "legend",
                  opacity = 0.90)
    }
  })
  # OLD LEAFLET VERSION OF THE MAP... (kept for reference only)
  # mypal <- reactive({
  #   colorNumeric(palette = c("navy", "lightcoral"), domain = df_groups_year()$Percentage, reverse = TRUE)
  # })
  #
  # output$map <- renderLeaflet({
  #   leaflet() %>%
  #     addTiles() %>%
  #     fitBounds(min(tidydata$lon), min(tidydata$lat), max(tidydata$lon), max(tidydata$lat))
  # })
  #
  # observe({
  #   df <- df_groups_year()
  #
  #   if(nrow(df) == 0){
  #     leafletProxy("map", data = df) %>%
  #       clearMarkers()
  #   }
  #   else{
  #     leafletProxy("map", data = df) %>%
  #       clearMarkers() %>%
  #       removeControl("legend") %>%
  #       addCircleMarkers(radius = 8, weight = 2,
  #                        color = ~mypal()(df$Percentage),stroke = FALSE, fillOpacity = 0.95
  #       ) %>%
  #       addLegend("bottomleft",title = "Percentage",pal = mypal(),
  #                 values = df$Percentage,
  #                 opacity = 1,
  #                 layerId = "legend") %>%
  #       addLabelOnlyMarkers(data = df,
  #                           lng = ~lon, lat = ~lat,label = (paste("School:",df$School," & Percentage:",df$Percentage)))#,
  #     #labelOptions = labelOptions(noHide = FALSE, direction = 'top', textOnly = TRUE))
  #   }
  # })
  # ORIGINAL VERSION OF THE MAP... (kept for reference only)
  # output$map <- renderPlotly({
  #   df <- df_groups_year()
  #   max_per <- max(df_groups()$Percentage)
  #   min_per <- min(df_groups()$Percentage)
  #
  #   #myPalette <- colorRampPalette(rev(brewer.pal(8, "Dark2")))
  #   sc <- scale_colour_gradient(low = "white", high = "forestgreen", limits=c(min_per, max_per))
  #
  #   mecklenburg <- ggplot(data=df) +
  #     borders("county","North Carolina",xlim = c(-80.9, -80.8),
  #             ylim = c(35.2, 35.4), colour = "gray85", fill = "gray80") +
  #     theme_map()
  #
  #
  #   map <- mecklenburg +
  #     geom_point(aes(x = lon, y = lat, group= School,colour = Percentage),
  #                data = df,
  #                size = 2)+
  #     #scale_colour_gradient(low = "blue", high="white",limits=c(0,.9))+
  #     labs(title = "",
  #          x='', y ='') +
  #     sc +
  #     theme(title = element_text(size = 10))
  #
  #   ggplotly(map, tooltip = c("School", "Percentage"))
  #
  # })
  # ------------------------- INDIVIDUAL SCHOOLS Bar Chart --------------------------
  # Rows for the selected school in the selected year
  df_school_year <- reactive({
    tidydata %>% filter(School == input$school, Year == selected_year())
  })
  output$barchart <- renderPlot({
    df <- df_school_year()
    p <- ggplot(data = df,
                aes(x = factor(Group, levels = rev(levels(Group))),
                    y = Percentage / 100)) +
      geom_bar(stat="identity",
               aes(color = Group, fill = Group),
               show.legend = FALSE) +
      labs(title = input$school,
           subtitle = "Percent of School's Student Population",
           x='', y ='') +
      scale_y_continuous(limits = c(0, 1),
                         breaks = seq(0,1,by = .10),
                         labels = scales::percent_format(accuracy = 1)) +
      theme_bw() +
      theme(title = element_text(size = 17),
            axis.text.x = element_text(size = 15),
            axis.text.y = element_text(size = 15, face = "bold")) +
      coord_flip()
    # Optional overlay: district percentages as triangle markers for comparison
    if (input$district_overlay) {
      p <- p +
        geom_point(data = tidydistrict_prcnts %>%
                     filter(Year == selected_year()),
                   aes(x = factor(Group, levels = rev(levels(Group))),
                       y = Percent_of_District / 100),
                   color="gray",
                   fill="blue",
                   shape=25,
                   size =4,
                   stroke = 2,
                   alpha=.75)
    }
    # Empty result means the school has no data for this year
    if (nrow(df) == 0){
      p <- p +
        labs(caption = "School did not exist in this year.")
    }
    p <- p +
      scale_color_brewer(palette="Dark2") +
      scale_fill_brewer(palette="Dark2")
    p
  })
  # ------------------------- SCHOOLS THAT MATCH DISTRICT --------------------------
  # Sidebar Table containing district percentages
  df_district <- reactive({
    tidydistrict_prcnts %>%
      filter(Year == selected_year()) %>%
      mutate(`% of District` = Percent_of_District) %>%
      select(Year, Group, `% of District`)
  })
  output$district_table <- renderTable({
    df_district()
  })
  # Create a reactive dataframe that contains just the schools that mirror the district
  # makeup to the degree specified in the radio button
  # Multiplicative tolerance bounds derived from the selected resemblance level
  upper_bound <- reactive({
    if(input$level == "Very Closely Resembles District (within 10%)") {
      upper_bound <- 1.10
    } else if(input$level == "Closely Resembles District (within 20%)") {
      upper_bound <- 1.20
    } else {
      upper_bound <- 1.30
    }
  })
  lower_bound <- reactive({
    if(input$level == "Very Closely Resembles District (within 10%)") {
      lower_bound <- 0.90
    } else if(input$level == "Closely Resembles District (within 20%)") {
      lower_bound <- 0.80
    } else {
      lower_bound <- 0.70
    }
  })
  # Schools whose Black, White, AND Hispanic percentages all fall within the
  # tolerance band around the corresponding district percentage
  data_matching_schools <- reactive({
    data_district_added %>%
      filter(Year == selected_year()) %>%
      filter( (Hispanic_Prcnt <= upper_bound() * district_prcnt_Hispanic &
                 Hispanic_Prcnt >= lower_bound() * district_prcnt_Hispanic) &
                (Black_Prcnt <= upper_bound() * district_prcnt_Black &
                   Black_Prcnt >= lower_bound() * district_prcnt_Black) &
                (White_Prcnt <= upper_bound() * district_prcnt_White &
                   White_Prcnt >= lower_bound() * district_prcnt_White) ) %>%
      mutate(`Black %` = Black_Prcnt,
             `White %` = White_Prcnt,
             `Hispanic %` = Hispanic_Prcnt,
             `Asian %` = Asian_Prcnt,
             `AMIN %` = AMIN_Prcnt,
             `Pacific Islander %` = Pacific_Prcnt,
             `Two or More %` = Mixed_Prcnt) %>%
      select(Year,
             School,
             `Black %`,
             `White %`,
             `Hispanic %`,
             `Asian %`,
             `AMIN %`,
             `Pacific Islander %`,
             `Two or More %`
      )
  })
  output$table <- DT::renderDT({
    DT::datatable(data_matching_schools(),
                  options = list(pageLength = 25))
  })
} # END SERVER FUNCTION
shinyApp(ui = ui, server = server)
| /CMS_Diversity/app.R | no_license | cromano8/CMS-Diversity | R | false | false | 19,418 | r | library(ggplot2)
library(plotly)
library(RColorBrewer)
library(ggthemes)
library(maps)
library(RColorBrewer)
library(leaflet)
library(htmltools)
source("data_prep2.R")
max_count <- max(tidydistrict_counts$Count)
schools <- unique(tidydata$School)
groups <- unique(tidydata$Group)
types <- c("All", unique(tidydata$Type))
communities <- c("All", unique(tidydata$LearningCommunity))
min_year <- min(tidydata$Year)
max_year <- max(tidydata$Year)
ui <- fluidPage(
tags$head(
tags$style(
type = "text/css",
".irs-grid-text {font-size: 12pt !important; transform: translate(0px,40px);"
)),
fluidRow(
column(1),
column(10,
br(),
sliderInput("year", "Select year (or click play button)",
min=min_year, max=max_year, value=min_year,
animate = animationOptions(interval = 650,
loop = FALSE),
step = 1,
sep='',
width="100%"),
HTML('<br>')
),
column(1)
),
br(),
br(),
br(),
fluidRow(
tabsetPanel(
id = "tab_being_displayed", # will set input$tab_being_displayed
tabPanel("CMS District Overview",
sidebarLayout(
sidebarPanel(
h4("CMS District Diversity"),
p("\n The CMS school district appears to be relatively diverse.
For example, in the year 2020 no single group makes up more than 36% of the student
population in the district."),
p("\n But do the individual schools reflect that diversity? Explore the tabs in this app
to draw your own conclusions."),
hr(),
radioButtons("district_method", "What would you like to display?",
choices = c("Counts", "Percentages")),
hr(),
p("\n NOTE: At any point you can select a different year on the slider at the top of
the screen, or click the play button to cycle through all of the years."),
hr(),
img(src = "CMS_logo_blue.png", width="45%", height="45%", alt = "CMS Logo")
),
mainPanel(
fluidRow(plotOutput("district", height = "600px", width = "600px")),
fluidRow(
column(2),
column(10,
br(),
p('\n All race/ethnicity category names are presented here as defined by CMS.'),
p('(For example, "AMIN" in the CMS data refers to "American Indian")')
)
)
)
)
),
tabPanel("Map",
sidebarLayout(
sidebarPanel(
h4("Map of All CMS Schools"),
p("Select one or more race/ethnicity from the dropdown menu below"),
hr(),
selectInput("Race", "Race/Ethnicity (select one or more)", choices = groups,
selected = "Black",
multiple = TRUE),
p("NOTE: You can also delete selections from the dropdown menu above using the backspace key"),
hr(),
p("ADDITIONAL FILTERS:"),
selectInput("Type", "School Grade Levels (select one)", choices = types,
selected = "All"),
selectInput("Community", "Learning Communities (select one)", choices = communities,
selected = "All"),
# p("ADDITIONAL INSTRUCTIONS: "),
# HTML('<ul>
# <li></li>
# <li>Hover mouse over the center of a dot on the map to see the School and Percentage</li>
# </ul>'),
hr(),
img(src = "CMS_logo_blue.png", width="40%", height="40%", alt = "CMS Logo")
),
mainPanel(
fluidRow(
column(9,
HTML('<br>'),
textOutput("shading")
),
column(3)
),
fluidRow(leafletOutput("map", height = "650px", width = "800px")),
br(),
fluidRow(
column(1),
column(8,
p('("Pacific Islander" and "Two or More" did not exist as categories in the data until 2011.)')
),
column(3)
)
)
)
),
tabPanel("Individual Schools",
sidebarLayout(
sidebarPanel(
h4("Individual School Diversity"),
hr(),
selectInput("school", "Select the school", choices = schools),
checkboxInput("district_overlay", "Show district percentages", value=FALSE),
hr(),
p("NOTE: Some of these schools did not exist during all years."),
hr(),
img(src = "CMS_logo_blue.png", width="40%", height="40%", alt = "CMS Logo")
),
mainPanel(
plotOutput("barchart", height = "600px", width = "600px")
)
)
),
tabPanel("Schools Resembling District",
sidebarLayout(
sidebarPanel(
h4("Schools Similar to District Makeup"),
p("With respect to the three largest groups in the district (Black, White, and Hispanic),
do any individual schools have percentages similar to that of the district
as a whole?"),
hr(),
radioButtons("level", "Specificy Level of Resemblance:",
choices = c("Very Closely Resembles District (within 10%)",
"Closely Resembles District (within 20%)",
"Somewhat Resembles District (within 30%)")),
hr(),
h5("CMS District Breakdown"),
tableOutput("district_table"),
hr(),
img(src = "CMS_logo_blue.png", width="40%", height="40%", alt = "CMS Logo")
),
mainPanel(
fluidRow(
column(11,
hr(),
p("This is a table of all schools whose population percentage of Black, White,
and Hispanic students only differs from the districts percentage by the level you specified."),
em('\nFor example, if in a given year the district was 40% Black, 30% White, and 20% Hispanic,
then at a 10% ("Very Closely Resembles District") level the table will only contain schools that
are 36-44% Black and 27-33% White and 18-22% Hispanic. And the
20% ("Closely Resembles District") level would increase the width of those ranges.'),
hr(),
h4("LIST OF SCHOOLS")
),
column(1)
),
fluidRow(
column(11, DT::DTOutput("table")),
column(1)
)
)
)
),
tabPanel("About",
br(),
column(1),
column(8,
h5('This app was developed by Chase Romano, Kirk Mason, and Nityanand Kore.'),
p("It was the result of Ryan Wesslen's Visual Analytics course at the University of
North Carolina Charlotte through the Data Science and Business Analytics MS program."),
br(),
HTML('<a href="https://github.com/chasearomano/CMS-Diversity" style="color: #e36209">View Code on GitHub</a>')
),
column(3)
)
)
)
)
server <- function(input, output) {
selected_year <- reactive({ input$year })
# ------------------------------ CMS DISTRICT Bar Chart ------------------------------
data_district <- reactive({
if(input$district_method == "Percentages") {
tidydistrict_prcnts %>% filter(Year == selected_year())
} else if(input$district_method == "Counts") {
tidydistrict_counts %>% filter(Year == selected_year())
}
})
gg_district <- reactive({
if(input$district_method == "Percentages") {
gg <- tidydistrict_prcnts %>%
filter(Year == selected_year()) %>%
ggplot(aes(x = factor(Group, levels = rev(levels(Group))),
y = Percent_of_District / 100)) +
labs(title = "CMS School District Breakdown",
subtitle = "Percent of District's Student Population",
y='Percentage of All Students in the District', x ='') +
scale_y_continuous(limits = c(0, 1),
breaks = seq(0,1,by = .10),
labels = scales::percent_format(accuracy = 1))
} else if(input$district_method == "Counts") {
gg <- tidydistrict_counts %>%
filter(Year == selected_year()) %>%
ggplot(aes(x = factor(Group, levels = rev(levels(Group))),
y = Count)) +
labs(title = "CMS School District Breakdown",
subtitle = "Number of Students in the District",
y='Number of Students', x ='') +
scale_y_continuous(limits = c(0, max_count))
}
gg <- gg +
scale_color_brewer(palette="Dark2") +
scale_fill_brewer(palette="Dark2")
return(gg)
})
output$district <- renderPlot({
p <- gg_district() +
geom_bar(stat="identity",
aes(color = Group, fill = Group),
show.legend = FALSE) +
theme_bw() +
theme(title = element_text(size = 17),
axis.text.x = element_text(size = 15),
axis.text.y = element_text(size = 15, face = "bold")) +
coord_flip()
p
})
# ------------------------------------- MAP -------------------------------------
df_groups <- reactive({
tidydata %>% filter(Group %in% input$Race)
})
df_groups_year <- reactive({
if(input$Type == "All"){
df <- df_groups()
} else {
df <- df_groups() %>%
filter(Type==input$Type)
}
if(input$Community == "All") {
df <- df
} else {
df <- df %>%
filter(LearningCommunity==input$Community)
}
df <- df %>%
filter(Year == selected_year()) %>%
group_by(School, Year, lat, lon) %>%
summarise(Percentage = sum(Percentage)) %>%
ungroup()
return(df)
})
output$shading <- renderText({
text <- paste0("The shading of each point on the map is ",
"currently conveying the percent of students in each school in ",
selected_year(),
" who were _________.")
if(!is.null(input$Race)) {
text <- substr(text, 0, nchar(text)-10)
for (race in input$Race){
text <- paste0(text, race, " or ")
}
text <- substr(text, 0, nchar(text)-4)
text <- paste0(text, ".")
}
return(text)
})
# IMPROVED LEAFLET VERSION OF THE MAP...
mypal <- reactive({
colorNumeric(palette = c("navy", "lightcoral"), domain = df_groups_year()$Percentage, reverse = TRUE)
})
output$map <- renderLeaflet({
leaflet() %>%
addTiles() %>%
fitBounds(min(tidydata$lon), min(tidydata$lat), max(tidydata$lon), max(tidydata$lat))
})
observe({
req(input$tab_being_displayed == "Map") # Only display if tab is 'Map'
df <- df_groups_year()
labels <- sprintf(
"<strong>%s</strong><br/>%g%%",
df$School, df$Percentage
) %>% lapply(htmltools::HTML)
if(nrow(df) == 0){
leafletProxy("map", data = df) %>%
clearMarkers()
}
else{
leafletProxy("map", data = df) %>%
clearMarkers() %>%
clearPopups()%>%
removeControl("legend") %>%
addCircleMarkers(radius = 8,
weight = 1,
color = ~mypal()(df$Percentage),
stroke = FALSE,
fillOpacity = 0.95,
label =labels,
labelOptions = labelOptions(noHide = F, offset=c(0,-12))) %>%
addLegend("bottomleft",
title = "Percentage",
pal = mypal(),
values = df$Percentage,
layerId = "legend",
opacity = 0.90)
}
})
# OLD LEAFLET VERSION OF THE MAP...
# mypal <- reactive({
# colorNumeric(palette = c("navy", "lightcoral"), domain = df_groups_year()$Percentage, reverse = TRUE)
# })
#
# output$map <- renderLeaflet({
# leaflet() %>%
# addTiles() %>%
# fitBounds(min(tidydata$lon), min(tidydata$lat), max(tidydata$lon), max(tidydata$lat))
# })
#
# observe({
# df <- df_groups_year()
#
# if(nrow(df) == 0){
# leafletProxy("map", data = df) %>%
# clearMarkers()
# }
# else{
# leafletProxy("map", data = df) %>%
# clearMarkers() %>%
# removeControl("legend") %>%
# addCircleMarkers(radius = 8, weight = 2,
# color = ~mypal()(df$Percentage),stroke = FALSE, fillOpacity = 0.95
# ) %>%
# addLegend("bottomleft",title = "Percentage",pal = mypal(),
# values = df$Percentage,
# opacity = 1,
# layerId = "legend") %>%
# addLabelOnlyMarkers(data = df,
# lng = ~lon, lat = ~lat,label = (paste("School:",df$School," & Percentage:",df$Percentage)))#,
# #labelOptions = labelOptions(noHide = FALSE, direction = 'top', textOnly = TRUE))
# }
# })
# ORIGINAL VERSION OF THE MAP...
# output$map <- renderPlotly({
# df <- df_groups_year()
# max_per <- max(df_groups()$Percentage)
# min_per <- min(df_groups()$Percentage)
#
# #myPalette <- colorRampPalette(rev(brewer.pal(8, "Dark2")))
# sc <- scale_colour_gradient(low = "white", high = "forestgreen", limits=c(min_per, max_per))
#
# mecklenburg <- ggplot(data=df) +
# borders("county","North Carolina",xlim = c(-80.9, -80.8),
# ylim = c(35.2, 35.4), colour = "gray85", fill = "gray80") +
# theme_map()
#
#
# map <- mecklenburg +
# geom_point(aes(x = lon, y = lat, group= School,colour = Percentage),
# data = df,
# size = 2)+
# #scale_colour_gradient(low = "blue", high="white",limits=c(0,.9))+
# labs(title = "",
# x='', y ='') +
# sc +
# theme(title = element_text(size = 10))
#
# ggplotly(map, tooltip = c("School", "Percentage"))
#
# })
# ------------------------- INDIVIDUAL SCHOOLS Bar Chart --------------------------
df_school_year <- reactive({
tidydata %>% filter(School == input$school, Year == selected_year())
})
output$barchart <- renderPlot({
df <- df_school_year()
p <- ggplot(data = df,
aes(x = factor(Group, levels = rev(levels(Group))),
y = Percentage / 100)) +
geom_bar(stat="identity",
aes(color = Group, fill = Group),
show.legend = FALSE) +
labs(title = input$school,
subtitle = "Percent of School's Student Population",
x='', y ='') +
scale_y_continuous(limits = c(0, 1),
breaks = seq(0,1,by = .10),
labels = scales::percent_format(accuracy = 1)) +
theme_bw() +
theme(title = element_text(size = 17),
axis.text.x = element_text(size = 15),
axis.text.y = element_text(size = 15, face = "bold")) +
coord_flip()
if (input$district_overlay) {
p <- p +
geom_point(data = tidydistrict_prcnts %>%
filter(Year == selected_year()),
aes(x = factor(Group, levels = rev(levels(Group))),
y = Percent_of_District / 100),
color="gray",
fill="blue",
shape=25,
size =4,
stroke = 2,
alpha=.75)
}
if (nrow(df) == 0){
p <- p +
labs(caption = "School did not exist in this year.")
}
p <- p +
scale_color_brewer(palette="Dark2") +
scale_fill_brewer(palette="Dark2")
p
})
# ------------------------- SCHOOLS THAT MATCH DISTRICT --------------------------
# Sidebar Table containing district percentages
df_district <- reactive({
tidydistrict_prcnts %>%
filter(Year == selected_year()) %>%
mutate(`% of District` = Percent_of_District) %>%
select(Year, Group, `% of District`)
})
output$district_table <- renderTable({
df_district()
})
# Create a reactive dataframe that contains just the schools that mirror the district
# makeup to the degree specified in the radio button
upper_bound <- reactive({
if(input$level == "Very Closely Resembles District (within 10%)") {
upper_bound <- 1.10
} else if(input$level == "Closely Resembles District (within 20%)") {
upper_bound <- 1.20
} else {
upper_bound <- 1.30
}
})
lower_bound <- reactive({
if(input$level == "Very Closely Resembles District (within 10%)") {
lower_bound <- 0.90
} else if(input$level == "Closely Resembles District (within 20%)") {
lower_bound <- 0.80
} else {
lower_bound <- 0.70
}
})
  # Schools whose Black/White/Hispanic percentages all fall within the
  # selected tolerance band (lower_bound()..upper_bound()) around the
  # district-wide percentage for the selected year.
  # NOTE(review): relies on data_district_added and the district_prcnt_*
  # columns being created in data_prep2.R -- confirm against that script.
  data_matching_schools <- reactive({
    data_district_added %>%
      filter(Year == selected_year()) %>%
      # Keep a school only if ALL three major groups are inside the band.
      filter( (Hispanic_Prcnt <= upper_bound() * district_prcnt_Hispanic &
                Hispanic_Prcnt >= lower_bound() * district_prcnt_Hispanic) &
               (Black_Prcnt <= upper_bound() * district_prcnt_Black &
                 Black_Prcnt >= lower_bound() * district_prcnt_Black) &
               (White_Prcnt <= upper_bound() * district_prcnt_White &
                White_Prcnt >= lower_bound() * district_prcnt_White) ) %>%
      # Rename columns to display-friendly labels for the DT table.
      mutate(`Black %` = Black_Prcnt,
             `White %` = White_Prcnt,
             `Hispanic %` = Hispanic_Prcnt,
             `Asian %` = Asian_Prcnt,
             `AMIN %` = AMIN_Prcnt,
             `Pacific Islander %` = Pacific_Prcnt,
             `Two or More %` = Mixed_Prcnt) %>%
      select(Year,
             School,
             `Black %`,
             `White %`,
             `Hispanic %`,
             `Asian %`,
             `AMIN %`,
             `Pacific Islander %`,
             `Two or More %`
      )
  })
output$table <- DT::renderDT({
DT::datatable(data_matching_schools(),
options = list(pageLength = 25))
})
} # END SERVER FUNCTION
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build.R
\name{build_create_image}
\alias{build_create_image}
\title{Building create image request}
\usage{
build_create_image(url = NULL, pageElementProperty = NULL,
objectId = NULL, requests_list = NULL)
}
\arguments{
\item{url}{A character vector containing a list of image URLs that are to be used to add images to
the slides}
\item{pageElementProperty}{A list that contains a page element property. The page element is to be
generated by the common_page_element_property function in this package.}
\item{objectId}{(Optional) A character vector to name the object created instead of leaving it to Google}
}
\description{
Building create image request
}
| /man/build_create_image.Rd | no_license | nazroll/googleslides | R | false | true | 736 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build.R
\name{build_create_image}
\alias{build_create_image}
\title{Building create image request}
\usage{
build_create_image(url = NULL, pageElementProperty = NULL,
objectId = NULL, requests_list = NULL)
}
\arguments{
\item{url}{A character vector container a list of image urls that is to be used to add image to
the slides}
\item{pageElementProperty}{A list that contains a page element property. The page element is to be
generated by the common_page_element_property function in this package.}
\item{objectId}{(Optional) A character vector to name the object created instead of leaving it to Google}
}
\description{
Building create image request
}
|
# Fit, plot, prune, and evaluate an rpart classification tree for star ratings.
regressionData<-read.table("/Users/Anesthesia/Desktop/GUIDE628/roundData/RegData.txt", header=TRUE)
#1 Fit the classification tree
library(rpart)
# See help(rpart) for usage; the signature is:
# rpart(formula, data, weights, subset, na.action = na.rpart, method,
#      model = FALSE, x = FALSE, y = TRUE, parms, control, cost, ...)
fit <- rpart(regressionData$stars~.,data=regressionData,method="class",parms=list(prior=c(.2,.2,.2,.2,.2)),control=rpart.control(xval=5))
pdf(file="/Users/Anesthesia/Desktop/GUIDE628/rpart/rpart.pdf")
#plot(fit,uniform=TRUE,main="Classification Tree") # draw the tree (small data, so minsplit above is small too)
#text(fit,use.n=TRUE,all=TRUE)
# rpart.control tunes the tree:
# minsplit: minimum node size before a split is attempted (>= this, keep splitting; otherwise stop)
# minbucket: minimum number of samples in any leaf node
# maxdepth: maximum depth of the decision tree
# xval: number of cross-validations
# cp ("complexity parameter"): minimum improvement in fit required for each split
library(rpart.plot)
rpart.plot(fit,type=1,main="classification tree by rpart")
# rpart.plot(fit, branch=1, branch.type=2, type=1, extra=102,
#            shadow.col="gray", box.col="green",
#            border.col="blue", split.col="red",
#            split.cex=1.2, main="classification tree by rpart")
dev.off()
## printcp reports cp and relative error at each split level, plus the
## cross-validated error estimate ("xerror" column) and its standard error
## ("xstd" column); mean relative error = xerror +/- xstd
printcp(fit)
## Use the table above to choose a cp value.
## Here we pick the cp with the smallest xerror:
fit_prune <- prune(fit, cp=fit$cptable[which.min(fit$cptable[,"xerror"]),"CP"])
pdf(file="/Users/Anesthesia/Desktop/GUIDE628/rpart/rpart_prune.pdf")
library(rpart.plot)
rpart.plot(fit_prune,type=1,main="classification tree by pruning rpart")
# rpart.plot(fit, branch=1, branch.type=2, type=1, extra=102,
#            shadow.col="gray", box.col="green",
#            border.col="blue", split.col="red",
#            split.cex=1.2, main="classification tree by rpart")
dev.off()
printcp(fit_prune)
#misclassification error = root node error * xerror * 100%
data_model_pred <- predict(fit_prune,regressionData[,-72],type="class")
source("/Users/Anesthesia/Desktop/GUIDE628/rpart/count_result.R")
count_result(data_model_pred,regressionData$stars)
| /codes/Goal1/attribute_analysis/rpart.R | no_license | junxiazhujoy/STAT628_Module2 | R | false | false | 2,530 | r | regressionData<-read.table("/Users/Anesthesia/Desktop/GUIDE628/roundData/RegData.txt", header=TRUE)
#1
library(rpart)
# 我们可以使用help(rpart)来获取rpart的使用帮助,帮助文档Usage如下
# rpart(formula, data, weights, subset, na.action = na.rpart, method,
# model = FALSE, x = FALSE, y = TRUE, parms, control, cost, ...)
fit <- rpart(regressionData$stars~.,data=regressionData,method="class",parms=list(prior=c(.2,.2,.2,.2,.2)),control=rpart.control(xval=5))
pdf(file="/Users/Anesthesia/Desktop/GUIDE628/rpart/rpart.pdf")
#plot(fit,uniform=TRUE,main="Classification Tree") #画决策树图(数据量少,上面minsplit也小)
#text(fit,use.n=TRUE,all=TRUE)
#rpart.control对树进行一些设置
#minsplit是最小分支节点数,这里指大于等于20,那么该节点会继续分划下去,否则停止
#minbucket:树中叶节点包含的最小样本数
#maxdepth:决策树最大深度
#xval:交叉验证的次数
#cp全称为complexity parameter,指某个点的复杂度,对每一步拆分,模型的拟合优度必须提高的程度
library(rpart.plot)
rpart.plot(fit,type=1,main="classification tree by rpart")
# rpart.plot(fit, branch=1, branch.type=2, type=1, extra=102,
# shadow.col="gray", box.col="green",
# border.col="blue", split.col="red",
# split.cex=1.2, main="classification tree by rpart")
dev.off()
## rpart包提供了复杂度损失修剪的修剪方法,printcp会告诉分裂到每一层,cp是多少,平均相对误差是多少
## 交叉验证的估计误差(“xerror”列),以及标准误差(“xstd”列),平均相对误差=xerror±xstd
printcp(fit)
## 通过上面的分析来确定cp的值
## 我们可以用下面的办法选择具有最小xerror的cp的办法:
fit_prune <- prune(fit, cp=fit$cptable[which.min(fit$cptable[,"xerror"]),"CP"])
pdf(file="/Users/Anesthesia/Desktop/GUIDE628/rpart/rpart_prune.pdf")
library(rpart.plot)
rpart.plot(fit_prune,type=1,main="classification tree by pruning rpart")
# rpart.plot(fit, branch=1, branch.type=2, type=1, extra=102,
# shadow.col="gray", box.col="green",
# border.col="blue", split.col="red",
# split.cex=1.2, main="classification tree by rpart")
dev.off()
printcp(fit_prune)
#misclassification error = root node error * xerror * 100%
data_model_pred <- predict(fit_prune,regressionData[,-72],type="class")
source("/Users/Anesthesia/Desktop/GUIDE628/rpart/count_result.R")
count_result(data_model_pred,regressionData$stars)
|
## Download, merge, and tidy the UCI HAR test and training datasets, then
## write the average of each mean/std measurement per subject and activity.
# 1. Load the library
library(dplyr)
# 2. Work directory
if(!dir.exists("./data")) dir.create("./data")
setwd("./data/")
# 3. Download the data
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destFile <- "UCI HAR Dataset.zip"
download.file(fileUrl, destFile)
# 4. Verify and decompress the archive
if(file.exists(destFile))
  unzip(destFile)
# 5. Read the files
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("rownumber", "variablename"))
# a. make variable names tidy (lowercase, strip punctuation)
features <- mutate(features, variablename = gsub("BodyBody", "Body", variablename))
features <- mutate(features, variablename = tolower(variablename))
features <- mutate(features, variablename = gsub("-", "", variablename))
features <- mutate(features, variablename = gsub("\\(", "", variablename))
features <- mutate(features, variablename = gsub("\\)", "", variablename))
features <- mutate(features, variablename = gsub(",", "", variablename))
activitieslabels <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("activity", "description"))
# b. make activity labels tidy
activitieslabels <- mutate(activitieslabels, description = gsub("_", "", description))
activitieslabels <- mutate(activitieslabels, description = tolower(description))
# BUG FIX: the X files were previously read with col.names = features$functions,
# but 'features' has no 'functions' column (it is NULL), so the measurement
# columns were never named and select(contains("mean"), ...) could not match.
# Use the cleaned variable names instead.
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$variablename)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$variablename)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
# 6. Merge the data (train rows first, then test, matching across pieces)
testData <- rbind(x_train, x_test)
trainData <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
mergetesttrain <- cbind(Subject, trainData, testData)
# 7. Keep only mean and standard-deviation measurements
filtermergetesttrain <- mergetesttrain %>% select(subject, code, contains("mean"), contains("std"))
# Replace the numeric activity code with its descriptive label
filtermergetesttrain$code <- activitieslabels[filtermergetesttrain$code, 2]
# 8. Average of each measurement by activity and subject
# (group by the 'code' column directly, not the external vector; funs() is deprecated)
data <- filtermergetesttrain %>% group_by(subject, code) %>% summarise_all(mean)
# 9. Write the file
write.table(data, "Average_by_activityandsubject.txt", row.names = FALSE)
| /run_Analysis.R | no_license | gtibana/DataCleaning_project | R | false | false | 2,589 | r | ## Merge the test ans training datasets
# 1. Load the librart
library(dplyr)
# 2. work directory
if(!dir.exists("./data")) dir.create("./data")
setwd("./data/")
# 3. Download the data
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destFile <- "UCI HAR Dataset.zip"
download.file(fileUrl, destFile)
# 4. Verify an decompress file
if(file.exists(destFile))
unzip(destFile)
# 5. Read the files
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("rownumber", "variablename"))
# a. make variables tidy
features <- mutate(features, variablename = gsub("BodyBody", "Body", variablename))
features <- mutate(features, variablename = tolower(variablename))
features <- mutate(features, variablename = gsub("-", "", variablename))
features <- mutate(features, variablename = gsub("\\(", "", variablename))
features <- mutate(features, variablename = gsub("\\)", "", variablename))
features <- mutate(features, variablename = gsub(",", "", variablename))
activitieslabels <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("activity", "description"))
# b. make activities tidy
activitieslabels <- mutate(activitieslabels, description = gsub("_", "", description))
activitieslabels <- mutate(activitieslabels,description = tolower(description))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
# 6. Merges the data
testData <- rbind(x_train, x_test)
trainData <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
mergetesttrain <- cbind(Subject, trainData, testData)
# 7. Filter by mean and std
filtermergetesttrain <- mergetesttrain %>% select(subject, code, contains("mean"), contains("std"))
filtermergetesttrain$code <- activitieslabels[filtermergetesttrain$code, 2]
# 8. Average of activity and subject
data <- filtermergetesttrain %>% group_by(subject, filtermergetesttrain$code) %>% summarise_all(funs(mean))
# 9. Write the file#filter by the mean and standard deviation
write.table(data, "Average_by_activityandsubject.txt", row.name=FALSE)
|
#' tsf_paired
#'
#' OTU transformation for paired data. Computes within-subject change (in presence
#' for qualitative metrics and abundance for quantitative metrics) between time
#' points for each taxon.
#'
#' @param otus Matrix of OTU counts or proportions. Notes: (1) Will be transformed to
#' proportions if it's not already; (2) Row names must be sample identifiers
#' (matching metadata), and column names must be OTU identifiers (enforced if
#' using UniFrac distances).
#' @param metadata Data frame with three columns: subject identifiers (n unique values, column name "subjID"),
#' sample identifiers (must match row names of otu.tab, column name "sampID"),
#' and time point or group identifier (must have two unique values for paired transformation).
#' @importFrom stats aggregate
#'
#' @return List with the following elements. Both data matrices have subject identifiers
#' as row names and OTU identifiers as column names.
#' \item{dat.binary}{n x p matrix of data after paired, binary/qualitative transformation}
#' \item{dat.quant}{n x p matrix of data after paired, quantitative transformation}
#' \item{avg.prop}{n x p matrix with overall average proportion of each taxon}
#'
#' @export
#'
tsf_paired <- function(otus, metadata) {
  ## One output row per subject/pair, one column per taxon; all three
  ## outputs share the same dimensions and dimnames.
  subj.ids <- unique(metadata$subjID)
  template <- matrix(0, nrow = length(subj.ids), ncol = ncol(otus),
                     dimnames = list(subj.ids, colnames(otus)))
  out.binary <- template
  out.quant <- template
  out.avgprop <- template
  for (i in seq_along(subj.ids)) {
    ## Locate this subject's samples at the two time points.
    idx1 <- which(metadata$subjID == subj.ids[i] & metadata$time == 1)
    idx2 <- which(metadata$subjID == subj.ids[i] & metadata$time == 2)
    x1 <- otus[idx1, ]
    x2 <- otus[idx2, ]
    ## Qualitative: half the signed change in presence/absence.
    out.binary[i, ] <- 0.5 * (as.numeric(x2 > 0) - as.numeric(x1 > 0))
    ## Quantitative: half the signed relative change, restricted to taxa
    ## present at either time point (others stay 0, avoiding 0/0).
    pres <- which(x1 != 0 | x2 != 0)
    out.quant[i, pres] <- 0.5 * (x2[pres] - x1[pres]) / (x2[pres] + x1[pres])
    ## Overall average proportion across the two time points.
    out.avgprop[i, ] <- 0.5 * (x1 + x2)
  }
  list(dat.binary = out.binary, dat.quant = out.quant, avg.prop = out.avgprop)
}
#' tsf_long
#'
#' OTU transformation for longitudinal data. Computes average within-subject change
#' (in presence for qualitative metrics, abundance for quantitative metrics)
#' during one unit of time for each taxon.
#'
#' @param otus Matrix of OTU counts or proportions. Notes: (1) Will be transformed to
#' proportions if it's not already; (2) Row names must be sample identifiers
#' (matching metadata), and column names must be OTU identifiers (enforced if
#' using UniFrac distances).
#' @param metadata Data frame with three columns: subject identifiers (n unique values, column name "subjID"),
#' sample identifiers (must match row names of otu.tab, column name "sampID"),
#' and time point or group identifier (if using longitudinal distances, this must be numeric or
#' convertable to numeric).
#'
#' @return List with the following elements. Both data matrices have subject identifiers
#' as row names and OTU identifiers as column names.
#' \item{dat.binary}{n x p matrix of data after longitudinal, binary/qualitative transformation}
#' \item{dat.quant}{n x p matrix of data after longitudinal, quantitative transformation}
#' \item{avg.prop}{n x p matrix with overall average proportion of each taxon}
#'
#' @export
#'
tsf_long <- function(otus, metadata) {
  ## Prepare output matrix: one row per subject, one column per taxon
  n <- length(unique(metadata$subjID))
  out.data <- matrix(0, nrow = n, ncol = ncol(otus))
  rownames(out.data) <- unique(metadata$subjID)
  colnames(out.data) <- colnames(otus)
  ## Main loop: average per-unit-time change for each subject
  out.binary = out.quant = out.data
  out.avgprop = out.data
  for (i in 1:nrow(out.data)) {
    ## Pull this subject's samples and sort them by time, since
    ## consecutive rows are differenced below
    subj.idx <- which(metadata$subjID == rownames(out.data)[i])
    subj.otu <- otus[subj.idx, ]
    subj.times <- metadata$time[subj.idx]
    ord <- order(metadata$time[subj.idx])
    subj.otu <- subj.otu[ord, ]
    subj.times <- subj.times[ord]
    qi <- nrow(subj.otu)  # number of time points for this subject
    ## Accumulate qualitative (dk.uw) and quantitative (dk.g) changes
    ## over the qi-1 consecutive time intervals.
    ## NOTE(review): assumes qi >= 2; with a single sample 1:(qi-1) would
    ## run backwards -- presumably enforced by upstream input checks.
    dk.uw <- rep(0, ncol(otus))
    dk.g <- rep(0, ncol(otus))
    cumprop <- subj.otu[1,]
    for (j in 1:(qi-1)) {
      # Presence/absence change, scaled by 1 / (time gap)
      dk.uw = dk.uw + (1/(subj.times[j+1] - subj.times[j])) * abs(as.numeric(subj.otu[(j+1), ] > 0) - as.numeric(subj.otu[j, ] > 0))
      # Relative abundance change, only for taxa present at either end
      # of the interval (avoids 0/0; absent taxa contribute 0)
      nonz <- which(subj.otu[(j+1), ] != 0 | subj.otu[j, ] != 0)
      dk.g[nonz] = dk.g[nonz] + (1/(subj.times[j+1] - subj.times[j])) *
        abs((subj.otu[(j+1), nonz] - subj.otu[j, nonz])/(subj.otu[(j+1), nonz] + subj.otu[j, nonz]))
      cumprop = cumprop + subj.otu[(j+1), ]
    }
    ## Average over the qi-1 intervals; average proportion over qi samples
    dk.uw = dk.uw/(qi - 1)
    dk.g = dk.g/(qi - 1)
    cumprop = cumprop/qi
    ## Fill this subject's row
    out.binary[i, ] <- dk.uw
    out.quant[i, ] <- dk.g
    out.avgprop[i, ] <- cumprop
  }
  return(list(dat.binary = out.binary, dat.quant = out.quant, avg.prop = out.avgprop))
}
#' counts2props
#'
#' Converts OTU counts to OTU proportions/relative abundances.
#'
#' @param x Matrix of OTU counts (rows are subjects, columns are taxa).
#'
#' @return n x p matrix of OTU proportions.
#'
#' @export
#'
counts2props <- function(x) {
  ## Divide each row of the count matrix by its row total so that
  ## every row sums to one (relative abundances); dimnames are kept.
  sweep(x, 1, rowSums(x), FUN = "/")
}
#' pltransform
#'
#' OTU transformation for longitudinal data. Computes average within-subject change
#' (in presence for qualitative metrics, abundance for quantitative metrics)
#' during one unit of time for each taxon.
#'
#' @param otus Matrix of OTU counts or proportions. Notes: (1) Will be transformed to
#' proportions if it's not already; (2) Row names must be sample identifiers
#' (matching metadata), and column names must be OTU identifiers (enforced if
#' using UniFrac distances).
#' @param metadata Data frame with three columns: subject identifiers (n unique values, column name "subjID"),
#' sample identifiers (must match row names of otu.tab, column name "sampID"),
#' and time point or group identifier (if using longitudinal distances, this must be numeric or
#' convertable to numeric).
#' @param paired Logical indicating whether to use the paired version of the metric (TRUE) or the
#' longitudinal version (FALSE). Paired analyis is only possible when there are exactly 2
#' unique time points/identifiers for each subject or pair.
#' @param check.input Logical indicating whether to check input values (default TRUE).
#'
#' @return List with the following elements. Both data matrices have subject identifiers
#' as row names and OTU identifiers as column names.
#' \item{dat.binary}{n x p matrix of data after longitudinal, binary/qualitative transformation}
#' \item{dat.quant}{n x p matrix of data after longitudinal, quantitative transformation}
#' \item{avg.prop}{n x p matrix with overall average proportion of each taxon}
#' \item{type}{Type of transformation that was used (paired, balanced longitudinal,
#' unbalanced longitudinal) with a warning if unbalanced longitudinal.}
#' @examples
#' data("paired.otus")
#' data("paired.meta")
#' # paired transformation
#' res1 <- pltransform(paired.otus, paired.meta, paired = TRUE, check.input = TRUE)
#' # longitudinal transformation
#' res2 <- pltransform(paired.otus, paired.meta, paired = FALSE, check.input = TRUE)
#'
#' @export
#'
pltransform <- function(otus, metadata, paired, check.input = TRUE) {
  ## Optionally validate/normalize the inputs first.
  if (check.input) {
    okdat <- check_input(otus, metadata, paired)
    otus <- okdat$otus
    metadata <- okdat$metadata
    remove(okdat)
  }
  ## Apply the appropriate transformation and label the design type.
  ## (Previously 'balanced' was assigned only inside the longitudinal
  ## branch but read from a later top-level else-if, which relied on
  ## short-circuit evaluation order; determine 'type' in the branch
  ## that establishes it instead.)
  if (paired) {
    res <- tsf_paired(otus, metadata)
    type <- "paired"
  } else {
    res <- tsf_long(otus, metadata)
    ## Balanced design: every time point has the same number of samples.
    balanced <- length(unique(table(metadata$time))) == 1
    if (balanced) {
      type <- "balanced longitudinal"
    } else {
      type <- "unbalanced longitudinal (WARNING: this transformation is not recommended for strongly unbalanced designs!)"
      warning("WARNING: this transformation is not recommended for strongly unbalanced designs!")
    }
  }
  ## Return transformed data, average proportions, and the design type.
  return(list(dat.binary = res$dat.binary, dat.quant = res$dat.quant,
              avg.prop = res$avg.prop, type = type))
}
| /pldist.Rcheck/00_pkg_src/pldist/R/pltransform.R | no_license | aplantin/pldist_master | R | false | false | 8,446 | r | #' tsf_paired
#'
#' OTU transformation for paired data. Computes within-subject change (in presence
#' for qualitative metrics and abundance for quantitative metrics) between time
#' points for each taxon.
#'
#' @param otus Matrix of OTU counts or proportions. Notes: (1) Will be transformed to
#' proportions if it's not already; (2) Row names must be sample identifiers
#' (matching metadata), and column names must be OTU identifiers (enforced if
#' using UniFrac distances).
#' @param metadata Data frame with three columns: subject identifiers (n unique values, column name "subjID"),
#' sample identifiers (must match row names of otu.tab, column name "sampID"),
#' and time point or group identifier (must have two unique values for paired transformation).
#' @importFrom stats aggregate
#'
#' @return List with the following elements. Both data matrices have subject identifiers
#' as row names and OTU identifiers as column names.
#' \item{dat.binary}{n x p matrix of data after paired, binary/qualitative transformation}
#' \item{dat.quant}{n x p matrix of data after paired, quantitative transformation}
#' \item{avg.prop}{n x p matrix with overall average proportion of each taxon}
#'
#' @export
#'
tsf_paired <- function(otus, metadata) {
  ## One output row per subject, one column per taxon.
  subj <- unique(metadata$subjID)
  out.data <- matrix(0, nrow = length(subj), ncol = ncol(otus))
  rownames(out.data) <- subj
  colnames(out.data) <- colnames(otus)
  out.binary <- out.quant <- out.avgprop <- out.data
  for (i in seq_len(nrow(out.data))) {
    ## Row positions (shared by otus and metadata) of this subject's two
    ## time points; check_input() is expected to guarantee exactly one each.
    t1.idx <- which(metadata$subjID == rownames(out.data)[i] & metadata$time == 1)
    t2.idx <- which(metadata$subjID == rownames(out.data)[i] & metadata$time == 2)
    ## Qualitative: half the change in presence/absence, so each entry is
    ## -0.5 (lost), 0 (no change), or 0.5 (gained).
    out.binary[i, ] <- 0.5 * (as.numeric(otus[t2.idx, ] > 0) - as.numeric(otus[t1.idx, ] > 0))
    ## Quantitative: half the normalized abundance change; taxa absent at
    ## both time points stay 0 rather than producing 0/0.
    nonz <- which(otus[t2.idx, ] != 0 | otus[t1.idx, ] != 0)
    out.quant[i, nonz] <- 0.5 * (otus[t2.idx, nonz] - otus[t1.idx, nonz]) / (otus[t2.idx, nonz] + otus[t1.idx, nonz])
    ## Average proportion of each taxon across the two time points.
    out.avgprop[i, ] <- 0.5 * (otus[t2.idx, ] + otus[t1.idx, ])
  }
  return(list(dat.binary = out.binary, dat.quant = out.quant, avg.prop = out.avgprop))
}
#' tsf_long
#'
#' OTU transformation for longitudinal data. Computes average within-subject change
#' (in presence for qualitative metrics, abundance for quantitative metrics)
#' during one unit of time for each taxon.
#'
#' @param otus Matrix of OTU counts or proportions. Notes: (1) Will be transformed to
#' proportions if it's not already; (2) Row names must be sample identifiers
#' (matching metadata), and column names must be OTU identifiers (enforced if
#' using UniFrac distances).
#' @param metadata Data frame with three columns: subject identifiers (n unique values, column name "subjID"),
#' sample identifiers (must match row names of otu.tab, column name "sampID"),
#' and time point or group identifier (if using longitudinal distances, this must be numeric or
#' convertible to numeric).
#'
#' @return List with the following elements. Both data matrices have subject identifiers
#' as row names and OTU identifiers as column names.
#' \item{dat.binary}{n x p matrix of data after longitudinal, binary/qualitative transformation}
#' \item{dat.quant}{n x p matrix of data after longitudinal, quantitative transformation}
#' \item{avg.prop}{n x p matrix with overall average proportion of each taxon}
#'
#' @export
#'
tsf_long <- function(otus, metadata) {
  ## One output row per subject, one column per taxon.
  subj <- unique(metadata$subjID)
  out.data <- matrix(0, nrow = length(subj), ncol = ncol(otus))
  rownames(out.data) <- subj
  colnames(out.data) <- colnames(otus)
  out.binary <- out.quant <- out.data
  out.avgprop <- out.data
  for (i in seq_len(nrow(out.data))) {
    ## Extract this subject's samples, ordered by time. drop = FALSE keeps a
    ## matrix even for a single sample so nrow() below is always defined.
    subj.idx <- which(metadata$subjID == rownames(out.data)[i])
    subj.otu <- otus[subj.idx, , drop = FALSE]
    subj.times <- metadata$time[subj.idx]
    ord <- order(subj.times)
    subj.otu <- subj.otu[ord, , drop = FALSE]
    subj.times <- subj.times[ord]
    qi <- nrow(subj.otu)
    if (qi < 2) {
      ## Original code crashed obscurely here (1:(qi-1) with qi == 1);
      ## fail with an actionable message instead.
      stop("Subject ", rownames(out.data)[i],
           " has fewer than 2 time points; the longitudinal transformation requires at least 2.",
           call. = FALSE)
    }
    ## Accumulate, over consecutive time-point pairs, the per-unit-time
    ## change in presence (dk.uw) and normalized abundance (dk.g), plus the
    ## running sum of proportions (cumprop).
    dk.uw <- rep(0, ncol(otus))
    dk.g <- rep(0, ncol(otus))
    cumprop <- subj.otu[1, ]
    for (j in seq_len(qi - 1)) {
      dk.uw <- dk.uw + (1/(subj.times[j+1] - subj.times[j])) * abs(as.numeric(subj.otu[(j+1), ] > 0) - as.numeric(subj.otu[j, ] > 0))
      ## Skip taxa absent at both ends of the interval (avoids 0/0).
      nonz <- which(subj.otu[(j+1), ] != 0 | subj.otu[j, ] != 0)
      dk.g[nonz] <- dk.g[nonz] + (1/(subj.times[j+1] - subj.times[j])) *
        abs((subj.otu[(j+1), nonz] - subj.otu[j, nonz])/(subj.otu[(j+1), nonz] + subj.otu[j, nonz]))
      cumprop <- cumprop + subj.otu[(j+1), ]
    }
    ## Average over the qi-1 intervals (rates) and qi time points (proportions).
    out.binary[i, ] <- dk.uw/(qi - 1)
    out.quant[i, ] <- dk.g/(qi - 1)
    out.avgprop[i, ] <- cumprop/qi
  }
  return(list(dat.binary = out.binary, dat.quant = out.quant, avg.prop = out.avgprop))
}
#' counts2props
#'
#' Converts OTU counts to OTU proportions/relative abundances.
#'
#' @param x Matrix of OTU counts (rows are subjects, columns are taxa).
#'
#' @return n x p matrix of OTU proportions.
#'
#' @export
#'
counts2props <- function(x) {
  ## Row-wise division by row totals. sweep() keeps the n x p orientation
  ## even when p == 1, whereas the previous t(apply(x, 1, ...)) collapsed a
  ## single-column input to a vector and returned a transposed 1 x n matrix.
  ## It is also vectorized (no per-row R function calls).
  x <- as.matrix(x)
  sweep(x, 1, rowSums(x), "/")
}
#' pltransform
#'
#' OTU transformation for longitudinal data. Computes average within-subject change
#' (in presence for qualitative metrics, abundance for quantitative metrics)
#' during one unit of time for each taxon.
#'
#' @param otus Matrix of OTU counts or proportions. Notes: (1) Will be transformed to
#' proportions if it's not already; (2) Row names must be sample identifiers
#' (matching metadata), and column names must be OTU identifiers (enforced if
#' using UniFrac distances).
#' @param metadata Data frame with three columns: subject identifiers (n unique values, column name "subjID"),
#' sample identifiers (must match row names of otu.tab, column name "sampID"),
#' and time point or group identifier (if using longitudinal distances, this must be numeric or
#' convertible to numeric).
#' @param paired Logical indicating whether to use the paired version of the metric (TRUE) or the
#' longitudinal version (FALSE). Paired analysis is only possible when there are exactly 2
#' unique time points/identifiers for each subject or pair.
#' @param check.input Logical indicating whether to check input values (default TRUE).
#'
#' @return List with the following elements. Both data matrices have subject identifiers
#' as row names and OTU identifiers as column names.
#' \item{dat.binary}{n x p matrix of data after longitudinal, binary/qualitative transformation}
#' \item{dat.quant}{n x p matrix of data after longitudinal, quantitative transformation}
#' \item{avg.prop}{n x p matrix with overall average proportion of each taxon}
#' \item{type}{Type of transformation that was used (paired, balanced longitudinal,
#' unbalanced longitudinal) with a warning if unbalanced longitudinal.}
#' @examples
#' data("paired.otus")
#' data("paired.meta")
#' # paired transformation
#' res1 <- pltransform(paired.otus, paired.meta, paired = TRUE, check.input = TRUE)
#' # longitudinal transformation
#' res2 <- pltransform(paired.otus, paired.meta, paired = FALSE, check.input = TRUE)
#'
#' @export
#'
pltransform <- function(otus, metadata, paired, check.input = TRUE) {
  ## Optionally validate/normalize inputs (check_input also aligns metadata
  ## with the OTU table and converts counts to proportions as needed).
  if (check.input) {
    okdat <- check_input(otus, metadata, paired)
    otus <- okdat$otus
    metadata <- okdat$metadata
    remove(okdat)
  }
  ## Apply the transformation matching the study design; the `type` label is
  ## determined in the same branch so it never reads a variable defined in
  ## another branch (the original computed `balanced` only in the else arm).
  if (paired) {
    res <- tsf_paired(otus, metadata)
    type <- "paired"
  } else {
    res <- tsf_long(otus, metadata)
    ## Balanced design: every time point has the same number of samples.
    balanced <- length(unique(table(metadata$time))) == 1
    if (balanced) {
      type <- "balanced longitudinal"
    } else {
      type <- "unbalanced longitudinal (WARNING: this transformation is not recommended for strongly unbalanced designs!)"
      warning("WARNING: this transformation is not recommended for strongly unbalanced designs!")
    }
  }
  ## Transformed data matrices plus the transformation description.
  return(list(dat.binary = res$dat.binary, dat.quant = res$dat.quant, avg.prop = res$avg.prop, type = type))
}
|
\alias{GtkProgress}
\name{GtkProgress}
\title{GtkProgress}
\description{Base class for GtkProgressBar}
\section{Methods and Functions}{
\code{\link{gtkProgressSetShowText}(object, show.text)}\cr
\code{\link{gtkProgressSetTextAlignment}(object, x.align, y.align)}\cr
\code{\link{gtkProgressSetFormatString}(object, format)}\cr
\code{\link{gtkProgressSetAdjustment}(object, adjustment)}\cr
\code{\link{gtkProgressSetPercentage}(object, percentage)}\cr
\code{\link{gtkProgressSetValue}(object, value)}\cr
\code{\link{gtkProgressGetValue}(object)}\cr
\code{\link{gtkProgressSetActivityMode}(object, activity.mode)}\cr
\code{\link{gtkProgressGetCurrentText}(object)}\cr
\code{\link{gtkProgressGetTextFromValue}(object, value)}\cr
\code{\link{gtkProgressGetCurrentPercentage}(object)}\cr
\code{\link{gtkProgressGetPercentageFromValue}(object, value)}\cr
\code{\link{gtkProgressConfigure}(object, value, min, max)}\cr
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkProgress
+----GtkProgressBar}}
\section{Interfaces}{GtkProgress implements
AtkImplementorIface and \code{\link{GtkBuildable}}.}
\section{Detailed Description}{A \code{\link{GtkProgress}} is the abstract base class used to derive
a \code{\link{GtkProgressBar}} which provides a visual representation of
the progress of a long running operation.}
\section{Structures}{\describe{\item{\verb{GtkProgress}}{
The \code{\link{GtkProgress}} struct contains private data only
and should be accessed using the functions below.
}}}
\section{Properties}{\describe{
\item{\verb{activity-mode} [logical : Read / Write]}{
If TRUE, the GtkProgress is in activity mode, meaning that it signals something is happening, but not how much of the activity is finished. This is used when you're doing something but don't know how long it will take. Default value: FALSE
}
\item{\verb{show-text} [logical : Read / Write]}{
Whether the progress is shown as text. Default value: FALSE
}
\item{\verb{text-xalign} [numeric : Read / Write]}{
The horizontal text alignment, from 0 (left) to 1 (right). Reversed for RTL layouts. Allowed values: [0,1] Default value: 0.5
}
\item{\verb{text-yalign} [numeric : Read / Write]}{
The vertical text alignment, from 0 (top) to 1 (bottom). Allowed values: [0,1] Default value: 0.5
}
}}
\references{\url{http://library.gnome.org/devel//gtk/GtkProgress.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/GtkProgress.Rd | no_license | hjy1210/RGtk2 | R | false | false | 2,549 | rd | \alias{GtkProgress}
\name{GtkProgress}
\title{GtkProgress}
\description{Base class for GtkProgressBar}
\section{Methods and Functions}{
\code{\link{gtkProgressSetShowText}(object, show.text)}\cr
\code{\link{gtkProgressSetTextAlignment}(object, x.align, y.align)}\cr
\code{\link{gtkProgressSetFormatString}(object, format)}\cr
\code{\link{gtkProgressSetAdjustment}(object, adjustment)}\cr
\code{\link{gtkProgressSetPercentage}(object, percentage)}\cr
\code{\link{gtkProgressSetValue}(object, value)}\cr
\code{\link{gtkProgressGetValue}(object)}\cr
\code{\link{gtkProgressSetActivityMode}(object, activity.mode)}\cr
\code{\link{gtkProgressGetCurrentText}(object)}\cr
\code{\link{gtkProgressGetTextFromValue}(object, value)}\cr
\code{\link{gtkProgressGetCurrentPercentage}(object)}\cr
\code{\link{gtkProgressGetPercentageFromValue}(object, value)}\cr
\code{\link{gtkProgressConfigure}(object, value, min, max)}\cr
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkProgress
+----GtkProgressBar}}
\section{Interfaces}{GtkProgress implements
AtkImplementorIface and \code{\link{GtkBuildable}}.}
\section{Detailed Description}{A \code{\link{GtkProgress}} is the abstract base class used to derive
a \code{\link{GtkProgressBar}} which provides a visual representation of
the progress of a long running operation.}
\section{Structures}{\describe{\item{\verb{GtkProgress}}{
The \code{\link{GtkProgress}} struct contains private data only
and should be accessed using the functions below.
}}}
\section{Properties}{\describe{
\item{\verb{activity-mode} [logical : Read / Write]}{
If TRUE, the GtkProgress is in activity mode, meaning that it signals something is happening, but not how much of the activity is finished. This is used when you're doing something but don't know how long it will take. Default value: FALSE
}
\item{\verb{show-text} [logical : Read / Write]}{
Whether the progress is shown as text. Default value: FALSE
}
\item{\verb{text-xalign} [numeric : Read / Write]}{
The horizontal text alignment, from 0 (left) to 1 (right). Reversed for RTL layouts. Allowed values: [0,1] Default value: 0.5
}
\item{\verb{text-yalign} [numeric : Read / Write]}{
The vertical text alignment, from 0 (top) to 1 (bottom). Allowed values: [0,1] Default value: 0.5
}
}}
\references{\url{http://library.gnome.org/devel//gtk/GtkProgress.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Country-contrast analysis: load the cleaned survey data and assemble the
# data list for the rethinking::ulam model fit below.
library(rethinking)
d<-read.table("data.clean.txt",sep="\t",header=T,stringsAsFactors = F)
# Echo column names (interactive sanity check of the loaded table).
names(d)
# Continuous predictors are standardized; G and CD are 1-based integer indices.
# C appears to be a country contrast code (from d$CountC) -- verify upstream.
dat <- list(
C=d$CountC,
A = standardize(d$Age),
G = as.integer(as.factor(d$Gender)), #1 Man, 2 Woman
CD = as.integer(d$COND),
Mfm = standardize(d$MAC.fam),
Mg = standardize(d$MAC.gro),
Mr = standardize(d$MAC.rec),
Mh = standardize(d$MAC.her),
Md = standardize(d$MAC.def),
Mfi = standardize(d$MAC.fai),
Mp = standardize(d$MAC.pro),
P = standardize(d$Precaution),
S = standardize(d$Prosociality),
D = standardize(d$donation),
Dgr=standardize(d$danger.for.participant),
# CC: standardized MAC score concordant with the experimental condition
# (COND 4-10 map to fam/gro/rec/her/def/fai/pro); 0 for conditions 1-3.
CC = ifelse(d$COND==1,0,ifelse(d$COND==2,0,ifelse(d$COND==3,0,ifelse(d$COND==4,standardize(d$MAC.fam),ifelse(d$COND==5,standardize(d$MAC.gro),ifelse(d$COND==6,standardize(d$MAC.rec),ifelse(d$COND==7,standardize(d$MAC.her),ifelse(d$COND==8,standardize(d$MAC.def),ifelse(d$COND==9,standardize(d$MAC.fai),ifelse(d$COND==10,standardize(d$MAC.pro),NA)))))))))) #MAC dimension concordant with the condition
)
# Fit one joint Bayesian model (rethinking::ulam -> Stan). Donation (D) is
# regressed on gender, age, precaution, prosociality, condition, the seven
# MAC dimensions, the condition-concordant MAC score (CC) and perceived
# danger; every coefficient also gets a "*C" interaction term (the contrast
# of interest). P, S, each MAC dimension and Dgr receive analogous
# sub-models so the whole system is estimated simultaneously.
set.seed(42)
mC <- ulam(
alist(
D ~ dnorm(muD,sigmaD),
muD<-aG[G]+aGC[G]*C+bA*A+bAC*A*C+bP*P+bPC*P*C+bS*S+bSC*S*C+aC[CD]+aCC[CD]*C+bCon*CC+bConC*CC*C+bFam*Mfm+bFamC*Mfm*C+bGro*Mg+bGroC*Mg*C+bRec*Mr+bRecC*Mr*C+bHer*Mh+bHerC*Mh*C+bDef*Md+bDefC*Md*C+bFai*Mfi+bFaiC*Mfi*C+bPro*Mp+bProC*Mp*C+bDg*Dgr+bDgC*Dgr*C,
#Donation
aG[G]~dnorm(0,0.2),
bA~dnorm(0,0.5),
bP~dnorm(0,0.5),
bS~dnorm(0,0.5),
aC[CD]~dnorm(0,0.2),
bCon~dnorm(0,0.5),
bFam~dnorm(0,0.5),
bGro~dnorm(0,0.5),
bRec~dnorm(0,0.5),
bHer~dnorm(0,0.5),
bDef~dnorm(0,0.5),
bFai~dnorm(0,0.5),
bPro~dnorm(0,0.5),
bDg~dnorm(0,0.5),
#Model of precaution and prosociality
P ~ dnorm(muP,sigmaP),
S ~ dnorm(muS,sigmaS),
muP<-aGP[G]+aGPC[G]*C+bAP*A+bAPC*A*C+aCP[CD]+aCPC[CD]*C+bConP*CC+bConPC*CC*C+bFamP*Mfm+bFamPC*Mfm*C+bGroP*Mg+bGroPC*Mg*C+bRecP*Mr+bRecPC*Mr*C+bHerP*Mh+bHerPC*Mh*C+bDefP*Md+bDefPC*Md*C+bFaiP*Mfi+bFaiPC*Mfi*C+bProP*Mp+bProPC*Mp*C+bDgP*Dgr+bDgPC*Dgr*C,
muS<-aGS[G]+aGSC[G]*C+bAS*A+bASC*A*C+aCS[CD]+aCSC[CD]*C+bConS*CC+bConSC*CC*C+bFamS*Mfm+bFamSC*Mfm*C+bGroS*Mg+bGroSC*Mg*C+bRecS*Mr+bRecSC*Mr*C+bHerS*Mh+bHerSC*Mh*C+bDefS*Md+bDefSC*Md*C+bFaiS*Mfi+bFaiSC*Mfi*C+bProS*Mp+bProSC*Mp*C+bDgS*Dgr+bDgSC*Dgr*C,
#Priors
#Precaution
aGP[G]~dnorm(0,0.2),
bAP~dnorm(0,0.5),
aCP[CD]~dnorm(0,0.2),
bConP~dnorm(0,0.5),
bFamP~dnorm(0,0.5),
bGroP~dnorm(0,0.5),
bRecP~dnorm(0,0.5),
bHerP~dnorm(0,0.5),
bDefP~dnorm(0,0.5),
bFaiP~dnorm(0,0.5),
bProP~dnorm(0,0.5),
bDgP~dnorm(0,0.5),
#ProSociality
aGS[G]~dnorm(0,0.2),
bAS~dnorm(0,0.5),
aCS[CD]~dnorm(0,0.2),
bConS~dnorm(0,0.5),
bFamS~dnorm(0,0.5),
bGroS~dnorm(0,0.5),
bRecS~dnorm(0,0.5),
bHerS~dnorm(0,0.5),
bDefS~dnorm(0,0.5),
bFaiS~dnorm(0,0.5),
bProS~dnorm(0,0.5),
bDgS~dnorm(0,0.5),
#sigmas
sigmaD~dexp(1),
sigmaP~dexp(1),
sigmaS~dexp(1),
#Models of MAC dimensions
Mfm ~ dnorm(mu_Fam,sigma_Fam),
Mg ~ dnorm(mu_Gro,sigma_Gro),
Mr ~ dnorm(mu_Rec,sigma_Rec),
Mh ~ dnorm(mu_Her,sigma_Her),
Md ~ dnorm(mu_Def,sigma_Def),
Mfi ~ dnorm(mu_Fai,sigma_Fai),
Mp ~ dnorm(mu_Pro,sigma_Pro),
mu_Fam<-aG_Fam[G]+aG_FamC[G]*C+bAge_Fam*A+bAge_FamC*A*C,
mu_Gro<-aG_Gro[G]+aG_GroC[G]*C+bAge_Gro*A+bAge_GroC*A*C,
mu_Rec<-aG_Rec[G]+aG_RecC[G]*C+bAge_Rec*A+bAge_RecC*A*C,
mu_Her<-aG_Her[G]+aG_HerC[G]*C+bAge_Her*A+bAge_HerC*A*C,
mu_Def<-aG_Def[G]+aG_DefC[G]*C+bAge_Def*A+bAge_DefC*A*C,
mu_Fai<-aG_Fai[G]+aG_FaiC[G]*C+bAge_Fai*A+bAge_FaiC*A*C,
mu_Pro<-aG_Pro[G]+aG_ProC[G]*C+bAge_Pro*A+bAge_ProC*A*C,
#priors of MAC intercepts and slopes
aG_Fam[G]~dnorm(0,0.2),
aG_Gro[G]~dnorm(0,0.2),
aG_Rec[G]~dnorm(0,0.2),
aG_Her[G]~dnorm(0,0.2),
aG_Def[G]~dnorm(0,0.2),
aG_Fai[G]~dnorm(0,0.2),
aG_Pro[G]~dnorm(0,0.2),
bAge_Fam~dnorm(0,0.5),
bAge_Gro~dnorm(0,0.5),
bAge_Rec~dnorm(0,0.5),
bAge_Her~dnorm(0,0.5),
bAge_Def~dnorm(0,0.5),
bAge_Fai~dnorm(0,0.5),
bAge_Pro~dnorm(0,0.5),
#sigmas
sigma_Fam~dexp(1),
sigma_Gro~dexp(1),
sigma_Rec~dexp(1),
sigma_Her~dexp(1),
sigma_Def~dexp(1),
sigma_Fai~dexp(1),
sigma_Pro~dexp(1),
#model of how dangerous COVID is perceived for the participant
Dgr ~ dnorm(mu_Dang,sigma_Dang),
mu_Dang<-aG_Dang[G]+aG_DangC[G]*C+bAge_Dang*A+bAge_DangC*A*C,
#priors
aG_Dang[G]~dnorm(0,0.2),
bAge_Dang~dnorm(0,0.5),
sigma_Dang~dexp(1),
#Contrasts priors
#Donation
aGC[G]~dnorm(0,0.2),
bAC~dnorm(0,0.2),
bPC~dnorm(0,0.2),
bSC~dnorm(0,0.2),
aCC[CD]~dnorm(0,0.2),
bConC~dnorm(0,0.2),
bFamC~dnorm(0,0.2),
bGroC~dnorm(0,0.2),
bRecC~dnorm(0,0.2),
bHerC~dnorm(0,0.2),
bDefC~dnorm(0,0.2),
bFaiC~dnorm(0,0.2),
bProC~dnorm(0,0.2),
bDgC~dnorm(0,0.2),
#Precaution
aGPC[G]~dnorm(0,0.2),
bAPC~dnorm(0,0.2),
aCPC[CD]~dnorm(0,0.2),
bConPC~dnorm(0,0.2),
bFamPC~dnorm(0,0.2),
bGroPC~dnorm(0,0.2),
bRecPC~dnorm(0,0.2),
bHerPC~dnorm(0,0.2),
bDefPC~dnorm(0,0.2),
bFaiPC~dnorm(0,0.2),
bProPC~dnorm(0,0.2),
bDgPC~dnorm(0,0.2),
#Prosociality
aGSC[G]~dnorm(0,0.2),
bASC~dnorm(0,0.2),
aCSC[CD]~dnorm(0,0.2),
bConSC~dnorm(0,0.2),
bFamSC~dnorm(0,0.2),
bGroSC~dnorm(0,0.2),
bRecSC~dnorm(0,0.2),
bHerSC~dnorm(0,0.2),
bDefSC~dnorm(0,0.2),
bFaiSC~dnorm(0,0.2),
bProSC~dnorm(0,0.2),
bDgSC~dnorm(0,0.2),
#contrasts in MAC dimension predictors
aG_FamC[G]~dnorm(0,0.2),
aG_GroC[G]~dnorm(0,0.2),
aG_RecC[G]~dnorm(0,0.2),
aG_HerC[G]~dnorm(0,0.2),
aG_DefC[G]~dnorm(0,0.2),
aG_FaiC[G]~dnorm(0,0.2),
aG_ProC[G]~dnorm(0,0.2),
bAge_FamC~dnorm(0,0.2),
bAge_GroC~dnorm(0,0.2),
bAge_RecC~dnorm(0,0.2),
bAge_HerC~dnorm(0,0.2),
bAge_DefC~dnorm(0,0.2),
bAge_FaiC~dnorm(0,0.2),
bAge_ProC~dnorm(0,0.2),
#danger contrasts
aG_DangC[G]~dnorm(0,0.2),
bAge_DangC~dnorm(0,0.2)
# 4 chains x 1000 iterations; log_lik=TRUE stores pointwise log-likelihood
# so WAIC/PSIS model comparison is possible afterwards.
) , data=dat, chains=4 , cores=4 , log_lik=TRUE ,iter = 1000,control=list(max_treedepth=10,adapt_delta=0.95))
#Summarize the model (posterior means, intervals, Rhat) for all parameters
precis(mC,depth=2)
#Sample posterior and prior for graphical comparison
postC<-extract.samples(mC)
set.seed(42)
prioC<-extract.prior(mC,n=2000)
# Persist the whole workspace (model + samples) for downstream plotting.
save.image(file="posterior_samples_contrasts.RData")
| /02_analysis_country_contrast.R | no_license | costlysignalling/coronaMACprereg | R | false | false | 6,851 | r | library(rethinking)
# Country-contrast analysis: load the cleaned survey data and assemble the
# data list for the rethinking::ulam model fit below.
d<-read.table("data.clean.txt",sep="\t",header=T,stringsAsFactors = F)
# Echo column names (interactive sanity check of the loaded table).
names(d)
# Continuous predictors are standardized; G and CD are 1-based integer indices.
# C appears to be a country contrast code (from d$CountC) -- verify upstream.
dat <- list(
C=d$CountC,
A = standardize(d$Age),
G = as.integer(as.factor(d$Gender)), #1 Man, 2 Woman
CD = as.integer(d$COND),
Mfm = standardize(d$MAC.fam),
Mg = standardize(d$MAC.gro),
Mr = standardize(d$MAC.rec),
Mh = standardize(d$MAC.her),
Md = standardize(d$MAC.def),
Mfi = standardize(d$MAC.fai),
Mp = standardize(d$MAC.pro),
P = standardize(d$Precaution),
S = standardize(d$Prosociality),
D = standardize(d$donation),
Dgr=standardize(d$danger.for.participant),
# CC: standardized MAC score concordant with the experimental condition
# (COND 4-10 map to fam/gro/rec/her/def/fai/pro); 0 for conditions 1-3.
CC = ifelse(d$COND==1,0,ifelse(d$COND==2,0,ifelse(d$COND==3,0,ifelse(d$COND==4,standardize(d$MAC.fam),ifelse(d$COND==5,standardize(d$MAC.gro),ifelse(d$COND==6,standardize(d$MAC.rec),ifelse(d$COND==7,standardize(d$MAC.her),ifelse(d$COND==8,standardize(d$MAC.def),ifelse(d$COND==9,standardize(d$MAC.fai),ifelse(d$COND==10,standardize(d$MAC.pro),NA)))))))))) #MAC dimension concordant with the condition
)
# Fit one joint Bayesian model (rethinking::ulam -> Stan). Donation (D) is
# regressed on gender, age, precaution, prosociality, condition, the seven
# MAC dimensions, the condition-concordant MAC score (CC) and perceived
# danger; every coefficient also gets a "*C" interaction term (the contrast
# of interest). P, S, each MAC dimension and Dgr receive analogous
# sub-models so the whole system is estimated simultaneously.
set.seed(42)
mC <- ulam(
alist(
D ~ dnorm(muD,sigmaD),
muD<-aG[G]+aGC[G]*C+bA*A+bAC*A*C+bP*P+bPC*P*C+bS*S+bSC*S*C+aC[CD]+aCC[CD]*C+bCon*CC+bConC*CC*C+bFam*Mfm+bFamC*Mfm*C+bGro*Mg+bGroC*Mg*C+bRec*Mr+bRecC*Mr*C+bHer*Mh+bHerC*Mh*C+bDef*Md+bDefC*Md*C+bFai*Mfi+bFaiC*Mfi*C+bPro*Mp+bProC*Mp*C+bDg*Dgr+bDgC*Dgr*C,
#Donation
aG[G]~dnorm(0,0.2),
bA~dnorm(0,0.5),
bP~dnorm(0,0.5),
bS~dnorm(0,0.5),
aC[CD]~dnorm(0,0.2),
bCon~dnorm(0,0.5),
bFam~dnorm(0,0.5),
bGro~dnorm(0,0.5),
bRec~dnorm(0,0.5),
bHer~dnorm(0,0.5),
bDef~dnorm(0,0.5),
bFai~dnorm(0,0.5),
bPro~dnorm(0,0.5),
bDg~dnorm(0,0.5),
#Model of precaution and prosociality
P ~ dnorm(muP,sigmaP),
S ~ dnorm(muS,sigmaS),
muP<-aGP[G]+aGPC[G]*C+bAP*A+bAPC*A*C+aCP[CD]+aCPC[CD]*C+bConP*CC+bConPC*CC*C+bFamP*Mfm+bFamPC*Mfm*C+bGroP*Mg+bGroPC*Mg*C+bRecP*Mr+bRecPC*Mr*C+bHerP*Mh+bHerPC*Mh*C+bDefP*Md+bDefPC*Md*C+bFaiP*Mfi+bFaiPC*Mfi*C+bProP*Mp+bProPC*Mp*C+bDgP*Dgr+bDgPC*Dgr*C,
muS<-aGS[G]+aGSC[G]*C+bAS*A+bASC*A*C+aCS[CD]+aCSC[CD]*C+bConS*CC+bConSC*CC*C+bFamS*Mfm+bFamSC*Mfm*C+bGroS*Mg+bGroSC*Mg*C+bRecS*Mr+bRecSC*Mr*C+bHerS*Mh+bHerSC*Mh*C+bDefS*Md+bDefSC*Md*C+bFaiS*Mfi+bFaiSC*Mfi*C+bProS*Mp+bProSC*Mp*C+bDgS*Dgr+bDgSC*Dgr*C,
#Priors
#Precaution
aGP[G]~dnorm(0,0.2),
bAP~dnorm(0,0.5),
aCP[CD]~dnorm(0,0.2),
bConP~dnorm(0,0.5),
bFamP~dnorm(0,0.5),
bGroP~dnorm(0,0.5),
bRecP~dnorm(0,0.5),
bHerP~dnorm(0,0.5),
bDefP~dnorm(0,0.5),
bFaiP~dnorm(0,0.5),
bProP~dnorm(0,0.5),
bDgP~dnorm(0,0.5),
#ProSociality
aGS[G]~dnorm(0,0.2),
bAS~dnorm(0,0.5),
aCS[CD]~dnorm(0,0.2),
bConS~dnorm(0,0.5),
bFamS~dnorm(0,0.5),
bGroS~dnorm(0,0.5),
bRecS~dnorm(0,0.5),
bHerS~dnorm(0,0.5),
bDefS~dnorm(0,0.5),
bFaiS~dnorm(0,0.5),
bProS~dnorm(0,0.5),
bDgS~dnorm(0,0.5),
#sigmas
sigmaD~dexp(1),
sigmaP~dexp(1),
sigmaS~dexp(1),
#Models of MAC dimensions
Mfm ~ dnorm(mu_Fam,sigma_Fam),
Mg ~ dnorm(mu_Gro,sigma_Gro),
Mr ~ dnorm(mu_Rec,sigma_Rec),
Mh ~ dnorm(mu_Her,sigma_Her),
Md ~ dnorm(mu_Def,sigma_Def),
Mfi ~ dnorm(mu_Fai,sigma_Fai),
Mp ~ dnorm(mu_Pro,sigma_Pro),
mu_Fam<-aG_Fam[G]+aG_FamC[G]*C+bAge_Fam*A+bAge_FamC*A*C,
mu_Gro<-aG_Gro[G]+aG_GroC[G]*C+bAge_Gro*A+bAge_GroC*A*C,
mu_Rec<-aG_Rec[G]+aG_RecC[G]*C+bAge_Rec*A+bAge_RecC*A*C,
mu_Her<-aG_Her[G]+aG_HerC[G]*C+bAge_Her*A+bAge_HerC*A*C,
mu_Def<-aG_Def[G]+aG_DefC[G]*C+bAge_Def*A+bAge_DefC*A*C,
mu_Fai<-aG_Fai[G]+aG_FaiC[G]*C+bAge_Fai*A+bAge_FaiC*A*C,
mu_Pro<-aG_Pro[G]+aG_ProC[G]*C+bAge_Pro*A+bAge_ProC*A*C,
#priors of MAC intercepts and slopes
aG_Fam[G]~dnorm(0,0.2),
aG_Gro[G]~dnorm(0,0.2),
aG_Rec[G]~dnorm(0,0.2),
aG_Her[G]~dnorm(0,0.2),
aG_Def[G]~dnorm(0,0.2),
aG_Fai[G]~dnorm(0,0.2),
aG_Pro[G]~dnorm(0,0.2),
bAge_Fam~dnorm(0,0.5),
bAge_Gro~dnorm(0,0.5),
bAge_Rec~dnorm(0,0.5),
bAge_Her~dnorm(0,0.5),
bAge_Def~dnorm(0,0.5),
bAge_Fai~dnorm(0,0.5),
bAge_Pro~dnorm(0,0.5),
#sigmas
sigma_Fam~dexp(1),
sigma_Gro~dexp(1),
sigma_Rec~dexp(1),
sigma_Her~dexp(1),
sigma_Def~dexp(1),
sigma_Fai~dexp(1),
sigma_Pro~dexp(1),
#model of how dangerous COVID is perceived for the participant
Dgr ~ dnorm(mu_Dang,sigma_Dang),
mu_Dang<-aG_Dang[G]+aG_DangC[G]*C+bAge_Dang*A+bAge_DangC*A*C,
#priors
aG_Dang[G]~dnorm(0,0.2),
bAge_Dang~dnorm(0,0.5),
sigma_Dang~dexp(1),
#Contrasts priors
#Donation
aGC[G]~dnorm(0,0.2),
bAC~dnorm(0,0.2),
bPC~dnorm(0,0.2),
bSC~dnorm(0,0.2),
aCC[CD]~dnorm(0,0.2),
bConC~dnorm(0,0.2),
bFamC~dnorm(0,0.2),
bGroC~dnorm(0,0.2),
bRecC~dnorm(0,0.2),
bHerC~dnorm(0,0.2),
bDefC~dnorm(0,0.2),
bFaiC~dnorm(0,0.2),
bProC~dnorm(0,0.2),
bDgC~dnorm(0,0.2),
#Precaution
aGPC[G]~dnorm(0,0.2),
bAPC~dnorm(0,0.2),
aCPC[CD]~dnorm(0,0.2),
bConPC~dnorm(0,0.2),
bFamPC~dnorm(0,0.2),
bGroPC~dnorm(0,0.2),
bRecPC~dnorm(0,0.2),
bHerPC~dnorm(0,0.2),
bDefPC~dnorm(0,0.2),
bFaiPC~dnorm(0,0.2),
bProPC~dnorm(0,0.2),
bDgPC~dnorm(0,0.2),
#Prosociality
aGSC[G]~dnorm(0,0.2),
bASC~dnorm(0,0.2),
aCSC[CD]~dnorm(0,0.2),
bConSC~dnorm(0,0.2),
bFamSC~dnorm(0,0.2),
bGroSC~dnorm(0,0.2),
bRecSC~dnorm(0,0.2),
bHerSC~dnorm(0,0.2),
bDefSC~dnorm(0,0.2),
bFaiSC~dnorm(0,0.2),
bProSC~dnorm(0,0.2),
bDgSC~dnorm(0,0.2),
#contrasts in MAC dimension predictors
aG_FamC[G]~dnorm(0,0.2),
aG_GroC[G]~dnorm(0,0.2),
aG_RecC[G]~dnorm(0,0.2),
aG_HerC[G]~dnorm(0,0.2),
aG_DefC[G]~dnorm(0,0.2),
aG_FaiC[G]~dnorm(0,0.2),
aG_ProC[G]~dnorm(0,0.2),
bAge_FamC~dnorm(0,0.2),
bAge_GroC~dnorm(0,0.2),
bAge_RecC~dnorm(0,0.2),
bAge_HerC~dnorm(0,0.2),
bAge_DefC~dnorm(0,0.2),
bAge_FaiC~dnorm(0,0.2),
bAge_ProC~dnorm(0,0.2),
#danger contrasts
aG_DangC[G]~dnorm(0,0.2),
bAge_DangC~dnorm(0,0.2)
# 4 chains x 1000 iterations; log_lik=TRUE stores pointwise log-likelihood
# so WAIC/PSIS model comparison is possible afterwards.
) , data=dat, chains=4 , cores=4 , log_lik=TRUE ,iter = 1000,control=list(max_treedepth=10,adapt_delta=0.95))
#Summarize the model (posterior means, intervals, Rhat) for all parameters
precis(mC,depth=2)
#Sample posterior and prior for graphical comparison
postC<-extract.samples(mC)
set.seed(42)
prioC<-extract.prior(mC,n=2000)
# Persist the whole workspace (model + samples) for downstream plotting.
save.image(file="posterior_samples_contrasts.RData")
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.38354125570936e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609866618-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 831 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.38354125570936e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/template.R
\name{metric_summarizer}
\alias{metric_summarizer}
\title{Developer function for summarizing new metrics}
\usage{
metric_summarizer(
metric_nm,
metric_fn,
data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
...,
metric_fn_options = list()
)
}
\arguments{
\item{metric_nm}{A single character representing the name of the metric to
use in the \code{tibble} output. This will be modified to include the type
of averaging if appropriate.}
\item{metric_fn}{The vector version of your custom metric function. It
generally takes \code{truth}, \code{estimate}, \code{na_rm}, and any other extra arguments
needed to calculate the metric.}
\item{data}{The data frame with \code{truth} and \code{estimate} columns passed
in from the data frame version of your metric function that called
\code{metric_summarizer()}.}
\item{truth}{The unquoted column name corresponding to the \code{truth} column.}
\item{estimate}{Generally, the unquoted column name corresponding to
the \code{estimate} column. For metrics that take multiple columns through \code{...}
like class probability metrics, this is a result of \code{\link[=dots_to_estimate]{dots_to_estimate()}}.}
\item{estimator}{For numeric metrics, this is left as \code{NA} so averaging
is not passed on to the metric function implementation. For classification
metrics, this can either be \code{NULL} for the default auto-selection of
averaging (\code{"binary"} or \code{"macro"}), or a single character to pass along
to the metric implementation describing the kind of averaging to use.}
\item{na_rm}{A \code{logical} value indicating whether \code{NA} values should be
stripped before the computation proceeds. The removal is executed in
\code{metric_vec_template()}.}
\item{...}{Currently not used. Metric specific options are passed in
through \code{metric_fn_options}.}
\item{metric_fn_options}{A named list of metric specific options. These
are spliced into the metric function call using \verb{!!!} from \code{rlang}. The
default results in nothing being spliced into the call.}
}
\description{
\code{metric_summarizer()} is useful alongside \code{\link[=metric_vec_template]{metric_vec_template()}} for
implementing new custom metrics. \code{metric_summarizer()} calls the metric
function inside \code{dplyr::summarise()}. \code{metric_vec_template()} is a
generalized function that calls the core implementation of a metric function,
and includes a number of checks on the types, lengths, and argument inputs.
See \code{vignette("custom-metrics", "yardstick")} for more information.
}
\details{
\code{metric_summarizer()} is generally called from the data frame version
of your metric function. It knows how to call your metric over grouped data
frames and returns a \code{tibble} consistent with other metrics.
}
\seealso{
\code{\link[=metric_vec_template]{metric_vec_template()}} \code{\link[=finalize_estimator]{finalize_estimator()}} \code{\link[=dots_to_estimate]{dots_to_estimate()}}
}
| /man/metric_summarizer.Rd | no_license | trangdata/yardstick | R | false | true | 3,058 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/template.R
\name{metric_summarizer}
\alias{metric_summarizer}
\title{Developer function for summarizing new metrics}
\usage{
metric_summarizer(
metric_nm,
metric_fn,
data,
truth,
estimate,
estimator = NULL,
na_rm = TRUE,
...,
metric_fn_options = list()
)
}
\arguments{
\item{metric_nm}{A single character representing the name of the metric to
use in the \code{tibble} output. This will be modified to include the type
of averaging if appropriate.}
\item{metric_fn}{The vector version of your custom metric function. It
generally takes \code{truth}, \code{estimate}, \code{na_rm}, and any other extra arguments
needed to calculate the metric.}
\item{data}{The data frame with \code{truth} and \code{estimate} columns passed
in from the data frame version of your metric function that called
\code{metric_summarizer()}.}
\item{truth}{The unquoted column name corresponding to the \code{truth} column.}
\item{estimate}{Generally, the unquoted column name corresponding to
the \code{estimate} column. For metrics that take multiple columns through \code{...}
like class probability metrics, this is a result of \code{\link[=dots_to_estimate]{dots_to_estimate()}}.}
\item{estimator}{For numeric metrics, this is left as \code{NA} so averaging
is not passed on to the metric function implementation. For classification
metrics, this can either be \code{NULL} for the default auto-selection of
averaging (\code{"binary"} or \code{"macro"}), or a single character to pass along
to the metric implementation describing the kind of averaging to use.}
\item{na_rm}{A \code{logical} value indicating whether \code{NA} values should be
stripped before the computation proceeds. The removal is executed in
\code{metric_vec_template()}.}
\item{...}{Currently not used. Metric specific options are passed in
through \code{metric_fn_options}.}
\item{metric_fn_options}{A named list of metric specific options. These
are spliced into the metric function call using \verb{!!!} from \code{rlang}. The
default results in nothing being spliced into the call.}
}
\description{
\code{metric_summarizer()} is useful alongside \code{\link[=metric_vec_template]{metric_vec_template()}} for
implementing new custom metrics. \code{metric_summarizer()} calls the metric
function inside \code{dplyr::summarise()}. \code{metric_vec_template()} is a
generalized function that calls the core implementation of a metric function,
and includes a number of checks on the types, lengths, and argument inputs.
See \code{vignette("custom-metrics", "yardstick")} for more information.
}
\details{
\code{metric_summarizer()} is generally called from the data frame version
of your metric function. It knows how to call your metric over grouped data
frames and returns a \code{tibble} consistent with other metrics.
}
\seealso{
\code{\link[=metric_vec_template]{metric_vec_template()}} \code{\link[=finalize_estimator]{finalize_estimator()}} \code{\link[=dots_to_estimate]{dots_to_estimate()}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kendra_operations.R
\name{kendra_create_data_source}
\alias{kendra_create_data_source}
\title{Creates a data source that you use with an Amazon Kendra index}
\usage{
kendra_create_data_source(Name, IndexId, Type, Configuration,
Description, Schedule, RoleArn, Tags, ClientToken)
}
\arguments{
\item{Name}{[required] A unique name for the data source. A data source name can't be changed
without deleting and recreating the data source.}
\item{IndexId}{[required] The identifier of the index that should be associated with this data
source.}
\item{Type}{[required] The type of repository that contains the data source.}
\item{Configuration}{The connector configuration information that is required to access the
repository.
You can't specify the \code{Configuration} parameter when the \code{Type}
parameter is set to \code{CUSTOM}. If you do, you receive a
\code{ValidationException} exception.
The \code{Configuration} parameter is required for all other data sources.}
\item{Description}{A description for the data source.}
\item{Schedule}{Sets the frequency that Amazon Kendra will check the documents in your
repository and update the index. If you don't set a schedule Amazon
Kendra will not periodically update the index. You can call the
\code{StartDataSourceSyncJob} operation to update the index.
You can't specify the \code{Schedule} parameter when the \code{Type} parameter is
set to \code{CUSTOM}. If you do, you receive a \code{ValidationException}
exception.}
\item{RoleArn}{The Amazon Resource Name (ARN) of a role with permission to access the
data source. For more information, see \href{https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html}{IAM Roles for Amazon Kendra}.
You can't specify the \code{RoleArn} parameter when the \code{Type} parameter is
set to \code{CUSTOM}. If you do, you receive a \code{ValidationException}
exception.
The \code{RoleArn} parameter is required for all other data sources.}
\item{Tags}{A list of key-value pairs that identify the data source. You can use the
tags to identify and organize your resources and to control access to
resources.}
\item{ClientToken}{A token that you provide to identify the request to create a data
source. Multiple calls to the \code{CreateDataSource} operation with the same
client token will create only one data source.}
}
\description{
Creates a data source that you use with an Amazon Kendra index.
You specify a name, data source connector type and description for your
data source. You also specify configuration information such as document
metadata (author, source URI, and so on) and user context information.
\code{CreateDataSource} is a synchronous operation. The operation returns 200
if the data source was successfully created. Otherwise, an exception is
raised.
}
\section{Request syntax}{
\preformatted{svc$create_data_source(
Name = "string",
IndexId = "string",
Type = "S3"|"SHAREPOINT"|"DATABASE"|"SALESFORCE"|"ONEDRIVE"|"SERVICENOW"|"CUSTOM"|"CONFLUENCE"|"GOOGLEDRIVE",
Configuration = list(
S3Configuration = list(
BucketName = "string",
InclusionPrefixes = list(
"string"
),
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
DocumentsMetadataConfiguration = list(
S3Prefix = "string"
),
AccessControlListConfiguration = list(
KeyPath = "string"
)
),
SharePointConfiguration = list(
SharePointVersion = "SHAREPOINT_ONLINE",
Urls = list(
"string"
),
SecretArn = "string",
CrawlAttachments = TRUE|FALSE,
UseChangeLog = TRUE|FALSE,
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
VpcConfiguration = list(
SubnetIds = list(
"string"
),
SecurityGroupIds = list(
"string"
)
),
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
DocumentTitleFieldName = "string",
DisableLocalGroups = TRUE|FALSE
),
DatabaseConfiguration = list(
DatabaseEngineType = "RDS_AURORA_MYSQL"|"RDS_AURORA_POSTGRESQL"|"RDS_MYSQL"|"RDS_POSTGRESQL",
ConnectionConfiguration = list(
DatabaseHost = "string",
DatabasePort = 123,
DatabaseName = "string",
TableName = "string",
SecretArn = "string"
),
VpcConfiguration = list(
SubnetIds = list(
"string"
),
SecurityGroupIds = list(
"string"
)
),
ColumnConfiguration = list(
DocumentIdColumnName = "string",
DocumentDataColumnName = "string",
DocumentTitleColumnName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
ChangeDetectingColumns = list(
"string"
)
),
AclConfiguration = list(
AllowedGroupsColumnName = "string"
),
SqlConfiguration = list(
QueryIdentifiersEnclosingOption = "DOUBLE_QUOTES"|"NONE"
)
),
SalesforceConfiguration = list(
ServerUrl = "string",
SecretArn = "string",
StandardObjectConfigurations = list(
list(
Name = "ACCOUNT"|"CAMPAIGN"|"CASE"|"CONTACT"|"CONTRACT"|"DOCUMENT"|"GROUP"|"IDEA"|"LEAD"|"OPPORTUNITY"|"PARTNER"|"PRICEBOOK"|"PRODUCT"|"PROFILE"|"SOLUTION"|"TASK"|"USER",
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
)
),
KnowledgeArticleConfiguration = list(
IncludedStates = list(
"DRAFT"|"PUBLISHED"|"ARCHIVED"
),
StandardKnowledgeArticleTypeConfiguration = list(
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
CustomKnowledgeArticleTypeConfigurations = list(
list(
Name = "string",
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
)
)
),
ChatterFeedConfiguration = list(
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
IncludeFilterTypes = list(
"ACTIVE_USER"|"STANDARD_USER"
)
),
CrawlAttachments = TRUE|FALSE,
StandardObjectAttachmentConfiguration = list(
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
IncludeAttachmentFilePatterns = list(
"string"
),
ExcludeAttachmentFilePatterns = list(
"string"
)
),
OneDriveConfiguration = list(
TenantDomain = "string",
SecretArn = "string",
OneDriveUsers = list(
OneDriveUserList = list(
"string"
),
OneDriveUserS3Path = list(
Bucket = "string",
Key = "string"
)
),
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
DisableLocalGroups = TRUE|FALSE
),
ServiceNowConfiguration = list(
HostUrl = "string",
SecretArn = "string",
ServiceNowBuildVersion = "LONDON"|"OTHERS",
KnowledgeArticleConfiguration = list(
CrawlAttachments = TRUE|FALSE,
IncludeAttachmentFilePatterns = list(
"string"
),
ExcludeAttachmentFilePatterns = list(
"string"
),
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
ServiceCatalogConfiguration = list(
CrawlAttachments = TRUE|FALSE,
IncludeAttachmentFilePatterns = list(
"string"
),
ExcludeAttachmentFilePatterns = list(
"string"
),
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
)
),
ConfluenceConfiguration = list(
ServerUrl = "string",
SecretArn = "string",
Version = "CLOUD"|"SERVER",
SpaceConfiguration = list(
CrawlPersonalSpaces = TRUE|FALSE,
CrawlArchivedSpaces = TRUE|FALSE,
IncludeSpaces = list(
"string"
),
ExcludeSpaces = list(
"string"
),
SpaceFieldMappings = list(
list(
DataSourceFieldName = "DISPLAY_URL"|"ITEM_TYPE"|"SPACE_KEY"|"URL",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
PageConfiguration = list(
PageFieldMappings = list(
list(
DataSourceFieldName = "AUTHOR"|"CONTENT_STATUS"|"CREATED_DATE"|"DISPLAY_URL"|"ITEM_TYPE"|"LABELS"|"MODIFIED_DATE"|"PARENT_ID"|"SPACE_KEY"|"SPACE_NAME"|"URL"|"VERSION",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
BlogConfiguration = list(
BlogFieldMappings = list(
list(
DataSourceFieldName = "AUTHOR"|"DISPLAY_URL"|"ITEM_TYPE"|"LABELS"|"PUBLISH_DATE"|"SPACE_KEY"|"SPACE_NAME"|"URL"|"VERSION",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
AttachmentConfiguration = list(
CrawlAttachments = TRUE|FALSE,
AttachmentFieldMappings = list(
list(
DataSourceFieldName = "AUTHOR"|"CONTENT_TYPE"|"CREATED_DATE"|"DISPLAY_URL"|"FILE_SIZE"|"ITEM_TYPE"|"PARENT_ID"|"SPACE_KEY"|"SPACE_NAME"|"URL"|"VERSION",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
VpcConfiguration = list(
SubnetIds = list(
"string"
),
SecurityGroupIds = list(
"string"
)
),
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
)
),
GoogleDriveConfiguration = list(
SecretArn = "string",
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
ExcludeMimeTypes = list(
"string"
),
ExcludeUserAccounts = list(
"string"
),
ExcludeSharedDrives = list(
"string"
)
)
),
Description = "string",
Schedule = "string",
RoleArn = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
),
ClientToken = "string"
)
}
}
\keyword{internal}
| /paws/man/kendra_create_data_source.Rd | permissive | sanchezvivi/paws | R | false | true | 12,394 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kendra_operations.R
\name{kendra_create_data_source}
\alias{kendra_create_data_source}
\title{Creates a data source that you use with an Amazon Kendra index}
\usage{
kendra_create_data_source(Name, IndexId, Type, Configuration,
Description, Schedule, RoleArn, Tags, ClientToken)
}
\arguments{
\item{Name}{[required] A unique name for the data source. A data source name can't be changed
without deleting and recreating the data source.}
\item{IndexId}{[required] The identifier of the index that should be associated with this data
source.}
\item{Type}{[required] The type of repository that contains the data source.}
\item{Configuration}{The connector configuration information that is required to access the
repository.
You can't specify the \code{Configuration} parameter when the \code{Type}
parameter is set to \code{CUSTOM}. If you do, you receive a
\code{ValidationException} exception.
The \code{Configuration} parameter is required for all other data sources.}
\item{Description}{A description for the data source.}
\item{Schedule}{Sets the frequency that Amazon Kendra will check the documents in your
repository and update the index. If you don't set a schedule Amazon
Kendra will not periodically update the index. You can call the
\code{StartDataSourceSyncJob} operation to update the index.
You can't specify the \code{Schedule} parameter when the \code{Type} parameter is
set to \code{CUSTOM}. If you do, you receive a \code{ValidationException}
exception.}
\item{RoleArn}{The Amazon Resource Name (ARN) of a role with permission to access the
data source. For more information, see \href{https://docs.aws.amazon.com/kendra/latest/dg/iam-roles.html}{IAM Roles for Amazon Kendra}.
You can't specify the \code{RoleArn} parameter when the \code{Type} parameter is
set to \code{CUSTOM}. If you do, you receive a \code{ValidationException}
exception.
The \code{RoleArn} parameter is required for all other data sources.}
\item{Tags}{A list of key-value pairs that identify the data source. You can use the
tags to identify and organize your resources and to control access to
resources.}
\item{ClientToken}{A token that you provide to identify the request to create a data
source. Multiple calls to the \code{CreateDataSource} operation with the same
client token will create only one data source.}
}
\description{
Creates a data source that you use with an Amazon Kendra index.
You specify a name, data source connector type and description for your
data source. You also specify configuration information such as document
metadata (author, source URI, and so on) and user context information.
\code{CreateDataSource} is a synchronous operation. The operation returns 200
if the data source was successfully created. Otherwise, an exception is
raised.
}
\section{Request syntax}{
\preformatted{svc$create_data_source(
Name = "string",
IndexId = "string",
Type = "S3"|"SHAREPOINT"|"DATABASE"|"SALESFORCE"|"ONEDRIVE"|"SERVICENOW"|"CUSTOM"|"CONFLUENCE"|"GOOGLEDRIVE",
Configuration = list(
S3Configuration = list(
BucketName = "string",
InclusionPrefixes = list(
"string"
),
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
DocumentsMetadataConfiguration = list(
S3Prefix = "string"
),
AccessControlListConfiguration = list(
KeyPath = "string"
)
),
SharePointConfiguration = list(
SharePointVersion = "SHAREPOINT_ONLINE",
Urls = list(
"string"
),
SecretArn = "string",
CrawlAttachments = TRUE|FALSE,
UseChangeLog = TRUE|FALSE,
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
VpcConfiguration = list(
SubnetIds = list(
"string"
),
SecurityGroupIds = list(
"string"
)
),
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
DocumentTitleFieldName = "string",
DisableLocalGroups = TRUE|FALSE
),
DatabaseConfiguration = list(
DatabaseEngineType = "RDS_AURORA_MYSQL"|"RDS_AURORA_POSTGRESQL"|"RDS_MYSQL"|"RDS_POSTGRESQL",
ConnectionConfiguration = list(
DatabaseHost = "string",
DatabasePort = 123,
DatabaseName = "string",
TableName = "string",
SecretArn = "string"
),
VpcConfiguration = list(
SubnetIds = list(
"string"
),
SecurityGroupIds = list(
"string"
)
),
ColumnConfiguration = list(
DocumentIdColumnName = "string",
DocumentDataColumnName = "string",
DocumentTitleColumnName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
ChangeDetectingColumns = list(
"string"
)
),
AclConfiguration = list(
AllowedGroupsColumnName = "string"
),
SqlConfiguration = list(
QueryIdentifiersEnclosingOption = "DOUBLE_QUOTES"|"NONE"
)
),
SalesforceConfiguration = list(
ServerUrl = "string",
SecretArn = "string",
StandardObjectConfigurations = list(
list(
Name = "ACCOUNT"|"CAMPAIGN"|"CASE"|"CONTACT"|"CONTRACT"|"DOCUMENT"|"GROUP"|"IDEA"|"LEAD"|"OPPORTUNITY"|"PARTNER"|"PRICEBOOK"|"PRODUCT"|"PROFILE"|"SOLUTION"|"TASK"|"USER",
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
)
),
KnowledgeArticleConfiguration = list(
IncludedStates = list(
"DRAFT"|"PUBLISHED"|"ARCHIVED"
),
StandardKnowledgeArticleTypeConfiguration = list(
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
CustomKnowledgeArticleTypeConfigurations = list(
list(
Name = "string",
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
)
)
),
ChatterFeedConfiguration = list(
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
IncludeFilterTypes = list(
"ACTIVE_USER"|"STANDARD_USER"
)
),
CrawlAttachments = TRUE|FALSE,
StandardObjectAttachmentConfiguration = list(
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
IncludeAttachmentFilePatterns = list(
"string"
),
ExcludeAttachmentFilePatterns = list(
"string"
)
),
OneDriveConfiguration = list(
TenantDomain = "string",
SecretArn = "string",
OneDriveUsers = list(
OneDriveUserList = list(
"string"
),
OneDriveUserS3Path = list(
Bucket = "string",
Key = "string"
)
),
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
DisableLocalGroups = TRUE|FALSE
),
ServiceNowConfiguration = list(
HostUrl = "string",
SecretArn = "string",
ServiceNowBuildVersion = "LONDON"|"OTHERS",
KnowledgeArticleConfiguration = list(
CrawlAttachments = TRUE|FALSE,
IncludeAttachmentFilePatterns = list(
"string"
),
ExcludeAttachmentFilePatterns = list(
"string"
),
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
ServiceCatalogConfiguration = list(
CrawlAttachments = TRUE|FALSE,
IncludeAttachmentFilePatterns = list(
"string"
),
ExcludeAttachmentFilePatterns = list(
"string"
),
DocumentDataFieldName = "string",
DocumentTitleFieldName = "string",
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
)
),
ConfluenceConfiguration = list(
ServerUrl = "string",
SecretArn = "string",
Version = "CLOUD"|"SERVER",
SpaceConfiguration = list(
CrawlPersonalSpaces = TRUE|FALSE,
CrawlArchivedSpaces = TRUE|FALSE,
IncludeSpaces = list(
"string"
),
ExcludeSpaces = list(
"string"
),
SpaceFieldMappings = list(
list(
DataSourceFieldName = "DISPLAY_URL"|"ITEM_TYPE"|"SPACE_KEY"|"URL",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
PageConfiguration = list(
PageFieldMappings = list(
list(
DataSourceFieldName = "AUTHOR"|"CONTENT_STATUS"|"CREATED_DATE"|"DISPLAY_URL"|"ITEM_TYPE"|"LABELS"|"MODIFIED_DATE"|"PARENT_ID"|"SPACE_KEY"|"SPACE_NAME"|"URL"|"VERSION",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
BlogConfiguration = list(
BlogFieldMappings = list(
list(
DataSourceFieldName = "AUTHOR"|"DISPLAY_URL"|"ITEM_TYPE"|"LABELS"|"PUBLISH_DATE"|"SPACE_KEY"|"SPACE_NAME"|"URL"|"VERSION",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
AttachmentConfiguration = list(
CrawlAttachments = TRUE|FALSE,
AttachmentFieldMappings = list(
list(
DataSourceFieldName = "AUTHOR"|"CONTENT_TYPE"|"CREATED_DATE"|"DISPLAY_URL"|"FILE_SIZE"|"ITEM_TYPE"|"PARENT_ID"|"SPACE_KEY"|"SPACE_NAME"|"URL"|"VERSION",
DateFieldFormat = "string",
IndexFieldName = "string"
)
)
),
VpcConfiguration = list(
SubnetIds = list(
"string"
),
SecurityGroupIds = list(
"string"
)
),
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
)
),
GoogleDriveConfiguration = list(
SecretArn = "string",
InclusionPatterns = list(
"string"
),
ExclusionPatterns = list(
"string"
),
FieldMappings = list(
list(
DataSourceFieldName = "string",
DateFieldFormat = "string",
IndexFieldName = "string"
)
),
ExcludeMimeTypes = list(
"string"
),
ExcludeUserAccounts = list(
"string"
),
ExcludeSharedDrives = list(
"string"
)
)
),
Description = "string",
Schedule = "string",
RoleArn = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
),
ClientToken = "string"
)
}
}
\keyword{internal}
|
# Plot log-scale daily cases against total cases for log-curve prediction.
# Reads case data via scripts/getCases.R (which defines `COVID`), builds
# per-country cumulative totals and 5-day rolling averages, then renders
# animated log-log plots of cases and of deaths as GIFs.
library(tidyverse)
library(gghighlight)
library(scales)
library(here)
library(ggsci)
library(TTR)
library(gganimate)
library(gifski)

source("scripts/getCases.R")
COVID <- as_tibble(COVID)

# Process data into cumulative form: running totals per country (geoId).
COVID <- rename(COVID, "dailyCases" = "cases", "dailyDeaths" = "deaths")
ID <- unique(COVID$geoId)
COVIDSum <- data.frame()
for (i in seq_along(ID)) {
  tempCountry <- COVID %>% filter(geoId == ID[i]) %>% arrange(dateRep)
  tempCountry[, "totalCases"] <- cumsum(tempCountry$dailyCases)
  tempCountry[, "totalDeaths"] <- cumsum(tempCountry$dailyDeaths)
  COVIDSum <- bind_rows(COVIDSum, tempCountry)
}
COVID <- COVIDSum

# Keep only countries that ever reached 2500 total cases.
COVIDTop <- filter(COVID, totalCases >= 2500)
IDTop <- unique(COVIDTop$geoId)
COVID <- filter(COVID, geoId %in% IDTop)

# 5-day rolling means to smooth the curves.
# NOTE(review): frollmean() comes from data.table, not TTR -- presumably
# scripts/getCases.R attaches data.table; confirm, or switch to TTR::runMean().
COVIDAvg <- data.frame()
# Bug fix: the loop previously ran over 1:length(ID) while indexing IDTop[i],
# producing NA lookups (and empty filters) for i > length(IDTop).
for (i in seq_along(IDTop)) {
  tempCountry <- COVID %>% filter(geoId == IDTop[i]) %>% arrange(dateRep)
  tempCountry[, "dailyCases"] <- frollmean(tempCountry$dailyCases, 5)
  tempCountry[, "totalCases"] <- frollmean(tempCountry$totalCases, 5)
  tempCountry[, "dailyDeaths"] <- frollmean(tempCountry$dailyDeaths, 5)
  tempCountry[, "totalDeaths"] <- frollmean(tempCountry$totalDeaths, 5)
  COVIDAvg <- bind_rows(COVIDAvg, tempCountry)
}
COVID <- COVIDAvg
COVID <- filter(COVID, totalCases >= 10, totalDeaths >= 10)

# Plot log-log values in animated form for cases and deaths.
loglogCasePlot <- ggplot(data = COVID) +
  geom_point(mapping = aes(x = totalCases, y = dailyCases, color = geoId)) +
  geom_line(mapping = aes(x = totalCases, y = dailyCases, color = geoId)) +
  scale_y_log10(limits = c(10, 20000)) +
  scale_x_log10(limits = c(10, 150000)) +
  annotation_logticks(sides = "lb") +
  transition_reveal(dateRep) +
  theme_bw()
animate(loglogCasePlot, renderer = gifski_renderer(loop = FALSE), end_pause = 60, width = 800, height = 600)
anim_save("../figures/loglogCasePlot.gif")

loglogDeathPlot <- ggplot(data = COVID) +
  geom_point(mapping = aes(x = totalDeaths, y = dailyDeaths, color = geoId)) +
  geom_line(mapping = aes(x = totalDeaths, y = dailyDeaths, color = geoId)) +
  scale_y_log10(limits = c(10, 1000)) +
  scale_x_log10(limits = c(10, 10000)) +
  annotation_logticks(sides = "lb") +
  transition_reveal(dateRep) +
  theme_bw()
animate(loglogDeathPlot, renderer = gifski_renderer(loop = FALSE), end_pause = 60, width = 800, height = 600)
anim_save("../figures/loglogDeathPlot.gif", width = 8, height = 8)
| /scripts/loglogAnimation.R | no_license | cortbreuer/COVID-19 | R | false | false | 2,495 | r | #Plot log scale daily cases and total cases for log-curve prediction
# (Reads case data via scripts/getCases.R, which defines `COVID`; builds
# per-country cumulative totals and 5-day rolling averages, then renders
# animated log-log plots of cases and of deaths as GIFs.)
library(tidyverse)
library(gghighlight)
library(scales)
library(here)
library(ggsci)
library(TTR)
library(gganimate)
library(gifski)

source("scripts/getCases.R")
COVID <- as_tibble(COVID)

# Process data into cumulative form: running totals per country (geoId).
COVID <- rename(COVID, "dailyCases" = "cases", "dailyDeaths" = "deaths")
ID <- unique(COVID$geoId)
COVIDSum <- data.frame()
for (i in seq_along(ID)) {
  tempCountry <- COVID %>% filter(geoId == ID[i]) %>% arrange(dateRep)
  tempCountry[, "totalCases"] <- cumsum(tempCountry$dailyCases)
  tempCountry[, "totalDeaths"] <- cumsum(tempCountry$dailyDeaths)
  COVIDSum <- bind_rows(COVIDSum, tempCountry)
}
COVID <- COVIDSum

# Keep only countries that ever reached 2500 total cases.
COVIDTop <- filter(COVID, totalCases >= 2500)
IDTop <- unique(COVIDTop$geoId)
COVID <- filter(COVID, geoId %in% IDTop)

# 5-day rolling means to smooth the curves.
# NOTE(review): frollmean() comes from data.table, not TTR -- presumably
# scripts/getCases.R attaches data.table; confirm, or switch to TTR::runMean().
COVIDAvg <- data.frame()
# Bug fix: the loop previously ran over 1:length(ID) while indexing IDTop[i],
# producing NA lookups (and empty filters) for i > length(IDTop).
for (i in seq_along(IDTop)) {
  tempCountry <- COVID %>% filter(geoId == IDTop[i]) %>% arrange(dateRep)
  tempCountry[, "dailyCases"] <- frollmean(tempCountry$dailyCases, 5)
  tempCountry[, "totalCases"] <- frollmean(tempCountry$totalCases, 5)
  tempCountry[, "dailyDeaths"] <- frollmean(tempCountry$dailyDeaths, 5)
  tempCountry[, "totalDeaths"] <- frollmean(tempCountry$totalDeaths, 5)
  COVIDAvg <- bind_rows(COVIDAvg, tempCountry)
}
COVID <- COVIDAvg
COVID <- filter(COVID, totalCases >= 10, totalDeaths >= 10)

# Plot log-log values in animated form for cases and deaths.
loglogCasePlot <- ggplot(data = COVID) +
  geom_point(mapping = aes(x = totalCases, y = dailyCases, color = geoId)) +
  geom_line(mapping = aes(x = totalCases, y = dailyCases, color = geoId)) +
  scale_y_log10(limits = c(10, 20000)) +
  scale_x_log10(limits = c(10, 150000)) +
  annotation_logticks(sides = "lb") +
  transition_reveal(dateRep) +
  theme_bw()
animate(loglogCasePlot, renderer = gifski_renderer(loop = FALSE), end_pause = 60, width = 800, height = 600)
anim_save("../figures/loglogCasePlot.gif")

loglogDeathPlot <- ggplot(data = COVID) +
  geom_point(mapping = aes(x = totalDeaths, y = dailyDeaths, color = geoId)) +
  geom_line(mapping = aes(x = totalDeaths, y = dailyDeaths, color = geoId)) +
  scale_y_log10(limits = c(10, 1000)) +
  scale_x_log10(limits = c(10, 10000)) +
  annotation_logticks(sides = "lb") +
  transition_reveal(dateRep) +
  theme_bw()
animate(loglogDeathPlot, renderer = gifski_renderer(loop = FALSE), end_pause = 60, width = 800, height = 600)
anim_save("../figures/loglogDeathPlot.gif", width = 8, height = 8)
|
# Render the simplified Gantt table-chart to PDF.
# Output path -- presumably consumed by the sourced plotting script (confirm).
pdf_file<-"pdf/tablecharts_gantt_simplified.pdf"
# Palette building blocks: a base colour plus one colour per task group.
c0<-"black"; c1<-"green"; c2<-"red"; c3<-"blue"; c4<-"orange"; c5<-"brown"
myColour_done<-"grey"
# Per-row colour vector read by inc_gantt_simplified.r; order matters.
myColour<-c(c0,c1,c1,c1,c0,c0,c2,c2,c2,c2,c0,c0,c3,c3,c3,c0,c0,c4,c4,c4,c0,c0,c5)
# The sourced script does the actual drawing; dev.off() (the next statement
# in the original file) closes the PDF device afterwards.
source("scripts/inc_gantt_simplified.r")
dev.off() | /dataFromBook DataVISWithR/scripts/tablecharts_gantt_simplified.r | no_license | shahidnawazkhan/Machine-Learning-Book | R | false | false | 278 | r | pdf_file<-"pdf/tablecharts_gantt_simplified.pdf"
c0<-"black"; c1<-"green"; c2<-"red"; c3<-"blue"; c4<-"orange"; c5<-"brown"
myColour_done<-"grey"
myColour<-c(c0,c1,c1,c1,c0,c0,c2,c2,c2,c2,c0,c0,c3,c3,c3,c0,c0,c4,c4,c4,c0,c0,c5)
source("scripts/inc_gantt_simplified.r")
dev.off() |
# Auto-generated fuzzer regression input for grattan:::anyOutside():
# a captured argument list (bounds a/b plus an integer vector x with NAs
# and extreme values) replayed verbatim via do.call().
testlist <- list(a = -1L, b = -1L, x = c(-256L, -256L, -1L, -2071690108L, -2071952252L, -2071690108L, -2071690108L, -2071690108L, 1361077359L, 1969384549L, 703787569L, 184549375L, -218972161L, -1L, NA, -868879630L, -218972161L, -52L, -858980353L, -870527796L, -858993460L, -2228225L, -1L, NA, -572653569L, -1L, -1L, -1L, -1L, -1L, -1073741825L, -1L, -1L, -1L, -1L, -13563137L, 901775359L, -212L, 58095222L, 458358783L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -9013719L, 1375731711L, -1L, -1L, 704643037L, -572662307L, -55040L, 12554L, -13254657L, -1L, 738424438L, 1981501696L, 65535L, -149L, -6225921L, 825113403L ))
# Invoke the internal (triple-colon) compiled routine with the fuzzed args.
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055867-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 680 | r | testlist <- list(a = -1L, b = -1L, x = c(-256L, -256L, -1L, -2071690108L, -2071952252L, -2071690108L, -2071690108L, -2071690108L, 1361077359L, 1969384549L, 703787569L, 184549375L, -218972161L, -1L, NA, -868879630L, -218972161L, -52L, -858980353L, -870527796L, -858993460L, -2228225L, -1L, NA, -572653569L, -1L, -1L, -1L, -1L, -1L, -1073741825L, -1L, -1L, -1L, -1L, -13563137L, 901775359L, -212L, 58095222L, 458358783L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -9013719L, 1375731711L, -1L, -1L, 704643037L, -572662307L, -55040L, 12554L, -13254657L, -1L, 738424438L, 1981501696L, 65535L, -149L, -6225921L, 825113403L ))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
################################################################################
## PROGRAM: write.merlin.R
## BY: Bhoom Suktitipat, MD,PhD
## DATE: Tue Dec 20 15:38:18 EST 2011
################################################################################
## GOAL: write pedigree file for Merlin (LINKAGE FORMAT) with DAT file from
## 1) object from read.merlin
## 2) when giving both pedfile and dat file, will write out pedfile&datfile
## 3) when giving only pedfile and "type", will create datfile coresponding
## to "type"
## INPUT:
## - x: a list of class readMerlin from read.merlin()
## - prefix: the prefix of pedfile and datfile you want to create
## - ped: name of pedigree file. This is a linkage format file
## - dat: dat type file: a two column file format with 1st column
## describing the data type "M/S/C/T", and 2nd column
## for variable names
## - map: Four columns format with "CHR, SNP, CM, BP"
## OPTIONAL: If map specified, it will be writen out.
## - type: NULL (default) use data from .dat
## when specified as a character string same as the first column of
## .dat
## OUTPUT: write ${prefix}.ped file and ${prefix}.dat file to the current directory
## readMerlin contains "$pedigree" and "$dat"
################################################################################
write.merlin <- function(x, prefix, ped, dat, map, type = NULL) {
  ## Write MERLIN/LINKAGE-format files to <prefix>.ped, <prefix>.dat and,
  ## optionally, <prefix>.map.
  ##
  ## Arguments:
  ##   x      - optional object of class "readMerlin" (from read.merlin());
  ##            when supplied, its $ped and $dat components are written out.
  ##   prefix - output file prefix (required).
  ##   ped    - pedigree data frame: 5 id columns followed by data columns.
  ##   dat    - two-column data frame ("type", "name") describing columns.
  ##   map    - optional map data frame (CHR, SNP, CM, BP); written if given.
  ##   type   - character vector of MERLIN column types ("M"/"S"/"C"/"T");
  ##            used to build the .dat file when `dat` is missing.  Marker
  ##            ("M") entries are assumed to span two pedigree columns named
  ##            like "<marker>_1"/"<marker>_2".
  ##
  ## Side effects: writes the files above (tab-separated, NA written as "x").
  if (missing(prefix)) stop("Please specify output file")
  ## SITUATION 1: given a readMerlin object, dump its ped/dat components
  if (!missing(x)) {
    if (inherits(x, "readMerlin")) {  # was class(x) == "...": breaks on subclasses
      write.table(x$ped, file = paste(prefix, "ped", sep = "."), sep = "\t",
                  col.names = FALSE, row.names = FALSE, na = "x", quote = FALSE)
      write.table(x$dat, file = paste(prefix, "dat", sep = "."), sep = "\t",
                  col.names = FALSE, row.names = FALSE, na = "x", quote = FALSE)
      ## BUGFIX: previously execution fell through to the chain below and
      ## stopped with "Please specify Pedigree and dat data" even after a
      ## successful write.
      return(invisible(NULL))
    }
  }
  ## SITUATION 2: pedigree and dat data frames supplied directly
  if (!missing(ped) && !missing(dat)) {  # scalar conditions: && instead of &
    write.table(ped, file = paste(prefix, "ped", sep = "."), sep = "\t",
                col.names = FALSE, row.names = FALSE, na = "x", quote = FALSE)
    write.table(dat, file = paste(prefix, "dat", sep = "."), sep = "\t",
                col.names = FALSE, row.names = FALSE, na = "x", quote = FALSE)
    if (!missing(map)) write.table(map, file = paste(prefix, "map", sep = "."),
                                   sep = "\t", col.names = FALSE,
                                   row.names = FALSE, quote = FALSE)
  } else if (missing(ped) && missing(dat)) {
    stop("Please specify Pedigree and dat data")
  } else if (!missing(ped) && missing(dat) && length(type) > 0) {
    ## SITUATION 3: pedigree only -- build the dat data frame from `type`
    cat("generating dat file from pedigree data frame\r")
    ## expected number of pedigree data columns: 2 per marker, 1 otherwise
    length.type <- length(type[type == "M"]) * 2 + length(type[!type == "M"])
    if (length.type != ncol(ped) - 5)
      stop("length of type does not match the number of pedigree data columns")
    type.name <- character(length(type))
    colnum <- 6  # first data column after the 5 pedigree id columns
    allname <- names(ped)
    for (i in seq_along(type)) {  # was 1:length(type)
      if (type[i] == "M") {
        ## BUGFIX: was allname[colname] -- 'colname' undefined, so this
        ## branch always errored.  Strip the "_<allele>" suffix from the
        ## first allele column to recover the marker name.
        marker <- allname[colnum]
        type.name[i] <- strsplit(marker, "_")[[1]][1]
        colnum <- colnum + 2
      } else {
        type.name[i] <- allname[colnum]
        colnum <- colnum + 1
      }
    }
    dat <- data.frame("type" = type, "name" = type.name)
    ## MERLIN expects rows ordered by pedigree id, then individual id
    ord <- order(ped[, 1], ped[, 2])
    ped <- ped[ord, ]
    write.table(ped, file = paste(prefix, "ped", sep = "."), sep = "\t",
                col.names = FALSE, row.names = FALSE, na = "x", quote = FALSE)
    write.table(dat, file = paste(prefix, "dat", sep = "."), sep = "\t",
                col.names = FALSE, row.names = FALSE, na = "x", quote = FALSE)
    if (!missing(map)) write.table(map, file = paste(prefix, "map", sep = "."),
                                   sep = "\t", col.names = FALSE,
                                   row.names = FALSE, quote = FALSE)
  } else stop("Please specify type for dat file to write out")
}
| /write.merlin.r | no_license | hypotheses/Genetics | R | false | false | 3,804 | r | ################################################################################
## PROGRAM: write.merlin.R
## BY: Bhoom Suktitipat, MD,PhD
## DATE: Tue Dec 20 15:38:18 EST 2011
################################################################################
## GOAL: write pedigree file for Merlin (LINKAGE FORMAT) with DAT file from
## 1) object from read.merlin
## 2) when giving both pedfile and dat file, will write out pedfile&datfile
## 3) when giving only pedfile and "type", will create datfile coresponding
## to "type"
## INPUT:
## - x: a list of class readMerlin from read.merlin()
## - prefix: the prefix of pedfile and datfile you want to create
## - ped: name of pedigree file. This is a linkage format file
## - dat: dat type file: a two column file format with 1st column
## describing the data type "M/S/C/T", and 2nd column
## for variable names
## - map: Four columns format with "CHR, SNP, CM, BP"
## OPTIONAL: If map specified, it will be writen out.
## - type: NULL (default) use data from .dat
## when specified as a character string same as the first column of
## .dat
## OUTPUT: write ${prefix}.ped file and ${prefix}.dat file to the current directory
## readMerlin contains "$pedigree" and "$dat"
################################################################################
## Write pedigree (.ped) and data-type (.dat) files in MERLIN/LINKAGE format,
## either from a readMerlin object (x) or from ped/dat/map data frames.
## Missing values are written as "x"; files go to <prefix>.ped/.dat/.map.
write.merlin <- function(x,prefix,ped,dat,map,type=NULL) {
  if (missing(prefix)) stop("Please specify output file")
  ## SITUATION 1: Given readMerlin class data
  ## NOTE(review): prefer inherits(x, "readMerlin") over class() comparison.
  ## Also note there is no early return here: when only `x` is supplied,
  ## execution falls through to the chain below and stops with an error
  ## even after a successful write.
  if (!missing(x)) {
    if (class(x) == "readMerlin") {
      write.table(x$ped,file=paste(prefix,"ped",sep="."),sep="\t",col.names=F,row.names=F,na="x",quote=F)
      write.table(x$dat,file=paste(prefix,"dat",sep="."),sep="\t",col.names=F,row.names=F,na="x",quote=F)
    }
  }
  ## SITUATION 2: given pedigree, map, and dat data frame
  if (!missing(ped) & !missing(dat) ) {
    write.table(ped,file=paste(prefix,"ped",sep="."),sep="\t",col.names=F,row.names=F,na="x",quote=F)
    write.table(dat,file=paste(prefix,"dat",sep="."),sep="\t",col.names=F,row.names=F,na="x",quote=F)
    if ( !missing(map)) write.table(map,file=paste(prefix,"map",sep="."),sep="\t",col.names=FALSE,row.names=FALSE,quote=FALSE)
  } else if (missing(ped) & missing(dat)) {
    stop("Please specify Pedigree and dat data")
  } else
    ## SITUATION 3: pedigree data frame available but no dat --- specify type to create dat automatically
    if (!missing(ped) & missing(dat) & length(type) > 0) {
      cat("generating dat file from pedigree data frame\r")
      ## calculate expected length of data name
      ## ("M" marker entries occupy two pedigree columns, one per allele)
      length.type <- length(type[type=="M"])*2+length(type[!type=="M"])
      if (length.type != ncol(ped)-5) stop("type has length shorter than pedigree data")
      type.name <- character(length(type))
      colnum <- 6  # first data column after the 5 pedigree id columns
      allname <- names(ped)
      for ( i in 1:length(type) ) {
        if (type[i] == "M" ) {
          ## BUG(review): 'colname' is undefined (presumably meant 'colnum'),
          ## so this branch errors at runtime.  Additionally strsplit()
          ## returns a list, so assigning it into the character vector
          ## type.name is unlikely to yield the intended marker name --
          ## probably strsplit(marker, "_")[[1]][1] was intended.
          marker <- allname[colname]
          marker <- strsplit(marker,"\\_")
          type.name[i] <- marker
          colnum <- colnum+2
        } else {
          type.name[i] <- allname[colnum]
          colnum <- colnum+1
        }
      }
      dat <- data.frame("type"=type,"name"=type.name)
      ## MERLIN expects rows ordered by pedigree id, then individual id
      ord <- order(ped[,1],ped[,2])
      ped <- ped[ord,]
      write.table(ped,file=paste(prefix,"ped",sep="."),sep="\t",col.names=F,row.names=F,na="x",quote=F)
      write.table(dat,file=paste(prefix,"dat",sep="."),sep="\t",col.names=F,row.names=F,na="x",quote=F)
      if ( !missing(map)) write.table(map,file=paste(prefix,"map",sep="."),sep="\t",col.names=FALSE,row.names=FALSE,quote=FALSE)
    } else stop("Please specify type for dat file to write out")
}
|
# Auto-extracted example script for the Sleuth2 `ex2119` data set
# (meta-analysis of breast cancer and lactation studies).
library(Sleuth2)
### Name: ex2119
### Title: Meta-Analysis of Breast Cancer and Lactation Studies
### Aliases: ex2119
### Keywords: datasets
### ** Examples
# Display the structure of the ex2119 data frame.
str(ex2119)
| /data/genthat_extracted_code/Sleuth2/examples/ex2119.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 176 | r | library(Sleuth2)
### Name: ex2119
### Title: Meta-Analysis of Breast Cancer and Lactation Studies
### Aliases: ex2119
### Keywords: datasets
### ** Examples
str(ex2119)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stfilter.R
\name{TWSE_csv}
\alias{TWSE_csv}
\title{Get daily record of stock from TWSE}
\usage{
TWSE_csv(stock = "1215", year_in, month_in)
}
\arguments{
\item{stock}{stock code}
\item{year_in}{year, ex:'2017'}
\item{month_in}{month, ex: '03'}
}
\description{
Get daily record of stock from TWSE
\url{http://www.twse.com.tw/en/trading/exchange/STOCK_DAY/STOCK_DAY.php}
}
\details{
}
\examples{
TWSE_csv(stock='1215','2017','03')
}
| /man/TWSE_csv.Rd | no_license | KuiMing/stfilter | R | false | true | 514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stfilter.R
\name{TWSE_csv}
\alias{TWSE_csv}
\title{Get daily record of stock from TWSE}
\usage{
TWSE_csv(stock = "1215", year_in, month_in)
}
\arguments{
\item{stock}{stock code}
\item{year_in}{year, ex:'2017'}
\item{month_in}{month, ex: '03'}
}
\description{
Get daily record of stock from TWSE
\url{http://www.twse.com.tw/en/trading/exchange/STOCK_DAY/STOCK_DAY.php}
}
\details{
}
\examples{
TWSE_csv(stock='1215','2017','03')
}
|
#' Add covariate levels detection function plots
#'
#' @inherit mrds::add.df.covar.line
#' @name add_df_covar_line
#' @docType methods
#' @param ddf a fitted detection function object.
#' @param data a `data.frame` with the covariate combination you want to plot.
#' @param \dots extra arguments to give to [`lines`][graphics::lines] (e.g.,
#' `lty`, `lwd`, `col`).
#' @param ndist number of distances at which to evaluate the detection function.
#' @param pdf should the line be drawn on the probability density scale;
#' ignored for line transects
#' @param breaks required to ensure that PDF lines are the right size, should
#' match what is supplied to original `plot` command. Defaults to
#' "Sturges" breaks, as in [`hist`][graphics::hist]. Only used if `pdf=TRUE`
#' @note This function is located in the `mrds` package but the
#' documentation is provided here for easy access.
#' @examples
#' \dontrun{
#' # example using a model for the minke data
#' data(minke)
#' # fit a model
#' result <- ds(minke, formula=~Region.Label)
#'
#' # make a base plot, showpoints=FALSE makes the plot less busy
#' plot(result, showpoints=FALSE)
#'
#' # add lines for each region one at a time
#' add_df_covar_line(result, data.frame(Region.Label="South"), lty=2)
#' add_df_covar_line(result, data.frame(Region.Label="North"), lty=3)
#'
#' # add a legend
#' legend(1.5, 1, c("Average", "South", "North"), lty=1:3)
#'
#' # point transect example
#' data(amakihi)
#' result <- ds(amakihi, truncation=150, transect="point", formula=~OBs)
#' plot(result, showpoints=FALSE, pdf=TRUE)
#' add_df_covar_line(result,
#' data.frame(OBs=na.omit(unique(amakihi$OBs))), pdf=TRUE)
#' }
NULL
| /R/add_df_covar_line.R | no_license | cran/Distance | R | false | false | 1,721 | r | #' Add covariate levels detection function plots
#'
#' @inherit mrds::add.df.covar.line
#' @name add_df_covar_line
#' @docType methods
#' @param ddf a fitted detection function object.
#' @param data a `data.frame` with the covariate combination you want to plot.
#' @param \dots extra arguments to give to [`lines`][graphics::lines] (e.g.,
#' `lty`, `lwd`, `col`).
#' @param ndist number of distances at which to evaluate the detection function.
#' @param pdf should the line be drawn on the probability density scale;
#' ignored for line transects
#' @param breaks required to ensure that PDF lines are the right size, should
#' match what is supplied to original `plot` command. Defaults to
#' "Sturges" breaks, as in [`hist`][graphics::hist]. Only used if `pdf=TRUE`
#' @note This function is located in the `mrds` package but the
#' documentation is provided here for easy access.
#' @examples
#' \dontrun{
#' # example using a model for the minke data
#' data(minke)
#' # fit a model
#' result <- ds(minke, formula=~Region.Label)
#'
#' # make a base plot, showpoints=FALSE makes the plot less busy
#' plot(result, showpoints=FALSE)
#'
#' # add lines for each region one at a time
#' add_df_covar_line(result, data.frame(Region.Label="South"), lty=2)
#' add_df_covar_line(result, data.frame(Region.Label="North"), lty=3)
#'
#' # add a legend
#' legend(1.5, 1, c("Average", "South", "North"), lty=1:3)
#'
#' # point transect example
#' data(amakihi)
#' result <- ds(amakihi, truncation=150, transect="point", formula=~OBs)
#' plot(result, showpoints=FALSE, pdf=TRUE)
#' add_df_covar_line(result,
#' data.frame(OBs=na.omit(unique(amakihi$OBs))), pdf=TRUE)
#' }
NULL
|
\name{invalsi}
\alias{invalsi}
\docType{data}
\title{The Invalsi data Set}
\description{
The INVALSI "Istituto nazionale per la valutazione del sistema educativo di istruzione e di formazione" data set is a subset of 15398 tweets from the original data collected during the days of the administration of a national test to secondary school children in Italy (from May 6th to June 18th, 2014). The INVALSI data set includes only tweets that include the word or the hashtag \code{INVALSI}. Unfortunately, Twitter policies do not allow for the distribution of the original data, and the original IDs were lost, but this R version provides the Document-Term matrix, which is enough for the text-mining task.
}
\usage{data(invalsi)}
\examples{
data(invalsi)
S <- invalsi$S # iSA feature vectors
dtm <- invalsi$dtm # DocumentTerm matrix
dim(dtm)
table(invalsi$tr) # the hand coded data tags
}
\keyword{datasets}
| /man/invalsi.Rd | no_license | blogsvoices/iSAX | R | false | false | 899 | rd | \name{invalsi}
\alias{invalsi}
\docType{data}
\title{The Invalsi data Set}
\description{
The INVALSI "Istituto nazionale per la valutazione del sistema educativo di istruzione e di formazione" data set is a subset of 15398 tweets from the original data collected during the days of the administration of a national test to secondary school children in Italy (from May 6th to June 18th, 2014). The INVALSI data set includes only tweets that include the word or the hashtag \code{INVALSI}. Unfortunately, Twitter policies do not allow for the distribution of the original data, and the original IDs were lost, but this R version provides the Document-Term matrix, which is enough for the text-mining task.
}
\usage{data(invalsi)}
\examples{
data(invalsi)
S <- invalsi$S # iSA feature vectors
dtm <- invalsi$dtm # DocumentTerm matrix
dim(dtm)
table(tr) # the hand coded data tags
}
\keyword{datasets}
|
# Auto-extracted example script for rechonest::get_artist_videos (Echo Nest
# API wrapper).  The call needs a valid api_key and network access, hence
# it is wrapped in a "Not run" block.
library(rechonest)
### Name: get_artist_videos
### Title: To get a list of video documents found on the web related to an
###   artist
### Aliases: get_artist_videos
### ** Examples
## Not run:
##D data=get_artist_videos(api_key,name="coldplay")
## End(Not run)
| /data/genthat_extracted_code/rechonest/examples/get_artist_videos.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 270 | r | library(rechonest)
### Name: get_artist_videos
### Title: To get a list of video documents found on the web related to an
### artist
### Aliases: get_artist_videos
### ** Examples
## Not run:
##D data=get_artist_videos(api_key,name="coldplay")
## End(Not run)
|
# Build SNOMED CT lookup tables (fully-specified names and preferred
# synonyms) and a SNOMED CT -> ICD-10 mapping from RF2 snapshot/full
# release files.
library(data.table)
library(dplyr)
# Require access SNOMED CT content
# Read required files; colClasses='character' preserves the long SCTIDs.
language <- fread("./Language/der2_cRefset_LanguageSnapshot.txt", colClasses = 'character')
Concept <- fread("./Terminology/sct2_Concept_Snapshot.txt", colClasses = 'character')
description <- fread("./Terminology/sct2_Description_Snapshot.txt", colClasses = 'character')
ExtendedMapFull <- fread("./Terminology/snomed/SNOMEDCT_InternationalRF2/Full/Refset/Map/der2_iisssccRefset_ExtendedMapFull.txt", colClasses = 'character')
# Dataset of Active concept, Fully specified name for description & Preferred acceptability
# (the long numeric literals below are the SNOMED CT ids for those attributes)
medical_terminology <- Concept %>%
  left_join(description, by = c("id" = "conceptId"), keep=FALSE) %>%
  left_join(language, by = c("id.y" = "referencedComponentId"), keep=FALSE) %>%
  filter(active.x == "1", typeId == "900000000000003001", refsetId == "900000000000509007", acceptabilityId == "900000000000548007") %>%
  select(term, id.y)
# Dataset for concept synonyms (same joins, synonym type / acceptable ids).
synonym <- Concept %>%
  left_join(description, by = c("id" = "conceptId"), keep=FALSE) %>%
  left_join(language, by = c("id.y" = "referencedComponentId"), keep=FALSE) %>%
  filter(active.x == "1", typeId == "900000000000013009", refsetId == "900000000000509007", acceptabilityId == "900000000000549004") %>%
  select(term, id.y)
# Mapping SNOMED CT terminology to ICD10 via the extended map refset.
snomedct_icd10 <- ExtendedMapFull %>%
  left_join(synonym, by = c("referencedComponentId" = "id.y")) %>%
  # NOTE(review): na_if("") on a whole data frame relies on older dplyr
  # behaviour; recent dplyr expects a vector here -- confirm the installed
  # version, or use mutate(across(everything(), ~na_if(.x, ""))).
  na_if("") %>%
  filter(!is.na(mapTarget)) %>%
  distinct(mapTarget, referencedComponentId)
| /data_manipulation_dplyr.R | no_license | Al-Murrani/data-manipulation | R | false | false | 1,603 | r | library(data.table)
library(dplyr)
# Require access SNOMED CT content
# Read required files
language <- fread("./Language/der2_cRefset_LanguageSnapshot.txt", colClasses = 'character')
Concept <- fread("./Terminology/sct2_Concept_Snapshot.txt", colClasses = 'character')
description <- fread("./Terminology/sct2_Description_Snapshot.txt", colClasses = 'character')
ExtendedMapFull <- fread("./Terminology/snomed/SNOMEDCT_InternationalRF2/Full/Refset/Map/der2_iisssccRefset_ExtendedMapFull.txt", colClasses = 'character')
# Dataset of Active concept, Fully specified name for description & Preferred acceptability
medical_terminology <- Concept %>%
left_join(description, by = c("id" = "conceptId"), keep=FALSE) %>%
left_join(language, by = c("id.y" = "referencedComponentId"), keep=FALSE) %>%
filter(active.x == "1", typeId == "900000000000003001", refsetId == "900000000000509007", acceptabilityId == "900000000000548007") %>%
select(term, id.y)
# Dataset for concepts synonym
synonym <- Concept %>%
left_join(description, by = c("id" = "conceptId"), keep=FALSE) %>%
left_join(language, by = c("id.y" = "referencedComponentId"), keep=FALSE) %>%
filter(active.x == "1", typeId == "900000000000013009", refsetId == "900000000000509007", acceptabilityId == "900000000000549004") %>%
select(term, id.y)
# Mapping snomded ct terminology to ICD10
snomedct_icd10 <- ExtendedMapFull %>%
left_join(synonym, by = c("referencedComponentId" = "id.y")) %>%
na_if("") %>%
filter(!is.na(mapTarget)) %>%
distinct(mapTarget, referencedComponentId)
|
/DataPostProcessing/perform_statistical_analysis_branch_lengths.R | no_license | g-heemeryck/SLiMTree | R | false | false | 19,585 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{plot_distrib}
\alias{plot_distrib}
\title{Plot marginal distrib}
\usage{
plot_distrib(asrStructure, distribDF = NULL, type = "colouredText",
colour = NULL, columns = NULL, aas = NULL)
}
\arguments{
\item{asrStructure}{the named list returned by \code{\link{runASR}}, \code{\link{loadASR}} or \code{\link{reduce_alphabet}}. Set this to NULL
to specify other variables}
\item{distribDF}{a dataframe created by \code{\link{read_distrib}}}
\item{type}{how the distribution should be displayed, default plain text.}
\item{colour}{only available in conjunction with type="logo" to specify the colour scheme for AAs. options: "clustal", "zappo", "taylor", "mixed"
options: "colour", "text", "logo", "colouredText"
NOTE: "logo" uses \code{\link{plot_logo_distrib}}}
\item{columns}{a vector containing the column numbers of interest. By default = NULL and all columns are displayed}
\item{aas}{a vector containing the amino acids of interest. By default = NULL and all amino acids are displayed}
}
\value{
plots a matrix of amino acid probability at given columns in alignment
}
\description{
Displays a heatmap showing the probabilities of each amino acid across all columns in the alignment
}
\examples{
data(asrStructure)
plot_distrib(asrStructure) #if you want to use the information stored in the structure
#To change the display of the plot
plot_distrib(asrStructure, type = "colour")
plot_distrib(asrStructure, type = "text")
plot_distrib(asrStructure, type = "logo")
plot_distrib(asrStructure, type = "colouredText")
#if you want to use a specific set of data
#retrieve example file stored in the package
id_distribution <- system.file("extdata", "runASR_distribution.txt", package="ASR")
#alternatively, specify the filename as a string
#id_distribution <- "id_distribution.txt"
distribDF <- read_distrib(NULL, id_distribution)
plot_distrib(NULL, distribDF = distribDF)
#to specify particular columns and AAs to include in figure
columns = c(4,5,6,7)
aas = c("G", "P")
plot_distrib(asrStructure, columns = columns)
plot_distrib(asrStructure, aas = aas)
plot_distrib(asrStructure, columns = columns, aas = aas)
#OR
plot_distrib(NULL, distribDF = distribDF, columns = columns)
}
| /ASR/man/plot_distrib.Rd | no_license | laiso1983/ASR | R | false | true | 2,296 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{plot_distrib}
\alias{plot_distrib}
\title{Plot marginal distrib}
\usage{
plot_distrib(asrStructure, distribDF = NULL, type = "colouredText",
colour = NULL, columns = NULL, aas = NULL)
}
\arguments{
\item{asrStructure}{the named list returned by \code{\link{runASR}}, \code{\link{loadASR}} or \code{\link{reduce_alphabet}}. Set this to NULL
to specify other variables}
\item{distribDF}{a dataframe created by \code{\link{read_distrib}}}
\item{type}{how the distribution should be displayed, default plain text.}
\item{colour}{only available in conjunction with type="logo" to specify the colour scheme for AAs. options: "clustal", "zappo", "taylor", "mixed"
options: "colour", "text", "logo", "colouredText"
NOTE: "logo" uses \code{\link{plot_logo_distrib}}}
\item{columns}{a vector containing the column numbers of interest. By default = NULL and all columns are displayed}
\item{aas}{a vector containing the amino acids of interest. By default = NULL and all amino acids are displayed}
}
\value{
plots a matrix of amino acid probability at given columns in alignment
}
\description{
Displays a heatmap showing the probabilities of each amino acid across all columns in the alignment
}
\examples{
data(asrStructure)
plot_distrib(asrStructure) #if you want to use the information stored in the structure
#To change the display of the plot
plot_distrib(asrStructure, type = "colour")
plot_distrib(asrStructure, type = "text")
plot_distrib(asrStructure, type = "logo")
plot_distrib(asrStructure, type = "colouredText")
#if you want to use a specific set of data
#retrieve example file stored in the package
id_distribution <- system.file("extdata", "runASR_distribution.txt", package="ASR")
#alternatively, specify the filename as a string
#id_distribution <- "id_distribution.txt"
distribDF <- read_distrib(NULL, id_distribution)
plot_distrib(NULL, distribDF = distribDF)
#to specify particular columns and AAs to include in figure
columns = c(4,5,6,7)
aas = c("G", "P")
plot_distrib(asrStructure, columns = columns)
plot_distrib(asrStructure, aas = aas)
plot_distrib(asrStructure, columns = columns, aas = aas)
#OR
plot_distrib(NULL, distribDF = distribDF, columns = columns)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xlsx_date.R
\name{xlsx_date}
\alias{xlsx_date}
\title{number to date for vector/column from excel sheet with mixed value types}
\usage{
xlsx_date(x, msg = NULL, minyear = 2000, maxyear = year_() + 2)
}
\arguments{
\item{x}{(vector of) would be dates to transform back}
}
\description{
readxl::read_excel() on a column with mixed-format cells will not easily parse dates.
Consider read_excel(..., col_types = c("date", "guess", ...)),
unless the column is mixed valued (e.g. "42068", "1/31/2010,1/2/2011", ...).
NB: Excel's date origin is 1899-12-30.
}
\examples{
print(xlsx_date(c(41977,"42068"))) # "2014-12-04" "2015-03-05"
xlsx_date("10977") # error out of range (1933-01-14)
}
| /man/xlsx_date.Rd | no_license | LabNeuroCogDevel/LNCDR | R | false | true | 751 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xlsx_date.R
\name{xlsx_date}
\alias{xlsx_date}
\title{number to date for vector/column from excel sheet with mixed value types}
\usage{
xlsx_date(x, msg = NULL, minyear = 2000, maxyear = year_() + 2)
}
\arguments{
\item{x}{(vector of) would be dates to transform back}
}
\description{
readxl::read_excel() on a column with mixed-format cells will not easily parse dates.
Consider read_excel(..., col_types = c("date", "guess", ...)),
unless the column is mixed valued (e.g. "42068", "1/31/2010,1/2/2011", ...).
NB: Excel's date origin is 1899-12-30.
}
\examples{
print(xlsx_date(c(41977,"42068"))) # "2014-12-04" "2015-03-05"
xlsx_date("10977") # error out of range (1933-01-14)
}
|
# Reproduce the 2x2 panel figure (plot4.png) from the UCI household power
# consumption data, restricted to 2007-02-01 and 2007-02-02.
power_raw <- read.table("household_power_consumption.txt",
                        header = TRUE, sep = ";", as.is = TRUE,
                        na.strings = "?")

# Keep only the two target days and derive a POSIX timestamp per row.
feb_days <- power_raw[power_raw$Date == "1/2/2007" |
                        power_raw$Date == "2/2/2007", ]
feb_days$DateTime <- strptime(paste(feb_days$Date, feb_days$Time),
                              "%d/%m/%Y %H:%M:%S")

png(file = "plot4.png")
par(mfrow = c(2, 2))

# Panel 1 (top-left): global active power over time.
with(feb_days, {
  plot(DateTime, Global_active_power,
       pch = NA, xlab = "", ylab = "Global Active Power")
  lines(DateTime, Global_active_power)
})

# Panel 2 (top-right): voltage over time.
with(feb_days, {
  plot(DateTime, Voltage, ylab = "Voltage", xlab = "datetime", pch = NA)
  lines(DateTime, Voltage)
})

# Panel 3 (bottom-left): the three sub-metering series plus a legend.
with(feb_days, {
  plot(DateTime, Sub_metering_1,
       pch = NA, xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_1)
  lines(DateTime, Sub_metering_2, col = 'red')
  lines(DateTime, Sub_metering_3, col = 'blue')
})
legend('topright',
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c('black', 'red', 'blue'),
       lty = c(1, 1, 1),
       bty = 'n')

# Panel 4 (bottom-right): global reactive power.
with(feb_days, plot(DateTime, Global_reactive_power,
                    xlab = 'datetime', pch = NA))
with(feb_days, lines(DateTime, Global_reactive_power))

dev.off()
| /plot4.R | no_license | apavelyev/ExData_Plotting1 | R | false | false | 1,078 | r | x <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", as.is = TRUE, na.strings = "?")
y <- x[x$Date == "1/2/2007" | x$Date == "2/2/2007", ]
y$DateTime <- strptime(paste(y$Date, y$Time), "%d/%m/%Y %H:%M:%S")
png(file = "plot4.png")
par(mfrow = c(2, 2))
plot(y$DateTime, y$Global_active_power,
pch = NA,
xlab = "",
ylab = "Global Active Power")
lines(y$DateTime, y$Global_active_power)
plot(y$DateTime, y$Voltage, ylab = "Voltage", xlab = "datetime", pch = NA)
lines(y$DateTime, y$Voltage)
plot(y$DateTime, y$Sub_metering_1,
pch = NA,
xlab = "",
ylab = "Energy sub metering")
lines(y$DateTime, y$Sub_metering_1)
lines(y$DateTime, y$Sub_metering_2, col = 'red')
lines(y$DateTime, y$Sub_metering_3, col = 'blue')
legend('topright',
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c('black', 'red', 'blue'),
lty = c(1, 1, 1),
bty = 'n')
with(y, plot(DateTime, Global_reactive_power, xlab = 'datetime', pch = NA))
with(y, lines(DateTime, Global_reactive_power))
dev.off()
|
setwd("/home/gerardo/MEGAsync/Niche models")
load('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
library(raster) ; library(rgdal); library(maptools); library(dismo); library(rgeos)
library(lgcp); library(spatstat); library(plyr); library(coda)
######################
#####Data loading#####
######################
##Load and reproject raster data
raster.data <- stack(c(paste('BIOCLIM/p-consp/ascii/bio',c(2, 3, 5, 9, 13),'.asc',sep=''),
'Bat model results/Maxent/p-consp.asc'))
proj4string(raster.data) <- CRS('+init=epsg:4326') #Original CRS
raster.data <- projectRaster(raster.data, crs = CRS('+init=epsg:3577')) #Target CRS
##Standardise raster data
cel.mean <- cellStats(raster.data, median)
cel.mean[6] <- 0
cel.sd <- cellStats(raster.data, sd)
cel.sd[6] <- 1
raster.data <- (raster.data - cel.mean)/cel.sd
#Load and project presence coordinates
Hendra.incidents <- read.csv('Hendra incidents-P-consp.csv')
coordinates(Hendra.incidents) <- ~Longitude+Latitude
proj4string(Hendra.incidents) <- CRS('+init=epsg:4326')
Hendra.incidents <- spTransform(Hendra.incidents, CRS('+init=epsg:3577'))
#########################################
###Formatting the data to run analyses###
#########################################
##Computing the polygon window
r <- raster.data[[1]]; r[!is.na(r[])] <- 1; r <- buffer(r, 5000)
window <- rasterToPolygons(r, dissolve = T)
spatstat.options(checkpolygons = F)
window <- as(window, 'owin')
window <- simplify.owin(window, dmin = 5000)
spatstat.options(checkpolygons = T)
#Formatting presence points as a spatstat ppp object
sd.p <- ppp(x = Hendra.incidents@coords[,1], y = Hendra.incidents@coords[,2],
window = window) # mask = as.matrix(r))#,
##Calculating optimal grid size for the point process
scale.analyses <- minimum.contrast(sd.p, model = 'exponential', method = 'K', intens = density(sd.p), transform = log)
chooseCellwidth(sd.p, cwinit = 14908.3)
points(sd.p)
cellwidth <- 14908.3
env.data <- data.frame(rasterToPoints(raster.data))
env.data <- SpatialPixelsDataFrame(cbind(env.data$x, env.data$y), env.data)
##Creating the sampling grid over which to calculate point intensity
polyolay <- getpolyol(data = sd.p, pixelcovariates = env.data, cellwidth = cellwidth)
polyolay <- getpolyol(data = sd.p, pixelcovariates = env.data, cellwidth = cellwidth, ext = 4)
save.image('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
##Formatting the environmental data
covar <- data.frame(rasterToPoints(raster.data))
covar <- SpatialPixelsDataFrame(cbind(covar$x, covar$y), data = covar[,3:ncol(covar)])
##Setting up the interpolation routine for every variable type (numerical or categorical)
covar@data=guessinterp(covar@data)
covar@data <- assigninterp(df = covar@data, vars = c('bio2', 'bio3', 'bio5', 'bio9', 'bio13', 'p.consp'),
value = "ArealWeightedSum")
##Generate the data from the computational grid and a model formula
Zmat <- getZmat(formula = X ~ bio2 +bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2),
data = sd.p, pixelcovariates = covar,
cellwidth = cellwidth, overl = polyolay, ext = 4)
#################################
###Spatial covariance function###
#################################
cf <- CovFunction(SpikedExponentialCovFct)#RandomFieldsCovFct(model = 'matern', additionalparameters = c(1)))#Cauchy, so far the best
###########################
###Specifying the priors###
###########################
nbeta <- 2 + nlayers(raster.data) #Number of predictors
priors <-lgcpPrior(etaprior = PriorSpec(LogGaussianPrior(mean = log(c(2, 500)),
variance = diag(c(0.15, 2)))),
betaprior = PriorSpec(GaussianPrior(mean = rep(0, nbeta),
variance = diag(rep(10^6, nbeta)))))
#################
#MCMC parameters#
#################
len.burn.thin <- c(2000000, 200000, 1800) #a short chain for the beginning
################################
######RUNNING MALA SAMPLER######
################################
library(doParallel)
registerDoParallel(cores = 3)
ft.lgcp= foreach(i=1:3) %dopar% {lgcpPredictSpatialPlusPars(formula = X ~ bio2 + bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2),
sd = sd.p , Zmat = Zmat,
model.priors = priors,
model.inits = NULL ,
spatial.covmodel = cf,
ext = 4,
cellwidth = cellwidth,
poisson.offset = NULL,
output.control = setoutput(gridfunction = dump2dir(paste0('Future scenarios Bayesian/dumped/func/P-consp/',i), forceSave = T),
gridmeans = MonteCarloAverage(c('mean'), lastonly = T)),
mcmc.control = mcmcpars(mala.length = len.burn.thin[1], burnin = len.burn.thin[2],retain = len.burn.thin[3],
adaptivescheme = andrieuthomsh(inith = 1, alpha = 0.5, C = 1, targetacceptance = 0.574))
)
}
save.image('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
###############################
#########Diagnostics###########
###############################
par(mfrow = c(2,5)); priorpost(ft.lgcp[[3]]) #Density plots of parameter values
#
par(mfrow = c(2,5)); traceplots(ft.lgcp[[2]]) #Traces of parameter samples
#
par(mfrow = c(1,1)); postcov(ft.lgcp[[2]]) #The fitted covariance function
#
par(mfrow = c(2,5)); parautocorr(ft.lgcp[[1]])
#
parsummary(ft.lgcp[[1]]) #Summary of parameter estimates
#Autocorrelation
acor <- lgcp::autocorr(ft.lgcp[[1]], lags = c(10, 15, 19), inWindow = window)
par(mfrow = c(1, 3)); plot(acor)
#Trace plots for individual cells
par(mfrow = c(1,1))
tr <- lgcp::extract(ft.lgcp, x = 6, y = 120, t = 1, s = -1)
plot(tr, type = 'l', xlab = 'Iteration', ylab = 'Y')
#Lagged residuals
acf(tr)
par(mfrow = c(1,1))
plot(ft.lgcp)
plot(ft.lgcp, type = 'intensity')
points(sd.p)
###################
####Forecasting####
###################
newdata <- data.frame(rasterToPoints(raster.data))
coeffs <- rbind(ft.lgcp[[1]]$betarec, ft.lgcp[[2]]$betarec, ft.lgcp[[3]]$betarec)
Xmat <- model.matrix( ~ 1 + bio2 + bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2), data = newdata)
pred <- poisson()$linkinv(coeffs[,1:8]%*%t(Xmat))
pred.intervals <- adply(pred, 2, function(x) {
data.frame(Mean=mean(x), Median=median(x), HPDinterval(as.mcmc(x)))
})
pred.intervals$x <- newdata$x; pred.intervals$y <- newdata$y
pred.median <- rasterFromXYZ(subset(pred.intervals, select = c('x', 'y', 'Median'))) * 17305600
pred.up <- rasterFromXYZ(subset(pred.intervals, select = c('x', 'y', 'upper'))) * 17305600
pred.low <- rasterFromXYZ(subset(pred.intervals, select = c('x', 'y', 'lower'))) * 17305600
# Plot the posterior median and 95% credible-interval bounds of the
# predicted intensity surface, overlaying observed Hendra incidents.
par(mfrow = c(1,1))
plot(pred.median, main = 'Median'); points(Hendra.incidents)
#
plot(pred.up, main = 'Upper (97.5%)'); points(Hendra.incidents)
#
# Fixed title string: the closing parenthesis was missing ('Lower (2.5%').
plot(pred.low, main = 'Lower (2.5%)')
#
# Persist the three prediction surfaces as GeoTIFF rasters.
writeRaster(pred.median, 'Future scenarios Bayesian/Model predictions/P consp/Median', 'GTiff', overwrite = T)
writeRaster(pred.up, 'Future scenarios Bayesian/Model predictions/P consp/Upper', 'GTiff', overwrite = T)
writeRaster(pred.low, 'Future scenarios Bayesian/Model predictions/P consp/Lower', 'GTiff', overwrite = T)
#Exceedance probability maps
Xmat.locs <- model.matrix(~1 + bio2 + bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2),
data = data.frame(raster::extract(raster.data, Hendra.incidents)))
pred.locs <- poisson()$linkinv(coeffs %*% t(Xmat.locs))
pred.locs <- adply(pred.locs, 2, function(x) {
data.frame(Mean=mean(x), Median=median(x), HPDinterval(as.mcmc(x)))
})
#Median
prob.median <- adply(pred, 2, function(x){
length(which(x > quantile(pred.locs$Median, 0.8)))/length(x)
})
prob.median$x <- newdata$x; prob.median$y <- newdata$y
prob.median.r <- rasterFromXYZ(subset(prob.median, select = c('x', 'y', 'V1')))
plot(prob.median.r); points(Hendra.incidents)
#Upper
prob.95 <- adply(pred, 2, function(x){
length(which(x > median(pred.locs$upper)))/length(x)
})
prob.95$x <- newdata$x; prob.95$y <- newdata$y
prob.95.r <- rasterFromXYZ(subset(prob.95, select = c('x', 'y', 'V1')))
plot(prob.95.r); points(Hendra.incidents)
#lower
prob.5 <- adply(pred, 2, function(x){
length(which(x > median(pred.locs$lower)))/length(x)
})
prob.5$x <- newdata$x; prob.5$y <- newdata$y
prob.5.r <- rasterFromXYZ(subset(prob.5, select = c('x', 'y', 'V1')))
plot(prob.5.r)
save.image('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
| /Preparacion lgcp.R | no_license | gerardommc/Curso-de-metodos-bayesianos-en-ENM | R | false | false | 8,943 | r | setwd("/home/gerardo/MEGAsync/Niche models")
load('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
library(raster) ; library(rgdal); library(maptools); library(dismo); library(rgeos)
library(lgcp); library(spatstat); library(plyr); library(coda)
######################
#####Data loading#####
######################
##Load and reproject raster data
raster.data <- stack(c(paste('BIOCLIM/p-consp/ascii/bio',c(2, 3, 5, 9, 13),'.asc',sep=''),
'Bat model results/Maxent/p-consp.asc'))
proj4string(raster.data) <- CRS('+init=epsg:4326') #Original CRS
raster.data <- projectRaster(raster.data, crs = CRS('+init=epsg:3577')) #Target CRS
##Standardise raster data
cel.mean <- cellStats(raster.data, median)
cel.mean[6] <- 0
cel.sd <- cellStats(raster.data, sd)
cel.sd[6] <- 1
raster.data <- (raster.data - cel.mean)/cel.sd
#Load and project presence coordinates
Hendra.incidents <- read.csv('Hendra incidents-P-consp.csv')
coordinates(Hendra.incidents) <- ~Longitude+Latitude
proj4string(Hendra.incidents) <- CRS('+init=epsg:4326')
Hendra.incidents <- spTransform(Hendra.incidents, CRS('+init=epsg:3577'))
#########################################
###Formatting the data to run analyses###
#########################################
##Computing the polygon window
r <- raster.data[[1]]; r[!is.na(r[])] <- 1; r <- buffer(r, 5000)
window <- rasterToPolygons(r, dissolve = T)
spatstat.options(checkpolygons = F)
window <- as(window, 'owin')
window <- simplify.owin(window, dmin = 5000)
spatstat.options(checkpolygons = T)
#Formatting presence points as a spatstat ppp object
sd.p <- ppp(x = Hendra.incidents@coords[,1], y = Hendra.incidents@coords[,2],
window = window) # mask = as.matrix(r))#,
##Calculating optimal grid size for the point process
scale.analyses <- minimum.contrast(sd.p, model = 'exponential', method = 'K', intens = density(sd.p), transform = log)
chooseCellwidth(sd.p, cwinit = 14908.3)
points(sd.p)
cellwidth <- 14908.3
env.data <- data.frame(rasterToPoints(raster.data))
env.data <- SpatialPixelsDataFrame(cbind(env.data$x, env.data$y), env.data)
##Creating the sampling grid over which to calculate point intensity
polyolay <- getpolyol(data = sd.p, pixelcovariates = env.data, cellwidth = cellwidth)
polyolay <- getpolyol(data = sd.p, pixelcovariates = env.data, cellwidth = cellwidth, ext = 4)
save.image('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
##Formatting the environmental data
covar <- data.frame(rasterToPoints(raster.data))
covar <- SpatialPixelsDataFrame(cbind(covar$x, covar$y), data = covar[,3:ncol(covar)])
##Setting up the interpolation routine for every variable type (numerical or categorical)
covar@data=guessinterp(covar@data)
covar@data <- assigninterp(df = covar@data, vars = c('bio2', 'bio3', 'bio5', 'bio9', 'bio13', 'p.consp'),
value = "ArealWeightedSum")
##Generate the data from the computational grid and a model formula
Zmat <- getZmat(formula = X ~ bio2 +bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2),
data = sd.p, pixelcovariates = covar,
cellwidth = cellwidth, overl = polyolay, ext = 4)
#################################
###Spatial covariance function###
#################################
cf <- CovFunction(SpikedExponentialCovFct)#RandomFieldsCovFct(model = 'matern', additionalparameters = c(1)))#Cauchy, so far the best
###########################
###Specifying the priors###
###########################
nbeta <- 2 + nlayers(raster.data) #Number of predictors
priors <-lgcpPrior(etaprior = PriorSpec(LogGaussianPrior(mean = log(c(2, 500)),
variance = diag(c(0.15, 2)))),
betaprior = PriorSpec(GaussianPrior(mean = rep(0, nbeta),
variance = diag(rep(10^6, nbeta)))))
#################
#MCMC parameters#
#################
len.burn.thin <- c(2000000, 200000, 1800) #a short chain for the beginning
################################
######RUNNING MALA SAMPLER######
################################
library(doParallel)
registerDoParallel(cores = 3)
ft.lgcp= foreach(i=1:3) %dopar% {lgcpPredictSpatialPlusPars(formula = X ~ bio2 + bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2),
sd = sd.p , Zmat = Zmat,
model.priors = priors,
model.inits = NULL ,
spatial.covmodel = cf,
ext = 4,
cellwidth = cellwidth,
poisson.offset = NULL,
output.control = setoutput(gridfunction = dump2dir(paste0('Future scenarios Bayesian/dumped/func/P-consp/',i), forceSave = T),
gridmeans = MonteCarloAverage(c('mean'), lastonly = T)),
mcmc.control = mcmcpars(mala.length = len.burn.thin[1], burnin = len.burn.thin[2],retain = len.burn.thin[3],
adaptivescheme = andrieuthomsh(inith = 1, alpha = 0.5, C = 1, targetacceptance = 0.574))
)
}
save.image('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
###############################
#########Diagnostics###########
###############################
par(mfrow = c(2,5)); priorpost(ft.lgcp[[3]]) #Density plots of parameter values
#
par(mfrow = c(2,5)); traceplots(ft.lgcp[[2]]) #Traces of parameter samples
#
par(mfrow = c(1,1)); postcov(ft.lgcp[[2]]) #The fitted covariance function
#
par(mfrow = c(2,5)); parautocorr(ft.lgcp[[1]])
#
parsummary(ft.lgcp[[1]]) #Summary of parameter estimates
#Autocorrelation
acor <- lgcp::autocorr(ft.lgcp[[1]], lags = c(10, 15, 19), inWindow = window)
par(mfrow = c(1, 3)); plot(acor)
#Trace plots for individual cells
par(mfrow = c(1,1))
tr <- lgcp::extract(ft.lgcp, x = 6, y = 120, t = 1, s = -1)
plot(tr, type = 'l', xlab = 'Iteration', ylab = 'Y')
#Lagged residuals
acf(tr)
par(mfrow = c(1,1))
plot(ft.lgcp)
plot(ft.lgcp, type = 'intensity')
points(sd.p)
###################
####Forecasting####
###################
newdata <- data.frame(rasterToPoints(raster.data))
coeffs <- rbind(ft.lgcp[[1]]$betarec, ft.lgcp[[2]]$betarec, ft.lgcp[[3]]$betarec)
Xmat <- model.matrix( ~ 1 + bio2 + bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2), data = newdata)
pred <- poisson()$linkinv(coeffs[,1:8]%*%t(Xmat))
pred.intervals <- adply(pred, 2, function(x) {
data.frame(Mean=mean(x), Median=median(x), HPDinterval(as.mcmc(x)))
})
pred.intervals$x <- newdata$x; pred.intervals$y <- newdata$y
pred.median <- rasterFromXYZ(subset(pred.intervals, select = c('x', 'y', 'Median'))) * 17305600
pred.up <- rasterFromXYZ(subset(pred.intervals, select = c('x', 'y', 'upper'))) * 17305600
pred.low <- rasterFromXYZ(subset(pred.intervals, select = c('x', 'y', 'lower'))) * 17305600
# Plot the posterior median and 95% credible-interval bounds of the
# predicted intensity surface, overlaying observed Hendra incidents.
par(mfrow = c(1,1))
plot(pred.median, main = 'Median'); points(Hendra.incidents)
#
plot(pred.up, main = 'Upper (97.5%)'); points(Hendra.incidents)
#
# Fixed title string: the closing parenthesis was missing ('Lower (2.5%').
plot(pred.low, main = 'Lower (2.5%)')
#
# Persist the three prediction surfaces as GeoTIFF rasters.
writeRaster(pred.median, 'Future scenarios Bayesian/Model predictions/P consp/Median', 'GTiff', overwrite = T)
writeRaster(pred.up, 'Future scenarios Bayesian/Model predictions/P consp/Upper', 'GTiff', overwrite = T)
writeRaster(pred.low, 'Future scenarios Bayesian/Model predictions/P consp/Lower', 'GTiff', overwrite = T)
#Exceedance probability maps
Xmat.locs <- model.matrix(~1 + bio2 + bio3 + bio5 + bio9 + bio13 + p.consp + I(p.consp^2),
data = data.frame(raster::extract(raster.data, Hendra.incidents)))
pred.locs <- poisson()$linkinv(coeffs %*% t(Xmat.locs))
pred.locs <- adply(pred.locs, 2, function(x) {
data.frame(Mean=mean(x), Median=median(x), HPDinterval(as.mcmc(x)))
})
#Median
prob.median <- adply(pred, 2, function(x){
length(which(x > quantile(pred.locs$Median, 0.8)))/length(x)
})
prob.median$x <- newdata$x; prob.median$y <- newdata$y
prob.median.r <- rasterFromXYZ(subset(prob.median, select = c('x', 'y', 'V1')))
plot(prob.median.r); points(Hendra.incidents)
#Upper
prob.95 <- adply(pred, 2, function(x){
length(which(x > median(pred.locs$upper)))/length(x)
})
prob.95$x <- newdata$x; prob.95$y <- newdata$y
prob.95.r <- rasterFromXYZ(subset(prob.95, select = c('x', 'y', 'V1')))
plot(prob.95.r); points(Hendra.incidents)
#lower
prob.5 <- adply(pred, 2, function(x){
length(which(x > median(pred.locs$lower)))/length(x)
})
prob.5$x <- newdata$x; prob.5$y <- newdata$y
prob.5.r <- rasterFromXYZ(subset(prob.5, select = c('x', 'y', 'V1')))
plot(prob.5.r)
save.image('Future scenarios Bayesian/Future scenarios bayesian-P-consp.RData')
|
## makeCacheMatrix creates a special matrix that is able to cache the inverse of the matrix
## cacheSolve makes used o the cache inverse matrix; if the inverse is available, then return it immediately without calculation.
## Otherwise calculate the inverse of the matrix, cache it before returning the inverse
## the inverse matrix is set to NULL as default
## there is also the getinverse and setinverse methods to get and set the inverse matrix respectively
# Create a matrix wrapper that caches its inverse.
#
# Returns a list of four accessor functions closing over the matrix `x`
# and its (lazily computed) inverse:
#   set(y)        -- replace the stored matrix and drop any cached inverse
#   get()         -- return the stored matrix
#   setinverse(i) -- store a computed inverse in the cache
#   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates the previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setinverse = function(im) {
      cached_inverse <<- im
    },
    getinverse = function() {
      cached_inverse
    }
  )
}
## cacheSolve computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve will retrieve the inverse from the cache
## Otherwise the solve() method will be used to calculate and cache the inverse matrix before returning it
# Return the inverse of the special "matrix" built by makeCacheMatrix.
#
# If an inverse is already cached on `x`, it is returned immediately
# (announced via message()); otherwise the inverse is computed with
# solve(), stored back into the cache, and returned.  Extra arguments
# in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, memoise it on the wrapper.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /ProgrammingAssignment2/cachematrix.R | no_license | number9dream/datasciencecoursera | R | false | false | 1,395 | r | ## makeCacheMatrix creates a special matrix that is able to cache the inverse of the matrix
## cacheSolve makes used o the cache inverse matrix; if the inverse is available, then return it immediately without calculation.
## Otherwise calculate the inverse of the matrix, cache it before returning the inverse
## the inverse matrix is set to NULL as default
## there is also the getinverse and setinverse methods to get and set the inverse matrix respectively
# Wrap a matrix together with a cache slot for its inverse.
# The returned list exposes set/get for the matrix itself and
# setinverse/getinverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  replace_matrix <- function(y) {
    x <<- y
    inv_cache <<- NULL  # a stale inverse must not survive a new matrix
  }
  read_matrix <- function() x
  write_inverse <- function(im) inv_cache <<- im
  read_inverse <- function() inv_cache
  list(
    set = replace_matrix,
    get = read_matrix,
    setinverse = write_inverse,
    getinverse = read_inverse
  )
}
## cacheSolve computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve will retrieve the inverse from the cache
## Otherwise the solve() method will be used to calculate and cache the inverse matrix before returning it
# Compute (or fetch from cache) the inverse of a makeCacheMatrix wrapper.
# `...` is forwarded to solve(); a cache hit is announced via message().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  hit <- x$getinverse()
  if (!is.null(hit)) {
    message("getting cached data")
    return(hit)
  }
  result <- solve(x$get(), ...)
  x$setinverse(result)
  result
}
|
\name{plotSwim}
\alias{plotSwim}
\title{Plot a swim object}
\description{Plots a swim object (the results of fitting the HMMM) as a spatial object with surrounding land boundaries. }
\usage{plotSwim(object)}
\arguments{
\item{object}{An object of class 'swim'.}
}
\author{K. Whoriskey}
\references{
Vector land data were taken from Natural Earth.
}
\examples{
data(blueshark)
fit = fitSwim(blueshark, 6)
plotSwim(fit)
}
| /man/plotSwim.Rd | no_license | kimwhoriskey/swim | R | false | false | 430 | rd | \name{plotSwim}
\alias{plotSwim}
\title{Plot a swim object}
\description{Plots a swim object (the results of fitting the HMMM) as a spatial object with surrounding land boundaries. }
\usage{plotSwim(object)}
\arguments{
\item{object}{An object of class 'swim'.}
}
\author{K. Whoriskey}
\references{
Vector land data were taken from Natural Earth.
}
\examples{
data(blueshark)
fit = fitSwim(blueshark, 6)
plotSwim(fit)
}
|
## File Name: skillspace.approximation.R
## File Version: 0.08
###########################################
# skill space approximation
# Approximate the skill space with L skill patterns of K binary skills.
#
# Draws nmax quasi-random points in [0,1]^K (sfsmisc::QUnif, leap=409),
# dichotomizes them at .5, always includes the all-zero and all-one
# patterns, removes duplicates and returns the first L unique patterns
# sorted by their "P<pattern>" row name.
#
# L    ... number of skill classes to retain
# K    ... number of skills (columns of the returned 0/1 matrix)
# nmax ... number of quasi-random draws used for the approximation
skillspace.approximation <- function( L, K, nmax=5000 )
{
    ndim <- K
    res <- sfsmisc::QUnif(nmax, p=ndim, leap=409)
    # dichotomize the quasi-random points into 0/1 skill indicators
    res <- 1*(res > .5)
    # guarantee that the extreme patterns (no skills / all skills) exist
    res <- rbind( rep(0, ndim), rep(1, ndim), res )
    # label every pattern "P" followed by its concatenated 0/1 string;
    # apply() also handles ndim == 1, where the old `for (vv in 2:ndim)`
    # loop indexed a nonexistent second column and errored
    v1 <- paste0("P", apply(res, 1, paste0, collapse=""))
    rownames(res) <- v1
    res <- res[ ! duplicated(v1), , drop=FALSE ]
    # fail loudly instead of silently padding the result with NA rows
    if ( L > nrow(res) ){
        stop("skillspace.approximation: only ", nrow(res),
             " unique skill patterns available, but L=", L, " requested")
    }
    res <- res[ 1:L, , drop=FALSE ]
    res <- res[ order( rownames(res) ), , drop=FALSE ]
    return(res)
}
###########################################
| /R/skillspace.approximation.R | no_license | Janehappiest/CDM | R | false | false | 628 | r | ## File Name: skillspace.approximation.R
## File Version: 0.08
###########################################
# skill space approximation
# Approximate the skill space with L skill patterns of K binary skills.
#
# Draws nmax quasi-random points in [0,1]^K (sfsmisc::QUnif, leap=409),
# dichotomizes them at .5, always includes the all-zero and all-one
# patterns, removes duplicates and returns the first L unique patterns
# sorted by their "P<pattern>" row name.
#
# L    ... number of skill classes to retain
# K    ... number of skills (columns of the returned 0/1 matrix)
# nmax ... number of quasi-random draws used for the approximation
skillspace.approximation <- function( L, K, nmax=5000 )
{
    ndim <- K
    res <- sfsmisc::QUnif(nmax, p=ndim, leap=409)
    # dichotomize the quasi-random points into 0/1 skill indicators
    res <- 1*(res > .5)
    # guarantee that the extreme patterns (no skills / all skills) exist
    res <- rbind( rep(0, ndim), rep(1, ndim), res )
    # label every pattern "P" followed by its concatenated 0/1 string;
    # apply() also handles ndim == 1, where the old `for (vv in 2:ndim)`
    # loop indexed a nonexistent second column and errored
    v1 <- paste0("P", apply(res, 1, paste0, collapse=""))
    rownames(res) <- v1
    res <- res[ ! duplicated(v1), , drop=FALSE ]
    # fail loudly instead of silently padding the result with NA rows
    if ( L > nrow(res) ){
        stop("skillspace.approximation: only ", nrow(res),
             " unique skill patterns available, but L=", L, " requested")
    }
    res <- res[ 1:L, , drop=FALSE ]
    res <- res[ order( rownames(res) ), , drop=FALSE ]
    return(res)
}
###########################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotatr_genomes_doc.R
\name{mm9_chrom_sizes}
\alias{mm9_chrom_sizes}
\title{mm9_chrom_sizes}
\format{A numeric vector with names of chromsomes and values of length of chromsomes.}
\usage{
data(mm9_chrom_sizes)
}
\description{
Chromosome sizes for mm9
}
\details{
Chromosome sizes come from UCSC Genome Browser.
}
\keyword{datasets}
| /man/mm9_chrom_sizes.Rd | no_license | Al3n70rn/annotatr | R | false | true | 412 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/annotatr_genomes_doc.R
\name{mm9_chrom_sizes}
\alias{mm9_chrom_sizes}
\title{mm9_chrom_sizes}
\format{A numeric vector with names of chromsomes and values of length of chromsomes.}
\usage{
data(mm9_chrom_sizes)
}
\description{
Chromosome sizes for mm9
}
\details{
Chromosome sizes come from UCSC Genome Browser.
}
\keyword{datasets}
|
# Compute a small table of descriptive statistics for a numeric vector.
#
# x: numeric vector; NA values are ignored for every statistic (the
#    "Valid Sample" row reports how many non-NA observations were used).
# Returns a data.frame with columns "Statistics" (measure name) and
# "Value" (the statistic, rounded to 2 decimals).
DescStats = function(x) {
  # na.rm = TRUE keeps the function usable on vectors with missing data;
  # without it quantile()/median() error out and mean()/sd() return NA,
  # which contradicts the non-NA "Valid Sample" count reported below.
  descriptiveStats = c(sum(!is.na(x)),
                       mean(x, na.rm = TRUE),
                       sd(x, na.rm = TRUE),
                       quantile(x, 0.25, na.rm = TRUE),
                       median(x, na.rm = TRUE),
                       quantile(x, 0.75, na.rm = TRUE),
                       min(x, na.rm = TRUE),
                       max(x, na.rm = TRUE),
                       max(x, na.rm = TRUE) - min(x, na.rm = TRUE))
  descriptiveStats = round(descriptiveStats, 2)
  descriptiveMeasure = c("Valid Sample", "Mean", "STD", "Q1", "Median", "Q3", "Min", "Max", "Range")
  descriptives = data.frame(descriptiveMeasure, descriptiveStats)
  colnames(descriptives) = c("Statistics", "Value")
  return (descriptives)
}
# Additional descriptive statistics for a numeric vector.
#
# x:    numeric vector
# prec: trimming percentage used for the trimmed mean (e.g. 2 -> 2/100)
# Returns a data.frame ("Statistic", "Value") with the prec-trimmed
# mean, skewness and kurtosis (both from the moments package), and the
# coefficient of variation, each rounded to 2 decimals.
addDesc = function(x, prec){
  library(moments)
  trimmed_mean <- mean(x, trim = prec/100)
  cv <- sd(x)/mean(x)
  stat_values <- round(c(trimmed_mean, skewness(x), kurtosis(x), cv), 2)
  stat_labels <- c(sprintf("%s Trimmed Mean", prec), "Skewness", "Kurtosis", "CV")
  out <- data.frame(stat_labels, stat_values)
  colnames(out) <- c("Statistic", "Value")
  return(out)
}
print(DescStats(Age))
print(addDesc(Age, 2))
| /Logistic Regression/Descriptive Stats Function.R | no_license | dean-sh/Statistical-Learning | R | false | false | 862 | r | DescStats = function(x) {
descriptiveStats = c(sum(!is.na(x)),mean(x),sd(x),quantile(x, 0.25),median(x),quantile(x, 0.75),min(x),max(x),max(x)-min(x))
descriptiveStats = round(descriptiveStats,2)
descriptiveMeasure = c("Valid Sample", "Mean", "STD", "Q1", "Median", "Q3", "Min", "Max", "Range")
descriptives = data.frame(descriptiveMeasure, descriptiveStats)
colnames(descriptives) = c("Statistics", "Value")
return (descriptives)
}
addDesc = function(x, prec){
library(moments)
adddescStats = c(mean(x,trim = prec/100),skewness(x),kurtosis(x),sd(x)/mean(x))
adddescStats = round(adddescStats,2)
adddescMeasure = c(sprintf("%s Trimmed Mean", prec),"Skewness","Kurtosis","CV")
adddesc = data.frame(adddescMeasure,adddescStats)
colnames(adddesc) = c("Statistic", "Value")
return (adddesc)
}
print(DescStats(Age))
print(addDesc(Age, 2))
|
# Manhattan plot of the odds ratio (OR) column of an .assoc file.
# Writes the plot to "<assoc file>.OR.png".
#
# Command line arguments:
#   args[1] - title for the plot
#   args[2] - path to the .assoc file
#
# Requires the 'qqman' package: install.packages("qqman")
library(qqman)
# Read the trailing command line arguments described above.
args <- commandArgs(trailing = TRUE)
title <- args[1]
location <- args[2]
# Open a 900x900 PNG device; all plotting below goes into this file.
png(paste(location, ".OR.png", sep=""), width = 900 , height = 900, res = 90)
# Load the association results table (whitespace-delimited, with header).
theTable <- read.table(location, header = TRUE)
# Plot X = chromosome position, Y = raw OR (logp = FALSE plots the "OR"
# column as-is instead of -log10 transforming it).
manhattan(theTable, p = "OR", logp = FALSE, ylab = "Ods Ratio", genomewideline = FALSE, suggestiveline = FALSE, main = title)
# Close the PNG device so the file is flushed even when this script is
# source()d from an interactive session instead of run via Rscript.
dev.off()
| /rScripts/manORPlot.r | permissive | RunarReve/PredictICDwithHPO | R | false | false | 726 | r | #Simple manhattan plot of the Ods Ratio (OR) of assoc files
#Will output a png plot
#argv[1] Title
#argv[2] Assoc file location
#'qqman' package needs to be installed
#(install.packages("qqman")
library(qqman)
#Able to read given arguments
#args[1]:Title name for the plot
args <- commandArgs(trailing = TRUE)
title <- args[1]
location <- args[2]
#Tell R that output should be .png
png(paste(location, ".OR.png", sep=""), width = 900 , height = 900, res = 90)
#Load in the table to
theTable <- read.table(location, header = TRUE)
#Plot the table into (X=Chromosone possition, Y=log P value)
manhattan(theTable, p = "OR", logp =FALSE, ylab="Ods Ratio", genomewideline = FALSE, suggestiveline = FALSE, main = title )
|
library(ggplot2)
library(plyr)
str(allfrog)
summary(allfrog)
colnames(allfrog)
# indices
#frogs
unique.frogs <- unique(allfrog$Species)
for(frog in unique.frogs){print(frog)}
index.green <- which(allfrog$Species=="Green treefrog")
print(paste("# green =",length(index.green)))
index.barking <- which(allfrog$Species=="Barking treefrog")
print(paste("# barking =",length(index.barking)))
index.mole <- which(allfrog$Species=="Mole salamander")
print(paste("# mole salamanders =",length(index.mole)))
index.leopard <- which(allfrog$Species=="Leopard frog")
print(paste("# leopard =",length(index.leopard)))
index.fowlers <- which(allfrog$Species=="Fowlers toad")
print(paste("# fowlers =",length(index.fowlers)))
index.gray <- which(allfrog$Species=="Gray treefrog")
print(paste("# gray =",length(index.gray)))
index.cricket <- which(allfrog$Species=="Cricket frog")
print(paste("# cricket =",length(index.cricket)))
index.narrowmouth <- which(allfrog$Species=="Narrowmouth toad")
print(paste("# narrowmouth =",length(index.narrowmouth)))
print(paste("frog records =",length(allfrog$Species)))
count.frogs = length(index.green) + length(index.barking)+ length(index.mole)+ length(index.leopard) +
length(index.fowlers)+ length(index.gray)+ length(index.cricket)+ length(index.narrowmouth)
print(paste("frog species records =",count.frogs))
#chemicals
unique.chemicals <- unique(allfrog$Chemical)
for(chemical in unique.chemicals){print(chemical)}
index.atrazine <- which(allfrog$Chemical=="Atrazine")
print(paste("# atrazine =",length(index.atrazine)))
index.fipronil <- which(allfrog$Chemical=="Fipronil")
print(paste("# fipronil =",length(index.fipronil)))
index.pendimethalin <- which(allfrog$Chemical=="Pendimethalin")
print(paste("# pendimethalin =",length(index.pendimethalin)))
index.triadimefon <- which(allfrog$Chemical=="Triadimefon")
print(paste("# triadimefon =",length(index.triadimefon)))
index.imidacloprid <- which(allfrog$Chemical=="Imidacloprid")
print(paste("# imidacloprid =",length(index.imidacloprid)))
Nchemicals = length(index.atrazine)+length(index.fipronil)+length(index.pendimethalin)+length(index.triadimefon)+length(index.imidacloprid)
print(paste("# chemicals =",Nchemicals))
#metabolites
index.sulfone <- which(allfrog$Chemical=="Fipronil-Sulfone")
print(paste("# sulfone =",length(index.sulfone)))
index.triadimenol <- which(allfrog$Chemical=="Triadimenol")
print(paste("# triadimenol =",length(index.triadimenol)))
index.deisopropyl <- which(allfrog$Chemical=="Deisopropyl Atrazine")
print(paste("# deisopropyl =",length(index.deisopropyl)))
index.desethyl <- which(allfrog$Chemical=="Desethyl Atrazine")
print(paste("# desethyl =",length(index.desethyl)))
Nmetabolites=length(index.sulfone)+length(index.triadimenol)+length(index.deisopropyl)+length(index.desethyl)
print(paste("# metabolites =",Nmetabolites))
#totals
index.totalatrazine <- which(allfrog$Chemical=="Total Atrazine")
print(paste("# total atrazine =",length(index.totalatrazine)))
index.totaltriadimefon <- which(allfrog$Chemical=="Total Triadimefon")
print(paste("# total triadimefon=",length(index.totaltriadimefon)))
index.totalfipronil <- which(allfrog$Chemical=="Total Fipronil")
print(paste("# total fipronil=",length(index.totalfipronil)))
Ntotals = length(index.totalatrazine)+length(index.totaltriadimefon)+length(index.totalfipronil)
print(paste("# totals =",Ntotals))
Ntotaltotal = Nchemicals + Nmetabolites+Ntotals
print(paste("# total chemical entries =",Ntotaltotal))
print(paste("frog species records =",count.frogs))
#instruments
unique.instruments <- unique(allfrog$Instrument)
for(instrument in unique.instruments){print(instrument)}
index.gcms <- which(allfrog$Instrument=="GCMS")
index.lcms <- which(allfrog$Instrument=="LCMS")
#applications
unique.applications <- unique(allfrog$Application)
for(application in unique.applications){print(application)}
index.soil <- which(allfrog$Application=="Soil")
index.overspray <- which(allfrog$Application=="Overspray")
#construct some factor fields as labels
attach(allfrog)
allfrog$ChemLabel <- paste("Log",allfrog$logKow,allfrog$Chemical,allfrog$Application,allfrog$Instrument)
allfrog$ChemLabel <- as.factor(allfrog$ChemLabel)
unique(paste(Chemical,Application,Instrument))
##############################
#basic histograms and test for normality
allsoil <- allfrog[index.soil,]
dim(allsoil)
#allsoil.lcms <- allsoil[which(allsoil$Instrument=="LCMS"),]
#allsoil.gcms <- allsoil[which(allsoil$Instrument=="GCMS"),]
#View(allsoil)
#View(allsoil.lcms)
#View(allsoil.gcms)
alloverspray <- allfrog[index.overspray,]
dim(alloverspray)
#View(alloverspray)
unique(alloverspray$Species)
index.allsoil.overspray <- which(allfrog$Species==unique(alloverspray$Species))
allsoil.overspray <- allsoil[index.allsoil.overspray,]
dim(allsoil.overspray)
#View(alloverspray)
#alloverspray.lcms <- alloverspray[which(alloverspray$Instrument=="LCMS"),]
#alloverspray.gcms <- alloverspray[which(alloverspray$Instrument=="GCMS"),]
#View(alloverspray)
# ## lump triademefons and fipronils and atrazines (tba)
# ## barkers and greens
# #ignore frogs as a factor for distribution fitting
# ##LCMS
# pdf(paste(frog_out,"hist_app_overspray.pdf",sep=""),width=11,height=8)
# par(mfrow=c(2,2))
# for(chemical in unique.chemicals){
# chem.soil <- allsoil.overspray$TissueConc[allsoil.overspray$Chemical==chemical]
# chem.overspray <- alloverspray$TissueConc[alloverspray$Chemical==chemical]
# this.instrument <- unique(allsoil.overspray$Instrument[allsoil.overspray$Chemical==chemical])
# #report out sample size
# print(paste(chemical,this.instrument, "soil samples = ", length(chem.soil)," overspray samples = ", length(chem.overspray)))
# if(length(chem.soil)>0 && length(chem.overspray)>0){
# histmin <- min(c(chem.soil,chem.overspray),na.rm=TRUE)
# histmax <- max(c(chem.soil,chem.overspray),na.rm=TRUE)
# t.p <- round(t.test(chem.soil,chem.overspray)$p.value,digits=5)
# hist(chem.soil,main=paste(this.instrument,chemical,"p=",t.p),xlab="Soil Application: Tissue Concentration",col="blue",xlim=c(histmin,histmax))
# hist(chem.overspray,main=paste(this.instrument,chemical,"p=",t.p),xlab="Overspray Application: Tissue Concentration",col="red",xlim=c(histmin,histmax))
# }
# }
# dev.off()
#lcms boxplots for barkers and greens that compare soil versus overspray
# for imidacloprid, total atrazine, total triadimefon, total fipronil, and pendimethalin
index.goodchems = c(index.imidacloprid,index.totalatrazine,index.totaltriadimefon,index.totalfipronil,index.pendimethalin)
spray.boxplot <- allfrog[index.goodchems,]
spray.boxplot <- spray.boxplot[spray.boxplot$Instrument=="LCMS",]
#View(spray.boxplot[spray.boxplot$Species=="Barking treefrog"|spray.boxplot$Species=="Green treefrog",])
spray.boxplot <- spray.boxplot[spray.boxplot$Species=="Barking treefrog"|spray.boxplot$Species=="Green treefrog",]
dim(spray.boxplot)
#View(spray.boxplot)
colnames(spray.boxplot)
spray.boxplot[which(spray.boxplot$Chemical=="Total Atrazine"),3] = "Atrazine"
spray.boxplot[which(spray.boxplot$Chemical=="Total Triadimefon"),3] = "Triadimefon"
spray.boxplot[which(spray.boxplot$Chemical=="Total Fipronil"),3] = "Fipronil"
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange multiple ggplot objects on one page using grid viewports.
#
# Plots are passed either directly in ... or as a list via plotlist.
#   cols:   number of columns in the grid layout (ignored when layout given)
#   layout: optional matrix of plot indices; e.g. matrix(c(1,2,3,3), nrow=2,
#           byrow=TRUE) puts plot 3 across the whole bottom row.
#   file:   accepted for call compatibility but never used in this body.
# Side effects only: draws to the current graphics device.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
require(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Build a cols-wide matrix of plot indices, filled column-wise,
# with as many rows as needed to hold all the plots.
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
# A single plot needs no grid machinery; print it directly.
print(plots[[1]])
} else {
# Set up a fresh grid page divided according to the layout matrix.
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Draw each plot into the viewport cell(s) holding its index.
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
dim(spray.boxplot)
#View(spray.boxplot)
# Blank out rows 11:15 of column 11 before plotting (these values are
# excluded from the figure; presumably invalid -- TODO confirm which column
# index 11 refers to against colnames(spray.boxplot))
spray.boxplot[11:15,11] <- NA
# Figure 2: BCF boxplots by pesticide (ordered by logKow), soil vs overspray
pdf(paste(frog_out,"fig2_boxplot_soil_spray_bcf.pdf",sep=""),width=8.5,height=11)
# BUG FIX: subset on the Species column explicitly. The original
# `spray.boxplot[spray.boxplot=="Barking treefrog",]` compares the whole
# data frame to the string, yielding a logical-matrix row index that selects
# the wrong rows (the correct `$Species==` form is used earlier in the script).
spray.barkingtreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Barking treefrog",])
spray.factors <- reorder(spray.barkingtreefrog$Chemical, spray.barkingtreefrog$logKow)
p1 <- qplot(spray.factors, BCF, fill=factor(Application), data=spray.barkingtreefrog,
            geom="boxplot",xlab="",ylab="Barking treefrog BCF")+annotate("text", x=5, y=3.3, label="A")+
  #annotate("text", x=1, y=-0.25, label="***")+annotate("text", x=2, y=-0.25, label="*")+
  #annotate("text", x=3, y=-0.25, label="**")+annotate("text", x=4, y=-0.25, label="***")+
  #annotate("text", x=5, y=-0.25, label="***")+
  theme_bw() +scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
# Same fix for the green treefrog panel
spray.greentreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Green treefrog",])
p2 <- qplot(reorder(Chemical,logKow), BCF, fill=factor(Application), data=spray.greentreefrog,
            geom="boxplot",xlab="Pesticide",ylab="Green treefrog BCF")+annotate("text", x=4, y=1.2, label="B")+
  #annotate("text", x=1, y=-0.25, label="***")+annotate("text", x=2, y=-0.25, label="**")+
  #annotate("text", x=3, y=-0.25, label="**")+annotate("text", x=4, y=-0.25, label="***")+
  theme_bw()+scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
multiplot(p1, p2)
dev.off()
# Figure 1: tissue-concentration boxplots, soil vs overspray application
pdf(paste(frog_out,"fig1_boxplot_soil_spray_tissueconc.pdf",sep=""),width=8.5,height=11)
# BUG FIX: subset on the Species column explicitly (comparing the whole data
# frame to a string produces a logical-matrix row index and wrong rows).
spray.barkingtreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Barking treefrog",])
spray.factors <- reorder(spray.barkingtreefrog$Chemical, spray.barkingtreefrog$logKow)
p1 <- qplot(spray.factors, TissueConc, fill=factor(Application), data=spray.barkingtreefrog,
            geom="boxplot",xlab="",ylab="Barking treefrog Tissue Concentration (ppm)")+annotate("text", x=5, y=17, label="A")+
  #annotate("text", x=1, y=-1.25, label="***")+annotate("text", x=2, y=-1.25, label="*")+
  #annotate("text", x=3, y=-1.25, label="**")+annotate("text", x=4, y=-1.25, label="***")+
  #annotate("text", x=5, y=-1.25, label="***")+
  theme_bw() +scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
# Same fix for the green treefrog panel
spray.greentreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Green treefrog",])
p2 <- qplot(reorder(Chemical,logKow), TissueConc, fill=factor(Application), data=spray.greentreefrog,
            geom="boxplot",xlab="Pesticide",ylab="Green treefrog Tissue Concentration (ppm)")+annotate("text", x=4, y=21, label="B")+
  #annotate("text", x=1, y=-1.25, label="***")+annotate("text", x=2, y=-1.25, label="**")+
  #annotate("text", x=3, y=-1.25, label="**")+annotate("text", x=4, y=-1.25, label="***")+
  theme_bw()+scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
multiplot(p1, p2)
dev.off()
# pdf(paste(frog_out,"barchart_soil_spray.pdf",sep=""),width=8.5,height=11)
# #create a data frame with averages and standard deviations
# bt <- spray.boxplot[spray.boxplot=="Barking treefrog",]
# bcf.avg<-ddply(bt, c("Chemical", "Application"), function(df)
# return(c(bcf.avg=mean(df$BCF), bcf.sd=sd(df$BCF),bcf.logKow=mean(df$logKow))))
# #create the barplot component
# dodge <- position_dodge(width=0.9)
# avg.plot<-qplot(reorder(Chemical,bcf.logKow), bcf.avg, fill=factor(Application),
# data=bcf.avg, xlab="",ylab="Barking treefrog BCF",geom="bar", position="dodge")
# #add error bars
# p1 <- avg.plot+geom_errorbar(aes(ymax=bcf.avg+bcf.sd, ymin=bcf.avg-bcf.sd),position="dodge")+
# annotate("text", x=5, y=3.3, label="A")+theme_bw()+ labs(fill="Application")
#
# gt <- spray.boxplot[spray.boxplot=="Green treefrog",]
# bcf.avg<-ddply(gt, c("Chemical", "Application"), function(df)
# return(c(bcf.avg=mean(df$BCF), bcf.sd=sd(df$BCF),bcf.logKow=mean(df$logKow))))
# bcf.avg[5,3]=NA
# bcf.avg[5,4]=NA
# #create the barplot component
# dodge <- position_dodge(width=0.9)
# avg.plot<-qplot(reorder(Chemical,bcf.logKow), bcf.avg, fill=factor(Application),
# data=bcf.avg, xlab="Pesticide",ylab="Green treefrog BCF",geom="bar", position="dodge")
# #add error bars
# p2 <- avg.plot+geom_errorbar(aes(ymax=bcf.avg+bcf.sd, ymin=bcf.avg-bcf.sd), position="dodge")+
# annotate("text", x=5, y=1.2, label="B")+theme_bw()+ labs(fill="Application")
# multiplot(p1, p2)
# dev.off()
| /01vanmeter_archives_figs.R | no_license | amphibian-exeff/vanmeter_aerial_aect2015 | R | false | false | 13,017 | r | library(ggplot2)
library(plyr)
# Quick structural checks on the master data frame (assumes `allfrog` was
# loaded upstream -- TODO confirm where it is read in)
str(allfrog)
summary(allfrog)
colnames(allfrog)
# indices
#frogs
# Build per-species row indices into allfrog and report the counts.
unique.frogs <- unique(allfrog$Species)
for(frog in unique.frogs){print(frog)}
index.green <- which(allfrog$Species=="Green treefrog")
print(paste("# green =",length(index.green)))
index.barking <- which(allfrog$Species=="Barking treefrog")
print(paste("# barking =",length(index.barking)))
index.mole <- which(allfrog$Species=="Mole salamander")
print(paste("# mole salamanders =",length(index.mole)))
index.leopard <- which(allfrog$Species=="Leopard frog")
print(paste("# leopard =",length(index.leopard)))
index.fowlers <- which(allfrog$Species=="Fowlers toad")
print(paste("# fowlers =",length(index.fowlers)))
index.gray <- which(allfrog$Species=="Gray treefrog")
print(paste("# gray =",length(index.gray)))
index.cricket <- which(allfrog$Species=="Cricket frog")
print(paste("# cricket =",length(index.cricket)))
index.narrowmouth <- which(allfrog$Species=="Narrowmouth toad")
print(paste("# narrowmouth =",length(index.narrowmouth)))
print(paste("frog records =",length(allfrog$Species)))
# Sanity check: per-species counts should sum to the total record count above
count.frogs = length(index.green) + length(index.barking)+ length(index.mole)+ length(index.leopard) +
  length(index.fowlers)+ length(index.gray)+ length(index.cricket)+ length(index.narrowmouth)
print(paste("frog species records =",count.frogs))
#chemicals
# Row indices for each parent compound, with count reporting.
unique.chemicals <- unique(allfrog$Chemical)
for(chemical in unique.chemicals){print(chemical)}
index.atrazine <- which(allfrog$Chemical=="Atrazine")
print(paste("# atrazine =",length(index.atrazine)))
index.fipronil <- which(allfrog$Chemical=="Fipronil")
print(paste("# fipronil =",length(index.fipronil)))
index.pendimethalin <- which(allfrog$Chemical=="Pendimethalin")
print(paste("# pendimethalin =",length(index.pendimethalin)))
index.triadimefon <- which(allfrog$Chemical=="Triadimefon")
print(paste("# triadimefon =",length(index.triadimefon)))
index.imidacloprid <- which(allfrog$Chemical=="Imidacloprid")
print(paste("# imidacloprid =",length(index.imidacloprid)))
Nchemicals = length(index.atrazine)+length(index.fipronil)+length(index.pendimethalin)+length(index.triadimefon)+length(index.imidacloprid)
print(paste("# chemicals =",Nchemicals))
#metabolites
# Row indices for each metabolite entry.
index.sulfone <- which(allfrog$Chemical=="Fipronil-Sulfone")
print(paste("# sulfone =",length(index.sulfone)))
index.triadimenol <- which(allfrog$Chemical=="Triadimenol")
print(paste("# triadimenol =",length(index.triadimenol)))
index.deisopropyl <- which(allfrog$Chemical=="Deisopropyl Atrazine")
print(paste("# deisopropyl =",length(index.deisopropyl)))
index.desethyl <- which(allfrog$Chemical=="Desethyl Atrazine")
print(paste("# desethyl =",length(index.desethyl)))
Nmetabolites=length(index.sulfone)+length(index.triadimenol)+length(index.deisopropyl)+length(index.desethyl)
print(paste("# metabolites =",Nmetabolites))
#totals
# Row indices for the "Total ..." (parent + metabolite) entries.
index.totalatrazine <- which(allfrog$Chemical=="Total Atrazine")
print(paste("# total atrazine =",length(index.totalatrazine)))
index.totaltriadimefon <- which(allfrog$Chemical=="Total Triadimefon")
print(paste("# total triadimefon=",length(index.totaltriadimefon)))
index.totalfipronil <- which(allfrog$Chemical=="Total Fipronil")
print(paste("# total fipronil=",length(index.totalfipronil)))
Ntotals = length(index.totalatrazine)+length(index.totaltriadimefon)+length(index.totalfipronil)
print(paste("# totals =",Ntotals))
# Cross-check: chemical + metabolite + total entries vs species record count
Ntotaltotal = Nchemicals + Nmetabolites+Ntotals
print(paste("# total chemical entries =",Ntotaltotal))
print(paste("frog species records =",count.frogs))
#instruments
# Row indices by analytical instrument (GCMS vs LCMS)
unique.instruments <- unique(allfrog$Instrument)
for(instrument in unique.instruments){print(instrument)}
index.gcms <- which(allfrog$Instrument=="GCMS")
index.lcms <- which(allfrog$Instrument=="LCMS")
#applications
# Row indices by pesticide application route (Soil vs Overspray)
unique.applications <- unique(allfrog$Application)
for(application in unique.applications){print(application)}
index.soil <- which(allfrog$Application=="Soil")
index.overspray <- which(allfrog$Application=="Overspray")
#construct some factor fields as labels
# NOTE(review): attach() is an anti-pattern (masking risk), but it is
# load-bearing here -- the bare Chemical/Application/Instrument names two
# lines below rely on it. Refactor with care.
attach(allfrog)
allfrog$ChemLabel <- paste("Log",allfrog$logKow,allfrog$Chemical,allfrog$Application,allfrog$Instrument)
allfrog$ChemLabel <- as.factor(allfrog$ChemLabel)
unique(paste(Chemical,Application,Instrument))
##############################
#basic histograms and test for normality
# Subset of soil-application records
allsoil <- allfrog[index.soil,]
dim(allsoil)
#allsoil.lcms <- allsoil[which(allsoil$Instrument=="LCMS"),]
#allsoil.gcms <- allsoil[which(allsoil$Instrument=="GCMS"),]
#View(allsoil)
#View(allsoil.lcms)
#View(allsoil.gcms)
# Subset of overspray-application records
alloverspray <- allfrog[index.overspray,]
dim(alloverspray)
#View(alloverspray)
unique(alloverspray$Species)
# BUG FIX (two issues in the original line):
# 1) use %in% instead of ==: unique(alloverspray$Species) may have length > 1,
#    and == would silently recycle it element-wise across allfrog$Species;
# 2) compute the index on allsoil itself: the original built row positions
#    relative to allfrog and then used them to index allsoil, which has a
#    different (smaller) set of rows, selecting the wrong records.
index.allsoil.overspray <- which(allsoil$Species %in% unique(alloverspray$Species))
allsoil.overspray <- allsoil[index.allsoil.overspray,]
dim(allsoil.overspray)
#View(alloverspray)
#alloverspray.lcms <- alloverspray[which(alloverspray$Instrument=="LCMS"),]
#alloverspray.gcms <- alloverspray[which(alloverspray$Instrument=="GCMS"),]
#View(alloverspray)
# ## lump triademefons and fipronils and atrazines (tba)
# ## barkers and greens
# #ignore frogs as a factor for distribution fitting
# ##LCMS
# pdf(paste(frog_out,"hist_app_overspray.pdf",sep=""),width=11,height=8)
# par(mfrow=c(2,2))
# for(chemical in unique.chemicals){
# chem.soil <- allsoil.overspray$TissueConc[allsoil.overspray$Chemical==chemical]
# chem.overspray <- alloverspray$TissueConc[alloverspray$Chemical==chemical]
# this.instrument <- unique(allsoil.overspray$Instrument[allsoil.overspray$Chemical==chemical])
# #report out sample size
# print(paste(chemical,this.instrument, "soil samples = ", length(chem.soil)," overspray samples = ", length(chem.overspray)))
# if(length(chem.soil)>0 && length(chem.overspray)>0){
# histmin <- min(c(chem.soil,chem.overspray),na.rm=TRUE)
# histmax <- max(c(chem.soil,chem.overspray),na.rm=TRUE)
# t.p <- round(t.test(chem.soil,chem.overspray)$p.value,digits=5)
# hist(chem.soil,main=paste(this.instrument,chemical,"p=",t.p),xlab="Soil Application: Tissue Concentration",col="blue",xlim=c(histmin,histmax))
# hist(chem.overspray,main=paste(this.instrument,chemical,"p=",t.p),xlab="Overspray Application: Tissue Concentration",col="red",xlim=c(histmin,histmax))
# }
# }
# dev.off()
#lcms boxplots for barkers and greens that compare soil versus overspray
# for imidacloprid, total atrazine, total triadimefon, total fipronil, and pendimethalin
# Combine the indices of the five analytes used in the figures
index.goodchems = c(index.imidacloprid,index.totalatrazine,index.totaltriadimefon,index.totalfipronil,index.pendimethalin)
spray.boxplot <- allfrog[index.goodchems,]
# Restrict to LCMS measurements only
spray.boxplot <- spray.boxplot[spray.boxplot$Instrument=="LCMS",]
#View(spray.boxplot[spray.boxplot$Species=="Barking treefrog"|spray.boxplot$Species=="Green treefrog",])
# Keep only the two treefrog species that appear in both application types
spray.boxplot <- spray.boxplot[spray.boxplot$Species=="Barking treefrog"|spray.boxplot$Species=="Green treefrog",]
dim(spray.boxplot)
#View(spray.boxplot)
colnames(spray.boxplot)
# Relabel "Total ..." rows to the parent compound name so totals and parents
# share one axis label in the boxplots. Column 3 is presumably the Chemical
# column (matches the which() condition) -- TODO confirm against colnames().
spray.boxplot[which(spray.boxplot$Chemical=="Total Atrazine"),3] = "Atrazine"
spray.boxplot[which(spray.boxplot$Chemical=="Total Triadimefon"),3] = "Triadimefon"
spray.boxplot[which(spray.boxplot$Chemical=="Total Fipronil"),3] = "Fipronil"
# Multiple plot function
#
# Prints several ggplot (or otherwise printable) objects on one page.
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: number of columns in the layout
# - layout: a matrix specifying the layout; if present, 'cols' is ignored.
#   E.g. matrix(c(1,2,3,3), nrow=2, byrow=TRUE) puts plot 1 in the upper left,
#   plot 2 in the upper right, and plot 3 across the whole bottom row.
# - file: unused; kept only so existing calls that pass it keep working.
# Returns NULL invisibly; called for its side effect of drawing plots.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # library() fails loudly if grid is unavailable, unlike require(),
  # which only returns FALSE with a warning.
  library(grid)
  # Gather every plot passed via ... and/or plotlist into one list
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  # No layout supplied: build a cols-wide grid, filled column-major
  if (is.null(layout)) {
    layout <- matrix(seq_len(cols * ceiling(numPlots / cols)),
                     ncol = cols, nrow = ceiling(numPlots / cols))
  }
  if (numPlots == 1) {
    # One plot: print it directly, no viewport machinery needed
    print(plots[[1]])
  } else {
    # Set up the page and a viewport grid matching `layout`
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    for (i in seq_len(numPlots)) {
      # Cells of `layout` that plot i occupies (may span rows/columns)
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
  invisible(NULL)
}
dim(spray.boxplot)
#View(spray.boxplot)
# Blank out rows 11:15 of column 11 before plotting (these values are
# excluded from the figure; presumably invalid -- TODO confirm which column
# index 11 refers to against colnames(spray.boxplot))
spray.boxplot[11:15,11] <- NA
# Figure 2: BCF boxplots by pesticide (ordered by logKow), soil vs overspray
pdf(paste(frog_out,"fig2_boxplot_soil_spray_bcf.pdf",sep=""),width=8.5,height=11)
# BUG FIX: subset on the Species column explicitly. The original
# `spray.boxplot[spray.boxplot=="Barking treefrog",]` compares the whole
# data frame to the string, yielding a logical-matrix row index that selects
# the wrong rows (the correct `$Species==` form is used earlier in the script).
spray.barkingtreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Barking treefrog",])
spray.factors <- reorder(spray.barkingtreefrog$Chemical, spray.barkingtreefrog$logKow)
p1 <- qplot(spray.factors, BCF, fill=factor(Application), data=spray.barkingtreefrog,
            geom="boxplot",xlab="",ylab="Barking treefrog BCF")+annotate("text", x=5, y=3.3, label="A")+
  #annotate("text", x=1, y=-0.25, label="***")+annotate("text", x=2, y=-0.25, label="*")+
  #annotate("text", x=3, y=-0.25, label="**")+annotate("text", x=4, y=-0.25, label="***")+
  #annotate("text", x=5, y=-0.25, label="***")+
  theme_bw() +scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
# Same fix for the green treefrog panel
spray.greentreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Green treefrog",])
p2 <- qplot(reorder(Chemical,logKow), BCF, fill=factor(Application), data=spray.greentreefrog,
            geom="boxplot",xlab="Pesticide",ylab="Green treefrog BCF")+annotate("text", x=4, y=1.2, label="B")+
  #annotate("text", x=1, y=-0.25, label="***")+annotate("text", x=2, y=-0.25, label="**")+
  #annotate("text", x=3, y=-0.25, label="**")+annotate("text", x=4, y=-0.25, label="***")+
  theme_bw()+scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
multiplot(p1, p2)
dev.off()
# Figure 1: tissue-concentration boxplots, soil vs overspray application
pdf(paste(frog_out,"fig1_boxplot_soil_spray_tissueconc.pdf",sep=""),width=8.5,height=11)
# BUG FIX: subset on the Species column explicitly (comparing the whole data
# frame to a string produces a logical-matrix row index and wrong rows).
spray.barkingtreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Barking treefrog",])
spray.factors <- reorder(spray.barkingtreefrog$Chemical, spray.barkingtreefrog$logKow)
p1 <- qplot(spray.factors, TissueConc, fill=factor(Application), data=spray.barkingtreefrog,
            geom="boxplot",xlab="",ylab="Barking treefrog Tissue Concentration (ppm)")+annotate("text", x=5, y=17, label="A")+
  #annotate("text", x=1, y=-1.25, label="***")+annotate("text", x=2, y=-1.25, label="*")+
  #annotate("text", x=3, y=-1.25, label="**")+annotate("text", x=4, y=-1.25, label="***")+
  #annotate("text", x=5, y=-1.25, label="***")+
  theme_bw() +scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
# Same fix for the green treefrog panel
spray.greentreefrog <- na.omit(spray.boxplot[spray.boxplot$Species=="Green treefrog",])
p2 <- qplot(reorder(Chemical,logKow), TissueConc, fill=factor(Application), data=spray.greentreefrog,
            geom="boxplot",xlab="Pesticide",ylab="Green treefrog Tissue Concentration (ppm)")+annotate("text", x=4, y=21, label="B")+
  #annotate("text", x=1, y=-1.25, label="***")+annotate("text", x=2, y=-1.25, label="**")+
  #annotate("text", x=3, y=-1.25, label="**")+annotate("text", x=4, y=-1.25, label="***")+
  theme_bw()+scale_fill_grey(start=0.5, end=1) + labs(fill="Application")
multiplot(p1, p2)
dev.off()
# pdf(paste(frog_out,"barchart_soil_spray.pdf",sep=""),width=8.5,height=11)
# #create a data frame with averages and standard deviations
# bt <- spray.boxplot[spray.boxplot=="Barking treefrog",]
# bcf.avg<-ddply(bt, c("Chemical", "Application"), function(df)
# return(c(bcf.avg=mean(df$BCF), bcf.sd=sd(df$BCF),bcf.logKow=mean(df$logKow))))
# #create the barplot component
# dodge <- position_dodge(width=0.9)
# avg.plot<-qplot(reorder(Chemical,bcf.logKow), bcf.avg, fill=factor(Application),
# data=bcf.avg, xlab="",ylab="Barking treefrog BCF",geom="bar", position="dodge")
# #add error bars
# p1 <- avg.plot+geom_errorbar(aes(ymax=bcf.avg+bcf.sd, ymin=bcf.avg-bcf.sd),position="dodge")+
# annotate("text", x=5, y=3.3, label="A")+theme_bw()+ labs(fill="Application")
#
# gt <- spray.boxplot[spray.boxplot=="Green treefrog",]
# bcf.avg<-ddply(gt, c("Chemical", "Application"), function(df)
# return(c(bcf.avg=mean(df$BCF), bcf.sd=sd(df$BCF),bcf.logKow=mean(df$logKow))))
# bcf.avg[5,3]=NA
# bcf.avg[5,4]=NA
# #create the barplot component
# dodge <- position_dodge(width=0.9)
# avg.plot<-qplot(reorder(Chemical,bcf.logKow), bcf.avg, fill=factor(Application),
# data=bcf.avg, xlab="Pesticide",ylab="Green treefrog BCF",geom="bar", position="dodge")
# #add error bars
# p2 <- avg.plot+geom_errorbar(aes(ymax=bcf.avg+bcf.sd, ymin=bcf.avg-bcf.sd), position="dodge")+
# annotate("text", x=5, y=1.2, label="B")+theme_bw()+ labs(fill="Application")
# multiplot(p1, p2)
# dev.off()
|
#
# A Shiny web application: an interactive histogram with a user-chosen
# number of bins. Run it with the 'Run App' button in RStudio, or via
# shiny::runApp().
#
# More on building Shiny apps:
#
# http://shiny.rstudio.com/
#

library(shiny)

# UI: page title, a bin-count slider in the sidebar, and the plot area
ui <- fluidPage(

  titlePanel("State of the Union"),

  sidebarLayout(
    sidebarPanel(
      # Slider controlling how many bins the histogram uses
      sliderInput(
        "bins",
        "Number of bins:",
        min = 1,
        max = 50,
        value = 30
      )
    ),

    # Main panel holds the rendered histogram
    mainPanel(
      plotOutput("distPlot")
    )
  )
)

# Server: re-renders the histogram whenever the slider value changes
server <- function(input, output) {

  output$distPlot <- renderPlot({
    # Second column of the built-in `faithful` data set
    waiting <- faithful[, 2]
    # input$bins + 1 break points give exactly input$bins bins
    breaks <- seq(min(waiting), max(waiting), length.out = input$bins + 1)

    hist(waiting, breaks = breaks, col = 'darkgray', border = 'white')
  })
}

# Launch the application
shinyApp(ui = ui, server = server)
| /SOTU/app.R | no_license | teresarokos/class-2018-10-29 | R | false | false | 1,214 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above, or with shiny::runApp().
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI: a title, a slider controlling the bin count, and the plot area
ui <- fluidPage(
  # Application title
  titlePanel("State of the Union"),
  # Sidebar with a slider input for number of bins (1..50, default 30)
  sidebarLayout(
    sidebarPanel(
      sliderInput("bins",
                  "Number of bins:",
                  min = 1,
                  max = 50,
                  value = 30)
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("distPlot")
    )
  )
)
# Define server logic: renderPlot re-runs whenever input$bins changes
server <- function(input, output) {
  output$distPlot <- renderPlot({
    # Second column of the built-in `faithful` data set
    x <- faithful[, 2]
    # input$bins + 1 break points give exactly input$bins bins
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x, breaks = bins, col = 'darkgray', border = 'white')
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Map FARS accidents in a state}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{Code for the wanted state}
\item{year}{Wanted year in the format yyyy (four digits)}
}
\value{
A plot of the state with dots locating each accident.
Errors are issued for invalid state numbers. A message is issued when there
are no accidents to plot.
}
\description{
Plot the state's map with dots locating the year's accidents.
}
\examples{
\dontrun{
fars_map_state(1, 2015)
}
}
| /man/fars_map_state.Rd | no_license | danielmoralesx/dmorales.fars | R | false | true | 660 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_map_state}
\alias{fars_map_state}
\title{Map FARS accidents in a state}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{Code for the wanted state}
\item{year}{Wanted year in the format yyyy (four digits)}
}
\value{
A plot of the state with dots locating each accident.
Errors are issued for invalid state numbers. A message is issued when there
are no accidents to plot.
}
\description{
Plot the state's map with dots locating the year's accidents.
}
\examples{
\dontrun{
fars_map_state(1, 2015)
}
}
|
#' @title Identify sudden gains.
#'
#' @description Function to identify sudden gains in longitudinal data structured in wide format.
#'
#' @param data A data set in wide format including an ID variable and variables for each measurement point.
#' @param id_var_name String, specifying the name of the ID variable. Each row should have a unique value.
#' @param sg_var_list Vector, specifying the variable names of each measurement point sequentially.
#' @param sg_crit1_cutoff Numeric, specifying the cut-off value to be used for the first sudden gains criterion.
#' The function \code{\link{define_crit1_cutoff}} can be used to calculate a cutoff value based on the Reliable Change Index (RCI; Jacobson & Truax, 1991).
#' If set to \code{NULL} the first criterion won't be applied.
#' @param sg_crit2_pct Numeric, specifying the percentage change to be used for the second sudden gains criterion.
#' If set to \code{NULL} the second criterion won't be applied.
#' @param sg_crit3 If set to \code{TRUE} the third criterion will be applied automatically adjusting the critical value for missingness.
#' If set to \code{FALSE} the third criterion won't be applied.
#' @param sg_crit3_alpha Numeric, alpha for the two-tailed student t-test to determine the critical value to be used for the third criterion.
#' Degrees of freedom are based on the number of available data in the three sessions preceding the gain and the three sessions following the gain.
#' @param sg_crit3_adjust Logical, specify whether critical value gets adjusted for missingness, see Lutz et al. (2013) and the documentation of this R package for further details.
#' This argument is set to \code{TRUE} by default adjusting the critical value for missingness as described in the package documentation and Lutz et al. (2013):
#' A critical value of 2.776 is used when all three data points before and after a potential gain are available,
#' where one data point is missing either before or after a potential gain a critical value of 3.182 is used,
#' and where one data point is missing both before and after the gain a critical value of 4.303 is used (for sg_crit3_alpha = 0.05).
#' If set to \code{FALSE} the critical value set in \code{sg_crit3_critical_value} will instead be used for all comparisons, regardless of missingness in the sequence of data points that are investigated for potential sudden gains.
#' @param sg_crit3_critical_value Numeric, specifying the critical value to instead be used for all comparisons, regardless of missingness in the sequence of data points that are investigated for potential sudden gains.
#' @param identify_sg_1to2 Logical, indicating whether to identify sudden gains from measurement point 1 to 2.
#' If set to TRUE, this implies that the first variable specified in \code{sg_var_list} represents a baseline measurement point, e.g. pre-intervention assessment.
#' @param crit123_details Logical, if set to \code{TRUE} this function returns information about which of the three criteria (e.g. "sg_crit1_2to3", "sg_crit2_2to3", and "sg_crit3_2to3") are met for each session to session interval for all cases.
#' Variables named "sg_2to3", "sg_3to4" summarise all criteria that were selected to identify sudden gains.
#' @return A wide data set indicating whether sudden gains are present for each session to session interval for all cases in \code{data}.
#' @references Lutz, W., Ehrlich, T., Rubel, J., Hallwachs, N., Röttger, M.-A., Jorasz, C., … Tschitsaz-Stucki, A. (2013). The ups and downs of psychotherapy: Sudden gains and sudden losses identified with session reports. Psychotherapy Research, 23(1), 14–24. \doi{10.1080/10503307.2012.693837}.
#'
#' Tang, T. Z., & DeRubeis, R. J. (1999). Sudden gains and critical sessions in cognitive-behavioral therapy for depression. Journal of Consulting and Clinical Psychology, 67(6), 894–904. \doi{10.1037/0022-006X.67.6.894}.
#' @examples # Identify sudden gains
#' identify_sg(data = sgdata,
#' sg_crit1_cutoff = 7,
#' id_var_name = "id",
#' sg_var_list = c("bdi_s1", "bdi_s2", "bdi_s3",
#' "bdi_s4", "bdi_s5", "bdi_s6",
#' "bdi_s7", "bdi_s8", "bdi_s9",
#' "bdi_s10", "bdi_s11", "bdi_s12"))
#' @export
identify_sg <- function(data, id_var_name, sg_var_list, sg_crit1_cutoff, sg_crit2_pct = .25, sg_crit3 = TRUE, sg_crit3_alpha = .05, sg_crit3_adjust = TRUE, sg_crit3_critical_value = 2.776, identify_sg_1to2 = FALSE, crit123_details = FALSE) {
# Create tibble necessary for further data manipulations
data <- tibble::as_tibble(data)
if (base::is.null(sg_crit1_cutoff) == TRUE & base::is.null(sg_crit2_pct) == TRUE & sg_crit3 == FALSE) {
stop("Please specify at least one of the three sudden gains criteria using the following arguments: sg_crit1_cutoff, sg_crit2_pct, sg_crit3.", call. = FALSE)
}
if (base::is.null(sg_crit1_cutoff) == FALSE) {
if (sg_crit1_cutoff < 0 ) {
stop("The cut-off value specified in 'sg_crit1_cutoff' needs to be positive to identify sudden gains.", call. = FALSE)
}
}
# Set p for qt function needed for 3rd criterion
sg_crit3_alpha_critical_value <- sg_crit3_alpha
# Select data for identifying sudden gains
# Only ID variable and sudden gains variables needed
data_select <- data %>%
dplyr::arrange(!!rlang::sym(id_var_name)) %>%
dplyr::select(!!rlang::sym(id_var_name), dplyr::all_of(sg_var_list))
# Remove ID from dataframe for loop
data_loop <- data_select %>%
dplyr::arrange(!!rlang::sym(id_var_name)) %>%
dplyr::select(2:base::ncol(data_select))
# Create one empty dataframe for each sudden gains criterion
crit1 <- base::data.frame(base::matrix(NA,
nrow = base::nrow(data_loop),
ncol = base::ncol(data_loop) - 3))
crit2 <- base::data.frame(base::matrix(NA,
nrow = base::nrow(data_loop),
ncol = base::ncol(data_loop) - 3))
crit3 <- base::data.frame(base::matrix(NA,
nrow = base::nrow(data_loop),
ncol = base::ncol(data_loop) - 3))
# Iterate through all rows
for (row_i in 1:base::nrow(data_loop)) {
# Iterate through all columns
for (col_j in 3:(base::ncol(data_loop) - 1)) {
# Check 1st sudden gains criterion ----
if (base::is.null(sg_crit1_cutoff) == TRUE) {
crit1[row_i, col_j - 2] <- NA
} else {
crit1[row_i, col_j - 2] <- (data_loop[row_i, col_j - 1] - data_loop[row_i, col_j] >= sg_crit1_cutoff)
}
# Check 2nd sudden gains criterion ----
if (base::is.null(sg_crit2_pct) == TRUE) {
crit2[row_i, col_j - 2] <- NA
} else {
crit2[row_i, col_j - 2] <- (data_loop[row_i, col_j - 1] - data_loop[row_i, col_j] >= sg_crit2_pct * data_loop[row_i, col_j - 1])
}
# Check 3rd sudden gains criterion ----
if (sg_crit3 == FALSE) {
crit3[row_i, col_j - 2] <- NA
} else {
# First, create pre and post indices for 3rd criterion
pre_indices <- base::max(1, col_j - 3):(col_j - 1) # Create index for pregain
post_indices <- col_j:min(base::ncol(data_loop), col_j + 2) # Create index for postgain
# Define pre and post mean, sdn and number of available data points for 3rd criterion
mean_pre <- base::mean(base::as.matrix(data_loop[row_i, base::c(pre_indices)]), na.rm = T)
mean_post <- base::mean(base::as.matrix(data_loop[row_i, base::c(post_indices)]), na.rm = T)
sd_pre <- stats::sd(base::as.matrix(data_loop[row_i, base::c(pre_indices)]), na.rm = T)
sd_post <- stats::sd(base::as.matrix(data_loop[row_i, base::c(post_indices)]), na.rm = T)
sum_n_pre <- base::sum(!is.na(data_loop[row_i, base::c(pre_indices)]), na.rm = T)
sum_n_post <- base::sum(!is.na(data_loop[row_i, base::c(post_indices)]), na.rm = T)
sum_n_pre_post <- sum_n_pre + sum_n_post
# Check 3rd criterion for two or more values at both pre and post
if (sum_n_pre >= 2 & sum_n_post >= 2) {
# Calculate critical value to be used based on how many pre and postgain sessions are available
if (sg_crit3_adjust == TRUE) {
sg_crit3_critical_value_set <- base::abs(stats::qt(p = (sg_crit3_alpha_critical_value / 2), df = (sum_n_pre_post - 2)))
} else if (sg_crit3_adjust == FALSE) {
sg_crit3_critical_value_set <- sg_crit3_critical_value
}
# Test for third criterion using adjusted critical value
crit3[row_i, col_j - 2] <- mean_pre - mean_post > sg_crit3_critical_value_set * base::sqrt((((sum_n_pre - 1) * (sd_pre ^ 2)) + ((sum_n_post - 1) * (sd_post ^ 2))) / (sum_n_pre + sum_n_post - 2))
# Add missing value if less than two pregain or postgain sessions are available
} else if (sum_n_pre < 2 | sum_n_post < 2) {
crit3[row_i, col_j - 2] <- NA
}
} # Close loop that applies 3rg criterion
} # Close loop that iterates through columns
} # Close loop that iterates through rows
# Multiply dataframes with information on whether sudden gains criteria 1, 2, and 3 are met
# 1 = criterion is met, 0 = criterion is not met, NA = not enough data to identify sudden gains
if (base::is.null(sg_crit1_cutoff) == FALSE & base::is.null(sg_crit2_pct) == TRUE & sg_crit3 == FALSE) {
crit123 <- crit1 * TRUE
base::message("First sudden gains criterion was applied.")
} else if (base::is.null(sg_crit1_cutoff) == TRUE & base::is.null(sg_crit2_pct) == FALSE & sg_crit3 == FALSE) {
crit123 <- crit2 * TRUE
base::message("Second sudden gains criterion was applied.")
} else if (base::is.null(sg_crit1_cutoff) == TRUE & base::is.null(sg_crit2_pct) == TRUE & sg_crit3 == TRUE) {
crit123 <- crit3 * TRUE
base::message("Third sudden gains criterion was applied.")
if (sg_crit3_adjust == TRUE) {
message("The critical value for the third criterion was adjusted for missingness.")
} else if (sg_crit3_adjust == FALSE) {
message(paste0("Note: The critical value for the third criterion was not adjusted for missingness: ", sg_crit3_critical_value, " was used for all comparisons."))
}
} else if (base::is.null(sg_crit1_cutoff) == FALSE & base::is.null(sg_crit2_pct) == FALSE & sg_crit3 == FALSE) {
crit123 <- crit1 * crit2
base::message("First and second sudden gains criteria were applied.")
} else if (base::is.null(sg_crit1_cutoff) == TRUE & base::is.null(sg_crit2_pct) == FALSE & sg_crit3 == TRUE) {
crit123 <- crit2 * crit3
base::message("Second and third sudden gains criteria were applied.")
if (sg_crit3_adjust == TRUE) {
message("The critical value for the third criterion was adjusted for missingness.")
} else if (sg_crit3_adjust == FALSE) {
message(paste0("Note: The critical value for the third criterion was not adjusted for missingness: ", sg_crit3_critical_value, " was used for all comparisons."))
}
} else if (base::is.null(sg_crit1_cutoff) == FALSE & base::is.null(sg_crit2_pct) == TRUE & sg_crit3 == TRUE) {
crit123 <- crit1 * crit3
base::message("First and third sudden gains criteria were applied.")
if (sg_crit3_adjust == TRUE) {
message("The critical value for the third criterion was adjusted for missingness.")
} else if (sg_crit3_adjust == FALSE) {
message(paste0("Note: The critical value for the third criterion was not adjusted for missingness: ", sg_crit3_critical_value, " was used for all comparisons."))
}
} else if (base::is.null(sg_crit1_cutoff) == FALSE & base::is.null(sg_crit2_pct) == FALSE & sg_crit3 == TRUE) {
crit123 <- crit1 * crit2 * crit3
base::message("First, second, and third sudden gains criteria were applied.")
if (sg_crit3_adjust == TRUE) {
message("The critical value for the third criterion was adjusted for missingness.")
} else if (sg_crit3_adjust == FALSE) {
message(paste0("Note: The critical value for the third criterion was not adjusted for missingness: ", sg_crit3_critical_value, " was used for all comparisons."))
}
}
# Create empty list for renaming variables
sg_col_names <- base::c()
sg_col_names_crit1 <- base::c()
sg_col_names_crit2 <- base::c()
sg_col_names_crit3 <- base::c()
# Create new variable names for sudden gains variables
# If identify_sg_1to2 is TRUE, sg variables will start with "sg_1to2"
if (identify_sg_1to2 == FALSE) {
for (i in 1:(base::ncol(data_loop) - 3)) {
sg_col_names[i] <- base::paste0("sg_", i + 1, "to", i + 2)
sg_col_names_crit1[i] <- base::paste0("sg_crit1_", i + 1, "to", i + 2)
sg_col_names_crit2[i] <- base::paste0("sg_crit2_", i + 1, "to", i + 2)
sg_col_names_crit3[i] <- base::paste0("sg_crit3_", i + 1, "to", i + 2)
}
# If identify_sg_1to2 is FALSE, sg variables will start with "sg_2to3"
} else if (identify_sg_1to2 == TRUE) {
# base::message("The argument identify_sg_1to2 is set to TRUE, this implies that the first variable specified in the argument 'sg_var_list' represents a baseline measurement point, e.g. pre-intervention assessment.")
for (i in 1:(base::ncol(data_loop) - 3)) {
sg_col_names[i] <- base::paste0("sg_", i, "to", i + 1)
sg_col_names_crit1[i] <- base::paste0("sg_crit1_", i, "to", i + 1)
sg_col_names_crit2[i] <- base::paste0("sg_crit2_", i, "to", i + 1)
sg_col_names_crit3[i] <- base::paste0("sg_crit3_", i, "to", i + 1)
}
}
# Name sudden gains variables of main datafile
names(crit123) <- sg_col_names
# Name sudden gains variables individual datafiles with criteria 1, 2, 3
names(crit1) <- sg_col_names_crit1
names(crit2) <- sg_col_names_crit2
names(crit3) <- sg_col_names_crit3
# Calculate number of sudden gains
sg_sum <- base::sum(crit123, na.rm = T)
# Return message if no sudden gains were identified
# Have this down here so it's the last message and more visible
if (sg_sum == 0) {
base::warning("No sudden gains were identified.", call. = FALSE)
}
# Export dataframe with information whether individual criteria were met
if (crit123_details == TRUE) {
data_crit123_details <- base::cbind(data_select[, 1], crit1, crit2, crit3, crit123)
# Return dataframe with details about each criteria instead of combined crit123
data_crit123_details %>%
dplyr::arrange(!! rlang::sym(id_var_name)) %>%
tibble::as_tibble()
} else if (crit123_details == FALSE) {
# Combine ID with results from identify sudden gains loop
data_crit123 <- base::cbind(data_select[, 1], crit123)
# Combine data with variables used to identify sudden gains
data_select %>%
dplyr::left_join(data_crit123, by = id_var_name) %>%
dplyr::arrange(!! rlang::sym(id_var_name)) %>%
tibble::as_tibble()
}
}
| /R/identify_sg.R | permissive | milanwiedemann/suddengains | R | false | false | 16,968 | r | #' @title Identify sudden gains.
#'
#' @description Function to identify sudden gains in longitudinal data structured in wide format.
#'
#' @param data A data set in wide format including an ID variable and variables for each measurement point.
#' @param id_var_name String, specifying the name of the ID variable. Each row should have a unique value.
#' @param sg_var_list Vector, specifying the variable names of each measurement point sequentially.
#' @param sg_crit1_cutoff Numeric, specifying the cut-off value to be used for the first sudden gains criterion.
#' The function \code{\link{define_crit1_cutoff}} can be used to calculate a cutoff value based on the Reliable Change Index (RCI; Jacobson & Truax, 1991).
#' If set to \code{NULL} the first criterion won't be applied.
#' @param sg_crit2_pct Numeric, specifying the percentage change to be used for the second sudden gains criterion.
#' If set to \code{NULL} the second criterion won't be applied.
#' @param sg_crit3 If set to \code{TRUE} the third criterion will be applied automatically adjusting the critical value for missingness.
#' If set to \code{FALSE} the third criterion won't be applied.
#' @param sg_crit3_alpha Numeric, alpha for the two-tailed student t-test to determine the critical value to be used for the third criterion.
#' Degrees of freedom are based on the number of available data in the three sessions preceding the gain and the three sessions following the gain.
#' @param sg_crit3_adjust Logical, specify whether critical value gets adjusted for missingness, see Lutz et al. (2013) and the documentation of this R package for further details.
#' This argument is set to \code{TRUE} by default adjusting the critical value for missingness as described in the package documentation and Lutz et al. (2013):
#' A critical value of 2.776 is used when all three data points before and after a potential gain are available,
#' where one data point is missing either before or after a potential gain a critical value of 3.182 is used,
#' and where one data point is missing both before and after the gain a critical value of 4.303 is used (for sg_crit3_alpha = 0.05).
#' If set to \code{FALSE} the critical value set in \code{sg_crit3_critical_value} will instead be used for all comparisons, regardless of missingness in the sequence of data points that are investigated for potential sudden gains.
#' @param sg_crit3_critical_value Numeric, specifying the critical value to instead be used for all comparisons, regardless of missingness in the sequence of data points that are investigated for potential sudden gains.
#' @param identify_sg_1to2 Logical, indicating whether to identify sudden gains from measurement point 1 to 2.
#' If set to TRUE, this implies that the first variable specified in \code{sg_var_list} represents a baseline measurement point, e.g. pre-intervention assessment.
#' @param crit123_details Logical, if set to \code{TRUE} this function returns information about which of the three criteria (e.g. "sg_crit1_2to3", "sg_crit2_2to3", and "sg_crit3_2to3") are met for each session to session interval for all cases.
#' Variables named "sg_2to3", "sg_3to4" summarise all criteria that were selected to identify sudden gains.
#' @return A wide data set indicating whether sudden gains are present for each session to session interval for all cases in \code{data}.
#' @references Lutz, W., Ehrlich, T., Rubel, J., Hallwachs, N., Röttger, M.-A., Jorasz, C., … Tschitsaz-Stucki, A. (2013). The ups and downs of psychotherapy: Sudden gains and sudden losses identified with session reports. Psychotherapy Research, 23(1), 14–24. \doi{10.1080/10503307.2012.693837}.
#'
#' Tang, T. Z., & DeRubeis, R. J. (1999). Sudden gains and critical sessions in cognitive-behavioral therapy for depression. Journal of Consulting and Clinical Psychology, 67(6), 894–904. \doi{10.1037/0022-006X.67.6.894}.
#' @examples # Identify sudden gains
#' identify_sg(data = sgdata,
#' sg_crit1_cutoff = 7,
#' id_var_name = "id",
#' sg_var_list = c("bdi_s1", "bdi_s2", "bdi_s3",
#' "bdi_s4", "bdi_s5", "bdi_s6",
#' "bdi_s7", "bdi_s8", "bdi_s9",
#' "bdi_s10", "bdi_s11", "bdi_s12"))
#' @export
identify_sg <- function(data, id_var_name, sg_var_list, sg_crit1_cutoff, sg_crit2_pct = .25, sg_crit3 = TRUE, sg_crit3_alpha = .05, sg_crit3_adjust = TRUE, sg_crit3_critical_value = 2.776, identify_sg_1to2 = FALSE, crit123_details = FALSE) {
  # Create tibble necessary for further data manipulations
  data <- tibble::as_tibble(data)
  # Flags indicating which of the three sudden gains criteria are applied
  use_crit1 <- !base::is.null(sg_crit1_cutoff)
  use_crit2 <- !base::is.null(sg_crit2_pct)
  use_crit3 <- base::isTRUE(sg_crit3)
  if (!use_crit1 && !use_crit2 && !use_crit3) {
    stop("Please specify at least one of the three sudden gains criteria using the following arguments: sg_crit1_cutoff, sg_crit2_pct, sg_crit3.", call. = FALSE)
  }
  # The cut-off must be positive: gains are defined as score *decreases*
  if (use_crit1 && sg_crit1_cutoff < 0) {
    stop("The cut-off value specified in 'sg_crit1_cutoff' needs to be positive to identify sudden gains.", call. = FALSE)
  }
  # Select data for identifying sudden gains:
  # only the ID variable and the repeated-measure variables are needed
  data_select <- data %>%
    dplyr::arrange(!!rlang::sym(id_var_name)) %>%
    dplyr::select(!!rlang::sym(id_var_name), dplyr::all_of(sg_var_list))
  # Remove ID from the data frame used inside the identification loop
  data_loop <- data_select %>%
    dplyr::arrange(!!rlang::sym(id_var_name)) %>%
    dplyr::select(2:base::ncol(data_select))
  # Number of session-to-session intervals that can be tested for a gain
  # (a gain between sessions j-1 and j needs data before and after it)
  n_intervals <- base::ncol(data_loop) - 3
  # One result data frame per criterion:
  # TRUE = criterion met, FALSE = not met, NA = not applied / not enough data
  empty_crit <- function() {
    base::data.frame(base::matrix(NA,
                                  nrow = base::nrow(data_loop),
                                  ncol = n_intervals))
  }
  crit1 <- empty_crit()
  crit2 <- empty_crit()
  crit3 <- empty_crit()
  # Iterate through all rows (cases) ...
  for (row_i in base::seq_len(base::nrow(data_loop))) {
    # ... and all columns (potential postgain sessions)
    for (col_j in 3:(base::ncol(data_loop) - 1)) {
      # Check 1st sudden gains criterion: absolute drop >= cutoff ----
      if (!use_crit1) {
        crit1[row_i, col_j - 2] <- NA
      } else {
        crit1[row_i, col_j - 2] <- (data_loop[row_i, col_j - 1] - data_loop[row_i, col_j] >= sg_crit1_cutoff)
      }
      # Check 2nd sudden gains criterion: drop >= percentage of pregain score ----
      if (!use_crit2) {
        crit2[row_i, col_j - 2] <- NA
      } else {
        crit2[row_i, col_j - 2] <- (data_loop[row_i, col_j - 1] - data_loop[row_i, col_j] >= sg_crit2_pct * data_loop[row_i, col_j - 1])
      }
      # Check 3rd sudden gains criterion: gain large relative to local variability ----
      if (!use_crit3) {
        crit3[row_i, col_j - 2] <- NA
      } else {
        # Indices of up to three sessions before and after the potential gain
        pre_indices <- base::max(1, col_j - 3):(col_j - 1) # Create index for pregain
        post_indices <- col_j:min(base::ncol(data_loop), col_j + 2) # Create index for postgain
        # Pre/post means, SDs and number of available (non-missing) data points
        mean_pre <- base::mean(base::as.matrix(data_loop[row_i, base::c(pre_indices)]), na.rm = TRUE)
        mean_post <- base::mean(base::as.matrix(data_loop[row_i, base::c(post_indices)]), na.rm = TRUE)
        sd_pre <- stats::sd(base::as.matrix(data_loop[row_i, base::c(pre_indices)]), na.rm = TRUE)
        sd_post <- stats::sd(base::as.matrix(data_loop[row_i, base::c(post_indices)]), na.rm = TRUE)
        sum_n_pre <- base::sum(!is.na(data_loop[row_i, base::c(pre_indices)]), na.rm = TRUE)
        sum_n_post <- base::sum(!is.na(data_loop[row_i, base::c(post_indices)]), na.rm = TRUE)
        sum_n_pre_post <- sum_n_pre + sum_n_post
        # The criterion needs at least two values at both pre and post
        if (sum_n_pre >= 2 && sum_n_post >= 2) {
          # Critical value: adjusted for the number of available pre/postgain
          # sessions (Lutz et al., 2013) or fixed, depending on sg_crit3_adjust
          if (sg_crit3_adjust == TRUE) {
            sg_crit3_critical_value_set <- base::abs(stats::qt(p = (sg_crit3_alpha / 2), df = (sum_n_pre_post - 2)))
          } else if (sg_crit3_adjust == FALSE) {
            sg_crit3_critical_value_set <- sg_crit3_critical_value
          }
          # Pooled-SD comparison of pre vs post means (t-test style)
          crit3[row_i, col_j - 2] <- mean_pre - mean_post > sg_crit3_critical_value_set * base::sqrt((((sum_n_pre - 1) * (sd_pre ^ 2)) + ((sum_n_post - 1) * (sd_post ^ 2))) / (sum_n_pre + sum_n_post - 2))
        } else {
          # Fewer than two pregain or postgain sessions available
          crit3[row_i, col_j - 2] <- NA
        }
      } # Close block that applies 3rd criterion
    } # Close loop that iterates through columns
  } # Close loop that iterates through rows
  # Combine the applied criteria elementwise:
  # 1 = all applied criteria met, 0 = at least one not met, NA = not classifiable.
  # The trailing "* TRUE" converts a single logical criterion to numeric,
  # matching the output type of the multi-criterion products.
  applied_crits <- base::list(crit1, crit2, crit3)[base::c(use_crit1, use_crit2, use_crit3)]
  crit123 <- base::Reduce(`*`, applied_crits) * TRUE
  # Report which criteria were applied (same wording for every combination
  # of criteria as in earlier versions of this function)
  ord_upper <- base::c("First", "Second", "Third")[base::c(use_crit1, use_crit2, use_crit3)]
  ord_lower <- base::c("first", "second", "third")[base::c(use_crit1, use_crit2, use_crit3)]
  if (base::length(ord_upper) == 1) {
    base::message(base::paste0(ord_upper, " sudden gains criterion was applied."))
  } else if (base::length(ord_upper) == 2) {
    base::message(base::paste0(ord_upper[1], " and ", ord_lower[2], " sudden gains criteria were applied."))
  } else {
    base::message(base::paste0(ord_upper[1], ", ", ord_lower[2], ", and ", ord_lower[3], " sudden gains criteria were applied."))
  }
  # Report how the third criterion's critical value was determined
  if (use_crit3) {
    if (sg_crit3_adjust == TRUE) {
      message("The critical value for the third criterion was adjusted for missingness.")
    } else if (sg_crit3_adjust == FALSE) {
      message(paste0("Note: The critical value for the third criterion was not adjusted for missingness: ", sg_crit3_critical_value, " was used for all comparisons."))
    }
  }
  # Create new variable names for the sudden gains variables.
  # By default gains start at "sg_2to3"; when identify_sg_1to2 is TRUE the
  # first variable is treated as a baseline assessment and names start at "sg_1to2".
  shift <- if (identify_sg_1to2 == TRUE) 0L else 1L
  interval_idx <- base::seq_len(n_intervals)
  sg_col_names <- base::paste0("sg_", interval_idx + shift, "to", interval_idx + shift + 1)
  sg_col_names_crit1 <- base::paste0("sg_crit1_", interval_idx + shift, "to", interval_idx + shift + 1)
  sg_col_names_crit2 <- base::paste0("sg_crit2_", interval_idx + shift, "to", interval_idx + shift + 1)
  sg_col_names_crit3 <- base::paste0("sg_crit3_", interval_idx + shift, "to", interval_idx + shift + 1)
  # Name sudden gains variables of the main data file
  names(crit123) <- sg_col_names
  # Name sudden gains variables of the individual criterion data files
  names(crit1) <- sg_col_names_crit1
  names(crit2) <- sg_col_names_crit2
  names(crit3) <- sg_col_names_crit3
  # Calculate the total number of identified sudden gains
  sg_sum <- base::sum(crit123, na.rm = TRUE)
  # Warn last so this is the final, most visible message when nothing was found
  if (sg_sum == 0) {
    base::warning("No sudden gains were identified.", call. = FALSE)
  }
  if (crit123_details == TRUE) {
    # Return per-criterion detail columns together with the combined result
    data_crit123_details <- base::cbind(data_select[, 1], crit1, crit2, crit3, crit123)
    data_crit123_details %>%
      dplyr::arrange(!! rlang::sym(id_var_name)) %>%
      tibble::as_tibble()
  } else if (crit123_details == FALSE) {
    # Combine ID with the combined indicators and merge back onto the
    # variables used to identify the sudden gains
    data_crit123 <- base::cbind(data_select[, 1], crit123)
    data_select %>%
      dplyr::left_join(data_crit123, by = id_var_name) %>%
      dplyr::arrange(!! rlang::sym(id_var_name)) %>%
      tibble::as_tibble()
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_functions.R
\name{videos.insert}
\alias{videos.insert}
\title{Uploads a video to YouTube and optionally sets the video's metadata.}
\usage{
videos.insert(Video, part, autoLevels = NULL, notifySubscribers = NULL,
onBehalfOfContentOwner = NULL, onBehalfOfContentOwnerChannel = NULL,
stabilize = NULL)
}
\arguments{
\item{Video}{The \link{Video} object to pass to this method}
\item{part}{The part parameter serves two purposes in this operation}
\item{autoLevels}{The autoLevels parameter indicates whether YouTube should automatically enhance the video's lighting and color}
\item{notifySubscribers}{The notifySubscribers parameter indicates whether YouTube should send a notification about the new video to users who subscribe to the video's channel}
\item{onBehalfOfContentOwner}{Note: This parameter is intended exclusively for YouTube content partners}
\item{onBehalfOfContentOwnerChannel}{This parameter can only be used in a properly authorized request}
\item{stabilize}{The stabilize parameter indicates whether YouTube should adjust the video to remove shaky camera motions}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/youtube
\item https://www.googleapis.com/auth/youtube.force-ssl
\item https://www.googleapis.com/auth/youtube.upload
\item https://www.googleapis.com/auth/youtubepartner
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/youtube", "https://www.googleapis.com/auth/youtube.force-ssl", "https://www.googleapis.com/auth/youtube.upload", "https://www.googleapis.com/auth/youtubepartner"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/youtube/v3}{Google Documentation}
Other Video functions: \code{\link{Video.localizations}},
\code{\link{Video}}, \code{\link{videos.update}}
}
| /googleyoutubev3.auto/man/videos.insert.Rd | permissive | uwazac/autoGoogleAPI | R | false | true | 2,091 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_functions.R
\name{videos.insert}
\alias{videos.insert}
\title{Uploads a video to YouTube and optionally sets the video's metadata.}
\usage{
videos.insert(Video, part, autoLevels = NULL, notifySubscribers = NULL,
onBehalfOfContentOwner = NULL, onBehalfOfContentOwnerChannel = NULL,
stabilize = NULL)
}
\arguments{
\item{Video}{The \link{Video} object to pass to this method}
\item{part}{The part parameter serves two purposes in this operation}
\item{autoLevels}{The autoLevels parameter indicates whether YouTube should automatically enhance the video's lighting and color}
\item{notifySubscribers}{The notifySubscribers parameter indicates whether YouTube should send a notification about the new video to users who subscribe to the video's channel}
\item{onBehalfOfContentOwner}{Note: This parameter is intended exclusively for YouTube content partners}
\item{onBehalfOfContentOwnerChannel}{This parameter can only be used in a properly authorized request}
\item{stabilize}{The stabilize parameter indicates whether YouTube should adjust the video to remove shaky camera motions}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/youtube
\item https://www.googleapis.com/auth/youtube.force-ssl
\item https://www.googleapis.com/auth/youtube.upload
\item https://www.googleapis.com/auth/youtubepartner
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/youtube", "https://www.googleapis.com/auth/youtube.force-ssl", "https://www.googleapis.com/auth/youtube.upload", "https://www.googleapis.com/auth/youtubepartner"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/youtube/v3}{Google Documentation}
Other Video functions: \code{\link{Video.localizations}},
\code{\link{Video}}, \code{\link{videos.update}}
}
|
########################################
#### Example of a profile iteration ####
########################################
# Load required packages. library() errors immediately if a package is
# missing, unlike require(), which only returns FALSE and warns.
library(pomp)
library(plyr)
# Read in data file with the malaria data and covariates
dat <- read.csv("DataCovar_1975-2000.csv", header = TRUE)
# Read previous profile results and restart from the best
# (highest log-likelihood) parameter set
y <- read.csv("mifOutputProf_rho.csv", header = TRUE)
y <- arrange(y, -loglik)
y <- y[, 2:27]
param <- as.numeric(y[1, ])
param.names <- colnames(y)
names(param) <- param.names
# Builds the pomp object `po` used below
source("poObject_TmeanB.R")
# Each SLURM array task profiles one fixed value of rho on a 201-point grid
index <- as.numeric(Sys.getenv("SLURM_ARRAY_TASK_ID"))
temp <- seq(from = 0.0025, to = 0.0065, length.out = 201)
param['rho'] <- temp[index]
# Best log-likelihood found so far across the four independent mif2 starts
tempfilt <- -10000
for (i in 1:4) {
  # Record the seed so the best run can be reproduced
  seed <- ceiling(runif(1, min = 1, max = 2^30))
  set.seed(seed)
  # Run iterated filtering; on error, mifout holds the condition object
  # (and coef(mifout) below is empty, so the run is skipped)
  mifout <- tryCatch(mif2(
    po,
    Nmif = 150,
    start = param,
    Np = 3000,
    cooling.type = "hyperbolic",
    cooling.fraction.50 = 0.3,
    rw.sd = rw.sd(muIS = 0.02, muIQ = 0.02, muQS = 0.02, sigOBS = 0.02, sigPRO = 0.02, muEI = 0.02,
                  tau = 0.02, betaOUT = 0.02, bT4 = 0.02, bT6 = 0.02, b1 = 0.02, b2 = 0.02,
                  b3 = 0.02, b4 = 0.02, b5 = 0.02, b6 = 0.02, q0 = 0.03,
                  S_0 = 0.03, E_0 = 0.03, I_0 = 0.03, Q_0 = 0.03, K_0 = 0.03, F_0 = 0.03),
    transform = TRUE), error = function(e) e)
  if (length(coef(mifout)) > 0) {
    # Estimate the log-likelihood by 10 replicate particle filters
    loglik.mif <- replicate(n = 10, logLik(pfilter(po, params = coef(mifout), Np = 3000, max.fail = 500)))
    bl <- logmeanexp(loglik.mif, se = TRUE)
    if (bl[1] > tempfilt) {
      # Keep the parameters and likelihood of the best run so far
      par.out <- coef(mifout)
      names(par.out) <- param.names
      loglik.mif.est <- bl[1]
      loglik.mif.se <- bl[2]
      tempfilt <- loglik.mif.est
      finalSeed <- seed
    }
  }
}
# Guard: if every mif2 start failed, par.out was never assigned and the
# original code crashed with a confusing "object not found" error
if (tempfilt == -10000) {
  stop("All mif2 runs failed; nothing to write to mifOutputProf_rho.csv.", call. = FALSE)
}
# Append this task's best result; write the header only when creating the file
if (file.exists("mifOutputProf_rho.csv")) {
  write.table(t(as.matrix(c(index, par.out, loglik.mif.est, loglik.mif.se))), "mifOutputProf_rho.csv",
              append = TRUE, col.names = FALSE, row.names = FALSE, sep = ",")
} else {
  write.table(t(as.matrix(c(index, par.out, loglik.mif.est, loglik.mif.se))), "mifOutputProf_rho.csv",
              append = TRUE, col.names = c("run", param.names, "loglik", "loglik.se"), row.names = FALSE, sep = ",")
}
###############################
| /fitting/mifProf_rho.R | no_license | pascualgroup/Malaria-highlands | R | false | false | 2,105 | r | ########################################
#### Example of a profile iteration ####
########################################
# Load modelling (pomp) and data-manipulation (plyr) packages
require(pomp)
require(plyr)
dat <- read.csv("DataCovar_1975-2000.csv",header=T) # read in data file with the malaria data and covariates
# Previous profile results; restart from the best (highest loglik) parameter set
y <- read.csv("mifOutputProf_rho.csv", header=T)
y <- arrange(y, -loglik)
y <- y[,2:27]
param <- as.numeric(y[1,])
param.names <- colnames(y)
names(param) <- param.names
# Builds the pomp object `po` used below
source("poObject_TmeanB.R")
# Each SLURM array task profiles one fixed value of rho on a 201-point grid
index <- as.numeric(Sys.getenv("SLURM_ARRAY_TASK_ID"))
temp <- seq(from = 0.0025, to=0.0065,length.out = 201)
param['rho'] <- temp[index]
# Best log-likelihood found so far across the four independent mif2 starts
tempfilt <- -10000
for(i in 1:4){
# Record the seed so the best run can be reproduced
seed <- ceiling(runif(1,min=1,max=2^30))
set.seed(seed)
# Run iterated filtering; on error, mifout holds the condition object instead
tryCatch(mif2(
po,
Nmif = 150,
start = param,
Np = 3000,
cooling.type="hyperbolic",
cooling.fraction.50 = 0.3,
rw.sd = rw.sd(muIS = 0.02, muIQ = 0.02, muQS = 0.02, sigOBS = 0.02, sigPRO = 0.02, muEI = 0.02,
tau = 0.02, betaOUT = 0.02, bT4 = 0.02, bT6 = 0.02, b1 = 0.02, b2 = 0.02,
b3 = 0.02, b4 = 0.02, b5 = 0.02, b6 = 0.02, q0 = 0.03,
S_0 = 0.03, E_0 = 0.03, I_0 = 0.03, Q_0 = 0.03, K_0 = 0.03, F_0 = 0.03),
transform=TRUE), error = function(e) e) -> mifout
# Only score runs that produced coefficients (i.e. mif2 succeeded)
if(length(coef(mifout)) > 0){
# Log-likelihood estimated by 10 replicate particle filters
loglik.mif <- replicate(n=10, logLik(pfilter(po, params=coef(mifout), Np=3000, max.fail=500)))
bl <- logmeanexp(loglik.mif,se=TRUE)
if(bl[1] > tempfilt){
# Keep the parameters and likelihood of the best run so far
par.out <- coef(mifout)
names(par.out) <- param.names
loglik.mif.est <- bl[1]
loglik.mif.se <- bl[2]
tempfilt <- loglik.mif.est
finalSeed <- seed
}
}
}
# Append this task's best result; write the header only when creating the file
if(file.exists("mifOutputProf_rho.csv")) {
write.table(t(as.matrix(c(index,par.out,loglik.mif.est,loglik.mif.se))), "mifOutputProf_rho.csv",
append = T, col.names=F, row.names=F, sep = ",")
} else{
write.table(t(as.matrix(c(index,par.out,loglik.mif.est,loglik.mif.se))), "mifOutputProf_rho.csv",
append = T, col.names=c("run",param.names, "loglik", "loglik.se"), row.names=F, sep = ",")
}
###############################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crawlers.R
\name{get_ais_urls}
\alias{get_ais_urls}
\title{Get latest AIS URLs since last given URL to start}
\usage{
get_ais_urls(start_url)
}
\arguments{
\item{start_url}{The path to a TXT url. See notes and examples}
}
\description{
This takes the start URL, finds its sibling files, and then walks recursively
up the directory tree to find any newer directories that are available.
}
\details{
basically it works like this:
It will forcefully find all siblings then recurse back down the tree until it
finds all the txt files
}
\note{
THIS REQUIRES THE START URL TO BE A TXT FILE PATH
}
\examples{
links <- get_ais_urls("https://ais.sbarc.org/logs_delimited/2019/191008/AIS_SBARC_191008-12.txt")
}
| /man/get_ais_urls.Rd | no_license | BenioffOceanInitiative/whalesafe4r | R | false | true | 754 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crawlers.R
\name{get_ais_urls}
\alias{get_ais_urls}
\title{Get latest AIS URLs since last given URL to start}
\usage{
get_ais_urls(start_url)
}
\arguments{
\item{start_url}{The path to a TXT url. See notes and examples}
}
\description{
This takes the start URL, finds its sibling files, and then walks recursively
up the directory tree to find any newer directories that are available.
}
\details{
basically it works like this:
It will forcefully find all siblings then recurse back down the tree until it
finds all the txt files
}
\note{
THIS REQUIRES THE START URL TO BE A TXT FILE PATH
}
\examples{
links <- get_ais_urls("https://ais.sbarc.org/logs_delimited/2019/191008/AIS_SBARC_191008-12.txt")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{soap}
\alias{soap}
\title{Dwindling soap}
\format{A data frame with 15 observations on the following 3 variables.
\itemize{ \item{Date}{ } \item{Day}{ days since start of soap
usage and data collection} \item{Weight}{ weight of bar of soap (in
grams) } }}
\source{
Data collected by Rex Boggs and available from OzDASL
(\url{http://www.statsci.org/data/}).
}
\description{
A bar of soap was weighed after showering to see how much soap was used each
shower.
}
\details{
According to Rex Boggs:
I had a hypothesis that the daily weight of my bar of soap [in grams] in my
shower wasn't a linear function, the reason being that the tiny little bar
of soap at the end of its life seemed to hang around for just about ever. I
wanted to throw it out, but I felt I shouldn't do so until it became
unusable. And that seemed to take weeks.
Also I had recently bought some digital kitchen scales and felt I needed to
use them to justify the cost. I hypothesized that the daily weight of a bar
of soap might be dependent upon surface area, and hence would be a quadratic
function \dots{} .
The data ends at day 22. On day 23 the soap broke into two pieces and one
piece went down the plughole.
}
\examples{
data(soap)
xyplot(Weight~Day, data=soap)
}
\keyword{datasets}
| /man/soap.Rd | no_license | cran/fastR | R | false | true | 1,373 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{soap}
\alias{soap}
\title{Dwindling soap}
\format{A data frame with 15 observations on the following 3 variables.
\itemize{ \item{Date}{ } \item{Day}{ days since start of soap
usage and data collection} \item{Weight}{ weight of bar of soap (in
grams) } }}
\source{
Data collected by Rex Boggs and available from OzDASL
(\url{http://www.statsci.org/data/}).
}
\description{
A bar of soap was weighed after showering to see how much soap was used each
shower.
}
\details{
According to Rex Boggs:
I had a hypothesis that the daily weight of my bar of soap [in grams] in my
shower wasn't a linear function, the reason being that the tiny little bar
of soap at the end of its life seemed to hang around for just about ever. I
wanted to throw it out, but I felt I shouldn't do so until it became
unusable. And that seemed to take weeks.
Also I had recently bought some digital kitchen scales and felt I needed to
use them to justify the cost. I hypothesized that the daily weight of a bar
of soap might be dependent upon surface area, and hence would be a quadratic
function \dots{} .
The data ends at day 22. On day 23 the soap broke into two pieces and one
piece went down the plughole.
}
\examples{
data(soap)
xyplot(Weight~Day, data=soap)
}
\keyword{datasets}
|
################################################################################
#' @title gls calculation of mean elevation
#' @author: James Margrove
#' @description Calculates mean species elevation with gls, comparing
#'   exponential vs Gaussian spatial correlation structures on the residuals.
#'
Sys.setenv(LANG = "en")  # force English error/warning messages
# Clear work space
# NOTE(review): rm(list = ls()) in a sourced script wipes the caller's
# workspace; consider running in a fresh session instead.
rm(list=ls())
# Import packages
require(ggplot2)
require(nlme)
source("./functions/booter.R")
# Import data: species list (3 entries dropped by position) and cleaned plot data
sp <- read.table("./data/riskratio_data.txt", header = TRUE)$sp[-c(4,7, 13)]
data <- read.table( "./data/Reuben_data/data_cleaned.txt", header = TRUE)
load("./models/pele_fsen_dden_Abundance")
head(data)
# Predict elevation: baseline model without spatial correlation, then
# inspect the variogram of Pearson residuals for spatial autocorrelation
nullModel1 <- gls(elev ~ Species, data = data)
var1 <- Variogram(nullModel1, form = ~x + y, resType = "pearson")
plot(var1)
# NOTE(review): these two lines REPLACE x and y with pure rnorm noise rather
# than jittering them; presumably `data$x + rnorm(...)` was intended to break
# coordinate ties for the correlation structures -- confirm with the author.
data$xp <- data$x <- rnorm(nrow(data), 0, 0.1)
data$yp <- data$y <- rnorm(nrow(data), 0, 0.1)
# Spatial models: exponential vs Gaussian correlation on (xp, yp)
spatModel1 <- gls(elev ~ Species, data = data, correlation = corExp(form = ~xp + yp))
spatModel2 <- gls(elev ~ Species, data = data, correlation = corGaus(form = ~xp + yp))
AIC(spatModel1, spatModel2)
# no dif in AIC
# Same comparison with the correlation estimated separately within each Forest
spatModel3 <- gls(elev ~ Species, data = data, correlation = corExp(form = ~xp + yp | Forest))
spatModel4 <- gls(elev ~ Species, data = data, correlation = corGaus(form = ~xp + yp | Forest))
AIC(spatModel3, spatModel4)
summary(spatModel4)
# Re-check the residual variogram of the exponential model
var2 <- Variogram(spatModel1, form = ~x + y, resType = "pearson")
plot(var2)
summary(spatModel1)
# Ok so there is no spatial auto-correlation to account for in these plots
| /analysis/sp_dist_4ha_spatialModel.R | no_license | jmargrove/ForestFloodingSensitivityAnalysis | R | false | false | 1,503 | r | ################################################################################
#' @title gls calculation of mean elevation
#' @author: James Margrove
#' @description calculating the mean elevation using gls
#'
Sys.setenv(LANG = "en")
# Clear work space
rm(list=ls())
# Import packages
require(ggplot2)
require(nlme)
source("./functions/booter.R")
# Import data
sp <- read.table("./data/riskratio_data.txt", header = TRUE)$sp[-c(4,7, 13)]
data <- read.table( "./data/Reuben_data/data_cleaned.txt", header = TRUE)
load("./models/pele_fsen_dden_Abundance")
head(data)
# Predict elevation
nullModel1 <- gls(elev ~ Species, data = data)
var1 <- Variogram(nullModel1, form = ~x + y, resType = "pearson")
plot(var1)
data$xp <- data$x <- rnorm(nrow(data), 0, 0.1)
data$yp <- data$y <- rnorm(nrow(data), 0, 0.1)
spatModel1 <- gls(elev ~ Species, data = data, correlation = corExp(form = ~xp + yp))
spatModel2 <- gls(elev ~ Species, data = data, correlation = corGaus(form = ~xp + yp))
AIC(spatModel1, spatModel2)
# no dif in AIC
spatModel3 <- gls(elev ~ Species, data = data, correlation = corExp(form = ~xp + yp | Forest))
spatModel4 <- gls(elev ~ Species, data = data, correlation = corGaus(form = ~xp + yp | Forest))
AIC(spatModel3, spatModel4)
summary(spatModel4)
var2 <- Variogram(spatModel1, form = ~x + y, resType = "pearson")
plot(var2)
summary(spatModel1)
# Ok so there is no spatial auto-correlation to account for in these plots
|
\name{Pade}
\encoding{UTF-8}
\alias{Pade}
\title{Padé Approximant Coefficients}
\description{
Given Taylor series coefficients \eqn{a_n} from \eqn{n = 0} up to \eqn{n = T},
the function will calculate the Padé \eqn{\left[L / M\right]}{[L / M]}
approximant coefficients so long as \eqn{L + M \leq T}{L + M ≤ T}.
}
\usage{
Pade(L, M, A)
}
\arguments{
\item{L}{Order of Padé numerator}
\item{M}{Order of Padé denominator}
\item{A}{vector of Taylor series coefficients, starting at \eqn{x^0}}
}
\details{
As the Taylor series expansion is the \dQuote{best} polynomial approximation to
a function, the Padé approximants are the \dQuote{best} rational function
approximations to the original function. The Padé approximant often has a wider
radius of convergence than the corresponding Taylor series, and can even
converge where the Taylor series does not. This makes it very suitable for
computer-based numerical analysis.
The \eqn{\left[L / M\right]}{[L / M]} Padé approximant to a Taylor series
\eqn{A(x)} is the quotient
\deqn{\frac{P_L(x)}{Q_M(x)}}{P_L(x) / Q_M(x)}
where \eqn{P_L(x)} is of order \eqn{L} and \eqn{Q_M(x)} is of order \eqn{M}. In
this case:
\deqn{A(x) - \frac{P_L(x)}{Q_M(x)} = \mathcal{O}\left(x^{L + M + 1}\right)}{A(x)
- P_L(x) / Q_M(X) = O(x^{L + M + 1})}
When \eqn{q_0} is defined to be \eqn{1}, there is a unique solution to the
system of linear equations which can be used to calculate the coefficients.
The function accepts a vector \code{A} of length \code{T + 1}, composed of the
\eqn{a_n} of the truncated Taylor series
\deqn{A(x) = \sum_{j=0}^T a_j x^j}{A = Σ(j = 0 to T) a_j x^j}
and returns a list of two elements, \code{Px} and \code{Qx}, the Padé numerator
and denominator coefficients respectively, as long as
\eqn{L + M \leq T}{L + M ≤ T}.
}
\value{
\code{Pade} returns a list with two entries:
\item{Px}{Coefficients of the numerator polynomial starting at \eqn{x^0}.}
\item{Qx}{Coefficients of the denominator polynomial starting at \eqn{x^0}.}
}
\references{
Baker, George Allen (1975) \emph{Essentials of Padé Approximants} Academic
Press. ISBN 978-0-120-74855-6
}
\author{
Avraham Adler \email{Avraham.Adler@gmail.com}
}
\seealso{
This package provides similar functionality to the \code{pade} function in the
\code{pracma} package. However, it does not allow computation of coefficients
beyond the supplied Taylor coefficients and it expects its input and provides
its output in ascending---instead of descending---order.
}
\examples{
A <- 1 / factorial(seq_len(11L) - 1) ## Taylor sequence for e^x up to x^{10} around x_0 = 0
Z <- Pade(5, 5, A)
print(Z) ## Padé approximant of order [5 / 5]
x <- -.01 ## Test value
Actual <- exp(x) ## Proper value
print(Actual, digits = 16)
Estimate <- sum(Z[[1L]] * x ^ (seq_along(Z[[1L]]) - 1)) /
sum(Z[[2L]] * x ^ (seq_along(Z[[2L]]) - 1))
print(Estimate, digits = 16) ## Approximant value
all.equal(Actual, Estimate)
}
\keyword{NumericalMathematics}
| /man/Pade.Rd | no_license | cran/Pade | R | false | false | 3,148 | rd | \name{Pade}
\encoding{UTF-8}
\alias{Pade}
\title{Padé Approximant Coefficients}
\description{
Given Taylor series coefficients \eqn{a_n} from \eqn{n = 0} up to \eqn{n = T},
the function will calculate the Padé \eqn{\left[L / M\right]}{[L / M]}
approximant coefficients so long as \eqn{L + M \leq T}{L + M ≤ T}.
}
\usage{
Pade(L, M, A)
}
\arguments{
\item{L}{Order of Padé numerator}
\item{M}{Order of Padé denominator}
\item{A}{vector of Taylor series coefficients, starting at \eqn{x^0}}
}
\details{
As the Taylor series expansion is the \dQuote{best} polynomial approximation to
a function, the Padé approximants are the \dQuote{best} rational function
approximations to the original function. The Padé approximant often has a wider
radius of convergence than the corresponding Taylor series, and can even
converge where the Taylor series does not. This makes it very suitable for
computer-based numerical analysis.
The \eqn{\left[L / M\right]}{[L / M]} Padé approximant to a Taylor series
\eqn{A(x)} is the quotient
\deqn{\frac{P_L(x)}{Q_M(x)}}{P_L(x) / Q_M(x)}
where \eqn{P_L(x)} is of order \eqn{L} and \eqn{Q_M(x)} is of order \eqn{M}. In
this case:
\deqn{A(x) - \frac{P_L(x)}{Q_M(x)} = \mathcal{O}\left(x^{L + M + 1}\right)}{A(x)
- P_L(x) / Q_M(X) = O(x^{L + M + 1})}
When \eqn{q_0} is defined to be \eqn{1}, there is a unique solution to the
system of linear equations which can be used to calculate the coefficients.
The function accepts a vector \code{A} of length \code{T + 1}, composed of the
\eqn{a_n} of the of truncated Taylor series
\deqn{A(x) = \sum_{j=0}^T a_j x^j}{A = Σ(j = 0 to T) a_j x^j}
and returns a list of two elements, \code{Px} and \code{Qx}, the Padé numerator
and denominator coefficients respectively, as long as
\eqn{L + M \leq T}{L + M ≤ T}.
}
\value{
\code{Pade} returns a list with two entries:
\item{Px}{Coefficients of the numerator polynomial starting at \eqn{x^0}.}
\item{Qx}{Coefficients of the denominator polynomial starting at \eqn{x^0}.}
}
\references{
Baker, George Allen (1975) \emph{Essentials of Padé Approximants} Academic
Press. ISBN 978-0-120-74855-6
}
\author{
Avraham Adler \email{Avraham.Adler@gmail.com}
}
\seealso{
This package provides similar functionality to the \code{pade} function in the
\code{pracma} package. However, it does not allow computation of coefficients
beyond the supplied Taylor coefficients and it expects its input and provides
its output in ascending---instead of descending---order.
}
\examples{
A <- 1 / factorial(seq_len(11L) - 1) ## Taylor sequence for e^x up to x^{10} around x_0 = 0
Z <- Pade(5, 5, A)
print(Z) ## Padé approximant of order [5 / 5]
x <- -.01 ## Test value
Actual <- exp(x) ## Proper value
print(Actual, digits = 16)
Estimate <- sum(Z[[1L]] * x ^ (seq_along(Z[[1L]]) - 1)) /
sum(Z[[2L]] * x ^ (seq_along(Z[[2L]]) - 1))
print(Estimate, digits = 16) ## Approximant value
all.equal(Actual, Estimate)
}
\keyword{NumericalMathematics}
|
# Analysis of diabetic-nephropathy lab data: classifies patients by
# albuminuria level (ACR) and estimated GFR, then summarises each group.
library(dplyr)
library(readr)
library(ggplot2)
library(readr)
paper_search <- read_csv("paper_search.csv",
                         col_types = cols(id_index = col_character(),
                                          id_index_1 = col_character()))
# Weight/height outliers were corrected manually upstream; the filters below
# drop remaining implausible lab values (CysC >= 50, CRE <= 2).
Data <- paper_search %>%
  filter(CysC < 50,
         CRE > 2 ) %>%
  select(-c(id_index_1)) %>%
  mutate(bmi = weight / (height ^ 2)) %>%
  # Albuminuria groups by ACR:
  #   "大量白蛋白尿组" = macroalbuminuria (ACR >= 300)
  #   "微量白蛋白尿组" = microalbuminuria (30 <= ACR < 300)
  #   "正常白蛋白尿组" = normoalbuminuria (ACR < 30), "未知" = unknown
  mutate(Y_CLASS = case_when(
    ACR >= 300 ~ "大量白蛋白尿组",
    ACR >= 30 & ACR < 300 ~ "微量白蛋白尿组",
    ACR < 30 ~ "正常白蛋白尿组",
    TRUE ~ "未知"
  )) %>%
  # Sex factor for the GFR formula ('男' = male -> 1, female -> 0.742)
  mutate(ageEffect = if_else(gender == '男', 1, 0.742)) %>%
  # MDRD-style estimated glomerular filtration rate -- NOTE(review): confirm
  # the 30849 constant matches the units of CRE used in this data set.
  mutate(GFR = 30849 * (CRE^(-1.154)) * (age^(-0.203)) * ageEffect) %>%
  # GFR stage labels: normal / mild / moderate / severe decrease / kidney failure
  mutate(GFR_CLASS = case_when(
    GFR >= 90 ~ "正常",
    GFR >= 60 & GFR <90 ~ "轻度下降",
    GFR >= 30 & GFR <60 ~ "中度下降",
    GFR >= 15 & GFR <30 ~ "重度下降",
    GFR <15 ~ "肾衰竭",
    TRUE ~ "未知"
  ))
# Total number of patients
Data %>% summarise(n())
# Patient counts by sex (male / female)
Data %>% group_by(gender) %>% summarise(n())
# Mean age with 95% confidence interval
xx <- t.test(Data$age)
xx$conf.int
xx$estimate
# Mean diabetes duration (years)
t.test(Data$courseofdiabetes)
# Mean BMI
t.test(Data$bmi)
# Group sizes per albuminuria class
xx <- Data %>% group_by(Y_CLASS) %>% summarise(n())
xx
# Normoalbuminuria subgroup
ZC_DATA <- Data %>%
  filter(Y_CLASS == '正常白蛋白尿组')
Data %>% filter(Y_CLASS == '正常白蛋白尿组') %>% select(CysC) %>% .[,1]
## Cystatin C mean with 95% CI in the normoalbuminuria group
t.test(ZC_DATA$CysC)
# Per-group means and 1.96*SD half-widths -- NOTE(review): 1.96*sd is a
# population spread, not a standard error of the mean; confirm intent.
group_index <- Data %>%
  group_by(Y_CLASS) %>%
  summarise(CysC_mean = mean(CysC), CysC_std = 1.96 * sd(CysC),
            UREA_mean = mean(UREA), UREA_std = 1.96 * sd(UREA),
            CRE_mean = mean(CRE), CRE_std = 1.96 * sd(CRE),
            GFR_mean = mean(GFR), GFR_std = 1.96 * sd(GFR)
  ) %>%
  ungroup()
# One-sample t-test of column `Col` within albuminuria class `Class`;
# prints the full test and returns the mean plus the CI half-width.
T.test <- function(Data, Class, Col){
  DATA <- Data %>% filter(Y_CLASS == Class) %>% as.data.frame(.) %>% .[, quo_name(Col)]
  print(t.test(DATA))
  data.frame(mean = t.test(DATA)$estimate,
             conf = t.test(DATA)$estimate - as.numeric(t.test(DATA)$conf.int)[1],row.names = NULL)
}
Y_CLASS_name <- distinct(Data, Y_CLASS)$Y_CLASS
ww <- T.test(Data = Data, Class = '大量白蛋白尿组', Col = 'CysC')
# Cross every class with every analyte name (cartesian product via a
# constant-key join)
Y_CLASS_name <- distinct(Data, Y_CLASS) %>% mutate(num = 1L)
select_name <- tibble(name = c('CysC', 'UREA', 'CRE', 'ACR'), num=1L)
Y_CLASS_name %>% left_join(select_name, by = c("num"))
# One-sample t-test of GFR against mu = 30 in the macroalbuminuria group
xxx <- Data %>%
  filter(Y_CLASS == '大量白蛋白尿组')
z <- t.test(xxx$GFR, mu=30)
z$null.value
# CysC vs GFR scatter coloured by GFR stage
CysC_GFR_plot <- Data %>%
  #filter(NBDBJGBZ< 3)
  select(CysC, GFR, GFR_CLASS)
ggplot(CysC_GFR_plot, aes(CysC, GFR, colour = GFR_CLASS)) +
  geom_point()
| /doument.R | no_license | xiaodi007/hospital | R | false | false | 2,796 | r | library(dplyr)
library(readr)
library(ggplot2)
library(readr)
paper_search <- read_csv("paper_search.csv",
col_types = cols(id_index = col_character(),
id_index_1 = col_character()))
# 体重身高有异常值手动修改
Data <- paper_search %>%
filter(CysC < 50,
CRE > 2 ) %>%
select(-c(id_index_1)) %>%
mutate(bmi = weight / (height ^ 2)) %>%
mutate(Y_CLASS = case_when(
ACR >= 300 ~ "大量白蛋白尿组",
ACR >= 30 & ACR < 300 ~ "微量白蛋白尿组",
ACR < 30 ~ "正常白蛋白尿组",
TRUE ~ "未知"
)) %>%
mutate(ageEffect = if_else(gender == '男', 1, 0.742)) %>%
mutate(GFR = 30849 * (CRE^(-1.154)) * (age^(-0.203)) * ageEffect) %>%
mutate(GFR_CLASS = case_when(
GFR >= 90 ~ "正常",
GFR >= 60 & GFR <90 ~ "轻度下降",
GFR >= 30 & GFR <60 ~ "中度下降",
GFR >= 15 & GFR <30 ~ "重度下降",
GFR <15 ~ "肾衰竭",
TRUE ~ "未知"
))
# 患者共
Data %>% summarise(n())
#其中男 例,女 例
Data %>% group_by(gender) %>% summarise(n())
#平均 岁
xx <- t.test(Data$age)
xx$conf.int
xx$estimate
#病程平均 年
t.test(Data$courseofdiabetes)
#BMI
t.test(Data$bmi)
# 各组有多少人
xx <- Data %>% group_by(Y_CLASS) %>% summarise(n())
xx
# 正常白蛋白尿组
ZC_DATA <- Data %>%
filter(Y_CLASS == '正常白蛋白尿组')
Data %>% filter(Y_CLASS == '正常白蛋白尿组') %>% select(CysC) %>% .[,1]
##Cys
t.test(ZC_DATA$CysC)
group_index <- Data %>%
group_by(Y_CLASS) %>%
summarise(CysC_mean = mean(CysC), CysC_std = 1.96 * sd(CysC),
UREA_mean = mean(UREA), UREA_std = 1.96 * sd(UREA),
CRE_mean = mean(CRE), CRE_std = 1.96 * sd(CRE),
GFR_mean = mean(GFR), GFR_std = 1.96 * sd(GFR)
) %>%
ungroup()
T.test <- function(Data, Class, Col){
DATA <- Data %>% filter(Y_CLASS == Class) %>% as.data.frame(.) %>% .[, quo_name(Col)]
print(t.test(DATA))
data.frame(mean = t.test(DATA)$estimate,
conf = t.test(DATA)$estimate - as.numeric(t.test(DATA)$conf.int)[1],row.names = NULL)
}
Y_CLASS_name <- distinct(Data, Y_CLASS)$Y_CLASS
ww <- T.test(Data = Data, Class = '大量白蛋白尿组', Col = 'CysC')
Y_CLASS_name <- distinct(Data, Y_CLASS) %>% mutate(num = 1L)
select_name <- tibble(name = c('CysC', 'UREA', 'CRE', 'ACR'), num=1L)
Y_CLASS_name %>% left_join(select_name, by = c("num"))
xxx <- Data %>%
filter(Y_CLASS == '大量白蛋白尿组')
z <- t.test(xxx$GFR, mu=30)
z$null.value
#CysC GFR
CysC_GFR_plot <- Data %>%
#filter(NBDBJGBZ< 3)
select(CysC, GFR, GFR_CLASS)
ggplot(CysC_GFR_plot, aes(CysC, GFR, colour = GFR_CLASS)) +
geom_point()
|
#' Adds a Layer with Observations to a Profile Plot
#'
#' Function \code{\link{show_observations}} adds a layer to a plot created with
#' \code{\link{plot.ceteris_paribus_explainer}} for selected observations.
#' Various parameters help to decide what should be plotted, profiles, aggregated profiles, points or rugs.
#'
#' @param x a ceteris paribus explainer produced with function \code{ceteris_paribus()}
#' @param ... other explainers that shall be plotted together
#' @param color a character. Either name of a color or name of a variable that should be used for coloring
#' @param size a numeric. Size of lines to be plotted
#' @param alpha a numeric between \code{0} and \code{1}. Opacity of lines
#' @param variables if not \code{NULL} then only \code{variables} will be presented
#' @param variable_type a character. If \code{numerical} then only numerical variables will be plotted.
#' If \code{categorical} then only categorical variables will be plotted.
#'
#' @return a \code{ggplot2} layer
#'
#' @examples
#' library("DALEX")
#' library("randomForest")
#'
#' rf_model <- randomForest(survived ~.,
#'                          data = titanic_imputed)
#'
#' explainer_rf <- explain(rf_model,
#'                         data = titanic_imputed[,-8],
#'                         y = titanic_imputed[,8],
#'                         label = "RF", verbose = FALSE)
#'
#' selected_passangers <- select_sample(titanic_imputed, n = 100)
#' cp_rf <- ceteris_paribus(explainer_rf, selected_passangers)
#' cp_rf
#'
#' plot(cp_rf, variables = "age", color = "grey") +
#'   show_observations(cp_rf, variables = "age", color = "black") +
#'   show_rugs(cp_rf, variables = "age", color = "red")
#'
#'
#' @export
show_observations <- function(x, ...,
                              size = 2,
                              alpha = 1,
                              color = "#371ea3",
                              variable_type = "numerical",
                              variables = NULL) {
  check_variable_type(variable_type)

  # if there is more explainers, they should be merged into a single data frame;
  # the original observations live in each explainer's "observations" attribute
  dfl <- c(list(x), list(...))
  all_observations <- lapply(dfl, function(tmp) {
    attr(tmp, "observations")
  })
  all_observations <- do.call(rbind, all_observations)
  all_observations$`_ids_` <- factor(rownames(all_observations))

  # variables to use: every column whose name does not start with "_"
  all_variables <- grep(colnames(all_observations), pattern = "^[^_]", value = TRUE)
  if (!is.null(variables)) {
    all_variables <- intersect(all_variables, variables)
    # NOTE(review): at this point all_variables is empty, so the pasted list in
    # the message is always blank; the requested `variables` were likely meant.
    if (length(all_variables) == 0) stop(paste0("variables do not overlap with ", paste(all_variables, collapse = ", ")))
  }

  # only numerical or only factors?
  is_numeric <- sapply(all_observations[, all_variables, drop = FALSE], is.numeric)
  if (variable_type == "numerical") {
    vnames <- all_variables[which(is_numeric)]
    if (length(vnames) == 0) stop("There are no numerical variables")
  } else {
    vnames <- all_variables[which(!is_numeric)]
    if (length(vnames) == 0) stop("There are no non-numerical variables")
  }

  # prepare data for plotting points: one long data frame stacking the
  # selected variables, carrying prediction, truth, id, label and the
  # (optional) coloring column
  is_color_points_a_variable <- color %in% c(all_variables, "_label_", "_vname_", "_ids_")
  tmp <- lapply(vnames, function(var) {
    data.frame(`_x_` = all_observations[,var],
               `_vname_` = var,
               `_yhat_` = all_observations$`_yhat_`,
               `_y_` = if (is.null(all_observations$`_y_`)) NA else all_observations$`_y_`,
               `_color_` = if (!is_color_points_a_variable) NA else {
                 if (color == "_vname_") var else all_observations[,color]
               },
               `_ids_` = all_observations$`_ids_`,
               `_label_` = all_observations$`_label_`)
  })
  all_observations_long <- do.call(rbind, tmp)
  colnames(all_observations_long) <- c("_x_", "_vname_", "_yhat_", "_y_", "_color_", "_ids_", "_label_")
  # rename the color column to the user-supplied variable name so aes_string
  # below can reference it directly
  if ((is_color_points_a_variable ) & !(color %in% colnames(all_observations_long)))
    colnames(all_observations_long)[5] = color

  # show observations: color either maps to a variable or is a literal color
  if (is_color_points_a_variable) {
    res <- geom_point(data = all_observations_long, aes_string(color = paste0("`",color,"`")), size = size, alpha = alpha)
  } else {
    res <- geom_point(data = all_observations_long, size = size, alpha = alpha, color = color)
  }
  res
}
| /R/show_observations.R | no_license | WojciechKretowicz/ingredients | R | false | false | 4,333 | r | #' Adds a Layer with Observations to a Profile Plot
#'
#' Function \code{\link{show_observations}} adds a layer to a plot created with
#' \code{\link{plot.ceteris_paribus_explainer}} for selected observations.
#' Various parameters help to decide what should be plotted, profiles, aggregated profiles, points or rugs.
#'
#' @param x a ceteris paribus explainer produced with function \code{ceteris_paribus()}
#' @param ... other explainers that shall be plotted together
#' @param color a character. Either name of a color or name of a variable that should be used for coloring
#' @param size a numeric. Size of lines to be plotted
#' @param alpha a numeric between \code{0} and \code{1}. Opacity of lines
#' @param variables if not \code{NULL} then only \code{variables} will be presented
#' @param variable_type a character. If \code{numerical} then only numerical variables will be plotted.
#' If \code{categorical} then only categorical variables will be plotted.
#'
#' @return a \code{ggplot2} layer
#'
#' @examples
#' library("DALEX")
#' library("randomForest")
#'
#' rf_model <- randomForest(survived ~.,
#' data = titanic_imputed)
#'
#' explainer_rf <- explain(rf_model,
#' data = titanic_imputed[,-8],
#' y = titanic_imputed[,8],
#' label = "RF", verbose = FALSE)
#'
#' selected_passangers <- select_sample(titanic_imputed, n = 100)
#' cp_rf <- ceteris_paribus(explainer_rf, selected_passangers)
#' cp_rf
#'
#' plot(cp_rf, variables = "age", color = "grey") +
#' show_observations(cp_rf, variables = "age", color = "black") +
#' show_rugs(cp_rf, variables = "age", color = "red")
#'
#'
#' @export
show_observations <- function(x, ...,
size = 2,
alpha = 1,
color = "#371ea3",
variable_type = "numerical",
variables = NULL) {
check_variable_type(variable_type)
# if there is more explainers, they should be merged into a single data frame
dfl <- c(list(x), list(...))
all_observations <- lapply(dfl, function(tmp) {
attr(tmp, "observations")
})
all_observations <- do.call(rbind, all_observations)
all_observations$`_ids_` <- factor(rownames(all_observations))
# variables to use
all_variables <- grep(colnames(all_observations), pattern = "^[^_]", value = TRUE)
if (!is.null(variables)) {
all_variables <- intersect(all_variables, variables)
if (length(all_variables) == 0) stop(paste0("variables do not overlap with ", paste(all_variables, collapse = ", ")))
}
# only numerical or only factors?
is_numeric <- sapply(all_observations[, all_variables, drop = FALSE], is.numeric)
if (variable_type == "numerical") {
vnames <- all_variables[which(is_numeric)]
if (length(vnames) == 0) stop("There are no numerical variables")
} else {
vnames <- all_variables[which(!is_numeric)]
if (length(vnames) == 0) stop("There are no non-numerical variables")
}
# prepare data for plotting points
is_color_points_a_variable <- color %in% c(all_variables, "_label_", "_vname_", "_ids_")
tmp <- lapply(vnames, function(var) {
data.frame(`_x_` = all_observations[,var],
`_vname_` = var,
`_yhat_` = all_observations$`_yhat_`,
`_y_` = if (is.null(all_observations$`_y_`)) NA else all_observations$`_y_`,
`_color_` = if (!is_color_points_a_variable) NA else {
if (color == "_vname_") var else all_observations[,color]
},
`_ids_` = all_observations$`_ids_`,
`_label_` = all_observations$`_label_`)
})
all_observations_long <- do.call(rbind, tmp)
colnames(all_observations_long) <- c("_x_", "_vname_", "_yhat_", "_y_", "_color_", "_ids_", "_label_")
if ((is_color_points_a_variable ) & !(color %in% colnames(all_observations_long)))
colnames(all_observations_long)[5] = color
# show observations
if (is_color_points_a_variable) {
res <- geom_point(data = all_observations_long, aes_string(color = paste0("`",color,"`")), size = size, alpha = alpha)
} else {
res <- geom_point(data = all_observations_long, size = size, alpha = alpha, color = color)
}
res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_log1p.R
\name{my_log1p}
\alias{my_log1p}
\title{Custom log1p for data with negative values less than -1}
\usage{
my_log1p(x, base = exp(1))
}
\arguments{
\item{x}{a numeric vector}
\item{base}{base of the logarithm: exp(1) (natural, default), 10, or 2, applied as log(x + a)}
}
\value{
a dataframe
}
\description{
Custom log1p for data with negative values less than -1
}
| /man/my_log1p.Rd | no_license | kun-ecology/ecoloop | R | false | true | 409 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/my_log1p.R
\name{my_log1p}
\alias{my_log1p}
\title{Customed log1p for data with negative value less than -1}
\usage{
my_log1p(x, base = exp(1))
}
\arguments{
\item{x}{a numeric vector}
\item{base}{exp(1) or 10, or 2 based log (x+a)}
}
\value{
a dataframe
}
\description{
Customed log1p for data with negative value less than -1
}
|
#' @import mlr
# Predict impairment probabilities with an ensemble of fitted mlr models.
#
# object: a list with `models` (a list of mlr WrappedModel fits) and
#   `threshold` (the raw probability that maps to 0.5 after rescaling).
# newdata: data.frame of predictors passed to every model.
# pred.all: if TRUE return both the per-model probabilities and their mean;
#   otherwise only the mean.
#
# NOTE(review): relies on the non-exported mlr:::predict.WrappedModel, on a
# magrittr pipe being available in the package namespace, and on the
# deprecated dplyr::as.tbl (superseded by as_tibble) -- confirm before
# upgrading dependencies.
predict_DT <- function(object,
                       newdata,
                       pred.all = TRUE) {
  # Piecewise-linear rescaling of [0, 1] so that `threshold` maps to 0.5
  funScale <- approxfun(x = c(0, object$threshold, 1),
                        y = c(0, 0.5, 1))
  # Rescaled "impaired" probabilities: one column per model in the ensemble
  IP_all <- pbapply::pblapply(object$models,
                              mlr:::predict.WrappedModel,
                              newdata = newdata) %>%
    lapply(function(pred) {
      funScale(pred$data$prob.impaired)
    }) %>%
    do.call(what = cbind) %>%
    data.frame() %>%
    dplyr::as.tbl()
  colnames(IP_all) <- paste("iter", 1:length(object$models), sep = "_")
  # Row-wise ensemble average across all models
  IP_summ <- apply(X = IP_all, MARGIN = 1, FUN = mean) %>%
    data.frame() %>%
    dplyr::as.tbl()
  colnames(IP_summ) <- "average"
  if (pred.all) {
    return(list(IP_all = IP_all, IP_summ = IP_summ))
  } else {
    return(IP_summ)
  }
}
| /R/predict_DT.R | no_license | CedricMondy/ecodiag | R | false | false | 957 | r | #' @import mlr
predict_DT <- function(object,
newdata,
pred.all = TRUE) {
funScale <- approxfun(x = c(0, object$threshold, 1),
y = c(0, 0.5, 1))
IP_all <- pbapply::pblapply(object$models,
mlr:::predict.WrappedModel,
newdata = newdata) %>%
lapply(function(pred) {
funScale(pred$data$prob.impaired)
}) %>%
do.call(what = cbind) %>%
data.frame() %>%
dplyr::as.tbl()
colnames(IP_all) <- paste("iter", 1:length(object$models), sep = "_")
IP_summ <- apply(X = IP_all, MARGIN = 1, FUN = mean) %>%
data.frame() %>%
dplyr::as.tbl()
colnames(IP_summ) <- "average"
if (pred.all) {
return(list(IP_all = IP_all, IP_summ = IP_summ))
} else {
return(IP_summ)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog.range.overlap.R
\name{geog.range.overlap}
\alias{geog.range.overlap}
\title{Takes two emtools.species objects with range rasters, calculates overlap between them as in Fitzpatrick and Turelli 2006}
\usage{
geog.range.overlap(x, y)
}
\arguments{
\item{x}{An enmtools.species object containing a range raster}
\item{y}{An enmtools.species object containing a range raster}
}
\description{
Takes two emtools.species objects with range rasters, calculates overlap between them as in Fitzpatrick and Turelli 2006
}
\examples{
data(iberolacerta.clade)
data(euro.worldclim)
cyreni <- iberolacerta.clade$species$cyreni
monticola <- iberolacerta.clade$species$monticola
cyreni$range <- background.raster.buffer(cyreni$presence.points, 100000, euro.worldclim)
monticola$range <- background.raster.buffer(monticola$presence.points, 100000, euro.worldclim)
geog.range.overlap(cyreni, monticola)
}
| /man/geog.range.overlap.Rd | no_license | helixcn/ENMTools | R | false | true | 969 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog.range.overlap.R
\name{geog.range.overlap}
\alias{geog.range.overlap}
\title{Takes two emtools.species objects with range rasters, calculates overlap between them as in Fitzpatrick and Turelli 2006}
\usage{
geog.range.overlap(x, y)
}
\arguments{
\item{x}{An enmtools.species object containing a range raster}
\item{y}{An enmtools.species object containing a range raster}
}
\description{
Takes two emtools.species objects with range rasters, calculates overlap between them as in Fitzpatrick and Turelli 2006
}
\examples{
data(iberolacerta.clade)
data(euro.worldclim)
cyreni <- iberolacerta.clade$species$cyreni
monticola <- iberolacerta.clade$species$monticola
cyreni$range <- background.raster.buffer(cyreni$presence.points, 100000, euro.worldclim)
monticola$range <- background.raster.buffer(monticola$presence.points, 100000, euro.worldclim)
geog.range.overlap(cyreni, monticola)
}
|
library(dirmult) # example code functions and dirmult, equalTheta functions
library(ggplot2) # pretty plots for pi
library(doParallel) # parallelizing
library(MASS) # ginv function for Xsc.statistics
library(vegan) # ga distances
library(gplots) # heatmap plots
library(rpart) # base rpart
library(rpart.plot) # rpart plotting
library(lattice) # Repeated measures plotting
### Define a package-private environment to hold mutable state used with rpart.
### EVAL_COUNT_RPART is initialised to 1 here; presumably it is read/updated by
### rpart-related helpers elsewhere in the package -- confirm before changing.
hmp.pkg.env <- new.env(parent=emptyenv())
hmp.pkg.env$EVAL_COUNT_RPART <- 1
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### External
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### ~~~~~~~~~~~~~~~~~~~~~
### Generation functions
### ~~~~~~~~~~~~~~~~~~~~~
### Draw multinomial count data: one row per sample, all samples sharing the
### same taxa probability vector `probs`.
###
### Nrs: vector of per-sample read counts (number of trials per sample).
### probs: vector of taxa probabilities, one entry per taxon.
### Returns a length(Nrs) x length(probs) matrix with "Sample i" row names
### and "Taxa j" column names.
Multinomial <- function(Nrs, probs){
  if(missing(Nrs) || missing(probs))
    stop("Nrs and/or probs missing.")

  # One rmultinom draw per sample, performed in sample order so the RNG
  # stream is identical to a plain loop implementation.
  flat <- vapply(
    seq_along(Nrs),
    function(i) stats::rmultinom(1, Nrs[i], probs)[, 1],
    numeric(length(probs))
  )

  # vapply yields one column per sample (or a plain vector when there is a
  # single taxon); rebuild as a samples-in-rows matrix either way.
  mData <- matrix(flat, nrow = length(Nrs), ncol = length(probs), byrow = TRUE)
  dimnames(mData) <- list(paste("Sample", seq_len(nrow(mData))),
                          paste("Taxa", seq_len(ncol(mData))))
  mData
}
### Draw Dirichlet-multinomial count data: for every sample a fresh taxa
### probability vector is drawn from Dirichlet(shape), then counts are drawn
### from a multinomial with that vector, giving sample-to-sample
### overdispersion relative to a plain multinomial.
###
### Nrs: vector of per-sample read counts.
### shape: Dirichlet shape (gamma) parameters, one per taxon.
### Returns a length(Nrs) x length(shape) labelled count matrix.
Dirichlet.multinomial <- function(Nrs, shape){
  if(missing(Nrs) || missing(shape))
    stop("Nrs and/or shape missing.")

  dmData <- matrix(0, length(Nrs), length(shape))
  for(i in seq_along(Nrs)){
    # Dirichlet draw first, then the multinomial draw -- same RNG order as
    # the nested-call formulation.
    sampleProbs <- dirmult::rdirichlet(1, shape)
    dmData[i, ] <- stats::rmultinom(1, Nrs[i], sampleProbs)
  }

  dimnames(dmData) <- list(paste("Sample", seq_len(nrow(dmData))),
                           paste("Taxa", seq_len(ncol(dmData))))
  dmData
}
### ~~~~~~~~~~~~~~~~~~~~~
### Other functions
### ~~~~~~~~~~~~~~~~~~~~~
### C(alpha)-type goodness-of-fit test for the multinomial model: tests
### whether all samples (rows of `data`) share a common taxa probability
### vector, against overdispersion.
###
### data: taxa-count matrix, one row per sample, one column per taxon.
### Returns a list with the T statistic and its p-value.
C.alpha.multinomial <- function(data){
  if(missing(data))
    stop("data missing.")

  # Each sample's share of the total reads (weights used below)
  perNumReadsSubs <- rowSums(data)/sum(data)

  # Get T statistic (package-internal helper T.statistics)
  Ts <- T.statistics(data)

  # Scaling of the null distribution: T is compared to g * chi-square(df),
  # with g and df derived from the trace of M.alpha and M.alpha^2
  M.alpha <- diag(perNumReadsSubs)- as.matrix(perNumReadsSubs) %*% t(as.matrix(perNumReadsSubs))
  g <- sum(diag(M.alpha %*% M.alpha)) / sum(diag(M.alpha))
  df <- (ncol(data)-1)*((sum(diag(M.alpha)))^2) / (sum(diag(M.alpha %*% M.alpha)))

  # Get pvalue (upper tail of the scaled chi-square)
  pval <- 1-pchisq(q=Ts/g, df=df, ncp=0, lower.tail=TRUE)

  GoF.test <- list("T statistics"=Ts, "p value"=pval)
  return(GoF.test)
}
### Method-of-moments fit of the Dirichlet-multinomial distribution.
###
### data: taxa-count matrix, one row per sample.
### Returns a list with the DM log-likelihood at the MoM estimates (`loglik`),
### the shape vector (`gamma`), the pooled taxa proportions (`pi`) and the
### overdispersion parameter (`theta`).
DM.MoM <- function(data){
  if(missing(data))
    stop("data missing.")

  # MoM estimates: pooled taxa proportions and Weir's theta (helper weirMoM)
  pi.MoM <- colSums(data)/sum(data)
  theta.MoM <- weirMoM(data, pi.MoM)$theta
  # gamma_j = pi_j * (1 - theta) / theta
  gamma.MoM <- pi.MoM*((1-theta.MoM)/theta.MoM)

  # Set LL to Inf if we only have 1 sample (the likelihood is not
  # meaningful for a single observation)
  if(nrow(data) == 1){
    loglikdm <- Inf
  }else{
    loglikdm <- loglikDM(data, gamma.MoM)
  }

  fit.MoM <- list(loglik=loglikdm, gamma=gamma.MoM, pi=pi.MoM, theta=theta.MoM)
  return(fit.MoM)
}
### Pairwise symmetrised Kullback-Leibler divergences between groups, under
### Dirichlet-multinomial models fitted to each group by method of moments.
###
### group.data: list of >= 2 taxa-count matrices (samples in rows).
### plot: if TRUE, draw a clustered heatmap of the divergence matrix.
### main: heatmap title.
### parallel/cores: fit the per-group DM models in parallel.
### Returns the symmetric numGrps x numGrps matrix of divergences.
Kullback.Leibler <- function(group.data, plot=TRUE, main="Kullback Leibler Divergences", parallel=FALSE, cores=3){
  if(missing(group.data))
    stop("data missing.")

  # Check the number of groups
  numGrps <- length(group.data)
  if(numGrps < 2)
    stop("At least 2 data sets are required.")

  # Make sure we have the same columns in every group
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if(any(taxaCounts != numTaxa)){
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
  }

  # Make sure we have group names (fall back to "Data Set i")
  if(is.null(names(group.data))){
    grpNames <- paste("Data Set", 1:numGrps)
  }else{
    grpNames <- names(group.data)
  }

  # Add 1 so we don't ever get an all 0 comparison (keeps likelihoods finite)
  group.data <- lapply(group.data, function(x) x+1)

  # Run the MoM Dirichlet-multinomial fit (DM.MoM) on every group.
  # NOTE(review): the %dopar% operator must be in scope via the foreach
  # package namespace for the parallel branch to work.
  if(parallel){
    cl <- parallel::makeCluster(min(cores, numGrps))
    doParallel::registerDoParallel(cl)
    tryCatch({
      results <- foreach::foreach(i=1:numGrps, .combine=list, .multicombine=TRUE, .inorder=TRUE, .packages=c("dirmult")) %dopar%{
        param <- DM.MoM(group.data[[i]])
        return(param)
      }
    }, finally = {
      parallel::stopCluster(cl) # Close the parallel connections
    }
    )
  }else{
    results <- vector("list", numGrps)
    for(i in 1:numGrps)
      results[[i]] <- DM.MoM(group.data[[i]])
  }

  # Get alpha (DM gamma/shape parameters) for every group
  alpha <- lapply(results, function(x) x$gamma)
  names(alpha) <- grpNames

  # Get LL of each group under its own fitted parameters
  LL.vals <- sapply(results, function(x) x$loglik)

  # Symmetrised divergence: log-likelihood drop when each group is scored
  # with the other group's parameters, summed over both directions
  KLmat <- matrix(0, numGrps, numGrps)
  for(i in 1:numGrps){
    for(j in i:numGrps){
      if(i == j)
        next
      KLval1 <- LL.vals[i] - loglikDM(group.data[[i]], alpha[[j]])
      KLval2 <- LL.vals[j] - loglikDM(group.data[[j]], alpha[[i]])
      KLmat[i, j] <- KLval1 + KLval2
      KLmat[j, i] <- KLval1 + KLval2
    }
  }
  colnames(KLmat) <- grpNames
  rownames(KLmat) <- grpNames

  # Optional clustered heatmap of the divergence matrix
  if(plot){
    gplots::heatmap.2(as.matrix(KLmat), dendrogram="both", Rowv=TRUE, Colv=TRUE,
                      trace="none", symm=TRUE, margins=c(12, 9), density.info="none",
                      main=main
    )
  }

  return(KLmat)
}
### Effect-size measures for the Xmcupo test comparing taxa-frequency
### profiles across several groups of samples.
###
### group.data: list of taxa-count matrices, one per group (samples in rows).
### Returns a named vector: the Xmcupo chi-squared statistic, Cramer's phi,
### a modified Cramer's phi normalised by the maximal attainable statistic,
### and the chi-square p-value.
Xmcupo.effectsize <- function(group.data){
  if(missing(group.data))
    stop("group.data missing.")

  # Make sure we have the same columns in every group
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if(any(taxaCounts != numTaxa)){
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
    numTaxa <- ncol(group.data[[1]])
  }

  numGroups <- length(group.data)
  totalReads <- sum(sapply(group.data, sum))
  if(numTaxa < numGroups)
    stop("The number of taxa must be greater than the number of groups.")

  # Get the parameters for every group
  groupParameter <- lapply(group.data, function(x){
    # Calc per-sample read counts, pooled taxa proportions pi, and
    # Weir's MoM overdispersion theta
    numReadsSubs <- rowSums(x)
    pi.MoM <- colSums(x)/sum(x)
    theta.MoM <- weirMoM(x, pi.MoM)$theta
    return(list(pi=pi.MoM, theta=theta.MoM, nrs=numReadsSubs))
  })

  # Calculate Xmcupo stats for base data (package-internal helper)
  Xmcupo <- Xmcupo.statistics(groupParameter)

  # Edit parameters to use the biggest difference between pis: group i gets
  # all probability mass on taxon i, yielding the maximal attainable statistic
  groupParameterMax <- groupParameter
  for(i in 1:numGroups){
    newPi <- rep(0, numTaxa)
    newPi[i] <- 1
    groupParameterMax[[i]]$pi <- newPi
  }

  # Calculate Xmcupo stats for biggest difference
  XmcupoMax <- Xmcupo.statistics(groupParameterMax)

  # Cramer's phi, and a modified version normalised by the maximum statistic
  CramerV <- sqrt(Xmcupo/(totalReads*min(numTaxa-1, numGroups-1)))
  Mod.CramerV <- sqrt(Xmcupo/XmcupoMax)

  # Calculate pvalue from the chi-square null with (G-1)(K-1) df
  pval <- 1-pchisq(q=Xmcupo, df=(numGroups-1)*(numTaxa-1), ncp=0, lower.tail=TRUE)

  result <- c("Chi-Squared"=Xmcupo, "Cramer Phi"=CramerV, "Modified-Cramer Phi"=Mod.CramerV, "P value"=pval)
  return(result)
}
# Estimate the taxa proportion (pi) vector and overdispersion (theta) for
# every data set in group.data, with per-taxa confidence bounds.
#
# group.data: list of count matrices/data frames (subjects x taxa).
# conf:       two-sided confidence level for the bounds (default .95).
# Returns list(MLE=..., MOM=...); each element holds:
#   params - data frame with columns Taxa, Group, PI, SE, Upper, Lower
#   theta  - data frame with one row per group (theta estimate and SE)
Est.PI <- function(group.data, conf=.95){
if(missing(group.data))
stop("group.data is missing.")
# Check the number of groups
numGroups <- length(group.data)
# Make sure we have the same columns
taxaCounts <- sapply(group.data, ncol)
numTaxa <- taxaCounts[1]
if(any(taxaCounts != numTaxa)){
warning("Group columns do not match, running formatDataSets.")
group.data <- formatDataSets(group.data)
}
# Make sure we have group names
if(is.null(names(group.data))){
grpNames <- paste("Data Set", 1:numGroups)
}else{
grpNames <- names(group.data)
}
# Accumulators: per-taxa parameter rows (MLE and MoM) and per-group theta
allParamsMLE <- data.frame(matrix(0, 0, 6))
allParamsMOM <- data.frame(matrix(0, 0, 6))
thetaMLE <- data.frame(matrix(0, numGroups, 3))
thetaMOM <- data.frame(matrix(0, numGroups, 3))
for(i in 1:numGroups){
tempData <- group.data[[i]]
# Check the data has samples
numSub <- nrow(tempData)
if(numSub < 1)
stop("At least one data set in group.data is empty")
# One row per taxa; column 2 carries the group label
tempParam1 <- data.frame(matrix(0, ncol(tempData), 6))
tempParam2 <- data.frame(matrix(0, ncol(tempData), 6))
tempParam2[,2] <- grpNames[i]
tempParam1[,2] <- grpNames[i]
# Check for taxa with 0 column sums (add 1 to everything if this happens)
# NOTE(review): this pseudo-count shifts every count in the data set, not
# just the empty taxa — confirm this is the intended adjustment.
badTaxa <- which(colSums(tempData) == 0)
if(length(badTaxa) != 0)
tempData <- tempData + 1
# Handle having 1 sample: raw proportions, no standard errors
if(numSub == 1){
tempParam1[,1] <- colnames(tempData)
tempParam1[,3] <- unlist(tempData[1,]/sum(tempData))
tempParam1[,4] <- NA
tempParam1[,5] <- tempParam1[,3]
tempParam1[,6] <- tempParam1[,3]
tempParam1 <- tempParam1[order(tempParam1[,1]),]
tempTheta1 <- c(0, NA)
tempParam2 <- tempParam1
tempTheta2 <- tempTheta1
}else{
# Get the MoM and MLE for every taxa via dirmult; the last summary
# row is theta, the rest are per-taxa estimates
fsum <- dirmult::dirmult.summary(tempData, dirmult::dirmult(tempData, trace=FALSE))
tempTheta <- fsum[nrow(fsum),]
fsum <- fsum[-nrow(fsum),]
# Turn the summary into a data frame we can plot from
tempParam1[,1] <- rownames(fsum)
tempParam1[,3] <- fsum$MLE
tempParam1[,4] <- fsum$se.MLE
tempTheta1 <- tempTheta[,2:3]
tempParam2[,1] <- rownames(fsum)
tempParam2[,3] <- fsum$MoM
tempParam2[,4] <- fsum$se.MOM
tempTheta2 <- tempTheta[,4:5]
# Calc Upper and Lower bounds for CI: t quantile for small samples,
# normal quantile otherwise
minSubj <- min(sapply(group.data, function(x) nrow(x)))
if(minSubj < 30){
val <- stats::qt(0.5 + conf *0.5, df=minSubj-1)
}else{
val <- stats::qnorm(0.5 + conf*0.5)
}
tempParam1[,5] <- tempParam1[,3] + val*tempParam1[,4]
tempParam1[,6] <- tempParam1[,3] - val*tempParam1[,4]
tempParam2[,5] <- tempParam2[,3] + val*tempParam2[,4]
tempParam2[,6] <- tempParam2[,3] - val*tempParam2[,4]
}
# Save outside of loop
allParamsMLE <- rbind(allParamsMLE, tempParam1)
thetaMLE[i,] <- c(grpNames[i], tempTheta1)
allParamsMOM <- rbind(allParamsMOM, tempParam2)
thetaMOM[i,] <- c(grpNames[i], tempTheta2)
}
colnames(allParamsMLE) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
# NOTE(review): tempTheta1/tempTheta2 come from the LAST loop iteration;
# if that group had a single subject they are unnamed c(0, NA) and
# colnames() is NULL — verify the single-sample path.
colnames(thetaMLE) <- c("Group", colnames(tempTheta1))
colnames(allParamsMOM) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
colnames(thetaMOM) <- c("Group", colnames(tempTheta2))
# Make sure none of our error bars go over 100 or below 0
allParamsMLE$Upper <- ifelse(allParamsMLE$Upper > 1, 1, allParamsMLE$Upper)
allParamsMLE$Lower <- ifelse(allParamsMLE$Lower < 0, 0, allParamsMLE$Lower)
allParamsMOM$Upper <- ifelse(allParamsMOM$Upper > 1, 1, allParamsMOM$Upper)
allParamsMOM$Lower <- ifelse(allParamsMOM$Lower < 0, 0, allParamsMOM$Lower)
# Factor the data so it stays in the right order when plotted
allParamsMLE$Group <- factor(allParamsMLE$Group, levels=grpNames)
allParamsMLE$Taxa <- factor(allParamsMLE$Taxa, levels=unique(colnames(group.data[[1]])))
allParamsMOM$Group <- factor(allParamsMOM$Group, levels=grpNames)
allParamsMOM$Taxa <- factor(allParamsMOM$Taxa, levels=unique(colnames(group.data[[1]])))
MLE <- list(params=allParamsMLE, theta=thetaMLE)
MOM <- list(params=allParamsMOM, theta=thetaMOM)
return(list(MLE=MLE, MOM=MOM))
}
# Paired (sign-flip) permutation test for a difference between two groups
# of taxa count data measured on the same subjects.
#
# group.data: list of exactly 2 count matrices/data frames
#             (subjects x taxa), rows paired by subject.
# numPerms:   number of sign-flip permutations (> 0).
# parallel:   run the permutations on a doParallel cluster.
# cores:      cluster size when parallel=TRUE.
# Returns the permutation p-value.
Test.Paired <- function(group.data, numPerms=1000, parallel=FALSE, cores=3){
  if(missing(group.data))
    stop("group.data is missing.")
  if(length(group.data) != 2)
    stop("group.data must have exactly 2 data sets.")
  if(numPerms <= 0)
    stop("The number of permutations must be an integer greater than 0.")
  # Make sure both data sets share the same taxa columns
  if(ncol(group.data[[1]]) != ncol(group.data[[2]])){
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
  }
  # A paired test needs the same subjects in both data sets
  numSub <- nrow(group.data[[1]])
  if(numSub != nrow(group.data[[2]]))
    stop("Groups must have the same number of subjects.")
  # Align subjects by row name when possible
  rNames1 <- rownames(group.data[[1]])
  rNames2 <- rownames(group.data[[2]])
  if(!all(rNames1 == rNames2)){ # check names in the same order
    if(all(rNames1 %in% rNames2)){ # same names in a different order: sort both
      group.data[[1]] <- group.data[[1]][order(rNames1),]
      group.data[[2]] <- group.data[[2]][order(rNames2),]
    }else{
      warning("Subject names do not match, assuming data is ordered correctly.")
    }
  }
  # Turn counts into per-subject relative abundances
  group.data[[1]] <- group.data[[1]]/rowSums(group.data[[1]])
  group.data[[2]] <- group.data[[2]]/rowSums(group.data[[2]])
  # Observed statistic: sum of squared per-taxa mean paired differences
  dataDiff <- group.data[[1]] - group.data[[2]]
  meanDiff <- apply(dataDiff, 2, mean)
  obsDiff <- sum(meanDiff^2)
  # Null distribution: randomly flip the sign of each subject's difference
  if(parallel){
    cl <- parallel::makeCluster(cores)
    doParallel::registerDoParallel(cl)
    tryCatch({
      permDiffs <- foreach::foreach(i=1:numPerms, .combine=c, .inorder=FALSE, .multicombine=TRUE) %dopar%{
        # Randomly swap group membership by reversing the difference sign
        swaps <- sample(c(1, -1), numSub, replace=TRUE)
        dataDiffTemp <- dataDiff * swaps
        meanDiffTemp <- apply(dataDiffTemp, 2, mean)
        return(sum(meanDiffTemp^2))
      }
    }, finally = {
      parallel::stopCluster(cl) # Close the parallel connections
    }
    )
  }else{
    permDiffs <- rep(0, numPerms)
    for(i in 1:numPerms){
      # Randomly swap group membership by reversing the difference sign
      swaps <- sample(c(1, -1), numSub, replace=TRUE)
      dataDiffTemp <- dataDiff * swaps
      meanDiffTemp <- apply(dataDiffTemp, 2, mean)
      permDiffs[i] <- sum(meanDiffTemp^2)
    }
  }
  # Permutation p-value with a +1 continuity correction
  pval <- (sum(permDiffs >= obsDiff) + 1)/(numPerms + 1)
  return(pval)
}
# Fit a Dirichlet-Multinomial rpart tree, dispatching on the requested
# validation scheme: plain fit (numCV < 2), cross-validated fit
# (numCV >= 2, numCon < 2), or consensus of repeated CV fits otherwise.
# All arguments are forwarded unchanged to the chosen implementation,
# and its result list is returned as-is.
DM.Rpart <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0, numCV=10, numCon=0, parallel=FALSE, cores=3, use1SE=FALSE, lowerSE=TRUE){
  if(missing(data) || missing(covars))
    stop("data and/or covars are missing.")
  if(numCV < 2)
    return(DM.Rpart.Base(data, covars, plot, minsplit, minbucket, cp))
  if(numCon < 2)
    return(DM.Rpart.CV(data, covars, plot, minsplit, minbucket, cp, numCV, parallel, cores, use1SE, lowerSE))
  DM.Rpart.CV.Consensus(data, covars, plot, minsplit, minbucket, cp, numCV, numCon, parallel, cores, use1SE, lowerSE)
}
# Fit a single Dirichlet-Multinomial rpart tree with no cross-validation.
# data/covars: response count matrix and covariate data frame.
# Returns a list with the cp table, the fitted tree (as both fullTree and
# bestTree), a NULL errorRate placeholder, the tree size, and split info.
DM.Rpart.Base <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0){
  if(missing(data) || missing(covars))
    stop("data and/or covars are missing.")
  # Custom DM method functions plugged into rpart
  dmMethod <- list(init=rpartInit, eval=rpartEval, split=rpartSplit)
  fit <- rpart::rpart(as.matrix(data) ~., data=covars, method=dmMethod, minsplit=minsplit, minbucket=minbucket, cp=cp)
  cpTab <- fit$cptable
  # Size of the final tree: splits in the last cp-table row, plus one
  treeSize <- cpTab[nrow(cpTab), 2] + 1
  # Split details only exist when the tree actually split
  splitInfo <- if(treeSize > 1) rpartCS(fit) else NULL
  if(plot)
    suppressWarnings(rpart.plot::rpart.plot(fit, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
  return(list(cpTable=cpTab, fullTree=fit, bestTree=fit, errorRate=NULL, size=treeSize, splits=splitInfo))
}
# Cross-validated Dirichlet-Multinomial rpart fit: grows the full tree,
# cross-validates it numCV-fold (rpartCV), then prunes to the cp with the
# lowest CV MSE — or, with use1SE, to the smallest (lowerSE=TRUE) or
# largest (lowerSE=FALSE) tree within one SE of that minimum.
# Returns the same list shape as DM.Rpart.Base plus the CV error rate.
DM.Rpart.CV <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0, numCV=10, parallel=FALSE, cores=3, use1SE=FALSE, lowerSE=TRUE){
if(missing(data) || missing(covars))
stop("data and/or covars are missing.")
if(numCV < 2)
stop("numCV must be at least 2.")
# Run initial Rpart (no plotting; we plot only the pruned tree)
rpartBase <- DM.Rpart.Base(data, covars, FALSE, minsplit, minbucket, cp)
rpartRes <- rpartBase$fullTree
# Check for a valid starting tree; with a single cp row nothing can be pruned
if(nrow(rpartRes$cptable) == 1){
warning("No splits in the data.")
return(rpartBase)
}
cvRes <- rpartCV(data, covars, rpartRes, minsplit, minbucket, numCV, parallel, cores)
# Calculate the best tree from the CV summary
# NOTE(review): columns of ciInfo are used positionally below
# (1 = cp, 2 = nsplit, plus MSE/SE) — confirm against rpartCV's output.
ciInfo <- as.data.frame(cvRes$ciInfo)
# Find the tree with the lowest MSE
minMSE <- min(ciInfo$MSE)
lowestMSELoc <- which(ciInfo$MSE == minMSE)[1]
# Find which trees are within 1 SE of the lowest mse tree
cutoffU <- ciInfo$MSE[lowestMSELoc] + ciInfo$SE[lowestMSELoc]
cutoffL <- ciInfo$MSE[lowestMSELoc] - ciInfo$SE[lowestMSELoc]
ciInfo$within1SE <- ifelse(ciInfo$MSE <= cutoffU & ciInfo$MSE >= cutoffL, 1, 0)
if(use1SE){
# Find the smallest/biggest tree within 1 SE
within <- which(ciInfo$within1SE == 1)
if(lowerSE){
bestTreeLoc <- min(within)
}else{
bestTreeLoc <- max(within)
}
}else{
bestTreeLoc <- lowestMSELoc
}
# Pull out the best tree by pruning at the chosen cp
size <- ciInfo[bestTreeLoc, 2] + 1
best <- rpart::prune(rpartRes, cp=ciInfo[bestTreeLoc, 1])
# Get split info from best tree
splits <- NULL
if(size > 1)
splits <- rpartCS(best)
if(plot)
suppressWarnings(rpart.plot::rpart.plot(best, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
return(list(cpTable=ciInfo, fullTree=rpartRes, bestTree=best, errorRate=cvRes$errorRate, size=size, splits=splits))
}
# Consensus of numCon repeated cross-validated DM rpart fits: averages the
# per-cp MSE (and rank) across runs and prunes the first run's full tree
# at the cp with the lowest mean MSE.
# Returns the same list shape as DM.Rpart.CV (errorRate is NULL here).
DM.Rpart.CV.Consensus <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0, numCV=10, numCon=100, parallel=FALSE, cores=3, use1SE=FALSE, lowerSE=TRUE){
if(missing(data) || missing(covars))
stop("data and/or covars are missing.")
if(numCV < 2)
stop("numCV must be at least 2.")
if(numCon < 2)
stop("numCon must be at least 2.")
# Run numCon independent CV fits, in parallel when requested; each inner
# call is forced sequential (parallel=FALSE, cores=1)
if(parallel){
cl <- parallel::makeCluster(min(cores, numCon))
doParallel::registerDoParallel(cl)
tryCatch({
results <- foreach::foreach(i=1:numCon, .combine=append, .multicombine=FALSE, .inorder=FALSE, .errorhandling="pass", .packages=c("rpart", "HMP")) %dopar%{
cvList <- DM.Rpart.CV(data, covars, FALSE, minsplit, minbucket, cp, numCV, FALSE, 1, use1SE, lowerSE)
return(list(cvList))
}
}, finally = {
parallel::stopCluster(cl) # Close the parallel connections
}
)
}else{
results <- vector("list", numCon)
for(i in 1:numCon)
results[[i]] <- DM.Rpart.CV(data, covars, FALSE, minsplit, minbucket, cp, numCV, FALSE, 1, use1SE, lowerSE)
}
# Combine cv results across runs
# NOTE(review): cpTable columns 4 and 8 are taken as the MSE and rank
# columns of DM.Rpart.CV's ciInfo — confirm that layout stays in sync.
MSETab <- do.call("cbind", lapply(results, function(x){x$cpTable[,4]}))
rankTab <- do.call("cbind", lapply(results, function(x){x$cpTable[,8]}))
ciInfo <- cbind(
results[[1]]$cpTable[,1:3],
"MeanMSE"=rowMeans(MSETab),
"sdMSE"=apply(MSETab, 1, sd),
"MeanRank"=rowMeans(rankTab),
"sdRank"=apply(rankTab, 1, sd)
)
# Find the tree with the lowest mean MSE
minMSE <- min(ciInfo$MeanMSE)
bestTreeLoc <- which(ciInfo$MeanMSE == minMSE)[1]
# Pull out the best tree (prune the first run's full tree at that cp)
size <- ciInfo[bestTreeLoc, 2] + 1
best <- rpart::prune(results[[1]]$fullTree, cp=ciInfo[bestTreeLoc, 1])
# Get split info from best tree
splits <- NULL
if(size > 1)
splits <- rpartCS(best)
if(plot)
suppressWarnings(rpart.plot::rpart.plot(best, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
return(list(cpTable=ciInfo, fullTree=results[[1]]$fullTree, bestTree=best, errorRate=NULL, size=size, splits=splits))
}
# Genetic-algorithm feature (taxa column) selection: evolves binary
# inclusion vectors over `iters` generations, scoring each solution by how
# well the selected columns' distance matrix matches the covariate
# distance matrix (gaScoring).
#
# data/covars:  taxa counts and covariates; covarDist is skipped when a
#               precomputed custCovDist is supplied.
# iters/popSize/earlyStop: GA generations, population size, and an early
#               stop after that many generations without improvement (0 = off).
# penalty, minSolLen, maxSolLen: forwarded to gaScoring to constrain
#               solution length.
# Returns a list with the per-iteration score summary, the final sorted
# population and scores, elapsed time, and the best solution's selected /
# non-selected column names and indices.
Gen.Alg <- function(data, covars, iters=50, popSize=200, earlyStop=0, dataDist="euclidean", covarDist="gower",
verbose=FALSE, plot=TRUE, minSolLen=NULL, maxSolLen=NULL, custCovDist=NULL, penalty=0){
if(missing(data) || (missing(covars) && is.null(custCovDist)))
stop("data and/or covars are missing.")
# Check for any bad numbers
if(iters <= 0)
stop("iters must be an integer greater than 0")
if(popSize <= 0)
stop("popSize must be an integer greater than 0")
if(earlyStop < 0)
stop("earlyStop must be an integer greater than or equal to 0")
if(penalty < 0 || penalty > 1)
stop("penalty must be between 0 and 1")
# Check distances
if(dataDist != "euclidean" && dataDist != "gower")
stop("data.dist must be euclidean or gower.")
if(covarDist != "euclidean" && covarDist != "gower")
stop("covars.dist must be euclidean or gower.")
# Solution length = number of candidate columns
size <- ncol(data)
# Check stopping rules
if(!is.null(minSolLen))
if(minSolLen < 0 || minSolLen >= size)
stop("minSolLen must be 0 or greater and less than the number of columns in data.")
if(!is.null(maxSolLen))
if(maxSolLen <= 0 || maxSolLen > size)
stop("maxSolLen must be greater than 0 and less than or equal to the number columns in data.")
if(!is.null(maxSolLen) && !is.null(minSolLen))
if(maxSolLen < minSolLen)
stop("maxSolLen must be bigger than minSolLen.")
# Define some variables for use in the GA loop; note mutationChance is
# always > 0 here, so the no-mutation branch below never triggers
mutationChance <- 1/(size+1)
elitism <- floor(popSize/5)
evalSumm <- matrix(NA, iters, 6)
newPopSize <- popSize - elitism
newPopulation <- matrix(NA, newPopSize, size)
# Parent selection weights: half-normal, so the top-sorted (fittest)
# solutions are the most likely parents
parentProb <- stats::dnorm(1:popSize, mean=0, sd=(popSize/3))
if(verbose){
print("X. Current Step : Current Time Taken")
runningTime <- proc.time()
print(paste("1. Calculating Distances:", round((proc.time() - runningTime)[3], 3)))
}
# Set up our base covariate distance matrix
if(is.null(custCovDist)){
covarDists <- vegan::vegdist(covars, covarDist)
}else{
covarDists <- custCovDist
}
# Precompute each column's distance contribution once; for euclidean the
# squared pieces sum across columns
colDists <- vector("list", ncol(data))
for(i in 1:ncol(data))
colDists[[i]] <- vegan::vegdist(data[,i], dataDist)
if(dataDist == "euclidean")
colDists <- lapply(colDists, function(x) x^2)
if(verbose)
print(paste("2. Creating Starting Data:", round((proc.time() - runningTime)[3], 3)))
# Create our starting population of binary solutions
population <- gaCreation(data, popSize)
if(verbose)
print(paste("3. Scoring Starting Data:", round((proc.time() - runningTime)[3], 3)))
# Score and sort (best solution first)
evalVals <- rep(NA, popSize)
for(e in 1:popSize)
evalVals[e] <- gaScoring(population[e,], covarDists, colDists, dataDist, penalty, minSolLen, maxSolLen)
population <- population[order(evalVals, decreasing=TRUE),]
bestScoreValue <- max(evalVals)
bestScoreCounter <- 0
if(verbose)
print(paste("4. Running Iterations:", round((proc.time() - runningTime)[3], 3)))
# Run GA
ptr <- proc.time()
for(i in 1:iters){
if(verbose){
if(i %% round(iters/10) == 0)
print(paste("Iteration - ", i, ": ", round((proc.time() - runningTime)[3], 3), sep=""))
}
# Cross over to fill rest of new population: single-point crossover
# between two fitness-weighted parents (endpoints copy one parent whole)
for(child in 1:newPopSize){
parentIDs <- sample(1:popSize, 2, prob=parentProb)
parents <- population[parentIDs,]
crossOverPoint <- sample(0:size, 1)
if(crossOverPoint == 0){
newPopulation[child,] <- parents[2,]
}else if(crossOverPoint == size){
newPopulation[child,] <- parents[1,]
}else{
newPopulation[child,] <- c(parents[1,][1:crossOverPoint], parents[2,][(crossOverPoint+1):size])
}
}
# Mutate all but elite: each bit flips independently with prob mutationChance
if(mutationChance > 0){
population[(elitism+1):popSize,] <- apply(newPopulation, 2, function(x){ifelse(stats::runif(newPopSize) < mutationChance, 1-x, x)})
}else{
population[(elitism+1):popSize,] <- newPopulation
}
# Score and sort our new solutions
for(e in 1:popSize)
evalVals[e] <- gaScoring(population[e,], covarDists, colDists, dataDist, penalty, minSolLen, maxSolLen)
population <- population[order(evalVals, decreasing=TRUE),]
evalSumm[i,] <- summary(evalVals)
# Check if we want to stop early (no new best for earlyStop generations)
if(bestScoreValue == max(evalVals)){
bestScoreCounter <- bestScoreCounter + 1
}else{
bestScoreCounter <- 0
bestScoreValue <- max(evalVals)
}
if(bestScoreCounter == earlyStop && earlyStop != 0)
break
}
gaTime <- (proc.time() - ptr)[3]
if(verbose)
print(paste("5. Prettying Results", round((proc.time() - runningTime)[3], 3)))
# Pretty up our data for returning
rownames(population) <- paste("Solution", 1:nrow(population))
colnames(population) <- colnames(data)
rownames(evalSumm) <- paste("Iteration", 1:nrow(evalSumm))
colnames(evalSumm) <- c("Worst", "25%ile", "Median", "Mean", "75%ile", "Best")
evalVals <- matrix(evalVals[order(evalVals, decreasing=TRUE)], 1, length(evalVals))
colnames(evalVals) <- paste("Solution ", 1:length(evalVals))
rownames(evalVals) <- "Score"
# Columns picked by the best (first, highest-scoring) solution
selIndex <- which(population[1,] == 1)
sel <- colnames(data)[selIndex]
# Get the nonselected columns
nonSel <- colnames(data)[-selIndex]
# Plot scoring summary
if(plot)
gaPlot(evalSumm)
return(list(scoreSumm=evalSumm, solutions=population, scores=evalVals, time=gaTime, selected=sel, nonSelected=nonSel, selectedIndex=selIndex))
}
# Run Gen.Alg numRuns times and keep the columns selected by the best
# solution in at least a `consensus` fraction of the runs.
# ... is forwarded to Gen.Alg (iters, popSize, penalty, etc.).
# Returns list(solutions = best solution per run (columns), consSol =
# binary consensus vector, selectedIndex = indices where consSol == 1).
Gen.Alg.Consensus <- function(data, covars, consensus=.5, numRuns=10, parallel=FALSE, cores=3, ...){
if(missing(data) || missing(covars))
stop("data and/or covars are missing.")
if(consensus <= 0 || consensus > 1)
stop("consensus must be greater than 0 and equal or less than 1")
# Run the GA X times, optionally across a doParallel cluster
if(parallel){
cl <- parallel::makeCluster(min(cores, numRuns))
doParallel::registerDoParallel(cl)
tryCatch({
# NOTE(review): .combine=list with .multicombine=TRUE can produce a
# nested list if foreach combines results in batches (large numRuns);
# confirm numRuns stays small or the result is flattened downstream.
gaRes <- foreach::foreach(i=1:numRuns, .combine=list, .multicombine=TRUE, .inorder=FALSE, .packages=c("vegan", "HMP")) %dopar%{
tempResults <- Gen.Alg(data, covars, plot=FALSE, verbose=FALSE, ...)
return(tempResults)
}
}, finally = {
parallel::stopCluster(cl) # Close the parallel connections
}
)
}else{
gaRes <- vector("list", numRuns)
for(i in 1:numRuns)
gaRes[[i]] <- Gen.Alg(data, covars, plot=FALSE, verbose=FALSE, ...)
}
# Get all the best solutions (first row of each run's sorted population)
bestSols <- sapply(gaRes, function(x){x$solutions[1,]})
# Consensus: a column is kept when selected in >= consensus share of runs
consSol <- (rowSums(bestSols) >= (numRuns * consensus)) * 1
# Get the selected Index's
selInd <- which(consSol == 1)
return(list(solutions=bestSols, consSol=consSol, selectedIndex=selInd))
}
### ~~~~~~~~~~~~~~~~~~~~~
### Plot functions
### ~~~~~~~~~~~~~~~~~~~~~
# Stacked horizontal barchart of per-sample taxa proportions.
# data: count matrix (samples x taxa); each sample's counts are
#       normalized to proportions before plotting.
# title: main plot title.
Barchart.data <- function(data, title="Taxa Proportions"){
  if(missing(data))
    stop("data missing.")
  # apply() over rows yields a taxa-by-sample matrix, which is the
  # orientation barplot() stacks column by column
  props <- apply(data, 1, function(counts) counts/sum(counts))
  barplot(props, horiz=TRUE, col=rainbow(ncol(data)),
    axisnames=FALSE, main=title, font.main=20, font.sub=16)
}
# Dot plot of the estimated taxa proportions (PI vector) by group, with
# optional confidence-interval error bars and optional log10 y-axis.
#
# estPi:     the $MLE or $MOM element returned by Est.PI (uses $params).
# errorBars: TRUE draws Upper/Lower error bars; FALSE connects points
#            per group with lines instead.
# logScale:  log10-scale the y-axis and note it in the axis label.
# main/ylab: title and y-axis label.
Plot.PI <- function(estPi, errorBars=TRUE, logScale=FALSE, main="PI Vector", ylab="Fractional Abundance"){
  if(missing(estPi))
    stop("estPi is missing.")
  # Center the plot title
  ggplot2::theme_update(plot.title=ggplot2::element_text(hjust=0.5))
  # Base plot: one colored point per taxa/group
  piPlot <- ggplot2::ggplot(estPi$params, ggplot2::aes_string(y="PI", x="Taxa", colour="Group")) +
    ggplot2::geom_point() +
    ggplot2::theme(legend.position = "top", text=ggplot2::element_text(size=15)) +
    ggplot2::labs(title=main, y=ylab, x="") +
    ggplot2::theme(axis.text.x=ggplot2::element_text(hjust=1, angle=45, size=10))
  # Error bars show the CI; otherwise connect the points per group
  if(errorBars){
    piPlot <- piPlot + ggplot2::geom_errorbar(ggplot2::aes_string(ymax="Upper", ymin="Lower"))
  }else{
    piPlot <- piPlot + ggplot2::geom_line(ggplot2::aes_string(group="Group"))
  }
  # Log scaling (single check; the original tested logScale twice)
  if(logScale){
    piPlot <- piPlot + ggplot2::scale_y_log10() +
      ggplot2::labs(y=paste(ylab, "(Logged)"))
  }
  print(piPlot)
}
# 2-D MDS scatter plot of all subjects across groups, colored by group.
# group.data: list of count matrices (subjects x taxa).
# retCords:   TRUE also returns the MDS coordinates.
Plot.MDS <- function(group.data, main="Group MDS", retCords=FALSE){
if(missing(group.data))
stop("group.data is missing.")
numGroups <- length(group.data)
# Make sure we have the same columns
taxaCounts <- sapply(group.data, ncol)
numTaxa <- taxaCounts[1]
if(any(taxaCounts != numTaxa)){
warning("Group columns do not match, running formatDataSets.")
group.data <- formatDataSets(group.data)
}
# Make sure we have group names
if(is.null(names(group.data))){
grpNames <- paste("Data Set", 1:numGroups)
}else{
grpNames <- names(group.data)
}
# Merge all the data sets together (rows stay in group order)
mData <- do.call("rbind", group.data)
# Get their mds location
# NOTE(review): getBC presumably returns 2-D Bray-Curtis MDS
# coordinates — confirm against its definition.
loc <- getBC(mData)
# One color per group, repeated for that group's subjects
availCols <- rainbow(numGroups)
cols <- NULL
for(i in 1:numGroups)
cols <- c(cols, rep(availCols[i], nrow(group.data[[i]])))
# Plot MDS
plot(loc, pch=16, ylab="MDS 2", xlab="MDS 1", col=cols, main=main)
legend("topright", legend=grpNames, pch=15, col=availCols)
if(retCords)
return(loc)
}
# Stacked barcharts of estimated taxa proportions for repeated-measures
# data, panelled either by group (x-axis = time) or by time (x-axis = group).
#
# group.data: list of count matrices, one per group/time combination.
# groups/times: group and time label for each element of group.data.
# plotByGrp:  TRUE panels by group with time on the x-axis; FALSE panels
#             by time with group on the x-axis.
# col:        bar colors (one per taxa); defaults to rainbow().
# conf:       confidence level passed to Est.PI.
# Returns the lattice trellis object (auto-printed at top level).
Plot.RM.Barchart <- function(group.data, groups, times, plotByGrp=TRUE, col=NULL, conf=.95){
  if(missing(group.data) || missing(groups) || missing(times))
    stop("group.data, groups and/or times are missing.")
  numSamps <- length(group.data)
  # Estimate the PI (proportion) vector for every data set
  myEst <- Est.PI(group.data, conf)
  params <- myEst$MLE$params
  # Attach group/time labels: one entry per taxa within each data set
  myGroups <- NULL
  myTimes <- NULL
  for(i in 1:numSamps){
    myGroups <- c(myGroups, rep(groups[i], ncol(group.data[[1]])))
    myTimes <- c(myTimes, rep(times[i], ncol(group.data[[1]])))
  }
  params$Grp <- as.character(myGroups)
  params$Time <- as.character(myTimes)
  if(is.null(col))
    col <- rainbow(length(unique(params$Taxa)))
  if(plotByGrp){ # one panel per group, time on the x-axis
    lattice::barchart(params$PI ~ params$Time | paste("Group", params$Grp),
      ylab="Fractional Abundance", xlab="Time",
      stack=TRUE, groups=params$Taxa, col=col,
      key=list(
        text=list(levels(params$Taxa)),
        points=list(pch=19, col=col),
        columns=5
      )
    )
  }else{ # one panel per time, group on the x-axis
    # (the original mislabeled this axis "Time" — it plots params$Grp)
    lattice::barchart(params$PI ~ params$Grp | paste("Time", params$Time),
      ylab="Fractional Abundance", xlab="Group",
      stack=TRUE, groups=params$Taxa, col=col,
      key=list(
        text=list(levels(params$Taxa)),
        points=list(pch=19, col=col),
        columns=5
      )
    )
  }
}
# Dotplot of estimated taxa proportions for repeated-measures data,
# panelled by time and colored by group, with optional CI error bars.
#
# group.data: list of count matrices, one per group/time combination.
# groups/times: group and time label for each element of group.data.
# errorBars:  TRUE draws horizontal Lower-Upper segments per point.
# col:        one color per group; defaults to rainbow().
# conf:       confidence level passed to Est.PI.
# alpha:      color transparency in [0, 1].
# Returns the lattice trellis object (auto-printed at top level).
Plot.RM.Dotplot <- function(group.data, groups, times, errorBars=TRUE, col=NULL, conf=.95, alpha=1){
if(missing(group.data) || missing(groups) || missing(times))
stop("group.data, groups and/or times are missing.")
numSamps <- length(group.data)
numGrps <- length(unique(groups))
### Get the pi params
myEst <- Est.PI(group.data, conf)
params <- myEst$MLE$params
### Add the group and time information to the params
### (one label per taxa row within each data set)
# NOTE(review): uses ncol(group.data[[1]]) for every data set; Est.PI has
# already equalized the columns when they differed — confirm this holds
# for the un-reformatted originals too.
myGroups <- NULL
myTimes <- NULL
for(i in 1:numSamps){
myGroups <- c(myGroups, rep(groups[i], ncol(group.data[[1]])))
myTimes <- c(myTimes, rep(times[i], ncol(group.data[[1]])))
}
params$Grp <- as.character(myGroups)
params$Time <- as.character(myTimes)
if(is.null(col))
col <- rainbow(numGrps)
### Add alpha to the colors (round-trip through RGB)
col <- apply(sapply(col, grDevices::col2rgb)/255, 2, function(x){grDevices::rgb(x[1], x[2], x[3], alpha=alpha)})
if(errorBars){
# Custom panel: plot the points, then a horizontal CI segment per point
lattice::dotplot(params$Taxa ~ params$PI | paste("Time", params$Time),
pch=19, groups=params$Grp, col=col,
ylab="Taxa", xlab="Fractional Abundance",
panel=lattice::panel.superpose,
panel.groups=function(x, y, subscripts, col, ...){
lattice::panel.xyplot(x, y, ...)
lattice::panel.segments(params$Lower[subscripts], y, params$Upper[subscripts], y, col=col)
},
key=list(
text=list(as.character(unique(params$Grp))),
points=list(pch=19, col=col)
)
)
}else{
lattice::dotplot(params$Taxa ~ params$PI | paste("Time", params$Time),
pch=19, groups=params$Grp, col=col,
ylab="Taxa", xlab="Fractional Abundance",
key=list(
text=list(as.character(unique(params$Grp))),
points=list(pch=19, col=col)
)
)
}
}
# Tornado plot of the overdispersion parameter (theta) per group with
# 95% normal-approximation confidence intervals.
#
# estPi: the $MLE or $MOM element returned by Est.PI (uses $theta, whose
#        columns are: group label, theta estimate, theta SE).
# main:  plot title.
Plot.Theta <- function(estPi, main="Theta by Group"){
  if(missing(estPi))
    stop("estPi is missing.")
  theta <- estPi$theta
  # CI bounds: estimate +/- 1.96 * SE
  # (the original named the upper bound "lci" as well — fixed to "uci")
  thetaci <- cbind(
    theta,
    lci = theta[,2] - 1.96*theta[,3],
    uci = theta[,2] + 1.96*theta[,3]
  )
  # Sort groups by their theta estimate; use the column vector thetaci[,2]
  # (the original ordered the one-column data frame thetaci[2])
  thetaci <- thetaci[order(thetaci[,2]),]
  xlim <- range(thetaci[,c(4, 5)])
  # Points at the estimates, one row per group
  plot(thetaci[,2], 1:nrow(theta), pch=16, yaxt="n", xlim=xlim,
    main=main, ylab="", xlab="Theta +/- 95% CI")
  grid(ny=15, lwd=2)
  axis(2, at=1:nrow(theta), labels=thetaci[,1])
  # Horizontal CI segments
  for (i in 1:nrow(theta))
    lines(c(thetaci[i, 4], thetaci[i, 5]), c(i, i))
}
# MDS scatter plot of all subjects plus each group's estimated PI vector,
# drawn as a larger triangle in the group's color.
# group.data: list of count matrices (subjects x taxa).
# pi:         the $MLE or $MOM element returned by Est.PI (uses $params).
# retCords:   TRUE also returns the MDS coordinates.
Plot.MDS.wPI <- function(group.data, pi, main="Group MDS", retCords=FALSE){
if(missing(group.data) || missing(pi))
stop("group.data and/or pi is missing.")
numGroups <- length(group.data)
# Make sure we have the same columns
taxaCounts <- sapply(group.data, ncol)
numTaxa <- taxaCounts[1]
if(any(taxaCounts != numTaxa)){
warning("Group columns do not match, running formatDataSets.")
group.data <- formatDataSets(group.data)
}
# Make sure we have group names
if(is.null(names(group.data))){
grpNames <- paste("Data Set", 1:numGroups)
}else{
grpNames <- names(group.data)
}
# Merge all the data sets together (rows stay in group order)
mData <- do.call("rbind", group.data)
numSubs <- nrow(mData)
# Reshape the PI estimates into one row per group, align the taxa
# columns with mData, and append those rows for joint ordination
numTaxa <- ncol(mData)
piData <- matrix(pi$params$PI, length(group.data), numTaxa, byrow=TRUE)
colnames(piData) <- pi$params$Taxa[1:numTaxa]
rownames(piData) <- unique(pi$params$Group)
piData <- piData[, colnames(mData)]
mData <- rbind(mData, piData)
# Get their mds location
# NOTE(review): getBC presumably returns 2-D Bray-Curtis MDS
# coordinates — confirm against its definition.
loc <- getBC(mData)
# One color per group: subject points first, then the appended PI rows
availCols <- rainbow(numGroups)
cols <- NULL
for(i in 1:numGroups)
cols <- c(cols, rep(availCols[i], nrow(group.data[[i]])))
cols <- c(cols, availCols)
# Subjects are small circles (pch 16); PI rows are large triangles (pch 17)
pch <- c(rep(16, numSubs), rep(17, numGroups))
cex <- c(rep(1, numSubs), rep(2, numGroups))
# Plot MDS
plot(loc, pch=pch, ylab="MDS 2", xlab="MDS 1", col=cols, main=main, cex=cex)
legend("topright", legend=grpNames, pch=15, col=availCols)
if(retCords)
return(loc)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Filter functions
### ~~~~~~~~~~~~~~~~~~~~~
# Equalize a list of taxa count data sets so every set shares the same
# taxa columns (union of all taxa, missing entries filled with 0), with
# columns sorted by decreasing total reads.
# group.data: list of 2+ count matrices/data frames (subjects x taxa).
# Returns the list with identical, aligned columns per data set.
formatDataSets <- function(group.data){
if(missing(group.data))
stop("group.data missing.")
# Make sure we have more than 1 data set
numGroups <- length(group.data)
if(numGroups < 2)
stop("At least 2 data sets are required.")
# Remove all 0 total read subjects from the data
group.data <- lapply(group.data, function(x){x[rowSums(x) > 0,, drop=FALSE]})
# Merge all the data together: transpose to taxa-rows and full-join by
# taxa name (by=0 merges on row names), accumulating subjects as columns.
# The first pass merges against NULL, i.e. an empty frame.
dataNames <- vector("list", numGroups)
newData <- NULL
for(i in 1:length(group.data)){
tempData <- group.data[[i]]
# Save the current row names so they can be restored after merging
dataNames[[i]] <- rownames(tempData)
newData <- merge(newData, t(group.data[[i]]), by=0, all=TRUE)
# merge() returns the key in column 1 ("Row.names"); move it back to
# the row names and drop it
rownames(newData) <- newData[,1]
newData <- newData[,-1]
}
# NAs mark taxa absent from a data set: those are 0 counts
newData[is.na(newData)] <- 0
newData <- t(newData)
# Remove any all 0 columns and sort taxa by decreasing total reads
newData <- newData[,colSums(newData) != 0, drop=FALSE]
newData <- newData[,order(colSums(newData), decreasing=TRUE)]
# Split the merged matrix back into one block of rows per data set
retData <- vector("list", numGroups)
base <- 0
for(i in 1:numGroups){
retData[[i]] <- newData[(base+1):(nrow(group.data[[i]])+ base),]
rownames(retData[[i]]) <- dataNames[[i]]
base <- base + nrow(group.data[[i]])
}
names(retData) <- names(group.data)
return(retData)
}
# Filter a taxa count matrix (subjects x taxa): drop low-read subjects and
# never-observed taxa, order the columns, and optionally collapse the rare
# taxa into a single "Other" column.
#
# data       - numeric matrix/data frame of counts (subjects x taxa).
# order.type - "data": order taxa by overall column sums (default);
#              "sample": sort counts within each subject independently.
# minReads   - subjects with total reads <= minReads are removed.
# numTaxa    - keep this many leading taxa, collapse the rest into "Other".
# perTaxa    - alternatively, keep the smallest leading set of taxa that
#              covers this fraction of all reads (exclusive with numTaxa).
# Returns the filtered (and possibly collapsed) matrix.
Data.filter <- function(data, order.type="data", minReads=0, numTaxa=NULL, perTaxa=NULL){
  if(missing(data))
    stop("data is missing.")
  if(tolower(order.type) != "data" && tolower(order.type) != "sample")
    stop(sprintf("'%s' not recognized, order.type must be 'data' or 'sample'", as.character(order.type)))
  # numTaxa and perTaxa are mutually exclusive cutoff rules
  if(!is.null(numTaxa) && !is.null(perTaxa))
    stop("numTaxa and perTaxa cannot be used at the same time")
  if(!is.null(numTaxa)){
    if(numTaxa > ncol(data) || numTaxa <= 0)
      stop(sprintf("numTaxa must be between 0 and %i.", ncol(data)))
  }
  if(!is.null(perTaxa)){
    if(perTaxa >= 1 || perTaxa <= 0)
      stop("perTaxa must be between 0 and 1.")
  }
  # Default: keep everything
  if(is.null(numTaxa) && is.null(perTaxa))
    numTaxa <- ncol(data)
  # Drop all subjects that don't have enough reads
  data <- data[rowSums(data)>minReads,, drop=FALSE]
  if(nrow(data) < 2)
    stop("minReads is too large and is excluding too many samples. Please try lowering its value.")
  # Drop taxa that were never observed
  data <- data[,colSums(data)>0, drop=FALSE]
  # Order the data based on order.type
  # (drop=FALSE guards the single-remaining-taxon case)
  if(tolower(order.type) == "sample"){
    data <- t(apply(data, 1, function(x){x[order(x, decreasing=TRUE)]}))
  }else{
    data <- data[,order(colSums(data), decreasing=TRUE), drop=FALSE]
  }
  # Percentage-based cutoff: smallest prefix of taxa covering perTaxa of reads
  if(!is.null(perTaxa)){
    cumSumReads <- cumsum(colSums(data)/sum(data))
    taxaAboveThrs <- which(cumSumReads > perTaxa)
    if(length(taxaAboveThrs) == 0){
      numTaxa <- 1
    }else{
      numTaxa <- min(taxaAboveThrs)
    }
  }
  if(numTaxa >= ncol(data)){
    retData <- data
  }else{
    # Collapse everything past the cutoff into one "Other" column
    otherData <- data[,-c(1:numTaxa), drop=FALSE]
    retData <- cbind(data[,1:numTaxa, drop=FALSE], Other=rowSums(otherData))
  }
  return(retData)
}
### ~~~~~~~~~~~~~~~~~~~~~
### MC functions
### ~~~~~~~~~~~~~~~~~~~~~
# Monte-Carlo power ("ha") / size ("hnull") estimate for the one-sample
# Z and T goodness-of-fit statistics.
# Nrs: per-subject read counts; fit: DM fit (uses fit$pi for the df).
# Returns a 1x2 matrix with columns zpval and tpval.
MC.ZT.statistics <- function(Nrs, numMC=10, fit, type="ha", siglev=0.05) {
  if(missing(Nrs) || missing(fit))
    stop("Nrs and/or fit missing.")
  if(tolower(type) != "ha" && tolower(type) != "hnull")
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  # Simulate numMC (Z, T) statistic pairs
  simStats <- matrix(0, numMC, 2)
  for(mc in 1:numMC)
    simStats[mc,] <- ZT.statistics.Hnull.Ha(Nrs, fit, type)
  # Separate the Z and T draws, dropping failed (NA) simulations
  zVals <- simStats[,1]
  zVals <- zVals[!is.na(zVals)]
  tVals <- simStats[,2]
  tVals <- tVals[!is.na(tVals)]
  # Chi-squared critical value at the requested significance level
  qAlpha <- qchisq(p=(1-siglev), df=length(fit$pi)-1, ncp=0, lower.tail=TRUE)
  # Rejection rates with a +1 continuity correction
  zpval <- (sum(zVals > qAlpha) + 1)/(length(zVals) + 1)
  tpval <- (sum(tVals > qAlpha) + 1)/(length(tVals) + 1)
  return(cbind(zpval, tpval))
}
# Monte-Carlo power ("ha") / size ("hnull") estimate for the one-sample
# Xsc goodness-of-fit statistic.
# Nrs: per-subject read counts; fit: DM fit; pi0: null pi vector
# (required when type="ha").
# Returns the estimated rejection rate.
MC.Xsc.statistics <- function(Nrs, numMC=10, fit, pi0=NULL, type="ha", siglev=0.05) {
  if(missing(Nrs) || missing(fit))
    stop("Nrs and/or fit missing.")
  if(is.null(pi0) && tolower(type) == "ha")
    stop("pi0 cannot be null with type 'ha'.")
  if(tolower(type) != "ha" && tolower(type) != "hnull")
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  # Simulate the statistic numMC times
  draws <- rep(0, numMC)
  for(mc in 1:numMC)
    draws[mc] <- Xsc.statistics.Hnull.Ha(Nrs, fit, type, pi0)
  # Drop failed (NA) simulations
  draws <- draws[!is.na(draws)]
  # Chi-squared critical value at the requested significance level
  qAlpha <- qchisq(p=(1-siglev), df=length(fit$pi)-1, ncp=0, lower.tail=TRUE)
  # Rejection rate with a +1 continuity correction
  pval <- (sum(draws > qAlpha) + 1)/(length(draws) + 1)
  return(pval)
}
# Monte-Carlo power ("ha") / size ("hnull") estimate for the Xmc
# several-sample test against a known pi0.
# group.Nrs: list of per-group read-count vectors; group.pi: per-group pi
# rows (required when type="ha"); group.theta: per-group theta values.
# Returns the estimated rejection rate.
MC.Xmc.statistics <- function(group.Nrs, numMC=10, pi0, group.pi, group.theta, type="ha", siglev=0.05) {
  if(missing(group.theta) || missing(pi0) || missing(group.Nrs))
    stop("group.Nrs, pi0 and/or group.theta missing.")
  if(missing(group.pi) && tolower(type) == "ha")
    stop("group.pi missing.")
  if(tolower(type) != "ha" && tolower(type) != "hnull")
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  numGroups <- length(group.Nrs)
  numTaxa <- length(pi0)
  # Build per-group parameters: under "ha" each group keeps its own pi
  # row, under "hnull" every group shares pi0
  groupParameter <- vector("list", numGroups)
  for (grp in 1:numGroups){
    grpPi <- if(tolower(type) == "ha") group.pi[grp,] else pi0
    groupParameter[[grp]] <- list(pi=grpPi, theta=group.theta[grp], nrs=group.Nrs[[grp]])
  }
  # Simulate the Xmc statistic numMC times
  draws <- rep(0, numMC)
  for(mc in 1:numMC)
    draws[mc] <- Xmc.statistics.Hnull.Ha(groupParameter, pi0)
  # Chi-squared critical value at the requested significance level
  qAlpha <- qchisq(p=(1-siglev), df=length(group.theta)*(numTaxa-1), ncp=0, lower.tail=TRUE)
  # Rejection rate with a +1 continuity correction
  pval <- (sum(draws > qAlpha) + 1)/(length(draws) + 1)
  return(pval)
}
# Monte-Carlo power ("ha") / size ("hnull") estimate for the Xmcupo
# several-sample test of a common (unknown) pi vector.
# group.Nrs: list of per-group read-count vectors; group.pi: per-group pi
# rows (required for "ha"); pi0: shared pi (required for "hnull");
# group.theta: per-group theta values.
# Returns the estimated rejection rate.
MC.Xmcupo.statistics <- function(group.Nrs, numMC=10, pi0, group.pi, group.theta, type="ha", siglev=0.05) {
if(missing(group.theta) || missing(group.Nrs))
stop("group.Nrs and/or group.theta missing.")
if(missing(group.pi) && tolower(type) == "ha")
stop("group.pi missing.")
if(missing(pi0) && tolower(type) == "hnull")
stop("pi0 missing.")
if(tolower(type) != "ha" && tolower(type) != "hnull")
stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
numGroups <- length(group.Nrs)
# Create the parameters for every group; under "ha" each group keeps its
# own pi row, under "hnull" every group shares pi0. numTaxa is assigned
# inside the loop but is the same for every iteration of a given type.
groupParameter <- vector("list", numGroups)
for (i in 1:numGroups){
if(tolower(type) == "ha"){
numTaxa <- ncol(group.pi)
tempPi <- group.pi[i,]
}else{
numTaxa <- length(pi0)
tempPi <- pi0
}
groupParameter[[i]] <- list(pi=tempPi, theta=group.theta[i], nrs=group.Nrs[[i]])
}
# Get all the Xmcupo values
XmcupoStatVector <- rep(0, numMC)
for(i in 1:numMC)
XmcupoStatVector[i] <- Xmcupo.statistics.Hnull.Ha(groupParameter)
# Chi-squared critical value at the requested significance level
qAlpha <- qchisq(p=(1-siglev), df=length(group.theta)*(numTaxa-1), ncp=0, lower.tail=TRUE)
# Rejection rate with a +1 continuity correction
pval <- (sum(XmcupoStatVector > qAlpha) + 1)/(length(XmcupoStatVector) + 1)
return(pval)
}
# Monte-Carlo power ("ha") / size ("hnull") estimate for the Xdc
# several-sample Dirichlet-Multinomial parameter test.
# group.Nrs: list of per-group read-count vectors; alphap: DM alpha
# (a single vector under "hnull", one row per group under "ha");
# est: "mom" or "mle" estimation passed through to the statistic.
# Returns the estimated rejection rate.
MC.Xdc.statistics <- function(group.Nrs, numMC=10, alphap, type="ha", siglev=0.05, est="mom") {
  if(missing(alphap) || missing(group.Nrs))
    stop("group.Nrs and/or alphap missing.")
  if(tolower(type) != "ha" && tolower(type) != "hnull")
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  if(tolower(est) != "mom" && tolower(est) != "mle")
    stop(sprintf("Est '%s' not found. Est must be 'mle' or 'mom'.", as.character(est)))
  numGroups <- length(group.Nrs)
  # alphap is a single vector under "hnull", a per-group matrix under "ha"
  numTaxa <- if(tolower(type) == "hnull") length(alphap) else ncol(alphap)
  # Simulate the Xdc statistic numMC times
  draws <- rep(0, numMC)
  for(mc in 1:numMC)
    draws[mc] <- Xdc.statistics.Hnull.Ha(alphap, group.Nrs, type, est)
  # Chi-squared critical value at the requested significance level
  qAlpha <- qchisq(p=(1-siglev), df=(numGroups-1)*numTaxa, ncp=0, lower.tail=TRUE)
  # Rejection rate with a +1 continuity correction
  pval <- (sum(draws > qAlpha) + 1)/(length(draws) + 1)
  return(pval)
}
# Monte-Carlo power/size simulation for the Xoc (equal overdispersion)
# statistic; returns the Monte-Carlo p-value at level siglev.
MC.Xoc.statistics <- function(group.Nrs, numMC=10, group.alphap, type="ha", siglev=0.05) {
    if(missing(group.alphap) || missing(group.Nrs))
        stop("group.Nrs and/or group.alphap missing.")
    typeLC <- tolower(type)
    if(typeLC != "ha" && typeLC != "hnull")
        stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
    numGroups <- length(group.Nrs)
    # Simulate the Xoc statistic numMC times
    XocStatVector <- vapply(seq_len(numMC), function(i) Xoc.statistics.Hnull.Ha(group.Nrs, group.alphap, type), numeric(1))
    # Chi-squared reference quantile
    qAlpha <- qchisq(p=(1-siglev), df=(numGroups-1), ncp=0, lower.tail=TRUE)
    # Monte-Carlo p-value with the +1 continuity correction
    (sum(XocStatVector > qAlpha) + 1)/(length(XocStatVector) + 1)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Sample functions
### ~~~~~~~~~~~~~~~~~~~~~
# One-sample mean test: compares the taxa frequencies of a single count
# data set against a known reference vector pi0 using the Xsc statistic.
#
# data: taxa count matrix (rows = samples, columns = taxa).
# pi0:  reference probability vector, one entry per taxa column.
# Returns list("Xsc statistics"=, "p value"=) with the chi-squared
# p-value on numTaxa-1 degrees of freedom.
Xsc.onesample <- function(data, pi0){
    if(missing(data) || missing(pi0))
        stop("data and/or pi0 missing.")
    numReadsSubs <- rowSums(data)
    # Check the data set has the same number of taxa as pi0
    # (fix: numTaxa was previously computed twice)
    numTaxa <- length(pi0)
    if(ncol(data) != numTaxa)
        stop("Every data set must have the same length as pi0")
    # Method-of-moments estimate of pi and theta
    fit.MoM <- DM.MoM(data)
    # Get Xsc and calculate pvalue
    Xsc <- Xsc.statistics(fit.MoM$pi, fit.MoM$theta, numReadsSubs, pi0)
    pval <- 1-pchisq(q=Xsc, df=numTaxa-1, ncp=0, lower.tail=TRUE)
    RAD.mean.test <- list("Xsc statistics"=Xsc, "p value"=pval)
    return(RAD.mean.test)
}
# Several-sample mean test against a known reference vector pi0 using
# the Xmc statistic; every data set must share pi0's taxa layout.
Xmc.sevsample <- function(group.data, pi0){
    if(missing(group.data) || missing(pi0))
        stop("group.data and/or pi0 missing.")
    # Every data set must have one column per entry of pi0
    numTaxa <- length(pi0)
    if(any(sapply(group.data, ncol) != numTaxa))
        stop("Every data set must have matching taxa, including pi0")
    numGroups <- length(group.data)
    # Method-of-moments pi/theta and per-sample read counts per group
    groupParameter <- lapply(group.data, function(grp){
        piHat <- colSums(grp)/sum(grp)
        list(pi=piHat, theta=weirMoM(grp, piHat)$theta, nrs=rowSums(grp))
    })
    # Xmc statistic and its chi-squared p-value
    Xmc <- Xmc.statistics(groupParameter, pi0)
    pval <- 1-pchisq(q=Xmc, df=numGroups*(numTaxa-1), ncp=0, lower.tail=TRUE)
    list("Xmc statistics"=Xmc, "p value"=pval)
}
# Several-sample test of common taxa frequencies (reference unknown)
# using the Xmcupo statistic.
Xmcupo.sevsample <- function(group.data){
    if(missing(group.data))
        stop("group.data is missing.")
    # Harmonize taxa columns across groups if they differ
    taxaCounts <- sapply(group.data, ncol)
    numTaxa <- taxaCounts[1]
    if(any(taxaCounts != numTaxa)){
        warning("Group columns do not match, running formatDataSets.")
        group.data <- formatDataSets(group.data)
        numTaxa <- ncol(group.data[[1]])
    }
    numGroups <- length(group.data)
    # Method-of-moments pi/theta and per-sample read counts per group
    groupParameter <- lapply(group.data, function(grp){
        piHat <- colSums(grp)/sum(grp)
        list(pi=piHat, theta=weirMoM(grp, piHat)$theta, nrs=rowSums(grp))
    })
    # Xmcupo statistic and its chi-squared p-value
    Xmcupo <- Xmcupo.statistics(groupParameter)
    pval <- 1-pchisq(q=Xmcupo, df=(numGroups-1)*(numTaxa-1), ncp=0, lower.tail=TRUE)
    list("Xmcupo statistics"=Xmcupo, "p value"=pval)
}
# Several-sample test of equal overdispersion (theta) across groups
# using the Xoc likelihood-ratio statistic.
Xoc.sevsample <- function(group.data, epsilon=10^(-4)){
    if(missing(group.data))
        stop("group.data missing.")
    # Harmonize taxa columns across groups if they differ
    taxaCounts <- sapply(group.data, ncol)
    if(any(taxaCounts != taxaCounts[1])){
        warning("Group columns do not match, running formatDataSets.")
        group.data <- formatDataSets(group.data)
    }
    numGroups <- length(group.data)
    # Xoc statistic and its chi-squared p-value on numGroups-1 df
    Xoc <- Xoc.statistics(group.data, epsilon)
    pval <- 1-pchisq(q=Xoc, df=numGroups-1, ncp=0, lower.tail=TRUE)
    list("Xoc statistics"=Xoc, "p value"=pval)
}
# Several-sample test of a common Dirichlet-multinomial distribution
# across groups via the Xdc likelihood-ratio statistic; est selects the
# MLE ("mle", via dirmult) or method-of-moments ("mom") fit.
Xdc.sevsample <- function(group.data, epsilon=10^(-4), est="mom"){
    if(missing(group.data))
        stop("group.data missing.")
    estLC <- tolower(est)
    if(estLC != "mle" && estLC != "mom")
        stop(sprintf("Est '%s' not found. Est must be 'mle' or 'mom'.", as.character(est)))
    # Harmonize taxa columns across groups if they differ
    taxaCounts <- sapply(group.data, ncol)
    numTaxa <- taxaCounts[1]
    if(any(taxaCounts != numTaxa)){
        warning("Group columns do not match, running formatDataSets.")
        group.data <- formatDataSets(group.data)
        numTaxa <- ncol(group.data[[1]])
    }
    numGroups <- length(group.data)
    # Pick the estimator, then compute the chi-squared p-value
    Xdc <- if(estLC == "mle") Xdc.statistics(group.data, epsilon) else Xdc.statistics.MoM(group.data)
    pval <- 1-pchisq(q=Xdc, df=(numGroups-1)*numTaxa, ncp=0, lower.tail=TRUE)
    list("Xdc statistics"=Xdc, "p value"=pval)
}
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Internal
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### ~~~~~~~~~~~~~~~~~~~~~
### Other functions
### ~~~~~~~~~~~~~~~~~~~~~
# Deprecated alias kept for backwards compatibility; forwards to the
# correctly spelled Kullback.Leibler with the same arguments.
kullbackLeiber <- function(data, plot=TRUE, parallel=FALSE, cores=3){
    warning("This function has been spellchecked. Please use 'Kullback.Leibler' instead.")
    Kullback.Leibler(data, plot, parallel, cores)
}
# Dirichlet-multinomial log-likelihood of count data given the parameter
# vector alphap. All-zero taxa columns and zero alphap entries are
# dropped before evaluating (assumes the two filters stay aligned, as
# they do when alphap is proportional to the column frequencies).
loglikDM <- function(data, alphap){
    data <- data[, colSums(data) != 0, drop=FALSE]
    alphap <- alphap[alphap != 0]
    alphaSum <- sum(alphap)
    readsPerSample <- rowSums(data)
    # Normalizing-constant part: one term per sample
    constPart <- sum(lgamma(readsPerSample+1) + lgamma(alphaSum) - lgamma(alphaSum+readsPerSample))
    # Per-cell part: lgamma(x+alpha) - lgamma(x+1) - lgamma(alpha)
    alphaMat <- matrix(alphap, nrow(data), length(alphap), byrow=TRUE)
    cellPart <- sum(lgamma(data+alphaMat) - lgamma(data+1) - lgamma(alphaMat))
    constPart + cellPart
}
# Weir's method-of-moments estimator of the overdispersion parameter
# theta, given count data and a taxa frequency estimate MoM.
# Returns list(theta=, se=); se is NULL unless se=TRUE. A single-sample
# data set has no between-sample variance, so theta (and se) are 0.
weirMoM <- function(data, MoM, se=FALSE){
    numTaxa <- ncol(data)
    numSamp <- nrow(data)
    # Tiny offset guards the divisions against empty rows
    sampReads <- rowSums(data) + 0.000001
    taxaReads <- colSums(data)
    if(numSamp == 1)
        return(list(theta=0, se=0))
    props <- data/sampReads
    # Between-sample (MSP) and within-sample (MSG) mean squares
    piMat <- matrix(MoM, numSamp, numTaxa, byrow=TRUE)
    MSP <- sum(rowSums((props - piMat)^2) * sampReads)/(numSamp-1)
    MSG <- sum(rowSums(props * (1-props)) * sampReads)/(sum(taxaReads)-numSamp)
    # Effective sample-size correction
    nc <- (sum(sampReads) - sum(sampReads^2)/sum(sampReads))/(numSamp-1)
    theta <- (MSP-MSG)/(MSP + (nc-1)*MSG)
    std.er <- NULL
    if(se)
        std.er <- sqrt(2 * (1-theta)^2/(numSamp-1) * ((1+(nc-1) * theta)/nc)^2)
    list(theta=theta, se=std.er)
}
# Project samples onto the first two axes of a non-metric MDS of the
# Bray-Curtis distances between their relative abundance profiles.
getBC <- function(data){
    relAbund <- data/rowSums(data)
    ### isoMDS cannot handle zero distances, so drop duplicated profiles
    dups <- duplicated(relAbund)
    if(any(dups))
        relAbund <- relAbund[!dups,]
    brayDist <- vegan::vegdist(relAbund, method="bray")
    nmds <- MASS::isoMDS(brayDist, trace=FALSE)
    scaled <- vegan::postMDS(nmds$points, brayDist)
    vegan::scores(scaled)[, 1:2]
}
# Estimate the pooled taxa frequency vector pi0 across several groups,
# weighting each group's MoM estimate inversely by its Xsc variance
# factor (same weighting as Xmcupo.statistics).
#
# group.data: list of taxa count matrices (rows = samples, cols = taxa).
# Returns a named vector pi0, one entry per taxa kept (taxa that are 0
# in every group are dropped).
pioest <- function(group.data){
    if(missing(group.data))
        stop("data.groups missing.")
    # Make sure we have the same columns
    taxaCounts <- sapply(group.data, ncol)
    numTaxa <- taxaCounts[1]
    if(any(taxaCounts != numTaxa)){
        warning("Group columns do not match, running formatDataSets.")
        group.data <- formatDataSets(group.data)
        numTaxa <- ncol(group.data[[1]])
    }
    numGroups <- length(group.data)
    # Pull out pi and the Xsc variance factor for every group
    pis <- matrix(0, numTaxa, numGroups)
    xscs <- rep(0, numGroups)
    for(i in 1:numGroups){
        tempData <- group.data[[i]]
        numReadsSubs <- rowSums(tempData)
        totalReads <- sum(tempData)
        pi <- colSums(tempData)/totalReads
        theta <- weirMoM(tempData, pi)$theta
        xscs[i] <- (theta * (sum(numReadsSubs^2)-totalReads) + totalReads) / totalReads^2
        pis[,i] <- pi
    }
    # Remove any taxa that are 0 in every group, remembering which rows
    # survive so the names stay aligned (fix: previously the full column
    # name vector was assigned, which errors when rows were dropped);
    # drop=FALSE keeps the matrix shape when only one taxa remains
    keep <- rowSums(pis) != 0
    pis <- pis[keep,, drop=FALSE]
    # Inverse-variance weighted average of the group estimates.
    # (fix: pis/xscs recycled xscs down the taxa dimension, dividing by
    # the wrong group's factor whenever numTaxa != numGroups; transpose
    # first so the division is per group, as in Xmcupo.statistics)
    pi0 <- colSums(t(pis)/xscs)/sum(1/xscs)
    names(pi0) <- colnames(group.data[[1]])[keep]
    return(pi0)
}
### ~~~~~~~~~~~~~~~~~~~~~
### ga functions
### ~~~~~~~~~~~~~~~~~~~~~
# GA fitness function: correlation between the covariate distances and
# the combined distances of the selected columns, penalized by the
# fraction of columns selected. Invalid solutions (empty, too small,
# too large) score -2, worse than any achievable correlation.
gaScoring <- function(indices, covarDists, colDists, distType, lambda, minSolLen, maxSolLen) {
    BAD_RETURN <- -2 # Return worse than cor could do
    numSel <- sum(indices)
    # Reject empty, undersized, or oversized selections
    if(numSel == 0)
        return(BAD_RETURN)
    if(!is.null(minSolLen) && numSel < minSolLen)
        return(BAD_RETURN)
    if(!is.null(maxSolLen) && numSel > maxSolLen)
        return(BAD_RETURN)
    selected <- which(indices == 1)
    # Combine the per-column distance vectors by distance type
    if(distType == "gower"){
        combined <- Reduce("+", colDists[selected])/numSel
    }else if(distType == "euclidean"){
        combined <- sqrt(Reduce("+", colDists[selected]))
    }
    # Correlation penalized by the selected-column fraction
    stats::cor(combined, covarDists) - lambda * (numSel/length(indices))
}
# Build the initial GA population: when the population is large enough,
# the first SUGGESTION_COUNT rows are deterministic starting points made
# by thresholding the column means at quantile cut-offs; all remaining
# rows are random 0/1 vectors biased ZERO_TO_ONE_RATIO:1 towards zero.
gaCreation <- function(data, popSize){
    ZERO_TO_ONE_RATIO <- 10 # Ratio of 0 to 1s for the random data
    SUGGESTION_COUNT <- 10 # Number starting points we should make from the data
    numCols <- ncol(data)
    population <- matrix(NA, popSize, numCols)
    # Seed deterministic starting points when the population is big enough
    if(popSize >= SUGGESTION_COUNT){
        colMean <- apply(data, 2, mean)
        # Threshold the column means at evenly spaced quantile cut-offs
        cutoffs <- stats::quantile(colMean, seq(.05, 1, 1/SUGGESTION_COUNT))
        for(i in seq_along(cutoffs))
            population[i,] <- ifelse(colMean >= cutoffs[i], 1, 0)
        numCreated <- SUGGESTION_COUNT
    }else{
        numCreated <- 0
    }
    # Fill any remaining population slots with random solutions
    if(popSize != SUGGESTION_COUNT){
        for(child in (numCreated+1):popSize)
            population[child,] <- sample(c(rep(0, ZERO_TO_ONE_RATIO), 1), numCols, replace=TRUE)
    }
    return(population)
}
# Plot the per-iteration GA score summary: column 4 as the main black
# line, column 6 in red and column 1 in blue, with a legend built from
# the matching column names of the summary matrix.
# NOTE(review): the column indices 4/6/1 presumably map to specific GA
# summary statistics (e.g. mean/median/max) -- confirm against the GA
# package's summary-matrix layout.
gaPlot <- function(evalSumm){
plot(evalSumm[,4], type="l", ylab="Score", ylim=c(0, 1), lwd=2, main="Eval Scores by Iteration", xlab="Iteration")
lines(evalSumm[,6], col="red", lwd=2)
lines(evalSumm[,1], col="blue", lwd=2)
legend("topleft", colnames(evalSumm)[c(4, 6, 1)], pch=16, col=c("black", "red", "blue"))
}
### ~~~~~~~~~~~~~~~~~~~~~
### rpart functions
### ~~~~~~~~~~~~~~~~~~~~~
# rpart "init" callback for the custom Dirichlet-multinomial method.
# Resets the package-level node-label counter and returns the structure
# rpart expects: the response, parameters, response counts, and a
# summary function used when the tree is printed.
rpartInit <- function(y, offset, parms, wt){
hmp.pkg.env$EVAL_COUNT_RPART <- 1 # reset eval counts
sfun <- function(yval, dev, wt, ylevel, digits){
paste(" mean=", round(mean(yval), 3), sep="")
}
# Rebind sfun to the global environment -- presumably to avoid storing
# this call's environment (and its data) inside the tree object; confirm
environment(sfun) <- .GlobalEnv
list(y=y, parms=NULL, numresp=1, numy=ncol(y), summary=sfun)
}
# rpart "eval" callback: node deviance is the negated DM log-likelihood;
# the label is a running counter so every node gets a unique id.
rpartEval <- function(y, wt, parms){
    # Unique node label from the package-level counter
    nodeLabel <- hmp.pkg.env$EVAL_COUNT_RPART
    hmp.pkg.env$EVAL_COUNT_RPART <- nodeLabel + 1
    deviance <- DM.MoM(y)$loglik * -1
    # An infinite log-likelihood would poison the LRT, treat it as 0
    if(deviance == Inf || deviance == -Inf)
        deviance <- 0
    list(label=nodeLabel, deviance=deviance)
}
# rpart "split" callback for the Dirichlet-multinomial method. Each
# candidate split is scored by the likelihood-ratio statistic of the
# two child-group DM fits against the parent fit. Returns the goodness
# and direction vectors in the layout rpart expects (length(x)-1
# goodness entries for continuous predictors, per-level otherwise).
rpartSplit <- function(y, wt, x, parms, continuous){
# Parent-node DM log-likelihood (baseline for the LRT)
LL <- DM.MoM(y)$loglik
uniqX <- sort(unique(x))
numUni <- length(uniqX) - 1
# Determine what we are comparing
if(continuous){
# rpart expects one goodness entry per ordered gap between rows
numTests <- length(x) - 1
dir <- rep(-1, numTests)
}else{
numTests <- numUni
dir <- uniqX
}
# Run through every comparison.
# NOTE(review): for continuous predictors only the numUni unique-value
# cut points are evaluated; the other LRT entries stay 0. Presumably
# rpart passes x sorted so tied values share one cut point -- confirm.
LRT <- rep(0, numTests)
for(i in 1:numUni){
if(continuous){
# Left child: all rows at or below the i-th unique value
id <- which(x <= uniqX[i])
grp1 <- y[id,, drop=FALSE]
grp2 <- y[-id,, drop=FALSE]
}else{
# Children split by membership in the i-th level
grp1 <- y[x == uniqX[i],, drop=FALSE]
grp2 <- y[x != uniqX[i],, drop=FALSE]
}
# Skip any 1 subject groups
if(nrow(grp1) == 1 || nrow(grp2) == 1)
next
LLgrp1 <- DM.MoM(grp1)$loglik
LLgrp2 <- DM.MoM(grp2)$loglik
# Skip any infinite LL comparisons (makes lrt 0)
if(LLgrp1 == Inf || LLgrp2 == Inf)
next
# LRT score; for continuous x it is stored at the gap index of the
# last row that falls into the left child
if(continuous){
LRT[id[length(id)]] <- -2*(LL-LLgrp1-LLgrp2)
}else{
LRT[i] <- -2*(LL-LLgrp1-LLgrp2)
}
}
ret <- list(goodness=LRT, direction=dir)
return(ret)
}
# Cross-validated error for every pruning level of a fitted DM rpart
# tree.
#
# data:       taxa count matrix (rows = samples).
# covars:     covariate data frame aligned with data's rows.
# rpartRes:   the full rpart fit (supplies the cp table).
# minsplit/minbucket: rpart controls passed to the per-fold refits.
# numCV:      number of cross-validation folds.
# parallel/cores: run the folds in parallel via doParallel when TRUE.
# Returns list(subTree=, errorRate=, ciInfo=) where ciInfo is the cp
# table augmented with MSE confidence-interval columns.
#
# Fix: removed a dead pre-loop `errorRate <- vector(...)` that was
# unconditionally overwritten in both the parallel and serial branches.
rpartCV <- function(data, covars, rpartRes, minsplit, minbucket, numCV, parallel, cores){
# Pull out cp info
cpTable <- rpartRes$cptable
numCPLvls <- nrow(cpTable)
# Replace each interior cp with the geometric mean of its neighbors so
# pruning happens strictly between the fitted breakpoints
if(numCPLvls > 2){
oldCPs <- cpTable[,1]
cpTable[1, 1] <- Inf
cpTable[numCPLvls, 1] <- 0
for(m in 2:(numCPLvls-1))
cpTable[m, 1] <- sqrt(oldCPs[m] * oldCPs[m-1])
}
# Randomly assign every sample to one of numCV folds
numSub <- nrow(data)
dropNums <- cut(1:numSub, numCV, FALSE)
dropGrps <- sample(dropNums, numSub)
if(parallel){
cl <- parallel::makeCluster(min(cores, numCV))
doParallel::registerDoParallel(cl)
tryCatch({
cvRes <- foreach::foreach(k=1:numCV, .combine=append, .multicombine=FALSE, .inorder=FALSE, .errorhandling="pass", .packages=c("rpart", "HMP")) %dopar%{
cvRes <- rpartCVSingle(data, covars, k, cpTable, dropGrps, numCPLvls, minsplit, minbucket)
return(list(cvRes))
}
}, finally = {
parallel::stopCluster(cl) # Close the parallel connections
}
)
errorRate <- lapply(cvRes, function(x)x[[1]])
subTree <- lapply(cvRes, function(x)x[[2]])
}else{
errorRate <- vector("list", numCV)
subTree <- vector("list", numCV)
for(k in 1:numCV){
cvRes <- rpartCVSingle(data, covars, k, cpTable, dropGrps, numCPLvls, minsplit, minbucket)
errorRate[[k]] <- cvRes[[1]]
subTree[[k]] <- cvRes[[2]]
}
}
# Per-fold RMSE matrix (rows = cp levels, columns = folds)
error <- sapply(errorRate, sqrt)
# Mean, t confidence interval and standard error at each cp level
ciInfo <- matrix(NA, numCPLvls, 4)
for(j in 1:numCPLvls){
ciInfo[j, 1] <- mean(error[j,])
ciInfo[j, 2:3] <- rpartCI(error[j,], 0.95)
ciInfo[j, 4] <- sd(error[j,])/sqrt(ncol(error))
}
ciInfo <- cbind(ciInfo, rank(ciInfo[,1]))
colnames(ciInfo) <- c("MSE", "Lower", "Upper", "SE", "Rank")
# Add ci info back into cp table
cpTable2 <- cbind(rpartRes$cptable, ciInfo)
return(list(subTree=subTree, errorRate=do.call("cbind", errorRate), ciInfo=cpTable2))
}
# Fit and score one cross-validation fold: refit the DM rpart tree
# without the fold's samples, prune it at every cp level, and measure
# the squared distance from each held-out sample's abundance profile to
# the mean profile of its predicted terminal node.
rpartCVSingle <- function(data, covars, cvNum, cpTable, dropGrps, numCPLvls, minsplit, minbucket){
# Get location of data to drop
dropLoc <- which(dropGrps == cvNum)
# Separate dropped data
subData <- data[-dropLoc,, drop=FALSE]
dropData <- data[dropLoc,, drop=FALSE]
# Separate dropped covars
subCovs <- covars[-dropLoc,, drop=FALSE]
dropCovs <- covars[dropLoc,, drop=FALSE]
# Run rpart on smaller data
subRpartRes <- DM.Rpart.Base(subData, subCovs, FALSE, minsplit, minbucket)$fullTree
# Convert counts to relative abundance for the distance computations
subData <- subData/(rowSums(subData))
dropData <- dropData/(rowSums(dropData))
# Calculate relative error at every pruning level
MSEn <- rep(NA, numCPLvls)
subTree <- vector("list", numCPLvls)
for(i in 1:numCPLvls){
subTree[[i]] <- rpart::prune(subRpartRes, cp=cpTable[i, 1])
pruneSubTree <- subTree[[i]]
# Predicted terminal-node labels for kept and dropped samples
subPre <- predict(pruneSubTree, newdata=subCovs, type="vector")
dropPre <- predict(pruneSubTree, newdata=dropCovs, type="vector")
## 1.a. Distance: new subject to the mean Taxa in the signed Terminal node
tempDist <- 0
for(j in 1:length(dropPre)){
tempVal <- (dist(rbind(dropData[j,, drop=FALSE], colMeans(subData[subPre == dropPre[j],, drop=FALSE]))))^2
if(!is.na(tempVal))
tempDist <- tempDist + tempVal
}
MSEn[i] <- tempDist/length(dropPre)
}
# Label each error by its tree size (split count + 1)
names(MSEn) <- cpTable[,2] + 1
return(list(errorRate=MSEn, subTree=subTree))
}
# Two-sided t confidence interval for the mean of a numeric vector.
# interval is the confidence level (e.g. 0.95).
# Returns c(Lower=, Upper=).
rpartCI <- function(vector, interval) {
    numSamp <- length(vector)
    centerVal <- mean(vector)
    # Half-width from the t distribution with numSamp-1 df
    halfWidth <- qt((interval + 1)/2, df = numSamp - 1) * sd(vector)/sqrt(numSamp)
    c("Lower"=centerVal - halfWidth, "Upper"=centerVal + halfWidth)
}
# Summarize a fitted rpart object's split table: tag each row of
# fit$splits as a primary, competing, or surrogate split and record
# which primary split it belongs to.
# NOTE(review): relies on fit$splits rows being ordered, per node, as
# the primary split followed by its ncompete competing and nsurrogate
# surrogate splits -- confirm against rpart's split-table layout.
rpartCS <- function(fit) {
# Pull out split information
splitNames <- rownames(fit$splits)
allVars <- colnames(attributes(fit$terms)$factors)
# Rename splits to fit into data frame
rownames(fit$splits) <- 1:nrow(fit$splits)
splits <- data.frame(fit$splits)
splits$var <- splitNames
splits$type <- ""
splits$primary <- ""
# Get the frame (one row per tree node); keep only splitting nodes
frame <- as.data.frame(fit$frame)
frame$var <- as.character(frame$var)
primeFr <- frame[frame$var != "<leaf>",]
# Go through every node and check competing and surrogate splits
index <- 0
for(i in 1:nrow(primeFr)){
spltPrimName <- paste("Split", primeFr$yval[i], primeFr$var[i])
# Fill in primary info
index <- index + 1
splits$type[index] <- "primary"
splits$primary[index] <- spltPrimName
# Check for competing splits
if(primeFr$ncompete[i] > 0){
for(j in 1:primeFr$ncompete[i]){
index <- index + 1
splits$type[index] <- "competing"
splits$primary[index] <- spltPrimName
}
}
# Check for surrogate splits
if(primeFr$nsurrogate[i] > 0){
for(j in 1:primeFr$nsurrogate[i]){
index <- index + 1
splits$type[index] <- "surrogate"
splits$primary[index] <- spltPrimName
}
}
}
return(splits)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Stat functions
### ~~~~~~~~~~~~~~~~~~~~~
# Xmcupo statistic: several-sample test of common taxa frequencies when
# the reference vector is unknown; it is estimated by inverse-variance
# weighting of the per-group pi estimates.
#
# groupParameter: list with one list(pi=, theta=, nrs=) per group.
# Returns the scalar Xmcupo statistic.
Xmcupo.statistics <- function(groupParameter){
    numGroups <- length(groupParameter)
    numTaxa <- length(groupParameter[[1]]$pi)
    # Pull out pi and the Xsc variance factor for every group
    pis <- matrix(0, numTaxa, numGroups)
    xscs <- rep(0, numGroups)
    for(i in 1:numGroups){
        theta <- groupParameter[[i]]$theta
        numReads <- groupParameter[[i]]$nrs
        totalReads <- sum(numReads)
        # Variance scale factor for this group
        xscs[i] <- (theta * (sum(numReads^2)-totalReads) + totalReads) / totalReads^2
        pis[,i] <- groupParameter[[i]]$pi
    }
    # Remove any taxa that are 0 in every group
    # (fix: drop=FALSE keeps the matrix shape when only one taxa remains,
    # otherwise the colSums/t() calls below would fail on a bare vector)
    pis <- pis[rowSums(pis)!=0,, drop=FALSE]
    # Inverse-variance weighted pooled estimate of pi
    pi0 <- colSums(t(pis)/xscs)/sum(1/xscs)
    # Weighted chi-squared-type distance of each group's pi from pi0
    Xmcupo <- sum(colSums((pis-pi0)^2/pi0)/xscs)
    return(Xmcupo)
}
# Z statistic for multinomial goodness of fit across samples, built
# from second-factorial-moment sums over taxa and samples.
Z.statistics <- function(data){
    numTaxa <- ncol(data)
    taxaTotals <- colSums(data)
    sampTotals <- rowSums(data)
    grandTotal <- sum(data)
    # Sum over taxa of sum(x*(x-1)) scaled by each taxa total
    taxaTerm <- sum(apply(data, 2, function(col){sum((col-1)*col)})/taxaTotals)
    sampTerm <- sum(sampTotals*(sampTotals-1))
    (grandTotal*taxaTerm - sampTerm)/sqrt(2*(numTaxa-1) * sampTerm)
}
# T statistic: Pearson-type discrepancy between the observed counts and
# the outer product of the sample/taxa margins.
T.statistics <- function(data){
    taxaTotals <- colSums(data)
    sampTotals <- rowSums(data)
    grandTotal <- sum(data)
    expected <- (sampTotals %*% t(taxaTotals))/grandTotal
    sum(colSums((data - expected)^2) / taxaTotals)
}
# Xmc statistic: sum of the per-group Xsc statistics against a known
# reference vector pi0.
Xmc.statistics <- function(groupParameter, pi0){
    perGroupXsc <- vapply(groupParameter, function(gp){
        Xsc.statistics(gp$pi, gp$theta, gp$nrs, pi0)
    }, numeric(1))
    sum(perGroupXsc)
}
# Xsc statistic: generalized Wald distance between an estimated pi1 and
# a reference pi0 under the DM covariance model. The multinomial
# covariance of a full simplex vector is singular, hence the
# Moore-Penrose pseudo-inverse.
Xsc.statistics <- function(pi1, theta, numReads, pi0){
    totalReads <- sum(numReads)
    # DM variance scale factor
    scaleFactor <- (theta*(sum(numReads^2)-totalReads) + totalReads) / totalReads^2
    covMat <- scaleFactor * (diag(pi0)-pi0 %*% t(pi0))
    piDiff <- pi1-pi0
    as.vector(t(piDiff) %*% MASS::ginv(covMat) %*% piDiff)
}
# Xoc statistic: likelihood-ratio test of equal overdispersion (theta)
# across groups. Each group is fitted by maximum likelihood (dirmult),
# then refitted jointly under a common theta.
Xoc.statistics <- function(group.data, epsilon=10^(-4)){
    numGroups <- length(group.data)
    thetas <- rep(0, numGroups)
    logliks <- rep(0, numGroups)
    pis <- vector("list", numGroups)
    # Separate MLE fit per group, seeded from the MoM theta
    for(g in 1:numGroups){
        momTheta <- DM.MoM(group.data[[g]])$theta
        fit <- dirmult::dirmult(group.data[[g]], initscalar=(1-momTheta)/momTheta, epsilon=epsilon, trace=FALSE)
        thetas[g] <- fit$theta
        logliks[g] <- fit$loglik
        pis[[g]] <- fit$pi
    }
    # Joint fit constrained to one shared theta across the groups
    equalFit <- dirmult::equalTheta(group.data, mean(thetas), epsilon, FALSE, pis)
    # -2 log likelihood ratio
    as.vector(-2*(equalFit$loglik-sum(logliks)))
}
# Xdc statistic: likelihood-ratio test of a common DM distribution
# across groups, using MLE fits (dirmult) seeded from the MoM theta.
Xdc.statistics <- function(group.data, epsilon=10^(-4)){
    # MLE log-likelihood of one count matrix
    fitLL <- function(counts){
        momTheta <- DM.MoM(counts)$theta
        dirmult::dirmult(counts, initscalar=(1-momTheta)/momTheta, epsilon=epsilon, trace=FALSE)$loglik
    }
    # Separate fit per group, then one pooled fit with groups stacked
    separateLL <- sapply(group.data, fitLL)
    pooledLL <- fitLL(do.call(rbind, group.data))
    # -2 log likelihood ratio
    -2*(pooledLL-sum(separateLL))
}
# Method-of-moments variant of the Xdc likelihood-ratio statistic:
# separate MoM fits per group against one pooled MoM fit.
Xdc.statistics.MoM <- function(group.data){
    separateLL <- sapply(group.data, function(x){DM.MoM(x)$loglik})
    pooledLL <- DM.MoM(do.call(rbind, group.data))$loglik
    # -2 log likelihood ratio
    -2*(pooledLL-sum(separateLL))
}
### ~~~~~~~~~~~~~~~~~~~~~
### Hnull / Ha functions
### ~~~~~~~~~~~~~~~~~~~~~
# Simulate one Xmcupo statistic: regenerate each group's counts from its
# DM parameters (gamma = pi*(1-theta)/theta), re-estimate pi/theta from
# the simulated data, and recompute the statistic.
Xmcupo.statistics.Hnull.Ha <- function(groupParameter){
numGroups <- length(groupParameter)
numTaxa <- length(groupParameter[[1]]$pi)
genGroupParameter <- vector("list", numGroups)
for(i in 1:numGroups){
pi <- groupParameter[[i]]$pi
theta <- groupParameter[[i]]$theta
numReads <- groupParameter[[i]]$nrs
# Generate a new set of data
genData <- Dirichlet.multinomial(numReads, pi*(1-theta)/theta)
genTotalReads <- sum(genData)
genPi <- colSums(genData)/genTotalReads
# Replace any 0 pi values with a small number
# This will subtract that value from the other data so a total value of 1 is maintained
# (Xmcupo divides by the pooled pi, so zero entries would be degenerate)
if(any(genPi==0)){
numZero <- sum(genPi==0)
numNonZero <- numTaxa - numZero
genPi[which(genPi!=0)] <- genPi[which(genPi!=0)] - numZero/(numNonZero*2*(genTotalReads+1))
genPi[which(genPi==0)] <- 1/(2*(genTotalReads+1))
}
genTheta <- weirMoM(genData, genPi)$theta
genGroupParameter[[i]] <- list(pi=genPi, theta=genTheta, nrs=numReads)
}
# Get the Xmcupo stats for the generated data
Xmcupo <- Xmcupo.statistics(genGroupParameter)
return(Xmcupo)
}
# Generate one data set under the null (plain multinomial from fit$pi)
# or the alternative (Dirichlet-multinomial from fit$gamma) and return
# its Z and T statistics as c(Z, T).
ZT.statistics.Hnull.Ha <- function(Nrs, fit, type){
    if(tolower(type) == "hnull"){
        simData <- Multinomial(Nrs, fit$pi)
    }else{
        simData <- Dirichlet.multinomial(Nrs, fit$gamma)
    }
    c(Z.statistics(simData), T.statistics(simData))
}
# Simulate one Xmc statistic: regenerate each group's counts from its
# DM parameters (gamma = pi*(1-theta)/theta), re-estimate pi/theta from
# the simulated data, and score against the reference pi0.
Xmc.statistics.Hnull.Ha <- function(groupParameter, pi0){
    simParameter <- lapply(groupParameter, function(gp){
        # Generate a new data set for this group
        simData <- Dirichlet.multinomial(gp$nrs, gp$pi*(1-gp$theta)/gp$theta)
        simPi <- colSums(simData)/sum(simData)
        list(pi=simPi, theta=weirMoM(simData, simPi)$theta, nrs=gp$nrs)
    })
    Xmc.statistics(simParameter, pi0)
}
# Simulate one Xsc statistic: regenerate data from the fitted DM
# parameters, re-estimate pi/theta, and score against a reference.
# NOTE(review): under "ha" the comparison target is pi0 while otherwise
# it is fit$pi -- presumably the generating fit plays the role of the
# truth in the null simulation; confirm against the calling MC routine.
Xsc.statistics.Hnull.Ha <- function(Nrs, fit, type, pi0){
# Generate a new set of data
genData <- Dirichlet.multinomial(Nrs, fit$gamma)
fit.gen <- DM.MoM(genData)
tempPi <- fit$pi
if(tolower(type) == "ha")
tempPi <- pi0
# Calculate Xsc stat
xsc <- Xsc.statistics(fit.gen$pi, fit.gen$theta, Nrs, tempPi)
return(xsc)
}
# Simulate one Xoc statistic: regenerate every group's counts from its
# Dirichlet parameters (a shared vector under "hnull", one matrix row
# per group under "ha") and recompute the statistic.
Xoc.statistics.Hnull.Ha <- function(group.Nrs, group.alphap, type){
    numGroups <- length(group.Nrs)
    isHa <- tolower(type) == "ha"
    simGroups <- vector("list", numGroups)
    for(g in 1:numGroups){
        shape <- if(isHa) group.alphap[g,] else group.alphap
        simGroups[[g]] <- Dirichlet.multinomial(group.Nrs[[g]], shape)
    }
    Xoc.statistics(simGroups)
}
# Simulate one Xdc statistic: regenerate every group's counts from its
# Dirichlet parameters (a shared vector under "hnull", one matrix row
# per group under "ha"), then score with the MLE or MoM variant.
Xdc.statistics.Hnull.Ha <- function(alphap, group.Nrs, type, est){
    numGroups <- length(group.Nrs)
    isHa <- tolower(type) == "ha"
    simGroups <- vector("list", numGroups)
    for(g in 1:numGroups){
        shape <- if(isHa) alphap[g,] else alphap
        simGroups[[g]] <- Dirichlet.multinomial(group.Nrs[[g]], shape)
    }
    # Pick the requested estimator for the generated data
    if(tolower(est) == "mle"){
        return(Xdc.statistics(simGroups))
    }
    Xdc.statistics.MoM(simGroups)
}
### ~~~~~~~~~~~~~~~~~~~~~
### OLD
### ~~~~~~~~~~~~~~~~~~~~~
# Old entry point for fitting a Dirichlet-multinomial rpart tree with
# the legacy split routine (rpartSplitOld). Kept for reference; the
# return layout mirrors the current DM.Rpart.Base.
DM.Rpart.Base.Old <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0){
if(missing(data) || missing(covars))
stop("data and/or covars are missing.")
# Set the methods to use and call rpart
methods <- list(init=rpartInit, eval=rpartEval, split=rpartSplitOld)
rpartRes <- rpart::rpart(as.matrix(data) ~., data=covars, method=methods, minsplit=minsplit, minbucket=minbucket, cp=cp)
cpInfo <- rpartRes$cptable
# Tree size = split count at the last cp level + 1
size <- cpInfo[nrow(cpInfo), 2] + 1
# Get split info from best tree (only meaningful when it actually split)
splits <- NULL
if(size > 1)
splits <- rpartCS(rpartRes)
# Plot the rpart results
if(plot)
suppressWarnings(rpart.plot::rpart.plot(rpartRes, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
return(list(cpTable=cpInfo, fullTree=rpartRes, bestTree=rpartRes, subTree=NULL, errorRate=NULL, size=size, splits=splits))
}
# Legacy rpart "split" callback (superseded by rpartSplit). Scores each
# candidate split by the DM likelihood-ratio statistic of the two child
# groups against the parent fit.
# NOTE(review): the continuous branch splits at row positions 1:i,
# which assumes rpart passes y ordered by x -- confirm; the newer
# rpartSplit splits on the unique values of x instead.
rpartSplitOld <- function(y, wt, x, parms, continuous){
# Get initial LL
LL <- DM.MoM(y)$loglik
# Determine what we are comparing
if(continuous){
numTests <- length(x) - 1
dir <- rep(-1, numTests)
}else{
uniqX <- sort(unique(x))
numTests <- length(uniqX) - 1
dir <- uniqX
}
# Run through every comparison
LRT <- rep(0, numTests)
for(i in 1:numTests){
if(continuous){
# Left child: the first i rows (relies on row order matching x)
grp1 <- y[1:i,, drop=FALSE]
grp2 <- y[-c(1:i),, drop=FALSE]
}else{
grp1 <- y[x == uniqX[i],, drop=FALSE]
grp2 <- y[x != uniqX[i],, drop=FALSE]
}
# Skip any 1 subject groups
if(nrow(grp1) == 1 || nrow(grp2) == 1)
next
LLgrp1 <- DM.MoM(grp1)$loglik
LLgrp2 <- DM.MoM(grp2)$loglik
# Skip any infinite LL comparisons (makes lrt 0)
if(LLgrp1 == Inf || LLgrp2 == Inf)
next
# Likelihood-ratio statistic for this candidate split
LRT[i] <- -2*(LL-LLgrp1-LLgrp2)
}
ret <- list(goodness=LRT, direction=dir)
return(ret)
}
| /WorkingCode/Code/HMP Functions.R | no_license | BioRankings/HMP | R | false | false | 67,453 | r |
library(dirmult) # example code functions and dirmult, equalTheta functions
library(ggplot2) # pretty plots for pi
library(doParallel) # parallelizing
library(MASS) # ginv function for Xsc.statistics
library(vegan) # ga distances
library(gplots) # heatmap plots
library(rpart) # base rpart
library(rpart.plot) # rpart plotting
library(lattice) # Repeated measures plotting
### Define a global environment to use with rpart
### (package-level mutable state: rpartInit resets this counter and
### rpartEval increments it to hand every tree node a unique label)
hmp.pkg.env <- new.env(parent=emptyenv())
hmp.pkg.env$EVAL_COUNT_RPART <- 1
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### External
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### ~~~~~~~~~~~~~~~~~~~~~
### Generation functions
### ~~~~~~~~~~~~~~~~~~~~~
# Draw one multinomial sample per entry of Nrs, all sharing the same
# probability vector, and return them stacked as a labeled matrix
# (rows = "Sample i", columns = "Taxa j").
Multinomial <- function(Nrs, probs){
    if(missing(Nrs) || missing(probs))
        stop("Nrs and/or probs missing.")
    numSamples <- length(Nrs)
    mData <- matrix(0, numSamples, length(probs))
    # One rmultinom draw per requested sample size
    for(s in seq_len(numSamples))
        mData[s,] <- stats::rmultinom(1, Nrs[s], probs)
    dimnames(mData) <- list(paste("Sample", seq_len(numSamples)),
                            paste("Taxa", seq_len(ncol(mData))))
    return(mData)
}
# Draw one Dirichlet-multinomial sample per entry of Nrs: each sample
# gets a fresh Dirichlet probability draw from `shape`, then a
# multinomial draw of that sample's reads. Returns a labeled matrix
# (rows = "Sample i", columns = "Taxa j").
Dirichlet.multinomial <- function(Nrs, shape){
    if(missing(Nrs) || missing(shape))
        stop("Nrs and/or shape missing.")
    numSamples <- length(Nrs)
    dmData <- matrix(0, numSamples, length(shape))
    for(s in seq_len(numSamples))
        dmData[s,] <- stats::rmultinom(1, Nrs[s], dirmult::rdirichlet(1, shape))
    dimnames(dmData) <- list(paste("Sample", seq_len(numSamples)),
                             paste("Taxa", seq_len(ncol(dmData))))
    return(dmData)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Other functions
### ~~~~~~~~~~~~~~~~~~~~~
# C(alpha)-type goodness-of-fit test of the multinomial across samples:
# the T statistic is referred to a scaled chi-squared distribution with
# Satterthwaite-style effective degrees of freedom.
C.alpha.multinomial <- function(data){
    if(missing(data))
        stop("data missing.")
    sampleWeights <- rowSums(data)/sum(data)
    Ts <- T.statistics(data)
    # Centering matrix of the sample-weight vector
    M.alpha <- diag(sampleWeights) - as.matrix(sampleWeights) %*% t(as.matrix(sampleWeights))
    trM <- sum(diag(M.alpha))
    trM2 <- sum(diag(M.alpha %*% M.alpha))
    # Scale factor and effective degrees of freedom
    g <- trM2/trM
    df <- (ncol(data)-1)*(trM^2)/trM2
    pval <- 1-pchisq(q=Ts/g, df=df, ncp=0, lower.tail=TRUE)
    list("T statistics"=Ts, "p value"=pval)
}
# Method-of-moments fit of the Dirichlet-multinomial: pi from the pooled
# column frequencies, theta via Weir's estimator, and
# gamma = pi*(1-theta)/theta. The log-likelihood is Inf for a
# single-sample data set (theta is then 0 and gamma degenerate).
DM.MoM <- function(data){
    if(missing(data))
        stop("data missing.")
    piHat <- colSums(data)/sum(data)
    thetaHat <- weirMoM(data, piHat)$theta
    gammaHat <- piHat*((1-thetaHat)/thetaHat)
    ll <- if(nrow(data) == 1) Inf else loglikDM(data, gammaHat)
    list(loglik=ll, gamma=gammaHat, pi=piHat, theta=thetaHat)
}
# Symmetrized Kullback-Leibler divergence between the MoM
# Dirichlet-multinomial fits of several data sets, optionally drawn as
# a clustered heatmap.
#
# group.data: list of taxa count matrices (rows = samples, cols = taxa).
# plot:       draw a heatmap.2 of the divergence matrix.
# main:       heatmap title.
# parallel/cores: fit the groups in parallel via doParallel.
# Returns the symmetric numGrps x numGrps divergence matrix.
# NOTE: a pseudo-count of 1 is added to every cell before fitting, so
# the divergences describe the smoothed data.
Kullback.Leibler <- function(group.data, plot=TRUE, main="Kullback Leibler Divergences", parallel=FALSE, cores=3){
if(missing(group.data))
stop("data missing.")
# Check the number of groups
numGrps <- length(group.data)
if(numGrps < 2)
stop("At least 2 data sets are required.")
# Make sure we have the same columns
taxaCounts <- sapply(group.data, ncol)
numTaxa <- taxaCounts[1]
if(any(taxaCounts != numTaxa)){
warning("Group columns do not match, running formatDataSets.")
group.data <- formatDataSets(group.data)
}
# Make sure we have group names
if(is.null(names(group.data))){
grpNames <- paste("Data Set", 1:numGrps)
}else{
grpNames <- names(group.data)
}
# Add 1 so we don't ever get an all 0 comparison
group.data <- lapply(group.data, function(x) x+1)
# Run dirmult on every group
if(parallel){
cl <- parallel::makeCluster(min(cores, numGrps))
doParallel::registerDoParallel(cl)
tryCatch({
results <- foreach::foreach(i=1:numGrps, .combine=list, .multicombine=TRUE, .inorder=TRUE, .packages=c("dirmult")) %dopar%{
param <- DM.MoM(group.data[[i]])
return(param)
}
}, finally = {
parallel::stopCluster(cl) # Close the parallel connections
}
)
}else{
results <- vector("list", numGrps)
for(i in 1:numGrps)
results[[i]] <- DM.MoM(group.data[[i]])
}
# Get alpha (DM gamma parameters) for every group
alpha <- lapply(results, function(x) x$gamma)
names(alpha) <- grpNames
# Log-likelihood of each group under its own fit
LL.vals <- sapply(results, function(x) x$loglik)
# Symmetrized divergence: LL drop when each group is scored under the
# other group's parameters, summed in both directions
KLmat <- matrix(0, numGrps, numGrps)
for(i in 1:numGrps){
for(j in i:numGrps){
if(i == j)
next
KLval1 <- LL.vals[i] - loglikDM(group.data[[i]], alpha[[j]])
KLval2 <- LL.vals[j] - loglikDM(group.data[[j]], alpha[[i]])
KLmat[i, j] <- KLval1 + KLval2
KLmat[j, i] <- KLval1 + KLval2
}
}
colnames(KLmat) <- grpNames
rownames(KLmat) <- grpNames
if(plot){
gplots::heatmap.2(as.matrix(KLmat), dendrogram="both", Rowv=TRUE, Colv=TRUE,
trace="none", symm=TRUE, margins=c(12, 9), density.info="none",
main=main
)
}
return(KLmat)
}
# Effect-size measures for the Xmcupo test of common taxa frequencies:
# the raw chi-squared value, Cramer's phi, a maximum-normalized
# (modified) Cramer's phi, and the p-value.
Xmcupo.effectsize <- function(group.data){
    if(missing(group.data))
        stop("group.data missing.")
    # Harmonize taxa columns across groups if they differ
    taxaCounts <- sapply(group.data, ncol)
    numTaxa <- taxaCounts[1]
    if(any(taxaCounts != numTaxa)){
        warning("Group columns do not match, running formatDataSets.")
        group.data <- formatDataSets(group.data)
        numTaxa <- ncol(group.data[[1]])
    }
    numGroups <- length(group.data)
    totalReads <- sum(sapply(group.data, sum))
    if(numTaxa < numGroups)
        stop("The number of taxa must be greater than the number of groups.")
    # Method-of-moments parameters for every group
    groupParameter <- lapply(group.data, function(grp){
        piHat <- colSums(grp)/sum(grp)
        list(pi=piHat, theta=weirMoM(grp, piHat)$theta, nrs=rowSums(grp))
    })
    # Observed statistic
    Xmcupo <- Xmcupo.statistics(groupParameter)
    # Statistic under maximal separation: group g fully concentrated on taxa g
    maxParameter <- groupParameter
    for(g in 1:numGroups){
        degeneratePi <- rep(0, numTaxa)
        degeneratePi[g] <- 1
        maxParameter[[g]]$pi <- degeneratePi
    }
    XmcupoMax <- Xmcupo.statistics(maxParameter)
    # Classic and maximum-normalized Cramer's phi, plus the p-value
    CramerV <- sqrt(Xmcupo/(totalReads*min(numTaxa-1, numGroups-1)))
    Mod.CramerV <- sqrt(Xmcupo/XmcupoMax)
    pval <- 1-pchisq(q=Xmcupo, df=(numGroups-1)*(numTaxa-1), ncp=0, lower.tail=TRUE)
    c("Chi-Squared"=Xmcupo, "Cramer Phi"=CramerV, "Modified-Cramer Phi"=Mod.CramerV, "P value"=pval)
}
Est.PI <- function(group.data, conf=.95){
  # Estimates each group's taxa probability vector (pi) and overdispersion
  # (theta), by both maximum likelihood (via dirmult) and method-of-moments,
  # with confidence bounds on every pi entry.
  #
  # Args:
  #   group.data: list of count matrices (subjects x taxa).
  #   conf: confidence level for the Upper/Lower bounds (default .95).
  #
  # Returns: list(MLE=list(params, theta), MOM=list(params, theta)); params
  #   has columns Taxa, Group, PI, SE, Upper, Lower; theta has one row per
  #   group (Group, estimate, SE — column names taken from dirmult.summary).
  if(missing(group.data))
    stop("group.data is missing.")
  # Check the number of groups
  numGroups <- length(group.data)
  # All data sets must share the same taxa columns
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if(any(taxaCounts != numTaxa)){
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
  }
  # Default group names when the list is unnamed
  if(is.null(names(group.data))){
    grpNames <- paste("Data Set", 1:numGroups)
  }else{
    grpNames <- names(group.data)
  }
  # Accumulators: 6 columns per taxa row (Taxa, Group, PI, SE, Upper, Lower
  # — named just before returning); suffix 1 = MLE, 2 = MoM
  allParamsMLE <- data.frame(matrix(0, 0, 6))
  allParamsMOM <- data.frame(matrix(0, 0, 6))
  thetaMLE <- data.frame(matrix(0, numGroups, 3))
  thetaMOM <- data.frame(matrix(0, numGroups, 3))
  for(i in 1:numGroups){
    tempData <- group.data[[i]]
    # Check the data has samples
    numSub <- nrow(tempData)
    if(numSub < 1)
      stop("At least one data set in group.data is empty")
    tempParam1 <- data.frame(matrix(0, ncol(tempData), 6))
    tempParam2 <- data.frame(matrix(0, ncol(tempData), 6))
    tempParam2[,2] <- grpNames[i]
    tempParam1[,2] <- grpNames[i]
    # Taxa with all-zero counts break the fit: add a pseudocount of 1 to
    # every cell when any are present
    badTaxa <- which(colSums(tempData) == 0)
    if(length(badTaxa) != 0)
      tempData <- tempData + 1
    # Single-subject group: pi is just the observed proportions, SE is
    # undefined, and the CI collapses onto the estimate
    if(numSub == 1){
      tempParam1[,1] <- colnames(tempData)
      tempParam1[,3] <- unlist(tempData[1,]/sum(tempData))
      tempParam1[,4] <- NA
      tempParam1[,5] <- tempParam1[,3]
      tempParam1[,6] <- tempParam1[,3]
      tempParam1 <- tempParam1[order(tempParam1[,1]),]
      tempTheta1 <- c(0, NA)
      # MoM and MLE are identical in this degenerate case
      tempParam2 <- tempParam1
      tempTheta2 <- tempTheta1
    }else{
      # Fit the Dirichlet-multinomial; dirmult.summary's last row holds the
      # theta estimates, the other rows hold per-taxa pi estimates
      fsum <- dirmult::dirmult.summary(tempData, dirmult::dirmult(tempData, trace=FALSE))
      tempTheta <- fsum[nrow(fsum),]
      fsum <- fsum[-nrow(fsum),]
      # MLE estimate + SE (columns 2:3 of the theta row)
      tempParam1[,1] <- rownames(fsum)
      tempParam1[,3] <- fsum$MLE
      tempParam1[,4] <- fsum$se.MLE
      tempTheta1 <- tempTheta[,2:3]
      # MoM estimate + SE (columns 4:5 of the theta row)
      tempParam2[,1] <- rownames(fsum)
      tempParam2[,3] <- fsum$MoM
      tempParam2[,4] <- fsum$se.MOM
      tempTheta2 <- tempTheta[,4:5]
      # CI half-width multiplier: t quantile for small samples (< 30
      # subjects in the smallest group), normal quantile otherwise
      minSubj <- min(sapply(group.data, function(x) nrow(x)))
      if(minSubj < 30){
        val <- stats::qt(0.5 + conf *0.5, df=minSubj-1)
      }else{
        val <- stats::qnorm(0.5 + conf*0.5)
      }
      tempParam1[,5] <- tempParam1[,3] + val*tempParam1[,4]
      tempParam1[,6] <- tempParam1[,3] - val*tempParam1[,4]
      tempParam2[,5] <- tempParam2[,3] + val*tempParam2[,4]
      tempParam2[,6] <- tempParam2[,3] - val*tempParam2[,4]
    }
    # Append this group's rows to the across-group accumulators
    allParamsMLE <- rbind(allParamsMLE, tempParam1)
    thetaMLE[i,] <- c(grpNames[i], tempTheta1)
    allParamsMOM <- rbind(allParamsMOM, tempParam2)
    thetaMOM[i,] <- c(grpNames[i], tempTheta2)
  }
  colnames(allParamsMLE) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
  colnames(thetaMLE) <- c("Group", colnames(tempTheta1))
  colnames(allParamsMOM) <- c("Taxa", "Group", "PI", "SE", "Upper", "Lower")
  colnames(thetaMOM) <- c("Group", colnames(tempTheta2))
  # Clamp the error bars into the valid proportion range [0, 1]
  allParamsMLE$Upper <- ifelse(allParamsMLE$Upper > 1, 1, allParamsMLE$Upper)
  allParamsMLE$Lower <- ifelse(allParamsMLE$Lower < 0, 0, allParamsMLE$Lower)
  allParamsMOM$Upper <- ifelse(allParamsMOM$Upper > 1, 1, allParamsMOM$Upper)
  allParamsMOM$Lower <- ifelse(allParamsMOM$Lower < 0, 0, allParamsMOM$Lower)
  # Factor Group/Taxa so downstream plots keep the original ordering
  allParamsMLE$Group <- factor(allParamsMLE$Group, levels=grpNames)
  allParamsMLE$Taxa <- factor(allParamsMLE$Taxa, levels=unique(colnames(group.data[[1]])))
  allParamsMOM$Group <- factor(allParamsMOM$Group, levels=grpNames)
  allParamsMOM$Taxa <- factor(allParamsMOM$Taxa, levels=unique(colnames(group.data[[1]])))
  MLE <- list(params=allParamsMLE, theta=thetaMLE)
  MOM <- list(params=allParamsMOM, theta=thetaMOM)
  return(list(MLE=MLE, MOM=MOM))
}
Test.Paired <- function(group.data, numPerms=1000, parallel=FALSE, cores=3){
  # Paired permutation test for a difference in taxa composition between two
  # paired data sets (the same subjects measured twice). The statistic is the
  # sum over taxa of the squared mean paired difference in relative
  # abundance; the null distribution is built by randomly flipping the sign
  # of each subject's difference vector.
  #
  # Args:
  #   group.data: list of exactly 2 count matrices (subjects x taxa).
  #   numPerms: number of sign-flip permutations (> 0).
  #   parallel, cores: run the permutations on a doParallel cluster.
  #
  # Returns: the smoothed permutation p-value.
  if(missing(group.data))
    stop("group.data is missing.")
  if(length(group.data) != 2)
    stop("group.data must have exactly 2 data sets.")
  if(numPerms <= 0)
    stop("The number of permutations must be an integer greater than 0.")
  # Make sure we have the same columns
  if(ncol(group.data[[1]]) != ncol(group.data[[2]])){
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
  }
  # Pairing requires the same subjects in both sets
  numSub <- nrow(group.data[[1]])
  if(numSub != nrow(group.data[[2]]))
    stop("Groups must have the same number of subjects.")
  # Align subjects: same names out of order get sorted into the same order;
  # mismatched names fall back to positional pairing with a warning
  rNames1 <- rownames(group.data[[1]])
  rNames2 <- rownames(group.data[[2]])
  if(!all(rNames1 == rNames2)){ # check names in the same order
    if(all(rNames1 %in% rNames2)){ # check names match in wrong order
      group.data[[1]] <- group.data[[1]][order(rNames1),]
      group.data[[2]] <- group.data[[2]][order(rNames2),]
    }else{
      warning("Subject names do not match, assuming data is ordered correctly.")
    }
  }
  # Convert counts to within-subject relative abundances
  group.data[[1]] <- group.data[[1]]/rowSums(group.data[[1]])
  group.data[[2]] <- group.data[[2]]/rowSums(group.data[[2]])
  # Observed statistic: sum of squared per-taxa mean paired differences
  # (the unused rbind of both data sets was removed — dead code)
  dataDiff <- group.data[[1]] - group.data[[2]]
  meanDiff <- apply(dataDiff, 2, mean)
  obsDiff <- sum(meanDiff^2)
  # Build the null distribution by sign-flipping each subject's differences
  if(parallel){
    cl <- parallel::makeCluster(cores)
    doParallel::registerDoParallel(cl)
    tryCatch({
      permDiffs <- foreach::foreach(i=1:numPerms, .combine=c, .inorder=FALSE, .multicombine=TRUE) %dopar%{
        # Randomly swap group membership by reversing the difference sign
        swaps <- sample(c(1, -1), numSub, replace=TRUE)
        dataDiffTemp <- dataDiff * swaps
        meanDiffTemp <- apply(dataDiffTemp, 2, mean)
        # Calculate the sum of squares
        obsDiffTemp <- sum(meanDiffTemp^2)
        return(obsDiffTemp)
      }
    }, finally = {
      parallel::stopCluster(cl) # Close the parallel connections
    }
    )
  }else{
    permDiffs <- rep(0, numPerms)
    for(i in 1:numPerms){
      # Randomly swap group membership by reversing the difference sign
      swaps <- sample(c(1, -1), numSub, replace=TRUE)
      dataDiffTemp <- dataDiff * swaps
      meanDiffTemp <- apply(dataDiffTemp, 2, mean)
      # Calculate the sum of squares
      permDiffs[i] <- sum(meanDiffTemp^2)
    }
  }
  # Smoothed permutation p-value (never exactly 0)
  pval <- (sum(permDiffs >= obsDiff) + 1)/(numPerms + 1)
  return(pval)
}
DM.Rpart <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0, numCV=10, numCon=0, parallel=FALSE, cores=3, use1SE=FALSE, lowerSE=TRUE){
  # Dispatcher for the Dirichlet-multinomial rpart fits: a plain fit when
  # numCV < 2, a cross-validated fit when numCon < 2, otherwise the
  # consensus-of-repeated-CV fit. All arguments are forwarded unchanged.
  #
  # Returns: whatever the selected variant returns
  #   (list(cpTable, fullTree, bestTree, errorRate, size, splits)).
  if(missing(data) || missing(covars))
    stop("data and/or covars are missing.")
  if(numCV < 2)
    return(DM.Rpart.Base(data, covars, plot, minsplit, minbucket, cp))
  if(numCon < 2)
    return(DM.Rpart.CV(data, covars, plot, minsplit, minbucket, cp, numCV, parallel, cores, use1SE, lowerSE))
  DM.Rpart.CV.Consensus(data, covars, plot, minsplit, minbucket, cp, numCV, numCon, parallel, cores, use1SE, lowerSE)
}
DM.Rpart.Base <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0){
  # Fits a single rpart tree with a custom Dirichlet-multinomial splitting
  # method (no cross-validation, no pruning).
  #
  # Args:
  #   data: count matrix (subjects x taxa), used as the multivariate response.
  #   covars: data frame of covariates to split on.
  #   plot: draw the tree with rpart.plot.
  #   minsplit, minbucket, cp: rpart control parameters.
  #
  # Returns: list(cpTable, fullTree, bestTree, errorRate=NULL, size, splits);
  #   bestTree is the same object as fullTree here (nothing is pruned).
  if(missing(data) || missing(covars))
    stop("data and/or covars are missing.")
  # Custom rpart "method" list: rpartInit/rpartEval/rpartSplit are project
  # helpers implementing the DM splitting criterion
  methods <- list(init=rpartInit, eval=rpartEval, split=rpartSplit)
  rpartRes <- rpart::rpart(as.matrix(data) ~., data=covars, method=methods, minsplit=minsplit, minbucket=minbucket, cp=cp)
  cpInfo <- rpartRes$cptable
  # Column 2 of cptable is the split count (rpart convention);
  # size = splits + 1
  size <- cpInfo[nrow(cpInfo), 2] + 1
  # Get split info from best tree (rpartCS is a project helper)
  splits <- NULL
  if(size > 1)
    splits <- rpartCS(rpartRes)
  # Plot the rpart results
  if(plot)
    suppressWarnings(rpart.plot::rpart.plot(rpartRes, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
  return(list(cpTable=cpInfo, fullTree=rpartRes, bestTree=rpartRes, errorRate=NULL, size=size, splits=splits))
}
DM.Rpart.CV <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0, numCV=10, parallel=FALSE, cores=3, use1SE=FALSE, lowerSE=TRUE){
  # Fits the full Dirichlet-multinomial rpart tree, then chooses a pruning
  # level by numCV-fold cross-validation (lowest MSE, or the 1-SE rule).
  #
  # Args:
  #   data, covars: see DM.Rpart.Base.
  #   numCV: number of CV folds (>= 2).
  #   parallel, cores: forwarded to the CV helper.
  #   use1SE: choose among trees within 1 SE of the lowest-MSE tree.
  #   lowerSE: with use1SE, take the smallest (TRUE) or largest (FALSE) tree.
  #
  # Returns: list(cpTable, fullTree, bestTree, errorRate, size, splits).
  if(missing(data) || missing(covars))
    stop("data and/or covars are missing.")
  if(numCV < 2)
    stop("numCV must be at least 2.")
  # Run initial Rpart (unpruned full tree)
  rpartBase <- DM.Rpart.Base(data, covars, FALSE, minsplit, minbucket, cp)
  rpartRes <- rpartBase$fullTree
  # A one-row cptable means the full tree has no splits: nothing to prune
  if(nrow(rpartRes$cptable) == 1){
    warning("No splits in the data.")
    return(rpartBase)
  }
  # Cross-validate every pruning level (rpartCV is a project helper;
  # ciInfo is assumed to carry MSE and SE columns — confirm in rpartCV)
  cvRes <- rpartCV(data, covars, rpartRes, minsplit, minbucket, numCV, parallel, cores)
  # Calculate the best tree
  ciInfo <- as.data.frame(cvRes$ciInfo)
  # Find the tree with the lowest MSE
  minMSE <- min(ciInfo$MSE)
  lowestMSELoc <- which(ciInfo$MSE == minMSE)[1]
  # Flag every tree whose MSE lies within 1 SE of the lowest-MSE tree
  cutoffU <- ciInfo$MSE[lowestMSELoc] + ciInfo$SE[lowestMSELoc]
  cutoffL <- ciInfo$MSE[lowestMSELoc] - ciInfo$SE[lowestMSELoc]
  ciInfo$within1SE <- ifelse(ciInfo$MSE <= cutoffU & ciInfo$MSE >= cutoffL, 1, 0)
  if(use1SE){
    # Find the smallest/biggest tree within 1 SE
    within <- which(ciInfo$within1SE == 1)
    if(lowerSE){
      bestTreeLoc <- min(within)
    }else{
      bestTreeLoc <- max(within)
    }
  }else{
    bestTreeLoc <- lowestMSELoc
  }
  # Prune at the chosen complexity parameter
  # (assumes col 1 = CP, col 2 = split count, per rpart's cptable layout)
  size <- ciInfo[bestTreeLoc, 2] + 1
  best <- rpart::prune(rpartRes, cp=ciInfo[bestTreeLoc, 1])
  # Get split info from best tree
  splits <- NULL
  if(size > 1)
    splits <- rpartCS(best)
  if(plot)
    suppressWarnings(rpart.plot::rpart.plot(best, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
  return(list(cpTable=ciInfo, fullTree=rpartRes, bestTree=best, errorRate=cvRes$errorRate, size=size, splits=splits))
}
DM.Rpart.CV.Consensus <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0, numCV=10, numCon=100, parallel=FALSE, cores=3, use1SE=FALSE, lowerSE=TRUE){
  # Repeats the cross-validated fit numCon times and picks the pruning level
  # with the lowest MSE averaged across repetitions, smoothing out CV noise.
  #
  # Args: as DM.Rpart.CV, plus numCon = number of CV repetitions (>= 2).
  #
  # Returns: list(cpTable, fullTree, bestTree, errorRate=NULL, size, splits);
  #   cpTable carries the across-repetition mean/sd of MSE and rank.
  if(missing(data) || missing(covars))
    stop("data and/or covars are missing.")
  if(numCV < 2)
    stop("numCV must be at least 2.")
  if(numCon < 2)
    stop("numCon must be at least 2.")
  # Run DM.Rpart.CV numCon times, in parallel or serially
  if(parallel){
    cl <- parallel::makeCluster(min(cores, numCon))
    doParallel::registerDoParallel(cl)
    tryCatch({
      results <- foreach::foreach(i=1:numCon, .combine=append, .multicombine=FALSE, .inorder=FALSE, .errorhandling="pass", .packages=c("rpart", "HMP")) %dopar%{
        cvList <- DM.Rpart.CV(data, covars, FALSE, minsplit, minbucket, cp, numCV, FALSE, 1, use1SE, lowerSE)
        return(list(cvList))
      }
    }, finally = {
      parallel::stopCluster(cl) # Close the parallel connections
    }
    )
  }else{
    results <- vector("list", numCon)
    for(i in 1:numCon)
      results[[i]] <- DM.Rpart.CV(data, covars, FALSE, minsplit, minbucket, cp, numCV, FALSE, 1, use1SE, lowerSE)
  }
  # Combine CV results: one column per repetition
  # (assumes cpTable col 4 = MSE and col 8 = rank — confirm against the
  # ciInfo layout produced by DM.Rpart.CV/rpartCV)
  MSETab <- do.call("cbind", lapply(results, function(x){x$cpTable[,4]}))
  rankTab <- do.call("cbind", lapply(results, function(x){x$cpTable[,8]}))
  ciInfo <- cbind(
    results[[1]]$cpTable[,1:3],
    "MeanMSE"=rowMeans(MSETab),
    "sdMSE"=apply(MSETab, 1, sd),
    "MeanRank"=rowMeans(rankTab),
    "sdRank"=apply(rankTab, 1, sd)
  )
  # Find the tree with the lowest mean MSE across repetitions
  minMSE <- min(ciInfo$MeanMSE)
  bestTreeLoc <- which(ciInfo$MeanMSE == minMSE)[1]
  # Prune the first repetition's full tree at the chosen CP
  # (all repetitions share the same full tree, so any would do)
  size <- ciInfo[bestTreeLoc, 2] + 1
  best <- rpart::prune(results[[1]]$fullTree, cp=ciInfo[bestTreeLoc, 1])
  # Get split info from best tree
  splits <- NULL
  if(size > 1)
    splits <- rpartCS(best)
  if(plot)
    suppressWarnings(rpart.plot::rpart.plot(best, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
  return(list(cpTable=ciInfo, fullTree=results[[1]]$fullTree, bestTree=best, errorRate=NULL, size=size, splits=splits))
}
Gen.Alg <- function(data, covars, iters=50, popSize=200, earlyStop=0, dataDist="euclidean", covarDist="gower",
	verbose=FALSE, plot=TRUE, minSolLen=NULL, maxSolLen=NULL, custCovDist=NULL, penalty=0){
  # Genetic algorithm selecting a subset of data's columns. Solutions are 0/1
  # vectors over the columns, scored by the project helper gaScoring against
  # the covariate distance matrix; crossover + mutation with elitism.
  #
  # Args:
  #   data: numeric matrix/data frame whose columns are candidates.
  #   covars: covariates for the target distances (unused when custCovDist
  #     is supplied).
  #   iters, popSize: GA iterations and population size.
  #   earlyStop: stop after this many iterations with no new best score
  #     (0 disables).
  #   dataDist, covarDist: "euclidean" or "gower".
  #   verbose, plot: progress printing / score-summary plot.
  #   minSolLen, maxSolLen: optional bounds on solution size (passed to
  #     gaScoring).
  #   custCovDist: precomputed covariate distance matrix.
  #   penalty: solution-size penalty in [0, 1] (passed to gaScoring).
  #
  # Returns: list(scoreSumm, solutions, scores, time, selected, nonSelected,
  #   selectedIndex); solutions are sorted best-first.
  if(missing(data) || (missing(covars) && is.null(custCovDist)))
    stop("data and/or covars are missing.")
  # Check for any bad numbers
  if(iters <= 0)
    stop("iters must be an integer greater than 0")
  if(popSize <= 0)
    stop("popSize must be an integer greater than 0")
  if(earlyStop < 0)
    stop("earlyStop must be an integer greater than or equal to 0")
  if(penalty < 0 || penalty > 1)
    stop("penalty must be between 0 and 1")
  # Check distances
  if(dataDist != "euclidean" && dataDist != "gower")
    stop("data.dist must be euclidean or gower.")
  if(covarDist != "euclidean" && covarDist != "gower")
    stop("covars.dist must be euclidean or gower.")
  # Solution length = number of candidate columns
  size <- ncol(data)
  # Check stopping rules
  if(!is.null(minSolLen))
    if(minSolLen < 0 || minSolLen >= size)
      stop("minSolLen must be 0 or greater and less than the number of columns in data.")
  if(!is.null(maxSolLen))
    if(maxSolLen <= 0 || maxSolLen > size)
      stop("maxSolLen must be greater than 0 and less than or equal to the number columns in data.")
  if(!is.null(maxSolLen) && !is.null(minSolLen))
    if(maxSolLen < minSolLen)
      stop("maxSolLen must be bigger than minSolLen.")
  # GA constants: per-bit mutation probability, number of elite solutions
  # carried over unchanged, and parent-sampling weights favoring the
  # best-ranked (sorted) solutions via a half-normal over ranks
  mutationChance <- 1/(size+1)
  elitism <- floor(popSize/5)
  evalSumm <- matrix(NA, iters, 6)
  newPopSize <- popSize - elitism
  newPopulation <- matrix(NA, newPopSize, size)
  parentProb <- stats::dnorm(1:popSize, mean=0, sd=(popSize/3))
  if(verbose){
    print("X. Current Step : Current Time Taken")
    runningTime <- proc.time()
    print(paste("1. Calculating Distances:", round((proc.time() - runningTime)[3], 3)))
  }
  # Target distances: supplied directly or computed from covars
  if(is.null(custCovDist)){
    covarDists <- vegan::vegdist(covars, covarDist)
  }else{
    covarDists <- custCovDist
  }
  # Precompute each column's distance contribution so scoring a solution
  # only needs to combine per-column pieces
  colDists <- vector("list", ncol(data))
  for(i in 1:ncol(data))
    colDists[[i]] <- vegan::vegdist(data[,i], dataDist)
  # Squared pieces sum correctly for euclidean distances
  if(dataDist == "euclidean")
    colDists <- lapply(colDists, function(x) x^2)
  if(verbose)
    print(paste("2. Creating Starting Data:", round((proc.time() - runningTime)[3], 3)))
  # Initial population (gaCreation is a project helper)
  population <- gaCreation(data, popSize)
  if(verbose)
    print(paste("3. Scoring Starting Data:", round((proc.time() - runningTime)[3], 3)))
  # Score every solution and sort the population best-first
  evalVals <- rep(NA, popSize)
  for(e in 1:popSize)
    evalVals[e] <- gaScoring(population[e,], covarDists, colDists, dataDist, penalty, minSolLen, maxSolLen)
  population <- population[order(evalVals, decreasing=TRUE),]
  bestScoreValue <- max(evalVals)
  bestScoreCounter <- 0
  if(verbose)
    print(paste("4. Running Iterations:", round((proc.time() - runningTime)[3], 3)))
  # Run GA
  ptr <- proc.time()
  for(i in 1:iters){
    if(verbose){
      if(i %% round(iters/10) == 0)
        print(paste("Iteration - ", i, ": ", round((proc.time() - runningTime)[3], 3), sep=""))
    }
    # Single-point crossover of two rank-weighted parents per child;
    # crossOverPoint 0/size copies one parent whole
    for(child in 1:newPopSize){
      parentIDs <- sample(1:popSize, 2, prob=parentProb)
      parents <- population[parentIDs,]
      crossOverPoint <- sample(0:size, 1)
      if(crossOverPoint == 0){
        newPopulation[child,] <- parents[2,]
      }else if(crossOverPoint == size){
        newPopulation[child,] <- parents[1,]
      }else{
        newPopulation[child,] <- c(parents[1,][1:crossOverPoint], parents[2,][(crossOverPoint+1):size])
      }
    }
    # Replace the non-elite part of the population with the (bit-flip
    # mutated) children; the top `elitism` solutions survive unchanged
    if(mutationChance > 0){
      population[(elitism+1):popSize,] <- apply(newPopulation, 2, function(x){ifelse(stats::runif(newPopSize) < mutationChance, 1-x, x)})
    }else{
      population[(elitism+1):popSize,] <- newPopulation
    }
    # Re-score and re-sort the whole population
    for(e in 1:popSize)
      evalVals[e] <- gaScoring(population[e,], covarDists, colDists, dataDist, penalty, minSolLen, maxSolLen)
    population <- population[order(evalVals, decreasing=TRUE),]
    evalSumm[i,] <- summary(evalVals)
    # Early stopping: count iterations since the best score last improved
    if(bestScoreValue == max(evalVals)){
      bestScoreCounter <- bestScoreCounter + 1
    }else{
      bestScoreCounter <- 0
      bestScoreValue <- max(evalVals)
    }
    if(bestScoreCounter == earlyStop && earlyStop != 0)
      break
  }
  gaTime <- (proc.time() - ptr)[3]
  if(verbose)
    print(paste("5. Prettying Results", round((proc.time() - runningTime)[3], 3)))
  # Label the outputs for returning
  rownames(population) <- paste("Solution", 1:nrow(population))
  colnames(population) <- colnames(data)
  rownames(evalSumm) <- paste("Iteration", 1:nrow(evalSumm))
  colnames(evalSumm) <- c("Worst", "25%ile", "Median", "Mean", "75%ile", "Best")
  evalVals <- matrix(evalVals[order(evalVals, decreasing=TRUE)], 1, length(evalVals))
  colnames(evalVals) <- paste("Solution ", 1:length(evalVals))
  rownames(evalVals) <- "Score"
  # Columns selected by the single best solution (first row after sorting)
  selIndex <- which(population[1,] == 1)
  sel <- colnames(data)[selIndex]
  # NOTE(review): when selIndex is empty, -selIndex yields an empty index,
  # so nonSel comes back empty rather than listing all columns — verify
  # this edge case is intended.
  nonSel <- colnames(data)[-selIndex]
  # Plot scoring summary
  if(plot)
    gaPlot(evalSumm)
  return(list(scoreSumm=evalSumm, solutions=population, scores=evalVals, time=gaTime, selected=sel, nonSelected=nonSel, selectedIndex=selIndex))
}
Gen.Alg.Consensus <- function(data, covars, consensus=.5, numRuns=10, parallel=FALSE, cores=3, ...){
  # Runs Gen.Alg numRuns times and keeps the columns selected by at least
  # `consensus` fraction of the runs' best solutions.
  #
  # Args:
  #   data, covars: forwarded to Gen.Alg.
  #   consensus: required agreement fraction in (0, 1].
  #   numRuns: number of independent GA runs.
  #   parallel, cores: run the GAs on a doParallel cluster.
  #   ...: extra arguments forwarded to Gen.Alg.
  #
  # Returns: list(solutions, consSol, selectedIndex).
  if(missing(data) || missing(covars))
    stop("data and/or covars are missing.")
  if(consensus <= 0 || consensus > 1)
    stop("consensus must be greater than 0 and equal or less than 1")
  # Independent GA runs, parallel or serial
  if(parallel){
    cl <- parallel::makeCluster(min(cores, numRuns))
    doParallel::registerDoParallel(cl)
    tryCatch({
      gaRes <- foreach::foreach(i=1:numRuns, .combine=list, .multicombine=TRUE, .inorder=FALSE, .packages=c("vegan", "HMP")) %dopar%{
        return(Gen.Alg(data, covars, plot=FALSE, verbose=FALSE, ...))
      }
    }, finally = {
      parallel::stopCluster(cl) # Close the parallel connections
    }
    )
  }else{
    gaRes <- lapply(seq_len(numRuns), function(run) Gen.Alg(data, covars, plot=FALSE, verbose=FALSE, ...))
  }
  # One column per run: each run's best 0/1 solution vector
  bestSols <- sapply(gaRes, function(res) res$solutions[1,])
  # A column is kept when enough runs agree on it
  consSol <- (rowSums(bestSols) >= (numRuns * consensus)) * 1
  selInd <- which(consSol == 1)
  list(solutions=bestSols, consSol=consSol, selectedIndex=selInd)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Plot functions
### ~~~~~~~~~~~~~~~~~~~~~
Barchart.data <- function(data, title="Taxa Proportions"){
  # Draws one horizontal stacked bar per sample, showing each sample's taxa
  # proportions (row counts scaled to sum to 1).
  #
  # Args:
  #   data: count matrix (subjects x taxa).
  #   title: plot title.
  if(missing(data))
    stop("data missing.")
  # Scale each row to proportions, then transpose to taxa x samples so
  # barplot stacks taxa within each sample's bar
  dataProp <- t(as.matrix(data)/rowSums(data))
  barplot(dataProp, col=rainbow(ncol(data)), horiz=TRUE,
          main=title, axisnames=FALSE, font.main=20, font.sub=16)
}
Plot.PI <- function(estPi, errorBars=TRUE, logScale=FALSE, main="PI Vector", ylab="Fractional Abundance"){
  # Plots estimated taxa probabilities per group (one of Est.PI's $MLE or
  # $MOM slots), either with error bars or with lines connecting each group.
  #
  # Args:
  #   estPi: list with $params (columns Taxa, Group, PI, SE, Upper, Lower).
  #   errorBars: draw Upper/Lower error bars; otherwise draw group lines.
  #   logScale: log10 the y axis and flag it in the y label.
  #   main, ylab: title and y-axis label.
  if(missing(estPi))
    stop("estPi is missing.")
  # NOTE(review): theme_update changes the global ggplot2 theme for the
  # whole session, not just this plot.
  ggplot2::theme_update(plot.title=ggplot2::element_text(hjust=0.5))
  # Base dotplot of PI by taxa, colored by group
  piPlot <- ggplot2::ggplot(estPi$params, ggplot2::aes_string(y="PI", x="Taxa", colour="Group")) +
    ggplot2::geom_point() +
    ggplot2::theme(legend.position = "top", text=ggplot2::element_text(size=15)) +
    ggplot2::labs(title=main, y=ylab, x="") +
    ggplot2::theme(axis.text.x=ggplot2::element_text(hjust=1, angle=45, size=10))
  # Add error bars
  if(errorBars){
    piPlot <- piPlot + ggplot2::geom_errorbar(ggplot2::aes_string(ymax="Upper", ymin="Lower"))
  }else{
    piPlot <- piPlot + ggplot2::geom_line(ggplot2::aes_string(group="Group"))
  }
  # Log scaling and label flag (was two separate identical if(logScale)
  # statements — merged)
  if(logScale){
    piPlot <- piPlot +
      ggplot2::scale_y_log10() +
      ggplot2::labs(y=paste(ylab, "(Logged)"))
  }
  print(piPlot)
}
Plot.MDS <- function(group.data, main="Group MDS", retCords=FALSE){
  # MDS scatterplot of every sample across all data sets, colored by data
  # set, with a legend of group names.
  #
  # Args:
  #   group.data: list of count matrices (subjects x taxa).
  #   main: plot title.
  #   retCords: also return the MDS coordinates.
  if(missing(group.data))
    stop("group.data is missing.")
  numGroups <- length(group.data)
  # Every data set must share the same taxa columns
  taxaCounts <- sapply(group.data, ncol)
  if(any(taxaCounts != taxaCounts[1])){
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
  }
  # Fall back to generated names when the list is unnamed
  grpNames <- names(group.data)
  if(is.null(grpNames))
    grpNames <- paste("Data Set", 1:numGroups)
  # Stack all samples and compute their ordination coordinates
  # (getBC is a project helper — presumably Bray-Curtis MDS; confirm there)
  mData <- do.call("rbind", group.data)
  loc <- getBC(mData)
  # One color per data set, repeated once per sample in that set
  availCols <- rainbow(numGroups)
  cols <- rep(availCols, times=sapply(group.data, nrow))
  # Plot MDS
  plot(loc, pch=16, ylab="MDS 2", xlab="MDS 1", col=cols, main=main)
  legend("topright", legend=grpNames, pch=15, col=availCols)
  if(retCords)
    return(loc)
}
Plot.RM.Barchart <- function(group.data, groups, times, plotByGrp=TRUE, col=NULL, conf=.95){
  # Stacked barcharts of estimated taxa proportions for repeated-measures
  # data, paneled by group (x axis = time) or by time (x axis = group).
  #
  # Args:
  #   group.data: list of count matrices, one per group/time combination.
  #   groups, times: group and time label for each element of group.data.
  #   plotByGrp: panel by group (TRUE) or by time (FALSE).
  #   col: one color per taxa (defaults to rainbow).
  #   conf: confidence level passed to Est.PI.
  if(missing(group.data) || missing(groups) || missing(times))
    stop("group.data, groups and/or times are missing.")
  numSamps <- length(group.data)
  ### Get the pi params (MLE slot)
  myEst <- Est.PI(group.data, conf)
  params <- myEst$MLE$params
  ### Label each params row with its group/time. Est.PI may reformat
  ### mismatched data sets, so take the per-set taxa count from params itself
  ### instead of ncol(group.data[[1]]) (which could be the wrong length).
  taxaPerSet <- nrow(params)/numSamps
  params$Grp <- as.character(rep(groups, each=taxaPerSet))
  params$Time <- as.character(rep(times, each=taxaPerSet))
  if(is.null(col))
    col <- rainbow(length(unique(params$Taxa)))
  if(plotByGrp){
    lattice::barchart(params$PI ~ params$Time | paste("Group", params$Grp),
      ylab="Fractional Abundance", xlab="Time",
      stack=TRUE, groups=params$Taxa, col=col,
      key=list(
        text=list(levels(params$Taxa)),
        points=list(pch=19, col=col),
        columns=5
      )
    )
  }else{
    lattice::barchart(params$PI ~ params$Grp | paste("Time", params$Time),
      ylab="Fractional Abundance", xlab="Time",
      stack=TRUE, groups=params$Taxa, col=col,
      key=list(
        text=list(levels(params$Taxa)),
        points=list(pch=19, col=col),
        columns=5
      )
    )
  }
}
Plot.RM.Dotplot <- function(group.data, groups, times, errorBars=TRUE, col=NULL, conf=.95, alpha=1){
  # Dotplot of estimated taxa proportions (optionally with CI segments) for
  # repeated-measures data, paneled by time and colored by group.
  #
  # Args:
  #   group.data: list of count matrices, one per group/time combination.
  #   groups, times: group and time label for each element of group.data.
  #   errorBars: draw the Lower/Upper CI segments per point.
  #   col: one color per group (defaults to rainbow).
  #   conf: confidence level passed to Est.PI.
  #   alpha: color transparency in [0, 1].
  if(missing(group.data) || missing(groups) || missing(times))
    stop("group.data, groups and/or times are missing.")
  numSamps <- length(group.data)
  numGrps <- length(unique(groups))
  ### Get the pi params (MLE slot)
  myEst <- Est.PI(group.data, conf)
  params <- myEst$MLE$params
  ### Label each params row with its group/time. Est.PI may reformat
  ### mismatched data sets, so take the per-set taxa count from params itself
  ### instead of ncol(group.data[[1]]) (which could be the wrong length).
  taxaPerSet <- nrow(params)/numSamps
  params$Grp <- as.character(rep(groups, each=taxaPerSet))
  params$Time <- as.character(rep(times, each=taxaPerSet))
  if(is.null(col))
    col <- rainbow(numGrps)
  ### Add alpha to the colors
  col <- apply(sapply(col, grDevices::col2rgb)/255, 2, function(x){grDevices::rgb(x[1], x[2], x[3], alpha=alpha)})
  if(errorBars){
    lattice::dotplot(params$Taxa ~ params$PI | paste("Time", params$Time),
      pch=19, groups=params$Grp, col=col,
      ylab="Taxa", xlab="Fractional Abundance",
      panel=lattice::panel.superpose,
      panel.groups=function(x, y, subscripts, col, ...){
        lattice::panel.xyplot(x, y, ...)
        # Horizontal CI segment for each point in this panel
        lattice::panel.segments(params$Lower[subscripts], y, params$Upper[subscripts], y, col=col)
      },
      key=list(
        text=list(as.character(unique(params$Grp))),
        points=list(pch=19, col=col)
      )
    )
  }else{
    lattice::dotplot(params$Taxa ~ params$PI | paste("Time", params$Time),
      pch=19, groups=params$Grp, col=col,
      ylab="Taxa", xlab="Fractional Abundance",
      key=list(
        text=list(as.character(unique(params$Grp))),
        points=list(pch=19, col=col)
      )
    )
  }
}
Plot.Theta <- function(estPi, main="Theta by Group"){
  # Tornado plot of per-group theta estimates with normal-approximation 95%
  # confidence intervals, sorted by the estimate.
  #
  # Args:
  #   estPi: one of Est.PI's $MLE/$MOM slots — $theta is expected to hold
  #     Group (col 1), the theta estimate (col 2) and its SE (col 3), with
  #     columns 2-3 numeric.
  #   main: plot title.
  if(missing(estPi))
    stop("estPi is missing.")
  # Build the CI table. Fixed two defects: the upper bound was also named
  # "lci" (duplicate name), and the sort used thetaci[2] (a one-column data
  # frame) where the column vector thetaci[,2] is required.
  theta <- estPi$theta
  thetaci <- cbind(
    theta,
    lci = theta[,2] - 1.96*theta[,3],
    uci = theta[,2] + 1.96*theta[,3]
  )
  thetaci <- thetaci[order(thetaci[,2]),]
  xlim <- range(thetaci[,c(4, 5)])
  # One point per group at its theta estimate
  plot(thetaci[,2], 1:nrow(theta), pch=16, yaxt="n", xlim=xlim,
       main=main, ylab="", xlab="Theta +/- 95% CI")
  grid(ny=15, lwd=2)
  axis(2, at=1:nrow(theta), labels=thetaci[,1])
  # Horizontal CI segment per group
  for (i in 1:nrow(theta))
    lines(c(thetaci[i, 4], thetaci[i, 5]), c(i, i))
}
Plot.MDS.wPI <- function(group.data, pi, main="Group MDS", retCords=FALSE){
  # MDS scatterplot of all samples plus each group's estimated pi vector,
  # drawn as a larger triangle in the group's color.
  #
  # Args:
  #   group.data: list of count matrices (subjects x taxa).
  #   pi: one of Est.PI's $MLE/$MOM slots ($params with PI, Taxa, Group).
  #   main: plot title.
  #   retCords: also return the MDS coordinates.
  if(missing(group.data) || missing(pi))
    stop("group.data and/or pi is missing.")
  numGroups <- length(group.data)
  # All data sets must share the same taxa columns
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if(any(taxaCounts != numTaxa)){
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
  }
  # Default group names when the list is unnamed
  if(is.null(names(group.data))){
    grpNames <- paste("Data Set", 1:numGroups)
  }else{
    grpNames <- names(group.data)
  }
  # Stack all samples into one matrix
  mData <- do.call("rbind", group.data)
  numSubs <- nrow(mData)
  # Reshape pi$params$PI into a groups x taxa matrix (assumes params rows
  # are ordered group-by-group, as produced by Est.PI), then reorder its
  # columns to match the sample matrix before appending
  numTaxa <- ncol(mData)
  piData <- matrix(pi$params$PI, length(group.data), numTaxa, byrow=TRUE)
  colnames(piData) <- pi$params$Taxa[1:numTaxa]
  rownames(piData) <- unique(pi$params$Group)
  piData <- piData[, colnames(mData)]
  mData <- rbind(mData, piData)
  # Ordination coordinates (getBC is a project helper — presumably
  # Bray-Curtis MDS; confirm there)
  loc <- getBC(mData)
  # One color per data set for the samples, then one per pi row
  availCols <- rainbow(numGroups)
  cols <- NULL
  for(i in 1:numGroups)
    cols <- c(cols, rep(availCols[i], nrow(group.data[[i]])))
  cols <- c(cols, availCols)
  # Samples: small circles (pch 16); pi vectors: large triangles (pch 17)
  pch <- c(rep(16, numSubs), rep(17, numGroups))
  cex <- c(rep(1, numSubs), rep(2, numGroups))
  # Plot MDS
  plot(loc, pch=pch, ylab="MDS 2", xlab="MDS 1", col=cols, main=main, cex=cex)
  legend("topright", legend=grpNames, pch=15, col=availCols)
  if(retCords)
    return(loc)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Filter functions
### ~~~~~~~~~~~~~~~~~~~~~
formatDataSets <- function(group.data){
  # Makes every data set comparable: removes zero-read subjects, gives every
  # data set the union of all taxa columns (missing taxa zero-filled), and
  # orders the columns by total abundance, decreasing.
  #
  # Args:
  #   group.data: list of >= 2 count matrices (subjects x taxa) with row and
  #     column names.
  #
  # Returns: list of reformatted matrices with the input list names.
  if(missing(group.data))
    stop("group.data missing.")
  # Make sure we have more than 1 data set
  numGroups <- length(group.data)
  if(numGroups < 2)
    stop("At least 2 data sets are required.")
  # Remove all 0 total read subjects from the data
  group.data <- lapply(group.data, function(x){x[rowSums(x) > 0,, drop=FALSE]})
  # Merge every data set (transposed to taxa x subjects) on taxa row names;
  # by=0 merges on row names, all=TRUE keeps taxa missing from either side
  dataNames <- vector("list", numGroups)
  newData <- NULL
  for(i in 1:length(group.data)){
    tempData <- group.data[[i]]
    # Save the subject names so they can be restored after the merge
    dataNames[[i]] <- rownames(tempData)
    newData <- merge(newData, t(group.data[[i]]), by=0, all=TRUE)
    rownames(newData) <- newData[,1]
    # drop=FALSE keeps the frame shape for single-subject data sets
    newData <- newData[,-1, drop=FALSE]
  }
  # Taxa absent from a data set came out of the merge as NA: make them 0
  newData[is.na(newData)] <- 0
  newData <- t(newData)
  # Remove any all 0 columns and sort columns by total reads, decreasing
  # (drop=FALSE keeps the matrix shape when only one taxa survives)
  newData <- newData[,colSums(newData) != 0, drop=FALSE]
  newData <- newData[,order(colSums(newData), decreasing=TRUE), drop=FALSE]
  # Split the merged matrix back into one block of rows per data set
  retData <- vector("list", numGroups)
  base <- 0
  for(i in 1:numGroups){
    # drop=FALSE keeps single-subject data sets as matrices so the
    # rownames assignment below cannot fail
    retData[[i]] <- newData[(base+1):(nrow(group.data[[i]])+ base),, drop=FALSE]
    rownames(retData[[i]]) <- dataNames[[i]]
    base <- base + nrow(group.data[[i]])
  }
  names(retData) <- names(group.data)
  return(retData)
}
Data.filter <- function(data, order.type="data", minReads=0, numTaxa=NULL, perTaxa=NULL){
  # Filters and orders a count matrix: drops samples with too few reads and
  # taxa with no reads, orders the taxa, and collapses the rare tail into a
  # single "Other" column.
  #
  # Args:
  #   data: count matrix (subjects x taxa).
  #   order.type: "data" orders taxa by overall abundance; "sample" sorts
  #     each sample's counts individually (taxa identities are lost).
  #   minReads: drop samples whose total reads are <= minReads.
  #   numTaxa: keep the top numTaxa taxa, collapse the rest into "Other".
  #   perTaxa: instead, keep the smallest top set of taxa whose cumulative
  #     read fraction exceeds perTaxa. Mutually exclusive with numTaxa.
  #
  # Returns: the filtered and reordered count matrix.
  if(missing(data))
    stop("data is missing.")
  if(tolower(order.type) != "data" && tolower(order.type) != "sample")
    stop(sprintf("'%s' not recognized, order.type must be 'data' or 'sample'", as.character(order.type)))
  # numTaxa and perTaxa are alternative cutoff mechanisms
  if(!is.null(numTaxa) && !is.null(perTaxa))
    stop("numTaxa and perTaxa cannot be used at the same time")
  if(!is.null(numTaxa)){
    if(numTaxa > ncol(data) || numTaxa <= 0)
      stop(sprintf("numTaxa must be between 0 and %i.", ncol(data)))
  }
  if(!is.null(perTaxa)){
    if(perTaxa >= 1 || perTaxa <= 0)
      stop("perTaxa must be between 0 and 1.")
  }
  # Neither given: keep everything (no collapsing)
  if(is.null(numTaxa) && is.null(perTaxa))
    numTaxa <- ncol(data)
  # (removed an unused `taxaNames` local)
  # Drop all subjects that don't have enough reads
  data <- data[rowSums(data)>minReads,, drop=FALSE]
  if(nrow(data) < 2)
    stop("minReads is too large and is excluding too many samples. Please try lowering its value.")
  # Drop taxa with no reads at all
  data <- data[,colSums(data)>0, drop=FALSE]
  # Order taxa within each sample, or globally by total abundance
  if(tolower(order.type) == "sample"){
    data <- t(apply(data, 1, function(x){x[order(x, decreasing=TRUE)]}))
  }else{
    # drop=FALSE keeps the matrix shape when only one taxa remains
    data <- data[,order(colSums(data), decreasing=TRUE), drop=FALSE]
  }
  # Percentage-based cutoff: smallest top-taxa set covering perTaxa of reads
  if(!is.null(perTaxa)){
    perNumReadsTaxa <- colSums(data)/sum(data)
    cumSumReads <- cumsum(perNumReadsTaxa)
    taxaAboveThrs <- which(cumSumReads > perTaxa)
    if(length(taxaAboveThrs) == 0){
      numTaxa <- 1
    }else{
      numTaxa <- min(taxaAboveThrs)
    }
  }
  if(numTaxa >= ncol(data)){
    retData <- data
  }else{
    # Collapse everything past the top numTaxa into one "Other" column
    # (drop=FALSE keeps matrices when exactly one taxa is kept/collapsed)
    otherData <- data[,-c(1:numTaxa), drop=FALSE]
    retData <- cbind(data[,1:numTaxa, drop=FALSE], Other=rowSums(otherData))
  }
  return(retData)
}
### ~~~~~~~~~~~~~~~~~~~~~
### MC functions
### ~~~~~~~~~~~~~~~~~~~~~
MC.ZT.statistics <- function(Nrs, numMC=10, fit, type="ha", siglev=0.05) {
  # Monte-Carlo power ("ha") or size ("hnull") estimate for the Z and T
  # one-sample statistics, via repeated draws from ZT.statistics.Hnull.Ha
  # (a project helper).
  #
  # Args:
  #   Nrs: per-subject read counts for the simulated data.
  #   numMC: number of Monte-Carlo draws.
  #   fit: fitted model with $pi (taxa probabilities).
  #   type: "ha" for power or "hnull" for size.
  #   siglev: significance level for the chi-squared cutoff.
  #
  # Returns: cbind(zpval, tpval) — smoothed rejection rates.
  if(missing(Nrs) || missing(fit))
    stop("Nrs and/or fit missing.")
  if(!(tolower(type) %in% c("ha", "hnull")))
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  # One (Z, T) pair per Monte-Carlo draw
  drawMat <- matrix(0, numMC, 2)
  for(mc in seq_len(numMC))
    drawMat[mc,] <- ZT.statistics.Hnull.Ha(Nrs, fit, type)
  # Drop NA draws separately per statistic
  zKeep <- drawMat[,1][!is.na(drawMat[,1])]
  tKeep <- drawMat[,2][!is.na(drawMat[,2])]
  # Chi-squared critical value on numTaxa-1 degrees of freedom
  qAlpha <- qchisq(p=(1-siglev), df=length(fit$pi)-1, ncp=0, lower.tail=TRUE)
  # Smoothed Monte-Carlo rejection rates (never exactly 0)
  zpval <- (sum(zKeep > qAlpha) + 1)/(length(zKeep) + 1)
  tpval <- (sum(tKeep > qAlpha) + 1)/(length(tKeep) + 1)
  cbind(zpval, tpval)
}
MC.Xsc.statistics <- function(Nrs, numMC=10, fit, pi0=NULL, type="ha", siglev=0.05) {
  # Monte-Carlo power ("ha") or size ("hnull") estimate for the Xsc
  # one-sample statistic, via repeated draws from Xsc.statistics.Hnull.Ha
  # (a project helper).
  #
  # Args:
  #   Nrs: per-subject read counts for the simulated data.
  #   numMC: number of Monte-Carlo draws.
  #   fit: fitted model with $pi (taxa probabilities).
  #   pi0: hypothesized taxa probability vector (required for type "ha").
  #   type: "ha" for power or "hnull" for size.
  #   siglev: significance level for the chi-squared cutoff.
  #
  # Returns: the smoothed Monte-Carlo rejection rate.
  if(missing(Nrs) || missing(fit))
    stop("Nrs and/or fit missing.")
  if(is.null(pi0) && tolower(type) == "ha")
    stop("pi0 cannot be null with type 'ha'.")
  if(!(tolower(type) %in% c("ha", "hnull")))
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  # One Xsc draw per Monte-Carlo iteration
  mcDraws <- rep(0, numMC)
  for(mc in seq_len(numMC))
    mcDraws[mc] <- Xsc.statistics.Hnull.Ha(Nrs, fit, type, pi0)
  # Drop NA draws before computing the rejection rate
  mcDraws <- mcDraws[!is.na(mcDraws)]
  # Chi-squared critical value on numTaxa-1 degrees of freedom
  qAlpha <- qchisq(p=(1-siglev), df=length(fit$pi)-1, ncp=0, lower.tail=TRUE)
  # Smoothed rejection rate (never exactly 0)
  (sum(mcDraws > qAlpha) + 1)/(length(mcDraws) + 1)
}
MC.Xmc.statistics <- function(group.Nrs, numMC=10, pi0, group.pi, group.theta, type="ha", siglev=0.05) {
  # Monte-Carlo power ("ha") or size ("hnull") estimate for the Xmc
  # several-sample statistic, via repeated draws from Xmc.statistics.Hnull.Ha
  # (a project helper).
  #
  # Args:
  #   group.Nrs: list of per-group read-count vectors.
  #   numMC: number of Monte-Carlo draws.
  #   pi0: null taxa probability vector.
  #   group.pi: per-group taxa probability matrix (required for type "ha").
  #   group.theta: per-group overdispersion values.
  #   type: "ha" for power or "hnull" for size.
  #   siglev: significance level for the chi-squared cutoff.
  #
  # Returns: the smoothed Monte-Carlo rejection rate.
  if(missing(group.theta) || missing(pi0) || missing(group.Nrs))
    stop("group.Nrs, pi0 and/or group.theta missing.")
  if(missing(group.pi) && tolower(type) == "ha")
    stop("group.pi missing.")
  if(!(tolower(type) %in% c("ha", "hnull")))
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  numGroups <- length(group.Nrs)
  numTaxa <- length(pi0)
  # Per-group parameter sets: group-specific pis under "ha", the shared
  # null pi0 under "hnull"
  groupParameter <- lapply(seq_len(numGroups), function(g){
    grpPi <- if(tolower(type) == "ha") group.pi[g,] else pi0
    list(pi=grpPi, theta=group.theta[g], nrs=group.Nrs[[g]])
  })
  # One Xmc draw per Monte-Carlo iteration
  mcDraws <- rep(0, numMC)
  for(mc in seq_len(numMC))
    mcDraws[mc] <- Xmc.statistics.Hnull.Ha(groupParameter, pi0)
  # Chi-squared critical value
  qAlpha <- qchisq(p=(1-siglev), df=length(group.theta)*(numTaxa-1), ncp=0, lower.tail=TRUE)
  # Smoothed rejection rate (never exactly 0)
  (sum(mcDraws > qAlpha) + 1)/(length(mcDraws) + 1)
}
MC.Xmcupo.statistics <- function(group.Nrs, numMC=10, pi0, group.pi, group.theta, type="ha", siglev=0.05) {
  # Monte-Carlo power ("ha") or size ("hnull") estimate for the Xmcupo
  # several-sample statistic, via repeated draws from
  # Xmcupo.statistics.Hnull.Ha (a project helper).
  #
  # Args:
  #   group.Nrs: list of per-group read-count vectors.
  #   numMC: number of Monte-Carlo draws.
  #   pi0: null taxa probability vector (required for type "hnull").
  #   group.pi: per-group taxa probability matrix (required for type "ha").
  #   group.theta: per-group overdispersion values.
  #   type: "ha" for power or "hnull" for size.
  #   siglev: significance level for the chi-squared cutoff.
  #
  # Returns: the smoothed Monte-Carlo rejection rate.
  if(missing(group.theta) || missing(group.Nrs))
    stop("group.Nrs and/or group.theta missing.")
  if(missing(group.pi) && tolower(type) == "ha")
    stop("group.pi missing.")
  if(missing(pi0) && tolower(type) == "hnull")
    stop("pi0 missing.")
  if(!(tolower(type) %in% c("ha", "hnull")))
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  numGroups <- length(group.Nrs)
  underHa <- tolower(type) == "ha"
  # Taxa count comes from the alternative pis under "ha", from pi0 otherwise
  numTaxa <- if(underHa) ncol(group.pi) else length(pi0)
  # Per-group parameter sets fed to the statistic helper
  groupParameter <- lapply(seq_len(numGroups), function(g){
    list(pi=if(underHa) group.pi[g,] else pi0, theta=group.theta[g], nrs=group.Nrs[[g]])
  })
  # One Xmcupo draw per Monte-Carlo iteration
  mcDraws <- rep(0, numMC)
  for(mc in seq_len(numMC))
    mcDraws[mc] <- Xmcupo.statistics.Hnull.Ha(groupParameter)
  # Chi-squared critical value
  qAlpha <- qchisq(p=(1-siglev), df=length(group.theta)*(numTaxa-1), ncp=0, lower.tail=TRUE)
  # Smoothed rejection rate (never exactly 0)
  (sum(mcDraws > qAlpha) + 1)/(length(mcDraws) + 1)
}
MC.Xdc.statistics <- function(group.Nrs, numMC=10, alphap, type="ha", siglev=0.05, est="mom") {
  # Monte-Carlo power ("ha") or size ("hnull") estimate for the Xdc
  # several-sample statistic, via repeated draws from Xdc.statistics.Hnull.Ha
  # (a project helper).
  #
  # Args:
  #   group.Nrs: list of per-group read-count vectors.
  #   numMC: number of Monte-Carlo draws.
  #   alphap: Dirichlet parameters — a single vector under "hnull", a matrix
  #     with one row per group under "ha".
  #   type: "ha" for power or "hnull" for size.
  #   siglev: significance level for the chi-squared cutoff.
  #   est: estimation method passed through, "mom" or "mle".
  #
  # Returns: the smoothed Monte-Carlo rejection rate.
  if(missing(alphap) || missing(group.Nrs))
    stop("group.Nrs and/or alphap missing.")
  if(!(tolower(type) %in% c("ha", "hnull")))
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  if(!(tolower(est) %in% c("mom", "mle")))
    stop(sprintf("Est '%s' not found. Est must be 'mle' or 'mom'.", as.character(est)))
  numGroups <- length(group.Nrs)
  # Shared alpha vector under the null; one row per group under "ha"
  numTaxa <- if(tolower(type) == "hnull") length(alphap) else ncol(alphap)
  # One Xdc draw per Monte-Carlo iteration
  mcDraws <- rep(0, numMC)
  for(mc in seq_len(numMC))
    mcDraws[mc] <- Xdc.statistics.Hnull.Ha(alphap, group.Nrs, type, est)
  # Chi-squared critical value
  qAlpha <- qchisq(p=(1-siglev), df=(numGroups-1)*numTaxa, ncp=0, lower.tail=TRUE)
  # Smoothed rejection rate (never exactly 0)
  (sum(mcDraws > qAlpha) + 1)/(length(mcDraws) + 1)
}
# Monte-Carlo size/power estimate for the Xoc statistic.
#
# Args:
#   group.Nrs: list of per-group read-count vectors.
#   numMC: number of Monte-Carlo iterations.
#   group.alphap: DM shape parameters (vector under "hnull", matrix under "ha").
#   type: "ha" for power, "hnull" for size.
#   siglev: significance level for the chi-square reference quantile.
# Returns: Monte-Carlo p-value with the standard +1 correction.
MC.Xoc.statistics <- function(group.Nrs, numMC=10, group.alphap, type="ha", siglev=0.05) {
  if (missing(group.alphap) || missing(group.Nrs))
    stop("group.Nrs and/or group.alphap missing.")
  typeLC <- tolower(type)
  if (typeLC != "ha" && typeLC != "hnull")
    stop(sprintf("Type '%s' not found. Type must be 'ha' for power or 'hnull' for size.\n", as.character(type)))
  numGroups <- length(group.Nrs)
  # Simulate the Xoc statistic numMC times
  XocStatVector <- vapply(seq_len(numMC),
                          function(i) Xoc.statistics.Hnull.Ha(group.Nrs, group.alphap, type),
                          numeric(1))
  # Chi-square reference quantile
  qAlpha <- qchisq(p=(1-siglev), df=(numGroups-1), ncp=0, lower.tail=TRUE)
  (sum(XocStatVector > qAlpha) + 1)/(length(XocStatVector) + 1)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Sample functions
### ~~~~~~~~~~~~~~~~~~~~~
# One-sample test that the mean composition of `data` equals pi0, using the
# Xsc statistic (chi-square with numTaxa - 1 degrees of freedom).
#
# Args:
#   data: count matrix, subjects in rows and taxa in columns.
#   pi0: hypothesised taxa-probability vector.
# Returns: list with the Xsc statistic and its p-value.
Xsc.onesample <- function(data, pi0){
  if (missing(data) || missing(pi0))
    stop("data and/or pi0 missing.")
  numReadsSubs <- rowSums(data)
  # The data set must have the same number of taxa as pi0
  # (previously this assignment was duplicated)
  numTaxa <- length(pi0)
  if (ncol(data) != numTaxa)
    stop("Every data set must have the same length as pi0")
  # Method-of-moments parameter estimates
  fit.MoM <- DM.MoM(data)
  # Get Xsc and calculate the p-value; the upper tail is computed directly
  # to avoid the precision loss of 1 - pchisq() for small p-values
  Xsc <- Xsc.statistics(fit.MoM$pi, fit.MoM$theta, numReadsSubs, pi0)
  pval <- pchisq(q=Xsc, df=numTaxa-1, ncp=0, lower.tail=FALSE)
  RAD.mean.test <- list("Xsc statistics"=Xsc, "p value"=pval)
  return(RAD.mean.test)
}
# Several-sample test that all groups share the mean composition pi0, using
# the Xmc statistic (chi-square with numGroups*(numTaxa-1) df).
#
# Args:
#   group.data: list of count matrices, one per group (subjects x taxa).
#   pi0: hypothesised common taxa-probability vector.
# Returns: list with the Xmc statistic and its p-value.
Xmc.sevsample <- function(group.data, pi0){
  if (missing(group.data) || missing(pi0))
    stop("group.data and/or pi0 missing.")
  # Check every data set has the same number of taxa as pi0
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- length(pi0)
  if (any(taxaCounts != numTaxa))
    stop("Every data set must have matching taxa, including pi0")
  numGroups <- length(group.data)
  # MoM (pi, theta) estimates plus per-subject read counts for each group
  groupParameter <- lapply(group.data, function(x){
    numReadsSubs <- rowSums(x)
    pi.MoM <- colSums(x)/sum(x)
    theta.MoM <- weirMoM(x, pi.MoM)$theta
    return(list(pi=pi.MoM, theta=theta.MoM, nrs=numReadsSubs))
  })
  # Get Xmc and calculate the p-value; the upper tail is computed directly
  # to avoid the precision loss of 1 - pchisq() for small p-values
  Xmc <- Xmc.statistics(groupParameter, pi0)
  pval <- pchisq(q=Xmc, df=numGroups*(numTaxa-1), ncp=0, lower.tail=FALSE)
  sevRAD.mean.test <- list("Xmc statistics"=Xmc, "p value"=pval)
  return(sevRAD.mean.test)
}
# Several-sample test of equal mean composition across groups without a
# specified pi0, using the Xmcupo statistic
# (chi-square with (numGroups-1)*(numTaxa-1) df).
#
# Args:
#   group.data: list of count matrices, one per group (subjects x taxa).
# Returns: list with the Xmcupo statistic and its p-value.
Xmcupo.sevsample <- function(group.data){
  if (missing(group.data))
    stop("group.data is missing.")
  # Make sure all data sets share the same columns; reconcile them if not
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if (any(taxaCounts != numTaxa)) {
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
    numTaxa <- ncol(group.data[[1]])
  }
  numGroups <- length(group.data)
  # MoM (pi, theta) estimates plus per-subject read counts for each group
  groupParameter <- lapply(group.data, function(x){
    numReadsSubs <- rowSums(x)
    pi.MoM <- colSums(x)/sum(x)
    theta.MoM <- weirMoM(x, pi.MoM)$theta
    return(list(pi=pi.MoM, theta=theta.MoM, nrs=numReadsSubs))
  })
  # Get Xmcupo and calculate the p-value; the upper tail is computed directly
  # to avoid the precision loss of 1 - pchisq() for small p-values
  Xmcupo <- Xmcupo.statistics(groupParameter)
  pval <- pchisq(q=Xmcupo, df=(numGroups-1)*(numTaxa-1), ncp=0, lower.tail=FALSE)
  ret <- list("Xmcupo statistics"=Xmcupo, "p value"=pval)
  return(ret)
}
# Several-sample test of equal overdispersion across groups using the Xoc
# likelihood-ratio statistic (chi-square with numGroups - 1 df).
#
# Args:
#   group.data: list of count matrices, one per group (subjects x taxa).
#   epsilon: convergence tolerance forwarded to the dirmult fits.
# Returns: list with the Xoc statistic and its p-value.
Xoc.sevsample <- function(group.data, epsilon=10^(-4)){
  if (missing(group.data))
    stop("group.data missing.")
  # Make sure all data sets share the same columns; reconcile them if not
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if (any(taxaCounts != numTaxa)) {
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
  }
  numGroups <- length(group.data)
  # Get Xoc and calculate the p-value; the upper tail is computed directly
  # to avoid the precision loss of 1 - pchisq() for small p-values
  Xoc <- Xoc.statistics(group.data, epsilon)
  pval <- pchisq(q=Xoc, df=numGroups-1, ncp=0, lower.tail=FALSE)
  sev.overd.test <- list("Xoc statistics"=Xoc, "p value"=pval)
  return(sev.overd.test)
}
# Several-sample test that all groups share one Dirichlet-multinomial
# distribution, using the Xdc likelihood-ratio statistic
# (chi-square with (numGroups-1)*numTaxa df).
#
# Args:
#   group.data: list of count matrices, one per group (subjects x taxa).
#   epsilon: convergence tolerance for the MLE fits (est = "mle" only).
#   est: "mle" for maximum-likelihood fits, "mom" for method of moments.
# Returns: list with the Xdc statistic and its p-value.
Xdc.sevsample <- function(group.data, epsilon=10^(-4), est="mom"){
  if (missing(group.data))
    stop("group.data missing.")
  if (tolower(est) != "mle" && tolower(est) != "mom")
    stop(sprintf("Est '%s' not found. Est must be 'mle' or 'mom'.", as.character(est)))
  # Make sure all data sets share the same columns; reconcile them if not
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if (any(taxaCounts != numTaxa)) {
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
    numTaxa <- ncol(group.data[[1]])
  }
  numGroups <- length(group.data)
  # Get Xdc with the requested estimator
  if (tolower(est) == "mle") {
    Xdc <- Xdc.statistics(group.data, epsilon)
  } else {
    Xdc <- Xdc.statistics.MoM(group.data)
  }
  # Upper tail computed directly to avoid 1 - pchisq() precision loss
  pval <- pchisq(q=Xdc, df=(numGroups-1)*numTaxa, ncp=0, lower.tail=FALSE)
  xdc.sevsamp.test <- list("Xdc statistics"=Xdc, "p value"=pval)
  return(xdc.sevsamp.test)
}
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Internal
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### ~~~~~~~~~~~~~~~~~~~~~
### Other functions
### ~~~~~~~~~~~~~~~~~~~~~
# Deprecated, misspelled alias retained for backwards compatibility.
# Emits a warning and forwards all arguments to Kullback.Leibler().
kullbackLeiber <- function(data, plot=TRUE, parallel=FALSE, cores=3){
  warning("This function has been spellchecked. Please use 'Kullback.Leibler' instead.")
  Kullback.Leibler(data, plot, parallel, cores)
}
# Dirichlet-multinomial log-likelihood of the count matrix `data` under the
# shape vector `alphap`. Zero-sum columns of `data` and zero entries of
# `alphap` are dropped independently before evaluation.
# NOTE(review): if only one of the two has a zero in a given taxon the
# remaining columns could fall out of alignment -- confirm callers always
# pass matching supports.
loglikDM <- function(data, alphap){
  counts <- data[, colSums(data) != 0, drop=FALSE]
  shape <- alphap[alphap != 0]
  shapeMat <- t(replicate(nrow(counts), shape))
  # Per-subject normalising terms of the DM density
  normTerm <- sum(lgamma(rowSums(counts) + 1) + lgamma(sum(shape)) -
                  lgamma(sum(shape) + rowSums(counts)))
  # Per-cell terms: lgamma(x + alpha) - lgamma(x + 1) - lgamma(alpha)
  cellTerm <- sum(rowSums(lgamma(sweep(counts, 2, shape, "+")) -
                          lgamma(counts + 1) - lgamma(shapeMat)))
  normTerm + cellTerm
}
# Weir & Hill method-of-moments estimate of the overdispersion parameter
# theta from a count matrix (subjects x taxa) and a composition estimate MoM.
# Returns list(theta, se); se is NULL unless se = TRUE, and a single-subject
# matrix yields list(theta = 0, se = 0).
weirMoM <- function(data, MoM, se=FALSE){
  K <- ncol(data)
  n <- nrow(data)
  # Tiny offset guards against division by zero for all-zero rows
  subjTotals <- rowSums(data) + 0.000001
  taxaTotals <- colSums(data)
  if (n == 1)
    return(list(theta=0, se=0))
  props <- data/subjTotals
  piMat <- matrix(rep(MoM, n), n, K, byrow=TRUE)
  # Between-subject (MSP) and within-subject (MSG) mean squares
  MSP <- sum(rowSums((props - piMat)^2) * subjTotals)/(n - 1)
  MSG <- sum(rowSums(props * (1 - props)) * subjTotals)/(sum(taxaTotals) - n)
  # Effective sample-size correction
  nc <- (sum(subjTotals) - sum(subjTotals^2)/sum(subjTotals))/(n - 1)
  thetaHat <- (MSP - MSG)/(MSP + (nc - 1) * MSG)
  stdErr <- NULL
  if (se)
    stdErr <- sqrt(2 * (1 - thetaHat)^2/(n - 1) * ((1 + (nc - 1) * thetaHat)/nc)^2)
  list(theta=thetaHat, se=stdErr)
}
# Two-dimensional nonmetric MDS ordination of the Bray-Curtis distances
# between row-normalised abundance profiles. Duplicated profiles are removed
# first (isoMDS cannot handle zero distances between distinct points).
getBC <- function(data){
  profiles <- data/rowSums(data)
  dupRows <- duplicated(profiles)
  if (any(dupRows))
    profiles <- profiles[!dupRows,]
  bcDist <- vegan::vegdist(profiles, method="bray")
  mdsFit <- MASS::isoMDS(bcDist, trace=FALSE)
  scaled <- vegan::postMDS(mdsFit$points, bcDist)
  vegan::scores(scaled)[, 1:2]
}
# Inverse-variance-weighted pooled estimate of the common composition pi0
# across several count data sets; the matrix analogue of the pi0 computed
# inside Xmcupo.statistics.
#
# Args:
#   group.data: list of count matrices, one per group (subjects x taxa).
# Returns: named numeric vector pi0 over the taxa present in any group.
pioest <- function(group.data){
  if (missing(group.data))
    stop("data.groups missing.")
  # Make sure all data sets share the same columns; reconcile them if not
  taxaCounts <- sapply(group.data, ncol)
  numTaxa <- taxaCounts[1]
  if (any(taxaCounts != numTaxa)) {
    warning("Group columns do not match, running formatDataSets.")
    group.data <- formatDataSets(group.data)
    numTaxa <- ncol(group.data[[1]])
  }
  numGroups <- length(group.data)
  # Per-group MoM composition (columns) and variance scaling factor.
  # Taxa names are attached up front so they survive the zero-row filter
  # below (previously names(pi0) was assigned afterwards and errored when
  # any all-zero taxa had been dropped).
  pis <- matrix(0, numTaxa, numGroups,
                dimnames=list(colnames(group.data[[1]]), NULL))
  xscs <- rep(0, numGroups)
  for (i in seq_len(numGroups)) {
    tempData <- group.data[[i]]
    numReadsSubs <- rowSums(tempData)
    totalReads <- sum(tempData)
    pi <- colSums(tempData)/totalReads
    theta <- weirMoM(tempData, pi)$theta
    xscs[i] <- (theta * (sum(numReadsSubs^2)-totalReads) + totalReads) / totalReads^2
    pis[, i] <- pi
  }
  # Remove taxa absent from every group, keeping the matrix shape
  pis <- pis[rowSums(pis) != 0, , drop=FALSE]
  # BUG FIX: the previous `pis/xscs` recycled the per-group weights down the
  # taxa rows instead of across the group columns; sweep() divides each
  # column i by xscs[i], matching the pi0 formula used in Xmcupo.statistics
  # (colSums(t(pis)/xscs)/sum(1/xscs)).
  pi0 <- rowSums(sweep(pis, 2, xscs, "/"))/sum(1/xscs)
  return(pi0)
}
### ~~~~~~~~~~~~~~~~~~~~~
### ga functions
### ~~~~~~~~~~~~~~~~~~~~~
# GA fitness function: correlation between the combined distances over the
# selected columns and the covariate distances, penalised by the fraction of
# columns selected. Selections that are empty or outside the allowed size
# range score -2, below any achievable correlation.
gaScoring <- function(indices, covarDists, colDists, distType, lambda, minSolLen, maxSolLen) {
  BAD_RETURN <- -2  # Sentinel worse than any correlation
  numSel <- sum(indices)
  # Reject empty selections and selections outside [minSolLen, maxSolLen]
  if (numSel == 0)
    return(BAD_RETURN)
  if (!is.null(minSolLen) && numSel < minSolLen)
    return(BAD_RETURN)
  if (!is.null(maxSolLen) && numSel > maxSolLen)
    return(BAD_RETURN)
  edges <- which(indices == 1)
  # Combine the per-column distance matrices for the selected columns
  if (distType == "gower") {
    combinedSolDists <- Reduce("+", colDists[edges])/numSel
  } else if (distType == "euclidean") {
    combinedSolDists <- sqrt(Reduce("+", colDists[edges]))
  }
  # Correlation penalised by the proportion of columns selected
  stats::cor(combinedSolDists, covarDists) - lambda * (numSel/length(indices))
}
# Build an initial GA population matrix (popSize x ncol(data)). When popSize
# allows it, the first SUGGESTION_COUNT rows are seeded by thresholding the
# column means at ten quantile levels; remaining rows are random 0/1 vectors
# with roughly a 10:1 ratio of zeros to ones.
gaCreation <- function(data, popSize){
  ZERO_TO_ONE_RATIO <- 10  # Odds of drawing a 0 versus a 1 in random rows
  SUGGESTION_COUNT <- 10   # Number of data-derived starting points
  numCols <- ncol(data)
  population <- matrix(NA, popSize, numCols)
  if (popSize >= SUGGESTION_COUNT) {
    # Threshold the column means at ten quantile levels to seed solutions
    colAvg <- apply(data, 2, mean)
    cutoffs <- stats::quantile(colAvg, seq(.05, 1, 1/SUGGESTION_COUNT))
    for (s in seq_len(SUGGESTION_COUNT))
      population[s, ] <- ifelse(colAvg >= cutoffs[s], 1, 0)
    numCreated <- SUGGESTION_COUNT
  } else {
    numCreated <- 0
  }
  # Fill the remaining rows with random sparse solutions
  if (popSize != SUGGESTION_COUNT) {
    for (child in (numCreated + 1):popSize)
      population[child, ] <- sample(c(rep(0, ZERO_TO_ONE_RATIO), 1), numCols, replace=TRUE)
  }
  population
}
# Plot the GA evaluation summary by iteration: column 4 as the base line
# (black), with columns 6 and 1 overlaid in red and blue respectively.
gaPlot <- function(evalSumm){
  plot(evalSumm[, 4], type="l", ylab="Score", ylim=c(0, 1), lwd=2,
       main="Eval Scores by Iteration", xlab="Iteration")
  # Overlay the remaining tracked columns in the same draw order as before
  for (layer in list(list(col=6, color="red"), list(col=1, color="blue")))
    lines(evalSumm[, layer$col], col=layer$color, lwd=2)
  legend("topleft", colnames(evalSumm)[c(4, 6, 1)], pch=16,
         col=c("black", "red", "blue"))
}
### ~~~~~~~~~~~~~~~~~~~~~
### rpart functions
### ~~~~~~~~~~~~~~~~~~~~~
# rpart "method" init hook for the Dirichlet-multinomial tree: resets the
# per-fit node counter and supplies the node summary function used when the
# tree is printed.
rpartInit <- function(y, offset, parms, wt){
  hmp.pkg.env$EVAL_COUNT_RPART <- 1  # restart the node-label counter
  sfun <- function(yval, dev, wt, ylevel, digits){
    paste0(" mean=", round(mean(yval), 3))
  }
  # Detach the summary function from this frame so rpart does not capture it
  environment(sfun) <- .GlobalEnv
  list(y=y, parms=NULL, numresp=1, numy=ncol(y), summary=sfun)
}
# rpart "method" evaluation hook: node deviance is the negative DM-MoM
# log-likelihood; the label is a running counter unique per node.
#
# Args (rpart callback contract):
#   y: response count matrix for the observations in this node.
#   wt: case weights (unused).
#   parms: user parameters (unused).
# Returns: list(label, deviance) as required by rpart.
rpartEval <- function(y, wt, parms){
  # Assign a unique sequential label to this node
  label <- hmp.pkg.env$EVAL_COUNT_RPART
  hmp.pkg.env$EVAL_COUNT_RPART <- hmp.pkg.env$EVAL_COUNT_RPART + 1
  dev <- -DM.MoM(y)$loglik
  # Treat a non-finite log-likelihood as zero deviance (makes the LRT 0).
  # is.infinite() replaces the previous `dev == Inf || dev == -Inf`, which
  # raised an error instead of skipping when dev was NA/NaN.
  if (is.infinite(dev))
    dev <- 0
  list(label=label, deviance=dev)
}
# rpart "method" split hook for the DM tree. For every candidate split the
# goodness is the likelihood-ratio statistic comparing one DM-MoM fit on the
# whole node against separate fits on the two child groups.
#
# Args (rpart callback contract):
#   y: response count matrix for the observations in this node.
#   wt: case weights (unused).
#   x: candidate split covariate (rpart sorts it when continuous).
#   parms: user parameters (unused).
#   continuous: TRUE for a continuous covariate, FALSE for categorical.
# Returns: list(goodness, direction) as required by rpart; `goodness` has
#   length(x) - 1 entries for continuous covariates (one per gap) and
#   length(unique(x)) - 1 entries for categorical ones.
rpartSplit <- function(y, wt, x, parms, continuous){
# Log-likelihood of the unsplit node (baseline for the LRT)
LL <- DM.MoM(y)$loglik
uniqX <- sort(unique(x))
numUni <- length(uniqX) - 1
# Size and direction of the goodness vector depend on the covariate type
if(continuous){
numTests <- length(x) - 1
dir <- rep(-1, numTests)
}else{
numTests <- numUni
dir <- uniqX
}
# Evaluate the LRT for every candidate split; entries not set below stay 0
LRT <- rep(0, numTests)
for(i in 1:numUni){
if(continuous){
# Split at each unique threshold: <= uniqX[i] vs the rest
id <- which(x <= uniqX[i])
grp1 <- y[id,, drop=FALSE]
grp2 <- y[-id,, drop=FALSE]
}else{
# One-vs-rest split on the i-th category
grp1 <- y[x == uniqX[i],, drop=FALSE]
grp2 <- y[x != uniqX[i],, drop=FALSE]
}
# Single-subject children cannot be fit; leave goodness at 0
if(nrow(grp1) == 1 || nrow(grp2) == 1)
next
LLgrp1 <- DM.MoM(grp1)$loglik
LLgrp2 <- DM.MoM(grp2)$loglik
# Infinite child log-likelihoods would poison the LRT; leave it at 0
if(LLgrp1 == Inf || LLgrp2 == Inf)
next
if(continuous){
# Store the LRT at the gap position of the last observation <= threshold,
# which is the slot rpart expects for that continuous split point
LRT[id[length(id)]] <- -2*(LL-LLgrp1-LLgrp2)
}else{
LRT[i] <- -2*(LL-LLgrp1-LLgrp2)
}
}
ret <- list(goodness=LRT, direction=dir)
return(ret)
}
# Cross-validate a fitted DM rpart tree over its cp (complexity) levels.
# Computes, for each pruning level, the mean squared prediction error across
# numCV folds (optionally in parallel) plus a t-based confidence interval,
# and appends that summary to the tree's cp table.
#
# Args:
#   data: count matrix (subjects x taxa).
#   covars: covariate data frame aligned with `data`.
#   rpartRes: fitted rpart object whose cptable drives the pruning levels.
#   minsplit, minbucket: rpart control values forwarded to the refits.
#   numCV: number of cross-validation folds.
#   parallel: run folds on a cluster via foreach/doParallel when TRUE.
#   cores: maximum number of worker processes when parallel.
# Returns: list(subTree, errorRate, ciInfo).
rpartCV <- function(data, covars, rpartRes, minsplit, minbucket, numCV, parallel, cores){
# Pull out the cp (complexity parameter) table from the fitted tree
cpTable <- rpartRes$cptable
numCPLvls <- nrow(cpTable)
# Replace interior cp values with the geometric mean of adjacent levels so
# pruning falls strictly between the original thresholds
if(numCPLvls > 2){
oldCPs <- cpTable[,1]
cpTable[1, 1] <- Inf
cpTable[numCPLvls, 1] <- 0
for(m in 2:(numCPLvls-1))
cpTable[m, 1] <- sqrt(oldCPs[m] * oldCPs[m-1])
}
# Randomly assign every subject to one of numCV drop-out folds
numSub <- nrow(data)
dropNums <- cut(1:numSub, numCV, FALSE)
dropGrps <- sample(dropNums, numSub)
errorRate <- vector("list", numCV)
if(parallel){
cl <- parallel::makeCluster(min(cores, numCV))
doParallel::registerDoParallel(cl)
tryCatch({
cvRes <- foreach::foreach(k=1:numCV, .combine=append, .multicombine=FALSE, .inorder=FALSE, .errorhandling="pass", .packages=c("rpart", "HMP")) %dopar%{
cvRes <- rpartCVSingle(data, covars, k, cpTable, dropGrps, numCPLvls, minsplit, minbucket)
return(list(cvRes))
}
}, finally = {
parallel::stopCluster(cl) # Always release the workers, even on error
}
)
errorRate <- lapply(cvRes, function(x)x[[1]])
subTree <- lapply(cvRes, function(x)x[[2]])
}else{
# Sequential fallback: run each fold in turn
errorRate <- vector("list", numCV)
subTree <- vector("list", numCV)
for(k in 1:numCV){
cvRes <- rpartCVSingle(data, covars, k, cpTable, dropGrps, numCPLvls, minsplit, minbucket)
errorRate[[k]] <- cvRes[[1]]
subTree[[k]] <- cvRes[[2]]
}
}
# Root the per-fold errors (rows = cp levels, columns = folds)
error <- sapply(errorRate, sqrt)
# Mean, t-based CI, and standard error of the error at each cp level
ciInfo <- matrix(NA, numCPLvls, 4)
for(j in 1:numCPLvls){
ciInfo[j, 1] <- mean(error[j,])
ciInfo[j, 2:3] <- rpartCI(error[j,], 0.95)
ciInfo[j, 4] <- sd(error[j,])/sqrt(ncol(error))
}
ciInfo <- cbind(ciInfo, rank(ciInfo[,1]))
colnames(ciInfo) <- c("MSE", "Lower", "Upper", "SE", "Rank")
# Append the CV summary columns to the original cp table
cpTable2 <- cbind(rpartRes$cptable, ciInfo)
return(list(subTree=subTree, errorRate=do.call("cbind", errorRate), ciInfo=cpTable2))
}
# Run one cross-validation fold: refit the tree without fold `cvNum`, prune
# it at every cp level, and score each pruned tree by the mean squared
# distance from each held-out subject's abundance profile to the mean
# profile of its assigned terminal node.
#
# Args:
#   data, covars: full count matrix and covariates.
#   cvNum: index of the fold to hold out.
#   cpTable: cp table with adjusted pruning thresholds (see rpartCV).
#   dropGrps: fold assignment per subject.
#   numCPLvls: number of cp levels to evaluate.
#   minsplit, minbucket: rpart control values for the refit.
# Returns: list(errorRate, subTree) -- per-cp-level MSE and pruned trees.
rpartCVSingle <- function(data, covars, cvNum, cpTable, dropGrps, numCPLvls, minsplit, minbucket){
# Locate the rows belonging to the held-out fold
dropLoc <- which(dropGrps == cvNum)
# Separate the held-out data
subData <- data[-dropLoc,, drop=FALSE]
dropData <- data[dropLoc,, drop=FALSE]
# Separate the held-out covariates
subCovs <- covars[-dropLoc,, drop=FALSE]
dropCovs <- covars[dropLoc,, drop=FALSE]
# Refit the full tree on the retained subjects only
subRpartRes <- DM.Rpart.Base(subData, subCovs, FALSE, minsplit, minbucket)$fullTree
# Convert counts to relative abundances for the distance computation below
subData <- subData/(rowSums(subData))
dropData <- dropData/(rowSums(dropData))
# Score every pruning level on the held-out subjects
MSEn <- rep(NA, numCPLvls)
subTree <- vector("list", numCPLvls)
for(i in 1:numCPLvls){
subTree[[i]] <- rpart::prune(subRpartRes, cp=cpTable[i, 1])
pruneSubTree <- subTree[[i]]
subPre <- predict(pruneSubTree, newdata=subCovs, type="vector")
dropPre <- predict(pruneSubTree, newdata=dropCovs, type="vector")
# Squared distance from each held-out subject to the mean abundance of the
# training subjects that landed in the same terminal node
tempDist <- 0
for(j in 1:length(dropPre)){
tempVal <- (dist(rbind(dropData[j,, drop=FALSE], colMeans(subData[subPre == dropPre[j],, drop=FALSE]))))^2
if(!is.na(tempVal))
tempDist <- tempDist + tempVal
}
MSEn[i] <- tempDist/length(dropPre)
}
names(MSEn) <- cpTable[,2] + 1
return(list(errorRate=MSEn, subTree=subTree))
}
# Two-sided t-based confidence interval for the mean of `vector` at the
# given confidence level (e.g. 0.95). Returns c(Lower, Upper).
rpartCI <- function(vector, interval) {
  n <- length(vector)
  m <- mean(vector)
  # Half-width from the t distribution with n - 1 degrees of freedom
  halfWidth <- qt((interval + 1)/2, df = n - 1) * sd(vector)/sqrt(n)
  c("Lower" = m - halfWidth, "Upper" = m + halfWidth)
}
# Annotate an rpart fit's splits table with each split's type (primary,
# competing, surrogate) and the primary split it belongs to, by walking the
# fit$frame node rows in order and consuming rows of fit$splits.
# NOTE(review): this assumes fit$splits rows appear grouped as
# primary, competing..., surrogate... per non-leaf node, matching the
# ncompete/nsurrogate counts in fit$frame -- confirm against the rpart
# object layout if rpart changes.
#
# Args:
#   fit: a fitted rpart object with at least one split.
# Returns: data frame of fit$splits with added `var`, `type` and `primary`
#   columns.
rpartCS <- function(fit) {
# Pull out split information
splitNames <- rownames(fit$splits)
allVars <- colnames(attributes(fit$terms)$factors)
# Replace duplicate-prone rownames with indices so data.frame() is happy
rownames(fit$splits) <- 1:nrow(fit$splits)
splits <- data.frame(fit$splits)
splits$var <- splitNames
splits$type <- ""
splits$primary <- ""
# Frame rows describe the tree nodes; keep only the splitting (non-leaf) ones
frame <- as.data.frame(fit$frame)
frame$var <- as.character(frame$var)
primeFr <- frame[frame$var != "<leaf>",]
# Walk every splitting node, consuming its primary, competing and surrogate
# rows from the splits table in order
index <- 0
for(i in 1:nrow(primeFr)){
spltPrimName <- paste("Split", primeFr$yval[i], primeFr$var[i])
# The first consumed row is the node's primary split
index <- index + 1
splits$type[index] <- "primary"
splits$primary[index] <- spltPrimName
# Then any competing splits for this node
if(primeFr$ncompete[i] > 0){
for(j in 1:primeFr$ncompete[i]){
index <- index + 1
splits$type[index] <- "competing"
splits$primary[index] <- spltPrimName
}
}
# Then any surrogate splits for this node
if(primeFr$nsurrogate[i] > 0){
for(j in 1:primeFr$nsurrogate[i]){
index <- index + 1
splits$type[index] <- "surrogate"
splits$primary[index] <- spltPrimName
}
}
}
return(splits)
}
### ~~~~~~~~~~~~~~~~~~~~~
### Stat functions
### ~~~~~~~~~~~~~~~~~~~~~
# Generalised Wald-type statistic (Xmcupo) comparing taxa compositions
# across groups without a pre-specified common pi0: the common composition
# is estimated as the inverse-variance-weighted mean of the group pi's.
#
# Args:
#   groupParameter: list of per-group lists with elements pi, theta, nrs.
# Returns: the Xmcupo statistic (scalar).
Xmcupo.statistics <- function(groupParameter){
  numGroups <- length(groupParameter)
  numTaxa <- length(groupParameter[[1]]$pi)
  pis <- matrix(0, numTaxa, numGroups)
  xscs <- rep(0, numGroups)
  for (g in seq_len(numGroups)) {
    par <- groupParameter[[g]]
    totalReads <- sum(par$nrs)
    # Per-group variance scaling factor from theta and the read counts
    xscs[g] <- (par$theta * (sum(par$nrs^2) - totalReads) + totalReads)/totalReads^2
    pis[, g] <- par$pi
  }
  # Drop taxa absent from every group
  pis <- pis[rowSums(pis) != 0, ]
  # Inverse-variance-weighted common composition estimate
  pi0 <- colSums(t(pis)/xscs)/sum(1/xscs)
  # Weighted chi-square-type distance of each group's pi from pi0
  sum(colSums((pis - pi0)^2/pi0)/xscs)
}
# Z statistic from a count matrix (subjects x taxa), built from the
# per-taxon and per-subject second factorial moments of the counts.
Z.statistics <- function(data){
  K <- ncol(data)
  taxaTotals <- colSums(data)
  subjTotals <- rowSums(data)
  grandTotal <- sum(data)
  # Sum over taxa of sum(x*(x-1)) scaled by the taxon total
  taxaSqSum <- sum(apply(data, 2, function(x) sum((x - 1) * x))/taxaTotals)
  # Second factorial moment of the subject totals
  subjSqSum <- sum(subjTotals * (subjTotals - 1))
  (grandTotal * taxaSqSum - subjSqSum)/sqrt(2 * (K - 1) * subjSqSum)
}
# T statistic from a count matrix (subjects x taxa): chi-square-style sum of
# squared deviations from the counts expected under independence of subject
# and taxon totals, scaled by the per-taxon totals.
T.statistics <- function(data){
  taxaTotals <- colSums(data)
  subjTotals <- rowSums(data)
  grandTotal <- sum(data)
  # Expected counts under independence: outer(subjects, taxa)/total
  expected <- (subjTotals %*% t(taxaTotals))/grandTotal
  sum(colSums((data - expected)^2)/taxaTotals)
}
# Xmc statistic: sum of the per-group Xsc statistics, each comparing one
# group's (pi, theta, nrs) against the common reference composition pi0.
Xmc.statistics <- function(groupParameter, pi0){
  perGroup <- vapply(groupParameter, function(par) {
    Xsc.statistics(par$pi, par$theta, par$nrs, pi0)
  }, numeric(1))
  sum(perGroup)
}
# Wald-type statistic comparing an observed composition pi1 to a reference
# pi0, scaled by the DM-style covariance. MASS::ginv handles the singular
# covariance that arises when pi0 is a full composition (sums to one).
Xsc.statistics <- function(pi1, theta, numReads, pi0){
  totalReads <- sum(numReads)
  # Overdispersion-adjusted scaling derived from theta and the read counts
  scaling <- (theta*(sum(numReads^2)-totalReads) + totalReads) / totalReads^2
  covMat <- scaling * (diag(pi0) - pi0 %*% t(pi0))
  diffVec <- pi1 - pi0
  as.vector(t(diffVec) %*% MASS::ginv(covMat) %*% diffVec)
}
# Likelihood-ratio statistic testing equal overdispersion across groups:
# -2 * (loglik under a common theta - sum of unconstrained per-group logliks).
Xoc.statistics <- function(group.data, epsilon=10^(-4)){
  numGroups <- length(group.data)
  thetas <- rep(0, numGroups)
  logliks <- rep(0, numGroups)
  pis <- vector("list", numGroups)
  # Unconstrained MLE fit per group, seeded with the MoM theta estimate
  for (g in seq_len(numGroups)) {
    momTheta <- DM.MoM(group.data[[g]])$theta
    fit <- dirmult::dirmult(group.data[[g]], initscalar=(1-momTheta)/momTheta, epsilon=epsilon, trace=FALSE)
    thetas[g] <- fit$theta
    logliks[g] <- fit$loglik
    pis[[g]] <- fit$pi
  }
  # Constrained fit forcing a single theta across groups
  equalFit <- dirmult::equalTheta(group.data, mean(thetas), epsilon, FALSE, pis)
  as.vector(-2*(equalFit$loglik - sum(logliks)))
}
# Likelihood-ratio statistic testing whether all groups share one DM
# distribution: -2 * (pooled loglik - sum of per-group logliks), all fits
# by maximum likelihood seeded from the MoM theta estimate.
Xdc.statistics <- function(group.data, epsilon=10^(-4)){
  fitLoglik <- function(x) {
    momTheta <- DM.MoM(x)$theta
    dirmult::dirmult(x, initscalar=(1-momTheta)/momTheta, epsilon=epsilon, trace=FALSE)$loglik
  }
  # Unconstrained per-group fits
  logliks <- vapply(group.data, fitLoglik, numeric(1))
  # Constrained fit pooling all groups into one data set
  pooledLoglik <- fitLoglik(do.call(rbind, group.data))
  -2*(pooledLoglik - sum(logliks))
}
# Method-of-moments analogue of Xdc.statistics: the same likelihood-ratio
# form, but every fit uses the fast DM.MoM estimator instead of the MLE.
Xdc.statistics.MoM <- function(group.data){
  # Per-group MoM logliks
  logliks <- vapply(group.data, function(x) DM.MoM(x)$loglik, numeric(1))
  # Pooled MoM fit over all groups combined
  pooledFit <- DM.MoM(do.call(rbind, group.data))
  -2*(pooledFit$loglik - sum(logliks))
}
### ~~~~~~~~~~~~~~~~~~~~~
### Hnull / Ha functions
### ~~~~~~~~~~~~~~~~~~~~~
# One Monte-Carlo draw of the Xmcupo statistic: simulate DM data for every
# group from its (pi, theta, nrs), re-estimate (pi, theta) from the
# simulated counts, and evaluate Xmcupo on the re-estimated parameters.
#
# Args:
#   groupParameter: list of per-group lists with elements pi, theta, nrs.
# Returns: one simulated Xmcupo statistic (scalar).
Xmcupo.statistics.Hnull.Ha <- function(groupParameter){
numGroups <- length(groupParameter)
numTaxa <- length(groupParameter[[1]]$pi)
genGroupParameter <- vector("list", numGroups)
for(i in 1:numGroups){
pi <- groupParameter[[i]]$pi
theta <- groupParameter[[i]]$theta
numReads <- groupParameter[[i]]$nrs
# Draw counts from the DM implied by (pi, theta): shape = pi*(1-theta)/theta
genData <- Dirichlet.multinomial(numReads, pi*(1-theta)/theta)
genTotalReads <- sum(genData)
genPi <- colSums(genData)/genTotalReads
# Replace any zero pi estimates with a small positive value and subtract
# the mass evenly from the non-zero entries so the vector still sums to 1
# (Xmcupo divides by pi0, so zeros are not allowed)
if(any(genPi==0)){
numZero <- sum(genPi==0)
numNonZero <- numTaxa - numZero
genPi[which(genPi!=0)] <- genPi[which(genPi!=0)] - numZero/(numNonZero*2*(genTotalReads+1))
genPi[which(genPi==0)] <- 1/(2*(genTotalReads+1))
}
# Re-estimate theta from the simulated counts
genTheta <- weirMoM(genData, genPi)$theta
genGroupParameter[[i]] <- list(pi=genPi, theta=genTheta, nrs=numReads)
}
# Evaluate Xmcupo on the re-estimated parameters
Xmcupo <- Xmcupo.statistics(genGroupParameter)
return(Xmcupo)
}
# One Monte-Carlo draw of the (Z, T) statistic pair: generate a data set
# under H0 (multinomial from fit$pi) or Ha (DM from fit$gamma), then
# evaluate both statistics on it.
ZT.statistics.Hnull.Ha <- function(Nrs, fit, type){
  genData <- if (tolower(type) == "hnull") {
    Multinomial(Nrs, fit$pi)
  } else {
    Dirichlet.multinomial(Nrs, fit$gamma)
  }
  c(Z.statistics(genData), T.statistics(genData))
}
# One Monte-Carlo draw of the Xmc statistic: simulate DM data for every
# group from its (pi, theta, nrs), re-estimate (pi, theta) by MoM, then
# evaluate Xmc of the simulated fits against pi0.
Xmc.statistics.Hnull.Ha <- function(groupParameter, pi0){
  genGroupParameter <- lapply(groupParameter, function(par) {
    # Draw counts from the DM implied by (pi, theta)
    genData <- Dirichlet.multinomial(par$nrs, par$pi * (1 - par$theta)/par$theta)
    genPi <- colSums(genData)/sum(genData)
    list(pi=genPi, theta=weirMoM(genData, genPi)$theta, nrs=par$nrs)
  })
  Xmc.statistics(genGroupParameter, pi0)
}
# One Monte-Carlo draw of the Xsc statistic: simulate a DM data set from
# fit$gamma, refit it by MoM, and evaluate Xsc against fit$pi ("hnull",
# size) or pi0 ("ha", power).
Xsc.statistics.Hnull.Ha <- function(Nrs, fit, type, pi0){
  genFit <- DM.MoM(Dirichlet.multinomial(Nrs, fit$gamma))
  refPi <- if (tolower(type) == "ha") pi0 else fit$pi
  Xsc.statistics(genFit$pi, genFit$theta, Nrs, refPi)
}
# One Monte-Carlo draw of the Xoc statistic: simulate one DM data set per
# group -- a common shape vector under "hnull", the group's own row of
# group.alphap under "ha" -- and evaluate Xoc on the simulated groups.
Xoc.statistics.Hnull.Ha <- function(group.Nrs, group.alphap, type){
  numGroups <- length(group.Nrs)
  isHa <- tolower(type) == "ha"
  genGroupData <- vector("list", numGroups)
  for (g in seq_len(numGroups)) {
    shape <- if (isHa) group.alphap[g, ] else group.alphap
    genGroupData[[g]] <- Dirichlet.multinomial(group.Nrs[[g]], shape)
  }
  Xoc.statistics(genGroupData)
}
# One Monte-Carlo draw of the Xdc statistic: simulate one DM data set per
# group -- a common shape vector under "hnull", the group's own row of
# alphap under "ha" -- then evaluate Xdc with the requested estimator.
Xdc.statistics.Hnull.Ha <- function(alphap, group.Nrs, type, est){
  numGroups <- length(group.Nrs)
  isHa <- tolower(type) == "ha"
  genGroupData <- vector("list", numGroups)
  for (g in seq_len(numGroups)) {
    shape <- if (isHa) alphap[g, ] else alphap
    genGroupData[[g]] <- Dirichlet.multinomial(group.Nrs[[g]], shape)
  }
  if (tolower(est) == "mle") {
    Xdc.statistics(genGroupData)
  } else {
    Xdc.statistics.MoM(genGroupData)
  }
}
### ~~~~~~~~~~~~~~~~~~~~~
### OLD
### ~~~~~~~~~~~~~~~~~~~~~
# Legacy entry point: fit a Dirichlet-multinomial rpart tree with the old
# split hook (rpartSplitOld), optionally plot it, and return the tree plus
# its cp table and split summary. Kept for backwards compatibility.
#
# Args:
#   data: count matrix (subjects x taxa).
#   covars: covariate data frame aligned with `data`.
#   plot: draw the tree via rpart.plot when TRUE.
#   minsplit, minbucket, cp: rpart control values.
# Returns: list(cpTable, fullTree, bestTree, subTree, errorRate, size, splits).
DM.Rpart.Base.Old <- function(data, covars, plot=TRUE, minsplit=1, minbucket=1, cp=0){
if(missing(data) || missing(covars))
stop("data and/or covars are missing.")
# Wire the DM hooks into rpart's user-method interface
methods <- list(init=rpartInit, eval=rpartEval, split=rpartSplitOld)
rpartRes <- rpart::rpart(as.matrix(data) ~., data=covars, method=methods, minsplit=minsplit, minbucket=minbucket, cp=cp)
cpInfo <- rpartRes$cptable
# Tree size = number of splits at the last cp level + 1
size <- cpInfo[nrow(cpInfo), 2] + 1
# Split details are only available when the tree actually split
splits <- NULL
if(size > 1)
splits <- rpartCS(rpartRes)
# Plot the rpart results
if(plot)
suppressWarnings(rpart.plot::rpart.plot(rpartRes, type=2, extra=101, box.palette=NA, branch.lty=3, shadow.col="gray", nn=FALSE))
return(list(cpTable=cpInfo, fullTree=rpartRes, bestTree=rpartRes, subTree=NULL, errorRate=NULL, size=size, splits=splits))
}
# Legacy rpart split hook (predecessor of rpartSplit). For continuous
# covariates it splits between consecutive observations by row position,
# relying on rpart passing y sorted by x; the newer rpartSplit splits at
# unique thresholds instead. Kept for DM.Rpart.Base.Old.
#
# Args (rpart callback contract):
#   y: response count matrix; wt: weights (unused); x: split covariate;
#   parms: user parameters (unused); continuous: covariate type flag.
# Returns: list(goodness, direction) as required by rpart.
rpartSplitOld <- function(y, wt, x, parms, continuous){
# Log-likelihood of the unsplit node (baseline for the LRT)
LL <- DM.MoM(y)$loglik
# Size and direction of the goodness vector depend on the covariate type
if(continuous){
numTests <- length(x) - 1
dir <- rep(-1, numTests)
}else{
uniqX <- sort(unique(x))
numTests <- length(uniqX) - 1
dir <- uniqX
}
# Evaluate the LRT for every candidate split; skipped entries stay 0
LRT <- rep(0, numTests)
for(i in 1:numTests){
if(continuous){
# Positional split: rows 1..i vs the rest (assumes y ordered by x)
grp1 <- y[1:i,, drop=FALSE]
grp2 <- y[-c(1:i),, drop=FALSE]
}else{
# One-vs-rest split on the i-th category
grp1 <- y[x == uniqX[i],, drop=FALSE]
grp2 <- y[x != uniqX[i],, drop=FALSE]
}
# Single-subject children cannot be fit; leave goodness at 0
if(nrow(grp1) == 1 || nrow(grp2) == 1)
next
LLgrp1 <- DM.MoM(grp1)$loglik
LLgrp2 <- DM.MoM(grp2)$loglik
# Infinite child log-likelihoods would poison the LRT; leave it at 0
if(LLgrp1 == Inf || LLgrp2 == Inf)
next
LRT[i] <- -2*(LL-LLgrp1-LLgrp2)
}
ret <- list(goodness=LRT, direction=dir)
return(ret)
}
|
context("get_ag_bulletin")

# Expected column names of an ag bulletin data frame, in order
bulletin_cols <- c(
  "product_id", "state", "dist", "wmo", "site", "station", "full_name",
  "obs_time_local", "obs_time_utc", "time_zone", "lat", "lon", "elev",
  "bar_ht", "start", "end", "r", "tn", "tx", "twd", "ev", "tg", "sn",
  "t5", "t10", "t20", "t50", "t1m", "wr"
)

# Test that get_ag_bulletin returns a data frame with 29 columns ---------------
test_that("get_ag_bulletin returns 29 columns", {
  skip_on_cran()
  bom_bulletin <- get_ag_bulletin(state = "QLD")
  expect_equal(ncol(bom_bulletin), 29)
  expect_named(bom_bulletin, bulletin_cols)
})
# Test that get_ag_bulletin returns the requested state bulletin ---------------
# The seven state/territory products share identical behaviour, so the
# per-state tests are generated from one loop (DRY). The names of the vector
# reproduce the original test descriptions byte-for-byte (ACT and NSW share
# one product, hence the "ACT/NSW" label).
state_labels <- c("ACT/NSW" = "NSW", "NT" = "NT", "QLD" = "QLD", "SA" = "SA",
                  "TAS" = "TAS", "VIC" = "VIC", "WA" = "WA")
for (lbl in names(state_labels)) {
  st <- state_labels[[lbl]]
  test_that(paste0("get_ag_bulletin returns the bulletin for ", lbl), {
    skip_on_cran()
    bom_bulletin <- get_ag_bulletin(state = st)
    expect_equal(bom_bulletin[["state"]][1], st)
  })
}
test_that("get_ag_bulletin returns the bulletin for AUS", {
  skip_on_cran()
  bom_bulletin <- get_ag_bulletin(state = "AUS")
  # The national product aggregates all seven state/territory bulletins
  observed_states <- na.omit(bom_bulletin[["state"]])
  expect_equal(length(unique(observed_states)), 7)
})
# Test that .validate_state stops if the state is not recognised ---------------
# (The previous description read "is recognised", the opposite of what the
# test asserts.)
test_that("get_ag_bulletin() stops if the state is not recognised", {
  skip_on_cran()
  state <- "Kansas"
  expect_error(get_ag_bulletin(state))
})
| /tests/testthat/test-get_ag_bulletin.R | permissive | graceli8/bomrang | R | false | false | 2,516 | r | context("get_ag_bulletin")
# Test that get_ag_bulletin returns a data frame with 29 columns ---------------
test_that("get_ag_bulletin returns 29 columns", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "QLD")
expect_equal(ncol(bom_bulletin), 29)
expect_named(
bom_bulletin,
c(
"product_id",
"state",
"dist",
"wmo",
"site",
"station",
"full_name",
"obs_time_local",
"obs_time_utc",
"time_zone",
"lat",
"lon",
"elev",
"bar_ht",
"start",
"end",
"r",
"tn",
"tx",
"twd",
"ev",
"tg",
"sn",
"t5",
"t10",
"t20",
"t50",
"t1m",
"wr"
)
)
})
# Test that get_ag_bulletin returns the requested state bulletin ---------------
test_that("get_ag_bulletin returns the bulletin for ACT/NSW", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "NSW")
expect_equal(bom_bulletin[["state"]][1], "NSW")
})
test_that("get_ag_bulletin returns the bulletin for NT", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "NT")
expect_equal(bom_bulletin[["state"]][1], "NT")
})
test_that("get_ag_bulletin returns the bulletin for QLD", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "QLD")
expect_equal(bom_bulletin[["state"]][1], "QLD")
})
test_that("get_ag_bulletin returns the bulletin for SA", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "SA")
expect_equal(bom_bulletin[["state"]][1], "SA")
})
test_that("get_ag_bulletin returns the bulletin for TAS", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "TAS")
expect_equal(bom_bulletin[["state"]][1], "TAS")
})
test_that("get_ag_bulletin returns the bulletin for VIC", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "VIC")
expect_equal(bom_bulletin[["state"]][1], "VIC")
})
test_that("get_ag_bulletin returns the bulletin for WA", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "WA")
expect_equal(bom_bulletin[["state"]][1], "WA")
})
test_that("get_ag_bulletin returns the bulletin for AUS", {
skip_on_cran()
bom_bulletin <- get_ag_bulletin(state = "AUS")
state <- na.omit(bom_bulletin[["state"]])
expect_equal(length(unique(state)), 7)
})
# Test that .validate_state stops if the state recognised ----------------------
test_that("get_ag_bulletin() stops if the state is recognised", {
skip_on_cran()
state <- "Kansas"
expect_error(get_ag_bulletin(state))
})
|
# predictions -----------------------------------------------------------------
# Load the stacked model, score the hold-out set, and write a timestamped
# submission file.
library(tibble)

model <- readRDS("models/stack_pls_c50_rngr_pval_dv.Rds")

# NOTE(review): `test.dv`, `pval.dv` and `testing` must already exist in the
# session (created by an earlier script in the pipeline) -- confirm run order.
test <- test.dv[, names(pval.dv)]
pred.test <- predict(model, newdata = test, na.action = na.pass)
# tibble() replaces the deprecated data_frame() alias
test.submit <- tibble(USER_ID = testing$USER_ID, Predictions = pred.test)

# Timestamp like "20160101123456"; base format() produces the same string as
# the previous str_replace_all(as.character(Sys.time()), "-|:| ", "") without
# needing stringr (which was never attached in this script).
mod <- format(Sys.time(), "%Y%m%d%H%M%S")

# readr was never attached here either; call write_csv via its namespace
readr::write_csv(test.submit, paste0("submission_", mod, ".csv"))
| /3-test_predictions.R | no_license | laiw/kaggle_ae_2016 | R | false | false | 390 | r | # predictions
library(tibble)
model <- readRDS("models/stack_pls_c50_rngr_pval_dv.Rds")
test <- test.dv[, names(pval.dv)]
pred.test <- predict(model, newdata = test, na.action = na.pass)
test.submit <- data_frame(USER_ID = testing$USER_ID, Predictions = pred.test)
mod <- str_replace_all(as.character(Sys.time()), "-|:| ", "")
write_csv(test.submit, paste0("submission_", mod, ".csv"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.