content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#' Validity first conformal prediction for Conformal Quantile Regression
#'
#' Splits the data into three folds: the first trains quantile forests over
#' `params_grid`, the second selects the configuration (mtry, ntree, beta,
#' CQR variant) with the smallest preliminary width, and the third
#' recalibrates the conformity threshold for finite-sample validity.
#'
#' @param x A N*d training matrix
#' @param y A N*1 training vector
#' @param split a length-2 vector of proportions in (0, 1): `split[1]` is the
#'   fraction of the data used for training and `split[2]` the fraction of the
#'   remainder used for model selection; the rest recalibrates the threshold.
#' @param params_grid a grid of mtry and ntree (one candidate per row)
#' @param alpha miscoverage level
#' @return average prediction width and a function for coverage on some testing points
#' @export
vfcp_cqr <- function(x, y, split, params_grid, alpha = 0.1) {
  stopifnot(length(split) == 2)
  y_data <- as.vector(y)
  x_data <- as.matrix(x)
  N <- length(y_data)
  # Fold sizes: n1 training, n2 model selection, n3 calibration.
  n1 <- ceiling(N * split[1])
  n2 <- ceiling((N - n1) * split[2])
  n3 <- N - n1 - n2
  I1 <- sample.int(N, n1, replace = FALSE)
  # I2 indexes the rows of x_data[-I1, ], not of x_data itself.
  I2 <- sample.int(N - n1, n2, replace = FALSE)
  X1 <- as.matrix(x_data[I1, ])
  Y1 <- y_data[I1]
  Xtmp <- as.matrix(x_data[-I1, ])
  Ytmp <- y_data[-I1]
  X2 <- as.matrix(Xtmp[I2, ])
  Y2 <- Ytmp[I2]
  X3 <- as.matrix(Xtmp[-I2, ])
  Y3 <- Ytmp[-I2]
  num_params <- nrow(params_grid)
  # The response range is an upper bound on any useful interval width.
  opt_width <- diff(range(y_data))
  ret <- NULL
  for (idx in seq_len(num_params)) {
    params <- params_grid[idx, ]
    # NOTE(review): `beta_grid` is not an argument of this function and is
    # resolved in the enclosing environment -- confirm the caller defines it.
    ret_mtry_ntree <- conf_CQR_prelim(X1, Y1, X2, Y2, beta_grid = beta_grid,
                                      mtry = params[1], ntree = params[2],
                                      alpha = alpha)
    if (ret_mtry_ntree$width <= opt_width) {
      ret <- ret_mtry_ntree
      opt_width <- ret_mtry_ntree$width
    }
  }
  # Fail loudly if no candidate was selected; the original code produced an
  # obscure "object 'ret' not found" error in that case.
  if (is.null(ret)) {
    stop("no parameter combination in 'params_grid' achieved a width below ",
         "diff(range(y))", call. = FALSE)
  }
  forest <- ret$forest
  beta <- ret$beta
  cqr_method <- ret$cqr_method
  # Recalibrate the threshold on the held-out third fold.
  predictions_D3 <- predict(forest, newdata = X3, what = c(beta, 1/2, 1 - beta))
  # Finite-sample corrected quantile level; clamp at 1 so that very small
  # calibration folds fall back to the maximum score instead of erroring.
  level <- min(1, (1 - alpha) * (1 + 1/n3))
  if (cqr_method == "CQR") {
    # Plain CQR: score is the signed distance outside [q_beta, q_{1-beta}].
    scores <- pmax(predictions_D3[, 1] - Y3, Y3 - predictions_D3[, 3])
    ret$opt_threshold <- quantile(scores, probs = level)
    ret$width <- 2 * ret$opt_threshold + ret$width_beta
  } else if (cqr_method == "CQR-m") {
    # CQR-m: scores scaled by the distance to the median quantile.
    low <- (predictions_D3[, 1] - Y3) / (abs(predictions_D3[, 2] - predictions_D3[, 1]) + 1e-08)
    up <- (Y3 - predictions_D3[, 3]) / (abs(predictions_D3[, 3] - predictions_D3[, 2]) + 1e-08)
    ret$opt_threshold <- quantile(pmax(low, up), probs = level)
    ret$width <- (1 + ret$opt_threshold) * ret$width_beta
  } else if (cqr_method == "CQR-r") {
    # CQR-r: scores scaled by the full interquantile range.
    denom <- abs(predictions_D3[, 3] - predictions_D3[, 1]) + 1e-08
    low <- (predictions_D3[, 1] - Y3) / denom
    up <- (Y3 - predictions_D3[, 3]) / denom
    ret$opt_threshold <- quantile(pmax(low, up), probs = level)
    ret$width <- (1 + 2 * ret$opt_threshold) * ret$width_beta
  }
  quant <- ret$opt_threshold
  # Membership checker: TRUE iff yy lies in the prediction set at xx.
  if (cqr_method == "CQR") {
    pred_set_verify <- function(xx, yy) {
      qhat_final <- predict(forest, newdata = xx, what = c(beta, 1 - beta))
      pmax(qhat_final[, 1] - yy, yy - qhat_final[, 2]) <= quant
    }
  } else if (cqr_method == "CQR-m") {
    pred_set_verify <- function(xx, yy) {
      qhat_final <- predict(forest, newdata = xx, what = c(beta, 1/2, 1 - beta))
      low <- (qhat_final[, 1] - yy) / (abs(qhat_final[, 2] - qhat_final[, 1]) + 1e-08)
      up <- (yy - qhat_final[, 3]) / (abs(qhat_final[, 3] - qhat_final[, 2]) + 1e-08)
      pmax(low, up) <= quant
    }
  } else if (cqr_method == "CQR-r") {
    pred_set_verify <- function(xx, yy) {
      qhat_final <- predict(forest, newdata = xx, what = c(beta, 1 - beta))
      low <- (qhat_final[, 1] - yy) / (abs(qhat_final[, 2] - qhat_final[, 1]) + 1e-08)
      up <- (yy - qhat_final[, 2]) / (abs(qhat_final[, 2] - qhat_final[, 1]) + 1e-08)
      pmax(low, up) <= quant
    }
  }
  ret$pred_set <- pred_set_verify
  # Fix: the original assigned the undefined global `method` (runtime error);
  # record the selected CQR variant instead.
  ret$method <- cqr_method
  return(ret)
}
|
/R/vfcp_cqr.R
|
no_license
|
Elsa-Yang98/ConformalSmallest
|
R
| false
| false
| 3,731
|
r
|
#' Validity first conformal prediction for Conformal Quantile Regression
#'
#' @param x A N*d training matrix
#' @param y A N*1 training vector
#' @param split a number between 0 and 1
#' @param params_grid a grid of mtry and ntree
#' @param alpha miscoverage level
#' @return average prediction width and a function for coverage on some testing points
#' @export
vfcp_cqr = function(x,y,split,params_grid,alpha=0.1){
# Three-fold split: fold 1 trains, fold 2 selects (mtry, ntree, beta, CQR
# variant) via conf_CQR_prelim, fold 3 recalibrates the conformal threshold.
stopifnot(length(split) == 2)
y_data <- as.vector(y)
x_data <- as.matrix(x)
N <- length(y_data)
# Fold sizes: n1 training, n2 model selection, n3 calibration.
n1 <- ceiling(N*split[1])
n2 <- ceiling({N - n1}*split[2])
n3 <- N - n1 - n2
I1 <- sample.int(N, n1, replace = FALSE)
# I2 indexes the rows of x_data[-I1, ], not of x_data itself.
I2 <- sample.int(N-n1, n2, replace = FALSE)
X1 <- x_data[I1,]
X1 = as.matrix(X1)
Y1 <- y_data[I1]
Xtmp <- x_data[-I1,]
Xtmp <- as.matrix(Xtmp)
Ytmp <- y_data[-I1]
X2 <- Xtmp[I2,]
X2 = as.matrix(X2)
Y2 <- Ytmp[I2]
X3 <- Xtmp[-I2,]
X3 = as.matrix(X3)
Y3 <- Ytmp[-I2]
num_params <- nrow(params_grid)
dim_params <- ncol(params_grid)
# The response range is an upper bound on any useful interval width.
opt_width <- diff(range(y_data))
for(idx in 1:num_params){
params = params_grid[idx,]
# NOTE(review): `beta_grid` is not defined in this function and is resolved
# in the calling environment -- confirm the caller supplies it.
ret_mtry_ntree <- conf_CQR_prelim(X1, Y1, X2, Y2, beta_grid = beta_grid, mtry = params[1], ntree = params[2], alpha = alpha)
if(ret_mtry_ntree$width <= opt_width){
ret <- ret_mtry_ntree
opt_width <- ret_mtry_ntree$width
}
}
# NOTE(review): if no candidate beats the initial width bound, `ret` is
# undefined below ("object 'ret' not found") -- consider guarding.
forest <- ret$forest
beta <- ret$beta
cqr_method <- ret$cqr_method
# Recalibrate the threshold on the held-out fold; the corrected level
# (1-alpha)*(1+1/n3) may exceed 1 for tiny n3 and make quantile() error.
predictions_D3 <- predict(forest, newdata = X3, what = c(beta, 1/2, 1 - beta))
if(cqr_method == "CQR"){
# Plain CQR: score is the signed distance outside [q_beta, q_{1-beta}].
ret$opt_threshold <- quantile(pmax(predictions_D3[,1] - Y3, Y3 - predictions_D3[,3]), probs = (1-alpha)*(1+1/n3))
ret$width <- 2*ret$opt_threshold + ret$width_beta
} else if(cqr_method == "CQR-m"){
# CQR-m: scores scaled by the distance to the median quantile.
low <- (predictions_D3[,1] - Y3)/(abs(predictions_D3[,2] - predictions_D3[,1]) + 1e-08)
up <- (Y3 - predictions_D3[,3])/(abs(predictions_D3[,3] - predictions_D3[,2]) + 1e-08)
ret$opt_threshold <- quantile(pmax(low, up), probs = (1-alpha)*(1 + 1/n3))
ret$width <- (1 + ret$opt_threshold)*ret$width_beta
} else if(cqr_method == "CQR-r"){
# CQR-r: scores scaled by the full interquantile range.
denom <- abs(predictions_D3[,3] - predictions_D3[,1]) + 1e-08
low <- (predictions_D3[,1] - Y3)/denom
up <- (Y3 - predictions_D3[,3])/denom
ret$opt_threshold <- quantile(pmax(low, up), probs = (1-alpha)*(1 + 1/n3))
ret$width <- (1 + 2*ret$opt_threshold)*ret$width_beta
}
# Build the set-membership function (TRUE iff yy is inside the set at xx)
# for the selected variant.
if(ret$cqr_method == "CQR"){
forest <- ret$forest
beta <- ret$beta
quant <- ret$opt_threshold
pred_set_verify <- function(xx, yy){
qhat_final <- predict(forest, newdata = xx, what = c(beta, 1 - beta))
return(pmax(qhat_final[,1] - yy, yy - qhat_final[,2]) <= quant)
}
ret$pred_set <- pred_set_verify
} else if(ret$cqr_method == "CQR-m"){
forest <- ret$forest
beta <- ret$beta
quant <- ret$opt_threshold
pred_set_verify <- function(xx, yy){
qhat_final <- predict(forest, newdata = xx, what = c(beta, 1/2, 1 - beta))
low <- (qhat_final[,1] - yy)/(abs(qhat_final[,2] - qhat_final[,1]) + 1e-08)
up <- (yy - qhat_final[,3])/(abs(qhat_final[,3] - qhat_final[,2]) + 1e-08)
return(pmax(low, up) <= quant)
}
ret$pred_set <- pred_set_verify
} else if(ret$cqr_method == "CQR-r"){
forest <- ret$forest
beta <- ret$beta
quant <- ret$opt_threshold
pred_set_verify <- function(xx, yy){
qhat_final <- predict(forest, newdata = xx, what = c(beta, 1 - beta))
low <- (qhat_final[,1] - yy)/(abs(qhat_final[,2] - qhat_final[,1]) + 1e-08)
up <- (yy - qhat_final[,2])/(abs(qhat_final[,2] - qhat_final[,1]) + 1e-08)
return(pmax(low, up) <= quant)
}
ret$pred_set <- pred_set_verify
}
# NOTE(review): `method` is not defined anywhere in this function, so this
# line errors at runtime -- likely intended `cqr_method`.
ret$method <- method
return(ret)
}
|
#' Create a negative binomial distribution
#'
#' A generalization of the geometric distribution. It is the number
#' of failures in a sequence of i.i.d. Bernoulli trials before
#' a specified target number (\eqn{r}) of successes occurs.
#'
#'
#' @param size The target number of successes (greater than \eqn{0})
#' until the experiment is stopped. Denoted \eqn{r} below.
#' @param p The success probability for a given trial. `p` can be any
#' value in `[0, 1]`, and defaults to `0.5`.
#' @param mu Alternative parameterization via the non-negative mean
#' of the distribution (instead of the probability `p`), defaults to `size`.
#'
#' @return A `NegativeBinomial` object.
#' @export
#'
#' @family discrete distributions
#'
#' @details
#'
#' We recommend reading this documentation on
#' <https://alexpghayes.github.io/distributions3/>, where the math
#' will render with additional detail and much greater clarity.
#'
#' In the following, let \eqn{X} be a negative binomial random variable with
#' success probability `p` = \eqn{p}.
#'
#' **Support**: \eqn{\{0, 1, 2, 3, ...\}}
#'
#' **Mean**: \eqn{\frac{(1 - p) r}{p} = \mu}
#'
#' **Variance**: \eqn{\frac{(1 - p) r}{p^2}}
#'
#' **Probability mass function (p.m.f.)**:
#'
#' \deqn{
#' f(k) = {k + r - 1 \choose k} \cdot p^r (1-p)^k
#' }{
#' f(k) = (k+r-1)!/(k!(r-1)!) p^r (1-p)^k
#' }
#'
#' **Cumulative distribution function (c.d.f.)**:
#'
#' Omitted for now.
#'
#' **Moment generating function (m.g.f.)**:
#'
#' \deqn{
#' \left(\frac{p}{1 - (1 -p) e^t}\right)^r, t < -\log (1-p)
#' }{
#' \frac{p^r}{(1 - (1-p) e^t)^r}, t < -\log (1-p)
#' }
#'
#' **Alternative parameterization**: Sometimes, especially when used in
#' regression models, the negative binomial distribution is parameterized
#' by its mean \eqn{\mu} (as listed above) plus the size parameter \eqn{r}.
#' This implies a success probability of \eqn{p = r/(r + \mu)}. This can
#' also be seen as a generalization of the Poisson distribution where the
#' assumption of equidispersion (i.e., variance equal to mean) is relaxed.
#' The negative binomial distribution is overdispersed (i.e., variance greater than mean)
#' and its variance can also be written as \eqn{\mu + 1/r \mu^2}. The Poisson
#' distribution is then obtained as \eqn{r} goes to infinity. Note that in this
#' view it is natural to also allow for non-integer \eqn{r} parameters.
#' The factorials in the equations above are then expressed in terms of the
#' gamma function.
#'
#' @examples
#'
#' set.seed(27)
#'
#' X <- NegativeBinomial(size = 5, p = 0.1)
#' X
#'
#' random(X, 10)
#'
#' pdf(X, 50)
#' log_pdf(X, 50)
#'
#' cdf(X, 50)
#' quantile(X, 0.7)
#'
#' ## alternative parameterization of X
#' Y <- NegativeBinomial(mu = 45, size = 5)
#' Y
#' cdf(Y, 50)
#' quantile(Y, 0.7)
NegativeBinomial <- function(size, p = 0.5, mu = size) {
  # `p` and `mu` are mutually exclusive parameterizations of the same family.
  if (!missing(mu) && !missing(p)) {
    stop("only one of the parameters 'p' or 'mu' must be specified")
  }
  if (missing(mu)) {
    # Probability parameterization: validate, then store (size, p).
    stopifnot("parameter 'size' must always be positive" = all(size > 0))
    stopifnot("parameter 'p' must always be in [0, 1]" = all(p >= 0 & p <= 1))
    stopifnot(
      "parameter lengths do not match (only scalars are allowed to be recycled)" =
        length(size) == length(p) | length(size) == 1L | length(p) == 1L
    )
    d <- data.frame(size = size, p = p)
  } else {
    # Mean parameterization: validate, then store (mu, size).
    stopifnot("parameter 'mu' must always be non-negative" = all(mu >= 0))
    stopifnot("parameter 'size' must always be positive" = all(size > 0))
    stopifnot(
      "parameter lengths do not match (only scalars are allowed to be recycled)" =
        length(size) == length(mu) | length(size) == 1L | length(mu) == 1L
    )
    d <- data.frame(mu = mu, size = size)
  }
  structure(d, class = c("NegativeBinomial", "distribution"))
}
#' @export
mean.NegativeBinomial <- function(x, ...) {
  ellipsis::check_dots_used()
  # Mean parameterization stores E[X] directly; otherwise E[X] = r(1-p)/p.
  params <- unclass(x)
  rval <- if ("mu" %in% names(params)) params$mu else params$size * (1 - params$p) / params$p
  setNames(rval, names(x))
}
#' @export
variance.NegativeBinomial <- function(x, ...) {
  ellipsis::check_dots_used()
  params <- unclass(x)
  rval <- if ("mu" %in% names(params)) {
    # Mean parameterization: Var = mu + mu^2/size.
    params$mu + 1/params$size * params$mu^2
  } else {
    # Probability parameterization: Var = size (1 - p) / p^2.
    params$size * (1 - params$p)/ params$p^2
  }
  setNames(rval, names(x))
}
#' @export
skewness.NegativeBinomial <- function(x, ...) {
  ellipsis::check_dots_used()
  # Reduce the mean parameterization to a success probability so that one
  # formula covers both cases.
  if ("mu" %in% names(unclass(x))) {
    x$p <- x$size/(x$size + x$mu)
  }
  setNames((2 - x$p) / sqrt((1 - x$p) * x$size), names(x))
}
#' @export
kurtosis.NegativeBinomial <- function(x, ...) {
  ellipsis::check_dots_used()
  # Excess kurtosis; derive p from (size, mu) when the mean form was stored.
  if ("mu" %in% names(unclass(x))) {
    x$p <- x$size/(x$size + x$mu)
  }
  setNames(6 / x$size + x$p^2 / x$size * (1 - x$p), names(x))
}
#' Draw a random sample from a negative binomial distribution
#'
#' @inherit NegativeBinomial examples
#'
#' @param x A `NegativeBinomial` object created by a call to
#' [NegativeBinomial()].
#' @param n The number of samples to draw. Defaults to `1L`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param ... Unused. Unevaluated arguments will generate a warning to
#' catch mispellings or other possible errors.
#'
#' @family NegativeBinomial distribution
#'
#' @return In case of a single distribution object or `n = 1`, either a numeric
#' vector of length `n` (if `drop = TRUE`, default) or a `matrix` with `n` columns
#' (if `drop = FALSE`).
#' @export
#'
random.NegativeBinomial <- function(x, n = 1L, drop = TRUE, ...) {
  n <- make_positive_integer(n)
  if (n == 0L) {
    return(numeric(0L))
  }
  # Draw with the rnbinom parameterization matching how `x` was constructed.
  use_mu <- "mu" %in% names(unclass(x))
  FUN <- function(at, d) {
    if (use_mu) {
      rnbinom(n = at, mu = d$mu, size = d$size)
    } else {
      rnbinom(n = at, size = d$size, prob = d$p)
    }
  }
  apply_dpqr(d = x, FUN = FUN, at = n, type = "random", drop = drop)
}
#' Evaluate the probability mass function of a NegativeBinomial distribution
#'
#' @inherit NegativeBinomial examples
#'
#' @param d A `NegativeBinomial` object created by a call to
#' [NegativeBinomial()].
#' @param x A vector of elements whose probabilities you would like to
#' determine given the distribution `d`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{d} be evaluated
#' at all elements of \code{x} (\code{elementwise = FALSE}, yielding a matrix)?
#' Or, if \code{d} and \code{x} have the same length, should the evaluation be
#' done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#' default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#' lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link[stats]{dnbinom}}.
#' Unevaluated arguments will generate a warning to catch mispellings or other
#' possible errors.
#'
#' @family NegativeBinomial distribution
#'
#' @return In case of a single distribution object, either a numeric
#' vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#' `length(x)` columns (if `drop = FALSE`). In case of a vectorized distribution
#' object, a matrix with `length(x)` columns containing all possible combinations.
#' @export
#'
pdf.NegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
  # Evaluate dnbinom with whichever parameterization `d` stores,
  # forwarding `...` on to dnbinom.
  use_mu <- "mu" %in% names(unclass(d))
  FUN <- function(at, d) {
    if (use_mu) {
      dnbinom(x = at, mu = d$mu, size = d$size, ...)
    } else {
      dnbinom(x = at, size = d$size, prob = d$p, ...)
    }
  }
  apply_dpqr(d = d, FUN = FUN, at = x, type = "density", drop = drop, elementwise = elementwise)
}
#' @rdname pdf.NegativeBinomial
#' @export
#'
log_pdf.NegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
  # Log-density via dnbinom(log = TRUE).
  # Fix: forward `...` to dnbinom -- the shared documentation (via @rdname)
  # promises this and pdf.NegativeBinomial already does it; previously any
  # extra arguments were silently dropped here.
  FUN <- if ("mu" %in% names(unclass(d))) {
    function(at, d) dnbinom(x = at, mu = d$mu, size = d$size, log = TRUE, ...)
  } else {
    function(at, d) dnbinom(x = at, size = d$size, prob = d$p, log = TRUE, ...)
  }
  apply_dpqr(d = d, FUN = FUN, at = x, type = "logLik", drop = drop, elementwise = elementwise)
}
#' Evaluate the cumulative distribution function of a negative binomial distribution
#'
#' @inherit NegativeBinomial examples
#'
#' @param d A `NegativeBinomial` object created by a call to
#' [NegativeBinomial()].
#' @param x A vector of elements whose cumulative probabilities you would
#' like to determine given the distribution `d`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{d} be evaluated
#' at all elements of \code{x} (\code{elementwise = FALSE}, yielding a matrix)?
#' Or, if \code{d} and \code{x} have the same length, should the evaluation be
#' done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#' default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#' lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link[stats]{pnbinom}}.
#' Unevaluated arguments will generate a warning to catch mispellings or other
#' possible errors.
#'
#' @family NegativeBinomial distribution
#'
#' @return In case of a single distribution object, either a numeric
#' vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#' `length(x)` columns (if `drop = FALSE`). In case of a vectorized distribution
#' object, a matrix with `length(x)` columns containing all possible combinations.
#' @export
#'
cdf.NegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
  # Evaluate pnbinom with whichever parameterization `d` stores,
  # forwarding `...` on to pnbinom.
  use_mu <- "mu" %in% names(unclass(d))
  FUN <- function(at, d) {
    if (use_mu) {
      pnbinom(q = at, mu = d$mu, size = d$size, ...)
    } else {
      pnbinom(q = at, size = d$size, prob = d$p, ...)
    }
  }
  apply_dpqr(d = d, FUN = FUN, at = x, type = "probability", drop = drop, elementwise = elementwise)
}
#' Determine quantiles of a NegativeBinomial distribution
#'
#' @inherit NegativeBinomial examples
#' @inheritParams random.NegativeBinomial
#'
#' @param probs A vector of probabilities.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{x} be evaluated
#' at all elements of \code{probs} (\code{elementwise = FALSE}, yielding a matrix)?
#' Or, if \code{x} and \code{probs} have the same length, should the evaluation be
#' done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#' default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#' lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link[stats]{qnbinom}}.
#' Unevaluated arguments will generate a warning to catch mispellings or other
#' possible errors.
#'
#' @return In case of a single distribution object, either a numeric
#' vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#' `length(probs)` columns (if `drop = FALSE`). In case of a vectorized
#' distribution object, a matrix with `length(probs)` columns containing all
#' possible combinations.
#' @export
#'
#' @family NegativeBinomial distribution
#'
quantile.NegativeBinomial <- function(x, probs, drop = TRUE, elementwise = NULL, ...) {
  # Evaluate qnbinom with whichever parameterization the object stores.
  # Fix: use the distribution `d` that apply_dpqr passes to FUN (as every
  # other method in this file does) instead of closing over the whole object
  # `x`, so per-element evaluation of vectorized distributions stays
  # consistent with the sibling pdf/cdf/random methods.
  FUN <- if ("mu" %in% names(unclass(x))) {
    function(at, d) qnbinom(p = at, mu = d$mu, size = d$size, ...)
  } else {
    function(at, d) qnbinom(p = at, size = d$size, prob = d$p, ...)
  }
  apply_dpqr(d = x, FUN = FUN, at = probs, type = "quantile", drop = drop, elementwise = elementwise)
}
#' Return the support of the NegativeBinomial distribution
#'
#' @param d An `NegativeBinomial` object created by a call to [NegativeBinomial()].
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param ... Currently not used.
#'
#' @return A vector of length 2 with the minimum and maximum value of the support.
#'
#' @export
support.NegativeBinomial <- function(d, drop = TRUE, ...) {
  ellipsis::check_dots_used()
  # A count distribution: support is [0, Inf) for every element of `d`.
  k <- length(d)
  make_support(rep(0, k), rep(Inf, k), d, drop = drop)
}
#' @exportS3Method
is_discrete.NegativeBinomial <- function(d, ...) {
  ellipsis::check_dots_used()
  # The negative binomial is discrete: one TRUE per element of `d`.
  rval <- rep.int(TRUE, length(d))
  names(rval) <- names(d)
  rval
}
#' @exportS3Method
is_continuous.NegativeBinomial <- function(d, ...) {
  ellipsis::check_dots_used()
  # Counterpart of is_discrete: always FALSE for this family.
  rval <- rep.int(FALSE, length(d))
  names(rval) <- names(d)
  rval
}
|
/R/NegativeBinomial.R
|
permissive
|
alexpghayes/distributions3
|
R
| false
| false
| 12,450
|
r
|
#' Create a negative binomial distribution
#'
#' A generalization of the geometric distribution. It is the number
#' of failures in a sequence of i.i.d. Bernoulli trials before
#' a specified target number (\eqn{r}) of successes occurs.
#'
#'
#' @param size The target number of successes (greater than \eqn{0})
#' until the experiment is stopped. Denoted \eqn{r} below.
#' @param p The success probability for a given trial. `p` can be any
#' value in `[0, 1]`, and defaults to `0.5`.
#' @param mu Alternative parameterization via the non-negative mean
#' of the distribution (instead of the probability `p`), defaults to `size`.
#'
#' @return A `NegativeBinomial` object.
#' @export
#'
#' @family discrete distributions
#'
#' @details
#'
#' We recommend reading this documentation on
#' <https://alexpghayes.github.io/distributions3/>, where the math
#' will render with additional detail and much greater clarity.
#'
#' In the following, let \eqn{X} be a negative binomial random variable with
#' success probability `p` = \eqn{p}.
#'
#' **Support**: \eqn{\{0, 1, 2, 3, ...\}}
#'
#' **Mean**: \eqn{\frac{(1 - p) r}{p} = \mu}
#'
#' **Variance**: \eqn{\frac{(1 - p) r}{p^2}}
#'
#' **Probability mass function (p.m.f.)**:
#'
#' \deqn{
#' f(k) = {k + r - 1 \choose k} \cdot p^r (1-p)^k
#' }{
#' f(k) = (k+r-1)!/(k!(r-1)!) p^r (1-p)^k
#' }
#'
#' **Cumulative distribution function (c.d.f.)**:
#'
#' Omitted for now.
#'
#' **Moment generating function (m.g.f.)**:
#'
#' \deqn{
#' \left(\frac{p}{1 - (1 -p) e^t}\right)^r, t < -\log (1-p)
#' }{
#' \frac{p^r}{(1 - (1-p) e^t)^r}, t < -\log (1-p)
#' }
#'
#' **Alternative parameterization**: Sometimes, especially when used in
#' regression models, the negative binomial distribution is parameterized
#' by its mean \eqn{\mu} (as listed above) plus the size parameter \eqn{r}.
#' This implies a success probability of \eqn{p = r/(r + \mu)}. This can
#' also be seen as a generalization of the Poisson distribution where the
#' assumption of equidispersion (i.e., variance equal to mean) is relaxed.
#' The negative binomial distribution is overdispersed (i.e., variance greater than mean)
#' and its variance can also be written as \eqn{\mu + 1/r \mu^2}. The Poisson
#' distribution is then obtained as \eqn{r} goes to infinity. Note that in this
#' view it is natural to also allow for non-integer \eqn{r} parameters.
#' The factorials in the equations above are then expressed in terms of the
#' gamma function.
#'
#' @examples
#'
#' set.seed(27)
#'
#' X <- NegativeBinomial(size = 5, p = 0.1)
#' X
#'
#' random(X, 10)
#'
#' pdf(X, 50)
#' log_pdf(X, 50)
#'
#' cdf(X, 50)
#' quantile(X, 0.7)
#'
#' ## alternative parameterization of X
#' Y <- NegativeBinomial(mu = 45, size = 5)
#' Y
#' cdf(Y, 50)
#' quantile(Y, 0.7)
NegativeBinomial <- function(size, p = 0.5, mu = size) {
# `p` and `mu` are mutually exclusive parameterizations of the same family.
if(!missing(mu) && !missing(p)) stop("only one of the parameters 'p' or 'mu' must be specified")
if(missing(mu)) {
# Probability parameterization: validate, then store (size, p).
stopifnot("parameter 'size' must always be positive" = all(size > 0))
stopifnot("parameter 'p' must always be in [0, 1]" = all(p >= 0 & p <= 1))
stopifnot(
"parameter lengths do not match (only scalars are allowed to be recycled)" =
length(size) == length(p) | length(size) == 1L | length(p) == 1L
)
d <- data.frame(size = size, p = p)
} else {
# Mean parameterization: validate, then store (mu, size).
stopifnot("parameter 'mu' must always be non-negative" = all(mu >= 0))
stopifnot("parameter 'size' must always be positive" = all(size > 0))
stopifnot(
"parameter lengths do not match (only scalars are allowed to be recycled)" =
length(size) == length(mu) | length(size) == 1L | length(mu) == 1L
)
d <- data.frame(mu = mu, size = size)
}
class(d) <- c("NegativeBinomial", "distribution")
d
}
#' @export
mean.NegativeBinomial <- function(x, ...) {
ellipsis::check_dots_used()
# E[X] is stored directly under the mean parameterization,
# otherwise E[X] = size * (1 - p) / p.
rval <- if("mu" %in% names(unclass(x))) {
x$mu
} else {
x$size * (1 - x$p) / x$p
}
setNames(rval, names(x))
}
#' @export
variance.NegativeBinomial <- function(x, ...) {
ellipsis::check_dots_used()
# Var = mu + mu^2/size, or size * (1 - p) / p^2.
rval <- if("mu" %in% names(unclass(x))) {
x$mu + 1/x$size * x$mu^2
} else {
x$size * (1 - x$p)/ x$p^2
}
setNames(rval, names(x))
}
#' @export
skewness.NegativeBinomial <- function(x, ...) {
ellipsis::check_dots_used()
# Reduce the mean parameterization to p = size / (size + mu).
if("mu" %in% names(unclass(x))) x$p <- x$size/(x$size + x$mu)
rval <- (2 - x$p) / sqrt((1 - x$p) * x$size)
setNames(rval, names(x))
}
#' @export
kurtosis.NegativeBinomial <- function(x, ...) {
ellipsis::check_dots_used()
# Excess kurtosis; p is derived from (size, mu) when needed.
if("mu" %in% names(unclass(x))) x$p <- x$size/(x$size + x$mu)
rval <- 6 / x$size + x$p^2 / x$size * (1 - x$p)
setNames(rval, names(x))
}
#' Draw a random sample from a negative binomial distribution
#'
#' @inherit NegativeBinomial examples
#'
#' @param x A `NegativeBinomial` object created by a call to
#' [NegativeBinomial()].
#' @param n The number of samples to draw. Defaults to `1L`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param ... Unused. Unevaluated arguments will generate a warning to
#' catch mispellings or other possible errors.
#'
#' @family NegativeBinomial distribution
#'
#' @return In case of a single distribution object or `n = 1`, either a numeric
#' vector of length `n` (if `drop = TRUE`, default) or a `matrix` with `n` columns
#' (if `drop = FALSE`).
#' @export
#'
random.NegativeBinomial <- function(x, n = 1L, drop = TRUE, ...) {
n <- make_positive_integer(n)
if (n == 0L) {
return(numeric(0L))
}
# Choose the rnbinom parameterization matching how `x` was constructed.
FUN <- if("mu" %in% names(unclass(x))) {
function(at, d) rnbinom(n = at, mu = d$mu, size = d$size)
} else {
function(at, d) rnbinom(n = at, size = d$size, prob = d$p)
}
apply_dpqr(d = x, FUN = FUN, at = n, type = "random", drop = drop)
}
#' Evaluate the probability mass function of a NegativeBinomial distribution
#'
#' @inherit NegativeBinomial examples
#'
#' @param d A `NegativeBinomial` object created by a call to
#' [NegativeBinomial()].
#' @param x A vector of elements whose probabilities you would like to
#' determine given the distribution `d`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{d} be evaluated
#' at all elements of \code{x} (\code{elementwise = FALSE}, yielding a matrix)?
#' Or, if \code{d} and \code{x} have the same length, should the evaluation be
#' done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#' default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#' lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link[stats]{dnbinom}}.
#' Unevaluated arguments will generate a warning to catch mispellings or other
#' possible errors.
#'
#' @family NegativeBinomial distribution
#'
#' @return In case of a single distribution object, either a numeric
#' vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#' `length(x)` columns (if `drop = FALSE`). In case of a vectorized distribution
#' object, a matrix with `length(x)` columns containing all possible combinations.
#' @export
#'
pdf.NegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
# Density via dnbinom using the stored parameterization; `...` is forwarded.
FUN <- if("mu" %in% names(unclass(d))) {
function(at, d) dnbinom(x = at, mu = d$mu, size = d$size, ...)
} else {
function(at, d) dnbinom(x = at, size = d$size, prob = d$p, ...)
}
apply_dpqr(d = d, FUN = FUN, at = x, type = "density", drop = drop, elementwise = elementwise)
}
#' @rdname pdf.NegativeBinomial
#' @export
#'
log_pdf.NegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
# Log-density via dnbinom(log = TRUE).
# NOTE(review): `...` is NOT forwarded to dnbinom here, although the shared
# documentation says it is and pdf.NegativeBinomial does forward it.
FUN <- if("mu" %in% names(unclass(d))) {
function(at, d) dnbinom(x = at, mu = d$mu, size = d$size, log = TRUE)
} else {
function(at, d) dnbinom(x = at, size = d$size, prob = d$p, log = TRUE)
}
apply_dpqr(d = d, FUN = FUN, at = x, type = "logLik", drop = drop, elementwise = elementwise)
}
#' Evaluate the cumulative distribution function of a negative binomial distribution
#'
#' @inherit NegativeBinomial examples
#'
#' @param d A `NegativeBinomial` object created by a call to
#' [NegativeBinomial()].
#' @param x A vector of elements whose cumulative probabilities you would
#' like to determine given the distribution `d`.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{d} be evaluated
#' at all elements of \code{x} (\code{elementwise = FALSE}, yielding a matrix)?
#' Or, if \code{d} and \code{x} have the same length, should the evaluation be
#' done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#' default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#' lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link[stats]{pnbinom}}.
#' Unevaluated arguments will generate a warning to catch mispellings or other
#' possible errors.
#'
#' @family NegativeBinomial distribution
#'
#' @return In case of a single distribution object, either a numeric
#' vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#' `length(x)` columns (if `drop = FALSE`). In case of a vectorized distribution
#' object, a matrix with `length(x)` columns containing all possible combinations.
#' @export
#'
cdf.NegativeBinomial <- function(d, x, drop = TRUE, elementwise = NULL, ...) {
# CDF via pnbinom using the stored parameterization; `...` is forwarded.
FUN <- if("mu" %in% names(unclass(d))) {
function(at, d) pnbinom(q = at, mu = d$mu, size = d$size, ...)
} else {
function(at, d) pnbinom(q = at, size = d$size, prob = d$p, ...)
}
apply_dpqr(d = d, FUN = FUN, at = x, type = "probability", drop = drop, elementwise = elementwise)
}
#' Determine quantiles of a NegativeBinomial distribution
#'
#' @inherit NegativeBinomial examples
#' @inheritParams random.NegativeBinomial
#'
#' @param probs A vector of probabilities.
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param elementwise logical. Should each distribution in \code{x} be evaluated
#' at all elements of \code{probs} (\code{elementwise = FALSE}, yielding a matrix)?
#' Or, if \code{x} and \code{probs} have the same length, should the evaluation be
#' done element by element (\code{elementwise = TRUE}, yielding a vector)? The
#' default of \code{NULL} means that \code{elementwise = TRUE} is used if the
#' lengths match and otherwise \code{elementwise = FALSE} is used.
#' @param ... Arguments to be passed to \code{\link[stats]{qnbinom}}.
#' Unevaluated arguments will generate a warning to catch mispellings or other
#' possible errors.
#'
#' @return In case of a single distribution object, either a numeric
#' vector of length `probs` (if `drop = TRUE`, default) or a `matrix` with
#' `length(probs)` columns (if `drop = FALSE`). In case of a vectorized
#' distribution object, a matrix with `length(probs)` columns containing all
#' possible combinations.
#' @export
#'
#' @family NegativeBinomial distribution
#'
quantile.NegativeBinomial <- function(x, probs, drop = TRUE, elementwise = NULL, ...) {
# Quantiles via qnbinom using the stored parameterization.
# NOTE(review): these kernels close over the whole object `x` instead of
# using the `d` argument supplied by apply_dpqr, unlike every sibling method
# in this file -- confirm this is safe for vectorized distributions.
FUN <- if("mu" %in% names(unclass(x))) {
function(at, d) qnbinom(p = at, mu = x$mu, size = x$size, ...)
} else {
function(at, d) qnbinom(p = at, size = x$size, prob = x$p, ...)
}
apply_dpqr(d = x, FUN = FUN, at = probs, type = "quantile", drop = drop, elementwise = elementwise)
}
#' Return the support of the NegativeBinomial distribution
#'
#' @param d An `NegativeBinomial` object created by a call to [NegativeBinomial()].
#' @param drop logical. Should the result be simplified to a vector if possible?
#' @param ... Currently not used.
#'
#' @return A vector of length 2 with the minimum and maximum value of the support.
#'
#' @export
support.NegativeBinomial <- function(d, drop = TRUE, ...) {
ellipsis::check_dots_used()
# Support of a count distribution: [0, Inf), one pair per element of `d`.
min <- rep(0, length(d))
max <- rep(Inf, length(d))
make_support(min, max, d, drop = drop)
}
#' @exportS3Method
is_discrete.NegativeBinomial <- function(d, ...) {
ellipsis::check_dots_used()
# Always TRUE: the negative binomial is a discrete distribution.
setNames(rep.int(TRUE, length(d)), names(d))
}
#' @exportS3Method
is_continuous.NegativeBinomial <- function(d, ...) {
ellipsis::check_dots_used()
# Always FALSE: counterpart of is_discrete.
setNames(rep.int(FALSE, length(d)), names(d))
}
|
library(ggplot2)
# Plot the data points of X coloured by class z and overlay the decision
# frontier of the Euclidean-distance classifier with centres mu, evaluated
# on a regular grid covering (and slightly padding) the data.
# X: n x 2 matrix; z: integer class labels (used as colour indices);
# mu: class centres forwarded to ceuc.val(); discretisation: grid resolution.
# Called for its side effect (a base-graphics plot).
front.ceuc <- function(X, z, mu, discretisation = 50)
{
  # Grid step in each dimension, with a one-step margin around the data.
  deltaX <- (max(X[, 1]) - min(X[, 1])) / discretisation
  deltaY <- (max(X[, 2]) - min(X[, 2])) / discretisation
  minX <- min(X[, 1]) - deltaX
  maxX <- max(X[, 1]) + deltaX
  minY <- min(X[, 2]) - deltaY
  maxY <- max(X[, 2]) + deltaY
  # Display grid (cartesian product of the two axis grids).
  grilleX <- seq(from = minX, to = maxX, by = deltaX)
  naffX <- length(grilleX)
  grilleY <- seq(from = minY, to = maxY, by = deltaY)
  naffY <- length(grilleY)
  grille <- cbind(rep.int(grilleX, times = rep(naffY, naffX)), rep(grilleY, naffX))
  # Classifier values on the grid (ceuc.val is defined elsewhere in the repo).
  valf <- ceuc.val(mu, grille)
  # ggplot(X, aes(x=grilleX, y=grilleY, z=valf)) + geom_point(colour=z) + geom_contour()
  plot(X, col = c("red", "green", "blue", "magenta", "orange")[z], asp = 1)
  # Frontier at level 1.5 (between class values 1 and 2).
  # Fix: spell out TRUE instead of the reassignable shorthand T.
  contour(grilleX, grilleY, matrix(valf, nrow = naffX, byrow = TRUE),
          add = TRUE, drawlabels = FALSE, levels = 1.5)
}
|
/TP3/script/front.ceuc.R
|
permissive
|
puechtom/sy09
|
R
| false
| false
| 896
|
r
|
library(ggplot2)

# Plot the decision frontier of a Euclidean-distance classifier.
#
# Draws the observations in X coloured by class label z, then overlays the
# decision frontier obtained by evaluating ceuc.val() on a regular grid that
# covers the data plus a one-cell margin on every side.
#
# Args:
#   X: two-column matrix/data frame of observations.
#   z: integer class labels, used to index the colour palette (up to 5 classes).
#   mu: class centers, forwarded to ceuc.val() (defined elsewhere).
#   discretisation: number of grid cells per axis (default 50).
front.ceuc <- function(X, z, mu, discretisation=50)
{
  deltaX <- (max(X[,1])-min(X[,1]))/discretisation
  deltaY <- (max(X[,2])-min(X[,2]))/discretisation
  minX <- min(X[,1])-deltaX
  maxX <- max(X[,1])+deltaX
  minY <- min(X[,2])-deltaY
  maxY <- max(X[,2])+deltaY
  # Display grid: all (x, y) combinations with grilleX as the slow index.
  grilleX <- seq(from=minX,to=maxX,by=deltaX)
  naffX <- length(grilleX)
  grilleY <- seq(from=minY,to=maxY,by=deltaY)
  naffY <- length(grilleY)
  grille <- cbind(rep.int(grilleX,times=rep(naffY,naffX)),rep(grilleY,naffX))
  # Classifier values over the grid.
  valf <- ceuc.val(mu, grille)
  # ggplot(X, aes(x=grilleX, y=grilleY, z=valf)) + geom_point(colour=z) + geom_contour()
  plot(X, col=c("red","green","blue","magenta","orange")[z], asp=1)
  # byrow = TRUE because valf iterates grilleY fastest; level 1.5 marks the
  # frontier between classes 1 and 2.
  contour(grilleX, grilleY, matrix(valf,nrow=naffX,byrow=TRUE), add=TRUE,
          drawlabels=FALSE, levels=1.5)
}
|
# ---------------------------------------------------------------------------
# Leaflet choropleth of congenital-anomaly prevalence per health macroregion
# (RS, Brazil) for 2018 births. Relies on objects and packages attached
# elsewhere: banco_anomalias_analise, bins_defalt, dplyr, sf, leaflet.
# ---------------------------------------------------------------------------
# Municipality (IBGE code) -> health macroregion lookup.
banco_macro_saude_aux <- read.csv("MACRORREGIOES_DE_SAUDE.csv") %>%
  select(IBGE,macrorregiao)
# banco_macro_aux <- banco_anomalias_analise %>%
#   filter(ANO_NASC == "2018") %>%
#   left_join(mapa_rs)
#
# banco_macro_saude_final <- merge(x = banco_macro_saude, y = banco_macro_aux, by.x = "Código IBGE",by.y = "CODMUNRES")
# View(banco_macro_saude_final)
#
#
# banco_macro_saude_final_ultimate <- banco_macro_saude_final %>%
#   select("Código IBGE","Município","Macrorregião","geometry")
#
# banco_macro_saude_final_ultimate$macro <- as.numeric(as.factor(banco_macro_saude_final_ultimate$Macrorregião))
#
# sf::st_write(banco_macro_saude_final_ultimate,"shapefiles/macro_saude.shp", quiet = TRUE)
# Macroregion shapefile: rename columns, then keep name / numeric code /
# geometry (columns 3:5 — selection is positional, so it depends on the
# column order written above).
macro_saude_shape <- sf::st_read("shapefiles/micro_saude_rs/micro_saude_rs.shp", quiet = TRUE)
names(macro_saude_shape) <- c("IBGE","municipio","macroregiao","macroregiao_num","geometry")
macro_saude_shape <- macro_saude_shape[,3:5]
# Attach each 2018 birth record to its macroregion via the IBGE code.
banco_aux <- banco_anomalias_analise %>%
  filter(ANO_NASC == "2018") %>%
  left_join(banco_macro_saude_aux, by=c("CODMUNRES" = "IBGE"))
# Aggregate live births and anomaly counts per macroregion; prevalence is
# expressed per 10,000 live births.
banco_aux2 <- banco_aux %>%
  group_by(macrorregiao) %>%
  summarise(numero_nascidos_vivos = sum(numero_nascidos_vivos),
            nascidos_vivos_anomalia = sum(nascidos_vivos_anomalia),
            prevalencia = nascidos_vivos_anomalia/numero_nascidos_vivos*10^4)
banco_grafico <- left_join(banco_aux2,macro_saude_shape,by = c("macrorregiao"= "macroregiao"))
# Colour scale; pal2 draws zero prevalence grey instead of a palette colour.
pal <- colorBin("plasma", domain = banco_grafico$prevalencia, bins = bins_defalt$brks)
pal2 <- function(x){
  ifelse(x==0,"#808080",pal(x))
}
# NOTE(review): 'tidy' is only created on the NEXT line; as written this
# statement either errors (object not found) or renames a stale object left
# over from a previous session, and its effect is immediately overwritten.
# Verify whether it should run after st_as_sf() or be removed.
names(tidy)[2] <- "variavel"
tidy = st_as_sf(banco_grafico)
tidy <- st_transform(tidy, "+init=epsg:4326")
# Interactive map: polygons coloured by prevalence, HTML hover labels with
# the macroregion name and rounded prevalence, plus a legend.
leaflet(tidy) %>%
  addProviderTiles(providers$OpenStreetMap.Mapnik) %>%
  addPolygons(fillColor = ~pal2(prevalencia),
              weight = 1.5,
              opacity = 0.7,
              fillOpacity = 0.7,
              color = "gray",
              highlight = highlightOptions(
                weight = 5,
                color = "#666",
                fillOpacity = 0.7,
                bringToFront = TRUE),
              label = sprintf("<strong>%s</strong><br/>Prevalência:
                  %s",
                              tidy$macrorregiao, round(tidy$prevalencia,3)) %>%
                lapply(htmltools::HTML),
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "6px 11px"),
                textsize = "13px",
                direction = "bottom")) %>%
  leaflet::addLegend(pal = pal, values = ~tidy$prevalencia, opacity = 0.7, title = "Prevalência",
                     labFormat = labelFormat(digits = 3),
                     position = "bottomright")
|
/macro_saude_testes.R
|
no_license
|
anomaliascongenitas/App_Anomalias_Congenitas_RS
|
R
| false
| false
| 2,762
|
r
|
# ---------------------------------------------------------------------------
# Leaflet choropleth of congenital-anomaly prevalence per health macroregion
# (RS, Brazil) for 2018 births. Relies on objects and packages attached
# elsewhere: banco_anomalias_analise, bins_defalt, dplyr, sf, leaflet.
# ---------------------------------------------------------------------------
# Municipality (IBGE code) -> health macroregion lookup.
banco_macro_saude_aux <- read.csv("MACRORREGIOES_DE_SAUDE.csv") %>%
  select(IBGE,macrorregiao)
# banco_macro_aux <- banco_anomalias_analise %>%
#   filter(ANO_NASC == "2018") %>%
#   left_join(mapa_rs)
#
# banco_macro_saude_final <- merge(x = banco_macro_saude, y = banco_macro_aux, by.x = "Código IBGE",by.y = "CODMUNRES")
# View(banco_macro_saude_final)
#
#
# banco_macro_saude_final_ultimate <- banco_macro_saude_final %>%
#   select("Código IBGE","Município","Macrorregião","geometry")
#
# banco_macro_saude_final_ultimate$macro <- as.numeric(as.factor(banco_macro_saude_final_ultimate$Macrorregião))
#
# sf::st_write(banco_macro_saude_final_ultimate,"shapefiles/macro_saude.shp", quiet = TRUE)
# Macroregion shapefile: rename columns, then keep name / numeric code /
# geometry (columns 3:5 — selection is positional, so it depends on the
# column order written above).
macro_saude_shape <- sf::st_read("shapefiles/micro_saude_rs/micro_saude_rs.shp", quiet = TRUE)
names(macro_saude_shape) <- c("IBGE","municipio","macroregiao","macroregiao_num","geometry")
macro_saude_shape <- macro_saude_shape[,3:5]
# Attach each 2018 birth record to its macroregion via the IBGE code.
banco_aux <- banco_anomalias_analise %>%
  filter(ANO_NASC == "2018") %>%
  left_join(banco_macro_saude_aux, by=c("CODMUNRES" = "IBGE"))
# Aggregate live births and anomaly counts per macroregion; prevalence is
# expressed per 10,000 live births.
banco_aux2 <- banco_aux %>%
  group_by(macrorregiao) %>%
  summarise(numero_nascidos_vivos = sum(numero_nascidos_vivos),
            nascidos_vivos_anomalia = sum(nascidos_vivos_anomalia),
            prevalencia = nascidos_vivos_anomalia/numero_nascidos_vivos*10^4)
banco_grafico <- left_join(banco_aux2,macro_saude_shape,by = c("macrorregiao"= "macroregiao"))
# Colour scale; pal2 draws zero prevalence grey instead of a palette colour.
pal <- colorBin("plasma", domain = banco_grafico$prevalencia, bins = bins_defalt$brks)
pal2 <- function(x){
  ifelse(x==0,"#808080",pal(x))
}
# NOTE(review): 'tidy' is only created on the NEXT line; as written this
# statement either errors (object not found) or renames a stale object left
# over from a previous session, and its effect is immediately overwritten.
# Verify whether it should run after st_as_sf() or be removed.
names(tidy)[2] <- "variavel"
tidy = st_as_sf(banco_grafico)
tidy <- st_transform(tidy, "+init=epsg:4326")
# Interactive map: polygons coloured by prevalence, HTML hover labels with
# the macroregion name and rounded prevalence, plus a legend.
leaflet(tidy) %>%
  addProviderTiles(providers$OpenStreetMap.Mapnik) %>%
  addPolygons(fillColor = ~pal2(prevalencia),
              weight = 1.5,
              opacity = 0.7,
              fillOpacity = 0.7,
              color = "gray",
              highlight = highlightOptions(
                weight = 5,
                color = "#666",
                fillOpacity = 0.7,
                bringToFront = TRUE),
              label = sprintf("<strong>%s</strong><br/>Prevalência:
                  %s",
                              tidy$macrorregiao, round(tidy$prevalencia,3)) %>%
                lapply(htmltools::HTML),
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "6px 11px"),
                textsize = "13px",
                direction = "bottom")) %>%
  leaflet::addLegend(pal = pal, values = ~tidy$prevalencia, opacity = 0.7, title = "Prevalência",
                     labFormat = labelFormat(digits = 3),
                     position = "bottomright")
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.01, Gaussian family)
# on the thyroid feature set and append the fitted-path summary to a log file.
library(glmnet)

# Column 1 is the response; columns 4..end are the predictors.
mydata <- read.table("../../../../TrainingSet/FullSet/AvgRank/thyroid.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.01,
                 family = "gaussian", standardize = TRUE)
# Append the glmnet path summary to the log file, then restore the console.
sink('./thyroid_005.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/AvgRank/thyroid/thyroid_005.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 348
|
r
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.01, Gaussian family)
# on the thyroid feature set and append the fitted-path summary to a log file.
library(glmnet)

# Column 1 is the response; columns 4..end are the predictors.
mydata <- read.table("../../../../TrainingSet/FullSet/AvgRank/thyroid.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.01,
                 family = "gaussian", standardize = TRUE)
# Append the glmnet path summary to the log file, then restore the console.
sink('./thyroid_005.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
#' popPlace : Populates the input$unit field using information from the PostGres estimates database.
#'
#' Builds the lookup structures used by the dashboard: county records,
#' municipality records (with multi-county municipality totals resolved) and
#' a fixed list of planning-region names. Revised 1/23/2019.
#'
#' @param DBPool Database connection pool used to run the estimates queries.
#' @param curyr Year of the estimates to retrieve.
#'
#' @return A named list with elements `Counties` (county records),
#'   `Munis` (municipality records with county name and population) and
#'   `Region` (list of region display names).
#'
#' @export
popPlace <- function(DBPool,curyr) {
  #Regions -- fixed display list, not read from the database.
  regList <- list(
    "Denver PMSA",
    "Denver-Boulder Metro Area",
    "Denver-Boulder-Greely CMSA",
    "10 County Denver Metro Area",
    "Central Mountains",
    "Eastern Plains",
    "Front Range",
    "San Luis Valley",
    "Western Slope",
    "Region 1: Northern Eastern Plains",
    "Region 2: Northern Front Range",
    "Region 3: Denver Metropolitan Area",
    "Region 4: Southern Front Range",
    "Region 5: Central Eastern Plains",
    "Region 6: Southern Eastern Plains",
    "Region 7: Pueblo County",
    "Region 8: San Juan Valley",
    "Region 9: Southern Western Slope",
    "Region 10: Central Western Slope",
    "Region 11: Northern Western Slope",
    "Region 12: Northern Mountains",
    "Region 13: Central Mountains",
    "Region 14: Southern Mountains")
  # Create Connection Strings
  # clookup: county rows (placefips = 0); plookup: all rows for the year;
  # mlookup: totals for municipalities spanning several counties
  # (countyfips = 999).
  clookupStr <- paste0("SELECT countyfips, placefips, municipalityname, year, totalpopulation FROM estimates.county_muni_timeseries WHERE year = ", curyr, " and placefips = 0;")
  plookupStr <- paste0("SELECT countyfips, placefips, municipalityname, year, totalpopulation FROM estimates.county_muni_timeseries WHERE year = ", curyr, ";")
  mlookupStr <- paste0("SELECT countyfips, placefips, year, totalpopulation FROM estimates.county_muni_timeseries WHERE year = ", curyr, " and placefips != 0 and placefips != 99990 and countyfips = 999;")
  # f.cLookup contains the county records
  f.cLookup <- dbGetQuery(DBPool, clookupStr)
  f.pLookup <- dbGetQuery(DBPool, plookupStr)
  #f.mLookup is the multi county cities
  f.mLookup <- dbGetQuery(DBPool, mlookupStr)
  # Counties
  # f.cLookup <- f.cLookup[c(2:nrow(f.cLookup)),]
  # Column 3 is municipalityname (positional — tied to the SELECT order above).
  f.cLookup[,3] <- sapply(f.cLookup[,3], function(x) simpleCap(x))
  # Municialities
  #removing errant records...
  f.pLookup <- f.pLookup[which(f.pLookup$placefips != 0),] #remove State Records
  f.pLookup <- f.pLookup[which(f.pLookup$countyfips != 999),] # County total records for multiple places
  f.pLookup <- f.pLookup[which(f.pLookup$placefips != 99990),] #Remove Unincoprpoated Areas
  f.pLookup <- f.pLookup[which(!is.na(f.pLookup$placefips)),] #Remove Disbanded Areas
  # Normalize municipality names: strip "(Part)" suffixes, expand "Sprgs",
  # drop "/G" markers.
  f.pLookup$municipalityname <- gsub(' \\(Part\\)','',f.pLookup$municipalityname)
  f.pLookup$municipalityname <- gsub(' \\(part\\)','',f.pLookup$municipalityname)
  f.pLookup$municipalityname <- gsub('Sprgs','Springs',f.pLookup$municipalityname)
  f.pLookup$municipalityname <- gsub('/G','',f.pLookup$municipalityname)
  #merging f.pLookup and f.mLookup and updating totalpopulation value
  f.mLookup <- f.mLookup[,c(2,4)]
  f.pLookupFin <- left_join(f.pLookup,f.mLookup,by="placefips")
  f.pLookupFin$cty_Pop <- f.pLookupFin$totalpopulation.x # this is the potions of the population in each portion
  # Prefer the multi-county total (.y) when present, otherwise keep the
  # per-county value (.x).
  f.pLookupFin$totalpopulation <- ifelse(is.na(f.pLookupFin$totalpopulation.y),f.pLookupFin$totalpopulation.x,f.pLookupFin$totalpopulation.y)
  # Positional reorder/selection — assumes the column layout produced above.
  f.pLookupFin <- f.pLookupFin[,c(2,1,3,4,8,7)]
  # merging counties and municipals
  f.cty <- f.cLookup[,c(1,3)]
  f.plac <- left_join(f.pLookupFin,f.cty,by="countyfips")
  names(f.plac)[3] <- "municipalityname"
  names(f.plac)[7] <- "countyname"
  f.plac <- f.plac[,c(2,1,3:7)]
  f.plac <- f.plac[order(f.plac$municipalityname),]
  loc <- list("Counties" = f.cLookup, "Munis" = f.plac, "Region" = regList)
  return(loc)
}
|
/R/popPlace.R
|
no_license
|
ColoradoDemography/Housing_DG
|
R
| false
| false
| 4,000
|
r
|
#' popPlace : Populates the input$unit field using information from the PostGres estimates database.
#'
#' Builds the lookup structures used by the dashboard: county records,
#' municipality records (with multi-county municipality totals resolved) and
#' a fixed list of planning-region names. Revised 1/23/2019.
#'
#' @param DBPool Database connection pool used to run the estimates queries.
#' @param curyr Year of the estimates to retrieve.
#'
#' @return A named list with elements `Counties` (county records),
#'   `Munis` (municipality records with county name and population) and
#'   `Region` (list of region display names).
#'
#' @export
popPlace <- function(DBPool,curyr) {
  #Regions -- fixed display list, not read from the database.
  regList <- list(
    "Denver PMSA",
    "Denver-Boulder Metro Area",
    "Denver-Boulder-Greely CMSA",
    "10 County Denver Metro Area",
    "Central Mountains",
    "Eastern Plains",
    "Front Range",
    "San Luis Valley",
    "Western Slope",
    "Region 1: Northern Eastern Plains",
    "Region 2: Northern Front Range",
    "Region 3: Denver Metropolitan Area",
    "Region 4: Southern Front Range",
    "Region 5: Central Eastern Plains",
    "Region 6: Southern Eastern Plains",
    "Region 7: Pueblo County",
    "Region 8: San Juan Valley",
    "Region 9: Southern Western Slope",
    "Region 10: Central Western Slope",
    "Region 11: Northern Western Slope",
    "Region 12: Northern Mountains",
    "Region 13: Central Mountains",
    "Region 14: Southern Mountains")
  # Create Connection Strings
  # clookup: county rows (placefips = 0); plookup: all rows for the year;
  # mlookup: totals for municipalities spanning several counties
  # (countyfips = 999).
  clookupStr <- paste0("SELECT countyfips, placefips, municipalityname, year, totalpopulation FROM estimates.county_muni_timeseries WHERE year = ", curyr, " and placefips = 0;")
  plookupStr <- paste0("SELECT countyfips, placefips, municipalityname, year, totalpopulation FROM estimates.county_muni_timeseries WHERE year = ", curyr, ";")
  mlookupStr <- paste0("SELECT countyfips, placefips, year, totalpopulation FROM estimates.county_muni_timeseries WHERE year = ", curyr, " and placefips != 0 and placefips != 99990 and countyfips = 999;")
  # f.cLookup contains the county records
  f.cLookup <- dbGetQuery(DBPool, clookupStr)
  f.pLookup <- dbGetQuery(DBPool, plookupStr)
  #f.mLookup is the multi county cities
  f.mLookup <- dbGetQuery(DBPool, mlookupStr)
  # Counties
  # f.cLookup <- f.cLookup[c(2:nrow(f.cLookup)),]
  # Column 3 is municipalityname (positional — tied to the SELECT order above).
  f.cLookup[,3] <- sapply(f.cLookup[,3], function(x) simpleCap(x))
  # Municialities
  #removing errant records...
  f.pLookup <- f.pLookup[which(f.pLookup$placefips != 0),] #remove State Records
  f.pLookup <- f.pLookup[which(f.pLookup$countyfips != 999),] # County total records for multiple places
  f.pLookup <- f.pLookup[which(f.pLookup$placefips != 99990),] #Remove Unincoprpoated Areas
  f.pLookup <- f.pLookup[which(!is.na(f.pLookup$placefips)),] #Remove Disbanded Areas
  # Normalize municipality names: strip "(Part)" suffixes, expand "Sprgs",
  # drop "/G" markers.
  f.pLookup$municipalityname <- gsub(' \\(Part\\)','',f.pLookup$municipalityname)
  f.pLookup$municipalityname <- gsub(' \\(part\\)','',f.pLookup$municipalityname)
  f.pLookup$municipalityname <- gsub('Sprgs','Springs',f.pLookup$municipalityname)
  f.pLookup$municipalityname <- gsub('/G','',f.pLookup$municipalityname)
  #merging f.pLookup and f.mLookup and updating totalpopulation value
  f.mLookup <- f.mLookup[,c(2,4)]
  f.pLookupFin <- left_join(f.pLookup,f.mLookup,by="placefips")
  f.pLookupFin$cty_Pop <- f.pLookupFin$totalpopulation.x # this is the potions of the population in each portion
  # Prefer the multi-county total (.y) when present, otherwise keep the
  # per-county value (.x).
  f.pLookupFin$totalpopulation <- ifelse(is.na(f.pLookupFin$totalpopulation.y),f.pLookupFin$totalpopulation.x,f.pLookupFin$totalpopulation.y)
  # Positional reorder/selection — assumes the column layout produced above.
  f.pLookupFin <- f.pLookupFin[,c(2,1,3,4,8,7)]
  # merging counties and municipals
  f.cty <- f.cLookup[,c(1,3)]
  f.plac <- left_join(f.pLookupFin,f.cty,by="countyfips")
  names(f.plac)[3] <- "municipalityname"
  names(f.plac)[7] <- "countyname"
  f.plac <- f.plac[,c(2,1,3:7)]
  f.plac <- f.plac[order(f.plac$municipalityname),]
  loc <- list("Counties" = f.cLookup, "Munis" = f.plac, "Region" = regList)
  return(loc)
}
|
#' Obtain standard deviation
#'
#' `sd_pop` computes the population standard deviation (denominator `n`,
#' not `n - 1`) of a vector of observations. `NA` and `NaN` values are
#' removed automatically before the computation.
#'
#' @param x Vector of numeric values.
#' @param quiet Whether to suppress the message emitted when `NA`/`NaN`
#'   values are dropped. Defaults to `TRUE`.
#' @return A numeric value corresponding to the population standard
#'   deviation of the input.
#' @export
sd_pop <- function(x, quiet = TRUE) {
  # is.na() is TRUE for NaN as well, so one pass removes both; the original
  # separate is.nan() branch was unreachable.
  if (anyNA(x)) {
    if (!quiet) message("Removing NAs when finding std. deviation")
    x <- x[!is.na(x)]
  }
  # Population variance numerator; avoid shadowing stats::sd.
  sum_sq <- sum((x - mean(x))^2)
  sqrt(sum_sq / length(x))
}
|
/R/sd_pop.R
|
no_license
|
awqx/qsarr
|
R
| false
| false
| 711
|
r
|
#' Obtain standard deviation
#'
#' `sd_pop` computes the population standard deviation (denominator `n`,
#' not `n - 1`) of a vector of observations. `NA` and `NaN` values are
#' removed automatically before the computation.
#'
#' @param x Vector of numeric values.
#' @param quiet Whether to suppress the message emitted when `NA`/`NaN`
#'   values are dropped. Defaults to `TRUE`.
#' @return A numeric value corresponding to the population standard
#'   deviation of the input.
#' @export
sd_pop <- function(x, quiet = TRUE) {
  # is.na() is TRUE for NaN as well, so one pass removes both; the original
  # separate is.nan() branch was unreachable.
  if (anyNA(x)) {
    if (!quiet) message("Removing NAs when finding std. deviation")
    x <- x[!is.na(x)]
  }
  # Population variance numerator; avoid shadowing stats::sd.
  sum_sq <- sum((x - mean(x))^2)
  sqrt(sum_sq / length(x))
}
|
#' Get Opening Stock Data
#'
#' Function to obtain opening stocks data calculated by the faoswsStock module.
#'
#' Queries measured element 5113 (opening stocks) for the complete
#' production imputation key, pivoted with year descending, and coerces
#' the year and area columns to numeric.
#'
#' @return The (non-normalized, flagged) result of `GetData` with numeric
#'   `timePointYears` and `geographicAreaM49` columns.
#'
#' @export getOpeningStockData
getOpeningStockData = function(){
    ## define measured elements
    ## 5113 = opening stocks element code in the SWS dimension.
    openingStockKey <- getCompleteImputationKey(table = "production")
    openingStockKey@dimensions[["measuredElement"]]@keys <- "5113"
    ## Pivot to vectorize yield computation
    openingStockPivot = c(
        ## Pivoting(code = areaVarFS, ascending = TRUE),
        ## Pivoting(code = itemVarFS, ascending = TRUE),
        ## Pivoting(code = yearVar, ascending = FALSE),
        ## Pivoting(code = elementVarFS, ascending = TRUE)
        Pivoting(code = areaVar, ascending = TRUE),
        Pivoting(code = itemVar, ascending = TRUE),
        Pivoting(code = yearVar, ascending = FALSE),
        Pivoting(code = elementVar, ascending = TRUE)
    )
    ## Query the data
    openingStockQuery = GetData(
        key = openingStockKey,
        flags = TRUE,
        normalized = FALSE,
        pivoting = openingStockPivot
    )
    ## setnames(lossQuery,
    ##          old = names(lossQuery),
    ##          new = c("geographicAreaFS","measuredItemFCL","timePointYears",
    ##                  "Value_measuredElement_5120","flagFaostat_measuredElementFS_5120")
    ##          )
    ## ## Convert geographicAreaM49 to geographicAreaFS
    ## lossQuery[, geographicAreaM49 := as.numeric(faoswsUtil::fs2m49(as.character(geographicAreaFS)))]
    ## ## Convert measuredItemCPC to measuredItemFCL
    ## lossQuery[, measuredItemFCL := addHeadingsFCL(measuredItemFCL)]
    ## lossQuery[, measuredItemCPC := faoswsUtil::fcl2cpc(as.character(measuredItemFCL))]
    ## Convert time to numeric (data.table in-place update).
    openingStockQuery[, timePointYears := as.numeric(timePointYears)]
    openingStockQuery[, geographicAreaM49 := as.numeric(geographicAreaM49)]
    ## Taking only official data
    ## NOTE(review): the "official data" / protected-flag filtering below is
    ## entirely commented out, so ALL flag combinations are returned — confirm
    ## this is intended.
    ## distinct(lossQuery,flagFaostat_measuredElementFS_5120)
    ## lossQuery = lossQuery[flagFaostat_measuredElementFS_5120 == "", ]
    ## if (protected) {
    ##     protectedFlag <- flagValidTable[flagValidTable$Protected == TRUE,] %>%
    ##         .[, flagCombination := paste(flagObservationStatus, flagMethod, sep = ";")]
    ##     col_keep <- names(openingStockQuery) %>%
    ##         .[.!="flagCombination"]
    ##     ## subset to protected flags
    ##     openingStockQuery <-
    ##         openingStockQuery[, flagCombination := paste(flagObservationStatus_measuredElement_5113, flagMethod_measuredElement_5113, sep = ";")] %>%
    ##         merge(., protectedFlag, by = "flagCombination") %>%
    ##         filter(Protected == TRUE) %>% # only keep protected values
    ##         select_(.dots = col_keep)
    ## }
    ## Implicit return of the queried (and type-coerced) table.
    openingStockQuery
}
|
/R/getOpeningStockData.R
|
no_license
|
AEENRA/faoswsLoss
|
R
| false
| false
| 2,626
|
r
|
#' Get Opening Stock Data
#'
#' Function to obtain opening stocks data calculated by the faoswsStock module.
#'
#' Queries measured element 5113 (opening stocks) for the complete
#' production imputation key, pivoted with year descending, and coerces
#' the year and area columns to numeric.
#'
#' @return The (non-normalized, flagged) result of `GetData` with numeric
#'   `timePointYears` and `geographicAreaM49` columns.
#'
#' @export getOpeningStockData
getOpeningStockData = function(){
    ## define measured elements
    ## 5113 = opening stocks element code in the SWS dimension.
    openingStockKey <- getCompleteImputationKey(table = "production")
    openingStockKey@dimensions[["measuredElement"]]@keys <- "5113"
    ## Pivot to vectorize yield computation
    openingStockPivot = c(
        ## Pivoting(code = areaVarFS, ascending = TRUE),
        ## Pivoting(code = itemVarFS, ascending = TRUE),
        ## Pivoting(code = yearVar, ascending = FALSE),
        ## Pivoting(code = elementVarFS, ascending = TRUE)
        Pivoting(code = areaVar, ascending = TRUE),
        Pivoting(code = itemVar, ascending = TRUE),
        Pivoting(code = yearVar, ascending = FALSE),
        Pivoting(code = elementVar, ascending = TRUE)
    )
    ## Query the data
    openingStockQuery = GetData(
        key = openingStockKey,
        flags = TRUE,
        normalized = FALSE,
        pivoting = openingStockPivot
    )
    ## setnames(lossQuery,
    ##          old = names(lossQuery),
    ##          new = c("geographicAreaFS","measuredItemFCL","timePointYears",
    ##                  "Value_measuredElement_5120","flagFaostat_measuredElementFS_5120")
    ##          )
    ## ## Convert geographicAreaM49 to geographicAreaFS
    ## lossQuery[, geographicAreaM49 := as.numeric(faoswsUtil::fs2m49(as.character(geographicAreaFS)))]
    ## ## Convert measuredItemCPC to measuredItemFCL
    ## lossQuery[, measuredItemFCL := addHeadingsFCL(measuredItemFCL)]
    ## lossQuery[, measuredItemCPC := faoswsUtil::fcl2cpc(as.character(measuredItemFCL))]
    ## Convert time to numeric (data.table in-place update).
    openingStockQuery[, timePointYears := as.numeric(timePointYears)]
    openingStockQuery[, geographicAreaM49 := as.numeric(geographicAreaM49)]
    ## Taking only official data
    ## NOTE(review): the "official data" / protected-flag filtering below is
    ## entirely commented out, so ALL flag combinations are returned — confirm
    ## this is intended.
    ## distinct(lossQuery,flagFaostat_measuredElementFS_5120)
    ## lossQuery = lossQuery[flagFaostat_measuredElementFS_5120 == "", ]
    ## if (protected) {
    ##     protectedFlag <- flagValidTable[flagValidTable$Protected == TRUE,] %>%
    ##         .[, flagCombination := paste(flagObservationStatus, flagMethod, sep = ";")]
    ##     col_keep <- names(openingStockQuery) %>%
    ##         .[.!="flagCombination"]
    ##     ## subset to protected flags
    ##     openingStockQuery <-
    ##         openingStockQuery[, flagCombination := paste(flagObservationStatus_measuredElement_5113, flagMethod_measuredElement_5113, sep = ";")] %>%
    ##         merge(., protectedFlag, by = "flagCombination") %>%
    ##         filter(Protected == TRUE) %>% # only keep protected values
    ##         select_(.dots = col_keep)
    ## }
    ## Implicit return of the queried (and type-coerced) table.
    openingStockQuery
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotIndividualData.R
\name{plotIndividualData}
\alias{plotIndividualData}
\title{plotIndividualData}
\usage{
plotIndividualData(pt.id = 60, what.data = "both", log.scale = T,
plot.psad = F, pt)
}
\arguments{
\item{pt.id}{Patient whose data to print}
\item{what.data}{What clinical data source to print}
\item{log.scale}{Logical; whether to plot on a log scale. Defaults to \code{TRUE}.}
\item{plot.psad}{Logical; whether to also plot PSAD. Defaults to \code{FALSE}.}
\item{pt}{Patient data used for plotting.}
}
\description{
Plot the clinical data for an individual patient.
}
|
/man/plotIndividualData.Rd
|
no_license
|
jbindman/prostate-project
|
R
| false
| true
| 485
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotIndividualData.R
\name{plotIndividualData}
\alias{plotIndividualData}
\title{plotIndividualData}
\usage{
plotIndividualData(pt.id = 60, what.data = "both", log.scale = T,
plot.psad = F, pt)
}
\arguments{
\item{pt.id}{Patient whose data to print}
\item{what.data}{What clinical data source to print}
\item{log.scale}{Logical; whether to plot on a log scale. Defaults to \code{TRUE}.}
\item{plot.psad}{Logical; whether to also plot PSAD. Defaults to \code{FALSE}.}
\item{pt}{Patient data used for plotting.}
}
\description{
Plot the clinical data for an individual patient.
}
|
# Download (or reuse a cached copy of) the household power consumption data
# set and return the observations for 2007-02-01/02 as a data.table with an
# added POSIXct 'epoch' timestamp column.
loadHouseholdData <- function() {
  library(data.table)
  # Reuse the cached zip when it exists, is younger than 24 hours and looks
  # complete (> 20 MB).
  zipFile <- "HouseholdData.zip"
  if (file.exists(zipFile) &&
      file.info(zipFile)$ctime > Sys.time() - 24 * 60 * 60 &&
      file.info(zipFile)$size > 20000000) {
    warning("Using cached file")
  } else {
    download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zipFile)
    if (!file.exists(zipFile)) {
      stop("Download file failed")
    }
    if (file.info(zipFile)$size < 20000000) {
      # Remove the truncated download BEFORE signalling the error; the
      # original unlink() sat after stop() and was unreachable.
      unlink(zipFile)
      stop("Downloaded file is too small")
    }
  }
  # read the data; '?' marks missing values, first two columns are
  # character Date/Time, the remaining seven are numeric measurements.
  alldata <- read.table(unz(zipFile, "household_power_consumption.txt"),
                        header = TRUE, quote = "\"", sep = ";",
                        stringsAsFactors = FALSE, na.strings = "?",
                        colClasses = c(rep("character", 2), rep("numeric", 7)))
  # filter out anything that's not 2007-02-01 or 2007-02-02
  # and make it a data.table (dates are d/m/Y strings in the raw file)
  data <- data.table(alldata[alldata$Date == '1/2/2007' | alldata$Date == '2/2/2007', ])
  # add a calculated field, labeled epoch, that holds the datetime in POSIXct format
  data <- data[, epoch := as.POSIXct(paste(Date, Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")]
  # Explicit (visible) return of the filtered table.
  data
}
|
/loadHouseholdData.R
|
no_license
|
twuyts/ExData_Plotting1
|
R
| false
| false
| 1,395
|
r
|
# Download (or reuse a cached copy of) the household power consumption data
# set and return the observations for 2007-02-01/02 as a data.table with an
# added POSIXct 'epoch' timestamp column.
loadHouseholdData <- function() {
  library(data.table)
  # Reuse the cached zip when it exists, is younger than 24 hours and looks
  # complete (> 20 MB).
  zipFile <- "HouseholdData.zip"
  if (file.exists(zipFile) &&
      file.info(zipFile)$ctime > Sys.time() - 24 * 60 * 60 &&
      file.info(zipFile)$size > 20000000) {
    warning("Using cached file")
  } else {
    download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zipFile)
    if (!file.exists(zipFile)) {
      stop("Download file failed")
    }
    if (file.info(zipFile)$size < 20000000) {
      # Remove the truncated download BEFORE signalling the error; the
      # original unlink() sat after stop() and was unreachable.
      unlink(zipFile)
      stop("Downloaded file is too small")
    }
  }
  # read the data; '?' marks missing values, first two columns are
  # character Date/Time, the remaining seven are numeric measurements.
  alldata <- read.table(unz(zipFile, "household_power_consumption.txt"),
                        header = TRUE, quote = "\"", sep = ";",
                        stringsAsFactors = FALSE, na.strings = "?",
                        colClasses = c(rep("character", 2), rep("numeric", 7)))
  # filter out anything that's not 2007-02-01 or 2007-02-02
  # and make it a data.table (dates are d/m/Y strings in the raw file)
  data <- data.table(alldata[alldata$Date == '1/2/2007' | alldata$Date == '2/2/2007', ])
  # add a calculated field, labeled epoch, that holds the datetime in POSIXct format
  data <- data[, epoch := as.POSIXct(paste(Date, Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")]
  # Explicit (visible) return of the filtered table.
  data
}
|
#' Function to launch the meteoland app
#'
#' Builds the shiny UI (language selector in the navbar, a sidebar with
#' mode/coordinate inputs, a leaflet map) and the server, and returns the
#' resulting shiny app object.
#'
#' @importFrom magrittr %>%
#'
#' @return A shiny app object created by [shiny::shinyApp()].
#'
#' @export
meteoland_app <- function(
) {
  ### Language input ###########################################################
  shiny::addResourcePath(
    'images', system.file('resources', 'images', package = 'meteolandappkg')
  )
  lang_choices <- c('cat', 'spa', 'eng')
  # HTML templates for the flag + language-code entries of the picker below.
  lang_flags <- c(
    glue::glue("<img class='flag-image' src='images/cat.png' width=20px><div class='flag-lang'>%s</div></img>"),
    glue::glue("<img class='flag-image' src='images/spa.png' width=20px><div class='flag-lang'>%s</div></img>"),
    glue::glue("<img class='flag-image' src='images/eng.png' width=20px><div class='flag-lang'>%s</div></img>")
  )
  ## UI ####
  ui <- shiny::tagList(
    # shinyjs
    shinyjs::useShinyjs(),
    # css
    shiny::tags$head(
      # custom css
      shiny::includeCSS(
        system.file('resources', 'meteoland.css', package = 'meteolandappkg')
      ),
      # corporative image css
      shiny::includeCSS(
        system.file('resources', 'corp_image.css', package = 'meteolandappkg')
      )
    ),
    navbarPageWithInputs(
      # opts
      title = 'Meteoland App',
      id = 'nav', collapsible = TRUE,
      # navbar with inputs (helpers.R) accepts an input argument, we use it for the lang
      # selector
      inputs = shinyWidgets::pickerInput(
        'lang', NULL,
        choices = lang_choices,
        selected = 'cat',
        width = '100px',
        choicesOpt = list(
          content = c(
            sprintf(lang_flags[1], lang_choices[1]),
            sprintf(lang_flags[2], lang_choices[2]),
            sprintf(lang_flags[3], lang_choices[3])
          )
        )
      ),
      # navbarPage contents
      shiny::tabPanel(
        title = shiny::uiOutput("actual_tab"),
        # we need to create the ui in the server to catch the language input
        # and redraw all the inputs and texts in the selected lang
        shiny::uiOutput('current_ui')
      ) # end of current tab
    ) # end of navbar
  ) # end of UI
  ## SERVER ####
  server <- function(input, output, session) {
    ## debug #####
    # output$debug1 <- shiny::renderPrint({
    #   input$map_daily_marker_click
    # })
    # output$debug2 <- shiny::renderPrint({
    #   map_reactives$map_click
    # })
    # output$debug3 <- shiny::renderPrint({
    #   map_reactives$map_shape_click
    # })
    ## lang reactive ####
    lang <- shiny::reactive({
      input$lang
    })
    # Tab title re-rendered in the selected language (translate_app is a
    # package helper defined elsewhere).
    output$actual_tab <- shiny::renderText({
      translate_app('actual_tab_title', lang())
    })
    ## proper UI ####
    output$current_ui <- shiny::renderUI({
      # Read the language reactive so the whole UI redraws on change
      # (the value itself is not used further below yet).
      lang_declared <- lang()
      shiny::tagList(
        # a little space
        shiny::br(),
        shiny::sidebarLayout(
          shiny::sidebarPanel(
            # sidebar width
            width = 3,
            # panel for fixed inputs (mode and point/grid)
            shiny::wellPanel(
              # Mode selector
              shiny::radioButtons(
                inputId = 'mode_sel',
                label = 'Please select the desired mode:',
                choices = c('Historical', 'Current', 'Projection'),
                inline = TRUE, selected = 'Historical'
              ),
              # point/grid selector
              shiny::radioButtons(
                inputId = 'point_grid_sel',
                label = 'Points (up to 10) or Grid?',
                choices = c('Points', 'Grid'),
                inline = TRUE, selected = 'Points'
              )
            ),
            # Dinamic ui to show inputs and buttons depending on the mode selected
            shiny:: wellPanel(
              shiny::uiOutput(
                outputId = 'dinamic_inputs'
              )
            ),
            # latitude and longitude selector. To be able to show both in the same
            # line we must to rely in some html/css magic ;)
            shiny:: div(style = "display: inline-block;vertical-align:top; width: 145px;",
                        shiny::numericInput(
                          'latitude',
                          label = 'Latitude',
                          value = NA)),
            shiny::div(style = "display: inline-block;vertical-align:top; width: 145px;",
                       shiny::numericInput(
                         'longitude',
                         label = 'Longitude',
                         value = NA)),
            # conditional panel to show in case of grid. In this case we need
            # two different sets of coordinates, the upper left and the bottom
            # right coordinates of the boundary box desired by the user
            shiny::conditionalPanel(
              condition = "input.point_grid_sel == 'Grid'",
              shiny::div(style = "display: inline-block;vertical-align:top; width: 145px;",
                         shiny::numericInput(
                           'latitude_bottom',
                           label = 'Latitude bottom right',
                           value = NA)),
              shiny::div(style = "display: inline-block;vertical-align:top; width: 145px;",
                         shiny::numericInput(
                           'longitude_bottom',
                           label = 'Longitude bottom right',
                           value = NA)),
              shiny::p("Grid mode selected."),
              shiny::p("Please provide the upper left coordinates and the bottom right coordinates of the desired grid.")
            ),
            shiny::p('Coordinates input must be in latitude/logitude Mercator ',
                     'projection, in decimal format'),
            # selected coordinates output, we need a fluid row to put inline
            # the selected coordinates and the clear button. All of this is in
            # a conditional panel to show only if points are selected
            shiny::conditionalPanel(
              condition = "input.point_grid_sel == 'Points'",
              # Append coordinates button
              shiny::actionButton(
                inputId = 'append_coord_button',
                label = 'Append coords',
                icon = shiny::icon('bullseye')
              ),
              # a little space and a header
              shiny::br(), shiny::br(),
              shiny::h5('Selected points:'),
              shiny::fluidRow(
                # coord column
                shiny::column(
                  width = 6,
                  shiny::br(),
                  shiny::tableOutput('user_coords')
                ),
                # reset button column
                shiny::column(
                  width = 6,
                  shiny::br(), shiny::br(),
                  shiny::actionButton(
                    inputId = 'reset_coord_button',
                    label = 'Reset coords',
                    icon = shiny::icon('eraser')
                  )
                )
              )
              # debug
              # textOutput('clicked'),
              # textOutput('lat_debug'),
              # textOutput('long_debug')
              # textOutput('dates_debug')
              # ,textOutput('interpolated_df_debug')
            ),
            # a little space
            shiny::br(), shiny::br(),
            # Action button to activate the process
            shiny::actionButton(
              inputId = 'process_button',
              label = 'Go!',
              icon = shiny::icon('play')
            )
          ),
          shiny::mainPanel(
            # main panel width
            width = 9,
            # map output
            leaflet::leafletOutput('map', height = 600)
          )
        )
      )
    }) ## end of proper UI
    #### user coords data frame ####
    # empty coordinates data frame, to be able to add clicks in the map and
    # manual inputs in the case of more than one coordinate pair
    user_coords <- reactiveValues()
    user_coords$df <- data.frame(
      lat = numeric(0),
      lng = numeric(0)
    )
  } # end of server function
  # Run the application
  meteolandapp <- shiny::shinyApp(
    ui = ui, server = server,
    # onStart = function() {
    #
    #   ## on stop routine to cloose the db pool
    #   shiny::onStop(function() {
    #     pool::poolClose(catdrought_db)
    #   })
    # }
  )
  # shiny::runApp(nfi_app)
  return(meteolandapp)
}
|
/R/app.R
|
no_license
|
MalditoBarbudo/meteolandappkg
|
R
| false
| false
| 8,290
|
r
|
#' function to launch the catdrought app
#'
#' @importFrom magrittr %>%
#'
#' @export
meteoland_app <- function(
) {
### Language input ###########################################################
shiny::addResourcePath(
'images', system.file('resources', 'images', package = 'meteolandappkg')
)
lang_choices <- c('cat', 'spa', 'eng')
lang_flags <- c(
glue::glue("<img class='flag-image' src='images/cat.png' width=20px><div class='flag-lang'>%s</div></img>"),
glue::glue("<img class='flag-image' src='images/spa.png' width=20px><div class='flag-lang'>%s</div></img>"),
glue::glue("<img class='flag-image' src='images/eng.png' width=20px><div class='flag-lang'>%s</div></img>")
)
## UI ####
ui <- shiny::tagList(
# shinyjs
shinyjs::useShinyjs(),
# css
shiny::tags$head(
# custom css
shiny::includeCSS(
system.file('resources', 'meteoland.css', package = 'meteolandappkg')
),
# corporative image css
shiny::includeCSS(
system.file('resources', 'corp_image.css', package = 'meteolandappkg')
)
),
navbarPageWithInputs(
# opts
title = 'Meteoland App',
id = 'nav', collapsible = TRUE,
# navbar with inputs (helpers.R) accepts an input argument, we use it for the lang
# selector
inputs = shinyWidgets::pickerInput(
'lang', NULL,
choices = lang_choices,
selected = 'cat',
width = '100px',
choicesOpt = list(
content = c(
sprintf(lang_flags[1], lang_choices[1]),
sprintf(lang_flags[2], lang_choices[2]),
sprintf(lang_flags[3], lang_choices[3])
)
)
),
# navbarPage contents
shiny::tabPanel(
title = shiny::uiOutput("actual_tab"),
# we need to create the ui in the server to catch the language input
# and redraw all the inputs and texts in the selected lang
shiny::uiOutput('current_ui')
) # end of current tab
) # end of navbar
) # end of UI
## SERVER ####
server <- function(input, output, session) {
## debug #####
# output$debug1 <- shiny::renderPrint({
# input$map_daily_marker_click
# })
# output$debug2 <- shiny::renderPrint({
# map_reactives$map_click
# })
# output$debug3 <- shiny::renderPrint({
# map_reactives$map_shape_click
# })
## lang reactive ####
lang <- shiny::reactive({
input$lang
})
output$actual_tab <- shiny::renderText({
translate_app('actual_tab_title', lang())
})
## proper UI ####
output$current_ui <- shiny::renderUI({
lang_declared <- lang()
shiny::tagList(
# a little space
shiny::br(),
shiny::sidebarLayout(
shiny::sidebarPanel(
# sidebar width
width = 3,
# panel for fixed inputs (mode and point/grid)
shiny::wellPanel(
# Mode selector
shiny::radioButtons(
inputId = 'mode_sel',
label = 'Please select the desired mode:',
choices = c('Historical', 'Current', 'Projection'),
inline = TRUE, selected = 'Historical'
),
# point/grid selector
shiny::radioButtons(
inputId = 'point_grid_sel',
label = 'Points (up to 10) or Grid?',
choices = c('Points', 'Grid'),
inline = TRUE, selected = 'Points'
)
),
# Dinamic ui to show inputs and buttons depending on the mode selected
shiny:: wellPanel(
shiny::uiOutput(
outputId = 'dinamic_inputs'
)
),
# latitude and longitude selector. To be able to show both in the same
# line we must to rely in some html/css magic ;)
shiny:: div(style = "display: inline-block;vertical-align:top; width: 145px;",
shiny::numericInput(
'latitude',
label = 'Latitude',
value = NA)),
shiny::div(style = "display: inline-block;vertical-align:top; width: 145px;",
shiny::numericInput(
'longitude',
label = 'Longitude',
value = NA)),
# conditional panel to show in case of grid. In this case we need
# two different sets of coordinates, the upper left and the bottom
# right coordinates of the boundary box desired by the user
shiny::conditionalPanel(
condition = "input.point_grid_sel == 'Grid'",
shiny::div(style = "display: inline-block;vertical-align:top; width: 145px;",
shiny::numericInput(
'latitude_bottom',
label = 'Latitude bottom right',
value = NA)),
shiny::div(style = "display: inline-block;vertical-align:top; width: 145px;",
shiny::numericInput(
'longitude_bottom',
label = 'Longitude bottom right',
value = NA)),
shiny::p("Grid mode selected."),
shiny::p("Please provide the upper left coordinates and the bottom right coordinates of the desired grid.")
),
shiny::p('Coordinates input must be in latitude/logitude Mercator ',
'projection, in decimal format'),
# selected coordinates output, we need a fluid row to put inline
# the selected coordinates and the clear button. All of this is in
# a conditional panel to show only if points are selected
shiny::conditionalPanel(
condition = "input.point_grid_sel == 'Points'",
# Append coordinates button
shiny::actionButton(
inputId = 'append_coord_button',
label = 'Append coords',
icon = shiny::icon('bullseye')
),
# a little space and a header
shiny::br(), shiny::br(),
shiny::h5('Selected points:'),
shiny::fluidRow(
# coord column
shiny::column(
width = 6,
shiny::br(),
shiny::tableOutput('user_coords')
),
# reset button column
shiny::column(
width = 6,
shiny::br(), shiny::br(),
shiny::actionButton(
inputId = 'reset_coord_button',
label = 'Reset coords',
icon = shiny::icon('eraser')
)
)
)
# debug
# textOutput('clicked'),
# textOutput('lat_debug'),
# textOutput('long_debug')
# textOutput('dates_debug')
# ,textOutput('interpolated_df_debug')
),
# a little space
shiny::br(), shiny::br(),
# Action button to activate the process
shiny::actionButton(
inputId = 'process_button',
label = 'Go!',
icon = shiny::icon('play')
)
),
shiny::mainPanel(
# main panel width
width = 9,
# map output
leaflet::leafletOutput('map', height = 600)
)
)
)
}) ## end of proper UI
#### user coords data frame ####
# empty coordinates data frame, to be able to add clicks in the map and
# manual inputs in the case of more than one coordinate pair
user_coords <- reactiveValues()
user_coords$df <- data.frame(
lat = numeric(0),
lng = numeric(0)
)
} # end of server function
# Run the application
meteolandapp <- shiny::shinyApp(
ui = ui, server = server,
# onStart = function() {
#
# ## on stop routine to cloose the db pool
# shiny::onStop(function() {
# pool::poolClose(catdrought_db)
# })
# }
)
# shiny::runApp(nfi_app)
return(meteolandapp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_station.R
\name{select_station}
\alias{select_station}
\title{Choose the station for a given landslide-position}
\usage{
select_station(position = NULL, method = "distance", n = 3)
}
\arguments{
\item{n}{Number of Stations (Voronoi only uses 1)}
}
\description{
Choose the station for a given landslide-position
}
|
/man/select_station.Rd
|
no_license
|
MaThRk/raingaugeR
|
R
| false
| true
| 399
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_station.R
\name{select_station}
\alias{select_station}
\title{Choose the station for a given landslide-position}
\usage{
select_station(position = NULL, method = "distance", n = 3)
}
\arguments{
\item{n}{Number of Stations (Voronoi only uses 1)}
}
\description{
Choose the station for a given landslide-position
}
|
load("NunnData.RData")
High[High==0]<-NA
M<-is.na(Low+High+apply(X,1:2,sum))
L<-Low
L[M]<-NA
U<-High
U[M]<-NA
#######################################################:
###### SUMMARIZE THE FINAL MODEL #######:
#######################################################:
load("fit.Robject")
#Extremal coefficient:
num<-1:32
EC<-fit$EC.mn
EC<-cbind(NA,EC[,1:14],NA,NA,EC[,15:28],NA)
EC<-rbind(NA,EC[1:14,],NA,NA,EC[15:28,],NA)
library(fields)
image.plot(1:32,1:32,EC,
xlab="Tooth number",ylab="Tooth number",
cex.lab=1,cex.axis=1)
X11()
burn<-40000
iters<-50000
#Table of coefficients
SSS<-fit$beta
for(j in 1:9){
SSS<-cbind(SSS,fit$alpha*fit$beta[,j])
}
SSS<-cbind(SSS,fit$scale)
SSS<-cbind(SSS,fit$shape[,1])
SSS<-cbind(SSS,fit$alpha*fit$shape[,1])
SSS<-cbind(SSS,fit$alpha)
SSS<-SSS[burn:iters,]
colnames(SSS)<-c(colnames(fit$beta),colnames(fit$beta),
"scale Molar","scale Pre-Molar","scale Can","scale Inc",
"shapeC","shapeM","alpha")
QQQ<-apply(SSS,2,quantile,c(0.5,0.05,0.95))
print(round(QQQ,2))
#######################################################:
###### Plot one sub #######:
#######################################################:
# Set-up for plotting one subject's data and model summaries.
tooth<-c(2:15,18:31)
covs<-c("Crown-to-root ","Prob depth ","Mobility ","Miss neighbor ")
sub<-9
sub<-3  # final subject choice; the assignment above is overridden
par(mfrow=c(3,2),mar=c(4,5,4,1))
XXX<-X[,sub,5:8]  # covariates for this subject
Y1<-fit$pred.y.mn[,sub]-L[,sub]  # mean residual life, PS model
#Y2<-pred.y.mn.gauss[,sub]-L[,sub]
S5<-fit$Surv5[,sub]  # 5-year survival probabilities
S10<-fit$Surv10[,sub]  # 10-year survival probabilities
Y1<-ifelse(Y1>100,100,Y1)  # cap mean residual life at 100 years for plotting
#Y2<-ifelse(Y2>100,100,Y2)
M<-is.na(L[,sub])  # teeth with no observed interval for this subject
XXX[M,]<-NA
# Bug fix: the original chain also assigned Y2[M]<-NA, but Y2 is never
# created (its definition is commented out above), so this line stopped
# with "object 'Y2' not found". Y2 is dropped from the chain.
S5[M]<-S10[M]<-Y1[M]<-NA
#Probing depth:
plot(tooth,XXX[,2],xaxt="n",main="Probing depth",
xlab="",ylab="Probing depth (mm)")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){abline(v=tooth[j],lty=2)}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Mobility:
plot(tooth,XXX[,3],xaxt="n",main="Mobility",
xlab="",ylab="Mobility")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){abline(v=tooth[j],lty=2)}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Mean residual life
plot(tooth,Y1,ylim=c(0,100),xaxt="n",main="MRL, PS model",
xlab="",ylab="Mean residual life (years)")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){
abline(v=tooth[j],lty=2)
}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Mean residual life
# plot(tooth,Y2,ylim=c(0,100),xaxt="n",main="MRL, Gaussian model",
# xlab="",ylab="Mean residual life (years)")
# axis(1,tooth,tooth)
# for(j in 1:28){
# if(M[j]){
# abline(v=tooth[j],lty=2)
# }
# if(!M[j]){if(L[j,sub]==U[j,sub]){
# abline(v=tooth[j],lty=1)
# }}
# }
#Survival probs
plot(tooth,S5,ylim=0:1,xaxt="n",main = "5-year survival probability",
xlab="Tooth Number",ylab="Probability")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){
abline(v=tooth[j],lty=2)
}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Survival probs
plot(tooth,S10,ylim=0:1,xaxt="n",main="10-year survival probability",
xlab="Tooth Number",ylab="Probability")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){
abline(v=tooth[j],lty=2)
}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
S10<-fit$Surv10
S10[is.na(L)]<-NA
apply(S10<0.9,2,sum,na.rm=T)
#######################################################:
###### PIT #######:
#######################################################:
X11()
pcure <- mean(fit$pcure[burn:iters])
betaC<-QQQ[2,1:9]
betaM<-QQQ[2,10:18]
log_theta<-fit$log.theta.mn
XbC<-XbM<-L
for(t in 1:99){
XbC[,t]<-X[,t,]%*%betaC
XbM[,t]<-X[,t,]%*%betaM
}
scale<-QQQ[2,19:22]
shapeC<-QQQ[2,23]
shapeM<-QQQ[2,24]
par(mfrow=c(1,2))
PITC<-PITM<-L
nreps<-10
plot(NA,
xlab="Expected quantiles",
ylab="Observed quantiles",
main="Conditional",
xlim=0:1,ylim=0:1,lty=2,type="l",
cex.lab=1.25,cex.axis=1.25,cex.main=1.25)
# PIT (probability integral transform) check for the conditional model:
# draws randomized PIT values for the interval-censored Weibull responses
# and overlays the empirical quantiles on the expected uniform quantiles.
for(rep in 1:nreps){
for(t in 1:99){
set.seed(0820*t+10000*rep)  # reproducible draws per (subject, replicate)
# NOTE(review): XbC is a matrix, so XbC[t] picks a single element in
# column-major order -- presumably XbC[,t] was intended; verify.
# NOTE(review): 'type' is never defined in this script, so scale[type]
# relies on a leftover global -- confirm which scale entry is intended.
SSS<-exp(-(log_theta[,t]+XbC[t])/shapeC)*scale[type]
LLL<-(1-pcure)*pweibull(L[,t],shapeC,SSS)  # CDF at lower interval endpoint
UUU<-(1-pcure)*pweibull(U[,t],shapeC,SSS)  # CDF at upper interval endpoint
PITC[,t]<-runif(28,LLL,UUU)  # randomized PIT draw within the interval
}
pitc<-sort(as.vector(PITC))
pite<-seq(0,1,length=length(pitc))
lines(pite,pitc)#,lty=2)
}
abline(0,1,col=2,lwd=2)
par(mfrow=c(1,1))
plot(NA,
xlab="Expected quantiles",
ylab="Observed quantiles",
xlim=0:1,ylim=0:1,lty=2,type="l",
cex.lab=1.25,cex.axis=1.25,cex.main=1.25)
for(rep in 1:nreps){
for(t in 1:99){
set.seed(0820*t+10000*rep)
SSS<-exp(-XbM[t]/shapeM)*scale[type]
LLL<-(1-pcure)*pweibull(L[,t],shapeM,SSS)
UUU<-(1-pcure)*pweibull(U[,t],shapeM,SSS)
PITM[,t]<-runif(28,LLL,UUU)
}
pitm<-sort(as.vector(PITM))
pite<-seq(0,1,length=length(pitm))
lines(pite,pitm)#,lty=2)
}
lines(0:1,0:1,col=2,lwd=2)
|
/summarize.R
|
no_license
|
schnellp/JRSSC-2015-SpatialSurvival
|
R
| false
| false
| 4,911
|
r
|
load("NunnData.RData")
High[High==0]<-NA
M<-is.na(Low+High+apply(X,1:2,sum))
L<-Low
L[M]<-NA
U<-High
U[M]<-NA
#######################################################:
###### SUMMARIZE THE FINAL MODEL #######:
#######################################################:
load("fit.Robject")
#Extremal coefficient:
num<-1:32
EC<-fit$EC.mn
EC<-cbind(NA,EC[,1:14],NA,NA,EC[,15:28],NA)
EC<-rbind(NA,EC[1:14,],NA,NA,EC[15:28,],NA)
library(fields)
image.plot(1:32,1:32,EC,
xlab="Tooth number",ylab="Tooth number",
cex.lab=1,cex.axis=1)
X11()
burn<-40000
iters<-50000
#Table of coefficients
SSS<-fit$beta
for(j in 1:9){
SSS<-cbind(SSS,fit$alpha*fit$beta[,j])
}
SSS<-cbind(SSS,fit$scale)
SSS<-cbind(SSS,fit$shape[,1])
SSS<-cbind(SSS,fit$alpha*fit$shape[,1])
SSS<-cbind(SSS,fit$alpha)
SSS<-SSS[burn:iters,]
colnames(SSS)<-c(colnames(fit$beta),colnames(fit$beta),
"scale Molar","scale Pre-Molar","scale Can","scale Inc",
"shapeC","shapeM","alpha")
QQQ<-apply(SSS,2,quantile,c(0.5,0.05,0.95))
print(round(QQQ,2))
#######################################################:
###### Plot one sub #######:
#######################################################:
# Set-up for plotting one subject's data and model summaries.
tooth<-c(2:15,18:31)
covs<-c("Crown-to-root ","Prob depth ","Mobility ","Miss neighbor ")
sub<-9
sub<-3  # final subject choice; the assignment above is overridden
par(mfrow=c(3,2),mar=c(4,5,4,1))
XXX<-X[,sub,5:8]  # covariates for this subject
Y1<-fit$pred.y.mn[,sub]-L[,sub]  # mean residual life, PS model
#Y2<-pred.y.mn.gauss[,sub]-L[,sub]
S5<-fit$Surv5[,sub]  # 5-year survival probabilities
S10<-fit$Surv10[,sub]  # 10-year survival probabilities
Y1<-ifelse(Y1>100,100,Y1)  # cap mean residual life at 100 years for plotting
#Y2<-ifelse(Y2>100,100,Y2)
M<-is.na(L[,sub])  # teeth with no observed interval for this subject
XXX[M,]<-NA
# Bug fix: the original chain also assigned Y2[M]<-NA, but Y2 is never
# created (its definition is commented out above), so this line stopped
# with "object 'Y2' not found". Y2 is dropped from the chain.
S5[M]<-S10[M]<-Y1[M]<-NA
#Probing depth:
plot(tooth,XXX[,2],xaxt="n",main="Probing depth",
xlab="",ylab="Probing depth (mm)")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){abline(v=tooth[j],lty=2)}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Mobility:
plot(tooth,XXX[,3],xaxt="n",main="Mobility",
xlab="",ylab="Mobility")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){abline(v=tooth[j],lty=2)}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Mean residual life
plot(tooth,Y1,ylim=c(0,100),xaxt="n",main="MRL, PS model",
xlab="",ylab="Mean residual life (years)")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){
abline(v=tooth[j],lty=2)
}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Mean residual life
# plot(tooth,Y2,ylim=c(0,100),xaxt="n",main="MRL, Gaussian model",
# xlab="",ylab="Mean residual life (years)")
# axis(1,tooth,tooth)
# for(j in 1:28){
# if(M[j]){
# abline(v=tooth[j],lty=2)
# }
# if(!M[j]){if(L[j,sub]==U[j,sub]){
# abline(v=tooth[j],lty=1)
# }}
# }
#Survival probs
plot(tooth,S5,ylim=0:1,xaxt="n",main = "5-year survival probability",
xlab="Tooth Number",ylab="Probability")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){
abline(v=tooth[j],lty=2)
}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
#Survival probs
plot(tooth,S10,ylim=0:1,xaxt="n",main="10-year survival probability",
xlab="Tooth Number",ylab="Probability")
axis(1,tooth,tooth)
for(j in 1:28){
if(M[j]){
abline(v=tooth[j],lty=2)
}
if(!M[j]){if(L[j,sub]==U[j,sub]){
abline(v=tooth[j],lty=1)
}}
}
S10<-fit$Surv10
S10[is.na(L)]<-NA
apply(S10<0.9,2,sum,na.rm=T)
#######################################################:
###### PIT #######:
#######################################################:
X11()
pcure <- mean(fit$pcure[burn:iters])
betaC<-QQQ[2,1:9]
betaM<-QQQ[2,10:18]
log_theta<-fit$log.theta.mn
XbC<-XbM<-L
for(t in 1:99){
XbC[,t]<-X[,t,]%*%betaC
XbM[,t]<-X[,t,]%*%betaM
}
scale<-QQQ[2,19:22]
shapeC<-QQQ[2,23]
shapeM<-QQQ[2,24]
par(mfrow=c(1,2))
PITC<-PITM<-L
nreps<-10
plot(NA,
xlab="Expected quantiles",
ylab="Observed quantiles",
main="Conditional",
xlim=0:1,ylim=0:1,lty=2,type="l",
cex.lab=1.25,cex.axis=1.25,cex.main=1.25)
# PIT (probability integral transform) check for the conditional model:
# draws randomized PIT values for the interval-censored Weibull responses
# and overlays the empirical quantiles on the expected uniform quantiles.
for(rep in 1:nreps){
for(t in 1:99){
set.seed(0820*t+10000*rep)  # reproducible draws per (subject, replicate)
# NOTE(review): XbC is a matrix, so XbC[t] picks a single element in
# column-major order -- presumably XbC[,t] was intended; verify.
# NOTE(review): 'type' is never defined in this script, so scale[type]
# relies on a leftover global -- confirm which scale entry is intended.
SSS<-exp(-(log_theta[,t]+XbC[t])/shapeC)*scale[type]
LLL<-(1-pcure)*pweibull(L[,t],shapeC,SSS)  # CDF at lower interval endpoint
UUU<-(1-pcure)*pweibull(U[,t],shapeC,SSS)  # CDF at upper interval endpoint
PITC[,t]<-runif(28,LLL,UUU)  # randomized PIT draw within the interval
}
pitc<-sort(as.vector(PITC))
pite<-seq(0,1,length=length(pitc))
lines(pite,pitc)#,lty=2)
}
abline(0,1,col=2,lwd=2)
par(mfrow=c(1,1))
plot(NA,
xlab="Expected quantiles",
ylab="Observed quantiles",
xlim=0:1,ylim=0:1,lty=2,type="l",
cex.lab=1.25,cex.axis=1.25,cex.main=1.25)
for(rep in 1:nreps){
for(t in 1:99){
set.seed(0820*t+10000*rep)
SSS<-exp(-XbM[t]/shapeM)*scale[type]
LLL<-(1-pcure)*pweibull(L[,t],shapeM,SSS)
UUU<-(1-pcure)*pweibull(U[,t],shapeM,SSS)
PITM[,t]<-runif(28,LLL,UUU)
}
pitm<-sort(as.vector(PITM))
pite<-seq(0,1,length=length(pitm))
lines(pite,pitm)#,lty=2)
}
lines(0:1,0:1,col=2,lwd=2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blogger_objects.R
\name{Blog.posts}
\alias{Blog.posts}
\title{Blog.posts Object}
\usage{
Blog.posts(selfLink = NULL, totalItems = NULL)
}
\arguments{
\item{selfLink}{The URL of the container for posts in this blog}
\item{totalItems}{The count of posts in this blog}
}
\value{
Blog.posts object
}
\description{
Blog.posts Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The container of posts in this blog.
}
\seealso{
Other Blog functions: \code{\link{Blog.locale}},
\code{\link{Blog.pages}}, \code{\link{Blog}}
}
|
/googlebloggerv2.auto/man/Blog.posts.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 635
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blogger_objects.R
\name{Blog.posts}
\alias{Blog.posts}
\title{Blog.posts Object}
\usage{
Blog.posts(selfLink = NULL, totalItems = NULL)
}
\arguments{
\item{selfLink}{The URL of the container for posts in this blog}
\item{totalItems}{The count of posts in this blog}
}
\value{
Blog.posts object
}
\description{
Blog.posts Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The container of posts in this blog.
}
\seealso{
Other Blog functions: \code{\link{Blog.locale}},
\code{\link{Blog.pages}}, \code{\link{Blog}}
}
|
# Explore NEON Workshop
# Part 1: Morning of November 8, 2018
# Follows this webpage https://www.neonscience.org/download-explore-neon-data
library(neonUtilities)
library(geoNEON)
library(raster)
library(rhdf5)
options(stringsAsFactors = FALSE)
setwd("~/Documents/Workshops/Explore NEON Workshop 2018")
# Photosynthetically active radiation (PAR) data for Abby Road and
# Wind River Experimental Forest (WREF)
# Stack data from portal (only needs to happen once for a dataset, not for each run)
stackByTable("data/NEON_par.zip")
# Without this step, NEON data is difficult to use
# Download new data with zipsByProduct() (wrapper to the API)
# Download woody vegetation data from WREF
# NOTE(review): the commented-out 'savepath' line below leaves a trailing
# comma in the call, which passes an extra empty argument; R tolerates this
# only while the matched formal has a default -- confirm or drop the comma.
zipsByProduct(
dpID = "DP1.10098.001", # dpID = data product ID
site = "WREF",
package = "expanded",
check.size = TRUE, # interrupts workflow, so set FALSE for batch processes
# savepath = "~/Downloads"
)
# Now stack this data
stackByTable("filesToStack10098", folder = TRUE)
# Retrieve AOP (aerial observational platform) data
# Spatial resolution: 1m x 1m (or 10cm x 10cm for Lidar)
# Can download "by file" (usu. bigger) or "by tile" (usu. smaller)
# NOTE(review): same trailing-comma caveat as in zipsByProduct() above.
byTileAOP(
dpID = "DP3.30015.001",
site = "WREF",
year = "2017",
easting = 580000, # can take vector as well
northing = 5075000, # can take vector as well
# savepath = "~/Downloads"
)
# Load PAR data
par30 <- read.delim("data/NEON_par/stackedFiles/PARPAR_30min.csv", sep=",")
View(par30)
# Load variables (metadata) table
parvar <- read.delim("data/NEON_par/stackedFiles/variables.csv", sep=",")
View(parvar)
# Convert time series data to R-readable
par30$startDateTime <- as.POSIXct(par30$startDateTime,
format = "%Y-%m-%d T %H:%M:%S Z",
tz = "GMT")
head(par30)
# Plot data
plot(
PARMean ~ startDateTime,
data = par30[which(par30$verticalPosition == 80),],
type = 'l'
)
# Load vegetation structure data
vegmap <- read.delim("data/filesToStack10098/stackedFiles/vst_mappingandtagging.csv",
sep=",")
View(vegmap)
vegind <- read.delim("data/filesToStack10098/stackedFiles/vst_apparentindividual.csv",
sep=",")
View(vegind)
parvar_veg <- read.delim("data/filesToStack10098/stackedFiles/variables.csv",
sep=",")
View(parvar_veg)
# Use the geoNEON package to calculate stem locations
# Gets (easting, northing) from (stemAzimuth, stemDistance)
names(vegmap)
vegmap_calc <- geoNEON::def.calc.geo.os(vegmap, "vst_mappingandtagging")
names(vegmap_calc)[!(names(vegmap_calc) %in% names(vegmap))]
# And now merge the mapping data with the individual measurements.
# individualID is the linking variable, the others are included to
# avoid having duplicate columns.
veg <- merge(
vegind,
vegmap_calc,
by = c("individualID","namedLocation",
"domainID","siteID","plotID")
)
# Map the stems in plot 85
symbols(
x = veg$adjEasting[which(veg$plotID=="WREF_085")],
y = veg$adjNorthing[which(veg$plotID=="WREF_085")],
circles = veg$stemDiameter[which(veg$plotID=="WREF_085")]/100, # radius, /100 to convert cm to m
xlab = "Easting",
ylab = "Northing",
inches = FALSE
)
# Rasterize and plot Lidar data
chm <- raster("data/DP3.30015.001/2017/FullSite/D16/2017_WREF_1/L3/DiscreteLidar/CanopyHeightModelGtif/NEON_D16_WREF_DP3_580000_5075000_CHM.tif")
plot(chm, col=topo.colors(6))
|
/QinClara/neon_workshop_2018_1a.R
|
permissive
|
lxwrght/NEON_Explore
|
R
| false
| false
| 3,422
|
r
|
# Explore NEON Workshop
# Part 1: Morning of November 8, 2018
# Follows this webpage https://www.neonscience.org/download-explore-neon-data
library(neonUtilities)
library(geoNEON)
library(raster)
library(rhdf5)
options(stringsAsFactors = FALSE)
setwd("~/Documents/Workshops/Explore NEON Workshop 2018")
# Photosynthetically active radiation (PAR) data for Abby Road and
# Wind River Experimental Forest (WREF)
# Stack data from portal (only needs to happen once for a dataset, not for each run)
stackByTable("data/NEON_par.zip")
# Without this step, NEON data is difficult to use
# Download new data with zipsByProduct() (wrapper to the API)
# Download woody vegetation data from WREF
# NOTE(review): the commented-out 'savepath' line below leaves a trailing
# comma in the call, which passes an extra empty argument; R tolerates this
# only while the matched formal has a default -- confirm or drop the comma.
zipsByProduct(
dpID = "DP1.10098.001", # dpID = data product ID
site = "WREF",
package = "expanded",
check.size = TRUE, # interrupts workflow, so set FALSE for batch processes
# savepath = "~/Downloads"
)
# Now stack this data
stackByTable("filesToStack10098", folder = TRUE)
# Retrieve AOP (aerial observational platform) data
# Spatial resolution: 1m x 1m (or 10cm x 10cm for Lidar)
# Can download "by file" (usu. bigger) or "by tile" (usu. smaller)
# NOTE(review): same trailing-comma caveat as in zipsByProduct() above.
byTileAOP(
dpID = "DP3.30015.001",
site = "WREF",
year = "2017",
easting = 580000, # can take vector as well
northing = 5075000, # can take vector as well
# savepath = "~/Downloads"
)
# Load PAR data
par30 <- read.delim("data/NEON_par/stackedFiles/PARPAR_30min.csv", sep=",")
View(par30)
# Load variables (metadata) table
parvar <- read.delim("data/NEON_par/stackedFiles/variables.csv", sep=",")
View(parvar)
# Convert time series data to R-readable
par30$startDateTime <- as.POSIXct(par30$startDateTime,
format = "%Y-%m-%d T %H:%M:%S Z",
tz = "GMT")
head(par30)
# Plot data
plot(
PARMean ~ startDateTime,
data = par30[which(par30$verticalPosition == 80),],
type = 'l'
)
# Load vegetation structure data
vegmap <- read.delim("data/filesToStack10098/stackedFiles/vst_mappingandtagging.csv",
sep=",")
View(vegmap)
vegind <- read.delim("data/filesToStack10098/stackedFiles/vst_apparentindividual.csv",
sep=",")
View(vegind)
parvar_veg <- read.delim("data/filesToStack10098/stackedFiles/variables.csv",
sep=",")
View(parvar_veg)
# Use the geoNEON package to calculate stem locations
# Gets (easting, northing) from (stemAzimuth, stemDistance)
names(vegmap)
vegmap_calc <- geoNEON::def.calc.geo.os(vegmap, "vst_mappingandtagging")
names(vegmap_calc)[!(names(vegmap_calc) %in% names(vegmap))]
# And now merge the mapping data with the individual measurements.
# individualID is the linking variable, the others are included to
# avoid having duplicate columns.
veg <- merge(
vegind,
vegmap_calc,
by = c("individualID","namedLocation",
"domainID","siteID","plotID")
)
# Map the stems in plot 85
symbols(
x = veg$adjEasting[which(veg$plotID=="WREF_085")],
y = veg$adjNorthing[which(veg$plotID=="WREF_085")],
circles = veg$stemDiameter[which(veg$plotID=="WREF_085")]/100, # radius, /100 to convert cm to m
xlab = "Easting",
ylab = "Northing",
inches = FALSE
)
# Rasterize and plot Lidar data
chm <- raster("data/DP3.30015.001/2017/FullSite/D16/2017_WREF_1/L3/DiscreteLidar/CanopyHeightModelGtif/NEON_D16_WREF_DP3_580000_5075000_CHM.tif")
plot(chm, col=topo.colors(6))
|
library(shiny)
library(ggplot2)

# Shiny server: fits mpg ~ hp + wt + am on mtcars and predicts mpg for the
# user-selected horsepower (sliderHP), weight (sliderWT) and transmission
# type (selectAM). Outputs: a scatter plot with the prediction overlaid
# ('plot') and the predicted value as text ('pred').
shinyServer(function(input, output) {
  # Recode transmission as a labelled factor so the model treats it
  # categorically, matching the UI's "automatic"/"manual" choices.
  mtcars$am <- factor(mtcars$am, labels = c('automatic', 'manual'))
  model <- lm(mpg ~ hp + wt + am, data = mtcars)

  # Single reactive holding the user-specified predictor values. The
  # original rebuilt this identical data frame in both modelpred and the
  # plot; consolidating removes the duplication without changing behavior.
  newdata <- reactive({
    df <- data.frame(hp = input$sliderHP, wt = input$sliderWT, am = input$selectAM)
    df$am <- as.factor(df$am)
    df
  })

  # Predicted mpg for the user's inputs.
  modelpred <- reactive({
    predict(model, newdata = newdata())
  })

  output$plot <- renderPlot({
    mtcarsNew <- newdata()
    ggplot(mtcars, aes(x=hp,y=mpg,label=rownames(mtcars))) + geom_text(vjust = -1,size = 3.5,angle=315,check_overlap = TRUE) + geom_point(aes(size=wt,colour=am)) + geom_point(aes(x=mtcarsNew$hp,y=modelpred(),size=mtcarsNew$wt,shape=mtcarsNew$am)) + labs(x= "Gross horsepower", y= "Miles/(US) gallon" , title="Gross horsepower vs Miles/(US) gallon for automatic and manual transmission")
  })

  output$pred <- renderText({
    modelpred()
  })
})
|
/server.R
|
no_license
|
logicshare/DPPweek4Project
|
R
| false
| false
| 1,149
|
r
|
library(shiny)
library(ggplot2)

# Shiny server: fits mpg ~ hp + wt + am on mtcars and predicts mpg for the
# user-selected horsepower (sliderHP), weight (sliderWT) and transmission
# type (selectAM). Outputs: a scatter plot with the prediction overlaid
# ('plot') and the predicted value as text ('pred').
shinyServer(function(input, output) {
  # Recode transmission as a labelled factor so the model treats it
  # categorically, matching the UI's "automatic"/"manual" choices.
  mtcars$am <- factor(mtcars$am, labels = c('automatic', 'manual'))
  model <- lm(mpg ~ hp + wt + am, data = mtcars)

  # Single reactive holding the user-specified predictor values. The
  # original rebuilt this identical data frame in both modelpred and the
  # plot; consolidating removes the duplication without changing behavior.
  newdata <- reactive({
    df <- data.frame(hp = input$sliderHP, wt = input$sliderWT, am = input$selectAM)
    df$am <- as.factor(df$am)
    df
  })

  # Predicted mpg for the user's inputs.
  modelpred <- reactive({
    predict(model, newdata = newdata())
  })

  output$plot <- renderPlot({
    mtcarsNew <- newdata()
    ggplot(mtcars, aes(x=hp,y=mpg,label=rownames(mtcars))) + geom_text(vjust = -1,size = 3.5,angle=315,check_overlap = TRUE) + geom_point(aes(size=wt,colour=am)) + geom_point(aes(x=mtcarsNew$hp,y=modelpred(),size=mtcarsNew$wt,shape=mtcarsNew$am)) + labs(x= "Gross horsepower", y= "Miles/(US) gallon" , title="Gross horsepower vs Miles/(US) gallon for automatic and manual transmission")
  })

  output$pred <- renderText({
    modelpred()
  })
})
|
ICA = function(Data,OutputDimension=2,Contrastfunction="logcosh",Alpha=1,Iterations=200,PlotIt=FALSE,Cls){
# Independent Component Analysis
# projection=ICA(Data)
# INPUT
# Data[1:n,1:d] array of data: n cases in rows, d variables in columns, matrix is not symmetric
# or distance matrix, in this case matrix has to be symmetric
# OPTIONAL
# OutputDimension data is projected onto a R^p where P is the maximum ( default ==2)
# of the dimension chosen by cmdscale and OutputDimension
# Contrastfunction contrast function used in the negentropy approximation
# Default: 'logcosh' G(u)=1/a*log cosh(a*u)
# 'exp': G(u)=-exp(u^2/2)
#
# Alpha constant with 1<=alpha<=2 used in approximation to neg-entropy when fun == "logcosh"
# Iterations maximum number of iterations to perform.
#
# PlotIt bool, default=FALSE; if TRUE a class plot of the projected points is drawn.
# OutputDimension>2 only the first two dimensions will be shown
# Cls vector, classification of Data if available; class plots will be colorized
# OUTPUT is a list with following elements:
# ProjectedPoints[1:n,OutputDimension] n by OutputDimension matrix containing coordinates of the Projection:
# the ICA-transformed data ("sources"); columns contain the independent components
#
# Mixing[1:OutputDimension,1:d] mixing matrix such that Data = Mixing %*% ProjectedPoints
# Unmixing unmixing matrix with Data %*% Unmixing = ProjectedPoints
# PCMatrix pre-whitening matrix that projects data onto the first n.comp principal components.
#
# Note: Uses the R and C code implementation of the FastICA algorithm of Aapo Hyvarinen et al.
# (http://www.cs.helsinki.fi/u/ahyvarin/)
# Negentropy: entropy difference relative to a corresponding Gaussian random variable,
# J(y)=|E(G(y)-E(G(v)))|^2
# author: MT 06/2015
#requireRpackage('fastICA')
requireNamespace('fastICA')
if(missing(Data))
stop('No Data given')
Data; # forces evaluation of the Data promise; otherwise a no-op
if(!is.matrix(Data))
stop('Data has to be a matrix, maybe use as.matrix()')
AnzVar=ncol(Data) # number of variables (columns)
AnzData=nrow(Data) # number of cases (rows)
res=fastICA::fastICA(X=Data,n.comp=OutputDimension,fun = Contrastfunction,alg.typ = "parallel",alpha = Alpha,
method = "C",row.norm = FALSE, maxit = Iterations,tol = 0.0001, verbose = TRUE)
ProjectedPoints=res$S
if(PlotIt){
if(missing(Cls)){
# no classification supplied: treat all points as a single class
AnzData=nrow(Data)
Cls=rep(1,AnzData)
}
string=paste0('ICA projection with ',Contrastfunction, ' approximation')
#ClassPlot(ProjectedPoints[,1],ProjectedPoints[,2],Cls=Cls,Title=string,Xlabel='independent component 1',Ylabel='independent component 2')
PlotProjectedPoints(ProjectedPoints,Cls,main=string)
}
return(list(ProjectedPoints=ProjectedPoints,Mixing=res$A,Unmixing=res$W,PCMatrix=res$K))
}
|
/R/ICA.R
|
no_license
|
ms609/ProjectionBasedClustering
|
R
| false
| false
| 3,154
|
r
|
ICA = function(Data,OutputDimension=2,Contrastfunction="logcosh",Alpha=1,Iterations=200,PlotIt=FALSE,Cls){
# Independent Component Analysis
# projection=ICA(Data)
# INPUT
# Data[1:n,1:d] array of data: n cases in rows, d variables in columns, matrix is not symmetric
# or distance matrix, in this case matrix has to be symmetric
# OPTIONAL
# OutputDimension data is projected onto a R^p where P is the maximum ( default ==2)
# of the dimension chosen by cmdscale and OutputDimension
# Contrastfunction contrast function used in the negentropy approximation
# Default: 'logcosh' G(u)=1/a*log cosh(a*u)
# 'exp': G(u)=-exp(u^2/2)
#
# Alpha constant with 1<=alpha<=2 used in approximation to neg-entropy when fun == "logcosh"
# Iterations maximum number of iterations to perform.
#
# PlotIt bool, default=FALSE; if TRUE a class plot of the projected points is drawn.
# OutputDimension>2 only the first two dimensions will be shown
# Cls vector, classification of Data if available; class plots will be colorized
# OUTPUT is a list with following elements:
# ProjectedPoints[1:n,OutputDimension] n by OutputDimension matrix containing coordinates of the Projection:
# the ICA-transformed data ("sources"); columns contain the independent components
#
# Mixing[1:OutputDimension,1:d] mixing matrix such that Data = Mixing %*% ProjectedPoints
# Unmixing unmixing matrix with Data %*% Unmixing = ProjectedPoints
# PCMatrix pre-whitening matrix that projects data onto the first n.comp principal components.
#
# Note: Uses the R and C code implementation of the FastICA algorithm of Aapo Hyvarinen et al.
# (http://www.cs.helsinki.fi/u/ahyvarin/)
# Negentropy: entropy difference relative to a corresponding Gaussian random variable,
# J(y)=|E(G(y)-E(G(v)))|^2
# author: MT 06/2015
#requireRpackage('fastICA')
requireNamespace('fastICA')
if(missing(Data))
stop('No Data given')
Data; # forces evaluation of the Data promise; otherwise a no-op
if(!is.matrix(Data))
stop('Data has to be a matrix, maybe use as.matrix()')
AnzVar=ncol(Data) # number of variables (columns)
AnzData=nrow(Data) # number of cases (rows)
res=fastICA::fastICA(X=Data,n.comp=OutputDimension,fun = Contrastfunction,alg.typ = "parallel",alpha = Alpha,
method = "C",row.norm = FALSE, maxit = Iterations,tol = 0.0001, verbose = TRUE)
ProjectedPoints=res$S
if(PlotIt){
if(missing(Cls)){
# no classification supplied: treat all points as a single class
AnzData=nrow(Data)
Cls=rep(1,AnzData)
}
string=paste0('ICA projection with ',Contrastfunction, ' approximation')
#ClassPlot(ProjectedPoints[,1],ProjectedPoints[,2],Cls=Cls,Title=string,Xlabel='independent component 1',Ylabel='independent component 2')
PlotProjectedPoints(ProjectedPoints,Cls,main=string)
}
return(list(ProjectedPoints=ProjectedPoints,Mixing=res$A,Unmixing=res$W,PCMatrix=res$K))
}
|
test_that("aesthetic checking in geom throws correct errors", {
p <- ggplot(mtcars) + geom_point(aes(disp, mpg, colour = after_scale(data)))
expect_snapshot_error(ggplotGrob(p))
aes <- list(a = 1:4, b = letters[1:4], c = TRUE, d = 1:2, e = 1:5)
expect_snapshot_error(check_aesthetics(aes, 4))
})
test_that("updating geom aesthetic defaults preserves class and order", {
original_defaults <- GeomPoint$default_aes
update_geom_defaults("point", aes(color = "red"))
updated_defaults <- GeomPoint$default_aes
expect_s3_class(updated_defaults, "uneval")
intended_defaults <- original_defaults
intended_defaults[["colour"]] <- "red"
expect_equal(updated_defaults, intended_defaults)
update_geom_defaults("point", original_defaults)
})
test_that("updating stat aesthetic defaults preserves class and order", {
original_defaults <- StatBin$default_aes
update_stat_defaults("bin", aes(y = after_stat(density)))
updated_defaults <- StatBin$default_aes
expect_s3_class(updated_defaults, "uneval")
intended_defaults <- original_defaults
intended_defaults[["y"]] <- expr(after_stat(density))
attr(intended_defaults[["y"]], ".Environment") <- attr(updated_defaults[["y"]], ".Environment")
expect_equal(updated_defaults, intended_defaults)
update_stat_defaults("bin", original_defaults)
})
|
/tests/testthat/test-geom-.R
|
permissive
|
tidyverse/ggplot2
|
R
| false
| false
| 1,341
|
r
|
test_that("aesthetic checking in geom throws correct errors", {
p <- ggplot(mtcars) + geom_point(aes(disp, mpg, colour = after_scale(data)))
expect_snapshot_error(ggplotGrob(p))
aes <- list(a = 1:4, b = letters[1:4], c = TRUE, d = 1:2, e = 1:5)
expect_snapshot_error(check_aesthetics(aes, 4))
})
test_that("updating geom aesthetic defaults preserves class and order", {
original_defaults <- GeomPoint$default_aes
update_geom_defaults("point", aes(color = "red"))
updated_defaults <- GeomPoint$default_aes
expect_s3_class(updated_defaults, "uneval")
intended_defaults <- original_defaults
intended_defaults[["colour"]] <- "red"
expect_equal(updated_defaults, intended_defaults)
update_geom_defaults("point", original_defaults)
})
test_that("updating stat aesthetic defaults preserves class and order", {
original_defaults <- StatBin$default_aes
update_stat_defaults("bin", aes(y = after_stat(density)))
updated_defaults <- StatBin$default_aes
expect_s3_class(updated_defaults, "uneval")
intended_defaults <- original_defaults
intended_defaults[["y"]] <- expr(after_stat(density))
attr(intended_defaults[["y"]], ".Environment") <- attr(updated_defaults[["y"]], ".Environment")
expect_equal(updated_defaults, intended_defaults)
update_stat_defaults("bin", original_defaults)
})
|
# Prototype ("ptype") tibbles describing the column names and column types
# expected for each kind of data pulled from the Connect server API.
# Each tibble carries a single row of typed NA values; the names and types
# are the contract -- NOTE(review): confirm against callers whether the NA
# row itself is ever kept or only the column structure is used.
#
# Fix vs. original: removed a trailing comma in the `rendering` entry
# (after `variant_key`), which passed an empty argument to tibble() -- an
# error on tibble versions that reject empty arguments, and inconsistent
# with every other entry in this list.

# Typed NA scalars reused throughout the prototypes below.
NA_datetime_ <- vctrs::new_datetime(NA_real_)
NA_list_ <- list(list())

connectapi_ptypes <- list(
  users = tibble::tibble(
    "email" = NA_character_,
    "username" = NA_character_,
    "first_name" = NA_character_,
    "last_name" = NA_character_,
    "user_role" = NA_character_,
    "created_time" = NA_datetime_,
    "updated_time" = NA_datetime_,
    "active_time" = NA_datetime_,
    "confirmed" = NA,
    "locked" = NA,
    "guid" = NA_character_
  ),
  groups = tibble::tibble(
    "guid" = NA_character_,
    "name" = NA_character_,
    "owner_guid" = NA_character_
  ),
  usage_shiny = tibble::tibble(
    "content_guid" = NA_character_,
    "user_guid" = NA_character_,
    "started" = NA_datetime_,
    "ended" = NA_datetime_,
    "data_version" = NA_integer_
  ),
  usage_static = tibble::tibble(
    "content_guid" = NA_character_,
    "user_guid" = NA_character_,
    "variant_key" = NA_character_,
    "time" = NA_datetime_,
    "rendering_id" = NA_character_,
    "bundle_id" = NA_character_,
    "data_version" = NA_integer_
  ),
  content = tibble::tibble(
    "id" = NA_integer_,
    "guid" = NA_character_,
    "access_type" = NA_character_,
    "connection_timeout" = NA_real_,
    "read_timeout" = NA_real_,
    "init_timeout" = NA_real_,
    "idle_timeout" = NA_real_,
    "max_processes" = NA_integer_,
    "min_processes" = NA_integer_,
    "max_conns_per_process" = NA_integer_,
    "load_factor" = NA_real_,
    "url" = NA_character_,
    "vanity_url" = NA,
    "name" = NA_character_,
    "title" = NA_character_,
    "bundle_id" = NA_integer_,
    #(1=shiny, 2=shiny Rmd, 3=source Rmd, 4=static, 5=api, 6=tensorflow, 7=python, 8=flask, 9=dash, 10=streamlit)
    "app_mode" = NA_integer_,
    "content_category" = NA_character_,
    "has_parameters" = NA,
    "created_time" = NA_datetime_,
    "last_deployed_time" = NA_datetime_,
    "r_version" = NA_character_,
    "py_version" = NA_character_,
    "build_status" = NA_integer_,
    "run_as" = NA_character_,
    "run_as_current_user" = NA,
    "description" = NA_character_,
    "app_role" = NA_character_,
    "owner_first_name" = NA_character_,
    "owner_last_name" = NA_character_,
    "owner_username" = NA_character_,
    "owner_guid" = NA_character_,
    "owner_email" = NA_character_,
    "owner_locked" = NA,
    "is_scheduled" = NA,
    "git" = NA_list_
  ),
  audit_logs = tibble::tibble(
    "id" = NA_character_,
    "time" = NA_datetime_,
    "user_id" = NA_character_,
    "user_description" = NA_character_,
    "action" = NA_character_,
    "event_description" = NA_character_
  ),
  procs = tibble::tibble(
    pid = NA_character_,
    appId = NA_integer_,
    appGuid = NA_character_,
    appName = NA_character_,
    appUrl = NA_character_,
    appRunAs = NA_character_,
    type = NA_character_,
    cpuCurrent = NA_real_,
    cpuTotal = NA_integer_,
    ram = fs::as_fs_bytes(NA_integer_)
  ),
  acl_user = tibble::tibble(
    content_guid = NA_character_,
    content_access_type = NA_character_,
    email = NA_character_,
    username = NA_character_,
    first_name = NA_character_,
    last_name = NA_character_,
    password = NA_character_,
    user_role = NA_character_,
    created_time = NA_datetime_,
    updated_time = NA_datetime_,
    active_time = NA_datetime_,
    confirmed = NA,
    locked = NA,
    guid = NA_character_,
    app_role = NA_character_,
    is_owner = NA
  ),
  acl_group = tibble::tibble(
    owner_guid = NA_character_,
    name = NA_character_,
    members = NA_list_,
    owner = NA_character_,
    guid = NA_character_,
    app_role = NA_character_,
    content_guid = NA_character_,
    content_access_type = NA_character_
  ),
  variant = tibble::tibble(
    id = NA_integer_,
    app_id = NA_integer_,
    key = NA_character_,
    bundle_id = NA_integer_,
    is_default = NA,
    name = NA_character_,
    email_collaborators = NA,
    email_viewers = NA,
    created_time = NA_datetime_,
    rendering_id = NA_integer_,
    render_time = NA_datetime_,
    render_duration = NA_integer_,
    visibility = NA_character_,
    owner_id = NA_integer_
  ),
  rendering = tibble::tibble(
    id = NA_integer_,
    app_id = NA_integer_,
    variant_id = NA_integer_,
    bundle_id = NA_integer_,
    job_key = NA_character_,
    render_time = NA_datetime_,
    # 64-bit integer column (bit64), unlike `variant$render_duration` above.
    render_duration = bit64::as.integer64(NA_integer_),
    active = NA,
    app_guid = NA_character_,
    variant_key = NA_character_
  ),
  jobs = tibble::tibble(
    id = NA_integer_,
    pid = NA_integer_,
    key = NA_character_,
    app_id = NA_integer_,
    app_guid = NA_character_,
    variant_id = NA_integer_,
    bundle_id = NA_integer_,
    start_time = NA_datetime_,
    end_time = NA_datetime_,
    tag = NA_character_,
    exit_code = NA_integer_,
    finalized = NA,
    hostname = NA_character_,
    variant_key = NA_character_
  ),
  job = tibble::tibble(
    pid = NA_integer_,
    key = NA_character_,
    app_id = NA_integer_,
    variant_id = NA_integer_,
    bundle_id = NA_integer_,
    tag = NA_character_,
    finalized = NA,
    hostname = NA_character_,
    origin = NA_character_,
    stdout = NA_list_,
    stderr = NA_list_,
    logged_error = NA_character_,
    start_time = NA_datetime_,
    end_time = NA_datetime_,
    exit_code = NA_integer_,
    app_guid = NA_character_,
    variant_key = NA_character_
  )
)
|
/R/ptype.R
|
no_license
|
brunaw/connectapi
|
R
| false
| false
| 5,351
|
r
|
# Prototype ("ptype") tibbles describing the column names and column types
# expected for each kind of data pulled from the Connect server API.
# Each tibble carries a single row of typed NA values; the names and types
# are the contract -- NOTE(review): confirm against callers whether the NA
# row itself is ever kept or only the column structure is used.
#
# Fix vs. original: removed a trailing comma in the `rendering` entry
# (after `variant_key`), which passed an empty argument to tibble() -- an
# error on tibble versions that reject empty arguments, and inconsistent
# with every other entry in this list.

# Typed NA scalars reused throughout the prototypes below.
NA_datetime_ <- vctrs::new_datetime(NA_real_)
NA_list_ <- list(list())

connectapi_ptypes <- list(
  users = tibble::tibble(
    "email" = NA_character_,
    "username" = NA_character_,
    "first_name" = NA_character_,
    "last_name" = NA_character_,
    "user_role" = NA_character_,
    "created_time" = NA_datetime_,
    "updated_time" = NA_datetime_,
    "active_time" = NA_datetime_,
    "confirmed" = NA,
    "locked" = NA,
    "guid" = NA_character_
  ),
  groups = tibble::tibble(
    "guid" = NA_character_,
    "name" = NA_character_,
    "owner_guid" = NA_character_
  ),
  usage_shiny = tibble::tibble(
    "content_guid" = NA_character_,
    "user_guid" = NA_character_,
    "started" = NA_datetime_,
    "ended" = NA_datetime_,
    "data_version" = NA_integer_
  ),
  usage_static = tibble::tibble(
    "content_guid" = NA_character_,
    "user_guid" = NA_character_,
    "variant_key" = NA_character_,
    "time" = NA_datetime_,
    "rendering_id" = NA_character_,
    "bundle_id" = NA_character_,
    "data_version" = NA_integer_
  ),
  content = tibble::tibble(
    "id" = NA_integer_,
    "guid" = NA_character_,
    "access_type" = NA_character_,
    "connection_timeout" = NA_real_,
    "read_timeout" = NA_real_,
    "init_timeout" = NA_real_,
    "idle_timeout" = NA_real_,
    "max_processes" = NA_integer_,
    "min_processes" = NA_integer_,
    "max_conns_per_process" = NA_integer_,
    "load_factor" = NA_real_,
    "url" = NA_character_,
    "vanity_url" = NA,
    "name" = NA_character_,
    "title" = NA_character_,
    "bundle_id" = NA_integer_,
    #(1=shiny, 2=shiny Rmd, 3=source Rmd, 4=static, 5=api, 6=tensorflow, 7=python, 8=flask, 9=dash, 10=streamlit)
    "app_mode" = NA_integer_,
    "content_category" = NA_character_,
    "has_parameters" = NA,
    "created_time" = NA_datetime_,
    "last_deployed_time" = NA_datetime_,
    "r_version" = NA_character_,
    "py_version" = NA_character_,
    "build_status" = NA_integer_,
    "run_as" = NA_character_,
    "run_as_current_user" = NA,
    "description" = NA_character_,
    "app_role" = NA_character_,
    "owner_first_name" = NA_character_,
    "owner_last_name" = NA_character_,
    "owner_username" = NA_character_,
    "owner_guid" = NA_character_,
    "owner_email" = NA_character_,
    "owner_locked" = NA,
    "is_scheduled" = NA,
    "git" = NA_list_
  ),
  audit_logs = tibble::tibble(
    "id" = NA_character_,
    "time" = NA_datetime_,
    "user_id" = NA_character_,
    "user_description" = NA_character_,
    "action" = NA_character_,
    "event_description" = NA_character_
  ),
  procs = tibble::tibble(
    pid = NA_character_,
    appId = NA_integer_,
    appGuid = NA_character_,
    appName = NA_character_,
    appUrl = NA_character_,
    appRunAs = NA_character_,
    type = NA_character_,
    cpuCurrent = NA_real_,
    cpuTotal = NA_integer_,
    ram = fs::as_fs_bytes(NA_integer_)
  ),
  acl_user = tibble::tibble(
    content_guid = NA_character_,
    content_access_type = NA_character_,
    email = NA_character_,
    username = NA_character_,
    first_name = NA_character_,
    last_name = NA_character_,
    password = NA_character_,
    user_role = NA_character_,
    created_time = NA_datetime_,
    updated_time = NA_datetime_,
    active_time = NA_datetime_,
    confirmed = NA,
    locked = NA,
    guid = NA_character_,
    app_role = NA_character_,
    is_owner = NA
  ),
  acl_group = tibble::tibble(
    owner_guid = NA_character_,
    name = NA_character_,
    members = NA_list_,
    owner = NA_character_,
    guid = NA_character_,
    app_role = NA_character_,
    content_guid = NA_character_,
    content_access_type = NA_character_
  ),
  variant = tibble::tibble(
    id = NA_integer_,
    app_id = NA_integer_,
    key = NA_character_,
    bundle_id = NA_integer_,
    is_default = NA,
    name = NA_character_,
    email_collaborators = NA,
    email_viewers = NA,
    created_time = NA_datetime_,
    rendering_id = NA_integer_,
    render_time = NA_datetime_,
    render_duration = NA_integer_,
    visibility = NA_character_,
    owner_id = NA_integer_
  ),
  rendering = tibble::tibble(
    id = NA_integer_,
    app_id = NA_integer_,
    variant_id = NA_integer_,
    bundle_id = NA_integer_,
    job_key = NA_character_,
    render_time = NA_datetime_,
    # 64-bit integer column (bit64), unlike `variant$render_duration` above.
    render_duration = bit64::as.integer64(NA_integer_),
    active = NA,
    app_guid = NA_character_,
    variant_key = NA_character_
  ),
  jobs = tibble::tibble(
    id = NA_integer_,
    pid = NA_integer_,
    key = NA_character_,
    app_id = NA_integer_,
    app_guid = NA_character_,
    variant_id = NA_integer_,
    bundle_id = NA_integer_,
    start_time = NA_datetime_,
    end_time = NA_datetime_,
    tag = NA_character_,
    exit_code = NA_integer_,
    finalized = NA,
    hostname = NA_character_,
    variant_key = NA_character_
  ),
  job = tibble::tibble(
    pid = NA_integer_,
    key = NA_character_,
    app_id = NA_integer_,
    variant_id = NA_integer_,
    bundle_id = NA_integer_,
    tag = NA_character_,
    finalized = NA,
    hostname = NA_character_,
    origin = NA_character_,
    stdout = NA_list_,
    stderr = NA_list_,
    logged_error = NA_character_,
    start_time = NA_datetime_,
    end_time = NA_datetime_,
    exit_code = NA_integer_,
    app_guid = NA_character_,
    variant_key = NA_character_
  )
)
|
plot2 <- function() {
  # Line plot of Global Active Power for 2007-02-01..2007-02-02 from the UCI
  # "household_power_consumption.txt" file (read from the working directory).
  # Copies the on-screen plot to "ExData_Plotting1\plot2.png".
  # Returns the result of dev.off() (the remaining device), as before.
  #
  # Fix vs. original: the caller's `warn` option is saved and restored (via
  # on.exit, so it is restored even if a library() call fails) instead of
  # being blindly reset to 0, which clobbered any non-default setting.
  old_opts <- options(warn = -1)
  on.exit(options(old_opts), add = TRUE)
  suppressMessages(library(chron))
  suppressMessages(library(lubridate))
  options(old_opts)  # re-enable warnings for the actual work, as before
  dt <- read.csv("household_power_consumption.txt",
                 header = TRUE, sep = ";", na.strings = c("?"))
  dt["Date"] <- as.Date(dt$Date, format = "%d/%m/%Y")
  # String RHS is coerced to Date for the comparison.
  dt <- dt[dt$Date >= "2007-02-01" & dt$Date <= "2007-02-02", ]
  dt["Time"] <- times(dt$Time)  # chron::times fraction-of-day
  # x position = weekday number + fraction of day; axis labelled manually.
  plot(wday(dt$Date) + dt$Time,
       as.numeric(dt$Global_active_power),
       type = 'l', ylab = "Global Active Power(kilowatts)",
       xlab = "", xaxt = "n")
  axis(1, at = 5:7, labels = c("Thur", "Fri", "Sat"))
  # NOTE(review): backslash path is Windows-specific -- confirm before
  # making portable (file.path() would change the emitted path string).
  dev.copy(png, filename = "ExData_Plotting1\\plot2.png")
  dev.off()
}
|
/plot2.R
|
no_license
|
jpcannon/ExData_Plotting1
|
R
| false
| false
| 674
|
r
|
plot2 <- function() {
  # Line plot of Global Active Power for 2007-02-01..2007-02-02 from the UCI
  # "household_power_consumption.txt" file (read from the working directory).
  # Copies the on-screen plot to "ExData_Plotting1\plot2.png".
  # Returns the result of dev.off() (the remaining device), as before.
  #
  # Fix vs. original: the caller's `warn` option is saved and restored (via
  # on.exit, so it is restored even if a library() call fails) instead of
  # being blindly reset to 0, which clobbered any non-default setting.
  old_opts <- options(warn = -1)
  on.exit(options(old_opts), add = TRUE)
  suppressMessages(library(chron))
  suppressMessages(library(lubridate))
  options(old_opts)  # re-enable warnings for the actual work, as before
  dt <- read.csv("household_power_consumption.txt",
                 header = TRUE, sep = ";", na.strings = c("?"))
  dt["Date"] <- as.Date(dt$Date, format = "%d/%m/%Y")
  # String RHS is coerced to Date for the comparison.
  dt <- dt[dt$Date >= "2007-02-01" & dt$Date <= "2007-02-02", ]
  dt["Time"] <- times(dt$Time)  # chron::times fraction-of-day
  # x position = weekday number + fraction of day; axis labelled manually.
  plot(wday(dt$Date) + dt$Time,
       as.numeric(dt$Global_active_power),
       type = 'l', ylab = "Global Active Power(kilowatts)",
       xlab = "", xaxt = "n")
  axis(1, at = 5:7, labels = c("Thur", "Fri", "Sat"))
  # NOTE(review): backslash path is Windows-specific -- confirm before
  # making portable (file.path() would change the emitted path string).
  dev.copy(png, filename = "ExData_Plotting1\\plot2.png")
  dev.off()
}
|
##### info ####
# file: elymus_adult_gs_survival_parameter_2019_density_exp
# author: Amy Kendig
# date last edited: 10/27/20
# goal: sample from model coefficients to estimate survival
#### set-up ####
# tidyverse and brms packages must be loaded
# NOTE(review): `n_samps` (used below) is not defined in this file --
# presumably set by the wrapper script that sources it; confirm.
# load model
# load("output/elymus_adult_gs_survival_model_2019_density_exp.rda")
load("output/elymus_adult_gs_survival_fung_model_2019_density_exp.rda")
# extract posterior distributions
# NOTE(review): posterior_samples() is deprecated in newer brms releases
# (as_draws_df() is the replacement); confirm the installed brms version.
# evAGsSurvD2Samps <- posterior_samples(evAGsSurvD2Mod2)
evAGsSurvD2FuSamps <- posterior_samples(evAGsSurvD2FuMod2)
# sample parameters
# U_P_dens <- evAGsSurvD2Samps[sample(nrow(evAGsSurvD2Samps), size = n_samps, replace = T), ] %>%
# rename("fungicide_mv_seedling_density" = "b_fungicide:mv_seedling_density",
# "fungicide_ev_seedling_density" = "b_fungicide:ev_seedling_density",
# "fungicide_ev_adult_density" = "b_fungicide:ev_adult_density") %>%
# mutate(int_wat = b_Intercept,
# int_fun = int_wat + b_fungicide,
# mv_dens_wat = b_mv_seedling_density,
# mv_dens_fun = mv_dens_wat + fungicide_mv_seedling_density,
# evS_dens_wat = b_ev_seedling_density,
# evS_dens_fun = evS_dens_wat + fungicide_ev_seedling_density,
# evA_dens_wat = b_ev_adult_density,
# evA_dens_fun = evA_dens_wat + fungicide_ev_adult_density)
# Resample n_samps posterior draws (with replacement) and precompute the
# two intercepts used by u_P_fun() below.
u_P_df <- evAGsSurvD2FuSamps[sample(nrow(evAGsSurvD2FuSamps), size = n_samps, replace = T), ] %>%
mutate(int_wat = b_Intercept,
int_fun = int_wat + b_fungicide)
#### survival function ####
# U_P_fun <- function(disease, A_dens, S_dens, P_dens, iter) {
#
# U_P_lin_expr <- ifelse(disease == 1,
# U_P_dens$int_wat[iter] + U_P_dens$mv_dens_wat[iter] * A_dens + U_P_dens$evS_dens_wat[iter] * S_dens + U_P_dens$evA_dens_wat[iter] * P_dens,
# U_P_dens$int_fun[iter] + U_P_dens$mv_dens_fun[iter] * A_dens + U_P_dens$evS_dens_fun[iter] * S_dens + U_P_dens$evA_dens_fun[iter] * P_dens)
#
# U_P <- ifelse(exp(U_P_lin_expr) == Inf, 1, exp(U_P_lin_expr)/(1 + exp(U_P_lin_expr)))
#
# return(U_P)
# }
u_P_fun <- function(disease, iter) {
  # Adult growing-season survival probability from sampled posterior draws.
  #
  # disease: indicator; when == 1 the water-treatment intercept (int_wat) is
  #          used, otherwise the fungicide intercept (int_fun). May be a
  #          vector -- ifelse() keeps the selection vectorized.
  # iter:    row index into the global posterior-sample frame `u_P_df`
  #          (created alongside this function).
  # Returns a probability in [0, 1].
  u_P_lin_expr <- ifelse(disease == 1,
                         u_P_df$int_wat[iter],
                         u_P_df$int_fun[iter])
  # Fix: plogis() is the numerically stable inverse-logit. The original
  # exp(x)/(1 + exp(x)) returns NaN (Inf/Inf) for large x -- the
  # commented-out predecessor guarded against exactly that Inf case.
  u_P <- plogis(u_P_lin_expr)
  return(u_P)
}
|
/code/elymus_adult_gs_survival_parameter_2019_density_exp.R
|
no_license
|
aekendig/microstegium-bipolaris
|
R
| false
| false
| 2,363
|
r
|
##### info ####
# file: elymus_adult_gs_survival_parameter_2019_density_exp
# author: Amy Kendig
# date last edited: 10/27/20
# goal: sample from model coefficients to estimate survival
#### set-up ####
# tidyverse and brms packages must be loaded
# NOTE(review): `n_samps` (used below) is not defined in this file --
# presumably set by the wrapper script that sources it; confirm.
# load model
# load("output/elymus_adult_gs_survival_model_2019_density_exp.rda")
load("output/elymus_adult_gs_survival_fung_model_2019_density_exp.rda")
# extract posterior distributions
# NOTE(review): posterior_samples() is deprecated in newer brms releases
# (as_draws_df() is the replacement); confirm the installed brms version.
# evAGsSurvD2Samps <- posterior_samples(evAGsSurvD2Mod2)
evAGsSurvD2FuSamps <- posterior_samples(evAGsSurvD2FuMod2)
# sample parameters
# U_P_dens <- evAGsSurvD2Samps[sample(nrow(evAGsSurvD2Samps), size = n_samps, replace = T), ] %>%
# rename("fungicide_mv_seedling_density" = "b_fungicide:mv_seedling_density",
# "fungicide_ev_seedling_density" = "b_fungicide:ev_seedling_density",
# "fungicide_ev_adult_density" = "b_fungicide:ev_adult_density") %>%
# mutate(int_wat = b_Intercept,
# int_fun = int_wat + b_fungicide,
# mv_dens_wat = b_mv_seedling_density,
# mv_dens_fun = mv_dens_wat + fungicide_mv_seedling_density,
# evS_dens_wat = b_ev_seedling_density,
# evS_dens_fun = evS_dens_wat + fungicide_ev_seedling_density,
# evA_dens_wat = b_ev_adult_density,
# evA_dens_fun = evA_dens_wat + fungicide_ev_adult_density)
# Resample n_samps posterior draws (with replacement) and precompute the
# two intercepts used by u_P_fun() below.
u_P_df <- evAGsSurvD2FuSamps[sample(nrow(evAGsSurvD2FuSamps), size = n_samps, replace = T), ] %>%
mutate(int_wat = b_Intercept,
int_fun = int_wat + b_fungicide)
#### survival function ####
# U_P_fun <- function(disease, A_dens, S_dens, P_dens, iter) {
#
# U_P_lin_expr <- ifelse(disease == 1,
# U_P_dens$int_wat[iter] + U_P_dens$mv_dens_wat[iter] * A_dens + U_P_dens$evS_dens_wat[iter] * S_dens + U_P_dens$evA_dens_wat[iter] * P_dens,
# U_P_dens$int_fun[iter] + U_P_dens$mv_dens_fun[iter] * A_dens + U_P_dens$evS_dens_fun[iter] * S_dens + U_P_dens$evA_dens_fun[iter] * P_dens)
#
# U_P <- ifelse(exp(U_P_lin_expr) == Inf, 1, exp(U_P_lin_expr)/(1 + exp(U_P_lin_expr)))
#
# return(U_P)
# }
u_P_fun <- function(disease, iter) {
  # Adult growing-season survival probability from sampled posterior draws.
  #
  # disease: indicator; when == 1 the water-treatment intercept (int_wat) is
  #          used, otherwise the fungicide intercept (int_fun). May be a
  #          vector -- ifelse() keeps the selection vectorized.
  # iter:    row index into the global posterior-sample frame `u_P_df`
  #          (created alongside this function).
  # Returns a probability in [0, 1].
  u_P_lin_expr <- ifelse(disease == 1,
                         u_P_df$int_wat[iter],
                         u_P_df$int_fun[iter])
  # Fix: plogis() is the numerically stable inverse-logit. The original
  # exp(x)/(1 + exp(x)) returns NaN (Inf/Inf) for large x -- the
  # commented-out predecessor guarded against exactly that Inf case.
  u_P <- plogis(u_P_lin_expr)
  return(u_P)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_and_reporting.R
\name{umxReduceACE}
\alias{umxReduceACE}
\title{Reduce an ACE model.}
\usage{
umxReduceACE(
model,
report = c("markdown", "inline", "html", "report"),
baseFileName = "tmp",
intervals = TRUE,
...
)
}
\arguments{
\item{model}{an ACE or ADE \code{\link[=mxModel]{mxModel()}} to reduce}
\item{report}{How to report the results. "html" = open in browser}
\item{baseFileName}{(optional) custom filename for html output (defaults to "tmp")}
\item{intervals}{Recompute CIs (if any included) on the best model (default = TRUE)}
\item{...}{Other parameters to control model summary}
}
\value{
Best fitting model
}
\description{
This function can perform model reduction on \code{\link[=umxACE]{umxACE()}} models,
testing dropping A and C, as well as an ADE or ACE model, displaying the results
in a table, and returning the best model.
}
\details{
It is designed for testing univariate models. You can offer up either the ACE or ADE base model.
Suggestions for more sophisticated automation welcomed!
}
\examples{
\dontrun{
data(twinData)
mzData <- subset(twinData, zygosity == "MZFF")
dzData <- subset(twinData, zygosity == "DZFF")
m1 = umxACE(selDVs = "bmi", dzData = dzData, mzData = mzData, sep = "")
m2 = umxReduce(m1)
umxSummary(m2)
m1 = umxACE(selDVs = "bmi", dzData = dzData, mzData = mzData, sep = "", dzCr = .25)
m2 = umxReduce(m1)
}
}
\references{
\itemize{
\item Wagenmakers, E.J., & Farrell, S. (2004). AIC model selection using Akaike weights. \emph{Psychonomic Bulletin and Review}, \strong{11}, 192-196. \href{https://doi.org/10.3758/BF03206482}{doi:}
}
}
\seealso{
\code{\link[=umxReduceGxE]{umxReduceGxE()}}, \code{\link[=umxReduce]{umxReduce()}}
Other Twin Modeling Functions:
\code{\link{plot.MxModelTwinMaker}()},
\code{\link{power.ACE.test}()},
\code{\link{umxACEcov}()},
\code{\link{umxACEv}()},
\code{\link{umxACE}()},
\code{\link{umxCP}()},
\code{\link{umxDoCp}()},
\code{\link{umxDoC}()},
\code{\link{umxGxE_window}()},
\code{\link{umxGxEbiv}()},
\code{\link{umxGxE}()},
\code{\link{umxIP}()},
\code{\link{umxPlotCP}()},
\code{\link{umxPlotDoC}()},
\code{\link{umxReduceGxE}()},
\code{\link{umxReduce}()},
\code{\link{umxRotate.MxModelCP}()},
\code{\link{umxSexLim}()},
\code{\link{umxSimplex}()},
\code{\link{umxSummarizeTwinData}()},
\code{\link{umxSummaryACEcov}()},
\code{\link{umxSummaryACEv}()},
\code{\link{umxSummaryACE}()},
\code{\link{umxSummaryCP}()},
\code{\link{umxSummaryDoC}()},
\code{\link{umxSummaryGxEbiv}()},
\code{\link{umxSummaryGxE}()},
\code{\link{umxSummaryIP}()},
\code{\link{umxSummarySexLim}()},
\code{\link{umxSummarySimplex}()},
\code{\link{umxTwinMaker}()},
\code{\link{umx}}
}
\concept{Twin Modeling Functions}
|
/man/umxReduceACE.Rd
|
no_license
|
jishanling/umx
|
R
| false
| true
| 2,774
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_and_reporting.R
\name{umxReduceACE}
\alias{umxReduceACE}
\title{Reduce an ACE model.}
\usage{
umxReduceACE(
model,
report = c("markdown", "inline", "html", "report"),
baseFileName = "tmp",
intervals = TRUE,
...
)
}
\arguments{
\item{model}{an ACE or ADE \code{\link[=mxModel]{mxModel()}} to reduce}
\item{report}{How to report the results. "html" = open in browser}
\item{baseFileName}{(optional) custom filename for html output (defaults to "tmp")}
\item{intervals}{Recompute CIs (if any included) on the best model (default = TRUE)}
\item{...}{Other parameters to control model summary}
}
\value{
Best fitting model
}
\description{
This function can perform model reduction on \code{\link[=umxACE]{umxACE()}} models,
testing dropping A and C, as well as an ADE or ACE model, displaying the results
in a table, and returning the best model.
}
\details{
It is designed for testing univariate models. You can offer up either the ACE or ADE base model.
Suggestions for more sophisticated automation welcomed!
}
\examples{
\dontrun{
data(twinData)
mzData <- subset(twinData, zygosity == "MZFF")
dzData <- subset(twinData, zygosity == "DZFF")
m1 = umxACE(selDVs = "bmi", dzData = dzData, mzData = mzData, sep = "")
m2 = umxReduce(m1)
umxSummary(m2)
m1 = umxACE(selDVs = "bmi", dzData = dzData, mzData = mzData, sep = "", dzCr = .25)
m2 = umxReduce(m1)
}
}
\references{
\itemize{
\item Wagenmakers, E.J., & Farrell, S. (2004). AIC model selection using Akaike weights. \emph{Psychonomic Bulletin and Review}, \strong{11}, 192-196. \href{https://doi.org/10.3758/BF03206482}{doi:}
}
}
\seealso{
\code{\link[=umxReduceGxE]{umxReduceGxE()}}, \code{\link[=umxReduce]{umxReduce()}}
Other Twin Modeling Functions:
\code{\link{plot.MxModelTwinMaker}()},
\code{\link{power.ACE.test}()},
\code{\link{umxACEcov}()},
\code{\link{umxACEv}()},
\code{\link{umxACE}()},
\code{\link{umxCP}()},
\code{\link{umxDoCp}()},
\code{\link{umxDoC}()},
\code{\link{umxGxE_window}()},
\code{\link{umxGxEbiv}()},
\code{\link{umxGxE}()},
\code{\link{umxIP}()},
\code{\link{umxPlotCP}()},
\code{\link{umxPlotDoC}()},
\code{\link{umxReduceGxE}()},
\code{\link{umxReduce}()},
\code{\link{umxRotate.MxModelCP}()},
\code{\link{umxSexLim}()},
\code{\link{umxSimplex}()},
\code{\link{umxSummarizeTwinData}()},
\code{\link{umxSummaryACEcov}()},
\code{\link{umxSummaryACEv}()},
\code{\link{umxSummaryACE}()},
\code{\link{umxSummaryCP}()},
\code{\link{umxSummaryDoC}()},
\code{\link{umxSummaryGxEbiv}()},
\code{\link{umxSummaryGxE}()},
\code{\link{umxSummaryIP}()},
\code{\link{umxSummarySexLim}()},
\code{\link{umxSummarySimplex}()},
\code{\link{umxTwinMaker}()},
\code{\link{umx}}
}
\concept{Twin Modeling Functions}
|
# Numerical-analysis exercises: LU decomposition with the Matrix package,
# a fixed-point iteration for a 3x3 linear system, and Newton's method for
# a 2-equation nonlinear system with an analytic Jacobian from D().
library(Matrix)
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable in
# a standalone demo, hostile if this file is source()d interactively.
rm(list = ls())
# --- LU decomposition: solve A X = B via the P L U factorisation ---
A <- matrix(c(6,-2, 3, 4, -10, 4, 1, -3, 5), 3, 3, byrow=T)
B <- matrix(c(1,1,1), nrow = 3)
dim(A) <- c(3, 3)
elu <- expand(lu(A))
L <- elu$L
U <- elu$U
P <- elu$P
all.equal(Matrix(A), with(elu, P %*% L %*% U))
(Y <- solve(L, solve(P) %*% B)) # solve LY = inv(P).B instead of LY = PB
(X <- solve(U, Y))
all.equal(X, Matrix(solve(A, B)))
# --- Scale an upper-triangular matrix by its diagonal: UN = D^-1 U ---
U <- matrix(c(2, 3, -1, 1, 0, 2, 1, 0, 0, 0, -1, 3, 0, 0, 0, 4), 4, 4, byrow=T)
D <- Diagonal(4)
diag(D) <- (diag(U))
UN <- Diagonal(4)
# NOTE(review): UN is the identity at this point, so solve(UN) is a no-op;
# this line is equivalent to UN <- solve(D, U).
UN <- solve(D, U %*% solve(UN))
UN
# --- Fixed-point iteration for the system rearranged as
# x = (1+2y-3z)/6, y = (2-4x-4z)/-10, z = (3-x+3y)/5. One hand-unrolled
# sweep first, then the same sweep inside a loop.
f1 <- quote((1+2*y-3*z)/6)
f2 <- quote((2-4*x-4*z)/-10)
f3 <- quote((3-x+3*y)/5)
FunctionList <- list(x=1, y=1, z=1)
f1_a <- eval(f1, FunctionList)[1]
FunctionList <- list(x=f1_a, y=1, z=1)
f1_b <- eval(f2, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=1)
f1_c <- eval(f3, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=f1_c)
print(c(f1_a, f1_b, f1_c))
maxiteration <- 100
results <- data.frame(Iteration = numeric(maxiteration), x = numeric(maxiteration),
y = numeric(maxiteration), z = numeric(maxiteration))
FunctionList <- list(x=1, y=1, z=1)
for(i in 1:maxiteration){
f1_a <- eval(f1, FunctionList)[1]
# NOTE(review): y and z are reset to 1 on every iteration here rather than
# carrying the previous iterate's values -- looks unintended; confirm.
FunctionList <- list(x=f1_a, y=1, z=1)
f1_b <- eval(f2, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=1)
f1_c <- eval(f3, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=f1_c)
results[i,] <- list(i, f1_a, f1_b, f1_c)}
print(results)
# Symbolic-derivative demo; NOTE(review): `Derivative` is unused below.
xfunc <- quote(x-sqrt(x^2-1))
Derivative <- D(xfunc, "x")
# --- Newton's method for f1(x,y) = 0, f2(x,y) = 0 ---
f1 <- quote(sin(2*x-y)-1.2*x-0.4)
f2 <- quote(0.8*x^2+1.5*y^2-1)
StartX <- 0.4
StartY <- -0.75
# NOTE(review): this value is immediately overwritten by 10 below.
maxiteration <- 20
# Partial derivatives forming the 2x2 Jacobian.
f1x <- D(f1, "x")
f1y<- D(f1, "y")
f2x <- D(f2, "x")
f2y<- D(f2, "y")
maxiteration <- 10
results <- data.frame(Iteration = numeric(maxiteration), xDelta = numeric(maxiteration),
yDelta = numeric(maxiteration), NewX = numeric(maxiteration), NewY = numeric(maxiteration))
FunctionList <- list(x=StartX, y=StartY)
DeltaX <- 0
DeltaY <- 0
for(i in 1:maxiteration){
f1xE <- eval(f1x, FunctionList)[1]
f1yE <- eval(f1y, FunctionList)[1]
f2xE <- eval(f2x, FunctionList)[1]
f2yE <- eval(f2y, FunctionList)[1]
f1E <- eval(f1, FunctionList)[1]
f2E <- eval(f2, FunctionList)[1]
# Cramer's-rule solution of J * delta = -f for the update step.
DeltaX <- ((f1yE*f2E) - (f1E*f2yE))/((f1xE*f2yE)-(f1yE*f2xE))
DeltaY <- ((f1E*f2xE) - (f2E*f1xE))/((f1xE*f2yE)-(f1yE*f2xE))
results[i,] <- list(i, DeltaX, DeltaY, FunctionList[[1]]+DeltaX, FunctionList[[2]]+DeltaY)
FunctionList <- list(x=FunctionList[[1]]+DeltaX, y=FunctionList[[2]]+DeltaY)}
print(results)
|
/Algorithms/LU Decomposition.R
|
permissive
|
nusretipek/NumericalAnalysisR
|
R
| false
| false
| 2,647
|
r
|
# Numerical-analysis exercises: LU decomposition with the Matrix package,
# a fixed-point iteration for a 3x3 linear system, and Newton's method for
# a 2-equation nonlinear system with an analytic Jacobian from D().
library(Matrix)
# NOTE(review): rm(list = ls()) wipes the caller's workspace; acceptable in
# a standalone demo, hostile if this file is source()d interactively.
rm(list = ls())
# --- LU decomposition: solve A X = B via the P L U factorisation ---
A <- matrix(c(6,-2, 3, 4, -10, 4, 1, -3, 5), 3, 3, byrow=T)
B <- matrix(c(1,1,1), nrow = 3)
dim(A) <- c(3, 3)
elu <- expand(lu(A))
L <- elu$L
U <- elu$U
P <- elu$P
all.equal(Matrix(A), with(elu, P %*% L %*% U))
(Y <- solve(L, solve(P) %*% B)) # solve LY = inv(P).B instead of LY = PB
(X <- solve(U, Y))
all.equal(X, Matrix(solve(A, B)))
# --- Scale an upper-triangular matrix by its diagonal: UN = D^-1 U ---
U <- matrix(c(2, 3, -1, 1, 0, 2, 1, 0, 0, 0, -1, 3, 0, 0, 0, 4), 4, 4, byrow=T)
D <- Diagonal(4)
diag(D) <- (diag(U))
UN <- Diagonal(4)
# NOTE(review): UN is the identity at this point, so solve(UN) is a no-op;
# this line is equivalent to UN <- solve(D, U).
UN <- solve(D, U %*% solve(UN))
UN
# --- Fixed-point iteration for the system rearranged as
# x = (1+2y-3z)/6, y = (2-4x-4z)/-10, z = (3-x+3y)/5. One hand-unrolled
# sweep first, then the same sweep inside a loop.
f1 <- quote((1+2*y-3*z)/6)
f2 <- quote((2-4*x-4*z)/-10)
f3 <- quote((3-x+3*y)/5)
FunctionList <- list(x=1, y=1, z=1)
f1_a <- eval(f1, FunctionList)[1]
FunctionList <- list(x=f1_a, y=1, z=1)
f1_b <- eval(f2, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=1)
f1_c <- eval(f3, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=f1_c)
print(c(f1_a, f1_b, f1_c))
maxiteration <- 100
results <- data.frame(Iteration = numeric(maxiteration), x = numeric(maxiteration),
y = numeric(maxiteration), z = numeric(maxiteration))
FunctionList <- list(x=1, y=1, z=1)
for(i in 1:maxiteration){
f1_a <- eval(f1, FunctionList)[1]
# NOTE(review): y and z are reset to 1 on every iteration here rather than
# carrying the previous iterate's values -- looks unintended; confirm.
FunctionList <- list(x=f1_a, y=1, z=1)
f1_b <- eval(f2, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=1)
f1_c <- eval(f3, FunctionList)[1]
FunctionList <- list(x=f1_a, y=f1_b, z=f1_c)
results[i,] <- list(i, f1_a, f1_b, f1_c)}
print(results)
# Symbolic-derivative demo; NOTE(review): `Derivative` is unused below.
xfunc <- quote(x-sqrt(x^2-1))
Derivative <- D(xfunc, "x")
# --- Newton's method for f1(x,y) = 0, f2(x,y) = 0 ---
f1 <- quote(sin(2*x-y)-1.2*x-0.4)
f2 <- quote(0.8*x^2+1.5*y^2-1)
StartX <- 0.4
StartY <- -0.75
# NOTE(review): this value is immediately overwritten by 10 below.
maxiteration <- 20
# Partial derivatives forming the 2x2 Jacobian.
f1x <- D(f1, "x")
f1y<- D(f1, "y")
f2x <- D(f2, "x")
f2y<- D(f2, "y")
maxiteration <- 10
results <- data.frame(Iteration = numeric(maxiteration), xDelta = numeric(maxiteration),
yDelta = numeric(maxiteration), NewX = numeric(maxiteration), NewY = numeric(maxiteration))
FunctionList <- list(x=StartX, y=StartY)
DeltaX <- 0
DeltaY <- 0
for(i in 1:maxiteration){
f1xE <- eval(f1x, FunctionList)[1]
f1yE <- eval(f1y, FunctionList)[1]
f2xE <- eval(f2x, FunctionList)[1]
f2yE <- eval(f2y, FunctionList)[1]
f1E <- eval(f1, FunctionList)[1]
f2E <- eval(f2, FunctionList)[1]
# Cramer's-rule solution of J * delta = -f for the update step.
DeltaX <- ((f1yE*f2E) - (f1E*f2yE))/((f1xE*f2yE)-(f1yE*f2xE))
DeltaY <- ((f1E*f2xE) - (f2E*f1xE))/((f1xE*f2yE)-(f1yE*f2xE))
results[i,] <- list(i, DeltaX, DeltaY, FunctionList[[1]]+DeltaX, FunctionList[[2]]+DeltaY)
FunctionList <- list(x=FunctionList[[1]]+DeltaX, y=FunctionList[[2]]+DeltaY)}
print(results)
|
## Plot 2: total PM2.5 emissions per year for Baltimore City (fips "24510")
## from the EPA National Emissions Inventory, with a linear best-fit line,
## saved to plot2.png in the working directory.
## download and unzip the FNEI data
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
"FNEI_data.zip")
## Fix: unzip the exact file we downloaded ("FNEI_data.zip"); the original
## unzipped "FNEI_Data.zip", which only worked on case-insensitive
## filesystems.
unzip("FNEI_data.zip")
## read in the data
NEI <- readRDS("summarySCC_PM25.rds")
## NOTE(review): SCC is loaded but not used anywhere in this plot.
SCC <- readRDS("Source_Classification_Code.rds")
## subset the data, so only Baltimore City remains (fips == "24510")
subsetBaltimore <- NEI[NEI$fips == "24510",]
## sum the emissions per year for Baltimore City
baltimoreSum <- aggregate(Emissions ~ year, subsetBaltimore, sum)
## plot the decrease in emissions
png(file = "plot2.png")
plot(baltimoreSum, type = "b", ylab = "Sum of Emissions in tons")
title("Over 43% reduction in Baltimore City Emissions from 1999 - 2008
\n(red best-fit line shows significant overall decrease, despite 2005)", cex.main = 1)
abline(lm(baltimoreSum$Emissions ~ baltimoreSum$year), col="red")
dev.off()
## for reference, here's the 43+ percent change for years 1999 v. 2008:
firstYearEmissions <- baltimoreSum[baltimoreSum$year == 1999, "Emissions"]
lastYearEmissions <- baltimoreSum[baltimoreSum$year == 2008, "Emissions"]
(lastYearEmissions - firstYearEmissions)/firstYearEmissions ## -0.431222
|
/plot2.R
|
no_license
|
DanielFletcher1/ExData2
|
R
| false
| false
| 1,179
|
r
|
## Plot 2: total PM2.5 emissions per year for Baltimore City (fips "24510")
## from the EPA National Emissions Inventory, with a linear best-fit line,
## saved to plot2.png in the working directory.
## download and unzip the FNEI data
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
"FNEI_data.zip")
## Fix: unzip the exact file we downloaded ("FNEI_data.zip"); the original
## unzipped "FNEI_Data.zip", which only worked on case-insensitive
## filesystems.
unzip("FNEI_data.zip")
## read in the data
NEI <- readRDS("summarySCC_PM25.rds")
## NOTE(review): SCC is loaded but not used anywhere in this plot.
SCC <- readRDS("Source_Classification_Code.rds")
## subset the data, so only Baltimore City remains (fips == "24510")
subsetBaltimore <- NEI[NEI$fips == "24510",]
## sum the emissions per year for Baltimore City
baltimoreSum <- aggregate(Emissions ~ year, subsetBaltimore, sum)
## plot the decrease in emissions
png(file = "plot2.png")
plot(baltimoreSum, type = "b", ylab = "Sum of Emissions in tons")
title("Over 43% reduction in Baltimore City Emissions from 1999 - 2008
\n(red best-fit line shows significant overall decrease, despite 2005)", cex.main = 1)
abline(lm(baltimoreSum$Emissions ~ baltimoreSum$year), col="red")
dev.off()
## for reference, here's the 43+ percent change for years 1999 v. 2008:
firstYearEmissions <- baltimoreSum[baltimoreSum$year == 1999, "Emissions"]
lastYearEmissions <- baltimoreSum[baltimoreSum$year == 2008, "Emissions"]
(lastYearEmissions - firstYearEmissions)/firstYearEmissions ## -0.431222
|
\name{seqnum}
\alias{seqnum}
\title{Transform into a sequence object with numerical alphabet.}
\description{
The function \code{seqnum} transforms the provided state sequence object into an equivalent sequence object in which the original alphabet is replaced with an alphabet of numbers ranging from \code{0} to \code{(nbstates-1)}.
}
\usage{
seqnum(seqdata, with.missing=FALSE)
}
\arguments{
\item{seqdata}{a state sequence object as defined by the \code{\link{seqdef}} function.}
\item{with.missing}{logical: Should missing elements in the sequences be turned into numerical values as well? The code for missing values in the sequences is retrieved from the \code{'nr'} attribute of \code{seqdata}.}
}
\details{
The first state (for example \code{'A'}) is coded with the value \code{0}, the second state (for example \code{'B'}) is coded with the value \code{1}, and so on. The function returns a sequence object containing the original sequences coded with the new numerical alphabet
ranging from \code{0} to \code{(nbstates-1)}.}
\seealso{\code{\link{seqdef}}, \code{\link{alphabet} } }
\examples{
data(actcal)
actcal.seq <- seqdef(actcal,13:24)
## The first 10 sequences in the actcal.seq
## sequence object
actcal.seq[1:10,]
alphabet(actcal.seq)
## The first 10 sequences in the actcal.seq
## sequence object with numerical alphabet
seqnum(actcal.seq[1:10,])
## states A,B,C,D are now coded 0,1,2,3
alphabet(seqnum(actcal.seq))
}
\author{Alexis Gabadinho}
\keyword{Data handling}
\keyword{State sequences}
|
/man/seqnum.Rd
|
no_license
|
cran/TraMineR
|
R
| false
| false
| 1,525
|
rd
|
\name{seqnum}
\alias{seqnum}
\title{Transform into a sequence object with numerical alphabet.}
\description{
The function \code{seqnum} transforms the provided state sequence object into an equivalent sequence object in which the original alphabet is replaced with an alphabet of numbers ranging from \code{0} to \code{(nbstates-1)}.
}
\usage{
seqnum(seqdata, with.missing=FALSE)
}
\arguments{
\item{seqdata}{a state sequence object as defined by the \code{\link{seqdef}} function.}
\item{with.missing}{logical: Should missing elements in the sequences be turned into numerical values as well? The code for missing values in the sequences is retrieved from the \code{'nr'} attribute of \code{seqdata}.}
}
\details{
The first state (for example \code{'A'}) is coded with the value \code{0}, the second state (for example \code{'B'}) is coded with the value \code{1}, etc... The function returns a sequence object containing the original sequences coded with the new numerical alphabet
ranging from \code{0} to \code{(nbstates-1)}}
\seealso{\code{\link{seqdef}}, \code{\link{alphabet} } }
\examples{
data(actcal)
actcal.seq <- seqdef(actcal,13:24)
## The first 10 sequences in the actcal.seq
## sequence object
actcal.seq[1:10,]
alphabet(actcal.seq)
## The first 10 sequences in the actcal.seq
## sequence object with numerical alphabet
seqnum(actcal.seq[1:10,])
## states A,B,C,D are now coded 0,1,2,3
alphabet(seqnum(actcal.seq))
}
\author{Alexis Gabadinho}
\keyword{Data handling}
\keyword{State sequences}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VehicleSales.R
\docType{data}
\name{VehicleSales}
\alias{VehicleSales}
\title{Total Vehicle Sales in USA}
\format{
360 * 2 dataframe...
\describe{
\item{column1}{DATE}
\item{column2}{Total Sales}
}
}
\usage{
VehicleSales
}
\description{
Total Vehicle Sales in USA
}
\keyword{datasets}
|
/man/VehicleSales.Rd
|
permissive
|
DaZheng123/DataProject
|
R
| false
| true
| 364
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VehicleSales.R
\docType{data}
\name{VehicleSales}
\alias{VehicleSales}
\title{Total Vehicle Sales in USA}
\format{
360 * 2 dataframe...
\describe{
\item{column1}{DATE}
\item{column2}{Total Sales}
}
}
\usage{
VehicleSales
}
\description{
Total Vehicle Sales in USA
}
\keyword{datasets}
|
### Series 9, Task 1
### Leave-one-out cross validation.
###
### fitfn   : model-fitting function, called as fitfn(formula =, data =, ...)
### formula : model formula (defaults to the ozone homework formula)
### data    : data frame to cross-validate over
### ...     : extra arguments forwarded to `fitfn`
### trace   : print a progress indicator to the console?
###
### Returns the leave-one-out sum of squared prediction errors.
cv <- function(fitfn, formula = logupo3 ~ . , data = d.ozone.es, ..., trace = TRUE)
{
  modFrame <- model.frame(formula, data = data)
  nc <- nrow(data)
  ## Emit roughly 10 numeric progress marks.
  ## FIX: for nc < 10, `nc %/% 10` is 0 and `j %% 0` yields NaN, so the
  ## original `if (j %% (nc %/% 10) == 1)` errored; clamp the step to >= 1.
  step <- max(1L, nc %/% 10)
  ssr <- 0
  if(trace) cat(" j = ")
  for(j in seq_len(nc)) {
    if(trace) cat(if(j %% step == 1) paste(j, "") else ".")
    ## Fit without observation 'j' :
    fit <- fitfn(formula = formula, data = data[-j, ], ...)
    ## Accumulate the squared prediction error at 'j' :
    ssr <- ssr + (model.response(modFrame)[j] - predict(fit, modFrame[j, ]))^2
  }
  if(trace) cat("\n")
  ssr
}
## Read in dataset
ozone <- read.table("http://stat.ethz.ch/Teaching/Datasets/ozone.dat",
                    header = TRUE)
###################################################
### TASK a)  data preparation
###################################################
## log-transform the response and drop the raw upo3 column
d.ozone <- subset(transform(ozone, logupo3 = log(upo3)), select = -upo3)
## drop the wind-speed outlier, then standardize all predictors
d.ozone.e <- d.ozone[-which.max(d.ozone[,"wdsp"]),]
d.ozone.es <- d.ozone.e
d.ozone.es[,-10] <- scale(d.ozone.e[,-10])
###################################################
### TASK b)  MARS fit (earth)
###################################################
form <- logupo3~.
earth.mod <- earth(form, data = d.ozone.es, degree = 2)
plotmo(earth.mod, degree2 = FALSE)
plotmo(earth.mod, degree1 = FALSE)
###################################################
### TASK c)  projection pursuit regression
###################################################
ppr.mod <- ppr(form, data = d.ozone.es, nterms = 4)
par(mfrow=c(1,1))
plot(ppr.mod)
###################################################
### TASK d)  CV over PPR smoothers / ridge counts / DFs
###################################################
smoothers <- c("supsmu", "spline", "gcvspline")
num.ridges <- c(3,4,5)
deg.freed <- c(5,6,7)
## NOTE: models / cv.score grow via cbind() inside the loops; fine for this
## handful of fits, though preallocated lists would scale better.
models <- c()
cv.score <- c()
### for each set of parameters calculate cv score
### for each smoother method
for (sm in smoothers) {
  ### for each number of ridge functions
  for (nr in num.ridges) {
    ### if sm is spline try different DFs
    if (sm == "spline") {
      for (df in deg.freed) {
        ## FIX: corrected the typo "euqivalent" in the progress message
        cat("Smoother method: ", sm, ", number of ridge terms = ", nr,
            ", equivalent degrees of freedom = ", df, ".\n")
        model <- ppr(form, data = d.ozone.es, sm.method = sm, nterms = nr, df = df)
        models <- cbind(models, model)
        score <- cv(ppr, sm.method = sm, nterms = nr, df = df)
        cv.score <- cbind(cv.score, score)
        cat("Score = ", score, "\n")
      }
    }
    else {
      cat("Smoother method: ", sm,", number of ridge terms = ", nr, ".\n")
      model <- ppr(form, data = d.ozone.es, sm.method = sm, nterms = nr)
      models <- cbind(models, model)
      score <- cv(ppr, sm.method = sm, nterms = nr)
      cv.score <- cbind(cv.score, score)
      cat("Score = ", score, "\n")
    }
  }
}
###################################################
### TASK e)  CV for MARS degrees and a GAM
###################################################
deg <- c(1,2,3,4)
for (d in deg) {
  cat("Degree = ", d,".\n")
  score <- cv(earth, degree = d)
  cat("Score = ", score, ".\n")
}
require(mgcv)
gamForm <- wrapFormula(logupo3~.,d.ozone.e, wrapString="s(*)")
## NOTE(review): gamForm is built but never used; cv(gam) falls back to the
## default formula `logupo3 ~ .` -- confirm whether
## `cv(gam, formula = gamForm)` was intended here.
gam.cv <- cv(gam)
###################################################
### TASK f)  best model found in d)
###################################################
best <- ppr(form, data = d.ozone.es, sm.method = "spline", nterms = 5, df = 6)
best.cv <- cv(ppr, sm.method = "spline", nterms = 5, df = 6)
plotmo(best)
|
/Week 9/hmw9.R
|
no_license
|
porvaznikmichal/ETH-Computational-Statistics
|
R
| false
| false
| 3,298
|
r
|
### Series 9, Task 1
### Leave-one-out cross validation.
###
### fitfn   : model-fitting function, called as fitfn(formula =, data =, ...)
### formula : model formula (defaults to the ozone homework formula)
### data    : data frame to cross-validate over
### ...     : extra arguments forwarded to `fitfn`
### trace   : print a progress indicator to the console?
###
### Returns the leave-one-out sum of squared prediction errors.
cv <- function(fitfn, formula = logupo3 ~ . , data = d.ozone.es, ..., trace = TRUE)
{
  modFrame <- model.frame(formula, data = data)
  nc <- nrow(data)
  ## Emit roughly 10 numeric progress marks.
  ## FIX: for nc < 10, `nc %/% 10` is 0 and `j %% 0` yields NaN, so the
  ## original `if (j %% (nc %/% 10) == 1)` errored; clamp the step to >= 1.
  step <- max(1L, nc %/% 10)
  ssr <- 0
  if(trace) cat(" j = ")
  for(j in seq_len(nc)) {
    if(trace) cat(if(j %% step == 1) paste(j, "") else ".")
    ## Fit without observation 'j' :
    fit <- fitfn(formula = formula, data = data[-j, ], ...)
    ## Accumulate the squared prediction error at 'j' :
    ssr <- ssr + (model.response(modFrame)[j] - predict(fit, modFrame[j, ]))^2
  }
  if(trace) cat("\n")
  ssr
}
## Read in dataset
ozone <- read.table("http://stat.ethz.ch/Teaching/Datasets/ozone.dat",
                    header = TRUE)
###################################################
### TASK a)  data preparation
###################################################
## log-transform the response and drop the raw upo3 column
d.ozone <- subset(transform(ozone, logupo3 = log(upo3)), select = -upo3)
## drop the wind-speed outlier, then standardize all predictors
d.ozone.e <- d.ozone[-which.max(d.ozone[,"wdsp"]),]
d.ozone.es <- d.ozone.e
d.ozone.es[,-10] <- scale(d.ozone.e[,-10])
###################################################
### TASK b)  MARS fit (earth)
###################################################
form <- logupo3~.
earth.mod <- earth(form, data = d.ozone.es, degree = 2)
plotmo(earth.mod, degree2 = FALSE)
plotmo(earth.mod, degree1 = FALSE)
###################################################
### TASK c)  projection pursuit regression
###################################################
ppr.mod <- ppr(form, data = d.ozone.es, nterms = 4)
par(mfrow=c(1,1))
plot(ppr.mod)
###################################################
### TASK d)  CV over PPR smoothers / ridge counts / DFs
###################################################
smoothers <- c("supsmu", "spline", "gcvspline")
num.ridges <- c(3,4,5)
deg.freed <- c(5,6,7)
## NOTE: models / cv.score grow via cbind() inside the loops; fine for this
## handful of fits, though preallocated lists would scale better.
models <- c()
cv.score <- c()
### for each set of parameters calculate cv score
### for each smoother method
for (sm in smoothers) {
  ### for each number of ridge functions
  for (nr in num.ridges) {
    ### if sm is spline try different DFs
    if (sm == "spline") {
      for (df in deg.freed) {
        ## FIX: corrected the typo "euqivalent" in the progress message
        cat("Smoother method: ", sm, ", number of ridge terms = ", nr,
            ", equivalent degrees of freedom = ", df, ".\n")
        model <- ppr(form, data = d.ozone.es, sm.method = sm, nterms = nr, df = df)
        models <- cbind(models, model)
        score <- cv(ppr, sm.method = sm, nterms = nr, df = df)
        cv.score <- cbind(cv.score, score)
        cat("Score = ", score, "\n")
      }
    }
    else {
      cat("Smoother method: ", sm,", number of ridge terms = ", nr, ".\n")
      model <- ppr(form, data = d.ozone.es, sm.method = sm, nterms = nr)
      models <- cbind(models, model)
      score <- cv(ppr, sm.method = sm, nterms = nr)
      cv.score <- cbind(cv.score, score)
      cat("Score = ", score, "\n")
    }
  }
}
###################################################
### TASK e)  CV for MARS degrees and a GAM
###################################################
deg <- c(1,2,3,4)
for (d in deg) {
  cat("Degree = ", d,".\n")
  score <- cv(earth, degree = d)
  cat("Score = ", score, ".\n")
}
require(mgcv)
gamForm <- wrapFormula(logupo3~.,d.ozone.e, wrapString="s(*)")
## NOTE(review): gamForm is built but never used; cv(gam) falls back to the
## default formula `logupo3 ~ .` -- confirm whether
## `cv(gam, formula = gamForm)` was intended here.
gam.cv <- cv(gam)
###################################################
### TASK f)  best model found in d)
###################################################
best <- ppr(form, data = d.ozone.es, sm.method = "spline", nterms = 5, df = 6)
best.cv <- cv(ppr, sm.method = "spline", nterms = 5, df = 6)
plotmo(best)
|
\name{data_symbolic_interval_polish_voivodships}
\alias{data_symbolic_interval_polish_voivodships}
\docType{data}
\title{The evaluation of Polish voivodships tourism attractiveness level}
\description{The empirical study uses the statistical data referring to the attractiveness level of 18 objects (16 Polish NUTS-2 regions - voivodships, pattern and anti-pattern object).
Two-stage data collection was performed. Firstly, data on tourist attractiveness were collected for 380 counties using 9 classic metric variables (measured on a ratio scale):
x1 - beds in hotels per 1000 inhabitants of a county,
x2 - number of nights spent daily by resident tourists per 1000 inhabitants of a county,
x3 - number of nights spent daily by foreign tourists per 1000 inhabitants of a county,
x4 - dust pollution emission in tons per 10 km2 of a county area,
x5 - gas pollution emission in tons per 1 km2 of a county area,
x6 - number of criminal offences, crimes against life and health and property crimes per 1000 inhabitants of a county,
x7 - forest cover of the county in %,
x8 - participants of mass events per 1000 inhabitants of a county,
x9 - number of tourist economy entities (sections: I, N79) registered in the system REGON per 1000 inhabitants of a county.
The three variables (x4, x5 and x6) are destimulants. Other variables are stimulants.
In the second step, the data were aggregated to the level of the voivodships (NUTS-2), giving the symbolic interval-valued data. The lower bound of the interval for each symbolic interval-valued variable in the voivodship was obtained by calculating the first quartile on the basis of data from counties. The upper bound of the interval was obtained by calculating the third quartile.
}
\format{Three-dimensional array: 18 objects (16 Polish NUTS-2 regions - voivodships, pattern and anti-pattern object), 9 symbolic interval-valued variables with lower and upper bounds of the intervals in the third dimension. The coordinates of a pattern object cover the most preferred preference variable values.
The coordinates of an anti-pattern object cover the least preferred preference variable values.}
\source{
The statistical data were collected in 2016 and come from the Local Data Bank of the Central Statistical Office of Poland.
}
\examples{
library(clusterSim)
data(data_symbolic_interval_polish_voivodships)
print(data_symbolic_interval_polish_voivodships)
}
\keyword{data set}
|
/man/data_symbolic_interval_polish_voivodships.rd
|
no_license
|
cran/clusterSim
|
R
| false
| false
| 2,471
|
rd
|
\name{data_symbolic_interval_polish_voivodships}
\alias{data_symbolic_interval_polish_voivodships}
\docType{data}
\title{The evaluation of Polish voivodships tourism attractiveness level}
\description{The empirical study uses the statistical data referring to the attractiveness level of 18 objects (16 Polish NUTS-2 regions - voivodships, pattern and anti-pattern object).
Two-stage data collection was performed. Firstly, data on tourist attractiveness were collected for 380 counties using 9 classic metric variables (measured on a ratio scale):
x1 - beds in hotels per 1000 inhabitants of a county,
x2 - number of nights spent daily by resident tourists per 1000 inhabitants of a county,
x3 - number of nights spent daily by foreign tourists per 1000 inhabitants of a county,
x4 - dust pollution emission in tons per 10 km2 of a county area,
x5 - gas pollution emission in tons per 1 km2 of a county area,
x6 - number of criminal offences, crimes against life and health and property crimes per 1000 inhabitants of a county,
x7 - forest cover of the county in %,
x8 - participants of mass events per 1000 inhabitants of a county,
x9 - number of tourist economy entities (sections: I, N79) registered in the system REGON per 1000 inhabitants of a county.
The three variables (x4, x5 and x6) are destimulants. Other variables are stimulants.
In the second step, the data were aggregated to the level of the voivodships (NUTS-2), giving the symbolic interval-valued data. The lower bound of the interval for each symbolic interval-valued variable in the voivodship was obtained by calculating the first quartile on the basis of data from counties. The upper bound of the interval was obtained by calculating the third quartile.
}
\format{Three-dimensional array: 18 objects (16 Polish NUTS-2 regions - voivodships, pattern and anti-pattern object), 9 symbolic interval-valued variables with lower and upper bounds of the intervals in the third dimension. The coordinates of a pattern object cover the most preferred preference variable values.
The coordinates of an anti-pattern object cover the least preferred preference variable values.}
\source{
The statistical data were collected in 2016 and come from the Local Data Bank of the Central Statistical Office of Poland.
}
\examples{
library(clusterSim)
data(data_symbolic_interval_polish_voivodships)
print(data_symbolic_interval_polish_voivodships)
}
\keyword{data set}
|
#' @export
makeRLearner.regr.rpart = function() {
  # Hyperparameter space mirroring the arguments of rpart::rpart.control.
  param.set = makeParamSet(
    makeIntegerLearnerParam(id = "minsplit", default = 20L, lower = 1L),
    makeIntegerLearnerParam(id = "minbucket", lower = 1L),
    makeNumericLearnerParam(id = "cp", default = 0.01, lower = 0, upper = 1),
    makeIntegerLearnerParam(id = "maxcompete", default = 4L, lower = 0L),
    makeIntegerLearnerParam(id = "maxsurrogate", default = 5L, lower = 0L),
    makeDiscreteLearnerParam(id = "usesurrogate", default = 2L, values = 0:2),
    makeDiscreteLearnerParam(id = "surrogatestyle", default = 0L, values = 0:1),
    # rpart.control documents 30 as the hard upper limit for maxdepth
    makeIntegerLearnerParam(id = "maxdepth", default = 30L, lower = 1L, upper = 30L),
    makeIntegerLearnerParam(id = "xval", default = 10L, lower = 0L)
  )
  # Register the rpart regression tree as an mlr learner.
  makeRLearnerRegr(
    cl = "regr.rpart",
    package = "rpart",
    par.set = param.set,
    par.vals = list(xval = 0L),
    properties = c("missings", "numerics", "factors", "ordered", "weights"),
    name = "Decision Tree",
    short.name = "rpart",
    note = "`xval` has been set to 0 by default for speed."
  )
}
#' @export
trainLearner.regr.rpart = function(.learner, .task, .subset, .weights = NULL, ...) {
  # Fetch the training data for the requested subset, then fit the tree;
  # only pass `weights` when the caller actually supplied them.
  data = getTaskData(.task, .subset)
  if (is.null(.weights)) {
    rpart::rpart(getTaskFormula(.task), data = data, ...)
  } else {
    rpart::rpart(as.formula(getTaskFormulaAsString(.task)), data = data,
      weights = .weights, ...)
  }
}
#' @export
predictLearner.regr.rpart = function(.learner, .model, .newdata, ...) {
  # Delegate to rpart's predict method on the fitted tree.
  fitted.tree = .model$learner.model
  predict(fitted.tree, newdata = .newdata, ...)
}
|
/R/RLearner_regr_rpart.R
|
no_license
|
dickoa/mlr
|
R
| false
| false
| 1,622
|
r
|
#' @export
makeRLearner.regr.rpart = function() {
  # Hyperparameter space mirroring the arguments of rpart::rpart.control.
  param.set = makeParamSet(
    makeIntegerLearnerParam(id = "minsplit", default = 20L, lower = 1L),
    makeIntegerLearnerParam(id = "minbucket", lower = 1L),
    makeNumericLearnerParam(id = "cp", default = 0.01, lower = 0, upper = 1),
    makeIntegerLearnerParam(id = "maxcompete", default = 4L, lower = 0L),
    makeIntegerLearnerParam(id = "maxsurrogate", default = 5L, lower = 0L),
    makeDiscreteLearnerParam(id = "usesurrogate", default = 2L, values = 0:2),
    makeDiscreteLearnerParam(id = "surrogatestyle", default = 0L, values = 0:1),
    # rpart.control documents 30 as the hard upper limit for maxdepth
    makeIntegerLearnerParam(id = "maxdepth", default = 30L, lower = 1L, upper = 30L),
    makeIntegerLearnerParam(id = "xval", default = 10L, lower = 0L)
  )
  # Register the rpart regression tree as an mlr learner.
  makeRLearnerRegr(
    cl = "regr.rpart",
    package = "rpart",
    par.set = param.set,
    par.vals = list(xval = 0L),
    properties = c("missings", "numerics", "factors", "ordered", "weights"),
    name = "Decision Tree",
    short.name = "rpart",
    note = "`xval` has been set to 0 by default for speed."
  )
}
#' @export
trainLearner.regr.rpart = function(.learner, .task, .subset, .weights = NULL, ...) {
  # Fetch the training data for the requested subset, then fit the tree;
  # only pass `weights` when the caller actually supplied them.
  data = getTaskData(.task, .subset)
  if (is.null(.weights)) {
    rpart::rpart(getTaskFormula(.task), data = data, ...)
  } else {
    rpart::rpart(as.formula(getTaskFormulaAsString(.task)), data = data,
      weights = .weights, ...)
  }
}
#' @export
predictLearner.regr.rpart = function(.learner, .model, .newdata, ...) {
  # Delegate to rpart's predict method on the fitted tree.
  fitted.tree = .model$learner.model
  predict(fitted.tree, newdata = .newdata, ...)
}
|
\name{removeDuplicatedRulesVR}
\alias{removeDuplicatedRulesVR}
\title{REMOVE DUPLICATED RULES FROM VALUE REDUCT}
\description{It returns a new Value Reduct object without duplicated rules.}
\usage{
removeDuplicatedRulesVR(object)
}
\arguments{
\item{object}{A Value Reduct object}
}
\value{It returns a Value Reduct object without duplicated rules.}
\references{Pawlak, Zdzislaw 1991 \emph{Rough Sets: Theoretical Aspects of Reasoning About Data} Dordrecht: Kluwer Academic Publishing.}
\author{Alber Sanchez \email{alber.sanchez@uni-muenster.de}}
\seealso{
\code{\link{ValueReduct-class}}
}
\examples{
exampleMatrix2 <- matrix(c(1,1,0,1,1,2,2,0,0,0,1,1,1,2,0,0,0,
0,0,0,2,1,0,0,1,2,2,2,1,1,0,0,2,2,2),ncol = 5)
dt <- new(Class="DecisionTable",decisionTable = exampleMatrix2)
dtUnique <- removeDuplicatedRulesDT(dt)
cr <- new(Class="ConditionReduct",decisionTable = dtUnique,columnIds=c(1,2,4,5))
cr <- removeDuplicatedRulesCR(cr)
vr <- computeValueReduct(cr)
vr <- removeDuplicatedRulesVR(vr)
}
\keyword{logic}
\keyword{rough}
\keyword{set}
|
/man/removeDuplicatedRulesVR.Rd
|
no_license
|
albhasan/RoughSetKnowledgeReduction
|
R
| false
| false
| 1,046
|
rd
|
\name{removeDuplicatedRulesVR}
\alias{removeDuplicatedRulesVR}
\title{REMOVE DUPLICATED RULES FROM VALUE REDUCT}
\description{It returns a new Value Reduct object without duplicated rules.}
\usage{
removeDuplicatedRulesVR(object)
}
\arguments{
\item{object}{A Value Reduct object}
}
\value{It returns a Value Reduct object without duplicated rules.}
\references{Pawlak, Zdzislaw 1991 \emph{Rough Sets: Theoretical Aspects of Reasoning About Data} Dordrecht: Kluwer Academic Publishing.}
\author{Alber Sanchez \email{alber.sanchez@uni-muenster.de}}
\seealso{
\code{\link{ValueReduct-class}}
}
\examples{
exampleMatrix2 <- matrix(c(1,1,0,1,1,2,2,0,0,0,1,1,1,2,0,0,0,
0,0,0,2,1,0,0,1,2,2,2,1,1,0,0,2,2,2),ncol = 5)
dt <- new(Class="DecisionTable",decisionTable = exampleMatrix2)
dtUnique <- removeDuplicatedRulesDT(dt)
cr <- new(Class="ConditionReduct",decisionTable = dtUnique,columnIds=c(1,2,4,5))
cr <- removeDuplicatedRulesCR(cr)
vr <- computeValueReduct(cr)
vr <- removeDuplicatedRulesVR(vr)
}
\keyword{logic}
\keyword{rough}
\keyword{set}
|
# Confluent hypergeometric function of the second kind, U(a, b, z),
# assembled from two Kummer-M (1F1) terms via a connection formula.
# Depends on kummerM() and cgamma() (complex gamma), defined elsewhere.
#
# a, b : parameters; coerced to complex if supplied as real
# z    : argument; coerced to complex if supplied as real
# ip   : unused in this body (kept, presumably, for call-site compatibility)
#
# NOTE(review): the textbook connection formula (DLMF 13.2.42) multiplies
# the first term by gamma(1-b)/gamma(1+a-b) and the second by
# gamma(b-1)/gamma(a); here gamma(1-b) and gamma(b-1) both appear in the
# DENOMINATOR instead. Confirm against the exact definition of the
# kummerM() in use (e.g. whether it returns a regularized 1F1).
conf_hypergeo_U <-
function (a, b, z, ip = 0)
{
    # promote all inputs to complex so cgamma()/kummerM() get complex args
    if (!is.complex(z))
        z = complex(real = z, imaginary = 0 * z)
    if (!is.complex(a))
        a = complex(real = a, imaginary = 0)
    if (!is.complex(b))
        b = complex(real = b, imaginary = 0)
    # two-term combination of Kummer M functions (see NOTE above)
    ans =
      kummerM(z, a = a, b = b)/
      (cgamma(1 + a - b) * cgamma(1 - b)) +
      (z^(1 - b)) *
      kummerM(z, a = (1 + a - b), b = 2 - b)/
      (cgamma(a) * cgamma(b-1))
    ans
}
|
/Archive/func/sampler/MFVI/util.R
|
no_license
|
jereliu/BELA
|
R
| false
| false
| 460
|
r
|
# Confluent hypergeometric function of the second kind, U(a, b, z),
# assembled from two Kummer-M (1F1) terms via a connection formula.
# Depends on kummerM() and cgamma() (complex gamma), defined elsewhere.
#
# a, b : parameters; coerced to complex if supplied as real
# z    : argument; coerced to complex if supplied as real
# ip   : unused in this body (kept, presumably, for call-site compatibility)
#
# NOTE(review): the textbook connection formula (DLMF 13.2.42) multiplies
# the first term by gamma(1-b)/gamma(1+a-b) and the second by
# gamma(b-1)/gamma(a); here gamma(1-b) and gamma(b-1) both appear in the
# DENOMINATOR instead. Confirm against the exact definition of the
# kummerM() in use (e.g. whether it returns a regularized 1F1).
conf_hypergeo_U <-
function (a, b, z, ip = 0)
{
    # promote all inputs to complex so cgamma()/kummerM() get complex args
    if (!is.complex(z))
        z = complex(real = z, imaginary = 0 * z)
    if (!is.complex(a))
        a = complex(real = a, imaginary = 0)
    if (!is.complex(b))
        b = complex(real = b, imaginary = 0)
    # two-term combination of Kummer M functions (see NOTE above)
    ans =
      kummerM(z, a = a, b = b)/
      (cgamma(1 + a - b) * cgamma(1 - b)) +
      (z^(1 - b)) *
      kummerM(z, a = (1 + a - b), b = 2 - b)/
      (cgamma(a) * cgamma(b-1))
    ans
}
|
# Grouped bar-plot comparison of workflow execution times:
# measured ("Real") vs. configured limit vs. SDBWS schedule.
setwd('/Users/yoshimori/Desktop/Magisterka/New/CloudFunctionOptimizer/plots')
library('ggplot2')
# Result rows come in triplets per function configuration:
# limit, real, SDBWS -- hence the seq(..., by = 3) extractions below.
results <- read.table("./data/all_results.csv", header = TRUE)
bar_titles <- results[seq(1, 21, 3), 1]
# alternative hard-coded labels: c("256", "512", "1024", "1536", "2048", "2560", "3008")
limit_time <- results[seq(1, 21, 3), 2]
limit_price <- results[seq(1, 21, 3), 3]
real_time <- results[seq(2, 21, 3), 2]
real_price <- results[seq(2, 21, 3), 3]
sdbws_time <- results[seq(3, 21, 3), 2]
sdbws_price <- results[seq(3, 21, 3), 3]
times <- data.frame("Real" = real_time, "Limit" = limit_time, "SDBWS" = sdbws_time)
str(times)
# transpose so each series becomes one bar group per function size
time_matrix <- t(data.matrix(times))
colnames(time_matrix) <- bar_titles
barplot(time_matrix,
        main = "Execution time",
        xlab = "Functions",
        col = c("blue", "red", "green"),
        beside = TRUE)
leg <- legend("topright",
              c("Real", "Limit", "SDBWS"),
              fill = c("blue", "red", "green"))
|
/plots/execution_time_comparison.R
|
no_license
|
PawelBanach/CloudFunctionOptimizer
|
R
| false
| false
| 861
|
r
|
# Grouped bar-plot comparison of workflow execution times:
# measured ("Real") vs. configured limit vs. SDBWS schedule.
setwd('/Users/yoshimori/Desktop/Magisterka/New/CloudFunctionOptimizer/plots')
library('ggplot2')
# Result rows come in triplets per function configuration:
# limit, real, SDBWS -- hence the seq(..., by = 3) extractions below.
results <- read.table("./data/all_results.csv", header = TRUE)
bar_titles <- results[seq(1, 21, 3), 1]
# alternative hard-coded labels: c("256", "512", "1024", "1536", "2048", "2560", "3008")
limit_time <- results[seq(1, 21, 3), 2]
limit_price <- results[seq(1, 21, 3), 3]
real_time <- results[seq(2, 21, 3), 2]
real_price <- results[seq(2, 21, 3), 3]
sdbws_time <- results[seq(3, 21, 3), 2]
sdbws_price <- results[seq(3, 21, 3), 3]
times <- data.frame("Real" = real_time, "Limit" = limit_time, "SDBWS" = sdbws_time)
str(times)
# transpose so each series becomes one bar group per function size
time_matrix <- t(data.matrix(times))
colnames(time_matrix) <- bar_titles
barplot(time_matrix,
        main = "Execution time",
        xlab = "Functions",
        col = c("blue", "red", "green"),
        beside = TRUE)
leg <- legend("topright",
              c("Real", "Limit", "SDBWS"),
              fill = c("blue", "red", "green"))
|
# Machine-specific project paths. Both pairs are assigned unconditionally,
# so the LAST pair wins; comment out or reorder when switching machines.
# on Mi laptop
home.dir <- '/media/lev-genetik/980E73270E72FD96/Liege'
progr.dir <- '/home/lev-genetik/Desktop/Projects/liege/src/Lev'
# on DELL laptop
home.dir <- '/media/lev-genetik/OS/Liege'
progr.dir <- '/home/lev-genetik/CEDAR_mt/'
|
/SetDirectories.R
|
no_license
|
Lev-genetik/CEDAR_mt
|
R
| false
| false
| 237
|
r
|
# on Mi laptop
home.dir <- '/media/lev-genetik/980E73270E72FD96/Liege'
progr.dir <- '/home/lev-genetik/Desktop/Projects/liege/src/Lev'
# on DELL laptop
home.dir <- '/media/lev-genetik/OS/Liege'
progr.dir <- '/home/lev-genetik/CEDAR_mt/'
|
# Unit tests for lubridate's Duration class: constructors (new_difftime,
# new_duration, dseconds/dminutes/...), coercions via as.duration() from
# periods, intervals and difftimes, predicates, formatting, summaries,
# and NA propagation.
context("Durations")
# --- constructor behaviour -------------------------------------------------
test_that("new_difftime works as expected",{
  x <- as.POSIXct("2008-08-03 13:01:59", tz = "UTC")
  # expected: 5 s + 30 min + 1 h + 2 weeks, expressed as a difftime
  y <- difftime(x + 5 + 30*60 + 60*60 + 14*24*60*60, x, tz = "UTC")
  attr(y, "tzone") <- NULL
  diff <- new_difftime(seconds = 5, minutes = 30, days = 0,
    hour = 1, weeks = 2)
  expect_that(diff, equals(y))
})
test_that("new_difftime handles vectors",{
  x <- as.POSIXct(c("2008-08-03 13:01:59", "2008-08-03 13:01:59"), tz = "UTC")
  y <- difftime(x + c(5 + 30*60 + 60*60 + 14*24*60*60,
    1 + 3*24*60*60 + 60*60), x, tz = "UTC")
  attr(y, "tzone") <- NULL
  z <- difftime(x + c(5 + 30*60 + 60*60 + 14*24*60*60, 5 +
    30*60 + 60*60 + 14*24*60*60 + 3*24*60*60), x, tz = "UTC")
  attr(z, "tzone") <- NULL
  expect_that(new_difftime(seconds = c(5, 1), minutes = c(30,
    0), days = c(0, 3), hour = c(1,1), weeks = c(2, 0)),
    equals(y))
  # scalar arguments must recycle against the vector-valued ones
  expect_that(new_difftime(seconds = 5, minutes = 30, days =
    c(0, 3), hour = 1, weeks = 2), equals(z))
})
test_that("new_duration works as expected",{
  dur <- new_duration(seconds = 5, minutes = 30, days = 0,
    hour = 1, weeks = 2)
  expect_equal(dur@.Data, 1215005)
  expect_is(dur, "Duration")
})
test_that("new_duration handles vectors",{
  dur1 <- new_duration(seconds = c(5, 1), minutes = c(30, 0),
    days = c(0, 3), hour = c(1,1), weeks = c(2, 0))
  dur2 <- new_duration(seconds = 5, minutes = 30, days =
    c(0, 3), hour = 1, weeks = 2)
  expect_equal(dur1@.Data, c(1215005, 262801))
  expect_equal(dur2@.Data, c(1215005, 1474205))
  expect_is(dur1, "Duration")
  expect_is(dur2, "Duration")
})
# --- coercions to Duration -------------------------------------------------
test_that("as.duration handles vectors",{
  expect_that(as.duration(minutes(1:3)), equals(eminutes(1:3)))
})
test_that("as.duration handles periods",{
  expect_that(as.duration(seconds(1)), equals(dseconds(1)))
  expect_that(as.duration(minutes(2)), equals(dminutes(2)))
  expect_that(as.duration(hours(3)), equals(dhours(3)))
  expect_that(as.duration(days(4)), equals(ddays(4)))
  expect_that(as.duration(weeks(5)), equals(dweeks(5)))
  # months/years convert at their average length in seconds
  expect_that(as.duration(months(1)), equals(dseconds(60*60*24*365/12)))
  expect_that(as.duration(years(1)), equals(dseconds(60*60*24*365)))
  expect_that(as.duration(seconds(1) + minutes(4)), equals(dseconds(1) + dminutes(4)))
})
test_that("as.duration handles intervals",{
  time1 <- as.POSIXct("2009-01-02 12:24:03", tz = "UTC")
  time2 <- as.POSIXct("2010-02-03 14:31:42", tz = "UTC")
  dur <- as.duration(interval(time1, time2))
  y <- as.numeric(time2 - time1, units = "secs")
  expect_equal(dur@.Data, y)
  expect_is(dur, "Duration")
})
test_that("as.duration handles difftimes",{
  x <- difftime(as.POSIXct("2010-02-03 14:31:42", tz = "UTC"),
    as.POSIXct("2009-01-02 12:24:03", tz = "UTC"))
  dur <- as.duration(x)
  y <- as.numeric(x, units = "secs")
  expect_equal(dur@.Data, y)
  expect_is(dur, "Duration")
})
test_that("eobjects handle vectors",{
  dur <- dseconds(c(1,3,4))
  expect_equal(dur@.Data, c(1, 3, 4))
  expect_is(dur, "Duration")
})
# --- predicates ------------------------------------------------------------
test_that("is.duration works as expected",{
  ct_time <- as.POSIXct("2008-08-03 13:01:59", tz = "UTC")
  lt_time <- as.POSIXlt("2009-08-03 13:01:59", tz = "UTC")
  expect_that(is.duration(234), is_false())
  expect_that(is.duration(ct_time), is_false())
  expect_that(is.duration(lt_time), is_false())
  expect_that(is.duration(Sys.Date()), is_false())
  # periods and difftimes are NOT durations
  expect_that(is.duration(minutes(1)), is_false())
  expect_that(is.duration(eminutes(1)), is_true())
  expect_that(is.duration(new_difftime(1000)), is_false())
  expect_that(is.duration(new_interval(lt_time, ct_time)), is_false())
})
test_that("is.duration handle vectors",{
  expect_that(is.duration(dminutes(1:3)), is_true())
})
# --- formatting & summaries ------------------------------------------------
test_that("format.Duration correctly displays intervals of length 0", {
  dur <- new_duration(seconds = 5)
  expect_output(dur[FALSE], "Duration\\(0)")
})
test_that("format.Duration correctly displays durations with an NA", {
  dur <- new_duration(seconds = c(5, NA))
  expect_equivalent(format(dur), c("5s", NA))
})
test_that("summary.Duration creates useful summary", {
  dur <- dminutes(5)
  text <- c(rep("300s (~5 minutes)", 6), 1)
  names(text) <- c("Min.", "1st Qu.", "Median", "Mean", "3rd Qu.", "Max.", "NA's")
  expect_equal(summary(c(dur, NA)), text)
})
# --- NA propagation --------------------------------------------------------
test_that("compute_estimate works with NA values", {
  x <- list(
    NA,
    c(1, NA),
    c(100, NA),
    c(10000, NA),
    c(100000, NA),
    c(100000000, NA)
  )
  expected <- list(
    "NA seconds",
    c("1 seconds", "NA seconds"),
    c("~1.67 minutes", "NA minutes"),
    c("~2.78 hours", "NA hours"),
    c("~1.16 days", "NA days"),
    c("~3.17 years", "NA years")
  )
  mapply(function(x, expected) {
    expect_identical(expected, lubridate:::compute_estimate(x))
  },
  x, expected)
})
test_that("as.duration handles NA interval objects", {
  one_missing_date <- as.POSIXct(NA_real_, origin = origin)
  one_missing_interval <- new_interval(one_missing_date,
    one_missing_date)
  several_missing_dates <- rep(as.POSIXct(NA_real_, origin = origin), 2)
  several_missing_intervals <- new_interval(several_missing_dates,
    several_missing_dates)
  start_missing_intervals <- new_interval(several_missing_dates, origin)
  end_missing_intervals <- new_interval(origin, several_missing_dates)
  na.dur <- dseconds(NA)
  expect_equal(as.duration(one_missing_interval), na.dur)
  expect_equal(as.duration(several_missing_intervals), c(na.dur, na.dur))
  expect_equal(as.duration(start_missing_intervals), c(na.dur, na.dur))
  expect_equal(as.duration(end_missing_intervals), c(na.dur, na.dur))
})
test_that("as.duration handles NA period objects", {
  na.dur <- dseconds(NA)
  expect_equal(suppressMessages(as.duration(years(NA))), na.dur)
  expect_equal(suppressMessages(as.duration(years(c(NA, NA)))), c(na.dur, na.dur))
  expect_equal(suppressMessages(as.duration(years(c(1, NA)))), c(dyears(1), na.dur))
})
test_that("as.duration handles NA objects", {
  na.dur <- dseconds(NA)
  expect_equal(as.duration(NA), na.dur)
})
|
/R/win-library/3.1/lubridate/tests/test-durations.R
|
no_license
|
apexdevelop/working_dir
|
R
| false
| false
| 6,121
|
r
|
# Unit tests for lubridate's Duration class: constructors (new_difftime,
# new_duration, dseconds/dminutes/...), coercions via as.duration() from
# periods, intervals and difftimes, predicates, formatting, summaries,
# and NA propagation.
context("Durations")
# --- constructor behaviour -------------------------------------------------
test_that("new_difftime works as expected",{
  x <- as.POSIXct("2008-08-03 13:01:59", tz = "UTC")
  # expected: 5 s + 30 min + 1 h + 2 weeks, expressed as a difftime
  y <- difftime(x + 5 + 30*60 + 60*60 + 14*24*60*60, x, tz = "UTC")
  attr(y, "tzone") <- NULL
  diff <- new_difftime(seconds = 5, minutes = 30, days = 0,
    hour = 1, weeks = 2)
  expect_that(diff, equals(y))
})
test_that("new_difftime handles vectors",{
  x <- as.POSIXct(c("2008-08-03 13:01:59", "2008-08-03 13:01:59"), tz = "UTC")
  y <- difftime(x + c(5 + 30*60 + 60*60 + 14*24*60*60,
    1 + 3*24*60*60 + 60*60), x, tz = "UTC")
  attr(y, "tzone") <- NULL
  z <- difftime(x + c(5 + 30*60 + 60*60 + 14*24*60*60, 5 +
    30*60 + 60*60 + 14*24*60*60 + 3*24*60*60), x, tz = "UTC")
  attr(z, "tzone") <- NULL
  expect_that(new_difftime(seconds = c(5, 1), minutes = c(30,
    0), days = c(0, 3), hour = c(1,1), weeks = c(2, 0)),
    equals(y))
  # scalar arguments must recycle against the vector-valued ones
  expect_that(new_difftime(seconds = 5, minutes = 30, days =
    c(0, 3), hour = 1, weeks = 2), equals(z))
})
test_that("new_duration works as expected",{
  dur <- new_duration(seconds = 5, minutes = 30, days = 0,
    hour = 1, weeks = 2)
  expect_equal(dur@.Data, 1215005)
  expect_is(dur, "Duration")
})
test_that("new_duration handles vectors",{
  dur1 <- new_duration(seconds = c(5, 1), minutes = c(30, 0),
    days = c(0, 3), hour = c(1,1), weeks = c(2, 0))
  dur2 <- new_duration(seconds = 5, minutes = 30, days =
    c(0, 3), hour = 1, weeks = 2)
  expect_equal(dur1@.Data, c(1215005, 262801))
  expect_equal(dur2@.Data, c(1215005, 1474205))
  expect_is(dur1, "Duration")
  expect_is(dur2, "Duration")
})
# --- coercions to Duration -------------------------------------------------
test_that("as.duration handles vectors",{
  expect_that(as.duration(minutes(1:3)), equals(eminutes(1:3)))
})
test_that("as.duration handles periods",{
  expect_that(as.duration(seconds(1)), equals(dseconds(1)))
  expect_that(as.duration(minutes(2)), equals(dminutes(2)))
  expect_that(as.duration(hours(3)), equals(dhours(3)))
  expect_that(as.duration(days(4)), equals(ddays(4)))
  expect_that(as.duration(weeks(5)), equals(dweeks(5)))
  # months/years convert at their average length in seconds
  expect_that(as.duration(months(1)), equals(dseconds(60*60*24*365/12)))
  expect_that(as.duration(years(1)), equals(dseconds(60*60*24*365)))
  expect_that(as.duration(seconds(1) + minutes(4)), equals(dseconds(1) + dminutes(4)))
})
test_that("as.duration handles intervals",{
  time1 <- as.POSIXct("2009-01-02 12:24:03", tz = "UTC")
  time2 <- as.POSIXct("2010-02-03 14:31:42", tz = "UTC")
  dur <- as.duration(interval(time1, time2))
  y <- as.numeric(time2 - time1, units = "secs")
  expect_equal(dur@.Data, y)
  expect_is(dur, "Duration")
})
test_that("as.duration handles difftimes",{
  x <- difftime(as.POSIXct("2010-02-03 14:31:42", tz = "UTC"),
    as.POSIXct("2009-01-02 12:24:03", tz = "UTC"))
  dur <- as.duration(x)
  y <- as.numeric(x, units = "secs")
  expect_equal(dur@.Data, y)
  expect_is(dur, "Duration")
})
test_that("eobjects handle vectors",{
  dur <- dseconds(c(1,3,4))
  expect_equal(dur@.Data, c(1, 3, 4))
  expect_is(dur, "Duration")
})
# --- predicates ------------------------------------------------------------
test_that("is.duration works as expected",{
  ct_time <- as.POSIXct("2008-08-03 13:01:59", tz = "UTC")
  lt_time <- as.POSIXlt("2009-08-03 13:01:59", tz = "UTC")
  expect_that(is.duration(234), is_false())
  expect_that(is.duration(ct_time), is_false())
  expect_that(is.duration(lt_time), is_false())
  expect_that(is.duration(Sys.Date()), is_false())
  # periods and difftimes are NOT durations
  expect_that(is.duration(minutes(1)), is_false())
  expect_that(is.duration(eminutes(1)), is_true())
  expect_that(is.duration(new_difftime(1000)), is_false())
  expect_that(is.duration(new_interval(lt_time, ct_time)), is_false())
})
test_that("is.duration handle vectors",{
  expect_that(is.duration(dminutes(1:3)), is_true())
})
# --- formatting & summaries ------------------------------------------------
test_that("format.Duration correctly displays intervals of length 0", {
  dur <- new_duration(seconds = 5)
  expect_output(dur[FALSE], "Duration\\(0)")
})
test_that("format.Duration correctly displays durations with an NA", {
  dur <- new_duration(seconds = c(5, NA))
  expect_equivalent(format(dur), c("5s", NA))
})
test_that("summary.Duration creates useful summary", {
  dur <- dminutes(5)
  text <- c(rep("300s (~5 minutes)", 6), 1)
  names(text) <- c("Min.", "1st Qu.", "Median", "Mean", "3rd Qu.", "Max.", "NA's")
  expect_equal(summary(c(dur, NA)), text)
})
# --- NA propagation --------------------------------------------------------
test_that("compute_estimate works with NA values", {
  x <- list(
    NA,
    c(1, NA),
    c(100, NA),
    c(10000, NA),
    c(100000, NA),
    c(100000000, NA)
  )
  expected <- list(
    "NA seconds",
    c("1 seconds", "NA seconds"),
    c("~1.67 minutes", "NA minutes"),
    c("~2.78 hours", "NA hours"),
    c("~1.16 days", "NA days"),
    c("~3.17 years", "NA years")
  )
  mapply(function(x, expected) {
    expect_identical(expected, lubridate:::compute_estimate(x))
  },
  x, expected)
})
test_that("as.duration handles NA interval objects", {
  one_missing_date <- as.POSIXct(NA_real_, origin = origin)
  one_missing_interval <- new_interval(one_missing_date,
    one_missing_date)
  several_missing_dates <- rep(as.POSIXct(NA_real_, origin = origin), 2)
  several_missing_intervals <- new_interval(several_missing_dates,
    several_missing_dates)
  start_missing_intervals <- new_interval(several_missing_dates, origin)
  end_missing_intervals <- new_interval(origin, several_missing_dates)
  na.dur <- dseconds(NA)
  expect_equal(as.duration(one_missing_interval), na.dur)
  expect_equal(as.duration(several_missing_intervals), c(na.dur, na.dur))
  expect_equal(as.duration(start_missing_intervals), c(na.dur, na.dur))
  expect_equal(as.duration(end_missing_intervals), c(na.dur, na.dur))
})
test_that("as.duration handles NA period objects", {
  na.dur <- dseconds(NA)
  expect_equal(suppressMessages(as.duration(years(NA))), na.dur)
  expect_equal(suppressMessages(as.duration(years(c(NA, NA)))), c(na.dur, na.dur))
  expect_equal(suppressMessages(as.duration(years(c(1, NA)))), c(dyears(1), na.dur))
})
test_that("as.duration handles NA objects", {
  na.dur <- dseconds(NA)
  expect_equal(as.duration(NA), na.dur)
})
|
# Standard testthat entry point: runs the RStoolbox package test suite
# under R CMD check.
library(testthat)
library(RStoolbox)
Sys.setenv("R_TESTS" = "") ## needed to pass R CMD check: https://github.com/hadley/testthat/issues/144
test_check("RStoolbox")
|
/tests/testthat.R
|
no_license
|
nemochina2008/RStoolbox
|
R
| false
| false
| 171
|
r
|
# Standard testthat entry point: runs the RStoolbox package test suite
# under R CMD check.
library(testthat)
library(RStoolbox)
Sys.setenv("R_TESTS" = "") ## needed to pass R CMD check: https://github.com/hadley/testthat/issues/144
test_check("RStoolbox")
|
##
## Fold-changes from U to PI -- checking that all species are on the same scale.
#setwd("/usr/projects/GROseq/NHP/annotations")
load("../annotations/fdr.RData")  # provides fdr_df (per-species FDRs and fold-changes)
## Index extent of changes.
## FIX: the chimp FDR column was referenced as "U2PIFDDR_C" (doubled "D");
## a non-existent column gives NULL, collapsing `isresp` to logical(0) so
## no genes are selected. Corrected to U2PIFDR_C, matching the _H and _M
## columns. (Verify the actual column name stored in fdr.RData.)
isresp <- fdr_df$U2PIFDR_H < 0.05 & fdr_df$U2PIFDR_C < 0.05 & fdr_df$U2PIFDR_M < 0.05 ## Clearly responding in all three.
summary(isresp)
pdf("foldchange.correlations.pdf")
## Per-species fold-change distributions among responsive genes (box + violin)
boxplot(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp])
library(vioplot)
vioplot(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp])
## Cross-species correlation of fold-changes
cor(data.frame(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp]))
## Plots ... this is the supplementary figure.
pairs(data.frame(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp]))
dev.off()
|
/dataSanityChecks/sanityCheckUPIFoldChanges.R
|
no_license
|
Danko-Lab/CD4-Cell-Evolution
|
R
| false
| false
| 807
|
r
|
##
## Fold-changes from U to PI -- checking that all species are on the same scale.
#setwd("/usr/projects/GROseq/NHP/annotations")
# fdr.RData is expected to provide the fdr_df data frame used throughout.
load("../annotations/fdr.RData")
## Index extent of changes.
# NOTE(review): the chimp FDR column is spelled "U2PIFDDR_C" (double D) while
# human/macaque use "U2PIFDR_H"/"U2PIFDR_M".  If that name is a typo,
# fdr_df$U2PIFDDR_C is NULL and isresp silently becomes logical(0) --
# confirm against names(fdr_df).
isresp <- fdr_df$U2PIFDR_H < 0.05 & fdr_df$U2PIFDDR_C < 0.05 & fdr_df$U2PIFDR_M < 0.05 ## Clearly responding in all three.
summary(isresp)
pdf("foldchange.correlations.pdf")
# Fold-change distributions per species (box + violin), their pairwise
# correlations, and the scatter-plot matrix used as the supplementary figure.
boxplot(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp])
library(vioplot)
vioplot(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp])
cor(data.frame(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp]))
## Plots ... this is the supplementary figure.
pairs(data.frame(fdr_df$U2PIFC_H[isresp], fdr_df$U2PIFC_C[isresp], fdr_df$U2PIFC_M[isresp]))
dev.off()
|
#' Access files in the current app
#'
#' NOTE: If you manually change your package name in the DESCRIPTION,
#' don't forget to change it here too, and in the config file.
#' For a safer name change mechanism, use the `golem::set_golem_name()` function.
#'
#' @param ... character vectors, specifying subdirectory and file(s)
#' within your package. The default, none, returns the root of the app.
#'
#' @return A character path inside the installed package ("" if not found).
#' @noRd
app_sys <- function(...) {
  # system.file() resolves paths relative to the installed SCORER package.
  system.file(..., package = "SCORER")
}
#' Read App Config
#'
#' Looks up a single value in the package's golem-config.yml.  The active
#' configuration is chosen from GOLEM_CONFIG_ACTIVE, falling back to
#' R_CONFIG_ACTIVE, falling back to "default".
#'
#' @param value Value to retrieve from the config file.
#' @param config GOLEM_CONFIG_ACTIVE value. If unset, R_CONFIG_ACTIVE.
#' If unset, "default".
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @noRd
get_golem_config <- function(
  value,
  config = Sys.getenv(
    "GOLEM_CONFIG_ACTIVE",
    Sys.getenv(
      "R_CONFIG_ACTIVE",
      "default"
    )
  ),
  use_parent = TRUE
) {
  # The config file ships inside the installed package.
  # Modify this if your config file is somewhere else:
  cfg_path <- app_sys("golem-config.yml")
  config::get(
    value = value,
    config = config,
    file = cfg_path,
    use_parent = use_parent
  )
}
|
/R/app_config.R
|
permissive
|
verbalins/SCORER
|
R
| false
| false
| 1,103
|
r
|
#' Access files in the current app
#'
#' NOTE: If you manually change your package name in the DESCRIPTION,
#' don't forget to change it here too, and in the config file.
#' For a safer name change mechanism, use the `golem::set_golem_name()` function.
#'
#' @param ... character vectors, specifying subdirectory and file(s)
#' within your package. The default, none, returns the root of the app.
#'
#' @return A character path inside the installed package ("" if not found).
#' @noRd
app_sys <- function(...) {
  # system.file() resolves paths relative to the installed SCORER package.
  system.file(..., package = "SCORER")
}
#' Read App Config
#'
#' Looks up a single value in the package's golem-config.yml.  The active
#' configuration is chosen from GOLEM_CONFIG_ACTIVE, falling back to
#' R_CONFIG_ACTIVE, falling back to "default".
#'
#' @param value Value to retrieve from the config file.
#' @param config GOLEM_CONFIG_ACTIVE value. If unset, R_CONFIG_ACTIVE.
#' If unset, "default".
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @noRd
get_golem_config <- function(
  value,
  config = Sys.getenv(
    "GOLEM_CONFIG_ACTIVE",
    Sys.getenv(
      "R_CONFIG_ACTIVE",
      "default"
    )
  ),
  use_parent = TRUE
) {
  # The config file ships inside the installed package.
  # Modify this if your config file is somewhere else:
  cfg_path <- app_sys("golem-config.yml")
  config::get(
    value = value,
    config = config,
    file = cfg_path,
    use_parent = use_parent
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/military.R
\name{military}
\alias{military}
\title{Generate Random Vector of Military Branches}
\usage{
military(n, x = c("Army", "Air Force", "Navy", "Marine Corps", "Coast Guard"),
prob = c(0.3785, 0.2334, 0.2218, 0.1366, 0.0296), name = "Military")
}
\arguments{
\item{n}{The number of elements to generate. This can be globally set within
the environment of \code{r_data_frame} or \code{r_list}.}
\item{x}{A vector of elements to choose from.}
\item{prob}{A vector of probabilities to choose from.}
\item{name}{The name to assign to the output vector's \code{varname}
attribute. This is used to auto assign names to the column/vector name when
used inside of \code{r_data_frame} or \code{r_list}.}
}
\value{
Returns a random factor vector of military branch elements.
}
\description{
Generate a random vector of military branches.
}
\details{
The military branches and probabilities used match approximate U.S.
military make-up:
\tabular{lrr}{
\bold{ Branch} \tab \bold{N} \tab \bold{Percent} \cr
Army \tab 541,291 \tab 37.9\%\cr
Air Force \tab 333,772 \tab 23.3\%\cr
Navy \tab 317,237 \tab 22.2\%\cr
Marine Corps \tab 195,338 \tab 13.7\%\cr
Coast Guard \tab 42,357 \tab 3.0\%\cr
}
}
\examples{
military(10)
barplot(table(military(10000)))
pie(table(military(10000)))
}
\seealso{
Other variable.functions: \code{\link{age}},
\code{\link{animal}}, \code{\link{answer}},
\code{\link{area}}, \code{\link{car}},
\code{\link{children}}, \code{\link{coin}},
\code{\link{color}}, \code{\link{date_stamp}},
\code{\link{death}}, \code{\link{dice}},
\code{\link{dna}}, \code{\link{dob}},
\code{\link{dummy}}, \code{\link{education}},
\code{\link{employment}}, \code{\link{eye}},
\code{\link{grade_level}}, \code{\link{grade}},
\code{\link{group}}, \code{\link{hair}},
\code{\link{height}}, \code{\link{income}},
\code{\link{internet_browser}}, \code{\link{iq}},
\code{\link{language}}, \code{\link{level}},
\code{\link{likert}}, \code{\link{lorem_ipsum}},
\code{\link{marital}}, \code{\link{month}},
\code{\link{name}}, \code{\link{normal}},
\code{\link{political}}, \code{\link{race}},
\code{\link{religion}}, \code{\link{sat}},
\code{\link{sentence}}, \code{\link{sex_inclusive}},
\code{\link{sex}}, \code{\link{smokes}},
\code{\link{speed}}, \code{\link{state}},
\code{\link{string}}, \code{\link{upper}},
\code{\link{valid}}, \code{\link{year}},
\code{\link{zip_code}}
}
\keyword{army}
\keyword{branch}
\keyword{marines}
\keyword{military}
\keyword{navy}
|
/man/military.Rd
|
no_license
|
AlfonsoRReyes/wakefield
|
R
| false
| true
| 2,616
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/military.R
\name{military}
\alias{military}
\title{Generate Random Vector of Military Branches}
\usage{
military(n, x = c("Army", "Air Force", "Navy", "Marine Corps", "Coast Guard"),
prob = c(0.3785, 0.2334, 0.2218, 0.1366, 0.0296), name = "Military")
}
\arguments{
\item{n}{The number of elements to generate. This can be globally set within
the environment of \code{r_data_frame} or \code{r_list}.}
\item{x}{A vector of elements to choose from.}
\item{prob}{A vector of probabilities to choose from.}
\item{name}{The name to assign to the output vector's \code{varname}
attribute. This is used to auto assign names to the column/vector name when
used inside of \code{r_data_frame} or \code{r_list}.}
}
\value{
Returns a random factor vector of military branch elements.
}
\description{
Generate a random vector of military branches.
}
\details{
The military branches and probabilities used match approximate U.S.
military make-up:
\tabular{lrr}{
\bold{ Branch} \tab \bold{N} \tab \bold{Percent} \cr
Army \tab 541,291 \tab 37.9\%\cr
Air Force \tab 333,772 \tab 23.3\%\cr
Navy \tab 317,237 \tab 22.2\%\cr
Marine Corps \tab 195,338 \tab 13.7\%\cr
Coast Guard \tab 42,357 \tab 3.0\%\cr
}
}
\examples{
military(10)
barplot(table(military(10000)))
pie(table(military(10000)))
}
\seealso{
Other variable.functions: \code{\link{age}},
\code{\link{animal}}, \code{\link{answer}},
\code{\link{area}}, \code{\link{car}},
\code{\link{children}}, \code{\link{coin}},
\code{\link{color}}, \code{\link{date_stamp}},
\code{\link{death}}, \code{\link{dice}},
\code{\link{dna}}, \code{\link{dob}},
\code{\link{dummy}}, \code{\link{education}},
\code{\link{employment}}, \code{\link{eye}},
\code{\link{grade_level}}, \code{\link{grade}},
\code{\link{group}}, \code{\link{hair}},
\code{\link{height}}, \code{\link{income}},
\code{\link{internet_browser}}, \code{\link{iq}},
\code{\link{language}}, \code{\link{level}},
\code{\link{likert}}, \code{\link{lorem_ipsum}},
\code{\link{marital}}, \code{\link{month}},
\code{\link{name}}, \code{\link{normal}},
\code{\link{political}}, \code{\link{race}},
\code{\link{religion}}, \code{\link{sat}},
\code{\link{sentence}}, \code{\link{sex_inclusive}},
\code{\link{sex}}, \code{\link{smokes}},
\code{\link{speed}}, \code{\link{state}},
\code{\link{string}}, \code{\link{upper}},
\code{\link{valid}}, \code{\link{year}},
\code{\link{zip_code}}
}
\keyword{army}
\keyword{branch}
\keyword{marines}
\keyword{military}
\keyword{navy}
|
## makeCacheMatrix(): wrap a matrix in a list of accessor closures that also
## memoise its inverse.  set() replaces the stored matrix and invalidates the
## cached inverse; setinverse()/getinverse() store and retrieve that cache.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix makes any previously cached inverse stale.
    inv_cache <<- NULL
  }
  get <- function() x
  setinverse <- function(solve) inv_cache <<- solve
  getinverse <- function() inv_cache
  # The returned named list of closures IS the "cache matrix" object.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve(): return the inverse of a makeCacheMatrix() object.  The
## inverse is computed with solve() only on the first call; afterwards the
## memoised copy is served (with a message).  Extra arguments in ... are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute, store for next time, and return.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
  } else {
    message("getting cached data")
    inv
  }
}
|
/cachematrix.R
|
no_license
|
cimar/ProgrammingAssignment2
|
R
| false
| false
| 1,085
|
r
|
## makeCacheMatrix(): wrap a matrix in a list of accessor closures that also
## memoise its inverse.  set() replaces the stored matrix and invalidates the
## cached inverse; setinverse()/getinverse() store and retrieve that cache.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix makes any previously cached inverse stale.
    inv_cache <<- NULL
  }
  get <- function() x
  setinverse <- function(solve) inv_cache <<- solve
  getinverse <- function() inv_cache
  # The returned named list of closures IS the "cache matrix" object.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve(): return the inverse of a makeCacheMatrix() object.  The
## inverse is computed with solve() only on the first call; afterwards the
## memoised copy is served (with a message).  Extra arguments in ... are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute, store for next time, and return.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    inv
  } else {
    message("getting cached data")
    inv
  }
}
|
# Multiple linear regression of MPG on the Cars data (VOL, HP, SP, WT):
# exploratory plots, collinearity checks, model selection, and diagnostics.
# Assumes a data frame `Cars` with columns MPG, VOL, HP, SP, WT is in scope.
#scatter plot matrix
pairs(Cars)
#Correlation matrix
cor(Cars)
#Regression model on all 4 variables & summary
#the linear model of interest
model.car <- lm(MPG~VOL+HP+SP+WT,data=Cars)
summary(model.car)
######## Scatter plot matrix with Correlations inserted in graph
# Panel helper for pairs(): prints the pairwise correlation coefficient in
# the upper panels; text size scales inversely with the label width.
panel.cor <- function(x, y, digits=2, prefix="", cex.cor)
{
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r = (cor(x, y))
  txt <- format(c(r, 0.123456789), digits=digits)[1]
  txt <- paste(prefix, txt, sep="")
  if(missing(cex.cor)) cex <- 0.4/strwidth(txt)
  text(0.5, 0.5, txt, cex = cex)
}
pairs(Cars, upper.panel=panel.cor,main="Scatter Plot Matrix with Correlation Coefficients")
#Regress individually on non-significant variables
model.carV <- lm(MPG~VOL,data = Cars)
summary(model.carV)
model.carW <- lm(MPG~WT,data = Cars)
summary(model.carW)
model.carVW <- lm(MPG~VOL+WT,data = Cars)
summary(model.carVW)
#Check for multicollinearity
library(car)
car::vif(model.car)
#Treating collinearity using backward subset selection methods
library("MASS")
stepAIC(model.car)
plot(model.car) #Model validation plots (Hint: enter in console to get all 4 plots)
#After treating collinearity, we drop the WT variable & create a new model
# BUG FIX: the original refit was lm(MPG~VOL+HP+SP+WT, ...), which still
# included WT and contradicted the comment above -- WT is now dropped.
model.final <- lm( MPG~VOL+HP+SP,data = Cars)
summary(model.final)
#Diagnostic plots
# Residuals vs regressors, QQ plots, standardized residuals vs fitted
plot(model.car)
residualPlots(model.car)
#Added variable plots
avPlots(model.car,id.n=2,id.cex=0.7)
#QQ plots of studentised residuals
qqPlot(model.car)
#Deletion diagnostics
influenceIndexPlot(model.car) #index plots of the influence measures
# Drop observation 77 (flagged influential above) for a sensitivity re-fit.
Cars_D77 <- Cars[-77,]
|
/cars.R
|
no_license
|
AkshayChopade07/R-Codes
|
R
| false
| false
| 1,730
|
r
|
# Multiple linear regression of MPG on the Cars data (VOL, HP, SP, WT):
# exploratory plots, collinearity checks, model selection, and diagnostics.
# Assumes a data frame `Cars` with columns MPG, VOL, HP, SP, WT is in scope.
#scatter plot matrix
pairs(Cars)
#Correlation matrix
cor(Cars)
#Regression model on all 4 variables & summary
#the linear model of interest
model.car <- lm(MPG~VOL+HP+SP+WT,data=Cars)
summary(model.car)
######## Scatter plot matrix with Correlations inserted in graph
# Panel helper for pairs(): prints the pairwise correlation coefficient in
# the upper panels; text size scales inversely with the label width.
panel.cor <- function(x, y, digits=2, prefix="", cex.cor)
{
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r = (cor(x, y))
  txt <- format(c(r, 0.123456789), digits=digits)[1]
  txt <- paste(prefix, txt, sep="")
  if(missing(cex.cor)) cex <- 0.4/strwidth(txt)
  text(0.5, 0.5, txt, cex = cex)
}
pairs(Cars, upper.panel=panel.cor,main="Scatter Plot Matrix with Correlation Coefficients")
#Regress individually on non-significant variables
model.carV <- lm(MPG~VOL,data = Cars)
summary(model.carV)
model.carW <- lm(MPG~WT,data = Cars)
summary(model.carW)
model.carVW <- lm(MPG~VOL+WT,data = Cars)
summary(model.carVW)
#Check for multicollinearity
library(car)
car::vif(model.car)
#Treating collinearity using backward subset selection methods
library("MASS")
stepAIC(model.car)
plot(model.car) #Model validation plots (Hint: enter in console to get all 4 plots)
#After treating collinearity, we drop the WT variable & create a new model
# BUG FIX: the original refit was lm(MPG~VOL+HP+SP+WT, ...), which still
# included WT and contradicted the comment above -- WT is now dropped.
model.final <- lm( MPG~VOL+HP+SP,data = Cars)
summary(model.final)
#Diagnostic plots
# Residuals vs regressors, QQ plots, standardized residuals vs fitted
plot(model.car)
residualPlots(model.car)
#Added variable plots
avPlots(model.car,id.n=2,id.cex=0.7)
#QQ plots of studentised residuals
qqPlot(model.car)
#Deletion diagnostics
influenceIndexPlot(model.car) #index plots of the influence measures
# Drop observation 77 (flagged influential above) for a sensitivity re-fit.
Cars_D77 <- Cars[-77,]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Clustering.R
\name{get_fcs_cell_clustering_vector}
\alias{get_fcs_cell_clustering_vector}
\title{Create the cluster annotation as one vector for cluster info extraction from fcs file}
\usage{
get_fcs_cell_clustering_vector(cell_clustering_list)
}
\arguments{
\item{cell_clustering_list}{list of clusters for each sample}
}
\value{
}
\description{
Create the cluster annotation as one vector for cluster info extraction from fcs file
}
|
/man/get_fcs_cell_clustering_vector.Rd
|
no_license
|
AlexanderKononov/cytofBrowserLite
|
R
| false
| true
| 514
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Clustering.R
\name{get_fcs_cell_clustering_vector}
\alias{get_fcs_cell_clustering_vector}
\title{Create the cluster annotation as one vector for cluster info extraction from fcs file}
\usage{
get_fcs_cell_clustering_vector(cell_clustering_list)
}
\arguments{
\item{cell_clustering_list}{list of clusters for each sample}
}
\value{
}
\description{
Create the cluster annotation as one vector for cluster info extraction from fcs file
}
|
#' @title k-Nearest-Neighbor Regression Learner
#'
#' @name mlr_learners_regr.kknn
#'
#' @description
#' k-Nearest-Neighbor regression.
#' Calls [kknn::kknn()] from package \CRANpkg{kknn}.
#'
#' @section Initial parameter values:
#' - `store_model`:
#' - See note.
#'
#' @template note_kknn
#'
#' @templateVar id regr.kknn
#' @template learner
#'
#' @references
#' `r format_bib("hechenbichler_2004", "samworth_2012", "cover_1967")`
#'
#' @export
#' @template seealso_learner
#' @template example
LearnerRegrKKNN = R6Class("LearnerRegrKKNN",
  inherit = LearnerRegr,
  public = list(
    #' @description
    #' Creates a new instance of this [R6][R6::R6Class] class.
    initialize = function() {
      ps = ps(
        k = p_int(default = 7L, lower = 1L, tags = "train"),
        distance = p_dbl(0, default = 2, tags = "train"),
        kernel = p_fct(c("rectangular", "triangular", "epanechnikov", "biweight", "triweight", "cos", "inv", "gaussian", "rank", "optimal"), default = "optimal", tags = "train"),
        scale = p_lgl(default = TRUE, tags = "train"),
        ykernel = p_uty(default = NULL, tags = "train"),
        store_model = p_lgl(default = FALSE, tags = "train")
      )
      # Set k explicitly so the learner is usable without user-supplied params.
      ps$values = list(k = 7L)
      super$initialize(
        id = "regr.kknn",
        param_set = ps,
        feature_types = c("logical", "integer", "numeric", "factor", "ordered"),
        packages = c("mlr3learners", "kknn"),
        man = "mlr3learners::mlr_learners_regr.kknn"
      )
    }
  ),
  private = list(
    # kknn fits lazily at predict time, so "training" here only validates k
    # and stashes the formula/data for the later kknn::kknn() call.
    .train = function(task) {
      # https://github.com/mlr-org/mlr3learners/issues/191
      pv = self$param_set$get_values(tags = "train")
      # Fail early with a clear message: kknn needs fewer neighbours than rows.
      if (pv$k >= task$nrow) {
        stopf("Parameter k = %i must be smaller than the number of observations n = %i",
          pv$k, task$nrow)
      }
      list(
        formula = task$formula(),
        data = task$data(),
        pv = pv,
        kknn = NULL
      )
    },
    .predict = function(task) {
      model = self$model
      newdata = ordered_features(task, self)
      # Predict-time parameter values override those captured at train time.
      pv = insert_named(model$pv, self$param_set$get_values(tags = "predict"))
      with_package("kknn", { # https://github.com/KlausVigo/kknn/issues/16
        p = invoke(kknn::kknn,
          formula = model$formula, train = model$data,
          test = newdata, .args = remove_named(pv, "store_model"))
      })
      # Optionally keep the fitted kknn object in the learner state for
      # later inspection (store_model is stripped from the kknn call above).
      if (isTRUE(pv$store_model)) {
        self$state$model$kknn = p
      }
      list(response = p$fitted.values)
    }
  )
)
#' @include aaa.R
learners[["regr.kknn"]] = LearnerRegrKKNN
|
/R/LearnerRegrKKNN.R
|
no_license
|
cran/mlr3learners
|
R
| false
| false
| 2,583
|
r
|
#' @title k-Nearest-Neighbor Regression Learner
#'
#' @name mlr_learners_regr.kknn
#'
#' @description
#' k-Nearest-Neighbor regression.
#' Calls [kknn::kknn()] from package \CRANpkg{kknn}.
#'
#' @section Initial parameter values:
#' - `store_model`:
#' - See note.
#'
#' @template note_kknn
#'
#' @templateVar id regr.kknn
#' @template learner
#'
#' @references
#' `r format_bib("hechenbichler_2004", "samworth_2012", "cover_1967")`
#'
#' @export
#' @template seealso_learner
#' @template example
LearnerRegrKKNN = R6Class("LearnerRegrKKNN",
  inherit = LearnerRegr,
  public = list(
    #' @description
    #' Creates a new instance of this [R6][R6::R6Class] class.
    initialize = function() {
      ps = ps(
        k = p_int(default = 7L, lower = 1L, tags = "train"),
        distance = p_dbl(0, default = 2, tags = "train"),
        kernel = p_fct(c("rectangular", "triangular", "epanechnikov", "biweight", "triweight", "cos", "inv", "gaussian", "rank", "optimal"), default = "optimal", tags = "train"),
        scale = p_lgl(default = TRUE, tags = "train"),
        ykernel = p_uty(default = NULL, tags = "train"),
        store_model = p_lgl(default = FALSE, tags = "train")
      )
      # Set k explicitly so the learner is usable without user-supplied params.
      ps$values = list(k = 7L)
      super$initialize(
        id = "regr.kknn",
        param_set = ps,
        feature_types = c("logical", "integer", "numeric", "factor", "ordered"),
        packages = c("mlr3learners", "kknn"),
        man = "mlr3learners::mlr_learners_regr.kknn"
      )
    }
  ),
  private = list(
    # kknn fits lazily at predict time, so "training" here only validates k
    # and stashes the formula/data for the later kknn::kknn() call.
    .train = function(task) {
      # https://github.com/mlr-org/mlr3learners/issues/191
      pv = self$param_set$get_values(tags = "train")
      # Fail early with a clear message: kknn needs fewer neighbours than rows.
      if (pv$k >= task$nrow) {
        stopf("Parameter k = %i must be smaller than the number of observations n = %i",
          pv$k, task$nrow)
      }
      list(
        formula = task$formula(),
        data = task$data(),
        pv = pv,
        kknn = NULL
      )
    },
    .predict = function(task) {
      model = self$model
      newdata = ordered_features(task, self)
      # Predict-time parameter values override those captured at train time.
      pv = insert_named(model$pv, self$param_set$get_values(tags = "predict"))
      with_package("kknn", { # https://github.com/KlausVigo/kknn/issues/16
        p = invoke(kknn::kknn,
          formula = model$formula, train = model$data,
          test = newdata, .args = remove_named(pv, "store_model"))
      })
      # Optionally keep the fitted kknn object in the learner state for
      # later inspection (store_model is stripped from the kknn call above).
      if (isTRUE(pv$store_model)) {
        self$state$model$kknn = p
      }
      list(response = p$fitted.values)
    }
  )
)
#' @include aaa.R
learners[["regr.kknn"]] = LearnerRegrKKNN
|
# Load (or build and cache) the risk-group lookup table.
#
# Reads group.data.csv from the configured data directory when it exists;
# otherwise derives the table from the real-life measures script, adds the
# MethUse/SexRisk indicator columns, and writes the CSV cache for next time.
# Relies on a `localsettings` list being defined in the calling environment.
# Returns a data frame with Adjusted_subid, RiskCat, RiskLabel (and, when
# freshly built, MethUse/SexRisk).
get_risk_group <- function (){
  cwd<-getwd()
  # BUG FIX: restore the working directory via on.exit() so it is reset even
  # if the sourced script or any step below errors (the original only
  # restored it on the success path).
  on.exit(setwd(cwd), add = TRUE)
  group.data<-NULL
  file.data<-paste0(localsettings$data.dir,"group.data.csv")
  if (file.exists(file.data)){
    group.data<-read.csv(file=file.data)
  }else{
    source("/Users/benjaminsmith/Documents/msm-project/behavioral-analysis/load_real_life_measures_v2.R")
    measures.rl = load_real_life_measures_v2()
    group.data<-measures.rl[,c("Adjusted_subid","RiskCat","RiskLabel")]
    # Indicator columns derived from the label text.
    group.data$MethUse<-!grepl("No Meth",as.character(group.data$RiskLabel))
    group.data$SexRisk<-grepl("Risky",as.character(group.data$RiskLabel))
    write.csv(group.data,file = file.data,row.names = FALSE)
  }
  return(group.data)
}
|
/get_risk_group.R
|
permissive
|
bjsmith/reversallearning
|
R
| false
| false
| 684
|
r
|
# Load (or build and cache) the risk-group lookup table.
#
# Reads group.data.csv from the configured data directory when it exists;
# otherwise derives the table from the real-life measures script, adds the
# MethUse/SexRisk indicator columns, and writes the CSV cache for next time.
# Relies on a `localsettings` list being defined in the calling environment.
# Returns a data frame with Adjusted_subid, RiskCat, RiskLabel (and, when
# freshly built, MethUse/SexRisk).
get_risk_group <- function (){
  cwd<-getwd()
  # BUG FIX: restore the working directory via on.exit() so it is reset even
  # if the sourced script or any step below errors (the original only
  # restored it on the success path).
  on.exit(setwd(cwd), add = TRUE)
  group.data<-NULL
  file.data<-paste0(localsettings$data.dir,"group.data.csv")
  if (file.exists(file.data)){
    group.data<-read.csv(file=file.data)
  }else{
    source("/Users/benjaminsmith/Documents/msm-project/behavioral-analysis/load_real_life_measures_v2.R")
    measures.rl = load_real_life_measures_v2()
    group.data<-measures.rl[,c("Adjusted_subid","RiskCat","RiskLabel")]
    # Indicator columns derived from the label text.
    group.data$MethUse<-!grepl("No Meth",as.character(group.data$RiskLabel))
    group.data$SexRisk<-grepl("Risky",as.character(group.data$RiskLabel))
    write.csv(group.data,file = file.data,row.names = FALSE)
  }
  return(group.data)
}
|
# Accomplish the following three goals:
#
# 1. select() all columns that do NOT contain the word "total",
# since if we have the male and female data, we can always
# recreate the total count in a separate column, if we want it.
# Hint: Use the contains() function, which you'll
# find detailed in 'Special functions' section of ?select.
#
# 2. gather() all columns EXCEPT score_range, using
# key = part_sex and value = count.
#
# 3. separate() part_sex into two separate variables (columns),
# called "part" and "sex", respectively. You may need to check
# the 'Examples' section of ?separate to remember how the 'into'
# argument should be phrased.
#
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr;
# it is kept here because this exercise is specifically about gather().
sat %>%
  select(-contains("total")) %>%
  gather(part_sex, count, -score_range) %>%
  separate(part_sex, c("part", "sex")) %>%
  print
|
/mit-ml/stats/14_310x_Intro_to_R/Tidying_Data_with_tidyr/scripts/script8-correct.R
|
permissive
|
stepinski/machinelearning
|
R
| false
| false
| 790
|
r
|
# Accomplish the following three goals:
#
# 1. select() all columns that do NOT contain the word "total",
# since if we have the male and female data, we can always
# recreate the total count in a separate column, if we want it.
# Hint: Use the contains() function, which you'll
# find detailed in 'Special functions' section of ?select.
#
# 2. gather() all columns EXCEPT score_range, using
# key = part_sex and value = count.
#
# 3. separate() part_sex into two separate variables (columns),
# called "part" and "sex", respectively. You may need to check
# the 'Examples' section of ?separate to remember how the 'into'
# argument should be phrased.
#
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr;
# it is kept here because this exercise is specifically about gather().
sat %>%
  select(-contains("total")) %>%
  gather(part_sex, count, -score_range) %>%
  separate(part_sex, c("part", "sex")) %>%
  print
|
#this is the simulated version of the data file
datafile0<-'./Data/NCDB.csv'
lookup <- './Data/NCDB_CodeLookup.csv'
# Source a site-local config.R (if present) last -- presumably so it can
# override the simulated defaults above; confirm against the private config.
if(file.exists('config.R')) source('config.R')
|
/config_public.R
|
no_license
|
frkndrsn/TSCI5050
|
R
| false
| false
| 163
|
r
|
#this is the simulated version of the data file
datafile0<-'./Data/NCDB.csv'
lookup <- './Data/NCDB_CodeLookup.csv'
# Source a site-local config.R (if present) last -- presumably so it can
# override the simulated defaults above; confirm against the private config.
if(file.exists('config.R')) source('config.R')
|
###############################################################################
# Basic setup
###############################################################################
# Source setup file
source(file.path("/xxx/_setup.R"))
source(file.path("/xxx/funcs.R"))
# Set working directory
study="/xxx/"
setwd(study)
# Load required libraries
pacman::p_load(haven, data.table, dplyr, stringr, striprtf, arsenal)
###############################################################################
# Read in required datasets
###############################################################################
library(foreign)
adsl = data.table(read.xport(file.path(setup_list$analysis, "adsl.xpt")))
# Keep safety-population, treatment-emergent records with a coded AE term.
adae = data.table(read.xport(file.path(setup_list$analysis, "adae_j.xpt"))
)[SAFFL=="Y" & !is.na(AEDECOD) & TRTEMFL=="Y"]
###############################################################################
# Derivations
###############################################################################
# BUG FIX: the original call was `merge(adsl, adae,)` -- the trailing comma
# creates an empty argument and errors at run time.  merge() joins on all
# columns common to both tables by default.
adae2 <- merge(adsl, adae)
# Required variables
vars = c("USUBJID","AESOC","AEDECOD","AESOCJ","AEDECODJ","TRTEMFL",
         "TRT01AN","TRT01A")
grd = copy(adae2[,..vars])
# Count subjects with at least one event overall (ORD = 1) and per SOC /
# preferred term (ORD = 2), then transpose so each treatment arm becomes a
# column.  `trt`: TRT01AN codes to keep; `col`: prefix for output column
# names (col == 2 is the "all patients" totals block).
cnt = function(trt,col)
{
  # For all columns except total patients
  if(col!=2)
  {
    grd = grd[TRT01AN %in% trt]
    grd1 = unique(grd[,.(USUBJID,TRT01AN,TRT01A)])
    cnt1 = grd1[, .N,by = c("TRT01AN", "TRT01A")][
      ,`:=` (AEDECOD = "Number of subjects with at least one event",
             AESOC = "AAA",
             ORD=1)]
    grd2 = unique(grd[,.(USUBJID,TRT01AN,TRT01A,AESOC)])
    cnt2 = grd2[, .N,by = c("TRT01AN", "TRT01A", "AESOC")][,ORD:=2]
    grd3 = unique(grd[,.(USUBJID,TRT01AN,TRT01A,AESOC,AEDECOD)])
    cnt3 = grd3[, .N,by = c("TRT01AN", "TRT01A", "AESOC","AEDECOD")][,ORD:=2]
    # If at least 1 AE in any grade
    if(nrow(cnt1)>0 | nrow(cnt2)>0)
    {
      combine = data.table(rbind(cnt1,cnt2,cnt3,fill=TRUE))
      # SUBORD orders each SOC header row (1) before its preferred terms (2).
      combine = combine[,`:=` (SUBORD = ifelse(is.na(AEDECOD),1,2),
                               AEDECOD = ifelse(is.na(AEDECOD),AESOC,AEDECOD))][order(AESOC)]
      trans_dset = dcast(combine,
                         ORD+SUBORD+AESOC+AEDECOD ~ TRT01AN,
                         value.var = "N")
      colnames(trans_dset) = paste(as.character(col), colnames(trans_dset), sep="_")
    } else #If 0 AE in all grades
    {
      # Return an empty, correctly named shell so downstream merges still work.
      cols = paste(col,c("ORD","SUBORD","AESOC","AEDECOD", trt), sep="_")
      trans_dset = data.table(ord = numeric(), subord = numeric(), aesoc = character(), aedecod = character(), t1 = character())
      setnames(trans_dset, colnames(trans_dset), cols)
    }
  } else if(col==2) #For all patients
  {
    # NOTE(review): this branch drops placebo (TRT01AN==0) and ignores `trt`
    # entirely -- confirm the "all patients" total is meant to cover active
    # arms only.
    grd = grd[!(TRT01AN==0)]
    grd1 = unique(grd[,.(USUBJID)])
    cnt1 = data.table(N=grd1[,.N])[,`:=` (AEDECOD = "Number of subjects with at least one event",
                                          AESOC = "AAA")][,ORD:=1]
    grd2 = unique(grd[,.(USUBJID,AESOC)])
    cnt2 = grd2[,.N, by = c("AESOC")][,ORD:=2]
    grd3 = unique(grd[,.(USUBJID,AESOC,AEDECOD)])
    cnt3 = grd3[,.N, by = c("AESOC","AEDECOD")][,ORD:=2]
    combine = data.table(rbind(cnt1,cnt2,cnt3,fill=TRUE))
    combine = combine[,`:=` (SUBORD = ifelse(is.na(AEDECOD),1,2),
                             AEDECOD = ifelse(is.na(AEDECOD),AESOC,AEDECOD))][order(AESOC)]
    trans_dset = combine
    colnames(trans_dset) = paste(as.character(col), colnames(trans_dset), sep="_")
  }
  return(trans_dset)
}
# Overall study period- Act High arm
o_acth = cnt(trt = 81, col = 1)
# Overall study period- Act low arm
o_actl = cnt(trt = 54, col = 1)
# Overall study period- placebo arm
o_plac = cnt(trt = 0, col = 1)
# Overall study period- all patients count
o_all = cnt(trt = c(54,81,0), col = 2)
View(o_acth)
View(o_actl)
View(o_plac)
View(o_all)
# Add by variables and merge datasets
# BUG FIX: removed the original `o_act = as.data.table(o_act)` statement --
# no object named o_act is ever created (the arm tables are o_acth / o_actl
# / o_plac), so that line always errored.
setkeyv(o_acth, c("1_AESOC", "1_AEDECOD", "1_ORD","1_SUBORD"))
setkeyv(o_actl, c("1_AESOC", "1_AEDECOD", "1_ORD","1_SUBORD"))
setkeyv(o_plac, c("1_AESOC", "1_AEDECOD", "1_ORD","1_SUBORD"))
setkeyv(o_all, c("2_AESOC", "2_AEDECOD", "2_ORD","2_SUBORD"))
final = copy(o_acth[o_actl[o_plac[o_all]]])
setnames(final, c("1_AESOC","1_AEDECOD","1_ORD","1_SUBORD"), c("AESOC","AEDECOD","ORD","SUBORD"))
# Calculate big N values
Ntrt = copy(adsl[SAFFL=="Y", .N, by="TRT01AN"])
Ntot = copy(adsl[SAFFL=="Y", .N])
# Calculate percentages
final = final[, `:=` (C1 = percent(`1_81`, Ntrt[TRT01AN==81,N], 1),
                      C2 = percent(`1_54`, Ntrt[TRT01AN==54,N], 1),
                      C3 = percent(`1_0`, Ntrt[TRT01AN==0,N], 1),
                      C4 = percent(`2_N`, Ntot, 1)
)][order(ORD,AESOC,SUBORD)]
View(final)
|
/t_ae.R
|
no_license
|
izumin36/test_rep01
|
R
| false
| false
| 4,889
|
r
|
###############################################################################
# Basic setup
###############################################################################
# Source setup file
source(file.path("/xxx/_setup.R"))
source(file.path("/xxx/funcs.R"))
# Set working directory
study="/xxx/"
setwd(study)
# Load required libraries
pacman::p_load(haven, data.table, dplyr, stringr, striprtf, arsenal)
###############################################################################
# Read in required datasets
###############################################################################
library(foreign)
adsl = data.table(read.xport(file.path(setup_list$analysis, "adsl.xpt")))
# Keep safety-population, treatment-emergent records with a coded AE term.
adae = data.table(read.xport(file.path(setup_list$analysis, "adae_j.xpt"))
)[SAFFL=="Y" & !is.na(AEDECOD) & TRTEMFL=="Y"]
###############################################################################
# Derivations
###############################################################################
# BUG FIX: the original call was `merge(adsl, adae,)` -- the trailing comma
# creates an empty argument and errors at run time.  merge() joins on all
# columns common to both tables by default.
adae2 <- merge(adsl, adae)
# Required variables
vars = c("USUBJID","AESOC","AEDECOD","AESOCJ","AEDECODJ","TRTEMFL",
         "TRT01AN","TRT01A")
grd = copy(adae2[,..vars])
# Count subjects with at least one event overall (ORD = 1) and per SOC /
# preferred term (ORD = 2), then transpose so each treatment arm becomes a
# column.  `trt`: TRT01AN codes to keep; `col`: prefix for output column
# names (col == 2 is the "all patients" totals block).
cnt = function(trt,col)
{
  # For all columns except total patients
  if(col!=2)
  {
    grd = grd[TRT01AN %in% trt]
    grd1 = unique(grd[,.(USUBJID,TRT01AN,TRT01A)])
    cnt1 = grd1[, .N,by = c("TRT01AN", "TRT01A")][
      ,`:=` (AEDECOD = "Number of subjects with at least one event",
             AESOC = "AAA",
             ORD=1)]
    grd2 = unique(grd[,.(USUBJID,TRT01AN,TRT01A,AESOC)])
    cnt2 = grd2[, .N,by = c("TRT01AN", "TRT01A", "AESOC")][,ORD:=2]
    grd3 = unique(grd[,.(USUBJID,TRT01AN,TRT01A,AESOC,AEDECOD)])
    cnt3 = grd3[, .N,by = c("TRT01AN", "TRT01A", "AESOC","AEDECOD")][,ORD:=2]
    # If at least 1 AE in any grade
    if(nrow(cnt1)>0 | nrow(cnt2)>0)
    {
      combine = data.table(rbind(cnt1,cnt2,cnt3,fill=TRUE))
      # SUBORD orders each SOC header row (1) before its preferred terms (2).
      combine = combine[,`:=` (SUBORD = ifelse(is.na(AEDECOD),1,2),
                               AEDECOD = ifelse(is.na(AEDECOD),AESOC,AEDECOD))][order(AESOC)]
      trans_dset = dcast(combine,
                         ORD+SUBORD+AESOC+AEDECOD ~ TRT01AN,
                         value.var = "N")
      colnames(trans_dset) = paste(as.character(col), colnames(trans_dset), sep="_")
    } else #If 0 AE in all grades
    {
      # Return an empty, correctly named shell so downstream merges still work.
      cols = paste(col,c("ORD","SUBORD","AESOC","AEDECOD", trt), sep="_")
      trans_dset = data.table(ord = numeric(), subord = numeric(), aesoc = character(), aedecod = character(), t1 = character())
      setnames(trans_dset, colnames(trans_dset), cols)
    }
  } else if(col==2) #For all patients
  {
    # NOTE(review): this branch drops placebo (TRT01AN==0) and ignores `trt`
    # entirely -- confirm the "all patients" total is meant to cover active
    # arms only.
    grd = grd[!(TRT01AN==0)]
    grd1 = unique(grd[,.(USUBJID)])
    cnt1 = data.table(N=grd1[,.N])[,`:=` (AEDECOD = "Number of subjects with at least one event",
                                          AESOC = "AAA")][,ORD:=1]
    grd2 = unique(grd[,.(USUBJID,AESOC)])
    cnt2 = grd2[,.N, by = c("AESOC")][,ORD:=2]
    grd3 = unique(grd[,.(USUBJID,AESOC,AEDECOD)])
    cnt3 = grd3[,.N, by = c("AESOC","AEDECOD")][,ORD:=2]
    combine = data.table(rbind(cnt1,cnt2,cnt3,fill=TRUE))
    combine = combine[,`:=` (SUBORD = ifelse(is.na(AEDECOD),1,2),
                             AEDECOD = ifelse(is.na(AEDECOD),AESOC,AEDECOD))][order(AESOC)]
    trans_dset = combine
    colnames(trans_dset) = paste(as.character(col), colnames(trans_dset), sep="_")
  }
  return(trans_dset)
}
# Overall study period- Act High arm
o_acth = cnt(trt = 81, col = 1)
# Overall study period- Act low arm
o_actl = cnt(trt = 54, col = 1)
# Overall study period- placebo arm
o_plac = cnt(trt = 0, col = 1)
# Overall study period- all patients count
o_all = cnt(trt = c(54,81,0), col = 2)
View(o_acth)
View(o_actl)
View(o_plac)
View(o_all)
# Add by variables and merge datasets
# BUG FIX: removed the original `o_act = as.data.table(o_act)` statement --
# no object named o_act is ever created (the arm tables are o_acth / o_actl
# / o_plac), so that line always errored.
setkeyv(o_acth, c("1_AESOC", "1_AEDECOD", "1_ORD","1_SUBORD"))
setkeyv(o_actl, c("1_AESOC", "1_AEDECOD", "1_ORD","1_SUBORD"))
setkeyv(o_plac, c("1_AESOC", "1_AEDECOD", "1_ORD","1_SUBORD"))
setkeyv(o_all, c("2_AESOC", "2_AEDECOD", "2_ORD","2_SUBORD"))
final = copy(o_acth[o_actl[o_plac[o_all]]])
setnames(final, c("1_AESOC","1_AEDECOD","1_ORD","1_SUBORD"), c("AESOC","AEDECOD","ORD","SUBORD"))
# Calculate big N values
Ntrt = copy(adsl[SAFFL=="Y", .N, by="TRT01AN"])
Ntot = copy(adsl[SAFFL=="Y", .N])
# Calculate percentages
final = final[, `:=` (C1 = percent(`1_81`, Ntrt[TRT01AN==81,N], 1),
                      C2 = percent(`1_54`, Ntrt[TRT01AN==54,N], 1),
                      C3 = percent(`1_0`, Ntrt[TRT01AN==0,N], 1),
                      C4 = percent(`2_N`, Ntot, 1)
)][order(ORD,AESOC,SUBORD)]
View(final)
|
#' Create Load Report
#'
#' Create a 2-page pdf file report of a rating-curve load model. The
#'report contains the text output and 6 diagnostic plots.
#'
#' @param x the load model.
#' @param file the output file base name; the .pdf suffix
#'is appended to make the actual file name. if missing, then the
#'name of \code{x} is used as the base name.
#' @return The actual file name is returned invisibly.
#' @export
loadReport <- function(x, file) {
  ## Coding history:
  ##    2013Jul29 DLLorenz Original version from S+ library
  ##
  if(missing(file))
    file <- deparse(substitute(x))
  ## setPDF opens the pdf device; per the usage below, retval[[1]] is the
  ## device id and retval[[2]] the output base name.
  retval <- setPDF(basename=file)
  plot.new()
  ## Draw the text (page 1): the model's printed output, monospaced,
  ## anchored at the top-left of an empty plot region.
  par(mar=c(0,0,0,0), usr=c(0,1,0,1))
  txt <- capture.output(x)
  text(0, 1, paste(txt, collapse="\n"), family="mono", adj=c(0,1))
  ## 6 diagnostic plots (page 2) in a 2-column x 3-row layout.
  AA.lo <- setLayout(num.cols=2L, num.rows=3L)
  for(i in seq(6)) {
    setGraph(i, AA.lo)
    plot(x, which=i, set.up=FALSE)
  }
  ## All done, close the graph
  dev.off(retval[[1]])
  invisible(paste(retval[[2]], ".pdf", sep=""))
}
|
/R/loadReport.R
|
permissive
|
katakagi/rloadest_test
|
R
| false
| false
| 1,051
|
r
|
#' Create Load Report
#'
#' Create a 2-page pdf file report of a rating-curve load model. The
#'report contains the text output and 6 diagnostic plots.
#'
#' @param x the load model.
#' @param file the output file base name; the .pdf suffix
#'is appended to make the actual file name. if missing, then the
#'name of \code{x} is used as the base name.
#' @return The actual file name is returned invisibly.
#' @export
loadReport <- function(x, file) {
  ## Coding history:
  ##    2013Jul29 DLLorenz Original version from S+ library
  ##
  if(missing(file))
    file <- deparse(substitute(x))
  ## setPDF opens the pdf device; per the usage below, retval[[1]] is the
  ## device id and retval[[2]] the output base name.
  retval <- setPDF(basename=file)
  plot.new()
  ## Draw the text (page 1): the model's printed output, monospaced,
  ## anchored at the top-left of an empty plot region.
  par(mar=c(0,0,0,0), usr=c(0,1,0,1))
  txt <- capture.output(x)
  text(0, 1, paste(txt, collapse="\n"), family="mono", adj=c(0,1))
  ## 6 diagnostic plots (page 2) in a 2-column x 3-row layout.
  AA.lo <- setLayout(num.cols=2L, num.rows=3L)
  for(i in seq(6)) {
    setGraph(i, AA.lo)
    plot(x, which=i, set.up=FALSE)
  }
  ## All done, close the graph
  dev.off(retval[[1]])
  invisible(paste(retval[[2]], ".pdf", sep=""))
}
|
#this is a quick script to clean the names in the appendix from McKechnie & Wolf
library(ape)
tree <- read.tree("bird.tre")
#set strings as factors to false to avoid problems later
traits <- read.csv("McKechnie_Wolf.csv", stringsAsFactors=FALSE)
#what names are in the trait database that aren't in the tree?
examine <- setdiff(traits$species, tree$tip.label)
#send the wrong names out to excel to check
write.csv(examine, "look_here.csv", row.names=FALSE)
#manually cleaned. bring the cleaned back in, make sure to set no factors
use <- read.csv("McKechnie_Wolf_clean1.csv", stringsAsFactors=FALSE)
#replace each bad name with its cleaned counterpart. match() keys every trait
#row to the corresponding row of the lookup table, so the replacement is
#correct regardless of row order or repeated species (the previous
#`traits$species[traits$species %in% use$original] <- use$cleaned` form
#recycled the cleaned vector and silently relied on matching row order)
hit <- match(traits$species, use$original)
traits$species[!is.na(hit)] <- use$cleaned[hit[!is.na(hit)]]
#now check to see if there are any additional problems
examine <- setdiff(traits$species, tree$tip.label)
write.csv(examine, "look_here.csv", row.names=FALSE)
#bring in the twice cleaned data
use <- read.csv("McKechnie_Wolf_clean2.csv", stringsAsFactors=FALSE)
hit <- match(traits$species, use$original)
traits$species[!is.na(hit)] <- use$cleaned[hit[!is.na(hit)]]
#ok everything matches. save this file out
write.csv(traits, "McKechnie_Wolf.csv", row.names=FALSE)
|
/R/script_clean_McWolf_bird.R
|
permissive
|
uyedaj/bmr
|
R
| false
| false
| 1,199
|
r
|
#this is a quick script to clean the names in the appendix from McKechnie & Wolf
library(ape)
tree <- read.tree("bird.tre")
#set strings as factors to false to avoid problems later
traits <- read.csv("McKechnie_Wolf.csv", stringsAsFactors=FALSE)
#what names are in the trait database that aren't in the tree?
examine <- setdiff(traits$species, tree$tip.label)
#send the wrong names out to excel to check
write.csv(examine, "look_here.csv", row.names=FALSE)
#manually cleaned. bring the cleaned back in, make sure to set no factors
use <- read.csv("McKechnie_Wolf_clean1.csv", stringsAsFactors=FALSE)
#subset to instances where the bad name equals the original name, and replace with cleaned
# NOTE(review): this assignment recycles use$cleaned into the selected slots and
# is only correct when the matched rows occur in the same order as use$original
# (and each bad name appears exactly once) -- verify against the csv, or use a
# match()-keyed replacement, before rerunning on new data
traits$species[traits$species %in% use$original] <- use$cleaned
#now check to see if there are any additional problems
examine <- setdiff(traits$species, tree$tip.label)
write.csv(examine, "look_here.csv", row.names=FALSE)
#bring in the twice cleaned data
use <- read.csv("McKechnie_Wolf_clean2.csv", stringsAsFactors=FALSE)
# Same ordering caveat as above applies to this second replacement
traits$species[traits$species %in% use$original] <- use$cleaned
#ok everything matches. save this file out
write.csv(traits, "McKechnie_Wolf.csv", row.names=FALSE)
|
# ---- Collect p-values for the 0.08 effect-size simulations ----
#merge power results with the effect size as 0.08 level
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="simu_result",full.names=T)
total <- 0
#n.loop = number of simulation setting* number of sample size setting
n.loop <- 12
# First pass: count replicates across all available result files so the
# collection matrices can be preallocated with the right number of rows.
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result",i1,".Rdata")
  if(file%in%files==T){
    load(paste0("./simulation/power/result/simu_result",i1,".Rdata"))
    #result.list is a list of pvalue
    #three different simulation settings: 1. no heterogneity 2. one tumor heter 3. multiple tumor heterogeneity
    #34different sample size were implemented 5000, 25,000 50,000 and 100,000
    #
    #its the output of a foreach parallele result
    #[[1]] and [[2]] share the same structure
    #[[1]] [[1]] is the vector of p_value from FTOP
    #[[1]] [[1]] is a long vector looped by first simulation setting, then sample size (inner loop), 12 different sections. The third loop is the number of replicates in each section
    #[[1]] [[2]] is the vector of p_value from MTOP
    #[[1]] [[3]] is the vector of p_value from standard logistoc regression
    #[[1]] [[4]] is the vector of p_value from MTOP this is because of a previous typo
    #[[1]] [[5]] is the vector of FTOP from complete analysis
    #[[1]] [[6]] is the vector of polytomous model from complete analysis
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
#total = total*2
# One row per replicate, one column per (setting, sample-size) combination
p_global_result <- matrix(0,total,n.loop)
p_mglobal_result <- matrix(0,total,n.loop)
p_standard <- matrix(0,total,n.loop)
p_global_complete <- matrix(0,total,n.loop)
#p_poly <- matrix(0,total,9)
total <- 0
#args 1:2000 contains the simulation results for FTOP, MTOP, standard logistic regressionn, complete FTOP
# Second pass: reload each file and stack its p-values into the matrices,
# using `total` as the running row offset.
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result",i1,".Rdata")
  if(file%in%files==T){
    load(paste0("./simulation/power/result//simu_result",i1,".Rdata"))
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    # Either parallel half may be empty; the three branches avoid 1:0 indexing
    if(temp1==0){
      p_global_result[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp2),] <- matrix(result.list[[2]][[2]],ncol=n.loop)
      p_standard[total+(1:temp2),] <- matrix(result.list[[2]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp2),] <-matrix(result.list[[2]][[5]],ncol=n.loop)
    }else if(temp2==0){
      p_global_result[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp1),] <- matrix(result.list[[1]][[2]],ncol=n.loop)
      p_standard[total+(1:temp1),] <- matrix(result.list[[1]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp1),] <-matrix(result.list[[1]][[5]],ncol=n.loop)
    }else{
      p_global_result[total+(1:temp),] <-rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                               matrix(result.list[[2]][[1]],ncol=n.loop))
      p_mglobal_result[total+(1:temp),] <- rbind(matrix(result.list[[1]][[2]],ncol=n.loop),
                                                 matrix(result.list[[2]][[2]],ncol=n.loop))
      p_standard[total+(1:temp),] <- rbind(matrix(result.list[[1]][[3]],ncol=n.loop),
                                           matrix(result.list[[2]][[3]],ncol=n.loop))
      p_global_complete[total+(1:temp),] <-rbind(matrix(result.list[[1]][[5]],ncol=n.loop),
                                                 matrix(result.list[[2]][[5]],ncol=n.loop))
      #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
      #                                matrix(result.list[[2]][[6]],ncol=9))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
#' Empirical power: fraction of p-values at or below a significance threshold.
#'
#' @param p numeric vector of p-values; NA entries count as non-significant.
#' @param alpha significance threshold.
#' @return proportion of \code{p} that is <= \code{alpha}.
CountPower <- function(p,alpha){
  # Count of significant tests over the full vector length. na.rm = TRUE drops
  # NA comparisons exactly as the original which()-based count did, so NA
  # p-values still contribute to the denominator but never to the numerator.
  sum(p <= alpha, na.rm = TRUE) / length(p)
}
#load results for polytomous model
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="poly_",full.names=T)
total <- 0
#args 1:2000 contains the results for polytomous
# First pass: count polytomous replicates so p_poly can be preallocated
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
p_poly <- matrix(0,total,n.loop)
total <- 0
# Second pass: stack polytomous p-values with the same row-offset bookkeeping
# used for the other methods above.
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_",i1,".Rdata")
  if(file%in%files==T){
    load(paste0(file))
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    if(temp1==0){
      p_poly[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
    }else if(temp2==0){
      p_poly[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
    }else{
      p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                       matrix(result.list[[2]][[1]],ncol=n.loop))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
# apply(p_global_result,2,function(x){CountPower(x,10^-3)})
# apply(p_mglobal_result,2,function(x){CountPower(x,10^-3)})
# apply(p_standard,2,function(x){CountPower(x,10^-3)})
# apply(p_global_complete,2,function(x){CountPower(x,10^-3)})
# apply(p_poly,2,function(x){CountPower(x,thres)})
# Genome-wide significance threshold
thres = 5E-08
#remove standard polytomous function
#unstable outliers
#idx <- which(p_poly[,4]==0)
#p_poly = p_poly[-idx,,drop=F]
# Column-wise empirical power for each method; result columns are, in order:
# FTOP, MTOP, standard logistic, complete-data FTOP, polytomous.
result <- cbind(apply(p_global_result,2,function(x){CountPower(x,thres)}),
                apply(p_mglobal_result,2,function(x){CountPower(x,thres)}),
                apply(p_standard,2,function(x){CountPower(x,thres)}),
                apply(p_global_complete,2,function(x){CountPower(x,thres)}),
                apply(p_poly,2,function(x){CountPower(x,thres)}))
result.1 <- result
#write.csv(result,file=paste0("./simulation/power/result/power.simulation.result.csv") )
write.csv(result,file=paste0("./simulation/power/result/power.simulation.result.csv") )
# ---- Collect p-values for the 0.25 effect-size simulations ----
#merge power results with effectsize as 0.25 level
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="simu_result_0.25_",full.names=T)
total <- 0
# Fewer cells here: 3 (setting, sample-size) combinations instead of 12
n.loop <- 3
# First pass: count replicates to size the collection matrices
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    #result.list is a list of pvalue
    #three different simulation settings: 1. no heterogneity 2. one tumor heter 3. multiple tumor heterogeneity
    #3 different sample size were implemented 5000, 50,000 and 100,000
    #
    #its the output of a foreach parallele result
    #[[1]] and [[2]] share the same structure
    #[[1]] [[1]] is the vector of p_value from FTOP
    #[[1]] [[1]] is a long vector looped by first simulation setting, then sample size (inner loop), 9 different sections
    #[[1]] [[2]] is the vector of p_value from MTOP
    #[[1]] [[3]] is the vector of p_value from standard logistoc regression
    #[[1]] [[4]] is the vector of p_value from MTOP this is because of a previous typo
    #[[1]] [[5]] is the vector of FTOP from complete analysis
    #[[1]] [[6]] is the vector of polytomous model from complete analysis
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
#total = total*2
p_global_result <- matrix(0,total,n.loop)
p_mglobal_result <- matrix(0,total,n.loop)
p_standard <- matrix(0,total,n.loop)
p_global_complete <- matrix(0,total,n.loop)
#p_poly <- matrix(0,total,9)
total <- 0
#args 1:2000 contains the simulation results for FTOP, MTOP, standard logistic regressionn, complete FTOP
# Second pass: stack p-values, tracking the running row offset in `total`
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    # Either parallel half may be empty; branches avoid 1:0 indexing
    if(temp1==0){
      p_global_result[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp2),] <- matrix(result.list[[2]][[2]],ncol=n.loop)
      p_standard[total+(1:temp2),] <- matrix(result.list[[2]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp2),] <-matrix(result.list[[2]][[5]],ncol=n.loop)
    }else if(temp2==0){
      p_global_result[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp1),] <- matrix(result.list[[1]][[2]],ncol=n.loop)
      p_standard[total+(1:temp1),] <- matrix(result.list[[1]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp1),] <-matrix(result.list[[1]][[5]],ncol=n.loop)
    }else{
      p_global_result[total+(1:temp),] <-rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                               matrix(result.list[[2]][[1]],ncol=n.loop))
      p_mglobal_result[total+(1:temp),] <- rbind(matrix(result.list[[1]][[2]],ncol=n.loop),
                                                 matrix(result.list[[2]][[2]],ncol=n.loop))
      p_standard[total+(1:temp),] <- rbind(matrix(result.list[[1]][[3]],ncol=n.loop),
                                           matrix(result.list[[2]][[3]],ncol=n.loop))
      p_global_complete[total+(1:temp),] <-rbind(matrix(result.list[[1]][[5]],ncol=n.loop),
                                                 matrix(result.list[[2]][[5]],ncol=n.loop))
      #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
      #                                matrix(result.list[[2]][[6]],ncol=9))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
# Empirical power: share of the supplied p-values that reach significance.
# NA p-values are dropped by which(), so they count as non-significant while
# still contributing to the denominator.
CountPower <- function(p,alpha){
  sig_idx <- which(p <= alpha)
  return(length(sig_idx)/length(p))
}
#load results for polytomous model
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="poly_0.25_",full.names=T)
total <- 0
#args 1:2000 contains the results for polytomous
# First pass: count polytomous 0.25 replicates so p_poly can be preallocated
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
p_poly <- matrix(0,total,n.loop)
total <- 0
# Second pass: stack polytomous p-values with the usual row-offset bookkeeping
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(paste0(file))
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    if(temp1==0){
      p_poly[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
    }else if(temp2==0){
      p_poly[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
    }else{
      p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                       matrix(result.list[[2]][[1]],ncol=n.loop))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
# apply(p_global_result,2,function(x){CountPower(x,10^-3)})
# apply(p_mglobal_result,2,function(x){CountPower(x,10^-3)})
# apply(p_standard,2,function(x){CountPower(x,10^-3)})
# apply(p_global_complete,2,function(x){CountPower(x,10^-3)})
# apply(p_poly,2,function(x){CountPower(x,thres)})
# Genome-wide significance threshold
thres = 5E-08
#remove standard polytomous function
#unstable outliers
#idx <- which(p_poly[,4]==0)
#p_poly = p_poly[-idx,,drop=F]
# Column-wise empirical power; result columns in order:
# FTOP, MTOP, standard logistic, complete-data FTOP, polytomous.
result <- cbind(apply(p_global_result,2,function(x){CountPower(x,thres)}),
                apply(p_mglobal_result,2,function(x){CountPower(x,thres)}),
                apply(p_standard,2,function(x){CountPower(x,thres)}),
                apply(p_global_complete,2,function(x){CountPower(x,thres)}),
                apply(p_poly,2,function(x){CountPower(x,thres)}))
write.csv(result,file=paste0("./simulation/power/result/power.simulation.result_0.25.csv") )
# setwd('/data/zhangh24/breast_cancer_data_analysis/')
# filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
# files <- dir(filedir,pattern="simu_result",full.names=T)
# total <- 0
# for(i1 in 6000:7000){
# print(i1)
# file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result",i1,".Rdata")
# if(file%in%files==T){
# load(paste0("./simulation/power/result//simu_result",i1,".Rdata"))
# total = total+ length(result.list[[1]][[1]])/9 + length(result.list[[2]][[1]])/9
#
# }
# }
#
# #total = total*2
#
# p_global_result <- matrix(0,total,9)
# p_mglobal_result <- matrix(0,total,9)
# p_standard <- matrix(0,total,9)
# p_global_complete <- matrix(0,total,9)
# p_poly <- matrix(0,total,9)
#
# total <- 0
#
# for(i1 in 6000:7000){
# print(i1)
# file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/simu_result",i1,".Rdata")
# if(file%in%files==T){
# load(paste0("./simulation/power/result//simu_result",i1,".Rdata"))
# temp1 = length(result.list[[1]][[1]])/9
# temp2 = length(result.list[[2]][[1]])/9
# temp = temp1+temp2
# if(temp1==0){
# p_global_result[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=9)
#
# p_mglobal_result[total+(1:temp2),] <- matrix(result.list[[2]][[2]],ncol=9)
# p_standard[total+(1:temp2),] <- matrix(result.list[[2]][[3]],ncol=9)
#
# p_global_complete[total+(1:temp2),] <-matrix(result.list[[2]][[5]],ncol=9)
# p_poly[total+(1:temp2),] <- matrix(result.list[[2]][[6]],ncol=9)
# }else if(temp2==0){
# p_global_result[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=9)
#
# p_mglobal_result[total+(1:temp1),] <- matrix(result.list[[1]][[2]],ncol=9)
# p_standard[total+(1:temp1),] <- matrix(result.list[[1]][[3]],ncol=9)
#
# p_global_complete[total+(1:temp1),] <-matrix(result.list[[1]][[5]],ncol=9)
# p_poly[total+(1:temp1),] <- matrix(result.list[[1]][[6]],ncol=9)
# }else{
# p_global_result[total+(1:temp),] <-rbind(matrix(result.list[[1]][[1]],ncol=9),
# matrix(result.list[[2]][[1]],ncol=9))
#
# p_mglobal_result[total+(1:temp),] <- rbind(matrix(result.list[[1]][[2]],ncol=9),
# matrix(result.list[[2]][[2]],ncol=9))
# p_standard[total+(1:temp),] <- rbind(matrix(result.list[[1]][[3]],ncol=9),
# matrix(result.list[[2]][[3]],ncol=9))
#
# p_global_complete[total+(1:temp),] <-rbind(matrix(result.list[[1]][[5]],ncol=9),
# matrix(result.list[[2]][[5]],ncol=9))
#
# p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
# matrix(result.list[[2]][[6]],ncol=9))
# }
#
#
# #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
# # matrix(result.list[[2]][[6]],ncol=9))
#
# total = total+ temp
#
# }
# }
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# apply(p_global_result,2,function(x){CountPower(x,10^-3)})
# apply(p_mglobal_result,2,function(x){CountPower(x,10^-3)})
# apply(p_standard,2,function(x){CountPower(x,10^-3)})
# apply(p_global_complete,2,function(x){CountPower(x,10^-3)})
# apply(p_poly,2,function(x){CountPower(x,10^-3)})
#
# thres = 5E-08
# result <- cbind(apply(p_global_result,2,function(x){CountPower(x,thres)}),
# apply(p_mglobal_result,2,function(x){CountPower(x,thres)}),
# apply(p_standard,2,function(x){CountPower(x,thres)}),
# apply(p_global_complete,2,function(x){CountPower(x,thres)}),
# apply(p_poly,2,function(x){CountPower(x,thres)}))
#
# result.2 <- result
#
# result <- result.1
# result[c(1,4,7),] <- result.2[1:3,]
#
# write.csv(result,file=paste0("./simulation/power/result/power.simulation.result.csv") )
#
#
# result.low <- result
# result.low[,2]/result.low[,3]
# result.low[,2]/result.low[,4]
#apply(p_poly,2,function(x){CountPower(x,10^-3)})
#CountTypeOne(p_global_result,10^-4)
# rbind(matrix(result.list[[1]][[3]],ncol=9),
# matrix(result.list[[2]][[3]],ncol=9))
#result <- list(p_global_result,p_mglobal_result,p_standard,p_mglobal_result,p_global_complete,p_poly)
|
/simulation/power/code/merge_power.R
|
no_license
|
andrewhaoyu/breast_cancer_data_analysis
|
R
| false
| false
| 17,633
|
r
|
# ---- Collect p-values for the 0.08 effect-size simulations ----
#merge power results with the effect size as 0.08 level
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="simu_result",full.names=T)
total <- 0
#n.loop = number of simulation setting* number of sample size setting
n.loop <- 12
# First pass: count replicates across all available result files so the
# collection matrices can be preallocated with the right number of rows.
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result",i1,".Rdata")
  if(file%in%files==T){
    load(paste0("./simulation/power/result/simu_result",i1,".Rdata"))
    #result.list is a list of pvalue
    #three different simulation settings: 1. no heterogneity 2. one tumor heter 3. multiple tumor heterogeneity
    #34different sample size were implemented 5000, 25,000 50,000 and 100,000
    #
    #its the output of a foreach parallele result
    #[[1]] and [[2]] share the same structure
    #[[1]] [[1]] is the vector of p_value from FTOP
    #[[1]] [[1]] is a long vector looped by first simulation setting, then sample size (inner loop), 12 different sections. The third loop is the number of replicates in each section
    #[[1]] [[2]] is the vector of p_value from MTOP
    #[[1]] [[3]] is the vector of p_value from standard logistoc regression
    #[[1]] [[4]] is the vector of p_value from MTOP this is because of a previous typo
    #[[1]] [[5]] is the vector of FTOP from complete analysis
    #[[1]] [[6]] is the vector of polytomous model from complete analysis
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
#total = total*2
# One row per replicate, one column per (setting, sample-size) combination
p_global_result <- matrix(0,total,n.loop)
p_mglobal_result <- matrix(0,total,n.loop)
p_standard <- matrix(0,total,n.loop)
p_global_complete <- matrix(0,total,n.loop)
#p_poly <- matrix(0,total,9)
total <- 0
#args 1:2000 contains the simulation results for FTOP, MTOP, standard logistic regressionn, complete FTOP
# Second pass: reload each file and stack its p-values into the matrices,
# using `total` as the running row offset.
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result",i1,".Rdata")
  if(file%in%files==T){
    load(paste0("./simulation/power/result//simu_result",i1,".Rdata"))
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    # Either parallel half may be empty; the three branches avoid 1:0 indexing
    if(temp1==0){
      p_global_result[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp2),] <- matrix(result.list[[2]][[2]],ncol=n.loop)
      p_standard[total+(1:temp2),] <- matrix(result.list[[2]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp2),] <-matrix(result.list[[2]][[5]],ncol=n.loop)
    }else if(temp2==0){
      p_global_result[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp1),] <- matrix(result.list[[1]][[2]],ncol=n.loop)
      p_standard[total+(1:temp1),] <- matrix(result.list[[1]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp1),] <-matrix(result.list[[1]][[5]],ncol=n.loop)
    }else{
      p_global_result[total+(1:temp),] <-rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                               matrix(result.list[[2]][[1]],ncol=n.loop))
      p_mglobal_result[total+(1:temp),] <- rbind(matrix(result.list[[1]][[2]],ncol=n.loop),
                                                 matrix(result.list[[2]][[2]],ncol=n.loop))
      p_standard[total+(1:temp),] <- rbind(matrix(result.list[[1]][[3]],ncol=n.loop),
                                           matrix(result.list[[2]][[3]],ncol=n.loop))
      p_global_complete[total+(1:temp),] <-rbind(matrix(result.list[[1]][[5]],ncol=n.loop),
                                                 matrix(result.list[[2]][[5]],ncol=n.loop))
      #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
      #                                matrix(result.list[[2]][[6]],ncol=9))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
#' Empirical power: fraction of p-values at or below a significance threshold.
#'
#' @param p numeric vector of p-values; NA entries count as non-significant.
#' @param alpha significance threshold.
#' @return proportion of \code{p} that is <= \code{alpha}.
CountPower <- function(p,alpha){
  # Count of significant tests over the full vector length. na.rm = TRUE drops
  # NA comparisons exactly as the original which()-based count did, so NA
  # p-values still contribute to the denominator but never to the numerator.
  sum(p <= alpha, na.rm = TRUE) / length(p)
}
#load results for polytomous model
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="poly_",full.names=T)
total <- 0
#args 1:2000 contains the results for polytomous
# First pass: count polytomous replicates so p_poly can be preallocated
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
p_poly <- matrix(0,total,n.loop)
total <- 0
# Second pass: stack polytomous p-values with the same row-offset bookkeeping
# used for the other methods above.
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_",i1,".Rdata")
  if(file%in%files==T){
    load(paste0(file))
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    if(temp1==0){
      p_poly[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
    }else if(temp2==0){
      p_poly[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
    }else{
      p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                       matrix(result.list[[2]][[1]],ncol=n.loop))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
# apply(p_global_result,2,function(x){CountPower(x,10^-3)})
# apply(p_mglobal_result,2,function(x){CountPower(x,10^-3)})
# apply(p_standard,2,function(x){CountPower(x,10^-3)})
# apply(p_global_complete,2,function(x){CountPower(x,10^-3)})
# apply(p_poly,2,function(x){CountPower(x,thres)})
# Genome-wide significance threshold
thres = 5E-08
#remove standard polytomous function
#unstable outliers
#idx <- which(p_poly[,4]==0)
#p_poly = p_poly[-idx,,drop=F]
# Column-wise empirical power for each method; result columns are, in order:
# FTOP, MTOP, standard logistic, complete-data FTOP, polytomous.
result <- cbind(apply(p_global_result,2,function(x){CountPower(x,thres)}),
                apply(p_mglobal_result,2,function(x){CountPower(x,thres)}),
                apply(p_standard,2,function(x){CountPower(x,thres)}),
                apply(p_global_complete,2,function(x){CountPower(x,thres)}),
                apply(p_poly,2,function(x){CountPower(x,thres)}))
result.1 <- result
#write.csv(result,file=paste0("./simulation/power/result/power.simulation.result.csv") )
write.csv(result,file=paste0("./simulation/power/result/power.simulation.result.csv") )
# ---- Collect p-values for the 0.25 effect-size simulations ----
#merge power results with effectsize as 0.25 level
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="simu_result_0.25_",full.names=T)
total <- 0
# Fewer cells here: 3 (setting, sample-size) combinations instead of 12
n.loop <- 3
# First pass: count replicates to size the collection matrices
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    #result.list is a list of pvalue
    #three different simulation settings: 1. no heterogneity 2. one tumor heter 3. multiple tumor heterogeneity
    #3 different sample size were implemented 5000, 50,000 and 100,000
    #
    #its the output of a foreach parallele result
    #[[1]] and [[2]] share the same structure
    #[[1]] [[1]] is the vector of p_value from FTOP
    #[[1]] [[1]] is a long vector looped by first simulation setting, then sample size (inner loop), 9 different sections
    #[[1]] [[2]] is the vector of p_value from MTOP
    #[[1]] [[3]] is the vector of p_value from standard logistoc regression
    #[[1]] [[4]] is the vector of p_value from MTOP this is because of a previous typo
    #[[1]] [[5]] is the vector of FTOP from complete analysis
    #[[1]] [[6]] is the vector of polytomous model from complete analysis
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
#total = total*2
p_global_result <- matrix(0,total,n.loop)
p_mglobal_result <- matrix(0,total,n.loop)
p_standard <- matrix(0,total,n.loop)
p_global_complete <- matrix(0,total,n.loop)
#p_poly <- matrix(0,total,9)
total <- 0
#args 1:2000 contains the simulation results for FTOP, MTOP, standard logistic regressionn, complete FTOP
# Second pass: stack p-values, tracking the running row offset in `total`
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    # Either parallel half may be empty; branches avoid 1:0 indexing
    if(temp1==0){
      p_global_result[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp2),] <- matrix(result.list[[2]][[2]],ncol=n.loop)
      p_standard[total+(1:temp2),] <- matrix(result.list[[2]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp2),] <-matrix(result.list[[2]][[5]],ncol=n.loop)
    }else if(temp2==0){
      p_global_result[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
      p_mglobal_result[total+(1:temp1),] <- matrix(result.list[[1]][[2]],ncol=n.loop)
      p_standard[total+(1:temp1),] <- matrix(result.list[[1]][[3]],ncol=n.loop)
      p_global_complete[total+(1:temp1),] <-matrix(result.list[[1]][[5]],ncol=n.loop)
    }else{
      p_global_result[total+(1:temp),] <-rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                               matrix(result.list[[2]][[1]],ncol=n.loop))
      p_mglobal_result[total+(1:temp),] <- rbind(matrix(result.list[[1]][[2]],ncol=n.loop),
                                                 matrix(result.list[[2]][[2]],ncol=n.loop))
      p_standard[total+(1:temp),] <- rbind(matrix(result.list[[1]][[3]],ncol=n.loop),
                                           matrix(result.list[[2]][[3]],ncol=n.loop))
      p_global_complete[total+(1:temp),] <-rbind(matrix(result.list[[1]][[5]],ncol=n.loop),
                                                 matrix(result.list[[2]][[5]],ncol=n.loop))
      #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
      #                                matrix(result.list[[2]][[6]],ncol=9))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
# Empirical power: share of the supplied p-values that reach significance.
# NA p-values are dropped by which(), so they count as non-significant while
# still contributing to the denominator.
CountPower <- function(p,alpha){
  sig_idx <- which(p <= alpha)
  return(length(sig_idx)/length(p))
}
#load results for polytomous model
setwd('/data/zhangh24/breast_cancer_data_analysis/')
filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
files <- dir(filedir,pattern="poly_0.25_",full.names=T)
total <- 0
#args 1:2000 contains the results for polytomous
# First pass: count polytomous 0.25 replicates so p_poly can be preallocated
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(file)
    total = total+ length(result.list[[1]][[1]])/n.loop + length(result.list[[2]][[1]])/n.loop
  }
}
p_poly <- matrix(0,total,n.loop)
total <- 0
# Second pass: stack polytomous p-values with the usual row-offset bookkeeping
for(i1 in 1:2000){
  print(i1)
  file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//poly_0.25_",i1,".Rdata")
  if(file%in%files==T){
    load(paste0(file))
    temp1 = length(result.list[[1]][[1]])/n.loop
    temp2 = length(result.list[[2]][[1]])/n.loop
    temp = temp1+temp2
    if(temp1==0){
      p_poly[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=n.loop)
    }else if(temp2==0){
      p_poly[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=n.loop)
    }else{
      p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[1]],ncol=n.loop),
                                       matrix(result.list[[2]][[1]],ncol=n.loop))
    }
    #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
    #                                matrix(result.list[[2]][[6]],ncol=9))
    total = total+ temp
  }
}
# apply(p_global_result,2,function(x){CountPower(x,10^-3)})
# apply(p_mglobal_result,2,function(x){CountPower(x,10^-3)})
# apply(p_standard,2,function(x){CountPower(x,10^-3)})
# apply(p_global_complete,2,function(x){CountPower(x,10^-3)})
# apply(p_poly,2,function(x){CountPower(x,thres)})
# Genome-wide significance threshold
thres = 5E-08
#remove standard polytomous function
#unstable outliers
#idx <- which(p_poly[,4]==0)
#p_poly = p_poly[-idx,,drop=F]
# Column-wise empirical power; result columns in order:
# FTOP, MTOP, standard logistic, complete-data FTOP, polytomous.
result <- cbind(apply(p_global_result,2,function(x){CountPower(x,thres)}),
                apply(p_mglobal_result,2,function(x){CountPower(x,thres)}),
                apply(p_standard,2,function(x){CountPower(x,thres)}),
                apply(p_global_complete,2,function(x){CountPower(x,thres)}),
                apply(p_poly,2,function(x){CountPower(x,thres)}))
write.csv(result,file=paste0("./simulation/power/result/power.simulation.result_0.25.csv") )
# setwd('/data/zhangh24/breast_cancer_data_analysis/')
# filedir <- '/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/'
# files <- dir(filedir,pattern="simu_result",full.names=T)
# total <- 0
# for(i1 in 6000:7000){
# print(i1)
# file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result//simu_result",i1,".Rdata")
# if(file%in%files==T){
# load(paste0("./simulation/power/result//simu_result",i1,".Rdata"))
# total = total+ length(result.list[[1]][[1]])/9 + length(result.list[[2]][[1]])/9
#
# }
# }
#
# #total = total*2
#
# p_global_result <- matrix(0,total,9)
# p_mglobal_result <- matrix(0,total,9)
# p_standard <- matrix(0,total,9)
# p_global_complete <- matrix(0,total,9)
# p_poly <- matrix(0,total,9)
#
# total <- 0
#
# for(i1 in 6000:7000){
# print(i1)
# file = paste0("/data/zhangh24/breast_cancer_data_analysis/simulation/power/result/simu_result",i1,".Rdata")
# if(file%in%files==T){
# load(paste0("./simulation/power/result//simu_result",i1,".Rdata"))
# temp1 = length(result.list[[1]][[1]])/9
# temp2 = length(result.list[[2]][[1]])/9
# temp = temp1+temp2
# if(temp1==0){
# p_global_result[total+(1:temp2),] <- matrix(result.list[[2]][[1]],ncol=9)
#
# p_mglobal_result[total+(1:temp2),] <- matrix(result.list[[2]][[2]],ncol=9)
# p_standard[total+(1:temp2),] <- matrix(result.list[[2]][[3]],ncol=9)
#
# p_global_complete[total+(1:temp2),] <-matrix(result.list[[2]][[5]],ncol=9)
# p_poly[total+(1:temp2),] <- matrix(result.list[[2]][[6]],ncol=9)
# }else if(temp2==0){
# p_global_result[total+(1:temp1),] <- matrix(result.list[[1]][[1]],ncol=9)
#
# p_mglobal_result[total+(1:temp1),] <- matrix(result.list[[1]][[2]],ncol=9)
# p_standard[total+(1:temp1),] <- matrix(result.list[[1]][[3]],ncol=9)
#
# p_global_complete[total+(1:temp1),] <-matrix(result.list[[1]][[5]],ncol=9)
# p_poly[total+(1:temp1),] <- matrix(result.list[[1]][[6]],ncol=9)
# }else{
# p_global_result[total+(1:temp),] <-rbind(matrix(result.list[[1]][[1]],ncol=9),
# matrix(result.list[[2]][[1]],ncol=9))
#
# p_mglobal_result[total+(1:temp),] <- rbind(matrix(result.list[[1]][[2]],ncol=9),
# matrix(result.list[[2]][[2]],ncol=9))
# p_standard[total+(1:temp),] <- rbind(matrix(result.list[[1]][[3]],ncol=9),
# matrix(result.list[[2]][[3]],ncol=9))
#
# p_global_complete[total+(1:temp),] <-rbind(matrix(result.list[[1]][[5]],ncol=9),
# matrix(result.list[[2]][[5]],ncol=9))
#
# p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
# matrix(result.list[[2]][[6]],ncol=9))
# }
#
#
# #p_poly[total+(1:temp),] <- rbind(matrix(result.list[[1]][[6]],ncol=9),
# # matrix(result.list[[2]][[6]],ncol=9))
#
# total = total+ temp
#
# }
# }
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# apply(p_global_result,2,function(x){CountPower(x,10^-3)})
# apply(p_mglobal_result,2,function(x){CountPower(x,10^-3)})
# apply(p_standard,2,function(x){CountPower(x,10^-3)})
# apply(p_global_complete,2,function(x){CountPower(x,10^-3)})
# apply(p_poly,2,function(x){CountPower(x,10^-3)})
#
# thres = 5E-08
# result <- cbind(apply(p_global_result,2,function(x){CountPower(x,thres)}),
# apply(p_mglobal_result,2,function(x){CountPower(x,thres)}),
# apply(p_standard,2,function(x){CountPower(x,thres)}),
# apply(p_global_complete,2,function(x){CountPower(x,thres)}),
# apply(p_poly,2,function(x){CountPower(x,thres)}))
#
# result.2 <- result
#
# result <- result.1
# result[c(1,4,7),] <- result.2[1:3,]
#
# write.csv(result,file=paste0("./simulation/power/result/power.simulation.result.csv") )
#
#
# result.low <- result
# result.low[,2]/result.low[,3]
# result.low[,2]/result.low[,4]
#apply(p_poly,2,function(x){CountPower(x,10^-3)})
#CountTypeOne(p_global_result,10^-4)
# rbind(matrix(result.list[[1]][[3]],ncol=9),
# matrix(result.list[[2]][[3]],ncol=9))
#result <- list(p_global_result,p_mglobal_result,p_standard,p_mglobal_result,p_global_complete,p_poly)
|
# Regression tests for the chance.* miscellaneous helpers. A fixed RNG seed
# makes the random-backed functions (pick, weighted, hash, guid) deterministic,
# so exact values can be pinned.
# Uses expect_equal() directly: the expect_that(x, equals(y)) form is
# deprecated in testthat and removed in the 3rd edition.
test_that("chance.misc works as expected", {
  set.seed(86858246)
  expect_equal(chance.pick(c(1, 2, 3)), 1)
  expect_equal(chance.weighted(c(1, 2, 3), c(0.5, 0.3, 0.2)), 2)
  expect_equal(chance.hash(), "3c996c26d0e9675cad9d1025783791ad7e6d936d94c058f3f9e3797eb897d138")
  expect_equal(chance.hash(case = 'upper'), "D5F58A87560D1E0839558F49325F77CFFA19FF2A1DBE909439291868D97F4ADA")
  expect_equal(chance.capitalise('test'), "Test")
  expect_equal(chance.guid(), "92b7fbbb-fb42-0a0e-ad5b-b0ffa2506a5f")
})
|
/tests/testthat/test-misc.R
|
no_license
|
martineastwood/chancer
|
R
| false
| false
| 567
|
r
|
# Regression tests for the chance.* miscellaneous helpers. A fixed RNG seed
# makes the random-backed functions (pick, weighted, hash, guid) deterministic,
# so exact values can be pinned.
# Uses expect_equal() directly: the expect_that(x, equals(y)) form is
# deprecated in testthat and removed in the 3rd edition.
test_that("chance.misc works as expected", {
  set.seed(86858246)
  expect_equal(chance.pick(c(1, 2, 3)), 1)
  expect_equal(chance.weighted(c(1, 2, 3), c(0.5, 0.3, 0.2)), 2)
  expect_equal(chance.hash(), "3c996c26d0e9675cad9d1025783791ad7e6d936d94c058f3f9e3797eb897d138")
  expect_equal(chance.hash(case = 'upper'), "D5F58A87560D1E0839558F49325F77CFFA19FF2A1DBE909439291868D97F4ADA")
  expect_equal(chance.capitalise('test'), "Test")
  expect_equal(chance.guid(), "92b7fbbb-fb42-0a0e-ad5b-b0ffa2506a5f")
})
|
## Build animated GIFs from per-frame PNG images using the magick package.
library(magick)
library(magrittr)

## Animate every *.png frame found in `dir` into a GIF written next to the
## frames. Factored into a helper so the pipeline is not duplicated, and the
## output path is explicit instead of relying on setwd() side effects.
##
## dir:      directory containing the PNG frames (read in file-name order)
## out_file: name of the GIF to write inside `dir`
## fps:      animation speed in frames per second (default 1, as before)
animate_pngs <- function(dir, out_file, fps = 1) {
  list.files(path = dir, pattern = '*.png', full.names = TRUE) %>%
    image_read() %>%               # reads each path file
    image_join() %>%               # joins images into one stack
    image_animate(fps = fps) %>%   # animates; can opt for number of loops
    image_write(file.path(dir, out_file))  # write alongside the frames
}

path1 <- "/Users/alexis_pro/Documents/GitHub/covid19_vaccination_africa/maps"
animate_pngs(path1, "map_vaccination_progress.gif")

path2 <- "/Users/alexis_pro/Dropbox/WorldBank/AFRCE/coronavirus/alexis data and code/vaccines/barchart_vaccine_inequity"
animate_pngs(path2, "vaccine_inequity.gif")
|
/vaccine_animation_code.R
|
no_license
|
alexis-ribal/covid19_vaccination_africa
|
R
| false
| false
| 812
|
r
|
## Build animated GIFs from per-frame PNG images using the magick package.
library(magick)
library(magrittr)

## Animate every *.png frame found in `dir` into a GIF written next to the
## frames. Factored into a helper so the pipeline is not duplicated, and the
## output path is explicit instead of relying on setwd() side effects.
##
## dir:      directory containing the PNG frames (read in file-name order)
## out_file: name of the GIF to write inside `dir`
## fps:      animation speed in frames per second (default 1, as before)
animate_pngs <- function(dir, out_file, fps = 1) {
  list.files(path = dir, pattern = '*.png', full.names = TRUE) %>%
    image_read() %>%               # reads each path file
    image_join() %>%               # joins images into one stack
    image_animate(fps = fps) %>%   # animates; can opt for number of loops
    image_write(file.path(dir, out_file))  # write alongside the frames
}

path1 <- "/Users/alexis_pro/Documents/GitHub/covid19_vaccination_africa/maps"
animate_pngs(path1, "map_vaccination_progress.gif")

path2 <- "/Users/alexis_pro/Dropbox/WorldBank/AFRCE/coronavirus/alexis data and code/vaccines/barchart_vaccine_inequity"
animate_pngs(path2, "vaccine_inequity.gif")
|
\encoding{UTF-8}
\name{p3state.msm-package}
\alias{p3state.msm-package}
\alias{p3state.msm}
\docType{package}
\title{Analyzing survival data from an illness-death model}
\description{
\pkg{p3state.msm} provides functions for estimating semi-parametric regression
models but also to implement nonparametric estimators for the transition
probabilities. The methods can also be used in progressive three-state models.
In progressive three-state models, estimators for other quantities such as the
bivariate distribution function (for the sequentially ordered events) are also
given.
}
\details{
\tabular{ll}{
Package: \tab p3state.msm\cr
Type: \tab Package\cr
Version: \tab 1.3.2\cr
Date: \tab 2023-01-19\cr
License: \tab GPL-3\cr
LazyLoad: \tab yes\cr
LazyData: \tab yes\cr
}
}
\author{
Luis Meira-Machado, Javier Roca Pardinas \email{roca@uvigo.es}\cr
and Artur Araújo \email{artur.stat@gmail.com}\cr
Maintainer: Luis Meira-Machado \email{lmachado@math.uminho.pt}
}
\references{
Crowley J., Hu M. (1977). Covariance analysis of heart transplant survival data.
\emph{Journal of the American Statistical Association}, \bold{72}(357), 27-36.
\doi{10.2307/2286902}
Meira-Machado L., De Una-Alvarez J., Cadarso-Suarez C. (2006).
Nonparametric estimation of transition probabilities in a non-Markov illness-death model.
\emph{Lifetime Data Analysis}, \bold{12}(3), 325-344. \doi{10.1007/s10985-006-9009-x}
de Una-Alvarez J., Meira-Machado L. (2008).
A simple estimator of the bivariate distribution function for censored gap times.
\emph{Statistics & Probability Letters}, \bold{78}(15), 2440-2445. \doi{10.1016/j.spl.2008.02.031}
Meira-Machado L., Roca-Pardinas J. (2011).
p3state.msm: Analyzing Survival Data from an Illness-Death Model.
\emph{Journal of Statistical Software}, \bold{38}(3), 1-18. \doi{10.18637/jss.v038.i03}
}
\keyword{package}
|
/man/p3state.msm-package.Rd
|
no_license
|
cran/p3state.msm
|
R
| false
| false
| 1,941
|
rd
|
\encoding{UTF-8}
\name{p3state.msm-package}
\alias{p3state.msm-package}
\alias{p3state.msm}
\docType{package}
\title{Analyzing survival data from an illness-death model}
\description{
\pkg{p3state.msm} provides functions for estimating semi-parametric regression
models but also to implement nonparametric estimators for the transition
probabilities. The methods can also be used in progressive three-state models.
In progressive three-state models, estimators for other quantities such as the
bivariate distribution function (for the sequentially ordered events) are also
given.
}
\details{
\tabular{ll}{
Package: \tab p3state.msm\cr
Type: \tab Package\cr
Version: \tab 1.3.2\cr
Date: \tab 2023-01-19\cr
License: \tab GPL-3\cr
LazyLoad: \tab yes\cr
LazyData: \tab yes\cr
}
}
\author{
Luis Meira-Machado, Javier Roca Pardinas \email{roca@uvigo.es}\cr
and Artur Araújo \email{artur.stat@gmail.com}\cr
Maintainer: Luis Meira-Machado \email{lmachado@math.uminho.pt}
}
\references{
Crowley J., Hu M. (1977). Covariance analysis of heart transplant survival data.
\emph{Journal of the American Statistical Association}, \bold{72}(357), 27-36.
\doi{10.2307/2286902}
Meira-Machado L., De Una-Alvarez J., Cadarso-Suarez C. (2006).
Nonparametric estimation of transition probabilities in a non-Markov illness-death model.
\emph{Lifetime Data Analysis}, \bold{12}(3), 325-344. \doi{10.1007/s10985-006-9009-x}
de Una-Alvarez J., Meira-Machado L. (2008).
A simple estimator of the bivariate distribution function for censored gap times.
\emph{Statistics & Probability Letters}, \bold{78}(15), 2440-2445. \doi{10.1016/j.spl.2008.02.031}
Meira-Machado L., Roca-Pardinas J. (2011).
p3state.msm: Analyzing Survival Data from an Illness-Death Model.
\emph{Journal of Statistical Software}, \bold{38}(3), 1-18. \doi{10.18637/jss.v038.i03}
}
\keyword{package}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_q.R
\name{add_q.tbl_uvregression}
\alias{add_q.tbl_uvregression}
\title{Add a column of q-values to account for multiple comparisons}
\usage{
\method{add_q}{tbl_uvregression}(x, method = "fdr",
pvalue_fun = x$inputs$pvalue_fun, ...)
}
\arguments{
\item{x}{\code{tbl_uvregression} object}
\item{method}{String indicating method to be used for p-value adjustment.
Methods from \link[stats:p.adjust]{stats::p.adjust} are accepted. Default is \code{method = 'fdr'}.}
\item{pvalue_fun}{Function to round and format p-values.
Default is \link{style_pvalue}.
The function must have a numeric vector input (the numeric, exact p-value),
and return a string that is the rounded/formatted p-value (e.g.
\code{pvalue_fun = function(x) style_pvalue(x, digits = 2)} or equivalently,
\code{purrr::partial(style_pvalue, digits = 2)}).}
\item{...}{Additional arguments passed to or from other methods}
}
\value{
A \code{tbl_uvregression} object
}
\description{
Adjustments to p-values are performed with \link[stats:p.adjust]{stats::p.adjust}.
}
\section{Example Output}{
\if{html}{\figure{tbl_uvr_q_ex.png}{options: width=50\%}}
}
\examples{
tbl_uvr_q_ex <-
trial \%>\%
dplyr::select(age, marker, grade, response) \%>\%
tbl_uvregression(
method = lm,
y = age
) \%>\%
add_global_p() \%>\%
add_q()
}
\seealso{
Other tbl_uvregression tools: \code{\link{add_global_p.tbl_uvregression}},
\code{\link{add_nevent.tbl_uvregression}},
\code{\link{bold_italicize_labels_levels}},
\code{\link{bold_p.tbl_stack}},
\code{\link{bold_p.tbl_uvregression}},
\code{\link{inline_text.tbl_uvregression}},
\code{\link{modify_header}},
\code{\link{sort_p.tbl_uvregression}},
\code{\link{tbl_merge}}, \code{\link{tbl_stack}},
\code{\link{tbl_uvregression}}
}
\author{
Esther Drill, Daniel D. Sjoberg
}
\concept{tbl_uvregression tools}
|
/man/add_q.tbl_uvregression.Rd
|
permissive
|
Glewando/gtsummary
|
R
| false
| true
| 1,927
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_q.R
\name{add_q.tbl_uvregression}
\alias{add_q.tbl_uvregression}
\title{Add a column of q-values to account for multiple comparisons}
\usage{
\method{add_q}{tbl_uvregression}(x, method = "fdr",
pvalue_fun = x$inputs$pvalue_fun, ...)
}
\arguments{
\item{x}{\code{tbl_uvregression} object}
\item{method}{String indicating method to be used for p-value adjustment.
Methods from \link[stats:p.adjust]{stats::p.adjust} are accepted. Default is \code{method = 'fdr'}.}
\item{pvalue_fun}{Function to round and format p-values.
Default is \link{style_pvalue}.
The function must have a numeric vector input (the numeric, exact p-value),
and return a string that is the rounded/formatted p-value (e.g.
\code{pvalue_fun = function(x) style_pvalue(x, digits = 2)} or equivalently,
\code{purrr::partial(style_pvalue, digits = 2)}).}
\item{...}{Additional arguments passed to or from other methods}
}
\value{
A \code{tbl_uvregression} object
}
\description{
Adjustments to p-values are performed with \link[stats:p.adjust]{stats::p.adjust}.
}
\section{Example Output}{
\if{html}{\figure{tbl_uvr_q_ex.png}{options: width=50\%}}
}
\examples{
tbl_uvr_q_ex <-
trial \%>\%
dplyr::select(age, marker, grade, response) \%>\%
tbl_uvregression(
method = lm,
y = age
) \%>\%
add_global_p() \%>\%
add_q()
}
\seealso{
Other tbl_uvregression tools: \code{\link{add_global_p.tbl_uvregression}},
\code{\link{add_nevent.tbl_uvregression}},
\code{\link{bold_italicize_labels_levels}},
\code{\link{bold_p.tbl_stack}},
\code{\link{bold_p.tbl_uvregression}},
\code{\link{inline_text.tbl_uvregression}},
\code{\link{modify_header}},
\code{\link{sort_p.tbl_uvregression}},
\code{\link{tbl_merge}}, \code{\link{tbl_stack}},
\code{\link{tbl_uvregression}}
}
\author{
Esther Drill, Daniel D. Sjoberg
}
\concept{tbl_uvregression tools}
|
## app.R ##
## Shiny gamebook reader: renders choose-your-own-adventure books as HTML.
library(shiny)
library(shinydashboard)
library(magrittr)
## TODO notes (translated from German):
## Auto Addition [>7]: if a section references exactly one other section, that
##   section should be appended directly.
## Dice, stats, inventory
## Combat
## End markers [E+], [E-1]
## NOTE(review): books.R presumably defines books.booklist(), books.load.book()
## and parse.chapter() used below — confirm against that file.
source("books.R")
## UI: book selector and start button in the sidebar, the rendered chapter in
## the body; books.js is expected to handle client-side chapter navigation by
## setting the custom input `jsSwitchToChapter`.
ui <- dashboardPage(
dashboardHeader(title = "Shiny Books"),
dashboardSidebar(
selectInput("selectBook", "Choose Your Adventure", choices = books.booklist()),
actionButton("buttonLoad", "Start"),
"Game Stats"
),
dashboardBody(
tags$head(tags$script(src = "books.js")),
htmlOutput("htmlChapter")
)
)
## Server: holds the loaded book, the current chapter index, and mutable game
## state, and re-renders the chapter HTML whenever either changes.
server <- function(input, output, session) {
  book <- reactiveVal()      # list of chapters for the loaded book
  chapter <- reactiveVal()   # index of the chapter currently shown
  state <- reactiveValues()  # mutable game state passed to parse.chapter()

  # Load new book and jump to its first chapter.
  observeEvent(input$buttonLoad, {
    book(books.load.book(input$selectBook))
    chapter(1)
  })

  # Render chapter; req() keeps the output empty until a book is loaded.
  output$htmlChapter <- renderUI({
    req(book(), chapter())
    text <- parse.chapter(book()[[chapter()]]$text, state)
    HTML(text)
  })

  # JS-induced chapter switch. observeEvent() instead of a bare observe():
  # it ignores the initial NULL value of the custom input, so chapter() is
  # not clobbered with NULL before the JS side has ever sent a value.
  observeEvent(input$jsSwitchToChapter, {
    chapter(input$jsSwitchToChapter)
  })
}

shinyApp(ui, server)
|
/app.R
|
no_license
|
AlphaOrange/books
|
R
| false
| false
| 1,110
|
r
|
## app.R ##
## Shiny gamebook reader: renders choose-your-own-adventure books as HTML.
library(shiny)
library(shinydashboard)
library(magrittr)
## TODO notes (translated from German):
## Auto Addition [>7]: if a section references exactly one other section, that
##   section should be appended directly.
## Dice, stats, inventory
## Combat
## End markers [E+], [E-1]
## NOTE(review): books.R presumably defines books.booklist(), books.load.book()
## and parse.chapter() used below — confirm against that file.
source("books.R")
## UI: book selector and start button in the sidebar, the rendered chapter in
## the body; books.js is expected to handle client-side chapter navigation by
## setting the custom input `jsSwitchToChapter`.
ui <- dashboardPage(
dashboardHeader(title = "Shiny Books"),
dashboardSidebar(
selectInput("selectBook", "Choose Your Adventure", choices = books.booklist()),
actionButton("buttonLoad", "Start"),
"Game Stats"
),
dashboardBody(
tags$head(tags$script(src = "books.js")),
htmlOutput("htmlChapter")
)
)
## Server: holds the loaded book, the current chapter index, and mutable game
## state, and re-renders the chapter HTML whenever either changes.
server <- function(input, output, session) {
  book <- reactiveVal()      # list of chapters for the loaded book
  chapter <- reactiveVal()   # index of the chapter currently shown
  state <- reactiveValues()  # mutable game state passed to parse.chapter()

  # Load new book and jump to its first chapter.
  observeEvent(input$buttonLoad, {
    book(books.load.book(input$selectBook))
    chapter(1)
  })

  # Render chapter; req() keeps the output empty until a book is loaded.
  output$htmlChapter <- renderUI({
    req(book(), chapter())
    text <- parse.chapter(book()[[chapter()]]$text, state)
    HTML(text)
  })

  # JS-induced chapter switch. observeEvent() instead of a bare observe():
  # it ignores the initial NULL value of the custom input, so chapter() is
  # not clobbered with NULL before the JS side has ever sent a value.
  observeEvent(input$jsSwitchToChapter, {
    chapter(input$jsSwitchToChapter)
  })
}

shinyApp(ui, server)
|
## Dependency bootstrap: install any missing package, then attach it.
## NOTE(review): require() is used deliberately here — unlike library() it
## returns FALSE instead of erroring when the package is absent, which is what
## triggers the install.packages() fallback. In reusable package code, plain
## library()/requireNamespace() would be preferred over this auto-install
## pattern.
if (!require("shiny")) { install.packages("shiny", dependencies = TRUE) ; library(shiny)}
if (!require("shinyjs")) { install.packages("shinyjs", dependencies = TRUE) ; library(shinyjs)}
if (!require("shinyWidgets")) { install.packages("shinyWidgets", dependencies = TRUE) ; library(shinyWidgets)}
if (!require("shinythemes")) { install.packages("shinythemes", dependencies = TRUE) ; library(shinythemes)}
if (!require("shinydashboard")) { install.packages("shinydashboard", dependencies = TRUE) ; library(shinydashboard)}
if (!require("DT")) { install.packages("DT", dependencies = TRUE) ; library(DT)}
if (!require("data.table")) { install.packages("data.table", dependencies = TRUE) ; library(data.table)}
if (!require("png")) { install.packages("png", dependencies = TRUE) ; library(png)}
ui <-
dashboardPage( skin = "blue",
dashboardHeader(
#style = "position:fixed;", # inherit, width:12%;
title = tags$a(href="https://github.com/KaranKakouei/KaranHub/", icon("github"), "KaranHub", style = "color: white; font-size: 24px;"),
tags$li(a(href = 'https://github.com/KaranKakouei/KaranHub/', icon("github"), title = "Back to Apps Hub"), class = "dropdown", style = "size: 20px;margin-right:10px;")),
dashboardSidebar(
tags$style(HTML(".main-sidebar{width: 250px;}")),
sidebarMenu(
style = "position:fixed; width: 250px;", # inherit, 13%
menuItem("Home", tabName = "Home", icon = icon("home")),
menuItem("Appliance energy predictions", tabName = "Scripts", icon = icon("code"),
menuSubItem(" Data cleansing", tabName = "Scripts_Data_Cleansing", icon = icon("code")),
menuSubItem(" Machine Learning", tabName = "Scripts_AllAlgorithmsR", icon = icon("r-project")),
menuSubItem(" Machine Learning", tabName = "Scripts_AllAlgorithmsPy", icon = icon("python")),
menuSubItem(" Visualization", tabName = "Visualization", icon = icon("chart-line")),
menuSubItem(" Deep Learning", tabName = "Scripts_ANNsPy", icon = icon("python")))
)),
dashboardBody(
tags$script(HTML("$('body').addClass('fixed');")),
tabItems(
tabItem("Home",
fluidRow(
column(width = 1),
column(width = 10,
p("About me", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("I am a motivated, team-work oriented, and responsible Data Scientist and Water Manager with seven years of practical experience in Machine
Learning and predictive modeling of complex data, which includes handling the whole pipeline from preprocessing of data tothe visualization of results.
I am highly proficient in SQL and in programming languages such as R and Python, and have an open mind-set and a commitment to continuous learning and
professional development.", style= "text-align: justify; font-size: 18px;"),
p("For the last seven years, I have been working at the Leibniz-Institute of Freshwater Ecology and Inland Fisheries where I have been responsible
for analysis of big data using Machine and Deep Learning tools to assess predictive power of featureparameters or influencing factors and predict the
potential future scopes of inland waters with implications for environmental management. I have also been responsible for contributing to talking about
data and thriving on explaining the meaning of data by providing easily interpretable outcomes (e.g., interactive dashboards, reports and deliverables)
for either my fellow colleagues or managers and non-technical stakeholders.", style= "text-align: justify; font-size: 18px;"),
p("I have the initiative, enthusiasm and willingness to learn new skills and keep abreast of and promote new developments in the appropriate
professional field. Thanks to my migration background and my experience of studying and working in different countries (Iran, The Netherlands
and Germany) I have become very flexible and able to adapt to differentworking environments while communicating in different languages, which
I really enjoy.", style= "text-align: justify; font-size: 18px;"),
p("Here I work on a variety of kaggle competitions and would like to distribute my competitive solutions, so people might come up with potential
ways of solving real-life tasks. The good news is that I anounce my solutions in both R and Python environments. Yaaaay!",
style= "text-align: justify; font-size: 18px;"),
p("Data", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("Appliances energy prediction", style = "font-size: 20px; ; font-style: italic; font-weight:bold;"),
p("The first project I am working on is the Appliances Energy Use (AEU). The AEU includes high resolution data that is recorded at 10-minute
frequencies for roughly 4.5 months. The recorded feature parameters are temperature (11 parameters), relative humidity (10 parameters), lights,
wind speed, precipitation, and visibility. According to the project descriptions, the house temperature and humidity conditions were monitored
with a ZigBee wireless sensor network. Each wireless node transmitted the temperature and humidity conditions around 3.3 min. Then, the wireless
data was averaged for 10 minutes periods. The energy data was logged every 10 minutes with m-bus energy meters. Weather from the nearest airport
weather station (Chievres Airport, Belgium) was downloaded from a public data set from Reliable Prognosis (rp5.ru), and merged together with the
experimental data sets using the date and time column. Two random variables have been included in the data set for testing the regression models
and to filter out non predictive attributes (parameters). You can download the data ", tags$a(href="https://www.kaggle.com/loveall/appliances-energy-prediction", "here on Kaggle."),
style= "text-align: justify; font-size: 18px;"),
p("To follow the whole pipeline from preprocessing of data to the visualization of the results, I prepared an interractive shiny web application
which includes all the scripts and visualized outcomes. I will update this read me file as well as the dashboard once I have solutions for further
projects. Enjoy scrolling through the interractive dashboard!"), style= "text-align: justify; font-size: 18px;"),
p("R and Python scripts", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("If you are interested in the codes presented in this Shiny app, please download the scripts under my ", tags$a(href="https://github.com/KaranKakouei/KaranHub/", "GitHub repository."), style= "text-align: justify; font-size: 18px;")
)), column(width = 2)
),
tabItem("Scripts_Data_Cleansing",
tabsetPanel(type = "tabs",
tabPanel("Data cleansing in R",
fluidRow(
column(width = 1),
column(width = 10,
p("Data cleansing in R", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("Here I clean the experimental dataset to prepare it for creating regression models of appliances energy use in a low energy building.", style = "text-align: justify; font-size: 18px;"),
p("Load all necessary R libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_1", placeholder = TRUE),
p("Load your data set", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_2", placeholder = TRUE),
p("First overview of the data. We first take a look at the dimentions of our dataframe to get information on the potential number of feature parameters and observations' length", style = "text-align: justify; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_3", placeholder = TRUE),
p("The dataframe has 19735 rows, which means enough data is available for for our ML algorithms.
So we have 29 columns, which include a date, a response (Appliances), and 27 feature parameters.
Now we alphabetically sort the columns and check data structure.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_4", placeholder = TRUE),
p("Data structure shows that date and some of our feature parameters are loaded as character, which prevent us to describe summary of the data such as mean value of feature parameters.
So we convert the date into date format and all feature variables to integers.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_5", placeholder = TRUE),
p("If we check the data structures we can see that all columns are in the correct format now.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_6", placeholder = TRUE),
p("Now we check whether each column includes incomplete values or NULLs,
and find potential dulicates or the number of NAs per column. We then get the summary of each column for a more detailed overview of values per feature parameter.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_7", placeholder = TRUE),
p("So there is no NULLs, missing values, or duplicates in the dataframe!", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
p("", style = "font-size: 18px;"),
p("Feature parameters", style = "text-align: justify; font-size: 20px; font-weight:bold;"),
p("The number of feature parameters is rather large for such a dataset, so we should reduce them to prevent overfitting in our regression models.
First we can check for correlation between our features, and visualize data as a heatmap.", style = "ptext-align: justify; adding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_8", placeholder = TRUE),
tags$img(src = "DC_R_Heatmap.png", width = "100%", style="text-align: center;"),
p("A couple of feature parameters are highly co-correlated (|r| > 0.9). Examples are: T6 vs. T_out, T9 vs. T3/T5/T7, RH3 vs. RH4.
Either of these feature parameters can equally explain the variability in the response variable (i.e., Appliances),
but we need to decide wich one to remove. We further visualizea the co-corelated feature parameters to confirm and deal with them. In this figure,
we can see most of those features are highly co-correlated and show a very similar trend and a high variety of values.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_9", placeholder = TRUE),
tags$img(src = "DC_R_correlation_lines.png", width = "100%", style="text-align: center;"),
p("Beyond the cocorrelated feature parameters, some of these parameters are relatively unimportant for appliances, so can be removed from the data set prior to regression modelling.
To select which variables are unimpotant and deserve to be removed from the dataframe, we can run a random forest with 100 trees
and assess the relative influence of all feature parameters on Appliances.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_10", placeholder = TRUE),
tags$img(src = "DC_R_VarImp.png", width = "100%", style="text-align: center;"),
p("The co-correlated parameters with least relative influence can also be removed from the data set. These parameters are: T_out, T7, and RH4.
Furthermore, least important feature parameters such as the two random variables of 'rv1' and 'rv2', and the 'Visibility' can also be removed from the data set.
'Lights' has also lots of null values which can be removed too. After removing these variables we come up with a total number of 21 feature parameters",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_11", placeholder = TRUE),
p("Now we check the distribution of the appliances and all features",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_12", placeholder = TRUE),
tags$img(src = "DC_R_Histogram.png", width = "100%", style="text-align: center;"),
p("We can see that all the columns follow normal distribution except T9, RH_6, and wind speed. The appliances is also left-skewed",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
p("As the data was recorded at different times of the day or days of a week, and energy consumption (i.e. appliances)
might depend on these parameters such as weekdays versus weekends or noon versus evenings, we can add
(1) julian date to account for potential seasonality in data, and
(2) day of the week and hour of the day to account for time variability in energy consumption
You can pull out individual parts of the date with the accessor functions yday() (day of the year), wday() (day of the week), hour().
Before fitting our selected features to the models, we need to normalize our feature parameters.
Data normalization (here min-max scaling) enables us the opportunity to assess their relative
influences on appliances without s judged by their relatively small or large values", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCR_13", placeholder = TRUE),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
)), column(width = 2)
),
tabPanel("Data cleansing in Python",
fluidRow(
column(width = 1),
column(width = 10,
p("Data cleansing in Python", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("Here I clean the experimental dataset to prepare it for creating regression models of appliances energy use in a low energy building.", style = "text-align: justify; font-size: 18px;"),
p("Load all necessary Python libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_1", placeholder = TRUE),
p("Load the data set, sort the columns alphabetically and explore the data by checking its structure, dimentions,
number of feature parameters, and observations' length.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_2", placeholder = TRUE),
p("The dataframe has 19735 rows, which means enough data is available for for our ML algorithms.
So we have 29 columns, which include a date, a response (Appliances), and 27 feature parameters.",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
p("Now we check whether each column includes incomplete values or NULLs, and find potential dulicates or
the number of NAs per column.",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_3", placeholder = TRUE),
p("So there is no NULLs, missing values, or duplicates in the dataframe!", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
p("", style = "font-size: 18px;"),
p("Feature parameters", style = "text-align: justify; font-size: 20px; font-weight:bold;"),
p("The number of feature parameters is rather large for such a dataset, so we should reduce them to prevent overfitting in our regression models.
First we can check for correlation between our features, and visualize data as a heatmap.", style = "ptext-align: justify; adding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_4", placeholder = TRUE),
tags$img(src = "DC_Py_Heatmap.png", width = "100%", style="text-align: center;"),
p("A couple of feature parameters are highly co-correlated (|r| > 0.9). Examples are: T6 vs. T_out, T9 vs. T3/T5/T7, RH3 vs. RH4.
Either of these feature parameters can equally explain the variability in the response variable (i.e., Appliances),
but we need to decide wich one to remove. We further visualize the co-corelated feature parameters to confirm and deal with them. In this figure,
we can see most of those features are highly co-correlated and show a very similar trend and a high variety of values.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_5", placeholder = TRUE),
tags$img(src = "DC_Py_correlation_lines.png", width = "100%", style="text-align: center;"),
p("Beyond the cocorrelated feature parameters, some of these parameters are relatively unimportant for appliances, so can be removed from the data set prior to regression modelling.
To select which variables are unimpotant and deserve to be removed from the dataframe, we can run a random forest with 100 trees
and assess the relative influence of all feature parameters on Appliances.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_6", placeholder = TRUE),
tags$img(src = "DC_Py_VarImp.png", width = "100%", style="text-align: center;"),
p("The co-correlated parameters with least relative influence can also be removed from the data set. These parameters are: T_out, T7, and RH4.
Furthermore, least important feature parameters such as the two random variables of 'rv1' and 'rv2', and the 'Visibility' can also be removed from the data set.
'Lights' has also lots of zero values which can be removed too. After removing these variables we come up with a total number of 21 feature parameters.",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_7", placeholder = TRUE),
p("Now we check the distribution of the appliances and all features:",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_8", placeholder = TRUE),
tags$img(src = "DC_Py_Distribution.png", width = "100%", style="text-align: center;"),
p("We can see that all the columns follow normal distribution except T9, RH_6, and wind speed. The appliances is also left-skewed.",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
p("As the data was recorded at different times of the day or days of a week, and energy consumption (i.e. appliances)
might depend on these parameters such as weekdays versus weekends or noon versus evenings, we can add
(1) julian date to account for potential seasonality in data, and
(2) day of the week and hour of the day to account for time variability in energy consumption
You can pull out individual parts of the date with the accessor functions yday() (day of the year), wday() (day of the week), hour().
Before fitting our selected features to the models, we need to normalize our feature parameters.
Data normalization (here min-max scaling) enables us the opportunity to assess their relative
influences on appliances without s judged by their relatively small or large values", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("Scripts_DCPy_9", placeholder = TRUE),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
)), column(width = 2)
)
)
),
tabItem("Scripts_AllAlgorithmsR",
fluidRow(
column(width = 1),
column(width = 10,
p("Apply Machine-/Deep Learing algorithms in R", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("Here we apply five Machine Learning and one Deep Learning algorithms as promising tools to assess the predictability
of appliances energy consumption as our response variable. These six algorithms are from different categories.
We will be looking at the following algorithms and categories:
KNeighborsRegressor (KNN) from Nearest neighbour Regression, Support Vector Regressor (SVR) from support vector machines,
Random Forests (RF), Boosted Regression Trees (BRT), and Extra Trees Regression (ETR) from ensamble models,
and Artificial Neural Networks (ANN) from neural network models.", style = "text-align: justify; font-size: 18px;"),
p("Import all necessary R-libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_AllAlgorithmsR_1", placeholder = TRUE),
p("Load your data:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_AllAlgorithmsR_2", placeholder = TRUE),
p("Run a loop over the data to split the data set randomly into 75% train and 25% test sets for the 10-fold cross-validations and apply all six algorithms on each split
to evaluate predictability of appliances energy consumption:",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_AllAlgorithmsR_3", placeholder = TRUE),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
)), column(width = 2)
),
tabItem("Scripts_AllAlgorithmsPy",
fluidRow(
column(width = 1),
column(width = 10,
p("Apply Machine-/Deep Learing algorithms in Python", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("Here we apply five Machine Learning and one Deep Learning algorithms as promising tools to assess the predictability
of appliances energy consumption as our response variable. These six algorithms are from different categories.
We will be looking at the following algorithms and categories:
KNeighborsRegressor (KNN) from Nearest neighbour Regression, Support Vector Regressor (SVR) from support vector machines,
Random Forests (RF), Boosted Regression Trees (BRT), and Extra Trees Regression (ETR) from ensamble models,
and Artificial Neural Networks (ANN) from neural network models.", style = "text-align: justify; font-size: 18px;"),
p("Import all necessary Python libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_AllAlgorithmsPy_1", placeholder = TRUE),
p("Load your data:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_AllAlgorithmsPy_2", placeholder = TRUE),
p("Print data column names for the next step to assemble features.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
#p("Transfer dataframe to array and check output's schema. Then select the final data (i.e., your response variable and
#feature parameters) to be used in the subsequent ML steps", style = "padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_AllAlgorithmsPy_3", placeholder = TRUE),
p("Run a loop over the data to split the data for 10-fold cross-validation and apply RF algorithm on each split:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_AllAlgorithmsPy_4", placeholder = TRUE),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
)), column(width = 2)
),
tabItem("Visualization",
fluidRow(
column(width = 1),
column(width = 10,
p("R and python model performances", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("The 10-fold cross-validation results show that the python models outperformed R models. This might be resulted from
model structures that varies across the platforms.", style = "text-align: justify; font-size: 18px;"),
tags$img(src = "DV_R_Py_Final_comparisons.png", width = "100%", style="text-align: center;"),
p("SVR in Python and ANN in R were the worst performing models with R scores of 0.02 and 0.05, and median absolute errors (MedAE) of 13.3 and 50.5, respectively.
In contrast, the two algorithms of Extra Trees Regression (R scores: 0.62 in python and 0.56 in R) and Random Forests (R scores: 0.58 in python and 0.54 in R)
outperformed all other algorithms. However, this does not mean that other tools such as Deep Learning (ANNs) are less powerfull than the other algorithms. In the next step,
we build complex ANNs that can perform as good as Random Forest and Extra Trees Regression algorithms, without being prone to overfitting.
You can find the ANN scripts under the 'Deep Learning' tab in the side bar.
", style = "text-align: justify; padding-top: 20px; font-size: 18px;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
)), column(width = 2)
),
tabItem("Scripts_ANNsPy",
fluidRow(
column(width = 1),
column(width = 10,
p("Deep Learning (Artificial Neural Networks) in Python", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("Here we use Random Forests as a promising Machine Learning algorithm to assess the predictability of our response variable, and evaluate the relative influence of feature parameters.",
style = "text-align: justify; font-size: 18px;"),
p("Import PySpark and all necessary Python libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_ANNsPy_1", placeholder = TRUE),
p("Load your data, convert it into array, and define your response (Y) and feature (X) parameters:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_ANNsPy_2", placeholder = TRUE),
p("Run a loop over the data to split the data for 10-fold cross-validation and apply ANNs algorithm on each split. At the same time, save evaluate the best models of each
of each split and save the results as csv-file :", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_ANNsPy_3", placeholder = TRUE),
p("To track loss values online, you should follw the following steps:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
verbatimTextOutput("code_ANNsPy_4", placeholder = TRUE),
tags$img(src = "ANNs_loss_val-los_tensorboard.png", width = "100%", style="text-align: center;"),
p("The evaluation results show that the appliances energy consumption can also be predicted using the Artificial Neural Networks as good as Random Forest and
Extra Trees regression algorithms. The performance of ANNs will become better once changing the model structure by adapting the number of hidden layers and neurons.
In the figure below, we can see that the model performances relatively improved while increasing the number of hidden layers and neurons.",
style = "text-align: justify; padding-top: 20px; font-size: 18px;"),
tags$img(src = "ANNs_Final_comparisons.png", width = "100%", style="text-align: center;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("The figure shows that the model with 4096 neurons and 5 hidden layers outperformed all other models, even those with larger number of neurons or hidden layers.
This means that increasing the number neurons or hidden layers depend on many factors such as length of the time-series that need to be taken into acount. Furthermore,
we need to take care of potential overfitting problems once increasing ANN model complexity. In my code, I have just skipped a model if it was performing worse than
any previous models, and in case of overfitting, the model was neither saved in the directory nor evaluated for our final results shown in the figure above.
To avoid overfitted models, we might test which combinations result in a better R score or lesser median absolute error. This is possible if you apply the above-written
loop on your data. I wish you much success!",
style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
)), column(width = 2)
)
)
)
)
|
/ui.R
|
no_license
|
KaranKakouei/KaranHub
|
R
| false
| false
| 32,221
|
r
|
# Install (if missing) and attach every package the dashboard UI depends on.
required_pkgs <- c(
  "shiny", "shinyjs", "shinyWidgets", "shinythemes",
  "shinydashboard", "DT", "data.table", "png"
)
for (pkg in required_pkgs) {
  # require() returns FALSE (instead of erroring) when a package is absent;
  # in that case install it with its dependencies, then attach it.
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, dependencies = TRUE)
    library(pkg, character.only = TRUE)
  }
}
# Shiny dashboard UI definition for the KaranHub appliance-energy app.
# One tabItem per sidebar entry; each tab is a fluidRow with a 1/10/2 column
# layout. Fix applied: the paragraph "To follow the whole pipeline ..." had its
# closing paren before the style= argument, so the style was passed to the
# enclosing column() instead of the p() tag — the style now sits inside p().
ui <-
  dashboardPage( skin = "blue",
                 # --- Header: app title + GitHub backlink ---
                 dashboardHeader(
                   #style = "position:fixed;", # inherit, width:12%;
                   title = tags$a(href="https://github.com/KaranKakouei/KaranHub/", icon("github"), "KaranHub", style = "color: white; font-size: 24px;"),
                   tags$li(a(href = 'https://github.com/KaranKakouei/KaranHub/', icon("github"), title = "Back to Apps Hub"), class = "dropdown", style = "size: 20px;margin-right:10px;")),
                 # --- Sidebar: fixed-position menu; tabName values must match the tabItem ids below ---
                 dashboardSidebar(
                   tags$style(HTML(".main-sidebar{width: 250px;}")),
                   sidebarMenu(
                     style = "position:fixed; width: 250px;", # inherit, 13%
                     menuItem("Home", tabName = "Home", icon = icon("home")),
                     menuItem("Appliance energy predictions", tabName = "Scripts", icon = icon("code"),
                              menuSubItem(" Data cleansing", tabName = "Scripts_Data_Cleansing", icon = icon("code")),
                              menuSubItem(" Machine Learning", tabName = "Scripts_AllAlgorithmsR", icon = icon("r-project")),
                              menuSubItem(" Machine Learning", tabName = "Scripts_AllAlgorithmsPy", icon = icon("python")),
                              menuSubItem(" Visualization", tabName = "Visualization", icon = icon("chart-line")),
                              menuSubItem(" Deep Learning", tabName = "Scripts_ANNsPy", icon = icon("python")))
                   )),
                 dashboardBody(
                   tags$script(HTML("$('body').addClass('fixed');")),
                   tabItems(
                     # --- Home: about-me text and data description ---
                     tabItem("Home",
                             fluidRow(
                               column(width = 1),
                               column(width = 10,
                                      p("About me", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("I am a motivated, team-work oriented, and responsible Data Scientist and Water Manager with seven years of practical experience in Machine
           Learning and predictive modeling of complex data, which includes handling the whole pipeline from preprocessing of data tothe visualization of results.
           I am highly proficient in SQL and in programming languages such as R and Python, and have an open mind-set and a commitment to continuous learning and
           professional development.", style= "text-align: justify; font-size: 18px;"),
                                      p("For the last seven years, I have been working at the Leibniz-Institute of Freshwater Ecology and Inland Fisheries where I have been responsible
           for analysis of big data using Machine and Deep Learning tools to assess predictive power of featureparameters or influencing factors and predict the
           potential future scopes of inland waters with implications for environmental management. I have also been responsible for contributing to talking about
           data and thriving on explaining the meaning of data by providing easily interpretable outcomes (e.g., interactive dashboards, reports and deliverables)
           for either my fellow colleagues or managers and non-technical stakeholders.", style= "text-align: justify; font-size: 18px;"),
                                      p("I have the initiative, enthusiasm and willingness to learn new skills and keep abreast of and promote new developments in the appropriate
           professional field. Thanks to my migration background and my experience of studying and working in different countries (Iran, The Netherlands
           and Germany) I have become very flexible and able to adapt to differentworking environments while communicating in different languages, which
           I really enjoy.", style= "text-align: justify; font-size: 18px;"),
                                      p("Here I work on a variety of kaggle competitions and would like to distribute my competitive solutions, so people might come up with potential
           ways of solving real-life tasks. The good news is that I anounce my solutions in both R and Python environments. Yaaaay!",
                                        style= "text-align: justify; font-size: 18px;"),
                                      p("Data", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("Appliances energy prediction", style = "font-size: 20px; ; font-style: italic; font-weight:bold;"),
                                      p("The first project I am working on is the Appliances Energy Use (AEU). The AEU includes high resolution data that is recorded at 10-minute
           frequencies for roughly 4.5 months. The recorded feature parameters are temperature (11 parameters), relative humidity (10 parameters), lights,
           wind speed, precipitation, and visibility. According to the project descriptions, the house temperature and humidity conditions were monitored
           with a ZigBee wireless sensor network. Each wireless node transmitted the temperature and humidity conditions around 3.3 min. Then, the wireless
           data was averaged for 10 minutes periods. The energy data was logged every 10 minutes with m-bus energy meters. Weather from the nearest airport
           weather station (Chievres Airport, Belgium) was downloaded from a public data set from Reliable Prognosis (rp5.ru), and merged together with the
           experimental data sets using the date and time column. Two random variables have been included in the data set for testing the regression models
           and to filter out non predictive attributes (parameters). You can download the data ", tags$a(href="https://www.kaggle.com/loveall/appliances-energy-prediction", "here on Kaggle."),
                                        style= "text-align: justify; font-size: 18px;"),
                                      # FIX: style= was previously outside this p() call (applied to the column div);
                                      # it now styles the paragraph as intended.
                                      p("To follow the whole pipeline from preprocessing of data to the visualization of the results, I prepared an interractive shiny web application
           which includes all the scripts and visualized outcomes. I will update this read me file as well as the dashboard once I have solutions for further
           projects. Enjoy scrolling through the interractive dashboard!", style= "text-align: justify; font-size: 18px;"),
                                      p("R and Python scripts", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("If you are interested in the codes presented in this Shiny app, please download the scripts under my ", tags$a(href="https://github.com/KaranKakouei/KaranHub/", "GitHub repository."), style= "text-align: justify; font-size: 18px;")
                               )), column(width = 2)
                     ),
                     # --- Data cleansing: two sub-tabs (R and Python), each walking through the
                     #     cleansing pipeline with verbatim code outputs and result images ---
                     tabItem("Scripts_Data_Cleansing",
                             tabsetPanel(type = "tabs",
                                         tabPanel("Data cleansing in R",
                                                  fluidRow(
                                                    column(width = 1),
                                                    column(width = 10,
                                                           p("Data cleansing in R", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                                           p("Here I clean the experimental dataset to prepare it for creating regression models of appliances energy use in a low energy building.", style = "text-align: justify; font-size: 18px;"),
                                                           p("Load all necessary R libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_1", placeholder = TRUE),
                                                           p("Load your data set", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_2", placeholder = TRUE),
                                                           p("First overview of the data. We first take a look at the dimentions of our dataframe to get information on the potential number of feature parameters and observations' length", style = "text-align: justify; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_3", placeholder = TRUE),
                                                           p("The dataframe has 19735 rows, which means enough data is available for for our ML algorithms.
                       So we have 29 columns, which include a date, a response (Appliances), and 27 feature parameters.
                       Now we alphabetically sort the columns and check data structure.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_4", placeholder = TRUE),
                                                           p("Data structure shows that date and some of our feature parameters are loaded as character, which prevent us to describe summary of the data such as mean value of feature parameters.
                       So we convert the date into date format and all feature variables to integers.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_5", placeholder = TRUE),
                                                           p("If we check the data structures we can see that all columns are in the correct format now.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_6", placeholder = TRUE),
                                                           p("Now we check whether each column includes incomplete values or NULLs,
                       and find potential dulicates or the number of NAs per column. We then get the summary of each column for a more detailed overview of values per feature parameter.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_7", placeholder = TRUE),
                                                           p("So there is no NULLs, missing values, or duplicates in the dataframe!", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           p("", style = "font-size: 18px;"),
                                                           p("Feature parameters", style = "text-align: justify; font-size: 20px; font-weight:bold;"),
                                                           p("The number of feature parameters is rather large for such a dataset, so we should reduce them to prevent overfitting in our regression models.
                       First we can check for correlation between our features, and visualize data as a heatmap.", style = "ptext-align: justify; adding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_8", placeholder = TRUE),
                                                           tags$img(src = "DC_R_Heatmap.png", width = "100%", style="text-align: center;"),
                                                           p("A couple of feature parameters are highly co-correlated (|r| > 0.9). Examples are: T6 vs. T_out, T9 vs. T3/T5/T7, RH3 vs. RH4.
                       Either of these feature parameters can equally explain the variability in the response variable (i.e., Appliances),
                       but we need to decide wich one to remove. We further visualizea the co-corelated feature parameters to confirm and deal with them. In this figure,
                       we can see most of those features are highly co-correlated and show a very similar trend and a high variety of values.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_9", placeholder = TRUE),
                                                           tags$img(src = "DC_R_correlation_lines.png", width = "100%", style="text-align: center;"),
                                                           p("Beyond the cocorrelated feature parameters, some of these parameters are relatively unimportant for appliances, so can be removed from the data set prior to regression modelling.
                       To select which variables are unimpotant and deserve to be removed from the dataframe, we can run a random forest with 100 trees
                       and assess the relative influence of all feature parameters on Appliances.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_10", placeholder = TRUE),
                                                           tags$img(src = "DC_R_VarImp.png", width = "100%", style="text-align: center;"),
                                                           p("The co-correlated parameters with least relative influence can also be removed from the data set. These parameters are: T_out, T7, and RH4.
                       Furthermore, least important feature parameters such as the two random variables of 'rv1' and 'rv2', and the 'Visibility' can also be removed from the data set.
                       'Lights' has also lots of null values which can be removed too. After removing these variables we come up with a total number of 21 feature parameters",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_11", placeholder = TRUE),
                                                           p("Now we check the distribution of the appliances and all features",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_12", placeholder = TRUE),
                                                           tags$img(src = "DC_R_Histogram.png", width = "100%", style="text-align: center;"),
                                                           p("We can see that all the columns follow normal distribution except T9, RH_6, and wind speed. The appliances is also left-skewed",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           p("As the data was recorded at different times of the day or days of a week, and energy consumption (i.e. appliances)
                       might depend on these parameters such as weekdays versus weekends or noon versus evenings, we can add
                       (1) julian date to account for potential seasonality in data, and
                       (2) day of the week and hour of the day to account for time variability in energy consumption
                       You can pull out individual parts of the date with the accessor functions yday() (day of the year), wday() (day of the week), hour().
                       Before fitting our selected features to the models, we need to normalize our feature parameters.
                       Data normalization (here min-max scaling) enables us the opportunity to assess their relative
                       influences on appliances without s judged by their relatively small or large values", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCR_13", placeholder = TRUE),
                                                           p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                                           p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
                                                    )), column(width = 2)
                                         ),
                                         tabPanel("Data cleansing in Python",
                                                  fluidRow(
                                                    column(width = 1),
                                                    column(width = 10,
                                                           p("Data cleansing in Python", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                                           p("Here I clean the experimental dataset to prepare it for creating regression models of appliances energy use in a low energy building.", style = "text-align: justify; font-size: 18px;"),
                                                           p("Load all necessary Python libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_1", placeholder = TRUE),
                                                           p("Load the data set, sort the columns alphabetically and explore the data by checking its structure, dimentions,
                       number of feature parameters, and observations' length.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_2", placeholder = TRUE),
                                                           p("The dataframe has 19735 rows, which means enough data is available for for our ML algorithms.
                       So we have 29 columns, which include a date, a response (Appliances), and 27 feature parameters.",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           p("Now we check whether each column includes incomplete values or NULLs, and find potential dulicates or
                       the number of NAs per column.",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_3", placeholder = TRUE),
                                                           p("So there is no NULLs, missing values, or duplicates in the dataframe!", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           p("", style = "font-size: 18px;"),
                                                           p("Feature parameters", style = "text-align: justify; font-size: 20px; font-weight:bold;"),
                                                           p("The number of feature parameters is rather large for such a dataset, so we should reduce them to prevent overfitting in our regression models.
                       First we can check for correlation between our features, and visualize data as a heatmap.", style = "ptext-align: justify; adding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_4", placeholder = TRUE),
                                                           tags$img(src = "DC_Py_Heatmap.png", width = "100%", style="text-align: center;"),
                                                           p("A couple of feature parameters are highly co-correlated (|r| > 0.9). Examples are: T6 vs. T_out, T9 vs. T3/T5/T7, RH3 vs. RH4.
                       Either of these feature parameters can equally explain the variability in the response variable (i.e., Appliances),
                       but we need to decide wich one to remove. We further visualize the co-corelated feature parameters to confirm and deal with them. In this figure,
                       we can see most of those features are highly co-correlated and show a very similar trend and a high variety of values.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_5", placeholder = TRUE),
                                                           tags$img(src = "DC_Py_correlation_lines.png", width = "100%", style="text-align: center;"),
                                                           p("Beyond the cocorrelated feature parameters, some of these parameters are relatively unimportant for appliances, so can be removed from the data set prior to regression modelling.
                       To select which variables are unimpotant and deserve to be removed from the dataframe, we can run a random forest with 100 trees
                       and assess the relative influence of all feature parameters on Appliances.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_6", placeholder = TRUE),
                                                           tags$img(src = "DC_Py_VarImp.png", width = "100%", style="text-align: center;"),
                                                           p("The co-correlated parameters with least relative influence can also be removed from the data set. These parameters are: T_out, T7, and RH4.
                       Furthermore, least important feature parameters such as the two random variables of 'rv1' and 'rv2', and the 'Visibility' can also be removed from the data set.
                       'Lights' has also lots of zero values which can be removed too. After removing these variables we come up with a total number of 21 feature parameters.",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_7", placeholder = TRUE),
                                                           p("Now we check the distribution of the appliances and all features:",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_8", placeholder = TRUE),
                                                           tags$img(src = "DC_Py_Distribution.png", width = "100%", style="text-align: center;"),
                                                           p("We can see that all the columns follow normal distribution except T9, RH_6, and wind speed. The appliances is also left-skewed.",
                                                             style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           p("As the data was recorded at different times of the day or days of a week, and energy consumption (i.e. appliances)
                       might depend on these parameters such as weekdays versus weekends or noon versus evenings, we can add
                       (1) julian date to account for potential seasonality in data, and
                       (2) day of the week and hour of the day to account for time variability in energy consumption
                       You can pull out individual parts of the date with the accessor functions yday() (day of the year), wday() (day of the week), hour().
                       Before fitting our selected features to the models, we need to normalize our feature parameters.
                       Data normalization (here min-max scaling) enables us the opportunity to assess their relative
                       influences on appliances without s judged by their relatively small or large values", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                                           verbatimTextOutput("Scripts_DCPy_9", placeholder = TRUE),
                                                           p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                                           p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
                                                    )), column(width = 2)
                                         )
                             )
                     ),
                     # --- Machine learning in R ---
                     tabItem("Scripts_AllAlgorithmsR",
                             fluidRow(
                               column(width = 1),
                               column(width = 10,
                                      p("Apply Machine-/Deep Learing algorithms in R", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("Here we apply five Machine Learning and one Deep Learning algorithms as promising tools to assess the predictability
                       of appliances energy consumption as our response variable. These six algorithms are from different categories.
                       We will be looking at the following algorithms and categories:
                       KNeighborsRegressor (KNN) from Nearest neighbour Regression, Support Vector Regressor (SVR) from support vector machines,
                       Random Forests (RF), Boosted Regression Trees (BRT), and Extra Trees Regression (ETR) from ensamble models,
                       and Artificial Neural Networks (ANN) from neural network models.", style = "text-align: justify; font-size: 18px;"),
                                      p("Import all necessary R-libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_AllAlgorithmsR_1", placeholder = TRUE),
                                      p("Load your data:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_AllAlgorithmsR_2", placeholder = TRUE),
                                      p("Run a loop over the data to split the data set randomly into 75% train and 25% test sets for the 10-fold cross-validations and apply all six algorithms on each split
                       to evaluate predictability of appliances energy consumption:",
                                        style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_AllAlgorithmsR_3", placeholder = TRUE),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
                               )), column(width = 2)
                     ),
                     # --- Machine learning in Python ---
                     tabItem("Scripts_AllAlgorithmsPy",
                             fluidRow(
                               column(width = 1),
                               column(width = 10,
                                      p("Apply Machine-/Deep Learing algorithms in Python", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("Here we apply five Machine Learning and one Deep Learning algorithms as promising tools to assess the predictability
                       of appliances energy consumption as our response variable. These six algorithms are from different categories.
                       We will be looking at the following algorithms and categories:
                       KNeighborsRegressor (KNN) from Nearest neighbour Regression, Support Vector Regressor (SVR) from support vector machines,
                       Random Forests (RF), Boosted Regression Trees (BRT), and Extra Trees Regression (ETR) from ensamble models,
                       and Artificial Neural Networks (ANN) from neural network models.", style = "text-align: justify; font-size: 18px;"),
                                      p("Import all necessary Python libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_AllAlgorithmsPy_1", placeholder = TRUE),
                                      p("Load your data:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_AllAlgorithmsPy_2", placeholder = TRUE),
                                      p("Print data column names for the next step to assemble features.", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      #p("Transfer dataframe to array and check output's schema. Then select the final data (i.e., your response variable and
                                      #feature parameters) to be used in the subsequent ML steps", style = "padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_AllAlgorithmsPy_3", placeholder = TRUE),
                                      p("Run a loop over the data to split the data for 10-fold cross-validation and apply RF algorithm on each split:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_AllAlgorithmsPy_4", placeholder = TRUE),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
                               )), column(width = 2)
                     ),
                     # --- Visualization: cross-platform model-performance comparison ---
                     tabItem("Visualization",
                             fluidRow(
                               column(width = 1),
                               column(width = 10,
                                      p("R and python model performances", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("The 10-fold cross-validation results show that the python models outperformed R models. This might be resulted from
                       model structures that varies across the platforms.", style = "text-align: justify; font-size: 18px;"),
                                      tags$img(src = "DV_R_Py_Final_comparisons.png", width = "100%", style="text-align: center;"),
                                      p("SVR in Python and ANN in R were the worst performing models with R scores of 0.02 and 0.05, and median absolute errors (MedAE) of 13.3 and 50.5, respectively.
                       In contrast, the two algorithms of Extra Trees Regression (R scores: 0.62 in python and 0.56 in R) and Random Forests (R scores: 0.58 in python and 0.54 in R)
                       outperformed all other algorithms. However, this does not mean that other tools such as Deep Learning (ANNs) are less powerfull than the other algorithms. In the next step,
                       we build complex ANNs that can perform as good as Random Forest and Extra Trees Regression algorithms, without being prone to overfitting.
                       You can find the ANN scripts under the 'Deep Learning' tab in the side bar.
                       ", style = "text-align: justify; padding-top: 20px; font-size: 18px;"),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
                               )), column(width = 2)
                     ),
                     # --- Deep learning (ANNs) in Python ---
                     tabItem("Scripts_ANNsPy",
                             fluidRow(
                               column(width = 1),
                               column(width = 10,
                                      p("Deep Learning (Artificial Neural Networks) in Python", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("Here we use Random Forests as a promising Machine Learning algorithm to assess the predictability of our response variable, and evaluate the relative influence of feature parameters.",
                                        style = "text-align: justify; font-size: 18px;"),
                                      p("Import PySpark and all necessary Python libraries:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_ANNsPy_1", placeholder = TRUE),
                                      p("Load your data, convert it into array, and define your response (Y) and feature (X) parameters:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_ANNsPy_2", placeholder = TRUE),
                                      p("Run a loop over the data to split the data for 10-fold cross-validation and apply ANNs algorithm on each split. At the same time, save evaluate the best models of each
                       of each split and save the results as csv-file :", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_ANNsPy_3", placeholder = TRUE),
                                      p("To track loss values online, you should follw the following steps:", style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      verbatimTextOutput("code_ANNsPy_4", placeholder = TRUE),
                                      tags$img(src = "ANNs_loss_val-los_tensorboard.png", width = "100%", style="text-align: center;"),
                                      p("The evaluation results show that the appliances energy consumption can also be predicted using the Artificial Neural Networks as good as Random Forest and
                       Extra Trees regression algorithms. The performance of ANNs will become better once changing the model structure by adapting the number of hidden layers and neurons.
                       In the figure below, we can see that the model performances relatively improved while increasing the number of hidden layers and neurons.",
                                        style = "text-align: justify; padding-top: 20px; font-size: 18px;"),
                                      tags$img(src = "ANNs_Final_comparisons.png", width = "100%", style="text-align: center;"),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("The figure shows that the model with 4096 neurons and 5 hidden layers outperformed all other models, even those with larger number of neurons or hidden layers.
                       This means that increasing the number neurons or hidden layers depend on many factors such as length of the time-series that need to be taken into acount. Furthermore,
                       we need to take care of potential overfitting problems once increasing ANN model complexity. In my code, I have just skipped a model if it was performing worse than
                       any previous models, and in case of overfitting, the model was neither saved in the directory nor evaluated for our final results shown in the figure above.
                       To avoid overfitted models, we might test which combinations result in a better R score or lesser median absolute error. This is possible if you apply the above-written
                       loop on your data. I wish you much success!",
                                        style = "text-align: justify; padding-top: 10px; font-size: 18px;"),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;"),
                                      p("", style = "padding-top: 10px; font-size: 30px; font-weight:bold;")
                               )), column(width = 2)
                     )
                   )
                 )
  )
|
# plot2.R -- plot Global Active Power over 1-2 Feb 2007 and save it as plot2.png.
# Read data; fields are ';'-separated and '?' marks missing values.
data <- read.csv2("household_power_consumption.txt", sep = ";", header = TRUE, fill = TRUE, na.strings = "?")
# Convert the Date column from character to Date
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Keep only the two days of interest
data <- subset(data, Date >= "2007-02-01" & Date <= "2007-02-02")
# Columns 3:9 hold the power/energy measurements; coerce them to numeric
for (i in 3:9) {
  data[, i] <- as.numeric(as.character(data[, i]))
}
# Create a single timestamp column. as.POSIXct is used instead of strptime
# because strptime returns POSIXlt, which is awkward as a data-frame column.
data$Date_Time <- as.POSIXct(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")
# Plot 2: step plot of global active power against time
png(filename = "plot2.png", width = 480, height = 480, units = "px", bg = "white")
# The C locale keeps the weekday axis labels in English regardless of system locale.
Sys.setlocale("LC_TIME", "C")
# (The original passed xaxt=NULL, which is a no-op; the default axis is drawn.)
plot(data$Date_Time, data$Global_active_power,
     xlab = "", ylab = "Global Active Power (kilowatts)", type = "n")
lines(data$Date_Time, data$Global_active_power, type = "S")
dev.off()
|
/plot2.R
|
no_license
|
cschnider/ExData_Plotting1
|
R
| false
| false
| 861
|
r
|
# Duplicate copy of plot2.R: reads the household power data, subsets two days,
# and writes a step plot of Global Active Power to plot2.png.
#Read data
data <- read.csv2("household_power_consumption.txt", sep = ";",header=T, fill=TRUE, na.strings='?')
#convert data columns
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
#subset data
data <- subset(data,Date>="2007-02-01" & Date<="2007-02-02")
# Convert columns to numeric (columns 3:9 are the measurement columns)
for(i in c(3:9)) {data[,i] <- as.numeric(as.character(data[,i]))}
# Create Date_Time variable
data$Date_Time <- paste(data$Date, data$Time)
# Convert Date_Time variable to proper format
# NOTE(review): strptime() returns POSIXlt; as.POSIXct() is the safer choice
# for a data-frame column, though plotting works either way.
data$Date_Time <- strptime(data$Date_Time, format="%Y-%m-%d %H:%M:%S")
#Plot 2
png(filename = "plot2.png", width = 480, height = 480, units = "px", bg = "white")
# The C locale keeps axis labels in English regardless of the system locale.
Sys.setlocale("LC_TIME", "C")
# xaxt=NULL is a no-op (NULL means "use the default axis").
plot(data$Date_Time, data$Global_active_power, xaxt=NULL, xlab = "", ylab = "Global Active Power (kilowatts)", type="n")
lines(data$Date_Time, data$Global_active_power, type="S")
dev.off()
|
# Read BioLector data
# Have working directory set to investigation folder
# Collect everything describing this assay in a single named list `dat`.
dat <- list()
# Define data type
dat$data_type <- "metabolite_measurements"
# Define dataset study token (study SDY010 / MetabG; no sub-study)
dat$study_id <- "SDY010"
dat$study_token <- "MetabG"
dat$study_sub <- NA
# Define glucose as carbon source for plot descriptions
dat$CSource <- "glucose"
dat$CSourceRead <- "glc"
# Define paths and locations of the measurement and metadata files
DATPATH <- "01_studies/SDY010_MetabG_metabolites_G/ASY004_metabolite_glucose_glycogen"
data.file <- file.path(DATPATH, "20201027_metabolite_glucose_glycogen_best_gain.CSV")
meta.file <- file.path(DATPATH, "20201027_metabolite_glucose_glycogen_METADATA.csv")
# Read measurement data; the first 15 lines are an instrument header, so skip them
dat$data <- read.csv(data.file, skip = 15, header = TRUE)
# Read metadata
dat$metadata <- read.csv(meta.file)
|
/01_studies/SDY010_MetabG_metabolites_G/ASY004_metabolite_glucose_glycogen/read_data.R
|
no_license
|
pfennigt/e_coli_growth
|
R
| false
| false
| 795
|
r
|
# Duplicate copy of read_data.R: assembles a named list `dat` describing one
# BioLector assay (study tokens, carbon source, file paths, data, metadata).
# Read BioLector data
# Have working directory set to investigation folder
# Save data in a list
dat <- list()
#Define data type
# NOTE(review): this file mixes `=` and `<-` for assignment; `<-` is conventional.
dat$data_type = "metabolite_measurements"
# Define dataset study token
dat$study_id <- "SDY010"
dat$study_token <- "MetabG"
dat$study_sub <- NA
# Define glucose as carbon source for plot descriptions
dat$CSource = "glucose"
dat$CSourceRead = "glc"
# Define paths and locations
DATPATH <- "01_studies/SDY010_MetabG_metabolites_G/ASY004_metabolite_glucose_glycogen"
data.file <- file.path(DATPATH, "20201027_metabolite_glucose_glycogen_best_gain.CSV")
meta.file <- file.path(DATPATH, "20201027_metabolite_glucose_glycogen_METADATA.csv")
# Read weight data (first 15 lines are an instrument header, hence skip = 15)
dat$data <- read.csv(data.file, skip = 15, header=T)
# Read metadata
dat$metadata <- read.csv(meta.file)
|
\name{settler.survival}
\alias{settler.survival}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Settler-recruit survival function
}
\description{
Computes the density-dependent probability that a settler survives to recruitment; used internally by \code{sim.metapopgen}.
}
\usage{
settler.survival(S,a)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{S}{
Number of settlers (scalar)
}
\item{a}{
Parameter defining the strength of density-dependence (scalar)
}
}
\details{
This function is not needed by the user. It is used by the sim.metapopgen function to calculate density-dependent recruitment. The density-dependent function is \code{y = 1 / (1 + a * S)}
}
\value{
A probability of survival
}
\references{
}
\author{
}
\note{
}
\seealso{
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/settler.survival.Rd
|
no_license
|
MarcoAndrello/MetaPopGen_0.0.8
|
R
| false
| false
| 897
|
rd
|
\name{settler.survival}
\alias{settler.survival}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Settler-recruit survival function
}
\description{
Computes the density-dependent probability that a settler survives to recruitment; used internally by \code{sim.metapopgen}.
}
\usage{
settler.survival(S,a)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{S}{
Number of settlers (scalar)
}
\item{a}{
Parameter defining the strength of density-dependence (scalar)
}
}
\details{
This function is not needed by the user. It is used by the sim.metapopgen function to calculate density-dependent recruitment. The density-dependent function is \code{y = 1 / (1 + a * S)}
}
\value{
A probability of survival
}
\references{
}
\author{
}
\note{
}
\seealso{
}
\examples{
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
\name{exportPLINKSet}
\alias{exportPLINKSet}
\docType{package}
\title{
exportPLINKSet
}
\description{
Simple function using Rcpp to write the gene set to a file in the PLINK set format.
}
\usage{
exportPLINKSet(geneSets, fname)
}
\arguments{
\item{geneSets}{An object created by the \code{makeGeneSet()} function.
}
\item{fname}{The name of the PLINK file to be created.
}
}
\value{
A Boolean indicating if the file was successfully written.
}
\seealso{
\code{\link{makeGeneSet}}
}
\examples{
# Please see the vignette or the package description
# for an example of using this function.
}
\keyword{ file }
|
/fuzzedpackages/snplist/man/exportPLINKSet.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 632
|
rd
|
\name{exportPLINKSet}
\alias{exportPLINKSet}
\docType{package}
\title{
exportPLINKSet
}
\description{
Simple function using Rcpp to write the gene set to a file in the PLINK set format.
}
\usage{
exportPLINKSet(geneSets, fname)
}
\arguments{
\item{geneSets}{An object created by the \code{makeGeneSet()} function.
}
\item{fname}{The name of the PLINK file to be created.
}
}
\value{
A Boolean indicating if the file was successfully written.
}
\seealso{
\code{\link{makeGeneSet}}
}
\examples{
# Please see the vignette or the package description
# for an example of using this function.
}
\keyword{ file }
|
### Null model simulation runs
###
# Runs replicate GMSE simulations under the "nullModel" parameter set, saving
# per-simulation results (.Rds) and summary figures into a timestamped directory.
# NOTE(review): rm(list=ls()) wipes the caller's workspace; avoid in shared scripts.
rm(list=ls())
library(GMSE)
library(scales)
library(RColorBrewer)
# Project helpers (plotting, extraction, truncation) and global parameters.
source("helpers.R")
source("gmse_apply_helpers.R")
source("sims/global_paras.R")
###################################
###################################
###################################
sim_set_name = "nullModel"
##########################
##############################
##################################
###################################
# Scenario-specific parameter list `gmse_paras`.
# NOTE(review): `sims` and `years` used below are presumably defined by one of
# the sourced parameter files — confirm.
source(sprintf("sims/%s/paras_%s.R", sim_set_name, sim_set_name))
### Set output dir
outdir = sprintf("sims/%s/out/",sim_set_name)
### Create output identifier (date/time string)
outidx = gsub(":", "", gsub(" ", "", gsub("-", "",Sys.time())))
### Create outpath
outpath = sprintf("%s%s_%s/", outdir, outidx, sim_set_name)
### Create output dir
dir.create(outpath)
### Save current parameters to output dir (note Rdata file to separate from Rds sim files)
save(gmse_paras, file=sprintf("%sparas_%s_%s.Rdata", outpath, outidx, sim_set_name))
### Init empty list
res = list()
# Main simulation loop: one GMSE replicate per `sim`, advanced year by year.
for (sim in seq_len(sims)) {
  # One slot per year; years never reached keep NA or an extinction flag.
  res_year <- as.list(rep(NA, years))
  # Initialise the model state for this replicate from the saved parameter list.
  sim_old <- gmse_apply(get_res = gmse_paras$get_res,
                        land_dim_1 = gmse_paras$land_dim_1,
                        land_dim_2 = gmse_paras$land_dim_2,
                        land_ownership = gmse_paras$land_ownership,
                        tend_crops = gmse_paras$tend_crops,
                        scaring = gmse_paras$scaring,
                        remove_pr = gmse_paras$remove_pr,
                        lambda = gmse_paras$lambda,
                        res_death_K = gmse_paras$res_death_K,
                        RESOURCE_ini = gmse_paras$RESOURCE_ini,
                        manage_target = gmse_paras$manage_target,
                        res_death_type = gmse_paras$res_death_type,
                        manager_budget = gmse_paras$manager_budget,
                        user_budget = gmse_paras$user_budget,
                        public_land = gmse_paras$public_land,
                        stakeholders = gmse_paras$stakeholders,
                        res_consume = gmse_paras$res_consume,
                        observe_type = gmse_paras$observe_type,
                        agent_view = gmse_paras$agent_view,
                        agent_move = gmse_paras$agent_move,
                        converge_crit = gmse_paras$converge_crit,
                        ga_mingen = gmse_paras$ga_mingen)
  for (year in seq_len(years)) {
    # gmse_apply() signals extinction (and some observation failures) as errors.
    sim_new <- try(gmse_apply(get_res = "Full", old_list = sim_old), silent = TRUE)
    if (inherits(sim_new, "try-error")) {
      # Classify the failure, flag all remaining years, then move to the next sim.
      if (grepl("Extinction", sim_new[1])) {
        print("True extinction, skipping to next sim.")
        res_year[year:years] <- "Extinction (true)"
      } else if (grepl("Error in estimate_abundances", sim_new[1])) {
        print("Observed extinction, skipping to next sim.")
        res_year[year:years] <- "Extinction (observed)"
      } else {
        print("Observed extinction, skipping to next sim.")
        res_year[year:years] <- "Extinction (observed, other error)"
      }
      break
    } else {
      # Successful year: keep a truncated copy of the state and carry it forward.
      # NOTE(review): removed the stray token `sample_budgets_ManyPoorFewRich`
      # that followed `} else {` in the original and raised "object not found"
      # on every successful year.
      print(sprintf("Sim %d, year %d", sim, year))
      res_year[[year]] <- trunc_res(sim_new)
      sim_old <- sim_new
    }
  }
  res[[sim]] <- res_year
  # Save sim/year data
  saveRDS(res[[sim]], sprintf("%ssim%04d_%s_%s.Rds", outpath, sim, outidx, sim_set_name))
}
###
# Report list size before and after discarding the bulky per-year arrays.
format(object.size(res), units = "auto")
for (i in seq_len(sims)) {
  for (j in seq_len(years)) {
    # Extinction years were stored as character flags, not lists; skip them.
    # (`$<-` on an atomic vector is an error: "$ operator is invalid for
    # atomic vectors", which crashed the original loop after any extinction.)
    if (is.list(res[[i]][[j]])) {
      res[[i]][[j]]$LAND <- NULL
      res[[i]][[j]]$resource_array <- NULL
      res[[i]][[j]]$observation_array <- NULL
      res[[i]][[j]]$manager_array <- NULL
      res[[i]][[j]]$user_array <- NULL
    }
  }
}
format(object.size(res), units = "auto")
# Common y-axis limits for resource/observation plots (bufRange pads the range).
y_lims <- c(bufRange(min(extract_gmse(res, "resources"), na.rm = TRUE), end = "lo"),
            bufRange(max(extract_gmse(res, "resources"), na.rm = TRUE), end = "hi"))
par(mfrow = c(1, 1))
# NOTE(review): figures are written into `outpath`, the directory actually
# created by dir.create() above; the original built "outdir/outidx/" (missing
# the "_simsetname" suffix), a directory that never existed on disk.
tiff(sprintf("%sresources.tiff", outpath))
plot_resource(res, type = "resources", sumtype = "none", ylim = y_lims)
dev.off()
tiff(sprintf("%sobservations.tiff", outpath))
plot_resource(res, type = "observations", sumtype = "none", ylim = y_lims)
dev.off()
tiff(sprintf("%sactions.tiff", outpath))
plot_actions(res, type = "mean")
dev.off()
tiff(sprintf("%syield.tiff", outpath))
plot_yield(res, type = "all")
dev.off()
|
/sims/nullModel/index_nullModel.R
|
no_license
|
jejoenje/gmse_vary
|
R
| false
| false
| 4,516
|
r
|
### Null model simulation runs
###
# Duplicate copy of the null-model simulation script; code left byte-identical,
# with review notes added inline.
rm(list=ls())
library(GMSE)
library(scales)
library(RColorBrewer)
source("helpers.R")
source("gmse_apply_helpers.R")
source("sims/global_paras.R")
###################################
###################################
###################################
sim_set_name = "nullModel"
##########################
##############################
##################################
###################################
source(sprintf("sims/%s/paras_%s.R", sim_set_name, sim_set_name))
### Set output dir
outdir = sprintf("sims/%s/out/",sim_set_name)
### Create output identifier (date/time string)
outidx = gsub(":", "", gsub(" ", "", gsub("-", "",Sys.time())))
### Create outpath
outpath = sprintf("%s%s_%s/", outdir, outidx, sim_set_name)
### Create output dir
dir.create(outpath)
### Save current parameters to output dir (note Rdata file to separate from Rds sim files)
save(gmse_paras, file=sprintf("%sparas_%s_%s.Rdata", outpath, outidx, sim_set_name))
### Init empty list
res = list()
for(sim in 1:sims) {
  res_year = as.list(rep(NA, years))
  # Initialise the replicate's model state from the saved parameter list.
  sim_old <- gmse_apply(get_res = gmse_paras$get_res,
                        land_dim_1 = gmse_paras$land_dim_1,
                        land_dim_2 = gmse_paras$land_dim_2,
                        land_ownership = gmse_paras$land_ownership,
                        tend_crops = gmse_paras$tend_crops,
                        scaring = gmse_paras$scaring,
                        remove_pr = gmse_paras$remove_pr,
                        lambda = gmse_paras$lambda,
                        res_death_K = gmse_paras$res_death_K,
                        RESOURCE_ini = gmse_paras$RESOURCE_ini,
                        manage_target = gmse_paras$manage_target,
                        res_death_type = gmse_paras$res_death_type,
                        manager_budget = gmse_paras$manager_budget,
                        user_budget = gmse_paras$user_budget,
                        public_land = gmse_paras$public_land,
                        stakeholders = gmse_paras$stakeholders,
                        res_consume = gmse_paras$res_consume,
                        observe_type = gmse_paras$observe_type,
                        agent_view = gmse_paras$agent_view,
                        agent_move = gmse_paras$agent_move,
                        converge_crit = gmse_paras$converge_crit,
                        ga_mingen = gmse_paras$ga_mingen)
  for(year in 1:years) {
    # gmse_apply() signals extinction (and some observation failures) as errors.
    sim_new = try({gmse_apply(get_res = "Full", old_list = sim_old)}, silent = T)
    if(class(sim_new)=="try-error") {
      if(grepl("Extinction", sim_new[1])) {
        print(sprintf("True extinction, skipping to next sim."))
        res_year[year:years] = "Extinction (true)"
        break()
      } else {
        if(grepl("Error in estimate_abundances", sim_new[1])) {
          print(sprintf("Observed extinction, skipping to next sim."))
          res_year[year:years] = "Extinction (observed)"
          break()
        } else {
          print(sprintf("Observed extinction, skipping to next sim."))
          res_year[year:years] = "Extinction (observed, other error)"
          break()
        }
      }
    } else {sample_budgets_ManyPoorFewRich
      # NOTE(review): the bare name on the line above looks like an editing
      # leftover; it raises "object 'sample_budgets_ManyPoorFewRich' not found"
      # on every successful year.
      print(sprintf("Sim %d, year %d", sim, year))
      res_year[[year]] = trunc_res(sim_new)
      sim_old <- sim_new
    }
  }
  res[[sim]] = res_year
  # Save sim/year data
  saveRDS(res[[sim]], sprintf("%ssim%04d_%s_%s.Rds", outpath, sim, outidx, sim_set_name))
}
###
format(object.size(res), units = "auto")
# NOTE(review): if any sim ended in extinction, res[[i]][[j]] is a character
# flag and `$<-` below errors ("$ operator is invalid for atomic vectors").
for(i in 1:sims) {
  for(j in 1:years) {
    res[[i]][[j]]$LAND = NULL
    res[[i]][[j]]$resource_array = NULL
    res[[i]][[j]]$observation_array = NULL
    res[[i]][[j]]$manager_array = NULL
    res[[i]][[j]]$user_array = NULL
  }
}
format(object.size(res), units = "auto")
y_lims = c(bufRange(min(extract_gmse(res, "resources"), na.rm=T), end = "lo"),
           bufRange(max(extract_gmse(res, "resources"), na.rm=T), end = "hi"))
par(mfrow=c(1,1))
# NOTE(review): these tiff paths use "outdir/outidx/" but only outpath
# ("outdir/outidx_simsetname/") was created by dir.create() above — confirm.
tiff(sprintf("%s%s/resources.tiff", outdir, outidx))
plot_resource(res, type="resources", sumtype = "none", ylim = y_lims)
dev.off()
tiff(sprintf("%s%s/observations.tiff", outdir, outidx))
plot_resource(res, type="observations", sumtype = "none", ylim = y_lims)
dev.off()
tiff(sprintf("%s%s/actions.tiff", outdir, outidx))
plot_actions(res, type = "mean")
dev.off()
tiff(sprintf("%s%s/yield.tiff", outdir, outidx))
plot_yield(res, type = "all")
dev.off()
|
# CLEAN DATA
# DATA ANALYSIS
# NOTE(review): rm(list = ls()) wipes the caller's workspace; avoid in shared scripts.
rm(list = ls())
# LIBRARIES USED
library(psych)
library(readr)
library(dplyr)
# IMPLEMENTED FUNCTIONS
# Clear the console by emitting a form-feed character.
clc <- function() {
  cat("\f")  # "\f" is the same byte as "\014"
}
# Count, for each of the five subject areas, how many rows of `dataframe`
# achieve their maximum score in that area. Ties go to the earlier column,
# matching which.max() in the original row-by-row loop.
# Returns a numeric vector of length 5 (lectura, matematicas, c. naturales,
# sociales, ingles). Vectorized with max.col()/tabulate() instead of an
# R-level loop; also handles a zero-row data frame (the original's
# seq(1, dim(...)[1]) crashed on empty input).
maxs <- function(dataframe){
  score_cols <- c("PUNT_LECTURA_CRITICA", "PUNT_MATEMATICAS", "PUNT_C_NATURALES",
                  "PUNT_SOCIALES_CIUDADANAS", "PUNT_INGLES")
  scores <- as.matrix(dataframe[, score_cols])
  # ties.method = "first" reproduces which.max(), which picks the first maximum.
  best <- max.col(scores, ties.method = "first")
  as.numeric(tabulate(best, nbins = length(score_cols)))
}
################################################################################
# Clear the console before starting the analysis.
clc()
# DATASET 2019-II
# NOTE(review): absolute home-directory paths; these reads only work on the
# author's machine.
datos_2019_2 = read.csv(file.path("/home/oscarvch03/Desktop/Proyecto_Estadistica", "Saber_11__2019-2.csv"))
# Drop rows with no residence department, then remove remaining NA rows.
datos_2019_2_clc = na.omit(data.frame(filter(datos_2019_2, ESTU_DEPTO_RESIDE != "")))
head(datos_2019_2_clc)
len19 = length(datos_2019_2_clc$ESTU_GENERO)
# DATASET 2015-II
datos_2015_2 = read.csv(file.path("/home/oscarvch03/Desktop/Proyecto_Estadistica", "Saber_11__2015-2.csv"))
datos_2015_2_clc = na.omit(data.frame(filter(datos_2015_2, ESTU_DEPTO_RESIDE != "-")))
head(datos_2015_2_clc)
len15 = length(datos_2015_2_clc$ESTU_GENERO)
################################################################################
# 1) DESCRIPTIVE ANALYSIS OF THE DATA ##########################################
# a) DISTRIBUTION BY DEPARTMENT
par(mar=c(10, 6, 4, 4))
a = datos_2015_2_clc$ESTU_DEPTO_RESIDE
barplot(table(a)[2:length(summary(a))], ylab = "Frecuencia", las = 3,
        main = "Departamentos 2015-II", col = rainbow(length(summary(a))))
b = datos_2019_2_clc$ESTU_DEPTO_RESIDE
barplot(table(b)[2:length(summary(b))], ylab = "Frecuencia", las = 3,
        main = "Departamentos 2019-II", col = rainbow(length(summary(b))))
# b) DISTRIBUTION BY GENDER
# NOTE(review): summary() only yields per-level counts for factor columns; in
# R >= 4.0 read.csv() returns character columns, for which summary() gives
# length/class instead — confirm the columns are factors, or use table().
x1 = summary(datos_2015_2_clc$ESTU_GENERO)
labels1 = round(100 * x1 / sum(x1), 2)
leg1 = names(summary(datos_2015_2_clc$ESTU_GENERO))
pie(x1, labels = labels1, radius = 1.4, main = "DISTRIBUCION POR GENERO 2015-II", col = rainbow(length(x1)))
legend("topright", leg1, cex = 0.8, fill = rainbow(length(x1)))
x2 = summary(datos_2019_2_clc$ESTU_GENERO)
labels2 = round(100 * x2 / sum(x2), 2)
leg2 = names(summary(datos_2019_2_clc$ESTU_GENERO))
pie(x2, labels = labels2, radius = 1.4, main = "DISTRIBUCION POR GENERO 2019-II", col = rainbow(length(x2)))
legend("topright", leg2, cex = 0.8, fill = rainbow(length(x2)))
# c) DISTRIBUTION BY SOCIOECONOMIC STRATUM
x3 = summary(datos_2015_2_clc$FAMI_ESTRATOVIVIENDA)
# Drop the first level (empty/blank category) before computing percentages.
x3_n = x3[2:length(x3)]
labels3 = round(100 * x3_n/ sum(x3_n), 2)
leg3 = names(summary(datos_2015_2_clc$FAMI_ESTRATOVIVIENDA))[2:length(x3)]
pie(x3_n, labels = labels3, radius = 1.4, main = "DISTRIBUCION POR ESTRATO 2015-II", col = rainbow(length(x3_n)))
legend("topright", leg3, cex = 0.8, fill = rainbow(length(x3_n)))
x4 = summary(datos_2019_2_clc$FAMI_ESTRATOVIVIENDA)
x4_n = x4[3:8]
labels4 = round(100 * x4_n / sum(x4_n), 2)
leg4 = names(summary(datos_2019_2_clc$FAMI_ESTRATOVIVIENDA))[3:8]
# NOTE(review): the main title says "GENERO" but this panel shows stratum —
# looks like a copy-paste slip in the runtime string; left unchanged here.
pie(x4_n, labels = labels4, radius = 1.4, main = "DISTRIBUCION POR GENERO 2019-II", col = rainbow(length(x4_n)))
legend("topright", leg4, cex = 0.8, fill = rainbow(length(x4_n)))
# d) SCORES >= 300 AND SCORES < 300
may1 = data.frame(filter(datos_2015_2_clc, PUNT_GLOBAL >= 300))
men1 = data.frame(filter(datos_2015_2_clc, PUNT_GLOBAL < 300))
len1 = dim(may1)[1]
len2 = dim(men1)[1]
x5 = c(len1, len2)
labels5 = round(100 * x5 / sum(x5), 2)
leg5 = c("Puntajes >= 300", "Puntajes < 300")
pie(x5, labels = labels5, radius = 1.4, main = "PUNTAJES GLOBALES 2015-II", col = rainbow(length(x5)))
legend("topright", leg5, cex = 0.65, fill = rainbow(length(x5)))
may2 = data.frame(filter(datos_2019_2_clc, PUNT_GLOBAL >= 300))
men2 = data.frame(filter(datos_2019_2_clc, PUNT_GLOBAL < 300))
len3 = dim(may2)[1]
len4 = dim(men2)[1]
x6 = c(len3, len4)
labels6 = round(100 * x6 / sum(x6), 2)
leg6 = c("Puntajes >= 300", "Puntajes < 300")
pie(x6, labels = labels6, radius = 1.4, main = "PUNTAJES GLOBALES 2019-II", col = rainbow(length(x6)))
legend("topright", leg6, cex = 0.65, fill = rainbow(length(x6)))
################################################################################
# 4) CHI-SQUARE TESTS ##########################################################
# NOT RUN IN FULL, SINCE THE RESULTS ARE CONSIDERED EVIDENT
# a) SCORES BY AREA 2015-II VS 2019-II
# conts1_area = maxs(datos_2015_2_clc)
# Expected per-area counts — apparently precomputed with maxs() on the 2015-II
# data (see the commented call above); TODO confirm they are still current.
E1_i = c(100434, 124661, 84707, 99285, 132144)
p1_i = E1_i / sum(E1_i)
# NOTE(review): sampling from 2:len19 never selects row 1 — confirm intentional.
random = sample(2:len19, len15, replace = F)
new_2019_2 = datos_2019_2_clc[random, ]
# conts2_area = maxs(new_2019_2)
# Observed per-area counts — apparently precomputed with maxs() on the sample.
n1_i = c(209895, 148474, 49880, 35247, 97735)
# Hand-computed chi-square statistic alongside chisq.test() for comparison.
X1_2 = sum(((n1_i - E1_i) ^ 2) / E1_i)
proof1 = chisq.test(n1_i, p = p1_i)
gl1 = length(n1_i) - 1
pchisq(X1_2, df = gl1, lower.tail = FALSE)
qchisq(0.05, df = gl1, lower.tail = FALSE)
areas = c("LECTURA \n CRITICA", "MATEMATICAS", "CIENCIAS \n NATURALES",
          "SOCIALES \n CIUDADANAS", "INGLES")
labels7 = round(100 * E1_i/ sum(E1_i), 2)
pie(E1_i, labels = labels7, radius = 1.4, main = "DISTRIBUCION POR MEJOR MATERIA 2015-II", col = rainbow(length(E1_i)))
legend("topright", areas, cex = 0.7, fill = rainbow(length(E1_i)))
labels8 = round(100 * n1_i/ sum(n1_i), 2)
pie(n1_i, labels = labels8, radius = 1.4, main = "DISTRIBUCION POR MEJOR MATERIA 2019-II", col = rainbow(length(n1_i)))
legend("topright", areas, cex = 0.7, fill = rainbow(length(n1_i)))
|
/Analisis.r
|
no_license
|
Gabrri/PROYECTO-ESTADISTICA
|
R
| false
| false
| 5,517
|
r
|
# CLEAN DATA
# DATA ANALYSIS
# Duplicate copy of the Saber 11 analysis script; code left byte-identical.
rm(list = ls())
# LIBRARIES USED
library(psych)
library(readr)
library(dplyr)
# IMPLEMENTED FUNCTIONS
# Clear the console (form-feed character).
clc <- function(){
  cat("\014")
}
# Count, per subject area, how many rows achieve their maximum score there
# (ties resolved by which.max to the first area). Returns counts of length 5.
maxs <- function(dataframe){
  conts = c(0, 0, 0, 0, 0)
  k = seq(1, dim(dataframe)[1])
  for(i in k){
    # cat(i, "\n")
    row = c(dataframe[i, ]$PUNT_LECTURA_CRITICA, dataframe[i, ]$PUNT_MATEMATICAS,
            dataframe[i, ]$PUNT_C_NATURALES, dataframe[i, ]$PUNT_SOCIALES_CIUDADANAS,
            dataframe[i, ]$PUNT_INGLES)
    # cat(row, "\n")
    ind = which.max(row)
    # cat(ind, "\n")
    conts[ind] = conts[ind] + 1
  }
  return(conts)
}
################################################################################
clc()
# DATASET 2019-II
# NOTE(review): absolute home-directory paths; only valid on the author's machine.
datos_2019_2 = read.csv(file.path("/home/oscarvch03/Desktop/Proyecto_Estadistica", "Saber_11__2019-2.csv"))
datos_2019_2_clc = na.omit(data.frame(filter(datos_2019_2, ESTU_DEPTO_RESIDE != "")))
head(datos_2019_2_clc)
len19 = length(datos_2019_2_clc$ESTU_GENERO)
# DATASET 2015-II
datos_2015_2 = read.csv(file.path("/home/oscarvch03/Desktop/Proyecto_Estadistica", "Saber_11__2015-2.csv"))
datos_2015_2_clc = na.omit(data.frame(filter(datos_2015_2, ESTU_DEPTO_RESIDE != "-")))
head(datos_2015_2_clc)
len15 = length(datos_2015_2_clc$ESTU_GENERO)
################################################################################
# 1) DESCRIPTIVE ANALYSIS OF THE DATA ##########################################
# a) DISTRIBUTION BY DEPARTMENT
par(mar=c(10, 6, 4, 4))
a = datos_2015_2_clc$ESTU_DEPTO_RESIDE
barplot(table(a)[2:length(summary(a))], ylab = "Frecuencia", las = 3,
        main = "Departamentos 2015-II", col = rainbow(length(summary(a))))
b = datos_2019_2_clc$ESTU_DEPTO_RESIDE
barplot(table(b)[2:length(summary(b))], ylab = "Frecuencia", las = 3,
        main = "Departamentos 2019-II", col = rainbow(length(summary(b))))
# b) DISTRIBUTION BY GENDER
# NOTE(review): summary() only yields per-level counts for factor columns —
# with character columns (R >= 4.0 read.csv default) use table() instead.
x1 = summary(datos_2015_2_clc$ESTU_GENERO)
labels1 = round(100 * x1 / sum(x1), 2)
leg1 = names(summary(datos_2015_2_clc$ESTU_GENERO))
pie(x1, labels = labels1, radius = 1.4, main = "DISTRIBUCION POR GENERO 2015-II", col = rainbow(length(x1)))
legend("topright", leg1, cex = 0.8, fill = rainbow(length(x1)))
x2 = summary(datos_2019_2_clc$ESTU_GENERO)
labels2 = round(100 * x2 / sum(x2), 2)
leg2 = names(summary(datos_2019_2_clc$ESTU_GENERO))
pie(x2, labels = labels2, radius = 1.4, main = "DISTRIBUCION POR GENERO 2019-II", col = rainbow(length(x2)))
legend("topright", leg2, cex = 0.8, fill = rainbow(length(x2)))
# c) DISTRIBUTION BY SOCIOECONOMIC STRATUM
x3 = summary(datos_2015_2_clc$FAMI_ESTRATOVIVIENDA)
x3_n = x3[2:length(x3)]
labels3 = round(100 * x3_n/ sum(x3_n), 2)
leg3 = names(summary(datos_2015_2_clc$FAMI_ESTRATOVIVIENDA))[2:length(x3)]
pie(x3_n, labels = labels3, radius = 1.4, main = "DISTRIBUCION POR ESTRATO 2015-II", col = rainbow(length(x3_n)))
legend("topright", leg3, cex = 0.8, fill = rainbow(length(x3_n)))
x4 = summary(datos_2019_2_clc$FAMI_ESTRATOVIVIENDA)
x4_n = x4[3:8]
labels4 = round(100 * x4_n / sum(x4_n), 2)
leg4 = names(summary(datos_2019_2_clc$FAMI_ESTRATOVIVIENDA))[3:8]
pie(x4_n, labels = labels4, radius = 1.4, main = "DISTRIBUCION POR GENERO 2019-II", col = rainbow(length(x4_n)))
legend("topright", leg4, cex = 0.8, fill = rainbow(length(x4_n)))
# d) SCORES >= 300 AND SCORES < 300
may1 = data.frame(filter(datos_2015_2_clc, PUNT_GLOBAL >= 300))
men1 = data.frame(filter(datos_2015_2_clc, PUNT_GLOBAL < 300))
len1 = dim(may1)[1]
len2 = dim(men1)[1]
x5 = c(len1, len2)
labels5 = round(100 * x5 / sum(x5), 2)
leg5 = c("Puntajes >= 300", "Puntajes < 300")
pie(x5, labels = labels5, radius = 1.4, main = "PUNTAJES GLOBALES 2015-II", col = rainbow(length(x5)))
legend("topright", leg5, cex = 0.65, fill = rainbow(length(x5)))
may2 = data.frame(filter(datos_2019_2_clc, PUNT_GLOBAL >= 300))
men2 = data.frame(filter(datos_2019_2_clc, PUNT_GLOBAL < 300))
len3 = dim(may2)[1]
len4 = dim(men2)[1]
x6 = c(len3, len4)
labels6 = round(100 * x6 / sum(x6), 2)
leg6 = c("Puntajes >= 300", "Puntajes < 300")
pie(x6, labels = labels6, radius = 1.4, main = "PUNTAJES GLOBALES 2019-II", col = rainbow(length(x6)))
legend("topright", leg6, cex = 0.65, fill = rainbow(length(x6)))
################################################################################
# 4) CHI-SQUARE TESTS ##########################################################
# NOT RUN IN FULL, SINCE THE RESULTS ARE CONSIDERED EVIDENT
# a) SCORES BY AREA 2015-II VS 2019-II
# conts1_area = maxs(datos_2015_2_clc)
E1_i = c(100434, 124661, 84707, 99285, 132144)
p1_i = E1_i / sum(E1_i)
random = sample(2:len19, len15, replace = F)
new_2019_2 = datos_2019_2_clc[random, ]
# conts2_area = maxs(new_2019_2)
n1_i = c(209895, 148474, 49880, 35247, 97735)
X1_2 = sum(((n1_i - E1_i) ^ 2) / E1_i)
proof1 = chisq.test(n1_i, p = p1_i)
gl1 = length(n1_i) - 1
pchisq(X1_2, df = gl1, lower.tail = FALSE)
qchisq(0.05, df = gl1, lower.tail = FALSE)
areas = c("LECTURA \n CRITICA", "MATEMATICAS", "CIENCIAS \n NATURALES",
          "SOCIALES \n CIUDADANAS", "INGLES")
labels7 = round(100 * E1_i/ sum(E1_i), 2)
pie(E1_i, labels = labels7, radius = 1.4, main = "DISTRIBUCION POR MEJOR MATERIA 2015-II", col = rainbow(length(E1_i)))
legend("topright", areas, cex = 0.7, fill = rainbow(length(E1_i)))
labels8 = round(100 * n1_i/ sum(n1_i), 2)
pie(n1_i, labels = labels8, radius = 1.4, main = "DISTRIBUCION POR MEJOR MATERIA 2019-II", col = rainbow(length(n1_i)))
legend("topright", areas, cex = 0.7, fill = rainbow(length(n1_i)))
|
# Test case 87
# Regression test for the NSBM helper functions (slacks.transformation,
# nsbm.division, projection.frontier) on a small 3-DMU example with fixed,
# hand-computed expected matrices.
Input <- matrix(c(1,2,4,
                  1,2,4,
                  1,2,4), byrow = TRUE, nrow = 3);
Output <- matrix(c(3,4,
                   3,4,
                   3,4), byrow = TRUE, nrow = 3);
Link <- matrix(c(2,
                 2,
                 2), byrow = TRUE, nrow = 3);
K = 2; # number of divisions (NOTE(review): original comment said "3 divisions" but K = 2 — confirm)
N = 3; # Amount of DMUs
sum_m = 3; # Amount of inputs
sum_r = 2; # Amount of outputs
sum_l = 1; # Amount of Link variables
# Distinguish the Amount vector:
Amount = matrix(c(1,1,2,1,1), byrow=TRUE, nrow=1);
Amount_Input = c(1,2);
Amount_Output = c(1,1);
Amount_Link = c(1);
weights = matrix(c(0.5,0.5), byrow=TRUE, nrow=1);
direction = "non";
link_con = 1; # fix
return_to_scale = "CRS" ;
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
# Loading all the functions:
# NOTE(review): this relative setwd() chain is fragile — it assumes the test is
# launched from the tests/ directory and leaves the working directory changed
# if any step fails; consider here::here() or testthat helpers instead.
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 87",{
  # Slack_transformation: fixture weights and expected decomposition.
  weightsNSBM <- matrix(c( 1,2,2,2,2,2,2,3,3,4,4,4,
                           1,2,2,2,2,2,2,3,3,4,4,4,
                           1,2,2,2,2,2,2,3,3,4,4,4), byrow = TRUE, nrow = 3);
  t <- matrix(c(1,
                1,
                1), byrow = TRUE, nrow = 3);
  lambda <- matrix(c( 2,2,2,2,2,2,
                      2,2,2,2,2,2,
                      2,2,2,2,2,2), byrow = TRUE, nrow = 3);
  slack_plus <- matrix(c( 3,3,
                          3,3,
                          3,3), byrow = TRUE, nrow = 3);
  slack_minus <- matrix(c( 4,4,4,
                           4,4,4,
                           4,4,4), byrow = TRUE, nrow = 3);
  # nsbm_division: expected divisional efficiencies.
  DivEffNSBM <- matrix(c( -1.5,-2/7,
                          -1.5,-2/7,
                          -1.5,-2/7), byrow = TRUE, nrow = 3);
  # projection_frontier: expected projected inputs/outputs/links.
  Input_proj <- matrix(c( -3,-2,0,
                          -3,-2,0,
                          -3,-2,0), byrow = TRUE, nrow = 3);
  Output_proj <- matrix(c( 6,7,
                           6,7,
                           6,7), byrow = TRUE, nrow = 3);
  Link_proj <- Link;
  #########################################
  #########################################
  #########################################
  # slacks_transformation:
  expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$t, t, check.attributes = FALSE)
  expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$slack_plus, slack_plus, check.attributes = FALSE)
  expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$slack_minus, slack_minus, check.attributes = FALSE)
  # nsbm.division
  expect_equal(nsbm.division(direction, slack_plus, slack_minus, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, Link_obj), DivEffNSBM, check.attributes = FALSE)
  # projection.frontier:
  expect_equal(round(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Input_Proj,5), Input_proj, check.attributes = FALSE)
  expect_equal(round(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Output_Proj,3), Output_proj, check.attributes = FALSE)
  expect_equal(round(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Link_Proj,4), Link_proj, check.attributes = FALSE)
})
|
/2_nsbm_approach/Nsbm.function/tests/Test_case_87.R
|
no_license
|
thomaskrupa/thesis
|
R
| false
| false
| 3,843
|
r
|
# Test case 87
#
# Fixture for the NSBM (network slacks-based measure) helper functions:
# three identical DMUs so every expected matrix has three equal rows.
# Code below is unchanged; only comments were added or clarified.
Input <- matrix(c(1,2,4,
1,2,4,
1,2,4), byrow = TRUE, nrow = 3);
Output <- matrix(c(3,4,
3,4,
3,4), byrow = TRUE, nrow = 3);
Link <- matrix(c(2,
2,
2), byrow = TRUE, nrow = 3);
K = 2; # NOTE(review): original comment said "3 divisions" but K = 2 -- confirm whether K counts divisions or division links
N = 3; # Amount of DMUs
sum_m = 3; # Amount of inputs
sum_r = 2; # Amount of outputs
sum_l = 1; # Amount of Link variables
# Distinguish the Amount vector: how inputs/outputs/links split across divisions
Amount = matrix(c(1,1,2,1,1), byrow=TRUE, nrow=1);
Amount_Input = c(1,2);
Amount_Output = c(1,1);
Amount_Link = c(1);
weights = matrix(c(0.5,0.5), byrow=TRUE, nrow=1);
direction = "non"; # non-oriented model
link_con = 1; # link constraint treated as "fix"
return_to_scale = "CRS" ;
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
# Load all package functions by sourcing them from the sibling source tree.
# NOTE(review): this setwd() chain assumes the test is run from the package
# "tests" directory with "00_pkg_src/Nsbm.function/R" as a sibling of the
# parent -- fragile relative paths; confirm the expected working directory.
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 87",{
  ## Transformed weight matrix fed to slacks.transformation().
  weightsNSBM <- matrix(c(1,2,2,2,2,2,2,3,3,4,4,4,
                          1,2,2,2,2,2,2,3,3,4,4,4,
                          1,2,2,2,2,2,2,3,3,4,4,4), byrow = TRUE, nrow = 3)
  ## Expected components of the slack transformation.
  ## NOTE: `t` shadows base::t() inside this test; kept for fixture parity.
  t <- matrix(c(1,
                1,
                1), byrow = TRUE, nrow = 3)
  lambda <- matrix(c(2,2,2,2,2,2,
                     2,2,2,2,2,2,
                     2,2,2,2,2,2), byrow = TRUE, nrow = 3)
  slack_plus <- matrix(c(3,3,
                         3,3,
                         3,3), byrow = TRUE, nrow = 3)
  slack_minus <- matrix(c(4,4,4,
                          4,4,4,
                          4,4,4), byrow = TRUE, nrow = 3)
  ## Expected division efficiencies.
  DivEffNSBM <- matrix(c(-1.5,-2/7,
                         -1.5,-2/7,
                         -1.5,-2/7), byrow = TRUE, nrow = 3)
  ## Expected projections onto the efficient frontier.
  Input_proj <- matrix(c(-3,-2,0,
                         -3,-2,0,
                         -3,-2,0), byrow = TRUE, nrow = 3)
  Output_proj <- matrix(c(6,7,
                          6,7,
                          6,7), byrow = TRUE, nrow = 3)
  Link_proj <- Link
  #########################################
  ## slacks.transformation: called once (the original recomputed the same
  ## result for each of the three components checked).
  st <- slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r,
                              sum_l, Link_obj)
  expect_equal(st$t, t, check.attributes = FALSE)
  expect_equal(st$slack_plus, slack_plus, check.attributes = FALSE)
  expect_equal(st$slack_minus, slack_minus, check.attributes = FALSE)
  ## nsbm.division
  expect_equal(nsbm.division(direction, slack_plus, slack_minus, Input,
                             Output, Link, Amount_Input, Amount_Output,
                             Amount_Link, K, N, sum_m, sum_r, Link_obj),
               DivEffNSBM, check.attributes = FALSE)
  ## projection.frontier: likewise computed once; components rounded as before.
  pf <- projection.frontier(link_con, slack_plus, slack_minus, lambda, Input,
                            Output, Link, Amount_Input, Amount_Output,
                            Amount_Link, N, K, sum_m, sum_r, sum_l)
  expect_equal(round(pf$Input_Proj, 5), Input_proj, check.attributes = FALSE)
  expect_equal(round(pf$Output_Proj, 3), Output_proj, check.attributes = FALSE)
  expect_equal(round(pf$Link_Proj, 4), Link_proj, check.attributes = FALSE)
})
|
`pnet` <-
function(MN, add=FALSE, col = gray(.7), border='black', lwd=1 )
{
    ## Plot a net from a net object MN on a square [-1, 1] x [-1, 1] canvas.
    ##
    ## MN:     list with components x1, y1, x2, y2 holding the coordinates of
    ##         the two families of grid lines -- assumed, TODO confirm
    ## add:    if FALSE, open a new empty plot first; if TRUE, draw on the
    ##         current device
    ## col:    colour for the bounding circle and the grid lines
    ## border: accepted for backward compatibility; not used by this routine
    ## lwd:    line width applied to the grid lines
    ##
    ## Examples:
    ##   pnet(MN)
    ##   pnet(MN, col = 'brown', border = 'black', lwd = .5)
    ##
    ## The former `missing()` guards were removed: they only reassigned the
    ## declared default values and had no effect.
    if(add == FALSE)
    {
        plot(c(-1, 1), c(-1, 1), type = 'n', xlab = '', ylab = '',
             asp = 1, axes = FALSE)
    }
    pcirc(col)
    ## Draw both grid-line families; `lwd` was previously accepted but
    ## silently ignored -- it is now applied (default 1 matches lines()).
    lines(MN$x1, MN$y1, col = col, lwd = lwd)
    lines(MN$x2, MN$y2, col = col, lwd = lwd)
    ## Small cross marking the centre of the net.
    segments(c(-.02, 0), c(0, -0.02), c(0.02, 0), c(0, 0.02), col = 1)
}
|
/R/pnet.R
|
no_license
|
cran/RFOC
|
R
| false
| false
| 595
|
r
|
`pnet` <-
function(MN, add=FALSE, col = gray(.7), border='black', lwd=1 )
{
    ## Plot a net from a net object MN on a square [-1, 1] x [-1, 1] canvas.
    ##
    ## MN:     list with components x1, y1, x2, y2 holding the coordinates of
    ##         the two families of grid lines -- assumed, TODO confirm
    ## add:    if FALSE, open a new empty plot first; if TRUE, draw on the
    ##         current device
    ## col:    colour for the bounding circle and the grid lines
    ## border: accepted for backward compatibility; not used by this routine
    ## lwd:    line width applied to the grid lines
    ##
    ## Examples:
    ##   pnet(MN)
    ##   pnet(MN, col = 'brown', border = 'black', lwd = .5)
    ##
    ## The former `missing()` guards were removed: they only reassigned the
    ## declared default values and had no effect.
    if(add == FALSE)
    {
        plot(c(-1, 1), c(-1, 1), type = 'n', xlab = '', ylab = '',
             asp = 1, axes = FALSE)
    }
    pcirc(col)
    ## Draw both grid-line families; `lwd` was previously accepted but
    ## silently ignored -- it is now applied (default 1 matches lines()).
    lines(MN$x1, MN$y1, col = col, lwd = lwd)
    lines(MN$x2, MN$y2, col = col, lwd = lwd)
    ## Small cross marking the centre of the net.
    segments(c(-.02, 0), c(0, -0.02), c(0.02, 0), c(0, 0.02), col = 1)
}
|
best <- function(state, outcome) {
  ## Return the name of the hospital in `state` with the lowest (best)
  ## value of the given outcome measure; ties broken alphabetically.
  ##
  ## state:   state abbreviation; validation is delegated to the project
  ##          helper readDataAndCheckState()
  ## outcome: outcome name, mapped to a data column by the project helper
  ##          getOutcomeDataColumn()
  data <- readDataAndCheckState(state)
  dataCol <- getOutcomeDataColumn(outcome)
  stateHospitals <- data[data$State == state, ]
  ## Column 2 is taken to be the hospital name -- TODO confirm against the
  ## source data layout.
  subdata <- data.frame(name = stateHospitals[[2]],
                        outcome = as.numeric(stateHospitals[[dataCol]]))
  ## na.rm = TRUE replaces the manual NA filter; which() avoids the NA
  ## entries that `==` indexing would otherwise introduce into the subset.
  bestOutcome <- min(subdata$outcome, na.rm = TRUE)
  bestHospitals <- as.character(subdata$name[which(subdata$outcome == bestOutcome)])
  ## min() on a character vector is the alphabetically first name, so the
  ## redundant sort() before it was dropped.
  min(bestHospitals)
}
|
/best.R
|
no_license
|
darja/datascience_hospital_quality
|
R
| false
| false
| 454
|
r
|
best <- function(state, outcome) {
  ## Return the name of the hospital in `state` with the lowest (best)
  ## value of the given outcome measure; ties broken alphabetically.
  ##
  ## state:   state abbreviation; validation is delegated to the project
  ##          helper readDataAndCheckState()
  ## outcome: outcome name, mapped to a data column by the project helper
  ##          getOutcomeDataColumn()
  data <- readDataAndCheckState(state)
  dataCol <- getOutcomeDataColumn(outcome)
  stateHospitals <- data[data$State == state, ]
  ## Column 2 is taken to be the hospital name -- TODO confirm against the
  ## source data layout.
  subdata <- data.frame(name = stateHospitals[[2]],
                        outcome = as.numeric(stateHospitals[[dataCol]]))
  ## na.rm = TRUE replaces the manual NA filter; which() avoids the NA
  ## entries that `==` indexing would otherwise introduce into the subset.
  bestOutcome <- min(subdata$outcome, na.rm = TRUE)
  bestHospitals <- as.character(subdata$name[which(subdata$outcome == bestOutcome)])
  ## min() on a character vector is the alphabetically first name, so the
  ## redundant sort() before it was dropped.
  min(bestHospitals)
}
|
\name{algdat.pfull}
\alias{algdat.pfull}
\docType{data}
\title{
All Partitions of 25 Precincts into 3 Congressional Districts (No Population Constraint)
}
\description{
This data set contains demographic and geographic information about 25 contiguous precincts in the state of Florida. The data lists all possible partitions of the 25 precincts into three contiguous congressional districts.
}
\usage{data("algdat.pfull")}
\format{
A list with five entries.
\describe{
\item{\code{adjlist}}{An adjacency list for the 25 precincts.}
\item{\code{cdmat}}{A matrix containing every partition of the 25 precincts into three contiguous congressional districts, with no population constraint.}
\item{\code{precinct.data}}{A matrix containing demographic information for each of the 25 precincts.}
\item{\code{segregation.index}}{A matrix containing the dissimilarity index of segregation (Massey and Denton 1987) for each congressional district map in \code{cdmat}.}
\item{\code{distancemat}}{A square matrix containing the squared distance between the centroids of any two precincts.}
}
}
\references{
Fifield, Benjamin, Michael Higgins, Kosuke Imai and Alexander Tarr. (2015) "A New Automated Redistricting Simulator Using Markov Chain Monte Carlo." Working
Paper. Available at \url{http://imai.princeton.edu/research/files/redist.pdf}.
Massey, Douglas and Nancy Denton. (1987) "The Dimensions of Social Segregation". Social Forces.
}
\examples{
\dontrun{
data(algdat.pfull)
}}
\keyword{datasets}
|
/man/algdat.pfull.Rd
|
no_license
|
HJ08003/redist
|
R
| false
| false
| 1,532
|
rd
|
\name{algdat.pfull}
\alias{algdat.pfull}
\docType{data}
\title{
All Partitions of 25 Precincts into 3 Congressional Districts (No Population Constraint)
}
\description{
This data set contains demographic and geographic information about 25 contiguous precincts in the state of Florida. The data lists all possible partitions of the 25 precincts into three contiguous congressional districts.
}
\usage{data("algdat.pfull")}
\format{
A list with five entries.
\describe{
\item{\code{adjlist}}{An adjacency list for the 25 precincts.}
\item{\code{cdmat}}{A matrix containing every partition of the 25 precincts into three contiguous congressional districts, with no population constraint.}
\item{\code{precinct.data}}{A matrix containing demographic information for each of the 25 precincts.}
\item{\code{segregation.index}}{A matrix containing the dissimilarity index of segregation (Massey and Denton 1987) for each congressional district map in \code{cdmat}.}
\item{\code{distancemat}}{A square matrix containing the squared distance between the centroids of any two precincts.}
}
}
\references{
Fifield, Benjamin, Michael Higgins, Kosuke Imai and Alexander Tarr. (2015) "A New Automated Redistricting Simulator Using Markov Chain Monte Carlo." Working
Paper. Available at \url{http://imai.princeton.edu/research/files/redist.pdf}.
Massey, Douglas and Nancy Denton. (1987) "The Dimensions of Social Segregation". Social Forces.
}
\examples{
\dontrun{
data(algdat.pfull)
}}
\keyword{datasets}
|
\name{math}
\alias{math}
\alias{Ops,vector,timeSeries-method}
\alias{Ops,array,timeSeries-method}
\alias{Ops,ts,timeSeries-method}
\alias{Ops,timeSeries,vector-method}
\alias{Ops,timeSeries,array-method}
\alias{Ops,timeSeries,ts-method}
\alias{Ops,timeSeries,timeSeries-method}
\alias{-,timeSeries,missing-method}
\alias{+,timeSeries,missing-method}
\alias{cummax,timeSeries-method}
\alias{cummin,timeSeries-method}
\alias{cumprod,timeSeries-method}
\alias{cumsum,timeSeries-method}
\alias{Math,timeSeries-method}
\alias{Math2,timeSeries-method}
\alias{Summary,timeSeries-method}
\alias{trunc,timeSeries-method}
\alias{log,timeSeries-method}
\alias{\%*\%,timeSeries,vector-method}
\alias{\%*\%,timeSeries,ANY-method}
\alias{\%*\%,ANY,timeSeries-method}
%\alias{diff,timeSeries-method}
%\alias{scale,timeSeries-method}
\alias{quantile,timeSeries-method}
\alias{diff.timeSeries}
%\alias{scale.timeSeries}
\alias{quantile.timeSeries}
\title{Mathematical Time Series Operations}
\description{
Functions and methods dealing with mathematical 'timeSeries'
operations.
}
\details{
The math functions include:\cr
\tabular{ll}{
\code{Ops-method} \tab Group 'Ops' methods for a 'timeSeries' object \cr
\code{Math-method} \tab Group 'Math' methods for a 'timeSeries' object \cr
\code{Math2-method} \tab Group 'Math2' methods for a 'timeSeries' object \cr
\code{Summary-method} \tab Group 'Summary' methods for a 'timeSeries' object \cr
%\code{diff} \tab Differences a 'timeSeries' object, \cr
%\code{scale} \tab Centers and/or scales a 'timeSeries' object, \cr
\code{quantile} \tab Returns quantiles of an univariate 'timeSeries'. }
}
\usage{
% \S4method{Ops}{timeSeries}(e1, e2)
% \S4method{Math}{timeSeries}(x, ...)
% \S4method{Math2}{timeSeries}(x, digits)
% \S4method{Summary}{timeSeries}(x, ..., na.rm = FALSE)
% \S4method{diff}{timeSeries}(x, lag = 1, diff = 1, trim = FALSE, pad = NA, \dots)
% \S4method{scale}{timeSeries}(x, center = TRUE, scale = TRUE)
\S4method{quantile}{timeSeries}(x, \dots)
}
\arguments{
% \item{center, scale}{
% [scale] - \cr
% either a logical value or a numeric vector of length equal to
% the number of columns of \code{x}.
% }
% \item{diff}{
% an integer indicating the order of the difference. By default 1.
% }
% \item{digits} {
% number of digits to be used in 'round' or 'signif'.
% }
% \item{e1, e2}{
% [Ops] - \cr
% two objects of class \code{timeSeries}.
% }
% \item{lag}{
% an integer indicating which lag to use. By default 1.
% }
% \item{na.rm}{
% logical: should missing values be removed?
% }
% \item{pad}{
% [diffSeries] - \cr
% which value should get the padded values? By default \code{NA}.
% Another choice often used would be zero.
% }
% \item{trim}{
% a logical value. By default \code{TRUE}, the first missing
% observation in the return series will be removed.
% }
\item{x}{
an object of class \code{timeSeries}.
}
\item{\dots}{
arguments to be passed.
}
}
\value{
Returns the value from a mathematical or logical operation
operating on objects of class 'timeSeries', or the value
computed by a mathematical function.
}
\examples{
## Create an Artificial timeSeries Object -
setRmetricsOptions(myFinCenter = "GMT")
charvec = timeCalendar()
set.seed(4711)
data = matrix(exp(cumsum(rnorm(12, sd = 0.1))))
TS = timeSeries(data, charvec, units = "TS")
TS
## Mathematical Operations: | +/- * ^ ... -
TS^2
TS[2:4]
OR = returns(TS)
OR
OR > 0
}
\keyword{chron}
\keyword{methods}
|
/pkg/timeSeries/man/methods-mathOps.Rd
|
no_license
|
xashely/rmetrics
|
R
| false
| false
| 3,657
|
rd
|
\name{math}
\alias{math}
\alias{Ops,vector,timeSeries-method}
\alias{Ops,array,timeSeries-method}
\alias{Ops,ts,timeSeries-method}
\alias{Ops,timeSeries,vector-method}
\alias{Ops,timeSeries,array-method}
\alias{Ops,timeSeries,ts-method}
\alias{Ops,timeSeries,timeSeries-method}
\alias{-,timeSeries,missing-method}
\alias{+,timeSeries,missing-method}
\alias{cummax,timeSeries-method}
\alias{cummin,timeSeries-method}
\alias{cumprod,timeSeries-method}
\alias{cumsum,timeSeries-method}
\alias{Math,timeSeries-method}
\alias{Math2,timeSeries-method}
\alias{Summary,timeSeries-method}
\alias{trunc,timeSeries-method}
\alias{log,timeSeries-method}
\alias{\%*\%,timeSeries,vector-method}
\alias{\%*\%,timeSeries,ANY-method}
\alias{\%*\%,ANY,timeSeries-method}
%\alias{diff,timeSeries-method}
%\alias{scale,timeSeries-method}
\alias{quantile,timeSeries-method}
\alias{diff.timeSeries}
%\alias{scale.timeSeries}
\alias{quantile.timeSeries}
\title{Mathematical Time Series Operations}
\description{
Functions and methods dealing with mathematical 'timeSeries'
operations.
}
\details{
The math functions include:\cr
\tabular{ll}{
\code{Ops-method} \tab Group 'Ops' methods for a 'timeSeries' object \cr
\code{Math-method} \tab Group 'Math' methods for a 'timeSeries' object \cr
\code{Math2-method} \tab Group 'Math2' methods for a 'timeSeries' object \cr
\code{Summary-method} \tab Group 'Summary' methods for a 'timeSeries' object \cr
%\code{diff} \tab Differences a 'timeSeries' object, \cr
%\code{scale} \tab Centers and/or scales a 'timeSeries' object, \cr
\code{quantile} \tab Returns quantiles of an univariate 'timeSeries'. }
}
\usage{
% \S4method{Ops}{timeSeries}(e1, e2)
% \S4method{Math}{timeSeries}(x, ...)
% \S4method{Math2}{timeSeries}(x, digits)
% \S4method{Summary}{timeSeries}(x, ..., na.rm = FALSE)
% \S4method{diff}{timeSeries}(x, lag = 1, diff = 1, trim = FALSE, pad = NA, \dots)
% \S4method{scale}{timeSeries}(x, center = TRUE, scale = TRUE)
\S4method{quantile}{timeSeries}(x, \dots)
}
\arguments{
% \item{center, scale}{
% [scale] - \cr
% either a logical value or a numeric vector of length equal to
% the number of columns of \code{x}.
% }
% \item{diff}{
% an integer indicating the order of the difference. By default 1.
% }
% \item{digits} {
% number of digits to be used in 'round' or 'signif'.
% }
% \item{e1, e2}{
% [Ops] - \cr
% two objects of class \code{timeSeries}.
% }
% \item{lag}{
% an integer indicating which lag to use. By default 1.
% }
% \item{na.rm}{
% logical: should missing values be removed?
% }
% \item{pad}{
% [diffSeries] - \cr
% which value should get the padded values? By default \code{NA}.
% Another choice often used would be zero.
% }
% \item{trim}{
% a logical value. By default \code{TRUE}, the first missing
% observation in the return series will be removed.
% }
\item{x}{
an object of class \code{timeSeries}.
}
\item{\dots}{
arguments to be passed.
}
}
\value{
Returns the value from a mathematical or logical operation
operating on objects of class 'timeSeries', or the value
computed by a mathematical function.
}
\examples{
## Create an Artificial timeSeries Object -
setRmetricsOptions(myFinCenter = "GMT")
charvec = timeCalendar()
set.seed(4711)
data = matrix(exp(cumsum(rnorm(12, sd = 0.1))))
TS = timeSeries(data, charvec, units = "TS")
TS
## Mathematical Operations: | +/- * ^ ... -
TS^2
TS[2:4]
OR = returns(TS)
OR
OR > 0
}
\keyword{chron}
\keyword{methods}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/translate-qplot-lattice.r
\name{translate_qplot_lattice}
\alias{translate_qplot_lattice}
\title{Translating between qplot and lattice}
\description{
The major difference between lattice and ggplot2 is that lattice uses a formula based
interface. ggplot2 does not because the formula does not generalise well
to more complicated situations.
}
\examples{
\dontrun{
library(lattice)
xyplot(rating ~ year, data=movies)
qplot(year, rating, data=movies)
xyplot(rating ~ year | Comedy + Action, data = movies)
qplot(year, rating, data = movies, facets = ~ Comedy + Action)
# Or maybe
qplot(year, rating, data = movies, facets = Comedy ~ Action)
# While lattice has many different functions to produce different types of
# graphics (which are all basically equivalent to setting the panel argument),
# ggplot2 has qplot().
stripplot(~ rating, data = movies, jitter.data = TRUE)
qplot(rating, 1, data = movies, geom = "jitter")
histogram(~ rating, data = movies)
qplot(rating, data = movies, geom = "histogram")
bwplot(Comedy ~ rating ,data = movies)
qplot(factor(Comedy), rating, data = movies, type = "boxplot")
xyplot(wt ~ mpg, mtcars, type = c("p","smooth"))
qplot(mpg, wt, data = mtcars, geom = c("point","smooth"))
xyplot(wt ~ mpg, mtcars, type = c("p","r"))
qplot(mpg, wt, data = mtcars, geom = c("point","smooth"), method = "lm")
# The capabilities for scale manipulations are similar in both ggplot2 and
# lattice, although the syntax is a little different.
xyplot(wt ~ mpg | cyl, mtcars, scales = list(y = list(relation = "free")))
qplot(mpg, wt, data = mtcars) + facet_wrap(~ cyl, scales = "free")
xyplot(wt ~ mpg | cyl, mtcars, scales = list(log = 10))
qplot(mpg, wt, data = mtcars, log = "xy")
xyplot(wt ~ mpg | cyl, mtcars, scales = list(log = 2))
library(scales) # Load scales for log2_trans
qplot(mpg, wt, data = mtcars) + scale_x_continuous(trans = log2_trans()) +
scale_y_continuous(trans = log2_trans())
xyplot(wt ~ mpg, mtcars, group = cyl, auto.key = TRUE)
# Map directly to an aesthetic like colour, size, or shape.
qplot(mpg, wt, data = mtcars, colour = cyl)
xyplot(wt ~ mpg, mtcars, xlim = c(20,30))
# Works like lattice, except you can't specify a different limit
# for each panel/facet
qplot(mpg, wt, data = mtcars, xlim = c(20,30))
# Both lattice and ggplot2 have similar options for controlling labels on the plot.
xyplot(wt ~ mpg, mtcars, xlab = "Miles per gallon", ylab = "Weight",
main = "Weight-efficiency tradeoff")
qplot(mpg, wt, data = mtcars, xlab = "Miles per gallon", ylab = "Weight",
main = "Weight-efficiency tradeoff")
xyplot(wt ~ mpg, mtcars, aspect = 1)
qplot(mpg, wt, data = mtcars, asp = 1)
# par.settings() is equivalent to + theme() and trellis.options.set()
# and trellis.par.get() to theme_set() and theme_get().
# More complicated lattice formulas are equivalent to rearranging the data
# before using ggplot2.
}
}
|
/man/translate_qplot_lattice.Rd
|
no_license
|
mteremko84/ggplot2
|
R
| false
| false
| 2,968
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/translate-qplot-lattice.r
\name{translate_qplot_lattice}
\alias{translate_qplot_lattice}
\title{Translating between qplot and lattice}
\description{
The major difference between lattice and ggplot2 is that lattice uses a formula based
interface. ggplot2 does not because the formula does not generalise well
to more complicated situations.
}
\examples{
\dontrun{
library(lattice)
xyplot(rating ~ year, data=movies)
qplot(year, rating, data=movies)
xyplot(rating ~ year | Comedy + Action, data = movies)
qplot(year, rating, data = movies, facets = ~ Comedy + Action)
# Or maybe
qplot(year, rating, data = movies, facets = Comedy ~ Action)
# While lattice has many different functions to produce different types of
# graphics (which are all basically equivalent to setting the panel argument),
# ggplot2 has qplot().
stripplot(~ rating, data = movies, jitter.data = TRUE)
qplot(rating, 1, data = movies, geom = "jitter")
histogram(~ rating, data = movies)
qplot(rating, data = movies, geom = "histogram")
bwplot(Comedy ~ rating ,data = movies)
qplot(factor(Comedy), rating, data = movies, type = "boxplot")
xyplot(wt ~ mpg, mtcars, type = c("p","smooth"))
qplot(mpg, wt, data = mtcars, geom = c("point","smooth"))
xyplot(wt ~ mpg, mtcars, type = c("p","r"))
qplot(mpg, wt, data = mtcars, geom = c("point","smooth"), method = "lm")
# The capabilities for scale manipulations are similar in both ggplot2 and
# lattice, although the syntax is a little different.
xyplot(wt ~ mpg | cyl, mtcars, scales = list(y = list(relation = "free")))
qplot(mpg, wt, data = mtcars) + facet_wrap(~ cyl, scales = "free")
xyplot(wt ~ mpg | cyl, mtcars, scales = list(log = 10))
qplot(mpg, wt, data = mtcars, log = "xy")
xyplot(wt ~ mpg | cyl, mtcars, scales = list(log = 2))
library(scales) # Load scales for log2_trans
qplot(mpg, wt, data = mtcars) + scale_x_continuous(trans = log2_trans()) +
scale_y_continuous(trans = log2_trans())
xyplot(wt ~ mpg, mtcars, group = cyl, auto.key = TRUE)
# Map directly to an aesthetic like colour, size, or shape.
qplot(mpg, wt, data = mtcars, colour = cyl)
xyplot(wt ~ mpg, mtcars, xlim = c(20,30))
# Works like lattice, except you can't specify a different limit
# for each panel/facet
qplot(mpg, wt, data = mtcars, xlim = c(20,30))
# Both lattice and ggplot2 have similar options for controlling labels on the plot.
xyplot(wt ~ mpg, mtcars, xlab = "Miles per gallon", ylab = "Weight",
main = "Weight-efficiency tradeoff")
qplot(mpg, wt, data = mtcars, xlab = "Miles per gallon", ylab = "Weight",
main = "Weight-efficiency tradeoff")
xyplot(wt ~ mpg, mtcars, aspect = 1)
qplot(mpg, wt, data = mtcars, asp = 1)
# par.settings() is equivalent to + theme() and trellis.options.set()
# and trellis.par.get() to theme_set() and theme_get().
# More complicated lattice formulas are equivalent to rearranging the data
# before using ggplot2.
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render.R
\name{moon_reader}
\alias{moon_reader}
\alias{tsukuyomi}
\title{An R Markdown output format for remark.js slides}
\usage{
moon_reader(
css = c("default", "default-fonts"),
self_contained = FALSE,
seal = TRUE,
yolo = FALSE,
chakra = "https://remarkjs.com/downloads/remark-latest.min.js",
nature = list(),
anchor_sections = FALSE,
...
)
tsukuyomi(...)
}
\arguments{
\item{css}{A vector of CSS file paths. Two default CSS files
(\file{default.css} and \file{default-fonts.css}) are provided in this
package, which was borrowed from \url{https://remarkjs.com}. If the
character vector \code{css} contains a value that does not end with
\code{.css}, it is supposed to be a built-in CSS file in this package,
e.g., for \code{css = c('default', 'extra.css')}), it means
\code{default.css} in this package and a user-provided \code{extra.css}. To
find out all built-in CSS files, use \code{xaringan:::list_css()}.}
\item{self_contained}{Whether to produce a self-contained HTML file.}
\item{seal}{Whether to generate a title slide automatically using the YAML
metadata of the R Markdown document (if \code{FALSE}, you should write the
title slide by yourself).}
\item{yolo}{Whether to insert the
\href{https://kbroman.wordpress.com/2014/08/28/the-mustache-photo/}{Mustache
Karl (TM)} randomly in the slides. \code{TRUE} means insert his picture on
one slide, and if you want him to be on multiple slides, set \code{yolo} to
a positive integer or a percentage (e.g. 0.3 means 30\% of your slides will
be the Mustache Karl). Alternatively, \code{yolo} can also be a list of the
form \code{list(times = n, img = path)}: \code{n} is the number of times to
show an image, and \code{path} is the path to an image (by default, it is
Karl).}
\item{chakra}{A path to the remark.js library (can be either local or
remote).}
\item{nature}{(Nature transformation) A list of configurations to be passed
to \code{remark.create()}, e.g. \code{list(ratio = '16:9', navigation =
list(click = TRUE))}; see
\url{https://github.com/gnab/remark/wiki/Configuration}. Besides the
options provided by remark.js, you can also set \code{autoplay} to a number
(the number of milliseconds) so the slides will be played every
\code{autoplay} milliseconds. You can also set \code{countdown} to a number
(the number of milliseconds) to include a countdown timer on each slide. If
using \code{autoplay}, you can optionally set \code{countdown} to
\code{TRUE} to include a countdown equal to \code{autoplay}. To alter the
set of classes applied to the title slide, you can optionally set
\code{titleSlideClass} to a vector of classes; the default is
\code{c("center", "middle", "inverse")}.}
\item{...}{For \code{tsukuyomi()}, arguments passed to \code{moon_reader()};
for \code{moon_reader()}, arguments passed to
\code{rmarkdown::\link{html_document}()}.}
}
\description{
This output format produces an HTML file that contains the Markdown source
(knitted from R Markdown) and JavaScript code to render slides.
\code{tsukuyomi()} is an alias of \code{moon_reader()}.
}
\details{
Tsukuyomi is a genjutsu to trap the target in an illusion on eye contact.
If you are unfamiliar with CSS, please see the
\href{https://github.com/yihui/xaringan/wiki}{xaringan wiki on Github}
providing CSS slide modification examples.
}
\note{
Do not stare at Karl's picture for too long after you turn on the
\code{yolo} mode. I believe he has Sharingan.
Local images that you inserted via the Markdown syntax
\command{![](path/to/image)} will not be embedded into the HTML file when
\code{self_contained = TRUE} (only CSS, JavaScript, and R plot files will
be embedded). You may also download remark.js (via
\code{\link{summon_remark}()}) and use a local copy instead of the default
\code{chakra} argument when \code{self_contained = TRUE}, because it may be
time-consuming for Pandoc to download remark.js each time you compile your
slides.
Each page has its own countdown timer (when the option \code{countdown} is
set in \code{nature}), and the timer is (re)initialized whenever you
navigate to a new page. If you need a global timer, you can use the
presenter's mode (press \kbd{P}).
}
\examples{
# rmarkdown::render('foo.Rmd', 'SlidesDatactivist::moon_reader')
}
\references{
\url{http://naruto.wikia.com/wiki/Tsukuyomi}
}
|
/man/moon_reader.Rd
|
permissive
|
SylvainLapoix/slides_datactivist
|
R
| false
| true
| 4,410
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render.R
\name{moon_reader}
\alias{moon_reader}
\alias{tsukuyomi}
\title{An R Markdown output format for remark.js slides}
\usage{
moon_reader(
css = c("default", "default-fonts"),
self_contained = FALSE,
seal = TRUE,
yolo = FALSE,
chakra = "https://remarkjs.com/downloads/remark-latest.min.js",
nature = list(),
anchor_sections = FALSE,
...
)
tsukuyomi(...)
}
\arguments{
\item{css}{A vector of CSS file paths. Two default CSS files
(\file{default.css} and \file{default-fonts.css}) are provided in this
package, which was borrowed from \url{https://remarkjs.com}. If the
character vector \code{css} contains a value that does not end with
\code{.css}, it is supposed to be a built-in CSS file in this package,
e.g., for \code{css = c('default', 'extra.css')}), it means
\code{default.css} in this package and a user-provided \code{extra.css}. To
find out all built-in CSS files, use \code{xaringan:::list_css()}.}
\item{self_contained}{Whether to produce a self-contained HTML file.}
\item{seal}{Whether to generate a title slide automatically using the YAML
metadata of the R Markdown document (if \code{FALSE}, you should write the
title slide by yourself).}
\item{yolo}{Whether to insert the
\href{https://kbroman.wordpress.com/2014/08/28/the-mustache-photo/}{Mustache
Karl (TM)} randomly in the slides. \code{TRUE} means insert his picture on
one slide, and if you want him to be on multiple slides, set \code{yolo} to
a positive integer or a percentage (e.g. 0.3 means 30\% of your slides will
be the Mustache Karl). Alternatively, \code{yolo} can also be a list of the
form \code{list(times = n, img = path)}: \code{n} is the number of times to
show an image, and \code{path} is the path to an image (by default, it is
Karl).}
\item{chakra}{A path to the remark.js library (can be either local or
remote).}
\item{nature}{(Nature transformation) A list of configurations to be passed
to \code{remark.create()}, e.g. \code{list(ratio = '16:9', navigation =
list(click = TRUE))}; see
\url{https://github.com/gnab/remark/wiki/Configuration}. Besides the
options provided by remark.js, you can also set \code{autoplay} to a number
(the number of milliseconds) so the slides will be played every
\code{autoplay} milliseconds. You can also set \code{countdown} to a number
(the number of milliseconds) to include a countdown timer on each slide. If
using \code{autoplay}, you can optionally set \code{countdown} to
\code{TRUE} to include a countdown equal to \code{autoplay}. To alter the
set of classes applied to the title slide, you can optionally set
\code{titleSlideClass} to a vector of classes; the default is
\code{c("center", "middle", "inverse")}.}
\item{...}{For \code{tsukuyomi()}, arguments passed to \code{moon_reader()};
for \code{moon_reader()}, arguments passed to
\code{rmarkdown::\link{html_document}()}.}
}
\description{
This output format produces an HTML file that contains the Markdown source
(knitted from R Markdown) and JavaScript code to render slides.
\code{tsukuyomi()} is an alias of \code{moon_reader()}.
}
\details{
Tsukuyomi is a genjutsu to trap the target in an illusion on eye contact.
If you are unfamiliar with CSS, please see the
\href{https://github.com/yihui/xaringan/wiki}{xaringan wiki on Github}
providing CSS slide modification examples.
}
\note{
Do not stare at Karl's picture for too long after you turn on the
\code{yolo} mode. I believe he has Sharingan.
Local images that you inserted via the Markdown syntax
\command{![alt text](path/to/image)} will not be embedded into the HTML file when
\code{self_contained = TRUE} (only CSS, JavaScript, and R plot files will
be embedded). You may also download remark.js (via
\code{\link{summon_remark}()}) and use a local copy instead of the default
\code{chakra} argument when \code{self_contained = TRUE}, because it may be
time-consuming for Pandoc to download remark.js each time you compile your
slides.
Each page has its own countdown timer (when the option \code{countdown} is
set in \code{nature}), and the timer is (re)initialized whenever you
navigate to a new page. If you need a global timer, you can use the
presenter's mode (press \kbd{P}).
}
\examples{
# rmarkdown::render('foo.Rmd', 'SlidesDatactivist::moon_reader')
}
\references{
\url{http://naruto.wikia.com/wiki/Tsukuyomi}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival_reg_flexsurv.R
\name{details_survival_reg_flexsurv}
\alias{details_survival_reg_flexsurv}
\title{Parametric survival regression}
\description{
\code{\link[flexsurv:flexsurvreg]{flexsurv::flexsurvreg()}} fits a parametric survival model.
}
\details{
For this engine, there is a single mode: censored regression
\subsection{Tuning Parameters}{
This model has 1 tuning parameter:
\itemize{
\item \code{dist}: Distribution (type: character, default: ‘weibull’)
}
}
\subsection{Translation from parsnip to the original package}{
The \strong{censored} extension package is required to fit this model.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(censored)
survival_reg(dist = character(1)) \%>\%
set_engine("flexsurv") \%>\%
set_mode("censored regression") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Parametric Survival Regression Model Specification (censored regression)
##
## Main Arguments:
## dist = character(1)
##
## Computational engine: flexsurv
##
## Model fit template:
## flexsurv::flexsurvreg(formula = missing_arg(), data = missing_arg(),
## weights = missing_arg(), dist = character(1))
}\if{html}{\out{</div>}}
}
\subsection{Other details}{
The main interface for this model uses the formula method since the
model specification typically involves the use of
\code{\link[survival:Surv]{survival::Surv()}}.
For this engine, stratification cannot be specified via
\code{\link[=strata]{strata()}}, please see
\code{\link[flexsurv:flexsurvreg]{flexsurv::flexsurvreg()}} for alternative
specifications.
Predictions of type \code{"time"} are predictions of the mean survival time.
}
\subsection{Case weights}{
This model can utilize case weights during model fitting. To use them,
see the documentation in \link{case_weights} and the examples
on \code{tidymodels.org}.
The \code{fit()} and \code{fit_xy()} arguments have arguments called
\code{case_weights} that expect vectors of case weights.
}
\subsection{Saving fitted model objects}{
This model object contains data that are not required to make
predictions. When saving the model for the purpose of prediction, the
size of the saved object might be substantially reduced by using
functions from the \href{https://butcher.tidymodels.org}{butcher} package.
}
\subsection{References}{
\itemize{
\item Jackson, C. 2016. \code{flexsurv}: A Platform for Parametric Survival
Modeling in R. \emph{Journal of Statistical Software}, 70(8), 1 - 33.
}
}
}
\keyword{internal}
|
/man/details_survival_reg_flexsurv.Rd
|
permissive
|
tidymodels/parsnip
|
R
| false
| true
| 2,629
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survival_reg_flexsurv.R
\name{details_survival_reg_flexsurv}
\alias{details_survival_reg_flexsurv}
\title{Parametric survival regression}
\description{
\code{\link[flexsurv:flexsurvreg]{flexsurv::flexsurvreg()}} fits a parametric survival model.
}
\details{
For this engine, there is a single mode: censored regression
\subsection{Tuning Parameters}{
This model has 1 tuning parameter:
\itemize{
\item \code{dist}: Distribution (type: character, default: ‘weibull’)
}
}
\subsection{Translation from parsnip to the original package}{
The \strong{censored} extension package is required to fit this model.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(censored)
survival_reg(dist = character(1)) \%>\%
set_engine("flexsurv") \%>\%
set_mode("censored regression") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Parametric Survival Regression Model Specification (censored regression)
##
## Main Arguments:
## dist = character(1)
##
## Computational engine: flexsurv
##
## Model fit template:
## flexsurv::flexsurvreg(formula = missing_arg(), data = missing_arg(),
## weights = missing_arg(), dist = character(1))
}\if{html}{\out{</div>}}
}
\subsection{Other details}{
The main interface for this model uses the formula method since the
model specification typically involves the use of
\code{\link[survival:Surv]{survival::Surv()}}.
For this engine, stratification cannot be specified via
\code{\link[=strata]{strata()}}, please see
\code{\link[flexsurv:flexsurvreg]{flexsurv::flexsurvreg()}} for alternative
specifications.
Predictions of type \code{"time"} are predictions of the mean survival time.
}
\subsection{Case weights}{
This model can utilize case weights during model fitting. To use them,
see the documentation in \link{case_weights} and the examples
on \code{tidymodels.org}.
The \code{fit()} and \code{fit_xy()} arguments have arguments called
\code{case_weights} that expect vectors of case weights.
}
\subsection{Saving fitted model objects}{
This model object contains data that are not required to make
predictions. When saving the model for the purpose of prediction, the
size of the saved object might be substantially reduced by using
functions from the \href{https://butcher.tidymodels.org}{butcher} package.
}
\subsection{References}{
\itemize{
\item Jackson, C. 2016. \code{flexsurv}: A Platform for Parametric Survival
Modeling in R. \emph{Journal of Statistical Software}, 70(8), 1 - 33.
}
}
}
\keyword{internal}
|
# Wrap a matrix in a cache-aware container.
#
# Returns a list of four accessor closures sharing the wrapped matrix `x`
# and its (lazily filled) cached inverse:
#   set(y)           -- replace the matrix and invalidate the cache
#   get()            -- return the current matrix
#   setinverse(inv)  -- store a computed inverse
#   getinverse()     -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix makes any previously cached inverse stale.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Return the inverse of the special "matrix" created by makeCacheMatrix().
#
# If the inverse is already cached (and the matrix has not been replaced
# via $set(), which clears the cache), the cached value is returned
# without recomputation and a message is emitted.
#
# @param x   An object produced by makeCacheMatrix().
# @param ... Additional arguments forwarded to solve().
# @return The inverse of x$get().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  # BUG FIX: `...` was accepted by the signature but silently dropped;
  # forward it to solve() as the interface advertises.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
rthtyhyhy/ProgrammingAssignment2
|
R
| false
| false
| 524
|
r
|
# Wrap a matrix in a cache-aware container.
#
# Returns a list of four accessor closures sharing the wrapped matrix `x`
# and its (lazily filled) cached inverse:
#   set(y)           -- replace the matrix and invalidate the cache
#   get()            -- return the current matrix
#   setinverse(inv)  -- store a computed inverse
#   getinverse()     -- return the cached inverse, or NULL if unset
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix makes any previously cached inverse stale.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Return the inverse of the special "matrix" created by makeCacheMatrix().
#
# If the inverse is already cached (and the matrix has not been replaced
# via $set(), which clears the cache), the cached value is returned
# without recomputation and a message is emitted.
#
# @param x   An object produced by makeCacheMatrix().
# @param ... Additional arguments forwarded to solve().
# @return The inverse of x$get().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  # BUG FIX: `...` was accepted by the signature but silently dropped;
  # forward it to solve() as the interface advertises.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
# Auto-generated fuzz-test harness (RcppDeepState/valgrind style) for the
# compiled routine DLMtool::LBSPRgen. The argument values are intentionally
# extreme (denormals, huge magnitudes, NaN) to probe for crashes and memory
# errors; they are not meaningful fishery inputs.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195748770757e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
# Invoke the compiled routine with the generated argument list.
result <- do.call(DLMtool::LBSPRgen,testlist)
# Print the structure of whatever came back; crash/leak detection happens
# externally (valgrind), not via assertions here.
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615833964-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 2,048
|
r
|
# Auto-generated fuzz-test harness (RcppDeepState/valgrind style) for the
# compiled routine DLMtool::LBSPRgen. The argument values are intentionally
# extreme (denormals, huge magnitudes, NaN) to probe for crashes and memory
# errors; they are not meaningful fishery inputs.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195748770757e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
# Invoke the compiled routine with the generated argument list.
result <- do.call(DLMtool::LBSPRgen,testlist)
# Print the structure of whatever came back; crash/leak detection happens
# externally (valgrind), not via assertions here.
str(result)
|
\name{rglSetMouseCbTrackball}
\alias{rglSetMouseCbTrackball}
\title{Link the current device with others to share mouse control using the 'trackball' mode.}
\description{'rglSetMouseCbTrackball' sets mouse "trackball" callback for given
\code{button} on selected device(s), such that interaction made using the
given \code{button} affects all the devices (and all their subscenes, by
default).
The code is mostly based on the 'mouseCallbacks' demo from the
'rgl' package.}
\usage{rglSetMouseCbTrackball(dev = rgl.cur(),
button = 1, affectSubscenes = TRUE)}
\arguments{
\item{dev}{device(s) to set callback for}
\item{button}{\code{button} to set callback for}
\item{affectSubscenes}{if TRUE, interaction affects all subscenes
in a device, not only the current one}
}
\author{Tomas Sieger}
\examples{
if (interactive() && require(rgl)) {
dev1 <- open3d()
shade3d(cube3d(color = rep(rainbow(6), rep(4, 6))))
dev2 <- open3d()
mfrow3d(1, 2, sharedMouse = TRUE)
shade3d(cube3d(color = rep(rainbow(6), rep(4, 6))))
next3d()
shade3d(cube3d(color = rep(rainbow(6), rep(4, 6))))
rglSetMouseCbTrackball(c(dev1, dev2))
}
}
|
/man/rglSetMouseCbTrackball.Rd
|
no_license
|
tsieger/tsiMisc
|
R
| false
| false
| 1,146
|
rd
|
\name{rglSetMouseCbTrackball}
\alias{rglSetMouseCbTrackball}
\title{Link the current device with others to share mouse control using the 'trackball' mode.}
\description{'rglSetMouseCbTrackball' sets mouse "trackball" callback for given
\code{button} on selected device(s), such that interaction made using the
given \code{button} affects all the devices (and all their subscenes, by
default).
The code is mostly based on the 'mouseCallbacks' demo from the
'rgl' package.}
\usage{rglSetMouseCbTrackball(dev = rgl.cur(),
button = 1, affectSubscenes = TRUE)}
\arguments{
\item{dev}{device(s) to set callback for}
\item{button}{\code{button} to set callback for}
\item{affectSubscenes}{if TRUE, interaction affects all subscenes
in a device, not only the current one}
}
\author{Tomas Sieger}
\examples{
if (interactive() && require(rgl)) {
dev1 <- open3d()
shade3d(cube3d(color = rep(rainbow(6), rep(4, 6))))
dev2 <- open3d()
mfrow3d(1, 2, sharedMouse = TRUE)
shade3d(cube3d(color = rep(rainbow(6), rep(4, 6))))
next3d()
shade3d(cube3d(color = rep(rainbow(6), rep(4, 6))))
rglSetMouseCbTrackball(c(dev1, dev2))
}
}
|
### R code from vignette source 'RcppGSL-unitTests.Rnw'
### Driver script: runs the RcppGSL RUnit suite and renders the results
### into HTML/text protocols under unitTests-results/. Stops (non-zero
### exit under R CMD check) if any test fails or errors.
###################################################
### code chunk number 1: RcppGSL-unitTests.Rnw:12-16
###################################################
require(RcppGSL)
prettyVersion <- packageDescription("RcppGSL")$Version
prettyDate <- format(Sys.Date(), "%B %e, %Y")
library(RUnit)
###################################################
### code chunk number 2: unitTesting
###################################################
pkg <- "RcppGSL"
# Start from a clean results directory so stale protocols never survive.
if (file.exists("unitTests-results")) unlink("unitTests-results", recursive = TRUE)
dir.create("unitTests-results")
# Test sources ship inside the installed package under inst/unitTests.
path <- system.file("unitTests", package=pkg)
testSuite <- defineTestSuite(name=paste(pkg, "unit testing"), dirs=path)
tests <- runTestSuite(testSuite)
err <- getErrors(tests)
# Fail loudly on either assertion failures or runtime errors.
if (err$nFail > 0) stop(sprintf("unit test problems: %d failures", err$nFail))
if (err$nErr > 0) stop( sprintf("unit test problems: %d errors", err$nErr))
printHTMLProtocol(tests, fileName= sprintf("unitTests-results/%s-unitTests.html", pkg))
printTextProtocol(tests, fileName= sprintf("unitTests-results/%s-unitTests.txt" , pkg))
# Optional copy of the protocols to /tmp, kept disabled:
#if (file.exists("/tmp")) {
# invisible(sapply(c("txt", "html"), function(ext) {
# fname <- sprintf("unitTests-results/%s-unitTests.%s", pkg, ext)
# file.copy(fname, "/tmp", overwrite=TRUE)
# }))
#}
###################################################
### code chunk number 3: importResults
###################################################
# Echo the text protocol into the vignette output, if it was produced.
results <- "unitTests-results/RcppGSL-unitTests.txt"
if (file.exists(results)) {
writeLines(readLines(results))
} else{
writeLines( "unit test results not available" )
}
|
/data/genthat_extracted_code/RcppGSL/vignettes/RcppGSL-unitTests.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,675
|
r
|
### R code from vignette source 'RcppGSL-unitTests.Rnw'
### Driver script: runs the RcppGSL RUnit suite and renders the results
### into HTML/text protocols under unitTests-results/. Stops (non-zero
### exit under R CMD check) if any test fails or errors.
###################################################
### code chunk number 1: RcppGSL-unitTests.Rnw:12-16
###################################################
require(RcppGSL)
prettyVersion <- packageDescription("RcppGSL")$Version
prettyDate <- format(Sys.Date(), "%B %e, %Y")
library(RUnit)
###################################################
### code chunk number 2: unitTesting
###################################################
pkg <- "RcppGSL"
# Start from a clean results directory so stale protocols never survive.
if (file.exists("unitTests-results")) unlink("unitTests-results", recursive = TRUE)
dir.create("unitTests-results")
# Test sources ship inside the installed package under inst/unitTests.
path <- system.file("unitTests", package=pkg)
testSuite <- defineTestSuite(name=paste(pkg, "unit testing"), dirs=path)
tests <- runTestSuite(testSuite)
err <- getErrors(tests)
# Fail loudly on either assertion failures or runtime errors.
if (err$nFail > 0) stop(sprintf("unit test problems: %d failures", err$nFail))
if (err$nErr > 0) stop( sprintf("unit test problems: %d errors", err$nErr))
printHTMLProtocol(tests, fileName= sprintf("unitTests-results/%s-unitTests.html", pkg))
printTextProtocol(tests, fileName= sprintf("unitTests-results/%s-unitTests.txt" , pkg))
# Optional copy of the protocols to /tmp, kept disabled:
#if (file.exists("/tmp")) {
# invisible(sapply(c("txt", "html"), function(ext) {
# fname <- sprintf("unitTests-results/%s-unitTests.%s", pkg, ext)
# file.copy(fname, "/tmp", overwrite=TRUE)
# }))
#}
###################################################
### code chunk number 3: importResults
###################################################
# Echo the text protocol into the vignette output, if it was produced.
results <- "unitTests-results/RcppGSL-unitTests.txt"
if (file.exists(results)) {
writeLines(readLines(results))
} else{
writeLines( "unit test results not available" )
}
|
# Scrape upcoming doctoral defence ("väitös") events from the University of
# Eastern Finland (UEF) website and submit the tidied records via post_it().
#
# NOTE(review): relies on names defined elsewhere in the project: `events`
# (data frame with fields.Yliopisto / fields.URL columns), `year_now`
# (current year used to build dates) and `post_it()`; also assumes dplyr,
# purrr, stringr, rvest, polite and lubridate are attached -- confirm at
# the call site.
write_uef_event_records <- function() {
  # Look up the UEF events listing URL from the shared `events` table.
  uef <- events %>%
    filter(fields.Yliopisto == 'Itä-Suomen yliopisto') %>%
    select(fields.URL)
  uef <- as.character(uef)
  # polite session: bow() registers the scraper against the site's robots.txt.
  session <- bow(uef,
                 user_agent = "sonkkilat@gmail.com")
  nodes <- scrape(session) %>%
    html_nodes(xpath = "//div[@role='article']")
  # One data-frame row per event teaser on the listing page.
  df <- map_df(nodes, function(item) {
    data.frame(university = "Itä-Suomen yliopisto",
               id = str_squish(item %>%
                                 html_node(xpath = "descendant::a") %>%
                                 html_attr("href")),
               person = NA,
               title = str_squish(item %>%
                                    html_node(xpath = "descendant::h2[@class='liftup__title']") %>%
                                    html_text()),
               link = NA,
               date_day = str_squish(item %>%
                                       html_node(xpath = "descendant::p[@class='date__day']") %>%
                                       html_text()),
               date_month = str_squish(item %>%
                                         html_node(xpath = "descendant::p[@class='date__month']") %>%
                                         html_text()),
               time = NA,
               stringsAsFactors=FALSE)
  })
  # Keep only defence events; flag titles promising an online stream.
  df <- df %>%
    filter(str_detect(title, "[vV]äitös")) %>%
    mutate(link_to_be = str_detect(title, "[vV]erkossa"))
  # Visit each event page to extract the thesis title, time and stream link.
  df_res <- df %>%
    pmap_dfr(function(...) {
      current <- tibble(...)
      url <- paste0("https://www.uef.fi", current$id)
      page <- nod(session, url)
      content <- scrape(page) %>%
        html_node(xpath = "descendant::article") %>%
        html_text()
      # Normalise typographic quotes so the regex below can match plainly.
      content_tidy_quotes <- gsub('[”“]', '"', content)
      # Title is either within quotes
      title_long_quotes <- content_tidy_quotes %>%
        str_extract(., '(?<=väitöskir[^"]{1,20}")[^"]+')
      # or inside em element
      title_long_em <- scrape(page) %>%
        html_node(xpath = "descendant::em") %>%
        html_text()
      # Parsing all links on the page
      links <- scrape(page) %>%
        html_nodes(xpath = "descendant::a") %>%
        html_attr("href")
      # These seem to be the top two
      uef_video <- match(1, str_detect(links, "www.uef.fi/live[^\\s]+"))
      lsc_video <- match(1, str_detect(links, 'stream.lifesizecloud[^\\s]+'))
      event <- scrape(page) %>%
        html_node(xpath = "descendant::div[@class='grid__item']")
      time_scraped <- event %>%
        html_node(xpath = "dl/dt[contains(text(), 'Aika:')]/following-sibling::dd") %>%
        html_text()
      # Prefer the quoted title, then the <em> title; prefer a UEF stream
      # link, then Lifesize, then a placeholder when one was promised.
      current %>%
        mutate(title_long = ifelse(!is.na(title_long_quotes), title_long_quotes,
                                   ifelse(is.na(title_long_quotes) & !is.na(title_long_em), title_long_em,
                                          NA)),
               time = time_scraped,
               link = ifelse(!is.na(link), link,
                             ifelse(is.na(link) & !is.na(uef_video), links[uef_video],
                                    ifelse(is.na(link) & is.na(uef_video) & !is.na(lsc_video), links[lsc_video],
                                           ifelse(is.na(link) & is.na(uef_video) & is.na(lsc_video) & link_to_be == TRUE, "https://to.be.announced",
                                                  NA)))))
    })
  # Drop events for which no stream link could be determined.
  df <- df_res %>%
    filter(!is.na(link))
  # Normalise date/time fields (Finnish month abbreviations -> numbers)
  # and assemble the final record layout expected by post_it().
  df_tidy <- df %>%
    mutate(id = paste0("https://www.uef.fi", id),
           title_long = ifelse(is.na(title_long), "-", title_long),
           title_person = paste0(title, " : ", title_long),
           time = str_squish(time),
           time = paste0(str_extract(time, "^[^\\-]+"), ":00"),
           date_day = ifelse(nchar(date_day)==1, paste0("0", date_day), date_day),
           month_from_date_month = case_when(date_month == "Syys" ~ "09",
                                             date_month == "Loka" ~ "10",
                                             date_month == "Marras" ~ "11",
                                             date_month == "Joulu" ~ "12",
                                             date_month == "Tammi" ~ "01",
                                             date_month == "Helmi" ~ "02",
                                             date_month == "Maalis" ~ "03",
                                             date_month == "Huhti" ~ "04",
                                             date_month == "Touko" ~ "05",
                                             date_month == "Kesä" ~ "06",
                                             date_month == "Heinä" ~ "07",
                                             date_month == "Elo" ~ "08",
                                             TRUE ~ "other"),
           date = paste(date_day, month_from_date_month, year_now, sep = "."),
           date = as.Date(date, "%d.%m.%Y"),
           datetime = as.POSIXct(paste(date, time), format="%Y-%m-%d %H:%M:%S"),
           datetime = as_datetime(datetime, tz = "UTC")) %>%
    select(-title, -date) %>%
    rename(title = title_person,
           date = datetime) %>%
    mutate(title = gsub("\\(myös verkossa\\)", "", title)) %>%
    select(university, id, title, date, link) %>%
    filter(date >= Sys.Date())
  post_it(df_tidy)
}
|
/uef_polite.R
|
no_license
|
tts/onlinedefences
|
R
| false
| false
| 5,548
|
r
|
# Scrape upcoming doctoral defence ("väitös") events from the University of
# Eastern Finland (UEF) website and submit the tidied records via post_it().
#
# NOTE(review): relies on names defined elsewhere in the project: `events`
# (data frame with fields.Yliopisto / fields.URL columns), `year_now`
# (current year used to build dates) and `post_it()`; also assumes dplyr,
# purrr, stringr, rvest, polite and lubridate are attached -- confirm at
# the call site.
write_uef_event_records <- function() {
  # Look up the UEF events listing URL from the shared `events` table.
  uef <- events %>%
    filter(fields.Yliopisto == 'Itä-Suomen yliopisto') %>%
    select(fields.URL)
  uef <- as.character(uef)
  # polite session: bow() registers the scraper against the site's robots.txt.
  session <- bow(uef,
                 user_agent = "sonkkilat@gmail.com")
  nodes <- scrape(session) %>%
    html_nodes(xpath = "//div[@role='article']")
  # One data-frame row per event teaser on the listing page.
  df <- map_df(nodes, function(item) {
    data.frame(university = "Itä-Suomen yliopisto",
               id = str_squish(item %>%
                                 html_node(xpath = "descendant::a") %>%
                                 html_attr("href")),
               person = NA,
               title = str_squish(item %>%
                                    html_node(xpath = "descendant::h2[@class='liftup__title']") %>%
                                    html_text()),
               link = NA,
               date_day = str_squish(item %>%
                                       html_node(xpath = "descendant::p[@class='date__day']") %>%
                                       html_text()),
               date_month = str_squish(item %>%
                                         html_node(xpath = "descendant::p[@class='date__month']") %>%
                                         html_text()),
               time = NA,
               stringsAsFactors=FALSE)
  })
  # Keep only defence events; flag titles promising an online stream.
  df <- df %>%
    filter(str_detect(title, "[vV]äitös")) %>%
    mutate(link_to_be = str_detect(title, "[vV]erkossa"))
  # Visit each event page to extract the thesis title, time and stream link.
  df_res <- df %>%
    pmap_dfr(function(...) {
      current <- tibble(...)
      url <- paste0("https://www.uef.fi", current$id)
      page <- nod(session, url)
      content <- scrape(page) %>%
        html_node(xpath = "descendant::article") %>%
        html_text()
      # Normalise typographic quotes so the regex below can match plainly.
      content_tidy_quotes <- gsub('[”“]', '"', content)
      # Title is either within quotes
      title_long_quotes <- content_tidy_quotes %>%
        str_extract(., '(?<=väitöskir[^"]{1,20}")[^"]+')
      # or inside em element
      title_long_em <- scrape(page) %>%
        html_node(xpath = "descendant::em") %>%
        html_text()
      # Parsing all links on the page
      links <- scrape(page) %>%
        html_nodes(xpath = "descendant::a") %>%
        html_attr("href")
      # These seem to be the top two
      uef_video <- match(1, str_detect(links, "www.uef.fi/live[^\\s]+"))
      lsc_video <- match(1, str_detect(links, 'stream.lifesizecloud[^\\s]+'))
      event <- scrape(page) %>%
        html_node(xpath = "descendant::div[@class='grid__item']")
      time_scraped <- event %>%
        html_node(xpath = "dl/dt[contains(text(), 'Aika:')]/following-sibling::dd") %>%
        html_text()
      # Prefer the quoted title, then the <em> title; prefer a UEF stream
      # link, then Lifesize, then a placeholder when one was promised.
      current %>%
        mutate(title_long = ifelse(!is.na(title_long_quotes), title_long_quotes,
                                   ifelse(is.na(title_long_quotes) & !is.na(title_long_em), title_long_em,
                                          NA)),
               time = time_scraped,
               link = ifelse(!is.na(link), link,
                             ifelse(is.na(link) & !is.na(uef_video), links[uef_video],
                                    ifelse(is.na(link) & is.na(uef_video) & !is.na(lsc_video), links[lsc_video],
                                           ifelse(is.na(link) & is.na(uef_video) & is.na(lsc_video) & link_to_be == TRUE, "https://to.be.announced",
                                                  NA)))))
    })
  # Drop events for which no stream link could be determined.
  df <- df_res %>%
    filter(!is.na(link))
  # Normalise date/time fields (Finnish month abbreviations -> numbers)
  # and assemble the final record layout expected by post_it().
  df_tidy <- df %>%
    mutate(id = paste0("https://www.uef.fi", id),
           title_long = ifelse(is.na(title_long), "-", title_long),
           title_person = paste0(title, " : ", title_long),
           time = str_squish(time),
           time = paste0(str_extract(time, "^[^\\-]+"), ":00"),
           date_day = ifelse(nchar(date_day)==1, paste0("0", date_day), date_day),
           month_from_date_month = case_when(date_month == "Syys" ~ "09",
                                             date_month == "Loka" ~ "10",
                                             date_month == "Marras" ~ "11",
                                             date_month == "Joulu" ~ "12",
                                             date_month == "Tammi" ~ "01",
                                             date_month == "Helmi" ~ "02",
                                             date_month == "Maalis" ~ "03",
                                             date_month == "Huhti" ~ "04",
                                             date_month == "Touko" ~ "05",
                                             date_month == "Kesä" ~ "06",
                                             date_month == "Heinä" ~ "07",
                                             date_month == "Elo" ~ "08",
                                             TRUE ~ "other"),
           date = paste(date_day, month_from_date_month, year_now, sep = "."),
           date = as.Date(date, "%d.%m.%Y"),
           datetime = as.POSIXct(paste(date, time), format="%Y-%m-%d %H:%M:%S"),
           datetime = as_datetime(datetime, tz = "UTC")) %>%
    select(-title, -date) %>%
    rename(title = title_person,
           date = datetime) %>%
    mutate(title = gsub("\\(myös verkossa\\)", "", title)) %>%
    select(university, id, title, date, link) %>%
    filter(date >= Sys.Date())
  post_it(df_tidy)
}
|
# Import-Export-Practice.R
#
# Script for practicing data import and export from R.
# Demonstrates: read.csv/read.table, readxl (single and multi-sheet),
# reading a CSV straight off an FTP/web URL, and write.csv.
#########################################################
## do some setup first...
#########################################################
# Load the readxl library for working with Excel files
library(readxl)
# Set up some variables to use for file names
RapidEye.file <- "IBP_RapidEyeOrtho_Availability.csv"
PRISM.file <- "PRISM_ppt_CraigMountain_1900-2018.csv"
excel.file <- "Lander-HAF_preferred_species_cover-031215.xlsx"
multi.excel.file <- "GPS_Collars_Example.xlsx"
web.file <- "ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.csv"
data.path <- "C:\\Users\\jakal\\OneDrive - University of Idaho\\Documents\\GitHub\\REM504-DataScience\\Practice_Datasets"
## Set a working directory (this will work for today, but I don't normally do this.)
## NOTE(review): setwd() with an absolute user-specific path makes the script
## non-portable; prefer project-relative paths or here::here() in real code.
setwd(data.path)
###########################################################
## Reading from CSV files
###########################################################
# Import data from a CSV file - will turn string fields into factors!
# (Only on R < 4.0; since R 4.0 stringsAsFactors defaults to FALSE.)
# Check help file (?read.csv) for defaults
rapideye.data <- read.csv(RapidEye.file, header = TRUE)
summary(rapideye.data)
# Overwrite the rapideye.data object, treat strings as character fields
rapideye.data <- read.csv(RapidEye.file, header = TRUE, stringsAsFactors = FALSE)
summary(rapideye.data)
# Use read.table instead - does the same thing as read.csv, but need to
# specify the separator and fill option for missing values.
# Useful if you have data that uses tabs or some other delimiter.
rapideye.data <- read.table(RapidEye.file, sep=",", header = TRUE, stringsAsFactors = FALSE, fill = TRUE)
# Import CSV data where you need to skip some header lines
prism.data <- read.csv(PRISM.file, header = TRUE, skip = 10)
###########################################################
## Reading data from Excel files
###########################################################
# Import a Worksheet from an Excel Workbook - will take first worksheet as default
### HINT: the read_excel function will not work if you have the file open in Excel. Close it first.
excel.data <- read_excel(excel.file)
# Import a specific worksheet from EXcel
excel.data <- read_excel(excel.file, sheet="Plot Totals")
# Example of importing multiple sheets from an Excel Workbook
## This example assumes that the data structure is the same in each of the sheets
## There's lots of ways to do this. For this example, I'll read each sheet in and
## append it to a data frame so in the end I'll have a single data frame.
# Get a list of the sheets in the Excel workbook
excel.sheets <- excel_sheets(multi.excel.file)
# Set up an empty data frame to hold the results
gps.data <- data.frame()
# iterate over each sheet in the excel file and append it to the data frame
# NOTE(review): rbind-in-a-loop is O(n^2); fine for a handful of sheets here.
for (current.sheet in excel.sheets) {
sheet.data <- read_excel(multi.excel.file, sheet=current.sheet, col_names=FALSE)
names(sheet.data) <- c("pdop","lat","lon","numSats","date","time","ttff","ttlf","ttbf")
sheet.data$collar <- current.sheet
gps.data <- rbind(gps.data,sheet.data)
}
# in the case of my data, I didn't have column names, so add those now
###########################################################
### Import data from a website
###########################################################
## Note that this only works this simply if you're accessing a file that is
## stored on a web server or FTP site. Accessing data via an API or through
## a site where you have to do a search/query/order is more complicated.
## We'll cover that in a later class lecture
web.data <- read.csv(url(web.file), header = TRUE, stringsAsFactors = FALSE)
###########################################################
## Writing data out to files
###########################################################
# Export R data to a CSV file
write.csv(web.data, file="some_data_to_save.csv")
|
/Lecture_Slides/R_Data_Import-Export_Practice.R
|
no_license
|
nredecker/REM504-DataScience
|
R
| false
| false
| 3,928
|
r
|
# Import-Export-Practice.R
#
# Script for practicing data import and export from R.
# Demonstrates: read.csv/read.table, readxl (single and multi-sheet),
# reading a CSV straight off an FTP/web URL, and write.csv.
#########################################################
## do some setup first...
#########################################################
# Load the readxl library for working with Excel files
library(readxl)
# Set up some variables to use for file names
RapidEye.file <- "IBP_RapidEyeOrtho_Availability.csv"
PRISM.file <- "PRISM_ppt_CraigMountain_1900-2018.csv"
excel.file <- "Lander-HAF_preferred_species_cover-031215.xlsx"
multi.excel.file <- "GPS_Collars_Example.xlsx"
web.file <- "ftp://ftp.ncdc.noaa.gov/pub/data/noaa/isd-history.csv"
data.path <- "C:\\Users\\jakal\\OneDrive - University of Idaho\\Documents\\GitHub\\REM504-DataScience\\Practice_Datasets"
## Set a working directory (this will work for today, but I don't normally do this.)
## NOTE(review): setwd() with an absolute user-specific path makes the script
## non-portable; prefer project-relative paths or here::here() in real code.
setwd(data.path)
###########################################################
## Reading from CSV files
###########################################################
# Import data from a CSV file - will turn string fields into factors!
# (Only on R < 4.0; since R 4.0 stringsAsFactors defaults to FALSE.)
# Check help file (?read.csv) for defaults
rapideye.data <- read.csv(RapidEye.file, header = TRUE)
summary(rapideye.data)
# Overwrite the rapideye.data object, treat strings as character fields
rapideye.data <- read.csv(RapidEye.file, header = TRUE, stringsAsFactors = FALSE)
summary(rapideye.data)
# Use read.table instead - does the same thing as read.csv, but need to
# specify the separator and fill option for missing values.
# Useful if you have data that uses tabs or some other delimiter.
rapideye.data <- read.table(RapidEye.file, sep=",", header = TRUE, stringsAsFactors = FALSE, fill = TRUE)
# Import CSV data where you need to skip some header lines
prism.data <- read.csv(PRISM.file, header = TRUE, skip = 10)
###########################################################
## Reading data from Excel files
###########################################################
# Import a Worksheet from an Excel Workbook - will take first worksheet as default
### HINT: the read_excel function will not work if you have the file open in Excel. Close it first.
excel.data <- read_excel(excel.file)
# Import a specific worksheet from EXcel
excel.data <- read_excel(excel.file, sheet="Plot Totals")
# Example of importing multiple sheets from an Excel Workbook
## This example assumes that the data structure is the same in each of the sheets
## There's lots of ways to do this. For this example, I'll read each sheet in and
## append it to a data frame so in the end I'll have a single data frame.
# Get a list of the sheets in the Excel workbook
excel.sheets <- excel_sheets(multi.excel.file)
# Set up an empty data frame to hold the results
gps.data <- data.frame()
# iterate over each sheet in the excel file and append it to the data frame
# NOTE(review): rbind-in-a-loop is O(n^2); fine for a handful of sheets here.
for (current.sheet in excel.sheets) {
sheet.data <- read_excel(multi.excel.file, sheet=current.sheet, col_names=FALSE)
names(sheet.data) <- c("pdop","lat","lon","numSats","date","time","ttff","ttlf","ttbf")
sheet.data$collar <- current.sheet
gps.data <- rbind(gps.data,sheet.data)
}
# in the case of my data, I didn't have column names, so add those now
###########################################################
### Import data from a website
###########################################################
## Note that this only works this simply if you're accessing a file that is
## stored on a web server or FTP site. Accessing data via an API or through
## a site where you have to do a search/query/order is more complicated.
## We'll cover that in a later class lecture
web.data <- read.csv(url(web.file), header = TRUE, stringsAsFactors = FALSE)
###########################################################
## Writing data out to files
###########################################################
# Export R data to a CSV file
write.csv(web.data, file="some_data_to_save.csv")
|
# data-raw build script: defines the four LCARS color series (named by the
# in-universe year of the interface design), assembles the 'trekpals'
# palette list, and serializes everything into the package via usethis.
# Each lcarsNNNN data frame maps color names to hex values for one series.
lcars2357 <- data.frame(
  series = 2357L,
  name = c("pale-canary", "tanoi", "golden-tanoi", "neon-carrot", "eggplant", "lilac", "anakiwa", "mariner", "bahama-blue"),
  value = c("#FFFF99", "#FFCC99", "#FFCC66", "#FF9933", "#664466", "#CC99CC", "#99CCFF", "#3366CC", "#006699"),
  stringsAsFactors = FALSE
)
lcars2369 <- data.frame(
  series = 2369L,
  name = c("blue-bell", "melrose", "lilac", "hopbush", "chestnut-rose", "orange-peel", "atomic-tangerine", "golden-tanoi"),
  value = c("#9999CC", "#9999FF", "#CC99CC", "#CC6699", "#CC6666", "#FF9966", "#FF9900", "#FFCC66"),
  stringsAsFactors = FALSE
)
lcars2375 <- data.frame(
  series = 2375L,
  name = c("danub", "indigo", "lavender-purple", "cosmic", "red-damask", "medium-carmine", "bourbon", "sandy-brown"),
  value = c("#6688CC", "#4455BB","#9977AA", "#774466", "#DD6644", "#AA5533", "#BB6622", "#EE9955"),
  stringsAsFactors = FALSE
)
lcars2379 <- data.frame(
  series = 2379L,
  name = c("periwinkle", "dodger-pale", "dodger-soft", "near-blue", "navy-blue", "husk", "rust", "tamarillo"),
  value = c("#CCDDFF", "#5599FF", "#3366FF", "#0011EE", "#000088", "#BBAA55", "#BB4411", "#882211"),
  stringsAsFactors = FALSE
)
# Master list of all exported palettes; fixed vectors are hand-picked,
# colorRampPalette(...)(9) entries are 9-step interpolated gradients.
trekpals <- list(
  andorian = colorRampPalette(c("#202020", "#446D99", "#83BDD7", "#E4E4E4"))(9),
  bajoran = c("#C00000", "#7A6424", "#323C34", "#4838A8", "#E6D56E", "#A8B5A9"),
  borg = c("#253741", "#5C849B", "#9EB3BA", "#E8D159", "#A68528", "#604E20"),
  breen = c("#56483D", "#332822", "#DFCDB9", "#996601", "#8BC95A", "#6C696F", "#F1E899", "#EE711E"),
  breen2 = colorRampPalette(c("#7E0500", "#CE1800", "#EF6F00", "#F7BD00", "#E4E4E4"))(9),
  dominion = c("#313131", "#255268", "#620062", "#63A542", "#A5C6D6", "#B5D6A5", "#B900B9"),
  enara = colorRampPalette(c("#4D004D", "#800080", "#F46404", "#E7D057", "#E9E09C"))(9),
  enara2 = c("#262626", "#3D5983", "#5A8A54", "#CFB023"),
  ferengi = colorRampPalette(c("#00740E", "#E4E4E4", "#C86B32"))(9),
  gorn = c("#000042", "#0000B5", "#145416", "#639863", "#BDF7C6", "#FF0000", "#4E0000", "#A09349", "#777384"),
  iconian = colorRampPalette(c("#633210", "#B85C1D", "#FFFFFF", "#6B666C"))(9),
  klingon = rev(colorRampPalette(c("#000000", "#330000", "#660000", "#990000", "#CA0000", "#CA6400", "#C99400", "#FFFA0C"))(9)),
  # union of the four series, de-duplicated (some colors repeat across series)
  lcars_series = unique(c(lcars2357$value, lcars2369$value, lcars2375$value, lcars2379$value)),
  lcars_2357 = lcars2357$value,
  lcars_2369 = lcars2369$value,
  lcars_2375 = lcars2375$value,
  lcars_2379 = lcars2379$value,
  lcars_alt = c("#FF9C00", "#F7BD5A", "#FFCC99", "#FFFF33", "#FFFF9C", "#CD6363", "#CC99CC",
                "#FF9E63", "#646DCC", "#9C9CFF", "#3399FF", "#99CCFF", "#FFFFCC", "#B1957A",
                "#ED884C", "#F5ED00", "#DDFFFF"),
  lcars_first_contact = c("#C198B0", "#B46757", "#AE697D", "#97567B", "#C67825", "#B28452", "#C2B74B", "#BEBCDF"),
  lcars_nemesis = c("#0A45EE", "#3786FF", "#4BB0FF", "#87EEFF", "#46616E", "#D45F10", "#A35A1A", "#A89B35", "#DFAF71", "#ACA98A"),
  lcars_nx01 = c("#BDEFFF", "#009CCE", "#DEFFB5", "#CD6363", "#E7FFFF", "#4D6184"),
  lcars_29c = c("#39C924", "#72E2E4", "#20788C", "#24BEE2", "#BC956E", "#D19FA2", "#805070", "#2062EE"),
  lcars_23c = c("#0000FF", "#99CCFF", "#6666FF", "#99FF66", "#009900", "#FF6633", "#66CCFF"),
  # NOTE(review): "#Bf2D42" has a lowercase hex digit -- valid for R color
  # parsing, but inconsistent with the rest of the file; consider "#BF2D42".
  lcars_red_alert = c("#F517C3", "#Bf2D42", "#A30E24", "#330512", "#D9D5B8", "#F1DFBF", "#4C4D47", "#9E9993"),
  lcars_cardassian = c("#B46356", "#944D40", "#7A4B42", "#CA480D", "#9B5928", "#C86C18",
                       "#D78017", "#F9AB3C", "#FFE705", "#FFF7A3", "#E2ED50",
                       "#2F7270", "#66FFFF", "#3C999C", "#8BEAFF", "#13A4EB", "#2E7BC5",
                       "#A1B3E2", "#BFCAFE", "#8B799C", "#524559"),
  romulan = colorRampPalette(c("#1DF964", "#000000", "#429AFC"))(9),
  romulan2 = colorRampPalette(c("#80F2B3", "#363636", "#30E1EA"))(9),
  starfleet = c("#5B1414", "#AD722C", "#1A6384"),
  starfleet2 = c("#5B1414", "#AD722C", "#1A6384", "#2C6B70", "#483A4A", "#000000"),
  tholian = colorRampPalette(c("#5E0000", "#F07266", "#E1E1E1", "#D2C69C", "#9A7B08"))(9),
  terran = colorRampPalette(c("#000000", "#704D29", "#D4B293", "#D0CED1"))(9),
  ufp = colorRampPalette(c("#E4E4E4", "#201F7B"))(9),
  red_alert = c("#670000", "#990000", "#CD0000", "#FE0000", "#FF9190", "#4D4D4D"),
  yellow_alert = c("#674305", "#986509", "#CD870E", "#FFA90E", "#FFDA67", "#4D4D4D"),
  black_alert = c("#050B64", "#0E3A9B", "#307CE4", "#64FFFF", "#000000")
)
# Exported dataset: trekpals
usethis::use_data(trekpals, overwrite = TRUE)
# Internal lookup tables: named hex vectors (name -> value) per series, plus
# a combined name->hex map with duplicate hex values dropped (first wins).
.lcars2357 <- setNames(lcars2357$value, lcars2357$name)
.lcars2369 <- setNames(lcars2369$value, lcars2369$name)
.lcars2375 <- setNames(lcars2375$value, lcars2375$name)
.lcars2379 <- setNames(lcars2379$value, lcars2379$name)
.lcarscolors <- c(.lcars2357, .lcars2369, .lcars2375, .lcars2379)
.lcarscolors <- .lcarscolors[!duplicated(.lcarscolors)]
# Palette registry keyed by the short names users pass to palette functions.
.lcars_pals <- list(
  `2357` = trekpals[["lcars_2357"]],
  `2369` = trekpals[["lcars_2369"]],
  `2375` = trekpals[["lcars_2375"]],
  `2379` = trekpals[["lcars_2379"]],
  `alt` = trekpals[["lcars_alt"]],
  `first_contact` = trekpals[["lcars_first_contact"]],
  `nemesis` = trekpals[["lcars_nemesis"]],
  `nx01` = trekpals[["lcars_nx01"]],
  `23c` = trekpals[["lcars_23c"]],
  `29c` = trekpals[["lcars_29c"]],
  `red_alert` = trekpals[["lcars_red_alert"]],
  `cardassian` = trekpals[["lcars_cardassian"]]
)
# internal = TRUE stores these in R/sysdata.rda (not user-visible datasets)
usethis::use_data(.lcars2357, .lcars2369, .lcars2375, .lcars2379, .lcarscolors, .lcars_pals, internal = TRUE, overwrite = TRUE)
|
/data-raw/data.R
|
permissive
|
RTrek/trekcolors
|
R
| false
| false
| 5,459
|
r
|
# data-raw build script (duplicate copy in this dump): defines the LCARS
# color series, assembles the 'trekpals' palette list, and serializes the
# package datasets via usethis::use_data().
lcars2357 <- data.frame(
  series = 2357L,
  name = c("pale-canary", "tanoi", "golden-tanoi", "neon-carrot", "eggplant", "lilac", "anakiwa", "mariner", "bahama-blue"),
  value = c("#FFFF99", "#FFCC99", "#FFCC66", "#FF9933", "#664466", "#CC99CC", "#99CCFF", "#3366CC", "#006699"),
  stringsAsFactors = FALSE
)
lcars2369 <- data.frame(
  series = 2369L,
  name = c("blue-bell", "melrose", "lilac", "hopbush", "chestnut-rose", "orange-peel", "atomic-tangerine", "golden-tanoi"),
  value = c("#9999CC", "#9999FF", "#CC99CC", "#CC6699", "#CC6666", "#FF9966", "#FF9900", "#FFCC66"),
  stringsAsFactors = FALSE
)
lcars2375 <- data.frame(
  series = 2375L,
  name = c("danub", "indigo", "lavender-purple", "cosmic", "red-damask", "medium-carmine", "bourbon", "sandy-brown"),
  value = c("#6688CC", "#4455BB","#9977AA", "#774466", "#DD6644", "#AA5533", "#BB6622", "#EE9955"),
  stringsAsFactors = FALSE
)
lcars2379 <- data.frame(
  series = 2379L,
  name = c("periwinkle", "dodger-pale", "dodger-soft", "near-blue", "navy-blue", "husk", "rust", "tamarillo"),
  value = c("#CCDDFF", "#5599FF", "#3366FF", "#0011EE", "#000088", "#BBAA55", "#BB4411", "#882211"),
  stringsAsFactors = FALSE
)
# Master palette list; colorRampPalette(...)(9) entries are 9-step gradients.
trekpals <- list(
  andorian = colorRampPalette(c("#202020", "#446D99", "#83BDD7", "#E4E4E4"))(9),
  bajoran = c("#C00000", "#7A6424", "#323C34", "#4838A8", "#E6D56E", "#A8B5A9"),
  borg = c("#253741", "#5C849B", "#9EB3BA", "#E8D159", "#A68528", "#604E20"),
  breen = c("#56483D", "#332822", "#DFCDB9", "#996601", "#8BC95A", "#6C696F", "#F1E899", "#EE711E"),
  breen2 = colorRampPalette(c("#7E0500", "#CE1800", "#EF6F00", "#F7BD00", "#E4E4E4"))(9),
  dominion = c("#313131", "#255268", "#620062", "#63A542", "#A5C6D6", "#B5D6A5", "#B900B9"),
  enara = colorRampPalette(c("#4D004D", "#800080", "#F46404", "#E7D057", "#E9E09C"))(9),
  enara2 = c("#262626", "#3D5983", "#5A8A54", "#CFB023"),
  ferengi = colorRampPalette(c("#00740E", "#E4E4E4", "#C86B32"))(9),
  gorn = c("#000042", "#0000B5", "#145416", "#639863", "#BDF7C6", "#FF0000", "#4E0000", "#A09349", "#777384"),
  iconian = colorRampPalette(c("#633210", "#B85C1D", "#FFFFFF", "#6B666C"))(9),
  klingon = rev(colorRampPalette(c("#000000", "#330000", "#660000", "#990000", "#CA0000", "#CA6400", "#C99400", "#FFFA0C"))(9)),
  lcars_series = unique(c(lcars2357$value, lcars2369$value, lcars2375$value, lcars2379$value)),
  lcars_2357 = lcars2357$value,
  lcars_2369 = lcars2369$value,
  lcars_2375 = lcars2375$value,
  lcars_2379 = lcars2379$value,
  lcars_alt = c("#FF9C00", "#F7BD5A", "#FFCC99", "#FFFF33", "#FFFF9C", "#CD6363", "#CC99CC",
                "#FF9E63", "#646DCC", "#9C9CFF", "#3399FF", "#99CCFF", "#FFFFCC", "#B1957A",
                "#ED884C", "#F5ED00", "#DDFFFF"),
  lcars_first_contact = c("#C198B0", "#B46757", "#AE697D", "#97567B", "#C67825", "#B28452", "#C2B74B", "#BEBCDF"),
  lcars_nemesis = c("#0A45EE", "#3786FF", "#4BB0FF", "#87EEFF", "#46616E", "#D45F10", "#A35A1A", "#A89B35", "#DFAF71", "#ACA98A"),
  lcars_nx01 = c("#BDEFFF", "#009CCE", "#DEFFB5", "#CD6363", "#E7FFFF", "#4D6184"),
  lcars_29c = c("#39C924", "#72E2E4", "#20788C", "#24BEE2", "#BC956E", "#D19FA2", "#805070", "#2062EE"),
  lcars_23c = c("#0000FF", "#99CCFF", "#6666FF", "#99FF66", "#009900", "#FF6633", "#66CCFF"),
  # NOTE(review): "#Bf2D42" mixes case -- valid hex for R, but inconsistent.
  lcars_red_alert = c("#F517C3", "#Bf2D42", "#A30E24", "#330512", "#D9D5B8", "#F1DFBF", "#4C4D47", "#9E9993"),
  lcars_cardassian = c("#B46356", "#944D40", "#7A4B42", "#CA480D", "#9B5928", "#C86C18",
                       "#D78017", "#F9AB3C", "#FFE705", "#FFF7A3", "#E2ED50",
                       "#2F7270", "#66FFFF", "#3C999C", "#8BEAFF", "#13A4EB", "#2E7BC5",
                       "#A1B3E2", "#BFCAFE", "#8B799C", "#524559"),
  romulan = colorRampPalette(c("#1DF964", "#000000", "#429AFC"))(9),
  romulan2 = colorRampPalette(c("#80F2B3", "#363636", "#30E1EA"))(9),
  starfleet = c("#5B1414", "#AD722C", "#1A6384"),
  starfleet2 = c("#5B1414", "#AD722C", "#1A6384", "#2C6B70", "#483A4A", "#000000"),
  tholian = colorRampPalette(c("#5E0000", "#F07266", "#E1E1E1", "#D2C69C", "#9A7B08"))(9),
  terran = colorRampPalette(c("#000000", "#704D29", "#D4B293", "#D0CED1"))(9),
  ufp = colorRampPalette(c("#E4E4E4", "#201F7B"))(9),
  red_alert = c("#670000", "#990000", "#CD0000", "#FE0000", "#FF9190", "#4D4D4D"),
  yellow_alert = c("#674305", "#986509", "#CD870E", "#FFA90E", "#FFDA67", "#4D4D4D"),
  black_alert = c("#050B64", "#0E3A9B", "#307CE4", "#64FFFF", "#000000")
)
usethis::use_data(trekpals, overwrite = TRUE)
# Internal name->hex lookup vectors; duplicates dropped (first occurrence wins).
.lcars2357 <- setNames(lcars2357$value, lcars2357$name)
.lcars2369 <- setNames(lcars2369$value, lcars2369$name)
.lcars2375 <- setNames(lcars2375$value, lcars2375$name)
.lcars2379 <- setNames(lcars2379$value, lcars2379$name)
.lcarscolors <- c(.lcars2357, .lcars2369, .lcars2375, .lcars2379)
.lcarscolors <- .lcarscolors[!duplicated(.lcarscolors)]
.lcars_pals <- list(
  `2357` = trekpals[["lcars_2357"]],
  `2369` = trekpals[["lcars_2369"]],
  `2375` = trekpals[["lcars_2375"]],
  `2379` = trekpals[["lcars_2379"]],
  `alt` = trekpals[["lcars_alt"]],
  `first_contact` = trekpals[["lcars_first_contact"]],
  `nemesis` = trekpals[["lcars_nemesis"]],
  `nx01` = trekpals[["lcars_nx01"]],
  `23c` = trekpals[["lcars_23c"]],
  `29c` = trekpals[["lcars_29c"]],
  `red_alert` = trekpals[["lcars_red_alert"]],
  `cardassian` = trekpals[["lcars_cardassian"]]
)
# internal = TRUE writes these to R/sysdata.rda
usethis::use_data(.lcars2357, .lcars2369, .lcars2375, .lcars2379, .lcarscolors, .lcars_pals, internal = TRUE, overwrite = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stochasim_functions.R
\name{est_mu}
\alias{est_mu}
\title{Approximate relative bank strength}
\usage{
est_mu(H, d)
}
\arguments{
\item{H}{effective rooting depth for riparian vegetation (m)}
\item{d}{mean hydraulic depth at channel-forming flow (m)}
}
\description{
\code{est_mu} uses an empirical function to estimate the relative bank
strength produced by vegetation with a known effective rooting depth (H)
(after Eaton, 2006) and a known average channel depth (d). The function only
applies when H < 0.9d.
}
|
/man/est_mu.Rd
|
no_license
|
bceaton/stochasim
|
R
| false
| true
| 592
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stochasim_functions.R
\name{est_mu}
\alias{est_mu}
\title{Approximate relative bank strength}
\usage{
est_mu(H, d)
}
\arguments{
\item{H}{effective rooting depth for riparian vegetation (m)}
\item{d}{mean hydraulic depth at channel-forming flow (m)}
}
\description{
\code{est_mu} uses an empirical function to estimate the relative bank
strength produced by vegetation with a known effective rooting depth (H)
(after Eaton, 2006) and a known average channel depth (d). The function only
applies when H < 0.9d.
}
|
## These two functions are used to create a special object that stores a matrix and cache's its inverse.
## Create a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures sharing the matrix `x` and a
## cached inverse:
##   set(y)          -- replace the stored matrix, invalidating the cache
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()        -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # new matrix, old inverse no longer valid
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## Compute the inverse of the special "matrix" produced by makeCacheMatrix.
##
## On the first call the inverse is computed with solve() and stored in the
## wrapper's cache; subsequent calls (with an unchanged matrix) return the
## cached value and emit a "getting cached data" message.
##
## x   : wrapper list from makeCacheMatrix
## ... : forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    ## cache miss: invert and remember the result
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
carlosjgm/ProgrammingAssignment2
|
R
| false
| false
| 958
|
r
|
## These two functions are used to create a special object that stores a matrix and cache's its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## Returns a list of four closures over `x` and `inv`:
##   set(y)  replaces the matrix and clears the cache; get() returns it;
##   setinv()/getinv() store and retrieve the cached inverse (NULL if unset).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # matrix changed, so the cached inverse is stale
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix. If the inverse has already been calculated (and the
## matrix has not changed), then the cachesolve retrieves the inverse from the cache.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinv()
  if(!is.null(inv)) {
    # cache hit: announce and return the stored inverse
    message("getting cached data")
    return(inv)
  }
  # cache miss: invert, remember, return
  data <- x$get()
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
|
/clase 26 febrero 2018.R
|
no_license
|
JueliVilchis/Software_Actuarial_III
|
R
| false
| false
| 1,582
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confidence_intervals.R
\name{predict_prob}
\alias{predict_prob}
\title{Estimation of Failure Probabilities for Parametric Lifetime Distributions}
\usage{
predict_prob(q, loc_sc_params, distribution = c("weibull", "lognormal",
"loglogistic", "normal", "logistic", "sev", "weibull3", "lognormal3",
"loglogistic3"))
}
\arguments{
\item{q}{a numeric vector which consists of lifetime data.}
\item{loc_sc_params}{a (named) numeric vector of estimated location
and scale parameters for a specified distribution. The order of
elements is important. First entry needs to be the location
parameter \eqn{\mu} and the second element needs to be the scale
parameter \eqn{\sigma}. If a three-parametric model is used the third element
is the threshold parameter \eqn{\gamma}.}
\item{distribution}{supposed distribution of the random variable. The
value can be \code{"weibull"}, \code{"lognormal"}, \code{"loglogistic"},
\code{"normal"}, \code{"logistic"}, \code{"sev"} \emph{(smallest extreme value)},
\code{"weibull3"}, \code{"lognormal3"} or \code{"loglogistic3"}.
Other distributions have not been implemented yet.}
}
\value{
A vector containing the estimated failure probabilities for a given
set of quantiles and estimated parameters.
}
\description{
This function estimates the failure probabilities for a given set of estimated
location-scale (and threshold) parameters and specified quantiles.
}
\examples{
# Example 1: Predicted probabilities for two-parameter Weibull:
probs <- predict_prob(q = c(15, 48, 124), loc_sc_params = c(5, 0.5),
distribution = "weibull")
# Example 2: Predicted probabilities for three-parameter Weibull:
probs_weib3 <- predict_prob(q = c(25, 58, 134), loc_sc_params = c(5, 0.5, 10),
distribution = "weibull3")
}
|
/man/predict_prob.Rd
|
no_license
|
fabysbe/weibulltools
|
R
| false
| true
| 1,876
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confidence_intervals.R
\name{predict_prob}
\alias{predict_prob}
\title{Estimation of Failure Probabilities for Parametric Lifetime Distributions}
\usage{
predict_prob(q, loc_sc_params, distribution = c("weibull", "lognormal",
"loglogistic", "normal", "logistic", "sev", "weibull3", "lognormal3",
"loglogistic3"))
}
\arguments{
\item{q}{a numeric vector which consists of lifetime data.}
\item{loc_sc_params}{a (named) numeric vector of estimated location
and scale parameters for a specified distribution. The order of
elements is important. First entry needs to be the location
parameter \eqn{\mu} and the second element needs to be the scale
parameter \eqn{\sigma}. If a three-parametric model is used the third element
is the threshold parameter \eqn{\gamma}.}
\item{distribution}{supposed distribution of the random variable. The
value can be \code{"weibull"}, \code{"lognormal"}, \code{"loglogistic"},
\code{"normal"}, \code{"logistic"}, \code{"sev"} \emph{(smallest extreme value)},
\code{"weibull3"}, \code{"lognormal3"} or \code{"loglogistic3"}.
Other distributions have not been implemented yet.}
}
\value{
A vector containing the estimated failure probabilities for a given
set of quantiles and estimated parameters.
}
\description{
This function estimates the failure probabilities for a given set of estimated
location-scale (and threshold) parameters and specified quantiles.
}
\examples{
# Example 1: Predicted probabilities for two-parameter Weibull:
probs <- predict_prob(q = c(15, 48, 124), loc_sc_params = c(5, 0.5),
distribution = "weibull")
# Example 2: Predicted probabilities for three-parameter Weibull:
probs_weib3 <- predict_prob(q = c(25, 58, 134), loc_sc_params = c(5, 0.5, 10),
distribution = "weibull3")
}
|
# DTU31761A3: Wind Power Output Prediction using Regression
# Functions to ouput the result
# author: Edward J. Xu
# date: May 22th, 2019
########################################################################################################################
#' To output the result in a csv with a specific name
#'
#' Writes `result` as a headerless, unquoted CSV under Output/. The file
#' name is derived from the *name of the argument at the call site* via
#' deparse(substitute(result)), so pass a named variable, not an inline
#' expression. Assumes the Output/ directory already exists -- TODO confirm.
outputResult <- function(result, outputSeries = 1){
    strFileName <- paste("Output/", deparse(substitute(result)), "_", outputSeries, ".csv", sep = "")
    write.table(result, file = strFileName, sep = ",", dec = ".", row.names = F,
                col.names = F, quote = FALSE)
}
#' To convert a list of vectors in same length to a matrix, and then output
#'
#' Binds the list elements column-wise (one vector per column) and writes
#' the matrix as a headerless, unquoted CSV under Output/. The file name is
#' derived from the *name of the argument at the call site* via
#' deparse(substitute(listVec)), so pass a named variable. Assumes the
#' Output/ directory already exists -- TODO confirm.
#'
#' Fixes over the original loop: an empty list now fails with a clear
#' error, and unequal vector lengths are rejected instead of being
#' silently recycled by the column assignment.
outputlistVec <- function(listVec, outputSeries = 1){
    # capture the call-site name before anything else touches the promise
    strFileName <- paste("Output/", deparse(substitute(listVec)), "_", outputSeries, ".csv", sep = "")
    stopifnot(length(listVec) > 0)
    lens <- lengths(listVec)
    stopifnot(all(lens == lens[[1]]))   # guard against silent recycling
    mat <- do.call(cbind, listVec)      # one column per list element
    write.table(mat, file = strFileName, sep = ",", dec = ".", row.names = F,
                col.names = F, quote = FALSE)
}
|
/src/FuncOutput.R
|
permissive
|
ArjunReddy07/WindPowerForecast
|
R
| false
| false
| 1,135
|
r
|
# DTU31761A3: Wind Power Output Prediction using Regression
# Functions to ouput the result
# author: Edward J. Xu
# date: May 22th, 2019
########################################################################################################################
#' To output the result in a csv with a specific name
#'
#' Headerless, unquoted CSV under Output/; the file name comes from the
#' call-site name of `result` via deparse(substitute(result)), so pass a
#' named variable. Assumes Output/ exists -- TODO confirm.
outputResult <- function(result, outputSeries = 1){
    strFileName <- paste("Output/", deparse(substitute(result)), "_", outputSeries, ".csv", sep = "")
    write.table(result, file = strFileName, sep = ",", dec = ".", row.names = F,
                col.names = F, quote = FALSE)
}
#' To convert a list of vectors in same length to a matrix, and then output
#'
#' One column per list element; written as a headerless, unquoted CSV under
#' Output/ (directory assumed to exist -- TODO confirm). File name comes
#' from the call-site name of `listVec` via deparse(substitute(listVec)).
#' NOTE(review): if a vector is shorter than the first and its length
#' divides it, the column assignment below recycles silently -- callers
#' must guarantee equal lengths.
outputlistVec <- function(listVec, outputSeries = 1){
    numList <- length(listVec)
    num <- length(listVec[[1]])
    mat <- matrix(NA, ncol = numList, nrow = num)
    for (i in 1:numList) {
        mat[,i] <- listVec[[i]]
    }
    strFileName <- paste("Output/", deparse(substitute(listVec)), "_", outputSeries, ".csv", sep = "")
    write.table(mat, file = strFileName, sep = ",", dec = ".", row.names = F,
                col.names = F, quote = FALSE)
}
|
library(nimble)
# Load the E. cervi deer parasite dataset; path is relative, so the working
# directory must contain DeerEcervi.txt.
DeerEcervi <- read.table('DeerEcervi.txt', header = TRUE)
## Create presence/absence data from counts.
DeerEcervi$Ecervi.01 <- DeerEcervi$Ecervi
DeerEcervi$Ecervi.01[DeerEcervi$Ecervi>0] <- 1
## Center Length for better interpretation
DeerEcervi$cLength <- DeerEcervi$Length - mean(DeerEcervi$Length)
## Make a factor version of Sex for plotting
DeerEcervi$fSex <- factor(DeerEcervi$Sex)
## Make a factor and id version of Farm
DeerEcervi$fFarm <- factor(DeerEcervi$Farm)
DeerEcervi$farm.ids <- as.numeric(DeerEcervi$fFarm)
# Model 1: logistic GLMM with sex-specific intercepts/slopes and a farm
# random effect; priors use nimble's sd parameterization of dnorm.
DEcode1 <- nimbleCode({
  for(i in 1:2) {
    # Priors for intercepts and length coefficients for sex = 1,2
    sex.effect[i] ~ dnorm(0, sd = 1000)
    length.effect[i] ~ dnorm(0, sd = 1000)
  }
  # Priors for farm random effects and their standard deviation.
  farm.sd ~ dunif(0, 20)
  for(i in 1:num.farms) {
    farm.effect[i] ~ dnorm(0, sd = farm.sd)
  }
  # logit link and Bernoulli data probabilities
  for(i in 1:num.animals) {
    logit(disease.probability[i]) <-
      sex.effect[ sex[i] ] +
      length.effect[ sex[i] ]*cLength[i] +
      farm.effect[ farm.ids[i] ]
    Ecervi.01[i] ~ dbern(disease.probability[i])
  }
})
# Constants (num.farms = 24, num.animals = 826 are hard-coded to match the
# dataset -- confirm if the data file changes).
DEconstants <- list(num.farms = 24,
                    num.animals = 826,
                    cLength = DeerEcervi$cLength,
                    sex = DeerEcervi$Sex,
                    farm.ids = DeerEcervi$farm.ids)
DEdata <- list(Ecervi.01 = DeerEcervi$Ecervi.01)
# Initial-values generator; random farm effects, so seed below for
# reproducibility.
DEinits1 <- function() {
  list(sex.effect = c(0, 0),
       length.effect = c(0, 0),
       farm.sd = 1,
       farm.effect = rnorm(24, 0, 1) )
}
set.seed(123)
DEinits_vals <- DEinits1()
## JAGS-compatible version
# Same model, but dnorm priors are written with precision (1/variance)
# instead of sd, matching the JAGS/BUGS parameterization.
DEcode2 <- nimbleCode({
  for(i in 1:2) {
    length.effect[i] ~ dnorm(0, 1.0E-6) # precisions
    sex.effect[i] ~ dnorm(0, 1.0E-6)
  }
  farm.sd ~ dunif(0, 20)
  farm.precision <- 1/(farm.sd*farm.sd)
  for(i in 1:num.farms) {
    farm.effect[i] ~ dnorm(0, farm.precision) # precision
  }
  for(i in 1:num.animals) {
    logit(disease.probability[i]) <-
      sex.effect[ sex[i] ] +
      length.effect[ sex[i] ]*cLength[i] +
      farm.effect[ farm.ids[i] ]
    Ecervi.01[i] ~ dbern(disease.probability[i])
  }
})
|
/Content/examples/DeerEcervi/load_DeerEcervi.R
|
no_license
|
paritoshkroy/nimble-virtual-2020
|
R
| false
| false
| 2,200
|
r
|
library(nimble)
# Duplicate copy of the DeerEcervi loader in this dump; see notes inline.
DeerEcervi <- read.table('DeerEcervi.txt', header = TRUE)
## Create presence/absence data from counts.
DeerEcervi$Ecervi.01 <- DeerEcervi$Ecervi
DeerEcervi$Ecervi.01[DeerEcervi$Ecervi>0] <- 1
## Center Length for better interpretation
DeerEcervi$cLength <- DeerEcervi$Length - mean(DeerEcervi$Length)
## Make a factor version of Sex for plotting
DeerEcervi$fSex <- factor(DeerEcervi$Sex)
## Make a factor and id version of Farm
DeerEcervi$fFarm <- factor(DeerEcervi$Farm)
DeerEcervi$farm.ids <- as.numeric(DeerEcervi$fFarm)
# Logistic GLMM: sex-specific intercepts/slopes plus farm random effects;
# priors use nimble's sd parameterization.
DEcode1 <- nimbleCode({
  for(i in 1:2) {
    # Priors for intercepts and length coefficients for sex = 1,2
    sex.effect[i] ~ dnorm(0, sd = 1000)
    length.effect[i] ~ dnorm(0, sd = 1000)
  }
  # Priors for farm random effects and their standard deviation.
  farm.sd ~ dunif(0, 20)
  for(i in 1:num.farms) {
    farm.effect[i] ~ dnorm(0, sd = farm.sd)
  }
  # logit link and Bernoulli data probabilities
  for(i in 1:num.animals) {
    logit(disease.probability[i]) <-
      sex.effect[ sex[i] ] +
      length.effect[ sex[i] ]*cLength[i] +
      farm.effect[ farm.ids[i] ]
    Ecervi.01[i] ~ dbern(disease.probability[i])
  }
})
# Hard-coded counts match the shipped dataset -- confirm on data changes.
DEconstants <- list(num.farms = 24,
                    num.animals = 826,
                    cLength = DeerEcervi$cLength,
                    sex = DeerEcervi$Sex,
                    farm.ids = DeerEcervi$farm.ids)
DEdata <- list(Ecervi.01 = DeerEcervi$Ecervi.01)
DEinits1 <- function() {
  list(sex.effect = c(0, 0),
       length.effect = c(0, 0),
       farm.sd = 1,
       farm.effect = rnorm(24, 0, 1) )
}
set.seed(123)
DEinits_vals <- DEinits1()
## JAGS-compatible version (dnorm written with precision, not sd)
DEcode2 <- nimbleCode({
  for(i in 1:2) {
    length.effect[i] ~ dnorm(0, 1.0E-6) # precisions
    sex.effect[i] ~ dnorm(0, 1.0E-6)
  }
  farm.sd ~ dunif(0, 20)
  farm.precision <- 1/(farm.sd*farm.sd)
  for(i in 1:num.farms) {
    farm.effect[i] ~ dnorm(0, farm.precision) # precision
  }
  for(i in 1:num.animals) {
    logit(disease.probability[i]) <-
      sex.effect[ sex[i] ] +
      length.effect[ sex[i] ]*cLength[i] +
      farm.effect[ farm.ids[i] ]
    Ecervi.01[i] ~ dbern(disease.probability[i])
  }
})
|
#### Seurat Main Cluster analysis on Orchestra
# Subclusters the oligodendrocyte / radial glia / ganglionic eminence cells
# (clusters 12-15,19 of the main E18 object), then re-runs the standard
# Seurat pipeline (normalize -> HVG -> scale -> PCA -> cluster -> UMAP ->
# markers) on that subset.
library(Seurat)
library(dplyr)
library(Matrix)
library(methods)
#Inputs
species="m"
base_dir <- "/n/scratch2/bf78/MIASingleCell/MIA18"
projectName = "MIA18"
pc_num = 20;
res=0.4;
min_genes = 500;
# NOTE(review): pc_num and min_genes are defined but the hard-coded values
# 1:20 and nFeature_RNA > 200 are used below instead -- confirm intent.
load("seurat_mat_NewFinalE18.Robj")
load("cd_s_comb.Robj")
#Create CellVector, vector of all cell names listed as character strings, in this case all clusters that express markers for oligodendrocytes, radial glia, or ganglionic eminence
ORGGESeurat <- subset(seurat_mat_NewFinalE18,idents = c("12","13","14","15","19"))
ORGGECells <- colnames(ORGGESeurat)
# Pull the raw counts for just those cells from the combined count matrix
NewComb <- cd_comb[,ORGGECells]
### Seurat
Seurat_Oligo_RG_GE <- CreateSeuratObject(counts = NewComb, min.cells = 3, project = projectName)
# NOTE(review): species is "m" (mouse) but the patterns "^MT-"/"^RP[SL]"
# match human-style uppercase gene symbols; mouse symbols are usually
# "mt-"/"Rp[sl]" -- confirm the gene naming in NewComb.
Seurat_Oligo_RG_GE[["percent.mt"]] <- PercentageFeatureSet(Seurat_Oligo_RG_GE, pattern = "^MT-")
Seurat_Oligo_RG_GE[["percent.ribo"]] <- PercentageFeatureSet(Seurat_Oligo_RG_GE, pattern = "^RP[SL]")
head(Seurat_Oligo_RG_GE@meta.data, 5)
# QC filter: keep cells with 200-2500 detected features and <5% mito reads
Seurat_Oligo_RG_GE <- subset(Seurat_Oligo_RG_GE, subset = nFeature_RNA > 200 & nFeature_RNA < 2500 & percent.mt < 5)
#Normalize
Seurat_Oligo_RG_GE <- NormalizeData(object = Seurat_Oligo_RG_GE, normalization.method = "LogNormalize", scale.factor = 10000)
print("Normalization is completed")
#ID variable genes
Seurat_Oligo_RG_GE <- FindVariableFeatures(Seurat_Oligo_RG_GE, selection.method = "vst", nfeatures = 2000)
print("FindVariableGenes is completed")
#Regression and Scaling with 8 cores!
Seurat_Oligo_RG_GE <- ScaleData(Seurat_Oligo_RG_GE,vars.to.regress = c("percent.ribo","percent.mt"))
print("ScaleData and Regression is completed")
#Run PCA
Seurat_Oligo_RG_GE <- RunPCA(Seurat_Oligo_RG_GE, features = VariableFeatures(object = Seurat_Oligo_RG_GE))
pdf("Elbowplot.pdf")
ElbowPlot(Seurat_Oligo_RG_GE)
dev.off()
##Find clusters, VlnPlot per cluster, and see whether some of your clusters are formed of cells that systematically have lower or higher number of expressed genes
Seurat_Oligo_RG_GE <- FindNeighbors(Seurat_Oligo_RG_GE, dims = 1:20)
Seurat_Oligo_RG_GE <- FindClusters(Seurat_Oligo_RG_GE, resolution = res)
save(Seurat_Oligo_RG_GE, file = "Seurat_Oligo_RG_GE.Robj")
saveRDS(Seurat_Oligo_RG_GE, file = "Seurat_Oligo_RG_GE.rds")
#Run UMAP and save coordinates
Seurat_Oligo_RG_GE <- RunUMAP(Seurat_Oligo_RG_GE, dims = 1:20, min.dist = 0.01)
embeds = Embeddings(Seurat_Oligo_RG_GE[["umap"]])
write.csv(embeds, file = paste("umapcoordinatesE18Oligo_RG_GE",30,"_",res,".csv",sep=""))
seuratClusters <- Idents(Seurat_Oligo_RG_GE)
write.csv(seuratClusters, file = "seuratClusterE18Oligo_RG_GE.csv")
pdf("umapE18Oligo_RG_GE.pdf")
DimPlot(Seurat_Oligo_RG_GE, reduction = "umap", label = TRUE, pt.size = 0.3) + NoLegend()
dev.off()
print("RunUmap is done")
# Marker genes per cluster; positive markers only
ClusterMarkers <- FindAllMarkers(Seurat_Oligo_RG_GE, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
# NOTE(review): Seurat >= 4 renamed avg_logFC to avg_log2FC -- confirm the
# installed Seurat version or this top_n() will fail.
ClusterMarkers %>% group_by(cluster) %>% top_n(n = 2, wt = avg_logFC)
write.csv(ClusterMarkers, file = "ClusterMarkersE18Oligo_RG_GE.csv")
|
/scRNA-seq/MIA18_ORGGESubclustering.R
|
no_license
|
kbruch/Maternal-Immune-Activation-Project
|
R
| false
| false
| 3,021
|
r
|
#### Seurat Main Cluster analysis on Orchestra
# Duplicate copy of the ORGGE subclustering script; see inline review notes.
library(Seurat)
library(dplyr)
library(Matrix)
library(methods)
#Inputs
species="m"
base_dir <- "/n/scratch2/bf78/MIASingleCell/MIA18"
projectName = "MIA18"
pc_num = 20;
res=0.4;
min_genes = 500;
load("seurat_mat_NewFinalE18.Robj")
load("cd_s_comb.Robj")
#Create CellVector, vector of all cell names listed as character strings, in this case all clusters that express markers for oligodendrocytes, radial glia, or ganglionic eminence
ORGGESeurat <- subset(seurat_mat_NewFinalE18,idents = c("12","13","14","15","19"))
ORGGECells <- colnames(ORGGESeurat)
NewComb <- cd_comb[,ORGGECells]
### Seurat
Seurat_Oligo_RG_GE <- CreateSeuratObject(counts = NewComb, min.cells = 3, project = projectName)
# NOTE(review): "^MT-"/"^RP[SL]" are human-style patterns; species="m"
# suggests mouse ("mt-"/"Rp[sl]") -- confirm gene naming in the counts.
Seurat_Oligo_RG_GE[["percent.mt"]] <- PercentageFeatureSet(Seurat_Oligo_RG_GE, pattern = "^MT-")
Seurat_Oligo_RG_GE[["percent.ribo"]] <- PercentageFeatureSet(Seurat_Oligo_RG_GE, pattern = "^RP[SL]")
head(Seurat_Oligo_RG_GE@meta.data, 5)
Seurat_Oligo_RG_GE <- subset(Seurat_Oligo_RG_GE, subset = nFeature_RNA > 200 & nFeature_RNA < 2500 & percent.mt < 5)
#Normalize
Seurat_Oligo_RG_GE <- NormalizeData(object = Seurat_Oligo_RG_GE, normalization.method = "LogNormalize", scale.factor = 10000)
print("Normalization is completed")
#ID variable genes
Seurat_Oligo_RG_GE <- FindVariableFeatures(Seurat_Oligo_RG_GE, selection.method = "vst", nfeatures = 2000)
print("FindVariableGenes is completed")
#Regression and Scaling with 8 cores!
Seurat_Oligo_RG_GE <- ScaleData(Seurat_Oligo_RG_GE,vars.to.regress = c("percent.ribo","percent.mt"))
print("ScaleData and Regression is completed")
#Run PCA
Seurat_Oligo_RG_GE <- RunPCA(Seurat_Oligo_RG_GE, features = VariableFeatures(object = Seurat_Oligo_RG_GE))
pdf("Elbowplot.pdf")
ElbowPlot(Seurat_Oligo_RG_GE)
dev.off()
##Find clusters, VlnPlot per cluster, and see whether some of your clusters are formed of cells that systematically have lower or higher number of expressed genes
Seurat_Oligo_RG_GE <- FindNeighbors(Seurat_Oligo_RG_GE, dims = 1:20)
Seurat_Oligo_RG_GE <- FindClusters(Seurat_Oligo_RG_GE, resolution = res)
save(Seurat_Oligo_RG_GE, file = "Seurat_Oligo_RG_GE.Robj")
saveRDS(Seurat_Oligo_RG_GE, file = "Seurat_Oligo_RG_GE.rds")
#Run UMAP and save coordinates
Seurat_Oligo_RG_GE <- RunUMAP(Seurat_Oligo_RG_GE, dims = 1:20, min.dist = 0.01)
embeds = Embeddings(Seurat_Oligo_RG_GE[["umap"]])
write.csv(embeds, file = paste("umapcoordinatesE18Oligo_RG_GE",30,"_",res,".csv",sep=""))
seuratClusters <- Idents(Seurat_Oligo_RG_GE)
write.csv(seuratClusters, file = "seuratClusterE18Oligo_RG_GE.csv")
pdf("umapE18Oligo_RG_GE.pdf")
DimPlot(Seurat_Oligo_RG_GE, reduction = "umap", label = TRUE, pt.size = 0.3) + NoLegend()
dev.off()
print("RunUmap is done")
ClusterMarkers <- FindAllMarkers(Seurat_Oligo_RG_GE, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
# NOTE(review): Seurat >= 4 renamed avg_logFC to avg_log2FC -- confirm version.
ClusterMarkers %>% group_by(cluster) %>% top_n(n = 2, wt = avg_logFC)
write.csv(ClusterMarkers, file = "ClusterMarkersE18Oligo_RG_GE.csv")
|
# test that deparase and n2_num gives correct output
p <- study_parameters(n1 = 3:4,
n2 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x
p <- study_parameters(n1 = 3:4,
n2 = 5,
n3 = 2,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = 2,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x$n3
# pn
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
partially_nested = TRUE,
cohend = 2
)
p
x <- get_power(p)
x$n3
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = per_treatment(5,10),
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x$n3
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(unequal_clusters(func = rnorm(10, 10)), unequal_clusters(func = rnorm(5, 15))),
n3 = 2,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
x <- get_power(p)
x$n2_tx_lab
x$n2_cc_lab
x <- get_power(p, R = 3)
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(unequal_clusters(func = rnorm(10, 10)), unequal_clusters(5,5,5,5,5)),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x
x$n3
## rounding
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(unequal_clusters(func = rnorm(10, 10)), unequal_clusters(func = rnorm(5, 15))),
n3 = 2,
T_end = 10,
icc_pre_subject = c(0.665, 0.666666666666666),
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
## two-level
p <- study_parameters(n1 = 4,
n2 = per_treatment(unequal_clusters(5,5,5,5,4), unequal_clusters(func = rnorm(10, 10))),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
get_power(p)
p <- study_parameters(n1 = 4,
n2 = per_treatment(10, 20),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
partially_nested = TRUE,
dropout = 0,
cohend = 2
)
p
get_power(p)
|
/tests/local/dev_test_print.R
|
no_license
|
rpsychologist/powerlmm
|
R
| false
| false
| 6,330
|
r
|
# test that deparase and n2_num gives correct output
p <- study_parameters(n1 = 3:4,
n2 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x
p <- study_parameters(n1 = 3:4,
n2 = 5,
n3 = 2,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = 2,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x$n3
# pn
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
partially_nested = TRUE,
cohend = 2
)
p
x <- get_power(p)
x$n3
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(10, 20),
n3 = per_treatment(5,10),
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x$n3
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(unequal_clusters(func = rnorm(10, 10)), unequal_clusters(func = rnorm(5, 15))),
n3 = 2,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
x <- get_power(p)
x$n2_tx_lab
x$n2_cc_lab
x <- get_power(p, R = 3)
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(unequal_clusters(func = rnorm(10, 10)), unequal_clusters(5,5,5,5,5)),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
x
x$n3
## rounding
p <- study_parameters(n1 = 3:4,
n2 = per_treatment(unequal_clusters(func = rnorm(10, 10)), unequal_clusters(func = rnorm(5, 15))),
n3 = 2,
T_end = 10,
icc_pre_subject = c(0.665, 0.666666666666666),
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
x <- get_power(p)
## two-level
p <- study_parameters(n1 = 4,
n2 = per_treatment(unequal_clusters(5,5,5,5,4), unequal_clusters(func = rnorm(10, 10))),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
dropout = 0,
cohend = 2
)
p
get_power(p)
p <- study_parameters(n1 = 4,
n2 = per_treatment(10, 20),
n3 = 5,
T_end = 10,
icc_pre_subject = 0.6,
icc_pre_cluster = 0.05,
cor_cluster = -0.5,
cor_subject = -0.7,
icc_slope = 0.1,
var_ratio = 0.02,
sigma_error = 10,
partially_nested = TRUE,
dropout = 0,
cohend = 2
)
p
get_power(p)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/engine_keras.R
\name{fit_keras_es}
\alias{fit_keras_es}
\title{Fit neural network by keras package with early stopping}
\usage{
fit_keras_es(
x,
y,
eval_x,
eval_y,
task_type,
keras_model,
epoch = 10000L,
patience = 20L,
seed,
...
)
}
\arguments{
\item{x}{A matrix of features.}
\item{y}{A vector of labels.}
\item{eval_x}{A matrix of features for evaluation.}
\item{eval_y}{A vector of labels for evaluation.}
\item{task_type}{A character scalar of task_type.}
\item{keras_model}{A keras model object.}
\item{epoch}{A integer scalar of epoch.}
\item{patience}{A integer scalar of patience.}
\item{seed}{A integer scalar of random seed.}
\item{...}{Additional arguments passed to \link[keras:fit]{keras::fit}.}
}
\value{
A fitted object.
}
\description{
Fit neural network by keras package with early stopping
}
|
/man/fit_keras_es.Rd
|
permissive
|
five-dots/ml4e
|
R
| false
| true
| 918
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/engine_keras.R
\name{fit_keras_es}
\alias{fit_keras_es}
\title{Fit neural network by keras package with early stopping}
\usage{
fit_keras_es(
x,
y,
eval_x,
eval_y,
task_type,
keras_model,
epoch = 10000L,
patience = 20L,
seed,
...
)
}
\arguments{
\item{x}{A matrix of features.}
\item{y}{A vector of labels.}
\item{eval_x}{A matrix of features for evaluation.}
\item{eval_y}{A vector of labels for evaluation.}
\item{task_type}{A character scalar of task_type.}
\item{keras_model}{A keras model object.}
\item{epoch}{A integer scalar of epoch.}
\item{patience}{A integer scalar of patience.}
\item{seed}{A integer scalar of random seed.}
\item{...}{Additional arguments passed to \link[keras:fit]{keras::fit}.}
}
\value{
A fitted object.
}
\description{
Fit neural network by keras package with early stopping
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init_mart.R
\name{init_mart_human}
\alias{init_mart_human}
\title{Initialize biomaRt for hsapiens}
\usage{
init_mart_human()
}
\value{
None. Creates a global variable called mart_human.
}
\description{
Initialize biomaRt for hsapiens
}
|
/man/init_mart_human.Rd
|
no_license
|
tgrimes/miscgene
|
R
| false
| true
| 314
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init_mart.R
\name{init_mart_human}
\alias{init_mart_human}
\title{Initialize biomaRt for hsapiens}
\usage{
init_mart_human()
}
\value{
None. Creates a global variable called mart_human.
}
\description{
Initialize biomaRt for hsapiens
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/finspace_operations.R
\name{finspace_create_kx_cluster}
\alias{finspace_create_kx_cluster}
\title{Creates a new kdb cluster}
\usage{
finspace_create_kx_cluster(
clientToken = NULL,
environmentId,
clusterName,
clusterType,
databases = NULL,
cacheStorageConfigurations = NULL,
autoScalingConfiguration = NULL,
clusterDescription = NULL,
capacityConfiguration,
releaseLabel,
vpcConfiguration = NULL,
initializationScript = NULL,
commandLineArguments = NULL,
code = NULL,
executionRole = NULL,
savedownStorageConfiguration = NULL,
azMode,
availabilityZoneId = NULL,
tags = NULL
)
}
\arguments{
\item{clientToken}{A token that ensures idempotency. This token expires in 10 minutes.}
\item{environmentId}{[required] A unique identifier for the kdb environment.}
\item{clusterName}{[required] A unique name for the cluster that you want to create.}
\item{clusterType}{[required] Specifies the type of KDB database that is being created. The following
types are available:
\itemize{
\item HDB – A Historical Database. The data is only accessible with
read-only permissions from one of the FinSpace managed kdb databases
mounted to the cluster.
\item RDB – A Realtime Database. This type of database captures all the
data from a ticker plant and stores it in memory until the end of
day, after which it writes all of its data to a disk and reloads the
HDB. This cluster type requires local storage for temporary storage
of data during the savedown process. If you specify this field in
your request, you must provide the \code{savedownStorageConfiguration}
parameter.
\item GATEWAY – A gateway cluster allows you to access data across
processes in kdb systems. It allows you to create your own routing
logic using the initialization scripts and custom code. This type of
cluster does not require a writable local storage.
}}
\item{databases}{A list of databases that will be available for querying.}
\item{cacheStorageConfigurations}{The configurations for a read only cache storage associated with a
cluster. This cache will be stored as an FSx Lustre that reads from the
S3 store.}
\item{autoScalingConfiguration}{The configuration based on which FinSpace will scale in or scale out
nodes in your cluster.}
\item{clusterDescription}{A description of the cluster.}
\item{capacityConfiguration}{[required] A structure for the metadata of a cluster. It includes information about
like the CPUs needed, memory of instances, number of instances, and the
port used while establishing a connection.}
\item{releaseLabel}{[required] The version of FinSpace managed kdb to run.}
\item{vpcConfiguration}{Configuration details about the network where the Privatelink endpoint
of the cluster resides.}
\item{initializationScript}{Specifies a Q program that will be run at launch of a cluster. It is a
relative path within \emph{.zip} file that contains the custom code, which
will be loaded on the cluster. It must include the file name itself. For
example, \code{somedir/init.q}.}
\item{commandLineArguments}{Defines the key-value pairs to make them available inside the cluster.}
\item{code}{The details of the custom code that you want to use inside a cluster
when analyzing a data. It consists of the S3 source bucket, location, S3
object version, and the relative path from where the custom code is
loaded into the cluster.}
\item{executionRole}{An IAM role that defines a set of permissions associated with a cluster.
These permissions are assumed when a cluster attempts to access another
cluster.}
\item{savedownStorageConfiguration}{The size and type of the temporary storage that is used to hold data
during the savedown process. This parameter is required when you choose
\code{clusterType} as RDB. All the data written to this storage space is lost
when the cluster node is restarted.}
\item{azMode}{[required] The number of availability zones you want to assign per cluster. This
can be one of the following
\itemize{
\item \code{SINGLE} – Assigns one availability zone per cluster.
\item \code{MULTI} – Assigns all the availability zones per cluster.
}}
\item{availabilityZoneId}{The availability zone identifiers for the requested regions.}
\item{tags}{A list of key-value pairs to label the cluster. You can add up to 50
tags to a cluster.}
}
\description{
Creates a new kdb cluster.
See \url{https://www.paws-r-sdk.com/docs/finspace_create_kx_cluster/} for full documentation.
}
\keyword{internal}
|
/cran/paws.management/man/finspace_create_kx_cluster.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 4,540
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/finspace_operations.R
\name{finspace_create_kx_cluster}
\alias{finspace_create_kx_cluster}
\title{Creates a new kdb cluster}
\usage{
finspace_create_kx_cluster(
clientToken = NULL,
environmentId,
clusterName,
clusterType,
databases = NULL,
cacheStorageConfigurations = NULL,
autoScalingConfiguration = NULL,
clusterDescription = NULL,
capacityConfiguration,
releaseLabel,
vpcConfiguration = NULL,
initializationScript = NULL,
commandLineArguments = NULL,
code = NULL,
executionRole = NULL,
savedownStorageConfiguration = NULL,
azMode,
availabilityZoneId = NULL,
tags = NULL
)
}
\arguments{
\item{clientToken}{A token that ensures idempotency. This token expires in 10 minutes.}
\item{environmentId}{[required] A unique identifier for the kdb environment.}
\item{clusterName}{[required] A unique name for the cluster that you want to create.}
\item{clusterType}{[required] Specifies the type of KDB database that is being created. The following
types are available:
\itemize{
\item HDB – A Historical Database. The data is only accessible with
read-only permissions from one of the FinSpace managed kdb databases
mounted to the cluster.
\item RDB – A Realtime Database. This type of database captures all the
data from a ticker plant and stores it in memory until the end of
day, after which it writes all of its data to a disk and reloads the
HDB. This cluster type requires local storage for temporary storage
of data during the savedown process. If you specify this field in
your request, you must provide the \code{savedownStorageConfiguration}
parameter.
\item GATEWAY – A gateway cluster allows you to access data across
processes in kdb systems. It allows you to create your own routing
logic using the initialization scripts and custom code. This type of
cluster does not require a writable local storage.
}}
\item{databases}{A list of databases that will be available for querying.}
\item{cacheStorageConfigurations}{The configurations for a read only cache storage associated with a
cluster. This cache will be stored as an FSx Lustre that reads from the
S3 store.}
\item{autoScalingConfiguration}{The configuration based on which FinSpace will scale in or scale out
nodes in your cluster.}
\item{clusterDescription}{A description of the cluster.}
\item{capacityConfiguration}{[required] A structure for the metadata of a cluster. It includes information about
like the CPUs needed, memory of instances, number of instances, and the
port used while establishing a connection.}
\item{releaseLabel}{[required] The version of FinSpace managed kdb to run.}
\item{vpcConfiguration}{Configuration details about the network where the Privatelink endpoint
of the cluster resides.}
\item{initializationScript}{Specifies a Q program that will be run at launch of a cluster. It is a
relative path within \emph{.zip} file that contains the custom code, which
will be loaded on the cluster. It must include the file name itself. For
example, \code{somedir/init.q}.}
\item{commandLineArguments}{Defines the key-value pairs to make them available inside the cluster.}
\item{code}{The details of the custom code that you want to use inside a cluster
when analyzing a data. It consists of the S3 source bucket, location, S3
object version, and the relative path from where the custom code is
loaded into the cluster.}
\item{executionRole}{An IAM role that defines a set of permissions associated with a cluster.
These permissions are assumed when a cluster attempts to access another
cluster.}
\item{savedownStorageConfiguration}{The size and type of the temporary storage that is used to hold data
during the savedown process. This parameter is required when you choose
\code{clusterType} as RDB. All the data written to this storage space is lost
when the cluster node is restarted.}
\item{azMode}{[required] The number of availability zones you want to assign per cluster. This
can be one of the following
\itemize{
\item \code{SINGLE} – Assigns one availability zone per cluster.
\item \code{MULTI} – Assigns all the availability zones per cluster.
}}
\item{availabilityZoneId}{The availability zone identifiers for the requested regions.}
\item{tags}{A list of key-value pairs to label the cluster. You can add up to 50
tags to a cluster.}
}
\description{
Creates a new kdb cluster.
See \url{https://www.paws-r-sdk.com/docs/finspace_create_kx_cluster/} for full documentation.
}
\keyword{internal}
|
# Create Silva typestrain database from fasta file
# Retrieve a fasta file from http://www.arb-silva.de/search/
# strain: [T]
# Sequences occur in: [x]Ref(NR)
# Taxonomy: Silva Ref NR
library(warppipe)
library(dplyr)
library(stringr)
silva_NR_typestrains <- seq_tbl("data-raw/silva_SSUr119_silvaRefNR.fasta")
silva_NR_typestrains <- mutate(silva_NR_typestrains, str_match(Description, "[A-Z0-9.]+"))
colnames(silva_NR_typestrains)[3] <- "Accession_Number"
silva_NR_typestrains <- select(silva_NR_typestrains, Accession_Number, Sequence)
save(silva_NR_typestrains, file="data/silva_ref_nr.rda", compress = "xz")
write.csv(silva_NR_typestrains, "data-tidy/silva_ref_nr.csv", row.names = FALSE)
|
/data-raw/silva_ref_nr.R
|
no_license
|
linearregression/warppipe
|
R
| false
| false
| 695
|
r
|
# Create Silva typestrain database from fasta file
# Retrieve a fasta file from http://www.arb-silva.de/search/
# strain: [T]
# Sequences occur in: [x]Ref(NR)
# Taxonomy: Silva Ref NR
library(warppipe)
library(dplyr)
library(stringr)
silva_NR_typestrains <- seq_tbl("data-raw/silva_SSUr119_silvaRefNR.fasta")
silva_NR_typestrains <- mutate(silva_NR_typestrains, str_match(Description, "[A-Z0-9.]+"))
colnames(silva_NR_typestrains)[3] <- "Accession_Number"
silva_NR_typestrains <- select(silva_NR_typestrains, Accession_Number, Sequence)
save(silva_NR_typestrains, file="data/silva_ref_nr.rda", compress = "xz")
write.csv(silva_NR_typestrains, "data-tidy/silva_ref_nr.csv", row.names = FALSE)
|
\name{predict.FRESAsignature}
\alias{predict.FRESAsignature}
\title{Predicts \code{\link{CVsignature}} models}
\description{
This function predicts the outcome from a FRESAsignature model
}
\usage{
\method{predict}{FRESAsignature}(object,...)
}
\arguments{
\item{object}{
An object of class FRESAsignature
}
\item{...}{
A list with: testdata=testdata
}
}
\value{
A vector of the predicted values
}
\author{Jose G. Tamez-Pena}
\seealso{\code{\link{CVsignature}},\code{\link{getSignature}},\code{\link{signatureDistance}}}
\keyword{Model_Prediction}
|
/fuzzedpackages/FRESA.CAD/man/predict.FRESAsignature.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 560
|
rd
|
\name{predict.FRESAsignature}
\alias{predict.FRESAsignature}
\title{Predicts \code{\link{CVsignature}} models}
\description{
This function predicts the outcome from a FRESAsignature model
}
\usage{
\method{predict}{FRESAsignature}(object,...)
}
\arguments{
\item{object}{
An object of class FRESAsignature
}
\item{...}{
A list with: testdata=testdata
}
}
\value{
A vector of the predicted values
}
\author{Jose G. Tamez-Pena}
\seealso{\code{\link{CVsignature}},\code{\link{getSignature}},\code{\link{signatureDistance}}}
\keyword{Model_Prediction}
|
# This is the Multiple Regression for comp*conversion in the BTx642*Tx7000 mapping data
# Written Feb, 2014 by Robert Anderson for the BTx642*Tx7000 comp*conversion publication
# Set directory, load MLR library
setwd("C:/Users/Robert/Google Drive/Mullet Lab/Projects/QTL Mapping projects/BTx642 x Tx7000")
getwd()
library("car", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
# Optional: Input standardization, as in Gelman, 2008.
library("arm", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
# --Note: To use unstandardized values, simply remove "standardize()" from the GlucFIT and PentFIT equations.
# Optional: Cross validation of the regression model
#install.packages("cvTools")
library("cvTools", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
multiLR <- read.delim("C:/Users/Robert/Google Drive/Mullet Lab/Projects/QTL Mapping projects/BTx642 x Tx7000/642x7000_compxconver_R-input.txt")
View(multiLR)
GlucFIT = standardize(lm(yield.glucose ~ stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR))
sum.GlucFIT = summary(GlucFIT)
sum.GlucFIT
PentFIT = standardize(lm(yield.pentose ~ stem.length + int.diameter + wall.density + + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR))
sum.PentFIT = summary(PentFIT)
sum.PentFIT
# -----( test whether we can remove certain variables from the model )---------
GlucFIT.sub1 = standardize(lm(yield.glucose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR))
anova(GlucFIT.sub1,GlucFIT)
# Above shows that _.
PentFIT.sub1 = standardize(lm(yield.pentose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR))
anova(PentFIT.sub1,PentFIT)
# Above shows that _.
# ------( Regression Diagnostics )---------------------
GlucFIT.resid = residuals(GlucFIT)
PentFIT.resid = residuals(PentFIT)
Gluc.resid.table = as.data.frame(GlucFIT.resid)
Pent.resid.table = as.data.frame(PentFIT.resid)
qqnorm(GlucFIT.resid, ylab = "Glucose yield MLR Residuals")
qqline(GlucFIT.resid)
qqnorm(PentFIT.resid, ylab = "Pentose Yield MLR Residuals")
qqline(PentFIT.resid)
shapiro.test(GlucFIT.resid)
shapiro.test(PentFIT.resid)
hist(GlucFIT.resid, xlab = "Glucose yield MLR residuals", main = " ")
hist(PentFIT.resid, xlab = "Pentose yield MLR residuals", main = " ")
# ---( Variance-inflation factors)
GlucFIT.vif <- vif(standardize(lm(yield.glucose ~ stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR)))
GlucFIT.vif
PentFIT.vif <- vif(standardize(lm(yield.pentose ~ stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR)))
PentFIT.vif
GlucFIT.dtf.vif <- vif(standardize(lm(yield.glucose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR)))
GlucFIT.dtf.vif
PentFIT.dtf.vif <- vif(standardize(lm(yield.pentose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR)))
PentFIT.dtf.vif
# *Note, tolerance = 1/vif A tolerance<0.01 (vif>100) means trait should be removed from model
# ---( RMSE cross validation )
# Removing standardization, since this affected variable length (NULL values)
GlucFIT2 = lm(yield.glucose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR)
PentFIT2 = lm(yield.pentose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR)
Gluc.CV = cvFit(GlucFIT2, data = multiLR, y = multiLR$yield.glucose, cost = rmspe, K = 5, R = 10)
Gluc.CV
Pent.CV = cvFit(PentFIT2, data = multiLR, y = multiLR$yield.pentose, cost = rmspe, K = 5, R = 10)
Pent.CV
# Values for the full model
Gluc.CV.full = rmspe(multiLR$yield.glucose, predict(GlucFIT2), includeSE=TRUE)
Gluc.CV.full
Pent.CV.full = rmspe(multiLR$yield.pentose, predict(PentFIT2), includeSE=TRUE)
Pent.CV.full
#library("DAAG", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
#CVlm(df=multiLR, form.lm=formula(GlucFIT2), m=5, plotit="Observed")
#install.packages("caret", dependencies = c("Depends", "Suggests"))
#library("caret", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
#library("mlbench", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
#dat <- data.frame(multiLR)
#set.seed(107)
|
/R script for multiple linear regression wall-642x7000.R
|
no_license
|
Patchx/R_Projects
|
R
| false
| false
| 4,955
|
r
|
# This is the Multiple Regression for comp*conversion in the BTx642*Tx7000 mapping data
# Written Feb, 2014 by Robert Anderson for the BTx642*Tx7000 comp*conversion publication
# Set directory, load MLR library
setwd("C:/Users/Robert/Google Drive/Mullet Lab/Projects/QTL Mapping projects/BTx642 x Tx7000")
getwd()
library("car", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
# Optional: Input standardization, as in Gelman, 2008.
library("arm", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
# --Note: To use unstandardized values, simply remove "standardize()" from the GlucFIT and PentFIT equations.
# Optional: Cross validation of the regression model
#install.packages("cvTools")
library("cvTools", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
multiLR <- read.delim("C:/Users/Robert/Google Drive/Mullet Lab/Projects/QTL Mapping projects/BTx642 x Tx7000/642x7000_compxconver_R-input.txt")
View(multiLR)
GlucFIT = standardize(lm(yield.glucose ~ stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR))
sum.GlucFIT = summary(GlucFIT)
sum.GlucFIT
PentFIT = standardize(lm(yield.pentose ~ stem.length + int.diameter + wall.density + + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR))
sum.PentFIT = summary(PentFIT)
sum.PentFIT
# -----( test whether we can remove certain variables from the model )---------
GlucFIT.sub1 = standardize(lm(yield.glucose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR))
anova(GlucFIT.sub1,GlucFIT)
# Above shows that _.
PentFIT.sub1 = standardize(lm(yield.pentose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR))
anova(PentFIT.sub1,PentFIT)
# Above shows that _.
# ------( Regression Diagnostics )---------------------
GlucFIT.resid = residuals(GlucFIT)
PentFIT.resid = residuals(PentFIT)
Gluc.resid.table = as.data.frame(GlucFIT.resid)
Pent.resid.table = as.data.frame(PentFIT.resid)
qqnorm(GlucFIT.resid, ylab = "Glucose yield MLR Residuals")
qqline(GlucFIT.resid)
qqnorm(PentFIT.resid, ylab = "Pentose Yield MLR Residuals")
qqline(PentFIT.resid)
shapiro.test(GlucFIT.resid)
shapiro.test(PentFIT.resid)
hist(GlucFIT.resid, xlab = "Glucose yield MLR residuals", main = " ")
hist(PentFIT.resid, xlab = "Pentose yield MLR residuals", main = " ")
# ---( Variance-inflation factors)
GlucFIT.vif <- vif(standardize(lm(yield.glucose ~ stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR)))
GlucFIT.vif
# ---( Multicollinearity diagnostics )
# Variance inflation factors for the standardized yield models (with and
# without dtf as a predictor). vif()/standardize() come from packages loaded
# earlier in this script (presumably car/arm -- confirm against the header).
PentFIT.vif <- vif(standardize(lm(yield.pentose ~ stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR)))
PentFIT.vif
GlucFIT.dtf.vif <- vif(standardize(lm(yield.glucose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR)))
GlucFIT.dtf.vif
PentFIT.dtf.vif <- vif(standardize(lm(yield.pentose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR)))
PentFIT.dtf.vif
# *Note, tolerance = 1/vif A tolerance<0.01 (vif>100) means trait should be removed from model
# ---( RMSE cross validation )
# Removing standardization, since this affected variable length (NULL values)
# 5-fold cross-validated RMSPE, repeated 10 times (cvFit from cvTools).
GlucFIT2 = lm(yield.glucose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.cellulose + pca.pellet + fa.pellet, data=multiLR)
PentFIT2 = lm(yield.pentose ~ dtf + stem.length + int.diameter + wall.density + percent.wall + percent.lignin.in.wall + grams.xylan + pca.pellet + fa.pellet, data=multiLR)
Gluc.CV = cvFit(GlucFIT2, data = multiLR, y = multiLR$yield.glucose, cost = rmspe, K = 5, R = 10)
Gluc.CV
Pent.CV = cvFit(PentFIT2, data = multiLR, y = multiLR$yield.pentose, cost = rmspe, K = 5, R = 10)
Pent.CV
# Values for the full model
# In-sample RMSPE on the fitted values (no hold-out), for comparison with the
# cross-validated estimates above.
Gluc.CV.full = rmspe(multiLR$yield.glucose, predict(GlucFIT2), includeSE=TRUE)
Gluc.CV.full
Pent.CV.full = rmspe(multiLR$yield.pentose, predict(PentFIT2), includeSE=TRUE)
Pent.CV.full
#library("DAAG", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
#CVlm(df=multiLR, form.lm=formula(GlucFIT2), m=5, plotit="Observed")
#install.packages("caret", dependencies = c("Depends", "Suggests"))
#library("caret", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
#library("mlbench", lib.loc="C:/Revolution/R-Enterprise-6.1/R-2.14.2/library")
#dat <- data.frame(multiLR)
#set.seed(107)
|
## You should create one R script called run_analysis.R that does the following.
## 1.Merges the training and the test sets to create one data set.
## load necessary packages
## (fix: the install line carried a stray "+" console-continuation prompt,
## which is a syntax error when this file is source()d)
if (is.element('dplyr', installed.packages()[,1]) == FALSE) {
  install.packages('dplyr') }
library(dplyr)
## set working directory to be UCI HAR Dataset file
setwd("./UCI HAR Dataset")
## import accessory files into a data.frame
activity_labels<-read.table("./activity_labels.txt")
features<-read.table("./features.txt")
## Make train data.frame: subject ids, activity names, 561 feature columns
## NOTE(review): the UCI archive ships this file as lower-case
## "subject_train.txt" on case-sensitive filesystems -- confirm the path.
Subject_train<-read.table("./train/Subject_train.txt")
names(Subject_train)<-"subject"
X_train<-read.table("./train/X_train.txt")
names(X_train)<-features$V2
y_train<-read.table("./train/y_train.txt")
## fix: the original used merge(y_train, activity_labels), which re-sorts the
## rows by activity code and so misaligns activities with Subject_train/X_train
## in the subsequent bind_cols(). A direct lookup preserves row order.
y_train<-data.frame(activity = activity_labels$V2[y_train$V1])
train<-bind_cols(Subject_train,y_train,X_train)
## Make test data.frame (same construction as train)
Subject_test<-read.table("./test/Subject_test.txt")
names(Subject_test)<-"subject"
X_test<-read.table("./test/X_test.txt")
names(X_test)<-features$V2
y_test<-read.table("./test/y_test.txt")
y_test<-data.frame(activity = activity_labels$V2[y_test$V1])
test<-bind_cols(Subject_test,y_test,X_test)
## Merge test and train data.frames
Merge<-bind_rows(train,test)
## 2.Extracts only the measurements on the mean and standard deviation for each measurement.
## Keep subject/activity plus mean()/std() features, dropping meanFreq() ones.
Merge_Ext<-Merge %>% dplyr::select(grep("subject|activity|mean|std", names(Merge)), -grep("Freq", names(Merge)))
## 3.Uses descriptive activity names to name the activities in the data set
## done above in the creation of the data.frames
## 4.Appropriately labels the data set with descriptive variable names.
names(Merge_Ext)<-gsub("-","",names(Merge_Ext))
names(Merge_Ext)<-sub("\\(\\)","",names(Merge_Ext))
names(Merge_Ext)<-sub("mean","Mean", names(Merge_Ext))
names(Merge_Ext)<-sub("std","Std", names(Merge_Ext))
names(Merge_Ext)<-sub("BodyBody","Body", names(Merge_Ext))
## 5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
## summarise_all(mean) replaces the deprecated funs() wrapper.
Tidy<-Merge_Ext %>% group_by(subject,activity) %>% summarise_all(mean)
Tidy
## creates an output file in txt format
write.table(Tidy,file="Tidy_data.txt",row.names=FALSE)
|
/run_analysis.R
|
no_license
|
SEMHerman/Tidy-Data
|
R
| false
| false
| 2,352
|
r
|
## You should create one R script called run_analysis.R that does the following.
## 1.Merges the training and the test sets to create one data set.
## load necessary packages
## (fix: the install line carried a stray "+" console-continuation prompt,
## which is a syntax error when this file is source()d)
if (is.element('dplyr', installed.packages()[,1]) == FALSE) {
  install.packages('dplyr') }
library(dplyr)
## set working directory to be UCI HAR Dataset file
setwd("./UCI HAR Dataset")
## import accessory files into a data.frame
activity_labels<-read.table("./activity_labels.txt")
features<-read.table("./features.txt")
## Make train data.frame: subject ids, activity names, 561 feature columns
## NOTE(review): the UCI archive ships this file as lower-case
## "subject_train.txt" on case-sensitive filesystems -- confirm the path.
Subject_train<-read.table("./train/Subject_train.txt")
names(Subject_train)<-"subject"
X_train<-read.table("./train/X_train.txt")
names(X_train)<-features$V2
y_train<-read.table("./train/y_train.txt")
## fix: the original used merge(y_train, activity_labels), which re-sorts the
## rows by activity code and so misaligns activities with Subject_train/X_train
## in the subsequent bind_cols(). A direct lookup preserves row order.
y_train<-data.frame(activity = activity_labels$V2[y_train$V1])
train<-bind_cols(Subject_train,y_train,X_train)
## Make test data.frame (same construction as train)
Subject_test<-read.table("./test/Subject_test.txt")
names(Subject_test)<-"subject"
X_test<-read.table("./test/X_test.txt")
names(X_test)<-features$V2
y_test<-read.table("./test/y_test.txt")
y_test<-data.frame(activity = activity_labels$V2[y_test$V1])
test<-bind_cols(Subject_test,y_test,X_test)
## Merge test and train data.frames
Merge<-bind_rows(train,test)
## 2.Extracts only the measurements on the mean and standard deviation for each measurement.
## Keep subject/activity plus mean()/std() features, dropping meanFreq() ones.
Merge_Ext<-Merge %>% dplyr::select(grep("subject|activity|mean|std", names(Merge)), -grep("Freq", names(Merge)))
## 3.Uses descriptive activity names to name the activities in the data set
## done above in the creation of the data.frames
## 4.Appropriately labels the data set with descriptive variable names.
names(Merge_Ext)<-gsub("-","",names(Merge_Ext))
names(Merge_Ext)<-sub("\\(\\)","",names(Merge_Ext))
names(Merge_Ext)<-sub("mean","Mean", names(Merge_Ext))
names(Merge_Ext)<-sub("std","Std", names(Merge_Ext))
names(Merge_Ext)<-sub("BodyBody","Body", names(Merge_Ext))
## 5.From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
## summarise_all(mean) replaces the deprecated funs() wrapper.
Tidy<-Merge_Ext %>% group_by(subject,activity) %>% summarise_all(mean)
Tidy
## creates an output file in txt format
write.table(Tidy,file="Tidy_data.txt",row.names=FALSE)
|
\name{BPSpriorElicit}
\alias{BPSpriorElicit}
\title{Function to Set Hyperparameters of BPS Priors}
\description{
A function to set the hyperparameters of a first order autoregressive BPS prior distribution,
approximately assigning constant prior mean hazard rate and corresponding coefficient of variation.
}
\usage{BPSpriorElicit(r0 = 1, H = 1, T00 = 1, ord = 4, G = 30, c = 0.9)}
\arguments{
\item{r0}{prior mean hazard rate (\eqn{r_0})}
\item{H}{corresponding coefficient of variation}
\item{T00}{time-horizon of interest (\eqn{T_\infty})}
\item{ord}{spline order (\eqn{k})}
\item{G}{number of internal spline knots}
\item{c}{correlation coefficient between two consecutive spline weights}
}
\details{
A first order autoregressive BPS prior hazard rate is defined, for \eqn{0<t<T_\infty}, by
\deqn{\rho(t)=\exp\{\sum_{j=1}^{G+k-2} \eta_j B_j(t)\}}
where:
\itemize{
\item \eqn{\eta_j} is the \eqn{j}-th element of a normally distributed vector of spline weights (see below for details)
\item \eqn{B_j(t)} is the \eqn{j}-th B-spline basis function of order \eqn{k}, evaluated at \eqn{t},
defined on a grid of \eqn{G+2k-2} equispaced knots with first internal knot at \eqn{0}
and last internal knot at \eqn{T_\infty} (see \code{\link{splineDesign}} for details)
}
The spline weights form a stationary AR(1) process with mean \eqn{m}, variance \eqn{w} and lag-one autocorrelation \eqn{c}.
The elicitation procedure takes \eqn{w = H^2} and \eqn{m = \log r_0 - 0.5 * w}, based on the mean and variance formulas
for the log-normal distribution. As B-spline basis functions form a partition of unity within internal nodes,
the mean of \eqn{\rho(t)} is approximately equal to \eqn{r_0}, for \eqn{0<t<T_\infty}, and its standard deviation to \eqn{Hr_0}.
}
\value{
A list with nine components:
\item{r0}{prior mean hazard rate (copy of the input argument)}
\item{H}{corresponding coefficient of variation (copy of the input argument)}
\item{T00}{time-horizon of interest (copy of the input argument)}
\item{ord}{spline order (copy of the input argument)}
\item{G}{number of internal spline knots (copy of the input argument)}
\item{c}{correlation coefficient between two consecutive spline weights (copy of the input argument)}
\item{knots}{full grid of spline knots}
\item{m}{mean of spline coefficients}
\item{w}{variance of spline coefficients}
}
\seealso{\code{\link{BayHaz-package}}, \code{\link{BPSpriorSample}}, \code{\link{BPSpostSample}}}
\examples{
# ten events per century with unit coefficient of variation and fifty year time horizon
# cubic splines with minimal number of knots and strongly correlated spline weights
hypars<-BPSpriorElicit(r0 = 0.1, H = 1, T00 = 50, ord = 4, G = 3, c = 0.9)
}
\keyword{distribution}
\keyword{survival}
\keyword{smooth}
|
/man/BPSpriorElicit.Rd
|
no_license
|
cran/BayHaz
|
R
| false
| false
| 2,812
|
rd
|
\name{BPSpriorElicit}
\alias{BPSpriorElicit}
\title{Function to Set Hyperparameters of BPS Priors}
\description{
A function to set the hyperparameters of a first order autoregressive BPS prior distribution,
approximately assigning constant prior mean hazard rate and corresponding coefficient of variation.
}
\usage{BPSpriorElicit(r0 = 1, H = 1, T00 = 1, ord = 4, G = 30, c = 0.9)}
\arguments{
\item{r0}{prior mean hazard rate (\eqn{r_0})}
\item{H}{corresponding coefficient of variation}
\item{T00}{time-horizon of interest (\eqn{T_\infty})}
\item{ord}{spline order (\eqn{k})}
\item{G}{number of internal spline knots}
\item{c}{correlation coefficient between two consecutive spline weights}
}
\details{
A first order autoregressive BPS prior hazard rate is defined, for \eqn{0<t<T_\infty}, by
\deqn{\rho(t)=\exp\{\sum_{j=1}^{G+k-2} \eta_j B_j(t)\}}
where:
\itemize{
\item \eqn{\eta_j} is the \eqn{j}-th element of a normally distributed vector of spline weights (see below for details)
\item \eqn{B_j(t)} is the \eqn{j}-th B-spline basis function of order \eqn{k}, evaluated at \eqn{t},
defined on a grid of \eqn{G+2k-2} equispaced knots with first internal knot at \eqn{0}
and last internal knot at \eqn{T_\infty} (see \code{\link{splineDesign}} for details)
}
The spline weights form a stationary AR(1) process with mean \eqn{m}, variance \eqn{w} and lag-one autocorrelation \eqn{c}.
The elicitation procedure takes \eqn{w = H^2} and \eqn{m = \log r_0 - 0.5 * w}, based on the mean and variance formulas
for the log-normal distribution. As B-spline basis functions form a partition of unity within internal nodes,
the mean of \eqn{\rho(t)} is approximately equal to \eqn{r_0}, for \eqn{0<t<T_\infty}, and its standard deviation to \eqn{Hr_0}.
}
\value{
A list with nine components:
\item{r0}{prior mean hazard rate (copy of the input argument)}
\item{H}{corresponding coefficient of variation (copy of the input argument)}
\item{T00}{time-horizon of interest (copy of the input argument)}
\item{ord}{spline order (copy of the input argument)}
\item{G}{number of internal spline knots (copy of the input argument)}
\item{c}{correlation coefficient between two consecutive spline weights (copy of the input argument)}
\item{knots}{full grid of spline knots}
\item{m}{mean of spline coefficients}
\item{w}{variance of spline coefficients}
}
\seealso{\code{\link{BayHaz-package}}, \code{\link{BPSpriorSample}}, \code{\link{BPSpostSample}}}
\examples{
# ten events per century with unit coefficient of variation and fifty year time horizon
# cubic splines with minimal number of knots and strongly correlated spline weights
hypars<-BPSpriorElicit(r0 = 0.1, H = 1, T00 = 50, ord = 4, G = 3, c = 0.9)
}
\keyword{distribution}
\keyword{survival}
\keyword{smooth}
|
# data-raw/process-raceiatdat.R
# Data import and processing pipeline
# Create mini sample dataset to build into package
# Start with clean N = 8000 dataset
raceiatdat8000 <- haven::read_sav(file = "https://github.com/lizredford/explore-iat/blob/master/raceiat_N7983.sav?raw=true")
# Convert variable names to lowercase. it's better form and is also consistent with the
# other processed project implicit datasets.
raceiatdat8000$implicit <- raceiatdat8000$Implicit
raceiatdat8000$explicit <- raceiatdat8000$Explicit
# Delete upper-case originals by NAME rather than by magic column positions
# (the original dropped columns 17 and 18, which silently drops the wrong
# columns if the source file's column order ever changes).
raceiatdat8000 <- raceiatdat8000[, !(names(raceiatdat8000) %in% c("Implicit", "Explicit"))]
library(readr)
write_csv(raceiatdat8000, "raceiatdat8000.csv") # Export data
# Saving in package as raw data
# Set up the data-raw directory
# NOTE(review): devtools::use_data_raw()/use_data() are deprecated in favour of
# the usethis equivalents; kept as-is to avoid adding a new dependency.
devtools::use_data_raw()
# This script in the R directory will contain the documentation.
# You can use any name you want.
file.create("R/raceiatdat8000.R")
devtools::use_data(raceiatdat8000, overwrite = TRUE) # TRUE, not the reassignable alias T
|
/data-raw/process-raceiatdat8000.R
|
no_license
|
justinmillar/projectimplicit
|
R
| false
| false
| 940
|
r
|
# data-raw/process-raceiatdat.R
# Data import and processing pipeline
# Create mini sample dataset to build into package
# Start with clean N = 8000 dataset
raceiatdat8000 <- haven::read_sav(file = "https://github.com/lizredford/explore-iat/blob/master/raceiat_N7983.sav?raw=true")
# Convert variable names to lowercase. it's better form and is also consistent with the
# other processed project implicit datasets.
raceiatdat8000$implicit <- raceiatdat8000$Implicit
raceiatdat8000$explicit <- raceiatdat8000$Explicit
# Delete upper-case originals by NAME rather than by magic column positions
# (the original dropped columns 17 and 18, which silently drops the wrong
# columns if the source file's column order ever changes).
raceiatdat8000 <- raceiatdat8000[, !(names(raceiatdat8000) %in% c("Implicit", "Explicit"))]
library(readr)
write_csv(raceiatdat8000, "raceiatdat8000.csv") # Export data
# Saving in package as raw data
# Set up the data-raw directory
# NOTE(review): devtools::use_data_raw()/use_data() are deprecated in favour of
# the usethis equivalents; kept as-is to avoid adding a new dependency.
devtools::use_data_raw()
# This script in the R directory will contain the documentation.
# You can use any name you want.
file.create("R/raceiatdat8000.R")
devtools::use_data(raceiatdat8000, overwrite = TRUE) # TRUE, not the reassignable alias T
|
#############################################################################################################################
# Scenario driver for the "3xELF" sensitivity run of the smoking/e-cigarette
# microsimulation (SG/US variant): C->D and C->E switching rates are tripled
# and e-cig transition rates are halved in the 18..ELFAge band (see the
# fSwitchEcig/fELF factors below).
rm(list=ls())
#############################################################################################################################
test <- TRUE # set test to FALSE to run the real simulation
scenarioName <- "3xELF"
ELFAge <- 18
big <- "US"
outDir <- file.path("code", "scenarios_micro", "micro_v0.3_sensitivity", "MoreCECD_SGUS", "outData")
#############################################################################################################################
# micro_prep() and readEcigTrans() are defined in the sourced util scripts;
# they supply starting states/ages, baseline death rates and transition primers.
source(file.path("code", "scenarios_micro", "util", "microsimulation_preparation.R"))
source(file.path("code", "scenarios_micro", "util", "read_big_mat_transition.R"))
init <- micro_prep()
eTrans <- readEcigTrans(big)
# Modelled age range (11..80 -> 70 age indices) and calendar horizon.
startAge <- 11
endAge <- 80
startYear <- 2017
endYear <- 2067
numAge <- endAge - startAge + 1
numYear <- endYear - startYear + 1
if(test){
n.i <- 10
v.M_1 <- rep(c("C", "N", "E", "Q", "Q"), n.i/5)
v.age <- rep(c(19, 4, 35, 180, 20), n.i/5)
n.t <- 10
} else {
n.i <- length(init$sin.ages) # number of simulated individuals
v.M_1 <- init$sin.states # beginning states
v.age <- init$sin.ages # initialize age
n.t <- 50 # time horizon
}
v.n <- c("N", "C", "Q", "D", "E", "X") # the model states: Never Smoker(N), Smoker(C), Quitter(Q), Dual(D), E-cig Only(E)
n.s <- length(v.n) # the number of states
d.e <- 0.03 # equal discounting of costs and QALYs by 3%
d.x <- 0.03 # mortality rate decreases by 3% annually
# v.Trt <- c("No Treatment", "Treatment") # store the strategy names
# Cost and utility inputs
# Each u.*base / v.*base vector below has 70 entries, one per age 11..80.
u.N <- 1 # utility when not smoking
u.bTbase <- c(rep(0.98, 19), rep(0.96, 10), rep(0.97, 5), rep(0.96, 5), rep(0.97, 15), rep(0.98, 16))
u.Cbase <- c(rep(0.91,19), rep(c(0.88,0.86,0.83,0.81,0.78,0.76,0.74), each=5), rep(0.71, 16)) # utility when smoking
u.Qbase <- 1 - (1-u.Cbase) * 0.05 # quitters keep 5% of the smoking disutility
if(big == "JP"){
u.Ebase <- 1 - (1-u.Cbase) * 0.05
} else {
u.Ebase <- 1 - (1-u.Cbase) * 0.10 # e-cig only: 10% of the smoking disutility
}
u.Dbase <- sqrt(u.Cbase * u.Ebase) # dual use: geometric mean of C and E utilities
v.NXbase <- init$deathRateN
v.bTbase <- c(rep(0.92, 5), rep(0.93, 5), rep(0.94, 14), rep(0.95, 21), rep(0.96, 25))
v.RRCbase <- c(rep(2.8, 49), rep(2.5, 10), rep(2.0, 11))
v.RRQbase <- 1 + (v.RRCbase - 1) * 0.05
if(big == "JP"){
v.RREbase <- 1 + (v.RRCbase - 1) * 0.05
} else {
v.RREbase <- 1 + (v.RRCbase - 1) * 0.10
}
v.RRDbase <- sqrt(v.RRCbase * v.RREbase)
# Transition rates transformation
# Scenario knobs: fELF halves transitions in the 18..ELFAge rows below,
# fSwitchEcig triples the smoker->dual (CD) and smoker->ecig (CE) primers,
# fInitEcig is left at 1 (no change to never-smoker uptake overall).
fELF = 1/2
fInitEcig = 1
fSwitchEcig = 3
v.NC.primer <- init$NCprimer
v.CQ.primer <- init$CQprimer
v.QC.primer <- init$QCprimer
v.NE.primer=eTrans$NE; v.ND.primer=eTrans$ND; v.CD.primer=eTrans$CD*fSwitchEcig; v.CE.primer=eTrans$CE*fSwitchEcig;
v.QD.primer=eTrans$QD; v.QE.primer=eTrans$QE; v.DC.primer=eTrans$DC; v.DQ.primer=eTrans$DQ;
v.DE.primer=eTrans$DE; v.EC.primer=eTrans$EC; v.ED.primer=eTrans$ED; v.EN.primer=eTrans$EN
v.NE.primer <- v.NE.primer * fInitEcig
# Apply the ELF (halving) factor to the age band 18..ELFAge.
row18 <- 18 - startAge + 1
rowAgeELF <- ELFAge - startAge + 1
v.NE.primer[row18:rowAgeELF] <- v.NE.primer[row18:rowAgeELF] * fELF
v.CD.primer[row18:rowAgeELF] <- v.CD.primer[row18:rowAgeELF] * fELF
v.CE.primer[row18:rowAgeELF] <- v.CE.primer[row18:rowAgeELF] * fELF
v.QE.primer[row18:rowAgeELF] <- v.QE.primer[row18:rowAgeELF] * fELF
v.ND.primer[row18:rowAgeELF] <- v.ND.primer[row18:rowAgeELF] * fELF
v.QD.primer[row18:rowAgeELF] <- v.QD.primer[row18:rowAgeELF] * fELF
##################################### Functions ###########################################
##################################### Helper functions ####################################
# Look up per-individual values from an age-indexed base vector.
# Indices that fall outside the vector (or are NA) yield 0 instead of NA,
# so out-of-range ages contribute nothing downstream.
getNiVec <- function(v.base, v.index){
looked_up <- v.base[v.index]
ifelse(is.na(looked_up), 0, looked_up)
}
##################################### Main functions ######################################
# samplev(): draw m categorical samples per row of `probs` (rows = individuals,
# columns = states). Vectorised replacement for Hmisc::rMultinom(); returns an
# n x m matrix of sampled column labels. One runif(n) draw is made per sample
# column, preserving the RNG stream of the original implementation.
samplev <- function (probs, m) {
n_rows <- nrow(probs)
n_states <- ncol(probs)
state_labels <- dimnames(probs)[[2]]
if (!length(state_labels)) {
state_labels <- 1:n_states
}
# Column-wise cumulative probabilities: row i of cum_p holds the running sum
# of the first i state probabilities for each individual (n_states x n_rows).
cum_p <- apply(probs, 1, cumsum)
if (any((cum_p[n_states, ] - 1) > 1e-05))
stop("error in multinom: probabilities do not sum to 1")
drawn <- matrix(state_labels[1], nrow = n_rows, ncol = m)
for (j in 1:m) {
# One uniform per individual, repeated across states for the comparison.
u <- rep(runif(n_rows), rep(n_states, n_rows))
drawn[, j] <- state_labels[1 + colSums(u > cum_p)]
}
drawn
}
# The MicroSim function for the simple microsimulation of the 'Sick-Sicker' model keeps track of what happens to each individual during each cycle.
# Returns a list: m.M (state trace), TS (transition strings or NULL), TR (per-cycle
# state proportions or NULL), m.E (per-cycle QALYs), te (discounted QALYs per
# individual), te_hat (mean), colSumME (per-cycle QALY totals).
MicroSim <- function(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c, d.e, TR.out = TRUE, TS.out = FALSE, Trt = FALSE, seed = 1) {
# Arguments:
# v.M_1: vector of initial states for individuals
# n.i: number of individuals
# n.t: total number of cycles to run the model
# v.n: vector of health state names
# X: vector or matrix of individual characteristics
# d.c: discount rate for costs
# d.e: discount rate for health outcome (QALYs)
# TR.out: should the output include a Microsimulation trace? (default is TRUE)
# TS.out: should the output include a matrix of transitions between states? (default is TRUE)
# Trt: are the n.i individuals receiving treatment? (scalar with a Boolean value, default is FALSE)
# seed: starting seed number for random number generator (default is 1)
# Makes use of:
# Probs: function for the estimation of transition probabilities
# Costs: function for the estimation of cost state values
# Effs: function for the estimation of state specific health outcomes (QALYs)
# NOTE(review): X, d.c and Trt are never used in this body; they survive only
# because R evaluates arguments lazily.
v.index <- v.age - startAge + 1
# Per-cycle discount weights for QALYs.
v.dwe <- 1 / (1 + d.e) ^ (0:n.t)
# Create the matrix capturing the state name/costs/health outcomes for all individuals at each time point
m.M <- m.E <- matrix(nrow = n.i, ncol = n.t + 1,
dimnames = list(paste("ind", 1:n.i, sep = " "),
paste("cycle", 0:n.t, sep = " ")))
if(TR.out == TRUE) {
TR = matrix(NA, n.s, n.t)
}
m.M[, 1] <- v.M_1 # indicate the initial health state
v.RR <- getInitRR(v.M_1, v.index)
u <- getInitU(v.M_1, v.index)
m.E[, 1] <- Effs (u, cl=1)
set.seed(seed) # set the seed for every individual for the random number generator
for (t in 1:n.t) { # t <- 3
# print(v.index)
if (TR.out == TRUE) {
# Trace counts only individuals currently aged 12..80.
TR[,t] <- table(factor((m.M[,t])[v.age>=12 & v.age<=80], levels=v.n, ordered=TRUE))
}
if(t>1){
v.RR <- getRR(v.RR, m.M[,t], v.index)
}
# print(t)
# print(v.RR)
m.p <- Probs(m.M[, t], v.index, v.RR) # calculate the transition probabilities at cycle t
m.M[, t + 1] <- samplev(prob = m.p, m = 1) # sample the next health state and store that state in matrix m.M
cat('\r', paste(round(t/n.t * 100), "% done", sep = " ")) # display the progress of the simulation
v.age <- v.age + 1
v.index <- v.index + 1
# NOTE(review): mutates the GLOBAL v.NXbase in place (<<-) to apply the d.x
# annual mortality decline; calling MicroSim twice in one session compounds
# the decline -- re-source the setup before re-running.
v.NXbase <<- v.NXbase * (1-d.x)
u <- getU(u, m.M[,t+1], v.index)
m.E[,t + 1] <- Effs(u, cl=1)
} # close the loop for the time points
if (TS.out == TRUE) { # create a matrix of transitions across states
TS <- paste(m.M, cbind(m.M[, -1], NA), sep = "->") # transitions from one state to the other
TS <- matrix(TS, nrow = n.i)
rownames(TS) <- paste("Ind", 1:n.i, sep = " ") # name the rows
colnames(TS) <- paste("Cycle", 0:n.t, sep = " ") # name the columns
} else {
TS <- NULL
}
if(TR.out==TRUE){
# Convert per-cycle counts to proportions (cycles in rows after transpose).
TR <- prop.table(t(TR), margin = 1)
} else {
TR <- NULL
}
te <- m.E %*% v.dwe # total (discounted) QALYs per individual
te_hat <- mean(te) # average (discounted) QALYs
colSumME <- colSums(m.E)
results <- list(m.M = m.M, TS = TS, TR = TR, m.E = m.E, te = te, te_hat = te_hat, colSumME = colSumME) # store the results from the simulation in a list
return(results) # return the results
} # end of the MicroSim function
#### Probability function
# Baseline relative mortality risk for each individual given the current state:
# never-smokers (and any state not listed, e.g. dead "X") carry RR = 1; the
# other states take their age-specific base RR. Non-positive age indices are
# pushed past the end of the base vectors so getNiVec() maps them to 0.
getInitRR <- function(M_it, v.index){
v.index[v.index <= 0] <- length(v.NXbase) + 10
# Age-specific base relative risks per state, aligned to individuals.
rr_by_state <- list(
C = getNiVec(v.RRCbase, v.index),
Q = getNiVec(v.RRQbase, v.index),
D = getNiVec(v.RRDbase, v.index),
E = getNiVec(v.RREbase, v.index)
)
v.RR <- rep(1, n.i)
for (st in names(rr_by_state)) {
sel <- M_it == st
v.RR[sel] <- rr_by_state[[st]][sel]
}
v.RR
}
# Update each individual's relative mortality risk for the new cycle.
# Current smokers snap to the age-specific smoker RR; individuals in states
# better than their previous one (N, Q, D, E with v.RRold above the state's
# base RR) decay towards that base RR at the age-specific rate v.bTbase.
# v.RRold: last cycle's RR; M_it: current states; v.index: age indices.
getRR <- function(v.RRold, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
v.bT.ni <- getNiVec(v.bTbase, v.index)
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RRD.ni <- getNiVec(v.RRDbase, v.index)
v.RRE.ni <- getNiVec(v.RREbase, v.index)
# Start from the state-based baseline, then overlay the decay rules below.
v.RR <- getInitRR(M_it, v.index)
v.worseToN <- M_it=="N" & v.RRold>1
v.RR[v.worseToN] <- 1 + (v.RRold[v.worseToN] - 1) * v.bT.ni[v.worseToN]
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- 1 + (v.RRold[M_it=="Q"] - 1) * v.bT.ni[M_it=="Q"]
v.worseToD <- M_it=="D" & v.RRold>v.RRD.ni
v.RR[v.worseToD] <- v.RRD.ni[v.worseToD] + (v.RRold[v.worseToD] - v.RRD.ni[v.worseToD]) * v.bT.ni[v.worseToD]
v.worseToE <- M_it=="E" & v.RRold>v.RRE.ni
v.RR[v.worseToE] <- v.RRE.ni[v.worseToE] + (v.RRold[v.worseToE] - v.RRE.ni[v.worseToE]) * v.bT.ni[v.worseToE]
v.RR
}
# The Probs function that updates the transition probabilities of every cycle is shown below.
# Builds an n.s x n.i matrix of transition probabilities (one column per
# individual, rows ordered as v.n = N, C, Q, D, E, X) and returns its
# transpose (n.i x n.s) for samplev().
Probs <- function(M_it, v.index, v.RR) {
# M_it: health state occupied by individual i at cycle t (character variable)
# v.index: age index per individual; v.RR: relative mortality risk per individual
v.index[v.index<=0] <- length(v.NXbase) + 10
m.p.it <- matrix(NA, n.s, n.i) # create vector of state transition probabilities
rownames(m.p.it) <- v.n # assign names to the vector
# Update base transition rates
v.NX.ni <- getNiVec(v.NXbase, v.index)
v.toX.ni <- v.RR * v.NX.ni # individual death probability = base rate x RR
v.NC.primer.ni <- getNiVec(v.NC.primer, v.index)
v.NC.ni <- v.NC.primer.ni
v.CQ.primer.ni <- getNiVec(v.CQ.primer, v.index)
v.CQ.ni <- v.CQ.primer.ni
v.QC.primer.ni <- getNiVec(v.QC.primer, v.index)
v.QC.ni <- v.QC.primer.ni
# Ecig transitions (N->E / N->D only apply up to age index 15, i.e. age 25)
v.NE.ni <- getNiVec(v.NE.primer, v.index); v.ND.ni <- getNiVec(v.ND.primer, v.index)
v.NE.ni[v.index>15] <- 0; v.ND.ni[v.index>15] <- 0;
v.CD.ni <- getNiVec(v.CD.primer, v.index); v.CE.ni <- getNiVec(v.CE.primer, v.index)
v.QD.ni <- getNiVec(v.QD.primer, v.index); v.QE.ni <- getNiVec(v.QE.primer, v.index)
v.DC.ni <- getNiVec(v.DC.primer, v.index); v.DQ.ni <- getNiVec(v.DQ.primer, v.index)
v.DE.ni <- getNiVec(v.DE.primer, v.index); v.EC.ni <- getNiVec(v.EC.primer, v.index)
v.ED.ni <- getNiVec(v.ED.primer, v.index); v.EN.ni <- getNiVec(v.EN.primer, v.index)
# Condition the conventional transitions on not making an e-cig transition...
v.NC.ni <- v.NC.ni * (1-v.NE.ni-v.ND.ni)
v.CQ.ni <- v.CQ.ni * (1-v.CD.ni-v.CE.ni)
v.QC.ni <- v.QC.ni * (1-v.QE.ni-v.QD.ni)
# ...then condition every transition on surviving the cycle.
v.NC.ni <- v.NC.ni * (1-v.toX.ni); v.NE.ni <- v.NE.ni * (1-v.toX.ni); v.ND.ni <- v.ND.ni * (1-v.toX.ni)
v.CQ.ni <- v.CQ.ni * (1-v.toX.ni); v.CD.ni <- v.CD.ni * (1-v.toX.ni); v.CE.ni <- v.CE.ni * (1-v.toX.ni)
v.QC.ni <- v.QC.ni * (1-v.toX.ni); v.QE.ni <- v.QE.ni * (1-v.toX.ni); v.QD.ni <- v.QD.ni * (1-v.toX.ni)
v.DC.ni <- v.DC.ni * (1-v.toX.ni); v.DQ.ni <- v.DQ.ni * (1-v.toX.ni); v.DE.ni <- v.DE.ni * (1-v.toX.ni)
v.EC.ni <- v.EC.ni * (1-v.toX.ni); v.EN.ni <- v.EN.ni * (1-v.toX.ni); v.ED.ni <- v.ED.ni * (1-v.toX.ni)
# Stay-put probabilities absorb the remainder.
v.NN.ni <- 1 - v.NC.ni - v.NE.ni - v.ND.ni - v.toX.ni
v.CC.ni <- 1 - v.CQ.ni - v.CD.ni - v.CE.ni - v.toX.ni
v.QQ.ni <- 1 - v.QC.ni - v.QE.ni - v.QD.ni - v.toX.ni
v.DD.ni <- 1 - v.DC.ni - v.DQ.ni - v.DE.ni - v.toX.ni
v.EE.ni <- 1 - v.EC.ni - v.EN.ni - v.ED.ni - v.toX.ni
# Young (age index <= 15) vs old N/E states get different admissible moves.
youngN <- M_it=="N"&v.index<=15
m.p.it[,youngN] <- rbind(v.NN.ni[youngN], v.NC.ni[youngN], 0, v.ND.ni[youngN], v.NE.ni[youngN], v.toX.ni[youngN]) # transition probabilities when never smoke
youngE <- M_it=="E"&v.index<=15
m.p.it[,youngE] <- rbind(v.EN.ni[youngE], v.EC.ni[youngE], 0, v.ED.ni[youngE], v.EE.ni[youngE], v.toX.ni[youngE])
oldN <- M_it=="N"&v.index>15
m.p.it[,oldN] <- rbind(v.NN.ni[oldN], v.NC.ni[oldN], 0, 0, 0, v.toX.ni[oldN]) # transition probabilities when never smoke
oldE <- M_it=="E"&v.index>15
m.p.it[,oldE] <- rbind(0, v.EC.ni[oldE], v.EN.ni[oldE], v.ED.ni[oldE], v.EE.ni[oldE], v.toX.ni[oldE])
m.p.it[,M_it == "C"] <- rbind(0, v.CC.ni[M_it=="C"], v.CQ.ni[M_it=="C"], v.CD.ni[M_it=="C"], v.CE.ni[M_it=="C"], v.toX.ni[M_it=="C"]) # transition probabilities when current smoke
m.p.it[,M_it == "Q"] <- rbind(0, v.QC.ni[M_it=="Q"], v.QQ.ni[M_it=="Q"], v.QD.ni[M_it=="Q"], v.QE.ni[M_it=="Q"], v.toX.ni[M_it=="Q"]) # transition probabilities when quit smoke
m.p.it[,M_it == "D"] <- rbind(0, v.DC.ni[M_it=="D"], v.DQ.ni[M_it=="D"], v.DD.ni[M_it=="D"], v.DE.ni[M_it=="D"], v.toX.ni[M_it=="D"])
m.p.it[,M_it == "X"] <- c(0, 0, 0, 0, 0, 1) # transition probabilities when dead
# Sanity check: every individual's outgoing probabilities must sum to 1.
# Fix: the original `ifelse(colSums(m.p.it) == 1, return(...), print(...))`
# (a) compared floats with `==` and (b) evaluated the `yes` branch -- and thus
# returned the matrix -- whenever ANY column summed to 1, so the check never
# actually caught a bad column. Use a tolerance and fail loudly instead.
if (all(abs(colSums(m.p.it) - 1) < 1e-06)) {
return(t(m.p.it))
}
stop("Probabilities do not sum to 1")
}
### Utility function
# Per-individual utility on entry, by current state: never-smokers get 1,
# the other modelled states take their age-specific base utility, and anyone
# outside the modelled age-index range 1..70 (ages 11..80) gets 0.
getInitU <- function(M_it, v.index){
v.index[v.index <= 0] <- length(v.NXbase) + 10
# Age-specific base utilities per state, aligned to individuals.
u_by_state <- list(
C = getNiVec(u.Cbase, v.index),
Q = getNiVec(u.Qbase, v.index),
D = getNiVec(u.Dbase, v.index),
E = getNiVec(u.Ebase, v.index)
)
u <- rep(0, n.i)
u[M_it == "N"] <- 1
for (st in names(u_by_state)) {
sel <- M_it == st
u[sel] <- u_by_state[[st]][sel]
}
u[v.index < 1 | v.index > 70] <- 0
u
}
# Estimates the utility (i.e. the unit QALY for the individual) at every cycle.
# Mirrors getRR(): current smokers snap to the age-specific smoking utility;
# individuals who moved to a better state (N, Q, D, E with u.old below that
# state's base utility) recover towards it at the age-specific rate u.bTbase.
# u.old: last cycle's utilities; M_it: current states; v.index: age indices.
getU <- function(u.old, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
u.bT.ni <- getNiVec(u.bTbase, v.index)
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u.D.ni <- getNiVec(u.Dbase, v.index)
u.E.ni <- getNiVec(u.Ebase, v.index)
# Start from the state baseline, then overlay the recovery rules below.
u <- getInitU(M_it, v.index)
u.worseToN <- M_it=="N" & u.old!=0 & u.old<1
u[u.worseToN] <- 1 - (1 - u.old[u.worseToN]) * u.bT.ni[u.worseToN]
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- 1 - (1 - u.old[M_it=="Q"]) * u.bT.ni[M_it=="Q"]
u.worseToD <- M_it=="D" & u.old<u.D.ni
u[u.worseToD] <- u.D.ni[u.worseToD] - (u.D.ni[u.worseToD] - u.old[u.worseToD]) * u.bT.ni[u.worseToD]
u.worseToE <- M_it=="E" & u.old<u.E.ni
u[u.worseToE] <- u.E.ni[u.worseToE] - (u.E.ni[u.worseToE] - u.old[u.worseToE]) * u.bT.ni[u.worseToE]
u[M_it == "X"] <- 0 # update the utility if dead
u[v.index<1 | v.index>70] <- 0
u
}
# QALYs accrued over one cycle: per-individual utility scaled by cycle length.
# u: vector of utilities for the current cycle
# cl: cycle length in years (default 1)
Effs <- function (u, cl = 1) {
u * cl
}
##################################### Run the simulation ##################################
# START SIMULATION
# Fix: the original call passed the undefined globals `v.x` (as X) and `d.c`;
# it only ran because MicroSim never forces those arguments (lazy evaluation).
# Pass explicit placeholders so the call cannot break if MicroSim ever uses them.
p <- Sys.time()
sim_no_trt <- MicroSim(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c = NA, d.e = d.e, Trt = FALSE, seed = 200)
comp.time <- Sys.time() - p
comp.time
# PRINT DATA
sim_no_trt$TR
# sim_no_trt$m.M
# SAVE DATA
saveRDS(sim_no_trt$TR, file.path(outDir, paste0(scenarioName,"_","SG",big,"_TR.rds")))
saveRDS(sim_no_trt$colSumME, file.path(outDir, paste0(scenarioName,"_","SG",big,"_colSumME.rds")))
# saveRDS(sim_no_trt, file.path(outDir, paste0(scenarioName,"_","SG",big,"_sim_no_trt.rds")))
|
/code/scenarios_micro/micro_v0.3_sensitivity/MoreCECD_SGUS/3xELF_micro.R
|
no_license
|
KateDoan/gice
|
R
| false
| false
| 15,736
|
r
|
#############################################################################################################################
# Scenario driver for the "3xELF" sensitivity run of the smoking/e-cigarette
# microsimulation (SG/US variant): C->D and C->E switching rates are tripled
# and e-cig transition rates are halved in the 18..ELFAge band (see the
# fSwitchEcig/fELF factors below).
rm(list=ls())
#############################################################################################################################
test <- TRUE # set test to FALSE to run the real simulation
scenarioName <- "3xELF"
ELFAge <- 18
big <- "US"
outDir <- file.path("code", "scenarios_micro", "micro_v0.3_sensitivity", "MoreCECD_SGUS", "outData")
#############################################################################################################################
# micro_prep() and readEcigTrans() are defined in the sourced util scripts;
# they supply starting states/ages, baseline death rates and transition primers.
source(file.path("code", "scenarios_micro", "util", "microsimulation_preparation.R"))
source(file.path("code", "scenarios_micro", "util", "read_big_mat_transition.R"))
init <- micro_prep()
eTrans <- readEcigTrans(big)
# Modelled age range (11..80 -> 70 age indices) and calendar horizon.
startAge <- 11
endAge <- 80
startYear <- 2017
endYear <- 2067
numAge <- endAge - startAge + 1
numYear <- endYear - startYear + 1
if(test){
n.i <- 10
v.M_1 <- rep(c("C", "N", "E", "Q", "Q"), n.i/5)
v.age <- rep(c(19, 4, 35, 180, 20), n.i/5)
n.t <- 10
} else {
n.i <- length(init$sin.ages) # number of simulated individuals
v.M_1 <- init$sin.states # beginning states
v.age <- init$sin.ages # initialize age
n.t <- 50 # time horizon
}
v.n <- c("N", "C", "Q", "D", "E", "X") # the model states: Never Smoker(N), Smoker(C), Quitter(Q), Dual(D), E-cig Only(E)
n.s <- length(v.n) # the number of states
d.e <- 0.03 # equal discounting of costs and QALYs by 3%
d.x <- 0.03 # mortality rate decreases by 3% annually
# v.Trt <- c("No Treatment", "Treatment") # store the strategy names
# Cost and utility inputs
# Each u.*base / v.*base vector below has 70 entries, one per age 11..80.
u.N <- 1 # utility when not smoking
u.bTbase <- c(rep(0.98, 19), rep(0.96, 10), rep(0.97, 5), rep(0.96, 5), rep(0.97, 15), rep(0.98, 16))
u.Cbase <- c(rep(0.91,19), rep(c(0.88,0.86,0.83,0.81,0.78,0.76,0.74), each=5), rep(0.71, 16)) # utility when smoking
u.Qbase <- 1 - (1-u.Cbase) * 0.05 # quitters keep 5% of the smoking disutility
if(big == "JP"){
u.Ebase <- 1 - (1-u.Cbase) * 0.05
} else {
u.Ebase <- 1 - (1-u.Cbase) * 0.10 # e-cig only: 10% of the smoking disutility
}
u.Dbase <- sqrt(u.Cbase * u.Ebase) # dual use: geometric mean of C and E utilities
v.NXbase <- init$deathRateN
v.bTbase <- c(rep(0.92, 5), rep(0.93, 5), rep(0.94, 14), rep(0.95, 21), rep(0.96, 25))
v.RRCbase <- c(rep(2.8, 49), rep(2.5, 10), rep(2.0, 11))
v.RRQbase <- 1 + (v.RRCbase - 1) * 0.05
if(big == "JP"){
v.RREbase <- 1 + (v.RRCbase - 1) * 0.05
} else {
v.RREbase <- 1 + (v.RRCbase - 1) * 0.10
}
v.RRDbase <- sqrt(v.RRCbase * v.RREbase)
# Transition rates transformation
# Scenario knobs: fELF halves transitions in the 18..ELFAge rows below,
# fSwitchEcig triples the smoker->dual (CD) and smoker->ecig (CE) primers,
# fInitEcig is left at 1 (no change to never-smoker uptake overall).
fELF = 1/2
fInitEcig = 1
fSwitchEcig = 3
v.NC.primer <- init$NCprimer
v.CQ.primer <- init$CQprimer
v.QC.primer <- init$QCprimer
v.NE.primer=eTrans$NE; v.ND.primer=eTrans$ND; v.CD.primer=eTrans$CD*fSwitchEcig; v.CE.primer=eTrans$CE*fSwitchEcig;
v.QD.primer=eTrans$QD; v.QE.primer=eTrans$QE; v.DC.primer=eTrans$DC; v.DQ.primer=eTrans$DQ;
v.DE.primer=eTrans$DE; v.EC.primer=eTrans$EC; v.ED.primer=eTrans$ED; v.EN.primer=eTrans$EN
v.NE.primer <- v.NE.primer * fInitEcig
# Apply the ELF (halving) factor to the age band 18..ELFAge.
row18 <- 18 - startAge + 1
rowAgeELF <- ELFAge - startAge + 1
v.NE.primer[row18:rowAgeELF] <- v.NE.primer[row18:rowAgeELF] * fELF
v.CD.primer[row18:rowAgeELF] <- v.CD.primer[row18:rowAgeELF] * fELF
v.CE.primer[row18:rowAgeELF] <- v.CE.primer[row18:rowAgeELF] * fELF
v.QE.primer[row18:rowAgeELF] <- v.QE.primer[row18:rowAgeELF] * fELF
v.ND.primer[row18:rowAgeELF] <- v.ND.primer[row18:rowAgeELF] * fELF
v.QD.primer[row18:rowAgeELF] <- v.QD.primer[row18:rowAgeELF] * fELF
v.QD.primer[row18:rowAgeELF] <- v.QD.primer[row18:rowAgeELF] * fELF
##################################### Functions ###########################################
##################################### Helper functions ####################################
# Look up per-individual values from an age-indexed base vector.
# Indices that fall outside the vector (or are NA) yield 0 instead of NA,
# so out-of-range ages contribute nothing downstream.
getNiVec <- function(v.base, v.index){
looked_up <- v.base[v.index]
ifelse(is.na(looked_up), 0, looked_up)
}
##################################### Main functions ######################################
# samplev(): draw m categorical samples per row of `probs` (rows = individuals,
# columns = states). Vectorised replacement for Hmisc::rMultinom(); returns an
# n x m matrix of sampled column labels. One runif(n) draw is made per sample
# column, preserving the RNG stream of the original implementation.
samplev <- function (probs, m) {
n_rows <- nrow(probs)
n_states <- ncol(probs)
state_labels <- dimnames(probs)[[2]]
if (!length(state_labels)) {
state_labels <- 1:n_states
}
# Column-wise cumulative probabilities: row i of cum_p holds the running sum
# of the first i state probabilities for each individual (n_states x n_rows).
cum_p <- apply(probs, 1, cumsum)
if (any((cum_p[n_states, ] - 1) > 1e-05))
stop("error in multinom: probabilities do not sum to 1")
drawn <- matrix(state_labels[1], nrow = n_rows, ncol = m)
for (j in 1:m) {
# One uniform per individual, repeated across states for the comparison.
u <- rep(runif(n_rows), rep(n_states, n_rows))
drawn[, j] <- state_labels[1 + colSums(u > cum_p)]
}
drawn
}
# The MicroSim function for the simple microsimulation of the 'Sick-Sicker' model keeps track of what happens to each individual during each cycle.
MicroSim <- function(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c, d.e, TR.out = TRUE, TS.out = FALSE, Trt = FALSE, seed = 1) {
# Individual-level state-transition (microsimulation) model: tracks each
# individual's health state and utility over n.t yearly cycles and returns
# trajectories, a population trace, and discounted QALYs.
# Arguments:
# v.M_1: vector of initial states for individuals
# v.age: vector of initial ages for individuals (drives age-indexed rates)
# n.i: number of individuals
# n.t: total number of cycles to run the model
# v.n: vector of health state names
# X: vector or matrix of individual characteristics (not referenced in this body)
# d.c: discount rate for costs (not used below; only QALYs are discounted here)
# d.e: discount rate for health outcome (QALYs)
# TR.out: should the output include a Microsimulation trace? (default is TRUE)
# TS.out: should the output include a matrix of transitions between states? (default is FALSE)
# Trt: are the n.i individuals receiving treatment? (scalar with a Boolean value, default is FALSE)
# seed: starting seed number for random number generator (default is 1)
# Makes use of:
# Probs: function for the estimation of transition probabilities
# Costs: function for the estimation of cost state values
# Effs: function for the estimation of state specific health outcomes (QALYs)
# Relies on globals: startAge, n.s, d.x, v.NXbase (mutated!), getInitRR,
# getRR, getInitU, getU, samplev.
# age index of each individual into the age-indexed rate vectors
v.index <- v.age - startAge + 1
# per-cycle discount weights for QALYs
v.dwe <- 1 / (1 + d.e) ^ (0:n.t)
# Create the matrices capturing the state name (m.M) and per-cycle QALYs (m.E)
# for all individuals at each time point
m.M <- m.E <- matrix(nrow = n.i, ncol = n.t + 1,
dimnames = list(paste("ind", 1:n.i, sep = " "),
paste("cycle", 0:n.t, sep = " ")))
if(TR.out == TRUE) {
TR = matrix(NA, n.s, n.t)
}
m.M[, 1] <- v.M_1 # indicate the initial health state
v.RR <- getInitRR(v.M_1, v.index)
u <- getInitU(v.M_1, v.index)
m.E[, 1] <- Effs (u, cl=1)
set.seed(seed) # set the seed for every individual for the random number generator
for (t in 1:n.t) { # t <- 3
# print(v.index)
# trace counts states only for individuals currently aged 12-80
if (TR.out == TRUE) {
TR[,t] <- table(factor((m.M[,t])[v.age>=12 & v.age<=80], levels=v.n, ordered=TRUE))
}
# after the first cycle, relative risk decays/updates with state history
if(t>1){
v.RR <- getRR(v.RR, m.M[,t], v.index)
}
# print(t)
# print(v.RR)
m.p <- Probs(m.M[, t], v.index, v.RR) # calculate the transition probabilities at cycle t
# NOTE(review): `prob =` relies on R's partial matching of samplev's
# `probs` parameter -- works, but spelling it out would be safer.
m.M[, t + 1] <- samplev(prob = m.p, m = 1) # sample the next health state and store that state in matrix m.M
cat('\r', paste(round(t/n.t * 100), "% done", sep = " ")) # display the progress of the simulation
v.age <- v.age + 1
v.index <- v.index + 1
# NOTE(review): `<<-` mutates the GLOBAL background mortality vector each
# cycle (secular trend d.x); this side effect persists across calls to
# MicroSim -- confirm this is intended before running the model twice.
v.NXbase <<- v.NXbase * (1-d.x)
u <- getU(u, m.M[,t+1], v.index)
m.E[,t + 1] <- Effs(u, cl=1)
} # close the loop for the time points
if (TS.out == TRUE) { # create a matrix of transitions across states
TS <- paste(m.M, cbind(m.M[, -1], NA), sep = "->") # transitions from one state to the other
TS <- matrix(TS, nrow = n.i)
rownames(TS) <- paste("Ind", 1:n.i, sep = " ") # name the rows
colnames(TS) <- paste("Cycle", 0:n.t, sep = " ") # name the columns
} else {
TS <- NULL
}
# convert trace counts to per-cycle state proportions (rows sum to 1)
if(TR.out==TRUE){
TR <- prop.table(t(TR), margin = 1)
} else {
TR <- NULL
}
te <- m.E %*% v.dwe # total (discounted) QALYs per individual
te_hat <- mean(te) # average (discounted) QALYs
colSumME <- colSums(m.E) # undiscounted population QALYs per cycle
results <- list(m.M = m.M, TS = TS, TR = TR, m.E = m.E, te = te, te_hat = te_hat, colSumME = colSumME) # store the results from the simulation in a list
return(results) # return the results
} # end of the MicroSim function
#### Probability function
getInitRR <- function(M_it, v.index){
# Initial relative risk (RR) of death for each individual given current
# state M_it ("N","C","Q","D","E") and age index v.index. The state-specific
# RR curves (v.RRCbase, v.RRQbase, v.RRDbase, v.RREbase) are age-indexed
# globals; getNiVec() maps out-of-range lookups to 0.
# push invalid (pre-entry) indices past the end of the rate vectors so the
# lookups return NA and are zeroed by getNiVec()
v.index[v.index<=0] <- length(v.NXbase) + 10
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RRD.ni <- getNiVec(v.RRDbase, v.index)
v.RRE.ni <- getNiVec(v.RREbase, v.index)
# default RR of 1 (never-smoker baseline) for everyone, then overwrite by state
v.RR <- rep(1, n.i)
v.RR[M_it=="N"] <- 1
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- v.RRQ.ni[M_it=="Q"]
v.RR[M_it=="D"] <- v.RRD.ni[M_it=="D"]
v.RR[M_it=="E"] <- v.RRE.ni[M_it=="E"]
v.RR
}
getRR <- function(v.RRold, M_it, v.index){
# Update each individual's relative risk of death given the previous cycle's
# RR (v.RRold) and the new state M_it. Individuals who moved to a
# lower-risk state do not drop to the target RR immediately: the excess risk
# decays by the age-indexed factor v.bTbase (presumably a per-cycle risk
# reversal rate -- confirm with the calibration inputs).
v.index[v.index<=0] <- length(v.NXbase) + 10
v.bT.ni <- getNiVec(v.bTbase, v.index)
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RRD.ni <- getNiVec(v.RRDbase, v.index)
v.RRE.ni <- getNiVec(v.RREbase, v.index)
# start from the state's baseline RR, then apply decay for residual risk
v.RR <- getInitRR(M_it, v.index)
# in N with residual excess risk: decay excess toward 1
v.worseToN <- M_it=="N" & v.RRold>1
v.RR[v.worseToN] <- 1 + (v.RRold[v.worseToN] - 1) * v.bT.ni[v.worseToN]
# current smokers always take the full current-smoker RR
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
# quitters: excess over 1 decays each cycle
v.RR[M_it=="Q"] <- 1 + (v.RRold[M_it=="Q"] - 1) * v.bT.ni[M_it=="Q"]
# D/E states: only decay if the carried-over RR exceeds the state's own RR
v.worseToD <- M_it=="D" & v.RRold>v.RRD.ni
v.RR[v.worseToD] <- v.RRD.ni[v.worseToD] + (v.RRold[v.worseToD] - v.RRD.ni[v.worseToD]) * v.bT.ni[v.worseToD]
v.worseToE <- M_it=="E" & v.RRold>v.RRE.ni
v.RR[v.worseToE] <- v.RRE.ni[v.worseToE] + (v.RRold[v.worseToE] - v.RRE.ni[v.worseToE]) * v.bT.ni[v.worseToE]
v.RR
}
# The Probs function that updates the transition probabilities of every cycle is shown below.
Probs <- function(M_it, v.index, v.RR) {
# Build the n.i x n.s matrix of one-cycle transition probabilities.
# M_it: health state occupied by individual i at cycle t (character variable)
# v.index: age index into the age-indexed rate vectors
# v.RR: current relative risk of death for each individual
# States (rows of m.p.it, in v.n order) include N, C, Q, D, E and death X.
# push invalid indices past the vector ends so getNiVec() returns 0
v.index[v.index<=0] <- length(v.NXbase) + 10
m.p.it <- matrix(NA, n.s, n.i) # states x individuals matrix of transition probabilities
rownames(m.p.it) <- v.n # assign state names to the rows
# Update base transition rates
v.NX.ni <- getNiVec(v.NXbase, v.index)
v.toX.ni <- v.RR * v.NX.ni # individual death probability = RR x background mortality
v.NC.primer.ni <- getNiVec(v.NC.primer, v.index)
v.NC.ni <- v.NC.primer.ni
v.CQ.primer.ni <- getNiVec(v.CQ.primer, v.index)
v.CQ.ni <- v.CQ.primer.ni
v.QC.primer.ni <- getNiVec(v.QC.primer, v.index)
v.QC.ni <- v.QC.primer.ni
# Ecig transitions
v.NE.ni <- getNiVec(v.NE.primer, v.index); v.ND.ni <- getNiVec(v.ND.primer, v.index)
# N->E and N->D initiation only below age index 15 (see young/old split below)
v.NE.ni[v.index>15] <- 0; v.ND.ni[v.index>15] <- 0;
v.CD.ni <- getNiVec(v.CD.primer, v.index); v.CE.ni <- getNiVec(v.CE.primer, v.index)
v.QD.ni <- getNiVec(v.QD.primer, v.index); v.QE.ni <- getNiVec(v.QE.primer, v.index)
v.DC.ni <- getNiVec(v.DC.primer, v.index); v.DQ.ni <- getNiVec(v.DQ.primer, v.index)
v.DE.ni <- getNiVec(v.DE.primer, v.index); v.EC.ni <- getNiVec(v.EC.primer, v.index)
v.ED.ni <- getNiVec(v.ED.primer, v.index); v.EN.ni <- getNiVec(v.EN.primer, v.index)
# competing-risk adjustment: scale smoking transitions down by the e-cig exits
v.NC.ni <- v.NC.ni * (1-v.NE.ni-v.ND.ni)
v.CQ.ni <- v.CQ.ni * (1-v.CD.ni-v.CE.ni)
v.QC.ni <- v.QC.ni * (1-v.QE.ni-v.QD.ni)
# ...and scale all non-death transitions by the survival probability
v.NC.ni <- v.NC.ni * (1-v.toX.ni); v.NE.ni <- v.NE.ni * (1-v.toX.ni); v.ND.ni <- v.ND.ni * (1-v.toX.ni)
v.CQ.ni <- v.CQ.ni * (1-v.toX.ni); v.CD.ni <- v.CD.ni * (1-v.toX.ni); v.CE.ni <- v.CE.ni * (1-v.toX.ni)
v.QC.ni <- v.QC.ni * (1-v.toX.ni); v.QE.ni <- v.QE.ni * (1-v.toX.ni); v.QD.ni <- v.QD.ni * (1-v.toX.ni)
v.DC.ni <- v.DC.ni * (1-v.toX.ni); v.DQ.ni <- v.DQ.ni * (1-v.toX.ni); v.DE.ni <- v.DE.ni * (1-v.toX.ni)
v.EC.ni <- v.EC.ni * (1-v.toX.ni); v.EN.ni <- v.EN.ni * (1-v.toX.ni); v.ED.ni <- v.ED.ni * (1-v.toX.ni)
# staying probabilities are the complements, so each column sums to 1
v.NN.ni <- 1 - v.NC.ni - v.NE.ni - v.ND.ni - v.toX.ni
v.CC.ni <- 1 - v.CQ.ni - v.CD.ni - v.CE.ni - v.toX.ni
v.QQ.ni <- 1 - v.QC.ni - v.QE.ni - v.QD.ni - v.toX.ni
v.DD.ni <- 1 - v.DC.ni - v.DQ.ni - v.DE.ni - v.toX.ni
v.EE.ni <- 1 - v.EC.ni - v.EN.ni - v.ED.ni - v.toX.ni
# assemble columns by current state; young (index<=15) and old N/E differ in
# which e-cig transitions are allowed and in the row the E->N flow occupies
youngN <- M_it=="N"&v.index<=15
m.p.it[,youngN] <- rbind(v.NN.ni[youngN], v.NC.ni[youngN], 0, v.ND.ni[youngN], v.NE.ni[youngN], v.toX.ni[youngN]) # transition probabilities when never smoke
youngE <- M_it=="E"&v.index<=15
m.p.it[,youngE] <- rbind(v.EN.ni[youngE], v.EC.ni[youngE], 0, v.ED.ni[youngE], v.EE.ni[youngE], v.toX.ni[youngE])
oldN <- M_it=="N"&v.index>15
m.p.it[,oldN] <- rbind(v.NN.ni[oldN], v.NC.ni[oldN], 0, 0, 0, v.toX.ni[oldN]) # transition probabilities when never smoke
oldE <- M_it=="E"&v.index>15
m.p.it[,oldE] <- rbind(0, v.EC.ni[oldE], v.EN.ni[oldE], v.ED.ni[oldE], v.EE.ni[oldE], v.toX.ni[oldE])
m.p.it[,M_it == "C"] <- rbind(0, v.CC.ni[M_it=="C"], v.CQ.ni[M_it=="C"], v.CD.ni[M_it=="C"], v.CE.ni[M_it=="C"], v.toX.ni[M_it=="C"]) # transition probabilities when current smoke
m.p.it[,M_it == "Q"] <- rbind(0, v.QC.ni[M_it=="Q"], v.QQ.ni[M_it=="Q"], v.QD.ni[M_it=="Q"], v.QE.ni[M_it=="Q"], v.toX.ni[M_it=="Q"]) # transition probabilities when quit smoke
m.p.it[,M_it == "D"] <- rbind(0, v.DC.ni[M_it=="D"], v.DQ.ni[M_it=="D"], v.DD.ni[M_it=="D"], v.DE.ni[M_it=="D"], v.toX.ni[M_it=="D"])
m.p.it[,M_it == "X"] <- c(0, 0, 0, 0, 0, 1) # transition probabilities when dead (absorbing)
# cat("\n")
# print(m.p.it)
# cat("\n")
# NOTE(review): ifelse() is misused here -- return() fires during evaluation
# of the 'yes' argument whenever any column sums to exactly 1, and the exact
# floating-point `== 1` comparison is fragile. Consider
# if (all(abs(colSums(m.p.it) - 1) < 1e-12)) return(t(m.p.it)) else stop(...).
ifelse(colSums(m.p.it) == 1, return(t(m.p.it)), print("Probabilities do not sum to 1")) # return the transition probabilities or produce an error
}
### Utility function
getInitU <- function(M_it, v.index){
# Initial per-cycle utility for each individual given state M_it and age
# index v.index. State-specific utility curves (u.Cbase etc.) are
# age-indexed globals; never-state utility is 1.
v.index[v.index<=0] <- length(v.NXbase) + 10
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u.D.ni <- getNiVec(u.Dbase, v.index)
u.E.ni <- getNiVec(u.Ebase, v.index)
u <- rep(0, n.i)
u[M_it=="N"] <- 1
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- u.Q.ni[M_it=="Q"]
u[M_it=="D"] <- u.D.ni[M_it=="D"]
u[M_it=="E"] <- u.E.ni[M_it=="E"]
# no QALYs accrued outside the modelled age range (index 1..70)
u[v.index<1 | v.index>70] <- 0
u
}
# Estimates the utility (i.e. the unit QALY for the individual) at every cycle.
getU <- function(u.old, M_it, v.index){
# Update per-cycle utility given last cycle's utility u.old and the new
# state. Mirrors getRR(): individuals who moved to a better state recover
# utility gradually, with the deficit decaying by the age-indexed factor
# u.bTbase (presumably the same reversal-rate concept as v.bTbase -- confirm).
v.index[v.index<=0] <- length(v.NXbase) + 10
u.bT.ni <- getNiVec(u.bTbase, v.index)
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u.D.ni <- getNiVec(u.Dbase, v.index)
u.E.ni <- getNiVec(u.Ebase, v.index)
# start from the state's baseline utility, then apply gradual recovery
u <- getInitU(M_it, v.index)
# in N with a residual deficit (u.old in (0,1)): deficit decays toward 0
u.worseToN <- M_it=="N" & u.old!=0 & u.old<1
u[u.worseToN] <- 1 - (1 - u.old[u.worseToN]) * u.bT.ni[u.worseToN]
u[M_it=="C"] <- u.C.ni[M_it=="C"]
# quitters recover toward full utility
u[M_it=="Q"] <- 1 - (1 - u.old[M_it=="Q"]) * u.bT.ni[M_it=="Q"]
# D/E: only recover if carried-over utility is below the state's own level
u.worseToD <- M_it=="D" & u.old<u.D.ni
u[u.worseToD] <- u.D.ni[u.worseToD] - (u.D.ni[u.worseToD] - u.old[u.worseToD]) * u.bT.ni[u.worseToD]
u.worseToE <- M_it=="E" & u.old<u.E.ni
u[u.worseToE] <- u.E.ni[u.worseToE] - (u.E.ni[u.worseToE] - u.old[u.worseToE]) * u.bT.ni[u.worseToE]
u[M_it == "X"] <- 0 # update the utility if dead
# no QALYs accrued outside the modelled age range (index 1..70)
u[v.index<1 | v.index>70] <- 0
u
}
Effs <- function (u, cl = 1) {
  # QALYs accrued during one cycle: per-cycle utility u scaled by the
  # cycle length cl (default 1 year).
  u * cl
}
##################################### Run the simulation ##################################
# START SIMULATION (no-treatment scenario), timing the run
p = Sys.time()
sim_no_trt <- MicroSim(v.M_1, v.age, n.i, n.t, v.n, X = v.x, d.c, d.e, Trt = FALSE, seed = 200)
comp.time = Sys.time() - p
comp.time
# PRINT DATA: per-cycle state proportions (trace)
sim_no_trt$TR
# sim_no_trt$m.M
# SAVE DATA: trace and per-cycle QALY column sums, tagged by scenario name
# and subgroup index `big`
saveRDS(sim_no_trt$TR, file.path(outDir, paste0(scenarioName,"_","SG",big,"_TR.rds")))
saveRDS(sim_no_trt$colSumME, file.path(outDir, paste0(scenarioName,"_","SG",big,"_colSumME.rds")))
# saveRDS(sim_no_trt, file.path(outDir, paste0(scenarioName,"_","SG",big,"_sim_no_trt.rds")))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_games.R
\name{get_games}
\alias{get_games}
\title{Fetch the games from the opendota API.}
\usage{
get_games(game_vec, wait_time = 1, output = "all", verbose = TRUE)
}
\arguments{
\item{game_vec}{Numeric vector of match ID's}
\item{wait_time}{how long to wait (in seconds) between each API call, default is 1 sec (opendota
asks you not to send more than 1 call per second)}
\item{output}{Defaults to "all", which will extract the entire JSON. If not "all", it should give the
path to an R file that will be sourced to create some output; note that the R file must also
output to output_list()}
\item{verbose}{Give live information on status of parsing, if FALSE no text is output to console.}
}
\value{
Returns a list of objects, if output == "all" it's a list of JSON outputs.
}
\description{
Takes a vector of numeric match IDs of dota2 replays, and attempts to
fetch them from the opendota API; only parsed matches are output.
}
\examples{
\dontrun{
match_ids <- get_game_list(num_matches = 100,
from_time = "20170101",
to_time = "20170423",
min_mmr = 4000)
get_games(match_ids)
}
}
|
/man/get_games.Rd
|
no_license
|
cran/opendotaR
|
R
| false
| true
| 1,204
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_games.R
\name{get_games}
\alias{get_games}
\title{Fetch the games from the opendota API.}
\usage{
get_games(game_vec, wait_time = 1, output = "all", verbose = TRUE)
}
\arguments{
\item{game_vec}{Numeric vector of match ID's}
\item{wait_time}{how long to wait (in seconds) between each API call, default is 1 sec (opendota
asks you not to send more than 1 call per second)}
\item{output}{Defaults to "all", which will extract the entire JSON. If not "all", it should give the
path to an R file that will be sourced to create some output; note that the R file must also
output to output_list()}
\item{verbose}{Give live information on status of parsing, if FALSE no text is output to console.}
}
\value{
Returns a list of objects, if output == "all" it's a list of JSON outputs.
}
\description{
Takes a vector of numeric match IDs of dota2 replays, and attempts to
fetch them from the opendota API; only parsed matches are output.
}
\examples{
\dontrun{
match_ids <- get_game_list(num_matches = 100,
from_time = "20170101",
to_time = "20170423",
min_mmr = 4000)
get_games(match_ids)
}
}
|
rankall <- function(outcome, num = "best") {
  ## For every state, return the hospital holding the requested 30-day
  ## mortality rank for the given outcome.
  ##
  ## Args:
  ##   outcome: one of "heart attack", "heart failure", "pneumonia"
  ##   num: "best", "worst", or a numeric rank
  ## Returns:
  ##   data.frame with columns `hospital` and `state` (one row per state);
  ##   `hospital` is NA where the state has fewer ranked hospitals than `num`.
  ## Raises: error on invalid `outcome` or `num`.
  ## Read outcome data
  outcomeAll <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that outcome is valid
  if (!is.element(outcome, c("heart attack", "heart failure", "pneumonia"))) {
    stop("Error: invalid outcome")
  }
  ## Validate the specified rank. Keep "best"/"worst" symbolic so "worst"
  ## can be resolved per state: the original resolved it to nrow(outcomeAll),
  ## which indexed past the end of every state's group and returned NA.
  if (is.character(num)) {
    if (!is.element(num, c("best", "worst"))) {
      stop('invalid num')
    }
  } else if (is.numeric(num)) {
    if (num < 1 || num > nrow(outcomeAll)) {
      stop('invalid num')
    }
  } else {
    stop('invalid num')
  }
  ## Column holding the 30-day mortality rate for each outcome
  col_idx <- switch(outcome,
                    "heart attack" = 11,
                    "heart failure" = 17,
                    "pneumonia" = 23)
  dataset <- subset(outcomeAll, select = c(2, 7, col_idx))
  ## "Not Available" entries become NA here (warnings suppressed on purpose)
  dataset[, 3] <- suppressWarnings(as.numeric(dataset[, 3]))
  colnames(dataset) <- c("hospital", "state", "mortality_rate")
  state_groups <- split(dataset, dataset$state)
  ## Bug fix: the original lapply()'d over an undefined variable `splited`;
  ## iterate over `state_groups` instead. Hospitals without a mortality rate
  ## are excluded before ranking; ties are broken alphabetically by name.
  all_state_ranks <- lapply(state_groups, function(x, num) {
    x <- x[!is.na(x$mortality_rate), ]
    if (nrow(x) == 0) {
      return(NA_character_)  # no ranked hospital in this state
    }
    x <- x[order(x$mortality_rate, x$hospital), ]
    rank <- if (identical(num, "best")) {
      1L
    } else if (identical(num, "worst")) {
      nrow(x)  # worst within THIS state's group
    } else {
      num
    }
    x$hospital[rank]
  }, num)
  final_dfrm <- data.frame(hospital = unlist(all_state_ranks), state = names(all_state_ranks))
  final_dfrm
}
|
/rankall.R
|
no_license
|
d-roy/ProgrammingAssignment3
|
R
| false
| false
| 1,626
|
r
|
rankall <- function(outcome, num = "best") {
  ## For every state, return the hospital holding the requested 30-day
  ## mortality rank for the given outcome.
  ##
  ## Args:
  ##   outcome: one of "heart attack", "heart failure", "pneumonia"
  ##   num: "best", "worst", or a numeric rank
  ## Returns:
  ##   data.frame with columns `hospital` and `state` (one row per state);
  ##   `hospital` is NA where the state has fewer ranked hospitals than `num`.
  ## Raises: error on invalid `outcome` or `num`.
  ## Read outcome data
  outcomeAll <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that outcome is valid
  if (!is.element(outcome, c("heart attack", "heart failure", "pneumonia"))) {
    stop("Error: invalid outcome")
  }
  ## Validate the specified rank. Keep "best"/"worst" symbolic so "worst"
  ## can be resolved per state: the original resolved it to nrow(outcomeAll),
  ## which indexed past the end of every state's group and returned NA.
  if (is.character(num)) {
    if (!is.element(num, c("best", "worst"))) {
      stop('invalid num')
    }
  } else if (is.numeric(num)) {
    if (num < 1 || num > nrow(outcomeAll)) {
      stop('invalid num')
    }
  } else {
    stop('invalid num')
  }
  ## Column holding the 30-day mortality rate for each outcome
  col_idx <- switch(outcome,
                    "heart attack" = 11,
                    "heart failure" = 17,
                    "pneumonia" = 23)
  dataset <- subset(outcomeAll, select = c(2, 7, col_idx))
  ## "Not Available" entries become NA here (warnings suppressed on purpose)
  dataset[, 3] <- suppressWarnings(as.numeric(dataset[, 3]))
  colnames(dataset) <- c("hospital", "state", "mortality_rate")
  state_groups <- split(dataset, dataset$state)
  ## Bug fix: the original lapply()'d over an undefined variable `splited`;
  ## iterate over `state_groups` instead. Hospitals without a mortality rate
  ## are excluded before ranking; ties are broken alphabetically by name.
  all_state_ranks <- lapply(state_groups, function(x, num) {
    x <- x[!is.na(x$mortality_rate), ]
    if (nrow(x) == 0) {
      return(NA_character_)  # no ranked hospital in this state
    }
    x <- x[order(x$mortality_rate, x$hospital), ]
    rank <- if (identical(num, "best")) {
      1L
    } else if (identical(num, "worst")) {
      nrow(x)  # worst within THIS state's group
    } else {
      num
    }
    x$hospital[rank]
  }, num)
  final_dfrm <- data.frame(hospital = unlist(all_state_ranks), state = names(all_state_ranks))
  final_dfrm
}
|
# "R in Data Science" study notes
# Chapter 2: data objects
vector1<-1:10
vector2<-seq(from=1,to=10,by=2)
# ':' binds tighter than '+', so this is (1:10) + 2
vector1<-1:10+2
vector2<-1:(10+2)
# Approximate the area under sin(x) from 0 to pi with n rectangles
n<-1000
h<-seq(from=0,to=pi,length.out = n)
w<-pi/n
rect<-sin(h)*w
sum(rect)
# Chapter 3: data manipulation
# Classify a number as even/odd; returns the Chinese labels
# "偶数" (even) / "奇数" (odd)
func<-function(x){
if(x%%2==0){
ret<-"偶数"
}else{
ret<-"奇数"
}
return(ret)
}
vec<-round(runif(4)*100)
sapply(vec,func)
# Turn the scalar function into one that accepts vectors
funcv<-Vectorize(func)
funcv(vec)
# Using ifelse instead
ifelse(vec%%2,"odd","even")
# Coefficient of variation (sd/mean) for each numeric iris column
op<-options()
options(digits = 2)# print with 2 significant digits
sapply(iris[,1:4],function(x) sd(x)/mean(x))
options(op)
# sapply over a list
mylist<-as.list(iris[,1:4])
sapply(mylist,mean)
# lapply returns a list
lapply(mylist,mean)
# mean and sd of a vector, returned together
myfunc<-function(x){
ret<-c(mean(x),sd(x))
return(ret)
}
result<-lapply(mylist,myfunc)
# Convert the list to a matrix:
# first coerce to a data frame, then transpose
t(as.data.frame(result))
# use the subsetting operator "[" as the function passed to sapply
t(sapply(result,"["))
# use do.call to pass `result` to rbind
do.call("rbind",result)
# Matrix computations with apply
set.seed(1)
vec<-round(runif(12)*100)
mat<-matrix(vec,3,4)
apply(mat,MARGIN = 1,sum)
apply(mat,MARGIN = 2,sum)
# Grouped computation by a categorical variable
tapply(X=iris$Sepal.Length,INDEX = list(iris$Species),FUN = mean)
# aggregate is similar to tapply
with(iris,aggregate(Sepal.Length, by=list(Species),mean))
head(iris)
|
/数据科学中的r语言 笔记.R
|
no_license
|
yangminghan/Book_Code
|
R
| false
| false
| 1,494
|
r
|
# "R in Data Science" study notes
# Chapter 2: data objects
vector1<-1:10
vector2<-seq(from=1,to=10,by=2)
# ':' binds tighter than '+', so this is (1:10) + 2
vector1<-1:10+2
vector2<-1:(10+2)
# Approximate the area under sin(x) from 0 to pi with n rectangles
n<-1000
h<-seq(from=0,to=pi,length.out = n)
w<-pi/n
rect<-sin(h)*w
sum(rect)
# Chapter 3: data manipulation
# Classify a number as even/odd; returns the Chinese labels
# "偶数" (even) / "奇数" (odd)
func<-function(x){
if(x%%2==0){
ret<-"偶数"
}else{
ret<-"奇数"
}
return(ret)
}
vec<-round(runif(4)*100)
sapply(vec,func)
# Turn the scalar function into one that accepts vectors
funcv<-Vectorize(func)
funcv(vec)
# Using ifelse instead
ifelse(vec%%2,"odd","even")
# Coefficient of variation (sd/mean) for each numeric iris column
op<-options()
options(digits = 2)# print with 2 significant digits
sapply(iris[,1:4],function(x) sd(x)/mean(x))
options(op)
# sapply over a list
mylist<-as.list(iris[,1:4])
sapply(mylist,mean)
# lapply returns a list
lapply(mylist,mean)
# mean and sd of a vector, returned together
myfunc<-function(x){
ret<-c(mean(x),sd(x))
return(ret)
}
result<-lapply(mylist,myfunc)
# Convert the list to a matrix:
# first coerce to a data frame, then transpose
t(as.data.frame(result))
# use the subsetting operator "[" as the function passed to sapply
t(sapply(result,"["))
# use do.call to pass `result` to rbind
do.call("rbind",result)
# Matrix computations with apply
set.seed(1)
vec<-round(runif(12)*100)
mat<-matrix(vec,3,4)
apply(mat,MARGIN = 1,sum)
apply(mat,MARGIN = 2,sum)
# Grouped computation by a categorical variable
tapply(X=iris$Sepal.Length,INDEX = list(iris$Species),FUN = mean)
# aggregate is similar to tapply
with(iris,aggregate(Sepal.Length, by=list(Species),mean))
head(iris)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layer.R
\name{UpdateParameters.default}
\alias{UpdateParameters.default}
\title{Default implementation of UpdateParameters generic function}
\usage{
\method{UpdateParameters}{default}(object, learningRate)
}
\description{
Default implementation of UpdateParameters generic function
}
|
/man/UpdateParameters.default.Rd
|
permissive
|
chuanwen/nnm
|
R
| false
| true
| 362
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layer.R
\name{UpdateParameters.default}
\alias{UpdateParameters.default}
\title{Default implementation of UpdateParameters generic function}
\usage{
\method{UpdateParameters}{default}(object, learningRate)
}
\description{
Default implementation of UpdateParameters generic function
}
|
# Age-specific annual vaccination uptake for MSM, filled in 5-year age bands
# (ages 1..100; ages outside the bands stay 0).
vac.rate <- array(0,100)
vac.rate[15:19] <- 0.00905
vac.rate[20:24] <- 0.01980
vac.rate[25:29] <- 0.01899
vac.rate[30:34] <- 0.01708
vac.rate[35:39] <- 0.01643
vac.rate[40:44] <- 0.00711
vac.rate[45:49] <- 0.00711
vac.rate[50:54] <- 0.00471
vac.rate[55:59] <- 0.00471
vac.rate[60:64] <- 0.00300
vac.rate[65:69] <- 0.00300
# step plot of the banded uptake rates ("HepB historic" per the legend)
plot(vac.rate, type="s", xlim=c(15,90), ylim=c(0,0.04),
xlab="Age", ylab="Annual vaccination rate", main="MSM vaccine uptake")
# extend uptake to ages 70+ and overlay it as a dashed step line
vac.rate[70:100] <- 0.00609
lines(vac.rate, type="s", lty=2)
x <- 1:100
# smooth gamma-shaped uptake curve shifted to start at age 15
smooth <- dgamma((x-15)/5, shape=2, scale=2)
# rescaled to peak at 0.02 ("HPV base-case") and 0.04 ("HPV alternative")
lines(0.02*smooth/max(smooth), col="firebrick1", lwd=2)
lines(0.04*smooth/max(smooth), col="firebrick1", lwd=1)
legend("topright", legend=c("HepB historic","HPV base-case","HPV alternative"),
lty=1, lwd=c(1,2,1), col=c(1,2,2), bty="n")
|
/R-scripts/FigS9.R
|
no_license
|
hansbogaards/HPV-immunization-for-MSM-multimodel-approach
|
R
| false
| false
| 836
|
r
|
# Age-specific annual vaccination uptake for MSM, filled in 5-year age bands
# (ages 1..100; ages outside the bands stay 0).
vac.rate <- array(0,100)
vac.rate[15:19] <- 0.00905
vac.rate[20:24] <- 0.01980
vac.rate[25:29] <- 0.01899
vac.rate[30:34] <- 0.01708
vac.rate[35:39] <- 0.01643
vac.rate[40:44] <- 0.00711
vac.rate[45:49] <- 0.00711
vac.rate[50:54] <- 0.00471
vac.rate[55:59] <- 0.00471
vac.rate[60:64] <- 0.00300
vac.rate[65:69] <- 0.00300
# step plot of the banded uptake rates ("HepB historic" per the legend)
plot(vac.rate, type="s", xlim=c(15,90), ylim=c(0,0.04),
xlab="Age", ylab="Annual vaccination rate", main="MSM vaccine uptake")
# extend uptake to ages 70+ and overlay it as a dashed step line
vac.rate[70:100] <- 0.00609
lines(vac.rate, type="s", lty=2)
x <- 1:100
# smooth gamma-shaped uptake curve shifted to start at age 15
smooth <- dgamma((x-15)/5, shape=2, scale=2)
# rescaled to peak at 0.02 ("HPV base-case") and 0.04 ("HPV alternative")
lines(0.02*smooth/max(smooth), col="firebrick1", lwd=2)
lines(0.04*smooth/max(smooth), col="firebrick1", lwd=1)
legend("topright", legend=c("HepB historic","HPV base-case","HPV alternative"),
lty=1, lwd=c(1,2,1), col=c(1,2,2), bty="n")
|
#' @include parser-general.R
NULL
.gbk_mandatory <- c("LOCUS", "DEFINITION", "ACCESSION", "VERSION", "FEATURES", "//")
#' Parser for GenBank/GenPept records.
#'
#' @param rec A character vector holding the lines of a single GenBank/GenPept record.
#' @return A \code{\linkS4class{gbRecord}} instance.
#' @keywords internal
gbk_record <- function(rec) {
# get a vector with the positions of the main GenBank fields
rec_idx <- grep("^[A-Z//]+", rec)
rec_kwd <- strsplitN(rec[rec_idx], " +", 1L)
gbk_contig <- gbk_sequence <- NULL
# Check the presence of mandatory fields
if (any(is.na(charmatch(.gbk_mandatory, rec_kwd)))) {
stop("mandatory fields are missing from the GenBank file")
}
## get positions of features, origin, contig and end_of_record
ftb_idx <- rec_idx[rec_kwd == "FEATURES"]
seq_idx <- rec_idx[rec_kwd == "ORIGIN"]
ctg_idx <- rec_idx[rec_kwd == "CONTIG"]
end_idx <- rec_idx[rec_kwd == "//"]
ftb_end_idx <- rec_idx[which(rec_kwd == "FEATURES") + 1] - 1
## HEADER
x <- rec[seq.int(ftb_idx - 1)]
seqenv <- seqinfo(gbk_header(x), NULL)
## SEQUENCE
if (length(seq_idx) > 0L) {
# if "//" is right after "ORIGIN" there is no sequence
# and gb_sequence stays set to NULL
if (end_idx - seq_idx > 1L) {
gbk_sequence <- rec[seq.int(seq_idx + 1, end_idx - 1)]
}
## CONTIG
} else if (length(ctg_idx) > 0L) {
contig_line <- strsplitN(collapse(rec[seq.int(ctg_idx, end_idx-1)], ''),
'CONTIG', 2L, fixed = TRUE)
gb_contig <- gbLocation(contig_line)
}
seqenv$sequence <-
parse_sequence(seq = gbk_sequence, acc = getAccession(seqenv),
seqtype = getMoltype(seqenv), src = "gbk")
## FEATURES
ft <- rec[seq.int(ftb_idx + 1, ftb_end_idx)]
ft <- parse_features(x = ft, seqinfo = seqenv)
new_gbRecord(seqinfo = seqenv, features = ft, contig = gbk_contig)
}
#' @keywords internal
gbk_header <- function(x) {
# Parse the header section of a GenBank/GenPept record (all lines before
# FEATURES) into a .gbHeader object. Helpers strsplitN/usplit/collapse and
# the %|na|% infix are package-internal.
# generate a vector with the positions of the main GenBank keywords.
# keywords are all capitals, beginning in column 1 of a record.
gbk_idx <- grep("^[A-Z//]+", x)
gbk_kwd <- strsplitN(x[gbk_idx], split = " +", 1)
## LOCUS (Mandatory)
locus <- gbk_locus(x[gbk_idx[gbk_kwd == "LOCUS"]])
## DEFINITION (Mandatory; may span several lines up to the next keyword)
def_idx <- which(gbk_kwd == "DEFINITION")
def_line <- x[seq.int(gbk_idx[def_idx], gbk_idx[def_idx + 1] - 1)]
definition <- collapse(sub("DEFINITION ", "", def_line), ' ')
## ACCESSION (Mandatory)
acc_line <- x[gbk_idx[gbk_kwd == "ACCESSION"]]
accession <- strsplitN(acc_line, split = "\\s+", 2L)
## VERSION and GI (Mandatory)
ver_line <- x[gbk_idx[gbk_kwd == "VERSION"]]
version <- usplit(ver_line, split = "\\s+")[2L]
seqid <- paste0('gi|', usplit(ver_line, split = "GI:", fixed = TRUE)[2L])
## DBLINK (Optional)
if (length(db_line <- x[gbk_idx[gbk_kwd == "DBLINK"]]) > 0L) {
dblink <- usplit(db_line, split = "Project: ", fixed = TRUE)[2L]
} else {
dblink <- NA_character_
}
## DBSOURCE (GenPept only; sometimes more than one line)
if (length(dbsrc_idx <- which(gbk_kwd == "DBSOURCE")) > 0L) {
dbs_lines <- x[seq.int(gbk_idx[dbsrc_idx], gbk_idx[dbsrc_idx + 1] - 1)]
dbsource <- collapse(gsub("^ +", "", sub("DBSOURCE", "", dbs_lines)), "\n")
} else {
dbsource <- NA_character_
}
## KEYWORDS (Mandatory)
key_line <- x[gbk_idx[gbk_kwd == "KEYWORDS"]]
keywords <- sub("KEYWORDS ", "", key_line)
## SOURCE with ORGANISM and the complete lineage (Mandatory)
src_idx <- which(gbk_kwd == 'SOURCE')
source_lines <- x[seq.int(gbk_idx[src_idx], gbk_idx[src_idx + 1] - 1)]
source <- sub("SOURCE ", "", source_lines[1L])
organism <- sub(" ORGANISM ", "", source_lines[2L])
taxonomy <- collapse(gsub("^ +", "", source_lines[-c(1L, 2L)]), ' ')
## REFERENCE blocks (run from the first REFERENCE keyword to the line
## before the keyword that follows the last one)
if (length(ref_idx <- which(gbk_kwd == "REFERENCE")) > 0L) {
ref_lines <-
x[
seq.int(
gbk_idx[ref_idx[1]],
# %|na|% is a package infix; presumably "use RHS when LHS is NA",
# i.e. references run to end-of-header when REFERENCE is the last
# keyword -- confirm against the package's operator definitions
(gbk_idx[ref_idx[length(ref_idx)] + 1] - 1) %|na|% length(x)
)]
references <- gbk_reference_list(ref_lines)
} else {
references <- .gbReferenceList()
}
## COMMENT (Optional; runs from the first COMMENT line to end of header)
if (length(gbk_idx[gbk_kwd == "COMMENT"]) > 0L) {
com_lines <- x[seq.int(min(gbk_idx[gbk_kwd == "COMMENT"]), length(x))]
comment <- collapse(gsub("^ +", "", sub("COMMENT", "", com_lines)), "\n")
} else {
comment <- NA_character_
}
.gbHeader(
locus = locus,
definition = definition,
accession = accession,
version = version,
seqid = seqid,
dblink = dblink,
dbsource = dbsource,
keywords = keywords,
source = source,
organism = organism,
taxonomy = taxonomy,
references = references,
comment = comment
)
}
#' @keywords internal
gbk_locus <- function(locus_line) {
# Parse the LOCUS line into a .gbLocus object. After dropping the keyword,
# the tokens are: name, length, unit ('bp'/'aa'), moltype (GenBank only),
# optional topology, division, date -- so topology/division are located
# relative to the final (date) token.
tokens <- usplit(locus_line, split = "\\s+")[-1]
# GenBank format: 'bp', GenPept: 'aa'
gb <- if (tokens[3] == 'bp') TRUE else FALSE
date_idx <- length(tokens)
divi_idx <- date_idx - 1
topo_idx <- date_idx - 2
# fewer tokens than a full line implies the optional topology field is absent
if (gb && date_idx < 7 || !gb && date_idx < 6) {
# topology is missing
topo_idx <- NULL
}
.gbLocus(
lnm = tokens[1], # locus name
len = tokens[2], # sequence length
mtp = if (gb) tokens[4] else 'AA', # molecule type (GenPept is always AA)
top = tokens[topo_idx] %||% NA_character_, # topology, if present
div = tokens[divi_idx] %||% NA_character_, # division code
cdt = tokens[date_idx], # creation date
mdt = tokens[date_idx] # modification date (same flat-file field)
)
}
#' @keywords internal
gbk_reference <- function(ref) {
# Parse one REFERENCE block (character vector of its lines) into a
# reference object via the set_reference() builder.
## split by subkeywords (AUTHORS, TITLE, ... start within the first 4 columns)
ref_idx <- grep("^ {0,3}[A-Z]+", ref)
ref_list <- ixsplit(ref, ref_idx, include_i = TRUE, collapse_x = TRUE)
## subkeyword of each chunk, and the text that follows it
kwd <- vapply(ref_list, strsplitN, '\\s+', 1L, FUN.VALUE = "")
field <- vapply(ref_list, strsplitN, '^[A-Z]+\\s+(?!\\S)\\s', 2L, perl = TRUE, FUN.VALUE = "")
## feed each field into the builder; absent subkeywords pass an empty selection
ref <- set_reference()
ref$refline(field[kwd == "REFERENCE"])
ref$authors(field[kwd == "AUTHORS"])
ref$consrtm(field[kwd == "CONSRTM"])
ref$title(field[kwd == "TITLE"])
ref$journal(field[kwd == "JOURNAL"])
ref$pubmed(field[kwd == "PUBMED"])
ref$remark(field[kwd == "REMARK"])
ref$yield()
}
#' @keywords internal
gbk_reference_list <- function(ref_lines) {
# Split the REFERENCE section into individual reference blocks (one per
# line containing "REFERENCE") and parse each with gbk_reference().
## split references
ref_idx <- grep("REFERENCE", ref_lines, fixed = TRUE, ignore.case = FALSE)
ref_list <- ixsplit(ref_lines, ref_idx)
.gbReferenceList(ref = lapply(ref_list, gbk_reference))
}
|
/R/parser-gbk.R
|
no_license
|
cran/biofiles
|
R
| false
| false
| 6,310
|
r
|
#' @include parser-general.R
NULL
.gbk_mandatory <- c("LOCUS", "DEFINITION", "ACCESSION", "VERSION", "FEATURES", "//")
#' Parser for GenBank/GenPept records.
#'
#' @param rec A character vector holding the lines of a single GenBank/GenPept record.
#' @return A \code{\linkS4class{gbRecord}} instance.
#' @keywords internal
gbk_record <- function(rec) {
  ## Parse one GenBank/GenPept flat-file record (character vector, one line
  ## per element) into a gbRecord object.
  # positions of the top-level keywords: all caps (or "//"), starting in column 1
  rec_idx <- grep("^[A-Z//]+", rec)
  rec_kwd <- strsplitN(rec[rec_idx], " +", 1L)
  gbk_contig <- gbk_sequence <- NULL
  # Check the presence of mandatory fields
  if (any(is.na(charmatch(.gbk_mandatory, rec_kwd)))) {
    stop("mandatory fields are missing from the GenBank file")
  }
  ## get positions of features, origin, contig and end_of_record
  ftb_idx <- rec_idx[rec_kwd == "FEATURES"]
  seq_idx <- rec_idx[rec_kwd == "ORIGIN"]
  ctg_idx <- rec_idx[rec_kwd == "CONTIG"]
  end_idx <- rec_idx[rec_kwd == "//"]
  ftb_end_idx <- rec_idx[which(rec_kwd == "FEATURES") + 1] - 1
  ## HEADER: everything before the FEATURES keyword
  x <- rec[seq.int(ftb_idx - 1)]
  seqenv <- seqinfo(gbk_header(x), NULL)
  ## SEQUENCE
  if (length(seq_idx) > 0L) {
    # if "//" is right after "ORIGIN" there is no sequence
    # and gbk_sequence stays set to NULL
    if (end_idx - seq_idx > 1L) {
      gbk_sequence <- rec[seq.int(seq_idx + 1, end_idx - 1)]
    }
    ## CONTIG
  } else if (length(ctg_idx) > 0L) {
    contig_line <- strsplitN(collapse(rec[seq.int(ctg_idx, end_idx - 1)], ''),
                             'CONTIG', 2L, fixed = TRUE)
    # Bug fix: this was assigned to 'gb_contig' (a typo), so the parsed
    # CONTIG location was discarded and new_gbRecord() always got NULL.
    gbk_contig <- gbLocation(contig_line)
  }
  seqenv$sequence <-
    parse_sequence(seq = gbk_sequence, acc = getAccession(seqenv),
                   seqtype = getMoltype(seqenv), src = "gbk")
  ## FEATURES: the table between FEATURES and the next top-level keyword
  ft <- rec[seq.int(ftb_idx + 1, ftb_end_idx)]
  ft <- parse_features(x = ft, seqinfo = seqenv)
  new_gbRecord(seqinfo = seqenv, features = ft, contig = gbk_contig)
}
#' @keywords internal
gbk_header <- function(x) {
# Parse the header section of a GenBank/GenPept record (all lines before
# FEATURES) into a .gbHeader object. Helpers strsplitN/usplit/collapse and
# the %|na|% infix are package-internal.
# generate a vector with the positions of the main GenBank keywords.
# keywords are all capitals, beginning in column 1 of a record.
gbk_idx <- grep("^[A-Z//]+", x)
gbk_kwd <- strsplitN(x[gbk_idx], split = " +", 1)
## LOCUS (Mandatory)
locus <- gbk_locus(x[gbk_idx[gbk_kwd == "LOCUS"]])
## DEFINITION (Mandatory; may span several lines up to the next keyword)
def_idx <- which(gbk_kwd == "DEFINITION")
def_line <- x[seq.int(gbk_idx[def_idx], gbk_idx[def_idx + 1] - 1)]
definition <- collapse(sub("DEFINITION ", "", def_line), ' ')
## ACCESSION (Mandatory)
acc_line <- x[gbk_idx[gbk_kwd == "ACCESSION"]]
accession <- strsplitN(acc_line, split = "\\s+", 2L)
## VERSION and GI (Mandatory)
ver_line <- x[gbk_idx[gbk_kwd == "VERSION"]]
version <- usplit(ver_line, split = "\\s+")[2L]
seqid <- paste0('gi|', usplit(ver_line, split = "GI:", fixed = TRUE)[2L])
## DBLINK (Optional)
if (length(db_line <- x[gbk_idx[gbk_kwd == "DBLINK"]]) > 0L) {
dblink <- usplit(db_line, split = "Project: ", fixed = TRUE)[2L]
} else {
dblink <- NA_character_
}
## DBSOURCE (GenPept only; sometimes more than one line)
if (length(dbsrc_idx <- which(gbk_kwd == "DBSOURCE")) > 0L) {
dbs_lines <- x[seq.int(gbk_idx[dbsrc_idx], gbk_idx[dbsrc_idx + 1] - 1)]
dbsource <- collapse(gsub("^ +", "", sub("DBSOURCE", "", dbs_lines)), "\n")
} else {
dbsource <- NA_character_
}
## KEYWORDS (Mandatory)
key_line <- x[gbk_idx[gbk_kwd == "KEYWORDS"]]
keywords <- sub("KEYWORDS ", "", key_line)
## SOURCE with ORGANISM and the complete lineage (Mandatory)
src_idx <- which(gbk_kwd == 'SOURCE')
source_lines <- x[seq.int(gbk_idx[src_idx], gbk_idx[src_idx + 1] - 1)]
source <- sub("SOURCE ", "", source_lines[1L])
organism <- sub(" ORGANISM ", "", source_lines[2L])
taxonomy <- collapse(gsub("^ +", "", source_lines[-c(1L, 2L)]), ' ')
## REFERENCE blocks (run from the first REFERENCE keyword to the line
## before the keyword that follows the last one)
if (length(ref_idx <- which(gbk_kwd == "REFERENCE")) > 0L) {
ref_lines <-
x[
seq.int(
gbk_idx[ref_idx[1]],
# %|na|% is a package infix; presumably "use RHS when LHS is NA",
# i.e. references run to end-of-header when REFERENCE is the last
# keyword -- confirm against the package's operator definitions
(gbk_idx[ref_idx[length(ref_idx)] + 1] - 1) %|na|% length(x)
)]
references <- gbk_reference_list(ref_lines)
} else {
references <- .gbReferenceList()
}
## COMMENT (Optional; runs from the first COMMENT line to end of header)
if (length(gbk_idx[gbk_kwd == "COMMENT"]) > 0L) {
com_lines <- x[seq.int(min(gbk_idx[gbk_kwd == "COMMENT"]), length(x))]
comment <- collapse(gsub("^ +", "", sub("COMMENT", "", com_lines)), "\n")
} else {
comment <- NA_character_
}
.gbHeader(
locus = locus,
definition = definition,
accession = accession,
version = version,
seqid = seqid,
dblink = dblink,
dbsource = dbsource,
keywords = keywords,
source = source,
organism = organism,
taxonomy = taxonomy,
references = references,
comment = comment
)
}
#' @keywords internal
gbk_locus <- function(locus_line) {
# Parse the LOCUS line into a .gbLocus object. After dropping the keyword,
# the tokens are: name, length, unit ('bp'/'aa'), moltype (GenBank only),
# optional topology, division, date -- so topology/division are located
# relative to the final (date) token.
tokens <- usplit(locus_line, split = "\\s+")[-1]
# GenBank format: 'bp', GenPept: 'aa'
gb <- if (tokens[3] == 'bp') TRUE else FALSE
date_idx <- length(tokens)
divi_idx <- date_idx - 1
topo_idx <- date_idx - 2
# fewer tokens than a full line implies the optional topology field is absent
if (gb && date_idx < 7 || !gb && date_idx < 6) {
# topology is missing
topo_idx <- NULL
}
.gbLocus(
lnm = tokens[1], # locus name
len = tokens[2], # sequence length
mtp = if (gb) tokens[4] else 'AA', # molecule type (GenPept is always AA)
top = tokens[topo_idx] %||% NA_character_, # topology, if present
div = tokens[divi_idx] %||% NA_character_, # division code
cdt = tokens[date_idx], # creation date
mdt = tokens[date_idx] # modification date (same flat-file field)
)
}
#' @keywords internal
gbk_reference <- function(ref) {
# Parse one REFERENCE block (character vector of its lines) into a
# reference object via the set_reference() builder.
## split by subkeywords (AUTHORS, TITLE, ... start within the first 4 columns)
ref_idx <- grep("^ {0,3}[A-Z]+", ref)
ref_list <- ixsplit(ref, ref_idx, include_i = TRUE, collapse_x = TRUE)
## subkeyword of each chunk, and the text that follows it
kwd <- vapply(ref_list, strsplitN, '\\s+', 1L, FUN.VALUE = "")
field <- vapply(ref_list, strsplitN, '^[A-Z]+\\s+(?!\\S)\\s', 2L, perl = TRUE, FUN.VALUE = "")
## feed each field into the builder; absent subkeywords pass an empty selection
ref <- set_reference()
ref$refline(field[kwd == "REFERENCE"])
ref$authors(field[kwd == "AUTHORS"])
ref$consrtm(field[kwd == "CONSRTM"])
ref$title(field[kwd == "TITLE"])
ref$journal(field[kwd == "JOURNAL"])
ref$pubmed(field[kwd == "PUBMED"])
ref$remark(field[kwd == "REMARK"])
ref$yield()
}
#' @keywords internal
gbk_reference_list <- function(ref_lines) {
# Split the REFERENCE section into individual reference blocks (one per
# line containing "REFERENCE") and parse each with gbk_reference().
## split references
ref_idx <- grep("REFERENCE", ref_lines, fixed = TRUE, ignore.case = FALSE)
ref_list <- ixsplit(ref_lines, ref_idx)
.gbReferenceList(ref = lapply(ref_list, gbk_reference))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.