content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Queue: a simple FIFO container implemented as a Reference Class.
# Items live in the `data` list; element 1 is always the head of the queue.
Queue <- setRefClass(Class = "Queue",
  fields = list(
    name = "character",  # optional, user-supplied label for the queue
    data = "list"        # backing store; data[[1]] is the head
  ),
  methods = list(
    size = function() {
      'Returns the number of items in the queue.'
      return(length(data))
    },
    push = function(item) {
      'Inserts element at back of the queue.'
      data[[size() + 1]] <<- item
    },
    pop = function() {
      'Removes and returns head of queue (or raises error if queue is empty).'
      if (size() == 0) stop("queue is empty!")
      head.item <- data[[1]]
      # Assigning NULL to a list element deletes it, shifting the rest forward.
      data[[1]] <<- NULL
      head.item
    },
    poll = function() {
      'Removes and returns head of queue (or NULL if queue is empty).'
      if (size() == 0) NULL else pop()
    },
    peek = function(pos = c(1)) {
      'Returns (but does not remove) specified positions in queue (or NULL if any one of them is not available).'
      if (size() < max(pos)) return(NULL)
      # A single position unwraps the element; several positions return a sub-list.
      if (length(pos) == 1) data[[pos]] else data[pos]
    },
    contains = function(item) {
      # TRUE if `item` matches any stored element (match() semantics of %in%).
      return(item %in% data)
    },
    clear = function() {
      'Clears queue.'
      data <<- list()
    },
    initialize = function(...) {
      callSuper(...)
      # Hook for extra field initialisation (placeholder).
      .self
    }
  )
)
fields = list(
name = "character",
data = "list"
),
methods = list(
size = function() {
'Returns the number of items in the queue.'
return(length(data))
},
#
push = function(item) {
'Inserts element at back of the queue.'
data[[size()+1]] <<- item
},
#
pop = function() {
'Removes and returns head of queue (or raises error if queue is empty).'
if (size() == 0) stop("queue is empty!")
value <- data[[1]]
data[[1]] <<- NULL
value
},
#
poll = function() {
'Removes and returns head of queue (or NULL if queue is empty).'
if (size() == 0) return(NULL)
else pop()
},
#
peek = function(pos = c(1)) {
'Returns (but does not remove) specified positions in queue (or NULL if any one of them is not available).'
if (size() < max(pos)) return(NULL)
#
if (length(pos) == 1) return(data[[pos]])
else return(data[pos])
},
contains = function (item) {
return(item %in% data)
},
clear = function() {
'Clears queue.'
data <<- list()
},
initialize=function(...) {
callSuper(...)
#
# Initialise fields here (place holder)...
#
.self
}
)
) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncpen_cpp_wrap.R
\name{cv.ncpen}
\alias{cv.ncpen}
\title{cv.ncpen: cross validation for \code{ncpen}}
\usage{
cv.ncpen(y.vec, x.mat, family = c("gaussian", "linear", "binomial",
"logit", "poisson", "multinomial", "cox"), penalty = c("scad", "mcp",
"tlp", "lasso", "classo", "ridge", "sridge", "mbridge", "mlog"),
x.standardize = TRUE, intercept = TRUE, lambda = NULL,
n.lambda = NULL, r.lambda = NULL, w.lambda = NULL, gamma = NULL,
tau = NULL, alpha = NULL, df.max = 50, cf.max = 100,
proj.min = 10, add.max = 10, niter.max = 30, qiter.max = 10,
aiter.max = 100, b.eps = 1e-06, k.eps = 1e-04, c.eps = 1e-06,
cut = TRUE, local = FALSE, local.initial = NULL, n.fold = 10,
fold.id = NULL)
}
\arguments{
\item{y.vec}{(numeric vector) response vector.
Must be 0,1 for \code{binomial} and 1,2,..., for \code{multinomial}.}
\item{x.mat}{(numeric matrix) design matrix without intercept.
The censoring indicator must be included at the last column of the design matrix for \code{cox}.}
\item{family}{(character) regression model. Supported models are
\code{gaussian} (or \code{linear}),
\code{binomial} (or \code{logit}),
\code{poisson},
\code{multinomial},
and \code{cox}.
Default is \code{gaussian}.}
\item{penalty}{(character) penalty function.
Supported penalties are
\code{scad} (smoothly clipped absolute deviation),
\code{mcp} (minimax concave penalty),
\code{tlp} (truncated LASSO penalty),
\code{lasso} (least absolute shrinkage and selection operator),
\code{classo} (clipped lasso = mcp + lasso),
\code{ridge} (ridge),
\code{sridge} (sparse ridge = mcp + ridge),
\code{mbridge} (modified bridge) and
\code{mlog} (modified log).
Default is \code{scad}.}
\item{x.standardize}{(logical) whether to standardize \code{x.mat} prior to fitting the model (see details).
The estimated coefficients are always restored to the original scale.}
\item{intercept}{(logical) whether to include an intercept in the model.}
\item{lambda}{(numeric vector) user-specified sequence of \code{lambda} values.
Default is supplied automatically from samples.}
\item{n.lambda}{(numeric) the number of \code{lambda} values.
Default is 100.}
\item{r.lambda}{(numeric) ratio of the smallest \code{lambda} value to largest.
Default is 0.001 when n>p, and 0.01 for other cases.}
\item{w.lambda}{(numeric vector) penalty weights for each coefficient (see references).
If a penalty weight is set to 0, the corresponding coefficient is always nonzero.}
\item{gamma}{(numeric) additional tuning parameter for controlling shrinkage effect of \code{classo} and \code{sridge} (see references).
Default is half of the smallest \code{lambda}.}
\item{tau}{(numeric) concavity parameter of the penalties (see reference).
Default is 3.7 for \code{scad}, 2.1 for \code{mcp}, \code{classo} and \code{sridge}, 0.001 for \code{tlp}, \code{mbridge} and \code{mlog}.}
\item{alpha}{(numeric) ridge effect (weight between the penalty and ridge penalty) (see details).
Default value is 1. If penalty is \code{ridge} and \code{sridge} then \code{alpha} is set to 0.}
\item{df.max}{(numeric) the maximum number of nonzero coefficients.}
\item{cf.max}{(numeric) the maximum of absolute value of nonzero coefficients.}
\item{proj.min}{(numeric) the projection cycle inside CD algorithm (largely internal use. See details).}
\item{add.max}{(numeric) the maximum number of variables added in CCCP iterations (largely internal use. See references).}
\item{niter.max}{(numeric) maximum number of iterations in CCCP.}
\item{qiter.max}{(numeric) maximum number of quadratic approximations in each CCCP iteration.}
\item{aiter.max}{(numeric) maximum number of iterations in CD algorithm.}
\item{b.eps}{(numeric) convergence threshold for coefficients vector.}
\item{k.eps}{(numeric) convergence threshold for KKT conditions.}
\item{c.eps}{(numeric) convergence threshold for KKT conditions (largely internal use).}
\item{cut}{(logical) convergence threshold for KKT conditions (largely internal use).}
\item{local}{(logical) whether to use local initial estimator for path construction. It may take a long time.}
\item{local.initial}{(numeric vector) initial estimator for \code{local=TRUE}.}
\item{n.fold}{(numeric) number of folds for CV.}
\item{fold.id}{(numeric vector) fold ids from 1 to k that indicate fold configuration.}
}
\value{
An object with S3 class \code{cv.ncpen}.
\item{ncpen.fit}{ncpen object fitted from the whole samples.}
\item{fold.index}{fold ids of the samples.}
\item{rmse}{root mean squared errors from CV.}
\item{like}{negative log-likelihoods from CV.}
\item{lambda}{sequence of \code{lambda} used for CV.}
}
\description{
performs k-fold cross-validation (CV) for nonconvex penalized regression models
over a sequence of the regularization parameter \code{lambda}.
}
\details{
Two kinds of CV errors are returned: root mean squared error and negative log likelihood.
The results depend on the random partition made internally.
To choose optimal coefficients from the CV results, use \code{\link{coef.cv.ncpen}}.
\code{ncpen} does not search values of \code{gamma}, \code{tau} and \code{alpha}.
}
\examples{
### linear regression with scad penalty
sam = sam.gen.ncpen(n=200,p=10,q=5,cf.min=0.5,cf.max=1,corr=0.5,family="gaussian")
x.mat = sam$x.mat; y.vec = sam$y.vec
fit = cv.ncpen(y.vec=y.vec,x.mat=x.mat,n.lambda=10,family="gaussian", penalty="scad")
coef(fit)
}
\references{
Fan, J. and Li, R. (2001). Variable selection via nonconcave penalized likelihood and its oracle properties.
\emph{Journal of the American statistical Association}, 96, 1348-60.
Zhang, C.H. (2010). Nearly unbiased variable selection under minimax concave penalty.
\emph{The Annals of statistics}, 38(2), 894-942.
Shen, X., Pan, W., Zhu, Y. and Zhou, H. (2013). On constrained and regularized high-dimensional regression.
\emph{Annals of the Institute of Statistical Mathematics}, 65(5), 807-832.
Kwon, S., Lee, S. and Kim, Y. (2016). Moderately clipped LASSO.
\emph{Computational Statistics and Data Analysis}, 92C, 53-67.
Kwon, S. Kim, Y. and Choi, H.(2013). Sparse bridge estimation with a diverging number of parameters.
\emph{Statistics and Its Interface}, 6, 231-242.
Huang, J., Horowitz, J.L. and Ma, S. (2008). Asymptotic properties of bridge estimators in sparse high-dimensional regression models.
\emph{The Annals of Statistics}, 36(2), 587-613.
Zou, H. and Li, R. (2008). One-step sparse estimates in nonconcave penalized likelihood models.
\emph{Annals of statistics}, 36(4), 1509.
Lee, S., Kwon, S. and Kim, Y. (2016). A modified local quadratic approximation algorithm for penalized optimization problems.
\emph{Computational Statistics and Data Analysis}, 94, 275-286.
}
\seealso{
\code{\link{plot.cv.ncpen}}, \code{\link{coef.cv.ncpen}}, \code{\link{ncpen}}, \code{\link{predict.ncpen}}
}
\author{
Dongshin Kim, Sunghoon Kwon, Sangin Lee
}
| /man/cv.ncpen.Rd | no_license | zeemkr/ncpen | R | false | true | 6,945 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ncpen_cpp_wrap.R
\name{cv.ncpen}
\alias{cv.ncpen}
\title{cv.ncpen: cross validation for \code{ncpen}}
\usage{
cv.ncpen(y.vec, x.mat, family = c("gaussian", "linear", "binomial",
"logit", "poisson", "multinomial", "cox"), penalty = c("scad", "mcp",
"tlp", "lasso", "classo", "ridge", "sridge", "mbridge", "mlog"),
x.standardize = TRUE, intercept = TRUE, lambda = NULL,
n.lambda = NULL, r.lambda = NULL, w.lambda = NULL, gamma = NULL,
tau = NULL, alpha = NULL, df.max = 50, cf.max = 100,
proj.min = 10, add.max = 10, niter.max = 30, qiter.max = 10,
aiter.max = 100, b.eps = 1e-06, k.eps = 1e-04, c.eps = 1e-06,
cut = TRUE, local = FALSE, local.initial = NULL, n.fold = 10,
fold.id = NULL)
}
\arguments{
\item{y.vec}{(numeric vector) response vector.
Must be 0,1 for \code{binomial} and 1,2,..., for \code{multinomial}.}
\item{x.mat}{(numeric matrix) design matrix without intercept.
The censoring indicator must be included at the last column of the design matrix for \code{cox}.}
\item{family}{(character) regression model. Supported models are
\code{gaussian} (or \code{linear}),
\code{binomial} (or \code{logit}),
\code{poisson},
\code{multinomial},
and \code{cox}.
Default is \code{gaussian}.}
\item{penalty}{(character) penalty function.
Supported penalties are
\code{scad} (smoothly clipped absolute deviation),
\code{mcp} (minimax concave penalty),
\code{tlp} (truncated LASSO penalty),
\code{lasso} (least absolute shrinkage and selection operator),
\code{classo} (clipped lasso = mcp + lasso),
\code{ridge} (ridge),
\code{sridge} (sparse ridge = mcp + ridge),
\code{mbridge} (modified bridge) and
\code{mlog} (modified log).
Default is \code{scad}.}
\item{x.standardize}{(logical) whether to standardize \code{x.mat} prior to fitting the model (see details).
The estimated coefficients are always restored to the original scale.}
\item{intercept}{(logical) whether to include an intercept in the model.}
\item{lambda}{(numeric vector) user-specified sequence of \code{lambda} values.
Default is supplied automatically from samples.}
\item{n.lambda}{(numeric) the number of \code{lambda} values.
Default is 100.}
\item{r.lambda}{(numeric) ratio of the smallest \code{lambda} value to largest.
Default is 0.001 when n>p, and 0.01 for other cases.}
\item{w.lambda}{(numeric vector) penalty weights for each coefficient (see references).
If a penalty weight is set to 0, the corresponding coefficient is always nonzero.}
\item{gamma}{(numeric) additional tuning parameter for controlling shrinkage effect of \code{classo} and \code{sridge} (see references).
Default is half of the smallest \code{lambda}.}
\item{tau}{(numeric) concavity parameter of the penalties (see reference).
Default is 3.7 for \code{scad}, 2.1 for \code{mcp}, \code{classo} and \code{sridge}, 0.001 for \code{tlp}, \code{mbridge} and \code{mlog}.}
\item{alpha}{(numeric) ridge effect (weight between the penalty and ridge penalty) (see details).
Default value is 1. If penalty is \code{ridge} and \code{sridge} then \code{alpha} is set to 0.}
\item{df.max}{(numeric) the maximum number of nonzero coefficients.}
\item{cf.max}{(numeric) the maximum of absolute value of nonzero coefficients.}
\item{proj.min}{(numeric) the projection cycle inside CD algorithm (largely internal use. See details).}
\item{add.max}{(numeric) the maximum number of variables added in CCCP iterations (largely internal use. See references).}
\item{niter.max}{(numeric) maximum number of iterations in CCCP.}
\item{qiter.max}{(numeric) maximum number of quadratic approximations in each CCCP iteration.}
\item{aiter.max}{(numeric) maximum number of iterations in CD algorithm.}
\item{b.eps}{(numeric) convergence threshold for coefficients vector.}
\item{k.eps}{(numeric) convergence threshold for KKT conditions.}
\item{c.eps}{(numeric) convergence threshold for KKT conditions (largely internal use).}
\item{cut}{(logical) convergence threshold for KKT conditions (largely internal use).}
\item{local}{(logical) whether to use local initial estimator for path construction. It may take a long time.}
\item{local.initial}{(numeric vector) initial estimator for \code{local=TRUE}.}
\item{n.fold}{(numeric) number of folds for CV.}
\item{fold.id}{(numeric vector) fold ids from 1 to k that indicate fold configuration.}
}
\value{
An object with S3 class \code{cv.ncpen}.
\item{ncpen.fit}{ncpen object fitted from the whole samples.}
\item{fold.index}{fold ids of the samples.}
\item{rmse}{root mean squared errors from CV.}
\item{like}{negative log-likelihoods from CV.}
\item{lambda}{sequence of \code{lambda} used for CV.}
}
\description{
performs k-fold cross-validation (CV) for nonconvex penalized regression models
over a sequence of the regularization parameter \code{lambda}.
}
\details{
Two kinds of CV errors are returned: root mean squared error and negative log likelihood.
The results depend on the random partition made internally.
To choose optimal coefficients from the CV results, use \code{\link{coef.cv.ncpen}}.
\code{ncpen} does not search values of \code{gamma}, \code{tau} and \code{alpha}.
}
\examples{
### linear regression with scad penalty
sam = sam.gen.ncpen(n=200,p=10,q=5,cf.min=0.5,cf.max=1,corr=0.5,family="gaussian")
x.mat = sam$x.mat; y.vec = sam$y.vec
fit = cv.ncpen(y.vec=y.vec,x.mat=x.mat,n.lambda=10,family="gaussian", penalty="scad")
coef(fit)
}
\references{
Fan, J. and Li, R. (2001). Variable selection via nonconcave penalized likelihood and its oracle properties.
\emph{Journal of the American statistical Association}, 96, 1348-60.
Zhang, C.H. (2010). Nearly unbiased variable selection under minimax concave penalty.
\emph{The Annals of statistics}, 38(2), 894-942.
Shen, X., Pan, W., Zhu, Y. and Zhou, H. (2013). On constrained and regularized high-dimensional regression.
\emph{Annals of the Institute of Statistical Mathematics}, 65(5), 807-832.
Kwon, S., Lee, S. and Kim, Y. (2016). Moderately clipped LASSO.
\emph{Computational Statistics and Data Analysis}, 92C, 53-67.
Kwon, S. Kim, Y. and Choi, H.(2013). Sparse bridge estimation with a diverging number of parameters.
\emph{Statistics and Its Interface}, 6, 231-242.
Huang, J., Horowitz, J.L. and Ma, S. (2008). Asymptotic properties of bridge estimators in sparse high-dimensional regression models.
\emph{The Annals of Statistics}, 36(2), 587-613.
Zou, H. and Li, R. (2008). One-step sparse estimates in nonconcave penalized likelihood models.
\emph{Annals of statistics}, 36(4), 1509.
Lee, S., Kwon, S. and Kim, Y. (2016). A modified local quadratic approximation algorithm for penalized optimization problems.
\emph{Computational Statistics and Data Analysis}, 94, 275-286.
}
\seealso{
\code{\link{plot.cv.ncpen}}, \code{\link{coef.cv.ncpen}}, \code{\link{ncpen}}, \code{\link{predict.ncpen}}
}
\author{
Dongshin Kim, Sunghoon Kwon, Sangin Lee
}
|
#' Calculating a simulated marginal effect
#'
#' This function is called by \code{\link[coxed]{sim.survdata}} and is not intended to be used by itself.
#' @param baseline The baseline hazard functions, output by \code{\link[coxed]{baseline.build}}
#' @param xb The simulated data, output by \code{\link[coxed]{generate.lm}}
#' @param covariate Specification of the column number of the covariate in the \code{X} matrix for which to generate a simulated marginal effect (default is 1).
#' The marginal effect is the difference in expected duration when the covariate is fixed at a high value and the expected duration when the covariate is fixed
#' at a low value
#' @param low The low value of the covariate for which to calculate a marginal effect
#' @param high The high value of the covariate for which to calculate a marginal effect
#' @param compare The statistic to employ when examining the two new vectors of expected durations (see details for \code{\link[coxed]{sim.survdata}}). The default is \code{median}
#' @details The idea is to simulate a marginal change in duration so that researchers can compare the performance of
#' estimators of this statistic using simulated data.
#'
#' The function calculates simulated durations for each observation conditional on a baseline hazard function
#' and exogenous covariates and coefficients. The \code{covariate} argument specifies the variable in the X matrix to
#' vary so as to measure the marginal effect. First the covariate is set to the value specified in \code{low} for all
#' observations, then to the value specified in \code{high} for all observations. Given each value, new durations are
#' drawn. The durations when the covariate equals the low value are subtracted from the durations when the covariate
#' equals the high value. The marginal effect is calculated by employing the statistic given by \code{compare}, which
#' is \code{median} by default.
#' @return A list with three items:
#' \tabular{ll}{
#' \code{marg.effect} \tab A scalar containing the simulated marginal effect\cr
#' \code{data.low} \tab The durations and covariates when the covariate of interest is set to the low value \cr
#' \code{data.high} \tab The durations and covariates when the covariate of interest is set to the high value \cr
#' }
#' @author Jonathan Kropko <jkropko@@virginia.edu> and Jeffrey J. Harden <jharden2@@nd.edu>
#' @seealso \code{\link[coxed]{baseline.build}}, \code{\link[coxed]{generate.lm}}, \code{\link[coxed]{sim.survdata}}
#' @export
#' @examples
#' T <- 100
#' N <- 1000
#' X <- as.matrix(data.frame(X1=rnorm(N), X2=rnorm(N), X3=rnorm(N)))
#' beta <- as.matrix(rnorm(3))
#' baseline <- baseline.build(T=T, knots=8, spline=TRUE)
#' xb <- generate.lm(baseline, X=X, beta=beta, N=N, censor=.1, type="none")
#' me <- make.margeffect(baseline, xb, covariate=1, low=0, high=1)
#' me$marg.effect
make.margeffect <- function(baseline, xb, covariate = 1, low = 0, high = 1, compare = median) {
  # Extract the covariate matrix; which bookkeeping columns to drop depends on
  # whether the simulated data contain time-varying covariates.
  if (xb$tvc) {
    X0 <- dplyr::select(xb$data, -id, -failed, -start, -end)
  } else {
    X0 <- dplyr::select(xb$data, -y)
  }
  X1 <- X0
  # Fix the covariate of interest at its low / high value for every observation.
  X0[, covariate] <- low
  X1[, covariate] <- high
  beta <- xb$beta
  # Multi-column coefficient matrices carry a reference column first; drop it.
  # drop = FALSE keeps beta a matrix even when exactly one column remains
  # (the former beta[, -1] collapsed a 2-column matrix to a bare vector, making
  # the subsequent ncol() test fail with "argument is of length zero").
  if (!is.null(ncol(beta)) && ncol(beta) > 1) beta <- beta[, -1, drop = FALSE]
  beta <- as.matrix(beta)
  # Draw durations under the low scenario first, then the high scenario, so the
  # RNG stream is consumed in the same order as before the refactoring.
  y0 <- .draw.durations(X0, beta, baseline)
  y1 <- .draw.durations(X1, beta, baseline)
  marg.effect <- compare(y1 - y0)
  return(list(marg.effect = marg.effect,
              data.low = list(x = X0, y = y0),
              data.high = list(x = X1, y = y1)))
}

# Internal helper: draw one simulated duration per observation.
# For each row, the linear predictor X %*% beta tilts the baseline survivor
# function; the duration is the first period in which survival drops below a
# uniform random draw (inverse-CDF sampling via which.max(diff(...))).
.draw.durations <- function(X, beta, baseline) {
  XB <- if (ncol(beta) == 1) as.matrix(X) %*% beta else as.matrix(X) %*% t(beta)
  survival <- t(apply(XB, 1, FUN = function(xb.i) {baseline$survivor^exp(xb.i)}))
  apply(survival, 1, FUN = function(surv.i) {
    which.max(diff(surv.i < runif(1)))
  })
}
| /R/make.margeffect.R | no_license | cran/coxed | R | false | false | 4,395 | r | #' Calculating a simulated marginal effect
#'
#' This function is called by \code{\link[coxed]{sim.survdata}} and is not intended to be used by itself.
#' @param baseline The baseline hazard functions, output by \code{\link[coxed]{baseline.build}}
#' @param xb The simulated data, output by \code{\link[coxed]{generate.lm}}
#' @param covariate Specification of the column number of the covariate in the \code{X} matrix for which to generate a simulated marginal effect (default is 1).
#' The marginal effect is the difference in expected duration when the covariate is fixed at a high value and the expected duration when the covariate is fixed
#' at a low value
#' @param low The low value of the covariate for which to calculate a marginal effect
#' @param high The high value of the covariate for which to calculate a marginal effect
#' @param compare The statistic to employ when examining the two new vectors of expected durations (see details for \code{\link[coxed]{sim.survdata}}). The default is \code{median}
#' @details The idea is to simulate a marginal change in duration so that researchers can compare the performance of
#' estimators of this statistic using simulated data.
#'
#' The function calculates simulated durations for each observation conditional on a baseline hazard function
#' and exogenous covariates and coefficients. The \code{covariate} argument specifies the variable in the X matrix to
#' vary so as to measure the marginal effect. First the covariate is set to the value specified in \code{low} for all
#' observations, then to the value specified in \code{high} for all observations. Given each value, new durations are
#' drawn. The durations when the covariate equals the low value are subtracted from the durations when the covariate
#' equals the high value. The marginal effect is calculated by employing the statistic given by \code{compare}, which
#' is \code{median} by default.
#' @return A list with three items:
#' \tabular{ll}{
#' \code{marg.effect} \tab A scalar containing the simulated marginal effect\cr
#' \code{data.low} \tab The durations and covariates when the covariate of interest is set to the low value \cr
#' \code{data.high} \tab The durations and covariates when the covariate of interest is set to the high value \cr
#' }
#' @author Jonathan Kropko <jkropko@@virginia.edu> and Jeffrey J. Harden <jharden2@@nd.edu>
#' @seealso \code{\link[coxed]{baseline.build}}, \code{\link[coxed]{generate.lm}}, \code{\link[coxed]{sim.survdata}}
#' @export
#' @examples
#' T <- 100
#' N <- 1000
#' X <- as.matrix(data.frame(X1=rnorm(N), X2=rnorm(N), X3=rnorm(N)))
#' beta <- as.matrix(rnorm(3))
#' baseline <- baseline.build(T=T, knots=8, spline=TRUE)
#' xb <- generate.lm(baseline, X=X, beta=beta, N=N, censor=.1, type="none")
#' me <- make.margeffect(baseline, xb, covariate=1, low=0, high=1)
#' me$marg.effect
# Simulated marginal effect of one covariate on duration (see roxygen above).
# Sets the covariate to `low` for all observations, then to `high`, draws new
# durations under each scenario, and summarises the difference with `compare`.
make.margeffect <- function(baseline, xb, covariate = 1, low = 0, high = 1, compare = median) {
  # Extract the covariate matrix; which bookkeeping columns to drop depends on
  # whether the simulated data contain time-varying covariates.
  if (xb$tvc) {
    X0 <- dplyr::select(xb$data, -id, -failed, -start, -end)
  } else {
    X0 <- dplyr::select(xb$data, -y)
  }
  X1 <- X0
  # Fix the covariate of interest at its low / high value for every observation.
  X0[, covariate] <- low
  X1[, covariate] <- high
  beta <- xb$beta
  # Multi-column coefficient matrices carry a reference column first; drop it.
  # drop = FALSE keeps beta a matrix even when exactly one column remains
  # (the former beta[, -1] collapsed a 2-column matrix to a bare vector, making
  # the subsequent ncol() test fail with "argument is of length zero").
  if (!is.null(ncol(beta)) && ncol(beta) > 1) beta <- beta[, -1, drop = FALSE]
  beta <- as.matrix(beta)
  # Draw durations under the low scenario first, then the high scenario, so the
  # RNG stream is consumed in the same order as before the refactoring.
  y0 <- .draw.durations(X0, beta, baseline)
  y1 <- .draw.durations(X1, beta, baseline)
  marg.effect <- compare(y1 - y0)
  return(list(marg.effect = marg.effect,
              data.low = list(x = X0, y = y0),
              data.high = list(x = X1, y = y1)))
}

# Internal helper: draw one simulated duration per observation.
# For each row, the linear predictor X %*% beta tilts the baseline survivor
# function; the duration is the first period in which survival drops below a
# uniform random draw (inverse-CDF sampling via which.max(diff(...))).
.draw.durations <- function(X, beta, baseline) {
  XB <- if (ncol(beta) == 1) as.matrix(X) %*% beta else as.matrix(X) %*% t(beta)
  survival <- t(apply(XB, 1, FUN = function(xb.i) {baseline$survivor^exp(xb.i)}))
  apply(survival, 1, FUN = function(surv.i) {
    which.max(diff(surv.i < runif(1)))
  })
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_helpers.R
\name{all_data_types}
\alias{all_data_types}
\title{All data types}
\usage{
all_data_types(
model,
complete_data = FALSE,
possible_data = FALSE,
given = NULL
)
}
\arguments{
\item{model}{A \code{causal_model}. A model object generated by \code{\link{make_model}}.}
\item{complete_data}{Logical. If `TRUE` returns only complete data types (no NAs). Defaults to `FALSE`.}
\item{possible_data}{Logical. If `TRUE` returns only complete data types (no NAs) that are *possible* given model restrictions. Note that in principle an intervention could make observationally impossible data types arise. Defaults to `FALSE`.}
\item{given}{A character. A quoted statement that evaluates to logical. Data conditional on specific values.}
}
\value{
A \code{data.frame} with all data types (including NA types) that are possible from a model.
}
\description{
Creates a data frame with all data types (including NA types) that are possible from a model.
}
\examples{
\donttest{
all_data_types(make_model('X -> Y'))
model <- make_model('X -> Y') \%>\% set_restrictions(labels = list(Y = '00'), keep = TRUE)
all_data_types(model)
all_data_types(model, complete_data = TRUE)
all_data_types(model, possible_data = TRUE)
all_data_types(model, given = 'X==1')
all_data_types(model, given = 'X==1 & Y==1')
}
}
| /man/all_data_types.Rd | no_license | yadmasu1/CausalQueries | R | false | true | 1,401 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_helpers.R
\name{all_data_types}
\alias{all_data_types}
\title{All data types}
\usage{
all_data_types(
model,
complete_data = FALSE,
possible_data = FALSE,
given = NULL
)
}
\arguments{
\item{model}{A \code{causal_model}. A model object generated by \code{\link{make_model}}.}
\item{complete_data}{Logical. If `TRUE` returns only complete data types (no NAs). Defaults to `FALSE`.}
\item{possible_data}{Logical. If `TRUE` returns only complete data types (no NAs) that are *possible* given model restrictions. Note that in principle an intervention could make observationally impossible data types arise. Defaults to `FALSE`.}
\item{given}{A character. A quoted statement that evaluates to logical. Data conditional on specific values.}
}
\value{
A \code{data.frame} with all data types (including NA types) that are possible from a model.
}
\description{
Creates a data frame with all data types (including NA types) that are possible from a model.
}
\examples{
\donttest{
all_data_types(make_model('X -> Y'))
model <- make_model('X -> Y') \%>\% set_restrictions(labels = list(Y = '00'), keep = TRUE)
all_data_types(model)
all_data_types(model, complete_data = TRUE)
all_data_types(model, possible_data = TRUE)
all_data_types(model, given = 'X==1')
all_data_types(model, given = 'X==1 & Y==1')
}
}
|
# plot2.R -- Electric Power Consumption, plot 2.
# Draws Global Active Power (kilowatts) against time for 2007-02-01 and
# 2007-02-02 and writes the figure to plot2.png (dev.off() follows below).

# Use the C locale so date/time parsing behaves identically across systems.
Sys.setlocale(category = "LC_ALL", locale = "C")

# Fetch and unpack the data set if it is not already on disk.
zip.file <- "household_power_consumption.zip"
txt.file <- "household_power_consumption.txt"
if (!file.exists(txt.file)) {
  download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                destfile = zip.file)
  unzip(zip.file)
}

# data.table::fread is much faster than read.table; "?" marks missing values.
library(data.table)
consumption <- fread(txt.file, colClasses = "character", na.strings = "?", data.table = FALSE)

# Parse the Date column, then keep only the two days of interest.
consumption$Date <- as.Date(consumption$Date, format = "%d/%m/%Y")
feb.first <- as.Date("2007-02-01")
feb.second <- as.Date("2007-02-02")
two.days <- consumption[consumption$Date == feb.first | consumption$Date == feb.second, ]

# Drop the full table to save memory.
rm(consumption)

# Combine Date and Time into a single POSIXlt timestamp and make the
# measurement numeric.
two.days$Time <- strptime(paste(two.days$Date, two.days$Time), format = "%Y-%m-%d %H:%M:%S")
two.days$Global_active_power <- as.numeric(two.days$Global_active_power)

png("plot2.png")
plot(two.days$Time, two.days$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off() | /plot2.R | no_license | cwagner78/ExData_Plotting1 | R | false | false | 1,283 | r | #set locale to default
# Use the C locale so date/time parsing behaves identically across systems.
Sys.setlocale(category = "LC_ALL", locale = "C")
# Read data with data.table::fread (much faster than read.table).
# Download and unpack the data set first if it is not already present.
if (!file.exists("household_power_consumption.txt")) {
download.file(url="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="household_power_consumption.zip")
unzip("household_power_consumption.zip")
}
library(data.table)
# "?" encodes missing values in this data set; read everything as character first.
power.dat<-fread("household_power_consumption.txt",colClasses = "character",na.strings="?",data.table=FALSE)
# Convert Date strings (dd/mm/yyyy) to class Date.
power.dat$Date<- as.Date(power.dat$Date,format="%d/%m/%Y")
# Keep only the two specified days.
reduced.power.dat<-power.dat[power.dat$Date==as.Date("2007-02-01") | power.dat$Date==as.Date("2007-02-02"),]
# Remove the full data set to save memory.
rm(power.dat)
# Combine Date and Time into a single POSIXlt timestamp.
reduced.power.dat$Time<- strptime(paste(reduced.power.dat$Date,reduced.power.dat$Time),format="%Y-%m-%d %H:%M:%S")
# Convert the measurement from character to numeric.
reduced.power.dat$Global_active_power<-as.numeric(reduced.power.dat$Global_active_power)
png("plot2.png")
plot(reduced.power.dat$Time,reduced.power.dat$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)")
dev.off() |
library(colorscience)
### Name: chromaticity.diagram
### Title: Plot the chromaticity diagram
### Aliases: chromaticity.diagram
### Keywords: datasets

### ** Examples

# Default CIE 1931 xy chromaticity diagram.
chromaticity.diagram()

# Restrict both axes to the unit interval.
xl <- 0:1
yl <- 0:1
chromaticity.diagram(xlim = xl, ylim = yl)

# CIE 1976 u'v' diagram via an explicit conversion function.
chromaticity.diagram(conversionFunction = CIE1931XYZ2CIE1976uv, xlim = xl, ylim = yl,
                     xlab = "u'", ylab = "v'")
| /data/genthat_extracted_code/colorscience/examples/chromaticity.diagram.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 347 | r | library(colorscience)
### Name: chromaticity.diagram
### Title: Plot the chromaticity diagram
### Aliases: chromaticity.diagram
### Keywords: datasets
### ** Examples
chromaticity.diagram()
xl<-yl<-0:1
chromaticity.diagram(xlim=xl,ylim=yl)
chromaticity.diagram(conversionFunction=CIE1931XYZ2CIE1976uv, xlim=xl,ylim=yl,
xlab="u'",ylab="v'")
|
#' Set up/retrieve the directory structure for the checks
#'
#' Currently the following files and directories are used.
#' They are all in the main revdep directory, which is `revdep` in the
#' package tree.
#' * `library`: a collection of package libraries
#' * `library/data.sqlite`: the SQLite database that contains the check
#' data.
#' * `library/<checked-pkg>/old`: library that contains the *old* version
#' of the revdep-checked package, together with its dependencies.
#' * `library/<checked-pkg>/new`: library that contains the *new* version
#' of the revdep-checked package, together with its dependencies.
#' * `library/<pkg>` are the libraries for the reverse dependencies.
#'
#' @param pkgdir Path to the package we are revdep-checking.
#' @param what Directory to query:
#' * `"root"`: the root of the check directory,
#' * `"db"`: the database file,
#' * `"old"`: the library of the old version of the package.
#' * `"new"`: the library of the new version of the package.
#' * `"pkg"`: the library of the reverse dependency, the `package`
#' argument must be supplied as well.
#' * `"check"`: the check directory of the reverse dependency, the
#' `package` argument must be supplied as well.
#' * `"pkgold"`: package libraries to use when checking `package` with
#' the old version.
#' * `"pkgnew"`: package libraries to use when checking `package` with
#' the new version.
#' @param package The name of the package, if `what` is `"pkg"`, `"check"`,
#' `"pkgold"` or `"pkgnew"`.
#' @return Character scalar, the requested path.
#'
#' @keywords internal
dir_find <- function(pkgdir,
                     what = c("root", "db", "old", "new", "pkg", "check",
                              "checks", "lib", "pkgold", "pkgnew"),
                     package = NULL) {
  pkgdir <- pkg_check(pkgdir)
  pkg <- pkg_name(pkgdir)
  # All locations hang off <pkgdir>/revdep; compute the shared roots once.
  root <- file.path(pkgdir, "revdep")
  lib <- file.path(root, "library")
  switch(match.arg(what),
    root   = root,
    db     = file.path(root, "data.sqlite"),
    checks = file.path(root, "checks"),
    check  = file.path(root, "checks", package),
    lib    = lib,
    pkg    = file.path(lib, package),
    old    = file.path(lib, pkg, "old"),
    new    = file.path(lib, pkg, "new"),
    ## Order is important here, because installs should go to the first
    pkgold = c(file.path(lib, package), file.path(lib, pkg, "old")),
    pkgnew = c(file.path(lib, package), file.path(lib, pkg, "new"))
  )
}
#' @export
#' @rdname dir_find
dir_setup <- function(pkgdir) {
  # Create the revdep root and the checks directory for this package tree.
  root_dir <- dir_find(pkgdir, "root")
  checks_dir <- dir_find(pkgdir, "checks")
  dir_create(root_dir)
  dir_create(checks_dir)
}
#' @export
#' @rdname dir_find
dir_setup_package <- function(pkgdir, package) {
  # Create the old/new check libraries and the check directory for `package`.
  for (what in c("pkgold", "pkgnew")) {
    dir_create(dir_find(pkgdir, what, package))
  }
  dir_create(dir_find(pkgdir, "check", package))
}
dir_create <- function(paths) {
  # Create each path (with parents), returning a named logical vector of
  # success flags; paths that already exist yield FALSE without a warning.
  make_one <- function(path) {
    dir.create(path, recursive = TRUE, showWarnings = FALSE)
  }
  vapply(paths, make_one, FUN.VALUE = logical(1))
}
| /R/dirs.R | permissive | vspinu/revdepcheck | R | false | false | 3,228 | r |
#' Set up/retrieve the directory structure for the checks
#'
#' Currently the following files and directories are used.
#' They are all in the main revdep directory, which is `revdep` in the
#' package tree.
#' * `library`: a collection of package libraries
#' * `library/data.sqlite`: the SQLite database that contains the check
#' data.
#' * `library/<checked-pkg>/old`: library that contains the *old* version
#' of the revdep-checked package, together with its dependencies.
#' * `library/<checked-pkg>/new`: library that contains the *new* version
#' of the revdep-checked package, together with its dependencies.
#' * `library/<pkg>` are the libraries for the reverse dependencies.
#'
#' @param pkgdir Path to the package we are revdep-checking.
#' @param what Directory to query:
#' * `"root"`: the root of the check directory,
#' * `"db"`: the database file,
#' * `"old"`: the library of the old version of the package.
#' * `"new"`: the library of the new version of the package.
#' * `"pkg"`: the library of the reverse dependency, the `package`
#' argument must be supplied as well.
#' * `"check"`: the check directory of the reverse dependency, the
#' `package` argument must be supplied as well.
#' * `"pkgold"`: package libraries to use when checking `package` with
#' the old version.
#' * `"pkgnew"`: package libraries to use when checking `package` with
#' the new version.
#' @param package The name of the package, if `what` is `"pkg"`, `"check"`,
#' `"pkgold"` or `"pkgnew"`.
#' @return Character scalar, the requested path.
#'
#' @keywords internal
dir_find <- function(pkgdir,
                     what = c("root", "db", "old", "new", "pkg", "check",
                              "checks", "lib", "pkgold", "pkgnew"),
                     package = NULL) {
  # Resolve one of the revdep check paths for `pkgdir`; see the roxygen block
  # above for the meaning of each `what` value.
  pkgdir <- pkg_check(pkgdir)
  pkg <- pkg_name(pkgdir)
  revdep <- file.path(pkgdir, "revdep")
  libdir <- file.path(revdep, "library")
  switch(
    match.arg(what),
    root   = revdep,
    db     = file.path(revdep, "data.sqlite"),
    checks = file.path(revdep, "checks"),
    check  = file.path(revdep, "checks", package),
    lib    = libdir,
    pkg    = file.path(libdir, package),
    old    = file.path(libdir, pkg, "old"),
    new    = file.path(libdir, pkg, "new"),
    ## Order is important here, because installs should go to the first
    pkgold = c(file.path(libdir, package), file.path(libdir, pkg, "old")),
    pkgnew = c(file.path(libdir, package), file.path(libdir, pkg, "new"))
  )
}
#' @export
#' @rdname dir_find
dir_setup <- function(pkgdir) {
  # Create the revdep root and the checks directory for this package tree.
  root_dir <- dir_find(pkgdir, "root")
  checks_dir <- dir_find(pkgdir, "checks")
  dir_create(root_dir)
  dir_create(checks_dir)
}
#' @export
#' @rdname dir_find
dir_setup_package <- function(pkgdir, package) {
  # Create the old/new check libraries and the check directory for `package`.
  for (what in c("pkgold", "pkgnew")) {
    dir_create(dir_find(pkgdir, what, package))
  }
  dir_create(dir_find(pkgdir, "check", package))
}
dir_create <- function(paths) {
  # Create each path (with parents), returning a named logical vector of
  # success flags; paths that already exist yield FALSE without a warning.
  make_one <- function(path) {
    dir.create(path, recursive = TRUE, showWarnings = FALSE)
  }
  vapply(paths, make_one, FUN.VALUE = logical(1))
}
|
\name{LST}
\alias{LST}
\docType{data}
\encoding{latin1}
\title{Time series of MODIS LST images}
\description{\code{LST} contains a spatial sub-sample (Istra region in Croatia) of 46 time series of MODIS LST images (estimated Land Surface Temperature in degrees C) at 1 km resolution. The temporal support size of these images is 8-days.}
\usage{data(LST)}
\format{
The \code{LST} data frame contains the following layers:
\describe{
\item{\code{LST2008_01_01}}{8-day MODIS LST mosaick for period 2007-12-29 to 2008-01-04}
\item{\code{LST2008_01_09}}{8-day MODIS LST mosaick for period 2008-01-05 to 2008-01-13}
\item{\code{\dots}}{subsequent bands}
\item{\code{lon}}{a numeric vector; x-coordinate (m) in the WGS84 system }
\item{\code{lat}}{a numeric vector; y-coordinate (m) in the WGS84 system }
}
}
\author{ Tomislav Hengl and Melita Percec Tadic }
\references{
\itemize{
\item Hengl, T., Heuvelink, G.B.M., Percec Tadic, M., Pebesma, E., (2011) Spatio-temporal prediction of daily temperatures using time-series of MODIS LST images. Theoretical and Applied Climatology, 107(1-2): 265-277. \doi{10.1007/s00704-011-0464-2}
\item MODIS products (\url{https://lpdaac.usgs.gov/data/get-started-data/})
}
}
\note{Time series of 46 day-time and night-time 8-day composite LST images (\href{https://lpdaac.usgs.gov/data/get-started-data/}{MOD11A2} product bands \code{1} and \code{5}) was obtained from the NASA's FTP server (\url{https://ladsweb.modaps.eosdis.nasa.gov/}). The original 8-day composite images were created by patching together images from a period of \enc{±}{+-}4 days, so that the proportion of clouds can be reduced to a minimum. The \code{"zvalue"} slot in the \code{"RasterBrick"} object can be used as the dateTime column expressed as:\cr
\code{yyyy-mm-ddThh:mm:sszzzzzz}\cr
where T is the separator between the date and the time, and the time zone is either Z (for UTC) or zzzzzz, which represents \enc{±}{+-}hh:mm in relation to UTC.}
\keyword{datasets}
| /man/LST.Rd | no_license | cran/plotKML | R | false | false | 1,992 | rd | \name{LST}
\alias{LST}
\docType{data}
\encoding{latin1}
\title{Time series of MODIS LST images}
\description{\code{LST} contains a spatial sub-sample (Istra region in Croatia) of 46 time series of MODIS LST images (estimated Land Surface Temperature in degrees C) at 1 km resolution. The temporal support size of these images is 8-days.}
\usage{data(LST)}
\format{
The \code{LST} data frame contains the following layers:
\describe{
\item{\code{LST2008_01_01}}{8-day MODIS LST mosaick for period 2007-12-29 to 2008-01-04}
\item{\code{LST2008_01_09}}{8-day MODIS LST mosaick for period 2008-01-05 to 2008-01-13}
\item{\code{\dots}}{subsequent bands}
\item{\code{lon}}{a numeric vector; x-coordinate (m) in the WGS84 system }
\item{\code{lat}}{a numeric vector; y-coordinate (m) in the WGS84 system }
}
}
\author{ Tomislav Hengl and Melita Percec Tadic }
\references{
\itemize{
\item Hengl, T., Heuvelink, G.B.M., Percec Tadic, M., Pebesma, E., (2011) Spatio-temporal prediction of daily temperatures using time-series of MODIS LST images. Theoretical and Applied Climatology, 107(1-2): 265-277. \doi{10.1007/s00704-011-0464-2}
\item MODIS products (\url{https://lpdaac.usgs.gov/data/get-started-data/})
}
}
\note{Time series of 46 day-time and night-time 8-day composite LST images (\href{https://lpdaac.usgs.gov/data/get-started-data/}{MOD11A2} product bands \code{1} and \code{5}) was obtained from the NASA's FTP server (\url{https://ladsweb.modaps.eosdis.nasa.gov/}). The original 8-day composite images were created by patching together images from a period of \enc{±}{+-}4 days, so that the proportion of clouds can be reduced to a minimum. The \code{"zvalue"} slot in the \code{"RasterBrick"} object can be used as the dateTime column expressed as:\cr
\code{yyyy-mm-ddThh:mm:sszzzzzz}\cr
where T is the separator between the date and the time, and the time zone is either Z (for UTC) or zzzzzz, which represents \enc{±}{+-}hh:mm in relation to UTC.}
\keyword{datasets}
|
test_that("ResultAssignerArchive works", {
  # Evaluate a random 4-point design, then let the assigner pick the best point
  assigner = ResultAssignerArchive$new()
  instance = MAKE_INST_1D()
  initial_design = generate_design_random(instance$search_space, n = 4L)$data
  instance$eval_batch(initial_design)
  expect_null(instance$result)
  assigner$assign_result(instance)
  archive = instance$archive
  expect_data_table(instance$result, nrows = 1L)
  expect_equal(instance$result[[archive$cols_x]], archive$best()[[archive$cols_x]])
  expect_equal(instance$result[[archive$cols_y]], archive$best()[[archive$cols_y]])
})
test_that("ResultAssignerArchive works with OptimizerMbo and bayesopt_ego", {
  result_assigner = ResultAssignerArchive$new()
  instance = MAKE_INST_1D()
  surrogate = SurrogateLearner$new(REGR_KM_DETERM)
  acq_function = AcqFunctionEI$new()
  acq_optimizer = AcqOptimizer$new(opt("random_search", batch_size = 2L), terminator = trm("evals", n_evals = 2L))
  optimizer = opt("mbo", loop_function = bayesopt_ego, surrogate = surrogate, acq_function = acq_function, acq_optimizer = acq_optimizer, result_assigner = result_assigner)
  optimizer$optimize(instance)
  expect_true(nrow(instance$archive$data) == 5L)
  # FIX: the checkmate argument is `nrows` (was `nrow`, which only worked via
  # partial argument matching); matches the sibling tests above and below.
  expect_data_table(instance$result, nrows = 1L)
})
test_that("ResultAssignerArchive works with OptimizerMbo and bayesopt_parego", {
  # ParEGO run on the 1D test objective with a 5-evaluation budget
  assigner = ResultAssignerArchive$new()
  inst = MAKE_INST(OBJ_1D_2, search_space = PS_1D, terminator = trm("evals", n_evals = 5L))
  surr = SurrogateLearner$new(REGR_KM_DETERM)
  acqf = AcqFunctionEI$new()
  acqo = AcqOptimizer$new(opt("random_search", batch_size = 2L), terminator = trm("evals", n_evals = 2L))
  opt_mbo = opt("mbo", loop_function = bayesopt_parego, surrogate = surr, acq_function = acqf, acq_optimizer = acqo, result_assigner = assigner)
  opt_mbo$optimize(inst)
  expect_true(nrow(inst$archive$data) == 5L)
  expect_data_table(inst$result, min.rows = 1L)
})
test_that("ResultAssignerArchive works with OptimizerMbo and bayesopt_smsego", {
  # SMS-EGO run needs one surrogate learner per objective
  assigner = ResultAssignerArchive$new()
  inst = MAKE_INST(OBJ_1D_2, search_space = PS_1D, terminator = trm("evals", n_evals = 5L))
  surr = SurrogateLearnerCollection$new(list(REGR_KM_DETERM, REGR_KM_DETERM$clone(deep = TRUE)))
  acqf = AcqFunctionSmsEgo$new()
  acqo = AcqOptimizer$new(opt("random_search", batch_size = 2L), terminator = trm("evals", n_evals = 2L))
  opt_mbo = opt("mbo", loop_function = bayesopt_smsego, surrogate = surr, acq_function = acqf, acq_optimizer = acqo, result_assigner = assigner)
  opt_mbo$optimize(inst)
  expect_true(nrow(inst$archive$data) == 5L)
  expect_data_table(inst$result, min.rows = 1L)
})
| /tests/testthat/test_ResultAssignerArchive.R | no_license | mlr-org/mlr3mbo | R | false | false | 2,692 | r | test_that("ResultAssignerArchive works", {
  # Evaluate a random 4-point design, then let the result assigner pick the
  # single best archive point as the result.
  ras = ResultAssignerArchive$new()
  inst = MAKE_INST_1D()
  design = generate_design_random(inst$search_space, n = 4L)$data
  inst$eval_batch(design)
  # No result until assign_result() has been called
  expect_null(inst$result)
  ras$assign_result(inst)
  # Exactly one row: the best point of the archive
  expect_data_table(inst$result, nrows = 1L)
  expect_equal(inst$result[[inst$archive$cols_x]], inst$archive$best()[[inst$archive$cols_x]])
  expect_equal(inst$result[[inst$archive$cols_y]], inst$archive$best()[[inst$archive$cols_y]])
})
test_that("ResultAssignerArchive works with OptimizerMbo and bayesopt_ego", {
  result_assigner = ResultAssignerArchive$new()
  instance = MAKE_INST_1D()
  surrogate = SurrogateLearner$new(REGR_KM_DETERM)
  acq_function = AcqFunctionEI$new()
  acq_optimizer = AcqOptimizer$new(opt("random_search", batch_size = 2L), terminator = trm("evals", n_evals = 2L))
  optimizer = opt("mbo", loop_function = bayesopt_ego, surrogate = surrogate, acq_function = acq_function, acq_optimizer = acq_optimizer, result_assigner = result_assigner)
  optimizer$optimize(instance)
  expect_true(nrow(instance$archive$data) == 5L)
  # FIX: the checkmate argument is `nrows` (was `nrow`, which only worked via
  # partial argument matching); matches the sibling tests above and below.
  expect_data_table(instance$result, nrows = 1L)
})
test_that("ResultAssignerArchive works with OptimizerMbo and bayesopt_parego", {
  # ParEGO run on the 1D test objective with a 5-evaluation budget
  assigner = ResultAssignerArchive$new()
  inst = MAKE_INST(OBJ_1D_2, search_space = PS_1D, terminator = trm("evals", n_evals = 5L))
  surr = SurrogateLearner$new(REGR_KM_DETERM)
  acqf = AcqFunctionEI$new()
  acqo = AcqOptimizer$new(opt("random_search", batch_size = 2L), terminator = trm("evals", n_evals = 2L))
  opt_mbo = opt("mbo", loop_function = bayesopt_parego, surrogate = surr, acq_function = acqf, acq_optimizer = acqo, result_assigner = assigner)
  opt_mbo$optimize(inst)
  expect_true(nrow(inst$archive$data) == 5L)
  expect_data_table(inst$result, min.rows = 1L)
})
test_that("ResultAssignerArchive works with OptimizerMbo and bayesopt_smsego", {
  # SMS-EGO run needs one surrogate learner per objective
  assigner = ResultAssignerArchive$new()
  inst = MAKE_INST(OBJ_1D_2, search_space = PS_1D, terminator = trm("evals", n_evals = 5L))
  surr = SurrogateLearnerCollection$new(list(REGR_KM_DETERM, REGR_KM_DETERM$clone(deep = TRUE)))
  acqf = AcqFunctionSmsEgo$new()
  acqo = AcqOptimizer$new(opt("random_search", batch_size = 2L), terminator = trm("evals", n_evals = 2L))
  opt_mbo = opt("mbo", loop_function = bayesopt_smsego, surrogate = surr, acq_function = acqf, acq_optimizer = acqo, result_assigner = assigner)
  opt_mbo$optimize(inst)
  expect_true(nrow(inst$archive$data) == 5L)
  expect_data_table(inst$result, min.rows = 1L)
})
|
#### R Stats Machine Learning: Cross Validation

# Exercise: create a model with the faithful dataset.
?faithful
# Explain waiting time with eruption duration.
# Get a visual impression of the relationship, then estimate the MSE using
# simple (hold-out) cross validation and 5-fold CV.
# The simple-CV solution below uses a 50/50 split of the dataset.
# Compare the results: which approach has the lower error rate, and what
# could be problematic about this CV setup?

#### Simple hold-out CV

# Simple xy plot of the two variables
plot(faithful$waiting, faithful$eruptions)

# Linear model explaining the waiting time, fit on training rows 1-136
mymodel <- glm(data = faithful[1:136, ], waiting ~ eruptions)

# MSE on the second half of the data (the validation set): squared differences
# between the true values of faithful$waiting and the model predictions,
# averaged over rows 137-272
mean((faithful$waiting - predict(mymodel, faithful))[137:272]^2)

#### 5-fold CV
library(boot) # for the cv.glm function

# This time fit the model on the full dataset
mymodel2 <- glm(data = faithful, waiting ~ eruptions)

# cv.glm with K = 5 performs 5-fold cross validation
cv.result <- cv.glm(data = faithful, mymodel2, K = 5)

# The estimated prediction error; compare with the hold-out estimate above
cv.result$delta

### 5-fold CV keeps bias out of your error estimate
| /R_Stat_Johns_Hopkins_work/R_Stats_Machine_Learning/R_Stats_Machine_Learning_Cross_Validation.R | no_license | mpierne01/Johns_Hopkins_DS_Work | R | false | false | 1,230 | r | #### R Stats Machine Learning Cross Validation
# Exercise: create a model with the faithful dataset.
?faithful
# Explain waiting time with eruption duration.
# Get a visual impression of the relationship, then estimate the MSE using
# simple (hold-out) cross validation and 5-fold CV.
# The simple-CV solution below uses a 50/50 split of the dataset.
# Compare the results: which approach has the lower error rate, and what
# could be problematic about this CV setup?

#### Simple hold-out CV

# Simple xy plot of the two variables
plot(faithful$waiting, faithful$eruptions)

# Linear model explaining the waiting time, fit on training rows 1-136
mymodel <- glm(data = faithful[1:136, ], waiting ~ eruptions)

# MSE on the second half of the data (the validation set): squared differences
# between the true values of faithful$waiting and the model predictions,
# averaged over rows 137-272
mean((faithful$waiting - predict(mymodel, faithful))[137:272]^2)

#### 5-fold CV
library(boot) # for the cv.glm function

# This time fit the model on the full dataset
mymodel2 <- glm(data = faithful, waiting ~ eruptions)

# cv.glm with K = 5 performs 5-fold cross validation
cv.result <- cv.glm(data = faithful, mymodel2, K = 5)

# The estimated prediction error; compare with the hold-out estimate above
cv.result$delta

### 5-fold CV keeps bias out of your error estimate
|
context("testing")
library(countfitteR)
test_that("fit_counts", {
  counts <- data.frame(x1 = c(0, 1, 0, 0, 0, 2), x2 = c(0, 0, 1, 1, 0, 1))
  fits <- fit_counts(counts, model = "pois")
  # Input data is left untouched by fit_counts
  expect_equal(counts[1, 1], 0)
  expect_equal(counts[2, 2], 0)
  expect_equal(counts[6, 1], 2)
  expect_equal(counts[6, 2], 1)
  # NOTE(review): counts[5, 5] indexes a non-existent column of a 2-column
  # data.frame -- confirm this expectation behaves as intended.
  expect_equal(counts[5, 5], NULL)
  expect_equal(fits$x1pois$BIC, 13.33694, tolerance = 3.09e-06)
  expect_equal(fits$x1pois$model, "pois")
  expect_equal(fits$x2pois$BIC, 11.95064, tolerance = 2.55e-06)
})
test_that("compare_fit", {
  counts <- data.frame(x1 = c(0, 1, 0, 0, 0, 2), x2 = c(0, 0, 1, 1, 0, 1))
  comparison <- compare_fit(counts, fitlist = fit_counts(counts, model = "all"))
  # Input data is left untouched
  expect_equal(counts[1, 1], 0)
  expect_equal(counts[2, 2], 0)
  expect_equal(counts[6, 1], 2)
  expect_equal(counts[6, 2], 1)
  # NOTE(review): counts[5, 5] indexes a non-existent column -- confirm intent.
  expect_equal(counts[5, 5], NULL)
  expect_equal(comparison[1, 5], 3.639184)
  # expect_equal(comparison[6, 5], NA)
  expect_equal(comparison[18, 5], 1.819557, tolerance = 3.65e-07)
})
test_that("plot_fit", {
  counts <- data.frame(x1 = c(0, 1, 0, 0, 0, 2), x2 = c(0, 0, 1, 1, 0, 1))
  comparison <- compare_fit(counts, fitlist = fit_counts(counts, model = "all"))
  fit_plot <- plot_fitcmp(comparison)
  # Input data is left untouched
  expect_equal(counts[1, 1], 0)
  expect_equal(counts[2, 2], 0)
  expect_equal(counts[6, 1], 2)
  expect_equal(counts[6, 2], 1)
  # NOTE(review): counts[5, 5] indexes a non-existent column -- confirm intent.
  expect_equal(counts[5, 5], NULL)
  expect_equal(comparison[1, 5], 3.639184)
  # expect_equal(comparison[6, 5], NA)
  expect_equal(comparison[18, 5], 1.819557, tolerance = 3.65e-07)
  # Basic properties of the returned ggplot object
  expect_equal(fit_plot$labels[[1]], "x")
  expect_equal(fit_plot$coordinates$clip, "on")
  expect_equal(fit_plot$coordinates$limits$x, NULL)
})
test_that("summary_fit", {
  # Fit all candidate count models and summarise the resulting fit list
  df <- data.frame(x1 = c(0,1,0,0,0,2), x2 = c(0,0,1,1,0,1))
  fc <- fit_counts(df, model = "all")
  sf <- summary_fitlist(fc)
  # Input data is left untouched
  expect_equal(df[1,1], 0)
  expect_equal(df[2,2], 0)
  expect_equal(df[6,1], 2)
  expect_equal(df[6,2], 1)
  # NOTE(review): df[5,5] indexes a non-existent column of a 2-column
  # data.frame -- confirm this expectation behaves as intended.
  expect_equal(df[5,5], NULL)
  # Per-model goodness-of-fit values
  expect_equal(fc$x1_pois$BIC, 13.33694, tolerance = 3.09e-06)
  expect_equal(fc$x1_zip$BIC, NA)
  expect_equal(fc$x2_nb$BIC, 13.74248, tolerance = 2.8e-07)
  # The summary table aggregates the same statistics
  expect_equal(sf$BIC[1], 13.33694, tolerance = 3.09e-06)
  expect_equal(sf$theta[6], 19305.4, tolerance = 0.000684)
  expect_equal(sf$lambda[5], 0.5)
}) | /tests/testthat/testing.R | no_license | michbur/countfitteR-1 | R | false | false | 2,072 | r | context("testing")
library(countfitteR)
test_that("fit_counts", {
  counts <- data.frame(x1 = c(0, 1, 0, 0, 0, 2), x2 = c(0, 0, 1, 1, 0, 1))
  fits <- fit_counts(counts, model = "pois")
  # Input data is left untouched by fit_counts
  expect_equal(counts[1, 1], 0)
  expect_equal(counts[2, 2], 0)
  expect_equal(counts[6, 1], 2)
  expect_equal(counts[6, 2], 1)
  # NOTE(review): counts[5, 5] indexes a non-existent column -- confirm intent.
  expect_equal(counts[5, 5], NULL)
  expect_equal(fits$x1pois$BIC, 13.33694, tolerance = 3.09e-06)
  expect_equal(fits$x1pois$model, "pois")
  expect_equal(fits$x2pois$BIC, 11.95064, tolerance = 2.55e-06)
})
test_that("compare_fit", {
  counts <- data.frame(x1 = c(0, 1, 0, 0, 0, 2), x2 = c(0, 0, 1, 1, 0, 1))
  comparison <- compare_fit(counts, fitlist = fit_counts(counts, model = "all"))
  # Input data is left untouched
  expect_equal(counts[1, 1], 0)
  expect_equal(counts[2, 2], 0)
  expect_equal(counts[6, 1], 2)
  expect_equal(counts[6, 2], 1)
  # NOTE(review): counts[5, 5] indexes a non-existent column -- confirm intent.
  expect_equal(counts[5, 5], NULL)
  expect_equal(comparison[1, 5], 3.639184)
  # expect_equal(comparison[6, 5], NA)
  expect_equal(comparison[18, 5], 1.819557, tolerance = 3.65e-07)
})
test_that("plot_fit", {
  counts <- data.frame(x1 = c(0, 1, 0, 0, 0, 2), x2 = c(0, 0, 1, 1, 0, 1))
  comparison <- compare_fit(counts, fitlist = fit_counts(counts, model = "all"))
  fit_plot <- plot_fitcmp(comparison)
  # Input data is left untouched
  expect_equal(counts[1, 1], 0)
  expect_equal(counts[2, 2], 0)
  expect_equal(counts[6, 1], 2)
  expect_equal(counts[6, 2], 1)
  # NOTE(review): counts[5, 5] indexes a non-existent column -- confirm intent.
  expect_equal(counts[5, 5], NULL)
  expect_equal(comparison[1, 5], 3.639184)
  # expect_equal(comparison[6, 5], NA)
  expect_equal(comparison[18, 5], 1.819557, tolerance = 3.65e-07)
  # Basic properties of the returned ggplot object
  expect_equal(fit_plot$labels[[1]], "x")
  expect_equal(fit_plot$coordinates$clip, "on")
  expect_equal(fit_plot$coordinates$limits$x, NULL)
})
test_that("summary_fit", {
  counts <- data.frame(x1 = c(0, 1, 0, 0, 0, 2), x2 = c(0, 0, 1, 1, 0, 1))
  fits <- fit_counts(counts, model = "all")
  fit_summary <- summary_fitlist(fits)
  # Input data is left untouched
  expect_equal(counts[1, 1], 0)
  expect_equal(counts[2, 2], 0)
  expect_equal(counts[6, 1], 2)
  expect_equal(counts[6, 2], 1)
  # NOTE(review): counts[5, 5] indexes a non-existent column -- confirm intent.
  expect_equal(counts[5, 5], NULL)
  expect_equal(fits$x1_pois$BIC, 13.33694, tolerance = 3.09e-06)
  expect_equal(fits$x1_zip$BIC, NA)
  expect_equal(fits$x2_nb$BIC, 13.74248, tolerance = 2.8e-07)
  # The summary table aggregates the same statistics
  expect_equal(fit_summary$BIC[1], 13.33694, tolerance = 3.09e-06)
  expect_equal(fit_summary$theta[6], 19305.4, tolerance = 0.000684)
  expect_equal(fit_summary$lambda[5], 0.5)
})
#' ---
#' title: "Generate Predictions Functions"
#' author: "Kevin Lu"
#' date: '`r format(Sys.Date(), "%B %d, %Y")`'
#' output:
#' html_document:
#' theme: default
#' highlight: tango
#' toc: true
#' toc_float: true
#' number_sections: false
#' fig_width: 8
#' fig_height: 5
#' ---
#' # 1. Source Pairs Trading Functions
source("./src/util/01-load-packages.R")
#' # 2. Generate Predictions Function
#' Description
#' Generate predictions on the test set given a cutoff date to split the train and test sets and a list of parameters.
#'
#' Arguments
#' pricing_data: A dataframe containing pricing data from Poloneix gathered in tidy format.
#' cutoff_date: A data representing the cutoff date between the train and test sets.
#' params: A list of parameters passed to the functions below that describe the mean reversion pairs trading strategy.
#'
#' Value
#' A dataframe containing the position, change in position, signal, and hedge ratio for the coin pairs selected by the
#' strategy.
generate_predictions <- function(pricing_data, cutoff_date, params) {
  # Split the data at the cutoff and select the coin pairs to trade
  strategy_setup <- setup_strategy(pricing_data = pricing_data,
                                   cutoff_date = cutoff_date,
                                   params = params)
  # Backtest the selected pairs on the test set and return the predictions
  backtest_strategy(train = strategy_setup[["train"]],
                    test = strategy_setup[["test"]],
                    selected_pairs = strategy_setup[["selected_pairs"]],
                    params = params)
}
| /src/util/10-generate-predictions-functions.R | no_license | luyongxu/pairstrading | R | false | false | 1,741 | r | #' ---
#' title: "Generate Predictions Functions"
#' author: "Kevin Lu"
#' date: '`r format(Sys.Date(), "%B %d, %Y")`'
#' output:
#' html_document:
#' theme: default
#' highlight: tango
#' toc: true
#' toc_float: true
#' number_sections: false
#' fig_width: 8
#' fig_height: 5
#' ---
#' # 1. Source Pairs Trading Functions
source("./src/util/01-load-packages.R")
#' # 2. Generate Predictions Function
#' Description
#' Generate predictions on the test set given a cutoff date to split the train and test sets and a list of parameters.
#'
#' Arguments
#' pricing_data: A dataframe containing pricing data from Poloneix gathered in tidy format.
#' cutoff_date: A data representing the cutoff date between the train and test sets.
#' params: A list of parameters passed to the functions below that describe the mean reversion pairs trading strategy.
#'
#' Value
#' A dataframe containing the position, change in position, signal, and hedge ratio for the coin pairs selected by the
#' strategy.
generate_predictions <- function(pricing_data, cutoff_date, params) {
  # Split the data at the cutoff and select the coin pairs to trade
  strategy_setup <- setup_strategy(pricing_data = pricing_data,
                                   cutoff_date = cutoff_date,
                                   params = params)
  # Backtest the selected pairs on the test set and return the predictions
  backtest_strategy(train = strategy_setup[["train"]],
                    test = strategy_setup[["test"]],
                    selected_pairs = strategy_setup[["selected_pairs"]],
                    params = params)
}
|
\name{plotTools}
\alias{plotTools}
\title{
Plot eflalo or tacsat files
}
\description{
Plot eflalo or tacsat given a grid and column name as a map of intensity
}
\usage{
plotTools(x,level= "ICESrectangle",xlim,ylim,zlim=NULL,log=FALSE,
gridcell=c(0.1,0.05),color=NULL,control.tacsat= list(clm = NULL),
control.eflalo = list(clm = NULL), returnRange = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Eflalo or tacsat dataframe
}
\item{level}{
Aggregating level: "ICESrectangle" or specified "gridcell" between xlim and ylim in
steps of gridcell for tacsat, or "ICESrectangle" only for eflalo
}
\item{xlim}{
two element numeric vector giving a range of longitudes, expressed in degrees,
to which drawing should be restricted. Longitude is measured in degrees east
of Greenwich, so that, in particular, locations in the USA have negative
longitude. If fill = TRUE, polygons selected by region must be entirely inside
the xlim range. The default value of this argument spans the entire longitude
range of the database.
}
\item{ylim}{
two element numeric vector giving a range of latitudes, expressed in degrees,
to which drawing should be restricted. Latitude is measured in degrees north
of the equator, so that, in particular, locations in the USA have positive
latitude. If fill = TRUE, polygons selected by region must be entirely inside
the ylim range. The default value of this argument spans the entire latitude
range of the database.
}
\item{zlim}{
the minimum and maximum z values for which colors should be plotted,
defaulting to the range of the finite values of z. Each of the given colors
will be used to color an equispaced interval of this range. The midpoints of
the intervals cover the range, so that values just outside the range will be
plotted.
}
\item{log}{
whether values to plot need to be logged, TRUE or FALSE
}
\item{gridcell}{
two element numeric vector giving the steps in longitudinal degrees and steps
in latitudinal degrees for the grid to plot on. Only needed when
level = "gridcell"
}
\item{color}{
colors range. default = brewer.pal(9,"YlOrRd")
}
\item{control.tacsat}{
list with attribute "clm": column names to display aggregated results over
}
\item{control.eflalo}{
list with attribute "clm": column names to display aggregated results over
}
\item{returnRange}{Logical: return range of plotted values (which can be used to
define own legend}
}
\author{Niels T. Hintzen}
\seealso{
\code{\link{plotTreeMap}}, \code{\link{Grid2KLM}}, \code{\link{landingsMap2GIFanim}}, \code{\link{pings2EffortMaps}}, \code{\link{pings2LandingsMaps}}}
\examples{
data(tacsat)
data(eflalo)
plotTools(tacsat,level="ICESrectangle",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL))
x11()
plotTools(eflalo,level="ICESrectangle",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL))
x11()
plotTools(tacsat,level="gridcell",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL))
x11()
plotTools(eflalo,level="ICESrectangle",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL),
control.eflalo=list(clm=c("LE_KG_COD","LE_KG_PLE")))
}
| /vmstools/man/plotTools.Rd | no_license | mcruf/vmstools | R | false | false | 3,553 | rd | \name{plotTools}
\alias{plotTools}
\title{
Plot eflalo or tacsat files
}
\description{
Plot eflalo or tacsat given a grid and column name as a map of intensity
}
\usage{
plotTools(x,level= "ICESrectangle",xlim,ylim,zlim=NULL,log=FALSE,
gridcell=c(0.1,0.05),color=NULL,control.tacsat= list(clm = NULL),
control.eflalo = list(clm = NULL), returnRange = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Eflalo or tacsat dataframe
}
\item{level}{
Aggregating level: "ICESrectangle" or specified "gridcell" between xlim and ylim in
steps of gridcell for tacsat, or "ICESrectangle" only for eflalo
}
\item{xlim}{
two element numeric vector giving a range of longitudes, expressed in degrees,
to which drawing should be restricted. Longitude is measured in degrees east
of Greenwich, so that, in particular, locations in the USA have negative
longitude. If fill = TRUE, polygons selected by region must be entirely inside
the xlim range. The default value of this argument spans the entire longitude
range of the database.
}
\item{ylim}{
two element numeric vector giving a range of latitudes, expressed in degrees,
to which drawing should be restricted. Latitude is measured in degrees north
of the equator, so that, in particular, locations in the USA have positive
latitude. If fill = TRUE, polygons selected by region must be entirely inside
the ylim range. The default value of this argument spans the entire latitude
range of the database.
}
\item{zlim}{
the minimum and maximum z values for which colors should be plotted,
defaulting to the range of the finite values of z. Each of the given colors
will be used to color an equispaced interval of this range. The midpoints of
the intervals cover the range, so that values just outside the range will be
plotted.
}
\item{log}{
whether values to plot need to be logged, TRUE or FALSE
}
\item{gridcell}{
two element numeric vector giving the steps in longitudinal degrees and steps
in latitudinal degrees for the grid to plot on. Only needed when
level = "gridcell"
}
\item{color}{
colors range. default = brewer.pal(9,"YlOrRd")
}
\item{control.tacsat}{
list with attribute "clm": column names to display aggregated results over
}
\item{control.eflalo}{
list with attribute "clm": column names to display aggregated results over
}
\item{returnRange}{Logical: return range of plotted values (which can be used to
define own legend}
}
\author{Niels T. Hintzen}
\seealso{
\code{\link{plotTreeMap}}, \code{\link{Grid2KLM}}, \code{\link{landingsMap2GIFanim}}, \code{\link{pings2EffortMaps}}, \code{\link{pings2LandingsMaps}}}
\examples{
data(tacsat)
data(eflalo)
plotTools(tacsat,level="ICESrectangle",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL))
x11()
plotTools(eflalo,level="ICESrectangle",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL))
x11()
plotTools(tacsat,level="gridcell",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL))
x11()
plotTools(eflalo,level="ICESrectangle",xlim=c(-5,10),ylim=c(48,62),zlim=NULL,
log=FALSE,gridcell=c(0.1,0.05),color=NULL,control.tacsat=list(clm=NULL),
control.eflalo=list(clm=c("LE_KG_COD","LE_KG_PLE")))
}
|
\dontrun{
WPT <- server()
locs <- getStatus(WPT, "someID")
}
| /inst/examples/docs/getStatus.R | permissive | johndharrison/webpagetestr | R | false | false | 65 | r | \dontrun{
WPT <- server()
locs <- getStatus(WPT, "someID")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{acf2}
\alias{acf2}
\title{Calculate lag10 autocorrelation}
\usage{
acf2(x, lag = 10, ...)
}
\arguments{
\item{x}{numeric vector}
\item{lag}{integer; the lag at which the autocorrelation is returned (default 10)}
\item{...}{additional arguments to \code{acf}}
}
\description{
A wrapper for the function acf that returns the autocorrelation for
the specified lag. Missing values are removed.
}
\seealso{
\code{\link{acf}}
}
| /man/acf2.Rd | no_license | rscharpf/VanillaICE | R | false | true | 458 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{acf2}
\alias{acf2}
\title{Calculate lag10 autocorrelation}
\usage{
acf2(x, lag = 10, ...)
}
\arguments{
\item{x}{numeric vector}
\item{lag}{integer; the lag at which the autocorrelation is returned (default 10)}
\item{...}{additional arguments to \code{acf}}
}
\description{
A wrapper for the function acf that returns the autocorrelation for
the specified lag. Missing values are removed.
}
\seealso{
\code{\link{acf}}
}
|
#' @title GMDH GIA auxiliary functions
#'
#' @description Fits a GMDH GIA (generalized iterative algorithm) network used
#'   by predict.gia. Layers are grown in pairs: each layer fits gmdh.combi_1
#'   on admissible pairs of columns, keeps at most \code{prune} neurons with
#'   the lowest cross-validation error, re-attaches the original inputs, and
#'   the loop stops once the best CV error no longer improves.
#'
#' @keywords internal
#'
gmdh.gia_1 <- function(X, y, prune) {
  inicial <- X                     # original inputs, re-attached after each layer
  fin.1 <- Inf                     # best CV error after the odd layer
  fin.2 <- 0                       # best CV error after the even layer
  lap <- 0                         # layer counter
  modelos <- vector(mode = "list") # per-layer lists of fitted neurons
  # Grow pairs of layers while the even layer improves on the odd one
  while(fin.1 >= fin.2) {
    lap <- lap + 1
    message(paste("Layer ", lap, sep = ""))
    # Candidate column pairs; after the first layer, only pairs whose second
    # member is a *new* column (index > ncol(inicial)) are kept.
    # NOTE(review): ifelse() is used for scalar control flow with assignment
    # side effects; it works, but `if (lap != 1) ...` would be conventional.
    ifelse(lap == 1, .var <- combn(c(1:ncol(inicial)), 2, simplify = TRUE),
           .var <- combn(c(1:ncol(X)), 2, simplify = TRUE))
    ifelse(lap == 1, NA, .var.ndx <- which(.var[2, ] <= ncol(inicial)))
    ifelse(lap == 1, NA, .var <- .var[, -.var.ndx])
    message(paste(".................... ", ncol(.var), " neurons ", sep = ""))
    # Fit one gmdh.combi_1 neuron per column pair
    mod <- apply(.var, 2, function(.var){gmdh.combi_1(y = y,
                          X = cbind(X[, .var[1], drop = FALSE],
                                    X[, .var[2], drop = FALSE]))})
    names(mod) <- paste(lap, c("."), 1:length(mod), sep = "")
    CV.all <- unlist(lapply(mod, function(mod){mod$results$CV}))
    # Layer outputs: one prediction column per fitted neuron
    Z <- lapply(mod, predict.combi, X)
    Z <- matrix(data = unlist(Z), nrow = nrow(X), ncol = length(mod))
    colnames(Z) <- names(mod)
    Z <- fun.filter(Z)
    nombres.Z <- colnames(Z)
    CV.all <- CV.all[nombres.Z]
    # Keep at most `prune` neurons with the lowest CV error
    ndx <- sort(na.omit(order(CV.all)[1:prune]))
    CV.all <- CV.all[ndx, drop = FALSE]
    mod <- mod[names(CV.all)]
    Z <- Z[, names(CV.all)]
    Z <- cbind(inicial, Z)  # re-attach the original inputs
    fin.1 <- min(CV.all, na.rm = TRUE)
    message(paste(" Error ", fin.1, sep = ""))
    # Even layer: combine columns of the augmented matrix Z the same way
    lap <- lap + 1
    message(paste("Layer ", lap, sep = ""))
    .var <- combn(c(1:ncol(Z)), 2, simplify = TRUE)
    .var.ndx <- which(.var[2, ] <= ncol(inicial))
    .var <- .var[, -.var.ndx]
    message(paste(".................... ", ncol(.var), " neurons ", sep = ""))
    mod.Z <- apply(.var, 2, function(.var){gmdh.combi_1(y = y,
                            X = cbind(Z[, .var[1], drop = FALSE],
                                      Z[, .var[2], drop = FALSE]))})
    names(mod.Z) <- paste(lap, c("."), 1:length(mod.Z), sep = "")
    CV.all <- unlist(lapply(mod.Z, function(mod.Z){mod.Z$results$CV}))
    X <- lapply(mod.Z, predict.combi, Z)
    X <- matrix(data = unlist(X), ncol = length(mod.Z))
    colnames(X) <- names(mod.Z)
    X <- fun.filter(X)
    nombres.X <- colnames(X)
    CV.all <- CV.all[nombres.X]
    ndx <- sort(na.omit(order(CV.all)[1:prune]))
    CV.all <- CV.all[ndx, drop = FALSE]
    mod.Z <- mod.Z[names(CV.all)]
    X <- X[, names(CV.all)]
    X <- cbind(inicial, X)
    # Record both layers and tag the accumulated list with class "gia"
    modelos[[length(modelos) + 1]] <- mod
    modelos[[length(modelos) + 1]] <- mod.Z
    class(modelos) <- "gia"
    fin.2 <- min(CV.all, na.rm = TRUE)
    message(paste(" Error ", fin.2, sep = ""))
    # Stop and return once the even layer no longer improves on the odd one.
    # NOTE(review): return() inside ifelse() exits the function when the
    # condition is TRUE; `if (fin.2 >= fin.1) return(modelos)` is equivalent.
    ifelse(fin.2 >= fin.1, return(modelos), NA)
  }
}
| /R/gia_1.R | no_license | cran/GMDHreg | R | false | false | 2,889 | r | #' @title GMDH GIA auxiliar functions
#'
#' @description Performs auxiliar tasks to predict.gia
#'
#' @keywords internal
#'
gmdh.gia_1 <- function(X, y, prune) {
inicial <- X
fin.1 <- Inf
fin.2 <- 0
lap <- 0
modelos <- vector(mode = "list")
while(fin.1 >= fin.2) {
lap <- lap + 1
message(paste("Layer ", lap, sep = ""))
ifelse(lap == 1, .var <- combn(c(1:ncol(inicial)), 2, simplify = TRUE),
.var <- combn(c(1:ncol(X)), 2, simplify = TRUE))
ifelse(lap == 1, NA, .var.ndx <- which(.var[2, ] <= ncol(inicial)))
ifelse(lap == 1, NA, .var <- .var[, -.var.ndx])
message(paste(".................... ", ncol(.var), " neurons ", sep = ""))
mod <- apply(.var, 2, function(.var){gmdh.combi_1(y = y,
X = cbind(X[, .var[1], drop = FALSE],
X[, .var[2], drop = FALSE]))})
names(mod) <- paste(lap, c("."), 1:length(mod), sep = "")
CV.all <- unlist(lapply(mod, function(mod){mod$results$CV}))
Z <- lapply(mod, predict.combi, X)
Z <- matrix(data = unlist(Z), nrow = nrow(X), ncol = length(mod))
colnames(Z) <- names(mod)
Z <- fun.filter(Z)
nombres.Z <- colnames(Z)
CV.all <- CV.all[nombres.Z]
ndx <- sort(na.omit(order(CV.all)[1:prune]))
CV.all <- CV.all[ndx, drop = FALSE]
mod <- mod[names(CV.all)]
Z <- Z[, names(CV.all)]
Z <- cbind(inicial, Z)
fin.1 <- min(CV.all, na.rm = TRUE)
message(paste(" Error ", fin.1, sep = ""))
lap <- lap + 1
message(paste("Layer ", lap, sep = ""))
.var <- combn(c(1:ncol(Z)), 2, simplify = TRUE)
.var.ndx <- which(.var[2, ] <= ncol(inicial))
.var <- .var[, -.var.ndx]
message(paste(".................... ", ncol(.var), " neurons ", sep = ""))
mod.Z <- apply(.var, 2, function(.var){gmdh.combi_1(y = y,
X = cbind(Z[, .var[1], drop = FALSE],
Z[, .var[2], drop = FALSE]))})
names(mod.Z) <- paste(lap, c("."), 1:length(mod.Z), sep = "")
CV.all <- unlist(lapply(mod.Z, function(mod.Z){mod.Z$results$CV}))
X <- lapply(mod.Z, predict.combi, Z)
X <- matrix(data = unlist(X), ncol = length(mod.Z))
colnames(X) <- names(mod.Z)
X <- fun.filter(X)
nombres.X <- colnames(X)
CV.all <- CV.all[nombres.X]
ndx <- sort(na.omit(order(CV.all)[1:prune]))
CV.all <- CV.all[ndx, drop = FALSE]
mod.Z <- mod.Z[names(CV.all)]
X <- X[, names(CV.all)]
X <- cbind(inicial, X)
modelos[[length(modelos) + 1]] <- mod
modelos[[length(modelos) + 1]] <- mod.Z
class(modelos) <- "gia"
fin.2 <- min(CV.all, na.rm = TRUE)
message(paste(" Error ", fin.2, sep = ""))
ifelse(fin.2 >= fin.1, return(modelos), NA)
}
}
|
##############################################
# General data on companies that used the support measure
# (annual turnover, taxes, labour taxes and employees, joined with
# Business Register attributes and Unemployment Insurance Fund records)
#
# (c) 2020 - Raoul Lättemäe
#
##############################################
library(tidyverse)
# Load turnover and statistics figures (EMTA quarterly data, 2019-2020);
# provides the data frame `andmed`.
load("~/Dokumendid/R/EMTA/2019-2020.rdata")
# Annual 2019 totals from the four quarterly columns (NA treated as 0).
andmed$Käive.2019 = rowSums(andmed[,c("Käive.i.2019", "Käive.ii.2019", "Käive.iii.2019", "Käive.iv.2019")], na.rm = TRUE)
andmed$Maksud.2019 = rowSums(andmed[,c("Maksud.i.2019", "Maksud.ii.2019", "Maksud.iii.2019", "Maksud.iv.2019")], na.rm = TRUE)
andmed$Tööjõumaksud.2019 = rowSums(andmed[,c("Tööjõumaksud.i.2019", "Tööjõumaksud.ii.2019", "Tööjõumaksud.iii.2019", "Tööjõumaksud.iv.2019")], na.rm = TRUE)
# Average employee count over the four quarters of 2019.
andmed$Töötajad.2019 = rowSums(andmed[,c("Töötajad.i.2019", "Töötajad.ii.2019", "Töötajad.iii.2019", "Töötajad.iv.2019")], na.rm = TRUE)/4
# Year-on-year turnover change for Q1 and Q2 (2020 minus 2019).
andmed$Käive.i = rowSums(andmed[c("Käive.i.2020")], na.rm = TRUE) - rowSums(andmed[c("Käive.i.2019")], na.rm = TRUE)
andmed$Käive.ii = rowSums(andmed[c("Käive.ii.2020")], na.rm = TRUE) - rowSums(andmed[c("Käive.ii.2019")], na.rm = TRUE)
my.andmed <- andmed %>%
  select(Registrikood, Käive.2019, Maksud.2019, Tööjõumaksud.2019, Töötajad.2019, Käive.i, Käive.ii)
# Load company data (EMTA 2020 Q2 extract); provides the data frame `data`.
load("~/Dokumendid/R/EMTA/2020_ii.rdata")
# Add supplementary attributes from the Business Register
# (name, type, VAT number, EMTAK activity code, county, city),
# keyed by registry code.
andmed <- left_join(my.andmed, data %>% select(Registrikood, Nimi, Liik, KMKR, EMTAK.kood, EMTAK, Maakond, Linn), by = "Registrikood")
data <- NULL
# Load Unemployment Insurance Fund (Töötukassa) wage-compensation data;
# provides `data.koond` with the registry codes of compensated companies.
load("~/Dokumendid/R/Töötukassa/koond.rdata")
# Totals per EMTAK activity code over ALL companies.
koond <- andmed %>%
  group_by(EMTAK.kood) %>%
  summarise(Töötajad = sum(Töötajad.2019), Maksud = sum(Maksud.2019), Tööjõumaksud = sum(Tööjõumaksud.2019))
# Restrict to companies that received the compensation, then total per EMTAK code.
hyvitis.koond <- left_join(data.koond, andmed, by = c("Registrikood"))
hyvitis.sum <- hyvitis.koond %>%
  group_by(EMTAK.kood) %>%
  summarise(Töötajad = sum(Töötajad.2019), Maksud = sum(Maksud.2019), Tööjõumaksud = sum(Tööjõumaksud.2019))
# Share of compensated companies within each EMTAK code
# (suffix "hyvitis" = compensated, "kokku" = total).
hyvitis.prop = left_join(hyvitis.sum, koond, by = c("EMTAK.kood"), suffix = c("hyvitis", "kokku"))
hyvitis.prop$töötajadpc = hyvitis.prop$Töötajadhyvitis/hyvitis.prop$Töötajadkokku
hyvitis.prop$maksudpc = hyvitis.prop$Maksudhyvitis/hyvitis.prop$Maksudkokku
hyvitis.prop$tööjõumaksudpc = hyvitis.prop$Tööjõumaksudhyvitis/hyvitis.prop$Tööjõumaksudkokku
write.csv(hyvitis.prop, "~/Dokumendid/R/Töötukassa/prop.csv")
| /koond/yld.R | no_license | AndresVork/Rita2 | R | false | false | 2,428 | r | ##############################################
# Üldandmed meedet kasutanud ettevõtte kohta
#
# (c) 2020 - Raoul Lättemäe
#
##############################################
library(tidyverse)
# Lae käibe- ja statistikanumbrid
load("~/Dokumendid/R/EMTA/2019-2020.rdata")
andmed$Käive.2019 = rowSums(andmed[,c("Käive.i.2019", "Käive.ii.2019", "Käive.iii.2019", "Käive.iv.2019")], na.rm = TRUE)
andmed$Maksud.2019 = rowSums(andmed[,c("Maksud.i.2019", "Maksud.ii.2019", "Maksud.iii.2019", "Maksud.iv.2019")], na.rm = TRUE)
andmed$Tööjõumaksud.2019 = rowSums(andmed[,c("Tööjõumaksud.i.2019", "Tööjõumaksud.ii.2019", "Tööjõumaksud.iii.2019", "Tööjõumaksud.iv.2019")], na.rm = TRUE)
andmed$Töötajad.2019 = rowSums(andmed[,c("Töötajad.i.2019", "Töötajad.ii.2019", "Töötajad.iii.2019", "Töötajad.iv.2019")], na.rm = TRUE)/4
andmed$Käive.i = rowSums(andmed[c("Käive.i.2020")], na.rm = TRUE) - rowSums(andmed[c("Käive.i.2019")], na.rm = TRUE)
andmed$Käive.ii = rowSums(andmed[c("Käive.ii.2020")], na.rm = TRUE) - rowSums(andmed[c("Käive.ii.2019")], na.rm = TRUE)
my.andmed <- andmed %>%
select(Registrikood, Käive.2019, Maksud.2019, Tööjõumaksud.2019, Töötajad.2019, Käive.i, Käive.ii)
# Lae ettevõtete andmed
load("~/Dokumendid/R/EMTA/2020_ii.rdata")
# Lisa Ettevõtete registrist täiendavad koodid
andmed <- left_join(my.andmed, data %>% select(Registrikood, Nimi, Liik, KMKR, EMTAK.kood, EMTAK, Maakond, Linn), by = "Registrikood")
data <- NULL
# lae töötukassa andmed
load("~/Dokumendid/R/Töötukassa/koond.rdata")
koond <- andmed %>%
group_by(EMTAK.kood) %>%
summarise(Töötajad = sum(Töötajad.2019), Maksud = sum(Maksud.2019), Tööjõumaksud = sum(Tööjõumaksud.2019))
hyvitis.koond <- left_join(data.koond, andmed, by = c("Registrikood"))
hyvitis.sum <- hyvitis.koond %>%
group_by(EMTAK.kood) %>%
summarise(Töötajad = sum(Töötajad.2019), Maksud = sum(Maksud.2019), Tööjõumaksud = sum(Tööjõumaksud.2019))
hyvitis.prop = left_join(hyvitis.sum, koond, by = c("EMTAK.kood"), suffix = c("hyvitis", "kokku"))
hyvitis.prop$töötajadpc = hyvitis.prop$Töötajadhyvitis/hyvitis.prop$Töötajadkokku
hyvitis.prop$maksudpc = hyvitis.prop$Maksudhyvitis/hyvitis.prop$Maksudkokku
hyvitis.prop$tööjõumaksudpc = hyvitis.prop$Tööjõumaksudhyvitis/hyvitis.prop$Tööjõumaksudkokku
write.csv(hyvitis.prop, "~/Dokumendid/R/Töötukassa/prop.csv")
|
# Unit tests for dplyr::slice(): row selection by positive/negative/NA/zero
# indices, grouped behavior, and edge-case inputs. Numbers like (#226) in
# the test names reference issues in the dplyr GitHub tracker.
context("slice")

test_that("slice handles numeric input (#226)", {
  g <- mtcars %>% group_by(cyl)
  res <- g %>% slice(1)
  expect_equal(nrow(res), 3)
  # slice(i) must agree with the equivalent row_number() filter
  expect_equal(res, g %>% filter(row_number() == 1L))
  expect_equal(
    mtcars %>% slice(1),
    mtcars %>% filter(row_number() == 1L)
  )
})

test_that("slice silently ignores out of range values (#226)", {
  expect_equal(slice(mtcars, c(2, 100)), slice(mtcars, 2))
  g <- group_by(mtcars, cyl)
  expect_equal(slice(g, c(2, 100)), slice(g, 2))
})

test_that("slice works with negative indices", {
  res <- slice(mtcars, -(1:2))
  exp <- tail(mtcars, -2)
  expect_equal(names(res), names(exp))
  # compare column-by-column to sidestep row-name differences
  for (col in names(res)) {
    expect_equal(res[[col]], exp[[col]])
  }
})

test_that("slice forbids positive and negative together", {
  expect_error(
    mtcars %>% slice(c(-1, 2)),
    "Found 1 positive indices and 1 negative indices",
    fixed = TRUE
  )
  expect_error(
    mtcars %>% slice(c(2:3, -1)),
    "Found 2 positive indices and 1 negative indices",
    fixed = TRUE
  )
})

test_that("slice works with grouped data", {
  g <- group_by(mtcars, cyl)
  res <- slice(g, 1:2)
  exp <- filter(g, row_number() < 3)
  expect_equal(res, exp)
  res <- slice(g, -(1:2))
  exp <- filter(g, row_number() >= 3)
  expect_equal(res, exp)
  # .preserve keeps groups that become empty after slicing
  g <- group_by(data.frame(x = c(1, 1, 2, 2, 2)), x)
  expect_equal(group_keys(slice(g, 3, .preserve = TRUE))$x, c(1, 2))
  expect_equal(group_keys(slice(g, 3, .preserve = FALSE))$x, 2)
})

test_that("slice gives correct rows (#649)", {
  a <- tibble(value = paste0("row", 1:10))
  expect_equal(slice(a, 1:3)$value, paste0("row", 1:3))
  expect_equal(slice(a, c(4, 6, 9))$value, paste0("row", c(4, 6, 9)))
  # grouped case: indices are interpreted within each group
  a <- tibble(
    value = paste0("row", 1:10),
    group = rep(1:2, each = 5)
  ) %>%
    group_by(group)
  expect_equal(slice(a, 1:3)$value, paste0("row", c(1:3, 6:8)))
  expect_equal(slice(a, c(2, 4))$value, paste0("row", c(2, 4, 7, 9)))
})

test_that("slice handles NA (#1235)", {
  df <- tibble(x = 1:3)
  expect_equal(nrow(slice(df, NA_integer_)), 0L)
  expect_equal(nrow(slice(df, c(1L, NA_integer_))), 1L)
  expect_equal(nrow(slice(df, c(-1L, NA_integer_))), 2L)
  df <- tibble(x = 1:4, g = rep(1:2, 2)) %>% group_by(g)
  expect_equal(nrow(slice(df, c(1, NA))), 2)
  expect_equal(nrow(slice(df, c(-1, NA))), 2)
})

test_that("slice handles logical NA (#3970)", {
  df <- tibble(x = 1:3)
  expect_equal(nrow(slice(df, NA)), 0L)
  # logical indices other than NA are rejected outright
  expect_error(slice(df, TRUE))
  expect_error(slice(df, FALSE))
})

test_that("slice handles empty data frames (#1219)", {
  df <- data.frame(x = numeric())
  res <- df %>% slice(1:3)
  expect_equal(nrow(res), 0L)
  expect_equal(names(res), "x")
})

test_that("slice works fine if n > nrow(df) (#1269)", {
  slice_res <- mtcars %>% group_by(cyl) %>% slice(8)
  filter_res <- mtcars %>% group_by(cyl) %>% filter(row_number() == 8)
  expect_equal(slice_res, filter_res)
})

test_that("slice strips grouped indices (#1405)", {
  res <- mtcars %>% group_by(cyl) %>% slice(1) %>% mutate(mpgplus = mpg + 1)
  expect_equal(nrow(res), 3L)
  expect_equal(group_rows(res), as.list(1:3))
})

test_that("slice works with zero-column data frames (#2490)", {
  expect_equal(
    tibble(a = 1:3) %>% select(-a) %>% slice(1) %>% nrow(),
    1L
  )
})

test_that("slice works under gctorture2", {
  x <- tibble(y = 1:10)
  with_gctorture2(999, x2 <- slice(x, 1:10))
  expect_identical(x, x2)
})

test_that("slice correctly computes positive indices from negative indices (#3073)", {
  x <- tibble(y = 1:10)
  expect_identical(slice(x, -10:-30), tibble(y = 1:9))
})

test_that("slice handles raw matrices", {
  df <- tibble(a = 1:4, b = matrix(as.raw(1:8), ncol = 2))
  expect_identical(
    slice(df, 1:2)$b,
    matrix(as.raw(c(1, 2, 5, 6)), ncol = 2)
  )
})

test_that("slice on ungrouped data.frame (not tibble) does not enforce tibble", {
  expect_equal(class(slice(mtcars, 2)), "data.frame")
  expect_equal(class(slice(mtcars, -2)), "data.frame")
  expect_equal(class(slice(mtcars, NA)), "data.frame")
})

test_that("slice skips 0 (#3313)", {
  d <- tibble(x = 1:5, y = LETTERS[1:5], g = 1)
  expect_identical(slice(d, 0), slice(d, integer(0)))
  expect_identical(slice(d, c(0, 1)), slice(d, 1))
  expect_identical(slice(d, c(0, 1, 2)), slice(d, c(1, 2)))
  expect_identical(slice(d, c(-1, 0)), slice(d, -1))
  expect_identical(slice(d, c(0, -1)), slice(d, -1))
  # same behavior on grouped data
  d <- group_by(d, g)
  expect_identical(slice(d, 0), slice(d, integer(0)))
  expect_identical(slice(d, c(0, 1)), slice(d, 1))
  expect_identical(slice(d, c(0, 1, 2)), slice(d, c(1, 2)))
  expect_identical(slice(d, c(-1, 0)), slice(d, -1))
  expect_identical(slice(d, c(0, -1)), slice(d, -1))
})

test_that("slice is not confused about dense groups (#3753)",{
  df <- tibble(row = 1:3)
  expect_equal(slice(df, c(2,1,3))$row, c(2L,1L,3L))
  expect_equal(slice(df, c(1,1,1))$row, rep(1L, 3))
})

test_that("slice accepts ... (#3804)", {
  expect_equal(slice(mtcars, 1, 2), slice(mtcars, 1:2))
  expect_equal(slice(mtcars, 1, n()), slice(mtcars, c(1, nrow(mtcars))))
  g <- mtcars %>% group_by(cyl)
  expect_equal(slice(g, 1, n()), slice(g, c(1, n())))
})

test_that("slice does not evaluate the expression in empty groups (#1438)", {
  res <- mtcars %>%
    group_by(cyl) %>%
    filter(cyl==6) %>%
    slice(1:2)
  expect_equal(nrow(res), 2L)
  # sample_n() on the same data must not signal any condition
  expect_condition(
    res <- mtcars %>% group_by(cyl) %>% filter(cyl==6) %>% sample_n(size=3),
    NA
  )
  expect_equal(nrow(res), 3L)
})

test_that("column_subset() falls back to R indexing on esoteric data types (#4128)", {
  res <- slice(tibble::enframe(formals(rnorm)), 2:3)
  expect_identical(res, tibble(name = c("mean", "sd"), value = list(0, 1)))
})
| /tests/testthat/test-slice.r | permissive | davan690/dplyr | R | false | false | 5,697 | r | context("slice")
test_that("slice handles numeric input (#226)", {
g <- mtcars %>% group_by(cyl)
res <- g %>% slice(1)
expect_equal(nrow(res), 3)
expect_equal(res, g %>% filter(row_number() == 1L))
expect_equal(
mtcars %>% slice(1),
mtcars %>% filter(row_number() == 1L)
)
})
test_that("slice silently ignores out of range values (#226)", {
expect_equal(slice(mtcars, c(2, 100)), slice(mtcars, 2))
g <- group_by(mtcars, cyl)
expect_equal(slice(g, c(2, 100)), slice(g, 2))
})
test_that("slice works with negative indices", {
res <- slice(mtcars, -(1:2))
exp <- tail(mtcars, -2)
expect_equal(names(res), names(exp))
for (col in names(res)) {
expect_equal(res[[col]], exp[[col]])
}
})
test_that("slice forbids positive and negative together", {
expect_error(
mtcars %>% slice(c(-1, 2)),
"Found 1 positive indices and 1 negative indices",
fixed = TRUE
)
expect_error(
mtcars %>% slice(c(2:3, -1)),
"Found 2 positive indices and 1 negative indices",
fixed = TRUE
)
})
test_that("slice works with grouped data", {
g <- group_by(mtcars, cyl)
res <- slice(g, 1:2)
exp <- filter(g, row_number() < 3)
expect_equal(res, exp)
res <- slice(g, -(1:2))
exp <- filter(g, row_number() >= 3)
expect_equal(res, exp)
g <- group_by(data.frame(x = c(1, 1, 2, 2, 2)), x)
expect_equal(group_keys(slice(g, 3, .preserve = TRUE))$x, c(1, 2))
expect_equal(group_keys(slice(g, 3, .preserve = FALSE))$x, 2)
})
test_that("slice gives correct rows (#649)", {
a <- tibble(value = paste0("row", 1:10))
expect_equal(slice(a, 1:3)$value, paste0("row", 1:3))
expect_equal(slice(a, c(4, 6, 9))$value, paste0("row", c(4, 6, 9)))
a <- tibble(
value = paste0("row", 1:10),
group = rep(1:2, each = 5)
) %>%
group_by(group)
expect_equal(slice(a, 1:3)$value, paste0("row", c(1:3, 6:8)))
expect_equal(slice(a, c(2, 4))$value, paste0("row", c(2, 4, 7, 9)))
})
test_that("slice handles NA (#1235)", {
df <- tibble(x = 1:3)
expect_equal(nrow(slice(df, NA_integer_)), 0L)
expect_equal(nrow(slice(df, c(1L, NA_integer_))), 1L)
expect_equal(nrow(slice(df, c(-1L, NA_integer_))), 2L)
df <- tibble(x = 1:4, g = rep(1:2, 2)) %>% group_by(g)
expect_equal(nrow(slice(df, c(1, NA))), 2)
expect_equal(nrow(slice(df, c(-1, NA))), 2)
})
test_that("slice handles logical NA (#3970)", {
df <- tibble(x = 1:3)
expect_equal(nrow(slice(df, NA)), 0L)
expect_error(slice(df, TRUE))
expect_error(slice(df, FALSE))
})
test_that("slice handles empty data frames (#1219)", {
df <- data.frame(x = numeric())
res <- df %>% slice(1:3)
expect_equal(nrow(res), 0L)
expect_equal(names(res), "x")
})
test_that("slice works fine if n > nrow(df) (#1269)", {
slice_res <- mtcars %>% group_by(cyl) %>% slice(8)
filter_res <- mtcars %>% group_by(cyl) %>% filter(row_number() == 8)
expect_equal(slice_res, filter_res)
})
test_that("slice strips grouped indices (#1405)", {
res <- mtcars %>% group_by(cyl) %>% slice(1) %>% mutate(mpgplus = mpg + 1)
expect_equal(nrow(res), 3L)
expect_equal(group_rows(res), as.list(1:3))
})
test_that("slice works with zero-column data frames (#2490)", {
expect_equal(
tibble(a = 1:3) %>% select(-a) %>% slice(1) %>% nrow(),
1L
)
})
test_that("slice works under gctorture2", {
x <- tibble(y = 1:10)
with_gctorture2(999, x2 <- slice(x, 1:10))
expect_identical(x, x2)
})
test_that("slice correctly computes positive indices from negative indices (#3073)", {
x <- tibble(y = 1:10)
expect_identical(slice(x, -10:-30), tibble(y = 1:9))
})
test_that("slice handles raw matrices", {
df <- tibble(a = 1:4, b = matrix(as.raw(1:8), ncol = 2))
expect_identical(
slice(df, 1:2)$b,
matrix(as.raw(c(1, 2, 5, 6)), ncol = 2)
)
})
test_that("slice on ungrouped data.frame (not tibble) does not enforce tibble", {
expect_equal(class(slice(mtcars, 2)), "data.frame")
expect_equal(class(slice(mtcars, -2)), "data.frame")
expect_equal(class(slice(mtcars, NA)), "data.frame")
})
test_that("slice skips 0 (#3313)", {
d <- tibble(x = 1:5, y = LETTERS[1:5], g = 1)
expect_identical(slice(d, 0), slice(d, integer(0)))
expect_identical(slice(d, c(0, 1)), slice(d, 1))
expect_identical(slice(d, c(0, 1, 2)), slice(d, c(1, 2)))
expect_identical(slice(d, c(-1, 0)), slice(d, -1))
expect_identical(slice(d, c(0, -1)), slice(d, -1))
d <- group_by(d, g)
expect_identical(slice(d, 0), slice(d, integer(0)))
expect_identical(slice(d, c(0, 1)), slice(d, 1))
expect_identical(slice(d, c(0, 1, 2)), slice(d, c(1, 2)))
expect_identical(slice(d, c(-1, 0)), slice(d, -1))
expect_identical(slice(d, c(0, -1)), slice(d, -1))
})
test_that("slice is not confused about dense groups (#3753)",{
df <- tibble(row = 1:3)
expect_equal(slice(df, c(2,1,3))$row, c(2L,1L,3L))
expect_equal(slice(df, c(1,1,1))$row, rep(1L, 3))
})
test_that("slice accepts ... (#3804)", {
expect_equal(slice(mtcars, 1, 2), slice(mtcars, 1:2))
expect_equal(slice(mtcars, 1, n()), slice(mtcars, c(1, nrow(mtcars))))
g <- mtcars %>% group_by(cyl)
expect_equal(slice(g, 1, n()), slice(g, c(1, n())))
})
test_that("slice does not evaluate the expression in empty groups (#1438)", {
res <- mtcars %>%
group_by(cyl) %>%
filter(cyl==6) %>%
slice(1:2)
expect_equal(nrow(res), 2L)
expect_condition(
res <- mtcars %>% group_by(cyl) %>% filter(cyl==6) %>% sample_n(size=3),
NA
)
expect_equal(nrow(res), 3L)
})
test_that("column_subset() falls back to R indexing on esoteric data types (#4128)", {
res <- slice(tibble::enframe(formals(rnorm)), 2:3)
expect_identical(res, tibble(name = c("mean", "sd"), value = list(0, 1)))
})
|
# Build a long-format table of Chinese ozone (O3) station measurements from
# yearly folders of daily CSV exports (2014-2020), attach station
# coordinates, and write the combined result to a single CSV.
library(plyr)
library(data.table)

# Station coordinates, joined onto every record by the common station column.
stcoord <- read.csv("C:/Users/Hector/Desktop/PhD/Pengfei/China_AQ_Data/coord_stations.csv",
                    header = TRUE)
# The four O3 summary variables present in the raw files.
O3vars <- c("O3", "O3_24h", "O3_8h", "O3_8h_24h")

Data <- list()
China <- list()
for (j in 1:7) {  # years 2014 .. 2020
  setwd(paste0("C:/Users/Hector/Desktop/PhD/Pengfei/China_AQ_Data/", as.character(2013 + j)))
  Data[[j]] <- list.files(pattern = "*.csv")
  Chi <- list()
  for (i in seq_along(Data[[j]])) {
    Chi[[i]] <- read.csv(Data[[j]][[i]], header = TRUE, check.names = FALSE)
    Chi[[i]] <- setDT(Chi[[i]])
  }
  # Stack all daily files of the year; fill = TRUE tolerates differing
  # station columns across days.
  China[[j]] <- do.call("rbind", c(Chi, fill = TRUE))
  ChiO3 <- list()
  ChiO3_ <- list()
  for (i in seq_along(O3vars)) {
    ChiO3[[i]] <- subset(China[[j]], type == O3vars[i])
    # First three columns are record metadata; the rest are one column
    # per station. Reshape wide -> long: one row per (station, record).
    columns <- ChiO3[[i]][, c(1, 2, 3)]
    temp1 <- data.frame(columns, i = rep(1:(ncol(ChiO3[[i]]) - 3), each = nrow(columns)))
    stations <- rep(colnames(ChiO3[[i]])[c(-1, -2, -3)], each = nrow(columns))
    Sta <- cbind(stations, temp1)
    Sta <- join(Sta, stcoord)  # plyr::join on the shared station column
    ChiO3[[i]] <- cbind(Sta, unlist(ChiO3[[i]][, 4:ncol(ChiO3[[i]])]))
    colnames(ChiO3[[i]])[8] <- paste(O3vars[i])
    # Drop columns 4-5 (the third metadata column and the helper index
    # `i`) — assumed positions, verify against the raw file layout.
    ChiO3_[[i]] <- ChiO3[[i]][-c(4, 5)]
  }
  # Merge the four O3 variables into one data frame for the year.
  China[[j]] <- Reduce(function(x, y) join(x, y), ChiO3_)
}
ChinaO3 <- do.call("rbind", China)
write.csv(ChinaO3, "C:/Users/Hector/Desktop/Pengfei/China_O3.csv", row.names = FALSE)
| /Ref_O3_data.R | no_license | hacamargoa/Ozone | R | false | false | 1,161 | r | library(plyr)
library(data.table)
stcoord<-read.csv("C:/Users/Hector/Desktop/PhD/Pengfei/China_AQ_Data/coord_stations.csv", h=T)
O3vars<-c("O3","O3_24h","O3_8h","O3_8h_24h")
Data<-list()
China<-list()
for (j in 1:7){
setwd(paste0("C:/Users/Hector/Desktop/PhD/Pengfei/China_AQ_Data/",as.character(2013+j)))
Data[[j]]<-list.files(pattern ="*.csv")
Chi<-list()
for (i in 1:length(Data[[j]])){
Chi[[i]]<-read.csv(Data[[j]][[i]],h=T,check.names = FALSE)
Chi[[i]]<-setDT(Chi[[i]])
}
China[[j]]<-do.call("rbind",c(Chi,fill=TRUE))
ChiO3<-list()
ChiO3_<-list()
for (i in 1:length(O3vars)){
ChiO3[[i]]<-subset(China[[j]], type==O3vars[i])
columns<-ChiO3[[i]][,c(1,2,3)]
temp1<-data.frame(columns, i=rep(1:(ncol(ChiO3[[i]])-3),ea=nrow(columns)))
stations<-rep(colnames(ChiO3[[i]])[c(-1,-2,-3)],each=nrow(columns))
Sta<-cbind(stations,temp1)
Sta<-join(Sta,stcoord)
ChiO3[[i]]<-cbind(Sta,unlist(ChiO3[[i]][,4:ncol(ChiO3[[i]])]));colnames(ChiO3[[i]])[8]<-paste(O3vars[i])
ChiO3_[[i]]<-ChiO3[[i]][-c(4,5)]
}
China[[j]]<-Reduce(function(x,y) join(x,y),ChiO3_)
}
ChinaO3<-do.call("rbind",China)
write.csv(ChinaO3,"C:/Users/Hector/Desktop/Pengfei/China_O3.csv",row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interest-rate-classes.R
\name{is_valid_compounding}
\alias{is_valid_compounding}
\alias{compounding}
\title{Compounding frequencies}
\usage{
is_valid_compounding(compounding)
}
\arguments{
\item{compounding}{a numeric vector representing the compounding frequency}
}
\value{
a flag (\code{TRUE} or \code{FALSE}) if all the supplied compounding
frequencies are supported.
}
\description{
A non-exported function that checks whether compounding frequency values
are supported.
}
\details{
Valid compounding values are:
\tabular{ll}{
\bold{Value} \tab \bold{Frequency} \cr
-1 \tab Simply, T-bill discounting \cr
0 \tab Simply \cr
1 \tab Annually \cr
2 \tab Semi-annually \cr
3 \tab Tri-annually \cr
4 \tab Quarterly \cr
6 \tab Bi-monthly \cr
12 \tab Monthly \cr
24 \tab Fortnightly \cr
52 \tab Weekly \cr
365 \tab Daily \cr
Inf \tab Continuously \cr
}
}
| /man/is_valid_compounding.Rd | no_license | farzadwp/fmbasics | R | false | true | 1,292 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interest-rate-classes.R
\name{is_valid_compounding}
\alias{is_valid_compounding}
\alias{compounding}
\title{Compounding frequencies}
\usage{
is_valid_compounding(compounding)
}
\arguments{
\item{compounding}{a numeric vector representing the compounding frequency}
}
\value{
a flag (\code{TRUE} or \code{FALSE}) if all the supplied compounding
frequencies are supported.
}
\description{
A non-exported function that checks whether compounding frequency values
are supported.
}
\details{
Valid compounding values are:
\tabular{ll}{
\bold{Value} \tab \bold{Frequency} \cr
-1 \tab Simply, T-bill discounting \cr
0 \tab Simply \cr
1 \tab Annually \cr
2 \tab Semi-annually \cr
3 \tab Tri-annually \cr
4 \tab Quarterly \cr
6 \tab Bi-monthly \cr
12 \tab Monthly \cr
24 \tab Fortnightly \cr
52 \tab Weekly \cr
365 \tab Daily \cr
Inf \tab Continuously \cr
}
}
|
# # ReadGenTest6AxData20151222_.R
# # Description: To be rewrite for Upclass model VVI or VVV ...
# # Note: run the first half of 'mainUpclass6AxCat6.R', add testing data to sslUnlabeledData for upclass
.totalNumberOfRows <- 12000
.columnNamesFor6Ax <- c("aX","aY","aZ", "gX","gY","gZ")
# # read raw data from sqlite files, 6Ax raw data read
# 1)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_RUNNING_football_160109T082912.db") # 12472 samples
# 2) this case requires some extra care
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_handinpocket_160111T084156.db") # 12854 samples
# 3) first half with hand in pocket (still recognized as 'running'), second half release hand and dangle in the air (can be recognized)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_1stHALFhandinpocketAND2ndHALFnormal_160111T120515.db") # 12757 samples
# 4)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_BIKING_diffbike_160111T132648.db") # 12726 samples
# 5) walk with high rised arms
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_armrise_160112T084959.db") # 12799 samples
# 6) biking with left hand dangle in the air
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_BIKING_handdangle_160112T120402.db") # 12620 samples
# 7)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_DRIVING_handonlap_160112T215529.db") # 12823 samples
# 8)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_DRIVING_handonlap_160112T222540.db") # 12937 samples
# 9) standing before window in NLC
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_STILL_standing_160113T155010.db") # 12937 samples
# 10) walking - hand up in the air and eating
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_eatwhandinair_160115T090340.db") # 12929 samples
.raw <- dbGetQuery(conn=.connectToDb, statement="SELECT * FROM measurements")
.rawData <- as.matrix(.raw)[81:(.totalNumberOfRows+80), c(3,4,5,7,8,9)] # discard _id and timestamps
colnames(.rawData) <- .columnNamesFor6Ax
# # preprocessing, related function has been loaded in 'mainUpclass6AxCat6.R'
# # activityCategories is already defined in 'mainE1071SvmCrossValid6Ax.R'
# # segmentation and feature extraction for 6Ax data
source("Segmentation.R")
source("FeatureExtraction.R")
# # mean, sd, median, dominant freq, spectral centroid, spectral energy, XYZ correlation
# # note the features are subject to change in the future
.rawFeatures <- c("mean", "sd", "median", "sigfreq", "speccen", "specengy", "cor")
.accAxes <- c("aX", "aY", "aZ")
.nameOfFeatures <- paste(rep(.rawFeatures, each = (length(.accAxes))), .accAxes, sep = "_")
.numberOfFeatures <- length(.nameOfFeatures)
.nrowSegment <- 64L # overlap 50% by default
# # derive feature vector with 3Ax measurements
# Derive the feature vector for one sensor segment (a 64 x 6 matrix per
# the segmentation settings above).
# Only the accelerometer columns (1:3 = aX, aY, aZ) are used; the
# gyroscope columns are deliberately ignored, matching the acc-only
# feature names in .nameOfFeatures.
# The per-axis feature matrix is flattened to a single vector via
# func_convert_features_mat2vec (presumably defined in
# FeatureExtraction.R, sourced above — verify).
.func_derive_feature_vector <- function(segmentMatrix) {
  return(func_convert_features_mat2vec(
    cbind(
      func_feature_derive_mean(segmentMatrix[, 1:3]), # for acc features only
      func_feature_derive_sd(segmentMatrix[, 1:3]),
      func_feature_derive_median(segmentMatrix[, 1:3]),
      func_feature_derive_sigfreq(segmentMatrix[, 1:3]),
      func_feature_derive_speccen(segmentMatrix[, 1:3]),
      func_feature_derive_specengy(segmentMatrix[, 1:3]),
      func_feature_derive_cor(segmentMatrix[, 1:3])
    )))
}
# # segmentation function(generator)
.rawData.segment <- segment.gen(.rawData, lengthOfSegment = .nrowSegment)
# # estimate the row number of .bikingFeatureMatrix
.nrowFeatureMatrix <- nrow(.rawData) %/% (.nrowSegment/2) - 1 # the last segment needs second half to meet 64 in total, so -1
.featureMatrix <- matrix(nrow = .nrowFeatureMatrix, ncol = .numberOfFeatures)
for (i in 1:.nrowFeatureMatrix) {
.segment <- .rawData.segment()
if (is.null(.segment)) {
.featureMatrix <- .featureMatrix[1:(i-1),]
message(paste("calculate .nrowFeatureMatrix wrong, = ", .nrowFeatureMatrix, sep=""))
break;
} else {
.featureMatrix[i, ] <- .func_derive_feature_vector(.segment)
}
}
# # put features as column names
colnames(.featureMatrix) <- .nameOfFeatures
# # convert .featureMatrix to data.frame without labelling
.featureDataframeNoLabel <- as.data.frame(.featureMatrix)
# # .. .featureDataframe is for further testing
testDataSet <- .featureDataframeNoLabel
# # transform combinedFeatureDfForTesting into PCA domain with proper scaling
testDataSetAfterScaling <- as.data.frame(
(
(
as.matrix(testDataSet) - matrix(pcaForComboFeatureDf$center, nrow = nrow(testDataSet), ncol = length(pcaForComboFeatureDf$center), byrow = TRUE)
)
/ matrix(pcaForComboFeatureDf$scale, nrow = nrow(testDataSet), ncol = length(pcaForComboFeatureDf$scale), byrow = TRUE)
) %*% (pcaForComboFeatureDf$rotation)
)
# # prediction
res.libsvm <- predict(bestmod, testDataSetAfterScaling)
table(Predict = res.libsvm) | /DataSets/Testing/ReadGenTest6AxData20151222_.R | no_license | jamescfli/R_HAR_project_bySSL | R | false | false | 5,265 | r | # # ReadGenTest6AxData20151222_.R
# # Description: To be rewrite for Upclass model VVI or VVV ...
# # Note: run the first half of 'mainUpclass6AxCat6.R', add testing data to sslUnlabeledData for upclass
.totalNumberOfRows <- 12000
.columnNamesFor6Ax <- c("aX","aY","aZ", "gX","gY","gZ")
# # read raw data from sqlite files, 6Ax raw data read
# 1)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_RUNNING_football_160109T082912.db") # 12472 samples
# 2) this case requires some extra care
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_handinpocket_160111T084156.db") # 12854 samples
# 3) first half with hand in pocket (still recognized as 'running'), second half release hand and dangle in the air (can be recognized)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_1stHALFhandinpocketAND2ndHALFnormal_160111T120515.db") # 12757 samples
# 4)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_BIKING_diffbike_160111T132648.db") # 12726 samples
# 5) walk with high rised arms
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_armrise_160112T084959.db") # 12799 samples
# 6) biking with left hand dangle in the air
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_BIKING_handdangle_160112T120402.db") # 12620 samples
# 7)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_DRIVING_handonlap_160112T215529.db") # 12823 samples
# 8)
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_DRIVING_handonlap_160112T222540.db") # 12937 samples
# 9) standing before window in NLC
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_STILL_standing_160113T155010.db") # 12937 samples
# 10) walking - hand up in the air and eating
.connectToDb <- dbConnect(drv=RSQLite::SQLite(), dbname="DataSets/Testing/JAMES_3_measure_WALKING_eatwhandinair_160115T090340.db") # 12929 samples
.raw <- dbGetQuery(conn=.connectToDb, statement="SELECT * FROM measurements")
.rawData <- as.matrix(.raw)[81:(.totalNumberOfRows+80), c(3,4,5,7,8,9)] # discard _id and timestamps
colnames(.rawData) <- .columnNamesFor6Ax
# # preprocessing, related function has been loaded in 'mainUpclass6AxCat6.R'
# # activityCategories is already defined in 'mainE1071SvmCrossValid6Ax.R'
# # segmentation and feature extraction for 6Ax data
source("Segmentation.R")
source("FeatureExtraction.R")
# # mean, sd, median, dominant freq, spectral centroid, spectral energy, XYZ correlation
# # note the features are subject to change in the future
.rawFeatures <- c("mean", "sd", "median", "sigfreq", "speccen", "specengy", "cor")
.accAxes <- c("aX", "aY", "aZ")
.nameOfFeatures <- paste(rep(.rawFeatures, each = (length(.accAxes))), .accAxes, sep = "_")
.numberOfFeatures <- length(.nameOfFeatures)
.nrowSegment <- 64L # overlap 50% by default
# # derive feature vector with 3Ax measurements
.func_derive_feature_vector <- function(segmentMatrix) {
return(func_convert_features_mat2vec(
cbind(
func_feature_derive_mean(segmentMatrix[, 1:3]), # for acc features only
func_feature_derive_sd(segmentMatrix[, 1:3]),
func_feature_derive_median(segmentMatrix[, 1:3]),
func_feature_derive_sigfreq(segmentMatrix[, 1:3]),
func_feature_derive_speccen(segmentMatrix[, 1:3]),
func_feature_derive_specengy(segmentMatrix[, 1:3]),
func_feature_derive_cor(segmentMatrix[, 1:3])
)))
}
# # segmentation function(generator)
.rawData.segment <- segment.gen(.rawData, lengthOfSegment = .nrowSegment)
# # estimate the row number of .bikingFeatureMatrix
.nrowFeatureMatrix <- nrow(.rawData) %/% (.nrowSegment/2) - 1 # the last segment needs second half to meet 64 in total, so -1
.featureMatrix <- matrix(nrow = .nrowFeatureMatrix, ncol = .numberOfFeatures)
for (i in 1:.nrowFeatureMatrix) {
.segment <- .rawData.segment()
if (is.null(.segment)) {
.featureMatrix <- .featureMatrix[1:(i-1),]
message(paste("calculate .nrowFeatureMatrix wrong, = ", .nrowFeatureMatrix, sep=""))
break;
} else {
.featureMatrix[i, ] <- .func_derive_feature_vector(.segment)
}
}
# # put features as column names
colnames(.featureMatrix) <- .nameOfFeatures
# # convert .featureMatrix to data.frame without labelling
.featureDataframeNoLabel <- as.data.frame(.featureMatrix)
# # .. .featureDataframe is for further testing
testDataSet <- .featureDataframeNoLabel
# # transform combinedFeatureDfForTesting into PCA domain with proper scaling
testDataSetAfterScaling <- as.data.frame(
(
(
as.matrix(testDataSet) - matrix(pcaForComboFeatureDf$center, nrow = nrow(testDataSet), ncol = length(pcaForComboFeatureDf$center), byrow = TRUE)
)
/ matrix(pcaForComboFeatureDf$scale, nrow = nrow(testDataSet), ncol = length(pcaForComboFeatureDf$scale), byrow = TRUE)
) %*% (pcaForComboFeatureDf$rotation)
)
# # prediction
res.libsvm <- predict(bestmod, testDataSetAfterScaling)
table(Predict = res.libsvm) |
#calculating power example
mu0 = 30
mua = 32
sigma = 4
n = 16
alpha = 0.05
z <- qnorm(1-alpha)
mu0test <- pnorm(mu0 + z * sigma/sqrt(n), mean = mu0, sd = sigma/sqrt(n),lower.tail = FALSE)
#result: 0.05
muatest <- pnorm(mu0 + z * sigma/sqrt(n), mean = mua, sd = sigma/sqrt(n),lower.tail = FALSE)
#result: 0.63876
#huge difference!
#plotting the power curve
library(ggplot2)
library(reshape2)
nseq = c(8, 16, 32, 64, 128) #various n values
mua = seq(30, 35, by = 0.1)
z = qnorm(.95)
power = sapply(nseq, function(n)
pnorm(mu0 + z * sigma / sqrt(n), mean = mua, sd = sigma / sqrt(n),
lower.tail = FALSE)
)
colnames(power) <- paste("n", nseq, sep = "")
d <- data.frame(mua, power)
d2 <- melt(d, id.vars = "mua")
names(d2) <- c("mua", "n", "power")
g <- ggplot(d2,
aes(x = mua, y = power, col = n)) + geom_line(size = 2)
g
#example using manipulate
library(manipulate)
mu0 = 30
#function elements:
#sigma: standard deviation
#mua: mean under alternative
#n: sample size
#alpha: type I error rate
#uses values from previous examples
#Plot: compares the variance of null and alternate
#hypotheses
#line is where to reject the null hypothesis
#power is probability of getting larger than the black line
myplot <- function(sigma, mua, n, alpha){
g = ggplot(data.frame(mu = c(27, 36)), aes(x = mu))
g = g + stat_function(fun=dnorm, geom = "line",
args = list(mean = mu0, sd = sigma / sqrt(n)),
size = 2, col = "red")
g = g + stat_function(fun=dnorm, geom = "line",
args = list(mean = mua, sd = sigma / sqrt(n)),
size = 2, col = "blue")
xitc = mu0 + qnorm(1 - alpha) * sigma / sqrt(n)
g = g + geom_vline(xintercept=xitc, size = 3)
g
}
#slider allows you to vary the different parameters
manipulate(
myplot(sigma, mua, n, alpha),
sigma = slider(1, 10, step = 1, initial = 4),
mua = slider(30, 35, step = 1, initial = 32),
n = slider(1, 50, step = 1, initial = 16),
alpha = slider(0.01, 0.1, step = 0.01, initial = 0.05)
)
#make an alternative version
manipulate(
myplot(sigma, mua, n, alpha),
sigma = slider(2, 12, step = 1, initial = 3),
mua = slider(30, 35, step = 1, initial = 33),
n = slider(1, 50, step = 1.5, initial = 20),
alpha = slider(0.02, 0.1, step = 0.01, initial = 0.08)
)
#Consider calculating power for a Gossett's T test for our example
#The power is P(((Xbar - mu_0)/(s / sqrt(n)) > t{1-alpha, n-1} ; mu = mu_a)
#Calculating this requires the non-central t
#distribution.
#power.t.test does this very well
#Omit one of the arguments and it solves for it
test_statistic = ((Xbar - mu_0)/(s / sqrt(n)))
power.t.test(n = 16, delta = 2 / 4, sd=1, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 16, delta = 2, sd=4, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 16, delta = 100, sd=200, type = "one.sample", alt = "one.sided")$power
#power = 0.6040329 (for all three!)
#what this shows is that power is affected by the effect size
#alternate version
power.t.test(n = 16, delta = 50, sd=250, type = "one.sample", alt = "one.sided")$power
#power = 0.1892608
#example: calculate sample size
power.t.test(power = .8, delta = 2 / 4, sd=1, type = "one.sample", alt = "one.sided")$n
power.t.test(power = .8, delta = 2, sd=4, type = "one.sample", alt = "one.sided")$n
power.t.test(power = .8, delta = 100, sd=200, type = "one.sample", alt = "one.sided")$n
#sample size (n) = 26.13751 (for all three!)
power.t.test(power = .75, delta = 2, sd=4, type = "one.sample", alt = "one.sided")$n
#sample size(n) = 22.92961
#power.t.test is a great way to find out the different elements of a power calculation
#try to make power calculations as simple as possible | /stats_inference/wk4/power.R | no_license | randallhelms/datasciencecoursera | R | false | false | 3,857 | r | #calculating power example
mu0 = 30
mua = 32
sigma = 4
n = 16
alpha = 0.05
z <- qnorm(1-alpha)
mu0test <- pnorm(mu0 + z * sigma/sqrt(n), mean = mu0, sd = sigma/sqrt(n),lower.tail = FALSE)
#result: 0.05
muatest <- pnorm(mu0 + z * sigma/sqrt(n), mean = mua, sd = sigma/sqrt(n),lower.tail = FALSE)
#result: 0.63876
#huge difference!
#plotting the power curve
library(ggplot2)
library(reshape2)
nseq = c(8, 16, 32, 64, 128) #various n values
mua = seq(30, 35, by = 0.1)
z = qnorm(.95)
power = sapply(nseq, function(n)
pnorm(mu0 + z * sigma / sqrt(n), mean = mua, sd = sigma / sqrt(n),
lower.tail = FALSE)
)
colnames(power) <- paste("n", nseq, sep = "")
d <- data.frame(mua, power)
d2 <- melt(d, id.vars = "mua")
names(d2) <- c("mua", "n", "power")
g <- ggplot(d2,
aes(x = mua, y = power, col = n)) + geom_line(size = 2)
g
#example using manipulate
library(manipulate)
mu0 = 30
#function elements:
#sigma: standard deviation
#mua: mean under alternative
#n: sample size
#alpha: type I error rate
#uses values from previous examples
#Plot: compares the variance of null and alternate
#hypotheses
#line is where to reject the null hypothesis
#power is probability of getting larger than the black line
myplot <- function(sigma, mua, n, alpha){
g = ggplot(data.frame(mu = c(27, 36)), aes(x = mu))
g = g + stat_function(fun=dnorm, geom = "line",
args = list(mean = mu0, sd = sigma / sqrt(n)),
size = 2, col = "red")
g = g + stat_function(fun=dnorm, geom = "line",
args = list(mean = mua, sd = sigma / sqrt(n)),
size = 2, col = "blue")
xitc = mu0 + qnorm(1 - alpha) * sigma / sqrt(n)
g = g + geom_vline(xintercept=xitc, size = 3)
g
}
#slider allows you to vary the different parameters
manipulate(
myplot(sigma, mua, n, alpha),
sigma = slider(1, 10, step = 1, initial = 4),
mua = slider(30, 35, step = 1, initial = 32),
n = slider(1, 50, step = 1, initial = 16),
alpha = slider(0.01, 0.1, step = 0.01, initial = 0.05)
)
#make an alternative version
manipulate(
myplot(sigma, mua, n, alpha),
sigma = slider(2, 12, step = 1, initial = 3),
mua = slider(30, 35, step = 1, initial = 33),
n = slider(1, 50, step = 1.5, initial = 20),
alpha = slider(0.02, 0.1, step = 0.01, initial = 0.08)
)
#Consider calculating power for a Gossett's T test for our example
#The power is P(((Xbar - mu_0)/(s / sqrt(n)) > t{1-alpha, n-1} ; mu = mu_a)
#Calculating this requires the non-central t
#distribution.
#power.t.test does this very well
#Omit one of the arguments and it solves for it
test_statistic = ((Xbar - mu_0)/(s / sqrt(n)))
power.t.test(n = 16, delta = 2 / 4, sd=1, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 16, delta = 2, sd=4, type = "one.sample", alt = "one.sided")$power
power.t.test(n = 16, delta = 100, sd=200, type = "one.sample", alt = "one.sided")$power
#power = 0.6040329 (for all three!)
#what this shows is that power is affected by the effect size
#alternate version
power.t.test(n = 16, delta = 50, sd=250, type = "one.sample", alt = "one.sided")$power
#power = 0.1892608
#example: calculate sample size
power.t.test(power = .8, delta = 2 / 4, sd=1, type = "one.sample", alt = "one.sided")$n
power.t.test(power = .8, delta = 2, sd=4, type = "one.sample", alt = "one.sided")$n
power.t.test(power = .8, delta = 100, sd=200, type = "one.sample", alt = "one.sided")$n
#sample size (n) = 26.13751 (for all three!)
power.t.test(power = .75, delta = 2, sd=4, type = "one.sample", alt = "one.sided")$n
#sample size(n) = 22.92961
#power.t.test is a great way to find out the different elements of a power calculation
#try to make power calculations as simple as possible |
## File Name: rasch_mml2_prior_information.R
## File Version: 0.04
rasch_mml2_prior_information <- function(prior.a, prior.b, prior.c,
prior.d)
{
a <- rasch_mml2_prior_information_generate_string(prior=prior.a,
distribution="N")
b <- rasch_mml2_prior_information_generate_string(prior=prior.b,
distribution="N")
c <- rasch_mml2_prior_information_generate_string(prior=prior.c,
distribution="Beta")
d <- rasch_mml2_prior_information_generate_string(prior=prior.d,
distribution="Beta")
priors <- list( a=a, b=b, c=c, d=d)
return(priors)
}
| /R/rasch_mml2_prior_information.R | no_license | cran/sirt | R | false | false | 665 | r | ## File Name: rasch_mml2_prior_information.R
## File Version: 0.04
rasch_mml2_prior_information <- function(prior.a, prior.b, prior.c,
prior.d)
{
a <- rasch_mml2_prior_information_generate_string(prior=prior.a,
distribution="N")
b <- rasch_mml2_prior_information_generate_string(prior=prior.b,
distribution="N")
c <- rasch_mml2_prior_information_generate_string(prior=prior.c,
distribution="Beta")
d <- rasch_mml2_prior_information_generate_string(prior=prior.d,
distribution="Beta")
priors <- list( a=a, b=b, c=c, d=d)
return(priors)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ega_to_tidygraph.R
\name{ega_to_tidygraph}
\alias{ega_to_tidygraph}
\title{Convert EGAnet objects to tidygraph}
\usage{
ega_to_tidygraph(ega)
}
\arguments{
\item{ega}{An EGA object.}
}
\description{
Converts an EGA object to the input required by \code{tidygraph::tbl_graph()}
so that it can be plotted using \code{ggraph}.
}
\examples{
\dontrun{
library(tidygraph)
library(ggraph)
ega.wmt <- EGA(wmt2[,7:24], plot.EGA = FALSE)
x <- ega_to_tidygraph(ega.wmt)
graph <- tidygraph::tbl_graph(nodes = x$nodes, edges = x$edges)
ggraph::ggraph(graph) +
geom_edge_link(aes(colour = link)) +
geom_node_point(aes(colour = dimension))
}
}
| /man/ega_to_tidygraph.Rd | no_license | DominiqueMakowski/EGAnet | R | false | true | 730 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ega_to_tidygraph.R
\name{ega_to_tidygraph}
\alias{ega_to_tidygraph}
\title{Convert EGAnet objects to tidygraph}
\usage{
ega_to_tidygraph(ega)
}
\arguments{
\item{ega}{An EGA object.}
}
\description{
Converts an EGA object to the input required by \code{tidygraph::tbl_graph()}
so that it can be plotted using \code{ggraph}.
}
\examples{
\dontrun{
library(tidygraph)
library(ggraph)
ega.wmt <- EGA(wmt2[,7:24], plot.EGA = FALSE)
x <- ega_to_tidygraph(ega.wmt)
graph <- tidygraph::tbl_graph(nodes = x$nodes, edges = x$edges)
ggraph::ggraph(graph) +
geom_edge_link(aes(colour = link)) +
geom_node_point(aes(colour = dimension))
}
}
|
#Author: mw201608
calc_msbb_array_deg=function(eset,trait=c('CDR','Braak','CERAD','PLQ_Mn','NPrSum','NTrSum'),n.permute){
#eset, an expressionSet object
#n.permute, number of permutations
library(limma)
library(Biobase)
trait=match.arg(trait)
#
stages=c('Normal','Low','High')
AD=stagingAD(eset,trait)
eset=eset[,!is.na(AD)]
AD=AD[!is.na(AD)]
AD=stages[AD+1]
stages=stages[stages %in% names(table(AD)[table(AD) >= 3])]
eset=eset[,AD %in% stages]
AD=AD[AD %in% stages]
f <- ordered(AD, levels=stages)
design <- model.matrix(~0+f)
colnames(design) <- stages
fit <- lmFit(eset, design)
if(all(c('Normal','Low','High') %in% stages)){
iStages=1
cont <- makeContrasts(LowvsNormal="Low-Normal",HighvsNormal="High-Normal",HighvsLow="High-Low",levels=design)
}else if(all(c('Normal','Low') %in% stages)){
iStages=2
cont <- makeContrasts(LowvsNormal="Low-Normal",levels=design)
}else if(all(c('Normal','High') %in% stages)){
iStages=3
cont <- makeContrasts(HighvsNormal="High-Normal",levels=design)
}else if(all(c('Low','High') %in% stages)){
iStages=4
cont <- makeContrasts(HighvsLow="High-Low",levels=design)
}
fit2 <- contrasts.fit(fit, cont)
fit2 <- eBayes(fit2)
sig1=sig2=sig3=NULL
if(iStages==1 || iStages==2) sig1=cbind(topTable(fit2, coef='LowvsNormal', adjust="BH",n= Inf),contrast="Low-Normal")
if(iStages==1 || iStages==3) sig2=cbind(topTable(fit2, coef='HighvsNormal', adjust="BH",n= Inf),contrast="High-Normal")
if(iStages==1 || iStages==4) sig3=cbind(topTable(fit2, coef='HighvsLow', adjust="BH",n= Inf),contrast="High-Low")
if(n.permute>0){
set.seed(12345)
tperm=lapply(1:n.permute,function(i){
f0 <- sample(f,length(f),FALSE)
design <- model.matrix(~0+f0)
colnames(design) <- stages
fit0 <- lmFit(eset, design)
if(iStages==1){
cont <- makeContrasts(LowvsNormal="Low-Normal",HighvsNormal="High-Normal",HighvsLow="High-Low",levels=design)
}else if(iStages==2){
cont <- makeContrasts(LowvsNormal="Low-Normal",levels=design)
}else if(iStages==3){
cont <- makeContrasts(HighvsNormal="High-Normal",levels=design)
}else if(iStages==4){
cont <- makeContrasts(HighvsLow="High-Low",levels=design)
}
fit0 <- contrasts.fit(fit0, cont)
fit0 <- eBayes(fit0)
fit0$t
})
tperm=do.call(rbind,tperm)
if(iStages==1 || iStages==2) sig1$FDR=getFDR(abs(sig1$t),abs(tperm[,'LowvsNormal']))
if(iStages==1 || iStages==3) sig2$FDR=getFDR(abs(sig2$t),abs(tperm[,'HighvsNormal']))
if(iStages==1 || iStages==4) sig3$FDR=getFDR(abs(sig3$t),abs(tperm[,'HighvsLow']))
}
sig=rbind(sig1,sig2,sig3)
return(sig)
}
#group samples into low, medium and severe categories
stagingAD=function(eset,trait){
if(trait=='CDR'){
#0: 0; 1: 0.5,1,2; 2: 3~5
AD=ifelse(eset$CDR>0,1,0)
AD=AD+ifelse(eset$CDR>2,1,0)
}else if (trait=='Braak'){
#0: 0~2; 1: 3~4; 2: 5-6 #0 is merged with 1-2 because there is none or no more than two 0 in each region
AD=ifelse(eset$Braak>2,1,0)
AD=AD+ifelse(eset$Braak>5,1,0)
}else if(trait=='CERAD'){
#0: 1; 1: 3-4; 2: 2 #1=normal, 2=definite AD, 3=Probable AD, 4=possible AD
AD=ifelse(eset$CERAD>1,1,0)
AD=AD+ifelse(eset$CERAD==2,1,0)
}else if(trait=='PLQ_Mn'){
#0: 0; 1: 1~9; 2: > 9
AD=ifelse(eset$PLQ_Mn>0,1,0)
AD=AD+ifelse(eset$PLQ_Mn>9,1,0)
}else if(trait=='NPrSum'){
#0: 0; 1: 1~17; 2: > 17
AD=ifelse(eset$NPrSum>0,1,0)
AD=AD+ifelse(eset$NPrSum>17,1,0)
}else if(trait=='NTrSum'){
#0: 0~2; 1: 2~10; 2: > 10
AD=ifelse(eset$NTrSum>0,1,0)
AD=AD+ifelse(eset$NTrSum>10,1,0)
}
AD
}
| /compute_DEGs.R | no_license | mw201608/AD_pan_cortical_transcriptomics | R | false | false | 3,522 | r | #Author: mw201608
calc_msbb_array_deg=function(eset,trait=c('CDR','Braak','CERAD','PLQ_Mn','NPrSum','NTrSum'),n.permute){
#eset, an expressionSet object
#n.permute, number of permutations
library(limma)
library(Biobase)
trait=match.arg(trait)
#
stages=c('Normal','Low','High')
AD=stagingAD(eset,trait)
eset=eset[,!is.na(AD)]
AD=AD[!is.na(AD)]
AD=stages[AD+1]
stages=stages[stages %in% names(table(AD)[table(AD) >= 3])]
eset=eset[,AD %in% stages]
AD=AD[AD %in% stages]
f <- ordered(AD, levels=stages)
design <- model.matrix(~0+f)
colnames(design) <- stages
fit <- lmFit(eset, design)
if(all(c('Normal','Low','High') %in% stages)){
iStages=1
cont <- makeContrasts(LowvsNormal="Low-Normal",HighvsNormal="High-Normal",HighvsLow="High-Low",levels=design)
}else if(all(c('Normal','Low') %in% stages)){
iStages=2
cont <- makeContrasts(LowvsNormal="Low-Normal",levels=design)
}else if(all(c('Normal','High') %in% stages)){
iStages=3
cont <- makeContrasts(HighvsNormal="High-Normal",levels=design)
}else if(all(c('Low','High') %in% stages)){
iStages=4
cont <- makeContrasts(HighvsLow="High-Low",levels=design)
}
fit2 <- contrasts.fit(fit, cont)
fit2 <- eBayes(fit2)
sig1=sig2=sig3=NULL
if(iStages==1 || iStages==2) sig1=cbind(topTable(fit2, coef='LowvsNormal', adjust="BH",n= Inf),contrast="Low-Normal")
if(iStages==1 || iStages==3) sig2=cbind(topTable(fit2, coef='HighvsNormal', adjust="BH",n= Inf),contrast="High-Normal")
if(iStages==1 || iStages==4) sig3=cbind(topTable(fit2, coef='HighvsLow', adjust="BH",n= Inf),contrast="High-Low")
if(n.permute>0){
set.seed(12345)
tperm=lapply(1:n.permute,function(i){
f0 <- sample(f,length(f),FALSE)
design <- model.matrix(~0+f0)
colnames(design) <- stages
fit0 <- lmFit(eset, design)
if(iStages==1){
cont <- makeContrasts(LowvsNormal="Low-Normal",HighvsNormal="High-Normal",HighvsLow="High-Low",levels=design)
}else if(iStages==2){
cont <- makeContrasts(LowvsNormal="Low-Normal",levels=design)
}else if(iStages==3){
cont <- makeContrasts(HighvsNormal="High-Normal",levels=design)
}else if(iStages==4){
cont <- makeContrasts(HighvsLow="High-Low",levels=design)
}
fit0 <- contrasts.fit(fit0, cont)
fit0 <- eBayes(fit0)
fit0$t
})
tperm=do.call(rbind,tperm)
if(iStages==1 || iStages==2) sig1$FDR=getFDR(abs(sig1$t),abs(tperm[,'LowvsNormal']))
if(iStages==1 || iStages==3) sig2$FDR=getFDR(abs(sig2$t),abs(tperm[,'HighvsNormal']))
if(iStages==1 || iStages==4) sig3$FDR=getFDR(abs(sig3$t),abs(tperm[,'HighvsLow']))
}
sig=rbind(sig1,sig2,sig3)
return(sig)
}
#group samples into low, medium and severe categories
stagingAD=function(eset,trait){
if(trait=='CDR'){
#0: 0; 1: 0.5,1,2; 2: 3~5
AD=ifelse(eset$CDR>0,1,0)
AD=AD+ifelse(eset$CDR>2,1,0)
}else if (trait=='Braak'){
#0: 0~2; 1: 3~4; 2: 5-6 #0 is merged with 1-2 because there is none or no more than two 0 in each region
AD=ifelse(eset$Braak>2,1,0)
AD=AD+ifelse(eset$Braak>5,1,0)
}else if(trait=='CERAD'){
#0: 1; 1: 3-4; 2: 2 #1=normal, 2=definite AD, 3=Probable AD, 4=possible AD
AD=ifelse(eset$CERAD>1,1,0)
AD=AD+ifelse(eset$CERAD==2,1,0)
}else if(trait=='PLQ_Mn'){
#0: 0; 1: 1~9; 2: > 9
AD=ifelse(eset$PLQ_Mn>0,1,0)
AD=AD+ifelse(eset$PLQ_Mn>9,1,0)
}else if(trait=='NPrSum'){
#0: 0; 1: 1~17; 2: > 17
AD=ifelse(eset$NPrSum>0,1,0)
AD=AD+ifelse(eset$NPrSum>17,1,0)
}else if(trait=='NTrSum'){
#0: 0~2; 1: 2~10; 2: > 10
AD=ifelse(eset$NTrSum>0,1,0)
AD=AD+ifelse(eset$NTrSum>10,1,0)
}
AD
}
|
###run clean merge all up to line 6
rm(list=ls())
options(stringsAsFactors = FALSE)
library(ggplot2)
library(lme4)
library(dplyr)
# Set working directory:
if(length(grep("Lizzie", getwd())>0)) {setwd("~/Documents/git/projects/treegarden/budreview/ospree/analyses")
} else if
(length(grep("ailene", getwd()))>0) {setwd("/Users/aileneettinger/git/ospree/analyses")
}else
setwd("~/Documents/git/ospree/analyses")
####Question does selectting on flowering time (ever bearing, June or Day Neutral) influence the control of the leaves? http://strawberryplants.org/2010/05/strawberry-varieties/
#1 classify varieties as midseaon, everbearing, or daylength neutral
#2 mixed model for flowering
#3mixed model for leafing
berries<-read.csv("output/strawberries_bb.csv")
###what are the varieties
table(berries$varetc)
###what are the respvars
table(berries$respvar.simple)
berries$forcetemp <- as.numeric(berries$forcetemp)
berries$photoperiod_day <- as.numeric(berries$photoperiod_day)
berries$Total_Utah_Model<-as.numeric(berries$Total_Utah_Model)
berries$Total_Chilling_Hours<-as.numeric(berries$Total_Chilling_Hours)
berries$Total_Chill_portions<-as.numeric(berries$Total_Chill_portions)
berries$responsedays <- as.numeric(berries$response.time)
berries$response <- as.numeric(berries$response)
condition1<-c("percentbudburst","percentflower","daystobudburst","daystoflower")
straw <- filter(berries, respvar.simple %in% condition1)
table(straw$varetc)
###assign them to varclass
#View(filter(straw, varetc==""))
straw$vartype<-NA
straw$vartype[straw$varetc == "Abundance"] <- "June"
straw$vartype[straw$varetc == "Alta"] <- "everbearing"
straw$vartype[straw$varetc == "As"] <- "everbearing"
straw$vartype[straw$varetc == "Elsanta"] <- "June"
straw$vartype[straw$varetc == "Florene"] <- "June"
straw$vartype[straw$varetc == "Frida"] <- "June"
straw$vartype[straw$varetc == "Grytoy"] <- "everbearing"
straw$vartype[straw$varetc == "Hardanger"] <- "everbearing"
straw$vartype[straw$varetc == "Haugastol"] <- "everbearing"
straw$vartype[straw$varetc == "Honeoye"] <- "June"
straw$vartype[straw$varetc == "Jonsok"] <- "June"
straw$vartype[straw$varetc == "Korona"] <- "June"
straw$vartype[straw$varetc == "Namsos"] <- "everbearing"
straw$vartype[straw$varetc == "Ostara"] <- "everbearing"
straw$vartype[straw$varetc == "Rabunda"] <- "everbearing"
straw$vartype[straw$varetc == "Revada"] <- "everbearing"
straw$vartype[straw$varetc == "Tribute"] <- "dayneutral"
straw$vartype[straw$varetc == "RH30"] <- "dayneutral"
straw$vartype[straw$varetc == "Senga Sengana"] <- "June"
straw$vartype[straw$datasetID == "verheul07"] <- "June"
straw$vartype[straw$varetc == "Zefyr"] <- "June"
#8 June
#9 everbearing (5 vesca)
#2 day neutral (1 virginiana)
#hypoth June are short day
# Everbearing longday
#dayneutal
table(straw$vartype)
table(straw$species)
#mixed<-lmer(response~responsedays+responsedays:forcetemp+responsedays:photoperiod_day+(1|vartype), data=straw)
#summary(mixed)
#coef(mixed)
###i Think this is wrong, but why
###filtering and cleaning
conditionleaf<-c("percentbudburst","daystobudburst")
bud<- filter(straw, respvar.simple %in% conditionleaf)
buddy<-filter(bud, response.time!="")
conditionflo<-c("percentflower","daystoflower")
flo<- filter(straw, respvar.simple %in% conditionflo) ##what is this variable? there are 200 percent value and respvar simpl eis just flowers. Its actually "number not percent"
floy<-filter(flo, response.time!="")
##floy metircs
unique(floy$datasetID)
table(floy$varetc)
table(floy$forcetemp)
table(floy$photoperiod_day)
##bud metrics
unique(buddy$datasetID)
table(buddy$varetc)
table(buddy$forcetemp)
table(buddy$photoperiod_day)
### a few fun exploratory models with just forcing and photoperoid
#####%budburst
library("rstanarm")
library(brms)
mod <-brm(response~rforcetemp+photoperiod_day+(responsedays+responsedays:forcetemp+responsedays:photoperiod_day|vartype), data=buddy,iter = 1000,
family = gaussian(),
prior = c(prior(normal(0, 10), "b"),
prior(normal(0, 50), "Intercept"),
prior(student_t(3, 0, 20), "sd"),
prior(student_t(3, 0, 20), "sigma")))
summary(mod)
coef(mod)
###percent flower
mod2 <-lmer(response~responsedays+responsedays:forcetemp+responsedays:photoperiod_day+(1|vartype), data=floy)
table(floy$forcetemp) #Why is floy rank deficient in force temp, they dont work as a mixed model either
#### investigate mod and mod2
###look at jsut phenology ###i think there isnt enough levels for each factor
#flower
flo.phen<-filter(berries, respvar.simple=="daystoflower")
###now leaf
bud.phen<-filter(berries, respvar.simple=="daystobudburst")
bud.phen<- within(bud.phen, response.time[response=="no response" & response.time==""]<-"no response")
fix <- which(bud.phen$figure.table..if.applicable.=="fig 3")
bud.phen$response.time[fix] <- as.numeric(bud.phen$response[fix])
bud.phen<-filter(bud.phen, figure.table..if.applicable.!= "fig 4")
table(flo.phen$datasetID)
table(bud.phen$datasetID)
###to do: and ambients | /analyses/strawberries/strawberry_analysis.R | no_license | lizzieinvancouver/ospree | R | false | false | 5,085 | r | ###run clean merge all up to line 6
rm(list=ls())
options(stringsAsFactors = FALSE)
library(ggplot2)
library(lme4)
library(dplyr)
# Set working directory:
if(length(grep("Lizzie", getwd())>0)) {setwd("~/Documents/git/projects/treegarden/budreview/ospree/analyses")
} else if
(length(grep("ailene", getwd()))>0) {setwd("/Users/aileneettinger/git/ospree/analyses")
}else
setwd("~/Documents/git/ospree/analyses")
####Question does selectting on flowering time (ever bearing, June or Day Neutral) influence the control of the leaves? http://strawberryplants.org/2010/05/strawberry-varieties/
#1 classify varieties as midseaon, everbearing, or daylength neutral
#2 mixed model for flowering
#3mixed model for leafing
berries<-read.csv("output/strawberries_bb.csv")
###what are the varieties
table(berries$varetc)
###what are the respvars
table(berries$respvar.simple)
berries$forcetemp <- as.numeric(berries$forcetemp)
berries$photoperiod_day <- as.numeric(berries$photoperiod_day)
berries$Total_Utah_Model<-as.numeric(berries$Total_Utah_Model)
berries$Total_Chilling_Hours<-as.numeric(berries$Total_Chilling_Hours)
berries$Total_Chill_portions<-as.numeric(berries$Total_Chill_portions)
berries$responsedays <- as.numeric(berries$response.time)
berries$response <- as.numeric(berries$response)
condition1<-c("percentbudburst","percentflower","daystobudburst","daystoflower")
straw <- filter(berries, respvar.simple %in% condition1)
table(straw$varetc)
###assign them to varclass
#View(filter(straw, varetc==""))
straw$vartype<-NA
straw$vartype[straw$varetc == "Abundance"] <- "June"
straw$vartype[straw$varetc == "Alta"] <- "everbearing"
straw$vartype[straw$varetc == "As"] <- "everbearing"
straw$vartype[straw$varetc == "Elsanta"] <- "June"
straw$vartype[straw$varetc == "Florene"] <- "June"
straw$vartype[straw$varetc == "Frida"] <- "June"
straw$vartype[straw$varetc == "Grytoy"] <- "everbearing"
straw$vartype[straw$varetc == "Hardanger"] <- "everbearing"
straw$vartype[straw$varetc == "Haugastol"] <- "everbearing"
straw$vartype[straw$varetc == "Honeoye"] <- "June"
straw$vartype[straw$varetc == "Jonsok"] <- "June"
straw$vartype[straw$varetc == "Korona"] <- "June"
straw$vartype[straw$varetc == "Namsos"] <- "everbearing"
straw$vartype[straw$varetc == "Ostara"] <- "everbearing"
straw$vartype[straw$varetc == "Rabunda"] <- "everbearing"
straw$vartype[straw$varetc == "Revada"] <- "everbearing"
straw$vartype[straw$varetc == "Tribute"] <- "dayneutral"
straw$vartype[straw$varetc == "RH30"] <- "dayneutral"
straw$vartype[straw$varetc == "Senga Sengana"] <- "June"
straw$vartype[straw$datasetID == "verheul07"] <- "June"
straw$vartype[straw$varetc == "Zefyr"] <- "June"
#8 June
#9 everbearing (5 vesca)
#2 day neutral (1 virginiana)
#hypoth June are short day
# Everbearing longday
#dayneutal
table(straw$vartype)
table(straw$species)
#mixed<-lmer(response~responsedays+responsedays:forcetemp+responsedays:photoperiod_day+(1|vartype), data=straw)
#summary(mixed)
#coef(mixed)
###i Think this is wrong, but why
###filtering and cleaning
conditionleaf<-c("percentbudburst","daystobudburst")
bud<- filter(straw, respvar.simple %in% conditionleaf)
buddy<-filter(bud, response.time!="")
conditionflo<-c("percentflower","daystoflower")
flo<- filter(straw, respvar.simple %in% conditionflo) ##what is this variable? there are 200 percent value and respvar simpl eis just flowers. Its actually "number not percent"
floy<-filter(flo, response.time!="")
##floy metircs
unique(floy$datasetID)
table(floy$varetc)
table(floy$forcetemp)
table(floy$photoperiod_day)
##bud metrics
unique(buddy$datasetID)
table(buddy$varetc)
table(buddy$forcetemp)
table(buddy$photoperiod_day)
### a few fun exploratory models with just forcing and photoperoid
#####%budburst
library("rstanarm")
library(brms)
mod <-brm(response~rforcetemp+photoperiod_day+(responsedays+responsedays:forcetemp+responsedays:photoperiod_day|vartype), data=buddy,iter = 1000,
family = gaussian(),
prior = c(prior(normal(0, 10), "b"),
prior(normal(0, 50), "Intercept"),
prior(student_t(3, 0, 20), "sd"),
prior(student_t(3, 0, 20), "sigma")))
summary(mod)
coef(mod)
###percent flower
mod2 <-lmer(response~responsedays+responsedays:forcetemp+responsedays:photoperiod_day+(1|vartype), data=floy)
table(floy$forcetemp) #Why is floy rank deficient in force temp, they dont work as a mixed model either
#### investigate mod and mod2
###look at jsut phenology ###i think there isnt enough levels for each factor
#flower
flo.phen<-filter(berries, respvar.simple=="daystoflower")
###now leaf
bud.phen<-filter(berries, respvar.simple=="daystobudburst")
bud.phen<- within(bud.phen, response.time[response=="no response" & response.time==""]<-"no response")
fix <- which(bud.phen$figure.table..if.applicable.=="fig 3")
bud.phen$response.time[fix] <- as.numeric(bud.phen$response[fix])
bud.phen<-filter(bud.phen, figure.table..if.applicable.!= "fig 4")
table(flo.phen$datasetID)
table(bud.phen$datasetID)
###to do: and ambients |
testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752001e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) | /metacoder/inst/testfiles/intersect_line_rectangle/AFL_intersect_line_rectangle/intersect_line_rectangle_valgrind_files/1615768734-test.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 727 | r | testlist <- list(b = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), p1 = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752001e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161), p2 = -1.72131968218895e+83)
result <- do.call(metacoder:::intersect_line_rectangle,testlist)
str(result) |
################################################################################
## package 'openCR'
## openCR.fit.R
## 2011-12-30, 2013-01-21
## 2013-01-21 modified to balance with terminal beta, tau
## 2015-01-30 removed make.lookup (see secr)
## 2015-01-30 moved miscellaneous functions to utility.r
## 2015-01-31 deleted utility functions not needed or can be called from secr
## 2015-02-06 reconciled this current version with forked 1.2.0
## 2015-02-06 removed pdot, esa, derived
## 2017-05-15 reconciled versions; revision in progress
## 2017-05-18 2.2.0 ditched old openCR.fit; renamed openCR.MCfit
## 2017-11-20 general revision 2.2.1
## 2017-11-20 refined start options
## 2017-11-20 method default Newton-Raphson
## 2018-01-20 remember compileAttributes('d:/open populations/openCR')
## 2018-01-25 single coerced to multi
## 2018-02-02 detectfn HHR etc.
## 2018-05-01 intermediate variable allbetanames to fix problem with fixedbeta
## 2018-10-29 CJSp1 argument for openCR.design
## 2018-11-20 dropped posterior (see classMembership method)
## 2019-04-07 check whether require autoini
## 2019-04-09 removed data$multi
## 2020-10-19 added agecov
## 2020-11-02 movemodel renamed movementcode
## 2020-12-07 CJSmte experimental - trial abandoned 2020-12-12
################################################################################
openCR.fit <- function (capthist, type = "CJS", model = list(p~1, phi~1, sigma~1),
distribution = c("poisson", "binomial"), mask = NULL,
detectfn = c('HHN','HHR','HEX','HAN','HCG','HVP'), binomN = 0,
movementmodel = c('static','uncorrelated','normal','exponential', 't2D', 'uniform'),
edgemethod = c('truncate', 'wrap', 'none'), start = NULL, link = list(),
fixed = list(), timecov = NULL, sessioncov = NULL, agecov = NULL,
dframe = NULL, dframe0 = NULL, details = list(), method = 'Newton-Raphson',
trace = NULL, ncores = NULL, ...)
{
# Fit open population capture recapture model
#
# Some arguments:
#
# capthist - capture history object (includes traps object as an attribute)
# model - formulae for real parameters in terms of effects and covariates
# start - start values for maximization (numeric vector link scale);
# link - list of parameter-specific link function names 'log', 'logit', 'loglog',
# 'identity', 'sin', 'neglog', 'mlogit'
# fixed - list of fixed values for named parameters
# sessioncov - dataframe of session-level covariates
# mask
# detectfn
# dframe - optional data frame of design data for detection model (tricky & untested)
# details - list with several additional settings, mostly of special interest
# method - optimization method (indirectly chooses
# trace - logical; if TRUE output each likelihood as it is calculated
# ... - other arguments passed to join()
#########################################################################
## Use input 'details' to override various defaults
defaultdetails <- list(
autoini = NULL,
CJSp1 = FALSE,
contrasts = NULL,
control = list(),
debug = 0,
grain = 1,
hessian = 'auto',
ignoreusage = FALSE,
initialage = 0,
LLonly = FALSE,
kernelradius = 10,
minimumage = 0,
maximumage = 1,
multinom = FALSE,
R = FALSE,
squeeze = TRUE,
trace = FALSE
)
if (is.logical(details$hessian))
details$hessian <- ifelse(details$hessian, 'auto', 'none')
details <- replace (defaultdetails, names(details), details)
if (!is.null(trace)) details$trace <- trace
if (details$LLonly) details$trace <- FALSE
if (details$R) ncores <- 1 ## force 2018-11-12
if (!is.null(ncores) && (ncores == 1)) details$grain <- -1
#########################################################################
distribution <- match.arg(distribution)
distrib <- switch (distribution, poisson = 0, binomial = 1)
##############################################
# Multithread option 2018-04-11, 2020-11-02
##############################################
secr::setNumThreads(ncores, stackSize = "auto") # change to match secr
if (is.character(detectfn)) {
detectfn <- match.arg(detectfn)
detectfn <- secr:::detectionfunctionnumber(detectfn)
}
if (is.character(dframe)) {
dframename <- dframe; rm(dframe)
dframe <- get(dframename, pos=-1)
}
if (is.character(dframe0)) {
dframename <- dframe0; rm(dframe0)
dframe0 <- get(dframename, pos=-1)
}
if (is.character(capthist)) {
capthistname <- capthist; rm(capthist)
capthist <- get(capthistname, pos=-1)
}
##############################################
## Standard form for capthist
##############################################
marray <- m.array(capthist)
capthist <- stdcapthist(capthist, type, details$nclone, details$squeeze, ...)
inputcapthist <- capthist ## PROCESSED
##############################################
## check type argument
##############################################
if (type %in% .openCRstuff$suspendedtypes)
stop (type, " not currently available")
secr <- grepl('secr', type)
if (secr) {
if (is.null(mask))
stop("requires valid mask")
if (ms(mask)) {
mask <- mask[[1]]
warning("multi-session mask provided; using first")
}
if (is.character(mask)) {
maskname <- mask; rm(mask)
mask <- get(maskname, pos=-1)
}
if (is.function (movementmodel)) {
moveargs <- formalArgs(movementmodel)
usermodel <- as.character(substitute(movementmodel))
movementmodel <- "user"
}
else {
usermodel <- ""
movementmodel <- match.arg(movementmodel)
}
## integer code for movement model
movementcode <- movecode(movementmodel)
edgemethod <- match.arg(edgemethod)
edgecode <- edgemethodcode(edgemethod) # 0 none, 1 wrap, 2 truncate
if (!is.null(mask) && attr(mask, 'type') != 'traprect' &&
movementcode > 1 && edgemethod == 'wrap') {
stop("edgemethod = 'wrap' requires mask of type 'traprect'")
}
if (movementcode > 1 && edgemethod == 'none') {
warning("specify edgemethod 'wrap' or 'truncate' to avoid ",
"bias in movement models")
}
}
else {
if (!is.null(mask)) warning("mask not used in non-spatial analysis")
movementcode <- -1
edgecode <- -1
usermodel <- ""
}
##############################################
## Remember start time and call
##############################################
ptm <- proc.time()
starttime <- format(Sys.time(), "%H:%M:%S %d %b %Y")
cl <- match.call(expand.dots = TRUE)
if (type %in% c("secrCL","secrD"))
intervals(capthist) <- rep(0, ncol(capthist)-1)
intervals <- intervals(capthist)
intervals <- intervals[intervals>0] ## primary intervals only
sessnames <- sessionlabels(capthist)
cumss <- getcumss(capthist) ## cumulative secondary sessions per primary session
if (is.null(sessnames))
sessnames <- 1:(length(cumss)-1)
nc <- nrow(capthist)
if (nc == 0) warning ("no detection histories")
J <- length(cumss)-1 ## number of primary sessions
primarysession <- primarysessions(intervals(capthist)) # rep(1:J, diff(cumss)) ## map secondary to primary
k <- nrow(traps(capthist)) ## number of detectors (secr only)
m <- if (is.null(mask)) 0 else nrow(mask)
marea <- if (is.null(mask)) NA else maskarea(mask)
##############################################
## Use input formula to override defaults
##############################################
if ('formula' %in% class(model)) model <- list(model)
model <- secr:::stdform (model) ## named, no LHS
defaultmodel <- list(p = ~1, lambda0 = ~1, phi = ~1, b = ~1, f = ~1, lambda = ~1, g = ~1,
gamma = ~1, kappa = ~1, BN = ~1, BD = ~1, N=~1, D = ~1, superN = ~1,
superD = ~1, sigma = ~1, z = ~1, move.a = ~1, move.b = ~1, tau = ~1)
model <- replace (defaultmodel, names(model), model)
pnames <- switch (type,
CJS = c('p', 'phi'), # 1
CJSmte = c('p', 'phi', 'move.a', 'move.b'), # 5
JSSAb = c('p', 'phi','b','superN'), # 2
JSSAl = c('p', 'phi','lambda','superN'), # 3
JSSAf = c('p', 'phi','f','superN'), # 4
JSSAg = c('p', 'phi','gamma','superN'), # 22
JSSAk = c('p', 'phi','kappa','superN'), # 28
JSSAfCL = c('p', 'phi','f'), # 15
JSSAlCL = c('p', 'phi','lambda'), # 16
JSSAbCL = c('p', 'phi','b'), # 17
JSSAgCL = c('p', 'phi','gamma'), # 23
JSSAkCL = c('p', 'phi','kappa'), # 29
PLBf = c('p', 'phi','f'), # 15
PLBl = c('p', 'phi','lambda'), # 16
PLBb = c('p', 'phi','b'), # 17
PLBg = c('p', 'phi','gamma'), # 23
PLBk = c('p', 'phi','kappa'), # 29
JSSAB = c('p', 'phi','BN'), # 18
JSSAN = c('p', 'phi','N'), # 19
Pradel = c('p', 'phi','lambda'), # 20
Pradelg = c('p', 'phi','gamma'), # 26
JSSARET = c('p', 'phi','b','superN','tau'), # 21
JSSAfgCL = c('p', 'phi','f','g'), # 27 # experimental temporary emigration
CJSsecr = c('lambda0', 'phi','sigma'), # 6
JSSAsecrfCL = c('lambda0', 'phi','f','sigma'), # 9
JSSAsecrlCL = c('lambda0', 'phi','lambda','sigma'), # 10
JSSAsecrbCL = c('lambda0', 'phi','b','sigma'), # 11
JSSAsecrgCL = c('lambda0', 'phi','gamma','sigma'), # 25
PLBsecrf = c('lambda0', 'phi','f','sigma'), # 9
PLBsecrl = c('lambda0', 'phi','lambda','sigma'), # 10
PLBsecrb = c('lambda0', 'phi','b','sigma'), # 11
PLBsecrg = c('lambda0', 'phi','gamma','sigma'), # 25
JSSAsecrf = c('lambda0', 'phi','f','superD','sigma'), # 7
JSSAsecrl = c('lambda0', 'phi','lambda','superD','sigma'), # 12
JSSAsecrb = c('lambda0', 'phi','b','superD','sigma'), # 13
JSSAsecrg = c('lambda0', 'phi','gamma','superD','sigma'), # 24
JSSAsecrB = c('lambda0', 'phi','BD','sigma'), # 14
JSSAsecrD = c('lambda0', 'phi','D','sigma'), # 8
secrCL = c('lambda0', 'phi', 'b','sigma'), # 30
secrD = c('lambda0', 'phi', 'b', 'superD', 'sigma'), # 31
"unrecognised type")
moveargsi <- c(-2,-2)
if (secr) {
if (movementmodel %in% c('normal','exponential')) {
pnames <- c(pnames, 'move.a')
moveargsi[1] <- .openCRstuff$sigmai[typecode(type)] + 1 + (detectfn %in% c(15,17,18,19))
}
else if (movementmodel == 't2D') {
pnames <- c(pnames, 'move.a', 'move.b')
moveargsi[1] <- .openCRstuff$sigmai[typecode(type)] + 1 + (detectfn %in% c(15,17,18,19))
moveargsi[2] <- moveargsi[1]+1
}
else if (movementmodel == 'user') {
if (! ("r" == moveargs[1]))
stop ("user-supplied movement model must have r as first argument")
if ("a" %in% moveargs) {
pnames <- c(pnames, 'move.a')
moveargsi[1] <- .openCRstuff$sigmai[typecode(type)] + 1 + (detectfn %in% c(15,17,18,19))
if ("b" %in% moveargs) {
pnames <- c(pnames, 'move.b')
moveargsi[2] <- moveargsi[1] + 1
}
}
}
else if (movementmodel == 'uniform') {
## no parameters, no action needed
}
if (type %in% c("secrCL","secrD")) {
## closed population
## fix survival and recruitment
fixed <- replace(list(phi = 1.0, b = 1.0), names(fixed), fixed)
}
}
if (any(pnames == 'unrecognised type'))
stop ("'type' not recognised")
if (detectfn %in% c(15,17:19)) pnames <- c(pnames, 'z')
########################################
# Finite mixtures
########################################
nmix <- secr:::get.nmix(model, capthist, NULL)
if ((nmix>1) & (nmix<4)) {
if (type %in% c('Pradel', 'Pradelg')) stop ("Mixture models not implemented for Pradel models")
model$pmix <- as.formula(paste('~h', nmix, sep=''))
if (!all(all.vars(model$pmix) %in% c('session','g','h2','h3')))
stop ("formula for pmix may include only 'session', 'g' or '1'")
pnames <- c(pnames, 'pmix')
}
details$nmix <- nmix
if (type == 'CJSmte') {
moveargsi[1] <- 1 + nmix
moveargsi[2] <- moveargsi[1] + 1
}
#################################
# Link functions (model-specific)
#################################
defaultlink <- list(p = 'logit', lambda0 = 'log', phi = 'logit', b = 'mlogit', f = 'log',
gamma = 'logit', kappa = 'log', g = 'logit',
lambda = 'log', BN = 'log', BD = 'log', D = 'log', N = 'log',
superN = 'log', superD = 'log', sigma = 'log', z = 'log', pmix='mlogit',
move.a = 'log', move.b = 'log', tau = 'mlogit')
link <- replace (defaultlink, names(link), link)
link[!(names(link) %in% pnames)] <- NULL
if (details$nmix==1) link$pmix <- NULL
pnamesR <- pnames[!(pnames %in% names(fixed))]
model[!(names(model) %in% pnamesR)] <- NULL
if ((length(model) == 0) & (length(fixed)>0))
stop ("all parameters fixed") ## assume want only LL
vars <- unlist(lapply(model, all.vars))
##############################################
# Prepare detection design matrices and lookup
##############################################
memo ('Preparing design matrices', details$trace)
design <- openCR.design (capthist, model, type,
timecov = timecov,
sessioncov = sessioncov,
agecov = agecov,
dframe = dframe,
naive = FALSE,
contrasts = details$contrasts,
initialage = details$initialage,
minimumage = details$minimumage,
maximumage = details$maximumage,
CJSp1 = details$CJSp1)
allvars <- unlist(lapply(model, all.vars))
learnedresponse <- any(.openCRstuff$learnedresponses %in% allvars) || !is.null(dframe)
mixturemodel <- "h2" %in% allvars | "h3" %in% allvars
design0 <- if (learnedresponse) {
if (is.null(dframe0)) dframe0 <- dframe
openCR.design (capthist, model, type,
timecov = timecov,
sessioncov = sessioncov,
agecov = agecov,
dframe = dframe0,
naive = TRUE,
contrasts = details$contrasts,
initialage = details$initialage,
minimumage = details$minimumage,
maximumage = details$maximumage,
CJSp1 = details$CJSp1)
}
else {
design
}
############################
# Parameter mapping (general)
#############################
np <- sapply(design$designMatrices, ncol)
NP <- sum(np)
parindx <- split(1:NP, rep(1:length(np), np))
names(parindx) <- names(np)
##########################
# Movement kernel
##########################
cellsize <- mqarray <- 0
kernel <- mqarray <- matrix(0,1,2) ## default
if (secr && (movementmodel %in%
c('normal', 'exponential', 'user', 't2D', 'uniform'))) {
## movement kernel
k2 <- details$kernelradius
cellsize <- attr(mask,'area')^0.5 * 100 ## metres, equal mask cellsize
kernel <- expand.grid(x = -k2:k2, y = -k2:k2)
kernel <- kernel[(kernel$x^2 + kernel$y^2) <= (k2+0.5)^2, ]
mqarray <- mqsetup (mask, kernel, cellsize, edgecode)
}
###########################################
# Choose likelihood function
###########################################
if (secr)
loglikefn <- open.secr.loglikfn # see logliksecr.R
else
loglikefn <- open.loglikfn # see loglik.R
##########################
# Variable names (general)
##########################
allbetanames <- unlist(sapply(design$designMatrices, colnames))
names(allbetanames) <- NULL
realnames <- names(model)
allbetanames <- sub('..(Intercept))','',allbetanames)
## allow for fixed beta parameters
if (!is.null(details$fixedbeta))
betanames <- allbetanames[is.na(details$fixedbeta)]
else
betanames <- allbetanames
betaw <- max(c(nchar(betanames),8)) # for 'trace' formatting
###################################################
# Option to generate start values from previous fit
###################################################
if (inherits(start, 'secr') | inherits(start, 'openCR')) {
start <- mapbeta(start$parindx, parindx, coef(start)$beta, NULL)
}
else if (is.list(start) & (inherits(start[[1]], 'secr') | inherits(start[[1]], 'openCR')) ) {
start2 <- if (length(start)>1) mapbeta(start[[2]]$parindx, parindx, coef(start[[2]])$beta, NULL) else NULL
start <- mapbeta(start[[1]]$parindx, parindx, coef(start[[1]])$beta, NULL)
if (!is.null(start2)) {
start[is.na(start)] <- start2[is.na(start)] ## use second as needed
}
}
else if (is.numeric(start) & !is.null(names(start))) {
## optionally reorder and subset beta values by name
OK <- allbetanames %in% names(start)
if (!all(OK))
stop ("beta names not in start : ", paste(allbetanames[!OK], collapse=', '))
start <- start[allbetanames]
}
###############################
# Start values (model-specific)
###############################
if (is.null(start)) start <- rep(NA, NP)
freq <- covariates(capthist)$freq
ncf <- if (!is.null(freq)) sum(freq) else nc
if (any(is.na(start)) | is.list(start)) {
rpsv <- if(secr) RPSV(capthist, CC = TRUE) else NA
## assemble start vector
default <- list(
p = 0.6,
lambda0 = 0.6,
phi = 0.7,
gamma = 0.7,
kappa = 2,
b = 0.1,
f = 0.3,
lambda = 1.0,
g = 0.2, # random temporary emigration parameter
# tau = 1/(details$M+1),
BN = 20,
BD = (ncf + 1) / marea,
D = (ncf + 1) / marea,
N = ncf + 1,
# superN = ncf + 20,
superN = ncf*(1-distrib) + 20, ## use N-n for binomial 2018-03-12
superD = (ncf + 20) / marea,
# superD = (ncf*(1-distrib) + 20) / marea, ## not a good idea 2018-05-28
sigma = rpsv,
z = 2,
move.a = if (secr) rpsv/2 else 0.6,
move.b = if (secr) 1 else 0.2,
pmix = 0.25
)
getdefault <- function (par) transform (default[[par]], link[[par]])
defaultstart <- rep(0, NP)
for ( i in 1:length(parindx) ) {
defaultstart[parindx[[i]][1]] <- getdefault (names(model)[i])
}
if(details$nmix>1) {
## scaled by mlogit.untransform
defaultstart[parindx[['pmix']]] <- (2:details$nmix)/(details$nmix+1)
}
if('b' %in% names(parindx)) {
## scaled by mlogit.untransform
defaultstart[parindx[['b']]] <- 1/J
}
# if('tau' %in% names(parindx))
# ## scaled by mlogit.untransform
# defaultstart[parindx[['tau']]] <- 1/(details$M+1)
if (secr & !(type %in% c('CJSsecr'))) {
requireautoini <- (is.null(start) | !all(names(parindx) %in% names(start)))
if (requireautoini) { ## condition added 2019-04-07
if (!is.null(details$autoini))
start3 <- autoini (subset(capthist, occasions = primarysession==details$autoini), mask)
else
start3 <- autoini (capthist, mask) ## 2019-05-07
if (any(is.na(unlist(start3))))
warning ("initial values not found")
defaultstart[parindx[['lambda0']][1]] <- transform (-log(1-start3[['g0']]), link[['lambda0']])
defaultstart[parindx[['sigma']][1]] <- transform (start3[['sigma']], link[['sigma']])
if (type == 'JSSAsecrD')
defaultstart[parindx[['D']][1]] <- transform (start3[['D']], link[['D']])
else if (type == 'JSSAsecrB')
defaultstart[parindx[['BD']][1]] <- transform (start3[['D']]/J, link[['BD']])
else if (type %in% c('JSSAsecrf','JSSAsecrl','JSSAsecrb', 'JSSAsecrg'))
defaultstart[parindx[['superD']][1]] <- transform (start3[['D']], link[['superD']])
}
# CL types do not need density
}
}
tmp <- start
if (is.null(start) | is.list(start)) start <- rep(NA, NP)
if (any(is.na(start))) {
start[is.na(start)] <- defaultstart[is.na(start)]
}
if (is.list(tmp)) {
# 2020-10-31 protect against bad start list
ok <- names(tmp) %in% names(link)
if (any(!ok)) {
warning("ignoring parameter(s) in start not in model: ",
paste(names(tmp)[!ok], collapse = ', '))
tmp <- tmp[ok]
}
for (i in names(tmp)) {
if (i == 'b') {
start[parindx[[i]][1]] <- tmp[[i]]
}
else {
start[parindx[[i]][1]] <- transform (tmp[[i]], link[[i]])
}
}
}
##########################
# Fixed beta parameters
##########################
fb <- details$fixedbeta
if (!is.null(fb)) {
if (!(length(fb)== NP))
stop ("invalid fixed beta - require NP-vector")
if (sum(is.na(fb))==0)
stop ("cannot fix all beta parameters")
start <- start[is.na(fb)] ## drop unwanted betas; remember later to adjust parameter count
}
#########################
# capthist statistics
#########################
lost <- which(apply(capthist,1,min, drop = FALSE)<0)
twoD <- apply(abs(capthist), 1:2, sum, drop = FALSE)
CH <- twoD
if (J==1)
twoD <- as.matrix(apply(twoD, 1, function(x) tapply(x,primarysession,max)))
else
twoD <- t(apply(twoD, 1, function(x) tapply(x,primarysession,max))) # in terms of primary sessions
fi <- apply(twoD, 1, function(x) min(which(x>0)))
li <- apply(twoD, 1, function(x) max(which(x>0)))
twoD[cbind(lost, li[lost])] <- -1
li[lost] <- -li[lost]
covariates(CH) <- covariates(capthist)
covariates(twoD) <- covariates(capthist)
JScounts <- unlist(JS.counts(twoD))
if (secr) {
usge <- usage(traps(capthist))
if (is.null(usge) | details$ignoreusage)
usge <- matrix(1, nrow=k, ncol= cumss[J+1]) # in terms of secondary sessions
## 2017-11-26 collapse data from exclusive detectors; modified 2018-01-17
CH <- capthist
if (detector(traps(capthist))[1] == "multi") {
CH <- abs(capthist)
CH <- apply(CH,1:2, which.max) * (apply(CH,1:2, max)>0)
lost <- apply(capthist,1:2, min)<0
CH[lost] <- -CH[lost]
class (CH) <- 'capthist'
traps(CH) <- traps(capthist)
}
}
else {
usge <- NULL
}
data <- new.env(parent = emptyenv())
assign("capthist", CH, pos = data)
assign("type", type, pos = data)
assign("mask", mask, pos = data)
assign("detectfn", detectfn, pos = data)
assign("distrib", distrib, pos = data)
assign("binomN", binomN, pos = data)
assign("link", link, pos = data)
assign("fixed", fixed, pos = data)
assign("details", details, pos = data)
assign("ncores", ncores, pos = data)
assign("design", design, pos = data)
assign("design0", design0, pos = data)
assign("parindx", parindx, pos = data)
assign("intervals", intervals, pos = data)
assign("nc", nc, pos = data)
assign("J", J, pos = data)
assign("cumss", cumss, pos = data)
assign("k", k, pos = data)
assign("m", m, pos = data)
assign("betaw", betaw, pos = data)
assign("fi", fi, pos = data)
assign("li", li, pos = data)
assign("JScounts", JScounts, pos = data)
assign("marray", marray, pos = data) # 2020-12-08
assign("usge", usge, pos = data)
assign("moveargsi", moveargsi, pos = data)
assign("movementcode", movementcode, pos = data)
assign("edgecode", edgecode, pos = data)
assign("usermodel", usermodel, pos = data)
assign("kernel", kernel, pos = data)
assign("cellsize", cellsize, pos = data)
assign("mqarray", mqarray, pos = data)
assign("learnedresponse", learnedresponse, pos = data)
assign("mixturemodel", mixturemodel, pos = data)
# assign("PIA0njx", PIA0njx, pos = data)
#############################
# Single evaluation option
#############################
.openCRstuff$iter <- 0
if (details$LLonly) {
if (is.null(start))
stop ("provide transformed parameter values in 'start'")
args <- list(beta = start,
oneeval = TRUE,
data = data)
LL <- do.call(loglikefn, args)
names(LL) <- c('logLik', betanames)
attr(LL, 'parindx') <- parindx
return(LL)
}
#####################
# Maximize likelihood
#####################
## modified 2017-05-16 to assume most data are in the environment, not needing to be passed
memo('Maximizing likelihood...', details$trace)
if (details$trace) {
message('Eval Loglik', paste(str_pad(betanames, width = betaw), collapse = ' '))
}
if (tolower(method) %in% c('newton-raphson', 'nr')) {
args <- list (p = start,
f = loglikefn,
data = data, # environment(),
betaw = betaw,
hessian = tolower(details$hessian)=='auto',
stepmax = 10)
## cluster = cluster)
this.fit <- do.call (nlm, args)
this.fit$par <- this.fit$estimate # copy for uniformity
this.fit$value <- this.fit$minimum # copy for uniformity
if (this.fit$code > 2)
warning ("possible maximization error: nlm returned code ",
this.fit$code, ". See ?nlm")
}
else if (tolower(method) %in% c('none')) {
# Hessian-only
memo ('Computing Hessian with fdHess in nlme', details$trace)
loglikfn <- function (beta) {
## args <- list(beta = beta, data = data, cluster = cluster)
args <- list(beta = beta, data = data)
do.call(loglikefn, args)
}
grad.Hess <- nlme::fdHess(start, fun = loglikfn, .relStep = 0.001, minAbsPar=0.1)
this.fit <- list (value = loglikfn(start), par = start,
gradient = grad.Hess$gradient,
hessian = grad.Hess$Hessian)
}
else {
args <- list(par = start,
fn = loglikefn,
data = data,
hessian = tolower(details$hessian)=='auto',
control = details$control,
method = method)
# cluster = cluster)
this.fit <- do.call (optim, args)
# default method = 'BFGS', control=list(parscale=c(1,0.1,5))
if (this.fit$convergence != 0)
warning ("probable maximization error: optim returned convergence ",
this.fit$convergence, ". See ?optim")
}
this.fit$method <- method ## remember what method we used...
covar <- NULL
if (this.fit$value > 1e9) { ## failed
this.fit$beta[] <- NA
eigH <- NA
}
else {
############################
# Variance-covariance matrix
############################
if (tolower(details$hessian)=='fdhess') {
memo ('Computing Hessian with fdHess in nlme', details$trace)
loglikfn <- function (beta) {
args <- list (beta = beta,
parindx = parindx,
env = data) # environment(),
## cluster = cluster)
-do.call(loglikefn, args)
}
grad.Hess <- nlme::fdHess(this.fit$par, fun = loglikfn, .relStep = 0.001, minAbsPar=0.1)
this.fit$hessian <- -grad.Hess$Hessian
}
hess <- this.fit$hessian
eigH <- NA
NP <- length(betanames)
covar <- matrix(nrow = NP, ncol = NP)
if (!is.null(hess)) {
eigH <- eigen(this.fit$hessian)$values
## eigH <- eigH/max(eigH)
eigH <- abs(eigH)/max(abs(eigH)) ## 2020-05-28
covar <- try(MASS::ginv(hess))
if (inherits(covar, "try-error")) {
warning ("could not invert Hessian to compute ",
"variance-covariance matrix")
covar <- matrix(nrow = NP, ncol = NP)
}
else if (any(diag(covar)<0)) {
warning ("variance calculation failed for ",
"some beta parameters; confounding likely")
}
}
dimnames(covar) <- list(betanames, betanames)
}
desc <- packageDescription("openCR") ## for version number
temp <- list (call = cl,
capthist = inputcapthist,
type = type,
model = model,
distribution = distribution,
mask = mask,
detectfn = detectfn,
binomN = binomN,
movementmodel = movementmodel,
edgemethod = edgemethod,
usermodel = usermodel,
moveargsi = moveargsi,
start = start,
link = link,
fixed = fixed,
timecov = timecov,
sessioncov = sessioncov,
agecov = agecov,
dframe = dframe,
dframe0 = dframe0,
details = details,
method = method,
ncores = ncores,
design = design,
design0 = design0,
parindx = parindx,
intervals = intervals,
vars = vars,
betanames = betanames,
realnames = realnames,
sessionlabels = sessnames,
fit = this.fit,
beta.vcv = covar,
eigH = eigH,
version = desc$Version,
starttime = starttime,
proctime = proc.time()[3] - ptm[3]
)
if (secr) temp <- c(temp, list(mask=mask))
attr (temp, 'class') <- 'openCR'
###############################################
## if (!is.null(cluster)) stopCluster(cluster)
###############################################
memo(paste('Completed in ', round(temp$proctime,2), ' seconds at ',
format(Sys.time(), "%H:%M:%S %d %b %Y"),
sep=''), details$trace)
temp
}
################################################################################
| /openCR/R/openCR.fit.R | no_license | akhikolla/updatedatatype-list4 | R | false | false | 33,602 | r | ################################################################################
## package 'openCR'
## openCR.fit.R
## 2011-12-30, 2013-01-21
## 2013-01-21 modified to balance with terminal beta, tau
## 2015-01-30 removed make.lookup (see secr)
## 2015-01-30 moved miscellaneous functions to utility.r
## 2015-01-31 deleted utility functions not needed or can be called from secr
## 2015-02-06 reconciled this current version with forked 1.2.0
## 2015-02-06 removed pdot, esa, derived
## 2017-05-15 reconciled versions; revision in progress
## 2017-05-18 2.2.0 ditched old openCR.fit; renamed openCR.MCfit
## 2017-11-20 general revision 2.2.1
## 2017-11-20 refined start options
## 2017-11-20 method default Newton-Raphson
## 2018-01-20 remember compileAttributes('d:/open populations/openCR')
## 2018-01-25 single coerced to multi
## 2018-02-02 detectfn HHR etc.
## 2018-05-01 intermediate variable allbetanames to fix problem with fixedbeta
## 2018-10-29 CJSp1 argument for openCR.design
## 2018-11-20 dropped posterior (see classMembership method)
## 2019-04-07 check whether require autoini
## 2019-04-09 removed data$multi
## 2020-10-19 added agecov
## 2020-11-02 movemodel renamed movementcode
## 2020-12-07 CJSmte experimental - trial abandoned 2020-12-12
################################################################################
openCR.fit <- function (capthist, type = "CJS", model = list(p~1, phi~1, sigma~1),
distribution = c("poisson", "binomial"), mask = NULL,
detectfn = c('HHN','HHR','HEX','HAN','HCG','HVP'), binomN = 0,
movementmodel = c('static','uncorrelated','normal','exponential', 't2D', 'uniform'),
edgemethod = c('truncate', 'wrap', 'none'), start = NULL, link = list(),
fixed = list(), timecov = NULL, sessioncov = NULL, agecov = NULL,
dframe = NULL, dframe0 = NULL, details = list(), method = 'Newton-Raphson',
trace = NULL, ncores = NULL, ...)
{
# Fit open population capture recapture model
#
# Some arguments:
#
# capthist - capture history object (includes traps object as an attribute)
# model - formulae for real parameters in terms of effects and covariates
# start - start values for maximization (numeric vector link scale);
# link - list of parameter-specific link function names 'log', 'logit', 'loglog',
# 'identity', 'sin', 'neglog', 'mlogit'
# fixed - list of fixed values for named parameters
# sessioncov - dataframe of session-level covariates
# mask
# detectfn
# dframe - optional data frame of design data for detection model (tricky & untested)
# details - list with several additional settings, mostly of special interest
# method - optimization method (indirectly chooses
# trace - logical; if TRUE output each likelihood as it is calculated
# ... - other arguments passed to join()
#########################################################################
## Use input 'details' to override various defaults
defaultdetails <- list(
autoini = NULL,
CJSp1 = FALSE,
contrasts = NULL,
control = list(),
debug = 0,
grain = 1,
hessian = 'auto',
ignoreusage = FALSE,
initialage = 0,
LLonly = FALSE,
kernelradius = 10,
minimumage = 0,
maximumage = 1,
multinom = FALSE,
R = FALSE,
squeeze = TRUE,
trace = FALSE
)
if (is.logical(details$hessian))
details$hessian <- ifelse(details$hessian, 'auto', 'none')
details <- replace (defaultdetails, names(details), details)
if (!is.null(trace)) details$trace <- trace
if (details$LLonly) details$trace <- FALSE
if (details$R) ncores <- 1 ## force 2018-11-12
if (!is.null(ncores) && (ncores == 1)) details$grain <- -1
#########################################################################
distribution <- match.arg(distribution)
distrib <- switch (distribution, poisson = 0, binomial = 1)
##############################################
# Multithread option 2018-04-11, 2020-11-02
##############################################
secr::setNumThreads(ncores, stackSize = "auto") # change to match secr
if (is.character(detectfn)) {
detectfn <- match.arg(detectfn)
detectfn <- secr:::detectionfunctionnumber(detectfn)
}
if (is.character(dframe)) {
dframename <- dframe; rm(dframe)
dframe <- get(dframename, pos=-1)
}
if (is.character(dframe0)) {
dframename <- dframe0; rm(dframe0)
dframe0 <- get(dframename, pos=-1)
}
if (is.character(capthist)) {
capthistname <- capthist; rm(capthist)
capthist <- get(capthistname, pos=-1)
}
##############################################
## Standard form for capthist
##############################################
marray <- m.array(capthist)
capthist <- stdcapthist(capthist, type, details$nclone, details$squeeze, ...)
inputcapthist <- capthist ## PROCESSED
##############################################
## check type argument
##############################################
if (type %in% .openCRstuff$suspendedtypes)
stop (type, " not currently available")
secr <- grepl('secr', type)
if (secr) {
if (is.null(mask))
stop("requires valid mask")
if (ms(mask)) {
mask <- mask[[1]]
warning("multi-session mask provided; using first")
}
if (is.character(mask)) {
maskname <- mask; rm(mask)
mask <- get(maskname, pos=-1)
}
if (is.function (movementmodel)) {
moveargs <- formalArgs(movementmodel)
usermodel <- as.character(substitute(movementmodel))
movementmodel <- "user"
}
else {
usermodel <- ""
movementmodel <- match.arg(movementmodel)
}
## integer code for movement model
movementcode <- movecode(movementmodel)
edgemethod <- match.arg(edgemethod)
edgecode <- edgemethodcode(edgemethod) # 0 none, 1 wrap, 2 truncate
if (!is.null(mask) && attr(mask, 'type') != 'traprect' &&
movementcode > 1 && edgemethod == 'wrap') {
stop("edgemethod = 'wrap' requires mask of type 'traprect'")
}
if (movementcode > 1 && edgemethod == 'none') {
warning("specify edgemethod 'wrap' or 'truncate' to avoid ",
"bias in movement models")
}
}
else {
if (!is.null(mask)) warning("mask not used in non-spatial analysis")
movementcode <- -1
edgecode <- -1
usermodel <- ""
}
##############################################
## Remember start time and call
##############################################
ptm <- proc.time()
starttime <- format(Sys.time(), "%H:%M:%S %d %b %Y")
cl <- match.call(expand.dots = TRUE)
if (type %in% c("secrCL","secrD"))
intervals(capthist) <- rep(0, ncol(capthist)-1)
intervals <- intervals(capthist)
intervals <- intervals[intervals>0] ## primary intervals only
sessnames <- sessionlabels(capthist)
cumss <- getcumss(capthist) ## cumulative secondary sessions per primary session
if (is.null(sessnames))
sessnames <- 1:(length(cumss)-1)
nc <- nrow(capthist)
if (nc == 0) warning ("no detection histories")
J <- length(cumss)-1 ## number of primary sessions
primarysession <- primarysessions(intervals(capthist)) # rep(1:J, diff(cumss)) ## map secondary to primary
k <- nrow(traps(capthist)) ## number of detectors (secr only)
m <- if (is.null(mask)) 0 else nrow(mask)
marea <- if (is.null(mask)) NA else maskarea(mask)
##############################################
## Use input formula to override defaults
##############################################
if ('formula' %in% class(model)) model <- list(model)
model <- secr:::stdform (model) ## named, no LHS
defaultmodel <- list(p = ~1, lambda0 = ~1, phi = ~1, b = ~1, f = ~1, lambda = ~1, g = ~1,
gamma = ~1, kappa = ~1, BN = ~1, BD = ~1, N=~1, D = ~1, superN = ~1,
superD = ~1, sigma = ~1, z = ~1, move.a = ~1, move.b = ~1, tau = ~1)
model <- replace (defaultmodel, names(model), model)
pnames <- switch (type,
CJS = c('p', 'phi'), # 1
CJSmte = c('p', 'phi', 'move.a', 'move.b'), # 5
JSSAb = c('p', 'phi','b','superN'), # 2
JSSAl = c('p', 'phi','lambda','superN'), # 3
JSSAf = c('p', 'phi','f','superN'), # 4
JSSAg = c('p', 'phi','gamma','superN'), # 22
JSSAk = c('p', 'phi','kappa','superN'), # 28
JSSAfCL = c('p', 'phi','f'), # 15
JSSAlCL = c('p', 'phi','lambda'), # 16
JSSAbCL = c('p', 'phi','b'), # 17
JSSAgCL = c('p', 'phi','gamma'), # 23
JSSAkCL = c('p', 'phi','kappa'), # 29
PLBf = c('p', 'phi','f'), # 15
PLBl = c('p', 'phi','lambda'), # 16
PLBb = c('p', 'phi','b'), # 17
PLBg = c('p', 'phi','gamma'), # 23
PLBk = c('p', 'phi','kappa'), # 29
JSSAB = c('p', 'phi','BN'), # 18
JSSAN = c('p', 'phi','N'), # 19
Pradel = c('p', 'phi','lambda'), # 20
Pradelg = c('p', 'phi','gamma'), # 26
JSSARET = c('p', 'phi','b','superN','tau'), # 21
JSSAfgCL = c('p', 'phi','f','g'), # 27 # experimental temporary emigration
CJSsecr = c('lambda0', 'phi','sigma'), # 6
JSSAsecrfCL = c('lambda0', 'phi','f','sigma'), # 9
JSSAsecrlCL = c('lambda0', 'phi','lambda','sigma'), # 10
JSSAsecrbCL = c('lambda0', 'phi','b','sigma'), # 11
JSSAsecrgCL = c('lambda0', 'phi','gamma','sigma'), # 25
PLBsecrf = c('lambda0', 'phi','f','sigma'), # 9
PLBsecrl = c('lambda0', 'phi','lambda','sigma'), # 10
PLBsecrb = c('lambda0', 'phi','b','sigma'), # 11
PLBsecrg = c('lambda0', 'phi','gamma','sigma'), # 25
JSSAsecrf = c('lambda0', 'phi','f','superD','sigma'), # 7
JSSAsecrl = c('lambda0', 'phi','lambda','superD','sigma'), # 12
JSSAsecrb = c('lambda0', 'phi','b','superD','sigma'), # 13
JSSAsecrg = c('lambda0', 'phi','gamma','superD','sigma'), # 24
JSSAsecrB = c('lambda0', 'phi','BD','sigma'), # 14
JSSAsecrD = c('lambda0', 'phi','D','sigma'), # 8
secrCL = c('lambda0', 'phi', 'b','sigma'), # 30
secrD = c('lambda0', 'phi', 'b', 'superD', 'sigma'), # 31
"unrecognised type")
moveargsi <- c(-2,-2)
if (secr) {
if (movementmodel %in% c('normal','exponential')) {
pnames <- c(pnames, 'move.a')
moveargsi[1] <- .openCRstuff$sigmai[typecode(type)] + 1 + (detectfn %in% c(15,17,18,19))
}
else if (movementmodel == 't2D') {
pnames <- c(pnames, 'move.a', 'move.b')
moveargsi[1] <- .openCRstuff$sigmai[typecode(type)] + 1 + (detectfn %in% c(15,17,18,19))
moveargsi[2] <- moveargsi[1]+1
}
else if (movementmodel == 'user') {
if (! ("r" == moveargs[1]))
stop ("user-supplied movement model must have r as first argument")
if ("a" %in% moveargs) {
pnames <- c(pnames, 'move.a')
moveargsi[1] <- .openCRstuff$sigmai[typecode(type)] + 1 + (detectfn %in% c(15,17,18,19))
if ("b" %in% moveargs) {
pnames <- c(pnames, 'move.b')
moveargsi[2] <- moveargsi[1] + 1
}
}
}
else if (movementmodel == 'uniform') {
## no parameters, no action needed
}
if (type %in% c("secrCL","secrD")) {
## closed population
## fix survival and recruitment
fixed <- replace(list(phi = 1.0, b = 1.0), names(fixed), fixed)
}
}
if (any(pnames == 'unrecognised type'))
stop ("'type' not recognised")
if (detectfn %in% c(15,17:19)) pnames <- c(pnames, 'z')
########################################
# Finite mixtures
########################################
nmix <- secr:::get.nmix(model, capthist, NULL)
if ((nmix>1) & (nmix<4)) {
if (type %in% c('Pradel', 'Pradelg')) stop ("Mixture models not implemented for Pradel models")
model$pmix <- as.formula(paste('~h', nmix, sep=''))
if (!all(all.vars(model$pmix) %in% c('session','g','h2','h3')))
stop ("formula for pmix may include only 'session', 'g' or '1'")
pnames <- c(pnames, 'pmix')
}
details$nmix <- nmix
if (type == 'CJSmte') {
moveargsi[1] <- 1 + nmix
moveargsi[2] <- moveargsi[1] + 1
}
#################################
# Link functions (model-specific)
#################################
defaultlink <- list(p = 'logit', lambda0 = 'log', phi = 'logit', b = 'mlogit', f = 'log',
gamma = 'logit', kappa = 'log', g = 'logit',
lambda = 'log', BN = 'log', BD = 'log', D = 'log', N = 'log',
superN = 'log', superD = 'log', sigma = 'log', z = 'log', pmix='mlogit',
move.a = 'log', move.b = 'log', tau = 'mlogit')
link <- replace (defaultlink, names(link), link)
link[!(names(link) %in% pnames)] <- NULL
if (details$nmix==1) link$pmix <- NULL
pnamesR <- pnames[!(pnames %in% names(fixed))]
model[!(names(model) %in% pnamesR)] <- NULL
if ((length(model) == 0) & (length(fixed)>0))
stop ("all parameters fixed") ## assume want only LL
vars <- unlist(lapply(model, all.vars))
##############################################
# Prepare detection design matrices and lookup
##############################################
memo ('Preparing design matrices', details$trace)
design <- openCR.design (capthist, model, type,
timecov = timecov,
sessioncov = sessioncov,
agecov = agecov,
dframe = dframe,
naive = FALSE,
contrasts = details$contrasts,
initialage = details$initialage,
minimumage = details$minimumage,
maximumage = details$maximumage,
CJSp1 = details$CJSp1)
allvars <- unlist(lapply(model, all.vars))
learnedresponse <- any(.openCRstuff$learnedresponses %in% allvars) || !is.null(dframe)
mixturemodel <- "h2" %in% allvars | "h3" %in% allvars
design0 <- if (learnedresponse) {
if (is.null(dframe0)) dframe0 <- dframe
openCR.design (capthist, model, type,
timecov = timecov,
sessioncov = sessioncov,
agecov = agecov,
dframe = dframe0,
naive = TRUE,
contrasts = details$contrasts,
initialage = details$initialage,
minimumage = details$minimumage,
maximumage = details$maximumage,
CJSp1 = details$CJSp1)
}
else {
design
}
############################
# Parameter mapping (general)
#############################
np <- sapply(design$designMatrices, ncol)
NP <- sum(np)
parindx <- split(1:NP, rep(1:length(np), np))
names(parindx) <- names(np)
##########################
# Movement kernel
##########################
cellsize <- mqarray <- 0
kernel <- mqarray <- matrix(0,1,2) ## default
if (secr && (movementmodel %in%
c('normal', 'exponential', 'user', 't2D', 'uniform'))) {
## movement kernel
k2 <- details$kernelradius
cellsize <- attr(mask,'area')^0.5 * 100 ## metres, equal mask cellsize
kernel <- expand.grid(x = -k2:k2, y = -k2:k2)
kernel <- kernel[(kernel$x^2 + kernel$y^2) <= (k2+0.5)^2, ]
mqarray <- mqsetup (mask, kernel, cellsize, edgecode)
}
###########################################
# Choose likelihood function
###########################################
if (secr)
loglikefn <- open.secr.loglikfn # see logliksecr.R
else
loglikefn <- open.loglikfn # see loglik.R
##########################
# Variable names (general)
##########################
allbetanames <- unlist(sapply(design$designMatrices, colnames))
names(allbetanames) <- NULL
realnames <- names(model)
allbetanames <- sub('..(Intercept))','',allbetanames)
## allow for fixed beta parameters
if (!is.null(details$fixedbeta))
betanames <- allbetanames[is.na(details$fixedbeta)]
else
betanames <- allbetanames
betaw <- max(c(nchar(betanames),8)) # for 'trace' formatting
###################################################
# Option to generate start values from previous fit
###################################################
if (inherits(start, 'secr') | inherits(start, 'openCR')) {
start <- mapbeta(start$parindx, parindx, coef(start)$beta, NULL)
}
else if (is.list(start) & (inherits(start[[1]], 'secr') | inherits(start[[1]], 'openCR')) ) {
start2 <- if (length(start)>1) mapbeta(start[[2]]$parindx, parindx, coef(start[[2]])$beta, NULL) else NULL
start <- mapbeta(start[[1]]$parindx, parindx, coef(start[[1]])$beta, NULL)
if (!is.null(start2)) {
start[is.na(start)] <- start2[is.na(start)] ## use second as needed
}
}
else if (is.numeric(start) & !is.null(names(start))) {
## optionally reorder and subset beta values by name
OK <- allbetanames %in% names(start)
if (!all(OK))
stop ("beta names not in start : ", paste(allbetanames[!OK], collapse=', '))
start <- start[allbetanames]
}
###############################
# Start values (model-specific)
###############################
if (is.null(start)) start <- rep(NA, NP)
freq <- covariates(capthist)$freq
ncf <- if (!is.null(freq)) sum(freq) else nc
if (any(is.na(start)) | is.list(start)) {
rpsv <- if(secr) RPSV(capthist, CC = TRUE) else NA
## assemble start vector
default <- list(
p = 0.6,
lambda0 = 0.6,
phi = 0.7,
gamma = 0.7,
kappa = 2,
b = 0.1,
f = 0.3,
lambda = 1.0,
g = 0.2, # random temporary emigration parameter
# tau = 1/(details$M+1),
BN = 20,
BD = (ncf + 1) / marea,
D = (ncf + 1) / marea,
N = ncf + 1,
# superN = ncf + 20,
superN = ncf*(1-distrib) + 20, ## use N-n for binomial 2018-03-12
superD = (ncf + 20) / marea,
# superD = (ncf*(1-distrib) + 20) / marea, ## not a good idea 2018-05-28
sigma = rpsv,
z = 2,
move.a = if (secr) rpsv/2 else 0.6,
move.b = if (secr) 1 else 0.2,
pmix = 0.25
)
getdefault <- function (par) transform (default[[par]], link[[par]])
defaultstart <- rep(0, NP)
for ( i in 1:length(parindx) ) {
defaultstart[parindx[[i]][1]] <- getdefault (names(model)[i])
}
if(details$nmix>1) {
## scaled by mlogit.untransform
defaultstart[parindx[['pmix']]] <- (2:details$nmix)/(details$nmix+1)
}
if('b' %in% names(parindx)) {
## scaled by mlogit.untransform
defaultstart[parindx[['b']]] <- 1/J
}
# if('tau' %in% names(parindx))
# ## scaled by mlogit.untransform
# defaultstart[parindx[['tau']]] <- 1/(details$M+1)
if (secr & !(type %in% c('CJSsecr'))) {
requireautoini <- (is.null(start) | !all(names(parindx) %in% names(start)))
if (requireautoini) { ## condition added 2019-04-07
if (!is.null(details$autoini))
start3 <- autoini (subset(capthist, occasions = primarysession==details$autoini), mask)
else
start3 <- autoini (capthist, mask) ## 2019-05-07
if (any(is.na(unlist(start3))))
warning ("initial values not found")
defaultstart[parindx[['lambda0']][1]] <- transform (-log(1-start3[['g0']]), link[['lambda0']])
defaultstart[parindx[['sigma']][1]] <- transform (start3[['sigma']], link[['sigma']])
if (type == 'JSSAsecrD')
defaultstart[parindx[['D']][1]] <- transform (start3[['D']], link[['D']])
else if (type == 'JSSAsecrB')
defaultstart[parindx[['BD']][1]] <- transform (start3[['D']]/J, link[['BD']])
else if (type %in% c('JSSAsecrf','JSSAsecrl','JSSAsecrb', 'JSSAsecrg'))
defaultstart[parindx[['superD']][1]] <- transform (start3[['D']], link[['superD']])
}
# CL types do not need density
}
}
tmp <- start
if (is.null(start) | is.list(start)) start <- rep(NA, NP)
if (any(is.na(start))) {
start[is.na(start)] <- defaultstart[is.na(start)]
}
if (is.list(tmp)) {
# 2020-10-31 protect against bad start list
ok <- names(tmp) %in% names(link)
if (any(!ok)) {
warning("ignoring parameter(s) in start not in model: ",
paste(names(tmp)[!ok], collapse = ', '))
tmp <- tmp[ok]
}
for (i in names(tmp)) {
if (i == 'b') {
start[parindx[[i]][1]] <- tmp[[i]]
}
else {
start[parindx[[i]][1]] <- transform (tmp[[i]], link[[i]])
}
}
}
##########################
# Fixed beta parameters
##########################
fb <- details$fixedbeta
if (!is.null(fb)) {
if (!(length(fb)== NP))
stop ("invalid fixed beta - require NP-vector")
if (sum(is.na(fb))==0)
stop ("cannot fix all beta parameters")
start <- start[is.na(fb)] ## drop unwanted betas; remember later to adjust parameter count
}
#########################
# capthist statistics
#########################
lost <- which(apply(capthist,1,min, drop = FALSE)<0)
twoD <- apply(abs(capthist), 1:2, sum, drop = FALSE)
CH <- twoD
if (J==1)
twoD <- as.matrix(apply(twoD, 1, function(x) tapply(x,primarysession,max)))
else
twoD <- t(apply(twoD, 1, function(x) tapply(x,primarysession,max))) # in terms of primary sessions
fi <- apply(twoD, 1, function(x) min(which(x>0)))
li <- apply(twoD, 1, function(x) max(which(x>0)))
twoD[cbind(lost, li[lost])] <- -1
li[lost] <- -li[lost]
covariates(CH) <- covariates(capthist)
covariates(twoD) <- covariates(capthist)
JScounts <- unlist(JS.counts(twoD))
if (secr) {
usge <- usage(traps(capthist))
if (is.null(usge) | details$ignoreusage)
usge <- matrix(1, nrow=k, ncol= cumss[J+1]) # in terms of secondary sessions
## 2017-11-26 collapse data from exclusive detectors; modified 2018-01-17
CH <- capthist
if (detector(traps(capthist))[1] == "multi") {
CH <- abs(capthist)
CH <- apply(CH,1:2, which.max) * (apply(CH,1:2, max)>0)
lost <- apply(capthist,1:2, min)<0
CH[lost] <- -CH[lost]
class (CH) <- 'capthist'
traps(CH) <- traps(capthist)
}
}
else {
usge <- NULL
}
data <- new.env(parent = emptyenv())
assign("capthist", CH, pos = data)
assign("type", type, pos = data)
assign("mask", mask, pos = data)
assign("detectfn", detectfn, pos = data)
assign("distrib", distrib, pos = data)
assign("binomN", binomN, pos = data)
assign("link", link, pos = data)
assign("fixed", fixed, pos = data)
assign("details", details, pos = data)
assign("ncores", ncores, pos = data)
assign("design", design, pos = data)
assign("design0", design0, pos = data)
assign("parindx", parindx, pos = data)
assign("intervals", intervals, pos = data)
assign("nc", nc, pos = data)
assign("J", J, pos = data)
assign("cumss", cumss, pos = data)
assign("k", k, pos = data)
assign("m", m, pos = data)
assign("betaw", betaw, pos = data)
assign("fi", fi, pos = data)
assign("li", li, pos = data)
assign("JScounts", JScounts, pos = data)
assign("marray", marray, pos = data) # 2020-12-08
assign("usge", usge, pos = data)
assign("moveargsi", moveargsi, pos = data)
assign("movementcode", movementcode, pos = data)
assign("edgecode", edgecode, pos = data)
assign("usermodel", usermodel, pos = data)
assign("kernel", kernel, pos = data)
assign("cellsize", cellsize, pos = data)
assign("mqarray", mqarray, pos = data)
assign("learnedresponse", learnedresponse, pos = data)
assign("mixturemodel", mixturemodel, pos = data)
# assign("PIA0njx", PIA0njx, pos = data)
#############################
# Single evaluation option
#############################
.openCRstuff$iter <- 0
if (details$LLonly) {
if (is.null(start))
stop ("provide transformed parameter values in 'start'")
args <- list(beta = start,
oneeval = TRUE,
data = data)
LL <- do.call(loglikefn, args)
names(LL) <- c('logLik', betanames)
attr(LL, 'parindx') <- parindx
return(LL)
}
#####################
# Maximize likelihood
#####################
## modified 2017-05-16 to assume most data are in the environment, not needing to be passed
memo('Maximizing likelihood...', details$trace)
if (details$trace) {
message('Eval Loglik', paste(str_pad(betanames, width = betaw), collapse = ' '))
}
if (tolower(method) %in% c('newton-raphson', 'nr')) {
args <- list (p = start,
f = loglikefn,
data = data, # environment(),
betaw = betaw,
hessian = tolower(details$hessian)=='auto',
stepmax = 10)
## cluster = cluster)
this.fit <- do.call (nlm, args)
this.fit$par <- this.fit$estimate # copy for uniformity
this.fit$value <- this.fit$minimum # copy for uniformity
if (this.fit$code > 2)
warning ("possible maximization error: nlm returned code ",
this.fit$code, ". See ?nlm")
}
else if (tolower(method) %in% c('none')) {
# Hessian-only
memo ('Computing Hessian with fdHess in nlme', details$trace)
loglikfn <- function (beta) {
## args <- list(beta = beta, data = data, cluster = cluster)
args <- list(beta = beta, data = data)
do.call(loglikefn, args)
}
grad.Hess <- nlme::fdHess(start, fun = loglikfn, .relStep = 0.001, minAbsPar=0.1)
this.fit <- list (value = loglikfn(start), par = start,
gradient = grad.Hess$gradient,
hessian = grad.Hess$Hessian)
}
else {
args <- list(par = start,
fn = loglikefn,
data = data,
hessian = tolower(details$hessian)=='auto',
control = details$control,
method = method)
# cluster = cluster)
this.fit <- do.call (optim, args)
# default method = 'BFGS', control=list(parscale=c(1,0.1,5))
if (this.fit$convergence != 0)
warning ("probable maximization error: optim returned convergence ",
this.fit$convergence, ". See ?optim")
}
this.fit$method <- method ## remember what method we used...
covar <- NULL
if (this.fit$value > 1e9) { ## failed
this.fit$beta[] <- NA
eigH <- NA
}
else {
############################
# Variance-covariance matrix
############################
if (tolower(details$hessian)=='fdhess') {
memo ('Computing Hessian with fdHess in nlme', details$trace)
loglikfn <- function (beta) {
args <- list (beta = beta,
parindx = parindx,
env = data) # environment(),
## cluster = cluster)
-do.call(loglikefn, args)
}
grad.Hess <- nlme::fdHess(this.fit$par, fun = loglikfn, .relStep = 0.001, minAbsPar=0.1)
this.fit$hessian <- -grad.Hess$Hessian
}
hess <- this.fit$hessian
eigH <- NA
NP <- length(betanames)
covar <- matrix(nrow = NP, ncol = NP)
if (!is.null(hess)) {
eigH <- eigen(this.fit$hessian)$values
## eigH <- eigH/max(eigH)
eigH <- abs(eigH)/max(abs(eigH)) ## 2020-05-28
covar <- try(MASS::ginv(hess))
if (inherits(covar, "try-error")) {
warning ("could not invert Hessian to compute ",
"variance-covariance matrix")
covar <- matrix(nrow = NP, ncol = NP)
}
else if (any(diag(covar)<0)) {
warning ("variance calculation failed for ",
"some beta parameters; confounding likely")
}
}
dimnames(covar) <- list(betanames, betanames)
}
desc <- packageDescription("openCR") ## for version number
temp <- list (call = cl,
capthist = inputcapthist,
type = type,
model = model,
distribution = distribution,
mask = mask,
detectfn = detectfn,
binomN = binomN,
movementmodel = movementmodel,
edgemethod = edgemethod,
usermodel = usermodel,
moveargsi = moveargsi,
start = start,
link = link,
fixed = fixed,
timecov = timecov,
sessioncov = sessioncov,
agecov = agecov,
dframe = dframe,
dframe0 = dframe0,
details = details,
method = method,
ncores = ncores,
design = design,
design0 = design0,
parindx = parindx,
intervals = intervals,
vars = vars,
betanames = betanames,
realnames = realnames,
sessionlabels = sessnames,
fit = this.fit,
beta.vcv = covar,
eigH = eigH,
version = desc$Version,
starttime = starttime,
proctime = proc.time()[3] - ptm[3]
)
if (secr) temp <- c(temp, list(mask=mask))
attr (temp, 'class') <- 'openCR'
###############################################
## if (!is.null(cluster)) stopCluster(cluster)
###############################################
memo(paste('Completed in ', round(temp$proctime,2), ' seconds at ',
format(Sys.time(), "%H:%M:%S %d %b %Y"),
sep=''), details$trace)
temp
}
################################################################################
|
#' Function mediador_inteiro_teor
#'
#' This function downloads the whole texts of the agreements according to "solitacao" number
#' extracted through mediador_meta function
#' @param solicitacao number that works as an id of the document.
#' @param download whether to download the document or not
#' @param vector wheter to create a vector of the documents as an R object.
#' @keywords mediador, labor agreements.
#' @import RCurl
#' @import XML
#' @import stringi
#' @return A file with the document and/or an R object with the text.
#' @examples
#' m<-mediador_inteiro_teor(solicitacao=c("MR087305/2016","MR035244/2009"))
#' @export
mediador_inteiro_teor<-function(solicitacao,download=TRUE,vector=TRUE){
  ## Build one URL per "solicitacao" id (paste0 is vectorized).
  url <- paste0("http://www3.mte.gov.br/sistemas/mediador/Resumo/ResumoVisualizar?NrSolicitacao=",
                solicitacao)
  ## File-safe ids for naming the downloaded files: replace EVERY "/" with "_"
  ## (the original str_replace only replaced the first occurrence).
  s <- as.character(solicitacao)
  s <- stringr::str_replace_all(s, "/", "_")
  ## Preallocate the result instead of growing it element by element;
  ## when vector = FALSE keep the original "" return value for compatibility.
  inteiro.teor <- if (vector) character(length(s)) else ""
  ## seq_along() is safe for zero-length input, unlike 1:length(s).
  for (i in seq_along(s)) {
    page <- getURL(url[i])
    doc <- htmlParse(page, encoding = "UTF-8")
    ## Pull the title/clause/justified-text nodes that make up the agreement body.
    nodes <- xpathApply(doc, "//*[@class='titulo' or @align='justify' or @class='textosubgrupo'or @class='tituloClausula' or @class='textonome' or @class='descricaoClausula']", xmlValue)
    txt <- toString(unlist(nodes))
    ## toString() joins nodes with ", "; turn those separators into newlines.
    txt <- stri_replace_all_regex(txt, "\\s+,", "\n")
    if (vector) inteiro.teor[i] <- txt
    if (download) write(txt, paste0(s[i], ".txt"))
  }
  return(inteiro.teor)
}
| /R/mediador_inteiro_teor.R | no_license | jjesusfilho/brLabor | R | false | false | 1,518 | r | #' Function mediador_inteiro_teor
#'
#' This function downloads the whole texts of the agreements according to "solitacao" number
#' extracted through mediador_meta function
#' @param solicitacao number that works as an id of the document.
#' @param download whether to download the document or not
#' @param vector wheter to create a vector of the documents as an R object.
#' @keywords mediador, labor agreements.
#' @import RCurl
#' @import XML
#' @import stringi
#' @return A file with the document and/or an R object with the text.
#' @examples
#' m<-mediador_inteiro_teor(solicitacao=c("MR087305/2016","MR035244/2009"))
#' @export
# Download the full text of labor agreements from the Mediador site, one page
# per "solicitacao" id; optionally write each text to "<id>.txt" and/or return
# all texts as a character vector (see roxygen block above for parameters).
mediador_inteiro_teor<-function(solicitacao,download=TRUE,vector=TRUE){
# One URL per id (paste0 is vectorized over solicitacao).
url<-paste0("http://www3.mte.gov.br/sistemas/mediador/Resumo/ResumoVisualizar?NrSolicitacao=",solicitacao)
## Separate copy of the ids, used to name the files to be downloaded
## ("/" is not valid in a file name, so it becomes "_").
## NOTE(review): str_replace only replaces the FIRST "/" — fine for ids of the
## form "MR.../YYYY", but worth confirming no id carries more than one slash.
s<-as.character(solicitacao)
s<-stringr::str_replace(s,"/","_")
## Loop that downloads the texts.
inteiro.teor<-""
for(i in 1:length(s)){
s1<-getURL(url[i])
s1<-htmlParse(s1,encoding="UTF-8")
# Extract the title/clause/justified-text nodes that form the agreement body.
s1<-xpathApply(s1,"//*[@class='titulo' or @align='justify' or @class='textosubgrupo'or @class='tituloClausula' or @class='textonome' or @class='descricaoClausula']",xmlValue)
# toString() joins the nodes with ", "; the regex below turns those
# separators into newlines.
s1<-toString(unlist(s1))
s1<-stri_replace_all_regex(s1,"\\s+,","\n")
if(vector==TRUE) inteiro.teor[i]<-s1
if(download==TRUE) write(s1,paste0(s[i],".txt"))
}
return(inteiro.teor)
}
|
# Author: Robert J. Hijmans
# Date : March 2014
# Version 1.0
# Licence GPL v3
#if ( !isGeneric("focalFun") ) {
# setGeneric("focalFun", function(x, ...)
# standardGeneric("focalFun"))
#}
#setMethod('focalFun', signature(x='Raster'),
# Apply an arbitrary function over the ngb x ngb moving window of a Raster,
# using a snow-style cluster when one is available.
#
# x        Raster* input
# fun      function applied to each cell's neighbourhood vector
# ngb      neighbourhood (window) size
# filename optional output file; '' keeps the result in memory when possible
# ...      passed on to writeRaster / writeStart / pbCreate
.focalFun <- function(x, fun, ngb=5, filename='', ...) {
	out <- raster(x)
	## Pick the row-wise apply engine once, instead of duplicating the
	## in-memory and block-wise code paths for the cluster and serial cases.
	if (.doCluster()) {
		cl <- getCluster()
		on.exit( returnCluster() )
		applyfun <- function(v) parallel::parApply(cl, v, 1, fun)
	} else {
		applyfun <- function(v) apply(v, 1, fun)
	}
	if (canProcessInMemory(x)) {
		## Whole raster at once: one focal-values array, one apply.
		v <- getValuesFocal(x, 1, nrow(x), ngb=ngb, array=TRUE)
		out <- setValues(out, applyfun(v))
		if (filename != '') {
			out <- writeRaster(out, filename, ...)
		}
		return(out)
	} else {
		## Too large for memory: process block by block, writing as we go.
		tr <- blockSize(out)
		pb <- pbCreate(tr$n, label='focalFun', ...)
		out <- writeStart(out, filename=filename, ...)
		for (i in seq_len(tr$n)) {
			v <- getValuesFocal(x, tr$row[i], tr$nrows[i], ngb=ngb, array=TRUE)
			out <- writeValues(out, applyfun(v), tr$row[i])
			## The original created the progress bar but never advanced
			## or closed it; step and close it properly.
			pbStep(pb, i)
		}
		pbClose(pb)
		return(writeStop(out))
	}
}
#)
| /R/focalFun.R | no_license | cran/raster | R | false | false | 1,652 | r | # Author: Robert J. Hijmans
# Date : March 2014
# Version 1.0
# Licence GPL v3
#if ( !isGeneric("focalFun") ) {
# setGeneric("focalFun", function(x, ...)
# standardGeneric("focalFun"))
#}
#setMethod('focalFun', signature(x='Raster'),
# Apply `fun` to every ngb x ngb focal (moving window) neighbourhood of a
# Raster object.
#
# x        Raster* object
# fun      function applied to each cell's focal window (one row of the
#          array returned by getValuesFocal)
# ngb      neighbourhood size, passed on to getValuesFocal()
# filename optional output filename; '' keeps the result in memory
# ...      passed on to writeRaster/writeStart/pbCreate
.focalFun <- function(x, fun, ngb=5, filename='', ...) {
  out <- raster(x)

  # Choose the row-apply implementation once, instead of duplicating the
  # whole in-memory/block-wise logic for the cluster and serial cases.
  if (.doCluster()) {
    cl <- getCluster()
    on.exit( returnCluster() )
    applyRows <- function(v) parallel::parApply(cl, v, 1, fun)
  } else {
    applyRows <- function(v) apply(v, 1, fun)
  }

  if (canProcessInMemory(x)) {
    v <- getValuesFocal(x, 1, nrow(x), ngb=ngb, array=TRUE)
    out <- setValues(out, applyRows(v))
    if (filename != '') {
      out <- writeRaster(out, filename, ...)
    }
    return(out)
  }

  # Block-wise processing for rasters that do not fit in memory.
  tr <- blockSize(out)
  pb <- pbCreate(tr$n, label='focalFun', ...)
  out <- writeStart(out, filename=filename, ...)
  for (i in seq_len(tr$n)) {
    v <- getValuesFocal(x, tr$row[i], tr$nrows[i], ngb=ngb, array=TRUE)
    out <- writeValues(out, applyRows(v), tr$row[i])
    # fix: progress bar was created but never advanced
    pbStep(pb, i)
  }
  # fix: progress bar was never closed
  pbClose(pb)
  writeStop(out)
}
#)
|
# Install dplyr only when it is missing; the original unconditionally
# re-installed the package on every run of the script.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
# adding a column with mutate
library(dslabs)
data("murders")
# add the murder rate (per 100,000 population) and its rank (1 = highest rate)
murders <- mutate(murders, rate = total / population * 100000, rank = rank(-rate))
# Create a table, call it my_states, that satisfies both the conditions:
# region is Northeast or West, AND murder rate below 1 per 100,000.
my_states <- filter(murders, region %in% c("Northeast", "West") & rate < 1)
# Use select to show only the state name, the murder rate and the rank
select(my_states, state, rate, rank) | /Exercises/Basics-R/Filter-2 Conditions.R | permissive | IbisMalko/R-Scripts | R | false | false | 511 | r | # installing and loading the dplyr package
# Install dplyr only when it is missing; the original unconditionally
# re-installed the package on every run of the script.
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
# adding a column with mutate
library(dslabs)
data("murders")
# add the murder rate (per 100,000 population) and its rank (1 = highest rate)
murders <- mutate(murders, rate = total / population * 100000, rank = rank(-rate))
# Create a table, call it my_states, that satisfies both the conditions:
# region is Northeast or West, AND murder rate below 1 per 100,000.
my_states <- filter(murders, region %in% c("Northeast", "West") & rate < 1)
# Use select to show only the state name, the murder rate and the rank
select(my_states, state, rate, rank) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/df_21311NW006B.R
\docType{data}
\name{df_21311NW006B}
\alias{df_21311NW006B}
\title{21311NW006B - Studierende / Studenten, Land, Hochschulen, Studienfach, Hochschulsemester, Nationalität, Semester}
\format{
A tibble with 633,244 rows and 7 variables: \describe{ \item{bil002}{Studierende / Studenten} \item{dland}{Land} \item{bilhs1}{Hochschulen} \item{bilsf1}{Studienfach} \item{bilsh1}{Hochschulsemester} \item{nat}{Nationalität} \item{semest}{Semester} }
}
\source{
The data in this package was obtained from the \href{https://www.landesdatenbank.nrw.de}{Landesdatenbank NRW} (retrieved on 2021-07-12) using the code '21311NW006B'. The licence for data obtained from the Landesdatenbank NRW / Destatis is available here (see paragraph 2 section 2): \href{http://www.govdata.de/dl-de/by-2-0}{Data licence Germany – attribution – Version 2.0}. Paragraph 2 section 3 requires including a reference to the dataset (URI) if available. To the best of my knowledge, the Landesdatenbank NRW does not yet provide Uniform Resource Identifiers (URI) to identify their tables.
Paragraph 3 stipulates that 'changes, editing, new designs or other amendments must be marked as such in the source note'. Compared to the raw data, the following changes were made: (1) column names were changed to lower case, (2) some technical columns were excluded and (3) value labels were turned into factors using the description of each value label. The code for all these transformations is available through this package.
}
\usage{
df_21311NW006B
}
\description{
The table is sourced from the \href{https://www.landesdatenbank.nrw.de}{Landesdatenbank NRW} from the series 2 Bildung, Sozialleistungen, Gesundheit, Rechtspflege (\emph{Education, Social Benefits, Health, Administration of Justice}) -> 21 Bildung und Kultur (\emph{Education and Culture}) -> 213 Hochschulen (\emph{Universities}) -> 21311 Statistik der Studenten (\emph{Student Statistics}).
}
\details{
The following summary is automatically generated using \code{\link[skimr:skim]{skimr::skim()}}
Table: Data summary\tabular{ll}{
\tab \cr
Name \tab df_21311NW006B \cr
Number of rows \tab 633244 \cr
Number of columns \tab 7 \cr
_______________________ \tab \cr
Column type frequency: \tab \cr
character \tab 6 \cr
numeric \tab 1 \cr
________________________ \tab \cr
Group variables \tab None \cr
}
\strong{Variable type: character}\tabular{lrrrrrrr}{
skim_variable \tab n_missing \tab complete_rate \tab min \tab max \tab empty \tab n_unique \tab whitespace \cr
DLAND \tab 0 \tab 1 \tab 2 \tab 2 \tab 0 \tab 1 \tab 0 \cr
BILHS1 \tab 0 \tab 1 \tab 6 \tab 6 \tab 0 \tab 174 \tab 0 \cr
BILSF1 \tab 0 \tab 1 \tab 5 \tab 5 \tab 0 \tab 234 \tab 0 \cr
BILSH1 \tab 0 \tab 1 \tab 7 \tab 7 \tab 0 \tab 20 \tab 0 \cr
NAT \tab 0 \tab 1 \tab 4 \tab 4 \tab 0 \tab 2 \tab 0 \cr
SEMEST \tab 0 \tab 1 \tab 10 \tab 10 \tab 0 \tab 22 \tab 0 \cr
}
\strong{Variable type: numeric}\tabular{lrrrrrrrrrl}{
skim_variable \tab n_missing \tab complete_rate \tab mean \tab sd \tab p0 \tab p25 \tab p50 \tab p75 \tab p100 \tab hist \cr
BIL002 \tab 0 \tab 1 \tab 20.6 \tab 62.75 \tab 1 \tab 2 \tab 5 \tab 16 \tab 6663 \tab ▇▁▁▁▁ \cr
}
}
\examples{
df_21311NW006B
}
\keyword{datasets}
| /man/df_21311NW006B.Rd | permissive | RichardMeyer-Eppler/studentenstatistikNRW | R | false | true | 3,360 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/df_21311NW006B.R
\docType{data}
\name{df_21311NW006B}
\alias{df_21311NW006B}
\title{21311NW006B - Studierende / Studenten, Land, Hochschulen, Studienfach, Hochschulsemester, Nationalität, Semester}
\format{
A tibble with 633,244 rows and 7 variables: \describe{ \item{bil002}{Studierende / Studenten} \item{dland}{Land} \item{bilhs1}{Hochschulen} \item{bilsf1}{Studienfach} \item{bilsh1}{Hochschulsemester} \item{nat}{Nationalität} \item{semest}{Semester} }
}
\source{
The data in this package was obtained from the \href{https://www.landesdatenbank.nrw.de}{Landesdatenbank NRW} (retrieved on 2021-07-12) using the code '21311NW006B'. The licence for data obtained from the Landesdatenbank NRW / Destatis is available here (see paragraph 2 section 2): \href{http://www.govdata.de/dl-de/by-2-0}{Data licence Germany – attribution – Version 2.0}. Paragraph 2 section 3 requires including a reference to the dataset (URI) if available. To the best of my knowledge, the Landesdatenbank NRW does not yet provide Uniform Resource Identifiers (URI) to identify their tables.
Paragraph 3 stipulates that 'changes, editing, new designs or other amendments must be marked as such in the source note'. Compared to the raw data, the following changes were made: (1) column names were changed to lower case, (2) some technical columns were excluded and (3) value labels were turned into factors using the description of each value label. The code for all these transformations is available through this package.
}
\usage{
df_21311NW006B
}
\description{
The table is sourced from the \href{https://www.landesdatenbank.nrw.de}{Landesdatenbank NRW} from the series 2 Bildung, Sozialleistungen, Gesundheit, Rechtspflege (\emph{Education, Social Benefits, Health, Administration of Justice}) -> 21 Bildung und Kultur (\emph{Education and Culture}) -> 213 Hochschulen (\emph{Universities}) -> 21311 Statistik der Studenten (\emph{Student Statistics}).
}
\details{
The following summary is automatically generated using \code{\link[skimr:skim]{skimr::skim()}}
Table: Data summary\tabular{ll}{
\tab \cr
Name \tab df_21311NW006B \cr
Number of rows \tab 633244 \cr
Number of columns \tab 7 \cr
_______________________ \tab \cr
Column type frequency: \tab \cr
character \tab 6 \cr
numeric \tab 1 \cr
________________________ \tab \cr
Group variables \tab None \cr
}
\strong{Variable type: character}\tabular{lrrrrrrr}{
skim_variable \tab n_missing \tab complete_rate \tab min \tab max \tab empty \tab n_unique \tab whitespace \cr
DLAND \tab 0 \tab 1 \tab 2 \tab 2 \tab 0 \tab 1 \tab 0 \cr
BILHS1 \tab 0 \tab 1 \tab 6 \tab 6 \tab 0 \tab 174 \tab 0 \cr
BILSF1 \tab 0 \tab 1 \tab 5 \tab 5 \tab 0 \tab 234 \tab 0 \cr
BILSH1 \tab 0 \tab 1 \tab 7 \tab 7 \tab 0 \tab 20 \tab 0 \cr
NAT \tab 0 \tab 1 \tab 4 \tab 4 \tab 0 \tab 2 \tab 0 \cr
SEMEST \tab 0 \tab 1 \tab 10 \tab 10 \tab 0 \tab 22 \tab 0 \cr
}
\strong{Variable type: numeric}\tabular{lrrrrrrrrrl}{
skim_variable \tab n_missing \tab complete_rate \tab mean \tab sd \tab p0 \tab p25 \tab p50 \tab p75 \tab p100 \tab hist \cr
BIL002 \tab 0 \tab 1 \tab 20.6 \tab 62.75 \tab 1 \tab 2 \tab 5 \tab 16 \tab 6663 \tab ▇▁▁▁▁ \cr
}
}
\examples{
df_21311NW006B
}
\keyword{datasets}
|
#===============================================================================================
#vaccine impact (proportion of preventable IPD cases) e.g., cases that could have been prevented due to vaccination
# NOTE(review): relies on objects defined elsewhere in the project: `ipd_mc`
# (per-country Monte Carlo IPD fits), `VE_impact_by_age` (simulated averted
# cases), `make_grid_plot()` and `q` -- confirm against the calling script.
impact_per_case <- ipd_mc %>%
# for each Monte Carlo draw: total fitted IPD cases among people old enough
# to be vaccinated at each candidate vaccination age (55, 60, ..., 85)
mutate(cases = map(.x = mc, .f = ~group_by(.x, sim) %>%
# make it per vaccinee
crossing(Vac.age = seq(55, 85, by = 5)) %>%
filter(agey >= Vac.age) %>%
# end per vaccinee
group_by(Vac.age, sim) %>%
summarise(cases = sum(fit)))) %>%
select(-data, -model, -mc) %>%
unnest(cases) %>%
inner_join(VE_impact_by_age) %>%
# relative impact = averted cases / total cases, per simulation draw
mutate(rel_impact = Impact/cases) %>%
# collapse the simulation draws: median and 95% interval of rel_impact
group_by_at(.vars = vars(-c(sim, cases, Impact, rel_impact))) %>%
nest %>%
mutate(Q = map(.x = data, ~quantile(.x$rel_impact, probs = c(0.025, 0.5, 0.975)))) %>%
unnest_wider(Q)
# plot_impact_per_case <-
VE_C1 <- make_grid_plot(x = impact_per_case, percent = TRUE, ylab = "Vaccine impact (proportion of preventable IPD cases)") +
geom_point(data = q, aes(x = Vac.age, y = Impactmax), shape = 4, stroke = 1, size = 1) +
theme(legend.position = "bottom") +
scale_color_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1") +
scale_fill_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1")
ggsave(filename = "output/S8_Fig_vaccine_impact_per_vaccinee.png",
plot = VE_C1,
width = 14, height = 8, units = "in", dpi = 300)
#===============================================================================================
# vaccine impact per 100,000 total population (55+y pop)
impact_per_vaccinee <-
# get the population over 55, 60, etc. as potential vaccinees
pop_country_df %>%
crossing(Vac.age = seq(55, 85, by = 5)) %>%
filter(agey >= Vac.age) %>%
group_by(country, Vac.age) %>%
summarise(pop = sum(ntotal)) %>%
# merge with Impact data (averted cases, absolute)
inner_join(VE_impact_by_age) %>%
# relative impact is per 100,000 total population 55+y
# NOTE(review): `scale` must be a numeric factor defined upstream
# (presumably 100,000); otherwise this would pick up base::scale -- confirm.
mutate(rel_impact = Impact/pop*scale) %>%
# collapse the simulation draws: median and 95% interval of rel_impact
group_by_at(.vars = vars(-c(sim, pop, Impact, rel_impact))) %>%
nest %>%
mutate(Q = map(.x = data, ~quantile(.x$rel_impact, probs = c(0.025, 0.5, 0.975)))) %>%
unnest_wider(Q)
# plot_impact_per_vaccinee <-
VE_C2 <- make_grid_plot(x = impact_per_vaccinee, ylab = "Vaccine impact (Cases averted per 100,000 population)") +
geom_point(data = q, aes(x = Vac.age, y = Impactmax), shape = 4, stroke = 1, size = 1) +
theme(legend.position = "bottom") +
scale_color_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1") +
scale_fill_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1")
ggsave(filename = "output/S9_Fig_vaccine_impact_per_100k_pop.png",
plot = VE_C2,
width = 14, height = 8, units = "in", dpi = 300)
width = 14, height = 8, units = "in", dpi = 300) | /archived_code/6_impact_scenario3.R | permissive | deusthindwa/optimal.age.targeting.pneumo.vaccines | R | false | false | 2,956 | r | #===============================================================================================
#vaccine impact (proportion of preventable IPD cases) e.g., cases that could have been prevented due to vaccination
# NOTE(review): relies on objects defined elsewhere in the project: `ipd_mc`
# (per-country Monte Carlo IPD fits), `VE_impact_by_age` (simulated averted
# cases), `make_grid_plot()` and `q` -- confirm against the calling script.
impact_per_case <- ipd_mc %>%
# for each Monte Carlo draw: total fitted IPD cases among people old enough
# to be vaccinated at each candidate vaccination age (55, 60, ..., 85)
mutate(cases = map(.x = mc, .f = ~group_by(.x, sim) %>%
# make it per vaccinee
crossing(Vac.age = seq(55, 85, by = 5)) %>%
filter(agey >= Vac.age) %>%
# end per vaccinee
group_by(Vac.age, sim) %>%
summarise(cases = sum(fit)))) %>%
select(-data, -model, -mc) %>%
unnest(cases) %>%
inner_join(VE_impact_by_age) %>%
# relative impact = averted cases / total cases, per simulation draw
mutate(rel_impact = Impact/cases) %>%
# collapse the simulation draws: median and 95% interval of rel_impact
group_by_at(.vars = vars(-c(sim, cases, Impact, rel_impact))) %>%
nest %>%
mutate(Q = map(.x = data, ~quantile(.x$rel_impact, probs = c(0.025, 0.5, 0.975)))) %>%
unnest_wider(Q)
# plot_impact_per_case <-
VE_C1 <- make_grid_plot(x = impact_per_case, percent = TRUE, ylab = "Vaccine impact (proportion of preventable IPD cases)") +
geom_point(data = q, aes(x = Vac.age, y = Impactmax), shape = 4, stroke = 1, size = 1) +
theme(legend.position = "bottom") +
scale_color_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1") +
scale_fill_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1")
ggsave(filename = "output/S8_Fig_vaccine_impact_per_vaccinee.png",
plot = VE_C1,
width = 14, height = 8, units = "in", dpi = 300)
#===============================================================================================
# vaccine impact per 100,000 total population (55+y pop)
impact_per_vaccinee <-
# get the population over 55, 60, etc. as potential vaccinees
pop_country_df %>%
crossing(Vac.age = seq(55, 85, by = 5)) %>%
filter(agey >= Vac.age) %>%
group_by(country, Vac.age) %>%
summarise(pop = sum(ntotal)) %>%
# merge with Impact data (averted cases, absolute)
inner_join(VE_impact_by_age) %>%
# relative impact is per 100,000 total population 55+y
# NOTE(review): `scale` must be a numeric factor defined upstream
# (presumably 100,000); otherwise this would pick up base::scale -- confirm.
mutate(rel_impact = Impact/pop*scale) %>%
# collapse the simulation draws: median and 95% interval of rel_impact
group_by_at(.vars = vars(-c(sim, pop, Impact, rel_impact))) %>%
nest %>%
mutate(Q = map(.x = data, ~quantile(.x$rel_impact, probs = c(0.025, 0.5, 0.975)))) %>%
unnest_wider(Q)
# plot_impact_per_vaccinee <-
VE_C2 <- make_grid_plot(x = impact_per_vaccinee, ylab = "Vaccine impact (Cases averted per 100,000 population)") +
geom_point(data = q, aes(x = Vac.age, y = Impactmax), shape = 4, stroke = 1, size = 1) +
theme(legend.position = "bottom") +
scale_color_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1") +
scale_fill_brewer(name = "Age dependent vaccine efficacy/effectiveness", palette = "Set1")
ggsave(filename = "output/S9_Fig_vaccine_impact_per_100k_pop.png",
plot = VE_C2,
width = 14, height = 8, units = "in", dpi = 300)
# Read every column as character (colClasses) so values are not coerced.
outcome <- read.csv("data/outcome-of-care-measures.csv", colClasses = "character")
head(outcome)    # peek at the first rows
ncol(outcome)    # how many columns
names(outcome)   # column names
# Convert column 11 from character to numeric.
outcome[[11]] <- as.numeric(outcome[[11]])
## You may get a warning about NAs being introduced; that is okay
hist(outcome[, 11]) | /Learn-R/Week4/part1.R | no_license | sswess/DataScience | R | false | false | 261 | r | outcome <- read.csv("data/outcome-of-care-measures.csv", colClasses = "character")
head(outcome)    # peek at the first rows
ncol(outcome)    # how many columns
names(outcome)   # column names
# Convert column 11 from character to numeric.
outcome[[11]] <- as.numeric(outcome[[11]])
## You may get a warning about NAs being introduced; that is okay
hist(outcome[[11]])  # distribution of the column-11 values
\name{mcsv_r}
\alias{mcsv_r}
\alias{mcsv_w}
\title{Read/Write Multiple csv Files at a Time}
\usage{
mcsv_r(files, a.names = NULL, l.name = NULL, list = TRUE,
pos = 1, envir = as.environment(pos))
mcsv_w(..., dir = NULL, open = FALSE, sep = ", ",
dataframes = NULL, pos = 1,
envir = as.environment(pos))
}
\arguments{
\item{files}{csv file(s) to read.}
\item{a.names}{object names to assign the csv file(s) to.
If \code{NULL} assigns the csv to the name(s) of the csv
file(s) in the global environment.}
\item{l.name}{A character vector of names to assign to
the csv files (dataframes) being read in. Default
(\code{NULL}) uses the names of the files in the
directory without the file extension.}
\item{list}{A character vector of length one to name the
list being read in. Default is \code{"L1"}.}
\item{pos}{where to do the removal. By default, uses the
current environment.}
\item{envir}{the environment to use.}
\item{\dots}{data.frame object(s) to write to a file or a
list of data.frame objects. If the objects in a list are
unnamed V + digit will be assigned. Lists of dataframes
(e.g., the output from \code{\link[qdap]{termco}} or
\code{\link[qdap]{polarity}}) can be passed as well.}
\item{dir}{optional directory names. If \code{NULL} a
directory will be created in the working directory with
the data and time stamp as the folder name.}
\item{open}{logical. If \code{TRUE} opens the directory
upon completion.}
\item{sep}{A character string to separate the terms.}
\item{dataframes}{An optional character vector of
dataframes in lieu of \dots argument.}
}
\value{
\code{mcsv_r} - reads in multiple csv files at once.
\code{mcsv_w} - creates a directory with multiple csv
files. Silently returns the path of the directory.
}
\description{
\code{mcsv_r} - Read and assign multiple csv files at the
same time.
\code{mcsv_w} - Write multiple csv files into a file at
the same time.
}
\details{
mcsv is short for "multiple csv" and the suffix c(_r, _w)
stands for "read" (r) or "write" (w).
}
\note{
\code{\link[qdap]{mcsv_r}} is useful for reading in
multiple csv files from \code{\link[qdap]{cm_df.temp}}
for interaction with \code{\link[qdap]{cm_range2long}}.
}
\examples{
\dontrun{
## mcsv_r EXAMPLE:
mtcarsb <- mtcars[1:5, ]; CO2b <- CO2[1:5, ]
(a <- mcsv_w(mtcarsb, CO2b, dir="foo"))
rm("mtcarsb", "CO2b") # gone from .GlobalEnv
(nms <- dir(a))
mcsv_r(file.path(a, nms))
mtcarsb; CO2b
rm("mtcarsb", "CO2b") # gone from .GlobalEnv
mcsv_r(file.path(a, nms), paste0("foo.dat", 1:2))
foo.dat1; foo.dat2
rm("foo.dat1", "foo.dat2") # gone from .GlobalEnv
delete("foo")
## mcsv_w EXAMPLES:
(a <- mcsv_w(mtcars, CO2, dir="foo"))
delete("foo")
## Write lists of dataframes as well
poldat <- with(DATA, polarity(state, person))
term <- c("the ", "she", " wh")
termdat <- with(raj.act.1, termco(dialogue, person, term))
mcsv_w(poldat, termdat, mtcars, CO2, dir="foo2")
delete("foo2")
}
}
\seealso{
\code{\link[qdap]{cm_range2long}},
\code{\link[qdap]{cm_df.temp}},
\code{\link[qdap]{condense}}, \code{\link[base]{assign}}
}
| /man/multicsv.Rd | no_license | abresler/qdap | R | false | false | 3,147 | rd | \name{mcsv_r}
\alias{mcsv_r}
\alias{mcsv_w}
\title{Read/Write Multiple csv Files at a Time}
\usage{
mcsv_r(files, a.names = NULL, l.name = NULL, list = TRUE,
pos = 1, envir = as.environment(pos))
mcsv_w(..., dir = NULL, open = FALSE, sep = ", ",
dataframes = NULL, pos = 1,
envir = as.environment(pos))
}
\arguments{
\item{files}{csv file(s) to read.}
\item{a.names}{object names to assign the csv file(s) to.
If \code{NULL} assigns the csv to the name(s) of the csv
file(s) in the global environment.}
\item{l.name}{A character vector of names to assign to
the csv files (dataframes) being read in. Default
(\code{NULL}) uses the names of the files in the
directory without the file extension.}
\item{list}{A character vector of length one to name the
list being read in. Default is \code{"L1"}.}
\item{pos}{where to do the removal. By default, uses the
current environment.}
\item{envir}{the environment to use.}
\item{\dots}{data.frame object(s) to write to a file or a
list of data.frame objects. If the objects in a list are
unnamed V + digit will be assigned. Lists of dataframes
(e.g., the output from \code{\link[qdap]{termco}} or
\code{\link[qdap]{polarity}}) can be passed as well.}
\item{dir}{optional directory names. If \code{NULL} a
directory will be created in the working directory with
the data and time stamp as the folder name.}
\item{open}{logical. If \code{TRUE} opens the directory
upon completion.}
\item{sep}{A character string to separate the terms.}
\item{dataframes}{An optional character vector of
dataframes in lieu of \dots argument.}
}
\value{
\code{mcsv_r} - reads in multiple csv files at once.
\code{mcsv_w} - creates a directory with multiple csv
files. Silently returns the path of the directory.
}
\description{
\code{mcsv_r} - Read and assign multiple csv files at the
same time.
\code{mcsv_w} - Write multiple csv files into a file at
the same time.
}
\details{
mcsv is short for "multiple csv" and the suffix c(_r, _w)
stands for "read" (r) or "write" (w).
}
\note{
\code{\link[qdap]{mcsv_r}} is useful for reading in
multiple csv files from \code{\link[qdap]{cm_df.temp}}
for interaction with \code{\link[qdap]{cm_range2long}}.
}
\examples{
\dontrun{
## mcsv_r EXAMPLE:
mtcarsb <- mtcars[1:5, ]; CO2b <- CO2[1:5, ]
(a <- mcsv_w(mtcarsb, CO2b, dir="foo"))
rm("mtcarsb", "CO2b") # gone from .GlobalEnv
(nms <- dir(a))
mcsv_r(file.path(a, nms))
mtcarsb; CO2b
rm("mtcarsb", "CO2b") # gone from .GlobalEnv
mcsv_r(file.path(a, nms), paste0("foo.dat", 1:2))
foo.dat1; foo.dat2
rm("foo.dat1", "foo.dat2") # gone from .GlobalEnv
delete("foo")
## mcsv_w EXAMPLES:
(a <- mcsv_w(mtcars, CO2, dir="foo"))
delete("foo")
## Write lists of dataframes as well
poldat <- with(DATA, polarity(state, person))
term <- c("the ", "she", " wh")
termdat <- with(raj.act.1, termco(dialogue, person, term))
mcsv_w(poldat, termdat, mtcars, CO2, dir="foo2")
delete("foo2")
}
}
\seealso{
\code{\link[qdap]{cm_range2long}},
\code{\link[qdap]{cm_df.temp}},
\code{\link[qdap]{condense}}, \code{\link[base]{assign}}
}
|
# R Tutorial - #6 - Model comparison (Hierarchical linear regression)
# By: Aaron Prosser, MD MSc | Last edited: 7-Dec-2020
# The following code is open-source. Feel free to edit/share at your pleasure.
# All the data sets and R code for this tutorial series can be downloaded here:
# https://github.com/Statistics4Doctors
# (fix: the URL above was a bare expression, which is a syntax error in R)
# Tutorial Outline:
# Part 1 - Model comparison (Hierarchical linear regression)

# Install these packages only if missing (avoids reinstalling on every run):
if (!requireNamespace("openxlsx", quietly = TRUE)) install.packages("openxlsx")
# Load the packages we're using in this tutorial:
library(openxlsx)
# Disable scientific notation:
options(scipen = 999, digits = 4)
# Set directory (adjust to your local path):
dir <- "C:/Users/Aaron/iCloudDrive/Projects/Statistics for Doctors/R Tutorial/"
# Import data and create a data frame:
file <- "ChildAggression_Excel.xlsx"
path <- paste0(dir, file)
import <- read.xlsx(path, sheet = 1, startRow = 1)
dat <- as.data.frame(import)

# -------- #
# Part 1 # Model comparison (Hierarchical linear regression)
# -------- #

# Nested models: each adds one predictor of child aggression.
m0 <- lm(Aggression ~ 1, data = dat)
m1 <- lm(Aggression ~ Parenting_Style, data = dat)
m2 <- lm(Aggression ~ Parenting_Style + Video_Games, data = dat)
m3 <- lm(Aggression ~ Parenting_Style + Video_Games + Sibling_Aggression, data = dat)

# Get the R-squared of the reduced vs. full model
reduced <- m0
full <- m1
summary(reduced)
summary(full)
# Compare the full model to the reduced model:
anova(reduced, full) | /R Tutorial - #6 - Model comparison (Hierarchical linear regression).R | no_license | Sergiommrr/R-Tutorial-All-Files | R | false | false | 1,446 | r | # R Tutorial - #6 - Model comparison (Hierarchical linear regression)
# By: Aaron Prosser, MD MSc | Last edited: 7-Dec-2020
# The following code is open-source. Feel free to edit/share at your pleasure.
# All the data sets and R code for this tutorial series can be downloaded here:
# https://github.com/Statistics4Doctors
# (fix: the URL above was a bare expression, which is a syntax error in R)
# Tutorial Outline:
# Part 1 - Model comparison (Hierarchical linear regression)

# Install these packages only if missing (avoids reinstalling on every run):
if (!requireNamespace("openxlsx", quietly = TRUE)) install.packages("openxlsx")
# Load the packages we're using in this tutorial:
library(openxlsx)
# Disable scientific notation:
options(scipen = 999, digits = 4)
# Set directory (adjust to your local path):
dir <- "C:/Users/Aaron/iCloudDrive/Projects/Statistics for Doctors/R Tutorial/"
# Import data and create a data frame:
file <- "ChildAggression_Excel.xlsx"
path <- paste0(dir, file)
import <- read.xlsx(path, sheet = 1, startRow = 1)
dat <- as.data.frame(import)

# -------- #
# Part 1 # Model comparison (Hierarchical linear regression)
# -------- #

# Nested models: each adds one predictor of child aggression.
m0 <- lm(Aggression ~ 1, data = dat)
m1 <- lm(Aggression ~ Parenting_Style, data = dat)
m2 <- lm(Aggression ~ Parenting_Style + Video_Games, data = dat)
m3 <- lm(Aggression ~ Parenting_Style + Video_Games + Sibling_Aggression, data = dat)

# Get the R-squared of the reduced vs. full model
reduced <- m0
full <- m1
summary(reduced)
summary(full)
# Compare the full model to the reduced model:
anova(reduced, full) |
# Col union
# Form the union of columns in a and b. If there are columns of the same name in both a and b, take the column from a.
#
# @param data frame a
# @param data frame b
# @keyword internal
cunion <- function(a, b) {
  # Union of columns: keep everything in `a`, then append the columns of
  # `b` whose names do not already occur in `a` (so `a` wins on clashes).
  if (length(a) == 0) {
    return(b)
  }
  if (length(b) == 0) {
    return(a)
  }
  new_cols <- setdiff(names(b), names(a))
  cbind(a, b[new_cols])
}
# Interleave (or zip) multiple units into one vector
# S3 generic; dispatch is on the class of the first argument in `...`.
interleave <- function(...) UseMethod("interleave")
#' @export
interleave.unit <- function(...) {
# Grid "unit" objects cannot be interleaved directly: coerce each one to a
# plain list, interleave those with the default method, then splice the
# pieces back into a single unit with unit.c().
units <- lapply(list(...), as.list)
interleaved_list <- interleave.default(!!!units)
inject(unit.c(!!!interleaved_list))
}
#' @export
interleave.default <- function(...) {
# Delegate to vctrs::vec_interleave(), which zips the inputs element-wise.
vec_interleave(...)
}
| /R/utilities-matrix.R | permissive | tidyverse/ggplot2 | R | false | false | 689 | r | # Col union
# Form the union of columns in a and b. If there are columns of the same name in both a and b, take the column from a.
#
# @param data frame a
# @param data frame b
# @keyword internal
cunion <- function(a, b) {
  # Union of columns: keep everything in `a`, then append the columns of
  # `b` whose names do not already occur in `a` (so `a` wins on clashes).
  if (length(a) == 0) {
    return(b)
  }
  if (length(b) == 0) {
    return(a)
  }
  new_cols <- setdiff(names(b), names(a))
  cbind(a, b[new_cols])
}
# Interleave (or zip) multiple units into one vector
# S3 generic; dispatch is on the class of the first argument in `...`.
interleave <- function(...) UseMethod("interleave")
#' @export
interleave.unit <- function(...) {
# Grid "unit" objects cannot be interleaved directly: coerce each one to a
# plain list, interleave those with the default method, then splice the
# pieces back into a single unit with unit.c().
units <- lapply(list(...), as.list)
interleaved_list <- interleave.default(!!!units)
inject(unit.c(!!!interleaved_list))
}
#' @export
interleave.default <- function(...) {
# Delegate to vctrs::vec_interleave(), which zips the inputs element-wise.
vec_interleave(...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift_operations.R
\name{redshift_modify_event_subscription}
\alias{redshift_modify_event_subscription}
\title{Modifies an existing Amazon Redshift event notification subscription}
\usage{
redshift_modify_event_subscription(SubscriptionName, SnsTopicArn,
SourceType, SourceIds, EventCategories, Severity, Enabled)
}
\arguments{
\item{SubscriptionName}{[required] The name of the modified Amazon Redshift event notification
subscription.}
\item{SnsTopicArn}{The Amazon Resource Name (ARN) of the SNS topic to be used by the event
notification subscription.}
\item{SourceType}{The type of source that will be generating the events. For example, if
you want to be notified of events generated by a cluster, you would set
this parameter to cluster. If this value is not specified, events are
returned for all Amazon Redshift objects in your AWS account. You must
specify a source type in order to specify source IDs.
Valid values: cluster, cluster-parameter-group, cluster-security-group,
cluster-snapshot, and scheduled-action.}
\item{SourceIds}{A list of one or more identifiers of Amazon Redshift source objects. All
of the objects must be of the same type as was specified in the source
type parameter. The event subscription will return only events generated
by the specified objects. If not specified, then events are returned for
all objects within the source type specified.
Example: my-cluster-1, my-cluster-2
Example: my-snapshot-20131010}
\item{EventCategories}{Specifies the Amazon Redshift event categories to be published by the
event notification subscription.
Values: configuration, management, monitoring, security}
\item{Severity}{Specifies the Amazon Redshift event severity to be published by the
event notification subscription.
Values: ERROR, INFO}
\item{Enabled}{A Boolean value indicating if the subscription is enabled. \code{true}
indicates the subscription is enabled.}
}
\value{
A list with the following syntax:\preformatted{list(
EventSubscription = list(
CustomerAwsId = "string",
CustSubscriptionId = "string",
SnsTopicArn = "string",
Status = "string",
SubscriptionCreationTime = as.POSIXct(
"2015-01-01"
),
SourceType = "string",
SourceIdsList = list(
"string"
),
EventCategoriesList = list(
"string"
),
Severity = "string",
Enabled = TRUE|FALSE,
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
)
}
}
\description{
Modifies an existing Amazon Redshift event notification subscription.
}
\section{Request syntax}{
\preformatted{svc$modify_event_subscription(
SubscriptionName = "string",
SnsTopicArn = "string",
SourceType = "string",
SourceIds = list(
"string"
),
EventCategories = list(
"string"
),
Severity = "string",
Enabled = TRUE|FALSE
)
}
}
\keyword{internal}
| /cran/paws.database/man/redshift_modify_event_subscription.Rd | permissive | TWarczak/paws | R | false | true | 2,937 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift_operations.R
\name{redshift_modify_event_subscription}
\alias{redshift_modify_event_subscription}
\title{Modifies an existing Amazon Redshift event notification subscription}
\usage{
redshift_modify_event_subscription(SubscriptionName, SnsTopicArn,
SourceType, SourceIds, EventCategories, Severity, Enabled)
}
\arguments{
\item{SubscriptionName}{[required] The name of the modified Amazon Redshift event notification
subscription.}
\item{SnsTopicArn}{The Amazon Resource Name (ARN) of the SNS topic to be used by the event
notification subscription.}
\item{SourceType}{The type of source that will be generating the events. For example, if
you want to be notified of events generated by a cluster, you would set
this parameter to cluster. If this value is not specified, events are
returned for all Amazon Redshift objects in your AWS account. You must
specify a source type in order to specify source IDs.
Valid values: cluster, cluster-parameter-group, cluster-security-group,
cluster-snapshot, and scheduled-action.}
\item{SourceIds}{A list of one or more identifiers of Amazon Redshift source objects. All
of the objects must be of the same type as was specified in the source
type parameter. The event subscription will return only events generated
by the specified objects. If not specified, then events are returned for
all objects within the source type specified.
Example: my-cluster-1, my-cluster-2
Example: my-snapshot-20131010}
\item{EventCategories}{Specifies the Amazon Redshift event categories to be published by the
event notification subscription.
Values: configuration, management, monitoring, security}
\item{Severity}{Specifies the Amazon Redshift event severity to be published by the
event notification subscription.
Values: ERROR, INFO}
\item{Enabled}{A Boolean value indicating if the subscription is enabled. \code{true}
indicates the subscription is enabled.}
}
\value{
A list with the following syntax:\preformatted{list(
EventSubscription = list(
CustomerAwsId = "string",
CustSubscriptionId = "string",
SnsTopicArn = "string",
Status = "string",
SubscriptionCreationTime = as.POSIXct(
"2015-01-01"
),
SourceType = "string",
SourceIdsList = list(
"string"
),
EventCategoriesList = list(
"string"
),
Severity = "string",
Enabled = TRUE|FALSE,
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
)
}
}
\description{
Modifies an existing Amazon Redshift event notification subscription.
}
\section{Request syntax}{
\preformatted{svc$modify_event_subscription(
SubscriptionName = "string",
SnsTopicArn = "string",
SourceType = "string",
SourceIds = list(
"string"
),
EventCategories = list(
"string"
),
Severity = "string",
Enabled = TRUE|FALSE
)
}
}
\keyword{internal}
|
# Smoke tests for basic stars functionality: reading, plotting, dimension
# handling, arithmetic, and st_apply. Presumably run as a package test where
# printed output is compared against a saved .Rout file -- keep output stable.
suppressPackageStartupMessages(library(stars))
set.seed(13521) # runif
tif = system.file("tif/L7_ETMs.tif", package = "stars")
(x_ = read_stars(c(tif,tif))) # FIXME: not what you'd expect
(x = read_stars(tif))
image(x)
gdal_crs(tif)
plot(x)
plot(x, join_zlim = FALSE)
# rename dimensions and query dimension values:
x %>% st_set_dimensions(names = c('a', 'b', 'c'))
st_get_dimension_values(x, 3)
(x1 = st_set_dimensions(x, "band", values = c(1,2,3,4,5,7), names = "band_number", point = TRUE))
# wavelength intervals (micrometers) for the six bands:
rbind(c(0.45,0.515), c(0.525,0.605), c(0.63,0.69), c(0.775,0.90), c(1.55,1.75), c(2.08,2.35)) %>%
units::set_units(um) -> bw # units::set_units(µm) -> bw
# set bandwidth midpoint:
(x2 = st_set_dimensions(x, "band", values = 0.5 * (bw[,1]+bw[,2]),
names = "bandwidth_midpoint", point = TRUE))
# set bandwidth intervals:
(x3 = st_set_dimensions(x, "band", values = make_intervals(bw), names = "bandwidth"))
# arithmetic and subsetting on stars objects:
x + x
x * x
x[,,,1:3]
x[,1:100,100:200,]
sqrt(x)
# reductions: over the band dimension, and over the x/y dimensions:
st_apply(x, 3, min)
st_apply(x, 1:2, max)
st_apply(x, 1:2, range)
# GeoTIFF with a rotated/sheared geotransform:
geomatrix = system.file("tif/geomatrix.tif", package = "stars")
x = read_stars(geomatrix)
y = st_transform(x, st_crs(4326))
st_coordinates(x)[1:10,]
# NetCDF input and bbox round-trips:
nc = system.file("nc/tos_O1_2001-2002.nc", package = "stars")
(x = read_stars(nc))
st_as_stars(st_bbox(x))
st_as_stars(st_bbox(x), deltax = 20, deltay = 20)
df = as.data.frame(x)
units::drop_units(x)
# dimension-name getters/setters:
dimnames(x)
dimnames(x) <- letters[1:3]
dimnames(x)
st_as_stars()
# multiple sub-datasets:
nc_red = system.file("nc/reduced.nc", package = "stars")
red = read_stars(nc_red)
red
plot(red)
# convert the x/y raster dimensions into simple-feature geometries:
x = st_xy2sfc(read_stars(tif)[,1:10,1:10,], as_points = FALSE)
st_bbox(x)
# split bands into attributes, then merge them back into one array:
x = read_stars(tif)
merge(split(x, "band"))
# reading multiple files: merged as attributes, or along a dimension:
read_stars(c(tif,tif)) # merges as attributes
read_stars(c(tif,tif), along = "sensor")
read_stars(c(tif,tif), along = 4)
read_stars(c(tif,tif), along = "band")
read_stars(c(tif,tif), along = 3)
# cut:
tif = system.file("tif/L7_ETMs.tif", package = "stars")
x = read_stars(tif)
cut(x, c(0, 50, 100, 255))
cut(x[,,,1,drop=TRUE], c(0, 50, 100, 255))
plot(cut(x[,,,1,drop=TRUE], c(0, 50, 100, 255)))
st_bbox(st_dimensions(x))
# logical subset-assignment on the cell values:
x[x < 0] = NA
x[is.na(x)] = 0
# c:
# Combine daily SST files; runs only when the optional `starsdata` package
# is installed (system.file() returns "" when the file is not found).
f = system.file("netcdf/avhrr-only-v2.19810902.nc", package = "starsdata")
if (f != "") {
files = c("avhrr-only-v2.19810901.nc",
"avhrr-only-v2.19810902.nc",
"avhrr-only-v2.19810903.nc",
"avhrr-only-v2.19810904.nc",
"avhrr-only-v2.19810905.nc",
"avhrr-only-v2.19810906.nc",
"avhrr-only-v2.19810907.nc",
"avhrr-only-v2.19810908.nc",
"avhrr-only-v2.19810909.nc")
# read the sst and anom sub-datasets of each file, then combine with c():
l = list()
for (f in files) {
from = system.file(paste0("netcdf/", f), package = "starsdata")
l[[f]] = read_stars(from, sub = c("sst", "anom"))
}
ret = do.call(c, l)
print(ret)
# combine along an explicitly named/valued "times" dimension:
ret = adrop(c(l[[1]], l[[2]], l[[3]], along = list(times = as.Date("1981-09-01") + 0:2)))
print(ret)
ret = adrop(adrop(c(l[[1]], l[[2]], l[[3]], along = "times")))
print(ret)
}
st_dimensions(list(matrix(1, 4, 4))) # st_dimensions.default
if (FALSE && require("starsdata")) {
# curvilinear:
s5p = system.file(
"sentinel5p/S5P_NRTI_L2__NO2____20180717T120113_20180717T120613_03932_01_010002_20180717T125231.nc",
package = "starsdata")
print(s5p)
lat_ds = paste0("HDF5:\"", s5p, "\"://PRODUCT/latitude")
lon_ds = paste0("HDF5:\"", s5p, "\"://PRODUCT/longitude")
nit_ds = paste0("HDF5:\"", s5p, "\"://PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/nitrogendioxide_summed_total_column")
lat = read_stars(lat_ds)
lon = read_stars(lon_ds)
nit = read_stars(nit_ds)
nit[[1]][nit[[1]] > 9e+36] = NA
ll = setNames(c(lon, lat), c("x", "y"))
nit.c = st_as_stars(nit, curvilinear = ll)
print(nit.c)
s5p = system.file(
"sentinel5p/S5P_NRTI_L2__NO2____20180717T120113_20180717T120613_03932_01_010002_20180717T125231.nc",
package = "starsdata")
nit.c2 = read_stars(s5p,
sub = "//PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/nitrogendioxide_summed_total_column",
curvilinear = c("//PRODUCT/latitude", "//PRODUCT/longitude"))
print(all.equal(nit.c, nit.c2))
}
# predict:
(x = read_stars(tif))
model = lm(x~L7_ETMs.tif, head(as.data.frame(x), 50))
predict(x, model)
| /tests/stars.R | no_license | CBUFLM/stars | R | false | false | 4,073 | r | suppressPackageStartupMessages(library(stars))
# Test script exercising the stars package API: reading GeoTIFF/NetCDF rasters,
# setting/renaming dimensions, arithmetic and st_apply(), merging/splitting
# attributes, cut(), combining slices along new dimensions, curvilinear grids,
# and predict() on a fitted model.  Results are printed rather than asserted.
set.seed(13521) # runif
tif = system.file("tif/L7_ETMs.tif", package = "stars")
(x_ = read_stars(c(tif,tif))) # FIXME: not what you'd expect
(x = read_stars(tif))
image(x)
gdal_crs(tif)
plot(x)
plot(x, join_zlim = FALSE)
# dimension renaming and replacing dimension values:
x %>% st_set_dimensions(names = c('a', 'b', 'c'))
st_get_dimension_values(x, 3)
(x1 = st_set_dimensions(x, "band", values = c(1,2,3,4,5,7), names = "band_number", point = TRUE))
# band boundaries, in micrometers (um):
rbind(c(0.45,0.515), c(0.525,0.605), c(0.63,0.69), c(0.775,0.90), c(1.55,1.75), c(2.08,2.35)) %>%
	units::set_units(um) -> bw # units::set_units(µm) -> bw
# set bandwidth midpoint:
(x2 = st_set_dimensions(x, "band", values = 0.5 * (bw[,1]+bw[,2]),
	names = "bandwidth_midpoint", point = TRUE))
# set bandwidth intervals:
(x3 = st_set_dimensions(x, "band", values = make_intervals(bw), names = "bandwidth"))
# arithmetic, subsetting and apply-style reductions:
x + x
x * x
x[,,,1:3]
x[,1:100,100:200,]
sqrt(x)
st_apply(x, 3, min)
st_apply(x, 1:2, max)
st_apply(x, 1:2, range)
# raster with a rotated/sheared geotransform, plus reprojection:
geomatrix = system.file("tif/geomatrix.tif", package = "stars")
x = read_stars(geomatrix)
y = st_transform(x, st_crs(4326))
st_coordinates(x)[1:10,]
# NetCDF input and bounding-box round trips:
nc = system.file("nc/tos_O1_2001-2002.nc", package = "stars")
(x = read_stars(nc))
st_as_stars(st_bbox(x))
st_as_stars(st_bbox(x), deltax = 20, deltay = 20)
df = as.data.frame(x)
units::drop_units(x)
dimnames(x)
dimnames(x) <- letters[1:3]
dimnames(x)
st_as_stars()
# multiple sub-datasets:
nc_red = system.file("nc/reduced.nc", package = "stars")
red = read_stars(nc_red)
red
plot(red)
x = st_xy2sfc(read_stars(tif)[,1:10,1:10,], as_points = FALSE)
st_bbox(x)
x = read_stars(tif)
merge(split(x, "band"))
read_stars(c(tif,tif)) # merges as attributes
read_stars(c(tif,tif), along = "sensor")
read_stars(c(tif,tif), along = 4)
read_stars(c(tif,tif), along = "band")
read_stars(c(tif,tif), along = 3)
# cut:
tif = system.file("tif/L7_ETMs.tif", package = "stars")
x = read_stars(tif)
cut(x, c(0, 50, 100, 255))
cut(x[,,,1,drop=TRUE], c(0, 50, 100, 255))
plot(cut(x[,,,1,drop=TRUE], c(0, 50, 100, 255)))
st_bbox(st_dimensions(x))
x[x < 0] = NA
x[is.na(x)] = 0
# c:
# combining daily AVHRR slices; runs only when the optional starsdata
# package (with its large test files) is installed -- system.file()
# returns "" otherwise:
f = system.file("netcdf/avhrr-only-v2.19810902.nc", package = "starsdata")
if (f != "") {
  files = c("avhrr-only-v2.19810901.nc",
	"avhrr-only-v2.19810902.nc",
	"avhrr-only-v2.19810903.nc",
	"avhrr-only-v2.19810904.nc",
	"avhrr-only-v2.19810905.nc",
	"avhrr-only-v2.19810906.nc",
	"avhrr-only-v2.19810907.nc",
	"avhrr-only-v2.19810908.nc",
	"avhrr-only-v2.19810909.nc")
  l = list()
  for (f in files) {
    from = system.file(paste0("netcdf/", f), package = "starsdata")
    l[[f]] = read_stars(from, sub = c("sst", "anom"))
  }
  ret = do.call(c, l)
  print(ret)
  ret = adrop(c(l[[1]], l[[2]], l[[3]], along = list(times = as.Date("1981-09-01") + 0:2)))
  print(ret)
  ret = adrop(adrop(c(l[[1]], l[[2]], l[[3]], along = "times")))
  print(ret)
}
st_dimensions(list(matrix(1, 4, 4))) # st_dimensions.default
if (FALSE && require("starsdata")) {
 # curvilinear:
 s5p = system.file(
  "sentinel5p/S5P_NRTI_L2__NO2____20180717T120113_20180717T120613_03932_01_010002_20180717T125231.nc",
  package = "starsdata")
 print(s5p)
 lat_ds = paste0("HDF5:\"", s5p, "\"://PRODUCT/latitude")
 lon_ds = paste0("HDF5:\"", s5p, "\"://PRODUCT/longitude")
 nit_ds = paste0("HDF5:\"", s5p, "\"://PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/nitrogendioxide_summed_total_column")
 lat = read_stars(lat_ds)
 lon = read_stars(lon_ds)
 nit = read_stars(nit_ds)
 nit[[1]][nit[[1]] > 9e+36] = NA
 ll = setNames(c(lon, lat), c("x", "y"))
 nit.c = st_as_stars(nit, curvilinear = ll)
 print(nit.c)
 s5p = system.file(
  "sentinel5p/S5P_NRTI_L2__NO2____20180717T120113_20180717T120613_03932_01_010002_20180717T125231.nc",
  package = "starsdata")
 nit.c2 = read_stars(s5p,
  sub = "//PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/nitrogendioxide_summed_total_column",
  curvilinear = c("//PRODUCT/latitude", "//PRODUCT/longitude"))
 print(all.equal(nit.c, nit.c2))
}
# predict:
(x = read_stars(tif))
model = lm(x~L7_ETMs.tif, head(as.data.frame(x), 50))
predict(x, model)
|
#' @title Student CDF with integer number of degrees of freedom
#' @description Cumulative distribution function of the noncentral Student
#' distribution with an integer number of degrees of freedom.
#' @param q quantile
#' @param nu integer greater than or equal to \eqn{1}, the number of degrees of freedom
#' @param delta noncentrality parameter (defaults to \eqn{0}, the central case)
#' @return Numeric value, the CDF evaluated at \code{q}.
#' @export
#' @useDynLib OwenRcpp
#' @examples
#' ptOwen(2, 3) - pt(2, 3)
#' ptOwen(2, 3, delta=1) - pt(2, 3, ncp=1)
ptOwen <- function(q, nu, delta=0){
  # Reject anything that is not a finite integer >= 1 before calling C++.
  if(isNotPositiveInteger(nu)){
    stop("`nu` must be a finite integer >=1.")
  }
  # pStudent is the compiled (Rcpp) routine; coerce args to the exact
  # storage types its signature expects.
  pStudent(as.double(q), as.integer(nu), as.double(delta))
} | /R/ptOwen.R | no_license | stla/OwenRcpp | R | false | false | 691 | r | #' @title Student CDF with integer number of degrees of freedom
#' @description Cumulative distribution function of the noncentrel Student
#' distribution with an integer number of degrees of freedom.
#' @param q quantile
#' @param nu integer greater than \eqn{1}, the number of degrees of freedom
#' @param delta noncentrality parameter
#' @return Numeric value, the CDF evaluated at \code{q}.
#' @export
#' @useDynLib OwenRcpp
#' @examples
#' ptOwen(2, 3) - pt(2, 3)
#' ptOwen(2, 3, delta=1) - pt(2, 3, ncp=1)
ptOwen <- function(q, nu, delta = 0) {
  # Guard: the degrees of freedom must be a finite integer >= 1.
  if (isNotPositiveInteger(nu)) {
    stop("`nu` must be a finite integer >=1.")
  }
  # Delegate to the compiled routine with explicitly coerced argument types.
  pStudent(as.double(q), as.integer(nu), as.double(delta))
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isomiRs.R
\name{isoCounts}
\alias{isoCounts}
\title{Create count matrix with different summarizing options}
\usage{
isoCounts(
ids,
ref = FALSE,
iso5 = FALSE,
iso3 = FALSE,
add = FALSE,
snv = FALSE,
seed = FALSE,
all = FALSE,
minc = 1,
mins = 1,
merge_by = NULL
)
}
\arguments{
\item{ids}{Object of class \link{IsomirDataSeq}.}
\item{ref}{Differentiate reference miRNA from rest.}
\item{iso5}{Differentiate trimming at 5 miRNA from rest.}
\item{iso3}{Differentiate trimming at 3 miRNA from rest.}
\item{add}{Differentiate additions miRNA from rest.}
\item{snv}{Differentiate nt substitution miRNA from rest.}
\item{seed}{Differentiate changes in 2-7 nts from rest.}
\item{all}{Differentiate all isomiRs.}
\item{minc}{Int minimum number of isomiR sequences to be included.}
\item{mins}{Int minimum number of samples with number of
sequences bigger than \code{minc} counts.}
\item{merge_by}{Column in coldata to merge samples into a single
column in counts. Useful to combine technical replicates.}
}
\value{
\link{IsomirDataSeq} object with new count table.
The count matrix can be access with \code{counts(ids)}.
}
\description{
This function collapses isomiRs into different groups. It is a concept
similar to working with gene isoforms. With this function,
different changes can be put together into a single miRNA variant.
For instance all sequences with variants at 3' end can be
considered as different elements in the table
or analysis having the following naming
\verb{hsa-miR-124a-5p.iso.t3:AAA}.
}
\details{
You can merge all isomiRs into miRNAs by calling the function only
with the first parameter \code{isoCounts(ids)}.
You can get a table with isomiRs altogether and
the reference miRBase sequences by calling the function with \code{ref=TRUE}.
You can get a table with 5' trimming isomiRS, miRBase reference and
the rest by calling with \code{isoCounts(ids, ref=TRUE, iso5=TRUE)}.
If you set up all parameters to TRUE, you will get a table for
each different sequence mapping to a miRNA (i.e. all isomiRs).
Examples for the naming used for the isomiRs are at
http://seqcluster.readthedocs.org/mirna_annotation.html#mirna-annotation.
}
\examples{
data(mirData)
ids <- isoCounts(mirData, ref=TRUE)
head(counts(ids))
# taking into account isomiRs and reference sequence.
ids <- isoCounts(mirData, ref=TRUE, minc=10, mins=6)
head(counts(ids))
}
| /man/isoCounts.Rd | permissive | lpantano/isomiRs | R | false | true | 2,472 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isomiRs.R
\name{isoCounts}
\alias{isoCounts}
\title{Create count matrix with different summarizing options}
\usage{
isoCounts(
ids,
ref = FALSE,
iso5 = FALSE,
iso3 = FALSE,
add = FALSE,
snv = FALSE,
seed = FALSE,
all = FALSE,
minc = 1,
mins = 1,
merge_by = NULL
)
}
\arguments{
\item{ids}{Object of class \link{IsomirDataSeq}.}
\item{ref}{Differentiate reference miRNA from rest.}
\item{iso5}{Differentiate trimming at 5 miRNA from rest.}
\item{iso3}{Differentiate trimming at 3 miRNA from rest.}
\item{add}{Differentiate additions miRNA from rest.}
\item{snv}{Differentiate nt substitution miRNA from rest.}
\item{seed}{Differentiate changes in 2-7 nts from rest.}
\item{all}{Differentiate all isomiRs.}
\item{minc}{Int minimum number of isomiR sequences to be included.}
\item{mins}{Int minimum number of samples with number of
sequences bigger than \code{minc} counts.}
\item{merge_by}{Column in coldata to merge samples into a single
column in counts. Useful to combine technical replicates.}
}
\value{
\link{IsomirDataSeq} object with new count table.
The count matrix can be access with \code{counts(ids)}.
}
\description{
This function collapses isomiRs into different groups. It is a concept
similar to working with gene isoforms. With this function,
different changes can be put together into a single miRNA variant.
For instance all sequences with variants at 3' end can be
considered as different elements in the table
or analysis having the following naming
\verb{hsa-miR-124a-5p.iso.t3:AAA}.
}
\details{
You can merge all isomiRs into miRNAs by calling the function only
with the first parameter \code{isoCounts(ids)}.
You can get a table with isomiRs altogether and
the reference miRBase sequences by calling the function with \code{ref=TRUE}.
You can get a table with 5' trimming isomiRS, miRBase reference and
the rest by calling with \code{isoCounts(ids, ref=TRUE, iso5=TRUE)}.
If you set up all parameters to TRUE, you will get a table for
each different sequence mapping to a miRNA (i.e. all isomiRs).
Examples for the naming used for the isomiRs are at
http://seqcluster.readthedocs.org/mirna_annotation.html#mirna-annotation.
}
\examples{
data(mirData)
ids <- isoCounts(mirData, ref=TRUE)
head(counts(ids))
# taking into account isomiRs and reference sequence.
ids <- isoCounts(mirData, ref=TRUE, minc=10, mins=6)
head(counts(ids))
}
|
# Jags-Ymet-Xnom1grp-Mrobust.R
# Accompanies the book:
# Kruschke, J. K. (2015). Doing Bayesian Data Analysis, Second Edition:
# A Tutorial with R, JAGS, and Stan. Academic Press / Elsevier.
source("DBDA2E-utilities.R")
#===============================================================================
# genMCMC: draw an MCMC sample from the posterior of a robust (heavy-tailed)
# one-group model.  Likelihood: y[i] ~ t(mu, sigma, nu), with broad priors
# scaled by the data's mean/sd, and a shifted exponential prior on the
# normality parameter (nu = nuMinusOne + 1, so prior mean of nu is 30).
#
# Args:
#   data          : numeric vector of observations; all values must be finite.
#   numSavedSteps : total number of saved MCMC steps, pooled over all chains.
#   saveName      : optional filename prefix; when given, the chain is saved
#                   to "<saveName>Mcmc.Rdata".
# Side effects: writes the JAGS model to "TEMPmodel.txt" in the working
#   directory and prints progress messages to the console.
# Returns: a coda mcmc.list with the monitored parameters mu, sigma and nu.
genMCMC = function( data , numSavedSteps=50000 , saveName=NULL ) {
  require(rjags)
  #-----------------------------------------------------------------------------
  # THE DATA.
  y = data
  # Do some checking that data make sense:
  if ( any( !is.finite(y) ) ) { stop("All y values must be finite.") }
  Ntotal = length(y)
  # Specify the data in a list, for later shipment to JAGS:
  dataList = list(
    y = y ,
    Ntotal = Ntotal ,
    meanY = mean(y) ,
    sdY = sd(y)
  )
  #-----------------------------------------------------------------------------
  # THE MODEL.
  modelString = "
  model {
    for ( i in 1:Ntotal ) {
      y[i] ~ dt( mu , 1/sigma^2 , nu )
    }
    mu ~ dnorm( meanY , 1/(100*sdY)^2 )
    sigma ~ dunif( sdY/1000 , sdY*1000 )
    nu <- nuMinusOne+1
    nuMinusOne ~ dexp(1/29)
  }
  " # close quote for modelString
  # Write out modelString to a text file
  writeLines( modelString , con="TEMPmodel.txt" )
  #-----------------------------------------------------------------------------
  # INTIALIZE THE CHAINS.
  # Initial values of MCMC chains based on data:
  # (start at the sample moments; nuMinusOne = 4 starts nu at 5)
  mu = mean(y)
  sigma = sd(y)
  initsList = list( mu = mu , sigma = sigma , nuMinusOne = 4 )
  #-----------------------------------------------------------------------------
  # RUN THE CHAINS
  parameters = c( "mu" , "sigma" , "nu" ) # The parameters to be monitored
  adaptSteps = 500 # Number of steps to "tune" the samplers
  burnInSteps = 1000
  nChains = 4
  thinSteps = 5
  # Iterations per chain so that, after thinning, the chains together yield
  # (at least) numSavedSteps saved steps:
  nIter = ceiling( ( numSavedSteps * thinSteps ) / nChains )
  # Create, initialize, and adapt the model:
  jagsModel = jags.model( "TEMPmodel.txt" , data=dataList , inits=initsList ,
                          n.chains=nChains , n.adapt=adaptSteps )
  # Burn-in:
  cat( "Burning in the MCMC chain...\n" )
  update( jagsModel , n.iter=burnInSteps )
  # The saved MCMC chain:
  cat( "Sampling final MCMC chain...\n" )
  codaSamples = coda.samples( jagsModel , variable.names=parameters ,
                              n.iter=nIter , thin=thinSteps )
  # resulting codaSamples object has these indices:
  #   codaSamples[[ chainIdx ]][ stepIdx , paramIdx ]
  if ( !is.null(saveName) ) {
    save( codaSamples , file=paste(saveName,"Mcmc.Rdata",sep="") )
  }
  return( codaSamples )
} # end function
#===============================================================================
smryMCMC = function( codaSamples , compValMu , # must specify compValMu
                     ropeMu=NULL , saveName=NULL ,
                     compValSigma=NULL , ropeSigma=NULL ,
                     compValEff=0.0 , ropeEff=c(-0.1,0.1) ) {
  # Tabulate posterior summaries (via summarizePost) for each monitored
  # quantity of the robust one-group model, one row per quantity:
  # mu, sigma, nu, log10(nu), and the effect size (mu - compValMu)/sigma.
  # If saveName is given the table is also written to
  # "<saveName>SummaryInfo.csv".  Returns the summary matrix.
  mcmcMat <- as.matrix( codaSamples , chains=TRUE )
  # Effect size is measured relative to the mandatory comparison value for mu.
  effSz <- ( mcmcMat[,"mu"] - compValMu ) / mcmcMat[,"sigma"]
  rowList <- list(
    "mu"        = summarizePost( mcmcMat[,"mu"] ,
                                 compVal=compValMu , ROPE=ropeMu ) ,
    "sigma"     = summarizePost( mcmcMat[,"sigma"] ,
                                 compVal=compValSigma , ROPE=ropeSigma ) ,
    "nu"        = summarizePost( mcmcMat[,"nu"] ,
                                 compVal=NULL , ROPE=NULL ) ,
    "log10(nu)" = summarizePost( log10(mcmcMat[,"nu"]) ,
                                 compVal=NULL , ROPE=NULL ) ,
    "effSz"     = summarizePost( effSz ,
                                 compVal=compValEff , ROPE=ropeEff ) )
  # do.call(rbind, <named list>) labels rows exactly as the successive
  # rbind() calls of the original implementation did.
  summaryInfo <- do.call( rbind , rowList )
  if ( !is.null(saveName) ) {
    write.csv( summaryInfo , file=paste0( saveName , "SummaryInfo.csv" ) )
  }
  summaryInfo
}
#===============================================================================
# plotMCMC: graphical posterior summary for the robust one-group model.
#   Opens graphics windows (openGraph), draws the data histogram overlaid
#   with posterior-predictive t density curves, and the marginal posteriors
#   of mu, sigma, the effect size (mu - compValMu)/sigma, and log10(nu);
#   optionally saves the figures via saveGraph.
# Args:
#   codaSamples : mcmc.list with columns mu, sigma, nu (e.g. from genMCMC).
#   data        : the observed numeric vector.
#   compValMu   : comparison value for mu (required); also anchors effect size.
#   ropeMu, compValSigma, ropeSigma, compValEff, ropeEff :
#                 comparison values / ROPE limits forwarded to plotPost.
#   showCurve, pairsPlot : display options (see comments below).
#   saveName, saveType   : if saveName is given, figures are saved as
#                 "<saveName>PostPairs" / "<saveName>Post" with type saveType.
# NOTE(review): depends on openGraph/saveGraph/plotPost sourced from
#   DBDA2E-utilities.R.
plotMCMC = function( codaSamples , data , compValMu , # must specify compValMu
                     ropeMu=NULL ,
                     compValSigma=NULL , ropeSigma=NULL ,
                     compValEff=0.0 , ropeEff=c(-0.1,0.1) ,
                     showCurve=FALSE , pairsPlot=FALSE ,
                     saveName=NULL , saveType="jpg" ) {
  # showCurve is TRUE or FALSE and indicates whether the posterior should
  #   be displayed as a histogram (by default) or by an approximate curve.
  # pairsPlot is TRUE or FALSE and indicates whether scatterplots of pairs
  #   of parameters should be displayed.
  #-----------------------------------------------------------------------------
  # Flatten all chains into one matrix and pull out the parameter columns.
  mcmcMat = as.matrix(codaSamples,chains=TRUE)
  chainLength = NROW( mcmcMat )
  mu = mcmcMat[,"mu"]
  sigma = mcmcMat[,"sigma"]
  nu = mcmcMat[,"nu"]
  #-----------------------------------------------------------------------------
  if ( pairsPlot ) {
    # Plot the parameters pairwise, to see correlations:
    openGraph(width=7*3/5,height=7*3/5)
    nPtToPlot = 1000
    plotIdx = floor(seq(1,chainLength,by=chainLength/nPtToPlot))
    # Lower-panel callback for pairs(): prints the correlation coefficient.
    panel.cor = function(x, y, digits=2, prefix="", cex.cor, ...) {
      usr = par("usr"); on.exit(par(usr))
      par(usr = c(0, 1, 0, 1))
      r = (cor(x, y))
      txt = format(c(r, 0.123456789), digits=digits)[1]
      txt = paste(prefix, txt, sep="")
      if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
      text(0.5, 0.5, txt, cex=1.25 ) # was cex=cex.cor*r
    }
    pairs( cbind( mu , sigma , log10(nu) )[plotIdx,] ,
           labels=c( expression(mu) ,
                     expression(sigma) ,
                     expression(log10(nu)) ) ,
           lower.panel=panel.cor , col="skyblue" )
    if ( !is.null(saveName) ) {
      saveGraph( file=paste(saveName,"PostPairs",sep=""), type=saveType)
    }
  }
  #-----------------------------------------------------------------------------
  # Set up window and layout:
  openGraph(width=6.0,height=8.0*3/5)
  layout( matrix( c(2,3,5, 1,4,6) , nrow=3, byrow=FALSE ) )
  par( mar=c(3.5,3.5,2.5,0.5) , mgp=c(2.25,0.7,0) )
  # Select thinned steps in chain for plotting of posterior predictive curves:
  nCurvesToPlot = 20
  # NOTE(review): 1:length(stepIdxVec) below assumes stepIdxVec is non-empty,
  # which holds whenever chainLength >= nCurvesToPlot.
  stepIdxVec = seq( 1 , chainLength , floor(chainLength/nCurvesToPlot) )
  # Compute limits for plots of data with posterior pred. distributions
  y = data
  xLim = c( min(y)-0.1*(max(y)-min(y)) , max(y)+0.1*(max(y)-min(y)) )
  xBreaks = seq( xLim[1] , xLim[2] ,
                 length=ceiling((xLim[2]-xLim[1])/(sd(y)/4)) )
  histInfo = hist(y,breaks=xBreaks,plot=FALSE)
  yMax = 1.2 * max( histInfo$density )
  xVec = seq( xLim[1] , xLim[2] , length=501 )
  #-----------------------------------------------------------------------------
  # Plot data y and smattering of posterior predictive curves:
  histInfo = hist( y , prob=TRUE , xlim=xLim , ylim=c(0,yMax) , breaks=xBreaks,
                   col="red2" , border="white" , xlab="y" , ylab="" ,
                   yaxt="n" , cex.lab=1.5 , main="Data w. Post. Pred." )
  # Each curve is the t density implied by one posterior step (mu,sigma,nu),
  # rescaled by 1/sigma to put it on the data scale.
  for ( stepIdx in 1:length(stepIdxVec) ) {
    lines(xVec, dt( (xVec-mu[stepIdxVec[stepIdx]])/sigma[stepIdxVec[stepIdx]],
                    df=nu[stepIdxVec[stepIdx]] )/sigma[stepIdxVec[stepIdx]] ,
          type="l" , col="skyblue" , lwd=1 )
  }
  text( max(xVec) , yMax , bquote(N==.(length(y))) , adj=c(1.1,1.1) )
  #-----------------------------------------------------------------------------
  histInfo = plotPost( mu , cex.lab = 1.75 , showCurve=showCurve ,
                       compVal=compValMu , ROPE=ropeMu ,
                       xlab=bquote(mu) , main=paste("Mean") ,
                       col="skyblue" )
  #-----------------------------------------------------------------------------
  histInfo = plotPost( sigma , cex.lab=1.75 , showCurve=showCurve ,
                       compVal=compValSigma , ROPE=ropeSigma , cenTend="mode" ,
                       xlab=bquote(sigma) , main=paste("Scale") ,
                       col="skyblue" )
  #-----------------------------------------------------------------------------
  effectSize = ( mu - compValMu ) / sigma
  histInfo = plotPost( effectSize , compVal=compValEff , ROPE=ropeEff ,
                       showCurve=showCurve , cenTend="mode" ,
                       xlab=bquote( ( mu - .(compValMu) ) / sigma ),
                       cex.lab=1.75 , main="Effect Size" ,
                       col="skyblue" )
  #-----------------------------------------------------------------------------
  postInfo = plotPost( log10(nu) , col="skyblue" , # breaks=30 ,
                       showCurve=showCurve ,
                       xlab=bquote("log10("*nu*")") , cex.lab = 1.75 ,
                       cenTend="mode" ,
                       main="Normality" ) # (<0.7 suggests kurtosis)
  #-----------------------------------------------------------------------------
  # Blank plot
  plot(1, ann=FALSE, axes=FALSE, xlim=c(0,1) , ylim=c(0,1) ,
       type="n" , xaxs="i" , yaxs="i" )
  text(.5,.5,"[intentionally blank]",adj=c(.5,.5))
  if ( !is.null(saveName) ) {
    saveGraph( file=paste(saveName,"Post",sep=""), type=saveType)
  }
}
#===============================================================================
| /statistics/DBDA2Eprograms/Jags-Ymet-Xnom1grp-Mrobust.R | no_license | Clamwinds/letoverlambda | R | false | false | 9,682 | r | # Jags-Ymet-Xnom1grp-Mrobust.R
# Accompanies the book:
# Kruschke, J. K. (2015). Doing Bayesian Data Analysis, Second Edition:
# A Tutorial with R, JAGS, and Stan. Academic Press / Elsevier.
source("DBDA2E-utilities.R")
#===============================================================================
# genMCMC: draw an MCMC sample from the posterior of a robust (heavy-tailed)
# one-group model.  Likelihood: y[i] ~ t(mu, sigma, nu), with broad priors
# scaled by the data's mean/sd, and a shifted exponential prior on the
# normality parameter (nu = nuMinusOne + 1, so prior mean of nu is 30).
#
# Args:
#   data          : numeric vector of observations; all values must be finite.
#   numSavedSteps : total number of saved MCMC steps, pooled over all chains.
#   saveName      : optional filename prefix; when given, the chain is saved
#                   to "<saveName>Mcmc.Rdata".
# Side effects: writes the JAGS model to "TEMPmodel.txt" in the working
#   directory and prints progress messages to the console.
# Returns: a coda mcmc.list with the monitored parameters mu, sigma and nu.
genMCMC = function( data , numSavedSteps=50000 , saveName=NULL ) {
  require(rjags)
  #-----------------------------------------------------------------------------
  # THE DATA.
  y = data
  # Do some checking that data make sense:
  if ( any( !is.finite(y) ) ) { stop("All y values must be finite.") }
  Ntotal = length(y)
  # Specify the data in a list, for later shipment to JAGS:
  dataList = list(
    y = y ,
    Ntotal = Ntotal ,
    meanY = mean(y) ,
    sdY = sd(y)
  )
  #-----------------------------------------------------------------------------
  # THE MODEL.
  modelString = "
  model {
    for ( i in 1:Ntotal ) {
      y[i] ~ dt( mu , 1/sigma^2 , nu )
    }
    mu ~ dnorm( meanY , 1/(100*sdY)^2 )
    sigma ~ dunif( sdY/1000 , sdY*1000 )
    nu <- nuMinusOne+1
    nuMinusOne ~ dexp(1/29)
  }
  " # close quote for modelString
  # Write out modelString to a text file
  writeLines( modelString , con="TEMPmodel.txt" )
  #-----------------------------------------------------------------------------
  # INTIALIZE THE CHAINS.
  # Initial values of MCMC chains based on data:
  # (start at the sample moments; nuMinusOne = 4 starts nu at 5)
  mu = mean(y)
  sigma = sd(y)
  initsList = list( mu = mu , sigma = sigma , nuMinusOne = 4 )
  #-----------------------------------------------------------------------------
  # RUN THE CHAINS
  parameters = c( "mu" , "sigma" , "nu" ) # The parameters to be monitored
  adaptSteps = 500 # Number of steps to "tune" the samplers
  burnInSteps = 1000
  nChains = 4
  thinSteps = 5
  # Iterations per chain so that, after thinning, the chains together yield
  # (at least) numSavedSteps saved steps:
  nIter = ceiling( ( numSavedSteps * thinSteps ) / nChains )
  # Create, initialize, and adapt the model:
  jagsModel = jags.model( "TEMPmodel.txt" , data=dataList , inits=initsList ,
                          n.chains=nChains , n.adapt=adaptSteps )
  # Burn-in:
  cat( "Burning in the MCMC chain...\n" )
  update( jagsModel , n.iter=burnInSteps )
  # The saved MCMC chain:
  cat( "Sampling final MCMC chain...\n" )
  codaSamples = coda.samples( jagsModel , variable.names=parameters ,
                              n.iter=nIter , thin=thinSteps )
  # resulting codaSamples object has these indices:
  #   codaSamples[[ chainIdx ]][ stepIdx , paramIdx ]
  if ( !is.null(saveName) ) {
    save( codaSamples , file=paste(saveName,"Mcmc.Rdata",sep="") )
  }
  return( codaSamples )
} # end function
#===============================================================================
smryMCMC = function( codaSamples , compValMu , # must specify compValMu
                     ropeMu=NULL , saveName=NULL ,
                     compValSigma=NULL , ropeSigma=NULL ,
                     compValEff=0.0 , ropeEff=c(-0.1,0.1) ) {
  # Tabulate posterior summaries (via summarizePost) for each monitored
  # quantity of the robust one-group model, one row per quantity:
  # mu, sigma, nu, log10(nu), and the effect size (mu - compValMu)/sigma.
  # If saveName is given the table is also written to
  # "<saveName>SummaryInfo.csv".  Returns the summary matrix.
  mcmcMat <- as.matrix( codaSamples , chains=TRUE )
  # Effect size is measured relative to the mandatory comparison value for mu.
  effSz <- ( mcmcMat[,"mu"] - compValMu ) / mcmcMat[,"sigma"]
  rowList <- list(
    "mu"        = summarizePost( mcmcMat[,"mu"] ,
                                 compVal=compValMu , ROPE=ropeMu ) ,
    "sigma"     = summarizePost( mcmcMat[,"sigma"] ,
                                 compVal=compValSigma , ROPE=ropeSigma ) ,
    "nu"        = summarizePost( mcmcMat[,"nu"] ,
                                 compVal=NULL , ROPE=NULL ) ,
    "log10(nu)" = summarizePost( log10(mcmcMat[,"nu"]) ,
                                 compVal=NULL , ROPE=NULL ) ,
    "effSz"     = summarizePost( effSz ,
                                 compVal=compValEff , ROPE=ropeEff ) )
  # do.call(rbind, <named list>) labels rows exactly as the successive
  # rbind() calls of the original implementation did.
  summaryInfo <- do.call( rbind , rowList )
  if ( !is.null(saveName) ) {
    write.csv( summaryInfo , file=paste0( saveName , "SummaryInfo.csv" ) )
  }
  summaryInfo
}
#===============================================================================
# plotMCMC: graphical posterior summary for the robust one-group model.
#   Opens graphics windows (openGraph), draws the data histogram overlaid
#   with posterior-predictive t density curves, and the marginal posteriors
#   of mu, sigma, the effect size (mu - compValMu)/sigma, and log10(nu);
#   optionally saves the figures via saveGraph.
# Args:
#   codaSamples : mcmc.list with columns mu, sigma, nu (e.g. from genMCMC).
#   data        : the observed numeric vector.
#   compValMu   : comparison value for mu (required); also anchors effect size.
#   ropeMu, compValSigma, ropeSigma, compValEff, ropeEff :
#                 comparison values / ROPE limits forwarded to plotPost.
#   showCurve, pairsPlot : display options (see comments below).
#   saveName, saveType   : if saveName is given, figures are saved as
#                 "<saveName>PostPairs" / "<saveName>Post" with type saveType.
# NOTE(review): depends on openGraph/saveGraph/plotPost sourced from
#   DBDA2E-utilities.R.
plotMCMC = function( codaSamples , data , compValMu , # must specify compValMu
                     ropeMu=NULL ,
                     compValSigma=NULL , ropeSigma=NULL ,
                     compValEff=0.0 , ropeEff=c(-0.1,0.1) ,
                     showCurve=FALSE , pairsPlot=FALSE ,
                     saveName=NULL , saveType="jpg" ) {
  # showCurve is TRUE or FALSE and indicates whether the posterior should
  #   be displayed as a histogram (by default) or by an approximate curve.
  # pairsPlot is TRUE or FALSE and indicates whether scatterplots of pairs
  #   of parameters should be displayed.
  #-----------------------------------------------------------------------------
  # Flatten all chains into one matrix and pull out the parameter columns.
  mcmcMat = as.matrix(codaSamples,chains=TRUE)
  chainLength = NROW( mcmcMat )
  mu = mcmcMat[,"mu"]
  sigma = mcmcMat[,"sigma"]
  nu = mcmcMat[,"nu"]
  #-----------------------------------------------------------------------------
  if ( pairsPlot ) {
    # Plot the parameters pairwise, to see correlations:
    openGraph(width=7*3/5,height=7*3/5)
    nPtToPlot = 1000
    plotIdx = floor(seq(1,chainLength,by=chainLength/nPtToPlot))
    # Lower-panel callback for pairs(): prints the correlation coefficient.
    panel.cor = function(x, y, digits=2, prefix="", cex.cor, ...) {
      usr = par("usr"); on.exit(par(usr))
      par(usr = c(0, 1, 0, 1))
      r = (cor(x, y))
      txt = format(c(r, 0.123456789), digits=digits)[1]
      txt = paste(prefix, txt, sep="")
      if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
      text(0.5, 0.5, txt, cex=1.25 ) # was cex=cex.cor*r
    }
    pairs( cbind( mu , sigma , log10(nu) )[plotIdx,] ,
           labels=c( expression(mu) ,
                     expression(sigma) ,
                     expression(log10(nu)) ) ,
           lower.panel=panel.cor , col="skyblue" )
    if ( !is.null(saveName) ) {
      saveGraph( file=paste(saveName,"PostPairs",sep=""), type=saveType)
    }
  }
  #-----------------------------------------------------------------------------
  # Set up window and layout:
  openGraph(width=6.0,height=8.0*3/5)
  layout( matrix( c(2,3,5, 1,4,6) , nrow=3, byrow=FALSE ) )
  par( mar=c(3.5,3.5,2.5,0.5) , mgp=c(2.25,0.7,0) )
  # Select thinned steps in chain for plotting of posterior predictive curves:
  nCurvesToPlot = 20
  # NOTE(review): 1:length(stepIdxVec) below assumes stepIdxVec is non-empty,
  # which holds whenever chainLength >= nCurvesToPlot.
  stepIdxVec = seq( 1 , chainLength , floor(chainLength/nCurvesToPlot) )
  # Compute limits for plots of data with posterior pred. distributions
  y = data
  xLim = c( min(y)-0.1*(max(y)-min(y)) , max(y)+0.1*(max(y)-min(y)) )
  xBreaks = seq( xLim[1] , xLim[2] ,
                 length=ceiling((xLim[2]-xLim[1])/(sd(y)/4)) )
  histInfo = hist(y,breaks=xBreaks,plot=FALSE)
  yMax = 1.2 * max( histInfo$density )
  xVec = seq( xLim[1] , xLim[2] , length=501 )
  #-----------------------------------------------------------------------------
  # Plot data y and smattering of posterior predictive curves:
  histInfo = hist( y , prob=TRUE , xlim=xLim , ylim=c(0,yMax) , breaks=xBreaks,
                   col="red2" , border="white" , xlab="y" , ylab="" ,
                   yaxt="n" , cex.lab=1.5 , main="Data w. Post. Pred." )
  # Each curve is the t density implied by one posterior step (mu,sigma,nu),
  # rescaled by 1/sigma to put it on the data scale.
  for ( stepIdx in 1:length(stepIdxVec) ) {
    lines(xVec, dt( (xVec-mu[stepIdxVec[stepIdx]])/sigma[stepIdxVec[stepIdx]],
                    df=nu[stepIdxVec[stepIdx]] )/sigma[stepIdxVec[stepIdx]] ,
          type="l" , col="skyblue" , lwd=1 )
  }
  text( max(xVec) , yMax , bquote(N==.(length(y))) , adj=c(1.1,1.1) )
  #-----------------------------------------------------------------------------
  histInfo = plotPost( mu , cex.lab = 1.75 , showCurve=showCurve ,
                       compVal=compValMu , ROPE=ropeMu ,
                       xlab=bquote(mu) , main=paste("Mean") ,
                       col="skyblue" )
  #-----------------------------------------------------------------------------
  histInfo = plotPost( sigma , cex.lab=1.75 , showCurve=showCurve ,
                       compVal=compValSigma , ROPE=ropeSigma , cenTend="mode" ,
                       xlab=bquote(sigma) , main=paste("Scale") ,
                       col="skyblue" )
  #-----------------------------------------------------------------------------
  effectSize = ( mu - compValMu ) / sigma
  histInfo = plotPost( effectSize , compVal=compValEff , ROPE=ropeEff ,
                       showCurve=showCurve , cenTend="mode" ,
                       xlab=bquote( ( mu - .(compValMu) ) / sigma ),
                       cex.lab=1.75 , main="Effect Size" ,
                       col="skyblue" )
  #-----------------------------------------------------------------------------
  postInfo = plotPost( log10(nu) , col="skyblue" , # breaks=30 ,
                       showCurve=showCurve ,
                       xlab=bquote("log10("*nu*")") , cex.lab = 1.75 ,
                       cenTend="mode" ,
                       main="Normality" ) # (<0.7 suggests kurtosis)
  #-----------------------------------------------------------------------------
  # Blank plot
  plot(1, ann=FALSE, axes=FALSE, xlim=c(0,1) , ylim=c(0,1) ,
       type="n" , xaxs="i" , yaxs="i" )
  text(.5,.5,"[intentionally blank]",adj=c(.5,.5))
  if ( !is.null(saveName) ) {
    saveGraph( file=paste(saveName,"Post",sep=""), type=saveType)
  }
}
#===============================================================================
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseSpeciesList.R
\name{getspecies}
\alias{getspecies}
\title{parseSpeciesList is a prototype function to parse a species lists}
\usage{
getspecies(inputFile, short = TRUE)
}
\arguments{
\item{short}{logical parameter; if TRUE (default) the function tries to get only the names and country codes. If FALSE the full text
will put in the data frame.}
\item{inputTXT}{a Text of the specified format}
}
\description{
parseSpeciesList is a first prototype to parse beetle
species lists as provided by the enthusiasts (coleopterists) of the beetle community.
This is a very raw and simple approach and due to parsing the text line by line not really
in R style.
Unfortunately it has to be performed line by line because some keywords are missing and the rules are not always matching.
So in a first try we use the "family", "genus" and "subgenus" as keywords. They are always placed in the beginning of a
line. After "genus" or "subgenus" there is a unique line for each single species.
In the species textline we will find a more or less systematic list of country
codes that indicate all countries with known occurrence of this special species.
The resulting data frame is a non-normalized relation (i.e. one large table with mostly redundant information).
It looks like:
familiy; genus; subgenus; species; loctype; country\cr
Carabidae; Carabus; Carabinae; irregularis; A:; GE\cr
Carabidae; Carabus; Carabinae; irregularis; N:; CZ\cr
.
.
.
}
\examples{
### examples parseSpeciesList ###
### we need the stringr lib
library(stringr)
library(foreach)
### first the basic parsing
inputFile <- system.file("extdata", "species.chunk", package="parseSpeciesList")
df <- getspecies(inputFile)
### all entries only for CZ
cz<- subset(df, df$loc =='CZ')
### all entries for porculus
porculus<- subset(df, (df$species =="porculus"))
######################################
### now a basic mapping example ####
### we need some more libs ;)
if (!require(devtools)) {install.packages("devtools")} # for installation from github
if (!require(maptools)) {install.packages("maptools")} # for read shapes
if (!require(sp)) {install.packages("sp")} # for manipulating spatial data sp objects
library(devtools)
library(maptools)
library(sp)
if (!require(mapview)) {install_github("environmentalinformatics-marburg/mapview")}
library(mapview) # for modern mapping
### load prepared mapdata (source: http://thematicmapping.org/downloads/TM_WORLD_BORDERS-0.3.zip)
load("data/world.Rdata")
### now all findings of porculus (whatever it is ;))
porculus<- subset(df, (df$species =="porculus"))
### join the world countries to our data
### (iso2 fit most but there is no Code for the regions)
joinSpdf <- joinData2Map(
porculus
, nameMap = sPDF
, nameJoinIDMap = "ISO2"
, nameJoinColumnData = "loc")
#### no we have to project it
proj4string(joinSpdf) <- CRS("+init=epsg:4326")
### plot it with e.g. mapview (and have some colors and interactivity)
mapView(joinSpdf,zcol="species")
}
\author{
Chris Reudenbach, Flo Detsch
}
\references{
Löbl, I. & A. Smetana (eds): Catalogue of Palaearctic Coleoptera: \url{http://www.apollobooks.com/palaearcticcoleoptera.htm}
}
| /man/getspecies.Rd | permissive | gisma/parseSpeciesList | R | false | true | 3,398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseSpeciesList.R
\name{getspecies}
\alias{getspecies}
\title{parseSpeciesList is a prototype function to parse a species lists}
\usage{
getspecies(inputFile, short = TRUE)
}
\arguments{
\item{short}{logical parameter if TRUE (default) the function trys to get only the names and country codes. If FALSE the full text
will put in the data frame.}
\item{inputTXT}{a Text of the specified format}
}
\description{
parseSpeciesList is a first prototype to parse beetle
species lists as provided by the enthusiasts (coleopterists) of the beetle community.
This is a very raw and simple approach and due to parsing the text line by line not really
in R style.
Unfortunately it has to be performed line by line because some keywords are missing and the rules are not always matching.
So in a first try we use the "family", "genus" and "subgenus" as keywords. They are always placed in the beginning of a
line. After "genus" or "subgenus" there is a unique line for each single species.
In the species textline we will find a more or less systematic list of country
codes that indicate all countries with known occurrence of this special species.
The resulting data frame is a non-normalized relation (i.e. one large table with mostly redundant information).
It looks like:
familiy; genus; subgenus; species; loctype; country\cr
Carabidae; Carabus; Carabinae; irregularis; A:; GE\cr
Carabidae; Carabus; Carabinae; irregularis; N:; CZ\cr
.
.
.
}
\examples{
### examples parseSpeciesList ###
### we need the stringr lib
library(stringr)
library(foreach)
### first the basic parsing
inputFile <- system.file("extdata", "species.chunk", package="parseSpeciesList")
df <- getspecies(inputFile)
### all entries only for CZ
cz<- subset(df, df$loc =='CZ')
### all entries for porculus
porculus<- subset(df, (df$species =="porculus"))
######################################
### now a basic mapping example ####
### we need some more libs ;)
if (!require(devtools)) {install.packages("devtools")} # for installation from github
if (!require(maptools)) {install.packages("maptools")} # for read shapes
if (!require(sp)) {install.packages("sp")} # for manipulationg spatial data sp objects
library(devtools)
library(maptools)
library(sp)
if (!require(mapview)) {install_github("environmentalinformatics-marburg/mapview")}
library(mapview) # for modern mapping
### load prepared mapdata (source: http://thematicmapping.org/downloads/TM_WORLD_BORDERS-0.3.zip)
load("data/world.Rdata")
### now all findings of porculus (whatever it is ;))
porculus<- subset(df, (df$species =="porculus"))
### join the world countries to our data
### (iso2 fit most but there is no Code for the regions)
joinSpdf <- joinData2Map(
porculus
, nameMap = sPDF
, nameJoinIDMap = "ISO2"
, nameJoinColumnData = "loc")
#### no we have to project it
proj4string(joinSpdf) <- CRS("+init=epsg:4326")
### plot it with e.g. mapview (and have some colors and interactivity)
mapView(joinSpdf,zcol="species")
}
\author{
Chris Reudenbach, Flo Detsch
}
\references{
Löbl, I. & A. Smetana (eds): Catalogue of Palaearctic Coleoptera: \url{http://www.apollobooks.com/palaearcticcoleoptera.htm}
}
|
/test/ana_sus.R | no_license | lizhengbio/methylantion_ML_sumamry | R | false | false | 59,750 | r | ||
## Generic entry point for the Baltagi-Song-Koh spatial panel tests:
## dispatches on the class of the first argument.
bsktest <- function(x, ...) UseMethod("bsktest")
#`bsktest.splm` <-
#function(x, listw, index=NULL, test=c("CLMlambda","CLMmu"), ...){
# switch(match.arg(test), CLMlambda = {
# bsk = clmltest.model(x,listw, index, ...)
# }, CLMmu = {
# bsk = clmmtest.model(x,listw, index, ... )
# })
# return(bsk)
#}
## "formula" method for bsktest(): runs one of the five Baltagi-Song-Koh
## tests on a spatial panel model given as a formula.
## x ........... model formula
## data ........ panel data set
## index ....... optional id/time index specification (passed through)
## listw ....... spatial weights object of class "listw"
## test ........ which statistic to compute (first choice is the default)
## standardize . use the standardized variant of LM1/LM2
## Returns whatever the selected test routine returns (an "htest" object).
`bsktest.formula` <-
function(x, data, index=NULL, listw,
         test=c("LMH","LM1","LM2","CLMlambda","CLMmu"),
         standardize=TRUE, ...){
  chosen <- match.arg(test)
  result <- switch(chosen,
                   LM1       = slm1test(x, data, index, listw, standardize, ...),
                   LM2       = slm2test(x, data, index, listw, standardize, ...),
                   LMH       = LMHtest(x, data, index, listw, ...),
                   CLMlambda = clmltest(x, data, index, listw, ...),
                   CLMmu     = clmmtest(x, data, index, listw, ...))
  result
}
## Baltagi, Song and Koh conditional LM*-lambda test computed from an
## already-fitted splm "random effects ML" model: tests the null of no
## spatial error correlation (lambda = 0) while allowing sigma^2_mu >= 0.
## x ..... fitted model of class "splm", type "random effects ML"
## listw . spatial weights object of class "listw"
## index . two-column id/time index of the panel
## Returns an object of class "htest".
`clmltest.model` <-
function(x, listw, index, ...){
## depends on:
## listw2dgCMatrix.R
## REmod.R
## spreml.R
if(!inherits(x,"splm")) stop("argument should be an object of class splm")
frm<-x$call
if(x$type != "random effects ML") stop("argument should be of type random effects ML")
if(is.null(index)) stop("index should be specified to retrieve information on time and cross-sectional dimentions")
if(!inherits(listw,"listw")) stop("object w should be of class listw")
ind <- index[,1]
tind <- index[,2]
## rebuild the regressor matrix from the stored model frame
if(names(x$coefficients)[1]=="(Intercept)") X<-data.frame(cbind(rep(1,ncol(x$model)), x$model[,-1]))
else X<-x$model[,-1]
y<-x$model[,1]
eML<-x$residuals
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
X<-X[oo,]
y<-y[oo]
N<-length(unique(ind))
k<-dim(X)[[2]]
T<-max(tapply(X[,1],ind,length))
NT<-length(ind)
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))   # maps each row to its time period
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))        # maps each row to its cross-sectional unit
## variance components from the ML residuals
eme<-tapply(eML,inde1,mean)
emme<-eML - rep(eme,T)
sigmav<-crossprod(eML,emme)/(N*(T-1))
sigma1<-crossprod(eML,rep(eme,T))/N
c1<-sigmav/sigma1^2
c2<-1/sigmav
c1e<-as.numeric(c1)*eML
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
WpsW<-Wst+Ws
yybis<-function(q){
wq<-(WpsW)%*%q
wq<-as.matrix(wq)
}
## score of the log-likelihood with respect to lambda
Wc1e<-unlist(tapply(eML,inde,yybis))
sumWc1e<-tapply(Wc1e,inde1,sum)
prod1<-as.numeric(c1)*rep(sumWc1e,T)/T
prod2<-as.numeric(c2)* (Wc1e - rep(sumWc1e,T)/T)
prod<-prod1+prod2
D<-1/2*crossprod(eML,prod)
W2<-Ws%*%Ws
WW<-crossprod(Ws)
tr<-function(R) sum(diag(R))
b<-tr(W2+WW)
LMl1<-D^2 / (((T-1)+as.numeric(sigmav)^2/as.numeric(sigma1)^2)*b)
LMlstar<-sqrt(LMl1)
statistics<-LMlstar
pval <- 2*pnorm(LMlstar, lower.tail=FALSE)
names(statistics)="LM*-lambda"
method<- "Baltagi, Song and Koh LM*-lambda conditional LM test (assuming sigma^2_mu >= 0)"
## BUGFIX: the original called deparse(formula), but no local `formula`
## exists in this function, so it deparsed the base generic stats::formula.
## Use the stored model call instead.
dname <- deparse(frm)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=dname, alternative="Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh conditional LM*-mu test computed from an
## already-fitted splm "fixed effects error" model: tests the null of no
## random regional effects (sigma^2_mu = 0) while allowing the spatial
## error parameter lambda to be zero or not.
## x ..... fitted model of class "splm", type "fixed effects error"
## listw . spatial weights object of class "listw"
## index . two-column id/time index of the panel
## Returns an object of class "htest".
`clmmtest.model` <-
function(x, listw, index, ...){
if(!inherits(x,"splm")) stop("argument should be an object of class splm")
frm<-x$call
## BUGFIX: the message used to claim "random effects ML" although the
## check requires a fixed-effects spatial-error model.
if(x$type != "fixed effects error") stop("argument should be of type fixed effects error")
if(is.null(index)) stop("index should be specified to retrieve information on time and cross-sectional dimentions")
if(!inherits(listw,"listw")) stop("object w should be of class listw")
ind <- index[,1]
tind <- index[,2]
## rebuild the regressor matrix from the stored model frame
if(names(x$coefficients)[1]=="(Intercept)") X<-data.frame(cbind(rep(1,ncol(x$model)), x$model[,-1]))
else X<-x$model[,-1]
y<-x$model[,1]
eML<-x$residuals
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
X<-X[oo,]
y<-y[oo]
N<-length(unique(ind))
k<-dim(X)[[2]]
T<-max(tapply(X[,1],ind,length))
NT<-length(ind)
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))   # maps each row to its time period
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))        # maps each row to its cross-sectional unit
lambda<-x$spat.coef
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
## B = I - lambda*W and its products, as in BSK's conditional score
B<- Diagonal(N)-lambda*Ws
BpB<-crossprod(B)
BpB2 <- BpB %*% BpB
BpBi<- solve(BpB)
tr<-function(R) sum(diag(R))
trBpB<-tr(BpB)
vc<-function(R) {
BBu<-BpB %*% R
BBu<-as.matrix(BBu)
}
eme<-unlist(tapply(eML,inde,vc))
# eme<-tapply(eML,inde1,mean)
# emme<-eML - rep(eme,T)
#
sigmav2<-crossprod(eML,eme)/(N*T)
sigmav4<-sigmav2^2
yybis<-function(q){
wq<-rep(q,T)
tmp<-wq%*%eML
}
BBu<-apply(BpB2,1,yybis)
BBu<-rep(BBu,T)
upBBu<-crossprod(eML,BBu)
## score with respect to sigma^2_mu
Dmu<- -(T/(2*sigmav2))*trBpB + (1/(2*sigmav4))*upBBu
WpB<-Wst%*%B
BpW<-crossprod(B, Ws)
WpBplBpW <-WpB + BpW
bigG<-WpBplBpW %*% BpBi
smalle<-tr(BpB2)
smalld<-tr(WpBplBpW)
smallh<-trBpB
smallg<-tr(bigG)
smallc<-tr(bigG%*%bigG)
NUM<- ((2 * sigmav4)/T) * ((N*sigmav4*smallc)-(sigmav4*smallg^2)) ###equation 2.30 in the paper
DENft<- NT*sigmav4* smalle * smallc
DENst<- N*sigmav4* smalld^2
DENtt<- T*sigmav4* smallg^2 * smalle
DENfot<- 2* sigmav4 *smallg * smallh* smalld
DENfit<- sigmav4 * smallh^2* smallc
DEN<- DENft - DENst - DENtt + DENfot - DENfit
LMmu <- Dmu^2*NUM / DEN
LMmustar<- sqrt(LMmu)
statistics<-LMmustar
pval <- 2*pnorm(LMmustar, lower.tail=FALSE)
names(statistics)="LM*-mu"
method<- "Baltagi, Song and Koh LM*- mu conditional LM test (assuming lambda may or may not be = 0)"
## BUGFIX: the original called deparse(formula), but no local `formula`
## exists in this function; use the stored model call instead.
dname <- deparse(frm)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=dname, alternative="Random regional effects")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh (S)LM1 marginal test for random regional effects
## (H0: sigma^2_mu = 0) in a spatial panel, built from pooled-OLS residuals.
## `listw` is accepted for interface compatibility with the other tests but
## is not used by this statistic.  `standardize` selects the standardized
## (SLM1) or raw (LM1) version.  Returns an object of class "htest".
`slm1test` <-
function(formula, data, index=NULL, listw, standardize, ...){
if(!is.null(index)) { ####can be deleted when using the wrapper
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
x<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
cl<-match.call()
names(index)<-row.names(data)
## keep only rows that survived model.matrix (drops NA rows)
ind<-index[which(names(index)%in%row.names(x))]
tind<-tindex[which(names(index)%in%row.names(x))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
x<-x[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(x)[[2]]
T<-max(tapply(x[,1],ind,length))
NT<-length(ind)
## pooled OLS on the pre-built design matrix (no extra intercept)
ols<-lm(y~x-1)
XpXi<-solve(crossprod(x))
n<-dim(ols$model)[1]
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N)) ####indicator to get the cross-sectional observations
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T)) ####indicator to get the time periods observations
bOLS<-coefficients(ols)
e<-as.matrix(residuals(ols))
ee<-crossprod(e)
JIe<-tapply(e,inde1,sum)
JIe<-rep(JIe,T)
## G = e'(J_T x I_N)e / e'e - 1, the building block of LM1
G<-(crossprod(e,JIe)/ee)-1
tr<-function(R) sum(diag(R))
LM1<-sqrt((NT/(2*(T-1))))*as.numeric(G)
## standardized version (mean/variance correction a la Moulton-Randolph)
s<-NT-k
B<-XpXi%*%t(x)
fun<-function(Q) tapply(Q,inde1,sum)
JIx<-apply(x,2,fun)
JIX<-matrix(,NT,k)
for (i in 1:k) JIX[,i]<-rep(JIx[,i],T) ## "NOTE ON THE TRACE.R"
di<-numeric(NT)
XpJIX<-crossprod(x,JIX)
d1<-NT-tr(XpJIX%*%XpXi)
Ed1<-d1/s
di2<-numeric(NT)
JIJIx<-apply(JIX,2,fun)
JIJIX<-matrix(,NT,k)
for (i in 1:k) JIJIX[,i]<-rep(JIJIx[,i],T)
JIJIxxpx<-JIJIX%*%XpXi
di1<- crossprod(x, JIJIxxpx)
tr1<-tr(di1)
XpIJX<-crossprod(x,JIX)
fp<-XpIJX%*%B
sp<-JIX%*%XpXi
tr3<-tr(fp%*%sp)
fintr<-NT*T-2*tr1+tr3
## NOTE(review): by operator precedence this evaluates as
## (2*(s*fintr - d1^2)/s^2)*(s+2); the standardized-LM variance is
## usually written with s^2*(s+2) in the denominator -- confirm against
## Baltagi, Song & Koh (2003) / Moulton & Randolph (1989).
Vd1<-2*(s*fintr - (d1^2))/s^2*(s+2)
SLM1<-((G+1)- Ed1)/sqrt(Vd1)
statistics <- if(standardize) SLM1 else LM1
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(statistics, lower.tail=FALSE)
names(statistics) <- if(standardize) "SLM1" else "LM1"
method<- "Baltagi, Song and Koh SLM1 marginal test"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Random effects")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh (S)LM2 marginal test for spatial error
## correlation (H0: lambda = 0) in a panel, built from pooled-OLS
## residuals and the spatial weights in `listw`.  `standardize` selects
## the standardized (SLM2) or raw (LM2) version.
## Returns an object of class "htest".
`slm2test` <-
function(formula, data, index=NULL, listw, standardize, ...){
if(!is.null(index)) {
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
x<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
cl<-match.call()
names(index)<-row.names(data)
ind<-index[which(names(index)%in%row.names(x))]
tind<-tindex[which(names(index)%in%row.names(x))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
x<-x[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(x)[[2]]
T<-max(tapply(x[,1],ind,length))
NT<-length(ind)
ols<-lm(y~x-1)
XpXi<-solve(crossprod(x))
n<-dim(ols$model)[1]
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))
bOLS<-coefficients(ols)
e<-as.matrix(residuals(ols))
ee<-crossprod(e)
## symmetrized weights (W + W')/2 applied period by period
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
WWp<-(Ws+Wst)/2
yy<-function(q){
wq<-WWp%*%q
wq<-as.matrix(wq)
}
IWWpe<-unlist(tapply(e,inde,yy))
H<-crossprod(e,IWWpe)/crossprod(e)
W2<-Ws%*%Ws
WW<-crossprod(Ws)
tr<-function(R) sum(diag(R))
b<-tr(W2+WW)
LM2<-sqrt((N^2*T)/b)*as.numeric(H)
## standardized version
s<-NT-k
lag<-function(QQ)lag.listw(listw,QQ)
fun2<-function(Q) unlist(tapply(Q,inde,lag))
Wx<-apply(x,2,fun2)
WX<-matrix(Wx,NT,k)
XpWx<-crossprod(x,WX)
D2M<-XpWx%*%XpXi
Ed2<- (T*sum(diag(Ws)) - tr(D2M))/s
WWx<-apply(WX,2,fun2)
WWX<-matrix(WWx,NT,k)
XpWWX<-crossprod(x,WWX)
spb<-XpWWX%*%XpXi
spbb<-tr(spb)
tpb<-XpWx%*%XpXi%*%XpWx%*%XpXi
fintr2<-T*tr(W2) - 2* spbb + tr(tpb)
## NOTE(review): by operator precedence this evaluates as
## (2*(s*fintr2 - tr(D2M)^2)/s^2)*(s+2); the standardized-LM variance
## is usually written with s^2*(s+2) in the denominator -- confirm
## against Baltagi, Song & Koh (2003) / Moulton & Randolph (1989).
Vd2<-2*(s*fintr2 - (sum(diag(D2M))^2))/s^2*(s+2)
We<-unlist(tapply(e,inde,function(W) lag.listw(listw,W)))
d2<-crossprod(e,We)/ee
SLM2<- (d2-Ed2)/sqrt(Vd2)
statistics <- if(standardize) SLM2 else LM2
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(statistics, lower.tail=FALSE)
names(statistics) <- if(standardize) "SLM2" else "LM2"
method<- "Baltagi, Song and Koh LM2 marginal test"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh LM-H one-sided joint test of
## H0: sigma^2_mu = 0 AND lambda = 0 (no random regional effects and no
## spatial error correlation).  Combines the LM1 and LM2 building blocks
## and refers the statistic to a chi-bar-squared mixture with weights
## (1/4, 1/2, 1/4) via pchibar() (package ibdreg).
## Returns an object of class "htest".
`LMHtest` <-
function(formula, data, index=NULL, listw, ...){
## depends on listw2dgCMatrix.R
#require(ibdreg) # for mixed chisquare distribution
# now imported
if(!is.null(index)) { ####can be deleted when using the wrapper
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
x<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
cl<-match.call()
names(index)<-row.names(data)
ind<-index[which(names(index)%in%row.names(x))]
tind<-tindex[which(names(index)%in%row.names(x))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
x<-x[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(x)[[2]]
T<-max(tapply(x[,1],ind,length))
NT<-length(ind)
ols<-lm(y~x-1)
XpXi<-solve(crossprod(x))
n<-dim(ols$model)[1]
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N)) ####indicator to get the cross-sectional observations
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T)) ####indicator to get the time periods observations
bOLS<-coefficients(ols)
e<-as.matrix(residuals(ols))
ee<-crossprod(e)
JIe<-tapply(e,inde1,sum)
JIe<-rep(JIe,T)
G<-(crossprod(e,JIe)/ee)-1
tr<-function(R) sum(diag(R))
LM1<-sqrt((NT/(2*(T-1))))*as.numeric(G)
####calculate the elements of LMj, LM1, SLM1
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
WWp<-(Ws+Wst)/2
yy<-function(q){
wq<-WWp%*%q
wq<-as.matrix(wq)
}
IWWpe<-unlist(tapply(e,inde,yy))
H<-crossprod(e,IWWpe)/crossprod(e)
W2<-Ws%*%Ws
WW<-crossprod(Ws)
tr<-function(R) sum(diag(R))
b<-tr(W2+WW)
LM2<-sqrt((N^2*T)/b)*as.numeric(H)
## one-sided combination: negative components contribute zero
if (LM1<=0){
if (LM2<=0) JOINT<-0
else JOINT<-LM2^2
} ####this is chi-square_m in teh notation of the paper.
else{
if (LM2<=0) JOINT<-LM1^2
else JOINT<-LM1^2 + LM2^2
}
statistics<-JOINT
pval <- 1 - pchibar(statistics, df=0:2, wt=c(0.25,0.5,0.25))
names(statistics)="LM-H"
method<- "Baltagi, Song and Koh LM-H one-sided joint test"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Random Regional Effects and Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh conditional LM*-mu test for random regional
## effects (H0: sigma^2_mu = 0) allowing spatial error correlation.
## Fits a pooled spatial-error model by ML (spfeml) and builds the
## conditional score statistic from its residuals and spatial
## coefficient.  Returns an object of class "htest".
`clmmtest` <-
function(formula, data, index=NULL, listw, ...){
## print("uso questa")
ml <- spfeml(formula=formula, data=data, index=index, listw=listw, model="error", effects="pooled")
## spml(formula, data=data, index=index, listw, errors = "BSK", effects = "fixed", lag = FALSE, spatial.error = TRUE)
if(!is.null(index)) {
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
X<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
names(index)<-row.names(data)
ind<-index[which(names(index)%in%row.names(X))]
tind<-tindex[which(names(index)%in%row.names(X))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
X<-X[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(X)[[2]]
T<-max(tapply(X[,1],ind,length))
NT<-length(ind)
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))
lambda<-ml$spat.coef
eML<-residuals(ml)
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
## B = I - lambda*W and its products, as in BSK's conditional score
B<- Diagonal(N)-lambda*Ws
BpB<-crossprod(B)
BpB2 <- BpB %*% BpB
BpBi<- solve(BpB)
tr<-function(R) sum(diag(R))
trBpB<-tr(BpB)
vc<-function(R) {
BBu<-BpB %*% R
BBu<-as.matrix(BBu)
}
eme<-unlist(tapply(eML,inde,vc))
# eme<-tapply(eML,inde1,mean)
# emme<-eML - rep(eme,T)
#
sigmav2<-crossprod(eML,eme)/(N*T)
sigmav4<-sigmav2^2
yybis<-function(q){
wq<-rep(q,T)
tmp<-wq%*%eML
}
BBu<-apply(BpB2,1,yybis)
BBu<-rep(BBu,T)
upBBu<-crossprod(eML,BBu)
## score with respect to sigma^2_mu
Dmu<- -(T/(2*sigmav2))*trBpB + (1/(2*sigmav4))*upBBu
WpB<-Wst%*%B
BpW<-crossprod(B, Ws)
WpBplBpW <-WpB + BpW
bigG<-WpBplBpW %*% BpBi
smalle<-tr(BpB2)
smalld<-tr(WpBplBpW)
smallh<-trBpB
smallg<-tr(bigG)
smallc<-tr(bigG%*%bigG)
NUM<- ((2 * sigmav4)/T) * ((N*sigmav4*smallc)-(sigmav4*smallg^2)) ###equation 2.30 in the paper
DENft<- NT*sigmav4* smalle * smallc
DENst<- N*sigmav4* smalld^2
DENtt<- T*sigmav4* smallg^2 * smalle
DENfot<- 2* sigmav4 *smallg * smallh* smalld
DENfit<- sigmav4 * smallh^2* smallc
DEN<- DENft - DENst - DENtt + DENfot - DENfit
LMmu <- Dmu^2*NUM / DEN
LMmustar<- sqrt(LMmu)
statistics<-LMmustar
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(LMmustar, lower.tail=FALSE)
names(statistics)="LM*-mu"
method<- "Baltagi, Song and Koh LM*- mu conditional LM test (assuming lambda may or may not be = 0)"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Random regional effects")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh conditional LM*-lambda test for spatial error
## correlation (H0: lambda = 0) allowing sigma^2_mu >= 0.  Estimates the
## random-effects model with lme() (package nlme) and builds the
## conditional score statistic from its residuals.
## Returns an object of class "htest".
clmltest <- function (formula, data, index = NULL, listw)
{
# ml <- spreml(formula, data = data, w = listw2mat(listw),
# errors = "re")
if (!is.null(index)) {
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[, 1]
tindex <- data[, 2]
data$tindex <- tindex
## random-effects fit: random intercept per time index level
ml <- lme(formula, data, random=~1|tindex)
X <- model.matrix(formula, data = data)
y <- model.response(model.frame(formula, data = data))
names(index) <- row.names(data)
ind <- index[which(names(index) %in% row.names(X))]
tind <- tindex[which(names(index) %in% row.names(X))]
## sort observations by time period, then by cross-sectional unit
oo <- order(tind, ind)
X <- X[oo, ]
y <- y[oo]
ind <- ind[oo]
tind <- tind[oo]
N <- length(unique(ind))
k <- dim(X)[[2]]
T <- max(tapply(X[, 1], ind, length))
NT <- length(ind)
eML <- residuals(ml)
indic <- seq(1, T)
inde <- as.numeric(rep(indic, each = N))
ind1 <- seq(1, N)
inde1 <- as.numeric(rep(ind1, T))
## variance components from the ML residuals
eme <- tapply(eML, inde1, mean)
emme <- eML - rep(eme, T)
sigmav <- crossprod(eML, emme)/(N * (T - 1))
sigma1 <- crossprod(eML, rep(eme, T))/N
c1 <- sigmav/sigma1^2
c2 <- 1/sigmav
c1e <- as.numeric(c1) * eML
## NOTE(review): here the raw listw matrix is taken as W' and its
## transpose as W, the opposite convention of clmltest.model above; the
## quantities actually used (Wst+Ws, traces of W^2 and W'W) are
## invariant to this swap, so results agree.
Wst <- listw2dgCMatrix(listw)
Ws <- t(Wst)
WpsW <- Wst + Ws
yybis <- function(q) {
wq <- (WpsW) %*% q
wq <- as.matrix(wq)
}
## score of the log-likelihood with respect to lambda
Wc1e <- unlist(tapply(eML, inde, yybis))
sumWc1e <- tapply(Wc1e, inde1, sum)
prod1 <- as.numeric(c1) * rep(sumWc1e, T)/T
prod2 <- as.numeric(c2) * (Wc1e - rep(sumWc1e, T)/T)
prod <- prod1 + prod2
D <- 1/2 * crossprod(eML, prod)
W2 <- Ws %*% Ws
WW <- crossprod(Ws)
tr <- function(R) sum(diag(R))
b <- tr(W2 + WW)
LMl1 <- D^2/(((T - 1) + as.numeric(sigmav)^2/as.numeric(sigma1)^2) *
b)
LMlstar <- sqrt(LMl1)
statistics <- LMlstar
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(LMlstar, lower.tail = FALSE)
names(statistics) = "LM*-lambda"
method <- "Baltagi, Song and Koh LM*-lambda conditional LM test (assuming sigma^2_mu >= 0)"
dname <- deparse(formula)
RVAL <- list(statistic = statistics, method = method, p.value = pval,
data.name = deparse(formula), alternative = "Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
| /splm/R/bsktest.R | no_license | ingted/R-Examples | R | false | false | 17,644 | r | `bsktest` <-
function(x,...){
UseMethod("bsktest")
}
#`bsktest.splm` <-
#function(x, listw, index=NULL, test=c("CLMlambda","CLMmu"), ...){
# switch(match.arg(test), CLMlambda = {
# bsk = clmltest.model(x,listw, index, ...)
# }, CLMmu = {
# bsk = clmmtest.model(x,listw, index, ... )
# })
# return(bsk)
#}
## "formula" method for bsktest(): runs one of the five Baltagi-Song-Koh
## tests on a spatial panel model given as a formula.
## x ........... model formula
## data ........ panel data set
## index ....... optional id/time index specification (passed through)
## listw ....... spatial weights object of class "listw"
## test ........ which statistic to compute (first choice is the default)
## standardize . use the standardized variant of LM1/LM2
## Returns whatever the selected test routine returns (an "htest" object).
`bsktest.formula` <-
function(x, data, index=NULL, listw,
         test=c("LMH","LM1","LM2","CLMlambda","CLMmu"),
         standardize=TRUE, ...){
  chosen <- match.arg(test)
  result <- switch(chosen,
                   LM1       = slm1test(x, data, index, listw, standardize, ...),
                   LM2       = slm2test(x, data, index, listw, standardize, ...),
                   LMH       = LMHtest(x, data, index, listw, ...),
                   CLMlambda = clmltest(x, data, index, listw, ...),
                   CLMmu     = clmmtest(x, data, index, listw, ...))
  result
}
## Baltagi, Song and Koh conditional LM*-lambda test computed from an
## already-fitted splm "random effects ML" model: tests the null of no
## spatial error correlation (lambda = 0) while allowing sigma^2_mu >= 0.
## x ..... fitted model of class "splm", type "random effects ML"
## listw . spatial weights object of class "listw"
## index . two-column id/time index of the panel
## Returns an object of class "htest".
`clmltest.model` <-
function(x, listw, index, ...){
## depends on:
## listw2dgCMatrix.R
## REmod.R
## spreml.R
if(!inherits(x,"splm")) stop("argument should be an object of class splm")
frm<-x$call
if(x$type != "random effects ML") stop("argument should be of type random effects ML")
if(is.null(index)) stop("index should be specified to retrieve information on time and cross-sectional dimentions")
if(!inherits(listw,"listw")) stop("object w should be of class listw")
ind <- index[,1]
tind <- index[,2]
## rebuild the regressor matrix from the stored model frame
if(names(x$coefficients)[1]=="(Intercept)") X<-data.frame(cbind(rep(1,ncol(x$model)), x$model[,-1]))
else X<-x$model[,-1]
y<-x$model[,1]
eML<-x$residuals
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
X<-X[oo,]
y<-y[oo]
N<-length(unique(ind))
k<-dim(X)[[2]]
T<-max(tapply(X[,1],ind,length))
NT<-length(ind)
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))   # maps each row to its time period
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))        # maps each row to its cross-sectional unit
## variance components from the ML residuals
eme<-tapply(eML,inde1,mean)
emme<-eML - rep(eme,T)
sigmav<-crossprod(eML,emme)/(N*(T-1))
sigma1<-crossprod(eML,rep(eme,T))/N
c1<-sigmav/sigma1^2
c2<-1/sigmav
c1e<-as.numeric(c1)*eML
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
WpsW<-Wst+Ws
yybis<-function(q){
wq<-(WpsW)%*%q
wq<-as.matrix(wq)
}
## score of the log-likelihood with respect to lambda
Wc1e<-unlist(tapply(eML,inde,yybis))
sumWc1e<-tapply(Wc1e,inde1,sum)
prod1<-as.numeric(c1)*rep(sumWc1e,T)/T
prod2<-as.numeric(c2)* (Wc1e - rep(sumWc1e,T)/T)
prod<-prod1+prod2
D<-1/2*crossprod(eML,prod)
W2<-Ws%*%Ws
WW<-crossprod(Ws)
tr<-function(R) sum(diag(R))
b<-tr(W2+WW)
LMl1<-D^2 / (((T-1)+as.numeric(sigmav)^2/as.numeric(sigma1)^2)*b)
LMlstar<-sqrt(LMl1)
statistics<-LMlstar
pval <- 2*pnorm(LMlstar, lower.tail=FALSE)
names(statistics)="LM*-lambda"
method<- "Baltagi, Song and Koh LM*-lambda conditional LM test (assuming sigma^2_mu >= 0)"
## BUGFIX: the original called deparse(formula), but no local `formula`
## exists in this function, so it deparsed the base generic stats::formula.
## Use the stored model call instead.
dname <- deparse(frm)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=dname, alternative="Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh conditional LM*-mu test computed from an
## already-fitted splm "fixed effects error" model: tests the null of no
## random regional effects (sigma^2_mu = 0) while allowing the spatial
## error parameter lambda to be zero or not.
## x ..... fitted model of class "splm", type "fixed effects error"
## listw . spatial weights object of class "listw"
## index . two-column id/time index of the panel
## Returns an object of class "htest".
`clmmtest.model` <-
function(x, listw, index, ...){
if(!inherits(x,"splm")) stop("argument should be an object of class splm")
frm<-x$call
## BUGFIX: the message used to claim "random effects ML" although the
## check requires a fixed-effects spatial-error model.
if(x$type != "fixed effects error") stop("argument should be of type fixed effects error")
if(is.null(index)) stop("index should be specified to retrieve information on time and cross-sectional dimentions")
if(!inherits(listw,"listw")) stop("object w should be of class listw")
ind <- index[,1]
tind <- index[,2]
## rebuild the regressor matrix from the stored model frame
if(names(x$coefficients)[1]=="(Intercept)") X<-data.frame(cbind(rep(1,ncol(x$model)), x$model[,-1]))
else X<-x$model[,-1]
y<-x$model[,1]
eML<-x$residuals
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
X<-X[oo,]
y<-y[oo]
N<-length(unique(ind))
k<-dim(X)[[2]]
T<-max(tapply(X[,1],ind,length))
NT<-length(ind)
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))   # maps each row to its time period
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))        # maps each row to its cross-sectional unit
lambda<-x$spat.coef
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
## B = I - lambda*W and its products, as in BSK's conditional score
B<- Diagonal(N)-lambda*Ws
BpB<-crossprod(B)
BpB2 <- BpB %*% BpB
BpBi<- solve(BpB)
tr<-function(R) sum(diag(R))
trBpB<-tr(BpB)
vc<-function(R) {
BBu<-BpB %*% R
BBu<-as.matrix(BBu)
}
eme<-unlist(tapply(eML,inde,vc))
# eme<-tapply(eML,inde1,mean)
# emme<-eML - rep(eme,T)
#
sigmav2<-crossprod(eML,eme)/(N*T)
sigmav4<-sigmav2^2
yybis<-function(q){
wq<-rep(q,T)
tmp<-wq%*%eML
}
BBu<-apply(BpB2,1,yybis)
BBu<-rep(BBu,T)
upBBu<-crossprod(eML,BBu)
## score with respect to sigma^2_mu
Dmu<- -(T/(2*sigmav2))*trBpB + (1/(2*sigmav4))*upBBu
WpB<-Wst%*%B
BpW<-crossprod(B, Ws)
WpBplBpW <-WpB + BpW
bigG<-WpBplBpW %*% BpBi
smalle<-tr(BpB2)
smalld<-tr(WpBplBpW)
smallh<-trBpB
smallg<-tr(bigG)
smallc<-tr(bigG%*%bigG)
NUM<- ((2 * sigmav4)/T) * ((N*sigmav4*smallc)-(sigmav4*smallg^2)) ###equation 2.30 in the paper
DENft<- NT*sigmav4* smalle * smallc
DENst<- N*sigmav4* smalld^2
DENtt<- T*sigmav4* smallg^2 * smalle
DENfot<- 2* sigmav4 *smallg * smallh* smalld
DENfit<- sigmav4 * smallh^2* smallc
DEN<- DENft - DENst - DENtt + DENfot - DENfit
LMmu <- Dmu^2*NUM / DEN
LMmustar<- sqrt(LMmu)
statistics<-LMmustar
pval <- 2*pnorm(LMmustar, lower.tail=FALSE)
names(statistics)="LM*-mu"
method<- "Baltagi, Song and Koh LM*- mu conditional LM test (assuming lambda may or may not be = 0)"
## BUGFIX: the original called deparse(formula), but no local `formula`
## exists in this function; use the stored model call instead.
dname <- deparse(frm)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=dname, alternative="Random regional effects")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh (S)LM1 marginal test for random regional effects
## (H0: sigma^2_mu = 0) in a spatial panel, built from pooled-OLS residuals.
## `listw` is accepted for interface compatibility with the other tests but
## is not used by this statistic.  `standardize` selects the standardized
## (SLM1) or raw (LM1) version.  Returns an object of class "htest".
`slm1test` <-
function(formula, data, index=NULL, listw, standardize, ...){
if(!is.null(index)) { ####can be deleted when using the wrapper
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
x<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
cl<-match.call()
names(index)<-row.names(data)
## keep only rows that survived model.matrix (drops NA rows)
ind<-index[which(names(index)%in%row.names(x))]
tind<-tindex[which(names(index)%in%row.names(x))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
x<-x[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(x)[[2]]
T<-max(tapply(x[,1],ind,length))
NT<-length(ind)
## pooled OLS on the pre-built design matrix (no extra intercept)
ols<-lm(y~x-1)
XpXi<-solve(crossprod(x))
n<-dim(ols$model)[1]
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N)) ####indicator to get the cross-sectional observations
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T)) ####indicator to get the time periods observations
bOLS<-coefficients(ols)
e<-as.matrix(residuals(ols))
ee<-crossprod(e)
JIe<-tapply(e,inde1,sum)
JIe<-rep(JIe,T)
## G = e'(J_T x I_N)e / e'e - 1, the building block of LM1
G<-(crossprod(e,JIe)/ee)-1
tr<-function(R) sum(diag(R))
LM1<-sqrt((NT/(2*(T-1))))*as.numeric(G)
## standardized version (mean/variance correction a la Moulton-Randolph)
s<-NT-k
B<-XpXi%*%t(x)
fun<-function(Q) tapply(Q,inde1,sum)
JIx<-apply(x,2,fun)
JIX<-matrix(,NT,k)
for (i in 1:k) JIX[,i]<-rep(JIx[,i],T) ## "NOTE ON THE TRACE.R"
di<-numeric(NT)
XpJIX<-crossprod(x,JIX)
d1<-NT-tr(XpJIX%*%XpXi)
Ed1<-d1/s
di2<-numeric(NT)
JIJIx<-apply(JIX,2,fun)
JIJIX<-matrix(,NT,k)
for (i in 1:k) JIJIX[,i]<-rep(JIJIx[,i],T)
JIJIxxpx<-JIJIX%*%XpXi
di1<- crossprod(x, JIJIxxpx)
tr1<-tr(di1)
XpIJX<-crossprod(x,JIX)
fp<-XpIJX%*%B
sp<-JIX%*%XpXi
tr3<-tr(fp%*%sp)
fintr<-NT*T-2*tr1+tr3
## NOTE(review): by operator precedence this evaluates as
## (2*(s*fintr - d1^2)/s^2)*(s+2); the standardized-LM variance is
## usually written with s^2*(s+2) in the denominator -- confirm against
## Baltagi, Song & Koh (2003) / Moulton & Randolph (1989).
Vd1<-2*(s*fintr - (d1^2))/s^2*(s+2)
SLM1<-((G+1)- Ed1)/sqrt(Vd1)
statistics <- if(standardize) SLM1 else LM1
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(statistics, lower.tail=FALSE)
names(statistics) <- if(standardize) "SLM1" else "LM1"
method<- "Baltagi, Song and Koh SLM1 marginal test"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Random effects")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh (S)LM2 marginal test for spatial error
## correlation (H0: lambda = 0) in a panel, built from pooled-OLS
## residuals and the spatial weights in `listw`.  `standardize` selects
## the standardized (SLM2) or raw (LM2) version.
## Returns an object of class "htest".
`slm2test` <-
function(formula, data, index=NULL, listw, standardize, ...){
if(!is.null(index)) {
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
x<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
cl<-match.call()
names(index)<-row.names(data)
ind<-index[which(names(index)%in%row.names(x))]
tind<-tindex[which(names(index)%in%row.names(x))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
x<-x[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(x)[[2]]
T<-max(tapply(x[,1],ind,length))
NT<-length(ind)
ols<-lm(y~x-1)
XpXi<-solve(crossprod(x))
n<-dim(ols$model)[1]
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))
bOLS<-coefficients(ols)
e<-as.matrix(residuals(ols))
ee<-crossprod(e)
## symmetrized weights (W + W')/2 applied period by period
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
WWp<-(Ws+Wst)/2
yy<-function(q){
wq<-WWp%*%q
wq<-as.matrix(wq)
}
IWWpe<-unlist(tapply(e,inde,yy))
H<-crossprod(e,IWWpe)/crossprod(e)
W2<-Ws%*%Ws
WW<-crossprod(Ws)
tr<-function(R) sum(diag(R))
b<-tr(W2+WW)
LM2<-sqrt((N^2*T)/b)*as.numeric(H)
## standardized version
s<-NT-k
lag<-function(QQ)lag.listw(listw,QQ)
fun2<-function(Q) unlist(tapply(Q,inde,lag))
Wx<-apply(x,2,fun2)
WX<-matrix(Wx,NT,k)
XpWx<-crossprod(x,WX)
D2M<-XpWx%*%XpXi
Ed2<- (T*sum(diag(Ws)) - tr(D2M))/s
WWx<-apply(WX,2,fun2)
WWX<-matrix(WWx,NT,k)
XpWWX<-crossprod(x,WWX)
spb<-XpWWX%*%XpXi
spbb<-tr(spb)
tpb<-XpWx%*%XpXi%*%XpWx%*%XpXi
fintr2<-T*tr(W2) - 2* spbb + tr(tpb)
## NOTE(review): by operator precedence this evaluates as
## (2*(s*fintr2 - tr(D2M)^2)/s^2)*(s+2); the standardized-LM variance
## is usually written with s^2*(s+2) in the denominator -- confirm
## against Baltagi, Song & Koh (2003) / Moulton & Randolph (1989).
Vd2<-2*(s*fintr2 - (sum(diag(D2M))^2))/s^2*(s+2)
We<-unlist(tapply(e,inde,function(W) lag.listw(listw,W)))
d2<-crossprod(e,We)/ee
SLM2<- (d2-Ed2)/sqrt(Vd2)
statistics <- if(standardize) SLM2 else LM2
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(statistics, lower.tail=FALSE)
names(statistics) <- if(standardize) "SLM2" else "LM2"
method<- "Baltagi, Song and Koh LM2 marginal test"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh LM-H one-sided joint test of
## H0: sigma^2_mu = 0 AND lambda = 0 (no random regional effects and no
## spatial error correlation).  Combines the LM1 and LM2 building blocks
## and refers the statistic to a chi-bar-squared mixture with weights
## (1/4, 1/2, 1/4) via pchibar() (package ibdreg).
## Returns an object of class "htest".
`LMHtest` <-
function(formula, data, index=NULL, listw, ...){
## depends on listw2dgCMatrix.R
#require(ibdreg) # for mixed chisquare distribution
# now imported
if(!is.null(index)) { ####can be deleted when using the wrapper
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
x<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
cl<-match.call()
names(index)<-row.names(data)
ind<-index[which(names(index)%in%row.names(x))]
tind<-tindex[which(names(index)%in%row.names(x))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
x<-x[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(x)[[2]]
T<-max(tapply(x[,1],ind,length))
NT<-length(ind)
ols<-lm(y~x-1)
XpXi<-solve(crossprod(x))
n<-dim(ols$model)[1]
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N)) ####indicator to get the cross-sectional observations
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T)) ####indicator to get the time periods observations
bOLS<-coefficients(ols)
e<-as.matrix(residuals(ols))
ee<-crossprod(e)
JIe<-tapply(e,inde1,sum)
JIe<-rep(JIe,T)
G<-(crossprod(e,JIe)/ee)-1
tr<-function(R) sum(diag(R))
LM1<-sqrt((NT/(2*(T-1))))*as.numeric(G)
####calculate the elements of LMj, LM1, SLM1
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
WWp<-(Ws+Wst)/2
yy<-function(q){
wq<-WWp%*%q
wq<-as.matrix(wq)
}
IWWpe<-unlist(tapply(e,inde,yy))
H<-crossprod(e,IWWpe)/crossprod(e)
W2<-Ws%*%Ws
WW<-crossprod(Ws)
tr<-function(R) sum(diag(R))
b<-tr(W2+WW)
LM2<-sqrt((N^2*T)/b)*as.numeric(H)
## one-sided combination: negative components contribute zero
if (LM1<=0){
if (LM2<=0) JOINT<-0
else JOINT<-LM2^2
} ####this is chi-square_m in teh notation of the paper.
else{
if (LM2<=0) JOINT<-LM1^2
else JOINT<-LM1^2 + LM2^2
}
statistics<-JOINT
pval <- 1 - pchibar(statistics, df=0:2, wt=c(0.25,0.5,0.25))
names(statistics)="LM-H"
method<- "Baltagi, Song and Koh LM-H one-sided joint test"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Random Regional Effects and Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh conditional LM*-mu test for random regional
## effects (H0: sigma^2_mu = 0) allowing spatial error correlation.
## Fits a pooled spatial-error model by ML (spfeml) and builds the
## conditional score statistic from its residuals and spatial
## coefficient.  Returns an object of class "htest".
`clmmtest` <-
function(formula, data, index=NULL, listw, ...){
## print("uso questa")
ml <- spfeml(formula=formula, data=data, index=index, listw=listw, model="error", effects="pooled")
## spml(formula, data=data, index=index, listw, errors = "BSK", effects = "fixed", lag = FALSE, spatial.error = TRUE)
if(!is.null(index)) {
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[,1]
tindex <- data[,2]
X<-model.matrix(formula,data=data)
y<-model.response(model.frame(formula,data=data))
names(index)<-row.names(data)
ind<-index[which(names(index)%in%row.names(X))]
tind<-tindex[which(names(index)%in%row.names(X))]
## sort observations by time period, then by cross-sectional unit
oo<-order(tind,ind)
X<-X[oo,]
y<-y[oo]
ind<-ind[oo]
tind<-tind[oo]
N<-length(unique(ind))
k<-dim(X)[[2]]
T<-max(tapply(X[,1],ind,length))
NT<-length(ind)
indic<-seq(1,T)
inde<-as.numeric(rep(indic,each=N))
ind1<-seq(1,N)
inde1<-as.numeric(rep(ind1,T))
lambda<-ml$spat.coef
eML<-residuals(ml)
Ws<-listw2dgCMatrix(listw)
Wst<-t(Ws)
## B = I - lambda*W and its products, as in BSK's conditional score
B<- Diagonal(N)-lambda*Ws
BpB<-crossprod(B)
BpB2 <- BpB %*% BpB
BpBi<- solve(BpB)
tr<-function(R) sum(diag(R))
trBpB<-tr(BpB)
vc<-function(R) {
BBu<-BpB %*% R
BBu<-as.matrix(BBu)
}
eme<-unlist(tapply(eML,inde,vc))
# eme<-tapply(eML,inde1,mean)
# emme<-eML - rep(eme,T)
#
sigmav2<-crossprod(eML,eme)/(N*T)
sigmav4<-sigmav2^2
yybis<-function(q){
wq<-rep(q,T)
tmp<-wq%*%eML
}
BBu<-apply(BpB2,1,yybis)
BBu<-rep(BBu,T)
upBBu<-crossprod(eML,BBu)
## score with respect to sigma^2_mu
Dmu<- -(T/(2*sigmav2))*trBpB + (1/(2*sigmav4))*upBBu
WpB<-Wst%*%B
BpW<-crossprod(B, Ws)
WpBplBpW <-WpB + BpW
bigG<-WpBplBpW %*% BpBi
smalle<-tr(BpB2)
smalld<-tr(WpBplBpW)
smallh<-trBpB
smallg<-tr(bigG)
smallc<-tr(bigG%*%bigG)
NUM<- ((2 * sigmav4)/T) * ((N*sigmav4*smallc)-(sigmav4*smallg^2)) ###equation 2.30 in the paper
DENft<- NT*sigmav4* smalle * smallc
DENst<- N*sigmav4* smalld^2
DENtt<- T*sigmav4* smallg^2 * smalle
DENfot<- 2* sigmav4 *smallg * smallh* smalld
DENfit<- sigmav4 * smallh^2* smallc
DEN<- DENft - DENst - DENtt + DENfot - DENfit
LMmu <- Dmu^2*NUM / DEN
LMmustar<- sqrt(LMmu)
statistics<-LMmustar
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(LMmustar, lower.tail=FALSE)
names(statistics)="LM*-mu"
method<- "Baltagi, Song and Koh LM*- mu conditional LM test (assuming lambda may or may not be = 0)"
dname <- deparse(formula)
RVAL <- list(statistic = statistics,
method = method,
p.value = pval, data.name=deparse(formula), alternative="Random regional effects")
class(RVAL) <- "htest"
return(RVAL)
}
## Baltagi, Song and Koh conditional LM*-lambda test for spatial error
## correlation (H0: lambda = 0) allowing sigma^2_mu >= 0.  Estimates the
## random-effects model with lme() (package nlme) and builds the
## conditional score statistic from its residuals.
## Returns an object of class "htest".
clmltest <- function (formula, data, index = NULL, listw)
{
# ml <- spreml(formula, data = data, w = listw2mat(listw),
# errors = "re")
if (!is.null(index)) {
#require(plm)
data <- plm.data(data, index)
}
## first two columns of `data` are the individual and time indices
index <- data[, 1]
tindex <- data[, 2]
data$tindex <- tindex
## random-effects fit: random intercept per time index level
ml <- lme(formula, data, random=~1|tindex)
X <- model.matrix(formula, data = data)
y <- model.response(model.frame(formula, data = data))
names(index) <- row.names(data)
ind <- index[which(names(index) %in% row.names(X))]
tind <- tindex[which(names(index) %in% row.names(X))]
## sort observations by time period, then by cross-sectional unit
oo <- order(tind, ind)
X <- X[oo, ]
y <- y[oo]
ind <- ind[oo]
tind <- tind[oo]
N <- length(unique(ind))
k <- dim(X)[[2]]
T <- max(tapply(X[, 1], ind, length))
NT <- length(ind)
eML <- residuals(ml)
indic <- seq(1, T)
inde <- as.numeric(rep(indic, each = N))
ind1 <- seq(1, N)
inde1 <- as.numeric(rep(ind1, T))
## variance components from the ML residuals
eme <- tapply(eML, inde1, mean)
emme <- eML - rep(eme, T)
sigmav <- crossprod(eML, emme)/(N * (T - 1))
sigma1 <- crossprod(eML, rep(eme, T))/N
c1 <- sigmav/sigma1^2
c2 <- 1/sigmav
c1e <- as.numeric(c1) * eML
## NOTE(review): here the raw listw matrix is taken as W' and its
## transpose as W, the opposite convention of clmltest.model above; the
## quantities actually used (Wst+Ws, traces of W^2 and W'W) are
## invariant to this swap, so results agree.
Wst <- listw2dgCMatrix(listw)
Ws <- t(Wst)
WpsW <- Wst + Ws
yybis <- function(q) {
wq <- (WpsW) %*% q
wq <- as.matrix(wq)
}
## score of the log-likelihood with respect to lambda
Wc1e <- unlist(tapply(eML, inde, yybis))
sumWc1e <- tapply(Wc1e, inde1, sum)
prod1 <- as.numeric(c1) * rep(sumWc1e, T)/T
prod2 <- as.numeric(c2) * (Wc1e - rep(sumWc1e, T)/T)
prod <- prod1 + prod2
D <- 1/2 * crossprod(eML, prod)
W2 <- Ws %*% Ws
WW <- crossprod(Ws)
tr <- function(R) sum(diag(R))
b <- tr(W2 + WW)
LMl1 <- D^2/(((T - 1) + as.numeric(sigmav)^2/as.numeric(sigma1)^2) *
b)
LMlstar <- sqrt(LMl1)
statistics <- LMlstar
## NOTE(review): for a negative statistic this two-sided p-value exceeds
## 1 -- confirm one- vs two-sided intent.
pval <- 2*pnorm(LMlstar, lower.tail = FALSE)
names(statistics) = "LM*-lambda"
method <- "Baltagi, Song and Koh LM*-lambda conditional LM test (assuming sigma^2_mu >= 0)"
dname <- deparse(formula)
RVAL <- list(statistic = statistics, method = method, p.value = pval,
data.name = deparse(formula), alternative = "Spatial autocorrelation")
class(RVAL) <- "htest"
return(RVAL)
}
|
# Count how many constraints of the first block the assignment violates:
# the two chained ratios must be equal and each strictly below one.
# Returns 0 when every constraint holds.
eq1 <- function(a1, a2, a3, a4, a5) {
  ratio1 <- a1 / (a2 + a3)
  ratio2 <- (a2 + a3) / (a3 + a4 + a5)
  violations <- 0
  if (ratio1 != ratio2) {
    violations <- violations + 1
  }
  if (ratio1 >= 1) {
    violations <- violations + 1
  }
  if (ratio2 >= 1) {
    violations <- violations + 1
  }
  violations
}
# Count how many constraints of the middle block the assignment violates:
# the three chained ratios must all be equal and each strictly below one.
# Returns 0 when every constraint holds.
eq2 <- function(a5, a6, a7, a8, a9, a10) {
  ratio1 <- a6 / (a6 + a7)
  ratio2 <- (a6 + a7) / (a7 + a8 + a9)
  ratio3 <- (a7 + a8 + a9) / (a5 + a9 + a10)
  violations <- 0
  if (ratio1 != ratio2) {
    violations <- violations + 1
  }
  if (ratio2 != ratio3) {
    violations <- violations + 1
  }
  if (ratio1 != ratio3) {
    violations <- violations + 1
  }
  if (ratio1 >= 1) {
    violations <- violations + 1
  }
  if (ratio2 >= 1) {
    violations <- violations + 1
  }
  if (ratio3 >= 1) {
    violations <- violations + 1
  }
  violations
}
# Count how many constraints of the final block the assignment violates:
# the two chained ratios must be equal and each strictly below one.
# Returns 0 when every constraint holds.
eq3 <- function(a10, a11, a12, a13) {
  ratio1 <- (a11 + a12) / (a12 + a13)
  ratio2 <- (a12 + a13) / (a13 + a10)
  violations <- 0
  if (ratio1 != ratio2) {
    violations <- violations + 1
  }
  if (ratio1 >= 1) {
    violations <- violations + 1
  }
  if (ratio2 >= 1) {
    violations <- violations + 1
  }
  violations
}
# Enumerate assignments for positions a1..a4 given a fixed a5: try every
# 4-subset of the remaining digits 1..13 in every order and keep those for
# which eq1() reports no violated constraint.  Each hit contributes the
# five values (a1, a2, a3, a4, a5) to the flat result vector (NULL when
# nothing is found).  The print() calls reproduce the original progress trace.
solve1 <- function(a5) {
  domain <- setdiff(seq(13), a5)
  states <- combn(domain, 4)
  found <- list()
  # seq_len(ncol(...)) instead of 1:length(states[1,]) — safe and idiomatic
  for (col in seq_len(ncol(states))) {
    print(states[, col])
    for (nums in permn(states[, col])) {
      print(nums)
      if (!eq1(nums[1], nums[2], nums[3], nums[4], a5)) {
        print("found1!")
        found[[length(found) + 1]] <- c(nums[1], nums[2], nums[3], nums[4], a5)
      }
    }
  }
  return(unlist(found))
}
# As solve1, but for positions a6..a10: try every 5-subset of the remaining
# `domain` in every order and keep assignments for which eq2() reports no
# violated constraint, each prefixed with the already-fixed values `ex`.
# Returns a flat vector of 10-value solutions (NULL when nothing is found).
solve2 <- function(ex, a5, domain) {
  states <- combn(domain, 5)
  found <- list()
  # seq_len(ncol(...)) instead of 1:length(states[1,]) — safe and idiomatic
  for (col in seq_len(ncol(states))) {
    for (nums in permn(states[, col])) {
      if (!eq2(a5, nums[1], nums[2], nums[3], nums[4], nums[5])) {
        print("found2!")
        found[[length(found) + 1]] <- c(ex, nums[1], nums[2], nums[3], nums[4], nums[5])
      }
    }
  }
  return(unlist(found))
}
# Final stage: try every ordering of the last three unused digits and keep
# those for which eq3() reports no violated constraint, each prefixed with
# the already-fixed values `ex`.
solve3 <- function(ex, a10, dom) {
  found <- c()
  for (candidate in permn(dom)) {
    if (!eq3(a10, candidate[1], candidate[2], candidate[3])) {
      print("found3!")
      found <- c(found, c(ex, candidate[1], candidate[2], candidate[3]))
    }
  }
  return(found)
}
library("combinat")

# Stage 1: for every choice of a5 in 1..13, collect all (a1..a4, a5)
# assignments that satisfy the first constraint block.
first_constraint_ans <- c()
for (i in 1:13) {
  a5 <- i
  first_constraint_ans <- c(first_constraint_ans, solve1(a5))
}
first_ans <- matrix(first_constraint_ans, nrow = 5)

# Stage 2: extend each 5-value partial solution with a6..a10 drawn from the
# digits not used so far.  (seq_len(ncol(...)) replaces 1:length(x[1,]).)
second_ans <- c()
for (i in seq_len(ncol(first_ans))) {
  a5 <- first_ans[5, i]
  ex <- first_ans[, i]
  tot_domain <- seq(13)
  tot_domain <- tot_domain[!(tot_domain %in% ex)]
  second_ans <- c(second_ans, c(solve2(ex, a5, tot_domain)))
}
second_ans <- matrix(second_ans, nrow = 10)

# Stage 3: complete each 10-value partial solution with a11..a13.
third_ans <- c()
for (i in seq_len(ncol(second_ans))) {
  a10 <- second_ans[10, i]
  ex <- second_ans[, i]
  tot_domain <- seq(13)
  tot_domain <- tot_domain[!(tot_domain %in% ex)]
  third_ans <- c(third_ans, c(solve3(ex, a10, tot_domain)))
}

ans_1 <- third_ans
ans_1
save(ans_1, file = '1273168143_5.RData')
| /Learn_coding/Q5.R | no_license | kian79/data_science_with_R | R | false | false | 2,540 | r | eq1 <- function(a1,a2,a3,a4,a5){
e = 0
f = a1/(a2+a3)
s = (a2+a3)/(a3+a4+a5)
if (f!=s){
e=e+1
}
if(f>=1){
e=e+1
}
if (s>=1)
e=e+1
return(e)
}
eq2 <- function(a5,a6,a7,a8,a9,a10){
e=0
f = a6/(a6+a7)
s = (a6+a7)/(a7+a8+a9)
t = (a7+a8+a9)/(a5+a9+a10)
if (f!=s){
e=e+1
}
if (s!=t)
e=e+1
if (f!=t)
e=e+1
if(f>=1){
e=e+1
}
if (s>=1)
e=e+1
if (t>=1)
e = e+1
return(e)
}
eq3 <- function(a10,a11,a12,a13){
e=0
f = (a11+a12)/(a12+a13)
s = (a12+a13)/(a13+a10)
if (f!=s){
e=e+1
}
if(f>=1){
e=e+1
}
if (s>=1)
e=e+1
return(e)
}
solve1<-function(a5){
domain=seq(13)
domain=domain[domain!=a5]
states=combn(domain,4)
ans<-c()
for( i in 1:length(states[1,])){
print(states[,i])
for(j in permn(states[,i])){
nums<-j
print(j)
if(!eq1(nums[1],nums[2],nums[3],nums[4],a5)){
print("found1!")
ans<-c(ans,c(nums[1],nums[2],nums[3],nums[4],a5))
}
}
}
return(ans)
}
solve2<-function(ex,a5,domain){
states=combn(domain,5)
ans<-c()
for( i in 1:length(states[1,])){
for(j in permn(states[,i])){
nums<-j
if(!eq2(a5,nums[1],nums[2],nums[3],nums[4],nums[5])){
print("found2!")
ans<-c(ans,c(ex,nums[1],nums[2],nums[3],nums[4],nums[5]))
}
}
}
return(ans)
}
solve3<-function(ex,a10,dom){
ans<-c()
for(j in permn(dom)){
vars<-j
if(!eq3(a10,vars[1],vars[2],vars[3])){
print("found3!")
ans<-c(ans,c(ex,vars[1],vars[2],vars[3]))
}
}
return(ans)
}
library("combinat")
first_constraint_ans=c()
for (i in 1:13){
a5=i
first_constraint_ans<-c(first_constraint_ans,solve1(a5))
}
first_ans=matrix(first_constraint_ans,nrow = 5)
second_ans<-c()
for(i in 1:length(first_ans[1,])){
a5<-(first_ans[5,i])
ex=first_ans[,i]
tot_domain=seq(13)
tot_domain<-tot_domain[!(tot_domain %in% ex)]
second_ans=c(second_ans,c(solve2(ex,a5,tot_domain)))
}
second_ans=matrix(second_ans,nrow=10)
third_ans<-c()
for(i in 1:length(second_ans[1,])){
a10<-(second_ans[10,i])
ex=second_ans[,i]
tot_domain=seq(13)
tot_domain<-tot_domain[!(tot_domain %in% ex)]
third_ans=c(third_ans,c(solve3(ex,a10,tot_domain)))
}
ans_1 = third_ans
ans_1
save(ans_1,file='1273168143_5.RData')
|
library(ape)
testtree <- read.tree("5934_8.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5934_8_unrooted.txt") | /codeml_files/newick_trees_processed/5934_8/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("5934_8.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5934_8_unrooted.txt") |
####################################################################################################
#' This function finds the sequence for graph
#' @param RS.val - central value around which the plotting sequence is built
#' @return A sorted numeric vector of 11 values: `RS.val` together with 10
#'   points evenly spaced over `RS.val - 0.1` to `RS.val + 0.1`
#' @family Internal methods
#' @keywords internal
Find.sequence.rma <- function(RS.val) {
  grid <- seq(RS.val - 0.1, RS.val + 0.1, length.out = 10)
  sort(c(RS.val, grid), decreasing = FALSE)
}
####################################################################################################
#' This function finds the rma OR/RR/RD for the given range using metafor
#' @param a - 2x2 table cell passed as `ai` to \code{metafor::rma}
#' @param b - 2x2 table cell passed as `bi` to \code{metafor::rma}
#' @param c - 2x2 table cell passed as `ci` to \code{metafor::rma}
#' @param d - 2x2 table cell passed as `di` to \code{metafor::rma}
#' @param RS.seq - numeric vector of continuity corrections (cc) to evaluate
#' @param metric - 1/2/3 = OR/RR/RD with cc added to every cell;
#'   4/5/6 = OR/RR/RD with cc added to zero cells only
#' @return A matrix with one row per cc value and columns
#'   (cc, estimate, ci.lb, ci.ub); rows stay NA when `metric` is not in 1:6
#' @family Internal methods
#' @keywords internal
Find.rma.metric <- function(a, b, c, d, RS.seq, metric) {
  iterations <- length(RS.seq)
  output <- matrix(ncol = 4, nrow = iterations)
  if (metric %in% 1:6) {
    # the measure cycles OR, RR, RD within each correction scheme
    measure <- c("OR", "RR", "RD")[(metric - 1) %% 3 + 1]
    zero.only <- metric > 3
    for (gseq in seq_len(iterations)) {
      cc <- RS.seq[gseq]
      if (zero.only) {
        # add the correction only to cells that are zero
        ai <- if (a == 0) a + cc else a
        bi <- if (b == 0) b + cc else b
        ci <- if (c == 0) c + cc else c
        di <- if (d == 0) d + cc else d
      } else {
        # add the correction to every cell
        ai <- a + cc
        bi <- b + cc
        ci <- c + cc
        di <- d + cc
      }
      # fit once per row instead of refitting for b, ci.lb and ci.ub
      fit <- metafor::rma(ai = ai, bi = bi, ci = ci, di = di, measure = measure)
      est <- c(fit$b, fit$ci.lb, fit$ci.ub)
      if (measure != "RD") {
        est <- exp(est)  # OR and RR are estimated on the log scale
      }
      output[gseq, ] <- c(cc, est)
    }
  }
  return(output)
}
####################################################################################################
#' This Function to calculate OR/RR/RD and the LHS/RHS of change
#' @param odf.row - one-row data.frame with 2x2 cell counts `a`, `b`, `c`, `d`
#' @param cc.vals - numeric vector of the 14 continuity corrections to apply
#' @return A dataframe with
#' \item{Full.Slevel.df}{one row: per-correction OR/RR/RD values for the
#'   all-cells and zero-cells correction schemes plus their max/min}
#' @family Internal methods
#' @keywords internal
In.Each.Row <- function(odf.row, cc.vals)
{
  n.cc <- length(cc.vals)
  # Column-name suffixes for the 14 continuity corrections; fixed by the
  # established interface of this function (14 values expected in cc.vals).
  suffixes <- c("tpm8", "tpm4", "001", "01", "015", "1", "15",
                "2", "25", "3", "35", "4", "45", "5")
  calc.funs <- list(or = Calculate.cc.OR, rr = Calculate.cc.RR,
                    rd = Calculate.cc.RD)
  pieces <- vector("list", length(calc.funs))
  for (m in seq_along(calc.funs)) {
    metric <- names(calc.funs)[m]
    # column 1: correction added to all cells; column 2: zero cells only
    out <- matrix(ncol = 2, nrow = n.cc)
    for (j in seq_len(n.cc)) {
      out[j, ] <- as.matrix(calc.funs[[metric]](odf.row, cc.vals[j]))
    }
    odf <- data.frame(t(out[, 1]), t(out[, 2]))
    names(odf) <- c(paste0(metric, ".cc.all.", suffixes),
                    paste0(metric, ".cc.zero.", suffixes))
    stats <- data.frame(max(out[, 1]), min(out[, 1]),
                        max(out[, 2]), min(out[, 2]))
    names(stats) <- paste0(c("Max.", "Min.", "Max.", "Min."), metric,
                           rep(c(".all", ".zero"), each = 2))
    pieces[[m]] <- cbind(odf, stats)
  }
  Full.Slevel.df <- do.call(cbind, pieces)
  return(Full.Slevel.df)
}
############################################################################################
#' This Function to calculate OR and the point of change
#' @param cc.df - one-row data.frame holding 2x2 cell counts `a`, `b`, `c`, `d`
#' @param cc.value - continuity correction added to the cells
#' @return A one-row dataframe with
#' \item{ora}{odds ratio with the correction added to every cell}
#' \item{orz}{odds ratio with the correction added to zero cells only}
#' @family Internal methods
#' @keywords internal
Calculate.cc.OR <- function(cc.df, cc.value)
{
  # corrected counts with cc.value added to every cell
  a.corr <- cc.df$a + cc.value
  b.corr <- cc.df$b + cc.value
  c.corr <- cc.df$c + cc.value
  d.corr <- cc.df$d + cc.value
  # corrected counts with cc.value added to zero cells only
  a.z <- if (cc.df$a == 0) cc.df$a + cc.value else cc.df$a
  b.z <- if (cc.df$b == 0) cc.df$b + cc.value else cc.df$b
  c.z <- if (cc.df$c == 0) cc.df$c + cc.value else cc.df$c
  d.z <- if (cc.df$d == 0) cc.df$d + cc.value else cc.df$d
  data.frame(ora = (a.corr * d.corr) / (b.corr * c.corr),
             orz = (a.z * d.z) / (b.z * c.z))
}
############################################################################################
#' This Function to calculate RR and the point of change
#' @param cc.df - one-row data.frame holding 2x2 cell counts `a`, `b`, `c`, `d`
#' @param cc.value - continuity correction added to the cells
#' @return A one-row dataframe with
#' \item{rra}{relative risk with the correction added to every cell}
#' \item{rrz}{relative risk with the correction added to zero cells only}
#' @family Internal methods
#' @keywords internal
Calculate.cc.RR <- function(cc.df, cc.value)
{
  # corrected counts with cc.value added to every cell
  a.corr <- cc.df$a + cc.value
  b.corr <- cc.df$b + cc.value
  c.corr <- cc.df$c + cc.value
  d.corr <- cc.df$d + cc.value
  n.treat.corr <- a.corr + b.corr
  n.ctrl.corr <- c.corr + d.corr
  # corrected counts with cc.value added to zero cells only
  a.z <- if (cc.df$a == 0) cc.df$a + cc.value else cc.df$a
  b.z <- if (cc.df$b == 0) cc.df$b + cc.value else cc.df$b
  c.z <- if (cc.df$c == 0) cc.df$c + cc.value else cc.df$c
  d.z <- if (cc.df$d == 0) cc.df$d + cc.value else cc.df$d
  n.treat.z <- a.z + b.z
  n.ctrl.z <- c.z + d.z
  data.frame(rra = (a.corr / n.treat.corr) / (c.corr / n.ctrl.corr),
             rrz = (a.z / n.treat.z) / (c.z / n.ctrl.z))
}
############################################################################################
#' This Function to calculate RD and the point of change
#' @param cc.df - one-row data.frame holding 2x2 cell counts `a`, `b`, `c`, `d`
#' @param cc.value - continuity correction added to the cells
#' @return A one-row dataframe with
#' \item{rda}{risk difference with the correction added to every cell}
#' \item{rdz}{risk difference with the correction added to zero cells only}
#' @family Internal methods
#' @keywords internal
Calculate.cc.RD <- function(cc.df, cc.value)
{
  # corrected counts with cc.value added to every cell
  a.corr <- cc.df$a + cc.value
  b.corr <- cc.df$b + cc.value
  c.corr <- cc.df$c + cc.value
  d.corr <- cc.df$d + cc.value
  n.treat.corr <- a.corr + b.corr
  n.ctrl.corr <- c.corr + d.corr
  # corrected counts with cc.value added to zero cells only
  a.z <- if (cc.df$a == 0) cc.df$a + cc.value else cc.df$a
  b.z <- if (cc.df$b == 0) cc.df$b + cc.value else cc.df$b
  c.z <- if (cc.df$c == 0) cc.df$c + cc.value else cc.df$c
  d.z <- if (cc.df$d == 0) cc.df$d + cc.value else cc.df$d
  n.treat.z <- a.z + b.z
  n.ctrl.z <- c.z + d.z
  data.frame(rda = (a.corr / n.treat.corr) - (c.corr / n.ctrl.corr),
             rdz = (a.z / n.treat.z) - (c.z / n.ctrl.z))
}
############################################################################################
#' This Is between utility function
#' @param x - numeric value(s) to test (vectorized)
#' @param a - one endpoint of the interval
#' @param b - the other endpoint of the interval
#' @return Logical vector: TRUE where `x` lies strictly between
#'   `min(a, b)` and `max(a, b)` (both endpoints excluded)
#' @family Internal methods
#' @keywords internal
is.between <- function(x, a, b) {
  lower <- min(a, b)
  upper <- max(a, b)
  lower < x & x < upper
}
| /R/304.Internal-methods.R | no_license | RajeswaranV/vcdPlus | R | false | false | 11,132 | r | ####################################################################################################
#' This function finds the sequence for graph
#' @param RS.seq - Sequence of values for cc
#' @return A dataframe with
#' \item{Ret.seq}{Sequence}
#' @family Internal methods
#' @keywords internal
Find.sequence.rma<- function(RS.val) {
small=RS.val-0.1
big=RS.val+0.1
New.seq=seq(small, big, length.out = 10)
Ret.seq=sort(c(RS.val,New.seq),decreasing = FALSE)
return(Ret.seq)
}
####################################################################################################
#' This function finds the rma OR/RR/RD for the given range using metafor
#' @param a - description
#' @param b - description
#' @param c - description
#' @param d - description
#' @param RS.seq - description
#' @param metric - description
#' @return A dataframe with
#' \item{output}{Sequence}
#' @family Internal methods
#' @keywords internal
Find.rma.metric<- function(a,b,c,d,RS.seq, metric) {
iterations = length(RS.seq)
variables = 4
output <- matrix(ncol=variables, nrow=iterations)
for(gseq in 1:iterations){
cc=RS.seq[gseq]
a.all = a + cc
b.all = b + cc
c.all = c + cc
d.all = d + cc
if(a==0) {a.zero = a + cc} else {a.zero=a}
if(b==0) {b.zero = b + cc} else {b.zero=b}
if(c==0) {c.zero = c + cc} else {c.zero=c}
if(d==0) {d.zero = d + cc} else {d.zero=d}
if(metric==1){ output[gseq,] <- c(cc,
exp(metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="OR")$b),
exp(metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="OR")$ci.lb),
exp(metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="OR")$ci.ub))
}
if(metric==2){
output[gseq,] <- c(cc,
exp(metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="RR")$b),
exp(metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="RR")$ci.lb),
exp(metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="RR")$ci.ub))
}
if(metric==3){
output[gseq,] <- c(cc,
metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="RD")$b,
metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="RD")$ci.lb,
metafor::rma(ai=a.all,bi=b.all,ci=c.all,di=d.all,measure="RD")$ci.ub)
} # End of if condition testing for metric type
if(metric==4){ output[gseq,] <- c(cc,
exp(metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="OR")$b),
exp(metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="OR")$ci.lb),
exp(metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="OR")$ci.ub))
}
if(metric==5){
output[gseq,] <- c(cc,
exp(metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="RR")$b),
exp(metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="RR")$ci.lb),
exp(metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="RR")$ci.ub))
}
if(metric==6){
output[gseq,] <- c(cc,
metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="RD")$b,
metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="RD")$ci.lb,
metafor::rma(ai=a.zero,bi=b.zero,ci=c.zero,di=d.zero,measure="RD")$ci.ub)
} # End of if condition testing for metric type
} # End of inner for loop iterating over gseq (number of cc to run through)
return(output)
} # End of function
####################################################################################################
#' This Function to calculate OR/RR/RD and the LHS/RHS of change
#' @param odf.row - description
#' @param cc.vals - description
#' @return A dataframe with
#' \item{Full.Slevel.df}{Sequence}
#' @family Internal methods
#' @keywords internal
In.Each.Row <-function(odf.row,cc.vals)
{
iterations = length(cc.vals)
output.OR <- matrix(ncol=2, nrow=iterations)
output.RR <- matrix(ncol=2, nrow=iterations)
output.RD <- matrix(ncol=2, nrow=iterations)
for(j in 1:iterations){
cc.value = cc.vals[j]
ora.results=Calculate.cc.OR(odf.row, cc.value )
output.OR[j,] <- as.matrix(ora.results)
rra.results=Calculate.cc.RR(odf.row, cc.value )
output.RR[j,] <- as.matrix(rra.results)
rda.results=Calculate.cc.RD(odf.row, cc.value )
output.RD[j,] <- as.matrix(rda.results)
}
odf.or=data.frame(t(output.OR[,1]),t(output.OR[,2]))
odf.rr=data.frame(t(output.RR[,1]),t(output.RR[,2]))
odf.rd=data.frame(t(output.RD[,1]),t(output.RD[,2]))
names(odf.or)<-c("or.cc.all.tpm8","or.cc.all.tpm4",
"or.cc.all.001","or.cc.all.01","or.cc.all.015","or.cc.all.1","or.cc.all.15",
"or.cc.all.2","or.cc.all.25","or.cc.all.3","or.cc.all.35",
"or.cc.all.4","or.cc.all.45","or.cc.all.5",
"or.cc.zero.tpm8","or.cc.zero.tpm4",
"or.cc.zero.001","or.cc.zero.01","or.cc.zero.015","or.cc.zero.1","or.cc.zero.15",
"or.cc.zero.2","or.cc.zero.25","or.cc.zero.3","or.cc.zero.35",
"or.cc.zero.4","or.cc.zero.45","or.cc.zero.5")
Smallest.val.all.or= min(output.OR[,1])
Largest.val.all.or= max(output.OR[,1])
Smallest.val.zero.or= min(output.OR[,2])
Largest.val.zero.or= max(output.OR[,2])
names(odf.rr)<-c("rr.cc.all.tpm8","rr.cc.all.tpm4",
"rr.cc.all.001","rr.cc.all.01","rr.cc.all.015","rr.cc.all.1","rr.cc.all.15",
"rr.cc.all.2","rr.cc.all.25","rr.cc.all.3","rr.cc.all.35",
"rr.cc.all.4","rr.cc.all.45","rr.cc.all.5",
"rr.cc.zero.tpm8","rr.cc.zero.tpm4",
"rr.cc.zero.001","rr.cc.zero.01","rr.cc.zero.015","rr.cc.zero.1","rr.cc.zero.15",
"rr.cc.zero.2","rr.cc.zero.25","rr.cc.zero.3","rr.cc.zero.35",
"rr.cc.zero.4","rr.cc.zero.45","rr.cc.zero.5")
Smallest.val.all.rr= min(output.RR[,1])
Largest.val.all.rr= max(output.RR[,1])
Smallest.val.zero.rr= min(output.RR[,2])
Largest.val.zero.rr= max(output.RR[,2])
names(odf.rd)<-c("rd.cc.all.tpm8","rd.cc.all.tpm4",
"rd.cc.all.001","rd.cc.all.01","rd.cc.all.015","rd.cc.all.1","rd.cc.all.15",
"rd.cc.all.2","rd.cc.all.25","rd.cc.all.3","rd.cc.all.35",
"rd.cc.all.4","rd.cc.all.45","rd.cc.all.5",
"rd.cc.zero.tpm8","rd.cc.zero.tpm4",
"rd.cc.zero.001","rd.cc.zero.01","rd.cc.zero.015","rd.cc.zero.1","rd.cc.zero.15",
"rd.cc.zero.2","rd.cc.zero.25","rd.cc.zero.3","rd.cc.zero.35",
"rd.cc.zero.4","rd.cc.zero.45","rd.cc.zero.5")
Smallest.val.all.rd= min(output.RD[,1])
Largest.val.all.rd= max(output.RD[,1])
Smallest.val.zero.rd= min(output.RD[,2])
Largest.val.zero.rd= max(output.RD[,2])
Full.Slevel.df= data.frame(odf.or,Max.or.all= Largest.val.all.or ,Min.or.all= Smallest.val.all.or,
Max.or.zero= Largest.val.zero.or ,Min.or.zero= Smallest.val.zero.or,
odf.rr,Max.rr.all= Largest.val.all.rr ,Min.rr.all= Smallest.val.all.rr,
Max.rr.zero= Largest.val.zero.rr ,Min.rr.zero= Smallest.val.zero.rr,
odf.rd,Max.rd.all= Largest.val.all.rd ,Min.rd.all= Smallest.val.all.rd,
Max.rd.zero= Largest.val.zero.rd ,Min.rd.zero= Smallest.val.zero.rd)
return(Full.Slevel.df)
}
############################################################################################
#' This Function to calculate OR and the point of change
#' @param cc.df - description
#' @param cc.value - description
#' @return A dataframe with
#' \item{r.df}{Sequence}
#' @family Internal methods
#' @keywords internal
Calculate.cc.OR <-function(cc.df,cc.value)
{
a=cc.df$a; b=cc.df$b;
c=cc.df$c; d=cc.df$d;
cc=cc.value
a.all = a + cc
b.all = b + cc
c.all = c + cc
d.all = d + cc
################ Calculat only for cells with zero
if(a==0) {a.zero = a + cc} else {a.zero=a}
if(b==0) {b.zero = b + cc} else {b.zero=b}
if(c==0) {c.zero = c + cc} else {c.zero=c}
if(d==0) {d.zero = d + cc} else {d.zero=d}
ora.all = (a.all*d.all)/(b.all*c.all)
ora.zero = (a.zero*d.zero) / (b.zero*c.zero)
r.df=data.frame(ora=ora.all,orz=ora.zero)
return(r.df)
}
############################################################################################
#' This Function to calculate RR and the point of change
#' @param cc.df - description
#' @param cc.value - description
#' @return A dataframe with
#' \item{r.df}{Sequence}
#' @family Internal methods
#' @keywords internal
Calculate.cc.RR <-function(cc.df,cc.value)
{
a=cc.df$a; b=cc.df$b;
c=cc.df$c; d=cc.df$d;
cc=cc.value
nt=a+b
nc=c+d
a.all = a + cc
b.all = b + cc
c.all = c + cc
d.all = d + cc
nc.all = nc + (2 * cc)
nt.all = nt + (2 * cc)
################ Calculat only for cells with zero
if(a==0) {a.zero = a + cc} else {a.zero=a}
if(b==0) {b.zero = b + cc} else {b.zero=b}
if(c==0) {c.zero = c + cc} else {c.zero=c}
if(d==0) {d.zero = d + cc} else {d.zero=d}
nc.zero = c.zero+d.zero
nt.zero = a.zero+b.zero
rr.all = (a.all / nt.all) / (c.all / nc.all)
rr.zero = (a.zero / nt.zero) / (c.zero / nc.zero)
r.rr.df=data.frame(rra=rr.all,rrz=rr.zero)
return(r.rr.df)
}
############################################################################################
#' This Function to calculate RD and the point of change
#' @param cc.df - description
#' @param cc.value - description
#' @return A dataframe with
#' \item{r.df}{Sequence}
#' @family Internal methods
#' @keywords internal
Calculate.cc.RD <-function(cc.df,cc.value)
{
a=cc.df$a; b=cc.df$b;
c=cc.df$c; d=cc.df$d;
cc=cc.value
nt=a+b
nc=c+d
a.all = a + cc
b.all = b + cc
c.all = c + cc
d.all = d + cc
nc.all = nc + (2 * cc)
nt.all = nt + (2 * cc)
################ Calculat only for cells with zero
if(a==0) {a.zero = a + cc} else {a.zero=a}
if(b==0) {b.zero = b + cc} else {b.zero=b}
if(c==0) {c.zero = c + cc} else {c.zero=c}
if(d==0) {d.zero = d + cc} else {d.zero=d}
nc.zero = c.zero+d.zero
nt.zero = a.zero+b.zero
rd.all = (a.all / nt.all) - (c.all / nc.all)
rd.zero = (a.zero / nt.zero) - (c.zero / nc.zero)
r.rd.df=data.frame(rda=rd.all,rdz=rd.zero)
return(r.rd.df)
}
############################################################################################
#' This Is between utility function
#' @param x - description
#' @param a - description
#' @param b - description
#' @return A dataframe with
#' \item{r.df}{Sequence}
#' @family Internal methods
#' @keywords internal
is.between <- function(x, a, b) {
small= min(a,b)
big = max(a,b)
x > small & x < big
}
|
## makeCacheMatrix creates a special "matrix" object that can cache its inverse.
## Returns a list of accessors: set/get for the matrix, setsolve/getsolve for
## the cached inverse (NULL until one has been stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # replacing the matrix invalidates any previously cached inverse
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setsolve <- function(solve) cached_inverse <<- solve
  getsolve <- function() cached_inverse
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above, reusing the cached inverse when one is available
## and storing a freshly computed inverse otherwise.
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting inversed matrix")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
makeCacheMatrix <- function(x = matrix()) {
a <- NULL
set <- function(y) {
x <<- y
a <<- NULL
}
get <- function() x
setsolve <- function(solve) a <<- solve
getsolve <- function() a
list(set = set, get = get,setsolve = setsolve,getsolve = getsolve)
}
## the function "cacheSolve" computes the inverse of the special "matrix" that retured by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
a <- x$getsolve()
if(!is.null(a)) {
message("getting inversed matrix")
return(a)
}
data <- x$get()
a <- solve(data, ...)
x$setsolve(a)
a
} |
#' Find neighbors
#'
#' Takes the number of nodes and the budget allocation and determines the
#' neighboring allocations reachable by moving one budget unit between a
#' node and its cyclic successor.
#' @param nodes The number of places to divide the budget among
#' @param budget_A The allocation of the budget for each node
#' @return Matrix whose rows are the valid (non-negative) neighboring
#'   budget allocation vectors
#' @export
neighbors <- function (nodes, budget_A){
  rows <- matrix(ncol = nodes)  # seed row of NAs, filtered out at the end
  for (pos in seq_len(nodes)) {
    nxt <- if (pos == nodes) 1 else pos + 1  # cyclic successor of pos
    # move one unit from the successor to pos
    gain <- c(budget_A)
    gain[pos] <- gain[pos] + 1
    gain[nxt] <- gain[nxt] - 1
    if (any(gain < 0)) {
      gain <- c()  # drop allocations that go negative
    }
    # move one unit from pos to the successor
    lose <- c(budget_A)
    lose[pos] <- lose[pos] - 1
    lose[nxt] <- lose[nxt] + 1
    if (any(lose < 0)) {
      lose <- c()
    }
    rows <- rbind(rows, gain, lose)
  }
  dimnames(rows) <- list(rep("", nrow(rows)), rep("", ncol(rows)))
  valid_neighbors <- rows[complete.cases(rows), ]
  return(valid_neighbors)
}
#'
#' Takes the number of nodes and the budget allocation and detemines neighboring points
#' @param nodes The number of places to divide the budget among
#' @param budget_A The allocation of the budget for each node
#' @return Returns vector of neighboring budget allocation vectors
#' @export
neighbors <- function (nodes, budget_A){
grand <- matrix(ncol = nodes)
for (i in 1:nodes){
a <- i
new1 <- c(budget_A)
new1[i] <- new1[i] + 1
if (i==nodes){
i <- 0
}
new1[i+1] <- new1[i+1] - 1
for (z in 1:nodes){
if (new1[z]<0){
new1 <- c()
break
}
}
new2 <- c(budget_A)
new2[a] <- new2[a] - 1
if (a==nodes){
a <- 0
}
new2[a+1] <- new2[a+1] + 1
for (y in 1:nodes){
if (new2[y]<0){
new2 <- c()
break
}
}
grand <- rbind(grand, new1, new2)
}
dimnames(grand) <-list(rep("", dim(grand)[1]), rep("", dim(grand)[2]))
grand
valid_neighbors <- grand[complete.cases(grand),]
return(valid_neighbors)
} |
# Convert the rows of one model.id/drug combination into a plain list with
# the pieces needed to build a PDXmodClass object:
#   model.id - unique model id found in the rows
#   drug     - list(join.name = combined drug name, names = per-column names)
#   data     - data.frame of the time-course columns, coerced to their types
#   plus one entry per requested extra column (collapsed when constant).
# exp.mod.dg: data.frame with the rows of a single model.id/drug combination
# extraCol:   names of non-standard columns to carry along, or NULL
.createListFromDF <- function(exp.mod.dg, extraCol=NULL)
{
rtx <- list()
# Work on an all-character copy so the type coercion below is uniform.
exp.mod.dg <- data.frame(lapply(exp.mod.dg, as.character), stringsAsFactors=FALSE)
rtx$model.id = unique(exp.mod.dg$model.id)
# Columns whose name contains "drug" (drug, drug.1, drug.2, ...).
drgColName.No = colnames(exp.mod.dg)[grep("drug",colnames(exp.mod.dg))]
drug = list("join.name" = unique(exp.mod.dg$drug))
if(length(drgColName.No)>1)
{
# Per-column unique drug names (one value per drug column, NAs dropped).
drug.N <- vapply(drgColName.No, function(x){unique(exp.mod.dg[,x])}, FUN.VALUE = character(1))
drug.N <- drug.N[!is.na(drug.N)]
drug[["names"]] = as.list(drug.N)
}
rtx$drug = drug
##------------ set extra col ------------------------------------
if(!is.null(extraCol))
{
for(ec in c(extraCol))
{
vx = exp.mod.dg[, ec]
# Collapse a constant column to a single scalar value.
if(length(unique(vx))==1)
{ vx <- unique(vx) }
rtx[[ec]] <- vx
}
}
# One dose column per named drug column, plus the plain "dose" column.
doseColsNames <- c("dose", gsub("drug", "dose", names(rtx$drug$names)))
dataColName <- c("time", "volume", "width","length",
doseColsNames, "body.weight", "date", "comment")
# Ensure every expected data column exists (filled with NA when absent).
for (w in dataColName)
{
if(is.element(w, colnames(exp.mod.dg))==FALSE)
{
exp.mod.dg[,w] <- NA
}
}
##---- add dose.1 + dose.2 .... to dose
rtxData <- data.frame(lapply(exp.mod.dg[,dataColName], as.character),
stringsAsFactors=FALSE)
##------ change column type for each column ---------------------------
rtxData$time <- as.numeric(rtxData$time)
rtxData$volume<- as.numeric(rtxData$volume)
rtxData$width <- as.numeric(rtxData$width)
rtxData$length<- as.numeric(rtxData$length)
rtxData$body.weight<- as.numeric(rtxData$body.weight)
rtxData$date <- as.Date(rtxData$date)
for(doseCi in doseColsNames)
{
rtxData[ ,doseCi] <- as.numeric(rtxData[ ,doseCi])
}
# Stable ascending ordering of the time-course rows over all data columns.
rtxData <- BBmisc::sortByCol(rtxData , dataColName,
asc = rep(TRUE, length(dataColName)))
rtx$data<- rtxData
return(rtx)
}
## Names of the PDXmodClass slots following the PDX-MI minimal information
## standard, grouped by category below for readability.
## Source
## Meehan, Terrence F., et al. "PDX-MI: minimal information for patient-derived
## tumor xenograft models." Cancer research 77.21 (2017): e62-e66.
## http://cancerres.aacrjournals.org/lookup/doi/10.1158/0008-5472.CAN-17-0582
modelClassS4Vars <- function()
{
  core.vars <- c("model.id", "drug", "data", "treatment.type", "treatment.target")
  patient.vars <- c("patient.id", "patient.sex", "patient.age", "patient.diagnosis",
                    "patient.consent.to.share.data", "patient.ethnicity",
                    "patient.current.treatment.drug",
                    "patient.current.treatment.protocol", "patient.prior.treatment.protocol",
                    "patient.response.to.prior.treatment", "patient.virology.status")
  tumor.vars <- c("tumor.id", "tumor.tissue.of.origin",
                  "tumor.primary.metastasis.recurrence",
                  "tumor.specimen.tissue", "tumor.tissue.histology", "tumor.tumor.grade",
                  "tumor.disease.stage", "tumor.specific.markers",
                  "tumor.fom.untreated.patient",
                  "tumor.original.sample.type", "tumor.from.existing.pdx.model")
  model.vars <- c("model.submitter.pdx.id", "model.mouse.strain.source",
                  "model.strain.immune.system.humanized",
                  "model.type.of.humanization", "model.tumor.preparation",
                  "model.injection.type.and.site",
                  "model.mouse.treatment.for.engraftment", "model.engraftment.rate",
                  "model.engraftment.time",
                  "model.tumor.characterization.technology",
                  "model.tumor.confirmed.not.to.be.of.mouse.origin",
                  "model.response.to.standard.of.care",
                  "model.animal.health.status", "model.passage.qa.performed",
                  "model.treatment.passage", "model.treatment.protocol",
                  "model.treatment.response", "model.tumor.omics",
                  "model.development.of.metastases.in.strain",
                  "model.doubling.time.of.tumor")
  admin.vars <- c("pdx.model.availability", "governance.restriction.for.distribution",
                  "id.publication.data")
  c(core.vars, patient.vars, tumor.vars, model.vars, admin.vars)
}
# Build a PDXmodClass S4 object from the rows of one model.id/drug
# combination.
# exp.mod.dg: data.frame rows for a single model.id/drug pair
# extraCol:   extra column names forwarded to .createListFromDF()
makePDXModClassS4 <- function(exp.mod.dg, extraCol)
{
  mod.list <- .createListFromDF(exp.mod.dg, extraCol = extraCol)
  mod.s4 <- PDXmodClass(model.id = mod.list$model.id, drug = mod.list$drug,
                        data = mod.list$data)
  # Copy every remaining PDX-MI slot that the list actually provides.
  for (slot.name in modelClassS4Vars())
  {
    if (!is.null(mod.list[[slot.name]]))
    {
      slot(mod.s4, slot.name) <- mod.list[[slot.name]]
    }
  }
  return(mod.s4)
}
###----- define standard column names -----------
# Returns the standard and required column names for an experiment
# data.frame.  (The "requredCols" spelling is part of the return contract.)
.getColumnsDF <- function()
{
  standard.cols <- c("model.id", "drug", "time", "volume", "width", "length",
                     "date", "body.weight", "formula")
  required.cols <- c("model.id", "time", "volume", "drug")
  list(standCol = standard.cols, requredCols = required.cols)
}
# Split an experiment data.frame into a named list of PDXmodClass objects,
# one per unique model.id/drug combination.  Validates the required columns,
# reports the drug/dose columns found, and rebuilds the combined "drug"
# column from drug.1, drug.2, ... when such numbered columns are present.
# experiment: data.frame with one row per measurement; must contain
#             model.id, time, volume and at least one drug column.
# Returns the list of PDXmodClass objects named by their model.id.
experimentSlotfromDf <- function(experiment)
{
clnm <- .getColumnsDF()  # NOTE(review): clnm is unused -- candidate for removal
drugColsName <- colnames(experiment)[grep("drug",colnames(experiment))]
requredCols = c("model.id", "time", "volume", drugColsName)
# Fail early when any required column is missing.
colAbsent = setdiff(requredCols, colnames(experiment))
if(length(colAbsent)>0)
{
msg = sprintf("These colums are required\n%s", paste(colAbsent, collapse = ', '))
stop(msg)
}
if(length(drugColsName)==0)
{
msg = sprintf("Column with drug information requred\nDrug infromation column should be named drug, drug.1 ...\n")
stop(msg)
} else{
msg = sprintf("Drug columns are\n%s\n", paste(drugColsName, collapse = ', '))
cat(msg)
}
doseColsName <- colnames(experiment)[grep("dose",colnames(experiment))]
if(length(doseColsName)==0)
{
msg = sprintf("No dose column found\n")
#warning(msg)
}
# Columns outside the standard set are kept but only stored, not processed.
standardCols <- unique(unlist(c(requredCols, doseColsName, "width","length",
"date", "time", "body.weight", "comment",
modelClassS4Vars())))
extraCol <- setdiff(colnames(experiment), standardCols)
if(length(extraCol)>0)
{
msg <- sprintf("These colums are not part of standard information, therefor will be stored but not processed\n%s\n", paste(extraCol, collapse = ', '))
warning(msg)
}
##---- reformat drug column -----------
# When numbered drug columns (drug.1, drug.2, ...) exist, the combined
# "drug" column is rebuilt as "name1 + name2" with NAs skipped.
drgColName.No = colnames(experiment)[grep("drug\\.",colnames(experiment))]
if(length(drgColName.No)>0)
{
msg = sprintf("drug column will be replaced by %s\n", paste(drgColName.No, collapse = " + "))
cat(msg)
pasteWithoutNA <- function(L, collapse = " + "){paste(L[!is.na(L)], collapse = collapse)}
experiment[, "drug"] = apply(experiment[,drgColName.No], 1, pasteWithoutNA)
}
##------- if drug names are already in drug1 + drug2 split them ----------
# Each model.id must map to exactly one drug combination.
u.modDrg.id <- unique(experiment[, c("model.id", "drug")])
if(any(is.na(u.modDrg.id$model.id)))
{ stop("model.id is NA") }
mdup = u.modDrg.id$model.id[duplicated(u.modDrg.id$model.id)]
if(length(mdup)>0)
{
msg = sprintf("Duplicated model.id\n%s\nuse different model.id for different drugs\n", paste(mdup, collapse = "\n"))
stop(msg)
}
# Build one PDXmodClass object per model.id/drug combination.
expSlot = list()
for (i in seq_len(dim(u.modDrg.id)[1]))
{
exp.mod.dg <- subset(experiment,
experiment$model.id== u.modDrg.id[i, "model.id"] &
experiment$drug == u.modDrg.id[i, "drug"] )
expSlot[[i]] <- makePDXModClassS4(exp.mod.dg, extraCol=extraCol)
}
#mod.ids <- unlist(vapply(expSlot , "[[" , "model.id" ))
mod.ids <- vapply(expSlot, function(mod){slot(mod, "model.id")}, FUN.VALUE = character(1))
# Defensive re-check that the resulting model ids are unique.
if(length(mod.ids) != length(unique(mod.ids)))
{
msg <- sprintf("These model.id are repeated\n%s",
paste(mod.ids[table(mod.ids)!=1], collapse = ', '))
stop(msg)
}
names(expSlot) <- mod.ids
return(expSlot)
}
| /R/create_Experiment_slot.R | no_license | hexingkang1990/Xeva | R | false | false | 7,417 | r | .createListFromDF <- function(exp.mod.dg, extraCol=NULL)
{
# Convert the rows for one model/drug experiment into a plain list with
# components model.id, drug (join.name plus per-column names), any extraCol
# values, and a typed, sorted measurement data frame in $data.
rtx <- list()
# Work on an all-character copy so every column can be re-typed explicitly.
exp.mod.dg <- data.frame(lapply(exp.mod.dg, as.character), stringsAsFactors=FALSE)
rtx$model.id = unique(exp.mod.dg$model.id)
drgColName.No = colnames(exp.mod.dg)[grep("drug",colnames(exp.mod.dg))]
# join.name is the combined drug string (e.g. "A + B") shared by all rows.
drug = list("join.name" = unique(exp.mod.dg$drug))
if(length(drgColName.No)>1)
{
# One unique drug name is expected per drug.N column; vapply enforces this.
drug.N <- vapply(drgColName.No, function(x){unique(exp.mod.dg[,x])}, FUN.VALUE = character(1))
drug.N <- drug.N[!is.na(drug.N)]
drug[["names"]] = as.list(drug.N)
}
rtx$drug = drug
##------------ set extra col ------------------------------------
# Non-standard columns are stored as-is; constant columns collapse to one value.
if(!is.null(extraCol))
{
for(ec in c(extraCol))
{
vx = exp.mod.dg[, ec]
if(length(unique(vx))==1)
{ vx <- unique(vx) }
rtx[[ec]] <- vx
}
}
# Dose column names mirror the drug column names (drug.1 -> dose.1, ...).
doseColsNames <- c("dose", gsub("drug", "dose", names(rtx$drug$names)))
dataColName <- c("time", "volume", "width","length",
doseColsNames, "body.weight", "date", "comment")
# Ensure every expected measurement column exists (filled with NA if absent).
for (w in dataColName)
{
if(is.element(w, colnames(exp.mod.dg))==FALSE)
{
exp.mod.dg[,w] <- NA
}
}
##---- add dose.1 + dose.2 .... to dose
rtxData <- data.frame(lapply(exp.mod.dg[,dataColName], as.character),
stringsAsFactors=FALSE)
##------ change column type for each column ---------------------------
rtxData$time <- as.numeric(rtxData$time)
rtxData$volume<- as.numeric(rtxData$volume)
rtxData$width <- as.numeric(rtxData$width)
rtxData$length<- as.numeric(rtxData$length)
rtxData$body.weight<- as.numeric(rtxData$body.weight)
rtxData$date <- as.Date(rtxData$date)
for(doseCi in doseColsNames)
{
rtxData[ ,doseCi] <- as.numeric(rtxData[ ,doseCi])
}
# Sort measurements by all data columns (time first) for stable ordering.
rtxData <- BBmisc::sortByCol(rtxData , dataColName,
asc = rep(TRUE, length(dataColName)))
rtx$data<- rtxData
return(rtx)
}
## list of PDXMI variables
## Source
## Meehan, Terrence F., et al. "PDX-MI: minimal information for patient-derived
## tumor xenograft models." Cancer research 77.21 (2017): e62-e66.
## http://cancerres.aacrjournals.org/lookup/doi/10.1158/0008-5472.CAN-17-0582
# Ordered vector of PDXmodClass slot names, grouped by the PDX-MI
# minimal-information categories (Meehan et al., Cancer Research 2017).
# The concatenation order matches the historical flat vector exactly.
modelClassS4Vars <- function()
{
  core.vars <- c("model.id", "drug", "data", "treatment.type", "treatment.target")
  patient.vars <- c("patient.id", "patient.sex", "patient.age", "patient.diagnosis",
                    "patient.consent.to.share.data", "patient.ethnicity",
                    "patient.current.treatment.drug",
                    "patient.current.treatment.protocol", "patient.prior.treatment.protocol",
                    "patient.response.to.prior.treatment", "patient.virology.status")
  tumor.vars <- c("tumor.id", "tumor.tissue.of.origin",
                  "tumor.primary.metastasis.recurrence",
                  "tumor.specimen.tissue", "tumor.tissue.histology", "tumor.tumor.grade",
                  "tumor.disease.stage", "tumor.specific.markers",
                  "tumor.fom.untreated.patient",
                  "tumor.original.sample.type", "tumor.from.existing.pdx.model")
  model.vars <- c("model.submitter.pdx.id", "model.mouse.strain.source",
                  "model.strain.immune.system.humanized",
                  "model.type.of.humanization", "model.tumor.preparation",
                  "model.injection.type.and.site",
                  "model.mouse.treatment.for.engraftment", "model.engraftment.rate",
                  "model.engraftment.time",
                  "model.tumor.characterization.technology",
                  "model.tumor.confirmed.not.to.be.of.mouse.origin",
                  "model.response.to.standard.of.care",
                  "model.animal.health.status", "model.passage.qa.performed",
                  "model.treatment.passage", "model.treatment.protocol",
                  "model.treatment.response", "model.tumor.omics",
                  "model.development.of.metastases.in.strain",
                  "model.doubling.time.of.tumor")
  sharing.vars <- c("pdx.model.availability", "governance.restriction.for.distribution",
                    "id.publication.data")
  return(c(core.vars, patient.vars, tumor.vars, model.vars, sharing.vars))
}
# Build a PDXmodClass (S4) object from the rows of one model/drug experiment.
# exp.mod.dg : data frame holding all measurements for a single model.id/drug pair
# extraCol   : names of non-standard columns to carry along unprocessed
makePDXModClassS4 <- function(exp.mod.dg, extraCol)
{
  model.list <- .createListFromDF(exp.mod.dg, extraCol = extraCol)
  pdx.obj <- PDXmodClass(model.id = model.list$model.id,
                         drug = model.list$drug,
                         data = model.list$data)
  # Copy every optional PDX-MI slot that the list representation provides.
  for (slot.name in modelClassS4Vars())
  {
    slot.value <- model.list[[slot.name]]
    if (!is.null(slot.value))
    {
      slot(pdx.obj, slot.name) <- slot.value
    }
  }
  return(pdx.obj)
}
###----- define standard column names -----------
# Standard measurement-column names recognised in an experiment data frame.
# Returns a list with:
#   standCol    - all recognised measurement-level columns
#   requredCols - columns that must always be present (name kept for
#                 backward compatibility with existing callers)
.getColumnsDF <- function()
{
  measurement.cols <- c("model.id", "drug", "time", "volume", "width", "length",
                        "date", "body.weight", "formula")
  mandatory.cols <- c("model.id", "time", "volume", "drug")
  return(list(standCol = measurement.cols, requredCols = mandatory.cols))
}
# Build the experiment slot (a named list of PDXmodClass objects) from a raw
# experiment data frame. The input must contain model.id, time, volume and at
# least one drug column (named drug, drug.1, drug.2, ...); multiple drug.N
# columns are collapsed into a single "drug" column joined with " + ".
# Stops with an informative message on missing columns, NA model.id, or a
# model.id reused across different drugs.
experimentSlotfromDf <- function(experiment)
{
  drugColsName <- colnames(experiment)[grep("drug", colnames(experiment))]
  requredCols <- c("model.id", "time", "volume", drugColsName)
  colAbsent <- setdiff(requredCols, colnames(experiment))
  if (length(colAbsent) > 0)
  {
    msg <- sprintf("These colums are required\n%s", paste(colAbsent, collapse = ', '))
    stop(msg)
  }
  if (length(drugColsName) == 0)
  {
    msg <- sprintf("Column with drug information requred\nDrug infromation column should be named drug, drug.1 ...\n")
    stop(msg)
  } else {
    msg <- sprintf("Drug columns are\n%s\n", paste(drugColsName, collapse = ', '))
    cat(msg)
  }
  doseColsName <- colnames(experiment)[grep("dose", colnames(experiment))]
  if (length(doseColsName) == 0)
  {
    msg <- sprintf("No dose column found\n")
    #warning(msg)
  }
  ## Columns that will be processed explicitly; everything else is "extra".
  standardCols <- unique(unlist(c(requredCols, doseColsName, "width", "length",
                                  "date", "time", "body.weight", "comment",
                                  modelClassS4Vars())))
  extraCol <- setdiff(colnames(experiment), standardCols)
  if (length(extraCol) > 0)
  {
    msg <- sprintf("These colums are not part of standard information, therefor will be stored but not processed\n%s\n", paste(extraCol, collapse = ', '))
    warning(msg)
  }
  ##---- reformat drug column -----------
  drgColName.No <- colnames(experiment)[grep("drug\\.", colnames(experiment))]
  if (length(drgColName.No) > 0)
  {
    msg <- sprintf("drug column will be replaced by %s\n", paste(drgColName.No, collapse = " + "))
    cat(msg)
    pasteWithoutNA <- function(L, collapse = " + ") { paste(L[!is.na(L)], collapse = collapse) }
    ## drop = FALSE keeps a data frame even when only ONE drug.N column
    ## exists; without it apply() would receive a plain vector and fail.
    experiment[, "drug"] <- apply(experiment[, drgColName.No, drop = FALSE], 1, pasteWithoutNA)
  }
  ##------- one PDX object per unique (model.id, drug) pair ----------
  u.modDrg.id <- unique(experiment[, c("model.id", "drug")])
  if (any(is.na(u.modDrg.id$model.id)))
  { stop("model.id is NA") }
  mdup <- u.modDrg.id$model.id[duplicated(u.modDrg.id$model.id)]
  if (length(mdup) > 0)
  {
    msg <- sprintf("Duplicated model.id\n%s\nuse different model.id for different drugs\n", paste(mdup, collapse = "\n"))
    stop(msg)
  }
  expSlot <- list()
  for (i in seq_len(dim(u.modDrg.id)[1]))
  {
    exp.mod.dg <- subset(experiment,
                         experiment$model.id == u.modDrg.id[i, "model.id"] &
                         experiment$drug == u.modDrg.id[i, "drug"])
    expSlot[[i]] <- makePDXModClassS4(exp.mod.dg, extraCol = extraCol)
  }
  mod.ids <- vapply(expSlot, function(mod) { slot(mod, "model.id") }, FUN.VALUE = character(1))
  if (length(mod.ids) != length(unique(mod.ids)))
  {
    ## Report each repeated id once. Indexing mod.ids by table(mod.ids) != 1
    ## (as done previously) misaligned the sorted table with the unsorted
    ## id vector and produced a wrong/recycled selection.
    msg <- sprintf("These model.id are repeated\n%s",
                   paste(unique(mod.ids[duplicated(mod.ids)]), collapse = ', '))
    stop(msg)
  }
  names(expSlot) <- mod.ids
  return(expSlot)
}
|
#' Convert USD to EUR
#'
#' Looks up the most recent USD/EUR exchange rate via the package's
#' \code{eurusd()} helper and multiplies the supplied USD amount by it.
#'
#' @param usd Numeric scalar or vector: the amount(s) in US dollars.
#' @return Numeric of the same length as \code{usd}: the amount(s) in euros.
#'   NOTE(review): correctness assumes \code{eurusd()} returns the
#'   EUR-per-USD rate -- confirm against its implementation.
#' @export
#' @importFrom httr GET content
#' @importFrom logger log_debug
#' @examples
#' convert_usd_to_eur(100)
convert_usd_to_eur <- function(usd) {
# NOTE(review): this local shadows the eurusd() function after the call.
eurusd <- eurusd()
# logger interpolates {round(eurusd, 2)} with glue-style templating.
log_debug('The current USD price is {round(eurusd, 2)}')
usd * eurusd
}
| /R/convert_eur_to_usd.R | no_license | anb133/EurUsd | R | false | false | 430 | r | #' Convert USD to EUR
#'
#' The function looks up the most recent USD/EUR exchange rate and computes the provided
#' USD amount in EUR.
#' @param usd number
#' @return number
#' @export
#' @importFrom httr GET content
#' @importFrom logger log_debug
#' @examples
#' convert_usd_to_eur(100)
# Convert a USD amount to EUR using the most recent exchange rate from the
# package's eurusd() helper. Returns a numeric of the same length as `usd`.
convert_usd_to_eur <- function(usd) {
  # Use a distinct name for the fetched rate so the helper function eurusd()
  # is not shadowed by its own result (the original assigned to `eurusd`).
  rate <- eurusd()
  # glue-style template interpolated by logger; log output is unchanged.
  log_debug('The current USD price is {round(rate, 2)}')
  usd * rate
}
|
#######################################################
# ENTROPY AND MUTUAL INFORMATION ESTIMATION FROM DATA
#######################################################
# Estimates marginal/joint/conditional probabilities, Shannon entropies and
# mutual information for a two-column (sender/receiver) categorical dataset,
# then assesses the significance of I(X;Y) by bootstrap resampling.
library(gplots) #Load R-Library for plotting and histograms (provides hist2d)
#####################################
## LOAD DATA
#####################################
## You can either input the data manually, like this, here:
# x=c(1,1,1,0,1,1,0,1,0,0,1,1,0,0,1,1,1,0,1,1,0,1)
# y=c(1,0,1,0,1,0,1,0,1,1,0,1,1,0,1,1,0,1,1,0,0,1)
## Or upload empirical data from a csv file:
# Set working directory
# NOTE(review): machine-specific absolute path -- adjust before running.
setwd("C:/Users/mhilbert/OneDrive/Analytics Software/R_simple channel")
# import datafile: the input and output columns have to have the same length (number of rows). The file is set up to have a header in the first row; an ID in the 1st column, values for X in the 2nd column (e.g. sender), values for Y in the 3rd column (e.g. receiver).
data <- read.csv("TestCase.csv",stringsAsFactors=FALSE) # exchange TestCase.csv with your own .csv dataset
xRaw=data[,2] # assigns the values of the 2nd colum to X
yRaw=data[,3] # assigns the values of the 3rd colum to Y
## CREATE CLASSES AND BINS
nDatax=length(xRaw) # count the size of the data set
# Get the unique values of the vectors
classesX=sort(unique(xRaw))
classesY=sort(unique(yRaw))
# Number of bin clases in the histogram
nclass=matrix(nrow = 2, ncol = 1)
nclass[1]=length(classesX)
nclass[2]=length(classesY)
# This will print out the size and number of classes (you can comment it off)
print(paste0("There are ", nDatax, " data points, with ", nclass[1], " different classes in X, namely: "))
print(classesX)
print(paste0("There are ", nDatax, " data points, with ", nclass[2], " different classes in Y, namely: "))
print(classesY)
# assign numeric identifiers to categorical characters
# Classes are mapped to the contiguous integer ids 0 .. nclass-1; the
# histogram-based probability functions below rely on this contiguity.
idClassesX=seq(0, nclass[1]-1,1) # idClasses=1:nclass
idClassesY=seq(0, nclass[2]-1,1)
x=matrix(nrow = nDatax, ncol = 1) # replace the characters with numeric identificators, create vectors
y=matrix(nrow = nDatax, ncol = 1)
# Fill the vectors with the numeric id for each class
for(i in 1:nclass[1]) {
x[xRaw==classesX[i]]=idClassesX[i]
}
for(i in 1:nclass[2]) {
y[yRaw==classesY[i]]=idClassesY[i]
}
#####################################
## PROBABILITIES
#####################################
## MARGINAL PROBABILITIES
# Define the function that is a marginal probability
# Empirical marginal probability distribution of x from a histogram.
# nclass == 0: unit-width bins centred on the integer class ids;
# nclass  > 0: let hist() choose roughly nclass bins.
prob <- function(x, nclass) {
  if (nclass == 0) {
    bin.edges <- seq(min(x) - 0.5, max(x) + 0.5)
    histo <- hist(x, plot = FALSE, breaks = bin.edges)
  }
  if (nclass > 0) {
    histo <- hist(x, plot = FALSE, nclass = nclass)
  }
  # Normalise the frequency counts into probabilities summing to 1.
  histo$counts / sum(histo$counts)
}
# Calculate marginal probabilities P(X) and P(Y)
px=prob(x,0) # using the option nclass=0 for automaticaly selection of the number bins for the histogram, otherwise specify the number of classes you want by replacing 0 with another number
py=prob(y,0)
pxdf = data.frame(px,row.names=classesX) # Create a dataframe for the marginal probabilities
pydf = data.frame(py,row.names=classesY)
print(pxdf) # print the marginal probability of X
print(pydf) # print the marginal probability of Y
## JOINT PROBABILITY
# Define the function that is a joint probability
# Empirical joint probability P(X,Y) as a 2-D matrix: rows index X classes,
# columns index Y classes. hist2d counts observations per (x, y) cell and
# nobs is the total number of observations used for normalisation.
probxy <- function(x,y,nclass){
xy <- data.frame(x,y)
pxy <- hist2d(xy,nbins=nclass,show=FALSE)
#Convert frequency count into probability [0,1]
pxy=pxy$counts/pxy$nobs
return(pxy)
}
# Calculate joint probability P(X,Y)
pxy=probxy(x,y,nclass)
pxydf=data.frame(pxy,row.names=classesX) # Create a dataframe for the joint probabilities
names(pxydf)=classesY
print(pxydf) # print the 2D matrix of the joint distribution among classes
## CONDITIONAL (NOISE) PROBABILITY
#Define conditional (noise) probability (based on X)
# Conditional probability matrix P(Y|X) = P(X,Y) / P(X).
# Row i of the joint matrix corresponds to X class i, so dividing the joint
# matrix by the marginal vector px (recycled down the columns) yields
# pY_X[i, j] = pxy[i, j] / px[i], replacing the former double loop.
# The unused computation of py has been removed.
conditionalProb <- function(x, y, nclass) {
  px <- prob(x, 0)
  pxy <- probxy(x, y, nclass)
  # unname() strips any dimnames so the return value matches the bare
  # matrix the loop-based version produced.
  pY_X <- unname(pxy / px)
  return(pY_X)
}
# Calculate conditional (noise) probability
pY_X=conditionalProb(x,y,nclass)
# Each row of P(Y|X) is one conditional distribution (sums to 1 per X class).
px_ydf=data.frame(pY_X,row.names=classesX)
names(px_ydf)=classesY
print("P(Y|X)=P(Y,X)/P(X)")
print(px_ydf)
#####################################
## INFORMATION MEASURES
#####################################
## ENTROPIES
#Define entropy
# Shannon entropy H(X) in bits, estimated from the empirical distribution.
# The nclass argument is kept for interface compatibility, but the histogram
# bins are always derived from the data range via prob(x, 0).
entropy <- function(x, nclass) {
  px <- prob(x, 0)
  # Drop zero-probability bins: by convention 0 * log2(0) = 0, but computing
  # it directly yields NaN (0 * -Inf) and poisons the whole sum whenever the
  # integer class ids have gaps (empty histogram bins).
  px <- px[px > 0]
  H <- -sum(px * log2(px))
  return(H)
}
# Calculate H(x)
Hx=entropy(x,nclass[1])
print(paste0("H(x)=",Hx))
# Calculate H(y)
Hy=entropy(y,nclass[2])
print(paste0("H(y)=",Hy))
## MUTUAL INFORMATION
#Define Mutual Information
# Mutual information I(X;Y) in bits:
#   sum over all class pairs of p(x,y) * log2( p(x,y) / (p(x)p(y)) ),
# skipping empty cells (the 0 * log 0 terms are defined as 0). The marginal
# histograms from prob(., 0) align with the hist2d bins because the class
# ids are contiguous integers 0..nclass-1.
mutualInformation <- function(x,y,nclass){
px=prob(x,0) # Calculate marginal probability (X)
py=prob(y,0) # Calculate marginal probability (Y)
pxy=probxy(x,y,nclass) # Calculate joint probability (X,Y)
MI = 0
for (i in 1:nclass[1]) {
for (j in 1:nclass[2]) {
if(!pxy[i,j]==0){
MI=MI+pxy[i,j]*log2(pxy[i,j]/(px[i]*py[j]))
}
}
}
return(MI)
}
# Calculate I(x,y)
MI=mutualInformation(x,y,nclass)
print(paste0("I(x,y)=",MI))
## JOINT ENTROPY
# Calculate H(x,y) via the identity H(X,Y) = H(X) + H(Y) - I(X;Y)
Hyx=Hx+Hy-MI
print(paste0("H(x,y)=",Hyx))
## CONDITIONAL ENTROPIES
# H(x|y)
Hx_y=Hx-MI
print(paste0("H(x|y)=",Hx_y))
# H(y|x)
Hy_x=Hy-MI
print(paste0("H(y|x)=",Hy_x))
#####################################
## BOOTSTRAPPING SIGNIFICANCE TESTS
#####################################
# number Of Samples:
numberSamples=1000
# Get random seeds for every bootstrap
# NOTE(review): randomSeed is drawn BEFORE set.seed(666), so the per-sample
# seeds themselves differ between runs -- confirm this is intended.
randomSeed=floor(runif(numberSamples, 1,100000))
# randomSeed=1:numberSamples
set.seed(666)
## BOOTSTRAP MARGINAL VARIABLE
# Define bootstrap of a marginal variable
# Draw one bootstrap resample of x: same length, sampled with replacement.
bootstrap1 <- function(x) {
  nDatax <- length(x)
  # sample.int covers every index 1..nDatax uniformly. The previous
  # floor(runif(n, 1, n)) sampled from [1, n) and therefore never produced
  # index n, so the last observation could never enter any resample.
  posBS <- sample.int(nDatax, nDatax, replace = TRUE)
  bx <- x[posBS]
  return(bx)
}
# Bootstrap Y
# Resampling Y alone breaks the X-Y pairing, so the distribution of
# I(X, bootstrapped Y) serves as a reference against which the observed MI
# is judged (share of resamples exceeding the observed value).
byMI=matrix(NA,nrow = numberSamples, ncol = 1) # Create a matrix for all the statistics
diffbyMI=matrix(nrow = numberSamples, ncol = 1) # Create a matrix for all the differences
for(k in 1:numberSamples){
set.seed(randomSeed[k])
by=bootstrap1(y) #Select randomly with repetition the same number of observations
byMI[k]=mutualInformation(x,by,nclass) # I(x,y) Mutual information
diffbyMI[k]=byMI[k]-MI # Calculate the difference with the original
}
#Calculate differences for bootstrapped Y
ShareMIby=sum(as.numeric(diffbyMI>0),na.rm = T)/sum(as.numeric(!is.nan(diffbyMI))) # Calculate % of estimations greater than MI
hist(byMI,main = paste("I(X,Y) =" , MI,"% > =",ShareMIby))
lines(rbind(c(MI,sum(as.numeric(!is.nan(diffbyMI)))),c(MI,0)),lwd=3,col="red")
print(paste0("Share of MI(X,bootstrapped Y) > MI(X,Y)=",ShareMIby))
# Bootstrap X
# Same reference construction with X resampled instead of Y.
bxMI=matrix(NA,nrow = numberSamples, ncol = 1) # Create a matrix for all the statistics
diffbxMI=matrix(nrow = numberSamples, ncol = 1) # Create a matrix for all the differences
for(k in 1:numberSamples){
set.seed(randomSeed[k])
bx=bootstrap1(x) #Select randomly with repetition the same number of observations
bxMI[k]=mutualInformation(bx,y,nclass) # I(x,y) Mutual information
diffbxMI[k]=bxMI[k]-MI # Calculate the difference with the original
}
#Calculate differences for bootstrapped X
ShareMIbx=sum(as.numeric(diffbxMI>0),na.rm = T)/sum(as.numeric(!is.nan(diffbxMI))) # Calculate % of estimations greater than MI
hist(bxMI,main = paste("I(X,Y) =" , MI,"% > =",ShareMIbx))
lines(rbind(c(MI,sum(as.numeric(!is.nan(diffbxMI)))),c(MI,0)),lwd=3,col="red")
print(paste0("Share of MI(bootstrapped X , Y) > MI(X,Y)=",ShareMIbx))
## BOOTSTRAP JOINT VARIABLE
# Define bootstrap of a joint variable (x,y) pairs
# Draw a paired bootstrap resample of (x, y): the same random positions are
# used for both vectors so the pairing is preserved. Returns a 2-column
# matrix (x in column 1, y in column 2), or NaN if the lengths differ.
bootstrap2 <- function(x, y) {
  nDatax <- length(x)
  nDatay <- length(y)
  if (nDatax == nDatay) {
    # sample.int reaches every index 1..nDatax; the previous
    # floor(runif(., 1, n)) never returned index n, silently excluding the
    # last (x, y) pair from all resamples.
    # NOTE(review): the resample has 2*n pairs, as in the original code --
    # a conventional bootstrap would use n; confirm this is intended.
    posBS <- sample.int(nDatax, nDatax * 2, replace = TRUE)
    bxy <- matrix(nrow = nDatax * 2, ncol = 2)
    bxy[, 1] <- x[posBS]
    bxy[, 2] <- y[posBS]
  } else {
    print("SIZE OF X AND Y MUST BE THE SAME")
    bxy <- NaN
  }
  return(bxy)
}
#Bootstrap (x,y) pairs
bxy=bootstrap2(x,y)
bx=bxy[,1]
by=bxy[,2]
# Paired resampling preserves the X-Y association; the spread of the
# resulting MI values approximates the sampling variability of I(X,Y).
bMI=matrix(NA,nrow = numberSamples, ncol = 1) # Create a matrix for all the statistics
diffbMI=matrix(nrow = numberSamples, ncol = 1) # Create a matrix for all the differences
for(k in 1:numberSamples){
set.seed(randomSeed[k])
bxy=bootstrap2(x,y)
bx=bxy[,1]
by=bxy[,2]
bMI[k]=mutualInformation(bx,by,nclass) # I(x,y) Mutual information
diffbMI[k]=bMI[k]-MI # Calculate the difference with the original
}
# Estimate the % of estimations greater than MI and P(Y|X)
ShareMIb=sum(as.numeric(diffbMI>0),na.rm = T)/sum(as.numeric(!is.nan(diffbMI)))
hist(bMI,main = paste("I(X,Y) =" , MI,"% > =",ShareMIb))
lines(rbind(c(MI,sum(as.numeric(!is.nan(diffbMI)))),c(MI,0)),lwd=3,col="red")
print(paste0("MI(bootstrapped pairs x,y) > MI(x,y)=",ShareMIb))
# Calculate and print joint probability of bootstrapped pairs (x,y)
# (bx/by here are from the LAST resample of the loop above.)
pbxy=probxy(bx,by,nclass)
pbxydf=data.frame(pbxy,row.names=classesX) # Create a dataframe for the joint probabilities
names(pbxydf)=classesY
print(pbxydf) # print the 2D matrix of the joint distribution among classes
| /Simple Channel Measures from DATA.R | no_license | martinhilbert/entropy-and-mutual-information | R | false | false | 10,047 | r | #######################################################
# ENTROPY AND MUTUAL INFORMATION ESTIMATION FROM DATA
#######################################################
# Estimates marginal/joint/conditional probabilities, Shannon entropies and
# mutual information for a two-column (sender/receiver) categorical dataset,
# then assesses the significance of I(X;Y) by bootstrap resampling.
library(gplots) #Load R-Library for plotting and histograms (provides hist2d)
#####################################
## LOAD DATA
#####################################
## You can either input the data manually, like this, here:
# x=c(1,1,1,0,1,1,0,1,0,0,1,1,0,0,1,1,1,0,1,1,0,1)
# y=c(1,0,1,0,1,0,1,0,1,1,0,1,1,0,1,1,0,1,1,0,0,1)
## Or upload empirical data from a csv file:
# Set working directory
# NOTE(review): machine-specific absolute path -- adjust before running.
setwd("C:/Users/mhilbert/OneDrive/Analytics Software/R_simple channel")
# import datafile: the input and output columns have to have the same length (number of rows). The file is set up to have a header in the first row; an ID in the 1st column, values for X in the 2nd column (e.g. sender), values for Y in the 3rd column (e.g. receiver).
data <- read.csv("TestCase.csv",stringsAsFactors=FALSE) # exchange TestCase.csv with your own .csv dataset
xRaw=data[,2] # assigns the values of the 2nd colum to X
yRaw=data[,3] # assigns the values of the 3rd colum to Y
## CREATE CLASSES AND BINS
nDatax=length(xRaw) # count the size of the data set
# Get the unique values of the vectors
classesX=sort(unique(xRaw))
classesY=sort(unique(yRaw))
# Number of bin clases in the histogram
nclass=matrix(nrow = 2, ncol = 1)
nclass[1]=length(classesX)
nclass[2]=length(classesY)
# This will print out the size and number of classes (you can comment it off)
print(paste0("There are ", nDatax, " data points, with ", nclass[1], " different classes in X, namely: "))
print(classesX)
print(paste0("There are ", nDatax, " data points, with ", nclass[2], " different classes in Y, namely: "))
print(classesY)
# assign numeric identifiers to categorical characters
# Classes are mapped to the contiguous integer ids 0 .. nclass-1; the
# histogram-based probability functions below rely on this contiguity.
idClassesX=seq(0, nclass[1]-1,1) # idClasses=1:nclass
idClassesY=seq(0, nclass[2]-1,1)
x=matrix(nrow = nDatax, ncol = 1) # replace the characters with numeric identificators, create vectors
y=matrix(nrow = nDatax, ncol = 1)
# Fill the vectors with the numeric id for each class
for(i in 1:nclass[1]) {
x[xRaw==classesX[i]]=idClassesX[i]
}
for(i in 1:nclass[2]) {
y[yRaw==classesY[i]]=idClassesY[i]
}
#####################################
## PROBABILITIES
#####################################
## MARGINAL PROBABILITIES
# Define the function that is a marginal probability
# Empirical marginal probability distribution of x from a histogram.
# nclass == 0: unit-width bins centred on the integer class ids;
# nclass  > 0: let hist() choose roughly nclass bins.
prob <- function(x, nclass) {
  if (nclass == 0) {
    bin.edges <- seq(min(x) - 0.5, max(x) + 0.5)
    histo <- hist(x, plot = FALSE, breaks = bin.edges)
  }
  if (nclass > 0) {
    histo <- hist(x, plot = FALSE, nclass = nclass)
  }
  # Normalise the frequency counts into probabilities summing to 1.
  histo$counts / sum(histo$counts)
}
# Calculate marginal probabilities P(X) and P(Y)
px=prob(x,0) # using the option nclass=0 for automaticaly selection of the number bins for the histogram, otherwise specify the number of classes you want by replacing 0 with another number
py=prob(y,0)
pxdf = data.frame(px,row.names=classesX) # Create a dataframe for the marginal probabilities
pydf = data.frame(py,row.names=classesY)
print(pxdf) # print the marginal probability of X
print(pydf) # print the marginal probability of Y
## JOINT PROBABILITY
# Define the function that is a joint probability
# Empirical joint probability P(X,Y) as a 2-D matrix: rows index X classes,
# columns index Y classes. hist2d counts observations per (x, y) cell and
# nobs is the total number of observations used for normalisation.
probxy <- function(x,y,nclass){
xy <- data.frame(x,y)
pxy <- hist2d(xy,nbins=nclass,show=FALSE)
#Convert frequency count into probability [0,1]
pxy=pxy$counts/pxy$nobs
return(pxy)
}
# Calculate joint probability P(X,Y)
pxy=probxy(x,y,nclass)
pxydf=data.frame(pxy,row.names=classesX) # Create a dataframe for the joint probabilities
names(pxydf)=classesY
print(pxydf) # print the 2D matrix of the joint distribution among classes
## CONDITIONAL (NOISE) PROBABILITY
#Define conditional (noise) probability (based on X)
# Conditional probability matrix P(Y|X) = P(X,Y) / P(X).
# Row i of the joint matrix corresponds to X class i, so dividing the joint
# matrix by the marginal vector px (recycled down the columns) yields
# pY_X[i, j] = pxy[i, j] / px[i], replacing the former double loop.
# The unused computation of py has been removed.
conditionalProb <- function(x, y, nclass) {
  px <- prob(x, 0)
  pxy <- probxy(x, y, nclass)
  # unname() strips any dimnames so the return value matches the bare
  # matrix the loop-based version produced.
  pY_X <- unname(pxy / px)
  return(pY_X)
}
# Calculate conditional (noise) probability
pY_X=conditionalProb(x,y,nclass)
# Each row of P(Y|X) is one conditional distribution (sums to 1 per X class).
px_ydf=data.frame(pY_X,row.names=classesX)
names(px_ydf)=classesY
print("P(Y|X)=P(Y,X)/P(X)")
print(px_ydf)
#####################################
## INFORMATION MEASURES
#####################################
## ENTROPIES
#Define entropy
# Shannon entropy H(X) in bits, estimated from the empirical distribution.
# The nclass argument is kept for interface compatibility, but the histogram
# bins are always derived from the data range via prob(x, 0).
entropy <- function(x, nclass) {
  px <- prob(x, 0)
  # Drop zero-probability bins: by convention 0 * log2(0) = 0, but computing
  # it directly yields NaN (0 * -Inf) and poisons the whole sum whenever the
  # integer class ids have gaps (empty histogram bins).
  px <- px[px > 0]
  H <- -sum(px * log2(px))
  return(H)
}
# Calculate H(x)
Hx=entropy(x,nclass[1])
print(paste0("H(x)=",Hx))
# Calculate H(y)
Hy=entropy(y,nclass[2])
print(paste0("H(y)=",Hy))
## MUTUAL INFORMATION
#Define Mutual Information
# Mutual information I(X;Y) in bits:
#   sum over all class pairs of p(x,y) * log2( p(x,y) / (p(x)p(y)) ),
# skipping empty cells (the 0 * log 0 terms are defined as 0). The marginal
# histograms from prob(., 0) align with the hist2d bins because the class
# ids are contiguous integers 0..nclass-1.
mutualInformation <- function(x,y,nclass){
px=prob(x,0) # Calculate marginal probability (X)
py=prob(y,0) # Calculate marginal probability (Y)
pxy=probxy(x,y,nclass) # Calculate joint probability (X,Y)
MI = 0
for (i in 1:nclass[1]) {
for (j in 1:nclass[2]) {
if(!pxy[i,j]==0){
MI=MI+pxy[i,j]*log2(pxy[i,j]/(px[i]*py[j]))
}
}
}
return(MI)
}
# Calculate I(x,y)
MI=mutualInformation(x,y,nclass)
print(paste0("I(x,y)=",MI))
## JOINT ENTROPY
# Calculate H(x,y) via the identity H(X,Y) = H(X) + H(Y) - I(X;Y)
Hyx=Hx+Hy-MI
print(paste0("H(x,y)=",Hyx))
## CONDITIONAL ENTROPIES
# H(x|y)
Hx_y=Hx-MI
print(paste0("H(x|y)=",Hx_y))
# H(y|x)
Hy_x=Hy-MI
print(paste0("H(y|x)=",Hy_x))
#####################################
## BOOTSTRAPPING SIGNIFICANCE TESTS
#####################################
# number Of Samples:
numberSamples=1000
# Get random seeds for every bootstrap
# NOTE(review): randomSeed is drawn BEFORE set.seed(666), so the per-sample
# seeds themselves differ between runs -- confirm this is intended.
randomSeed=floor(runif(numberSamples, 1,100000))
# randomSeed=1:numberSamples
set.seed(666)
## BOOTSTRAP MARGINAL VARIABLE
# Define bootstrap of a marginal variable
# Draw one bootstrap resample of x: same length, sampled with replacement.
bootstrap1 <- function(x) {
  nDatax <- length(x)
  # sample.int covers every index 1..nDatax uniformly. The previous
  # floor(runif(n, 1, n)) sampled from [1, n) and therefore never produced
  # index n, so the last observation could never enter any resample.
  posBS <- sample.int(nDatax, nDatax, replace = TRUE)
  bx <- x[posBS]
  return(bx)
}
# Bootstrap Y
# Resampling Y alone breaks the X-Y pairing, so the distribution of
# I(X, bootstrapped Y) serves as a reference against which the observed MI
# is judged (share of resamples exceeding the observed value).
byMI=matrix(NA,nrow = numberSamples, ncol = 1) # Create a matrix for all the statistics
diffbyMI=matrix(nrow = numberSamples, ncol = 1) # Create a matrix for all the differences
for(k in 1:numberSamples){
set.seed(randomSeed[k])
by=bootstrap1(y) #Select randomly with repetition the same number of observations
byMI[k]=mutualInformation(x,by,nclass) # I(x,y) Mutual information
diffbyMI[k]=byMI[k]-MI # Calculate the difference with the original
}
#Calculate differences for bootstrapped Y
ShareMIby=sum(as.numeric(diffbyMI>0),na.rm = T)/sum(as.numeric(!is.nan(diffbyMI))) # Calculate % of estimations greater than MI
hist(byMI,main = paste("I(X,Y) =" , MI,"% > =",ShareMIby))
lines(rbind(c(MI,sum(as.numeric(!is.nan(diffbyMI)))),c(MI,0)),lwd=3,col="red")
print(paste0("Share of MI(X,bootstrapped Y) > MI(X,Y)=",ShareMIby))
# Bootstrap X
# Same reference construction with X resampled instead of Y.
bxMI=matrix(NA,nrow = numberSamples, ncol = 1) # Create a matrix for all the statistics
diffbxMI=matrix(nrow = numberSamples, ncol = 1) # Create a matrix for all the differences
for(k in 1:numberSamples){
set.seed(randomSeed[k])
bx=bootstrap1(x) #Select randomly with repetition the same number of observations
bxMI[k]=mutualInformation(bx,y,nclass) # I(x,y) Mutual information
diffbxMI[k]=bxMI[k]-MI # Calculate the difference with the original
}
#Calculate differences for bootstrapped X
ShareMIbx=sum(as.numeric(diffbxMI>0),na.rm = T)/sum(as.numeric(!is.nan(diffbxMI))) # Calculate % of estimations greater than MI
hist(bxMI,main = paste("I(X,Y) =" , MI,"% > =",ShareMIbx))
lines(rbind(c(MI,sum(as.numeric(!is.nan(diffbxMI)))),c(MI,0)),lwd=3,col="red")
print(paste0("Share of MI(bootstrapped X , Y) > MI(X,Y)=",ShareMIbx))
## BOOTSTRAP JOINT VARIABLE
# Define bootstrap of a joint variable (x,y) pairs
# Draw a paired bootstrap resample of (x, y): the same random positions are
# used for both vectors so the pairing is preserved. Returns a 2-column
# matrix (x in column 1, y in column 2), or NaN if the lengths differ.
bootstrap2 <- function(x, y) {
  nDatax <- length(x)
  nDatay <- length(y)
  if (nDatax == nDatay) {
    # sample.int reaches every index 1..nDatax; the previous
    # floor(runif(., 1, n)) never returned index n, silently excluding the
    # last (x, y) pair from all resamples.
    # NOTE(review): the resample has 2*n pairs, as in the original code --
    # a conventional bootstrap would use n; confirm this is intended.
    posBS <- sample.int(nDatax, nDatax * 2, replace = TRUE)
    bxy <- matrix(nrow = nDatax * 2, ncol = 2)
    bxy[, 1] <- x[posBS]
    bxy[, 2] <- y[posBS]
  } else {
    print("SIZE OF X AND Y MUST BE THE SAME")
    bxy <- NaN
  }
  return(bxy)
}
#Bootstrap (x,y) pairs
bxy=bootstrap2(x,y)
bx=bxy[,1]
by=bxy[,2]
# Paired resampling preserves the X-Y association; the spread of the
# resulting MI values approximates the sampling variability of I(X,Y).
bMI=matrix(NA,nrow = numberSamples, ncol = 1) # Create a matrix for all the statistics
diffbMI=matrix(nrow = numberSamples, ncol = 1) # Create a matrix for all the differences
for(k in 1:numberSamples){
set.seed(randomSeed[k])
bxy=bootstrap2(x,y)
bx=bxy[,1]
by=bxy[,2]
bMI[k]=mutualInformation(bx,by,nclass) # I(x,y) Mutual information
diffbMI[k]=bMI[k]-MI # Calculate the difference with the original
}
# Estimate the % of estimations greater than MI and P(Y|X)
ShareMIb=sum(as.numeric(diffbMI>0),na.rm = T)/sum(as.numeric(!is.nan(diffbMI)))
hist(bMI,main = paste("I(X,Y) =" , MI,"% > =",ShareMIb))
lines(rbind(c(MI,sum(as.numeric(!is.nan(diffbMI)))),c(MI,0)),lwd=3,col="red")
print(paste0("MI(bootstrapped pairs x,y) > MI(x,y)=",ShareMIb))
# Calculate and print joint probability of bootstrapped pairs (x,y)
# (bx/by here are from the LAST resample of the loop above.)
pbxy=probxy(bx,by,nclass)
pbxydf=data.frame(pbxy,row.names=classesX) # Create a dataframe for the joint probabilities
names(pbxydf)=classesY
print(pbxydf) # print the 2D matrix of the joint distribution among classes
|
library(shiny)
# Player statistics for the 2018/19 NBA season; read once at app start-up.
players <- read.csv("data/nba2018.csv")
# UI: sidebar with a VORP slider and a team selector, main panel with a
# player count for the whole dataset.
ui <- fluidPage(
titlePanel("NBA 2018/19 Player Stats"),
sidebarLayout(
sidebarPanel(
"Exploring all player stats from the NBA 2018/19 season",
br(),
sliderInput(
inputId = "num",
label = "Player VORP rating at least",
min = -3,
max = 10,
value = 0
),
h3("Filters:"),
selectInput(
inputId="Team",
label = "Team",
choices = unique(players$Team),
selected = "Golden State Warriors"
)
),
mainPanel(
strong(
"There are",
nrow(players),
"players in the dataset"
)
)
)
)
# Server is intentionally empty at this stage: the "num" and "Team" inputs
# are not yet wired to any output (the displayed count is computed once from
# the full dataset; later workshop steps presumably add the reactivity).
server <- function(input, output, session) {
}
shinyApp(ui, server)
| /Shiny_Workshop/app_03.R | no_license | melanatech/ODSC2019 | R | false | false | 767 | r | library(shiny)
# Player statistics for the 2018/19 NBA season; read once at app start-up.
players <- read.csv("data/nba2018.csv")
# UI: sidebar with a VORP slider and a team selector, main panel with a
# player count for the whole dataset.
ui <- fluidPage(
titlePanel("NBA 2018/19 Player Stats"),
sidebarLayout(
sidebarPanel(
"Exploring all player stats from the NBA 2018/19 season",
br(),
sliderInput(
inputId = "num",
label = "Player VORP rating at least",
min = -3,
max = 10,
value = 0
),
h3("Filters:"),
selectInput(
inputId="Team",
label = "Team",
choices = unique(players$Team),
selected = "Golden State Warriors"
)
),
mainPanel(
strong(
"There are",
nrow(players),
"players in the dataset"
)
)
)
)
# Server is intentionally empty at this stage: the "num" and "Team" inputs
# are not yet wired to any output (the displayed count is computed once from
# the full dataset; later workshop steps presumably add the reactivity).
server <- function(input, output, session) {
}
shinyApp(ui, server)
---
title: "Final Project"
output: html_document
---
```{r}
# Load data
library(NHANES)
library(tidyverse)
library(glmnet)
# Keep the 2011-12 survey cycle, adults only, and 17 columns of interest;
# na.omit drops any row with a missing value in the selected columns.
small.nhanes <- na.omit(NHANES[NHANES$SurveyYr=="2011_12"
& NHANES$Age > 17,c(1,3,4,8:11,13,17,20,21,25,46,50,51,52,61)])
# NHANES contains repeated IDs; keep only the first record per participant.
small.nhanes <- as.data.frame(small.nhanes %>%
group_by(ID) %>% filter(row_number()==1) )
nrow(small.nhanes)
## Checking whether there are any ID that was repeated. If not ##
## then length(unique(small.nhanes$ID)) and nrow(small.nhanes) are same ##
length(unique(small.nhanes$ID))
## Create training and test set ##
# Fixed seed makes the 400-row training sample reproducible; the test set
# is the complement (all IDs not sampled into train).
set.seed(1003928039)
train <- small.nhanes[sample(seq_len(nrow(small.nhanes)), size = 400),]
nrow(train)
length(which(small.nhanes$ID %in% train$ID))
test <- small.nhanes[!small.nhanes$ID %in% train$ID,]
nrow(test)
# Fit the model with all predictors
# BPSysAve (average systolic blood pressure) regressed on every remaining
# variable; column 1 (ID) is excluded from the predictor set.
model <- lm(train$BPSysAve ~ ., data = train[, -c(1)])
summary(model)
# Diagnostic check
## The hat values ###
h <- hatvalues(model)
# Leverage cut-off 2p/n, with p = 36 model parameters and n = 400.
thresh <- 2 * 36/nrow(train)
w <- which(h > thresh)
w
### The Influential Observations ####
D <- cooks.distance(model)
which(D > qf(0.5, 36, 400-36))
## DFFITS ##
dfits <- dffits(model)
which(abs(dfits) > 2*sqrt(36/400))
## DFBETAS ##
dfb <- dfbetas(model)
which(abs(dfb[,1]) > 2/sqrt(400))
# Residual-based diagnostic plots: normal QQ plot, studentized residuals
# vs fitted values, and the response vs fitted values.
resid <- rstudent(model)
fitted <- predict(model)
par(family = 'serif', mfrow = c(2,2))
qqnorm(resid)
qqline(resid)
plot(resid ~ fitted, type = "p", xlab = "Fitted Values",
ylab = "Standardized Residual", cex.lab = 1.2,
col = "red")
lines(lowess(fitted, resid), col = "blue")
plot(train$BPSysAve ~ fitted,type='p', xlab = "Fitted Values",
ylab = "BPSysAve", cex.lab = 1.2,
col = "red")
abline(lm(train$BPSysAve ~ fitted), lwd = 2, col = "blue")
lines(lowess(fitted,train$BPSysAve), col = "red")
# Check if the predictors should be transformed
library(car)
# Multivariate-response model feeding powerTransform; the Yeo-Johnson
# family (yjPower) is requested, which also handles zero/negative values.
mult <- lm(cbind(train$BPSysAve, train$Gender, train$Age, train$Race3,train$Education, train$MaritalStatus,train$HHIncome, train$Poverty, train$Weight, train$Height, train$BMI, train$Depressed, train$SleepHrsNight, train$SleepTrouble, train$PhysActive,train$SmokeNow) ~ 1)
bc <- powerTransform(mult, family = yjPower)
summary(bc)
library(car)
# Check multicollinearity among the predictors
# NOTE(review): model.6 is defined further down in this document, so this
# call only works when the chunk is re-run after model.6 exists -- consider
# moving it below the model.6 definition.
vif(model.6)
anova(model)
# Check variable criteria according to AIC, AICc, BIC, R2
# Compute fit criteria (R^2, adjusted R^2, AIC, AICc, BIC) for a fitted lm.
# n is the number of observations used in the fit and p the number of
# predictors, both derived from the model itself so that the criteria are
# comparable across candidate models of different sizes.
criteria <- function(model){
n <- length(model$residuals)
# Number of predictors = coefficients minus the intercept. The previous
# hard-coded p <- 15 penalised every candidate model as if it had 15
# predictors, distorting AIC/AICc/BIC comparisons between models.
p <- length(coef(model)) - 1
RSS <- sum(model$residuals^2)
R2 <- summary(model)$r.squared
R2.adj <- summary(model)$adj.r.squared
AIC <- n*log(RSS/n) + 2*p
AICc <- AIC + (2*(p+2)*(p+3))/(n-p-1)
BIC <- n*log(RSS/n) + (p+2)*log(n)
res <- c(R2, R2.adj, AIC, AICc, BIC)
names(res) <- c("R Squared", "Adjsuted R Squared", "AIC", "AICc", "BIC")
return(res)
}
crit <- criteria(model = model)
crit
# Stepwise variable selections starting from the full model.
# k = log(400) gives the BIC penalty, k = 2 the usual AIC penalty.
# NOTE(review): step() with direction = "forward" starting at the FULL model
# has nothing left to add, so those two calls simply return the full model.
sel.var.bic <- step(model, trace = 0, k = log(400), direction = "both")
select_var_b<-attr(terms(sel.var.bic), "term.labels")
select_var_b
sel.var.bic6 <- step(model, trace = 0, k = log(400), direction = "forward")
select_var6<-attr(terms(sel.var.bic6), "term.labels")
select_var6
sel.var.bic2 <- step(model, trace = 0, k = log(400), direction = "backward")
select_var2<-attr(terms(sel.var.bic2), "term.labels")
select_var2
sel.var.aic <- step(model, trace = 0, k = 2, direction = "both")
select_var<-attr(terms(sel.var.aic), "term.labels")
select_var
sel.var.aic4 <- step(model, trace = 0, k = 2, direction = "forward")
select_var4<-attr(terms(sel.var.aic4), "term.labels")
select_var4
sel.var.aic5 <- step(model, trace = 0, k = 2, direction = "backward")
select_var5<-attr(terms(sel.var.aic5), "term.labels")
select_var5
# Select the final model according to those criteria.
# Use bare column names with `data =` (instead of train$...) so the fits are
# identical but predict(..., newdata = ...) works correctly on new data.
model.2 <- lm(BPSysAve ~ Gender + Age, data = train)
crit2 <- criteria(model = model.2)
crit2
model.3 <- lm(BPSysAve ~ Gender + Age + Poverty + Weight + SleepTrouble, data = train)
crit3 <- criteria(model = model.3)
crit3
model.1 <- lm(BPSysAve ~ Age, data = train)
crit1 <- criteria(model = model.1)
crit1
model.4 <- lm(BPSysAve ~ Age + SmokeNow, data = train)
crit4 <- criteria(model = model.4)
crit4
model.5 <- lm(BPSysAve ~ Gender + Age + SmokeNow, data = train)
crit5 <- criteria(model = model.5)
crit5
model.6 <- lm(BPSysAve ~ Gender + Age + Poverty + Weight + SleepTrouble + SmokeNow, data = train)
crit6 <- criteria(model = model.6)
crit6
# NOTE(review): model.7 is both fit and evaluated on the test set itself.
model.7 <- lm(BPSysAve ~ Gender + Age + Poverty + Weight + SleepTrouble, data = test)
crit7 <- criteria(model = model.7)
crit7
summary(model.3)
# Final model diagnostic check (model.1: BPSysAve ~ Age).
# Use the model's actual number of estimated coefficients for the cutoffs
# rather than the full model's hard-coded 36.
p1 <- length(model.1$coefficients)
n1 <- nrow(train)
h1 <- hatvalues(model.1)
# Leverage rule of thumb 2p/n.
thresh1 <- 2 * p1 / n1
w1 <- which(h1 > thresh1)
w1
### The Influential Observations ####
D1 <- cooks.distance(model.1)
which(D1 > qf(0.5, p1, n1 - p1))
## DFFITS ##
# Was dffits(model.3=1), which is a typo/syntax error; model.1 is the model
# being checked throughout this section.
dfits1 <- dffits(model.1)
which(abs(dfits1) > 2*sqrt(p1/n1))
## DFBETAS ##
dfb1 <- dfbetas(model.1)
which(abs(dfb1[,1]) > 2/sqrt(n1))
# Residual diagnostics for the final model.
resid1 <- rstudent(model.1)
fitted1 <- predict(model.1)
par(family = 'serif', mfrow = c(2,2))
qqnorm(resid1)
qqline(resid1)
plot(resid1 ~ fitted1, type = "p", xlab = "Fitted Values",
ylab = "Standardized Residual", cex.lab = 1.2,
col = "red")
lines(lowess(fitted1, resid1), col = "blue")
plot(train$BPSysAve ~ fitted1, type = 'p', xlab = "Fitted Values",
ylab = "BPSysAve", cex.lab = 1.2,
col = "red")
abline(lm(train$BPSysAve ~ fitted1), lwd = 2, col = "blue")
lines(lowess(fitted1, train$BPSysAve), col = "red")
# Produce the criteria comparison table
rbind(crit, crit1, crit2, crit3, crit4, crit5, crit6)
summary(model)
summary(model.1)
# Mean systolic blood pressure by race group in the training set.
tapply(train$BPSysAve, train$Race3, mean)
# NOTE(review): later code in this script compares Gender against lower-case
# "male" — confirm "Male" matches the actual factor levels, otherwise db_e
# will be empty.
db_e <- train[train$Gender == "Male",]
table(db_e$BPSysAve)
## Perform Prediction ##
pred.y <- predict(model, newdata = test, type = "response")
## Prediction error (mean squared prediction error on the held-out test set) ##
mean((test$BPSysAve - pred.y)^2)
# the original model is the best
library(glmnet)
## Fit a ridge penalty (alpha = 0) ##
# NOTE(review): columns 1 and 12 are dropped — presumably the ID and the
# response; confirm against colnames(train).
model.ridge <- glmnet(x = model.matrix( ~ ., data = train[,-c(1,12)]), y = train$BPSysAve,
standardize = T, alpha = 0)
## Perform Prediction ##
pred.y.ridge <- predict(model.ridge, newx = model.matrix( ~ ., data = test[,-c(1,12)]), type = "response")
## Prediction error ##
mean((test$BPSysAve - pred.y.ridge)^2)
## Perform cross validation to choose lambda ##
# NOTE(review): alpha = 1 here, so this cross-validates the LASSO (used below
# for variable selection), not the ridge fit above.
cv.out <- cv.glmnet(x = model.matrix( ~ ., data = train[,-c(1,12)]), y = train$BPSysAve
, standardize = T, alpha = 1)
plot(cv.out)
# lambda.1se: largest lambda within one standard error of the minimum CV error.
best.lambda <- cv.out$lambda.1se
best.lambda
co<-coef(cv.out, s = "lambda.1se")
#Selection of the significant features(predictors)
## threshold for variable selection ##
thresh <- 0.00
# select variables whose lasso coefficient is non-zero #
inds<-which(abs(co) > thresh )
variables<-row.names(co)[inds]
sel.var.lasso<-variables[!(variables %in% '(Intercept)')]
sel.var.lasso
## Step wise regression: cross-validated calibration of the AIC picks ###
library(rms)
set.seed(1003928039)
### Cross Validation and prediction performance of AIC based selection ###
ols.aic1 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(select_var, "BPSysAve"))],
x=T, y=T, model = T)
ols.aic2 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Gender", "Age","SmokeNow","Poverty","Weight","SleepTrouble", "BPSysAve"))],
x=T, y=T, model = T)
ols.aic <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(select_var4, "BPSysAve"))],
x=T, y=T, model = T)
## 10 fold cross validation ##
aic.cross <- calibrate(ols.aic, method = "crossvalidation", B = 10)
aic.cross1 <- calibrate(ols.aic1, method = "crossvalidation", B = 10)
aic.cross2 <- calibrate(ols.aic2, method = "crossvalidation", B = 10)
## Calibration plot ##
# Was a bare `mfrow = c(2,2)` (just an unused variable); par() is required
# to actually set the 2x2 plotting layout.
par(mfrow = c(2,2))
plot(aic.cross, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration for original model")
plot(aic.cross1, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration for model.1")
plot(aic.cross2, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration for model.1 + SmokeNow")
dev.off()
## Test Error ##
pred.aic <- predict(ols.aic, newdata = test[,which(colnames(train) %in% c("Gender", "Age","SmokeNow","Poverty","Weight","SleepTrouble", "BPSysAve"))])
## Prediction error ##
pred.error.AIC <- mean((test$BPSysAve - pred.aic)^2)
pred.error.AIC
set.seed(1003928039)
### Cross Validation and prediction performance of BIC based selection ###
ols.bic <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(select_var_b, "BPSysAve"))],
x=T, y=T, model = T)
ols.bic2 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Age", "BPSysAve"))],
x=T, y=T, model = T)
ols.bic1 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Gender","Age","SmokeNow", "BPSysAve"))],
x=T, y=T, model = T)
ols.bic3 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Age","SmokeNow", "BPSysAve"))],
x=T, y=T, model = T)
## 10 fold cross validation ##
bic.cross <- calibrate(ols.bic, method = "crossvalidation", B = 10)
bic.cross1 <- calibrate(ols.bic1, method = "crossvalidation", B = 10)
bic.cross2 <- calibrate(ols.bic2, method = "crossvalidation", B = 10)
bic.cross3 <- calibrate(ols.bic3, method = "crossvalidation", B = 10)
## Calibration plot ##
# Was a bare `mfrow = c(2,2)` (just an unused variable); par() is required
# to actually set the 2x2 plotting layout.
par(mfrow = c(2,2))
plot(bic.cross, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with BIC")
plot(bic.cross1, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with BIC + SmokeNow")
plot(bic.cross2, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with Lasso")
plot(bic.cross3, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with Lasso + SmokeNow")
dev.off()
## Test Error ##
pred.bic <- predict(ols.bic, newdata = test[,which(colnames(train) %in% c(select_var_b, "BPSysAve"))])
## Prediction error ##
pred.error.BIC <- mean((test$BPSysAve - pred.bic)^2)
pred.error.BIC
set.seed(1003928039)
### Cross Validation and prediction performance of lasso based selection ###
ols.lasso <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(sel.var.lasso, "BPSysAve"))],
x=T, y=T, model = T)
## 10 fold cross validation ##
lasso.cross <- calibrate(ols.lasso, method = "crossvalidation", B = 10)
## Calibration plot ##
pdf("lasso_cross.pdf", height = 8, width = 16)
plot(lasso.cross, las = 1, xlab = "Predicted Probability", main = "Cross-Validation calibration with LASSO")
dev.off()
# Was c(sel.var.lasso, "lpsa"): "lpsa" is the response of the prostate-data
# example this snippet was adapted from; the response here is BPSysAve.
pred.lasso <- predict(ols.lasso, newdata = test[,which(colnames(train) %in% c(sel.var.lasso, "BPSysAve"))])
## Prediction error ##
pred.error.lasso <- mean((test$BPSysAve - pred.lasso)^2)
pred.error.lasso
print(c(pred.error.AIC, pred.error.BIC, pred.error.lasso))
# Data description
# Vectorized counts: the original looped over a hard-coded 1:743; summing a
# logical vector covers all rows and is idiomatic R.
count <- sum(small.nhanes$Gender == "male")
count
mean(small.nhanes$Age)
sd(small.nhanes$Age)
count1 <- sum(small.nhanes$Race3 == "Black")
count1
# Education category counts
sum(small.nhanes$Education=="High School")
sum(small.nhanes$Education=="College Grad")
sum(small.nhanes$Education=="Some College")
sum(small.nhanes$Education=="8th Grade")
sum(small.nhanes$Education=="9 - 11th Grade")
str(small.nhanes$HHIncome)
# Marital status category counts
sum(small.nhanes$MaritalStatus=="Divorced")
sum(small.nhanes$MaritalStatus=="Married")
sum(small.nhanes$MaritalStatus=="NeverMarried")
sum(small.nhanes$MaritalStatus=="LivePartner")
sum(small.nhanes$MaritalStatus=="Widowed")
sum(small.nhanes$MaritalStatus=="Separated")
# Household income bracket counts
# NOTE(review): the first two labels carry a leading space — presumably
# matching the factor levels; verify with levels(small.nhanes$HHIncome).
sum(small.nhanes$HHIncome==" 0-4999")
sum(small.nhanes$HHIncome==" 5000-9999")
sum(small.nhanes$HHIncome=="10000-14999")
sum(small.nhanes$HHIncome=="15000-19999")
sum(small.nhanes$HHIncome=="20000-24999")
sum(small.nhanes$HHIncome=="25000-34999")
sum(small.nhanes$HHIncome=="35000-44999")
sum(small.nhanes$HHIncome=="45000-54999")
sum(small.nhanes$HHIncome=="55000-64999")
sum(small.nhanes$HHIncome=="65000-74999")
sum(small.nhanes$HHIncome=="75000-99999")
sum(small.nhanes$HHIncome=="more 99999")
# Continuous variable summaries
sum(small.nhanes$Poverty)
mean(small.nhanes$Poverty)
sd(small.nhanes$Poverty)
mean(small.nhanes$Weight)
sd(small.nhanes$Weight)
mean(small.nhanes$BPSysAve)
sd(small.nhanes$BPSysAve)
# Depression category counts
sum(small.nhanes$Depressed=='None')
sum(small.nhanes$Depressed=='Several')
sum(small.nhanes$Depressed=='Most')
# NOTE(review): Type_peau looks like a leftover factor-coding demo; it is
# not used elsewhere in the script.
Type_peau<-as.factor(c("None","Several","Most"))
Type_peau
unclass(Type_peau)
mean(small.nhanes$SleepHrsNight)
sd(small.nhanes$SleepHrsNight)
# Count / proportion / spread of the logical (SmokeNow == 'No')
sum(small.nhanes$SmokeNow=='No')
mean(small.nhanes$SmokeNow=='No')
sd(small.nhanes$SmokeNow=='No')
# NOTE(review): sd() on a factor column errors in modern R — confirm
# SleepTrouble's type before running this line.
sd(small.nhanes$SleepTrouble)
# Visualize relationships between some predictors and their relationship with the outcomes
library(ggplot2)
library(ggsci)
# Use bare column names inside aes() (not train$...) so ggplot evaluates them
# in the data argument; the resulting plot is the same.
ggplot(train, aes(SmokeNow, Age)) +
  geom_point(alpha = 0.7, size = 2) +
  scale_color_jama() +
  theme(text = element_text(family = "serif", size = 11), legend.position = "top") +
  xlab("Non-smoke or Smoke") +
  ylab("Age") +
  ggtitle("Age vs. SmokeNow") +
  labs(
    caption = "Source: NHANES survey",
    col = "Blood pressure")
```
| /final project.R | no_license | alisamao09/Is-systolic-blood-pressure-reading-related-to-gender-age-poverty-weight-sleep-trouble-and-smoki | R | false | false | 13,224 | r | ---
title: "final prject"
output: html_document
---
```{r}
# Load data
library(NHANES)
library(tidyverse)
library(glmnet)
# Keep the 2011-12 survey year, adults only (Age > 17), and the selected
# columns; drop rows with any missing value.
small.nhanes <- na.omit(NHANES[NHANES$SurveyYr=="2011_12"
& NHANES$Age > 17,c(1,3,4,8:11,13,17,20,21,25,46,50,51,52,61)])
# Keep a single row per participant ID.
small.nhanes <- as.data.frame(small.nhanes %>%
group_by(ID) %>% filter(row_number()==1) )
nrow(small.nhanes)
## Checking whether there are any ID that was repeated. If not ##
## then length(unique(small.nhanes$ID)) and nrow(small.nhanes) are same ##
length(unique(small.nhanes$ID))
## Create training and test set ##
# Seed fixes the random draw so the 400-row training sample is reproducible.
set.seed(1003928039)
train <- small.nhanes[sample(seq_len(nrow(small.nhanes)), size = 400),]
nrow(train)
# Number of sampled IDs found back in the full data (should be 400).
length(which(small.nhanes$ID %in% train$ID))
# Test set = every row whose ID was not drawn into the training set.
test <- small.nhanes[!small.nhanes$ID %in% train$ID,]
nrow(test)
# Fit the model with all predictors (column 1 = ID is dropped)
model <- lm(train$BPSysAve ~ ., data = train[, -c(1)])
summary(model)
# Diagnostic check
## The hat values ###
h <- hatvalues(model)
# Leverage rule of thumb 2p/n.
# NOTE(review): 36 appears to be the number of estimated coefficients of the
# full model and 400 the training-set size — confirm against
# length(coef(model)) and nrow(train).
thresh <- 2 * 36/nrow(train)
w <- which(h > thresh)
w
### The Influential Observations ####
# Cook's distance compared against the F(0.5; p, n - p) rule of thumb.
D <- cooks.distance(model)
which(D > qf(0.5, 36, 400-36))
## DFFITS ##
dfits <- dffits(model)
which(abs(dfits) > 2*sqrt(36/400))
## DFBETAS ##
dfb <- dfbetas(model)
which(abs(dfb[,1]) > 2/sqrt(400))
# Residual diagnostics: QQ plot of studentized residuals, residuals vs
# fitted, and observed response vs fitted.
resid <- rstudent(model)
fitted <- predict(model)
par(family = 'serif', mfrow = c(2,2))
qqnorm(resid)
qqline(resid)
plot(resid ~ fitted, type = "p", xlab = "Fitted Values",
ylab = "Standardized Residual", cex.lab = 1.2,
col = "red")
lines(lowess(fitted, resid), col = "blue")
plot(train$BPSysAve ~ fitted,type='p', xlab = "Fitted Values",
ylab = "BPSysAve", cex.lab = 1.2,
col = "red")
abline(lm(train$BPSysAve ~ fitted), lwd = 2, col = "blue")
lines(lowess(fitted,train$BPSysAve), col = "red")
# Check if the predictors should be transformed (Yeo-Johnson family, which
# also tolerates zero/negative values)
library(car)
mult <- lm(cbind(train$BPSysAve, train$Gender, train$Age, train$Race3,train$Education, train$MaritalStatus,train$HHIncome, train$Poverty, train$Weight, train$Height, train$BMI, train$Depressed, train$SleepHrsNight, train$SleepTrouble, train$PhysActive,train$SmokeNow) ~ 1)
bc <- powerTransform(mult, family = yjPower)
summary(bc)
# Check multicollinearity among the predictors of the full model.
# (Was vif(model.6); model.6 is not defined until later in the script, so
# running top-to-bottom would fail here. The full model is what this
# section is examining anyway.)
vif(model)
anova(model)
# Check variable criteria according to AIC, AICc, BIC, R2
# Compute model-selection criteria for a fitted lm: R^2, adjusted R^2,
# AIC, AICc and BIC based on the residual sum of squares.
# The number of mean parameters p is taken from the model itself (it was
# hard-coded to 15, which made the AIC/AICc/BIC penalties identical for
# every candidate model and defeated the purpose of the comparison).
# NOTE(review): the AICc/BIC penalties use p + 2, apparently counting the
# variance parameter as well — confirm this is the intended convention.
criteria <- function(model){
  n <- length(model$residuals)
  # Number of estimated coefficients (intercept included).
  p <- length(model$coefficients)
  RSS <- sum(model$residuals^2)
  R2 <- summary(model)$r.squared
  R2.adj <- summary(model)$adj.r.squared
  AIC <- n*log(RSS/n) + 2*p
  AICc <- AIC + (2*(p+2)*(p+3))/(n-p-1)
  BIC <- n*log(RSS/n) + (p+2)*log(n)
  res <- c(R2, R2.adj, AIC, AICc, BIC)
  names(res) <- c("R Squared", "Adjusted R Squared", "AIC", "AICc", "BIC")
  return(res)
}
crit <- criteria(model = model)
crit
# Stepwise variable selections starting from the full model.
# k = log(400) gives the BIC penalty, k = 2 the usual AIC penalty.
# NOTE(review): step() with direction = "forward" starting at the FULL model
# has nothing left to add, so those two calls simply return the full model.
sel.var.bic <- step(model, trace = 0, k = log(400), direction = "both")
select_var_b<-attr(terms(sel.var.bic), "term.labels")
select_var_b
sel.var.bic6 <- step(model, trace = 0, k = log(400), direction = "forward")
select_var6<-attr(terms(sel.var.bic6), "term.labels")
select_var6
sel.var.bic2 <- step(model, trace = 0, k = log(400), direction = "backward")
select_var2<-attr(terms(sel.var.bic2), "term.labels")
select_var2
sel.var.aic <- step(model, trace = 0, k = 2, direction = "both")
select_var<-attr(terms(sel.var.aic), "term.labels")
select_var
sel.var.aic4 <- step(model, trace = 0, k = 2, direction = "forward")
select_var4<-attr(terms(sel.var.aic4), "term.labels")
select_var4
sel.var.aic5 <- step(model, trace = 0, k = 2, direction = "backward")
select_var5<-attr(terms(sel.var.aic5), "term.labels")
select_var5
# Select the final model according to those criteria.
# Use bare column names with `data =` (instead of train$...) so the fits are
# identical but predict(..., newdata = ...) works correctly on new data.
model.2 <- lm(BPSysAve ~ Gender + Age, data = train)
crit2 <- criteria(model = model.2)
crit2
model.3 <- lm(BPSysAve ~ Gender + Age + Poverty + Weight + SleepTrouble, data = train)
crit3 <- criteria(model = model.3)
crit3
model.1 <- lm(BPSysAve ~ Age, data = train)
crit1 <- criteria(model = model.1)
crit1
model.4 <- lm(BPSysAve ~ Age + SmokeNow, data = train)
crit4 <- criteria(model = model.4)
crit4
model.5 <- lm(BPSysAve ~ Gender + Age + SmokeNow, data = train)
crit5 <- criteria(model = model.5)
crit5
model.6 <- lm(BPSysAve ~ Gender + Age + Poverty + Weight + SleepTrouble + SmokeNow, data = train)
crit6 <- criteria(model = model.6)
crit6
# NOTE(review): model.7 is both fit and evaluated on the test set itself.
model.7 <- lm(BPSysAve ~ Gender + Age + Poverty + Weight + SleepTrouble, data = test)
crit7 <- criteria(model = model.7)
crit7
summary(model.3)
# Final model diagnostic check (model.1: BPSysAve ~ Age).
# Use the model's actual number of estimated coefficients for the cutoffs
# rather than the full model's hard-coded 36.
p1 <- length(model.1$coefficients)
n1 <- nrow(train)
h1 <- hatvalues(model.1)
# Leverage rule of thumb 2p/n.
thresh1 <- 2 * p1 / n1
w1 <- which(h1 > thresh1)
w1
### The Influential Observations ####
D1 <- cooks.distance(model.1)
which(D1 > qf(0.5, p1, n1 - p1))
## DFFITS ##
# Was dffits(model.3=1), which is a typo/syntax error; model.1 is the model
# being checked throughout this section.
dfits1 <- dffits(model.1)
which(abs(dfits1) > 2*sqrt(p1/n1))
## DFBETAS ##
dfb1 <- dfbetas(model.1)
which(abs(dfb1[,1]) > 2/sqrt(n1))
# Residual diagnostics for the final model.
resid1 <- rstudent(model.1)
fitted1 <- predict(model.1)
par(family = 'serif', mfrow = c(2,2))
qqnorm(resid1)
qqline(resid1)
plot(resid1 ~ fitted1, type = "p", xlab = "Fitted Values",
ylab = "Standardized Residual", cex.lab = 1.2,
col = "red")
lines(lowess(fitted1, resid1), col = "blue")
plot(train$BPSysAve ~ fitted1, type = 'p', xlab = "Fitted Values",
ylab = "BPSysAve", cex.lab = 1.2,
col = "red")
abline(lm(train$BPSysAve ~ fitted1), lwd = 2, col = "blue")
lines(lowess(fitted1, train$BPSysAve), col = "red")
# Produce the criteria comparison table
rbind(crit, crit1, crit2, crit3, crit4, crit5, crit6)
summary(model)
summary(model.1)
# Mean systolic blood pressure by race group in the training set.
tapply(train$BPSysAve, train$Race3, mean)
# NOTE(review): later code in this script compares Gender against lower-case
# "male" — confirm "Male" matches the actual factor levels, otherwise db_e
# will be empty.
db_e <- train[train$Gender == "Male",]
table(db_e$BPSysAve)
## Perform Prediction ##
pred.y <- predict(model, newdata = test, type = "response")
## Prediction error (mean squared prediction error on the held-out test set) ##
mean((test$BPSysAve - pred.y)^2)
# the original model is the best
library(glmnet)
## Fit a ridge penalty (alpha = 0) ##
# NOTE(review): columns 1 and 12 are dropped — presumably the ID and the
# response; confirm against colnames(train).
model.ridge <- glmnet(x = model.matrix( ~ ., data = train[,-c(1,12)]), y = train$BPSysAve,
standardize = T, alpha = 0)
## Perform Prediction ##
pred.y.ridge <- predict(model.ridge, newx = model.matrix( ~ ., data = test[,-c(1,12)]), type = "response")
## Prediction error ##
mean((test$BPSysAve - pred.y.ridge)^2)
## Perform cross validation to choose lambda ##
# NOTE(review): alpha = 1 here, so this cross-validates the LASSO (used below
# for variable selection), not the ridge fit above.
cv.out <- cv.glmnet(x = model.matrix( ~ ., data = train[,-c(1,12)]), y = train$BPSysAve
, standardize = T, alpha = 1)
plot(cv.out)
# lambda.1se: largest lambda within one standard error of the minimum CV error.
best.lambda <- cv.out$lambda.1se
best.lambda
co<-coef(cv.out, s = "lambda.1se")
#Selection of the significant features(predictors)
## threshold for variable selection ##
thresh <- 0.00
# select variables whose lasso coefficient is non-zero #
inds<-which(abs(co) > thresh )
variables<-row.names(co)[inds]
sel.var.lasso<-variables[!(variables %in% '(Intercept)')]
sel.var.lasso
## Step wise regression: cross-validated calibration of the AIC picks ###
library(rms)
set.seed(1003928039)
### Cross Validation and prediction performance of AIC based selection ###
ols.aic1 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(select_var, "BPSysAve"))],
x=T, y=T, model = T)
ols.aic2 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Gender", "Age","SmokeNow","Poverty","Weight","SleepTrouble", "BPSysAve"))],
x=T, y=T, model = T)
ols.aic <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(select_var4, "BPSysAve"))],
x=T, y=T, model = T)
## 10 fold cross validation ##
aic.cross <- calibrate(ols.aic, method = "crossvalidation", B = 10)
aic.cross1 <- calibrate(ols.aic1, method = "crossvalidation", B = 10)
aic.cross2 <- calibrate(ols.aic2, method = "crossvalidation", B = 10)
## Calibration plot ##
# Was a bare `mfrow = c(2,2)` (just an unused variable); par() is required
# to actually set the 2x2 plotting layout.
par(mfrow = c(2,2))
plot(aic.cross, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration for original model")
plot(aic.cross1, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration for model.1")
plot(aic.cross2, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration for model.1 + SmokeNow")
dev.off()
## Test Error ##
pred.aic <- predict(ols.aic, newdata = test[,which(colnames(train) %in% c("Gender", "Age","SmokeNow","Poverty","Weight","SleepTrouble", "BPSysAve"))])
## Prediction error ##
pred.error.AIC <- mean((test$BPSysAve - pred.aic)^2)
pred.error.AIC
set.seed(1003928039)
### Cross Validation and prediction performance of BIC based selection ###
ols.bic <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(select_var_b, "BPSysAve"))],
x=T, y=T, model = T)
ols.bic2 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Age", "BPSysAve"))],
x=T, y=T, model = T)
ols.bic1 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Gender","Age","SmokeNow", "BPSysAve"))],
x=T, y=T, model = T)
ols.bic3 <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c("Age","SmokeNow", "BPSysAve"))],
x=T, y=T, model = T)
## 10 fold cross validation ##
bic.cross <- calibrate(ols.bic, method = "crossvalidation", B = 10)
bic.cross1 <- calibrate(ols.bic1, method = "crossvalidation", B = 10)
bic.cross2 <- calibrate(ols.bic2, method = "crossvalidation", B = 10)
bic.cross3 <- calibrate(ols.bic3, method = "crossvalidation", B = 10)
## Calibration plot ##
# Was a bare `mfrow = c(2,2)` (just an unused variable); par() is required
# to actually set the 2x2 plotting layout.
par(mfrow = c(2,2))
plot(bic.cross, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with BIC")
plot(bic.cross1, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with BIC + SmokeNow")
plot(bic.cross2, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with Lasso")
plot(bic.cross3, las = 1, xlab = "Predicted Y", main = "Cross-Validation calibration with Lasso + SmokeNow")
dev.off()
## Test Error ##
pred.bic <- predict(ols.bic, newdata = test[,which(colnames(train) %in% c(select_var_b, "BPSysAve"))])
## Prediction error ##
pred.error.BIC <- mean((test$BPSysAve - pred.bic)^2)
pred.error.BIC
set.seed(1003928039)
### Cross Validation and prediction performance of lasso based selection ###
ols.lasso <- ols(train$BPSysAve ~ ., data = train[,which(colnames(train) %in% c(sel.var.lasso, "BPSysAve"))],
x=T, y=T, model = T)
## 10 fold cross validation ##
lasso.cross <- calibrate(ols.lasso, method = "crossvalidation", B = 10)
## Calibration plot ##
pdf("lasso_cross.pdf", height = 8, width = 16)
plot(lasso.cross, las = 1, xlab = "Predicted Probability", main = "Cross-Validation calibration with LASSO")
dev.off()
# Was c(sel.var.lasso, "lpsa"): "lpsa" is the response of the prostate-data
# example this snippet was adapted from; the response here is BPSysAve.
pred.lasso <- predict(ols.lasso, newdata = test[,which(colnames(train) %in% c(sel.var.lasso, "BPSysAve"))])
## Prediction error ##
pred.error.lasso <- mean((test$BPSysAve - pred.lasso)^2)
pred.error.lasso
print(c(pred.error.AIC, pred.error.BIC, pred.error.lasso))
# Data description
# Vectorized counts: the original looped over a hard-coded 1:743; summing a
# logical vector covers all rows and is idiomatic R.
count <- sum(small.nhanes$Gender == "male")
count
mean(small.nhanes$Age)
sd(small.nhanes$Age)
count1 <- sum(small.nhanes$Race3 == "Black")
count1
# Education category counts
sum(small.nhanes$Education=="High School")
sum(small.nhanes$Education=="College Grad")
sum(small.nhanes$Education=="Some College")
sum(small.nhanes$Education=="8th Grade")
sum(small.nhanes$Education=="9 - 11th Grade")
str(small.nhanes$HHIncome)
# Marital status category counts
sum(small.nhanes$MaritalStatus=="Divorced")
sum(small.nhanes$MaritalStatus=="Married")
sum(small.nhanes$MaritalStatus=="NeverMarried")
sum(small.nhanes$MaritalStatus=="LivePartner")
sum(small.nhanes$MaritalStatus=="Widowed")
sum(small.nhanes$MaritalStatus=="Separated")
# Household income bracket counts
# NOTE(review): the first two labels carry a leading space — presumably
# matching the factor levels; verify with levels(small.nhanes$HHIncome).
sum(small.nhanes$HHIncome==" 0-4999")
sum(small.nhanes$HHIncome==" 5000-9999")
sum(small.nhanes$HHIncome=="10000-14999")
sum(small.nhanes$HHIncome=="15000-19999")
sum(small.nhanes$HHIncome=="20000-24999")
sum(small.nhanes$HHIncome=="25000-34999")
sum(small.nhanes$HHIncome=="35000-44999")
sum(small.nhanes$HHIncome=="45000-54999")
sum(small.nhanes$HHIncome=="55000-64999")
sum(small.nhanes$HHIncome=="65000-74999")
sum(small.nhanes$HHIncome=="75000-99999")
sum(small.nhanes$HHIncome=="more 99999")
# Continuous variable summaries
sum(small.nhanes$Poverty)
mean(small.nhanes$Poverty)
sd(small.nhanes$Poverty)
mean(small.nhanes$Weight)
sd(small.nhanes$Weight)
mean(small.nhanes$BPSysAve)
sd(small.nhanes$BPSysAve)
# Depression category counts
sum(small.nhanes$Depressed=='None')
sum(small.nhanes$Depressed=='Several')
sum(small.nhanes$Depressed=='Most')
# NOTE(review): Type_peau looks like a leftover factor-coding demo; it is
# not used elsewhere in the script.
Type_peau<-as.factor(c("None","Several","Most"))
Type_peau
unclass(Type_peau)
mean(small.nhanes$SleepHrsNight)
sd(small.nhanes$SleepHrsNight)
# Count / proportion / spread of the logical (SmokeNow == 'No')
sum(small.nhanes$SmokeNow=='No')
mean(small.nhanes$SmokeNow=='No')
sd(small.nhanes$SmokeNow=='No')
# NOTE(review): sd() on a factor column errors in modern R — confirm
# SleepTrouble's type before running this line.
sd(small.nhanes$SleepTrouble)
# Visualize relationships between some predictors and their relationship with the outcomes
library(ggplot2)
library(ggsci)
# Use bare column names inside aes() (not train$...) so ggplot evaluates them
# in the data argument; the resulting plot is the same.
ggplot(train, aes(SmokeNow, Age)) +
  geom_point(alpha = 0.7, size = 2) +
  scale_color_jama() +
  theme(text = element_text(family = "serif", size = 11), legend.position = "top") +
  xlab("Non-smoke or Smoke") +
  ylab("Age") +
  ggtitle("Age vs. SmokeNow") +
  labs(
    caption = "Source: NHANES survey",
    col = "Blood pressure")
```
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bulk_in.R
\name{bulk_in}
\alias{bulk_in}
\title{Efficiently bulk load a directory of csv or excel files}
\usage{
bulk_in(input_directory, type, sheet_name = NULL, safe_mode = FALSE)
}
\arguments{
\item{input_directory}{the directory to import the files from}
\item{type}{the type of input file ("csv" or "excel")}
\item{sheet_name}{(optional) name of the excel sheet to import (same in all files) default = NULL}
\item{safe_mode}{should safe mode be used? (default = FALSE)}
}
\value{
A \code{dataframe} containing all of the data from the files
}
\description{
Bulk loads csv or excel files contained in a directory into a dataframe
}
\details{
This function recursively finds all csv / excel files in a folder and loads
them all into a single dataframe. Note will only deal with either excel or
csv not a mixture.
Uses the read_excel function from readxl for excel import - if no
sheet_name is provided (or is NULL) then will read in the first sheet by
default. The column types will be guessed when the data is read in.
The package uses data.table's rbindlist function for performance reasons.
This is fast but binds based on column position rather than matching column
names. If this causes issues then use safe_mode = TRUE which uses a slower
match based on column names.
}
\examples{
\dontrun{
# to add
}
}
| /man/bulk_in.Rd | no_license | MarkGoble/mishMashr | R | false | true | 1,399 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bulk_in.R
\name{bulk_in}
\alias{bulk_in}
\title{Efficiently bulk load a directory of csv or excel files}
\usage{
bulk_in(input_directory, type, sheet_name = NULL, safe_mode = FALSE)
}
\arguments{
\item{input_directory}{the directory to import the files from}
\item{type}{the type of input file ("csv" or "excel")}
\item{sheet_name}{(optional) name of the excel sheet to import (same in all files) default = NULL}
\item{safe_mode}{should safe mode be used? (default = FALSE)}
}
\value{
A \code{dataframe} containing all of the data from the files
}
\description{
Bulk loads csv or excel files contained in a directory into a dataframe
}
\details{
This function recursively finds all csv / excel files in a folder and loads
them all into a single dataframe. Note will only deal with either excel or
csv not a mixture.
Uses the read_excel function from readxl for excel import - if no
sheet_name is provided (or is NULL) then will read in the first sheet by
default. The column types will be guessed when the data is read in.
The package uses data.table's rbindlist function for performance reasons.
This is fast but binds based on column position rather than matching column
names. If this causes issues then use safe_mode = TRUE which uses a slower
match based on column names.
}
\examples{
\dontrun{
# to add
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ValidateModel.R
\name{formatValidationResult}
\alias{formatValidationResult}
\title{Format validation result}
\usage{
formatValidationResult(result, abs_diff = TRUE, tolerance)
}
\arguments{
\item{result}{Validation result to be formatted}
\item{abs_diff}{A logical value indicating whether to validate absolute values}
\item{tolerance}{A numeric value setting tolerance of the comparison}
}
\value{
A list containing the formatted validation results
}
\description{
Format validation result
}
| /man/formatValidationResult.Rd | permissive | USEPA/useeior | R | false | true | 568 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ValidateModel.R
\name{formatValidationResult}
\alias{formatValidationResult}
\title{Format validation result}
\usage{
formatValidationResult(result, abs_diff = TRUE, tolerance)
}
\arguments{
\item{result}{Validation result to be formatted}
\item{abs_diff}{A logical value indicating whether to validate absolute values}
\item{tolerance}{A numeric value setting tolerance of the comparison}
}
\value{
A list containing the formatted validation results
}
\description{
Format validation result
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.being_successful}
\alias{dasl.being_successful}
\title{Being successful}
\format{16 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/being-successful/?sf_paged=3}{Being successful}
}
\description{
In a random sample of U.S. adults surveyed in December 2011, Pew Research asked how important it is “to you personally” to be successful in a high-paying career or profession. Responses are recorded by sex and age.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
www.pewsocialtrends.org/files/2012/04/Women-in-the-Workplace.pdf
}
\concept{Quality Control}
\concept{Tables}
| /man/dasl.being_successful.Rd | no_license | sigbertklinke/mmstat.data | R | false | true | 796 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dasl.R
\docType{data}
\name{dasl.being_successful}
\alias{dasl.being_successful}
\title{Being successful}
\format{16 observations}
\source{
DASL – The Data And Story Library: \href{https://dasl.datadescription.com/datafile/being-successful/?sf_paged=3}{Being successful}
}
\description{
In a random sample of U.S. adults surveyed in December 2011, Pew Research asked how important it is “to you personally” to be successful in a high-paying career or profession. Responses are recorded by sex and age.
}
\details{
\url{https://github.com/sigbertklinke/wwwdata/tree/master/wwwdata/dasl}
}
\references{
www.pewsocialtrends.org/files/2012/04/Women-in-the-Workplace.pdf
}
\concept{Quality Control}
\concept{Tables}
|
## Hello World
## R script created to record a sampling of the bits of code needed to read
## the MOATS TSVs and use them to plot activity inside each MOATS
## Estimated 3 steps (Step 1 thru Step 3.6)
# Likely libraries
library(shiny)
library(ggplot2)
library(stringr)
library(readxl)
library(readr)
# NOTE(review): do not install packages from inside a script; run the
# install once interactively instead.
# install.packages("tidyverse")
library(tidyr)
# 1.) Set working directory
# NOTE(review): hard-coded absolute path; edit for your machine.
setwd("/Users/katherinerovinski/GIT/NWFSC.MUK_MOATS_SMR2019/LabViewLogs.AllMOATS/")
# 2.) Create list of files that will be modified (all .tsv files, recursively).
# List relative to the working directory just set; the original repeated the
# absolute path with inconsistent casing ("MOATs" vs "MOATS"), which would
# fail on a case-sensitive filesystem. Returned paths are now relative to
# the working directory.
files_list = list.files(
  path = ".",
  recursive = TRUE,
  pattern = "^.*\\.tsv$"
)
print(files_list)
# 3.) Create loop of those files to correct rows, columns, date format
# c <- each file being corrected
# NOTE(review): placeholder skeleton — the action comments below mark the
# sub-steps (3.1 through 3.6) still to be implemented; the loop body is
# currently empty.
for (c in files_list) {
#action3.1,
#action3.2,
#action3.3,
#action3.4,
#action3.5,
#action3.6,
}
# 3.1) Delete first 15 rows; set up a dataframe DF1 with the expected
#      dimensions (14423 rows x 16 columns).
# NOTE(review): the original lines
#   DF1 = data.frame([d= (1-14423), (1-16)])   (not valid R syntax)
#   DF1 = data.frame                           (assigned the function itself)
# are replaced with a valid empty frame of the intended size.
DF1 <- data.frame(matrix(nrow = 14423, ncol = 16))
for (c in files_list) {
}
# 3.2) create a column, fill in the value as a date that reads it from the file name
# 3.3) Deleting rows about Xdimension
# 3.4) Correct format for the date time group
# 3.5) Changes the headings of the channels from "Untitled thru Untitled 14" to real names
# 3.6) Create column or an expression that will identify Day & Night
| /06_MOATS_replication_verification/02_Scripts/ext_correction _cripts/TSVfilecorrectionsScript.R | no_license | KROVINSKI/NWFSC.MUK_KRL_SMR2019 | R | false | false | 1,433 | r | ##Hello World
##R script created to record and sampling of the bits of code I think'll need to read the MOATS TSVs and use them to plot activity inside each MOATs
## Estimate 3 steps (Step 1 thru Step 3.6)
#Likely Libraries
library(shiny)
library(ggplot2)
library(stringr)
library(readxl)
library(readr)
install.packages("tidyverse")
library(tidyr)
# 1.) Set working directory
setwd("/Users/katherinerovinski/GIT/NWFSC.MUK_MOATS_SMR2019/LabViewLogs.AllMOATS/")
# 2.) Create list of files that will be modified
files_list = list.files(
path = "/Users/katherinerovinski/GIT/NWFSC.MUK_MOATs_SMR2019/LabViewLogs.AllMOATS/",
recursive = TRUE,
pattern = "^.*\\.tsv$"
)
print(files_list)
# 3.) Create loop of those files to correct rows, columns, date format
# c <- those files being corrected
for (c in files_list) {
#action3.1,
#action3.2,
#action3.3,
#action3.4,
#action3.5,
#action3.6,
}
# 3.1) Delete first 15 rows, setting a dataframe, DF1, and establishing the number of rows and columns
# NOTE(review): the original line `DF1 = data.frame([d= (1-14423), (1-16)])` was not valid
# R syntax ([...] cannot appear there, and `1-14423` is subtraction, not a range).
# The intent appears to be an empty 14423-row x 16-column frame -- confirm dimensions.
DF1 <- as.data.frame(matrix(NA, nrow = 14423, ncol = 16))
for (c in files_list) {
}
# 3.2) create a column, fill in the value as a date that reads it from the file name
# 3.3) Deleting rows about Xdimension
# 3.4) Correct format for the date time group
# 3.5) Changes the headings of the channels from "Untitled thru Untitled 14" to real names
# 3.6) Create column or an expression that will identify Day & Night
|
\name{pbcModels}
\alias{pbcModels}
\alias{pbcGumbel}
\alias{pbcFGM}
\alias{pbcFrank}
\alias{pbcNormal}
\alias{pbcAMH}
\alias{pbcJoe}
\title{
Linking copula families for the PBC model
}
\description{
Linking copula families implemented in the \bold{PBC} package.
}
\usage{
pbcGumbel(graph)
pbcFGM(graph)
pbcFrank(graph)
pbcNormal(graph)
pbcAMH(graph)
pbcJoe(graph)
}
\arguments{
\item{graph}{the graph (of class \code{\link{igraph}}) associated to the PBC copula.}
}
\details{
A pair \eqn{(U_i,U_j)} of the PBC model has copula
\deqn{C_{ij}(u,v) = u^{ 1 - 1/n_i } * v^{ 1 - 1/n_j } * D_{ij}( u^{ 1/n_i }, v^{ 1/n_j } ),}
where \eqn{n_i} and \eqn{n_j} are the number of neighbors in the graph for the variables \eqn{U_i} and \eqn{U_j} respectively (G. Mazo, S. Girard and F. Forbes). The copula families implemented for \eqn{D_{ij}(u,v)} are given below.
\describe{
\item{\code{pbcGumbel}:}{family of Gumbel copulas:
      \deqn{ \exp(-((-\ln(u))^\theta + (-\ln(v))^\theta)^{1/\theta})}{
        exp(-((-ln(u))^theta + (-ln(v))^theta)^(1/theta)) }
with
\eqn{\theta\in[1,\infty)}{theta in [1,Inf)}. }
\item{\code{pbcFGM}:}{family of Farlie-Gumbel-Morgenstern (FGM) copulas:
\deqn{u * v * (1 + \theta * (1 - u) * (1 - v))}{
u * v * (1 + theta * (1 - u) * (1 - v)) }
with
\eqn{\theta \in [-1,1]}{theta in [-1,1]}. }
\item{\code{pbcFrank}:}{family of Frank copulas:
\deqn{- \ln(1 + (\exp(- \theta * u) - 1)*(\exp(- \theta * v) - 1)/(\exp(-\theta) - 1))/\theta}{
- ln(1 + (exp(- theta * u) - 1)*(exp(- theta * v) - 1)/(exp(-theta) - 1))/theta }
with
\eqn{\theta\in(0,\infty)}{theta in (0,Inf)}. }
\item{\code{pbcNormal}:}{family of normal copulas:
\deqn{\exp(((\theta * q(u))^2 + (\theta * q(v))^2 - 2 * \theta
* q(u) * q(v)) / (2 * (- 1 + \theta^2))) / ( 1 - \theta^2)^0.5}{
exp(((theta * q(u))^2 + (theta * q(v))^2 - 2 * theta
* q(u) * q(v)) / (2 * (- 1 + theta^2))) / ( 1 - theta^2)^0.5
}
with
\eqn{\theta \in [-1,1])}{theta in [-1,1])}, q is the inverse of the standard normal distribution function. }
\item{\code{pbcAMH}:}{Family of Ali-Mikhail-Haq (AMH) copulas:
\deqn{u * v / (1 - \theta * (1 -u) * (1 -v))}{
u * v / (1 - theta * (1 -u) * (1 -v)) }
with
\eqn{\theta \in [0,1)}{theta in [0,1)}. }
\item{\code{pbcJoe}:}{Family of Joe copulas:
\deqn{1 - ((1 - u)^\theta + (1 - v)^\theta - (1 - u)^\theta * (1 - v)^\theta)^{1/\theta}}{
1 - ((1 - u)^theta + (1 - v)^theta - (1 - u)^theta * (1 - v)^theta)^(1/theta) }
with
\eqn{\theta\in[1,\infty)}{theta in [1,Inf)}. }
}
}
\value{
A \code{"\link{PBC}"} object.
}
\references{
G. Mazo, S. Girard and F. Forbes. A class of high dimensional copulas based on products of bivariate copulas. http://hal.archives-ouvertes.fr/hal-00910775. \cr
R. B. Nelsen. An Introduction to Copulas. Springer, 1999. \cr
}
\keyword{ models }
\seealso{
\code{\link{pbc}}
}
\examples{
## Example for the FGM family
graph <- graph.formula(X1-X2,X2-X3, simplify = FALSE)
## Create a PBC object
fgmObject <- pbcFGM(graph)
## alternatively
g <- graph.formula(X1-X2,X2-X3, simplify = FALSE)
fgmObject <- pbc(g, model="fgm")
}
| /man/pbcModels.Rd | no_license | cran/PBC | R | false | false | 3,277 | rd | \name{pbcModels}
\alias{pbcModels}
\alias{pbcGumbel}
\alias{pbcFGM}
\alias{pbcFrank}
\alias{pbcNormal}
\alias{pbcAMH}
\alias{pbcJoe}
\title{
Linking copula families for the PBC model
}
\description{
Linking copula families implemented in the \bold{PBC} package.
}
\usage{
pbcGumbel(graph)
pbcFGM(graph)
pbcFrank(graph)
pbcNormal(graph)
pbcAMH(graph)
pbcJoe(graph)
}
\arguments{
\item{graph}{the graph (of class \code{\link{igraph}}) associated to the PBC copula.}
}
\details{
A pair \eqn{(U_i,U_j)} of the PBC model has copula
\deqn{C_{ij}(u,v) = u^{ 1 - 1/n_i } * v^{ 1 - 1/n_j } * D_{ij}( u^{ 1/n_i }, v^{ 1/n_j } ),}
where \eqn{n_i} and \eqn{n_j} are the number of neighbors in the graph for the variables \eqn{U_i} and \eqn{U_j} respectively (G. Mazo, S. Girard and F. Forbes). The copula families implemented for \eqn{D_{ij}(u,v)} are given below.
\describe{
\item{\code{pbcGumbel}:}{family of Gumbel copulas:
      \deqn{ \exp(-((-\ln(u))^\theta + (-\ln(v))^\theta)^{1/\theta})}{
        exp(-((-ln(u))^theta + (-ln(v))^theta)^(1/theta)) }
with
\eqn{\theta\in[1,\infty)}{theta in [1,Inf)}. }
\item{\code{pbcFGM}:}{family of Farlie-Gumbel-Morgenstern (FGM) copulas:
\deqn{u * v * (1 + \theta * (1 - u) * (1 - v))}{
u * v * (1 + theta * (1 - u) * (1 - v)) }
with
\eqn{\theta \in [-1,1]}{theta in [-1,1]}. }
\item{\code{pbcFrank}:}{family of Frank copulas:
\deqn{- \ln(1 + (\exp(- \theta * u) - 1)*(\exp(- \theta * v) - 1)/(\exp(-\theta) - 1))/\theta}{
- ln(1 + (exp(- theta * u) - 1)*(exp(- theta * v) - 1)/(exp(-theta) - 1))/theta }
with
\eqn{\theta\in(0,\infty)}{theta in (0,Inf)}. }
\item{\code{pbcNormal}:}{family of normal copulas:
\deqn{\exp(((\theta * q(u))^2 + (\theta * q(v))^2 - 2 * \theta
* q(u) * q(v)) / (2 * (- 1 + \theta^2))) / ( 1 - \theta^2)^0.5}{
exp(((theta * q(u))^2 + (theta * q(v))^2 - 2 * theta
* q(u) * q(v)) / (2 * (- 1 + theta^2))) / ( 1 - theta^2)^0.5
}
with
\eqn{\theta \in [-1,1])}{theta in [-1,1])}, q is the inverse of the standard normal distribution function. }
\item{\code{pbcAMH}:}{Family of Ali-Mikhail-Haq (AMH) copulas:
\deqn{u * v / (1 - \theta * (1 -u) * (1 -v))}{
u * v / (1 - theta * (1 -u) * (1 -v)) }
with
\eqn{\theta \in [0,1)}{theta in [0,1)}. }
\item{\code{pbcJoe}:}{Family of Joe copulas:
\deqn{1 - ((1 - u)^\theta + (1 - v)^\theta - (1 - u)^\theta * (1 - v)^\theta)^{1/\theta}}{
1 - ((1 - u)^theta + (1 - v)^theta - (1 - u)^theta * (1 - v)^theta)^(1/theta) }
with
\eqn{\theta\in[1,\infty)}{theta in [1,Inf)}. }
}
}
\value{
A \code{"\link{PBC}"} object.
}
\references{
G. Mazo, S. Girard and F. Forbes. A class of high dimensional copulas based on products of bivariate copulas. http://hal.archives-ouvertes.fr/hal-00910775. \cr
R. B. Nelsen. An Introduction to Copulas. Springer, 1999. \cr
}
\keyword{ models }
\seealso{
\code{\link{pbc}}
}
\examples{
## Example for the FGM family
graph <- graph.formula(X1-X2,X2-X3, simplify = FALSE)
## Create a PBC object
fgmObject <- pbcFGM(graph)
## alternatively
g <- graph.formula(X1-X2,X2-X3, simplify = FALSE)
fgmObject <- pbc(g, model="fgm")
}
|
library(labstatR)
### Name: Rpa
### Title: Calcola il rendimento di un portafoglio
### Aliases: Rpa
### Keywords: distribution
### ** Examples
# (Title above, in Italian: "computes the return of a portfolio".)
# x and y: two discrete return series, entered as percentages and scaled to fractions.
x <- c(11,9,25,7,-2)/100
y <- c(-3,15,2,20,6)/100
# pxy: 5x5 joint probability matrix for (x, y); all mass (0.2 each) sits on the
# diagonal, i.e. the two assets always realize the same scenario index.
pxy <- matrix(rep(0,25),5,5)
pxy[1,1] <- 0.2
pxy[2,2] <- 0.2
pxy[3,3] <- 0.2
pxy[4,4] <- 0.2
pxy[5,5] <- 0.2
# Portfolio return at two mixing weights (0.1 and 0.5) -- presumably the first
# argument is the weight on x; confirm against labstatR's Rpa documentation.
Rpa(0.1,x,y,pxy)
Rpa(0.5,x,y,pxy)
| /data/genthat_extracted_code/labstatR/examples/Rpa.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 343 | r | library(labstatR)
### Name: Rpa
### Title: Calcola il rendimento di un portafoglio
### Aliases: Rpa
### Keywords: distribution
### ** Examples
x <- c(11,9,25,7,-2)/100
y <- c(-3,15,2,20,6)/100
pxy <- matrix(rep(0,25),5,5)
pxy[1,1] <- 0.2
pxy[2,2] <- 0.2
pxy[3,3] <- 0.2
pxy[4,4] <- 0.2
pxy[5,5] <- 0.2
Rpa(0.1,x,y,pxy)
Rpa(0.5,x,y,pxy)
|
library(Devore7)
### Name: xmp14.14
### Title: R Data set: xmp14.14
### Aliases: xmp14.14
### Keywords: datasets
### ** Examples
# Load the xmp14.14 example dataset shipped with Devore7 and print its structure.
data(xmp14.14)
str(xmp14.14)
| /data/genthat_extracted_code/Devore7/examples/xmp14.14.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 165 | r | library(Devore7)
### Name: xmp14.14
### Title: R Data set: xmp14.14
### Aliases: xmp14.14
### Keywords: datasets
### ** Examples
data(xmp14.14)
str(xmp14.14)
|
#' Render a template preview image under man/figures/.
#'
#' Renders the template's skeleton document (or a caller-supplied `input`
#' file) and captures page one as `man/figures/preview-<template>.png`.
render_preview <- function(template, input = NULL) {
  # Default to the skeleton document shipped with the requested template.
  if (is.null(input)) {
    input <- file.path("inst", "rmarkdown", "templates", template,
                       "skeleton", "skeleton.Rmd")
  }
  # Work on a temporary copy so the tracked skeleton file stays untouched.
  tmp_input <- tempfile(fileext = ".rmd")
  file.copy(input, tmp_input)
  input <- tmp_input
  output <- rmarkdown::render(input, output_dir = tempdir())
  outfile <- file.path("man", "figures", paste0("preview-", template, ".png"))
  if (grepl("html$", output)) {
    # HTML output: screenshot the rendered page at roughly A4 proportions.
    require_package("webshot")
    webshot::webshot(output, outfile, vwidth = 595, vheight = 842)
  } else {
    # Non-HTML (PDF) output: rasterise the first page instead.
    require_package("pdftools")
    pdftools::pdf_convert(output, "png", pages = 1, filenames = outfile)
  }
}
| /data-raw/preview.R | no_license | mitchelloharawild/vitae | R | false | false | 656 | r | render_preview <- function(template, input = NULL) {
if(is.null(input)) {
input <- file.path("inst", "rmarkdown", "templates", template, "skeleton", "skeleton.Rmd")
}
file.copy(input, input <- tempfile(fileext = ".rmd"))
output <- rmarkdown::render(
input, output_dir = tempdir()
)
outfile <- file.path("man", "figures", paste0("preview-", template, ".png"))
# Output is html based
if(grepl("html$", output)) {
require_package("webshot")
webshot::webshot(output, outfile, vwidth = 595, vheight = 842)
} else {
require_package("pdftools")
pdftools::pdf_convert(output, "png", pages = 1, filenames = outfile)
}
}
|
## Build a special "matrix" object that can cache its own inverse.
## The object is a list of four closures -- set, get, setinverse, getinverse --
## all sharing one enclosing environment, which is what makes the cache persist.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL  # NULL means "inverse not computed yet"
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  # The closure list is the object handed back to callers.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Return the inverse of the special matrix produced by makeCacheMatrix,
## computing it at most once: on a cache hit the stored inverse is served
## (with a message); on a miss it is computed via solve() and stored.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix (extra args go to solve) and remember it.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | fengdinggithub/ProgrammingAssignment2 | R | false | false | 1,199 | r | ## This function creates a special matrix object that can cache its inverse.
## The returned special matrix is a list of 4 functions.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL ## i is the inverse of the matrix (NULL = not yet cached)
  set <- function(y) {
    ## replace the stored matrix and drop any stale cached inverse
    x <<- y
    i <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  ## the four closures share this function's environment, which is what
  ## lets the cached inverse persist between calls
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
  ## return a list of 4 functions: set, get, setinverse, getinverse
}
## This function computes the inverse of the special matrix returned by makeCacheMatrix function.
## If the inverse has already been calculated, then it retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  ## (x is the closure list built by makeCacheMatrix, not a bare matrix)
  ## i is the inverse of the matrix
  i <- x$getinverse()
  if(!is.null(i)) { #check if i has been calculated, i.e. not null
    message("getting cached data")
    return(i)
  }
  ## not cached before, solve it and cache it; extra args in ... are forwarded to solve()
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
\name{mkinerrmin}
\Rdversion{1.1}
\alias{mkinerrmin}
\title{
Calculate the minimum error to assume in order to pass the variance test
}
\description{
This function uses \code{\link{optimize}} in order to iteratively find the
smallest relative error still resulting in passing the chi-squared test
as defined in the FOCUS kinetics report from 2006.
}
\usage{
mkinerrmin(errdata, n.parms, alpha = 0.05)
}
\arguments{
\item{errdata}{
A data frame with mean observed values in column named \code{value_mean}
and predicted values in column \code{value_pred}.
}
\item{n.parms}{
The number of optimized parameters to be taken into account for the data series.
}
\item{alpha}{
The confidence level chosen for the chi-squared test.
}
}
\value{
A list with the following components:
\item{err.min}{The relative error, expressed as a fraction.}
\item{n.optim}{The number of optimised parameters attributed to the data series.}
\item{df}{The number of remaining degrees of freedom for the chi2 error level calculations.
Note that mean values are used for the chi2 statistic and therefore every time point with
observed values in the series only counts one time.}
}
\details{
This function is used internally by \code{\link{mkinfit}}.
}
\references{
FOCUS (2006) \dQuote{Guidance Document on Estimating Persistence and
Degradation Kinetics from Environmental Fate Studies on Pesticides in EU
Registration} Report of the FOCUS Work Group on Degradation Kinetics,
EC Document Reference Sanco/10058/2005 version 2.0, 434 pp,
\url{http://focus.jrc.ec.europa.eu/dk}
}
\keyword{ manip }
| /R_packages/mkin/trunk/man/mkinerrmin.Rd | no_license | BackupTheBerlios/mkin-svn | R | false | false | 1,668 | rd | \name{mkinerrmin}
\Rdversion{1.1}
\alias{mkinerrmin}
\title{
Calculate the minimum error to assume in order to pass the variance test
}
\description{
This function uses \code{\link{optimize}} in order to iteratively find the
smallest relative error still resulting in passing the chi-squared test
as defined in the FOCUS kinetics report from 2006.
}
\usage{
mkinerrmin(errdata, n.parms, alpha = 0.05)
}
\arguments{
\item{errdata}{
A data frame with mean observed values in column named \code{value_mean}
and predicted values in column \code{value_pred}.
}
\item{n.parms}{
The number of optimized parameters to be taken into account for the data series.
}
\item{alpha}{
The confidence level chosen for the chi-squared test.
}
}
\value{
A list with the following components:
\item{err.min}{The relative error, expressed as a fraction.}
\item{n.optim}{The number of optimised parameters attributed to the data series.}
\item{df}{The number of remaining degrees of freedom for the chi2 error level calculations.
Note that mean values are used for the chi2 statistic and therefore every time point with
observed values in the series only counts one time.}
}
\details{
This function is used internally by \code{\link{mkinfit}}.
}
\references{
FOCUS (2006) \dQuote{Guidance Document on Estimating Persistence and
Degradation Kinetics from Environmental Fate Studies on Pesticides in EU
Registration} Report of the FOCUS Work Group on Degradation Kinetics,
EC Document Reference Sanco/10058/2005 version 2.0, 434 pp,
\url{http://focus.jrc.ec.europa.eu/dk}
}
\keyword{ manip }
|
# IMPORT DATA
# Telco customer churn CSV; path is relative to the project root.
churn_data_raw <- read.csv("data/WA_Fn-UseC_-Telco-Customer-Churn.csv")
glimpse(churn_data_raw)
# Data quality
summary(churn_data_raw)
# PREPROCESS DATA
# Checking for missing value
# Per-column NA counts (printed); per the note below only TotalCharges has NAs here.
sapply(churn_data_raw, function(x) sum(is.na(x)))
# 11 missing values in TotalCharge
# Remove unnecessary data
churn_data_tbl <- churn_data_raw %>%
  select(-customerID) %>% # Dropping customerID column since has no predictive info
  drop_na() %>% # This will remove 11 rows where TotalCharge is NA
  select(Churn, everything()) # Reordering the columns to keep churn as first
glimpse(churn_data_tbl)
summary(churn_data_tbl)
library(scales)
#show_col(hue_pal()(4))
hue_pal()(4)
p1 <- churn_data_tbl %>% ggplot(aes(x=Churn, fill = Churn)) +
geom_bar(aes(y = (..count..))) +
labs(y = "Count") +
geom_text(aes(y = (..count..),label = ifelse((..count..)==0,"",scales::percent((..count..)/sum(..count..)))),
stat="count", colour="darkgreen") +
scale_fill_manual( values = c("#00BFC4", "#F8766D")) #+ theme(legend.position = "none")
p2 <- churn_data_tbl %>% group_by(gender,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=gender, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Gender") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~gender) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p3 <- churn_data_tbl %>% mutate(SeniorCitizen=ifelse(SeniorCitizen == 0, 'No', "Yes")) %>%
group_by(SeniorCitizen,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=SeniorCitizen, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Senior Citizen") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~SeniorCitizen) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p4 <- churn_data_tbl %>% group_by(Partner,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=Partner, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Has Partner") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~Partner) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p5 <- churn_data_tbl %>% group_by(Dependents,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=Dependents, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Has Dependents") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~Dependents) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p6 <- churn_data_tbl %>% group_by(PhoneService,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=PhoneService, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Phone Service") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~PhoneService) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p7 <- churn_data_tbl %>% group_by(MultipleLines,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=MultipleLines, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Multiple Lines") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~MultipleLines) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p8 <- churn_data_tbl %>% group_by(InternetService,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=InternetService, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Internet Service") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~InternetService) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p9 <- churn_data_tbl %>% group_by(OnlineSecurity,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=OnlineSecurity, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Online Security") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~OnlineSecurity) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p10 <- churn_data_tbl %>% group_by(OnlineBackup,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=OnlineBackup, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Online Backup") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~OnlineBackup) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p11 <- churn_data_tbl %>% group_by(DeviceProtection,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=DeviceProtection, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Device Protection") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~DeviceProtection) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p12 <- churn_data_tbl %>% group_by(TechSupport,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=TechSupport, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Tech Support") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~TechSupport) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p13 <- churn_data_tbl %>% group_by(StreamingTV,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=StreamingTV, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Streaming TV") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~StreamingTV) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p14 <- churn_data_tbl %>% group_by(StreamingMovies,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=StreamingMovies, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Streaming Movies") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~StreamingMovies) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p15 <- churn_data_tbl %>% group_by(Contract,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=Contract, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Contract") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~Contract) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p16 <- churn_data_tbl %>% group_by(PaperlessBilling,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=PaperlessBilling, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Paperless Billing") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~PaperlessBilling) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
p17 <- churn_data_tbl %>% group_by(PaymentMethod,Churn) %>%
summarise(n = n()) %>%
mutate(percentage=paste0(round(n/sum(n)*100,1),"%")) %>%
ggplot(aes(x= Churn, y=n, group=PaymentMethod, fill=Churn)) +
geom_bar(stat="identity") +
labs(y = "Count", x = "Payment Method") +
geom_text(aes(label=percentage), position=position_dodge(width=0.9), vjust=-0.25) +
facet_grid(~PaymentMethod) + theme(legend.position = "none") +
scale_fill_manual( values = c("#00BFC4", "#F8766D"))
# Monthly charge plot
p <- churn_data_tbl %>%
ggplot(aes(MonthlyCharges)) +
geom_histogram(binwidth = 4)
MonthlyCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(MonthlyCharges_df) <- c("MonthlyCharges", "Freq")
p_xmax <- layer_data(p,1)$xmax
churn_data_tbl_tmp <- churn_data_tbl
churn_data_tbl_tmp$bin <- sapply(churn_data_tbl$MonthlyCharges,function(x) length(p_xmax)-sum(x < p_xmax)+1)
mean_df <- churn_data_tbl_tmp %>%
group_by(bin) %>%
summarise(Freq=n(),mean=mean(MonthlyCharges))
churn_df <- churn_data_tbl_tmp %>%
group_by(bin) %>% filter(Churn=='Yes') %>%
summarise(n_churn=n())
merge_df <- merge(mean_df, churn_df, by = "bin", all.x = T) %>%
mutate(percentage=round(n_churn/Freq*100,1))
merge_df$percentage[is.na(merge_df$percentage)] <- 0
p21 <- merge_df %>%
ggplot()+
scale_fill_gradient(low = "green", high = "red")+
geom_bar(aes(x=mean, y = Freq, fill = mean), width=1.7, stat = "identity") +
geom_line(aes(x=mean, y = percentage*10), linetype="dashed") +
labs(y = "Count", x = "Monthly Charges [$]") +
theme(legend.position = "none") +
scale_y_continuous(limits = c(0, 1000)) +
scale_y_continuous(
sec.axis = sec_axis(~./10, name = "Churn Rate [%]"))
# Total Charges
p <- churn_data_tbl %>%
ggplot(aes(TotalCharges)) +
geom_histogram(binwidth = 100)
TotalCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(TotalCharges_df) <- c("TotalCharges", "Freq")
p_xmax <- layer_data(p,1)$xmax
churn_data_tbl_tmp <- churn_data_tbl
churn_data_tbl_tmp$bin <- sapply(churn_data_tbl$TotalCharges,function(x) length(p_xmax)-sum(x < p_xmax)+1)
mean_df <- churn_data_tbl_tmp %>%
group_by(bin) %>%
summarise(Freq=n(),mean=mean(TotalCharges))
churn_df <- churn_data_tbl_tmp %>%
group_by(bin) %>% filter(Churn=='Yes') %>%
summarise(n_churn=n())
merge_df <- merge(mean_df, churn_df, by = "bin", all.x = T) %>%
mutate(percentage=round(n_churn/Freq*100,1))
merge_df$percentage[is.na(merge_df$percentage)] <- 0
moving_avg=merge_df[10:(nrow(merge_df)),]
moving_avg$moving_percentage <- rollapply(merge_df$percentage, 10, mean)
p22 <- merge_df %>%
ggplot()+
scale_fill_gradient(low = "green", high = "red")+
geom_bar(aes(x=mean, y = Freq, fill = mean), width=25, stat = "identity") +
geom_line(aes(x=mean, y = percentage*10), linetype="dotdash") +
geom_line(data=moving_avg, aes(x=mean, y = moving_percentage*10), linetype="solid", colour="red") +
labs(y = "Count", x = "Total Charges [$]") +
theme(legend.position = "none") +
scale_y_continuous(limits = c(0, 1000)) +
scale_y_continuous(
sec.axis = sec_axis(~./10, name = "Churn Rate [%]"))
# Tenure
churn_data_tbl_tmp <- churn_data_tbl %>% mutate(tenure=ceiling(tenure/12))
p <- churn_data_tbl_tmp %>%
ggplot(aes(tenure)) +
geom_histogram(binwidth = 1)
tenure_df <- layer_data(p,1)[,c('x','y')]
colnames(tenure_df) <- c("tenure", "Freq")
p_xmax <- layer_data(p,1)$xmax
churn_data_tbl_tmp$bin <- sapply(churn_data_tbl_tmp$tenure,function(x) length(p_xmax)-sum(x < p_xmax)+1)
mean_df <- churn_data_tbl_tmp %>%
group_by(bin) %>%
summarise(Freq=n(),mean=mean(tenure))
churn_df <- churn_data_tbl_tmp %>%
group_by(bin) %>% filter(Churn=='Yes') %>%
summarise(n_churn=n())
merge_df <- merge(mean_df, churn_df, by = "bin", all.x = T) %>%
mutate(percentage=round(n_churn/Freq*100,1))
merge_df$percentage[is.na(merge_df$percentage)] <- 0
p23 <- merge_df %>%
ggplot()+
scale_fill_gradient(low = "red", high = "green")+
geom_bar(aes(x=mean, y = Freq, fill = mean*2), width=.6, stat = "identity") +
geom_line(aes(x=mean, y = percentage*25), linetype="dashed") +
labs(y = "Count", x = "Tenure [Years]") +
theme(legend.position = "none") +
scale_y_continuous(limits = c(0, 2500)) +
scale_y_continuous(sec.axis = sec_axis(~./25, name = "Churn Rate [%]"))
# Total Charges
churn_data_tbl_tmp <- churn_data_tbl
p <- churn_data_tbl_tmp %>%
ggplot(aes(TotalCharges)) +
geom_histogram(bins = 200)
TotalCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(TotalCharges_df) <- c("TotalCharges", "Freq")
p24 <- TotalCharges_df %>%
ggplot()+
scale_fill_gradient(low = "green", high = "red")+
geom_bar(aes(x=TotalCharges, y = Freq, fill = TotalCharges), width=30, stat = "identity") +
labs(y = "Count", x = "Total Charges [$]") +
theme(legend.position = "none")
p24
# LOG Total Charges
churn_data_tbl_tmp <- churn_data_tbl
churn_data_tbl_tmp$TotalCharges <- log(churn_data_tbl_tmp$TotalCharges)
p <- churn_data_tbl_tmp %>%
ggplot(aes(TotalCharges)) +
geom_histogram(bins = 200)
TotalCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(TotalCharges_df) <- c("TotalCharges", "Freq")
p25 <- TotalCharges_df %>%
ggplot()+
scale_fill_gradient(low = "green", high = "red")+
geom_bar(aes(x=TotalCharges, y = Freq, fill = TotalCharges), width=.02, stat = "identity") +
labs(y = "Count", x = "LOG(Total Charges) [$]") +
theme(legend.position = "none")
p25
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols:   Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# NOTE(review): the 'file' argument is accepted but never used in the body.
# NOTE(review): library(grid) inside the function attaches grid to the search
# path as a side effect on every call.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    # Plots are numbered column-wise into a cols-wide grid.
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                    ncol = cols, nrow = ceiling(numPlots/cols))
  }
 if (numPlots==1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      # (a plot spans every grid cell whose layout value equals i)
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
layout <- matrix(c(1,2,3,4),2,2,byrow=TRUE)
multiplot(p1, p2, p3, p4, layout=layout)
layout <- matrix(c(1,2,3,4),2,2,byrow=TRUE)
multiplot(p8, p13, p15, p16, layout=layout)
layout <- matrix(c(1,2,3,4),2,2,byrow=TRUE)
multiplot(p21, p22, p17, p23, layout=layout)
layout <- matrix(c(1,2),1,2,byrow=TRUE)
multiplot(p24, p25, layout=layout)
| /source/EDA.R | permissive | soltaniehha/Customer_Churn_w_Keras | R | false | false | 15,742 | r | # IMPORT DATA
# IMPORT DATA: Telco customer churn CSV, one row per customer.
# NOTE(review): glimpse()/%>%/select()/drop_na() are tidyverse functions --
# presumably library(tidyverse) is loaded earlier in this script; confirm.
churn_data_raw <- read.csv("data/WA_Fn-UseC_-Telco-Customer-Churn.csv")
glimpse(churn_data_raw)
# Data quality
summary(churn_data_raw)
# PREPROCESS DATA
# Checking for missing value (NA count per column)
sapply(churn_data_raw, function(x) sum(is.na(x)))
# 11 missing values in TotalCharge
# Remove unnecessary data
churn_data_tbl <- churn_data_raw %>%
  select(-customerID) %>% # Dropping customerID column since has no predictive info
  drop_na() %>% # This will remove 11 rows where TotalCharge is NA
  select(Churn, everything()) # Reordering the columns to keep churn as first
glimpse(churn_data_tbl)
summary(churn_data_tbl)
# Default ggplot2 hue palette; the first two hex codes below ("#00BFC4",
# "#F8766D") are taken from this palette and reused manually in every plot.
library(scales)
#show_col(hue_pal()(4))
hue_pal()(4)
# Bar chart of the overall churn class balance, labelled with percentages.
# "#00BFC4"/"#F8766D" are the default ggplot2 hues (see hue_pal() above).
p1 <- churn_data_tbl %>% ggplot(aes(x=Churn, fill = Churn)) +
  geom_bar(aes(y = (..count..))) +
  labs(y = "Count") +
  geom_text(aes(y = (..count..),label = ifelse((..count..)==0,"",scales::percent((..count..)/sum(..count..)))),
            stat="count", colour="darkgreen") +
  scale_fill_manual( values = c("#00BFC4", "#F8766D")) #+ theme(legend.position = "none")

# Helper: churn count bar chart faceted by one categorical predictor.
#
# For each level of `var`, plots churned vs retained counts and labels each
# bar with its share of that level. The share n/sum(n) is evaluated after
# summarise() drops the innermost (Churn) grouping, so the denominator is the
# per-level total, i.e. the labels are per-level churn/retention rates.
#
# df   -- data frame with a Churn column and `var`
# var  -- unquoted column to facet by (tidy evaluation, embraced with {{ }})
# xlab -- x-axis label
plot_churn_by <- function(df, var, xlab) {
  df %>% group_by({{ var }}, Churn) %>%
    summarise(n = n()) %>%
    mutate(percentage = paste0(round(n/sum(n)*100, 1), "%")) %>%
    ggplot(aes(x = Churn, y = n, group = {{ var }}, fill = Churn)) +
    geom_bar(stat = "identity") +
    labs(y = "Count", x = xlab) +
    geom_text(aes(label = percentage), position = position_dodge(width = 0.9), vjust = -0.25) +
    facet_grid(cols = vars({{ var }})) + theme(legend.position = "none") +
    scale_fill_manual(values = c("#00BFC4", "#F8766D"))
}

# One faceted churn plot per categorical feature. These calls replace sixteen
# near-identical copy-pasted pipelines; each is equivalent to the original.
p2  <- plot_churn_by(churn_data_tbl, gender,           "Gender")
# SeniorCitizen is stored as 0/1; recode to No/Yes for readable facet labels.
p3  <- churn_data_tbl %>%
  mutate(SeniorCitizen = ifelse(SeniorCitizen == 0, 'No', "Yes")) %>%
  plot_churn_by(SeniorCitizen, "Senior Citizen")
p4  <- plot_churn_by(churn_data_tbl, Partner,          "Has Partner")
p5  <- plot_churn_by(churn_data_tbl, Dependents,       "Has Dependents")
p6  <- plot_churn_by(churn_data_tbl, PhoneService,     "Phone Service")
p7  <- plot_churn_by(churn_data_tbl, MultipleLines,    "Multiple Lines")
p8  <- plot_churn_by(churn_data_tbl, InternetService,  "Internet Service")
p9  <- plot_churn_by(churn_data_tbl, OnlineSecurity,   "Online Security")
p10 <- plot_churn_by(churn_data_tbl, OnlineBackup,     "Online Backup")
p11 <- plot_churn_by(churn_data_tbl, DeviceProtection, "Device Protection")
p12 <- plot_churn_by(churn_data_tbl, TechSupport,      "Tech Support")
p13 <- plot_churn_by(churn_data_tbl, StreamingTV,      "Streaming TV")
p14 <- plot_churn_by(churn_data_tbl, StreamingMovies,  "Streaming Movies")
p15 <- plot_churn_by(churn_data_tbl, Contract,         "Contract")
p16 <- plot_churn_by(churn_data_tbl, PaperlessBilling, "Paperless Billing")
p17 <- plot_churn_by(churn_data_tbl, PaymentMethod,    "Payment Method")
# Monthly charge plot: histogram colored by charge level, overlaid with the
# per-bin churn rate (dashed line, right-hand axis).
# A throwaway histogram is built first just to reuse ggplot's bin edges.
p <- churn_data_tbl %>%
  ggplot(aes(MonthlyCharges)) +
  geom_histogram(binwidth = 4)
MonthlyCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(MonthlyCharges_df) <- c("MonthlyCharges", "Freq")
p_xmax <- layer_data(p,1)$xmax
churn_data_tbl_tmp <- churn_data_tbl
# Bin index per customer: count how many upper bin edges are not above the
# value. vapply (not sapply) pins the return type for non-interactive code.
churn_data_tbl_tmp$bin <- vapply(churn_data_tbl$MonthlyCharges,
                                 function(x) length(p_xmax) - sum(x < p_xmax) + 1,
                                 numeric(1))
# Per-bin customer count and mean charge ...
mean_df <- churn_data_tbl_tmp %>%
  group_by(bin) %>%
  summarise(Freq=n(),mean=mean(MonthlyCharges))
# ... and per-bin churned counts (bins with no churners drop out here).
churn_df <- churn_data_tbl_tmp %>%
  group_by(bin) %>% filter(Churn=='Yes') %>%
  summarise(n_churn=n())
# Left join keeps churn-free bins; their NA rate is zeroed just below.
merge_df <- merge(mean_df, churn_df, by = "bin", all.x = TRUE) %>%
  mutate(percentage=round(n_churn/Freq*100,1))
merge_df$percentage[is.na(merge_df$percentage)] <- 0
# BUG FIX: the original added scale_y_continuous() twice; ggplot2 keeps only
# the last y scale, silently discarding limits = c(0, 1000). Limits and the
# secondary axis now live in a single scale.
p21 <- merge_df %>%
  ggplot()+
  scale_fill_gradient(low = "green", high = "red")+
  geom_bar(aes(x=mean, y = Freq, fill = mean), width=1.7, stat = "identity") +
  geom_line(aes(x=mean, y = percentage*10), linetype="dashed") + # rate mapped onto count axis
  labs(y = "Count", x = "Monthly Charges [$]") +
  theme(legend.position = "none") +
  scale_y_continuous(limits = c(0, 1000),
                     sec.axis = sec_axis(~./10, name = "Churn Rate [%]"))
# Total Charges: histogram plus per-bin churn rate (dotdash line) and a
# 10-bin moving-average churn rate (solid red) to smooth the noisy raw rate.
p <- churn_data_tbl %>%
  ggplot(aes(TotalCharges)) +
  geom_histogram(binwidth = 100)
TotalCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(TotalCharges_df) <- c("TotalCharges", "Freq")
p_xmax <- layer_data(p,1)$xmax
churn_data_tbl_tmp <- churn_data_tbl
# Bin index per customer (same edge-counting trick as the MonthlyCharges
# block); vapply pins the return type.
churn_data_tbl_tmp$bin <- vapply(churn_data_tbl$TotalCharges,
                                 function(x) length(p_xmax) - sum(x < p_xmax) + 1,
                                 numeric(1))
mean_df <- churn_data_tbl_tmp %>%
  group_by(bin) %>%
  summarise(Freq=n(),mean=mean(TotalCharges))
churn_df <- churn_data_tbl_tmp %>%
  group_by(bin) %>% filter(Churn=='Yes') %>%
  summarise(n_churn=n())
merge_df <- merge(mean_df, churn_df, by = "bin", all.x = TRUE) %>%
  mutate(percentage=round(n_churn/Freq*100,1))
merge_df$percentage[is.na(merge_df$percentage)] <- 0
# Rolling mean over 10 bins; rows 10..n align with rollapply's
# window-complete output. NOTE(review): rollapply() appears to be
# zoo::rollapply -- confirm library(zoo) is loaded earlier in the script.
moving_avg <- merge_df[10:nrow(merge_df), ]
moving_avg$moving_percentage <- rollapply(merge_df$percentage, 10, mean)
# BUG FIX: merged the two scale_y_continuous() calls -- ggplot2 keeps only
# the last y scale, so the original's limits were silently discarded.
p22 <- merge_df %>%
  ggplot()+
  scale_fill_gradient(low = "green", high = "red")+
  geom_bar(aes(x=mean, y = Freq, fill = mean), width=25, stat = "identity") +
  geom_line(aes(x=mean, y = percentage*10), linetype="dotdash") +
  geom_line(data=moving_avg, aes(x=mean, y = moving_percentage*10), linetype="solid", colour="red") +
  labs(y = "Count", x = "Total Charges [$]") +
  theme(legend.position = "none") +
  scale_y_continuous(limits = c(0, 1000),
                     sec.axis = sec_axis(~./10, name = "Churn Rate [%]"))
# Tenure, bucketed into whole years: histogram plus churn rate per bucket.
churn_data_tbl_tmp <- churn_data_tbl %>% mutate(tenure=ceiling(tenure/12))
p <- churn_data_tbl_tmp %>%
  ggplot(aes(tenure)) +
  geom_histogram(binwidth = 1)
tenure_df <- layer_data(p,1)[,c('x','y')]
colnames(tenure_df) <- c("tenure", "Freq")
p_xmax <- layer_data(p,1)$xmax
# Bin index per customer (edge-counting trick); vapply pins the return type.
churn_data_tbl_tmp$bin <- vapply(churn_data_tbl_tmp$tenure,
                                 function(x) length(p_xmax) - sum(x < p_xmax) + 1,
                                 numeric(1))
mean_df <- churn_data_tbl_tmp %>%
  group_by(bin) %>%
  summarise(Freq=n(),mean=mean(tenure))
churn_df <- churn_data_tbl_tmp %>%
  group_by(bin) %>% filter(Churn=='Yes') %>%
  summarise(n_churn=n())
merge_df <- merge(mean_df, churn_df, by = "bin", all.x = TRUE) %>%
  mutate(percentage=round(n_churn/Freq*100,1))
merge_df$percentage[is.na(merge_df$percentage)] <- 0
# Gradient is reversed vs the charge plots: longer tenure shows green.
# BUG FIX: one y scale carrying both limits and sec.axis (the original's
# second scale_y_continuous() silently replaced the first, dropping limits).
p23 <- merge_df %>%
  ggplot()+
  scale_fill_gradient(low = "red", high = "green")+
  geom_bar(aes(x=mean, y = Freq, fill = mean*2), width=.6, stat = "identity") +
  geom_line(aes(x=mean, y = percentage*25), linetype="dashed") +
  labs(y = "Count", x = "Tenure [Years]") +
  theme(legend.position = "none") +
  scale_y_continuous(limits = c(0, 2500),
                     sec.axis = sec_axis(~./25, name = "Churn Rate [%]"))
# Total Charges
# Raw TotalCharges distribution (200 bins), re-plotted as a gradient-colored
# bar chart so the fill tracks the charge level.
churn_data_tbl_tmp <- churn_data_tbl
p <- churn_data_tbl_tmp %>%
  ggplot(aes(TotalCharges)) +
  geom_histogram(bins = 200)
# Pull the computed bin centers/counts back out of the throwaway histogram.
TotalCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(TotalCharges_df) <- c("TotalCharges", "Freq")
p24 <- TotalCharges_df %>%
  ggplot()+
  scale_fill_gradient(low = "green", high = "red")+
  geom_bar(aes(x=TotalCharges, y = Freq, fill = TotalCharges), width=30, stat = "identity") +
  labs(y = "Count", x = "Total Charges [$]") +
  theme(legend.position = "none")
p24
# LOG Total Charges
# Same distribution after a natural-log transform, to spread the long right
# tail. NOTE(review): assumes TotalCharges > 0 for every remaining row (the
# 11 NA rows were dropped earlier) -- confirm there are no zero charges,
# otherwise log() yields -Inf here.
churn_data_tbl_tmp <- churn_data_tbl
churn_data_tbl_tmp$TotalCharges <- log(churn_data_tbl_tmp$TotalCharges)
p <- churn_data_tbl_tmp %>%
  ggplot(aes(TotalCharges)) +
  geom_histogram(bins = 200)
TotalCharges_df <- layer_data(p,1)[,c('x','y')]
colnames(TotalCharges_df) <- c("TotalCharges", "Freq")
p25 <- TotalCharges_df %>%
  ggplot()+
  scale_fill_gradient(low = "green", high = "red")+
  geom_bar(aes(x=TotalCharges, y = Freq, fill = TotalCharges), width=.02, stat = "identity") +
  labs(y = "Count", x = "LOG(Total Charges) [$]") +
  theme(legend.position = "none")
p25
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
# Arrange multiple ggplot objects on one page (see comment block above for
# the layout-matrix convention).
#
# Plots are passed in `...` or as a list via `plotlist`; the grid is derived
# from `cols` unless an explicit `layout` matrix of plot indices is given.
# `file` is accepted for backward compatibility but is not used.
multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
  library(grid)
  # Combine the ... arguments and plotlist into a single list of plots.
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  # Nothing to draw: return invisibly. (The original `for (i in 1:numPlots)`
  # would have iterated over c(1, 0) here and errored on plots[[1]].)
  if (numPlots == 0) {
    return(invisible(NULL))
  }
  # If layout is NULL, derive it from `cols`: `cols` columns, with as many
  # rows as needed to hold all plots.
  if (is.null(layout)) {
    layout <- matrix(seq_len(cols * ceiling(numPlots / cols)),
                     ncol = cols, nrow = ceiling(numPlots / cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up a fresh page with one viewport cell per layout cell.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Draw each plot into the region(s) of the layout matrix carrying its index.
    for (i in seq_len(numPlots)) {
      # i,j matrix positions of the cells that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Page 1: overall class balance plus the three demographic breakdowns (2 x 2).
layout <- matrix(c(1,2,3,4),2,2,byrow=TRUE)
multiplot(p1, p2, p3, p4, layout=layout)
# Page 2: internet service, streaming TV, contract, paperless billing (2 x 2).
layout <- matrix(c(1,2,3,4),2,2,byrow=TRUE)
multiplot(p8, p13, p15, p16, layout=layout)
# Page 3: churn-rate overlays for the numeric variables plus payment method (2 x 2).
layout <- matrix(c(1,2,3,4),2,2,byrow=TRUE)
multiplot(p21, p22, p17, p23, layout=layout)
# Page 4: TotalCharges distribution, raw vs log-transformed (1 x 2).
layout <- matrix(c(1,2),1,2,byrow=TRUE)
multiplot(p24, p25, layout=layout)
|
# Resume (start showing) a set of Yandex.Direct ads via the v5 ads API.
#
# Login         -- Yandex.Direct account login.
# Ids           -- vector of ad ids to resume; at most 10000 per API request.
# Token         -- OAuth token; resolved through tech_auth() when NULL.
# AgencyAccount -- agency client account name, when authorised as an agency.
# TokenPath     -- directory where cached tokens are stored.
#
# Progress and per-ad failures are reported as messages. Returns the vector
# of ad ids the API refused to resume (empty on full success).
yadirStartAds <- function(Login = getOption("ryandexdirect.user"),
                          Ids = NULL,
                          Token = NULL,
                          AgencyAccount = getOption("ryandexdirect.agency_account"),
                          TokenPath = yadirTokenPath()){
  # auth
  Token <- tech_auth(login = Login, token = Token, AgencyAccount = AgencyAccount, TokenPath = TokenPath)
  # Validate input before touching the network.
  if(length(Ids) > 10000){
    stop(paste0("In the parameter Ids transferred numbers ",length(Ids), " of ads, the maximum number of ads per request is 10000."))
  }
  if(is.null(Ids)){
    stop("In the Ids argument, you must pass the vector containing the Id ads for which you want to resume the show. You have not passed any Id.")
  }
  # errors counter
  CounErr <- 0
  # ids the API refused to resume
  errors_id <- vector()
  # start time, for the summary message at the end
  start_time <- Sys.time()
  # start message
  packageStartupMessage("Processing", appendLF = TRUE)
  IdsPast <- paste0(Ids, collapse = ",")
  # JSON request body for the ads "resume" method.
  queryBody <- paste0("{
  \"method\": \"resume\",
  \"params\": {
    \"SelectionCriteria\": {
      \"Ids\": [",IdsPast,"]}
  }
  }")
  # send request
  answer <- POST("https://api.direct.yandex.com/json/v5/ads", body = queryBody, add_headers(Authorization = paste0("Bearer ",Token), 'Accept-Language' = "ru","Client-Login" = Login))
  # parse answer
  ans_pars <- content(answer)
  # Top-level API error: abort with the server-provided details.
  if(!is.null(ans_pars$error)){
    stop(paste0("Error: ", ans_pars$error$error_string,". Message: ",ans_pars$error$error_detail, ". RequestID: ",ans_pars$error$request_id))
  }
  # Per-ad errors: collect ids that failed to start and report each one.
  # seq_along() (not 1:length()) so an empty result list is a no-op.
  for(error_search in seq_along(ans_pars$result$ResumeResults)){
    if(!is.null(ans_pars$result$ResumeResults[[error_search]]$Errors)){
      CounErr <- CounErr + 1
      errors_id <- c(errors_id, Ids[error_search])
      packageStartupMessage(paste0(" AdId: ",Ids[error_search]," - ", ans_pars$result$ResumeResults[[error_search]]$Errors[[1]]$Details))
    }
  }
  # Summary. Every branch of the original pluralisation switch produced the
  # identical string, so it collapses to one assignment (behaviour unchanged).
  TotalCampStoped <- length(Ids) - CounErr
  out_message <- "start ads"
  packageStartupMessage(paste0(TotalCampStoped, " ", out_message))
  packageStartupMessage(paste0("Total time: ", as.integer(round(difftime(Sys.time(), start_time , units ="secs"),0)), " sec."))
  return(errors_id)
}
| /R/yadirStartAds.R | no_license | selesnow/ryandexdirect | R | false | false | 2,761 | r | yadirStartAds <- function(Login = getOption("ryandexdirect.user"),
Ids = NULL,
Token = NULL,
AgencyAccount = getOption("ryandexdirect.agency_account"),
TokenPath = yadirTokenPath()){
# auth
Token <- tech_auth(login = Login, token = Token, AgencyAccount = AgencyAccount, TokenPath = TokenPath)
if(length(Ids) > 10000){
stop(paste0("In the parameter Ids transferred numbers ",length(Ids), " of ads, the maximum number of ads per request is 10000."))
}
if(is.null(Ids)){
stop("In the Ids argument, you must pass the vector containing the Id ads for which you want to resume the show. You have not passed any Id.")
}
# errors counter
CounErr <- 0
# Error vector
errors_id <- vector()
# start time
start_time <- Sys.time()
# start message
packageStartupMessage("Processing", appendLF = T)
IdsPast <- paste0(Ids, collapse = ",")
# request body
queryBody <- paste0("{
\"method\": \"resume\",
\"params\": {
\"SelectionCriteria\": {
\"Ids\": [",IdsPast,"]}
}
}")
# send request
answer <- POST("https://api.direct.yandex.com/json/v5/ads", body = queryBody, add_headers(Authorization = paste0("Bearer ",Token), 'Accept-Language' = "ru","Client-Login" = Login))
# parse answer
ans_pars <- content(answer)
# check answer for errors
if(!is.null(ans_pars$error)){
stop(paste0("Error: ", ans_pars$error$error_string,". Message: ",ans_pars$error$error_detail, ". RequestID: ",ans_pars$error$request_id))
}
# check not starting ads
for(error_search in 1:length(ans_pars$result$ResumeResults)){
if(!is.null(ans_pars$result$ResumeResults[[error_search]]$Errors)){
CounErr <- CounErr + 1
errors_id <- c(errors_id, Ids[error_search])
packageStartupMessage(paste0(" AdId: ",Ids[error_search]," - ", ans_pars$result$ResumeResults[[error_search]]$Errors[[1]]$Details))
}
}
# message about starting ads number
out_message <- ""
TotalCampStoped <- length(Ids) - CounErr
if(TotalCampStoped %in% c(2,3,4) & !(TotalCampStoped %% 100 %in% c(12,13,14))){
out_message <- "start ads"
} else if(TotalCampStoped %% 10 == 1 & TotalCampStoped %% 100 != 11){
out_message <- "start ads"
} else {
out_message <- "start ads"
}
# message
packageStartupMessage(paste0(TotalCampStoped, " ", out_message))
packageStartupMessage(paste0("Total time: ", as.integer(round(difftime(Sys.time(), start_time , units ="secs"),0)), " sec."))
return(errors_id)}
|
# NOTE(review): rm(list=ls()) wipes the caller's global environment when this
# file is sourced -- acceptable only because it is a standalone analysis script.
rm(list=ls())
# NOTE(review): require() returns FALSE instead of erroring on a missing
# package; these are hard dependencies, so library() would fail faster.
require(data.table)
require(ggplot2)
require(reshape2)
# args are (1) base background (for increment),
# (2) base foreground (all)
# (3) pc background (for increment)
# (4) pc foreground (for increment)
# (5) target
# parse_args <- function(argv = commandArgs(trailingOnly = T)) {
# parser <- optparse::OptionParser(
# usage = "usage: %prog path/to/input-useratob-scores/ path/to/accumulated-useratob-scores/",
# description = "convert (community X, user.a in community X, user.b in community X, 1) at interval k, to (...) cumulated up to interval k.",
# option_list = list(
# optparse::make_option(
# c("--verbose","-v"), action="store_true", default = FALSE,
# help="verbose?"
# )
# )
# )
# req_pos <- list(inputfiles=filelister, outputdir=identity)
# parsed <- optparse::parse_args(parser, argv, positional_arguments = length(req_pos))
# parsed$options$help <- NULL
# result <- c(mapply(function(f,c) f(c), req_pos, parsed$args, SIMPLIFY = F), parsed$options)
# result$storeres <- function(dt, was) {
# saveRDS(dt, sub(parsed$args[1], parsed$args[2], was))
# dt
# }
#
# if(result$verbose) print(result)
# result
# }
# given community affiliations for background + foreground
# for both regular and persistence communities X intervals
# compute # of covert members in small comms vs large comms
# compute # of non-covert members in small vs large comms
# compute TPR, FPR
# lf <- "mid"
# pwr <- "lo"
# pk <- "late"
# Cohort selectors: lifetime / power / peak class of the background users to
# compare against (must match a directory under output/matched/).
lf <- "mid"
pwr <- "med"
pk <- "middle"
# Per-increment community assignments of the injected (covert) foreground users.
foregroundSnapshots <- readRDS(sprintf("output/matched/%s/%s/%s/10/001-covert-0-base.rds",lf,pwr,pk))
foregroundN <- 10   # number of injected covert users (ids -1..-10 below)
n <- 135            # number of time increments to scan
# Per-increment detection rates, filled in by the main loop below.
res <- data.table(increment=1:n,
                  snapFPR=numeric(n), snapTPR=numeric(n), snapAltTPR=numeric(n),
                  pcFPR=numeric(n), pcTPR=numeric(n), pcAltTPR=numeric(n),
                  key="increment"
)
# Per-user, per-increment detection outcome levels.
covertStates <- c(abs="absent",big="present in large community",lit="present in small community")
# Background users matching the covert users' activity profile.
bgcomp_user_ids <- readRDS("input/user.RData")[
  (lifetime_main == lf & pwr_main == pwr & peak_main == pk), user_id
]
altfgN <- min(foregroundN, length(bgcomp_user_ids))
# Random comparison sample of matched background users.
# NOTE(review): if bgcomp_user_ids ever has length 1, sample(x, ...) switches
# to sampling from 1:x (the classic sample() footgun) -- confirm upstream
# guarantees more than one matched user.
bgsmp <- sample(bgcomp_user_ids, altfgN)
# Covert users carry negative ids (-1..-foregroundN); sampled background users
# keep their real (non-negative) ids. One row per (increment, user).
covertTimeLine <- data.table(
  increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
  outcome=factor(covertStates["abs"], levels=covertStates), key=c("increment","user_id")
)
# Same layout for persistence-community outcomes.
covertPersistence <- data.table(
  increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
  outcome=factor(covertStates["abs"], levels=covertStates), key=c("increment","user_id")
)
# 0/1 "detected in a small community" indicators, cumulated after the loop.
covertSnapAccumulation <- data.table(
  increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
  count = 0, key=c("increment","user_id")
)
covertPersistAccumulation <- data.table(
  increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
  count = 0, key=c("increment","user_id")
)
## compute some random background comparison on mid/lo/late users
## compute
# Main scan: for each increment, load the snapshot and persistence community
# assignments, classify every tracked user by the size of the community they
# sit in (<= 30 members counts as "small" / detected), and record TPR/FPR.
# Convention used throughout: user_id < 0 is a covert (foreground) user,
# user_id >= 0 is background; `bgsmp` is the matched background sample.
for (i in 1:n) {
  # args: [1] snapshot background clusters, [2] persistence background
  # clusters, [3] persistence foreground clusters for this increment.
  args <- c(
    sprintf("input/background-clusters/spin-glass/base-15-30/%03d.rds",i),
    sprintf("input/background-clusters/spin-glass/pc-15-30/%03d.rds",i),
    sprintf("output/matched/%s/%s/%s/10/001-covert-0/%03d.rds",lf,pwr,pk,i)
  )
  backgroundSnapshot <- readRDS(args[1])
  backgroundSampshot <- backgroundSnapshot[user_id %in% bgsmp, user_id, keyby=community]
  foregroundSnapshot <- foregroundSnapshots[increment == i, user_id, keyby=community]
  # Per-community totals and membership split (background / foreground /
  # background-sample), over the union of both assignments.
  counts <- rbind(backgroundSnapshot, foregroundSnapshot)[,
    list(total=.N, bg=sum(user_id >= 0), fg=sum(user_id<0), altfg=length(intersect(user_id, bgsmp))),
    keyby=community
  ]
  # Tracked users (covert + sampled background) joined to their community sizes.
  snap <- rbind(
    counts[foregroundSnapshot][user_id < 0], # sometimes regular users are added due to covert perturbation
    counts[backgroundSampshot]
  )
  if (dim(snap)[1]) {
    # found == TRUE when the user's community is small (<= 30 members).
    jn <- setkey(snap[,
      list(increment = i, found = total <= 30),
      by=user_id
    ], increment, user_id)
    covertSnapAccumulation[jn, count := found + 0]  # logical -> 0/1
    covertTimeLine[jn, outcome := ifelse(found, covertStates["lit"], covertStates["big"])]
  }
  # Snapshot rates: small-community background share (FPR) and small-community
  # foreground shares (TPR, and TPR of the matched background sample).
  res[increment == i,
    `:=`(
      snapFPR = counts[total <= 30, sum(bg)]/max(counts[,sum(bg)],1),
      snapTPR = counts[total <= 30, sum(fg)]/foregroundN,
      snapAltTPR = counts[total <= 30, sum(altfg)]/altfgN
    )
  ]
  # Persistence-community files may be missing for an increment; skip then.
  if (!file.exists(args[2])) {
    cat("missing background",i,"\n")
    next()
  }
  backgroundPersistent <- readRDS(args[2])
  # Foreground persistence file may also be missing; fall back to an empty
  # keyed table so the joins below still work.
  tres <- try(readRDS(args[3]))
  foregroundPersistent <- if (class(tres)[1] == "try-error") {
    cat("missing",i,"\n")
    data.table(user_id=integer(), community=integer(), key="community")
  } else setkey(tres, community)
  # Same per-community accounting, now on persistence communities.
  counts <- rbind(backgroundPersistent, foregroundPersistent)[,
    list(total=.N, bg=sum(user_id >= 0), fg=sum(user_id<0), altfg=length(intersect(user_id, bgsmp))),
    keyby=community
  ]
  backgroundSampsist <- backgroundPersistent[user_id %in% bgsmp, user_id, keyby=community]
  snap <- rbind(
    counts[foregroundPersistent][user_id < 0], # sometimes regular users are added due to covert perturbation
    counts[backgroundSampsist]
  )
  #snap <- counts[foregroundPersistent][user_id < 0]
  if (dim(snap)[1]) {
    jn <- setkey(snap[,
      list(increment = i, found = total <= 30),
      by=user_id
    ], increment, user_id)
    covertPersistAccumulation[jn, count := found + 0]
    covertPersistence[jn, outcome := ifelse(found, covertStates["lit"], covertStates["big"])]
  }
  # NOTE(review): unlike snapFPR above, pcFPR has no max(..., 1) guard on the
  # denominator -- confirm counts[,sum(bg)] can never be 0 here, otherwise
  # this yields NaN for the increment.
  res[increment == i,
    `:=`(
      pcFPR = counts[total <= 30, sum(bg)]/counts[,sum(bg)],
      pcTPR = counts[total <= 30, sum(fg)]/foregroundN,
      pcAltTPR = counts[total <= 30, sum(altfg)]/altfgN
    )
  ]
}
# Cumulative per-user detection counts over time (snapshot and persistence).
snapAcc <- covertSnapAccumulation[,list(increment, cs=cumsum(count)), by=user_id]
persAcc <- covertPersistAccumulation[,list(increment, cs=cumsum(count)), by=user_id]
# Quick interactive step plots of the accumulations (printed, not saved).
ggplot(snapAcc) + aes(y=cs, x=increment, color=factor(user_id)) + geom_step() + theme_bw()
ggplot(persAcc) + aes(y=cs, x=increment, color=factor(user_id)) + geom_step() + theme_bw()
# Long format: one row per (increment, measure); measure names like "snapFPR"
# are split into the analysis kind (snap/pc/snapAlt/pcAlt) and outcome (FPR/TPR).
pltres <- melt.data.table(res, id.vars="increment", variable.name = "measure", value.name = "rate")
pltres[,
  `community analysis` := factor(c(pc="persistence", snap="snapshot", snapAlt="altSnap", pcAlt="altpc")[gsub("[FT]PR", "", measure)])
][,
  outcome := factor(gsub(".+([FT]PR)","\\1", measure))
]
# TPR/FPR over time for each analysis kind.
ggsave("mmm-prs.png", ggplot(pltres) + theme_bw() + theme(panel.border=element_blank()) +
  aes(x=increment, y=rate, color=outcome, linetype=`community analysis`) + geom_line() +
  scale_color_manual(values=c(FPR='red',TPR='blue')) + labs(y="TPR & FPR") +
  scale_linetype_manual(values=c(persistence="solid",snapshot="dashed", altpc="dotted", altSnap="dotdash")) +
  ggtitle(sprintf("For %s %s %s", lf, pwr, pk)), width=7, height=4)
# Per-user outcome rasters: snapshot detection ...
ggsave("mmm-snap.png", ggplot(covertTimeLine) + theme_bw() + aes(x=increment, y=factor(user_id), fill=outcome) +
  geom_raster() + scale_fill_manual(values=c(absent="red",`present in large community`="yellow",`present in small community`="green")) +
  ggtitle(sprintf("Snapshot Detection for %s %s %s", lf, pwr, pk)) + scale_x_continuous(expand=c(0,0)) +
  labs(y="user id (<0 is covert)"), width=7, height=4)
# ... and persistence detection.
ggsave("mmm-pers.png", ggplot(covertPersistence) + theme_bw() + aes(x=increment, y=factor(user_id), fill=outcome) +
  geom_raster() + scale_fill_manual(values=c(absent="red",`present in large community`="yellow",`present in small community`="green")) +
  ggtitle(sprintf("Persistence Detection for %s %s %s", lf, pwr, pk)) + scale_x_continuous(expand=c(0,0)) +
  labs(y="user id (<0 is covert)"), width=7, height=4)
| /affiliation-detect.R | no_license | pearsonca/montreal-detect | R | false | false | 7,708 | r | rm(list=ls())
require(data.table)
require(ggplot2)
require(reshape2)
# args are (1) base background (for increment),
# (2) base foreground (all)
# (3) pc background (for increment)
# (4) pc foreground (for increment)
# (5) target
# parse_args <- function(argv = commandArgs(trailingOnly = T)) {
# parser <- optparse::OptionParser(
# usage = "usage: %prog path/to/input-useratob-scores/ path/to/accumulated-useratob-scores/",
# description = "convert (community X, user.a in community X, user.b in community X, 1) at interval k, to (...) cumulated up to interval k.",
# option_list = list(
# optparse::make_option(
# c("--verbose","-v"), action="store_true", default = FALSE,
# help="verbose?"
# )
# )
# )
# req_pos <- list(inputfiles=filelister, outputdir=identity)
# parsed <- optparse::parse_args(parser, argv, positional_arguments = length(req_pos))
# parsed$options$help <- NULL
# result <- c(mapply(function(f,c) f(c), req_pos, parsed$args, SIMPLIFY = F), parsed$options)
# result$storeres <- function(dt, was) {
# saveRDS(dt, sub(parsed$args[1], parsed$args[2], was))
# dt
# }
#
# if(result$verbose) print(result)
# result
# }
# given community affiliations for background + foreground
# for both regular and persistence communities X intervals
# compute # of covert members in small comms vs large comms
# compute # of non-covert members in small vs large comms
# compute TPR, FPR
# lf <- "mid"
# pwr <- "lo"
# pk <- "late"
lf <- "mid"
pwr <- "med"
pk <- "middle"
foregroundSnapshots <- readRDS(sprintf("output/matched/%s/%s/%s/10/001-covert-0-base.rds",lf,pwr,pk))
foregroundN <- 10
n <- 135
res <- data.table(increment=1:n,
snapFPR=numeric(n), snapTPR=numeric(n), snapAltTPR=numeric(n),
pcFPR=numeric(n), pcTPR=numeric(n), pcAltTPR=numeric(n),
key="increment"
)
covertStates <- c(abs="absent",big="present in large community",lit="present in small community")
bgcomp_user_ids <- readRDS("input/user.RData")[
(lifetime_main == lf & pwr_main == pwr & peak_main == pk), user_id
]
altfgN <- min(foregroundN, length(bgcomp_user_ids))
bgsmp <- sample(bgcomp_user_ids, altfgN)
covertTimeLine <- data.table(
increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
outcome=factor(covertStates["abs"], levels=covertStates), key=c("increment","user_id")
)
covertPersistence <- data.table(
increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
outcome=factor(covertStates["abs"], levels=covertStates), key=c("increment","user_id")
)
covertSnapAccumulation <- data.table(
increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
count = 0, key=c("increment","user_id")
)
covertPersistAccumulation <- data.table(
increment=rep(1:n, each=foregroundN+altfgN), user_id=rep(c(-(1:foregroundN), bgsmp), times=n),
count = 0, key=c("increment","user_id")
)
## Compute a background comparison on mid/lo/late users.
## For each increment i we read (1) a background snapshot clustering,
## (2) a background persistent-community clustering, and (3) the matched
## covert foreground run, then score detection: a community counts as
## "found" when its total size is <= 30 members.  Covert users have
## user_id < 0; bgsmp is a background alt-sample used as a control.
for (i in seq_len(n)) {
  args <- c(
    sprintf("input/background-clusters/spin-glass/base-15-30/%03d.rds", i),
    sprintf("input/background-clusters/spin-glass/pc-15-30/%03d.rds", i),
    sprintf("output/matched/%s/%s/%s/10/001-covert-0/%03d.rds", lf, pwr, pk, i)
  )
  ## --- snapshot-based detection ------------------------------------------
  backgroundSnapshot <- readRDS(args[1])
  backgroundSampshot <- backgroundSnapshot[user_id %in% bgsmp, user_id, keyby=community]
  foregroundSnapshot <- foregroundSnapshots[increment == i, user_id, keyby=community]
  ## Per-community membership counts: background (user_id >= 0),
  ## covert foreground (user_id < 0), and the background alt-sample.
  counts <- rbind(backgroundSnapshot, foregroundSnapshot)[,
    list(total=.N, bg=sum(user_id >= 0), fg=sum(user_id<0), altfg=length(intersect(user_id, bgsmp))),
    keyby=community
  ]
  snap <- rbind(
    counts[foregroundSnapshot][user_id < 0], # sometimes regular users are added due to covert perturbation
    counts[backgroundSampshot]
  )
  if (nrow(snap) > 0) {
    jn <- setkey(snap[,
      list(increment = i, found = total <= 30),
      by=user_id
    ], increment, user_id)
    covertSnapAccumulation[jn, count := found + 0]  # logical -> 0/1
    covertTimeLine[jn, outcome := ifelse(found, covertStates["lit"], covertStates["big"])]
  }
  res[increment == i,
    `:=`(
      snapFPR = counts[total <= 30, sum(bg)]/max(counts[,sum(bg)],1),
      snapTPR = counts[total <= 30, sum(fg)]/foregroundN,
      snapAltTPR = counts[total <= 30, sum(altfg)]/altfgN
    )
  ]
  ## --- persistence-based detection ---------------------------------------
  if (!file.exists(args[2])) {
    cat("missing background",i,"\n")
    next
  }
  backgroundPersistent <- readRDS(args[2])
  tres <- try(readRDS(args[3]))
  ## Fall back to an empty keyed table when the foreground file is missing,
  ## so the joins below still work.
  foregroundPersistent <- if (inherits(tres, "try-error")) {
    cat("missing",i,"\n")
    data.table(user_id=integer(), community=integer(), key="community")
  } else setkey(tres, community)
  counts <- rbind(backgroundPersistent, foregroundPersistent)[,
    list(total=.N, bg=sum(user_id >= 0), fg=sum(user_id<0), altfg=length(intersect(user_id, bgsmp))),
    keyby=community
  ]
  backgroundSampsist <- backgroundPersistent[user_id %in% bgsmp, user_id, keyby=community]
  snap <- rbind(
    counts[foregroundPersistent][user_id < 0], # sometimes regular users are added due to covert perturbation
    counts[backgroundSampsist]
  )
  if (nrow(snap) > 0) {
    jn <- setkey(snap[,
      list(increment = i, found = total <= 30),
      by=user_id
    ], increment, user_id)
    covertPersistAccumulation[jn, count := found + 0]
    covertPersistence[jn, outcome := ifelse(found, covertStates["lit"], covertStates["big"])]
  }
  res[increment == i,
    `:=`(
      ## Guard the denominator as snapFPR does above, so an empty background
      ## yields 0 instead of NaN (was unguarded: counts[,sum(bg)]).
      pcFPR = counts[total <= 30, sum(bg)]/max(counts[,sum(bg)],1),
      pcTPR = counts[total <= 30, sum(fg)]/foregroundN,
      pcAltTPR = counts[total <= 30, sum(altfg)]/altfgN
    )
  ]
}
## Cumulative per-user detection counts across increments, for the
## snapshot- and persistence-based pipelines respectively.
snapAcc <- covertSnapAccumulation[,list(increment, cs=cumsum(count)), by=user_id]
persAcc <- covertPersistAccumulation[,list(increment, cs=cumsum(count)), by=user_id]
## Quick interactive step plots of the accumulations (not written to disk).
ggplot(snapAcc) + aes(y=cs, x=increment, color=factor(user_id)) + geom_step() + theme_bw()
ggplot(persAcc) + aes(y=cs, x=increment, color=factor(user_id)) + geom_step() + theme_bw()
## Long format: one row per (increment, measure).  Measure names such as
## "snapFPR" or "pcAltTPR" are split below into the analysis type (prefix)
## and the outcome ("FPR"/"TPR" suffix).
pltres <- melt.data.table(res, id.vars="increment", variable.name = "measure", value.name = "rate")
pltres[,
`community analysis` := factor(c(pc="persistence", snap="snapshot", snapAlt="altSnap", pcAlt="altpc")[gsub("[FT]PR", "", measure)])
][,
outcome := factor(gsub(".+([FT]PR)","\\1", measure))
]
## TPR/FPR curves for all four analysis variants.
ggsave("mmm-prs.png", ggplot(pltres) + theme_bw() + theme(panel.border=element_blank()) +
aes(x=increment, y=rate, color=outcome, linetype=`community analysis`) + geom_line() +
scale_color_manual(values=c(FPR='red',TPR='blue')) + labs(y="TPR & FPR") +
scale_linetype_manual(values=c(persistence="solid",snapshot="dashed", altpc="dotted", altSnap="dotdash")) +
ggtitle(sprintf("For %s %s %s", lf, pwr, pk)), width=7, height=4)
## Per-user detection rasters over time (snapshot and persistence views);
## outcome levels are set from covertStates earlier in the script.
ggsave("mmm-snap.png", ggplot(covertTimeLine) + theme_bw() + aes(x=increment, y=factor(user_id), fill=outcome) +
geom_raster() + scale_fill_manual(values=c(absent="red",`present in large community`="yellow",`present in small community`="green")) +
ggtitle(sprintf("Snapshot Detection for %s %s %s", lf, pwr, pk)) + scale_x_continuous(expand=c(0,0)) +
labs(y="user id (<0 is covert)"), width=7, height=4)
ggsave("mmm-pers.png", ggplot(covertPersistence) + theme_bw() + aes(x=increment, y=factor(user_id), fill=outcome) +
geom_raster() + scale_fill_manual(values=c(absent="red",`present in large community`="yellow",`present in small community`="green")) +
ggtitle(sprintf("Persistence Detection for %s %s %s", lf, pwr, pk)) + scale_x_continuous(expand=c(0,0)) +
labs(y="user id (<0 is covert)"), width=7, height=4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1ListNasJobsResponse}
\alias{GoogleCloudAiplatformV1ListNasJobsResponse}
\title{GoogleCloudAiplatformV1ListNasJobsResponse Object}
\usage{
GoogleCloudAiplatformV1ListNasJobsResponse(
nextPageToken = NULL,
nasJobs = NULL
)
}
\arguments{
\item{nextPageToken}{A token to retrieve the next page of results.}
\item{nasJobs}{List of NasJobs in the requested page.}
}
\value{
GoogleCloudAiplatformV1ListNasJobsResponse object
}
\description{
GoogleCloudAiplatformV1ListNasJobsResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Response message for JobService.ListNasJobs
}
\concept{GoogleCloudAiplatformV1ListNasJobsResponse functions}
| /googleaiplatformv1.auto/man/GoogleCloudAiplatformV1ListNasJobsResponse.Rd | no_license | justinjm/autoGoogleAPI | R | false | true | 804 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1ListNasJobsResponse}
\alias{GoogleCloudAiplatformV1ListNasJobsResponse}
\title{GoogleCloudAiplatformV1ListNasJobsResponse Object}
\usage{
GoogleCloudAiplatformV1ListNasJobsResponse(
nextPageToken = NULL,
nasJobs = NULL
)
}
\arguments{
\item{nextPageToken}{A token to retrieve the next page of results}
\item{nasJobs}{List of NasJobs in the requested page}
}
\value{
GoogleCloudAiplatformV1ListNasJobsResponse object
}
\description{
GoogleCloudAiplatformV1ListNasJobsResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Response message for JobService.ListNasJobs
}
\concept{GoogleCloudAiplatformV1ListNasJobsResponse functions}
|
###############################################################################
### TITLE
### COPULA MODEL WITH COVARIATE--DEPENDENT
###
### SYSTEM REQUIREMENTS
### R > = 2.15.3 with packages ``mvtnorm'', ``parallel''
### BLAS (optional)
###
### INPUT VARIABLES
### The variables with comments in capital letters are user input
### variables.
###
### OUTPUT VARIABLES
### The output variables always start with ``MCMC.''
###
### GENERALIZATION
### If you are interested in developing new copula models based on the
### existing code, look into the ``model'' folder.
###
### DATE
### CREATED: Mon Jan 09 17:12:04 CET 2012
### CURRENT: Sun Jan 04 10:21:56 CST 2015
###############################################################################
###----------------------------------------------------------------------------
### SPECIFY THE MODEL
###----------------------------------------------------------------------------
## MARGINAL MODELS NAME, TYPE AND PARAMETERS
Mdl.MargisType <- c("POISSON", "POISSON", "BB7")
Mdl.MargisNM <- c("POSITIVE", "NEGATIVE", "BB7")
## Parameters updated by the MCMC sampler, one sub-list per model component
## (margins first, copula last).  TRUE/FALSE is used instead of T/F because
## T and F are ordinary variables that can be reassigned.
MCMC.Update <- list(list("mu" = TRUE),
                    list("mu" = TRUE),
                    list("lambdaL" = TRUE, "lambdaU" = TRUE))
names(MCMC.Update) <- Mdl.MargisNM
## THE MODEL EVALUATION CRITERION
## Set this to NULL to turn off evaluation.
LPDS <- c("joint", Mdl.MargisNM)
## The object structure for the model components
names(Mdl.MargisType) <- Mdl.MargisNM
###----------------------------------------------------------------------------
### THE DATA AND MODEL
###----------------------------------------------------------------------------
## THE DATASET
##-----------------------------------------------------------------------------
## The dataset should be provided either via DGP or as a real dataset, in the
## following structure:
## Mdl.X: "list" -- each element contains the covariates of one margin or of
##        the copula.
## Mdl.Y: "list" -- each element contains the response variable of one margin.
## Loads X and Y into the workspace -- TODO confirm against the .Rdata file.
load(file.path(CDCOPULA_LIB_ROOT_DIR, "data/BABA-Texts.Rdata"))
## No. of total observations
nObsRaw <- length(Y[[1]])
## Data subset used.  NOTE(review): (1 + nObsRaw-nObsRaw):nObsRaw simplifies
## to 1:nObsRaw; the form is presumably a template for taking the last k
## observations -- confirm before simplifying.
Mdl.dataUsedIdx <- (1 + nObsRaw-nObsRaw):nObsRaw
Mdl.algorithm <- "full"
## THE RESPONSE VARIABLES (one per margin; the last element of Mdl.MargisNM
## is the copula and therefore excluded)
Mdl.Y <- lapply(Y[Mdl.MargisNM[-length(Mdl.MargisNM)]], function(x, idx)x[idx, ,drop = FALSE], Mdl.dataUsedIdx)
## The names of the response variables
names(Mdl.Y) <- Mdl.MargisNM[-length(Mdl.MargisNM)]
## COVARIATES USED FOR THE MARGINAL AND COPULA PARAMETERS
## ------------------------------------------------------------------------------
## A trick to include foreign marginal models in the estimation which are hard to directly
## put into the "MargiModel()" is do the following settings: (1) Let "MCMC.Update" be FALSE
## in all marginal densities. (2) Estimate the density features in foreign models and set
## the features in "Mdl.X" directly. (3) Set MCMC.UpdateStrategy to "twostage". (4) Set
## "Mdl.betaInit" to one in all marginal features.
## cbind(1, ...) prepends an intercept column to each design matrix.
Mdl.X <- MCMC.Update
Mdl.X[[1]][["mu"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[1]][["phi"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[1]][["df"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[1]][["lmd"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[2]][["mu"]] <- cbind(1, X[[2]][Mdl.dataUsedIdx, 1:10])
Mdl.X[[3]][["lambdaL"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9], X[[2]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[3]][["lambdaU"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9], X[[2]][Mdl.dataUsedIdx, 1:9])
## THE LINK FUNCTION USED IN THE MODEL
## Each parameter gets a link mapping the linear predictor onto its support;
## "glog"/"glogit" presumably denote generalized log/logit links bounded on
## (a, b) -- confirm against the package's link-function implementation.
Mdl.parLink <- MCMC.Update
Mdl.parLink[[1]][["mu"]] <- list(type = "identity", nPar = 1)
Mdl.parLink[[1]][["phi"]] <- list(type = "glog", a = 0.01, b = 100, nPar = 1)
Mdl.parLink[[1]][["df"]] <- list(type = "glog", nPar = 1, a = 2, b = 30)
Mdl.parLink[[1]][["lmd"]] <- list(type = "glog", a = 0.01, b = 100, nPar = 1)
Mdl.parLink[[2]][["mu"]] <- list(type = "glog", a = 0.01, b = 100, nPar = 1)
Mdl.parLink[[3]][["lambdaL"]] <- list(type = "glogit", nPar = 1, a = 0.01, b = 0.99)
Mdl.parLink[[3]][["lambdaU"]] <- list(type = "glogit", nPar = 1, a = 0.01, b = 0.99)
## THE VARIABLE SELECTION SETTINGS AND STARTING POINT
## Variable selection candidates; NULL: no variable selection, use the full
## covariate set.  "init" is the starting configuration ("all-in", "all-out",
## "random", or user-input).  cand = "2:end" excludes column 1 -- the
## intercept added by cbind(1, ...) above -- from selection.
Mdl.varSelArgs <- MCMC.Update
Mdl.varSelArgs[[1]][["mu"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[1]][["phi"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[1]][["df"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[1]][["lmd"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[2]][["mu"]] <- list(cand = "2:end", init = "all-out")
Mdl.varSelArgs[[3]][["lambdaL"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[3]][["lambdaU"]] <- list(cand = "2:end", init = "all-in")
###----------------------------------------------------------------------------
### THE MCMC CONFIGURATION
###----------------------------------------------------------------------------
## NUMBER OF MCMC ITERATIONS
MCMC.nIter <- 1000
## SAVE OUTPUT PATH
##-----------------------------------------------------------------------------
## "save.output = FALSE": do not save anything.
## "save.output = "path-to-directory"": save the working directory into the
## given directory.
save.output <- "~/running"
## MCMC TRAJECTORY
##-----------------------------------------------------------------------------
## If TRUE, the MCMC is tracked during the evaluation.
MCMC.track <- TRUE
## Update order of the seven sampled parameters.  NOTE(review): indices 2..4
## of component 1 extend the one-element list copied from MCMC.Update with
## unnamed entries (presumably phi, df, lmd) -- confirm downstream code
## indexes these positionally.
MCMC.UpdateOrder <- MCMC.Update
MCMC.UpdateOrder[[1]][[1]] <- 1
MCMC.UpdateOrder[[1]][[2]] <- 2
MCMC.UpdateOrder[[1]][[3]] <- 3
MCMC.UpdateOrder[[1]][[4]] <- 4
MCMC.UpdateOrder[[2]][[1]] <- 5
MCMC.UpdateOrder[[3]][[1]] <- 6
MCMC.UpdateOrder[[3]][[2]] <- 7
## MCMC UPDATING STRATEGY
##-----------------------------------------------------------------------------
## "joint" : Update the joint posterior w.r.t. MCMC.Update and MCMC.UpdateOrder
## "margin" : the marginal posterior.
## "twostage" : Update the joint posterior but using a two-stage approach.
## NOTE: If one wants to use the "margin" or "twostage" options just to
## estimate the copula density, a variable "MCMC.density[["u"]]" must be
## provided. "MCMC.density" consists of the CDFs of the margins
## (i.e. u1, u2, ...).
MCMC.UpdateStrategy <- "twostage"
## THE METROPOLIS-HASTINGS ALGORITHM PROPOSAL ARGUMENTS
## Each parameter uses a "GNewtonMove" proposal with mvt innovations and a
## binomial proposal for the variable-selection indicators.
MCMC.propArgs <- MCMC.Update
MCMC.propArgs[[1]][[1]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[1]][[2]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[1]][[3]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[1]][[4]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
## NOTE(review): prob = 0.2 below differs from the 0.5 used everywhere else
## -- confirm this is intentional for the second margin.
MCMC.propArgs[[2]][[1]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.2))
MCMC.propArgs[[3]][[1]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[3]][[2]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
## POSTERIOR INFERENCE OPTIONS
##-----------------------------------------------------------------------------
## CROSS VALIDATION
## "N.subsets" is the no. of folds for cross-validation. If N.subsets = 0, no
## cross-validation. "partiMethod" tells how to partition the data. The test
## ratio is used if partiMethod is "time-series" (use the old data to
## predict the new interval).
nCross <- 1
Mdl.crossValidArgs <- list(N.subsets = nCross,
partiMethod = "time-series",
testRatio = 0.2)
## Indices for training and testing samples according to cross-validation;
## set.crossvalid() is defined elsewhere in the package.
Mdl.crossValidIdx <- set.crossvalid(length(Mdl.dataUsedIdx),Mdl.crossValidArgs)
## SAMPLE PROPORTION USED FOR POSTERIOR INFERENCE
MCMC.sampleProp <- 1
## BURN-IN RATIO
MCMC.burninProp <- 0.1 # zero indicates no burn-in
###----------------------------------------------------------------------------
### PRIOR SETTINGS
###----------------------------------------------------------------------------
## PRIOR FOR THE COPULA PARAMETERS
## -----------------------------------------------------------------------------
## NOTE: The variables are recycled if needed. For example indicators$prob can
## be a scalar or a vector with the same length as the variable selection
## candidates. There might be connections between parameters in the models but
## it will not affect the prior settings on the coefficients as long as we use
## a dynamic link function.
##
## All coefficient priors share exactly the same structure and differ only in
## the prior placed on the intercept, so they are built with a small helper.
## Returns the standard prior list for one parameter's coefficients:
## a custom intercept prior (intercept.input), conditional-mvnorm slopes,
## and Bernoulli(0.5) variable-selection indicators.
make.coefPrior <- function(intercept.input) {
    list("beta" = list("intercept" = list(type = "custom",
                                          input = intercept.input,
                                          output = list(type = "norm", shrinkage = 1)),
                       "slopes" = list(type = "cond-mvnorm",
                                       mean = 0, covariance = "identity", shrinkage = 1)),
         "indicators" = list(type = "bern", prob = 0.5))
}
Mdl.priArgs <- MCMC.Update
Mdl.priArgs[[1]][["mu"]] <- make.coefPrior(list(type = "norm", mean = 0, variance = 1))
Mdl.priArgs[[1]][["phi"]] <- make.coefPrior(list(type = "glognorm", mean = 1, variance = 1, a = 0.01))
Mdl.priArgs[[1]][["df"]] <- make.coefPrior(list(type = "glognorm", mean = 5, variance = 10, a = 2))
Mdl.priArgs[[1]][["lmd"]] <- make.coefPrior(list(type = "glognorm", mean = 1, variance = 1, a = 0.01))
Mdl.priArgs[[2]][["mu"]] <- make.coefPrior(list(type = "glognorm", mean = 1, variance = 1, a = 0.01))
Mdl.priArgs[[3]][["lambdaL"]] <- make.coefPrior(list(type = "gbeta", mean = 0.2, variance = 0.05,
                                                     a = 0.05, b = 0.95))
Mdl.priArgs[[3]][["lambdaU"]] <- make.coefPrior(list(type = "gbeta", mean = 0.2, variance = 0.05,
                                                     a = 0.05, b = 0.95))
###----------------------------------------------------------------------------
### THE PARAMETERS FOR INITIAL AND CURRENT MCMC ITERATION
### The parameters in the current MCMC iteration; for the first iteration
### they are set to the initial values.
###----------------------------------------------------------------------------
## THE PARAMETER COEFFICIENTS STARTING POINT
## The possible inputs are ("random", "ols" or user-input).
## NOTE(review): as with MCMC.UpdateOrder, indices 2..4 of component 1 extend
## the one-element list copied from MCMC.Update with unnamed entries --
## confirm downstream code indexes these positionally.
Mdl.betaInit <- MCMC.Update
Mdl.betaInit[[1]][[1]] <- "random"
Mdl.betaInit[[1]][[2]] <- "random"
Mdl.betaInit[[1]][[3]] <- "random"
Mdl.betaInit[[1]][[4]] <- "random"
Mdl.betaInit[[2]][[1]] <- "random"
Mdl.betaInit[[3]][[1]] <- "random"
Mdl.betaInit[[3]][[2]] <- "random"
## If TRUE, the starting values are presumably refined by numerical
## optimization before sampling -- confirm against the sampler code.
MCMC.optimInit <- TRUE
################################################################################
### THE END
################################################################################
| /inst/config/config.BB7.LU.SPLITTPOISSON.BABA-TEXTS.R | no_license | ayotoasset/cdcopula | R | false | false | 14,222 | r | ###############################################################################
### TITLE
### COPULA MODEL WITH COVARIATE--DEPENDENT
###
### SYSTEM REQUIREMENTS
### R > = 2.15.3 with packages ``mvtnorm'', ``parallel''
### BLAS (optional)
###
### INPUT VARIABLES
### The variables with comments in capital letters are user input
### variables.
###
### OUTPUT VARIABLES
### The output variables are always started with ``MCMC.''
###
### GENERALIZATION
### If you are interested in developing new copula models based on the
### existing code. Look into the ``model'' folder.
###
### DATE
### CREATED: Mon Jan 09 17:12:04 CET 2012
### CURRENT: Sun Jan 04 10:21:56 CST 2015
###############################################################################
###----------------------------------------------------------------------------
### SPECIFY THE MODEL
###----------------------------------------------------------------------------
## MARGINAL MODELS NAME, TYPE AND PARAMETERS
Mdl.MargisType <- c("POISSON", "POISSON", "BB7")
Mdl.MargisNM <- c("POSITIVE", "NEGATIVE", "BB7")
MCMC.Update <- list(list("mu" = T),
list("mu" = T),
list("lambdaL" = T, "lambdaU" = T))
names(MCMC.Update) <- Mdl.MargisNM
## THE MODEL EVALUATION CRITERION
## Set this to NULL to turn of evaluation.
LPDS <- c("joint", Mdl.MargisNM)
## The object structure for the model components
names(Mdl.MargisType) <- Mdl.MargisNM
###----------------------------------------------------------------------------
### THE DATA AND MODEL
###----------------------------------------------------------------------------
## THE DATASET
##-----------------------------------------------------------------------------
## The dataset should either provided via DGP or the real dataset. The dataset
## should be in the following structure:
## Mdl.X: "list" each list contains the covariates in each margin or copula.
## Mdl.Y: "list" each list contains the response variable of that margin.
load(file.path(CDCOPULA_LIB_ROOT_DIR, "data/BABA-Texts.Rdata"))
## No. of Total Observations
nObsRaw <- length(Y[[1]])
## Data subset used
Mdl.dataUsedIdx <- (1 + nObsRaw-nObsRaw):nObsRaw
Mdl.algorithm <- "full"
## THE RESPONSE VARIABLES
Mdl.Y <- lapply(Y[Mdl.MargisNM[-length(Mdl.MargisNM)]], function(x, idx)x[idx, ,drop = FALSE], Mdl.dataUsedIdx)
## The name of respond variables
names(Mdl.Y) <- Mdl.MargisNM[-length(Mdl.MargisNM)]
## COVARIATES USED FOR THE MARGINAL AND COPULA PARAMETERS
## ------------------------------------------------------------------------------
## A trick to include foreign marginal models in the estimation which are hard to directly
## put into the "MargiModel()" is do the following settings: (1) Let "MCMC.Update" be FALSE
## in all marginal densities. (2) Estimate the density features in foreign models and set
## the features in "Mdl.X" directly. (3) Set MCMC.UpdateStrategy be "twostage". (4) Set
## "Mdl.betaInit" be one in all marginal features.
Mdl.X <- MCMC.Update
Mdl.X[[1]][["mu"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[1]][["phi"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[1]][["df"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[1]][["lmd"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[2]][["mu"]] <- cbind(1, X[[2]][Mdl.dataUsedIdx, 1:10])
Mdl.X[[3]][["lambdaL"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9], X[[2]][Mdl.dataUsedIdx, 1:9])
Mdl.X[[3]][["lambdaU"]] <- cbind(1, X[[1]][Mdl.dataUsedIdx, 1:9], X[[2]][Mdl.dataUsedIdx, 1:9])
## THE LINK FUNCTION USED IN THE MODEL
Mdl.parLink <- MCMC.Update
Mdl.parLink[[1]][["mu"]] <- list(type = "identity", nPar = 1)
Mdl.parLink[[1]][["phi"]] <- list(type = "glog", a = 0.01, b = 100, nPar = 1)
Mdl.parLink[[1]][["df"]] <- list(type = "glog", nPar = 1, a = 2, b = 30)
Mdl.parLink[[1]][["lmd"]] <- list(type = "glog", a = 0.01, b = 100, nPar = 1)
Mdl.parLink[[2]][["mu"]] <- list(type = "glog", a = 0.01, b = 100, nPar = 1)
Mdl.parLink[[3]][["lambdaL"]] <- list(type = "glogit", nPar = 1, a = 0.01, b = 0.99)
Mdl.parLink[[3]][["lambdaU"]] <- list(type = "glogit", nPar = 1, a = 0.01, b = 0.99)
## THE VARIABLE SELECTION SETTINGS AND STARTING POINT
## Variable selection candidates, NULL: no variable selection use full
## covariates. ("all-in", "all-out", "random", or user-input)
Mdl.varSelArgs <- MCMC.Update
Mdl.varSelArgs[[1]][["mu"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[1]][["phi"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[1]][["df"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[1]][["lmd"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[2]][["mu"]] <- list(cand = "2:end", init = "all-out")
Mdl.varSelArgs[[3]][["lambdaL"]] <- list(cand = "2:end", init = "all-in")
Mdl.varSelArgs[[3]][["lambdaU"]] <- list(cand = "2:end", init = "all-in")
###----------------------------------------------------------------------------
### THE MCMC CONFIGURATION
###----------------------------------------------------------------------------
## NUMBER OF MCMC ITERATIONS
MCMC.nIter <- 1000
## SAVE OUTPUT PATH
##-----------------------------------------------------------------------------
## "save.output = FALSE" it will not save anything.
## "save.output = "path-to-directory"" it will save the working directory in
## the given directory.
save.output <- "~/running"
## MCMC TRAJECTORY
##-----------------------------------------------------------------------------
## If TRUE, the MCMC should be tracked during the evaluation.
MCMC.track <- TRUE
MCMC.UpdateOrder <- MCMC.Update
MCMC.UpdateOrder[[1]][[1]] <- 1
MCMC.UpdateOrder[[1]][[2]] <- 2
MCMC.UpdateOrder[[1]][[3]] <- 3
MCMC.UpdateOrder[[1]][[4]] <- 4
MCMC.UpdateOrder[[2]][[1]] <- 5
MCMC.UpdateOrder[[3]][[1]] <- 6
MCMC.UpdateOrder[[3]][[2]] <- 7
## MCMC UPDATING STRATEGY
##-----------------------------------------------------------------------------
## "joint" : Update the joint posterior w.r.t. MCMC.Update and MCMC.UpdateOrder
## "margin" : the marginal posterior.
## "twostage" : Update the joint posterior but using a two stage approach.
## NOTE: If one want to use "margin" or "twostage" options just to to estimate the copula
## density. A variable "MCMC.density[["u"]]" must provide. "MCMC.density" consists of CDF of
## margins (i.e. u1, u2, ...)
MCMC.UpdateStrategy <- "twostage"
## THE METROPOLIS-HASTINGS ALGORITHM PROPOSAL ARGUMENTS
MCMC.propArgs <- MCMC.Update
MCMC.propArgs[[1]][[1]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[1]][[2]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[1]][[3]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[1]][[4]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[2]][[1]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.2))
MCMC.propArgs[[3]][[1]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
MCMC.propArgs[[3]][[2]] <- list("algorithm" = list(type = "GNewtonMove", ksteps = 3, hess = "outer"),
"beta" = list(type = "mvt", df = 6),
"indicators" = list(type = "binom", prob = 0.5))
## POSTERIOR INFERENCE OPTIONS
##-----------------------------------------------------------------------------
## CROSS VALIDATION
## "N.subsets" is no. of folds for cross-validation. If N.subsets = 0, no
## cross-validation. And "partiMethod" tells how to partition the data. Testing
## percent is used if partiMethod is "time-series". (use the old data to
## predict the new interval)
nCross <- 1
Mdl.crossValidArgs <- list(N.subsets = nCross,
partiMethod = "time-series",
testRatio = 0.2)
## Indices for training and testing sample according to cross-validation
Mdl.crossValidIdx <- set.crossvalid(length(Mdl.dataUsedIdx),Mdl.crossValidArgs)
## SAMPLER PROPORTION FOR POSTERIOR INFERENCE,
MCMC.sampleProp <- 1
## BURN-IN RATIO
MCMC.burninProp <- 0.1 # zero indicates no burn-in
###----------------------------------------------------------------------------
### PRIOR SETTINGS
###----------------------------------------------------------------------------
## PRIOR FOR THE COPULA PARAMETERS
## -----------------------------------------------------------------------------
## NOTE: The variable are recycled if needed. For example indicators$prob can be a scaler
## or a vector with same length of variable section candidates. There might be connections
## between parameters in the models but is will not affect the prior settings on the
## coefficients as long as we use a dynamic link function.
Mdl.priArgs <- MCMC.Update
Mdl.priArgs[[1]][["mu"]] <-
list("beta" = list("intercept" = list(type = "custom",
input = list(type = "norm", mean = 0, variance = 1),
output = list(type = "norm", shrinkage = 1)),
"slopes" = list(type = "cond-mvnorm",
mean = 0, covariance = "identity", shrinkage = 1)),
"indicators" = list(type = "bern", prob = 0.5))
Mdl.priArgs[[1]][["phi"]] <-
list("beta" = list("intercept" = list(type = "custom",
input = list(type = "glognorm", mean = 1, variance = 1, a = 0.01),
output = list(type = "norm", shrinkage = 1)),
"slopes" = list(type = "cond-mvnorm",
mean = 0, covariance = "identity", shrinkage = 1)),
"indicators" = list(type = "bern", prob = 0.5))
Mdl.priArgs[[1]][["df"]] <-
list("beta" = list("intercept" = list(type = "custom",
input = list(type = "glognorm", mean = 5, variance = 10, a = 2),
output = list(type = "norm", shrinkage = 1)),
"slopes" = list(type = "cond-mvnorm",
mean = 0, covariance = "identity", shrinkage = 1)),
"indicators" = list(type = "bern", prob = 0.5))
Mdl.priArgs[[1]][["lmd"]] <-
list("beta" = list("intercept" = list(type = "custom",
input = list(type = "glognorm", mean = 1, variance = 1, a = 0.01),
output = list(type = "norm", shrinkage = 1)),
"slopes" = list(type = "cond-mvnorm",
mean = 0, covariance = "identity", shrinkage = 1)),
"indicators" = list(type = "bern", prob = 0.5))
Mdl.priArgs[[2]][["mu"]] <-
list("beta" = list("intercept" = list(type = "custom",
input = list(type = "glognorm", mean = 1, variance = 1, a = 0.01),
output = list(type = "norm", shrinkage = 1)),
"slopes" = list(type = "cond-mvnorm",
mean = 0, covariance = "identity", shrinkage = 1)),
"indicators" = list(type = "bern", prob = 0.5))
Mdl.priArgs[[3]][["lambdaL"]] <-
list("beta" = list("intercept" = list(type = "custom",
input = list(type = "gbeta", mean = 0.2, variance = 0.05,
a = 0.05, b = 0.95),
output = list(type = "norm", shrinkage = 1)),
"slopes" = list(type = "cond-mvnorm",
mean = 0, covariance = "identity", shrinkage = 1)),
"indicators" = list(type = "bern", prob = 0.5))
Mdl.priArgs[[3]][["lambdaU"]] <-
list("beta" = list("intercept" = list(type = "custom",
input = list(type = "gbeta", mean = 0.2, variance = 0.05,
a = 0.05, b = 0.95),
output = list(type = "norm", shrinkage = 1)),
"slopes" = list(type = "cond-mvnorm",
mean = 0, covariance = "identity", shrinkage = 1)),
"indicators" = list(type = "bern", prob = 0.5))
###----------------------------------------------------------------------------
### THE PARAMETERS FOR INITIAL AND CURRENT MCMC ITERATION
### The parameters in the current MCMC iteration. For the first iteration, it
### is set as the initial values
###----------------------------------------------------------------------------
## THE PARAMETER COEFFICIENTS STARTING POINT
## The possible inputs are ("random", "ols" or user-input).
Mdl.betaInit <- MCMC.Update
Mdl.betaInit[[1]][[1]] <- "random"
Mdl.betaInit[[1]][[2]] <- "random"
Mdl.betaInit[[1]][[3]] <- "random"
Mdl.betaInit[[1]][[4]] <- "random"
Mdl.betaInit[[2]][[1]] <- "random"
Mdl.betaInit[[3]][[1]] <- "random"
Mdl.betaInit[[3]][[2]] <- "random"
MCMC.optimInit <- TRUE
################################################################################
### THE END
################################################################################
|
## Correlation heat map of RNA half-lives vs. length, GC content and RNA2D.
## NOTE(review): RColorBrewer appears unused here (colorRampPalette lives in
## grDevices); kept to avoid changing behavior -- confirm before removing.
library(RColorBrewer)
library(corrplot)
# Correlation matrix (loads r.mat)
load(file = "length_gc_rna2d_cor.Rdata")
# p-value matrix (loads p.mat)
load(file = "length_gc_rna2d_p.Rdata")
# Diverging color palette in 8 steps: red -> white -> blue
col <- colorRampPalette(c("red", "white", "blue"))(8)
corrplot(
  r.mat,
  method = "circle",
  type = "full",
  #add=TRUE,
  col = col,
  title = "Correlation between RNAs half-lives and their lengths, GC contents, and RNA2Ds",
  bg = "white",
  outline = FALSE,
  p.mat = p.mat,
  mar = c(0, 0, 1, 0),
  sig.level = 0.01,
  insig = "pch", # "blank" or "label_sig" or "pch" or "p-value"
  pch = 4, pch.col = "black", pch.cex = 2,
  tl.col = "black",
  cl.pos = "r",
  cl.length = 6,
  tl.offset = 1.3,
  win.asp = 1.1,
  tl.cex = 1,
  tl.srt = 90,
  rect.lwd = 1,
  na.label = "na",
  is.corr = FALSE
)
# Dataset group labels above the matrix.  graphics::text() is vectorized
# over x and labels, so one call replaces six near-identical ones.
text(x = c(2, 6, 9, 12, 16, 20), y = 3.9,
     labels = c("lnc-human1", "lnc-human2", "lnc-human",
                "m-human1", "m-human2", "m-human"),
     cex = 1.2)
| /figure5.R | no_license | HongyuanWu/lncRNA-1 | R | false | false | 1,051 | r | library(RColorBrewer)
library(corrplot)
# Correlation matrix
load(file = "length_gc_rna2d_cor.Rdata")
# p-value matrix
load(file = "length_gc_rna2d_p.Rdata")
# Color palette
col <- colorRampPalette(c("red", "white", "blue"))(8)
corrplot(
r.mat,
method = "circle",
type = "full",
#add=TRUE,
col = col,
title = "Correlation between RNAs half-lives and their lengths, GC contents, and RNA2Ds",
bg = "white",
outline = FALSE,
p.mat = p.mat,
mar=c(0, 0, 1, 0),
sig.level = 0.01,
insig = "pch", # "blank" or "label_sig" or "pch" or "p-value"
pch = 4, pch.col = "black", pch.cex = 2,
tl.col = "black",
cl.pos = "r",
cl.length = 6,
tl.offset = 1.3,
win.asp = 1.1,
tl.cex = 1,
tl.srt = 90,
rect.lwd = 1,
na.label = "na",
is.corr = FALSE
)
text(2,3.9,"lnc-human1",cex = 1.2)
text(6,3.9,"lnc-human2",cex=1.2)
text(9,3.9,"lnc-human",cex=1.2)
text(12,3.9,"m-human1",cex=1.2)
text(16,3.9,"m-human2",cex=1.2)
text(20,3.9,"m-human",cex=1.2)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{model2netcdf.SIPNET}
\alias{model2netcdf.SIPNET}
\title{Function to convert SIPNET model output to standard netCDF format}
\usage{
model2netcdf.SIPNET(outdir, sitelat, sitelon, start_date, end_date)
}
\arguments{
\item{outdir}{Location of SIPNET model output}
\item{sitelat}{Latitude of the site}
\item{sitelon}{Longitude of the site}
\item{start_date}{Start time of the simulation}
\item{end_date}{End time of the simulation}
}
\description{
Convert SIPNET output to netCDF
}
\details{
Converts all output contained in a folder to netCDF.
}
\author{
Shawn Serbin, Michael Dietze
}
| /models/sipnet/man/model2netcdf.SIPNET.Rd | permissive | gbromley/pecan | R | false | false | 646 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{model2netcdf.SIPNET}
\alias{model2netcdf.SIPNET}
\title{Function to convert SIPNET model output to standard netCDF format}
\usage{
model2netcdf.SIPNET(outdir, sitelat, sitelon, start_date, end_date)
}
\arguments{
\item{outdir}{Location of SIPNET model output}
\item{sitelat}{Latitude of the site}
\item{sitelon}{Longitude of the site}
\item{start_date}{Start time of the simulation}
\item{end_date}{End time of the simulation}
}
\description{
Convert SIPNET output to netCDF
}
\details{
Converts all output contained in a folder to netCDF.
}
\author{
Shawn Serbin, Michael Dietze
}
|
# Return the value/s of local maxima. Can be used to filter out maximum
# values from cumulative data per day.
# input :
#   values_vector : the vector of values (in correct order) which is filtered
#                   for maxima. Must contain zeroes as minimum (don't delete
#                   them out).
#   noresult      : the value which is returned if there is no maximum.
#                   Defaults to 0 (for cumulative measures).
# output :
#   the values of the maximum / maxima. In order to get the total amount,
#   sum them.
return_max_values <- function(values_vector, noresult = 0){
  # Collapse runs of identical values so plateaus count as a single point.
  collapsed_vec <- rle(values_vector)$values
  # A local maximum is where the slope sign flips from +1 to -1, i.e. where
  # diff(sign(diff(...))) == -2 (diff(c(1, -1)) is -2); searching for +2
  # would instead locate local minima.  A trailing -1 is appended so that a
  # series ending on a rise (e.g. a monotonic increase) still reports its
  # last value as a maximum.
  locations_vec <- which(diff(c(sign(diff(collapsed_vec)), -1)) == -2) + 1
  res <- collapsed_vec[locations_vec]
  if (length(res) == 0) {
    # Fixed: honour the `noresult` argument instead of a hard-coded 0.
    return(noresult)
  }
  res
}
# EXAMPLES
# return_max_values(c(0,1,1,0,2,2,3,4,5,6,9,0,2,3,0,3,0))
# return_max_values(c(1, 0,0))
# return_max_values(c(1, 1, 1))
# return_max_values(c(0, 0, 0))
# return_max_values(c(12, 19, 300))
| /return_max_values.R | no_license | noschenk/useful_material | R | false | false | 1,374 | r | # return the value/s of maxima. Can be used to filter out maximum values from cumulative data per day.
# input :
# values_vector : the vector of values (in correct order) which is filtered for maxima.
# must contain zeroes as minimum (don't delete them out)
# noresult : the value which is returned if there is no maximum. Defaults to 0 (for cumulative measures)
# output :
# collapsed_vector : the values of the maximum / maxima
# In order to get total amount, sum them.
return_max_values <- function(values_vector, noresult = 0){
collapsed_vec <- rle(values_vector)$values
locations_vec <- which(diff(c(sign(diff(collapsed_vec)), -1)) == -2) + 1
# check which is -2 because this means a change from going up to going down --> c(1, -1) has diff = -2
# note : If i would like to get maxima, I would search where diff is 2, because diff(c(-1, 1)) is 2
# take -1 because only diff of c(sign(diff(...)), -1) returns -2 for situations in which the function
# does not go down any more. (monotonical increase would not give a result)
res <- collapsed_vec[locations_vec]
if(length(res) == 0){
return(0)
} else {
return(res)
}
}
# EXAMPLES
# return_max_values(c(0,1,1,0,2,2,3,4,5,6,9,0,2,3,0,3,0))
# return_max_values(c(1, 0,0))
# return_max_values(c(1, 1, 1))
# return_max_values(c(0, 0, 0))
# return_max_values(c(12, 19, 300))
|
# Suppress R CMD check note: the roxygen tags below register the package
# imports even though they are not used at the top level.
#' @importFrom memoise memoise
#' @importFrom rversions r_release
NULL

# Latest released RStudio server version, fetched once per session
# (memoised) from the RStudio download bucket.
rstudio_release <- memoise::memoise(function() {
  url <- "http://s3.amazonaws.com/rstudio-server/current.ver"
  numeric_version(readLines(url, warn = FALSE))
})

# Latest released R version (memoised); wraps rversions::r_release().
r_release <- memoise::memoise(function() {
  R_system_version(rversions::r_release()$version)
})
#' Diagnose potential devtools issues
#'
#' Checks that you are running the latest release of R, the released
#' version of RStudio (when it is the active GUI), and up-to-date versions
#' of devtools and its dependencies.
#'
#' @family doctors
#' @export
#' @examples
#' \donttest{
#' dr_devtools()
#' }
dr_devtools <- function() {
  msg <- character()

  ## Compare the running R version against the latest CRAN release.
  if (getRversion() < r_release()) {
    msg[["R"]] <- paste0(
      "* R is out of date (", getRversion(), " vs ", r_release(), ")"
    )
  }

  ## Any dependency with a negative diff is older than the current release.
  deps <- package_deps("devtools", dependencies = NA)
  outdated <- deps$diff < 0
  if (any(outdated)) {
    msg[["devtools"]] <- paste0(
      "* Devtools or dependencies out of date: \n",
      paste(deps$package[outdated], collapse = ", ")
    )
  }

  ## Only meaningful when running inside RStudio.
  if (rstudioapi::isAvailable()) {
    released <- rstudio_release()
    current <- rstudioapi::getVersion()
    if (current < released) {
      msg[["rstudio"]] <- paste0(
        "* RStudio is out of date (", current, " vs ", released, ")"
      )
    }
  }

  doctor("devtools", msg)
}
#' Diagnose potential GitHub issues
#'
#' Checks that git user.name / user.email are configured, that an SSH
#' private key is present, and that a GitHub personal access token is set.
#'
#' @param path Path to repository to check. Defaults to current working
#'   directory
#' @family doctors
#' @export
#' @examples
#' \donttest{
#' dr_github()
#' }
dr_github <- function(path = ".") {
  if (!uses_git(path)) {
    return(doctor("github", "Current path is not a git repository"))
  }

  msg <- character()

  r <- git2r::repository(path, discover = TRUE)
  capture.output(config <- git2r::config(r))
  ## union() is NULL-safe: either scope may be absent (e.g. no global
  ## ~/.gitconfig), in which case the previous modifyList() call errored.
  config_names <- union(names(config$global), names(config$local))

  if (!("user.name" %in% config_names))
    msg[["name"]] <- "* user.name config option not set"
  if (!("user.email" %in% config_names))
    msg[["user"]] <- "* user.email config option not set"

  if (!file.exists("~/.ssh/id_rsa"))
    msg[["ssh"]] <- "* SSH private key not found"

  if (identical(Sys.getenv("GITHUB_PAT"), ""))
    msg[["PAT"]] <- paste("* GITHUB_PAT environment variable not set",
      "(this is not critical unless you want to install private repos)")

  doctor("github", msg)
}
# Doctor class ------------------------------------------------------------

# Build a "doctor" result: a logical scalar (TRUE = healthy, i.e. no
# diagnostic messages) carrying the doctor label and the messages as
# attributes.
doctor <- function(name, messages) {
  healthy <- length(messages) == 0
  structure(
    healthy,
    doctor = paste0("DR_", toupper(name)),
    messages = messages,
    class = "doctor"
  )
}
#' @export
print.doctor <- function(x, ...) {
  # Print method for "doctor" objects: reports health via message() /
  # warning().  Fixed to return `x` invisibly on both paths, per the
  # convention for print methods (previously both branches returned NULL).
  if (x) {
    message(attr(x, "doctor"), " SAYS YOU LOOK HEALTHY")
    return(invisible(x))
  }

  warning(attr(x, "doctor"), " FOUND PROBLEMS", call. = FALSE, immediate. = TRUE)
  messages <- strwrap(attr(x, "messages"), exdent = 2)
  message(paste(messages, collapse = "\n"))
  invisible(x)
}
| /R/doctor.R | no_license | BogdanTarus/devtools | R | false | false | 2,965 | r | # Supress R CMD check note
#' @importFrom memoise memoise
#' @importFrom rversions r_release
NULL
rstudio_release <- memoise::memoise(function() {
url <- "http://s3.amazonaws.com/rstudio-server/current.ver"
numeric_version(readLines(url, warn = FALSE))
})
r_release <- memoise::memoise(function() {
R_system_version(rversions::r_release()$version)
})
#' Diagnose potential devtools issues
#'
#' This checks to make sure you're using the latest release of R,
#' the released version of RStudio (if you're using it as your gui),
#' and the latest version of devtools and its dependencies.
#'
#' @family doctors
#' @export
#' @examples
#' \donttest{
#' dr_devtools()
#' }
dr_devtools <- function() {
msg <- character()
if (getRversion() < r_release()) {
msg[["R"]] <- paste0(
"* R is out of date (", getRversion(), " vs ", r_release(), ")"
)
}
deps <- package_deps("devtools", dependencies = NA)
old <- deps$diff < 0
if (any(old)) {
msg[["devtools"]] <- paste0(
"* Devtools or dependencies out of date: \n",
paste(deps$package[old], collapse = ", ")
)
}
if (rstudioapi::isAvailable()) {
rel <- rstudio_release()
cur <- rstudioapi::getVersion()
if (cur < rel) {
msg[["rstudio"]] <- paste0(
"* RStudio is out of date (", cur, " vs ", rel, ")"
)
}
}
doctor("devtools", msg)
}
#' Diagnose potential GitHub issues
#'
#' @param path Path to repository to check. Defaults to current working
#' directory
#' @family doctors
#' @export
#' @examples
#' \donttest{
#' dr_github()
#' }
dr_github <- function(path = ".") {
if (!uses_git(path)) {
return(doctor("github", "Current path is not a git repository"))
}
msg <- character()
r <- git2r::repository(path, discover = TRUE)
capture.output(config <- git2r::config(r))
config_names <- names(modifyList(config$global, config$local))
if (!("user.name" %in% config_names))
msg[["name"]] <- "* user.name config option not set"
if (!("user.email" %in% config_names))
msg[["user"]] <- "* user.email config option not set"
if (!file.exists("~/.ssh/id_rsa"))
msg[["ssh"]] <- "* SSH private key not found"
if (identical(Sys.getenv("GITHUB_PAT"), ""))
msg[["PAT"]] <- paste("* GITHUB_PAT environment variable not set",
"(this is not critical unless you want to install private repos)")
doctor("github", msg)
}
# Doctor class ------------------------------------------------------------
doctor <- function(name, messages) {
structure(
length(messages) == 0,
doctor = paste0("DR_", toupper(name)),
messages = messages,
class = "doctor"
)
}
#' @export
print.doctor <- function(x, ...) {
if (x) {
message(attr(x, "doctor"), " SAYS YOU LOOK HEALTHY")
return()
}
warning(attr(x, "doctor"), " FOUND PROBLEMS", call. = FALSE, immediate. = TRUE)
messages <- strwrap(attr(x, "messages"), exdent = 2)
message(paste(messages, collapse = "\n"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImaginR.R
\name{OutPutResult}
\alias{OutPutResult}
\title{Get phenotype, HEX and HSV color code for all pictures}
\usage{
OutPutResult(id)
}
\arguments{
\item{id}{The name of the pictures in your working directory}
}
\value{
The HEX and HSV color code and the color phenotype of the pearl oyster's inner shell for all images in a results.csv file
}
\description{
Get results in a .txt file, .csv file and in R data.frame
This function does what all the other functions do in a very simple way. Just put your images in your working directory (don't forget to getwd() !), load this package with library() and paste this single line of code: "OutPutResult()". You will get the results in your console and in a results.csv file in your working directory.
}
\details{
In results.csv:
\itemize{
\item{id : the name of your pictures}
\item{h : the hue of the hsv color code}
\item{s : the saturation of the hsv color code}
\item{v : the value of the hsv color code}
\item{hex : the hexadecimal color code}
\item{phenotype : returns the color phenotype of the pearl oyster's inner shell (\emph{Pinctada margaritifera})}
}
}
| /man/OutPutResult.Rd | no_license | PLStenger/ImaginR | R | false | true | 1,183 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ImaginR.R
\name{OutPutResult}
\alias{OutPutResult}
\title{Get phenotype, HEX and HSV color code for all pictures}
\usage{
OutPutResult(id)
}
\arguments{
\item{id}{The name of the pictures in your working directory}
}
\value{
The HEX and HSV color code and the color phenotype of the pearl oyster's inner shell for all images in a results.csv file
}
\description{
Get results in a .txt file, .csv file and in R data.frame
This function does what all the others functions do in a very simple way. Just put your images in your working directory (don't forget to getwd() !), do library this package and paste this only code: "OutPutResult()". You will get the results into your consol and in a results.csv file in your working directory.
}
\details{
In results.csv:
\itemize{
\item{id : the name of your pictures}
\item{h : the hue of the hsv color code}
\item{s : the saturation of the hsv color code}
\item{v : the value of the hsv color code}
\item{hex : the hexadecimal color code}
\item{phenotype : returns the color phenotype of the pearl oyster's inner shell (\emph{Pinctada margaritifera})}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grass7_v_kernel_vector.R
\name{grass7_v_kernel_vector}
\alias{grass7_v_kernel_vector}
\title{QGIS algorithm v.kernel.vector}
\usage{
grass7_v_kernel_vector(
input = qgisprocess::qgis_default_value(),
net = qgisprocess::qgis_default_value(),
radius = qgisprocess::qgis_default_value(),
dsize = qgisprocess::qgis_default_value(),
segmax = qgisprocess::qgis_default_value(),
distmax = qgisprocess::qgis_default_value(),
multiplier = qgisprocess::qgis_default_value(),
node = qgisprocess::qgis_default_value(),
kernel = qgisprocess::qgis_default_value(),
.o = qgisprocess::qgis_default_value(),
.n = qgisprocess::qgis_default_value(),
.m = qgisprocess::qgis_default_value(),
output = qgisprocess::qgis_default_value(),
GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_SNAP_TOLERANCE_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_MIN_AREA_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_OUTPUT_TYPE_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_VECTOR_DSCO = qgisprocess::qgis_default_value(),
GRASS_VECTOR_LCO = qgisprocess::qgis_default_value(),
GRASS_VECTOR_EXPORT_NOCAT = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{input}{\code{source} - Name of input vector map with training points. Path to a vector layer.}
\item{net}{\code{source} - Name of input network vector map. Path to a vector layer.}
\item{radius}{\code{number} - Kernel radius in map units. A numeric value.}
\item{dsize}{\code{number} - Discretization error in map units. A numeric value.}
\item{segmax}{\code{number} - Maximum length of segment on network. A numeric value.}
\item{distmax}{\code{number} - Maximum distance from point to network. A numeric value.}
\item{multiplier}{\code{number} - Multiply the density result by this number. A numeric value.}
\item{node}{\code{enum} of \verb{("none", "split")} - Node method. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{kernel}{\code{enum} of \verb{("uniform", "triangular", "epanechnikov", "quartic", "triweight", "gaussian", "cosine")} - Kernel function. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{.o}{\code{boolean} - Try to calculate an optimal radius with given 'radius' taken as maximum (experimental). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -o.}
\item{.n}{\code{boolean} - Normalize values by sum of density multiplied by length of each segment.. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -n.}
\item{.m}{\code{boolean} - Multiply the result by number of input points. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -m.}
\item{output}{\code{vectorDestination} - Kernel. Path for new vector layer.}
\item{GRASS_REGION_PARAMETER}{\code{extent} - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..}
\item{GRASS_SNAP_TOLERANCE_PARAMETER}{\code{number} - v.in.ogr snap tolerance (-1 = no snap). A numeric value.}
\item{GRASS_MIN_AREA_PARAMETER}{\code{number} - v.in.ogr min area. A numeric value.}
\item{GRASS_OUTPUT_TYPE_PARAMETER}{\code{enum} of \verb{("auto", "point", "line", "area")} - v.out.ogr output type. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{GRASS_VECTOR_DSCO}{\code{string} - v.out.ogr output data source options (dsco). String value.}
\item{GRASS_VECTOR_LCO}{\code{string} - v.out.ogr output layer options (lco). String value.}
\item{GRASS_VECTOR_EXPORT_NOCAT}{\code{boolean} - Also export features without category (not labeled). Otherwise only features with category are exported. 1 for true/yes. 0 for false/no.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifying whether the complete output of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or only the first output (most likely the main one) should be read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by GRASS v.kernel.vector (grass7:v.kernel.vector)
}
\details{
\subsection{Outputs description}{
\itemize{
\item output - outputVector - Kernel
}
}
}
| /man/grass7_v_kernel_vector.Rd | permissive | VB6Hobbyst7/r_package_qgis | R | false | true | 4,388 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grass7_v_kernel_vector.R
\name{grass7_v_kernel_vector}
\alias{grass7_v_kernel_vector}
\title{QGIS algorithm v.kernel.vector}
\usage{
grass7_v_kernel_vector(
input = qgisprocess::qgis_default_value(),
net = qgisprocess::qgis_default_value(),
radius = qgisprocess::qgis_default_value(),
dsize = qgisprocess::qgis_default_value(),
segmax = qgisprocess::qgis_default_value(),
distmax = qgisprocess::qgis_default_value(),
multiplier = qgisprocess::qgis_default_value(),
node = qgisprocess::qgis_default_value(),
kernel = qgisprocess::qgis_default_value(),
.o = qgisprocess::qgis_default_value(),
.n = qgisprocess::qgis_default_value(),
.m = qgisprocess::qgis_default_value(),
output = qgisprocess::qgis_default_value(),
GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_SNAP_TOLERANCE_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_MIN_AREA_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_OUTPUT_TYPE_PARAMETER = qgisprocess::qgis_default_value(),
GRASS_VECTOR_DSCO = qgisprocess::qgis_default_value(),
GRASS_VECTOR_LCO = qgisprocess::qgis_default_value(),
GRASS_VECTOR_EXPORT_NOCAT = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{input}{\code{source} - Name of input vector map with training points. Path to a vector layer.}
\item{net}{\code{source} - Name of input network vector map. Path to a vector layer.}
\item{radius}{\code{number} - Kernel radius in map units. A numeric value.}
\item{dsize}{\code{number} - Discretization error in map units. A numeric value.}
\item{segmax}{\code{number} - Maximum length of segment on network. A numeric value.}
\item{distmax}{\code{number} - Maximum distance from point to network. A numeric value.}
\item{multiplier}{\code{number} - Multiply the density result by this number. A numeric value.}
\item{node}{\code{enum} of \verb{("none", "split")} - Node method. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{kernel}{\code{enum} of \verb{("uniform", "triangular", "epanechnikov", "quartic", "triweight", "gaussian", "cosine")} - Kernel function. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{.o}{\code{boolean} - Try to calculate an optimal radius with given 'radius' taken as maximum (experimental). 1 for true/yes. 0 for false/no. Original algorithm parameter name: -o.}
\item{.n}{\code{boolean} - Normalize values by sum of density multiplied by length of each segment.. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -n.}
\item{.m}{\code{boolean} - Multiply the result by number of input points. 1 for true/yes. 0 for false/no. Original algorithm parameter name: -m.}
\item{output}{\code{vectorDestination} - Kernel. Path for new vector layer.}
\item{GRASS_REGION_PARAMETER}{\code{extent} - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..}
\item{GRASS_SNAP_TOLERANCE_PARAMETER}{\code{number} - v.in.ogr snap tolerance (-1 = no snap). A numeric value.}
\item{GRASS_MIN_AREA_PARAMETER}{\code{number} - v.in.ogr min area. A numeric value.}
\item{GRASS_OUTPUT_TYPE_PARAMETER}{\code{enum} of \verb{("auto", "point", "line", "area")} - v.out.ogr output type. Number of selected option, e.g. '1'. Comma separated list of options, e.g. '1,3'.}
\item{GRASS_VECTOR_DSCO}{\code{string} - v.out.ogr output data source options (dsco). String value.}
\item{GRASS_VECTOR_LCO}{\code{string} - v.out.ogr output layer options (lco). String value.}
\item{GRASS_VECTOR_EXPORT_NOCAT}{\code{boolean} - Also export features without category (not labeled). Otherwise only features with category are exported. 1 for true/yes. 0 for false/no.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifing if complete out of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or first output (most likely the main) should read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by GRASS v.kernel.vector (grass7:v.kernel.vector)
}
\details{
\subsection{Outputs description}{
\itemize{
\item output - outputVector - Kernel
}
}
}
|
\name{pelicanKnitr-package}
\alias{pelicanKnitr-package}
\alias{pelicanKnitr}
\docType{package}
\title{
\packageTitle{pelicanKnitr}
}
\description{
\packageDescription{pelicanKnitr}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{pelicanKnitr}
\packageIndices{pelicanKnitr}
~~ An overview of how to use the package, including the most ~~
~~ important functions ~~
}
\author{
\packageAuthor{pelicanKnitr}
Maintainer: \packageMaintainer{pelicanKnitr}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file ~~
~~ KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /man/pelicanKnitr-package.Rd | no_license | tillbe/pelicanKnitr | R | false | false | 844 | rd | \name{pelicanKnitr-package}
\alias{pelicanKnitr-package}
\alias{pelicanKnitr}
\docType{package}
\title{
\packageTitle{pelicanKnitr}
}
\description{
\packageDescription{pelicanKnitr}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{pelicanKnitr}
\packageIndices{pelicanKnitr}
~~ An overview of how to use the package, including the most ~~
~~ important functions ~~
}
\author{
\packageAuthor{pelicanKnitr}
Maintainer: \packageMaintainer{pelicanKnitr}
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file ~~
~~ KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
## Catalogue file and clustering settings.
cat <- "/home/rsqsim-00/pub/longCats/UCERF3base35kyrs/eqs.UCERF3base.out"
#eqs = readEqs(cat)
K <- 30      # number of clusters
iter <- 20   # fixed number of k-means iterations
eMin <- 7.7  # magnitude window (inclusive)
eMax <- 9

## Keep events inside the magnitude window, skipping the first 5000 years.
## NOTE(review): assumes `eqs` is already loaded (see the commented
## readEqs() call above) -- confirm before running.
use <- which(eqs$M <= eMax & eqs$M >= eMin & eqs$t0yr > 5000)
eX <- eqs$t0yr[use]
eY <- eqs$M[use]
X <- matrix(c(eX, eY), nrow = length(eX), ncol = 2, byrow = FALSE)
###################### FUNCTION
## Assignment step of k-means: for each row of X return the index (row of
## `centroids`) of the nearest centroid under squared Euclidean distance.
## Ties resolve to the first (lowest-index) centroid, as in the original
## scalar loop.
findCt <- function(X, centroids) {
  idx <- vector(mode = "numeric", length = nrow(X))
  for (i in seq_len(nrow(X))) {   # seq_len() is safe when nrow(X) == 0
    ## Squared distances from observation i to every centroid at once.
    diffs <- sweep(centroids, 2, X[i, ], "-")
    d2 <- rowSums(diffs^2)
    ## which.min() replaces the finite sentinel (999999999999), which could
    ## silently leave an invalid index of -1 when all distances exceeded it.
    idx[i] <- which.min(d2)
  }
  return(idx)
}
## Update step of k-means: the new centroid of cluster i is the column-wise
## mean of the rows of X currently assigned to i.  An empty cluster yields
## NaN coordinates (0/0), matching the original behaviour.
computeCt <- function(X, idx, K) {
  n <- ncol(X)
  centroids <- matrix(NA_real_, nrow = K, ncol = n)
  for (i in seq_len(K)) {   # seq_len() is safe when K == 0
    members <- which(idx == i)
    ## colSums()/length() vectorises the old per-row accumulation loop
    ## (which rescanned all m rows for every cluster); an empty `members`
    ## gives 0/0 = NaN, exactly as before.
    centroids[i, ] <- colSums(X[members, , drop = FALSE]) / length(members)
  }
  return(centroids)
}
###################### START
## Run Lloyd's k-means for a fixed number of iterations and plot the result.
print("***X")
print(X)
print("***initial centroids")
# Initial centroids: K distinct events sampled from the data.
centroids = X[sample(nrow(X),size=K,replace=FALSE),]
idx = vector( mode = "numeric", length = nrow(X) )
print(centroids)
print("***running kmeans")
# Fixed iteration count (no convergence check): assign each point to its
# nearest centroid, then recompute the centroids.
for(i in 1:iter){
print(i)
idx = findCt(X, centroids)
centroids = computeCt(X, idx, K)
}
print("***results")
print(idx)
print(centroids)
# Plot events coloured by their final cluster assignment.
x11()
plot(eX, eY, col=idx, type = "p", xlab = "Time (yrs)", ylab = "Magnitude", ylim=c(eMin, eMax), main = "lol")
###################### END
print("K: ")
print(K)
# Block until the user presses Enter so the plot window stays open.
temp <- readLines("stdin", 1)
| /cluster.R | no_license | zhenyufu/RSQSim-Rscript | R | false | false | 1,809 | r | cat = "/home/rsqsim-00/pub/longCats/UCERF3base35kyrs/eqs.UCERF3base.out"
#eqs = readEqs(cat)
K = 30
iter = 20
eMin = 7.7
eMax = 9
use = which(eqs$M <= eMax & eqs$M >= eMin & eqs$t0yr > 5000)
eX = eqs$t0yr[use]
eY = eqs$M[use]
X = matrix( c(eX, eY), nrow=length(eX), ncol=2, byrow = FALSE)
###################### FUNCTION
findCt = function(X, centroids){
idx = vector( mode = "numeric", length = nrow(X) )
for ( i in 1:nrow(X) ){
min = 999999999999
index = -1
for ( j in 1:nrow(centroids) ) {
temp = 0
for ( k in 1:ncol(X) ) {
temp = temp + ( X[i,k] - centroids[j,k] )^2;
}
if (temp < min){
min = temp
index = j
}
}
idx[i] = index
}
return (idx)
}
computeCt = function(X, idx, K){
m = nrow(X)
n = ncol(X)
centroids = matrix( nrow = K, ncol = n)
for (i in 1:K) {
mySum = 0
myCount = 0
for (j in 1:m) {
if( i == idx[j] ){
mySum = mySum + X[j,]
myCount = myCount + 1
}
}
centroids[i, ] = mySum/myCount
}
return (centroids)
}
###################### START
print("***X")
print(X)
print("***initial centroids")
centroids = X[sample(nrow(X),size=K,replace=FALSE),]
idx = vector( mode = "numeric", length = nrow(X) )
print(centroids)
print("***running kmeans")
for(i in 1:iter){
print(i)
idx = findCt(X, centroids)
centroids = computeCt(X, idx, K)
}
print("***results")
print(idx)
print(centroids)
x11()
plot(eX, eY, col=idx, type = "p", xlab = "Time (yrs)", ylab = "Magnitude", ylim=c(eMin, eMax), main = "lol")
###################### END
print("K: ")
print(K)
temp <- readLines("stdin", 1)
|
##########################################################
## ###
## Linear regression of Inb against b2 ###
## ###
###########################################################
# NOTE(review): clearing the global environment from inside a script is
# discouraged; prefer running the script in a fresh R session.
rm(list=ls())
# Load badger survival data; presumably provides nind, tM, sex, infection,
# inbrCAT, y, cint, censored, tD, dind and `seeds` -- TODO confirm.
load("badgerSexInb.RData")
set.seed(seeds[12])
## set up plot output file
pdf("outputs/SexInfectionInbreed_CAT_RJMCMC.pdf")
# Nimble model: Siler survival for interval-censored badger death times with
# a log-linear effect of inbreeding category, infection, sex and their
# interactions on the b2 hazard parameter.  Indicators z[] switch each
# effect in or out for reversible-jump variable selection.
code <- nimbleCode({
## survival components for dead badgers
for (i in 1:nind) {
## likelihood for interval-truncated gompertz
censored[i] ~ dinterval(tD[i], cint[i, ])
tD[i] ~ dsilerNim(a1, a2, b1, b2 * b2mult[i], c1)
# Multiplicative covariate effect on b2; each beta[j] is masked by its
# RJ indicator z[j].
log(b2mult[i]) <- beta[1] * inbrCAT[i] * z[1] +
beta[2] * infection[i] * z[2] +
beta[3] * sex[i] * z[3] +
beta[4] * sex[i] * inbrCAT[i] * z[4] +
beta[5] * infection[i] * inbrCAT[i] * z[5]
## sampling component
# Capture likelihood: y[i] sightings out of the years the animal was alive
# and monitored, each detected with probability mean.p.
pd[i] <- exp(y[i] * log(mean.p) + (min(floor(tD[i]), tM[i]) - y[i]) * log(1 - mean.p))
dind[i] ~ dbern(pd[i])
}
# Priors for the covariate effects and their inclusion indicators.
for (j in 1:5){
beta[j] ~ dnorm(0, sd = 1)
z[j] ~ dbern(0.5)
}
# Priors for the Siler mortality parameters and the detection probability.
a1 ~ dexp(1)
a2 ~ dexp(1)
b1 ~ dexp(1)
b2 ~ dexp(1)
c1 ~ dexp(1)
mean.p ~ dunif(0, 1)
# constraint_data ~ dconstraint(sum(z[1:2]) <= 1)
})
## set up data
# Constants (covariates, sample size) and observed data passed to nimble.
consts <- list(nind = nind, tM = tM, sex = sex, infection = infection, inbrCAT = inbrCAT)
data <- list(y = y, cint = cint,
censored = censored, tD = tD, dind = dind)
## find overdispersed initial values
## Draw an initial death time for every individual: right-censored records
## (censored == 2) get the upper interval bound plus an exponential tail
## draw; interval-censored records get a uniform draw inside the interval.
tinitFn <- function(cint, censored) {
  n <- nrow(cint)
  out <- numeric(n)
  for (i in seq_len(n)) {
    out[i] <- if (censored[i] == 2) {
      cint[i, 2] + rexp(1, 1)
    } else {
      runif(1, cint[i, 1], cint[i, 2])
    }
  }
  out
}
## Generate MCMC initial values by maximum likelihood: repeatedly sample
## candidate death times with tinitFn(), fit the Siler model with optim(),
## and return the ML estimates (at most 50 attempts before giving up).
initFn <- function(cint, censored) {
## get ML estimates as initial values
# Log-likelihood of the Siler model with a log-scale multiplier on b2
# (pars[6]); returns NA for inadmissible (negative) rate parameters so
# optim() steps away from them.
optFn <- function(pars, t) {
if(any(pars[c(1:5)] < 0)) {
return(NA)
}
sum(dSiler(t, a1 = pars[1], a2 = pars[2], b1 = pars[3], b2 = pars[4] * exp(pars[6]), c1 = pars[5], log = TRUE))
}
pars <- list(convergence = 1)
k <- 0
# Retry until optim() converges (convergence == 0), at most 50 times.
while(pars$convergence != 0 & k < 50) {
## sample missing values
tD <- tinitFn(cint, censored)
## optimise to interval-censored only
pars <- optim(c(rexp(5, 10), rnorm(1, 0, 1)), optFn, t = tD, control = list(fnscale = -1))
k <- k + 1
}
if(k == 50) {
stop("Can't sample initial values")
}
pars <- pars$par
## output initial values
# beta starts at the fitted log-multiplier; all RJ indicators start at 0
# (all covariate effects excluded).
list(
tD = tD,
a1 = pars[1],
a2 = pars[2],
b1 = pars[3],
b2 = pars[4],
c1 = pars[5],
mean.p = runif(1, 0, 1),
beta = rep(pars[6], times = 5),
z = rep(0, times = 5)
)
}
## build the model
model <- nimbleModel(code, constants = consts,
data = data,
inits = initFn(cint, censored))
## configure model
# Replace the default samplers on the Siler parameters with automated-factor
# slice samplers (all five jointly, then a subset), plus a univariate slice
# sampler on b2.
config <- configureMCMC(model)
config$removeSamplers(c("a1", "a2", "b1", "b2", "c1"))
config$addSampler(target = c("a1", "a2", "b1", "b2", "c1"), type = 'AF_slice', control = 50)
config$addSampler(target = c("a1", "b1", "c1"), type = 'AF_slice', control = 20)
config$addSampler(target = c("b2"), type = 'slice')
## Add reversible jump
# Pair each beta coefficient with its z indicator for RJMCMC variable
# selection.
configureRJ(conf = config, ## model configuration
targetNodes = c("beta"), ## coefficients for selection
indicatorNodes = c("z"), ## indicators paired with coefficients
control = list(mean = 0.5, scale = .5))
config$addMonitors("beta", "z")
config$printSamplers(c("beta", "z", "a1", "a2", "b1", "b2", "c1", "mean.p"))
rIndicatorMCMC <- buildMCMC(config)
cIndicatorModel <- compileNimble(model)
cIndicatorMCMC <- compileNimble(rIndicatorMCMC, project = model)
# Two chains, 70k iterations with 10k burn-in; returns coda-compatible
# samples plus a summary table.
system.time(run <- runMCMC(cIndicatorMCMC,
niter = 70000,
nburnin = 10000,
nchains = 2,
progressBar = TRUE,
summary = TRUE,
samplesAsCodaMCMC = TRUE,
thin = 1))
# Posterior summaries and trace plots (written to the pdf opened above).
MCMCsummary(run$samples)
plot(run$samples)
#MCMCtrace(run$samples)
# Thin to a random 10% subsample of the pooled chains before the pairs plot.
samples <- as.matrix(run$samples)
samples <- samples[sample.int(nrow(samples), ceiling(nrow(samples) * 0.1)), ]
samples %>%
as.data.frame() %>%
ggpairs()
dev.off()
#MCMCplot(run$samples)
#MCMCtrace(run$samples, pdf = F)
## save MCMC
saveRDS(samples, "outputs/SexInfectionInbreed_CAT_Samples_RJMCMC.rds")
#samples <- readRDS("outputsRJ/samples_RJMCMC_SexInfectionFULLMODEL.rds")
## Marginal probabilities of inclusion for each variable
zNames <- model$expandNodeNames('z')
zCols <- which(colnames(samples) %in% zNames)
binary <- as.data.table((samples[, zCols] != 0) + 0)
res <- binary[ , .N, by=names(binary)]
res <- res[order(N, decreasing = T)]
res <- res[, prob := N/dim(samples)[1]]
res
saveRDS(res, "outputs/SexInfectionInbreed_CAT_PosteriorModelProbs_RJMCMC.rds"
| /ModelFitting/SexInfectionInbreedCAT_RJMCMC.R | no_license | davehudson67/Inbreeding | R | false | false | 4,872 | r | ##########################################################
## ###
## Linear regression of Inb against b2 ###
## ###
###########################################################
rm(list=ls())
load("badgerSexInb.RData")
set.seed(seeds[12])
## set up plot output file
pdf("outputs/SexInfectionInbreed_CAT_RJMCMC.pdf")
code <- nimbleCode({
## survival components for dead badgers
for (i in 1:nind) {
## likelihood for interval-truncated gompertz
censored[i] ~ dinterval(tD[i], cint[i, ])
tD[i] ~ dsilerNim(a1, a2, b1, b2 * b2mult[i], c1)
log(b2mult[i]) <- beta[1] * inbrCAT[i] * z[1] +
beta[2] * infection[i] * z[2] +
beta[3] * sex[i] * z[3] +
beta[4] * sex[i] * inbrCAT[i] * z[4] +
beta[5] * infection[i] * inbrCAT[i] * z[5]
## sampling component
pd[i] <- exp(y[i] * log(mean.p) + (min(floor(tD[i]), tM[i]) - y[i]) * log(1 - mean.p))
dind[i] ~ dbern(pd[i])
}
for (j in 1:5){
beta[j] ~ dnorm(0, sd = 1)
z[j] ~ dbern(0.5)
}
a1 ~ dexp(1)
a2 ~ dexp(1)
b1 ~ dexp(1)
b2 ~ dexp(1)
c1 ~ dexp(1)
mean.p ~ dunif(0, 1)
# constraint_data ~ dconstraint(sum(z[1:2]) <= 1)
})
## set up data
consts <- list(nind = nind, tM = tM, sex = sex, infection = infection, inbrCAT = inbrCAT)
data <- list(y = y, cint = cint,
censored = censored, tD = tD, dind = dind)
## find overdispersed initial values
tinitFn <- function(cint, censored) {
apply(cbind(cint, censored), 1, function(x) {
if(x[3] == 2) {
y <- x[2] + rexp(1, 1)
} else {
y <- runif(1, x[1], x[2])
}
y
})
}
initFn <- function(cint, censored) {
## get ML estimates as initial values
optFn <- function(pars, t) {
if(any(pars[c(1:5)] < 0)) {
return(NA)
}
sum(dSiler(t, a1 = pars[1], a2 = pars[2], b1 = pars[3], b2 = pars[4] * exp(pars[6]), c1 = pars[5], log = TRUE))
}
pars <- list(convergence = 1)
k <- 0
while(pars$convergence != 0 & k < 50) {
## sample missing values
tD <- tinitFn(cint, censored)
## optimise to interval-censored only
pars <- optim(c(rexp(5, 10), rnorm(1, 0, 1)), optFn, t = tD, control = list(fnscale = -1))
k <- k + 1
}
if(k == 50) {
stop("Can't sample initial values")
}
pars <- pars$par
## output initial values
list(
tD = tD,
a1 = pars[1],
a2 = pars[2],
b1 = pars[3],
b2 = pars[4],
c1 = pars[5],
mean.p = runif(1, 0, 1),
beta = rep(pars[6], times = 5),
z = rep(0, times = 5)
)
}
## build the model
model <- nimbleModel(code, constants = consts,
data = data,
inits = initFn(cint, censored))
## configure model
config <- configureMCMC(model)
config$removeSamplers(c("a1", "a2", "b1", "b2", "c1"))
config$addSampler(target = c("a1", "a2", "b1", "b2", "c1"), type = 'AF_slice', control = 50)
config$addSampler(target = c("a1", "b1", "c1"), type = 'AF_slice', control = 20)
config$addSampler(target = c("b2"), type = 'slice')
## Add reversible jump
configureRJ(conf = config, ## model configuration
targetNodes = c("beta"), ## coefficients for selection
indicatorNodes = c("z"), ## indicators paired with coefficients
control = list(mean = 0.5, scale = .5))
config$addMonitors("beta", "z")
config$printSamplers(c("beta", "z", "a1", "a2", "b1", "b2", "c1", "mean.p"))
rIndicatorMCMC <- buildMCMC(config)
cIndicatorModel <- compileNimble(model)
cIndicatorMCMC <- compileNimble(rIndicatorMCMC, project = model)
system.time(run <- runMCMC(cIndicatorMCMC,
niter = 70000,
nburnin = 10000,
nchains = 2,
progressBar = TRUE,
summary = TRUE,
samplesAsCodaMCMC = TRUE,
thin = 1))
MCMCsummary(run$samples)
plot(run$samples)
#MCMCtrace(run$samples)
samples <- as.matrix(run$samples)
samples <- samples[sample.int(nrow(samples), ceiling(nrow(samples) * 0.1)), ]
samples %>%
as.data.frame() %>%
ggpairs()
dev.off()
#MCMCplot(run$samples)
#MCMCtrace(run$samples, pdf = F)
## save MCMC
saveRDS(samples, "outputs/SexInfectionInbreed_CAT_Samples_RJMCMC.rds")
#samples <- readRDS("outputsRJ/samples_RJMCMC_SexInfectionFULLMODEL.rds")
## Marginal probabilities of inclusion for each variable
zNames <- model$expandNodeNames('z')
zCols <- which(colnames(samples) %in% zNames)
binary <- as.data.table((samples[, zCols] != 0) + 0)
res <- binary[ , .N, by=names(binary)]
res <- res[order(N, decreasing = T)]
res <- res[, prob := N/dim(samples)[1]]
res
saveRDS(res, "outputs/SexInfectionInbreed_CAT_PosteriorModelProbs_RJMCMC.rds"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph.R
\name{matrix2graph}
\alias{matrix2graph}
\title{Convert a matrix or column-sparse matrix to a list of edges and nodes for
use by \code{\link{graphjs}}.}
\usage{
matrix2graph(M)
}
\arguments{
\item{M}{either a matrix or any of the possible column sparse matrix objects from the \link{Matrix} package.}
}
\value{
A list with node and edges data frame entries used by \code{\link{graphjs}}.
}
\description{
Convert a matrix or column-sparse matrix to a list of edges and nodes for
use by \code{\link{graphjs}}.
}
\note{
Numeric graphs are assumed to be weighted and the edge "size" values are set to the corresponding matrix entries.
}
\examples{
data(LeMis)
M <- graph2Matrix(LeMis$edges, LeMis$nodes)
G <- matrix2graph(M)
}
\seealso{
\code{\link{graphjs}}, \code{\link{graph2Matrix}}
}
| /man/matrix2graph.Rd | permissive | igraph/rthreejs | R | false | true | 872 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph.R
\name{matrix2graph}
\alias{matrix2graph}
\title{Convert a matrix or column-sparse matrix to a list of edges and nodes for
use by \code{\link{graphjs}}.}
\usage{
matrix2graph(M)
}
\arguments{
\item{M}{either a matrix or any of the possible column sparse matrix objects from the \link{Matrix} package.}
}
\value{
A list with node and edges data frame entries used by \code{\link{graphjs}}.
}
\description{
Convert a matrix or column-sparse matrix to a list of edges and nodes for
use by \code{\link{graphjs}}.
}
\note{
Numeric graphs are assumed to be weighted and the edge "size" values are set to the corresponding matrix entries.
}
\examples{
data(LeMis)
M <- graph2Matrix(LeMis$edges, LeMis$nodes)
G <- matrix2graph(M)
}
\seealso{
\code{\link{graphjs}}, \code{\link{graph2Matrix}}
}
|
source("global.R")
score_data <- compute_data()[[4]]
first_month_data <-
plotdata_for_month(
x = 100,
complete_data = score_data,
feature = "avg",
scale = TRUE,
window_size = 5,
threshold = 1,
center = "mean"
)
second_month_data <- plotdata_for_month(
x = 101,
complete_data = score_data,
feature = "avg",
scale = TRUE,
window_size = 5,
threshold = 2,
center = "mean"
)
base_month_data <- first_month_data[, c(1, 16, 17)]
colnames(base_month_data) <-
c("user", "current_xqval", "current_yqval")
previous_month_data <- second_month_data[, c(1, 16, 17)]
colnames(previous_month_data) <-
c("user", "previous_xqval", "previous_yqval")
common_rows <-
merge(base_month_data, previous_month_data, by =
"user")
common_rows_table <- data.table(common_rows)
common_rows_agg <-
as.data.frame(common_rows_table[, list(count = length(user)), by = list(current_xqval,
current_yqval,
previous_xqval,
previous_yqval)])
common_rows$x_movement <-
common_rows$left_xqval - common_rows$right_xqval
common_rows$y_movement <-
common_rows$left_yqval - common_rows$right_yqval
common_rows_table <- data.table(common_rows)
quiver_plot_data <-
common_rows_agg[(common_rows_agg$left_xqval == -1 &
common_rows_agg$left_yqval == -1),]
ratio <- .25 / (256 / 5)
ggplot(data = source_destination_frame,
aes(x = previous_xqval , y = previous_yqval )) +
geom_segment(aes(xend = x_destination , yend = y_destination),
arrow = arrow(length = unit(0.3, "cm"))) +
xlim(c(0.5, 4.5)) +
ylim(c(0.5, 4.5)) +theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
get_mean_movement <-
function(x,
complete_data,
feature,
window_size,
threshold) {
wd_1 <-
get_window_data(
x - 1,
complete_data = complete_data,
feature = feature,
window_size = window_size,
threshold = threshold
)
wd <-
get_window_data(
x,
complete_data = complete_data,
feature = feature,
window_size = window_size,
threshold = threshold
)
xstart <- mean(wd_1$fem_avg)
ystart <- mean(wd_1$mr_avg)
xend <- mean(wd$fem_avg)
yend <- mean(wd$mr_avg)
current_movement <- c(xstart, xend, ystart, yend)
names(current_movement) <- c("xstart", "xend", "ystart", "yend")
return (current_movement)
}
all_movements <-
lapply(
seq(min(score_data$month) + 10, max(score_data$month)),
get_mean_movement,
feature = "score",
complete_data = score_data,
window_size = 10,
threshold = 2
)
plot_movements <- as.data.frame(do.call(rbind, all_movements))
vector_data <- plot_movements[complete.cases(plot_movements), ]
point_data_1 <- vector_data[1, c(1, 3)]
colnames(point_data_1) <- c("xend", "yend")
point_data_1$type = "start"
point_data_2 <- vector_data[c(2:nrow(vector_data) - 1), c(2, 4)]
point_data_2$type = "path"
point_data_3 <- vector_data[nrow(vector_data), c("xend", "yend")]
point_data_3$type = "end"
point_data <- rbind(point_data_1, point_data_2, point_data_3)
ggplot(data = vector_data,
aes(x = xstart , y = ystart)) +
geom_segment(aes(xend = xend , yend = yend),
arrow = arrow(length = unit(0.3, "cm"))) +
geom_point(data = point_data,
aes(
x = xend,
y = yend,
col = type,
size = type
)) +
scale_size_manual (values = c(1.2, 0.7, 1.2)) +
scale_color_manual(
values = c(
"end" = "red",
"path" = "blue",
"start" = "green"
),
labels = c("end", "path", "start")
) +
ggtitle(paste("Movement of mean of ",
"avg",
"- Fem v MR ",
sep =
" ")) +
xlab(paste("Mean of Feminsim", "avg", sep = "")) +
ylab(paste("Mean of Mensrights", "avg", sep = ""))
| /vector_field.R | no_license | reladric/reddit-author-analysis | R | false | false | 4,226 | r | source("global.R")
score_data <- compute_data()[[4]]
first_month_data <-
plotdata_for_month(
x = 100,
complete_data = score_data,
feature = "avg",
scale = TRUE,
window_size = 5,
threshold = 1,
center = "mean"
)
second_month_data <- plotdata_for_month(
x = 101,
complete_data = score_data,
feature = "avg",
scale = TRUE,
window_size = 5,
threshold = 2,
center = "mean"
)
base_month_data <- first_month_data[, c(1, 16, 17)]
colnames(base_month_data) <-
c("user", "current_xqval", "current_yqval")
previous_month_data <- second_month_data[, c(1, 16, 17)]
colnames(previous_month_data) <-
c("user", "previous_xqval", "previous_yqval")
common_rows <-
merge(base_month_data, previous_month_data, by =
"user")
common_rows_table <- data.table(common_rows)
common_rows_agg <-
as.data.frame(common_rows_table[, list(count = length(user)), by = list(current_xqval,
current_yqval,
previous_xqval,
previous_yqval)])
common_rows$x_movement <-
common_rows$left_xqval - common_rows$right_xqval
common_rows$y_movement <-
common_rows$left_yqval - common_rows$right_yqval
common_rows_table <- data.table(common_rows)
quiver_plot_data <-
common_rows_agg[(common_rows_agg$left_xqval == -1 &
common_rows_agg$left_yqval == -1),]
ratio <- .25 / (256 / 5)
ggplot(data = source_destination_frame,
aes(x = previous_xqval , y = previous_yqval )) +
geom_segment(aes(xend = x_destination , yend = y_destination),
arrow = arrow(length = unit(0.3, "cm"))) +
xlim(c(0.5, 4.5)) +
ylim(c(0.5, 4.5)) +theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
get_mean_movement <-
function(x,
complete_data,
feature,
window_size,
threshold) {
wd_1 <-
get_window_data(
x - 1,
complete_data = complete_data,
feature = feature,
window_size = window_size,
threshold = threshold
)
wd <-
get_window_data(
x,
complete_data = complete_data,
feature = feature,
window_size = window_size,
threshold = threshold
)
xstart <- mean(wd_1$fem_avg)
ystart <- mean(wd_1$mr_avg)
xend <- mean(wd$fem_avg)
yend <- mean(wd$mr_avg)
current_movement <- c(xstart, xend, ystart, yend)
names(current_movement) <- c("xstart", "xend", "ystart", "yend")
return (current_movement)
}
all_movements <-
lapply(
seq(min(score_data$month) + 10, max(score_data$month)),
get_mean_movement,
feature = "score",
complete_data = score_data,
window_size = 10,
threshold = 2
)
plot_movements <- as.data.frame(do.call(rbind, all_movements))
vector_data <- plot_movements[complete.cases(plot_movements), ]
point_data_1 <- vector_data[1, c(1, 3)]
colnames(point_data_1) <- c("xend", "yend")
point_data_1$type = "start"
point_data_2 <- vector_data[c(2:nrow(vector_data) - 1), c(2, 4)]
point_data_2$type = "path"
point_data_3 <- vector_data[nrow(vector_data), c("xend", "yend")]
point_data_3$type = "end"
point_data <- rbind(point_data_1, point_data_2, point_data_3)
ggplot(data = vector_data,
aes(x = xstart , y = ystart)) +
geom_segment(aes(xend = xend , yend = yend),
arrow = arrow(length = unit(0.3, "cm"))) +
geom_point(data = point_data,
aes(
x = xend,
y = yend,
col = type,
size = type
)) +
scale_size_manual (values = c(1.2, 0.7, 1.2)) +
scale_color_manual(
values = c(
"end" = "red",
"path" = "blue",
"start" = "green"
),
labels = c("end", "path", "start")
) +
ggtitle(paste("Movement of mean of ",
"avg",
"- Fem v MR ",
sep =
" ")) +
xlab(paste("Mean of Feminsim", "avg", sep = "")) +
ylab(paste("Mean of Mensrights", "avg", sep = ""))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexmodelbuildingservice_operations.R
\name{lexmodelbuildingservice_get_bot}
\alias{lexmodelbuildingservice_get_bot}
\title{Returns metadata information for a specific bot}
\usage{
lexmodelbuildingservice_get_bot(name, versionOrAlias)
}
\arguments{
\item{name}{[required] The name of the bot. The name is case sensitive.}
\item{versionOrAlias}{[required] The version or alias of the bot.}
}
\value{
A list with the following syntax:\preformatted{list(
name = "string",
description = "string",
intents = list(
list(
intentName = "string",
intentVersion = "string"
)
),
enableModelImprovements = TRUE|FALSE,
nluIntentConfidenceThreshold = 123.0,
clarificationPrompt = list(
messages = list(
list(
contentType = "PlainText"|"SSML"|"CustomPayload",
content = "string",
groupNumber = 123
)
),
maxAttempts = 123,
responseCard = "string"
),
abortStatement = list(
messages = list(
list(
contentType = "PlainText"|"SSML"|"CustomPayload",
content = "string",
groupNumber = 123
)
),
responseCard = "string"
),
status = "BUILDING"|"READY"|"READY_BASIC_TESTING"|"FAILED"|"NOT_BUILT",
failureReason = "string",
lastUpdatedDate = as.POSIXct(
"2015-01-01"
),
createdDate = as.POSIXct(
"2015-01-01"
),
idleSessionTTLInSeconds = 123,
voiceId = "string",
checksum = "string",
version = "string",
locale = "de-DE"|"en-AU"|"en-GB"|"en-US"|"es-419"|"es-ES"|"es-US"|"fr-FR"|"fr-CA"|"it-IT",
childDirected = TRUE|FALSE,
detectSentiment = TRUE|FALSE
)
}
}
\description{
Returns metadata information for a specific bot. You must provide the
bot name and the bot version or alias.
This operation requires permissions for the \code{lex:GetBot} action.
}
\section{Request syntax}{
\preformatted{svc$get_bot(
name = "string",
versionOrAlias = "string"
)
}
}
\examples{
\dontrun{
# This example shows how to get configuration information for a bot.
svc$get_bot(
name = "DocOrderPizza",
versionOrAlias = "$LATEST"
)
}
}
\keyword{internal}
| /cran/paws.machine.learning/man/lexmodelbuildingservice_get_bot.Rd | permissive | TWarczak/paws | R | false | true | 2,171 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexmodelbuildingservice_operations.R
\name{lexmodelbuildingservice_get_bot}
\alias{lexmodelbuildingservice_get_bot}
\title{Returns metadata information for a specific bot}
\usage{
lexmodelbuildingservice_get_bot(name, versionOrAlias)
}
\arguments{
\item{name}{[required] The name of the bot. The name is case sensitive.}
\item{versionOrAlias}{[required] The version or alias of the bot.}
}
\value{
A list with the following syntax:\preformatted{list(
name = "string",
description = "string",
intents = list(
list(
intentName = "string",
intentVersion = "string"
)
),
enableModelImprovements = TRUE|FALSE,
nluIntentConfidenceThreshold = 123.0,
clarificationPrompt = list(
messages = list(
list(
contentType = "PlainText"|"SSML"|"CustomPayload",
content = "string",
groupNumber = 123
)
),
maxAttempts = 123,
responseCard = "string"
),
abortStatement = list(
messages = list(
list(
contentType = "PlainText"|"SSML"|"CustomPayload",
content = "string",
groupNumber = 123
)
),
responseCard = "string"
),
status = "BUILDING"|"READY"|"READY_BASIC_TESTING"|"FAILED"|"NOT_BUILT",
failureReason = "string",
lastUpdatedDate = as.POSIXct(
"2015-01-01"
),
createdDate = as.POSIXct(
"2015-01-01"
),
idleSessionTTLInSeconds = 123,
voiceId = "string",
checksum = "string",
version = "string",
locale = "de-DE"|"en-AU"|"en-GB"|"en-US"|"es-419"|"es-ES"|"es-US"|"fr-FR"|"fr-CA"|"it-IT",
childDirected = TRUE|FALSE,
detectSentiment = TRUE|FALSE
)
}
}
\description{
Returns metadata information for a specific bot. You must provide the
bot name and the bot version or alias.
This operation requires permissions for the \code{lex:GetBot} action.
}
\section{Request syntax}{
\preformatted{svc$get_bot(
name = "string",
versionOrAlias = "string"
)
}
}
\examples{
\dontrun{
# This example shows how to get configuration information for a bot.
svc$get_bot(
name = "DocOrderPizza",
versionOrAlias = "$LATEST"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_objects.R
\name{ChannelSnippet}
\alias{ChannelSnippet}
\title{ChannelSnippet Object}
\usage{
ChannelSnippet(country = NULL, customUrl = NULL, defaultLanguage = NULL,
description = NULL, localized = NULL, publishedAt = NULL,
thumbnails = NULL, title = NULL)
}
\arguments{
\item{country}{The country of the channel}
\item{customUrl}{The custom url of the channel}
\item{defaultLanguage}{The language of the channel's default title and description}
\item{description}{The description of the channel}
\item{localized}{Localized title and description, read-only}
\item{publishedAt}{The date and time that the channel was created}
\item{thumbnails}{A map of thumbnail images associated with the channel}
\item{title}{The channel's title}
}
\value{
ChannelSnippet object
}
\description{
ChannelSnippet Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Basic details about a channel, including title, description and thumbnails. Next available id: 15.
}
| /googleyoutubev3.auto/man/ChannelSnippet.Rd | permissive | uwazac/autoGoogleAPI | R | false | true | 1,081 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_objects.R
\name{ChannelSnippet}
\alias{ChannelSnippet}
\title{ChannelSnippet Object}
\usage{
ChannelSnippet(country = NULL, customUrl = NULL, defaultLanguage = NULL,
description = NULL, localized = NULL, publishedAt = NULL,
thumbnails = NULL, title = NULL)
}
\arguments{
\item{country}{The country of the channel}
\item{customUrl}{The custom url of the channel}
\item{defaultLanguage}{The language of the channel's default title and description}
\item{description}{The description of the channel}
\item{localized}{Localized title and description, read-only}
\item{publishedAt}{The date and time that the channel was created}
\item{thumbnails}{A map of thumbnail images associated with the channel}
\item{title}{The channel's title}
}
\value{
ChannelSnippet object
}
\description{
ChannelSnippet Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Basic details about a channel, including title, description and thumbnails. Next available id: 15.
}
|
# Bipartisan score data visualization
# 03-Gridmap.R
# Author: Huade Huo <hh561@georgetown.edu>
# License: MIT
# Read griddata
griddata <- read.csv("Data/gridmap.csv")
HvSv <- read.csv("Data/HvSv.csv")
# Gradient of n colors
# startColor = "#CEC7FF", endColor = "#290042" -purple
colorPalettefun <- function(startColor = "#CEC7FF", endColor = "#290042", numColor = 51){
colfunc <- colorRampPalette(c(startColor, endColor))
colfunc(numColor)
}
# Merge grid data and users' data
SelectSort <- function(df, state_abb, value){
data <- df[c(state_abb, value)]
names(data) <- c("abb", "datavalue")
data <- merge(griddata, data, by = "abb")
data <- data[order(data$datavalue),]
return(data)
}
# Main function
gridMap <- function(title, Subtitle){
# Assign colors
data_sc$gridColor <- colorPalettefun(numColor = 51)
# Init plot
plot(c(0, 12), c(0,8), type = "n", bty="n",
xaxt='n', yaxt='n', ann=FALSE,asp=1)
title(main = title, sub = Subtitle)
for (i in 1:nrow(data_sc)){
if (!is.na(data_sc$datavalue[i])){
rect(data_sc$xl[i],data_sc$yb[i],data_sc$xr[i],data_sc$yt[i],col=data_sc$gridColor[i], border="white")
text(0.5*(data_sc$xl[i]+data_sc$xr[i]),
0.5*(data_sc$yb[i]+data_sc$yt[i]),
labels = data_sc$abb[i],
col = "white")
}
else {
rect(data_sc$xl[i],data_sc$yb[i],data_sc$xr[i],data_sc$yt[i], border="white")
text(0.5*(data_sc$xl[i]+data_sc$xr[i]),
0.5*(data_sc$yb[i]+data_sc$yt[i]),
labels = data_sc$abb[i],
col = "grey")
}
}
}
# Overall
data_sc <- SelectSort(HvSv, state_abb = "abb", value = "AvgScore")
gridMap(title = "Bipartisan Index, 113th Congress \n(Overall)",
Subtitle = "Darker color indicates higher bipartisan score")
dev.copy(png,'Figures/Grid_map_overall.png')
dev.off()
# House
data_sc <- SelectSort(HvSv, state_abb = "abb", value = "Hv")
gridMap(title = "Bipartisan Index, 113th Congress \n(House)",
Subtitle = "Darker color indicates higher bipartisan score")
dev.copy(png,'Figures/Grid_map_House.png')
dev.off()
# Senate
data_sc <- SelectSort(HvSv, state_abb = "abb", value = "Sv")
gridMap(title = "Bipartisan Index, 113th Congress \n(Senate)",
Subtitle = "Darker color indicates higher bipartisan score")
dev.copy(png,'Figures/Grid_map_Senate.png')
dev.off() | /03-Gridmap.R | permissive | Huade/BipartisanIndex | R | false | false | 2,361 | r | # Bipartisan score data visualization
# 03-Gridmap.R
# Author: Huade Huo <hh561@georgetown.edu>
# License: MIT
# Read griddata
griddata <- read.csv("Data/gridmap.csv")
HvSv <- read.csv("Data/HvSv.csv")
# Gradient of n colors
# startColor = "#CEC7FF", endColor = "#290042" -purple
colorPalettefun <- function(startColor = "#CEC7FF", endColor = "#290042", numColor = 51){
colfunc <- colorRampPalette(c(startColor, endColor))
colfunc(numColor)
}
# Merge grid data and users' data
SelectSort <- function(df, state_abb, value){
data <- df[c(state_abb, value)]
names(data) <- c("abb", "datavalue")
data <- merge(griddata, data, by = "abb")
data <- data[order(data$datavalue),]
return(data)
}
# Main function
gridMap <- function(title, Subtitle){
# Assign colors
data_sc$gridColor <- colorPalettefun(numColor = 51)
# Init plot
plot(c(0, 12), c(0,8), type = "n", bty="n",
xaxt='n', yaxt='n', ann=FALSE,asp=1)
title(main = title, sub = Subtitle)
for (i in 1:nrow(data_sc)){
if (!is.na(data_sc$datavalue[i])){
rect(data_sc$xl[i],data_sc$yb[i],data_sc$xr[i],data_sc$yt[i],col=data_sc$gridColor[i], border="white")
text(0.5*(data_sc$xl[i]+data_sc$xr[i]),
0.5*(data_sc$yb[i]+data_sc$yt[i]),
labels = data_sc$abb[i],
col = "white")
}
else {
rect(data_sc$xl[i],data_sc$yb[i],data_sc$xr[i],data_sc$yt[i], border="white")
text(0.5*(data_sc$xl[i]+data_sc$xr[i]),
0.5*(data_sc$yb[i]+data_sc$yt[i]),
labels = data_sc$abb[i],
col = "grey")
}
}
}
# Overall
data_sc <- SelectSort(HvSv, state_abb = "abb", value = "AvgScore")
gridMap(title = "Bipartisan Index, 113th Congress \n(Overall)",
Subtitle = "Darker color indicates higher bipartisan score")
dev.copy(png,'Figures/Grid_map_overall.png')
dev.off()
# House
data_sc <- SelectSort(HvSv, state_abb = "abb", value = "Hv")
gridMap(title = "Bipartisan Index, 113th Congress \n(House)",
Subtitle = "Darker color indicates higher bipartisan score")
dev.copy(png,'Figures/Grid_map_House.png')
dev.off()
# Senate
data_sc <- SelectSort(HvSv, state_abb = "abb", value = "Sv")
gridMap(title = "Bipartisan Index, 113th Congress \n(Senate)",
Subtitle = "Darker color indicates higher bipartisan score")
dev.copy(png,'Figures/Grid_map_Senate.png')
dev.off() |
######################
# Stat 540 Project #
######################
# checking for batch effect
setwd("~/Dropbox/STAT 540 Project")
# import quantile, lowess normalized, logged data
gExpDat <- read.delim("~/Dropbox/STAT 540 Project/ProcessedData/Grasso_GeneExpr_AllSamples_log2.txt", row.names = 1)
str(gExpDat, list.len = 6)
gDes <- read.delim("~/Dropbox/STAT 540 Project/MetaData/DesignTable.txt")
str(gDes)
(n <- nrow(gDes)) # 122
sum(is.na(gExpDat)) # 0
table(gDes$RunDate)
# 07-Feb-2007 07-Sep-2005 15-Aug-2006 6-June-2006
# 88 12 12 10
# converting to date class
gDes$Date <- as.Date(gDes$RunDate, "%d-%b-%Y")
table(gDes$Date)
# 2005-09-07 2006-06-06 2006-08-15 2007-02-07
# 12 10 12 88
# converting to factor class
gDes$RunDate <- factor(gDes$RunDate, levels(gDes$RunDate)[c(2, 4, 3, 1)])
# order by date
(mOrd <- with(gDes, order(RunDate, SampleType, TissueType, DiseaseState)))
gDes <- gDes[mOrd,]
gExpDat <- gExpDat[, mOrd]
gDes$dayCode <- sapply(unclass(gDes$RunDate), function(i) {
foo <- rep("-", nlevels(gDes$RunDate) * 2)
foo[(i * 2 - 1):(i * 2)] <- i
paste(foo, collapse = "")
})
gDes$dayCode <- paste(gDes$dayCode, colnames(gExpDat))
library(RColorBrewer)
mCols <- colorRampPalette(rev(brewer.pal(n = 9, "Greys")))
# Heatmap showing batch effect
pdf("HeatmapExaminingBatchEffect.pdf")
heatmap(cor(gExpDat), Rowv = NA, Colv = NA, symm = TRUE, revC = TRUE, labRow = gDes$dayCode,
col = mCols(256), margins = c(10, 10),
RowSideColor = brewer.pal(11, "RdGy")[c(4, 7)][unclass(gDes$SampleType)],
ColSideColor = brewer.pal(11, "RdGy")[c(4, 7)][unclass(gDes$SampleType)])
dev.off()
| /scripts/batch_effect_analysis/03-BatchEffect.R | no_license | woodhaha/cancer-metastasis | R | false | false | 1,717 | r | ######################
# Stat 540 Project #
######################
# checking for batch effect
setwd("~/Dropbox/STAT 540 Project")
# import quantile, lowess normalized, logged data
gExpDat <- read.delim("~/Dropbox/STAT 540 Project/ProcessedData/Grasso_GeneExpr_AllSamples_log2.txt", row.names = 1)
str(gExpDat, list.len = 6)
gDes <- read.delim("~/Dropbox/STAT 540 Project/MetaData/DesignTable.txt")
str(gDes)
(n <- nrow(gDes)) # 122
sum(is.na(gExpDat)) # 0
table(gDes$RunDate)
# 07-Feb-2007 07-Sep-2005 15-Aug-2006 6-June-2006
# 88 12 12 10
# converting to date class
gDes$Date <- as.Date(gDes$RunDate, "%d-%b-%Y")
table(gDes$Date)
# 2005-09-07 2006-06-06 2006-08-15 2007-02-07
# 12 10 12 88
# converting to factor class
gDes$RunDate <- factor(gDes$RunDate, levels(gDes$RunDate)[c(2, 4, 3, 1)])
# order by date
(mOrd <- with(gDes, order(RunDate, SampleType, TissueType, DiseaseState)))
gDes <- gDes[mOrd,]
gExpDat <- gExpDat[, mOrd]
gDes$dayCode <- sapply(unclass(gDes$RunDate), function(i) {
foo <- rep("-", nlevels(gDes$RunDate) * 2)
foo[(i * 2 - 1):(i * 2)] <- i
paste(foo, collapse = "")
})
gDes$dayCode <- paste(gDes$dayCode, colnames(gExpDat))
library(RColorBrewer)
mCols <- colorRampPalette(rev(brewer.pal(n = 9, "Greys")))
# Heatmap showing batch effect
pdf("HeatmapExaminingBatchEffect.pdf")
heatmap(cor(gExpDat), Rowv = NA, Colv = NA, symm = TRUE, revC = TRUE, labRow = gDes$dayCode,
col = mCols(256), margins = c(10, 10),
RowSideColor = brewer.pal(11, "RdGy")[c(4, 7)][unclass(gDes$SampleType)],
ColSideColor = brewer.pal(11, "RdGy")[c(4, 7)][unclass(gDes$SampleType)])
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createCompoundlist.R
\name{CAS2SMILES}
\alias{CAS2SMILES}
\title{Convert CAS to SMILES}
\usage{
CAS2SMILES(CAS_number, name)
}
\arguments{
\item{CAS_number}{character
The CAS registry number of a compound}
\item{name}{character
The compound's name}
}
\value{
The SMILES code of the compound as character-string
}
\description{
This is a wrapper for \code{webchem::cir_query}, using the
CACTUS API at https://cactus.nci.nih.gov/chemical/structure_documentation
for the conversion. Before converting the CAS number, the
name is checked whether it contains the word 'derivative'.
If so, the conversion is stopped and NA is returned.
Also, a warning will be printed in this case.
}
\details{
The API allows only one query per second. This is a hard-
coded feature
}
\examples{
SMILES_ethanol <- CAS2SMILES("64-17-5", "Ethanol")
}
\author{
pstahlhofen
}
| /man/CAS2SMILES.Rd | no_license | MassBank/RMassBank | R | false | true | 929 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createCompoundlist.R
\name{CAS2SMILES}
\alias{CAS2SMILES}
\title{Convert CAS to SMILES}
\usage{
CAS2SMILES(CAS_number, name)
}
\arguments{
\item{CAS_number}{character
The CAS registry number of a compound}
\item{name}{character
The compound's name}
}
\value{
The SMILES code of the compound as character-string
}
\description{
This is a wrapper for \code{webchem::cir_query}, using the
CACTUS API at https://cactus.nci.nih.gov/chemical/structure_documentation
for the conversion. Before converting the CAS number, the
name is checked whether it contains the word 'derivative'.
If so, the conversion is stopped and NA is returned.
Also, a warning will be printed in this case.
}
\details{
The API allows only one query per second. This is a hard-
coded feature
}
\examples{
SMILES_ethanol <- CAS2SMILES("64-17-5", "Ethanol")
}
\author{
pstahlhofen
}
|
CNeighbor <- function(Bu, Bv, Alpha, Beta, Delta, Theta){
p <- length(Bu)
if(is.null(dim(Bv)[2])) {Bv <- matrix(Bv, ncol=1)}
colSums(abs(Bu-Bv)/(Beta-Alpha) <= Delta) >= Theta*p
} | /funtimes/R/CNeighbor.R | no_license | svish91/funtimes | R | false | false | 185 | r | CNeighbor <- function(Bu, Bv, Alpha, Beta, Delta, Theta){
p <- length(Bu)
if(is.null(dim(Bv)[2])) {Bv <- matrix(Bv, ncol=1)}
colSums(abs(Bu-Bv)/(Beta-Alpha) <= Delta) >= Theta*p
} |
##' # Pull data from different domains to sua
##'
##' **Author: Cristina Muschitiello** modified by cldb
##' Modifications:
##' -only pulls data for unvalidated countries
##' -pulls data for opening stocks, live animals,
##'
##'
##' **Description:**
##'
##' This module is designed to harvest the data from other tables and pull all
##' relevant FBS data into the SUA/FBS domain. It pulls from the following
##'
##' **Inputs:**
##'
##' * Agriculture Production (production, stock, seed, industrial)
##' * Food (food)
##' * Loss (loss)
##' * feed (feed)
##' * stock (stock)
##' * Trade:
##' in november 2017, for urgent purposes, as it was not possible to validate all the new Trade data
##' it has been decided to use:
##' . Old Trade data up to 2013
##' . New Trade data from 2014 (Trade domain)
##' * Tourist (tourist)
##'
##' **Flag assignment:**
##'
##' | Observation Status Flag | Method Flag|
##' | --- | --- | --- |
## load the library
library(faosws)
library(data.table)
library(faoswsUtil)
library(sendmailR)
library(dplyr)
################################################
##### set environment #####
################################################
R_SWS_SHARE_PATH = Sys.getenv("R_SWS_SHARE_PATH")
if(CheckDebug()){
message("Not on server, so setting up environment...")
library(faoswsModules)
SETT <- ReadSettings("pullDataToSUA_2013/sws.yml")
#SETT<- AddSettings(dir = "setUpSUAUnbalancedTable/pullDataToSUA_2013", filename = "sws.yml", gitignore = T,
# fields = NULL)
R_SWS_SHARE_PATH <- SETT[["share"]]
## Get SWS Parameters
SetClientFiles(dir = SETT[["certdir"]])
GetTestEnvironment(
baseUrl = SETT[["server"]],
token = SETT[["token"]]
)
}
################################################
##### get sua data #####
################################################
yearkey = 2013
geoKey = GetCodeList(domain = "suafbs", dataset = "sua_unbalanced", "geographicAreaM49")[,code]
itemKey = GetCodeList(domain = "suafbs", dataset = "sua_unbalanced", "measuredItemFbsSua")[,code]
elementKey = GetCodeList(domain = "suafbs", dataset = "sua_unbalanced", "measuredElementSuaFbs")[,code]
# sessionKey = swsContext.datasets[[1]]
geoDim = Dimension(name = "geographicAreaM49", keys = geoKey)
eleDim = Dimension(name = "measuredElementSuaFbs", keys = elementKey)
itemDim = Dimension(name = "measuredItemFbsSua", keys = itemKey)
timeDim = Dimension(name = "timePointYears", keys = as.character(yearkey))
suaKey = DatasetKey(domain = "suafbs", dataset = "sua_unbalanced",
dimensions = list(
geographicAreaM49 = geoDim,
measuredElement = eleDim,
measuredItemCPC = itemDim,
timePointYears = timeDim)
)
suaData = GetData(suaKey)
# Derive opening-stock rows for 2013 from the non-zero stock-variation
# observations (element 5071 — presumably stock variation; element codes
# follow the naming of stockvar13/stockop13 and the "5113" opening-stocks
# code used in the Stock-domain pull below — TODO confirm against the SWS
# element code list).
stockvar13<- suaData %>% filter(measuredElementSuaFbs == 5071 & timePointYears==2013 & Value!=0)
stockop13<-stockvar13
# Relabel the copied rows as opening stocks (5113) and zero out the value,
# so every series with a 2013 stock movement gets an explicit opening record.
stockop13[,"measuredElementSuaFbs"]<-"5113"
stockop13[,"Value"]<-0
################################################
##### Harvest from stockdata #####
################################################
message("Pulling data from Stock domain")
stocksCode = c("5113")
stockEleDim = Dimension(name = "measuredElement",
keys = stocksCode)
stockitemKeys = GetCodeList(domain = "agriculture", dataset = "aproduction",
dimension = "measuredItemCPC")[, code]
itemDim = Dimension(name = "measuredItemCPC", keys = stockitemKeys)
stokKey = DatasetKey(domain = "Stock", dataset = "stocksdata",
dimensions = list(
geographicAreaM49 = geoDim,
measuredElement = stockEleDim,
measuredItemCPC = itemDim,
timePointYears = Dimension(name = "timePointYears", keys = as.character(2014:2016)))
)
stockData = GetData(stokKey)
setnames(stockData, c("measuredElement", "measuredItemCPC"),
c("measuredElementSuaFbs", "measuredItemFbsSua"))
###########
################################################
##### Merging data files together #####
################################################
message("Merging data files together and saving")
out = rbind(stockop13,stockData)
#protected data
#### CRISTINA: after havig discovered that for crops , official food values are Wrong and have to be deleted.
# now we have to delete all the wrong values:
# THE FOLLOWING STEPS HAVE BEEN COMMENTED BECAUSE THEY SHOULD NOT BE NEEDED
# the data might have to be corrected from the questionnaires
#### The previous step has been inserted here and removed from the standardization in order
# to give to the data team the possibility to eventually add some food value for primary commodities
stats = SaveData(domain = "suafbs", dataset = "sua_unbalanced", data = as.data.table(out), waitTimeout = 2000000)
paste0(stats$inserted, " observations written, ",
stats$ignored, " weren't updated, ",
stats$discarded, " had problems.")
################################################################
##### send Email with notification of correct execution #####
################################################################
## Initiate email
from = "sws@fao.org"
to = swsContext.userEmail
subject = "PullDataToSua plug-in has correctly run"
body = "The plug-in has saved the SUAs in your session"
sendmail(from = from, to = to, subject = subject, msg = body)
paste0("Email sent to ", swsContext.userEmail)
| /pullDataToSUA_2013/openStockLine.R | no_license | SWS-Methodology/setUpSUAUnbalancedTable | R | false | false | 5,563 | r | ##' # Pull data from different domains to sua
##'
##' **Author: Cristina Muschitiello** modified by cldb
##' Modifications:
##' -only pulls data for unvalidated countries
##' -pulls data for opening stocks, live animals,
##'
##'
##' **Description:**
##'
##' This module is designed to harvest the data from other tables and pull all
##' relevant FBS data into the SUA/FBS domain. It pulls from the following
##'
##' **Inputs:**
##'
##' * Agriculture Production (production, stock, seed, industrial)
##' * Food (food)
##' * Loss (loss)
##' * feed (feed)
##' * stock (stock)
##' * Trade:
##' in november 2017, for urgent purposes, as it was not possible to validate all the new Trade data
##' it has been decided to use:
##' . Old Trade data up to 2013
##' . New Trade data from 2014 (Trade domain)
##' * Tourist (tourist)
##'
##' **Flag assignment:**
##'
##' | Observation Status Flag | Method Flag|
##' | --- | --- | --- |
## load the library
library(faosws)
library(data.table)
library(faoswsUtil)
library(sendmailR)
library(dplyr)
################################################
##### set environment #####
################################################
R_SWS_SHARE_PATH = Sys.getenv("R_SWS_SHARE_PATH")
if(CheckDebug()){
message("Not on server, so setting up environment...")
library(faoswsModules)
SETT <- ReadSettings("pullDataToSUA_2013/sws.yml")
#SETT<- AddSettings(dir = "setUpSUAUnbalancedTable/pullDataToSUA_2013", filename = "sws.yml", gitignore = T,
# fields = NULL)
R_SWS_SHARE_PATH <- SETT[["share"]]
## Get SWS Parameters
SetClientFiles(dir = SETT[["certdir"]])
GetTestEnvironment(
baseUrl = SETT[["server"]],
token = SETT[["token"]]
)
}
################################################
##### get sua data #####
################################################
yearkey = 2013
geoKey = GetCodeList(domain = "suafbs", dataset = "sua_unbalanced", "geographicAreaM49")[,code]
itemKey = GetCodeList(domain = "suafbs", dataset = "sua_unbalanced", "measuredItemFbsSua")[,code]
elementKey = GetCodeList(domain = "suafbs", dataset = "sua_unbalanced", "measuredElementSuaFbs")[,code]
# sessionKey = swsContext.datasets[[1]]
geoDim = Dimension(name = "geographicAreaM49", keys = geoKey)
eleDim = Dimension(name = "measuredElementSuaFbs", keys = elementKey)
itemDim = Dimension(name = "measuredItemFbsSua", keys = itemKey)
timeDim = Dimension(name = "timePointYears", keys = as.character(yearkey))
suaKey = DatasetKey(domain = "suafbs", dataset = "sua_unbalanced",
dimensions = list(
geographicAreaM49 = geoDim,
measuredElement = eleDim,
measuredItemCPC = itemDim,
timePointYears = timeDim)
)
suaData = GetData(suaKey)
stockvar13<- suaData %>% filter(measuredElementSuaFbs == 5071 & timePointYears==2013 & Value!=0)
stockop13<-stockvar13
stockop13[,"measuredElementSuaFbs"]<-"5113"
stockop13[,"Value"]<-0
################################################
##### Harvest from stockdata #####
################################################
message("Pulling data from Stock domain")
stocksCode = c("5113")
stockEleDim = Dimension(name = "measuredElement",
keys = stocksCode)
stockitemKeys = GetCodeList(domain = "agriculture", dataset = "aproduction",
dimension = "measuredItemCPC")[, code]
itemDim = Dimension(name = "measuredItemCPC", keys = stockitemKeys)
stokKey = DatasetKey(domain = "Stock", dataset = "stocksdata",
dimensions = list(
geographicAreaM49 = geoDim,
measuredElement = stockEleDim,
measuredItemCPC = itemDim,
timePointYears = Dimension(name = "timePointYears", keys = as.character(2014:2016)))
)
stockData = GetData(stokKey)
setnames(stockData, c("measuredElement", "measuredItemCPC"),
c("measuredElementSuaFbs", "measuredItemFbsSua"))
###########
################################################
##### Merging data files together #####
################################################
message("Merging data files together and saving")
out = rbind(stockop13,stockData)
#protected data
#### CRISTINA: after havig discovered that for crops , official food values are Wrong and have to be deleted.
# now we have to delete all the wrong values:
# THE FOLLOWING STEPS HAVE BEEN COMMENTED BECAUSE THEY SHOULD NOT BE NEEDED
# the data might have to be corrected from the questionnaires
#### The previous step has been inserted here and removed from the standardization in order
# to give to the data team the possibility to eventually add some food value for primary commodities
stats = SaveData(domain = "suafbs", dataset = "sua_unbalanced", data = as.data.table(out), waitTimeout = 2000000)
paste0(stats$inserted, " observations written, ",
stats$ignored, " weren't updated, ",
stats$discarded, " had problems.")
################################################################
##### send Email with notification of correct execution #####
################################################################
## Initiate email
from = "sws@fao.org"
to = swsContext.userEmail
subject = "PullDataToSua plug-in has correctly run"
body = "The plug-in has saved the SUAs in your session"
sendmail(from = from, to = to, subject = subject, msg = body)
paste0("Email sent to ", swsContext.userEmail)
|
library(DataVisualizations)
### Name: DualaxisClassplot
### Title: Dualaxis Classplot
### Aliases: DualaxisClassplot
### Keywords: DualaxisClassplot
### ** Examples
##ToDo
| /data/genthat_extracted_code/DataVisualizations/examples/DualaxisClassplot.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 179 | r | library(DataVisualizations)
### Name: DualaxisClassplot
### Title: Dualaxis Classplot
### Aliases: DualaxisClassplot
### Keywords: DualaxisClassplot
### ** Examples
##ToDo
|
library(shiny)
# Define UI for application that plots HWE and genotype frequencies
shinyUI(pageWithSidebar(
# Application title
headerPanel("HWE"),
# Sidebar with a slider input for number of observations
sidebarPanel(
sliderInput("AA",
"Genotype frequency of AA",
min = 0,
max = 1,
value = .5),
sliderInput("Aa",
"Genotype frequency of Aa",
min = 0,
max = 1,
value = .2500),
sliderInput("aa",
"Genotype frequency of aa",
min = 0,
max = 1,
value = .2500),
tableOutput("genotypes"),
tableOutput("alleles"),
textOutput("error")
),
# Show a plot of the HWE and the user input population
mainPanel(
plotOutput("hwePlot")
)
)) | /HWE/ui.R | no_license | jnmaloof/HamiltonDemos | R | false | false | 896 | r | library(shiny)
# UI definition: a sidebar with one slider per genotype frequency plus
# summary tables, and a main panel showing the Hardy-Weinberg plot.
shinyUI(pageWithSidebar(

  # Application title
  headerPanel("HWE"),

  # Sidebar: genotype-frequency sliders (each in [0, 1]), tables of
  # genotype/allele frequencies, and a slot for an error message.
  sidebarPanel(
    sliderInput("AA", "Genotype frequency of AA", min = 0, max = 1, value = 0.5),
    sliderInput("Aa", "Genotype frequency of Aa", min = 0, max = 1, value = 0.25),
    sliderInput("aa", "Genotype frequency of aa", min = 0, max = 1, value = 0.25),
    tableOutput("genotypes"),
    tableOutput("alleles"),
    textOutput("error")
  ),

  # Main panel: plot comparing the input population with HWE expectations.
  mainPanel(
    plotOutput("hwePlot")
  )
))
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insert_missing_lines.R
\name{insert_missing_lines}
\alias{insert_missing_lines}
\title{Returns a character vector representing the contents of pdl_report$content
with the missing second lines of data replaced}
\usage{
insert_missing_lines(content)
}
\arguments{
\item{content}{character vector representing the contents of the
pdl_report$content with the possible missing second lines of data}
}
\description{
Returns character vector representing the contents of the pdl_report$content
with the missing second lines of data replaced.
}
| /man/insert_missing_lines.Rd | permissive | rmsharp/snprcspf | R | false | true | 618 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insert_missing_lines.R
\name{insert_missing_lines}
\alias{insert_missing_lines}
\title{Returns character vector representing the contents of the pdl_report$content
with the missing second lines of data replaced.}
\usage{
insert_missing_lines(content)
}
\arguments{
\item{content}{character vector representing the contents of the
pdl_report$content with the possible missing second lines of data}
}
\description{
Returns character vector representing the contents of the pdl_report$content
with the missing second lines of data replaced.
}
|
## The two following functions calculate and cache
## the inverse of an input matrix.
## The function 'makeCacheMatrix' creates a list of functions
## 1.set the value of the matrix
## 2.get the value of the matrix
## 3.set the value of the inverse and store it in var 'i' in the parent env.
## 4.get the value of the inverse
## Build a special "matrix" wrapper that can cache its inverse.
## Returns a list of accessors:
##   set(y)          replace the stored matrix and drop any cached inverse
##   get()           return the stored matrix
##   setinverse(v)   store a computed inverse
##   getinverse()    return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # invalidate the cache on every update
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## The function 'makeCacheMatrix' takes x (result of the 1st function)
## as argument and returns the inverse matrix if it's already cached in 'x'
## otherwise calculates the inverse matrix using 'solve' function
## store/cache it in 'x' & returns it
## Return the inverse of the special "matrix" object created by
## makeCacheMatrix, computing it only on the first call.  Later calls reuse
## the cached inverse (announced via a message).  Extra arguments in ... are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Serve the cached inverse when one is already stored.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: fetch the matrix, invert it, and remember the result.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | tungtzet/ProgrammingAssignment2 | R | false | false | 1,347 | r | ## The 2 following function are to calculate and cache
## the inverse matrix of an input matrix
## The function 'makeCacheMatrix' creates a list of functions
## 1.set the value of the matrix
## 2.get the value of the matrix
## 3.set the value of the inverse and store it in var 'i' in the parent env.
## 4.get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The function 'makeCacheMatrix' takes x (result of the 1st function)
## as argument and returns the inverse matrix if it's already cached in 'x'
## otherwise calculates the inverse matrix using 'solve' function
## store/cache it in 'x' & returns it
cacheSolve <- function(x, ...) {
## Retrieve the inverse matrix if already cached
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
## Else, retrieve the original input matrix and compute the inverse
data <- x$get()
i <- solve(data, ...)
## Save the newly calculated inverse in cache
x$setinverse(i)
## Return the inverse matrix
i
}
|
# Bayesian penalized credible-region selection for a linear model with
# exposure(s) x and candidate confounders u.  Fits the outcome model with
# BLR2, builds confounder weights from exposure-vs-confounder regressions,
# runs the BayesPen solution path, and refits the selected models.
#
# Arguments:
#   y           response vector.
#   x           exposure variable(s); coerced to a matrix below.
#   u           candidate confounders.
#   prior       prior specification forwarded to BLR2 (NULL -> BLR2 defaults).
#   nIter, burnIn, thin, update, saveAt   MCMC controls forwarded to BLR2.
#   force       confounder indices to always keep; shifted below so the
#               exposure column(s) are always forced into the model.
#   max.steps   maximum solution-path length passed to BayesPen.
#   max.refit   maximum number of models refit by BayesPen.refit.
#   include.me  when TRUE, the remaining exposure columns are included as
#               regressors in each exposure model used to build the weights.
#   z.score     when TRUE, weights are rescaled by the ratio of posterior
#               SDs (z-score scale) -- see the two branches below.
#
# Returns the BayesPen path combined with the refit results, plus the full
# outcome fit (out$lm) and the confounder weights (out$confounder.weights).
BayesPen.lm.confounders <-
function(y,x,u,prior, nIter=500, burnIn=100, thin=1, update, force, max.steps=NULL, max.refit, saveAt="", include.me=FALSE, z.score=FALSE){
# Resolve optional arguments; "missing" sentinels map to library defaults.
if(missing(prior)) prior<- NULL
if(missing(update)) update <- round(500/10)
if(missing(force)) force <- NULL
if(missing(max.refit)) max.refit <- NULL
# The posterior covariance needed below is only well-defined when the chain
# is longer than the number of exposure columns.
if (nIter < ncol(x)){
stop("The number of MCMC draws must be set to be larger than the dimension for cronfounder selection.")
}
x <- as.matrix(x)
# Outcome model: regress y on the exposure(s) and all confounders.
fit.out<-BLR2(y, XR=cbind(x,u), prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)
if(is.null(dim(x)) | ncol(x)==1){
# Single exposure: one regression of x on u gives the confounder weights;
# the leading 0 is the (unpenalized) exposure slot.
confounder.weights <- c(0,abs(BLR2(x, XR=u, prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)$bR))
force <- c(1,force+1)
}else{
# Multiple exposures: accumulate |coefficients| over one exposure model
# per column of x.  The leading ncol(x) zeros keep exposures unpenalized.
confounder.weights<-rep(0,ncol(u)+ncol(x))
for(i in 1:ncol(x)){
if(include.me){
# Exposure model including the other exposure columns as regressors.
fit.temp <- BLR2(x[,i], XR=cbind(x[,-i],u), prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)
weight.temp <- abs(fit.temp$bR)
# Optional z-score rescaling by the posterior SD ratio -- NOTE(review):
# the index offsets here ([-1] vs [-c(1:3)] in the other branch) look
# tied to a specific layout of COV.bR; confirm against BLR2's output.
if(z.score) weight.temp <- weight.temp/sqrt(diag(fit.temp$COV.bR))*sqrt(diag(fit.out$COV.bR))[-1]
# Drop the coefficients of the other exposures; keep only confounder weights.
confounder.weights <- confounder.weights+c(rep(0,ncol(x)),weight.temp[-c(1:c(ncol(x)-1))])
}else{
# Exposure model against the confounders only.
fit.temp <- BLR2(x[,i], XR=u, prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)
weight.temp <- abs(fit.temp$bR)
if(z.score) weight.temp <- weight.temp/sqrt(diag(fit.temp$COV.bR))*sqrt(diag(fit.out$COV.bR)[-c(1:3)])
confounder.weights <- confounder.weights+c(rep(0,ncol(x)),weight.temp)
}
}
# Always force the exposure columns; shift user-forced confounder indices
# past the exposure block.
force <- c(1:ncol(x),force+ncol(x))
}
print("Model fitting complete, start post processing.")
print("------------------------------------------------------------")
# Weighted-penalization solution path on the outcome-model posterior.
fit <- BayesPen(beta=fit.out$bR, beta_cov=fit.out$COV.bR,joint=TRUE,confounder.weights=confounder.weights,force=force,max.steps=max.steps)
print("Refit Model")
print("------------------------------------------------------------")
refit <- BayesPen.refit(y,cbind(x,u),fit=fit, max.refit=max.refit)
print("Complete")
print("------------------------------------------------------------")
# Merge path and refit results (refit's "joint" entries would duplicate fit's).
out <- c(fit,refit[names(refit)[-grep("joint",names(refit))]])
out$lm <- fit.out
out$confounder.weights <- confounder.weights
return(out)
}
| /R/BayesPen.lm.confounders.R | no_license | cran/BayesPen | R | false | false | 2,415 | r | BayesPen.lm.confounders <-
function(y,x,u,prior, nIter=500, burnIn=100, thin=1, update, force, max.steps=NULL, max.refit, saveAt="", include.me=FALSE, z.score=FALSE){
if(missing(prior)) prior<- NULL
if(missing(update)) update <- round(500/10)
if(missing(force)) force <- NULL
if(missing(max.refit)) max.refit <- NULL
if (nIter < ncol(x)){
stop("The number of MCMC draws must be set to be larger than the dimension for cronfounder selection.")
}
x <- as.matrix(x)
fit.out<-BLR2(y, XR=cbind(x,u), prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)
if(is.null(dim(x)) | ncol(x)==1){
confounder.weights <- c(0,abs(BLR2(x, XR=u, prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)$bR))
force <- c(1,force+1)
}else{
confounder.weights<-rep(0,ncol(u)+ncol(x))
for(i in 1:ncol(x)){
if(include.me){
fit.temp <- BLR2(x[,i], XR=cbind(x[,-i],u), prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)
weight.temp <- abs(fit.temp$bR)
if(z.score) weight.temp <- weight.temp/sqrt(diag(fit.temp$COV.bR))*sqrt(diag(fit.out$COV.bR))[-1]
confounder.weights <- confounder.weights+c(rep(0,ncol(x)),weight.temp[-c(1:c(ncol(x)-1))])
}else{
fit.temp <- BLR2(x[,i], XR=u, prior=prior, nIter = nIter, burnIn = burnIn, thin = thin, update=update, saveAt=saveAt)
weight.temp <- abs(fit.temp$bR)
if(z.score) weight.temp <- weight.temp/sqrt(diag(fit.temp$COV.bR))*sqrt(diag(fit.out$COV.bR)[-c(1:3)])
confounder.weights <- confounder.weights+c(rep(0,ncol(x)),weight.temp)
}
}
force <- c(1:ncol(x),force+ncol(x))
}
print("Model fitting complete, start post processing.")
print("------------------------------------------------------------")
fit <- BayesPen(beta=fit.out$bR, beta_cov=fit.out$COV.bR,joint=TRUE,confounder.weights=confounder.weights,force=force,max.steps=max.steps)
print("Refit Model")
print("------------------------------------------------------------")
refit <- BayesPen.refit(y,cbind(x,u),fit=fit, max.refit=max.refit)
print("Complete")
print("------------------------------------------------------------")
out <- c(fit,refit[names(refit)[-grep("joint",names(refit))]])
out$lm <- fit.out
out$confounder.weights <- confounder.weights
return(out)
}
|
library(shiny)
shinyServer(
function(input, output) {
output$data <- reactive({{as.numeric(input$id1) * as.numeric(input$units)}})
#no idea why there's a small rounding error
output$percent <- reactive({ ((as.numeric(input$id1) * as.numeric(input$units)) - as.numeric(input$id1))/ as.numeric(input$id1)*100 })
output$plot <- renderPlot({
x <- ((as.numeric(input$id1) * as.numeric(input$units)) - as.numeric(input$id1)) / as.numeric(input$id1)*100
plot(x, breaks = 1, col = 'darkblue', border = 'white', type= "h", main="Percent change in values",
xlab="Units", ylab="Percentage")
})
}
) | /server.R | no_license | CourseraGit70/DataProduct | R | false | false | 709 | r | library(shiny)
# Server logic: shows the scaled value, the percent change, and a simple plot.
shinyServer(
  function(input, output) {
    # Scaled value: original amount times the unit multiplier.
    output$data <- reactive({ as.numeric(input$id1) * as.numeric(input$units) })
    # Percent change, computed directly from the multiplier as
    # (units - 1) * 100.  This is algebraically identical to
    # (id1*units - id1) / id1 * 100 but avoids the floating-point rounding
    # error the original formulation exhibited (and the NaN when id1 == 0).
    output$percent <- reactive({ (as.numeric(input$units) - 1) * 100 })
    output$plot <- renderPlot({
      x <- (as.numeric(input$units) - 1) * 100
      # 'breaks' and 'border' are hist()/barplot() arguments, not plot()
      # arguments; they only produced warnings and have been dropped.
      plot(x, col = "darkblue", type = "h", main = "Percent change in values",
           xlab = "Units", ylab = "Percentage")
    })
  }
)
best <- function(state, outcome) {
  ## Find the hospital with the lowest 30-day mortality rate for `outcome`
  ## in `state`, breaking ties alphabetically by hospital name.
  ## Read outcome data (all columns arrive as character; rates converted below).
  outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Check that state and outcome are valid
  if (!(state %in% outcomeData$State)) stop("invalid state")
  rateColumns <- c(
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  if (!(outcome %in% names(rateColumns))) stop("invalid outcome")
  ## BUG FIX: restrict to the requested state.  The original ranked the whole
  ## country and could return a hospital outside `state`.
  stateData <- outcomeData[outcomeData$State == state, ]
  ## BUG FIX: convert the rate column to numeric before ordering.  The
  ## original sorted the character column lexicographically, so e.g. "9.8"
  ## ranked after "10.2".  Non-numeric entries ("Not Available") become NA
  ## and are excluded.
  rates <- suppressWarnings(as.numeric(stateData[[rateColumns[[outcome]]]]))
  keep <- !is.na(rates)
  stateData$Hospital.Name[keep][order(rates[keep], stateData$Hospital.Name[keep])][1]
}
| /best.R | no_license | ticney/programmingAssignment3 | R | false | false | 963 | r | best <- function(state, outcome) {
## Read outcome data
outcomeData <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state and outcome are valid
if (!(state %in% outcomeData$State)) stop("invalid state")
if(!(outcome %in% c("heart attack","heart failure", "pneumonia"))) stop("invalid outcome")
## Return hospital name in that state with lowest 30-day death
## rate
if(outcome == "heart attack"){
result = outcomeData[order(outcomeData$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack,outcomeData$Hospital.Name),]$Hospital.Name[1]
} else if (outcome == "heart failure") {
result = outcomeData[order(outcomeData$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure,outcomeData$Hospital.Name),]$Hospital.Name[1]
} else {
result = outcomeData[order(outcomeData$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia,outcomeData$Hospital.Name),]$Hospital.Name[1]
}
result
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{GetCustomCalendar}
\alias{GetCustomCalendar}
\title{Get Custom Calendar for a Report Suite(s)}
\usage{
GetCustomCalendar(reportsuite.ids)
}
\arguments{
\item{reportsuite.ids}{Report suite id (or list of report suite ids)}
}
\value{
Data frame
}
\description{
Get custom calendar for the specified report suites.
}
\details{
This function requires having a character vector with one or more valid Report Suites specified.
}
\examples{
\dontrun{
cal <- GetCustomCalendar("your_report_suite")
cal2 <- GetCustomCalendar(report_suites$rsid)
}
}
| /man/GetCustomCalendar.Rd | no_license | miguelbravo/RSiteCatalyst | R | false | false | 601 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{GetCustomCalendar}
\alias{GetCustomCalendar}
\title{Get Custom Calendar for a Report Suite(s)}
\usage{
GetCustomCalendar(reportsuite.ids)
}
\arguments{
\item{reportsuite.ids}{Report suite id (or list of report suite ids)}
}
\value{
Data frame
}
\description{
Get custom calendar for the specified report suites.
}
\details{
This function requires having a character vector with one or more valid Report Suites specified.
}
\examples{
\dontrun{
cal <- GetCustomCalendar("your_report_suite")
cal2 <- GetCustomCalendar(report_suites$rsid)
}
}
|
######################################################################
# generate the report, slides, and if needed start the web application
reportfilename = paste(report_file, "Rmd", sep=".")
docreportfilename = paste("doc", reportfilename, sep="/")
htmloutput = paste(report_file, "html", sep = ".")
dochtmloutput = paste("doc", htmloutput, sep="/")
unlink( "TMPdirReport", recursive = TRUE )
dir.create( "TMPdirReport" )
setwd( "TMPdirReport" )
file.copy( paste(local_directory,docreportfilename, sep="/"),reportfilename, overwrite = T )
knit2html( reportfilename, quiet = TRUE )
file.copy( htmloutput, paste(local_directory,dochtmloutput, sep="/"), overwrite = T )
setwd( "../" )
unlink( "TMPdirReport", recursive = TRUE )
| /scripts/PCA/R/runcode.R | no_license | mberglem/GTCI2014 | R | false | false | 739 | r |
######################################################################
# generate the report, slides, and if needed start the web application
reportfilename = paste(report_file, "Rmd", sep=".")
docreportfilename = paste("doc", reportfilename, sep="/")
htmloutput = paste(report_file, "html", sep = ".")
dochtmloutput = paste("doc", htmloutput, sep="/")
unlink( "TMPdirReport", recursive = TRUE )
dir.create( "TMPdirReport" )
setwd( "TMPdirReport" )
file.copy( paste(local_directory,docreportfilename, sep="/"),reportfilename, overwrite = T )
knit2html( reportfilename, quiet = TRUE )
file.copy( htmloutput, paste(local_directory,dochtmloutput, sep="/"), overwrite = T )
setwd( "../" )
unlink( "TMPdirReport", recursive = TRUE )
|
library(tidyverse)
library(lubridate)
## Nesta atividade você deve utilizar o resultado do exercício 01 da Atividade da aula 03 (remuneração em dólares convertida para reais)
## Utilize o código daquele exercício como ponto de partida para esta atividade.
## Sempre utilize o caminho relativo, não o caminho absoluto, pois não funcionará na correção do exercício.
### IMPORTANTE ###
## Se você utilizar alguma função própria ou do material de aula, o código da(s) função(ões) deve estar neste arquivo da atividade.
salarios <- read_csv("aula-03/data/201802_dados_salarios_servidores.csv.gz")
head(salarios,20)
salarios%>%
mutate(REMUNERACAO_FINAL= (REMUNERACAO_REAIS + (REMUNERACAO_DOLARES * 3.2421)))%>%
filter(REMUNERACAO_FINAL>900)%>%
select(ID_SERVIDOR_PORTAL, REMUNERACAO_REAIS, REMUNERACAO_DOLARES, REMUNERACAO_FINAL,DATA_INGRESSO_ORGAO,DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO,DESCRICAO_CARGO,ORGSUP_LOTACAO,ORGSUP_EXERCICIO)->
subset_salarios
subset_salarios%>%
head(20)
### 1 ####
##
## Correlação de ano de ingresso por cargo
## - Determine o coeficiente de correlação entre o tempo em anos desde a DATA_INGRESSO_ORGAO e o tempo em anos desde a DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO
## para todos os cargos que possuem no mínimo 200 servidores.
## - Crie uma coluna que determina se a correlação é positiva ou negativa, e outra coluna que define a força da correlação de acordo com
## o material visto em aula sobre interpretação do coeficiente.
## - O resultado desta atividade deve ser um Data Frame com as variáveis de Cargo, Coeficiente de Correlação, Direção da Correlação e Força da Correlação
##
### # ####
atividade1 <- subset_salarios %>%
group_by(DESCRICAO_CARGO) %>%
summarise( SERVIDORES = n(),CORRELACAO = cor(x = ( 2018 - year (DATA_INGRESSO_ORGAO)),
y = (2018 -year(DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO))))%>%
ungroup()%>%
filter(SERVIDORES >=200)%>%
arrange(SERVIDORES)%>%
mutate(DIRECAO = (ifelse(CORRELACAO>0,'POSITIVA','NEGATIVA')))%>%
mutate(CORRELACAO_ABSOLUTA = (ifelse(CORRELACAO>0,CORRELACAO,(CORRELACAO * (-1)))))%>%
mutate(FORCA = ifelse(CORRELACAO_ABSOLUTA >=0.9, 'MUITO FORTE',
ifelse(CORRELACAO_ABSOLUTA >= 0.7 & CORRELACAO_ABSOLUTA < 0.9, 'FORTE',
ifelse(CORRELACAO_ABSOLUTA >= 0.5 & CORRELACAO_ABSOLUTA < 0.7, 'MODERADA',
ifelse(CORRELACAO_ABSOLUTA >= 0.3 & CORRELACAO_ABSOLUTA < 0.5, 'FRACA','DESPREZÍVEL')))))%>%
select(DESCRICAO_CARGO,
CORRELACAO,
DIRECAO,
FORCA,
CORRELACAO_ABSOLUTA)
atividade1 %>%
select(DESCRICAO_CARGO,
CORRELACAO,
DIRECAO,
FORCA)
### 2 ###
##
## - A partir do dataset do exercício anterior, selecione os 10 cargos de correlação mais forte (seja positiva ou negativa) e os
## 10 cargos de correlação mais fraca (de novo, independente de ser positiva ou negativa)
## - Para estes 20 cargos, determine a Moda do órgão de lotação (ORGSUP_LOTACAO) e de exercício (ORGSUP_EXERCICIO)
## - Reponda se existe diferença entre as modas e se existe relação entre a Força da Correlação e a diferença entre as modas
## (caso haja diferença)
##
### # ###
# The 10 cargos with the WEAKEST absolute correlation (head of the ascending
# sort by CORRELACAO_ABSOLUTA); "f" suffix below marks the weak ("fraca") group.
atividade1 %>%
  arrange(CORRELACAO_ABSOLUTA) %>%
  head(10) %>%
  pull(DESCRICAO_CARGO) -> cargos
# Mode of the lotacao organ among servants in those cargos.
subset_salarios %>%
  filter(DESCRICAO_CARGO %in% cargos) %>%
  count(ORGSUP_LOTACAO) %>%
  arrange(desc(n)) %>%
  head(1)%>%
  pull(ORGSUP_LOTACAO) -> moda_orgsup_lotacaof
# BUG FIX: the exercicio pipeline was missing the cargo filter, so it
# computed the overall mode instead of the mode for the selected 10 cargos
# (the exercise asks for the modes "para estes 20 cargos").
subset_salarios %>%
  filter(DESCRICAO_CARGO %in% cargos) %>%
  count(ORGSUP_EXERCICIO) %>%
  arrange(desc(n)) %>%
  head(1)%>%
  pull(ORGSUP_EXERCICIO) -> moda_orgsup_exerciciof
# The 10 cargos with the STRONGEST absolute correlation (tail of the sort).
atividade1%>%
  arrange(CORRELACAO_ABSOLUTA)%>%
  tail(10)%>%
  pull(DESCRICAO_CARGO) -> cargos
subset_salarios %>%
  filter(DESCRICAO_CARGO %in% cargos) %>%
  count(ORGSUP_LOTACAO) %>%
  arrange(desc(n)) %>%
  head(1)%>%
  pull(ORGSUP_LOTACAO) -> moda_orgsup_lotacao
# BUG FIX: same missing cargo filter as above.
subset_salarios %>%
  filter(DESCRICAO_CARGO %in% cargos) %>%
  count(ORGSUP_EXERCICIO) %>%
  arrange(desc(n)) %>%
  head(1)%>%
  pull(ORGSUP_EXERCICIO) -> moda_orgsup_exercicio
print(paste('Dos 10 cargos correlacao mais forte :',
paste('Orgao Lotacao:',moda_orgsup_lotacao,' Orgao Exercicio:',moda_orgsup_exercicio)))
print(paste('Dos 10 cargos correlacao mais fracas : ',
paste('Orgao Lotacao:',moda_orgsup_lotacaof,' Orgao Exercicio:',moda_orgsup_exerciciof)))
moda_orgsup_exercicio
##Comentario
## a diferenca das modas entre orgao de lotacao e de exercio e o exato oposto entre os cargos de
## correlação mais forte e mais fraca
| /aula-04/02_atividade.R | no_license | racoutinho/data-analysis_with_R-201801 | R | false | false | 4,625 | r | library(tidyverse)
library(lubridate)
## Nesta atividade você deve utilizar o resultado do exercício 01 da Atividade da aula 03 (remuneração em dólares convertida para reais)
## Utilize o código daquele exercício como ponto de partida para esta atividade.
## Sempre utilize o caminho relativo, não o caminho absoluto, pois não funcionará na correção do exercício.
### IMPORTANTE ###
## Se você utilizar alguma função própria ou do material de aula, o código da(s) função(ões) deve estar neste arquivo da atividade.
salarios <- read_csv("aula-03/data/201802_dados_salarios_servidores.csv.gz")
head(salarios,20)
salarios%>%
mutate(REMUNERACAO_FINAL= (REMUNERACAO_REAIS + (REMUNERACAO_DOLARES * 3.2421)))%>%
filter(REMUNERACAO_FINAL>900)%>%
select(ID_SERVIDOR_PORTAL, REMUNERACAO_REAIS, REMUNERACAO_DOLARES, REMUNERACAO_FINAL,DATA_INGRESSO_ORGAO,DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO,DESCRICAO_CARGO,ORGSUP_LOTACAO,ORGSUP_EXERCICIO)->
subset_salarios
subset_salarios%>%
head(20)
### 1 ####
##
## Correlação de ano de ingresso por cargo
## - Determine o coeficiente de correlação entre o tempo em anos desde a DATA_INGRESSO_ORGAO e o tempo em anos desde a DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO
## para todos os cargos que possuem no mínimo 200 servidores.
## - Crie uma coluna que determina se a correlação é positiva ou negativa, e outra coluna que define a força da correlação de acordo com
## o material visto em aula sobre interpretação do coeficiente.
## - O resultado desta atividade deve ser um Data Frame com as variáveis de Cargo, Coeficiente de Correlação, Direção da Correlação e Força da Correlação
##
### # ####
atividade1 <- subset_salarios %>%
group_by(DESCRICAO_CARGO) %>%
summarise( SERVIDORES = n(),CORRELACAO = cor(x = ( 2018 - year (DATA_INGRESSO_ORGAO)),
y = (2018 -year(DATA_DIPLOMA_INGRESSO_SERVICOPUBLICO))))%>%
ungroup()%>%
filter(SERVIDORES >=200)%>%
arrange(SERVIDORES)%>%
mutate(DIRECAO = (ifelse(CORRELACAO>0,'POSITIVA','NEGATIVA')))%>%
mutate(CORRELACAO_ABSOLUTA = (ifelse(CORRELACAO>0,CORRELACAO,(CORRELACAO * (-1)))))%>%
mutate(FORCA = ifelse(CORRELACAO_ABSOLUTA >=0.9, 'MUITO FORTE',
ifelse(CORRELACAO_ABSOLUTA >= 0.7 & CORRELACAO_ABSOLUTA < 0.9, 'FORTE',
ifelse(CORRELACAO_ABSOLUTA >= 0.5 & CORRELACAO_ABSOLUTA < 0.7, 'MODERADA',
ifelse(CORRELACAO_ABSOLUTA >= 0.3 & CORRELACAO_ABSOLUTA < 0.5, 'FRACA','DESPREZÍVEL')))))%>%
select(DESCRICAO_CARGO,
CORRELACAO,
DIRECAO,
FORCA,
CORRELACAO_ABSOLUTA)
atividade1 %>%
select(DESCRICAO_CARGO,
CORRELACAO,
DIRECAO,
FORCA)
### 2 ###
##
## - A partir do dataset do exercício anterior, selecione os 10 cargos de correlação mais forte (seja positiva ou negativa) e os
## 10 cargos de correlação mais fraca (de novo, independente de ser positiva ou negativa)
## - Para estes 20 cargos, determine a Moda do órgão de lotação (ORGSUP_LOTACAO) e de exercício (ORGSUP_EXERCICIO)
## - Reponda se existe diferença entre as modas e se existe relação entre a Força da Correlação e a diferença entre as modas
## (caso haja diferença)
##
### # ###
atividade1 %>%
arrange(CORRELACAO_ABSOLUTA) %>%
head(10) %>%
pull(DESCRICAO_CARGO) -> cargos
subset_salarios %>%
filter(DESCRICAO_CARGO %in% cargos) %>%
count(ORGSUP_LOTACAO) %>%
arrange(desc(n)) %>%
head(1)%>%
pull(ORGSUP_LOTACAO) -> moda_orgsup_lotacaof
subset_salarios %>%
count(ORGSUP_EXERCICIO) %>%
arrange(desc(n)) %>%
head(1)%>%
pull(ORGSUP_EXERCICIO) -> moda_orgsup_exerciciof
atividade1%>%
arrange(CORRELACAO_ABSOLUTA)%>%
tail(10)%>%
pull(DESCRICAO_CARGO) -> cargos
subset_salarios %>%
filter(DESCRICAO_CARGO %in% cargos) %>%
count(ORGSUP_LOTACAO) %>%
arrange(desc(n)) %>%
head(1)%>%
pull(ORGSUP_LOTACAO) -> moda_orgsup_lotacao
subset_salarios %>%
count(ORGSUP_EXERCICIO) %>%
arrange(desc(n)) %>%
head(1)%>%
pull(ORGSUP_EXERCICIO) -> moda_orgsup_exercicio
print(paste('Dos 10 cargos correlacao mais forte :',
paste('Orgao Lotacao:',moda_orgsup_lotacao,' Orgao Exercicio:',moda_orgsup_exercicio)))
print(paste('Dos 10 cargos correlacao mais fracas : ',
paste('Orgao Lotacao:',moda_orgsup_lotacaof,' Orgao Exercicio:',moda_orgsup_exerciciof)))
moda_orgsup_exercicio
##Comentario
## a diferenca das modas entre orgao de lotacao e de exercio e o exato oposto entre os cargos de
## correlação mais forte e mais fraca
|
context("hyet_missing related tests")
# hyet_missing must reject inputs that are not hyetograph data frames.
test_that("hyet_missing returns error", {
  hyet <- "a"
  expect_error(hyet_missing(hyet))
})
test_that("hyet_missing reports correct ratios", {
  # create an hourly (60-minute time step) series covering 24 records,
  # i.e. one full day starting 2018-01-01
  time_step <- 60
  hyet <- tibble::tibble(
    date = seq(
      from = as.POSIXct(0, origin = "2018-01-01"), length.out = 24,
      by = paste(time_step, "mins")
    ),
    prec = runif(24, 0, 5)
  )
  # complete series: expect reported NA monthly ratio = 0
  exp_miss <- tibble::tibble(year = 2018, month = 1, na_ratio = 0)
  expect_equal(object = hyet_missing(hyet), expected = exp_miss)
  # remove 3 of the 24 records and refill with NA via hyet_fill:
  # expected NA monthly ratio = 3/24 = 0.125
  hyet_miss <- hyet[-c(2:4), ]
  hyet_miss <- hyet_fill(hyet_miss, 60)
  exp_miss <- tibble::tibble(year = 2018, month = 1, na_ratio = 0.125)
  expect_equal(object = hyet_missing(hyet_miss), expected = exp_miss)
})
| /tests/testthat/test_hyet_missing.R | permissive | kvantas/hyetor | R | false | false | 878 | r | context("hyet_missing related tests")
test_that("hyet_missing returns error", {
  # non-tibble input must be rejected
  bad_input <- "a"
  expect_error(hyet_missing(bad_input))
})

test_that("hyet_missing reports correct ratios", {
  # build a one-day precipitation series with a 60-minute time step
  step <- 60
  dates <- seq(
    from = as.POSIXct(0, origin = "2018-01-01"),
    by = paste(step, "mins"),
    length.out = 24
  )
  series <- tibble::tibble(date = dates, prec = runif(24, 0, 5))

  # no gaps: the monthly NA ratio is reported as 0
  expect_equal(
    hyet_missing(series),
    tibble::tibble(year = 2018, month = 1, na_ratio = 0)
  )

  # remove 3 of 24 records, refill the gaps: NA ratio is 3/24 = 0.125
  filled <- hyet_fill(series[-c(2:4), ], 60)
  expect_equal(
    hyet_missing(filled),
    tibble::tibble(year = 2018, month = 1, na_ratio = 0.125)
  )
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/by_studies.R
\name{available_sample_lists}
\alias{available_sample_lists}
\title{Get All Sample Lists Available For a Study}
\usage{
available_sample_lists(study_id = NULL, base_url = NULL)
}
\arguments{
\item{study_id}{A character string indicating which study ID should be searched.
Only 1 study ID allowed.}
\item{base_url}{The database URL to query
If \code{NULL} will default to URL set with \verb{set_cbioportal_db(<your_db>)}}
}
\value{
A dataframe of sample lists available for a given study
}
\description{
Get All Sample Lists Available For a Study
}
\examples{
\dontrun{
set_cbioportal_db("public")
available_sample_lists(study_id = "acc_tcga")
}
}
| /man/available_sample_lists.Rd | permissive | karissawhiting/cbioportalR | R | false | true | 728 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/by_studies.R
\name{available_sample_lists}
\alias{available_sample_lists}
\title{Get All Sample Lists Available For a Study}
\usage{
available_sample_lists(study_id = NULL, base_url = NULL)
}
\arguments{
\item{study_id}{A character string indicating which study ID should be searched.
Only 1 study ID allowed.}
\item{base_url}{The database URL to query
If \code{NULL} will default to URL set with \verb{set_cbioportal_db(<your_db>)}}
}
\value{
A dataframe of sample lists available for a given study
}
\description{
Get All Sample Lists Available For a Study
}
\examples{
\dontrun{
set_cbioportal_db("public")
available_sample_lists(study_id = "acc_tcga")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.