content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Date   : 2020/08/05
# Author : (name in original was mojibake; comments translated to English)
# Topic  : Data visualization - nationwide wifi map
library('ggmap')
# SECURITY: a live Google Maps API key was hard-coded here. Keys must never be
# committed to source control; read it from the environment instead, e.g.
#   Sys.setenv(GOOGLE_MAPS_API_KEY = "...") before running this script.
register_google(key = Sys.getenv("GOOGLE_MAPS_API_KEY"))
df_wifi <- read.csv('../file/wifi.csv')
df_wifi
# map center: mean of all wifi spot coordinates (assumes columns lon/lat -- TODO confirm)
wifi_center <- c(mean(df_wifi$lon), mean(df_wifi$lat))
# fetch the base map
map <- get_googlemap(center = wifi_center,
                     maptype = 'roadmap',
                     zoom = 7,
                     size = c(640, 640))
# Plot the spots. Fixed color/size must go OUTSIDE aes(): inside aes() they are
# treated as data mappings, which produces a spurious legend entry ("red") and
# does not actually set the point color/size.
ggmap(map) + geom_point(aes(x = lon, y = lat), color = 'red', size = 3, data = df_wifi)
| /Ch05/5-7.R | no_license | tsbslteisrhy/R | R | false | false | 578 | r | #λ μ§ : 2020/08/05
#μ΄λ¦ : μ ν¨μ§
#λ΄μ© : λ°μ΄ν°μκ°ν - μ κ΅ wifi μ§λ μκ°ν
library('ggmap')
register_google(key = 'AIzaSyBrkMiFk4KP89zVAkrefQdzTiiGAucTcAY')
df_wifi <- read.csv('../file/wifi.csv')
df_wifi
#μΌν°μ’ν μμ±
wifi_center <- c(mean(df_wifi$lon), mean(df_wifi$lat))
#μ§λμμ±
map <- get_googlemap(center = wifi_center,
maptype = 'roadmap',
zoom = 7,
size = c(640, 640))
#μ§λμΆλ ₯
ggmap(map) + geom_point(aes(x=lon, y=lat, color='red', size=3), data = df_wifi)
|
# methods for pffr-objects
#
#
# Author: fabians
# 16.08.2011, 13:01:24
###############################################################################
#' Prediction for penalized function-on-function regression
#'
#' Takes a fitted \code{pffr}-object produced by \code{\link{pffr}()} and produces
#' predictions given a new set of values for the model covariates or the original
#' values used for the model fit. Predictions can be accompanied by standard errors,
#' based on the posterior distribution of the model coefficients. This is a wrapper
#' function for \code{\link[mgcv]{predict.gam}()}.
#'
#' Index variables (i.e., evaluation points) for the functional covariates are reused
#' from the fitted model object and cannot be supplied with \code{newdata}.
#' Prediction is always for the entire index range of the responses as defined
#' in the original fit. If the original fit was performed on sparse or irregular,
#' non-gridded response data supplied via \code{pffr}'s \code{ydata}-argument
#' and no \code{newdata} was supplied, this function will
#' simply return fitted values for the original evaluation points of the response (in list form).
#' If the original fit was performed on sparse or irregular data and \code{newdata} \emph{was}
#' supplied, the function will return predictions on the grid of evaluation points given in
#' \code{object$pffr$yind}.
#'
#' @param object a fitted \code{pffr}-object
#' @param newdata A named list (or a \code{data.frame}) containing the values of the
#' model covariates at which predictions are required.
#' If no \code{newdata} is provided then predictions corresponding to the original data
#' are returned. If \code{newdata} is provided then it must contain all the variables needed
#' for prediction, in the format supplied to \code{pffr}, i.e., functional predictors must be
#' supplied as matrices with each row corresponding to one observed function.
#' See Details for more on index variables and prediction for models fit on
#' irregular or sparse data.
#' @param reformat logical, defaults to TRUE. Should predictions be returned in matrix form (default) or
#' in the long vector shape returned by \code{predict.gam()}?
#' @param type see \code{\link[mgcv]{predict.gam}()} for details.
#' Note that \code{type == "lpmatrix"} will force \code{reformat} to FALSE.
#' @param se.fit see \code{\link[mgcv]{predict.gam}()}
#' @param ... additional arguments passed on to \code{\link[mgcv]{predict.gam}()}
#' @seealso \code{\link[mgcv]{predict.gam}()}
#' @return If \code{type == "lpmatrix"}, the design matrix for the supplied covariate values in long format.
#' If \code{se == TRUE}, a list with entries \code{fit} and \code{se.fit} containing fits and standard errors, respectively.
#' If \code{type == "terms"} or \code{"iterms"} each of these lists is a list of matrices of the same dimension as the response for \code{newdata}
#' containing the linear predictor and its se for each term.
#' @export
#' @method predict pffr
#' @author Fabian Scheipl
#' @importFrom mgcv predict.gam predict.bam
predict.pffr <- function(object,
                         newdata,
                         reformat=TRUE,
                         type = "link",
                         se.fit = FALSE,
                         ...){
  # Wrapper around mgcv's predict methods for pffr fits:
  #  1. translates pffr-style newdata (one row per observed function) into the
  #     long vec(response) layout that predict.gam/predict.bam expect,
  #  2. dispatches the (modified) call to mgcv,
  #  3. optionally reshapes the result back into nobs x nyindex matrices
  #     (regular grids) or ydata-style data.frames (sparse/irregular fits).
  #browser()
  call <- match.call()
  nyindex <- object$pffr$nyindex
  ## warn if any entries in ... are not arguments for predict.gam
  dots <- list(...)
  if(length(dots)){
    validDots <- c(names(formals(predict.gam)), "cluster")
    # should be
    #  unique(c(names(formals(predict.gam)),
    #           names(formals(predict.bam))))
    # but predict.bam is not exported.
    notUsed <- names(dots)[!(names(dots) %in% validDots)]
    if(length(notUsed))
      warning("Arguments <", paste(notUsed, collapse=", "), "> supplied but not used." )
  }
  if(!missing(newdata)){
    # number of new observations = rows of the first supplied covariate
    nobs <- nrow(as.matrix(newdata[[1]]))
    # check if the supplied data already has the shape expected by predict.gam
    # and dispatch immediately if so (need this so summary works as expected!)
    if(!(all(names(newdata) %in% names(object$model))) |
       !(paste0(object$pffr$yindname,".vec") %in% names(newdata))){
      # check lengths
      stopifnot(length(unique(sapply(newdata, function(x)
        ifelse(is.matrix(x), nrow(x), length(x))))) ==1)
      # #FIXME: better leave this check to predict.gam....
      # covnames <- mapply(gsub,
      #         pattern=c(".[st]mat$"),
      #         replacement="", x=unique(unlist(sapply(object$smooth, function(x) x$term))))
      # covnames <- unique(covnames[covnames != paste(object$pffr$yindname, ".vec", sep="")])
      # stopifnot(all(covnames %in% names(newdata)))
      #get newdata into the shape expected by predict gam:
      gamdata <- list()
      #y-index
      gamdata[[paste(object$pffr$yindname, ".vec", sep="")]] <- rep(object$pffr$yind, times=nobs)
      # which covariates occur in which terms?
      varmap <- sapply(names(object$pffr$labelmap), function(x) all.vars(formula(paste("~", x))))
      # don't include response
      covnames <- unique(names(newdata)[names(newdata)!=deparse(object$formula[[2]])])
      for(cov in covnames){
        #find the term(s) <cov> is associated with
        trms <- which(sapply(varmap, function(x) any(grep(paste("^",cov,"$",sep=""), x))))
        if(!is.null(dots$terms)) trms <- trms[names(trms) %in% dots$terms]
        if(length(trms)!=0){
          for(trm in trms){
            # classify the term type (linear function-on-function, smooth
            # function-on-function, FPC-based, functional random effect)
            is.ff <- trm %in% object$pffr$where$ff
            is.sff <- trm %in% object$pffr$where$sff
            is.ffpc <- trm %in% object$pffr$where$ffpc
            is.pcre <- trm %in% object$pffr$where$pcre
            #if ff(X) or sff(X), generate (X.mat), X.tmat, X.smat, L.X ...
            if(is.ff){
              ff <- object$pffr$ff[[grep(paste(cov,"[,\\)]",sep=""), names(object$pffr$ff))]]
              #... but don't generate new data unless <cov> is the functional covariate.
              if(grepl(paste(cov,"\\.[st]mat",sep=""), deparse(ff$call$x))){
                # make L-matrix for new obs:
                # (only constant-column L supported: each new row reuses L[1,])
                L <- ff$L
                if(any(apply(L, 2, function(x) length(unique(x)))!=1)){
                  stop("Error for ", names(varmap)[trm],
                       "-- Prediction for ff-terms with varying rows in integration operator L not implememented yet.")
                }
                if(!is.null(ff$limits)){
                  #TODO implement prediction with limits
                  stop("Error for ", names(varmap)[trm],
                       "-- Prediction for ff-terms with <limits> not implememented yet.")
                }
                predL <- matrix(L[1,], byrow=TRUE, nrow=nrow(newdata[[cov]]), ncol=ncol(L))
                # s-index (covariate arg), t-index (response arg), and the
                # weighted covariate L*X, each expanded to nobs*nyindex rows
                gamdata[[paste(cov, ".smat", sep="")]] <-
                  matrix(ff$xind, byrow=TRUE, ncol=length(ff$xind), nrow=nobs*nyindex)
                gamdata[[paste(cov, ".tmat", sep="")]] <-
                  matrix(rep(object$pffr$yind, times=nobs), ncol=length(ff$xind), nrow=nobs*nyindex)
                gamdata[[paste("L.", cov, sep="")]] <-
                  (predL*newdata[[cov]])[rep(1:nobs, each=nyindex),]
              }
            }
            if(is.sff){
              sff <- object$pffr$ff[[grep(paste(cov,"[,\\)]",sep=""), names(object$pffr$ff))]]
              #... but don't generate new data unless <cov> is the functional covariate.
              if(grepl(paste(cov,"\\.[st]mat",sep=""), deparse(sff$call$x))){
                # make L-matrix for new obs:
                L <- sff$L
                if(any(apply(L, 2, function(x) length(unique(x)))!=1)){
                  stop("Error for ", names(varmap)[trm],
                       "-- Prediction for sff-terms with varying rows in integration operator L not implememented yet.")
                }
                predL <- matrix(L[1,], byrow=TRUE, nrow=nrow(newdata[[cov]]), ncol=ncol(L))
                # sff keeps the raw covariate matrix (X.mat) and L separately
                gamdata[[paste(cov, ".mat", sep="")]] <- newdata[[cov]][rep(1:nobs, e=nyindex),]
                gamdata[[paste(cov, ".smat", sep="")]] <-
                  matrix(sff$xind, byrow=TRUE, ncol=length(sff$xind), nrow=nobs*nyindex)
                gamdata[[paste(cov, ".tmat", sep="")]] <-
                  matrix(rep(object$pffr$yind, times=nobs), ncol=length(sff$xind), nrow=nobs*nyindex)
                gamdata[[paste("L.", cov, sep="")]] <- predL[rep(1:nobs, e=nyindex),]
              }
            }
            if(is.pcre){
              # functional random effect: interpolate stored eigenfunctions
              # from their original index to the fit's y-index
              pcre <- object$pffr$pcre[[grep(cov, names(object$pffr$pcre))]]
              gamdata[[paste(cov, ".vec", sep="")]] <- rep(newdata[[cov]], each=nyindex)
              for(nm in colnames(pcre$efunctions)){
                tmp <- approx(x=pcre$yind,
                              y=pcre$efunctions[, nm],
                              xout=object$pffr$yind,
                              method = "linear")$y
                gamdata[[nm]] <- tmp[rep(1:nyindex, times=nobs)]
              }
            }
            if(is.ffpc){
              ffpc <- object$pffr$ffpc[[grep(paste(cov,"[,\\)]",sep=""),
                                             names(object$pffr$ffpc))]]
              # Xc' = Phi xi' + error --> get loadings for new data:
              # (least-squares projection of centered new curves on the PC basis)
              Xct <- t(newdata[[cov]]) - as.vector(ffpc$meanX)
              xiMat <- t(qr.coef(qr(ffpc$PCMat), Xct))
              colnames(xiMat) <- paste(make.names(cov),".PC", 1:ncol(xiMat), sep="")
              xiMat <- xiMat[rep(1:nobs, each=nyindex), ]
              for(nm in colnames(xiMat)){
                gamdata[[nm]] <- xiMat[,nm]
              }
            }
            if(!(is.ff | is.sff | is.ffpc | is.pcre)) {
              #just repeat each entry nyindex-times to correspond to vec(<Response>)
              gamdata[[cov]] <- drop(newdata[[cov]])[rep(1:nobs, each=nyindex)]
            }
          }
        }
      }
      # list2df is a file-/package-internal helper -- presumably turns the list
      # into a data.frame while keeping matrix columns intact (TODO confirm)
      gamdata <- list2df(gamdata)
      call[["newdata"]] <- gamdata
    }
  } else {
    call$newdata <- eval(call$newdata)
    nobs <- object$pffr$nobs
  }
  # predictions on the original irregular evaluation points only happen when no
  # newdata was supplied for a sparse/non-gridded fit
  isIrregular <- missing(newdata) & object$pffr$sparseOrNongrid
  #call predict.gam
  call[[1]] <- if(inherits(object, "bam")){
    mgcv::predict.bam
  } else mgcv::predict.gam
  call$object <- as.name("object")
  ret <- eval(call)
  # lpmatrix has no natural matrix-per-response shape, so never reformat it
  if(type=="lpmatrix" && reformat){
    reformat <- FALSE
    warning("Setting reformat to FALSE for type=\"lpmatrix\".")
  }
  #reformat into matrices with same shape as <Response>
  if(reformat){
    if(!isIrregular){
      if(missing(newdata) && !is.null(object$pffr$missingind)){
        #pad with NAs at the appropriate locations so that fits are nobs x nyindex:
        insertNA <- function(x){
          if(length(x) != nobs*object$pffr$nyindex){
            tmp <- rep(NA, nobs*object$pffr$nyindex)
            tmp[-object$pffr$missingind] <- x
            return(tmp)
          } else {
            return(x)
          }
        }
      } else insertNA <- function(x) return(x)
      # four cases: (se.fit yes/no) x (per-term list yes/no); each reshapes the
      # long vectors/columns into nobs x nyindex matrices, row-major
      if(se.fit){
        if(type %in% c("terms", "iterms")){
          ret <- lapply(ret, function(x)
            do.call(list,
                    sapply(1:ncol(x), function(i){
                      #browser()
                      d <- list(I(matrix(insertNA(x[,i]), nrow=nobs,
                                         ncol=object$pffr$nyindex,
                                         byrow=TRUE)))
                      names(d) <- colnames(x)[i]
                      return(d)
                    })))
        } else {
          ret <- lapply(ret, function(x) matrix(insertNA(x), nrow=nobs,
                                                ncol=object$pffr$nyindex, byrow=TRUE))
        }
      } else {
        if(type %in% c("terms", "iterms")){
          ret <- do.call(list, sapply(1:ncol(ret), function(i){
            #browser()
            d <- list(I(matrix(insertNA(ret[,i]), nrow=nobs,
                               ncol=object$pffr$nyindex, byrow=TRUE)))
            names(d) <- colnames(ret)[i]
            return(d)
          }))
        } else ret <- matrix(insertNA(ret), nrow=nobs, ncol=object$pffr$nyindex, byrow=TRUE)
      }
    } else {
      # irregular fit without newdata: attach values to the original (.obs, .index) pairs
      evalpoints <- object$pffr$ydata[,c(".obs", ".index")]
      if(se.fit){
        if(type %in% c("terms", "iterms")){
          ret <- lapply(ret, function(x)
            do.call(list,
                    sapply(1:ncol(x), function(i){
                      #browser()
                      d <- list(cbind(evalpoints, .value=x[,i]))
                      names(d) <- colnames(x)[i]
                      return(d)
                    })))
        } else {
          ret <- lapply(ret, function(x) cbind(evalpoints, .value=x))
        }
      } else {
        if(type %in% c("terms", "iterms")){
          ret <- do.call(list, sapply(1:ncol(ret), function(i){
            #browser()
            d <- list(cbind(evalpoints, .value=ret[,i]))
            names(d) <- colnames(ret)[i]
            return(d)
          }))
        } else ret <- cbind(evalpoints, .value=ret)
      }
    }
  }
  return(ret)
}
#' Obtain model matrix for a pffr fit
#'
#' @param object a fitted \code{pffr}-object
#' @param ... other arguments, passed to \code{\link[mgcv]{predict.gam}}.
#'
#' @return A model matrix
#' @method model.matrix pffr
#' @author Fabian Scheipl
model.matrix.pffr <- function (object, ...) {
  # Only pffr fits are supported.
  if (!inherits(object, "pffr")) {
    stop("`object' is not of class \"pffr\"")
  }
  # The long-format design matrix comes straight from predict(type = "lpmatrix");
  # reformat = FALSE keeps the raw predict.gam layout.
  predict(object, type = "lpmatrix", reformat = FALSE, ...)
}
#' Obtain residuals and fitted values for a pffr models
#'
#' See \code{\link{predict.pffr}} for alternative options to extract estimated
#' values from a \code{pffr} object.
#' "Fitted values" here refers to the estimated additive predictor values,
#' these will not be on the scale of the response for models with link functions.
#'
#' @param object a fitted \code{pffr}-object
#' @param reformat logical, defaults to TRUE. Should residuals be returned in
#' \code{n x yindex} matrix form (regular grid data) or, respectively, in the
#' shape of the originally supplied \code{ydata} argument (sparse/irregular
#' data), or, if \code{FALSE}, simply as a long vector as returned by
#' \code{resid.gam()}?
#' @param ... other arguments, passed to \code{\link[mgcv]{residuals.gam}}.
#'
#' @return A matrix or \code{ydata}-like \code{data.frame} or a vector of
#' residuals / fitted values (see \code{reformat}-argument)
#' @export
#' @importFrom mgcv residuals.gam
#' @method residuals pffr
#' @aliases fitted.pffr
#' @author Fabian Scheipl
residuals.pffr <- function (object, reformat=TRUE, ...) {
  # Residuals for a pffr fit, delegated to mgcv and optionally reshaped to the
  # layout of the original response (matrix for grid data, ydata-like
  # data.frame for sparse/irregular data).
  if (!inherits(object, "pffr")) {
    stop("`object' is not of class \"pffr\"")
  }
  res <- mgcv::residuals.gam(object, ...)
  if (!reformat) {
    return(res)
  }
  info <- object$pffr
  if (info$sparseOrNongrid) {
    # irregular data: return the ydata frame with residuals in the .value column
    out <- info$ydata
    out[, ".value"] <- res
    return(out)
  }
  # regular grid: pad missing response cells with NA, then reshape row-major
  # into an nobs x nyindex matrix
  ncells <- info$nobs * info$nyindex
  if (length(res) != ncells) {
    padded <- rep(NA, ncells)
    padded[-info$missingind] <- res
    res <- padded
  }
  matrix(res, nrow = info$nobs, ncol = info$nyindex, byrow = TRUE)
}
#' @method fitted pffr
#' @export
#' @rdname residuals.pffr
fitted.pffr <- function (object, reformat=TRUE, ...) {
  # Fitted values (additive-predictor scale) of a pffr fit, optionally reshaped
  # to the layout of the original response.
  if (!inherits(object, "pffr")) {
    stop("`object' is not of class \"pffr\"")
  }
  vals <- object$fitted.values
  if (!reformat) {
    return(vals)
  }
  info <- object$pffr
  if (info$sparseOrNongrid) {
    # irregular data: return the ydata frame with fits in the .value column
    out <- info$ydata
    out[, ".value"] <- vals
    return(out)
  }
  # regular grid: pad missing response cells with NA, then reshape row-major
  # into an nobs x nyindex matrix
  ncells <- info$nobs * info$nyindex
  if (length(vals) != ncells) {
    padded <- rep(NA, ncells)
    padded[-info$missingind] <- vals
    vals <- padded
  }
  matrix(vals, nrow = info$nobs, ncol = info$nyindex, byrow = TRUE)
}
#' Plot a pffr fit
#'
#' Plot a fitted pffr-object. Simply dispatches to \code{\link[mgcv]{plot.gam}}.
#'
#' @param x a fitted \code{pffr}-object
#' @param ... arguments handed over to \code{\link[mgcv]{plot.gam}}
#'
#' @return This function only generates plots.
#' @method plot pffr
#' @importFrom mgcv plot.gam
#' @author Fabian Scheipl
plot.pffr <- function (x, ...)
{
  # Rebuild the original call and redirect it to mgcv's plot method.
  cl <- match.call()
  cl[[1]] <- mgcv::plot.gam
  # Drop the leading "pffr" class so method dispatch inside plot.gam treats x
  # as a plain gam/bam fit without glitches.
  class(x) <- class(x)[-1]
  invisible(eval(cl))
}
#' Get estimated coefficients from a pffr fit
#'
#' Returns estimated coefficient functions/surfaces \eqn{\beta(t), \beta(s,t)}
#' and estimated smooth effects \eqn{f(z), f(x,z)} or \eqn{f(x, z, t)} and their point-wise estimated standard errors.
#' Not implemented for smooths in more than 3 dimensions.
#'
#' The \code{seWithMean}-option corresponds to the \code{"iterms"}-option in \code{\link[mgcv]{predict.gam}}.
#' The \code{sandwich}-options works as follows: Assuming that the residual vectors \eqn{\epsilon_i(t), i=1,\dots,n} are i.i.d.
#' realizations of a mean zero Gaussian process with covariance \eqn{K(t,t')}, we can construct an estimator for
#' \eqn{K(t,t')} from the \eqn{n} replicates of the observed residual vectors. The covariance matrix of the stacked observations
#' vec\eqn{(Y_i(t))} is then given by a block-diagonal matrix with \eqn{n} copies of the estimated \eqn{K(t,t')} on the diagonal.
#' This block-diagonal matrix is used to construct the "meat" of a sandwich covariance estimator, similar to Chen et al. (2013),
#' see reference below.
#'
#'
#' @param object a fitted \code{pffr}-object
#' @param raw logical, defaults to FALSE. If TRUE, the function simply returns \code{object$coefficients}
#' @param se logical, defaults to TRUE. Return estimated standard error of the estimates?
#' @param freq logical, defaults to FALSE. If FALSE, use posterior variance \code{object$Vp} for variability estimates,
#' else use \code{object$Ve}. See \code{\link[mgcv]{gamObject}}
#' @param sandwich logical, defaults to FALSE. Use a Sandwich-estimator for approximate variances? See Details.
#' THIS IS AN EXPERIMENTAL FEATURE, USE AT YOUR OWN RISK.
#' @param seWithMean logical, defaults to TRUE. Include uncertainty about the intercept/overall mean in standard errors returned for smooth components?
#' @param n1 see below
#' @param n2 see below
#' @param n3 \code{n1, n2, n3} give the number of gridpoints for 1-/2-/3-dimensional smooth terms
#' used in the marginal equidistant grids over the range of the covariates at which the estimated effects are evaluated.
#' @param Ktt (optional) an estimate of the covariance operator of the residual process \eqn{\epsilon_i(t) \sim N(0, K(t,t'))},
#' evaluated on \code{yind} of \code{object}. If not supplied, this is estimated from the crossproduct matrices of the
#' observed residual vectors. Only relevant for sandwich CIs.
#' @param ... other arguments, not used.
#'
#' @return If \code{raw==FALSE}, a list containing \itemize{
#' \item \code{pterms} a matrix containing the parametric / non-functional coefficients (and, optionally, their se's)
#' \item \code{smterms} a named list with one entry for each smooth term in the model. Each entry contains
#' \itemize{
#' \item \code{coef} a matrix giving the grid values over the covariates, the estimated effect (and, optionally, the se's).
#' The first covariate varies the fastest.
#' \item \code{x, y, z} the unique gridpoints used to evaluate the smooth/coefficient function/coefficient surface
#' \item \code{xlim, ylim, zlim} the extent of the x/y/z-axes
#' \item \code{xlab, ylab, zlab} the names of the covariates for the x/y/z-axes
#' \item \code{dim} the dimensionality of the effect
#' \item \code{main} the label of the smooth term (a short label, same as the one used in \code{summary.pffr})
#' }}
#' @references Chen, H., Wang, Y., Paik, M.C., and Choi, A. (2013).
#' A marginal approach to reduced-rank penalized spline smoothing with application to multilevel functional data.
#' \emph{Journal of the American Statistical Association}, 101, 1216--1229.
#' @method coef pffr
#' @export
#' @importFrom mgcv PredictMat get.var
#' @importFrom Matrix Diagonal kronecker t
#' @seealso \code{\link[mgcv]{plot.gam}}, \code{\link[mgcv]{predict.gam}} which this routine is
#' based on.
#' @author Fabian Scheipl
coef.pffr <- function(object, raw=FALSE, se=TRUE, freq=FALSE, sandwich=FALSE,
                      seWithMean=TRUE, n1=100, n2=40, n3=20, Ktt=NULL, ...){
  # Either returns the raw coefficient vector (raw=TRUE) or a list with
  # "pterms" (parametric coefficients +- se) and "smterms" (each smooth term
  # evaluated on a grid over its covariates, plot-ready).
  if(raw){
    return(object$coefficients)
  } else {
    # getCoefs(i) evaluates smooth term i. NOTE: it relies on variables defined
    # later in this function body via lexical scoping -- <covmat> and <shrtlbls>
    # are assigned below, before getCoefs is first called; <is.pcre> is set
    # inside getCoefs and read by the nested helpers makeDataGrid/getP.
    getCoefs <- function(i){
      ## this constructs a grid over the range of the covariates
      ## and returns estimated values on this grid, with
      ## by-variables set to 1
      ## cf. mgcv:::plots.R (plot.mgcv.smooth etc..) for original code
      safeRange <- function(x){
        # factors have no numeric range; NA signals "no axis limits"
        if(is.factor(x)) return(c(NA, NA))
        return(range(x, na.rm=TRUE))
      }
      makeDataGrid <- function(trm){
        #generate grid of values in range of original data
        x <- get.var(trm$term[1], object$model)
        if(trm$dim==1) {
          xg <- if(is.factor(x)) {
            unique(x)
          } else seq(min(x), max(x), length=n1)
          d <- data.frame(xg)
          colnames(d) <- trm$term
          attr(d, "xm") <- xg
        }
        # pcre terms get a 2D grid: grouping variable x y-index, plus the
        # eigenfunctions interpolated onto the y-grid
        if(is.pcre) {
          ng <- n2
          xg <- if(is.factor(x)) {
            unique(x)
          } else seq(min(x), max(x),length=ng)
          which.pcre <- which(sapply(object$pffr$pcreterms, `[[`, "idname")
                              == trm$term[1])
          pcreterm <- object$pffr$pcreterms[[which.pcre]]
          yg <- seq(min(pcreterm$yind), max(pcreterm$yind), l=ng)
          # interpolate given eigenfunctions to grid values:
          efcts.grid <- sapply(colnames(pcreterm$efunctions),
                               function(nm){
                                 approx(x=pcreterm$yind,
                                        y=pcreterm$efunctions[, nm],
                                        xout=yg,
                                        method = "linear")$y
                               })
          efcts.grid <- data.frame(efcts.grid[rep(1:ng, e=length(xg)),])
          colnames(efcts.grid) <- colnames(pcreterm$efunctions)
          d <- cbind(expand.grid(xg, yg),
                     efcts.grid)
          colnames(d)[1:2] <- c(trm$term[1],
                                paste0(object$pffr$yindname, ".vec"))
          attr(d, "xm") <- xg
          attr(d, "ym") <- yg
        } else {
          # 2D/3D smooths: grid over each covariate (n2/n3 points per margin)
          if(trm$dim > 1) {
            ng <- ifelse(trm$dim==2, n2, n3)
            varnms <- trm$term
            x <- get.var(trm$term[1], object$model)
            xg <- if(is.factor(x)) {
              unique(x)
            } else seq(min(x), max(x),length=ng)
            y <- get.var(trm$term[2], object$model)
            yg <- if(is.factor(y)) {
              unique(y)
            } else seq(min(y), max(y),length=ng)
            if(length(varnms)==2){
              d <- expand.grid(xg, yg)
              attr(d, "xm") <- xg
              attr(d, "ym") <- yg
            } else {
              z <- get.var(trm$term[3], object$model)
              zg <- if(is.factor(z)) {
                unique(z)
              } else seq(min(z), max(z), length=ng)
              d <- expand.grid(xg, yg, zg)
              attr(d, "xm") <- xg
              attr(d, "ym") <- yg
              attr(d, "zm") <- zg
            }
            colnames(d) <- varnms
          }
        }
        # by-variables (e.g. L.X weights) are fixed at 1 on the grid
        if(trm$by!="NA"){
          d$by <- 1
          colnames(d) <- c(head(colnames(d),-1), trm$by)
        }
        return(d)
      }
      getP <- function(trm, d){
        #return an object similar to what plot.mgcv.smooth etc. return
        X <- PredictMat(trm, d)
        if(is.pcre){
          #sloppy, but effective: temporarily overwrite offending entries
          trm$dim <- 2
          trm$term[2] <- paste0(object$pffr$yindname, ".vec")
        }
        P <- if(trm$dim==1){
          list(x=attr(d, "xm"), xlab=trm$term, xlim=safeRange(attr(d, "xm")))
        } else {
          varnms <- trm$term
          if(trm$dim==2){
            list(x=attr(d, "xm"), y=attr(d, "ym"), xlab=varnms[1], ylab=varnms[2],
                 ylim=safeRange(attr(d, "ym")), xlim=safeRange(attr(d, "xm")))
          } else {
            if(trm$dim==3){
              list(x=attr(d, "xm"), y=attr(d, "ym"), z=attr(d, "zm"),
                   xlab=varnms[1], ylab=varnms[2], zlab=varnms[3],
                   ylim=safeRange(attr(d, "ym")), xlim=safeRange(attr(d, "xm")),
                   zlim=safeRange(attr(d, "zm")))
            }
          }
        }
        # coefficient indices of this term in the full coefficient vector
        trmind <- trm$first.para:trm$last.para
        P$value <- X%*%object$coefficients[trmind]
        P$coef <- cbind(d, "value"=P$value)
        if(se){
          # use seWithMean if possible:
          # (adds intercept uncertainty, analogous to predict.gam's "iterms")
          if(seWithMean & attr(trm,"nCons")>0){
            cat("using seWithMean for ", trm$label,".\n")
            X1 <- matrix(object$cmX,nrow(X),ncol(object$Vp),byrow=TRUE)
            meanL1 <- trm$meanL1
            if (!is.null(meanL1)) X1 <- X1 / meanL1
            X1[,trmind] <- X
            P$se <- sqrt(rowSums((X1%*%covmat)*X1))
          } else {
            P$se <- sqrt(rowSums((X%*%covmat[trmind, trmind])*X))
          }
          P$coef <- cbind(P$coef, se=P$se)
        }
        P$dim <- trm$dim
        return(P)
      }
      trm <- object$smooth[[i]]
      is.pcre <- "pcre.random.effect" %in% class(trm)
      #FIXME: this fails for pcre-terms with >2 FPCs...!
      if(trm$dim > 3 && !is.pcre){
        warning("can't deal with smooths with more than 3 dimensions, returning NULL for ",
                shrtlbls[names(object$smooth)[i] == unlist(object$pffr$labelmap)])
        return(NULL)
      }
      d <- makeDataGrid(trm)
      P <- getP(trm, d)
      #browser()
      # get proper labeling
      P$main <- shrtlbls[names(object$smooth)[i] == unlist(object$pffr$labelmap)]
      # NB: <which> shadows base::which inside this scope from here on
      which <- match(names(object$smooth)[i], object$pffr$labelmap)
      if(which %in% object$pffr$where$ff){
        which.ff <- which(object$pffr$where$ff == which)
        P$ylab <- object$pffr$yindname
        # recover the xind argument name from the original ff(...) call text
        xlab <- deparse(as.call(formula(paste("~",names(object$pffr$ff)[which.ff]))[[2]])$xind)
        if(xlab=="NULL") xlab <- "xindex"
        P$xlab <- xlab
      }
      if(which %in% object$pffr$where$sff){
        which.sff <- which(object$pffr$where$sff == which)
        P$ylab <- object$pffr$yindname
        xlab <- deparse(as.call(formula(paste("~",names(object$pffr$ff)[which.sff]))[[2]])$xind)
        if(xlab=="NULL") xlab <- "xindex"
        P$xlab <- xlab
        P$zlab <- gsub(".mat$", "", object$pffr$ff[[which.sff]]$xname)
      }
      return(P)
    }
    # covariance used for se's: frequentist (Ve) or Bayesian posterior (Vp)
    bread <- if(freq){
      object$Ve
    } else {
      object$Vp
    }
    if(sandwich){
      # EXPERIMENTAL sandwich estimator, cf. the roxygen Details section
      X <- predict(object, type = "lpmatrix", reformat=FALSE)
      bread <- bread/object$sig2
      res <- residuals(object)
      if(is.null(Ktt)){
        # get estimate of Cov(eps_i(t)) = K(t,t')
        # stopifnot(require(Matrix))
        Ktt <- Reduce("+", lapply(1:nrow(res), function(i) tcrossprod(res[i,])))/nrow(res)
      }
      #Chen/Wang, Sec. 2.1: M = X' V^-1 (Y-eta)(Y-eta)' V^-1 X with V ^-1 = diag(sigma^-2)
      # since the estimate is under working independence....
      meat <- (t(X)%*%kronecker(Diagonal(nrow(res)), Ktt))%*%X / (object$scale^2)
      covmat <- as.matrix(bread %*% meat %*% bread)
    } else {
      covmat <- bread
    }
    ret <- list()
    # indices of all smooth-term coefficients; the complement are the parametric ones
    smind <- unlist(sapply(object$smooth, function(x){
      seq(x$first.para, x$last.para)
    }))
    ret$pterms <- cbind(value=object$coefficients[-smind])
    if(se) ret$pterms <- cbind(ret$pterms, se=sqrt(diag(covmat)[-smind]))
    # shrtlbls must be assigned before getCoefs runs (used via closure above)
    shrtlbls <- getShrtlbls(object)
    ret$smterms <- lapply(1:length(object$smooth), getCoefs)
    names(ret$smterms) <- sapply(seq_along(ret$smterms), function(i){
      ret$smterms[[i]]$main
    })
    return(ret)
  }
}
#' Summary for a pffr fit
#'
#' Take a fitted \code{pffr}-object and produce summaries from it.
#' See \code{\link[mgcv]{summary.gam}()} for details.
#'
#' @param object a fitted \code{pffr}-object
#' @param ... see \code{\link[mgcv]{summary.gam}()} for options.
#'
#' @return A list with summary information, see \code{\link[mgcv]{summary.gam}()}
#' @export
#' @method summary pffr
#' @importFrom mgcv summary.gam
#' @author Fabian Scheipl, adapted from \code{\link[mgcv]{summary.gam}()} by Simon Wood, Henric Nilsson
summary.pffr <- function (object, ...) {
  # Delegate to mgcv::summary.gam, then post-process labels/counts for pffr.
  cl <- match.call()
  cl[[1]] <- mgcv::summary.gam
  ## Drop the "pffr" class before dispatch: for n > 3000 & freq == TRUE,
  ## summary.gam calls predict() on the object, and that call must go straight
  ## to predict.gam. Routed through predict.pffr it bounces back to
  ## predict.gam, an index variable gets lost along the way, and it fails.
  class(object) <- class(object)[!(class(object) %in% "pffr")]
  cl$object <- as.name("object")
  res <- eval(cl)
  res$formula <- object$pffr$formula
  # Replace mgcv's long smooth-term labels with pffr's short display labels.
  short_labels <- getShrtlbls(object)
  if (!is.null(res$s.table)) {
    rownames(res$s.table) <- sapply(rownames(res$s.table), function(lbl) {
      short_labels[pmatch(lbl, unlist(object$pffr$labelmap))]
    })
  }
  class(res) <- c("summary.pffr", class(res))
  # Annotate the sample size with the response layout.
  if (!object$pffr$sparseOrNongrid) {
    res$n <- paste0(res$n, " (", object$pffr$nobs, " x ", object$pffr$nyindex, ")")
  } else {
    res$n <- paste0(res$n, " (in ", object$pffr$nobs, " curves)")
  }
  res
}
#' Print method for summary of a pffr fit
#'
#' Pretty printing for a \code{summary.pffr}-object.
#' See \code{\link[mgcv]{print.summary.gam}()} for details.
#'
#' @param x a fitted \code{pffr}-object
#' @param digits controls number of digits printed in output.
#' @param signif.stars Should significance stars be printed alongside output?
#' @param ... not used
#'
#' @return A \code{\link{summary.pffr}} object
#' @method print summary.pffr
#' @importFrom stats printCoefmat
#' @export
#' @author Fabian Scheipl, adapted from \code{\link[mgcv]{print.summary.gam}()} by Simon Wood, Henric Nilsson
print.summary.pffr <- function(x, digits = max(3, getOption("digits") - 3),
                               signif.stars = getOption("show.signif.stars"), ...){
  # Pretty-printer for summary.pffr objects; layout deliberately mirrors
  # mgcv's print.summary.gam so pffr summaries look familiar.
  # mostly identical to print.summary.gam
  print(x$family)
  cat("Formula:\n")
  print(x$formula)
  # parametric (constant) coefficient table, if any
  if (length(x$p.coeff)>0)
  { cat("\nConstant coefficients:\n")
    printCoefmat(x$p.table, digits = digits, signif.stars = signif.stars, na.print = "NA", ...)
  }
  cat("\n")
  # smooth / functional coefficient table (x$m = number of smooth terms)
  if(x$m>0)
  { cat("Smooth terms & functional coefficients:\n")
    printCoefmat(x$s.table, digits = digits, signif.stars = signif.stars, has.Pvalue = TRUE, na.print = "NA",cs.ind=1, ...)
  }
  # fit-statistics footer: R-sq, deviance explained, smoothing criterion, scale, n
  cat("\nR-sq.(adj) = ",formatC(x$r.sq,digits=3,width=5))
  if (length(x$dev.expl)>0) cat(" Deviance explained = ",formatC(x$dev.expl*100,digits=3,width=4),"%\n",sep="")
  if (!is.null(x$method)&&!(x$method%in%c("PQL","lme.ML","lme.REML")))
    cat(x$method," score = ",formatC(x$sp.criterion,digits=5),sep="")
  cat(" Scale est. = ",formatC(x$scale,digits=5,width=8,flag="-")," n = ",x$n,"\n",sep="")
  # return the summary object invisibly, like other print methods
  invisible(x)
}
#' QQ plots for pffr model residuals
#'
#' This is simply a wrapper for \code{\link[mgcv]{qq.gam}()}.
#'
#' @param object a fitted \code{\link{pffr}}-object
#' @inheritParams mgcv::qq.gam
#' @export
qq.pffr <- function (object, rep = 0, level = 0.9, s.rep = 10, type = c("deviance",
    "pearson", "response"), pch = ".", rl.col = 2, rep.col = "gray80",
    ...) {
  # QQ plot for pffr residuals; thin shim over mgcv::qq.gam.
  if (!inherits(object, "pffr")) {
    stop("`object' is not of class \"pffr\"")
  }
  cl <- match.call()
  # Strip the pffr class so only gam methods get dispatched downstream.
  class(object) <- class(object)[-1]
  cl$object <- object
  cl[[1]] <- mgcv::qq.gam
  eval(cl)
}
#' Some diagnostics for a fitted pffr model
#'
#' This is simply a wrapper for \code{\link[mgcv]{gam.check}()}.
#'
#' @param b a fitted \code{\link{pffr}}-object
#' @inheritParams mgcv::gam.check
#' @param rep passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @param level passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @param rl.col passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @param rep.col passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @export
pffr.check <- function (b, old.style = FALSE, type = c("deviance", "pearson",
    "response"), k.sample = 5000, k.rep = 200, rep = 0, level = 0.9,
    rl.col = 2, rep.col = "gray80", ...) {
  # Diagnostics for a pffr fit; thin shim over mgcv::gam.check.
  # Fail fast with a message that names the actual argument: the parameter
  # here is `b` (matching gam.check), not `object` as the old message claimed.
  if (!inherits(b, "pffr"))
    stop("`b' is not of class \"pffr\"")
  call <- match.call()
  # drop pffr-class so only gam-methods are used on b
  class(b) <- class(b)[-1]
  call$b <- b
  call[[1]] <- mgcv::gam.check
  eval(call)
}
| /refund/R/pffr-methods.R | no_license | ingted/R-Examples | R | false | false | 37,938 | r | # methods for pffr-objects
#
#
# Author: fabians
# 16.08.2011, 13:01:24
###############################################################################
#' Prediction for penalized function-on-function regression
#'
#' Takes a fitted \code{pffr}-object produced by \code{\link{pffr}()} and produces
#' predictions given a new set of values for the model covariates or the original
#' values used for the model fit. Predictions can be accompanied by standard errors,
#' based on the posterior distribution of the model coefficients. This is a wrapper
#' function for \code{\link[mgcv]{predict.gam}()}.
#'
#' Index variables (i.e., evaluation points) for the functional covariates are reused
#' from the fitted model object and cannot be supplied with \code{newdata}.
#' Prediction is always for the entire index range of the responses as defined
#' in the original fit. If the original fit was performed on sparse or irregular,
#' non-gridded response data supplied via \code{pffr}'s \code{ydata}-argument
#' and no \code{newdata} was supplied, this function will
#' simply return fitted values for the original evaluation points of the response (in list form).
#' If the original fit was performed on sparse or irregular data and \code{newdata} \emph{was}
#' supplied, the function will return predictions on the grid of evaluation points given in
#' \code{object$pffr$yind}.
#'
#' @param object a fitted \code{pffr}-object
#' @param newdata A named list (or a \code{data.frame}) containing the values of the
#' model covariates at which predictions are required.
#' If no \code{newdata} is provided then predictions corresponding to the original data
#' are returned. If \code{newdata} is provided then it must contain all the variables needed
#' for prediction, in the format supplied to \code{pffr}, i.e., functional predictors must be
#' supplied as matrices with each row corresponding to one observed function.
#' See Details for more on index variables and prediction for models fit on
#' irregular or sparse data.
#' @param reformat logical, defaults to TRUE. Should predictions be returned in matrix form (default) or
#' in the long vector shape returned by \code{predict.gam()}?
#' @param type see \code{\link[mgcv]{predict.gam}()} for details.
#' Note that \code{type == "lpmatrix"} will force \code{reformat} to FALSE.
#' @param se.fit see \code{\link[mgcv]{predict.gam}()}
#' @param ... additional arguments passed on to \code{\link[mgcv]{predict.gam}()}
#' @seealso \code{\link[mgcv]{predict.gam}()}
#' @return If \code{type == "lpmatrix"}, the design matrix for the supplied covariate values in long format.
#' If \code{se == TRUE}, a list with entries \code{fit} and \code{se.fit} containing fits and standard errors, respectively.
#' If \code{type == "terms"} or \code{"iterms"} each of these lists is a list of matrices of the same dimension as the response for \code{newdata}
#' containing the linear predictor and its se for each term.
#' @export
#' @method predict pffr
#' @author Fabian Scheipl
#' @importFrom mgcv predict.gam predict.bam
predict.pffr <- function(object,
        newdata,
        reformat=TRUE,
        type = "link",
        se.fit = FALSE,
        ...){
    ## Overall flow: (1) warn about unusable ... arguments; (2) if <newdata>
    ## is supplied in pffr's wide format, expand it into the long "gamdata"
    ## format that predict.gam/predict.bam expects; (3) delegate to mgcv's
    ## predict method via the modified call; (4) optionally reshape the
    ## result back into the shape of the response (matrix or ydata-like).
    #browser()
    call <- match.call()
    nyindex <- object$pffr$nyindex
    ## warn if any entries in ... are not arguments for predict.gam
    dots <- list(...)
    if(length(dots)){
        validDots <- c(names(formals(predict.gam)), "cluster")
        # should be
        # unique(c(names(formals(predict.gam)),
        #     names(formals(predict.bam))))
        # but predict.bam is not exported.
        notUsed <- names(dots)[!(names(dots) %in% validDots)]
        if(length(notUsed))
            warning("Arguments <", paste(notUsed, collapse=", "), "> supplied but not used." )
    }
    if(!missing(newdata)){
        ## number of new observations = rows of the first covariate entry
        nobs <- nrow(as.matrix(newdata[[1]]))
        # check if the supplied data already has the shape expected by predict.gam
        # and dispatch immediately if so (need this so summary works as expected!)
        if(!(all(names(newdata) %in% names(object$model))) |
                !(paste0(object$pffr$yindname,".vec") %in% names(newdata))){
            # check lengths
            stopifnot(length(unique(sapply(newdata, function(x)
                ifelse(is.matrix(x), nrow(x), length(x))))) ==1)
            # #FIXME: better leave this check to predict.gam....
            # covnames <- mapply(gsub,
            #         pattern=c(".[st]mat$"),
            #         replacement="", x=unique(unlist(sapply(object$smooth, function(x) x$term))))
            # covnames <- unique(covnames[covnames != paste(object$pffr$yindname, ".vec", sep="")])
            # stopifnot(all(covnames %in% names(newdata)))
            #get newdata into the shape expected by predict gam:
            gamdata <- list()
            #y-index
            gamdata[[paste(object$pffr$yindname, ".vec", sep="")]] <- rep(object$pffr$yind, times=nobs)
            # which covariates occur in which terms?
            varmap <- sapply(names(object$pffr$labelmap), function(x) all.vars(formula(paste("~", x))))
            # don't include response
            covnames <- unique(names(newdata)[names(newdata)!=deparse(object$formula[[2]])])
            ## For each supplied covariate, regenerate the design data the
            ## corresponding term type (ff/sff/ffpc/pcre/scalar) needs.
            for(cov in covnames){
                #find the term(s) <cov> is associated with
                trms <- which(sapply(varmap, function(x) any(grep(paste("^",cov,"$",sep=""), x))))
                if(!is.null(dots$terms)) trms <- trms[names(trms) %in% dots$terms]
                if(length(trms)!=0){
                    for(trm in trms){
                        is.ff <- trm %in% object$pffr$where$ff
                        is.sff <- trm %in% object$pffr$where$sff
                        is.ffpc <- trm %in% object$pffr$where$ffpc
                        is.pcre <- trm %in% object$pffr$where$pcre
                        #if ff(X) or sff(X), generate (X.mat), X.tmat, X.smat, L.X ...
                        if(is.ff){
                            ff <- object$pffr$ff[[grep(paste(cov,"[,\\)]",sep=""), names(object$pffr$ff))]]
                            #... but don't generate new data unless <cov> is the functional covariate.
                            if(grepl(paste(cov,"\\.[st]mat",sep=""), deparse(ff$call$x))){
                                # make L-matrix for new obs:
                                # only supported when all rows of the integration
                                # operator L are identical (checked below)
                                L <- ff$L
                                if(any(apply(L, 2, function(x) length(unique(x)))!=1)){
                                    stop("Error for ", names(varmap)[trm],
                                            "-- Prediction for ff-terms with varying rows in integration operator L not implememented yet.")
                                }
                                if(!is.null(ff$limits)){
                                    #TODO implement prediction with limits
                                    stop("Error for ", names(varmap)[trm],
                                            "-- Prediction for ff-terms with <limits> not implememented yet.")
                                }
                                predL <- matrix(L[1,], byrow=TRUE, nrow=nrow(newdata[[cov]]), ncol=ncol(L))
                                gamdata[[paste(cov, ".smat", sep="")]] <-
                                        matrix(ff$xind, byrow=TRUE, ncol=length(ff$xind), nrow=nobs*nyindex)
                                gamdata[[paste(cov, ".tmat", sep="")]] <-
                                        matrix(rep(object$pffr$yind, times=nobs), ncol=length(ff$xind), nrow=nobs*nyindex)
                                gamdata[[paste("L.", cov, sep="")]] <-
                                        (predL*newdata[[cov]])[rep(1:nobs, each=nyindex),]
                            }
                        }
                        if(is.sff){
                            sff <- object$pffr$ff[[grep(paste(cov,"[,\\)]",sep=""), names(object$pffr$ff))]]
                            #... but don't generate new data unless <cov> is the functional covariate.
                            if(grepl(paste(cov,"\\.[st]mat",sep=""), deparse(sff$call$x))){
                                # make L-matrix for new obs:
                                L <- sff$L
                                if(any(apply(L, 2, function(x) length(unique(x)))!=1)){
                                    stop("Error for ", names(varmap)[trm],
                                            "-- Prediction for sff-terms with varying rows in integration operator L not implememented yet.")
                                }
                                predL <- matrix(L[1,], byrow=TRUE, nrow=nrow(newdata[[cov]]), ncol=ncol(L))
                                gamdata[[paste(cov, ".mat", sep="")]] <- newdata[[cov]][rep(1:nobs, e=nyindex),]
                                gamdata[[paste(cov, ".smat", sep="")]] <-
                                        matrix(sff$xind, byrow=TRUE, ncol=length(sff$xind), nrow=nobs*nyindex)
                                gamdata[[paste(cov, ".tmat", sep="")]] <-
                                        matrix(rep(object$pffr$yind, times=nobs), ncol=length(sff$xind), nrow=nobs*nyindex)
                                gamdata[[paste("L.", cov, sep="")]] <- predL[rep(1:nobs, e=nyindex),]
                            }
                        }
                        if(is.pcre){
                            ## pcre terms: linearly interpolate the stored
                            ## eigenfunctions onto the fit's y-index grid
                            pcre <- object$pffr$pcre[[grep(cov, names(object$pffr$pcre))]]
                            gamdata[[paste(cov, ".vec", sep="")]] <- rep(newdata[[cov]], each=nyindex)
                            for(nm in colnames(pcre$efunctions)){
                                tmp <- approx(x=pcre$yind,
                                        y=pcre$efunctions[, nm],
                                        xout=object$pffr$yind,
                                        method = "linear")$y
                                gamdata[[nm]] <- tmp[rep(1:nyindex, times=nobs)]
                            }
                        }
                        if(is.ffpc){
                            ffpc <- object$pffr$ffpc[[grep(paste(cov,"[,\\)]",sep=""),
                                            names(object$pffr$ffpc))]]
                            # Xc' = Phi xi' + error --> get loadings for new data:
                            Xct <- t(newdata[[cov]]) - as.vector(ffpc$meanX)
                            xiMat <- t(qr.coef(qr(ffpc$PCMat), Xct))
                            colnames(xiMat) <- paste(make.names(cov),".PC", 1:ncol(xiMat), sep="")
                            xiMat <- xiMat[rep(1:nobs, each=nyindex), ]
                            for(nm in colnames(xiMat)){
                                gamdata[[nm]] <- xiMat[,nm]
                            }
                        }
                        if(!(is.ff | is.sff | is.ffpc | is.pcre)) {
                            #just repeat each entry nyindex-times to correspond to vec(<Response>)
                            gamdata[[cov]] <- drop(newdata[[cov]])[rep(1:nobs, each=nyindex)]
                        }
                    }
                }
            }
            gamdata <- list2df(gamdata)
            call[["newdata"]] <- gamdata
        }
    } else {
        call$newdata <- eval(call$newdata)
        nobs <- object$pffr$nobs
    }
    ## fits for the original sparse/irregular data are returned in
    ## ydata-like long format further below
    isIrregular <- missing(newdata) & object$pffr$sparseOrNongrid
    #call predict.gam
    call[[1]] <- if(inherits(object, "bam")){
        mgcv::predict.bam
    } else mgcv::predict.gam
    call$object <- as.name("object")
    ret <- eval(call)
    if(type=="lpmatrix" && reformat){
        reformat <- FALSE
        warning("Setting reformat to FALSE for type=\"lpmatrix\".")
    }
    #reformat into matrices with same shape as <Response>
    if(reformat){
        if(!isIrregular){
            if(missing(newdata) && !is.null(object$pffr$missingind)){
                #pad with NAs at the appropriate locations so that fits are nobs x nyindex:
                insertNA <- function(x){
                    if(length(x) != nobs*object$pffr$nyindex){
                        tmp <- rep(NA, nobs*object$pffr$nyindex)
                        tmp[-object$pffr$missingind] <- x
                        return(tmp)
                    } else {
                        return(x)
                    }
                }
            } else insertNA <- function(x) return(x)
            ## for type "terms"/"iterms" the prediction is a matrix with one
            ## column per term; fold each column into its own response-shaped
            ## matrix, otherwise fold the whole vector at once
            if(se.fit){
                if(type %in% c("terms", "iterms")){
                    ret <- lapply(ret, function(x)
                                do.call(list,
                                        sapply(1:ncol(x), function(i){
                                            #browser()
                                            d <- list(I(matrix(insertNA(x[,i]), nrow=nobs,
                                                                    ncol=object$pffr$nyindex,
                                                                    byrow=TRUE)))
                                            names(d) <- colnames(x)[i]
                                            return(d)
                                        })))
                } else {
                    ret <- lapply(ret, function(x) matrix(insertNA(x), nrow=nobs,
                                        ncol=object$pffr$nyindex, byrow=TRUE))
                }
            } else {
                if(type %in% c("terms", "iterms")){
                    ret <- do.call(list, sapply(1:ncol(ret), function(i){
                                        #browser()
                                        d <- list(I(matrix(insertNA(ret[,i]), nrow=nobs,
                                                                ncol=object$pffr$nyindex, byrow=TRUE)))
                                        names(d) <- colnames(ret)[i]
                                        return(d)
                                    }))
                } else ret <- matrix(insertNA(ret), nrow=nobs, ncol=object$pffr$nyindex, byrow=TRUE)
            }
        } else {
            ## irregular data: attach predictions to the (.obs, .index)
            ## evaluation points of the original ydata
            evalpoints <- object$pffr$ydata[,c(".obs", ".index")]
            if(se.fit){
                if(type %in% c("terms", "iterms")){
                    ret <- lapply(ret, function(x)
                                do.call(list,
                                        sapply(1:ncol(x), function(i){
                                            #browser()
                                            d <- list(cbind(evalpoints, .value=x[,i]))
                                            names(d) <- colnames(x)[i]
                                            return(d)
                                        })))
                } else {
                    ret <- lapply(ret, function(x) cbind(evalpoints, .value=x))
                }
            } else {
                if(type %in% c("terms", "iterms")){
                    ret <- do.call(list, sapply(1:ncol(ret), function(i){
                                        #browser()
                                        d <- list(cbind(evalpoints, .value=ret[,i]))
                                        names(d) <- colnames(ret)[i]
                                        return(d)
                                    }))
                } else ret <- cbind(evalpoints, .value=ret)
            }
        }
    }
    return(ret)
}
#' Obtain model matrix for a pffr fit
#'
#' @param object a fitted \code{pffr}-object
#' @param ... other arguments, passed to \code{\link[mgcv]{predict.gam}}.
#'
#' @return A model matrix
#' @method model.matrix pffr
#' @author Fabian Scheipl
model.matrix.pffr <- function (object, ...)
{
    ## This method is only meaningful for pffr fits.
    if (!inherits(object, "pffr")) {
        stop("`object' is not of class \"pffr\"")
    }
    ## The linear-predictor matrix in long format *is* the model matrix;
    ## reformat = FALSE keeps predict() from reshaping the result.
    predict(object, type = "lpmatrix", reformat = FALSE, ...)
}
#' Obtain residuals and fitted values for a pffr models
#'
#' See \code{\link{predict.pffr}} for alternative options to extract estimated
#' values from a \code{pffr} object.
#' "Fitted values" here refers to the estimated additive predictor values,
#' these will not be on the scale of the response for models with link functions.
#'
#' @param object a fitted \code{pffr}-object
#' @param reformat logical, defaults to TRUE. Should residuals be returned in
#' \code{n x yindex} matrix form (regular grid data) or, respectively, in the
#' shape of the originally supplied \code{ydata} argument (sparse/irregular
#' data), or, if \code{FALSE}, simply as a long vector as returned by
#' \code{resid.gam()}?
#' @param ... other arguments, passed to \code{\link[mgcv]{residuals.gam}}.
#'
#' @return A matrix or \code{ydata}-like \code{data.frame} or a vector of
#' residuals / fitted values (see \code{reformat}-argument)
#' @export
#' @importFrom mgcv residuals.gam
#' @method residuals pffr
#' @aliases fitted.pffr
#' @author Fabian Scheipl
residuals.pffr <- function (object, reformat=TRUE, ...)
{
    if (!inherits(object, "pffr")) {
        stop("`object' is not of class \"pffr\"")
    }
    # raw residuals in the long vector layout used internally by mgcv
    res <- mgcv::residuals.gam(object, ...)
    if (!reformat) {
        return(res)
    }
    if (object$pffr$sparseOrNongrid) {
        # sparse/irregular data: return in the shape of the original ydata
        out <- object$pffr$ydata
        out[, ".value"] <- res
        return(out)
    }
    # regular grid: pad missing cells with NA, then fold the long vector
    # into an <nobs> x <nyindex> matrix (one row per observed curve)
    n.total <- object$pffr$nobs * object$pffr$nyindex
    if (length(res) != n.total) {
        padded <- rep(NA, n.total)
        padded[-object$pffr$missingind] <- res
        res <- padded
    }
    matrix(res, nrow = object$pffr$nobs, ncol = object$pffr$nyindex, byrow = TRUE)
}
#' @method fitted pffr
#' @export
#' @rdname residuals.pffr
fitted.pffr <- function (object, reformat=TRUE, ...)
{
    if (!inherits(object, "pffr")) {
        stop("`object' is not of class \"pffr\"")
    }
    # estimated additive predictor values in long-vector form
    fit <- object$fitted.values
    if (!reformat) {
        return(fit)
    }
    if (object$pffr$sparseOrNongrid) {
        # sparse/irregular data: return in the shape of the original ydata
        out <- object$pffr$ydata
        out[, ".value"] <- fit
        return(out)
    }
    # regular grid: pad missing cells with NA, then fold the long vector
    # into an <nobs> x <nyindex> matrix (one row per observed curve)
    n.total <- object$pffr$nobs * object$pffr$nyindex
    if (length(fit) != n.total) {
        padded <- rep(NA, n.total)
        padded[-object$pffr$missingind] <- fit
        fit <- padded
    }
    matrix(fit, nrow = object$pffr$nobs, ncol = object$pffr$nyindex, byrow = TRUE)
}
#' Plot a pffr fit
#'
#' Plot a fitted pffr-object. Simply dispatches to \code{\link[mgcv]{plot.gam}}.
#'
#' @param x a fitted \code{pffr}-object
#' @param ... arguments handed over to \code{\link[mgcv]{plot.gam}}
#'
#' @return This function only generates plots.
#' @method plot pffr
#' @importFrom mgcv plot.gam
#' @author Fabian Scheipl
plot.pffr <- function (x, ...)
{
    ## Strip the leading "pffr" class so that plot.gam (and any generics it
    ## triggers) dispatch on the underlying gam fit without glitches.
    class(x) <- class(x)[-1]
    invisible(mgcv::plot.gam(x, ...))
}
#' Get estimated coefficients from a pffr fit
#'
#' Returns estimated coefficient functions/surfaces \eqn{\beta(t), \beta(s,t)}
#' and estimated smooth effects \eqn{f(z), f(x,z)} or \eqn{f(x, z, t)} and their point-wise estimated standard errors.
#' Not implemented for smooths in more than 3 dimensions.
#'
#' The \code{seWithMean}-option corresponds to the \code{"iterms"}-option in \code{\link[mgcv]{predict.gam}}.
#' The \code{sandwich}-options works as follows: Assuming that the residual vectors \eqn{\epsilon_i(t), i=1,\dots,n} are i.i.d.
#' realizations of a mean zero Gaussian process with covariance \eqn{K(t,t')}, we can construct an estimator for
#' \eqn{K(t,t')} from the \eqn{n} replicates of the observed residual vectors. The covariance matrix of the stacked observations
#' vec\eqn{(Y_i(t))} is then given by a block-diagonal matrix with \eqn{n} copies of the estimated \eqn{K(t,t')} on the diagonal.
#' This block-diagonal matrix is used to construct the "meat" of a sandwich covariance estimator, similar to Chen et al. (2013),
#' see reference below.
#'
#'
#' @param object a fitted \code{pffr}-object
#' @param raw logical, defaults to FALSE. If TRUE, the function simply returns \code{object$coefficients}
#' @param se logical, defaults to TRUE. Return estimated standard error of the estimates?
#' @param freq logical, defaults to FALSE. If FALSE, use posterior variance \code{object$Vp} for variability estimates,
#' else use \code{object$Ve}. See \code{\link[mgcv]{gamObject}}
#' @param sandwich logical, defaults to FALSE. Use a Sandwich-estimator for approximate variances? See Details.
#' THIS IS AN EXPERIMENTAL FEATURE, USE A YOUR OWN RISK.
#' @param seWithMean logical, defaults to TRUE. Include uncertainty about the intercept/overall mean in standard errors returned for smooth components?
#' @param n1 see below
#' @param n2 see below
#' @param n3 \code{n1, n2, n3} give the number of gridpoints for 1-/2-/3-dimensional smooth terms
#' used in the marginal equidistant grids over the range of the covariates at which the estimated effects are evaluated.
#' @param Ktt (optional) an estimate of the covariance operator of the residual process \eqn{\epsilon_i(t) \sim N(0, K(t,t'))},
#' evaluated on \code{yind} of \code{object}. If not supplied, this is estimated from the crossproduct matrices of the
#' observed residual vectors. Only relevant for sandwich CIs.
#' @param ... other arguments, not used.
#'
#' @return If \code{raw==FALSE}, a list containing \itemize{
#' \item \code{pterms} a matrix containing the parametric / non-functional coefficients (and, optionally, their se's)
#' \item \code{smterms} a named list with one entry for each smooth term in the model. Each entry contains
#' \itemize{
#' \item \code{coef} a matrix giving the grid values over the covariates, the estimated effect (and, optionally, the se's).
#' The first covariate varies the fastest.
#' \item \code{x, y, z} the unique gridpoints used to evaluate the smooth/coefficient function/coefficient surface
#' \item \code{xlim, ylim, zlim} the extent of the x/y/z-axes
#' \item \code{xlab, ylab, zlab} the names of the covariates for the x/y/z-axes
#' \item \code{dim} the dimensionality of the effect
#' \item \code{main} the label of the smooth term (a short label, same as the one used in \code{summary.pffr})
#' }}
#' @references Chen, H., Wang, Y., Paik, M.C., and Choi, A. (2013).
#' A marginal approach to reduced-rank penalized spline smoothing with application to multilevel functional data.
#' \emph{Journal of the American Statistical Association}, 101, 1216--1229.
#' @method coef pffr
#' @export
#' @importFrom mgcv PredictMat get.var
#' @importFrom Matrix Diagonal kronecker t
#' @seealso \code{\link[mgcv]{plot.gam}}, \code{\link[mgcv]{predict.gam}} which this routine is
#' based on.
#' @author Fabian Scheipl
coef.pffr <- function(object, raw=FALSE, se=TRUE, freq=FALSE, sandwich=FALSE,
        seWithMean=TRUE, n1=100, n2=40, n3=20, Ktt=NULL, ...){
    if(raw){
        return(object$coefficients)
    } else {
        ## getCoefs(i) evaluates the i-th smooth term of the fit on a grid.
        ## It is a closure over <covmat> and <shrtlbls> defined further below
        ## (both exist before lapply() calls it) as well as <is.pcre>.
        getCoefs <- function(i){
            ## this constructs a grid over the range of the covariates
            ## and returns estimated values on this grid, with
            ## by-variables set to 1
            ## cf. mgcv:::plots.R (plot.mgcv.smooth etc..) for original code
            safeRange <- function(x){
                if(is.factor(x)) return(c(NA, NA))
                return(range(x, na.rm=TRUE))
            }
            ## build an equidistant marginal grid (n1/n2/n3 points per
            ## dimension) over the observed covariate ranges; factors
            ## contribute their unique levels instead
            makeDataGrid <- function(trm){
                #generate grid of values in range of original data
                x <- get.var(trm$term[1], object$model)
                if(trm$dim==1) {
                    xg <- if(is.factor(x)) {
                        unique(x)
                    } else seq(min(x), max(x), length=n1)
                    d <- data.frame(xg)
                    colnames(d) <- trm$term
                    attr(d, "xm") <- xg
                }
                if(is.pcre) {
                    ## pcre terms additionally need the eigenfunctions,
                    ## interpolated onto the y-index grid
                    ng <- n2
                    xg <- if(is.factor(x)) {
                        unique(x)
                    } else seq(min(x), max(x),length=ng)
                    which.pcre <- which(sapply(object$pffr$pcreterms, `[[`, "idname")
                            == trm$term[1])
                    pcreterm <- object$pffr$pcreterms[[which.pcre]]
                    yg <- seq(min(pcreterm$yind), max(pcreterm$yind), l=ng)
                    # interpolate given eigenfunctions to grid values:
                    efcts.grid <- sapply(colnames(pcreterm$efunctions),
                            function(nm){
                                approx(x=pcreterm$yind,
                                        y=pcreterm$efunctions[, nm],
                                        xout=yg,
                                        method = "linear")$y
                            })
                    efcts.grid <- data.frame(efcts.grid[rep(1:ng, e=length(xg)),])
                    colnames(efcts.grid) <- colnames(pcreterm$efunctions)
                    d <- cbind(expand.grid(xg, yg),
                            efcts.grid)
                    colnames(d)[1:2] <- c(trm$term[1],
                            paste0(object$pffr$yindname, ".vec"))
                    attr(d, "xm") <- xg
                    attr(d, "ym") <- yg
                } else {
                    if(trm$dim > 1) {
                        ng <- ifelse(trm$dim==2, n2, n3)
                        varnms <- trm$term
                        x <- get.var(trm$term[1], object$model)
                        xg <- if(is.factor(x)) {
                            unique(x)
                        } else seq(min(x), max(x),length=ng)
                        y <- get.var(trm$term[2], object$model)
                        yg <- if(is.factor(y)) {
                            unique(y)
                        } else seq(min(y), max(y),length=ng)
                        if(length(varnms)==2){
                            d <- expand.grid(xg, yg)
                            attr(d, "xm") <- xg
                            attr(d, "ym") <- yg
                        } else {
                            z <- get.var(trm$term[3], object$model)
                            zg <- if(is.factor(z)) {
                                unique(z)
                            } else seq(min(z), max(z), length=ng)
                            d <- expand.grid(xg, yg, zg)
                            attr(d, "xm") <- xg
                            attr(d, "ym") <- yg
                            attr(d, "zm") <- zg
                        }
                        colnames(d) <- varnms
                    }
                }
                if(trm$by!="NA"){
                    ## by-variables are fixed at 1 so the returned values are
                    ## the coefficient function itself
                    d$by <- 1
                    colnames(d) <- c(head(colnames(d),-1), trm$by)
                }
                return(d)
            }
            ## evaluate the term on grid <d>: point estimates and (optionally)
            ## pointwise standard errors from <covmat>
            getP <- function(trm, d){
                #return an object similar to what plot.mgcv.smooth etc. return
                X <- PredictMat(trm, d)
                if(is.pcre){
                    #sloppy, but effective: temporarily overwrite offending entries
                    trm$dim <- 2
                    trm$term[2] <- paste0(object$pffr$yindname, ".vec")
                }
                P <- if(trm$dim==1){
                    list(x=attr(d, "xm"), xlab=trm$term, xlim=safeRange(attr(d, "xm")))
                } else {
                    varnms <- trm$term
                    if(trm$dim==2){
                        list(x=attr(d, "xm"), y=attr(d, "ym"), xlab=varnms[1], ylab=varnms[2],
                                ylim=safeRange(attr(d, "ym")), xlim=safeRange(attr(d, "xm")))
                    } else {
                        if(trm$dim==3){
                            list(x=attr(d, "xm"), y=attr(d, "ym"), z=attr(d, "zm"),
                                    xlab=varnms[1], ylab=varnms[2], zlab=varnms[3],
                                    ylim=safeRange(attr(d, "ym")), xlim=safeRange(attr(d, "xm")),
                                    zlim=safeRange(attr(d, "zm")))
                        }
                    }
                }
                trmind <- trm$first.para:trm$last.para
                P$value <- X%*%object$coefficients[trmind]
                P$coef <- cbind(d, "value"=P$value)
                if(se){
                    # use seWithMean if possible:
                    # (include intercept uncertainty, cf. "iterms" in predict.gam)
                    if(seWithMean & attr(trm,"nCons")>0){
                        cat("using seWithMean for ", trm$label,".\n")
                        X1 <- matrix(object$cmX,nrow(X),ncol(object$Vp),byrow=TRUE)
                        meanL1 <- trm$meanL1
                        if (!is.null(meanL1)) X1 <- X1 / meanL1
                        X1[,trmind] <- X
                        P$se <- sqrt(rowSums((X1%*%covmat)*X1))
                    } else {
                        P$se <- sqrt(rowSums((X%*%covmat[trmind, trmind])*X))
                    }
                    P$coef <- cbind(P$coef, se=P$se)
                }
                P$dim <- trm$dim
                return(P)
            }
            trm <- object$smooth[[i]]
            is.pcre <- "pcre.random.effect" %in% class(trm)
            #FIXME: this fails for pcre-terms with >2 FPCs...!
            if(trm$dim > 3 && !is.pcre){
                warning("can't deal with smooths with more than 3 dimensions, returning NULL for ",
                        shrtlbls[names(object$smooth)[i] == unlist(object$pffr$labelmap)])
                return(NULL)
            }
            d <- makeDataGrid(trm)
            P <- getP(trm, d)
            #browser()
            # get proper labeling
            P$main <- shrtlbls[names(object$smooth)[i] == unlist(object$pffr$labelmap)]
            which <- match(names(object$smooth)[i], object$pffr$labelmap)
            ## ff/sff terms get axis labels derived from their original call
            if(which %in% object$pffr$where$ff){
                which.ff <- which(object$pffr$where$ff == which)
                P$ylab <- object$pffr$yindname
                xlab <- deparse(as.call(formula(paste("~",names(object$pffr$ff)[which.ff]))[[2]])$xind)
                if(xlab=="NULL") xlab <- "xindex"
                P$xlab <- xlab
            }
            if(which %in% object$pffr$where$sff){
                which.sff <- which(object$pffr$where$sff == which)
                P$ylab <- object$pffr$yindname
                xlab <- deparse(as.call(formula(paste("~",names(object$pffr$ff)[which.sff]))[[2]])$xind)
                if(xlab=="NULL") xlab <- "xindex"
                P$xlab <- xlab
                P$zlab <- gsub(".mat$", "", object$pffr$ff[[which.sff]]$xname)
            }
            return(P)
        }
        ## "bread" of the (sandwich) covariance estimate: posterior (Vp) or
        ## frequentist (Ve) covariance of the coefficients
        bread <- if(freq){
            object$Ve
        } else {
            object$Vp
        }
        if(sandwich){
            X <- predict(object, type = "lpmatrix", reformat=FALSE)
            bread <- bread/object$sig2
            res <- residuals(object)
            if(is.null(Ktt)){
                # get estimate of Cov(eps_i(t)) = K(t,t')
                # (mean of crossproducts of the observed residual vectors)
                # stopifnot(require(Matrix))
                Ktt <- Reduce("+", lapply(1:nrow(res), function(i) tcrossprod(res[i,])))/nrow(res)
            }
            #Chen/Wang, Sec. 2.1: M = X' V^-1 (Y-eta)(Y-eta)' V^-1 X with V ^-1 = diag(sigma^-2)
            # since the estimate is under working independence....
            meat <- (t(X)%*%kronecker(Diagonal(nrow(res)), Ktt))%*%X / (object$scale^2)
            covmat <- as.matrix(bread %*% meat %*% bread)
        } else {
            covmat <- bread
        }
        ret <- list()
        ## indices of all spline coefficients; the rest are parametric terms
        smind <- unlist(sapply(object$smooth, function(x){
            seq(x$first.para, x$last.para)
        }))
        ret$pterms <- cbind(value=object$coefficients[-smind])
        if(se) ret$pterms <- cbind(ret$pterms, se=sqrt(diag(covmat)[-smind]))
        shrtlbls <- getShrtlbls(object)
        ret$smterms <- lapply(1:length(object$smooth), getCoefs)
        names(ret$smterms) <- sapply(seq_along(ret$smterms), function(i){
            ret$smterms[[i]]$main
        })
        return(ret)
    }
}
#' Summary for a pffr fit
#'
#' Take a fitted \code{pffr}-object and produce summaries from it.
#' See \code{\link[mgcv]{summary.gam}()} for details.
#'
#' @param object a fitted \code{pffr}-object
#' @param ... see \code{\link[mgcv]{summary.gam}()} for options.
#'
#' @return A list with summary information, see \code{\link[mgcv]{summary.gam}()}
#' @export
#' @method summary pffr
#' @importFrom mgcv summary.gam
#' @author Fabian Scheipl, adapted from \code{\link[mgcv]{summary.gam}()} by Simon Wood, Henric Nilsson
summary.pffr <- function (object, ...) {
    ## Drop the "pffr" class before delegating to summary.gam: for n > 3000
    ## with freq == TRUE, summary.gam calls predict() on the object, and
    ## letting that dispatch back to predict.pffr loses an index variable
    ## along the way and breaks.
    class(object) <- class(object)[class(object) != "pffr"]
    res <- mgcv::summary.gam(object, ...)
    res$formula <- object$pffr$formula
    ## replace mgcv's long smooth labels with pffr's short display labels
    shortlbls <- getShrtlbls(object)
    if (!is.null(res$s.table)) {
        rownames(res$s.table) <- vapply(rownames(res$s.table),
            function(lbl) shortlbls[pmatch(lbl, unlist(object$pffr$labelmap))],
            character(1))
    }
    class(res) <- c("summary.pffr", class(res))
    ## report n together with the layout of the response
    res$n <- if (!object$pffr$sparseOrNongrid) {
        paste0(res$n, " (", object$pffr$nobs, " x ", object$pffr$nyindex, ")")
    } else {
        paste0(res$n, " (in ", object$pffr$nobs, " curves)")
    }
    res
}
#' Print method for summary of a pffr fit
#'
#' Pretty printing for a \code{summary.pffr}-object.
#' See \code{\link[mgcv]{print.summary.gam}()} for details.
#'
#' @param x a fitted \code{pffr}-object
#' @param digits controls number of digits printed in output.
#' @param signif.stars Should significance stars be printed alongside output?
#' @param ... not used
#'
#' @return A \code{\link{summary.pffr}} object
#' @method print summary.pffr
#' @importFrom stats printCoefmat
#' @export
#' @author Fabian Scheipl, adapted from \code{\link[mgcv]{print.summary.gam}()} by Simon Wood, Henric Nilsson
print.summary.pffr <- function(x, digits = max(3, getOption("digits") - 3),
        signif.stars = getOption("show.signif.stars"), ...){
    # mostly identical to print.summary.gam
    # layout: family, formula, table of constant coefficients, table of
    # smooth/functional terms, then the fit statistics on the last lines
    print(x$family)
    cat("Formula:\n")
    print(x$formula)
    if (length(x$p.coeff)>0)
    { cat("\nConstant coefficients:\n")
        printCoefmat(x$p.table, digits = digits, signif.stars = signif.stars, na.print = "NA", ...)
    }
    cat("\n")
    if(x$m>0)
    { cat("Smooth terms & functional coefficients:\n")
        # cs.ind=1: only the first column (edf) gets common significant-digit formatting
        printCoefmat(x$s.table, digits = digits, signif.stars = signif.stars, has.Pvalue = TRUE, na.print = "NA",cs.ind=1, ...)
    }
    cat("\nR-sq.(adj) =  ",formatC(x$r.sq,digits=3,width=5))
    if (length(x$dev.expl)>0) cat("   Deviance explained = ",formatC(x$dev.expl*100,digits=3,width=4),"%\n",sep="")
    if (!is.null(x$method)&&!(x$method%in%c("PQL","lme.ML","lme.REML")))
        cat(x$method," score = ",formatC(x$sp.criterion,digits=5),sep="")
    cat("  Scale est. = ",formatC(x$scale,digits=5,width=8,flag="-"),"  n = ",x$n,"\n",sep="")
    invisible(x)
}
#' QQ plots for pffr model residuals
#'
#' This is simply a wrapper for \code{\link[mgcv]{qq.gam}()}.
#'
#' @param object a fitted \code{\link{pffr}}-object
#' @inheritParams mgcv::qq.gam
#' @export
qq.pffr <- function (object, rep = 0, level = 0.9, s.rep = 10, type = c("deviance",
    "pearson", "response"), pch = ".", rl.col = 2, rep.col = "gray80",
    ...) {
    if (!inherits(object, "pffr")) {
        stop("`object' is not of class \"pffr\"")
    }
    ## strip the "pffr" class so qq.gam and anything it dispatches
    ## operate on the underlying gam fit
    class(object) <- class(object)[-1]
    mgcv::qq.gam(object, rep = rep, level = level, s.rep = s.rep,
        type = type, pch = pch, rl.col = rl.col, rep.col = rep.col, ...)
}
#' Some diagnostics for a fitted pffr model
#'
#' This is simply a wrapper for \code{\link[mgcv]{gam.check}()}.
#'
#' @param b a fitted \code{\link{pffr}}-object
#' @inheritParams mgcv::gam.check
#' @param rep passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @param level passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @param rl.col passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @param rep.col passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.
#' @export
pffr.check <- function (b, old.style = FALSE, type = c("deviance", "pearson",
    "response"), k.sample = 5000, k.rep = 200, rep = 0, level = 0.9,
    rl.col = 2, rep.col = "gray80", ...) {
    if (!inherits(b, "pffr")) {
        stop("`object' is not of class \"pffr\"")
    }
    ## strip the "pffr" class so gam.check and anything it dispatches
    ## operate on the underlying gam fit
    class(b) <- class(b)[-1]
    mgcv::gam.check(b, old.style = old.style, type = type,
        k.sample = k.sample, k.rep = k.rep, rep = rep, level = level,
        rl.col = rl.col, rep.col = rep.col, ...)
}
|
#' GPCC data downloader
#'
#' Function for downloading GPCC v2020.
#'
#' @importFrom utils download.file
#' @param folder_path a character string with the path where the data will be downloaded.
#' @return No return value, called to download the data set.
#' @keywords internal
download_gpcc <- function(folder_path = "."){
old_options <- options()
options(timeout = 6000)
on.exit(options(old_options))
zenodo_base <- "https://zenodo.org/record/7094293/files/"
zenodo_end <- "?download=1"
file_name <- "gpcc_tp_mm_land_189101_201912_025_monthly.nc"
file_url <- paste0(zenodo_base, file_name, zenodo_end)
file_destination <- paste(folder_path, file_name, sep = "/")
download.file(file_url, file_destination, mode = "wb")
} | /R/download_gpcc.R | no_license | imarkonis/pRecipe | R | false | false | 740 | r | #' GPCC data downloader
#'
#' Function for downloading GPCC v2020.
#'
#' @importFrom utils download.file
#' @param folder_path a character string with the path where the data will be downloaded.
#' @return No return value, called to download the data set.
#' @keywords internal
download_gpcc <- function(folder_path = "."){
  ## Download the GPCC v2020 monthly precipitation NetCDF file from Zenodo
  ## into <folder_path>. Called for its side effect; no return value.
  ## Setting the option returns its previous value, so capture-and-set in
  ## one call and register the restore immediately (the file is large, so
  ## the default download timeout is far too short).
  old_options <- options(timeout = 6000)
  on.exit(options(old_options), add = TRUE)
  zenodo_base <- "https://zenodo.org/record/7094293/files/"
  zenodo_end <- "?download=1"
  file_name <- "gpcc_tp_mm_land_189101_201912_025_monthly.nc"
  file_url <- paste0(zenodo_base, file_name, zenodo_end)
  ## file.path() instead of paste(..., sep = "/"): the idiomatic,
  ## platform-aware way to build the destination path
  file_destination <- file.path(folder_path, file_name)
  ## mode = "wb" is required for binary (NetCDF) content on Windows
  download.file(file_url, file_destination, mode = "wb")
}
# TOOL mothur-combine-results.R: mothur-combine-results (Combine results from two or more sequence processing runs.)
# INPUT file.txt: "Groups and taxonomy files from mothur runs" TYPE GENERIC
# INPUT META phenodata.tsv: "Phenodata files for the mothur runs" TYPE GENERIC
# OUTPUT all.tax: all.tax
# OUTPUT all.grp: all.grp
# OUTPUT META phenodata-merged.tsv: phenodata.tsv
# JTT 2012-11-05

# Check the file types: classify each input file by peeking at its first
# line -- files whose first line does NOT contain "Bacteria" are treated as
# taxonomy ("tax") files, the others as groups ("grp") files.
files <- dir()
phenodata.idx <- grep("phenodata", files)
# guard the removal: x[-integer(0)] would silently drop *all* elements
if (length(phenodata.idx) > 0) {
    files <- files[-phenodata.idx]
}
types <- rep(NA_character_, length(files))
for (i in seq_along(files)) {
    first.line <- readLines(files[i], n = 1)
    if (length(grep("Bacteria", first.line)) == 0) {
        types[i] <- "tax"
    } else {
        types[i] <- "grp"
    }
}

# Append data files: concatenate the contents of each type into one
# combined output file.
for (i in seq_along(files)) {
    content <- readLines(files[i])
    if (types[i] == "tax") {
        write(content, "all.tax", append = TRUE)
    }
    if (types[i] == "grp") {
        write(content, "all.grp", append = TRUE)
    }
}

# Combine phenodata tables: each table loses its first two rows before
# stacking (as in the original tool; presumably metadata rows -- confirm).
files <- dir(pattern = "phenodata")
p <- list()
for (i in seq_along(files)) {
    p[[i]] <- read.table(files[i], header = TRUE, sep = "\t")[-c(1, 2), ]
}
# do.call(rbind, ...) instead of growing the table row-block by row-block
pdata <- do.call(rbind, p)
write.table(pdata, file = "phenodata-merged.tsv", quote = FALSE, sep = "\t", na = "", row.names = FALSE, col.names = TRUE)
| /tools/ngs/attick/mothur-combine-results.R | permissive | chipster/chipster-tools | R | false | false | 1,305 | r | # TOOL mothur-combine-results.R: mothur-combine-results (Combine results from two or more sequence processing runs.)
# INPUT file.txt: "Groups and taxonomy files from mothur runs" TYPE GENERIC
# INPUT META phenodata.tsv: "Phenodata files for the mothur runs" TYPE GENERIC
# OUTPUT all.tax: all.tax
# OUTPUT all.grp: all.grp
# OUTPUT META phenodata-merged.tsv: phenodata.tsv
# JTT 2012-11-05
# Check the file types
# Classify each input file by peeking at its first line: files whose first
# line does NOT contain "Bacteria" are treated as taxonomy ("tax") files,
# the others as groups ("grp") files.
# NOTE(review): files[-c(grep(...))] empties <files> entirely when there is
# no "phenodata" match -- safe only because a phenodata input is mandatory.
files <- dir()
files <- files[-c(grep("phenodata", files))]
types <- rep(NA, length(files))
for (i in 1:length(files)) {
    d <- readLines(files[i], n = 1)
    if (length(grep("Bacteria", d)) == 0) {
        types[i] <- "tax"
    } else {
        types[i] <- "grp"
    }
}
# Append data files: concatenate the contents of each type into one
# combined output file.
for (i in 1:length(files)) {
    d <- readLines(files[i])
    if (types[i] == "tax") {
        write(d, "all.tax", append = TRUE)
    }
    if (types[i] == "grp") {
        write(d, "all.grp", append = TRUE)
    }
}
# Combine phenodata tables: each table loses its first two rows before
# stacking (presumably metadata rows -- confirm against tool inputs).
files <- dir(pattern = "phenodata")
p <- list()
for (i in 1:length(files)) {
    p[[i]] <- read.table(files[i], header = T, sep = "\t")[-c(1, 2), ]
}
pdata <- NULL
for (i in 1:length(p)) {
    pdata <- rbind(pdata, p[[i]])
}
write.table(pdata, file = "phenodata-merged.tsv", quote = FALSE, sep = "\t", na = "", row.names = FALSE, col.names = TRUE)
|
fact <- function(n) {
  ## Factorial of n, computed iteratively; returns 1 for any n <= 1.
  result <- 1
  while (n > 1) {
    result <- result * n
    n <- n - 1
  }
  result
}
| /Programming Language Detection/Experiment-2/Dataset/Train/R/factorial-1.r | no_license | dlaststark/machine-learning-projects | R | false | false | 65 | r | fact <- function(n) {
if (n <= 1) 1
else n * Recall(n - 1)
}
|
# Load the required packages; library() (unlike require()) fails loudly
# when a package is missing instead of returning FALSE.
library(tidyverse)
library(fastDummies)
library(stargazer)

# Load in data
df <- read.csv("../../data/data.csv")

# Parse the date column (stored as "YYYY-MM-DD" strings); base as.Date()
# dispatches to the character method, no need to call it directly.
df$date <- as.Date(df$date, format = "%Y-%m-%d")

# Month-of-year trend variable (a trend variable for month may already
# exist in the data; this one is kept for the regression below).
df$trend <- format(df$date, "%m")

# Dummy variables for each neighbourhood
df <- dummy_cols(df, select_columns = "neighbourhood")

# COVID period indicator: 1 for dates of March 2020 and later, 0 before.
# Comparing against an explicit Date avoids relying on implicit coercion.
df$COVID <- as.numeric(df$date > as.Date("2020-02-01"))

# Regression with number of reviews as the dependent variable and month
# trend, neighbourhood dummies, and the COVID indicator as predictors.
regression <- lm(num_reviews ~ . -X -year -month -date, data = df)
summary(regression)

# Create output directories (no warning when they already exist, so the
# script can be rerun cleanly).
dir.create("../../gen", showWarnings = FALSE)
dir.create("../../gen/analysis", showWarnings = FALSE)

# Export regression results using the stargazer package
stargazer(regression,
          title = "Results review regressions analysis",
          out = "../../gen/analysis/regression.html")
| /src/analysis/regression.R | no_license | Ericvolten1/Airbnb_Amsterdam | R | false | false | 1,167 | r | #Install and load these packages
require(tidyverse)
require(fastDummies)
require(stargazer)
# Load in data
df <- read.csv("../../data/data.csv")
#Change date format with lubridate
df$date <- as.Date.character(df$date, format = c("%Y-%m-%d"))
#Make the trend variable for months
#There seems to already be a trend variable for month but I made a new one anyway
df$trend <- format(df$date, "%m")
#make dummy variables for neighbourhood
df <- dummy_cols(df, select_columns = "neighbourhood")
#Make new dummy variable for dates of March 2020 and later (COVID period)
df$COVID <- df$date>"2020-02-01"
#Convert the TRUE and FALSE to 1 and 0
df$COVID <- df$COVID *1
#Regression analysis with number of reviews as the dependent variable and month, neighbourhoods, and COVID as the indepent variables
regression <- lm(num_reviews ~ . -X -year -month -date, data = df)
summary(regression)
#Create directories gen and analysis
dir.create("../../gen")
dir.create("../../gen/analysis")
#Export regression results using stargazer package
stargazer(regression,
title = "Results review regressions analysis",
out = "../../gen/analysis/regression.html")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rDNA.R
\name{dna_removeDocument}
\alias{dna_removeDocument}
\title{Removes a document from the database}
\usage{
dna_removeDocument(connection, id, removeStatements = FALSE,
simulate = TRUE, verbose = TRUE)
}
\arguments{
\item{connection}{A \code{dna_connection} object created by the
\link{dna_connection} function.}
\item{id}{An integer value denoting the ID of the document to be removed. The
\link{dna_getDocuments} function can be used to look up IDs.}
\item{removeStatements}{The document given by \code{id} may contain
statements. If \code{removeStatements = TRUE} is set, these statements
are removed along with the respective document. If
\code{removeStatements = FALSE} is set, the statements are not deleted,
the document is kept as well, and a message is printed.}
\item{simulate}{Should the changes only be simulated instead of actually
applied to the DNA connection and the SQL database? This can help to
plan more complex recode operations.}
\item{verbose}{Print details on whether the document could be removed?}
}
\description{
Removes a document from the database based on its ID.
}
\details{
The user provides a connection object and the ID of an existing document in
the DNA database, and this document is removed both from memory and from the
SQL database, possibly including any statements contained in the document.
}
\author{
Philip Leifeld
}
| /rDNA/man/dna_removeDocument.Rd | no_license | RobertAWest/dna | R | false | true | 1,451 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rDNA.R
\name{dna_removeDocument}
\alias{dna_removeDocument}
\title{Removes a document from the database}
\usage{
dna_removeDocument(connection, id, removeStatements = FALSE,
simulate = TRUE, verbose = TRUE)
}
\arguments{
\item{connection}{A \code{dna_connection} object created by the
\link{dna_connection} function.}
\item{id}{An integer value denoting the ID of the document to be removed. The
\link{dna_getDocuments} function can be used to look up IDs.}
\item{removeStatements}{The document given by \code{id} may contain
statements. If \code{removeStatements = TRUE} is set, these statements
are removed along with the respective document. If
\code{removeStatements = FALSE} is set, the statements are not deleted,
the document is kept as well, and a message is printed.}
\item{simulate}{Should the changes only be simulated instead of actually
applied to the DNA connection and the SQL database? This can help to
plan more complex recode operations.}
\item{verbose}{Print details on whether the document could be removed?}
}
\description{
Removes a document from the database based on its ID.
}
\details{
The user provides a connection object and the ID of an existing document in
the DNA database, and this document is removed both from memory and from the
SQL database, possibly including any statements contained in the document.
}
\author{
Philip Leifeld
}
|
#Load libraries
library(rstan)
library(shinystan)
#Set cores and browser
options(mc.cores = 4)
options(browser = "chromium")
## Read climate data
all.climate <- read.csv("project_winequality/output/climate_allregions.csv", header = TRUE, stringsAsFactors = FALSE)
## Obtain vintage scores
score1 <- read.csv("project_winequality/data/NapaSonomaNC_Vintage.csv", header = TRUE)
score1 <- score1[, -c(8, 9)]
score1[which(score1$Location == "North Coast"), "Location"] <- c("North_Coast")
score2 <- read.csv("project_winequality/data/OR_Vintage.csv", header = TRUE)
score2 <- score2[, -c(8, 9)]
score2[which(score2$Location == "Oregon"), "Location"] <- c("Williamette")
score3 <- read.csv("project_winequality/data/WA_Vintage.csv", header = TRUE)
score3 <- score3[, -c(8, 9)]
score3[which(score3$Location == "Washington"), "Location"] <- c("Columbia")
all.score <- rbind(score1, score2, score3)
## Fix vintage label
all.score[which(all.score$Variety == "Cabernet "), "Variety"] <- c("Cabernet")
## Fix rank (according to latest WS download)
all.score[which(all.score$Variety == "Cabernet" & all.score$Location == "Napa" & all.score$Vintage == 2017), "R1_WS"] <- c(92)
all.score[which(all.score$Variety == "Zinfandel" & all.score$Location == "Napa" & all.score$Vintage == 2018), "R1_WS"] <- c(92)
all.score[which(all.score$Variety == "Zinfandel" & all.score$Location == "Sonoma" & all.score$Vintage == 2018), "R1_WS"] <- c(94)
# NOTE(review): "RhΓ΄ne" below looks like mojibake (UTF-8 "Rhône" read as
# Latin-1). If the CSV actually stores "Rhône", these two which() calls match
# zero rows and the fixes are silently skipped -- confirm file encoding.
all.score[which(all.score$Variety == "RhΓ΄ne-Style Reds" & all.score$Location == "Napa" & all.score$Vintage == 2017), "R1_WS"] <- c(90)
all.score[which(all.score$Variety == "RhΓ΄ne-Style Reds" & all.score$Location == "Sonoma" & all.score$Vintage == 2017), "R1_WS"] <- c(89)
## Fix averaging of ranks
# Row-wise mean of the three critics' scores, ignoring missing critics.
all.score$Avg_Rank <- apply(all.score[, c("R1_WS", "R2_WE", "R3_WA")], MARGIN = 1, FUN = function(X) mean(X, na.rm = TRUE))
## Remove NAs
# Rows where all three scores are NA give Avg_Rank = NaN; NaN > 0 is NA,
# which subset() drops, so this also removes the all-missing rows.
all.score <- subset(all.score, Avg_Rank > 0 & Vintage > 1979)
aggregate(Avg_Rank ~ Location * Variety, data = all.score, FUN = length)
## Add climate data to scores
all.score$GDD_Season <- NA
all.score$Prcp_Season <- NA
for(i in 1:nrow(all.score)){
temp <- subset(all.climate, Year == all.score$Vintage[i] & Location == all.score$Location[i])
all.score$GDD_Season[i] <- temp$GDD_Season
all.score$Prcp_Season[i] <- temp$Prcp_Season
}
## Create location and variety indices
### Locations
locs <- unique(all.score$Location)
locs <- locs[order(locs)]
locs.number <- as.numeric(as.factor(locs))
### Varieties
varieties <- unique(all.score$Variety)
varieties <- varieties[order(varieties)]
varieties.number <- as.numeric(as.factor(varieties))
## Organize data for model fitting
data.stan <- list(N = nrow(all.score),
avg_rank = all.score$Avg_Rank,
n_location = length(locs),
location = as.numeric(as.factor(all.score$Location)),
n_variety = length(varieties),
variety = as.numeric(as.factor(all.score$Variety)),
precip = all.score$Prcp_Season,
gdd = all.score$GDD_Season)
## Fit Stan model
fit1 <- stan("project_winequality/stan/seasonal.stan",
data = data.stan,
iter = 2000,
warmup = 1000,
chains = 4)
## Rename locations and varieties (recommend replacing direct indexing with grep)
# NOTE(review): positions 520:524 / 525:531 presumably correspond to the
# rank_location[1..5] and rank_variety[1..7] parameters plotted below, but
# the hard-coded offsets break silently if the Stan model changes. Safer:
#   names(fit1)[grepl("^rank_location\\[", names(fit1))] <- locs
# TODO confirm the index-to-parameter mapping before relying on these labels.
names(fit1)[520:524] <- locs
names(fit1)[525:531] <- varieties
## View diagnostics
launch_shinystan(fit1)
## Summarize posterior samples
summary(fit1, pars = c("base_rank", "a_location", "sigma_location", "a_variety", "sigma_variety", "sigma_rank", "b_precip", "b_gdd"))$summary[, "mean"]
## Save estimates
saveRDS(object = fit1, file = "project_winequality/output/posterior_seasonal.RDS")
## Make plots
pdf(file = "project_winequality/output/Results_seasonal.pdf", onefile = TRUE)
plot(fit1, pars = c("rank_location"))
plot(fit1, pars = c("rank_variety"))
plot(fit1, pars = c("b_gdd", "b_precip"))
plot(fit1, pars = c("sigma_location", "sigma_variety", "sigma_rank"))
dev.off()
| /project_winequality/fit_seasonal.R | no_license | lizzieinvancouver/vintages | R | false | false | 4,033 | r |
#Load libraries
library(rstan)
library(shinystan)
#Set cores and browser
options(mc.cores = 4)
options(browser = "chromium")
## Read climate data
all.climate <- read.csv("project_winequality/output/climate_allregions.csv", header = TRUE, stringsAsFactors = FALSE)
## Obtain vintage scores
score1 <- read.csv("project_winequality/data/NapaSonomaNC_Vintage.csv", header = TRUE)
score1 <- score1[, -c(8, 9)]
score1[which(score1$Location == "North Coast"), "Location"] <- c("North_Coast")
score2 <- read.csv("project_winequality/data/OR_Vintage.csv", header = TRUE)
score2 <- score2[, -c(8, 9)]
score2[which(score2$Location == "Oregon"), "Location"] <- c("Williamette")
score3 <- read.csv("project_winequality/data/WA_Vintage.csv", header = TRUE)
score3 <- score3[, -c(8, 9)]
score3[which(score3$Location == "Washington"), "Location"] <- c("Columbia")
all.score <- rbind(score1, score2, score3)
## Fix vintage label
all.score[which(all.score$Variety == "Cabernet "), "Variety"] <- c("Cabernet")
## Fix rank (according to latest WS download)
all.score[which(all.score$Variety == "Cabernet" & all.score$Location == "Napa" & all.score$Vintage == 2017), "R1_WS"] <- c(92)
all.score[which(all.score$Variety == "Zinfandel" & all.score$Location == "Napa" & all.score$Vintage == 2018), "R1_WS"] <- c(92)
all.score[which(all.score$Variety == "Zinfandel" & all.score$Location == "Sonoma" & all.score$Vintage == 2018), "R1_WS"] <- c(94)
all.score[which(all.score$Variety == "RhΓ΄ne-Style Reds" & all.score$Location == "Napa" & all.score$Vintage == 2017), "R1_WS"] <- c(90)
all.score[which(all.score$Variety == "RhΓ΄ne-Style Reds" & all.score$Location == "Sonoma" & all.score$Vintage == 2017), "R1_WS"] <- c(89)
## Fix averaging of ranks
all.score$Avg_Rank <- apply(all.score[, c("R1_WS", "R2_WE", "R3_WA")], MARGIN = 1, FUN = function(X) mean(X, na.rm = TRUE))
## Remove NAs
all.score <- subset(all.score, Avg_Rank > 0 & Vintage > 1979)
aggregate(Avg_Rank ~ Location * Variety, data = all.score, FUN = length)
## Add climate data to scores
all.score$GDD_Season <- NA
all.score$Prcp_Season <- NA
for(i in 1:nrow(all.score)){
temp <- subset(all.climate, Year == all.score$Vintage[i] & Location == all.score$Location[i])
all.score$GDD_Season[i] <- temp$GDD_Season
all.score$Prcp_Season[i] <- temp$Prcp_Season
}
## Create location and variety indices
### Locations
locs <- unique(all.score$Location)
locs <- locs[order(locs)]
locs.number <- as.numeric(as.factor(locs))
### Varieties
varieties <- unique(all.score$Variety)
varieties <- varieties[order(varieties)]
varieties.number <- as.numeric(as.factor(varieties))
## Organize data for model fitting
data.stan <- list(N = nrow(all.score),
avg_rank = all.score$Avg_Rank,
n_location = length(locs),
location = as.numeric(as.factor(all.score$Location)),
n_variety = length(varieties),
variety = as.numeric(as.factor(all.score$Variety)),
precip = all.score$Prcp_Season,
gdd = all.score$GDD_Season)
## Fit Stan model
fit1 <- stan("project_winequality/stan/seasonal.stan",
data = data.stan,
iter = 2000,
warmup = 1000,
chains = 4)
## Rename locations and varieties (recommend replacing direct indexing with grep)
names(fit1)[520:524] <- locs
names(fit1)[525:531] <- varieties
## View diagnostics
launch_shinystan(fit1)
## Summarize posterior samples
summary(fit1, pars = c("base_rank", "a_location", "sigma_location", "a_variety", "sigma_variety", "sigma_rank", "b_precip", "b_gdd"))$summary[, "mean"]
## Save estimates
saveRDS(object = fit1, file = "project_winequality/output/posterior_seasonal.RDS")
## Make plots
pdf(file = "project_winequality/output/Results_seasonal.pdf", onefile = TRUE)
plot(fit1, pars = c("rank_location"))
plot(fit1, pars = c("rank_variety"))
plot(fit1, pars = c("b_gdd", "b_precip"))
plot(fit1, pars = c("sigma_location", "sigma_variety", "sigma_rank"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dikin_walk.R
\name{dikin_walk}
\alias{dikin_walk}
\title{Dikin Walk}
\usage{
dikin_walk(A, b, x0 = list(), points, r = 1, thin = 1, burn = 0,
chains = 1)
}
\arguments{
\item{A}{is the lhs of Ax <= b}
\item{b}{is the rhs of Ax <= b}
\item{x0}{is the starting point (a list of points)}
\item{points}{is the number of points we want to sample}
\item{r}{is the radius of the ellipsoid (1 by default)}
\item{thin}{every thin-th point is stored}
\item{burn}{the first burn points are deleted}
\item{chains}{is the number of chains we run}
}
\value{
a list of chains of the sampled points, each chain
being a matrix object with each column as a point
}
\description{
This function implements the Dikin Walk using the Hessian
of the Log barrier function. Note that a $r$ of 1 guarantees
that the ellipsoid generated won't leave our polytope $K$ (see
Theorems online)
}
\examples{
\dontrun{
## note that this Ax <= b is different from Ax=b that the
## user specifies for walkr (see transformation section in vignette)
dikin_walk(A = A, b = b, x0, points = 100,
           r = 1, thin = 1, burn = 0, chains = 1)
}
}
| /man/dikin_walk.Rd | no_license | andyyao95/walkr | R | false | true | 1,205 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dikin_walk.R
\name{dikin_walk}
\alias{dikin_walk}
\title{Dikin Walk}
\usage{
dikin_walk(A, b, x0 = list(), points, r = 1, thin = 1, burn = 0,
chains = 1)
}
\arguments{
\item{A}{is the lhs of Ax <= b}
\item{b}{is the rhs of Ax <= b}
\item{x0}{is the starting point (a list of points)}
\item{points}{is the number of points we want to sample}
\item{r}{is the radius of the ellipsoid (1 by default)}
\item{thin}{every thin-th point is stored}
\item{burn}{the first burn points are deleted}
\item{chains}{is the number of chains we run}
}
\value{
a list of chains of the sampled points, each chain
being a matrix object with each column as a point
}
\description{
This function implements the Dikin Walk using the Hessian
of the Log barrier function. Note that a $r$ of 1 guarantees
that the ellipsoid generated won't leave our polytope $K$ (see
Theorems online)
}
\examples{
\dontrun{
## note that this Ax <= b is different from Ax=b that the
## user specifies for walkr (see transformation section in vignette)
dikin_walk(A = A, b = b, x0, points = 100,
           r = 1, thin = 1, burn = 0, chains = 1)
}
}
|
require(pdq)
lookup_job <- "LookupJob"
scan_job <- "ScanJob"
bulkinsert_job <- "BulkInsertJob"
compact_job <- "CompactJob"
# divide by 64 for cpu since we cant have multinode + multiclass
lookup_cpu_time <- 0.005 # microsec
scan_cpu_time <- 0.4
bulkinsert_cpu_time <- 0.1
compact_cpu_time <- 0.4
# divide by 8 for ssd since we cant have multinode + multiclass
lookup_ssd_time <- 0.003 # microsec
scan_ssd_time <- 0.05
bulkinsert_ssd_time <- 0.01
compact_ssd_time <- 0.02
CPU <- "CPU"
SSD <- "SSD"
modelName <- "Offload Node"
pdq::Init(modelName)
# Create traffic stream
CreateClosed(lookup_job, TERM, 128, 0.0)
CreateClosed(scan_job, TERM, 4, 0.0)
CreateClosed(bulkinsert_job, TERM, 8, 0.0)
#CreateClosed(compact_job, TERM, 1, 0.0)
# All operations in microsec
SetTUnit("millisec")
# Create M/M/64 node to model 64 CPU cores
# CreateMultiNode(64, CPU, CEN, FCFS)
# NOTE(review): the active call creates an 8-server CPU node although the
# comment above says 64 cores (the 64-server call is commented out). Confirm
# whether this is intentional and whether the per-class service demands were
# rescaled to match (see the "divide by 64" note at the top of the file).
CreateMultiNode(8, CPU, CEN, FCFS)
# Create M/M/8 node to model 8 SSDs of 2.5 TB = 20TB
# CreateMultiNode(8, SSD, CEN, FCFS)
CreateMultiNode(8, SSD, CEN, FCFS)
# Set service demand made by each job on each system resource
SetDemand(CPU, lookup_job, lookup_cpu_time)
SetDemand(CPU, scan_job, scan_cpu_time)
SetDemand(CPU, bulkinsert_job, bulkinsert_cpu_time)
#SetDemand(CPU, compact_job, compact_cpu_time)
SetDemand(SSD, lookup_job, lookup_ssd_time)
SetDemand(SSD, scan_job, scan_ssd_time)
SetDemand(SSD, bulkinsert_job, bulkinsert_ssd_time)
#SetDemand(SSD, compact_job, compact_ssd_time)
pdq::SetDebug(TRUE)
# cant use EXACT or CANON
Solve(APPROX)
Report()
| /PDQ/closed_offload.R | no_license | sanjosh/QueueingModels | R | false | false | 1,543 | r |
require(pdq)
lookup_job <- "LookupJob"
scan_job <- "ScanJob"
bulkinsert_job <- "BulkInsertJob"
compact_job <- "CompactJob"
# divide by 64 for cpu since we cant have multinode + multiclass
lookup_cpu_time <- 0.005 # microsec
scan_cpu_time <- 0.4
bulkinsert_cpu_time <- 0.1
compact_cpu_time <- 0.4
# divide by 8 for ssd since we cant have multinode + multiclass
lookup_ssd_time <- 0.003 # microsec
scan_ssd_time <- 0.05
bulkinsert_ssd_time <- 0.01
compact_ssd_time <- 0.02
CPU <- "CPU"
SSD <- "SSD"
modelName <- "Offload Node"
pdq::Init(modelName)
# Create traffic stream
CreateClosed(lookup_job, TERM, 128, 0.0)
CreateClosed(scan_job, TERM, 4, 0.0)
CreateClosed(bulkinsert_job, TERM, 8, 0.0)
#CreateClosed(compact_job, TERM, 1, 0.0)
# All operations in microsec
SetTUnit("millisec")
# Create M/M/64 node to model 64 CPU cores
# CreateMultiNode(64, CPU, CEN, FCFS)
CreateMultiNode(8, CPU, CEN, FCFS)
# Create M/M/8 node to model 8 SSDs of 2.5 TB = 20TB
# CreateMultiNode(8, SSD, CEN, FCFS)
CreateMultiNode(8, SSD, CEN, FCFS)
# Set service demand made by each job on each system resource
SetDemand(CPU, lookup_job, lookup_cpu_time)
SetDemand(CPU, scan_job, scan_cpu_time)
SetDemand(CPU, bulkinsert_job, bulkinsert_cpu_time)
#SetDemand(CPU, compact_job, compact_cpu_time)
SetDemand(SSD, lookup_job, lookup_ssd_time)
SetDemand(SSD, scan_job, scan_ssd_time)
SetDemand(SSD, bulkinsert_job, bulkinsert_ssd_time)
#SetDemand(SSD, compact_job, compact_ssd_time)
pdq::SetDebug(TRUE)
# cant use EXACT or CANON
Solve(APPROX)
Report()
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939408185837e-241, 1.97274569258757e-154, 5.49464572566663e+109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta_interleaved_matrices/AFL_communities_individual_based_sampling_beta_interleaved_matrices/communities_individual_based_sampling_beta_interleaved_matrices_valgrind_files/1615838455-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 363 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939408185837e-241, 1.97274569258757e-154, 5.49464572566663e+109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result) |
# Standard testthat entry point: run during R CMD check to execute every
# test file under tests/testthat/ for the attomr package.
library(testthat)
library(attomr)
test_check("attomr")
| /tests/testthat.R | permissive | juanfung/attomr | R | false | false | 56 | r | library(testthat)
library(attomr)
test_check("attomr")
|
# Probability model for minor-allele counts within a set of N duplicate reads.
#
# Each duplicate beyond the first arises either from PCR duplication
# (probability PD = 1 - FD, same source molecule, same allele) or from
# independent fragmentation (probability FD, allele drawn independently).
# PL[[N]] holds the partitions of N reads into groups sharing a source
# molecule; CL[[N]] the matching combinatorial coefficients (built by
# genPL/genCoefs elsewhere in this package).
#
# Arguments:
#   N    - duplicate-set size (integer >= 2).
#   FD   - numeric vector of candidate fragmentation-duplicate proportions.
#   PL   - list of partition matrices indexed by set size.
#   CL   - list of coefficient vectors matching PL.
#   reso - NOTE(review): this argument is never used in the body (the grid is
#          fixed by length(FD)) -- confirm it can be dropped.
#
# Returns: a length(FD) x (floor(N/2)+1) matrix; row f, column k+1 is the
# probability of observing k minor ("B") alleles among the N duplicates when
# the fragmentation proportion is FD[f].
genprobsmat <-
function(N,FD,PL,CL,reso=1001){
PD<-1-FD
index<-1
usePL<-PL[[N]]
# params: one row per partition of N.
#   col 1 = number of PCR-duplication events (N minus number of fragments)
#   col 2 = number of fragmentation events (non-empty parts beyond the first)
#   col 3 = combinatorial coefficient for the partition
params<-matrix(NA,nrow=dim(usePL)[1],ncol=3)
params[,2]<-apply(usePL,1,function(x){sum(x>0)-1})
params[,1]<-N-params[,2]-1
params[,3]<-CL[[N]]
partitionprobs<-matrix(NA,ncol=dim(usePL)[1],nrow=length(FD))
## Now calculate the probability of each partition
# P(partition | FD) = coef * PD^(#PCR events) * FD^(#frag events),
# vectorised over all candidate FD values at once.
for(i in 1:dim(usePL)[1]){
partitionprobs[,i]<-params[i,3]*(PD^params[i,1])*(FD^params[i,2])
}
## noB = number of B alleles
# Only 0..floor(N/2) are needed: allele labels are symmetric, so counts
# above N/2 are folded onto their mirror image below.
noB<-0:floor(N/2)
## probnoB = probability of seeing that number of B alleles
probnoB<-matrix(0,ncol=length(noB),nrow=length(FD))
nop<-dim(usePL)[2]
# binky enumerates all 2^(nop-1) binary allele assignments to the fragments
# of a partition, one assignment per column (bit patterns via intToBits).
binky<-as.matrix(sapply(0:((2^(nop-1))-1),function(x){as.integer(intToBits(x))})[1:nop,])
for(part in 1:dim(usePL)[1]){
## nop = number of partitions
# mytab: total B reads produced by each allele assignment (part sizes are
# weighted by the assignment bits); fold counts above nop/2 onto their
# mirror, then tabulate how often each folded count occurs.
# NOTE(review): nop is the column count of PL[[N]], presumably N -- confirm.
mytab<-(rev(usePL[part,])%*%binky)
mytab[mytab>nop/2]<-nop-mytab[mytab>nop/2]
mytab<-table(mytab)
# Each assignment is equally likely given the partition; accumulate its
# contribution weighted by the partition probability.
for(k in 1:length(mytab)){
probnoB[,as.integer(names(mytab)[k])+1]<-probnoB[,as.integer(names(mytab)[k])+1]+mytab[k]*partitionprobs[,part]/(2^(nop-1))
}
}
return(probnoB)
}
# Estimate the proportion of fragmentation (non-PCR) duplicates per sample
# by maximum likelihood over a grid of candidate FD values.
#
# HST: one row per sample. Judging from the rate computations below, col 2 is
# total reads and col 3 duplicate reads (TODO confirm); the last column is
# dropped; the remaining columns hold observed counts of B-allele
# configurations, with the duplicate-set size encoded as the number of
# characters in the column name.
#
# PL, CL: optional precomputed partition/coefficient lists; regenerated via
# genPL/genCoefs when absent or too small for the largest set size in HST.
# reso: resolution of the FD grid searched.
#
# Returns a matrix (one row per sample) with columns ObservedDupRate,
# PropFragDups, AdjustedDupRate, FragDupRate (percentages, except
# PropFragDups which is a proportion in (0, 1]).
processduptable<-function(HST,PL=NULL,CL=NULL,reso=1001){
# Check whether there is a partition list
if(is.null(PL)){
PL<-genPL(nchar(colnames(HST)[dim(HST)[2]-1]))
CL<-genCoefs(PL)
}else{
# Check whether the existing partition list will suffice
if(nchar(colnames(HST)[dim(HST)[2]-1])>length(PL)){
message("NEED TO GENERATE A LARGER PARTITION LIST")
PL<-genPL(nchar(colnames(HST)[dim(HST)[2]-1]))
CL<-genCoefs(PL)
}
if(is.null(CL)){
CL<-genCoefs(PL)
}
}
FDvec<-rep(0,dim(HST)[1])
# Candidate FD values: an even grid on (0, 1]; 0 is excluded so log(probmat)
# below stays finite for configurations impossible without fragmentation.
myseq<-seq(0,1,length.out=reso)[-1]
probmat<-matrix(0,ncol=(dim(HST)[2]-4),nrow=(reso-1))
index<-0
# Fill probmat in column blocks: the allele-count probabilities for each
# duplicate-set size from 2 up to the largest encoded in the column names.
for(i in 2:nchar(colnames(HST)[dim(HST)[2]-1])){
#message(i)
tempmat<-genprobsmat(i,myseq,PL,CL,reso=reso)
probmat[,index+(1:(dim(tempmat)[2]))]<-tempmat
index<-index+dim(tempmat)[2]
}
# Log-likelihood of each sample's observed configuration counts at every
# candidate FD (counts %*% log-probabilities); take the maximising grid value.
myvecs<-as.matrix(HST[,-c(1:3,dim(HST)[2]),drop=FALSE]) %*% t(log(probmat))
FDvec<-myseq[apply(myvecs,1,which.max)]
# Split the observed duplicate rate into the PCR part (1 - FD) and the
# fragmentation part (FD).
outtab<-cbind(100*HST[,3]/HST[,2],FDvec,100*HST[,3]/HST[,2]*(1-FDvec),100*HST[,3]/HST[,2]*(FDvec))
colnames(outtab)<-c("ObservedDupRate","PropFragDups","AdjustedDupRate","FragDupRate")
rownames(outtab)<-rownames(HST)
return(outtab)
}
| /fragmentationDuplicates/duppackage/R/functions-processduptable.R | no_license | dralynch/duplicates | R | false | false | 2,471 | r | genprobsmat <-
function(N,FD,PL,CL,reso=1001){
PD<-1-FD
index<-1
usePL<-PL[[N]]
params<-matrix(NA,nrow=dim(usePL)[1],ncol=3)
params[,2]<-apply(usePL,1,function(x){sum(x>0)-1})
params[,1]<-N-params[,2]-1
params[,3]<-CL[[N]]
partitionprobs<-matrix(NA,ncol=dim(usePL)[1],nrow=length(FD))
## Now calculate the probability of each partition
for(i in 1:dim(usePL)[1]){
partitionprobs[,i]<-params[i,3]*(PD^params[i,1])*(FD^params[i,2])
}
## noB = number of B alleles
noB<-0:floor(N/2)
## probnoB = probability of seeing that number of B alleles
probnoB<-matrix(0,ncol=length(noB),nrow=length(FD))
nop<-dim(usePL)[2]
binky<-as.matrix(sapply(0:((2^(nop-1))-1),function(x){as.integer(intToBits(x))})[1:nop,])
for(part in 1:dim(usePL)[1]){
## nop = number of partitions
mytab<-(rev(usePL[part,])%*%binky)
mytab[mytab>nop/2]<-nop-mytab[mytab>nop/2]
mytab<-table(mytab)
for(k in 1:length(mytab)){
probnoB[,as.integer(names(mytab)[k])+1]<-probnoB[,as.integer(names(mytab)[k])+1]+mytab[k]*partitionprobs[,part]/(2^(nop-1))
}
}
return(probnoB)
}
processduptable<-function(HST,PL=NULL,CL=NULL,reso=1001){
# Check whether there is a partition list
if(is.null(PL)){
PL<-genPL(nchar(colnames(HST)[dim(HST)[2]-1]))
CL<-genCoefs(PL)
}else{
# Check whether the existing partition list will suffice
if(nchar(colnames(HST)[dim(HST)[2]-1])>length(PL)){
message("NEED TO GENERATE A LARGER PARTITION LIST")
PL<-genPL(nchar(colnames(HST)[dim(HST)[2]-1]))
CL<-genCoefs(PL)
}
if(is.null(CL)){
CL<-genCoefs(PL)
}
}
FDvec<-rep(0,dim(HST)[1])
myseq<-seq(0,1,length.out=reso)[-1]
probmat<-matrix(0,ncol=(dim(HST)[2]-4),nrow=(reso-1))
index<-0
for(i in 2:nchar(colnames(HST)[dim(HST)[2]-1])){
#message(i)
tempmat<-genprobsmat(i,myseq,PL,CL,reso=reso)
probmat[,index+(1:(dim(tempmat)[2]))]<-tempmat
index<-index+dim(tempmat)[2]
}
myvecs<-as.matrix(HST[,-c(1:3,dim(HST)[2]),drop=FALSE]) %*% t(log(probmat))
FDvec<-myseq[apply(myvecs,1,which.max)]
outtab<-cbind(100*HST[,3]/HST[,2],FDvec,100*HST[,3]/HST[,2]*(1-FDvec),100*HST[,3]/HST[,2]*(FDvec))
colnames(outtab)<-c("ObservedDupRate","PropFragDups","AdjustedDupRate","FragDupRate")
rownames(outtab)<-rownames(HST)
return(outtab)
}
|
# Please source this script before running the NES8010 multivariate examples
# The script provides additional functions to make using the analyses simpler,
# and change the default plots to a format compatible with ggplot2
# grid package needs installing for ordi_identify
# Install any helper packages that are not already present;
# grid is needed by ordi_identify() below.
my_packages <- c("grid")
missing_pkgs <- my_packages[!(my_packages %in% installed.packages()[, "Package"])]
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
library(grid)
#' Interactive identify
#'
#' Interactive identify ggvegan species
#' @param plotname Name of a plot created with \code{\link{ordi_plot}}
#' @param size Font size of labels (default = 3)
#' @param ... Other optional parameters
#'
#' @details
#' This function is designed to be run interactively. First create a standard
#' ordination using \code{\link{ordi_pca}}, \code{\link{ordi_rda}},
#' \code{\link{ordi_ca}}, \code{\link{ordi_cca}} or \code{\link{ordi_nmds}}.
#' Then call \code{\link{ordi_plot}} but make sure that the plot results is
#' stored in an R object. Then apply this function to that object, and hit the
#' \emph{Esc} key to exit function.
#' \strong{Note:} In RStudio only the most recently displayed plot can be
#' labelled with this function, so avoid using the back arrow keys in the RStudio
#' plot window. Labelling may not always occur on first click, and is more
#' difficult on constrained ordination plots.
#'
#' @return The original ordiname is modified with labels
#'
#' @author Roy Sanderson, School of Natural & Environmental Science, Newcastle
#' University roy.sanderson@newcastle.ac.uk
#'
#' @examples
#' if(interactive()){
#'
#' # Unconstrained ordination
#' data(dune)
#' data(dune.env)
#' dune_pca <- ordi_pca(dune)
#' dune_plt <- ordi_plot(dune_pca, layers="species", geom="point") # defaults to sites and species
#' dune_plt # Display the plot
#' ordi_identify(dune_plt) # Hit Esc key to exit
#'
#' # Constrained ordination
#' dune_rda <- ordi_rda(dune ~ A1 + Management, data=dune.env)
#' # displays spp and constraints.
#' # Constraints are "biplot" for continuous and "centroids" for categorical
#' dune_plt <- ordi_plot(dune_rda, layers=c("species", "biplot", "centroids"), geom="point")
#' dune_plt # Display the plot
#' ordi_identify(dune_plt) # Hit Esc key to exit
#'
#' }
#' @import grid
#' @import mosaic
#' @import vegan
#' @export
# Interactive point labelling: repeatedly reads a mouse click from the grid
# device, finds the nearest ordination point, and redraws the plot with that
# point's label annotated. Exits when the user presses Esc (grid.locator
# then returns NULL).
ordi_identify <- function(plotname, size=3, ...){
print("Click on plot to label points; hit Esc key to exit")
# Data of the first plot layer: col 2 = label, cols 3:4 = x/y coordinates
# (positional access -- assumes the layout produced by ordi_plot).
plot_data <- plotname[["layers"]][[1]]$data
# 'panel.7-5-7-5' is the ggplot2 panel viewport name.
# NOTE(review): this name is version/layout dependent -- confirm it matches
# the installed ggplot2 before relying on it.
depth <- downViewport('panel.7-5-7-5')
x <- plot_data[,3]
y <- plot_data[,4]
labels <- plot_data[,2]
pushViewport(dataViewport(x,y))
pick <- grid.locator('in')
while(!is.null(pick)){
# Read the next click in absolute inches ...
tmp <- grid.locator('in')
tmp.n <- as.numeric(tmp)
# ... convert all data points from native to inches so distances are
# computed in device units, then pick the nearest point.
tmp2.x <- as.numeric(convertX( unit(x,'native'), 'in' ))
tmp2.y <- as.numeric(convertY( unit(y,'native'), 'in' ))
w <- which.min( (tmp2.x-tmp.n[1])^2 + (tmp2.y-tmp.n[2])^2 )
# Back out of the data viewport before reprinting the annotated plot.
popViewport(n=1)
upViewport(depth)
print(last_plot() + annotate("text", label=labels[w], x = x[w], y = y[w],
                             size = size, hjust=0.5, vjust=-0.5))
# Reprinting replaced the viewport tree; descend into the new panel again.
depth <- downViewport('panel.7-5-7-5')
pushViewport(dataViewport(x,y))
pick <- grid.locator('in')
}
# Return the final annotated plot so the caller can keep it.
return(last_plot())
}
#' Principal components analysis
#'
#' Wrapper function with vegan for PCA
#' @param spp_data Dataframe of attributes (columns) by samples (rows)
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
ordi_pca <- function(spp_data, ...){
  # Unconstrained PCA via vegan's rda(); tag the result with an extra "pca"
  # class so downstream ordi_ helpers can tell it apart from a constrained RDA.
  result <- rda(spp_data, ...)
  class(result) <- c("rda", "cca", "pca")
  result
}
#' Redundancy analysis
#'
#' Wrapper function with vegan for RDA
#' @param formula Dataframe of attributes (columns) by samples (rows) as response
#' and one or more explanatory variables from a second dataframe
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
ordi_rda <- rda  # direct alias for vegan::rda (the braced block in the original evaluated to the same function)
#' Correspondence analysis
#'
#' Wrapper function with vegan for CA
#' @param spp_data Dataframe of attributes (columns) by samples (rows)
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
ordi_ca <- function(spp_data, ...){
  # Correspondence analysis via vegan's cca(), tagged with an extra "ca" class.
  # NOTE(review): prepending "rda" to a CA result differs from vegan's default
  # class vector; it looks deliberate (shared plotting/fortify dispatch in this
  # script) -- confirm before changing.
  result <- cca(spp_data, ...)
  class(result) <- c("rda", "cca", "ca")
  result
}
#' Canonical correspondence analysis
#'
#' Wrapper function with vegan for CCA
#' @param formula Dataframe of attributes (columns) by samples (rows) as response
#' and one or more explanatory variables from a second dataframe
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
ordi_cca <- cca  # direct alias for vegan::cca (the braced block in the original evaluated to the same function)
#' Non-metric multidimensional analysis
#'
#' Wrapper function with vegan for metaMDS
#' @param spp_data Dataframe of attributes (columns) by samples (rows)
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
ordi_nmds <- function(spp_data, ...){
  # Non-metric multidimensional scaling via vegan's metaMDS().
  # The original assigned the result to a leftover copy-pasted local name
  # (spp_data_ca, from ordi_ca) whose trailing assignment made the return
  # value invisible at the console; return the fit directly instead.
  metaMDS(spp_data, ...)
}
#' Ordination scores from constrained or unconstrained ordination
#'
#' Wrapper function with ggvegan for fortify
#' @param ordi_object Result of ordination
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
ordi_scores <- function(ordi_object, ...){
# Thin wrapper: extract site/species/constraint scores in long format via
# the fortify methods defined later in this file (fortify.cca etc.).
fortify(ordi_object, ...)
}
#' Stepwise selection of constrained ordination
#'
#' Wrapper function with vegan for ordistep
#' @param ordi_object Either a cca or rda object
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
ordi_step <- function(ordi_object, ...){
# Thin wrapper around vegan::ordistep for stepwise model selection of a
# constrained ordination (rda/cca); forwards all arguments unchanged.
ordistep(ordi_object, ...)
}
#' Multiple plot function
#'
#' Display plot objects in multiple columns, rows, or other combinations
#' @param ... ggplot (or gf_ plot) objects
#' @param plotlist alternative input as a list of ggplot objects
#' @param cols Number of columns in layout
#' @param layout A matrix specifying the layout. If present, 'cols' is ignored.
#'
#' @details
#' If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
#' then plot 1 will go in the upper left, 2 will go in the upper right, and
#' 3 will go all the way across the bottom.
#'
#' @return Displays multi-way plot, but returns NULL
#'
#' @examples
#' # Create a couple of normal distributions of different sample sizes
#' small_normal <- rnorm(25)
#' medium_normal <- rnorm(100)
#' big_normal <- rnorm(100000)
#'
#' # Plot their frequency histograms, but store rather than display
#' small_normal_plt <- gf_histogram(~ small_normal)
#' medium_normal_plt <- gf_histogram(~ medium_normal)
#' big_normal_plt <- gf_histogram(~ big_normal)
#'
#' # Display two plots side-by-side
#' multi_plot(small_normal_plt, big_normal_plt, cols=2)
#'
#' # Display two plots one above the other
#' multi_plot(small_normal_plt, big_normal_plt, cols=1)
#'
#' # Display three plots in a grid
#' # Note use of layout 1, 2, 3, 3 coding to put
#' # the big_normal_plt (third named one) across the bottom
#' multi_plot(small_normal_plt, medium_normal_plt, big_normal_plt,
#' layout=matrix(c(1,2,3,3), nrow=2, byrow=TRUE))
#'
#' @import grid
#' @import mosaic
#' @export
multi_plot <- function(..., plotlist=NULL, cols=1, layout=NULL) {
  # Gather plots supplied individually (...) and/or as a list.
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)

  # If no layout matrix is given, build one with 'cols' columns, numbering
  # cells column-wise (matrix() default fill order).
  if (is.null(layout)) {
    layout <- matrix(seq_len(cols * ceiling(numPlots / cols)),
                     ncol = cols, nrow = ceiling(numPlots / cols))
  }

  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Fresh grid page with one viewport cell per layout entry.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    for (i in seq_len(numPlots)) {
      # Region(s) of the layout matrix occupied by plot i; a plot may span
      # several cells, e.g. layout = matrix(c(1,2,3,3), nrow = 2, byrow = TRUE).
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
  # Drawing happens as a side effect; return NULL invisibly, as documented.
  invisible(NULL)
}
#' Type 3 Sums of squares
#'
#' Wrapper function with car for Anova
#' @param lm_mod Results of lm function
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
anova3 <- function(lm_mod, ...){
# Thin wrapper around car::Anova (car must be attached by the caller).
# NOTE(review): despite the "Type 3 Sums of squares" title above, car::Anova
# defaults to Type II; pass type = 3 explicitly (via ...) if Type III is
# actually required -- confirm intended behaviour.
Anova(lm_mod, ...)
}
##' @title Fortify a \code{"cca"} object.
##'
##' @description
##' Fortifies an object of class \code{"cca"} to produce a
##' data frame of the selected axis scores in long format, suitable for
##' plotting with \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' TODO
##'
##' @param model an object of class \code{"cca"}, the result of a call to
##' \code{\link[vegan]{cca}}, \code{\link[vegan]{rda}}, or
##' \code{\link[vegan]{capscale}}.
##' @param data currently ignored.
##' @param axes numeric; which axes to extract scores for.
##' @param display numeric; the scores to extract in the fortified object.
##' @param ... additional arguments passed to \code{\link[vegan]{scores.cca}},
##' and \code{\link[vegan]{scores.rda}}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the axis scores.
##' @author Gavin L. Simpson
##'
##' @method fortify cca
##' @export
##'
##' @examples
##' require(vegan)
##' data(dune)
##' data(dune.env)
##'
##' sol <- cca(dune ~ A1 + Management, data = dune.env)
##' head(fortify(sol))
`fortify.cca` <- function(model, data, axes = 1:6,
                          display = c("sp", "wa", "lc", "bp", "cn"), ...) {
    ## extract scores
    # vegan::scores() returns a list of score matrices when several score
    # types are requested, but a bare matrix when only one is.
    scrs <- scores(model, choices = axes, display = display, ...)
    ## handle case of only 1 set of scores
    # Wrap the bare matrix in a one-element list under its canonical name so
    # the long-format assembly below works uniformly.
    if (length(display) == 1L) {
        scrs <- list(scrs)
        nam <- switch(display,
                      sp = "species",
                      species = "species",
                      wa = "sites",
                      sites = "sites",
                      lc = "constraints",
                      bp = "biplot",
                      cn = "centroids",
                      stop("Unknown value for 'display'"))
        names(scrs) <- nam
    }
    # Drop score types the model does not actually contain (returned as
    # all-NA entries).
    miss <- vapply(scrs, function(x ) all(is.na(x)), logical(1L))
    scrs <- scrs[!miss]
    nams <- names(scrs)
    nr <- vapply(scrs, FUN = NROW, FUN.VALUE = integer(1))
    # Stack all score matrices; tag each row with its score type (Score) and
    # its original rowname (Label) to give the long format ggplot expects.
    df <- do.call('rbind', scrs)
    rownames(df) <- NULL
    df <- as.data.frame(df)
    df <- cbind(Score = factor(rep(nams, times = nr)),
                Label = unlist(lapply(scrs, rownames), use.names = FALSE),
                df)
    df
}
##' @title Fortify a \code{"metaMDS"} object.
##'
##' @description
##' Fortifies an object of class \code{"metaMDS"} to produce a
##' data frame of the selected axis scores in long format, suitable for
##' plotting with \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' TODO
##'
##' @param model an object of class \code{"metaMDS"}, the result of a call
##' to \code{\link[vegan]{metaMDS}}.
##' @param data currently ignored.
##' @param ... additional arguments passed to
##' \code{\link[vegan]{scores.metaMDS}}. Note you can't use \code{display}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the axis scores.
##' @author Gavin L. Simpson
##'
##' @method fortify metaMDS
##' @export
##'
##' @importFrom ggplot2 fortify
##' @importFrom vegan scores
##'
##' @examples
##' ## load example data
##' require(vegan)
##' data(dune)
##'
##' ord <- metaMDS(dune)
##' head(fortify(ord))
# `fortify.metaMDS` <- function(model, data, ...) {
# samp <- scores(model, display = "sites", ...)
# spp <- tryCatch(scores(model, display = "species", ...),
# error = function(c) {NULL})
# if (!is.null(spp)) {
# df <- rbind(samp, spp)
# df <- as.data.frame(df)
# df <- cbind(Score = factor(rep(c("sites","species"),
# c(nrow(samp), nrow(spp)))),
# Label = c(rownames(samp), rownames(spp)),
# df)
# } else {
# df <- data.frame(Score = factor(rep("sites", nrow(df))),
# Label = rownames(samp),
# samp)
# }
# rownames(df) <- NULL
# df
# }
# `fortify.metaMDS` <- function(model, data, axes = 1:2,
# display = c("sites"), ...) {
# ## extract scores
# scrs <- scores(model, choices = axes, display = display, ...)
# ## handle case of only 1 set of scores
# if (length(display) == 1L) {
# scrs <- list(scrs)
# nam <- switch(display,
# sp = "species",
# species = "species",
# si = "sites",
# sites = "sites",
# stop("Unknown value for 'display'"))
# names(scrs) <- nam
# }
# miss <- vapply(scrs, function(x ) all(is.na(x)), logical(1L))
# scrs <- scrs[!miss]
# nams <- names(scrs)
# nr <- vapply(scrs, FUN = NROW, FUN.VALUE = integer(1))
# df <- do.call('rbind', scrs)
# rownames(df) <- NULL
# df <- as.data.frame(df)
# df <- cbind(Score = factor(rep(nams, times = nr)),
# Label = unlist(lapply(scrs, rownames), use.names = FALSE),
# df)
# df
# }
#
##' @title ggplot-based plot for objects of class \code{"cca"}
##'
##' @description
##' Produces a multi-layer ggplot object representing the output of objects produced by \code{\link[vegan]{cca}}, or \code{\link[vegan]{capscale}}.
##'
##' @details
##' TODO
##'
##' @param object an object of class \code{"cca"}, the result of a call to \code{\link[vegan]{cca}} or \code{\link[vegan]{capscale}}.
##' @param axes numeric; which axes to plot, given as a vector of length 2.
##' @param geom character; which geoms to use for the layers. Can be a
##' vector of length equal to \code{length(display)}, in which case the
##' \emph{i}th element of \code{type} refers to the \emph{i}th element
##' of \code{display}.
##' @param layers character; which scores to plot as layers
##' @param legend.position character or two-element numeric vector; where to position the legend. See \code{\link[ggplot2]{theme}} for details. Use \code{"none"} to not draw the legend.
##' @param xlab character; label for the x-axis
##' @param ylab character; label for the y-axis
##' @param title character; title for the plot
##' @param subtitle character; subtitle for the plot
##' @param caption character; caption for the plot
##' @param ... Additional arguments passed to \code{\link{fortify.cca}}.
##' @return Returns a ggplot object.
##' @author Gavin L. Simpson
##'
##' @method ordi_plot cca
##' @export
##'
##' @importFrom grid arrow unit
##' @importFrom ggplot2 autoplot ggplot geom_point geom_text geom_segment labs coord_fixed aes_string
##'
##' @examples
##' require(vegan)
##' data(dune)
##' data(dune.env)
##'
##' sol <- ordi_cca(dune ~ A1 + Management, data = dune.env)
##' ordi_plot(sol)
`ordi_plot.cca` <- function(object, axes = c(1,2), geom = c("point", "text"),
layers = c("species", "sites", "biplot", "centroids"),
legend.position = "none",
title = NULL, subtitle = NULL, caption = NULL,
ylab, xlab, ...) {
## recycle a short `axes` argument so exactly two axes are used
axes <- rep(axes, length.out = 2L)
## long-format data frame of scores (Score, Label, axis columns)
obj <- fortify(object, axes = axes, ...)
## remember every score type present BEFORE subsetting to `layers`
LAYERS <- levels(obj$Score)
## sort out x, y aesthetics
vars <- getDimensionNames(obj)
## match the geom (only the first match is used for all layers)
geom <- match.arg(geom)
point <- TRUE
if (isTRUE(all.equal(geom, "text"))) {
point <- FALSE
}
## subset out the layers wanted
### need something here first to match acceptable ones?
### or just check that the layers selected would return a df with
### at least 1 row.
obj <- obj[obj$Score %in% layers, , drop = FALSE]
## skeleton layer
plt <- ggplot()
## add plot layers as required; species/sites drawn first, as points or text
want <- obj$Score %in% c("species", "sites")
if (point) {
plt <- plt +
geom_point(data = obj[want, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2], shape = 'Score',
colour = 'Score'))
} else {
plt <- plt +
geom_text(data = obj[want, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2], label = 'Label',
colour = 'Score'))
}
## remove biplot arrows for centroids if present: factor constraints are
## shown as centroids only, so their duplicate biplot rows are dropped
if(all(c("biplot","centroids") %in% LAYERS)) {
want <- obj$Score == "biplot"
tmp <- obj[want, ]
obj <- obj[!want, ]
bnam <- tmp[, "Label"]
cnam <- obj[obj$Score == "centroids", "Label"]
obj <- rbind(obj, tmp[!bnam %in% cnam, , drop = FALSE])
}
## LC (constraint) site scores, if requested
if(any(want <- obj$Score == "constraints")) {
if (point) {
plt <- plt + geom_point(data = obj[want, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2]))
} else {
plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2],
label = 'Label'))
}
}
## biplot arrows, rescaled to fill the extent of the other scores
if(any(want <- obj$Score == "biplot")) {
if (length(layers) > 1) {
mul <- arrowMul(obj[want, vars, drop = FALSE],
obj[!want, vars, drop = FALSE])
obj[want, vars] <- mul * obj[want, vars]
}
col <- "navy"
plt <- plt +
geom_segment(data = obj[want, , drop = FALSE ],
aes_string(x = 0, y = 0, xend = vars[1], yend = vars[2]),
arrow = arrow(length = unit(0.2, "cm")),
colour = col)
## nudge the arrow labels 10% beyond the arrow tips
obj[want, vars] <- 1.1 * obj[want, vars]
plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2], label = 'Label'))
}
## centroids of factor constraints, drawn as navy labels
if(any(want <- obj$Score == "centroids")) {
plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2], label = 'Label'),
colour = "navy")
}
## default axis labels are the names of the score columns
if(missing(xlab)) {
xlab <- vars[1]
}
if(missing(ylab)) {
ylab <- vars[2]
}
plt <- plt + labs(x = xlab, y = ylab, title = title, subtitle = subtitle,
caption = caption)
## add equal scaling
plt <- plt + coord_fixed(ratio = 1)
## do we want a legend
plt <- plt + theme(legend.position = legend.position)
plt
}
## `aplot.cca` <- `ordi_plot.cca`
##' @title ggplot-based plot for objects of class \code{"metaMDS"}
##'
##' @description
##' Produces a multi-layer ggplot object representing the output of
##' objects produced by \code{\link[vegan]{metaMDS}}.
##'
##' @details
##' Site and/or species scores are extracted with
##' \code{\link{fortify.metaMDS}} and drawn either as points or as text
##' labels, with equal axis scaling.
##'
##' @param object an object of class \code{"metaMDS"}, the result of a call
##' to \code{\link[vegan]{metaMDS}}.
##' @param axes numeric; which axes to plot, given as a vector of length 2.
##' NOTE(review): not currently forwarded to the score extraction -- the
##' first two NMDS dimensions are always used; confirm intended behaviour.
##' @param geom character; which geom to use for the layers
##' (\code{"point"} or \code{"text"}).
##' @param layers character; which scores to plot as layers
##' (\code{"sites"}, \code{"species"} or both).
##' @param legend.position character or two-element numeric vector; where to position the legend. See \code{\link[ggplot2]{theme}} for details. Use \code{"none"} to not draw the legend.
##' @param xlab character; label for the x-axis
##' @param ylab character; label for the y-axis
##' @param title character; title for the plot
##' @param subtitle character; subtitle for the plot
##' @param caption character; caption for the plot
##' @param ... Additional arguments passed to \code{\link{fortify.metaMDS}}.
##'
##' @return Returns a ggplot object.
##'
##' @author Gavin L. Simpson
##'
##' @method ordi_plot metaMDS
##' @export
##'
##' @importFrom grid arrow unit
##' @importFrom ggplot2 autoplot ggplot geom_point geom_text labs coord_fixed aes_string
##'
##' @examples
##' ## load example data
##' require(vegan)
##' data(dune)
##'
##' sol <- ordi_nmds(dune)
##' ordi_plot(sol)
`ordi_plot.metaMDS` <- function(object, axes = c(1,2), geom = c("point", "text"),
                                layers = c("species", "sites"),
                                legend.position = "none",
                                title = NULL, subtitle = NULL, caption = NULL,
                                ylab, xlab, ...) {
    axes <- rep(axes, length.out = 2L)
    ## BUG FIX: request exactly the layers the user asked for. Previously
    ## `display <- layers` was dead code and fortify.metaMDS() was called
    ## with its default display ("sites"), so the species layer was silently
    ## dropped from every plot.
    obj <- fortify.metaMDS(object, display = layers, ...)
    ## keep only the requested score types
    obj <- obj[obj$Score %in% layers, ]
    ## names of the two axis-score columns (e.g. NMDS1, NMDS2)
    vars <- getDimensionNames(obj)
    ## skeleton layer
    plt <- ggplot()
    geom <- match.arg(geom)
    point <- TRUE
    if (isTRUE(all.equal(geom, "text"))) {
        point <- FALSE
    }
    ## draw the scores either as shaped/coloured points or as text labels
    if (point) {
        plt <- plt + geom_point(data = obj,
                                aes_string(x = vars[1], y = vars[2], shape = 'Score',
                                           colour = 'Score'))
    } else {
        plt <- plt + geom_text(data = obj,
                               aes_string(x = vars[1], y = vars[2], label = 'Label',
                                          colour = 'Score'))
    }
    ## default axis labels are the score column names
    if (missing(xlab)) {
        xlab <- vars[1]
    }
    if (missing(ylab)) {
        ylab <- vars[2]
    }
    plt <- plt + labs(x = xlab, y = ylab, title = title, subtitle = subtitle,
                      caption = caption)
    ## add equal scaling
    plt <- plt + coord_fixed(ratio = 1)
    ## do we want a legend
    plt <- plt + theme(legend.position = legend.position)
    plt
}
##' @title ggplot-based plot for objects of class \code{'rda'}
##'
##' @description
##' Produces a multi-layer ggplot object representing the output of objects produced by \code{\link[vegan]{rda}}.
##'
##' @details
##' TODO
##'
##' @param object an object of class \code{"rda"}, the result of a call to \code{\link[vegan]{rda}}
##' @param axes numeric; which axes to plot, given as a vector of length 2.
##' @param geom character; which geoms to use for the layers. Can be a vector of
##' up to length 2, in which case, the first element of \code{geom} will be
##' used for any site scores (both weighted sum or linear combination scores),
##' and the second element will be used for species scores. The latter will be
##' ignored if \code{arrows = TRUE}.
##' @param layers character; which scores to plot as layers
##' @param arrows logical; represent species (variables) using vectors?
##' @param legend.position character or two-element numeric vector; where to position the legend. See \code{\link[ggplot2]{theme}} for details. Use \code{"none"} to not draw the legend.
##' @param xlab character; label for the x-axis
##' @param ylab character; label for the y-axis
##' @param title character; title for the plot
##' @param subtitle character; subtitle for the plot
##' @param caption character; caption for the plot
##' @param const General scaling constant to \code{rda} scores. See
##' \code{\link[vegan]{scores.rda}} for details.
##' @param ... Additional arguments passed to \code{\link{fortify}}.
##'
##' @return Returns a ggplot object.
##'
##' @author Gavin L. Simpson
##'
##' @examples
##' require(vegan)
##' data(dune)
##'
##' pca <- ordi_rda(dune)
##' ordi_plot(pca)
##'
##' ## Just the species scores
##' ordi_plot(pca, layers = "species")
##' @method ordi_plot rda
##' @export
##'
`ordi_plot.rda` <- function(object, axes = c(1,2), geom = c("point", "text"),
layers = c("species", "sites", "biplot", "centroids"),
arrows = FALSE, legend.position = "none",
title = NULL, subtitle = NULL, caption = NULL,
ylab, xlab, const, ...) {
## determine which layers to plot
valid <- valid_layers(object) # vector of valid layers
ok_layers <- check_user_layers(layers, valid, message = TRUE)
layers <- layers[ok_layers] # subset user-supplied layers
draw_list <- layer_draw_list(valid, layers) # what are we drawing
## fix-up axes needed to plot
laxes <- length(axes)
if (laxes != 2L) {
if (laxes > 2L) {
axes <- rep(axes, length.out = 2L) # shrink to required length
} else {
stop("Need 2 ordination axes to plot; only 1 was given.",
call. = FALSE)
}
}
## NOTE(review): `const` is forwarded even when the caller omitted it; this
## relies on downstream lazy evaluation tolerating a missing argument --
## confirm against fortify()/scores() behaviour.
obj <- fortify(object, axes = axes, const = const, ...) # grab some scores
## recompute the draw list now that we know which scores actually exist
available <- levels(obj[["Score"]])
draw_list <- layer_draw_list(valid, layers, available) # what are we drawing
layer_names <- names(draw_list)[draw_list]
## sort out x, y aesthetics
vars <- getDimensionNames(obj)
## process geom arg; up to two geoms (sites geom first, species geom second)
geom <- match.arg(geom, several.ok = TRUE)
geom <- unique(geom) # simplify geom if elements are the same
## subset out the layers wanted
obj <- obj[obj[["Score"]] %in% layer_names, , drop = FALSE]
## skeleton layer
plt <- ggplot()
## draw sites, species, constraints == lc site scores
if (any(draw_list[c("species","sites","constraints")])) {
plt <- add_spp_site_scores(obj, plt, vars, geom, draw_list, arrows)
}
## remove biplot arrows for centroids if present: factor constraints are
## shown as centroids only, so their duplicate biplot rows are dropped
if(all(draw_list[c("biplot","centroids")])) {
want <- obj[["Score"]] == "biplot"
tmp <- obj[want, ]
obj <- obj[!want, ]
bnam <- tmp[, "Label"]
cnam <- obj[obj[["Score"]] == "centroids", "Label"]
obj <- rbind(obj, tmp[!bnam %in% cnam, , drop = FALSE])
}
## NOTE(review): isTRUE() on a *named* length-1 logical returns TRUE only
## under the R >= 3.5.0 definition of isTRUE(); on older R these branches
## were never taken -- confirm the package's minimum R version.
if(isTRUE(draw_list["biplot"])) {
want <- obj[["Score"]] == "biplot"
## rescale arrows to fill the extent of the other plotted scores
if (length(layer_names) > 1) {
mul <- arrowMul(obj[want, vars, drop = FALSE],
obj[!want, vars, drop = FALSE])
obj[want, vars] <- mul * obj[want, vars]
}
col <- "navy"
plt <- plt +
geom_segment(data = obj[want, , drop = FALSE ],
aes_string(x = 0, y = 0,
xend = vars[1], yend = vars[2]),
arrow = arrow(length = unit(0.2, "cm")),
colour = col)
## nudge the arrow labels 10% beyond the arrow tips
obj[want, vars] <- 1.1 * obj[want, vars]
plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2],
label = 'Label'))
}
## centroids of factor constraints, drawn as navy labels
if(isTRUE(draw_list["centroids"])) {
want <- obj[["Score"]] == "centroids"
plt <- plt +
geom_text(data = obj[want, , drop = FALSE],
aes_string(x = vars[1], y = vars[2], label = 'Label'),
colour = "navy")
}
## default axis labels are the names of the score columns
if(missing(xlab)) {
xlab <- vars[1]
}
if(missing(ylab)) {
ylab <- vars[2]
}
plt <- plt + labs(x = xlab, y = ylab, title = title, subtitle = subtitle,
caption = caption)
## add equal scaling
plt <- plt + coord_fixed(ratio = 1)
## do we want a legend
plt <- plt + theme(legend.position = legend.position)
plt
}
##' @title Scale Vectors to Data
##' @description Scale vector arrows to \code{fill} proportion of the data.
##' @param arrows a two-column matrix-like object containing coordinates for the arrows/vectors on x and y axes.
##' @param data a two-column matrix-like object containing coordinates of the data on the x and y axes.
##' @param at numeric vector of length 2; location of the origin of the arrows.
##' @param fill numeric; what proportion of the range of the data to fill.
##' @return a numeric multiplier that will scale the arrows.
##' @author Gavin L. Simpson
`arrowMul` <- function(arrows, data, at = c(0, 0), fill = 0.75) {
    ## Per-axis data ranges as (xmin, xmax, ymin, ymax). BUG FIX: the
    ## original had a misplaced parenthesis -- range(data[,1], range(data[,2]))
    ## -- which collapsed both axes into one overall range and mis-scaled
    ## the arrows whenever the two axes had different extents.
    u <- c(range(data[, 1]), range(data[, 2]))
    ## shift so the ranges are relative to the arrow origin
    u <- u - rep(at, each = 2)
    ## per-axis ranges of the arrow coordinates
    r <- c(range(arrows[, 1], na.rm = TRUE), range(arrows[, 2], na.rm = TRUE))
    ## orientation of each axis range; `flip` (renamed from `rev`, which
    ## shadowed base::rev) flags reversed axes so u/r stays positive
    flip <- sign(diff(u))[-2]
    if (flip[1] < 0) {
        u[1:2] <- u[2:1]
    }
    if (flip[2] < 0) {
        u[3:4] <- u[4:3]
    }
    u <- u / r
    ## keep finite, positive ratios only (guards against zero-width ranges)
    u <- u[is.finite(u) & u > 0]
    ## fill the requested proportion of the most constraining direction
    fill * min(u)
}
##' @title Number of scores
##' @description Returns the number of score rows contained in object \code{x}.
##'
##' @param x The object whose number of scores is required.
##'
##' @return a numeric vector of length 1 with the number of scores.
##'
##' @author Gavin L. Simpson
`scoresLength` <- function(x) {
    ## NROW() treats a vector as a 1-column matrix, so this works for both
    ## matrices of scores and plain vectors.
    n <- NROW(x)
    ## defensive fallback: treat an absent result as zero scores
    if (is.null(n)) {
        n <- 0
    }
    n
}
##' @title Extract the names of the dimensions to plot as a character vector
##'
##' @description Find the character vector of names for the dimensions of data
##' to be plotted. The first two columns of a fortified ordination object are
##' the \code{Score} and \code{Label} metadata; everything after them is an
##' axis-score column.
##' @param object a fortified ordination object.
##' @return A character vector of dimension (axis) names.
##' @author Gavin L. Simpson
`getDimensionNames` <- function(object) {
    ## drop the Score/Label metadata columns, keep the axis names
    names(object)[-(1:2)]
}
##' @title Adds a label layer using one of a set of common geoms
##'
##' @description Adds labels to a plot using one of \code{geom_label}, \code{geom_text}, \code{geom_label_repel} or \code{geom_text_repel}.
##'
##' @param data data frame; data set to use for the label layer. Must contain a variable \code{Label} containing the strings to use as labels.
##' @param geom character; which geom to use for labelling. Defaults to
##' \code{"label"}.
##' @param vars character; vector of names of variables to pass to the \code{x} and \code{y} aesthetics of the chosen geom.
##'
##' @author Gavin L. Simpson
##'
`label_fun` <- function(data,
                        geom = c("label", "text", "label_repel", "text_repel"),
                        vars) {
    ## BUG FIX: resolve and validate `geom`. Previously the default
    ## (a length-4 vector) was passed straight to switch(), which errors on a
    ## non-scalar EXPR, and an unrecognised value silently returned NULL.
    geom <- match.arg(geom)
    ## NOTE(review): geom_label_repel/geom_text_repel come from the ggrepel
    ## package, which is not imported anywhere visible here -- confirm it is
    ## a declared dependency.
    ll <- switch(geom,
                 label =
                     geom_label(data = data,
                                mapping = aes_string(x = vars[1],
                                                     y = vars[2],
                                                     label = 'Label')),
                 text =
                     geom_text(data = data,
                               mapping = aes_string(x = vars[1],
                                                    y = vars[2],
                                                    label = 'Label')),
                 label_repel =
                     geom_label_repel(data = data,
                                      mapping = aes_string(x = vars[1],
                                                           y = vars[2],
                                                           label = 'Label')),
                 text_repel =
                     geom_text_repel(data = data,
                                     mapping = aes_string(x = vars[1],
                                                          y = vars[2],
                                                          label = 'Label'))
    )
    ll
}
##' @title Valid layers for vegan objects
##'
##' @param object An R object.
##' @param ... Additional arguments passed to methods.
##'
##' @rdname valid_layers
##' @export
`valid_layers` <- function(object, ...) {
    ## S3 generic: dispatch on the class of `object`.
    UseMethod("valid_layers")
}
##' @rdname valid_layers
##' @export
`valid_layers.rda` <- function(object, ...) {
## Score types that may be requested as plot layers for an rda fit.
c("species", "sites", "constraints", "biplot", "centroids", "regression")
}
##' @rdname valid_layers
##' @export
`valid_layers.cca` <- function(object, ...) {
## Score types that may be requested as plot layers for a cca fit.
c("species", "sites", "constraints", "biplot", "centroids", "regression")
}
##' @title ordination plots
##'
##' @param ... An ordination object followed by additional arguments passed
##' to the class-specific method.
##'
#' @rdname ordi_plot
#' @export
`ordi_plot` <- function(...) {
    ## S3 generic: dispatch on the class of the first supplied argument.
    UseMethod("ordi_plot")
}
# ##' @rdname ordi_plot
# ##' @export
# `ordi_plot.rda` <- function(...){
# UseMethod('ordi_plot')
# }
#
# ##' @rdname ordi_plot
# ##' @export
# `ordi_plot.cca` <- function(...){
# UseMethod('ordi_plot')
# }
##' @title Check user-supplied layers against list of valid layers
##'
##' @param user character; vector of user supplied layer names.
##' @param valid character; vector of valid layer names.
##' @param message logical; should a message be raised in the case of invalid
##' user-supplied layer names.
##' @return logical vector, one element per entry of \code{user}, \code{TRUE}
##' where the requested layer is valid.
`check_user_layers` <- function(user, valid, message = FALSE) {
    keep <- user %in% valid
    ## optionally tell the user which requested layers were dropped
    if (isTRUE(message) && !all(keep)) {
        bad <- paste(user[!keep], collapse = ", ")
        message(paste("Invalid (ignored) layers for this object:", bad))
    }
    keep
}
##' @title List of layers to draw for a given vegan object
##'
##' @param valid character; vector of valid layer names.
##' @param layers character; a vector of layer names for \code{object} that has
##' already been filtered for validity.
##' @param available character; what layers are actually available.
##' @return a named logical vector over \code{valid}; \code{TRUE} for layers
##' that should be drawn.
##'
##' @importFrom stats setNames
`layer_draw_list` <- function(valid, layers = NULL, available = NULL) {
    ## start by assuming every valid layer will be drawn
    draw <- setNames(rep(TRUE, length(valid)), valid)
    if (!is.null(layers)) {
        ## optionally restrict the request to layers actually present
        if (!is.null(available)) {
            layers <- intersect(layers, available)
        }
        ## switch off every valid layer the user did not (validly) request
        draw[!(valid %in% layers)] <- FALSE
    }
    draw
}
##' @title Adds species and site score layers to an existing plot
##'
##' @description Takes a fortified, already-subset data frame of ordination
##' scores and adds point or text layers for the species, site and constraint
##' scores to the supplied ggplot object.
##'
##' @param object a fortified (long-format) data frame of ordination scores,
##' already subset to the layers being drawn.
##' @param plt a ggplot object.
##' @param vars character; length 2 vector of dimension names.
##' @param geom character; vector of length 1 or 2 indicating which geoms will
##' be used for the site (first element) or species (second element) scores.
##' @param draw_list logical; named vector of score types indicating which are
##' available and requested for plotting.
##' @param arrows logical; length 1 vector indicating if species scores should
##' be drawn using arrows.
##' @return the input \code{plt} with the extra layers added.
##'
`add_spp_site_scores` <- function(object, plt, vars, geom, draw_list, arrows) {
## NOTE(review): this takes the *names* of the subset, so it includes all
## three score types regardless of their TRUE/FALSE values in draw_list;
## harmless here only because `object` is pre-filtered upstream -- confirm.
wanted <- names(draw_list[c("species","sites","constraints")])
## if we're plotting species by arrows, drop species if in list
if (isTRUE(arrows)) {
wanted <- wanted[wanted != "species"]
}
## if still something to draw, draw it
if (length(wanted) > 0L) {
## case of a single geom: all score types share one layer
if (length(geom) == 1L) {
take <- object[["Score"]] %in% wanted
if (geom == "point") {
plt <- plt +
geom_point(data = object[take, , drop = FALSE],
aes_string(x = vars[1], y = vars[2],
shape = 'Score', colour = 'Score'))
} else {
plt <- plt +
geom_text(data = object[take, , drop = FALSE ],
aes_string(x = vars[1], y = vars[2],
label = 'Label', colour = 'Score'),
size = 3)
}
} else {
## we have to plot species and sites/constraints separately;
## geom[2] applies to species, geom[1] to sites/constraints
if ("species" %in% wanted) {
take <- object[["Score"]] == "species"
if (geom[2L] == "point") {
plt <- plt +
geom_point(data = object[take, , drop = FALSE],
aes_string(x = vars[1], y = vars[2],
shape = 'Score',
colour = 'Score'))
} else {
plt <- plt +
geom_text(data = object[take, , drop = FALSE ],
aes_string(x = vars[1],
y = vars[2],
label = 'Label',
colour = 'Score'),
size = 3)
}
}
if (any(c("sites","constraints") %in% wanted)) {
take <- object[["Score"]] %in% c("sites","constraints")
if (geom[1L] == "point") {
plt <- plt +
geom_point(data = object[take, , drop = FALSE],
aes_string(x = vars[1], y = vars[2],
shape = 'Score',
colour = 'Score'))
} else {
plt <- plt +
geom_text(data = object[take, , drop = FALSE ],
aes_string(x = vars[1],
y = vars[2],
label = 'Label',
colour = 'Score'),
size = 3)
}
}
}
}
## now check if species should be added as arrows from the origin
if (isTRUE(arrows) && draw_list["species"]) {
take <- object[["Score"]] == "species"
pdat <- object[take, , drop = FALSE]
col <- "black"
plt <- plt +
geom_segment(data = pdat,
aes_string(x = 0, y = 0,
xend = vars[1], yend = vars[2]),
arrow = arrow(length = unit(0.2, "cm")),
colour = col)
## nudge the labels 10% beyond the arrow tips
pdat[, vars] <- 1.1 * pdat[, vars, drop = FALSE]
plt <- plt + geom_text(data = pdat,
aes_string(x = vars[1], y = vars[2],
label = 'Label'), size = 4)
}
## return
plt
}
##' @title Fortify a \code{"cca"} object.
##'
##' @description
##' Fortifies an object of class \code{"cca"} to produce a
##' data frame of the selected axis scores in long format, suitable for
##' plotting with \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' NOTE(review): this duplicates an identical \code{fortify.cca} defined
##' earlier in this file; this later copy is the one in effect after the
##' file is sourced. The two should be deduplicated.
##'
##' @param model an object of class \code{"cca"}, the result of a call to
##' \code{\link[vegan]{cca}}, \code{\link[vegan]{rda}}, or
##' \code{\link[vegan]{capscale}}.
##' @param data currently ignored.
##' @param axes numeric; which axes to extract scores for.
##' @param display numeric; the scores to extract in the fortified object.
##' @param ... additional arguments passed to \code{\link[vegan]{scores.cca}},
##' and \code{\link[vegan]{scores.rda}}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the axis scores.
##' @author Gavin L. Simpson
##'
##' @method fortify cca
##' @export
##'
##' @examples
##' require(vegan)
##' data(dune)
##' data(dune.env)
##'
##' sol <- cca(dune ~ A1 + Management, data = dune.env)
##' head(fortify(sol))
`fortify.cca` <- function(model, data, axes = 1:6,
                          display = c("sp", "wa", "lc", "bp", "cn"), ...) {
    ## pull the requested score types from the ordination object
    score_list <- scores(model, choices = axes, display = display, ...)
    ## scores() returns a bare matrix when a single score type is requested;
    ## wrap it in a named list (translating the vegan abbreviation to the
    ## long layer name) so the code below has a uniform shape
    if (length(display) == 1L) {
        score_list <- list(score_list)
        names(score_list) <- switch(display,
                                    sp = ,
                                    species = "species",
                                    wa = ,
                                    sites = "sites",
                                    lc = "constraints",
                                    bp = "biplot",
                                    cn = "centroids",
                                    stop("Unknown value for 'display'"))
    }
    ## drop score types that are entirely missing for this fit
    all_na <- vapply(score_list, function(s) all(is.na(s)), logical(1L))
    score_list <- score_list[!all_na]
    types <- names(score_list)
    counts <- vapply(score_list, FUN = NROW, FUN.VALUE = integer(1))
    ## stack all score matrices into one long data frame
    out <- as.data.frame(do.call(rbind, score_list))
    rownames(out) <- NULL
    ## prepend the score type and the row label for each score
    cbind(Score = factor(rep(types, times = counts)),
          Label = unlist(lapply(score_list, rownames), use.names = FALSE),
          out)
}
##' @title Fortify a \code{"metaMDS"} object.
##'
##' @description
##' Fortifies an object of class \code{"metaMDS"} to produce a
##' data frame of the selected axis scores in long format, suitable for
##' plotting with \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' NOTE(review): the signature omits the \code{data} argument of the
##' \code{fortify} generic (\code{fortify(model, data, ...)}); kept as-is
##' for backward compatibility with positional callers -- confirm whether it
##' should be aligned with the generic.
##'
##' @param model an object of class \code{"metaMDS"}, the result of a call
##' to \code{\link[vegan]{metaMDS}}.
##' @param display character; which score types to extract: any combination
##' of \code{"sites"} and \code{"species"}.
##' @param ... additional arguments passed to
##' \code{\link[vegan]{scores.metaMDS}}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the \code{Score} type and row \code{Label}.
##' @author Gavin L. Simpson
##'
##' @method fortify metaMDS
##' @export
##'
##' @importFrom ggplot2 fortify
##' @importFrom vegan scores
##'
##' @examples
##' ## load example data
##' require(vegan)
##' data(dune)
##'
##' ord <- metaMDS(dune)
##' head(fortify(ord))
`fortify.metaMDS` <- function(model, display = "sites", ...) {
    ## BUG FIX: extract each score type only when requested. The original
    ## called scores(model, display = "species") unconditionally, which
    ## errors for metaMDS fits without species scores (e.g. fitted to a
    ## distance matrix) even when only site scores were asked for. The
    ## original also accepted no `...`, so forwarded arguments errored.
    if ("sites" %in% display && "species" %in% display) {
        samp <- scores(model, display = "sites", ...)
        spp <- scores(model, display = "species", ...)
        df <- as.data.frame(rbind(samp, spp))
        df <- cbind(Score = factor(rep(c("sites", "species"),
                                       c(nrow(samp), nrow(spp)))),
                    Label = c(rownames(samp), rownames(spp)),
                    df)
    } else if ("sites" %in% display) {
        samp <- scores(model, display = "sites", ...)
        df <- data.frame(Score = factor(rep("sites", nrow(samp))),
                         Label = rownames(samp),
                         samp)
    } else {
        ## fall-through retained from the original: any other request
        ## returns the species scores
        spp <- scores(model, display = "species", ...)
        df <- data.frame(Score = factor(rep("species", nrow(spp))),
                         Label = rownames(spp),
                         spp)
    }
    rownames(df) <- NULL
    df
}
# Stat for computing the convex hull of each group of points, used by
# geom_chull() below. compute_group() subsets the group's rows to the hull
# vertices returned by grDevices::chull(), preserving all columns so the
# resulting polygon inherits the group's aesthetics.
StatChull <- ggproto("StatChull", Stat,
compute_group = function(data, scales) {
data[chull(data$x, data$y), , drop = FALSE]
},
required_aes = c("x", "y")
)
# Convenience layer that draws the convex hull of each group (by default as
# a polygon) using StatChull. Arguments mirror the standard ggplot2 layer
# constructors; `...` is passed through as extra layer parameters.
geom_chull <- function(mapping = NULL, data = NULL, geom = "polygon",
                       position = "identity", na.rm = FALSE, show.legend = NA,
                       inherit.aes = TRUE, ...) {
    ## all of the work is done by StatChull; this just assembles the layer
    layer(
        data = data,
        mapping = mapping,
        stat = StatChull,
        geom = geom,
        position = position,
        show.legend = show.legend,
        inherit.aes = inherit.aes,
        params = list(na.rm = na.rm, ...)
    )
}
| /nes8010.R | no_license | matthew-callum/BIO8068-Assignment-1 | R | false | false | 41,480 | r | # Please source this script before running the NES8010 multivariate examples
# The script provides additional functions to make using the analyses simpler,
# and change the default plots to a format compatible with ggplot2
# grid package needs installing for ordi_identify
# Extra packages required by this script (grid is needed by ordi_identify)
my_packages <- c("grid")
# Which of the required packages are not yet installed?
not_installed <- my_packages[!(my_packages %in% installed.packages()[, "Package"])]
# Install any that are missing; explicit comparison rather than relying on
# the numeric truthiness of length()
if (length(not_installed) > 0) {
  install.packages(not_installed)
}
library(grid)
#' Interactive identify
#'
#' Interactive identify ggvegan species
#' @param plotname Name of a plot created with \code{\link{ordi_plot}}
#' @param size Font size of labels (default = 3)
#' @param ... Other optional parameters
#'
#' @details
#' This function is designed to be run interactively. First create a standard
#' ordination using \code{\link{ordi_pca}}, \code{\link{ordi_rda}},
#' \code{\link{ordi_ca}}, \code{\link{ordi_cca}} or \code{\link{ordi_nmds}}.
#' Then call \code{\link{ordi_plot}} but make sure that the plot results is
#' stored in an R object. Then apply this function to that object, and hit the
#' \emph{Esc} key to exit function.
#' \strong{Note:} In RStudio only the most recently displayed plot can be
#' labelled with this function, so avoid using the back arrow keys in the RStudio
#' plot window. Labelling may not always occur on first click, and is more
#' difficult on constrained ordination plots.
#'
#' @return The original ordiname is modified with labels
#'
#' @author Roy Sanderson, School of Natural & Environmental Science, Newcastle
#' University roy.sanderson@newcastle.ac.uk
#'
#' @examples
#' if(interactive()){
#'
#' # Unconstrained ordination
#' data(dune)
#' data(dune.env)
#' dune_pca <- ordi_pca(dune)
#' dune_plt <- ordi_plot(dune_pca, layers="species", geom="point") # defaults to sites and species
#' dune_plt # Display the plot
#' ordi_identify(dune_plt) # Hit Esc key to exit
#'
#' # Constrained ordination
#' dune_rda <- ordi_rda(dune ~ A1 + Management, data=dune.env)
#' # displays spp and constraints.
#' # Constraints are "biplot" for continuous and "centroids" for categorical
#' dune_plt <- ordi_plot(dune_rda, layers=c("species", "biplot", "centroids"), geom="point")
#' dune_plt # Display the plot
#' ordi_identify(dune_plt) # Hit Esc key to exit
#'
#' }
#' @import grid
#' @import mosaic
#' @import vegan
#' @export
ordi_identify <- function(plotname, size=3, ...){
print("Click on plot to label points; hit Esc key to exit")
## data behind the first layer of the stored ggplot
plot_data <- plotname[["layers"]][[1]]$data
## hard-coded grid viewport name for the ggplot panel -- fragile across
## ggplot2 versions; NOTE(review): confirm against the installed version
depth <- downViewport('panel.7-5-7-5')
## columns 3-4 are assumed to hold the x/y axis scores and column 2 the
## labels, matching the (Score, Label, axis1, axis2) fortify layout --
## NOTE(review): confirm this ordering for all ordi_plot outputs
x <- plot_data[,3]
y <- plot_data[,4]
labels <- plot_data[,2]
pushViewport(dataViewport(x,y))
## first click; grid.locator() returns NULL when the user hits Esc
pick <- grid.locator('in')
while(!is.null(pick)){
tmp <- grid.locator('in')
tmp.n <- as.numeric(tmp)
## convert data coordinates to inches so distances match the click units
tmp2.x <- as.numeric(convertX( unit(x,'native'), 'in' ))
tmp2.y <- as.numeric(convertY( unit(y,'native'), 'in' ))
## index of the data point nearest the click (squared distance)
w <- which.min( (tmp2.x-tmp.n[1])^2 + (tmp2.y-tmp.n[2])^2 )
popViewport(n=1)
upViewport(depth)
## redraw the last plot with the chosen label annotated above the point
print(last_plot() + annotate("text", label=labels[w], x = x[w], y = y[w],
size = size, hjust=0.5, vjust=-0.5))
## re-enter the panel viewport of the freshly drawn plot and wait for the
## next click
depth <- downViewport('panel.7-5-7-5')
pushViewport(dataViewport(x,y))
pick <- grid.locator('in')
}
return(last_plot())
}
#' Principal components analysis
#'
#' Wrapper around \code{vegan::rda} for unconstrained PCA.
#' @param spp_data Dataframe of attributes (columns) by samples (rows)
#' @param ... Other options passed on to \code{rda}
#'
#' @details The returned object keeps the \code{"rda"}/\code{"cca"} classes so
#' vegan methods still dispatch, and gains an extra \code{"pca"} class tag so
#' downstream helpers can distinguish a PCA from a constrained RDA.
#'
#' @export
ordi_pca <- function(spp_data, ...){
    fit <- rda(spp_data, ...)
    class(fit) <- c("rda", "cca", "pca")
    fit
}
#' Redundancy analysis
#'
#' Wrapper function with vegan for RDA
#' @param formula Dataframe of attributes (columns) by samples (rows) as response
#' and one or more explanatory variables from a second dataframe
#' @param ... Other options to function
#'
#' @details A direct alias for \code{vegan::rda}; the braced expression in the
#' original evaluated to the same function object.
#'
#' @export
ordi_rda <- rda
#' Correspondence analysis
#'
#' Wrapper around \code{vegan::cca} for unconstrained CA.
#' @param spp_data Dataframe of attributes (columns) by samples (rows)
#' @param ... Other options passed on to \code{cca}
#'
#' @details The returned object keeps the \code{"rda"}/\code{"cca"} classes so
#' vegan methods still dispatch, and gains an extra \code{"ca"} class tag so
#' downstream helpers can distinguish a CA from a constrained CCA.
#'
#' @export
ordi_ca <- function(spp_data, ...){
    fit <- cca(spp_data, ...)
    class(fit) <- c("rda", "cca", "ca")
    fit
}
#' Canonical correspondence analysis
#'
#' Wrapper function with vegan for CCA
#' @param formula Dataframe of attributes (columns) by samples (rows) as response
#' and one or more explanatory variables from a second dataframe
#' @param ... Other options to function
#'
#' @details A direct alias for \code{vegan::cca}; the braced expression in the
#' original evaluated to the same function object.
#'
#' @export
ordi_cca <- cca
#' Non-metric multidimensional analysis
#'
#' Wrapper function with vegan for metaMDS
#' @param spp_data Dataframe of attributes (columns) by samples (rows)
#' @param ... Other options passed on to \code{metaMDS}
#'
#' @return The fitted \code{metaMDS} object, returned visibly.
#'
#' @export
ordi_nmds <- function(spp_data, ...){
  ## Return the fit directly. The original assigned the result to a stray,
  ## misnamed local (spp_data_ca), which made the function's value return
  ## invisibly so nothing printed at the console.
  metaMDS(spp_data, ...)
}
#' Ordination scores from constrained or unconstrained ordination
#'
#' Extracts a long-format data frame of ordination scores by delegating to
#' the appropriate \code{fortify} method for the ordination's class.
#' @param ordi_object Result of an ordination
#' @param ... Other options passed on to the \code{fortify} method
#'
#' @export
ordi_scores <- function(ordi_object, ...) fortify(ordi_object, ...)
#' Stepwise selection of constrained ordination
#'
#' Thin wrapper around \code{vegan::ordistep} for stepwise model selection of
#' a constrained ordination.
#' @param ordi_object Either a cca or rda object
#' @param ... Other options passed on to \code{ordistep}
#'
#' @export
ordi_step <- function(ordi_object, ...) ordistep(ordi_object, ...)
#' Multiple plot function
#'
#' Display plot objects in multiple columns, rows, or other combinations
#' @param ... ggplot (or gf_ plot) objects
#' @param plotlist alternative input as a list of ggplot objects
#' @param cols Number of columns in layout
#' @param layout A matrix specifying the layout. If present, 'cols' is ignored.
#'
#' @details
#' If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
#' then plot 1 will go in the upper left, 2 will go in the upper right, and
#' 3 will go all the way across the bottom.
#'
#' @return Displays multi-way plot, but returns NULL
#'
#' @examples
#' # Create a couple of normal distributions of different sample sizes
#' small_normal <- rnorm(25)
#' medium_normal <- rnorm(100)
#' big_normal <- rnorm(100000)
#'
#' # Plot their frequency histograms, but store rather than display
#' small_normal_plt <- gf_histogram(~ small_normal)
#' medium_normal_plt <- gf_histogram(~ medium_normal)
#' big_normal_plt <- gf_histogram(~ big_normal)
#'
#' # Display two plots side-by-side
#' multi_plot(small_normal_plt, big_normal_plt, cols=2)
#'
#' # Display two plots one above the other
#' multi_plot(small_normal_plt, big_normal_plt, cols=1)
#'
#' # Display three plots in a grid
#' # Note use of layout 1, 2, 3, 3 coding to put
#' # the big_normal_plt (third named one) across the bottom
#' multi_plot(small_normal_plt, medium_normal_plt, big_normal_plt,
#' layout=matrix(c(1,2,3,3), nrow=2, byrow=TRUE))
#'
#' @import grid
#' @import mosaic
#' @export
multi_plot <- function(..., plotlist=NULL, cols=1, layout=NULL) {
  # Gather plot objects supplied either directly via ... or as a list
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  # Nothing to draw: return quietly rather than erroring in grid.layout()
  if (numPlots == 0L) {
    return(invisible(NULL))
  }
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq_len(cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in seq_len(numPlots)) {
      # Get the i,j matrix positions of the regions that contain this subplot;
      # a plot number repeated in `layout` spans all of its cells
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
  # Documented contract: displays the plots, returns NULL
  invisible(NULL)
}
#' Type 3 Sums of squares
#'
#' Wrapper function with car for Anova
#' @param lm_mod Results of lm function
#' @param ... Other options to function
#'
#' @details To be written
#'
#' @export
## Thin wrapper: delegate to car::Anova(), which supports type II/III
## sums of squares (unlike base anova()'s sequential type I).
anova3 <- function(lm_mod, ...) Anova(lm_mod, ...)
##' @title Fortify a \code{"cca"} object.
##'
##' @description
##' Fortifies an object of class \code{"cca"} to produce a
##' data frame of the selected axis scores in long format, suitable for
##' plotting with \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' TODO
##'
##' @param model an object of class \code{"cca"}, the result of a call to
##' \code{\link[vegan]{cca}}, \code{\link[vegan]{rda}}, or
##' \code{\link[vegan]{capscale}}.
##' @param data currently ignored.
##' @param axes numeric; which axes to extract scores for.
##' @param display numeric; the scores to extract in the fortified object.
##' @param ... additional arguments passed to \code{\link[vegan]{scores.cca}},
##' and \code{\link[vegan]{scores.rda}}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the axis scores.
##' @author Gavin L. Simpson
##'
##' @method fortify cca
##' @export
##'
##' @examples
##' require(vegan)
##' data(dune)
##' data(dune.env)
##'
##' sol <- cca(dune ~ A1 + Management, data = dune.env)
##' head(fortify(sol))
`fortify.cca` <- function(model, data, axes = 1:6,
                          display = c("sp", "wa", "lc", "bp", "cn"), ...) {
    ## NOTE(review): an identical `fortify.cca` definition appears again
    ## near the end of this file; when sourced, the later copy wins, so
    ## one of the two duplicates should be removed.
    ## extract scores
    scrs <- scores(model, choices = axes, display = display, ...)
    ## handle case of only 1 set of scores: vegan returns a bare matrix,
    ## so wrap it in a list and translate the short code to a long name
    if (length(display) == 1L) {
        scrs <- list(scrs)
        nam <- switch(display,
                      sp = "species",
                      species = "species",
                      wa = "sites",
                      sites = "sites",
                      lc = "constraints",
                      bp = "biplot",
                      cn = "centroids",
                      stop("Unknown value for 'display'"))
        names(scrs) <- nam
    }
    ## drop score types that came back entirely NA (not present in model)
    miss <- vapply(scrs, function(x ) all(is.na(x)), logical(1L))
    scrs <- scrs[!miss]
    nams <- names(scrs)
    nr <- vapply(scrs, FUN = NROW, FUN.VALUE = integer(1))
    ## stack all score matrices into one long data frame, tagging each row
    ## with its score type (Score) and its row name (Label)
    df <- do.call('rbind', scrs)
    rownames(df) <- NULL
    df <- as.data.frame(df)
    df <- cbind(Score = factor(rep(nams, times = nr)),
                Label = unlist(lapply(scrs, rownames), use.names = FALSE),
                df)
    df
}
##' @title Fortify a \code{"metaMDS"} object.
##'
##' @description
##' Fortifies an object of class \code{"metaMDS"} to produce a
##' data frame of the selected axis scores in long format, suitable for
##' plotting with \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' TODO
##'
##' @param model an object of class \code{"metaMDS"}, the result of a call
##' to \code{\link[vegan]{metaMDS}}.
##' @param data currently ignored.
##' @param ... additional arguments passed to
##' \code{\link[vegan]{scores.metaMDS}}. Note you can't use \code{display}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the axis scores.
##' @author Gavin L. Simpson
##'
##' @method fortify metaMDS
##' @export
##'
##' @importFrom ggplot2 fortify
##' @importFrom vegan scores
##'
##' @examples
##' ## load example data
##' require(vegan)
##' data(dune)
##'
##' ord <- metaMDS(dune)
##' head(fortify(ord))
# `fortify.metaMDS` <- function(model, data, ...) {
# samp <- scores(model, display = "sites", ...)
# spp <- tryCatch(scores(model, display = "species", ...),
# error = function(c) {NULL})
# if (!is.null(spp)) {
# df <- rbind(samp, spp)
# df <- as.data.frame(df)
# df <- cbind(Score = factor(rep(c("sites","species"),
# c(nrow(samp), nrow(spp)))),
# Label = c(rownames(samp), rownames(spp)),
# df)
# } else {
# df <- data.frame(Score = factor(rep("sites", nrow(df))),
# Label = rownames(samp),
# samp)
# }
# rownames(df) <- NULL
# df
# }
# `fortify.metaMDS` <- function(model, data, axes = 1:2,
# display = c("sites"), ...) {
# ## extract scores
# scrs <- scores(model, choices = axes, display = display, ...)
# ## handle case of only 1 set of scores
# if (length(display) == 1L) {
# scrs <- list(scrs)
# nam <- switch(display,
# sp = "species",
# species = "species",
# si = "sites",
# sites = "sites",
# stop("Unknown value for 'display'"))
# names(scrs) <- nam
# }
# miss <- vapply(scrs, function(x ) all(is.na(x)), logical(1L))
# scrs <- scrs[!miss]
# nams <- names(scrs)
# nr <- vapply(scrs, FUN = NROW, FUN.VALUE = integer(1))
# df <- do.call('rbind', scrs)
# rownames(df) <- NULL
# df <- as.data.frame(df)
# df <- cbind(Score = factor(rep(nams, times = nr)),
# Label = unlist(lapply(scrs, rownames), use.names = FALSE),
# df)
# df
# }
#
##' @title ggplot-based plot for objects of class \code{"cca"}
##'
##' @description
##' Produces a multi-layer ggplot object representing the output of objects produced by \code{\link[vegan]{cca}}, or \code{\link[vegan]{capscale}}.
##'
##' @details
##' TODO
##'
##' @param object an object of class \code{"cca"}, the result of a call to \code{\link[vegan]{cca}} or \code{\link[vegan]{capscale}}.
##' @param axes numeric; which axes to plot, given as a vector of length 2.
##' @param geom character; which geoms to use for the layers. Can be a
##' vector of length equal to \code{length(display)}, in which case the
##' \emph{i}th element of \code{type} refers to the \emph{i}th element
##' of \code{display}.
##' @param layers character; which scores to plot as layers
##' @param legend.position character or two-element numeric vector; where to position the legend. See \code{\link[ggplot2]{theme}} for details. Use \code{"none"} to not draw the legend.
##' @param xlab character; label for the x-axis
##' @param ylab character; label for the y-axis
##' @param title character; subtitle for the plot
##' @param subtitle character; subtitle for the plot
##' @param caption character; caption for the plot
##' @param ... Additional arguments passed to \code{\link{fortify.cca}}.
##' @return Returns a ggplot object.
##' @author Gavin L. Simpson
##'
##' @method ordi_plot cca
##' @export
##'
##' @importFrom grid arrow unit
##' @importFrom ggplot2 autoplot ggplot geom_point geom_text geom_segment labs coord_fixed aes_string
##'
##' @examples
##' require(vegan)
##' data(dune)
##' data(dune.env)
##'
##' sol <- ordi_cca(dune ~ A1 + Management, data = dune.env)
##' ordi_plot(sol)
`ordi_plot.cca` <- function(object, axes = c(1,2), geom = c("point", "text"),
                         layers = c("species", "sites", "biplot", "centroids"),
                         legend.position = "none",
                         title = NULL, subtitle = NULL, caption = NULL,
                         ylab, xlab, ...) {
    ## ensure exactly two axes (recycle a single value if needed)
    axes <- rep(axes, length.out = 2L)
    ## long-format score data frame with Score/Label columns (fortify.cca)
    obj <- fortify(object, axes = axes, ...)
    LAYERS <- levels(obj$Score)
    ## sort out x, y aesthetics: axis names follow the Score/Label columns
    vars <- getDimensionNames(obj)
    ## match the geom
    geom <- match.arg(geom)
    point <- TRUE
    if (isTRUE(all.equal(geom, "text"))) {
        point <- FALSE
    }
    ## subset out the layers wanted
    ### need something here first to match acceptable ones?
    ### or just check that the layers selected would return a df with
    ### at least 1 row.
    obj <- obj[obj$Score %in% layers, , drop = FALSE]
    ## skeleton layer
    plt <- ggplot()
    ## add plot layers as required: species and site scores first,
    ## drawn as points or text according to `geom`
    want <- obj$Score %in% c("species", "sites")
    if (point) {
        plt <- plt +
            geom_point(data = obj[want, , drop = FALSE ],
                       aes_string(x = vars[1], y = vars[2], shape = 'Score',
                                  colour = 'Score'))
    } else {
        plt <- plt +
            geom_text(data = obj[want, , drop = FALSE ],
                      aes_string(x = vars[1], y = vars[2], label = 'Label',
                                 colour = 'Score'))
    }
    ## remove biplot arrows for centroids if present: factor constraints
    ## appear both as biplot arrows and centroids; keep only the centroid
    if(all(c("biplot","centroids") %in% LAYERS)) {
        want <- obj$Score == "biplot"
        tmp <- obj[want, ]
        obj <- obj[!want, ]
        bnam <- tmp[, "Label"]
        cnam <- obj[obj$Score == "centroids", "Label"]
        obj <- rbind(obj, tmp[!bnam %in% cnam, , drop = FALSE])
    }
    ## linear-combination (constraint) site scores, if requested
    if(any(want <- obj$Score == "constraints")) {
        if (point) {
            plt <- plt + geom_point(data = obj[want, , drop = FALSE ],
                                    aes_string(x = vars[1], y = vars[2]))
        } else {
            plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
                                   aes_string(x = vars[1], y = vars[2],
                                              label = 'Label'))
        }
    }
    ## biplot arrows, rescaled to fill the data region (arrowMul), with
    ## labels pushed 10% beyond the arrow tips
    if(any(want <- obj$Score == "biplot")) {
        if (length(layers) > 1) {
            mul <- arrowMul(obj[want, vars, drop = FALSE],
                            obj[!want, vars, drop = FALSE])
            obj[want, vars] <- mul * obj[want, vars]
        }
        col <- "navy"
        plt <- plt +
            geom_segment(data = obj[want, , drop = FALSE ],
                         aes_string(x = 0, y = 0, xend = vars[1], yend = vars[2]),
                         arrow = arrow(length = unit(0.2, "cm")),
                         colour = col)
        obj[want, vars] <- 1.1 * obj[want, vars]
        plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
                               aes_string(x = vars[1], y = vars[2], label = 'Label'))
    }
    ## centroids of factor constraints, drawn as navy labels
    if(any(want <- obj$Score == "centroids")) {
        plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
                               aes_string(x = vars[1], y = vars[2], label = 'Label'),
                               colour = "navy")
    }
    ## default axis labels to the ordination axis names
    if(missing(xlab)) {
        xlab <- vars[1]
    }
    if(missing(ylab)) {
        ylab <- vars[2]
    }
    plt <- plt + labs(x = xlab, y = ylab, title = title, subtitle = subtitle,
                      caption = caption)
    ## add equal scaling (ordination distances are meaningful in both axes)
    plt <- plt + coord_fixed(ratio = 1)
    ## do we want a legend
    plt <- plt + theme(legend.position = legend.position)
    plt
}
## `aplot.cca` <- `ordi_plot.cca`
##' @title ggplot-based plot for objects of class \code{"metaMDS"}
##'
##' @description
##' Produces a multi-layer ggplot object representing the output of
##' objects produced by \code{\link[vegan]{metaMDS}}.
##'
##' @details
##' TODO
##'
##' @param object an object of class \code{"metaMDS"}, the result of a call
##' to \code{\link[vegan]{metaMDS}}.
##' @param axes numeric; which axes to plot, given as a vector of length 2.
##' @param geom character; which geoms to use for the layers. Can be a
##' vector of length equal to \code{length(display)}, in which case the
##' \emph{i}th element of \code{type} refers to the \emph{i}th element
##' of \code{display}.
##' @param layers character; which scores to plot as layers
##' @param legend.position character or two-element numeric vector; where to position the legend. See \code{\link[ggplot2]{theme}} for details. Use \code{"none"} to not draw the legend.
##' @param xlab character; label for the x-axis
##' @param ylab character; label for the y-axis
##' @param title character; subtitle for the plot
##' @param subtitle character; subtitle for the plot
##' @param caption character; caption for the plot
##' @param ... Additional arguments passed to \code{\link{fortify.metaMDS}}.
##'
##' @return Returns a ggplot object.
##'
##' @author Gavin L. Simpson
##'
##' @method ordi_plot metaMDS
##' @export
##'
##' @importFrom grid arrow unit
##' @importFrom ggplot2 autoplot ggplot geom_point geom_text labs coord_fixed aes_string
##'
##' @examples
##' ## load example data
##' require(vegan)
##' data(dune)
##'
##' sol <- ordi_nmds(dune)
##' ordi_plot(sol)
`ordi_plot.metaMDS` <- function(object, axes=c(1,2), geom = c("point", "text"),
                            layers = c("species", "sites"),
                            legend.position = "none",
                            title = NULL, subtitle = NULL, caption = NULL,
                            ylab, xlab, ...) {
    ## ensure exactly two axes (recycle a single value if needed)
    axes <- rep(axes, length.out = 2L)
    display <- layers
    ## Pass `display` explicitly: previously it was computed but never used,
    ## so fortify.metaMDS() fell back to its default ("sites") and the
    ## species layer requested in `layers` was silently dropped.
    obj <- fortify.metaMDS(object, display = display, ...)
    obj <- obj[obj$Score %in% layers, ]
    ## sort out x, y aesthetics: axis names follow the Score/Label columns
    vars <- getDimensionNames(obj)
    ## skeleton layer
    plt <- ggplot()
    geom <- match.arg(geom)
    point <- TRUE
    if (isTRUE(all.equal(geom, "text"))) {
        point <- FALSE
    }
    ## draw scores as points or as text labels, coloured by score type
    if (point) {
        plt <- plt + geom_point(data = obj,
                                aes_string(x = vars[1], y = vars[2], shape = 'Score',
                                           colour = 'Score'))
    } else {
        plt <- plt + geom_text(data = obj,
                               aes_string(x = vars[1], y = vars[2], label = 'Label',
                                          colour = 'Score'))
    }
    ## default axis labels to the ordination axis names
    if(missing(xlab)) {
        xlab <- vars[1]
    }
    if(missing(ylab)) {
        ylab <- vars[2]
    }
    plt <- plt + labs(x = xlab, y = ylab, title = title, subtitle = subtitle,
                      caption = caption)
    ## add equal scaling (NMDS distances are meaningful in both axes)
    plt <- plt + coord_fixed(ratio = 1)
    ## do we want a legend
    plt <- plt + theme(legend.position = legend.position)
    plt
}
##' @title ggplot-based plot for objects of class \code{'rda'}
##'
##' @description
##' Produces a multi-layer ggplot object representing the output of objects produced by \code{\link[vegan]{rda}}.
##'
##' @details
##' TODO
##'
##' @param object an object of class \code{"rda"}, the result of a call to \code{\link[vegan]{rda}}
##' @param axes numeric; which axes to plot, given as a vector of length 2.
##' @param geom character; which geoms to use for the layers. Can be a vector of
##' up to length 2, in which case, the first element of \code{geom} will be
##' used for any site scores (both weighted sum or linear combination scores),
##' and the second element will be used for species scores. The latter will be
##' ignored if \code{arrows = TRUE}.
##' @param layers character; which scores to plot as layers
##' @param arrows logical; represent species (variables) using vectors?
##' @param legend.position character or two-element numeric vector; where to position the legend. See \code{\link[ggplot2]{theme}} for details. Use \code{"none"} to not draw the legend.
##' @param xlab character; label for the x-axis
##' @param ylab character; label for the y-axis
##' @param title character; subtitle for the plot
##' @param subtitle character; subtitle for the plot
##' @param caption character; caption for the plot
##' @param const General scaling constant to \code{rda} scores. See
##' \code{\link[vegan]{scores.rda}} for details.
##' @param ... Additional arguments passed to \code{\link{fortify}}.
##'
##' @return Returns a ggplot object.
##'
##' @author Gavin L. Simpson
##'
##' @examples
##' require(vegan)
##' data(dune)
##'
##' pca <- ordi_rda(dune)
##' ordi_plot(pca)
##'
##' ## Just the species scores
##' ordi_plot(pca, layers = "species")
##' @method ordi_plot rda
##' @export
##'
`ordi_plot.rda` <- function(object, axes = c(1,2), geom = c("point", "text"),
                        layers = c("species", "sites", "biplot", "centroids"),
                        arrows = FALSE, legend.position = "none",
                        title = NULL, subtitle = NULL, caption = NULL,
                        ylab, xlab, const, ...) {
    ## determine which layers to plot
    valid <- valid_layers(object) # vector of valid layers
    ok_layers <- check_user_layers(layers, valid, message = TRUE)
    layers <- layers[ok_layers] # subset user-supplied layers
    draw_list <- layer_draw_list(valid, layers) # what are we drawing
    ## fix-up axes needed to plot
    laxes <- length(axes)
    if (laxes != 2L) {
        if (laxes > 2L) {
            axes <- rep(axes, length.out = 2L)  # shrink to required length
        } else {
            stop("Need 2 ordination axes to plot; only 1 was given.",
                 call. = FALSE)
        }
    }
    obj <- fortify(object, axes = axes, const = const, ...) # grab some scores
    ## recompute the draw list against the score types actually present
    available <- levels(obj[["Score"]])
    draw_list <- layer_draw_list(valid, layers, available) # what are we drawing
    layer_names <- names(draw_list)[draw_list]
    ## sort out x, y aesthetics
    vars <- getDimensionNames(obj)
    ## process geom arg: up to two geoms (sites geom first, species second)
    geom <- match.arg(geom, several.ok = TRUE)
    geom <- unique(geom) # simplify geom if elements are the same
    ## subset out the layers wanted
    obj <- obj[obj[["Score"]] %in% layer_names, , drop = FALSE]
    ## skeleton layer
    plt <- ggplot()
    ## draw sites, species, constraints == lc site scores
    if (any(draw_list[c("species","sites","constraints")])) {
        plt <- add_spp_site_scores(obj, plt, vars, geom, draw_list, arrows)
    }
    ## remove biplot arrows for centroids if present: factor constraints
    ## appear as both biplot arrows and centroids; keep only the centroid
    if(all(draw_list[c("biplot","centroids")])) {
        want <- obj[["Score"]] == "biplot"
        tmp <- obj[want, ]
        obj <- obj[!want, ]
        bnam <- tmp[, "Label"]
        cnam <- obj[obj[["Score"]] == "centroids", "Label"]
        obj <- rbind(obj, tmp[!bnam %in% cnam, , drop = FALSE])
    }
    ## NOTE(review): draw_list["biplot"] is a *named* logical; isTRUE()
    ## accepts that only under the R >= 3.5 definition -- confirm the
    ## package's minimum R version.
    if(isTRUE(draw_list["biplot"])) {
        want <- obj[["Score"]] == "biplot"
        if (length(layer_names) > 1) {
            ## rescale arrows to fill ~75% of the plotted data region
            mul <- arrowMul(obj[want, vars, drop = FALSE],
                            obj[!want, vars, drop = FALSE])
            obj[want, vars] <- mul * obj[want, vars]
        }
        col <- "navy"
        plt <- plt +
            geom_segment(data = obj[want, , drop = FALSE ],
                         aes_string(x = 0, y = 0,
                                    xend = vars[1], yend = vars[2]),
                         arrow = arrow(length = unit(0.2, "cm")),
                         colour = col)
        ## push arrow labels 10% beyond the arrow tips
        obj[want, vars] <- 1.1 * obj[want, vars]
        plt <- plt + geom_text(data = obj[want, , drop = FALSE ],
                               aes_string(x = vars[1], y = vars[2],
                                          label = 'Label'))
    }
    if(isTRUE(draw_list["centroids"])) {
        want <- obj[["Score"]] == "centroids"
        plt <- plt +
            geom_text(data = obj[want, , drop = FALSE],
                      aes_string(x = vars[1], y = vars[2], label = 'Label'),
                      colour = "navy")
    }
    ## default axis labels to the ordination axis names
    if(missing(xlab)) {
        xlab <- vars[1]
    }
    if(missing(ylab)) {
        ylab <- vars[2]
    }
    plt <- plt + labs(x = xlab, y = ylab, title = title, subtitle = subtitle,
                      caption = caption)
    ## add equal scaling
    plt <- plt + coord_fixed(ratio = 1)
    ## do we want a legend
    plt <- plt + theme(legend.position = legend.position)
    plt
}
##' @title Scale Vectors to Data
##' @description Scale vector arrows to \code{fill} proportion of the data.
##' @param arrows a two-column matrix-like object containing coordinates for the arrows/vectors on x and y axes.
##' @param data a two-column matrix-like object containing coordinates of the data on the x and y axes.
##' @param at numeric vector of length 2; location of the origin of the arrows.
##' @param fill numeric; what proportion of the range of the data to fill
##' @return a numeric multiplier that will scale the arrows
##' @author Gavin L. Simpson
`arrowMul` <- function(arrows, data, at = c(0, 0), fill = 0.75) {
    ## Per-axis ranges of the data: c(xmin, xmax, ymin, ymax).
    ## BUGFIX: was range(data[,1], range(data[,2])) -- a misplaced
    ## parenthesis that pooled both axes into a single range and then
    ## relied on silent recycling against `at`, giving a wrong multiplier
    ## whenever the x and y ranges differed.
    u <- c(range(data[, 1], na.rm = TRUE), range(data[, 2], na.rm = TRUE))
    u <- u - rep(at, each = 2)
    ## per-axis ranges of the arrow tips
    r <- c(range(arrows[, 1], na.rm = TRUE), range(arrows[, 2], na.rm = TRUE))
    ## if a data range is reversed relative to the origin, flip it so the
    ## ratios below are computed min-to-min / max-to-max
    rev <- sign(diff(u))[-2]
    if (rev[1] < 0)
        u[1:2] <- u[2:1]
    if (rev[2] < 0)
        u[3:4] <- u[4:3]
    ## candidate scale factors; keep only finite, positive ones and take
    ## the most restrictive, scaled back by `fill`
    u <- u/r
    u <- u[is.finite(u) & u > 0]
    fill * min(u)
}
##' @title Number of scores
##' @description Returns the number of scores returns in object \code{x}.
##'
##' @param x The object whose number of scores is required.
##'
##' @return a numeric vector of length 1 with the number of scores.
##'
##' @author Gavin L. Simpson
`scoresLength` <- function(x) {
    ## NROW() counts rows for matrix-like scores and length for plain
    ## vectors; map a NULL result to zero so callers can sum safely
    n <- NROW(x)
    if (is.null(n)) 0 else n
}
##' @title Extract the names of the dimensions to plot as a character vector
##'
##' @description Find the character vector of names for the two dimensions of data to be plotted.
##' @param object a fortified ordination object.
##' @return A length 2 character vector of dimension names.
##' @author Gavin L. Simpson
`getDimensionNames` <- function(object) {
    ## the first two columns of a fortified ordination are Score and
    ## Label; everything after them names the ordination axes
    nms <- names(object)
    nms[-(1:2)]
}
##' @title Adds a label layer using one of a set of common geoms
##'
##' @description Adds labels to a plot using one of \code{geom_label}, \code{geom_text}, \code{geom_label_repel} or \code{geom_text_repel}.
##'
##' @param data data frame; data set to use for the label layer. Must contain a variable \code{Label} containing the strings to use as labels.
##' @param geom character; which geom to use for labelling. Defaults to the first choice, \code{"label"}.
##' @param vars character; vector of names of variables to map to the \code{x} and \code{y} aesthetics of the chosen geom.
##'
##' @author Gavin L. Simpson
##'
`label_fun` <- function(data,
                        geom = c("label", "text", "label_repel", "text_repel"),
                        vars) {
    ## Resolve the geom choice. BUGFIX: without match.arg() the full
    ## vector default reached switch(), which errors when EXPR has
    ## length > 1, so calling label_fun() without `geom` always failed.
    geom <- match.arg(geom)
    ## NOTE(review): geom_label_repel/geom_text_repel come from the
    ## ggrepel package, which is not imported in this file -- confirm it
    ## is a declared dependency before using the *_repel choices.
    ll <- switch(geom,
                 label =
                     geom_label(data = data,
                                mapping = aes_string(x = vars[1],
                                                     y = vars[2],
                                                     label = 'Label')),
                 text =
                     geom_text(data = data,
                               mapping = aes_string(x = vars[1],
                                                    y = vars[2],
                                                    label = 'Label')),
                 label_repel =
                     geom_label_repel(data = data,
                                      mapping = aes_string(x = vars[1],
                                                           y = vars[2],
                                                           label = 'Label')),
                 text_repel =
                     geom_text_repel(data = data,
                                     mapping = aes_string(x = vars[1],
                                                          y = vars[2],
                                                          label = 'Label'))
                 )
    ll
}
##' @title Valid layers for vegan objects
##'
##' @description Generic returning the names of the score layers (e.g.
##' sites, species, biplot) that can be drawn for a given ordination
##' object class; see the \code{rda} and \code{cca} methods below.
##'
##' @param object An R object.
##' @param ... Additional arguments passed to methods.
##'
##' @return A character vector of layer names.
##'
##' @rdname valid_layers
##' @export
`valid_layers` <- function(object, ...) {
    UseMethod('valid_layers')
}
##' @rdname valid_layers
##' @export
`valid_layers.rda` <- function(object, ...) {
    ## the score layer types an RDA-like object can supply
    c("species", "sites", "constraints",
      "biplot", "centroids", "regression")
}
##' @rdname valid_layers
##' @export
`valid_layers.cca` <- function(object, ...) {
    ## the score layer types a CCA-like object can supply
    c("species", "sites", "constraints",
      "biplot", "centroids", "regression")
}
##' @title ordination plots
##'
##' @description Generic for ggplot-based ordination plots; methods are
##' defined in this file for \code{"rda"}, \code{"cca"} and
##' \code{"metaMDS"} objects.
##'
##' @param ... Additional arguments
##'
##' @return A ggplot object (see the individual methods).
##'
#' @rdname ordi_plot
#' @export
`ordi_plot` <- function(...){
    UseMethod('ordi_plot')
}
# ##' @rdname ordi_plot
# ##' @export
# `ordi_plot.rda` <- function(...){
# UseMethod('ordi_plot')
# }
#
# ##' @rdname ordi_plot
# ##' @export
# `ordi_plot.cca` <- function(...){
# UseMethod('ordi_plot')
# }
##' @title Check user-supplied layers against list of valid layers
##'
##' @description Compares each user-requested layer name against the set
##' of layers valid for the object, optionally messaging about any that
##' will be ignored.
##'
##' @param user character; vector of user supplied layer names.
##' @param valid character; vector of valid layer names.
##' @param message logical; should a message be raised in the case of invalid
##' user-supplied layer names.
##'
##' @return A logical vector, one element per entry of \code{user}.
`check_user_layers` <- function(user, valid, message = FALSE) {
    ok <- user %in% valid
    if (isTRUE(message) && !all(ok)) {
        message("Invalid (ignored) layers for this object: ",
                paste(user[!ok], collapse = ", "))
    }
    ok
}
##' @title List of layers to draw for a given vegan object
##'
##' @description Builds a named logical vector over \code{valid} marking
##' which layers should be drawn: all of them by default, or only those
##' requested in \code{layers} (optionally restricted to \code{available}).
##'
##' @param valid character; vector of valid layer names
##' @param layers character; a vector of layer names for \code{object} that has
##' already been filtered for validity.
##' @param available character; what layers are actually available
##'
##' @return A named logical vector with one element per entry of \code{valid}.
##'
##' @importFrom stats setNames
`layer_draw_list` <- function(valid, layers = NULL, available = NULL) {
    ## start with everything switched on
    draw <- setNames(rep(TRUE, length(valid)), valid)
    if (!is.null(layers)) {
        if (!is.null(available)) {
            ## only keep requested layers that are actually present
            layers <- intersect(layers, available)
        }
        ## switch off anything that was not requested
        draw[!(valid %in% layers)] <- FALSE
    }
    draw
}
##' @title Adds species and site score layers to an existing plot
##'
##' @description Shared helper for the \code{ordi_plot} methods: draws the
##' species, site and constraint scores as points or text labels, and
##' optionally the species scores as arrows from the origin.
##'
##' @param object an ordination object.
##' @param plt a ggplot object.
##' @param vars character; length 2 vector of dimension names.
##' @param geom character; vector of length 1 or 2 indicating which geoms will
##' be used for the species or site scores.
##' @param draw_list logical; vector of types of scores indicating which are
##' available and requested for plotting.
##' @param arrows logical; length 1 vector indicating if species scores should
##' be drawn using arrows.
##'
##' @return The input ggplot object with the score layers added.
##'
`add_spp_site_scores` <- function(object, plt, vars, geom, draw_list, arrows) {
    ## NOTE(review): this takes the names of all three entries regardless
    ## of their TRUE/FALSE values; it appears to rely on `object` having
    ## already been subset to the requested layers by the caller --
    ## confirm before reusing this helper elsewhere.
    wanted <- names(draw_list[c("species","sites","constraints")])
    ## if we're plotting species by arrows, drop species if in list
    if (isTRUE(arrows)) {
        wanted <- wanted[wanted != "species"]
    }
    ## if still something to draw, draw it
    if (length(wanted) > 0L) {
        ## case of a single geom: draw everything with the same geom
        if (length(geom) == 1L) {
            take <- object[["Score"]] %in% wanted
            if (geom == "point") {
                plt <- plt +
                    geom_point(data = object[take, , drop = FALSE],
                               aes_string(x = vars[1], y = vars[2],
                                          shape = 'Score', colour = 'Score'))
            } else {
                plt <- plt +
                    geom_text(data = object[take, , drop = FALSE ],
                              aes_string(x = vars[1], y = vars[2],
                                         label = 'Label', colour = 'Score'),
                              size = 3)
            }
        } else {
            ## we have to plot species and sites/constraints separately:
            ## geom[2] applies to species, geom[1] to sites/constraints
            if ("species" %in% wanted) {
                take <- object[["Score"]] == "species"
                if (geom[2L] == "point") {
                    plt <- plt +
                        geom_point(data = object[take, , drop = FALSE],
                                   aes_string(x = vars[1], y = vars[2],
                                              shape = 'Score',
                                              colour = 'Score'))
                } else {
                    plt <- plt +
                        geom_text(data = object[take, , drop = FALSE ],
                                  aes_string(x = vars[1],
                                             y = vars[2],
                                             label = 'Label',
                                             colour = 'Score'),
                                  size = 3)
                }
            }
            if (any(c("sites","constraints") %in% wanted)) {
                take <- object[["Score"]] %in% c("sites","constraints")
                if (geom[1L] == "point") {
                    plt <- plt +
                        geom_point(data = object[take, , drop = FALSE],
                                   aes_string(x = vars[1], y = vars[2],
                                              shape = 'Score',
                                              colour = 'Score'))
                } else {
                    plt <- plt +
                        geom_text(data = object[take, , drop = FALSE ],
                                  aes_string(x = vars[1],
                                             y = vars[2],
                                             label = 'Label',
                                             colour = 'Score'),
                                  size = 3)
                }
            }
        }
    }
    ## now check if species should be added as arrows from the origin,
    ## with labels pushed 10% beyond the arrow tips
    if (isTRUE(arrows) && draw_list["species"]) {
        take <- object[["Score"]] == "species"
        pdat <- object[take, , drop = FALSE]
        col <- "black"
        plt <- plt +
            geom_segment(data = pdat,
                         aes_string(x = 0, y = 0,
                                    xend = vars[1], yend = vars[2]),
                         arrow = arrow(length = unit(0.2, "cm")),
                         colour = col)
        pdat[, vars] <- 1.1 * pdat[, vars, drop = FALSE]
        plt <- plt + geom_text(data = pdat,
                               aes_string(x = vars[1], y = vars[2],
                                          label = 'Label'), size = 4)
    }
    ## return
    plt
}
##' @title Fortify a \code{"cca"} object.
##'
##' @description
##' Converts an object of class \code{"cca"} into a data frame of the
##' selected axis scores in long format, suitable for plotting with
##' \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' TODO
##'
##' @param model an object of class \code{"cca"}, the result of a call to
##' \code{\link[vegan]{cca}}, \code{\link[vegan]{rda}}, or
##' \code{\link[vegan]{capscale}}.
##' @param data currently ignored.
##' @param axes numeric; which axes to extract scores for.
##' @param display numeric; the scores to extract in the fortified object.
##' @param ... additional arguments passed to \code{\link[vegan]{scores.cca}},
##' and \code{\link[vegan]{scores.rda}}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the axis scores.
##' @author Gavin L. Simpson
##'
##' @method fortify cca
##' @export
##'
##' @examples
##' require(vegan)
##' data(dune)
##' data(dune.env)
##'
##' sol <- cca(dune ~ A1 + Management, data = dune.env)
##' head(fortify(sol))
`fortify.cca` <- function(model, data, axes = 1:6,
                          display = c("sp", "wa", "lc", "bp", "cn"), ...) {
    ## pull the requested score types from the vegan object
    scrs <- scores(model, choices = axes, display = display, ...)
    ## a single display value comes back as a bare matrix: wrap it in a
    ## list and translate vegan's short code into the long layer name
    if (length(display) == 1L) {
        nam <- switch(display,
                      sp = ,
                      species = "species",
                      wa = ,
                      sites = "sites",
                      lc = "constraints",
                      bp = "biplot",
                      cn = "centroids",
                      stop("Unknown value for 'display'"))
        scrs <- setNames(list(scrs), nam)
    }
    ## discard score types that are entirely NA (absent from the model)
    keep <- !vapply(scrs, function(s) all(is.na(s)), logical(1L))
    scrs <- scrs[keep]
    score_types <- names(scrs)
    rows_per_type <- vapply(scrs, NROW, integer(1L))
    ## stack the score matrices into one long data frame ...
    out <- as.data.frame(do.call(rbind, scrs))
    rownames(out) <- NULL
    ## ... with each row tagged by its score type and original row name
    cbind(Score = factor(rep(score_types, times = rows_per_type)),
          Label = unlist(lapply(scrs, rownames), use.names = FALSE),
          out)
}
##' @title Fortify a \code{"metaMDS"} object.
##'
##' @description
##' Fortifies an object of class \code{"metaMDS"} to produce a
##' data frame of the selected axis scores in long format, suitable for
##' plotting with \code{\link[ggplot2]{ggplot}}.
##'
##' @details
##' TODO
##'
##' @param model an object of class \code{"metaMDS"}, the result of a call
##' to \code{\link[vegan]{metaMDS}}.
##' @param data currently ignored.
##' @param ... additional arguments passed to
##' \code{\link[vegan]{scores.metaMDS}}. Note you can't use \code{display}.
##' @return A data frame in long format containing the ordination scores.
##' The first two components are the axis scores.
##' @author Gavin L. Simpson
##'
##' @method fortify metaMDS
##' @export
##'
##' @importFrom ggplot2 fortify
##' @importFrom vegan scores
##'
##' @examples
##' ## load example data
##' require(vegan)
##' data(dune)
##'
##' ord <- metaMDS(dune)
##' head(fortify(ord))
`fortify.metaMDS` <- function(model, display = "sites", ...) {
    ## which score types were requested?
    want_sites <- "sites" %in% display
    want_spp <- "species" %in% display
    samp <- scores(model, display = "sites", ...)
    ## Fetch species scores only when they can be needed. Previously they
    ## were fetched unconditionally, which errored for models without
    ## species scores even when only site scores were requested. The
    ## !want_sites case preserves the old fallback-to-species behaviour
    ## for any other `display` value.
    spp <- if (want_spp || !want_sites) {
        scores(model, display = "species", ...)
    } else {
        NULL
    }
    if (want_sites && want_spp) {
        ## both requested: stack sites then species, tagging each row
        ## with its score type and row name
        df <- as.data.frame(rbind(samp, spp))
        df <- cbind(Score = factor(rep(c("sites", "species"),
                                       c(nrow(samp), nrow(spp)))),
                    Label = c(rownames(samp), rownames(spp)),
                    df)
    } else if (want_sites) {
        df <- data.frame(Score = factor(rep("sites", nrow(samp))),
                         Label = rownames(samp),
                         samp)
    } else {
        ## anything else falls back to species scores, as before
        df <- data.frame(Score = factor(rep("species", nrow(spp))),
                         Label = rownames(spp),
                         spp)
    }
    rownames(df) <- NULL
    df
}
# Function for convex hull
## ggproto Stat: collapses each group of points to the vertices of its
## convex hull (grDevices::chull), so the hull can be drawn as a polygon.
StatChull <- ggproto("StatChull", Stat,
                     compute_group = function(data, scales) {
                       ## chull() returns hull vertex row indices in
                       ## drawing order; drop = FALSE keeps a data frame
                       data[chull(data$x, data$y), , drop = FALSE]
                     },
                     required_aes = c("x", "y")
)
## Layer constructor that draws the convex hull of each group using
## StatChull. Arguments mirror the usual ggplot2 layer arguments; `...`
## is forwarded as extra params (e.g. fill, alpha) to the layer.
geom_chull <- function(mapping = NULL, data = NULL, geom = "polygon",
                       position = "identity", na.rm = FALSE, show.legend = NA,
                       inherit.aes = TRUE, ...) {
  layer(
    stat = StatChull, data = data, mapping = mapping, geom = geom,
    position = position, show.legend = show.legend, inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getExtInfo.R
\name{getExtInfo}
\alias{getExtInfo}
\title{A function to map the extendedInfo into a list of lists}
\usage{
getExtInfo(nodes)
}
\arguments{
\item{nodes}{A set of XML nodes from getNodeSet() that point to '//observableData/observableDatum'}
}
\value{
a list of lists, with names of the list elements set to the "key" and value to "value"
}
\description{
A function to map the extendedInfo into a list of lists
}
\details{
This assumes a certain structure of the XML, i.e.,
\code{//observableData/observableDatum/content/pair/key}
\code{//observableData/observableDatum/content/pair/value}
where \code{key} and \code{value} are of atomic class, i.e., numeric or string. The code then
makes a list with named elements, i.e., 'key' as the key, and 'value' as the value in the list.
}
| /man/getExtInfo.Rd | no_license | garyfeng/pdata | R | false | false | 890 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getExtInfo.R
\name{getExtInfo}
\alias{getExtInfo}
\title{A function to map the extendedInfo into a list of lists}
\usage{
getExtInfo(nodes)
}
\arguments{
\item{nodes}{A set of XML nodes from getNodeSet() that point to '//observableData/observableDatum'}
}
\value{
a list of lists, with names of the list elements set to the "key" and value to "value"
}
\description{
A function to map the extendedInfo into a list of lists
}
\details{
This assumes a certain structure of the XML, i.e.,
\code{//observableData/observableDatum/content/pair/key}
\code{//observableData/observableDatum/content/pair/value}
where \code{key} and \code{value} are automic class, i.e., numeric or string. The code then
makes a list with named elements, i.e., 'key' as the key, and 'value' as the value in the list.
}
|
# Skip the whole file when the python 'pycox' backend is unavailable.
skip_if_no_pycox()

# Seed R, numpy and torch so the model fits below are reproducible.
set.seed(1)
np <- reticulate::import("numpy")
np$random$seed(1L)
torch <- reticulate::import("torch")
torch$manual_seed(1L)
# Each supported optimiser name should resolve to the matching
# torch.optim class when paired with a trivial 1-1-1 pytorch network.
test_that("get_pycox_optim", {
  net <- build_pytorch_net(1L, 1L, 1L)
  # map of optimiser name -> expected torch class
  expected <- c(
    adadelta = "torch.optim.adadelta.Adadelta",
    adagrad  = "torch.optim.adagrad.Adagrad",
    adamax   = "torch.optim.adamax.Adamax",
    adam     = "torch.optim.adam.Adam",
    adamw    = "torch.optim.adamw.AdamW",
    asgd     = "torch.optim.asgd.ASGD",
    rmsprop  = "torch.optim.rmsprop.RMSprop",
    rprop    = "torch.optim.rprop.Rprop",
    sgd      = "torch.optim.sgd.SGD"
  )
  for (nm in names(expected)) {
    expect_is(get_pycox_optim(nm, net), expected[[nm]])
  }
  # NOTE(review): the sparse_adam expectation was disabled upstream -
  # confirm why before re-enabling.
  # expect_is(get_pycox_optim("sparse_adam", net), "torch.optim.sparse_adam.SparseAdam")
})
# get_pycox_init should return the exact python snippet used to
# initialise layer weights, with the reference values below
# interpolated into the string.
test_that("get_pycox_init", {
  # Reference values used to build the expected strings (presumably the
  # function's defaults - confirm against its signature).
  a <- 0; b <- 1; mean <- 0; std <- 1; val <- 0; gain <- 1; mode <- "fan_in"
  non_linearity <- "leaky_relu"
  expect_equal(get_pycox_init("uniform"),
               paste0("torch.nn.init.uniform_(m.weight, ", a, ", ", b, ")"))
  expect_equal(get_pycox_init("normal"),
               paste0("torch.nn.init.normal_(m.weight, ", mean, ", ", std, ")"))
  expect_equal(get_pycox_init("constant", val = val),
               paste0("torch.nn.init.constant_(m.weight, ", val, ")"))
  expect_equal(get_pycox_init("xavier_uniform"),
               paste0("torch.nn.init.xavier_uniform_(m.weight, ", gain, ")"))
  expect_equal(get_pycox_init("xavier_normal"),
               paste0("torch.nn.init.xavier_normal_(m.weight, ", gain, ")"))
  expect_equal(get_pycox_init("kaiming_uniform"),
               paste0("torch.nn.init.kaiming_uniform_(m.weight, ", a, ", '",
                      mode, "', '", non_linearity, "')"))
  expect_equal(get_pycox_init("kaiming_normal"),
               paste0("torch.nn.init.kaiming_normal_(m.weight, ", a, ", '", mode, "', '",
                      non_linearity, "')"))
  expect_equal(get_pycox_init("orthogonal"),
               paste0("torch.nn.init.orthogonal_(m.weight, ", gain, ")"))
})
# Fit a small Cox-Time model once and reuse it in the predict tests below.
fit <- coxtime(Surv(time, status) ~ ., data = rats[1:50, ], verbose = FALSE)

test_that("predict", {
  # type = "all" without distr6 returns a plain list of matrix + vector.
  p <- predict(fit, type = "all", distr6 = FALSE)
  expect_is(p, "list")
  expect_is(p$surv, "matrix")
  expect_is(p$risk, "numeric")
  # one risk per training observation; 22 columns (presumably the
  # unique event times - confirm against the fitted model)
  expect_equal(length(p$risk), 50)
  expect_equal(dim(p$surv), c(50, 22))
})
test_that("predict distr6", {
  # distr6 output is only checked when the suggested package is present.
  if (!requireNamespace("distr6", quietly = TRUE)) {
    skip("distr6 not installed.")
  }
  # With distr6 = TRUE the survival component is a VectorDistribution
  # over the 50 training observations.
  p <- predict(fit, type = "all", distr6 = TRUE)
  expect_is(p, "list")
  expect_is(p$surv, "VectorDistribution")
  expect_equal(p$surv$properties$support$power, 50)
  # type = "survival" should return a plain matrix.
  p <- predict(fit, type = "survival")
  expect_is(p, "matrix")
})
# Building a multi-layer net with per-layer activations and dropout
# rates should complete without errors or warnings.
test_that("build_pytorch_net", {
  expect_silent(build_pytorch_net(2L, 2L, c(2, 4, 8), activation = c("relu", "elu", "glu"),
                                  dropout = c(0.1, 1, 0.62)))
})
| /tests/testthat/test_pycox.R | permissive | Ppower123/survivalmodels | R | false | false | 3,040 | r | skip_if_no_pycox()
set.seed(1)
np <- reticulate::import("numpy")
np$random$seed(1L)
torch <- reticulate::import("torch")
torch$manual_seed(1L)
test_that("get_pycox_optim", {
net <- build_pytorch_net(1L, 1L, 1L)
expect_is(get_pycox_optim("adadelta", net), "torch.optim.adadelta.Adadelta")
expect_is(get_pycox_optim("adagrad", net), "torch.optim.adagrad.Adagrad")
expect_is(get_pycox_optim("adamax", net), "torch.optim.adamax.Adamax")
expect_is(get_pycox_optim("adam", net), "torch.optim.adam.Adam")
expect_is(get_pycox_optim("adamw", net), "torch.optim.adamw.AdamW")
expect_is(get_pycox_optim("asgd", net), "torch.optim.asgd.ASGD")
expect_is(get_pycox_optim("rmsprop", net), "torch.optim.rmsprop.RMSprop")
expect_is(get_pycox_optim("rprop", net), "torch.optim.rprop.Rprop")
expect_is(get_pycox_optim("sgd", net), "torch.optim.sgd.SGD")
# expect_is(get_pycox_optim("sparse_adam", net), "torch.optim.sparse_adam.SparseAdam")
})
test_that("get_pycox_init", {
a <- 0; b <- 1; mean <- 0; std <- 1; val <- 0; gain <- 1; mode <- "fan_in"
non_linearity <- "leaky_relu"
expect_equal(get_pycox_init("uniform"),
paste0("torch.nn.init.uniform_(m.weight, ", a, ", ", b, ")"))
expect_equal(get_pycox_init("normal"),
paste0("torch.nn.init.normal_(m.weight, ", mean, ", ", std, ")"))
expect_equal(get_pycox_init("constant", val = val),
paste0("torch.nn.init.constant_(m.weight, ", val, ")"))
expect_equal(get_pycox_init("xavier_uniform"),
paste0("torch.nn.init.xavier_uniform_(m.weight, ", gain, ")"))
expect_equal(get_pycox_init("xavier_normal"),
paste0("torch.nn.init.xavier_normal_(m.weight, ", gain, ")"))
expect_equal(get_pycox_init("kaiming_uniform"),
paste0("torch.nn.init.kaiming_uniform_(m.weight, ", a, ", '",
mode, "', '", non_linearity, "')"))
expect_equal(get_pycox_init("kaiming_normal"),
paste0("torch.nn.init.kaiming_normal_(m.weight, ", a, ", '", mode, "', '",
non_linearity, "')"))
expect_equal(get_pycox_init("orthogonal"),
paste0("torch.nn.init.orthogonal_(m.weight, ", gain, ")"))
})
fit <- coxtime(Surv(time, status) ~ ., data = rats[1:50, ], verbose = FALSE)
test_that("predict", {
p <- predict(fit, type = "all", distr6 = FALSE)
expect_is(p, "list")
expect_is(p$surv, "matrix")
expect_is(p$risk, "numeric")
expect_equal(length(p$risk), 50)
expect_equal(dim(p$surv), c(50, 22))
})
test_that("predict distr6", {
if (!requireNamespace("distr6", quietly = TRUE)) {
skip("distr6 not installed.")
}
p <- predict(fit, type = "all", distr6 = TRUE)
expect_is(p, "list")
expect_is(p$surv, "VectorDistribution")
expect_equal(p$surv$properties$support$power, 50)
p <- predict(fit, type = "survival")
expect_is(p, "matrix")
})
test_that("build_pytorch_net", {
expect_silent(build_pytorch_net(2L, 2L, c(2, 4, 8), activation = c("relu", "elu", "glu"),
dropout = c(0.1, 1, 0.62)))
})
|
# NOTE(review): rm(list = ls()) and an absolute setwd() in a script are
# fragile - consider project-relative paths instead.
rm(list = ls())
library(dplyr)
library(tidyverse)
library(ggplot2)
library(psych)
library(data.table)
library(TSA)
library(plyr)
library(DescTools)
library(MALDIquant)
# NOTE(review): data.table is attached twice (also above).
library(data.table)

setwd("/Users/connorstevens/Documents/Research Project/Crude Oil Data/CL_Data")
## Futures price data. ------------
# Read the first csv file (date, price, strike, expiry and iv columns).
file.names <- list.files(pattern = "data_download.*csv")
futures.price <- fread(file.names[1], select = c(1, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16))

# Parse the three date columns (stored as m/d/Y strings).
futures.price$date <- as.Date(as.character(futures.price$date), "%m/%d/%Y")
futures.price$`future expiration` <- as.Date(as.character(futures.price$`future expiration`), "%m/%d/%Y")
futures.price$expiration <- as.Date(as.character(futures.price$expiration), "%m/%d/%Y")

# Time to maturity in days; keep only options with 5-30 days left.
futures.price$ttm <- futures.price$expiration - futures.price$date
futures.price <- futures.price[futures.price$ttm <= 30 & futures.price$ttm >= 5, ]

# Drop rows with no usable price at all (both ask and settlement zero).
futures.price <- futures.price[!(futures.price$ask == 0 & futures.price$`settlement price` == 0), ]

# Option premium: the ask when it is non-zero, else the settlement
# price. (Vectorised; replaces the original per-row loop.)
futures.price$option_premium <- ifelse(futures.price$ask == 0,
                                       futures.price$`settlement price`,
                                       futures.price$ask)
# Chronological order
#futures.price <- futures.price %>% map_df(rev)
# Read the remaining csv files and append them to futures.price.
# (Originally hard-coded as 2:88; generalised to however many files
# match the pattern.)
for (file.idx in seq_along(file.names)[-1]) {
  print(file.names[file.idx])
  df.temp <- fread(file.names[file.idx], select = c(1, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16))

  # Remove NA values.
  df.temp <- na.omit(df.temp)

  # Parse the three date columns.
  df.temp$date <- as.Date(as.character(df.temp$date), "%m/%d/%Y")
  df.temp$`future expiration` <- as.Date(as.character(df.temp$`future expiration`), "%m/%d/%Y")
  df.temp$expiration <- as.Date(as.character(df.temp$expiration), "%m/%d/%Y")

  # Time to maturity; keep only options with 5-15 days left.
  # NOTE(review): the first file above keeps ttm <= 30 - confirm which
  # window is intended; the two filters are inconsistent.
  df.temp$ttm <- df.temp$expiration - df.temp$date
  df.temp <- df.temp[df.temp$ttm <= 15 & df.temp$ttm >= 5, ]

  # Drop rows with no usable price at all.
  df.temp <- df.temp[!(df.temp$ask == 0 & df.temp$`settlement price` == 0), ]

  # Option premium: ask if non-zero, otherwise the settlement price.
  # (Vectorised; the original per-row loop shadowed the outer loop
  # index and contained unreachable else-if branches.)
  df.temp$option_premium <- ifelse(df.temp$ask == 0,
                                   df.temp$`settlement price`,
                                   df.temp$ask)

  futures.price <- rbind(futures.price, df.temp)
}
# Order data chronologically.
futures.price <- arrange(futures.price, date)

# Remove observations where implied volatility is exactly -1, 0 or 1
# (placeholder/error codes in the feed).
futures.price <- futures.price[!(futures.price$iv == 1 | futures.price$iv == -1 | futures.price$iv == 0), ]

# Order by increasing strike so match.closest() can be used below.
futures.price <- futures.price[order(strike), ]

# Separate puts and calls.
Calls <- futures.price[futures.price$`call/put` == "C"]
Puts <- futures.price[futures.price$`call/put` == "P"]

# Empty frames (same columns) to collect the at-the-money options.
# (The original seeded row 1 from `df_temp_calls`/`df_temp`, which are
# undefined at this point and crashed the script in a fresh session.)
ATMCalls <- Calls[FALSE, ]
ATMPuts <- Puts[FALSE, ]
# For each call observation, find the call traded that day whose strike
# is closest to the day's underlying close (within +/- 0.5) and collect
# it as the at-the-money call.
# (Fixes in this loop: iterate over nrow(Calls) instead of
# nrow(futures.price), and append df_temp_calls[call_index, ] - the
# original used an undefined `index`.)
for (i in seq_len(nrow(Calls))) {
  # Date of this observation.
  call_date <- Calls$date[i]

  # All calls available on that day.
  df_temp_calls <- Calls[Calls$date == call_date, ]
  if (nrow(df_temp_calls) == 0) {
    next
  }

  # Underlying (adjusted close) on that day.
  call_spot <- Calls$`adjusted close`[i]

  # Index of the strike closest to spot; 0 when none within tolerance.
  call_index <- match.closest(call_spot, df_temp_calls$strike,
                              tolerance = 0.5, nomatch = 0)
  if (call_index == 0) {
    next
  }

  # Append the matched at-the-money call.
  ATMCalls <- rbind(ATMCalls, df_temp_calls[call_index, ])
}
# Analogous loop for puts: for each put observation pick the put whose
# strike is closest to that day's close and collect it in ATMPuts.
# NOTE(review): the original version of this loop was unrunnable - it
# called nrow() with no argument, tested an undefined `strike`, and
# appended to an undefined `ATMoptions` using undefined
# `df_temp`/`spot`/`index`. Reconstructed here as the puts counterpart
# of the calls loop above; confirm this matches the intended logic.
for (i in seq_len(nrow(Puts))) {
  # Date of this observation.
  put_date <- Puts$date[i]

  # All puts available on that day.
  df_temp_puts <- Puts[Puts$date == put_date, ]
  if (nrow(df_temp_puts) == 0) {
    next
  }

  # Underlying (adjusted close) on that day.
  put_spot <- Puts$`adjusted close`[i]

  # Index of the strike closest to spot; 0 when none within tolerance.
  put_index <- match.closest(put_spot, df_temp_puts$strike,
                             tolerance = 0.5, nomatch = 0)
  if (put_index == 0) {
    next
  }

  # Append the matched at-the-money put.
  ATMPuts <- rbind(ATMPuts, df_temp_puts[put_index, ])
}
# Save as new dataframe.
# NOTE(review): only futures.price is written; the ATMCalls/ATMPuts
# frames built above are never persisted - confirm whether they should
# be saved as well.
write.csv(futures.price, "OptionDataV2.csv")
# ## Sentiment data. -------------
# sen <- read.csv('indexSentimentSQLCRU_CRUOpt2003_2020.csv')
# colnames(sen) = c('date', 'sentiment')
# sen$date = as.Date(as.character(sen$date), "%Y%m%d")
#
#
# ## Merging the Price and Sentiment Data. --------------
# # Initialize.
# sen = sen[order(sen$date, decreasing = TRUE),]
#
# # Merge.
# futures.price = left_join(futures.price, sen, by = "date")
#
# # Save new dataframe with: Date, Adjusted Close, Sentiment
# write.csv(futures.price, 'Futures Prices and Sentiment.csv')
#
# ## Descriptive Statistics. ---------------
# # Futures Prices.
# describe(df1$adjusted.close)
# ## Plotting Price and Volatility with Sentiment. ------------
# # Price & Sentiment.
# ggplot(futures.price, aes(x = date, col = 'Sentiment')) +
# geom_bar(aes(y = sentiment), stat = "identity", colour = "grey") +
# geom_line(aes(y = `adjusted close` / 300 - 0.22, col = 'Futures Price Crude Oil')) +
# theme(legend.position = "right") +
# theme(legend.title = element_blank()) +
# labs(x = 'Date') +
# scale_y_continuous(
#
# # Features of the first axis
# name = "Sentiment Score",
#
# # Add a second axis and specify its features
# sec.axis = sec_axis(~.*300 + 300*0.22, name="Futures Price ($)")
# )
#
# # Volatility and Sentiment.
# adjusted.close.lag = as.vector(futures.price$`adjusted close`[1:3770])
# adjusted.close = as.vector(futures.price$`adjusted close`[2:3771])
# cont.ret = log(adjusted.close / adjusted.close.lag)
#
# futures.price$cont.ret = c(0,cont.ret)
# futures.price$rv = sqrt(futures.price$cont.ret^2)
#
# ggplot(futures.price, aes(x = date)) +
# geom_bar(aes(y = sentiment), stat = "identity", colour = "grey") +
# geom_line(aes(y = rv, col = 'Daily Realized Volatility of Underlying')) +
# labs(x = 'Date') +
# theme(legend.position = "right") +
# theme(legend.title = element_blank()) +
# scale_y_continuous(
#
# # Features of the first axis
# name = "Sentiment Score",
#
# # Add a second axis and specify its features
# sec.axis = sec_axis(~.*1, name="Realized Volatility")
# )
#
#
# ## Fit some models. -----------
# # Linear regression Sentiment & dSentiment -> Futures price
# linear.model <- lm(futures.price$`adjusted close`[2:3771] ~ futures.price$sentiment[2:3771] + diff(futures.price$sentiment))
# summary(linear.model)
# prediction <- predict(linear.model)
# plot(futures.price$date[2:3771], futures.price$`adjusted close`[2:3771], type = 'l')
# lines(futures.price$date[2:3771], prediction, col = 'red')
#
# # Linear regression Sentiment & dSentiment -> Vol Returns Futures price
# returns <- log(futures.price$`adjusted close`[2:3771]/futures.price$`adjusted close`[1:3770])
# linear.model <- lm(sqrt(returns^2) ~ futures.price$sentiment[2:3771] + diff(futures.price$sentiment))
# summary(linear.model)
# prediction <- predict(linear.model)
# plot(futures.price$date[2:3771], sqrt(returns^2), type = 'l')
# lines(futures.price$date[2:3771], prediction, col = 'red')
#
# # ARMAX model
# returns <- log(futures.price$`adjusted close`[2:3771]/futures.price$`adjusted close`[1:3770])
# armax <- arimax(futures.price$`adjusted close`, xreg = futures.price$sentiment)
# armax
# prediction <- forecast(armax)
# plot(futures.price$date[2:3771], sqrt(returns^2), type = 'l')
| /GetOptionDataV2.R | no_license | MauritsOever/Research_project_P4 | R | false | false | 9,707 | r | rm(list = ls())
library(dplyr)
library(tidyverse)
library(ggplot2)
library(psych)
library(data.table)
library(TSA)
library(plyr)
library(DescTools)
library(MALDIquant)
library(data.table)
setwd("/Users/connorstevens/Documents/Research Project/Crude Oil Data/CL_Data")
## Futures price data. ------------
# Read first csv file.
file.names <- list.files(pattern="data_download.*csv")
futures.price <- fread(file.names[1], select = c(1, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16))
# Fix dates.
futures.price$date = as.Date(as.character(futures.price$date), "%m/%d/%Y")
futures.price$`future expiration` = as.Date(as.character(futures.price$`future expiration` ), "%m/%d/%Y")
futures.price$expiration = as.Date(as.character(futures.price$expiration), "%m/%d/%Y")
#Create time to maturity column.
futures.price$ttm <- futures.price$expiration - futures.price$date
#Only options with ttm 15 days or less.
futures.price <- futures.price[futures.price$ttm <= 30 & futures.price$ttm >= 5, ]
#Remove observations for which no data is available.
futures.price <- futures.price[!(futures.price$ask == 0 & futures.price$`settlement price` == 0), ]
#Option premium equal to ask.
futures.price$option_premium <- futures.price$ask
#Get as many prices as possible. First use ask, then if zero use settlement.
for (i in 1: nrow(futures.price)){
if(futures.price$ask[i] == 0){
futures.price$option_premium[i] <- futures.price$`settlement price`[i]
}
else{
futures.price$option_premium[i] <- futures.price$ask[i]
}
}
# Chronological order
#futures.price <- futures.price %>% map_df(rev)
# Read all csv files and append underneath each other.
for (i in 2:88){
print(file.names[i])
df.temp <- fread(file.names[i], select = c(1, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16))
#Remove NA values.
df.temp <- na.omit(df.temp)
#Convert to date type.
df.temp$date = as.Date(as.character(df.temp$date), "%m/%d/%Y")
df.temp$`future expiration` = as.Date(as.character(df.temp$`future expiration` ), "%m/%d/%Y")
df.temp$expiration = as.Date(as.character(df.temp$expiration), "%m/%d/%Y")
#Create time to maturity column.
df.temp$ttm <- df.temp$expiration - df.temp$date
#Only options with ttm 15 days or less.
df.temp <- df.temp[df.temp$ttm <= 15 & df.temp$ttm >= 5, ]
#Remove observations for which no data is available.
df.temp <- df.temp[!(df.temp$ask == 0 & df.temp$`settlement price` == 0), ]
#Option premium equal to ask.
df.temp$option_premium <- df.temp$ask
#Get as many prices as possible. First use ask, then if zero use settlement.
for (i in 1: nrow(df.temp)){
if(df.temp$ask[i] == 0){
df.temp$option_premium[i] <- df.temp$`settlement price`[i]
}
else if(df.temp$ask[i] > 0) {
df.temp$option_premium[i] <- df.temp$ask[i]
}
else if(df.temp$ask[i] == 0){
df.temp$option_premium <- df.temp$`settlement price`
}
else{
df.temp <- df.temp[-i, ]
}
}
futures.price = rbind(futures.price, df.temp)
}
#Order data chronologically.
futures.price <- arrange(futures.price, date)
#Remove observations where implied volatility and/or interpolated implied vol is 1 or -1.
futures.price <- futures.price[!(futures.price$iv == 1 | futures.price$iv == -1 | futures.price$iv == 0), ]
#Order futures.price data frame in increasing order of strike so that it can be used with match.closest.
futures.price <- futures.price[order(strike),]
#Separate puts and calls.
Calls <- futures.price[futures.price$`call/put` == "C"]
Puts <- futures.price[futures.price$`call/put` == "P"]
#Create ATM options dataframe
ATMCalls <- Calls[FALSE, ]
ATMPuts <- futures.price[FALSE, ]
ATMCalls[1, ] <- df_temp_calls[160, ]
ATMPuts[1, ] <- df_temp[184, ]
for(i in 2: nrow(futures.price)){
#Get date.
call_date <- Calls$date[i]
#Limit dataframe to options available on a given day.
df_temp_calls <- Calls[Calls$date == call_date, ]
#Skip to next iteration if no options are available for that signal.
if(nrow(df_temp_calls) == 0){
next
}
#Get spot on given day.
call_spot <- Calls$`adjusted close`[i]
#Find strike closest to spot.
#strike <- min(Closest(df_temp$strike, spot))
if(match.closest(call_spot, df_temp_calls$strike, tolerance = 0.5, nomatch = 0) == 0){
call_strike <- 0
}
else{
#Set strike to closest strike available to adjusted close on signal day.
call_strike <- df_temp_calls$strike[match.closest(call_spot, df_temp_calls$strike, tolerance = 0.5, nomatch = 0)]
}
#If no option is found, skip to the next signal.
if(call_strike == 0){
next
}
#Find index in df_temp of chosen option.
call_index <- match.closest(call_spot, df_temp_calls$strike, tolerance = 0.5, nomatch = 0)
#Fill ATM options dataframe with atm options.
ATMCalls <- rbind(ATMCalls, df_temp_calls[index, ])
}
for(i in 2: nrow(futures.price)){
#Get date.
call_date <- Calls$date[i]
put_date <- Puts$date[i]
#Limit dataframe to options available on a given day.
df_temp_calls <- Calls[Calls$date == call_date, ]
df_temp_puts <- futures.price[Puts$date == put_date, ]
#Skip to next iteration if no options are available for that signal.
if(nrow() == 0){
next
}
#Get spot on given day.
call_spot <- Calls$`adjusted close`[i]
put_spot <- Puts$`adjusted close`[i]
#Find strike closest to spot.
#strike <- min(Closest(df_temp$strike, spot))
if(match.closest(call_spot, df_temp_calls$strike, tolerance = 0.5, nomatch = 0) == 0){
call_strike <- 0
}
if(match.closest(put_spot, df_temp_puts$strike, tolerance = 0.5, nomatch = 0) == 0){
put_strike <- 0
}
else{
#Set strike to closest strike available to adjusted close on signal day.
call_strike <- df_temp_calls$strike[match.closest(call_spot, df_temp_calls$strike, tolerance = 0.5, nomatch = 0)]
put_strike <- df_temp_puts$strike[match.closest(put_spot, df_temp_puts$strike, tolerance = 0.5, nomatch = 0)]
}
#If no option is found, skip to the next signal.
if(strike == 0){
next
}
#Find index in df_temp of chosen option.
index <- match.closest(spot, df_temp$strike, tolerance = 0.5, nomatch = 0)
#Fill ATM options dataframe with atm options.
ATMoptions <- rbind(ATMoptions, df_temp[index, ])
}
# Save as new dataframe.
write.csv(futures.price, "OptionDataV2.csv")
# ## Sentiment data. -------------
# sen <- read.csv('indexSentimentSQLCRU_CRUOpt2003_2020.csv')
# colnames(sen) = c('date', 'sentiment')
# sen$date = as.Date(as.character(sen$date), "%Y%m%d")
#
#
# ## Merging the Price and Sentiment Data. --------------
# # Initialize.
# sen = sen[order(sen$date, decreasing = TRUE),]
#
# # Merge.
# futures.price = left_join(futures.price, sen, by = "date")
#
# # Save new dataframe with: Date, Adjusted Close, Sentiment
# write.csv(futures.price, 'Futures Prices and Sentiment.csv')
#
# ## Descriptive Statistics. ---------------
# # Futures Prices.
# describe(df1$adjusted.close)
# ## Plotting Price and Volatility with Sentiment. ------------
# # Price & Sentiment.
# ggplot(futures.price, aes(x = date, col = 'Sentiment')) +
# geom_bar(aes(y = sentiment), stat = "identity", colour = "grey") +
# geom_line(aes(y = `adjusted close` / 300 - 0.22, col = 'Futures Price Crude Oil')) +
# theme(legend.position = "right") +
# theme(legend.title = element_blank()) +
# labs(x = 'Date') +
# scale_y_continuous(
#
# # Features of the first axis
# name = "Sentiment Score",
#
# # Add a second axis and specify its features
# sec.axis = sec_axis(~.*300 + 300*0.22, name="Futures Price ($)")
# )
#
# # Volatility and Sentiment.
# adjusted.close.lag = as.vector(futures.price$`adjusted close`[1:3770])
# adjusted.close = as.vector(futures.price$`adjusted close`[2:3771])
# cont.ret = log(adjusted.close / adjusted.close.lag)
#
# futures.price$cont.ret = c(0,cont.ret)
# futures.price$rv = sqrt(futures.price$cont.ret^2)
#
# ggplot(futures.price, aes(x = date)) +
# geom_bar(aes(y = sentiment), stat = "identity", colour = "grey") +
# geom_line(aes(y = rv, col = 'Daily Realized Volatility of Underlying')) +
# labs(x = 'Date') +
# theme(legend.position = "right") +
# theme(legend.title = element_blank()) +
# scale_y_continuous(
#
# # Features of the first axis
# name = "Sentiment Score",
#
# # Add a second axis and specify its features
# sec.axis = sec_axis(~.*1, name="Realized Volatility")
# )
#
#
# ## Fit some models. -----------
# # Linear regression Sentiment & dSentiment -> Futures price
# linear.model <- lm(futures.price$`adjusted close`[2:3771] ~ futures.price$sentiment[2:3771] + diff(futures.price$sentiment))
# summary(linear.model)
# prediction <- predict(linear.model)
# plot(futures.price$date[2:3771], futures.price$`adjusted close`[2:3771], type = 'l')
# lines(futures.price$date[2:3771], prediction, col = 'red')
#
# # Linear regression Sentiment & dSentiment -> Vol Returns Futures price
# returns <- log(futures.price$`adjusted close`[2:3771]/futures.price$`adjusted close`[1:3770])
# linear.model <- lm(sqrt(returns^2) ~ futures.price$sentiment[2:3771] + diff(futures.price$sentiment))
# summary(linear.model)
# prediction <- predict(linear.model)
# plot(futures.price$date[2:3771], sqrt(returns^2), type = 'l')
# lines(futures.price$date[2:3771], prediction, col = 'red')
#
# # ARMAX model
# returns <- log(futures.price$`adjusted close`[2:3771]/futures.price$`adjusted close`[1:3770])
# armax <- arimax(futures.price$`adjusted close`, xreg = futures.price$sentiment)
# armax
# prediction <- forecast(armax)
# plot(futures.price$date[2:3771], sqrt(returns^2), type = 'l')
|
#########################################################################
##
## This file saves only the 'positive' and 'negative' articles, and
## discards the articles that were marked as 'discard'.
##
#########################################################################

library("tm")

dir.base <- "~/Documents/Research/Media_and_Risk_Arbitrage/Empirical_030_-_RA_Work"
dir.code <- file.path(dir.base, "merger-arbitrage",
                      "105_new_classification_of_press_articles")
source(file.path(dir.code, "config.R"))
source(file.path(dir.code, "functions.R"))

## Load manually classified articles into f.train (data frame) and cp
## (corpus); drop the loaded names so they can be reused for saving.
load(file.classification.sample)
f.train <- classification.human; rm(classification.human)
cp <- cp.classification.human; rm(cp.classification.human)

## Sanity check: corpus metadata must agree with the data frame.
stopifnot(all(unlist(lapply(cp, function(x) meta(x, "LocalMetaData")$classification.training)) ==
              f.train$classification.training))

## Only keep positive ("p") and negative ("n") classifications.
## (%in% never returns NA, unlike the original chained == comparisons.)
pos.keep <- which(f.train$classification.training %in% c("p", "n"))
f.train <- f.train[pos.keep, ]
cp <- cp[pos.keep]

## Convert to class 'factor' and make "n" the reference level.
f.train$classification.training <- as.factor(f.train$classification.training)
f.train$classification.training <- relevel(f.train$classification.training, ref = "n")

## Save manual classification to file and make the file read-only.
classification.human <- f.train
cp.classification.human <- cp
save(classification.human, cp.classification.human,
     file = file.classification.sample.only.pos.neg)
Sys.chmod(file.classification.sample.only.pos.neg, mode = "0400")
| /105_new_classification_of_press_articles/script_master_03.R | no_license | nmresearch/merger_arbitrage | R | false | false | 1,618 | r | #########################################################################
##
## This file saves only the 'positive' and 'negative' articles, and
## discards the articles that were marked as 'discard'.
##
#########################################################################
library("tm")
dir.base <- "~/Documents/Research/Media_and_Risk_Arbitrage/Empirical_030_-_RA_Work"
dir.code <- file.path(dir.base, "merger-arbitrage",
"105_new_classification_of_press_articles")
source(file.path(dir.code, "config.R"))
source(file.path(dir.code, "functions.R"))
## Load manually classified articles.
load(file.classification.sample)
f.train <- classification.human; rm(classification.human)
cp <- cp.classification.human; rm(cp.classification.human)
## Sanity check.
stopifnot(all(unlist(lapply(cp, function(x) meta(x, "LocalMetaData")$classification.training)) ==
f.train$classification.training))
## Only keep positive and negative classifications.
pos.keep <- which(f.train$classification.training == "p" | f.train$classification.training == "n")
f.train <- f.train[pos.keep, ]
cp <- cp[pos.keep]
## Convert to class 'factor' and relevel.
f.train$classification.training <- as.factor(f.train$classification.training)
f.train$classification.training <- relevel(f.train$classification.training, ref = "n")
## Save manual classification to file.
classification.human <- f.train
cp.classification.human <- cp
save(classification.human, cp.classification.human,
file = file.classification.sample.only.pos.neg)
Sys.chmod(file.classification.sample.only.pos.neg, mode = "0400")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_game.R
\name{make_game}
\alias{make_game}
\title{Play with a Discrete Random Variable}
\usage{
make_game(outcomes, probs, plays)
}
\arguments{
\item{outcomes}{numerical vector of possible values of the random variable}
\item{probs}{numerical vector giving the probability distribution}
\item{plays}{number of times the random variable is simulated}
}
\value{
a function of a single parameter n, with default value 1.
n is the number of times you simulate the net winnings.
}
\description{
Makes a function that simulates a game in which
your winnings are the sum of a specified number of plays of a
discrete random variable with a specified distribution.
}
\examples{
\dontrun{
play_game <- make_game(
  outcomes = c(-1, 0, 5),
  probs = c(0.4, 0.5, 0.1),
  plays = 2000
)
## Play "plays" times, get net winnings:
play_game()
## Play "plays" times again:
play_game()
## Play "plays" times, a third time:
play_game()
## 1000 more simulations of the net winnings:
play_game(n = 1000)
}
}
\author{
Homer White \email{hwhite0@georgetowncollege.edu}
}
| /man/make_game.Rd | no_license | homerhanumat/tigerstats | R | false | true | 1,125 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_game.R
\name{make_game}
\alias{make_game}
\title{Play with a Discrete Random Variable}
\usage{
make_game(outcomes, probs, plays)
}
\arguments{
\item{outcomes}{numerical vector of possible values of the random variable}
\item{probs}{numerical vector giving the probability distribution}
\item{plays}{number of times the random variable is simulated}
}
\value{
a function of a single parameter n, with default value 1.
n is the number of times you simulate the net winnings.
}
\description{
Makes a function that simulates a game based where
your winnings are the sum of a specified number of plays of a
discrete random variable with a specified distribution.
}
\examples{
\dontrun{
play_game <- make_gmae(
outcomes = c(-1, 0, 5)
probs = c(0.4, 0.5, 0.1)
plays = 2000
)
## Play "plays" times, get net winnings:
sampler()
## Play "plays" times again:
sampler()
## Play "plays" times, a third time:
sampler()
## 1000 more simulations of the net winnings:
sampler(n = 1000)
}
}
\author{
Homer White \email{hwhite0@georgetowncollege.edu}
}
|
#' Plots a volcano plot of the results of DE genes
#' @param results
volcano_plot <- function(results, padj_thr = 1e-4, log2fc_thr = 1.5) {
results %<>%
dplyr::mutate(
padj = if_else(padj == 0, min(padj[padj > 0]), padj))
init_plot <- results %>%
ggplot(aes(text = gene, x = log2fc, y = - log10(padj), colour = in_pathway)) +
geom_point(size = 0.5) +
scale_color_manual(values = c(`CC` = "navyblue", `BB` = "red", `AA` = "orange")) +
labs(
x = "log2 fold change",
y = "-log[10](fdr)") +
geom_vline(xintercept = log2fc_thr * c(-1, 1), linetype = 2,
colour = "red") +
geom_hline(yintercept = - log10(padj_thr), linetype = 2,
colour = "red") +
cowplot::theme_minimal_grid() +
theme(legend.title = element_blank(), legend.position = "top")
ggplotly(init_plot, tooltip = "text", source = "volcano") %>%
hide_legend() %>%
config(displaylogo = FALSE)
} | /R/de_plots.R | no_license | Ong-Research/PathwayExplorer | R | false | false | 959 | r |
#' Plots a volcano plot of the results of DE genes
#' @param results
volcano_plot <- function(results, padj_thr = 1e-4, log2fc_thr = 1.5) {
results %<>%
dplyr::mutate(
padj = if_else(padj == 0, min(padj[padj > 0]), padj))
init_plot <- results %>%
ggplot(aes(text = gene, x = log2fc, y = - log10(padj), colour = in_pathway)) +
geom_point(size = 0.5) +
scale_color_manual(values = c(`CC` = "navyblue", `BB` = "red", `AA` = "orange")) +
labs(
x = "log2 fold change",
y = "-log[10](fdr)") +
geom_vline(xintercept = log2fc_thr * c(-1, 1), linetype = 2,
colour = "red") +
geom_hline(yintercept = - log10(padj_thr), linetype = 2,
colour = "red") +
cowplot::theme_minimal_grid() +
theme(legend.title = element_blank(), legend.position = "top")
ggplotly(init_plot, tooltip = "text", source = "volcano") %>%
hide_legend() %>%
config(displaylogo = FALSE)
} |
rm(list=ls(all=TRUE))
# solve inverse problem for nonlinear SDE
# dX_t = theta1 4(X_t - X_t^3) dt + dW_t
diffcoeff = 1.0
h = 0.00001
littlet = 0.5
bigt = 25
nsteps = ceiling(bigt/h)
nsaves = ceiling(bigt/littlet)
hilt = ceiling(littlet/h)
stopifnot((nsteps == (nsaves*hilt)))
ntrials = 500
x0 = 0
h12 = sqrt(h)
xtraj = matrix(0,nrow=ntrials,ncol=(nsaves+1))
xtraj[,1] = rep(x0,times=ntrials)
for (i in c(1:nsaves))
{
# print loop counter
print(i)
flush.console()
x = xtraj[,i]
for (j in c(1:hilt))
x = x + 4*(x - x^3)*h + h12*diffcoeff*rnorm(n=ntrials)
xtraj[,(i+1)] = x
}
tvec = seq(from=0,to=bigt,by=littlet)
xtraj = rbind(tvec,xtraj)
save(xtraj,file='fakedata.RData')
| /adjoint/example1_polynomial/makefakedata.R | no_license | hbhat4000/sdeinference | R | false | false | 756 | r | rm(list=ls(all=TRUE))
# solve inverse problem for nonlinear SDE
# dX_t = theta1 4(X_t - X_t^3) dt + dW_t
diffcoeff = 1.0
h = 0.00001
littlet = 0.5
bigt = 25
nsteps = ceiling(bigt/h)
nsaves = ceiling(bigt/littlet)
hilt = ceiling(littlet/h)
stopifnot((nsteps == (nsaves*hilt)))
ntrials = 500
x0 = 0
h12 = sqrt(h)
xtraj = matrix(0,nrow=ntrials,ncol=(nsaves+1))
xtraj[,1] = rep(x0,times=ntrials)
for (i in c(1:nsaves))
{
# print loop counter
print(i)
flush.console()
x = xtraj[,i]
for (j in c(1:hilt))
x = x + 4*(x - x^3)*h + h12*diffcoeff*rnorm(n=ntrials)
xtraj[,(i+1)] = x
}
tvec = seq(from=0,to=bigt,by=littlet)
xtraj = rbind(tvec,xtraj)
save(xtraj,file='fakedata.RData')
|
\name{bdendo}
\alias{bdendo}
\docType{data}
\title{A case-control study of endometrial cancer}
\description{
The \code{bdendo} data frame has 315 rows and 13 columns.
These data concern a study in which each case of endometrial cancer was
matched with 4 controls. Matching was by date of birth (within one
year), marital status, and residence.
}
\format{
This data frame contains the following columns:
\tabular{rl}{
\code{set}: \tab Case-control set: a numeric vector \cr
\code{d}: \tab Case or control: a numeric vector (1=case, 0=control) \cr
\code{gall}: \tab Gall bladder disease: a factor with levels
\code{No}
\code{Yes}. \cr
\code{hyp}: \tab Hypertension: a factor with levels
\code{No}
\code{Yes}. \cr
\code{ob}: \tab Obesity: a factor with levels
\code{No}
\code{Yes}. \cr
\code{est}: \tab A factor with levels
\code{No}
\code{Yes}. \cr
\code{dur}: \tab Duration of conjugated oestrogen therapy: an ordered factor with levels
\code{0} < \code{1} < \code{2} < \code{3} < \code{4}. \cr
\code{non}: \tab Use of non oestrogen drugs: a factor with levels
\code{No}
\code{Yes}. \cr
\code{duration}: \tab Months of oestrogen therapy: a numeric vector. \cr
\code{age}: \tab A numeric vector. \cr
\code{cest}: \tab Conjugated oestrogen dose: an ordered factor with levels
\code{0} < \code{1} < \code{2} < \code{3}. \cr
\code{agegrp}: \tab A factor with levels
\code{55-59}
\code{60-64}
\code{65-69}
\code{70-74}
\code{75-79}
\code{80-84} \cr
\code{age3}: \tab a factor with levels
\code{<64}
\code{65-74}
\code{75+} \cr
}
}
\source{
Breslow NE, and Day N, Statistical Methods in Cancer Research. Volume
I: The Analysis of Case-Control Studies. IARC Scientific
Publications, IARC:Lyon, 1980.
}
\examples{
data(bdendo)
}
\keyword{datasets}
| /man/bdendo.Rd | no_license | mkim0710/Epi | R | false | false | 2,119 | rd | \name{bdendo}
\alias{bdendo}
\docType{data}
\title{A case-control study of endometrial cancer}
\description{
The \code{bdendo} data frame has 315 rows and 13 columns.
These data concern a study in which each case of endometrial cancer was
matched with 4 controls. Matching was by date of birth (within one
year), marital status, and residence.
}
\format{
This data frame contains the following columns:
\tabular{rl}{
\code{set}: \tab Case-control set: a numeric vector \cr
\code{d}: \tab Case or control: a numeric vector (1=case, 0=control) \cr
\code{gall}: \tab Gall bladder disease: a factor with levels
\code{No}
\code{Yes}. \cr
\code{hyp}: \tab Hypertension: a factor with levels
\code{No}
\code{Yes}. \cr
\code{ob}: \tab Obesity: a factor with levels
\code{No}
\code{Yes}. \cr
\code{est}: \tab A factor with levels
\code{No}
\code{Yes}. \cr
\code{dur}: \tab Duration of conjugated oestrogen therapy: an ordered factor with levels
\code{0} < \code{1} < \code{2} < \code{3} < \code{4}. \cr
\code{non}: \tab Use of non oestrogen drugs: a factor with levels
\code{No}
\code{Yes}. \cr
\code{duration}: \tab Months of oestrogen therapy: a numeric vector. \cr
\code{age}: \tab A numeric vector. \cr
\code{cest}: \tab Conjugated oestrogen dose: an ordered factor with levels
\code{0} < \code{1} < \code{2} < \code{3}. \cr
\code{agegrp}: \tab A factor with levels
\code{55-59}
\code{60-64}
\code{65-69}
\code{70-74}
\code{75-79}
\code{80-84} \cr
\code{age3}: \tab a factor with levels
\code{<64}
\code{65-74}
\code{75+} \cr
}
}
\source{
Breslow NE, and Day N, Statistical Methods in Cancer Research. Volume
I: The Analysis of Case-Control Studies. IARC Scientific
Publications, IARC:Lyon, 1980.
}
\examples{
data(bdendo)
}
\keyword{datasets}
|
pollutantmean <- function(directory, pollutant, id=1:332){
files_list <- list.files(directory, full.names = TRUE)
all_data <- data.frame()
for (i in id) {
all_data <- rbind(all_data, read.csv(files_list[i]))
}
if(pollutant == "nitrate") {
x <- all_data[is.na(all_data$nitrate)==FALSE,]
mean(x$nitrate)
} else if (pollutant == "sulfate") {
x <- all_data[is.na(all_data$sulfate)==FALSE,]
mean(x$sulfate)
}
}
| /pollutantmean.R | no_license | wcontractor/datasciencecoursera | R | false | false | 454 | r | pollutantmean <- function(directory, pollutant, id=1:332){
files_list <- list.files(directory, full.names = TRUE)
all_data <- data.frame()
for (i in id) {
all_data <- rbind(all_data, read.csv(files_list[i]))
}
if(pollutant == "nitrate") {
x <- all_data[is.na(all_data$nitrate)==FALSE,]
mean(x$nitrate)
} else if (pollutant == "sulfate") {
x <- all_data[is.na(all_data$sulfate)==FALSE,]
mean(x$sulfate)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storeTRANSFAC.R
\name{storeTRANSFAC}
\alias{storeTRANSFAC}
\title{A function for storing TRANSFAC files of the forked matrices.}
\usage{
storeTRANSFAC(TheObject)
}
\arguments{
\item{TheObject}{the input is the object of FPWM class that holds the raw matrices directly exported from TFregulomeR().}
}
\description{
This function generates files of regular TRANSFAC format in order to further analysis and evaluation. Each file name holdes the name of Transfactor of interest, and the co-factor that is under analysis in the current matrix.
}
| /man/storeTRANSFAC.Rd | no_license | aidaghayour/FPWM | R | false | true | 633 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storeTRANSFAC.R
\name{storeTRANSFAC}
\alias{storeTRANSFAC}
\title{A function for storing TRANSFAC files of the forked matrices.}
\usage{
storeTRANSFAC(TheObject)
}
\arguments{
\item{TheObject}{the input is the object of FPWM class that holds the raw matrices directly exported from TFregulomeR().}
}
\description{
This function generates files of regular TRANSFAC format in order to further analysis and evaluation. Each file name holdes the name of Transfactor of interest, and the co-factor that is under analysis in the current matrix.
}
|
#' Combine Model-Based Recursive Partitioning with Linear Discriminant Analysis.
#'
#' This page lists all ingredients to combine Linear Discriminant Analysis with Model-Based Recursive Partitioning
#' (\code{\link[party]{mob}} from package \pkg{party}). See the example for how to do that.
#'
#' \code{ldaModel} is an object of class \code{\link[modeltools]{StatModel-class}} implemented in package \pkg{modeltools} that
#' provides an infra-structure for an unfitted \code{\link{wlda}} model.
#'
#' Moreover, methods for \code{\link{wlda}} and \code{ldaModel} objects for the generic functions
#' \code{\link[party]{reweight}}, \code{\link[stats]{deviance}}, \code{\link[sandwich]{estfun}}, and
#' \code{\link[stats]{predict}} are provided.
#'
#' @title Combine Model-based Recursive Partitioning with Linear Discriminant Analysis
#'
#' @param object An object of class "ldaModel" and "wlda", respectively.
#' @param x An object of class "wlda".
#' @param weights A vector of observation weights.
#' @param out Should class labels or posterior probabilities be returned?
#' @param \dots Further arguments.
#'
#' @return
#' \code{reweight}: The re-weighted fitted "ldaModel" object. \cr
#' \code{deviance}: The value of the deviance for Linear Discriminant Analysis extracted from \code{object}, i.e. the log-likelihood. \cr
#' \code{estfun}: The empirical estimating (or score) function for Linear Discriminant Analysis, i.e. the derivatives of the log-likelihood with respect
#' to the parameters, evaluated at the training data. \cr
#' \code{predict}: Either a vector of predicted class labels or a matrix of class posterior probabilities.
#'
#' @seealso \code{\link[party]{reweight}}, \code{\link[stats]{deviance}}, \code{\link[sandwich]{estfun}}, \code{\link[stats]{predict}}.
#'
#' @family recursive_partitioning lda
#'
#' @references
#' Zeileis, A., Hothorn, T. and Kornik, K. (2008), Model-based recursive partitioning.
#' \emph{Journal of Computational and Graphical Statistics}, \bold{17(2)} 492--514.
#'
#' @examples
#' library(benchData)
#'
#' data <- vData(500)
#' x <- seq(0,1,0.05)
#' grid <- expand.grid(x.1 = x, x.2 = x)
#'
#' fit <- mob(y ~ x.1 + x.2 | x.1 + x.2, data = data, model = ldaModel,
#' control = mob_control(objfun = deviance, minsplit = 200))
#'
#' ## predict posterior probabilities
#' pred <- predict(fit, newdata = grid, out = "posterior")
#' post <- do.call("rbind", pred)
#'
#' image(x, x, matrix(as.numeric(post[,1]), length(x)), xlab = "x.1", ylab = "x.2")
#' contour(x, x, matrix(as.numeric(post[,1]), length(x)), levels = 0.5, add = TRUE)
#' points(data$x, pch = as.character(data$y))
#'
#' ## predict node membership
#' splits <- predict(fit, newdata = grid, type = "node")
#' contour(x, x, matrix(splits, length(x)), levels = min(splits):max(splits), add = TRUE, lty = 2)
#'
#' ## training error
#' mean(predict(fit) != as.numeric(data$y))
#'
#' @rdname ldaModel
#'
#' @import party
#' @export
ldaModel <- new("StatModel",
name = "linear discriminant analysis",
dpp = function(formula, data = list(), subset = NULL, na.action = NULL,
frame = NULL, enclos = sys.frame(sys.nframe()), other = list(),
designMatrix = TRUE, responseMatrix = TRUE, setHook = NULL, ...) {
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "na.action"), names(mf), 0)
mf <- mf[c(1, m)]
mf[[1]] <- as.name("model.frame")
mf$na.action <- stats::na.pass
MEF <- new("ModelEnvFormula")
MEF@formula <- c(modeltools:::ParseFormula(formula, data = data)@formula,
other)
MEF@hooks$set <- setHook
if (is.null(frame))
frame <- parent.frame()
mf$subset <- try(subset)
if (inherits(mf$subset, "try-error"))
mf$subset <- NULL
MEF@get <- function(which, data = NULL, frame = parent.frame(),
envir = MEF@env) {
if (is.null(data))
RET <- get(which, envir = envir, inherits = FALSE)
else {
oldData <- get(which, envir = envir, inherits = FALSE)
if (!use.subset)
mf$subset <- NULL
mf$data <- data
mf$formula <- MEF@formula[[which]]
RET <- eval(mf, frame, enclos = enclos)
modeltools:::checkData(oldData, RET)
}
return(RET)
}
MEF@set <- function(which = NULL, data = NULL, frame = parent.frame(),
envir = MEF@env) {
if (is.null(which))
which <- names(MEF@formula)
if (any(duplicated(which)))
stop("Some model terms used more than once")
for (name in which) {
if (length(MEF@formula[[name]]) != 2)
stop("Invalid formula for ", sQuote(name))
mf$data <- data
mf$formula <- MEF@formula[[name]]
if (!use.subset)
mf$subset <- NULL
MF <- eval(mf, frame, enclos = enclos)
if (exists(name, envir = envir, inherits = FALSE))
modeltools:::checkData(get(name, envir = envir, inherits = FALSE),
MF)
assign(name, MF, envir = envir)
mt <- attr(MF, "terms")
if (name == "input" && designMatrix) {
attr(mt, "intercept") <- 0
assign("designMatrix", model.matrix(mt, data = MF,
...), envir = envir)
}
if (name == "response" && responseMatrix) {
assign("responseMatrix", MF[,1], envir = envir)
}
}
MEapply(MEF, MEF@hooks$set, clone = FALSE)
}
use.subset <- TRUE
MEF@set(which = NULL, data = data, frame = frame)
use.subset <- FALSE
if (!is.null(na.action))
MEF <- na.action(MEF)
MEF
},
fit = function (object, weights = NULL, ...) {
if (is.null(weights)) {
z <- wlda(object@get("designMatrix"), object@get("responseMatrix"), method = "ML", ...)
} else {
z <- wlda(object@get("designMatrix"), object@get("responseMatrix"), method = "ML",
weights = weights, ...)
}
class(z) <- c("ldaModel", "wlda")
z$terms <- attr(object@get("input"), "terms")
z$contrasts <- attr(object@get("designMatrix"), "contrasts")
z$xlevels <- attr(object@get("designMatrix"), "xlevels")
z$predict_response <- function(newdata = NULL) {#### prior as argument for predict?
if (!is.null(newdata)) {
penv <- new.env()
object@set("input", data = newdata, env = penv)
dm <- get("designMatrix", envir = penv, inherits = FALSE)
} else {
dm <- object@get("designMatrix")
}
lev1 <- names(z$prior)
ng <- length(lev1)
posterior <- matrix(0, ncol = ng, nrow = nrow(dm), dimnames = list(rownames(dm), lev1))
posterior[, lev1] <- sapply(lev1, function(y) log(z$prior[y]) -
0.5 * mahalanobis(dm, center = z$means[y, ], cov = z$cov))
gr <- factor(lev1[max.col(posterior)], levels = z$lev)
names(gr) <- rownames(dm)
return(gr)
}
z$addargs <- list(...)
z$ModelEnv <- object
z$statmodel <- ldaModel
z
},
predict = function (object, newdata = NULL, ...) {
object$predict_response(newdata = newdata)
},
capabilities = new("StatModelCapabilities",
weights = TRUE,
subset = TRUE
)
)
#' @rdname ldaModel
#'
#' @import party
#' @export
reweight.ldaModel <- function (object, weights, ...) {
fit <- ldaModel@fit
try(do.call("fit", c(list(object = object$ModelEnv, weights = weights), object$addargs)))
}
#' @noRd
#'
#' @importFrom stats model.matrix
#' @export
model.matrix.ldaModel <- function (object, ...)
object$ModelEnv@get("designMatrix")
#' @noRd
model.response.ldaModel <- function (object, ...)
object$ModelEnv@get("responseMatrix")
#' @rdname ldaModel
#'
#' @importFrom stats deviance
#' @export
## negative log-likelihood for wlda
## if classes are missing in the training data their weights are 0
## instead of calculating the quantities for all observations and then multipliying by 0 or >0 before summing them up
## calculate them only for those observations with weights >0
deviance.wlda <- function (object, ...) {
try({
wts <- weights(object)
if (is.null(wts))
wts <- 1
indw <- wts > 0
xmat <- model.matrix(object, ...)[indw, , drop = FALSE]
gr <- model.response.ldaModel(object, ...)[indw]
## check
# ng <- nlevels(gr)
# lev1 <- names(object$prior)
# post <- matrix(NA, ncol = ng, nrow = nrow(xmat), dimnames = list(rownames(xmat), levels(gr)))
# post[,lev1] <- sapply(lev1, function(z) log(object$prior[z]) + dmvnorm(xmat, object$means[z,], object$cov, log = TRUE))
# print(head(cbind(gr, post)))
# ll <- post[cbind(rownames(post), as.character(gr))]
# print(head(-ll))
pr <- object$prior[as.character(gr)]
z <- xmat - object$means[as.character(gr), , drop = FALSE]
# print(head(-log(pr) + ncol(xmat)/2 * log(2*pi) + 0.5 * determinant(object$cov)$modulus + 0.5 * mahalanobis(z, 0, object$cov)))
return(sum(wts[indw] * (-log(pr) + 0.5 * determinant(object$cov)$modulus + 0.5 * mahalanobis(z, 0, object$cov))))
})
return(Inf)
}
#' @noRd
#'
#' @export
'deviance.try-error' <- function(object, ...) Inf
#' @rdname ldaModel
#'
#' @importFrom sandwich estfun
#' @export
estfun.wlda <- function(x, ...) {
wts <- weights(x)
if (is.null(wts))
wts <- 1
xmat <- model.matrix(x, ...)
gr <- as.factor(model.response.ldaModel(x, ...))
### scores with respect to priors
dPrior <- diag(nlevels(gr))[gr,] # zero-one class indicator matrix, number of columns equals total number of classes
colnames(dPrior) <- levels(gr)
d <- dPrior <- dPrior[,names(x$prior), drop = FALSE] # select columns that belong to classes present in the current subset
dPrior <- wts * t(-t(dPrior) + as.vector(x$prior)) # calculate scores
if (ncol(dPrior) > 1) # if dPrior has more than 2 columns drop the first one in order to prevent linear dependencies (n x (K-1) matrix)
dPrior <- dPrior[,-1, drop = FALSE]
# else: if dPrior has only one column there is only one class present in the training data and a try-error will occur in the fluctuation tets
## scores with respect to means
p <- ncol(xmat)
n <- nrow(xmat)
K <- ncol(d)
z <- matrix(0, n, p)
indw <- wts > 0
z[indw,] <- xmat[indw, , drop = FALSE] - x$means[as.character(gr[indw]), , drop = FALSE]
cov.inv <- solve(x$cov)
dMean <- d[,rep(1:K, each = p), drop = FALSE] * (-wts * z %*% cov.inv)[,rep(1:p, K), drop = FALSE] # n x (K * V) matrix
## scores with respect to cov
inds <- cbind(rep(1:p, each = p), rep(1:p, p))
inds <- inds[inds[,1] <= inds[,2], , drop = FALSE]
f <- function(ind, cov.inv, z) {
S <- cov.inv[,ind[1],drop = FALSE] %*% cov.inv[ind[2],,drop = FALSE]
return(wts * 0.5 * (cov.inv[ind[1], ind[2]] - mahalanobis(z, center = 0, cov = S, inverted = TRUE)))
}
dCov <- apply(inds, 1, f, cov.inv = cov.inv, z = z)
# checks
# print(0.5 * cov.inv - 0.5 * cov.inv %*% z[1,] %*% t(z[1,]) %*% cov.inv)
# print(0.5 * cov.inv - 0.5 * cov.inv %*% z[2,] %*% t(z[2,]) %*% cov.inv)
# print(0.5 * cov.inv - 0.5 * cov.inv %*% z[3,] %*% t(z[3,]) %*% cov.inv)
# print(head(dCov))
# print(cbind(gr, dPrior, dMean, dCov))
# print(colSums(cbind(dPrior, dMean, dCov)))
# print(cor(cbind(dPrior, dMean, dCov)[x$weights>0,,drop = FALSE]))
return(cbind(dPrior, dMean, dCov))
}
#' @rdname ldaModel
#'
#' @export
predict.ldaModel <- function(object, out = c("class", "posterior"), ...) {
pred <- NextMethod(object, ...)
out <- match.arg(out)
pred <- switch(out,
class = pred$class,
posterior = {
post <- pred$posterior
lapply(seq_len(nrow(post)), function(i) post[i,, drop = FALSE])
})
return(pred)
}
| /R/Model_lda.R | permissive | schiffner/locClass | R | false | false | 11,925 | r | #' Combine Model-Based Recursive Partitioning with Linear Discriminant Analysis.
#'
#' This page lists all ingredients to combine Linear Discriminant Analysis with Model-Based Recursive Partitioning
#' (\code{\link[party]{mob}} from package \pkg{party}). See the example for how to do that.
#'
#' \code{ldaModel} is an object of class \code{\link[modeltools]{StatModel-class}} implemented in package \pkg{modeltools} that
#' provides an infra-structure for an unfitted \code{\link{wlda}} model.
#'
#' Moreover, methods for \code{\link{wlda}} and \code{ldaModel} objects for the generic functions
#' \code{\link[party]{reweight}}, \code{\link[stats]{deviance}}, \code{\link[sandwich]{estfun}}, and
#' \code{\link[stats]{predict}} are provided.
#'
#' @title Combine Model-based Recursive Partitioning with Linear Discriminant Analysis
#'
#' @param object An object of class "ldaModel" and "wlda", respectively.
#' @param x An object of class "wlda".
#' @param weights A vector of observation weights.
#' @param out Should class labels or posterior probabilities be returned?
#' @param \dots Further arguments.
#'
#' @return
#' \code{reweight}: The re-weighted fitted "ldaModel" object. \cr
#' \code{deviance}: The value of the deviance for Linear Discriminant Analysis extracted from \code{object}, i.e. the log-likelihood. \cr
#' \code{estfun}: The empirical estimating (or score) function for Linear Discriminant Analysis, i.e. the derivatives of the log-likelihood with respect
#' to the parameters, evaluated at the training data. \cr
#' \code{predict}: Either a vector of predicted class labels or a matrix of class posterior probabilities.
#'
#' @seealso \code{\link[party]{reweight}}, \code{\link[stats]{deviance}}, \code{\link[sandwich]{estfun}}, \code{\link[stats]{predict}}.
#'
#' @family recursive_partitioning lda
#'
#' @references
#' Zeileis, A., Hothorn, T. and Kornik, K. (2008), Model-based recursive partitioning.
#' \emph{Journal of Computational and Graphical Statistics}, \bold{17(2)} 492--514.
#'
#' @examples
#' library(benchData)
#'
#' data <- vData(500)
#' x <- seq(0,1,0.05)
#' grid <- expand.grid(x.1 = x, x.2 = x)
#'
#' fit <- mob(y ~ x.1 + x.2 | x.1 + x.2, data = data, model = ldaModel,
#' control = mob_control(objfun = deviance, minsplit = 200))
#'
#' ## predict posterior probabilities
#' pred <- predict(fit, newdata = grid, out = "posterior")
#' post <- do.call("rbind", pred)
#'
#' image(x, x, matrix(as.numeric(post[,1]), length(x)), xlab = "x.1", ylab = "x.2")
#' contour(x, x, matrix(as.numeric(post[,1]), length(x)), levels = 0.5, add = TRUE)
#' points(data$x, pch = as.character(data$y))
#'
#' ## predict node membership
#' splits <- predict(fit, newdata = grid, type = "node")
#' contour(x, x, matrix(splits, length(x)), levels = min(splits):max(splits), add = TRUE, lty = 2)
#'
#' ## training error
#' mean(predict(fit) != as.numeric(data$y))
#'
#' @rdname ldaModel
#'
#' @import party
#' @export
ldaModel <- new("StatModel",
name = "linear discriminant analysis",
dpp = function(formula, data = list(), subset = NULL, na.action = NULL,
frame = NULL, enclos = sys.frame(sys.nframe()), other = list(),
designMatrix = TRUE, responseMatrix = TRUE, setHook = NULL, ...) {
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "na.action"), names(mf), 0)
mf <- mf[c(1, m)]
mf[[1]] <- as.name("model.frame")
mf$na.action <- stats::na.pass
MEF <- new("ModelEnvFormula")
MEF@formula <- c(modeltools:::ParseFormula(formula, data = data)@formula,
other)
MEF@hooks$set <- setHook
if (is.null(frame))
frame <- parent.frame()
mf$subset <- try(subset)
if (inherits(mf$subset, "try-error"))
mf$subset <- NULL
MEF@get <- function(which, data = NULL, frame = parent.frame(),
envir = MEF@env) {
if (is.null(data))
RET <- get(which, envir = envir, inherits = FALSE)
else {
oldData <- get(which, envir = envir, inherits = FALSE)
if (!use.subset)
mf$subset <- NULL
mf$data <- data
mf$formula <- MEF@formula[[which]]
RET <- eval(mf, frame, enclos = enclos)
modeltools:::checkData(oldData, RET)
}
return(RET)
}
MEF@set <- function(which = NULL, data = NULL, frame = parent.frame(),
envir = MEF@env) {
if (is.null(which))
which <- names(MEF@formula)
if (any(duplicated(which)))
stop("Some model terms used more than once")
for (name in which) {
if (length(MEF@formula[[name]]) != 2)
stop("Invalid formula for ", sQuote(name))
mf$data <- data
mf$formula <- MEF@formula[[name]]
if (!use.subset)
mf$subset <- NULL
MF <- eval(mf, frame, enclos = enclos)
if (exists(name, envir = envir, inherits = FALSE))
modeltools:::checkData(get(name, envir = envir, inherits = FALSE),
MF)
assign(name, MF, envir = envir)
mt <- attr(MF, "terms")
if (name == "input" && designMatrix) {
attr(mt, "intercept") <- 0
assign("designMatrix", model.matrix(mt, data = MF,
...), envir = envir)
}
if (name == "response" && responseMatrix) {
assign("responseMatrix", MF[,1], envir = envir)
}
}
MEapply(MEF, MEF@hooks$set, clone = FALSE)
}
use.subset <- TRUE
MEF@set(which = NULL, data = data, frame = frame)
use.subset <- FALSE
if (!is.null(na.action))
MEF <- na.action(MEF)
MEF
},
fit = function (object, weights = NULL, ...) {
if (is.null(weights)) {
z <- wlda(object@get("designMatrix"), object@get("responseMatrix"), method = "ML", ...)
} else {
z <- wlda(object@get("designMatrix"), object@get("responseMatrix"), method = "ML",
weights = weights, ...)
}
class(z) <- c("ldaModel", "wlda")
z$terms <- attr(object@get("input"), "terms")
z$contrasts <- attr(object@get("designMatrix"), "contrasts")
z$xlevels <- attr(object@get("designMatrix"), "xlevels")
z$predict_response <- function(newdata = NULL) {#### prior as argument for predict?
if (!is.null(newdata)) {
penv <- new.env()
object@set("input", data = newdata, env = penv)
dm <- get("designMatrix", envir = penv, inherits = FALSE)
} else {
dm <- object@get("designMatrix")
}
lev1 <- names(z$prior)
ng <- length(lev1)
posterior <- matrix(0, ncol = ng, nrow = nrow(dm), dimnames = list(rownames(dm), lev1))
posterior[, lev1] <- sapply(lev1, function(y) log(z$prior[y]) -
0.5 * mahalanobis(dm, center = z$means[y, ], cov = z$cov))
gr <- factor(lev1[max.col(posterior)], levels = z$lev)
names(gr) <- rownames(dm)
return(gr)
}
z$addargs <- list(...)
z$ModelEnv <- object
z$statmodel <- ldaModel
z
},
predict = function (object, newdata = NULL, ...) {
object$predict_response(newdata = newdata)
},
capabilities = new("StatModelCapabilities",
weights = TRUE,
subset = TRUE
)
)
#' @rdname ldaModel
#'
#' @import party
#' @export
reweight.ldaModel <- function (object, weights, ...) {
fit <- ldaModel@fit
try(do.call("fit", c(list(object = object$ModelEnv, weights = weights), object$addargs)))
}
#' @noRd
#'
#' @importFrom stats model.matrix
#' @export
model.matrix.ldaModel <- function (object, ...)
object$ModelEnv@get("designMatrix")
#' @noRd
model.response.ldaModel <- function (object, ...)
object$ModelEnv@get("responseMatrix")
#' @rdname ldaModel
#'
#' @importFrom stats deviance
#' @export
## negative log-likelihood for wlda
## if classes are missing in the training data their weights are 0
## instead of calculating the quantities for all observations and then multipliying by 0 or >0 before summing them up
## calculate them only for those observations with weights >0
deviance.wlda <- function (object, ...) {
try({
wts <- weights(object)
if (is.null(wts))
wts <- 1
indw <- wts > 0
xmat <- model.matrix(object, ...)[indw, , drop = FALSE]
gr <- model.response.ldaModel(object, ...)[indw]
## check
# ng <- nlevels(gr)
# lev1 <- names(object$prior)
# post <- matrix(NA, ncol = ng, nrow = nrow(xmat), dimnames = list(rownames(xmat), levels(gr)))
# post[,lev1] <- sapply(lev1, function(z) log(object$prior[z]) + dmvnorm(xmat, object$means[z,], object$cov, log = TRUE))
# print(head(cbind(gr, post)))
# ll <- post[cbind(rownames(post), as.character(gr))]
# print(head(-ll))
pr <- object$prior[as.character(gr)]
z <- xmat - object$means[as.character(gr), , drop = FALSE]
# print(head(-log(pr) + ncol(xmat)/2 * log(2*pi) + 0.5 * determinant(object$cov)$modulus + 0.5 * mahalanobis(z, 0, object$cov)))
return(sum(wts[indw] * (-log(pr) + 0.5 * determinant(object$cov)$modulus + 0.5 * mahalanobis(z, 0, object$cov))))
})
return(Inf)
}
#' @noRd
#'
#' @export
'deviance.try-error' <- function(object, ...) Inf
#' @rdname ldaModel
#'
#' @importFrom sandwich estfun
#' @export
estfun.wlda <- function(x, ...) {
wts <- weights(x)
if (is.null(wts))
wts <- 1
xmat <- model.matrix(x, ...)
gr <- as.factor(model.response.ldaModel(x, ...))
### scores with respect to priors
dPrior <- diag(nlevels(gr))[gr,] # zero-one class indicator matrix, number of columns equals total number of classes
colnames(dPrior) <- levels(gr)
d <- dPrior <- dPrior[,names(x$prior), drop = FALSE] # select columns that belong to classes present in the current subset
dPrior <- wts * t(-t(dPrior) + as.vector(x$prior)) # calculate scores
if (ncol(dPrior) > 1) # if dPrior has more than 2 columns drop the first one in order to prevent linear dependencies (n x (K-1) matrix)
dPrior <- dPrior[,-1, drop = FALSE]
# else: if dPrior has only one column there is only one class present in the training data and a try-error will occur in the fluctuation tets
## scores with respect to means
p <- ncol(xmat)
n <- nrow(xmat)
K <- ncol(d)
z <- matrix(0, n, p)
indw <- wts > 0
z[indw,] <- xmat[indw, , drop = FALSE] - x$means[as.character(gr[indw]), , drop = FALSE]
cov.inv <- solve(x$cov)
dMean <- d[,rep(1:K, each = p), drop = FALSE] * (-wts * z %*% cov.inv)[,rep(1:p, K), drop = FALSE] # n x (K * V) matrix
## scores with respect to cov
inds <- cbind(rep(1:p, each = p), rep(1:p, p))
inds <- inds[inds[,1] <= inds[,2], , drop = FALSE]
f <- function(ind, cov.inv, z) {
S <- cov.inv[,ind[1],drop = FALSE] %*% cov.inv[ind[2],,drop = FALSE]
return(wts * 0.5 * (cov.inv[ind[1], ind[2]] - mahalanobis(z, center = 0, cov = S, inverted = TRUE)))
}
dCov <- apply(inds, 1, f, cov.inv = cov.inv, z = z)
# checks
# print(0.5 * cov.inv - 0.5 * cov.inv %*% z[1,] %*% t(z[1,]) %*% cov.inv)
# print(0.5 * cov.inv - 0.5 * cov.inv %*% z[2,] %*% t(z[2,]) %*% cov.inv)
# print(0.5 * cov.inv - 0.5 * cov.inv %*% z[3,] %*% t(z[3,]) %*% cov.inv)
# print(head(dCov))
# print(cbind(gr, dPrior, dMean, dCov))
# print(colSums(cbind(dPrior, dMean, dCov)))
# print(cor(cbind(dPrior, dMean, dCov)[x$weights>0,,drop = FALSE]))
return(cbind(dPrior, dMean, dCov))
}
#' @rdname ldaModel
#'
#' @export
predict.ldaModel <- function(object, out = c("class", "posterior"), ...) {
pred <- NextMethod(object, ...)
out <- match.arg(out)
pred <- switch(out,
class = pred$class,
posterior = {
post <- pred$posterior
lapply(seq_len(nrow(post)), function(i) post[i,, drop = FALSE])
})
return(pred)
}
|
suppressWarnings( library(RMySQL))
suppressWarnings( library(fasttime))
suppressWarnings( library(data.table))
suppressWarnings( library(knitr))
suppressWarnings( library(dplyr))
suppressWarnings( library(xts))
suppressWarnings( library(scales))
suppressWarnings( library(DT))
suppressWarnings(library(shinyBS))
Sys.setenv(TZ='GMT')
MarketData <- function(date, from, to, symbol, Level1, host) {
  # Load the pre-exported market-data snapshot and keep only rows whose
  # timestamp falls inside [date from, date to].
  # NOTE(review): symbol, Level1 and host are currently unused -- the CSV
  # appears to be pre-filtered per symbol; confirm with callers.
  md <- read.csv("./data/Md.csv")
  md$Time <- as.character(md$Time)
  lower <- paste0(date, " ", from)
  upper <- paste0(date, " ", to)
  md <- md[md$Time >= lower & md$Time <= upper, ]
  if (nrow(md) == 0) {
    md <- data.frame()
  } else {
    # Show microseconds when printing; Time is parsed at microsecond precision.
    op <- options(digits.secs = 6)
    md$Time <- fastPOSIXct(md$Time, required.components = 6L, tz = "GMT")
  }
  return(md)
}
MarketDataFutures <- function(date, from, to, symbol, Level1) {
  # Futures variant: reads Md.csv from the working directory and returns it
  # without any time-window filtering.
  # NOTE(review): all five arguments are currently unused -- confirm the CSV
  # is pre-filtered upstream.
  md <- read.csv("Md.csv")
  if (nrow(md) == 0) {
    md <- data.frame()
  } else {
    # Show microseconds when printing; Time is parsed at microsecond precision.
    op <- options(digits.secs = 6)
    md$Time <- fastPOSIXct(md$Time, required.components = 6L, tz = "GMT")
  }
  return(md)
}
Printu <- function(date, from, to, symbol, host) {
  # Load the "prints" snapshot; every row is returned (no window filter).
  # NOTE(review): date/from/to/symbol/host are currently unused here.
  prints <- read.csv("./data/Printu.csv")
  op <- options(digits.secs = 6)
  prints$Time <- fastPOSIXct(prints$Time, required.components = 6L, tz = "GMT")
  return(prints)
}
PrevCLX2 <- function(date, symbol) {
  # Previous-close snapshot for equities.
  # NOTE(review): date and symbol are currently unused -- the CSV appears to
  # be pre-filtered; confirm with callers.
  read.csv("./data/PrevCLX.csv")
}
PrevCLXFutures <- function(date, symbol) {
  # Previous-close snapshot for futures.
  # NOTE(review): date and symbol are currently unused here.
  #
  # Bug fix: the original ended with `return(yClose)`, but `yClose` is never
  # defined in this function, so every call raised
  # "object 'yClose' not found". The caller (PrevClose()) reads `$tPrice`
  # from the result, i.e. it expects the data frame read from the CSV,
  # exactly as PrevCLX2() returns it.
  data <- read.csv("./data/PrevCLX.csv")
  return(data)
}
Orders <- function(date, from, to, symbol) {
  # Load the order log; timestamps are parsed at microsecond precision and
  # the strategy column is normalised to character.
  # NOTE(review): date/from/to/symbol are currently unused here.
  orders <- read.csv("./data/Orders.csv")
  op <- options(digits.secs = 6)
  orders$Time <- fastPOSIXct(orders$Time, required.components = 6L, tz = "GMT")
  orders$strategy <- as.character(orders$strategy)
  return(orders)
}
OrderTable <- function(date, from, to, symbol, orderid) {
  # Stub: per-order detail lookup is not available in this offline build,
  # so an empty data frame is always returned.
  data.frame()
}
News <- function(date, from, to, symbol) {
  # Load the news feed and keep headlines inside [date from, date to].
  # NOTE(review): symbol is currently unused -- confirm the CSV is
  # pre-filtered per symbol.
  feed <- read.csv("./data/News.csv")
  feed$Time <- as.character(feed$Time)
  lower <- paste0(date, " ", from)
  upper <- paste0(date, " ", to)
  feed <- feed[feed$Time >= lower & feed$Time <= upper, ]
  return(feed)
}
Nav <- function(date, from, to, symbol, scale, host) {
  # Stub: NAV data is not available in this offline build, so an empty
  # data frame is always returned regardless of the arguments.
  data.frame()
}
checkErrors <- function(data) {
  # Validate that (a) the data source is reachable and (b) `data` evaluates
  # to a non-empty data frame. Shows a Shiny modal describing the problem
  # and returns TRUE when the caller should abort rendering; returns FALSE
  # when everything is fine.
  #
  # Helper so the identical "empty data" modal is defined only once.
  emptyDataModal <- function() {
    showModal(modalDialog(
      title = "Warning message",
      " Message: Empty data. Choose another day :-) ",
      size = "m",
      easyClose = TRUE
    ))
  }
  out <- tryCatch(
    {
      # mydb = dbConnect(MySQL(), ...) -- real connectivity probe disabled
      # in this offline build; print() stands in for it.
      print("Connection")
    },
    error = function(cond) FALSE,
    warning = function(cond) NULL
  )
  # Bug fix: the original tested `out == FALSE`, which errors when the
  # warning handler fires (NULL == FALSE is logical(0), invalid in if()).
  # Treat both the error (FALSE) and warning (NULL) outcomes as a failed
  # connection.
  if (is.null(out) || isFALSE(out)) {
    showModal(modalDialog(
      title = "Warning message",
      "Failed to connect to database: Error: Can't connect to MySQL server ",
      size = "m",
      easyClose = TRUE
    ))
    return(TRUE)
  }
  # `data` may be a promise whose evaluation fails (an upstream reactive
  # error); force it inside try() and report that as "empty data".
  if (inherits(try(data, silent = TRUE), "try-error")) {
    emptyDataModal()
    return(TRUE)
  }
  if (nrow(data) < 1) {
    emptyDataModal()
    return(TRUE)
  }
  return(FALSE)
}
###Shiny server
shinyServer(function(input, output, session) {
dateText<- renderText({
input$go
isolate({as.character(input$date)})
})
### Define selected host
host<- reactive({
if (input$host==1) {
host<- "192"
} else {
host<- "10"
}
return(host)
})
observeEvent(input$help, {
toggle("text_help")
})
data<- eventReactive(input$go, {
rm(list=ls(all=TRUE))
withProgress( message = 'Data downloading', value = 0.2, {
if (input$futures) {
dd<-MarketDataFutures(date=dateText(), from=input$from, to=input$to, symbol=input$text, Level1=input$level1)
} else {
dd<- MarketData(date=dateText(), from=input$from, to=input$to, symbol=input$text, Level1=input$level1, host=host())
}
incProgress(1)
setProgress(1)
})
return(dd)
})
data1<- reactive({
input$go
isolate({
if (nrow(data())>1) {
if (input$futures) {
if (input$level1) {
data1<- data()[(data()$Reason=="Trade")|(data()$Reason=="Level1"),
c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1", "tSide",
"MidPrice", "tPrice", "tType", "color") ]
} else {
data1<- data()[(data()$Reason=="Trade"),
c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1",
"tSide", "MidPrice", "tPrice", "tType", "color") ]
}
} else {
if (input$level1) {
data1<- data()[(data()$Reason=="Trade")|(data()$Reason=="Level1"),
c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1", "tSide","tType" ,"MidPrice",
"tPrice", "iCBC", "color") ]
} else {
data1<- data()[(data()$Reason=="Trade"),
c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1", "tSide", "tType","MidPrice",
"tPrice", "iCBC", "color") ]
}
}
} else {
data1<- data.frame()
}
message(paste0("Data1 shape: ", nrow(data1)))
isErrors<- checkErrors(data=data1)
message(paste0("Errors: ", isErrors))
return(data1)
})
})
###Disable or enable inputs depending to DB
observeEvent(input$futures,
if (input$futures) {
disable("strat")
disable("news")
disable("icbc")
disable("nav")
disable("colorEx")
disable("host")
} else {
enable("strat")
enable("news")
enable("icbc")
enable("nav")
enable("colorEx")
enable("host")
})
delta<- reactive({ as.numeric(max(data1()$Time) - min(data1()$Time), units="secs") })
f<- reactive({ f<-as.xts(data1(), order.by=data1()[, 1], frequency=NULL)
return (f)})
Seconds<- reactive({
if (input$level1) {
temp<- f()[f()$Reason=="Level1", ]
ep1 <- endpoints(temp,'seconds')
data1<- as.data.frame(period.apply(temp, INDEX=ep1, FUN=function(x) tail(x, 1)))
row.names(data1)<- NULL
temp2<- f()[f()$Reason!="Level1"]
ep2 <- endpoints(temp2,'seconds')
data2<- as.data.frame(period.apply(temp2, INDEX=ep2, FUN=function(x) tail(x, 1)))
row.names(data2)<- NULL
data<- rbind(data1, data2)
} else {
ep <- endpoints(f(),'seconds')
data<- as.data.frame(period.apply(f(), INDEX=ep, FUN=function(x) tail(x, 1)))
row.names(data)<- NULL
}
data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
data$tPrice<- as.numeric(as.character(data$tPrice))
data$Ask_P<- as.numeric(as.character(data$Ask_P))
data$Bid_P<- as.numeric(as.character(data$Bid_P))
data$tShares1<- as.character(data$tShares1)
data$tType<- as.character(data$tType)
data$color<- as.character(data$color)
data<- data[order(data$Time),]
return(data)})
Seconds10<- reactive({
if (delta()> 1800) {
if (input$level1) {
temp<- f()[f()$Reason=="Level1", ]
ep1 <- endpoints(temp,'seconds', k=10)
data1<- as.data.frame(period.apply(temp, INDEX=ep1, FUN=function(x) tail(x, 1)))
row.names(data1)<- NULL
temp2<- f()[f()$Reason!="Level1"]
ep2 <- endpoints(temp2,'seconds')
data2<- as.data.frame(period.apply(temp2, INDEX=ep2, FUN=function(x) tail(x, 1)))
row.names(data2)<- NULL
data<- rbind(data1, data2)
} else {
ep <- endpoints(f(),'seconds', k=10)
data<- as.data.frame(period.apply(f(), INDEX=ep, FUN=function(x) tail(x, 1)))
row.names(data)<- NULL
}
data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
data$tPrice<- as.numeric(as.character(data$tPrice))
data$Ask_P<- as.numeric(as.character(data$Ask_P))
data$Bid_P<- as.numeric(as.character(data$Bid_P))
data$tShares1<- as.character(data$tShares1)
data$tType<- as.character(data$tType)
data$color<- as.character(data$color)
data<- data[order(data$Time),]
} else {data<- c()}
return(data)})
Minutes<- reactive({
if (delta()> 1800) {
if (input$level1) {
temp<- f()[f()$Reason=="Level1", ]
ep1 <- endpoints(temp, 'minutes')
data1<- as.data.frame(period.apply(temp, INDEX=ep1, FUN=function(x) tail(x, 1)))
row.names(data1)<- NULL
temp2<- f()[f()$Reason!="Level1"]
ep2 <- endpoints(temp2, 'minutes')
data2<- as.data.frame(period.apply(temp2, INDEX=ep2, FUN=function(x) tail(x, 1)))
row.names(data2)<- NULL
data<- rbind(data1, data2)
} else {
ep <- endpoints(f(),'seconds', k=10)
data<- as.data.frame(period.apply(f(), INDEX=ep, FUN=function(x) tail(x, 1)))
row.names(data)<- NULL
}
data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
data$tPrice<- as.numeric(as.character(data$tPrice))
data$Ask_P<- as.numeric(as.character(data$Ask_P))
data$Bid_P<- as.numeric(as.character(data$Bid_P))
data$tShares1<- as.character(data$tShares1)
data$tType<- as.character(data$tType)
data$color<- as.character(data$color)
data<- data[order(data$Time),]
} else {data<- c()}
return(data)})
plotdelay<- reactive({
if (nrow(data1())>0) {
tDiff<- delta()
if (tDiff<= 1800) {data<-data1()} else {
if (tDiff>1800 & tDiff<3*3600) {data<-Seconds()}
if (tDiff>3*3600 & tDiff<6*(3600)) {data<-Seconds10()}
if (tDiff>6*3600) {data<-Minutes()}
}
data$tShares1<- as.numeric(data$tShares1)
data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
if (!input$futures) {
if (sum(data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")])>0) {
data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]<- as.integer(rescale(as.numeric(data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]), c(7,24)))
data$color[(data$tType=="OPG") | (data$tType=="CLX")]<- "#ffb84d"
}
}
} else {
data<- data.frame()
}
return(data)
})
Imbalance<- reactive({
if (length(input$icbc)>0) {
imb<- data()[,c("Time", "iCBC","iMarket", "iShares", "iPaired", "iExchange")]
imb[imb$Reason !="Imbalance", c("iShares", "iPaired", "iCBC", "iMarket") ]<- NA
tDiff<- delta()
if (tDiff> 3600*2) {
f= as.xts(imb, order.by=imb[, 1], frequency=NULL)
ep <- endpoints(f,'seconds', k=30)
imb1<- as.data.frame(period.apply(f, INDEX=ep, FUN=function(x) tail(x,1)))
imb1$Time<- fastPOSIXct(imb1$Time, required.components = 6L, tz = "GMT")
imb1$iCBC<- as.numeric(as.character(imb1$iCBC))
} else {
imb1<- imb
}
imb1$Time<- fastPOSIXct(imb1$Time, required.components = 6L, tz = "GMT")
m<- data.frame(Time=max(data()$Time), iCBC= NA, iMarket= NA, iShares= NA, iPaired= NA, iExchange= "")
n<- data.frame(Time=min(data()$Time), iCBC= NA, iMarket= NA, iShares= NA, iPaired= NA, iExchange= "")
imb1<- rbind(m, imb1, n)
row.names(imb1)<- NULL
} else {
imb1<- c()
}
return(imb1)
})
ImbExchange<- reactive({
imb= Imbalance()
if ("Q" %in% input$icbc) {
imb[imb$iExchange !="NSDQ", c("iShares", "iPaired", "iCBC") ]<- NA
}
if ("Y" %in% input$icbc) {
imb[imb$iExchange !="NYSE", c("iShares", "iPaired", "iCBC") ]<- NA
}
if ("A" %in% input$icbc) {
imb[imb$iExchange !="AXDP", c("iShares", "iPaired", "iCBC", "iMarket") ]<- NA
}
return(imb)
})
BottomPlot<- reactive({
if (nrow(data1())>0) {
tDiff<- delta()
if (tDiff<2*3600) {data<-Seconds()} else {
if (tDiff>=2*3600 & tDiff<5*3600) {data<-Seconds10()}
if (tDiff>=5*3600) {data<-Minutes()}
}
data<- data[ ,c("Time", "MidPrice")]
} else {
data<- data.frame()
}
return(data)
})
plotDelayGreater<- reactive({
dd<- plotdelay()
if (nrow(dd)>0) {
dd<- dd[dd$tPrice>0, ]
if (input$OverLap) {dd$tPrice1<- jitter(dd$tPrice)}
}
return(dd)
})
y0<- reactive({
if (nrow(plotDelayGreater())>0) {
min(plotDelayGreater()$tPrice)
} else {min(plotdelay()$Bid_P)
}
})
y1<- reactive({
if (nrow(plotDelayGreater())>0) {
max(plotDelayGreater()$tPrice)
} else {max(plotdelay()$Ask_P)
}
})
pp<- reactive({
Printu(date =dateText(), from=input$from, to=input$to, symbol= input$text, host= host() )
})
#Order<- reactive({
# input$go
# isolate({
# dataOrders<- Orders(date=as.character(dateText()), from=input$from, to=input$to, symbol= input$text)
# return(dataOrders)
# })
#})
Order<- eventReactive(input$go, {
isolate({
dataOrders<- Orders(date=as.character(dateText()), from=input$from, to=input$to, symbol= input$text)
return(dataOrders)
})
})
Newsdata<- reactive({
if (input$news & nrow(data())>0) {News(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text)} else {c()}
})
NavData1<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
Nav(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text, host= host(), scale= 12)} else {c()}
})
})
NavData2<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
Nav(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text, host= host(), scale= 9)} else {c()}
})
})
NavData3<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
Nav(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text, host= host(), scale= 8)} else {c()}
})
})
Navdata<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
tDiff<- delta()
if (tDiff<= 100) {data<- NavData1()} else {
if (tDiff>100 & tDiff<2*3600) {data<- NavData2()}
if (tDiff>2*3600) {data<- NavData3()}
}
} else {data<- c()}
return(data)
})
})
PrevClose<- reactive({
if (input$prevclx) {
if (input$futures) {
dat<- PrevCLXFutures(date=as.character(dateText()), symbol=input$text)
data<- data.frame(Time=c(plotdelay()$Time[1], tail(plotdelay()$Time,1)), tPrice=c(dat$tPrice, dat$tPrice))
} else {
dat<- PrevCLX2(date=as.character(dateText()), symbol=input$text)
data<- data.frame(Time=c(plotdelay()$Time[1], tail(plotdelay()$Time,1)), tPrice=c(dat$tPrice, dat$tPrice))
}
} else {data<-c()}
return(data)
})
###Strategies Orders input
dd<- reactive ({
un = unique(Order()$strategy)
if (is.null(input$strat)==FALSE) {
if (input$strat != "None") {
if (input$strat %in% inputChoices()) {
dd<- Order()[Order()['strategy']==input$strat, ]
} else {dd<- data.frame() }
} else {dd<- data.frame() }
} else {dd<- data.frame() }
return(dd)
})
Size<- reactive({as.integer(6)})
alpha<- reactive({0.7})
alpha1<- reactive({0.8})
Hline<-reactive({2})
Font<- reactive({11})
fillcolor = reactive({"#ff6666"})
hollowcolor = reactive({"#39ac73"})
plotcolor = reactive({"#3E3E3E"})
papercolor = reactive({"#1E2022"})
fontcolor = reactive({"#B3A78C"})
###Top plot
trendPlot <- renderPlotly({
event<- event_data("plotly_selected", source = "subset")
event<- event[event$y>0, ]
input$go
isolate({
if (nrow(plotdelay())>0) {
p2<- as.numeric(y1())
p1<- as.numeric(y0())
tDiff<- delta()
fontcolor<- "darkblue"
xax <- list(
title = "",
tickfont = list(color = "darkblue")
)
yax <- list(
title = "",
tickfont = list(color = "darkblue")
)
navOpacity=0.8
navSize=5
if (input$radio==1) {
l<- list( color = toRGB("grey90", alpha = 0.1),
fillcolor = toRGB("grey90", alpha = 0.1),
shape = "hv",
width = .00001)
} else {
l<- list( color = toRGB("grey40", alpha = 0.1),
fillcolor = toRGB("grey40", alpha = 0.1),
shape = "hv",
width = .00001)
}
if (input$colorEx) {
colorDesc<-"c('#FF4040','#ff9900','#66b3ff','#ff00ff','#00e6e6','#9933ff','#4dff4d','#ff99dd')[factor(MsgSource)]"
} else {
colorDesc<- "color"
}
dd<- dd()
if (input$OverLap) {
y="tPrice1"
t= "paste0(tPrice, ' Shares:', tShares)"
hoverinfo= "x+text"
} else{
y="tPrice"
t= "paste0('Shares:', tShares, '<br>Source:',MsgSource)"
hoverinfo= "x+y+text"
}
withProgress( message = 'Top Chart', value = 0.4, {
if (nrow(data.frame(event)) <1 & input$spread==FALSE) {
py<- plot_ly(plotDelayGreater(), x = Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo),
marker=list(size=tShares1, color=eval(parse(text=colorDesc)), opacity= alpha(), line = list( width = .001) )) %>%
layout(showlegend = FALSE, hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
###Imbalances
if (!is.null(input$icbc)) {
py<- add_trace(ImbExchange(), x=Time, y=iCBC, name="iCBC", mode="markers", marker=list(symbol = 22, color= "BF3EFF", size=Size(), opacity= alpha1()))
py<- layout(yaxis=list(range=c( y0(), y1())))
}
###Prev Close
if (input$prevclx) {py<- add_trace(PrevClose(), x=Time, y=tPrice, line=list(width=1, color="00CD66"), marker=list(size=2), name="PrevCLX", hoverinfo = "none")}
###Prints
if (is(try(pp(), silent=T), "try-error")==FALSE) {
for (i in 1: nrow(pp())) {
py <- py %>% add_trace(x = c(pp()$Time[i],pp()$Time[i]), y = c(y0(), y1()), mode = "line",marker=list(size=1), line=list(dash="2", color=pp()$color[i]), hoverinfo = "none", evaluate=TRUE) %>%
add_trace(x = c(pp()$Time[i]-Hline(), pp()$Time[i]+Hline()), y = c(pp()$tPrice[i], pp()$tPrice[i]), marker=list(size=1), mode = "line", line=list(dash="1", color="violet"), hoverinfo = "none", evaluate=TRUE)
}
}
###Orders
if (nrow(dd)>0) {
id<- unique(dd$orderid)
for (i in 1:length(id)) {
tt<-dd[dd$orderid==id[i], ]
tt<- tt[order(tt$Time), ]
py<- py %>% add_trace( x= tt$Time, y=tt$price, mode="markers+lines",name=id[i], text=paste0("Tif:",tt$timeinforce, " Shares:", tt$Shares, "<br>Exchange:", tt$exchange),
marker=list(symbol=tt$Shape, size=tt$Size, color=tt$color),
line=list(width=0.3, color=tt$color[1]), evaluate=TRUE)
}
}
###News
if ( input$news ) {
if (nrow(Newsdata())>0) {
a<- list()
for (i in 1:nrow(Newsdata())) {
tt<- NULL
tt<- Newsdata()[i,]
a[[i]] <- list(
bordercolor="steelblue",
borderwidth=1,
bgcolor= "#F0F8FF",
arrowcolor="4F94CD",
font= list(color="darkblue", family="Droid Sans", size=Font()),
align="left",
opacity=0.8,
x =tt$Time,
y = y1()-0.0015*y1(),
text = gsub("[$]","",tt$head),
xref = "x",
yref = "y",
showarrow = TRUE,
arrowhead = 3,
ax = 20,
ay = -40)
py<- py %>% add_trace( x = c(tt$Time, tt$Time), y = c(y0(), y1()-0.0015*y1()), hoverinfo="x", marker=list(size=2, color="7093DB"), line=list(width=0.8, color="7093DB"), evaluate=TRUE)
}
py<- py %>% layout(annotations=a)
}
}
###Nav
if (length(input$nav)>0 ){
if ("B" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$B_NAV, mode="markers", marker=list(size=navSize, color="navy", opacity=navOpacity, symbol=217), name="B_Nav")
}
if ("A" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$A_NAV, mode="markers", marker=list(size=navSize, color="#99ccff", opacity=navOpacity, symbol=17), name="A_Nav")
}
if ("M" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$M_NAV, mode="markers", marker=list(size=navSize, color="#3366ff", opacity=navOpacity, symbol=24), name="M_Nav")
}
py<- layout(yaxis=list(range=c(p1-(p2-p1)*0.05, p2+(p2-p1)*0.05)))
}
###Level1
if (input$level1) {
py<- add_trace(plotdelay(), x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(plotdelay(), x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
}
if (input$volumeChart & nrow(plotDelayGreater())>0) {
#### Volume Chart
if (tDiff<=30) {x="Time1"}
if ((tDiff>30) & (tDiff<5*60)) {x="Time2"}
if ((tDiff>=5*60) & (tDiff<30*60)) {x="Time3"}
if (tDiff>=30*60) {x="Time4"}
VolumeAggregate<- plotDelayGreater()[ ,c(x, "tShares" )]
VolumeAggregate$tShares<- as.numeric(as.character(VolumeAggregate$tShares))
VolumeAggregate<- eval(parse(text=paste0("aggregate(.~",x,", data=VolumeAggregate, FUN=sum)")))
VolumeAggregate[ ,x]<- fastPOSIXct(VolumeAggregate[ ,x], required.components = 6L, tz = "GMT")
py <- add_trace(data= VolumeAggregate, x =eval(parse(text= x)), y = tShares, type = "bar", marker = list(color = "steelblue"), yaxis="y2", hoverinfo="none") %>% layout(paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(
yaxis = list(
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
domain = c(0.30, 0.95)),
yaxis2 = list(
zerolinecolor='#d9d9d9',
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
side = "left",
domain = c(0, 0.2))
)
}
}
if (nrow(data.frame(event)) <1 & input$spread==TRUE) {
if (nrow(plotDelayGreater())>0) {
py<- plot_ly(plotDelayGreater(), x = Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo),
marker=list(size=tShares1, color=eval(parse(text=colorDesc)), opacity= alpha(), line = list( width = .001) ))
} else {
py<- plot_ly(plotdelay(), x = Time, y = Bid_P, mode = "markers", text = eval(parse(text= t)), hoverinfo = "none",
marker=list(size=1, color=eval(parse(text=colorDesc)), opacity= alpha(), line = list( width = .001) ))
}
py<- add_trace(plotdelay(), x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(plotdelay(), x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
py<- layout(showlegend = FALSE, hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
###Imbalances
if (!is.null(input$icbc)) {
py<- add_trace(ImbExchange(), x=Time, y=iCBC, name="iCBC", mode="markers", marker=list(symbol = 22, color= "BF3EFF", size=Size(), opacity= alpha1()))
py<- layout(yaxis=list(range=c( y0(), y1())))
}
###Prev Close
if (input$prevclx) {py<- add_trace(PrevClose(), x=Time, y=tPrice, line=list(width=1, color="00CD66"), marker=list(size=2), name="PrevCLX", hoverinfo = "none")}
###Prints
if (is(try(pp(), silent=T), "try-error")==FALSE) {
for (i in 1: nrow(pp())) {
py <- py %>% add_trace(x = c(pp()$Time[i],pp()$Time[i]), y = c(y0(), y1()), mode = "line",marker=list(size=1), line=list(dash="2", color=pp()$color[i]), hoverinfo = "none", evaluate=TRUE) %>%
add_trace(x = c(pp()$Time[i]-Hline(),pp()$Time[i]+Hline()), y = c(pp()$tPrice[i], pp()$tPrice[i]), marker=list(size=1), mode = "line", line=list(dash="1", color="violet"), hoverinfo = "none", evaluate=TRUE)
}
}
###Orders
if (nrow(dd)>0) {
id<- unique(dd$orderid)
for (i in 1:length(id)) {
tt<-dd[dd$orderid==id[i], ]
tt<- tt[order(tt$Time), ]
py<- py %>% add_trace( x= tt$Time, y=tt$price, mode="markers+lines",name=id[i], text=paste0("Tif:",tt$timeinforce, " Shares:", tt$Shares, "<br>Exchange:", tt$exchange),
marker=list(symbol=tt$Shape, size=tt$Size, color=tt$color),
line=list(width=0.3, color=tt$color[1]), evaluate=TRUE)
}
}
###News
if ( input$news ) {
if (nrow(Newsdata())>0) {
a<- list()
for (i in 1:nrow(Newsdata())) {
tt<- NULL
tt<- Newsdata()[i,]
a[[i]] <- list(
bordercolor="steelblue",
borderwidth=1,
bgcolor= "#F0F8FF",
arrowcolor="4F94CD",
font= list(color="darkblue", family="Droid Sans", size=Font()),
align="left",
opacity= 0.8,
x =tt$Time,
y = y1()-0.0015*y1(),
text = gsub("[$]","",tt$head),
xref = "x",
yref = "y",
showarrow = TRUE,
arrowhead = 3,
ax = 20,
ay = -40)
py<- py %>% add_trace( x = c(tt$Time, tt$Time), y = c(y0(), y1()-0.0015*y1()), hoverinfo="x", marker=list(size=2, color="7093DB"), line=list(width=0.8, color="7093DB"), evaluate=TRUE)
}
py<- py %>% layout(annotations=a)
}
}
###Nav
if (length(input$nav)>0 ){
if ("B" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$B_NAV, mode="markers", marker=list(size=navSize, color="navy", opacity=navOpacity, symbol=217), name="B_Nav")
}
if ("A" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$A_NAV, mode="markers", marker=list(size=navSize, color="#99ccff", opacity=navOpacity, symbol=17), name="A_Nav")
}
if ("M" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$M_NAV, mode="markers", marker=list(size=navSize, color="#3366ff", opacity=navOpacity, symbol=24), name="M_Nav")
}
py<- layout(yaxis=list(range=c(p1-(p2-p1)*0.05, p2+(p2-p1)*0.05)))
}
if (input$volumeChart & nrow(plotDelayGreater())>0) {
#### Volume Chart
if (tDiff<=30) {x="Time1"}
if ((tDiff>30) & (tDiff<5*60)) {x="Time2"}
if ((tDiff>=5*60) & (tDiff<30*60)) {x="Time3"}
if (tDiff>=30*60) {x="Time4"}
VolumeAggregate<- plotDelayGreater()[ ,c(x, "tShares" )]
VolumeAggregate$tShares<- as.numeric(as.character(VolumeAggregate$tShares))
VolumeAggregate<- eval(parse(text=paste0("aggregate(.~",x,", data=VolumeAggregate, FUN=sum)")))
VolumeAggregate[ ,x]<- fastPOSIXct(VolumeAggregate[ ,x], required.components = 6L, tz = "GMT")
py <- add_trace(data= VolumeAggregate, x =eval(parse(text= x)), y = tShares, type = "bar", marker = list(color = "steelblue"), yaxis="y2", hoverinfo="none") %>%
layout(paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(
yaxis = list(
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
domain = c(0.30, 0.95)),
yaxis2 = list(
zerolinecolor='#d9d9d9',
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
side = "left",
domain = c(0, 0.2))
)
}
}
if (nrow(data.frame(event)) >=1 ) {
t1<- as.POSIXct(as.character(as.POSIXct(min(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")-1
t2<- as.POSIXct(as.character(as.POSIXct(max(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")+1
tDiff= as.numeric(t2-t1, units="secs")
if (tDiff<= 1800) {data<- data1()} else {
if (tDiff> 1800 & tDiff< 3600*3) {
data<-Seconds()}
if (tDiff> 3600*3 & tDiff< 3600*6) {
data<-Seconds10()}
if (tDiff> 3600*6) {
data<-Minutes()}
}
data<- data[(data$Time>=t1 & data$Time<=t2) ,]
data$tShares1<- as.numeric(data$tShares1)
data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]<- as.integer(rescale(as.numeric(data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]), c(7,24)))
data$color[(data$tType=="OPG") | (data$tType=="CLX")]<- "#ffb84d"
dataTprice<- data[data$tPrice >0 ,]
if (input$OverLap) {dataTprice$tPrice1<- jitter(dataTprice$tPrice)}
if (nrow(data.frame(event)) >=1 & input$spread==FALSE) {
py <- plot_ly(dataTprice, x= Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo), name="Price",
marker=list(size=tShares1, color=eval(parse(text=colorDesc)), opacity = alpha(), line = list( width = .001) )) %>%
layout(showlegend = FALSE, hovermode = "closest", legend = list(x = 1, y = 1),paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
###Level1
if (input$level1) {
py<- add_trace(dataTprice, x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(dataTprice, x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
}
}
if (nrow(data.frame(event)) >=1 & input$spread==TRUE) {
y0<- min(data$tPrice[data$tPrice >0])
y1<-max(data$tPrice[data$tPrice >0])
py <- plot_ly(dataTprice, x= Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo), name="Price", marker=list(size=tShares1,color=color, opacity= alpha(), line = list( width = .001) ) )
py<- add_trace(data, x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(data, x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
py<- layout(showlegend = FALSE,hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
}
###Prints
if (is(try(pp(), silent=T), "try-error")==FALSE) {
temp<- pp()[(pp()$Time>=t1 & pp()$Time<=t2), ]
for (i in 1: nrow(temp)) {
py <- py %>% add_trace(x = c(temp$Time[i], temp$Time[i]), y = c( min(dataTprice$Bid_P), max(dataTprice$Ask_P) ), mode = "line",marker=list(size=1), line=list(dash="2", color=pp()$color[i]), hoverinfo = "none", evaluate=TRUE) %>%
add_trace(x = c(temp$Time[i]-Hline(), temp$Time[i]+Hline()), y = c(temp$tPrice[i], temp$tPrice[i]), marker=list(size=1), mode = "line", line=list(dash="1", color="violet"), hoverinfo = "none", evaluate=TRUE)
}
}
###Imbalances
if (!is.null(input$icbc)) {
tt<- subset(ImbExchange(), Time>=t1 & Time<=t2)
if (nrow(tt)>0) {
py<- add_trace(tt, x=Time, y=iCBC, name="iCBC NSDQ", mode="markers", marker=list(symbol = 22, color= "BF3EFF", size=Size(), opacity= alpha1()))
py<- layout(yaxis=list(range=c(min(dataTprice$Bid_P)-(max(dataTprice$Ask_P)-min(dataTprice$Bid_P))*0.005,max(dataTprice$Ask_P)+(max(dataTprice$Ask_P)-min(dataTprice$Bid_P))*0.005 )))
}
}
###Prev Close
if (input$prevclx) {
py<- add_trace(x=c(data$Time[1], tail(data$Time,1)), y=c(PrevClose()$tPrice, PrevClose()$tPrice), line=list(width=1, color="teal"), marker=list(size=2), name="PrevCLX", hoverinfo = "none")
}
###Orders
if (nrow(dd)>0) {
dd<- subset(dd, Time>=t1 & Time<=t2)
id<- unique(dd$orderid)
for (i in 1:length(id)) {
tt<-dd[dd$orderid==id[i], ]
tt<- tt[order(tt$Time), ]
py<- py %>% add_trace( x= tt$Time, y=tt$price, mode="markers+lines",name=id[i], text=paste0("Tif:",tt$timeinforce, " Shares:", tt$Shares, "<br>Exchange:", tt$exchange),
marker=list(symbol=tt$Shape, size=tt$Size, color=tt$color),
line=list(width=0.2, color=tt$color[1]), evaluate=TRUE)
}
}
###News
if ( input$news ) {
if (nrow(Newsdata())>0) {
Newsdd<- subset(Newsdata(), Time>=t1 & Time<=t2)
if (nrow(Newsdd)>0) {
mx<- max(data$tPrice)
mn<- min(data$tPrice[data$tPrice >0])
a<- list()
for (i in 1:nrow(Newsdd)) {
tt<- NULL
tt<- Newsdd[i,]
a[[i]] <- list(
bordercolor="steelblue",
borderwidth=1,
bgcolor= "#F0F8FF",
arrowcolor="4F94CD",
font= list(color="darkblue", family="Droid Sans", size=Font()),
align="left",
x = tt$Time,
y = mx - 0.0015*mx,
text = gsub("[$]","",tt$head),
xref = "x",
yref = "y",
showarrow = TRUE,
arrowhead = 3,
ax = 20,
ay = -40)
py<- py %>% add_trace( x = c(tt$Time, tt$Time), y = c(mn, mx - 0.0015*mx), hoverinfo = "x", marker=list(size=2, color="7093DB"), line=list(width=0.8, color="7093DB"), evaluate=TRUE)
}
py<- py %>% layout(annotations=a)
}
}
}
###Nav
if (length(input$nav)>0 ){
if (tDiff<= 100) {dd1<- NavData1()} else {
if (tDiff>100 & tDiff<2*3600) {dd1<- NavData2()}
if (tDiff>2*3600) {dd1<- NavData3()}
}
if ("B" %in% input$nav) {
dd<- subset(dd1, Time>=t1 & Time<=t2)
py<- add_trace(x= dd$Time, y= dd$B_NAV, mode="markers", marker=list(size=navSize, color="navy", opacity=navOpacity, symbol=217), name="B_Nav")
}
if ("A" %in% input$nav) {
py<- add_trace(x= dd$Time, y= dd$A_NAV, mode="markers", marker=list(size=navSize, color="#99ccff", opacity=navOpacity, symbol=17), name="A_Nav")
}
if ("M" %in% input$nav) {
py<- add_trace(x= dd$Time, y= dd$M_NAV, mode="markers", marker=list(size=navSize, color="#3366ff", opacity=navOpacity, symbol=24), name="M_Nav")
}
py<- layout(yaxis=list(range=c(p1-(p2-p1)*0.05, p2+(p2-p1)*0.05)))
}
if (input$volumeChart & nrow(plotDelayGreater())>0) {
#### Volume Chart
if (tDiff<=30) {x="Time1"}
if ((tDiff>30) & (tDiff<5*60)) {x="Time2"}
if ((tDiff>=5*60) & (tDiff<30*60)) {x="Time3"}
if (tDiff>=30*60) {x="Time4"}
VolumeAggregate<- dataTprice[ ,c(x, "tShares" )]
VolumeAggregate<- eval(parse(text=paste0("aggregate(.~",x,", data=VolumeAggregate, FUN=sum)")))
VolumeAggregate[ ,x]<- fastPOSIXct(VolumeAggregate[ ,x], required.components = 6L, tz = "GMT")
py <- add_trace(data=VolumeAggregate, x =eval(parse(text= x)), y = tShares, type = "bar", marker = list(color = "steelblue"), yaxis="y2", hoverinfo="none")
py<- layout(
yaxis = list(
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
domain = c(0.30, 0.95)),
yaxis2 = list(
zerolinecolor='#d9d9d9',
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
side = "left",
domain = c(0, 0.2))
)
}
}
if (input$radio==2) {
if (input$volumeChart) {
py<- layout(py, xaxis=list(title = "",showgrid = F,
tickfont = list(color = fontcolor())),
yaxis = list( gridcolor = "#8c8c8c",
tickfont = list(color = fontcolor()),
titlefont = list(color = fontcolor())),
yaxis2 = list( gridcolor = "#8c8c8c",
zerolinecolor= "#8c8c8c",
tickfont = list(color = fontcolor()),
titlefont = list(color = fontcolor())),
paper_bgcolor = papercolor(),
plot_bgcolor = plotcolor())
} else {
py<- layout(py, xaxis=list(title = "",showgrid = F,
tickfont = list(color = fontcolor())),
yaxis = list( gridcolor = "#8c8c8c",
tickfont = list(color = fontcolor()),
titlefont = list(color = fontcolor())),
paper_bgcolor = papercolor(),
plot_bgcolor = plotcolor())
}
}
incProgress(1)
setProgress(1)
})
py
}
})
})
  ###Imbalance plot: iPaired/iShares (and optionally iMarket) markers over time.
  ### Re-renders on the Go button; a box/lasso selection on the bottom chart
  ### ("plotly_selected", source "subset") narrows the time window shown.
  ImbalPlot <- renderPlotly({
    event <- event_data("plotly_selected", source = "subset")
    input$go
    isolate({
      if (nrow(data())>0) {
        withProgress(message = 'Imbalance Chart', value = 0.3, {
          # ay2: right-hand overlay axis for iShares (green); ay1: left axis.
          ay2 <- list(
            zeroline = FALSE,
            tickfont = list(color = "green"),
            overlaying = "y",
            side = "right"
          )
          ay1<- list (
            zeroline = FALSE,
            tickfont = list(color = "darkblue"),
            title=""
          )
          xax <- list(
            title="",
            zeroline = FALSE,
            tickfont = list(color = "darkblue")
          )
          if (length(input$icbc)>0) {
            tt<- data.frame()
            tt<- ImbExchange()[,c("Time", "iMarket" ,"iPaired", "iShares")]
            if (nrow(data.frame(event)) >0 ) {
              # plotly reports selection x in epoch milliseconds; round-trip
              # through EET and reparse as GMT (convention used app-wide);
              # widen by 1s on each side so edge points are kept.
              t1<- as.POSIXct(as.character(as.POSIXct(min(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")-1
              t2<- as.POSIXct(as.character(as.POSIXct(max(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")+1
              if (!is.null(input$icbc)) {
                tt<- subset(ImbExchange(), Time>=t1 & Time<=t2, select=c("Time", "iMarket","iPaired", "iShares"))
              }
            }
            if ("A" %in% input$icbc==FALSE) {
              # Non-AXDP exchanges: only iPaired (left axis) + iShares (right).
              py <- plot_ly(tt, x= Time, y=iPaired, mode="markers", marker=list( size=5 , opacity=0.9, color="steelblue"), name="iPaired") %>%
                # add_trace(x=Time, y=iMarket, mode="markers", marker=list( size=5 , opacity=0.9, color="violet"), name="iMarket") %>%
                add_trace(x=Time, y=iShares, mode="markers",yaxis = "y2", marker=list( size=5 , opacity=0.9, color="green"), name="iShares") %>%
                layout(xaxis=xax, showlegend = FALSE, yaxis=ay1, yaxis2 = ay2) %>% layout( margin = list(autosize=FALSE,r=30), hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
            } else {
              # AXDP selected: additionally overlay non-zero iMarket points.
              py <- plot_ly(tt, x= Time, y=iPaired, mode="markers", marker=list( size=5 , opacity=0.9, color="steelblue"), name="iPaired") %>%
                add_trace(x=Time, y=iShares, mode="markers",yaxis = "y2", marker=list( size=5 , opacity=0.9, color="green"), name="iShares")
              py <- add_trace(tt[tt$iMarket !=0, ], x=Time, y=iMarket, mode="markers", marker=list( size=5 , opacity=0.9, color="violet"), name="iMarket") %>%
                layout(xaxis=xax, showlegend = FALSE, yaxis=ay1, yaxis2 = ay2) %>% layout(margin = list(autosize=FALSE, r=30), hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
            }
          } else {
            # No exchange selected: draw an empty placeholder chart.
            dd<- data.frame(Time=as.Date(character()), iShares=as.numeric())
            py<- plot_ly(dd, x=Time, y=iShares, mode="markers") %>% layout(xaxis=xax, showlegend = FALSE, yaxis=ay1, paper_bgcolor= 'rgba(249,249,263,.85)')
          }
          # radio == 2 switches the app to the dark theme; restyle the axes.
          if (input$radio==2) {
            py<- layout(py, xaxis=list(title = "", showgrid = F,
                                       tickfont = list(color = fontcolor())),
                        yaxis = list( gridcolor = "#8c8c8c",
                                      tickfont = list(color = fontcolor()),
                                      titlefont = list(color = fontcolor())),
                        yaxis2 = list( gridcolor = "#8c8c8c",
                                       tickfont = list(color = fontcolor()),
                                       titlefont = list(color = fontcolor())),
                        paper_bgcolor = papercolor(),
                        plot_bgcolor = plotcolor())
          }
          incProgress(1)
          setProgress(1)
        })
        # NOTE(review): fallback plot built from plotdelay() (price data), not
        # the imbalance frame — presumably just an empty placeholder; confirm.
        if (nrow(plotdelay())==0) {py<- plot_ly(plotdelay(), x=Time, y=tPrice, mode="markers")}
        py
      }
    })
  })
  ###Bottom plot: log-scale MidPrice overview used as the selection source
  ### ("subset") that drives zooming in the other charts. Vertical dashed
  ### lines mark print times from pp() when available.
  trendPlot2 <- renderPlotly({
    input$go
    isolate({
      if (nrow(BottomPlot())>0) {
        withProgress(message = 'Bottom Chart', value = 0.5, {
          xax <- list(
            title = "",
            tickfont = list(color = "darkblue")
          )
          yax <- list(
            title = "",
            tickfont = list(color = "darkblue")
          )
          # dragmode = "select" makes box-select the default interaction so
          # this chart can emit "plotly_selected" events for the others.
          py<- plot_ly(BottomPlot(), x = Time, y = MidPrice, source="subset", mode = "markers", marker=list( size=3.3 , opacity=0.9), name="") %>%
            layout(showlegend = FALSE, hovermode = "closest", yaxis=list(type = "log") ,paper_bgcolor= 'rgba(249,249,263,.85)', dragmode = "select") %>%
            layout(xaxis=xax, yaxis=yax)
          # pp() may error (e.g. missing file); only draw print markers when
          # it evaluated cleanly.
          if (is(try(pp(), silent=T), "try-error")==FALSE) {
            for (i in 1: nrow(pp())) {
              py <- py %>% add_trace(x = c(pp()$Time[i], pp()$Time[i]), y = c(y0(), y1()), mode = "line",marker=list(size=1), line=list(dash="2", color="steelblue"), evaluate=TRUE)
            }
          }
          incProgress(1)
          setProgress(1)
        })
        # Dark-theme restyling when radio == 2.
        if (input$radio==2) {
          py<- layout(py, xaxis=list(title = "", showgrid = F,
                                     tickfont = list(color = fontcolor())),
                      yaxis = list( gridcolor = "#8c8c8c",
                                    tickfont = list(color = fontcolor()),
                                    titlefont = list(color = fontcolor())),
                      paper_bgcolor = papercolor(),
                      plot_bgcolor = plotcolor())
        }
        py
      }
    })
  })
### Data tab Market data
DataOut<- reactive({
event <- event_data("plotly_selected", source = "subset")
event<- event[event$y>0, ]
if (input$futures) {
columns<- c("Time", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares", "tSide", "tType", "tPrice")
} else {
columns<- c("Time", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares", "tSide", "tType", "tPrice", "iCBC", "iMarket", "iShares", "iPaired", "iExchange", "B_NAV", "M_NAV", "A_NAV")
}
if (nrow(data.frame(event))>1) {
t1<- fastPOSIXct(as.character(as.POSIXct(min(event$x)/1000, origin="1970-01-01", tz="EET")), required.components = 6L, tz ="GMT")
t2<- fastPOSIXct(as.character(as.POSIXct(max(event$x)/1000, origin="1970-01-01", tz="EET")), required.components = 6L, tz ="GMT")
tDiff= as.numeric(t2-t1, units="secs")
if (tDiff<= 1800) {md<-data1()} else {
if (tDiff>1800 & tDiff<5*3600) {md<-Seconds()}
if (tDiff>5*3600 & tDiff<7*(3600)) {md<-Seconds10()}
if (tDiff>7*3600) {md<-Minutes()}
}
if (tDiff<= 100) {nav<- NavData1()} else {
if (tDiff>100 & tDiff<2*3600) {nav<- NavData2()}
if (tDiff>2*3600) {nav<- NavData3()}
}
#md<- md[ ,columns ]
if (length(input$nav)>0) {
data<- rbind(md, nav)
data<- data[order(data$Time),]
rownames(data)<- NULL
Out<- data[(data$Time>=t1 & data$Time<=t2), ]
} else {Out<- md[(md$Time>=t1 & md$Time<=t2), ]}
} else {
md= data()[, columns]
if (length(input$nav)>0) {
nav<- NavData1()
Out= md
#Out<- rbind(md, nav)
#Out<- Out[order(Out$Time),]
rownames(Out)<- NULL
} else {Out<- md}
}
Out$Reason<- factor(Out$Reason)
Out$tSide<- factor(Out$tSide)
Out$tType<- factor(Out$tType)
Out$Time=format(Out$Time, format="%H:%M:%OS")
return (Out)
})
output$mytable <- renderDataTable(
datatable(DataOut(), extensions = 'Buttons',
options = list(pageLength = 15, searchHighlight = TRUE,dom = 'C<"clear">lfrtip'),
filter = list(position = 'top', clear = FALSE)))
output$downloadData <- downloadHandler(
filename = function() {paste0(dateText(),"_MarketData.csv", sep="") },
content = function(file) {
write.csv(DataOut() , file, row.names = FALSE, sep=",")
}
)
###Data tab for orders
OrderOut<- reactive({
id<- unique(dd()$orderid)
if ( is.null(id)) {
data<- data.frame(Time="", strategy="", messagetype="", exchange="", orderid="", destination="", side="", price="", type="", sharesexecuted="", timeinforce="")
} else {
data<- OrderTable(date=as.character(dateText()), symbol= input$text, orderid= unique(dd()$orderid))
}
return(data)
})
output$OrdersTable <- renderDataTable(
datatable(OrderOut(), extensions = 'Buttons',options = list(pageLength = 15, searchHighlight = TRUE,dom = 'C<"clear">lfrtip'),
filter = list(position = 'top', clear = FALSE))
)
output$downloadOrders <- downloadHandler(
filename = function() {paste0(dateText(),"_Orders.csv", sep="") },
content = function(file) {
write.table(OrderOut() , file, row.names = FALSE, sep=",")
}
)
  # Wire each renderPlotly above into a fixed-height plotly container:
  # main chart (800px), bottom overview (200px), imbalance chart (200px).
  output$plotui <- renderUI({
    output$plot<- trendPlot
    plotlyOutput("plot", width="100%", height = 800)
  })
  output$plotui2 <- renderUI({
    output$plot2<- trendPlot2
    plotlyOutput("plot2", width="100%", height = 200)
  })
  output$plotui3 <- renderUI({
    output$plot3<- ImbalPlot
    plotlyOutput("plot3", width="100%", height = 200)
  })
inputChoices <- reactive({
input$go
isolate({
choices<- unique(Order()$strategy)
choices= c("None", choices)
return(choices)
})
})
output$strat <- renderUI({
selectInput('strat', 'Orders:', choices=inputChoices(), selected = input$strat, width="100")
})
output$name<- renderUI({
input$go
isolate({
if (input$radio=="2") {
eval(parse(text='includeCSS("slate.css")'))
}
})
})
output$textcol<- renderUI({
if (input$radio=="2") {
eval(parse(text= 'tags$style(type="text/css", "#from {background-color: #E3E3E3 }") '))
}
})
  # Placeholder verbatim output (intentionally empty).
  output$brush <- renderPrint({
  })
  # Shut the app down when the browser session closes.
  session$onSessionEnded(stopApp)
}
)
| /server.R | no_license | romario076/Market-Data-visualization | R | false | false | 52,334 | r |
suppressWarnings( library(RMySQL))
suppressWarnings( library(fasttime))
suppressWarnings( library(data.table))
suppressWarnings( library(knitr))
suppressWarnings( library(dplyr))
suppressWarnings( library(xts))
suppressWarnings( library(scales))
suppressWarnings( library(DT))
suppressWarnings(library(shinyBS))
Sys.setenv(TZ='GMT')
# Load cached market data for one session window.
# CSV-backed stand-in for the original MySQL query: reads ./data/Md.csv, keeps
# rows with "<date> <from>" <= Time <= "<date> <to>" (plain string comparison
# on the preformatted timestamp column), then parses Time to POSIXct (GMT).
# Returns an empty data.frame when nothing matches.
# NOTE(review): `symbol`, `Level1` and `host` are accepted but unused here —
# presumably consumed by the original DB-backed version; confirm.
# NOTE(review): options(digits.secs=6) mutates a global option and is never
# restored (`op` is unused).
MarketData<- function(date, from, to, symbol, Level1, host) {
  data<- read.csv("./data/Md.csv")
  data$Time= as.character(data$Time)
  data<- data[(data$Time>=paste0(date," ", from) & (data$Time<=paste0(date, " ", to))), ]
  if (nrow(data) !=0) {
    op<-options(digits.secs=6)
    data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
  } else {data<- data.frame()}
  #write.csv(data, file = "Md.csv",row.names=FALSE)
  return(data)
}
# Futures variant of MarketData().
# NOTE(review): reads "Md.csv" from the working directory while the equity
# loader reads "./data/Md.csv", and no date/from/to filtering is applied —
# confirm whether both differences are intentional before unifying.
MarketDataFutures<- function(date, from, to, symbol, Level1) {
  data<- read.csv("Md.csv")
  if (nrow(data) !=0) {
    op<-options(digits.secs=6)
    data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
  } else {data<- data.frame()}
  return(data)
}
# Load the cached prints table and parse its timestamps (GMT) with
# microsecond display precision. Arguments mirror the DB-backed original but
# are unused by this CSV stub.
Printu <- function(date, from, to, symbol, host) {
  prints <- read.csv("./data/Printu.csv")
  op <- options(digits.secs = 6)
  prints$Time <- fastPOSIXct(prints$Time, required.components = 6L, tz = "GMT")
  prints
}
# Previous-close record for `symbol` (CSV-backed stub; arguments unused).
PrevCLX2 <- function(date, symbol) {
  read.csv("./data/PrevCLX.csv")
}
# Previous-close record for a futures symbol (CSV-backed stub; args unused).
# BUG FIX: this previously returned `yClose`, a name never defined in this
# function (run-time error). Return the frame read from disk instead, matching
# PrevCLX2(), whose callers consume $tPrice the same way.
PrevCLXFutures<- function(date, symbol) {
  data<- read.csv("./data/PrevCLX.csv")
  return(data)
}
# Strategy order log (CSV-backed stub; arguments unused). Time is parsed to
# POSIXct (GMT) and strategy is coerced from factor to character.
Orders <- function(date, from, to, symbol) {
  order_log <- read.csv("./data/Orders.csv")
  op <- options(digits.secs = 6)
  order_log$Time <- fastPOSIXct(order_log$Time, required.components = 6L, tz = "GMT")
  order_log$strategy <- as.character(order_log$strategy)
  order_log
}
# Stub: per-order detail lookup is disabled; always yields an empty frame.
OrderTable <- function(date, from, to, symbol, orderid) {
  data.frame()
}
# Headlines between `from` and `to` on `date`. Filtering is a plain string
# comparison on the preformatted "YYYY-MM-DD HH:MM:SS" Time column, so no
# timestamp parsing is needed here. `symbol` is unused by this CSV stub.
News <- function(date, from, to, symbol) {
  headlines <- read.csv("./data/News.csv")
  headlines$Time <- as.character(headlines$Time)
  lo <- paste0(date, " ", from)
  hi <- paste0(date, " ", to)
  headlines[headlines$Time >= lo & headlines$Time <= hi, ]
}
# Stub: the NAV feed is disabled; always yields an empty frame.
Nav <- function(date, from, to, symbol, scale, host) {
  data.frame()
}
# Validate that (a) the data source is reachable and (b) `data` evaluated to a
# non-empty frame. Pops a shiny modal and returns TRUE when something is
# wrong; returns FALSE when the data is usable. `data` is received lazily, so
# evaluating it here may itself throw — that case is caught via try().
checkErrors<- function(data) {
  # Shared "empty data" modal (was duplicated three times).
  showEmptyDataModal <- function() {
    showModal(modalDialog(
      title = "Warning message",
      " Message: Empty data. Choose another day :-) ",
      size = "m",
      easyClose = TRUE
    ))
  }
  out <- tryCatch(
    {
      # Placeholder connectivity probe (the real MySQL handshake is disabled).
      print("Connection")
    },
    error = function(cond) FALSE,
    warning = function(cond) NULL
  )
  # BUG FIX: `out` may be a string ("Connection"), FALSE, or NULL; the old
  # `out == FALSE` test yielded logical(0) for NULL and crashed `if`. Treat
  # both FALSE (error) and NULL (warning) as a failed connection.
  if (isFALSE(out) || is.null(out)) {
    showModal(modalDialog(
      title = "Warning message",
      "Failed to connect to database: Error: Can't connect to MySQL server ",
      size = "m",
      easyClose = TRUE
    ))
    return(TRUE)
  }
  if (is(try(data, silent = TRUE), "try-error")) {
    showEmptyDataModal()
    return(TRUE)
  }
  if (nrow(data) < 1) {
    showEmptyDataModal()
    return(TRUE)
  }
  return(FALSE)
}
###Shiny server
shinyServer(function(input, output, session) {
dateText<- renderText({
input$go
isolate({as.character(input$date)})
})
### Define selected host
host<- reactive({
if (input$host==1) {
host<- "192"
} else {
host<- "10"
}
return(host)
})
  # Toggle the help-text panel visibility whenever the Help button is pressed.
  observeEvent(input$help, {
    toggle("text_help")
  })
data<- eventReactive(input$go, {
rm(list=ls(all=TRUE))
withProgress( message = 'Data downloading', value = 0.2, {
if (input$futures) {
dd<-MarketDataFutures(date=dateText(), from=input$from, to=input$to, symbol=input$text, Level1=input$level1)
} else {
dd<- MarketData(date=dateText(), from=input$from, to=input$to, symbol=input$text, Level1=input$level1, host=host())
}
incProgress(1)
setProgress(1)
})
return(dd)
})
  # Column-pruned working dataset: trades only, or trades + Level1 quotes when
  # the Level1 checkbox is on. Futures data lacks the imbalance column (iCBC).
  # Runs checkErrors() on the result (modal + log message on failure).
  data1<- reactive({
    input$go
    isolate({
      if (nrow(data())>1) {
        if (input$futures) {
          if (input$level1) {
            data1<- data()[(data()$Reason=="Trade")|(data()$Reason=="Level1"),
                           c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1", "tSide",
                             "MidPrice", "tPrice", "tType", "color") ]
          } else {
            data1<- data()[(data()$Reason=="Trade"),
                           c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1",
                             "tSide", "MidPrice", "tPrice", "tType", "color") ]
          }
        } else {
          if (input$level1) {
            data1<- data()[(data()$Reason=="Trade")|(data()$Reason=="Level1"),
                           c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1", "tSide","tType" ,"MidPrice",
                             "tPrice", "iCBC", "color") ]
          } else {
            data1<- data()[(data()$Reason=="Trade"),
                           c("Time", "Time1", "Time2", "Time3", "Time4", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares","tShares1", "tSide", "tType","MidPrice",
                             "tPrice", "iCBC", "color") ]
          }
        }
      } else {
        data1<- data.frame()
      }
      message(paste0("Data1 shape: ", nrow(data1)))
      isErrors<- checkErrors(data=data1)
      message(paste0("Errors: ", isErrors))
      return(data1)
    })
  })
###Disable or enable inputs depending to DB
observeEvent(input$futures,
if (input$futures) {
disable("strat")
disable("news")
disable("icbc")
disable("nav")
disable("colorEx")
disable("host")
} else {
enable("strat")
enable("news")
enable("icbc")
enable("nav")
enable("colorEx")
enable("host")
})
delta<- reactive({ as.numeric(max(data1()$Time) - min(data1()$Time), units="secs") })
f<- reactive({ f<-as.xts(data1(), order.by=data1()[, 1], frequency=NULL)
return (f)})
  # 1-second downsample: keep the last row of each bucket. Level1 quotes and
  # non-Level1 rows are resampled separately so a sparse trade stream is not
  # swallowed by the dense quote stream. The xts round-trip flattens every
  # column to character, hence the type restorations before returning.
  Seconds<- reactive({
    if (input$level1) {
      temp<- f()[f()$Reason=="Level1", ]
      ep1 <- endpoints(temp,'seconds')
      data1<- as.data.frame(period.apply(temp, INDEX=ep1, FUN=function(x) tail(x, 1)))
      row.names(data1)<- NULL
      temp2<- f()[f()$Reason!="Level1"]
      ep2 <- endpoints(temp2,'seconds')
      data2<- as.data.frame(period.apply(temp2, INDEX=ep2, FUN=function(x) tail(x, 1)))
      row.names(data2)<- NULL
      data<- rbind(data1, data2)
    } else {
      ep <- endpoints(f(),'seconds')
      data<- as.data.frame(period.apply(f(), INDEX=ep, FUN=function(x) tail(x, 1)))
      row.names(data)<- NULL
    }
    data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
    data$tPrice<- as.numeric(as.character(data$tPrice))
    data$Ask_P<- as.numeric(as.character(data$Ask_P))
    data$Bid_P<- as.numeric(as.character(data$Bid_P))
    data$tShares1<- as.character(data$tShares1)
    data$tType<- as.character(data$tType)
    data$color<- as.character(data$color)
    data<- data[order(data$Time),]
    return(data)})
  # 10-second downsample, only built when the window exceeds 30 minutes
  # (returns NULL otherwise). Same split/resample/retype scheme as Seconds().
  # NOTE(review): the quote stream uses 10s buckets while the non-Level1
  # stream keeps 1s buckets — presumably intentional; confirm.
  Seconds10<- reactive({
    if (delta()> 1800) {
      if (input$level1) {
        temp<- f()[f()$Reason=="Level1", ]
        ep1 <- endpoints(temp,'seconds', k=10)
        data1<- as.data.frame(period.apply(temp, INDEX=ep1, FUN=function(x) tail(x, 1)))
        row.names(data1)<- NULL
        temp2<- f()[f()$Reason!="Level1"]
        ep2 <- endpoints(temp2,'seconds')
        data2<- as.data.frame(period.apply(temp2, INDEX=ep2, FUN=function(x) tail(x, 1)))
        row.names(data2)<- NULL
        data<- rbind(data1, data2)
      } else {
        ep <- endpoints(f(),'seconds', k=10)
        data<- as.data.frame(period.apply(f(), INDEX=ep, FUN=function(x) tail(x, 1)))
        row.names(data)<- NULL
      }
      data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
      data$tPrice<- as.numeric(as.character(data$tPrice))
      data$Ask_P<- as.numeric(as.character(data$Ask_P))
      data$Bid_P<- as.numeric(as.character(data$Bid_P))
      data$tShares1<- as.character(data$tShares1)
      data$tType<- as.character(data$tType)
      data$color<- as.character(data$color)
      data<- data[order(data$Time),]
    } else {data<- c()}
    return(data)})
Minutes<- reactive({
if (delta()> 1800) {
if (input$level1) {
temp<- f()[f()$Reason=="Level1", ]
ep1 <- endpoints(temp, 'minutes')
data1<- as.data.frame(period.apply(temp, INDEX=ep1, FUN=function(x) tail(x, 1)))
row.names(data1)<- NULL
temp2<- f()[f()$Reason!="Level1"]
ep2 <- endpoints(temp2, 'minutes')
data2<- as.data.frame(period.apply(temp2, INDEX=ep2, FUN=function(x) tail(x, 1)))
row.names(data2)<- NULL
data<- rbind(data1, data2)
} else {
ep <- endpoints(f(),'seconds', k=10)
data<- as.data.frame(period.apply(f(), INDEX=ep, FUN=function(x) tail(x, 1)))
row.names(data)<- NULL
}
data$Time<- fastPOSIXct(data$Time, required.components = 6L, tz = "GMT")
data$tPrice<- as.numeric(as.character(data$tPrice))
data$Ask_P<- as.numeric(as.character(data$Ask_P))
data$Bid_P<- as.numeric(as.character(data$Bid_P))
data$tShares1<- as.character(data$tShares1)
data$tType<- as.character(data$tType)
data$color<- as.character(data$color)
data<- data[order(data$Time),]
} else {data<- c()}
return(data)})
plotdelay<- reactive({
if (nrow(data1())>0) {
tDiff<- delta()
if (tDiff<= 1800) {data<-data1()} else {
if (tDiff>1800 & tDiff<3*3600) {data<-Seconds()}
if (tDiff>3*3600 & tDiff<6*(3600)) {data<-Seconds10()}
if (tDiff>6*3600) {data<-Minutes()}
}
data$tShares1<- as.numeric(data$tShares1)
data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
if (!input$futures) {
if (sum(data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")])>0) {
data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]<- as.integer(rescale(as.numeric(data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]), c(7,24)))
data$color[(data$tType=="OPG") | (data$tType=="CLX")]<- "#ffb84d"
}
}
} else {
data<- data.frame()
}
return(data)
})
  # Imbalance series: iCBC/iMarket/iShares/iPaired over time, downsampled to
  # 30-second buckets when the window exceeds 2h, padded with NA sentinel rows
  # at min/max Time so traces span the full x-range.
  Imbalance<- reactive({
    if (length(input$icbc)>0) {
      imb<- data()[,c("Time", "iCBC","iMarket", "iShares", "iPaired", "iExchange")]
      # NOTE(review): `imb` no longer carries a Reason column after the
      # selection above, so imb$Reason is NULL and this mask selects zero
      # rows (no-op). Likely meant to filter on data()$Reason — confirm.
      imb[imb$Reason !="Imbalance", c("iShares", "iPaired", "iCBC", "iMarket") ]<- NA
      tDiff<- delta()
      if (tDiff> 3600*2) {
        f= as.xts(imb, order.by=imb[, 1], frequency=NULL)
        ep <- endpoints(f,'seconds', k=30)
        imb1<- as.data.frame(period.apply(f, INDEX=ep, FUN=function(x) tail(x,1)))
        imb1$Time<- fastPOSIXct(imb1$Time, required.components = 6L, tz = "GMT")
        imb1$iCBC<- as.numeric(as.character(imb1$iCBC))
      } else {
        imb1<- imb
      }
      imb1$Time<- fastPOSIXct(imb1$Time, required.components = 6L, tz = "GMT")
      m<- data.frame(Time=max(data()$Time), iCBC= NA, iMarket= NA, iShares= NA, iPaired= NA, iExchange= "")
      n<- data.frame(Time=min(data()$Time), iCBC= NA, iMarket= NA, iShares= NA, iPaired= NA, iExchange= "")
      imb1<- rbind(m, imb1, n)
      row.names(imb1)<- NULL
    } else {
      imb1<- c()
    }
    return(imb1)
  })
  # Imbalance() restricted to the exchange(s) ticked in the sidebar by NA-ing
  # out rows from other exchanges.
  # NOTE(review): the filters compound — selecting two exchanges NAs out
  # everything (each filter blanks the other's rows). Works as intended only
  # with a single selection; confirm.
  ImbExchange<- reactive({
    imb= Imbalance()
    if ("Q" %in% input$icbc) {
      imb[imb$iExchange !="NSDQ", c("iShares", "iPaired", "iCBC") ]<- NA
    }
    if ("Y" %in% input$icbc) {
      imb[imb$iExchange !="NYSE", c("iShares", "iPaired", "iCBC") ]<- NA
    }
    if ("A" %in% input$icbc) {
      imb[imb$iExchange !="AXDP", c("iShares", "iPaired", "iCBC", "iMarket") ]<- NA
    }
    return(imb)
  })
  # Time/MidPrice pairs for the bottom overview chart, at a resolution chosen
  # from the loaded span (always downsampled: 1s / 10s / 1min buckets).
  BottomPlot<- reactive({
    if (nrow(data1())>0) {
      tDiff<- delta()
      if (tDiff<2*3600) {data<-Seconds()} else {
        if (tDiff>=2*3600 & tDiff<5*3600) {data<-Seconds10()}
        if (tDiff>=5*3600) {data<-Minutes()}
      }
      data<- data[ ,c("Time", "MidPrice")]
    } else {
      data<- data.frame()
    }
    return(data)
  })
  # plotdelay() restricted to rows with a positive trade price; when the
  # "OverLap" option is on, adds a jittered copy of tPrice so coincident
  # prints do not stack on the chart (jitter is nondeterministic).
  plotDelayGreater<- reactive({
    dd<- plotdelay()
    if (nrow(dd)>0) {
      dd<- dd[dd$tPrice>0, ]
      if (input$OverLap) {dd$tPrice1<- jitter(dd$tPrice)}
    }
    return(dd)
  })
  # Lower y-bound for vertical guide lines: min trade price, or min bid when
  # no priced trades are in view.
  y0<- reactive({
    if (nrow(plotDelayGreater())>0) {
      min(plotDelayGreater()$tPrice)
    } else {min(plotdelay()$Bid_P)
    }
  })
  # Upper y-bound: max trade price, or max ask when no priced trades exist.
  y1<- reactive({
    if (nrow(plotDelayGreater())>0) {
      max(plotDelayGreater()$tPrice)
    } else {max(plotdelay()$Ask_P)
    }
  })
  # Print events for the current symbol/window (used for the vertical
  # hairlines on both price charts).
  pp<- reactive({
    Printu(date =dateText(), from=input$from, to=input$to, symbol= input$text, host= host() )
  })
  # Superseded reactive() version, kept for reference; replaced by the
  # eventReactive below so the order log only reloads on Go.
  #Order<- reactive({
  #  input$go
  #  isolate({
  #    dataOrders<- Orders(date=as.character(dateText()), from=input$from, to=input$to, symbol= input$text)
  #    return(dataOrders)
  #  })
  #})
  # Order log for the current symbol/window, reloaded only on Go.
  Order<- eventReactive(input$go, {
    isolate({
      dataOrders<- Orders(date=as.character(dateText()), from=input$from, to=input$to, symbol= input$text)
      return(dataOrders)
    })
  })
  # Headlines for the window when the News checkbox is on and data is loaded;
  # NULL (via c()) otherwise.
  Newsdata<- reactive({
    if (input$news & nrow(data())>0) {News(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text)} else {c()}
  })
NavData1<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
Nav(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text, host= host(), scale= 12)} else {c()}
})
})
NavData2<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
Nav(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text, host= host(), scale= 9)} else {c()}
})
})
NavData3<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
Nav(date=as.character(dateText()), from=input$from, to=input$to, symbol=input$text, host= host(), scale= 8)} else {c()}
})
})
Navdata<- reactive({
input$go
isolate({
if (length(input$nav)>0) {
tDiff<- delta()
if (tDiff<= 100) {data<- NavData1()} else {
if (tDiff>100 & tDiff<2*3600) {data<- NavData2()}
if (tDiff>2*3600) {data<- NavData3()}
}
} else {data<- c()}
return(data)
})
})
  # Horizontal previous-close line: a two-point series spanning the plotted
  # window at yesterday's closing tPrice; NULL when the checkbox is off.
  PrevClose<- reactive({
    if (input$prevclx) {
      if (input$futures) {
        dat<- PrevCLXFutures(date=as.character(dateText()), symbol=input$text)
        data<- data.frame(Time=c(plotdelay()$Time[1], tail(plotdelay()$Time,1)), tPrice=c(dat$tPrice, dat$tPrice))
      } else {
        dat<- PrevCLX2(date=as.character(dateText()), symbol=input$text)
        data<- data.frame(Time=c(plotdelay()$Time[1], tail(plotdelay()$Time,1)), tPrice=c(dat$tPrice, dat$tPrice))
      }
    } else {data<-c()}
    return(data)
  })
  ###Strategies Orders input: order-log rows for the strategy picked in the
  ### sidebar; empty frame when "None"/nothing is selected or the choice is
  ### stale (not in the current choice list).
  dd<- reactive ({
    # NOTE(review): `un` is unused, but evaluating Order() here makes this
    # reactive depend on the order log even on the early-return paths —
    # removing it would change invalidation behavior; confirm before cleanup.
    un = unique(Order()$strategy)
    if (is.null(input$strat)==FALSE) {
      if (input$strat != "None") {
        if (input$strat %in% inputChoices()) {
          dd<- Order()[Order()['strategy']==input$strat, ]
        } else {dd<- data.frame() }
      } else {dd<- data.frame() }
    } else {dd<- data.frame() }
    return(dd)
  })
Size<- reactive({as.integer(6)})
alpha<- reactive({0.7})
alpha1<- reactive({0.8})
Hline<-reactive({2})
Font<- reactive({11})
fillcolor = reactive({"#ff6666"})
hollowcolor = reactive({"#39ac73"})
plotcolor = reactive({"#3E3E3E"})
papercolor = reactive({"#1E2022"})
fontcolor = reactive({"#B3A78C"})
###Top plot
trendPlot <- renderPlotly({
event<- event_data("plotly_selected", source = "subset")
event<- event[event$y>0, ]
input$go
isolate({
if (nrow(plotdelay())>0) {
p2<- as.numeric(y1())
p1<- as.numeric(y0())
tDiff<- delta()
fontcolor<- "darkblue"
xax <- list(
title = "",
tickfont = list(color = "darkblue")
)
yax <- list(
title = "",
tickfont = list(color = "darkblue")
)
navOpacity=0.8
navSize=5
if (input$radio==1) {
l<- list( color = toRGB("grey90", alpha = 0.1),
fillcolor = toRGB("grey90", alpha = 0.1),
shape = "hv",
width = .00001)
} else {
l<- list( color = toRGB("grey40", alpha = 0.1),
fillcolor = toRGB("grey40", alpha = 0.1),
shape = "hv",
width = .00001)
}
if (input$colorEx) {
colorDesc<-"c('#FF4040','#ff9900','#66b3ff','#ff00ff','#00e6e6','#9933ff','#4dff4d','#ff99dd')[factor(MsgSource)]"
} else {
colorDesc<- "color"
}
dd<- dd()
if (input$OverLap) {
y="tPrice1"
t= "paste0(tPrice, ' Shares:', tShares)"
hoverinfo= "x+text"
} else{
y="tPrice"
t= "paste0('Shares:', tShares, '<br>Source:',MsgSource)"
hoverinfo= "x+y+text"
}
withProgress( message = 'Top Chart', value = 0.4, {
if (nrow(data.frame(event)) <1 & input$spread==FALSE) {
py<- plot_ly(plotDelayGreater(), x = Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo),
marker=list(size=tShares1, color=eval(parse(text=colorDesc)), opacity= alpha(), line = list( width = .001) )) %>%
layout(showlegend = FALSE, hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
###Imbalances
if (!is.null(input$icbc)) {
py<- add_trace(ImbExchange(), x=Time, y=iCBC, name="iCBC", mode="markers", marker=list(symbol = 22, color= "BF3EFF", size=Size(), opacity= alpha1()))
py<- layout(yaxis=list(range=c( y0(), y1())))
}
###Prev Close
if (input$prevclx) {py<- add_trace(PrevClose(), x=Time, y=tPrice, line=list(width=1, color="00CD66"), marker=list(size=2), name="PrevCLX", hoverinfo = "none")}
###Prints
if (is(try(pp(), silent=T), "try-error")==FALSE) {
for (i in 1: nrow(pp())) {
py <- py %>% add_trace(x = c(pp()$Time[i],pp()$Time[i]), y = c(y0(), y1()), mode = "line",marker=list(size=1), line=list(dash="2", color=pp()$color[i]), hoverinfo = "none", evaluate=TRUE) %>%
add_trace(x = c(pp()$Time[i]-Hline(), pp()$Time[i]+Hline()), y = c(pp()$tPrice[i], pp()$tPrice[i]), marker=list(size=1), mode = "line", line=list(dash="1", color="violet"), hoverinfo = "none", evaluate=TRUE)
}
}
###Orders
if (nrow(dd)>0) {
id<- unique(dd$orderid)
for (i in 1:length(id)) {
tt<-dd[dd$orderid==id[i], ]
tt<- tt[order(tt$Time), ]
py<- py %>% add_trace( x= tt$Time, y=tt$price, mode="markers+lines",name=id[i], text=paste0("Tif:",tt$timeinforce, " Shares:", tt$Shares, "<br>Exchange:", tt$exchange),
marker=list(symbol=tt$Shape, size=tt$Size, color=tt$color),
line=list(width=0.3, color=tt$color[1]), evaluate=TRUE)
}
}
###News
if ( input$news ) {
if (nrow(Newsdata())>0) {
a<- list()
for (i in 1:nrow(Newsdata())) {
tt<- NULL
tt<- Newsdata()[i,]
a[[i]] <- list(
bordercolor="steelblue",
borderwidth=1,
bgcolor= "#F0F8FF",
arrowcolor="4F94CD",
font= list(color="darkblue", family="Droid Sans", size=Font()),
align="left",
opacity=0.8,
x =tt$Time,
y = y1()-0.0015*y1(),
text = gsub("[$]","",tt$head),
xref = "x",
yref = "y",
showarrow = TRUE,
arrowhead = 3,
ax = 20,
ay = -40)
py<- py %>% add_trace( x = c(tt$Time, tt$Time), y = c(y0(), y1()-0.0015*y1()), hoverinfo="x", marker=list(size=2, color="7093DB"), line=list(width=0.8, color="7093DB"), evaluate=TRUE)
}
py<- py %>% layout(annotations=a)
}
}
###Nav
if (length(input$nav)>0 ){
if ("B" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$B_NAV, mode="markers", marker=list(size=navSize, color="navy", opacity=navOpacity, symbol=217), name="B_Nav")
}
if ("A" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$A_NAV, mode="markers", marker=list(size=navSize, color="#99ccff", opacity=navOpacity, symbol=17), name="A_Nav")
}
if ("M" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$M_NAV, mode="markers", marker=list(size=navSize, color="#3366ff", opacity=navOpacity, symbol=24), name="M_Nav")
}
py<- layout(yaxis=list(range=c(p1-(p2-p1)*0.05, p2+(p2-p1)*0.05)))
}
###Level1
if (input$level1) {
py<- add_trace(plotdelay(), x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(plotdelay(), x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
}
if (input$volumeChart & nrow(plotDelayGreater())>0) {
#### Volume Chart
if (tDiff<=30) {x="Time1"}
if ((tDiff>30) & (tDiff<5*60)) {x="Time2"}
if ((tDiff>=5*60) & (tDiff<30*60)) {x="Time3"}
if (tDiff>=30*60) {x="Time4"}
VolumeAggregate<- plotDelayGreater()[ ,c(x, "tShares" )]
VolumeAggregate$tShares<- as.numeric(as.character(VolumeAggregate$tShares))
VolumeAggregate<- eval(parse(text=paste0("aggregate(.~",x,", data=VolumeAggregate, FUN=sum)")))
VolumeAggregate[ ,x]<- fastPOSIXct(VolumeAggregate[ ,x], required.components = 6L, tz = "GMT")
py <- add_trace(data= VolumeAggregate, x =eval(parse(text= x)), y = tShares, type = "bar", marker = list(color = "steelblue"), yaxis="y2", hoverinfo="none") %>% layout(paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(
yaxis = list(
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
domain = c(0.30, 0.95)),
yaxis2 = list(
zerolinecolor='#d9d9d9',
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
side = "left",
domain = c(0, 0.2))
)
}
}
if (nrow(data.frame(event)) <1 & input$spread==TRUE) {
if (nrow(plotDelayGreater())>0) {
py<- plot_ly(plotDelayGreater(), x = Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo),
marker=list(size=tShares1, color=eval(parse(text=colorDesc)), opacity= alpha(), line = list( width = .001) ))
} else {
py<- plot_ly(plotdelay(), x = Time, y = Bid_P, mode = "markers", text = eval(parse(text= t)), hoverinfo = "none",
marker=list(size=1, color=eval(parse(text=colorDesc)), opacity= alpha(), line = list( width = .001) ))
}
py<- add_trace(plotdelay(), x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(plotdelay(), x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
py<- layout(showlegend = FALSE, hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
###Imbalances
if (!is.null(input$icbc)) {
py<- add_trace(ImbExchange(), x=Time, y=iCBC, name="iCBC", mode="markers", marker=list(symbol = 22, color= "BF3EFF", size=Size(), opacity= alpha1()))
py<- layout(yaxis=list(range=c( y0(), y1())))
}
###Prev Close
if (input$prevclx) {py<- add_trace(PrevClose(), x=Time, y=tPrice, line=list(width=1, color="00CD66"), marker=list(size=2), name="PrevCLX", hoverinfo = "none")}
###Prints
if (is(try(pp(), silent=T), "try-error")==FALSE) {
for (i in 1: nrow(pp())) {
py <- py %>% add_trace(x = c(pp()$Time[i],pp()$Time[i]), y = c(y0(), y1()), mode = "line",marker=list(size=1), line=list(dash="2", color=pp()$color[i]), hoverinfo = "none", evaluate=TRUE) %>%
add_trace(x = c(pp()$Time[i]-Hline(),pp()$Time[i]+Hline()), y = c(pp()$tPrice[i], pp()$tPrice[i]), marker=list(size=1), mode = "line", line=list(dash="1", color="violet"), hoverinfo = "none", evaluate=TRUE)
}
}
###Orders
if (nrow(dd)>0) {
id<- unique(dd$orderid)
for (i in 1:length(id)) {
tt<-dd[dd$orderid==id[i], ]
tt<- tt[order(tt$Time), ]
py<- py %>% add_trace( x= tt$Time, y=tt$price, mode="markers+lines",name=id[i], text=paste0("Tif:",tt$timeinforce, " Shares:", tt$Shares, "<br>Exchange:", tt$exchange),
marker=list(symbol=tt$Shape, size=tt$Size, color=tt$color),
line=list(width=0.3, color=tt$color[1]), evaluate=TRUE)
}
}
###News
if ( input$news ) {
if (nrow(Newsdata())>0) {
a<- list()
for (i in 1:nrow(Newsdata())) {
tt<- NULL
tt<- Newsdata()[i,]
a[[i]] <- list(
bordercolor="steelblue",
borderwidth=1,
bgcolor= "#F0F8FF",
arrowcolor="4F94CD",
font= list(color="darkblue", family="Droid Sans", size=Font()),
align="left",
opacity= 0.8,
x =tt$Time,
y = y1()-0.0015*y1(),
text = gsub("[$]","",tt$head),
xref = "x",
yref = "y",
showarrow = TRUE,
arrowhead = 3,
ax = 20,
ay = -40)
py<- py %>% add_trace( x = c(tt$Time, tt$Time), y = c(y0(), y1()-0.0015*y1()), hoverinfo="x", marker=list(size=2, color="7093DB"), line=list(width=0.8, color="7093DB"), evaluate=TRUE)
}
py<- py %>% layout(annotations=a)
}
}
###Nav
if (length(input$nav)>0 ){
if ("B" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$B_NAV, mode="markers", marker=list(size=navSize, color="navy", opacity=navOpacity, symbol=217), name="B_Nav")
}
if ("A" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$A_NAV, mode="markers", marker=list(size=navSize, color="#99ccff", opacity=navOpacity, symbol=17), name="A_Nav")
}
if ("M" %in% input$nav) {
py<- add_trace(x= Navdata()$Time, y= Navdata()$M_NAV, mode="markers", marker=list(size=navSize, color="#3366ff", opacity=navOpacity, symbol=24), name="M_Nav")
}
py<- layout(yaxis=list(range=c(p1-(p2-p1)*0.05, p2+(p2-p1)*0.05)))
}
if (input$volumeChart & nrow(plotDelayGreater())>0) {
#### Volume Chart
if (tDiff<=30) {x="Time1"}
if ((tDiff>30) & (tDiff<5*60)) {x="Time2"}
if ((tDiff>=5*60) & (tDiff<30*60)) {x="Time3"}
if (tDiff>=30*60) {x="Time4"}
VolumeAggregate<- plotDelayGreater()[ ,c(x, "tShares" )]
VolumeAggregate$tShares<- as.numeric(as.character(VolumeAggregate$tShares))
VolumeAggregate<- eval(parse(text=paste0("aggregate(.~",x,", data=VolumeAggregate, FUN=sum)")))
VolumeAggregate[ ,x]<- fastPOSIXct(VolumeAggregate[ ,x], required.components = 6L, tz = "GMT")
py <- add_trace(data= VolumeAggregate, x =eval(parse(text= x)), y = tShares, type = "bar", marker = list(color = "steelblue"), yaxis="y2", hoverinfo="none") %>%
layout(paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(
yaxis = list(
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
domain = c(0.30, 0.95)),
yaxis2 = list(
zerolinecolor='#d9d9d9',
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
side = "left",
domain = c(0, 0.2))
)
}
}
if (nrow(data.frame(event)) >=1 ) {
t1<- as.POSIXct(as.character(as.POSIXct(min(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")-1
t2<- as.POSIXct(as.character(as.POSIXct(max(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")+1
tDiff= as.numeric(t2-t1, units="secs")
if (tDiff<= 1800) {data<- data1()} else {
if (tDiff> 1800 & tDiff< 3600*3) {
data<-Seconds()}
if (tDiff> 3600*3 & tDiff< 3600*6) {
data<-Seconds10()}
if (tDiff> 3600*6) {
data<-Minutes()}
}
data<- data[(data$Time>=t1 & data$Time<=t2) ,]
data$tShares1<- as.numeric(data$tShares1)
data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="ASK") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BID") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")]<- as.integer(rescale(as.numeric(sqrt(data$tShares1[(data$tSide=="BOTH") & (data$tType != "OPG") & (data$tType != "CLX")])), c(1,20)))
data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]<- as.integer(rescale(as.numeric(data$tShares1[(data$tType=="OPG") | (data$tType=="CLX")]), c(7,24)))
data$color[(data$tType=="OPG") | (data$tType=="CLX")]<- "#ffb84d"
dataTprice<- data[data$tPrice >0 ,]
if (input$OverLap) {dataTprice$tPrice1<- jitter(dataTprice$tPrice)}
if (nrow(data.frame(event)) >=1 & input$spread==FALSE) {
py <- plot_ly(dataTprice, x= Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo), name="Price",
marker=list(size=tShares1, color=eval(parse(text=colorDesc)), opacity = alpha(), line = list( width = .001) )) %>%
layout(showlegend = FALSE, hovermode = "closest", legend = list(x = 1, y = 1),paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
###Level1
if (input$level1) {
py<- add_trace(dataTprice, x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(dataTprice, x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
}
}
if (nrow(data.frame(event)) >=1 & input$spread==TRUE) {
y0<- min(data$tPrice[data$tPrice >0])
y1<-max(data$tPrice[data$tPrice >0])
py <- plot_ly(dataTprice, x= Time, y = eval(parse(text=y)), mode = "markers", text = eval(parse(text= t)), hoverinfo=eval(hoverinfo), name="Price", marker=list(size=tShares1,color=color, opacity= alpha(), line = list( width = .001) ) )
py<- add_trace(data, x=Time, y=Bid_P, name = "Bid", line = l, hoverinfo = "none")
py<- add_trace(data, x=Time, y=Ask_P, name = "Ask", line = l, fill="tonexty", hoverinfo = "none")
py<- layout(showlegend = FALSE,hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
py<- layout(xaxis=xax, yaxis=yax)
}
###Prints
if (is(try(pp(), silent=T), "try-error")==FALSE) {
temp<- pp()[(pp()$Time>=t1 & pp()$Time<=t2), ]
for (i in 1: nrow(temp)) {
py <- py %>% add_trace(x = c(temp$Time[i], temp$Time[i]), y = c( min(dataTprice$Bid_P), max(dataTprice$Ask_P) ), mode = "line",marker=list(size=1), line=list(dash="2", color=pp()$color[i]), hoverinfo = "none", evaluate=TRUE) %>%
add_trace(x = c(temp$Time[i]-Hline(), temp$Time[i]+Hline()), y = c(temp$tPrice[i], temp$tPrice[i]), marker=list(size=1), mode = "line", line=list(dash="1", color="violet"), hoverinfo = "none", evaluate=TRUE)
}
}
###Imbalances
if (!is.null(input$icbc)) {
tt<- subset(ImbExchange(), Time>=t1 & Time<=t2)
if (nrow(tt)>0) {
py<- add_trace(tt, x=Time, y=iCBC, name="iCBC NSDQ", mode="markers", marker=list(symbol = 22, color= "BF3EFF", size=Size(), opacity= alpha1()))
py<- layout(yaxis=list(range=c(min(dataTprice$Bid_P)-(max(dataTprice$Ask_P)-min(dataTprice$Bid_P))*0.005,max(dataTprice$Ask_P)+(max(dataTprice$Ask_P)-min(dataTprice$Bid_P))*0.005 )))
}
}
###Prev Close
if (input$prevclx) {
py<- add_trace(x=c(data$Time[1], tail(data$Time,1)), y=c(PrevClose()$tPrice, PrevClose()$tPrice), line=list(width=1, color="teal"), marker=list(size=2), name="PrevCLX", hoverinfo = "none")
}
###Orders
if (nrow(dd)>0) {
dd<- subset(dd, Time>=t1 & Time<=t2)
id<- unique(dd$orderid)
for (i in 1:length(id)) {
tt<-dd[dd$orderid==id[i], ]
tt<- tt[order(tt$Time), ]
py<- py %>% add_trace( x= tt$Time, y=tt$price, mode="markers+lines",name=id[i], text=paste0("Tif:",tt$timeinforce, " Shares:", tt$Shares, "<br>Exchange:", tt$exchange),
marker=list(symbol=tt$Shape, size=tt$Size, color=tt$color),
line=list(width=0.2, color=tt$color[1]), evaluate=TRUE)
}
}
###News
if ( input$news ) {
if (nrow(Newsdata())>0) {
Newsdd<- subset(Newsdata(), Time>=t1 & Time<=t2)
if (nrow(Newsdd)>0) {
mx<- max(data$tPrice)
mn<- min(data$tPrice[data$tPrice >0])
a<- list()
for (i in 1:nrow(Newsdd)) {
tt<- NULL
tt<- Newsdd[i,]
a[[i]] <- list(
bordercolor="steelblue",
borderwidth=1,
bgcolor= "#F0F8FF",
arrowcolor="4F94CD",
font= list(color="darkblue", family="Droid Sans", size=Font()),
align="left",
x = tt$Time,
y = mx - 0.0015*mx,
text = gsub("[$]","",tt$head),
xref = "x",
yref = "y",
showarrow = TRUE,
arrowhead = 3,
ax = 20,
ay = -40)
py<- py %>% add_trace( x = c(tt$Time, tt$Time), y = c(mn, mx - 0.0015*mx), hoverinfo = "x", marker=list(size=2, color="7093DB"), line=list(width=0.8, color="7093DB"), evaluate=TRUE)
}
py<- py %>% layout(annotations=a)
}
}
}
###Nav
if (length(input$nav)>0 ){
if (tDiff<= 100) {dd1<- NavData1()} else {
if (tDiff>100 & tDiff<2*3600) {dd1<- NavData2()}
if (tDiff>2*3600) {dd1<- NavData3()}
}
if ("B" %in% input$nav) {
dd<- subset(dd1, Time>=t1 & Time<=t2)
py<- add_trace(x= dd$Time, y= dd$B_NAV, mode="markers", marker=list(size=navSize, color="navy", opacity=navOpacity, symbol=217), name="B_Nav")
}
if ("A" %in% input$nav) {
py<- add_trace(x= dd$Time, y= dd$A_NAV, mode="markers", marker=list(size=navSize, color="#99ccff", opacity=navOpacity, symbol=17), name="A_Nav")
}
if ("M" %in% input$nav) {
py<- add_trace(x= dd$Time, y= dd$M_NAV, mode="markers", marker=list(size=navSize, color="#3366ff", opacity=navOpacity, symbol=24), name="M_Nav")
}
py<- layout(yaxis=list(range=c(p1-(p2-p1)*0.05, p2+(p2-p1)*0.05)))
}
if (input$volumeChart & nrow(plotDelayGreater())>0) {
#### Volume Chart
if (tDiff<=30) {x="Time1"}
if ((tDiff>30) & (tDiff<5*60)) {x="Time2"}
if ((tDiff>=5*60) & (tDiff<30*60)) {x="Time3"}
if (tDiff>=30*60) {x="Time4"}
VolumeAggregate<- dataTprice[ ,c(x, "tShares" )]
VolumeAggregate<- eval(parse(text=paste0("aggregate(.~",x,", data=VolumeAggregate, FUN=sum)")))
VolumeAggregate[ ,x]<- fastPOSIXct(VolumeAggregate[ ,x], required.components = 6L, tz = "GMT")
py <- add_trace(data=VolumeAggregate, x =eval(parse(text= x)), y = tShares, type = "bar", marker = list(color = "steelblue"), yaxis="y2", hoverinfo="none")
py<- layout(
yaxis = list(
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
domain = c(0.30, 0.95)),
yaxis2 = list(
zerolinecolor='#d9d9d9',
tickfont = list(color = fontcolor),
titlefont = list(color = fontcolor),
side = "left",
domain = c(0, 0.2))
)
}
}
if (input$radio==2) {
if (input$volumeChart) {
py<- layout(py, xaxis=list(title = "",showgrid = F,
tickfont = list(color = fontcolor())),
yaxis = list( gridcolor = "#8c8c8c",
tickfont = list(color = fontcolor()),
titlefont = list(color = fontcolor())),
yaxis2 = list( gridcolor = "#8c8c8c",
zerolinecolor= "#8c8c8c",
tickfont = list(color = fontcolor()),
titlefont = list(color = fontcolor())),
paper_bgcolor = papercolor(),
plot_bgcolor = plotcolor())
} else {
py<- layout(py, xaxis=list(title = "",showgrid = F,
tickfont = list(color = fontcolor())),
yaxis = list( gridcolor = "#8c8c8c",
tickfont = list(color = fontcolor()),
titlefont = list(color = fontcolor())),
paper_bgcolor = papercolor(),
plot_bgcolor = plotcolor())
}
}
incProgress(1)
setProgress(1)
})
py
}
})
})
###Imbalance plot
  ### Imbalance chart: plots iPaired and iShares (and, when "A" is selected,
  ### non-zero iMarket) over time on two y-axes. When the user has
  ### brush-selected a window in the bottom chart, only that window is shown.
  ### NOTE(review): assumes ImbExchange() returns columns Time/iMarket/iPaired/iShares — confirm upstream.
  ImbalPlot <- renderPlotly({
    # Brush selection made on the bottom ("subset") chart, if any.
    event <- event_data("plotly_selected", source = "subset")
    input$go
    isolate({
      if (nrow(data())>0) {
        withProgress(message = 'Imbalance Chart', value = 0.3, {
          # Right-hand axis (iShares, green), overlaid on the primary axis.
          ay2 <- list(
            zeroline = FALSE,
            tickfont = list(color = "green"),
            overlaying = "y",
            side = "right"
          )
          # Primary axis (iPaired/iMarket).
          ay1<- list (
            zeroline = FALSE,
            tickfont = list(color = "darkblue"),
            title=""
          )
          xax <- list(
            title="",
            zeroline = FALSE,
            tickfont = list(color = "darkblue")
          )
          if (length(input$icbc)>0) {
            tt<- data.frame()
            tt<- ImbExchange()[,c("Time", "iMarket" ,"iPaired", "iShares")]
            if (nrow(data.frame(event)) >0 ) {
              # Selection bounds: plotly reports epoch milliseconds; widen by 1s on each side.
              # NOTE(review): the EET->GMT round-trip looks like a timezone workaround — confirm intent.
              t1<- as.POSIXct(as.character(as.POSIXct(min(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")-1
              t2<- as.POSIXct(as.character(as.POSIXct(max(event$x)/1000, origin="1970-01-01", tz="EET")), "%Y-%m-%d %H:%M:%S", tz ="GMT")+1
              if (!is.null(input$icbc)) {
                tt<- subset(ImbExchange(), Time>=t1 & Time<=t2, select=c("Time", "iMarket","iPaired", "iShares"))
              }
            }
            if ("A" %in% input$icbc==FALSE) {
              # Without "A": only iPaired + iShares traces.
              py <- plot_ly(tt, x= Time, y=iPaired, mode="markers", marker=list( size=5 , opacity=0.9, color="steelblue"), name="iPaired") %>%
                # add_trace(x=Time, y=iMarket, mode="markers", marker=list( size=5 , opacity=0.9, color="violet"), name="iMarket") %>%
                add_trace(x=Time, y=iShares, mode="markers",yaxis = "y2", marker=list( size=5 , opacity=0.9, color="green"), name="iShares") %>%
                layout(xaxis=xax, showlegend = FALSE, yaxis=ay1, yaxis2 = ay2) %>% layout( margin = list(autosize=FALSE,r=30), hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
            } else {
              # With "A": add the iMarket trace, skipping zero values.
              py <- plot_ly(tt, x= Time, y=iPaired, mode="markers", marker=list( size=5 , opacity=0.9, color="steelblue"), name="iPaired") %>%
                add_trace(x=Time, y=iShares, mode="markers",yaxis = "y2", marker=list( size=5 , opacity=0.9, color="green"), name="iShares")
              py <- add_trace(tt[tt$iMarket !=0, ], x=Time, y=iMarket, mode="markers", marker=list( size=5 , opacity=0.9, color="violet"), name="iMarket") %>%
                layout(xaxis=xax, showlegend = FALSE, yaxis=ay1, yaxis2 = ay2) %>% layout(margin = list(autosize=FALSE, r=30), hovermode = "closest", paper_bgcolor= 'rgba(249,249,263,.85)')
            }
          } else {
            # No exchange selected: render an empty chart with the same axes.
            dd<- data.frame(Time=as.Date(character()), iShares=as.numeric())
            py<- plot_ly(dd, x=Time, y=iShares, mode="markers") %>% layout(xaxis=xax, showlegend = FALSE, yaxis=ay1, paper_bgcolor= 'rgba(249,249,263,.85)')
          }
          if (input$radio==2) {
            # Dark-theme override: recolor fonts/grid/background.
            py<- layout(py, xaxis=list(title = "", showgrid = F,
                                       tickfont = list(color = fontcolor())),
                        yaxis = list( gridcolor = "#8c8c8c",
                                      tickfont = list(color = fontcolor()),
                                      titlefont = list(color = fontcolor())),
                        yaxis2 = list( gridcolor = "#8c8c8c",
                                       tickfont = list(color = fontcolor()),
                                       titlefont = list(color = fontcolor())),
                        paper_bgcolor = papercolor(),
                        plot_bgcolor = plotcolor())
          }
          incProgress(1)
          setProgress(1)
        })
        # Fallback when the delayed data set is empty.
        if (nrow(plotdelay())==0) {py<- plot_ly(plotdelay(), x=Time, y=tPrice, mode="markers")}
        py
      }
    })
  })
###Bottom plot
  ### Bottom overview chart: mid-price over the whole session on a log y-axis.
  ### This chart is the "subset" source — brushing ("select" dragmode) on it
  ### drives the windows used by the other charts/tables.
  trendPlot2 <- renderPlotly({
    input$go
    isolate({
      if (nrow(BottomPlot())>0) {
        withProgress(message = 'Bottom Chart', value = 0.5, {
          xax <- list(
            title = "",
            tickfont = list(color = "darkblue")
          )
          yax <- list(
            title = "",
            tickfont = list(color = "darkblue")
          )
          py<- plot_ly(BottomPlot(), x = Time, y = MidPrice, source="subset", mode = "markers", marker=list( size=3.3 , opacity=0.9), name="") %>%
            layout(showlegend = FALSE, hovermode = "closest", yaxis=list(type = "log") ,paper_bgcolor= 'rgba(249,249,263,.85)', dragmode = "select") %>%
            layout(xaxis=xax, yaxis=yax)
          # Overlay vertical markers at print times, if the prints reactive succeeds.
          if (is(try(pp(), silent=T), "try-error")==FALSE) {
            for (i in 1: nrow(pp())) {
              py <- py %>% add_trace(x = c(pp()$Time[i], pp()$Time[i]), y = c(y0(), y1()), mode = "line",marker=list(size=1), line=list(dash="2", color="steelblue"), evaluate=TRUE)
            }
          }
          incProgress(1)
          setProgress(1)
        })
        if (input$radio==2) {
          # Dark-theme override.
          py<- layout(py, xaxis=list(title = "", showgrid = F,
                                     tickfont = list(color = fontcolor())),
                      yaxis = list( gridcolor = "#8c8c8c",
                                    tickfont = list(color = fontcolor()),
                                    titlefont = list(color = fontcolor())),
                      paper_bgcolor = papercolor(),
                      plot_bgcolor = plotcolor())
        }
        py
      }
    })
  })
### Data tab Market data
DataOut<- reactive({
event <- event_data("plotly_selected", source = "subset")
event<- event[event$y>0, ]
if (input$futures) {
columns<- c("Time", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares", "tSide", "tType", "tPrice")
} else {
columns<- c("Time", "MsgSource", "Reason", "Bid_P", "Ask_P", "tShares", "tSide", "tType", "tPrice", "iCBC", "iMarket", "iShares", "iPaired", "iExchange", "B_NAV", "M_NAV", "A_NAV")
}
if (nrow(data.frame(event))>1) {
t1<- fastPOSIXct(as.character(as.POSIXct(min(event$x)/1000, origin="1970-01-01", tz="EET")), required.components = 6L, tz ="GMT")
t2<- fastPOSIXct(as.character(as.POSIXct(max(event$x)/1000, origin="1970-01-01", tz="EET")), required.components = 6L, tz ="GMT")
tDiff= as.numeric(t2-t1, units="secs")
if (tDiff<= 1800) {md<-data1()} else {
if (tDiff>1800 & tDiff<5*3600) {md<-Seconds()}
if (tDiff>5*3600 & tDiff<7*(3600)) {md<-Seconds10()}
if (tDiff>7*3600) {md<-Minutes()}
}
if (tDiff<= 100) {nav<- NavData1()} else {
if (tDiff>100 & tDiff<2*3600) {nav<- NavData2()}
if (tDiff>2*3600) {nav<- NavData3()}
}
#md<- md[ ,columns ]
if (length(input$nav)>0) {
data<- rbind(md, nav)
data<- data[order(data$Time),]
rownames(data)<- NULL
Out<- data[(data$Time>=t1 & data$Time<=t2), ]
} else {Out<- md[(md$Time>=t1 & md$Time<=t2), ]}
} else {
md= data()[, columns]
if (length(input$nav)>0) {
nav<- NavData1()
Out= md
#Out<- rbind(md, nav)
#Out<- Out[order(Out$Time),]
rownames(Out)<- NULL
} else {Out<- md}
}
Out$Reason<- factor(Out$Reason)
Out$tSide<- factor(Out$tSide)
Out$tType<- factor(Out$tType)
Out$Time=format(Out$Time, format="%H:%M:%OS")
return (Out)
})
output$mytable <- renderDataTable(
datatable(DataOut(), extensions = 'Buttons',
options = list(pageLength = 15, searchHighlight = TRUE,dom = 'C<"clear">lfrtip'),
filter = list(position = 'top', clear = FALSE)))
output$downloadData <- downloadHandler(
filename = function() {paste0(dateText(),"_MarketData.csv", sep="") },
content = function(file) {
write.csv(DataOut() , file, row.names = FALSE, sep=",")
}
)
###Data tab for orders
OrderOut<- reactive({
id<- unique(dd()$orderid)
if ( is.null(id)) {
data<- data.frame(Time="", strategy="", messagetype="", exchange="", orderid="", destination="", side="", price="", type="", sharesexecuted="", timeinforce="")
} else {
data<- OrderTable(date=as.character(dateText()), symbol= input$text, orderid= unique(dd()$orderid))
}
return(data)
})
output$OrdersTable <- renderDataTable(
datatable(OrderOut(), extensions = 'Buttons',options = list(pageLength = 15, searchHighlight = TRUE,dom = 'C<"clear">lfrtip'),
filter = list(position = 'top', clear = FALSE))
)
output$downloadOrders <- downloadHandler(
filename = function() {paste0(dateText(),"_Orders.csv", sep="") },
content = function(file) {
write.table(OrderOut() , file, row.names = FALSE, sep=",")
}
)
  ### Container for the main price chart; the render function is (re)assigned
  ### here so the plotly widget is created with a fixed 800px height.
  output$plotui <- renderUI({
    output$plot<- trendPlot
    plotlyOutput("plot", width="100%", height = 800)
  })
  ### Container for the bottom overview chart (200px tall).
  output$plotui2 <- renderUI({
    output$plot2<- trendPlot2
    plotlyOutput("plot2", width="100%", height = 200)
  })
  ### Container for the imbalance chart (200px tall).
  output$plotui3 <- renderUI({
    output$plot3<- ImbalPlot
    plotlyOutput("plot3", width="100%", height = 200)
  })
inputChoices <- reactive({
input$go
isolate({
choices<- unique(Order()$strategy)
choices= c("None", choices)
return(choices)
})
})
  ### Orders dropdown; `selected = input$strat` keeps the user's current
  ### choice when the control is re-rendered.
  output$strat <- renderUI({
    selectInput('strat', 'Orders:', choices=inputChoices(), selected = input$strat, width="100")
  })
output$name<- renderUI({
input$go
isolate({
if (input$radio=="2") {
eval(parse(text='includeCSS("slate.css")'))
}
})
})
output$textcol<- renderUI({
if (input$radio=="2") {
eval(parse(text= 'tags$style(type="text/css", "#from {background-color: #E3E3E3 }") '))
}
})
  ### Placeholder output for brush diagnostics (intentionally empty).
  output$brush <- renderPrint({
  })
session$onSessionEnded(stopApp)
}
)
|
#' Search TC/Govdata360 indicators, countries, categories, and dataset lists
#'
#' Searches using the TCdata360 API at \url{tcdata360.worldbank.org/docs}
#' or Govdata360 API at \url{govdata360.worldbank.org/docs}.
#' The function generates a wide dataframe containing the top search results related to the query.
#'
#' Hint: Want to use your search results to call data? Helpful functions include:
#' \itemize{
#' \item See \code{\link{get_data360}} to get actual indicator/dataset/country-level data.
#' \item See \code{\link{get_metadata360}} to get indicator/dataset/country-level metadata.
#' \item See \code{\link{get_resources360}} to get additional resource information.
#' }
#'
#' @param search_string string to query in TC/Govdata360
#' @param site string pertaining to the data360 site to download data from.
#'     Possible choices: 'tc' for TCdata360, 'gov' for Govdata360
#' @param search_type NULL (optional). Character vector of search types to include.
#'     Possible values include: 'indicator', 'category', 'country'
#'     Leaving this NULL will return all possible search results.
#' @param limit_results NULL (optional). Number pertaining to the number of top results to be kept.
#' @import data.table
#' @export
#' @return A \code{data.table} (keyed by name) containing the top search
#'     results, ordered by descending relevance score.
#' @examples
#' #search a country's code in TCdata360
#' search_360('Philippines', search_type = 'country')
#'
#' #search for top 10 relevant indicator codes in TCdata360
#' search_360('GDP', search_type = 'indicator', limit_results = 10)
#'
#' #search for top 10 indicators of a database in TCdata360
#' search_360('World Development Indicators', search_type = 'indicator', limit_results = 10)
#'
#' #search for top 10 indicators of a data provider in TCdata360
#' search_360('WEF', search_type = 'indicator', limit_results = 10)
#'
#' #search for top 10 relevant categories in Govdata360
#' search_360('Governance', site='gov', search_type = 'category', limit_results = 10)
search_360 <- function(search_string = "World Bank", site = "tc", search_type = NULL, limit_results = NULL) {
    # Determine the API base URL from the requested site.
    if (site == "tc") {
        api_base <- "http://tcdata360-backend.worldbank.org/api/v1/search?q="
    } else if (site == "gov") {
        api_base <- "http://govdata360-backend.worldbank.org/api/v1/search?q="
    } else {
        # catch errors
        stop("site parameter should only be either 'tc' or 'gov'. Please try again.")
    }
    # Indicator id/name lookup from both backends, merged and de-duplicated.
    tc_ind <- jsonlite::fromJSON('http://tcdata360-backend.worldbank.org/api/v1/indicators/?fields=id%2Cname')
    gov_ind <- jsonlite::fromJSON('http://govdata360-backend.worldbank.org/api/v1/indicators/?fields=id%2Cname')
    df_ind <- unique(data.table::setDT(rbind(tc_ind, gov_ind), key = "name"))
    # Run the search query (paste0 instead of paste(..., sep = "")).
    query <- utils::URLencode(tolower(search_string))
    df <- jsonlite::fromJSON(paste0(api_base, query))
    df <- as.data.frame(df$results)
    # Optionally restrict to the requested result types.
    search_types <- c("indicator", "category", "country")
    if (!is.null(search_type)) {
        if (length(search_type) == sum(search_type %in% search_types)) {
            df <- df[df$type %in% search_type, ]
        } else {
            # catch errors
            stop("search_type parameter should only be one or a combination of \"indicator\", \"category\", or \"country\". Please try again.")
        }
    }
    if (!is.null(limit_results)) {
        df <- utils::head(df, limit_results)
    }
    # Join the indicator ids onto the results, then order by relevance.
    names(df)[names(df) == "id"] <- "slug"
    df <- data.table::setDT(df, key = "name")
    df <- df_ind[df]
    df[order(-score)]
}
| /R/search_360.R | permissive | asRodelgo/data360r | R | false | false | 3,636 | r | #' Search TC/Govdata360 indicators, countries, categories, and dataset lists
#'
#' Searches using the TCdata360 API at \url{tcdata360.worldbank.org/docs}
#' or Govdata360 API at \url{govdata360.worldbank.org/docs}.
#' The function generates a wide dataframe containing the top search results related to the query.
#'
#' Hint: Want to use your search results to call data? Helpful functions include:
#' \itemize{
#' \item See \code{\link{get_data360}} to get actual indicator/dataset/country-level data.
#' \item See \code{\link{get_metadata360}} to get indicator/dataset/country-level metadata.
#' \item See \code{\link{get_resources360}} to get additional resource information.
#' }
#'
#' @param search_string string to query in TC/Govdata360
#' @param site string pertaining to the data360 site to download data from.
#' Possible choices: 'tc' for TCdata360, 'gov' for Govdata360
#' @param search_type NULL (optional). Character vector of search types to include.
#' Possible values include: 'indicator', 'category', 'country'
#' Leaving this NULL will return all possible search results.
#' @param limit_results NULL (optional). Number pertaining to the number of top results to be kept.
#' @import data.table
#' @export
#' @return List containing top search results
#' @examples
#' #search a country's code in TCdata360
#' search_360('Philippines', search_type = 'country')
#'
#' #search for top 10 relevant indicator codes in TCdata360
#' search_360('GDP', search_type = 'indicator', limit_results = 10)
#'
#' #search for top 10 indicators of a database in TCdata360
#' search_360('World Development Indicators', search_type = 'indicator', limit_results = 10)
#'
#' #search for top 10 indicators of a data provider in TCdata360
#' search_360('WEF', search_type = 'indicator', limit_results = 10)
#'
#' #search for top 10 relevant categories in Govdata360
#' search_360('Governance', site='gov', search_type = 'category', limit_results = 10)
## Query the TC/Govdata360 search endpoint and return the top hits as a
## name-keyed data.table ordered by descending relevance score.
search_360 <- function(search_string = "World Bank", site = "tc", search_type = NULL, limit_results = NULL) {
    ## Resolve the backend endpoint for the chosen site.
    backend <- switch(site,
                      tc  = "http://tcdata360-backend.worldbank.org/api/v1/search?q=",
                      gov = "http://govdata360-backend.worldbank.org/api/v1/search?q=")
    if (is.null(backend)) {
        stop("site parameter should only be either 'tc' or 'gov'. Please try again.")
    }
    ## Build the indicator id/name lookup across both backends.
    lookup <- rbind(
        jsonlite::fromJSON('http://tcdata360-backend.worldbank.org/api/v1/indicators/?fields=id%2Cname'),
        jsonlite::fromJSON('http://govdata360-backend.worldbank.org/api/v1/indicators/?fields=id%2Cname')
    )
    lookup <- unique(data.table::setDT(lookup, key = "name"))
    ## Run the search query.
    hits <- jsonlite::fromJSON(paste(backend, utils::URLencode(tolower(search_string)), sep = ""))
    hits <- as.data.frame(hits$results)
    ## Keep only the requested result types, if any were given.
    if (!is.null(search_type)) {
        valid_types <- c("indicator", "category", "country")
        if (sum(search_type %in% valid_types) != length(search_type)) {
            stop("search_type parameter should only be one or a combination of \"indicator\", \"category\", or \"country\". Please try again.")
        }
        hits <- hits[hits$type %in% search_type, ]
    }
    if (!is.null(limit_results)) {
        hits <- utils::head(hits, limit_results)
    }
    ## Join indicator ids onto the results and sort by relevance.
    names(hits)[names(hits) == "id"] <- "slug"
    hits <- data.table::setDT(hits, key = "name")
    out <- lookup[hits]
    out <- out[order(-score)]
    return(out)
}
|
#####################################################################
## Descripcion: Automatizar proceso
##
## Fecha: 2021-05-14
## Autor: MH
#####################################################################
library(tidyverse)
library(SparkR, pos = 999999)
# Cached model inputs produced by earlier pipeline steps.
dta_train <- readRDS('cache/dta_train_spark.RDS')
dta_test <- readRDS('cache/dta_test_eval.rds')
# Tunable section below (original comment: "Cambiante" = changeable)
#' Fit a SparkR ALS model and cache its test-set predictions to disk.
#'
#' Reads `dta_train` / `dta_test` from the enclosing environment (loaded at
#' the top of this script), trains an implicit-feedback ALS model on a local
#' Spark session, and saves the non-NaN predictions as an RDS file whose name
#' encodes the hyperparameters.
#'
#' @param alpha_val  ALS implicit-feedback confidence scaling.
#' @param rank_val   Number of latent factors.
#' @param lambda_val Regularization strength (regParam).
#' @return Path of the saved RDS file.
guarda_preds <- function(alpha_val = 40, rank_val = 20, lambda_val = 1){
  print("Iniciando sesion de spark ...")
  print(Sys.time())
  sparkR.session(master = "local[*]", sparkConfig = list(spark.driver.memory = "8g"))
  print("Cargando dfs...")
  print(Sys.time())
  train_tib <- as.DataFrame(dta_train)
  test_tib <- as.DataFrame(dta_test)
  print("Corriendo modelo...")
  print(Sys.time())
  model <- spark.als(train_tib, userCol = "steam_id", itemCol = "app_id",
                     ratingCol = "playtime_forever", alpha = alpha_val, rank = rank_val,
                     implicitPrefs = TRUE, regParam = lambda_val, nonnegative = FALSE)
  predictions <- SparkR::predict(model, test_tib)
  print("Collecting predicciones...")
  print(Sys.time())
  predictions_collect <- SparkR::collect(predictions)
  # Drop rows ALS could not score (cold-start users/items yield NaN).
  predictions_tib <- predictions_collect %>%
    as_tibble() %>%
    filter(!is.nan(prediction))
  ruta <- paste0('cache/predictions_results/preds_alpha', alpha_val,
                 '_rank', rank_val, '_lambda', lambda_val, '.rds')
  print("Guardando predicciones...")
  print(Sys.time())
  saveRDS(predictions_tib, ruta)
  sparkR.session.stop()
  gc()
  # (removed the dead no-op `ruta <- paste0(ruta, "")`)
  return(ruta)
}
# model_grid <- expand.grid(alphas = c(1), ranks = c(20,80), lambdas = c(1,150,500)) %>%
# bind_rows(tibble(alphas = 1, ranks = 160, lambdas = 150)) %>%
# tail(6)
#
# pmap(.l = list(model_grid$alphas, model_grid$ranks, model_grid$lambdas),
# .f = guarda_preds)
# Segunda vuelta ----------------------------------------------------------
# model_grid <- expand.grid(alphas = c(40), ranks = c(80), lambdas = c(150,250))
#
# pmap(.l = list(model_grid$alphas, model_grid$ranks, model_grid$lambdas),
# .f = guarda_preds)
# Run a single configuration: alpha = 10, rank = 160, lambda = 250.
guarda_preds(10,160,250)
| /src/08_magic_loop.R | no_license | mhnk77/recom_system_steam | R | false | false | 2,187 | r | #####################################################################
## Descripcion: Automatizar proceso
##
## Fecha: 2021-05-14
## Autor: MH
#####################################################################
library(tidyverse)
library(SparkR, pos = 999999)
# Cached model inputs produced by earlier pipeline steps.
dta_train <- readRDS('cache/dta_train_spark.RDS')
dta_test <- readRDS('cache/dta_test_eval.rds')
# Tunable section below (original comment: "Cambiante" = changeable)
# Train one ALS configuration with SparkR and persist its test predictions;
# returns the path of the RDS file that was written.
guarda_preds <- function(alpha_val = 40, rank_val = 20, lambda_val = 1){
  print("Iniciando sesion de spark ...")
  print(Sys.time())
  sparkR.session(master = "local[*]", sparkConfig = list(spark.driver.memory = "8g"))
  print("Cargando dfs...")
  print(Sys.time())
  sdf_train <- as.DataFrame(dta_train)
  sdf_test <- as.DataFrame(dta_test)
  print("Corriendo modelo...")
  print(Sys.time())
  als_fit <- spark.als(sdf_train, userCol = "steam_id", itemCol = "app_id",
                       ratingCol = "playtime_forever", alpha = alpha_val,
                       rank = rank_val, implicitPrefs = TRUE,
                       regParam = lambda_val, nonnegative = FALSE)
  scored <- SparkR::predict(als_fit, sdf_test)
  print("Collecting predicciones...")
  print(Sys.time())
  scored_local <- SparkR::collect(scored)
  # Keep only rows that received a real (non-NaN) prediction.
  clean_preds <- filter(as_tibble(scored_local), !is.nan(prediction))
  ruta <- paste0('cache/predictions_results/preds_alpha', alpha_val,
                 '_rank', rank_val, '_lambda', lambda_val, '.rds')
  print("Guardando predicciones...")
  print(Sys.time())
  saveRDS(clean_preds, ruta)
  sparkR.session.stop()
  gc()
  return(ruta)
}
# model_grid <- expand.grid(alphas = c(1), ranks = c(20,80), lambdas = c(1,150,500)) %>%
# bind_rows(tibble(alphas = 1, ranks = 160, lambdas = 150)) %>%
# tail(6)
#
# pmap(.l = list(model_grid$alphas, model_grid$ranks, model_grid$lambdas),
# .f = guarda_preds)
# Segunda vuelta ----------------------------------------------------------
# model_grid <- expand.grid(alphas = c(40), ranks = c(80), lambdas = c(150,250))
#
# pmap(.l = list(model_grid$alphas, model_grid$ranks, model_grid$lambdas),
# .f = guarda_preds)
# Run a single configuration: alpha = 10, rank = 160, lambda = 250.
guarda_preds(10,160,250)
|
df019657ee519881c225f3c35fd24ac0 axquery_query44_1344n.qdimacs 618 1647 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/axquery_query44_1344n/axquery_query44_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 71 | r | df019657ee519881c225f3c35fd24ac0 axquery_query44_1344n.qdimacs 618 1647 |
## Spam detection on the UCI Spambase data set: load the raw data, label the
## 57 feature columns plus the class column, and create an 80/20 stratified
## train/test split.
## library() (not require()) so a missing dependency fails loudly up front.
library(ggplot2)
library(kernlab)
library(caret)

# Raw data has no header row; values are comma-separated.
spamset <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data", sep=",")

# Column names per the UCI spambase.names description: 48 word frequencies,
# 6 character frequencies, 3 capital-run statistics, and the 0/1 spam label.
names(spamset) <- c(
  "word_freq_make", "word_freq_address", "word_freq_all", "word_freq_3d",
  "word_freq_our", "word_freq_over", "word_freq_remove", "word_freq_internet",
  "word_freq_order", "word_freq_mail", "word_freq_receive", "word_freq_will",
  "word_freq_people", "word_freq_report", "word_freq_addresses", "word_freq_free",
  "word_freq_business", "word_freq_email", "word_freq_you", "word_freq_credit",
  "word_freq_your", "word_freq_font", "word_freq_000", "word_freq_money",
  "word_freq_hp", "word_freq_hpl", "word_freq_george", "word_freq_650",
  "word_freq_lab", "word_freq_labs", "word_freq_telnet", "word_freq_857",
  "word_freq_data", "word_freq_415", "word_freq_85", "word_freq_technology",
  "word_freq_1999", "word_freq_parts", "word_freq_pm", "word_freq_direct",
  "word_freq_cs", "word_freq_meeting", "word_freq_original", "word_freq_project",
  "word_freq_re", "word_freq_edu", "word_freq_table", "word_freq_conference",
  "char_freq_semi", "char_freq_rp", "char_freq_rb", "char_freq_exc",
  "char_freq_dollar", "char_freq_hash",
  "capital_run_length_average", "capital_run_length_longest",
  "capital_run_length_total", "is_spam")

# Class label as a factor for classification.
spamset$is_spam <- as.factor(spamset$is_spam)

# Stratified 80/20 split (preserves the spam/ham ratio).
trainIndex <- createDataPartition(spamset$is_spam, p = .8, list = FALSE, times = 1)
train <- spamset[ trainIndex,]
test <- spamset[-trainIndex,]

# ggplot(spamset,aes(x=capital_run_length_total, y=char_freq_exc, col = is_spam)) +geom_point()+scale_y_log10()+scale_x_log10()
| /Project/spamdetect/spam_detector.R | no_license | blaklaybul/ILLC | R | false | false | 2,613 | r | require(ggplot2)
require(kernlab)
require(caret)

# Pull the raw Spambase data (comma-separated, no header) straight from UCI.
spamset <- read.table("http://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data", sep=",")

# Official Spambase column names: 48 word-frequency features, 6
# character-frequency features, 3 capital-run statistics, and the label.
spam_cols <- c(
  "word_freq_make", "word_freq_address", "word_freq_all", "word_freq_3d",
  "word_freq_our", "word_freq_over", "word_freq_remove", "word_freq_internet",
  "word_freq_order", "word_freq_mail", "word_freq_receive", "word_freq_will",
  "word_freq_people", "word_freq_report", "word_freq_addresses", "word_freq_free",
  "word_freq_business", "word_freq_email", "word_freq_you", "word_freq_credit",
  "word_freq_your", "word_freq_font", "word_freq_000", "word_freq_money",
  "word_freq_hp", "word_freq_hpl", "word_freq_george", "word_freq_650",
  "word_freq_lab", "word_freq_labs", "word_freq_telnet", "word_freq_857",
  "word_freq_data", "word_freq_415", "word_freq_85", "word_freq_technology",
  "word_freq_1999", "word_freq_parts", "word_freq_pm", "word_freq_direct",
  "word_freq_cs", "word_freq_meeting", "word_freq_original", "word_freq_project",
  "word_freq_re", "word_freq_edu", "word_freq_table", "word_freq_conference",
  "char_freq_semi", "char_freq_rp", "char_freq_rb", "char_freq_exc",
  "char_freq_dollar", "char_freq_hash",
  "capital_run_length_average", "capital_run_length_longest",
  "capital_run_length_total", "is_spam")
names(spamset) <- spam_cols

# Treat the 0/1 label as a factor for classification.
spamset$is_spam <- as.factor(spamset$is_spam)

# 80/20 stratified split on the class label.
trainIndex <- createDataPartition(spamset$is_spam, p = .8, list = FALSE, times = 1)
train <- spamset[trainIndex, ]
test <- spamset[-trainIndex, ]

# ggplot(spamset,aes(x=capital_run_length_total, y=char_freq_exc, col = is_spam)) +geom_point()+scale_y_log10()+scale_x_log10()
|
simID <- function(id=NULL, x1, x2, x3, beta1.true, beta2.true, beta3.true,
alpha1.true, alpha2.true, alpha3.true,
kappa1.true, kappa2.true, kappa3.true, theta.true, SigmaV.true=NULL, cens)
{
if(!is.null(id) & is.null(SigmaV.true)){
stop("SigmaV.true must be given to simulate correated data")
}
else
{
n <- dim(x1)[1]
p1 <- dim(x1)[2]
p2 <- dim(x2)[2]
p3 <- dim(x3)[2]
if(theta.true >0)
{
gamma.true <- rgamma(n, 1/theta.true, 1/theta.true)
}
if(theta.true == 0)
{
gamma.true <- rep(1, n)
}
if(is.null(id))
{
LP1 <- as.vector(beta1.true %*% t(x1))
LP2 <- as.vector(beta2.true %*% t(x2))
LP3 <- as.vector(beta3.true %*% t(x3))
}
if(!is.null(id))
{
J <- length(unique(id))
nj <- as.vector(table(id))
Vmat <- mvrnorm(J, rep(0, 3), SigmaV.true) # J X 3
LP1 <- as.vector(beta1.true %*% t(x1) + rep(Vmat[,1], nj))
LP2 <- as.vector(beta2.true %*% t(x2) + rep(Vmat[,2], nj))
LP3 <- as.vector(beta3.true %*% t(x3) + rep(Vmat[,3], nj))
}
Rind <- NULL
R <- rweibull(n, shape = alpha1.true, scale = exp(-(log(kappa1.true) +
LP1 + log(gamma.true))/alpha1.true))
D <- rweibull(n, shape = alpha2.true, scale = exp(-(log(kappa2.true) +
LP2 + log(gamma.true))/alpha2.true))
yesR <- R < D
D[yesR] <- R[yesR] + rweibull(sum(yesR), shape = alpha3.true,
scale = exp(-(log(kappa3.true) + LP3[yesR] + log(gamma.true[yesR]))/alpha3.true))
delta1 <- rep(NA, n)
delta2 <- rep(NA, n)
y1 <- R
y2 <- D
Cen <- runif(n, cens[1], cens[2])
ind01 <- which(D < R & D < Cen)
y1[ind01] <- D[ind01]
delta1[ind01] <- 0
delta2[ind01] <- 1
ind10 <- which(R < D & R < Cen & D >= Cen)
y2[ind10] <- Cen[ind10]
delta1[ind10] <- 1
delta2[ind10] <- 0
ind00 <- which(R >= Cen & D >= Cen)
y1[ind00] <- Cen[ind00]
y2[ind00] <- Cen[ind00]
delta1[ind00] <- 0
delta2[ind00] <- 0
ind11 <- which(R < Cen & D < Cen & R < D)
delta1[ind11] <- 1
delta2[ind11] <- 1
ret <- data.frame(cbind(y1, delta1, y2, delta2))
return(ret)
}
}
| /R/simID.R | no_license | cran/SemiCompRisks | R | false | false | 2,495 | r |
## Simulate semi-competing risks data from a Weibull illness-death model:
## R = non-terminal event time, D = terminal event time, with a subject-level
## gamma frailty (variance theta.true) and optional cluster-level MVN random
## effects (covariance SigmaV.true) when a cluster id is supplied.
## Returns a data.frame with y1/delta1 (non-terminal) and y2/delta2 (terminal).
simID <- function(id=NULL, x1, x2, x3, beta1.true, beta2.true, beta3.true,
alpha1.true, alpha2.true, alpha3.true,
kappa1.true, kappa2.true, kappa3.true, theta.true, SigmaV.true=NULL, cens)
{
    ## NOTE(review): scalar guard uses elementwise `&`; `&&` would be the
    ## idiomatic scalar operator here. Also "correated" is a typo for
    ## "correlated" in the error message.
    if(!is.null(id) & is.null(SigmaV.true)){
        stop("SigmaV.true must be given to simulate correated data")
    }
    else
    {
        n <- dim(x1)[1]
        ## p1..p3 are computed but never used below.
        p1 <- dim(x1)[2]
        p2 <- dim(x2)[2]
        p3 <- dim(x3)[2]
        ## Subject-level gamma frailty with mean 1 and variance theta.true;
        ## theta.true == 0 disables the frailty (gamma == 1 for everyone).
        if(theta.true >0)
        {
            gamma.true <- rgamma(n, 1/theta.true, 1/theta.true)
        }
        if(theta.true == 0)
        {
            gamma.true <- rep(1, n)
        }
        ## Linear predictors for the three transition hazards.
        if(is.null(id))
        {
            LP1 <- as.vector(beta1.true %*% t(x1))
            LP2 <- as.vector(beta2.true %*% t(x2))
            LP3 <- as.vector(beta3.true %*% t(x3))
        }
        ## With clustering: add one MVN random-effect triple per cluster,
        ## repeated nj[j] times (requires MASS::mvrnorm in scope).
        if(!is.null(id))
        {
            J <- length(unique(id))
            nj <- as.vector(table(id))
            Vmat <- mvrnorm(J, rep(0, 3), SigmaV.true) # J X 3
            LP1 <- as.vector(beta1.true %*% t(x1) + rep(Vmat[,1], nj))
            LP2 <- as.vector(beta2.true %*% t(x2) + rep(Vmat[,2], nj))
            LP3 <- as.vector(beta3.true %*% t(x3) + rep(Vmat[,3], nj))
        }
        ## Rind is never used afterwards.
        Rind <- NULL
        ## Latent event times from Weibull hazards scaled by the frailty.
        R <- rweibull(n, shape = alpha1.true, scale = exp(-(log(kappa1.true) +
LP1 + log(gamma.true))/alpha1.true))
        D <- rweibull(n, shape = alpha2.true, scale = exp(-(log(kappa2.true) +
LP2 + log(gamma.true))/alpha2.true))
        yesR <- R < D
        ## Subjects with the non-terminal event first: re-draw the residual
        ## time to the terminal event from the third (post-R) hazard.
        D[yesR] <- R[yesR] + rweibull(sum(yesR), shape = alpha3.true,
scale = exp(-(log(kappa3.true) + LP3[yesR] + log(gamma.true[yesR]))/alpha3.true))
        delta1 <- rep(NA, n)
        delta2 <- rep(NA, n)
        y1 <- R
        y2 <- D
        ## Independent uniform censoring.
        Cen <- runif(n, cens[1], cens[2])
        ## Four mutually exclusive observation patterns.
        ind01 <- which(D < R & D < Cen)
        y1[ind01] <- D[ind01]
        delta1[ind01] <- 0
        delta2[ind01] <- 1
        ind10 <- which(R < D & R < Cen & D >= Cen)
        y2[ind10] <- Cen[ind10]
        delta1[ind10] <- 1
        delta2[ind10] <- 0
        ind00 <- which(R >= Cen & D >= Cen)
        y1[ind00] <- Cen[ind00]
        y2[ind00] <- Cen[ind00]
        delta1[ind00] <- 0
        delta2[ind00] <- 0
        ind11 <- which(R < Cen & D < Cen & R < D)
        delta1[ind11] <- 1
        delta2[ind11] <- 1
        ret <- data.frame(cbind(y1, delta1, y2, delta2))
        return(ret)
    }
}
|
\name{ecospat.mpa}
\alias{ecospat.mpa}
\title{Minimal Predicted Area}
\description{Calculate the minimal predicted area.}
\usage{ecospat.mpa (Pred, Sp.occ.xy, perc)}
\arguments{
\item{Pred}{Numeric or RasterLayer predicted suitabilities from a SDM prediction.}
\item{Sp.occ.xy}{xy-coordinates of the species (if Pred is a RasterLayer).}
\item{perc}{Percentage of Sp.occ.xy that should be encompassed by the binary map.}
}
\details{The minimal predicted area (MPA) is the minimal surface obtained by considering all pixels with predictions above a defined probability threshold (e.g. 0.7) that still encompasses 90 percent of the species' occurrences (Engler et al. 2004).}
\value{
Returns the minimal predicted area.
}
\author{Frank Breiner \email{frank.breiner@wsl.ch}}
\references{
Engler, R., A. Guisan and L. Rechsteiner. 2004. An improved approach for predicting the distribution of rare and endangered species from occurrence and pseudo-absence data. \emph{Journal of Applied Ecology}, \bold{41}, 263-274.}
\examples{
obs <- (ecospat.testData$glm_Saxifraga_oppositifolia
[which(ecospat.testData$Saxifraga_oppositifolia==1)])
ecospat.mpa(obs)
ecospat.mpa(obs,perc=1) ## 100 percent of the presences encompassed
} | /ecospat/man/ecospat.mpa.Rd | no_license | lzhangss/ecospat | R | false | false | 1,238 | rd | \name{ecospat.mpa}
\alias{ecospat.mpa}
\title{Minimal Predicted Area}
\description{Calculate the minimal predicted area.}
\usage{ecospat.mpa (Pred, Sp.occ.xy, perc)}
\arguments{
\item{Pred}{Numeric or RasterLayer predicted suitabilities from a SDM prediction.}
\item{Sp.occ.xy}{xy-coordinates of the species (if Pred is a RasterLayer).}
\item{perc}{Percentage of Sp.occ.xy that should be encompassed by the binary map.}
}
\details{The minimal predicted area (MPA) is the minimal surface obtained by considering all pixels with predictions above a defined probability threshold (e.g. 0.7) that still encompasses 90 percent of the species' occurrences (Engler et al. 2004).}
\value{
Returns the minimal predicted area.
}
\author{Frank Breiner \email{frank.breiner@wsl.ch}}
\references{
Engler, R., A. Guisan and L. Rechsteiner. 2004. An improved approach for predicting the distribution of rare and endangered species from occurrence and pseudo-absence data. \emph{Journal of Applied Ecology}, \bold{41}, 263-274.}
\examples{
obs <- (ecospat.testData$glm_Saxifraga_oppositifolia
[which(ecospat.testData$Saxifraga_oppositifolia==1)])
ecospat.mpa(obs)
ecospat.mpa(obs,perc=1) ## 100 percent of the presences encompassed
} |
## Task51: for a panel of real drugs, simulate multi-dose regimens over a
## log-spaced dose grid, compute simulated and theoretical inhibition metrics
## (AFIR / ASIR / SSIM) plus assumption flags, save the results table, and
## produce dose-response figures.
source("ams_initialize_script.R")
source("SCIM_calculation.R")
source("ivsc_2cmt_RR_V1.R")
dirs$rscript_name = "Task51_MultiDose_Figure_RealDrugs.R"
dirs$filename_prefix= str_extract(dirs$rscript_name,"^Task\\d\\d\\w?_")
## Two-compartment IV/SC model object from the sourced model file.
model = ivsc_2cmt_RR_KdT0L0()
#read in parameter ranges to explore
param_minmax.in = readxl::read_excel("parameters/Task51_Param_Ranges.xlsx")
param_minmax = param_minmax.in %>%
  as.data.frame() %>%
  select(Parameter,min,max,units,fixed) %>%
  filter(!(is.na(fixed))) %>%
  filter(fixed==0)
rownames(param_minmax) = param_minmax$Parameter
# Dose time, frequency, compartment, nominal dose
tmax = 52*7 #days
tau = 21 #days
compartment = 2
infusion = FALSE
n_points = 10
i_row = 0
i_drug = 0
result = list()
## `drugs`, `parameter_files`, `scale_mpk2nmol`, `drug_target`, `drug_ligand`
## and `drug_order` come from the sourced setup scripts.
for (drug in drugs){ #loop over all the drugs in the list
  i_drug = i_drug + 1
  param.as.double = read.param.file(parameter_files[drug])[model$pin]
  for (dose_mpk in 10^(seq(-3,3,by=0.25))) {
    dose.nmol = dose_mpk*scale_mpk2nmol
    sim = lumped.parameters.simulation(model, param.as.double, dose.nmol, tmax, tau, compartment, infusion = infusion)
    thy = lumped.parameters.theory ( param.as.double, dose.nmol, tau)
    #all parameter values for the output table
    ## K is the safety-margin factor used in the assumption checks below.
    K = 2
    par = param.as.double %>%
      t() %>%
      as.data.frame() %>%
      bind_cols(sim,thy) %>%
      mutate(dose_mpk = dose_mpk,
             drug = drug,
             target = drug_target[i_drug],
             ligand = drug_ligand[i_drug],
             order = drug_order[i_drug],
             ## NOTE(review): id records the pre-increment counter, so it is
             ## 0-based while result[[i_row]] below is 1-based -- confirm
             ## this offset is intended.
             id = i_row,
             tmax = tmax,
             tau = tau,
             dose_nmol = dose.nmol,
             koff_DT = Kd_DT/kon_DT,
             Kss_DT = Kd_DT + keDT/kon_DT,
             koff_TL = Kd_TL/kon_TL,
             Kss_TL = Kd_TL + keDT/kon_TL,
             assumption_SCIM_lt_30 = SCIM_Lfold_adhoc_thy < 0.30,
             assumption_drug_gg_Ttot = Dss_thy > K*Ttotss_thy,
             assumption_drug_gg_KssDT = Dss_thy > K*Kss_DT,
             assumption_koffDT_gt_keT = koff_DT > keT,
             assumption_Dss_gt_Ccrit = Dss_thy > K*Ccrit_thy,
             assumption_Dss_gg_LssKssDT_KssTL = Dss_thy > K*Kss_DT*Lss_thy/Kss_TL,
             assumption_ODE_tolerance = Dss_thy/TLss_thy < 1e12,
             assumption_all_SCIM = assumption_SCIM_lt_30 &
               assumption_drug_gg_Ttot &
               assumption_Dss_gt_Ccrit &
               assumption_ODE_tolerance &
               assumption_Dss_gg_LssKssDT_KssTL)
    #create result table
    i_row = i_row + 1
    result[[i_row]] = par
  }
}
results = bind_rows(result)
write.csv(results, file = "results/Task51_MultiDose_Figure.csv")
#plot results ----
## Reshape to long format so the simulated and theoretical metrics become
## separate colored/line-typed curves in one facet per drug.
data_plot_all = results %>%
  select(dose_mpk, drug, target, ligand, order, SCIM_sim, AFIR_thy, SCIM_Lfold_adhoc_thy, assumption = assumption_drug_gg_Ttot) %>%
  gather(key,value,-c(drug,dose_mpk,drug,target,ligand,order,assumption)) %>%
  arrange(order) %>%
  mutate(drug = factor(drug, levels = unique(drug)),
         target = paste("Target:",target),
         ligand = paste("Ligand:",ligand),
         key = plyr::mapvalues(key,
                               c("SCIM_sim","AFIR_thy","SCIM_Lfold_adhoc_thy"),
                               c("ASIR simulation","AFIR theory","ASIR theory")))
data_plot = data_plot_all %>%
  filter(drug %in% c("Atezolizumab","Siltuximab","Tocilizumab"))
# data_assumption_false = data_plot %>%
#   filter(assumption == FALSE) %>%
#   filter(key == "ASIR simulation")
g = ggplot(data_plot, aes(x=dose_mpk,y=1-value, color = key, linetype = key))
g = g + geom_line(size = 1, alpha = .5)
#g = g + geom_point(data = data_assumption_false, color = "red", show.legend = FALSE)
g = g + facet_wrap(~drug+target+ligand, nrow = 1)#, dir = "v", nrow = 2) )
g = g + xgx_scale_x_log10(breaks = 10^seq(-2,20,by=1))#, minor_breaks = 1)
breaks = c(0,90,99,99.9,99.99)/100
labels = paste0(breaks*100,"%")
g = g + xgx_scale_y_reverselog10(breaks = breaks, labels = labels)
#g = g + xgx_scale_y_log10()#, minor_breaks = 1)
g = g + scale_color_manual(values = c("AFIR theory" = "red",
                                      "ASIR simulation" = "black",
                                      "ASIR theory" = "blue"))
g = g + scale_linetype_manual(values = c("AFIR theory" = "dotted",
                                         "ASIR simulation" = "solid",
                                         "ASIR theory" = "dashed"))
g = g + labs(x = "Dose (mg/kg) every 3 weeks",
             y = "Steady State Inhibition Metric\nSSIM = 1 - TLss/TL0",
             caption = "")
g = g + theme(legend.position = "top")
## NOTE(review): ggsave() is called before print(g); its default
## plot = last_plot() is only set by printing, so the intended figure may not
## be the one saved -- consider passing plot = g explicitly.
ggsave(width = 6.5, height= 4, filename = "./figures/Task51_DoseRange_Drugs.png")
print(g)
## %+% swaps in the full six-drug dataset while keeping all plot layers.
g = g %+% data_plot_all
ggsave(width = 8, height= 4, filename = "./figures/Task51_DoseRange_All6_Drugs.png")
print(g)
| /Task51_MultiDose_Figure_RealDrugs.R | no_license | iamstein/TMDD_EndogenousLigand | R | false | false | 4,920 | r | source("ams_initialize_script.R")
## Task51 (continued from the initialization source line above): simulate
## multi-dose regimens for a drug panel, tabulate AFIR/ASIR/SSIM metrics and
## assumption flags, then plot dose-response figures.
source("SCIM_calculation.R")
source("ivsc_2cmt_RR_V1.R")
dirs$rscript_name = "Task51_MultiDose_Figure_RealDrugs.R"
dirs$filename_prefix= str_extract(dirs$rscript_name,"^Task\\d\\d\\w?_")
model = ivsc_2cmt_RR_KdT0L0()
#read in parameter ranges to explore
param_minmax.in = readxl::read_excel("parameters/Task51_Param_Ranges.xlsx")
param_minmax = param_minmax.in %>%
  as.data.frame() %>%
  select(Parameter,min,max,units,fixed) %>%
  filter(!(is.na(fixed))) %>%
  filter(fixed==0)
rownames(param_minmax) = param_minmax$Parameter
# Dose time, frequency, compartment, nominal dose
tmax = 52*7 #days
tau = 21 #days
compartment = 2
infusion = FALSE
n_points = 10
i_row = 0
i_drug = 0
result = list()
## Loop over the drug panel and a log-spaced dose grid; one metrics row per
## (drug, dose) combination is appended to `result`.
for (drug in drugs){ #loop over all the drugs in the list
  i_drug = i_drug + 1
  param.as.double = read.param.file(parameter_files[drug])[model$pin]
  for (dose_mpk in 10^(seq(-3,3,by=0.25))) {
    dose.nmol = dose_mpk*scale_mpk2nmol
    sim = lumped.parameters.simulation(model, param.as.double, dose.nmol, tmax, tau, compartment, infusion = infusion)
    thy = lumped.parameters.theory ( param.as.double, dose.nmol, tau)
    #all parameter values for the output table
    K = 2
    par = param.as.double %>%
      t() %>%
      as.data.frame() %>%
      bind_cols(sim,thy) %>%
      mutate(dose_mpk = dose_mpk,
             drug = drug,
             target = drug_target[i_drug],
             ligand = drug_ligand[i_drug],
             order = drug_order[i_drug],
             id = i_row,
             tmax = tmax,
             tau = tau,
             dose_nmol = dose.nmol,
             koff_DT = Kd_DT/kon_DT,
             Kss_DT = Kd_DT + keDT/kon_DT,
             koff_TL = Kd_TL/kon_TL,
             Kss_TL = Kd_TL + keDT/kon_TL,
             assumption_SCIM_lt_30 = SCIM_Lfold_adhoc_thy < 0.30,
             assumption_drug_gg_Ttot = Dss_thy > K*Ttotss_thy,
             assumption_drug_gg_KssDT = Dss_thy > K*Kss_DT,
             assumption_koffDT_gt_keT = koff_DT > keT,
             assumption_Dss_gt_Ccrit = Dss_thy > K*Ccrit_thy,
             assumption_Dss_gg_LssKssDT_KssTL = Dss_thy > K*Kss_DT*Lss_thy/Kss_TL,
             assumption_ODE_tolerance = Dss_thy/TLss_thy < 1e12,
             assumption_all_SCIM = assumption_SCIM_lt_30 &
               assumption_drug_gg_Ttot &
               assumption_Dss_gt_Ccrit &
               assumption_ODE_tolerance &
               assumption_Dss_gg_LssKssDT_KssTL)
    #create result table
    i_row = i_row + 1
    result[[i_row]] = par
  }
}
results = bind_rows(result)
write.csv(results, file = "results/Task51_MultiDose_Figure.csv")
#plot results ----
## Long format: simulated vs theoretical metrics as separate curves per drug.
data_plot_all = results %>%
  select(dose_mpk, drug, target, ligand, order, SCIM_sim, AFIR_thy, SCIM_Lfold_adhoc_thy, assumption = assumption_drug_gg_Ttot) %>%
  gather(key,value,-c(drug,dose_mpk,drug,target,ligand,order,assumption)) %>%
  arrange(order) %>%
  mutate(drug = factor(drug, levels = unique(drug)),
         target = paste("Target:",target),
         ligand = paste("Ligand:",ligand),
         key = plyr::mapvalues(key,
                               c("SCIM_sim","AFIR_thy","SCIM_Lfold_adhoc_thy"),
                               c("ASIR simulation","AFIR theory","ASIR theory")))
data_plot = data_plot_all %>%
  filter(drug %in% c("Atezolizumab","Siltuximab","Tocilizumab"))
# data_assumption_false = data_plot %>%
#   filter(assumption == FALSE) %>%
#   filter(key == "ASIR simulation")
g = ggplot(data_plot, aes(x=dose_mpk,y=1-value, color = key, linetype = key))
g = g + geom_line(size = 1, alpha = .5)
#g = g + geom_point(data = data_assumption_false, color = "red", show.legend = FALSE)
g = g + facet_wrap(~drug+target+ligand, nrow = 1)#, dir = "v", nrow = 2) )
g = g + xgx_scale_x_log10(breaks = 10^seq(-2,20,by=1))#, minor_breaks = 1)
breaks = c(0,90,99,99.9,99.99)/100
labels = paste0(breaks*100,"%")
g = g + xgx_scale_y_reverselog10(breaks = breaks, labels = labels)
#g = g + xgx_scale_y_log10()#, minor_breaks = 1)
g = g + scale_color_manual(values = c("AFIR theory" = "red",
                                      "ASIR simulation" = "black",
                                      "ASIR theory" = "blue"))
g = g + scale_linetype_manual(values = c("AFIR theory" = "dotted",
                                         "ASIR simulation" = "solid",
                                         "ASIR theory" = "dashed"))
g = g + labs(x = "Dose (mg/kg) every 3 weeks",
             y = "Steady State Inhibition Metric\nSSIM = 1 - TLss/TL0",
             caption = "")
g = g + theme(legend.position = "top")
## NOTE(review): ggsave() before print(g) relies on last_plot(), which is set
## by printing -- consider ggsave(..., plot = g).
ggsave(width = 6.5, height= 4, filename = "./figures/Task51_DoseRange_Drugs.png")
print(g)
g = g %+% data_plot_all
ggsave(width = 8, height= 4, filename = "./figures/Task51_DoseRange_All6_Drugs.png")
print(g)
|
## Plot4.R -- four-panel base-graphics figure of household power consumption
## for 2007-02-01 and 2007-02-02 (Exploratory Data Analysis course project).
getwd() # check the working directory; the downloaded data file must be here

## Read the dataset; "?" encodes missing values in this file.
elecpower <- read.csv(file = "household_power_consumption.txt", sep = ";", na.strings = "?")

## Convert the Date column from character to Date.
elecpower$Date <- as.Date(elecpower$Date, format = "%d/%m/%Y")

## Keep only the two days of interest. Base subset() is used so the script
## has no dplyr dependency (the original piped through filter() without ever
## loading dplyr); like filter(), subset() drops rows with NA dates.
elecpower2 <- subset(elecpower, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))

## Combine Date and Time into a POSIXlt date-time for the x axis.
elecpower2$Time <- strftime(paste(elecpower2$Date, elecpower2$Time), format = "%y/%m/%d %H:%M:%S")
elecpower2$Time <- strptime(elecpower2$Time, format = "%y/%m/%d %H:%M:%S")

## Plot 4: 2x2 panel of time-series plots.
## Fix: the original plotted an undefined object `elec5`; the data frame
## actually built above is `elecpower2`.
par(mfrow = c(2, 2), mar = c(4, 4, 2, 2))
## 4a: global active power
plot(elecpower2$Time, elecpower2$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
## 4b: voltage
plot(elecpower2$Time, elecpower2$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## 4c: the three sub-metering series overlaid
plot(elecpower2$Time, elecpower2$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(elecpower2$Time, elecpower2$Sub_metering_2, type = "l", col = "red")
lines(elecpower2$Time, elecpower2$Sub_metering_3, type = "l", col = "blue")
legend("topright", cex = 0.3, col = c("black", "red", "blue"), lty = 1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## 4d: global reactive power
plot(elecpower2$Time, elecpower2$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
| /Plot4.R | no_license | manufonn/ExData_Plotting1 | R | false | false | 1,464 | r |
## Plot4.R: four-panel base-graphics figure of household power consumption
## for 2007-02-01/02 (Exploratory Data Analysis course project).
getwd() # checks the working directory and file location. Downloaded file should be here.
elecpower <- read.csv(file ="household_power_consumption.txt", sep=";", na.strings="?")
# read the dataset into r
elecpower$Date <- as.Date(elecpower$Date, format = "%d/%m/%Y")
#convert the Date column class to 'Date' format
## NOTE(review): %>% and filter() require dplyr, which is never loaded in
## this script -- confirm library(dplyr) is attached elsewhere.
elecpower2 <- elecpower %>% filter(Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
#select only the rows that match a given date
elecpower2$Time <- strftime(paste(elecpower2$Date, elecpower2$Time), format = "%y/%m/%d %H:%M:%S")
elecpower2$Time <- strptime(elecpower2$Time, format = "%y/%m/%d %H:%M:%S")
#convert Time column to posix format
##plot4
## NOTE(review): the plots below reference `elec5`, which is never defined in
## this script -- presumably it should be `elecpower2` (the data frame built
## above); confirm and fix.
par(mfrow = c(2,2), mar = c(4, 4, 2, 2))
#4a
plot(elec5$Time, elec5$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
#4b
plot(elec5$Time, elec5$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
#4c
plot(elec5$Time, elec5$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(elec5$Time, elec5$Sub_metering_2, type = "l", col = "red")
lines(elec5$Time, elec5$Sub_metering_3, type = "l", col = "blue")
legend("topright", cex = 0.3, col = c("black", "red", "blue"), lty = 1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#4d
plot(elec5$Time, elec5$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
|
## Inner product of two numeric vectors: returns sum over i of c[i] * x[i].
## Fix: the original iterated over 1:length(c), which expands to c(1, 0) for
## an empty coefficient vector; seq_along(c) makes Z(numeric(0), x) return 0.
Z <-
function(c, x) {
    z <- 0
    for (i in seq_along(c))
        z <- z + c[i] * x[i]
    return(z)
}
| /R/Z.R | no_license | cran/intpoint | R | false | false | 80 | r | Z <-
## Body of Z (assigned on the preceding line): returns the inner product
## sum over i of c[i] * x[i].
function(c,x){
z<-0
## NOTE(review): 1:length(c) iterates over c(1, 0) when c is empty;
## seq_along(c) would be safer -- confirm callers never pass zero-length c.
for (i in 1:length(c))
z<-z+c[i]*x[i]
return(z)}
|
context("OnDiskMSnExp class")
## Test fixtures: matched in-memory (MSnExp) and on-disk (OnDiskMSnExp)
## representations of the same raw files, used throughout to verify that both
## backends behave identically.
inMem <- microtofq_in_mem_ms1
onDisk <- microtofq_on_disk_ms1
multiMsInMem1 <- tmt_im_ms1_sub
multiMsInMem2 <- tmt_im_ms2_sub
multiMsOnDisk <- tmt_od_sub
## Mark all spectra as centroided so centroided() comparisons are defined.
centroided(inMem) <- TRUE
centroided(onDisk) <- TRUE
centroided(multiMsInMem1) <- TRUE
centroided(multiMsInMem2) <- TRUE
centroided(multiMsOnDisk) <- TRUE
############################################################
## validateOnDiskMSnExp
## Corrupting the cached lowMZ in featureData must make validation fail for
## the affected MS level, while the untouched MS level still validates.
test_that("validateOnDiskMSnExp", {
    onDisk2 <- multiMsOnDisk
    expect_true(validateOnDiskMSnExp(onDisk2))
    ## Now modify the fData slightly.
    fd <- featureData(onDisk2)
    fd$lowMZ[13] <- fd$lowMZ[13] + 3
    onDisk2@featureData <- fd
    expect_error(validateOnDiskMSnExp(onDisk2))
    expect_true(validateOnDiskMSnExp(filterMsLevel(onDisk2, 2)))
})
## A zero-length object (no MS2 spectra in an MS1-only file) must validate.
test_that("Empty validateOnDiskMSnExp", {
    x <- filterMsLevel(onDisk, 2) ## empty
    expect_identical(length(x), 0L)
    expect_true(validObject(x))
    expect_true(validateOnDiskMSnExp(x))
})
test_that("Warning validateOnDiskMSnExp", {
    expect_warning(val <- validateOnDiskMSnExp(onDisk))
    expect_true(val)
})
############################################################
## Testing the on-disk MSnExp stuff.
test_that("OnDiskMSnExp constructor", {
    expect_identical(as.character(class(inMem)), "MSnExp")
    expect_identical(as.character(class(onDisk)), "OnDiskMSnExp")
    expect_true(validObject(onDisk))
})
## Coercion to the in-memory class only works for single-MS-level data and
## must preserve spectra and experiment metadata.
test_that("Coercion to MSnExp", {
    x <- tmt_erwinia_on_disk
    y <- tmt_erwinia_in_mem_ms2
    expect_error(as(x, "MSnExp"))
    x <- filterMsLevel(x, msLevel = 2)
    expect_true(all.equal(x, y))
    ## feature names are different
    featureNames(x) <- featureNames(y)
    x2 <- as(x, "MSnExp")
    ## expected to be different: processingData, fData, .cache
    expect_identical(spectra(x2), spectra(y))
    expect_identical(experimentData(x2), experimentData(y))
})
############################################################
## compare MSnExp against OnDiskMSnExp
## Every accessor must return identical values for both backends.
test_that("Compare MS1 MSnExp and OnDiskMSnExp content", {
    ## o Compare spectra values.
    expect_true(all.equal(inMem, onDisk))
    ## o fromFile
    expect_identical(fromFile(inMem), fromFile(onDisk))
    ## o msLevel
    expect_identical(msLevel(inMem), msLevel(onDisk))
    ## o acquisitionNum
    expect_identical(acquisitionNum(inMem), acquisitionNum(onDisk))
    ## o scanIndex
    expect_identical(scanIndex(inMem), scanIndex(onDisk))
    ## o centroided
    expect_identical(centroided(inMem), centroided(onDisk))
    centroided(inMem) <- FALSE
    centroided(onDisk) <- FALSE
    expect_identical(centroided(inMem), centroided(onDisk))
    expect_that(centroided(onDisk) <- c(TRUE, FALSE, TRUE), throws_error())
    ## o rtime
    expect_identical(rtime(inMem), rtime(onDisk))
    ## o polarity
    expect_identical(polarity(inMem), polarity(onDisk))
    ## o tic: Note: have to calculate as one of the two doesn't provide the
    ## initial values.
    expect_identical(tic(inMem), tic(onDisk, initial = FALSE))
    ## o ionCount
    expect_identical(ionCount(inMem), ionCount(onDisk))
    ## o peaksCount
    expect_identical(peaksCount(inMem), peaksCount(onDisk))
    ## o intensity
    expect_identical(intensity(inMem), intensity(onDisk))
    ## o mz
    expect_identical(mz(inMem), mz(onDisk))
})
############################################################
## Compare cleaned data.
## o spectra.
## o ionCount.
## o tic
## o peaksCount.
## Lazy (queued) processing on the on-disk backend must yield the same data
## as eager processing on the in-memory backend.
test_that("Compare removePeaks and cleaned MSnExp and OnDiskMSnExp", {
    ## o clean
    inMemCleaned <- clean(inMem)
    onDiskCleaned <- clean(onDisk)
    expect_true(all.equal(inMemCleaned, onDiskCleaned))
    expect_identical(ionCount(inMemCleaned), ionCount(onDiskCleaned))
    expect_identical(tic(inMemCleaned), tic(onDiskCleaned, initial = FALSE))
    expect_identical(peaksCount(inMemCleaned), peaksCount(onDiskCleaned))
    ## o removePeaks
    inMemRemPeaks <- removePeaks(inMem, t = 1000)
    onDiskRemPeaks <- removePeaks(onDisk, t = 1000)
    expect_true(all.equal(inMemRemPeaks, onDiskRemPeaks))
    expect_identical(ionCount(inMemRemPeaks), ionCount(onDiskRemPeaks))
    expect_identical(tic(inMemRemPeaks), tic(onDiskRemPeaks, initial = FALSE))
    expect_identical(peaksCount(inMemRemPeaks), peaksCount(onDiskRemPeaks))
    ## o removePeaks and clean
    inMemRemPeaksCleaned <- clean(inMemRemPeaks)
    onDiskRemPeaksCleaned <- clean(onDiskRemPeaks)
    expect_true(all.equal(inMemRemPeaksCleaned, onDiskRemPeaksCleaned))
    expect_identical(ionCount(inMemRemPeaksCleaned),
                     ionCount(onDiskRemPeaksCleaned))
    expect_identical(tic(inMemRemPeaksCleaned), tic(onDiskRemPeaksCleaned,
                                                    initial = FALSE))
    expect_identical(peaksCount(inMemRemPeaksCleaned),
                     peaksCount(onDiskRemPeaksCleaned))
    ## compare assayData, intensity and mz,
    expect_equal(assayData(inMemRemPeaksCleaned),
                 assayData(onDiskRemPeaksCleaned))
    expect_equal(intensity(inMemRemPeaksCleaned),
                 intensity(onDiskRemPeaksCleaned))
    expect_equal(mz(inMemRemPeaksCleaned), mz(onDiskRemPeaksCleaned))
})
## clean() on a multi-MS-level on-disk object must affect only the requested
## MS level and match the in-memory result for that level.
test_that("clean on OnDiskMSnExp with different MS levels", {
    ## o Tests on MSnExp
    multiMsInMem1_cleaned <- clean(multiMsInMem1)
    expect_true(sum(unlist(intensity(multiMsInMem1_cleaned)) == 0) <
                sum(unlist(intensity(multiMsInMem1)) == 0))
    ## o Tests on OnDiskMSnExp and comparison with MSnExp.
    multiMsOnDisk_cleaned <- clean(multiMsOnDisk)
    expect_true(sum(unlist(intensity(multiMsOnDisk_cleaned)) == 0) <
                sum(unlist(intensity(multiMsOnDisk)) == 0))
    ## Compare with MSnExp
    expect_true(all.equal(multiMsInMem1_cleaned,
                          filterMsLevel(multiMsOnDisk_cleaned, msLevel. = 1)))
    ## Just cleaning MS 1.
    multiMsOnDisk_cleaned_1 <- clean(multiMsOnDisk, msLevel. = 1)
    expect_true(all.equal(multiMsOnDisk_cleaned, multiMsOnDisk_cleaned_1))
    ## Just cleaning MS 2; won't do much at all.
    multiMsOnDisk_cleaned_2 <- clean(multiMsOnDisk, msLevel. = 2)
    expect_true(all.equal(multiMsOnDisk, multiMsOnDisk_cleaned_2))
    ## Same with msLevel. 4
    multiMsOnDisk_cleaned_4 <- clean(multiMsOnDisk, msLevel. = 4)
    expect_true(all.equal(multiMsOnDisk, multiMsOnDisk_cleaned_4))
})
## removePeaks() must be restrictable to a single MS level on the on-disk
## backend while leaving the other level's spectra untouched.
test_that("removePeaks on OnDiskMSnExp with different MS levels", {
    ## o Tests on MSnExp
    multiMsInMem1_rem <- removePeaks(multiMsInMem1)
    expect_true(sum(unlist(intensity(multiMsInMem1_rem)) == 0) >
                sum(unlist(intensity(multiMsInMem1)) == 0))
    multiMsInMem2_rem <- removePeaks(multiMsInMem2)
    expect_true(sum(unlist(intensity(multiMsInMem2_rem)) == 0) >
                sum(unlist(intensity(multiMsInMem2)) == 0))
    ## o Tests on OnDiskMSnExp and comparison with MSnExp.
    multiMsOnDisk_rem <- removePeaks(multiMsOnDisk)
    expect_true(sum(unlist(intensity(multiMsOnDisk_rem)) == 0) >
                sum(unlist(intensity(multiMsOnDisk)) == 0))
    ## Compare with MSnExp
    expect_true(all.equal(multiMsInMem1_rem,
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 1)))
    expect_true(all.equal(multiMsInMem2_rem,
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 2)))
    ## Just processing MS 1.
    multiMsOnDisk_rem_1 <- removePeaks(multiMsOnDisk, msLevel. = 1)
    expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_1, msLevel. = 1),
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 1)))
    spects1 <- spectra(filterMsLevel(multiMsOnDisk_rem_1, msLevel. = 2))
    spects2 <- spectra(filterMsLevel(multiMsOnDisk, msLevel. = 2))
    expect_identical(spects1, spects2)
    ## expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_1, msLevel. = 2),
    ##                       filterMsLevel(multiMsOnDisk, msLevel. = 2)))
    ## Just processing MS 2.
    multiMsOnDisk_rem_2 <- removePeaks(multiMsOnDisk, msLevel. = 2)
    expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_2, msLevel. = 2),
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 2)))
    expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_2, msLevel. = 1),
                          filterMsLevel(multiMsOnDisk, msLevel. = 1)))
})
############################################################
## bin
## bin() per MS level must match the in-memory result; binning across all
## levels uses a different mz range and therefore cannot match.
test_that("bin on OnDiskMSnExp", {
    ## o On a single-file multi MS-level file.
    multiMsInMem1_bin <- bin(multiMsInMem1, verbose = FALSE)
    multiMsInMem2_bin <- bin(multiMsInMem2, verbose = FALSE)
    ## bin on MS1 level only
    multiMsOnDisk_bin_1 <- bin(multiMsOnDisk, msLevel. = 1)
    ## Results should be the same.
    expect_true(all.equal(multiMsInMem1_bin,
                          filterMsLevel(multiMsOnDisk_bin_1, msLevel. = 1)))
    ## bin on all levels.
    multiMsOnDisk_bin <- bin(multiMsOnDisk)
    ## Results can not be the same, since the mz range was different for
    ## the bin.
    expect_true(is(all.equal(
        multiMsInMem1_bin,
        filterMsLevel(multiMsOnDisk_bin, msLevel. = 1)
    ), "character"))
    ## bin on MS2 level only
    multiMsOnDisk_bin_2 <- bin(multiMsOnDisk, msLevel. = 2)
    ## Results should be the same.
    expect_true(all.equal(multiMsInMem2_bin,
                          filterMsLevel(multiMsOnDisk_bin_2, msLevel. = 2)))
    ## o On multiple files.
    inMem_bin <- bin(inMem, verbose = FALSE)
    onDisk_bin <- bin(onDisk)
    expect_true(all.equal(inMem_bin, onDisk_bin))
    ## bin on MS 2 shouldn't do anything at all
    expect_warning(onDisk_bin <- bin(onDisk, msLevel. = 2))
    expect_true(all.equal(onDisk_bin, onDisk))
})
############################################################
## Test internal spectrapply method.
## spectrapply() applied to the on-disk object must equal lapply() over the
## fully realized spectra, for a method, a plain function, and extra args.
test_that("Test internal spectrapply function", {
    spl <- spectra(onDisk)
    ## Test Spectrum method:
    res1 <- lapply(spl, ionCount)
    res2 <- MSnbase:::spectrapply(onDisk, ionCount)
    expect_identical(res1, res2)
    ## Plain function
    res1 <- lapply(spl, function(z) return(mean(mz(z))))
    res2 <- MSnbase:::spectrapply(onDisk, function(z) {
        return(mean(mz(z)))
    })
    expect_identical(res1, res2)
    ## Additional arguments.
    res1 <- lapply(spl, function(z, int) {
        return(mean(mz(z)[intensity(z) > int]))
    }, int = 30)
    res2 <- MSnbase:::spectrapply(onDisk, function(z, int) {
        return(mean(mz(z)[intensity(z) > int]))
    }, int = 30)
    expect_identical(res1, res2)
})
############################################################
## Test that the new sorting by acquisitionNum and extraction
## by spIdx works (described in issue #118)
## We're comparing spectra extracted by that between an
## OnDiskMSnExp and an MSnExp.
## Single-bracket/double-bracket extraction by position must return the same
## spectrum from both backends, within and across MS levels.
test_that("Test sorting by acquisitionNum", {
    sp1 <- inMem[[13]]
    sp2 <- onDisk[[13]]
    expect_identical(sp1, sp2)
    sp1 <- inMem[[22]]
    sp2 <- onDisk[[22]]
    expect_identical(sp1, sp2)
    ## Same using multiMS
    onDisk1 <- filterMsLevel(multiMsOnDisk, msLevel. = 1L)
    sp1 <- multiMsInMem1[[7]]
    sp2 <- onDisk1[[7]]
    expect_identical(sp1, sp2)
    sp1 <- multiMsInMem1[[9]]
    sp2 <- onDisk1[[9]]
    expect_identical(sp1, sp2)
    onDisk2 <- filterMsLevel(multiMsOnDisk, msLevel. = 2L)
    sp1 <- multiMsInMem2[[13]]
    sp2 <- onDisk2[[13]]
    expect_identical(sp1, sp2)
    sp1 <- multiMsInMem2[[22]]
    sp2 <- onDisk2[[22]]
    expect_identical(sp1, sp2)
})
## spectrapply() without FUN returns the spectra; with FUN it maps over them.
test_that("spectrapply,OnDiskMSnExp", {
    sps <- spectra(onDisk)
    sps_2 <- spectrapply(onDisk)
    expect_identical(sps, sps_2)
    ## apply a function.
    dfs <- spectrapply(onDisk, FUN = as, Class = "data.frame")
    dfs_2 <- lapply(sps, FUN = as, Class = "data.frame")
    expect_identical(dfs, dfs_2)
})
## splitByFile() must partition the experiment like per-file filterFile().
test_that("splitByFile,OnDiskMSnExp", {
    od <- microtofq_on_disk_ms1
    spl <- splitByFile(od, f = factor(c("a", "b")))
    expect_equal(pData(spl[[1]]), pData(filterFile(od, 1)))
    expect_equal(pData(spl[[2]]), pData(filterFile(od, 2)))
})
## Chromatogram extraction: the per-range, per-file intensities must equal
## summed spectrum intensities after the corresponding mz/rt filters.
test_that("chromatogram,OnDiskMSnExp works", {
    library(msdata)
    mzf <- c(system.file("microtofq/MM14.mzML", package = "msdata"),
             system.file("microtofq/MM8.mzML", package = "msdata"))
    ## Duplicate the two files under new names to get a 4-file experiment.
    tmpd <- tempdir()
    file.copy(mzf[1], paste0(tmpd, "a.mzML"))
    file.copy(mzf[2], paste0(tmpd, "b.mzML"))
    mzf <- c(mzf, paste0(tmpd, c("a.mzML", "b.mzML")))
    onDisk <- readMSData(files = mzf, msLevel. = 1, centroided. = TRUE,
                         mode = "onDisk")
    ## Full rt range.
    mzr <- matrix(c(100, 120), nrow = 1)
    res <- MSnbase:::.extractMultipleChromatograms(onDisk, mz = mzr)
    flt <- filterMz(onDisk, mz = mzr[1, ])
    ints <- split(unlist(lapply(spectra(flt), function(z) sum(intensity(z)))),
                  fromFile(flt))
    expect_equal(ints[[1]], intensity(res[1, 1][[1]]))
    expect_equal(ints[[2]], intensity(res[1, 2][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[1]], rtime(res[1, 1][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[2]], rtime(res[1, 2][[1]]))
    ## Multiple mz ranges.
    mzr <- matrix(c(100, 120, 200, 220, 300, 320), nrow = 3, byrow = TRUE)
    rtr <- matrix(c(50, 300), nrow = 1)
    res <- MSnbase:::.extractMultipleChromatograms(onDisk, mz = mzr, rt = rtr)
    ## Check that the values for all ranges is within the specified ranges
    for (i in 1:nrow(mzr)) {
        expect_true(all(mz(res[i, 1][[1]]) >= mzr[i, 1] &
                        mz(res[i, 1][[1]]) <= mzr[i, 2]))
        expect_true(all(mz(res[i, 2][[1]]) >= mzr[i, 1] &
                        mz(res[i, 2][[1]]) <= mzr[i, 2]))
        expect_true(all(rtime(res[i, 1][[1]]) >= rtr[1, 1] &
                        rtime(res[i, 1][[1]]) <= rtr[1, 2]))
        expect_true(all(rtime(res[i, 2][[1]]) >= rtr[1, 1] &
                        rtime(res[i, 2][[1]]) <= rtr[1, 2]))
    }
    ## Check that values are correct.
    flt <- filterMz(filterRt(onDisk, rt = rtr[1, ]), mz = mzr[2, ])
    ints <- split(unlist(lapply(spectra(flt), function(z) sum(intensity(z)))),
                  fromFile(flt))
    expect_equal(ints[[1]], intensity(res[2, 1][[1]]))
    expect_equal(ints[[2]], intensity(res[2, 2][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[1]], rtime(res[2, 1][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[2]], rtime(res[2, 2][[1]]))
    ## Now with ranges for which we don't have values in one or the other.
    rtr <- matrix(c(280, 300, 20, 40), nrow = 2,
                  byrow = TRUE) ## Only present in first, or 2nd file
    res <- chromatogram(onDisk, rt = rtr)
    expect_true(all(unlist(lapply(res, msLevel)) == 1))
    ## Check fromFile
    for (i in 1:ncol(res))
        expect_true(all(sapply(res[, i], fromFile) == i))
    expect_equal(length(res[2, 1]), 0)
    expect_equal(length(res[1, 2]), 0)
    ## Check rtime
    expect_true(all(rtime(res[1, 1]) >= rtr[1, 1] &
                    rtime(res[1, 1]) <= rtr[1, 2]))
    expect_true(all(rtime(res[2, 2]) >= rtr[2, 1] &
                    rtime(res[2, 2]) <= rtr[2, 2]))
    ## Check intensity
    flt <- filterRt(onDisk, rt = rtr[1, ])
    spctr <- split(spectra(flt), fromFile(flt))
    ints <- unlist(lapply(spctr[[1]], function(z) sum(intensity(z))))
    expect_equal(ints, intensity(res[1, 1]))
    flt <- filterRt(onDisk, rt = rtr[2, ])
    spctr <- split(spectra(flt), fromFile(flt))
    ints <- unlist(lapply(spctr[[1]], function(z) sum(intensity(z))))
    expect_equal(ints, intensity(res[2, 2]))
    ## Check chromatogram with non-present MS level
    expect_warning(tmp <- chromatogram(onDisk, rt = rtr, msLevel = 2L))
    expect_equal(nrow(tmp), 0)
    tmp <- chromatogram(onDisk, rt = rtr, msLevel = 1:10)
    expect_equal(tmp, res)
    res <- MSnbase:::.extractMultipleChromatograms(onDisk, rt = rtr,
                                                   msLevel = 1:5)
    colnames(res) <- basename(fileNames(onDisk))
    res <- as(res, "MChromatograms")
    expect_true(validObject(res))
    ## pData(res) <- pData(onDisk)
    ## fData(res) <- fData(tmp)
    ## expect_equal(tmp, res)
})
## Test the two versions that could/might be called by the
## spectrapply,OnDiskMSnExp method. Each has its own pros and cons and cases
## in which it outperforms the other function.
## Both low-level implementations must agree on full files, on a subset of
## spectra, and when an additional function is applied to each spectrum.
test_that("low memory spectrapply function works", {
    fl <- system.file("lockmass/LockMass_test.mzXML", package = "msdata")
    fh <- mzR::openMSfile(fl)
    hdr <- mzR::header(fh)
    mzR::close(fh)
    ## Build the minimal featureData expected by the internal functions.
    fData <- hdr
    fData$spIdx <- hdr$seqNum
    fData$fileIdx <- 1L
    fData$smoothed <- FALSE
    fData$centroided <- TRUE
    fastLoad <- FALSE
    expect_equal(
        MSnbase:::.applyFun2SpectraOfFileMulti(fData, filenames = fl,
                                               fastLoad = fastLoad),
        MSnbase:::.applyFun2IndividualSpectraOfFile(fData,
                                                    filenames = fl,
                                                    fastLoad = fastLoad))
    fd <- fData[c(4, 8, 32, 123), ]
    expect_equal(
        MSnbase:::.applyFun2SpectraOfFileMulti(fd, filenames = fl,
                                               fastLoad = fastLoad),
        MSnbase:::.applyFun2IndividualSpectraOfFile(fd,
                                                    filenames = fl,
                                                    fastLoad = fastLoad))
    ## With an function to apply.
    expect_equal(
        MSnbase:::.applyFun2SpectraOfFileMulti(fd, filenames = fl,
                                               fastLoad = fastLoad,
                                               APPLYFUN = mz),
        MSnbase:::.applyFun2IndividualSpectraOfFile(fd,
                                                    filenames = fl,
                                                    fastLoad = fastLoad,
                                                    APPLYFUN = mz))
})
## The isolation window must bracket the precursor mz for MS2 data and be NA
## for MS1-only data.
test_that("isolationWindowLowerMz,isolationWindowUpperMz,OnDiskMSnExp works", {
    mz_low <- isolationWindowLowerMz(tmt_od_ms2_sub)
    mz_high <- isolationWindowUpperMz(tmt_od_ms2_sub)
    expect_true(all(mz_low < mz_high))
    expect_true(all(precursorMz(tmt_od_ms2_sub) >= mz_low))
    expect_true(all(precursorMz(tmt_od_ms2_sub) <= mz_high))
    mz_low <- isolationWindowLowerMz(sciex)
    mz_high <- isolationWindowUpperMz(sciex)
    expect_true(all(is.na(mz_low)))
    expect_true(all(is.na(mz_high)))
})
## Default combination collapses spectra per file: two files -> two spectra.
test_that("combineSpectra,MSnExp works with OnDiskMSnExp", {
    res <- combineSpectra(filterRt(sciex, c(10, 20)))
    expect_true(is(res, "MSnExp"))
    expect_true(length(res) == 2)
})
## Per-file split must carry over the per-file feature and phenotype data.
test_that(".on_disk_split_by_file works", {
    res <- .on_disk_split_by_file(sciex)
    expect_equal(length(res), 2)
    expect_equal(featureData(res[[1]]), featureData(filterFile(sciex, 1L)))
    expect_equal(featureData(res[[2]]), featureData(filterFile(sciex, 2L)))
    expect_equal(phenoData(res[[1]]), phenoData(filterFile(sciex, 1L)))
    expect_equal(phenoData(res[[2]]), phenoData(filterFile(sciex, 2L)))
})
| /tests/testthat/test_OnDiskMSnExp.R | no_license | procha2/MSnbase | R | false | false | 19,025 | r | context("OnDiskMSnExp class")
## Shared fixtures for this file: matched in-memory (MSnExp) and on-disk
## (OnDiskMSnExp) versions of the same raw data. The microtofq_* / tmt_*
## objects are presumably created in the testthat setup files — the tests
## below compare the two backends against each other.
inMem <- microtofq_in_mem_ms1
onDisk <- microtofq_on_disk_ms1
multiMsInMem1 <- tmt_im_ms1_sub
multiMsInMem2 <- tmt_im_ms2_sub
multiMsOnDisk <- tmt_od_sub
## Mark all fixtures centroided so centroided() comparisons are meaningful.
centroided(inMem) <- TRUE
centroided(onDisk) <- TRUE
centroided(multiMsInMem1) <- TRUE
centroided(multiMsInMem2) <- TRUE
centroided(multiMsOnDisk) <- TRUE
############################################################
## validateOnDiskMSnExp
## The validator re-reads spectra from disk and cross-checks them against
## the cached featureData; a tampered featureData must be detected.
test_that("validateOnDiskMSnExp", {
    onDisk2 <- multiMsOnDisk
    expect_true(validateOnDiskMSnExp(onDisk2))
    ## Now modify the fData slightly.
    fd <- featureData(onDisk2)
    fd$lowMZ[13] <- fd$lowMZ[13] + 3
    onDisk2@featureData <- fd
    ## Validation fails on the modified spectrum ...
    expect_error(validateOnDiskMSnExp(onDisk2))
    ## ... but passes again once the offending MS1 spectrum is filtered out.
    expect_true(validateOnDiskMSnExp(filterMsLevel(onDisk2, 2)))
})
## An empty (zero-spectra) object must still be valid.
test_that("Empty validateOnDiskMSnExp", {
    x <- filterMsLevel(onDisk, 2) ## empty
    expect_identical(length(x), 0L)
    expect_true(validObject(x))
    expect_true(validateOnDiskMSnExp(x))
})
## Validation of the microtofq fixture emits a warning but still succeeds.
test_that("Warning validateOnDiskMSnExp", {
    expect_warning(val <- validateOnDiskMSnExp(onDisk))
    expect_true(val)
})
############################################################
## Testing the on-disk MSnExp stuff.
## Basic class identity of the two backends.
test_that("OnDiskMSnExp constructor", {
    expect_identical(as.character(class(inMem)), "MSnExp")
    expect_identical(as.character(class(onDisk)), "OnDiskMSnExp")
    expect_true(validObject(onDisk))
})
## Coercing OnDiskMSnExp -> MSnExp: only possible for single-MS-level data;
## after coercion spectra and experiment data must match the in-memory twin.
test_that("Coercion to MSnExp", {
    x <- tmt_erwinia_on_disk
    y <- tmt_erwinia_in_mem_ms2
    ## Mixed MS levels cannot be coerced.
    expect_error(as(x, "MSnExp"))
    x <- filterMsLevel(x, msLevel = 2)
    expect_true(all.equal(x, y))
    ## feature names are different
    featureNames(x) <- featureNames(y)
    x2 <- as(x, "MSnExp")
    ## expected to be different: processingData, fData, .cache
    expect_identical(spectra(x2), spectra(y))
    expect_identical(experimentData(x2), experimentData(y))
})
############################################################
## compare MSnExp against OnDiskMSnExp
## Every accessor must return the same values regardless of backend.
test_that("Compare MS1 MSnExp and OnDiskMSnExp content", {
    ## o Compare spectra values.
    expect_true(all.equal(inMem, onDisk))
    ## o fromFile
    expect_identical(fromFile(inMem), fromFile(onDisk))
    ## o msLevel
    expect_identical(msLevel(inMem), msLevel(onDisk))
    ## o acquisitionNum
    expect_identical(acquisitionNum(inMem), acquisitionNum(onDisk))
    ## o scanIndex
    expect_identical(scanIndex(inMem), scanIndex(onDisk))
    ## o centroided
    expect_identical(centroided(inMem), centroided(onDisk))
    centroided(inMem) <- FALSE
    centroided(onDisk) <- FALSE
    expect_identical(centroided(inMem), centroided(onDisk))
    ## Replacement value must be recycled/validated: wrong length errors.
    expect_that(centroided(onDisk) <- c(TRUE, FALSE, TRUE), throws_error())
    ## o rtime
    expect_identical(rtime(inMem), rtime(onDisk))
    ## o polarity
    expect_identical(polarity(inMem), polarity(onDisk))
    ## o tic: Note: have to calculate as one of the two doesn't provide the
    ## initial values.
    expect_identical(tic(inMem), tic(onDisk, initial = FALSE))
    ## o ionCount
    expect_identical(ionCount(inMem), ionCount(onDisk))
    ## o peaksCount
    expect_identical(peaksCount(inMem), peaksCount(onDisk))
    ## o intensity
    expect_identical(intensity(inMem), intensity(onDisk))
    ## o mz
    expect_identical(mz(inMem), mz(onDisk))
})
############################################################
## Compare cleaned data.
## o spectra.
## o ionCount.
## o tic
## o peaksCount.
## Applying clean()/removePeaks() (lazy on-disk vs eager in-memory) must
## yield equivalent objects and identical derived quantities.
test_that("Compare removePeaks and cleaned MSnExp and OnDiskMSnExp", {
    ## o clean
    inMemCleaned <- clean(inMem)
    onDiskCleaned <- clean(onDisk)
    expect_true(all.equal(inMemCleaned, onDiskCleaned))
    expect_identical(ionCount(inMemCleaned), ionCount(onDiskCleaned))
    expect_identical(tic(inMemCleaned), tic(onDiskCleaned, initial = FALSE))
    expect_identical(peaksCount(inMemCleaned), peaksCount(onDiskCleaned))
    ## o removePeaks
    inMemRemPeaks <- removePeaks(inMem, t = 1000)
    onDiskRemPeaks <- removePeaks(onDisk, t = 1000)
    expect_true(all.equal(inMemRemPeaks, onDiskRemPeaks))
    expect_identical(ionCount(inMemRemPeaks), ionCount(onDiskRemPeaks))
    expect_identical(tic(inMemRemPeaks), tic(onDiskRemPeaks, initial = FALSE))
    expect_identical(peaksCount(inMemRemPeaks), peaksCount(onDiskRemPeaks))
    ## o removePeaks and clean (stacked processing steps)
    inMemRemPeaksCleaned <- clean(inMemRemPeaks)
    onDiskRemPeaksCleaned <- clean(onDiskRemPeaks)
    expect_true(all.equal(inMemRemPeaksCleaned, onDiskRemPeaksCleaned))
    expect_identical(ionCount(inMemRemPeaksCleaned),
                     ionCount(onDiskRemPeaksCleaned))
    expect_identical(tic(inMemRemPeaksCleaned), tic(onDiskRemPeaksCleaned,
                                                    initial = FALSE))
    expect_identical(peaksCount(inMemRemPeaksCleaned),
                     peaksCount(onDiskRemPeaksCleaned))
    ## compare assayData, intensity and mz,
    expect_equal(assayData(inMemRemPeaksCleaned),
                 assayData(onDiskRemPeaksCleaned))
    expect_equal(intensity(inMemRemPeaksCleaned),
                 intensity(onDiskRemPeaksCleaned))
    expect_equal(mz(inMemRemPeaksCleaned), mz(onDiskRemPeaksCleaned))
})
## clean() with the msLevel. argument: only the selected MS level(s) should
## be processed; cleaning a level with no zero-intensity peaks is a no-op.
test_that("clean on OnDiskMSnExp with different MS levels", {
    ## o Tests on MSnExp
    multiMsInMem1_cleaned <- clean(multiMsInMem1)
    ## clean removes zero-intensity values, so fewer zeros remain.
    expect_true(sum(unlist(intensity(multiMsInMem1_cleaned)) == 0) <
                sum(unlist(intensity(multiMsInMem1)) == 0))
    ## o Tests on OnDiskMSnExp and comparison with MSnExp.
    multiMsOnDisk_cleaned <- clean(multiMsOnDisk)
    expect_true(sum(unlist(intensity(multiMsOnDisk_cleaned)) == 0) <
                sum(unlist(intensity(multiMsOnDisk)) == 0))
    ## Compare with MSnExp
    expect_true(all.equal(multiMsInMem1_cleaned,
                          filterMsLevel(multiMsOnDisk_cleaned, msLevel. = 1)))
    ## Just cleaning MS 1.
    multiMsOnDisk_cleaned_1 <- clean(multiMsOnDisk, msLevel. = 1)
    expect_true(all.equal(multiMsOnDisk_cleaned, multiMsOnDisk_cleaned_1))
    ## Just cleaning MS 2; won't do much at all.
    multiMsOnDisk_cleaned_2 <- clean(multiMsOnDisk, msLevel. = 2)
    expect_true(all.equal(multiMsOnDisk, multiMsOnDisk_cleaned_2))
    ## Same with msLevel. 4 (level not present in the data).
    multiMsOnDisk_cleaned_4 <- clean(multiMsOnDisk, msLevel. = 4)
    expect_true(all.equal(multiMsOnDisk, multiMsOnDisk_cleaned_4))
})
## removePeaks() with the msLevel. argument: the selected level must match
## a full removePeaks() run, the other level must stay untouched.
test_that("removePeaks on OnDiskMSnExp with different MS levels", {
    ## o Tests on MSnExp
    multiMsInMem1_rem <- removePeaks(multiMsInMem1)
    ## removePeaks zeroes out low-intensity peaks, so more zeros afterwards.
    expect_true(sum(unlist(intensity(multiMsInMem1_rem)) == 0) >
                sum(unlist(intensity(multiMsInMem1)) == 0))
    multiMsInMem2_rem <- removePeaks(multiMsInMem2)
    expect_true(sum(unlist(intensity(multiMsInMem2_rem)) == 0) >
                sum(unlist(intensity(multiMsInMem2)) == 0))
    ## o Tests on OnDiskMSnExp and comparison with MSnExp.
    multiMsOnDisk_rem <- removePeaks(multiMsOnDisk)
    expect_true(sum(unlist(intensity(multiMsOnDisk_rem)) == 0) >
                sum(unlist(intensity(multiMsOnDisk)) == 0))
    ## Compare with MSnExp
    expect_true(all.equal(multiMsInMem1_rem,
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 1)))
    expect_true(all.equal(multiMsInMem2_rem,
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 2)))
    ## Just processing MS 1.
    multiMsOnDisk_rem_1 <- removePeaks(multiMsOnDisk, msLevel. = 1)
    expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_1, msLevel. = 1),
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 1)))
    ## MS 2 spectra must be byte-identical to the unprocessed object.
    spects1 <- spectra(filterMsLevel(multiMsOnDisk_rem_1, msLevel. = 2))
    spects2 <- spectra(filterMsLevel(multiMsOnDisk, msLevel. = 2))
    expect_identical(spects1, spects2)
    ## expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_1, msLevel. = 2),
    ##                       filterMsLevel(multiMsOnDisk, msLevel. = 2)))
    ## Just processing MS 2.
    multiMsOnDisk_rem_2 <- removePeaks(multiMsOnDisk, msLevel. = 2)
    expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_2, msLevel. = 2),
                          filterMsLevel(multiMsOnDisk_rem, msLevel. = 2)))
    expect_true(all.equal(filterMsLevel(multiMsOnDisk_rem_2, msLevel. = 1),
                          filterMsLevel(multiMsOnDisk, msLevel. = 1)))
})
############################################################
## bin
## bin() on an OnDiskMSnExp must match bin() on the in-memory twin when
## restricted to one MS level; binning across all levels uses a common m/z
## range and therefore differs.
test_that("bin on OnDiskMSnExp", {
    ## o On a single-file multi MS-level file.
    multiMsInMem1_bin <- bin(multiMsInMem1, verbose = FALSE)
    multiMsInMem2_bin <- bin(multiMsInMem2, verbose = FALSE)
    ## bin on MS1 level only
    multiMsOnDisk_bin_1 <- bin(multiMsOnDisk, msLevel. = 1)
    ## Results should be the same.
    expect_true(all.equal(multiMsInMem1_bin,
                          filterMsLevel(multiMsOnDisk_bin_1, msLevel. = 1)))
    ## bin on all levels.
    multiMsOnDisk_bin <- bin(multiMsOnDisk)
    ## Results can not be the same, since the mz range was different for
    ## the bin. (all.equal returns a character description of differences.)
    expect_true(is(all.equal(
        multiMsInMem1_bin,
        filterMsLevel(multiMsOnDisk_bin, msLevel. = 1)
    ), "character"))
    ## bin on MS2 level only
    multiMsOnDisk_bin_2 <- bin(multiMsOnDisk, msLevel. = 2)
    ## Results should be the same.
    expect_true(all.equal(multiMsInMem2_bin,
                          filterMsLevel(multiMsOnDisk_bin_2, msLevel. = 2)))
    ## o On multiple files.
    inMem_bin <- bin(inMem, verbose = FALSE)
    onDisk_bin <- bin(onDisk)
    expect_true(all.equal(inMem_bin, onDisk_bin))
    ## bin on MS 2 shouldn't do anything at all (MS1-only data -> warning)
    expect_warning(onDisk_bin <- bin(onDisk, msLevel. = 2))
    expect_true(all.equal(onDisk_bin, onDisk))
})
############################################################
## Test internal spectrapply method.
## MSnbase:::spectrapply must behave like lapply() over spectra(), for a
## generic, a plain function, and a function with extra arguments.
test_that("Test internal spectrapply function", {
    spl <- spectra(onDisk)
    ## Test Spectrum method:
    res1 <- lapply(spl, ionCount)
    res2 <- MSnbase:::spectrapply(onDisk, ionCount)
    expect_identical(res1, res2)
    ## Plain function
    res1 <- lapply(spl, function(z) return(mean(mz(z))))
    res2 <- MSnbase:::spectrapply(onDisk, function(z) {
        return(mean(mz(z)))
    })
    expect_identical(res1, res2)
    ## Additional arguments are forwarded to the applied function.
    res1 <- lapply(spl, function(z, int) {
        return(mean(mz(z)[intensity(z) > int]))
    }, int = 30)
    res2 <- MSnbase:::spectrapply(onDisk, function(z, int) {
        return(mean(mz(z)[intensity(z) > int]))
    }, int = 30)
    expect_identical(res1, res2)
})
############################################################
## Test that the new sorting by acquisitionNum and extraction
## by spIdx works (described in issue #118)
## We're comparing spectra extracted by that between an
## OnDiskMSnExp and an MSnExp.
## [[i]] extraction must return the identical spectrum from both backends.
test_that("Test sorting by acquisitionNum", {
    sp1 <- inMem[[13]]
    sp2 <- onDisk[[13]]
    expect_identical(sp1, sp2)
    sp1 <- inMem[[22]]
    sp2 <- onDisk[[22]]
    expect_identical(sp1, sp2)
    ## Same using multiMS (per-MS-level extraction after filterMsLevel)
    onDisk1 <- filterMsLevel(multiMsOnDisk, msLevel. = 1L)
    sp1 <- multiMsInMem1[[7]]
    sp2 <- onDisk1[[7]]
    expect_identical(sp1, sp2)
    sp1 <- multiMsInMem1[[9]]
    sp2 <- onDisk1[[9]]
    expect_identical(sp1, sp2)
    onDisk2 <- filterMsLevel(multiMsOnDisk, msLevel. = 2L)
    sp1 <- multiMsInMem2[[13]]
    sp2 <- onDisk2[[13]]
    expect_identical(sp1, sp2)
    sp1 <- multiMsInMem2[[22]]
    sp2 <- onDisk2[[22]]
    expect_identical(sp1, sp2)
})
## Exported spectrapply(): without FUN it returns spectra(); with FUN it
## matches lapply() over the spectra.
test_that("spectrapply,OnDiskMSnExp", {
    sps <- spectra(onDisk)
    sps_2 <- spectrapply(onDisk)
    expect_identical(sps, sps_2)
    ## apply a function.
    dfs <- spectrapply(onDisk, FUN = as, Class = "data.frame")
    dfs_2 <- lapply(sps, FUN = as, Class = "data.frame")
    expect_identical(dfs, dfs_2)
})
## splitByFile with a 2-level factor should match per-file filterFile().
test_that("splitByFile,OnDiskMSnExp", {
    od <- microtofq_on_disk_ms1
    spl <- splitByFile(od, f = factor(c("a", "b")))
    expect_equal(pData(spl[[1]]), pData(filterFile(od, 1)))
    expect_equal(pData(spl[[2]]), pData(filterFile(od, 2)))
})
## chromatogram() / .extractMultipleChromatograms: extracted intensities and
## retention times must match a manual filterMz()/filterRt() + sum over the
## spectra, for single and multiple m/z / rt ranges, including ranges that
## are empty in some files.
test_that("chromatogram,OnDiskMSnExp works", {
    library(msdata)
    mzf <- c(system.file("microtofq/MM14.mzML", package = "msdata"),
             system.file("microtofq/MM8.mzML", package = "msdata"))
    tmpd <- tempdir()
    ## NOTE(review): paste0(tmpd, "a.mzML") omits the path separator, so the
    ## copies land next to (not inside) tempdir(); consistent with line
    ## below, but file.path() was probably intended — verify.
    file.copy(mzf[1], paste0(tmpd, "a.mzML"))
    file.copy(mzf[2], paste0(tmpd, "b.mzML"))
    mzf <- c(mzf, paste0(tmpd, c("a.mzML", "b.mzML")))
    onDisk <- readMSData(files = mzf, msLevel. = 1, centroided. = TRUE,
                         mode = "onDisk")
    ## Full rt range.
    mzr <- matrix(c(100, 120), nrow = 1)
    res <- MSnbase:::.extractMultipleChromatograms(onDisk, mz = mzr)
    flt <- filterMz(onDisk, mz = mzr[1, ])
    ints <- split(unlist(lapply(spectra(flt), function(z) sum(intensity(z)))),
                  fromFile(flt))
    expect_equal(ints[[1]], intensity(res[1, 1][[1]]))
    expect_equal(ints[[2]], intensity(res[1, 2][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[1]], rtime(res[1, 1][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[2]], rtime(res[1, 2][[1]]))
    ## Multiple mz ranges.
    mzr <- matrix(c(100, 120, 200, 220, 300, 320), nrow = 3, byrow = TRUE)
    rtr <- matrix(c(50, 300), nrow = 1)
    res <- MSnbase:::.extractMultipleChromatograms(onDisk, mz = mzr, rt = rtr)
    ## Check that the values for all ranges is within the specified ranges
    for (i in 1:nrow(mzr)) {
        expect_true(all(mz(res[i, 1][[1]]) >= mzr[i, 1] &
                        mz(res[i, 1][[1]]) <= mzr[i, 2]))
        expect_true(all(mz(res[i, 2][[1]]) >= mzr[i, 1] &
                        mz(res[i, 2][[1]]) <= mzr[i, 2]))
        expect_true(all(rtime(res[i, 1][[1]]) >= rtr[1, 1] &
                        rtime(res[i, 1][[1]]) <= rtr[1, 2]))
        expect_true(all(rtime(res[i, 2][[1]]) >= rtr[1, 1] &
                        rtime(res[i, 2][[1]]) <= rtr[1, 2]))
    }
    ## Check that values are correct.
    flt <- filterMz(filterRt(onDisk, rt = rtr[1, ]), mz = mzr[2, ])
    ints <- split(unlist(lapply(spectra(flt), function(z) sum(intensity(z)))),
                  fromFile(flt))
    expect_equal(ints[[1]], intensity(res[2, 1][[1]]))
    expect_equal(ints[[2]], intensity(res[2, 2][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[1]], rtime(res[2, 1][[1]]))
    expect_equal(split(rtime(flt), fromFile(flt))[[2]], rtime(res[2, 2][[1]]))
    ## Now with ranges for which we don't have values in one or the other.
    rtr <- matrix(c(280, 300, 20, 40), nrow = 2,
                  byrow = TRUE) ## Only present in first, or 2nd file
    res <- chromatogram(onDisk, rt = rtr)
    expect_true(all(unlist(lapply(res, msLevel)) == 1))
    ## Check fromFile
    for (i in 1:ncol(res))
        expect_true(all(sapply(res[, i], fromFile) == i))
    ## Ranges with no signal in a file yield empty chromatograms.
    expect_equal(length(res[2, 1]), 0)
    expect_equal(length(res[1, 2]), 0)
    ## Check rtime
    expect_true(all(rtime(res[1, 1]) >= rtr[1, 1] &
                    rtime(res[1, 1]) <= rtr[1, 2]))
    expect_true(all(rtime(res[2, 2]) >= rtr[2, 1] &
                    rtime(res[2, 2]) <= rtr[2, 2]))
    ## Check intensity
    flt <- filterRt(onDisk, rt = rtr[1, ])
    spctr <- split(spectra(flt), fromFile(flt))
    ints <- unlist(lapply(spctr[[1]], function(z) sum(intensity(z))))
    expect_equal(ints, intensity(res[1, 1]))
    flt <- filterRt(onDisk, rt = rtr[2, ])
    spctr <- split(spectra(flt), fromFile(flt))
    ints <- unlist(lapply(spctr[[1]], function(z) sum(intensity(z))))
    expect_equal(ints, intensity(res[2, 2]))
    ## Check chromatogram with non-present MS level
    expect_warning(tmp <- chromatogram(onDisk, rt = rtr, msLevel = 2L))
    expect_equal(nrow(tmp), 0)
    tmp <- chromatogram(onDisk, rt = rtr, msLevel = 1:10)
    expect_equal(tmp, res)
    res <- MSnbase:::.extractMultipleChromatograms(onDisk, rt = rtr,
                                                   msLevel = 1:5)
    colnames(res) <- basename(fileNames(onDisk))
    res <- as(res, "MChromatograms")
    expect_true(validObject(res))
    ## pData(res) <- pData(onDisk)
    ## fData(res) <- fData(tmp)
    ## expect_equal(tmp, res)
})
## Test the two versions that could/might be called by the
## spectrapply,OnDiskMSnExp method. Each has its own pros and cons and cases
## in which it outperforms the other function.
## Both implementations must return identical results for full and subset
## feature data, with and without a function applied to each spectrum.
test_that("low memory spectrapply function works", {
    ## Build a minimal featureData data.frame straight from the file header.
    fl <- system.file("lockmass/LockMass_test.mzXML", package = "msdata")
    fh <- mzR::openMSfile(fl)
    hdr <- mzR::header(fh)
    mzR::close(fh)
    fData <- hdr
    fData$spIdx <- hdr$seqNum
    fData$fileIdx <- 1L
    fData$smoothed <- FALSE
    fData$centroided <- TRUE
    fastLoad <- FALSE
    ## All spectra of the file: both implementations must agree.
    expect_equal(
        MSnbase:::.applyFun2SpectraOfFileMulti(fData, filenames = fl,
                                               fastLoad = fastLoad),
        MSnbase:::.applyFun2IndividualSpectraOfFile(fData,
                                                    filenames = fl,
                                                    fastLoad = fastLoad))
    ## Same comparison on a non-contiguous subset of spectra.
    fd <- fData[c(4, 8, 32, 123), ]
    expect_equal(
        MSnbase:::.applyFun2SpectraOfFileMulti(fd, filenames = fl,
                                               fastLoad = fastLoad),
        MSnbase:::.applyFun2IndividualSpectraOfFile(fd,
                                                    filenames = fl,
                                                    fastLoad = fastLoad))
    ## With a function to apply.
    expect_equal(
        MSnbase:::.applyFun2SpectraOfFileMulti(fd, filenames = fl,
                                               fastLoad = fastLoad,
                                               APPLYFUN = mz),
        MSnbase:::.applyFun2IndividualSpectraOfFile(fd,
                                                    filenames = fl,
                                                    fastLoad = fastLoad,
                                                    APPLYFUN = mz))
})
## Isolation window bounds: for MS2 data the lower bound must be below the
## upper bound and bracket the precursor m/z; for MS1-only data (sciex
## fixture) both accessors are expected to return NA.
test_that("isolationWindowLowerMz,isolationWindowUpperMz,OnDiskMSnExp works", {
    mz_low <- isolationWindowLowerMz(tmt_od_ms2_sub)
    mz_high <- isolationWindowUpperMz(tmt_od_ms2_sub)
    expect_true(all(mz_low < mz_high))
    expect_true(all(precursorMz(tmt_od_ms2_sub) >= mz_low))
    expect_true(all(precursorMz(tmt_od_ms2_sub) <= mz_high))
    mz_low <- isolationWindowLowerMz(sciex)
    mz_high <- isolationWindowUpperMz(sciex)
    expect_true(all(is.na(mz_low)))
    expect_true(all(is.na(mz_high)))
})
## combineSpectra on an rt-filtered OnDiskMSnExp should yield an in-memory
## MSnExp with one combined spectrum per file (the sciex fixture has 2 files).
test_that("combineSpectra,MSnExp works with OnDiskMSnExp", {
    res <- combineSpectra(filterRt(sciex, c(10, 20)))
    expect_true(is(res, "MSnExp"))
    expect_true(length(res) == 2)
})
## .on_disk_split_by_file should produce one OnDiskMSnExp per input file,
## each equivalent to filterFile() on the corresponding file index.
test_that(".on_disk_split_by_file works", {
    res <- .on_disk_split_by_file(sciex)
    expect_equal(length(res), 2)
    expect_equal(featureData(res[[1]]), featureData(filterFile(sciex, 1L)))
    expect_equal(featureData(res[[2]]), featureData(filterFile(sciex, 2L)))
    expect_equal(phenoData(res[[1]]), phenoData(filterFile(sciex, 1L)))
    expect_equal(phenoData(res[[2]]), phenoData(filterFile(sciex, 2L)))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vector.R
\name{cg_ln}
\alias{cg_ln}
\title{Natural Logarithm}
\usage{
cg_ln(x, name = NULL)
}
\arguments{
\item{x}{either a cg_node object or a numerical vector or array.}
\item{name}{character scalar, name of the operation (optional).}
}
\value{
cg_operator object.
}
\description{
Calculate \code{log(x)}.
}
\seealso{
\link[base:log]{log}
}
\author{
Ron Triepels
}
| /man/cg_ln.Rd | permissive | triepels/cgraph | R | false | true | 446 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vector.R
\name{cg_ln}
\alias{cg_ln}
\title{Natural Logarithm}
\usage{
cg_ln(x, name = NULL)
}
\arguments{
\item{x}{either a cg_node object or a numerical vector or array.}
\item{name}{character scalar, name of the operation (optional).}
}
\value{
cg_operator object.
}
\description{
Calculate \code{log(x)}.
}
\seealso{
\link[base:log]{log}
}
\author{
Ron Triepels
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parClass.R
\name{as.parframe}
\alias{as.parframe}
\alias{as.parframe.parlist}
\title{Coerce object to a parameter frame}
\usage{
as.parframe(x, ...)
\method{as.parframe}{parlist}(x, sort.by = "value", ...)
}
\arguments{
\item{x}{object to be coerced}
\item{...}{other arguments}
\item{sort.by}{character indicating by which column the returned parameter frame
should be sorted. Defaults to \code{"value"}.}
}
\value{
object of class \link{parframe}.
}
\description{
Coerce object to a parameter frame
}
\examples{
## Generate a prediction function
regfn <- c(y = "sin(a*time)")
g <- Y(regfn, parameters = "a")
x <- Xt(condition = "C1")
## Generate data
data <- datalist(
C1 = data.frame(
name = "y",
time = 1:5,
value = sin(1:5) + rnorm(5, 0, .1),
sigma = .1
)
)
## Initialize parameters and time
pars <- c(a = 1)
times <- seq(0, 5, .1)
plot((g*x)(times, pars), data)
## Do many fits from random positions and store them into parlist
out <- as.parlist(lapply(1:50, function(i) {
trust(normL2(data, g*x), pars + rnorm(length(pars), 0, 1), rinit = 1, rmax = 10)
}))
summary(out)
## Reduce parlist to parframe
parframe <- as.parframe(out)
plotValues(parframe)
## Reduce parframe to best fit
bestfit <- as.parvec(parframe)
plot((g*x)(times, bestfit), data)
}
| /man/as.parframe.Rd | no_license | marcusrosenblatt/dMod | R | false | true | 1,366 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parClass.R
\name{as.parframe}
\alias{as.parframe}
\alias{as.parframe.parlist}
\title{Coerce object to a parameter frame}
\usage{
as.parframe(x, ...)
\method{as.parframe}{parlist}(x, sort.by = "value", ...)
}
\arguments{
\item{x}{object to be coerced}
\item{...}{other arguments}
\item{sort.by}{character indicating by which column the returned parameter frame
should be sorted. Defaults to \code{"value"}.}
}
\value{
object of class \link{parframe}.
}
\description{
Coerce object to a parameter frame
}
\examples{
## Generate a prediction function
regfn <- c(y = "sin(a*time)")
g <- Y(regfn, parameters = "a")
x <- Xt(condition = "C1")
## Generate data
data <- datalist(
C1 = data.frame(
name = "y",
time = 1:5,
value = sin(1:5) + rnorm(5, 0, .1),
sigma = .1
)
)
## Initialize parameters and time
pars <- c(a = 1)
times <- seq(0, 5, .1)
plot((g*x)(times, pars), data)
## Do many fits from random positions and store them into parlist
out <- as.parlist(lapply(1:50, function(i) {
trust(normL2(data, g*x), pars + rnorm(length(pars), 0, 1), rinit = 1, rmax = 10)
}))
summary(out)
## Reduce parlist to parframe
parframe <- as.parframe(out)
plotValues(parframe)
## Reduce parframe to best fit
bestfit <- as.parvec(parframe)
plot((g*x)(times, bestfit), data)
}
|
# Check whether `x` is an lgb.Booster R6 object.
#
# @param x object to test
# @return TRUE if `x` carries the "lgb.Booster" R6 class, FALSE otherwise
lgb.is.Booster <- function(x) {
  lgb.check.r6.class(object = x, name = "lgb.Booster")
}
# Check whether `x` is an lgb.Dataset R6 object.
#
# @param x object to test
# @return TRUE if `x` carries the "lgb.Dataset" R6 class, FALSE otherwise
lgb.is.Dataset <- function(x) {
  lgb.check.r6.class(object = x, name = "lgb.Dataset")
}
# Produce the sentinel value used for a "null" LightGBM model handle.
#
# @return NA_real_ on 64-bit builds, NA_integer_ on 32-bit builds,
#   matching the width of a C pointer on this platform
lgb.null.handle <- function() {
  on_64bit <- .Machine$sizeof.pointer == 8L
  if (on_64bit) NA_real_ else NA_integer_
}
# Check whether a LightGBM handle is unset (NULL or the NA sentinel
# produced by lgb.null.handle()).
#
# @param x handle value to test
# @return TRUE if `x` is NULL or NA, FALSE otherwise
lgb.is.null.handle <- function(x) {
  is.null(x) || is.na(x)
}
# Decode the first `len` bytes of a raw vector into a character string.
#
# @param arr raw vector holding the bytes
# @param len number of leading bytes to decode
# @return single character string
lgb.encode.char <- function(arr, len) {
  if (!is.raw(arr)) {
    stop("lgb.encode.char: Can only encode from raw type")
  }
  head_bytes <- arr[seq_len(len)]
  rawToChar(head_bytes)
}
# [description] Get the most recent error stored on the C++ side and raise it
# as an R error.
# [return] Never returns normally: stop() always signals an error condition.
lgb.last_error <- function() {
  # Fetch the last error message recorded by the LightGBM native library
  err_msg <- .Call(
    LGBM_GetLastError_R
  )
  stop("api error: ", err_msg)
  # NOTE(review): unreachable after stop(); kept unchanged in this
  # documentation-only edit
  return(invisible(NULL))
}
# Convert a list of LightGBM parameters into the single space-separated
# "key1=value1 key2=v1,v2 ..." string expected by the native library.
#
# @param params named list of parameters; dots in names are translated
#   to underscores
# @param ... additional parameters given directly in the call; a parameter
#   may be supplied either here or in `params`, but not in both
# @return single character string ("" if no parameter has a non-empty value)
lgb.params2str <- function(params, ...) {
  # Parameters must come in as a plain list
  if (!identical(class(params), "list")) {
    stop("params must be a list")
  }
  # LightGBM parameter names use underscores, so translate dots
  names(params) <- gsub("\\.", "_", names(params))
  # Parameters passed through the dots-expansion, normalized the same way
  dot_params <- list(...)
  names(dot_params) <- gsub("\\.", "_", names(dot_params))
  # Refuse parameters that were given both ways
  if (length(intersect(names(params), names(dot_params))) > 0L) {
    stop(
      "Same parameters in "
      , sQuote("params")
      , " and in the call are not allowed. Please check your "
      , sQuote("params")
      , " list"
    )
  }
  params <- c(params, dot_params)
  # Format every value up front with vapply() instead of growing a list
  # inside a loop. Multi-valued parameters are joined with commas; trimws()
  # is necessary because format() pads values to a common width.
  formatted_values <- vapply(
    params
    , function(param_value) {
      paste0(
        trimws(
          format(
            x = param_value
            , scientific = FALSE
          )
        )
        , collapse = ","
      )
    }
    , FUN.VALUE = character(1L)
  )
  # Skip parameters whose formatted value is empty
  keep <- nchar(formatted_values) > 0L
  if (!any(keep)) {
    return("")
  }
  key_value_pairs <- paste0(names(params)[keep], "=", formatted_values[keep])
  return(paste0(key_value_pairs, collapse = " "))
}
# Translate interaction constraints given as feature names or 1-based
# feature indices into the "[i,j,...]" strings (0-based indices) that
# LightGBM's native side expects.
#
# @param interaction_constraints list of character or numeric vectors
#   (or NULL for no constraints)
# @param column_names character vector of feature names, in column order
# @return list of constraint strings (empty list if no constraints given)
lgb.check_interaction_constraints <- function(interaction_constraints, column_names) {
  string_constraints <- list()
  if (!is.null(interaction_constraints)) {
    if (!methods::is(interaction_constraints, "list")) {
      stop("interaction_constraints must be a list")
    }
    is_valid_element <- function(x) {
      is.character(x) || is.numeric(x)
    }
    if (!all(sapply(interaction_constraints, is_valid_element))) {
      stop("every element in interaction_constraints must be a character vector or numeric vector")
    }
    for (constraint in interaction_constraints) {
      if (is.character(constraint)) {
        # Map feature names to 0-based positions in column_names
        constraint_indices <- as.integer(match(constraint, column_names) - 1L)
        # Any name that did not match is an unknown feature
        if (sum(is.na(constraint_indices)) > 0L) {
          stop(
            "supplied an unknown feature in interaction_constraints "
            , sQuote(constraint[is.na(constraint_indices)])
          )
        }
      } else {
        # Numeric constraints are 1-based feature indices; they must not
        # exceed the number of features
        if (max(constraint) > length(column_names)) {
          stop(
            "supplied a too large value in interaction_constraints: "
            , max(constraint)
            , " but only "
            , length(column_names)
            , " features"
          )
        }
        # Store indices 0-based instead of 1-based
        constraint_indices <- as.integer(constraint - 1L)
      }
      constraint_string <- paste0("[", paste0(constraint_indices, collapse = ","), "]")
      string_constraints <- append(string_constraints, constraint_string)
    }
  }
  return(string_constraints)
}
# Check whether `object` is an R6 object of class `name`.
#
# @param object object to test
# @param name expected R6 class name
# @return TRUE if class(object) contains both "R6" and `name`
lgb.check.r6.class <- function(object, name) {
  required_classes <- c("R6", name)
  all(required_classes %in% class(object))
}
# Validate the objective and store it in params$objective.
#
# @param params named list of parameters
# @param obj objective given directly in the call (string, function, or
#   NULL); when non-NULL it overrides params$objective
# @return `params` with a validated `objective` entry
lgb.check.obj <- function(params, obj) {
  # Objective names LightGBM recognizes, including aliases
  OBJECTIVES <- c(
    "regression", "regression_l1", "regression_l2", "mean_squared_error"
    , "mse", "l2_root", "root_mean_squared_error", "rmse"
    , "mean_absolute_error", "mae", "quantile", "huber", "fair"
    , "poisson", "binary", "lambdarank", "multiclass", "softmax"
    , "multiclassova", "multiclass_ova", "ova", "ovr", "xentropy"
    , "cross_entropy", "xentlambda", "cross_entropy_lambda"
    , "mean_absolute_percentage_error", "mape", "gamma", "tweedie"
    , "rank_xendcg", "xendcg", "xe_ndcg", "xe_ndcg_mart", "xendcg_mart"
  )
  # An explicitly-passed `obj` overrides whatever is in params
  if (!is.null(obj)) {
    params$objective <- obj
  }
  if (is.character(params$objective)) {
    # String objectives must be one of the known names
    if (!(params$objective %in% OBJECTIVES)) {
      stop("lgb.check.obj: objective name error should be one of (", paste0(OBJECTIVES, collapse = ", "), ")")
    }
  } else if (!is.function(params$objective)) {
    # Custom objectives are supplied as functions
    stop("lgb.check.obj: objective should be a character or a function")
  }
  return(params)
}
# [description]
# Move any character elements of `eval` into params$metric, keeping metrics
# already present. `eval` may be a string, a character vector, a function,
# or a list mixing strings and functions; only the strings are collected
# here (function-valued metrics are handled elsewhere).
lgb.check.eval <- function(params, eval) {
  # Normalize params$metric to a list
  if (is.null(params$metric)) {
    params$metric <- list()
  } else if (is.character(params$metric)) {
    params$metric <- as.list(params$metric)
  }
  # Collect the character elements of `eval`
  if (!is.function(eval)) {
    for (eval_element in eval) {
      if (is.character(eval_element)) {
        params$metric <- append(params$metric, eval_element)
      }
    }
  }
  # With more than one metric, the "no metric" placeholders must not
  # be included
  if (length(params$metric) > 1L) {
    is_real_metric <- function(metric) {
      !(metric %in% .NO_METRIC_STRINGS())
    }
    params$metric <- Filter(f = is_real_metric, x = params$metric)
  }
  # De-duplicate while keeping the list structure
  params$metric <- as.list(unique(unlist(params$metric)))
  return(params)
}
# [description]
#
# Resolve a parameter that can be supplied three ways: under its main name
# in `params`, under one of its aliases in `params`, or as a keyword
# argument of the calling function (e.g. "num_iterations" vs. the `nrounds`
# keyword of lgb.train()).
#
# Precedence (first match wins):
#
#   1. params[[main_param_name]]
#   2. the first alias of main_param_name found in `params`
#   3. the keyword-argument value `alternative_kwarg_value`
#
# All aliases found in `params` are removed before returning.
#
# [return]
# `params` with `main_param_name` set to the chosen value and every alias
# of it removed
lgb.check.wrapper_param <- function(main_param_name, params, alternative_kwarg_value) {
  aliases <- .PARAMETER_ALIASES()[[main_param_name]]
  # Aliases (other than the main name) the user actually supplied
  provided <- names(params)[names(params) %in% aliases]
  provided <- provided[provided != main_param_name]
  # 1. the main parameter name wins; drop all aliases
  if (!is.null(params[[main_param_name]])) {
    for (alias in provided) {
      params[[alias]] <- NULL
    }
    return(params)
  }
  # 2. otherwise the first provided alias wins
  if (length(provided) > 0L) {
    params[[main_param_name]] <- params[[provided[1L]]]
    for (alias in provided) {
      params[[alias]] <- NULL
    }
    return(params)
  }
  # 3. finally, fall back to the keyword-argument value
  params[[main_param_name]] <- alternative_kwarg_value
  return(params)
}
| /R-package/R/utils.R | permissive | amauryfournier/LightGBM | R | false | false | 8,862 | r | lgb.is.Booster <- function(x) {
return(lgb.check.r6.class(object = x, name = "lgb.Booster"))
}
# Check whether `x` is an lgb.Dataset R6 object.
#
# @param x object to test
# @return TRUE if `x` carries the "lgb.Dataset" R6 class, FALSE otherwise
lgb.is.Dataset <- function(x) {
  lgb.check.r6.class(object = x, name = "lgb.Dataset")
}
# Produce the sentinel value used for a "null" LightGBM model handle.
#
# @return NA_real_ on 64-bit builds, NA_integer_ on 32-bit builds,
#   matching the width of a C pointer on this platform
lgb.null.handle <- function() {
  on_64bit <- .Machine$sizeof.pointer == 8L
  if (on_64bit) NA_real_ else NA_integer_
}
# Check whether a LightGBM handle is unset (NULL or the NA sentinel
# produced by lgb.null.handle()).
#
# @param x handle value to test
# @return TRUE if `x` is NULL or NA, FALSE otherwise
lgb.is.null.handle <- function(x) {
  is.null(x) || is.na(x)
}
# Decode the first `len` bytes of a raw vector into a character string.
#
# @param arr raw vector holding the bytes
# @param len number of leading bytes to decode
# @return single character string
lgb.encode.char <- function(arr, len) {
  if (!is.raw(arr)) {
    stop("lgb.encode.char: Can only encode from raw type")
  }
  head_bytes <- arr[seq_len(len)]
  rawToChar(head_bytes)
}
# [description] Get the most recent error stored on the C++ side and raise it
# as an R error.
# [return] Never returns normally: stop() always signals an error condition.
lgb.last_error <- function() {
  # Fetch the last error message recorded by the LightGBM native library
  err_msg <- .Call(
    LGBM_GetLastError_R
  )
  stop("api error: ", err_msg)
  # NOTE(review): unreachable after stop(); kept unchanged in this
  # documentation-only edit
  return(invisible(NULL))
}
# Convert a list of LightGBM parameters into the single space-separated
# "key1=value1 key2=v1,v2 ..." string expected by the native library.
#
# @param params named list of parameters; dots in names are translated
#   to underscores
# @param ... additional parameters given directly in the call; a parameter
#   may be supplied either here or in `params`, but not in both
# @return single character string ("" if no parameter has a non-empty value)
lgb.params2str <- function(params, ...) {
  # Parameters must come in as a plain list
  if (!identical(class(params), "list")) {
    stop("params must be a list")
  }
  # LightGBM parameter names use underscores, so translate dots
  names(params) <- gsub("\\.", "_", names(params))
  # Parameters passed through the dots-expansion, normalized the same way
  dot_params <- list(...)
  names(dot_params) <- gsub("\\.", "_", names(dot_params))
  # Refuse parameters that were given both ways
  if (length(intersect(names(params), names(dot_params))) > 0L) {
    stop(
      "Same parameters in "
      , sQuote("params")
      , " and in the call are not allowed. Please check your "
      , sQuote("params")
      , " list"
    )
  }
  params <- c(params, dot_params)
  # Format every value up front with vapply() instead of growing a list
  # inside a loop. Multi-valued parameters are joined with commas; trimws()
  # is necessary because format() pads values to a common width.
  formatted_values <- vapply(
    params
    , function(param_value) {
      paste0(
        trimws(
          format(
            x = param_value
            , scientific = FALSE
          )
        )
        , collapse = ","
      )
    }
    , FUN.VALUE = character(1L)
  )
  # Skip parameters whose formatted value is empty
  keep <- nchar(formatted_values) > 0L
  if (!any(keep)) {
    return("")
  }
  key_value_pairs <- paste0(names(params)[keep], "=", formatted_values[keep])
  return(paste0(key_value_pairs, collapse = " "))
}
# Translate interaction constraints given as feature names or 1-based
# feature indices into the "[i,j,...]" strings (0-based indices) that
# LightGBM's native side expects.
#
# @param interaction_constraints list of character or numeric vectors
#   (or NULL for no constraints)
# @param column_names character vector of feature names, in column order
# @return list of constraint strings (empty list if no constraints given)
lgb.check_interaction_constraints <- function(interaction_constraints, column_names) {
  string_constraints <- list()
  if (!is.null(interaction_constraints)) {
    if (!methods::is(interaction_constraints, "list")) {
      stop("interaction_constraints must be a list")
    }
    is_valid_element <- function(x) {
      is.character(x) || is.numeric(x)
    }
    if (!all(sapply(interaction_constraints, is_valid_element))) {
      stop("every element in interaction_constraints must be a character vector or numeric vector")
    }
    for (constraint in interaction_constraints) {
      if (is.character(constraint)) {
        # Map feature names to 0-based positions in column_names
        constraint_indices <- as.integer(match(constraint, column_names) - 1L)
        # Any name that did not match is an unknown feature
        if (sum(is.na(constraint_indices)) > 0L) {
          stop(
            "supplied an unknown feature in interaction_constraints "
            , sQuote(constraint[is.na(constraint_indices)])
          )
        }
      } else {
        # Numeric constraints are 1-based feature indices; they must not
        # exceed the number of features
        if (max(constraint) > length(column_names)) {
          stop(
            "supplied a too large value in interaction_constraints: "
            , max(constraint)
            , " but only "
            , length(column_names)
            , " features"
          )
        }
        # Store indices 0-based instead of 1-based
        constraint_indices <- as.integer(constraint - 1L)
      }
      constraint_string <- paste0("[", paste0(constraint_indices, collapse = ","), "]")
      string_constraints <- append(string_constraints, constraint_string)
    }
  }
  return(string_constraints)
}
# Check whether `object` is an R6 object of class `name`.
#
# @param object object to test
# @param name expected R6 class name
# @return TRUE if class(object) contains both "R6" and `name`
lgb.check.r6.class <- function(object, name) {
  required_classes <- c("R6", name)
  all(required_classes %in% class(object))
}
# Validate the objective and store it in params$objective.
#
# @param params named list of parameters
# @param obj objective given directly in the call (string, function, or
#   NULL); when non-NULL it overrides params$objective
# @return `params` with a validated `objective` entry
lgb.check.obj <- function(params, obj) {
  # Objective names LightGBM recognizes, including aliases
  OBJECTIVES <- c(
    "regression", "regression_l1", "regression_l2", "mean_squared_error"
    , "mse", "l2_root", "root_mean_squared_error", "rmse"
    , "mean_absolute_error", "mae", "quantile", "huber", "fair"
    , "poisson", "binary", "lambdarank", "multiclass", "softmax"
    , "multiclassova", "multiclass_ova", "ova", "ovr", "xentropy"
    , "cross_entropy", "xentlambda", "cross_entropy_lambda"
    , "mean_absolute_percentage_error", "mape", "gamma", "tweedie"
    , "rank_xendcg", "xendcg", "xe_ndcg", "xe_ndcg_mart", "xendcg_mart"
  )
  # An explicitly-passed `obj` overrides whatever is in params
  if (!is.null(obj)) {
    params$objective <- obj
  }
  if (is.character(params$objective)) {
    # String objectives must be one of the known names
    if (!(params$objective %in% OBJECTIVES)) {
      stop("lgb.check.obj: objective name error should be one of (", paste0(OBJECTIVES, collapse = ", "), ")")
    }
  } else if (!is.function(params$objective)) {
    # Custom objectives are supplied as functions
    stop("lgb.check.obj: objective should be a character or a function")
  }
  return(params)
}
# [description]
# Take any character values from eval and store them in params$metric.
# This has to account for the fact that `eval` could be a character vector,
# a function, a list of functions, or a list with a mix of strings and
# functions
lgb.check.eval <- function(params, eval) {
  # Collect any character metric names found in `eval` into params$metric.
  # `eval` may be a single function, a character vector, or a list mixing
  # strings and functions; only its character elements belong in the metric
  # list (functions are handled elsewhere as custom evaluation callbacks).
  metric_list <- params$metric
  if (is.null(metric_list)) {
    metric_list <- list()
  } else if (is.character(metric_list)) {
    metric_list <- as.list(metric_list)
  }
  # Pull character elements out of `eval`.
  if (!is.function(eval)) {
    for (element in eval) {
      if (is.character(element)) {
        metric_list <- append(metric_list, element)
      }
    }
  }
  # When several metrics were requested, the "no metric" sentinel strings are
  # meaningless and get dropped.
  if (length(metric_list) > 1L) {
    metric_list <- Filter(
      f = function(metric) !(metric %in% .NO_METRIC_STRINGS())
      , x = metric_list
    )
  }
  # De-duplicate while keeping list type.
  params$metric <- as.list(unique(unlist(metric_list)))
  return(params)
}
# [description]
#
# Resolve differences between passed-in keyword arguments, parameters,
# and parameter aliases. This function exists because some functions in the
# package take in parameters through their own keyword arguments other than
# the `params` list.
#
# If the same underlying parameter is provided multiple
# ways, the first item in this list is used:
#
# 1. the main (non-alias) parameter found in `params`
# 2. the first alias of that parameter found in `params`
# 3. the keyword argument passed in
#
# For example, "num_iterations" can also be provided to lgb.train()
# via keyword "nrounds". lgb.train() will choose one value for this parameter
# based on the first match in this list:
#
# 1. params[["num_iterations"]]
# 2. the first alias of "num_iterations" found in params
# 3. the nrounds keyword argument
#
# If multiple aliases are found in `params` for the same parameter, they are
# all removed before returning `params`.
#
# [return]
# params with num_iterations set to the chosen value, and other aliases
# of num_iterations removed
lgb.check.wrapper_param <- function(main_param_name, params, alternative_kwarg_value) {
  # Resolve one parameter that can arrive via `params`, a parameter alias in
  # `params`, or a keyword argument. Preference order:
  #   1. params[[main_param_name]]
  #   2. the first alias of main_param_name present in params
  #   3. `alternative_kwarg_value`
  # All aliases are removed from `params` so only the canonical name remains.
  aliases <- .PARAMETER_ALIASES()[[main_param_name]]
  # Aliases the caller actually supplied, excluding the main name itself.
  provided_aliases <- names(params)[names(params) %in% aliases]
  provided_aliases <- provided_aliases[provided_aliases != main_param_name]
  if (is.null(params[[main_param_name]])) {
    if (length(provided_aliases) > 0L) {
      # Main name absent: promote the first provided alias's value.
      params[[main_param_name]] <- params[[provided_aliases[1L]]]
    } else {
      # Nothing in params at all: fall back to the keyword argument.
      params[[main_param_name]] <- alternative_kwarg_value
    }
  }
  # Strip every alias, whichever branch was taken.
  for (alias in provided_aliases) {
    params[[alias]] <- NULL
  }
  return(params)
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated Rcpp bridge; the implementation is the compiled C++ routine
# 'redist_cppGeneratePartitions'. Argument names suggest it enumerates
# partitions of adjacency list `adjList` into `numBlocks` blocks under unit
# count and population constraints -- NOTE(review): confirm against src/.
cppGeneratePartitions <- function(adjList, numBlocks, popSizes, numConstraintLow, numConstraintHigh, popConstraintLow, popConstraintHigh) {
    .Call('redist_cppGeneratePartitions', PACKAGE = 'redist', adjList, numBlocks, popSizes, numConstraintLow, numConstraintHigh, popConstraintLow, popConstraintHigh)
}
# Auto-generated Rcpp bridge to C++ 'redist_countpartitions'; presumably
# counts the valid partitions of adjacency list `aList` -- confirm in src/.
countpartitions <- function(aList) {
    .Call('redist_countpartitions', PACKAGE = 'redist', aList)
}
# Auto-generated Rcpp bridge to C++ 'redist_calcPWDh'; name suggests a
# pairwise-distance computation over `x` -- NOTE(review): confirm in src/.
calcPWDh <- function(x) {
    .Call('redist_calcPWDh', PACKAGE = 'redist', x)
}
# Auto-generated Rcpp bridge to C++ 'redist_segregationcalc'; arguments
# (distance matrix, subgroup population, total population) suggest a
# segregation-index calculation -- NOTE(review): confirm in src/.
segregationcalc <- function(distmat, grouppop, fullpop) {
    .Call('redist_segregationcalc', PACKAGE = 'redist', distmat, grouppop, fullpop)
}
# Auto-generated Rcpp bridge to C++ 'redist_rsg'. Arguments suggest a
# random seed-and-grow districting routine: grow `Ndistrict` districts toward
# `target_pop` within tolerance `thresh`, retrying up to `maxiter` times --
# NOTE(review): semantics live in the C++ source; confirm there.
rsg <- function(adj_list, population, Ndistrict, target_pop, thresh, maxiter) {
    .Call('redist_rsg', PACKAGE = 'redist', adj_list, population, Ndistrict, target_pop, thresh, maxiter)
}
# Auto-generated Rcpp bridge to C++ 'redist_swMH': a Metropolis-Hastings
# sampler over district assignments with optional tempering/annealing weights
# (beta_* arguments). Parameter semantics are defined in the C++ source --
# NOTE(review): confirm there before relying on details.
swMH <- function(aList, cdvec, cdorigvec, popvec, grouppopvec, nsims, eprob, pct_dist_parity, beta_sequence, beta_weights, ssdmat, lambda = 0L, beta_population = 0.0, beta_compact = 0.0, beta_segregation = 0.0, beta_similar = 0.0, anneal_beta_population = 0L, anneal_beta_compact = 0L, anneal_beta_segregation = 0L, anneal_beta_similar = 0L, adjswap = 1L, exact_mh = 0L, adapt_eprob = 0L, adapt_lambda = 0L) {
    .Call('redist_swMH', PACKAGE = 'redist', aList, cdvec, cdorigvec, popvec, grouppopvec, nsims, eprob, pct_dist_parity, beta_sequence, beta_weights, ssdmat, lambda, beta_population, beta_compact, beta_segregation, beta_similar, anneal_beta_population, anneal_beta_compact, anneal_beta_segregation, anneal_beta_similar, adjswap, exact_mh, adapt_eprob, adapt_lambda)
}
# Auto-generated Rcpp bridge to C++ 'redist_genAlConn'; presumably builds a
# connectivity structure from adjacency list `aList` and district assignments
# `cds` -- NOTE(review): confirm in src/.
genAlConn <- function(aList, cds) {
    .Call('redist_genAlConn', PACKAGE = 'redist', aList, cds)
}
# Auto-generated Rcpp bridge to C++ 'redist_findBoundary'; presumably flags
# the boundary units between the full and contracted adjacency lists --
# NOTE(review): confirm in src/.
findBoundary <- function(fullList, conList) {
    .Call('redist_findBoundary', PACKAGE = 'redist', fullList, conList)
}
| /R/RcppExports.R | no_license | leesharma/redist | R | false | false | 1,956 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated Rcpp bridge; the implementation is the compiled C++ routine
# 'redist_cppGeneratePartitions'. Argument names suggest it enumerates
# partitions of adjacency list `adjList` into `numBlocks` blocks under unit
# count and population constraints -- NOTE(review): confirm against src/.
cppGeneratePartitions <- function(adjList, numBlocks, popSizes, numConstraintLow, numConstraintHigh, popConstraintLow, popConstraintHigh) {
    .Call('redist_cppGeneratePartitions', PACKAGE = 'redist', adjList, numBlocks, popSizes, numConstraintLow, numConstraintHigh, popConstraintLow, popConstraintHigh)
}
# Auto-generated Rcpp bridge to C++ 'redist_countpartitions'; presumably
# counts the valid partitions of adjacency list `aList` -- confirm in src/.
countpartitions <- function(aList) {
    .Call('redist_countpartitions', PACKAGE = 'redist', aList)
}
# Auto-generated Rcpp bridge to C++ 'redist_calcPWDh'; name suggests a
# pairwise-distance computation over `x` -- NOTE(review): confirm in src/.
calcPWDh <- function(x) {
    .Call('redist_calcPWDh', PACKAGE = 'redist', x)
}
# Auto-generated Rcpp bridge to C++ 'redist_segregationcalc'; arguments
# (distance matrix, subgroup population, total population) suggest a
# segregation-index calculation -- NOTE(review): confirm in src/.
segregationcalc <- function(distmat, grouppop, fullpop) {
    .Call('redist_segregationcalc', PACKAGE = 'redist', distmat, grouppop, fullpop)
}
# Auto-generated Rcpp bridge to C++ 'redist_rsg'. Arguments suggest a
# random seed-and-grow districting routine: grow `Ndistrict` districts toward
# `target_pop` within tolerance `thresh`, retrying up to `maxiter` times --
# NOTE(review): semantics live in the C++ source; confirm there.
rsg <- function(adj_list, population, Ndistrict, target_pop, thresh, maxiter) {
    .Call('redist_rsg', PACKAGE = 'redist', adj_list, population, Ndistrict, target_pop, thresh, maxiter)
}
# Auto-generated Rcpp bridge to C++ 'redist_swMH': a Metropolis-Hastings
# sampler over district assignments with optional tempering/annealing weights
# (beta_* arguments). Parameter semantics are defined in the C++ source --
# NOTE(review): confirm there before relying on details.
swMH <- function(aList, cdvec, cdorigvec, popvec, grouppopvec, nsims, eprob, pct_dist_parity, beta_sequence, beta_weights, ssdmat, lambda = 0L, beta_population = 0.0, beta_compact = 0.0, beta_segregation = 0.0, beta_similar = 0.0, anneal_beta_population = 0L, anneal_beta_compact = 0L, anneal_beta_segregation = 0L, anneal_beta_similar = 0L, adjswap = 1L, exact_mh = 0L, adapt_eprob = 0L, adapt_lambda = 0L) {
    .Call('redist_swMH', PACKAGE = 'redist', aList, cdvec, cdorigvec, popvec, grouppopvec, nsims, eprob, pct_dist_parity, beta_sequence, beta_weights, ssdmat, lambda, beta_population, beta_compact, beta_segregation, beta_similar, anneal_beta_population, anneal_beta_compact, anneal_beta_segregation, anneal_beta_similar, adjswap, exact_mh, adapt_eprob, adapt_lambda)
}
# Auto-generated Rcpp bridge to C++ 'redist_genAlConn'; presumably builds a
# connectivity structure from adjacency list `aList` and district assignments
# `cds` -- NOTE(review): confirm in src/.
genAlConn <- function(aList, cds) {
    .Call('redist_genAlConn', PACKAGE = 'redist', aList, cds)
}
# Auto-generated Rcpp bridge to C++ 'redist_findBoundary'; presumably flags
# the boundary units between the full and contracted adjacency lists --
# NOTE(review): confirm in src/.
findBoundary <- function(fullList, conList) {
    .Call('redist_findBoundary', PACKAGE = 'redist', fullList, conList)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_treatment.R
\name{assign_treatment_indicator}
\alias{assign_treatment_indicator}
\title{Assign treatment status}
\usage{
assign_treatment_indicator(data, assignment)
}
\arguments{
\item{data}{A dataframe, often created by \code{\link{draw_population}} or \code{\link{draw_sample}}.}
\item{assignment}{An assignment object created by \code{\link{declare_assignment}}; or a function that assigns treatment}
}
\value{
A random assignment vector of length N.
}
\description{
This function takes a data.frame and an assignment object and returns an assignment vector. Users will often prefer to use \code{\link{assign_treatment}}.
}
\examples{
population <- declare_population(noise = declare_variable(), size = 1000)
sampling <- declare_sampling(n = 500)
potential_outcomes <- declare_potential_outcomes(formula = Y ~ 5 + .5*Z + noise,
condition_names = c(0, 1),
assignment_variable_name = "Z")
assignment <- declare_assignment(potential_outcomes = potential_outcomes)
pop_draw <- draw_population(population = population)
smp_draw <- draw_sample(data = pop_draw, sampling = sampling)
Z <- assign_treatment_indicator(data = smp_draw, assignment=assignment)
table(Z)
}
| /man/assign_treatment_indicator.Rd | no_license | yadevi/DeclareDesign-1 | R | false | true | 1,349 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_treatment.R
\name{assign_treatment_indicator}
\alias{assign_treatment_indicator}
\title{Assign treatment status}
\usage{
assign_treatment_indicator(data, assignment)
}
\arguments{
\item{data}{A dataframe, often created by \code{\link{draw_population}} or \code{\link{draw_sample}}.}
\item{assignment}{An assignment object created by \code{\link{declare_assignment}}; or a function that assigns treatment}
}
\value{
A random assignment vector of length N.
}
\description{
This function takes a data.frame and an assignment object and returns an assignment vector. Users will often prefer to use \code{\link{assign_treatment}}.
}
\examples{
population <- declare_population(noise = declare_variable(), size = 1000)
sampling <- declare_sampling(n = 500)
potential_outcomes <- declare_potential_outcomes(formula = Y ~ 5 + .5*Z + noise,
condition_names = c(0, 1),
assignment_variable_name = "Z")
assignment <- declare_assignment(potential_outcomes = potential_outcomes)
pop_draw <- draw_population(population = population)
smp_draw <- draw_sample(data = pop_draw, sampling = sampling)
Z <- assign_treatment_indicator(data = smp_draw, assignment=assignment)
table(Z)
}
|
# Installation instructions:
# 1. Install DEBrowser and its dependencies by running the lines below
#    in R or RStudio.
#
# FIX: this script contained three unresolved git merge conflict blocks
# (<<<<<<< HEAD / ======= / >>>>>>>) which made it unparseable. The conflicts
# are resolved below in favor of the commented (more complete) branch.
if (!requireNamespace("BiocManager", quietly=TRUE))
    install.packages("BiocManager")
BiocManager::install("debrowser")

# 2. Load the library
library(debrowser)

# 3. Prepare data for DEBrowser
if (!require("tidyverse")) install.packages("tidyverse"); library(tidyverse)

# List the count files. You may need to change the path and pattern to match your files.
genefilelist <- list.files(path="SARTools", pattern="*.genes.tsv", full.names=TRUE)
print(genefilelist)
genefiles <- lapply(genefilelist, read_tsv)

# Use grep to change the file names into shorter sample names
samplenames <- gsub("SARTools/S2_DRSC_CG8144_", "", genefilelist)
samplenames <- gsub("SARTools/S2_DRSC_","", samplenames)
samplenames <- gsub(".genes.tsv", "", samplenames)
samplenames <- gsub("-","_", samplenames) # DEBrowser doesn't like -
samplenames

# Reformat the gene files into a single data frame
genefiles
genefiles %>%
    bind_cols() %>%
    select(Name, starts_with("NumReads")) -> genetable

# Rename the columns of the genetable to match the sample names
colnames(genetable)[2:7] <- as.list(samplenames)
# Check the genetable and save it
head(genetable)
write_tsv(genetable, path="genetable.tsv")

### Now repeat all of that for the transcript files
transcriptfilelist <- list.files(path="SARTools", pattern="*.transcripts.tsv", full.names=TRUE)
transcriptfiles <- lapply(transcriptfilelist, read_tsv)
transcriptfiles %>%
    bind_cols() %>%
    select(Name, starts_with("NumReads")) -> transcripttable
colnames(transcripttable)[2:7] <- as.list(samplenames)
head(transcripttable)
str(transcripttable)
write_tsv(transcripttable, path="transcripttable.tsv")

## Also need to reformat the target.txt file to match the sample names
transcripts_target <- read_delim("SARTools/transcripts.target.txt",
    "\t", escape_double = FALSE, trim_ws = TRUE)
transcripts_target
colnames(transcripttable) <- gsub("-","_", colnames(transcripttable))
colnames(transcripttable)
transcripts_target$label[1:3] <- colnames(transcripttable)[5:7]
transcripts_target$label[4:6] <- colnames(transcripttable)[2:4]
metadata <- select(transcripts_target, c(label, batch, Treatment))
colnames(metadata) <- c("sample","batch","condition")
write_tsv(metadata, path="metadata.tsv")
metadata
colnames(transcripttable) %in% metadata$sample

# 4. Start DEBrowser
startDEBrowser()
# 5. Data Exploration with DE Browser
#1 Load the Count Data File and the Metadata File
#2 Filter the data using CPM, CPM<1, in at least 3 samples (half of the samples)
#3 Batch correct the data using TMM normalization, Combat correction method, condition as Treatment, batch as Batch
#4 Visualize the PCA plot by changing Color field to "condition", and Shape field to "batch"
#5 Go to DE Analysis and Add New Comparison
#  Set the Condition 1 samples to the Untreated and Condition 2 to the RNAi samples
#  Chose the appropriate DE Method, and leave the parameters on the default settings
#  Go to Main Plots and explore the MA and Volcano plots
# 6. Confirm that RNAi experiment worked
#1 Load in the gene results and metadata
#2 Filter and batch correct as above
#3 Run the appropriate DE Analysis
#4 Go to the Tables and sort by log10padjust search for FBgn0261552 - this is the *pasilla* gene
| /DEBrowser/DEBrowser.R | permissive | ianvogel/RNASeqProject | R | false | false | 3,834 | r | # Installation instructions:
# 1. Install DEBrowser and its dependencies by running the lines below
#    in R or RStudio.
#
# FIX: this script contained three unresolved git merge conflict blocks
# (<<<<<<< HEAD / ======= / >>>>>>>) which made it unparseable. The conflicts
# are resolved below in favor of the commented (more complete) branch.
if (!requireNamespace("BiocManager", quietly=TRUE))
    install.packages("BiocManager")
BiocManager::install("debrowser")

# 2. Load the library
library(debrowser)

# 3. Prepare data for DEBrowser
if (!require("tidyverse")) install.packages("tidyverse"); library(tidyverse)

# List the count files. You may need to change the path and pattern to match your files.
genefilelist <- list.files(path="SARTools", pattern="*.genes.tsv", full.names=TRUE)
print(genefilelist)
genefiles <- lapply(genefilelist, read_tsv)

# Use grep to change the file names into shorter sample names
samplenames <- gsub("SARTools/S2_DRSC_CG8144_", "", genefilelist)
samplenames <- gsub("SARTools/S2_DRSC_","", samplenames)
samplenames <- gsub(".genes.tsv", "", samplenames)
samplenames <- gsub("-","_", samplenames) # DEBrowser doesn't like -
samplenames

# Reformat the gene files into a single data frame
genefiles
genefiles %>%
    bind_cols() %>%
    select(Name, starts_with("NumReads")) -> genetable

# Rename the columns of the genetable to match the sample names
colnames(genetable)[2:7] <- as.list(samplenames)
# Check the genetable and save it
head(genetable)
write_tsv(genetable, path="genetable.tsv")

### Now repeat all of that for the transcript files
transcriptfilelist <- list.files(path="SARTools", pattern="*.transcripts.tsv", full.names=TRUE)
transcriptfiles <- lapply(transcriptfilelist, read_tsv)
transcriptfiles %>%
    bind_cols() %>%
    select(Name, starts_with("NumReads")) -> transcripttable
colnames(transcripttable)[2:7] <- as.list(samplenames)
head(transcripttable)
str(transcripttable)
write_tsv(transcripttable, path="transcripttable.tsv")

## Also need to reformat the target.txt file to match the sample names
transcripts_target <- read_delim("SARTools/transcripts.target.txt",
    "\t", escape_double = FALSE, trim_ws = TRUE)
transcripts_target
colnames(transcripttable) <- gsub("-","_", colnames(transcripttable))
colnames(transcripttable)
transcripts_target$label[1:3] <- colnames(transcripttable)[5:7]
transcripts_target$label[4:6] <- colnames(transcripttable)[2:4]
metadata <- select(transcripts_target, c(label, batch, Treatment))
colnames(metadata) <- c("sample","batch","condition")
write_tsv(metadata, path="metadata.tsv")
metadata
colnames(transcripttable) %in% metadata$sample

# 4. Start DEBrowser
startDEBrowser()
# 5. Data Exploration with DE Browser
#1 Load the Count Data File and the Metadata File
#2 Filter the data using CPM, CPM<1, in at least 3 samples (half of the samples)
#3 Batch correct the data using TMM normalization, Combat correction method, condition as Treatment, batch as Batch
#4 Visualize the PCA plot by changing Color field to "condition", and Shape field to "batch"
#5 Go to DE Analysis and Add New Comparison
#  Set the Condition 1 samples to the Untreated and Condition 2 to the RNAi samples
#  Chose the appropriate DE Method, and leave the parameters on the default settings
#  Go to Main Plots and explore the MA and Volcano plots
# 6. Confirm that RNAi experiment worked
#1 Load in the gene results and metadata
#2 Filter and batch correct as above
#3 Run the appropriate DE Analysis
#4 Go to the Tables and sort by log10padjust search for FBgn0261552 - this is the *pasilla* gene
|
#!/usr/bin/env Rscript
###################################################
### This program matches each ancestral genome to extant genomes
### and counts the contig co-occurrence on extant chromosomes for every pair of ancestral contigs
### It also summarizes measures for contig matching
###################################################
### input:  1. genome IDs and ancestor tree nodes defined in Genomes.txt
###         2. extant genome karyotypes defined in "karyotype" folder: karyotype_genomeID_genomeName.txt,
###            where genomeID and genomeName match the info in Genomes.txt
###         3. contig gene feature files for each descendent genome in ./data/contigGFF/ContigGFF_gid_W*TreeNode*_*_*.txt
### output: 1. heatmap for each ancestor: results/clustering/AncestorNode_*_heat.pdf
###         2. heatmap reordered contigs for each ancestor: results/clustering/ancestor*_clusters.csv
###         3. summary of measures: results/ancestorStats/block_measures.csv
# config.R supplies the run parameters used below (trn.vector, gid.vector, ws,
# gf1, gf2, nctg, thresholds, *.path variables); helper.R supplies readIn.*,
# generate.blockDF.2, N50, sets, and list2dist.1.
# NOTE(review): inferred from usage in this script -- confirm against
# module3/config.R and module3/helper.R.
source("./module3/config.R")
source("./module3/helper.R")
## Accumulator for per-(parameter, genome) block statistics; one row is
## appended per genome per ancestor by the main loop.
stats.DF <- data.frame( ws=numeric(), treeNode=numeric(), gf1=numeric(), gf2=numeric(),
gID=numeric(), gName=character(), avgBLKLen=numeric(), BLKLen.N50=numeric(), coverage=numeric(),
avgNoChr=numeric(), stringsAsFactors = FALSE)
# Main loop: for each ancestor tree node, match ancestral contigs against every
# extant genome, accumulate per-genome block statistics into stats.DF, count
# contig co-occurrence on extant chromosomes, and cluster contigs via a
# correlation heatmap.
for (trn in trn.vector){
  ## pairs of co-occurring contigs, accumulated over all extant genomes
  ctgPairs <- c()
  for (gID in gid.vector){ ## loop through each genome to be analyzed
    gName <- as.character(genomeCoGeID[genomeCoGeID$genomeID == gID,]$genomeName)
    cat("\n---Analyzing", gName, "for ancestor", trn,"\n")
    ### read in extant genome karyotype: chr, size
    karyotype <- readIn.karyotype(karyotype.path, gID)
    ### read in gene features in contig; NULL when the file is absent
    contigGFF <- readIn.contigGFF(gID, ws, trn, gf1, gf2, nctg, contigGFF.path)
    if (is.null(contigGFF)) {
      warningMessage <- paste0("*",gID,"_W",ws,"TreeNode",trn,"_",gf1,"_",gf2,".txt file doesn't exist under ", contigGFF.path)
      warning(warningMessage)
      next ## skip this genome and move on
    }
    ### merge genes in contigGFF that are within a DIS.threshold distance in extant genome into blocks
    blockDF <- generate.blockDF.2(contigGFF, DIS.threshold, ws) ## blocks with window size ws
    # add a column lenBLK: length of block in bp
    blockDF <- setDT(blockDF)[, lenBLK := end - start]
    ############################################
    ### calculate block measures:
    ###   average block length in bp, block length N50,
    ###   average number of chromosomes on which a contig produces a
    ###   "significant" size block, and extant genome coverage
    ############################################
    avgBlockLen <- mean(blockDF$lenBLK)
    blockLen.N50 <- N50(blockDF$lenBLK)
    ## per-chromosome counts of blocks and of distinct contigs
    ## (chr.st / blk.st are computed for inspection; not used further below)
    chr.st <- blockDF[blockDF$lenBLK>=blockLEN.threshold,] %>%
      group_by(chr) %>%
      summarise(num_blocks = length(chr), num_diff_blk = length(unique(contig)))
    ## for each contig, the number of chromosomes carrying a significant block
    ## (blocks shorter than blockLEN.threshold are excluded)
    blk.st <- blockDF[blockDF$lenBLK>=blockLEN.threshold,] %>%
      group_by(contig) %>%
      summarise(no_chr = length(unique(chr)))
    ctg.st <- contigGFF %>% group_by(contig) %>% summarize(no_chr = length(unique(chr)))
    ## average number of chromosomes on which a contig appears
    avgNoChr <- mean(ctg.st$no_chr)
    ## per-chromosome total block length (nCovered = size minus covered bp)
    coverage <- setDT(merge(karyotype, aggregate(blockDF$lenBLK, by=list(chr=blockDF$chr), FUN=sum), by.x="chr", by.y="chr"))
    coverage[,nCovered := size - x]
    ## extant genome coverage without double-counting overlapping blocks:
    ## sets() merges overlapping [start, end] intervals per chromosome
    total.coverage <- c()
    for (c in unique(blockDF$chr)){
      coverage.2 <- setDT(data.frame(with(blockDF[chr==c,], sets(start, end, chr, 1))))
      colnames(coverage.2) <- c("start","end")
      coverage.2 <- coverage.2[,LenCovered:=end-start]
      total.coverage <- c(total.coverage, sum(coverage.2$LenCovered))
    }
    pCoverage.2 <- sum(total.coverage) / sum(karyotype[,2])
    stats.DF <- rbind(stats.DF,
                      data.frame(ws=ws, treeNode=trn, gf1=gf1, gf2=gf2, gID=gID, gName=gName,
                                 avgBLKLen=avgBlockLen, BLKLen.N50=blockLen.N50,
                                 coverage=pCoverage.2, avgNoChr=avgNoChr) )
    ################################################################
    ### counting co-occurrence of contigs from all extant genomes
    ################################################################
    ## unique (chr, contig) pairs for sufficiently long blocks,
    ## sorted by chr then by contig
    uniq <- unique(blockDF[blockDF$lenBLK>=lenBLK.threshold,c(1,4)])
    uniq <- uniq[order(uniq$chr,uniq$contig)]
    ## all pairwise combinations of contigs sharing a chromosome
    for (c in unique(uniq$chr)) {
      c <- as.numeric(as.character(c))
      if (length(uniq[uniq$chr==c,]$contig) >= 2) {
        combinations <- combn(as.numeric(as.character(uniq[uniq$chr==c,]$contig)),m=2)
        ctgPairs <- cbind(ctgPairs, combinations)
        ## note: by applying cbind, values in pairs are added by 1, starting from 1
        ## contig0 becomes 1
      }
    }
  } ## done looping through each genome and counting contig co-occurrence
  ############################################
  ### construct co-occurrence matrix for the current ancestor
  ############################################
  if (length(ctgPairs) == 0) {
    warningMessage <- paste0("No contig pairs detected (co-occurred contigs) in genome ", gName)
    warning(warningMessage)
    next
  }
  ## count frequencies of unique contig pairs
  pairs.freq <- data.frame(t(ctgPairs))
  colnames(pairs.freq) <- c("ctgID1", "ctgID2")
  pairs.freq <- count(pairs.freq, vars = c("ctgID1", "ctgID2")) ## adds "freq" column
  pairs.freq <- pairs.freq[order(pairs.freq$ctgID1, pairs.freq$ctgID2),]
  mat <- as.matrix(list2dist.1(pairs.freq))
  mat[is.na(mat)] <- 0
  ## contig distance matrix from the co-occurrence matrix
  max_freq <- sum(genomeCoGeID$numChr)
  mat2 <- (max_freq - mat)/max_freq
  diag(mat2) <- 0
  # covariance form of the distance matrix; only used by the commented-out
  # distance-based heatmap below
  mat3 <- cov(mat2,mat2, method = c("pearson"))
  ############### customized binning on correlations of co-occurrence
  correlation <- cor(mat)
  ## write correlation matrix into file under results.path
  Mat.fname <- file.path(results.path, "clustering", paste0("correlationMat_trn",trn,"_W",ws,"(",gf1,",",gf2,")_",lenBLK.threshold/1000,"kbBLK.csv"))
  write.csv(correlation, file=Mat.fname, row.names=FALSE)
  ## empirical quantile breaks for the heatmap color scale
  vec <- as.vector(correlation)
  vec2 <- sort(vec, decreasing = FALSE)
  l <- length(vec2)
  sm <- min(vec2)
  lr <- max(vec2)
  bins <- c(sm,
            nth(vec2,as.integer(l*0.5)),
            nth(vec2,as.integer(l*0.65)),
            nth(vec2,as.integer(l*0.75)),
            nth(vec2,as.integer(l*0.81)),
            nth(vec2,as.integer(l*0.85)),
            nth(vec2,as.integer(l*0.89)),
            nth(vec2,as.integer(l*0.93)),
            nth(vec2,as.integer(l*0.995)), lr)
  ############################################
  ########### generate heatmaps to group contigs into ancestral chromosomes
  ############################################
  ## convert distance to -log d (kept for reference; used only by the
  ## commented-out heatmap variants below)
  d3 <- -log(mat2); diag(d3) <- 0
  # ## set data to be distance or correlation based matrix
  # data <- mat3
  #
  # # different brewer.pal themes: Reds, Greens, Blues
  # pdf(file=file.path(results.path, "clustering", paste0("AncestorNode_",trn,"_heat.pdf")), width=40, height=40)
  # par(cex.main=4)
  # p <- heatmap.2(data,
  #                main = paste0("Ancestor ",trn),
  #                dendrogram="row",          # only draw a row dendrogram
  #                hclustfun = hclust,        ## default method for hclust: "complete"
  #                srtCol=0, adjCol = c(0.5,1),
  #                breaks=c(min(data),0.0005,0.001,0.0015,0.002,0.0025,0.003,0.0035,0.004,max(data)),
  #                lhei=c(.1,1), lwid=c(.2,1), key.title="Color Key", keysize=0.75,
  #                col = brewer.pal(9, "Blues"), trace = "none")
  #
  # # generate small size heatmaps without title and legend
  # png(file=file.path(results.path, "clustering", paste0("AncestorNode_",trn,"_heat_small.png")), width = 480, height = 480, units = "px")
  # p <- heatmap.2(data,
  #                dendrogram="none",
  #                hclustfun = hclust,
  #                srtCol=0, adjCol = c(0.5,1),
  #                breaks=c(min(data),0.0005,0.001,0.0015,0.002,0.0025,0.003,0.0035,0.004,max(data)),
  #                lhei=c(.1,1), lwid=c(.1,1),
  #                labCol=FALSE, labRow=FALSE,
  #                col = brewer.pal(9, "Blues"), trace = "none")
  ## FIX: the original call was pdf(correlation, file=..., width=40, height=40);
  ## with `file=` named, the matrix was matched positionally to `width`, which
  ## then collided with the explicit width=40 and raised
  ## "formal argument 'width' matched by multiple actual arguments".
  ## pdf() only opens the device -- the data goes to heatmap.2() below.
  pdf(file=file.path(results.path, "clustering", paste0("AncestorNode_",trn,"_heat.pdf")), width=40, height=40)
  par(cex.main=4)
  p <- heatmap.2(correlation,
                 main = paste0("Ancestor ",trn),
                 dendrogram="row", # only draw a row dendrogram/ none
                 hclustfun=hclust,
                 srtCol=0, adjCol = c(0.5,1),
                 lhei=c(.1,1), lwid=c(.2,1), key.title="Color Key", keysize=0.75,
                 col = brewer.pal(9, "Greys"), breaks = bins,
                 trace = "none")
  print(p)
  dev.off()
  ## dendrogram-ordered contig labels define the clusters for this ancestor
  write.csv(rev(rownames(p$carpet)), file=file.path(results.path, "clustering", paste0("cluster_trn",trn,".csv")), row.names=TRUE)
} ## loop through each ancestor
### Persist the per-genome block statistics collected above.
stats.outfile <- file.path(results.path, "ancestorStats", "block_measures.csv")
write.csv(stats.DF, file = stats.outfile, row.names = FALSE)
message("\n~~~~~Rscript finished analyzing co-occurrence and clustering")
| /raccroche/module3/analyze_old.R | no_license | Qiaojilim/RACCROCHE-update | R | false | false | 11,126 | r | #!/usr/bin/env Rscript
###################################################
### This program matches each ancestral genome to extant genomes
### and counts the contig co-occurrence on extant chromosomes for every pair of ancestral contigs
### It also summarizes measures for contig matching
###################################################
### input: 1. genome IDs and ancestor tree nodes defined in Genomes.txt
### 2. extant genome karyotypes defined in "karyotype" folder: karyotype_genomeID_genomeName.txt,
### where genomeID and genomeName match the info in Genomes.txt
### 3. contig gene feature files for each descendent genome in ./data/contigGFF/ContigGFF_gid_W*TreeNode*_*_*.txt
### output: 1. heatmap for each ancestor: results/clustering/AncestorNode_*_heat.pdf
### 2. heatmap reordered contigs for each ancestor: results/clustering/ancestor*_clusters.csv
### 3. summary of measures: results/ancestorStats/block_measures.csv
source("./module3/config.R")
source("./module3/helper.R")
## initialize a data frame for stats for each parameter
stats.DF <- data.frame( ws=numeric(), treeNode=numeric(), gf1=numeric(), gf2=numeric(),
gID=numeric(), gName=character(), avgBLKLen=numeric(), BLKLen.N50=numeric(), coverage=numeric(),
avgNoChr=numeric(), stringsAsFactors = FALSE)
for (trn in trn.vector){
## count cooccurrence for each ancestor tree node
ctgPairs<-c() ## initialize pairs of cooccured contigs
for (gID in gid.vector){ ## loop through each genome to be analyzed
gName <- as.character(genomeCoGeID[genomeCoGeID$genomeID == gID,]$genomeName)
cat("\n---Analyzing", gName, "for ancestor", trn,"\n")
### read in extant genome karyotype: chr, size
karyotype <- readIn.karyotype(karyotype.path, gID)
### read in gene features in contig
contigGFF <- readIn.contigGFF(gID, ws, trn, gf1, gf2, nctg, contigGFF.path)
if(is.null(contigGFF)) {
warningMessage <- paste0("*",gID,"_W",ws,"TreeNode",trn,"_",gf1,"_",gf2,".txt file doesn't exist under ", contigGFF.path)
warning(warningMessage);
next
} ## if file doesn't exist, go to next iteration
### merge genes in contigGFF that are within a DIS.threshold distance in extant genome into blocks
blockDF <- generate.blockDF.2(contigGFF, DIS.threshold, ws) ## blocks with window size ws
# add a column lenBLK: length of block
blockDF <- setDT(blockDF)[, lenBLK := end - start]
############################################
### calculate block measures:
### average block length in bp
### block length N50
### average number of chromosomes on which a contig produces a "significant" size block
### extant genome coverage
############################################
## average length and N50 of blocks in bp
avgBlockLen <- mean(blockDF$lenBLK)
blockLen.N50 <- N50(blockDF$lenBLK)
## How many blocks total per chromosome
## How many different contigs/colors per chromosome
chr.st <- blockDF[blockDF$lenBLK>=blockLEN.threshold,] %>%
group_by(chr) %>%
summarise(num_blocks = length(chr), num_diff_blk = length(unique(contig)))
## For each contig/color, how many chrs is it on
## excludes small blocks shorter than blockLEN.threshold
blk.st <- blockDF[blockDF$lenBLK>=blockLEN.threshold,] %>%
group_by(contig) %>%
summarise(no_chr = length(unique(chr)))
ctg.st <- contigGFF %>% group_by(contig) %>% summarize(no_chr = length(unique(chr)))
## average number of chromosomes on which a contig produces a "significant" size block (excludes small blocks shorter than blockLEN.threshold)
avgNoChr <- mean(ctg.st$no_chr)
## block coverage over all chromosomes
coverage <- setDT(merge(karyotype, aggregate(blockDF$lenBLK, by=list(chr=blockDF$chr), FUN=sum), by.x="chr", by.y="chr"))
coverage[,nCovered := size - x]
## calculate extant genome coverage without counting overlapped blocks
total.coverage <- c()
for (c in unique(blockDF$chr)){
coverage.2 <- setDT(data.frame(with(blockDF[chr==c,], sets(start, end, chr, 1)))) ## take union of the intervals
colnames(coverage.2) <- c("start","end")
coverage.2 <- coverage.2[,LenCovered:=end-start]
total.coverage <- c(total.coverage, sum(coverage.2$LenCovered))
}
pCoverage.2 <- sum(total.coverage) / sum(karyotype[,2])#
stats.DF <- rbind(stats.DF,
data.frame(ws=ws, treeNode=trn, gf1=gf1, gf2=gf2, gID=gID, gName=gName,
avgBLKLen=avgBlockLen, BLKLen.N50=blockLen.N50,
coverage=pCoverage.2, avgNoChr=avgNoChr) )
################################################################
### counting co-occurrence of contigs from all extant genomes
################################################################
## get uniq rows of (chr, contig), sort by chr then by contig
uniq <- unique(blockDF[blockDF$lenBLK>=lenBLK.threshold,c(1,4)])
uniq <- uniq[order(uniq$chr,uniq$contig)]
## get all combinations of cooccurrence of contigs on the same chromosome
for (c in unique(uniq$chr)) {
c <- as.numeric(as.character(c))
if(length(uniq[uniq$chr==c,]$contig)>=2) {
combinations <- combn(as.numeric(as.character(uniq[uniq$chr==c,]$contig)),m=2) ## m=2: cooccurrence of two contigs on the same chromosome
ctgPairs <- cbind(ctgPairs, combinations)
## note: by applying cbind, values in pairs are added by 1, starting from 1
## contig0 becomes 1
}
}
} ## done looping through each genome and counting contig co-occurrence
############################################
### construct co-occurrence matrix for the current ancestor
### gather pairs of contigs that appear on the same chromosome,
### then construct the cooccurrence matrix
############################################
if(length(ctgPairs)==0) {
warningMessage <- paste0("No contig pairs detected (co-occurred contigs) in genome ", gName)
warning(warningMessage);
next
}
## count frequencies of uniq ctgPairs
pairs.freq <- data.frame(t(ctgPairs))
colnames(pairs.freq) <- c("ctgID1", "ctgID2")
pairs.freq <- count(pairs.freq, vars = c("ctgID1", "ctgID2")) ## add column "freq" that counts frequency of uniq rows
pairs.freq <- pairs.freq[order(pairs.freq$ctgID1, pairs.freq$ctgID2),]
mat <- as.matrix(list2dist.1(pairs.freq))
mat[is.na(mat)] <- 0
## construct contig distance matrix from cooccurrence matrix
max_freq <- sum(genomeCoGeID$numChr)
mat2 <- (max_freq - mat)/max_freq
diag(mat2) <- 0
## write distance matrix into file under the directory of results.path
# distMat.fname <- file.path(results.path, "clustering", paste0("distanceMat_trn",trn,"_W",ws,"(",gf1,",",gf2,")_",lenBLK.threshold/1000,"kbBLK.csv"))
# write.csv(mat2, file=distMat.fname, row.names=FALSE)
# convert distance matrix to correlation matrix
mat3 <- cov(mat2,mat2, method = c("pearson"))
############### customized binning on correlations of co-occurrence
correlation <- cor(mat)
## write matrix into file under the directory of results.path
Mat.fname <- file.path(results.path, "clustering", paste0("correlationMat_trn",trn,"_W",ws,"(",gf1,",",gf2,")_",lenBLK.threshold/1000,"kbBLK.csv"))
write.csv(correlation, file=Mat.fname, row.names=FALSE)
vec <- as.vector(correlation)
vec2 <- sort(vec, decreasing = FALSE)
l <- length(vec2)
# print(l)
sm <- min(vec2)
lr <- max(vec2)
bins=c(sm,
nth(vec2,as.integer(l*0.5)),
nth(vec2,as.integer(l*0.65)),
nth(vec2,as.integer(l*0.75)),
nth(vec2,as.integer(l*0.81)),
nth(vec2,as.integer(l*0.85)),
nth(vec2,as.integer(l*0.89)),
nth(vec2,as.integer(l*0.93)),
nth(vec2,as.integer(l*0.995)), lr)
############################################
########### generate heatmaps to group contigs into ancestral chromosomes
############################################
## convert distance to -log d
d3 <- -log(mat2); diag(d3) <- 0
# ## set data to be distance or correlation based matrix
# data <- mat3
#
# # different brewer.pal themes: Reds, Greens, Blues
# pdf(file=file.path(results.path, "clustering", paste0("AncestorNode_",trn,"_heat.pdf")), width=40, height=40)
# par(cex.main=4)
# p <- heatmap.2(data,
# main = paste0("Ancestor ",trn),
# dendrogram="row", # only draw a row dendrogram
# hclustfun = hclust, ## defaut method for hclust: "complete"
# srtCol=0, adjCol = c(0.5,1), #breaks=c(min(data), 0.1, 0.15, 0.2, 0.225,0.25,0.275,0.29,0.3,max(data)),#breaks=c(min(data), 0.05, 0.075, 0.1, 0.15,0.2,0.3,0.4,0.5,max(data)), ## breaks for distance based matrices
# breaks=c(min(data),0.0005,0.001,0.0015,0.002,0.0025,0.003,0.0035,0.004,max(data)),# breaks for ancestor 1 correlation based matrices
# #breaks=c(min(data),0.001,0.0015,0.002,0.0025,0.003,0.0035,0.004,0.006,max(data)),# breaks for ancestor 4 correlation based matrices
# lhei=c(.1,1), lwid=c(.2,1), key.title="Color Key", keysize=0.75,
# col = brewer.pal(9, "Blues"), trace = "none")
#
# # generate small size heatmaps without title and ledgend
# png(file=file.path(results.path, "clustering", paste0("AncestorNode_",trn,"_heat_small.png")), width = 480, height = 480, units = "px")
# p <- heatmap.2(data,
# dendrogram="none",
# hclustfun = hclust,
# srtCol=0, adjCol = c(0.5,1),
# breaks=c(min(data),0.0005,0.001,0.0015,0.002,0.0025,0.003,0.0035,0.004,max(data)),
# lhei=c(.1,1), lwid=c(.1,1),
# labCol=FALSE, labRow=FALSE,
# col = brewer.pal(9, "Blues"), trace = "none")
pdf(correlation, file=file.path(results.path, "clustering", paste0("AncestorNode_",trn,"_heat.pdf")), width=40, height=40)
par(cex.main=4)
p <- heatmap.2(correlation,
main = paste0("Ancestor ",trn),
dendrogram="row", # only draw a row dendrogram/ none
hclustfun=hclust,
srtCol=0, adjCol = c(0.5,1),
lhei=c(.1,1), lwid=c(.2,1), key.title="Color Key", keysize=0.75,
col = brewer.pal(9, "Greys"), breaks = bins,
trace = "none")
print(p)
dev.off()
write.csv(rev(rownames(p$carpet)), file=file.path(results.path, "clustering", paste0("cluster_trn",trn,".csv")), row.names=TRUE)
} ## loop through each ancestor
### export analysis stats data
write.csv(stats.DF, file=file.path(results.path, "ancestorStats", "block_measures.csv"), row.names=FALSE)
message("\n~~~~~Rscript finished analyzing co-occurrence and clustering")
|
# Plot 4 of the Exploratory Data Analysis course project: a 2x2 panel of
# household power-consumption measurements for 2007-02-01 and 2007-02-02.
# wday() and parse_date_time() below come from lubridate; attach it explicitly
# so the script does not fail in a fresh R session.
library(lubridate)
# Read the raw data; fields are ';'-separated and "?" codes missing values.
# (na.strings spelled out in full -- the original "na.string" only worked via
# partial argument matching, which is fragile.)
data <- read.csv("G:/Data Scientist/Coursera/4. Exploratory Data analysis/project 1/household_power_consumption.txt", sep=";",na.strings = "?")
# Convert the Date column to character so it can be parsed as a Date
data$Date <- as.character(data$Date)
# Parse dates and keep only the two days of interest
data$Date <- as.Date(data$Date,format = "%d/%m/%Y")
subdate <- subset(data, Date=="2007-02-01" | Date == "2007-02-02")
# Coerce Global_active_power from factor/character to numeric
subdate$Global_active_power <- as.character(subdate$Global_active_power)
subdate$Global_active_power <- as.numeric(subdate$Global_active_power)
# Derive the weekday label from the date
subdate$days <- wday(subdate$Date,label = TRUE)
# Build a combined date-time column ("new") from the Date and Time columns
subdate$new <- paste(subdate$Date, subdate$Time, sep = " ")
subdate$new <- parse_date_time(subdate$new,"%Y%m%d %H%M%S" )
# Draw all four panels into a single PNG file
png("plot4.png")
# 2x2 panel layout
par(mfrow = c(2,2))
# Panel 1: global active power over time
with(subdate,plot(new, Global_active_power,type = "l",ylab = "Global Active Power",xlab = ""))
# Panel 2: voltage over time
with(subdate,plot(new,Voltage,type = "l",ylab = "Voltage",xlab = "datetime"))
# Panel 3: the three sub-metering series, overlaid in black/red/blue
with(subdate, plot(new,Sub_metering_1,type = "l",xlab = "",ylab = "Energy sub metering"))
with(subdate,lines(new, Sub_metering_2,col="red",type = "l"))
with(subdate,lines(new, Sub_metering_3,col="blue",type = "l"))
legend("topright",bty = "n",lty = c(1,1,1),col= c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# Panel 4: global reactive power over time
with(subdate,plot(new,Global_reactive_power,type = "l",ylab = "Global_reactive_power",xlab = "datetime"))
dev.off()
| /plot4.r | no_license | Jineshpanchal/ExData_Plotting1 | R | false | false | 1,871 | r | #Get data from particular source which is mentioned in project
# Build plot4.png: a 2x2 panel of power-consumption measurements for
# 2007-02-01 and 2007-02-02 from the UCI household power data set.
power <- read.csv(
  "G:/Data Scientist/Coursera/4. Exploratory Data analysis/project 1/household_power_consumption.txt",
  sep = ";", na.strings = "?"
)
# Parse the Date column, then restrict to the two target days.
power$Date <- as.Date(as.character(power$Date), format = "%d/%m/%Y")
subdate <- subset(power, Date == "2007-02-01" | Date == "2007-02-02")
# Global_active_power arrives as text; make it numeric.
subdate$Global_active_power <- as.numeric(as.character(subdate$Global_active_power))
# Weekday label for each observation.
subdate$days <- wday(subdate$Date, label = TRUE)
# Combine date and time into a single date-time column named "new".
subdate$new <- parse_date_time(paste(subdate$Date, subdate$Time, sep = " "), "%Y%m%d %H%M%S")
# Render all four panels into one PNG device, two rows by two columns.
png("plot4.png")
par(mfrow = c(2, 2))
with(subdate, {
  # top-left: global active power
  plot(new, Global_active_power, type = "l", ylab = "Global Active Power", xlab = "")
  # top-right: voltage
  plot(new, Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
  # bottom-left: the three sub-metering series
  plot(new, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
  lines(new, Sub_metering_2, col = "red", type = "l")
  lines(new, Sub_metering_3, col = "blue", type = "l")
  legend("topright", bty = "n", lty = c(1, 1, 1), col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  # bottom-right: global reactive power
  plot(new, Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
})
dev.off()
|
# Decode the most likely hidden-state sequence for a series of observations.
#
# For each candidate number of hidden states in `range`, a discrete HMM with
# Gaussian emissions is initialised from repeated k-means runs, fitted by EM
# (dhmm.estimate) and decoded with the Viterbi algorithm (dhmm.viterbi).
# The model minimising BIC is returned together with its best state path.
#
# Args:
#   obser:    numeric vector of observations.
#   range:    positive integer vector of candidate state counts.
#   epsilon:  EM convergence tolerance (> 0).
#   max_iter: maximum number of EM iterations (> 0).
#
# Returns: list(best_model, best_seq, likeli_list).
dhmm.decoding <- function(obser, range = c(2,3,4,5), epsilon = 0.0001, max_iter = 1000) {
  stopifnot(is.numeric(obser))
  stopifnot(all(range == floor(range)))
  stopifnot(all(range > 0))
  stopifnot(epsilon > 0)
  stopifnot(max_iter > 0)
  n_models <- length(range)
  # Preallocate the per-model containers instead of growing them in the loop.
  bic <- numeric(n_models)
  model_list <- vector("list", n_models)
  best_list <- NULL
  time_stp <- length(obser)   # number of observations (used in BIC)
  likeli_list <- list()
  for (i in seq_len(n_models)) {
    lambda <- range[i]        # candidate number of hidden states
    # Use repeated k-means runs to obtain stable initial mu and sd values.
    ini_dhmm <- dhmm.model(lambda, 'eq_full')
    kmeans_iter <- 100
    mu_ini <- matrix(NA_real_, nrow = kmeans_iter, ncol = lambda)
    sd_ini <- matrix(NA_real_, nrow = kmeans_iter, ncol = lambda)
    for (m in seq_len(kmeans_iter)) {
      km <- kmeans(obser, lambda, iter.max = 10)
      mu_ini[m, ] <- c(sort(km$centers))  # c() drops the matrix attributes of $centers
      ord <- order(km$centers)
      sd_ini[m, ] <- sqrt(km$withinss[ord] / (km$size[ord] - 1))
    }
    # Initial emission means / sds = column averages over the k-means runs.
    ini_dhmm[[3]] <- apply(mu_ini, 2, mean)
    ini_dhmm[[4]] <- apply(sd_ini, 2, mean)
    # EM estimation of the model parameters.
    # Bug fix: the original overwrote `max_iter` below, so every later
    # dhmm.estimate() call received the previous model's iteration count
    # instead of the caller's budget.
    res <- dhmm.estimate(ini_dhmm, obser, epsilon, max_iter)
    dhmm_model <- res[[1]]
    # Viterbi algorithm to find the most likely state path.
    best_gauss <- dhmm.viterbi(dhmm_model, obser)
    ## --------------- BIC computation -----------------##
    likeli_list[[i]] <- res[[2]]
    n_iter <- length(likeli_list[[i]])   # EM iterations actually run
    para_num <- lambda * (lambda + 1)    # free parameters of a lambda-state model
    bic[i] <- (-2) * likeli_list[[i]][n_iter] + log(time_stp) * para_num
    print(bic[i])
    # Save this model's results.
    model_list[[i]] <- dhmm_model
    best_list <- cbind(best_list, best_gauss[[2]])
  }
  # which.min() always picks a single index, even when BIC values tie.
  best_idx <- which.min(bic)
  best_model <- model_list[[best_idx]]
  names(best_model) <- c("states", "transition matrix", "mean", "sd")
  best_seq <- best_list[, best_idx]
  res <- list(best_model, best_seq, likeli_list)
  names(res) <- c("best_model", "best_seq", "likeli_list")
  return (res)
}
| /mydHMM/R/dhmm_decoding.R | no_license | zkdxtt21/R-package-for-HMM | R | false | false | 1,997 | r |
# Decode the most likely hidden-state sequence for a series of observations.
#
# For each candidate state count in `range`, a discrete HMM with Gaussian
# emissions is initialised from repeated k-means runs, fitted with EM
# (dhmm.estimate) and decoded with the Viterbi algorithm (dhmm.viterbi);
# the model with the smallest BIC is returned along with its state path.
#
# Args:
#   obser:    numeric vector of observations.
#   range:    positive integer vector of candidate numbers of hidden states.
#   epsilon:  EM convergence tolerance (> 0).
#   max_iter: maximum number of EM iterations (> 0).
#
# Returns: list(best_model, best_seq, likeli_list).
dhmm.decoding <- function(obser, range = c(2,3,4,5), epsilon = 0.0001, max_iter = 1000) {
  # Validate inputs up front.
  stopifnot(is.numeric(obser))
  stopifnot(all(range == floor(range)))
  stopifnot(all(range > 0))
  stopifnot(epsilon > 0)
  stopifnot(max_iter > 0)
  # range is a positive integer vector
  bic <- NULL
  model_list <- NULL
  best_list <- NULL
  time_stp <- length(obser)   # number of observations (used in BIC)
  likeli_list <- list()
  for (i in 1:length(range)) {
    lambda <- range[i]        # candidate number of hidden states
    # Use repeated k-means runs to obtain stable initial mu and sd values.
    ini_dhmm <- dhmm.model(lambda, 'eq_full')
    kmeans_iter = 100
    mu_ini <- matrix(rep(NA, kmeans_iter * lambda), nrow = kmeans_iter)
    sd_ini <- matrix(rep(NA, kmeans_iter * lambda), nrow = kmeans_iter)
    for(m in 1:kmeans_iter){
      km <- kmeans(obser, lambda, iter.max = 10)
      mu_ini[m,] <- c(sort(km$centers)) # c() drops the matrix attributes of $centers
      ord <- order(km$centers)
      sd_ini[m,] <- sqrt(km$withinss[ord] / (km$size[ord]-1))
    }
    # Initial emission means / sds = column averages over the k-means runs.
    ini_dhmm[[3]] <- apply(mu_ini, 2, mean)
    ini_dhmm[[4]] <- apply(sd_ini, 2, mean)
    # EM to estimate the model parameters.
    res <- dhmm.estimate(ini_dhmm, obser, epsilon, max_iter)
    dhmm_model <- res[[1]]
    # Viterbi algorithm to find the best state path.
    best_gauss <- dhmm.viterbi(dhmm_model, obser)
    ## --------------- BIC compute -----------------##
    likeli_list[[i]] <- res[[2]]
    # NOTE(review): this overwrites the max_iter PARAMETER, so every later
    # dhmm.estimate() call receives the previous model's iteration count
    # instead of the caller's budget -- looks like a bug; use a fresh local.
    max_iter <- length(likeli_list[[i]])
    para_num <- lambda * (lambda + 1)  # free parameters of a lambda-state model
    bic[i] <- (-2) * likeli_list[[i]][max_iter] + log(time_stp) * para_num
    print(bic[i])
    # save data to list
    model_list[[i]] <- dhmm_model
    best_list <- cbind(best_list, best_gauss[[2]])
  }
  # NOTE(review): which(bic == min(bic)) yields several indices on ties;
  # which.min(bic) would always pick exactly one.
  best_model <- model_list[[which(bic == min(bic))]]
  names(best_model) <- c("states", "transition matrix", "mean", "sd")
  best_seq <- best_list[, which(bic == min(bic))]
  res <- list(best_model, best_seq, likeli_list)
  names(res) <- c("best_model", "best_seq", "likeli_list")
  return (res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io_csv.R
\name{io_csv.character}
\alias{io_csv.character}
\title{Import Documented Table as CSV}
\usage{
\method{io_csv}{character}(x, ext = getOption("yamlet_extension", ".yaml"), ...)
}
\arguments{
\item{x}{character file path; passed to \code{\link[csv]{as.csv.character}} (by method dispatch)}
\item{ext}{extension for metadata equivalent of x}
\item{...}{passed to \code{\link[csv]{as.csv.character}} and to \code{\link{decorate}}}
}
\value{
data.frame
}
\description{
Imports a documented table as comma-separated variable.
A wrapper for \code{\link[csv]{as.csv.character}} that also
reads associated yamlet metadata, if present, and applies it
as attributes.
}
\examples{
example(io_csv)
}
\seealso{
Other io:
\code{\link{io_csv.data.frame}()},
\code{\link{io_csv}()},
\code{\link{io_table.character}()},
\code{\link{io_table.data.frame}()},
\code{\link{io_table}()},
\code{\link{io_yamlet.character}()},
\code{\link{io_yamlet.data.frame}()},
\code{\link{io_yamlet.yamlet}()},
\code{\link{io_yamlet}()}
Other interface:
\code{\link{as_classified.factor}()},
\code{\link{conditionalize.data.frame}()},
\code{\link{decorate.character}()},
\code{\link{decorate.data.frame}()},
\code{\link{ggplot.decorated}()},
\code{\link{ggready.data.frame}()},
\code{\link{io_csv.data.frame}()},
\code{\link{io_table.character}()},
\code{\link{io_table.data.frame}()},
\code{\link{io_yamlet.character}()},
\code{\link{io_yamlet.data.frame}()},
\code{\link{is_parseable.default}()},
\code{\link{read_yamlet}()},
\code{\link{resolve.data.frame}()},
\code{\link{write_yamlet}()}
}
\concept{interface}
\concept{io}
\keyword{internal}
| /man/io_csv.character.Rd | no_license | romainfrancois/yamlet | R | false | true | 1,703 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io_csv.R
\name{io_csv.character}
\alias{io_csv.character}
\title{Import Documented Table as CSV}
\usage{
\method{io_csv}{character}(x, ext = getOption("yamlet_extension", ".yaml"), ...)
}
\arguments{
\item{x}{character file path; passed to \code{\link[csv]{as.csv.character}} (by method dispatch)}
\item{ext}{extension for metadata equivalent of x}
\item{...}{passed to \code{\link[csv]{as.csv.character}} and to \code{\link{decorate}}}
}
\value{
data.frame
}
\description{
Imports a documented table as comma-separated variable.
A wrapper for \code{\link[csv]{as.csv.character}} that also
reads associated yamlet metadata, if present, and applies it
as attributes.
}
\examples{
example(io_csv)
}
\seealso{
Other io:
\code{\link{io_csv.data.frame}()},
\code{\link{io_csv}()},
\code{\link{io_table.character}()},
\code{\link{io_table.data.frame}()},
\code{\link{io_table}()},
\code{\link{io_yamlet.character}()},
\code{\link{io_yamlet.data.frame}()},
\code{\link{io_yamlet.yamlet}()},
\code{\link{io_yamlet}()}
Other interface:
\code{\link{as_classified.factor}()},
\code{\link{conditionalize.data.frame}()},
\code{\link{decorate.character}()},
\code{\link{decorate.data.frame}()},
\code{\link{ggplot.decorated}()},
\code{\link{ggready.data.frame}()},
\code{\link{io_csv.data.frame}()},
\code{\link{io_table.character}()},
\code{\link{io_table.data.frame}()},
\code{\link{io_yamlet.character}()},
\code{\link{io_yamlet.data.frame}()},
\code{\link{is_parseable.default}()},
\code{\link{read_yamlet}()},
\code{\link{resolve.data.frame}()},
\code{\link{write_yamlet}()}
}
\concept{interface}
\concept{io}
\keyword{internal}
|
# Extracted example for QDComparison::LP.XY (auto-generated by genthat).
library(QDComparison)
### Name: LP.XY
### Title: A function to compute LP comeans, LP Fourier coefficients,
### LPINFOR test statistic, and a p-value of distribution equality
### Aliases: LP.XY
### Keywords: Helper Functions
### ** Examples
# Group indicator: 200 observations per group.
x <- c(rep(0,200),rep(1,200))
# Samples: N(0,1) for group 0, N(1,1) for group 1.  No set.seed(), so the
# draws -- and the p-value below -- differ between runs.
y <- c(rnorm(200,0,1),rnorm(200,1,1))
L <- LP.XY(x,y)
# p-value of the two-sample distribution-equality test
L$pval
| /data/genthat_extracted_code/QDComparison/examples/LP.XY.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 341 | r | library(QDComparison)
### Name: LP.XY
### Title: A function to compute LP comeans, LP Fourier coefficients,
### LPINFOR test statistic, and a p-value of distribution equality
### Aliases: LP.XY
### Keywords: Helper Functions
### ** Examples
# Group indicator: 200 observations per group.
x <- c(rep(0,200),rep(1,200))
# Samples: N(0,1) for group 0, N(1,1) for group 1.  No set.seed(), so the
# draws -- and the p-value below -- differ between runs.
y <- c(rnorm(200,0,1),rnorm(200,1,1))
L <- LP.XY(x,y)
# p-value of the two-sample distribution-equality test
L$pval
|
# Plot measured time against population size for the two architectures
# (P2P vs. Pool) and save the figure as time-arch.png.
library('ggplot2')
# NOTE(review): samepop.t30.t.g10 and samepop.t30.t.p must already exist in
# the workspace (loaded elsewhere in this repo); each is read for its $hits
# and $population columns -- confirm against the data-loading script.
success.rate <- data.frame(t = c(samepop.t30.t.g10$hits,samepop.t30.t.p$hits),
                           population=c(samepop.t30.t.g10$population,samepop.t30.t.p$population),
                           architecture=c(rep('P2P',4),rep('Pool',4)),
                           levels= samepop.t30.t.g10$population )
# One line (plus points) per architecture.
ggplot( data=success.rate,aes(x=population,y=t,group=architecture,color=architecture)) +
    geom_line() + geom_point() +
    xlab("Population") + ylab("Time (ms.)")
# ggsave() writes the most recently displayed plot.
ggsave('time-arch.png',width=8,height=8,dpi=100)
| /app/ggplot2-time.R | permissive | JJ/nodeo-data | R | false | false | 552 | r | library('ggplot2')
# Build the plotting frame from two result objects that must already exist in
# the workspace (loaded elsewhere in this repo); each is read for its $hits
# and $population columns -- NOTE(review): confirm against the loading script.
success.rate <- data.frame(t = c(samepop.t30.t.g10$hits,samepop.t30.t.p$hits),
                           population=c(samepop.t30.t.g10$population,samepop.t30.t.p$population),
                           architecture=c(rep('P2P',4),rep('Pool',4)),
                           levels= samepop.t30.t.g10$population )
# One line (plus points) per architecture: time vs. population.
ggplot( data=success.rate,aes(x=population,y=t,group=architecture,color=architecture)) +
    geom_line() + geom_point() +
    xlab("Population") + ylab("Time (ms.)")
# ggsave() writes the most recently displayed plot.
ggsave('time-arch.png',width=8,height=8,dpi=100)
|
#' Cashew soil requirement for land evaluation
#'
#' A dataset containing the soil characteristics of the crop requirements for farming Cashew.
#'
#' @details
#' The following are the factors for evaluation:
#'
#' \itemize{
#' \item CFragm - Coarse fragment (Vol.\%)
#' \item SoilDpt - Soil depth (cm)
#' \item BS - Base Saturation (\%)
#' \item SumBCs - Sum of basic cations (cmol (+)/kg soil)
#' \item pHH2O - pH H2O
#' \item OC - Organic carbon (\%)
#' \item ECedS - ECe (dS/m)
#' \item ESP - ESP (\%)
#' }
#' @seealso
#' \itemize{
#' \item Yen, B. T., Pheng, K. S., and Hoanh, C. T. (2006). \emph{LUSET: Land Use Suitability Evaluation Tool User's Guide}. International Rice Research Institute.
#' }
#'
#' @docType data
#' @keywords dataset
#' @format A data frame with 8 rows and 8 columns
#' @name CASHEWSoil
NULL | /RDOCS/CASHEWSoil.R | no_license | alstat/ALUES_dataprep | R | false | false | 882 | r | #' Cashew soil requirement for land evaluation
#'
#' A dataset containing the soil characteristics of the crop requirements for farming Cashew.
#'
#' @details
#' The following are the factors for evaluation:
#'
#' \itemize{
#' \item CFragm - Coarse fragment (Vol.\%)
#' \item SoilDpt - Soil depth (cm)
#' \item BS - Base Saturation (\%)
#' \item SumBCs - Sum of basic cations (cmol (+)/kg soil)
#' \item pHH2O - pH H2O
#' \item OC - Organic carbon (\%)
#' \item ECedS - ECe (dS/m)
#' \item ESP - ESP (\%)
#' }
#' @seealso
#' \itemize{
#' \item Yen, B. T., Pheng, K. S., and Hoanh, C. T. (2006). \emph{LUSET: Land Use Suitability Evaluation Tool User's Guide}. International Rice Research Institute.
#' }
#'
#' @docType data
#' @keywords dataset
#' @format A data frame with 8 rows and 8 columns
#' @name CASHEWSoil
NULL |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readwrite_spd.R
\name{readSPD}
\alias{readSPD}
\title{readSPD}
\usage{
readSPD(filename = "mat.spd")
}
\arguments{
\item{filename}{name of file containing SPD matrix data.}
}
\description{
read a file containing SPD matrices written using writeSPD().
}
| /man/readSPD.Rd | permissive | zhangzjjjjjj/MGLMRiem | R | false | true | 331 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readwrite_spd.R
\name{readSPD}
\alias{readSPD}
\title{readSPD}
\usage{
readSPD(filename = "mat.spd")
}
\arguments{
\item{filename}{name of file containing SPD matrix data.}
}
\description{
read a file containing SPD matrices written using writeSPD().
}
|
# Extracted example for genSurv::genTDCM (auto-generated).
library(genSurv)
### Name: genTDCM
### Title: Generating data from a Cox model with time-dependent covariates
### Aliases: genTDCM
### Keywords: datagen survival
### ** Examples
# Simulate 1000 subjects from a Weibull-baseline Cox model with a
# time-dependent covariate and uniform censoring.  No set.seed(), so the
# simulated data differ between runs.
tdcmdata <- genTDCM(n=1000, dist="weibull", corr=0.8, dist.par=c(2,3,2,3),
model.cens="uniform", cens.par=2.5, beta=c(-3.3,4), lambda=1)
head(tdcmdata, n=20L)
library(survival)
# Fit a Cox model on the counting-process (start, stop] representation.
fit1<-coxph(Surv(start,stop,event)~tdcov+covariate,data=tdcmdata)
summary(fit1)
# Second data set: exponential baseline, uncorrelated covariates.
tdcmdata2 <- genTDCM(n=1000, dist="exponential", corr=0, dist.par=c(1,1),
model.cens="uniform", cens.par=1, beta=c(-3,2), lambda=0.5)
head(tdcmdata2, n=20L)
fit2<-coxph(Surv(start,stop,event)~tdcov+covariate,data=tdcmdata2)
summary(fit2)
| /data/genthat_extracted_code/genSurv/examples/genTDCM.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 681 | r | library(genSurv)
### Name: genTDCM
### Title: Generating data from a Cox model with time-dependent covariates
### Aliases: genTDCM
### Keywords: datagen survival
### ** Examples
# Simulate 1000 subjects from a Weibull-baseline Cox model with a
# time-dependent covariate and uniform censoring.  No set.seed(), so the
# simulated data differ between runs.
tdcmdata <- genTDCM(n=1000, dist="weibull", corr=0.8, dist.par=c(2,3,2,3),
model.cens="uniform", cens.par=2.5, beta=c(-3.3,4), lambda=1)
head(tdcmdata, n=20L)
library(survival)
# Fit a Cox model on the counting-process (start, stop] representation.
fit1<-coxph(Surv(start,stop,event)~tdcov+covariate,data=tdcmdata)
summary(fit1)
# Second data set: exponential baseline, uncorrelated covariates.
tdcmdata2 <- genTDCM(n=1000, dist="exponential", corr=0, dist.par=c(1,1),
model.cens="uniform", cens.par=1, beta=c(-3,2), lambda=0.5)
head(tdcmdata2, n=20L)
fit2<-coxph(Surv(start,stop,event)~tdcov+covariate,data=tdcmdata2)
summary(fit2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nested.R
\name{nested}
\alias{nested}
\alias{is_nested}
\alias{nested.default}
\alias{nested.model_spec}
\alias{nested.nested_model}
\alias{nested.workflow}
\alias{is_nested.default}
\alias{is_nested.model_spec}
\alias{is_nested.workflow}
\title{Create a Nested Model}
\usage{
nested(x, ...)
is_nested(x, ...)
\method{nested}{default}(x, ...)
\method{nested}{model_spec}(x, allow_par = FALSE, pkgs = NULL, ...)
\method{nested}{nested_model}(x, allow_par = FALSE, pkgs = NULL, ...)
\method{nested}{workflow}(x, allow_par = FALSE, pkgs = NULL, ...)
\method{is_nested}{default}(x, ...)
\method{is_nested}{model_spec}(x, ...)
\method{is_nested}{workflow}(x, ...)
}
\arguments{
\item{x}{A model specification or workflow.}
\item{...}{Not currently used.}
\item{allow_par}{A logical to allow parallel processing over nests during
the fitting process (if a parallel backend is registered).}
\item{pkgs}{An optional character string of R package names that should be
loaded (by namespace) during parallel processing.}
}
\value{
A nested model object, or a workflow containing a nested model.
For \code{is_nested()}, a logical vector of length 1.
}
\description{
\code{nested()} turns a model or workflow into a nested model/workflow.
\code{is_nested()} checks if a model or workflow is nested.
}
\examples{
\dontshow{if (rlang::is_installed("workflows")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
library(parsnip)
library(workflows)
model <- linear_reg() \%>\%
set_engine("lm") \%>\%
nested()
model
is_nested(model)
wf <- workflow() \%>\%
add_model(model)
is_nested(wf)
\dontshow{\}) # examplesIf}
}
| /man/nested.Rd | permissive | ashbythorpe/nestedmodels | R | false | true | 1,723 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nested.R
\name{nested}
\alias{nested}
\alias{is_nested}
\alias{nested.default}
\alias{nested.model_spec}
\alias{nested.nested_model}
\alias{nested.workflow}
\alias{is_nested.default}
\alias{is_nested.model_spec}
\alias{is_nested.workflow}
\title{Create a Nested Model}
\usage{
nested(x, ...)
is_nested(x, ...)
\method{nested}{default}(x, ...)
\method{nested}{model_spec}(x, allow_par = FALSE, pkgs = NULL, ...)
\method{nested}{nested_model}(x, allow_par = FALSE, pkgs = NULL, ...)
\method{nested}{workflow}(x, allow_par = FALSE, pkgs = NULL, ...)
\method{is_nested}{default}(x, ...)
\method{is_nested}{model_spec}(x, ...)
\method{is_nested}{workflow}(x, ...)
}
\arguments{
\item{x}{A model specification or workflow.}
\item{...}{Not currently used.}
\item{allow_par}{A logical to allow parallel processing over nests during
the fitting process (if a parallel backend is registered).}
\item{pkgs}{An optional character string of R package names that should be
loaded (by namespace) during parallel processing.}
}
\value{
A nested model object, or a workflow containing a nested model.
For \code{is_nested()}, a logical vector of length 1.
}
\description{
\code{nested()} turns a model or workflow into a nested model/workflow.
\code{is_nested()} checks if a model or workflow is nested.
}
\examples{
\dontshow{if (rlang::is_installed("workflows")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
library(parsnip)
library(workflows)
model <- linear_reg() \%>\%
set_engine("lm") \%>\%
nested()
model
is_nested(model)
wf <- workflow() \%>\%
add_model(model)
is_nested(wf)
\dontshow{\}) # examplesIf}
}
|
# Plot 3 of the Exploratory Data Analysis course project: the three energy
# sub-metering series for 2007-02-01 and 2007-02-02, saved to plot3.png.
# Read the raw data using the attributes given for the assignment:
# ';'-separated fields, "?" codes missing values.
# (T/F spelled out as TRUE/FALSE -- T and F are ordinary, reassignable bindings.)
hpc <- read.csv("household_power_consumption.txt", header=TRUE, sep=';', na.strings="?",
                nrows=2075259, check.names=FALSE, stringsAsFactors=TRUE, comment.char="", quote='\"')
# Keep only the two days of interest
hpc_filt<-subset(hpc,Date %in% c("1/2/2007","2/2/2007"))
# Parse Date, then build a combined POSIXct date-time column, since this plot
# needs the time of day on the x axis as well.
hpc_filt$Date<-as.Date(hpc_filt$Date,format="%d/%m/%Y")
datetime<-paste(as.Date(hpc_filt$Date),hpc_filt$Time)
hpc_filt$Datetime<-as.POSIXct(datetime)
# Quick sanity check of the filtered data
head(hpc_filt)
# Sketch plot 3: Sub_metering_1 as the base plot, with the other two series
# overlaid as coloured lines.
png("plot3.png", width=480, height=480)
# Bug fix: the y label used to say "Global Active Power (kilowatts)" (copied
# from plot 2) although the plotted variables are the sub-metering series.
with(hpc_filt,{ plot(Sub_metering_1~Datetime, type="l",ylab="Energy sub metering",xlab="")
                lines(Sub_metering_2~Datetime,col="Red")
                lines(Sub_metering_3~Datetime,col="Blue")
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /Plot3.R | no_license | omzeybek/ExploratoryDataAnalysis_coursera | R | false | false | 1,121 | r | #this code reads data into R, using provided attributes of data set
# Plot 3 of the course project: energy sub-metering over two days.
# Read the full data set using the attributes provided for the assignment:
# ';'-separated fields, "?" codes missing values.
hpc <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?",
                nrows=2075259, check.names=F, stringsAsFactors=T, comment.char="", quote='\"')
# Subset the two distinct dates on which all the work will be done
hpc_filt<-subset(hpc,Date %in% c("1/2/2007","2/2/2007"))
# Convert Date to Date class; this plot also needs the time of day, so build
# a combined POSIXct date-time column as well.
hpc_filt$Date<-as.Date(hpc_filt$Date,format="%d/%m/%Y")
datetime<-paste(as.Date(hpc_filt$Date),hpc_filt$Time)
hpc_filt$Datetime<-as.POSIXct(datetime)
# Quick sanity check of the filtered data
head(hpc_filt)
# Sketch plot 3: plot 2 with two extra sub-metering lines added.
png("plot3.png", width=480, height=480)
# NOTE(review): the y label says "Global Active Power (kilowatts)" but the
# plotted variable is Sub_metering_1 (plus the Sub_metering_2/3 lines below);
# the label looks copy-pasted from plot 2 and should probably read
# "Energy sub metering" -- confirm against the reference figure.
with(hpc_filt,{ plot(Sub_metering_1~Datetime, type="l",ylab="Global Active Power (kilowatts)",xlab="")
                lines(Sub_metering_2~Datetime,col="Red")
                lines(Sub_metering_3~Datetime,col="Blue")
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
# Smoke test: build the dataset (downloading the Kaggle archive) and check
# that a single item can be retrieved.
test_that("dogs-vs-cats dataset", {
  # The download needs network access and Kaggle credentials; CRAN forbids
  # tests that touch the network, so skip there.
  skip_on_cran()
  # Download into a throwaway directory; "kaggle.json" is the API token file.
  dataset <- dogs_vs_cats_dataset(
    tempfile(),
    download = TRUE,
    token = "kaggle.json"
  )
  # One item should be a length-2 pair (presumably input/target — see class).
  expect_length(dataset$.getitem(1), 2)
})
| /tests/testthat/test-dogs-vs-cats.R | permissive | Athospd/torchdatasets | R | false | false | 187 | r |
# Smoke test: build the dataset (downloading the Kaggle archive with the
# supplied API token file) and check that one item can be retrieved.
test_that("dogs-vs-cats dataset", {
  # download = TRUE fetches the archive into a temporary directory.
  dataset <- dogs_vs_cats_dataset(
    tempfile(),
    download = TRUE,
    token = "kaggle.json"
  )
  # One item is expected to be a length-2 pair — presumably (input, target);
  # confirm against the dataset class definition.
  expect_length(dataset$.getitem(1), 2)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ee_install.R
\name{ee_install}
\alias{ee_install}
\title{Create an isolated Python virtual environment with all rgee dependencies.}
\usage{
ee_install(
py_env = "rgee",
earthengine_version = ee_version(),
confirm = interactive()
)
}
\arguments{
\item{py_env}{Character. The name, or full path, of the Python environment
to be used by rgee.}
\item{earthengine_version}{Character. The Earth Engine Python API version
to install. By default \code{rgee::ee_version()}.}
\item{confirm}{Logical. Confirm before restarting R?}
}
\description{
Create an isolated Python virtual environment with all rgee dependencies.
}
\examples{
\dontrun{
library(rgee)
ee_install() # Only necessary to run once
ee_Initialize()
}
}
\seealso{
Other ee_install functions:
\code{\link{ee_install_set_pyenv}()}
}
\concept{ee_install functions}
| /man/ee_install.Rd | permissive | fransalari/rgee | R | false | true | 903 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ee_install.R
\name{ee_install}
\alias{ee_install}
\title{Create an isolated Python virtual environment with all rgee dependencies.}
\usage{
ee_install(
py_env = "rgee",
earthengine_version = ee_version(),
confirm = interactive()
)
}
\arguments{
\item{py_env}{Character. The name, or full path, of the Python environment
to be used by rgee.}
\item{earthengine_version}{Character. The Earth Engine Python API version
to install. By default \code{rgee::ee_version()}.}
\item{confirm}{Logical. Confirm before restarting R?}
}
\description{
Create an isolated Python virtual environment with all rgee dependencies.
}
\examples{
\dontrun{
library(rgee)
ee_install() # Only necessary to run once
ee_Initialize()
}
}
\seealso{
Other ee_install functions:
\code{\link{ee_install_set_pyenv}()}
}
\concept{ee_install functions}
|
# Matern 3/2
# Matern 3/2 covariance matrix of a set of inputs with itself.
# l is the length-scale; sigf multiplies the whole kernel.
covf <- function(x, l = 1, sigf = 1) {
  scaled <- sqrt(3) * as.matrix(dist(x)) / l
  sigf * (1 + scaled) * exp(-scaled)
}
# Matern 3/2 cross-covariance between input vectors x and y.
covfTest <- function(x, y, l = 1, sigf = 1) {
  scaled <- sqrt(3) * abs(outer(x, y, "-")) / l
  sigf * (1 + scaled) * exp(-scaled)
}
# m x m matrix of ones (one within-subject block of the repeated term).
Jmat <- function(m) {
  matrix(1, nrow = m, ncol = m)
}
# Block-diagonal matrix of all-ones blocks, one block per subject;
# repnum[i] is the number of repeated measurements for subject i.
BDmat <- function(repnum) {
  blocks <- lapply(repnum, Jmat)
  # bdiag() lives in the Matrix package; qualify it explicitly because this
  # file never calls library(Matrix), so the bare name is not guaranteed
  # to be in scope.
  as.matrix(Matrix::bdiag(blocks))
}
# Training covariance: a shared Matern 3/2 term plus a subject-level Matern
# term masked to the block-diagonal (theta = c(l1, sigf1, l2, sigf2)).
covmat <- function(trainx, repnum, theta) {
  shared <- covf(x = trainx, l = theta[1], sigf = theta[2])
  subject <- covf(x = trainx, l = theta[3], sigf = theta[4]) * BDmat(repnum)
  shared + subject
}
# Cross-covariance between test inputs x and training inputs y; only the
# shared Matern component (theta[1:2]) is used here.
testcov <- function(x, y, theta) {
  covfTest(x = x, y = y, l = theta[1], sigf = theta[2])
}
testmat <- function(x, theta) {
k1 <- covf(x, l = theta[1], sigf = theta[2])
k2 <- covf(x, l = theta[3], sigf = theta[4])
return(k1 + k2)
} | /code/matern32.R | no_license | tahmid-usc/BoneDensity | R | false | false | 933 | r | # Matern 3/2
covf <- function(x, l = 1, sigf = 1) {
r <- as.matrix(dist(x))
k <- sigf * (1 + (sqrt(3) * r / l)) * exp(- sqrt(3) * r / l)
return(k)
}
covfTest <- function(x, y, l = 1, sigf = 1) {
r <- abs(outer(x, y, "-"))
k <- sigf * (1 + (sqrt(3) * r / l)) * exp(- sqrt(3) * r / l)
return(k)
}
Jmat <- function(m) {
return(matrix(rep(1, m^2), ncol = m))
}
BDmat <- function(repnum) {
mat <- lapply(repnum, Jmat)
as.matrix(bdiag(mat))
}
covmat <- function(trainx, repnum, theta) {
k1 <- covf(x = trainx, l = theta[1], sigf = theta[2])
k2 <- covf(x = trainx, l = theta[3], sigf = theta[4])
k2 <- k2 * BDmat(repnum)
return(k1 + k2)
}
testcov <- function(x, y, theta) {
k1 <- covfTest(x = x, y = y, l = theta[1], sigf = theta[2])
return(k1)
}
testmat <- function(x, theta) {
k1 <- covf(x, l = theta[1], sigf = theta[2])
k2 <- covf(x, l = theta[3], sigf = theta[4])
return(k1 + k2)
} |
## Build a named Bezier grob for one solved MetaPost path.  The path is
## drawn closed unless the controls carry a "cycle" attribute.
pathGrob <- function(controls, pathIndex=1) {
    isOpen <- is.null(attr(controls, "cycle"))
    BezierGrob(controls$x, controls$y,
               open=isOpen,
               default.units="bigpts",
               name=paste0("path-", pathIndex))
}
## makeContent method: at draw time, run MetaPost on the stored path and
## attach the solved Bezier paths as children of the grob.
makeContent.metapostgrob <- function(x) {
    ## (Removed unused 'wd <- getwd()' — the working directory was captured
    ## but never used or restored.)
    mpfile <- tempfile(fileext=".mp")
    ## Escape the dot: the old pattern ".mp$" matched ANY character + "mp".
    logfile <- gsub("\\.mp$", ".log", mpfile)
    metapost(x$path, mpfile, x$digits)
    mpost(mpfile, tracing=TRUE)
    pathControls <- mptrace(logfile)
    ## seq_along() is safe when no paths come back; 1:length() gives c(1, 0).
    paths <- mapply(pathGrob, pathControls, seq_along(pathControls),
                    SIMPLIFY=FALSE)
    setChildren(x, do.call(gList, paths))
}
## Generic constructor for metapost grobs; dispatches on the class of 'x'
## (solved controls, a list of solved controls, or an unsolved path).
metapostGrob <- function(x, ...) {
    UseMethod("metapostGrob")
}
## A solved path (scale already fixed): wrap the single Bezier grob in a
## gTree so it draws like the other methods.
metapostGrob.mpcontrols <- function(x,
                                    gp=gpar(),
                                    name=NULL, ...) {
    path <- pathGrob(x)
    ## 'path' is ONE grob, so wrap it with gList(path) directly.  The old
    ## do.call(gList, path) spliced the grob's own components (x, y, ...) as
    ## separate arguments — compare the mpcontrolList method, where do.call
    ## is applied to a list of grobs.
    gTree(children=gList(path),
          gp=gp, name=name, cl="mpsolvedgrob")
}
## Several solved paths (scale already fixed): one child grob per path.
metapostGrob.mpcontrolList <- function(x,
                                       gp=gpar(),
                                       name=NULL, ...) {
    ## seq_along() handles an empty control list; 1:length(x) would give
    ## c(1, 0) and break mapply().
    paths <- mapply(pathGrob, x, seq_along(x), SIMPLIFY=FALSE)
    gTree(children=do.call(gList, paths),
          gp=gp, name=name, cl="mpsolvedgrob")
}
## An unsolved path: store it (plus the output precision) on the grob and
## defer solving to makeContent() at draw time.
metapostGrob.mppath <- function(x,
                                gp=gpar(),
                                name=NULL,
                                digits=2, ...) {
    gTree(cl="metapostgrob", path=x, digits=digits, gp=gp, name=name)
}
## Convenience wrapper: build a metapost grob from '...' and draw it on the
## current grid device.
grid.metapost <- function(...) {
    grid.draw(metapostGrob(...))
}
| /R/grid.R | no_license | cran/metapost | R | false | false | 1,679 | r |
pathGrob <- function(controls, pathIndex=1) {
BezierGrob(controls$x, controls$y,
open=is.null(attr(controls, "cycle")),
default.units="bigpts",
name=paste0("path-", pathIndex))
}
makeContent.metapostgrob <- function(x) {
wd <- getwd()
mpfile <- tempfile(fileext=".mp")
logfile <- gsub(".mp$", ".log", mpfile)
metapost(x$path, mpfile, x$digits)
mpost(mpfile, tracing=TRUE)
pathControls <- mptrace(logfile)
paths <- mapply(pathGrob, pathControls, 1:length(pathControls),
SIMPLIFY=FALSE)
setChildren(x, do.call(gList, paths))
}
metapostGrob <- function(x, ...) {
UseMethod("metapostGrob")
}
## A solved path (scale already fixed)
metapostGrob.mpcontrols <- function(x,
gp=gpar(),
name=NULL, ...) {
path <- pathGrob(x)
gTree(children=do.call(gList, path),
gp=gp, name=name, cl="mpsolvedgrob")
}
## Several solved paths (scale already fixed)
metapostGrob.mpcontrolList <- function(x,
gp=gpar(),
name=NULL, ...) {
paths <- mapply(pathGrob, x, 1:length(x), SIMPLIFY=FALSE)
gTree(children=do.call(gList, paths),
gp=gp, name=name, cl="mpsolvedgrob")
}
## An unsolved path
metapostGrob.mppath <- function(x,
gp=gpar(),
name=NULL,
digits=2, ...) {
gTree(path=x, gp=gp, name=name, digits=digits, cl="metapostgrob")
}
grid.metapost <- function(...) {
grid.draw(metapostGrob(...))
}
|
# Box Cox ----------------------------------------------------------------------
# Box-Cox power transformation of y for a fixed lambda:
# log(y) when |lambda| <= 1e-12, otherwise (y^lambda - 1) / lambda.
# Requires y > 0.
box_cox <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) {
    transformed <- log(y)
  } else {
    transformed <- (y^lambda - 1) / lambda
  }
  return(y = transformed)
}
# Standardized transformation: Box Cox
# Geometric mean of a positive vector (exp of the mean log); used by the
# restricted-ML scaling of the standardized transformations.
geometric.mean <- function(x) {
  log_mean <- mean(log(x))
  exp(log_mean)
}
# Box-Cox transformation of y rescaled by the geometric mean of y so the
# Jacobian is 1 (the form maximized in restricted ML estimation of lambda).
# Requires y > 0.
box_cox_std <- function(y, lambda) {
  gm <- geometric.mean(y)
  y <- if (abs(lambda) > 1e-12) {
    y <- (y^lambda - 1) / (lambda * ((gm)^(lambda - 1)))
  } else {
    y <- gm * log(y)
  }
  return(y)
}
# Back transformation: Box Cox
# Inverse of box_cox(): exp(y) when lambda == 0 (within 1e-12), otherwise
# (lambda * y + 1)^(1 / lambda).
box_cox_back <- function(y, lambda) {
  if (abs(lambda) <= 1e-12) {
    original <- exp(y)
  } else {
    original <- (lambda * y + 1)^(1 / lambda)
  }
  return(y = original)
}
# Transformation: Box Cox shift
# Compute the shift that makes y strictly positive: if min(y) <= 0 the
# shift grows by |min(y)| + 1, otherwise it is returned unchanged.
with_shift <- function(y, shift) {
  smallest <- min(y)
  if (smallest <= 0) {
    shift <- shift + abs(smallest) + 1
  }
  return(shift)
}
# Shifted Box-Cox transformation: the data are first shifted to be strictly
# positive, then Box-Cox transformed.  Returns list(y = transformed data,
# shift = the shift actually applied) so the shift can be undone later.
box_cox_shift <- function(y, lambda = lambda, shift = 0) {
  # Local copy of the shift rule so the function is self-contained:
  # enlarge the shift until y + shift > 0 everywhere.
  compute_shift <- function(y, shift) {
    smallest <- min(y)
    if (smallest <= 0) {
      shift <- shift + abs(smallest) + 1
    }
    shift
  }
  shift <- compute_shift(y = y, shift = shift)
  # Box-Cox on the shifted data.
  if (abs(lambda) <= 1e-12) {
    y <- log(y + shift)
  } else {
    y <- ((y + shift)^lambda - 1) / lambda
  }
  return(list(y = y, shift = shift))
}
# Standardized transformation: Box Cox with shift
# NOTE(review): this re-definition of geometric.mean() is identical to the
# one defined earlier in the file; redundant but harmless.
geometric.mean <- function(x) { #for RMLE in the parameter estimation
  exp(mean(log(x)))
}
# Shifted Box-Cox transformation rescaled by the geometric mean so the
# Jacobian is 1.  The data are first shifted so their minimum maps to 1.
box_cox_shift_std <- function(y, lambda) {
  min <- min(y)
  if (min <= 0) {
    y <- y - min + 1
  }
  gm <- geometric.mean(y)
  y <- if (abs(lambda) > 1e-12) {
    y <- (y^lambda - 1) / (lambda * ((gm)^(lambda - 1)))
  } else {
    y <- gm * log(y)
  }
  return(y)
}
# Back transformation: Box Cox shift
# Inverse of box_cox_shift(); 'shift' must be the value returned alongside
# the transformed data.
box_cox_shift_back <- function(y, lambda, shift = 0) {
  if (abs(lambda) <= 1e-12) {
    original <- exp(y) - shift
  } else {
    original <- (lambda * y + 1)^(1 / lambda) - shift
  }
  return(y = original)
}
# Modulus ----------------------------------------------------------------------
# Transformation: Modulus
# Applies a Box-Cox-style power to |y| + 1 and restores the sign, so it is
# defined for negative as well as positive data.
modul <- function(y, lambda = lambda) {
  u <- abs(y) + 1L
  if (abs(lambda) <= 1e-12) {
    out <- sign(y) * log(u)
  } else {
    out <- sign(y) * (u^lambda - 1L) / lambda
  }
  return(y = out)
}
# Standardized transformation: Modulus
# Modulus transformation rescaled by the geometric mean of |y| + 1 so the
# Jacobian is 1.
modul_std <- function(y, lambda) {
  u <- abs(y) + 1L
  lambda_absolute <- abs(lambda)
  if (lambda_absolute <= 1e-12) { #case lambda=0
    zt <- sign(y) * log(u) * geometric.mean(u)
  } else {
    zt <- sign(y)*(u^lambda - 1L)/lambda * (1/geometric.mean(u)^(lambda - 1))
  }
  y <- zt
  return(y)
}
# Back transformation: Modulus
# Inverse of modul(): undo the power (or log) on |y| and restore the sign.
modul_back <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) {
    out <- sign(y) * (exp(abs(y)) - 1)
  } else {
    out <- sign(y) * ((abs(y) * lambda + 1)^(1 / lambda) - 1)
  }
  return(y = out)
}
# The Bickel-Doksum transformation ----------------------------------------------------------------------
# Transformation: Bickel-Doksum
# Signed power transformation (|y|^lambda * sign(y) - 1) / lambda; only
# defined for strictly positive lambda.
Bick_dok <- function(y, lambda = lambda) {
  if (lambda <= 1e-12) {
    stop("lambda must be positive for the Bickel-Doksum transformation")
  }
  out <- (abs(y)^lambda * sign(y) - 1) / lambda
  return(y = out)
}
# Standardized transformation: Bickel-Doksum
# Bickel-Doksum transformation rescaled by the geometric mean of
# |y|^(lambda - 1), giving Jacobian 1.
Bick_dok_std <- function(y, lambda) {
  yt <- Bick_dok(y, lambda)
  zt <- yt * (1 / geometric.mean(abs(y)^(lambda - 1)))
  y <- zt
  return(y)
}
# Back transformation: Bickel-Doksum
# Inverts Bick_dok(): non-negative transformed values and negative ones are
# inverted with sign-specific formulas (which() also skips NAs safely).
Bick_dok_back <- function(y, lambda = lambda) {
  pos_idx <- which(y >= 0)
  neg_idx <- which(y < 0)
  y[pos_idx] <- (lambda * y[pos_idx] + 1)^(1 / lambda)
  y[neg_idx] <- (-1) * ((-1) * (lambda * y[neg_idx] + 1))^(1 / lambda)
  return(y = y)
}
# The Manly transformation ----------------------------------------------------------------------
# Transformation: Manly
# Exponential transformation (exp(lambda * y) - 1) / lambda; the identity
# when lambda == 0 (within tolerance).  Handles negative data.
Manly <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) {
    out <- y
  } else {
    out <- (exp(y * lambda) - 1L) / lambda
  }
  return(y = out)
}
# Standardized transformation: Manly
# Manly transformation rescaled by exp(mean(lambda * y)) — the geometric-mean
# analogue of the Jacobian for the exponential transformation.
Manly_std <- function(y, lambda) {
  lambda_absolute <- abs(lambda)
  yt <- Manly(y, lambda)
  if (lambda_absolute <= 1e-12) { #case lambda=0
    zt <- y
  } else {
    zt <- yt/exp((mean(lambda*y)))
  }
  y <- zt
  return(y)
}
# Back transformation: Manly
# Inverse of Manly(): log(lambda * y + 1) / lambda; identity at lambda == 0.
Manly_back <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) {
    out <- y
  } else {
    out <- log(lambda * y + 1) / lambda
  }
  return(y = out)
}
# The dual transformation ----------------------------------------------------------------------
# Transformation: dual
# (y^lambda - y^(-lambda)) / (2 * lambda) for lambda > 0, log(y) at
# lambda == 0; negative lambda is rejected.  Requires y > 0.
Dual <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) {
    out <- log(y)
  } else if (lambda > 1e-12) {
    out <- (y^lambda - y^(-lambda)) / (2 * lambda)
  } else {
    stop("lambda can not be negative for the dual transformation")
  }
  return(y = out)
}
# Standardized transformation: dual
# Dual transformation rescaled by the geometric mean of
# y^(lambda-1) + y^(-lambda-1) (proportional to its derivative).
Dual_std <- function(y, lambda) {
  yt <- Dual(y, lambda)
  zt <- if (abs(lambda) > 1e-12) {
    geo <- geometric.mean(y^(lambda -1) + y^(-lambda -1))
    zt <- yt * 2 / geo
  } else {
    zt <- geometric.mean(y) * log(y)
  }
  y <- zt
  return(y)
}
# Back transformation: dual
# Solves y^lambda - y^(-lambda) = 2 * lambda * z for y (quadratic in
# y^lambda, positive root); exp(y) at lambda == 0.
Dual_back <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) {
    out <- exp(y)
  } else {
    out <- (lambda * y + sqrt(lambda^2 * y^2 + 1))^(1 / lambda)
  }
  return(y = out)
}
# The Yeo-Johnson transformation ----------------------------------------------------------------------
# Transformation: Yeo-Johnson
# Box-Cox-like power on y + 1 for y >= 0 and a mirrored power with exponent
# 2 - lambda on 1 - y for y < 0; defined for all real y.
Yeo_john <- function(y, lambda = lambda) {
  yt <- rep(NA, length(y))
  pos <- which(y >= 0)
  neg <- which(y < 0)
  if (abs(lambda) <= 1e-12) {
    yt[pos] <- log(y[pos] + 1)
  } else {
    yt[pos] <- ((y[pos] + 1)^lambda - 1) / lambda
  }
  if (abs(lambda - 2) <= 1e-12) {
    yt[neg] <- -log(-y[neg] + 1)
  } else {
    yt[neg] <- -((-y[neg] + 1)^(2 - lambda) - 1) / (2 - lambda)
  }
  return(y = yt)
}
# Standardized transformation: Yeo-Johnson
# Yeo-Johnson transformation rescaled per sign group by the geometric mean
# of y + 1 (positives) / 1 - y (negatives) so the Jacobian is 1.
Yeo_john_std <- function(y, lambda) {
  n <- length(y)
  zt <- rep(NA, n)
  negativos <- which(y < 0)
  positivos <- which(y >= 0)
  # Fix: use ONE tolerance-based branch per sign group.  The original
  # combined `abs(lambda) <= 1e-12` with a separate `lambda != 0` check, so
  # for tiny non-zero lambda BOTH blocks ran and the division by lambda
  # silently overwrote the correct lambda == 0 result (same for lambda
  # numerically near 2 on the negative side).
  if (abs(lambda) <= 1e-12) {
    gm <- geometric.mean(y[positivos] + 1)
    zt[positivos] <- gm * log(y[positivos] + 1)
  } else {
    gm <- geometric.mean(y[positivos] + 1)
    zt[positivos] <- ((y[positivos] + 1)^lambda - 1)/(lambda*gm^(lambda - 1))
  }
  if (abs(lambda - 2) <= 1e-12) {
    gm <- geometric.mean(1 - y[negativos])
    zt[negativos] <- -log(-y[negativos] + 1) * gm
  } else {
    gm <- geometric.mean(1 - y[negativos])
    zt[negativos] <- (-((-y[negativos] + 1)^(2 - lambda) - 1)/(2 - lambda))*(1/gm)
  }
  y <- zt
  return(y)
}
# Back transformation: Yeo-Johnson
# Positive transformed values came from y >= 0, negative ones from y < 0,
# so each sign group is inverted with its own formula.
Yeo_john_back <- function(y, lambda = lambda) {
  negativos <- which(y < 0)
  positivos <- which(y >= 0)
  # Fix: use the same 1e-12 tolerance as the forward transformation.  The
  # original mixed `lambda != 0` / `lambda_absolute == 2` with tolerance
  # checks: both positive branches fired for tiny non-zero lambda, and a
  # lambda numerically equal (but not identical) to 2 took the wrong
  # negative branch.
  if (abs(lambda) <= 1e-12) {
    y[positivos] <- exp(y[positivos]) - 1
  } else {
    y[positivos] <- ((y[positivos] * lambda + 1)^(1 / lambda)) - 1
  }
  if (abs(lambda - 2) <= 1e-12) {
    y[negativos] <- (-1) * (exp(-y[negativos]) - 1)
  } else {
    y[negativos] <- (-1) * ((y[negativos] * (lambda - 2) + 1)^(1/(2 - lambda)) - 1)
  }
  return(y = y)
}
###################################### Neue Transformationen #######################################
# Transformation: log_shift_opt
# log(y + lambda), where lambda is first enlarged (if needed) so that
# y + lambda is strictly positive.
log_shift_opt <- function(y, lambda = lambda) {
  smallest <- min(y + lambda)
  if (smallest <= 0) {
    lambda <- lambda + abs(smallest) + 1
  }
  log(y + lambda)
}
# Standardized transformation: Log_shift_opt
# NOTE(review): duplicate of the geometric.mean() defined earlier in this
# file (exp of the mean log); redundant but harmless.
geometric.mean <- function(x) { #for RMLE in the parameter estimation
  exp(mean(log(x)))
}
# Standardized log_shift_opt: log(y + lambda) rescaled by the geometric
# mean of y + lambda (Jacobian 1), after enlarging lambda so that
# y + lambda is strictly positive.
log_shift_opt_std <- function(y, lambda) {
  with_shift <- function(y, lambda) {
    min <- min(y + lambda)
    if (min <= 0) {
      # Fix: grow by |min(y + lambda)|, as log_shift_opt() does.  The
      # original used abs(min(y)), so for lambda != 0 the standardized
      # version shifted the data differently from the plain transformation.
      lambda <- lambda + abs(min) + 1
    } else {
      lambda <- lambda
    }
    return(lambda)
  }
  # Shift parameter
  lambda <- with_shift(y = y, lambda = lambda )
  log_trafo_std <- function(y, lambda = lambda) {
    gm <- geometric.mean(y + lambda)
    y <- gm * log(y + lambda)
    return(y)
  }
  y <- log_trafo_std(y = y, lambda = lambda)
  return(y)
}
# Back transformation: log_shift_opt
# Inverse of log_shift_opt() for a known final shift 'lambda'.
log_shift_opt_back <- function(y, lambda) {
  exp(y) - lambda
}
##############
# Transformation: neg_log
# Signed log(|y| + 1); the modulus transformation at lambda = 0.
neg_log <- function(y) {
  shifted <- abs(y) + 1L
  return(y = sign(y) * log(shifted))
}
# Standardized transformation: neg_log
# Signed log(|y| + 1) rescaled by the geometric mean of |y| + 1.
neg_log_std <- function(y) {
  u <- abs(y) + 1L
  zt <- sign(y) * log(u) * geometric.mean(u)
  y <- zt
  return(y)
}
# Back transformation: neg_log
# Inverse of neg_log(): exponentiate |y| and restore the sign.
neg_log_back <- function(y) {
  sign(y) * (exp(abs(y)) - 1)
}
# Fixed-lambda convenience wrappers around the Box-Cox family ----------------
# Transformation: log (Box-Cox with lambda = 0)
Log <- function(y) {
  y <- box_cox(y, lambda = 0)
  return(y)
}
# Standardized transformation: log
Log_std <- function(y) {
  y <- box_cox_std(y, lambda = 0)
  return(y)
}
# Back transformation: log
Log_back <- function(y) {
  y <- box_cox_back(y, lambda = 0)
  return(y)
}
# Transformation: log with shift
# NOTE(review): unlike Log(), this returns box_cox_shift()'s list(y, shift),
# and Log_shift_back() below uses box_cox_shift_back()'s default shift = 0 —
# confirm that callers carry the shift through the round trip.
Log_shift <- function(y) {
  y <- box_cox_shift(y, lambda = 0)
  return(y)
}
# Standardized transformation: log with shift
Log_shift_std <- function(y) {
  y <- box_cox_shift_std(y, lambda = 0)
  return(y)
}
# Back transformation: log with shift
Log_shift_back <- function(y) {
  y <- box_cox_shift_back(y, lambda = 0)
  return(y)
}
# Transformation: Reciprocal (Box-Cox with lambda fixed at -1)
Reciprocal <- function(y) {#lambda is fixed
  y <- box_cox(y, lambda = -1)
  return(y)
}
# Standardized transformation: Reciprocal
Reciprocal_std <- function(y) {
  y <- box_cox_std(y, lambda = -1)
  return(y)
}
# Back transformation: Reciprocal
Reciprocal_back <- function(y) {
  box_cox_back(y, lambda = -1)
}
# Transformation: square root with data-driven shift
# sqrt(y + lambda), where lambda is first enlarged (if needed) so that
# y + lambda is strictly positive.
sqrt_shift <- function(y, lambda = lambda) {
  smallest <- min(y + lambda)
  if (smallest <= 0) {
    lambda <- lambda + abs(smallest) + 1
  }
  sqrt(y + lambda)
}
# Standardized transformation: sqrt_shift
# NOTE(review): third identical re-definition of geometric.mean() in this
# file; redundant but harmless.
geometric.mean <- function(x) { #for RMLE in the parameter estimation
  exp(mean(log(x)))
}
# Shifted square-root transformation rescaled by the geometric mean of the
# shifted data.
sqrt_shift_std <- function(y, lambda) {
  with_shift <- function(y, lambda) {
    min <- min(y + lambda)
    if (min <= 0) {
      lambda <- lambda + abs(min) + 1
    } else {
      lambda <- lambda
    }
    return(lambda)
  }
  # Shift parameter
  lambda <- with_shift(y = y, lambda = lambda )
  sqrt_trafo_std <- function(y, lambda = lambda) {
    gm <- geometric.mean(y + lambda)
    y <- gm * sqrt(y + lambda)
    return(y)
  }
  y <- sqrt_trafo_std(y = y, lambda = lambda)
  return(y)
}
# Back transformation: sqrt_shift
# Inverse of sqrt_shift() for a known final shift 'lambda'.
sqrt_shift_back <- function(y, lambda) {
  y^2 - lambda
}
# Transformation: gPower
# Box-Cox power applied to y + sqrt(y^2 + 1) (the glog argument);
# lambda = 0 reduces to the glog/asinh transformation.
gPower <- function(y, lambda = lambda) {
  glog_arg <- y + sqrt(y^2 + 1)
  if (abs(lambda) <= 1e-12) {
    out <- log(glog_arg)
  } else {
    out <- (glog_arg^lambda - 1) / lambda
  }
  return(y = out)
}
# Standardized transformation: gPower
# gPower rescaled by the geometric mean of its derivative
# (y + sqrt(y^2+1))^(lambda-1) * (1 + y/sqrt(y^2+1)), giving Jacobian 1.
gPower_std <- function(y, lambda) {
  lambda_absolute <- abs(lambda)
  if (lambda_absolute <= 1e-12) { #case lambda=0
    zt <- log(y + sqrt(y^2 + 1)) * sqrt(geometric.mean(1 + y^2))
  } else if (lambda_absolute > 1e-12) {
    zt <- (((y + sqrt(y^2 + 1))^lambda - 1)/lambda) / geometric.mean((y + sqrt(y^2 + 1))^(lambda - 1) * (1 + (y/sqrt(y^2 +1))))
  }
  y <- zt
  return(y)
}
# Back transformation: gPower
# First invert the Box-Cox step to recover A = y + sqrt(y^2 + 1), then
# y = (A^2 - 1) / (2 * A).
gPower_back <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) {
    A <- exp(y)
  } else {
    A <- (y * lambda + 1)^(1 / lambda)
  }
  return(y = (-(1 - A^2)) / (2 * A))
}
# Transformation: glog (generalized log), log(y + sqrt(y^2 + 1)) = asinh(y).
g_log <- function(y) {
  return(y = log(y + sqrt(y^2 + 1)))
}
# Standardized transformation: glog
# glog (asinh) rescaled by the square root of the geometric mean of 1 + y^2.
g_log_std <- function(y) {
  yt <- log(y + sqrt(y^2 + 1)) * sqrt(geometric.mean(1 + y^2))
  return(y = yt)
}
# Back transformation: glog — inverse of asinh, i.e. sinh(y).
g_log_back <- function(y) {
  ey <- exp(y)
  return(y = (-(1 - ey^2)) / (2 * ey))
}
| /R/trafos.R | no_license | cran/trafo | R | false | false | 14,965 | r | # Box Cox ----------------------------------------------------------------------
# Transformation: Box Cox
box_cox <- function(y, lambda = lambda) {
lambda_cases <- function(y, lambda = lambda) {
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
y <- log(y)
} else {
y <- ((y)^lambda - 1) / lambda
}
return(y)
}
y <- lambda_cases(y = y, lambda = lambda)
return(y = y)
} # End box_cox
# Standardized transformation: Box Cox
geometric.mean <- function(x) { #for RMLE in the parameter estimation
exp(mean(log(x)))
}
box_cox_std <- function(y, lambda) {
gm <- geometric.mean(y)
y <- if (abs(lambda) > 1e-12) {
y <- (y^lambda - 1) / (lambda * ((gm)^(lambda - 1)))
} else {
y <- gm * log(y)
}
return(y)
}
# Back transformation: Box Cox
box_cox_back <- function(y, lambda) {
lambda_cases_back <- function(y, lambda = lambda){
if (abs(lambda) <= 1e-12) { #case lambda=0
y <- exp(y)
} else {
y <- (lambda * y + 1)^(1 / lambda)
}
return(y = y)
}
y <- lambda_cases_back(y = y, lambda = lambda)
return(y = y)
} # End box_cox_back
# Transformation: Box Cox shift
with_shift <- function(y, shift) {
min <- min(y)
if (min <= 0) {
shift <- shift + abs(min(y)) + 1
} else {
shift <- shift
}
return(shift)
}
box_cox_shift <- function(y, lambda = lambda, shift = 0) {
with_shift <- function(y, shift) {
min <- min(y)
if (min <= 0) {
shift <- shift + abs(min(y)) + 1
} else {
shift <- shift
}
return(shift)
}
# Shift parameter
shift <- with_shift(y = y, shift = shift)
lambda_cases <- function(y, lambda = lambda) {
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
y <- log(y + shift)
} else {
y <- ((y + shift)^lambda - 1) / lambda
}
return(y)
}
y <- lambda_cases(y = y, lambda = lambda)
return(list(y = y, shift = shift))
} # End box_cox
# Standardized transformation: Box Cox
geometric.mean <- function(x) { #for RMLE in the parameter estimation
exp(mean(log(x)))
}
box_cox_shift_std <- function(y, lambda) {
min <- min(y)
if (min <= 0) {
y <- y - min + 1
}
gm <- geometric.mean(y)
y <- if (abs(lambda) > 1e-12) {
y <- (y^lambda - 1) / (lambda * ((gm)^(lambda - 1)))
} else {
y <- gm * log(y)
}
return(y)
}
# Back transformation: Box Cox shift
box_cox_shift_back <- function(y, lambda, shift = 0) {
lambda_cases_back <- function(y, lambda = lambda, shift){
if (abs(lambda) <= 1e-12) { #case lambda=0
y <- exp(y) - shift
} else {
y <- (lambda * y + 1)^(1 / lambda) - shift
}
return(y = y)
}
y <- lambda_cases_back(y = y, lambda = lambda, shift = shift)
return(y = y)
} # End box_cox_back
# Modulus ----------------------------------------------------------------------
# Transformation: Modulus
modul <- function(y, lambda = lambda) {
u <- abs(y) + 1L
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
yt <- sign(y)*log(u)
} else {
yt <- sign(y)*(u^lambda - 1L)/lambda
}
return(y = yt)
}
# Standardized transformation: Modulus
modul_std <- function(y, lambda) {
u <- abs(y) + 1L
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
zt <- sign(y) * log(u) * geometric.mean(u)
} else {
zt <- sign(y)*(u^lambda - 1L)/lambda * (1/geometric.mean(u)^(lambda - 1))
}
y <- zt
return(y)
}
# Back transformation: Modulus
modul_back <- function(y, lambda = lambda) {
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) {
y <- sign(y) * (exp(abs(y)) - 1)
} else {
y <- sign(y) * ((abs(y)*lambda + 1)^(1/lambda) - 1)
}
return(y = y)
}
# The Bickel-Doksum transformation ----------------------------------------------------------------------
# Transformation: Bick-Doksum
Bick_dok <- function(y, lambda = lambda) {
if (lambda > 1e-12){
yt <- (abs(y)^lambda * sign(y) - 1)/lambda
}
else{
stop("lambda must be positive for the Bickel-Doksum transformation")
}
return(y = yt)
}
# Standardized transformation: Bick-Doksum
Bick_dok_std <- function(y, lambda) {
yt <- Bick_dok(y, lambda)
zt <- yt * (1 / geometric.mean(abs(y)^(lambda - 1)))
y <- zt
return(y)
}
# Back transformation: Bick-Doksum
Bick_dok_back <- function(y, lambda = lambda) {
positivos <- which(y >= 0)
y[positivos] <- (lambda * y[positivos] + 1)^(1 / lambda)
negativos <- which(y < 0)
y[negativos] <- (-1) * ((-1) * (lambda * y[negativos] + 1))^(1 / lambda)
return(y = y)
}
# The Manly transformation ----------------------------------------------------------------------
# Transformation: Manly
Manly <- function(y, lambda = lambda) {
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
yt <- y
} else {
yt <- (exp(y*lambda) - 1L)/lambda
}
return(y = yt)
}
# Standardized transformation: Manly
Manly_std <- function(y, lambda) {
lambda_absolute <- abs(lambda)
yt <- Manly(y, lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
zt <- y
} else {
zt <- yt/exp((mean(lambda*y)))
}
y <- zt
return(y)
}
# Back transformation: Manly
Manly_back <- function(y, lambda = lambda) {
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
y <- y
} else {
y <- log(lambda * y + 1) / lambda
}
return(y = y)
}
# The dual transformation ----------------------------------------------------------------------
# Transformation: dual
Dual <- function(y, lambda = lambda) {
lambda_absolute <- abs(lambda)
if (lambda_absolute <= 1e-12) { #case lambda=0
yt <- log(y)
} else if (lambda > 1e-12){
yt <- (y^(lambda) - y^(-lambda))/(2 * lambda)
} else {
stop("lambda can not be negative for the dual transformation")
}
return(y = yt)
}
# Standardized transformation: dual
Dual_std <- function(y, lambda) {
yt <- Dual(y, lambda)
zt <- if (abs(lambda) > 1e-12) {
geo <- geometric.mean(y^(lambda -1) + y^(-lambda -1))
zt <- yt * 2 / geo
} else {
zt <- geometric.mean(y) * log(y)
}
y <- zt
return(y)
}
# Back transformation: dual
Dual_back <- function(y, lambda = lambda) {
lambda_absolute <- abs(lambda)
if(lambda_absolute <= 1e-12)
{
y <- exp(y)
}
else
{
y <- (lambda * y + sqrt(lambda^2 * y^2 + 1))^(1/lambda)
}
return(y = y)
}
# The Yeo-Johnson transformation ----------------------------------------------------------------------
# Transformation: Yeo-Johnson
Yeo_john <- function(y, lambda = lambda) {
n <- length(y)
yt <- rep(NA, n)
negativos <- which(y < 0)
positivos <- which(y >= 0)
if(abs(lambda) <= 1e-12) {
yt[positivos] <- log(y[positivos] + 1)
} else {
yt[positivos] <- ((y[positivos] + 1)^lambda - 1)/lambda
}
if(abs(lambda - 2) <= 1e-12) {
yt[negativos] <- -log(-y[negativos] + 1)
} else {
yt[negativos] <- -((-y[negativos] + 1)^(2-lambda) - 1)/(2-lambda)
}
return(y = yt)
}
# Standardized transformation: Yeo-Johnson
Yeo_john_std <- function(y, lambda) {
n <- length(y)
zt <- rep(NA, n)
negativos <- which(y < 0)
positivos <- which(y >= 0)
if(abs(lambda) <= 1e-12){
gm <- geometric.mean(y[positivos] + 1)
zt[positivos] <- gm * log(y[positivos] + 1)
}
if (lambda != 0) {
gm <- geometric.mean(y[positivos] + 1)
zt[positivos] <- ((y[positivos] + 1)^lambda - 1)/(lambda*gm^(lambda - 1))
}
if(abs(lambda - 2) <= 1e-12) {
gm <- geometric.mean(1 - y[negativos])
zt[negativos] <- -log(-y[negativos] + 1) * gm
}
if (lambda != 2) {
gm <- geometric.mean(1 - y[negativos])
zt[negativos] <- (-((-y[negativos] + 1)^(2 - lambda) - 1)/(2 - lambda))*(1/gm)
}
y <- zt
return(y)
}
# Back transformation: Yeo-Johnson
Yeo_john_back <- function(y, lambda = lambda) {
negativos <- which(y < 0)
positivos <- which(y >= 0)
lambda_absolute <- abs(lambda)
if (lambda != 0) {
y[positivos] <- ((y[positivos] * lambda + 1)^(1 / lambda)) - 1
}
if (lambda_absolute <= 1e-12) {
y[positivos] <- exp(y[positivos]) - 1
}
if (lambda != 2) {
y[negativos] <- (-1) * ((y[negativos] * (lambda - 2) + 1)^(1/(2 - lambda)) - 1)
}
if (lambda_absolute == 2) {
y[negativos] <- (-1) * (exp(-y[negativos]) - 1)
}
return(y = y)
}
###################################### Neue Transformationen #######################################
# Transformation: log_shift_opt
log_shift_opt <- function(y, lambda = lambda) {
with_shift <- function(y, lambda) {
min <- min(y + lambda)
if (min <= 0) {
lambda <- lambda + abs(min) + 1
} else {
lambda <- lambda
}
return(lambda)
}
# Shift parameter
lambda <- with_shift(y = y, lambda = lambda )
log_trafo <- function(y, lambda = lambda) {
y <- log(y + lambda)
return(y)
}
y <- log_trafo(y = y, lambda = lambda)
return(y)
} # End log_shift
# Standardized transformation: Log_shift_opt
geometric.mean <- function(x) { #for RMLE in the parameter estimation
exp(mean(log(x)))
}
log_shift_opt_std <- function(y, lambda) {
with_shift <- function(y, lambda) {
min <- min(y + lambda)
if (min <= 0) {
lambda <- lambda + abs(min(y)) + 1
} else {
lambda <- lambda
}
return(lambda)
}
# Shift parameter
lambda <- with_shift(y = y, lambda = lambda )
log_trafo_std <- function(y, lambda = lambda) {
gm <- geometric.mean(y + lambda)
y <- gm * log(y + lambda)
return(y)
}
y <- log_trafo_std(y = y, lambda = lambda)
return(y)
}
# Back transformation: log_shift_opt
log_shift_opt_back <- function(y, lambda) {
log_shift_opt_back <- function(y, lambda = lambda){
y <- exp(y) - lambda
return(y = y)
}
y <- log_shift_opt_back(y = y, lambda = lambda)
return(y = y)
} # End log_shift_opt
##############
# Transformation: neg_log
neg_log <- function(y) {
u <- abs(y) + 1L
yt <- sign(y)*log(u)
return(y = yt)
}
# Standardized transformation: neg_log
neg_log_std <- function(y) {
u <- abs(y) + 1L
zt <- sign(y) * log(u) * geometric.mean(u)
y <- zt
return(y)
}
# Back transformation: neg_log
neg_log_back <- function(y) {
y <- sign(y) * (exp(abs(y)) - 1)
return(y)
}
# Transformation: log
Log <- function(y) {
y <- box_cox(y, lambda = 0)
return(y)
}
# Standardized transformation: log
Log_std <- function(y) {
y <- box_cox_std(y, lambda = 0)
return(y)
}
# Standardized transformation: log
Log_back <- function(y) {
y <- box_cox_back(y, lambda = 0)
return(y)
}
# Transformation: log
Log_shift <- function(y) {
y <- box_cox_shift(y, lambda = 0)
return(y)
}
# Standardized transformation: log
Log_shift_std <- function(y) {
y <- box_cox_shift_std(y, lambda = 0)
return(y)
}
# Standardized transformation: log
Log_shift_back <- function(y) {
y <- box_cox_shift_back(y, lambda = 0)
return(y)
}
# Transformation: Reciprocal
Reciprocal <- function(y) {#lambda is fixed
y <- box_cox(y, lambda = -1)
return(y)
}
# Standardized transformation: Reciprocal
Reciprocal_std <- function(y) {
y <- box_cox_std(y, lambda = -1)
return(y)
}
# Back transformation: Reciprocal
Reciprocal_back <- function(y) {
box_cox_back(y, lambda = -1)
}
# Standardized transformation: squared_root_shift
sqrt_shift <- function(y, lambda = lambda) {
with_shift <- function(y, lambda) {
min <- min(y + lambda)
if (min <= 0) {
lambda <- lambda + abs(min) + 1
} else {
lambda <- lambda
}
return(lambda)
}
# Shift parameter
lambda <- with_shift(y = y, lambda = lambda )
sqrt_trafo <- function(y, lambda = lambda) {
y <- sqrt(y + lambda)
return(y)
}
y <- sqrt_trafo(y = y, lambda = lambda)
return(y)
} # End log_shift
# Standardized transformation: sqrt_shift
geometric.mean <- function(x) { #for RMLE in the parameter estimation
exp(mean(log(x)))
}
sqrt_shift_std <- function(y, lambda) {
with_shift <- function(y, lambda) {
min <- min(y + lambda)
if (min <= 0) {
lambda <- lambda + abs(min) + 1
} else {
lambda <- lambda
}
return(lambda)
}
# Shift parameter
lambda <- with_shift(y = y, lambda = lambda )
sqrt_trafo_std <- function(y, lambda = lambda) {
gm <- geometric.mean(y + lambda)
y <- gm * sqrt(y + lambda)
return(y)
}
y <- sqrt_trafo_std(y = y, lambda = lambda)
return(y)
}
# Back transformation: log_shift
sqrt_shift_back <- function(y, lambda) {
sqrt_shift_back <- function(y, lambda = lambda){
y <- y^2 - lambda
return(y = y)
}
y <- sqrt_shift_back(y = y, lambda = lambda)
return(y = y)
} # End sqrt_shift
# Transformation: Gpower
# Transformation: gPower (generalized power transform).
# Applies the Box-Cox form to u = y + sqrt(y^2 + 1); for |lambda| ~ 0 this
# reduces to the glog transform log(u) = asinh(y).
gPower <- function(y, lambda = lambda) {
  u <- y + sqrt(y^2 + 1)
  if (abs(lambda) <= 1e-12) { # case lambda = 0
    yt <- log(u)
  } else {
    yt <- (u^lambda - 1) / lambda
  }
  yt
}
# Standardized transformation: Gpower
# Standardized gPower transformation: the gPower transform divided (or, at
# lambda = 0, multiplied) by the geometric mean of its Jacobian so that
# likelihoods remain comparable across lambda values.
gPower_std <- function(y, lambda) {
  u <- y + sqrt(y^2 + 1)
  if (abs(lambda) <= 1e-12) { # case lambda = 0
    zt <- log(u) * sqrt(geometric.mean(1 + y^2))
  } else {
    jacobian <- u^(lambda - 1) * (1 + (y / sqrt(y^2 + 1)))
    zt <- ((u^lambda - 1) / lambda) / geometric.mean(jacobian)
  }
  zt
}
# Back transformation: Gpower
# Back transformation: inverse of gPower.
# Recovers A = y + sqrt(y^2 + 1) from the transformed value (A = exp(z) for
# lambda = 0, else the inverse Box-Cox form), then inverts via the
# sinh-like identity  y = (A^2 - 1) / (2 * A).
gPower_back <- function(y, lambda = lambda) {
  if (abs(lambda) <= 1e-12) { # case lambda = 0
    A <- exp(y)
  } else {
    A <- (y * lambda + 1)^(1 / lambda)
  }
  (A^2 - 1) / (2 * A)
}
# Glog
# glog transform: log(y + sqrt(y^2 + 1)), i.e. the inverse hyperbolic
# sine asinh(y). Defined for all real y, unlike the plain log.
g_log <- function(y) {
  log(y + sqrt(y^2 + 1))
}
# Standardized glog transform: scaled by the geometric mean of the
# Jacobian term (1 + y^2) so scales stay comparable.
g_log_std <- function(y) {
  log(y + sqrt(y^2 + 1)) * sqrt(geometric.mean(1 + y^2))
}
# Back-transformation: inverse of g_log, equivalent to sinh(y).
g_log_back <- function(y) {
  (-(1 - exp(y * 2))) / (2 * exp(y))
}
|
# 3_statistical_analyses
# Fits competing size-spectra regressions (quadratic vs linear in centred
# log bin midpoints, each interacting with the mining-gradient axis pca1),
# selects among simplified quadratic models with MuMIn::dredge, then runs
# follow-up analyses: per-site body-mass range (Mrange) and quantile
# regressions of log body mass along the gradient.
library(MuMIn)
library(plyr)
library(dplyr)
library(quantreg)
library(ggplot2)
# read in binned size spectra data
# (one row per size bin per site, produced by an earlier script)
binned <- read.csv("results/binned_size_spectra.csv",
stringsAsFactors = FALSE)
# global models ####
# full quadratic
# na.action = "na.fail" is required so MuMIn::dredge refuses datasets with
# NAs (which would make AICc comparisons invalid across submodels)
global.quadratic <- lm(log_count_corrected~
log_mids_center *pca1 +
I(log_mids_center^2) +
I(log_mids_center^2):pca1,
data=binned,
na.action = "na.fail")
# full linear model
global.linear <- lm(log_count_corrected ~
log_mids_center*pca1,
data=binned,
na.action = "na.fail")
# compare quadratic and linear models
AIC(global.quadratic, global.linear)
# quadratic much better than linear
# quad AIC = 407.179
# linear AIC = 528.801
# move forward with quad model
# systematically test simplified quadratic models using MuMIn::dredge
# (fits all term subsets; "R^2" is reported as an extra statistic)
dredge.models <- dredge(global.quadratic,
beta = "none",
extra = "R^2")
# table 2 for MS ####
# table with results for all simplified models
# sink() redirects console output to the text file, then restores it
sink("results/table_2.txt")
dredge.models
sink()
# pick best model based on AICc using MuMIn::get.models
# (delta < 2 keeps models within 2 AICc units of the best)
top.models <- get.models(dredge.models,
subset = delta < 2)
# single top model selected
# save top.model for making figure in script 4
saveRDS(top.models[[1]],
"results/top_quadratic_model.RDS")
# Mrange ####
# calculate range of M values: per-site span of centred log body-mass bins
mrange <- binned %>%
group_by(site, pca1) %>%
summarise(mrange = max(log_mids_center) - min(log_mids_center))
# save Mrange as csv
write.csv(mrange, "results/mrange_data.CSV",
row.names = FALSE)
# mrange ~ gradient linear model
m.mod <- lm(mrange ~ pca1, data = mrange)
summary(m.mod)
# Quantile regression
# 5th and 95th percentiles of log body mass along the mining gradient
M.quant <- rq(log_mids~pca1, data = binned, tau = c(0.05, 0.95))
summary(M.quant)
| /.Rproj.user/3DEF686F/sources/s-37F9EA2/3E65865A-contents | no_license | Jpomz/mining-size-sprectra-Freshwater-Biology-accepted | R | false | false | 1,983 | # 3_statistical_analyses
library(MuMIn)
library(plyr)
library(dplyr)
library(quantreg)
library(ggplot2)
# read in binned size spectra data
binned <- read.csv("results/binned_size_spectra.csv",
stringsAsFactors = FALSE)
# global models ####
# full quadratic
global.quadratic <- lm(log_count_corrected~
log_mids_center *pca1 +
I(log_mids_center^2) +
I(log_mids_center^2):pca1,
data=binned,
na.action = "na.fail")
# full linear model
global.linear <- lm(log_count_corrected ~
log_mids_center*pca1,
data=binned,
na.action = "na.fail")
# compare quadratic and linear models
AIC(global.quadratic, global.linear)
# quadratic much better than linear
# quad AIC = 407.179
# linear AIC = 528.801
# move forward with quad model
# systematically test simplified quadratic models using MuMIn::dredge
dredge.models <- dredge(global.quadratic,
beta = "none",
extra = "R^2")
# table 2 for MS ####
# table with results for all simplified models
sink("results/table_2.txt")
dredge.models
sink()
# pick best model based on AICc using MuMIn::get.models
top.models <- get.models(dredge.models,
subset = delta < 2)
# single top model selected
# save top.model for making figure in script 4
saveRDS(top.models[[1]],
"results/top_quadratic_model.RDS")
# Mrange ####
# Mrange ####
# calculate range of M values
mrange <- binned %>%
group_by(site, pca1) %>%
summarise(mrange = max(log_mids_center) - min(log_mids_center))
# save Mrange as csv
write.csv(mrange, "results/mrange_data.CSV",
row.names = FALSE)
# mrange ~ gradient linear model
m.mod <- lm(mrange ~ pca1, data = mrange)
summary(m.mod)
# Quantile regression
M.quant <- rq(log_mids~pca1, data = binned, tau = c(0.05, 0.95))
summary(M.quant)
| |
#Desc :- Exp data analysis project-1 (coursera-Cor4)
#Filename :- plot2.R
# Produces plot2.png (480x480): Global Active Power (kW) over time for the
# two days 2007-02-01 and 2007-02-02 of the household power dataset.
###################Read the data
## load the data (semicolon-separated; "?" marks missing values)
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# convert the Date class
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Subset data that is between 2007/02/01 and 2007/02/02
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
#Append and convert dates and times into a single timestamp column
data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
# Plot 2: line plot of Global Active Power against time
# (POSIXct is needed so the formula interface treats datetime as continuous)
data$datetime <- as.POSIXct(data$datetime)
# NOTE(review): attach() is discouraged in R -- it masks globals and the
# matching detach() is easy to miss; with(data, plot(...)) is preferred.
attach(data)
plot(Global_active_power ~ datetime, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
#save file: copy the screen device to a 480x480 PNG, then close it
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
detach(data) | /plot2.R | no_license | satR23/Expo-data-analysis-pro1 | R | false | false | 842 | r | #Desc :- Exp data analysis project-1 (coursera-Cor4)
#Filename :- plot2.R
###################Read the data
## load the data
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# convert the Date class
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Subset data that is between 2007/02/01 and 2007/02/02
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
#Append and convert dates and times
data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
# Plot 2
data$datetime <- as.POSIXct(data$datetime)
attach(data)
plot(Global_active_power ~ datetime, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
#save file
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
detach(data) |
# plot2.R: Global Active Power (kW) over time, 2007-02-01 to 2007-02-02,
# written directly to plot2.png (480x480).
# Load the data: semicolon-separated text, "?" marks missing values.
household_power_consumption <- read.csv("./household_power_consumption.txt", header=TRUE, sep=";",
stringsAsFactors=FALSE, na.strings = "?", dec=".")
##Format Date
household_power_consumption$Date <- as.Date(household_power_consumption$Date, format = "%d/%m/%Y")
##Subset Data to date ranges "2007-02-01" and "2007-02-02"
household_power_consumption_subset <-subset(household_power_consumption, household_power_consumption$Date >= "2007-02-01" & household_power_consumption$Date <= "2007-02-02")
##Combine Date + Time into new column (POSIXct timestamp for plotting)
household_power_consumption_subset$TS <- as.POSIXct(paste(household_power_consumption_subset$Date, household_power_consumption_subset$Time))
#plot 2: open the PNG device, draw the line plot, close the device
png("plot2.png", width=480, height=480)
with(household_power_consumption_subset,plot(TS,Global_active_power,type = "l",xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
| /plot2.R | no_license | MostafaKayed/Exploratory-Data-Analysis-Week-1 | R | false | false | 927 | r | household_power_consumption <- read.csv("./household_power_consumption.txt", header=TRUE, sep=";",
stringsAsFactors=FALSE, na.strings = "?", dec=".")
##Format Date
household_power_consumption$Date <- as.Date(household_power_consumption$Date, format = "%d/%m/%Y")
##Subset Data to date ranges "2007-02-01" and "2007-02-02"
household_power_consumption_subset <-subset(household_power_consumption, household_power_consumption$Date >= "2007-02-01" & household_power_consumption$Date <= "2007-02-02")
##Combine Date + Time into new column
household_power_consumption_subset$TS <- as.POSIXct(paste(household_power_consumption_subset$Date, household_power_consumption_subset$Time))
#plot 2
png("plot2.png", width=480, height=480)
with(household_power_consumption_subset,plot(TS,Global_active_power,type = "l",xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ParallelPC.R
\name{cor2}
\alias{cor2}
\title{The Pearson's correlation test}
\usage{
cor2(x, y, S, suffStat)
}
\arguments{
\item{x,y,S}{It is tested, whether x and y are conditionally independent given the subset S of
the remaining nodes. x, y, S all are integers, corresponding to variable or node
numbers.}
\item{suffStat}{the dataset with rows are samples and columns are variables.}
}
\value{
the p-value of the test.
}
\description{
Linear correlation: Pearson's linear correlation test.
}
\examples{
##########################################
## Using cor2 as a conditional independence test
##########################################
library(pcalg)
library(bnlearn)
data("gmG")
suffStat<-gmG$x
cor2(1,2,3,suffStat)
##Use cor2 with a causal discovery algorithm, e.g. PC
pc_stable(gmG$x, indepTest=cor2, p=ncol(gmG$x), alpha=0.01)
}
\references{
Marco Scutari (2010). Learning Bayesian Networks with the bnlearn R Package. Journal of Statistical Software, 35(3), 1-22.
}
| /ParallelPC/man/cor2.Rd | no_license | thucmi/ParallelPC | R | false | false | 1,064 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ParallelPC.R
\name{cor2}
\alias{cor2}
\title{The Pearson's correlation test}
\usage{
cor2(x, y, S, suffStat)
}
\arguments{
\item{x,y,S}{It is tested, whether x and y are conditionally independent given the subset S of
the remaining nodes. x, y, S all are integers, corresponding to variable or node
numbers.}
\item{suffStat}{the dataset with rows are samples and columns are variables.}
}
\value{
the p-value of the test.
}
\description{
Linear correlation: Pearson's linear correlation test.
}
\examples{
##########################################
## Using cor2 as a conditional independence test
##########################################
library(pcalg)
library(bnlearn)
data("gmG")
suffStat<-gmG$x
cor2(1,2,3,suffStat)
##Use cor2 with a causal discovery algorithm, e.g. PC
pc_stable(gmG$x, indepTest=cor2, p=ncol(gmG$x), alpha=0.01)
}
\references{
Marco Scutari (2010). Learning Bayesian Networks with the bnlearn R Package. Journal of Statistical Software, 35(3), 1-22.
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Define server logic for the Solow growth model visualisations.
shinyServer(function(input, output) {
    # Plot 1: phase diagram -- net capital accumulation (kdot) against
    # capital per effective worker (k) along the transition path from k0.
    output$plot1 <- renderPlot({
        # read the model parameters chosen in ui.R
        s <- input$s          # savings rate
        alpha <- input$alpha  # capital share in production
        delta <- input$delta  # depreciation rate
        n <- input$n          # population growth rate
        x <- input$x          # rate of technological progress
        k0 <- seq(0, input$k0 * 3, input$k0 * 3 / 1000)
        t <- seq(0, 100, 0.1)
        # Closed-form Solow transition path:
        #   k(t)^(1-alpha) = (k0^(1-alpha) - s/(delta+n+x)) * exp(-(delta+n+x)*(1-alpha)*t)
        #                    + s/(delta+n+x)
        # BUGFIX: the steady-state term s/(delta+n+x) was previously trapped
        # inside the multiplication parentheses (added to the exponential
        # before multiplying); it must be ADDED after the decaying term, as
        # in the formula already used for plot2 below.
        k <- ((k0^(1 - alpha) - (s / (delta + n + x))) * exp(-(delta + n + x) * (1 - alpha) * t) + s / (delta + n + x))^(1 / (1 - alpha))
        # net accumulation: saving minus effective depreciation
        kdot <- (s * k^(alpha)) - (delta + n + x) * k
        # draw the line graph
        plot(k, kdot, type = "l")
    })
    # Plot 2: time path of capital per effective worker k(t) starting at k0.
    output$plot2 <- renderPlot({
        s <- input$s
        alpha <- input$alpha
        delta <- input$delta
        n <- input$n
        x <- input$x
        k0 <- seq(0, input$k0, input$k0 / 1000)
        t <- seq(0, 100, 0.1)
        # same closed-form Solow solution as above (this one was correct)
        k <- ((k0^(1 - alpha) - (s / (delta + n + x))) * (exp(-(delta + n + x) * (1 - alpha) * t)) + s / (delta + n + x))^(1 / (1 - alpha))
        # draw the line graph
        plot(t, k, type = "l")
    })
})
| /server.R | no_license | laacdm/Data-Products | R | false | false | 1,384 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$plot1 <- renderPlot({
# generate the phase diagram from ui.R
s <- input$s
alpha <- input$alpha
delta <- input$delta
n <- input$n
x <- input$x
k0 <- seq(0,input$k0*3,input$k0*3/1000)
t <- seq(0,100,0.1)
k <- (((k0^(1-alpha))-(s/(delta+n+x)))*(exp(-(delta+n+x)*(1-alpha)*t) + s/(delta+n+x)))^(1/(1-alpha))
kdot <- (s*k^(alpha))-(delta+n+x)*k
# draw the line graph
plot(k, kdot, type = "l")
})
output$plot2 <- renderPlot({
# generate the phase diagram from ui.R
s <- input$s
alpha <- input$alpha
delta <- input$delta
n <- input$n
x <- input$x
k0 <- seq(0,input$k0,input$k0/1000)
t <- seq(0,100,0.1)
k <- ((k0^(1-alpha)-(s/(delta+n+x)))*(exp(-(delta+n+x)*(1-alpha)*t))+s/(delta+n+x))^(1/(1-alpha))
# draw the line graph
plot(t, k, type = "l")
})
})
|
library(Matrix)
# Builds a normalized document-dissimilarity matrix for the WebKB corpus
# from a sparse feature matrix stored as (row, col, value) triplets, and
# writes it out in MatrixMarket format.
ndocs <- 8282   # documents = columns of the feature matrix
nfeat <- 300    # raw features = rows of the feature matrix
# Read the triplet file; skip = 1 drops the leading header/size line.
featureData <- read.table("/home/srmq/Documents/Research/textmining/devel/data/webkb-prepared-GoogleNews-SSenseContextMatrix.txt", header=FALSE, colClasses=c("integer", "integer", "numeric"), col.names=c("row", "col", "val"), skip=1)
# Assemble the sparse nfeat x ndocs matrix from the triplets.
featureDataMatrix <- sparseMatrix(i=featureData$row, j=featureData$col, x=featureData$val, dims=c(nfeat, ndocs))
rm(featureData)  # free the triplet table; only the sparse matrix is needed
# Standardize each feature across documents (scale() works column-wise,
# hence the double transpose to standardize rows).
X <- t(scale(t(featureDataMatrix)))
# Drop features that produced NaN anywhere (zero-variance rows).
rowSub <- apply(X, 1, function(row) all(!is.nan(row)))
X <- X[rowSub,]
nfeat <- nrow(X)
# Pairwise Euclidean distances between documents (columns of X).
distM <- dist(t(X), diag = TRUE)
distAsMatrix <- as.matrix(distM)
# Normalize dissimilarities to [0, 1] by the maximum distance.
distAsMatrix <- distAsMatrix/max(distAsMatrix)
DissimsExport <- as(distAsMatrix, "sparseMatrix")
# Write in MatrixMarket format for downstream tools.
writeMM(DissimsExport, file="/home/srmq/Documents/Research/textmining/devel/data/webkb-prepared-GoogleNews-SSenseContextMatrix-Dissims.mtx")
| /srmq-nlp/src/main/R/webkb-prepared-GoogleNews-SSenseContext.R | no_license | srmq/nlp | R | false | false | 845 | r | library(Matrix)
ndocs <- 8282
nfeat <- 300
featureData <- read.table("/home/srmq/Documents/Research/textmining/devel/data/webkb-prepared-GoogleNews-SSenseContextMatrix.txt", header=FALSE, colClasses=c("integer", "integer", "numeric"), col.names=c("row", "col", "val"), skip=1)
featureDataMatrix <- sparseMatrix(i=featureData$row, j=featureData$col, x=featureData$val, dims=c(nfeat, ndocs))
rm(featureData)
X <- t(scale(t(featureDataMatrix)))
rowSub <- apply(X, 1, function(row) all(!is.nan(row)))
X <- X[rowSub,]
nfeat <- nrow(X)
distM <- dist(t(X), diag = TRUE)
distAsMatrix <- as.matrix(distM)
distAsMatrix <- distAsMatrix/max(distAsMatrix)
DissimsExport <- as(distAsMatrix, "sparseMatrix")
writeMM(DissimsExport, file="/home/srmq/Documents/Research/textmining/devel/data/webkb-prepared-GoogleNews-SSenseContextMatrix-Dissims.mtx")
|
#' Function to reorder the tip labels of a tree to match the order in which they are plotted, which can change when a tree is rerooted.
#'
#' @param phylo An object of class "phylo".
#' @details
#' First the internal labels in the edges of the tree are relabelled to appear in ascending order, then the tip labels are reordered to match the correct internal labels.
#' @keywords None
#' @return An object of class "phylo"
#' @export
#' @author Chris Field <fieldc@@ethz.ch>
#' @examples
#' None
reorderTips <- function(phylo){
  # Work on a copy so the caller's tree is left untouched.
  relabelled <- phylo
  # Renumber tip entries in the edge matrix (column 2, values <= Ntip)
  # so they appear in ascending order.
  tip_rows <- relabelled$edge[,2]<=Ntip(relabelled)
  relabelled$edge[tip_rows,2] <- 1:Ntip(relabelled)
  # Reorder tip labels to match the plotting order of the (re)rooted tree.
  relabelled$tip.label <- phylo$tip.label[tipOrder(phylo)]
  return(relabelled)
}
| /R/reorderTips.r | no_license | MicrobiologyETHZ/apextra | R | false | false | 688 | r |
#' Function to reorder the tip labels of a tree to match the order in which they are plotted, which can change when a tree is rerooted.
#'
#' @param phylo An object of class "phylo".
#' @details
#' First the internal labels in the edges of the tree are relabelled to appear in ascending order, then the tip labels are reordered to match the correct internal labels.
#' @keywords None
#' @return An object of class "phylo"
#' @export
#' @author Chris Field <fieldc@@ethz.ch>
#' @examples
#' None
reorderTips <- function(phylo){
reord <- phylo
reord$edge[reord$edge[,2]<=Ntip(reord),2] <- 1:Ntip(reord)
reord$tip.label <- phylo$tip.label[tipOrder(phylo)]
return(reord)
}
|
# Prepare the correlated air-quality dataset for modelling:
# - collapses the detailed weather descriptions (temps_present) into nine
#   coarse categories via forcats::fct_collapse,
# - renames holiday columns, merges school-holiday zones A and B,
# - derives a working-day flag, drops columns no longer needed,
# - writes the result to CT_PATH_DATA_PREP/data_model_restant_trous.csv.
# NOTE(review): relies on the global CT_PATH_DATA_PREP and on forcats being
# loaded by the caller -- confirm both before running standalone.
# The collapsed level strings below contain mojibake escapes (<e9>, <U+0092>,
# ...) matching the raw CSV encoding; they must not be "fixed".
explore_prepare_before_model<-function()
{
DATA_FILE_IN<-"DataCorrelated.csv"
DATA_FILE_OUT<-"data_model_restant_trous.csv"
fichier_donnees<-paste(CT_PATH_DATA_PREP,DATA_FILE_IN,sep="/")
df<-read.csv(fichier_donnees)
# quick interactive inspection of the loaded data
df
head(df)
nrow(df)
summary(df)
#labl_fac1<-c("1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","")
#levl_fac1 <- c("ete","hiver","hiver","hiver","hiver","hiver","hiver","hiver","Toussaint","Noel","printemps","printemps","printemps","printemps","printemps","printemps","printemps","")
#df$Conge_scolaire_raison <- factor(df$Conge_scolaire_raison, levels = levl_fac1, labels=labl_fac1)
# c("ete","hiver","hiver","hiver","hiver","hiver","hiver","hiver","Toussaint","Noel","printemps","printemps","printemps","printemps","printemps","printemps","printemps","")
# Collapse the raw weather descriptions into nine broad categories.
df$temps_present <- fct_collapse(df$temps_present,
Clairs = c('<c9>clairs visibles, tonnerre non perceptible','<c9>tat du ciel inchang<e9> dans l<U+0092>ensemble', '<c9>tat du ciel inchang<e9> dans l<U+0092>ensemble'),
Averse = c('Averse(s) de gr<ea>le*, ou de pluie et de gr<ea>le*', 'Averse(s) de neige, faible(s)','Averse(s) de neige, ou de pluie et de neige', 'Averse(s) de pluie', 'Averse(s) de pluie et neige m<ea>l<e9>es, faible(s)', 'Averse(s) de pluie, faible(s)', 'Averse(s) de pluie, mod<e9>r<e9>e(s) ou forte(s)', 'Averse(s) de pluie, violente(s)'),
Neige = c('Chute continue de flocons de neige, faible au moment de l<U+0092>observation','Chute continue de flocons de neige, mod<e9>r<e9>e au moment de l<U+0092>observation', 'Chute intermittente de flocons de neige, faible au moment de l<U+0092>observation','Neige', 'Neige en grains (avec ou sans brouillard)'),
Brouillard = c('Brouillard ou brouillard glac<e9>', 'Brouillard ou brouillard glac<e9> <e0> distance au moment de' ,'Brouillard ou brouillard glac<e9> en bancs', 'Brouillard ou brouillard glac<e9>, ciel invisible, a d<e9>but<e9> ou est devenu plus <e9>pais au cours del\'heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel invisible, s<U+0092>est aminci au cours de l<U+0092>heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel invisible, sans changement appr<e9>ciable au cours de l<U+0092>heure pr<e9>c<e9>dente' ,'Brouillard ou brouillard glac<e9>, ciel visible, a d<e9>but<e9> ou est devenu plus <e9>pais au cours del\'heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel visible, s<U+0092>est aminci au cours de l<U+0092>heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel visible, sans changement appr<e9>ciable au cours de l<U+0092>heure pr<e9>c<e9>dente', 'Brouillard, d<e9>posant du givre, ciel invisible' ,'Brouillard, d<e9>posant du givre, ciel visible','Mince couche de brouillard ou de brouillard glac<e9> <e0> la station, qu<U+0092>il s<U+0092>agisse d<U+0092>une station terrestre ou d<U+0092>une station en mer, d<U+0092>une <e9>paisseur n<U+0092>exc<e9>dant pas 2 m<e8>tres sur terre ou 10 m<e8>tres en mer'),
Bruine = c('Bruine (ne se congelant pas) ou neige en grains', 'Bruine et pluie, faibles', 'Bruine et pluie, mod<e9>r<e9>es ou fortes', 'Bruine ou pluie se congelant', 'Bruine, sans cong<e9>lation, continue, faible au moment de l<U+0092>observation', 'Bruine, sans cong<e9>lation, continue, mod<e9>r<e9>e au moment de l<U+0092>observation','Bruine, sans cong<e9>lation, intermittente, faible au moment de l<U+0092>observation','Bruine, sans cong<e9>lation, intermittente, mod<e9>r<e9>e au moment de l<U+0092>observation'),
Brume = c('Brume', 'Brume s<e8>che'),
Nuages = c('Dans l<U+0092>ensemble, nuages se dissipant ou devenant moins <e9>pais','Nuages en formation ou en train de se d<e9>velopper', 'On n<U+0092>a pas observ<e9> d<U+0092><e9>volution des nuages ou on n<U+0092>a pas pu suivre cette <e9>volution'),
Orage = c('Orage (avec ou sans pr<e9>cipitations)' ,'Orage faible ou mod<e9>r<e9>, sans gr<ea>le*, mais avec pluie ou neige ou pluie et neige m<ea>l<e9>es au moment de l<U+0092>observation', 'Orage, mais pas de pr<e9>cipitations au moment de l<U+0092>observation','Pluie faible au moment de l<U+0092>observation, Orage durant l<U+0092>heure pr<e9>c<e9>dente mais non au moment de l<U+0092>observation', 'Pluie mod<e9>r<e9>e ou forte au moment de l<U+0092>observation, Orage durant l<U+0092>heure pr<e9>c<e9>dente mais non au moment de l<U+0092>observation'),
Pluie = c('Pluie (ne se congelant pas)','Pluie (ou bruine) et neige, faibles', 'Pluie (ou bruine) et neige, mod<e9>r<e9>es ou fortes','Pluie et neige m<ea>l<e9>es ou granules de glace','Pluie, sans cong<e9>lation, continue, faible au moment de l<U+0092>observation', 'Pluie, sans cong<e9>lation, continue, forte au moment de l<U+0092>observation','Pluie, sans cong<e9>lation, continue, mod<e9>r<e9>e au moment de l<U+0092>observation', 'Pluie, sans cong<e9>lation, intermittente, faible au moment de l<U+0092>observation', 'Pluie, sans cong<e9>lation, intermittente, mod<e9>r<e9>e au moment de l<U+0092>observation', 'Pluie, se congelant, faible' ,'Pr<e9>cipitations en vue, atteignant le sol ou la surface de la mer, mais distantes (c<U+0092>est-<e0>-dire <e0> plus de 5 km <e0> l<U+0092>estime) de la station', 'Pr<e9>cipitations en vue, atteignant le sol ou la surface de la mer, pr<e8>s de la station mais pas <e0> la station m<ea>me')
)
#lvel_fac2<-c('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57')
#labl_fac2<-c('Clairs','Clairs','Averse','Neige','Averse','Averse','Averse','Averse','Averse','Averse','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Bruine','Bruine','Bruine','Bruine','Bruine','Bruine','Bruine','Bruine','Brume','Brume','Neige','Neige','Neige','Nuages','Brouillard','Neige','Neige','Nuages','Nuages','Orage','Orage','Orage','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie')
#levels(df$temps_present)<-factor(df$temps_present,labels=lvel_fac2,levels=labl_fac2)
#df$temps_present<-factor(df$temps_present,labels=labl_fac2)
# Rename the public-holiday columns (positions 11 and 12 in the raw CSV).
names(df)[11]<-"Ferie"
names(df)[12]<-"Ferie_raison"
# Merge school-holiday zones A and B (Paris and elsewhere) into one flag.
df[,"Conges_Escolaire_Zone_AB"]<-0
df[df["Conges_Escolaire_Zone_A"]==1 | df["Conges_Escolaire_Zone_B"]==1,"Conges_Escolaire_Zone_AB"]<-1
# Drop the columns we no longer need.
df$Conge_zones<-NULL
df$Conges_Escolaire<-NULL
df$Conges_Escolaire_Zone_A<-NULL
df$Conges_Escolaire_Zone_B<-NULL
#df$Conges_Escolaire_Zone_C<-NULL
# Working day? 1 when not a public holiday and a weekday (Mon-Fri).
#df["jour_activite"]<-(df["Ferie"]==0 & df["Jour_de_la_Semaine"]<=5)
df[,"jour_activite"]<-0
df[df["Ferie"]==0 & df["Jour_de_la_Semaine"]<=5,"jour_activite"]<-1
# Drop these variables (now folded into jour_activite).
df["Ferie"]<-NULL
df["Conge_hebdomadaire"]<-NULL
#df["Jour_de_la_Semaine"]<-NULL
# "Ferie_raison" is already captured by the holiday flag plus the month;
# the same applies to the school-holiday reason columns.
df$Ferie_raison<-NULL
df$Conge_scolaire_raison<-NULL
df$Conge_scolaire<-NULL
# Drop pollutant columns not used by the model.
df$ext_so2<-NULL
df$ext_co<-NULL
df$X<-NULL
summary(df)
# Write the prepared dataset next to the input, under the output name.
fichier_donnees=paste(CT_PATH_DATA_PREP,DATA_FILE_OUT,sep="/")
write.csv(df,fichier_donnees)
print(fichier_donnees)
} | /Code/Exploration des donnees/Exploration pre model.r | no_license | jaimecruzdev/metroqualitedelair | R | false | false | 7,883 | r |
explore_prepare_before_model<-function()
{
DATA_FILE_IN<-"DataCorrelated.csv"
DATA_FILE_OUT<-"data_model_restant_trous.csv"
fichier_donnees<-paste(CT_PATH_DATA_PREP,DATA_FILE_IN,sep="/")
df<-read.csv(fichier_donnees)
df
head(df)
nrow(df)
summary(df)
#labl_fac1<-c("1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","")
#levl_fac1 <- c("ete","hiver","hiver","hiver","hiver","hiver","hiver","hiver","Toussaint","Noel","printemps","printemps","printemps","printemps","printemps","printemps","printemps","")
#df$Conge_scolaire_raison <- factor(df$Conge_scolaire_raison, levels = levl_fac1, labels=labl_fac1)
# c("ete","hiver","hiver","hiver","hiver","hiver","hiver","hiver","Toussaint","Noel","printemps","printemps","printemps","printemps","printemps","printemps","printemps","")
df$temps_present <- fct_collapse(df$temps_present,
Clairs = c('<c9>clairs visibles, tonnerre non perceptible','<c9>tat du ciel inchang<e9> dans l<U+0092>ensemble', '<c9>tat du ciel inchang<e9> dans l<U+0092>ensemble'),
Averse = c('Averse(s) de gr<ea>le*, ou de pluie et de gr<ea>le*', 'Averse(s) de neige, faible(s)','Averse(s) de neige, ou de pluie et de neige', 'Averse(s) de pluie', 'Averse(s) de pluie et neige m<ea>l<e9>es, faible(s)', 'Averse(s) de pluie, faible(s)', 'Averse(s) de pluie, mod<e9>r<e9>e(s) ou forte(s)', 'Averse(s) de pluie, violente(s)'),
Neige = c('Chute continue de flocons de neige, faible au moment de l<U+0092>observation','Chute continue de flocons de neige, mod<e9>r<e9>e au moment de l<U+0092>observation', 'Chute intermittente de flocons de neige, faible au moment de l<U+0092>observation','Neige', 'Neige en grains (avec ou sans brouillard)'),
Brouillard = c('Brouillard ou brouillard glac<e9>', 'Brouillard ou brouillard glac<e9> <e0> distance au moment de' ,'Brouillard ou brouillard glac<e9> en bancs', 'Brouillard ou brouillard glac<e9>, ciel invisible, a d<e9>but<e9> ou est devenu plus <e9>pais au cours del\'heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel invisible, s<U+0092>est aminci au cours de l<U+0092>heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel invisible, sans changement appr<e9>ciable au cours de l<U+0092>heure pr<e9>c<e9>dente' ,'Brouillard ou brouillard glac<e9>, ciel visible, a d<e9>but<e9> ou est devenu plus <e9>pais au cours del\'heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel visible, s<U+0092>est aminci au cours de l<U+0092>heure pr<e9>c<e9>dente', 'Brouillard ou brouillard glac<e9>, ciel visible, sans changement appr<e9>ciable au cours de l<U+0092>heure pr<e9>c<e9>dente', 'Brouillard, d<e9>posant du givre, ciel invisible' ,'Brouillard, d<e9>posant du givre, ciel visible','Mince couche de brouillard ou de brouillard glac<e9> <e0> la station, qu<U+0092>il s<U+0092>agisse d<U+0092>une station terrestre ou d<U+0092>une station en mer, d<U+0092>une <e9>paisseur n<U+0092>exc<e9>dant pas 2 m<e8>tres sur terre ou 10 m<e8>tres en mer'),
Bruine = c('Bruine (ne se congelant pas) ou neige en grains', 'Bruine et pluie, faibles', 'Bruine et pluie, mod<e9>r<e9>es ou fortes', 'Bruine ou pluie se congelant', 'Bruine, sans cong<e9>lation, continue, faible au moment de l<U+0092>observation', 'Bruine, sans cong<e9>lation, continue, mod<e9>r<e9>e au moment de l<U+0092>observation','Bruine, sans cong<e9>lation, intermittente, faible au moment de l<U+0092>observation','Bruine, sans cong<e9>lation, intermittente, mod<e9>r<e9>e au moment de l<U+0092>observation'),
Brume = c('Brume', 'Brume s<e8>che'),
Nuages = c('Dans l<U+0092>ensemble, nuages se dissipant ou devenant moins <e9>pais','Nuages en formation ou en train de se d<e9>velopper', 'On n<U+0092>a pas observ<e9> d<U+0092><e9>volution des nuages ou on n<U+0092>a pas pu suivre cette <e9>volution'),
Orage = c('Orage (avec ou sans pr<e9>cipitations)' ,'Orage faible ou mod<e9>r<e9>, sans gr<ea>le*, mais avec pluie ou neige ou pluie et neige m<ea>l<e9>es au moment de l<U+0092>observation', 'Orage, mais pas de pr<e9>cipitations au moment de l<U+0092>observation','Pluie faible au moment de l<U+0092>observation, Orage durant l<U+0092>heure pr<e9>c<e9>dente mais non au moment de l<U+0092>observation', 'Pluie mod<e9>r<e9>e ou forte au moment de l<U+0092>observation, Orage durant l<U+0092>heure pr<e9>c<e9>dente mais non au moment de l<U+0092>observation'),
Pluie = c('Pluie (ne se congelant pas)','Pluie (ou bruine) et neige, faibles', 'Pluie (ou bruine) et neige, mod<e9>r<e9>es ou fortes','Pluie et neige m<ea>l<e9>es ou granules de glace','Pluie, sans cong<e9>lation, continue, faible au moment de l<U+0092>observation', 'Pluie, sans cong<e9>lation, continue, forte au moment de l<U+0092>observation','Pluie, sans cong<e9>lation, continue, mod<e9>r<e9>e au moment de l<U+0092>observation', 'Pluie, sans cong<e9>lation, intermittente, faible au moment de l<U+0092>observation', 'Pluie, sans cong<e9>lation, intermittente, mod<e9>r<e9>e au moment de l<U+0092>observation', 'Pluie, se congelant, faible' ,'Pr<e9>cipitations en vue, atteignant le sol ou la surface de la mer, mais distantes (c<U+0092>est-<e0>-dire <e0> plus de 5 km <e0> l<U+0092>estime) de la station', 'Pr<e9>cipitations en vue, atteignant le sol ou la surface de la mer, pr<e8>s de la station mais pas <e0> la station m<ea>me')
)
#lvel_fac2<-c('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31','32','33','34','35','36','37','38','39','40','41','42','43','44','45','46','47','48','49','50','51','52','53','54','55','56','57')
#labl_fac2<-c('Clairs','Clairs','Averse','Neige','Averse','Averse','Averse','Averse','Averse','Averse','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Brouillard','Bruine','Bruine','Bruine','Bruine','Bruine','Bruine','Bruine','Bruine','Brume','Brume','Neige','Neige','Neige','Nuages','Brouillard','Neige','Neige','Nuages','Nuages','Orage','Orage','Orage','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie','Pluie')
#levels(df$temps_present)<-factor(df$temps_present,labels=lvel_fac2,levels=labl_fac2)
#df$temps_present<-factor(df$temps_present,labels=labl_fac2)
names(df)[11]<-"Ferie"
names(df)[12]<-"Ferie_raison"
#On aura Paris et ailleurs comme congΓ©s escolaire
df[,"Conges_Escolaire_Zone_AB"]<-0
df[df["Conges_Escolaire_Zone_A"]==1 | df["Conges_Escolaire_Zone_B"]==1,"Conges_Escolaire_Zone_AB"]<-1
#Nous supprimons les colonnes dont nous n'avons plus besoin
df$Conge_zones<-NULL
df$Conges_Escolaire<-NULL
df$Conges_Escolaire_Zone_A<-NULL
df$Conges_Escolaire_Zone_B<-NULL
#df$Conges_Escolaire_Zone_C<-NULL
#Jour d'activitΓ© ?
#df["jour_activite"]<-(df["Ferie"]==0 & df["Jour_de_la_Semaine"]<=5)
df[,"jour_activite"]<-0
df[df["Ferie"]==0 & df["Jour_de_la_Semaine"]<=5,"jour_activite"]<-1
#On supprime ces variables
df["Ferie"]<-NULL
df["Conge_hebdomadaire"]<-NULL
#df["Jour_de_la_Semaine"]<-NULL
#"FeriΓ©e raison", il est dΓ©jΓ pris en compte par feriΓ©e + moi. Pareil pour "CongΓ©s raison".
df$Ferie_raison<-NULL
df$Conge_scolaire_raison<-NULL
df$Conge_scolaire<-NULL
df$ext_so2<-NULL
df$ext_co<-NULL
df$X<-NULL
summary(df)
fichier_donnees=paste(CT_PATH_DATA_PREP,DATA_FILE_OUT,sep="/")
write.csv(df,fichier_donnees)
print(fichier_donnees)
} |
# -- Network constructors ---------------------------------------------------
# Each constructor asks the native PNL layer (via .Call) for a new network
# handle and tags it with an S3 class so the generic wrappers in this file
# can dispatch on the network kind.  The pnlCreate* names are kept as
# backward-compatible aliases.

# Create a Bayesian network handle of class "pnlBNet".
CreateBNet <- function ()
{
    structure(.Call("pnlCreateBNet"), class = "pnlBNet")
}
pnlCreateBNet <- function() CreateBNet()

# Create a dynamic Bayesian network handle of class "pnlDBN".
CreateDBN <- function ()
{
    structure(.Call("pnlCreateDBN"), class = "pnlDBN")
}
pnlCreateDBN <- function() CreateDBN()

# Unroll a DBN into an equivalent static Bayesian network ("pnlBNet").
Unroll <- function(x) UseMethod("Unroll", x)
Unroll.pnlDBN <- function(x)
{
    unrolled <- .Call("pnlUnroll", x)
    class(unrolled) <- "pnlBNet"
    unrolled
}

# Create a LIMID influence-diagram handle of class "pnlLIMID".
CreateLIMID <- function ()
{
    structure(.Call("pnlCreateLIMID"), class = "pnlLIMID")
}
pnlCreateLIMID <- function() CreateLIMID()

# Create a Markov random field handle of class "pnlMRF".
CreateMRF <- function ()
{
    structure(.Call("pnlCreateMRF"), class = "pnlMRF")
}
pnlCreateMRF <- function() CreateMRF()
# -- Node management --------------------------------------------------------
# The integer passed right after the network handle selects the network kind
# on the C side: 0 = BNet, 1 = DBN, 2 = LIMID, 3 = MRF.  File-wide
# convention: a negative native status is returned invisibly; otherwise the
# status text reported by the native "pnlReturnError" routine is returned.

# Add one or more nodes (with their value lists) to a network.
AddNode <- function (x, names, values) UseMethod("AddNode", x)
AddNode.pnlBNet <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 0, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddNode.pnlDBN <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 1, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddNode.pnlLIMID <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 2, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddNode.pnlMRF <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 3, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}

# Remove nodes from a network.  NOTE(review): the native routine takes a
# fourth argument; the original code always passed the constant string "ok"
# there, which is preserved verbatim.
DelNode <- function(x, nodes) UseMethod("DelNode", x)
DelNode.pnlBNet <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 0, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelNode.pnlDBN <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 1, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelNode.pnlLIMID <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 2, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelNode.pnlMRF <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 3, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
# -- Arc management ---------------------------------------------------------
# Directed arcs exist for BNet (0), DBN (1) and LIMID (2) networks; MRFs use
# cliques instead (see SetClique).  Error-reporting convention as elsewhere
# in this file: negative status returned invisibly, otherwise the text from
# "pnlReturnError".

# Add arcs from the nodes in Start to the nodes in Finish.
AddArc <- function (x, Start, Finish) UseMethod("AddArc", x)
AddArc.pnlBNet <- function(x, Start, Finish)
{
    status <- .Call("pnlAddArc", x, 0, Start, Finish)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddArc.pnlDBN <- function(x, Start, Finish)
{
    status <- .Call("pnlAddArc", x, 1, Start, Finish)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddArc.pnlLIMID <- function(x, Start, Finish)
{
    status <- .Call("pnlAddArc", x, 2, Start, Finish)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}

# Remove the arcs running from StartOfArc to EndOfArc.
DelArc <- function(x, StartOfArc, EndOfArc) UseMethod("DelArc", x)
DelArc.pnlBNet <- function(x, StartOfArc, EndOfArc)
{
    status <- .Call("pnlDelArc", x, 0, StartOfArc, EndOfArc)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelArc.pnlDBN <- function(x, StartOfArc, EndOfArc)
{
    status <- .Call("pnlDelArc", x, 1, StartOfArc, EndOfArc)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelArc.pnlLIMID <- function(x, StartOfArc, EndOfArc)
{
    status <- .Call("pnlDelArc", x, 2, StartOfArc, EndOfArc)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
# -- Persistence and graph queries ------------------------------------------
# SaveNet() writes the network to `filename` via the native layer.  Kind
# codes: 0 = BNet, 1 = DBN, 2 = LIMID, 3 = MRF.  A negative native status is
# returned invisibly; otherwise the text from "pnlReturnError" is returned.
SaveNet <- function(x, filename) UseMethod("SaveNet", x)
SaveNet.pnlBNet <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 0, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SaveNet.pnlDBN <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 1, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SaveNet.pnlLIMID <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 2, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SaveNet.pnlMRF <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 3, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Query the type descriptor of the given nodes (no LIMID method is defined).
GetNodeType <- function(x, nodes) UseMethod("GetNodeType", x)
GetNodeType.pnlBNet <- function(x, nodes) .Call("pnlGetNodeType", x, 0, nodes)
GetNodeType.pnlDBN <- function(x, nodes) .Call("pnlGetNodeType", x, 1, nodes)
GetNodeType.pnlMRF <- function(x, nodes) .Call("pnlGetNodeType", x, 3, nodes)
# Graph-structure queries; each forwards to the correspondingly named native
# routine with the network-kind code (BNet = 0, DBN = 1).
GetNeighbors <- function(x, nodes) UseMethod("GetNeighbors", x)
GetNeighbors.pnlBNet <- function(x, nodes) .Call("pnlGetNeighbors", x, 0, nodes)
GetNeighbors.pnlDBN <- function(x, nodes) .Call("pnlGetNeighbors", x, 1, nodes)
GetParents <- function(x, nodes) UseMethod("GetParents", x)
GetParents.pnlBNet <- function(x, nodes) .Call("pnlGetParents", x, 0, nodes)
GetParents.pnlDBN <- function(x, nodes) .Call("pnlGetParents", x, 1, nodes)
GetChildren <- function(x, nodes) UseMethod("GetChildren", x)
GetChildren.pnlBNet <- function(x, nodes) .Call("pnlGetChildren", x, 0, nodes)
GetChildren.pnlDBN <- function(x, nodes) .Call("pnlGetChildren", x, 1, nodes)
# -- Distribution setters ---------------------------------------------------
# ParentValue = -1 (the default) selects the unconditional native entry
# point; any other value selects the *Cond variant with that parent
# configuration.  Error convention as elsewhere: negative status invisible,
# otherwise the "pnlReturnError" text.
SetPTabular <- function(x, value, probability, ParentValue) UseMethod("SetPTabular", x)
SetPTabular.pnlBNet <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPTabular", x, 0, value, probability)
    else result <- .Call("pnlSetPTabularCond",x, 0, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetPTabular.pnlDBN <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPTabular", x, 1, value, probability)
    else result <- .Call("pnlSetPTabularCond",x, 1, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# The MRF method has no ParentValue argument: only the unconditional form
# is supported for kind code 3.
SetPTabular.pnlMRF <- function (x, value, probability)
{
    res <- "ok"
    result <- .Call("pnlSetPTabular", x, 3, value, probability)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# LIMID-only setters; these native entry points take no network-kind code.
SetPChance <- function(x, value, probability, ParentValue) UseMethod("SetPChance", x)
SetPChance.pnlLIMID <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPChance", x, value, probability)
    else result <- .Call("pnlSetPChanceCond",x, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetPDecision <- function(x, value, probability, ParentValue) UseMethod("SetPDecision", x)
SetPDecision.pnlLIMID <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPDecision", x, value, probability)
    else result <- .Call("pnlSetPDecisionCond",x, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetValueCost <- function(x, value, probability, ParentValue) UseMethod("SetValueCost", x)
SetValueCost.pnlLIMID <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetValueCost", x, value, probability)
    else result <- .Call("pnlSetValueCostCond",x, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# -- Distribution getters (tabular / chance) --------------------------------
# parents = -1 (default) queries the unconditional native entry point;
# otherwise the *Cond variant is used with that parent configuration.
# String/Float pairs return the same data in the two representations the
# native routine names suggest.
GetPTabularString <- function(x, value, parents) UseMethod("GetPTabularString", x)
GetPTabularString.pnlBNet <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetPTabularString", x, 0, value)
    else .Call("pnlGetPTabularStringCond", x, 0, value, parents)
}
GetPTabularString.pnlDBN <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetPTabularString", x, 1, value)
    else .Call("pnlGetPTabularStringCond", x, 1, value, parents)
}
# MRF method: unconditional form only (kind code 3).
GetPTabularString.pnlMRF <- function(x, value) .Call("pnlGetPTabularString", x, 3, value)
GetPTabularFloat <- function(x, value, parents) UseMethod("GetPTabularFloat", x)
GetPTabularFloat.pnlBNet <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetPTabularFloat", x, 0, value)
    else .Call("pnlGetPTabularFloatCond", x, 0, value, parents)
}
GetPTabularFloat.pnlDBN <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetPTabularFloat", x, 1, value)
    else .Call("pnlGetPTabularFloatCond", x, 1, value, parents)
}
GetPTabularFloat.pnlMRF <- function(x, value) .Call("pnlGetPTabularFloat", x, 3, value)
# LIMID chance-node getters; these native entry points take no kind code.
GetPChanceString <- function(x, value, parents) UseMethod("GetPChanceString", x)
GetPChanceString.pnlLIMID <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetPChanceString", x, value)
    else .Call("pnlGetPChanceCondString", x, value, parents)
}
GetPChanceFloat <- function(x, value, parents) UseMethod("GetPChanceFloat", x)
GetPChanceFloat.pnlLIMID <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetPChanceFloat", x, value)
    else .Call("pnlGetPChanceCondFloat", x, value, parents)
}
# Query a LIMID decision node's distribution in string form.
#
# BUG FIX: the implementation below was originally assigned to the generic's
# own name, silently clobbering the UseMethod() dispatcher defined one line
# earlier, and no "pnlLIMID" method existed at all (compare
# GetPDecisionFloat.pnlLIMID just below in the file).  It is now registered
# as the S3 method GetPDecisionString.pnlLIMID, matching every other
# accessor in this file.  Behavior for pnlLIMID objects is unchanged.
GetPDecisionString <- function(x, value, parents) UseMethod("GetPDecisionString")
GetPDecisionString.pnlLIMID <- function(x, value, parents = -1)
{
    # parents = -1 (default) means "unconditional"; otherwise query the
    # distribution conditioned on the given parent configuration.
    if (parents < 0) .Call("pnlGetPDecisionString", x, value)
    else .Call("pnlGetPDecisionCondString", x, value, parents)
}
# LIMID decision/value-cost getters.  parents = -1 (default) selects the
# unconditional native entry point, otherwise the *Cond variant.
GetPDecisionFloat <- function(x, value, parents) UseMethod("GetPDecisionFloat")
GetPDecisionFloat.pnlLIMID <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetPDecisionFloat", x, value)
    else .Call("pnlGetPDecisionCondFloat", x, value, parents)
}
GetValueCostString <- function(x, value, parents) UseMethod("GetValueCostString")
GetValueCostString.pnlLIMID <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetValueCostString", x, value)
    else .Call("pnlGetValueCostCondString", x, value, parents)
}
GetValueCostFloat <- function(x, value, parents) UseMethod("GetValueCostFloat")
GetValueCostFloat.pnlLIMID <- function(x, value, parents = -1)
{
    if (parents < 0) .Call("pnlGetValueCostFloat", x, value)
    else .Call("pnlGetValueCostCondFloat", x, value, parents)
}
# Gaussian CPD setter.  weight = -1 and tabParents = -1 are sentinels for
# "not supplied"; they select progressively simpler native entry points.
SetPGaussian <- function(x, node, mean, variance, weight, tabParents) UseMethod("SetPGaussian", x)
SetPGaussian.pnlBNet <- function(x, node, mean, variance, weight = -1, tabParents = -1)
{
    res <- "ok"
    if (tabParents < 0)
    {
        if (weight < 0) result <- .Call("pnlSetPGaussian", x, 0, node, mean, variance)
        else result <- .Call("pnlSetPGaussianCond", x, 0, node, mean, variance, weight)
    }
    # NOTE(review): unlike the two calls above, this native entry point is
    # not passed the network-kind code 0 -- confirm against the C signature.
    else result <- .Call("pnlSetPGaussianCondTParents", x, node, mean, variance, weight, tabParents)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# DBN variant: no tabular-parents form is provided.
SetPGaussian.pnlDBN <- function(x, node, mean, variance, weight = -1)
{
    res <- "ok"
    if (weight < 0) result <- .Call("pnlSetPGaussian", x, 1, node, mean, variance)
    else result <- .Call("pnlSetPGaussianCond", x, 1, node, mean, variance, weight)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Soft-max CPD setter (BNet only); these native entry points take no kind code.
SetPSoftMax <- function(x, node, weight, offset, parents) UseMethod("SetPSoftMax", x)
SetPSoftMax.pnlBNet <- function(x, node, weight, offset, parents = -1)
{
    res <- "ok"
    if (parents < 0) result <- .Call("pnlSetPSoftMax", x, node, weight, offset)
    else result <- .Call("pnlSetPSoftMaxCond", x, node, weight, offset, parents)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# -- Evidence entry ---------------------------------------------------------
# EditEvidence() passes `values` to the native evidence-editing routine;
# ClearEvid() resets the current evidence.  Kind codes: 0 = BNet, 1 = DBN,
# 3 = MRF.  Negative status returned invisibly; otherwise the
# "pnlReturnError" text.
EditEvidence <- function(x, values) UseMethod("EditEvidence", x)
EditEvidence.pnlBNet <- function(x, values)
{
    res <- "ok"
    result <- .Call("pnlEditEvidence", x, 0, values)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
EditEvidence.pnlDBN <- function(x, values)
{
    res <- "ok"
    result <- .Call("pnlEditEvidence", x, 1, values)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
EditEvidence.pnlMRF <- function(x, values)
{
    res <- "ok"
    result <- .Call("pnlEditEvidence", x, 3, values)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Discard the current (uncommitted) evidence.
ClearEvid <- function(x) UseMethod("ClearEvid", x)
ClearEvid.pnlBNet <- function(x)
{
    res <- "ok"
    result <- .Call("pnlClearEvid", x, 0)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
ClearEvid.pnlDBN <- function(x)
{
    res <- "ok"
    result <- .Call("pnlClearEvid", x, 1)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
ClearEvid.pnlMRF <- function(x)
{
    res <- "ok"
    result <- .Call("pnlClearEvid", x, 3)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# -- Evidence buffer and inference results ----------------------------------
# CurEvidToBuf() pushes the current evidence into the buffer, AddEvidToBuf()
# appends `values` directly, ClearEvidBuf() empties the buffer.  GetMPE()
# and GetJPD*() query inference results for the given nodes.  Kind codes:
# 0 = BNet, 1 = DBN, 3 = MRF.
CurEvidToBuf <- function(x) UseMethod("CurEvidToBuf", x)
CurEvidToBuf.pnlBNet <- function(x)
{
    res <- "ok"
    result <- .Call("pnlCurEvidToBuf", x, 0)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
CurEvidToBuf.pnlDBN <- function(x)
{
    res <- "ok"
    result <- .Call("pnlCurEvidToBuf", x, 1)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
CurEvidToBuf.pnlMRF <- function(x)
{
    res <- "ok"
    result <- .Call("pnlCurEvidToBuf", x, 3)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
AddEvidToBuf <- function(x, values) UseMethod("AddEvidToBuf", x)
AddEvidToBuf.pnlBNet <- function(x, values)
{
    res <- "ok"
    result <- .Call("pnlAddEvidToBuf", x, 0, values)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
AddEvidToBuf.pnlDBN <- function(x, values)
{
    res <- "ok"
    result <- .Call("pnlAddEvidToBuf", x, 1, values)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
AddEvidToBuf.pnlMRF <- function(x, values)
{
    res <- "ok"
    result <- .Call("pnlAddEvidToBuf", x, 3, values)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
ClearEvidBuf <- function(x) UseMethod("ClearEvidBuf", x)
ClearEvidBuf.pnlBNet <- function(x)
{
    res <- "ok"
    result <- .Call("pnlClearEvidBuf", x, 0)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
ClearEvidBuf.pnlDBN <- function(x)
{
    res <- "ok"
    result <- .Call("pnlClearEvidBuf", x, 1)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
ClearEvidBuf.pnlMRF <- function(x)
{
    res <- "ok"
    result <- .Call("pnlClearEvidBuf", x, 3)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Most probable explanation for the given nodes.
GetMPE <- function(x, nodes) UseMethod("GetMPE", x)
GetMPE.pnlBNet <- function(x, nodes) .Call("pnlGetMPE",x, 0, nodes)
GetMPE.pnlDBN <- function(x, nodes) .Call("pnlGetMPE",x, 1, nodes)
GetMPE.pnlMRF <- function(x, nodes) .Call("pnlGetMPE",x, 3, nodes)
# Joint probability distribution over the given nodes, as string or floats.
GetJPDString <- function(x, nodes) UseMethod("GetJPDString", x)
GetJPDString.pnlBNet <- function(x, nodes) .Call("pnlGetJPDString", x, 0, nodes)
GetJPDString.pnlDBN <- function(x, nodes) .Call("pnlGetJPDString", x, 1, nodes)
GetJPDString.pnlMRF <- function(x, nodes) .Call("pnlGetJPDString", x, 3, nodes)
GetJPDFloat <- function(x, nodes) UseMethod("GetJPDFloat", x)
GetJPDFloat.pnlBNet <- function(x, nodes) .Call("pnlGetJPDFloat", x, 0, nodes)
GetJPDFloat.pnlDBN <- function(x, nodes) .Call("pnlGetJPDFloat", x, 1, nodes)
GetJPDFloat.pnlMRF <- function(x, nodes) .Call("pnlGetJPDFloat", x, 3, nodes)
# -- Soft-max and Gaussian parameter getters --------------------------------
# ParentValue / tabParents = -1 (default) selects the unconditional native
# entry point; otherwise the *Cond variant is used.
GetSoftMaxOffset <- function(x, node, ParentValue) UseMethod("GetSoftMaxOffset", x)
GetSoftMaxOffset.pnlBNet <- function(x, node, ParentValue = -1)
{
    if (ParentValue < 0) .Call("pnlGetSoftMaxOffset", x, node)
    else .Call("pnlGetSoftMaxOffsetCond", x, node, ParentValue)
}
GetSoftMaxWeights <- function(x, node, ParentValue) UseMethod("GetSoftMaxWeights", x)
GetSoftMaxWeights.pnlBNet <- function(x, node, ParentValue = -1)
{
    if (ParentValue < 0) .Call("pnlGetSoftMaxWeights", x, node)
    else .Call("pnlGetSoftMaxWeightsCond", x, node, ParentValue)
}
# Gaussian parameters; the DBN methods support only the unconditional form.
GetGaussianMean <- function(x, nodes, tabParents) UseMethod("GetGaussianMean", x)
GetGaussianMean.pnlBNet <- function(x, nodes, tabParents = -1)
{
    if (tabParents < 0) .Call("pnlGetGaussianMean", x, 0, nodes)
    else .Call("pnlGetGaussianMeanCond", x, 0, nodes, tabParents)
}
GetGaussianMean.pnlDBN <- function(x, nodes) .Call("pnlGetGaussianMean", x, 1, nodes)
GetGaussianCovar <- function(x, nodes, tabParents) UseMethod("GetGaussianCovar", x)
GetGaussianCovar.pnlBNet <- function(x, nodes, tabParents = -1)
{
    if (tabParents < 0) .Call("pnlGetGaussianCovar", x, 0, nodes)
    else .Call("pnlGetGaussianCovarCond", x, 0, nodes, tabParents)
}
GetGaussianCovar.pnlDBN <- function(x, nodes) .Call("pnlGetGaussianCovar", x, 1, nodes)
GetGaussianWeights <- function(x, nodes, parents, tabParents) UseMethod("GetGaussianWeights", x)
GetGaussianWeights.pnlBNet <- function(x, nodes, parents, tabParents = -1)
{
    if (tabParents < 0) .Call("pnlGetGaussianWeights", x, 0, nodes, parents)
    else .Call("pnlGetGaussianWeightsCond", x, 0, nodes, parents, tabParents)
}
GetGaussianWeights.pnlDBN <- function(x, nodes, parents) .Call("pnlGetGaussianWeights", x, 1, nodes, parents)
# -- Named properties and learning ------------------------------------------
# SetProperty()/GetProperty() store and read a named string property on the
# network.  Kind codes: 0 = BNet, 1 = DBN, 2 = LIMID, 3 = MRF.  Negative
# status returned invisibly; otherwise the "pnlReturnError" text.
SetProperty <- function(x, name, value) UseMethod("SetProperty", x)
SetProperty.pnlBNet <- function(x, name, value)
{
    res <- "ok"
    result <- .Call("pnlSetProperty", x, 0, name, value)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetProperty.pnlDBN <- function(x, name, value)
{
    res <- "ok"
    result <- .Call("pnlSetProperty", x, 1, name, value)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetProperty.pnlLIMID <- function(x, name, value)
{
    res <- "ok"
    result <- .Call("pnlSetProperty", x, 2, name, value)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetProperty.pnlMRF <- function(x, name, value)
{
    res <- "ok"
    result <- .Call("pnlSetProperty", x, 3, name, value)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
GetProperty <- function(x, name) UseMethod("GetProperty", x)
GetProperty.pnlBNet <- function(x, name) .Call("pnlGetProperty", x, 0, name)
GetProperty.pnlDBN <- function(x, name) .Call("pnlGetProperty", x, 1, name)
GetProperty.pnlLIMID <- function(x, name) .Call("pnlGetProperty", x, 2, name)
GetProperty.pnlMRF <- function(x, name) .Call("pnlGetProperty", x, 3, name)
# Parameter learning from the evidence buffer (no LIMID method is defined).
LearnParameters <- function(x) UseMethod("LearnParameters", x)
LearnParameters.pnlBNet <- function(x)
{
    res <- "ok"
    result <- .Call("pnlLearnParameters", x, 0)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
LearnParameters.pnlDBN <- function(x)
{
    res <- "ok"
    result <- .Call("pnlLearnParameters", x, 1)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
LearnParameters.pnlMRF <- function(x)
{
    res <- "ok"
    result <- .Call("pnlLearnParameters", x, 3)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Structure learning is available for static Bayesian networks only; the
# native entry point takes no kind code.
LearnStructure <- function(x) UseMethod("LearnStructure", x)
LearnStructure.pnlBNet <- function(x)
{
    res <- "ok"
    result <- .Call("pnlLearnStructure", x)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# -- Evidence-buffer persistence --------------------------------------------
# Save the accumulated evidence buffer to `filename`.  Kind codes:
# 0 = BNet, 1 = DBN, 3 = MRF.
SaveEvidBuf <- function(x, filename) UseMethod("SaveEvidBuf", x)
SaveEvidBuf.pnlBNet <- function (x, filename) .Call("pnlSaveEvidBuf", x, 0, filename)
SaveEvidBuf.pnlDBN <- function (x, filename) .Call("pnlSaveEvidBuf", x, 1, filename)
SaveEvidBuf.pnlMRF <- function (x, filename) .Call("pnlSaveEvidBuf", x, 3, filename)
# Load an evidence buffer from `filename`.  With the default columns = -1
# the native-format entry point is used; otherwise `columns` describes a
# foreign column layout.
#
# BUG FIX: the DBN and MRF methods previously called
# .Call("pnlLoadEvidBufForeign", x, filename, <kind>, columns), i.e. with
# the filename and the network-kind code transposed.  Everywhere else in
# this file (including the pnlBNet method below and the Native branch of
# these very methods) the kind code comes immediately after the handle, so
# the argument order is corrected to (x, <kind>, filename, columns).
LoadEvidBuf <- function(x, filename, columns) UseMethod("LoadEvidBuf", x)
LoadEvidBuf.pnlBNet <- function (x, filename, columns = -1)
{
    if (columns < 0) .Call("pnlLoadEvidBufNative", x, 0, filename)
    else .Call("pnlLoadEvidBufForeign", x, 0, filename, columns)
}
LoadEvidBuf.pnlDBN <- function (x, filename, columns = -1)
{
    if (columns < 0) .Call("pnlLoadEvidBufNative", x, 1, filename)
    else .Call("pnlLoadEvidBufForeign", x, 1, filename, columns)
}
LoadEvidBuf.pnlMRF <- function (x, filename, columns = -1)
{
    if (columns < 0) .Call("pnlLoadEvidBufNative", x, 3, filename)
    else .Call("pnlLoadEvidBufForeign", x, 3, filename, columns)
}
# -- Sampling and masking ---------------------------------------------------
# GenerateEvidences() draws nSamples evidence samples into the buffer.
# ignoreCurrEvid / whatNodes = -1 are "not supplied" sentinels selecting
# progressively simpler native entry points.  Kind codes: 0 = BNet, 3 = MRF.
GenerateEvidences <- function(x, nSamples, ignoreCurrEvid, whatNodes) UseMethod("GenerateEvidences", x)
GenerateEvidences.pnlBNet <- function(x, nSamples, ignoreCurrEvid = -1, whatNodes = -1)
{
    res <- "ok"
    if (ignoreCurrEvid < 0) result <- .Call("pnlGenerateEvidences", x, 0, nSamples)
    else
    {
        if (whatNodes < 0) result <- .Call("pnlGenerateEvidencesCurr", x, 0, nSamples, ignoreCurrEvid)
        else result <- .Call("pnlGenerateEvidencesCurrSome", x, 0, nSamples, ignoreCurrEvid, whatNodes)
    }
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
GenerateEvidences.pnlMRF <- function(x, nSamples, ignoreCurrEvid = -1, whatNodes = -1)
{
    res <- "ok"
    if (ignoreCurrEvid < 0) result <- .Call("pnlGenerateEvidences", x, 3, nSamples)
    else
    {
        if (whatNodes < 0) result <- .Call("pnlGenerateEvidencesCurr", x, 3, nSamples, ignoreCurrEvid)
        else result <- .Call("pnlGenerateEvidencesCurrSome", x, 3, nSamples, ignoreCurrEvid, whatNodes)
    }
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# The DBN variant has its own native entry point ("dbnGenerateEvidences")
# and supports only the sample count.
GenerateEvidences.pnlDBN <- function(x, numSamples)
{
    res <- "ok"
    result <- .Call("dbnGenerateEvidences", x, numSamples)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Mask the evidence buffer, either fully (default) or for selected nodes.
MaskEvidBuf <- function(x, whatNodes) UseMethod("MaskEvidBuf", x)
MaskEvidBuf.pnlBNet <- function(x, whatNodes = -1)
{
    res <- "ok"
    if (whatNodes < 0) result <- .Call("pnlMaskEvidBufFull", x, 0)
    else result <- .Call("pnlMaskEvidBufPart", x, 0, whatNodes)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
MaskEvidBuf.pnlMRF <- function(x, whatNodes = -1)
{
    res <- "ok"
    if (whatNodes < 0) result <- .Call("pnlMaskEvidBufFull", x, 3)
    else result <- .Call("pnlMaskEvidBufPart", x, 3, whatNodes)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# -- Network loading and DBN accessors --------------------------------------
# LoadNet() reads a previously saved network from `filename` into the
# existing handle.  Kind codes: 0 = BNet, 1 = DBN, 2 = LIMID, 3 = MRF.
LoadNet <- function(x, filename) UseMethod("LoadNet", x)
LoadNet.pnlBNet <- function (x, filename)
{
    res <- "ok"
    result <- .Call("pnlLoadNet", x, 0, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
LoadNet.pnlDBN <- function (x, filename)
{
    res <- "ok"
    result <- .Call("pnlLoadNet", x, 1, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
LoadNet.pnlLIMID <- function (x, filename)
{
    res <- "ok"
    result <- .Call("pnlLoadNet", x, 2, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
LoadNet.pnlMRF <- function (x, filename)
{
    res <- "ok"
    result <- .Call("pnlLoadNet", x, 3, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Log-likelihood queries (BNet only).
GetCurEvidenceLogLik <- function(x) UseMethod("GetCurEvidenceLogLik", x)
GetCurEvidenceLogLik.pnlBNet <- function(x) .Call("pnlGetCurEvidenceLogLik", x)
GetEvidBufLogLik <- function(x) UseMethod("GetEvidBufLogLik", x)
GetEvidBufLogLik.pnlBNet <- function(x) .Call("pnlGetEvidBufLogLik", x)
# EM learning criterion value (BNet = 0, DBN = 1).
GetEMLearningCriterionValue <- function(x) UseMethod("GetEMLearningCriterionValue", x)
GetEMLearningCriterionValue.pnlBNet <- function(x) .Call("pnlGetEMLearningCriterionValue", x, 0)
GetEMLearningCriterionValue.pnlDBN <- function(x) .Call("pnlGetEMLearningCriterionValue", x, 1)
# DBN-only accessors: number of slices, lag, and full-DBN flag.  These
# native entry points take no kind code.
SetNumSlices <- function(x, NumSlice) UseMethod("SetNumSlices", x)
SetNumSlices.pnlDBN <- function(x, NumSlice)
{
    res <- "ok"
    result <- .Call("pnlSetNumSlices", x, NumSlice)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
GetNumSlices <- function(x) UseMethod("GetNumSlices")
GetNumSlices.pnlDBN <- function(x) .Call("pnlGetNumSlices", x)
SetLag <- function(x, LagNum) UseMethod("SetLag", x)
SetLag.pnlDBN <- function(x, LagNum)
{
    res <- "ok"
    result <- .Call("pnlSetLag", x, LagNum)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
GetLag <- function(x) UseMethod("GetLag")
GetLag.pnlDBN <- function(x) .Call("pnlGetLag", x)
IsFullDBN <- function(x) UseMethod("IsFullDBN", x)
IsFullDBN.pnlDBN <- function(x) .Call("pnlIsFullDBN", x)
# -- LIMID solver controls and MRF cliques ----------------------------------
# SetIterMax() bounds the LIMID solver's iterations; GetExpectation() and
# GetPolitics*() read its results.  These native entry points take no
# network-kind code.
SetIterMax <- function(x, IterMax) UseMethod("SetIterMax")
SetIterMax.pnlLIMID <- function(x, IterMax)
{
    res <- "ok"
    result <- .Call("pnlSetIterMax", x, IterMax)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
GetExpectation <- function(x) UseMethod("GetExpectation")
GetExpectation.pnlLIMID <- function(x) .Call("pnlGetExpectation", x)
GetPoliticsString <- function(x) UseMethod("GetPoliticsString")
GetPoliticsString.pnlLIMID <- function(x) .Call("pnlGetPoliticsString", x)
GetPoliticsFloat <- function(x) UseMethod("GetPoliticsFloat")
GetPoliticsFloat.pnlLIMID <- function(x) .Call("pnlGetPoliticsFloat", x)
# Define / remove a clique over the given nodes (MRF only).
SetClique <- function(x, nodes) UseMethod("SetClique", x)
SetClique.pnlMRF <- function(x, nodes)
{
    res <- "ok"
    result <- .Call("pnlSetClique", x, nodes)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
DestroyClique <- function(x, nodes) UseMethod("DestroyClique", x)
DestroyClique.pnlMRF <- function(x, nodes)
{
    res <- "ok"
    result <- .Call("pnlDestroyClique", x, nodes)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Number of nodes in an MRF.
GetNumberOfNodes <- function(x) UseMethod("GetNumberOfNodes")
# BUG FIX: the native call previously omitted the network handle entirely
# (.Call("pnlGetNumberOfNodes") with no arguments); every other wrapper in
# this file passes the handle `x` first, and without it the C side has no
# network to inspect.
GetNumberOfNodes.pnlMRF <- function(x) .Call("pnlGetNumberOfNodes", x)
# Number of cliques in an MRF.  NOTE(review): the pnlMRF method defined on
# the following line of the original file has the same missing-handle
# defect and should also pass `x`.
GetNumberOfCliques <- function(x) UseMethod("GetNumberOfCliques")
GetNumberOfCliques.pnlMRF <- function(x) .Call("pnlGetNumberOfCliques") | /R/PNLRPackage/R/PNLRPackage.R | no_license | artiste-qb-net/OpenPNL | R | false | false | 26,256 | r | CreateBNet <- function ()
{
result <- .Call("pnlCreateBNet")
class(result) <- "pnlBNet"
result
}
pnlCreateBNet <- function() CreateBNet()

# Create a dynamic Bayesian network handle of class "pnlDBN".
CreateDBN <- function ()
{
    structure(.Call("pnlCreateDBN"), class = "pnlDBN")
}
pnlCreateDBN <- function() CreateDBN()

# Unroll a DBN into an equivalent static Bayesian network ("pnlBNet").
Unroll <- function(x) UseMethod("Unroll", x)
Unroll.pnlDBN <- function(x)
{
    unrolled <- .Call("pnlUnroll", x)
    class(unrolled) <- "pnlBNet"
    unrolled
}

# Create a LIMID influence-diagram handle of class "pnlLIMID".
CreateLIMID <- function ()
{
    structure(.Call("pnlCreateLIMID"), class = "pnlLIMID")
}
pnlCreateLIMID <- function() CreateLIMID()

# Create a Markov random field handle of class "pnlMRF".
CreateMRF <- function ()
{
    structure(.Call("pnlCreateMRF"), class = "pnlMRF")
}
pnlCreateMRF <- function() CreateMRF()
# -- Node management --------------------------------------------------------
# The integer passed right after the network handle selects the network kind
# on the C side: 0 = BNet, 1 = DBN, 2 = LIMID, 3 = MRF.  File-wide
# convention: a negative native status is returned invisibly; otherwise the
# status text reported by the native "pnlReturnError" routine is returned.

# Add one or more nodes (with their value lists) to a network.
AddNode <- function (x, names, values) UseMethod("AddNode", x)
AddNode.pnlBNet <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 0, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddNode.pnlDBN <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 1, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddNode.pnlLIMID <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 2, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddNode.pnlMRF <- function(x, names, values)
{
    status <- .Call("pnlAddNode", x, 3, names, values)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}

# Remove nodes from a network.  NOTE(review): the native routine takes a
# fourth argument; the original code always passed the constant string "ok"
# there, which is preserved verbatim.
DelNode <- function(x, nodes) UseMethod("DelNode", x)
DelNode.pnlBNet <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 0, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelNode.pnlDBN <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 1, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelNode.pnlLIMID <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 2, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelNode.pnlMRF <- function(x, nodes)
{
    status <- .Call("pnlDelNode", x, 3, nodes, "ok")
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
# -- Arc management ---------------------------------------------------------
# Directed arcs exist for BNet (0), DBN (1) and LIMID (2) networks; MRFs use
# cliques instead (see SetClique).  Error-reporting convention as elsewhere
# in this file: negative status returned invisibly, otherwise the text from
# "pnlReturnError".

# Add arcs from the nodes in Start to the nodes in Finish.
AddArc <- function (x, Start, Finish) UseMethod("AddArc", x)
AddArc.pnlBNet <- function(x, Start, Finish)
{
    status <- .Call("pnlAddArc", x, 0, Start, Finish)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddArc.pnlDBN <- function(x, Start, Finish)
{
    status <- .Call("pnlAddArc", x, 1, Start, Finish)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
AddArc.pnlLIMID <- function(x, Start, Finish)
{
    status <- .Call("pnlAddArc", x, 2, Start, Finish)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}

# Remove the arcs running from StartOfArc to EndOfArc.
DelArc <- function(x, StartOfArc, EndOfArc) UseMethod("DelArc", x)
DelArc.pnlBNet <- function(x, StartOfArc, EndOfArc)
{
    status <- .Call("pnlDelArc", x, 0, StartOfArc, EndOfArc)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelArc.pnlDBN <- function(x, StartOfArc, EndOfArc)
{
    status <- .Call("pnlDelArc", x, 1, StartOfArc, EndOfArc)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
DelArc.pnlLIMID <- function(x, StartOfArc, EndOfArc)
{
    status <- .Call("pnlDelArc", x, 2, StartOfArc, EndOfArc)
    if (status < 0) invisible(status)
    else .Call("pnlReturnError")
}
# -- Persistence and graph queries ------------------------------------------
# SaveNet() writes the network to `filename` via the native layer.  Kind
# codes: 0 = BNet, 1 = DBN, 2 = LIMID, 3 = MRF.  A negative native status is
# returned invisibly; otherwise the text from "pnlReturnError" is returned.
SaveNet <- function(x, filename) UseMethod("SaveNet", x)
SaveNet.pnlBNet <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 0, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SaveNet.pnlDBN <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 1, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SaveNet.pnlLIMID <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 2, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SaveNet.pnlMRF <- function(x, filename)
{
    res <- "ok"
    result <- .Call("pnlSaveNet", x, 3, filename)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# Query the type descriptor of the given nodes (no LIMID method is defined).
GetNodeType <- function(x, nodes) UseMethod("GetNodeType", x)
GetNodeType.pnlBNet <- function(x, nodes) .Call("pnlGetNodeType", x, 0, nodes)
GetNodeType.pnlDBN <- function(x, nodes) .Call("pnlGetNodeType", x, 1, nodes)
GetNodeType.pnlMRF <- function(x, nodes) .Call("pnlGetNodeType", x, 3, nodes)
# Graph-structure queries; each forwards to the correspondingly named native
# routine with the network-kind code (BNet = 0, DBN = 1).
GetNeighbors <- function(x, nodes) UseMethod("GetNeighbors", x)
GetNeighbors.pnlBNet <- function(x, nodes) .Call("pnlGetNeighbors", x, 0, nodes)
GetNeighbors.pnlDBN <- function(x, nodes) .Call("pnlGetNeighbors", x, 1, nodes)
GetParents <- function(x, nodes) UseMethod("GetParents", x)
GetParents.pnlBNet <- function(x, nodes) .Call("pnlGetParents", x, 0, nodes)
GetParents.pnlDBN <- function(x, nodes) .Call("pnlGetParents", x, 1, nodes)
GetChildren <- function(x, nodes) UseMethod("GetChildren", x)
GetChildren.pnlBNet <- function(x, nodes) .Call("pnlGetChildren", x, 0, nodes)
GetChildren.pnlDBN <- function(x, nodes) .Call("pnlGetChildren", x, 1, nodes)
# -- Distribution setters ---------------------------------------------------
# ParentValue = -1 (the default) selects the unconditional native entry
# point; any other value selects the *Cond variant with that parent
# configuration.  Error convention as elsewhere: negative status invisible,
# otherwise the "pnlReturnError" text.
SetPTabular <- function(x, value, probability, ParentValue) UseMethod("SetPTabular", x)
SetPTabular.pnlBNet <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPTabular", x, 0, value, probability)
    else result <- .Call("pnlSetPTabularCond",x, 0, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetPTabular.pnlDBN <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPTabular", x, 1, value, probability)
    else result <- .Call("pnlSetPTabularCond",x, 1, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# The MRF method has no ParentValue argument: only the unconditional form
# is supported for kind code 3.
SetPTabular.pnlMRF <- function (x, value, probability)
{
    res <- "ok"
    result <- .Call("pnlSetPTabular", x, 3, value, probability)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
# LIMID-only setters; these native entry points take no network-kind code.
SetPChance <- function(x, value, probability, ParentValue) UseMethod("SetPChance", x)
SetPChance.pnlLIMID <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPChance", x, value, probability)
    else result <- .Call("pnlSetPChanceCond",x, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetPDecision <- function(x, value, probability, ParentValue) UseMethod("SetPDecision", x)
SetPDecision.pnlLIMID <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetPDecision", x, value, probability)
    else result <- .Call("pnlSetPDecisionCond",x, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
SetValueCost <- function(x, value, probability, ParentValue) UseMethod("SetValueCost", x)
SetValueCost.pnlLIMID <- function (x, value, probability, ParentValue = -1)
{
    res <- "ok"
    if (ParentValue < 0) result <- .Call("pnlSetValueCost", x, value, probability)
    else result <- .Call("pnlSetValueCostCond",x, value, probability, ParentValue)
    if (result < 0) invisible(result)
    else
    {
        res <- .Call("pnlReturnError")
        res
    }
}
GetPTabularString <- function(x, value, parents) UseMethod("GetPTabularString", x)
GetPTabularString.pnlBNet <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetPTabularString", x, 0, value)
else .Call("pnlGetPTabularStringCond", x, 0, value, parents)
}
GetPTabularString.pnlDBN <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetPTabularString", x, 1, value)
else .Call("pnlGetPTabularStringCond", x, 1, value, parents)
}
GetPTabularString.pnlMRF <- function(x, value) .Call("pnlGetPTabularString", x, 3, value)
GetPTabularFloat <- function(x, value, parents) UseMethod("GetPTabularFloat", x)
GetPTabularFloat.pnlBNet <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetPTabularFloat", x, 0, value)
else .Call("pnlGetPTabularFloatCond", x, 0, value, parents)
}
GetPTabularFloat.pnlDBN <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetPTabularFloat", x, 1, value)
else .Call("pnlGetPTabularFloatCond", x, 1, value, parents)
}
GetPTabularFloat.pnlMRF <- function(x, value) .Call("pnlGetPTabularFloat", x, 3, value)
GetPChanceString <- function(x, value, parents) UseMethod("GetPChanceString", x)
GetPChanceString.pnlLIMID <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetPChanceString", x, value)
else .Call("pnlGetPChanceCondString", x, value, parents)
}
GetPChanceFloat <- function(x, value, parents) UseMethod("GetPChanceFloat", x)
GetPChanceFloat.pnlLIMID <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetPChanceFloat", x, value)
else .Call("pnlGetPChanceCondFloat", x, value, parents)
}
# Generic accessor: decision-node policy of a LIMID, formatted as a string.
GetPDecisionString <- function(x, value, parents) UseMethod("GetPDecisionString")
# Fix: the original redefined the generic itself (the ".pnlLIMID" method
# suffix was missing), which clobbered UseMethod() dispatch.  Registered as
# an S3 method, matching the neighbouring GetPDecisionFloat.
GetPDecisionString.pnlLIMID <- function(x, value, parents = -1)
{
    # A negative 'parents' means "unconditional"; otherwise query the policy
    # conditioned on the given parent configuration.
    if (parents < 0) .Call("pnlGetPDecisionString", x, value)
    else .Call("pnlGetPDecisionCondString", x, value, parents)
}
GetPDecisionFloat <- function(x, value, parents) UseMethod("GetPDecisionFloat")
GetPDecisionFloat.pnlLIMID <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetPDecisionFloat", x, value)
else .Call("pnlGetPDecisionCondFloat", x, value, parents)
}
GetValueCostString <- function(x, value, parents) UseMethod("GetValueCostString")
GetValueCostString.pnlLIMID <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetValueCostString", x, value)
else .Call("pnlGetValueCostCondString", x, value, parents)
}
GetValueCostFloat <- function(x, value, parents) UseMethod("GetValueCostFloat")
GetValueCostFloat.pnlLIMID <- function(x, value, parents = -1)
{
if (parents < 0) .Call("pnlGetValueCostFloat", x, value)
else .Call("pnlGetValueCostCondFloat", x, value, parents)
}
SetPGaussian <- function(x, node, mean, variance, weight, tabParents) UseMethod("SetPGaussian", x)
SetPGaussian.pnlBNet <- function(x, node, mean, variance, weight = -1, tabParents = -1)
{
res <- "ok"
if (tabParents < 0)
{
if (weight < 0) result <- .Call("pnlSetPGaussian", x, 0, node, mean, variance)
else result <- .Call("pnlSetPGaussianCond", x, 0, node, mean, variance, weight)
}
else result <- .Call("pnlSetPGaussianCondTParents", x, node, mean, variance, weight, tabParents)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
SetPGaussian.pnlDBN <- function(x, node, mean, variance, weight = -1)
{
res <- "ok"
if (weight < 0) result <- .Call("pnlSetPGaussian", x, 1, node, mean, variance)
else result <- .Call("pnlSetPGaussianCond", x, 1, node, mean, variance, weight)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
SetPSoftMax <- function(x, node, weight, offset, parents) UseMethod("SetPSoftMax", x)
SetPSoftMax.pnlBNet <- function(x, node, weight, offset, parents = -1)
{
res <- "ok"
if (parents < 0) result <- .Call("pnlSetPSoftMax", x, node, weight, offset)
else result <- .Call("pnlSetPSoftMaxCond", x, node, weight, offset, parents)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
EditEvidence <- function(x, values) UseMethod("EditEvidence", x)
EditEvidence.pnlBNet <- function(x, values)
{
res <- "ok"
result <- .Call("pnlEditEvidence", x, 0, values)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
EditEvidence.pnlDBN <- function(x, values)
{
res <- "ok"
result <- .Call("pnlEditEvidence", x, 1, values)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
EditEvidence.pnlMRF <- function(x, values)
{
res <- "ok"
result <- .Call("pnlEditEvidence", x, 3, values)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
# Generic: discard the current evidence attached to a model.
ClearEvid <- function(x) UseMethod("ClearEvid", x)
# Bayesian-network method (net-type code 0).  On failure the raw status code
# is returned invisibly; otherwise the engine's status string is fetched and
# returned.
ClearEvid.pnlBNet <- function(x)
{
    status <- .Call("pnlClearEvid", x, 0)
    if (status < 0) {
        invisible(status)
    } else {
        .Call("pnlReturnError")
    }
}
ClearEvid.pnlDBN <- function(x)
{
res <- "ok"
result <- .Call("pnlClearEvid", x, 1)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
ClearEvid.pnlMRF <- function(x)
{
res <- "ok"
result <- .Call("pnlClearEvid", x, 3)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
CurEvidToBuf <- function(x) UseMethod("CurEvidToBuf", x)
CurEvidToBuf.pnlBNet <- function(x)
{
res <- "ok"
result <- .Call("pnlCurEvidToBuf", x, 0)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
CurEvidToBuf.pnlDBN <- function(x)
{
res <- "ok"
result <- .Call("pnlCurEvidToBuf", x, 1)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
CurEvidToBuf.pnlMRF <- function(x)
{
res <- "ok"
result <- .Call("pnlCurEvidToBuf", x, 3)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
AddEvidToBuf <- function(x, values) UseMethod("AddEvidToBuf", x)
AddEvidToBuf.pnlBNet <- function(x, values)
{
res <- "ok"
result <- .Call("pnlAddEvidToBuf", x, 0, values)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
AddEvidToBuf.pnlDBN <- function(x, values)
{
res <- "ok"
result <- .Call("pnlAddEvidToBuf", x, 1, values)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
AddEvidToBuf.pnlMRF <- function(x, values)
{
res <- "ok"
result <- .Call("pnlAddEvidToBuf", x, 3, values)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
ClearEvidBuf <- function(x) UseMethod("ClearEvidBuf", x)
ClearEvidBuf.pnlBNet <- function(x)
{
res <- "ok"
result <- .Call("pnlClearEvidBuf", x, 0)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
ClearEvidBuf.pnlDBN <- function(x)
{
res <- "ok"
result <- .Call("pnlClearEvidBuf", x, 1)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
ClearEvidBuf.pnlMRF <- function(x)
{
res <- "ok"
result <- .Call("pnlClearEvidBuf", x, 3)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GetMPE <- function(x, nodes) UseMethod("GetMPE", x)
GetMPE.pnlBNet <- function(x, nodes) .Call("pnlGetMPE",x, 0, nodes)
GetMPE.pnlDBN <- function(x, nodes) .Call("pnlGetMPE",x, 1, nodes)
GetMPE.pnlMRF <- function(x, nodes) .Call("pnlGetMPE",x, 3, nodes)
GetJPDString <- function(x, nodes) UseMethod("GetJPDString", x)
GetJPDString.pnlBNet <- function(x, nodes) .Call("pnlGetJPDString", x, 0, nodes)
GetJPDString.pnlDBN <- function(x, nodes) .Call("pnlGetJPDString", x, 1, nodes)
GetJPDString.pnlMRF <- function(x, nodes) .Call("pnlGetJPDString", x, 3, nodes)
GetJPDFloat <- function(x, nodes) UseMethod("GetJPDFloat", x)
GetJPDFloat.pnlBNet <- function(x, nodes) .Call("pnlGetJPDFloat", x, 0, nodes)
GetJPDFloat.pnlDBN <- function(x, nodes) .Call("pnlGetJPDFloat", x, 1, nodes)
GetJPDFloat.pnlMRF <- function(x, nodes) .Call("pnlGetJPDFloat", x, 3, nodes)
GetSoftMaxOffset <- function(x, node, ParentValue) UseMethod("GetSoftMaxOffset", x)
GetSoftMaxOffset.pnlBNet <- function(x, node, ParentValue = -1)
{
if (ParentValue < 0) .Call("pnlGetSoftMaxOffset", x, node)
else .Call("pnlGetSoftMaxOffsetCond", x, node, ParentValue)
}
GetSoftMaxWeights <- function(x, node, ParentValue) UseMethod("GetSoftMaxWeights", x)
GetSoftMaxWeights.pnlBNet <- function(x, node, ParentValue = -1)
{
if (ParentValue < 0) .Call("pnlGetSoftMaxWeights", x, node)
else .Call("pnlGetSoftMaxWeightsCond", x, node, ParentValue)
}
GetGaussianMean <- function(x, nodes, tabParents) UseMethod("GetGaussianMean", x)
GetGaussianMean.pnlBNet <- function(x, nodes, tabParents = -1)
{
if (tabParents < 0) .Call("pnlGetGaussianMean", x, 0, nodes)
else .Call("pnlGetGaussianMeanCond", x, 0, nodes, tabParents)
}
GetGaussianMean.pnlDBN <- function(x, nodes) .Call("pnlGetGaussianMean", x, 1, nodes)
GetGaussianCovar <- function(x, nodes, tabParents) UseMethod("GetGaussianCovar", x)
GetGaussianCovar.pnlBNet <- function(x, nodes, tabParents = -1)
{
if (tabParents < 0) .Call("pnlGetGaussianCovar", x, 0, nodes)
else .Call("pnlGetGaussianCovarCond", x, 0, nodes, tabParents)
}
GetGaussianCovar.pnlDBN <- function(x, nodes) .Call("pnlGetGaussianCovar", x, 1, nodes)
GetGaussianWeights <- function(x, nodes, parents, tabParents) UseMethod("GetGaussianWeights", x)
GetGaussianWeights.pnlBNet <- function(x, nodes, parents, tabParents = -1)
{
if (tabParents < 0) .Call("pnlGetGaussianWeights", x, 0, nodes, parents)
else .Call("pnlGetGaussianWeightsCond", x, 0, nodes, parents, tabParents)
}
GetGaussianWeights.pnlDBN <- function(x, nodes, parents) .Call("pnlGetGaussianWeights", x, 1, nodes, parents)
SetProperty <- function(x, name, value) UseMethod("SetProperty", x)
SetProperty.pnlBNet <- function(x, name, value)
{
res <- "ok"
result <- .Call("pnlSetProperty", x, 0, name, value)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
SetProperty.pnlDBN <- function(x, name, value)
{
res <- "ok"
result <- .Call("pnlSetProperty", x, 1, name, value)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
SetProperty.pnlLIMID <- function(x, name, value)
{
res <- "ok"
result <- .Call("pnlSetProperty", x, 2, name, value)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
SetProperty.pnlMRF <- function(x, name, value)
{
res <- "ok"
result <- .Call("pnlSetProperty", x, 3, name, value)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GetProperty <- function(x, name) UseMethod("GetProperty", x)
GetProperty.pnlBNet <- function(x, name) .Call("pnlGetProperty", x, 0, name)
GetProperty.pnlDBN <- function(x, name) .Call("pnlGetProperty", x, 1, name)
GetProperty.pnlLIMID <- function(x, name) .Call("pnlGetProperty", x, 2, name)
GetProperty.pnlMRF <- function(x, name) .Call("pnlGetProperty", x, 3, name)
LearnParameters <- function(x) UseMethod("LearnParameters", x)
LearnParameters.pnlBNet <- function(x)
{
res <- "ok"
result <- .Call("pnlLearnParameters", x, 0)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
LearnParameters.pnlDBN <- function(x)
{
res <- "ok"
result <- .Call("pnlLearnParameters", x, 1)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
LearnParameters.pnlMRF <- function(x)
{
res <- "ok"
result <- .Call("pnlLearnParameters", x, 3)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
LearnStructure <- function(x) UseMethod("LearnStructure", x)
LearnStructure.pnlBNet <- function(x)
{
res <- "ok"
result <- .Call("pnlLearnStructure", x)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
SaveEvidBuf <- function(x, filename) UseMethod("SaveEvidBuf", x)
SaveEvidBuf.pnlBNet <- function (x, filename) .Call("pnlSaveEvidBuf", x, 0, filename)
SaveEvidBuf.pnlDBN <- function (x, filename) .Call("pnlSaveEvidBuf", x, 1, filename)
SaveEvidBuf.pnlMRF <- function (x, filename) .Call("pnlSaveEvidBuf", x, 3, filename)
LoadEvidBuf <- function(x, filename, columns) UseMethod("LoadEvidBuf", x)
LoadEvidBuf.pnlBNet <- function (x, filename, columns = -1)
{
if (columns < 0) .Call("pnlLoadEvidBufNative", x, 0, filename)
else .Call("pnlLoadEvidBufForeign", x, 0, filename, columns)
}
# Load an evidence buffer for a DBN (net-type code 1) from file.
# With 'columns' unset the file is assumed to be in native format; otherwise
# the foreign-format reader is used with the given column mapping.
LoadEvidBuf.pnlDBN <- function (x, filename, columns = -1)
{
    if (columns < 0) .Call("pnlLoadEvidBufNative", x, 1, filename)
    # Fix: arguments were passed as (x, filename, 1, columns); every other
    # method calls pnlLoadEvidBufForeign as (x, <net type>, filename, columns).
    else .Call("pnlLoadEvidBufForeign", x, 1, filename, columns)
}
# Load an evidence buffer for an MRF (net-type code 3) from file.
# With 'columns' unset the file is assumed to be in native format; otherwise
# the foreign-format reader is used with the given column mapping.
LoadEvidBuf.pnlMRF <- function (x, filename, columns = -1)
{
    if (columns < 0) .Call("pnlLoadEvidBufNative", x, 3, filename)
    # Fix: arguments were passed as (x, filename, 3, columns); every other
    # method calls pnlLoadEvidBufForeign as (x, <net type>, filename, columns).
    else .Call("pnlLoadEvidBufForeign", x, 3, filename, columns)
}
GenerateEvidences <- function(x, nSamples, ignoreCurrEvid, whatNodes) UseMethod("GenerateEvidences", x)
GenerateEvidences.pnlBNet <- function(x, nSamples, ignoreCurrEvid = -1, whatNodes = -1)
{
res <- "ok"
if (ignoreCurrEvid < 0) result <- .Call("pnlGenerateEvidences", x, 0, nSamples)
else
{
if (whatNodes < 0) result <- .Call("pnlGenerateEvidencesCurr", x, 0, nSamples, ignoreCurrEvid)
else result <- .Call("pnlGenerateEvidencesCurrSome", x, 0, nSamples, ignoreCurrEvid, whatNodes)
}
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GenerateEvidences.pnlMRF <- function(x, nSamples, ignoreCurrEvid = -1, whatNodes = -1)
{
res <- "ok"
if (ignoreCurrEvid < 0) result <- .Call("pnlGenerateEvidences", x, 3, nSamples)
else
{
if (whatNodes < 0) result <- .Call("pnlGenerateEvidencesCurr", x, 3, nSamples, ignoreCurrEvid)
else result <- .Call("pnlGenerateEvidencesCurrSome", x, 3, nSamples, ignoreCurrEvid, whatNodes)
}
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GenerateEvidences.pnlDBN <- function(x, numSamples)
{
res <- "ok"
result <- .Call("dbnGenerateEvidences", x, numSamples)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
MaskEvidBuf <- function(x, whatNodes) UseMethod("MaskEvidBuf", x)
MaskEvidBuf.pnlBNet <- function(x, whatNodes = -1)
{
res <- "ok"
if (whatNodes < 0) result <- .Call("pnlMaskEvidBufFull", x, 0)
else result <- .Call("pnlMaskEvidBufPart", x, 0, whatNodes)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
MaskEvidBuf.pnlMRF <- function(x, whatNodes = -1)
{
res <- "ok"
if (whatNodes < 0) result <- .Call("pnlMaskEvidBufFull", x, 3)
else result <- .Call("pnlMaskEvidBufPart", x, 3, whatNodes)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
LoadNet <- function(x, filename) UseMethod("LoadNet", x)
LoadNet.pnlBNet <- function (x, filename)
{
res <- "ok"
result <- .Call("pnlLoadNet", x, 0, filename)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
LoadNet.pnlDBN <- function (x, filename)
{
res <- "ok"
result <- .Call("pnlLoadNet", x, 1, filename)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
LoadNet.pnlLIMID <- function (x, filename)
{
res <- "ok"
result <- .Call("pnlLoadNet", x, 2, filename)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
LoadNet.pnlMRF <- function (x, filename)
{
res <- "ok"
result <- .Call("pnlLoadNet", x, 3, filename)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GetCurEvidenceLogLik <- function(x) UseMethod("GetCurEvidenceLogLik", x)
GetCurEvidenceLogLik.pnlBNet <- function(x) .Call("pnlGetCurEvidenceLogLik", x)
GetEvidBufLogLik <- function(x) UseMethod("GetEvidBufLogLik", x)
GetEvidBufLogLik.pnlBNet <- function(x) .Call("pnlGetEvidBufLogLik", x)
GetEMLearningCriterionValue <- function(x) UseMethod("GetEMLearningCriterionValue", x)
GetEMLearningCriterionValue.pnlBNet <- function(x) .Call("pnlGetEMLearningCriterionValue", x, 0)
GetEMLearningCriterionValue.pnlDBN <- function(x) .Call("pnlGetEMLearningCriterionValue", x, 1)
SetNumSlices <- function(x, NumSlice) UseMethod("SetNumSlices", x)
SetNumSlices.pnlDBN <- function(x, NumSlice)
{
res <- "ok"
result <- .Call("pnlSetNumSlices", x, NumSlice)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GetNumSlices <- function(x) UseMethod("GetNumSlices")
GetNumSlices.pnlDBN <- function(x) .Call("pnlGetNumSlices", x)
SetLag <- function(x, LagNum) UseMethod("SetLag", x)
SetLag.pnlDBN <- function(x, LagNum)
{
res <- "ok"
result <- .Call("pnlSetLag", x, LagNum)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GetLag <- function(x) UseMethod("GetLag")
GetLag.pnlDBN <- function(x) .Call("pnlGetLag", x)
IsFullDBN <- function(x) UseMethod("IsFullDBN", x)
IsFullDBN.pnlDBN <- function(x) .Call("pnlIsFullDBN", x)
SetIterMax <- function(x, IterMax) UseMethod("SetIterMax")
SetIterMax.pnlLIMID <- function(x, IterMax)
{
res <- "ok"
result <- .Call("pnlSetIterMax", x, IterMax)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
GetExpectation <- function(x) UseMethod("GetExpectation")
GetExpectation.pnlLIMID <- function(x) .Call("pnlGetExpectation", x)
GetPoliticsString <- function(x) UseMethod("GetPoliticsString")
GetPoliticsString.pnlLIMID <- function(x) .Call("pnlGetPoliticsString", x)
GetPoliticsFloat <- function(x) UseMethod("GetPoliticsFloat")
GetPoliticsFloat.pnlLIMID <- function(x) .Call("pnlGetPoliticsFloat", x)
SetClique <- function(x, nodes) UseMethod("SetClique", x)
SetClique.pnlMRF <- function(x, nodes)
{
res <- "ok"
result <- .Call("pnlSetClique", x, nodes)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
DestroyClique <- function(x, nodes) UseMethod("DestroyClique", x)
DestroyClique.pnlMRF <- function(x, nodes)
{
res <- "ok"
result <- .Call("pnlDestroyClique", x, nodes)
if (result < 0) invisible(result)
else
{
res <- .Call("pnlReturnError")
res
}
}
# Generic accessors for MRF size: node count and clique count.
GetNumberOfNodes <- function(x) UseMethod("GetNumberOfNodes")
# Fix: the model handle 'x' was not forwarded to .Call, so the C routine was
# invoked without a network to inspect; every sibling MRF accessor passes it.
GetNumberOfNodes.pnlMRF <- function(x) .Call("pnlGetNumberOfNodes", x)
GetNumberOfCliques <- function(x) UseMethod("GetNumberOfCliques")
GetNumberOfCliques.pnlMRF <- function(x) .Call("pnlGetNumberOfCliques", x)
# Convenience wrapper around corCFA with scale-analysis defaults.
# Fix: the wrapper accepted iter/resid/item_cor/sort/heat_map but then passed
# the literal defaults to corCFA, silently discarding user-supplied values;
# forward the actual arguments instead.
scales <-
function(..., iter=0, resid=FALSE, item_cor=FALSE, sort=FALSE, heat_map=FALSE)
  corCFA(..., iter=iter, resid=resid, item_cor=item_cor, sort=sort,
         heat_map=heat_map, fun_call=match.call())
| /R/d.R | no_license | cran/lessR | R | false | false | 207 | r | scales <-
function(..., iter=0, resid=FALSE, item_cor=FALSE, sort=FALSE, heat_map=FALSE)
corCFA(..., iter=0, resid=FALSE, item_cor=FALSE, sort=FALSE,
heat_map=FALSE, fun_call=match.call())
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hike_spatial.R
\name{hike_spatial_elev}
\alias{hike_spatial_elev}
\title{spatial_elev}
\usage{
hike_spatial_elev(data, shiny_progress, apikey)
}
\value{
sf points with elevation
}
\description{
spatial_elev
}
| /man/hike_spatial_elev.Rd | permissive | EricKrg/hikeR | R | false | true | 287 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hike_spatial.R
\name{hike_spatial_elev}
\alias{hike_spatial_elev}
\title{spatial_elev}
\usage{
hike_spatial_elev(data, shiny_progress, apikey)
}
\value{
sf points with elevation
}
\description{
spatial_elev
}
|
# Plot 3: household energy sub-metering over time.
fulldata <- read.table("./household_power_consumption.txt", header = T)
library(tidyr)
library(dplyr)
# The raw file is ';'-separated, so each record was read into one column;
# split it into the nine named variables.
data_col <- separate(fulldata, 1,
                     c("Date", "Time", "Global_active_power",
                       "Global_reactive_power", "Voltage", "Global_intensity",
                       "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
                     sep = ";")
# Fix: 'working_data' was referenced before it was defined; it must start
# from the parsed table.
working_data <- data_col[complete.cases(data_col), ]
# Fix: separate() yields character columns; coerce the metering series so
# plot()/lines() receive numeric values.
# NOTE(review): missing values coded as "?" in this dataset become NA here --
# confirm that matches the intended filtering.
working_data$Sub_metering_1 <- as.numeric(working_data$Sub_metering_1)
working_data$Sub_metering_2 <- as.numeric(working_data$Sub_metering_2)
working_data$Sub_metering_3 <- as.numeric(working_data$Sub_metering_3)
# Combine the date and time fields into a single POSIXlt timestamp.
SetTime <- strptime(paste(working_data$Date, working_data$Time, sep = " "),
                    "%d/%m/%Y %H:%M:%S")
data <- cbind(working_data, SetTime)
# Black line: sub-metering 1; red: 2; blue: 3.
plot(data$SetTime, data$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(data$SetTime, data$Sub_metering_2, col = "red")
lines(data$SetTime, data$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = "solid")
# Load the package required to read XML files.
library(XML)
library(xml2)
library(lidR)
library(data.table)
library(tools)
# Also load the other required package.
library("methods")
typefor <- "mixte"
typepc <- "allpoints_fl"
pth_input <- paste0("D:/1_Work/2_Ciron/Data/ULM/LAS/unnorm/plots/15m_rad_test/allpoints/")
xml_template <- read_xml("D:/1_Work/5_Bauges/Voxelisation/xml_files/template_bauges.xml")
lasfiles <- list.files(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/",typefor,"/15m_rad/",typepc,"/"))
allpcnames <- as.data.table(list.files(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/",typefor,"/15m_rad/",typepc,"/"),
full.names = TRUE, pattern = "*.las"))
#allpcnames[, grp := sub("_[^_]*$","",basename(file_path_sans_ext(V1)))]
allpcnames[, grp := basename(file_path_sans_ext(V1))]
# Fill the XML template for every plot point cloud and write one config file
# per cloud: input LAS path, output .vox path, bounding box and split counts.
# NOTE(review): 'xml_file1 <- xml_template' does not copy -- xml2 documents
# have reference semantics -- so every iteration mutates the same template
# object.  Harmless here because each attribute is overwritten on every pass,
# but confirm if template attributes are ever meant to differ per file.
for (row in seq_len(nrow(allpcnames))) {   # seq_len() is safe when the table is empty
  xml_file1 <- xml_template
  pth1 <- allpcnames$V1[row]
  # Read only the LAS header (fast) to obtain the bounding box.
  ls <- readLASheader(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/",
                             typefor, "/15m_rad/", typepc, "/",
                             allpcnames$grp[row], ".las"))
  pth2 <- paste0("D:/1_Work/5_Bauges/Voxelisation/Results/74/",
                 typefor, "/15m_rad/", typepc, "/",
                 file_path_sans_ext(basename(allpcnames$V1[row])), ".vox")
  # Point the template's input/output nodes at this file pair.
  xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 1), 1), "src", pth1)
  xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 2), 1), "src", pth2)
  # Voxel-space node: copy the bounding box, one split per metre on each axis
  # (outer-integer extent).
  vox <- xml_child(xml_child(xml_file1, 1), 3)
  xml_set_attr(vox, "xmin", ls@PHB[["Min X"]])
  xml_set_attr(vox, "ymin", ls@PHB[["Min Y"]])
  xml_set_attr(vox, "zmin", ls@PHB[["Min Z"]])
  xml_set_attr(vox, "xmax", ls@PHB[["Max X"]])
  xml_set_attr(vox, "ymax", ls@PHB[["Max Y"]])
  xml_set_attr(vox, "zmax", ls@PHB[["Max Z"]])
  xml_set_attr(vox, "splitX", ceiling(ls@PHB[["Max X"]]) - floor(ls@PHB[["Min X"]]))
  xml_set_attr(vox, "splitY", ceiling(ls@PHB[["Max Y"]]) - floor(ls@PHB[["Min Y"]]))
  xml_set_attr(vox, "splitZ", ceiling(ls@PHB[["Max Z"]]) - floor(ls@PHB[["Min Z"]]))
  out <- paste0("D:/1_Work/5_Bauges/Voxelisation/xml_files/74/",
                typefor, "/15m_rad/", typepc, "/",
                tools::file_path_sans_ext(basename(allpcnames$V1[row])), ".xml")
  write_xml(xml_file1, out)
}
# Build one voxelisation config XML from a template: fill in the input LAS
# path, the output .vox path, and the bounding box / split counts taken from
# the LAS header, then write the result to 'outpthxml'.
xml_mod <- function(lasfile, inpth, outpthvox, outpthxml, tmpl)
{
  cfg <- tmpl
  las_path <- paste0(inpth, lasfile)
  hdr <- readLASheader(las_path)
  vox_path <- paste0(outpthvox, tools::file_path_sans_ext(lasfile), ".vox")
  # Point the template's input/output nodes at this file pair.
  xml_set_attr(xml_child(xml_child(xml_child(cfg, 1), 1), 1), "src", las_path)
  xml_set_attr(xml_child(xml_child(xml_child(cfg, 1), 2), 1), "src", vox_path)
  # Voxel-space node (xml2 nodes are references, so looking it up once is
  # equivalent to re-walking the tree for every attribute).
  vox_node <- xml_child(xml_child(cfg, 1), 3)
  xml_set_attr(vox_node, "xmin", hdr@PHB[["Min X"]])
  xml_set_attr(vox_node, "ymin", hdr@PHB[["Min Y"]])
  xml_set_attr(vox_node, "zmin", hdr@PHB[["Min Z"]])
  xml_set_attr(vox_node, "xmax", hdr@PHB[["Max X"]])
  xml_set_attr(vox_node, "ymax", hdr@PHB[["Max Y"]])
  xml_set_attr(vox_node, "zmax", hdr@PHB[["Max Z"]])
  # One split per metre along each axis (outer-integer extent).
  xml_set_attr(vox_node, "splitX", ceiling(hdr@PHB[["Max X"]]) - floor(hdr@PHB[["Min X"]]))
  xml_set_attr(vox_node, "splitY", ceiling(hdr@PHB[["Max Y"]]) - floor(hdr@PHB[["Min Y"]]))
  xml_set_attr(vox_node, "splitZ", ceiling(hdr@PHB[["Max Z"]]) - floor(hdr@PHB[["Min Z"]]))
  out <- paste0(outpthxml, tools::file_path_sans_ext(lasfile), ".xml")
  write_xml(cfg, out)
}
xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 2), 1), "src", pth2)
# Collect the GPS time stamp of every point from each deciduous ("feuillus")
# plot cloud; 'gpstimes' ends up as a flat list with one element per point.
# NOTE(review): reads from a hard-coded absolute path; growing the list with
# c() inside the loop is O(n^2) but kept to preserve the exact result shape.
gpstimes <- list()
# Fix: seq_len() instead of 1:nrow(), which yields c(1, 0) on an empty table.
for (row in seq_len(nrow(allpcnames)))
{
  ls <- readLAS(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/feuillus/15m_rad/allpoints/",
                       allpcnames$grp[row],
                       ".las"), select = "xyzgpstime")
  gpstimes <- c(gpstimes, ls@data$gpstime)
}
| /src/config_file_generation_ciron.R | no_license | drkrd/voxelisation_analyses | R | false | false | 4,576 | r | # Load the package required to read XML files.
library(XML)
library(xml2)
library(lidR)
library(data.table)
library(tools)
# Also load the other required package.
library("methods")
typefor <- "mixte"
typepc <- "allpoints_fl"
pth_input <- paste0("D:/1_Work/2_Ciron/Data/ULM/LAS/unnorm/plots/15m_rad_test/allpoints/")
xml_template <- read_xml("D:/1_Work/5_Bauges/Voxelisation/xml_files/template_bauges.xml")
lasfiles <- list.files(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/",typefor,"/15m_rad/",typepc,"/"))
allpcnames <- as.data.table(list.files(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/",typefor,"/15m_rad/",typepc,"/"),
full.names = TRUE, pattern = "*.las"))
#allpcnames[, grp := sub("_[^_]*$","",basename(file_path_sans_ext(V1)))]
allpcnames[, grp := basename(file_path_sans_ext(V1))]
for(row in 1:nrow(allpcnames))
{
xml_file1 <- xml_template
pth1 <- allpcnames$V1[row]
ls <- readLASheader(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/", typefor,"/15m_rad/", typepc,"/",
allpcnames$grp[row],
".las"))
pth2 <- paste0("D:/1_Work/5_Bauges/Voxelisation/Results/74/", typefor,"/15m_rad/", typepc,"/",
file_path_sans_ext(basename(allpcnames$V1[row])),
".vox")
xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 1), 1), "src", pth1)
xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 2), 1), "src", pth2)
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "xmin", ls@PHB[["Min X"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "ymin", ls@PHB[["Min Y"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "zmin", ls@PHB[["Min Z"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "xmax", ls@PHB[["Max X"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "ymax", ls@PHB[["Max Y"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "zmax", ls@PHB[["Max Z"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3),
"splitX",
ceiling(ls@PHB[["Max X"]])-floor(ls@PHB[["Min X"]]))
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3),
"splitY",
ceiling(ls@PHB[["Max Y"]])-floor(ls@PHB[["Min Y"]]))
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3),
"splitZ",
ceiling(ls@PHB[["Max Z"]])-floor(ls@PHB[["Min Z"]]))
out <- paste0("D:/1_Work/5_Bauges/Voxelisation/xml_files/74/",typefor,"/15m_rad/",typepc,"/",
tools::file_path_sans_ext(basename(allpcnames$V1[row])),
".xml")
write_xml(xml_file1, out)
}
xml_mod <- function(lasfile, inpth, outpthvox, outpthxml, tmpl)
{
xml_file1 <- tmpl
pth1 <- paste0(inpth,lasfile)
ls <- readLASheader(pth1)
pth2 <- paste0(outpthvox, tools::file_path_sans_ext(lasfile), ".vox")
xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 1), 1), "src", pth1)
xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 2), 1), "src", pth2)
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "xmin", ls@PHB[["Min X"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "ymin", ls@PHB[["Min Y"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "zmin", ls@PHB[["Min Z"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "xmax", ls@PHB[["Max X"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "ymax", ls@PHB[["Max Y"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3), "zmax", ls@PHB[["Max Z"]])
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3),
"splitX",
ceiling(ls@PHB[["Max X"]])-floor(ls@PHB[["Min X"]]))
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3),
"splitY",
ceiling(ls@PHB[["Max Y"]])-floor(ls@PHB[["Min Y"]]))
xml_set_attr(xml_child(xml_child(xml_file1, 1), 3),
"splitZ",
ceiling(ls@PHB[["Max Z"]])-floor(ls@PHB[["Min Z"]]))
out <- paste0(outpthxml, tools::file_path_sans_ext(lasfile), ".xml")
write_xml(xml_file1, out)
}
xml_set_attr(xml_child(xml_child(xml_child(xml_file1, 1), 2), 1), "src", pth2)
gpstimes <- list()
for(row in 1:nrow(allpcnames))
{
ls <- readLAS(paste0("D:/1_Work/5_Bauges/Data/ULM/LAS/unnorm/plots/feuillus/15m_rad/allpoints/",
allpcnames$grp[row],
".las"), select = "xyzgpstime")
gpstimes <- c(gpstimes, ls@data$gpstime)
}
|
/doc/tutorial/calc_2d.rd | no_license | gfd-dennou-club/Dennou-deepconv-arare6 | R | false | false | 1,045 | rd | ||
# https://adv-r.hadley.nz/r6
# https://stackoverflow.com/questions/35414576/multiple-inheritance-for-r6-classes
# https://rpubs.com/rappster/153394
library(R6)
#### public fields/methods
# Minimal R6 class: a running total with a chainable increment method.
Accumulator <- R6Class(
  'Accumulator',
  public = list(
    # Running total; starts at zero.
    sum = 0,
    # Add 'x' (default 1) to the total.  Returns self invisibly so calls
    # can be chained: a$add()$add(2)$sum.
    add = function(x = 1) {
      self$sum <- self$sum + x
      invisible(self)
    }
  )
)
x <- Accumulator$new()
x$add(4)
x$sum
# Side-effect R6 methods should always return self invisibly <- enabling method chaning
x$add(10)$add(10)$sum
x$
add(10)$
add(10)$
sum
#### useful functions - initialize, print
# $initialize() is a good place to check that name and age are the correct types
Person <- R6Class(
'Person',
public = list(
name = NULL,
age = NA,
initialize = function(name, age = NA) {
stopifnot(is.character(name), length(name) == 1)
stopifnot(is.numeric(age), length(age) == 1)
self$name <- name
self$age <- age
},
print = function(...) {
cat('Person: \n')
cat(' Name: ', self$name, '\n', sep = '')
cat(' Age: ', self$age, '\n', sep = '')
invisible(self)
}
)
)
jk <- Person$new('jk', age = 100)
jk
#### adding methods after creation
Accumulator <- R6Class('Accumulator')
Accumulator$set('public', 'sum', 0)
Accumulator$set('public', 'add', function(x = 1) {
self$sum <- self$sum + x
invisible(self)
})
# Accumulator$set('public', 'sum', 1) ## error
Accumulator$set('public', 'sum', 1, overwrite = TRUE)
Accumulator$new()$sum
x1 <- Accumulator$new()
Accumulator$set('public', 'hello', function() message('Hi!'))
# x1$hello() # not for existing objects
x2 <- Accumulator$new()
x2$hello()
#### inheritance
# R6 only supports single inheritance: you cannot supply a vector of classes to inherit
AccumulatorChatty <- R6Class(
'AccumulatorChatty',
inherit = Accumulator,
public = list(
add = function(x = 1) {
cat('Adding ', x, '\n', sep = '')
super$add(x = x)
}
)
)
x3 <- AccumulatorChatty$new()
x3$add(10)$add(1)$sum
#### Introspection
class(x3)
names(x3)
# clone
#### Privacy
# cannot access private fields or methods outside of the class
Person <- R6Class(
'Person',
public = list(
initialize = function(name, age = NA) {
private$name <- name
private$age <- age
},
print = function(...) {
cat('Person: \n')
cat(' Name: ', private$name, '\n', sep = '')
cat(' Age: ', private$age, '\n', sep = '')
invisible(self)
}
),
private = list(
age = NA,
name = NULL
)
)
PersonMore <- R6Class(
'PersonMore',
inherit = Person,
public = list(
hello = function() {
cat('Person: \n')
cat(' Name: ', super$name, '\n', sep = '')
cat(' Age: ', super$age, '\n', sep = '')
}
)
)
jk <- Person$new('jk')
jk
jk$name # NULL
PersonMore$new(jk)$hello() # privates not accessible in child class
#### Active fields
Rando <- R6Class(
'Rando',
active = list(
random = function(value) runif(1)
)
)
x <- Rando$new()
x$random
x$random
Person <- R6Class(
'Person',
private = list(
.age = NA,
.name = NULL,
.region = NULL
),
active = list(
age = function(value) { # ensure age is read-only
if (missing(value)) {
private$.age
} else {
stop('"$age" is read only', call. = FALSE)
}
},
name = function(value) { # ensure name is length 1 character vector
if (missing(value)) {
private$.name
} else {
stopifnot(is.character(value), length(value) == 1)
private$.name <- value
self
}
},
region = function(value) {
if (missing(value)) {
stop('"$region" is write only', call. = FALSE)
} else {
private$.region <- value
}
}
),
public = list(
initialize = function(name, age = NA) {
private$.name <- name
private$.age <- age
},
print = function() {
cat('Person: \n')
cat(' Name: ', private$.name, '\n', sep = '')
cat(' Age: ', private$.age, '\n', sep = '')
cat(' Region: ', private$.region, '\n', sep = '')
invisible(self)
}
)
)
jk <- Person$new('jk', 20)
jk$name
jk$name <- 10 # error
jk$age
jk$age <- 10 # error
jk$region # error
jk$region <- 'AU'
jk
#### Reference semantics - S3 objects built on top of environments
typeof(jk)
# reference semantics - objects are not copied when modified
y1 = Accumulator$new()
y2 <- y1
y1$add(10)
c(y1 = y1$sum, y2 = y2$sum)
#y1 y2
#11 11
# use $clone() for copying
# use $clone(deep = TRUE) for copying nested classes
y1 <- Accumulator$new()
y2 <- y1$clone()
y1$add(10)
c(y1 = y1$sum, y2 = y2$sum)
#### finalizer
TemporaryFile <- R6Class(
'TemporaryFile',
public = list(
path = NULL,
initialize = function() {
self$path <- tempfile()
},
finalize = function() {
message('Cleaning up ', self$path)
},
print = function() {
message('Temp file ', self$path)
}
)
)
tf <- TemporaryFile$new()
rm(tf)
invisible(gc())
#### R6 fields
# child object is initialize once when the class is defined, not institiated
# beaware setting a defalut value to an R6 class
TemporaryDatabase <- R6Class(
'TemporaryDatabase',
public = list(
con = NULL,
file = TemporaryFile$new(),
initialize = function() {
#DBI::dbConnect(RSQLite::SQLite(), path = file$path)
}
)
)
db_a <- TemporaryDatabase$new()
db_b <- TemporaryDatabase$new()
db_a$file$path == db_b$file$path
TemporaryDatabase1 <- R6Class(
'TemporaryDatabase1',
public = list(
con = NULL,
file = NULL,
initialize = function() {
self$file <- TemporaryFile$new()
#DBI::dbConnect(RSQLite::SQLite(), path = file$path)
}
)
)
db1_a <- TemporaryDatabase1$new()
db1_b <- TemporaryDatabase1$new()
db1_a$file$path == db1_b$file$path
## threading state
new_stack <- function(items = list()) {
structure(list(items = items, class = 'stack'))
}
push <- function(x, y) {
x$items <- c(x$items, list(y))
x
}
pop <- function(x) {
n <- length(x$items)
item <- x$items[[n]]
x$items <- x$items[[-n]]
list(item = item, x = x)
}
s <- new_stack()
s <- push(s, 10)
s <- push(s, 20)
out <- pop(s)
out
Stack <- R6Class(
'Stack',
public = list(
items = list(),
push = function(x) {
self$items <- c(self$items, x)
invisible(self)
},
pop = function() {
item <- self$items[[self$length()]]
self$items <- self$items[-self$length()]
item
},
length = function() {
length(self$items)
}
)
)
s1 <- Stack$new()
s1$push(10)$push(20)
s1$pop()
| /update_assets/r6-2.R | no_license | jaehyeon-kim/servewidgets | R | false | false | 7,284 | r | # https://adv-r.hadley.nz/r6
# https://stackoverflow.com/questions/35414576/multiple-inheritance-for-r6-classes
# https://rpubs.com/rappster/153394
library(R6)
#### public fields/methods
Accumulator <- R6Class(
'Accumulator',
public = list(
sum = 0,
add = function(x = 1) {
self$sum <- self$sum + x
invisible(self)
}
)
)
x <- Accumulator$new()
x$add(4)
x$sum
# Side-effect R6 methods should always return self invisibly <- enabling method chaning
x$add(10)$add(10)$sum
x$
add(10)$
add(10)$
sum
#### useful functions - initialize, print
# $initialize() is a good place to check that name and age are the correct types
Person <- R6Class(
'Person',
public = list(
name = NULL,
age = NA,
initialize = function(name, age = NA) {
stopifnot(is.character(name), length(name) == 1)
stopifnot(is.numeric(age), length(age) == 1)
self$name <- name
self$age <- age
},
print = function(...) {
cat('Person: \n')
cat(' Name: ', self$name, '\n', sep = '')
cat(' Age: ', self$age, '\n', sep = '')
invisible(self)
}
)
)
jk <- Person$new('jk', age = 100)
jk
#### adding methods after creation
Accumulator <- R6Class('Accumulator')
Accumulator$set('public', 'sum', 0)
Accumulator$set('public', 'add', function(x = 1) {
self$sum <- self$sum + x
invisible(self)
})
# Accumulator$set('public', 'sum', 1) ## error
Accumulator$set('public', 'sum', 1, overwrite = TRUE)
Accumulator$new()$sum
x1 <- Accumulator$new()
Accumulator$set('public', 'hello', function() message('Hi!'))
# x1$hello() # not for existing objects
x2 <- Accumulator$new()
x2$hello()
#### inheritance
# R6 only supports single inheritance: you cannot supply a vector of classes to inherit
AccumulatorChatty <- R6Class(
'AccumulatorChatty',
inherit = Accumulator,
public = list(
add = function(x = 1) {
cat('Adding ', x, '\n', sep = '')
super$add(x = x)
}
)
)
x3 <- AccumulatorChatty$new()
x3$add(10)$add(1)$sum
#### Introspection
class(x3)
names(x3)
# clone
#### Privacy
# cannot access private fields or methods outside of the class
Person <- R6Class(
'Person',
public = list(
initialize = function(name, age = NA) {
private$name <- name
private$age <- age
},
print = function(...) {
cat('Person: \n')
cat(' Name: ', private$name, '\n', sep = '')
cat(' Age: ', private$age, '\n', sep = '')
invisible(self)
}
),
private = list(
age = NA,
name = NULL
)
)
PersonMore <- R6Class(
'PersonMore',
inherit = Person,
public = list(
hello = function() {
cat('Person: \n')
cat(' Name: ', super$name, '\n', sep = '')
cat(' Age: ', super$age, '\n', sep = '')
}
)
)
jk <- Person$new('jk')
jk
jk$name # NULL
PersonMore$new(jk)$hello() # privates not accessible in child class
#### Active fields
Rando <- R6Class(
'Rando',
active = list(
random = function(value) runif(1)
)
)
x <- Rando$new()
x$random
x$random
Person <- R6Class(
'Person',
private = list(
.age = NA,
.name = NULL,
.region = NULL
),
active = list(
age = function(value) { # ensure age is read-only
if (missing(value)) {
private$.age
} else {
stop('"$age" is read only', call. = FALSE)
}
},
name = function(value) { # ensure name is length 1 character vector
if (missing(value)) {
private$.name
} else {
stopifnot(is.character(value), length(value) == 1)
private$.name <- value
self
}
},
region = function(value) {
if (missing(value)) {
stop('"$region" is write only', call. = FALSE)
} else {
private$.region <- value
}
}
),
public = list(
initialize = function(name, age = NA) {
private$.name <- name
private$.age <- age
},
print = function() {
cat('Person: \n')
cat(' Name: ', private$.name, '\n', sep = '')
cat(' Age: ', private$.age, '\n', sep = '')
cat(' Region: ', private$.region, '\n', sep = '')
invisible(self)
}
)
)
jk <- Person$new('jk', 20)
jk$name
jk$name <- 10 # error
jk$age
jk$age <- 10 # error
jk$region # error
jk$region <- 'AU'
jk
#### Reference semantics - S3 objects built on top of environments
typeof(jk)
# reference semantics - objects are not copied when modified
y1 = Accumulator$new()
y2 <- y1
y1$add(10)
c(y1 = y1$sum, y2 = y2$sum)
#y1 y2
#11 11
# use $clone() for copying
# use $clone(deep = TRUE) for copying nested classes
y1 <- Accumulator$new()
y2 <- y1$clone()
y1$add(10)
c(y1 = y1$sum, y2 = y2$sum)
#### finalizer
TemporaryFile <- R6Class(
'TemporaryFile',
public = list(
path = NULL,
initialize = function() {
self$path <- tempfile()
},
finalize = function() {
message('Cleaning up ', self$path)
},
print = function() {
message('Temp file ', self$path)
}
)
)
tf <- TemporaryFile$new()
rm(tf)
invisible(gc())
#### R6 fields
# child object is initialize once when the class is defined, not institiated
# beaware setting a defalut value to an R6 class
TemporaryDatabase <- R6Class(
'TemporaryDatabase',
public = list(
con = NULL,
file = TemporaryFile$new(),
initialize = function() {
#DBI::dbConnect(RSQLite::SQLite(), path = file$path)
}
)
)
db_a <- TemporaryDatabase$new()
db_b <- TemporaryDatabase$new()
db_a$file$path == db_b$file$path
TemporaryDatabase1 <- R6Class(
'TemporaryDatabase1',
public = list(
con = NULL,
file = NULL,
initialize = function() {
self$file <- TemporaryFile$new()
#DBI::dbConnect(RSQLite::SQLite(), path = file$path)
}
)
)
db1_a <- TemporaryDatabase1$new()
db1_b <- TemporaryDatabase1$new()
db1_a$file$path == db1_b$file$path
## threading state
new_stack <- function(items = list()) {
structure(list(items = items, class = 'stack'))
}
push <- function(x, y) {
x$items <- c(x$items, list(y))
x
}
pop <- function(x) {
n <- length(x$items)
item <- x$items[[n]]
x$items <- x$items[[-n]]
list(item = item, x = x)
}
s <- new_stack()
s <- push(s, 10)
s <- push(s, 20)
out <- pop(s)
out
Stack <- R6Class(
'Stack',
public = list(
items = list(),
push = function(x) {
self$items <- c(self$items, x)
invisible(self)
},
pop = function() {
item <- self$items[[self$length()]]
self$items <- self$items[-self$length()]
item
},
length = function() {
length(self$items)
}
)
)
s1 <- Stack$new()
s1$push(10)$push(20)
s1$pop()
|
#' Make a paged dynamic spectrogram similar to spectral display in Adobe Audition
#'
#' This function works on an object generated with \code{\link{prep_static_ggspectro}}, an alias for prepStaticSpec().
#' Video generation is very time consuming, and all the desired spectrogram parameters should be set
#' in the prep step. The output is an mp4 video of a dynamic spectrogram video. If the input sound file was
#' segmented in the prep step, the resulting video will be a concatenation of multiple dynamic spectrogram "pages."
#' Each page has a sliding window revealing the part of the static spectrogram being played. Temporal width of each page
#' is defined by the xLim parameter in \code{\link{prep_static_ggspectro}}. You can also output temporary segmented files, if desired.
#'
#' @aliases pagedSpectro pagedSpec
#' @usage paged_spectro(specParams,destFolder,vidName,framerate=30,highlightCol="#4B0C6BFF",
#' highlightAlpha=.6,cursorCol="#4B0C6BFF",delTemps=T,...)
#' @param specParams an object returned from \code{\link{prep_static_ggspectro}}
#' @param destFolder destination of output video; this setting overwrites setting from specParams object
#' @param vidName expects "FileName", .mp4 not necessary; if not supplied, will be named after the file you used in prep_static_ggspectro()
#' @param highlightCol default "#4B0C6BFF" (a purple color to match the default viridis 'inferno' palette)
#' @param highlightAlpha opacity of the highlight box; default is 0.6
#' @param cursorCol Color of the leading edge of the highlight box; default "#4B0C6BFF"
#' @param delTemps Default=T, deletes temporary files (specs & WAV files used to create concatenated video)
#' @param framerate by default, set to 30 (currently this is not supported, as animate doesn't honor the setting)
#' @return Nothing is returned, though progress and file save locations are output to user. Video should play after rendering.
#' @seealso \code{\link{prep_static_ggspectro}}
#' @author Matthew R Wilkins (\email{matt@@galacticpolymath.com})
#' @references {
#' Araya-Salas M & Wilkins M R. (2020). *dynaSpec: dynamic spectrogram visualizations in R*. R package version 1.0.0.
#' }
#' @examples {
#' #show wav files included with dynaSpec
#' f <- list.files(pattern=".wav", full.names = TRUE, path = system.file(package="dynaSpec"))
#'
#' femaleBarnSwallow<-prep_static_ggspectro(f[1],destFolder="wd",onlyPlotSpec = F, bgFlood=T)
#' paged_spectro(femaleBarnSwallow)
#'
#' maleBarnSwallow<-prep_static_ggspectro(f[2],destFolder="wd",onlyPlotSpec = F, bgFlood=T,min_dB=-40)
#' paged_spectro(femaleBarnSwallow)
#'
#' # Make a multipage dynamic spec of a humpback whale song
#' # Note, we're saving PNGs of our specs in the working directory; to add
#'# axis labels, we set onlyPlotSpec to F, and to make the same background
#' # color for the entire figure, we set bgFlood=T;
#' # The yLim is set to only go to 0.7kHz, where the sounds are for these big whales;
#' #also applying an amplitude transform to boost signal.
#' #This is a longer file, so we're taking the first 12 seconds with crop=12
#' #xLim=3 means each "page" will be 3 seconds, so we'll have 4 dynamic spec pages that get combined
#'
#' humpback <- prep_static_ggspectro(
#' "http://www.oceanmammalinst.org/songs/hmpback3.wav",savePNG=T,destFolder="wd",
#' onlyPlotSpec=F,bgFlood=T,yLim=c(0,.7),crop=12,xLim=3,ampTrans=3)
#'
#' #to generate multipage dynamic spec (movie), run the following
#' paged_spectro(humpback)
#'
#' # see more examples at https://marce10.github.io/dynaSpec/
#' }
paged_spectro <-function(specParams,destFolder,vidName,framerate=30,highlightCol="#4B0C6BFF",highlightAlpha=.6,cursorCol="#4B0C6BFF",delTemps=T,... )
{
if(!ari::have_ffmpeg_exec()){
cat("\n*****This script needs ffmpeg to work*****\n")
cat("If you have a mac, with HomeBrew installed, you can fix this easily in terminal with:\n")
cat("\n>\tbrew install ffmpeg\n")
cat("\nIf not, download and install it from ffmpeg.org")
}else{
if(missing(destFolder)){destFolder <- specParams$destFolder}
if(!missing(vidName)){
iName0=tools::file_path_sans_ext(vidName)
vidName=paste0(destFolder,iName0,".mp4")
}else{
iName0<-tools::file_path_sans_ext(specParams$outFilename)
vidName=paste0(destFolder,iName0,".mp4")
}#base name for output, sans extension
#To avoid probs if a file contains '
vidName<-gsub("'",".",vidName)
iName0<-gsub("'",".",iName0)
tempdir<-paste0(destFolder,"temp/")
dir.create(tempdir,showWarnings=F)
#always export the newWav version that has been cropped/padded according to user parameters
cat(paste0("Temporary files saved at: ",tempdir))
newWavOut=paste0(tempdir,iName0,"_forVideo.wav")
tuneR::writeWave(specParams$newWav,filename=newWavOut)
#export wav files if spec is to be segmented; not necessary if wav is unaltered
if(length(specParams$segWavs)>1){
#create list of names for WAV audio segments
outWAV<-lapply(1:length(specParams$segWavs),function(x) {paste0(tempdir,iName0,"_",x,"_.wav")})
invisible(
lapply(1:length(specParams$segWavs), function(x){fn=outWAV[[x]]
tuneR::writeWave(specParams$segWavs[[x]],file=fn)
cat(paste0("\nSaved temp wav segment: ",fn))}))
}
for(i in 1:length(specParams$segWavs))
{
#Address missing variables
iName<-paste0(iName0,ifelse(length(specParams$segWavs)==1,"",paste0("_",i,"_")))
#Save background spectrogram PNG to temp directory using tested parameters
outPNG<-paste0(tempdir,paste0(iName,".png"))
outTmpVid<-paste0(tempdir,paste0(iName,".mp4"))
#output spec without axes, b/c we'll have to
ggsave(filename=outPNG,plot=specParams$spec[[i]]+ggplot2::theme_void()+ggplot2::theme(panel.background=ggplot2::element_rect(fill=specParams$bg),legend.position = 'none'),dpi=300,width=specParams$specWidth,height=specParams$specHeight,units="in")
print(paste0("Spec saved @ ",outPNG))
#Read PNG bitmap back in
spec_PNG<-readPNG(outPNG)
spec_width_px<-attributes(spec_PNG)$dim[2]
spec_height_px<-attributes(spec_PNG)$dim[1]
#Create data frame for highlighting box animation for i^th wav segment
range_i<-c((i-1)*specParams$xLim[2],(i-1)*specParams$xLim[2]+specParams$xLim[2])
cursor<-seq(range_i[1],range_i[2],specParams$xLim[2]/framerate)
played<-data.frame(xmin=cursor,xmax=rep(range_i[2],length(cursor)),ymin=rep(specParams$yLim[1],length(cursor)),ymax=rep(specParams$yLim[2], length(cursor)))
#Make ggplot overlay of highlight box on spectrogram
vidSegment<-{
ggplot2::ggplot(played)+ggplot2::xlim(range_i)+ggplot2::ylim(specParams$yLim)+
#Labels
ggplot2::labs(x="Time (s)",y="Frequency (kHz)",fill="Amplitude\n(dB)\n")+
##Animate() seems to shrink font size a bit
mytheme_lg(specParams$bg)+
#Conditional theming based on user prefs (note, legend not currently supported)
#Since I'm reimporting spec as a raster, legend would need to rebuilt manually...gets a little
#warped if I embed it in the raster...doesn't look good.
{
#If user supplied fontAndAxisCol, change those settings (regardless of whether bg is flooded or not)
if(!specParams$autoFontCol){
ggplot2::theme(axis.text=ggplot2::element_text(colour=specParams$fontAndAxisCol),text=ggplot2::element_text(colour=specParams$fontAndAxisCol),axis.line = ggplot2::element_line(colour=specParams$fontAndAxisCol),axis.ticks=element_line(colour=specParams$fontAndAxisCol))
}else{}
}+{
#get rid of axes & legend if requested
if(specParams$onlyPlotSpec){ggplot2::theme_void()+ ggplot2::theme(plot.background=ggplot2::element_rect(fill=specParams$bg),text=ggplot2::element_text(colour=specParams$fontAndAxisCol))
}else{
#For cases where axes are plotted
#if axes to be plotted, flood panel bg color over axis area?
if(specParams$bgFlood){ggplot2::theme(plot.background=ggplot2::element_rect(fill=specParams$bg),axis.text=ggplot2::element_text(colour=specParams$fontAndAxisCol),text=ggplot2::element_text(colour=specParams$fontAndAxisCol),axis.line = ggplot2::element_line(colour=specParams$fontAndAxisCol),axis.ticks=ggplot2::element_line(colour=specParams$fontAndAxisCol),legend.background=ggplot2::element_rect(fill=specParams$bg))}else{}
}
}+
#Add spectrogram
ggplot2::annotation_custom(grid::rasterGrob(spec_PNG,width = unit(1,"npc"), height = unit(1,"npc")),- Inf, Inf, -Inf, Inf)+
#Add box highlights for playback reveal
ggplot2::geom_rect(data=played,aes(xmin=xmin,ymin=ymin,xmax=xmax,ymax=ymax),fill=highlightCol,alpha=highlightAlpha)+
#Add cursor
ggplot2::geom_segment(data=played,aes(x=xmin,xend=xmin,y=ymin,yend=ymax),col=cursorCol,size=2) +
#Add animation
#**** Time consuming animation stage *****
gganimate::transition_reveal(xmin)
}#end GGPLOT stuffs
# #Increase plot margin slightly b/c it gets changed when exporting to video for some reason
# if(!specParams$onlyPlotSpec){axisMargin=40}else{axisMargin=0}
#### Export animated ggplot specs
#save Audio File with sound in 1 step only if not segmented
if(length(specParams$segWavs)==1){
#note, height is set to 500px due to an issue w/ output being garbled at some resolutions; width according to aspect ratio
animate(vidSegment,renderer=av_renderer(vidName,audio=newWavOut),duration=specParams$xLim[2],width=500*(spec_width_px/spec_height_px),height=500,units="px") #Need to save audio for segments!!
}else{
animate(vidSegment,renderer=av_renderer(outTmpVid,audio=outWAV[[i]]),duration=specParams$xLim[2],width=500*(spec_width_px/spec_height_px),height=500,units="px") #Need to save audio for segments!!
}
}#end for loop extracting video pieces
#if necessary, combine segments
if(length(specParams$segWavs)>1){
tmpPaths<-paste0("file '",gsub(".wav","",unlist(outWAV)),".mp4' duration ",specParams$xLim[2])
writeLines(tmpPaths,paste0(tempdir,"mp4Segments.txt"))
#Turns out this was wrong or has been fixed!! MP4s CAN be combined!
# #Unfortunately, can't just slap MP4 files together, so have to have an intermediate .ts file step
# ffmpegTransCode<-paste0(ffmpeg_exec(),' -y -i "',unlist(file_path_sans_ext(outWAV)),'.mp4" -vsync 1 -c copy "',unlist(file_path_sans_ext(outWAV)),'.mkv"')
# invisible(sapply(ffmpegTransCode,system))
#now combine .ts files into .mp4
#For matching audio & video lengths:
cropSmplRt<-specParams$newWav@samp.rate
cropFileDur<-max(length(specParams$newWav@left),length(specParams$newWav@right))/cropSmplRt
# cropFileDur2<-seconds_to_period(cropFileDur)
# cropFileDur3<-sprintf(fmt='%02d:%02d:%2.3f',hour(cropFileDur2),minute(cropFileDur2),second(cropFileDur2))
#Concat Step 1
#concatenate mp4 segments
#slight stutter for continuous sounds across segments, but the alternative step below doesn't work quite right, so good enough
system(paste0(ari::ffmpeg_exec(),' -f concat -ss 00:00:00.000 -safe 0 -i "',paste0(tempdir,"mp4Segments.txt"),'" -codec copy -y "',vidName,'"') )
#Concat Step 2
#Add audio track back in (couldn't figure how to combine these steps)
#THIS STEP CURRENTLY DOESN'T WORK WELL (DROPS LAST FEW FRAMES B/C MISMATCH IN A/V LENGTHS)
# system(paste0(ari::ffmpeg_exec(),' -ss 0 -i "',paste0(tempdir,"deleteme.mp4"),'" -i "',newWavOut,'" -c:v libx264 -map 0:v:0 -map 1:a:0 -c:a aac -ac 1 -b:a 192k -y -vsync 1 -t ',cropFileDur3,' "',vidName,'"'))
#Old Concat Step 1 (when step 2 is implemented); results in deleteme.mp4 intermediate
# system(paste0(ari::ffmpeg_exec(),' -f concat -safe 0 -i "',paste0(tempdir,"mp4Segments.txt"),'" -codec copy -y "',paste0(tempdir,"deleteme.mp4"),'"'))
}
cat("\n\nAll done!\n")
cat(paste0("file saved @",vidName))
system(paste0('open "',vidName,'"'))
if(delTemps){unlink(tempdir,recursive=T);print(paste0("FYI temporary file directory deleted @ ",tempdir))}
}#end else which passed FFMPEG check
}#end paged_spectro definition
#create alias
pagedSpec<-paged_spectro | /R/paged_spectro.R | no_license | Tubbz-alt/dynaSpec | R | false | false | 12,292 | r | #' Make a paged dynamic spectrogram similar to spectral display in Adobe Audition
#'
#' This function works on an object generated with \code{\link{prep_static_ggspectro}}, an alias for prepStaticSpec().
#' Video generation is very time consuming, and all the desired spectrogram parameters should be set
#' in the prep step. The output is an mp4 video of a dynamic spectrogram video. If the input sound file was
#' segmented in the prep step, the resulting video will be a concatenation of multiple dynamic spectrogram "pages."
#' Each page has a sliding window revealing the part of the static spectrogram being played. Temporal width of each page
#' is defined by the xLim parameter in \code{\link{prep_static_ggspectro}}. You can also output temporary segmented files, if desired.
#'
#' @aliases pagedSpectro pagedSpec
#' @usage paged_spectro(specParams,destFolder,vidName,framerate=30,highlightCol="#4B0C6BFF",
#' highlightAlpha=.6,cursorCol="#4B0C6BFF",delTemps=T,...)
#' @param specParams an object returned from \code{\link{prep_static_ggspectro}}
#' @param destFolder destination of output video; this setting overwrites setting from specParams object
#' @param vidName expects "FileName", .mp4 not necessary; if not supplied, will be named after the file you used in prep_static_ggspectro()
#' @param highlightCol default "#4B0C6BFF" (a purple color to match the default viridis 'inferno' palette)
#' @param highlightAlpha opacity of the highlight box; default is 0.6
#' @param cursorCol Color of the leading edge of the highlight box; default "#4B0C6BFF"
#' @param delTemps Default=T, deletes temporary files (specs & WAV files used to create concatenated video)
#' @param framerate by default, set to 30 (currently this is not supported, as animate doesn't honor the setting)
#' @return Nothing is returned, though progress and file save locations are output to user. Video should play after rendering.
#' @seealso \code{\link{prep_static_ggspectro}}
#' @author Matthew R Wilkins (\email{matt@@galacticpolymath.com})
#' @references {
#' Araya-Salas M & Wilkins M R. (2020). *dynaSpec: dynamic spectrogram visualizations in R*. R package version 1.0.0.
#' }
#' @examples {
#' #show wav files included with dynaSpec
#' f <- list.files(pattern=".wav", full.names = TRUE, path = system.file(package="dynaSpec"))
#'
#' femaleBarnSwallow<-prep_static_ggspectro(f[1],destFolder="wd",onlyPlotSpec = F, bgFlood=T)
#' paged_spectro(femaleBarnSwallow)
#'
#' maleBarnSwallow<-prep_static_ggspectro(f[2],destFolder="wd",onlyPlotSpec = F, bgFlood=T,min_dB=-40)
#' paged_spectro(femaleBarnSwallow)
#'
#' # Make a multipage dynamic spec of a humpback whale song
#' # Note, we're saving PNGs of our specs in the working directory; to add
#'# axis labels, we set onlyPlotSpec to F, and to make the same background
#' # color for the entire figure, we set bgFlood=T;
#' # The yLim is set to only go to 0.7kHz, where the sounds are for these big whales;
#' #also applying an amplitude transform to boost signal.
#' #This is a longer file, so we're taking the first 12 seconds with crop=12
#' #xLim=3 means each "page" will be 3 seconds, so we'll have 4 dynamic spec pages that get combined
#'
#' humpback <- prep_static_ggspectro(
#' "http://www.oceanmammalinst.org/songs/hmpback3.wav",savePNG=T,destFolder="wd",
#' onlyPlotSpec=F,bgFlood=T,yLim=c(0,.7),crop=12,xLim=3,ampTrans=3)
#'
#' #to generate multipage dynamic spec (movie), run the following
#' paged_spectro(humpback)
#'
#' # see more examples at https://marce10.github.io/dynaSpec/
#' }
paged_spectro <-function(specParams,destFolder,vidName,framerate=30,highlightCol="#4B0C6BFF",highlightAlpha=.6,cursorCol="#4B0C6BFF",delTemps=T,... )
{
if(!ari::have_ffmpeg_exec()){
cat("\n*****This script needs ffmpeg to work*****\n")
cat("If you have a mac, with HomeBrew installed, you can fix this easily in terminal with:\n")
cat("\n>\tbrew install ffmpeg\n")
cat("\nIf not, download and install it from ffmpeg.org")
}else{
if(missing(destFolder)){destFolder <- specParams$destFolder}
if(!missing(vidName)){
iName0=tools::file_path_sans_ext(vidName)
vidName=paste0(destFolder,iName0,".mp4")
}else{
iName0<-tools::file_path_sans_ext(specParams$outFilename)
vidName=paste0(destFolder,iName0,".mp4")
}#base name for output, sans extension
#To avoid probs if a file contains '
vidName<-gsub("'",".",vidName)
iName0<-gsub("'",".",iName0)
tempdir<-paste0(destFolder,"temp/")
dir.create(tempdir,showWarnings=F)
#always export the newWav version that has been cropped/padded according to user parameters
cat(paste0("Temporary files saved at: ",tempdir))
newWavOut=paste0(tempdir,iName0,"_forVideo.wav")
tuneR::writeWave(specParams$newWav,filename=newWavOut)
#export wav files if spec is to be segmented; not necessary if wav is unaltered
if(length(specParams$segWavs)>1){
#create list of names for WAV audio segments
outWAV<-lapply(1:length(specParams$segWavs),function(x) {paste0(tempdir,iName0,"_",x,"_.wav")})
invisible(
lapply(1:length(specParams$segWavs), function(x){fn=outWAV[[x]]
tuneR::writeWave(specParams$segWavs[[x]],file=fn)
cat(paste0("\nSaved temp wav segment: ",fn))}))
}
for(i in 1:length(specParams$segWavs))
{
#Address missing variables
iName<-paste0(iName0,ifelse(length(specParams$segWavs)==1,"",paste0("_",i,"_")))
#Save background spectrogram PNG to temp directory using tested parameters
outPNG<-paste0(tempdir,paste0(iName,".png"))
outTmpVid<-paste0(tempdir,paste0(iName,".mp4"))
#output spec without axes, b/c we'll have to
ggsave(filename=outPNG,plot=specParams$spec[[i]]+ggplot2::theme_void()+ggplot2::theme(panel.background=ggplot2::element_rect(fill=specParams$bg),legend.position = 'none'),dpi=300,width=specParams$specWidth,height=specParams$specHeight,units="in")
print(paste0("Spec saved @ ",outPNG))
#Read PNG bitmap back in
spec_PNG<-readPNG(outPNG)
spec_width_px<-attributes(spec_PNG)$dim[2]
spec_height_px<-attributes(spec_PNG)$dim[1]
#Create data frame for highlighting box animation for i^th wav segment
range_i<-c((i-1)*specParams$xLim[2],(i-1)*specParams$xLim[2]+specParams$xLim[2])
cursor<-seq(range_i[1],range_i[2],specParams$xLim[2]/framerate)
played<-data.frame(xmin=cursor,xmax=rep(range_i[2],length(cursor)),ymin=rep(specParams$yLim[1],length(cursor)),ymax=rep(specParams$yLim[2], length(cursor)))
#Make ggplot overlay of highlight box on spectrogram
vidSegment<-{
ggplot2::ggplot(played)+ggplot2::xlim(range_i)+ggplot2::ylim(specParams$yLim)+
#Labels
ggplot2::labs(x="Time (s)",y="Frequency (kHz)",fill="Amplitude\n(dB)\n")+
##Animate() seems to shrink font size a bit
mytheme_lg(specParams$bg)+
#Conditional theming based on user prefs (note, legend not currently supported)
#Since I'm reimporting spec as a raster, legend would need to rebuilt manually...gets a little
#warped if I embed it in the raster...doesn't look good.
{
#If user supplied fontAndAxisCol, change those settings (regardless of whether bg is flooded or not)
if(!specParams$autoFontCol){
ggplot2::theme(axis.text=ggplot2::element_text(colour=specParams$fontAndAxisCol),text=ggplot2::element_text(colour=specParams$fontAndAxisCol),axis.line = ggplot2::element_line(colour=specParams$fontAndAxisCol),axis.ticks=element_line(colour=specParams$fontAndAxisCol))
}else{}
}+{
#get rid of axes & legend if requested
if(specParams$onlyPlotSpec){ggplot2::theme_void()+ ggplot2::theme(plot.background=ggplot2::element_rect(fill=specParams$bg),text=ggplot2::element_text(colour=specParams$fontAndAxisCol))
}else{
#For cases where axes are plotted
#if axes to be plotted, flood panel bg color over axis area?
if(specParams$bgFlood){ggplot2::theme(plot.background=ggplot2::element_rect(fill=specParams$bg),axis.text=ggplot2::element_text(colour=specParams$fontAndAxisCol),text=ggplot2::element_text(colour=specParams$fontAndAxisCol),axis.line = ggplot2::element_line(colour=specParams$fontAndAxisCol),axis.ticks=ggplot2::element_line(colour=specParams$fontAndAxisCol),legend.background=ggplot2::element_rect(fill=specParams$bg))}else{}
}
}+
#Add spectrogram
ggplot2::annotation_custom(grid::rasterGrob(spec_PNG,width = unit(1,"npc"), height = unit(1,"npc")),- Inf, Inf, -Inf, Inf)+
#Add box highlights for playback reveal
ggplot2::geom_rect(data=played,aes(xmin=xmin,ymin=ymin,xmax=xmax,ymax=ymax),fill=highlightCol,alpha=highlightAlpha)+
#Add cursor
ggplot2::geom_segment(data=played,aes(x=xmin,xend=xmin,y=ymin,yend=ymax),col=cursorCol,size=2) +
#Add animation
#**** Time consuming animation stage *****
gganimate::transition_reveal(xmin)
}#end GGPLOT stuffs
# #Increase plot margin slightly b/c it gets changed when exporting to video for some reason
# if(!specParams$onlyPlotSpec){axisMargin=40}else{axisMargin=0}
#### Export animated ggplot specs
#save Audio File with sound in 1 step only if not segmented
if(length(specParams$segWavs)==1){
#note, height is set to 500px due to an issue w/ output being garbled at some resolutions; width according to aspect ratio
animate(vidSegment,renderer=av_renderer(vidName,audio=newWavOut),duration=specParams$xLim[2],width=500*(spec_width_px/spec_height_px),height=500,units="px") #Need to save audio for segments!!
}else{
animate(vidSegment,renderer=av_renderer(outTmpVid,audio=outWAV[[i]]),duration=specParams$xLim[2],width=500*(spec_width_px/spec_height_px),height=500,units="px") #Need to save audio for segments!!
}
}#end for loop extracting video pieces
#if necessary, combine segments
if(length(specParams$segWavs)>1){
tmpPaths<-paste0("file '",gsub(".wav","",unlist(outWAV)),".mp4' duration ",specParams$xLim[2])
writeLines(tmpPaths,paste0(tempdir,"mp4Segments.txt"))
#Turns out this was wrong or has been fixed!! MP4s CAN be combined!
# #Unfortunately, can't just slap MP4 files together, so have to have an intermediate .ts file step
# ffmpegTransCode<-paste0(ffmpeg_exec(),' -y -i "',unlist(file_path_sans_ext(outWAV)),'.mp4" -vsync 1 -c copy "',unlist(file_path_sans_ext(outWAV)),'.mkv"')
# invisible(sapply(ffmpegTransCode,system))
#now combine .ts files into .mp4
#For matching audio & video lengths:
cropSmplRt<-specParams$newWav@samp.rate
cropFileDur<-max(length(specParams$newWav@left),length(specParams$newWav@right))/cropSmplRt
# cropFileDur2<-seconds_to_period(cropFileDur)
# cropFileDur3<-sprintf(fmt='%02d:%02d:%2.3f',hour(cropFileDur2),minute(cropFileDur2),second(cropFileDur2))
#Concat Step 1
#concatenate mp4 segments
#slight stutter for continuous sounds across segments, but the alternative step below doesn't work quite right, so good enough
system(paste0(ari::ffmpeg_exec(),' -f concat -ss 00:00:00.000 -safe 0 -i "',paste0(tempdir,"mp4Segments.txt"),'" -codec copy -y "',vidName,'"') )
#Concat Step 2
#Add audio track back in (couldn't figure how to combine these steps)
#THIS STEP CURRENTLY DOESN'T WORK WELL (DROPS LAST FEW FRAMES B/C MISMATCH IN A/V LENGTHS)
# system(paste0(ari::ffmpeg_exec(),' -ss 0 -i "',paste0(tempdir,"deleteme.mp4"),'" -i "',newWavOut,'" -c:v libx264 -map 0:v:0 -map 1:a:0 -c:a aac -ac 1 -b:a 192k -y -vsync 1 -t ',cropFileDur3,' "',vidName,'"'))
#Old Concat Step 1 (when step 2 is implemented); results in deleteme.mp4 intermediate
# system(paste0(ari::ffmpeg_exec(),' -f concat -safe 0 -i "',paste0(tempdir,"mp4Segments.txt"),'" -codec copy -y "',paste0(tempdir,"deleteme.mp4"),'"'))
}
cat("\n\nAll done!\n")
cat(paste0("file saved @",vidName))
system(paste0('open "',vidName,'"'))
if(delTemps){unlink(tempdir,recursive=T);print(paste0("FYI temporary file directory deleted @ ",tempdir))}
}#end else which passed FFMPEG check
}#end paged_spectro definition
#create alias
pagedSpec<-paged_spectro |
## Find the best hospital in a state for a given outcome.
##
## Reads "outcome-of-care-measures.csv" from the working directory and
## returns the name of the hospital in `state` with the lowest 30-day
## mortality rate for `outcome`; ties are broken alphabetically by
## hospital name.
##
## Args:
##   state:   two-letter state/territory abbreviation, e.g. "TX".
##   outcome: one of "heart attack", "heart failure", "pneumonia".
##
## Stops with "invalid state" / "invalid outcome" on bad input.
best <- function(state, outcome) {
  ## One lookup table replaces the three near-identical filter/sort
  ## branches of the original implementation.
  outcome_cols <- c(
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  )
  df_outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Columns 11, 17 and 23 hold the three 30-day mortality rates as
  ## text; coerce to numeric, silencing the "NAs introduced by
  ## coercion" warnings triggered by "Not Available" entries.
  for (col in c(11, 17, 23)) {
    suppressWarnings(df_outcome[, col] <- as.numeric(df_outcome[, col]))
  }
  ## R's state.abb covers only the 50 states; add DC and the
  ## territories so they are accepted as valid.
  union_ter <- c("GU", "PR", "UM", "VI", "DC")
  us_states <- sort(append(state.abb, union_ter))
  if (!is.element(state, us_states))
    stop("invalid state")
  if (!is.element(outcome, names(outcome_cols)))
    stop("invalid outcome")
  rate_col <- outcome_cols[[outcome]]
  ## Keep rows for the requested state that have a reported rate, then
  ## sort by rate with hospital name as the tie-breaker.
  sel_state <- df_outcome[which(df_outcome$State == state), ]
  has_rate <- sel_state[which(!is.na(sel_state[[rate_col]])), ]
  ranked <- has_rate[order(has_rate[[rate_col]], has_rate$Hospital.Name), ]
  ## Hospital.Name is the second column of the source file.
  ranked[1, 2]
}
## Return the hospital in `state` with the lowest 30-day mortality for
## `outcome` ("heart attack", "heart failure", or "pneumonia").
## Ties on the rate are broken alphabetically by hospital name; invalid
## inputs stop with "invalid state" / "invalid outcome".
best <- function(state, outcome) {
  outcomes <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Coerce the three 30-day mortality columns (11, 17, 23) to numeric;
  ## "Not Available" entries become NA, so silence coercion warnings.
  for (col in c(11, 17, 23)) {
    suppressWarnings(outcomes[, col] <- as.numeric(outcomes[, col]))
  }
  ## state.abb holds the 50 states only; append DC and the territories.
  valid_states <- sort(append(state.abb, c("GU", "PR", "UM", "VI", "DC")))
  if (!is.element(state, valid_states)) {
    stop("invalid state")
  }
  in_state <- outcomes[which(outcomes$State == state), ]
  ## Pick the mortality-rate column for the requested outcome.
  rate_column <- switch(outcome,
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia",
    stop("invalid outcome")
  )
  ## Drop hospitals without a reported rate, then rank by rate with
  ## hospital name as the tie-breaker.
  with_rate <- in_state[which(!is.na(in_state[[rate_column]])), ]
  ranked <- with_rate[order(with_rate[[rate_column]], with_rate$Hospital.Name), ]
  ## Hospital.Name is the second column of the data set.
  ranked[1, 2]
}
# testthat unit test: the number of filtered communities found by
# cbce() should be close to the number of simulated bimodules.
context("Test filtering")
# Helper scripts defining sim_eQTL_network(), make_param_list(), and
# the evaluation metrics used by this test file.
source("sim_eQTL_network.R")
source("metrics.R")
library(rlist)
library(pipeR)
# Fix the RNG so the simulated network (and hence the test outcome)
# is reproducible across runs.
set.seed(1234556)
# Simulate an eQTL network; parameter semantics (cmin/cmax/b/bgmult)
# are defined in the sourced sim_eQTL_network.R helper.
sim1 <- sim_eQTL_network(make_param_list(cmin=10, cmax=30, b=5, bgmult=0.05))
test_that("Filtering works", {
  res <- cbce(sim1$X, sim1$Y)
  # The filtered community count should be within 2 of the number of
  # planted bimodules.
  expect_lte(abs(length(res$comms.fil) - length(sim1$bms)), 2)
})
source("sim_eQTL_network.R")
source("metrics.R")
library(rlist)
library(pipeR)
set.seed(1234556)
sim1 <- sim_eQTL_network(make_param_list(cmin=10, cmax=30, b=5, bgmult=0.05))
test_that("Filtering works", {
res <- cbce(sim1$X, sim1$Y)
expect_lte(abs(length(res$comms.fil) - length(sim1$bms)), 2)
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brandseyer.R
\docType{package}
\name{brandseyer}
\alias{brandseyer}
\alias{brandseye}
\alias{BrandsEye}
\alias{BrandsEyeR}
\alias{brandseyer-package}
\title{brandseyer: access and perform analytics on your BrandsEye accounts}
\description{
BrandsEye is a company that provides online monitoring, analytics,
and insights for social media data. We provide an extensive JSON / RESTful API
to access your data. This library is a wrapper for that data, providing
easy access to the data in your various BrandsEye accounts for use in
any R analyses and data visualisations.
}
\section{Starting points}{
To ease your use of the library, you should begin by adding your API key using
the authenticate function:
\verb{
authenticate(key = "<your api key>", save = TRUE)
}
If you do not know what your api key is, contact BrandsEye to find out.
After this, you can easily see a list of your available accounts using
\code{\link{list_account_codes}}. \code{\link{account_count}} will
let you pull aggregate information matching a given filter from one or more
of your accounts.
}
\section{Online resources}{
The home page is \url{https://github.com/brandseye/brandseyer/}
If you have found a bug in the library, you can report it using the
library's GitHub issue tracker: \url{https://github.com/brandseye/brandseyer/issues}
}
\seealso{
\code{\link{list_account_codes}} Find out what accounts you have access to.
\code{\link{account_count}} Query your account.
\code{\link{account}} Find out about general account queries.
}
| /man/brandseyer.Rd | permissive | brandseye/brandseyer | R | false | true | 1,607 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brandseyer.R
\docType{package}
\name{brandseyer}
\alias{brandseyer}
\alias{brandseye}
\alias{BrandsEye}
\alias{BrandsEyeR}
\alias{brandseyer-package}
\title{brandseyer: access and perform analytics on your BrandsEye accounts}
\description{
BrandsEye is a company that provides online monitoring, analytics,
and insights for social media data. We provide an extensive JSON / RESTful API
to access your data. This library is a wrapper for that data, providing
easy access to the data in your various BrandsEye accounts for use in
any R analyses and data visualisations.
}
\section{Starting points}{
To ease your use of the library, you should begin by adding your API key using
the authenticate function:
\verb{
authenticate(key = "<your api key>", save = TRUE)
}
If you do not know what your api key is, contact BrandsEye to find out.
After this, you can easily see a list of your available accounts using
\code{\link{list_account_codes}}. \code{\link{account_count}} will
let you pull aggregate information matching a given filter from one or more
of your accounts.
}
\section{Online resources}{
The home page is \url{https://github.com/brandseye/brandseyer/}
If you have found a bug in the library, you can report it using the
library's GitHub issue tracker: \url{https://github.com/brandseye/brandseyer/issues}
}
\seealso{
\code{\link{list_account_codes}} Find out what accounts you have access to.
\code{\link{account_count}} Query your account.
\code{\link{account}} Find out about general account queries.
}
|
\name{nw.atlantic.coast}
\alias{nw.atlantic.coast}
\title{
Coastline data for the North West Atlantic
}
\description{
Coastline data for the North West Atlantic, as downloaded using the NOAA Coastline Extractor tool.
}
\usage{
data(nw.atlantic.coast)
}
\details{
Coastline data for the NW Atlantic was obtained using the NOAA Coastline Extractor tool. To get more coastline data, go to \url{http://www.ngdc.noaa.gov/mgg_coastline/}, use the Java applet to select the area of interest, or enter coordinates manually. "Coast Format options" should be "Splus".
}
\value{
A 2-column data frame
}
\references{
see \url{http://www.ngdc.noaa.gov/mgg_coastline/}
}
\seealso{
\code{\link{nw.atlantic}}
}
\examples{
# load NW Atlantic data and convert to class bathy
data(nw.atlantic,nw.atlantic.coast)
atl <- as.bathy(nw.atlantic)
## the function plot below plots only isobaths:
## - isobaths between 8000-4000 in light grey,
## - isobaths between 4000-500 in dark grey (to emphasize seamounts)
plot(atl, deep=c(-8000,-4000), shallow=c(-4000,-500), step=c(500,500),
lwd=c(0.5,0.5,1.5),lty=c(1,1,1),
col=c("grey80", "grey20", "blue"),
drawlabels=c(FALSE,FALSE,FALSE) )
## the coastline can be added from a different source,
## and can therefore have a different resolution:
lines(nw.atlantic.coast)
## add a geographical reference on the coast:
points(-71.064,42.358, pch=19); text(-71.064,42.358,"Boston", adj=c(1.2,0))
}
| /man/nw.atlantic.coast.Rd | no_license | abfleishman/marmap | R | false | false | 1,435 | rd | \name{nw.atlantic.coast}
\alias{nw.atlantic.coast}
\title{
Coastline data for the North West Atlantic
}
\description{
Coastline data for the North West Atlantic, as downloaded using the NOAA Coastline Extractor tool.
}
\usage{
data(nw.atlantic.coast)
}
\details{
Coastline data for the NW Atlantic was obtained using the NOAA Coastline Extractor tool. To get more coastline data, go to \url{http://www.ngdc.noaa.gov/mgg_coastline/}, use the Java applet to select the area of interest, or enter coordinates manually. "Coast Format options" should be "Splus".
}
\value{
A 2-column data frame
}
\references{
see \url{http://www.ngdc.noaa.gov/mgg_coastline/}
}
\seealso{
\code{\link{nw.atlantic}}
}
\examples{
# load NW Atlantic data and convert to class bathy
data(nw.atlantic,nw.atlantic.coast)
atl <- as.bathy(nw.atlantic)
## the function plot below plots only isobaths:
## - isobaths between 8000-4000 in light grey,
## - isobaths between 4000-500 in dark grey (to emphasize seamounts)
plot(atl, deep=c(-8000,-4000), shallow=c(-4000,-500), step=c(500,500),
lwd=c(0.5,0.5,1.5),lty=c(1,1,1),
col=c("grey80", "grey20", "blue"),
drawlabels=c(FALSE,FALSE,FALSE) )
## the coastline can be added from a different source,
## and can therefore have a different resolution:
lines(nw.atlantic.coast)
## add a geographical reference on the coast:
points(-71.064,42.358, pch=19); text(-71.064,42.358,"Boston", adj=c(1.2,0))
}
|
# Appears to be an auto-generated fuzz/regression harness: build a
# fixed argument list (an 8x3 numeric matrix plus NULL/zero inputs),
# call an internal CNull routine via do.call(), and print the result's
# structure to surface crashes or malformed output.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307881326e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
# Call the internal CNull sampler with the pre-built `testlist`
# argument list (expected to be defined earlier in the file) and print
# the structure of whatever it returns.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
#' Main function for detecting and evaluating significance of DMRs.
#'
#' Performs a two-step approach that (1) detects candidate regions, and
#' (2) scores candidate regions with an exchangeable (across the genome)
#' statistic and evaluates statistical significance using a
#' permutation test on the pooled null distribution of scores.
#'
#' @param bs bsseq object containing the methylation values as well as the
#' phenotype matrix that contains sample level covariates
#' @param testCovariate Character value indicating which variable
#' (column name) in \code{pData(bs)} to test
#' for association of methylation levels.
#' Can alternatively specify an integer value indicating
#' which column of
#' \code{pData(bs)} to use. This is used to construct the
#' design matrix for the test statistic calculation. To run using a
#' continuous or categorical covariate with more than two groups, simply pass in
#' the name of a column in `pData` that contains this covariate. A continuous
#' covariate is assumed if the data type in the `testCovariate` slot is
#' continuous, with the exception of if there are only two unique values
#' (then a two group comparison is carried out).
#' @param adjustCovariate an (optional) character value or vector
#' indicating which variables (column names) in \code{pData(bs)}
#' will be adjusted for when
#' testing for the association of methylation value with the
#' \code{testCovariate}.
#' Can alternatively specify an
#' integer value or vector indicating
#' which of the columns of \code{pData(bs)} to adjust for.
#' If not NULL (default), then this is also used to
#' construct the design matrix for the test statistic calculation.
#' @param matchCovariate an (optional) character value
#' indicating which variable (column name) of \code{pData(bs)}
#' will be blocked for when
#' constructing the permutations in order to
#' test for the association of methylation value with the
#' \code{testCovariate}.
#' Alternatively, you can specify an integer value indicating
#' which column of \code{pData(bs)} to block for.
#' Blocking means that only permutations with balanced
#' composition of \code{testCovariate} values will be used (for example if
#' you have samples from different gender and this is not your covariate of
#' interest,
#' it is recommended to use gender as a matching covariate to avoid one
#' of the permutations testing entirely males versus females; this violates
#' the null hypothesis and will decrease power).
#' If not NULL (default), then no blocking is performed.
#' @param minInSpan positive integer that represents the minimum number of
#' CpGs in a smoothing span window if \code{smooth} is TRUE.
#' Default value is 30.
#' @param minNumRegion positive integer that represents the minimum number of
#' CpGs to consider for a candidate region. Default value is 5.
#' Minimum value is 3.
#' @param cutoff scalar value that represents the absolute value (or a vector
#' of two numbers representing a lower and upper bound) for the cutoff of
#' the single CpG coefficient that is used to discover
#' candidate regions. Default value is 0.10.
#' @param smooth logical value that indicates whether or not to smooth the
#' CpG level signal when discovering candidate regions.
#' Defaults to TRUE.
#' @param bpSpan a positive integer that represents the length in basepairs
#' of the smoothing span window if \code{smooth} is TRUE. Default value is
#' 1000.
#' @param verbose logical value that indicates whether progress messages
#' should be printed to stdout. Defaults value is TRUE.
#' @param BPPARAM a \code{BiocParallelParam} object to specify the parallel
#' backend. The default
#' option is \code{BiocParallel::bpparam()} which will automatically creates
#' a cluster appropriate for the operating system.
#' @param maxPerms a positive integer that represents the maximum number
#' of permutations that will be used to generate the global null
#' distribution of test statistics. Default value is 10.
#' @param maxGap integer value representing maximum number of basepairs in
#' between neighboring CpGs to be included in the same DMR.
#' @param maxGapSmooth integer value representing maximum number of basepairs
#' in between neighboring CpGs to be included in the same
#' cluster when performing smoothing (should generally be larger than
#' \code{maxGap})
#' @param stat a character vector indicating the name of the column of the
#' output to use as the region-level test statistic. Default value is 'stat'
#' which is the region level-statistic designed to be comparable across the
#' genome.
#' It is not recommended to change this argument, but it can be done for
#' experimental purposes. Possible values are: 'L' - the number of loci
#' in the region, 'area' - the sum of the smoothed loci statistics,
#' 'beta' - the effect size of the region, 'stat' - the test statistic for
#' the region, or 'avg' - the average smoothed loci statistic.
#' @param block logical indicating whether to search for large-scale (low
#' resolution) blocks of differential methylation (default is FALSE, which
#' means that local DMRs are desired). If TRUE, the parameters for
#' \code{bpSpan}, \code{minInSpan}, and \code{maxGapSmooth} should be adjusted
#' (increased) accordingly. This setting will also merge
#' candidate regions that (1) are in the same direction and (2) are less than
#' 1kb apart with no covered CpGs separating them. The region-level model used
#' is also slightly modified - instead of a loci-specific intercept for each
#' CpG in the region, the intercept term is modeled as a natural spline with
#' one interior knot per each 10kb of length (up to 10 interior knots).
#' @param blockSize numeric value indicating the minimum number of basepairs
#' to be considered a block (only used if \code{block}=TRUE). Default is
#' 5000 basepairs.
#' @param chrsPerChunk a positive integer value indicating the number of
#' chromosomes per chunk. The default is 1, meaning that the data will be
#' looped through one chromosome at a time. When pairing up multiple
#' chromosomes per chunk, sizes (in terms of numbers of CpGs) will be taken
#' into consideration to balance the sizes of each chunk.
#' @return a \code{GRanges} object that contains the results of the inference.
#' The object contains one row for each candidate region, sorted by q-value
#' and then chromosome. The standard
#' \code{GRanges} chr, start, and end are included, along with at least
#' 7 metadata
#' columns, in the following order:
#' 1. L = the number of CpGs contained in the region,
#' 2. area = the sum of the smoothed beta values
#' 3. beta = the coefficient value for the condition difference (there
#' will be more than one column here if a multi-group comparison
#' was performed),
#' 4. stat = the test statistic for the condition difference,
#' 5. pval = the permutation p-value for the significance of the test
#' statistic, and
#' 6. qval = the q-value for the test statistic (adjustment
#' for multiple comparisons to control false discovery rate).
#' 7. index = an \code{IRanges} containing the indices of the region's
#' first CpG to last CpG.
#'
#' @keywords inference
#' @importFrom outliers grubbs.test
#' @importFrom bumphunter clusterMaker getSegments
#' @importFrom DelayedMatrixStats colMedians rowMads rowSums2 rowMeans2 rowDiffs
#' @importFrom matrixStats rowRanges
#' @importFrom stats formula anova as.formula
#'
#' @importClassesFrom bsseq BSseq
#' @importMethodsFrom bsseq pData seqnames sampleNames start width
#'
#' @importFrom grDevices col2rgb colorRampPalette dev.off pdf rgb
#' @importFrom graphics axis layout legend lines mtext par
#' plot points rect rug text
#' @importFrom methods is
#' @importFrom stats approxfun lm loess median model.matrix p.adjust
#' predict preplot qt quantile rbeta rbinom runif
#' @importFrom utils combn
#' @importFrom BiocParallel bplapply register MulticoreParam bpparam
#' @importFrom splines ns
#'
#' @import bsseq
#' @import GenomicRanges
#' @import nlme
#' @import annotatr
#' @import ggplot2
#' @import S4Vectors
#'
#' @export
#'
#' @examples
#'
#' # load example data
#' data(BS.chr21)
#'
#' # the covariate of interest is the 'CellType' column of pData(BS.chr21)
#' testCovariate <- 'CellType'
#'
#' # run dmrseq on a subset of the chromosome (10K CpGs)
#' regions <- dmrseq(bs=BS.chr21[240001:250000,],
#' cutoff = 0.05,
#' testCovariate=testCovariate)
#'
dmrseq <- function(bs, testCovariate, adjustCovariate = NULL, cutoff = 0.1,
minNumRegion = 5, smooth = TRUE, bpSpan = 1000,
minInSpan = 30, maxGapSmooth = 2500, maxGap = 1000,
verbose = TRUE,
maxPerms = 10, matchCovariate = NULL,
BPPARAM = bpparam(), stat = "stat",
block = FALSE, blockSize = 5000,
chrsPerChunk = 1) {
stopifnot(is(bs, "BSseq"))
if (!(is.null(cutoff) || length(cutoff) %in% seq_len(2)))
stop("'cutoff' has to be either NULL or a vector of length 1 or 2")
if (length(cutoff) == 2)
cutoff <- sort(cutoff)
if (is.null(cutoff) | abs(cutoff) > 1 | abs(cutoff) == 0)
stop("Must specify a value for cutoff between 0 and 1")
subverbose <- max(as.integer(verbose) - 1L, 0)
if(minNumRegion < 3){
stop("minNumRegion must be at least 3")
}
# check statistic name
if (!(stat %in% c("L", "area", "beta", "stat", "avg"))) {
stop("Specified '", stat,
"' as the test statistic which is not ",
"in the results. Please specify a valid name from one of ",
"L, area, beta, stat, or avg")
}
# informative message about blocks if block=TRUE; check for increased
# smoothing window
if (block){
message("Searching for large scale blocks with at least ",
blockSize, " basepairs.")
if(minInSpan < 100 && bpSpan < 2000 && maxGapSmooth < 1e5){
warning("When block=TRUE, it is recommended to increase the values ",
"of minInSpan, bpSpan, and maxGapSmooth in order to widen ",
"the smoothing window")
}
}
# convert covariates to column numbers if characters
if (is.character(testCovariate)) {
if(length(testCovariate) > 1)
stop("Only one testCovariate can be specified")
if(is.character(adjustCovariate)){
if(sum(testCovariate %in% adjustCovariate) > 0)
stop("adjustCovariate can't contain testCovariate")
}
if(is.character(matchCovariate)){
if(sum(testCovariate %in% matchCovariate))
stop("matchCovariate can't contain testCovariate")
}
testCovariate <- which(colnames(pData(bs)) == testCovariate)
if (length(testCovariate) == 0) {
stop("testCovariate not found in pData(). ",
"Please specify a valid testCovariate")
}
}
if (is.character(adjustCovariate)) {
if(is.character(matchCovariate)){
if(matchCovariate == adjustCovariate)
stop("matchCovariate can't be identical to adjustCovariate")
}
adjustCovariate <- which(colnames(pData(bs)) %in% adjustCovariate)
if (length(adjustCovariate) == 0) {
stop("adjustCovariate not found in pData(). ",
"Please specify a valid adjustCovariate")
}
}
# check that chrsPerChunk value makes sense
if (chrsPerChunk != 1){
if (chrsPerChunk%%1 != 0){
stop("chrsPerChunk must be an integer")
}else if(chrsPerChunk < 1){
stop("chrsPerChunk must be strictly positive")
}else if(chrsPerChunk > length(unique(seqnames(bs)))){
stop("chrsPerChunk can't be larger than the total",
" number of chromosomes")
}else if(!identical(as.character(seqnames(bs)@values), seqlevels(bs))){
stop("BSseq object must be ordered if breaking computation ",
"into multiple chromosomes per chunk (see bsseq::orderBSseq())")
}
}
# construct the design matrix using the pData of bs
if (ncol(pData(bs)) < max(testCovariate, adjustCovariate)) {
stop("Error: pData(bs) has too few columns. ","
Please specify valid ",
"covariates to use in the analysis")
}
coeff <- seq(2,(2 + length(testCovariate) - 1))
testCov <- pData(bs)[, testCovariate]
fact <- TRUE
if (length(unique(testCov)) == 1) {
message("Warning: only one unique value of the specified ",
"covariate of interest. Assuming null comparison and ",
"splitting sample group into two equal groups")
testCov <- rep(1, length(testCov))
testCov[seq_len(round(length(testCov)/2))] <- 0
}else if (length(unique(testCov)) > 2 && !is.numeric(testCov)) {
message("Performing a global test of H0: no difference among ",
length(unique(testCov)), " groups (assuming the test ",
"covariate ", colnames(pData(bs))[testCovariate],
" is a factor).")
coeff <- c(coeff, coeff + length(unique(testCov)) - 2)
}else if (length(unique(testCov)) > 2 && is.numeric(testCov)) {
message("Assuming the test ",
"covariate ", colnames(pData(bs))[testCovariate],
" is continuous.")
fact <- FALSE
}else{
message("Assuming the test ",
"covariate ", colnames(pData(bs))[testCovariate],
" is a factor.")
if(min(table(testCov)) < 2)
stop("At least one group has only one sample! ",
"Replicates are required to run dmrseq.")
testCov <- as.factor(testCov)
}
sampleSize <- table(testCov)
if (!is.null(adjustCovariate)) {
mmdat <- data.frame(testCov = testCov)
adjustCov <- pData(bs)[, adjustCovariate, drop = FALSE]
mmdat <- cbind(mmdat, adjustCov)
frm <- paste0("~", paste0(colnames(mmdat), collapse = " + "))
design <- model.matrix(as.formula(frm), data=mmdat)
colnames(design)[coeff] <- colnames(pData(bs))[testCovariate]
colnames(design)[seq((max(coeff) + 1), ncol(design))] <-
colnames(pData(bs))[adjustCovariate]
coeff.adj <- which(colnames(design) ==
colnames(pData(bs))[adjustCovariate])
} else {
design <- model.matrix(~testCov)
colnames(design)[coeff] <- colnames(pData(bs))[testCovariate]
coeff.adj <- NULL
}
# check for interaction terms (not yet supported)
if (length(coeff) > 1 && any(rowSums(design[,coeff]) > 1))
stop("Interaction terms in testCovariate are not yet supported.")
if (length(unique(testCov)) == 2) {
message("Condition: ",
unique(pData(bs)[, testCovariate][which(design[, coeff] == 1)]),
" vs ",
unique(pData(bs)[, testCovariate][which(design[, coeff] == 0)]))
}
if (!is.null(adjustCovariate)) {
message("Adjusting for covariate: ",
paste(colnames(pData(bs))[adjustCovariate], collapse = ", "))
}
if (!is.null(matchCovariate)) {
if (length(matchCovariate) > 1)
stop("Covariate matching can only be carried out for one",
" covariate")
if (length(unique(testCov)) > 2)
stop("Covariate matching can only be carried out for 2-group",
" comparisons")
if (is.character(matchCovariate)) {
if (sum(grepl(matchCovariate, colnames(pData(bs)))) == 0) {
stop("Error: no column in pData() found that matches ",
"the matchCovariate")
} else if (length(grep(matchCovariate, colnames(pData(bs)))) > 1) {
stop("Error: matchCovariate matches more than one ",
"column in pData()")
}
mC <- grep(matchCovariate, colnames(pData(bs)))
} else {
stopifnot(matchCovariate <= ncol(pData(bs)))
}
message("Matching permutations on covariate: ",
colnames(pData(bs))[mC])
}
# check for loci with missing data
if (fact){
lev <- unique(pData(bs)[[testCovariate]])
filter <- NULL
for (l in seq_along(lev)){
filter <- rbind(filter,
1*(DelayedMatrixStats::rowSums2(getCoverage(bs)[,pData(bs)[[testCovariate]] ==
lev[l]]) == 0))
}
filter <- which( apply(filter, 2, max) > 0 )
if (length(filter) > 0) {
stop(length(filter), " loci have zero coverage in all samples ",
"of at least one condition. Please remove these loci ",
"before running dmrseq")
}
}else{
filter <- DelayedMatrixStats::rowSums2(getCoverage(bs)==0) >= ncol(bs) - 1
if(sum(filter) > 0)
stop(sum(filter), " loci have zero coverage in at least ",
ncol(bs) - 1, " samples. Please remove these loci ",
"before running dmrseq")
}
# register the parallel backend
BiocParallel::register(BPPARAM)
backend <- paste0("BiocParallel:", class(bpparam())[1])
if (bpparam()$workers == 1) {
if (verbose) {
mes <- "Using a single core (backend: %s)."
message(sprintf(mes, backend))
}
parallel <- FALSE
} else {
if (verbose) {
mes <- paste0("Parallelizing using %s workers/cores ",
"(backend: %s).")
message(sprintf(mes, bpparam()$workers, backend))
}
parallel <- TRUE
}
message("Computing on ", chrsPerChunk,
" chromosome(s) at a time.\n")
message("Detecting candidate regions with coefficient larger than ",
unique(abs(cutoff)),
" in magnitude.")
OBS <- bumphunt(bs=bs, design = design,
coeff = coeff, coeff.adj = coeff.adj, minInSpan = minInSpan,
minNumRegion = minNumRegion, cutoff = cutoff,
maxGap = maxGap, maxGapSmooth = maxGapSmooth,
smooth = smooth, bpSpan = bpSpan, verbose = verbose,
parallel = parallel, block = block, blockSize = blockSize,
chrsPerChunk = chrsPerChunk, fact = fact)
# check that at least one candidate region was found; if there were none
# there is no need to go on to compute permutation tests...
if (length(OBS) > 0) {
message("* ", nrow(OBS), " candidates detected")
FLIP <- NULL
# configure the permutation matrix for two group comparisons
if (length(unique(design[, coeff[1]])) == 2 &&
length(coeff) == 1 &&
choose(nrow(design), min(sampleSize)) < 5e5 ) {
if (verbose) {
message("Performing balanced permutations of ",
"condition across samples ",
"to generate a null distribution of region test statistics")
}
perms <- combn(seq(1, nrow(design)), min(sampleSize))
# Remove redundant permutations (if balanced)
if (length(unique(table(design[,coeff]))) == 1){
perms <- perms[, seq_len(ncol(perms)/2)]
}
# restrict to unique permutations that don't include any
# groups consisting of all identical conditions
rmv <- NULL
for (p in seq_len(ncol(perms))){
if (length(unique(design[perms[,p],coeff])) == 1){
rmv <- c(rmv, p)
}
}
if (length(rmv) > 0 )
perms <- perms[,-rmv]
# subsample permutations based on similarity to original partition
# gives preference to those with the least similarity
if (maxPerms < ncol(perms)) {
similarity <- apply(perms, 2, function(x) {
max(table(design[x,coeff]))
})
perms.all <- perms
perms <- NULL
levs <- sort(unique(similarity))
l <- 1
num <- 0
while(!(num == maxPerms) && l <= length(levs)) {
keep <- sample(which(similarity == levs[l]),
min(maxPerms-num, sum(similarity == levs[l])) )
perms <- cbind(perms, perms.all[,keep])
l <- l + 1
num <- ncol(perms)
}
}
} else {
# Next consider a multilevel, or continuous covariate where the
# covariate will be permuted in an unrestricted manner
if (verbose) {
message("Performing unrestricted permutation of",
" covariate of interest across samples ",
"to generate a null distribution of region test statistics")
}
perms <- as.matrix(seq_len(nrow(design)))
for (p in seq_len(maxPerms)) {
tries <- 0
candidate <- sample(seq_len(nrow(design)), nrow(design))
# check that the permutation is not a duplicate, and not
# equal to the original
while ((sum(apply(perms, 2, function(x)
all.equal(x, candidate)) == TRUE) > 0 ||
sum(apply(perms, 2, function(x)
all.equal(x, rev(candidate))) == TRUE) > 0) &&
tries <= 20) {
candidate <- sample(seq(seq_len(nrow(design))), nrow(design))
tries <- tries + 1
}
# save the permutation to the permutation matrix
if (tries <= 20){
perms <- cbind(perms, candidate)
}
}
perms <- perms[,-1] # remove original
}
pData.orig <- pData(bs)
levs <- unique(pData.orig[[testCovariate]])
# Now rerun on permuted designs and concatenate results
for (j in seq_len(ncol(perms))) {
if (verbose) {
message("\nBeginning permutation ", j)
}
reorder <- perms[, j]
designr <- design
if (length(unique(design[, coeff[1]])) == 2 &&
length(coeff) == 1 &&
!nrow(perms) == nrow(designr)) {
designr[, coeff] <- 0
designr[reorder, coeff] <- 1
pData(bs)[[testCovariate]] <- levs[1]
pData(bs)[[testCovariate]][reorder] <- levs[2]
if (!all(sort(pData.orig[[testCovariate]]) ==
sort(pData(bs)[[testCovariate]]))){
designr[, coeff] <- 1
designr[reorder, coeff] <- 0
pData(bs)[[testCovariate]] <- levs[2]
pData(bs)[[testCovariate]][reorder] <- levs[1]
}
xr <- NULL
for (rd in seq_len(nrow(pData.orig))) {
match <- which(pData.orig[[testCovariate]] %in%
pData(bs)[rd,][[testCovariate]])
taken <- which(match %in% xr)
if (length(taken) > 0)
match <- match[-taken]
if (length(match) > 0)
xr <- c(xr, match[1])
}
if(length(coeff.adj) > 0){
pData(bs)[,adjustCovariate] <-
pData.orig[xr,adjustCovariate]
}
} else {
designr[, coeff] <- designr[reorder, coeff]
pData(bs) <- pData.orig[reorder, , drop = FALSE]
}
# if matchCovariate is not null, restrict permutations such that
# null comparisons are balanced for the values of
# pData$matchCovariate this avoids comparison of,
# say two different individuals in the null, that the comparison of
# interest is tissue type. Not matching would mean the null is
# really not null
if (!is.null(matchCovariate)) {
permLabel <- paste0(paste0(pData(bs)[designr[, coeff[1]] == 1,
mC], collapse = "_"),
"vs", paste0(pData(bs)[(1 - designr[, coeff[1]]) == 1,
mC], collapse = "_"))
c1 <- unlist(strsplit(permLabel, "vs"))[1]
c2 <- unlist(strsplit(permLabel, "vs"))[2]
c1 <- unlist(strsplit(c1, "_"))
c2 <- unlist(strsplit(c2, "_"))
keepPerm <- 1 * (sum(c1 %in% c2) > 0 &&
sum(c2 %in% c1) > 0)
if (keepPerm == 0) {
if (verbose) {
message(paste0("Skipping permutation ",
gsub("vs", " vs ", permLabel)))
}
next
}
} else {
permLabel <- j
}
res.flip.p <- bumphunt(bs=bs, design = designr,
coeff = coeff,
coeff.adj = coeff.adj,
minInSpan = minInSpan,
minNumRegion = minNumRegion, cutoff = cutoff,
maxGap = maxGap, maxGapSmooth = maxGapSmooth,
smooth = smooth, bpSpan = bpSpan,
verbose = verbose, parallel = parallel,
block = block, blockSize = blockSize,
chrsPerChunk = chrsPerChunk, fact = fact)
if (verbose) {
message("* ", j, " out of ", ncol(perms),
" permutations completed (",
nrow(res.flip.p), " null candidates)")
}
if (!is.null(res.flip.p)) {
res.flip.p$permNum <- permLabel
FLIP <- rbind(FLIP, res.flip.p)
}
}
# restore original pData
pData(bs) <- pData.orig
# if no candidates were found in permutation
# provide informative error message
if (is.null(FLIP)){
warning("No candidate regions found in permutation, so inference ",
"can't be carried out. ",
"Try decreasing the cutoff, or running on a larger ",
"dataset if you are currently using a subset.")
OBS$pval <- NA
OBS$qval <- NA
}else if (nrow(FLIP) < 0.05*nrow(OBS)){
message("Note: Very few null candidate regions were found.",
"For more accurate and sensitive inference, ",
"try decreasing the cutoff, or running on a larger ",
"dataset if you are currently using a subset.")
}
if (!is.null(FLIP)){
# if there are more than 1 million candidate null regions,
# take a random sample
# of 1 million of them
if (nrow(FLIP) > 1e+06) {
rs <- sample(seq_len(nrow(FLIP)), 1e+06, replace = FALSE)
FLIP <- FLIP[rs, ]
}
# which column of results to use as test statistic ?
# check statistic name
if (!(stat %in% c(colnames(OBS), "avg"))) {
stop("Specified '", stat,
"' as the test statistic which is not ",
"in the results. Please specify a valid name from one of ",
"L, area, beta, or stat")
} else if (stat == "avg") {
OBS$avg <- OBS$area/OBS$L
FLIP$avg <- FLIP$area/FLIP$L
}
whichStatO <- which(colnames(OBS) == stat)
whichStatF <- which(colnames(FLIP) == stat)
# Faster way to compute the p-values that doesn't use multiple cores
# Step 1: sort the permuted statistics vector
perm.ordered <- c(sort(abs(FLIP[, whichStatF]),
method = "quick"), Inf)
# Step 2: find the first instance in the sorted vector where the
# permuted value is greater than the observed and use this to
# determine the number of permuted values that are greater than or
            # equal to the observed
pval <- rep(NA, nrow(OBS))
pval[!is.na(OBS[, whichStatO])] <- (1 +
vapply(abs(OBS[!is.na(OBS[, whichStatO]), whichStatO]),
function(x) length(perm.ordered) - min(which(x <= perm.ordered)),
numeric(1))) / (1 + sum(!is.na(FLIP[, whichStatF])))
# missing test statistics cause Inf for the p-value calculation
# instead propagate the missing values
pval[abs(pval) == Inf] <- NA
pval <- data.frame(x = pval, y = p.adjust(pval, method = "BH"))
OBS$pval <- pval$x
OBS$qval <- pval$y
}
# convert output into GRanges, with indexStart/indexEnd as IRanges
indexIR <- IRanges(OBS$indexStart, OBS$indexEnd)
OBS.gr <- makeGRangesFromDataFrame(OBS[,-c(4:5)],
keep.extra.columns = TRUE)
OBS.gr$index <- indexIR
names(OBS.gr) <- NULL
        # sort on pval overall (currently sorted within chromosome)
OBS.gr <- OBS.gr[order(OBS.gr$pval, -abs(OBS.gr$stat)),]
return(OBS.gr)
} else {
message("No candidate regions pass the cutoff of ", unique(abs(cutoff)))
return(NULL)
}
}
| /R/dmrseq.R | permissive | trichelab/dmrseq | R | false | false | 30,298 | r | #' Main function for detecting and evaluating significance of DMRs.
#'
#' Performs a two-step approach that (1) detects candidate regions, and
#' (2) scores candidate regions with an exchangeable (across the genome)
#' statistic and evaluates statistical significance using a
#' permuation test on the pooled null distribution of scores.
#'
#' @param bs bsseq object containing the methylation values as well as the
#' phenotype matrix that contains sample level covariates
#' @param testCovariate Character value indicating which variable
#' (column name) in \code{pData(bs)} to test
#' for association of methylation levels.
#' Can alternatively specify an integer value indicating
#' which of column of
#' \code{pData(bs)} to use. This is used to construct the
#' design matrix for the test statistic calculation. To run using a
#' continuous or categorial covariate with more than two groups, simply pass in
#' the name of a column in `pData` that contains this covariate. A continuous
#' covariate is assmued if the data type in the `testCovariate` slot is
#' continuous, with the exception of if there are only two unique values
#' (then a two group comparison is carried out).
#' @param adjustCovariate an (optional) character value or vector
#' indicating which variables (column names) in \code{pData(bs)}
#' will be adjusted for when
#' testing for the association of methylation value with the
#' \code{testCovariate}.
#' Can alternatively specify an
#' integer value or vector indicating
#' which of the columns of \code{pData(bs)} to adjust for.
#' If not NULL (default), then this is also used to
#' construct the design matrix for the test statistic calculation.
#' @param matchCovariate an (optional) character value
#' indicating which variable (column name) of \code{pData(bs)}
#' will be blocked for when
#' constructing the permutations in order to
#' test for the association of methylation value with the
#' \code{testCovariate}.
#' Alternatively, you can specify an integer value indicating
#' which column of \code{pData(bs)} to block for.
#' Blocking means that only permutations with balanced
#' composition of \code{testCovariate} values will be used (for example if
#' you have samples from different gender and this is not your covariate of
#' interest,
#' it is recommended to use gender as a matching covariate to avoid one
#' of the permutations testing entirely males versus females; this violates
#' the null hypothesis and will decrease power).
#' If not NULL (default), then no blocking is performed.
#' @param minInSpan positive integer that represents the minimum number of
#' CpGs in a smoothing span window if \code{smooth} is TRUE.
#' Default value is 30.
#' @param minNumRegion positive integer that represents the minimum number of
#' CpGs to consider for a candidate region. Default value is 5.
#' Minimum value is 3.
#' @param cutoff scalar value that represents the absolute value (or a vector
#' of two numbers representing a lower and upper bound) for the cutoff of
#' the single CpG coefficient that is used to discover
#' candidate regions. Default value is 0.10.
#' @param smooth logical value that indicates whether or not to smooth the
#' CpG level signal when discovering candidate regions.
#' Defaults to TRUE.
#' @param bpSpan a positive integer that represents the length in basepairs
#' of the smoothing span window if \code{smooth} is TRUE. Default value is
#' 1000.
#' @param verbose logical value that indicates whether progress messages
#' should be printed to stdout. Defaults value is TRUE.
#' @param BPPARAM a \code{BiocParallelParam} object to specify the parallel
#' backend. The default
#' option is \code{BiocParallel::bpparam()} which will automatically creates
#' a cluster appropriate for the operating system.
#' @param maxPerms a positive integer that represents the maximum number
#' of permutations that will be used to generate the global null
#' distribution of test statistics. Default value is 10.
#' @param maxGap integer value representing maximum number of basepairs in
#' between neighboring CpGs to be included in the same DMR.
#' @param maxGapSmooth integer value representing maximum number of basepairs
#' in between neighboring CpGs to be included in the same
#' cluster when performing smoothing (should generally be larger than
#' \code{maxGap})
#' @param stat a character vector indicating the name of the column of the
#' output to use as the region-level test statistic. Default value is 'stat'
#' which is the region level-statistic designed to be comparable across the
#' genome.
#' It is not recommended to change this argument, but it can be done for
#' experimental purposes. Possible values are: 'L' - the number of loci
#' in the region, 'area' - the sum of the smoothed loci statistics,
#' 'beta' - the effect size of the region, 'stat' - the test statistic for
#' the region, or 'avg' - the average smoothed loci statistic.
#' @param block logical indicating whether to search for large-scale (low
#' resolution) blocks of differential methylation (default is FALSE, which
#' means that local DMRs are desired). If TRUE, the parameters for
#' \code{bpSpan}, \code{minInSpan}, and \code{maxGapSmooth} should be adjusted
#' (increased) accordingly. This setting will also merge
#' candidate regions that (1) are in the same direction and (2) are less than
#' 1kb apart with no covered CpGs separating them. The region-level model used
#' is also slightly modified - instead of a loci-specific intercept for each
#' CpG in the region, the intercept term is modeled as a natural spline with
#' one interior knot per each 10kb of length (up to 10 interior knots).
#' @param blockSize numeric value indicating the minimum number of basepairs
#' to be considered a block (only used if \code{block}=TRUE). Default is
#' 5000 basepairs.
#' @param chrsPerChunk a positive integer value indicating the number of
#' chromosomes per chunk. The default is 1, meaning that the data will be
#' looped through one chromosome at a time. When pairing up multiple
#' chromosomes per chunk, sizes (in terms of numbers of CpGs) will be taken
#' into consideration to balance the sizes of each chunk.
#' @return a \code{GRanges} object that contains the results of the inference.
#' The object contains one row for each candidate region, sorted by q-value
#' and then chromosome. The standard
#' \code{GRanges} chr, start, and end are included, along with at least
#' 7 metadata
#' columns, in the following order:
#' 1. L = the number of CpGs contained in the region,
#' 2. area = the sum of the smoothed beta values
#' 3. beta = the coefficient value for the condition difference (there
#' will be more than one column here if a multi-group comparison
#' was performed),
#' 4. stat = the test statistic for the condition difference,
#' 5. pval = the permutation p-value for the significance of the test
#' statistic, and
#' 6. qval = the q-value for the test statistic (adjustment
#' for multiple comparisons to control false discovery rate).
#' 7. index = an \code{IRanges} containing the indices of the region's
#' first CpG to last CpG.
#'
#' @keywords inference
#' @importFrom outliers grubbs.test
#' @importFrom bumphunter clusterMaker getSegments
#' @importFrom DelayedMatrixStats colMedians rowMads rowSums2 rowMeans2 rowDiffs
#' @importFrom matrixStats rowRanges
#' @importFrom stats formula anova as.formula
#'
#' @importClassesFrom bsseq BSseq
#' @importMethodsFrom bsseq pData seqnames sampleNames start width
#'
#' @importFrom grDevices col2rgb colorRampPalette dev.off pdf rgb
#' @importFrom graphics axis layout legend lines mtext par
#' plot points rect rug text
#' @importFrom methods is
#' @importFrom stats approxfun lm loess median model.matrix p.adjust
#' predict preplot qt quantile rbeta rbinom runif
#' @importFrom utils combn
#' @importFrom BiocParallel bplapply register MulticoreParam bpparam
#' @importFrom splines ns
#'
#' @import bsseq
#' @import GenomicRanges
#' @import nlme
#' @import annotatr
#' @import ggplot2
#' @import S4Vectors
#'
#' @export
#'
#' @examples
#'
#' # load example data
#' data(BS.chr21)
#'
#' # the covariate of interest is the 'CellType' column of pData(BS.chr21)
#' testCovariate <- 'CellType'
#'
#' # run dmrseq on a subset of the chromosome (10K CpGs)
#' regions <- dmrseq(bs=BS.chr21[240001:250000,],
#' cutoff = 0.05,
#' testCovariate=testCovariate)
#'
dmrseq <- function(bs, testCovariate, adjustCovariate = NULL, cutoff = 0.1,
                minNumRegion = 5, smooth = TRUE, bpSpan = 1000,
                minInSpan = 30, maxGapSmooth = 2500, maxGap = 1000,
                verbose = TRUE,
                maxPerms = 10, matchCovariate = NULL,
                BPPARAM = bpparam(), stat = "stat",
                block = FALSE, blockSize = 5000,
                chrsPerChunk = 1) {
    # ---- argument validation ----
    stopifnot(is(bs, "BSseq"))
    if (!(is.null(cutoff) || length(cutoff) %in% seq_len(2)))
        stop("'cutoff' has to be either NULL or a vector of length 1 or 2")
    if (length(cutoff) == 2)
        cutoff <- sort(cutoff)
    # NOTE: use scalar `||` with any() here; the original vectorized `|`
    # errored with a length-zero condition when cutoff was NULL, and with a
    # length-2 condition (an error in R >= 4.2) for the documented
    # two-element cutoff.
    if (is.null(cutoff) || any(abs(cutoff) > 1) || any(abs(cutoff) == 0))
        stop("Must specify a value for cutoff between 0 and 1")
    subverbose <- max(as.integer(verbose) - 1L, 0)
    if(minNumRegion < 3){
        stop("minNumRegion must be at least 3")
    }
    # check statistic name
    if (!(stat %in% c("L", "area", "beta", "stat", "avg"))) {
        stop("Specified '", stat,
             "' as the test statistic which is not ",
             "in the results. Please specify a valid name from one of ",
             "L, area, beta, stat, or avg")
    }
    # informative message about blocks if block=TRUE; check for increased
    # smoothing window
    if (block){
        message("Searching for large scale blocks with at least ",
                blockSize, " basepairs.")
        if(minInSpan < 100 && bpSpan < 2000 && maxGapSmooth < 1e5){
            warning("When block=TRUE, it is recommended to increase the values ",
                    "of minInSpan, bpSpan, and maxGapSmooth in order to widen ",
                    "the smoothing window")
        }
    }
    # ---- resolve covariate names to pData() column indices ----
    # convert covariates to column numbers if characters
    if (is.character(testCovariate)) {
        if(length(testCovariate) > 1)
            stop("Only one testCovariate can be specified")
        if(is.character(adjustCovariate)){
            if(sum(testCovariate %in% adjustCovariate) > 0)
                stop("adjustCovariate can't contain testCovariate")
        }
        if(is.character(matchCovariate)){
            if(sum(testCovariate %in% matchCovariate))
                stop("matchCovariate can't contain testCovariate")
        }
        testCovariate <- which(colnames(pData(bs)) == testCovariate)
        if (length(testCovariate) == 0) {
            stop("testCovariate not found in pData(). ",
                 "Please specify a valid testCovariate")
        }
    }
    if (is.character(adjustCovariate)) {
        if(is.character(matchCovariate)){
            if(matchCovariate == adjustCovariate)
                stop("matchCovariate can't be identical to adjustCovariate")
        }
        adjustCovariate <- which(colnames(pData(bs)) %in% adjustCovariate)
        if (length(adjustCovariate) == 0) {
            stop("adjustCovariate not found in pData(). ",
                 "Please specify a valid adjustCovariate")
        }
    }
    # check that chrsPerChunk value makes sense
    if (chrsPerChunk != 1){
        if (chrsPerChunk%%1 != 0){
            stop("chrsPerChunk must be an integer")
        }else if(chrsPerChunk < 1){
            stop("chrsPerChunk must be strictly positive")
        }else if(chrsPerChunk > length(unique(seqnames(bs)))){
            stop("chrsPerChunk can't be larger than the total",
                 " number of chromosomes")
        }else if(!identical(as.character(seqnames(bs)@values), seqlevels(bs))){
            stop("BSseq object must be ordered if breaking computation ",
                 "into multiple chromosomes per chunk (see bsseq::orderBSseq())")
        }
    }
    # ---- build the design matrix from pData(bs) ----
    # construct the design matrix using the pData of bs
    if (ncol(pData(bs)) < max(testCovariate, adjustCovariate)) {
        stop("Error: pData(bs) has too few columns. ","
             Please specify valid ",
             "covariates to use in the analysis")
    }
    # coeff indexes the column(s) of the design matrix that encode the
    # test covariate (first column is the intercept)
    coeff <- seq(2,(2 + length(testCovariate) - 1))
    testCov <- pData(bs)[, testCovariate]
    fact <- TRUE
    if (length(unique(testCov)) == 1) {
        message("Warning: only one unique value of the specified ",
                "covariate of interest.  Assuming null comparison and ",
                "splitting sample group into two equal groups")
        testCov <- rep(1, length(testCov))
        testCov[seq_len(round(length(testCov)/2))] <- 0
    }else if (length(unique(testCov)) > 2 && !is.numeric(testCov)) {
        message("Performing a global test of H0: no difference among ",
                length(unique(testCov)), " groups (assuming the test ",
                "covariate ", colnames(pData(bs))[testCovariate],
                " is a factor).")
        coeff <- c(coeff, coeff + length(unique(testCov)) - 2)
    }else if (length(unique(testCov)) > 2 && is.numeric(testCov)) {
        message("Assuming the test ",
                "covariate ", colnames(pData(bs))[testCovariate],
                " is continuous.")
        fact <- FALSE
    }else{
        message("Assuming the test ",
                "covariate ", colnames(pData(bs))[testCovariate],
                " is a factor.")
        if(min(table(testCov)) < 2)
            stop("At least one group has only one sample! ",
                 "Replicates are required to run dmrseq.")
        testCov <- as.factor(testCov)
    }
    sampleSize <- table(testCov)
    if (!is.null(adjustCovariate)) {
        mmdat <- data.frame(testCov = testCov)
        adjustCov <- pData(bs)[, adjustCovariate, drop = FALSE]
        mmdat <- cbind(mmdat, adjustCov)
        frm <- paste0("~", paste0(colnames(mmdat), collapse = " + "))
        design <- model.matrix(as.formula(frm), data=mmdat)
        colnames(design)[coeff] <- colnames(pData(bs))[testCovariate]
        colnames(design)[seq((max(coeff) + 1), ncol(design))] <-
            colnames(pData(bs))[adjustCovariate]
        coeff.adj <- which(colnames(design) ==
                               colnames(pData(bs))[adjustCovariate])
    } else {
        design <- model.matrix(~testCov)
        colnames(design)[coeff] <- colnames(pData(bs))[testCovariate]
        coeff.adj <- NULL
    }
    # check for interaction terms (not yet supported)
    if (length(coeff) > 1 && any(rowSums(design[,coeff]) > 1))
        stop("Interaction terms in testCovariate are not yet supported.")
    if (length(unique(testCov)) == 2) {
        message("Condition: ",
                unique(pData(bs)[, testCovariate][which(design[, coeff] == 1)]),
                " vs ",
                unique(pData(bs)[, testCovariate][which(design[, coeff] == 0)]))
    }
    if (!is.null(adjustCovariate)) {
        message("Adjusting for covariate: ",
                paste(colnames(pData(bs))[adjustCovariate], collapse = ", "))
    }
    if (!is.null(matchCovariate)) {
        if (length(matchCovariate) > 1)
            stop("Covariate matching can only be carried out for one",
                 " covariate")
        if (length(unique(testCov)) > 2)
            stop("Covariate matching can only be carried out for 2-group",
                 " comparisons")
        if (is.character(matchCovariate)) {
            if (sum(grepl(matchCovariate, colnames(pData(bs)))) == 0) {
                stop("Error: no column in pData() found that matches ",
                     "the matchCovariate")
            } else if (length(grep(matchCovariate, colnames(pData(bs)))) > 1) {
                stop("Error: matchCovariate matches more than one ",
                     "column in pData()")
            }
            mC <- grep(matchCovariate, colnames(pData(bs)))
        } else {
            stopifnot(matchCovariate <= ncol(pData(bs)))
        }
        message("Matching permutations on covariate: ",
                colnames(pData(bs))[mC])
    }
    # ---- screen for loci with missing data (zero coverage) ----
    # check for loci with missing data
    if (fact){
        lev <- unique(pData(bs)[[testCovariate]])
        filter <- NULL
        for (l in seq_along(lev)){
            filter <- rbind(filter,
                1*(DelayedMatrixStats::rowSums2(getCoverage(bs)[,pData(bs)[[testCovariate]] ==
                                             lev[l]]) == 0))
        }
        filter <- which( apply(filter, 2, max) > 0 )
        if (length(filter) > 0) {
            stop(length(filter), " loci have zero coverage in all samples ",
                 "of at least one condition. Please remove these loci ",
                 "before running dmrseq")
        }
    }else{
        filter <- DelayedMatrixStats::rowSums2(getCoverage(bs)==0) >= ncol(bs) - 1
        if(sum(filter) > 0)
            stop(sum(filter), " loci have zero coverage in at least ",
                 ncol(bs) - 1, " samples. Please remove these loci ",
                 "before running dmrseq")
    }
    # register the parallel backend
    BiocParallel::register(BPPARAM)
    backend <- paste0("BiocParallel:", class(bpparam())[1])
    if (bpparam()$workers == 1) {
        if (verbose) {
            mes <- "Using a single core (backend: %s)."
            message(sprintf(mes, backend))
        }
        parallel <- FALSE
    } else {
        if (verbose) {
            mes <- paste0("Parallelizing using %s workers/cores ",
                          "(backend: %s).")
            message(sprintf(mes, bpparam()$workers, backend))
        }
        parallel <- TRUE
    }
    message("Computing on ", chrsPerChunk,
            " chromosome(s) at a time.\n")
    # ---- step 1: detect candidate regions on the observed design ----
    message("Detecting candidate regions with coefficient larger than ",
            unique(abs(cutoff)),
            " in magnitude.")
    OBS <- bumphunt(bs=bs, design = design,
                    coeff = coeff, coeff.adj = coeff.adj, minInSpan = minInSpan,
                    minNumRegion = minNumRegion, cutoff = cutoff,
                    maxGap = maxGap, maxGapSmooth = maxGapSmooth,
                    smooth = smooth, bpSpan = bpSpan, verbose = verbose,
                    parallel = parallel, block = block, blockSize = blockSize,
                    chrsPerChunk = chrsPerChunk, fact = fact)
    # check that at least one candidate region was found; if there were none
    # there is no need to go on to compute permutation tests...
    if (length(OBS) > 0) {
        message("* ", nrow(OBS), " candidates detected")
        FLIP <- NULL
        # ---- step 2: build the permutation matrix ----
        # configure the permutation matrix for two group comparisons
        if (length(unique(design[, coeff[1]])) == 2 &&
            length(coeff) == 1 &&
            choose(nrow(design), min(sampleSize)) < 5e5 ) {
            if (verbose) {
                message("Performing balanced permutations of ",
                        "condition across samples ",
                        "to generate a null distribution of region test statistics")
            }
            perms <- combn(seq(1, nrow(design)), min(sampleSize))
            # Remove redundant permutations (if balanced)
            if (length(unique(table(design[,coeff]))) == 1){
                perms <- perms[, seq_len(ncol(perms)/2)]
            }
            # restrict to unique permutations that don't include any
            # groups consisting of all identical conditions
            rmv <- NULL
            for (p in seq_len(ncol(perms))){
                if (length(unique(design[perms[,p],coeff])) == 1){
                    rmv <- c(rmv, p)
                }
            }
            if (length(rmv) > 0 )
                perms <- perms[,-rmv]
            # subsample permutations based on similarity to original partition
            # gives preference to those with the least similarity
            if (maxPerms < ncol(perms)) {
                similarity <- apply(perms, 2, function(x) {
                    max(table(design[x,coeff]))
                })
                perms.all <- perms
                perms <- NULL
                levs <- sort(unique(similarity))
                l <- 1
                num <- 0
                while(!(num == maxPerms) && l <= length(levs)) {
                    keep <- sample(which(similarity == levs[l]),
                                   min(maxPerms-num, sum(similarity == levs[l])) )
                    perms <- cbind(perms, perms.all[,keep])
                    l <- l + 1
                    num <- ncol(perms)
                }
            }
        } else {
            # Next consider a multilevel, or continuous covariate where the
            # covariate will be permuted in an unrestricted manner
            if (verbose) {
                message("Performing unrestricted permutation of",
                        " covariate of interest across samples ",
                        "to generate a null distribution of region test statistics")
            }
            perms <- as.matrix(seq_len(nrow(design)))
            for (p in seq_len(maxPerms)) {
                tries <- 0
                candidate <- sample(seq_len(nrow(design)), nrow(design))
                # check that the permutation is not a duplicate, and not
                # equal to the original; give up after 20 resampling attempts
                while ((sum(apply(perms, 2, function(x)
                    all.equal(x, candidate)) == TRUE) > 0 ||
                    sum(apply(perms, 2, function(x)
                        all.equal(x, rev(candidate))) == TRUE) > 0) &&
                    tries <= 20) {
                    candidate <- sample(seq_len(nrow(design)), nrow(design))
                    tries <- tries + 1
                }
                # save the permutation to the permutation matrix
                if (tries <= 20){
                    perms <- cbind(perms, candidate)
                }
            }
            perms <- perms[,-1] # remove original
        }
        pData.orig <- pData(bs)
        levs <- unique(pData.orig[[testCovariate]])
        # ---- rerun candidate detection on each permuted design ----
        # Now rerun on permuted designs and concatenate results
        for (j in seq_len(ncol(perms))) {
            if (verbose) {
                message("\nBeginning permutation ", j)
            }
            reorder <- perms[, j]
            designr <- design
            if (length(unique(design[, coeff[1]])) == 2 &&
                length(coeff) == 1 &&
                !nrow(perms) == nrow(designr)) {
                designr[, coeff] <- 0
                designr[reorder, coeff] <- 1
                pData(bs)[[testCovariate]] <- levs[1]
                pData(bs)[[testCovariate]][reorder] <- levs[2]
                # if the permuted labels don't preserve the group sizes,
                # flip the label assignment
                if (!all(sort(pData.orig[[testCovariate]]) ==
                         sort(pData(bs)[[testCovariate]]))){
                    designr[, coeff] <- 1
                    designr[reorder, coeff] <- 0
                    pData(bs)[[testCovariate]] <- levs[2]
                    pData(bs)[[testCovariate]][reorder] <- levs[1]
                }
                # map permuted samples back to original rows so adjustment
                # covariates travel with their samples
                xr <- NULL
                for (rd in seq_len(nrow(pData.orig))) {
                    match <- which(pData.orig[[testCovariate]] %in%
                                       pData(bs)[rd,][[testCovariate]])
                    taken <- which(match %in% xr)
                    if (length(taken) > 0)
                        match <- match[-taken]
                    if (length(match) > 0)
                        xr <- c(xr, match[1])
                }
                if(length(coeff.adj) > 0){
                    pData(bs)[,adjustCovariate] <-
                        pData.orig[xr,adjustCovariate]
                }
            } else {
                designr[, coeff] <- designr[reorder, coeff]
                pData(bs) <- pData.orig[reorder, , drop = FALSE]
            }
            # if matchCovariate is not null, restrict permutations such that
            # null comparisons are balanced for the values of
            # pData$matchCovariate this avoids comparison of,
            # say two different individuals in the null, that the comparison of
            # interest is tissue type. Not matching would mean the null is
            # really not null
            if (!is.null(matchCovariate)) {
                permLabel <- paste0(paste0(pData(bs)[designr[, coeff[1]] == 1,
                                                     mC], collapse = "_"),
                                    "vs", paste0(pData(bs)[(1 - designr[, coeff[1]]) == 1,
                                                           mC], collapse = "_"))
                c1 <- unlist(strsplit(permLabel, "vs"))[1]
                c2 <- unlist(strsplit(permLabel, "vs"))[2]
                c1 <- unlist(strsplit(c1, "_"))
                c2 <- unlist(strsplit(c2, "_"))
                keepPerm <- 1 * (sum(c1 %in% c2) > 0 &&
                                     sum(c2 %in% c1) > 0)
                if (keepPerm == 0) {
                    if (verbose) {
                        message(paste0("Skipping permutation ",
                                       gsub("vs", " vs ", permLabel)))
                    }
                    next
                }
            } else {
                permLabel <- j
            }
            res.flip.p <- bumphunt(bs=bs, design = designr,
                                   coeff = coeff,
                                   coeff.adj = coeff.adj,
                                   minInSpan = minInSpan,
                                   minNumRegion = minNumRegion, cutoff = cutoff,
                                   maxGap = maxGap, maxGapSmooth = maxGapSmooth,
                                   smooth = smooth, bpSpan = bpSpan,
                                   verbose = verbose, parallel = parallel,
                                   block = block, blockSize = blockSize,
                                   chrsPerChunk = chrsPerChunk, fact = fact)
            if (verbose) {
                message("* ", j, " out of ", ncol(perms),
                        " permutations completed (",
                        nrow(res.flip.p), " null candidates)")
            }
            if (!is.null(res.flip.p)) {
                res.flip.p$permNum <- permLabel
                FLIP <- rbind(FLIP, res.flip.p)
            }
        }
        # restore original pData
        pData(bs) <- pData.orig
        # if no candidates were found in permutation
        # provide informative error message
        if (is.null(FLIP)){
            warning("No candidate regions found in permutation, so inference ",
                    "can't be carried out. ",
                    "Try decreasing the cutoff, or running on a larger ",
                    "dataset if you are currently using a subset.")
            OBS$pval <- NA
            OBS$qval <- NA
        }else if (nrow(FLIP) < 0.05*nrow(OBS)){
            message("Note: Very few null candidate regions were found.",
                    "For more accurate and sensitive inference, ",
                    "try decreasing the cutoff, or running on a larger ",
                    "dataset if you are currently using a subset.")
        }
        # ---- step 3: permutation p-values from the pooled null ----
        if (!is.null(FLIP)){
            # if there are more than 1 million candidate null regions,
            # take a random sample
            # of 1 million of them
            if (nrow(FLIP) > 1e+06) {
                rs <- sample(seq_len(nrow(FLIP)), 1e+06, replace = FALSE)
                FLIP <- FLIP[rs, ]
            }
            # which column of results to use as test statistic ?
            # check statistic name
            if (!(stat %in% c(colnames(OBS), "avg"))) {
                stop("Specified '", stat,
                     "' as the test statistic which is not ",
                     "in the results. Please specify a valid name from one of ",
                     "L, area, beta, or stat")
            } else if (stat == "avg") {
                OBS$avg <- OBS$area/OBS$L
                FLIP$avg <- FLIP$area/FLIP$L
            }
            whichStatO <- which(colnames(OBS) == stat)
            whichStatF <- which(colnames(FLIP) == stat)
            # Faster way to compute the p-values that doesn't use multiple cores
            # Step 1: sort the permuted statistics vector
            perm.ordered <- c(sort(abs(FLIP[, whichStatF]),
                                   method = "quick"), Inf)
            # Step 2: find the first instance in the sorted vector where the
            # permuted value is greater than the observed and use this to
            # determine the number of permuted values that are greater than or
            # equal to the observed
            pval <- rep(NA, nrow(OBS))
            pval[!is.na(OBS[, whichStatO])] <- (1 +
                vapply(abs(OBS[!is.na(OBS[, whichStatO]), whichStatO]),
                    function(x) length(perm.ordered) - min(which(x <= perm.ordered)),
                    numeric(1))) / (1 + sum(!is.na(FLIP[, whichStatF])))
            # missing test statistics cause Inf for the p-value calculation
            # instead propagate the missing values
            pval[abs(pval) == Inf] <- NA
            pval <- data.frame(x = pval, y = p.adjust(pval, method = "BH"))
            OBS$pval <- pval$x
            OBS$qval <- pval$y
        }
        # convert output into GRanges, with indexStart/indexEnd as IRanges
        indexIR <- IRanges(OBS$indexStart, OBS$indexEnd)
        OBS.gr <- makeGRangesFromDataFrame(OBS[,-c(4:5)],
                                           keep.extra.columns = TRUE)
        OBS.gr$index <- indexIR
        names(OBS.gr) <- NULL
        # sort on pval overall (currently sorted within chromosome)
        OBS.gr <- OBS.gr[order(OBS.gr$pval, -abs(OBS.gr$stat)),]
        return(OBS.gr)
    } else {
        message("No candidate regions pass the cutoff of ", unique(abs(cutoff)))
        return(NULL)
    }
}
|
# Modified from 'plotUncertainty' from the gdm package.
# This code gives you the 95% confidence intervals based on bootstraps.
# Please use citation(package="gdm") for the citation.
dataUncertainty<-function (spTable, sampleSites, bsIters, geo = FALSE, splines = NULL,
knots = NULL, parallel = FALSE, cores = 2)
{
if (!is(spTable, "gdmData")) {
warning("The spTable object is not of class 'gdmData'. See the formatsitepair function for help.")
}
if (!(is(spTable, "gdmData") | is(spTable, "matrix") | is(spTable,
"data.frame"))) {
stop("The spTable object must be of class 'gdmData', 'matrix', or 'data.frame'.")
}
if (ncol(spTable) < 6) {
stop("spTable object requires at least 6 columns: Observed, weights, s1.xCoord, s1.yCoord, s2.xCoord, s2.yCoord")
}
if (nrow(spTable) < 1) {
stop("spTable object has < 1 rows.")
}
if (!(geo == TRUE | geo == FALSE)) {
stop("geo argument must be either TRUE or FALSE")
}
if (is.null(splines) == FALSE & !is(splines, "numeric")) {
stop("splines object must of of class = 'numeric'.")
}
if (is.null(knots) == FALSE & !is(knots, "numeric")) {
stop("knots object must of of class = 'numeric'.")
}
if (!(parallel == TRUE | parallel == FALSE)) {
stop("parallel argument must be either TRUE or FALSE")
}
if (parallel == TRUE & is.null(cores) == TRUE) {
stop("If parallel==TRUE, the number of cores must be specified")
}
if ((is.null(cores) == FALSE & is.numeric(cores) == FALSE) |
cores < 1) {
stop("argument cores needs to be a positive integer")
}
if ((is.null(bsIters) == FALSE & is.numeric(bsIters) ==
FALSE) | bsIters < 1) {
stop("argument bsIters needs to be a positive integer")
}
if (is.numeric(sampleSites) == FALSE) {
stop("sampleSites must be a number between 0 and 1")
}
if (sampleSites < 0) {
stop("sampleSites must be a number between 0 and 1")
}
if (sampleSites > 1) {
stop("sampleSites must be a number between 0 and 1")
}
if (sampleSites == 0) {
stop("a sampleSites value of 0 will remove all sites from the analysis (bad).")
}
cores <- as.integer(cores)
bsIters <- as.integer(bsIters)
k <- NULL
pred_data <- NULL
lstSP <- lapply(1:bsIters, function(i) {
spTable
})
if (parallel == TRUE) {
cl <- makeCluster(cores, outfile = "")
registerDoParallel(cl)
subSamps <- foreach(k = 1:length(lstSP), .verbose = F,
.packages = c("gdm")) %dopar% subsample.sitepair(lstSP[[k]],
sampleSites = sampleSites)
gdmMods <- foreach(k = 1:length(subSamps), .verbose = F,
.packages = c("gdm")) %dopar% gdm(subSamps[[k]],
geo = geo, splines = splines, knots = knots)
stopCluster(cl)
}
else {
subSamps <- lapply(lstSP, subsample.sitepair, sampleSites = sampleSites)
gdmMods <- lapply(subSamps, gdm, geo = geo, splines = splines,
knots = knots)
}
fullGDMmodel <- gdm(spTable, geo = geo, splines = splines,
knots = knots)
exUncertSplines <- lapply(gdmMods, isplineExtract)
fullGDMsplines <- isplineExtract(fullGDMmodel)
predVars <- colnames(exUncertSplines[[1]][[1]])
totalYmin <- Inf
totalYmax <- -Inf
for (p in 1:length(predVars)) {
predV <- predVars[p]
for (nm in 1:length(exUncertSplines)) {
selPlot <- exUncertSplines[[nm]]
spYmax <- max(selPlot[[2]][, predV])
spYmin <- min(selPlot[[2]][, predV])
totalYmax <- max(c(totalYmax, spYmax))
totalYmin <- min(c(totalYmin, spYmin))
}
}
for (p in 1:length(predVars)) {
predV <- predVars[p]
totalXmin <- Inf
totalXmax <- -Inf
for (nm in 1:length(exUncertSplines)) {
selPlot <- exUncertSplines[[nm]]
spXmax <- max(selPlot[[1]][, predV])
spXmin <- min(selPlot[[1]][, predV])
if (spXmax > totalXmax) {
totalXmax = spXmax
}
if (spXmin < totalXmin) {
totalXmin = spXmin
}
}
if (totalYmax != 0) {
plotX <- NULL
plotY <- NULL
byVarMatX <- NULL
byVarMatY <- NULL
for (nn in 1:length(exUncertSplines)) {
plotX[[nn]] <- exUncertSplines[[nn]][[1]]
plotY[[nn]] <- exUncertSplines[[nn]][[2]]
byVarMatY <- cbind(byVarMatY, plotY[[nn]][,
predV])
byVarMatX <- cbind(byVarMatX, plotX[[nn]][,
predV])
}
fullPlotX <- fullGDMsplines[[1]]
fullPlotX <- fullPlotX[, predV]
fullPlotY <- fullGDMsplines[[2]]
fullPlotY <- fullPlotY[, predV]
sdX <- apply(as.matrix(byVarMatX), 1, sd)
sdY <- apply(as.matrix(byVarMatY), 1, sd)
highBoundX <- fullPlotX + sdX
lowBoundX <- fullPlotX - sdX
highBoundY <- fullPlotY + sdY
lowBoundY <- fullPlotY - sdY
pred_data_1<-data.frame(cbind(fullPlotX,fullPlotY,sdX,sdY,highBoundX,lowBoundX,highBoundY,lowBoundY),"factor"=rep(predV))
pred_data=rbind(pred_data,pred_data_1)
}
}
return(pred_data)
}
| /R_functions/dataUncertainty.R | no_license | ldereske/GLBRC_SG_Microbiome | R | false | false | 5,428 | r | #Modified from 'plotUncertainy' from GDM package.
#This code gives you the 95% confidence intervals based off of bootstraps
#Please use citation(package="gdm") for citation
#
# Modified from 'plotUncertainty' in the GDM package: fits `bsIters` GDM
# models to bootstrap subsamples of `spTable` and returns, per predictor, the
# full-model I-spline plus bootstrap standard deviations and +/- 1 sd bounds.
# NOTE(review): the bounds are mean +/- 1 bootstrap sd, not a literal 95%
# interval — confirm the intended coverage.
#
# Args:
#   spTable     - site-pair table ('gdmData'; matrix/data.frame tolerated
#                 with a warning).
#   sampleSites - proportion (0, 1] of sites kept per bootstrap sample.
#   bsIters     - number of bootstrap iterations (positive integer).
#   geo, splines, knots - passed through to gdm::gdm().
#   parallel, cores     - optional doParallel execution.
# Returns: data.frame (fullPlotX/Y, sdX/Y, high/lowBoundX/Y, factor) or
#   NULL when every bootstrap spline is flat zero.
dataUncertainty<-function (spTable, sampleSites, bsIters, geo = FALSE, splines = NULL,
    knots = NULL, parallel = FALSE, cores = 2)
{
    ## ---- argument validation ----
    if (!is(spTable, "gdmData")) {
        warning("The spTable object is not of class 'gdmData'. See the formatsitepair function for help.")
    }
    if (!(is(spTable, "gdmData") | is(spTable, "matrix") | is(spTable,
        "data.frame"))) {
        stop("The spTable object must be of class 'gdmData', 'matrix', or 'data.frame'.")
    }
    if (ncol(spTable) < 6) {
        stop("spTable object requires at least 6 columns: Observed, weights, s1.xCoord, s1.yCoord, s2.xCoord, s2.yCoord")
    }
    if (nrow(spTable) < 1) {
        stop("spTable object has < 1 rows.")
    }
    if (!(geo == TRUE | geo == FALSE)) {
        stop("geo argument must be either TRUE or FALSE")
    }
    if (is.null(splines) == FALSE & !is(splines, "numeric")) {
        stop("splines object must of of class = 'numeric'.")
    }
    if (is.null(knots) == FALSE & !is(knots, "numeric")) {
        stop("knots object must of of class = 'numeric'.")
    }
    if (!(parallel == TRUE | parallel == FALSE)) {
        stop("parallel argument must be either TRUE or FALSE")
    }
    if (parallel == TRUE & is.null(cores) == TRUE) {
        stop("If parallel==TRUE, the number of cores must be specified")
    }
    if ((is.null(cores) == FALSE & is.numeric(cores) == FALSE) |
        cores < 1) {
        stop("argument cores needs to be a positive integer")
    }
    if ((is.null(bsIters) == FALSE & is.numeric(bsIters) ==
        FALSE) | bsIters < 1) {
        stop("argument bsIters needs to be a positive integer")
    }
    if (is.numeric(sampleSites) == FALSE) {
        stop("sampleSites must be a number between 0 and 1")
    }
    if (sampleSites < 0) {
        stop("sampleSites must be a number between 0 and 1")
    }
    if (sampleSites > 1) {
        stop("sampleSites must be a number between 0 and 1")
    }
    if (sampleSites == 0) {
        stop("a sampleSites value of 0 will remove all sites from the analysis (bad).")
    }
    cores <- as.integer(cores)
    bsIters <- as.integer(bsIters)
    k <- NULL   # foreach iteration variable (silences R CMD check)
    pred_data <- NULL
    ## ---- fit one GDM per bootstrap subsample ----
    lstSP <- lapply(1:bsIters, function(i) {
        spTable
    })
    if (parallel == TRUE) {
        cl <- makeCluster(cores, outfile = "")
        registerDoParallel(cl)
        subSamps <- foreach(k = 1:length(lstSP), .verbose = F,
            .packages = c("gdm")) %dopar% subsample.sitepair(lstSP[[k]],
            sampleSites = sampleSites)
        gdmMods <- foreach(k = 1:length(subSamps), .verbose = F,
            .packages = c("gdm")) %dopar% gdm(subSamps[[k]],
            geo = geo, splines = splines, knots = knots)
        stopCluster(cl)
    }
    else {
        subSamps <- lapply(lstSP, subsample.sitepair, sampleSites = sampleSites)
        gdmMods <- lapply(subSamps, gdm, geo = geo, splines = splines,
            knots = knots)
    }
    ## ---- extract I-splines ([[1]] = x values, [[2]] = fitted y) ----
    fullGDMmodel <- gdm(spTable, geo = geo, splines = splines,
        knots = knots)
    exUncertSplines <- lapply(gdmMods, isplineExtract)
    fullGDMsplines <- isplineExtract(fullGDMmodel)
    predVars <- colnames(exUncertSplines[[1]][[1]])
    ## Global y range over all bootstrap fits; only totalYmax is read below
    ## (a global flat-zero gate), totalYmin is computed but never used.
    totalYmin <- Inf
    totalYmax <- -Inf
    for (p in 1:length(predVars)) {
        predV <- predVars[p]
        for (nm in 1:length(exUncertSplines)) {
            selPlot <- exUncertSplines[[nm]]
            spYmax <- max(selPlot[[2]][, predV])
            spYmin <- min(selPlot[[2]][, predV])
            totalYmax <- max(c(totalYmax, spYmax))
            totalYmin <- min(c(totalYmin, spYmin))
        }
    }
    ## ---- per-predictor bootstrap sd and +/- 1 sd bounds ----
    for (p in 1:length(predVars)) {
        predV <- predVars[p]
        ## X range per predictor: computed but unused here; retained from
        ## the original plotting code.
        totalXmin <- Inf
        totalXmax <- -Inf
        for (nm in 1:length(exUncertSplines)) {
            selPlot <- exUncertSplines[[nm]]
            spXmax <- max(selPlot[[1]][, predV])
            spXmin <- min(selPlot[[1]][, predV])
            if (spXmax > totalXmax) {
                totalXmax = spXmax
            }
            if (spXmin < totalXmin) {
                totalXmin = spXmin
            }
        }
        if (totalYmax != 0) {
            plotX <- NULL
            plotY <- NULL
            byVarMatX <- NULL
            byVarMatY <- NULL
            ## Column-bind this predictor's spline from every bootstrap fit.
            for (nn in 1:length(exUncertSplines)) {
                plotX[[nn]] <- exUncertSplines[[nn]][[1]]
                plotY[[nn]] <- exUncertSplines[[nn]][[2]]
                byVarMatY <- cbind(byVarMatY, plotY[[nn]][,
                  predV])
                byVarMatX <- cbind(byVarMatX, plotX[[nn]][,
                  predV])
            }
            fullPlotX <- fullGDMsplines[[1]]
            fullPlotX <- fullPlotX[, predV]
            fullPlotY <- fullGDMsplines[[2]]
            fullPlotY <- fullPlotY[, predV]
            ## Row-wise bootstrap sd -> +/- 1 sd bounds around the
            ## full-model spline.
            sdX <- apply(as.matrix(byVarMatX), 1, sd)
            sdY <- apply(as.matrix(byVarMatY), 1, sd)
            highBoundX <- fullPlotX + sdX
            lowBoundX <- fullPlotX - sdX
            highBoundY <- fullPlotY + sdY
            lowBoundY <- fullPlotY - sdY
            pred_data_1<-data.frame(cbind(fullPlotX,fullPlotY,sdX,sdY,highBoundX,lowBoundX,highBoundY,lowBoundY),"factor"=rep(predV))
            pred_data=rbind(pred_data,pred_data_1)
        }
    }
    return(pred_data)
}
|
#' Set the current ChannelAdvisor account profile (i.e., locale)
#'
#' Looks up `account` in `ca_accounts()`, stores the account name, profile id
#' and refresh token in environment variables, then refreshes the access
#' token via `ca_refresh_token()`.
#'
#' @param account account name; must match an entry returned by
#'   `ca_accounts()`, otherwise an error is thrown.
#' @return Invisibly, the account info list for `account` (contains at least
#'   `profile_id` and `refresh_token`, per the fields read below).
#' @export
ca_set_account <- function(account = NULL) {
  if (is.null(account))
    stop("Must select a ChannelAdvisor account!")
  out <- ca_accounts()[[account]]
  if (is.null(out))
    stop(paste0("Could not find ChannelAdvisor account info for account `", account, "`!"))
  ### set environment variables
  message(paste0("Setting current ChannelAdvisor Profile to `", account, "`."))
  Sys.setenv("CHANNELADVISOR_ACCOUNT" = account)
  Sys.setenv("CHANNELADVISOR_PROFILE_ID" = out$profile_id)
  Sys.setenv("CHANNELADVISOR_REFRESH_TOKEN" = out$refresh_token)
  ### reset/refresh access token
  ca_refresh_token()
  return(invisible(out))
} | /R/ca_set_account.R | no_license | anthonypileggi/channeladvisor | R | false | false | 742 | r | #' Set the current ChannelAdvisor account profile (i.e., locale)
#' @param account account name; must match an entry returned by
#'   `ca_accounts()`, otherwise an error is thrown.
#' @return Invisibly, the account info list for `account` (contains at least
#'   `profile_id` and `refresh_token`, per the fields read below).
#' @export
ca_set_account <- function(account = NULL) {
  if (is.null(account))
    stop("Must select a ChannelAdvisor account!")
  out <- ca_accounts()[[account]]
  if (is.null(out))
    stop(paste0("Could not find ChannelAdvisor account info for account `", account, "`!"))
  ### set environment variables
  message(paste0("Setting current ChannelAdvisor Profile to `", account, "`."))
  Sys.setenv("CHANNELADVISOR_ACCOUNT" = account)
  Sys.setenv("CHANNELADVISOR_PROFILE_ID" = out$profile_id)
  Sys.setenv("CHANNELADVISOR_REFRESH_TOKEN" = out$refresh_token)
  ### reset/refresh access token
  ca_refresh_token()
  return(invisible(out))
} |
library("dismo")
library("ggplot2")
library("ggpubr")
library("maptools")
library("raster")
library("RColorBrewer")
# ---------------------------------------------------------------------------
# Maps of SVM habitat-suitability predictions (California-trained models):
# a raw and a thresholded (binary) map per cross-validation fold, plus four
# ensemble summaries (mean, thresholded mean, majority vote, unanimous
# decision). Refactored from ten near-identical copy-pasted ggplot blocks
# into shared helpers; file paths, thresholds, colour scales and panel
# layout are unchanged.
# ---------------------------------------------------------------------------

# GeoTIFFs of the five per-fold SVM predictions.
svm_tiffs <- list.files(path = "/Users/austinsmith/Documents/GitHub/A-quantitative-assessment-of-site-level-factors-in-influencing-Chukar-introduction-outcomes./Predictions_CA/model_svm_folds_CA",
                        full.names = TRUE, pattern = ".tif")

# Function 1 ----------------------------------------------------------------
# Read a vector of raster file paths and combine them into a RasterStack.
listToStack <- function(tiff_list) {
  stack(lapply(tiff_list, raster))
}

# compile tiffs to a stack; layers are in fold order (svmFold1..svmFold5)
# because list.files() returns the fold file names alphabetically sorted.
model_stack_svm <- listToStack(svm_tiffs)

# Contiguous-US bounding box.
# http://en.wikipedia.org/wiki/Extreme_points_of_the_United_States#Westernmost
top    <- 49.3457868   # north lat
left   <- -124.7844079 # west long
right  <- -66.9513812  # east long
bottom <- 24.7433195   # south lat

# State outlines drawn on every map.
states <- rgdal::readOGR("/Users/austinsmith/Downloads/ne_110m_admin_1_states_provinces/ne_110m_admin_1_states_provinces.shp")
crs(states) <- crs(model_stack_svm$svmFold1)

# Naturalized-range polygon built from eBird occurrence points; `us_pts`
# and `d` come from the sourced cleaning script.
source("R_script/eBird_data_cleaning.R")
naturalized <- circles(us_pts, d = d, dissolve = TRUE, lonlat = TRUE) # 60km is the average distance recorded
naturalized <- polygons(naturalized)
# fort_nat is only needed by the commented-out naturalized-range overlays;
# kept so those overlays can be re-enabled without edits elsewhere.
fort_nat <- fortify(naturalized)

# import thresholds; row 5, columns 2:6 are used below as the SVM
# sensitivity = specificity cutoffs for folds 1:5.
sensSpec_scores <- readRDS("./RDS_objects/CA/sensSpec_scores_CA.rds")

# Mean prediction and the mean threshold for the averaged model.
svm_mean <- mean(model_stack_svm)
svm_mean_sensSpec <- rowMeans(sensSpec_scores[5, -1])

# Helper: convert a raster layer into the long/lat/Score data frame that
# every ggplot below consumes.
raster_to_df <- function(r) {
  df <- data.frame(rasterToPoints(r))
  colnames(df) <- c("long", "lat", "Score")
  df
}

# Helper: the standard suitability map used for every panel.
# n_cols = 10 for continuous scores, 2 for binary (0/1) maps.
suitability_map <- function(df, title, n_cols) {
  ggplot(data = df, aes(y = lat, x = long)) +
    geom_raster(aes(group = Score, fill = Score)) +
    coord_cartesian(xlim = c(left, right), ylim = c(bottom, top)) +
    ggtitle(title) +
    theme(panel.background = element_rect(fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
    scale_fill_gradientn(name = "Suitability",
                         colours = rev(terrain.colors(n_cols)),
                         na.value = "blue") +
    geom_polygon(aes(x = long, y = lat, group = id), data = states,
                 colour = "black", fill = NA)
}

# Per-fold binary layers (prediction above that fold's threshold); reused
# for the fold maps and the vote ensembles.
bin_layers <- lapply(1:5, function(i) {
  model_stack_svm[[i]] > sensSpec_scores[5, i + 1]
})

# Per-fold raw and binary maps.
raw_plots <- lapply(1:5, function(i) {
  suitability_map(raster_to_df(model_stack_svm[[i]]),
                  paste0("SVM fold ", i, " - native points (raw values)"), 10)
})
binary_plots <- lapply(1:5, function(i) {
  suitability_map(raster_to_df(bin_layers[[i]]),
                  paste0("SVM fold ", i, " - native points (binary values)"), 2)
})

### OUTPUT 1: raw (left column) and binary (right column) map per fold.
fold_panels <- vector("list", 10)
fold_panels[seq(1, 9, by = 2)]  <- raw_plots
fold_panels[seq(2, 10, by = 2)] <- binary_plots
ggarrange(plotlist = fold_panels, ncol = 2, nrow = 5)
rm(raw_plots, binary_plots, fold_panels)
gc()

# ---------------------------------------------------------------------------
# Ensembles
# ---------------------------------------------------------------------------
svm_sum <- Reduce("+", bin_layers)
svm_mv <- svm_sum >= 3 # majority vote: at least 3 of 5 folds predict suitable
svm_ud <- svm_sum == 5 # unanimous decision: all 5 folds agree

# NOTE: the original titled this raw-mean panel "(binary values)" — a
# copy-paste error; the title is corrected to "(raw values)" here.
mean_raw_plot <- suitability_map(raster_to_df(svm_mean),
                                 "SVM mean - native points (raw values)", 10)
mean_binary_plot <- suitability_map(raster_to_df(svm_mean > svm_mean_sensSpec),
                                    "SVM mean - native points (binary values)", 2)
mv_plot <- suitability_map(raster_to_df(svm_mv),
                           "SVM majority vote - native points", 2)
ud_plot <- suitability_map(raster_to_df(svm_ud),
                           "SVM unanimous decision - native points", 2)

### OUTPUT 2: mean raw/binary (top row), majority vote / unanimous (bottom).
ggarrange(mean_raw_plot, mean_binary_plot,
          mv_plot, ud_plot,
          ncol = 2, nrow = 2)
gc()
| /R_script/Making_figures/Supplementary_plots_CA/SVM_supp_figures_CA.R | no_license | amsmith8/A-quantitative-assessment-of-site-level-factors-in-influencing-Chukar-introduction-outcomes | R | false | false | 16,200 | r | library("dismo")
library("ggplot2")
library("ggpubr")
library("maptools")
library("raster")
library("RColorBrewer")
svm_tiffs <- list.files(path = "/Users/austinsmith/Documents/GitHub/A-quantitative-assessment-of-site-level-factors-in-influencing-Chukar-introduction-outcomes./Predictions_CA/model_svm_folds_CA", full.names= TRUE, pattern = ".tif")
#Function 1
#--------------------------------------------------
# function to stack raster layers
listToStack <- function(tiff_list){
model_stack <- stack()
for (i in 1:length(tiff_list)){
r <- raster(tiff_list[i])
model_stack <- stack(model_stack, r)}
model_stack
}
#compile tiffs to stacks
model_stack_svm <- listToStack( svm_tiffs )
# http://en.wikipedia.org/wiki/Extreme_points_of_the_United_States#Westernmost
top = 49.3457868 # north lat
left = -124.7844079 # west long
right = -66.9513812 # east long
bottom = 24.7433195 # south lat
states <- rgdal::readOGR("/Users/austinsmith/Downloads/ne_110m_admin_1_states_provinces/ne_110m_admin_1_states_provinces.shp")
crs(states) <- crs(model_stack_svm$svmFold1)
# Ac_poly <- rgdal::readOGR("/Users/austinsmith/Documents/SDM_spatial_data/Bird_life_galliformes_fgip/Alectoris_chukar/Alectoris_chukar.shp") # via Bird Life
# crs(Ac_poly) <- crs(model_stack_gbm$gbmFold1)
# #r <- rasterize(Ac.poly, Final.model, field=1)
#
#
# ### Seperate the polygon into native and naturalized regions
# native <- subset(Ac_poly, Ac_poly$OBJECTID == 36 ) # historical native range for A. chukar. Similar to Christensen 1970
# naturalized <- subset(Ac_poly, Ac_poly$OBJECTID != 36 )
#
# fort_native <- fortify(native)
# fort_nat <- fortify(naturalized)
source( "R_script/eBird_data_cleaning.R" )
naturalized <- circles(us_pts, d = d , dissolve=TRUE, lonlat=TRUE) #60km is the average distance recorded
naturalized <- polygons(naturalized )
#fort_native <- fortify(native)
fort_nat <- fortify(naturalized)
# import threshholds
sensSpec_scores <- readRDS("./RDS_objects/CA/sensSpec_scores_CA.rds")
# Compute means for averaged model
svm_mean <- mean(model_stack_svm)
svm_mean_sensSpec <- rowMeans(sensSpec_scores[5,-1])
# -------------------------------------------------------------------------------------------------------------------------------
# Create data frames for ggplots
# Raw
# svm_fold1_raw
svm_fold1_raw_df <- data.frame( rasterToPoints( model_stack_svm$svmFold1 ) )
colnames(svm_fold1_raw_df) <-c("long", "lat", "Score")
# svm_fold1_raw
svm_fold2_raw_df <- data.frame( rasterToPoints( model_stack_svm$svmFold2 ) )
colnames(svm_fold2_raw_df) <-c("long", "lat", "Score")
# svm_fold1_raw
svm_fold3_raw_df <- data.frame( rasterToPoints( model_stack_svm$svmFold3 ) )
colnames(svm_fold3_raw_df) <-c("long", "lat", "Score")
# svm_fold1_raw
svm_fold4_raw_df <- data.frame( rasterToPoints( model_stack_svm$svmFold4 ) )
colnames(svm_fold4_raw_df) <-c("long", "lat", "Score")
# svm_fold1_raw
svm_fold5_raw_df <- data.frame( rasterToPoints( model_stack_svm$svmFold5 ) )
colnames(svm_fold5_raw_df) <-c("long", "lat", "Score")
# AVERAGE
svm_mean_raw_df <- data.frame( rasterToPoints( mean(model_stack_svm ) ) )
colnames(svm_mean_raw_df) <-c("long", "lat", "Score")
# Binary Classification
# svm_fold1_binary
svm_fold1_binary_df <- data.frame( rasterToPoints( model_stack_svm$svmFold1 > sensSpec_scores[5,2] ) )
colnames(svm_fold1_binary_df) <-c("long", "lat", "Score")
# svm_fold1_binary
svm_fold2_binary_df <- data.frame( rasterToPoints( model_stack_svm$svmFold2 > sensSpec_scores[5,3] ) )
colnames(svm_fold2_binary_df) <-c("long", "lat", "Score")
# svm_fold1_binary
svm_fold3_binary_df <- data.frame( rasterToPoints( model_stack_svm$svmFold3 > sensSpec_scores[5,4] ) )
colnames(svm_fold3_binary_df) <-c("long", "lat", "Score")
# svm_fold1_binary
svm_fold4_binary_df <- data.frame( rasterToPoints( model_stack_svm$svmFold4 > sensSpec_scores[5,5] ) )
colnames(svm_fold4_binary_df) <-c("long", "lat", "Score")
# svm_fold1_binary
svm_fold5_binary_df <- data.frame( rasterToPoints( model_stack_svm$svmFold5 > sensSpec_scores[5,6] ) )
colnames(svm_fold5_binary_df) <-c("long", "lat", "Score")
### GG OBJECTS
SVM_fold1_raw <-
ggplot(data = svm_fold1_raw_df , aes(y=lat, x=long)) +
geom_raster( aes(group=Score, fill = Score) ) +
coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
ggtitle("SVM fold 1 - native points (raw values)") +
theme( panel.background = element_rect( fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid")) +
scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(10)), na.value = "blue") +
geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA)
# remove df
rm( svm_fold1_raw_df )
gc()
SVM_fold2_raw <-
ggplot(data = svm_fold2_raw_df , aes(y=lat, x=long)) +
geom_raster( aes(group=Score, fill = Score) ) +
coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
ggtitle("SVM fold 2 - native points (raw values)") +
theme( panel.background = element_rect( fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid")) +
scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(10)), na.value = "blue") +
geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA)
# remove df
rm( svm_fold2_raw_df )
gc()
SVM_fold3_raw <-
ggplot(data = svm_fold3_raw_df, aes(y=lat, x=long)) +
geom_raster( aes(group=Score, fill = Score) ) +
coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
ggtitle("SVM fold 3 - native points (raw values)") +
theme( panel.background = element_rect( fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid")) +
scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(10)), na.value = "blue") +
geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA)
# remove df
rm( svm_fold3_raw_df )
gc()
SVM_fold4_raw <-
ggplot(data = svm_fold4_raw_df , aes(y=lat, x=long)) +
geom_raster( aes(group=Score, fill = Score) ) +
coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
ggtitle("SVM fold 4 - native points (raw values)") +
theme( panel.background = element_rect( fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid")) +
scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(10)), na.value = "blue") +
geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA)
# remove df
rm( svm_fold4_raw_df )
gc()
SVM_fold5_raw <-
ggplot(data = svm_fold5_raw_df, aes(y=lat, x=long)) +
geom_raster( aes(group=Score, fill = Score) ) +
coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
ggtitle("SVM fold 5 - native points (raw values)") +
theme( panel.background = element_rect( fill = "lightblue",
colour = "lightblue",
size = 0.5, linetype = "solid")) +
scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(10)), na.value = "blue") +
geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA)
# remove df
rm( svm_fold5_raw_df )
gc()
# ---- Per-fold suitability maps: binary (thresholded) values ---------------
# Same layout as the raw maps above, but the scores were already thresholded
# into presence/absence, so a 2-colour gradient is used.
# NOTE(review): these five chains are identical up to the fold number and
# could be factored into a small helper function.
SVM_fold1_binary <-
  ggplot(data = svm_fold1_binary_df , aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM fold 1 - native points (binary values)") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA) #+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
# remove df to free memory
rm( svm_fold1_binary_df )
gc()
SVM_fold2_binary <-
  ggplot(data = svm_fold2_binary_df , aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM fold 2 - native points (binary values)") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA) #+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
# remove df to free memory
rm( svm_fold2_binary_df )
gc()
SVM_fold3_binary <-
  ggplot(data = svm_fold3_binary_df, aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM fold 3 - native points (binary values)") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA) #+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
# remove df to free memory
rm( svm_fold3_binary_df )
gc()
SVM_fold4_binary <-
  ggplot(data = svm_fold4_binary_df , aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM fold 4 - native points (binary values)") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA) #+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
# remove df to free memory
rm( svm_fold4_binary_df )
gc()
SVM_fold5_binary <-
  ggplot(data = svm_fold5_binary_df, aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM fold 5 - native points (binary values)") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA) #+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
# remove df to free memory
rm( svm_fold5_binary_df )
gc()
### OUTPUT 1
# 5 x 2 panel figure: one row per CV fold, continuous scores (left column)
# next to the thresholded binary map (right column).  The individual plot
# objects are removed afterwards to free memory.
ggarrange(
SVM_fold1_raw , SVM_fold1_binary,
SVM_fold2_raw , SVM_fold2_binary,
SVM_fold3_raw , SVM_fold3_binary,
SVM_fold4_raw , SVM_fold4_binary,
SVM_fold5_raw , SVM_fold5_binary,
ncol = 2, nrow = 5
)
remove( SVM_fold1_raw , SVM_fold1_binary,
SVM_fold2_raw , SVM_fold2_binary,
SVM_fold3_raw , SVM_fold3_binary,
SVM_fold4_raw , SVM_fold4_binary,
SVM_fold5_raw , SVM_fold5_binary )
gc()
# -------------------------------------------------------------------------------------------------------------------------------
# Ensembles: combine the five per-fold SVM rasters
# continuous ensemble: per-cell mean score across folds (svm_mean built earlier)
svm_mean_raw_df <- data.frame( rasterToPoints( svm_mean ) )
colnames(svm_mean_raw_df) <-c("long", "lat", "Score")
# binary ensemble: mean score thresholded at its sensitivity = specificity cutoff
svm_mean_binary_df <- data.frame( rasterToPoints( svm_mean > svm_mean_sensSpec ) )
colnames(svm_mean_binary_df) <-c("long", "lat", "Score")
# per-cell count of folds whose score exceeds that fold's own threshold
# (row 5 of sensSpec_scores appears to hold the per-fold SVM cutoffs in
# columns 2:6 -- TODO confirm against where sensSpec_scores is built)
svm_sum <- sum(model_stack_svm$svmFold1 > sensSpec_scores[5,2],
model_stack_svm$svmFold2 > sensSpec_scores[5,3],
model_stack_svm$svmFold3 > sensSpec_scores[5,4],
model_stack_svm$svmFold4 > sensSpec_scores[5,5],
model_stack_svm$svmFold5 > sensSpec_scores[5,6])
# majority vote: at least 3 of the 5 folds predict presence
svm_mv <- svm_sum >= 3
# unanimous decision: all 5 folds predict presence
svm_ud <- svm_sum == 5
svm_mv_df <- data.frame( rasterToPoints( svm_mv ) )
colnames( svm_mv_df ) <-c("long", "lat", "Score")
svm_ud_df <- data.frame( rasterToPoints( svm_ud ) )
colnames( svm_ud_df ) <-c("long", "lat", "Score")
# Mean-ensemble suitability map (continuous scores).
# BUG FIX: the title previously read "(binary values)" -- copy-pasted from the
# binary panel below -- but this panel plots the raw (continuous) mean scores,
# as the 10-colour gradient and svm_mean_raw_df input show.
SVM_mean_raw <-
  ggplot(data = svm_mean_raw_df , aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM mean - native points (raw values)") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(10)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA)#+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
# free the large raw-score data frame before building the next plot
rm( svm_mean_raw_df )
gc()
# Mean-ensemble map thresholded into suitable / unsuitable (2-colour scale)
SVM_mean_binary <-
  ggplot(data = svm_mean_binary_df , aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM mean - native points (binary values)") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA)#+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
# remove df to free memory
rm( svm_mean_binary_df )
gc()
# Majority-vote ensemble map (cell is suitable if >= 3 of 5 folds agree)
SVM_mv <-
  ggplot(data = svm_mv_df , aes(y=lat, x=long)) +
  geom_raster( aes(group=Score, fill = Score) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM majority vote - native points") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA) #+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
rm( svm_mv_df )
gc()
# Unanimous-decision ensemble map (cell is suitable only if all 5 folds agree)
# NOTE(review): this geom_raster call omits group=Score, unlike every other
# panel -- harmless difference, but inconsistent.
SVM_ud <-
  ggplot(data = svm_ud_df , aes(y=lat, x=long)) +
  geom_raster( aes(fill = Score ) ) +
  coord_cartesian( xlim = c( left , right ), ylim =c( bottom , top ) ) +
  ggtitle("SVM unanimous decision - native points") +
  theme( panel.background = element_rect( fill = "lightblue",
                                          colour = "lightblue",
                                          size = 0.5, linetype = "solid")) +
  scale_fill_gradientn(name = "Suitability", colours = rev(terrain.colors(2)), na.value = "blue") +
  geom_polygon(aes(x = long, y = lat, group=id), data = states, colour="black", fill=NA) #+
  #geom_polygon(aes(x = long, y = lat, group=group), data = fort_nat, colour="red", fill=NA)
rm( svm_ud_df )
gc()
# 2 x 2 panel figure of the ensemble maps: mean (raw), mean (binary),
# majority vote, unanimous decision
ggarrange(
SVM_mean_raw, SVM_mean_binary,
SVM_mv, SVM_ud,
ncol = 2, nrow = 2
)
gc()
|
#-------------------------------------------------------------
#Random networks of text features either significantly or not-significantly
#associated with current depression, these features are then used to construct personalised
#networks for participants within/outside an episode
#This allows us to test how specific our findings are to the set of text features that we chose
#excluding LIWC supra categories
#Generates data for Figure S2
#-------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(lmerTest)
library(broom)
library(reshape2)
library(lme4)
library(stringr)
library(roll)
library(zoo)
library(rlang)
library(bootnet)
library(qgraph)
library(graphicalVAR)
library(NetworkComparisonTest)
library(Hmisc)
#need this to ensure, set seed is same across different versions of R
RNGkind(sample.kind = "Rounding")
set.seed(2002)
#Participants are included if they have at least 5 days with Tweets and at least
#50% of the tweets from their account are in English
#within-subject test of difference of sentiments during and off depressive episode
#remove participants without a depressive episode in the past year OR
#participants with only non-depressed/depressed data
#############################################################
#define functions
#############################################################
#remove outliers
# Replace values more than 3 standard deviations from the mean with NA.
#
# x     : numeric vector to clean.
# na.rm : forwarded to mean()/sd() when computing the cutoffs.  The original
#         code declared this argument but never used it, so any NA in x made
#         sd()/mean() return NA and the subscript assignment error out with
#         "NAs are not allowed in subscripted assignments".
# ...   : unused; kept for backward compatibility with existing callers.
#
# Returns x with outliers (and only outliers) replaced by NA; pre-existing
# NAs are left in place.
remove_outliers <- function(x, na.rm = TRUE, ...) {
  s <- sd(x, na.rm = na.rm)
  m <- mean(x, na.rm = na.rm)
  y <- x
  # which() drops NAs from the logical mask, so NA entries in x are skipped
  # instead of breaking the subscripted assignment
  y[which(x > (m + 3 * s))] <- NA
  y[which(x < (m - 3 * s))] <- NA
  y
}
# Apply remove_outliers() to every numeric column of a data frame, leaving
# non-numeric columns untouched.  (This operates column-wise, despite the
# old "all rows" comment.)
remove_all_outliers <- function(d){
  cleaned <- lapply(d, function(col) {
    if (is.numeric(col)) remove_outliers(col) else col
  })
  d[] <- cleaned
  d
}
'%!in%' <- function(x,y)!('%in%'(x,y))
#############################################################
#############################################################
# ---- Load and prepare data ------------------------------------------------
# NOTE(review): hard-coded absolute setwd() makes the script machine-specific;
# a project-relative path would be more portable.
setwd('/Users/seankelley/Twitter_Depression_Kelley/')
tweet_type = "all_tweets"
# NOTE(review): `collapse` is a no-op here -- paste0() on scalars already
# returns a single string.
path = paste0('Data/Sentiments/',tweet_type,"/VADER_ANEW_LIWC_complete_dep.csv",collapse = "")
participants <- read.csv('Data/Participant_Data/Twitter_Participants.csv')
# per-tweet-day sentiment analysis results (VADER/ANEW/LIWC) for this tweet type
FYP_df <- read.csv(path,stringsAsFactors = FALSE)
colnames(FYP_df)[which(colnames(FYP_df) == 'Twitter_Handle')] = 'Id'
# drop rows with a missing date stamp
FYP_df <- FYP_df[which(FYP_df$Date != ''),]
dc_all <- read.csv('Data/Results/all_tweets/Node.Strength_dechoudhury_episodepastyear.csv')
colnames(dc_all)[1] <- "Id"
# keep only participants present in the episode-past-year network analysis
FYP_df <- FYP_df %>% filter(Id %in% unique(dc_all$Id))
# pro3: third-person pronoun use, averaged over the she/he and they categories
FYP_df$pro3 <- (FYP_df$shehe + FYP_df$they)/2
# keep Id plus the feature columns and pro3
# (magic column indices -- assumes a fixed CSV column order; TODO confirm)
FYP_df <- FYP_df[,c(3,8:94,96)]
# per-participant means of every feature
FYP_df_mean <- aggregate(. ~ Id , data = FYP_df, FUN = "mean")
# set values beyond +/- 3 SD to NA, then z-score the feature columns
FYP_df_mean[,c(2:87,89)]= remove_all_outliers(FYP_df_mean[c(2:87,89)])
FYP_df_mean[,c(2:87,89)] = scale(FYP_df_mean[,c(2:87,89)])
FYP <- merge(participants,FYP_df_mean,by='Id')
# drop LIWC supra (umbrella) categories so only leaf categories remain
exclude_col <- c("function.","pronoun","ppron","affect","anx","sad","social","cogproc","percept","bio","drives","relativ","informal")
FYP <- FYP[,colnames(FYP) %!in% exclude_col]
# correlate every remaining text feature with depression severity;
# 74 feature columns + Depression_zscore are selected, so column 75 of the
# p-value matrix is the Depression_zscore column
correlation_depression <- FYP %>% select(colnames(FYP)[c(34:106,108)],Depression_zscore)
correlation_depression_mat <- rcorr(as.matrix(correlation_depression), type = "pearson")
# split features into significantly (p <= .05) vs not-significantly associated
# (only nodep.net.var is used in the visible code below)
dep.net.var <- names(which(correlation_depression_mat$P[,75] <= 0.05))
nodep.net.var <- names(which(correlation_depression_mat$P[,75] > 0.05))
# restrict to participants included in the within-episode connectivity analysis
dc_ep <- read.csv('Data/Results/all_tweets/Node.Strength_dechoudhury_withinepisode_15d.csv')
colnames(dc_ep)[1] <- "Id"
FYP_df <- FYP_df %>% filter(Id %in% unique(dc_ep$Id))
FYP_df <- FYP_df[,colnames(FYP_df) %!in% exclude_col]
#############################################################
# Pre-draw 1000 candidate node sets, each a random sample of 9 text features
# that were NOT significantly correlated with current depression severity
# (nodep.net.var, computed above).  The list is preallocated instead of grown
# element-by-element; the RNG draw order is unchanged, so under the same seed
# the sampled sets are identical to the original code's.
LIWC_var <- vector("list", 1000)
for (i in seq_len(1000)) {
  LIWC_var[[i]] <- sample(nodep.net.var, 9)
}
network_strength <- list()  # NOTE(review): never filled in the visible code -- appears unused
id_variables <- unique(FYP_df$Id)
# Build 100 random personalised networks from feature sets NOT associated with
# depression severity, and test within-subject whether mean node strength
# differs inside vs outside a depressive episode.
# NOTE(review): only the first 100 of the 1000 pre-drawn sets in LIWC_var are
# used here.
for(i in 1:100) {
depression_centrality <- list(); nodepression_centrality <- list()
# the 9 randomly drawn text features for this iteration
network_variables <- LIWC_var[[i]]
print(network_variables)
for(id in id_variables){
# progress: random-set index, fraction of participants processed, handle
print(paste0(i," ",which(unique(FYP_df$Id) == id)/length(unique(FYP_df$Id))," ",id))
# this participant's days, restricted to the 9 random feature columns
en_var <- FYP_df %>% filter(Id == id) %>% select(network_variables,Depressed_today)
depression_network <- en_var %>% filter(Depressed_today == 1) %>% select(-Depressed_today)
nondepression_network <- en_var %>% filter(Depressed_today == 0) %>% select(-Depressed_today)
SDS <- as.numeric(participants %>% filter(Id == id) %>% select(Depression_zscore))
Dep_ep <- as.numeric(participants %>% filter(Id == id) %>% select(Dep_ep_pastyear))
# NOTE(review): Depressed_today below is assigned but never used afterwards
Depressed_today <- (FYP_df %>% filter(Id == id) %>% select(Depressed_today))$Depressed_today
depression_network <- as.matrix(depression_network); nondepression_network <- as.matrix(nondepression_network)
# require >= 15 days and >= 9 non-zero observations per node, both inside
# and outside the episode, before attempting a fit
if(dim(depression_network)[1] >= 15 & all(apply(depression_network, 2, function(x) length(x[x!=0])) >= 9)){
if(dim(nondepression_network)[1] >= 15 & all(apply(nondepression_network, 2, function(x) length(x[x!=0])) >= 9)){
# fit a graphical VAR to each condition
# NOTE(review): try(..., silent = TRUE) means a failed fit silently reuses
# net_dep / net_nodep left over from a previous participant -- stale-result
# risk; consider checking the try() return value.
try(net_dep <- graphicalVAR(depression_network, nLambda = 10, verbose = T, gamma = 0,scale = TRUE, maxit.in = 100,
maxit.out = 100,deleteMissings = TRUE,centerWithin = TRUE),silent = TRUE)
try(net_nodep <- graphicalVAR(nondepression_network, nLambda = 10, verbose = T, gamma = 0,scale = TRUE, maxit.in = 100,
maxit.out = 100,deleteMissings = TRUE,centerWithin = TRUE),silent = TRUE)
# node strength = InDegree of the partial contemporaneous correlation graph
net_dep_PCC <- qgraph(net_dep$PCC);net_nodep_PCC <- qgraph(net_nodep$PCC)
net_dep_centrality <- centrality(net_dep_PCC)$InDegree;net_nodep_centrality <- centrality(net_nodep_PCC)$InDegree
# store: 9 centralities, episode-past-year flag, n days, SDS z-score, in-episode flag
nodepression_centrality[[id]] <- c(net_nodep_centrality,Dep_ep,dim(nondepression_network)[1],SDS,0)
depression_centrality[[id]] <- c(net_dep_centrality,Dep_ep,dim(depression_network)[1],SDS,1)
}
}
}
# network strength of outside-episode periods
nodep_net <- do.call(rbind, nodepression_centrality)
colnames(nodep_net )[10:13] <- c("Depressive_Episode_pastyear","Days","SDS_Total","Depressed_Today")
nodep_net <- as.data.frame(nodep_net)
nodep_net$Mean_Centrality <- rowMeans(nodep_net[,1:9])
nodep_net <- nodep_net %>% select(colnames(nodep_net)[1:9],Mean_Centrality,Depressive_Episode_pastyear,SDS_Total,Days,Depressed_Today)
nodep_net$Id <- rownames(nodep_net)
# network strength of within-episode periods
dep_net <- do.call(rbind, depression_centrality)
colnames(dep_net )[10:13] <- c("Depressive_Episode_pastyear","Days","SDS_Total","Depressed_Today")
dep_net <- as.data.frame(dep_net)
dep_net$Mean_Centrality <- rowMeans(dep_net[,1:9])
dep_net <- dep_net %>% select(colnames(dep_net)[1:9],Mean_Centrality,Depressive_Episode_pastyear,SDS_Total,Days,Depressed_Today)
dep_net$Id <- rownames(dep_net)
# stack both conditions; Depressed_Today (0/1) is the within-subject factor
within_network <- as.data.frame(rbind(dep_net,nodep_net))
within_network <- within_network[order(within_network$Id),]
within_network[,1:10] <- remove_all_outliers(within_network[1:10])
# mixed models: mean node strength ~ in-episode, random intercept per subject;
# model2 additionally adjusts for the number of days in each network
model1 <- summary(lmer(Mean_Centrality ~ Depressed_Today + (1|Id),data = within_network))
model2 <- summary(lmer(Mean_Centrality ~ Depressed_Today + Days + (1|Id),data = within_network))
# record: feature set, estimate, SE, and p-value of the Depressed_Today term
model1_coeff <- c(paste0(network_variables,collapse = ", "),model1$coefficients[2,1],model1$coefficients[2,2],model1$coefficients[2,5])
model2_coeff <- c(paste0(network_variables,collapse = ", "),model2$coefficients[2,1],model2$coefficients[2,2],model2$coefficients[2,5])
write.table(t(model1_coeff),file = "Data/Results/all_tweets/model1_coeff_rand_nodep_pro3_yellow_9node_test.csv",append = T,row.names = F,col.names = F,sep=",")
write.table(t(model2_coeff),file = "Data/Results/all_tweets/model2_coeff_rand_nodep_pro3_yellow_9node_test.csv",append = T,row.names = F,col.names = F,sep=",")
print(summary(lmer(Mean_Centrality ~ Depressed_Today + Days + (1|Id),data = within_network)))
}
| /Control_Analysis/hierarchical_random_networks.R | no_license | seanwkelley/Twitter_Depression | R | false | false | 8,365 | r | #-------------------------------------------------------------
#Random networks of text features either significantly or not-significantly
#associated with current depression, these feautres are then used to construct personalised
#networks for participants within/outside an episode
#This allows us to test how specific our findings are to the set of text features that we chose
#excluding LIWC supra categories
#Generates data for Figure S2
#-------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(lmerTest)
library(broom)
library(reshape2)
library(lme4)
library(stringr)
library(roll)
library(zoo)
library(rlang)
library(bootnet)
library(qgraph)
library(graphicalVAR)
library(NetworkComparisonTest)
library(Hmisc)
#need this to ensure, set seed is same across different versions of R
RNGkind(sample.kind = "Rounding")
set.seed(2002)
#Participants are included if they have at least 5 days with Tweets and at least
#50% of the tweets from their account are in English
#within-subject test of difference of sentiments during and off depressive episdoe
#remove participants without a depressive episode in the past year OR
#participants with only non-depressed/depressed data
#############################################################
#define functions
#############################################################
#remove outliers
# Replace values more than 3 standard deviations from the mean with NA.
#
# x     : numeric vector to clean.
# na.rm : forwarded to mean()/sd() when computing the cutoffs.  The original
#         code declared this argument but never used it, so any NA in x made
#         sd()/mean() return NA and the subscript assignment error out with
#         "NAs are not allowed in subscripted assignments".
# ...   : unused; kept for backward compatibility with existing callers.
#
# Returns x with outliers (and only outliers) replaced by NA; pre-existing
# NAs are left in place.
remove_outliers <- function(x, na.rm = TRUE, ...) {
  s <- sd(x, na.rm = na.rm)
  m <- mean(x, na.rm = na.rm)
  y <- x
  # which() drops NAs from the logical mask, so NA entries in x are skipped
  # instead of breaking the subscripted assignment
  y[which(x > (m + 3 * s))] <- NA
  y[which(x < (m - 3 * s))] <- NA
  y
}
# Apply remove_outliers() to every numeric column of a data frame, leaving
# non-numeric columns untouched.  (This operates column-wise, despite the
# old "all rows" comment.)
remove_all_outliers <- function(d){
  cleaned <- lapply(d, function(col) {
    if (is.numeric(col)) remove_outliers(col) else col
  })
  d[] <- cleaned
  d
}
'%!in%' <- function(x,y)!('%in%'(x,y))
#############################################################
#############################################################
setwd('/Users/seankelley/Twitter_Depression_Kelley/')
tweet_type = "all_tweets"
path = paste0('Data/Sentiments/',tweet_type,"/VADER_ANEW_LIWC_complete_dep.csv",collapse = "")
participants <- read.csv('Data/Participant_Data/Twitter_Participants.csv')
#sentiment analysis results based on tweet type
FYP_df <- read.csv(path,stringsAsFactors = FALSE)
colnames(FYP_df)[which(colnames(FYP_df) == 'Twitter_Handle')] = 'Id'
FYP_df <- FYP_df[which(FYP_df$Date != ''),]
dc_all <- read.csv('Data/Results/all_tweets/Node.Strength_dechoudhury_episodepastyear.csv')
colnames(dc_all)[1] <- "Id"
FYP_df <- FYP_df %>% filter(Id %in% unique(dc_all$Id))
FYP_df$pro3 <- (FYP_df$shehe + FYP_df$they)/2
FYP_df <- FYP_df[,c(3,8:94,96)]
FYP_df_mean <- aggregate(. ~ Id , data = FYP_df, FUN = "mean")
FYP_df_mean[,c(2:87,89)]= remove_all_outliers(FYP_df_mean[c(2:87,89)])
FYP_df_mean[,c(2:87,89)] = scale(FYP_df_mean[,c(2:87,89)])
FYP <- merge(participants,FYP_df_mean,by='Id')
exclude_col <- c("function.","pronoun","ppron","affect","anx","sad","social","cogproc","percept","bio","drives","relativ","informal")
FYP <- FYP[,colnames(FYP) %!in% exclude_col]
correlation_depression <- FYP %>% select(colnames(FYP)[c(34:106,108)],Depression_zscore)
correlation_depression_mat <- rcorr(as.matrix(correlation_depression), type = "pearson")
dep.net.var <- names(which(correlation_depression_mat$P[,75] <= 0.05))
nodep.net.var <- names(which(correlation_depression_mat$P[,75] > 0.05))
#restrict to participants included in within episode connectivity analysis
dc_ep <- read.csv('Data/Results/all_tweets/Node.Strength_dechoudhury_withinepisode_15d.csv')
colnames(dc_ep)[1] <- "Id"
FYP_df <- FYP_df %>% filter(Id %in% unique(dc_ep$Id))
FYP_df <- FYP_df[,colnames(FYP_df) %!in% exclude_col]
#############################################################
LIWC_var <- list()
for(i in 1:1000) {
LIWC_var[[i]] <- sample(nodep.net.var,9)
}
network_strength <- list()
id_variables <- unique(FYP_df$Id)
# 100 random networks significantly or not significantly associated with current depression severity.
#
# For each of 100 random LIWC variable sets, fit per-participant graphicalVAR
# networks separately for depressed (Depressed_today == 1) and non-depressed
# days, compute mean in-degree centrality per participant/condition, and test
# whether centrality differs on depressed days with mixed-effects models
# (random intercept per participant; model 2 also adjusts for the number of
# days used to estimate each network). Coefficients are appended to CSV files.
#
# NOTE(review): depends on objects defined elsewhere in this file/session:
# FYP_df, LIWC_var, participants, id_variables, remove_all_outliers(), and the
# dplyr, graphicalVAR, qgraph, and lmerTest packages.

# Build a per-participant centrality data.frame from a list of vectors of the
# form c(<9 node in-degree centralities>, Dep_ep, n_days, SDS, depressed_flag).
build_net_df <- function(centrality_list) {
  net <- do.call(rbind, centrality_list)
  colnames(net)[10:13] <- c("Depressive_Episode_pastyear", "Days", "SDS_Total", "Depressed_Today")
  net <- as.data.frame(net)
  net$Mean_Centrality <- rowMeans(net[, 1:9])
  net <- net %>% select(colnames(net)[1:9], Mean_Centrality, Depressive_Episode_pastyear, SDS_Total, Days, Depressed_Today)
  net$Id <- rownames(net)
  net
}

for (i in seq_len(100)) {
  depression_centrality <- list()
  nodepression_centrality <- list()
  network_variables <- LIWC_var[[i]]
  print(network_variables)
  for (id in id_variables) {
    # Progress: iteration, fraction of participants processed, participant id.
    print(paste0(i, " ", which(unique(FYP_df$Id) == id) / length(unique(FYP_df$Id)), " ", id))
    # Random variable set for this participant, split by depression status.
    en_var <- FYP_df %>% filter(Id == id) %>% select(all_of(network_variables), Depressed_today)
    depression_network <- en_var %>% filter(Depressed_today == 1) %>% select(-Depressed_today)
    nondepression_network <- en_var %>% filter(Depressed_today == 0) %>% select(-Depressed_today)
    SDS <- as.numeric(participants %>% filter(Id == id) %>% select(Depression_zscore))
    Dep_ep <- as.numeric(participants %>% filter(Id == id) %>% select(Dep_ep_pastyear))
    Depressed_today <- (FYP_df %>% filter(Id == id) %>% select(Depressed_today))$Depressed_today
    depression_network <- as.matrix(depression_network)
    nondepression_network <- as.matrix(nondepression_network)
    # Require >= 15 observations and >= 9 non-zero values per node in BOTH
    # conditions before estimating the two networks.
    if (dim(depression_network)[1] >= 15 && all(apply(depression_network, 2, function(x) length(x[x != 0])) >= 9)) {
      if (dim(nondepression_network)[1] >= 15 && all(apply(nondepression_network, 2, function(x) length(x[x != 0])) >= 9)) {
        # BUG FIX: reset both fits before try(). Previously a failed fit left
        # the model from an earlier participant in scope, so stale networks
        # were silently reused for the current id.
        net_dep <- NULL
        net_nodep <- NULL
        try(net_dep <- graphicalVAR(depression_network, nLambda = 10, verbose = TRUE, gamma = 0, scale = TRUE, maxit.in = 100,
                                    maxit.out = 100, deleteMissings = TRUE, centerWithin = TRUE), silent = TRUE)
        try(net_nodep <- graphicalVAR(nondepression_network, nLambda = 10, verbose = TRUE, gamma = 0, scale = TRUE, maxit.in = 100,
                                      maxit.out = 100, deleteMissings = TRUE, centerWithin = TRUE), silent = TRUE)
        # Skip this participant if either network failed to converge.
        if (is.null(net_dep) || is.null(net_nodep)) next
        net_dep_PCC <- qgraph(net_dep$PCC)
        net_nodep_PCC <- qgraph(net_nodep$PCC)
        net_dep_centrality <- centrality(net_dep_PCC)$InDegree
        net_nodep_centrality <- centrality(net_nodep_PCC)$InDegree
        nodepression_centrality[[id]] <- c(net_nodep_centrality, Dep_ep, dim(nondepression_network)[1], SDS, 0)
        depression_centrality[[id]] <- c(net_dep_centrality, Dep_ep, dim(depression_network)[1], SDS, 1)
      }
    }
  }
  # ROBUSTNESS: if no participant qualified in either condition,
  # do.call(rbind, list()) returns NULL and the code below would crash.
  if (length(nodepression_centrality) == 0 || length(depression_centrality) == 0) next
  # Network strength inside vs. outside depressive periods.
  nodep_net <- build_net_df(nodepression_centrality)
  dep_net <- build_net_df(depression_centrality)
  within_network <- as.data.frame(rbind(dep_net, nodep_net))
  within_network <- within_network[order(within_network$Id), ]
  within_network[, 1:10] <- remove_all_outliers(within_network[, 1:10])
  model1 <- summary(lmer(Mean_Centrality ~ Depressed_Today + (1 | Id), data = within_network))
  model2 <- summary(lmer(Mean_Centrality ~ Depressed_Today + Days + (1 | Id), data = within_network))
  # Columns 1/2/5 of the coefficient table: estimate, SE, and the lmerTest
  # p-value for the Depressed_Today effect.
  model1_coeff <- c(paste0(network_variables, collapse = ", "), model1$coefficients[2, 1], model1$coefficients[2, 2], model1$coefficients[2, 5])
  model2_coeff <- c(paste0(network_variables, collapse = ", "), model2$coefficients[2, 1], model2$coefficients[2, 2], model2$coefficients[2, 5])
  write.table(t(model1_coeff), file = "Data/Results/all_tweets/model1_coeff_rand_nodep_pro3_yellow_9node_test.csv", append = TRUE, row.names = FALSE, col.names = FALSE, sep = ",")
  write.table(t(model2_coeff), file = "Data/Results/all_tweets/model2_coeff_rand_nodep_pro3_yellow_9node_test.csv", append = TRUE, row.names = FALSE, col.names = FALSE, sep = ",")
  # Reuse the fitted summary instead of refitting the same model.
  print(model2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ESTIMATION_FUNS.R
\name{feglm}
\alias{feglm}
\alias{feglm.fit}
\alias{fepois}
\title{Fixed-effects GLM estimations}
\usage{
feglm(
fml,
data,
family = "gaussian",
vcov,
offset,
weights,
subset,
split,
fsplit,
split.keep,
split.drop,
cluster,
se,
ssc,
panel.id,
start = NULL,
etastart = NULL,
mustart = NULL,
fixef,
fixef.rm = "perfect",
fixef.tol = 1e-06,
fixef.iter = 10000,
collin.tol = 1e-10,
glm.iter = 25,
glm.tol = 1e-08,
nthreads = getFixest_nthreads(),
lean = FALSE,
warn = TRUE,
notes = getFixest_notes(),
verbose = 0,
only.coef = FALSE,
combine.quick,
mem.clean = FALSE,
only.env = FALSE,
env,
...
)
feglm.fit(
y,
X,
fixef_df,
family = "gaussian",
vcov,
offset,
split,
fsplit,
split.keep,
split.drop,
cluster,
se,
ssc,
weights,
subset,
start = NULL,
etastart = NULL,
mustart = NULL,
fixef.rm = "perfect",
fixef.tol = 1e-06,
fixef.iter = 10000,
collin.tol = 1e-10,
glm.iter = 25,
glm.tol = 1e-08,
nthreads = getFixest_nthreads(),
lean = FALSE,
warn = TRUE,
notes = getFixest_notes(),
mem.clean = FALSE,
verbose = 0,
only.env = FALSE,
only.coef = FALSE,
env,
...
)
fepois(
fml,
data,
vcov,
offset,
weights,
subset,
split,
fsplit,
split.keep,
split.drop,
cluster,
se,
ssc,
panel.id,
start = NULL,
etastart = NULL,
mustart = NULL,
fixef,
fixef.rm = "perfect",
fixef.tol = 1e-06,
fixef.iter = 10000,
collin.tol = 1e-10,
glm.iter = 25,
glm.tol = 1e-08,
nthreads = getFixest_nthreads(),
lean = FALSE,
warn = TRUE,
notes = getFixest_notes(),
verbose = 0,
combine.quick,
mem.clean = FALSE,
only.env = FALSE,
only.coef = FALSE,
env,
...
)
}
\arguments{
\item{fml}{A formula representing the relation to be estimated. For example: \code{fml = z~x+y}. To include fixed-effects, insert them in this formula using a pipe: e.g. \code{fml = z~x+y|fixef_1+fixef_2}. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. The formula \code{fml = c(y1, y2) ~ x1 + csw0(x2, x3)} leads to 6 estimations, see details. Square brackets starting with a dot can be used to call global variables: \code{y.[i] ~ x.[1:2]} will lead to \code{y3 ~ x1 + x2} if \code{i} is equal to 3 in the current environment (see details in \code{\link{xpd}}).}
\item{data}{A data.frame containing the necessary variables to run the model. The variables of the non-linear right hand side of the formula are identified with this \code{data.frame} names. Can also be a matrix.}
\item{family}{Family to be used for the estimation. Defaults to \code{gaussian()}. See \code{\link{family}} for details of family functions.}
\item{vcov}{Versatile argument to specify the VCOV. In general, it is either a character scalar equal to a VCOV type, either a formula of the form: \code{vcov_type ~ variables}. The VCOV types implemented are: "iid", "hetero" (or "HC1"), "cluster", "twoway", "NW" (or "newey_west"), "DK" (or "driscoll_kraay"), and "conley". It also accepts object from \code{\link{vcov_cluster}}, \code{\link[=vcov_hac]{vcov_NW}}, \code{\link[=vcov_hac]{NW}}, \code{\link[=vcov_hac]{vcov_DK}}, \code{\link[=vcov_hac]{DK}}, \code{\link{vcov_conley}} and \code{\link[=vcov_conley]{conley}}. It also accepts covariance matrices computed externally. Finally it accepts functions to compute the covariances. See the \code{vcov} documentation in the \href{https://lrberge.github.io/fixest/articles/fixest_walkthrough.html#the-vcov-argument-1}{vignette}.}
\item{offset}{A formula or a numeric vector. An offset can be added to the estimation. If equal to a formula, it should be of the form (for example) \code{~0.5*x**2}. This offset is linearly added to the elements of the main formula 'fml'.}
\item{weights}{A formula or a numeric vector. Each observation can be weighted, the weights must be greater than 0. If equal to a formula, it should be one-sided: for example \code{~ var_weight}.}
\item{subset}{A vector (logical or numeric) or a one-sided formula. If provided, then the estimation will be performed only on the observations defined by this argument.}
\item{split}{A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. If you also want to include the estimation for the full sample, use the argument \code{fsplit} instead. You can use the special operators \verb{\%keep\%} and \verb{\%drop\%} to select only a subset of values for which to split the sample. E.g. \code{split = ~var \%keep\% c("v1", "v2")} will split the sample only according to the values \code{v1} and \code{v2} of the variable \code{var}; it is equivalent to supplying the argument \code{split.keep = c("v1", "v2")}. By default there is partial matching on each value, you can trigger a regular expression evaluation by adding a \code{'@'} first, as in: \code{~var \%drop\% "@^v[12]"} which will drop values starting with \code{"v1"} or \code{"v2"} (of course you need to know regexes!).}
\item{fsplit}{A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. This argument is the same as split but also includes the full sample as the first estimation. You can use the special operators \verb{\%keep\%} and \verb{\%drop\%} to select only a subset of values for which to split the sample. E.g. \code{split = ~var \%keep\% c("v1", "v2")} will split the sample only according to the values \code{v1} and \code{v2} of the variable \code{var}; it is equivalent to supplying the argument \code{split.keep = c("v1", "v2")}. By default there is partial matching on each value, you can trigger a regular expression evaluation by adding an \code{'@'} first, as in: \code{~var \%drop\% "@^v[12]"} which will drop values starting with \code{"v1"} or \code{"v2"} (of course you need to know regexes!).}
\item{split.keep}{A character vector. Only used when \code{split}, or \code{fsplit}, is supplied. If provided, then the sample will be split only on the values of \code{split.keep}. The values in \code{split.keep} will be partially matched to the values of \code{split}. To enable regular expressions, you need to add an \code{'@'} first. For example \code{split.keep = c("v1", "@other|var")} will keep only the value in \code{split} partially matched by \code{"v1"} or the values containing \code{"other"} or \code{"var"}.}
\item{split.drop}{A character vector. Only used when \code{split}, or \code{fsplit}, is supplied. If provided, then the sample will be split only on the values that are not in \code{split.drop}. The values in \code{split.drop} will be partially matched to the values of \code{split}. To enable regular expressions, you need to add an \code{'@'} first. For example \code{split.drop = c("v1", "@other|var")} will drop only the value in \code{split} partially matched by \code{"v1"} or the values containing \code{"other"} or \code{"var"}.}
\item{cluster}{Tells how to cluster the standard-errors (if clustering is requested). Can be either a list of vectors, a character vector of variable names, a formula or an integer vector. Assume we want to perform 2-way clustering over \code{var1} and \code{var2} contained in the data.frame \code{base} used for the estimation. All the following \code{cluster} arguments are valid and do the same thing: \code{cluster = base[, c("var1", "var2")]}, \code{cluster = c("var1", "var2")}, \code{cluster = ~var1+var2}. If the two variables were used as fixed-effects in the estimation, you can leave it blank with \code{vcov = "twoway"} (assuming \code{var1} [resp. \code{var2}] was the 1st [resp. 2nd] fixed-effect). You can interact two variables using \code{^} with the following syntax: \code{cluster = ~var1^var2} or \code{cluster = "var1^var2"}.}
\item{se}{Character scalar. Which kind of standard error should be computed: \dQuote{standard}, \dQuote{hetero}, \dQuote{cluster}, \dQuote{twoway}, \dQuote{threeway} or \dQuote{fourway}? By default if there are clusters in the estimation: \code{se = "cluster"}, otherwise \code{se = "iid"}. Note that this argument is deprecated, you should use \code{vcov} instead.}
\item{ssc}{An object of class \code{ssc.type} obtained with the function \code{\link{ssc}}. Represents how the degree of freedom correction should be done. You must use the function \code{\link{ssc}} for this argument. The arguments and defaults of the function \code{\link{ssc}} are: \code{adj = TRUE}, \code{fixef.K="nested"}, \code{cluster.adj = TRUE}, \code{cluster.df = "min"}, \code{t.df = "min"}, \verb{fixef.force_exact=FALSE)}. See the help of the function \code{\link{ssc}} for details.}
\item{panel.id}{The panel identifiers. Can either be: i) a one sided formula (e.g. \code{panel.id = ~id+time}), ii) a character vector of length 2 (e.g. \code{panel.id=c('id', 'time')}, or iii) a character scalar of two variables separated by a comma (e.g. \code{panel.id='id,time'}). Note that you can combine variables with \code{^} only inside formulas (see the dedicated section in \code{\link{feols}}).}
\item{start}{Starting values for the coefficients. Can be: i) a numeric of length 1 (e.g. \code{start = 0}), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients). Default is missing.}
\item{etastart}{Numeric vector of the same length as the data. Starting values for the linear predictor. Default is missing.}
\item{mustart}{Numeric vector of the same length as the data. Starting values for the vector of means. Default is missing.}
\item{fixef}{Character vector. The names of variables to be used as fixed-effects. These variables should contain the identifier of each observation (e.g., think of it as a panel identifier). Note that the recommended way to include fixed-effects is to insert them directly in the formula.}
\item{fixef.rm}{Can be equal to "perfect" (default), "singleton", "both" or "none". Controls which observations are to be removed. If "perfect", then observations having a fixed-effect with perfect fit (e.g. only 0 outcomes in Poisson estimations) will be removed. If "singleton", all observations for which a fixed-effect appears only once will be removed. The meaning of "both" and "none" is direct.}
\item{fixef.tol}{Precision used to obtain the fixed-effects. Defaults to \code{1e-6}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations.}
\item{fixef.iter}{Maximum number of iterations in fixed-effects algorithm (only in use for 2+ fixed-effects). Default is 10000.}
\item{collin.tol}{Numeric scalar, default is \code{1e-10}. Threshold deciding when variables should be considered collinear and subsequently removed from the estimation. Higher values means more variables will be removed (if there is presence of collinearity). One signal of presence of collinearity is t-stats that are extremely low (for instance when t-stats < 1e-3).}
\item{glm.iter}{Number of iterations of the glm algorithm. Default is 25.}
\item{glm.tol}{Tolerance level for the glm algorithm. Default is \code{1e-8}.}
\item{nthreads}{The number of threads. Can be: a) an integer lower than, or equal to, the maximum number of threads; b) 0: meaning all available threads will be used; c) a number strictly between 0 and 1 which represents the fraction of all threads to use. The default is to use 50\% of all threads. You can set permanently the number of threads used within this package using the function \code{\link{setFixest_nthreads}}.}
\item{lean}{Logical, default is \code{FALSE}. If \code{TRUE} then all large objects are removed from the returned result: this will save memory but will block the possibility to use many methods. It is recommended to use the arguments \code{se} or \code{cluster} to obtain the appropriate standard-errors at estimation time, since obtaining different SEs won't be possible afterwards.}
\item{warn}{Logical, default is \code{TRUE}. Whether warnings should be displayed (concerns warnings relating to convergence state).}
\item{notes}{Logical. By default, three notes are displayed: when NAs are removed, when some fixed-effects are removed because of only 0 (or 0/1) outcomes, or when a variable is dropped because of collinearity. To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.}
\item{verbose}{Integer. Higher values give more information. In particular, it can detail the number of iterations in the demeaning algorithm (the first number is the left-hand-side, the other numbers are the right-hand-side variables). It can also detail the step-halving algorithm.}
\item{only.coef}{Logical, default is \code{FALSE}. If \code{TRUE}, then only the estimated coefficients are returned. Note that the length of the vector returned is always the length of the number of coefficients to be estimated: this means that the variables found to be collinear are returned with an NA value.}
\item{combine.quick}{Logical. When you combine different variables to transform them into a single fixed-effects you can do e.g. \code{y ~ x | paste(var1, var2)}. The algorithm provides a shorthand to do the same operation: \code{y ~ x | var1^var2}. Because pasting variables is a costly operation, the internal algorithm may use a numerical trick to hasten the process. The cost of doing so is that you lose the labels. If you are interested in getting the value of the fixed-effects coefficients after the estimation, you should use \code{combine.quick = FALSE}. By default it is equal to \code{FALSE} if the number of observations is lower than 50,000, and to \code{TRUE} otherwise.}
\item{mem.clean}{Logical, default is \code{FALSE}. Only to be used if the data set is large compared to the available RAM. If \code{TRUE} then intermediary objects are removed as much as possible and \code{\link{gc}} is run before each substantial C++ section in the internal code to avoid memory issues.}
\item{only.env}{(Advanced users.) Logical, default is \code{FALSE}. If \code{TRUE}, then only the environment used to make the estimation is returned.}
\item{env}{(Advanced users.) A \code{fixest} environment created by a \code{fixest} estimation with \code{only.env = TRUE}. Default is missing. If provided, the data from this environment will be used to perform the estimation.}
\item{...}{Not currently used.}
\item{y}{Numeric vector/matrix/data.frame of the dependent variable(s). Multiple dependent variables will return a \code{fixest_multi} object.}
\item{X}{Numeric matrix of the regressors.}
\item{fixef_df}{Matrix/data.frame of the fixed-effects.}
}
\value{
A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link{vcov.fixest}}, \code{\link{resid.fixest}}, etc) or functions (like for instance \code{\link{fitstat}} to access any fit statistic).
\item{nobs}{The number of observations.}
\item{fml}{The linear formula of the call.}
\item{call}{The call of the function.}
\item{method}{The method used to estimate the model.}
\item{family}{The family used to estimate the model.}
\item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects.}
\item{nparams}{The number of parameters of the model.}
\item{fixef_vars}{The names of each fixed-effect dimension.}
\item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
\item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
\item{y}{(When relevant.) The dependent variable (used to compute the within-R2 when fixed-effects are present).}
\item{convStatus}{Logical, convergence status of the IRWLS algorithm.}
\item{irls_weights}{The weights of the last iteration of the IRWLS algorithm.}
\item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
\item{fixef_removed}{(When relevant.) In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
\item{coefficients}{The named vector of estimated coefficients.}
\item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
\item{loglik}{The loglikelihood.}
\item{deviance}{Deviance of the fitted model.}
\item{iterations}{Number of iterations of the algorithm.}
\item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
\item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
\item{pseudo_r2}{The adjusted pseudo R2.}
\item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
\item{linear.predictors}{The linear predictors.}
\item{residuals}{The residuals (y minus the fitted values).}
\item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
\item{hessian}{The Hessian of the parameters.}
\item{cov.iid}{The variance-covariance matrix of the parameters.}
\item{se}{The standard-error of the parameters.}
\item{scores}{The matrix of the scores (first derivative for each observation).}
\item{residuals}{The difference between the dependent variable and the expected predictor.}
\item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
\item{offset}{(When relevant.) The offset formula.}
\item{weights}{(When relevant.) The weights formula.}
\item{collin.var}{(When relevant.) Vector containing the variables removed because of collinearity.}
\item{collin.coef}{(When relevant.) Vector of coefficients, where the values of the variables removed because of collinearity are NA.}
}
\description{
Estimates GLM models with any number of fixed-effects.
}
\details{
The core of the GLM are the weighted OLS estimations. These estimations are performed with \code{\link{feols}}. The method used to demean each variable along the fixed-effects is based on Berge (2018), since this is the same problem to solve as for the Gaussian case in a ML setup.
}
\section{Combining the fixed-effects}{
You can combine two variables to make it a new fixed-effect using \code{^}. The syntax is as follows: \code{fe_1^fe_2}. Here you created a new variable which is the combination of the two variables fe_1 and fe_2. This is identical to doing \code{paste0(fe_1, "_", fe_2)} but more convenient.
Note that pasting is a costly operation, especially for large data sets. Thus, the internal algorithm uses a numerical trick which is fast, but the drawback is that the identity of each observation is lost (i.e. they are now equal to a meaningless number instead of being equal to \code{paste0(fe_1, "_", fe_2)}). These \dQuote{identities} are useful only if you're interested in the value of the fixed-effects (that you can extract with \code{\link{fixef.fixest}}). If you're only interested in coefficients of the variables, it doesn't matter. Anyway, you can use \code{combine.quick = FALSE} to tell the internal algorithm to use \code{paste} instead of the numerical trick. By default, the numerical trick is performed only for large data sets.
}
\section{Varying slopes}{
You can add variables with varying slopes in the fixed-effect part of the formula. The syntax is as follows: \code{fixef_var[var1, var2]}. Here the variables var1 and var2 will be with varying slopes (one slope per value in fixef_var) and the fixed-effect fixef_var will also be added.
To add only the variables with varying slopes and not the fixed-effect, use double square brackets: \code{fixef_var[[var1, var2]]}.
In other words:
\itemize{
\item \code{fixef_var[var1, var2]} is equivalent to \code{fixef_var + fixef_var[[var1]] + fixef_var[[var2]]}
\item \code{fixef_var[[var1, var2]]} is equivalent to \code{fixef_var[[var1]] + fixef_var[[var2]]}
}
In general, for convergence reasons, it is recommended to always add the fixed-effect and avoid using only the variable with varying slope (i.e. use single square brackets).
}
\section{Lagging variables}{
To use leads/lags of variables in the estimation, you can: i) either provide the argument \code{panel.id}, ii) either set your data set as a panel with the function \code{\link{panel}}, \code{\link[=l]{f}} and \code{\link[=l]{d}}.
You can provide several leads/lags/differences at once: e.g. if your formula is equal to \code{f(y) ~ l(x, -1:1)}, it means that the dependent variable is equal to the lead of \code{y}, and you will have as explanatory variables the lead of \code{x1}, \code{x1} and the lag of \code{x1}. See the examples in function \code{\link{l}} for more details.
}
\section{Interactions}{
You can interact a numeric variable with a "factor-like" variable by using \code{i(factor_var, continuous_var, ref)}, where \code{continuous_var} will be interacted with each value of \code{factor_var} and the argument \code{ref} is a value of \code{factor_var} taken as a reference (optional).
Using this specific way to create interactions leads to a different display of the interacted values in \code{\link{etable}}. See examples.
It is important to note that \emph{if you do not care about the standard-errors of the interactions}, then you can add interactions in the fixed-effects part of the formula, it will be incomparably faster (using the syntax \code{factor_var[continuous_var]}, as explained in the section \dQuote{Varying slopes}).
The function \code{\link{i}} has in fact more arguments, please see details in its associated help page.
}
\section{On standard-errors}{
Standard-errors can be computed in different ways, you can use the arguments \code{se} and \code{ssc} in \code{\link{summary.fixest}} to define how to compute them. By default, in the presence of fixed-effects, standard-errors are automatically clustered.
The following vignette: \href{https://lrberge.github.io/fixest/articles/standard_errors.html}{On standard-errors} describes in details how the standard-errors are computed in \code{fixest} and how you can replicate standard-errors from other software.
You can use the functions \code{\link{setFixest_vcov}} and \code{\link[=ssc]{setFixest_ssc}} to permanently set the way the standard-errors are computed.
}
\section{Multiple estimations}{
Multiple estimations can be performed at once, they just have to be specified in the formula. Multiple estimations yield a \code{fixest_multi} object which is \sQuote{kind of} a list of all the results but includes specific methods to access the results in a handy way. Please have a look at the dedicated vignette: \href{https://lrberge.github.io/fixest/articles/multiple_estimations.html}{Multiple estimations}.
To include multiple dependent variables, wrap them in \code{c()} (\code{list()} also works). For instance \code{fml = c(y1, y2) ~ x1} would estimate the model \code{fml = y1 ~ x1} and then the model \code{fml = y2 ~ x1}.
To include multiple independent variables, you need to use the stepwise functions. There are 4 stepwise functions: \code{sw}, \code{sw0}, \code{csw}, \code{csw0}, and \code{mvsw}. Of course \code{sw} stands for stepwise, and \code{csw} for cumulative stepwise. Finally \code{mvsw} is a bit special, it stands for multiverse stepwise. Let's explain that.
Assume you have the following formula: \code{fml = y ~ x1 + sw(x2, x3)}. The stepwise function \code{sw} will estimate the following two models: \code{y ~ x1 + x2} and \code{y ~ x1 + x3}. That is, each element in \code{sw()} is sequentially, and separately, added to the formula. Would have you used \code{sw0} in lieu of \code{sw}, then the model \code{y ~ x1} would also have been estimated. The \code{0} in the name means that the model without any stepwise element also needs to be estimated.
The prefix \code{c} means cumulative: each stepwise element is added to the next. That is, \code{fml = y ~ x1 + csw(x2, x3)} would lead to the following models \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}. The \code{0} has the same meaning and would also lead to the model without the stepwise elements to be estimated: in other words, \code{fml = y ~ x1 + csw0(x2, x3)} leads to the following three models: \code{y ~ x1}, \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}.
Finally \code{mvsw} will add, in a stepwise fashion all possible combinations of the variables in its arguments. For example \code{mvsw(x1, x2, x3)} is equivalent to \code{sw0(x1, x2, x3, x1 + x2, x1 + x3, x2 + x3, x1 + x2 + x3)}. The number of models to estimate grows at a factorial rate: so be cautious!
Multiple independent variables can be combined with multiple dependent variables, as in \code{fml = c(y1, y2) ~ csw(x1, x2, x3)} which would lead to 6 estimations. Multiple estimations can also be combined to split samples (with the arguments \code{split}, \code{fsplit}).
You can also add fixed-effects in a stepwise fashion. Note that you cannot perform stepwise estimations on the IV part of the formula (\code{feols} only).
If NAs are present in the sample, to avoid too many messages, only NA removal concerning the variables common to all estimations is reported.
A note on performance. The feature of multiple estimations has been highly optimized for \code{feols}, in particular in the presence of fixed-effects. It is faster to estimate multiple models using the formula rather than with a loop. For non-\code{feols} models using the formula is roughly similar to using a loop performance-wise.
}
\section{Argument sliding}{
When the data set has been set up globally using \code{\link{setFixest_estimation}}\code{(data = data_set)}, the argument \code{vcov} can be used implicitly. This means that calls such as \code{feols(y ~ x, "HC1")}, or \code{feols(y ~ x, ~id)}, are valid: i) the data is automatically deduced from the global settings, and ii) the \code{vcov} is deduced to be the second argument.
}
\section{Piping}{
Although the argument 'data' is placed in second position, the data can be piped to the estimation functions. For example, with R >= 4.1, \code{mtcars |> feols(mpg ~ cyl)} works as \code{feols(mpg ~ cyl, mtcars)}.
}
\section{Tricks to estimate multiple LHS}{
To use multiple dependent variables in \code{fixest} estimations, you need to include them in a vector: like in \code{c(y1, y2, y3)}.
First, if names are stored in a vector, they can readily be inserted in a formula to perform multiple estimations using the dot square bracket operator. For instance if \code{my_lhs = c("y1", "y2")}, calling \code{fixest} with, say \code{feols(.[my_lhs] ~ x1, etc)} is equivalent to using \code{feols(c(y1, y2) ~ x1, etc)}. Beware that this is a special feature unique to the \emph{left-hand-side} of \code{fixest} estimations (the default behavior of the DSB operator is to aggregate with sums, see \code{\link{xpd}}).
Second, you can use a regular expression to grep the left-hand-sides on the fly. When the \code{..("regex")} feature is used naked on the LHS, the variables grepped are inserted into \code{c()}. For example \verb{..("Pe") ~ Sepal.Length, iris} is equivalent to \verb{c(Petal.Length, Petal.Width) ~ Sepal.Length, iris}. Beware that this is a special feature unique to the \emph{left-hand-side} of \code{fixest} estimations (the default behavior of \code{..("regex")} is to aggregate with sums, see \code{\link{xpd}}).
}
\section{Dot square bracket operator in formulas}{
In a formula, the dot square bracket (DSB) operator can: i) create manifold variables at once, or ii) capture values from the current environment and put them verbatim in the formula.
Say you want to include the variables \code{x1} to \code{x3} in your formula. You can use \code{xpd(y ~ x.[1:3])} and you'll get \code{y ~ x1 + x2 + x3}.
To summon values from the environment, simply put the variable in square brackets. For example: \code{for(i in 1:3) xpd(y.[i] ~ x)} will create the formulas \code{y1 ~ x} to \code{y3 ~ x} depending on the value of \code{i}.
You can include a full variable from the environment in the same way: \code{for(y in c("a", "b")) xpd(.[y] ~ x)} will create the two formulas \code{a ~ x} and \code{b ~ x}.
The DSB can even be used within variable names, but then the variable must be nested in character form. For example \code{y ~ .["x.[1:2]_sq"]} will create \code{y ~ x1_sq + x2_sq}. Using the character form is important to avoid a formula parsing error. Double quotes must be used. Note that the character string that is nested will be parsed with the function \code{\link{dsb}}, and thus it will return a vector.
By default, the DSB operator expands vectors into sums. You can add a comma, like in \code{.[, x]}, to expand with commas--the content can then be used within functions. For instance: \code{c(x.[, 1:2])} will create \code{c(x1, x2)} (and \emph{not} \code{c(x1 + x2)}).
In all \code{fixest} estimations, this special parsing is enabled, so you don't need to use \code{xpd}.
One-sided formulas can be expanded with the DSB operator: let \code{x = ~sepal + petal}, then \code{xpd(y ~ .[x])} leads to \code{y ~ sepal + petal}.
You can even use multiple square brackets within a single variable, but then the use of nesting is required. For example, the following \code{xpd(y ~ .[".[letters[1:2]]_.[1:2]"])} will create \code{y ~ a_1 + b_2}. Remember that the nested character string is parsed with \code{\link{dsb}}, which explains this behavior.
When the element to be expanded i) is equal to the empty string or, ii) is of length 0, it is replaced with a neutral element, namely \code{1}. For example, \verb{x = "" ; xpd(y ~ .[x])} leads to \code{y ~ 1}.
}
\examples{
# Poisson estimation
res = feglm(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris, "poisson")
# You could also use fepois
res_pois = fepois(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
# With the fit method:
res_fit = feglm.fit(iris$Sepal.Length, iris[, 2:3], iris$Species, "poisson")
# All results are identical:
etable(res, res_pois, res_fit)
# Note that you have many more examples in feols
#
# Multiple estimations:
#
# 6 estimations
est_mult = fepois(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
# We can display the results for the first lhs:
etable(est_mult[lhs = 1])
# And now the second (access can be made by name)
etable(est_mult[lhs = "Solar.R"])
# Now we focus on the two last right hand sides
# (note that .N can be used to specify the last item)
etable(est_mult[rhs = 2:.N])
# Combining with split
est_split = fepois(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
airquality, split = ~ Month)
# You can display everything at once with the print method
est_split
# Different way of displaying the results with "compact"
summary(est_split, "compact")
# You can still select which sample/LHS/RHS to display
est_split[sample = 1:2, lhs = 1, rhs = 1]
}
\references{
Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
For models with multiple fixed-effects:
Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
}
\seealso{
See also \code{\link{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link{etable}} to visualize the results of multiple estimations.
And other estimation methods: \code{\link{feols}}, \code{\link{femlm}}, \code{\link{fenegbin}}, \code{\link{feNmlm}}.
}
\author{
Laurent Berge
}
| /man/feglm.Rd | no_license | cran/fixest | R | false | true | 33,018 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ESTIMATION_FUNS.R
\name{feglm}
\alias{feglm}
\alias{feglm.fit}
\alias{fepois}
\title{Fixed-effects GLM estimations}
\usage{
feglm(
fml,
data,
family = "gaussian",
vcov,
offset,
weights,
subset,
split,
fsplit,
split.keep,
split.drop,
cluster,
se,
ssc,
panel.id,
start = NULL,
etastart = NULL,
mustart = NULL,
fixef,
fixef.rm = "perfect",
fixef.tol = 1e-06,
fixef.iter = 10000,
collin.tol = 1e-10,
glm.iter = 25,
glm.tol = 1e-08,
nthreads = getFixest_nthreads(),
lean = FALSE,
warn = TRUE,
notes = getFixest_notes(),
verbose = 0,
only.coef = FALSE,
combine.quick,
mem.clean = FALSE,
only.env = FALSE,
env,
...
)
feglm.fit(
y,
X,
fixef_df,
family = "gaussian",
vcov,
offset,
split,
fsplit,
split.keep,
split.drop,
cluster,
se,
ssc,
weights,
subset,
start = NULL,
etastart = NULL,
mustart = NULL,
fixef.rm = "perfect",
fixef.tol = 1e-06,
fixef.iter = 10000,
collin.tol = 1e-10,
glm.iter = 25,
glm.tol = 1e-08,
nthreads = getFixest_nthreads(),
lean = FALSE,
warn = TRUE,
notes = getFixest_notes(),
mem.clean = FALSE,
verbose = 0,
only.env = FALSE,
only.coef = FALSE,
env,
...
)
fepois(
fml,
data,
vcov,
offset,
weights,
subset,
split,
fsplit,
split.keep,
split.drop,
cluster,
se,
ssc,
panel.id,
start = NULL,
etastart = NULL,
mustart = NULL,
fixef,
fixef.rm = "perfect",
fixef.tol = 1e-06,
fixef.iter = 10000,
collin.tol = 1e-10,
glm.iter = 25,
glm.tol = 1e-08,
nthreads = getFixest_nthreads(),
lean = FALSE,
warn = TRUE,
notes = getFixest_notes(),
verbose = 0,
combine.quick,
mem.clean = FALSE,
only.env = FALSE,
only.coef = FALSE,
env,
...
)
}
\arguments{
\item{fml}{A formula representing the relation to be estimated. For example: \code{fml = z~x+y}. To include fixed-effects, insert them in this formula using a pipe: e.g. \code{fml = z~x+y|fixef_1+fixef_2}. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. The formula \code{fml = c(y1, y2) ~ x1 + csw0(x2, x3)} leads to 6 estimations, see details. Square brackets starting with a dot can be used to call global variables: \code{y.[i] ~ x.[1:2]} will lead to \code{y3 ~ x1 + x2} if \code{i} is equal to 3 in the current environment (see details in \code{\link{xpd}}).}
\item{data}{A data.frame containing the necessary variables to run the model. The variables of the non-linear right hand side of the formula are identified with this \code{data.frame} names. Can also be a matrix.}
\item{family}{Family to be used for the estimation. Defaults to \code{gaussian()}. See \code{\link{family}} for details of family functions.}
\item{vcov}{Versatile argument to specify the VCOV. In general, it is either a character scalar equal to a VCOV type, either a formula of the form: \code{vcov_type ~ variables}. The VCOV types implemented are: "iid", "hetero" (or "HC1"), "cluster", "twoway", "NW" (or "newey_west"), "DK" (or "driscoll_kraay"), and "conley". It also accepts object from \code{\link{vcov_cluster}}, \code{\link[=vcov_hac]{vcov_NW}}, \code{\link[=vcov_hac]{NW}}, \code{\link[=vcov_hac]{vcov_DK}}, \code{\link[=vcov_hac]{DK}}, \code{\link{vcov_conley}} and \code{\link[=vcov_conley]{conley}}. It also accepts covariance matrices computed externally. Finally it accepts functions to compute the covariances. See the \code{vcov} documentation in the \href{https://lrberge.github.io/fixest/articles/fixest_walkthrough.html#the-vcov-argument-1}{vignette}.}
\item{offset}{A formula or a numeric vector. An offset can be added to the estimation. If equal to a formula, it should be of the form (for example) \code{~0.5*x**2}. This offset is linearly added to the elements of the main formula 'fml'.}
\item{weights}{A formula or a numeric vector. Each observation can be weighted, the weights must be greater than 0. If equal to a formula, it should be one-sided: for example \code{~ var_weight}.}
\item{subset}{A vector (logical or numeric) or a one-sided formula. If provided, then the estimation will be performed only on the observations defined by this argument.}
\item{split}{A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. If you also want to include the estimation for the full sample, use the argument \code{fsplit} instead. You can use the special operators \verb{\%keep\%} and \verb{\%drop\%} to select only a subset of values for which to split the sample. E.g. \code{split = ~var \%keep\% c("v1", "v2")} will split the sample only according to the values \code{v1} and \code{v2} of the variable \code{var}; it is equivalent to supplying the argument \code{split.keep = c("v1", "v2")}. By default there is partial matching on each value, you can trigger a regular expression evaluation by adding a \code{'@'} first, as in: \code{~var \%drop\% "@^v[12]"} which will drop values starting with \code{"v1"} or \code{"v2"} (of course you need to know regexes!).}
\item{fsplit}{A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. This argument is the same as split but also includes the full sample as the first estimation. You can use the special operators \verb{\%keep\%} and \verb{\%drop\%} to select only a subset of values for which to split the sample. E.g. \code{split = ~var \%keep\% c("v1", "v2")} will split the sample only according to the values \code{v1} and \code{v2} of the variable \code{var}; it is equivalent to supplying the argument \code{split.keep = c("v1", "v2")}. By default there is partial matching on each value, you can trigger a regular expression evaluation by adding an \code{'@'} first, as in: \code{~var \%drop\% "@^v[12]"} which will drop values starting with \code{"v1"} or \code{"v2"} (of course you need to know regexes!).}
\item{split.keep}{A character vector. Only used when \code{split}, or \code{fsplit}, is supplied. If provided, then the sample will be split only on the values of \code{split.keep}. The values in \code{split.keep} will be partially matched to the values of \code{split}. To enable regular expressions, you need to add an \code{'@'} first. For example \code{split.keep = c("v1", "@other|var")} will keep only the value in \code{split} partially matched by \code{"v1"} or the values containing \code{"other"} or \code{"var"}.}
\item{split.drop}{A character vector. Only used when \code{split}, or \code{fsplit}, is supplied. If provided, then the sample will be split only on the values that are not in \code{split.drop}. The values in \code{split.drop} will be partially matched to the values of \code{split}. To enable regular expressions, you need to add an \code{'@'} first. For example \code{split.drop = c("v1", "@other|var")} will drop only the value in \code{split} partially matched by \code{"v1"} or the values containing \code{"other"} or \code{"var"}.}
\item{cluster}{Tells how to cluster the standard-errors (if clustering is requested). Can be either a list of vectors, a character vector of variable names, a formula or an integer vector. Assume we want to perform 2-way clustering over \code{var1} and \code{var2} contained in the data.frame \code{base} used for the estimation. All the following \code{cluster} arguments are valid and do the same thing: \code{cluster = base[, c("var1", "var2")]}, \code{cluster = c("var1", "var2")}, \code{cluster = ~var1+var2}. If the two variables were used as fixed-effects in the estimation, you can leave it blank with \code{vcov = "twoway"} (assuming \code{var1} [resp. \code{var2}] was the 1st [resp. 2nd] fixed-effect). You can interact two variables using \code{^} with the following syntax: \code{cluster = ~var1^var2} or \code{cluster = "var1^var2"}.}
\item{se}{Character scalar. Which kind of standard error should be computed: \dQuote{standard}, \dQuote{hetero}, \dQuote{cluster}, \dQuote{twoway}, \dQuote{threeway} or \dQuote{fourway}? By default if there are clusters in the estimation: \code{se = "cluster"}, otherwise \code{se = "iid"}. Note that this argument is deprecated, you should use \code{vcov} instead.}
\item{ssc}{An object of class \code{ssc.type} obtained with the function \code{\link{ssc}}. Represents how the degree of freedom correction should be done.You must use the function \code{\link{ssc}} for this argument. The arguments and defaults of the function \code{\link{ssc}} are: \code{adj = TRUE}, \code{fixef.K="nested"}, \code{cluster.adj = TRUE}, \code{cluster.df = "min"}, \code{t.df = "min"}, \verb{fixef.force_exact=FALSE)}. See the help of the function \code{\link{ssc}} for details.}
\item{panel.id}{The panel identifiers. Can either be: i) a one sided formula (e.g. \code{panel.id = ~id+time}), ii) a character vector of length 2 (e.g. \code{panel.id=c('id', 'time')}, or iii) a character scalar of two variables separated by a comma (e.g. \code{panel.id='id,time'}). Note that you can combine variables with \code{^} only inside formulas (see the dedicated section in \code{\link{feols}}).}
\item{start}{Starting values for the coefficients. Can be: i) a numeric of length 1 (e.g. \code{start = 0}), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients). Default is missing.}
\item{etastart}{Numeric vector of the same length as the data. Starting values for the linear predictor. Default is missing.}
\item{mustart}{Numeric vector of the same length as the data. Starting values for the vector of means. Default is missing.}
\item{fixef}{Character vector. The names of variables to be used as fixed-effects. These variables should contain the identifier of each observation (e.g., think of it as a panel identifier). Note that the recommended way to include fixed-effects is to insert them directly in the formula.}
\item{fixef.rm}{Can be equal to "perfect" (default), "singleton", "both" or "none". Controls which observations are to be removed. If "perfect", then observations having a fixed-effect with perfect fit (e.g. only 0 outcomes in Poisson estimations) will be removed. If "singleton", all observations for which a fixed-effect appears only once will be removed. The meaning of "both" and "none" is direct.}
\item{fixef.tol}{Precision used to obtain the fixed-effects. Defaults to \code{1e-6}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations.}
\item{fixef.iter}{Maximum number of iterations in fixed-effects algorithm (only in use for 2+ fixed-effects). Default is 10000.}
\item{collin.tol}{Numeric scalar, default is \code{1e-10}. Threshold deciding when variables should be considered collinear and subsequently removed from the estimation. Higher values means more variables will be removed (if there is presence of collinearity). One signal of presence of collinearity is t-stats that are extremely low (for instance when t-stats < 1e-3).}
\item{glm.iter}{Number of iterations of the glm algorithm. Default is 25.}
\item{glm.tol}{Tolerance level for the glm algorithm. Default is \code{1e-8}.}
\item{nthreads}{The number of threads. Can be: a) an integer lower than, or equal to, the maximum number of threads; b) 0: meaning all available threads will be used; c) a number strictly between 0 and 1 which represents the fraction of all threads to use. The default is to use 50\% of all threads. You can set permanently the number of threads used within this package using the function \code{\link{setFixest_nthreads}}.}
\item{lean}{Logical, default is \code{FALSE}. If \code{TRUE} then all large objects are removed from the returned result: this will save memory but will block the possibility to use many methods. It is recommended to use the arguments \code{se} or \code{cluster} to obtain the appropriate standard-errors at estimation time, since obtaining different SEs won't be possible afterwards.}
\item{warn}{Logical, default is \code{TRUE}. Whether warnings should be displayed (concerns warnings relating to convergence state).}
\item{notes}{Logical. By default, three notes are displayed: when NAs are removed, when some fixed-effects are removed because of only 0 (or 0/1) outcomes, or when a variable is dropped because of collinearity. To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.}
\item{verbose}{Integer. Higher values give more information. In particular, it can detail the number of iterations in the demeaning algorithm (the first number is the left-hand-side, the other numbers are the right-hand-side variables). It can also detail the step-halving algorithm.}
\item{only.coef}{Logical, default is \code{FALSE}. If \code{TRUE}, then only the estimated coefficients are returned. Note that the length of the vector returned is always the length of the number of coefficients to be estimated: this means that the variables found to be collinear are returned with an NA value.}
\item{combine.quick}{Logical. When you combine different variables to transform them into a single fixed-effects you can do e.g. \code{y ~ x | paste(var1, var2)}. The algorithm provides a shorthand to do the same operation: \code{y ~ x | var1^var2}. Because pasting variables is a costly operation, the internal algorithm may use a numerical trick to hasten the process. The cost of doing so is that you lose the labels. If you are interested in getting the value of the fixed-effects coefficients after the estimation, you should use \code{combine.quick = FALSE}. By default it is equal to \code{FALSE} if the number of observations is lower than 50,000, and to \code{TRUE} otherwise.}
\item{mem.clean}{Logical, default is \code{FALSE}. Only to be used if the data set is large compared to the available RAM. If \code{TRUE} then intermediary objects are removed as much as possible and \code{\link{gc}} is run before each substantial C++ section in the internal code to avoid memory issues.}
\item{only.env}{(Advanced users.) Logical, default is \code{FALSE}. If \code{TRUE}, then only the environment used to make the estimation is returned.}
\item{env}{(Advanced users.) A \code{fixest} environment created by a \code{fixest} estimation with \code{only.env = TRUE}. Default is missing. If provided, the data from this environment will be used to perform the estimation.}
\item{...}{Not currently used.}
\item{y}{Numeric vector/matrix/data.frame of the dependent variable(s). Multiple dependent variables will return a \code{fixest_multi} object.}
\item{X}{Numeric matrix of the regressors.}
\item{fixef_df}{Matrix/data.frame of the fixed-effects.}
}
\value{
A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link{vcov.fixest}}, \code{\link{resid.fixest}}, etc) or functions (like for instance \code{\link{fitstat}} to access any fit statistic).
\item{nobs}{The number of observations.}
\item{fml}{The linear formula of the call.}
\item{call}{The call of the function.}
\item{method}{The method used to estimate the model.}
\item{family}{The family used to estimate the model.}
\item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects.}
\item{nparams}{The number of parameters of the model.}
\item{fixef_vars}{The names of each fixed-effect dimension.}
\item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
\item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
\item{y}{(When relevant.) The dependent variable (used to compute the within-R2 when fixed-effects are present).}
\item{convStatus}{Logical, convergence status of the IRWLS algorithm.}
\item{irls_weights}{The weights of the last iteration of the IRWLS algorithm.}
\item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
\item{fixef_removed}{(When relevant.) In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
\item{coefficients}{The named vector of estimated coefficients.}
\item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
\item{loglik}{The loglikelihood.}
\item{deviance}{Deviance of the fitted model.}
\item{iterations}{Number of iterations of the algorithm.}
\item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
\item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
\item{pseudo_r2}{The adjusted pseudo R2.}
\item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
\item{linear.predictors}{The linear predictors.}
\item{residuals}{The residuals (y minus the fitted values).}
\item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
\item{hessian}{The Hessian of the parameters.}
\item{cov.iid}{The variance-covariance matrix of the parameters.}
\item{se}{The standard-error of the parameters.}
\item{scores}{The matrix of the scores (first derivative for each observation).}
\item{residuals}{The difference between the dependent variable and the expected predictor.}
\item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
\item{offset}{(When relevant.) The offset formula.}
\item{weights}{(When relevant.) The weights formula.}
\item{collin.var}{(When relevant.) Vector containing the variables removed because of collinearity.}
\item{collin.coef}{(When relevant.) Vector of coefficients, where the values of the variables removed because of collinearity are NA.}
}
\description{
Estimates GLM models with any number of fixed-effects.
}
\details{
The core of the GLM are the weighted OLS estimations. These estimations are performed with \code{\link{feols}}. The method used to demean each variable along the fixed-effects is based on Berge (2018), since this is the same problem to solve as for the Gaussian case in a ML setup.
}
\section{Combining the fixed-effects}{
You can combine two variables to make it a new fixed-effect using \code{^}. The syntax is as follows: \code{fe_1^fe_2}. Here you created a new variable which is the combination of the two variables fe_1 and fe_2. This is identical to doing \code{paste0(fe_1, "_", fe_2)} but more convenient.
Note that pasting is a costly operation, especially for large data sets. Thus, the internal algorithm uses a numerical trick which is fast, but the drawback is that the identity of each observation is lost (i.e. they are now equal to a meaningless number instead of being equal to \code{paste0(fe_1, "_", fe_2)}). These \dQuote{identities} are useful only if you're interested in the value of the fixed-effects (that you can extract with \code{\link{fixef.fixest}}). If you're only interested in coefficients of the variables, it doesn't matter. Anyway, you can use \code{combine.quick = FALSE} to tell the internal algorithm to use \code{paste} instead of the numerical trick. By default, the numerical trick is performed only for large data sets.
}
\section{Varying slopes}{
You can add variables with varying slopes in the fixed-effect part of the formula. The syntax is as follows: \code{fixef_var[var1, var2]}. Here the variables var1 and var2 will be with varying slopes (one slope per value in fixef_var) and the fixed-effect fixef_var will also be added.
To add only the variables with varying slopes and not the fixed-effect, use double square brackets: \code{fixef_var[[var1, var2]]}.
In other words:
\itemize{
\item \code{fixef_var[var1, var2]} is equivalent to \code{fixef_var + fixef_var[[var1]] + fixef_var[[var2]]}
\item \code{fixef_var[[var1, var2]]} is equivalent to \code{fixef_var[[var1]] + fixef_var[[var2]]}
}
In general, for convergence reasons, it is recommended to always add the fixed-effect and avoid using only the variable with varying slope (i.e. use single square brackets).
}
\section{Lagging variables}{
To use leads/lags of variables in the estimation, you can: i) either provide the argument \code{panel.id}, ii) either set your data set as a panel with the function \code{\link{panel}}, \code{\link[=l]{f}} and \code{\link[=l]{d}}.
You can provide several leads/lags/differences at once: e.g. if your formula is equal to \code{f(y) ~ l(x, -1:1)}, it means that the dependent variable is equal to the lead of \code{y}, and you will have as explanatory variables the lead of \code{x}, \code{x} and the lag of \code{x}. See the examples in function \code{\link{l}} for more details.
}
\section{Interactions}{
You can interact a numeric variable with a "factor-like" variable by using \code{i(factor_var, continuous_var, ref)}, where \code{continuous_var} will be interacted with each value of \code{factor_var} and the argument \code{ref} is a value of \code{factor_var} taken as a reference (optional).
Using this specific way to create interactions leads to a different display of the interacted values in \code{\link{etable}}. See examples.
It is important to note that \emph{if you do not care about the standard-errors of the interactions}, then you can add interactions in the fixed-effects part of the formula, it will be incomparably faster (using the syntax \code{factor_var[continuous_var]}, as explained in the section \dQuote{Varying slopes}).
The function \code{\link{i}} has in fact more arguments, please see details in its associated help page.
}
\section{On standard-errors}{
Standard-errors can be computed in different ways, you can use the arguments \code{se} and \code{ssc} in \code{\link{summary.fixest}} to define how to compute them. By default, in the presence of fixed-effects, standard-errors are automatically clustered.
The following vignette: \href{https://lrberge.github.io/fixest/articles/standard_errors.html}{On standard-errors} describes in details how the standard-errors are computed in \code{fixest} and how you can replicate standard-errors from other software.
You can use the functions \code{\link{setFixest_vcov}} and \code{\link[=ssc]{setFixest_ssc}} to permanently set the way the standard-errors are computed.
}
\section{Multiple estimations}{
Multiple estimations can be performed at once, they just have to be specified in the formula. Multiple estimations yield a \code{fixest_multi} object which is \sQuote{kind of} a list of all the results but includes specific methods to access the results in a handy way. Please have a look at the dedicated vignette: \href{https://lrberge.github.io/fixest/articles/multiple_estimations.html}{Multiple estimations}.
To include multiple dependent variables, wrap them in \code{c()} (\code{list()} also works). For instance \code{fml = c(y1, y2) ~ x1} would estimate the model \code{fml = y1 ~ x1} and then the model \code{fml = y2 ~ x1}.
To include multiple independent variables, you need to use the stepwise functions. There are 4 stepwise functions: \code{sw}, \code{sw0}, \code{csw}, \code{csw0}, and \code{mvsw}. Of course \code{sw} stands for stepwise, and \code{csw} for cumulative stepwise. Finally \code{mvsw} is a bit special, it stands for multiverse stepwise. Let's explain that.
Assume you have the following formula: \code{fml = y ~ x1 + sw(x2, x3)}. The stepwise function \code{sw} will estimate the following two models: \code{y ~ x1 + x2} and \code{y ~ x1 + x3}. That is, each element in \code{sw()} is sequentially, and separately, added to the formula. Had you used \code{sw0} in lieu of \code{sw}, then the model \code{y ~ x1} would also have been estimated. The \code{0} in the name means that the model without any stepwise element also needs to be estimated.
The prefix \code{c} means cumulative: each stepwise element is added to the next. That is, \code{fml = y ~ x1 + csw(x2, x3)} would lead to the following models \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}. The \code{0} has the same meaning and would also lead to the model without the stepwise elements to be estimated: in other words, \code{fml = y ~ x1 + csw0(x2, x3)} leads to the following three models: \code{y ~ x1}, \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}.
Finally \code{mvsw} will add, in a stepwise fashion all possible combinations of the variables in its arguments. For example \code{mvsw(x1, x2, x3)} is equivalent to \code{sw0(x1, x2, x3, x1 + x2, x1 + x3, x2 + x3, x1 + x2 + x3)}. The number of models to estimate grows at a factorial rate: so be cautious!
Multiple independent variables can be combined with multiple dependent variables, as in \code{fml = c(y1, y2) ~ csw(x1, x2, x3)} which would lead to 6 estimations. Multiple estimations can also be combined to split samples (with the arguments \code{split}, \code{fsplit}).
You can also add fixed-effects in a stepwise fashion. Note that you cannot perform stepwise estimations on the IV part of the formula (\code{feols} only).
If NAs are present in the sample, to avoid too many messages, only NA removal concerning the variables common to all estimations is reported.
A note on performance. The feature of multiple estimations has been highly optimized for \code{feols}, in particular in the presence of fixed-effects. It is faster to estimate multiple models using the formula rather than with a loop. For non-\code{feols} models using the formula is roughly similar to using a loop performance-wise.
}
\section{Argument sliding}{
When the data set has been set up globally using \code{\link{setFixest_estimation}}\code{(data = data_set)}, the argument \code{vcov} can be used implicitly. This means that calls such as \code{feols(y ~ x, "HC1")}, or \code{feols(y ~ x, ~id)}, are valid: i) the data is automatically deduced from the global settings, and ii) the \code{vcov} is deduced to be the second argument.
}
\section{Piping}{
Although the argument 'data' is placed in second position, the data can be piped to the estimation functions. For example, with R >= 4.1, \code{mtcars |> feols(mpg ~ cyl)} works as \code{feols(mpg ~ cyl, mtcars)}.
}
\section{Tricks to estimate multiple LHS}{
To use multiple dependent variables in \code{fixest} estimations, you need to include them in a vector: like in \code{c(y1, y2, y3)}.
First, if names are stored in a vector, they can readily be inserted in a formula to perform multiple estimations using the dot square bracket operator. For instance if \code{my_lhs = c("y1", "y2")}, calling \code{fixest} with, say \code{feols(.[my_lhs] ~ x1, etc)} is equivalent to using \code{feols(c(y1, y2) ~ x1, etc)}. Beware that this is a special feature unique to the \emph{left-hand-side} of \code{fixest} estimations (the default behavior of the DSB operator is to aggregate with sums, see \code{\link{xpd}}).
Second, you can use a regular expression to grep the left-hand-sides on the fly. When the \code{..("regex")} feature is used naked on the LHS, the variables grepped are inserted into \code{c()}. For example \verb{..("Pe") ~ Sepal.Length, iris} is equivalent to \verb{c(Petal.Length, Petal.Width) ~ Sepal.Length, iris}. Beware that this is a special feature unique to the \emph{left-hand-side} of \code{fixest} estimations (the default behavior of \code{..("regex")} is to aggregate with sums, see \code{\link{xpd}}).
}
\section{Dot square bracket operator in formulas}{
In a formula, the dot square bracket (DSB) operator can: i) create manifold variables at once, or ii) capture values from the current environment and put them verbatim in the formula.
Say you want to include the variables \code{x1} to \code{x3} in your formula. You can use \code{xpd(y ~ x.[1:3])} and you'll get \code{y ~ x1 + x2 + x3}.
To summon values from the environment, simply put the variable in square brackets. For example: \code{for(i in 1:3) xpd(y.[i] ~ x)} will create the formulas \code{y1 ~ x} to \code{y3 ~ x} depending on the value of \code{i}.
You can include a full variable from the environment in the same way: \code{for(y in c("a", "b")) xpd(.[y] ~ x)} will create the two formulas \code{a ~ x} and \code{b ~ x}.
The DSB can even be used within variable names, but then the variable must be nested in character form. For example \code{y ~ .["x.[1:2]_sq"]} will create \code{y ~ x1_sq + x2_sq}. Using the character form is important to avoid a formula parsing error. Double quotes must be used. Note that the character string that is nested will be parsed with the function \code{\link{dsb}}, and thus it will return a vector.
By default, the DSB operator expands vectors into sums. You can add a comma, like in \code{.[, x]}, to expand with commas--the content can then be used within functions. For instance: \code{c(x.[, 1:2])} will create \code{c(x1, x2)} (and \emph{not} \code{c(x1 + x2)}).
In all \code{fixest} estimations, this special parsing is enabled, so you don't need to use \code{xpd}.
One-sided formulas can be expanded with the DSB operator: let \code{x = ~sepal + petal}, then \code{xpd(y ~ .[x])} leads to \code{y ~ sepal + petal}.
You can even use multiple square brackets within a single variable, but then the use of nesting is required. For example, the following \code{xpd(y ~ .[".[letters[1:2]]_.[1:2]"])} will create \code{y ~ a_1 + b_2}. Remember that the nested character string is parsed with \code{\link{dsb}}, which explains this behavior.
When the element to be expanded i) is equal to the empty string or, ii) is of length 0, it is replaced with a neutral element, namely \code{1}. For example, \verb{x = "" ; xpd(y ~ .[x])} leads to \code{y ~ 1}.
}
\examples{
# Poisson estimation
res = feglm(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris, "poisson")
# You could also use fepois
res_pois = fepois(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
# With the fit method:
res_fit = feglm.fit(iris$Sepal.Length, iris[, 2:3], iris$Species, "poisson")
# All results are identical:
etable(res, res_pois, res_fit)
# Note that you have many more examples in feols
#
# Multiple estimations:
#
# 6 estimations
est_mult = fepois(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
# We can display the results for the first lhs:
etable(est_mult[lhs = 1])
# And now the second (access can be made by name)
etable(est_mult[lhs = "Solar.R"])
# Now we focus on the two last right hand sides
# (note that .N can be used to specify the last item)
etable(est_mult[rhs = 2:.N])
# Combining with split
est_split = fepois(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
airquality, split = ~ Month)
# You can display everything at once with the print method
est_split
# Different way of displaying the results with "compact"
summary(est_split, "compact")
# You can still select which sample/LHS/RHS to display
est_split[sample = 1:2, lhs = 1, rhs = 1]
}
\references{
Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\href{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}{CREA Discussion Paper 2018-13}).
For models with multiple fixed-effects:
Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
}
\seealso{
See also \code{\link{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link{etable}} to visualize the results of multiple estimations.
And other estimation methods: \code{\link{feols}}, \code{\link{femlm}}, \code{\link{fenegbin}}, \code{\link{feNmlm}}.
}
\author{
Laurent Berge
}
|
# Scrape the Barttorvik team-ratings table (2019 season; the query string
# fixes the date window begin=20181101, end=20190117).
library(rvest)
# NOTE(review): `url` holds the parsed HTML document returned by read_html(),
# not a URL string.
url <- read_html('http://www.barttorvik.com/?year=2019&sort=&lastx=0&hteam=&conlimit=All&state=All&begin=20181101&end=20190117&top=0&quad=4&venue=All&type=All&mingames=0#')
# The first table on the page is the ratings table.
torvik <- html_table(url)[[1]]
head(torvik)
str(torvik)
# Create name conversions with games data
library(readr)
# **Do this part once and save dictionary file **
# all_games <- read_csv(file = '~/Desktop/data_projects/college_basketball_predictions/data/all_games2019.csv')
# head(all_games)
#
# # Check number of matching names
# table(unique(all_games$Home) %in% unique(torvik$Team))
#
# # 52 team names have to be changed to match games data. Let's get that list
# torvik_names <- sort(torvik[,2][!torvik[,2] %in% all_games$Home])
# games_names <- sort(unique(all_games$Home[!all_games$Home %in% torvik[,2]]))
# match_names <- sort(torvik$Team[torvik$Team %in% all_games$Home])
#
#
# write_csv(as.data.frame(torvik_names), path = '~/Desktop/data_projects/college_basketball_predictions/data/torvik_names.csv')
# write_csv(as.data.frame(games_names), path = '~/Desktop/data_projects/college_basketball_predictions/data/games_names.csv')
# write_csv(as.data.frame(match_names), path = '~/Desktop/data_projects/college_basketball_predictions/data/match_names.csv')
# Manually match up team names and make a dictionary in excel
# *******
# Clean one raw Torvik ratings table scraped from barttorvik.com.
#
# Args:
#   torvik: data.frame as returned by rvest::html_table() on the ratings page
#           (22 columns: rank, team, conference, games, record and stats).
# Returns:
#   A data.frame with standardized column names, numeric stat columns, the
#   appended rank digits stripped from the stat values, the WAB column
#   dropped, and team names mapped to the games-data naming via the on-disk
#   dictionary file.
clean_torvik <- function(torvik){
  colnames(torvik) <- c('Rk','Team','Conf','G','Rec','AdjE_off','AdjE_def',
                        'BARTHAG','EFF_FG_off','EFF_FG_def','TO_off','TO_def',
                        'Reb_off','Reb_def','FTrate_off','FTrate_def',
                        '2pt_off','2pt_def','3pt_off','3pt_def','AdjTempo',
                        'WAB')
  # Drop the header row that html_table() keeps as data, plus any repeated
  # header rows embedded in the table body.  (A no-op paste() of the old
  # header row was removed here.)
  torvik <- torvik[2:nrow(torvik), ]
  library(dplyr)
  torvik <- filter(torvik, Rk != 'Rk')
  # Non-numeric columns are 2 (Team), 3 (Conf) and 5 (Rec); coerce the rest.
  torvik[,c(1,4,6:ncol(torvik))] <- lapply(torvik[,c(1,4,6:ncol(torvik))], as.numeric)
  # Round percentages to one decimal; BARTHAG (column 8) keeps four decimals.
  torvik[,c(1,4,6,7,9:ncol(torvik))] <- lapply(torvik[,c(1,4,6,7,9:ncol(torvik))], round, digits=1)
  torvik[,8] <- round(torvik[,8], 4)
  # Torvik formatting quirk: each stat's rank is appended to its value, so
  # 50.5 with rank 250 parses as 50.5250 (negligible) while integer 50 with
  # rank 250 parses as 50250 (badly skewed).  Strip the rank digits by
  # dividing once by the appropriate power of ten.
  # Fixes over the original version: the branches are now mutually exclusive
  # (`else if`), so a value can no longer be divided twice; the > 1e5 branch
  # assigns to df_col[i] instead of the undefined objects `df`/`colnum`
  # (which previously raised an error whenever it was hit); NAs produced by
  # the numeric coercion are skipped instead of crashing the `if()`.
  fix_values <- function(df_col){
    for(i in seq_along(df_col)) {
      if (is.na(df_col[i])) next
      if(df_col[i] > 1e5) {
        df_col[i] <- df_col[i]/1e4
      } else if(df_col[i] > 1e4) {
        df_col[i] <- df_col[i]/1e3
      } else if(df_col[i] > 1e3) {
        df_col[i] <- df_col[i]/1e2
      } else if(df_col[i] > 1e2) {
        df_col[i] <- df_col[i]/1e1
      }
    }
    round(df_col, 1)
  }
  # Strip rank digits from the stat columns.
  torvik[,9:21] <- lapply(torvik[,9:21], fix_values)
  # Drop wins-above-bubble (column 22).
  torvik <- torvik[,-22]
  # Convert the 'St.' abbreviation to 'State' (matches the games data better).
  torvik$Team <- gsub(pattern = 'St\\.', replacement = 'State', x = torvik$Team)
  # Map team names to the games-data naming using the saved dictionary.
  name_dict <- readr::read_csv('~/Desktop/data_projects/college_basketball_predictions/data/cbb_name_dictionary.csv')
  torvik <- dplyr::left_join(torvik, name_dict, by = c('Team' = 'torvik_names')) %>%
    select(-Team) %>%
    select(c(1,21,2:20)) %>%
    rename(Team = games_names)
  return(torvik)
}
# Clean today's snapshot and stamp it with the scrape date.
torvik <- clean_torvik(torvik)
torvik$date <- Sys.Date()
head(torvik)
str(torvik)
library(magrittr)
# Scrape and combine torvik ratings from multiple days
# (2018-11-10 through 2019-01-17); each daily snapshot is cleaned and
# tagged with its date before being appended.
test_multiple <- data.frame()
for(month in c('11','12','01')){
  # Day range and calendar year for each month of the window.
  if(month == '11'){
    days <- 10:30
    year <- 2018
  }
  if(month == '12'){
    days <- 1:31
    year <- 2018
  }
  if(month == '01'){
    days <- 1:17
    year <- 2019
  }
  for(day in days){
    # The URL needs a zero-padded day for days < 10.
    if(day < 10){
      url <- read_html(paste0('http://www.barttorvik.com/?year=2019&sort=&lastx=0&hteam=&conlimit=All&state=All&begin=20181101&end=',year,month,'0',day,'&top=0&quad=4&venue=All&type=All&mingames=0#'))
    } else{
      url <- read_html(paste0('http://www.barttorvik.com/?year=2019&sort=&lastx=0&hteam=&conlimit=All&state=All&begin=20181101&end=',year,month,day,'&top=0&quad=4&venue=All&type=All&mingames=0#'))
    }
    torvik <- html_table(url)[[1]] %>%
      clean_torvik()
    # NOTE(review): unlike the URL, the Date tag is not zero-padded for
    # days < 10 (e.g. "2018-11-9") -- confirm downstream date parsing.
    torvik$Date <- paste(year, month, day, sep = '-')
    # Growing a data.frame with rbind in a loop; acceptable at this scale.
    test_multiple <- rbind(test_multiple, torvik)
    print(paste0(month,'-' ,day, ' done'))
  }
}
# Save torvik data
write_csv(test_multiple, path = '~/Desktop/data_projects/college_basketball_predictions/data/torvik_all_2019.csv')
# NOTE(review): the variable is named `mich` but the filter keeps Villanova.
mich <- filter(test_multiple, Team == 'Villanova')
plot(mich$BARTHAG)
| /src/scrape_and_clean_torvik.R | no_license | rberger997/college_basketball_predictions | R | false | false | 4,738 | r |
# Scrape the Barttorvik team-ratings table (2019 season; the query string
# fixes the date window begin=20181101, end=20190117).
library(rvest)
# NOTE(review): `url` holds the parsed HTML document returned by read_html(),
# not a URL string.
url <- read_html('http://www.barttorvik.com/?year=2019&sort=&lastx=0&hteam=&conlimit=All&state=All&begin=20181101&end=20190117&top=0&quad=4&venue=All&type=All&mingames=0#')
# The first table on the page is the ratings table.
torvik <- html_table(url)[[1]]
head(torvik)
str(torvik)
# Create name conversions with games data
library(readr)
# **Do this part once and save dictionary file **
# all_games <- read_csv(file = '~/Desktop/data_projects/college_basketball_predictions/data/all_games2019.csv')
# head(all_games)
#
# # Check number of matching names
# table(unique(all_games$Home) %in% unique(torvik$Team))
#
# # 52 team names have to be changed to match games data. Let's get that list
# torvik_names <- sort(torvik[,2][!torvik[,2] %in% all_games$Home])
# games_names <- sort(unique(all_games$Home[!all_games$Home %in% torvik[,2]]))
# match_names <- sort(torvik$Team[torvik$Team %in% all_games$Home])
#
#
# write_csv(as.data.frame(torvik_names), path = '~/Desktop/data_projects/college_basketball_predictions/data/torvik_names.csv')
# write_csv(as.data.frame(games_names), path = '~/Desktop/data_projects/college_basketball_predictions/data/games_names.csv')
# write_csv(as.data.frame(match_names), path = '~/Desktop/data_projects/college_basketball_predictions/data/match_names.csv')
# Manually match up team names and make a dictionary in excel
# *******
# Clean one raw Torvik ratings table scraped from barttorvik.com.
#
# Args:
#   torvik: data.frame as returned by rvest::html_table() on the ratings page
#           (22 columns: rank, team, conference, games, record and stats).
# Returns:
#   A data.frame with standardized column names, numeric stat columns, the
#   appended rank digits stripped from the stat values, the WAB column
#   dropped, and team names mapped to the games-data naming via the on-disk
#   dictionary file.
clean_torvik <- function(torvik){
  colnames(torvik) <- c('Rk','Team','Conf','G','Rec','AdjE_off','AdjE_def',
                        'BARTHAG','EFF_FG_off','EFF_FG_def','TO_off','TO_def',
                        'Reb_off','Reb_def','FTrate_off','FTrate_def',
                        '2pt_off','2pt_def','3pt_off','3pt_def','AdjTempo',
                        'WAB')
  # Drop the header row that html_table() keeps as data, plus any repeated
  # header rows embedded in the table body.  (A no-op paste() of the old
  # header row was removed here.)
  torvik <- torvik[2:nrow(torvik), ]
  library(dplyr)
  torvik <- filter(torvik, Rk != 'Rk')
  # Non-numeric columns are 2 (Team), 3 (Conf) and 5 (Rec); coerce the rest.
  torvik[,c(1,4,6:ncol(torvik))] <- lapply(torvik[,c(1,4,6:ncol(torvik))], as.numeric)
  # Round percentages to one decimal; BARTHAG (column 8) keeps four decimals.
  torvik[,c(1,4,6,7,9:ncol(torvik))] <- lapply(torvik[,c(1,4,6,7,9:ncol(torvik))], round, digits=1)
  torvik[,8] <- round(torvik[,8], 4)
  # Torvik formatting quirk: each stat's rank is appended to its value, so
  # 50.5 with rank 250 parses as 50.5250 (negligible) while integer 50 with
  # rank 250 parses as 50250 (badly skewed).  Strip the rank digits by
  # dividing once by the appropriate power of ten.
  # Fixes over the original version: the branches are now mutually exclusive
  # (`else if`), so a value can no longer be divided twice; the > 1e5 branch
  # assigns to df_col[i] instead of the undefined objects `df`/`colnum`
  # (which previously raised an error whenever it was hit); NAs produced by
  # the numeric coercion are skipped instead of crashing the `if()`.
  fix_values <- function(df_col){
    for(i in seq_along(df_col)) {
      if (is.na(df_col[i])) next
      if(df_col[i] > 1e5) {
        df_col[i] <- df_col[i]/1e4
      } else if(df_col[i] > 1e4) {
        df_col[i] <- df_col[i]/1e3
      } else if(df_col[i] > 1e3) {
        df_col[i] <- df_col[i]/1e2
      } else if(df_col[i] > 1e2) {
        df_col[i] <- df_col[i]/1e1
      }
    }
    round(df_col, 1)
  }
  # Strip rank digits from the stat columns.
  torvik[,9:21] <- lapply(torvik[,9:21], fix_values)
  # Drop wins-above-bubble (column 22).
  torvik <- torvik[,-22]
  # Convert the 'St.' abbreviation to 'State' (matches the games data better).
  torvik$Team <- gsub(pattern = 'St\\.', replacement = 'State', x = torvik$Team)
  # Map team names to the games-data naming using the saved dictionary.
  name_dict <- readr::read_csv('~/Desktop/data_projects/college_basketball_predictions/data/cbb_name_dictionary.csv')
  torvik <- dplyr::left_join(torvik, name_dict, by = c('Team' = 'torvik_names')) %>%
    select(-Team) %>%
    select(c(1,21,2:20)) %>%
    rename(Team = games_names)
  return(torvik)
}
# Clean today's snapshot and stamp it with the scrape date.
torvik <- clean_torvik(torvik)
torvik$date <- Sys.Date()
head(torvik)
str(torvik)
library(magrittr)
# Scrape and combine torvik ratings from multiple days
# (2018-11-10 through 2019-01-17); each daily snapshot is cleaned and
# tagged with its date before being appended.
test_multiple <- data.frame()
for(month in c('11','12','01')){
  # Day range and calendar year for each month of the window.
  if(month == '11'){
    days <- 10:30
    year <- 2018
  }
  if(month == '12'){
    days <- 1:31
    year <- 2018
  }
  if(month == '01'){
    days <- 1:17
    year <- 2019
  }
  for(day in days){
    # The URL needs a zero-padded day for days < 10.
    if(day < 10){
      url <- read_html(paste0('http://www.barttorvik.com/?year=2019&sort=&lastx=0&hteam=&conlimit=All&state=All&begin=20181101&end=',year,month,'0',day,'&top=0&quad=4&venue=All&type=All&mingames=0#'))
    } else{
      url <- read_html(paste0('http://www.barttorvik.com/?year=2019&sort=&lastx=0&hteam=&conlimit=All&state=All&begin=20181101&end=',year,month,day,'&top=0&quad=4&venue=All&type=All&mingames=0#'))
    }
    torvik <- html_table(url)[[1]] %>%
      clean_torvik()
    # NOTE(review): unlike the URL, the Date tag is not zero-padded for
    # days < 10 (e.g. "2018-11-9") -- confirm downstream date parsing.
    torvik$Date <- paste(year, month, day, sep = '-')
    # Growing a data.frame with rbind in a loop; acceptable at this scale.
    test_multiple <- rbind(test_multiple, torvik)
    print(paste0(month,'-' ,day, ' done'))
  }
}
# Save torvik data
write_csv(test_multiple, path = '~/Desktop/data_projects/college_basketball_predictions/data/torvik_all_2019.csv')
# NOTE(review): the variable is named `mich` but the filter keeps Villanova.
mich <- filter(test_multiple, Team == 'Villanova')
plot(mich$BARTHAG)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/secretsmanager_operations.R
\name{secretsmanager_describe_secret}
\alias{secretsmanager_describe_secret}
\title{Retrieves the details of a secret}
\usage{
secretsmanager_describe_secret(SecretId)
}
\arguments{
\item{SecretId}{[required] The identifier of the secret whose details you want to retrieve. You can
specify either the Amazon Resource Name (ARN) or the friendly name of
the secret.
If you specify an ARN, we generally recommend that you specify a
complete ARN. You can specify a partial ARN too---for example, if you
don't include the final hyphen and six random characters that Secrets
Manager adds at the end of the ARN when you created the secret. A
partial ARN match can work as long as it uniquely matches only one
secret. However, if your secret has a name that ends in a hyphen
followed by six characters (before Secrets Manager adds the hyphen and
six characters to the ARN) and you try to use that as a partial ARN,
then those characters cause Secrets Manager to assume that you're
specifying a complete ARN. This confusion can cause unexpected results.
To avoid this situation, we recommend that you don't create secret names
that end with a hyphen followed by six characters.}
}
\description{
Retrieves the details of a secret. It does not include the encrypted
fields. Only those fields that are populated with a value are returned
in the response.
}
\details{
\strong{Minimum permissions}
To run this command, you must have the following permissions:
\itemize{
\item secretsmanager:DescribeSecret
}
\strong{Related operations}
\itemize{
\item To create a secret, use CreateSecret.
\item To modify a secret, use UpdateSecret.
\item To retrieve the encrypted secret information in a version of the
secret, use GetSecretValue.
\item To list all of the secrets in the AWS account, use ListSecrets.
}
}
\section{Request syntax}{
\preformatted{svc$describe_secret(
SecretId = "string"
)
}
}
\examples{
\dontrun{
# The following example shows how to get the details about a secret.
svc$describe_secret(
SecretId = "MyTestDatabaseSecret"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/secretsmanager_describe_secret.Rd | permissive | johnnytommy/paws | R | false | true | 2,165 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/secretsmanager_operations.R
\name{secretsmanager_describe_secret}
\alias{secretsmanager_describe_secret}
\title{Retrieves the details of a secret}
\usage{
secretsmanager_describe_secret(SecretId)
}
\arguments{
\item{SecretId}{[required] The identifier of the secret whose details you want to retrieve. You can
specify either the Amazon Resource Name (ARN) or the friendly name of
the secret.
If you specify an ARN, we generally recommend that you specify a
complete ARN. You can specify a partial ARN too---for example, if you
don't include the final hyphen and six random characters that Secrets
Manager adds at the end of the ARN when you created the secret. A
partial ARN match can work as long as it uniquely matches only one
secret. However, if your secret has a name that ends in a hyphen
followed by six characters (before Secrets Manager adds the hyphen and
six characters to the ARN) and you try to use that as a partial ARN,
then those characters cause Secrets Manager to assume that you're
specifying a complete ARN. This confusion can cause unexpected results.
To avoid this situation, we recommend that you don't create secret names
that end with a hyphen followed by six characters.}
}
\description{
Retrieves the details of a secret. It does not include the encrypted
fields. Only those fields that are populated with a value are returned
in the response.
}
\details{
\strong{Minimum permissions}
To run this command, you must have the following permissions:
\itemize{
\item secretsmanager:DescribeSecret
}
\strong{Related operations}
\itemize{
\item To create a secret, use CreateSecret.
\item To modify a secret, use UpdateSecret.
\item To retrieve the encrypted secret information in a version of the
secret, use GetSecretValue.
\item To list all of the secrets in the AWS account, use ListSecrets.
}
}
\section{Request syntax}{
\preformatted{svc$describe_secret(
SecretId = "string"
)
}
}
\examples{
\dontrun{
# The following example shows how to get the details about a secret.
svc$describe_secret(
SecretId = "MyTestDatabaseSecret"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sccanBasedDecoder.R
\name{sccanBasedDecoder}
\alias{sccanBasedDecoder}
\title{Simple eigenSentence estimation function.}
\usage{
sccanBasedDecoder(eventdata, designmat, boldFeatureMatrix, sentenceSpace,
mysparse = c(-0.1, -0.1), nvecs = 5, its = 1, smooth = 0,
cthresh = 0, mask = NA, strategy = NA, doEanat = F, joinEanat = F,
outputfileprefix = "sccanBasedDecoder", interleave = FALSE,
sentenceTransformation = "none", trainset = NA, locwordlist = "")
}
\arguments{
\item{eventdata}{output from annotateEvents}
\item{designmat}{sentences part of the assembled design matrix}
\item{boldFeatureMatrix}{a feature matrix for bold data, currently
spatiotemporal}
\item{sentenceSpace}{the sentence feature space}
\item{mask}{a 4D mask}
\item{doEanat}{also run a sparse PCA on the data}
\item{otherparameters}{see sparseDecom2 documentation}
}
\value{
a data frame with annotation and ground truth vs prediction
annotated ...
}
\description{
Applies a function to a matrix representation of a sentence to get an
eigensentence map.
}
\examples{
\dontrun{
ccaresults<-sccanBasedDecoder( eventdata, dmats, ccafeatspace , sentspace )
}
}
\author{
Avants BB
}
| /man/sccanBasedDecoder.Rd | no_license | stnava/RKRNS | R | false | true | 1,245 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sccanBasedDecoder.R
\name{sccanBasedDecoder}
\alias{sccanBasedDecoder}
\title{Simple eigenSentence estimation function.}
\usage{
sccanBasedDecoder(eventdata, designmat, boldFeatureMatrix, sentenceSpace,
mysparse = c(-0.1, -0.1), nvecs = 5, its = 1, smooth = 0,
cthresh = 0, mask = NA, strategy = NA, doEanat = F, joinEanat = F,
outputfileprefix = "sccanBasedDecoder", interleave = FALSE,
sentenceTransformation = "none", trainset = NA, locwordlist = "")
}
\arguments{
\item{eventdata}{output from annotateEvents}
\item{designmat}{sentences part of the assembled design matrix}
\item{boldFeatureMatrix}{a feature matrix for bold data, currently
spatiotemporal}
\item{sentenceSpace}{the sentence feature space}
\item{mask}{a 4D mask}
\item{doEanat}{also run a sparse PCA on the data}
\item{otherparameters}{see sparseDecom2 documentation}
}
\value{
a data frame with annotation and ground truth vs prediction
annotated ...
}
\description{
Applies a function to a matrix representation of a sentence to get an
eigensentence map.
}
\examples{
\dontrun{
ccaresults<-sccanBasedDecoder( eventdata, dmats, ccafeatspace , sentspace )
}
}
\author{
Avants BB
}
|
# Extracted example for VecStatGraphs2D::VonMisesParameter.
library(VecStatGraphs2D)
### Name: VonMisesParameter
### Title: Calculation of von Mises concentration parameter of the azimuths
### Aliases: VonMisesParameter
### Keywords: azimuth
### ** Examples
# Load the sample data file shipped with the package (Type=2 per LoadData).
FileName<-system.file("data/RectangularData.txt", package="VecStatGraphs2D")
dat<-LoadData(FileName, Type=2)
# Second column is taken as the azimuths.  NOTE(review): column meaning
# assumed from the variable name -- confirm against the package docs.
azimuths<-dat[,2]
VonMisesParameter(azimuths)
| /data/genthat_extracted_code/VecStatGraphs2D/examples/VonMisesParameter.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 372 | r | library(VecStatGraphs2D)
### Name: VonMisesParameter
### Title: Calculation of von Mises concentration parameter of the azimuths
### Aliases: VonMisesParameter
### Keywords: azimuth
### ** Examples
# Load the sample data file shipped with the package (Type=2 per LoadData).
FileName<-system.file("data/RectangularData.txt", package="VecStatGraphs2D")
dat<-LoadData(FileName, Type=2)
# Second column is taken as the azimuths.  NOTE(review): column meaning
# assumed from the variable name -- confirm against the package docs.
azimuths<-dat[,2]
VonMisesParameter(azimuths)
|
# 1) Multiple linear regression with multicollinearity check ---------------
# a) This is a multiple linear regression problem.
# b) Load the data (dropping the first, id-like, column) and inspect
#    pairwise scatterplots plus the correlation of the response with each
#    predictor (first row of the correlation matrix).
BD1 = readr::read_table2("tabela_1.tsv")
BD1 = BD1[,-1]
pairs(BD1, pch = 19)
cor(BD1)[1,]
# c) Correlations among the predictors only.
cor(BD1[,-1])
# Manuf and Pop have |cor| > 0.90, an indication of multicollinearity.
# d) Fit the full model.
modelo1 = lm(data = BD1); summary(modelo1)
# Comparing the individual t-tests with the overall F-test alone does not
# allow a conclusion about multicollinearity.
# e) Auxiliary regressions: regress each predictor on the remaining ones.
#    dplyr::select() reorders the columns so the chosen predictor becomes
#    the response of lm().
BD1_aux = BD1[,-1]
modelo_temp = lm(BD1_aux)
modelo_manuf = lm(dplyr::select(BD1_aux, Manuf, dplyr::everything()))
modelo_pop = lm(dplyr::select(BD1_aux, Pop, dplyr::everything()))
modelo_vento = lm(dplyr::select(BD1_aux, Vento, dplyr::everything()))
modelo_precip = lm(dplyr::select(BD1_aux, Precip, dplyr::everything()))
modelo_dias = lm(dplyr::select(BD1_aux, N_dias, dplyr::everything()))
# R^2 of each auxiliary regression, used to compute the VIFs below.
r2_temp = summary(modelo_temp)$r.squared
r2_manuf = summary(modelo_manuf)$r.squared
r2_pop = summary(modelo_pop)$r.squared
r2_vento = summary(modelo_vento)$r.squared
r2_precip = summary(modelo_precip)$r.squared
r2_dias = summary(modelo_dias)$r.squared
# Variance inflation factor from the R^2 of an auxiliary regression of one
# predictor on the others: VIF = 1 / (1 - R^2).  VIF > 10 is the usual
# rule-of-thumb flag for multicollinearity.
# (Fixed: use `<-` instead of `=` for top-level assignment.)
#
# Args:
#   x: numeric R^2 value(s) in [0, 1); vectorized.
# Returns: numeric VIF value(s), same length as x.
vif <- function(x) {
  1 / (1 - x)
}
# Variance inflation factor of each predictor.
vif_temp = vif(r2_temp)
vif_manuf = vif(r2_manuf)
vif_pop = vif(r2_pop)
vif_vento = vif(r2_vento)
vif_precip = vif(r2_precip)
vif_dias = vif(r2_dias)
# Manuf and Pop show VIF > 10, which indicates multicollinearity.
# f) Drop the Pop variable (column 4), judged the least informative for
#    this problem, then refit; modelo3 keeps only the first two predictors.
BD1_f = BD1[,-4]
modelo2 = lm(data = BD1_f); summary(modelo2)
modelo3 = lm(data = BD1_f[,-c(4,5,6)]); summary(modelo3)
# Fitted model: Yi_hat = 77.237 - 1.048*Xi1 + 0.024304*Xi2
summary(modelo3)$r.squared
# 2) Regression of PCB concentration on fish age ---------------------------
# a) Load the data, keep only PCB and Idade (age), and inspect the scatter.
BD2 = read.csv("tabela_2.csv")[,-1]
BD2 = BD2[,-1]
BD2 = BD2 %>%
  select(PCB, Idade)
plot(PCB ~ Idade, BD2, pch=19)
# The plot suggests a violation of the homoscedasticity assumption: the
# older the fish, the larger the spread of PCB.
cor(BD2)[2,1]
# b) Fit the model and inspect standardized residuals.
modelo1 = lm(BD2); summary(modelo1)
ris = rstandard(modelo1)
plot(ris ~ Idade, BD2,
     pch = 19,
     ylim = c(-3, 3))
abline(h=c(-2,0,2),
       lty = c(2,1,2),
       col = c('red','black','red'))
# The points do not look like a random cloud scattered around zero.
qqnorm(ris, pch = 19); abline(0,1)
# There is violation of normality and heteroscedasticity.
# c) White's test: regress squared raw residuals on Idade and Idade^2;
#    W = n * R^2 ~ chi-square with df = number of regressors (2).
rbruto_squared = resid(modelo1)**2
Idade_2 = BD2$Idade**2
modelo_aux = lm(rbruto_squared ~ BD2$Idade + Idade_2)
nrow(BD2) * summary(modelo_aux)$r.squared
qchisq(0.95, 2)
# By White's test at the 5% significance level the model is heteroscedastic.
# d) Log-transform the response and redo the diagnostics.
BD2$PCB = log(BD2$PCB)
modelo2 = lm(BD2); summary(modelo2)
plot(PCB ~ Idade, BD2, pch=19)
ris = rstandard(modelo2)
plot(ris ~ BD2$Idade,
     pch=19,
     ylim = c(-3, 3)); abline(h=c(-2,0,2))
rbruto_squared = resid(modelo2)**2
Idade_2 = BD2$Idade**2
modelo_aux = lm(rbruto_squared ~ BD2$Idade + Idade_2)
nrow(BD2) * summary(modelo_aux)$r.squared
qchisq(0.95, 2)
qqnorm(ris, pch = 19); abline(0,1)
# The problems were fixed by the transformation.
# e) Ratio of the two R^2 values.
summary(modelo2)$r.squared / summary(modelo1)$r.squared
# Roughly a 28% gain in the model's explanatory power.
# f) Could the model be corrected another way?
# Yes, by dividing the model through by 1/x_i (weighted least squares).
# 3) Regression of second exam grade on first exam grade -------------------
# a) Load, fit, and inspect standardized residuals.
BD3 = readr::read_table2("tabela_3.tsv") %>%
  select(nota_2, nota_1)
modelo1 = lm(BD3); summary(modelo1)
plot(BD3, pch = 19)
ris = rstandard(modelo1)
plot(ris ~ BD3$nota_1, pch=19, ylim = c(-3,3)); abline(h = c(-2,0,2))
# b) White's test for heteroscedasticity.
resid_squared = resid(modelo1)**2
nota_1_sqd = BD3$nota_1**2
modelo_aux = lm(resid_squared ~ BD3$nota_1 + nota_1_sqd)
# 1 - hypotheses
#     H0: homoscedastic
#     H1: heteroscedastic
# 2 - test statistic
#     W = n * R^2 ~ chi-square, df = number of independent variables
nrow(BD3) * summary(modelo_aux)$r.squared
# 3 - critical region
#     W > chi-square(alpha, df)
qchisq(0.95, 2)
# 4 - decision
#     Not in the critical region: the model is homoscedastic.
# c) Interpretation of the fit:
#    Yhat_i = 18.3833 + 0.7743*x_1
#    18.3833 is the estimated second-exam grade of a student who scored 0
#    on the first exam; 0.7743 is the estimated increase in the
#    second-exam grade for each additional point on the first exam.
# 4) Outliers and influential points ---------------------------------------
BD4 = readr::read_table2("tabela_4.tsv")
BD4 = BD4[,-1]
BD4 = BD4[,c(2,1)]
# a) Scatterplot with observation labels.
plot(BD4,pch = 19)
text(BD4, as.character(1:nrow(BD4)),pos = 3)
# Observations 11, 12 and 13 sit far from the bulk of the data, a sign
# that they may be outliers.
# b) Fit and compute influence diagnostics.
modelo1 = lm(BD4);summary(modelo1)
distancia_cook = cooks.distance(modelo1)
ris = rstandard(modelo1)
# Cook's distance plot
plot(distancia_cook, pch=19, ylim=c(0,2))
id = which(abs(distancia_cook)>0.1);id
text(id, distancia_cook[id],id,pos = 3)
# Standardized residuals vs fitted values
plot(ris~fitted(modelo1), pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(abs(ris)>2);id
text(fitted(modelo1)[id], ris[id],id, pos = 4)
# Standardized residuals vs Cook's distance
plot(ris ~ distancia_cook,
     pch = 19,
     ylim = c(-3,3),
     xlim = c(0,1.5))
abline(h = c(-2,2),
       v = c(0.1),
       col = c("blue","blue","red","red"))
id = which(abs(distancia_cook)>0.1);id
text(distancia_cook[id], ris[id],id,pos = 3)
# Points 11 and 12 are outliers; 11, 12 and 13 are candidate influential
# points.
qqnorm(ris, pch = 19);abline(0,1)
# c) Refit without the outliers (|ris| > 2) and redo the diagnostics.
id = which(abs(ris)>2);id
BD4c = BD4[-id,]
modelo2 = lm(BD4c); summary(modelo2)
distancia_cook = cooks.distance(modelo2)
ris = rstandard(modelo2)
# Cook's distance plot
plot(distancia_cook, pch=19, ylim=c(0,10))
id = which(abs(distancia_cook)>1);id
text(id, distancia_cook[id],id,pos = 3)
# Standardized residuals vs fitted values
plot(ris~fitted(modelo2), pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(abs(ris)>2);id
text(fitted(modelo2)[id], ris[id],id, pos = 4)
# Standardized residuals vs Cook's distance
plot(ris ~ distancia_cook,
     pch = 19,
     ylim = c(-5,5),
     xlim = c(0,10))
abline(h = c(-2,2),
       v = c(1),
       col = c("blue","blue","red","red"))
id = which(abs(distancia_cook)>1);id
text(distancia_cook[id], ris[id],id,pos = 3)
qqnorm(ris, pch = 19);abline(0,1)
shapiro.test(ris)
# d) Also drop observation 11 of the reduced data and check again.
BD4d = BD4c[-11,]
modelo3 = lm(BD4d); summary(modelo3)
distancia_cook = cooks.distance(modelo3)
ris = rstandard(modelo3)
# Cook's distance plot
plot(distancia_cook, pch=19, ylim=c(0,10))
id = which(abs(distancia_cook)>1);id
text(id, distancia_cook[id],id,pos = 3)
# Standardized residuals vs fitted values
plot(ris~fitted(modelo3), pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(abs(ris)>2);id
text(fitted(modelo3)[id], ris[id],id, pos = 4)
# Standardized residuals vs Cook's distance
plot(ris ~ distancia_cook,
     pch = 19,
     ylim = c(-5,5),
     xlim = c(0,10))
abline(h = c(-2,2),
       v = c(1),
       col = c("blue","blue","red","red"))
id = which(abs(distancia_cook)>1);id
text(distancia_cook[id], ris[id],id,pos = 3)
qqnorm(ris, pch = 19); abline(0,1)
# No violation of homoscedasticity; the standardized residuals look normal.
# 5) Multicollinearity, leverage and influence -----------------------------
BD5 = readr::read_table2("tabela_5.tsv")
BD5 = BD5[,-1]
# a) Pairwise scatterplots.
pairs(BD5)
# Y~X1: moderate negative relation; Y~X2: weak negative relation;
# Y~X3: moderate negative relation.
# b) Correlations, full fit, and VIFs via auxiliary regressions.
cor(BD5)
modelo1 = lm(BD5);summary(modelo1)
pairs(BD5, pch = 19)
modelo_aux_x1 = lm(BD5[,-1]); summary(modelo_aux_x1)
modelo_aux_x2 = lm(BD5[,-1][,c(2,1,3)]); summary(modelo_aux_x2)
modelo_aux_x3 = lm(BD5[,-1][,c(3,1,2)]); summary(modelo_aux_x3)
r2_x1 = summary(modelo_aux_x1)$r.squared
r2_x2 = summary(modelo_aux_x2)$r.squared
r2_x3 = summary(modelo_aux_x3)$r.squared
vif_x1 = 1 / (1-r2_x1);vif_x1
vif_x2 = 1 / (1-r2_x2);vif_x2
vif_x3 = 1 / (1-r2_x3);vif_x3
# At the 5% level all variables have a significant relation; all VIF < 10,
# so there is no evidence of multicollinearity.
# c) Standardized residuals vs fitted values, labelling large residuals.
y_chapeu = fitted(modelo1)
ris = rstandard(modelo1)
plot(ris ~y_chapeu, pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(ris>2);id
text(y_chapeu[id],ris[id],id,pos = 3)
# d) Leverage: hat values against the 2p/n cutoff (n = 19 here).
hii = hatvalues(modelo1)
p = sum(hii)
h_pc = 2*p/19; h_pc
subset(hii,subset = hii>h_pc) #id = which(hii>h_pc)
# NOTE(review): 1:length(hii) misbehaves for empty vectors; seq_along(hii)
# would be safer.
k = 1:length(hii)
plot(hii~k, pch = 19); abline(h = h_pc, col = "red")
# Observation 1 has high leverage (high potential to influence the fit).
# e) Influence via Cook's distance.
ris = rstandard(modelo1)
distancia_cook = cooks.distance(modelo1)
plot(distancia_cook, pch = 19)
id = which(distancia_cook>0.1)
text(as.numeric(names(distancia_cook[id])), distancia_cook[id], id, pos = 2)
x = boxplot(distancia_cook, pch = 19)
text(1,distancia_cook[id],id, pos = 2)
# Observation 12 is highly influential.
# f) Standardized residuals vs Cook's distance.
plot(ris ~ distancia_cook,
     pch = 19,
     ylim = c(-10,10),
     xlim = c(0,10))
abline(h = c(-2,2),
       v = c(1),
       col = c("blue","blue","red","red"))
id = which(abs(ris)>2);id
text(distancia_cook[id], ris[id],id,pos = 3)
# Observation 12 is an outlier but has Cook's D < 1.
# g) Refit without observation 12.
BD5a = BD5[-12,]
modelo2 = lm(BD5a); summary(modelo2)
# R^2 improved.
# 6) Diagnostics and refit after removing influential points ---------------
BD6 = readr::read_csv("tabela_6.csv")
BD6 = BD6[,c(-1,-2)]
# a) Fit (column 2 as response, column 1 as predictor) and check residuals.
modelo1 = lm(BD6[,c(2,1)]); summary(modelo1)
ris = rstandard(modelo1)
y_chapeu = fitted(modelo1)
plot(ris ~ y_chapeu, pch = 19);abline(h = c(-2,0,2))
qqnorm(ris, pch = 19);abline(0,1)
# Normality, homoscedasticity and independence do not appear violated.
# b) Standardized residuals vs Cook's distance.
distancia_cook = cooks.distance(modelo1)
ris = rstandard(modelo1)
plot(ris ~ distancia_cook,
     pch = 19,
     ylim = c(-3,3),
     xlim = c(0,1.5))
abline(h = c(-2,2),
       v = 1)
id = which(abs(ris) >= 2);id
text(distancia_cook[id], ris[id],id, pos = 4,col = "red")
# c) Drop the two observations with the largest Cook's distance and refit.
# NOTE(review): `decreasing = T` uses the reassignable shorthand; TRUE is
# safer.
maiores = sort(distancia_cook,decreasing = T)
maiores = maiores[1:2]
BD6c = BD6[-as.numeric(names(maiores)),]
modelo2 = lm(BD6c[,c(2,1)]); summary(modelo2)
# The gain was not very significant.
# f) Since modelo2 shows a small improvement in adjusted R^2, modelo2
# would be chosen.
# 7) Heteroscedasticity and weighted transformation ------------------------
BD7 = readr::read_table2("tabela_7.tsv")
BD7 = BD7[,-1]
# Overwrite row 25 with corrected values.
BD7[25,] = c(192,17)
# a) Scatterplot.
plot(BD7, pch = 19)
# For larger values of vendas (sales) there seems to be a small increase
# in the variability of gasto_com_propaganda (advertising spend).
# b) Fit and inspect standardized residuals.
modelo1 = lm(BD7); summary(modelo1)
ris = rstandard(modelo1); y_chapeu = fitted(modelo1)
plot(ris~y_chapeu, pch = 19, ylim= c(-3,3)); abline(h = c(-2,0,2))
# A proportionally increasing spread is noticeable.
# c) White's test.
resid_squared = resid(modelo1)**2
gasto_sqd = BD7$gasto_com_propaganda**2
modelo_aux = lm(resid_squared ~ BD7$gasto_com_propaganda + gasto_sqd)
# 1 - hypotheses
#     H0: homoscedastic
#     H1: heteroscedastic
# 2 - test statistic
#     W = n * R^2 ~ chi-square, df = number of independent variables
nrow(BD7) * summary(modelo_aux)$r.squared
# 3 - critical region
#     W > chi-square(alpha, df)
qchisq(0.95, 2)
# 4 - decision
#     In the critical region, so the model is heteroscedastic.
# d) Weighted transformation: divide the model through by X_i.
#    Yi/Xi = B0/Xi + B1, i.e. with Y' = Y/X and X' = 1/X the model becomes
#    Y' = B1 + B0 * X'.
vendas_t = BD7$vendas / BD7$gasto_com_propaganda
gasto_t = 1 / BD7$gasto_com_propaganda
# NOTE(review): the transformed model is never actually fitted here (no
# lm(vendas_t ~ gasto_t)); the equation below presumably comes from that
# fit -- confirm.
# Y' = B1_hat + B0_hat * X'  =>  Yi_hat = B1_hat*Xi + B0_hat
# Yi_hat = 7.9511 + 51.9267*Xi
| /modelos_lineares_1/lista_4/script.R | no_license | Lyncoln/UFF | R | false | false | 11,538 | r |
# 1) Multiple linear regression with multicollinearity check ---------------
# a) This is a multiple linear regression problem.
# b) Load the data (dropping the first, id-like, column) and inspect
#    pairwise scatterplots plus the correlation of the response with each
#    predictor (first row of the correlation matrix).
BD1 = readr::read_table2("tabela_1.tsv")
BD1 = BD1[,-1]
pairs(BD1, pch = 19)
cor(BD1)[1,]
# c) Correlations among the predictors only.
cor(BD1[,-1])
# Manuf and Pop have |cor| > 0.90, an indication of multicollinearity.
# d) Fit the full model.
modelo1 = lm(data = BD1); summary(modelo1)
# Comparing the individual t-tests with the overall F-test alone does not
# allow a conclusion about multicollinearity.
# e) Auxiliary regressions: regress each predictor on the remaining ones.
#    dplyr::select() reorders the columns so the chosen predictor becomes
#    the response of lm().
BD1_aux = BD1[,-1]
modelo_temp = lm(BD1_aux)
modelo_manuf = lm(dplyr::select(BD1_aux, Manuf, dplyr::everything()))
modelo_pop = lm(dplyr::select(BD1_aux, Pop, dplyr::everything()))
modelo_vento = lm(dplyr::select(BD1_aux, Vento, dplyr::everything()))
modelo_precip = lm(dplyr::select(BD1_aux, Precip, dplyr::everything()))
modelo_dias = lm(dplyr::select(BD1_aux, N_dias, dplyr::everything()))
# R^2 of each auxiliary regression, used to compute the VIFs below.
r2_temp = summary(modelo_temp)$r.squared
r2_manuf = summary(modelo_manuf)$r.squared
r2_pop = summary(modelo_pop)$r.squared
r2_vento = summary(modelo_vento)$r.squared
r2_precip = summary(modelo_precip)$r.squared
r2_dias = summary(modelo_dias)$r.squared
# Variance inflation factor from the R^2 of an auxiliary regression of one
# predictor on the others: VIF = 1 / (1 - R^2).  VIF > 10 is the usual
# rule-of-thumb flag for multicollinearity.
# (Fixed: use `<-` instead of `=` for top-level assignment.)
#
# Args:
#   x: numeric R^2 value(s) in [0, 1); vectorized.
# Returns: numeric VIF value(s), same length as x.
vif <- function(x) {
  1 / (1 - x)
}
# Variance inflation factor of each predictor.
vif_temp = vif(r2_temp)
vif_manuf = vif(r2_manuf)
vif_pop = vif(r2_pop)
vif_vento = vif(r2_vento)
vif_precip = vif(r2_precip)
vif_dias = vif(r2_dias)
# Manuf and Pop show VIF > 10, which indicates multicollinearity.
# f) Drop the Pop variable (column 4), judged the least informative for
#    this problem, then refit; modelo3 keeps only the first two predictors.
BD1_f = BD1[,-4]
modelo2 = lm(data = BD1_f); summary(modelo2)
modelo3 = lm(data = BD1_f[,-c(4,5,6)]); summary(modelo3)
# Fitted model: Yi_hat = 77.237 - 1.048*Xi1 + 0.024304*Xi2
summary(modelo3)$r.squared
# 2) Regression of PCB concentration on fish age ---------------------------
# a) Load the data, keep only PCB and Idade (age), and inspect the scatter.
BD2 = read.csv("tabela_2.csv")[,-1]
BD2 = BD2[,-1]
BD2 = BD2 %>%
  select(PCB, Idade)
plot(PCB ~ Idade, BD2, pch=19)
# The plot suggests a violation of the homoscedasticity assumption: the
# older the fish, the larger the spread of PCB.
cor(BD2)[2,1]
# b) Fit the model and inspect standardized residuals.
modelo1 = lm(BD2); summary(modelo1)
ris = rstandard(modelo1)
plot(ris ~ Idade, BD2,
     pch = 19,
     ylim = c(-3, 3))
abline(h=c(-2,0,2),
       lty = c(2,1,2),
       col = c('red','black','red'))
# The points do not look like a random cloud scattered around zero.
qqnorm(ris, pch = 19); abline(0,1)
# There is violation of normality and heteroscedasticity.
# c) White's test: regress squared raw residuals on Idade and Idade^2;
#    W = n * R^2 ~ chi-square with df = number of regressors (2).
rbruto_squared = resid(modelo1)**2
Idade_2 = BD2$Idade**2
modelo_aux = lm(rbruto_squared ~ BD2$Idade + Idade_2)
nrow(BD2) * summary(modelo_aux)$r.squared
qchisq(0.95, 2)
# By White's test at the 5% significance level the model is heteroscedastic.
# d) Log-transform the response and redo the diagnostics.
BD2$PCB = log(BD2$PCB)
modelo2 = lm(BD2); summary(modelo2)
plot(PCB ~ Idade, BD2, pch=19)
ris = rstandard(modelo2)
plot(ris ~ BD2$Idade,
     pch=19,
     ylim = c(-3, 3)); abline(h=c(-2,0,2))
rbruto_squared = resid(modelo2)**2
Idade_2 = BD2$Idade**2
modelo_aux = lm(rbruto_squared ~ BD2$Idade + Idade_2)
nrow(BD2) * summary(modelo_aux)$r.squared
qchisq(0.95, 2)
qqnorm(ris, pch = 19); abline(0,1)
# The problems were fixed by the transformation.
# e) Ratio of the two R^2 values.
summary(modelo2)$r.squared / summary(modelo1)$r.squared
# Roughly a 28% gain in the model's explanatory power.
# f) Could the model be corrected another way?
# Yes, by dividing the model through by 1/x_i (weighted least squares).
# 3) Regression of second exam grade on first exam grade -------------------
# a) Load, fit, and inspect standardized residuals.
BD3 = readr::read_table2("tabela_3.tsv") %>%
  select(nota_2, nota_1)
modelo1 = lm(BD3); summary(modelo1)
plot(BD3, pch = 19)
ris = rstandard(modelo1)
plot(ris ~ BD3$nota_1, pch=19, ylim = c(-3,3)); abline(h = c(-2,0,2))
# b) White's test auxiliary regression.
resid_squared = resid(modelo1)**2
nota_1_sqd = BD3$nota_1**2
modelo_aux = lm(resid_squared ~ BD3$nota_1 + nota_1_sqd)
# 1 - hipoteses
# h0 homocedastico
# h1 heterocedastico
# 2 - estatistica de teste
# W = N * R2 ~ qchisq_gl
# gl numero de variaveis indepednentes
nrow(BD3) * summary(modelo_aux)$r.squared
# 3 - RC
# w > chisq alpha, glq
qchisq(0.95, 2)
# 4 - tomada de decisΓ£o
# NΓ£o pertece a RC o modelo Γ© homocedastico
#c)
# Ychap_i = 18,3833 + 0,7743*x_1
# o valor estimado da note na segunda avaliacao do aluno que tirou 0
# na primeira prova Γ© de 18,3833
# 0,7743 Γ© o valor estimado de acrΓ©scimo para a nota na segunda avaliΓ§Γ£o
# para cada ponto na primeira avaliaΓ§Γ£o
# 4) ----------------------------------------------------------------------
BD4 = readr::read_table2("tabela_4.tsv")
BD4 = BD4[,-1]
BD4 = BD4[,c(2,1)]
#a)
plot(BD4,pch = 19)
text(BD4, as.character(1:nrow(BD4)),pos = 3)
#As observaΓ§Γ΅es 11,12 e 13 se encontram afastadas da grande massa de dados, isso Γ© um indΓcio de ser
#outilers.
#b)
modelo1 = lm(BD4);summary(modelo1)
distancia_cook = cooks.distance(modelo1)
ris = rstandard(modelo1)
#Plot distΓ’ncia de cook
plot(distancia_cook, pch=19, ylim=c(0,2))
id = which(abs(distancia_cook)>0.1);id
text(id, distancia_cook[id],id,pos = 3)
#Plot de ris
plot(ris~fitted(modelo1), pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(abs(ris)>2);id
text(fitted(modelo1)[id], ris[id],id, pos = 4)
#Plot de ris + distancia de cook
plot(ris ~ distancia_cook,
pch = 19,
ylim = c(-3,3),
xlim = c(0,1.5))
abline(h = c(-2,2),
v = c(0.1),
col = c("blue","blue","red","red"))
id = which(abs(distancia_cook)>0.1);id
text(distancia_cook[id], ris[id],id,pos = 3)
#Observado que os pontos 11 e 12 sΓ£o outliers e os pontos 11 12 e 13 sΓ£o possΓveis pontos influentes
qqnorm(ris, pch = 19);abline(0,1)
#c)
id = which(abs(ris)>2);id
BD4c = BD4[-id,]
modelo2 = lm(BD4c); summary(modelo2)
distancia_cook = cooks.distance(modelo2)
ris = rstandard(modelo2)
#Plot distΓ’ncia de cook
plot(distancia_cook, pch=19, ylim=c(0,10))
id = which(abs(distancia_cook)>1);id
text(id, distancia_cook[id],id,pos = 3)
#Plot de ris
plot(ris~fitted(modelo2), pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(abs(ris)>2);id
text(fitted(modelo2)[id], ris[id],id, pos = 4)
#Plot de ris + distancia de cook
plot(ris ~ distancia_cook,
pch = 19,
ylim = c(-5,5),
xlim = c(0,10))
abline(h = c(-2,2),
v = c(1),
col = c("blue","blue","red","red"))
id = which(abs(distancia_cook)>1);id
text(distancia_cook[id], ris[id],id,pos = 3)
qqnorm(ris, pch = 19);abline(0,1)
shapiro.test(ris)
#d)
BD4d = BD4c[-11,]
modelo3 = lm(BD4d); summary(modelo3)
distancia_cook = cooks.distance(modelo3)
ris = rstandard(modelo3)
#Plot distΓ’ncia de cook
plot(distancia_cook, pch=19, ylim=c(0,10))
id = which(abs(distancia_cook)>1);id
text(id, distancia_cook[id],id,pos = 3)
#Plot de ris
plot(ris~fitted(modelo3), pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(abs(ris)>2);id
text(fitted(modelo3)[id], ris[id],id, pos = 4)
#Plot de ris + distancia de cook
plot(ris ~ distancia_cook,
pch = 19,
ylim = c(-5,5),
xlim = c(0,10))
abline(h = c(-2,2),
v = c(1),
col = c("blue","blue","red","red"))
id = which(abs(distancia_cook)>1);id
text(distancia_cook[id], ris[id],id,pos = 3)
qqnorm(ris, pch = 19); abline(0,1)
#NΓ£o houve violaΓ§Γ£o na hipΓ³tese de homos. e a distribuiΓ§Γ£o dos ris Γ© normal.
# 5) ----------------------------------------------------------------------
BD5 = readr::read_table2("tabela_5.tsv")
BD5 = BD5[,-1]
#a)
pairs(BD5)
#Y~X1 -> relaΓ§Γ£o negativa moderada; Y~X2 -> relaΓ§Γ£o negativa fraca; Y~X3 -> relaΓ§Γ£o negativa moderada
#b)
cor(BD5)
modelo1 = lm(BD5);summary(modelo1)
pairs(BD5, pch = 19)
modelo_aux_x1 = lm(BD5[,-1]); summary(modelo_aux_x1)
modelo_aux_x2 = lm(BD5[,-1][,c(2,1,3)]); summary(modelo_aux_x2)
modelo_aux_x3 = lm(BD5[,-1][,c(3,1,2)]); summary(modelo_aux_x3)
r2_x1 = summary(modelo_aux_x1)$r.squared
r2_x2 = summary(modelo_aux_x2)$r.squared
r2_x3 = summary(modelo_aux_x3)$r.squared
vif_x1 = 1 / (1-r2_x1);vif_x1
vif_x2 = 1 / (1-r2_x2);vif_x2
vif_x3 = 1 / (1-r2_x3);vif_x3
#A um nΓvel de significΓ’ncia de 5%, todas as variΓ‘vies possuem uma relaΓ§Γ£o significativa
#Todos os vif <10; logo nΓ£o existe evidΓͺncias de multicolineariedade
#c)
y_chapeu = fitted(modelo1)
ris = rstandard(modelo1)
plot(ris ~y_chapeu, pch = 19, ylim = c(-3,3));abline(h = c(-2,0,2))
id = which(ris>2);id
text(y_chapeu[id],ris[id],id,pos = 3)
#d)
hii = hatvalues(modelo1)
p = sum(hii)
h_pc = 2*p/19; h_pc
subset(hii,subset = hii>h_pc) #id = which(hii>h_pc)
k = 1:length(hii)
plot(hii~k, pch = 19); abline(h = h_pc, col = "red")
#A observaΓ§Γ£o 1 tem alta alavancagem(alto potencial para influenciar o ajuste do modelo)
#e)
ris = rstandard(modelo1)
distancia_cook = cooks.distance(modelo1)
plot(distancia_cook, pch = 19)
id = which(distancia_cook>0.1)
text(as.numeric(names(distancia_cook[id])), distancia_cook[id], id, pos = 2)
x = boxplot(distancia_cook, pch = 19)
text(1,distancia_cook[id],id, pos = 2)
#A observaΓ§Γ£o 12 possui alta influΓͺncia
#f)
#Plot de ris + distancia de cook
plot(ris ~ distancia_cook,
pch = 19,
ylim = c(-10,10),
xlim = c(0,10))
abline(h = c(-2,2),
v = c(1),
col = c("blue","blue","red","red"))
id = which(abs(ris)>2);id
text(distancia_cook[id], ris[id],id,pos = 3)
#A observaΓ§Γ£o 12 Γ© um outlier mas possui D < 1
#g)
#Iremos retirar a observaΓ§Γ£o 12
BD5a = BD5[-12,]
modelo2 = lm(BD5a); summary(modelo2)
#Melhorou R2
# 6) ----------------------------------------------------------------------
BD6 = readr::read_csv("tabela_6.csv")
BD6 = BD6[,c(-1,-2)]
#a)
modelo1 = lm(BD6[,c(2,1)]); summary(modelo1)
ris = rstandard(modelo1)
y_chapeu = fitted(modelo1)
plot(ris ~ y_chapeu, pch = 19);abline(h = c(-2,0,2))
qqnorm(ris, pch = 19);abline(0,1)
#As hipΓ³teses de normalidade, homost. e independencia aparentemente nΓ£o foram violadas.
#b)
distancia_cook = cooks.distance(modelo1)
ris = rstandard(modelo1)
plot(ris ~ distancia_cook,
pch = 19,
ylim = c(-3,3),
xlim = c(0,1.5))
abline(h = c(-2,2),
v = 1)
id = which(abs(ris) >= 2);id
text(distancia_cook[id], ris[id],id, pos = 4,col = "red")
#c)
maiores = sort(distancia_cook,decreasing = T)
maiores = maiores[1:2]
BD6c = BD6[-as.numeric(names(maiores)),]
modelo2 = lm(BD6c[,c(2,1)]); summary(modelo2)
#NΓ£o houve um acrescimo muito significativo.
#f)
#Como houve uma pequena melhora no R2.ajustado para o modelo2, escolheria-se o modelo2
# 7) ----------------------------------------------------------------------
BD7 = readr::read_table2("tabela_7.tsv")
BD7 = BD7[,-1]
BD7[25,] = c(192,17)
#a)
plot(BD7, pch = 19)
#Para maiores valores de vendas, parece haver um pequeno aumento na variabilidade do gosto_com_propaganda
#b)
modelo1 = lm(BD7); summary(modelo1)
ris = rstandard(modelo1); y_chapeu = fitted(modelo1)
plot(ris~y_chapeu, pch = 19, ylim= c(-3,3)); abline(h = c(-2,0,2))
#Γ possΓvel notar um aumento proporcional crescente
#c)
resid_squared = resid(modelo1)**2
gasto_sqd = BD7$gasto_com_propaganda**2
modelo_aux = lm(resid_squared ~ BD7$gasto_com_propaganda + gasto_sqd)
# 1 - hipoteses
# h0 homocedastico
# h1 heterocedastico
# 2 - estatistica de teste
# W = N * R2 ~ qchisq_gl
# gl numero de variaveis indepednentes
nrow(BD7) * summary(modelo_aux)$r.squared
# 3 - RC
# w > chisq alpha, glq
qchisq(0.95, 2)
# 4 - tomada de decisΓ£o
# Pertence a regiΓ£o crΓtica, logo conclui-se heterosedasticidade
#d)
# Yi_hat/Xi = B0_hat/Xi + B1_hat*Xi/Xi Y' = Yi_hat/Xi Xi' = 1/Xi
# Yi_hat' = B1_hat + BO_hat * Xi'
vendas_t = BD7$vendas / BD7$gasto_com_propaganda
gasto_t = 1 / BD7$gasto_com_propaganda
# Yi_hat' = B1_hat + BO_hat * Xi'
# Yi_hat = B1_hat*Xi + BO_hat
# Yi_hat = 7.9511 + 51.9267*Xi
|
\name{MARSSresiduals.tt1}
\alias{MARSSresiduals.tt1}
\title{ MARSS One-Step-Ahead Residuals }
\description{
Calculates the standardized (or auxiliary) one-step-ahead residuals, aka the innovations residuals and their variance. Not exported. Access this function with \code{residuals(object, conditioning="t-1")}.
}
\usage{
MARSSresiduals.tt1(object, method=c("SS"), normalize=FALSE, silent=FALSE)
}
\arguments{
\item{object}{ An object of class \code{\link{marssMLE}}.}
\item{method}{ Algorithm to use. Currently only "SS". }
\item{normalize}{ TRUE/FALSE }
\item{silent}{ If TRUE, don't print inversion warnings. }
}
\value{
A list with the following components
\item{residuals}{ The model residuals conditioned on the data up to time \eqn{t-1} and the set of model parameters. Called the innovations. Residuals associated with missing data will appear as NA. }
\item{var.residuals}{ The variance of the model residuals as a n x n x T matrix. The variance exists for all t values including missing data. }
\item{std.residuals}{ The Cholesky standardized model residuals as a n x T matrix. This is \code{residuals} multiplied by the inverse of the Cholesky decomposition of \code{var.residuals}. }
\item{mar.residuals}{ The marginal standardized model residuals as a n x T matrix. This is \code{residuals} multiplied by the inverse of the diagonal matrix formed by the square-root of the diagonal of \code{var.residuals}. }
\item{msg}{ Any warning messages. This will be printed unless Object$control$trace = -1 (suppress all error messages). }
}
\details{
This function returns the conditional expected value (mean) and variance of the model one-step-ahead residuals. 'conditional' means in this context, conditioned on the observed data up to time \eqn{t-1} and a set of parameters.
\strong{Model residuals}
\eqn{\mathbf{v}_t}{v_t} is the difference between the data and the predicted data at time \eqn{t} given \eqn{\mathbf{x}_t}{x_t}:
\deqn{ \mathbf{v}_t = \mathbf{y}_t - \mathbf{Z} \mathbf{x}_t - \mathbf{a} }{ v_t = y_t - Z x_t - a}
The observed model residuals \eqn{\hat{\mathbf{v}}_t}{hatv_t} are the difference between the observed data and the predicted data at time \eqn{t} using the fitted model. \code{MARSSresiduals.tt1} fits the model using the data up to time \eqn{t-1}. So
\deqn{ \hat{\mathbf{v}}_t = \mathbf{y}_t - \mathbf{Z}\tilde{\mathbf{x}}_t^{t-1} - \mathbf{a} }{ hatv_t = y_t - Z xtt1 - a}
where \eqn{\tilde{\mathbf{x}}_t^{t-1}}{xtt1} is the expected value of \eqn{\mathbf{X}_t}{X_t} conditioned on the data from 1 to \eqn{t-1} from the Kalman filter. \eqn{\mathbf{y}_t}{y_t} are your data and missing values will appear as NA. These will be returned in \code{residuals}.
\code{var.residuals} returned by the function is the conditional variance of the residuals conditioned on the data up to \eqn{t-1} and the parameter set \eqn{\Theta}. The conditional variance is
\deqn{ \hat{\Sigma}_t = \mathbf{R}+\mathbf{Z}_t \mathbf{V}_t^{t-1} \mathbf{Z}_t^\top }{hatSigma_t = R + Z_t Vtt1 t(Z_t)}
where \eqn{\mathbf{V}_t^{t-1}}{Vtt1} is the variance of \eqn{\mathbf{X}_t}{X_t} conditioned on the data up to time \eqn{t-1}. This is returned by \code{\link{MARSSkf}} in \code{Vtt1}.
\strong{Standardized residuals}
\code{std.residuals} are Cholesky standardized residuals. These are the residuals multiplied by the inverse of the Cholesky decomposition of the variance matrix of the residuals:
\deqn{ \hat{\Sigma}_t^{-1/2} \hat{\mathbf{v}}_t}{ hatSigma_t^{-1/2} hatv_t. }
These residuals are uncorrelated.
The interpretation of the Cholesky standardized residuals is not straight-forward when the Q and R variance-covariance matrices are non-diagonal. The residuals which were generated by a non-diagonal variance-covariance matrices are transformed into orthogonal residuals in MVN(0,I) space. For example, if v is 2x2 correlated errors with variance-covariance matrix R. The transformed residuals (from this function) for the i-th row of v is a combination of the row 1 effect and the row 1 effect plus the row 2 effect. So in this case, row 2 of the transformed residuals would not be regarded as solely the row 2 residual but rather how different row 2 is from row 1, relative to expected. If the errors are highly correlated, then the Cholesky standardized residuals can look rather non-intuitive.
\code{mar.residuals} are the marginal standardized residuals. These are the residuals multiplied by the inverse of the diagonal matrix formed from the square-root of the diagonal of the variance matrix of the residuals:
\deqn{ \textrm{dg}(\hat{\Sigma}_t)^{-1/2} \hat{v}_t}{ dg(hatSigma_t)^{-1/2} hatv_t}, where 'dg(A)' is the square matrix formed from the diagonal of A, aka \code{diag(diag(A))}. These residuals will be correlated if the variance matrix is non-diagonal.
\strong{Normalized residuals}
If \code{normalize=FALSE}, the unconditional variance of \eqn{V_t} and \eqn{W_t} are R and Q and the model is assumed to be written as
\deqn{ y_t = Z x_t + a + v_t}
\deqn{ x_t = B x_{t-1} + u + w_t}
If normalize=TRUE, the model is assumed to be written
\deqn{ y_t = Z x_t + a + Hv_t}
\deqn{ x_t = B x_{t-1} + u + Gw_t}
with the variance of \eqn{V_t} and \eqn{W_t} equal to I (identity).
\code{residuals.marssMLE} returns the residuals defined as in the first equations. To get the residuals defined as Harvey et al. (1998) define them (second equations), then use \code{normalize=TRUE}. In that case the unconditional variance of residuals will be I instead of R and Q. Note, that the `normalized' residuals are not the same as the `standardized' residuals. In former, the unconditional residuals have a variance of I while in the latter it is the conditional residuals that have a variance of I.
}
\author{
Eli Holmes, NOAA, Seattle, USA.
eli(dot)holmes(at)noaa(dot)gov
}
\seealso{ \code{\link{MARSSresiduals.tT}}, \code{\link{fitted.marssMLE}}, \code{\link{plot.marssMLE}} }
\examples{
dat <- t(harborSeal)
dat <- dat[c(2,11),]
MLEobj <- MARSS(dat)
residuals(MLEobj, conditioning="t-1")$std.residuals
}
\references{
R. H. Shumway and D. S. Stoffer (2006). Section on the calculation of the likelihood of state-space models in Time series analysis and its applications. Springer-Verlag, New York.
Holmes, E. E. 2014. Computation of standardized residuals for (MARSS) models. Technical Report. arXiv:1411.0045.
}
| /man/MARSSresiduals_tt1.Rd | permissive | abaudelle/MARSS | R | false | false | 6,407 | rd | \name{MARSSresiduals.tt1}
\alias{MARSSresiduals.tt1}
\title{ MARSS One-Step-Ahead Residuals }
\description{
Calculates the standardized (or auxiliary) one-step-ahead residuals, aka the innovations residuals and their variance. Not exported. Access this function with \code{residuals(object, conditioning="t-1")}.
}
\usage{
MARSSresiduals.tt1(object, method=c("SS"), normalize=FALSE, silent=FALSE)
}
\arguments{
\item{object}{ An object of class \code{\link{marssMLE}}.}
\item{method}{ Algorithm to use. Currently only "SS". }
\item{normalize}{ TRUE/FALSE }
\item{silent}{ If TRUE, don't print inversion warnings. }
}
\value{
A list with the following components
\item{residuals}{ The model residuals conditioned on the data up to time \eqn{t-1} and the set of model parameters. Called the innovations. Residuals associated with missing data will appear as NA. }
\item{var.residuals}{ The variance of the model residuals as a n x n x T matrix. The variance exists for all t values including missing data. }
\item{std.residuals}{ The Cholesky standardized model residuals as a n x T matrix. This is \code{residuals} multiplied by the inverse of the Cholesky decomposition of \code{var.residuals}. }
\item{mar.residuals}{ The marginal standardized model residuals as a n x T matrix. This is \code{residuals} multiplied by the inverse of the diagonal matrix formed by the square-root of the diagonal of \code{var.residuals}. }
\item{msg}{ Any warning messages. This will be printed unless Object$control$trace = -1 (suppress all error messages). }
}
\details{
This function returns the conditional expected value (mean) and variance of the model one-step-ahead residuals. 'conditional' means in this context, conditioned on the observed data up to time \eqn{t-1} and a set of parameters.
\strong{Model residuals}
\eqn{\mathbf{v}_t}{v_t} is the difference between the data and the predicted data at time \eqn{t} given \eqn{\mathbf{x}_t}{x_t}:
\deqn{ \mathbf{v}_t = \mathbf{y}_t - \mathbf{Z} \mathbf{x}_t - \mathbf{a} }{ v_t = y_t - Z x_t - a}
The observed model residuals \eqn{\hat{\mathbf{v}}_t}{hatv_t} are the difference between the observed data and the predicted data at time \eqn{t} using the fitted model. \code{MARSSresiduals.tt1} fits the model using the data up to time \eqn{t-1}. So
\deqn{ \hat{\mathbf{v}}_t = \mathbf{y}_t - \mathbf{Z}\tilde{\mathbf{x}}_t^{t-1} - \mathbf{a} }{ hatv_t = y_t - Z xtt1 - a}
where \eqn{\tilde{\mathbf{x}}_t^{t-1}}{xtt1} is the expected value of \eqn{\mathbf{X}_t}{X_t} conditioned on the data from 1 to \eqn{t-1} from the Kalman filter. \eqn{\mathbf{y}_t}{y_t} are your data and missing values will appear as NA. These will be returned in \code{residuals}.
\code{var.residuals} returned by the function is the conditional variance of the residuals conditioned on the data up to \eqn{t-1} and the parameter set \eqn{\Theta}. The conditional variance is
\deqn{ \hat{\Sigma}_t = \mathbf{R}+\mathbf{Z}_t \mathbf{V}_t^{t-1} \mathbf{Z}_t^\top }{hatSigma_t = R + Z_t Vtt1 t(Z_t)}
where \eqn{\mathbf{V}_t^{t-1}}{Vtt1} is the variance of \eqn{\mathbf{X}_t}{X_t} conditioned on the data up to time \eqn{t-1}. This is returned by \code{\link{MARSSkf}} in \code{Vtt1}.
\strong{Standardized residuals}
\code{std.residuals} are Cholesky standardized residuals. These are the residuals multiplied by the inverse of the Cholesky decomposition of the variance matrix of the residuals:
\deqn{ \hat{\Sigma}_t^{-1/2} \hat{\mathbf{v}}_t}{ hatSigma_t^{-1/2} hatv_t. }
These residuals are uncorrelated.
The interpretation of the Cholesky standardized residuals is not straight-forward when the Q and R variance-covariance matrices are non-diagonal. The residuals which were generated by a non-diagonal variance-covariance matrices are transformed into orthogonal residuals in MVN(0,I) space. For example, if v is 2x2 correlated errors with variance-covariance matrix R. The transformed residuals (from this function) for the i-th row of v is a combination of the row 1 effect and the row 1 effect plus the row 2 effect. So in this case, row 2 of the transformed residuals would not be regarded as solely the row 2 residual but rather how different row 2 is from row 1, relative to expected. If the errors are highly correlated, then the Cholesky standardized residuals can look rather non-intuitive.
\code{mar.residuals} are the marginal standardized residuals. These are the residuals muliplied by the inverse of the diagonal matrix formed from the square-root of the diagonal of the variance matrix of the residuals:
\deqn{ \textrm{dg}(\hat{\Sigma}_t)^{-1/2} \hat{v}_t}{ dg(hatSigma_t)^{-1/2} hatv_t}, where 'dg(A)' is the square matrix formed from the diagonal of A, aka \code{diag(diag(A))}. These residuals will be correlated if the variance matrix is non-diagonal.
\strong{Normalized residuals}
If \code{normalize=FALSE}, the unconditional variance of \eqn{V_t} and \eqn{W_t} are R and Q and the model is assumed to be written as
\deqn{ y_t = Z x_t + a + v_t}
\deqn{ x_t = B x_{t-1} + u + w_t}
If normalize=TRUE, the model is assumed to be written
\deqn{ y_t = Z x_t + a + Hv_t}
\deqn{ x_t = B x_{t-1} + u + Gw_t}
with the variance of \eqn{V_t} and \eqn{W_t} equal to I (identity).
\code{residuals.marssMLE} returns the residuals defined as in the first equations. To get the residuals defined as Harvey et al. (1998) define them (second equations), then use \code{normalize=TRUE}. In that case the unconditional variance of residuals will be I instead of R and Q. Note, that the `normalized' residuals are not the same as the `standardized' residuals. In former, the unconditional residuals have a variance of I while in the latter it is the conditional residuals that have a variance of I.
}
\author{
Eli Holmes, NOAA, Seattle, USA.
eli(dot)holmes(at)noaa(dot)gov
}
\seealso{ \code{\link{MARSSresiduals.tT}}, \code{\link{fitted.marssMLE}}, \code{\link{plot.marssMLE}} }
\examples{
dat <- t(harborSeal)
dat <- dat[c(2,11),]
MLEobj <- MARSS(dat)
residuals(MLEobj, conditioning="t-1")$std.residuals
}
\references{
R. H. Shumway and D. S. Stoffer (2006). Section on the calculation of the likelihood of state-space models in Time series analysis and its applications. Springer-Verlag, New York.
Holmes, E. E. 2014. Computation of standardized residuals for (MARSS) models. Technical Report. arXiv:1411.0045.
}
|
context("Test clmi")
# Input-validation tests for clmi(): each expect_error() call passes one
# deliberately malformed argument and asserts that clmi() rejects it.
test_that("clmi throws errors correctly", {
  # NOTE(review): lod.var is assigned but never used below — presumably a
  # leftover from an earlier test revision; confirm before removing.
  lod.var <- toy_data$lod
  # formula supplied as a string instead of a formula object
  expect_error(clmi("poll ~ case_cntrl + smoking + gender, toy_data", lod, 1))
  # lod column name that does not exist in toy_data
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lodu, 1))
  # NULL in place of the lod variable
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, NULL, 1))
  # transformation using an undefined name `a` on the response
  expect_error(clmi(a * poll ~ case_cntrl + smoking + gender, toy_data,
                    lod, 1))
  a <- 2
  fn <- function(x) a * x
  # a valid user-defined transformation of the response must be accepted
  expect_true(!is.null(clmi(fn(poll) ~ case_cntrl + smoking + gender, toy_data,
                            lod, 1)))
  # data supplied as a string instead of a data frame
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, "toy_data", lod, 1))
  # non-numeric seed
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lod, "a"))
  # invalid number of imputations (0)
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lod, 1, 0))
  # number of imputations given as a vector
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lod,
                    1, c(2, 4)))
  # lod column missing from the data entirely
  df <- toy_data
  df$lod <- NULL
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
  # Test for improper lod values
  df <- toy_data
  df$poll <- log(df$poll)
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
  # Test that missing covariates throw an error
  df <- toy_data
  df$smoking[1] <- NA
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
  # Test that factors cause an error
  df <- toy_data
  df$gender <- as.factor(df$gender)
  expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
})
| /tests/testthat/test_clmi.R | no_license | umich-cphds/lodi | R | false | false | 1,559 | r | context("Test clmi")
test_that("clmi throws errors correctly", {
lod.var <- toy_data$lod
expect_error(clmi("poll ~ case_cntrl + smoking + gender, toy_data", lod, 1))
expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lodu, 1))
expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, NULL, 1))
expect_error(clmi(a * poll ~ case_cntrl + smoking + gender, toy_data,
lod, 1))
a <- 2
fn <- function(x) a * x
expect_true(!is.null(clmi(fn(poll) ~ case_cntrl + smoking + gender, toy_data,
lod, 1)))
expect_error(clmi(poll ~ case_cntrl + smoking + gender, "toy_data", lod, 1))
expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lod, "a"))
expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lod, 1, 0))
expect_error(clmi(poll ~ case_cntrl + smoking + gender, toy_data, lod,
1, c(2, 4)))
df <- toy_data
df$lod <- NULL
expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
# Test for improper lod values
df <- toy_data
df$poll <- log(df$poll)
expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
# Test that missing covariates throw an error
df <- toy_data
df$smoking[1] <- NA
expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
# Test that factors cause an error
df <- toy_data
df$gender <- as.factor(df$gender)
expect_error(clmi(poll ~ case_cntrl + smoking + gender, df, lod, 1))
})
|
library(keras)
library(dplyr)
library(reshape2)
## setting working directories
## wd - with other scripts
## data_folder - with Mx_1x1
wd <- "/home/as/Pulpit/ADS/Analizy/Own/CNN LC/LC CNN/"
data_folder <- "/home/as/Pulpit/ADS/Analizy/Own/CNN LC/LC CNN/death_rates/Mx_1x1"
setwd(wd)
ObsYear = 1999 # last year of training set
T0 <- 10 # number of years back used to forecast
model_type = "CNN"
source("0_dataReading.R")
#source("0_dataReading_Sal.R")
# number of countries
N <- HMD_final %>% select(Country) %>% distinct() %>% nrow()
#model specifications
source("0_b_CNN_model_specification.R")
source("0_c_LSTM_model_specification.R")
# data scaling (MinMaxScaling on whole dataset) of logmx
val.min <- HMD_final %>% summarize(min(logmx)) %>% unlist()
val.max <- HMD_final %>% summarize(max(logmx)) %>% unlist()
HMD_final <- HMD_final %>% mutate(val = (logmx - val.min)/(val.max-val.min))
## transforming HMD data to NN input format
# Reshape raw mortality data into CNN training tensors for one population.
#
# @param data.raw Long-format data with columns Year, Age, Gender, Country and
#   val (min-max scaled log mortality).
# @param gender "Male" or "Female".
# @param country HMD country code.
# @param T0 Look-back window length (number of past years per sample).
# @param ObsYear Last year included in the training window.
# @return list(xt, YT, YT_year): xt is an (n.train, T0, n.ages) input array,
#   YT an (n.train, n.ages) target matrix, YT_year the calendar year of each
#   target curve.
data.preprocessing.CNNs <- function(data.raw, gender, country, T0, ObsYear = 1999) {
  mort_rates <- data.raw %>% filter(Gender == gender, Country == country) %>% select(Year, Age, val)
  # wide format: one row per year, one column per age
  mort_rates <- dcast(mort_rates, Year ~ Age, value.var = "val")
  train.rates <- mort_rates %>% filter(Year <= ObsYear) %>% select(-Year) %>% as.matrix()
  YT_year <- mort_rates %>% filter(Year <= ObsYear) %>% select(Year) %>% unlist() %>% unname()
  # the first T0 years are used only as inputs (fix: was hard-coded to 10)
  YT_year <- tail(YT_year, -T0)
  n.train <- nrow(train.rates) - (T0 - 1) - 1  # number of training samples
  n.ages <- ncol(train.rates)                  # generalizes the hard-coded 100 age classes
  xt <- array(NA, c(n.train, T0, n.ages))
  YT <- array(NA, c(n.train, n.ages))
  for (t0 in seq_len(n.train)) {
    xt[t0, , ] <- train.rates[t0:(t0 + T0 - 1), ]
    YT[t0, ] <- train.rates[t0 + T0, ]
  }
  list(xt, YT, YT_year)
}
## creating the training set (all observation up to 1999) based on mx values only (with data.preprocessing.CNNs function)
## Each yt observation is equal to a whole mortality curve and xt was equal to ten previous mortality curves (matrix 10:100).
## It was done only when all ten previous curves where available for given country.
Genders <- c("Male","Female")
Countries <- HMD_final %>% select(Country) %>% distinct() %>% unlist()
VecGenders <- vector()
VecCountries <- vector()
#VecYears = vector()
ListData <- list()
#HMD_final = HMD_final[with(HMD_final, order(Gender, Country)), ]
obs <-0
for(c in 1:length(Countries)){
for(g in 1:2){
data <- data.preprocessing.CNNs(HMD_final,Genders[g],Countries[c],T0, ObsYear)
n <- dim(data[[1]])[1]
obs <- obs + n
ListData[[(c-1)*2 + g]] <- data
# VecGenders (with 0 or 1 for each observation)
VecGenders<- c(VecGenders,rep(g-1,n))
#years_fore = HMD_final[Gender == Genders[g] & Country ==Countries[c] & Year<2000]$Year%>%unique()%>%as.numeric()
#VecYears = c(VecYears,years_fore[11:length(years_fore)])
# VecCounties (with number from 0 to 37 corresponding to each country for each observation)
VecCountries <- c(VecCountries,rep(c-1,n))
}
}
## Binding observations from different countries into one dataset:
## transform ListData (output of data.preprocessing.CNNs per population) into
## x.train = list(inputs, country indices, gender indicators) and y.train.
x.train <- array(NA, dim = c(obs, dim(ListData[[1]][[1]])[c(2, 3)]))
y.train <- array(NA, dim = c(obs, 1, dim(ListData[[1]][[2]])[2]))
obsYearVec <- vector()
counter <- 0
# seq_along(ListData) replaces 1:(g*c), which relied on leftover loop variables
for (i in seq_along(ListData)) {
  n <- dim(ListData[[i]][[1]])[1]
  obsYearVec <- c(obsYearVec, ListData[[i]][[3]])
  for (j in seq_len(n)) {
    x.train[counter + j, , ] <- ListData[[i]][[1]][j, , ]
    y.train[counter + j, 1, ] <- ListData[[i]][[2]][j, ]
  }
  counter <- counter + n
}
# sort samples to be in temporal order
OrderByYear <- order(obsYearVec)
x.train.sorted <- x.train[OrderByYear, , ]
# drop = FALSE keeps the singleton middle dimension, so the hard-coded
# reshape dim(y.train.sorted) <- c(2662, 1, 100) is no longer needed
y.train.sorted <- y.train[OrderByYear, , , drop = FALSE]
VecGenders.sorted <- VecGenders[OrderByYear]
VecCountries.sorted <- VecCountries[OrderByYear]
x.train <- list(x.train.sorted, VecCountries.sorted, VecGenders.sorted)
y.train <- y.train.sorted
# model
# Instantiate the requested network architecture (CNN or LSTM).
if (model_type == "CNN") {
  model <- CNN(N, T0)
} else if (model_type == "LSTM") {
  model <- LSTM(N, T0)
} else {
  # fix: error message typo ("arcitecture" -> "architecture")
  stop("Wrong architecture specified within model_type variable")
}
modelName = paste(model_type ,T0, sep ="_")
fileName <- paste("./CallBack/best_model_", modelName, sep="")
summary(model)
# define callbacks
model_callback <- callback_model_checkpoint(fileName, monitor = "val_loss", verbose = 1, save_best_only = TRUE, save_weights_only = TRUE)
lr_callback <- callback_reduce_lr_on_plateau(factor=.90, patience =
50, verbose=1, cooldown = 5, min_lr = 0.00005)
CBs <- list(model_callback, lr_callback)
# gradient descent fitting
t1 <- proc.time()
fit <- model %>% fit(x=x.train, y=y.train, epochs = 2000, batch_size =16,
verbose = 2, validation_split = 0.05, shuffle = T,callbacks=CBs) #in paper 2000 there is no difference at this moment
proc.time()-t1
# in-sample error (validation)
fit$metrics$val_loss%>%min()
load_model_weights_hdf5(model, fileName)
## recursive prediction
# testing data pre-processing
testData <- HMD_final %>% filter(Year > (ObsYear - 10))
#vali.Y <- testData %>% fi[which(all_mortV$Year > ObsYear),]
# Recursively forecast mortality one year at a time up to final.year.
#
# Each step predicts year t from the previous T0 observed/predicted curves,
# splices the prediction back into the data, and repeats.
#
# @param ObsYear Last observed year; forecasting starts at ObsYear + 1.
# @param all_mort2 Mortality data covering at least T0 years before ObsYear + 1.
# @param gender "Male" or "Female".
# @param country_name HMD country code.
# @param country_index Zero-based country index fed to the embedding input.
# @param T0 Look-back window length.
# @param val.min,val.max Min/max of logmx used for the original scaling.
# @param model.p Fitted keras model used for prediction.
# @param final.year Last year to forecast (default 2016, as before).
# @return list(all_mort2, single.years): data with predictions spliced in
#   (only rows of the requested gender/country are kept) and the per-year
#   out-of-sample MSE on the scaled values, times 10^4.
recursive.prediction <- function(ObsYear, all_mort2, gender, country_name, country_index, T0, val.min, val.max, model.p, final.year = 2016) {
  single.years <- array(NA, c(final.year - ObsYear))
  for (ObsYear1 in ((ObsYear + 1):final.year)) {
    data1 <- all_mort2 %>% filter(Year >= (ObsYear1 - T0))  # fix: was hard-coded 10
    data2 <- data.preprocessing.CNNs(data1, gender, country_name, T0, ObsYear1)
    x.vali <- data2[[1]]
    # gender embedding: Male = 0, Female = 1 (matches the VecGenders encoding)
    yy <- if (gender == "Female") 1 else 0
    x.vali <- list(x.vali, rep(country_index, dim(x.vali)[1]), rep(yy, dim(x.vali)[1]))
    y.vali <- data2[[2]]
    # bug fix: predict with the model passed as argument, not the global `model`
    predicted_val <- model.p %>% predict(x.vali) %>% as.vector()
    # invert the min-max scaling back to log mortality
    predicted_logmx <- (predicted_val * (val.max - val.min) + val.min)
    ## error calculated on transformed data, in line with Salvatore comment
    single.years[ObsYear1 - ObsYear] <- round(10^4 * mean((predicted_val - y.vali)^2), 4)
    # splice the prediction back in; NOTE(review): these filters also drop rows
    # of other genders/countries, so the returned data holds one population only
    predicted <- all_mort2 %>% filter(Year == ObsYear1, Gender == gender, Country == country_name)
    keep <- all_mort2 %>% filter(Year != ObsYear1, Gender == gender, Country == country_name)
    predicted$logmx <- predicted_logmx
    predicted$mx <- exp(predicted$logmx)
    predicted$val <- predicted_val
    all_mort2 <- rbind(keep, predicted)
    all_mort2 <- all_mort2 %>% arrange(Year, Age)
  }
  list(all_mort2, single.years)
}
#example prediction
pred.CHE.F <- recursive.prediction(1999, testData, "Female", "CHE",(match("CHE", Countries)-1),T0, val.min, val.max, model)
pred.POL.M <- recursive.prediction(1999, testData, "Male", "POL",(match("POL", Countries)-1),T0, val.min, val.max, model)
| /02 - Life/Mortality_forecasting/LC CNN/_scripts/Mortality_Forecast_CNN.R | no_license | tongliaowuqilong/grupa_ads | R | false | false | 6,934 | r | library(keras)
library(dplyr)
library(reshape2)
## setting working directories
## wd - with other scripts
## data_folder - with Mx_1x1
wd <- "/home/as/Pulpit/ADS/Analizy/Own/CNN LC/LC CNN/"
data_folder <- "/home/as/Pulpit/ADS/Analizy/Own/CNN LC/LC CNN/death_rates/Mx_1x1"
setwd(wd)
ObsYear = 1999 # last year of training set
T0 <- 10 # number of years back used to forecast
model_type = "CNN"
source("0_dataReading.R")
#source("0_dataReading_Sal.R")
# number of countries
N <- HMD_final %>% select(Country) %>% distinct() %>% nrow()
#model specifications
source("0_b_CNN_model_specification.R")
source("0_c_LSTM_model_specification.R")
# data scaling (MinMaxScaling on whole dataset) of logmx
val.min <- HMD_final %>% summarize(min(logmx)) %>% unlist()
val.max <- HMD_final %>% summarize(max(logmx)) %>% unlist()
HMD_final <- HMD_final %>% mutate(val = (logmx - val.min)/(val.max-val.min))
## transforming HMD data to NN input format
# Reshape long-format mortality data for one country/gender into CNN tensors.
#
# Builds sliding windows over calendar years: each sample's predictor is the
# block of T0 consecutive year-by-age rows (T0 x 100) and its response is the
# following year's 100-value mortality curve (the min-max scaled log-rates in
# column `val`). Assumes exactly 100 age classes per year -- TODO confirm
# against the upstream age grid.
#
# @param data.raw Long-format data with Gender, Country, Year, Age, val.
# @param gender   Gender to subset ("Male" or "Female").
# @param country  Country code to subset.
# @param T0       Lookback window length in years.
# @param ObsYear  Last calendar year included in the training window.
# @return list(xt, YT, YT_year): predictor array (n.train x T0 x 100),
#   response matrix (n.train x 100), and the calendar year of each response.
data.preprocessing.CNNs <- function(data.raw, gender, country, T0, ObsYear=1999){
  mort_rates <- data.raw %>% filter(Gender == gender, Country == country) %>% select(Year, Age, val)
  mort_rates <- dcast(mort_rates, Year ~ Age, value.var="val")
  train.rates <- mort_rates %>% filter(Year <= ObsYear) %>% select(-Year) %>% as.matrix()
  YT_year <- mort_rates %>% filter(Year <= ObsYear) %>% select(Year) %>% unlist() %>% unname()
  # Drop the first T0 years: they only ever appear as predictors, never as
  # responses. (Previously hard-coded to 10, which silently broke the year
  # labels whenever T0 != 10.)
  YT_year <- tail(YT_year, -T0)
  n.train <- nrow(train.rates)-(T0-1)-1 # number of training samples
  xt <- array(NA, c(n.train, T0, 100))
  YT <- array(NA, c(n.train, 100))
  # seq_len() is empty when n.train == 0, unlike 1:n.train which would
  # iterate over c(1, 0).
  for (t0 in seq_len(n.train)){
    xt[t0,,] <- train.rates[t0:(t0+T0-1), ]
    YT[t0,] <- train.rates[t0+T0,]
  }
  list(xt, YT, YT_year)
}
## creating the training set (all observation up to 1999) based on mx values only (with data.preprocessing.CNNs function)
## Each yt observation is equal to a whole mortality curve and xt was equal to ten previous mortality curves (matrix 10:100).
## It was done only when all ten previous curves where available for given country.
Genders <- c("Male","Female")
Countries <- HMD_final %>% select(Country) %>% distinct() %>% unlist()
VecGenders <- vector()
VecCountries <- vector()
#VecYears = vector()
ListData <- list()
#HMD_final = HMD_final[with(HMD_final, order(Gender, Country)), ]
obs <-0
for(c in 1:length(Countries)){
for(g in 1:2){
data <- data.preprocessing.CNNs(HMD_final,Genders[g],Countries[c],T0, ObsYear)
n <- dim(data[[1]])[1]
obs <- obs + n
ListData[[(c-1)*2 + g]] <- data
# VecGenders (with 0 or 1 for each observation)
VecGenders<- c(VecGenders,rep(g-1,n))
#years_fore = HMD_final[Gender == Genders[g] & Country ==Countries[c] & Year<2000]$Year%>%unique()%>%as.numeric()
#VecYears = c(VecYears,years_fore[11:length(years_fore)])
# VecCounties (with number from 0 to 37 corresponding to each country for each observation)
VecCountries <- c(VecCountries,rep(c-1,n))
}
}
## Binding observations form different countries into one dataset
## transformation of xtrain from data.preprocessing.CNNs to list of previous xtrain, veccountries and vecgender
x.train <- array(NA, dim=c(obs, dim(ListData[[1]][[1]])[c(2,3)]))
y.train <- array(NA, dim=c(obs,1,dim(ListData[[1]][[2]])[2]))
obsYearVec <- vector()
counter = 0
for (i in 1:(g*c)){
n <- dim(ListData[[i]][[1]])[1]
obsYearVec <- c(obsYearVec,ListData[[i]][[3]] )
for(j in 1:n){
x.train[counter+j,,] <- ListData[[i]][[1]][j,,]
y.train[counter+j,1,] <- ListData[[i]][[2]][j,]
}
counter <- counter + n
}
# sort to be in a temporal order
OrderByYear <- order(obsYearVec)
x.train.sorted <- x.train[OrderByYear,,]
y.train.sorted <- y.train[OrderByYear,,]
dim(y.train.sorted) <- c(2662,1,100)
VecGenders.sorted <- VecGenders[OrderByYear]
VecCountries.sorted <- VecCountries[OrderByYear]
x.train <- list(x.train.sorted, VecCountries.sorted, VecGenders.sorted)
y.train <- y.train.sorted
# model
if(model_type == "CNN"){
model <- CNN(N, T0)
} else if(model_type == "LSTM"){
model <- LSTM(N,T0)
} else
{
stop("Wrong arcitecture specified within model_type variable")
}
modelName = paste(model_type ,T0, sep ="_")
fileName <- paste("./CallBack/best_model_", modelName, sep="")
summary(model)
# define callbacks
model_callback <- callback_model_checkpoint(fileName, monitor = "val_loss", verbose = 1, save_best_only = TRUE, save_weights_only = TRUE)
lr_callback <- callback_reduce_lr_on_plateau(factor=.90, patience =
50, verbose=1, cooldown = 5, min_lr = 0.00005)
CBs <- list(model_callback, lr_callback)
# gradient descent fitting
t1 <- proc.time()
fit <- model %>% fit(x=x.train, y=y.train, epochs = 2000, batch_size =16,
verbose = 2, validation_split = 0.05, shuffle = T,callbacks=CBs) #in paper 2000 there is no difference at this moment
proc.time()-t1
# in-sample error (validation)
fit$metrics$val_loss%>%min()
load_model_weights_hdf5(model, fileName)
## recursive prediction
# testing data pre-processing
testData <- HMD_final %>% filter(Year > (ObsYear - 10))
#vali.Y <- testData %>% fi[which(all_mortV$Year > ObsYear),]
# Recursively forecast mortality from ObsYear+1 through 2016.
#
# For each target year, the network predicts the next mortality curve from
# the T0 preceding curves; each prediction is spliced back into `all_mort2`
# so later years are forecast from earlier forecasts (multi-step recursion).
#
# @param ObsYear       Last observed (non-forecast) year.
# @param all_mort2     Mortality data with Year, Age, Gender, Country,
#                      val (scaled), logmx and mx columns.
# @param gender        Gender to forecast ("Male" or "Female").
# @param country_name  Country to forecast.
# @param country_index Zero-based country embedding index fed to the network.
# @param T0            Lookback window length in years.
# @param val.min,val.max Min-max scaling bounds, used to invert the scaling.
# @param model.p       Fitted keras model used for prediction.
# @return list(all_mort2 with forecasts spliced in,
#              per-year MSE on the scaled data x 10^4).
recursive.prediction <- function(ObsYear, all_mort2, gender, country_name, country_index, T0, val.min, val.max, model.p){
  single.years <- array(NA, c(2016-ObsYear))
  for (ObsYear1 in ((ObsYear+1):2016)){
    # Keep only the window of years needed to predict ObsYear1.
    # (Previously hard-coded to 10 years; now follows T0.)
    data1 <- all_mort2 %>% filter(Year >= (ObsYear1-T0))
    data2 <- data.preprocessing.CNNs(data1, gender, country_name,T0, ObsYear1)
    # MinMaxScaler (with minimum and maximum from above)
    x.vali <- data2[[1]]
    if (gender=="Female"){yy <- 1}else{yy <- 0}
    x.vali <- list(x.vali, rep(country_index, dim(x.vali)[1]), rep(yy, dim(x.vali)[1]))
    y.vali <- data2[[2]]
    # BUG FIX: predict with the model passed in as `model.p`; the original
    # body silently used the global `model`, making the parameter a no-op.
    predicted_val <- model.p %>% predict(x.vali) %>% as.vector()
    predicted_logmx <- (predicted_val*(val.max-val.min)+ val.min)
    ## error calculated on transformed (min-max scaled) data
    single.years[ObsYear1-ObsYear] <- round(10^4*mean((predicted_val-y.vali)^2),4)
    # Replace the target year's rows for this country/gender with forecasts.
    predicted <- all_mort2 %>% filter(Year==ObsYear1, Gender == gender, Country == country_name)
    keep <- all_mort2 %>% filter(Year!=ObsYear1, Gender == gender, Country == country_name)
    predicted$logmx <- predicted_logmx
    predicted$mx <- exp(predicted$logmx)
    predicted$val <- predicted_val
    all_mort2 <- rbind(keep,predicted)
    all_mort2 <- all_mort2 %>% arrange(Year, Age)
  }
  list(all_mort2, single.years)
}
#example prediction
pred.CHE.F <- recursive.prediction(1999, testData, "Female", "CHE",(match("CHE", Countries)-1),T0, val.min, val.max, model)
pred.POL.M <- recursive.prediction(1999, testData, "Male", "POL",(match("POL", Countries)-1),T0, val.min, val.max, model)
|
library("DESeq2")
library("tximport")
library("EnsDb.Hsapiens.v86")
library("tidyverse")
library("ggrepel")
library("gridExtra")
library("genefilter")
library("geneplotter")
library("VennDiagram")
library("viridis")
library("biomaRt")
pdf("HeLa_overexpression_TRF2_shRNA_overlaps.pdf")
samples <- read_tsv("20190103_received_samples.txt")
samples$cell <- sapply(samples$LongSampleName, function(x) str_split(x, pattern="-")[[1]][[1]])
samples$sample_replicate_id <- paste0(samples$Sample, "_", samples$Replicate)
samples_HeLa <- filter(samples, cell == "HeLa")
files <- file.path("salmon_on_hg38_output_cdna_ncrna", samples_HeLa$sample_replicate_id, "quant.sf")
names(files) <- samples_HeLa$sample_replicate_id
tx2gene <- values(transcripts(EnsDb.Hsapiens.v86))[, c("tx_id", "gene_id")]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene, ignoreTxVersion = TRUE)
ddsTxi <- DESeqDataSetFromTximport(txi,
colData = samples_HeLa,
design = ~ Sample)
ddsTxi <- DESeq(ddsTxi)
results_TERRA <- results(ddsTxi, contrast = c("Sample", "TERRA", "EV"))
results_ARIA <- results(ddsTxi, contrast = c("Sample", "ARIA", "EV"))
results_HP <- results(ddsTxi, contrast = c("Sample", "HP", "EV"))
genes_TERRA <- rownames(results_TERRA[which(results_TERRA$padj < 0.01), ])
genes_ARIA <- rownames(results_ARIA[which(results_ARIA$padj < 0.01), ])
genes_HP <- rownames(results_HP[which(results_HP$padj < 0.01), ])
setwd("~/TRF2_siRNA")
samples <- read_tsv("sample_info.txt")
samples$condition <- paste(samples$siRNA, samples$TimePoint, sep = "_")
samples$condition <- relevel(factor(samples$condition), ref = "TRF2_0")
files <- file.path("salmon_on_hg19_output", samples$Filename, "quant.sf")
names(files) <- samples$Filename
tx2gene <- values(transcripts(EnsDb.Hsapiens.v86))[, c("tx_id", "gene_id")]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene, ignoreTxVersion = TRUE)
ddsTxi <- DESeqDataSetFromTximport(txi,
colData = samples,
design = ~ condition)
ddsTxi <- DESeq(ddsTxi)
# Test for genes differentially expressed between timePoints in the TRF2 shRNA data
res48h <- results(ddsTxi, contrast = c("condition", "TRF2_48", "TRF2_0"))
res96h <- results(ddsTxi, contrast = c("condition", "TRF2_96", "TRF2_0"))
# Test for genes differentially expressed betweent timePoints in the control shRNA data
res48hControl <- results(ddsTxi, contrast = c("condition", "control_48", "control_0"))
res96hControl <- results(ddsTxi, contrast = c("condition", "control_96", "control_0"))
# Explicitly test for genes with a low LFC in the control timecourse
res48hControl_lowLFC <- results(ddsTxi,
lfcThreshold = log2(1.25),
altHypothesis = "lessAbs",
contrast = c("condition", "control_0", "control_48"))
res96hControl_lowLFC <- results(ddsTxi,
lfcThreshold = log2(1.25),
altHypothesis = "lessAbs",
contrast = c("condition", "control_0", "control_96"))
selected_genes_48h <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res48h[which(res48h$padj < 0.1),])))
selected_genes_96h <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res96h[which(res96h$padj < 0.1),])))
selected_genes_48h_control <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res48h[which(res48hControl$padj < 0.1),])))
selected_genes_96h_control <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res96h[which(res96hControl$padj < 0.1),])))
selected_genes_48h_control_low_LFC <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res48h[which(res48hControl_lowLFC$padj < 0.1),])))
selected_genes_96h_control_low_LFC <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res96h[which(res96hControl_lowLFC$padj < 0.1),])))
# Include only the genes that also have a low LFC in the control samples
selected_genes_48h <- selected_genes_48h[(names(selected_genes_48h) %in% names(selected_genes_48h_control_low_LFC))]
selected_genes_96h <- selected_genes_96h[(names(selected_genes_96h) %in% names(selected_genes_96h_control_low_LFC))]
selected_genes_48h
genes_TRF2_48h <- names(selected_genes_48h)
genes_TRF2_96h <- names(selected_genes_96h)
setwd("~/scratch/TALES_RNAseq_analysis/")
venn <- draw.quad.venn(area1 = length(genes_TERRA),
area2 = length(genes_ARIA),
area3 = length(genes_HP),
area4 = length(genes_TRF2_96h),
n12 = length(intersect(genes_TERRA,
genes_ARIA)),
n13 = length(intersect(genes_TERRA,
genes_HP)),
n14 = length(intersect(genes_TERRA,
genes_TRF2_96h)),
n23 = length(intersect(genes_ARIA,
genes_HP)),
n24 = length(intersect(genes_ARIA,
genes_TRF2_96h)),
n34 = length(intersect(genes_HP,
genes_TRF2_96h)),
n123 = length(Reduce(intersect,
list(genes_TERRA,
genes_ARIA,
genes_HP))),
n124 = length(Reduce(intersect,
list(genes_TERRA,
genes_ARIA,
genes_TRF2_96h))),
n134 = length(Reduce(intersect,
list(genes_TERRA,
genes_HP,
genes_TRF2_96h))),
n234 = length(Reduce(intersect,
list(genes_ARIA,
genes_HP,
genes_TRF2_96h))),
n1234 = length(Reduce(intersect,
list(genes_TERRA,
genes_ARIA,
genes_HP,
genes_TRF2_96h))),
category = c("TERRA",
"ARIA",
"HP",
"TRF2"),
col = c("red", "blue", "green", "black"))
print(venn)
dev.off()
| /scripts/HeLa_compare_with_TRF2shRNA.R | no_license | fcadete/TALES_RNAseq_analysis | R | false | false | 7,580 | r |
library("DESeq2")
library("tximport")
library("EnsDb.Hsapiens.v86")
library("tidyverse")
library("ggrepel")
library("gridExtra")
library("genefilter")
library("geneplotter")
library("VennDiagram")
library("viridis")
library("biomaRt")
pdf("HeLa_overexpression_TRF2_shRNA_overlaps.pdf")
samples <- read_tsv("20190103_received_samples.txt")
samples$cell <- sapply(samples$LongSampleName, function(x) str_split(x, pattern="-")[[1]][[1]])
samples$sample_replicate_id <- paste0(samples$Sample, "_", samples$Replicate)
samples_HeLa <- filter(samples, cell == "HeLa")
files <- file.path("salmon_on_hg38_output_cdna_ncrna", samples_HeLa$sample_replicate_id, "quant.sf")
names(files) <- samples_HeLa$sample_replicate_id
tx2gene <- values(transcripts(EnsDb.Hsapiens.v86))[, c("tx_id", "gene_id")]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene, ignoreTxVersion = TRUE)
ddsTxi <- DESeqDataSetFromTximport(txi,
colData = samples_HeLa,
design = ~ Sample)
ddsTxi <- DESeq(ddsTxi)
results_TERRA <- results(ddsTxi, contrast = c("Sample", "TERRA", "EV"))
results_ARIA <- results(ddsTxi, contrast = c("Sample", "ARIA", "EV"))
results_HP <- results(ddsTxi, contrast = c("Sample", "HP", "EV"))
genes_TERRA <- rownames(results_TERRA[which(results_TERRA$padj < 0.01), ])
genes_ARIA <- rownames(results_ARIA[which(results_ARIA$padj < 0.01), ])
genes_HP <- rownames(results_HP[which(results_HP$padj < 0.01), ])
setwd("~/TRF2_siRNA")
samples <- read_tsv("sample_info.txt")
samples$condition <- paste(samples$siRNA, samples$TimePoint, sep = "_")
samples$condition <- relevel(factor(samples$condition), ref = "TRF2_0")
files <- file.path("salmon_on_hg19_output", samples$Filename, "quant.sf")
names(files) <- samples$Filename
tx2gene <- values(transcripts(EnsDb.Hsapiens.v86))[, c("tx_id", "gene_id")]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene, ignoreTxVersion = TRUE)
ddsTxi <- DESeqDataSetFromTximport(txi,
colData = samples,
design = ~ condition)
ddsTxi <- DESeq(ddsTxi)
# Test for genes differentially expressed between timePoints in the TRF2 shRNA data
res48h <- results(ddsTxi, contrast = c("condition", "TRF2_48", "TRF2_0"))
res96h <- results(ddsTxi, contrast = c("condition", "TRF2_96", "TRF2_0"))
# Test for genes differentially expressed betweent timePoints in the control shRNA data
res48hControl <- results(ddsTxi, contrast = c("condition", "control_48", "control_0"))
res96hControl <- results(ddsTxi, contrast = c("condition", "control_96", "control_0"))
# Explicitly test for genes with a low LFC in the control timecourse
res48hControl_lowLFC <- results(ddsTxi,
lfcThreshold = log2(1.25),
altHypothesis = "lessAbs",
contrast = c("condition", "control_0", "control_48"))
res96hControl_lowLFC <- results(ddsTxi,
lfcThreshold = log2(1.25),
altHypothesis = "lessAbs",
contrast = c("condition", "control_0", "control_96"))
selected_genes_48h <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res48h[which(res48h$padj < 0.1),])))
selected_genes_96h <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res96h[which(res96h$padj < 0.1),])))
selected_genes_48h_control <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res48h[which(res48hControl$padj < 0.1),])))
selected_genes_96h_control <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res96h[which(res96hControl$padj < 0.1),])))
selected_genes_48h_control_low_LFC <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res48h[which(res48hControl_lowLFC$padj < 0.1),])))
selected_genes_96h_control_low_LFC <- GenomicFeatures::genes(EnsDb.Hsapiens.v86,
filter = GeneIdFilter(rownames(
res96h[which(res96hControl_lowLFC$padj < 0.1),])))
# Include only the genes that also have a low LFC in the control samples
selected_genes_48h <- selected_genes_48h[(names(selected_genes_48h) %in% names(selected_genes_48h_control_low_LFC))]
selected_genes_96h <- selected_genes_96h[(names(selected_genes_96h) %in% names(selected_genes_96h_control_low_LFC))]
selected_genes_48h
genes_TRF2_48h <- names(selected_genes_48h)
genes_TRF2_96h <- names(selected_genes_96h)
setwd("~/scratch/TALES_RNAseq_analysis/")
venn <- draw.quad.venn(area1 = length(genes_TERRA),
area2 = length(genes_ARIA),
area3 = length(genes_HP),
area4 = length(genes_TRF2_96h),
n12 = length(intersect(genes_TERRA,
genes_ARIA)),
n13 = length(intersect(genes_TERRA,
genes_HP)),
n14 = length(intersect(genes_TERRA,
genes_TRF2_96h)),
n23 = length(intersect(genes_ARIA,
genes_HP)),
n24 = length(intersect(genes_ARIA,
genes_TRF2_96h)),
n34 = length(intersect(genes_HP,
genes_TRF2_96h)),
n123 = length(Reduce(intersect,
list(genes_TERRA,
genes_ARIA,
genes_HP))),
n124 = length(Reduce(intersect,
list(genes_TERRA,
genes_ARIA,
genes_TRF2_96h))),
n134 = length(Reduce(intersect,
list(genes_TERRA,
genes_HP,
genes_TRF2_96h))),
n234 = length(Reduce(intersect,
list(genes_ARIA,
genes_HP,
genes_TRF2_96h))),
n1234 = length(Reduce(intersect,
list(genes_TERRA,
genes_ARIA,
genes_HP,
genes_TRF2_96h))),
category = c("TERRA",
"ARIA",
"HP",
"TRF2"),
col = c("red", "blue", "green", "black"))
print(venn)
dev.off()
|
# INTEGRANTES:
# Alexis Bautista
# Bryan Catucuamba
# David JΓ‘come
# Alejandro Naranjo
# Richard Quimbiulco
# Tweets obtenidos de la emisora "La Bruja"
library(twitteR)
library(httr)
require('ROAuth')
require('RCurl')
library(base64enc)
palabrasPositivas <- c("bueno","bonitas","pongan","queremos","mejor")
palabrasNegativas <- c("malo","no","feo","otro","daΓ±ar","horrendo","cambien")
consumer_key <-"RFHeP55qj0ejWv7YiSceWTphX"
consumer_secret<-"YOOUNzPvlJCAIHo23AWGU6hRm9VYxP6AY60H0n3u3dBM44aHZS"
access_token<-"366852754-QE8L7hZs1J0WbtDTImo4P3qxzabRZ6OEY0E9ckOs"
access_secret<-"zUZ3rnyPHCLQxso43A8PvXY1a3jWp81yS8Z2Rr0w96YzB"
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
listaTweets <- searchTwitter(searchString = "labrujaecuador", n=1000)
tweets <- twListToDF(listaTweets)
# Naive lexicon-based polarity scoring: for every tweet, clean the text,
# count words found in the positive/negative word lists, and print whether
# the tweet is positive, negative, or neutral. (Printed labels stay in
# Spanish because they are program output.)
for (i in 1:NROW(tweets)) {
  texto <- tweets[i,1]
  # Strip retweet markers ("RT @user", "via @user")
  sinRT <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", texto)
  # Strip @mentions
  sinCuentas <- gsub("@\\w+", "", sinRT)
  # Strip punctuation
  sinSimbolos <- gsub("[[:punct:]]", "", sinCuentas)
  # Strip digits
  sinNumeros <- gsub("[[:digit:]]", "", sinSimbolos)
  # Strip links (works because the "//" and ":" were removed above)
  sinEnlaces <- gsub("http\\w+", "", sinNumeros)
  # Split the cleaned text into words (one-column data frame)
  v <- strsplit(sinEnlaces," ")
  palabras <- data.frame(v)
  numPositivas <- 0
  numNegativas <- 0
  # Count exact matches against each sentiment lexicon
  for (j in 1:NROW(palabras)) {
    for (k in 1:NROW(palabrasPositivas)) {
      if (palabras[j,1]==palabrasPositivas[k]) {
        numPositivas<-numPositivas+1
      }
    }
    for (k in 1:NROW(palabrasNegativas)) {
      if (palabras[j,1]==palabrasNegativas[k]) {
        numNegativas<-numNegativas+1
      }
    }
  }
  # Polarity metric: positive count minus negative count
  metrica <- numPositivas - numNegativas
  if (metrica > 0) {
    resultado <- paste("Tweet", i , "positivo", sep = " ")
    print(resultado)
  } else if (metrica < 0) {
    resultado <- paste("Tweet", i , "negativo", sep = " ")
    print(resultado)
  } else {
    resultado <- paste("Tweet", i , "neutro", sep = " ")
    print(resultado)
  }
}
| /detectarPolaridad.R | no_license | jdjacomee/polaridad | R | false | false | 2,191 | r | # INTEGRANTES:
# Alexis Bautista
# Bryan Catucuamba
# David JΓ‘come
# Alejandro Naranjo
# Richard Quimbiulco
# Tweets obtenidos de la emisora "La Bruja"
library(twitteR)
library(httr)
require('ROAuth')
require('RCurl')
library(base64enc)
palabrasPositivas <- c("bueno","bonitas","pongan","queremos","mejor")
palabrasNegativas <- c("malo","no","feo","otro","daΓ±ar","horrendo","cambien")
consumer_key <-"RFHeP55qj0ejWv7YiSceWTphX"
consumer_secret<-"YOOUNzPvlJCAIHo23AWGU6hRm9VYxP6AY60H0n3u3dBM44aHZS"
access_token<-"366852754-QE8L7hZs1J0WbtDTImo4P3qxzabRZ6OEY0E9ckOs"
access_secret<-"zUZ3rnyPHCLQxso43A8PvXY1a3jWp81yS8Z2Rr0w96YzB"
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
listaTweets <- searchTwitter(searchString = "labrujaecuador", n=1000)
tweets <- twListToDF(listaTweets)
for (i in 1:NROW(tweets)) {
texto <- tweets[i,1]
#Remover retweets
sinRT <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", texto)
#Remover cuentas
sinCuentas <- gsub("@\\w+", "", sinRT)
#Remover simbolos de puntuacion
sinSimbolos <- gsub("[[:punct:]]", "", sinCuentas)
#Remover numeros
sinNumeros <- gsub("[[:digit:]]", "", sinSimbolos)
#Remover enlaces
sinEnlaces <- gsub("http\\w+", "", sinNumeros)
#Guarda en un vector las palabras del tweet
v <- strsplit(sinEnlaces," ")
palabras <- data.frame(v)
numPositivas <- 0
numNegativas <- 0
for (j in 1:NROW(palabras)) {
for (k in 1:NROW(palabrasPositivas)) {
if (palabras[j,1]==palabrasPositivas[k]) {
numPositivas<-numPositivas+1
}
}
for (k in 1:NROW(palabrasNegativas)) {
if (palabras[j,1]==palabrasNegativas[k]) {
numNegativas<-numNegativas+1
}
}
}
metrica <- numPositivas - numNegativas
if (metrica > 0) {
resultado <- paste("Tweet", i , "positivo", sep = " ")
print(resultado)
} else if (metrica < 0) {
resultado <- paste("Tweet", i , "negativo", sep = " ")
print(resultado)
} else {
resultado <- paste("Tweet", i , "neutro", sep = " ")
print(resultado)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R0.R.R
\name{get.scale}
\alias{get.scale}
\title{Scaling of x-axis}
\usage{
get.scale(scale)
}
\arguments{
\item{scale}{Scale to be adjusted on x-axis. Can be \code{d} (day), \code{w} (week (default)), \code{f} (fortnight), \code{m} (month).}
}
\value{
An integer corresponding to the number of days between each x-axis tickmark.
}
\description{
Internal scaling function to display proper x-axis labels.
}
\details{
Builds the x-axis labels corresponding to a human-friendly level (day, week...).
}
\author{
Pierre-Yves Boelle, Thomas Obadia
}
\keyword{internal}
| /man/get.scale.Rd | no_license | tobadia/R0 | R | false | true | 646 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R0.R.R
\name{get.scale}
\alias{get.scale}
\title{Scaling of x-axis}
\usage{
get.scale(scale)
}
\arguments{
\item{scale}{Scale to be adjusted on x-axis. Can be \code{d} (day), \code{w} (week (default)), \code{f} (fortnight), \code{m} (month).}
}
\value{
An integer corresponding to the number of days between each x-axis tickmark.
}
\description{
Internal scaling function to display proper x-axis labels.
}
\details{
Builds the x-axis labels corresponding to a human-friendly level (day, week...).
}
\author{
Pierre-Yves Boelle, Thomas Obadia
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_feature_one_hot_encoder.R
\name{ft_one_hot_encoder}
\alias{ft_one_hot_encoder}
\title{Feature Transformation -- OneHotEncoder (Transformer)}
\usage{
ft_one_hot_encoder(x, input_col = NULL, output_col = NULL,
drop_last = TRUE, uid = random_string("one_hot_encoder_"), ...)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_col}{The name of the input column.}
\item{output_col}{The name of the output column.}
\item{drop_last}{Whether to drop the last category. Defaults to \code{TRUE}.}
\item{uid}{A character string used to uniquely identify the feature transformer.}
\item{...}{Optional arguments; currently unused.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns a \code{ml_transformer},
a \code{ml_estimator}, or one of their subclasses. The object contains a pointer to
a Spark \code{Transformer} or \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the transformer or estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a transformer is constructed then
immediately applied to the input \code{tbl_spark}, returning a \code{tbl_spark}
}
}
\description{
One-hot encoding maps a column of label indices to a column of binary
vectors, with at most a single one-value. This encoding allows algorithms
which expect continuous features, such as Logistic Regression, to use
categorical features. Typically, used with \code{ft_string_indexer()} to
index a column first.
}
\seealso{
See \url{http://spark.apache.org/docs/latest/ml-features.html} for
more information on the set of transformations available for DataFrame
columns in Spark.
Other feature transformers: \code{\link{ft_binarizer}},
\code{\link{ft_bucketizer}},
\code{\link{ft_chisq_selector}},
\code{\link{ft_count_vectorizer}}, \code{\link{ft_dct}},
\code{\link{ft_elementwise_product}},
\code{\link{ft_feature_hasher}},
\code{\link{ft_hashing_tf}}, \code{\link{ft_idf}},
\code{\link{ft_imputer}},
\code{\link{ft_index_to_string}},
\code{\link{ft_interaction}}, \code{\link{ft_lsh}},
\code{\link{ft_max_abs_scaler}},
\code{\link{ft_min_max_scaler}}, \code{\link{ft_ngram}},
\code{\link{ft_normalizer}}, \code{\link{ft_pca}},
\code{\link{ft_polynomial_expansion}},
\code{\link{ft_quantile_discretizer}},
\code{\link{ft_r_formula}},
\code{\link{ft_regex_tokenizer}},
\code{\link{ft_sql_transformer}},
\code{\link{ft_standard_scaler}},
\code{\link{ft_stop_words_remover}},
\code{\link{ft_string_indexer}},
\code{\link{ft_tokenizer}},
\code{\link{ft_vector_assembler}},
\code{\link{ft_vector_indexer}},
\code{\link{ft_vector_slicer}}, \code{\link{ft_word2vec}}
}
| /man/ft_one_hot_encoder.Rd | permissive | sunxiaomeiwendy/sparklyr | R | false | true | 3,041 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_feature_one_hot_encoder.R
\name{ft_one_hot_encoder}
\alias{ft_one_hot_encoder}
\title{Feature Transformation -- OneHotEncoder (Transformer)}
\usage{
ft_one_hot_encoder(x, input_col = NULL, output_col = NULL,
drop_last = TRUE, uid = random_string("one_hot_encoder_"), ...)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_col}{The name of the input column.}
\item{output_col}{The name of the output column.}
\item{drop_last}{Whether to drop the last category. Defaults to \code{TRUE}.}
\item{uid}{A character string used to uniquely identify the feature transformer.}
\item{...}{Optional arguments; currently unused.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns a \code{ml_transformer},
a \code{ml_estimator}, or one of their subclasses. The object contains a pointer to
a Spark \code{Transformer} or \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the transformer or estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a transformer is constructed then
immediately applied to the input \code{tbl_spark}, returning a \code{tbl_spark}
}
}
\description{
One-hot encoding maps a column of label indices to a column of binary
vectors, with at most a single one-value. This encoding allows algorithms
which expect continuous features, such as Logistic Regression, to use
categorical features. Typically, used with \code{ft_string_indexer()} to
index a column first.
}
\seealso{
See \url{http://spark.apache.org/docs/latest/ml-features.html} for
more information on the set of transformations available for DataFrame
columns in Spark.
Other feature transformers: \code{\link{ft_binarizer}},
\code{\link{ft_bucketizer}},
\code{\link{ft_chisq_selector}},
\code{\link{ft_count_vectorizer}}, \code{\link{ft_dct}},
\code{\link{ft_elementwise_product}},
\code{\link{ft_feature_hasher}},
\code{\link{ft_hashing_tf}}, \code{\link{ft_idf}},
\code{\link{ft_imputer}},
\code{\link{ft_index_to_string}},
\code{\link{ft_interaction}}, \code{\link{ft_lsh}},
\code{\link{ft_max_abs_scaler}},
\code{\link{ft_min_max_scaler}}, \code{\link{ft_ngram}},
\code{\link{ft_normalizer}}, \code{\link{ft_pca}},
\code{\link{ft_polynomial_expansion}},
\code{\link{ft_quantile_discretizer}},
\code{\link{ft_r_formula}},
\code{\link{ft_regex_tokenizer}},
\code{\link{ft_sql_transformer}},
\code{\link{ft_standard_scaler}},
\code{\link{ft_stop_words_remover}},
\code{\link{ft_string_indexer}},
\code{\link{ft_tokenizer}},
\code{\link{ft_vector_assembler}},
\code{\link{ft_vector_indexer}},
\code{\link{ft_vector_slicer}}, \code{\link{ft_word2vec}}
}
|
# Constructor for a bisecting k-means ml_model wrapper.
#
# Wraps a fitted pipeline model in the generic clustering model class, then
# attaches the Spark model summary, the cluster centers (as a data frame
# with one column per feature), and -- when it can be computed -- the
# within-set cost of the fitted clustering.
new_ml_model_bisecting_kmeans <- function(pipeline_model, formula, dataset,
                                          features_col) {
  m <- new_ml_model_clustering(
    pipeline_model = pipeline_model,
    formula = formula,
    dataset = dataset,
    features_col = features_col,
    class = "ml_model_bisecting_kmeans"
  )
  model <- m$model
  m$summary <- model$summary
  # Stack the list of center vectors into a centers-by-features data frame,
  # labelled with the model's feature names.
  m$centers <- model$cluster_centers() %>%
    do.call(rbind, .) %>%
    as.data.frame() %>%
    rlang::set_names(m$feature_names)
  # compute_cost() needs the assembled feature column, so the dataset is run
  # through the first pipeline stage first. possibly_null() turns any failure
  # into NULL, which print() reports as "not computed."
  m$cost <- possibly_null(
    ~ pipeline_model %>%
      ml_stage(1) %>%
      ml_transform(dataset) %>%
      model$compute_cost()
  )()
  m
}
#' @export
# S3 print method for bisecting k-means models: header line, cluster
# centers, then the within-set sum of squared errors (or a note when the
# cost could not be computed at construction time).
print.ml_model_bisecting_kmeans <- function(x, ...) {
  # e.g. "K-means clustering with 4 clusters" (singular for one cluster)
  preamble <- sprintf(
    "K-means clustering with %s %s",
    nrow(x$centers),
    if (nrow(x$centers) == 1) "cluster" else "clusters"
  )
  cat(preamble, sep = "\n")
  print_newline()
  ml_model_print_centers(x)
  print_newline()
  # x$cost is NULL when compute_cost() failed during model construction.
  cat("Within Set Sum of Squared Errors = ",
    if (is.null(x$cost)) "not computed." else x$cost
  )
  # Print methods should return their argument invisibly (see ?print),
  # so print(x) can be used in pipelines without losing the object.
  invisible(x)
}
| /R/ml_model_bisecting_kmeans.R | permissive | EugenioGrant/sparklyr | R | false | false | 1,062 | r | new_ml_model_bisecting_kmeans <- function(pipeline_model, formula, dataset,
features_col) {
m <- new_ml_model_clustering(
pipeline_model = pipeline_model,
formula = formula,
dataset = dataset,
features_col = features_col,
class = "ml_model_bisecting_kmeans"
)
model <- m$model
m$summary <- model$summary
m$centers <- model$cluster_centers() %>%
do.call(rbind, .) %>%
as.data.frame() %>%
rlang::set_names(m$feature_names)
m$cost <- possibly_null(
~ pipeline_model %>%
ml_stage(1) %>%
ml_transform(dataset) %>%
model$compute_cost()
)()
m
}
#' @export
print.ml_model_bisecting_kmeans <- function(x, ...) {
preamble <- sprintf(
"K-means clustering with %s %s",
nrow(x$centers),
if (nrow(x$centers) == 1) "cluster" else "clusters"
)
cat(preamble, sep = "\n")
print_newline()
ml_model_print_centers(x)
print_newline()
cat("Within Set Sum of Squared Errors = ",
if (is.null(x$cost)) "not computed." else x$cost
)
}
|
#################
## Saliva Metabolome Workflow ##
## 1. Data Preprocessing
#################
file.prefix <- "saliva_metabolome_GCTOF_MS_"
output.dir <- "processed_data/"
#### Collect sample meta data ####
datalist <- data
#pool samples do not have unique identifier
# Make duplicated pool-sample labels unique in a raw export sheet.
#
# Pool QC samples in the raw export all share the same label. This locates
# the sheet row whose 8th column contains "label" (the sample-label row) and
# rewrites the whole row through make.unique(), so repeated labels become
# "pool", "pool_1", "pool_2", ... Unique values are left unchanged.
#
# (The original version also added and then dropped a row-number `order`
# column; because that value sat last in the row, make.unique() could never
# alter the data cells through it, so it was pure dead code and is removed.)
#
# @param data A data frame (or object coercible to one) holding the sheet.
# @return The data frame with the label row de-duplicated.
fixpools <- function(data){
  data <- data.frame(data, stringsAsFactors = FALSE)
  myrow <- grep("label", data[, 8])
  poollabel <- as.character(data[myrow, ])
  data[myrow, ] <- make.unique(poollabel, sep = "_")
  return(data)
}
datalist <- lapply(datalist, fixpools)
# Extract per-sample metadata from one raw GC-TOF export sheet.
#
# Rows above the "BinBase" header (found in column 1) carry sample-level
# annotations, one column per sample from column 8 onward. The block is
# transposed so samples become rows, with columns named by the annotation
# keys held in column 8 of the sheet.
#
# @param data Raw export sheet as a data frame.
# @param run  Batch identifier ("1" or "2") recorded per sample.
# @return Character data frame: one row per sample with mx.class, mx.sample,
#   label, comment, species, organ, treatment, file_id and batch columns.
getmeta <- function(data, run){
  myrow <- grep("BinBase",data[,1])  # header row; metadata lives above it
  col.meta.data <- as.data.frame(data[1:myrow,8:ncol(data)])
  col.meta.data <- t(col.meta.data)  # transpose: samples become rows
  colnames(col.meta.data) <- col.meta.data[1,]  # first transposed row = keys from column 8
  col.meta.data <- col.meta.data[-1,]  # drop the key row itself
  col.meta.data <- data.frame(col.meta.data, stringsAsFactors = F)
  col.meta.data[,"file_id"] <- rownames(col.meta.data)  # row names are the original sample column names
  col.meta.data <- col.meta.data[,c("mx.class","mx.sample","label","comment","species","organ","treatment","file_id")]
  col.meta.data$batch <- run  # tag which analytical run this sheet came from
  col.meta.data <- data.frame(lapply(col.meta.data, as.character))  # coerce every column to character
  return(col.meta.data)
}
col.meta.data <- do.call("rbind",mapply(getmeta, data=datalist, run=c("1","2"), SIMPLIFY = F))
col.meta.data <- col.meta.data[which(col.meta.data$label %in% all.meta$label| grepl("pool",col.meta.data$label)),]
write.csv(col.meta.data, file = file.path(output.dir, paste0(file.prefix, "sample_metadata.csv")))
print("beginning of sample_metadata.csv")
print(head(col.meta.data))
#### Collect metabolite meta data ####
# Extract per-metabolite metadata from one raw GC-TOF export sheet.
#
# Metabolite annotations occupy columns 1-8 of the rows below the "BinBase"
# header row. Numeric fields are coerced (non-numeric entries become NA) and
# rows without a PubChem id are dropped, keeping identified metabolites only.
#
# @param data Raw export sheet as a data frame.
# @return Data frame of metabolite annotations, one row per identified bin.
getmetarow <- function(data){
  myrow <- grep("BinBase",data[,1]) #identify column headers
  myrow2 <- myrow + 1 #identify first row of data
  row.meta.data <- data[myrow2:dim(data)[1], 1:8]
  colnames(row.meta.data) <- data[myrow,1:8]
  # Coerce numeric annotation fields; as.numeric() yields NA for blanks.
  row.meta.data[,"ret.index"] <- as.numeric(row.meta.data[,"ret.index"])
  row.meta.data[,"quant mz"] <- as.numeric(row.meta.data[,"quant mz"])
  row.meta.data[,"BB id"] <- as.numeric(row.meta.data[,"BB id"])
  row.meta.data[,"PubChem"] <- as.numeric(row.meta.data[,"PubChem"])
  row.meta.data[,"mass spec"] <- NULL  # drop the raw mass-spec column before writing metadata out
  row.meta.data <- row.meta.data[!is.na(row.meta.data$PubChem),]  # keep only bins with a PubChem id
  return(row.meta.data)
}
row.meta.data <- do.call("rbind",lapply(datalist, getmetarow))
row.meta.data <- unique(row.meta.data)
row.meta.data$metabolite_name <- row.meta.data$`BinBase name`
row.meta.data$`BinBase name` <- NULL
write.csv(row.meta.data, file = file.path(output.dir, paste0(file.prefix, "metabolite_metadata.csv")))
print("beginning of metabolite_metadata.csv")
print(head(row.meta.data))
#### Process Data ####
# Extract the abundance matrix from one raw GC-TOF export sheet.
#
# Measurement rows sit directly below the "BinBase" header (column 1);
# sample labels sit in the row whose 8th column contains "label". Returns
# the measurement block with metabolite names (column 1) as row names and
# sample labels as column names.
getdata <- function(data){
  header_row <- grep("BinBase", data[, 1])  # row holding the column headers
  label_row <- grep("label", data[, 8])     # row holding the sample labels
  data_rows <- (header_row + 1):nrow(data)  # measurements start just below the header
  abundances <- data[data_rows, 9:ncol(data)]
  colnames(abundances) <- data[label_row, 9:ncol(data)]
  rownames(abundances) <- data[data_rows, 1]
  return(abundances)
}
proc.data.list <- lapply(datalist, getdata)
#bring two input file together and transpose
proc.data <- merge(proc.data.list[[1]], proc.data.list[[2]], by='row.names')
rownames(proc.data) <- proc.data$Row.names
proc.data$Row.names <- NULL
proc.data <- proc.data[which(rownames(proc.data) %in% row.meta.data$metabolite_name),]
proc.data <- proc.data[complete.cases(proc.data),]
#get new IDS
met_key <- data.frame("metabolite_name" = rownames(proc.data))
met_key$ID <- paste("metabolite",1:nrow(met_key), sep="_")
rownames(proc.data) <- replace_ids(myIDs=rownames(proc.data), mykey=met_key[,c("metabolite_name","ID")])
proc.data <- data.frame(t(proc.data), stringsAsFactors = F)
#remove samples with fake mrn
proc.data <- proc.data[which(rownames(proc.data) %in% all.meta$label | grepl("pool",rownames(proc.data))),]
###Missing data
#remove 0s by imputing half the minimum for each metabolite.
x <- colSums(as.matrix(proc.data) == 0) #only 3 missing values
saverownames <- rownames(proc.data)
min_impute <- function(mycol){
# Replace zero abundances with half the smallest positive value in the
# column (a standard minimum-based imputation for missing intensities).
mycol <- as.numeric(mycol)
mycol[mycol==0] <- min(mycol[mycol>0],na.rm=TRUE)/2
return(mycol)
}
proc.data <- data.frame(apply(proc.data, 2, min_impute))
rownames(proc.data) <- saverownames
#drop patient who was mis-diagnosed. Not HCC but cholangiocarcinoma 75378629
drop <- all.meta[which(all.meta$mrn==75378629),"label"]
proc.data <- proc.data[which(rownames(proc.data)!=drop),]
all.meta <- all.meta[which(all.meta$mrn!=75378629),]
all.meta <- all.meta[which(all.meta$diagnosis %in% c("Healthy","HCC","Cirrhosis")),]
#write to file
write.csv(proc.data, file = file.path(output.dir, paste0(file.prefix, "data_processed.csv")))
print("sample of data_processed.csv")
print(proc.data[1:5,1:5])
| /scripts/data_preprocessing.R | no_license | courtneyhershberger/HCC_Saliva_Metabolomics | R | false | false | 4,840 | r | #################
## Saliva Metabolome Workflow ##
## 1. Data Preprocessing
#################
file.prefix <- "saliva_metabolome_GCTOF_MS_"
output.dir <- "processed_data/"
#### Collect sample meta data ####
datalist <- data
#pool samples do not have unique identifier
fixpools <- function(data){
  # Make the sample labels in one raw BinBase export unique.
  #
  # Pooled QC samples all carry the same label ("pool"), which breaks
  # downstream code that uses labels as identifiers.  Rewrite the label
  # row so duplicates get numeric suffixes: "pool", "pool_1", "pool_2", ...
  #
  # data: raw export (coercible to data.frame); the row whose 8th column
  #       contains the string "label" holds the sample labels.
  # Returns the export as a data.frame with a deduplicated label row.
  #
  # (The previous version added an `order` column and deleted it again
  # without ever using it; that dead code has been removed.)
  data <- data.frame(data, stringsAsFactors = FALSE)
  # NOTE(review): assumes exactly one row of column 8 matches "label";
  # multiple matches would corrupt the assignment below -- confirm per export.
  label_row <- grep("label", data[, 8])
  labels <- as.character(data[label_row, ])
  data[label_row, ] <- make.unique(labels, sep = "_")
  return(data)
}
datalist <- lapply(datalist, fixpools)
getmeta <- function(data, run){
# Extract per-sample metadata from one raw BinBase export.
# data: raw export as a data.frame; sample-metadata rows sit above the
#       row whose first column mentions "BinBase", in columns 8..ncol.
# run:  batch identifier recorded in the returned table (character).
# Returns a data.frame with one row per sample, all columns character.
# NOTE(review): assumes exactly one "BinBase" match -- confirm per export.
myrow <- grep("BinBase",data[,1])
col.meta.data <- as.data.frame(data[1:myrow,8:ncol(data)])
# transpose so samples become rows and metadata fields become columns
col.meta.data <- t(col.meta.data)
# the first transposed row carries the field names
colnames(col.meta.data) <- col.meta.data[1,]
col.meta.data <- col.meta.data[-1,]
# data.frame() runs make.names() on the field names, which presumably
# turns raw names like "mx class" into the "mx.class" form selected
# below -- TODO confirm against an actual export
col.meta.data <- data.frame(col.meta.data, stringsAsFactors = F)
# row names are the original per-sample column headers (file ids)
col.meta.data[,"file_id"] <- rownames(col.meta.data)
col.meta.data <- col.meta.data[,c("mx.class","mx.sample","label","comment","species","organ","treatment","file_id")]
col.meta.data$batch <- run
# flatten every column to plain character (this also drops row names,
# but file_id was captured above)
col.meta.data <- data.frame(lapply(col.meta.data, as.character))
return(col.meta.data)
}
# Build one sample-metadata table covering both batches (runs "1" and "2")
col.meta.data <- do.call("rbind",mapply(getmeta, data=datalist, run=c("1","2"), SIMPLIFY = F))
# keep only samples present in the master sheet (all.meta, defined
# earlier in this script) plus the pooled QC samples
col.meta.data <- col.meta.data[which(col.meta.data$label %in% all.meta$label| grepl("pool",col.meta.data$label)),]
write.csv(col.meta.data, file = file.path(output.dir, paste0(file.prefix, "sample_metadata.csv")))
print("beginning of sample_metadata.csv")
print(head(col.meta.data))
#### Collect metabolite meta data ####
getmetarow <- function(data) {
  # Extract per-metabolite metadata (the first 8 columns) from one raw
  # BinBase export.  The header row is the one whose first column
  # mentions "BinBase"; the rows below it describe metabolites.
  # Returns a data.frame with numeric id columns; metabolites without a
  # usable (numeric) PubChem id are dropped.
  header_row <- grep("BinBase", data[, 1])
  first_data_row <- header_row + 1
  meta <- data[first_data_row:nrow(data), 1:8]
  colnames(meta) <- data[header_row, 1:8]
  # coerce the id columns; non-numeric entries become NA (with a warning)
  for (num_col in c("ret.index", "quant mz", "BB id", "PubChem")) {
    meta[, num_col] <- as.numeric(meta[, num_col])
  }
  # the mass-spectrum column is not needed downstream
  meta[, "mass spec"] <- NULL
  # keep only metabolites with a numeric PubChem id
  meta[!is.na(meta$PubChem), ]
}
# Combine metabolite metadata from both batches and drop duplicate rows
row.meta.data <- do.call("rbind",lapply(datalist, getmetarow))
row.meta.data <- unique(row.meta.data)
# rename "BinBase name" to a syntactic column name used downstream
row.meta.data$metabolite_name <- row.meta.data$`BinBase name`
row.meta.data$`BinBase name` <- NULL
write.csv(row.meta.data, file = file.path(output.dir, paste0(file.prefix, "metabolite_metadata.csv")))
print("beginning of metabolite_metadata.csv")
print(head(row.meta.data))
#### Process Data ####
getdata <- function(data){
  # Pull the abundance block out of one raw BinBase export.
  # Rows below the "BinBase" header row hold metabolites; columns from 9
  # onward hold samples, named by the values in the "label" row.
  # Returns a data.frame of abundances: metabolites x samples.
  header_row <- grep("BinBase", data[, 1])   # column-header row
  first_data_row <- header_row + 1           # metabolite rows start here
  label_row <- grep("label", data[, 8])      # row carrying sample labels
  abundance <- data[first_data_row:nrow(data), 9:ncol(data)]
  colnames(abundance) <- data[label_row, 9:ncol(data)]
  rownames(abundance) <- data[first_data_row:nrow(data), 1]
  abundance
}
# Extract the abundance blocks for both batches
proc.data.list <- lapply(datalist, getdata)
# bring the two input files together (inner merge on metabolite names)
proc.data <- merge(proc.data.list[[1]], proc.data.list[[2]], by='row.names')
rownames(proc.data) <- proc.data$Row.names
proc.data$Row.names <- NULL
# keep only metabolites retained in the metadata (i.e. with PubChem ids)
proc.data <- proc.data[which(rownames(proc.data) %in% row.meta.data$metabolite_name),]
proc.data <- proc.data[complete.cases(proc.data),]
# assign anonymized sequential metabolite IDs
met_key <- data.frame("metabolite_name" = rownames(proc.data))
met_key$ID <- paste("metabolite",1:nrow(met_key), sep="_")
# replace_ids is a project helper defined elsewhere -- presumably swaps
# metabolite names for the generated IDs via the key; confirm its contract
rownames(proc.data) <- replace_ids(myIDs=rownames(proc.data), mykey=met_key[,c("metabolite_name","ID")])
# transpose to samples x metabolites
proc.data <- data.frame(t(proc.data), stringsAsFactors = F)
# remove samples with fake mrn: keep known samples and pooled QC only
proc.data <- proc.data[which(rownames(proc.data) %in% all.meta$label | grepl("pool",rownames(proc.data))),]
### Missing data
# count zeros per metabolite before imputing (half-minimum imputation below)
x <- colSums(as.matrix(proc.data) == 0) #only 3 missing values
# apply() below drops row names, so stash them for restoration
saverownames <- rownames(proc.data)
min_impute <- function(mycol) {
  # Replace zero intensities with half the smallest observed positive
  # value in the column (minimum-based imputation for missing values).
  # mycol: vector of abundances (coerced to numeric).
  # Returns a numeric vector of the same length.
  vals <- as.numeric(mycol)
  half_min <- min(vals[vals > 0], na.rm = TRUE) / 2
  vals[vals == 0] <- half_min
  vals
}
# impute zeros column-by-column; apply() returns a matrix, so rewrap
proc.data <- data.frame(apply(proc.data, 2, min_impute))
# restore the sample row names dropped by apply()
rownames(proc.data) <- saverownames
# drop patient who was mis-diagnosed. Not HCC but cholangiocarcinoma 75378629
drop <- all.meta[which(all.meta$mrn==75378629),"label"]
# NOTE(review): `!= drop` assumes exactly one matching label; a
# multi-element `drop` would silently recycle -- confirm uniqueness of mrn
proc.data <- proc.data[which(rownames(proc.data)!=drop),]
all.meta <- all.meta[which(all.meta$mrn!=75378629),]
# restrict the master sheet to the three diagnosis groups analyzed
all.meta <- all.meta[which(all.meta$diagnosis %in% c("Healthy","HCC","Cirrhosis")),]
# write the imputed samples x metabolites table to file
write.csv(proc.data, file = file.path(output.dir, paste0(file.prefix, "data_processed.csv")))
print("sample of data_processed.csv")
print(proc.data[1:5,1:5])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch_chunks.R
\name{chapter7}
\alias{chapter7}
\title{chapter7 The 67 R-code chunks from Surplus Production Models}
\description{
chapter7 is not an active function but rather acts
as a repository for the various example code chunks found in
chapter7. There are 67 R-code chunks in chapter7. You should,
of course, feel free to use and modify any of these example
chunks in your own work.
}
\examples{
\dontrun{
# All the example code from # Surplus Production Models
# Surplus Production Models
## Introduction
### Data Needs
### The Need for Contrast
### When are Catch-Rates Informative
# R-chunk 1 Page 256
#Yellowfin-tuna data from Schaefer 1957
# R-chunk 2 Page 256 Table 7.1 code not in the book
data(schaef)
kable(halftable(schaef,subdiv=2),digits=c(0,0,0,4))
# R-chunk 3 Page 256
#schaef fishery data and regress cpue and catch Fig 7.1
oldp <- parset(plots=c(3,1),margin=c(0.35,0.4,0.05,0.05))
plot1(schaef[,"year"],schaef[,"catch"],ylab="Catch",xlab="Year",
defpar=FALSE,lwd=2)
plot1(schaef[,"year"],schaef[,"cpue"],ylab="CPUE",xlab="Year",
defpar=FALSE,lwd=2)
plot1(schaef[,"catch"],schaef[,"cpue"],type="p",ylab="CPUE",
xlab="Catch",defpar=FALSE,pch=16,cex=1.0)
model <- lm(schaef[,"cpue"] ~ schaef[,"catch"])
abline(model,lwd=2,col=2) # summary(model)
par(oldp) # return par to old settings; this line not in book
# R-chunk 4 Page 257
#cross correlation between cpue and catch in schaef Fig 7.2
oldp <- parset(cex=0.85) #sets par values for a tidy base graphic
ccf(x=schaef[,"catch"],y=schaef[,"cpue"],type="correlation",
ylab="Correlation",plot=TRUE)
par(oldp) # return par to old settings; this line not in book
# R-chunk 5 Page 257
#now plot schaef data with timelag of 2 years on cpue Fig 7.3
oldp <- parset(plots=c(3,1),margin=c(0.35,0.4,0.05,0.05))
plot1(schaef[1:20,"year"],schaef[1:20,"catch"],ylab="Catch",
xlab="Year",defpar=FALSE,lwd=2)
plot1(schaef[3:22,"year"],schaef[3:22,"cpue"],ylab="CPUE",
xlab="Year",defpar=FALSE,lwd=2)
plot1(schaef[1:20,"catch"],schaef[3:22,"cpue"],type="p",
ylab="CPUE",xlab="Catch",defpar=FALSE,cex=1.0,pch=16)
model2 <- lm(schaef[3:22,"cpue"] ~ schaef[1:20,"catch"])
abline(model2,lwd=2,col=2)
par(oldp) # return par to old settings; this line not in book
# R-chunk 6 Page 259
#write out a summary of he regression model2
summary(model2)
## Some Equations
### Production Functions
# R-chunk 7 Page 262
#plot productivity and density-dependence functions Fig7.4
prodfun <- function(r,Bt,K,p) return((r*Bt/p)*(1-(Bt/K)^p))
densdep <- function(Bt,K,p) return((1/p)*(1-(Bt/K)^p))
r <- 0.75; K <- 1000.0; Bt <- 1:1000
sp <- prodfun(r,Bt,K,1.0) # Schaefer equivalent
sp0 <- prodfun(r,Bt,K,p=1e-08) # Fox equivalent
sp3 <- prodfun(r,Bt,K,3) #left skewed production, marine mammal?
oldp <- parset(plots=c(2,1),margin=c(0.35,0.4,0.1,0.05))
plot1(Bt,sp,type="l",lwd=2,xlab="Stock Size",
ylab="Surplus Production",maxy=200,defpar=FALSE)
lines(Bt,sp0 * (max(sp)/max(sp0)),lwd=2,col=2,lty=2) # rescale
lines(Bt,sp3*(max(sp)/max(sp3)),lwd=3,col=3,lty=3) # production
legend(275,100,cex=1.1,lty=1:3,c("p = 1.0 Schaefer","p = 1e-08 Fox",
"p = 3 LeftSkewed"),col=c(1,2,3),lwd=3,bty="n")
plot1(Bt,densdep(Bt,K,p=1),xlab="Stock Size",defpar=FALSE,
ylab="Density-Dependence",maxy=2.5,lwd=2)
lines(Bt,densdep(Bt,K,1e-08),lwd=2,col=2,lty=2)
lines(Bt,densdep(Bt,K,3),lwd=3,col=3,lty=3)
par(oldp) # return par to old settings; this line not in book
### The Schaefer Model
### Sum of Squared Residuals
### Estimating Management Statistics
# R-chunk 8 Page 266
#compare Schaefer and Fox MSY estimates for same parameters
param <- c(r=1.1,K=1000.0,Binit=800.0,sigma=0.075)
cat("MSY Schaefer = ",getMSY(param,p=1.0),"\n") # p=1 is default
cat("MSY Fox = ",getMSY(param,p=1e-08),"\n")
### The Trouble with Equilibria
## Model Fitting
### A Possible Workflow for Stock Assessment
# R-chunk 9 Page 269
#Initial model 'fit' to the initial parameter guess Fig 7.5
data(schaef); schaef <- as.matrix(schaef)
param <- log(c(r=0.1,K=2250000,Binit=2250000,sigma=0.5))
negatL <- negLL(param,simpspm,schaef,logobs=log(schaef[,"cpue"]))
ans <- plotspmmod(inp=param,indat=schaef,schaefer=TRUE,
addrmse=TRUE,plotprod=FALSE)
# R-chunk 10 Pages 270 - 271
#Fit the model first using optim then nlm in sequence
param <- log(c(0.1,2250000,2250000,0.5))
pnams <- c("r","K","Binit","sigma")
best <- optim(par=param,fn=negLL,funk=simpspm,indat=schaef,
logobs=log(schaef[,"cpue"]),method="BFGS")
outfit(best,digits=4,title="Optim",parnames = pnams)
cat("\n")
best2 <- nlm(negLL,best$par,funk=simpspm,indat=schaef,
logobs=log(schaef[,"cpue"]))
outfit(best2,digits=4,title="nlm",parnames = pnams)
# R-chunk 11 Page 271
#optimum fit. Defaults used in plotprod and schaefer Fig 7.6
ans <- plotspmmod(inp=best2$estimate,indat=schaef,addrmse=TRUE,
plotprod=TRUE)
# R-chunk 12 Page 272
#the high-level structure of ans; try str(ans$Dynamics)
str(ans, width=65, strict.width="cut",max.level=1)
# R-chunk 13 Page 273
#compare the parameteric MSY with the numerical MSY
round(ans$Dynamics$sumout,3)
cat("\n Productivity Statistics \n")
summspm(ans) # the q parameter needs more significant digits
### Is the Analysis Robust?
# R-chunk 14 Page 274
#conduct a robustness test on the Schaefer model fit
data(schaef); schaef <- as.matrix(schaef); reps <- 12
param <- log(c(r=0.15,K=2250000,Binit=2250000,sigma=0.5))
ansS <- fitSPM(pars=param,fish=schaef,schaefer=TRUE, #use
maxiter=1000,funk=simpspm,funkone=FALSE) #fitSPM
#getseed() #generates random seed for repeatable results
set.seed(777852) #sets random number generator with a known seed
robout <- robustSPM(inpar=ansS$estimate,fish=schaef,N=reps,
scaler=40,verbose=FALSE,schaefer=TRUE,
funk=simpspm,funkone=FALSE)
#use str(robout) to see the components included in the output
# R-chunk 15 Page 275 Table 7.2 code not in the book
#outcome of robustness tests
kable(robout$results[,1:5],digits=c(3,4,3,4,3))
kable(robout$results[,6:11],digits=c(3,4,3,4,5,0))
# R-chunk 16 Pages 275 - 276
#Repeat robustness test on fit to schaef data 100 times
set.seed(777854)
robout2 <- robustSPM(inpar=ansS$estimate,fish=schaef,N=100,
scaler=25,verbose=FALSE,schaefer=TRUE,
funk=simpspm,funkone=TRUE,steptol=1e-06)
lastbits <- tail(robout2$results[,6:11],10)
# R-chunk 17 Page 276 Table 7.3 code not in the book
#last 10 rows of robustness test showing deviations
kable(lastbits,digits=c(5,1,1,4,5,0))
# R-chunk 18 Page 276
# replicates from the robustness test Fig 7.7
result <- robout2$results
oldp <- parset(plots=c(2,2),margin=c(0.35,0.45,0.05,0.05))
hist(result[,"r"],breaks=15,col=2,main="",xlab="r")
hist(result[,"K"],breaks=15,col=2,main="",xlab="K")
hist(result[,"Binit"],breaks=15,col=2,main="",xlab="Binit")
hist(result[,"MSY"],breaks=15,col=2,main="",xlab="MSY")
par(oldp) # return par to old settings; this line not in book
# R-chunk 19 Page 277
#robustSPM parameters against each other Fig 7.8
pairs(result[,c("r","K","Binit","MSY")],upper.panel=NULL,pch=1)
### Using Different Data?
# R-chunk 20 Page 278
#Now use the dataspm data-set, which is noisier
set.seed(777854) #other random seeds give different results
data(dataspm); fish <- dataspm #to generalize the code
param <- log(c(r=0.24,K=5174,Binit=2846,sigma=0.164))
ans <- fitSPM(pars=param,fish=fish,schaefer=TRUE,maxiter=1000,
funkone=TRUE)
out <- robustSPM(ans$estimate,fish,N=100,scaler=15, #making
verbose=FALSE,funkone=TRUE) #scaler=10 gives
result <- tail(out$results[,6:11],10) #16 sub-optimal results
# R-chunk 21 Page 279 Table 7.4 code not in the book
#last 10 trials of robustness on dataspm fit
kable(result,digits=c(4,2,2,4,4,3))
## Uncertainty
### Likelihood Profiles
# R-chunk 22 Page 280
# Fig 7.9 Fit of optimum to the abdat data-set
data(abdat); fish <- as.matrix(abdat)
colnames(fish) <- tolower(colnames(fish)) # just in case
pars <- log(c(r=0.4,K=9400,Binit=3400,sigma=0.05))
ans <- fitSPM(pars,fish,schaefer=TRUE) #Schaefer
answer <- plotspmmod(ans$estimate,abdat,schaefer=TRUE,addrmse=TRUE)
# R-chunk 23 Pages 280 - 282
# likelihood profiles for r and K for fit to abdat Fig 7.10
#doprofile input terms are vector of values, fixed parameter
#location, starting parameters, and free parameter locations.
#all other input are assumed to be in the calling environment
doprofile <- function(val,loc,startest,indat,notfix=c(2:4)) {
pname <- c("r","K","Binit","sigma","-veLL")
numv <- length(val)
outpar <- matrix(NA,nrow=numv,ncol=5,dimnames=list(val,pname))
for (i in 1:numv) { #
param <- log(startest) # reset the parameters
param[loc] <- log(val[i]) #insert new fixed value
parinit <- param # copy revised parameter vector
bestmod <- nlm(f=negLLP,p=param,funk=simpspm,initpar=parinit,
indat=indat,logobs=log(indat[,"cpue"]),notfixed=notfix)
outpar[i,] <- c(exp(bestmod$estimate),bestmod$minimum)
}
return(outpar)
}
rval <- seq(0.32,0.46,0.001)
outr <- doprofile(rval,loc=1,startest=c(rval[1],11500,5000,0.25),
indat=fish,notfix=c(2:4))
Kval <- seq(7200,11500,200)
outk <- doprofile(Kval,loc=2,c(0.4,7200,6500,0.3),indat=fish,notfix=c(1,3,4))
oldp <- parset(plots=c(2,1),cex=0.85,outmargin=c(0.5,0.5,0,0))
plotprofile(outr,var="r",defpar=FALSE,lwd=2) #MQMF function
plotprofile(outk,var="K",defpar=FALSE,lwd=2)
par(oldp) # return par to old settings; this line not in book
### Bootstrap Confidence Intervals
# R-chunk 24 Page 283
#find optimum Schaefer model fit to dataspm data-set Fig 7.11
data(dataspm)
fish <- as.matrix(dataspm)
colnames(fish) <- tolower(colnames(fish))
pars <- log(c(r=0.25,K=5500,Binit=3000,sigma=0.25))
ans <- fitSPM(pars,fish,schaefer=TRUE,maxiter=1000) #Schaefer
answer <- plotspmmod(ans$estimate,fish,schaefer=TRUE,addrmse=TRUE)
# R-chunk 25 Page 284
#bootstrap the log-normal residuals from optimum model fit
set.seed(210368)
reps <- 1000 # can take 10 sec on a large Desktop. Be patient
#startime <- Sys.time() # schaefer=TRUE is the default
boots <- spmboot(ans$estimate,fishery=fish,iter=reps)
#print(Sys.time() - startime) # how long did it take?
str(boots,max.level=1)
# R-chunk 26 Page 285
#Summarize bootstrapped parameter estimates as quantiles seen in Table 7.5
bootpar <- boots$bootpar
rows <- colnames(bootpar)
columns <- c(c(0.025,0.05,0.5,0.95,0.975),"Mean")
bootCI <- matrix(NA,nrow=length(rows),ncol=length(columns),
dimnames=list(rows,columns))
for (i in 1:length(rows)) {
tmp <- bootpar[,i]
qtil <- quantile(tmp,probs=c(0.025,0.05,0.5,0.95,0.975),na.rm=TRUE)
bootCI[i,] <- c(qtil,mean(tmp,na.rm=TRUE))
}
# R-chunk 27 page 285 # not visible in the book but this generates Table 7.5
kable(bootCI,digits=c(4,4,4,4,4,4))
# R-chunk 28 Page 286
#bootstrap CI. Note use of uphist to expand scale Fig 7.12
colf <- c(1,1,1,4); lwdf <- c(1,3,1,3); ltyf <- c(1,1,1,2)
colsf <- c(2,3,4,6)
oldp <- parset(plots=c(3,2))
hist(bootpar[,"r"],breaks=25,main="",xlab="r")
abline(v=c(bootCI["r",colsf]),col=colf,lwd=lwdf,lty=ltyf)
uphist(bootpar[,"K"],maxval=14000,breaks=25,main="",xlab="K")
abline(v=c(bootCI["K",colsf]),col=colf,lwd=lwdf,lty=ltyf)
hist(bootpar[,"Binit"],breaks=25,main="",xlab="Binit")
abline(v=c(bootCI["Binit",colsf]),col=colf,lwd=lwdf,lty=ltyf)
uphist(bootpar[,"MSY"],breaks=25,main="",xlab="MSY",maxval=450)
abline(v=c(bootCI["MSY",colsf]),col=colf,lwd=lwdf,lty=ltyf)
hist(bootpar[,"Depl"],breaks=25,main="",xlab="Final Depletion")
abline(v=c(bootCI["Depl",colsf]),col=colf,lwd=lwdf,lty=ltyf)
hist(bootpar[,"Harv"],breaks=25,main="",xlab="End Harvest Rate")
abline(v=c(bootCI["Harv",colsf]),col=colf,lwd=lwdf,lty=ltyf)
par(oldp) # return par to old settings; this line not in book
# R-chunk 29 Page 286
#Fig7.13 1000 bootstrap trajectories for dataspm model fit
dynam <- boots$dynam
years <- fish[,"year"]
nyrs <- length(years)
oldp <- parset()
ymax <- getmax(c(dynam[,,"predCE"],fish[,"cpue"]))
plot(fish[,"year"],fish[,"cpue"],type="n",ylim=c(0,ymax),
xlab="Year",ylab="CPUE",yaxs="i",panel.first = grid())
for (i in 1:reps) lines(years,dynam[i,,"predCE"],lwd=1,col=8)
lines(years,answer$Dynamics$outmat[1:nyrs,"predCE"],lwd=2,col=0)
points(years,fish[,"cpue"],cex=1.2,pch=16,col=1)
percs <- apply(dynam[,,"predCE"],2,quants)
arrows(x0=years,y0=percs["5\%",],y1=percs["95\%",],length=0.03,
angle=90,code=3,col=0)
par(oldp) # return par to old settings; this line not in book
# R-chunk 30 Page 288
#Fit the Fox model to dataspm; note different parameters
pars <- log(c(r=0.15,K=6500,Binit=3000,sigma=0.20))
ansF <- fitSPM(pars,fish,schaefer=FALSE,maxiter=1000) #Fox version
bootsF <- spmboot(ansF$estimate,fishery=fish,iter=reps,schaefer=FALSE)
dynamF <- bootsF$dynam
# R-chunk 31 Pages 288 - 289
# bootstrap trajectories from both model fits Fig 7.14
oldp <- parset()
ymax <- getmax(c(dynam[,,"predCE"],fish[,"cpue"]))
plot(fish[,"year"],fish[,"cpue"],type="n",ylim=c(0,ymax),
xlab="Year",ylab="CPUE",yaxs="i",panel.first = grid())
for (i in 1:reps) lines(years,dynamF[i,,"predCE"],lwd=1,col=1,lty=1)
for (i in 1:reps) lines(years,dynam[i,,"predCE"],lwd=1,col=8)
lines(years,answer$Dynamics$outmat[1:nyrs,"predCE"],lwd=2,col=0)
points(years,fish[,"cpue"],cex=1.1,pch=16,col=1)
percs <- apply(dynam[,,"predCE"],2,quants)
arrows(x0=years,y0=percs["5\%",],y1=percs["95\%",],length=0.03,
angle=90,code=3,col=0)
legend(1985,0.35,c("Schaefer","Fox"),col=c(8,1),bty="n",lwd=3)
par(oldp) # return par to old settings; this line not in book
### Parameter Correlations
# R-chunk 32 Page 290
# plot variables against each other, use MQMF panel.cor Fig 7.15
pairs(boots$bootpar[,c(1:4,6,7)],lower.panel=panel.smooth,
upper.panel=panel.cor,gap=0,lwd=2,cex=0.5)
### Asymptotic Errors
# R-chunk 33 Page 290
#Start the SPM analysis using asymptotic errors.
data(dataspm) # Note the use of hess=TRUE in call to fitSPM
fish <- as.matrix(dataspm) # using as.matrix for more speed
colnames(fish) <- tolower(colnames(fish)) # just in case
pars <- log(c(r=0.25,K=5200,Binit=2900,sigma=0.20))
ans <- fitSPM(pars,fish,schaefer=TRUE,maxiter=1000,hess=TRUE)
# R-chunk 34 page 291
#The hessian matrix from the Schaefer fit to the dataspm data
outfit(ans)
# R-chunk 35 Page 292
#calculate the var-covar matrix and the st errors
vcov <- solve(ans$hessian) # calculate variance-covariance matrix
label <- c("r","K", "Binit","sigma")
colnames(vcov) <- label; rownames(vcov) <- label
outvcov <- rbind(vcov,sqrt(diag(vcov)))
rownames(outvcov) <- c(label,"StErr")
# R-chunk 36 Page 290 Table 7.6 code not in the book
# tabulate the variance covariance matrix and StErrs
kable(outvcov,digits=c(5,5,5,5))
# R-chunk 37 Pages 292 - 293
#generate 1000 parameter vectors from multi-variate normal
library(mvtnorm) # use RStudio, or install.packages("mvtnorm")
N <- 1000 # number of parameter vectors, use vcov from above
mvn <- length(fish[,"year"]) #matrix to store cpue trajectories
mvncpue <- matrix(0,nrow=N,ncol=mvn,dimnames=list(1:N,fish[,"year"]))
columns <- c("r","K","Binit","sigma")
optpar <- ans$estimate # Fill matrix with mvn parameter vectors
mvnpar <- matrix(exp(rmvnorm(N,mean=optpar,sigma=vcov)),nrow=N,
ncol=4,dimnames=list(1:N,columns))
msy <- mvnpar[,"r"]*mvnpar[,"K"]/4
nyr <- length(fish[,"year"])
depletion <- numeric(N) #now calculate N cpue series in linear space
for (i in 1:N) { # calculate dynamics for each parameter set
dynamA <- spm(log(mvnpar[i,1:4]),fish)
mvncpue[i,] <- dynamA$outmat[1:nyr,"predCE"]
depletion[i] <- dynamA$outmat["2016","Depletion"]
}
mvnpar <- cbind(mvnpar,msy,depletion) # try head(mvnpar,10)
# R-chunk 38 Page 293
#data and trajectories from 1000 MVN parameter vectors Fig 7.16
oldp <- plot1(fish[,"year"],fish[,"cpue"],type="p",xlab="Year",
ylab="CPUE",maxy=2.0)
for (i in 1:N) lines(fish[,"year"],mvncpue[i,],col="grey",lwd=1)
points(fish[,"year"],fish[,"cpue"],pch=1,cex=1.3,col=1,lwd=2) # data
lines(fish[,"year"],exp(simpspm(optpar,fish)),lwd=2,col=1)# pred
percs <- apply(mvncpue,2,quants) # obtain the quantiles
arrows(x0=fish[,"year"],y0=percs["5\%",],y1=percs["95\%",],length=0.03,
angle=90,code=3,col=1) #add 90\% quantiles
msy <- mvnpar[,"r"]*mvnpar[,"K"]/4 # 1000 MSY estimates
text(2010,1.75,paste0("MSY ",round(mean(msy),3)),cex=1.25,font=7)
par(oldp) # return par to old settings; this line not in book
# R-chunk 39 Pages 293 - 294
#Isolate errant cpue trajectories Fig 7.17
pickd <- which(mvncpue[,"2016"] < 0.40)
oldp <- plot1(fish[,"year"],fish[,"cpue"],type="n",xlab="Year",
ylab="CPUE",maxy=6.25)
for (i in 1:length(pickd))
lines(fish[,"year"],mvncpue[pickd[i],],col=1,lwd=1)
points(fish[,"year"],fish[,"cpue"],pch=16,cex=1.25,col=4)
lines(fish[,"year"],exp(simpspm(optpar,fish)),lwd=3,col=2,lty=2)
par(oldp) # return par to old settings; this line not in book
# R-chunk 40 Page 294
#Use adhoc function to plot errant parameters Fig 7.18
oldp <- parset(plots=c(2,2),cex=0.85)
outplot <- function(var1,var2,pickdev) {
plot1(mvnpar[,var1],mvnpar[,var2],type="p",pch=16,cex=1.0,
defpar=FALSE,xlab=var1,ylab=var2,col=8)
points(mvnpar[pickdev,var1],mvnpar[pickdev,var2],pch=16,cex=1.0)
}
outplot("r","K",pickd) # assumes mvnpar in working environment
outplot("sigma","Binit",pickd)
outplot("r","Binit",pickd)
outplot("K","Binit",pickd)
par(oldp) # return par to old settings; this line not in book
# R-chunk 41 Page 296
#asymptotically sampled parameter vectors Fig 7.19
pairs(mvnpar,lower.panel=panel.smooth, upper.panel=panel.cor,
gap=0,cex=0.25,lwd=2)
# R-chunk 42 Page 297
# Get the ranges of parameters from bootstrap and asymptotic
bt <- apply(bootpar,2,range)[,c(1:4,6,7)]
ay <- apply(mvnpar,2,range)
out <- rbind(bt,ay)
rownames(out) <- c("MinBoot","MaxBoot","MinAsym","MaxAsym")
# R-chunk 43 Page 297 Table 7.7 code not in the book
#tabulate ranges from two approaches
kable(out,digits=c(4,3,3,4,3,4))
### Sometimes Asymptotic Errors Work
# R-chunk 44 Pages 297 - 298
#repeat asymptotic errors using abdat data-set Figure 7.20
data(abdat)
fish <- as.matrix(abdat)
pars <- log(c(r=0.4,K=9400,Binit=3400,sigma=0.05))
ansA <- fitSPM(pars,fish,schaefer=TRUE,maxiter=1000,hess=TRUE)
vcovA <- solve(ansA$hessian) # calculate var-covar matrix
mvn <- length(fish[,"year"])
N <- 1000 # replicates
mvncpueA <- matrix(0,nrow=N,ncol=mvn,dimnames=list(1:N,fish[,"year"]))
columns <- c("r","K","Binit","sigma")
optparA <- ansA$estimate # Fill matrix of parameter vectors
mvnparA <- matrix(exp(rmvnorm(N,mean=optparA,sigma=vcovA)),
nrow=N,ncol=4,dimnames=list(1:N,columns))
msy <- mvnparA[,"r"]*mvnparA[,"K"]/4
for (i in 1:N) mvncpueA[i,]<-exp(simpspm(log(mvnparA[i,]),fish))
mvnparA <- cbind(mvnparA,msy)
oldp <- plot1(fish[,"year"],fish[,"cpue"],type="p",xlab="Year",
ylab="CPUE",maxy=2.5)
for (i in 1:N) lines(fish[,"year"],mvncpueA[i,],col=8,lwd=1)
points(fish[,"year"],fish[,"cpue"],pch=16,cex=1.0) #orig data
lines(fish[,"year"],exp(simpspm(optparA,fish)),lwd=2,col=0)
par(oldp) # return par to old settings; this line not in book
# R-chunk 45 Page 298
#plot asymptotically sampled parameter vectors Figure 7.21
pairs(mvnparA,lower.panel=panel.smooth, upper.panel=panel.cor,
gap=0,pch=16,col=rgb(red=0,green=0,blue=0,alpha = 1/10))
### Bayesian Posteriors
# R-chunk 46 Page 299
#Fit the Fox Model to the abdat data Figure 7.22
data(abdat); fish <- as.matrix(abdat)
param <- log(c(r=0.3,K=11500,Binit=3300,sigma=0.05))
foxmod <- nlm(f=negLL1,p=param,funk=simpspm,indat=fish,
logobs=log(fish[,"cpue"]),iterlim=1000,schaefer=FALSE)
optpar <- exp(foxmod$estimate)
ans <- plotspmmod(inp=foxmod$estimate,indat=fish,schaefer=FALSE,
addrmse=TRUE, plotprod=TRUE)
# R-chunk 47 Page 301
# Conduct an MCMC using simpspmC on the abdat Fox SPM
# This means you will need to compile simpspmC from appendix
set.seed(698381) #for repeatability, possibly only on Windows10
begin <- gettime() # to enable the time taken to be calculated
inscale <- c(0.07,0.05,0.09,0.45) #note large value for sigma
pars <- log(c(r=0.205,K=11300,Binit=3200,sigma=0.044))
result <- do_MCMC(chains=1,burnin=50,N=2000,thinstep=512,
inpar=pars,infunk=negLL,calcpred=simpspmC,
obsdat=log(fish[,"cpue"]),calcdat=fish,
priorcalc=calcprior,scales=inscale,schaefer=FALSE)
# alternatively, use simpspm, but that will take longer.
cat("acceptance rate = ",result$arate," \n")
cat("time = ",gettime() - begin,"\n")
post1 <- result[[1]][[1]]
p <- 1e-08
msy <- post1[,"r"]*post1[,"K"]/((p + 1)^((p+1)/p))
# R-chunk 48 Page 302
#pairwise comparison for MCMC of Fox model on abdat Fig 7.23
pairs(cbind(post1[,1:4],msy),upper.panel = panel.cor,lwd=2,cex=0.2,
lower.panel=panel.smooth,col=1,gap=0.1)
# R-chunk 49 Page 302
# marginal distributions of 3 parameters and msy Figure 7.24
oldp <- parset(plots=c(2,2), cex=0.85)
plot(density(post1[,"r"]),lwd=2,main="",xlab="r") #plot has a method
plot(density(post1[,"K"]),lwd=2,main="",xlab="K") #for output from
plot(density(post1[,"Binit"]),lwd=2,main="",xlab="Binit") # density
plot(density(msy),lwd=2,main="",xlab="MSY") #try str(density(msy))
par(oldp) # return par to old settings; this line not in book
# R-chunk 50 Page 304
#MCMC r and K parameters, approx 50 + 90\% contours. Fig7.25
puttxt <- function(xs,xvar,ys,yvar,lvar,lab="",sigd=0) {
text(xs*xvar[2],ys*yvar[2],makelabel(lab,lvar,sep=" ",
sigdig=sigd),cex=1.2,font=7,pos=4)
} # end of puttxt - a quick utility function
kran <- range(post1[,"K"]); rran <- range(post1[,"r"])
mran <- range(msy) #ranges used in the plots
oldp <- parset(plots=c(1,2),margin=c(0.35,0.35,0.05,0.1)) #plot r vs K
plot(post1[,"K"],post1[,"r"],type="p",cex=0.5,xlim=kran,
ylim=rran,col="grey",xlab="K",ylab="r",panel.first=grid())
points(optpar[2],optpar[1],pch=16,col=1,cex=1.75) # center
addcontours(post1[,"K"],post1[,"r"],kran,rran, #if fails make
contval=c(0.5,0.9),lwd=2,col=1) #contval smaller
puttxt(0.7,kran,0.97,rran,kran,"K= ",sigd=0)
puttxt(0.7,kran,0.94,rran,rran,"r= ",sigd=4)
plot(post1[,"K"],msy,type="p",cex=0.5,xlim=kran, # K vs msy
ylim=mran,col="grey",xlab="K",ylab="MSY",panel.first=grid())
points(optpar[2],getMSY(optpar,p),pch=16,col=1,cex=1.75)#center
addcontours(post1[,"K"],msy,kran,mran,contval=c(0.5,0.9),lwd=2,col=1)
puttxt(0.6,kran,0.99,mran,kran,"K= ",sigd=0)
puttxt(0.6,kran,0.97,mran,mran,"MSY= ",sigd=3)
par(oldp) # return par to old settings; this line not in book
# R-chunk 51 Page 305
#Traces for the Fox model parameters from the MCMC Fig7.26
oldp <- parset(plots=c(4,1),margin=c(0.3,0.45,0.05,0.05),
outmargin = c(1,0,0,0),cex=0.85)
label <- colnames(post1)
N <- dim(post1)[1]
for (i in 1:3) {
plot(1:N,post1[,i],type="l",lwd=1,ylab=label[i],xlab="")
abline(h=median(post1[,i]),col=2)
}
msy <- post1[,1]*post1[,2]/4
plot(1:N,msy,type="l",lwd=1,ylab="MSY",xlab="")
abline(h=median(msy),col=2)
mtext("Step",side=1,outer=T,line=0.0,font=7,cex=1.1)
par(oldp) # return par to old settings; this line not in book
# R-chunk 52 Page 306
#Do five chains of the same length for the Fox model
set.seed(6396679) # Note all chains start from same place, which is
inscale <- c(0.07,0.05,0.09,0.45) # suboptimal, but still the chains
pars <- log(c(r=0.205,K=11300,Binit=3220,sigma=0.044)) # differ
result <- do_MCMC(chains=5,burnin=50,N=2000,thinstep=512,
inpar=pars,infunk=negLL1,calcpred=simpspmC,
obsdat=log(fish[,"cpue"]),calcdat=fish,
priorcalc=calcprior,scales=inscale,
schaefer=FALSE)
cat("acceptance rate = ",result$arate," \n") # always check this
# R-chunk 53 Page 306
#Now plot marginal posteriors from 5 Fox model chains Fig7.27
oldp <- parset(plots=c(2,1),cex=0.85,margin=c(0.4,0.4,0.05,0.05))
post <- result[[1]][[1]]
plot(density(post[,"K"]),lwd=2,col=1,main="",xlab="K",
ylim=c(0,4.4e-04),panel.first=grid())
for (i in 2:5) lines(density(result$result[[i]][,"K"]),lwd=2,col=i)
p <- 1e-08
post <- result$result[[1]]
msy <- post[,"r"]*post[,"K"]/((p + 1)^((p+1)/p))
plot(density(msy),lwd=2,col=1,main="",xlab="MSY",type="l",
ylim=c(0,0.0175),panel.first=grid())
for (i in 2:5) {
post <- result$result[[i]]
msy <- post[,"r"]*post[,"K"]/((p + 1)^((p+1)/p))
lines(density(msy),lwd=2,col=i)
}
par(oldp) # return par to old settings; this line not in book
# R-chunk 54 Page 307
# get quantiles of each chain
probs <- c(0.025,0.05,0.5,0.95,0.975)
storeQ <- matrix(0,nrow=6,ncol=5,dimnames=list(1:6,probs))
for (i in 1:5) storeQ[i,] <- quants(result$result[[i]][,"K"])
x <- apply(storeQ[1:5,],2,range)
storeQ[6,] <- 100*(x[2,] - x[1,])/x[2,]
# R-chunk 55 Page 308 Table 7.8 code not in the book
#tabulate quantiles of the five chains
kable(storeQ,digits=c(3,3,3,3,3))
## Management Advice
### Two Views of Risk
### Harvest Strategies
## Risk Assessment Projections
### Deterministic Projections
# R-chunk 56 Pages 310 - 311
#Prepare Fox model on abdat data for future projections Fig7.28
data(abdat); fish <- as.matrix(abdat)
param <- log(c(r=0.3,K=11500,Binit=3300,sigma=0.05))
bestmod <- nlm(f=negLL1,p=param,funk=simpspm,schaefer=FALSE,
logobs=log(fish[,"cpue"]),indat=fish,hessian=TRUE)
optpar <- exp(bestmod$estimate)
ans <- plotspmmod(inp=bestmod$estimate,indat=fish,schaefer=FALSE,
target=0.4,addrmse=TRUE, plotprod=FALSE)
# R-chunk 57 Page 312
out <- spm(bestmod$estimate,indat=fish,schaefer=FALSE)
str(out, width=65, strict.width="cut")
# R-chunk 58 Page 312 Table 7.9 code not in the book
#
kable(out$outmat[1:10,],digits=c(0,4,4,4,4,4,4))
# R-chunk 59 Page 313
# Fig 7.29
catches <- seq(700,1000,50) # projyr=10 is the default
projans <- spmprojDet(spmobj=out,projcatch=catches,plotout=TRUE)
### Accounting for Uncertainty
### Using Asymptotic Errors
# R-chunk 60 Page 315
# generate parameter vectors from a multivariate normal
# project dynamics under a constant catch of 900t
library(mvtnorm)
matpar <- parasympt(bestmod,N=1000) #generate parameter vectors
projs <- spmproj(matpar,fish,projyr=10,constC=900)#do dynamics
# R-chunk 61 Page 315
# Fig 7.30 1000 replicate projections asymptotic errors
outp <- plotproj(projs,out,qprob=c(0.1,0.5),refpts=c(0.2,0.4))
### Using Bootstrap Parameter Vectors
# R-chunk 62 Page 316
#bootstrap generation of plausible parameter vectors for Fox
reps <- 1000
boots <- spmboot(bestmod$estimate,fishery=fish,iter=reps,schaefer=FALSE)
matparb <- boots$bootpar[,1:4] #examine using head(matparb,20)
# R-chunk 63 Page 316
#bootstrap projections. Lower case b for boostrap Fig7.31
projb <- spmproj(matparb,fish,projyr=10,constC=900)
outb <- plotproj(projb,out,qprob=c(0.1,0.5),refpts=c(0.2,0.4))
### Using Samples from a Bayesian Posterior
# R-chunk 64 Pages 317 - 318
#Generate 1000 parameter vectors from Bayesian posterior
param <- log(c(r=0.3,K=11500,Binit=3300,sigma=0.05))
set.seed(444608)
N <- 1000
result <- do_MCMC(chains=1,burnin=100,N=N,thinstep=2048,
inpar=param,infunk=negLL,calcpred=simpspmC,
calcdat=fish,obsdat=log(fish[,"cpue"]),
priorcalc=calcprior,schaefer=FALSE,
scales=c(0.065,0.055,0.1,0.475))
parB <- result[[1]][[1]] #capital B for Bayesian
cat("Acceptance Rate = ",result[[2]],"\n")
# R-chunk 65 Page 318
# auto-correlation, or lack of, and the K trace Fig 7.32
oldp <- parset(plots=c(2,1),cex=0.85)
acf(parB[,2],lwd=2)
plot(1:N,parB[,2],type="l",ylab="K",ylim=c(8000,19000),xlab="")
par(oldp) # return par to old settings; this line not in book
# R-chunk 66 Page 318
# Fig 7.33
matparB <- as.matrix(parB[,1:4]) # B for Bayesian
projs <- spmproj(matparB,fish,constC=900,projyr=10) # project them
plotproj(projs,out,qprob=c(0.1,0.5),refpts=c(0.2,0.4)) #projections
## Concluding Remarks
## Appendix: The Use of Rcpp to Replace simpspm
# R-chunk 67 Page 321
library(Rcpp)
cppFunction('NumericVector simpspmC(NumericVector pars,
NumericMatrix indat, LogicalVector schaefer) {
int nyrs = indat.nrow();
NumericVector predce(nyrs);
NumericVector biom(nyrs+1);
double Bt, qval;
double sumq = 0.0;
double p = 0.00000001;
if (schaefer(0) == TRUE) {
p = 1.0;
}
NumericVector ep = exp(pars);
biom[0] = ep[2];
for (int i = 0; i < nyrs; i++) {
Bt = biom[i];
biom[(i+1)] = Bt + (ep[0]/p)*Bt*(1 - pow((Bt/ep[1]),p)) -
indat(i,1);
if (biom[(i+1)] < 40.0) biom[(i+1)] = 40.0;
sumq += log(indat(i,2)/biom[i]);
}
qval = exp(sumq/nyrs);
for (int i = 0; i < nyrs; i++) {
predce[i] = log(biom[i] * qval);
}
return predce;
}')
}
}
| /man/chapter7.Rd | no_license | haddonm/MQMF | R | false | true | 32,056 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch_chunks.R
\name{chapter7}
\alias{chapter7}
\title{chapter7 The 67 R-code chunks from Surplus Production Models}
\description{
chapter7 is not an active function but rather acts
as a repository for the various example code chunks found in
chapter7. There are 67 r-code chunks in chapter7 You should,
of course, feel free to use and modify any of these example
chunks in your own work.
}
\examples{
\dontrun{
# All the example code from # Surplus Production Models
# Surplus Production Models
## Introduction
### Data Needs
### The Need for Contrast
### When are Catch-Rates Informative
# R-chunk 1 Page 256
#Yellowfin-tuna data from Schaefer 12957
# R-chunk 2 Page 256 Table 7.1 code not in the book
data(schaef)
kable(halftable(schaef,subdiv=2),digits=c(0,0,0,4))
# R-chunk 3 Page 256
#schaef fishery data and regress cpue and catch Fig 7.1
oldp <- parset(plots=c(3,1),margin=c(0.35,0.4,0.05,0.05))
plot1(schaef[,"year"],schaef[,"catch"],ylab="Catch",xlab="Year",
defpar=FALSE,lwd=2)
plot1(schaef[,"year"],schaef[,"cpue"],ylab="CPUE",xlab="Year",
defpar=FALSE,lwd=2)
plot1(schaef[,"catch"],schaef[,"cpue"],type="p",ylab="CPUE",
xlab="Catch",defpar=FALSE,pch=16,cex=1.0)
model <- lm(schaef[,"cpue"] ~ schaef[,"catch"])
abline(model,lwd=2,col=2) # summary(model)
par(oldp) # return par to old settings; this line not in book
# R-chunk 4 Page 257
#cross correlation between cpue and catch in schaef Fig 7.2
oldp <- parset(cex=0.85) #sets par values for a tidy base graphic
ccf(x=schaef[,"catch"],y=schaef[,"cpue"],type="correlation",
ylab="Correlation",plot=TRUE)
par(oldp) # return par to old settings; this line not in book
# R-chunk 5 Page 257
#now plot schaef data with timelag of 2 years on cpue Fig 7.3
oldp <- parset(plots=c(3,1),margin=c(0.35,0.4,0.05,0.05))
plot1(schaef[1:20,"year"],schaef[1:20,"catch"],ylab="Catch",
xlab="Year",defpar=FALSE,lwd=2)
plot1(schaef[3:22,"year"],schaef[3:22,"cpue"],ylab="CPUE",
xlab="Year",defpar=FALSE,lwd=2)
plot1(schaef[1:20,"catch"],schaef[3:22,"cpue"],type="p",
ylab="CPUE",xlab="Catch",defpar=FALSE,cex=1.0,pch=16)
model2 <- lm(schaef[3:22,"cpue"] ~ schaef[1:20,"catch"])
abline(model2,lwd=2,col=2)
par(oldp) # return par to old settings; this line not in book
# R-chunk 6 Page 259
#write out a summary of he regression model2
summary(model2)
## Some Equations
### Production Functions
# R-chunk 7 Page 262
#plot productivity and density-dependence functions Fig7.4
prodfun <- function(r,Bt,K,p) return((r*Bt/p)*(1-(Bt/K)^p))
densdep <- function(Bt,K,p) return((1/p)*(1-(Bt/K)^p))
r <- 0.75; K <- 1000.0; Bt <- 1:1000
sp <- prodfun(r,Bt,K,1.0) # Schaefer equivalent
sp0 <- prodfun(r,Bt,K,p=1e-08) # Fox equivalent
sp3 <- prodfun(r,Bt,K,3) #left skewed production, marine mammal?
oldp <- parset(plots=c(2,1),margin=c(0.35,0.4,0.1,0.05))
plot1(Bt,sp,type="l",lwd=2,xlab="Stock Size",
ylab="Surplus Production",maxy=200,defpar=FALSE)
lines(Bt,sp0 * (max(sp)/max(sp0)),lwd=2,col=2,lty=2) # rescale
lines(Bt,sp3*(max(sp)/max(sp3)),lwd=3,col=3,lty=3) # production
legend(275,100,cex=1.1,lty=1:3,c("p = 1.0 Schaefer","p = 1e-08 Fox",
"p = 3 LeftSkewed"),col=c(1,2,3),lwd=3,bty="n")
plot1(Bt,densdep(Bt,K,p=1),xlab="Stock Size",defpar=FALSE,
ylab="Density-Dependence",maxy=2.5,lwd=2)
lines(Bt,densdep(Bt,K,1e-08),lwd=2,col=2,lty=2)
lines(Bt,densdep(Bt,K,3),lwd=3,col=3,lty=3)
par(oldp) # return par to old settings; this line not in book
### The Schaefer Model
### Sum of Squared Residuals
### Estimating Management Statistics
# R-chunk 8 Page 266
#compare Schaefer and Fox MSY estimates for same parameters
param <- c(r=1.1,K=1000.0,Binit=800.0,sigma=0.075)
cat("MSY Schaefer = ",getMSY(param,p=1.0),"\n") # p=1 is default
cat("MSY Fox = ",getMSY(param,p=1e-08),"\n")
### The Trouble with Equilibria
## Model Fitting
### A Possible Workflow for Stock Assessment
# R-chunk 9 Page 269
#Initial model 'fit' to the initial parameter guess Fig 7.5
data(schaef); schaef <- as.matrix(schaef)
param <- log(c(r=0.1,K=2250000,Binit=2250000,sigma=0.5))
negatL <- negLL(param,simpspm,schaef,logobs=log(schaef[,"cpue"]))
ans <- plotspmmod(inp=param,indat=schaef,schaefer=TRUE,
addrmse=TRUE,plotprod=FALSE)
# R-chunk 10 Pages 270 - 271
#Fit the model first using optim then nlm in sequence
param <- log(c(0.1,2250000,2250000,0.5))
pnams <- c("r","K","Binit","sigma")
best <- optim(par=param,fn=negLL,funk=simpspm,indat=schaef,
logobs=log(schaef[,"cpue"]),method="BFGS")
outfit(best,digits=4,title="Optim",parnames = pnams)
cat("\n")
best2 <- nlm(negLL,best$par,funk=simpspm,indat=schaef,
logobs=log(schaef[,"cpue"]))
outfit(best2,digits=4,title="nlm",parnames = pnams)
# R-chunk 11 Page 271
#optimum fit. Defaults used in plotprod and schaefer Fig 7.6
ans <- plotspmmod(inp=best2$estimate,indat=schaef,addrmse=TRUE,
plotprod=TRUE)
# R-chunk 12 Page 272
#the high-level structure of ans; try str(ans$Dynamics)
str(ans, width=65, strict.width="cut",max.level=1)
# R-chunk 13 Page 273
#compare the parameteric MSY with the numerical MSY
round(ans$Dynamics$sumout,3)
cat("\n Productivity Statistics \n")
summspm(ans) # the q parameter needs more significantr digits
### Is the Analysis Robust?
# R-chunk 14 Page 274
#conduct a robustness test on the Schaefer model fit
data(schaef); schaef <- as.matrix(schaef); reps <- 12
param <- log(c(r=0.15,K=2250000,Binit=2250000,sigma=0.5))
ansS <- fitSPM(pars=param,fish=schaef,schaefer=TRUE, #use
maxiter=1000,funk=simpspm,funkone=FALSE) #fitSPM
#getseed() #generates random seed for repeatable results
set.seed(777852) #sets random number generator with a known seed
robout <- robustSPM(inpar=ansS$estimate,fish=schaef,N=reps,
scaler=40,verbose=FALSE,schaefer=TRUE,
funk=simpspm,funkone=FALSE)
#use str(robout) to see the components included in the output
# R-chunk 15 Page 275 Table 7.2 code not in the book
#outcome of robustness tests
kable(robout$results[,1:5],digits=c(3,4,3,4,3))
kable(robout$results[,6:11],digits=c(3,4,3,4,5,0))
# R-chunk 16 Pages 275 - 276
#Repeat robustness test on fit to schaef data 100 times
set.seed(777854)
robout2 <- robustSPM(inpar=ansS$estimate,fish=schaef,N=100,
scaler=25,verbose=FALSE,schaefer=TRUE,
funk=simpspm,funkone=TRUE,steptol=1e-06)
lastbits <- tail(robout2$results[,6:11],10)
# R-chunk 17 Page 276 Table 7.3 code not in the book
#last 10 rows of robustness test showing deviations
kable(lastbits,digits=c(5,1,1,4,5,0))
# R-chunk 18 Page 276
# replicates from the robustness test Fig 7.7
result <- robout2$results
oldp <- parset(plots=c(2,2),margin=c(0.35,0.45,0.05,0.05))
hist(result[,"r"],breaks=15,col=2,main="",xlab="r")
hist(result[,"K"],breaks=15,col=2,main="",xlab="K")
hist(result[,"Binit"],breaks=15,col=2,main="",xlab="Binit")
hist(result[,"MSY"],breaks=15,col=2,main="",xlab="MSY")
par(oldp) # return par to old settings; this line not in book
# R-chunk 19 Page 277
#robustSPM parameters against each other Fig 7.8
pairs(result[,c("r","K","Binit","MSY")],upper.panel=NULL,pch=1)
### Using Different Data?
# R-chunk 20 Page 278
#Now use the dataspm data-set, which is noisier
set.seed(777854) #other random seeds give different results
data(dataspm); fish <- dataspm #to generalize the code
param <- log(c(r=0.24,K=5174,Binit=2846,sigma=0.164))
ans <- fitSPM(pars=param,fish=fish,schaefer=TRUE,maxiter=1000,
funkone=TRUE)
out <- robustSPM(ans$estimate,fish,N=100,scaler=15, #making
verbose=FALSE,funkone=TRUE) #scaler=10 gives
result <- tail(out$results[,6:11],10) #16 sub-optimal results
# R-chunk 21 Page 279 Table 7.4 code not in the book
#last 10 trials of robustness on dataspm fit
kable(result,digits=c(4,2,2,4,4,3))
## Uncertainty
### Likelihood Profiles
# R-chunk 22 Page 280
# Fig 7.9 Fit of optimum to the abdat data-set
data(abdat); fish <- as.matrix(abdat)
colnames(fish) <- tolower(colnames(fish)) # just in case
pars <- log(c(r=0.4,K=9400,Binit=3400,sigma=0.05))
ans <- fitSPM(pars,fish,schaefer=TRUE) #Schaefer
answer <- plotspmmod(ans$estimate,abdat,schaefer=TRUE,addrmse=TRUE)
# R-chunk 23 Pages 280 - 282
# likelihood profiles for r and K for fit to abdat Fig 7.10
#doprofile input terms are vector of values, fixed parameter
#location, starting parameters, and free parameter locations.
#all other input are assumed to be in the calling environment
doprofile <- function(val,loc,startest,indat,notfix=c(2:4)) {
pname <- c("r","K","Binit","sigma","-veLL")
numv <- length(val)
outpar <- matrix(NA,nrow=numv,ncol=5,dimnames=list(val,pname))
for (i in 1:numv) { #
param <- log(startest) # reset the parameters
param[loc] <- log(val[i]) #insert new fixed value
parinit <- param # copy revised parameter vector
bestmod <- nlm(f=negLLP,p=param,funk=simpspm,initpar=parinit,
indat=indat,logobs=log(indat[,"cpue"]),notfixed=notfix)
outpar[i,] <- c(exp(bestmod$estimate),bestmod$minimum)
}
return(outpar)
}
rval <- seq(0.32,0.46,0.001)
outr <- doprofile(rval,loc=1,startest=c(rval[1],11500,5000,0.25),
indat=fish,notfix=c(2:4))
Kval <- seq(7200,11500,200)
outk <- doprofile(Kval,loc=2,c(0.4,7200,6500,0.3),indat=fish,notfix=c(1,3,4))
oldp <- parset(plots=c(2,1),cex=0.85,outmargin=c(0.5,0.5,0,0))
plotprofile(outr,var="r",defpar=FALSE,lwd=2) #MQMF function
plotprofile(outk,var="K",defpar=FALSE,lwd=2)
par(oldp) # return par to old settings; this line not in book
### Bootstrap Confidence Intervals
# R-chunk 24 Page 283
#find optimum Schaefer model fit to dataspm data-set Fig 7.11
data(dataspm)
fish <- as.matrix(dataspm)
colnames(fish) <- tolower(colnames(fish))
pars <- log(c(r=0.25,K=5500,Binit=3000,sigma=0.25))
ans <- fitSPM(pars,fish,schaefer=TRUE,maxiter=1000) #Schaefer
answer <- plotspmmod(ans$estimate,fish,schaefer=TRUE,addrmse=TRUE)
# R-chunk 25 Page 284
#bootstrap the log-normal residuals from optimum model fit
set.seed(210368)
reps <- 1000 # can take 10 sec on a large Desktop. Be patient
#startime <- Sys.time() # schaefer=TRUE is the default
boots <- spmboot(ans$estimate,fishery=fish,iter=reps)
#print(Sys.time() - startime) # how long did it take?
str(boots,max.level=1)
# R-chunk 26 Page 285
#Summarize bootstrapped parameter estimates as quantiles seen in Table 7.5
bootpar <- boots$bootpar
rows <- colnames(bootpar)
columns <- c(c(0.025,0.05,0.5,0.95,0.975),"Mean")
bootCI <- matrix(NA,nrow=length(rows),ncol=length(columns),
dimnames=list(rows,columns))
for (i in 1:length(rows)) {
tmp <- bootpar[,i]
qtil <- quantile(tmp,probs=c(0.025,0.05,0.5,0.95,0.975),na.rm=TRUE)
bootCI[i,] <- c(qtil,mean(tmp,na.rm=TRUE))
}
# R-chunk 27 page 285 # not visible in the book but this generates Table 7.5
kable(bootCI,digits=c(4,4,4,4,4,4))
# R-chunk 28 Page 286
#boostrap CI. Note use of uphist to expand scale Fig 7.12
colf <- c(1,1,1,4); lwdf <- c(1,3,1,3); ltyf <- c(1,1,1,2)
colsf <- c(2,3,4,6)
oldp <- parset(plots=c(3,2))
hist(bootpar[,"r"],breaks=25,main="",xlab="r")
abline(v=c(bootCI["r",colsf]),col=colf,lwd=lwdf,lty=ltyf)
uphist(bootpar[,"K"],maxval=14000,breaks=25,main="",xlab="K")
abline(v=c(bootCI["K",colsf]),col=colf,lwd=lwdf,lty=ltyf)
hist(bootpar[,"Binit"],breaks=25,main="",xlab="Binit")
abline(v=c(bootCI["Binit",colsf]),col=colf,lwd=lwdf,lty=ltyf)
uphist(bootpar[,"MSY"],breaks=25,main="",xlab="MSY",maxval=450)
abline(v=c(bootCI["MSY",colsf]),col=colf,lwd=lwdf,lty=ltyf)
hist(bootpar[,"Depl"],breaks=25,main="",xlab="Final Depletion")
abline(v=c(bootCI["Depl",colsf]),col=colf,lwd=lwdf,lty=ltyf)
hist(bootpar[,"Harv"],breaks=25,main="",xlab="End Harvest Rate")
abline(v=c(bootCI["Harv",colsf]),col=colf,lwd=lwdf,lty=ltyf)
par(oldp) # return par to old settings; this line not in book
# R-chunk 29 Page 286
#Fig7.13 1000 bootstrap trajectories for dataspm model fit
dynam <- boots$dynam
years <- fish[,"year"]
nyrs <- length(years)
oldp <- parset()
ymax <- getmax(c(dynam[,,"predCE"],fish[,"cpue"]))
plot(fish[,"year"],fish[,"cpue"],type="n",ylim=c(0,ymax),
xlab="Year",ylab="CPUE",yaxs="i",panel.first = grid())
for (i in 1:reps) lines(years,dynam[i,,"predCE"],lwd=1,col=8)
lines(years,answer$Dynamics$outmat[1:nyrs,"predCE"],lwd=2,col=0)
points(years,fish[,"cpue"],cex=1.2,pch=16,col=1)
percs <- apply(dynam[,,"predCE"],2,quants)
arrows(x0=years,y0=percs["5\%",],y1=percs["95\%",],length=0.03,
angle=90,code=3,col=0)
par(oldp) # return par to old settings; this line not in book
# R-chunk 30 Page 288
#Fit the Fox model to dataspm; note different parameters
pars <- log(c(r=0.15,K=6500,Binit=3000,sigma=0.20))
ansF <- fitSPM(pars,fish,schaefer=FALSE,maxiter=1000) #Fox version
bootsF <- spmboot(ansF$estimate,fishery=fish,iter=reps,schaefer=FALSE)
dynamF <- bootsF$dynam
# R-chunk 31 Pages 288 - 289
# bootstrap trajectories from both model fits Fig 7.14
oldp <- parset()
ymax <- getmax(c(dynam[,,"predCE"],fish[,"cpue"]))
plot(fish[,"year"],fish[,"cpue"],type="n",ylim=c(0,ymax),
xlab="Year",ylab="CPUE",yaxs="i",panel.first = grid())
for (i in 1:reps) lines(years,dynamF[i,,"predCE"],lwd=1,col=1,lty=1)
for (i in 1:reps) lines(years,dynam[i,,"predCE"],lwd=1,col=8)
lines(years,answer$Dynamics$outmat[1:nyrs,"predCE"],lwd=2,col=0)
points(years,fish[,"cpue"],cex=1.1,pch=16,col=1)
percs <- apply(dynam[,,"predCE"],2,quants)
arrows(x0=years,y0=percs["5\%",],y1=percs["95\%",],length=0.03,
angle=90,code=3,col=0)
legend(1985,0.35,c("Schaefer","Fox"),col=c(8,1),bty="n",lwd=3)
par(oldp) # return par to old settings; this line not in book
### Parameter Correlations
# R-chunk 32 Page 290
# plot variables against each other, use MQMF panel.cor Fig 7.15
pairs(boots$bootpar[,c(1:4,6,7)],lower.panel=panel.smooth,
upper.panel=panel.cor,gap=0,lwd=2,cex=0.5)
### Asymptotic Errors
# R-chunk 33 Page 290
#Start the SPM analysis using asymptotic errors.
data(dataspm) # Note the use of hess=TRUE in call to fitSPM
fish <- as.matrix(dataspm) # using as.matrix for more speed
colnames(fish) <- tolower(colnames(fish)) # just in case
pars <- log(c(r=0.25,K=5200,Binit=2900,sigma=0.20))
ans <- fitSPM(pars,fish,schaefer=TRUE,maxiter=1000,hess=TRUE)
# R-chunk 34 page 291
#The hessian matrix from the Schaefer fit to the dataspm data
outfit(ans)
# R-chunk 35 Page 292
#calculate the var-covar matrix and the st errors
vcov <- solve(ans$hessian) # calculate variance-covariance matrix
label <- c("r","K", "Binit","sigma")
colnames(vcov) <- label; rownames(vcov) <- label
outvcov <- rbind(vcov,sqrt(diag(vcov)))
rownames(outvcov) <- c(label,"StErr")
# R-chunk 36 Page 290 Table 7.6 code not in the book
# tabulate the variance covariance matrix and StErrs
kable(outvcov,digits=c(5,5,5,5))
# R-chunk 37 Pages 292 - 293
#generate 1000 parameter vectors from multi-variate normal
library(mvtnorm) # use RStudio, or install.packages("mvtnorm")
N <- 1000 # number of parameter vectors, use vcov from above
mvn <- length(fish[,"year"]) #matrix to store cpue trajectories
mvncpue <- matrix(0,nrow=N,ncol=mvn,dimnames=list(1:N,fish[,"year"]))
columns <- c("r","K","Binit","sigma")
optpar <- ans$estimate # Fill matrix with mvn parameter vectors
mvnpar <- matrix(exp(rmvnorm(N,mean=optpar,sigma=vcov)),nrow=N,
ncol=4,dimnames=list(1:N,columns))
msy <- mvnpar[,"r"]*mvnpar[,"K"]/4
nyr <- length(fish[,"year"])
depletion <- numeric(N) #now calculate N cpue series in linear space
for (i in 1:N) { # calculate dynamics for each parameter set
dynamA <- spm(log(mvnpar[i,1:4]),fish)
mvncpue[i,] <- dynamA$outmat[1:nyr,"predCE"]
depletion[i] <- dynamA$outmat["2016","Depletion"]
}
mvnpar <- cbind(mvnpar,msy,depletion) # try head(mvnpar,10)
# R-chunk 38 Page 293
#data and trajectories from 1000 MVN parameter vectors Fig 7.16
oldp <- plot1(fish[,"year"],fish[,"cpue"],type="p",xlab="Year",
ylab="CPUE",maxy=2.0)
for (i in 1:N) lines(fish[,"year"],mvncpue[i,],col="grey",lwd=1)
points(fish[,"year"],fish[,"cpue"],pch=1,cex=1.3,col=1,lwd=2) # data
lines(fish[,"year"],exp(simpspm(optpar,fish)),lwd=2,col=1)# pred
percs <- apply(mvncpue,2,quants) # obtain the quantiles
arrows(x0=fish[,"year"],y0=percs["5\%",],y1=percs["95\%",],length=0.03,
angle=90,code=3,col=1) #add 90\% quantiles
msy <- mvnpar[,"r"]*mvnpar[,"K"]/4 # 1000 MSY estimates
text(2010,1.75,paste0("MSY ",round(mean(msy),3)),cex=1.25,font=7)
par(oldp) # return par to old settings; this line not in book
# R-chunk 39 Pages 293 - 294
#Isolate errant cpue trajectories Fig 7.17
pickd <- which(mvncpue[,"2016"] < 0.40)
oldp <- plot1(fish[,"year"],fish[,"cpue"],type="n",xlab="Year",
ylab="CPUE",maxy=6.25)
for (i in 1:length(pickd))
lines(fish[,"year"],mvncpue[pickd[i],],col=1,lwd=1)
points(fish[,"year"],fish[,"cpue"],pch=16,cex=1.25,col=4)
lines(fish[,"year"],exp(simpspm(optpar,fish)),lwd=3,col=2,lty=2)
par(oldp) # return par to old settings; this line not in book
# R-chunk 40 Page 294
#Use adhoc function to plot errant parameters Fig 7.18
oldp <- parset(plots=c(2,2),cex=0.85)
outplot <- function(var1,var2,pickdev) {
plot1(mvnpar[,var1],mvnpar[,var2],type="p",pch=16,cex=1.0,
defpar=FALSE,xlab=var1,ylab=var2,col=8)
points(mvnpar[pickdev,var1],mvnpar[pickdev,var2],pch=16,cex=1.0)
}
outplot("r","K",pickd) # assumes mvnpar in working environment
outplot("sigma","Binit",pickd)
outplot("r","Binit",pickd)
outplot("K","Binit",pickd)
par(oldp) # return par to old settings; this line not in book
# R-chunk 41 Page 296
#asymptotically sampled parameter vectors Fig 7.19
pairs(mvnpar,lower.panel=panel.smooth, upper.panel=panel.cor,
gap=0,cex=0.25,lwd=2)
# R-chunk 42 Page 297
# Get the ranges of parameters from bootstrap and asymptotic
bt <- apply(bootpar,2,range)[,c(1:4,6,7)]
ay <- apply(mvnpar,2,range)
out <- rbind(bt,ay)
rownames(out) <- c("MinBoot","MaxBoot","MinAsym","MaxAsym")
# R-chunk 43 Page 297 Table 7.7 code not in the book
#tabulate ranges from two approsches
kable(out,digits=c(4,3,3,4,3,4))
### Sometimes Asymptotic Errors Work
# R-chunk 44 Pages 297 - 298
#repeat asymptotice errors using abdat data-set Figure 7.20
data(abdat)
fish <- as.matrix(abdat)
pars <- log(c(r=0.4,K=9400,Binit=3400,sigma=0.05))
ansA <- fitSPM(pars,fish,schaefer=TRUE,maxiter=1000,hess=TRUE)
vcovA <- solve(ansA$hessian) # calculate var-covar matrix
mvn <- length(fish[,"year"])
N <- 1000 # replicates
mvncpueA <- matrix(0,nrow=N,ncol=mvn,dimnames=list(1:N,fish[,"year"]))
columns <- c("r","K","Binit","sigma")
optparA <- ansA$estimate # Fill matrix of parameter vectors
mvnparA <- matrix(exp(rmvnorm(N,mean=optparA,sigma=vcovA)),
nrow=N,ncol=4,dimnames=list(1:N,columns))
msy <- mvnparA[,"r"]*mvnparA[,"K"]/4
for (i in 1:N) mvncpueA[i,]<-exp(simpspm(log(mvnparA[i,]),fish))
mvnparA <- cbind(mvnparA,msy)
oldp <- plot1(fish[,"year"],fish[,"cpue"],type="p",xlab="Year",
ylab="CPUE",maxy=2.5)
for (i in 1:N) lines(fish[,"year"],mvncpueA[i,],col=8,lwd=1)
points(fish[,"year"],fish[,"cpue"],pch=16,cex=1.0) #orig data
lines(fish[,"year"],exp(simpspm(optparA,fish)),lwd=2,col=0)
par(oldp) # return par to old settings; this line not in book
# R-chunk 45 Page 298
#plot asymptotically sampled parameter vectors Figure 7.21
pairs(mvnparA,lower.panel=panel.smooth, upper.panel=panel.cor,
gap=0,pch=16,col=rgb(red=0,green=0,blue=0,alpha = 1/10))
### Bayesian Posteriors
# R-chunk 46 Page 299
#Fit the Fox Model to the abdat data Figure 7.22
data(abdat); fish <- as.matrix(abdat)
param <- log(c(r=0.3,K=11500,Binit=3300,sigma=0.05))
foxmod <- nlm(f=negLL1,p=param,funk=simpspm,indat=fish,
logobs=log(fish[,"cpue"]),iterlim=1000,schaefer=FALSE)
optpar <- exp(foxmod$estimate)
ans <- plotspmmod(inp=foxmod$estimate,indat=fish,schaefer=FALSE,
addrmse=TRUE, plotprod=TRUE)
# R-chunk 47 Page 301
# Conduct an MCMC using simpspmC on the abdat Fox SPM
# This means you will need to compile simpspmC from appendix
set.seed(698381) #for repeatability, possibly only on Windows10
begin <- gettime() # to enable the time taken to be calculated
inscale <- c(0.07,0.05,0.09,0.45) #note large value for sigma
pars <- log(c(r=0.205,K=11300,Binit=3200,sigma=0.044))
result <- do_MCMC(chains=1,burnin=50,N=2000,thinstep=512,
inpar=pars,infunk=negLL,calcpred=simpspmC,
obsdat=log(fish[,"cpue"]),calcdat=fish,
priorcalc=calcprior,scales=inscale,schaefer=FALSE)
# alternatively, use simpspm, but that will take longer.
cat("acceptance rate = ",result$arate," \n")
cat("time = ",gettime() - begin,"\n")
post1 <- result[[1]][[1]]
p <- 1e-08
msy <- post1[,"r"]*post1[,"K"]/((p + 1)^((p+1)/p))
# R-chunk 48 Page 302
#pairwise comparison for MCMC of Fox model on abdat Fig 7.23
pairs(cbind(post1[,1:4],msy),upper.panel = panel.cor,lwd=2,cex=0.2,
lower.panel=panel.smooth,col=1,gap=0.1)
# R-chunk 49 Page 302
# marginal distributions of 3 parameters and msy Figure 7.24
oldp <- parset(plots=c(2,2), cex=0.85)
plot(density(post1[,"r"]),lwd=2,main="",xlab="r") #plot has a method
plot(density(post1[,"K"]),lwd=2,main="",xlab="K") #for output from
plot(density(post1[,"Binit"]),lwd=2,main="",xlab="Binit") # density
plot(density(msy),lwd=2,main="",xlab="MSY") #try str(density(msy))
par(oldp) # return par to old settings; this line not in book
# R-chunk 50 Page 304
#MCMC r and K parameters, approx 50 + 90\% contours. Fig7.25
puttxt <- function(xs,xvar,ys,yvar,lvar,lab="",sigd=0) {
text(xs*xvar[2],ys*yvar[2],makelabel(lab,lvar,sep=" ",
sigdig=sigd),cex=1.2,font=7,pos=4)
} # end of puttxt - a quick utility function
kran <- range(post1[,"K"]); rran <- range(post1[,"r"])
mran <- range(msy) #ranges used in the plots
oldp <- parset(plots=c(1,2),margin=c(0.35,0.35,0.05,0.1)) #plot r vs K
plot(post1[,"K"],post1[,"r"],type="p",cex=0.5,xlim=kran,
ylim=rran,col="grey",xlab="K",ylab="r",panel.first=grid())
points(optpar[2],optpar[1],pch=16,col=1,cex=1.75) # center
addcontours(post1[,"K"],post1[,"r"],kran,rran, #if fails make
contval=c(0.5,0.9),lwd=2,col=1) #contval smaller
puttxt(0.7,kran,0.97,rran,kran,"K= ",sigd=0)
puttxt(0.7,kran,0.94,rran,rran,"r= ",sigd=4)
plot(post1[,"K"],msy,type="p",cex=0.5,xlim=kran, # K vs msy
ylim=mran,col="grey",xlab="K",ylab="MSY",panel.first=grid())
points(optpar[2],getMSY(optpar,p),pch=16,col=1,cex=1.75)#center
addcontours(post1[,"K"],msy,kran,mran,contval=c(0.5,0.9),lwd=2,col=1)
puttxt(0.6,kran,0.99,mran,kran,"K= ",sigd=0)
puttxt(0.6,kran,0.97,mran,mran,"MSY= ",sigd=3)
par(oldp) # return par to old settings; this line not in book
# R-chunk 51 Page 305
#Traces for the Fox model parameters from the MCMC Fig7.26
oldp <- parset(plots=c(4,1),margin=c(0.3,0.45,0.05,0.05),
outmargin = c(1,0,0,0),cex=0.85)
label <- colnames(post1)
N <- dim(post1)[1]
for (i in 1:3) {
plot(1:N,post1[,i],type="l",lwd=1,ylab=label[i],xlab="")
abline(h=median(post1[,i]),col=2)
}
msy <- post1[,1]*post1[,2]/4
plot(1:N,msy,type="l",lwd=1,ylab="MSY",xlab="")
abline(h=median(msy),col=2)
mtext("Step",side=1,outer=T,line=0.0,font=7,cex=1.1)
par(oldp) # return par to old settings; this line not in book
# R-chunk 52 Page 306
#Do five chains of the same length for the Fox model
set.seed(6396679) # Note all chains start from same place, which is
inscale <- c(0.07,0.05,0.09,0.45) # suboptimal, but still the chains
pars <- log(c(r=0.205,K=11300,Binit=3220,sigma=0.044)) # differ
result <- do_MCMC(chains=5,burnin=50,N=2000,thinstep=512,
inpar=pars,infunk=negLL1,calcpred=simpspmC,
obsdat=log(fish[,"cpue"]),calcdat=fish,
priorcalc=calcprior,scales=inscale,
schaefer=FALSE)
cat("acceptance rate = ",result$arate," \n") # always check this
# R-chunk 53 Page 306
#Now plot marginal posteriors from 5 Fox model chains Fig7.27
oldp <- parset(plots=c(2,1),cex=0.85,margin=c(0.4,0.4,0.05,0.05))
post <- result[[1]][[1]]
plot(density(post[,"K"]),lwd=2,col=1,main="",xlab="K",
ylim=c(0,4.4e-04),panel.first=grid())
for (i in 2:5) lines(density(result$result[[i]][,"K"]),lwd=2,col=i)
p <- 1e-08
post <- result$result[[1]]
msy <- post[,"r"]*post[,"K"]/((p + 1)^((p+1)/p))
plot(density(msy),lwd=2,col=1,main="",xlab="MSY",type="l",
ylim=c(0,0.0175),panel.first=grid())
for (i in 2:5) {
post <- result$result[[i]]
msy <- post[,"r"]*post[,"K"]/((p + 1)^((p+1)/p))
lines(density(msy),lwd=2,col=i)
}
par(oldp) # return par to old settings; this line not in book
# R-chunk 54 Page 307
# get quantiles of each chain
probs <- c(0.025,0.05,0.5,0.95,0.975)
storeQ <- matrix(0,nrow=6,ncol=5,dimnames=list(1:6,probs))
for (i in 1:5) storeQ[i,] <- quants(result$result[[i]][,"K"])
x <- apply(storeQ[1:5,],2,range)
storeQ[6,] <- 100*(x[2,] - x[1,])/x[2,]
# R-chunk 55 Page 308 Table 7.8 code not in the book
#tabulate qunatiles of the five chains
kable(storeQ,digits=c(3,3,3,3,3))
## Management Advice
### Two Views of Risk
### Harvest Strategies
## Risk Assessment Projections
### Deterministic Projections
# R-chunk 56 Pages 310 - 311
#Prepare Fox model on abdat data for future projections Fig7.28
data(abdat); fish <- as.matrix(abdat)
param <- log(c(r=0.3,K=11500,Binit=3300,sigma=0.05))
bestmod <- nlm(f=negLL1,p=param,funk=simpspm,schaefer=FALSE,
logobs=log(fish[,"cpue"]),indat=fish,hessian=TRUE)
optpar <- exp(bestmod$estimate)
ans <- plotspmmod(inp=bestmod$estimate,indat=fish,schaefer=FALSE,
target=0.4,addrmse=TRUE, plotprod=FALSE)
# R-chunk 57 Page 312
out <- spm(bestmod$estimate,indat=fish,schaefer=FALSE)
str(out, width=65, strict.width="cut")
# R-chunk 58 Page 312 Table 7.9 code not in the book
#
kable(out$outmat[1:10,],digits=c(0,4,4,4,4,4,4))
# R-chunk 59 Page 313
# Fig 7.29
catches <- seq(700,1000,50) # projyr=10 is the default
projans <- spmprojDet(spmobj=out,projcatch=catches,plotout=TRUE)
### Accounting for Uncertainty
### Using Asymptotic Errors
# R-chunk 60 Page 315
# generate parameter vectors from a multivariate normal
# project dynamics under a constant catch of 900t
library(mvtnorm)
matpar <- parasympt(bestmod,N=1000) #generate parameter vectors
projs <- spmproj(matpar,fish,projyr=10,constC=900)#do dynamics
# R-chunk 61 Page 315
# Fig 7.30 1000 replicate projections asymptotic errors
outp <- plotproj(projs,out,qprob=c(0.1,0.5),refpts=c(0.2,0.4))
### Using Bootstrap Parameter Vectors
# R-chunk 62 Page 316
#bootstrap generation of plausible parameter vectors for Fox
reps <- 1000
boots <- spmboot(bestmod$estimate,fishery=fish,iter=reps,schaefer=FALSE)
matparb <- boots$bootpar[,1:4] #examine using head(matparb,20)
# R-chunk 63 Page 316
#bootstrap projections. Lower case b for boostrap Fig7.31
projb <- spmproj(matparb,fish,projyr=10,constC=900)
outb <- plotproj(projb,out,qprob=c(0.1,0.5),refpts=c(0.2,0.4))
### Using Samples from a Bayesian Posterior
# R-chunk 64 Pages 317 - 318
#Generate 1000 parameter vectors from Bayesian posterior
param <- log(c(r=0.3,K=11500,Binit=3300,sigma=0.05))
set.seed(444608)
N <- 1000
result <- do_MCMC(chains=1,burnin=100,N=N,thinstep=2048,
inpar=param,infunk=negLL,calcpred=simpspmC,
calcdat=fish,obsdat=log(fish[,"cpue"]),
priorcalc=calcprior,schaefer=FALSE,
scales=c(0.065,0.055,0.1,0.475))
parB <- result[[1]][[1]] #capital B for Bayesian
cat("Acceptance Rate = ",result[[2]],"\n")
# R-chunk 65 Page 318
# auto-correlation, or lack of, and the K trace Fig 7.32
oldp <- parset(plots=c(2,1),cex=0.85)
acf(parB[,2],lwd=2)
plot(1:N,parB[,2],type="l",ylab="K",ylim=c(8000,19000),xlab="")
par(oldp) # return par to old settings; this line not in book
# R-chunk 66 Page 318
# Fig 7.33
matparB <- as.matrix(parB[,1:4]) # B for Bayesian
projs <- spmproj(matparB,fish,constC=900,projyr=10) # project them
plotproj(projs,out,qprob=c(0.1,0.5),refpts=c(0.2,0.4)) #projections
## Concluding Remarks
## Appendix: The Use of Rcpp to Replace simpspm
# R-chunk 67 Page 321
library(Rcpp)
cppFunction('NumericVector simpspmC(NumericVector pars,
NumericMatrix indat, LogicalVector schaefer) {
int nyrs = indat.nrow();
NumericVector predce(nyrs);
NumericVector biom(nyrs+1);
double Bt, qval;
double sumq = 0.0;
double p = 0.00000001;
if (schaefer(0) == TRUE) {
p = 1.0;
}
NumericVector ep = exp(pars);
biom[0] = ep[2];
for (int i = 0; i < nyrs; i++) {
Bt = biom[i];
biom[(i+1)] = Bt + (ep[0]/p)*Bt*(1 - pow((Bt/ep[1]),p)) -
indat(i,1);
if (biom[(i+1)] < 40.0) biom[(i+1)] = 40.0;
sumq += log(indat(i,2)/biom[i]);
}
qval = exp(sumq/nyrs);
for (int i = 0; i < nyrs; i++) {
predce[i] = log(biom[i] * qval);
}
return predce;
}')
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.