Dataset schema:
content: large_string (lengths 0 to 6.46M)
path: large_string (lengths 3 to 331)
license_type: large_string (2 classes)
repo_name: large_string (lengths 5 to 125)
language: large_string (1 value)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (4 to 6.46M)
extension: large_string (75 classes)
text: string (lengths 0 to 6.46M)
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/networks.r
\name{networks}
\alias{networks}
\title{Networks metadata.}
\usage{
networks(data = "all", uuid = NULL, query = NULL, identifier = NULL,
  identifierType = NULL, limit = 100, start = NULL, ...)
}
\arguments{
\item{data}{The type of data to get. Default is all data.}

\item{uuid}{UUID of the data network provider. This must be specified if
data is anything other than 'all'.}

\item{query}{Query nodes. Only used when data='all'. Ignored otherwise.}

\item{identifier}{The value for this parameter can be a simple string or
integer, e.g. identifier=120. This parameter doesn't seem to work right now.}

\item{identifierType}{Used in combination with the identifier parameter to
filter identifiers by identifier type. See details. This parameter doesn't
seem to work right now.}

\item{limit}{Number of records to return. Default: 100. Maximum: 1000.}

\item{start}{Record number to start at. Default: 0. Use in combination with
\code{limit} to page through results.}

\item{...}{Further named parameters, such as \code{query}, \code{path}, etc,
passed on to \code{\link[httr]{modify_url}} within the \code{\link[httr]{GET}}
call. Unnamed parameters will be combined with \code{\link[httr]{config}}.}
}
\description{
Networks metadata.
}
\details{
identifierType options:

\itemize{
\item {DOI} No description.
\item {FTP} No description.
\item {GBIF_NODE} Identifies the node (e.g: 'DK' for Denmark, 'sp2000' for
Species 2000).
\item {GBIF_PARTICIPANT} Participant identifier from the GBIF IMS Filemaker
system.
\item {GBIF_PORTAL} Indicates the identifier originated from an
auto_increment column in the portal.data_provider or portal.data_resource
table respectively.
\item {HANDLER} No description.
\item {LSID} Reference controlled by a separate system, used for example by DOI.
\item {SOURCE_ID} No description.
\item {UNKNOWN} No description.
\item {URI} No description.
\item {URL} No description.
\item {UUID} No description.
}
}
\examples{
\dontrun{
networks(limit=5)
networks(uuid='16ab5405-6c94-4189-ac71-16ca3b753df7')
networks(data='endpoint', uuid='16ab5405-6c94-4189-ac71-16ca3b753df7')

# Pass on options to httr
library('httr')
res <- networks(limit=5, config=progress())
}
}
\references{
\url{http://www.gbif.org/developer/registry#networks}
}
path: /man/networks.Rd
license_type: permissive
repo_name: poldham/rgbif
language: R
is_vendor: false
is_generated: false
length_bytes: 2,376
extension: rd
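Per the limit/start description in the Rd arguments above, a hedged paging sketch (an editor's addition, not part of the original file; it assumes the rgbif package is installed):

# Editor's sketch: paging through networks() results with limit/start
library(rgbif)
page1 <- networks(limit = 100, start = 0)    # records 1-100
page2 <- networks(limit = 100, start = 100)  # records 101-200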
## Matrix inversion is usually a costly computation and there may be some
## benefit to caching the inverse of a matrix rather than computing it
## repeatedly. The following is a pair of functions that cache the inverse
## of a matrix.

## The first function will
## 1) set the value of the matrix
## 2) get the value of the matrix
## 3) set (cache) the value of the inverse
## 4) get the cached value of the inverse

## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse  # argument renamed from 'solve' to avoid shadowing base::solve
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve will return the cached inverse of the matrix if it is
## available, or else will compute, cache, and return the inverse.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
path: /cachematrix.R
license_type: no_license
repo_name: prasunpp/ProgrammingAssignment2
language: R
is_vendor: false
is_generated: false
length_bytes: 1,195
extension: r
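A minimal usage sketch for the pair of functions above (an editor's addition, not part of the original file): the first cacheSolve() call computes the inverse, the second hits the cache.

## Editor's sketch: round-trip through the cache
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)  # computes and caches the inverse: diag(0.5, 2)
cacheSolve(cm)  # prints "getting cached data" and returns the cached inverse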
data(mtcars)
mtcars

#rownames(mtcars) #to extract the labels from the dataset

#correctly plotting the mtcars$mpg entry in a barplot, with its labels
op <- par(mar = c(4, 10, 2, 2))
barplot(mtcars$mpg, main = "the mtcars dataset",
        names.arg = rownames(mtcars), horiz = TRUE, las = 2, col = "skyblue")
par(op)  # restore previous graphics parameters (rm(op) would only delete the variable)

#display the cars that have automatic and manual transmission
count <- table(mtcars$am)
barplot(count, main = "transmission",
        names.arg = c("automatic", "manual"), col = "red")

#histogram of horsepower:
h <- hist(mtcars$hp, breaks = 4, col = "magenta")
text(h$mids, h$counts, labels = h$counts, adj = c(0.5, -0.5))

#"box-plot" of horsepower:
boxplot(mtcars$hp, notch = TRUE, col = "brown")

#strip chart of horsepower:
stripchart(mtcars$hp, method = "jitter", pch = 1, col = "orange")

#relation between the 1/4 mile time and hp:
plot(mtcars$hp, mtcars$qsec)
path: /cars.r
license_type: no_license
repo_name: dlgeraghty/R-programming-language
language: R
is_vendor: false
is_generated: false
length_bytes: 834
extension: r
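A hedged follow-up sketch for the last plot above (an editor's addition, not in the original file): fitting a simple linear model to quantify the hp-qsec relation and overlaying it on the open plot.

## Editor's sketch: quantify the hp ~ qsec relation
fit <- lm(qsec ~ hp, data = mtcars)
abline(fit, col = "blue")  # add the fitted line to the scatterplot above
summary(fit)$r.squared     # strength of the (negative) relation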
# Triangle model log linear
# Bayes Poisson direct via features model matrix and Stan
library(rstan)
library(shinystan)
library(CRFutil)

# Triangle model
grphf <- ~X.1:X.2 + X.1:X.3 + X.2:X.3
adj <- ug(grphf, result = "matrix")

# Load samps generated by a true triangle model:
#fpth <- "C:/Users/aliso/codes/CRFutil/tests/regression_tests/hojsgaard_model_tests/triangle_model/"
#fpth <- "/home/npetraco/codes/R/CRFutil/tests/regression_tests/hojsgaard_model_tests/triangle_model/triangle_data/"
#load(paste0(fpth,"triangle_samp.RData"))
#head(samps)

# Use the same "true" theta and seeds as with previous triangle examples:
trit <- make.empty.field(adj.mat = adj, parameterization.typ = "standard")
set.seed(6)
trit$par <- runif(6, -1.5, 1.1)
trit$par
# "true" theta should be:
# 0.07629757 0.93786913 -0.81268462 -0.51175581 0.59945681 1.04299689

# instantiate potentials
out.pot <- make.pots(parms = trit$par, crf = trit, rescaleQ = F, replaceQ = T)

# Now sample
set.seed(1)
num.samps <- 25
samps <- sample.exact(trit, num.samps)
mrf.sample.plot(samps)

# Make state count freq table from samples and compute frequencies of all
# possible state configs. State configs not observed will have 0 freq.
ftab <- data.frame(ftable(data.frame(samps)))
X.all <- ftab[, 1:ncol(samps)]
freqs <- ftab[, ncol(ftab)]

# Model matrix with respect to graph
f0 <- function(y){ as.numeric(c((y==1), (y==2))) }
M <- compute.model.matrix(configs = X.all, edges.mat = trit$edges,
                          node.par = trit$node.par, edge.par = trit$edge.par, ff = f0)

# Fit log(lam) = alp + M theta
options(mc.cores = parallel::detectCores())
rstan_options(auto_write = TRUE)
model.c <- stanc(file = "/home/npetraco/codes/R/CRFutil/inst/poisson_model.stan", model_name = 'model')
sm <- stan_model(stanc_ret = model.c, verbose = T)
dat <- list(
  p = ncol(M),
  N = nrow(M),
  y = freqs,
  Mmodl = M
)
bfit <- sampling(sm, data = dat, iter = 20000, thin = 4, chains = 4)

# Get the parameter estimates:
theta <- extract(bfit, "theta")[[1]]
alpha <- extract(bfit, "alpha")[[1]]

# Extract the graph's partition function:
N <- nrow(samps)
logZ <- log(N) - alpha
hist(logZ)

# Now, using the (median) posterior theta, estimate the partition
# function with belief propagation
llm2 <- make.empty.field(adj.mat = adj, parameterization.typ = "standard")
llm2$par <- apply(theta, MARGIN = 2, FUN = median)
out.pot <- make.pots(parms = llm2$par, crf = llm2, rescaleQ = F, replaceQ = T)
pot.info.llm2.model <- make.gRbase.potentials(llm2, node.names = c("X.1", "X.2", "X.3"),
                                              state.nmes = c("1", "2"))
gR.dist.info.llm2.model <- distribution.from.potentials(pot.info.llm2.model$node.potentials,
                                                        pot.info.llm2.model$edge.potentials)
logZ.llm2.model <- gR.dist.info.llm2.model$logZ
logZ.llm2.model  # log partition function from BP with a theta (from regression)
mean(logZ)       # log partition function direct from regression
median(logZ)

joint.dist.info.llm2.model <- as.data.frame(as.table(gR.dist.info.llm2.model$state.probs))
joint.dist.info.llm2.model <- joint.dist.info.llm2.model[, c(2, 3, 1, 4)]
joint.dist.info.llm2.model[, 4] <- joint.dist.info.llm2.model[, 4] * 100
joint.dist.info.llm2.model

#   X1 X2 X3 bayes.lrm.cp mle.lrm.cp bayes.llm.cp* bayes.llm2 bayes.llm2.lams true.model.cp
# 1  1  1  1         10.3       10.5          16.6       19.9            19.9          18.4
# 2  1  1  2          1.1        1.5          10.7        8.3             8.3           8.0
# 3  2  1  1         22.0       21.5          14.3       14.8            14.8          15.6
# 4  2  1  2         23.1       22.5          17.1       21.0            21.0          22.6
# 5  1  2  1          9.1        9.5           7.9        4.5             4.5           4.2
# 6  1  2  2         10.3       10.5          14.1       14.3            14.3          14.9
# 7  2  2  1          2.0        2.5           4.5        1.5             1.5           1.3
# 8  2  2  2         22.0       21.5          14.8       15.8            15.8          15.0

#   X.1 X.2 X.3 Poisson (log linear)
# 1   1   1   1           10.0327618
# 2   1   1   2            0.9185033
# 3   2   1   1           22.5293519
# 4   2   1   2           23.7361927
# 5   1   2   1            8.8969562
# 6   1   2   2           10.0371836
# 7   2   2   1            1.7055839
# 8   2   2   2           22.1434666

# Check maximum likelihood result too:
fit <- make.empty.field(adj.mat = adj, parameterization.typ = "standard")

# First compute the sufficient stats needed by the likelihood and its grad
fit$par.stat <- mrf.stat(fit, samps)

# Auxiliary, gradient convenience function. Follows train.mrf in CRF:
gradient <- function(par, crf, ...) { crf$gradient }
infr.meth <- infer.exact  # inference method needed for Z and marginals calcs

opt.info <- stats::optim(      # optimize parameters
  par = fit$par,               # theta
  fn = negloglik,              # objective function
  gr = gradient,               # grad of obj func
  crf = fit,                   # passed to fn/gr
  samples = samps,             # passed to fn/gr
  infer.method = infr.meth,    # passed to fn/gr
  update.crfQ = TRUE,          # passed to fn/gr
  method = "L-BFGS-B",
  control = list(trace = 1, REPORT = 1))
opt.info$convergence
opt.info$message
fit$gradient
fit$nll
fit$par

out.pot <- make.pots(parms = fit$par, crf = fit, rescaleQ = F, replaceQ = T)
pot.info.fit.model <- make.gRbase.potentials(fit, node.names = c("X.1", "X.2", "X.3"),
                                             state.nmes = c("1", "2"))
gR.dist.info.fit.model <- distribution.from.potentials(pot.info.fit.model$node.potentials,
                                                       pot.info.fit.model$edge.potentials)
logZ.fit.model <- gR.dist.info.fit.model$logZ
logZ.fit.model  # log partition function from BP with a theta (from regression)
mean(logZ)      # log partition function direct from regression
median(logZ)

joint.dist.info.fit.model <- as.data.frame(as.table(gR.dist.info.fit.model$state.probs))
joint.dist.info.fit.model <- joint.dist.info.fit.model[, c(2, 3, 1, 4)]
joint.dist.info.fit.model[, 4] <- joint.dist.info.fit.model[, 4] * 100
joint.dist.info.fit.model
path: /tests/regression_tests/hojsgaard_model_tests/triangle_model/triangle-model_bayes-loglin_Notes-example.R
license_type: no_license
repo_name: npetraco/CRFutil
language: R
is_vendor: false
is_generated: false
length_bytes: 6,013
extension: r
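A hedged addition to the triangle-model script above (not in the original): the empirical configuration frequencies from samps, on the same percent scale as the model joint distributions, for a direct side-by-side check.

## Editor's sketch: empirical config percentages from the 25 exact samples
emp.pct <- 100 * freqs / sum(freqs)
cbind(X.all, emp.pct)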
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-qq.r
\name{stat_qq}
\alias{geom_qq}
\alias{stat_qq}
\title{Calculation for quantile-quantile plot.}
\usage{
stat_qq(mapping = NULL, data = NULL, geom = "point",
  position = "identity", distribution = stats::qnorm, dparams = list(),
  na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...)

geom_qq(mapping = NULL, data = NULL, geom = "point",
  position = "identity", distribution = stats::qnorm, dparams = list(),
  na.rm = FALSE, show.legend = NA, inherit.aes = TRUE, ...)
}
\arguments{
\item{mapping}{The aesthetic mapping, usually constructed with
\code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set at the
layer level if you are overriding the plot defaults.}

\item{data}{A layer specific dataset - only needed if you want to override
the plot defaults.}

\item{geom}{The geometric object to use to display the data.}

\item{position}{The position adjustment to use for overlapping points on
this layer.}

\item{distribution}{Distribution function to use, if x not specified.}

\item{dparams}{Additional parameters passed on to the \code{distribution}
function.}

\item{na.rm}{If \code{FALSE} (the default), removes missing values with a
warning. If \code{TRUE} silently removes missing values.}

\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}

\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather
than combining with them. This is most useful for helper functions that
define both data and aesthetics and shouldn't inherit behaviour from the
default plot specification, e.g. \code{\link{borders}}.}

\item{...}{other arguments passed on to \code{\link{layer}}. This can
include aesthetics whose values you want to set, not map. See
\code{\link{layer}} for more details.}
}
\description{
Calculation for quantile-quantile plot.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("stat", "qq")}
}

\section{Computed variables}{
\describe{
  \item{sample}{sample quantiles}
  \item{theoretical}{theoretical quantiles}
}
}
\examples{
\donttest{
df <- data.frame(y = rt(200, df = 5))
p <- ggplot(df, aes(sample = y))
p + stat_qq()
p + geom_point(stat = "qq")

# Use fitdistr from MASS to estimate distribution params
params <- as.list(MASS::fitdistr(df$y, "t")$estimate)
ggplot(df, aes(sample = y)) +
  stat_qq(distribution = qt, dparams = params["df"])

# Using to explore the distribution of a variable
ggplot(mtcars) + stat_qq(aes(sample = mpg))
ggplot(mtcars) + stat_qq(aes(sample = mpg, colour = factor(cyl)))
}
}
path: /man/stat_qq.Rd
license_type: no_license
repo_name: jiho/ggplot2
language: R
is_vendor: false
is_generated: true
length_bytes: 2,727
extension: rd
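For comparison with the Rd examples above, a hedged base-R sketch (an editor's addition, not part of the documentation file) of the same normal QQ check without ggplot2:

## Editor's sketch: base-R equivalent of a default stat_qq() layer
df <- data.frame(y = rt(200, df = 5))
qqnorm(df$y)  # sample vs. theoretical normal quantiles
qqline(df$y)  # reference line through the quartiles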
#importing required libraries
library(dplyr)
library(word2vec)
library(stylo)
library(ggplot2)
library(ggrepel)
library(uwot)  # added: supplies umap(), needed for the 2D projection below

#setting the value of path variable to feed to the word2vec model
#change path accordingly
path <- "/Users/priyankasingh/tweets.csv"

#Setting Hyper-parameters for Model training
set.seed(10034)

#training the model with dimensions as 25 and 20 iterations
model <- word2vec(path, dim = 25, iter = 20)

#embedding for word2vec is created after model is trained
embedding <- as.matrix(model)

#project the embedding to 2D with UMAP; this step was missing from the
#original script, which referenced the undefined objects 'viz' and 'dendo'
#('dendo' is assumed to be a typo for 'viz'); the n_neighbors value is an
#editor's assumption
viz <- umap(embedding, n_neighbors = 15, n_threads = 2)
rownames(viz) <- rownames(embedding)

## Creating a Static plot for HateSpeech Vector
df <- data.frame(word = gsub("//.+", "", rownames(viz)),
                 xpos = gsub(".+//", "", rownames(viz)),
                 x = viz[, 1], y = viz[, 2],
                 stringsAsFactors = FALSE)
ggplot(df, aes(x = x, y = y, label = word)) +
  geom_text_repel() +
  theme_void() +
  labs(title = "word2vec - Visualization of Embeddings in 2D using UMAP")
path: /code_Base/exploratory_data_analysis/Word2Vec_Visualization/Static_Plot_UMAP_2D.R
license_type: no_license
repo_name: anish-singh-07/DataScienceR
language: R
is_vendor: false
is_generated: false
length_bytes: 928
extension: r
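A hedged follow-up to the word2vec script above (an editor's addition; the query terms are illustrative, not from the original script): inspecting the trained embedding via nearest-neighbour lookups.

## Editor's sketch: nearest-neighbour queries on the trained model
predict(model, newdata = c("hate", "love"), type = "nearest", top_n = 5)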
require(spatialEco)
require(sp)
require(usedist)
require(rgeos)
require(raster)
require(spatstat)
require(igraph)
require(sf)
require(rgdal)
require(gdistance)
require(otuSummary)
require(gdata)
require(maptools)
require(tidyverse)
require(reshape2)
require(data.table)

#setwd("E:/LCP sensitivity test/HPC_all_inputs")

#Create master map of all habitat created by a conservation strategy
i <- 50
u <- 1000
model <- "econ"
species <- "s"
replicate <- 4

XO <- list.files(paste0(model,"/"), pattern = paste0(species,".tif$"))
ManyRunsStack <- raster::stack(paste0(model,"/",XO))
SumStack <- sum(ManyRunsStack)
rm(ManyRunsStack)

#Bring in ecoregion map to use for crs and extent template
Ecoregion <- raster(paste0(model,"/","Ecoregion100f.tif"))

#Read in roads file
roads <- raster(paste0(model,"/","road.tif"))

#Create empty vectors for connectivity indices
nodes <- vector()
links <- vector()
avgnode <- vector()
totnode <- vector()
avgLCP <- vector()
avgENN <- vector()
density <- vector()
transitivity <- vector()

#Time steps
TimestepList <- as.character(seq(from = 0, to = 80, by = 10))

#Connectivity analysis
Longleaf <- "PinuPalu"
Loblolly <- "PinuTaed"
Pine <- c("PinuEchi","PinuTaed","PinuVirg")
Hardwood <- c("QuerAlba","AcerRubr","LiriTuli","LiquStyr","OxydArbo","CornFlor")

Year0 <- list.files(paste0(model,"/",model,replicate), pattern = (".img$"))
#paste0("inputs/", model, "/",model,replicate,"/")
Longleaf_Stack <- raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-",Longleaf,"-",i,".img")]))
Loblolly_Stack <- raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-",Loblolly,"-",i,".img")]))
Pine_Stack <- raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-",Pine,"-",i,".img")]))
Hardwood_Stack <- raster::stack(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-",Hardwood,"-",i,".img")]))
Total <- raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("bio-TotalBiomass-",i,".img")]))

###Reclassification of biomass into community types
###Rule 1
Longleaf_Stack[Longleaf_Stack > 0.25*(Total),] <- 1
Longleaf_Stack[!Longleaf_Stack == 1] <- 999
### Rule 2
Loblolly_Stack[Loblolly_Stack > 0.9*(Total),] <- 2
Loblolly_Stack[!Loblolly_Stack == 2] <- 999
### Rule 3
Pine_Stack[Pine_Stack > 0.65*(Total),] <- 3
Pine_Stack[!Pine_Stack == 3] <- 999
### Rule 4
Hardwood_Stack[Hardwood_Stack > 0.5*(Total),] <- 4
Hardwood_Stack[!Hardwood_Stack == 4] <- 999
### Rule 5
Total[Total > 0,] <- 5

bigstack <- stack(Longleaf_Stack, Loblolly_Stack, Pine_Stack, Hardwood_Stack, Total)
test_stack <- min(bigstack)
crs(test_stack) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_stack) <- raster::extent(Ecoregion)

median0 <- raster(paste0(model,"/",model,replicate,"/",Year0[Year0 %in% paste0("AGE-MED-",i,".img")]))
crs(median0) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(median0) <- raster::extent(Ecoregion)

#use to incorporate land use change
#LU0 <- raster(paste("C:/Users/tgmozele/Desktop/LCP sensitivity test/geo2noLUC/land-use-", i, ".tif",sep=""))
#use to not incorporate land use change, but establish BAU land use types
LU0 <- raster(paste0(model,"/","NLCD100.tif"))

#Create a raster that will become the resistance raster
test_raster <- test_stack

#Assign projection and reformat to ecoregion extent for the resistance raster
crs(test_raster) <- "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
extent(test_raster) <- raster::extent(Ecoregion)

#Assign values to resistance raster
#longleaf community comp
test_raster[test_stack == 1 & median0 %in% c(0:1),] <- (1/90)
test_raster[test_stack == 1 & median0 %in% c(2:5),] <- (1/80)
test_raster[test_stack == 1 & median0 %in% c(6:7),] <- (1/70)
test_raster[test_stack == 1 & median0 %in% c(8:9),] <- (1/40)
test_raster[test_stack == 1 & median0 %in% c(10:20),] <- (1/10)
test_raster[test_stack == 1 & median0 %in% c(21:34),] <- 1
test_raster[test_stack == 1 & median0 >= 35,] <- 1

#test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/95)
#test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/80)
#test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/25)
#test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/5)
#test_raster[test_stack == 3 & median0 >= 35,] <- 1

#pine plantation community type (was pine mix)
test_raster[test_stack == 2 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 2 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 2 & median0 %in% c(11:20),] <- (1/60)
test_raster[test_stack == 2 & median0 %in% c(21:30),] <- (1/50)
test_raster[test_stack == 2 & median0 >= 31,] <- (1/40)

#pine mix community type (was lob_)
test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/90)
test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/70)
test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/40)
test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/30)
test_raster[test_stack == 3 & median0 >= 35,] <- (1/20)

#test_raster[test_stack == 3 & median0 %in% c(0:5),] <- (1/95)
#test_raster[test_stack == 3 & median0 %in% c(6:10),] <- (1/80)
#test_raster[test_stack == 3 & median0 %in% c(11:20),] <- (1/25)
#test_raster[test_stack == 3 & median0 %in% c(21:34),] <- (1/5)
#test_raster[test_stack == 3 & median0 >= 35,] <- 1

#hardwood community type (was mix)
test_raster[test_stack == 4 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 4 & median0 %in% c(11:20),] <- (1/80)
test_raster[test_stack == 4 & median0 %in% c(21:30),] <- (1/70)
test_raster[test_stack == 4 & median0 >= 31,] <- (1/60)

#mixed forest community type (was hardwood)
test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/90)
test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/70)
test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/60)
test_raster[test_stack == 5 & median0 >= 31,] <- (1/50)

#test_raster[test_stack == 5 & median0 %in% c(0:10),] <- (1/100)
#test_raster[test_stack == 5 & median0 %in% c(11:20),] <- (1/85)
#test_raster[test_stack == 5 & median0 %in% c(21:30),] <- (1/70)
#test_raster[test_stack == 5 & median0 >= 31,] <- (1/60)

test_raster2 <- test_raster
test_raster2[test_raster == 0] <- NA

#land use types
test_raster2[LU0 == 82] <- (1/90)  #cropland
test_raster2[LU0 == 81] <- (1/90)  #hay/pasture
test_raster2[LU0 == 11] <- (1/100) #water
test_raster2[LU0 == 24] <- (1/100) #developed, high intensity
test_raster2[LU0 == 23] <- (1/90)  #developed, med intensity
test_raster2[LU0 == 22] <- (1/80)  #developed, low intensity
test_raster2[LU0 == 31] <- (1/90)  #barren land
#test_raster2[LU0 == 6] <- (1/100) #mining
test_raster2[test_raster2 == 0] <- (1/90)

#roads
test_raster2[roads %in% c(1:2)] <- (1/100)
test_raster2[roads %in% c(3:4)] <- (1/100)
test_raster2[roads %in% c(5:89)] <- (1/90)

test_raster3 <- test_raster2
test_raster3[test_raster3 > 0.1] <- 1
test_raster3[test_raster3 < 1] <- 0

habitat_raster <- overlay(test_raster3, SumStack, fun = function(x,y){(x*y)})

#Cluster habitat cells into habitat nodes using quintiles of occurrence
LikelyHabitat8 <- habitat_raster
LikelyHabitat8[LikelyHabitat8 %in% c(0:36),] <- NA
pol8 <- rasterToPolygons(LikelyHabitat8)
proj4string(pol8) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol8$ID <- seq(1, length(pol8[1]))
polbuf <- gBuffer(pol8, byid=TRUE, id=pol8$ID, width=1.0, quadsegs=5,
                  capStyle="ROUND", joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
a <- raster::disaggregate(polbufdis)

LikelyHabitat5 <- habitat_raster
LikelyHabitat5[LikelyHabitat5 %in% c(0:27,37:45),] <- NA
pol5 <- rasterToPolygons(LikelyHabitat5)
proj4string(pol5) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol5$ID <- seq(1, length(pol5[1]))
polbuf <- gBuffer(pol5, byid=TRUE, id=pol5$ID, width=1.0, quadsegs=5,
                  capStyle="ROUND", joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
b <- raster::disaggregate(polbufdis)

LikelyHabitat3 <- habitat_raster
LikelyHabitat3[LikelyHabitat3 %in% c(0:18,28:45),] <- NA
pol3 <- rasterToPolygons(LikelyHabitat3)
proj4string(pol3) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol3$ID <- seq(1, length(pol3[1]))
polbuf <- gBuffer(pol3, byid=TRUE, id=pol3$ID, width=1.0, quadsegs=5,
                  capStyle="ROUND", joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
c <- raster::disaggregate(polbufdis)

LikelyHabitat1 <- habitat_raster
LikelyHabitat1[LikelyHabitat1 %in% c(0:9,19:45),] <- NA
pol1 <- rasterToPolygons(LikelyHabitat1)
proj4string(pol1) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol1$ID <- seq(1, length(pol1[1]))
polbuf <- gBuffer(pol1, byid=TRUE, id=pol1$ID, width=1.0, quadsegs=5,
                  capStyle="ROUND", joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
d <- raster::disaggregate(polbufdis)

LikelyHabitat <- habitat_raster
LikelyHabitat[LikelyHabitat %in% c(0,10:45),] <- NA
pol <- rasterToPolygons(LikelyHabitat)
proj4string(pol) = "+proj=utm +zone=17 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
pol$ID <- seq(1, length(pol[1]))
polbuf <- gBuffer(pol, byid=TRUE, id=pol$ID, width=1.0, quadsegs=5,
                  capStyle="ROUND", joinStyle="ROUND", mitreLimit=1.0)
polbufdis <- gUnaryUnion(polbuf, id = NULL, checkValidity=NULL)
e <- raster::disaggregate(polbufdis)

#Bring quintile-based habitat nodes together into one SpatialPolygonsDataFrame,
#find area of nodes, and assign numbers
polys <- bind(a,b,c,d,e)
data <- data.frame(ID=seq(1,length(polys)))
pol1_dis <- SpatialPolygonsDataFrame(polys,data)
pol1_dis$area_ha <- raster::area(pol1_dis)/10000
pol1_dis$num1 <- seq(from = 1, to = length(pol1_dis), by = 1)
pol1_dis$num2 <- seq(from = 1, to = length(pol1_dis), by = 1)

#Assign weight to habitat by type and area to be used in Conefor
pol1_dis$weight <- NA
pol1_dis$weight <- pol1_dis$area_ha

#Restrict habitat patches to those 2 hectares and larger, reassign IDs
pol1_dis <- pol1_dis[pol1_dis$area_ha >= 2,]
pol1_dis$ID <- seq(from = 1, to = length(pol1_dis), by = 1)

#Make habitat nodes file to be used for Conefor
maketext <- cbind(pol1_dis$ID, pol1_dis$weight)
write.table(maketext,
            file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,
                        "/nodes_",u,model,species,"yr",i,"Rep_",replicate,".txt"),
            sep = "\t", row.names = FALSE, col.names = FALSE)

#use to find #nodes, avg node size, and total habitat area (to be used for ECA:Area)
nodes[length(nodes)+1] <- length(pol1_dis$ID)
avgnode[length(avgnode)+1] <- mean(pol1_dis$area_ha)
totnode[length(totnode)+1] <- sum(pol1_dis$area_ha)

###create transition matrix from resistance raster, which is required by the
###gdistance package to calculate resistance distance and least cost path
test_tr <- transition(test_raster2, transitionFunction=mean, directions=8)

#find polygon centroids
trueCentroids <- gCentroid(pol1_dis, byid=TRUE, id = pol1_dis$ID)

#clear memory
rm(Longleaf_Stack); rm(Loblolly_Stack); rm(Pine_Stack); rm(Hardwood_Stack); rm(Total)
rm(bigstack)
rm(pol8); rm(pol5); rm(pol3); rm(pol1); rm(pol)
rm(a); rm(b); rm(c); rm(d); rm(e)
rm(polbuf); rm(polys)
rm(LikelyHabitat8); rm(LikelyHabitat5); rm(LikelyHabitat3); rm(LikelyHabitat1); rm(LikelyHabitat)

#get coordinates from trueCentroids
cent_coords <- geom(trueCentroids)

#find euclidean distance nearest neighbor
EUnn <- nndist(cent_coords)
avgENN[length(avgENN)+1] <- mean(EUnn)

#Euclidean distance between points- if euclidean distance is greater than
#2000 meters, remove that pair- STILL NEED TO DO!!
#1500 meters for small songbird (Minor and Urban 2008, Sutherland et al. 2000)
#timber rattlesnake (generalist) ~1200 meters (USFS FEIS)
#~500 (449) for eastern spadefoot toad (Baumberger et al. 2019- Movement and
#habitat selection of western spadefoot)
#10,000 biggest median dispersal distance for birds found by Sutherland et al.

#create matrix of euclidean distance between polygon centroids
EUpts <- spDists(x = trueCentroids, y = trueCentroids, longlat = FALSE,
                 segments = FALSE, diagonal = FALSE)

#condense matrix into table and remove duplicate pairs
EUnew <- subset(reshape2::melt(EUpts), value != 0)
EU5000 <- EUnew[!(EUnew$value > u),]
EU5000_nodups <- EU5000[!duplicated(data.frame(list(do.call(pmin,EU5000),do.call(pmax,EU5000)))),]
rm(EUpts)

#merge
colnames(EU5000_nodups) <- c("num1", "num2", "EUD")
lookup <- cbind(pol1_dis$ID, pol1_dis$num1, pol1_dis$num2)
colnames(lookup) <- c("ID", "num1", "num2")
EU_test <- merge(x = EU5000_nodups, y = lookup, by = "num1", all.x = TRUE)
colnames(EU_test) <- c("num1", "num2", "EUD", "ID", "num2.y")
EU_test2 <- merge(x = EU_test, y = lookup, by = "num2", all.x = TRUE)
EU_fin <- cbind(EU_test2$ID.x, EU_test2$ID.y)
EU_fin_df <- data.frame(EU_fin)

#clear more memory
rm(EU_test); rm(EU_test2); rm(EU_fin)

print("#####################################Entering Cost Distance#############################")

#calculate least cost path
test_trC <- geoCorrection(test_tr, type="c")  #geocorrection for least cost path
rm(test_tr)
costDist <- costDistance(test_trC, trueCentroids)  #LCP
rm(trueCentroids)
costmatrix <- matrixConvert(costDist, colname = c("X1", "X2", "resistance"))
colnames(costmatrix) <- c("X1", "X2", "resistance")
EU_fin_df$costdis <- NULL
costdist5000 <- merge(EU_fin_df, costmatrix, by.x = c("X2", "X1"), by.y = c("X1", "X2"))
costdist5000df <- data.frame(costdist5000)
costcomplete <- costdist5000df[!is.infinite(rowSums(costdist5000df)),]
write.table(costcomplete,
            file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,
                        "/distance_",u,model,species,"yr",i,"Rep_",replicate,".txt"),
            sep = "\t", row.names = FALSE, col.names = FALSE)
#write.csv(costcomplete, file=paste0("Outputs/distance_",u,model,"yr",i,"Rep_",replicate,".csv"), row.names=F)
print("#####################################Finished Cost distance#############################")

links[length(links)+1] <- nrow(costcomplete)
avgLCP[length(avgLCP)+1] <- mean(costcomplete$resistance)

#get adjacency matrix to build igraph
cost_col <- cbind(costcomplete$X2, costcomplete$X1)
adj <- get.adjacency(graph.edgelist(as.matrix(cost_col), directed=FALSE))
network <- graph_from_adjacency_matrix(adj)
gdensity <- edge_density(network, loops = FALSE)
density[length(density)+1] <- gdensity
trans <- transitivity(network, type="global")
transitivity[length(transitivity)+1] <- trans

results <- data.frame(nodes, links, avgnode, totnode, avgLCP, avgENN, density, transitivity)
write.table(results,
            file=paste0(model,"/",model,replicate,"/","Output_",model,replicate,species,u,
                        "/Metrics_",u,model,species,"yr",i,"Rep_",replicate,".txt"),
            sep = "\t", row.names = TRUE, col.names = TRUE)
path: /Code/HPC_1000econspsyr50Rep4.R
license_type: no_license
repo_name: ZacharyRobbins/TransferHPC
language: R
is_vendor: false
is_generated: false
length_bytes: 14,796
extension: r
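To make the two igraph metrics computed at the end of the script above concrete, a small self-contained illustration (an editor's addition, not part of the original script):

## Editor's sketch: edge_density and transitivity on a toy graph
library(igraph)
g <- make_graph(~ A-B, B-C, C-A, C-D)  # one triangle plus a pendant edge
edge_density(g)                   # edges / possible edges: 4/6 for this graph
transitivity(g, type = "global")  # closed triplets / all triplets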
# requires fabsbase.R block
# libraries used below; loaded here in case fabsbase.R does not attach them
library(dplyr)
library(tibble)  # column_to_rownames()
library(vegan)   # metaMDS(), ordiplot(), ordihull()

#month number to name
da.wide$month <- factor(da.wide$month,
                        levels = c(1,2,3,4,5,6,7,8,9,10,11,12),
                        labels = c("Jan","Feb","Mar","Apr","May","Jun",
                                   "Jul","Aug","Sep","Oct","Nov","Dec"))

#filter summer, average multiple samples
da.unique.annual <- filter(da.wide, month == "Jun" | month == "Jul" | month == "Aug") %>%
  group_by(site) %>%
  summarise_all(funs(mean))
da.unique.annual$uid <- paste(da.unique.annual$site, da.unique.annual$year)
da.unique.annual <- column_to_rownames(da.unique.annual, var = "site")

nmds.july2014 <- metaMDS(da.unique.annual[(which(colnames(da.wide.habitat.env.july2014) == "argo")) :
                                            (which(colnames(da.wide.habitat.env.july2014) == "yero"))],
                         k = 3)

nmds_plot <- function(data, habitat){
  ordiplot(data, display = "site", type = "n", xlim = c(-2, 2))
  # use the 'data' argument throughout (the original hard-coded nmds.july2014 here)
  points(data, col = "black", pch = as.integer(habitat$veg_subst))
  ordihull(data, habitat$veg_subst, scaling = "symmetric",
           lty = as.integer(habitat$veg_subst))
  legend("topright", levels(habitat$veg_subst),
         pch = 1:length(levels(habitat$veg_subst)),
         lty = 1:length(levels(habitat$veg_subst)))
}
nmds_plot(nmds.july2014, da.wide.habitat.env.july2014)
/biodiversity_analysis/unique_samplingID.R
no_license
Bennymm/fabs
R
false
false
1,458
r
## Jason Michelizzi
## 27 Apr 2020
##
## functions to implement a cached inverse matrix

## Makes an object that can cache its own inverse
makeCacheMatrix <- function(x = matrix()) {
  my_inv <- NULL
  get <- function() { x }
  set <- function(y) {
    x <<- y
    my_inv <<- NULL
  }
  getinverse <- function() { my_inv }
  setinverse <- function(inverse) { my_inv <<- inverse }
  list(get = get, set = set,
       getinverse = getinverse, setinverse = setinverse)
}

## Solves for the inverse of the matrix and caches the result
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    return(inv)
  }
  raw_matrix <- x$get()
  inv <- solve(raw_matrix, ...)
  x$setinverse(inv)
  return(inv)
}
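A quick usage sketch (not part of the original file) showing the cache behaving as intended:

m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
inv1 <- cacheSolve(m)  # computes solve() and stores the result
inv2 <- cacheSolve(m)  # returns the cached inverse without recomputing
identical(inv1, inv2)  # TRUE
m$get() %*% inv1       # 2x2 identity matrix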
/cachematrix.R
no_license
morbosad/ProgrammingAssignment2
R
false
false
803
r
library(raster)
library(maptools)

# read in South America boundary
data(wrld_simpl)
SA <- c("Argentina", "Bolivia", "Brazil", "Chile", "Colombia", "Ecuador",
        "Guyana", "Paraguay", "Peru", "Suriname", "Uruguay", "Venezuela")
sa <- wrld_simpl[which(wrld_simpl@data$NAME %in% SA), ]

# read in thermal data and output a matrix
# set functions ----------------------
# make a matrix of all the z score values from a raster stack
get_raster_vals <- function(data){
  z_values <- matrix(NA, nrow = 259200, ncol = dim(data)[3])
  for (i in 1:dim(data)[3]) {
    sub_values1 <- getValues(data[[i]])
    if (i == 1) {
      z_values <- sub_values1
    } else {
      z_values <- cbind(z_values, sub_values1)
    }
  }
  return(z_values)
}

thermal_regions <- raster("/projectnb/modislc/users/rkstan/GE712/data/thermal_regions_IIASA_v3/thermal_regimes_pastures.tif")

# combine classes
# class 1 - tropics warm, tropics cool
# class 2 - subtropics warm, subtropics cool, subtropics cold, subtropics very cold
# class 3 - temperate cool, temperate cold, temperate very cold
# NA - boreal
fun1 <- function(x) {x[x==1 | x==2] <- 1; return(x)}
thermal_sub_1 <- calc(thermal_regions, fun1)
fun2 <- function(x) {x[x==3 | x==4 | x==5 | x==6] <- 2; return(x)}
thermal_sub_2 <- calc(thermal_sub_1, fun2)
fun3 <- function(x) {x[x==7 | x==8 | x==9] <- 3; return(x)}
thermal_sub_3 <- calc(thermal_sub_2, fun3)
fun4 <- function(x) {x[x==10] <- NA; return(x)}
thermal_sub_4 <- calc(thermal_sub_3, fun4)

# plot the thermal regions!
plot(thermal_sub_4, col = c("red", "black", "green"))
plot(sa, add = TRUE, lty = 1, lwd = 0.5)

thermal_regions_vals <- get_raster_vals(thermal_sub_4)
write.csv(thermal_regions_vals,
          file = "/projectnb/modislc/users/rkstan/GE712/outputs/thermal_regions_vals.csv",
          quote = FALSE, row.names = FALSE)
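The four sequential calc() calls can be collapsed into one reclassification. A sketch (not from the original script) using raster::reclassify, which maps value ranges (from, to] to a new value, over the integer class codes 1-10 described in the comments above:

rcl <- matrix(c(0, 2, 1,     # tropics (codes 1-2)      -> 1
                2, 6, 2,     # subtropics (codes 3-6)   -> 2
                6, 9, 3,     # temperate (codes 7-9)    -> 3
                9, 10, NA),  # boreal (code 10)         -> NA
              ncol = 3, byrow = TRUE)
thermal_sub_4 <- reclassify(thermal_regions, rcl)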
/scripts/read_thermal_regions.R
no_license
rkstan/GE712
R
false
false
1,822
r
setwd("/work/biernaskie_lab/sarthak_sinha/D0_7_scATAC_Seq_Reindeer/signac/") library(Signac) library(Seurat) library(JASPAR2020) library(TFBSTools) library(BSgenome.Btaurus.UCSC.bosTau9) library(patchwork) set.seed(1234) library(BSgenome.Hsapiens.UCSC.hg19) library(GenomeInfoDb) library(ggplot2) library(patchwork) set.seed(1234) #BiocManager::install("BSgenome.Btaurus.UCSC.bosTau9") library(BSgenome.Btaurus.UCSC.bosTau9) library(hdf5r) library(ensembldb) library(AnnotationHub) library(devtools) library(monocle3) library(cicero) library(SeuratWrappers) load(file = "reindeer_d0_7_back_velvet.fibros.Robj") DefaultAssay(reindeer_d0_7_back_velvet.fibros) = "peaks" reindeer_d0_7_back_velvet.fibros.cds <- as.cell_data_set(x = reindeer_d0_7_back_velvet.fibros) reindeer_d0_7_back_velvet.fibros.cds.cicero <- make_cicero_cds(reindeer_d0_7_back_velvet.fibros.cds, reduced_coordinates = reducedDims(reindeer_d0_7_back_velvet.fibros.cds)$UMAP) genome <- seqlengths(reindeer_d0_7_back_velvet.fibros) genome.df <- data.frame("chr" = names(genome), "length" = genome) conns <- run_cicero(reindeer_d0_7_back_velvet.fibros.cds.cicero, genomic_coords = genome.df, sample_num = 100) head(conns) save(conns, file = "conns.Robj") ccans <- generate_ccans(conns) head(ccans) save(ccans, file = "ccans.Robj") links <- ConnectionsToLinks(conns = conns, ccans = ccans) Links(reindeer_d0_7_back_velvet.fibros) <- links save(reindeer_d0_7_back_velvet.fibros, file = "reindeer_d0_7_back_velvet.fibros.Robj") jpeg(file = "CoveragePlot_1_reindeer_d0_7_back_velvet.fibros_NFRKB_2_co-assisible.jpeg", width = 40, height = 15, units = "cm", res = 500) CoveragePlot( object = reindeer_d0_7_back_velvet.fibros, region = "29-36670323-36675093", extend.upstream = 20, extend.downstream = 20) dev.off()
/Reindeer_Day0_7_ATAC_Analysis/job_cicero.R
no_license
ArzinaJaffer/Reindeer_Wound
R
false
false
1,796
r
setwd("/work/biernaskie_lab/sarthak_sinha/D0_7_scATAC_Seq_Reindeer/signac/") library(Signac) library(Seurat) library(JASPAR2020) library(TFBSTools) library(BSgenome.Btaurus.UCSC.bosTau9) library(patchwork) set.seed(1234) library(BSgenome.Hsapiens.UCSC.hg19) library(GenomeInfoDb) library(ggplot2) library(patchwork) set.seed(1234) #BiocManager::install("BSgenome.Btaurus.UCSC.bosTau9") library(BSgenome.Btaurus.UCSC.bosTau9) library(hdf5r) library(ensembldb) library(AnnotationHub) library(devtools) library(monocle3) library(cicero) library(SeuratWrappers) load(file = "reindeer_d0_7_back_velvet.fibros.Robj") DefaultAssay(reindeer_d0_7_back_velvet.fibros) = "peaks" reindeer_d0_7_back_velvet.fibros.cds <- as.cell_data_set(x = reindeer_d0_7_back_velvet.fibros) reindeer_d0_7_back_velvet.fibros.cds.cicero <- make_cicero_cds(reindeer_d0_7_back_velvet.fibros.cds, reduced_coordinates = reducedDims(reindeer_d0_7_back_velvet.fibros.cds)$UMAP) genome <- seqlengths(reindeer_d0_7_back_velvet.fibros) genome.df <- data.frame("chr" = names(genome), "length" = genome) conns <- run_cicero(reindeer_d0_7_back_velvet.fibros.cds.cicero, genomic_coords = genome.df, sample_num = 100) head(conns) save(conns, file = "conns.Robj") ccans <- generate_ccans(conns) head(ccans) save(ccans, file = "ccans.Robj") links <- ConnectionsToLinks(conns = conns, ccans = ccans) Links(reindeer_d0_7_back_velvet.fibros) <- links save(reindeer_d0_7_back_velvet.fibros, file = "reindeer_d0_7_back_velvet.fibros.Robj") jpeg(file = "CoveragePlot_1_reindeer_d0_7_back_velvet.fibros_NFRKB_2_co-assisible.jpeg", width = 40, height = 15, units = "cm", res = 500) CoveragePlot( object = reindeer_d0_7_back_velvet.fibros, region = "29-36670323-36675093", extend.upstream = 20, extend.downstream = 20) dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale-alpha.r
\name{scale_alpha}
\alias{scale_alpha}
\alias{scale_alpha_continuous}
\alias{scale_alpha_binned}
\alias{scale_alpha_discrete}
\alias{scale_alpha_ordinal}
\alias{scale_alpha_datetime}
\alias{scale_alpha_date}
\title{Alpha transparency scales}
\usage{
scale_alpha(..., range = c(0.1, 1))

scale_alpha_continuous(..., range = c(0.1, 1))

scale_alpha_binned(..., range = c(0.1, 1))

scale_alpha_discrete(...)

scale_alpha_ordinal(..., range = c(0.1, 1))
}
\arguments{
\item{...}{Other arguments passed on to \code{\link[=continuous_scale]{continuous_scale()}},
\link{binned_scale}, or \code{\link[=discrete_scale]{discrete_scale()}} as appropriate,
to control name, limits, breaks, labels and so forth.}

\item{range}{Output range of alpha values. Must lie between 0 and 1.}
}
\description{
Alpha-transparency scales are not tremendously useful, but can be a convenient way to
visually down-weight less important observations. \code{scale_alpha()} is an alias for
\code{scale_alpha_continuous()} since that is the most common use of alpha, and it saves
a bit of typing.
}
\examples{
p <- ggplot(mpg, aes(displ, hwy)) +
  geom_point(aes(alpha = year))
p
p + scale_alpha("cylinders")
p + scale_alpha(range = c(0.4, 0.8))
}
\seealso{
Other colour scales:
\code{\link{scale_colour_brewer}()},
\code{\link{scale_colour_continuous}()},
\code{\link{scale_colour_gradient}()},
\code{\link{scale_colour_grey}()},
\code{\link{scale_colour_hue}()},
\code{\link{scale_colour_steps}()},
\code{\link{scale_colour_viridis_d}()}
}
\concept{colour scales}
/man/scale_alpha.Rd
permissive
banfai/ggplot2
R
false
true
1,629
rd
# create factors with value labels
mtcars$gear <- factor(mtcars$gear, levels = c(3,4,5),
                      labels = c("3gears","4gears","5gears"))
mtcars$am <- factor(mtcars$am, levels = c(0,1),
                    labels = c("Automatic","Manual"))
mtcars$cyl <- factor(mtcars$cyl, levels = c(4,6,8),
                     labels = c("4cyl","6cyl","8cyl"))

qplot(mpg, data = mtcars, geom = "density", fill = gear, alpha = I(.5),
      main = "Distribution of Gas Mileage", xlab = "Miles Per Gallon",
      ylab = "Density")
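qplot() is deprecated in recent ggplot2 releases; the same density plot with the ggplot() interface would look like this (a sketch, not from the original file):

library(ggplot2)
ggplot(mtcars, aes(x = mpg, fill = gear)) +
  geom_density(alpha = 0.5) +
  labs(title = "Distribution of Gas Mileage",
       x = "Miles Per Gallon", y = "Density")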
/projects/r/plot.R
no_license
aca-cg/showcase
R
false
false
442
r
library(dplyr)

test_that("tagging duplicates works", {
  data <- data.frame(
    x = c(1, 1, 2, 2, 3, 4, 4, 5),
    y = letters[1:8]
  )
  expect_equal(mutate(data, tag_duplicates(x, .add_tags = TRUE))$.n_ |> setNames(NULL),
               c(1, 2, 1, 2, 1, 1, 2, 1))
  expect_equal(mutate(data, tag_duplicates(x, .add_tags = TRUE))$.N_ |> setNames(NULL),
               c(2, 2, 2, 2, 1, 2, 2, 1))
  expect_equal(mutate(data, tag_duplicates(x, .add_tags = TRUE))$.dup_ |> setNames(NULL),
               c(FALSE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, FALSE))
})
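Judging from the expectations above, .n_ is the running occurrence index within each value of x, .N_ is the total count of that value, and .dup_ flags every occurrence after the first. A plain-dplyr sketch of the same semantics (illustrative only, not the package's implementation):

library(dplyr)
data.frame(x = c(1, 1, 2, 2, 3, 4, 4, 5)) |>
  group_by(x) |>
  mutate(.n_ = row_number(),        # occurrence index within group
         .N_ = n(),                 # group size
         .dup_ = row_number() > 1)  # TRUE for repeats after the first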
/tests/testthat/test-tag_duplicates.R
permissive
myominnoo/mStats
R
false
false
558
r
suppressMessages(library(tidyverse))

flagstat_path = "output/multiqc_data/multiqc_samtools_flagstat.txt"
depth_path = "output/depth_concat.txt"

flagstat <- read.table(flagstat_path, header = T, sep = '\t')
flagstat <- flagstat %>%
  mutate(total_passed = total_passed/1e+06,
         properly.paired_passed = properly.paired_passed/1e+06,
         flagstat_total = flagstat_total/1e+06,
         mapped_passed = mapped_passed/1e+06,
         singletons_passed = singletons_passed/1e+06,
         duplicates_passed = duplicates_passed/1e+06)

depth <- read.table(depth_path, header = T, sep = '\t')

flagstat %>%
  select(flagstat_total, mapped_passed, properly.paired_passed) %>%
  summary()

### Flagstat
p1 <- flagstat %>% ggplot(aes(x = flagstat_total)) + geom_boxplot() +
  xlab('Total reads (millions)') + theme_minimal() + xlim(c(600, 1200)) +
  theme(axis.text.y = element_blank())
p2 <- flagstat %>% ggplot(aes(x = mapped_passed)) + geom_boxplot() +
  xlab('Mapped (millions)') + theme_minimal() + xlim(c(600, 1200)) +
  theme(axis.text.y = element_blank())
p3 <- flagstat %>% ggplot(aes(x = properly.paired_passed)) + geom_boxplot() +
  xlab('Properly paired (millions)') + theme_minimal() + xlim(c(600, 1200)) +
  theme(axis.text.y = element_blank())
# p4 <- flagstat %>% ggplot(aes(x=duplicates_passed)) + geom_boxplot() +
#   ylab('Duplicates (millions)') + theme_minimal()
# p5 <- flagstat %>% ggplot(aes(x=singletons_passed)) + geom_boxplot() +
#   ylab('Singletons (millions)') + theme_minimal()

library(gridExtra)
grid.arrange(p1, p2, p3, ncol = 1)

### Average depth
summary(depth)
depth_intervals <- depth %>%
  select(-average_depth) %>%
  pivot_longer(cols = -sample, names_to = 'group', values_to = 'depth')

avg_depth_plot <- depth %>% ggplot(aes(y = average_depth)) + geom_boxplot()
avg_depth_plot

depth_percent <- depth_intervals %>%
  ggplot(aes(y = group, x = depth)) + geom_boxplot() + theme_minimal() +
  scale_y_discrete(labels = c("percentage_above_30" = "30x",
                              "percentage_above_20" = "20x",
                              "percentage_above_10" = "10x")) +
  ylab('') + xlab('% coverage')
depth_percent

### PSC
psc <- read.table('output/total_psc.stats', sep = '\t')
colnames(psc) <- c('PSC', 'id', 'sample', 'nRefHom', 'nNonRefHom', 'nHets',
                   'nTransitions', 'nTransversions', 'nIndels', 'average_depth',
                   'nSingletons', 'nHapRef', 'nHapAlt', 'nMissing')
/stats.r
no_license
golikp/NaszeGenomy
R
false
false
2,509
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.cv_grid_lasso.R
\name{predict.cv_grid_lasso}
\alias{predict.cv_grid_lasso}
\title{Predictions for continuous response model}
\usage{
\method{predict}{cv_grid_lasso}(object, newx, l = NULL, missing.data = FALSE,
  psd.method = "enet", ...)
}
\arguments{
\item{object}{Object as returned by grid_lasso or cv_grid_lasso, provided return.list == T}

\item{newx}{Design matrix for new observations}

\item{l}{Specify point on solution path (optional)}

\item{missing.data}{If TRUE then will use (slower) procedure that corrects for missing
data, and will only return a (constant shifted) mean square error rather than actual
predictions}

\item{psd.method}{The way that the gram matrix is made positive semidefinite. By default
an elastic net term, alternatives are "coco" for CoCoLasso}

\item{...}{Additional arguments to pass to other \code{predict} methods}
}
\value{
Vector of predictions (or constant-shifted error if missing.data = TRUE)
}
\description{
Returns predicted response from grid lasso model with new data
}
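A minimal usage sketch inferred only from the signature documented above; the fit call and the data objects (X_train, y_train, X_test) are hypothetical and this is not taken from the package documentation:

fit <- cv_grid_lasso(X_train, y_train, return.list = TRUE)  # hypothetical fit call
preds <- predict(fit, newx = X_test)          # predictions at the CV-selected point
preds5 <- predict(fit, newx = X_test, l = 5)  # at the 5th point on the solution path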
/man/predict.cv_grid_lasso.Rd
no_license
bgs25/SubsetGridRegression
R
false
true
1,105
rd
/degAD2.R
no_license
floroChen/SPinterpoleMethComparison
R
false
false
285
r
#' Returns quarter ends between two dates
#'
#' @param start_date
#' @param end_date
#'
#' @return
#' @export
#'
#' @examples
qtr_end <- function(start_date, end_date) {
  seq.Date(as.Date(start_date), as.Date(end_date), by = 'month') %>%
    as.yearqtr() %>%
    as.Date(frac = 1) %>%
    unique()
}

#' Month start for a given date
#'
#' @param date
#'
#' @return
#' @export
#'
#' @examples
get_month_start <- function(date) {
  date %>%
    zoo::as.yearmon() %>%
    as.Date(frac = 0) %>%
    as.character()
}

#' Month end for a given date
#'
#' @param date
#'
#' @return
#' @export
#'
#' @examples
get_month_end <- function(date) {
  date %>%
    zoo::as.yearmon() %>%
    as.Date(frac = 1) %>%
    as.character()
}

#' Returns month ends between two dates
#'
#' @param start_date
#' @param end_date
#'
#' @return
#' @export
#'
#' @examples
month_end <- function(start_date, end_date) {
  seq.Date(as.Date(start_date), as.Date(end_date), by = 'month') %>%
    as.yearmon() %>%
    as.Date(frac = 1)
}

#' Return calendar start date of the year
#'
#' @param dates
#'
#' @return
#' @export
#'
#' @examples
start_of_year <- function(dates) {
  dates %>%
    as.Date() %>%
    lubridate::year() %>%
    lubridate::make_date(1, 1) %>%
    as.character()
}

date_to_str <- function(date) {
  format(date, '%Y-%m-%d')
}

# parse ISO8601 dates
parseISO8601 <- function(...) {
  li <- list(...)
  if (length(li) > 1) {
    return(c(li[[1]], li[[2]]))
  } else {
    li <- xts::.parseISO8601(li)
    ans <- c(lubridate::date(li[[1]]), lubridate::date(li[[2]]))
    return(ans)
  }
}

#' Convert a time period string to start date
#'
#' @param period_str
#' @param as_of
#'
#' @return
#' @export
#'
#' @examples
#' period_str <- "近5年"
#' period_str <- "今年以来"
#' period_str_to_start_date(period_str, as_of)
period_str_to_start_date <- function(period_str, as_of) {
  if (stringr::str_detect(period_str, "今年以来")) {
    return(start_of_year(as_of))
  } else {
    n_years <- stringr::str_match(period_str, "近([1-9]+)年")[, 2] %>% as.numeric()
    stopifnot(!is.na(n_years))
    (as.Date(as_of) - lubridate::years() * n_years) %>% as.character()
  }
}

#' Create and cache calendar
#'
#' @return
#' @export
#'
#' @examples
create_calendar <- function(){
  start.date = as.Date("2000-01-01")
  end.date = as.Date("2020-12-31")
  tdays <- wind_tdays(freq = "D", start = start.date, end = end.date)
  alldays <- seq.Date(from = start.date, to = end.date, by = "day")
  holidays <- dplyr::anti_join(tibble::tibble(date = alldays),
                               tibble::tibble(date = tdays), by = "date")
  bizdays::create.calendar(
    "SSE",
    holidays = holidays$date,
    start.date = start.date,
    end.date = end.date,
    weekdays = c('saturday', 'sunday')
  )
  bizdays::save_calendar("SSE", file.path("~/.R/calendars/SSE.cal"))
}
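A usage sketch for the date helpers (they assume zoo, lubridate and magrittr are attached, as the function bodies above do; the period strings mean "year to date" and "last N years"):

qtr_end("2020-01-01", "2020-09-30")
# "2020-03-31" "2020-06-30" "2020-09-30"
get_month_end("2020-02-15")
# "2020-02-29" (2020 is a leap year)
period_str_to_start_date("今年以来", as_of = "2020-06-30")
# "2020-01-01"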
/R/date-util.R
no_license
dfyj/wm
R
false
false
2,865
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare.fh.R
\name{compare}
\alias{compare}
\title{Shows plots for the comparison of estimates}
\usage{
compare(object, indicator, MSE, CV, ...)
}
\arguments{
\item{indicator}{optional character vector that selects which indicators shall be
returned: (i) all calculated indicators ("all"); (ii) each indicator name: "Mean",
"Quantile_10", "Quantile_25", "Median", "Quantile_75", "Quantile_90", "Head_Count",
"Poverty_Gap", "Gini", "Quintile_Share" or the function name/s of "custom_indicator/s";
(iii) groups of indicators: "Quantiles", "Poverty", "Inequality" or "Custom". If two of
these groups are selected, only the first one is returned. Defaults to "all". Note,
additional custom indicators can be defined as argument for model-based approaches (see
also \code{\link{ebp}}) and do not appear in groups of indicators even though these might
belong to one of the groups.}

\item{direct}{optional, an object of type "emdi","direct", representing point and MSE
estimates.}

\item{model}{an object of type "emdi","model", representing point and MSE estimates.}

\item{label}{argument that enables to customize title and axis labels. There are three
options to label the evaluation plots: (i) original labels ("orig"), (ii) axis labels
but no title ("no_title"), (iii) neither axis labels nor title ("blank").}

\item{color}{a vector with two elements. The first color determines the color of the
line in the scatter plot and the color for the direct estimates in the line plot. The
second color specifies the color of the line for the model-based estimates.}

\item{shape}{a numeric vector with two elements. The first shape determines the shape
of the points in the line plot for the direct estimates and the second shape for the
model-based estimates. The options are numbered from 0 to 25.}

\item{line_type}{a character vector with two elements. The first line type determines
the type of the line for the direct estimates and the second type for the model-based
estimates. The options are: "twodash", "solid", "longdash", "dotted", "dotdash",
"dashed" and "blank".}

\item{gg_theme}{\code{\link[ggplot2]{theme}} list from package \pkg{ggplot2}. For using
this argument, package \pkg{ggplot2} must be loaded via \code{library(ggplot2)}. See
also Example 2.}
}
\value{
A scatter plot and a line plot comparing direct and model-based estimators for each
selected indicator obtained by \code{\link[ggplot2]{ggplot}}.
}
\description{
For all indicators or a selection of indicators two plots are returned. The first plot
is a scatter plot of estimates to compare and the second is a line plot with these
estimates.
}
/man/compare.Rd
no_license
akreutzmann/fayherriot
R
false
true
2,695
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_theta.R
\name{estimateTheta}
\alias{estimateTheta}
\title{Estimate bias-corrected transcript abundances (FPKM)}
\usage{
estimateTheta(transcripts, bam.files, fitpar, genome, models, readlength,
  minsize, maxsize, subset = TRUE, niter = 100, lib.sizes = NULL,
  optim = FALSE, custom.features = NULL)
}
\arguments{
\item{transcripts}{a GRangesList of the exons for multiple isoforms of a gene.
For a single-isoform gene, just wrap the exons in \code{GRangesList()}}

\item{bam.files}{a named vector pointing to the indexed BAM files}

\item{fitpar}{the output of \link{fitBiasModels}}

\item{genome}{a BSGenome object}

\item{models}{a list of character strings or formula describing the bias models,
see vignette}

\item{readlength}{the read length}

\item{minsize}{the minimum fragment length to model}

\item{maxsize}{the maximum fragment length to model}

\item{subset}{logical, whether to downsample the non-observed fragments.
Default is TRUE}

\item{niter}{the number of EM iterations. Default is 100.}

\item{lib.sizes}{a named vector of library sizes to use in calculating the FPKM.
If NULL (the default) a value of 1e6 is used for all samples.}

\item{optim}{logical, whether to use numerical optimization instead of the EM.
Default is FALSE.}

\item{custom.features}{an optional function to add custom features to the fragment
types DataFrame. This function takes in a DataFrame returned by \link{buildFragtypes}
and returns a DataFrame with additional columns added. Default is NULL, adding no
custom features.}
}
\value{
a list of lists. For each sample, a list with elements: theta, lambda and count.
\itemize{
\item \strong{theta} gives the FPKM estimates for the isoforms in \code{transcripts}
\item \strong{lambda} gives the average bias term for the isoforms
\item \strong{count} gives the number of fragments which are compatible with any of
the isoforms in \code{transcripts}
}
}
\description{
This function takes the fitted bias parameters from \link{fitBiasModels} and uses this
information to derive bias corrected estimates of transcript abundance for a gene (with
one or more isoforms) across multiple samples.
}
\examples{
# see vignette for a more realistic example

# these next lines just write out a BAM file from R
# typically you would already have a BAM file
library(alpineData)
library(GenomicAlignments)
library(rtracklayer)
gap <- ERR188088()
dir <- system.file(package="alpineData", "extdata")
bam.file <- c("ERR188088" = file.path(dir,"ERR188088.bam"))
export(gap, con=bam.file)

data(preprocessedData)
library(GenomicRanges)
library(BSgenome.Hsapiens.NCBI.GRCh38)

models <- list(
  "GC"=list(formula="count ~
    ns(gc,knots=gc.knots,Boundary.knots=gc.bk) +
    ns(relpos,knots=relpos.knots,Boundary.knots=relpos.bk) +
    0",
    offset=c("fraglen"))
)

readlength <- 75
minsize <- 125 # see vignette how to choose
maxsize <- 175 # see vignette how to choose

txs <- txdf.theta$tx_id[txdf.theta$gene_id == "ENSG00000198918"]

res <- estimateTheta(transcripts=ebt.theta[txs],
                     bam.files=bam.file,
                     fitpar=fitpar.small,
                     genome=Hsapiens,
                     models=models,
                     readlength=readlength,
                     minsize=minsize,
                     maxsize=maxsize)
}
/man/estimateTheta.Rd
no_license
mikelove/alpine
R
false
true
3,369
rd
## ff_schedule (MFL) ##

#' Get a dataframe detailing every game for every franchise
#'
#' @param conn a conn object created by \code{ff_connect()}
#' @param ... for other platforms
#'
#' @examples
#' \donttest{
#' ssb_conn <- ff_connect(platform = "mfl", league_id = 54040, season = 2020)
#' ff_schedule(ssb_conn)
#' }
#'
#' @describeIn ff_schedule MFL: returns schedule data, one row for every franchise for every week. Completed games have result data.
#'
#' @export
ff_schedule.mfl_conn <- function(conn, ...) {
  schedule_raw <- mfl_getendpoint(conn, "schedule") %>%
    purrr::pluck("content", "schedule", "weeklySchedule") %>%
    tibble::tibble() %>%
    tidyr::unnest_wider(1)

  if (is.null(schedule_raw[["matchup"]])) {
    return(NULL)
  }

  schedule <- schedule_raw %>%
    dplyr::mutate(
      matchup_length = purrr::map(.data$matchup, length),
      matchup = purrr::map_if(.data$matchup, .data$matchup_length == 1, ~ list(.x))
    ) %>%
    tidyr::unnest_longer("matchup") %>%
    tidyr::unnest_wider("matchup") %>%
    tidyr::hoist("franchise", "away" = 1, "home" = 2) %>%
    tidyr::unnest_wider("away", names_sep = "_") %>%
    tidyr::unnest_wider("home", names_sep = "_") %>%
    dplyr::select(-dplyr::ends_with("isHome")) %>%
    dplyr::mutate_at(dplyr::vars(dplyr::contains("score"), "week", dplyr::contains("spread")), as.numeric)

  home <- schedule %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("home")), ~ stringr::str_remove(.x, "home_")) %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("away")), ~ stringr::str_replace(.x, "away_", "opponent_")) %>%
    dplyr::select(dplyr::any_of(c(
      "week",
      "franchise_id" = "id",
      "franchise_score" = "score",
      "spread",
      "result",
      "opponent_id",
      "opponent_score"
    )))

  away <- schedule %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("away")), ~ stringr::str_remove(.x, "away_")) %>%
    dplyr::rename_at(dplyr::vars(dplyr::contains("home")), ~ stringr::str_replace(.x, "home_", "opponent_")) %>%
    dplyr::select(dplyr::any_of(c(
      "week",
      "franchise_id" = "id",
      "franchise_score" = "score",
      "spread",
      "result",
      "opponent_id",
      "opponent_score"
    )))

  full_schedule <- dplyr::bind_rows(home, away) %>%
    dplyr::arrange(.data$week, .data$franchise_id)

  return(full_schedule)
}
/R/mfl_schedule.R
permissive
tonyelhabr/ffscrapr
R
false
false
2,339
r
#!/usr/bin/env Rscript
args <- commandArgs(trailingOnly = T)
mutfile <- args[1]
hetmuts <- args[2]
hascn <- as.logical(args[3])
output <- args[4]
pthr <- as.numeric(args[5])
log <- args[6]
p <- as.numeric(args[7])

lookfor <- read.table(gzfile(hetmuts), header = FALSE, stringsAsFactors = FALSE)
colnames(lookfor) <- c("id", "homet", "refreads", "mutreads", "af")
muts <- read.table(gzfile(mutfile), header = FALSE, stringsAsFactors = FALSE)

total_gs_het <- nrow(lookfor)

# now we look for the muts in muts that are compatible with an AF of 0.5 with a binomial
# test, correcting for cn if it's there.
# (base) data@clo:~/work/AF_spectra/dataset/mixology$ zcat CRC0277LMX0A03202TUMD02000.nofilter.tsv.gz
# chr18:22982188:G:A 1/1 0 220 0.996

if (!hascn) {
  colnames(muts) <- c("id", "homet", "refreads", "mutreads", "af")
  muts$cn <- 1
} else {
  colnames(muts) <- c("id", "homet", "refreads", "mutreads", "af", "cn")
}

# note: this local function shadows base R's rbinom()
rbinom <- function(mut, p) {
  refreads <- mut[1]
  mutreads <- mut[2]
  cn <- mut[4]
  if (mutreads == 0) { # a bunch of calls for biod..
    return(FALSE)
  }
  tot <- refreads + mutreads
  pvals <- c()
  # pbinom(q, size, prob, lower.tail = TRUE, log.p = FALSE)
  if (cn == 1) {
    bin <- binom.test(mutreads, tot, p = p, alternative = "two.sided")
    pvals <- bin$p.value
  } else {
    # if cn == 3 we can be "het" being 1/3 or 2/3, if cn == 4 ...
    stop("CN != 1 still not implemented")
  }
  return(any(pvals > pthr))
}

consideredhet <- apply(muts[, c(3, 4, 5, 6)], 1, rbinom, p)
save.image("pippo.RData")
hets <- muts[consideredhet, ]
total_found_het <- nrow(hets)

common <- intersect(hets$id, lookfor$id)
lost <- setdiff(lookfor$id, hets$id)
lost_nocalled <- setdiff(lookfor$id, muts$id)
callable <- intersect(lookfor$id, muts$id)
lost_binomial <- setdiff(callable, hets$id)
fromnowhere <- setdiff(hets$id, lookfor$id)
callsfromnowhere <- setdiff(muts$id, lookfor$id)

info <- data.frame(what = c("total_gs_het", "het_found", "common", "not_found", "appeared"),
                   n = c(total_gs_het, total_found_het, length(common), length(lost),
                         length(fromnowhere)))

muts$het <- "no"
muts[consideredhet, ]$het <- "yes"
muts$gs_het <- "no"
muts[muts$id %in% lookfor$id, ]$gs_het <- "yes"

write.table(muts, gzfile(output), sep = "\t", quote = FALSE, col.names = FALSE, row.names = FALSE)
write.table(info, log, sep = "\t", quote = FALSE, col.names = FALSE, row.names = FALSE)
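A worked example of the per-variant test above (the read counts and the pthr value are hypothetical, the expected AF p follows the script's het assumption of 0.5):

# 45 mutant reads out of 100 total, tested against expected het AF p = 0.5
bin <- binom.test(45, 100, p = 0.5, alternative = "two.sided")
bin$p.value         # ~0.37
bin$p.value > 0.05  # TRUE with pthr = 0.05, so the site is kept as het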
/local/src/binomial_AF_p.R
no_license
vodkatad/AF_spectra
R
false
false
2,343
r
library(vegan)

setwd("~/R/Analysis/1_Test/ITS")
ASV.table <- read.table(file = "rarefied_ASV_table.txt", header = T)
ASV <- ASV.table[, 1:(ncol(ASV.table) - 7)]
ASV.t <- t(ASV)

shannon <- diversity(ASV.t, index = "shannon", base = 2)
simpson <- diversity(ASV.t, index = "simpson")
invsimpson <- diversity(ASV.t, index = "invsimpson")
fisher <- fisher.alpha(ASV.t)

bind <- cbind(shannon, simpson, invsimpson, fisher)
write.table(bind, file = "diversity.csv")
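What the first three indices compute, shown by hand for one sample (x is a hypothetical count vector; these are the standard definitions that vegan's diversity() implements):

x <- c(10, 5, 3, 2)
p <- x / sum(x)
-sum(p * log2(p))  # Shannon H with base 2, as diversity(..., index="shannon", base=2)
1 - sum(p^2)       # Simpson's index
1 / sum(p^2)       # inverse Simpson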
/Diversity index.r
no_license
Chikae-Tatsumi/Statistics
R
false
false
452
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aadoc.R, R/RcppExports.R
\name{methane}
\alias{methane}
\alias{ATMOSPHERIC_CH4}
\alias{PREINDUSTRIAL_CH4}
\alias{EMISSIONS_CH4}
\alias{NATURAL_CH4}
\alias{LIFETIME_SOIL}
\alias{LIFETIME_STRAT}
\title{Identifiers for quantities in the methane component}
\usage{
ATMOSPHERIC_CH4()

PREINDUSTRIAL_CH4()

EMISSIONS_CH4()

NATURAL_CH4()

LIFETIME_SOIL()

LIFETIME_STRAT()
}
\description{
These identifiers correspond to variables that can be read and/or set in the methane
component.
}
\section{Output variables}{
These variables can be read from the methane component.
\describe{
\item{ATMOSPHERIC_CH4}{Atmospheric methane concentration}
\item{PREINDUSTRIAL_CH4}{Preindustrial methane concentration}
}
}

\section{Input variables}{
These variables can be set in the methane component. The expected units string is given
after each description.
\describe{
\item{EMISSIONS_CH4}{Methane emissions (\code{"Tg CH4"})}
\item{PREINDUSTRIAL_CH4}{Preindustrial methane concentration (\code{"ppbv CH4"})}
\item{NATURAL_CH4}{Natural methane emissions (\code{"Tg CH4"})}
\item{LIFETIME_SOIL}{Time scale for methane loss into soil (\code{"Years"})}
\item{LIFETIME_STRAT}{Time scale for methane loss into stratosphere (\code{"Years"})}
}
}

\section{Note}{
Because these identifiers are provided as \code{#define} macros in the hector code,
these identifiers are provided in the R interface as functions. Therefore, these
objects must be called to use them; \emph{e.g.}, \code{GETDATA()} instead of the more
natural looking \code{GETDATA}.
}

\seealso{
Other capability identifiers: \code{\link{carboncycle}},
\code{\link{concentrations}}, \code{\link{emissions}},
\code{\link{forcings}}, \code{\link{haloemiss}},
\code{\link{haloforcings}}, \code{\link{ocean}},
\code{\link{parameters}}, \code{\link{so2}},
\code{\link{temperature}}
}
\concept{capability identifiers}
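A usage sketch consistent with the Note above: each identifier is a zero-argument function returning the variable-name string the hector API expects. This reflects my reading of the hector R workflow, and the bundled ini path is an assumption, so treat the details as illustrative:

library(hector)
ini <- system.file("input/hector_rcp45.ini", package = "hector")  # assumed bundled input
core <- newcore(ini)
run(core)
fetchvars(core, 2000:2010, vars = ATMOSPHERIC_CH4())  # identifier is called, not passed bare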
/man/methane.Rd
permissive
bvegawe/hector
R
false
true
1,943
rd
#DEMOGRAPHIC VARIABLES #----------------------------------- NHANES.1112.Demo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/DEMO_G.xpt" NHANES.0910.Demo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/DEMO_F.xpt" NHANES.0708.Demo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/DEMO_E.xpt" NHANES.0506.Demo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/DEMO_D.xpt" NHANES.0304.Demo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/DEMO_C.xpt" NHANES.0102.Demo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/DEMO_B.xpt" NHANES.9900.Demo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/DEMO.xpt" #LAB VARIABLES #-----------------------------------Set names for file locations NHANES.1112.FastGluc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/GLU_G.xpt" NHANES.1112.Zinc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/CUSEZN_G.xpt" NHANES.1112.Trigly.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/TRIGLY_G.xpt" NHANES.1112.Glycohemo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/GHB_G.xpt" NHANES.1112.Cadmium.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/PBCD_G.xpt" NHANES.1112.FastQuest.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/FASTQX_G.xpt" NHANES.0910.FastGluc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/GLU_F.xpt" NHANES.0910.Trigly.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/TRIGLY_F.xpt" NHANES.0910.Glycohemo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/GHB_F.xpt" NHANES.0910.Cadmium.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/PBCD_F.xpt" NHANES.0910.FastQuest.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/FASTQX_F.xpt" NHANES.0708.FastGluc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/GLU_E.xpt" NHANES.0708.Trigly.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/TRIGLY_E.xpt" NHANES.0708.Glycohemo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/GHB_E.xpt" NHANES.0708.Cadmium.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/PBCD_E.xpt" NHANES.0708.FastQuest.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/FASTQX_E.xpt" NHANES.0506.FastGluc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/GLU_D.xpt" NHANES.0506.Trigly.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/TRIGLY_D.xpt" NHANES.0506.Glycohemo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/GHB_D.xpt" NHANES.0506.Cadmium.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/PBCD_D.xpt" NHANES.0506.FastQuest.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/FASTQX_D.xpt" NHANES.0304.FastGluc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/L10AM_C.xpt" NHANES.0304.Trigly.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/L13AM_C.xpt" NHANES.0304.Glycohemo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/L10_C.xpt" NHANES.0304.Cadmium.location <- 
"ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/L06BMT_C.xpt" NHANES.0304.FastQuest.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/PH_C.xpt" NHANES.0102.FastGluc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/L10AM_B.xpt" NHANES.0102.Trigly.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/L13AM_B.xpt" NHANES.0102.Glycohemo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/L10_B.xpt" NHANES.0102.Cadmium.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/L06_B.xpt" NHANES.0102.FastQuest.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/PH_B.xpt" NHANES.9900.FastGluc.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/LAB10AM.xpt" NHANES.9900.Trigly.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/LAB13AM.xpt" NHANES.9900.Glycohemo.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/LAB10.xpt" NHANES.9900.Cadmium.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/LAB06.xpt" NHANES.9900.FastQuest.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/PH.xpt" #BODY MEASUREMENTS VARIABLES #----------------------------------- NHANES.1112.BM.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/BMX_G.xpt" NHANES.0910.BM.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/BMX_F.xpt" NHANES.0708.BM.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/BMX_E.xpt" NHANES.0506.BM.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/BMX_D.xpt" NHANES.0304.BM.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/BMX_C.xpt" NHANES.0102.BM.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/BMX_B.xpt" NHANES.9900.BM.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/BMX.xpt" #BLOOD PRESSURE VARIABLES #----------------------------------- NHANES.1112.BP.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/BPX_G.xpt" NHANES.0910.BP.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/BPX_F.xpt" NHANES.0708.BP.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/BPX_E.xpt" NHANES.0506.BP.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/BPX_D.xpt" NHANES.0304.BP.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/BPX_C.xpt" NHANES.0102.BP.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/BPX_B.xpt" NHANES.9900.BP.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/BPX.xpt" #DIABETES QUESTIONNAIRE VARIABLES #----------------------------------- NHANES.1112.DIQ.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2011-2012/DIQ_G.xpt" NHANES.0910.DIQ.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2009-2010/DIQ_F.xpt" NHANES.0708.DIQ.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2007-2008/DIQ_E.xpt" NHANES.0506.DIQ.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2005-2006/DIQ_D.xpt" NHANES.0304.DIQ.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2003-2004/DIQ_C.xpt" NHANES.0102.DIQ.location <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/2001-2002/DIQ_B.xpt" NHANES.9900.DIQ.location <- 
"ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes/1999-2000/DIQ.xpt" #-----------------------------------Download files and save files in .rda NHANES.1112.Demo.df <- download.nhanes.file(NHANES.1112.Demo.location) NHANES.1112.FastGlu.df <- download.nhanes.file(NHANES.1112.FastGluc.location) NHANES.1112.Trigly.df <- download.nhanes.file(NHANES.1112.Trigly.location) NHANES.1112.Glycohemo.df <- download.nhanes.file(NHANES.1112.Glycohemo.location) NHANES.1112.Cadmium.df <- download.nhanes.file(NHANES.1112.Cadmium.location) NHANES.1112.Zinc.df <- download.nhanes.file(NHANES.1112.Zinc.location) NHANES.1112.FastQuest.df <- download.nhanes.file(NHANES.1112.FastQuest.location) NHANES.1112.BM.df <- download.nhanes.file(NHANES.1112.BM.location) NHANES.1112.BP.df <- download.nhanes.file(NHANES.1112.BP.location) NHANES.1112.DIQ.df <- download.nhanes.file(NHANES.1112.DIQ.location) NHANES.0910.Demo.df <- download.nhanes.file(NHANES.0910.Demo.location) NHANES.0910.FastGlu.df <- download.nhanes.file(NHANES.0910.FastGluc.location) NHANES.0910.Trigly.df <- download.nhanes.file(NHANES.0910.Trigly.location) NHANES.0910.Glycohemo.df <- download.nhanes.file(NHANES.0910.Glycohemo.location) NHANES.0910.Cadmium.df <- download.nhanes.file(NHANES.0910.Cadmium.location) NHANES.0910.FastQuest.df <- download.nhanes.file(NHANES.0910.FastQuest.location) NHANES.0910.BM.df <- download.nhanes.file(NHANES.0910.BM.location) NHANES.0910.BP.df <- download.nhanes.file(NHANES.0910.BP.location) NHANES.0910.DIQ.df <- download.nhanes.file(NHANES.0910.DIQ.location) NHANES.0708.Demo.df <- download.nhanes.file(NHANES.0708.Demo.location) NHANES.0708.FastGlu.df <- download.nhanes.file(NHANES.0708.FastGluc.location) NHANES.0708.Trigly.df <- download.nhanes.file(NHANES.0708.Trigly.location) NHANES.0708.Glycohemo.df <- download.nhanes.file(NHANES.0708.Glycohemo.location) NHANES.0708.Cadmium.df <- download.nhanes.file(NHANES.0708.Cadmium.location) NHANES.0708.FastQuest.df <- download.nhanes.file(NHANES.0708.FastQuest.location) NHANES.0708.BM.df <- download.nhanes.file(NHANES.0708.BM.location) NHANES.0708.BP.df <- download.nhanes.file(NHANES.0708.BP.location) NHANES.0708.DIQ.df <- download.nhanes.file(NHANES.0708.DIQ.location) NHANES.0506.Demo.df <- download.nhanes.file(NHANES.0506.Demo.location) NHANES.0506.FastGlu.df <- download.nhanes.file(NHANES.0506.FastGluc.location) NHANES.0506.Trigly.df <- download.nhanes.file(NHANES.0506.Trigly.location) NHANES.0506.Glycohemo.df <- download.nhanes.file(NHANES.0506.Glycohemo.location) NHANES.0506.Cadmium.df <- download.nhanes.file(NHANES.0506.Cadmium.location) NHANES.0506.FastQuest.df <- download.nhanes.file(NHANES.0506.FastQuest.location) NHANES.0506.BM.df <- download.nhanes.file(NHANES.0506.BM.location) NHANES.0506.BP.df <- download.nhanes.file(NHANES.0506.BP.location) NHANES.0506.DIQ.df <- download.nhanes.file(NHANES.0506.DIQ.location) NHANES.0304.Demo.df <- download.nhanes.file(NHANES.0304.Demo.location) NHANES.0304.FastGlu.df <- download.nhanes.file(NHANES.0304.FastGluc.location) NHANES.0304.Trigly.df <- download.nhanes.file(NHANES.0304.Trigly.location) NHANES.0304.Glycohemo.df <- download.nhanes.file(NHANES.0304.Glycohemo.location) NHANES.0304.Cadmium.df <- download.nhanes.file(NHANES.0304.Cadmium.location) NHANES.0304.FastQuest.df <- download.nhanes.file(NHANES.0304.FastQuest.location) NHANES.0304.BM.df <- download.nhanes.file(NHANES.0304.BM.location) NHANES.0304.BP.df <- download.nhanes.file(NHANES.0304.BP.location) NHANES.0304.DIQ.df <- download.nhanes.file(NHANES.0304.DIQ.location) 
NHANES.0102.Demo.df <- download.nhanes.file(NHANES.0102.Demo.location) NHANES.0102.FastGlu.df <- download.nhanes.file(NHANES.0102.FastGluc.location) NHANES.0102.Trigly.df <- download.nhanes.file(NHANES.0102.Trigly.location) NHANES.0102.Glycohemo.df <- download.nhanes.file(NHANES.0102.Glycohemo.location) NHANES.0102.Cadmium.df <- download.nhanes.file(NHANES.0102.Cadmium.location) NHANES.0102.FastQuest.df <- download.nhanes.file(NHANES.0102.FastQuest.location) NHANES.0102.BM.df <- download.nhanes.file(NHANES.0102.BM.location) NHANES.0102.BP.df <- download.nhanes.file(NHANES.0102.BP.location) NHANES.0102.DIQ.df <- download.nhanes.file(NHANES.0102.DIQ.location) NHANES.9900.Demo.df <- download.nhanes.file(NHANES.9900.Demo.location) NHANES.9900.FastGlu.df <- download.nhanes.file(NHANES.9900.FastGluc.location) NHANES.9900.Trigly.df <- download.nhanes.file(NHANES.9900.Trigly.location) NHANES.9900.Glycohemo.df <- download.nhanes.file(NHANES.9900.Glycohemo.location) NHANES.9900.Cadmium.df <- download.nhanes.file(NHANES.9900.Cadmium.location) NHANES.9900.FastQuest.df <- download.nhanes.file(NHANES.9900.FastQuest.location) NHANES.9900.BM.df <- download.nhanes.file(NHANES.9900.BM.location) NHANES.9900.BP.df <- download.nhanes.file(NHANES.9900.BP.location) NHANES.9900.DIQ.df <- download.nhanes.file(NHANES.9900.DIQ.location) #saves file in current working directory save( NHANES.1112.Demo.df, NHANES.1112.FastGlu.df, NHANES.1112.Trigly.df, NHANES.1112.Glycohemo.df, NHANES.1112.Cadmium.df, NHANES.1112.Zinc.df, NHANES.1112.FastQuest.df, NHANES.1112.BM.df, NHANES.1112.BP.df, NHANES.1112.DIQ.df, NHANES.0910.Demo.df, NHANES.0910.FastGlu.df, NHANES.0910.Trigly.df, NHANES.0910.Glycohemo.df, NHANES.0910.Cadmium.df, NHANES.0910.FastQuest.df, NHANES.0910.BM.df, NHANES.0910.BP.df, NHANES.0910.DIQ.df, NHANES.0708.Demo.df, NHANES.0708.FastGlu.df, NHANES.0708.Trigly.df, NHANES.0708.Glycohemo.df, NHANES.0708.Cadmium.df, NHANES.0708.FastQuest.df, NHANES.0708.BM.df, NHANES.0708.BP.df, NHANES.0708.DIQ.df, NHANES.0506.Demo.df, NHANES.0506.FastGlu.df, NHANES.0506.Trigly.df, NHANES.0506.Glycohemo.df, NHANES.0506.Cadmium.df, NHANES.0506.FastQuest.df, NHANES.0506.BM.df, NHANES.0506.BP.df, NHANES.0506.DIQ.df, NHANES.0304.Demo.df, NHANES.0304.FastGlu.df, NHANES.0304.Trigly.df, NHANES.0304.Glycohemo.df, NHANES.0304.Cadmium.df, NHANES.0304.FastQuest.df, NHANES.0304.BM.df, NHANES.0304.BP.df, NHANES.0304.DIQ.df, NHANES.0102.Demo.df, NHANES.0102.FastGlu.df, NHANES.0102.Trigly.df, NHANES.0102.Glycohemo.df, NHANES.0102.Cadmium.df, NHANES.0102.FastQuest.df, NHANES.0102.BM.df, NHANES.0102.BP.df, NHANES.0102.DIQ.df, NHANES.9900.Demo.df, NHANES.9900.FastGlu.df, NHANES.9900.Trigly.df, NHANES.9900.Glycohemo.df, NHANES.9900.Cadmium.df, NHANES.9900.FastQuest.df, NHANES.9900.BM.df, NHANES.9900.BP.df, NHANES.9900.DIQ.df, file = "NHANES.rda" )
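A note on the pattern in the script above: the location strings and download calls are fully mechanical for the consistently named files (DEMO, BMX, BPX, DIQ), so they could be generated in a loop. The sketch below is an illustration only; it assumes download.nhanes.file() is the same helper the script already relies on (defined elsewhere in this repo), and it deliberately covers only the suffix-regular files, since the 2003-2004 and earlier lab files (L10AM_C, LAB10AM, ...) break the naming pattern, which is presumably why the author spelled every URL out.

# Sketch only: loop over survey cycles for files named "<NAME><suffix>.xpt";
# the irregular lab files still need explicit URLs as in the script above.
base   <- "ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/nhanes"
cycle  <- c("1112" = "2011-2012", "0910" = "2009-2010", "0708" = "2007-2008",
            "0506" = "2005-2006", "0304" = "2003-2004", "0102" = "2001-2002",
            "9900" = "1999-2000")
suffix <- c("1112" = "_G", "0910" = "_F", "0708" = "_E", "0506" = "_D",
            "0304" = "_C", "0102" = "_B", "9900" = "")
regular <- c(Demo = "DEMO", BM = "BMX", BP = "BPX", DIQ = "DIQ")
nhanes <- list()
for (cy in names(cycle)) {
  for (v in names(regular)) {
    url <- sprintf("%s/%s/%s%s.xpt", base, cycle[cy], regular[v], suffix[cy])
    nhanes[[paste("NHANES", cy, v, "df", sep = ".")]] <- download.nhanes.file(url)
  }
}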
/names_and_download.R
no_license
swordguyman/NHANES-Repo
R
false
false
13,508
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulation.R \name{calculateE} \alias{calculateE} \title{Calculate the environmental component of the phenotype} \usage{ calculateE(G, h2) } \arguments{ \item{G}{The genetic component of the phenotype.} \item{h2}{The heritability.} } \value{ A vector with the environmental component of each sample. } \description{ Calculates the environmental component of the phenotype using the variance in the genetic component. } \keyword{internal}
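The help page above pins down the contract (inputs G and h2, a per-sample environmental component as output) but not the formula. One standard construction, shown below purely to illustrate the documented behavior (the actual martini internals may differ), solves h2 = var(G) / (var(G) + var(E)) for the environmental variance and draws E from a zero-mean normal:

# Illustrative sketch, not the package source: var(E) = var(G) * (1 - h2) / h2
calculateE_sketch <- function(G, h2) {
  varE <- var(G) * (1 - h2) / h2
  rnorm(length(G), mean = 0, sd = sqrt(varE))
}
G <- rnorm(100)                 # toy genetic component
E <- calculateE_sketch(G, h2 = 0.6)
var(G) / (var(G) + var(E))      # realized heritability, close to 0.6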
/man/calculateE.Rd
no_license
hclimente/martini
R
false
true
517
rd
\name{mcmc.chainlength.est} \alias{mcmc.chainlength.est} \title{Estimate appropriate chain length for mixed stock analysis by MCMC} \description{ Determines an appropriate chain length for MCMC estimation of source contributions to a mixed stock by running Raftery and Lewis and Gelman & Rubin diagnostics repeatedly until convergence criteria are met } \usage{ mcmc.chainlength.est(x, mult=1, inflate=sqrt(2), GR.crit=1.2, nchains=x$R, verbose=FALSE) } \arguments{ \item{x}{Mixed stock analysis data (a \code{mixstock.data} object or a list containing \code{sourcesamp} and \code{mixsamp})} \item{mult}{How many different times to run tests} \item{inflate}{How much to increase chain length at every failed iteration of Gelman and Rubin} \item{GR.crit}{Maximum value for Gelman and Rubin 97.5\% quantile in order to declare convergence} \item{nchains}{number of separate MCMC chains to run} \item{verbose}{print lots of detail while running?} } \details{ If \code{mult} is 1, runs Raftery and Lewis diagnostics on a chain starting from equal contributions; if \code{mult} is greater than 1, runs them on as many chains as there are sources, each starting from a 95\% contribution from that source. Iteratively increases each chain length to that suggested by the R&L diagnostic, until all chains pass. Then runs Gelman and Rubin on a set of chains starting from each source. If \code{mult} is greater than 1, it does each step on \code{mult} different chains and takes the maximum. } \value{ The maximum chain length needed to get convergence in all tests } \author{Ben Bolker} \seealso{\code{\link{gibbsC}}} \examples{ data(simex) mcmc.chainlength.est(simex,verbose=TRUE) } \keyword{misc}
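The details section describes an inflate-and-retest loop: grow the chain until Raftery & Lewis is satisfied, then until the Gelman & Rubin upper quantile drops below GR.crit. The sketch below shows that control flow with the coda package; it is an outline only (run_chain is a stand-in for the package's gibbsC sampler, and the real function also handles the mult chains-per-source logic).

library(coda)
# run_chain(n) is an assumed user-supplied closure returning an mcmc object
# of length n; pick n0 above the Raftery & Lewis minimum (Nmin, 3746 at the
# defaults) so the diagnostic can run on the pilot chain.
est_chainlength <- function(run_chain, n0 = 4000, inflate = sqrt(2), GR.crit = 1.2) {
  n <- n0
  repeat {  # Raftery & Lewis: chain at least as long as the suggested N
    need <- max(raftery.diag(run_chain(n))$resmatrix[, "N"])
    if (need <= n) break
    n <- ceiling(n * inflate)
  }
  repeat {  # Gelman & Rubin: upper 97.5% PSRF below the criterion
    chains <- mcmc.list(run_chain(n), run_chain(n), run_chain(n))
    if (all(gelman.diag(chains)$psrf[, 2] < GR.crit)) break
    n <- ceiling(n * inflate)
  }
  n
}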
/man/mcmc.chainlength.est.Rd
no_license
bbolker/mixstock
R
false
false
1,728
rd
install.packages("topicmodels") install.packages("dplyr") install.packages("ggplot2") install.packages("MCMCpack") install.packages("purrr") install.packages("readr") install.packages("stringr") install.packages("tibble") install.packages("magrittr") install.packages("devtools") devtools::install_github("lasy/alto")
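One hardening worth considering for a binder install script like this (shown as an optional variant, not part of the original setup): skip packages that are already present so repeated builds do not reinstall everything.

pkgs <- c("topicmodels", "dplyr", "ggplot2", "MCMCpack", "purrr",
          "readr", "stringr", "tibble", "magrittr", "devtools")
for (p in pkgs) {
  if (!requireNamespace(p, quietly = TRUE)) install.packages(p)
}
if (!requireNamespace("alto", quietly = TRUE)) devtools::install_github("lasy/alto")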
/binder/install.R
no_license
krisrs1128/alto_demo
R
false
false
318
r
## Rename files from <- list.files('terrain', pattern = '3x3', full.names = TRUE, recursive = TRUE) library(stringr) to <- str_replace(from, '3x3', '7x7') file.rename(from, to)
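Because file.rename acts immediately and returns FALSE rather than stopping on failure, a cautious variant (hypothetical, not in the original utils.R) previews the mapping and checks for collisions first:

stopifnot(length(from) > 0, !any(duplicated(to)), !any(file.exists(to)))
print(data.frame(from, to))          # dry-run preview of the rename map
ok <- file.rename(from, to)
if (!all(ok)) warning(sum(!ok), " file(s) could not be renamed")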
/executable_code/utils.R
no_license
loreabad6/Earthflows_R
R
false
false
170
r
#' Reads raw SEA data from a file. #' #' Reads, parses, and converts an SEA csv into an R \code{\link{data.frame}}. #' #' @export #' @importFrom dplyr mutate_if #' @importFrom utils read.table #' @importFrom stringr str_trim #' @importFrom tidyr unite #' @importFrom rlang !! sym UQ #' @param file The name of the file which the data is to be read from. #' @param verbose Print file name (as progress checker)? Defaults to \code{FALSE}. #' @return Returns the file's content as an R \code{\link{data.frame}}. load_sea_file <- function (file, verbose = FALSE) { dat <- read_sea_csv(file) if (verbose) print(file) if (nrow(dat) == 0) { warning(paste0(file, " contains no data!")) return (data.frame()) } # standardize case convention in colnames names(dat) <- standardize_names(dat) dat$file <- file dat <- tidyr::unite(dat, time, current_date, current_year, current_time, sep = " ") dat <- dplyr::mutate_if(dat, is.character, stringr::str_trim) dat <- standardize_sea_column_names(dat) dat <- label_sea_module_conditions(dat) dat <- standardize_sea_module_names(dat) dat <- dat %>% dplyr::group_by(!! sym(UQ(COL_PID))) %>% dplyr::mutate(bid = paste(.data[[COL_PID]], .data[[COL_TIME]][1]), correct_button = ifelse(tolower(.data[[COL_RESPONSE]]) == tolower(.data[[COL_CORRECT_RESPONSE]]), "correct", "incorrect"), half = dplyr::recode(make_half_seq(n()), `1` = "first_half", `2` = "second_half")) %>% dplyr::ungroup() return (dat) } #' Because the "Reading Fluency" module has some cells with unquoted commas, #' the usual comma-separated parsing splits #' one cell into two, creating too many columns for just a few rows #' #' @importFrom stringr str_trim #' @keywords internal read_sea_csv <- function(file) { dat <- read.table(file, sep = ",", header = F, stringsAsFactors = F, col.names = paste0("V", 1:32), fill = T) if (all(unique(dat[, 31]) == "") | all(is.na(dat[, 31]))) return (dat) names(dat) <- c(stringr::str_trim(dat[1, 1:30]), "junk", "junk2") dat <- dat[2:nrow(dat), ] if (all(is.na(dat$junk2))) { bad_rows <- which(dat$junk != "") dat[bad_rows, "Question Text"] <- paste(dat[bad_rows, "Question Text"], dat[bad_rows, "Question Type"], sep = ",") # Moving these bad rows' data after the bad delimiter one column to the left dat[bad_rows, 6:30] <- dat[bad_rows, 7:31] } else { bad_rows1 <- which(dat$junk != "" & dat$junk2 == "") dat[bad_rows1, "Question Text"] <- paste(dat[bad_rows1, "Question Text"], dat[bad_rows1, "Question Type"], sep = ",") # Moving these bad rows' data after the bad delimiter one column to the left dat[bad_rows1, 6:30] <- dat[bad_rows1, 7:31] bad_rows2 <- which(dat$junk2 != "") dat[bad_rows2, "Question Text"] <- paste(dat[bad_rows2, "Question Text"], dat[bad_rows2, "Question Type"], dat[bad_rows2, "User Answer"], sep = ",") # Moving these bad rows' data after the bad delimiters two columns to the left dat[bad_rows2, 6:30] <- dat[bad_rows2, 8:32] } return (dat[, 1:30]) }
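The column-shift repair in read_sea_csv() is easier to see on a toy row. The sketch below is an illustration only (column positions are simplified from the real 30-column layout): it re-joins the cell that a stray unquoted comma split apart, then slides the displaced fields one column back to the left, which is exactly the move the bad_rows branches perform.

# one parsed row where an unquoted comma in the first field pushed every
# later field one column to the right (the catch-all column got the overflow)
row <- data.frame(qtext = "Was it A", qtype = " or B?", answer = "mc",
                  rt = "512", junk = "0", stringsAsFactors = FALSE)
row$qtext <- paste(row$qtext, row$qtype, sep = ",")   # re-join the split cell
row[, 2:4] <- row[, 3:5]                              # slide fields back left
row[, 1:4]   # qtext = "Was it A, or B?", qtype = "mc", answer = "512", rt = "0"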
/R/load-sea.R
permissive
jorodondi/aceR
R
false
false
3,344
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/initializeVizRepo.R \name{createNewLabels} \alias{createNewLabels} \title{Create new labels on a repository from a JSON file.} \usage{ createNewLabels(label_json, repo_name, org = "USGS-VIZLAB", ctx = get.github.context()) } \arguments{ \item{label_json}{file path indicating the JSON file to be used to define what labels to create} \item{repo_name}{string, name for the new repository} \item{org}{string, GitHub organization to create repository. Defaults to "USGS-VIZLAB"} \item{ctx}{GitHub context for authentication, see \link[grithub]{get.github.context}} } \description{ Create new labels on a repository from a JSON file. }
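A plausible call, for orientation; the JSON layout shown is an assumption based on GitHub's label fields (name plus hex color), not something this help page specifies.

# labels.json (hypothetical):
#   [ {"name": "in progress", "color": "fbca04"},
#     {"name": "blocked",     "color": "d93f0b"} ]
library(vizlab)
createNewLabels(label_json = "labels.json",
                repo_name  = "my-new-viz",
                org        = "USGS-VIZLAB")   # ctx defaults to get.github.context()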
/man/createNewLabels.Rd
permissive
USGS-VIZLAB/vizlab
R
false
true
715
rd
library(igraph) library(NetData) data(kracknets, package = "NetData") krack_full_nonzero_edges <- subset(krack_full_data_frame, (advice_tie > 0 | friendship_tie > 0 | reports_to_tie > 0)) head(krack_full_nonzero_edges) krack_full <- graph.data.frame(krack_full_nonzero_edges) summary(krack_full) # Set vertex attributes for (i in V(krack_full)) { for (j in names(attributes)) { krack_full <- set.vertex.attribute(krack_full, j, index=i, attributes[i+1,j]) } } summary(krack_full) # Create sub-graphs based on edge attributes krack_advice <- delete.edges(krack_full, E(krack_full)[get.edge.attribute(krack_full, name = "advice_tie")==0]) summary(krack_advice) krack_friendship <- delete.edges(krack_full, E(krack_full)[get.edge.attribute(krack_full, name = "friendship_tie")==0]) summary(krack_friendship) krack_reports_to <- delete.edges(krack_full, E(krack_full)[get.edge.attribute(krack_full, name = "reports_to_tie")==0]) summary(krack_reports_to) ### # 3. NODE-LEVEL STATISTICS ### # Compute the indegree and outdegree for each node, first in the # full graph (accounting for all tie types) and then in each # tie-specific sub-graph. deg_full_in <- degree(krack_full, mode="in") deg_full_out <- degree(krack_full, mode="out") deg_full_in deg_full_out deg_advice_in <- degree(krack_advice, mode="in") deg_advice_out <- degree(krack_advice, mode="out") deg_advice_in deg_advice_out deg_friendship_in <- degree(krack_friendship, mode="in") deg_friendship_out <- degree(krack_friendship, mode="out") deg_friendship_in deg_friendship_out deg_reports_to_in <- degree(krack_reports_to, mode="in") deg_reports_to_out <- degree(krack_reports_to, mode="out") deg_reports_to_in deg_reports_to_out # Reachability can only be computed on one vertex at a time. To # get graph-wide statistics, change the value of "vertex" # manually or write a for loop. (Remember that, unlike R objects, # igraph objects are numbered from 0.) reachability <- function(g, m) { reach_mat = matrix(nrow = vcount(g), ncol = vcount(g)) for (i in 1:vcount(g)) { reach_mat[i,] = 0 this_node_reach <- subcomponent(g, (i - 1), mode = m) for (j in 1:(length(this_node_reach))) { alter = this_node_reach[j] reach_mat[i, alter] = 1 } } return(reach_mat) } reach_full_in <- reachability(krack_full, 'in') reach_full_out <- reachability(krack_full, 'out') reach_full_in reach_full_out # Often we want to know path distances between individuals in a network. # This is often done by calculating geodesics, or shortest paths between # each ij pair. One can symmetrize the data to do this (see lab 1), or # calculate it for outward and inward ties separately. Averaging geodesics # for the entire network provides an average distance or sort of cohesiveness # score. Dichotomizing distances reveals reach, and an average of reach for # a network reveals what percent of a network is connected in some way. # Compute shortest paths between each pair of nodes. sp_full_in <- shortest.paths(krack_full, mode='in') sp_full_out <- shortest.paths(krack_full, mode='out') sp_full_in sp_full_out # Assemble node-level stats into single data frame for export as CSV. # First, we have to compute average values by node for reachability and # shortest path. (We don't have to do this for degree because it is # already expressed as a node-level value.) 
reach_full_in_vec <- vector() reach_full_out_vec <- vector() reach_advice_in_vec <- vector() reach_advice_out_vec <- vector() reach_friendship_in_vec <- vector() reach_friendship_out_vec <- vector() reach_reports_to_in_vec <- vector() reach_reports_to_out_vec <- vector() sp_full_in_vec <- vector() sp_full_out_vec <- vector() sp_advice_in_vec <- vector() sp_advice_out_vec <- vector() sp_friendship_in_vec <- vector() sp_friendship_out_vec <- vector() sp_reports_to_in_vec <- vector() sp_reports_to_out_vec <- vector() for (i in 1:vcount(krack_full)) { reach_full_in_vec[i] <- mean(reach_full_in[i,]) reach_full_out_vec[i] <- mean(reach_full_out[i,]) reach_advice_in_vec[i] <- mean(reach_advice_in[i,]) reach_advice_out_vec[i] <- mean(reach_advice_out[i,]) reach_friendship_in_vec[i] <- mean(reach_friendship_in[i,]) reach_friendship_out_vec[i] <- mean(reach_friendship_out[i,]) reach_reports_to_in_vec[i] <- mean(reach_reports_to_in[i,]) reach_reports_to_out_vec[i] <- mean(reach_reports_to_out[i,]) sp_full_in_vec[i] <- mean(sp_full_in[i,]) sp_full_out_vec[i] <- mean(sp_full_out[i,]) sp_advice_in_vec[i] <- mean(sp_advice_in[i,]) sp_advice_out_vec[i] <- mean(sp_advice_out[i,]) sp_friendship_in_vec[i] <- mean(sp_friendship_in[i,]) sp_friendship_out_vec[i] <- mean(sp_friendship_out[i,]) sp_reports_to_in_vec[i] <- mean(sp_reports_to_in[i,]) sp_reports_to_out_vec[i] <- mean(sp_reports_to_out[i,]) } # Next, we assemble all of the vectors of node-level values into a # single data frame, which we can export as a CSV to our working # directory. node_stats_df <- cbind(deg_full_in, deg_full_out, deg_advice_in, deg_advice_out, deg_friendship_in, deg_friendship_out, deg_reports_to_in, deg_reports_to_out, reach_full_in_vec, reach_full_out_vec, reach_advice_in_vec, reach_advice_out_vec, reach_friendship_in_vec, reach_friendship_out_vec, reach_reports_to_in_vec, reach_reports_to_out_vec, sp_full_in_vec, sp_full_out_vec, sp_advice_in_vec, sp_advice_out_vec, sp_friendship_in_vec, sp_friendship_out_vec, sp_reports_to_in_vec, sp_reports_to_out_vec) write.csv(node_stats_df, 'krack_node_stats.csv') # Question #1 - What do these statistics tell us about # each network and its individuals in general? ### # 4. NETWORK-LEVEL STATISTICS ### # Many initial analyses of networks begin with distances and reach, # and then move towards global summary statistics of the network. # # As a reminder, entering a question mark followed by a function # name (e.g., ?graph.density) pulls up the help file for that function. # This can be helpful to understand how, exactly, stats are calculated. # Degree mean(deg_full_in) sd(deg_full_in) # Shortest paths # ***Why do in and out come up with the same results? # In and out shortest paths are simply transposes of one another; # thus, when we compute statistics across the whole network they have to be the same. mean(sp_full_in[which(sp_full_in != Inf)]) # Density graph.density(krack_full) # Reciprocity reciprocity(krack_full) # Transitivity (clustering coefficient) transitivity(krack_full) # Triad census. Here we'll first build a vector of labels for # the different triad types. Then we'll combine this vector # with the triad censuses for the different networks, which # we'll export as a CSV. 
census_labels = c('003', '012', '102', '021D', '021U', '021C', '111D', '111U', '030T', '030C', '201', '120D', '120U', '120C', '210', '300') tc_full <- triad.census(krack_full) tc_advice <- triad.census(krack_advice) tc_friendship <- triad.census(krack_friendship) tc_reports_to <- triad.census(krack_reports_to) triad_df <- data.frame(census_labels, tc_full, tc_advice, tc_friendship, tc_reports_to) triad_df # To export any of these vectors to a CSV for use in another program, simply # use the write.csv() command: write.csv(triad_df, 'krack_triads.csv') #R code for generating random graphs: #requires packages ergm, intergraph library(ergm) library(intergraph) #set up weighting vectors for clustering and hierarchy clust.mask <- rep(0,16) clust.mask[c(1,3,16)] <- 1 hier.mask <- rep(1,16) hier.mask[c(6:8,10:11)] <- 0 #compute triad count and triad proportion for a given weighting vector mask.stat <- function(my.graph, my.mask){ n.nodes <- vcount(my.graph) n.edges <- ecount(my.graph) #set probability of edge formation in random graph to proportion of possible edges present in original p.edge <- n.edges/(n.nodes*(n.nodes +1)/2) r.graph <- as.network.numeric(n.nodes, density = p.edge) r.igraph <- as.igraph(r.graph) tc.graph <- triad.census(r.igraph) clust <- sum(tc.graph*my.mask) clust.norm <- clust/sum(tc.graph) return(c(clust,clust.norm)) } #build 100 random graphs and compute their clustering and hierarchy measurements to create an empirical null distribution emp.distro <- function(this.graph){ clust <- matrix(rep(0,200), nrow=2) hier <- matrix(rep(0,200), nrow=2) for(i in c(1:100)){ clust[,i] <- mask.stat(this.graph, clust.mask) hier[,i] <- mask.stat(this.graph, hier.mask) } my.mat <- rbind(clust, hier) rownames(my.mat) <- c("clust.ct", "clust.norm", "hier.ct", "hier.ct.norm") return(my.mat) } #fix randomization if desired so results are replicable set.seed(3123) #compute empirical distributions for each network hc_full <- emp.distro(krack_full) hc_advice <- emp.distro(krack_advice) hc_friend <- emp.distro(krack_friendship) hc_report <- emp.distro(krack_reports_to) #find empirical p-value get.p <- function(val, distro) { med <- median(distro) distro.n <- distro - med val.n <- val - med p.val <- sum(abs(distro.n) > abs(val.n))/100 return(p.val) } get.p(198, hc_full[1,]) get.p(194, hc_advice[1,]) get.p(525, hc_friend[1,]) get.p(1003, hc_report[1,]) get.p(979, hc_full[3,]) get.p(1047, hc_advice[3,]) get.p(1135, hc_friend[3,]) get.p(1314, hc_report[3,]) #generate 95% empirical confidence intervals for triad counts #clustering c(sort(hc_advice[1,])[5], sort(hc_advice[1,])[95]) c(sort(hc_friend[1,])[5], sort(hc_friend[1,])[95]) c(sort(hc_report[1,])[5], sort(hc_report[1,])[95]) #hierarchy c(sort(hc_advice[3,])[5], sort(hc_advice[3,])[95]) c(sort(hc_friend[3,])[5], sort(hc_friend[3,])[95]) c(sort(hc_report[3,])[5], sort(hc_report[3,])[95])
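Two small footnotes on the tail of this script. First, the sort(x)[5] / sort(x)[95] intervals at the end actually bracket roughly the central 90% of the 100 null draws; a true 95% empirical interval uses the 2.5% and 97.5% quantiles. Second, quantile() expresses the same idea in one call:

quantile(hc_advice[1, ], probs = c(0.025, 0.975))  # clustering triad count, 95% null interval
quantile(hc_advice[3, ], probs = c(0.025, 0.975))  # hierarchy triad count, 95% null interval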
/lab2.R
no_license
rmasiniexpert/Social-Network-Analysis-in-R
R
false
false
10,645
r
## runs experiment using function in ranking_function.r and sim_ranking_experiment.r #create an .RData file with parameters + ranks #run in ranking setwd("/Users/cora/git_repos/RankingMethods") source("ranking_function.r") #creates clean results currResults <- as.data.frame(matrix(nrow = 0, ncol = 16)) names(currResults) <- c("sim", "N", "a_p", "b_p", "n_min", "n_max", "a_n", "b_n", "n_assignment_method", "rankPriority", "rankSteepness", "f", "loss", "totalLoss", "ranking", "data") currResults$data <- I(list()) results <- currResults #data characteristics for (n in c(50)){ #numItems for (n_min in c(100)){ #what really matters here is the number of events for (n_max in c(1000)){ for (a_n in c(1)){ for (b_n in c(1)){ for (a_p in c(1)){ for (b_p in c(1)){ #add results to the results df results <- rbind(results, RunSimulation(N = n, a_p = a_p, b_p = b_p, n_min = n_min, n_max = n_max, a_n = a_n, b_n = b_n, #data n_assignment_method = "random", #ranking settings rankPriority = c("top"), rankSteepness = c(0, 0.0009, 0.009, 0.09, 0.9, 0.99), #rankWeights parameter = NULL, loss = c(2), f=identity, n_sim = 100)) } } } } } } } #saves results. Careful! This overwrites save(results, file = "/Users/cora/git_repos/RankingMethods/results/ranking_experiment_results_1026.RData") #saves as an R object load("/Users/cora/git_repos/RankingMethods/results/ranking_experiment_results_1026.RData") head(results) #for metrics and graphics see experiment_results_graphs.r
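Since each parameter is looped over a single value here, the seven nested for loops collapse naturally into a parameter grid. A sketch follows (behavior assumed equivalent, given that RunSimulation returns rbind-able data frames as in the script above):

grid <- expand.grid(N = 50, n_min = 100, n_max = 1000,
                    a_n = 1, b_n = 1, a_p = 1, b_p = 1)
runs <- lapply(seq_len(nrow(grid)), function(i) {
  p <- grid[i, ]
  RunSimulation(N = p$N, a_p = p$a_p, b_p = p$b_p,
                n_min = p$n_min, n_max = p$n_max, a_n = p$a_n, b_n = p$b_n,
                n_assignment_method = "random",
                rankPriority = "top",
                rankSteepness = c(0, 0.0009, 0.009, 0.09, 0.9, 0.99),
                parameter = NULL, loss = 2, f = identity, n_sim = 100)
})
results <- rbind(currResults, do.call(rbind, runs))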
/ranking_experiment_run_10_26_18.R
no_license
coraallensavietta/RankingMethods
R
false
false
1,939
r
#Programming example #Number guessing game # n <- readline(prompt="Enter a number: ") # cat("The number you entered is ",n,".\n") num <- round(runif(1)*100,digits=0) guess <- -1 cat("Guess a number between 0 and 100.\n") while(guess != num){ guess <- readline(prompt="Guess number :") guess <- as.integer(guess) if(guess == num){ cat("Congratulations",num,"is right.\n") } else if(guess < num) { cat("It's smaller!\n") } else if(guess > num) { cat("It's bigger!\n") } }
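As an aside (not part of the original exercise), bisection pins down any number in 0..100 within ceiling(log2(101)) = 7 guesses; a quick solver to verify:

solve_by_bisection <- function(num) {
  lo <- 0; hi <- 100; guesses <- 0
  repeat {
    guess <- (lo + hi) %/% 2
    guesses <- guesses + 1
    if (guess == num) return(guesses)
    if (guess < num) lo <- guess + 1 else hi <- guess - 1
  }
}
max(sapply(0:100, solve_by_bisection))  # never more than 7 guesses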
/R_kmooc2/6weeks/[R].6-5.R
no_license
HeeSeok-Kwon/R_study
R
false
false
538
r
##' Download Met Drivers for BioCro from NCEP ##' ##' @title InputForWeach ##' @param lat numeric latitude ##' @param lon numeric longitude ##' @param year1 integer ##' @param year2 integer ##' @export ##' @return climate data to be parsed by the \code{\link{weachNEW}} function in BioCro ##' @author Deepak Jaiswal, David LeBauer InputForWeach <- function(lat, lon, year1, year2){ ncep.inputs <- list(year1 = year1, year2 = year2, lat = lat, lon = lon) ## Get Temperature Records avgTemp <- ncep.gather2(variable = "air.2m", inputs = ncep.inputs) avgTemp <- RNCEP::NCEP.aggregate(avgTemp, HOURS = FALSE, fxn = "mean") # Average Flux for the whole day avgTemp <- RNCEP::NCEP.array2df(avgTemp, var.name = "avgTemp") avgTemp <- aggregate(avgTemp ~ datetime, data = avgTemp, mean) # mean of all nearby spatial locations avgTemp$datetime <- substr(avgTemp$datetime, 1, 10) avgTemp$avgTemp <- avgTemp$avgTemp - 273 ## Get Solar Radiation Records solarR <- ncep.gather2(variable = "dswrf.sfc", inputs = ncep.inputs) solarR <- RNCEP::NCEP.aggregate(solarR, HOURS = FALSE, fxn = "mean") # Average Flux for the whole day solarR <- RNCEP::NCEP.array2df(solarR, var.name = "solarR") solarR$solarR <- solarR$solarR * 24 * 60 * 60 * 1e-06 # To convert Wt/m2 to MJ/m2 solarR <- aggregate(solarR ~ datetime, data = solarR, mean) # mean of all nearby spatial locations solarR$datetime <- substr(solarR$datetime, 1, 10) ### T Maximum Data Tmax <- ncep.gather2(variable = "tmax.2m", inputs = ncep.inputs) Tmax <- RNCEP::NCEP.aggregate(Tmax, HOURS = FALSE, fxn = "max") Tmax <- RNCEP::NCEP.array2df(Tmax, var.name = "Tmax") Tmax <- aggregate(Tmax ~ datetime, data = Tmax, max) Tmax$datetime <- substr(Tmax$datetime, 1, 10) Tmax$Tmax <- Tmax$Tmax - 273 ## T Minimum Data Tmin <- ncep.gather2(variable = "tmin.2m", inputs = ncep.inputs) Tmin <- RNCEP::NCEP.aggregate(Tmin, HOURS = FALSE, fxn = "min") Tmin <- RNCEP::NCEP.array2df(Tmin, var.name = "Tmin") Tmin <- aggregate(Tmin ~ datetime, data = Tmin, min) Tmin$datetime <- substr(Tmin$datetime, 1, 10) Tmin$Tmin <- Tmin$Tmin - 273 ## Relative Humidity (I am using surface level, not Grid level to get relative ## humidity, not absolute humidity, hope it's not a problem. RH <- ncep.gather2(variable = c("rhum.sig995"), level = "surface", inputs = ncep.inputs) ## Warning message: not available in Reanalysis 2; instead using Reanalysis 1. 
RHavg <- RNCEP::NCEP.aggregate(RH, HOURS = FALSE, fxn = "mean") RHmax <- RNCEP::NCEP.aggregate(RH, HOURS = FALSE, fxn = "max") RHmin <- RNCEP::NCEP.aggregate(RH, HOURS = FALSE, fxn = "min") RHavg <- RNCEP::NCEP.array2df(RHavg, var.name = "RH") RHmax <- RNCEP::NCEP.array2df(RHmax, var.name = "RH") RHmin <- RNCEP::NCEP.array2df(RHmin, var.name = "RH") RHavg <- aggregate(RH ~ datetime, data = RHavg, mean) RHmax <- aggregate(RH ~ datetime, data = RHmax, max) RHmin <- aggregate(RH ~ datetime, data = RHmin, min) RHavg$datetime <- substr(RHavg$datetime, 1, 10) RHmax$datetime <- substr(RHmax$datetime, 1, 10) RHmin$datetime <- substr(RHmin$datetime, 1, 10) RHavg$RH <- RHavg$RH * 0.01 ## Percent to Fraction RHmax$RH <- RHmax$RH * 0.01 ## Percent to Fraction RHmin$RH <- RHmin$RH * 0.01 ## Percent to Fraction ## Wind Speed Vwind <- ncep.gather2(variable = "vwnd.10m", inputs = ncep.inputs) Vwind <- RNCEP::NCEP.aggregate(Vwind, HOURS = FALSE, fxn = "mean") Vwind <- RNCEP::NCEP.array2df(Vwind, var.name = "Vwind") Vwind <- aggregate(Vwind ~ datetime, data = Vwind, mean) Vwind$datetime <- substr(Vwind$datetime, 1, 10) Uwind <- ncep.gather2(variable = "uwnd.10m", inputs = ncep.inputs) Uwind <- RNCEP::NCEP.aggregate(Uwind, HOURS = FALSE, fxn = "mean") Uwind <- RNCEP::NCEP.array2df(Uwind, var.name = "Uwind") Uwind <- aggregate(Uwind ~ datetime, data = Uwind, mean) Uwind$datetime <- substr(Uwind$datetime, 1, 10) Uwind$Uwind <- sqrt(Uwind$Uwind^2 + Vwind$Vwind^2) ## converting Windspeed from 10m to 2 m height using correlation ## provided by FAO (http://www.fao.org/docrep/X0490E/x0490e07.htm) Uwind$Uwind <- Uwind$Uwind * 4.87/log(67.8 * 10 - 5.42) Uwind$Uwind <- Uwind$Uwind * (3600)/(1609) # unit conversion from m/s to miles per hr ## Precipitation Rain <- ncep.gather2(variable = "prate.sfc", inputs = ncep.inputs) Rain <- RNCEP::NCEP.aggregate(Rain, HOURS = FALSE, fxn = "mean") Rain <- RNCEP::NCEP.array2df(Rain, var.name = "Rain") Rain <- aggregate(Rain ~ datetime, data = Rain, mean) Rain$datetime <- substr(Rain$datetime, 1, 10) Rain$Rain <- Rain$Rain * (24 * 60 * 60) * (1/1000) * 39.37 # Converting from kg/m2 sec to kg/m2 to m3/m2 to inches day <- numeric(0) year <- numeric(0) for (i in year1:year2){ if ((i%%400) == 0 || (i%%100 != 0 && i%%4 == 0)){ indx <- as.integer(length(day)) day[as.integer(indx + 1):as.integer(indx + 366)] <- seq(1:366) year[as.integer(indx + 1):as.integer(indx + 366)] <- rep(i, 366) } if (!((i%%400) == 0 || (i%%100 != 0 && i%%4 == 0))){ indx <- as.integer(length(day)) day[as.integer(indx + 1):as.integer(indx + 365)] <- seq(1:365) year[as.integer(indx + 1):as.integer(indx + 365)] <- rep(i, 365) } } result <- data.frame(year = year, day = day, solarR = solarR$solarR, Tmax = Tmax$Tmax, Tmin = Tmin$Tmin, Tavg = avgTemp$avgTemp, RHmax = RHmax$RH, RHmin = RHmin$RH, RHavg = RHavg$RH, WS = Uwind$Uwind, precip = Rain$Rain) return(result) } ##' Wrapper for \code{\link{RNCEP::NCEP.gather}} function to specify custom defaults ##' ##' @title ncep.gather2 ##' @param variable variable to be extracted from NCEP data (see ?NCEP.gather) ##' @param level passed to NCEP.gather, either "gaussian" (default), "surface" or numeric value of pressure ##' @param inputs list of parameters passed to InputForWeach ##' @return data from NCEP ##' @author David LeBauer ##' @export ncep.gather2 <- function (variable, level = "gaussian", months.minmax = c(1, 12), inputs, reanalysis2 = TRUE, return.units = FALSE) { years.minmax <- c(inputs$year1, inputs$year2) lat.southnorth <- c(inputs$lat, inputs$lat) lon.westeast <- 
c(inputs$lon, inputs$lon) if (is.null(level)) { stop("One of 'surface', 'gaussian', or a numeric pressure level must be given for 'level'") } if (length(level) > 1) { stop("Cannot access multiple reference systems in a single function call") } if (is.numeric(level) == FALSE) { if (level %in% c("surface", "gaussian") == FALSE) { stop("level must be one of 'gaussian', 'surface' or a numeric pressure level") } } if (reanalysis2 == TRUE && years.minmax[1] < 1979) { stop("The datetimes specified are out of range for the Reanalysis 2 dataset.") } if (years.minmax[1] < 1948) { stop("The datetimes specified are out of range.") } lon.westeast[1] <- ifelse(lon.westeast[1] < 0, 360 + lon.westeast[1], lon.westeast[1]) lon.westeast[length(lon.westeast)] <- ifelse(lon.westeast[length(lon.westeast)] < 0, 360 + lon.westeast[length(lon.westeast)], lon.westeast[length(lon.westeast)]) if (lon.westeast[1] > lon.westeast[length(lon.westeast)]) { cross.prime <- TRUE } else { cross.prime <- FALSE } tlength <- NULL pb <- NULL if (cross.prime == FALSE) { if (is.numeric(level)) { out <- NCEP.gather.pressure(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = lon.westeast, pressure = level, reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) } else if (level == "surface") { out <- NCEP.gather.surface(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = lon.westeast, reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) } else if (level == "gaussian") { out <- NCEP.gather.gaussian(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = lon.westeast, reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) } } else if (cross.prime == TRUE) { if (is.numeric(level)) { out.west <- NCEP.gather.pressure(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = c(lon.westeast[1], 357.5), pressure = level, reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) out.east <- NCEP.gather.pressure(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = c(0, lon.westeast[2]), pressure = level, reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) out <- NCEP.bind(data.west = out.west, data.east = out.east) } else if (level == "surface") { out.west <- NCEP.gather.surface(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = c(lon.westeast[1], 357.5), reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) out.east <- NCEP.gather.surface(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = c(0, lon.westeast[2]), reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) out <- NCEP.bind(data.west = out.west, data.east = out.east) } else if (level == "gaussian") { out.west <- NCEP.gather.gaussian(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = c(lon.westeast[1], 358.125), reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) out.east <- 
NCEP.gather.gaussian(variable = variable, months.minmax = months.minmax, years.minmax = years.minmax, lat.minmax = lat.southnorth, lon.minmax = c(0, lon.westeast[2]), reanalysis2 = reanalysis2, return.units = return.units, pb = pb, increments = tlength) out <- NCEP.bind(data.west = out.west, data.east = out.east) } } return(out) }
/modules/data.atmosphere/R/InputForWeach.R
permissive
rgknox/pecan
R
false
false
12,044
r
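## A minimal usage sketch for InputForWeach (the coordinates and years are
## illustrative; this downloads data from NCEP via RNCEP, so it needs a
## network connection):
met <- InputForWeach(lat = 40.1, lon = -88.2, year1 = 2005, year2 = 2006)
head(met)  # year, day, solarR, Tmax, Tmin, Tavg, RHmax, RHmin, RHavg, WS, precip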
## File:   predModHong.R
## Author: Hong Xu

library(caret)
library(pROC)
library(ROCR)
library(Metrics)

#====================
mac.os  <- "/Users/li11/"
linux   <- "~/"
windows <- "X:/"
#root <- windows
root <- mac.os

##### REF: http://stats.stackexchange.com/questions/31579/what-is-the-optimal-k-for-the-k-nearest-neighbour-classifier-on-the-iris-dat
#          https://gist.github.com/zachmayer/3061272

# Multi-class summary function, based on caret:::twoClassSummary
require(compiler)
multiClassSummary <- cmpfun(function (data, lev = NULL, model = NULL) {

  # load libraries
  require(Metrics)
  require(caret)

  # check that observed and predicted factors share the same levels
  if (!all(levels(data[, "pred"]) == levels(data[, "obs"])))
    stop("levels of observed and predicted data do not match")

  # calculate custom one-vs-all stats for each class
  prob_stats <- lapply(levels(data[, "pred"]), function(class) {
    # grab one-vs-all data for the class
    pred <- ifelse(data[, "pred"] == class, 1, 0)
    obs  <- ifelse(data[, "obs"] == class, 1, 0)
    prob <- data[, class]

    # calculate one-vs-all AUC and logLoss and return
    cap_prob <- pmin(pmax(prob, .000001), .999999)  # keep probabilities away from 0/1 for logLoss
    prob_stats <- c(auc(obs, prob), logLoss(obs, cap_prob))
    names(prob_stats) <- c("ROC", "logLoss")
    return(prob_stats)
  })
  prob_stats <- do.call(rbind, prob_stats)
  rownames(prob_stats) <- paste("Class:", levels(data[, "pred"]))

  # calculate confusion matrix-based statistics
  CM <- confusionMatrix(data[, "pred"], data[, "obs"])

  # aggregate and average class-wise stats
  # TODO: add weights
  class_stats <- cbind(CM$byClass, prob_stats)
  class_stats <- colMeans(class_stats)

  # aggregate overall stats
  overall_stats <- c(CM$overall)

  # combine overall with class-wise stats and remove some stats we don't want
  stats <- c(overall_stats, class_stats)
  stats <- stats[! names(stats) %in% c("AccuracyNull", "Prevalence", "Detection Prevalence")]

  # clean names and return
  names(stats) <- gsub('[[:blank:]]+', '_', names(stats))
  return(stats)
})
## Note: R CMD check reports "no visible binding for global variable" for 'Metrics' and 'caret'

## set up the working directory
#setwd(paste(root, "/myGit/mixturemodel/reconData/para1/", sep=""))
## read in data from a txt file
#data <- read.table("recon_3classes_para1.txt", header=TRUE, sep = "\t")
setwd(paste(root, "/myGit/mixturemodel/reconData/para2/", sep=""))
## read in data from a txt file
#data <- read.table("recon_3classes_para2.txt", header=TRUE, sep = "\t")
#data <- read.table("recon_3classes_para3.txt", header=TRUE, sep = "\t")
data <- read.table("recon_3classes_para4.txt", header=TRUE, sep = "\t")

##### BEGIN: data partition >>>>>
## set random seed
set.seed(12345)
#set.seed(34546)
## create data partition
inTrainingSet <- createDataPartition(data$label, p=.7, list=FALSE)
labelTrain <- data[ inTrainingSet,]
labelTest  <- data[-inTrainingSet,]
nrow(labelTrain)
nrow(labelTest)
##### END: data partition <<<<<

##### BEGIN: tune the parameters >>>>>
## control:
#   resampling technique: 5-repeat 10-fold cross-validation
#   performance metric:   ROC AUC
ctrl <- trainControl(method = "repeatedcv",
                     repeats = 5,
                     summaryFunction = multiClassSummary,
                     classProbs = TRUE)
##### END: tune the parameters <<<<<

##### BEGIN: train model - svm >>>>>
set.seed(1024)
## training model: svm with a radial kernel
svmFit <- train(label ~ ., data = labelTrain,
                method = "svmRadial",
                metric = "ROC",
                tuneLength = 10,
                trControl = ctrl)
## prediction
svmPred <- predict(svmFit, labelTest)
str(svmPred)
## predicted probabilities
svmProbs <- predict(svmFit, labelTest, type = "prob")
str(svmProbs)

confusionMatrix(svmPred, labelTest$label)
## Confusion Matrix and Statistics
##
##           Reference
## Prediction  c  k  n
##          c  8  2  0
##          k  0  2  0
##          n  2  2 30
##
## Overall Statistics
##
##                Accuracy : 0.87
##                  95% CI : (0.737, 0.951)
##     No Information Rate : 0.652
##     P-Value [Acc > NIR] : 0.000839
##
##                   Kappa : 0.72
##  Mcnemar's Test P-Value : 0.111610
##
## Statistics by Class:
##
##                      Class: c Class: k Class: n
## Sensitivity             0.800   0.3333    1.000
## Specificity             0.944   1.0000    0.750
## Pos Pred Value          0.800   1.0000    0.882
## Neg Pred Value          0.944   0.9091    1.000
## Prevalence              0.217   0.1304    0.652
## Detection Rate          0.174   0.0435    0.652
## Detection Prevalence    0.217   0.0435    0.739
## Balanced Accuracy       0.872   0.6667    0.875

## plot each tuning metric against the tuning grid
for (stat in c('Accuracy', 'Kappa', 'AccuracyLower', 'AccuracyUpper',
               'AccuracyPValue', 'Sensitivity', 'Specificity',
               'Pos_Pred_Value', 'Neg_Pred_Value', 'Detection_Rate',
               'ROC', 'logLoss')) {
  print(plot(svmFit, metric=stat))
}
##### END: train model - svm <<<<<

##### BEGIN: train model - rf >>>>>
rfFit <- train(label ~ ., method = "rf", data = labelTrain)
rfPred <- predict(rfFit, labelTest)
confusionMatrix(rfPred, labelTest$label)
## Confusion Matrix and Statistics
##
##           Reference
## Prediction  c  k  n
##          c  8  0  0
##          k  0  3  0
##          n  2  3 30
##
## Overall Statistics
##
##                Accuracy : 0.891
##                  95% CI : (0.764, 0.964)
##     No Information Rate : 0.652
##     P-Value [Acc > NIR] : 0.000217
##
##                   Kappa : 0.762
##  Mcnemar's Test P-Value : NA
##
## Statistics by Class:
##
##                      Class: c Class: k Class: n
## Sensitivity             0.800   0.5000    1.000
## Specificity             1.000   1.0000    0.688
## Pos Pred Value          1.000   1.0000    0.857
## Neg Pred Value          0.947   0.9302    1.000
## Prevalence              0.217   0.1304    0.652
## Detection Rate          0.174   0.0652    0.652
## Detection Prevalence    0.174   0.0652    0.761
## Balanced Accuracy       0.900   0.7500    0.844
##### END: train model - rf <<<<<

##### BEGIN: train model - RRF >>>>>
rrfFit <- train(label ~ ., method = "RRF", data = labelTrain)
rrfPred <- predict(rrfFit, labelTest)
confusionMatrix(rrfPred, labelTest$label)
## Confusion Matrix and Statistics
##
##           Reference
## Prediction  c  k  n
##          c  8  0  0
##          k  0  4  1
##          n  2  2 29
##
## Overall Statistics
##
##                Accuracy : 0.891
##                  95% CI : (0.764, 0.964)
##     No Information Rate : 0.652
##     P-Value [Acc > NIR] : 0.000217
##
##                   Kappa : 0.774
##  Mcnemar's Test P-Value : NA
/mixturemodel/Scripts/predModHong.R
no_license
ImageRecognitionMaster/myOCRI-iii
R
false
false
6,444
r
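## The per-class statistics printed above can also be pulled out of a caret
## confusion matrix programmatically instead of read off the console; a small
## sketch using the objects fitted above:
cm <- confusionMatrix(svmPred, labelTest$label)
cm$byClass[, c("Sensitivity", "Specificity")]  # one row per class
cm$overall["Accuracy"]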
library(shiny)
library(leaflet)
library(plotly)
library(visNetwork)
library(networkD3)
library(shinydashboard)
library(shinyjs)
library(shinyWidgets)
library(dygraphs)   # needed for dygraphOutput() in the TMS tab

# Choices for drop-downs
vars <- c(
  'Collision Vehicle 1' = 'VEHICLE.TYPE.CODE.1',
  'Collision Vehicle 2' = 'VEHICLE.TYPE.CODE.2',
  "Injuries" = "NUMBER.OF.PERSONS.INJURED",
  "Deaths" = "NUMBER.OF.PERSONS.KILLED"
)

vars2 <- c(
  "Injuries" = "NUMBER.OF.PERSONS.INJURED",
  "Deaths" = "NUMBER.OF.PERSONS.KILLED"
)

vars3 <- c(
  "All Vehicles" = "",
  "Ambulance" = "AMBULANCE",
  "Bicycle" = "BICYCLE",
  "Bus" = "BUS",
  "Fire Truck" = "FIRE TRUCK",
  "Large Commercial Vehicle (6 or more tires)" = "LARGE COM VEH(6 OR MORE TIRES)",
  "Motorcycle" = "MOTORCYCLE",
  "Passenger" = "PASSENGER VEHICLE",
  "Pick-up Truck" = "PICK-UP TRUCK",
  "Small Commercial Vehicle (4 tires)" = "SMALL COM VEH(4 TIRES)",
  "Taxi" = "TAXI"
)

vars4 <- c("All boroughs" = "", 'Manhattan' = 'MANHATTAN', 'Brooklyn' = 'BROOKLYN',
           'Queens' = 'QUEENS', 'Bronx' = 'BRONX')

shinyUI(dashboardPage(
  dashboardHeader(title = "DM-MCDA",
                  tags$li(class = "dropdown",
                          tags$a(icon("github"),
                                 href = "https://github.com/",
                                 title = "See the code on github"))
  ),
  dashboardSidebar(sidebarMenu(
    menuItem("Data Sources", tabName = "ds", icon = icon("database")),
    menuItem("ARM", tabName = "arm", icon = icon("random")),
    menuItem("MCDA", tabName = "mcda", icon = icon("users")),
    menuItem("MAP", tabName = "map", icon = icon("map")),
    menuItem("TMS", tabName = "tms", icon = icon("line-chart")),
    menuItem("About", tabName = "about", icon = icon("info-circle"))
  )),
  dashboardBody(
    # ensure links to sidebar menus work from anywhere in the app
    tags$script(HTML("
      var openTab = function(tabName){
        $('a', $('.sidebar')).each(function() {
          if(this.getAttribute('data-value') == tabName) {
            this.click()
          };
        });
      }
    ")),
    # enable shinyjs (also used for the mini sidebar)
    useShinyjs(),
    # Include styling for the app
    includeCSS("www/app.css"),
    tabItems(
      # Meta data section
      tabItem(tabName = "ds",
              # Boxes need to be put in a row (or column)
              fluidRow(
                column(width = 12,
                       tabBox(
                         side = "left", width = 12,
                         tabPanel("Meta Data Structure",
                                  box(width = NULL, title = "Dataset:", status = "primary",
                                      DT::dataTableOutput("dataset_ds"), collapsible = TRUE),
                                  box(width = NULL, title = "Decision Matrix:", status = "primary",
                                      DT::dataTableOutput("descion_matrix_ds"), collapsible = TRUE)
                         )
                       )
                )
              )
      ),
      # First tab content
      tabItem(tabName = "arm",
              # Boxes need to be put in a row (or column)
              fluidRow(
                column(width = 3,
                       box(width = NULL, status = "info",
                           fileInput("file", "File")
                       ),
                       box(width = NULL, status = "info", collapsible = T, title = "Parameters",
                           conditionalPanel(
                             condition = "input.samp=='Sample'",
                             numericInput("nrule", 'Number of Rules', 5)
                           ),
                           conditionalPanel(
                             condition = "input.mytab=='graph'",
                             radioButtons('graphType', label='Graph Type',
                                          choices=c('itemsets','items'), inline=T)
                           ),
                           conditionalPanel(
                             condition = "input.lhsv=='Subset'",
                             uiOutput("choose_lhs")
                           ),
                           conditionalPanel(
                             condition = "input.rhsv=='Subset'",
                             uiOutput("choose_rhs")
                           ),
                           conditionalPanel(
                             condition = "input.mytab=='grouped'",
                             sliderInput('k', label='Choose # of rule clusters',
                                         min=1, max=150, step=1, value=15)
                           ),
                           conditionalPanel(
                             # conditionalPanel conditions are JavaScript, so the
                             # membership test is written with indexOf rather than %in%
                             condition = "['grouped', 'graph', 'table', 'datatable', 'scatter', 'paracoord', 'matrix', 'itemFreq'].indexOf(input.mytab) > -1",
                             radioButtons('samp', label='Sample',
                                          choices=c('All Rules', 'Sample'), inline=T),
                             uiOutput("choose_columns"),
                             sliderInput("supp", "Support:", min = 0, max = 1,
                                         value = 0.1, step = 1/10000),
                             sliderInput("conf", "Confidence:", min = 0, max = 1,
                                         value = 0.5, step = 1/10000),
                             selectInput('sort', label='Sorting Criteria:',
                                         choices = c('lift', 'confidence', 'support')),
                             numericInput("minL", "Min. items per set:", 2),
                             numericInput("maxL", "Max. items per set:", 3),
                             downloadButton('downloadData', 'Download Rules as CSV')
                             #radioButtons('lhsv', label='LHS variables', choices=c('All', 'Subset')), br(),
                             #radioButtons('rhsv', label='RHS variables', choices=c('All', 'Subset')), br()
                           )
                       )
                ),
                column(width = 9,
                       tabBox(
                         side = "left", width = 12,
                         tabPanel('Summary', value='table', verbatimTextOutput("statistics")),
                         tabPanel('ScatterPlot', plotlyOutput("scatterPlot")),
                         tabPanel('FrequentItemset', value='itemFreq',
                                  plotOutput("itemFreqPlot", width='100%', height='100%')),
                         tabPanel('Grouped', value='grouped',
                                  plotOutput("groupedPlot", width='100%', height='100%')),
                         tabPanel('RulesGraph',
                                  visNetworkOutput("graphPlot", width='100%', height='800px')),
                         tabPanel('Parallel Coordinates', value='paracoord',
                                  plotOutput("paracoordPlot", width='100%', height='100%')),
                         tabPanel('Rules Table', value='datatable',
                                  DT::dataTableOutput("rulesDataTable"))
                       )
                )
              )),
      tabItem(tabName = "mcda",
              fluidRow(
                column(width = 3,
                       box(width = NULL, status = "info",
                           fileInput("datafile", "File input:")
                       ),
                       box(width = NULL, status = "info",
                           selectInput("txt", "Method:",
                                       choices = c('Electre Tri', 'Electre', 'Promethee'),
                                       selected = "Electre Tri")
                       ),
                       box(width = NULL, status = "info",
                           sliderInput("slider", "Slider input:", 1, 100, 30),
                           actionButton("action2", "Compute!", class = "btn-primary")
                       )
                ),
                column(width = 9,
                       tabBox(side = "left", width = 12,
                              tabPanel("Decision Matrix", dataTableOutput("filetable_DM1")),
                              tabPanel("Partial Concordance", dataTableOutput("partialConcordance_al_pr_gj")),
                              tabPanel("Global Concordance", dataTableOutput("globalconcordance")),
                              #tabPanel("PartialC_pr_al_gj", dataTableOutput("partialConcordance_pr_al_gj")),
                              tabPanel("Partial Discordance", dataTableOutput("partialDiscordance_al_pr_gj")),
                              #tabPanel("PartialD_pr_al_gj", dataTableOutput("partialDiscordance_pr_al_gj")),
                              tabPanel("Credibility", dataTableOutput("credibility")),
                              tabPanel("Relations", dataTableOutput("relations")),
                              tabPanel("Assignments", dataTableOutput("assignment"))
                       )
                )
              )
      ),
      tabItem(tabName = "map",
              tabPanel("Interactive Map",
                       div(class="outer",
                           tags$head(
                             # Include our custom CSS
                             includeCSS("styles.css"),
                             includeScript("gomap.js")
                           ),
                           leafletOutput("map", width="100%", height="100%"),
                           # Shiny versions prior to 0.11 should use class="modal" instead.
                           absolutePanel(id = "controls", class = "panel panel-default",
                                         fixed = TRUE, draggable = TRUE,
                                         top = 60, left = "auto", right = 20, bottom = "auto",
                                         width = 330, height = "auto",
                                         h2("Vehicle Collisions"),
                                         selectInput("color", "Color", vars),
                                         selectInput("size", "Size", vars2,
                                                     selected = "NUMBER.OF.PERSONS.INJURED"),
                                         checkboxInput("cluster", "Add Cluster"),
                                         helpText("Cluster numbers show total accidents for each area",
                                                  "(applies to all vehicles only)"),
                                         radioButtons("vehicle", "Show Just One Vehicle", vars3, selected = '')
                           ),
                           tags$div(id="cite",
                                    'Data from: ', tags$em('Data Details of Vehicle Collisions'),
                                    ' | CNPAC Data. Details of Vehicle Collisions in Marrakech City provided by the Ministry of Equipment, Transport and Logistics of Morocco.'
                           )
                       )
              )
      ),
      tabItem(tabName = "tms",
              fluidRow(
                column(width = 3,
                       box(width = NULL, status = "info",
                           numericInput("months", label = "Months to Predict",
                                        value = 12, min = 12, max = 144, step = 12)
                       ),
                       box(width = NULL, status = "info",
                           selectInput("interval", label = "Prediction Interval",
                                       choices = c("0.80", "0.90", "0.95", "0.99"),
                                       selected = "0.95")
                       ),
                       box(width = NULL, status = "info",
                           checkboxInput("showgrid", label = "Show Grid", value = TRUE),
                           actionButton("action2", "Search", class = "btn-primary")
                       )
                ),
                column(width = 9,
                       tabBox(width = NULL,
                              tabPanel("Predicting The Injury", dygraphOutput("dygraph1")),
                              tabPanel("Predicting The Deaths", dygraphOutput("dygraph2"))
                       )
                ))
      ),
      tabItem(tabName = "about",
              fluidRow(
                column(width = 3,
                       box(width = NULL, status = "info",
                           h4("Data model for ARM"),
                           p("For general use of this app, you need to prepare your data according to your needs; the data model looks like this CSV file"),
                           br(),
                           downloadButton("dataset_model", label = "Dataset model"),
                           br(), br(),
                           h4("Data model for MCDA"),
                           p("For general use of this app, you need to prepare your data according to your needs; the data model looks like this CSV file"),
                           br(),
                           downloadButton("decision_matrix_model", label = "Decision matrix model"),
                           br(), br()
                       )
                ),
                column(width = 9,
                       box(width = NULL, status = "info",
                           h4("Abstract"),
                           p("Today’s ultra-connected world is generating massive volumes of data stored in database and cloud environments, especially in logistics and transportation. These data need to be analyzed in order to extract useful knowledge and present it to logistics managers for further use, such as road safety, shipping delays, and shipping optimization. The potential of data mining algorithms is largely untapped; this paper shows how large-scale techniques such as association rule analysis and time series can improve road safety by identifying hot-spots in advance and giving drivers a chance to avoid the dangers. We propose a framework based on the association rules technique as a preliminary task to analyze and extract relationships between variables related to road accidents, and then use multiple criteria analysis to select relevant association rules. The developed system is flexible and allows intuitive creation and execution of different algorithms for an extensive range of road traffic topics. DM-MCDA can be expanded with new topics on demand, rendering knowledge extraction more robust and providing meaningful information that could help in developing suitable policies for decision makers."),
                           br(),
                           h4("Data Source"),
                           p("Description: ", "Data details of road accidents in Morocco provided by CNPAC: National Committee for the Prevention of Traffic Accidents (Morocco)."),
                           p("Source: ", a("Data Details of rainfall | CNPAC", href="http://www.equipement.gov.ma")),
                           br(),
                           h4("Authors Information"),
                           p("¹Addi Ait-Mlouk, ²Tarik Agouti"),
                           p("¹Department of Computing Science, Umeå University, Umeå, Sweden"),
                           p("²Cadi Ayyad University, Faculty of Science Semlalia, Marrakech, Morocco"),
                           br(),
                           h4("Acknowledgement"),
                           p("The authors would like to thank Jihane Mounji Manji for her support"),
                           h4("Maintainer"),
                           p("Email: aitmlouk@gmail.com")
                       )
                )
              ))
    )
  )
))
/ui.R
permissive
ln1267/DM-MCA
R
false
false
15,767
r
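## The openTab() JavaScript helper defined in the dashboardBody above can also
## be driven from the server side; a sketch of a fragment that would live in
## server.R (the input id 'go_to_arm' is illustrative and not part of the
## original UI; shinyjs is already initialised via useShinyjs() in the UI):
server_fragment <- function(input, output, session) {
  observeEvent(input$go_to_arm, {
    shinyjs::runjs("openTab('arm')")   # switch the dashboard to the ARM tab
  })
}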
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/theta_functions.R \docType{methods} \name{mlef} \alias{mlef} \alias{mlef,item_pool-method} \title{Compute maximum likelihood estimates of theta using fence items} \usage{ mlef( object, select = NULL, resp, fence_slope = 5, fence_difficulty = c(-5, 5), start_theta = NULL, max_iter = 100, crit = 0.001, truncate = FALSE, theta_range = c(-4, 4), max_change = 1, do_Fisher = TRUE ) \S4method{mlef}{item_pool}( object, select = NULL, resp, fence_slope = 5, fence_difficulty = c(-5, 5), start_theta = NULL, max_iter = 50, crit = 0.005, truncate = FALSE, theta_range = c(-4, 4), max_change = 1, do_Fisher = TRUE ) } \arguments{ \item{object}{an \code{\linkS4class{item_pool}} object.} \item{select}{(optional) if item indices are supplied, only the specified items are used.} \item{resp}{item response on all (or selected) items in the \code{object} argument. Can be a vector, a matrix, or a data frame. \code{length(resp)} or \code{ncol(resp)} must be equal to the number of all (or selected) items.} \item{fence_slope}{the slope parameter to use on fence items. Can be one value, or two values for the lower and the upper fence respectively. (default = \code{5})} \item{fence_difficulty}{the difficulty parameter to use on fence items. Must have two values for the lower and the upper fence respectively. (default = \code{c(-5, 5)})} \item{start_theta}{(optional) initial theta values. If not supplied, EAP estimates using uniform priors are used as initial values. Uniform priors are computed using the \code{theta_range} argument below, with increments of \code{.1}.} \item{max_iter}{maximum number of iterations. (default = \code{100})} \item{crit}{convergence criterion to use. (default = \code{0.001})} \item{truncate}{set \code{TRUE} to impose a bound on the estimate. (default = \code{FALSE})} \item{theta_range}{a range of theta values to bound the estimate. Only effective when \code{truncate} is \code{TRUE}. (default = \code{c(-4, 4)})} \item{max_change}{upper bound to impose on the absolute change in theta between iterations. Absolute changes exceeding this value will be capped to \code{max_change}. (default = \code{1.0})} \item{do_Fisher}{set \code{TRUE} to use Fisher scoring instead of Newton-Raphson method. (default = \code{TRUE})} } \value{ \code{\link{mlef}} returns a list containing estimated values. \itemize{ \item{\code{th}} theta value. \item{\code{se}} standard error. \item{\code{conv}} \code{TRUE} if estimation converged. \item{\code{trunc}} \code{TRUE} if truncation was applied on \code{th}. } } \description{ \code{\link{mlef}} is a function to compute maximum likelihood estimates of theta using fence items. } \examples{ mlef(itempool_fatigue, resp = resp_fatigue_data[10, ]) mlef(itempool_fatigue, select = 1:20, resp = resp_fatigue_data[10, 1:20]) } \references{ Han, K. T. (2016). Maximum likelihood score estimation method with fences for short-length tests and computerized adaptive tests. \emph{Applied Psychological Measurement, 40}(4), 289-301. }
/TestDesign/man/mlef-methods.Rd
no_license
akhikolla/TestedPackages-NoIssues
R
false
true
3,136
rd
#!/usr/bin/env Rscript
source("read.salary.R")

assignCategory <- function(dd, dst, agent, lvl, regex) {
  # assign each row the last category whose regex matches its user-agent string
  dd[, dst] <- factor(NA, levels = lvl)
  for (i in 1:length(regex)) {
    dd[grepl(regex[i], agent, perl = T), dst] <- lvl[i]
  }
  return (dd)
}

################################################################

dd <- read.salary("data/2011_may_final.csv")
dd$User.Agent <- as.character(dd$User.Agent)
dd <- assignCategory(dd, "platform", dd$User.Agent,
                     c("Windows", "Linux", "Mac", "Android", "iPhone/iPad"),
                     c("Windows NT|WOW64", "(?!Android)Linux", "Macintosh", "Android", "like Mac OS X"))
dd <- assignCategory(dd, "browser", dd$User.Agent,
                     c("MSIE", "Firefox", "Chrome", "Opera", "Safari", "Mobile"),
                     c(" MSIE ", " Firefox/", " Chrome/", "Opera/", " Version/[0-9.]* Safari", " Mobile"))

################################################################
library(lattice)

png(filename="reports/may2011/toolset.%03d.png", width=1024, height=320, res=90)
bwplot(salary ~ browser | cls, data=dd, layout=c(3,1), ylim=c(0,5000),
       xlab="Browser", ylab="Salary, $/month")
invisible(dev.off())  # close the first device before opening the next one

png(filename="reports/may2011/reading-hours.%03d.png", width=800, height=400, res=90)
# Дата.заполнения ("date filled in") is a column name in the source data
dd$hour <- as.numeric(substr(as.character(dd$Дата.заполнения), 11, 13))
densityplot(~ hour, groups=cls, data=dd, lwd=2, alpha=0.7, type="",
            auto.key=list(columns=1, space="right"),
            scales=list(x=list(at=do.breaks(c(0, 24), 12))),
            main="Who reads DOU, and when?",
            xlab="Time of day (hour)")
invisible(dev.off())
/toolset.R
no_license
intracer/dou-salaries
R
false
false
1,600
r
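## A quick sanity check of assignCategory on made-up user-agent strings (the
## strings below are illustrative, not from the survey data); note that later
## regexes overwrite earlier matches, which is why "Android" comes after
## "Linux" in the platform list above:
ua <- data.frame(User.Agent = c("Mozilla/5.0 (Windows NT 6.1; WOW64) Chrome/12.0",
                                "Mozilla/5.0 (X11; Linux x86_64; rv:2.0) Firefox/4.0"),
                 stringsAsFactors = FALSE)
ua <- assignCategory(ua, "platform", ua$User.Agent,
                     c("Windows", "Linux"),
                     c("Windows NT|WOW64", "(?!Android)Linux"))
ua$platform  # Windows, Linux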
SSAGA = function(fitness_func, max_iterations, max_runtime, max_stagnation, input_width) {

  ################ TOP LEVEL DOCUMENTATION ################

  # Input documentation
  # fitness_func: objective function (reads in the inputs)
  # max_iterations: maximum number of generations to allow
  # max_runtime: number of seconds the algorithm can run (runtime will exceed
  #   this by the amount it takes to finish the current loop)
  # max_stagnation: number of sequential generations without an improvement in fitness
  # input_width: how many inputs/dims the input takes
  # Sample input: SSAGA(fitness_func, 1000, 30, 500, 10)

  # Genetic operators
  # Elitism (E) - preserve best solutions across generations
  # Cross-Over/Cross-Breeding (CO) - mix the best solutions
  # Mutation (M) - randomly change the best solutions
  # Random Immigrants (RI) - generate totally new solutions

  ################ CODE SETUP ################

  # Capture the entry time
  CodeEnterTime = Sys.time()

  # Set algorithm parameters
  PopulationSize = 64
  GeneticOperator_Primary = (1/2)
  GeneticOperator_Secondary = (1/4)
  GeneticOperator_Tertiary = (3/16)
  GeneticOperator_Quaternary = (1/16)

  # Set states as vector - position 1 = E, position 2 = CO, position 3 = M, position 4 = RI
  StateBias_Elitism = c(PopulationSize*GeneticOperator_Primary,
                        PopulationSize*GeneticOperator_Tertiary,
                        PopulationSize*GeneticOperator_Secondary,
                        PopulationSize*GeneticOperator_Quaternary)
  StateBias_CrossOver = c(PopulationSize*GeneticOperator_Quaternary,
                          PopulationSize*GeneticOperator_Primary,
                          PopulationSize*GeneticOperator_Secondary,
                          PopulationSize*GeneticOperator_Tertiary)
  StateBias_Mutation = c(PopulationSize*GeneticOperator_Quaternary,
                         PopulationSize*GeneticOperator_Secondary,
                         PopulationSize*GeneticOperator_Primary,
                         PopulationSize*GeneticOperator_Tertiary)
  StateBias_RandomImmigrants = c(PopulationSize*GeneticOperator_Quaternary,
                                 PopulationSize*GeneticOperator_Secondary,
                                 PopulationSize*GeneticOperator_Tertiary,
                                 PopulationSize*GeneticOperator_Primary)

  ################ ALGORITHM INTERFACE ################

  # Define parameters
  BurnInPeriod = 100 # Number of iterations to allow before checking stagnation
  MutationRate = 1   # Number of genes to mutate per chromosome

  # Initialize the algorithm
  # Create initial population
  OldPopulation = apply(matrix(0, input_width, PopulationSize), MARGIN = 1, runif)
  # Normalize the population
  OldPopulation = sweep(OldPopulation, 1, rowSums(OldPopulation), "/")
  # Instantiate a blank matrix to hold the new solutions
  NewPopulation = matrix(0, PopulationSize, input_width)
  # Create a vector (with a dummy first entry) to store the best solutions
  SolutionFitness = c(0)

  # Record the time entering the optimization procedure
  CurrentTime = Sys.time()
  # Initialize iterations count
  iterations = 0
  # Initialize population stagnation index
  populationStagnationIndex = 0
  # Set initial state
  CurrentState = StateBias_CrossOver

  # Algorithm iteration
  # STEPS CORRESPOND TO THE ORDER THE NEW MATRIX IS FILLED
  # Step 1: Elitism
  # Step 2: Cross-Over
  # Step 3: Mutation
  # Step 4: Random Immigrants (totally new solutions)

  # Alert user to entrance
  print("Beginning optimization...")

  # Compare elapsed time in seconds explicitly; subtracting Sys.time() values
  # yields a difftime whose units can silently switch to minutes or hours.
  while (iterations < max_iterations &&
         as.numeric(difftime(CurrentTime, CodeEnterTime, units = "secs")) < max_runtime &&
         populationStagnationIndex < max_stagnation) {

    # Create a score vector
    scoreVect = matrix(0, PopulationSize, 1)

    # Score the entries
    for (i in 1:PopulationSize) {
      scoreVect[i] = fitness_func(OldPopulation[i,])
    }

    # Bind the score column to the solutions matrix
    OldPopulation = cbind(OldPopulation, scoreVect)
    # Sort by score (ascending, so row 1 is the best solution)
    OldPopulation = OldPopulation[order(OldPopulation[,input_width+1]),]
    # Record the best score
    SolutionFitness = append(SolutionFitness, log(1 + 1/OldPopulation[1, input_width+1]))
    # Delete the scores
    OldPopulation = OldPopulation[,-(input_width+1)]

    # Carry over the top individuals to the next population
    for (i in 1:CurrentState[1]) {
      NewPopulation[i,] = OldPopulation[i,]
    }

    # Cross-breed the top solutions using two-point cross-over
    for (i in seq(1, CurrentState[2], 2)) {
      points = sample(1:input_width, 2)
      # Set new genes for chromosome i
      NewPopulation[(i+CurrentState[1]),] = OldPopulation[i,]
      NewPopulation[(i+CurrentState[1]), points[1]:points[2]] = OldPopulation[i+1, points[1]:points[2]]
      # Set new genes for chromosome i+1
      NewPopulation[(i+CurrentState[1]+1),] = OldPopulation[i+1,]
      NewPopulation[(i+CurrentState[1]+1), points[1]:points[2]] = OldPopulation[i, points[1]:points[2]]
    }

    # Mutate the top solutions (count given by the current state vector)
    for (i in 1:CurrentState[3]) {
      mutationIndex = sample(1:input_width, MutationRate) # Randomly select the gene(s) to mutate
      OldPopulation[i, mutationIndex] = runif(1, -1, 1)
      NewPopulation[(i + CurrentState[1] + CurrentState[2]),] = OldPopulation[i,]
    }

    # Receive random immigrants
    for (i in 1:CurrentState[4]) {
      NewPopulation[(i + CurrentState[1] + CurrentState[2] + CurrentState[3]),] = runif(input_width, -1, 1)
    }

    # Assign new matrix to the old matrix
    OldPopulation = NewPopulation

    # Update exit conditions
    iterations = iterations + 1
    CurrentTime = Sys.time()
    if (iterations > BurnInPeriod && SolutionFitness[iterations] == SolutionFitness[iterations-1]) {
      populationStagnationIndex = populationStagnationIndex + 1
    } else {
      populationStagnationIndex = 0
    }
  }

  # Delete the dummy entry from the solution vector
  SolutionFitness = SolutionFitness[-1]

  # Algorithm wrap-up
  CodeExitTime = Sys.time()
  print("Optimization finished.")
  #plot(SolutionFitness, main = "Solution Fitness", xlab = "Generation", ylab = "Relative Fitness", type = "l", ylim = c(0,25), col = "black")
  #plot(diff(SolutionFitness), main = "Solution Growth", xlab = "Generation", ylab = "Growth in Relative Fitness", type = "l", col = "black")
  print(paste("Fitness:", fitness_func(OldPopulation[1,])))
  print("Best solution:")
  print(OldPopulation[1,])
  print(paste("Iterations: ", iterations))
  print(paste("Time elapsed: ", CodeExitTime - CodeEnterTime))

  # Algorithm output (use a list rather than growing an atomic vector with `$`)
  ReturnObject = list()
  ReturnObject$Solution = OldPopulation[1,]
  ReturnObject$FinalFitness = fitness_func(OldPopulation[1,])
  ReturnObject$Iterations = iterations
  ReturnObject$RunTime = CodeExitTime - CodeEnterTime
  ReturnObject$FitnessHistory = SolutionFitness
  ReturnObject$FitnessGrowth = diff(SolutionFitness)
  return(ReturnObject)
}
/Test Functions/SSAGA_LS_CO.R
permissive
Adam-Diehl/Machine-Learning
R
false
false
6,710
r
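# A hedged usage sketch of SSAGA(): minimize a simple sphere objective in
# 5 dimensions. `sphere` is a made-up test function, not part of this file.
sphere <- function(x) sum(x^2)
result <- SSAGA(sphere, max_iterations = 200, max_runtime = 10,
                max_stagnation = 50, input_width = 5)
result$Solution      # best chromosome found
result$FinalFitness  # objective value at that chromosome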
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PATHOGEN-PfSI-Methods.R
\name{init_PfSI_HumanPop}
\alias{init_PfSI_HumanPop}
\title{PfSI \code{HumanPop} Method: Initialize PfSI Infections}
\usage{
init_PfSI_HumanPop(PfPR)
}
\arguments{
\item{PfPR}{numeric; parasite prevalence}
}
\description{
Initialize PfSI infections with parasite prevalence PfPR in a human population.
\itemize{
\item This method is bound to \code{HumanPop$init_PfSI()}
}
}
/MASH-MACRO/man/init_PfSI_HumanPop.Rd
no_license
Chipdelmal/MASH-Main
R
false
true
467
rd
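# A hedged usage sketch; assumes `pop` is an already-constructed HumanPop
# object from MASH-MACRO (constructor details are not shown in this file):
# pop$init_PfSI(PfPR = 0.4)  # seed roughly 40% of the population with PfSI infections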
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/result.R
\name{plotNDSet}
\alias{plotNDSet}
\title{Plot the nondominated set (two objectives) in criterion space.}
\usage{
plotNDSet(
  file,
  contribution = NULL,
  local = FALSE,
  labels = NULL,
  addTriangles = FALSE,
  latex = FALSE,
  addHull = FALSE
)
}
\arguments{
\item{file}{Relative file path within the contribution (or just a file path if a local file).}

\item{contribution}{Name of the contribution (if using files at GitHub).}

\item{local}{Use a local file (otherwise use the file at GitHub).}

\item{labels}{A vector of labels to be added.}

\item{addTriangles}{Add triangles to the non-dominated points.}

\item{latex}{If TRUE, make LaTeX math labels for TikZ.}

\item{addHull}{Add the convex hull of the non-dominated points and rays.}
}
\value{
A ggplot object.
}
\description{
Plot the nondominated set (two objectives) in criterion space.
}
\examples{
## Info about MOrepo

# Problem classes
getProblemClasses()  # problem classes with instances
getProblemClasses(results = TRUE)  # problem classes with results
getProblemClasses(contribution = "Pedersen08")  # problem classes for a specific contribution

# Info about instances
getInstanceInfo()  # all contributions with instances
getInstanceInfo(class = "Assignment")  # contributions with instances for problem class Assignment
getInstanceInfo(contribution = c("Tuyttens00", "Gadegaard16"))  # info about two contributions

# Info about results
getResultInfo()  # all contributions with results
getResultInfo(withLinks = TRUE)  # add links to output
getResultInfo(class = "Assignment")  # contributions with results for problem class Assignment
getResultInfo(contribution = c("Pedersen08", "Gadegaard16"))  # info about two contributions

## Download

# Download two contributions
getContributionAsZip(c("Tuyttens00", "Template"))

# Download specific instances
getInstance(name = "Tuyttens.*n10", onlyList = TRUE)  # only file locations returned (no downloading)
\dontrun{
getInstance(name = "Tuyttens")}

## Plotting results
plotNDSet("results/AP/Tuyttens00_AP_n10_result.json", contribution = "Pedersen08")
plotNDSet("results/AP/Tuyttens00_AP_n10_result.json", contribution = "Pedersen08", labels = 1:16)
plotNDSet("results/AP/Tuyttens00_AP_n10_result.json", contribution = "Pedersen08", addTriangles = TRUE)
plotNDSet("results/AP/Tuyttens00_AP_n10_result.json", contribution = "Pedersen08", addHull = TRUE)
plotNDSet("results/AP/Tuyttens00_AP_n10_result.json", contribution = "Pedersen08", addHull = TRUE,
  addTriangles = TRUE)

## Contributing to MOrepo

# Adding a result file
points <- data.frame(z1 = c(27, 30, 31, 34, 42, 43, 49, 51),
                     z2 = c(56, 53, 36, 33, 30, 25, 23, 9),
                     type = c('se', 'us', 'se', 'us', 'us', 'us', 'us', 'se'))
createResultFile(instanceName = "Tuyttens00_AP_n05",
   contributionName = "Pedersen08",
   objectives = 2,
   points = points,
   card = 8,
   suppCard = 3,
   extCard = 3,
   objectiveType = c("int", "int"),
   direction = c("min", "min"),
   comments = "Results from the paper by Pedersen et. al (2008)",
   optimal = TRUE
)
modifyResultFile("Tuyttens00_AP_n05_result.json", comments = "New changed comment")

# Check your contribution
\dontrun{
checkContribution()}
}
\author{
Lars Relund \email{lars@relund.dk}
}
/misc/R/MOrepoTools/man/plotNDSet.Rd
no_license
zhhwss/MOrepo
R
false
true
3,302
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PopulationSNPSet.R
\name{PopulationSNPSet}
\alias{PopulationSNPSet}
\title{Simulation of a population of genotypic profiles}
\usage{
PopulationSNPSet(n, Sigma = NULL, maf = NULL, marginal = NULL, X = NULL)
}
\arguments{
\item{n}{the desired number of individuals}

\item{Sigma}{a correlation matrix}

\item{maf}{a vector of minor allele frequencies; if this is given, Hardy-Weinberg equilibrium is assumed}

\item{marginal}{a list of marginal distributions, where each element of the list is a vector of length 1 or 2 of cumulative probabilities. See the \code{ordsample} function from the \code{GenOrd} package for more information}

\item{X}{an observed matrix of genotypic profiles; if this is given, the sample is simulated according to the dependence structure and marginal distributions observed in \code{X}.}
}
\value{
A matrix \code{X} of size n x p of genotypic profiles (one profile per row).
}
\description{
\code{PopulationSNPSet} is used to generate a population matrix, where each row is a genotypic profile corresponding to an individual. It can be used to generate a matrix of genotypes according to an observed dependence structure. This function is based on the \code{GenOrd} package.
}
\references{
Alessandro Barbiero and Pier Alda Ferrari (2015). GenOrd: Simulation of Discrete Random Variables with Given Correlation Matrix and Marginal Distributions. R package version 1.4.0. https://CRAN.R-project.org/package=GenOrd
}
/man/PopulationSNPSet.Rd
no_license
fhebert/SNPSetSimulations
R
false
true
1,557
rd
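# A hedged usage sketch: 1000 individuals at 3 SNPs in Hardy-Weinberg
# equilibrium with an exchangeable correlation of 0.2 (illustrative values):
Sigma <- matrix(0.2, nrow = 3, ncol = 3)
diag(Sigma) <- 1
maf <- c(0.1, 0.25, 0.4)
X <- PopulationSNPSet(n = 1000, Sigma = Sigma, maf = maf)
dim(X)  # 1000 x 3 matrix of genotypic profiles, one row per individual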
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixbeg_EM.R
\name{func_mgamma_em}
\alias{func_mgamma_em}
\title{E and M steps for the mixture-of-gammas part of FMBEG}
\usage{
func_mgamma_em(data, beta, q, m)
}
\arguments{
\item{data}{data vector from the FMBEG distribution.}

\item{beta}{numeric parameters, which must be greater than 0.}

\item{q}{vector of gamma membership probabilities, each between 0 and 1 and summing to 1.}

\item{m}{number of gamma components.}
}
\value{
list containing the updated parameter estimates of beta and q
}
\description{
This function computes the beta and q parameters of the FMBEG distribution.
}
/man/func_mgamma_em.Rd
permissive
camponsah/BivMixDist
R
false
true
648
rd
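# A hedged usage sketch on simulated data; the starting values are
# illustrative and the exact return structure is defined in R/mixbeg_EM.R:
set.seed(1)
y <- c(rgamma(300, shape = 2, rate = 1), rgamma(200, shape = 2, rate = 5))
est <- func_mgamma_em(y, beta = c(0.8, 4), q = c(0.6, 0.4), m = 2)
str(est)  # list of updated beta and q estimates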
library(readr)
library(dplyr)
library(janitor)
library(ggplot2)
library(lubridate)
library(openxlsx)
library(tidyr)

phe <- read_csv("https://coronavirus.data.gov.uk/downloads/csv/coronavirus-cases_latest.csv") %>%
  clean_names()

ldn_bs <- c("Barking and Dagenham", "Barnet", "Bexley", "Brent", "Bromley",
            "Camden", "Croydon", "Ealing", "Enfield", "Greenwich", "Hackney",
            "Hammersmith and Fulham", "Haringey", "Harrow", "Havering",
            "Hillingdon", "Hounslow", "Islington", "Kensington and Chelsea",
            "Kingston upon Thames", "Lambeth", "Lewisham", "Merton", "Newham",
            "Redbridge", "Richmond upon Thames", "Southwark", "Sutton",
            "Tower Hamlets", "Waltham Forest", "Wandsworth", "Westminster")

# Daily and cumulative confirmed cases for the London boroughs
thing <- phe %>%
  filter(area_name %in% ldn_bs) %>%
  # filter(area_type == "Lower tier local authority") %>%
  group_by(specimen_date, area_name) %>%
  summarise(daily_lab_confirmed_cases = sum(daily_lab_confirmed_cases),
            cumulative_lab_confirmed_cases = sum(cumulative_lab_confirmed_cases))

ggplot(thing, aes(x = ymd(specimen_date), y = cumulative_lab_confirmed_cases,
                  group = area_name, colour = area_name)) +
  geom_line()

phe_d <- read.xlsx("https://www.england.nhs.uk/statistics/wp-content/uploads/sites/2/2020/06/COVID-19-total-announced-deaths-9-June-2020.xlsx",
                   sheet = "Tab4 Deaths by trust", startRow = 16, colNames = TRUE) %>%
  clean_names()

# The date headers come in as Excel serial numbers; convert them to dates.
# Windows Excel serials map to R dates with origin "1899-12-30" (using
# "1900-01-01" shifts every date by two days).
phe_d[1, 5:(ncol(phe_d)-2)] <- as.character(as.Date(as.numeric(gsub("x", "", names(phe_d[5:(ncol(phe_d)-2)]))),
                                                    origin = "1899-12-30"))
phe_d[1, c(1:4, (ncol(phe_d)-1):ncol(phe_d))] <- names(phe_d[1, c(1:4, (ncol(phe_d)-1):ncol(phe_d))])
phe_d <- phe_d %>% row_to_names(row_number = 1)

thing <- phe_d %>%
  filter(nhs_england_region == "London") %>%
  pivot_longer(-c(nhs_england_region, code, name, awaiting_verification, total), "date") %>%
  mutate(date = ymd(ifelse(grepl("up_to_01_mar_20", date), "2020-03-01", date)),
         # values are read in as character, so convert before cumulating
         csum = ave(as.numeric(value), name, FUN = cumsum))

ggplot(thing, aes(x = ymd(date), y = csum, group = name, colour = name)) +
  geom_line()
/explore_data.R
no_license
maczokni/englangcovid
R
false
false
2,479
r
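# Quick sanity check of the Excel serial-date conversion used above:
# serial 43891 corresponds to 1 March 2020 in files written by Windows Excel.
as.Date(43891, origin = "1899-12-30")  # "2020-03-01"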
# Case study 22
# Example inventory URL: "http://www.toyotaofbraintree.com/new-cars/for-sale"

alldata.22 = function(url) {
  require(XML)
  require(plyr)
  links = getLinklist.22(url)
  cardata = ldply(links, scrapeInfo.22)
  return(cardata)
}

getLinklist.22 = function(url) {
  doc = htmlParse(url)
  baselink = paste0(url, "?startrow=1")
  # Total number of cars in new inventory
  countNode = xmlValue(getNodeSet(doc, "//span[contains(.,'results')]/text()")[[1]], trim = TRUE)
  totalnumber = as.numeric(gsub('.*of.*?([0-9]+) results', '\\1', countNode))
  # Start number for each page (25 results per page)
  startnumber = seq(1, totalnumber - 1, 25)
  # All the page links
  Linklist = sapply(startnumber, function(num) gsub('\\d+', num, baselink))
  return(Linklist)
}

scrapeInfo.22 <- function(url) {
  #print(url)
  #url = "http://www.balisetoyota.com/web/new/Toyota-4Runner-2015-West-Springfield-Massachusetts/20934167/?condition_id=10425"
  doc = htmlParse(url)
  vin = xpathSApply(doc, '//div[contains(@class,"vehicle-item")]', xmlGetAttr, 'data-vin')
  year = xpathSApply(doc, '//div[contains(@class,"vehicle-item")]', xmlGetAttr, 'data-year')
  make = xpathSApply(doc, '//div[contains(@class,"vehicle-item")]', xmlGetAttr, 'data-carmake')
  model = xpathSApply(doc, '//div[contains(@class,"vehicle-item")]', xmlGetAttr, 'data-carmodel')
  trim = "NA"
  return(data.frame(vin, make, model, trim, year = as.numeric(year),
                    stringsAsFactors = FALSE))
}
/Case Studies/Case study-22.R
no_license
jpzhangvincent/Dealership-Scraping
R
false
false
1,375
r
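# A hedged usage sketch; this only works while the dealership site still
# serves the same markup (columns returned: vin, make, model, trim, year):
cars <- alldata.22("http://www.toyotaofbraintree.com/new-cars/for-sale")
head(cars)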
#' Returns dataset with age data for the specified region
#' @param dataset Cancensus Dataset
#' @param regions Cancensus Regions
#' @param should_sum logical, should the regions be summed up or reported separately
#' @param refresh logical, should cached data be refreshed
#' @export
get_age_data <- function(dataset,regions,should_sum=FALSE,refresh=FALSE) {
  long_labels <- function(data){
    labels=cancensus::label_vectors(data)
    name_change=setNames(as.character(lapply(labels$Vector,function(x){return(labels %>% dplyr::filter(Vector==x) %>% dplyr::pull("Detail"))})),
                         labels$Vector)
    new_names=as.character(lapply(names(data),function(x){
      n=as.character(name_change[x])
      return(ifelse(is.na(n),x,n))
    }))
    names(data)=new_names
    return(data)
  }
  male <- cancensus::search_census_vectors('Total - Age',dataset,"Male", quiet=TRUE)
  if (nrow(male)==0||"Similarly named objects" %in% names(male))
    male <- cancensus::search_census_vectors('^Male, total',dataset,"Total", quiet=TRUE)
  male <- male %>%
    cancensus::child_census_vectors(TRUE) %>%
    dplyr::filter(!grepl("Average",label),!grepl(" to ",label),
                  !grepl("5 years and over",label),!grepl("Under 5 years",label))
  female <- cancensus::search_census_vectors('Total - Age',dataset,"Female", quiet=TRUE)
  if (nrow(female)==0||"Similarly named objects" %in% names(female))
    female <- cancensus::search_census_vectors('^Female, total',dataset,"Total", quiet=TRUE)
  female <- female %>%
    cancensus::child_census_vectors(TRUE) %>%
    dplyr::filter(!grepl("Average",label),!grepl(" to ",label),
                  !grepl("5 years and over",label),!grepl("Under 5 years",label))
  vectors <- rbind(male,female) %>% dplyr::pull("vector")
  male_data <- cancensus::get_census(dataset = dataset, regions=regions, level="Regions", labels="short",
                                     vectors=male$vector, quiet=TRUE, use_cache = !refresh)
  female_data <- cancensus::get_census(dataset = dataset, regions=regions, level="Regions", labels="short",
                                       vectors=female$vector, quiet=TRUE, use_cache = !refresh)
  labels=male_data %>% cancensus::label_vectors() %>% dplyr::pull("Detail")
  labels[labels=="100 years and over"]="100+"
  label_levels=c("Under 1 year",seq(1,99),"100+")
  data <- rbind(male_data %>% long_labels %>% dplyr::mutate(Gender="Male"),
                female_data %>% long_labels %>% dplyr::mutate(Gender="Female"))
  if ("100 years and over" %in% names(data)) {
    data <- data %>%
      dplyr::mutate(`100+`=`100 years and over`) %>%
      dplyr::select(c("GeoUID","Region Name","Gender",labels))
  }
  if (should_sum) {
    selects <- setdiff(names(data),"Region Name")
    data <- data %>%
      dplyr::select(selects) %>%
      dplyr::group_by(Gender) %>%
      dplyr::summarize_all(sum,na.rm=TRUE)
  }
  if ("Population" %in% names(data)) data <- data %>% select(-Population)
  plot_data <- data %>%
    tidyr::pivot_longer(labels,names_to="Age", values_to="Population")
  if (setdiff(unique(plot_data$Age),label_levels) %>% length()==0) {
    plot_data <- plot_data %>%
      dplyr::mutate(Age=factor(Age,levels=label_levels,ordered=TRUE))
  } else {
    age_labels <- plot_data$Age %>% unique
    plot_data <- plot_data %>%
      dplyr::mutate(Age=factor(Age,levels=age_labels,ordered=TRUE))
  }
  # Flip male counts negative so the result plots as a population pyramid
  plot_data[plot_data$Gender=="Male","Population"]=-plot_data[plot_data$Gender=="Male","Population"]
  return(plot_data %>% select(GeoUID,`Region Name`,Gender,Age,Population))
}

#' Assigns long labels
#' @export
detail_labels <- function(data){
  ns=names(data)
  labels=cancensus::label_vectors(data)
  for (i in 1:nrow(labels)) {
    ns <- gsub(labels[i,"Vector"],labels[i,"Detail"],ns)
  }
  names(data) <- ns
  return(data)
}

#' @export
format_currency <- function(x){paste0("$",format(x,big.mark = ","))}

#' @export
format_number <- function(x){format(x,big.mark = ",")}

#' @export
format_percent <- function(x,digits=1){paste0(round(x*100,digits),"%")}

#' @export
format_ratio <- function(x,digits=2){round(x,digits)}

#' Aggregate variables to common cts, returns data2 on new tiling matching data1 geography
#' @param data1 Cancensus CT level dataset for year1 < year2 to serve as base for common geography
#' @param data2 Cancensus CT level dataset for year2 to be aggregated to common geography
#' @param data2_sum_vars vector of variable names to be summed up when aggregating geographies
#' @export
common_cts <- function(data1,data2,data2_sum_vars) {
  cts_1 <- data1$GeoUID
  cts_2 <- data2$GeoUID
  cts_diff_1 <- setdiff(cts_1,cts_2) %>% sort
  cts_diff_2 <- setdiff(cts_2,cts_1) %>% sort
  d <- st_intersection(
    data2 %>% dplyr::filter(GeoUID %in% cts_diff_2) %>%
      rename(GeoUID2=GeoUID) %>%
      dplyr::select(GeoUID2) %>%
      dplyr::mutate(area2=st_area(geometry)),
    data1 %>% dplyr::filter(GeoUID %in% cts_diff_1) %>%
      dplyr::select(GeoUID) %>%
      dplyr::mutate(area=st_area(geometry))
  )
  d <- d %>%
    dplyr::mutate(area3=st_area(geometry)) %>%
    dplyr::mutate(ratio=as.numeric(area3/area2)) %>%
    dplyr::filter(ratio>0.1) %>%
    arrange(ratio)
  dd <- d %>%
    as.data.frame %>%
    dplyr::group_by(GeoUID) %>%
    summarize(ratio=sum(ratio)/n(),n=n())
  if(dd %>% dplyr::filter(n<=1) %>% nrow >0) {base::stop("problem with computing common ct data")}
  ct_translation <- lapply(split(d, d$GeoUID), function(x) x$GeoUID2)
  ct_translation2 <- lapply(split(d, d$GeoUID2), function(x) x$GeoUID)
  new2 <- data2 %>%
    dplyr::filter(GeoUID %in% cts_diff_2) %>%
    dplyr::mutate(GeoUID2=GeoUID) %>%
    dplyr::mutate(GeoUID=as.character(ct_translation2[GeoUID2])) %>%
    dplyr::group_by(GeoUID)
  nnew <- summarize_at(new2,data2_sum_vars,sum)
  data_2 <- rbind(data2 %>%
                    dplyr::filter(!(GeoUID %in% cts_diff_2)) %>%
                    dplyr::select("GeoUID",data2_sum_vars),
                  nnew)
  return(data_2)
}

#' convert vector to string to be pasted into code
#' @export
paste_vector <- function(v){
  paste0('c(',purrr::map(v,function(d)paste0('"',d,'"')) %>% unlist %>% paste0(collapse=",\n"),')') %>% cat
}

#' swap names and values in named vector
#' @export
name_flip <- function(v){
  set_names(names(v),as.character(v))
}

#' turn two columns of a tibble into a named vector
#'
#' @param data a tibble
#' @param name_column string with the name of the column to be used as names in the vector
#' @param value_column string with the name of the column to be used as values in the vector
#' @return a named vector
#' @export
to_named_vector <- function(data,name_column,value_column){
  set_names(data[[value_column]],data[[name_column]])
}

#' helper for clean map theme for sf
#' @export
map_theme <- list(
  ggplot2::theme_void(),
  ggplot2::theme(panel.grid.major = ggplot2::element_line(colour = "transparent"))
)

#' Get variables for CTs making up Old Toronto
#' @export
get_old_toronto_data <- function(dataset,vectors=c(),labels="short",geo_format=NA,also_new_toronto=FALSE,aggregate=FALSE){
  old_toronto_cts <- list(CT=c(
    "5350002.00","5350001.00","5350008.02","5350011.00","5350012.03","5350012.01","5350013.02","5350012.04","5350014.00","5350016.00",
    "5350013.01","5350015.00","5350017.00","5350029.00","5350068.00","5350034.01","5350037.00","5350041.00","5350040.00","5350039.00",
    "5350010.02","5350035.00","5350036.00","5350038.00","5350032.00","5350034.02","5350033.00","5350030.00","5350019.00","5350018.00",
    "5350031.00","5350069.00","5350028.02","5350028.01","5350027.00","5350020.00","5350026.00","5350021.00","5350022.00","5350023.00",
    "5350078.00","5350024.00","5350079.00","5350080.02","5350080.01","5350076.00","5350082.00","5350075.00","5350077.00","5350074.00",
    "5350081.00","5350007.01","5350004.00","5350047.04","5350047.02","5350005.00","5350007.02","5350008.01","5350009.00","5350010.01",
    "5350044.00","5350043.00","5350048.00","5350049.00","5350050.03","5350050.01","5350050.04","5350104.00","5350103.00","5350152.00",
    "5350105.00","5350101.00","5350106.00","5350107.00","5350111.00","5350112.00","5350113.00","5350114.00","5350116.00","5350118.00",
    "5350119.00","5350131.00","5350167.01","5350130.00","5350132.00","5350133.00","5350277.00","5350141.01","5350141.02","5350142.00",
    "5350138.00","5350139.01","5350140.00","5350139.02","5350137.00","5350127.00","5350126.00","5350125.00","5350086.00","5350067.00",
    "5350136.01","5350135.00","5350134.00","5350136.02","5350128.02","5350129.00","5350122.00","5350128.05","5350128.04","5350121.00",
    "5350124.00","5350110.00","5350108.00","5350100.00","5350102.05","5350102.04","5350102.02","5350102.03","5350099.00","5350098.00",
    "5350051.00","5350052.00","5350053.00","5350046.00","5350047.03","5350042.00","5350045.00","5350054.00","5350096.02","5350097.04",
    "5350096.01","5350109.00","5350097.01","5350097.03","5350056.00","5350055.00","5350065.02","5350064.00","5350063.05","5350061.00",
    "5350060.00","5350057.00","5350058.00","5350059.00","5350063.06","5350066.00","5350063.04","5350063.03","5350062.02","5350062.01",
    "5350087.00","5350088.00","5350089.00","5350091.01","5350092.00","5350093.00","5350094.00","5350095.00","5350115.00","5350091.02",
    "5350090.00","5350120.00","5350117.00","5350128.06","5350025.00","5350065.01","5350123.00","5350006.00","5350003.00"))
  short_cts <- sub("\\.\\d{2}$","",old_toronto_cts$CT) %>% unique
  old_toronto <- get_census(dataset,regions=list(CSD="3520005"),vectors=vectors,level="CT",labels=labels,geo_format=geo_format,quiet=TRUE) %>%
    mutate(short_ct=sub("\\.\\d{2}$","",GeoUID)) %>%
    mutate(old=short_ct %in% short_cts & GeoUID != "5350167.02") %>%
    select(-short_ct)
  if (aggregate){
    summary_vars=c("Area (sq km)","Population", "Dwellings", "Households")
    if(length(vectors)>0) {
      variable_names=names(old_toronto)[grepl(paste(vectors,collapse="|"),names(old_toronto))]
      summary_vars=c(summary_vars,variable_names)
    } else {
      old_toronto <- old_toronto %>% rename(`Area (sq km)`=`Shape Area`)
    }
    old_toronto <- old_toronto %>%
      group_by(old) %>%
      summarize_at(summary_vars,sum,na.rm=TRUE) %>%
      mutate(`Region Name`="Old Toronto", type="", GeoUID="xxx")
  }
  if (!also_new_toronto){
    old_toronto <- old_toronto %>% filter(old==TRUE) %>% select(-old)
  }
  old_toronto
}

#' Add lat long coordinates from geometry
#' @export
sfc_as_cols <- function(x, names = c("x","y")) {
  stopifnot(inherits(x,"sf") && inherits(sf::st_geometry(x),"sfc_POINT"))
  ret <- sf::st_coordinates(x)
  ret <- tibble::as_tibble(ret)
  stopifnot(length(names) == ncol(ret))
  x <- x[ , !names(x) %in% names]
  ret <- setNames(ret,names)
  dplyr::bind_cols(x,ret)
}

#' Simple key-value cache function accepting closures
#' @param object closure with return expression to be cached
#' @param key cache key
#' @param path path to cache the data
#' @param refresh bool option to force refresh of cache, default FALSE
#' @export
simpleCache <- function(object,key,path=getOption("custom_data_path"),refresh=FALSE){
  cache_path=file.path(path,key)
  if(!refresh & file.exists(cache_path)) {
    readRDS(cache_path)
  } else {
    data=object
    saveRDS(data,file=cache_path)
    data
  }
}

#' load and parse census data for a given year
#' @export
get_cov_census_data <- function(year,use_cache=TRUE){
  if (!(as.integer(year) %in% seq(2001,2016,5))) stop("Only have data for census years 2001 through 2016")
  base_name="CensusLocalAreaProfiles"
  year_name=paste0(base_name,year,".csv")
  path=paste0(getOption("custom_data_path"),year_name)
  if (!use_cache | !file.exists(path)) {
    base_data_url="https://webtransfer.vancouver.ca/opendata/csv/"
    destfile=tempfile()
    download.file(paste0(base_data_url,year_name),destfile=destfile)
    data <- read_csv(destfile,skip=4,locale=locale(encoding = "windows-1252"),na=c(NA,"..","F"),
                     col_types = cols(.default="c")) %>%
      mutate(IDX = 1:n())
    if (!("ID" %in% names(data))) {
      data <- data %>% mutate(ID=IDX)
    }
    if (!("Variable" %in% names(data))) {
      data <- data %>% rename(Variable=...1)
    }
    n <- names(data)[!grepl("^X",names(data))]
    data <- data %>%
      filter(ID!="") %>%
      select(n) %>%
      mutate(Label=Variable) %>%
      mutate(Variable=ifelse(is.na(ID),paste0("filler_",IDX),paste0("v_COV",year,"_",ID,": ",Variable))) %>%
      select(-IDX,-ID) %>%
      pivot_longer(-one_of(c("Variable","Label")),names_to="Region",values_to="Value") %>%
      mutate(Value=na_if(Value,"-")) %>%
      mutate(Value=parse_number(Value)) %>%
      mutate(Region=recode(Region,"Vancouver CMA"="Metro Vancouver","Vancouver CSD"="City of Vancouver"))
    unlink(destfile)
    # dd <- data %>% as.data.frame()
    # row.names(dd)=dd$Variable
    # d <- t(dd %>% select(-Variable))
    # region_names <- rownames(d)
    # transposed_data <- tibble::as.tibble(d) %>%
    #   dplyr::mutate_all(parse_number) %>%
    #   mutate(NAME=case_when(
    #     grepl("CSD",region_names) ~ "City of Vancouver",
    #     grepl("CMA",region_names) ~ "Metro Vancouver",
    #     TRUE ~ region_names), Year=year)
    write_csv(data,file=path)
  }
  data <- read_csv(path,col_types = cols(.default="c",Value="d"))
  # %>% inner_join(get_neighbourhood_geos(),by="NAME")
}

#' Convenience function to search for census variables
#' @export
find_cov_variables <- function(data,search_string){
  names(data)[grepl(search_string,names(data),ignore.case = TRUE)]
}

#' Get 2016 population ecumene data
#' @export
get_ecumene_2016 <- function(refresh=FALSE){
  path=file.path(getOption("custom_data_path"),"ecomene_2016")
  if (!dir.exists(path)){
    tmp <- tempfile()
    download.file("http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/files-fichiers/2016/lecu000e16a_e.zip",tmp)
    unzip(tmp,exdir=path)
    dir(path)
    unlink(tmp)
  }
  read_sf(file.path(path,"lecu000e16a_e.shp"))
}

#' generate data for waffle plots
#' @param data tibble with data
#' @param grouping_variables variables used for grouping
#' @param category column name for coloring
#' @param value column name that contains the counts
#' @param nrow number of rows in waffle
#' @param ncol number of columns in waffle
#' @export
waffle_tile <- function(data,grouping_variables,category="Type",value="Value",nrow=10,ncol=10){
  # Distribute rounding remainders so each group fills the grid exactly
  fix_remainders <- function(data,total){
    while (sum(data$diff)!=0) {
      dd <- sum(data$diff)
      s <- sign(dd)
      t <- top_n(data,1,-1 * s * rem)
      data <- data %>%
        mutate(waffleValue=ifelse(!!as.name(category)==t[[category]],waffleValue-1 * s,waffleValue),
               rem=ifelse(!!as.name(category)==t[[category]],rem+1*s,rem)) %>%
        mutate(diff=sum(waffleValue)-total)
    }
    data
  }
  total=nrow * ncol
  data %>%
    group_by_at(vars(grouping_variables)) %>%
    mutate(Total=sum(!!as.name(value),na.rm=TRUE)) %>%
    mutate(val=!!as.name(value)/Total*total) %>%
    mutate(waffleValue=round(val), rem=val-waffleValue) %>%
    mutate(diff=sum(waffleValue)-total) %>%
    do(fix_remainders(.,total)) %>%
    mutate(Value=waffleValue) %>%
    select(-waffleValue,-rem,-val,-diff) %>%
    filter(Value>0) %>%
    group_by_at(vars(c(grouping_variables,category))) %>%
    expand(counter=seq(1:Value)) %>%
    group_by_at(vars(grouping_variables)) %>%
    mutate(n=row_number()) %>%
    mutate(col=(n-1) %% ncol+1, row=floor((n-1)/ncol)+1)
}

#' waffle geometry for ggplot
#' @param color background color
#' @param size size of grid
#' @export
geom_waffle <- function(color = "white", size = 0.4,...){
  list(geom_tile(aes(x = row, y = col),color=color,size=size,...),
       theme(axis.title.x=element_blank(),
             axis.text.x=element_blank(),
             axis.ticks.x=element_blank(),
             axis.title.y=element_blank(),
             axis.text.y=element_blank(),
             axis.ticks.y=element_blank()))
}

#' @import xml2
#' @import dplyr
#' @importFrom rlang .data
#' @import readr
NULL
/R/helpers.R
no_license
mountainMath/cancensusHelpers
R
false
false
15,927
r
#' Returns dataset with age data for the specified region #' @param dataset Cancensus Datatset #' @param regions Cancensus Regions #' @param should_sum logical, should the regions be summed up or reported separately #' @param refresh logical, should cached data be refreshed #' @export get_age_data <- function(dataset,regions,should_sum=FALSE,refresh=FALSE) { long_labels <- function(data){ labels=cancensus::label_vectors(data) name_change=setNames(as.character(lapply(labels$Vector,function(x){return(labels %>% dplyr::filter(Vector==x) %>% dplyr::pull("Detail"))})), labels$Vector) new_names=as.character(lapply(names(data),function(x){ n=as.character(name_change[x]) return(ifelse(is.na(n),x,n)) })) names(data)=new_names return(data) } male <-cancensus::search_census_vectors('Total - Age',dataset,"Male", quiet=TRUE) if (nrow(male)==0||"Similarly named objects" %in% names(male)) male <-cancensus::search_census_vectors('^Male, total',dataset,"Total", quiet=TRUE) male <- male %>% cancensus::child_census_vectors(TRUE) %>% dplyr::filter(!grepl("Average",label),!grepl(" to ",label),!grepl("5 years and over",label),!grepl("Under 5 years",label)) female <-cancensus::search_census_vectors('Total - Age',dataset,"Female", quiet=TRUE) if (nrow(female)==0||"Similarly named objects" %in% names(female)) female <-cancensus::search_census_vectors('^Female, total',dataset,"Total", quiet=TRUE) female <- female %>% cancensus::child_census_vectors(TRUE) %>% dplyr::filter(!grepl("Average",label),!grepl(" to ",label),!grepl("5 years and over",label),!grepl("Under 5 years",label)) vectors <- rbind(male,female) %>% dplyr::pull("vector") male_data <- cancensus::get_census(dataset = dataset, regions=regions,level="Regions",labels="short", vectors=male$vector, quiet=TRUE, use_cache = !refresh) female_data <- cancensus::get_census(dataset = dataset, regions=regions,level="Regions",labels="short", vectors=female$vector, quiet=TRUE,use_cache = !refresh) labels=male_data %>% cancensus::label_vectors() %>% dplyr::pull("Detail") labels[labels=="100 years and over"]="100+" label_levels=c("Under 1 year",seq(1,99),"100+") data <- rbind(male_data %>% long_labels %>% dplyr::mutate(Gender="Male"), female_data %>% long_labels %>% dplyr::mutate(Gender="Female")) if ("100 years and over" %in% names(data)) { data <- data %>% dplyr::mutate(`100+`=`100 years and over`) %>% dplyr::select(c("GeoUID","Region Name","Gender",labels)) } if (should_sum) { selects <- setdiff(names(data),"Region Name") data <- data %>% dplyr::select(selects) %>% dplyr::group_by(Gender) %>% dplyr::summarize_all(sum,na.rm=TRUE) } if ("Population" %in% names(data)) data <- data %>% select(-Population) plot_data <- data %>% tidyr::pivot_longer(labels,names_to="Age", values_to ="Population") if (setdiff(unique(plot_data$Age),label_levels) %>% length()==0) { plot_data <- plot_data %>% dplyr::mutate(Age=factor(Age,levels=label_levels,ordered=TRUE)) } else { age_labels <- plot_data$Age %>% unique plot_data <- plot_data %>% dplyr::mutate(Age=factor(Age,levels=age_labels,ordered=TRUE)) } plot_data[plot_data$Gender=="Male","Population"]=-plot_data[plot_data$Gender=="Male","Population"] return (plot_data %>% select(GeoUID,`Region Name`,Gender,Age,Population)) } #' Assigns long labels #' @export detail_labels <- function(data){ ns=names(data) labels=cancensus::label_vectors(data) for (i in 1:nrow(labels)) { ns <- gsub(labels[i,"Vector"],labels[i,"Detail"],ns) } names(data) <- ns return(data) } #' @export format_currency <- function(x){paste0("$",format(x,big.mark = ","))} #' 
@export format_number <- function(x){format(x,big.mark = ",")} #' @export format_percent <- function(x,digits=1){paste0(round(x*100,digits),"%")} #' @export format_ratio <- function(x,digits=2){round(x,digits)} #' Aggregate variables to common cts, returns data2 on new tiling matching data1 geography #' @param data1 Cancensus CT level datatset for year1 < year2 to serve as base for common geography #' @param data2 Cancensus CT level datatset for year2 to be aggregated to common geography #' @param data2_sum_vars vector of variable names to by summed up when aggregating geographies #' @export common_cts <- function(data1,data2,data2_sum_vars) { cts_1 <- data1$GeoUID cts_2 <- data2$GeoUID cts_diff_1 <- setdiff(cts_1,cts_2) %>% sort cts_diff_2 <- setdiff(cts_2,cts_1) %>% sort d<-st_intersection( data2 %>% dplyr::filter(GeoUID %in% cts_diff_2) %>% rename(GeoUID2=GeoUID) %>% dplyr::select(GeoUID2) %>% dplyr::mutate(area2=st_area(geometry)), data1 %>% dplyr::filter(GeoUID %in% cts_diff_1) %>% dplyr::select(GeoUID) %>% dplyr::mutate(area=st_area(geometry)) ) d <- d %>% dplyr::mutate(area3=st_area(geometry)) %>% dplyr::mutate(ratio=as.numeric(area3/area2)) %>% dplyr::filter(ratio>0.1) %>% arrange(ratio) dd<- d %>% as.data.frame %>% dplyr::group_by(GeoUID) %>%summarize(ratio=sum(ratio)/n(),n=n()) if(dd %>% dplyr::filter(n<=1) %>% nrow >0) {base::stop("problem with computing common ct data")} ct_translation <- lapply(split(d, d$GeoUID), function(x) x$GeoUID2) ct_translation2 <- lapply(split(d, d$GeoUID2), function(x) x$GeoUID) new2 <- data2 %>% dplyr::filter(GeoUID %in% cts_diff_2) %>% dplyr::mutate(GeoUID2=GeoUID) %>% dplyr::mutate(GeoUID=as.character(ct_translation2[GeoUID2])) %>% dplyr::group_by(GeoUID) nnew <- summarize_at(new2,data2_sum_vars,sum) data_2 <- rbind(data2 %>% dplyr::filter(!(GeoUID %in% cts_diff_2)) %>% dplyr::select("GeoUID",data2_sum_vars), nnew) return(data_2) } #' convert vector to string to be pasted into code #' @export paste_vector <- function(v){ paste0('c(',purrr::map(v,function(d)paste0('"',d,'"')) %>% unlist %>% paste0(collapse=",\n"),')') %>% cat } #' swap names and values in named vector #' @export name_flip <- function(v){ set_names(names(v),as.character(v)) } #' turn two columns of a tibble into a named vector #' #' @param data a tibble #' @param name_column string with the name of the column to be used as names in the vector #' @param value_column string with the name of the column to be used as values in the vector #' @return a named vector #' @export to_named_vector <- function(data,name_column,value_column){ set_names(data[[value_column]],data[[name_column]]) } #' helper for clean map theme for sf #' @export map_theme <- list( ggplot2::theme_void(), ggplot2::theme(panel.grid.major = ggplot2::element_line(colour = "transparent")) ) #' Get variables for CTs making up Old Toronto #' @export get_old_toronto_data<-function(dataset,vectors=c(),labels="short",geo_format=NA,also_new_toronto=FALSE,aggregate=FALSE){ old_toronto_cts <- 
list(CT=c("5350002.00","5350001.00","5350008.02","5350011.00","5350012.03","5350012.01","5350013.02","5350012.04","5350014.00","5350016.00","5350013.01","5350015.00","5350017.00","5350029.00","5350068.00","5350034.01","5350037.00","5350041.00","5350040.00","5350039.00","5350010.02","5350035.00","5350036.00","5350038.00","5350032.00","5350034.02","5350033.00","5350030.00","5350019.00","5350018.00","5350031.00","5350069.00","5350028.02","5350028.01","5350027.00","5350020.00","5350026.00","5350021.00","5350022.00","5350023.00","5350078.00","5350024.00","5350079.00","5350080.02","5350080.01","5350076.00","5350082.00","5350075.00","5350077.00","5350074.00","5350081.00","5350007.01","5350004.00","5350047.04","5350047.02","5350005.00","5350007.02","5350008.01","5350009.00","5350010.01","5350044.00","5350043.00","5350048.00","5350049.00","5350050.03","5350050.01","5350050.04","5350104.00","5350103.00","5350152.00","5350105.00","5350101.00","5350106.00","5350107.00","5350111.00","5350112.00","5350113.00","5350114.00","5350116.00","5350118.00","5350119.00","5350131.00","5350167.01","5350130.00","5350132.00","5350133.00","5350277.00","5350141.01","5350141.02","5350142.00","5350138.00","5350139.01","5350140.00","5350139.02","5350137.00","5350127.00","5350126.00","5350125.00","5350086.00","5350067.00","5350136.01","5350135.00","5350134.00","5350136.02","5350128.02","5350129.00","5350122.00","5350128.05","5350128.04","5350121.00","5350124.00","5350110.00","5350108.00","5350100.00","5350102.05","5350102.04","5350102.02","5350102.03","5350099.00","5350098.00","5350051.00","5350052.00","5350053.00","5350046.00","5350047.03","5350042.00","5350045.00","5350054.00","5350096.02","5350097.04","5350096.01","5350109.00","5350097.01","5350097.03","5350056.00","5350055.00","5350065.02","5350064.00","5350063.05","5350061.00","5350060.00","5350057.00","5350058.00","5350059.00","5350063.06","5350066.00","5350063.04","5350063.03","5350062.02","5350062.01","5350087.00","5350088.00","5350089.00","5350091.01","5350092.00","5350093.00","5350094.00","5350095.00","5350115.00","5350091.02","5350090.00","5350120.00","5350117.00","5350128.06","5350025.00","5350065.01","5350123.00","5350006.00","5350003.00")) short_cts <- sub("\\.\\d{2}$","",old_toronto_cts$CT) %>% unique old_toronto <- get_census(dataset,regions=list(CSD="3520005"),vectors=vectors,level="CT",labels=labels,geo_format=geo_format,quiet=TRUE) %>% mutate(short_ct=sub("\\.\\d{2}$","",GeoUID)) %>% mutate(old=short_ct %in% short_cts & GeoUID != "5350167.02") %>% select(-short_ct) if (aggregate){ summary_vars=c("Area (sq km)","Population", "Dwellings", "Households") if(length(vectors)>0) { variable_names=names(old_toronto)[grepl(paste(vectors,collapse="|"),names(old_toronto))] summary_vars=c(summary_vars,variable_names) } else { old_toronto <- old_toronto %>% rename(`Area (sq km)`=`Shape Area`) } old_toronto <- old_toronto %>% group_by(old) %>% summarize_at(summary_vars,sum,na.rm=TRUE) %>% mutate(`Region Name`="Old Toronto", type="", GeoUID="xxx") } if (!also_new_toronto){ old_toronto <- old_toronto %>% filter(old==TRUE) %>% select(-old) } old_toronto } #' Add lat long coordiantes from geometry #' @export sfc_as_cols <- function(x, names = c("x","y")) { stopifnot(inherits(x,"sf") && inherits(sf::st_geometry(x),"sfc_POINT")) ret <- sf::st_coordinates(x) ret <- tibble::as_tibble(ret) stopifnot(length(names) == ncol(ret)) x <- x[ , !names(x) %in% names] ret <- setNames(ret,names) dplyr::bind_cols(x,ret) } #' Simple key-value cache function accepting closures #' @param 
object closure with return expression to be cached #' @param key cache key #' @param path path to cache the data #' @param refresh bool option to force refresh of cache, default FALSE #' @export simpleCache <- function(object,key,path=getOption("custom_data_path"),refresh=FALSE){ cache_path=file.path(path,key) if(!refresh & file.exists(cache_path)) { readRDS(cache_path) } else { data=object saveRDS(data,file=cache_path) data } } #' load and parse census data for a given year #' @export get_cov_census_data <- function(year,use_cache=TRUE){ if (!(as.integer(year) %in% seq(2001,2016,5))) stop("Only have data for census years 2001 through 2016") base_name="CensusLocalAreaProfiles" year_name=paste0(base_name,year,".csv") path=paste0(getOption("custom_data_path"),year_name) if (!use_cache | !file.exists(path)) { base_data_url="https://webtransfer.vancouver.ca/opendata/csv/" destfile=tempfile() download.file(paste0(base_data_url,year_name),destfile=destfile) data <- read_csv(destfile,skip=4,locale=locale(encoding = "windows-1252"),na=c(NA,"..","F"), col_types = cols(.default="c")) %>% mutate(IDX = 1:n()) if (!("ID" %in% names(data))) { data <- data %>% mutate(ID=IDX) } if (!("Variable" %in% names(data))) { data <- data %>% rename(Variable=...1) } n<- names(data)[!grepl("^X",names(data))] data <- data %>% filter(ID!="") %>% select(n) %>% mutate(Label=Variable) %>% mutate(Variable=ifelse(is.na(ID),paste0("filler_",IDX),paste0("v_COV",year,"_",ID,": ",Variable))) %>% select(-IDX,-ID) %>% pivot_longer(-one_of(c("Variable","Label")),names_to="Region",values_to="Value") %>% mutate(Value=na_if(Value,"-")) %>% mutate(Value=parse_number(Value)) %>% mutate(Region=recode(Region,"Vancouver CMA"="Metro Vancouver","Vancouver CSD"="City of Vancouver")) unlink(destfile) # dd <- data %>% as.data.frame() # row.names(dd)=dd$Variable # d <- t(dd %>% select(-Variable)) # region_names <- rownames(d) # transposed_data <- tibble::as.tibble(d) %>% # dplyr::mutate_all(parse_number) %>% # mutate(NAME=case_when( # grepl("CSD",region_names) ~ "City of Vancouver", # grepl("CMA",region_names) ~ "Metro Vancouver", # TRUE ~ region_names), Year=year) write_csv(data,file=path) } data <- read_csv(path,col_types = cols(.default="c",Value="d")) # %>% inner_join(get_neighbourhood_geos(),by="NAME") } #' Convenience function to serach for census variable #' @export find_cov_variables <- function(data,search_string){ names(data)[grepl(search_string,names(data),ignore.case = TRUE)] } #' Get 2016 population ecumene data #' @export get_ecumene_2016 <- function(refresh=FALSE){ path=file.path(getOption("custom_data_path"),"ecomene_2016") if (!dir.exists(path)){ tmp <- tempfile() download.file("http://www12.statcan.gc.ca/census-recensement/2011/geo/bound-limit/files-fichiers/2016/lecu000e16a_e.zip",tmp) unzip(tmp,exdir=path) dir(path) unlink(tmp) } read_sf(file.path(path,"lecu000e16a_e.shp")) } #' generate data for waffle plots` #' @param data tibble with data #' @param grouping_variables variables used for grouping #' @param category column name for coloring #' @param vakue column name that contains the counts #' @param nrow number of rows in waffle #' @param nrow number of columns in waffle #' @export waffle_tile <- function(data,grouping_variables,category="Type",value="Value",nrow=10,ncol=10){ fix_remainders <- function(data,total){ while (sum(data$diff)!=0) { dd <- sum(data$diff) s <- sign(dd) t <- top_n(data,1,-1 * s * rem) data <- data %>% mutate(waffleValue=ifelse(!!as.name(category)==t[[category]],waffleValue-1 * s,waffleValue), 
rem=ifelse(!!as.name(category)==t[[category]],rem+1*s,rem)) %>% mutate(diff=sum(waffleValue)-total) } data } total=nrow * ncol data %>% group_by_at(vars(grouping_variables)) %>% mutate(Total=sum(!!as.name(value),na.rm=TRUE)) %>% mutate(val=!!as.name(value)/Total*total) %>% mutate(waffleValue=round(val), rem=val-waffleValue) %>% mutate(diff=sum(waffleValue)-total) %>% do(fix_remainders(.,total)) %>% mutate(Value=waffleValue) %>% select(-waffleValue,-rem,-val,-diff) %>% filter(Value>0) %>% group_by_at(vars(c(grouping_variables,category))) %>% expand(counter=seq(1:Value)) %>% group_by_at(vars(grouping_variables)) %>% mutate(n=row_number()) %>% mutate(col=(n-1) %% ncol+1, row=floor((n-1)/ncol)+1) } #' waffle geometry for ggplot #' @param color color of the tile borders (the grid lines between tiles) #' @param size width of the grid lines between tiles #' @param ... additional arguments passed to `geom_tile()` #' @export geom_waffle <- function(color = "white", size = 0.4,...){ list(geom_tile(aes(x = row, y = col),color=color,size=size,...), theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank())) } #' @import xml2 #' @import dplyr #' @importFrom rlang .data #' @import readr NULL
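# A minimal usage sketch for simpleCache() and waffle_tile(); illustrative
# only, not part of the package source. It assumes a writable cache directory
# has been set via options(custom_data_path = ...) and uses a made-up tibble.
library(dplyr)
library(tidyr)
library(tibble)
options(custom_data_path = tempdir())
# The expression passed as `object` is only evaluated on a cache miss; the
# second call reads the saved RDS instead of re-running the slow code.
result1 <- simpleCache({ Sys.sleep(1); mtcars }, key = "mtcars_demo")
result2 <- simpleCache({ Sys.sleep(1); mtcars }, key = "mtcars_demo")
# waffle_tile() spreads each group's counts over an nrow x ncol grid of tiles:
toy <- tibble(Year = c(2016, 2016, 2021, 2021),
              Type = c("Owner", "Renter", "Owner", "Renter"),
              Value = c(70, 30, 60, 40))
waffle_data <- waffle_tile(toy, grouping_variables = "Year")
# library(ggplot2)
# ggplot(waffle_data, aes(fill = Type)) + geom_waffle() + facet_wrap(~Year)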
library(rstan)

d <- read.csv('../chap08/input/data-conc-2.txt')
N <- nrow(d)
Time <- c(1, 2, 4, 8, 12, 24)
T_new <- 60
Time_new <- seq(from=0, to=24, length=T_new)

data <- list(N=N, T=length(Time), Time=Time, Y=d[,-1], T_new=T_new, Time_new=Time_new)
fit <- stan(file='model/model8-7b.stan', data=data, seed=1234)
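# A brief, hedged follow-up (illustrative, not part of the original script):
# inspect convergence and pull posterior draws. print() and rstan::extract()
# are standard rstan calls; 'y_new' is an assumption about the generated
# quantity that model8-7b.stan declares over the Time_new grid.
print(fit)            # posterior summaries, n_eff and Rhat diagnostics
ms <- rstan::extract(fit)
# str(ms$y_new)       # draws over Time_new, if the model defines y_new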
/chap09/run-model8-7b.R
no_license
MatsuuraKentaro/RStanBook
R
false
false
315
r
# page number-216
# "More than" cumulative frequencies for class intervals of width 10.
cf <- c(80,77,72,65,55,43,28,16,10,8,0)
# Recover the class frequencies as successive differences of cf.
f <- c(cf[1]-cf[2],cf[2]-cf[3],cf[3]-cf[4],cf[4]-cf[5],cf[5]-cf[6],cf[6]-cf[7],cf[7]-cf[8],cf[8]-cf[9],cf[9]-cf[10],cf[10]-cf[11])
f
m <- c("0-10","10-20","20-30","30-40","40-50","50-60","60-70","70-80","80-90","90-100")
d <- data.frame(m,f)
d
max_f <- max(f)
max_f
# The modal class is 50-60, so its lower boundary l is 50.
l <- 50
q <- which(d$f==max_f)
# Grouped-data mode: l + (f1-f0)/((f1-f0)+(f1-f2)) * h, with class width h = 10.
mode <- l+( (d$f[q]-d$f[q-1])/ ((d$f[q]-d$f[q+1])+(d$f[q]-d$f[q-1]) ))*10
mode
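# A quick cross-check of the same computation, added as an illustration and
# not part of the textbook code: -diff() recovers the frequencies in one
# step, and plugging the neighbouring frequencies into the mode formula by
# hand gives 50 + (15-12)/((15-12)+(15-12)) * 10 = 55.
f_check <- -diff(cf)
stopifnot(all(f_check == f))
f0 <- f[q-1]; f1 <- f[q]; f2 <- f[q+1]
l + (f1 - f0) / ((f1 - f0) + (f1 - f2)) * 10   # 55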
/Codes/EX7_25.r
no_license
asquaree/Text-Book-Companion-Statistical-Methods-Vol-I-by-S.P-Gupta
R
false
false
427
r
#' Condense a spectral frequency data matrix #' #' Condense a spectral frequency data matrix #' #' This function takes a numeric matrix, whose columns are #' time, and rows are frequency, and bins columns and rows #' to produce a smaller numeric matrix. #' #' It imposes some rules on bin labels: #' #' * Columns are considered time, the label assigned to the #' new time bins is typically the "max" value from each bin, #' such that time "0.01" through "1.00" will be labeled "1.00", #' to represent "the first second bin". #' * Rows are considered frequency, where each bin will be #' labeled by the mean frequency. For example if frequencies #' "6.75" through "7.25" are binned, the output should be "7.0" #' which represents the central frequency of the binned values. #' #' Also, bins at the edges have two options: #' #' 1. "full" indicates the first bin will be full size, and all #' bins will be equal to that size. This strategy makes sense for #' time, so each time bin is the same duration. #' 2. "half" indicates the first bin will be half the width of #' subsequent bins. This strategy makes sense for frequency, to #' help preserve the frequency nearest the edge of the matrix. #' The last bin will also be half the width of intermediate bins. #' In this case the first bin is labeled by the first value, and #' the last bin is labeled by the last value. #' #' #' @family jam matrix functions #' #' @examples #' freq_m <- matrix(ncol=200, #' nrow=96, #' data=seq_len(96*200), #' dimnames=list(frequency=seq(from=1, by=0.2, length.out=96), #' time=seq(from=0, by=0.01, length.out=200))); #' #' condense_freq_matrix(freq_m, column_fixed_size=10, row_fixed_size=5)[1:20,1:20] #' #' @export condense_freq_matrix <- function (x, column_n=ncol(x), column_fixed_size=1, row_n=nrow(x), row_fixed_size=1, column_method=c("min", "max", "mean"), row_method=c("min", "mean", "max"), column_edge=c("full","half"), row_edge=c("full", "half"), column_offset=0, row_offset=0, column_pad=c(0, 0), row_pad=c(0, 0), verbose=TRUE, ...) 
{ ## Purpose is to condense a large data matrix by summarizing groups ## of row or column values column_method <- match.arg(column_method); row_method <- match.arg(row_method); column_edge <- match.arg(column_edge); row_edge <- match.arg(row_edge); ## Process column and row padding if (length(column_pad) < 2) { column_pad <- c(0, column_pad); } column_pad <- rep(column_pad, length.out=2); if (any(column_pad > 0)) { if (verbose) { jamba::printDebug("condense_freq_matrix(): ", "Processing column_pad:", column_pad); } xpaddedcols <- unlist(rep(list(1, seq_len(ncol(x)), ncol(x)), c(column_pad[1], 1, column_pad[2]))); x <- x[,xpaddedcols,drop=FALSE]; } if (length(row_pad) < 2) { row_pad <- c(0, row_pad); } row_pad <- rep(row_pad, length.out=2); if (any(row_pad > 0)) { if (verbose) { jamba::printDebug("condense_freq_matrix(): ", "Processing row_pad:", row_pad); } xpaddedrows <- unlist(rep(list(1, seq_len(nrow(x)), nrow(x)), c(row_pad[1], 1, row_pad[2]))); x <- x[xpaddedrows,,drop=FALSE]; } if (column_n < ncol(x) || column_fixed_size > 1) { ## Condense by column if (verbose) { jamba::printDebug("condense_freq_matrix(): ", "Condensing from ", ncol(x), " columns to ", column_n); } if (length(colnames(x)) == 0) { colnames(x) <- seq_len(ncol(x)); } if ("half" %in% column_edge) { col_l <- cutIntoChunks(nameVector(colnames(x)), column_n*2); } else { col_l <- cutIntoChunks(nameVector(colnames(x)), n=column_n, fixed_size=column_fixed_size); } col_values_l <- lapply(col_l, function(col_i){ as.numeric(col_i); }); if ("min" %in% column_method) { col_values <- sapply(col_values_l, min); } else if ("max" %in% column_method) { col_values <- sapply(col_values_l, max); } else if ("mean" %in% column_method) { col_values <- sapply(col_values_l, mean); } names(col_values_l) <- col_values; col_f <- factor(list2groups(col_l), levels=names(col_l)); #col_names <- rev(colnames(x))[match(unique(col_f), rev(col_f))]; #col_values <- as.numeric(col_names) + column_offset; x <- t(splicejam::shrinkMatrix(t(x), returnClass="matrix", groupBy=col_f)); colnames(x) <- as.character(col_values); } else { col_values_l <- NULL; } if (row_n < nrow(x) || row_fixed_size > 1) { ## Condense by row if (verbose) { jamba::printDebug("condense_freq_matrix(): ", "Condensing from ", nrow(x), " rows to ", row_n); } if (length(rownames(x)) == 0) { rownames(x) <- seq_len(nrow(x)); } if ("half" %in% row_edge) { row_l <- cutIntoChunks(nameVector(rownames(x)), row_n*2); row_first <- head(row_l, 1); row_last <- tail(row_l, 1); row_new <- unlist(tail(head(row_l, -1), -1)); row_l2 <- cutIntoChunks(nameVector(row_new), row_n-1); row_l2_names <- sapply(row_l2, function(i){ mean(as.numeric(i)) }); row_names <- c(as.numeric(head(row_first[[1]], 1)), row_l2_names, as.numeric(tail(row_last[[1]], 1))); row_l <- c(row_first, row_l2, row_last); names(row_l) <- row_names; row_f <- factor(list2groups(row_l), levels=names(row_l)); } else { row_l <- cutIntoChunks(nameVector(rownames(x)), n=row_n, fixed_size=row_fixed_size); row_values_l <- lapply(row_l, function(row_i){ as.numeric(row_i); }); if ("min" %in% row_method) { row_values <- sapply(row_values_l, min); } else if ("max" %in% row_method) { row_values <- sapply(row_values_l, max); } else if ("mean" %in% row_method) { row_values <- sapply(row_values_l, mean); } names(row_values_l) <- row_values; row_f <- factor(list2groups(row_l), levels=names(row_l)); #row_names <- rev(rownames(x))[match(unique(row_f), rev(row_f))]; #row_values <- as.numeric(row_names) + row_offset; row_names <- as.character(row_values); } x <- 
splicejam::shrinkMatrix(x, returnClass="matrix", groupBy=row_f); rownames(x) <- row_names; } attr(x, "col_values_l") <- col_values_l; return(x); } #' Discretize labels #' #' Discretize labels #' #' This function takes a vector of numeric values, a vector of #' target values intended to be used as labels, and returns #' a vector of labels where all unmatched target values are #' replaced with `""` blank, and matched target values use #' the target label. #' #' @family jam matrix functions #' @family jam heatmap functions #' #' @examples #' x <- seq(from=-2, to=2, by=0.2); #' x; #' xd <- discretize_labels(x, target=pretty(x)); #' data.frame(original_value=x, discretized_label=xd); #' #' @export discretize_labels <- function (x, target=NULL, pretty.n=15, ...) { ## if (!is.numeric(x)) { x <- as.numeric(x); if (all(is.na(x))) { stop("x was not numeric, nor could be coerced to numeric."); } } if (length(target) == 0) { target <- pretty(x, n=pretty.n); } ## Find closest values itarget <- t(sapply(target, function(i){ j <- which.min(abs(i-x)); c(label=i, which=j, row_value=x[j]); })); ## Append fraction difference itarget <- cbind(itarget, diff=jamba::rmNA(infiniteValue=0, abs(itarget[,"row_value"] - itarget[,"label"])/itarget[,"label"]) ); ## Filter by fraction difference from target itarget <- subset(itarget, itarget[,"diff"] <= 0.1); ## Filter for only one match per label itarget <- itarget[match(unique(itarget[,"row_value"]), itarget[,"row_value"]),,drop=FALSE]; itarget <- itarget[match(unique(itarget[,"label"]), itarget[,"label"]),,drop=FALSE]; #printDebug("itarget:");print(itarget); irowlabels <- rep("", length(x)); irowlabels[itarget[,"which"]] <- itarget[,"label"]; irowlabels; } #' Cut vector into a list of fixed size #' #' Cut vector into a list of fixed size #' #' This function provides a basic method to split a vector #' into a list of vectors of a known length, using one of #' two approaches: #' #' * when `n` is supplied, the vector is split into `n` bins #' with roughly equal number of elements in each bin. #' * when `fixed_size` is supplied, the vector is split into #' bins with size `fixed_size`, with optional `offset` used to control #' the position of the first bin. #' * when `n` is supplied and `edge_rule="half"` then the #' first and last bin are half-size, with intermediate bins full-size. #' #' Use `fixed_size` when each bin should be defined by strict unit #' values. Use `offset` to control the break points, when the first #' element might not fall on an appropriate break. #' #' Use `n` as a rapid way to bin a vector into `n` roughly equal #' pieces. #' #' Use `n` and `edge_rule="half"` when you want to bin a vector #' and want to maintain bins central to unit measures. For example #' for the vector `c(1, 1.5, 2, 2.5, 3, 3.5, 4)` if you want 4 #' bins, centered at each integer value. Note that `fixed_size` #' with `offset` might be a better alternative. #' #' @return `list` of vectors, where each vector is a subset of the #' input `x`. #' #' @param x `vector`, or any R object compatible with `base::split()`. #' @param n integer number of list elements to create. #' @param fixed_size integer number of values to include in each bin. #' When `fixed_size` is provided, `n` is ignored. #' @param edge_rule character value to define how to handle the edges, #' where `"full"` treats every bin the same, and `"half"` makes #' the first bin and last bin half-size, with full-size bins #' in between. 
The `"half"` approach is recommended in cases #' where you are trying to maintain integer values to represent #' the mean signal around each integer value. #' @param ... additional arguments are ignored. #' #' @family jam matrix functions #' #' @examples #' x <- 1:170; #' cutIntoChunks(x, n=4); #' lengths(cutIntoChunks(x, n=8)) #' #' @export cutIntoChunks <- function (x, n=1, fixed_size=1, offset=0, edge_rule=c("full", "half"), ...) { ## purpose is to split a vector into a list with length n ## by evenly distributing x across each group. edge_rule <- match.arg(edge_rule); if (length(fixed_size) > 0 && fixed_size > 1) { offset <- offset %% fixed_size; n_chunks <- ceiling((length(x) + offset) / fixed_size); x_group <- tail(head( rep(seq_len(n_chunks), each=fixed_size), length(x) + offset), length(x)); return(split(x, x_group)) } if ("full" %in% edge_rule || n < 3) { cut_breaks <- round(c(0,cumsum(rep(length(x)/n, n))[-n], length(x))); x_group <- factor(as.numeric(cut(seq_along(x), breaks=cut_breaks)), levels=seq_len(n)); split(x, x_group); } else if ("half" %in% edge_rule) { x_l <- cutIntoChunks(x, n=n * 2 - 1); x_first <- head(x_l, 1); x_last <- tail(x_l, 1); x_new <- unlist(tail(head(x_l, -1), -1)); x_l2 <- cutIntoChunks(nameVector(x_new), n-2); x_l2_names <- sapply(x_l2, function(i){ mean(as.numeric(i)) }); x_names <- c(as.numeric(head(x_first[[1]], 1)), x_l2_names, as.numeric(tail(x_last[[1]], 1))); x_l <- c(x_first, x_l2, x_last); names(x_l) <- x_names; x_f <- factor(list2groups(x_l), levels=names(x_l)); return(x_l); } } #' Convert list to groups #' #' Convert list to groups #' #' This function simply expands the `names(x)` to `lengths(x)` #' which assigns group labels to each element in `x`. #' #' @export list2groups <- function (x, ...) { ## Purpose is to expand a list if (length(x) == 0) { return(NULL); } if (length(names(x)) == 0) { names(x) <- seq_along(x); } rep(names(x), lengths(x)) } #' Heatmap of frequency-time (psd) matrix #' #' Heatmap of frequency-time (psd) matrix #' #' This function takes a numeric matrix of frequency rows, #' and time columns, and produces a heatmap showing the #' power spectral density (psd), using `ComplexHeatmap::Heatmap()`. #' #' All customizations from `ComplexHeatmap::Heatmap()` #' are available by passing arguments to `...`. #' #' This function has some arguments intended to make it easier #' to split rows and columns, as needed. #' #' @family jam matrix functions #' @family jam heatmap functions #' #' @return `Heatmap` object returned by `ComplexHeatmap::Heatmap()`. #' #' @param x numeric matrix containing frequency rows, and time #' columns, where the `rownames(x)` and `colnames(x)` are character #' representations of the numeric values. In future this function #' may use `attributes(x)` or separate function arguments. #' @param quantile_max numeric value usually between 0.8 and 0.99, #' representing the quantile used for the color ramp maximum. This #' setting `0.99` is useful when data may have extremely large #' outlier values, that could otherwise obscure the range of #' numeric values. #' @param col argument passed to `ComplexHeatmap::Heatmap()` to #' define the color ramp. When `col` is a function, it is assumed #' to conform to expectations of `ComplexHeatmap::Heatmap()`, #' specifically that output should mimic the output from #' `circlize::colorRamp2()`. #' @param row_label_n,column_label_n numeric value passed to #' `discretize_labels()` indicating the target number of labels #' to display for rows and columns, respectively. 
#' @param row_range optional numeric range used to subset the #' heatmap rows, where the numeric range is matched with the numeric #' values in `rownames(x)`. The heatmap is built, and `row_split` #' is defined as necessary, before the data is subsetted, in #' order to make sure the row split is applied relative to the #' input data. #' @param column_range optional numeric range used to subset the #' heatmap columns, where the numeric range is matched with the numeric #' values in `colnames(x)`. The heatmap is built, and `column_split` #' is defined as necessary, before the data is subsetted, in #' order to make sure the column split is applied relative to the #' input data. #' @param flip character value indicating whether to flip the y-axis, #' when `flip="y"`. #' @param row_split vector of length `nrow(x)` whose values are #' used to split the heatmap by row. When supplied, all #' other `row_split_` arguments are ignored. #' @param row_split_width numeric value used to define `row_split` #' with a fixed number of rows per split. #' @param row_split_at numeric vector indicating where to split #' the heatmap by row, based upon the numeric values of the rownames. #' @param row_split_names optional vector of names used to label #' each heatmap row group. The names are assigned in order, and #' are not recycled. Therefore, any additional row splits will #' use the original integer row split number. #' @param column_split vector of length `ncol(x)` whose values are #' used to split the heatmap by column. When supplied, all #' other `column_split_` arguments are ignored. #' @param column_split_width numeric value used to define `column_split` #' with a fixed number of columns per split. #' @param column_split_at numeric vector indicating where to split #' the heatmap by column, based upon the numeric values of the colnames. #' @param column_split_names optional vector of names used to label #' each heatmap column group. The names are assigned in order, and #' are not recycled. Therefore, any additional column splits will #' use the original integer column split number. #' @param top_annotation optional `HeatmapAnnotation` object placed above #' the heatmap; it is subsetted alongside the columns when #' `column_range` is supplied. #' @param border,row_title_rot,name arguments passed to #' `ComplexHeatmap::Heatmap()` defined here to provide #' suitable recommended default values #' which differ from the Heatmap default. #' @param ... additional arguments are passed to #' `ComplexHeatmap::Heatmap()` #' #' @examples #' #' #freq_heatmap(im, #' # row_split_at=c(6.1, 7.4, 8.7), #' # row_split_names=c("low", "low theta", "high theta", "high")) #' #' @export freq_heatmap <- function (x, quantile_max=0.99, col=jamba::getColorRamp("Reds", lens=2, n=25), row_label_n=25, column_label_n=25, row_range=NULL, column_range=NULL, flip=c("y"), row_split=NULL, row_split_width=NULL, row_split_at=NULL, row_split_names=NULL, column_split=NULL, column_split_width=NULL, column_split_at=NULL, column_split_names=NULL, top_annotation=NULL, border=TRUE, row_title_rot=0, name="psd", ...) 
{ # if ("y" %in% flip) { x <- x[nrow(x):1,,drop=FALSE]; } xcolnames <- colnames(x); xrownames <- rownames(x); ## Apply color ramp to the numeric range if (!is.function(col) && length(quantile_max) > 0) { num_range <- quantile(unlist(abs(x)), na.rm=TRUE, probs=c(1-quantile_max, quantile_max)); col <- circlize::colorRamp2( breaks=seq(from=num_range[1], to=num_range[2], length.out=25), colors=jamba::getColorRamp(col, n=25) ) } ## Process optional column breaks if (length(column_split) == 0) { if (length(column_split_width) > 0) { column_split_at <- seq(from=column_split_width, to=ceiling(max(as.numeric(xcolnames))/column_split_width)*column_split_width, by=column_split_width); } if (length(column_split_at) > 0) { column_split <- as.numeric( cut( as.numeric(xcolnames), breaks=unique(c(0, column_split_at, Inf)))); } if (length(column_split_names) > 0) { column_split <- ifelse( column_split <= length(column_split_names), column_split_names[column_split], column_split); } if (length(column_split) > 0) { column_split <- factor(column_split, levels=unique(column_split)); names(column_split) <- xcolnames; ## top annotation colors top_annotation_1 <- HeatmapAnnotation( event=anno_block(gp=gpar(fill=colorjam::rainbowJam(4))) ); } } ## Process optional row breaks if (length(row_split) == 0) { if (length(row_split_width) > 0) { row_split_at <- seq(from=row_split_width, to=ceiling(max(as.numeric(xrownames))/row_split_width)*row_split_width, by=row_split_width); } if (length(row_split_at) > 0) { row_split <- as.numeric( cut( as.numeric(xrownames), breaks=unique(sort(c(0, row_split_at, Inf))))); } if (length(row_split_names) > 0) { row_split <- ifelse( row_split <= length(row_split_names), row_split_names[row_split], row_split); } if (length(row_split) > 0) { row_split <- factor(row_split, levels=unique(row_split)); names(row_split) <- xrownames; } } ## Optional subset of the heatmap if (length(column_range) > 0) { column_range <- range(column_range); column_keep <- (as.numeric(xcolnames) >= min(column_range) & as.numeric(xcolnames) <= max(column_range)); x <- x[,column_keep,drop=FALSE]; if (length(top_annotation) > 0) { top_annotation <- top_annotation[column_keep,]; } xcolnames <- xcolnames[column_keep] column_split <- column_split[column_keep]; } if (length(row_range) > 0) { row_range <- range(row_range); row_keep <- (as.numeric(xrownames) >= min(row_range) & as.numeric(xrownames) <= max(row_range)); x <- x[row_keep,,drop=FALSE]; xrownames <- xrownames[row_keep] row_split <- row_split[row_keep]; } HM <- ComplexHeatmap::Heatmap( x, name=name, cluster_rows=FALSE, cluster_columns=FALSE, top_annotation=top_annotation, row_labels=discretize_labels(xrownames, pretty.n=row_label_n), column_labels=discretize_labels(xcolnames, pretty.n=column_label_n), column_split=column_split, row_split=row_split, row_title_rot=row_title_rot, col=col, border=border, ... ); return(HM); } #' Heatmap of frequency-time matrix for animal signal data #' #' Heatmap of frequency-time matrix for animal signal data #' #' This function queries the database to return data, then #' calls `freq_heatmap()` to create the heatmap. #' #' @family jam matrix functions #' @family jam heatmap functions #' #' @param dbxcon DBI database connection #' @param animal,project,phase,filename,channel,time_step_sec,freq_step_hz #' arguments used to filter the database table `fca_freq_matrix` in #' order to specify only one row in the results, therefore only one #' frequency-time matrix. #' @param plot logical indicating whether to plot the output. 
When #' `plot=FALSE` the `data.frame` of query results is returned, and #' no heatmap is created. This option can be helpful when querying #' the database. #' @param type character value indicating the type of output, where #' `"df"` (the default) returns the `data.frame` of query results, #' `"hm"` or `"Heatmap"` will return the Heatmap object, and `"m"` or #' `"matrix"` will return the numeric matrix itself, and not the heatmap. #' @param include_motion logical indicating whether to query the database #' for table `"animal_motion_data"` to retrieve motion data for the #' given `animal` and `phase`. When the database table exists, #' `top_annotation` will be created using #' `ComplexHeatmap::anno_barplot()` to produce a barplot at the #' top of the heatmap. For more customization, use `top_annotation`. #' @param motion_table,motion_column optional character strings with #' the database table and table column to use to retrieve motion #' data. Note that the table is expected to contain columns: #' `"animal","phase","time_sec"` as well as the `motion_column`. #' The `"time_sec"` values are expected to match the time values #' in the `"fca_freq_matrix"` table. #' @param top_annotation optional `HeatmapAnnotation` object as produced #' by `ComplexHeatmap::HeatmapAnnotation()`, suitably aligned with #' the columns of data included in the heatmap. This annotation #' will be subsetted by `freq_heatmap()` as relevant when #' `column_range` is passed to that function. #' @param column_block_num,column_block_labels integer number of column #' blocks, and optional block labels, used to color and label the #' block annotation when motion data is included. #' @param verbose logical indicating whether to print verbose output. #' @param ... additional arguments are passed to `freq_heatmap()`, #' see that function documentation for info on customizing the #' heatmap. #' #' @examples #' # db_path <- "ephys_db.sqlite"; #' # dbxcon <- dbx::dbxConnect(adapter="sqlite", dbname=db_path); #' #' ## Basic query of available data #' # head(signal_freq_heatmap(dbxcon), 20) #' #' ## Tabulate data by phase #' # table(signal_freq_heatmap(dbxcon)[,c("project","phase")]) #' # table(signal_freq_heatmap(dbxcon)[,c("animal","phase")]) #' #' ## Provide animal and phase, create heatmap #' # signal_freq_heatmap(dbxcon, animal="AF11-4", phase="Acquisition", plot=TRUE) #' #' ## Another example #' # signal_freq_heatmap(dbxcon, animal="SA88-1", phase="Acquisition", plot=TRUE) #' #' ## Zoom into a specific time range #' # signal_freq_heatmap(dbxcon, animal="SA88-1", phase="Acquisition", plot=TRUE, column_range=c(400,700)) #' #' @export signal_freq_heatmap <- function (dbxcon, animal=NULL, project=NULL, phase=NULL, filename=NULL, channel=NULL, time_step_sec=NULL, freq_step_hz=NULL, plot=FALSE, type=c("df", "hm", "Heatmap", "matrix", "m"), include_motion=FALSE, motion_table="animal_motion_data", motion_column="freeze_cnt", top_annotation=NULL, column_block_num=4, column_block_labels=NULL, verbose=FALSE, ...) { ## type <- match.arg(type); freq_df <- dbGetQuery(dbxcon, "SELECT ffm.animal, ffm.channel, ffm.filename, ffm.time_step_sec, ffm.freq_step_hz, ef.project, ef.phase FROM fca_freq_matrix ffm, ephys_file ef WHERE ffm.filename = ef.filename "); paramnames <- c("animal", "project", "phase", "channel", "filename", "time_step_sec", "freq_step_hz"); for (paramname in paramnames) { val <- get(paramname); if (length(val) > 0) { if (verbose) { jamba::printDebug("signal_freq_heatmap(): ", "Subsetting data for ", paramname, ": ", val); } freq_df <- subset(freq_df, freq_df[[paramname]] %in% val); } } if (nrow(freq_df) == 0) { jamba::printDebug("No rows were returned from the query. 
Use argument '", "verbose=TRUE", "' to debug."); } if ("df" %in% type) { return(freq_df); } if (nrow(freq_df) > 1) { jamba::printDebug("signal_freq_heatmap(): ", "Data contains ", nrow(freq_df), " rows. Please reduce query to one row."); } i <- 1; ## Get im_json im_json <- dbGetQuery(dbxcon, "SELECT im_json FROM fca_freq_matrix WHERE filename = ? and channel = ? and animal = ? and time_step_sec = ? and freq_step_hz = ?", params=list( freq_df[["filename"]][i], freq_df[["channel"]][i], freq_df[["animal"]][i], freq_df[["time_step_sec"]][i], freq_df[["freq_step_hz"]][i] ))[[1]]; if (verbose) { jamba::printDebug("signal_freq_heatmap(): ", "Converting im_json with jsonlite::fromJSON()."); } im <- jsonlite::fromJSON(im_json); ## Add attributes to help describe the data attr(im, "df") <- freq_df[i,,drop=FALSE]; ## Optionally get motion data if (include_motion && dbExistsTable(dbxcon, "animal_motion_data")) { if (verbose) { jamba::printDebug("signal_freq_heatmap(): ", "Loading motion data."); } motion_df <- dbGetQuery(dbxcon, "SELECT animal,phase,time_sec,freeze_cnt FROM animal_motion_data WHERE animal = ? and time_step_sec = ? and phase = ?", params=list( freq_df[["animal"]][i], freq_df[["time_step_sec"]][i], freq_df[["phase"]][i] )); im_column_values <- round(as.numeric(colnames(im))); im_match <- match(im_column_values, motion_df$time_sec); #im_motion <- rmNA(motion_df$freeze_cnt[im_match], 0); im_motion <- motion_df$freeze_cnt[im_match]; names(im_motion) <- colnames(im); if (verbose) { jamba::printDebug("signal_freq_heatmap(): ", "table(im_motion):"); print(table(im_motion, useNA="ifany")); } top_annotation <- HeatmapAnnotation( freeze_cnt=anno_barplot(im_motion), event=anno_block(gp=gpar(fill=colorjam::rainbowJam(column_block_num)), labels=column_block_labels, labels_gp=gpar(col=setTextContrastColor(colorjam::rainbowJam(column_block_num), useGrey=15), fontsize=10)) ); } if (any(c("m", "matrix") %in% type)) { return(im); } column_title <- paste0(freq_df[["animal"]][i], " (", freq_df[["phase"]][i], ", ", freq_df[["channel"]][i], ")"); HM <- freq_heatmap(im, top_annotation=top_annotation, column_title=column_title, ...); return(HM); }
/R/neurojam-matrix.R
no_license
jmw86069/neurojam
R
false
false
27930
r
install.packages("e1071",dependencies=TRUE) library(e1071) Ice_Cream= read.csv("Icecream.csv") index=1:nrow(Ice_Cream) index test_index=sample(index,trunc(length(index)*30/100)) test_set=Ice_Cream[test_index,] training_set=Ice_Cream[-test_index,] summary(test_set) summary(training_set) model= svm(income~ . ,data=training_set) summary(model) main="svm(training_set)" plot(training_set[,-1], main = 'SVM (training_set)') ?svm
/Ice Cream/SVM_Icecream.R
no_license
nandhini08/R-Learning
R
false
false
451
r
install.packages("e1071",dependencies=TRUE) library(e1071) Ice_Cream= read.csv("Icecream.csv") index=1:nrow(Ice_Cream) index test_index=sample(index,trunc(length(index)*30/100)) test_set=Ice_Cream[test_index,] training_set=Ice_Cream[-test_index,] summary(test_set) summary(training_set) model= svm(income~ . ,data=training_set) summary(model) main="svm(training_set)" plot(training_set[,-1], main = 'SVM (training_set)') ?svm
#' Connect to the GRCh37 Ensembl mart.
#'
#' Internal helper used by `generateHG19()`.
generateM37 <- function () {
  #biomaRt::useMart(biomart = "ENSEMBL_MART_ENSEMBL",
  #                 host = "grch37.ensembl.org",
  #                 path = "/biomart/martservice",
  biomaRt::useEnsembl(biomart = "ensembl",
                      GRCh = 37,
                      mirror = "eastusa",
                      dataset = "hsapiens_gene_ensembl")
}

#' Generate a hg19 table with biomaRt attributes
#'
#' `generateHG19()` retrieves metadata columns using biomaRt.
#'
#' Users won't need this function: the returned object ships built into the
#' package. Just run `data(hg19)`.
#'
#' @param columntributes A character vector indicating biomaRt attributes to
#'   retrieve. See:
#'   <https://www.ensembl.org/info/data/biomart/biomart_r_package.html> for
#'   help.
#'
#' @return A dataframe with each of the 'columntributes' and corresponding
#'   genomic coordinates.
generateHG19 <- function(columntributes = c("hgnc_symbol", "gene_biotype", "description")) {
  hg19 <- NA
  if (!requireNamespace("biomaRt", quietly = TRUE)) {
    warning("Please run BiocManager::install(\"biomaRt\") and retry.")
  } else {
    atributosDeseados <- c("chromosome_name", "start_position", "end_position", columntributes)
    #if (!all(sapply(c("mart37", "hg19"), `%in%`, ls()))) {
    checkOnline()
    mart37 <- generateM37()
    hg19 <- biomaRt::getBM(attributes = atributosDeseados, mart = mart37)
    #}
  }
  hg19
}
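# A short illustrative sketch, not part of the package: discover valid
# attribute names to pass as 'columntributes'. biomaRt::listAttributes() is
# the standard lookup call; these lines need network access to Ensembl, so
# they are left commented.
# mart <- generateM37()
# head(biomaRt::listAttributes(mart))
# hg19 <- generateHG19(columntributes = c("hgnc_symbol", "gene_biotype"))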
/R/generateHG19.R
no_license
adRn-s/ukbbgwas
R
false
false
1,486
r
######################################
#Importing the dataset
M1834 <- read.csv("./marathon_results_2015.csv", header = TRUE, fill = TRUE,
                  sep = ",", na.strings = "", dec = ".", strip.white = TRUE)

#Preprocessing: convert "HH:MM:SS" split times to seconds
library(lubridate)
M1834$X5K <- lubridate::period_to_seconds(hms(M1834$X5K))
M1834$X10K <- lubridate::period_to_seconds(hms(M1834$X10K))
M1834$X15K <- lubridate::period_to_seconds(hms(M1834$X15K))
M1834$X20K <- lubridate::period_to_seconds(hms(M1834$X20K))
M1834$X25K <- lubridate::period_to_seconds(hms(M1834$X25K))
M1834$X30K <- lubridate::period_to_seconds(hms(M1834$X30K))
M1834$X35K <- lubridate::period_to_seconds(hms(M1834$X35K))
M1834$X40K <- lubridate::period_to_seconds(hms(M1834$X40K))
M1834$Pace <- lubridate::period_to_seconds(hms(M1834$Pace))
M1834$Official.Time <- lubridate::period_to_seconds(hms(M1834$Official.Time))
M1834$Bib <- as.numeric(M1834$Bib)

#Filtering for our target: men aged 18-34
M1834 <- subset(M1834, M1834$M.F == "M" & M1834$Age >= 18 & M1834$Age <= 34 &
                  M1834$Official.Time < 11100)

#Delete some irrelevant columns
M1834 <- subset(M1834, select = -c(State, Citizen, X.1, Half, Proj.Time))
summary(M1834)

#Remove the rows that have null values in any column
M1834 <- na.omit(M1834)

#Compute the partial (split) times of each segment
distance <- c("X5K", "X10K", "X15K", "X20K", "X25K", "X30K", "X35K", "X40K",
              "Official.Time")
for (k in distance) {
  if (k != "X5K") {
    colName <- paste(k, "Parcial", sep = "")
    M1834[, colName] <- M1834[, k] - M1834[, temp]
    if (k == "Official.Time") {
      #final segment (42.195 - 40 = 2.195 km) approximated as 2 km
      paceName <- paste(k, "Pace", sep = "")
      M1834[, paceName] <- M1834[, colName] / 2
    } else {
      paceName <- paste(k, "Pace", sep = "")
      M1834[, paceName] <- M1834[, colName] / 5
    }
    #print(summary(M1834[,k]))
    #print(sd(M1834[,k]))
  } else {
    colName <- paste(k, "Parcial", sep = "")
    M1834[, colName] <- M1834[, k]
    paceName <- paste(k, "Pace", sep = "")
    M1834[, paceName] <- M1834[, colName] / 5
  }
  temp = k
}
names(M1834) <- make.names(names(M1834))

#Printing the summary and std of all columns in the distance vector
distance <- c("X5KParcial", "X10KParcial", "X15KParcial", "X20KParcial",
              "X25KParcial", "X30KParcial", "X35KParcial", "X40KParcial",
              "Official.TimeParcial")
for (k in distance) {
  print(k)
  print(summary(M1834[, k]))
  print(sd(M1834[, k]))
}

#Computing p
d <- sort(M1834$Official.Time)
i <- round(nrow(M1834) * 0.1)
print(d[i + 2])
TABLEV2 = as.data.frame(with(M1834, table(Age)))
print(TABLEV2[, "Freq"] / nrow(M1834))

#Computing the cumulative p
TABLEV2$cumsum <- cumsum(TABLEV2[, "Freq"] / nrow(M1834))

#Printing the result to copy-paste into GPSS
cumsums <- t(TABLEV2$cumsum)
for (k in order(cumsums)) {
  cat(cumsums[k], k + 17, sep = ",")
  cat("/")
}

#Mean pace of elite and non-elite runners
Elite <- subset(M1834, M1834$Official.Time <= 9000)
NoElite <- subset(M1834, M1834$Official.Time > 9000)
summary(Elite$Pace)
summary(NoElite$Pace)

lr <- data.frame()
distance <- c("X5KParcial", "X10KParcial", "X15KParcial", "X20KParcial",
              "X25KParcial", "X30KParcial", "X35KParcial", "X40KParcial")
for (i in distance) {
  f <- paste(i, "~ Age + Pace")
  model = lm(as.formula(f), M1834)
  lr <- rbind(lr, model$coefficients)
}
colnames(lr) <- c('B0', 'B1', 'B2')
print(lr)

model <- lm(X5KParcial ~ Age + Pace, data = M1834)
summary(model)
model <- lm(X10KParcial ~ Age + Pace, data = M1834)
summary(model)
model <- lm(X15KParcial ~ Age + Pace, data = M1834)
summary(model)
model <- lm(X20KParcial ~ Age + Pace, data = M1834)
summary(model)
model <- lm(X25KParcial ~ Age + Pace, data = M1834)
summary(model)
model <- lm(X30KParcial ~ Age + Pace, data = M1834)
summary(model)
model <- lm(X35KParcial ~ Age + Pace, data = M1834)
summary(model)
model <- lm(Official.TimeParcial ~ Age + Pace, data = M1834)
summary(model)

library(unrepx)
#(---, +--, -+-, ++-, --+, +-+, -++, +++)
answer <- c(23135.57, 22408.35, 22152.81, 16823.61, 22757.01, 22428.15,
            22540.75, 12895.36)
factors <- c("Eat", "WC", "Water")
yates(answer, factors)
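# (added illustration, not in the original) how the hms()/period_to_seconds()
# conversion used above behaves on a single "HH:MM:SS" split time:
lubridate::period_to_seconds(lubridate::hms("02:05:30"))  # 2*3600 + 5*60 + 30 = 7530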
/DataPreprocessingMale.R
no_license
oriolborrellroig/SMDE-Marathon-simulation
R
false
false
4,092
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phs.version.R
\name{phs.version}
\alias{phs.version}
\title{Gets you the full dbGap study ID of your study, with the latest version}
\usage{
phs.version(phs)
}
\arguments{
\item{phs}{dbGap study ID (phs00xxxx, or 00xxxx, or xxx)}
}
\value{
Returns the full ID of the latest version of your study
}
\description{
Gets you the full dbGap study ID of your study, with the latest version
}
\author{
Gregoire Versmee, Laura Versmee, Mikael Dusenne, Niloofar Jalali
}
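% Added illustrative example (not roxygen-generated); the study ID is a
% placeholder, but the three accepted input formats come from the argument docs
\examples{
\dontrun{
phs.version("phs000007")
phs.version("000007")
phs.version("7")
}
}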
/man/phs.version.Rd
permissive
mikaeldusenne/dbgap2x
R
false
true
540
rd
# Libraries assumed by this script (not loaded in the original file; they may
# be loaded by an earlier script in the Script7 pipeline)
library(dplyr)    # %>%, group_by, summarise
library(ggplot2)
library(ggpubr)   # ggarrange, annotate_figure, text_grob

SummarisedImputationErrors_MAE = SummarisedImputationErrors %>%
  group_by(ImputationApproach, FunctionReName, Missing) %>%
  summarise(MeanAE = mean(MeanAE, na.rm = T))

Ribbon = SummarisedImputationErrors_MAE
Ribbon = subset(Ribbon, (ImputationApproach == "Rphylopars" |
                         ImputationApproach == "Mice: mean matching + phylogeny" |
                         ImputationApproach == "Mice: regression + phylogeny" |
                         ImputationApproach == "Mice: random forest + phylogeny" |
                         ImputationApproach == "BHPMF + phylogeny"))
Ribbon$Missing = Ribbon$Missing*100
Ribbon$ImputationApproach = factor(Ribbon$ImputationApproach,
                                   levels = c("Rphylopars",
                                              "Mice: regression + phylogeny",
                                              "Mice: mean matching + phylogeny",
                                              "Mice: random forest + phylogeny",
                                              "BHPMF + phylogeny"))

NoBias = ggplot() +
  geom_smooth(data = Ribbon[which(Ribbon$FunctionReName == "Random"),],
              aes(y = MeanAE, x = Missing,
                  fill = ImputationApproach, linetype = ImputationApproach),
              alpha = 0.3, method = "lm", colour = "black") +
  facet_grid(.~FunctionReName, scale="free", space = "free_y") +
  scale_fill_manual(values = c("red2", "blue", "blue", "blue", "cyan3")) +
  #scale_colour_manual(values = c("black","black","black","black", "black")) +
  scale_linetype_manual(values = c("solid", "solid", "dotted", "dashed", "solid")) +
  scale_x_continuous(breaks = c(0, 20, 40, 60)) +
  coord_cartesian(xlim = c(0,80), ylim = c(0,5)) +
  theme_bw() +
  guides(colour=FALSE) +
  theme(
    text = element_text(size=35),
    panel.grid.major = element_blank(),
    panel.grid.minor.x = element_blank(),
    plot.title = element_text(face="bold.italic"),
    axis.title.y = element_text(face="bold"),
    legend.title=element_blank(),
    legend.spacing.x = unit(1, 'cm'),
    panel.background = element_rect(fill = NA, color = "black"),
    strip.background = element_rect(fill= "light grey", colour="black")) +
  labs(title = "No bias", x = " ", y = "Mean absolute error")

ControlledBias = ggplot() +
  geom_smooth(data = Ribbon[which(Ribbon$FunctionReName == "Trait" |
                                  Ribbon$FunctionReName == "Phylogeny"),],
              aes(y = MeanAE, x = Missing,
                  fill = ImputationApproach, linetype = ImputationApproach),
              alpha = 0.3, method = "lm", colour = "black") +
  facet_grid(.~FunctionReName, scale="free", space = "free_y") +
  scale_fill_manual(values = c("red2", "blue", "blue", "blue", "cyan3")) +
  #scale_colour_manual(values = c("black","black","black","black", "black")) +
  scale_linetype_manual(values = c("solid", "solid", "dotted", "dashed", "solid")) +
  scale_x_continuous(breaks = c(0, 20, 40, 60)) +
  coord_cartesian(xlim = c(0,80), ylim = c(0,5)) +
  theme_bw() +
  guides(colour=FALSE) +
  theme(
    text = element_text(size=35),
    panel.grid.major = element_blank(),
    panel.grid.minor.x = element_blank(),
    plot.title = element_text(face="bold.italic"),
    axis.title.y=element_blank(),
    axis.text.y=element_blank(),
    legend.title=element_blank(),
    legend.spacing.x = unit(1, 'cm'),
    panel.background = element_rect(fill = NA, color = "black"),
    strip.background = element_rect(fill=NA, colour="black")) +
  labs(title = "Controlled bias", x = " ")

WeakBias = ggplot() +
  geom_smooth(data = Ribbon[which(Ribbon$FunctionReName == "Trait " |
                                  Ribbon$FunctionReName == "Phylogeny " |
                                  Ribbon$FunctionReName == "Response" |
                                  Ribbon$FunctionReName == "Trait*Response"),],
              aes(y = MeanAE, x = Missing,
                  fill = ImputationApproach, linetype = ImputationApproach),
              alpha = 0.3, method = "lm", colour = "black") +
  facet_grid(.~FunctionReName, scale="free", space = "free_y") +
  scale_fill_manual(values = c("red2", "blue", "blue", "blue", "cyan3")) +
  #scale_colour_manual(values = c("black","black","black","black", "black")) +
  scale_linetype_manual(values = c("solid", "solid", "dotted", "dashed", "solid")) +
  scale_x_continuous(breaks = c(0, 20, 40, 60)) +
  coord_cartesian(xlim = c(0,80), ylim = c(0,5)) +
  theme_bw() +
  guides(colour=FALSE) +
  theme(
    text = element_text(size=35),
    panel.grid.major = element_blank(),
    panel.grid.minor.x = element_blank(),
    plot.title = element_text(face="bold.italic"),
    axis.title.x = element_text(face="bold"),
    axis.title.y=element_blank(),
    axis.text.y=element_blank(),
    legend.title=element_blank(),
    legend.spacing.x = unit(1, 'cm'),
    panel.background = element_rect(fill = NA, color = "black"),
    strip.background = element_rect(fill= "light grey", colour="black")) +
  labs(title = "Weak bias", x = "Missing data (%)")

SevereBias = ggplot() +
  geom_smooth(data = Ribbon[which(Ribbon$FunctionReName == "Trait " |
                                  Ribbon$FunctionReName == "Phylogeny " |
                                  Ribbon$FunctionReName == "Response " |
                                  Ribbon$FunctionReName == "Trait*Response "),],
              aes(y = MeanAE, x = Missing,
                  fill = ImputationApproach, linetype = ImputationApproach),
              alpha = 0.3, method = "lm", colour = "black") +
  facet_grid(.~FunctionReName, scale="free", space = "free_y") +
  scale_fill_manual(values = c("red2", "blue", "blue", "blue", "cyan3")) +
  #scale_colour_manual(values = c("black","black","black","black", "black")) +
  scale_linetype_manual(values = c("solid", "solid", "dotted", "dashed", "solid")) +
  scale_x_continuous(breaks = c(0, 20, 40, 60)) +
  coord_cartesian(xlim = c(0,80), ylim = c(0,5)) +
  theme_bw() +
  guides(colour=FALSE) +
  theme(
    text = element_text(size=35),
    panel.grid.major = element_blank(),
    panel.grid.minor.x = element_blank(),
    plot.title = element_text(face="bold.italic"),
    axis.title.y=element_blank(),
    axis.text.y=element_blank(),
    legend.title=element_blank(),
    legend.spacing.x = unit(1, 'cm'),
    panel.background = element_rect(fill = NA, color = "black"),
    strip.background = element_rect(fill=NA, colour="black")) +
  labs(title = "Severe bias", x = " ")

Plot = ggarrange(NoBias, ControlledBias, WeakBias, SevereBias,
                 ncol = 4, common.legend = TRUE, legend = "bottom",
                 widths = c(1.5, 2, 4, 4))
Plot = annotate_figure(Plot, top = text_grob("", color = "black", size = 50,
                                             hjust = 22))
ggsave("../Results/SummaryPlots/MAEVsMissingVsBias.png", plot = last_plot(),
       width = 90, height = 45, units = "cm")
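# (added note) the FunctionReName levels used in the four panels above differ
# only by trailing whitespace -- e.g. "Response" vs "Response " -- which appears
# to be how the upstream data distinguishes the same missingness mechanism under
# different bias severities; the subset() calls must match those strings exactly.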
/Script7_Expanded/Script7_Expanded_MeanAE_V1.0.R
no_license
GitTFJ/Handling-missing-values-in-trait-data
R
false
false
6,305
r
###############################################################################
# Author: Aparna Rajpurkar
# Date: 2.15.17
#
# Required custom scripts:
#   MainPipelineDriver.R
#   funcs.R
# Required data files:
#   Data files should be contained within a subdirectory named:
#   tuft_analysis_datafiles
#
# Purpose
#
# These scripts perform analysis for single cell analysis of Tuft cells.
# No guarantees made. These scripts were written for personal use.
# Contact: arrajpur@stanford.edu with questions.
#
# Usage: Rscript MainPipelineDriver.R
# Modify the driver file for different plots
###############################################################################

#source("funcs.R", print.eval=TRUE)

library(scran)  #needed for SingleCellExperiment
library(scater) #needed for calculateQCMetrics

ENSEMBL_MOUSE_CONVERSION_FILE <- "tuft_analysis_datafiles/mouse_ensembl_genesymbol_conversion.txt"
NORMALIZED_DATA_FILE <- "tuft_analysis_datafiles/POR_all_data.rda"
WT_COUNTS <- "tuft_analysis_datafiles/all_wt_counts.txt"
AIREKO_COUNTS <- "tuft_analysis_datafiles/aireko_counts.txt"
BATCH_INFO <- "tuft_analysis_datafiles/samples_all.txt"

conversion <- read.delim(ENSEMBL_MOUSE_CONVERSION_FILE, header=T)

convertNames <- function(gene_list, convert_from="mgi_symbol",
                         convert_to="ensembl_gene_id") {
    result <- conversion[match(gene_list, conversion[,convert_from]), convert_to]
    result
}

# get normalized data
pos_control <- "kw_tuft_142"
vars <- load(NORMALIZED_DATA_FILE)
norm.exprs <- normalized
norm.exprs <- norm.exprs[,!(colnames(norm.exprs)==pos_control)]
norm.exprs <- norm.exprs[!(grepl("ERCC", rownames(norm.exprs))),,drop=FALSE]

# get raw counts for basic metadata
wt_file <- WT_COUNTS
aire_file <- AIREKO_COUNTS
controls <- c("kw_tuft_141", "kw_tuft_142", "kw_tuft_143", "kw_tuft_144",
              "June29_KW.96.featureCounts")

DATA_WT <- read.csv(wt_file, header = T, row.names = 1, sep="\t")
DATA_AIRE <- read.csv(aire_file, header = T, row.names = 1, sep="\t")
DATA <- cbind.data.frame(DATA_WT, DATA_AIRE[rownames(DATA_WT),,drop=FALSE])

raw.counts <- DATA[,!(colnames(DATA) %in% controls),drop=FALSE]
raw.counts <- raw.counts[!(grepl("ERCC", rownames(raw.counts))),,drop=FALSE]

# get total number of cells from raw counts
numCells <- ncol(raw.counts)

# set batches
batch_info <- read.delim(BATCH_INFO, header=T, row.names=1)
batch <- batch_info[colnames(raw.counts),,drop=FALSE]

##########################################################################
# deal with the ENSEMBL gene name issue
ensembl_mouse <- rownames(raw.counts)[grepl("ENSMUSG", rownames(raw.counts))]
rownames(raw.counts) <- sub("(ENSMUSG\\d+|ERCC.+)\\.*\\d*", "\\1",
                            rownames(raw.counts), perl=TRUE)

geneNamesForMito <- convertNames(rownames(raw.counts),
                                 convert_from="ensembl_gene_id",
                                 convert_to="mgi_symbol")
# note: this matches any symbol containing "mt", not only "mt-" prefixes
mito_bool <- grepl("mt", geneNamesForMito)

# get ERCC names from the raw counts
# (note: ERCC rows were dropped from raw.counts above, so this set may be empty)
erccs_raw <- rownames(raw.counts[grepl("ERCC", rownames(raw.counts)),])
mito_raw <- rownames(raw.counts[mito_bool,])

# set phenotype data (describes cells)
pd <- data.frame(batch=batch)
rownames(pd) <- colnames(raw.counts)
colnames(pd) <- colnames(batch)

fd <- data.frame(rownames(raw.counts))
rownames(fd) <- rownames(raw.counts)
colnames(fd) <- "ensembl"

# create SingleCellExperiment to hold all data
mtecdev <- SingleCellExperiment(
    list(counts=as.matrix(raw.counts)),
    colData=pd,
    rowData=fd
)

# calculate basic QC, setting controls as ERCCs
mtecdev <- calculateQCMetrics(
    mtecdev,
    feature_controls=list(ERCC=erccs_raw, MITO=mito_raw)
)

# get phenotype data / sample metadata
sampleData <- cbind.data.frame(colData(mtecdev))
rownames(sampleData) <- colnames(mtecdev)

norm.cells <- colnames(norm.exprs)
sampleData <- sampleData[norm.cells,]
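# (added note) calculateQCMetrics() is the older scater API; in newer scater
# releases it was superseded by perCellQCMetrics()/addPerCellQC(), so this
# script assumes an older Bioconductor installation.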
/pipelineHead.R
permissive
mTEC-pipelines/tufts-pipelines
R
false
false
3,891
r
testlist <- list(a = c(5.53353523342196e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), b = numeric(0))
result <- do.call(metacoder:::euclid, testlist)
str(result)
/metacoder/inst/testfiles/euclid/AFL_euclid/euclid_valgrind_files/1615763495-test.R
permissive
akhikolla/updatedatatype-list3
R
false
false
301
r
# logistic regression

attach(trainData) # run after partitioning
glm.fit = glm(left ~ average_montly_hours + last_evaluation + time_spend_company +
                satisfaction_level + number_project,
              family = binomial)
detach(trainData)

attach(testData)
glm.probsTest = predict(glm.fit, testData, type = "response")
glm.predTest = rep(0, nrow(testData))
glm.predTest[glm.probsTest > .35] = 1
table(glm.predTest, left)  # confusion matrix
mean(glm.predTest == left) # accuracy %
detach(testData)
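# (added sketch, not in the original) the same fit without attach()/detach(),
# which avoids masking surprises by passing the data frames explicitly:
glm.fit2 <- glm(left ~ average_montly_hours + last_evaluation + time_spend_company +
                  satisfaction_level + number_project,
                family = binomial, data = trainData)
glm.probsTest2 <- predict(glm.fit2, newdata = testData, type = "response")
glm.predTest2 <- as.integer(glm.probsTest2 > .35)
table(glm.predTest2, testData$left)  # confusion matrix
mean(glm.predTest2 == testData$left) # accuracy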
/log_reg2.R
no_license
McQe3n/PrilStat
R
false
false
506
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lib.R
\name{hermitePolyCoef}
\alias{hermitePolyCoef}
\title{Get coefficient of Hermite polynomial}
\usage{
hermitePolyCoef(n)
}
\arguments{
\item{n}{Degree of Hermite polynomial to compute}
}
\value{
Vector of (n+1) coefficients from requested polynomial
}
\description{
Calculate coefficients of Hermite polynomial using recursion relation.
This function is provided for demonstration/teaching purposes; this method
is not used by gaussHermiteData. It is numerically unstable for high-degree
polynomials.
}
\seealso{
\code{\link{gaussHermiteData}}, \code{\link{aghQuad}}, \code{\link{ghQuad}}
}
\author{
Alexander W Blocker \email{ablocker@gmail.com}
}
\keyword{math}
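% Added illustrative example (not roxygen-generated); degree 3 chosen arbitrarily
\examples{
\dontrun{
hermitePolyCoef(3)  # the 4 coefficients of the degree-3 Hermite polynomial
}
}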
/fuzzedpackages/fastGHQuad/man/hermitePolyCoef.Rd
no_license
akhikolla/testpackages
R
false
true
747
rd
# Load packages
library(here)

# Set working directory
setwd(here("leptoseris_analysis/analysis/stree_asr/"))

# Define path to results, will be used to read in files in for-loop below
path <- c("~/Dropbox/osg_results/ASR/leptoseris_asr/")

# Initialize vector for number of trees
t <- c(1:1000)

# Create empty vectors and lists to hold results
files <- c()
results <- list()

# Loop through asr results and add them to a single list
for (i in 1:length(t)){
  files <- list.files(path = paste(path, "lepto_stree_asr_", i, sep = ""),
                      pattern = ".rds", full.names = T)
  results[[i]] <- readRDS(files)
}

# Save list of results
saveRDS(results, "stree-asr-lt.rds")
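# (added note) growing `results` one element per iteration reallocates the
# list each time; preallocating is the more idiomatic pattern, e.g.:
#   results <- vector("list", length(t))
#   for (i in seq_along(t)) { ... }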
/leptoseris_analysis/analysis/stree_asr/stree-asr-process.R
no_license
jagault/evolution-photosymbiosis
R
false
false
689
r
# Fitting Models to Data in R
# Colin Millar, modified from Iago Mosqueira and Ernesto Jardim

# This script shows the steps followed to fit a stock-recruitment model to
# the data in the file 'north_sea_herring_SR.csv'

#==============================================================================
# Load and explore data
#==============================================================================

# load data from comma-separated file to data.frame
herring <- read.csv(file = 'north_sea_herring_SR.csv', header = TRUE)

# take a look at what we have
head(herring) # this looks at the first 6 rows
str(herring)  # this lets us inspect what the columns contain

# let's rename some columns because I am lazy and want the code to be
# readable and easier to type
names(herring)
names(herring) <- c("yc", "ssb", "rec")
head(herring)

# to access the different columns use '$' i.e. to see the SSB values:
herring$ssb

# and to see observed recruitment
herring$rec

# initial plot of SSB vs. recruits
plot(rec ~ ssb, data = herring, # look in herring for x and y values
     xlab = 'Spawning Stock Biomass (tonnes)',
     ylab = 'Age-1 Recruitment')

# probably better to set x and y limits to start at zero
plot(rec ~ ssb, data = herring, # look in herring for x and y values
     xlim = c(0, max(ssb)),   # set x limits
     ylim = c(0, max(rec)),   # set y limits
     xlab = 'Spawning Stock Biomass (tonnes)',
     ylab = 'Age-0 Recruitment')

###############################################################################
# We are now going to demonstrate the same techniques employed in the
# spreadsheet solution to the assignment
###############################################################################

#==============================================================================
# Beverton and Holt recruitment model R = b1*S / (b2 + S)
#==============================================================================

#------------------------------------------------------------------------------
# (1) Calculate predicted R for each year
#     Rpred = b1 * S / (b2 + S)
#------------------------------------------------------------------------------

# starting values for b1 and b2
a <- 1
b <- 1

# set up the other variables (i.e. S)
S <- herring$ssb

Rpred <- a * S / (b + S)

#------------------------------------------------------------------------------
# (2) Calculate log residuals, Ln(obs/pred)
#------------------------------------------------------------------------------

# assign observed recruitment
Robs <- herring$rec

resids <- log(Robs / Rpred) # = log(Robs) - log(Rpred)

# note that in R, log is the natural log:
?log # see the help file for the log function
log(exp(1))
log(exp(10))

#------------------------------------------------------------------------------
# (3) Calculate sum of squared residuals
#------------------------------------------------------------------------------

?sum # see the help file for sum
ssq_resids <- sum(resids^2)

#------------------------------------------------------------------------------
# (4) Minimize sum-of-squares with solver by adjusting a and b
#------------------------------------------------------------------------------

# to do this, we need to set up a function that takes
# a and b as input, and returns the sums of squared residuals

# in R a function is a collection of steps: i.e.
add <- function(b1, b2) {
  b1 + b2
}
add(1, 2) # 3

# the sums of squares function is a collection of the previous 3 steps:
ssq <- function(a, b) {
  # 1. Calculate predicted R for each year
  Rpred <- a * S / (b + S)
  # 2. Calculate log residuals, Ln(obs/pred)
  resids <- log(Robs / Rpred)
  # 3. Calculate sum of squared residuals
  ssq_resids <- sum(resids^2)
  # return
  ssq_resids
}

# let's test this out:
ssq(a, b)
ssq(1, 1)
ssq(2, 1)

# now we need to search over lots of values for b1 and b2 to
# find the minimum.
# There are lots of ways to do this, we will first look at the optim function.
# the help file for optim is:
?optim

ssq_optim <- function(par) {
  a <- par[1]
  b <- par[2]
  ssq(a, b)
}

# use c to combine the starting values into a vector
?c
par0 <- c(1, 1)

# let's test the new ssq function
ssq_optim(par0)

# let's run it..
opt <- optim(par0, ssq_optim)
opt

# it didn't do so well.... let's try with different starting values:
opt <- optim(c(32000000, 300), ssq_optim)
opt
# better now :)

#------------------------------------------------------------------------------
# (5) Plot observed and predicted R
#------------------------------------------------------------------------------

# get the parameter estimates from the optimisation
a <- opt$par[1]
b <- opt$par[2]

# predict recruitment
Rpred <- a * S / (b + S)

# plot
plot(Robs ~ S,
     xlim = c(0, max(S)),    # set x limits
     ylim = c(0, max(Robs)), # set y limits
     xlab = 'Spawning Stock Biomass (tonnes)',
     ylab = 'Age-0 Recruitment')

# add predictions to the plot
points(Rpred ~ S, col = "red", pch = 2)

#------------------------------------------------------------------------------
# (6) Plot residuals
#------------------------------------------------------------------------------

# calculate residuals
resids <- log(Robs / Rpred)

# plot them
plot(resids ~ S)
# add in a reference line
abline(h = 0, lty = 2)

###############################################################################
# We are now going to demonstrate the same solution, but taking advantage of
# the tools provided by a programming / scripting language
###############################################################################

#==============================================================================
# Beverton and Holt recruitment model R = b1*S / (b2 + S)
#==============================================================================

#------------------------------------------------------------------------------
# (1) Calculate predicted R for each year
#     Rpred = b1 * S / (b2 + S)
#------------------------------------------------------------------------------

# this time we will write a function to do this called bevholt
# to be safe we will also pass in S
# this way we know for sure what values of S are being used
bevholt <- function(b, S) {
  b[1] * S / (b[2] + S)
}

# compute R at the starting values for b1 and b2
Rpred <- bevholt(c(1, 1), S = herring$ssb)

# let's jump to step 4 ...

#------------------------------------------------------------------------------
# (4) Minimize sum-of-squares with solver by adjusting b1 and b2
#------------------------------------------------------------------------------

# now let's modify the ssq function to accept S and Robs,
# and use the function bevholt

# the sums of squares function is a collection of the previous 3 steps:
ssq <- function(b, S, Robs) {
  # 1. Calculate predicted R for each year
  Rpred <- bevholt(b, S)
  # 2. Calculate log residuals, Ln(obs/pred)
  resids <- log(Robs / Rpred)
  # 3. Calculate sum of squared residuals
  ssq_resids <- sum(resids^2)
  # return
  ssq_resids
}

# let's test this out:
ssq(c(a, b), herring$ssb, herring$rec)
# what do you notice this time?
ssq(c(1, 1), herring$ssb, herring$rec)
ssq(c(2, 2), herring$ssb, herring$rec)

# now we need to search over lots of values for b1 and b2 to
# find the minimum.
ssq_optim <- function(par, S, Robs) {
  b <- exp(par)
  ssq(b, S, Robs)
}

# use c to combine the starting values into a vector
par0 <- log(c(1, 1))

# let's test the new ssq function
ssq_optim(par0, S = herring$ssb, Robs = herring$rec)

# let's run it..
opt <- optim(par0, ssq_optim, S = herring$ssb, Robs = herring$rec)
opt

# the fit is not quite there yet, so let's try better starting values.
# this highlights the presence of multiple 'local' minima
par0 <- c(20, 5)
opt <- optim(par0, ssq_optim, S = herring$ssb, Robs = herring$rec)
opt

#------------------------------------------------------------------------------
# (5) Plot observed and predicted R
#------------------------------------------------------------------------------

# predict recruitment over the full S range
Spred <- seq(0, max(herring$ssb), length.out = 100)
Rpred <- bevholt(exp(opt$par), S = Spred)

# plot
plot(rec ~ ssb, data = herring,  # pass in data this time
     xlim = c(0, max(S)),    # set x limits
     ylim = c(0, max(Robs)), # set y limits
     xlab = 'Spawning Stock Biomass (tonnes)',
     ylab = 'Age-0 Recruitment')

# add predictions to the plot as a line
lines(Rpred ~ Spred, col = "red", pch = 2)

###############################################################################
# The following is to demonstrate a technique for calculating confidence
# intervals - this is not part of the course and purely for demonstration
# purposes
###############################################################################

# Bootstrapping is so called because it is like you are achieving something
# from nothing.
#
# but in fact it is taking advantage of the fact that your sample of data
# contains information about how it varies...
#
# this can be seen from the residuals:

# let's run the fit again
fit <- optim(par0, ssq_optim, S = herring$ssb, Robs = herring$rec)

# and calculate the residuals
Rpred <- bevholt(exp(fit$par), herring$ssb)
resids <- log(herring$rec / Rpred)

# and plot a histogram
hist(resids, nclass = 20)

# the mean of the residuals is:
mean(resids)

# but is there not error in this?
# resample from this as if the residuals are random and recalculate the mean
r_star <- sample(resids, replace = TRUE)
mean(r_star)

# do it again
r_star <- sample(resids, replace = TRUE)
mean(r_star)

# do it lots of times!
rmean_star <- replicate(10000, {
  r_star <- sample(resids, replace = TRUE)
  mean(r_star)
})
hist(rmean_star)

#------------------------------------------------------------------------------
# so we are able to access the error inherent in the model fit?
#
# And we can propagate this through to the parameter estimates?
#------------------------------------------------------------------------------

# resample from the residuals as if the residuals are random and re-estimate
# the parameters
r_star <- sample(resids, replace = TRUE)
opt <- optim(par0, ssq_optim, S = herring$ssb, Robs = Rpred * exp(r_star))
opt$par

# do it again
r_star <- sample(resids, replace = TRUE)
opt <- optim(par0, ssq_optim, S = herring$ssb, Robs = Rpred * exp(r_star))
opt$par

# do it lots of times!
par_star <- replicate(10000, {
  r_star <- sample(resids, replace = TRUE)
  opt <- optim(par0, ssq_optim, S = herring$ssb, Robs = Rpred * exp(r_star),
               method = "BFGS")
  opt$par
})

# separate b1 and b2 bootstrap simulations for ease of inspection
b1_star <- exp(par_star[1,])
b2_star <- exp(par_star[2,])

# plot histograms of simulations
hist(log(b1_star), nclass = 50)
# add confidence intervals
abline(v = quantile(log(b1_star), c(0.025, 0.975)), col = "red")
quantile(b1_star, c(0.025, 0.975))

# what does the 2D bootstrap simulation look like?
plot(log(b1_star), log(b2_star), pch = ".", col = grey(.5, alpha = 0.5))

# a colourful 2d density plot
image(MASS::kde2d(log(b1_star), log(b2_star), n = 400),
      xlab = "log b1", ylab = "log b2",
      xlim = quantile(log(b1_star), c(0.025, 0.975)),
      ylim = quantile(log(b2_star), c(0.025, 0.975)),
      main = "bootstrapped uncertainty in parameter estimates")
# plot the least squares estimate
points(fit$par[1], fit$par[2], pch = 16, col = "blue")

# plot
# predict recruitment over the full S range
Spred <- seq(0, max(herring$ssb), length.out = 100)
Rpred <- apply(par_star, 2, function(x) bevholt(exp(x), S = Spred))

# plot a few curves to see the uncertainty in the relationship
matplot(Spred, Rpred[, sample(1:ncol(Rpred), 100)],
        type = "l", lty = 1, col = grey(0.5, alpha = 0.5),
        xlim = c(0, max(S)),    # set x limits
        ylim = c(0, max(Robs)), # set y limits
        xlab = 'Spawning Stock Biomass (tonnes)',
        ylab = 'Age-0 Recruitment',
        main = "bootstrapped error in stock recruitment relationship")
# add the data
points(herring$ssb, herring$rec, type = "b", pch = 16, col = "red")
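# (added sketch, not part of the original course script) a pointwise 95%
# percentile band for the fitted curve, computed from the bootstrap
# replicates in the `Rpred` matrix above and overlaid on the last plot:
Rci <- apply(Rpred, 1, quantile, probs = c(0.025, 0.975))
matlines(Spred, t(Rci), lty = 2, lwd = 2, col = "blue")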
/02_Model_fitting/02_Model_fitting.R
no_license
ices-eg/tc_tcsai2019
R
false
false
12,157
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ang_pga_rotd50_calc.R
\name{ang_pga_rotd50_calc}
\alias{ang_pga_rotd50_calc}
\title{Rotation angle of PGA RotD50 Calculation}
\usage{
ang_pga_rotd50_calc(h1, h2)
}
\arguments{
\item{h1}{A vector of time series for the 1st horizontal component}

\item{h2}{A vector of time series for the 2nd horizontal component}
}
\value{
The rotation angle for PGA RotD50
}
\description{
This function calculates the rotation angle (relative to the azimuth of h1)
of PGA RotD50
}
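% Added illustrative example (not roxygen-generated); the synthetic sinusoids
% below are placeholders for real two-component acceleration records
\examples{
\dontrun{
t <- seq(0, 10, by = 0.01)
ang_pga_rotd50_calc(h1 = sin(2 * pi * t), h2 = 0.5 * cos(2 * pi * t))
}
}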
/man/ang_pga_rotd50_calc.Rd
permissive
wltcwpf/hvsrProc
R
false
true
543
rd
#' Tmisc
#'
#' Stephen Turner's miscellaneous functions
#'
#' @author Stephen Turner
#'
#' @name Tmisc
#' @docType package
NULL

## quiets concerns of R CMD check re: the .'s that appear in pipelines
## and the "n" that is produced by dplyr::count() in a pipeline
if(getRversion() >= "2.15.1") utils::globalVariables(c(".", "n"))
/R/Tmisc-package.R
no_license
stephenturner/Tmisc
R
false
false
330
r
setwd("C:/Documents/DataScience/3. Data Cleaning/Assignments/Assignment1/UCI HAR Dataset/data") ########################################################## #TEST FILE ########################################################## #reading in test (space delimited file) test<-read.table("X_test.txt", sep="") head(test) str(test) summary(test) #reading in features.txt to get names for the 561 variables tempN<-read.csv("features.txt", sep="",header=FALSE) str(tempN) head(tempN) tail(tempN) #changing the names to a vector labels<-as.character(tempN[,2]) #naming variables in x_test using names from features.txt names(test)<-labels str(test) #reading in subject_test (ids) idTest<-read.csv("subject_test.csv",sep=",",header=FALSE) head(idTest) #renaming var but need to load the data.table package #first in order to do that library(data.table,lib.loc="c:/R/RPackages/") setnames(idTest,old=c("V1"), new=c("id")) head(idTest) #reading in y_test (activities var) actvTest<-read.csv("y_test.csv",sep=",",header=FALSE) head(actvTest) str(actvTest) table(actvTest) #renaming var here to indicate activities setnames(actvTest,old=c("V1"),new=c("actv")) head(actvTest) #merging all 3 datasets masterTest<-cbind(idTest,actvTest,test) str(masterTest) tail(masterTest) #checking to see if masterTest has the right number #of rows and columns: 2947 rows and 563 cols dim(idTest) dim(actvTest) dim(test) dim(masterTest) ########################################################## #TRAIN FILE ########################################################## #reading in training (space delimited file) train<-read.table("X_train.txt", sep="") head(train) str(train) summary(train) #naming variables in x_train using names in features.txt names(train)<-labels head(train) str(train) #reading in subject_train (ids) idTrain<-read.csv("subject_train.csv",sep=",",header=FALSE) head(idTrain) table(idTrain) str(idTrain) #renaming var but need to load the data.table package #first in order to do that setnames(idTrain,old=c("V1"), new=c("id")) head(idTrain) #reading in y_train (activities var) actvTrain<-read.csv("y_train.csv",sep=",",header=FALSE) head(actvTrain) str(actvTrain) table(actvTrain) #renaming var here to indicate activities setnames(actvTrain,old=c("V1"),new=c("actv")) head(actvTrain) #merging all 3 datasets masterTrain<-cbind(idTrain,actvTrain,train) str(masterTrain) tail(masterTrain) #Checking the dimensions fo masterTrain, which should be #7532 rows and 563 columns dim(idTrain) dim(actvTrain) dim(train) dim(masterTrain) #merging the train and test data master<-rbind(masterTest, masterTrain) head(master) #checking the dimensions of master, which should be #10299 rows and 563 cols dim(masterTest) dim(masterTrain) dim(master) #creating an unique row of index numbers for merging later #not sure if it'll be needed down tht road but creating one #just in case master$uid<-seq(1,10299,length.out=10299) #recoding activities in the activity variable using descriptive names master$activity[master$actv==1]<-"walk" master$activity[master$actv==2]<-"walkUpstairs" master$activity[master$actv==3]<-"walkDownstairs" master$activity[master$actv==4]<-"sit" master$activity[master$actv==5]<-"stand" master$activity[master$actv==6]<-"lay" table(master$actv) table(master$activity) #creating 4 small datasets to keep the axials vars separately #while the means/stds that have no axial stay on its own varsX<-c("id", "uid", "activity", "tBodyAcc-mean()-X", "tBodyAcc-std()-X", "tGravityAcc-mean()-X", "tGravityAcc-std()-X", "tBodyAccJerk-mean()-X", 
"tBodyAccJerk-std()-X", "tBodyGyro-mean()-X", "tBodyGyro-std()-X", "tBodyGyroJerk-mean()-X", "tBodyGyroJerk-std()-X", "fBodyAcc-mean()-X", "fBodyAcc-std()-X", "fBodyAccJerk-mean()-X", "fBodyAccJerk-std()-X", "fBodyGyro-mean()-X", "fBodyGyro-std()-X") datX<-master[varsX] varsY<-c("id", "uid", "activity", "tBodyAcc-mean()-Y", "tBodyAcc-std()-Y", "tGravityAcc-mean()-Y", "tGravityAcc-std()-Y", "tBodyAccJerk-mean()-Y", "tBodyAccJerk-std()-Y", "tBodyGyro-mean()-Y", "tBodyGyro-std()-Y", "tBodyGyroJerk-mean()-Y", "tBodyGyroJerk-std()-Y", "fBodyAcc-mean()-Y", "fBodyAcc-std()-Y", "fBodyAccJerk-mean()-Y", "fBodyAccJerk-std()-Y", "fBodyGyro-mean()-Y", "fBodyGyro-std()-Y") datY<-master[varsY] varsZ<-c("id", "uid", "activity", "tBodyAcc-mean()-Z", "tBodyAcc-std()-Z", "tGravityAcc-mean()-Z", "tGravityAcc-std()-Z", "tBodyAccJerk-mean()-Z", "tBodyAccJerk-std()-Z", "tBodyGyro-mean()-Z", "tBodyGyro-std()-Z", "tBodyGyroJerk-mean()-Z", "tBodyGyroJerk-std()-Z", "fBodyAcc-mean()-Z", "fBodyAcc-std()-Z", "fBodyAccJerk-mean()-Z", "fBodyAccJerk-std()-Z", "fBodyGyro-mean()-Z", "fBodyGyro-std()-Z") datZ<-master[varsZ] varsN<-c("id", "uid", "activity", "tBodyAccMag-mean()", "tBodyAccMag-std()", "tGravityAccMag-mean()", "tGravityAccMag-std()", "tBodyAccJerkMag-mean()", "tBodyAccJerkMag-std()", "tBodyGyroMag-mean()", "tBodyGyroMag-std()", "tBodyGyroJerkMag-mean()", "tBodyGyroJerkMag-std()", "fBodyAccMag-mean()", "fBodyAccMag-std()", "fBodyBodyAccJerkMag-mean()", "fBodyBodyAccJerkMag-std()", "fBodyBodyGyroMag-mean()", "fBodyBodyGyroMag-std()", "fBodyBodyGyroJerkMag-mean()", "fBodyBodyGyroJerkMag-std()") datN<-master[varsN] #renaming all the vars in the datasets that contain #info on the 3 axials with the same set of names #for stacking later datX<-rename(datX,c(oldname="newname")) names(datX)<-c("id", "uid", "activity", "tBodyAccMean", "tBodyAccStd", "tGravityAccMean", "tGravityAccStd", "tBodyAccJerkMean", "tBodyAccJerkStd", "tBodyGyroMean", "tBodyGyroStd", "tBodyGyroJerkMean", "tBodyGyroJerkStd", "fBodyAccMean", "fBodyAccStd", "fBodyAccJerkMean", "fBodyAccJerkStd", "fBodyGyroMean", "fBodyGyroStd") datY<-rename(datY,c(oldname="newname")) names(datY)<-c("id", "uid", "activity", "tBodyAccMean", "tBodyAccStd", "tGravityAccMean", "tGravityAccStd", "tBodyAccJerkMean", "tBodyAccJerkStd", "tBodyGyroMean", "tBodyGyroStd", "tBodyGyroJerkMean", "tBodyGyroJerkStd", "fBodyAccMean", "fBodyAccStd", "fBodyAccJerkMean", "fBodyAccJerkStd", "fBodyGyroMean", "fBodyGyroStd") datZ<-rename(datZ,c(oldname="newname")) names(datZ)<-c("id", "uid", "activity", "tBodyAccMean", "tBodyAccStd", "tGravityAccMean", "tGravityAccStd", "tBodyAccJerkMean", "tBodyAccJerkStd", "tBodyGyroMean", "tBodyGyroStd", "tBodyGyroJerkMean", "tBodyGyroJerkStd", "fBodyAccMean", "fBodyAccStd", "fBodyAccJerkMean", "fBodyAccJerkStd", "fBodyGyroMean", "fBodyGyroStd") datN<-rename(datN,c(oldname="newname")) names(datN)<-c("id", "uid", "activity", "tBodyAccMagMean", "tBodyAccMagStd", "tGravityAccMagMean", "tGravityAccMagStd", "tBodyAccJerkMagMean", "tBodyAccJerkMagStd", "tBodyGyroMagMean", "tBodyGyroMagStd", "tBodyGyroJerkMagMean", "tBodyGyroJerkMagStd", "fBodyAccMagMean", "fBodyAccMagStd", "fBodyBodyAccJerkMagMean", "fBodyBodyAccJerkMagStd", "fBodyBodyGyroMagMean", "fBodyBodyGyroMagStd", "fBodyBodyGyroJerkMagMean", "fBodyBodyGyroJerkMagStd") #creating a variable to indicate the axial type in each dataset datX$axial=seq(1,1,length.out=10299) datY$axial=seq(2,2,length.out=10299) datZ$axial=seq(3,3,length.out=10299) datN$axial=seq(0,0,length.out=10299) #merging data on the 3 
axials into 1 dataset tempD<-rbind(datX, datY, datZ) dim(datX) dim(datY) dim(datZ) dim(tempD) #getting averages from tempD attach(tempD) avgAxials<-aggregate(tempD, by=list(tempD$activity),FUN=mean) detach(tempD) #getting averages by activity from datN attach(datN) avgNoaxial<-aggregate(datN, by=list(datN$activity), FUN=mean) avgNoaxial detach(datN) #only keeping the avgs and activities dropA<-names(avgAxials) %in% c("id", "uid", "activity","axial") finalA<-avgAxials[!dropA] dropN<-names(avgNoaxial) %in% c("id", "uid", "activity","axial") finalN<-avgNoaxial[!dropN] #renming Group.1 in both datasets names(finalA)[names(finalA)=="Group.1"]<-"activity" names(finalN)[names(finalN)=="Group.1"]<-"activity" names(finalA) names(finalN) #merging the 2 datasets tidy<-merge(x=finalA,y=finalN, by=("activity") ,all.x=TRUE) write.table(tidy,"tidy.txt",row.names=FALSE)
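## A hedged alternative sketch (not part of the original script): the same
## mean()/std() columns can be selected programmatically instead of typing
## the varsX/varsY/varsZ/varsN vectors by hand, assuming `master` already
## carries the features.txt names as above. The pattern deliberately skips
## meanFreq() variables, matching the hand-typed lists.
measureCols <- grep("-(mean|std)\\(\\)", names(master), value = TRUE)
masterSub   <- master[c("id", "uid", "activity", measureCols)]
str(masterSub)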
/run_analysis.R
no_license
KlingonChik/Samsung
R
false
false
8,261
r
setwd("C:/Documents/DataScience/3. Data Cleaning/Assignments/Assignment1/UCI HAR Dataset/data") ########################################################## #TEST FILE ########################################################## #reading in test (space delimited file) test<-read.table("X_test.txt", sep="") head(test) str(test) summary(test) #reading in features.txt to get names for the 561 variables tempN<-read.csv("features.txt", sep="",header=FALSE) str(tempN) head(tempN) tail(tempN) #changing the names to a vector labels<-as.character(tempN[,2]) #naming variables in x_test using names from features.txt names(test)<-labels str(test) #reading in subject_test (ids) idTest<-read.csv("subject_test.csv",sep=",",header=FALSE) head(idTest) #renaming var but need to load the data.table package #first in order to do that library(data.table,lib.loc="c:/R/RPackages/") setnames(idTest,old=c("V1"), new=c("id")) head(idTest) #reading in y_test (activities var) actvTest<-read.csv("y_test.csv",sep=",",header=FALSE) head(actvTest) str(actvTest) table(actvTest) #renaming var here to indicate activities setnames(actvTest,old=c("V1"),new=c("actv")) head(actvTest) #merging all 3 datasets masterTest<-cbind(idTest,actvTest,test) str(masterTest) tail(masterTest) #checking to see if masterTest has the right number #of rows and columns: 2947 rows and 563 cols dim(idTest) dim(actvTest) dim(test) dim(masterTest) ########################################################## #TRAIN FILE ########################################################## #reading in training (space delimited file) train<-read.table("X_train.txt", sep="") head(train) str(train) summary(train) #naming variables in x_train using names in features.txt names(train)<-labels head(train) str(train) #reading in subject_train (ids) idTrain<-read.csv("subject_train.csv",sep=",",header=FALSE) head(idTrain) table(idTrain) str(idTrain) #renaming var but need to load the data.table package #first in order to do that setnames(idTrain,old=c("V1"), new=c("id")) head(idTrain) #reading in y_train (activities var) actvTrain<-read.csv("y_train.csv",sep=",",header=FALSE) head(actvTrain) str(actvTrain) table(actvTrain) #renaming var here to indicate activities setnames(actvTrain,old=c("V1"),new=c("actv")) head(actvTrain) #merging all 3 datasets masterTrain<-cbind(idTrain,actvTrain,train) str(masterTrain) tail(masterTrain) #Checking the dimensions fo masterTrain, which should be #7532 rows and 563 columns dim(idTrain) dim(actvTrain) dim(train) dim(masterTrain) #merging the train and test data master<-rbind(masterTest, masterTrain) head(master) #checking the dimensions of master, which should be #10299 rows and 563 cols dim(masterTest) dim(masterTrain) dim(master) #creating an unique row of index numbers for merging later #not sure if it'll be needed down tht road but creating one #just in case master$uid<-seq(1,10299,length.out=10299) #recoding activities in the activity variable using descriptive names master$activity[master$actv==1]<-"walk" master$activity[master$actv==2]<-"walkUpstairs" master$activity[master$actv==3]<-"walkDownstairs" master$activity[master$actv==4]<-"sit" master$activity[master$actv==5]<-"stand" master$activity[master$actv==6]<-"lay" table(master$actv) table(master$activity) #creating 4 small datasets to keep the axials vars separately #while the means/stds that have no axial stay on its own varsX<-c("id", "uid", "activity", "tBodyAcc-mean()-X", "tBodyAcc-std()-X", "tGravityAcc-mean()-X", "tGravityAcc-std()-X", "tBodyAccJerk-mean()-X", 
"tBodyAccJerk-std()-X", "tBodyGyro-mean()-X", "tBodyGyro-std()-X", "tBodyGyroJerk-mean()-X", "tBodyGyroJerk-std()-X", "fBodyAcc-mean()-X", "fBodyAcc-std()-X", "fBodyAccJerk-mean()-X", "fBodyAccJerk-std()-X", "fBodyGyro-mean()-X", "fBodyGyro-std()-X") datX<-master[varsX] varsY<-c("id", "uid", "activity", "tBodyAcc-mean()-Y", "tBodyAcc-std()-Y", "tGravityAcc-mean()-Y", "tGravityAcc-std()-Y", "tBodyAccJerk-mean()-Y", "tBodyAccJerk-std()-Y", "tBodyGyro-mean()-Y", "tBodyGyro-std()-Y", "tBodyGyroJerk-mean()-Y", "tBodyGyroJerk-std()-Y", "fBodyAcc-mean()-Y", "fBodyAcc-std()-Y", "fBodyAccJerk-mean()-Y", "fBodyAccJerk-std()-Y", "fBodyGyro-mean()-Y", "fBodyGyro-std()-Y") datY<-master[varsY] varsZ<-c("id", "uid", "activity", "tBodyAcc-mean()-Z", "tBodyAcc-std()-Z", "tGravityAcc-mean()-Z", "tGravityAcc-std()-Z", "tBodyAccJerk-mean()-Z", "tBodyAccJerk-std()-Z", "tBodyGyro-mean()-Z", "tBodyGyro-std()-Z", "tBodyGyroJerk-mean()-Z", "tBodyGyroJerk-std()-Z", "fBodyAcc-mean()-Z", "fBodyAcc-std()-Z", "fBodyAccJerk-mean()-Z", "fBodyAccJerk-std()-Z", "fBodyGyro-mean()-Z", "fBodyGyro-std()-Z") datZ<-master[varsZ] varsN<-c("id", "uid", "activity", "tBodyAccMag-mean()", "tBodyAccMag-std()", "tGravityAccMag-mean()", "tGravityAccMag-std()", "tBodyAccJerkMag-mean()", "tBodyAccJerkMag-std()", "tBodyGyroMag-mean()", "tBodyGyroMag-std()", "tBodyGyroJerkMag-mean()", "tBodyGyroJerkMag-std()", "fBodyAccMag-mean()", "fBodyAccMag-std()", "fBodyBodyAccJerkMag-mean()", "fBodyBodyAccJerkMag-std()", "fBodyBodyGyroMag-mean()", "fBodyBodyGyroMag-std()", "fBodyBodyGyroJerkMag-mean()", "fBodyBodyGyroJerkMag-std()") datN<-master[varsN] #renaming all the vars in the datasets that contain #info on the 3 axials with the same set of names #for stacking later datX<-rename(datX,c(oldname="newname")) names(datX)<-c("id", "uid", "activity", "tBodyAccMean", "tBodyAccStd", "tGravityAccMean", "tGravityAccStd", "tBodyAccJerkMean", "tBodyAccJerkStd", "tBodyGyroMean", "tBodyGyroStd", "tBodyGyroJerkMean", "tBodyGyroJerkStd", "fBodyAccMean", "fBodyAccStd", "fBodyAccJerkMean", "fBodyAccJerkStd", "fBodyGyroMean", "fBodyGyroStd") datY<-rename(datY,c(oldname="newname")) names(datY)<-c("id", "uid", "activity", "tBodyAccMean", "tBodyAccStd", "tGravityAccMean", "tGravityAccStd", "tBodyAccJerkMean", "tBodyAccJerkStd", "tBodyGyroMean", "tBodyGyroStd", "tBodyGyroJerkMean", "tBodyGyroJerkStd", "fBodyAccMean", "fBodyAccStd", "fBodyAccJerkMean", "fBodyAccJerkStd", "fBodyGyroMean", "fBodyGyroStd") datZ<-rename(datZ,c(oldname="newname")) names(datZ)<-c("id", "uid", "activity", "tBodyAccMean", "tBodyAccStd", "tGravityAccMean", "tGravityAccStd", "tBodyAccJerkMean", "tBodyAccJerkStd", "tBodyGyroMean", "tBodyGyroStd", "tBodyGyroJerkMean", "tBodyGyroJerkStd", "fBodyAccMean", "fBodyAccStd", "fBodyAccJerkMean", "fBodyAccJerkStd", "fBodyGyroMean", "fBodyGyroStd") datN<-rename(datN,c(oldname="newname")) names(datN)<-c("id", "uid", "activity", "tBodyAccMagMean", "tBodyAccMagStd", "tGravityAccMagMean", "tGravityAccMagStd", "tBodyAccJerkMagMean", "tBodyAccJerkMagStd", "tBodyGyroMagMean", "tBodyGyroMagStd", "tBodyGyroJerkMagMean", "tBodyGyroJerkMagStd", "fBodyAccMagMean", "fBodyAccMagStd", "fBodyBodyAccJerkMagMean", "fBodyBodyAccJerkMagStd", "fBodyBodyGyroMagMean", "fBodyBodyGyroMagStd", "fBodyBodyGyroJerkMagMean", "fBodyBodyGyroJerkMagStd") #creating a variable to indicate the axial type in each dataset datX$axial=seq(1,1,length.out=10299) datY$axial=seq(2,2,length.out=10299) datZ$axial=seq(3,3,length.out=10299) datN$axial=seq(0,0,length.out=10299) #merging data on the 3 
axials into 1 dataset tempD<-rbind(datX, datY, datZ) dim(datX) dim(datY) dim(datZ) dim(tempD) #getting averages from tempD attach(tempD) avgAxials<-aggregate(tempD, by=list(tempD$activity),FUN=mean) detach(tempD) #getting averages by activity from datN attach(datN) avgNoaxial<-aggregate(datN, by=list(datN$activity), FUN=mean) avgNoaxial detach(datN) #only keeping the avgs and activities dropA<-names(avgAxials) %in% c("id", "uid", "activity","axial") finalA<-avgAxials[!dropA] dropN<-names(avgNoaxial) %in% c("id", "uid", "activity","axial") finalN<-avgNoaxial[!dropN] #renming Group.1 in both datasets names(finalA)[names(finalA)=="Group.1"]<-"activity" names(finalN)[names(finalN)=="Group.1"]<-"activity" names(finalA) names(finalN) #merging the 2 datasets tidy<-merge(x=finalA,y=finalN, by=("activity") ,all.x=TRUE) write.table(tidy,"tidy.txt",row.names=FALSE)
##Read in data from 2007-02-01 to 2007-02-02
powerConsumption <- read.table("data/household_power_consumption.txt",
                               skip = 66637, sep = ";", nrows = 2880,
                               na.strings = "?", header = TRUE,
                               col.names = c("date", "time", "globalactivepower",
                                             "globalreactivepower", "voltage",
                                             "globalintensity", "submetering1",
                                             "submetering2", "submetering3"))
powerConsumption <- transform(powerConsumption,
                              date = as.Date(date, format = "%d/%m/%Y"),
                              time = as.POSIXlt(paste(date, time),
                                                format = "%d/%m/%Y %T"))

## PLOT 2 of global active power by time
png(file = "figure/plot2.png")
par(mfrow = c(1, 1), mar = c(4, 4, 4, 4), oma = c(0, 1, 2, 1),
    font.axis = 1, font.lab = 1, cex.lab = 0.8, cex.axis = 0.8)
with(powerConsumption, plot(time, globalactivepower, type = "l",
                            xlab = "", ylab = "Global Active Power (kilowatts)"))
title(main = "Plot 2", outer = TRUE, cex.main = 1.5, font.main = 2, adj = 0)
dev.off()
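## A less brittle alternative sketch (an assumption, based on the standard
## layout of the UCI household_power_consumption.txt file): read the whole
## file with its own header and filter by date, instead of hard-coding
## skip/nrows, at the cost of reading more rows up front.
pcAll <- read.table("data/household_power_consumption.txt", sep = ";",
                    header = TRUE, na.strings = "?", stringsAsFactors = FALSE)
pcSub <- subset(pcAll, Date %in% c("1/2/2007", "2/2/2007"))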
/plot2.R
no_license
mariogtr/ExData_Plotting1
R
false
false
898
r
###################
# Load libraries
###################
library(ggplot2)
library(plyr)
library(grid)

###################
# Load data
###################
Results <- subset(read.csv("~/PhD/Brown/Papers/BMj2015/Results/DST.csv"),
                  Honeypot==0) # Add _Lvl2 before .csv

###################
# Load data (bad workers removed)
###################
Results <- subset(read.csv("~/PhD/Brown/Papers/BMj2015/Results/DST.csv"),
                  Honeypot==0 &
                  !(WorkerId %in% c("A1BAEFJ2OG7NJ0","A1G2TVO0KVLT9P","A1MMCEU78GYAW8",
                                    "A1REI6H0566OKD","A1TJE3IG2C77IC","A248P03SBIGVNH",
                                    "A2HR7ZIX42FEPG","A2I7D44F4BA517","A2IICB0FDT3QE2",
                                    "A2Q6XBT9ZWRWPG","A2SVFFIM1EZI9R","A30R62KFQ9RBSA",
                                    "A38HCU1O9OS0C5","A3EVU40JQG68SY","A3GQEWG1AAT30G",
                                    "A3H35QRODK1C4K","A3HPVFAF1U9WXK","A3NQGUINZBH5CG",
                                    "A3P52V679D3Y5P","A3S5NJK6P3PGNP","A3TQZUEP9A9014",
                                    "AS109WTSIA8NU","AU48KLNN53BFS","AWY5WM7BEBZD8",
                                    "A997OZ3H2B3Q2","A27QCN3GYCZTPE","A31A4YKVSOYRVS",
                                    "A13W8W81TPORYZ","A2PXPKUKK3E0CW"))) # Add _Lvl2 before .csv

###################
# Summarise answers
###################
Question1Results <- ddply(Results, ~AbstractId, summarise,
                          Yes=sum(Question1=="Yes", na.rm=TRUE),
                          No=sum(Question1=="No", na.rm=TRUE),
                          CantTell=sum(Question1=="CantTell", na.rm=TRUE),
                          NoAnswer=sum(is.na(Question1) | Question1=='-'),
                          Relevant=mean(Relevant))
Question2Results <- ddply(Results, ~AbstractId, summarise,
                          Yes=sum(Question2=="Yes", na.rm=TRUE),
                          No=sum(Question2=="No", na.rm=TRUE),
                          CantTell=sum(Question2=="CantTell", na.rm=TRUE),
                          NoAnswer=sum(is.na(Question2) | Question2=='-'),
                          Relevant=mean(Relevant))
Question3Results <- ddply(Results, ~AbstractId, summarise,
                          Yes=sum(Question3=="Yes", na.rm=TRUE),
                          No=sum(Question3=="No", na.rm=TRUE),
                          CantTell=sum(Question3=="CantTell", na.rm=TRUE),
                          NoAnswer=sum(is.na(Question3) | Question3=='-'),
                          Relevant=mean(Relevant))
Question4Results <- ddply(Results, ~AbstractId, summarise,
                          Yes=sum(Question4=="Yes", na.rm=TRUE),
                          No=sum(Question4=="No", na.rm=TRUE),
                          CantTell=sum(Question4=="CantTell", na.rm=TRUE),
                          NoAnswer=sum(is.na(Question4) | Question4=='-'),
                          Relevant=mean(Relevant))

###################
# Majority rule
###################
Question1Positive <- subset(Question1Results, (Yes+CantTell) >= (Yes+No+CantTell+NoAnswer)/2)
Question1Negative <- subset(Question1Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer)/2)
Question2Positive <- subset(Question2Results, (Yes+CantTell) >= (Yes+No+CantTell+NoAnswer)/2)
Question2Negative <- subset(Question2Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer)/2)
Question3Positive <- subset(Question3Results, (Yes+CantTell) >= (Yes+No+CantTell+NoAnswer)/2)
Question3Negative <- subset(Question3Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer)/2)
Question4Positive <- subset(Question4Results, (Yes+CantTell) >= (Yes+No+CantTell+NoAnswer)/2)
Question4Negative <- subset(Question4Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer)/2)

###################
# 1p / SI rule
###################
Question1Positive <- subset(Question1Results, (Yes+CantTell) >= 1)
Question1Negative <- subset(Question1Results, (Yes+CantTell) == 0)
Question2Positive <- subset(Question2Results, (Yes+CantTell) >= 1)
Question2Negative <- subset(Question2Results, (Yes+CantTell) == 0)
Question3Positive <- subset(Question3Results, (Yes+CantTell) >= 1)
Question3Negative <- subset(Question3Results, (Yes+CantTell) == 0)
Question4Positive <- subset(Question4Results, (Yes+CantTell) >= 1)
Question4Negative <- subset(Question4Results, (Yes+CantTell) == 0)

###################
# 2p rule
###################
Question1Positive <- subset(Question1Results, (Yes+CantTell) >= pmin(2,(Yes+No+CantTell+NoAnswer)))
Question1Negative <- subset(Question1Results, (Yes+CantTell) <  pmin(2,(Yes+No+CantTell+NoAnswer)))
Question2Positive <- subset(Question2Results, (Yes+CantTell) >= pmin(2,(Yes+No+CantTell+NoAnswer)))
Question2Negative <- subset(Question2Results, (Yes+CantTell) <  pmin(2,(Yes+No+CantTell+NoAnswer)))
Question3Positive <- subset(Question3Results, (Yes+CantTell) >= pmin(2,(Yes+No+CantTell+NoAnswer)))
Question3Negative <- subset(Question3Results, (Yes+CantTell) <  pmin(2,(Yes+No+CantTell+NoAnswer)))
Question4Positive <- subset(Question4Results, (Yes+CantTell) >= pmin(2,(Yes+No+CantTell+NoAnswer)))
Question4Negative <- subset(Question4Results, (Yes+CantTell) <  pmin(2,(Yes+No+CantTell+NoAnswer)))

###################
# 3p rule
###################
Question1Positive <- subset(Question1Results, (Yes+CantTell) >= pmin(3,(Yes+No+CantTell+NoAnswer)))
Question1Negative <- subset(Question1Results, (Yes+CantTell) <  pmin(3,(Yes+No+CantTell+NoAnswer)))
Question2Positive <- subset(Question2Results, (Yes+CantTell) >= pmin(3,(Yes+No+CantTell+NoAnswer)))
Question2Negative <- subset(Question2Results, (Yes+CantTell) <  pmin(3,(Yes+No+CantTell+NoAnswer)))
Question3Positive <- subset(Question3Results, (Yes+CantTell) >= pmin(3,(Yes+No+CantTell+NoAnswer)))
Question3Negative <- subset(Question3Results, (Yes+CantTell) <  pmin(3,(Yes+No+CantTell+NoAnswer)))
Question4Positive <- subset(Question4Results, (Yes+CantTell) >= pmin(3,(Yes+No+CantTell+NoAnswer)))
Question4Negative <- subset(Question4Results, (Yes+CantTell) <  pmin(3,(Yes+No+CantTell+NoAnswer)))

###################
# 4p rule
###################
Question1Positive <- subset(Question1Results, (Yes+CantTell) >= pmin(4,(Yes+No+CantTell+NoAnswer)))
Question1Negative <- subset(Question1Results, (Yes+CantTell) <  pmin(4,(Yes+No+CantTell+NoAnswer)))
Question2Positive <- subset(Question2Results, (Yes+CantTell) >= pmin(4,(Yes+No+CantTell+NoAnswer)))
Question2Negative <- subset(Question2Results, (Yes+CantTell) <  pmin(4,(Yes+No+CantTell+NoAnswer)))
Question3Positive <- subset(Question3Results, (Yes+CantTell) >= pmin(4,(Yes+No+CantTell+NoAnswer)))
Question3Negative <- subset(Question3Results, (Yes+CantTell) <  pmin(4,(Yes+No+CantTell+NoAnswer)))
Question4Positive <- subset(Question4Results, (Yes+CantTell) >= pmin(4,(Yes+No+CantTell+NoAnswer)))
Question4Negative <- subset(Question4Results, (Yes+CantTell) <  pmin(4,(Yes+No+CantTell+NoAnswer)))

###################
# 5p / AI rule
###################
Question1Positive <- subset(Question1Results, (Yes+CantTell) == (Yes+No+CantTell+NoAnswer))
Question1Negative <- subset(Question1Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer))
Question2Positive <- subset(Question2Results, (Yes+CantTell) == (Yes+No+CantTell+NoAnswer))
Question2Negative <- subset(Question2Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer))
Question3Positive <- subset(Question3Results, (Yes+CantTell) == (Yes+No+CantTell+NoAnswer))
Question3Negative <- subset(Question3Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer))
Question4Positive <- subset(Question4Results, (Yes+CantTell) == (Yes+No+CantTell+NoAnswer))
Question4Negative <- subset(Question4Results, (Yes+CantTell) <  (Yes+No+CantTell+NoAnswer))

########################
# Majority Question rule
########################
MQres <- ddply(Results, ~AbstractId, summarise,
               q1p=sum(Question1=='Yes' | Question1=='CantTell'), q1t=length(Question1),
               q2p=sum(Question2=='Yes' | Question2=='CantTell'), q2t=length(Question2),
               q3p=sum(Question3=='Yes' | Question3=='CantTell'), q3t=length(Question3),
               q4p=sum(Question4=='Yes' | Question4=='CantTell'), q4t=length(Question4),
               Relevant=mean(Relevant))
MQPositive <- subset(MQres, q1p+q2p+q3p+q4p >  ((q1t+q2t+q3t+q4t)/2))
MQNegative <- subset(MQres, q1p+q2p+q3p+q4p <= ((q1t+q2t+q3t+q4t)/2))
nrow(subset(MQPositive, Relevant==1)) /
  nrow(subset(ddply(Results, ~AbstractId, summarise, relevant=mean(Relevant)), relevant==1))
nrow(subset(MQNegative, Relevant==0)) /
  (nrow(subset(MQNegative, Relevant==0)) + nrow(subset(MQPositive, Relevant==0)))

###################
# Champion rule
###################
Cres <- ddply(Results, ~AbstractId, summarise,
              q1p=sum(Question1=='Yes' | Question1=='CantTell'), q1t=length(Question1[Question1!='-']),
              q2p=sum(Question2=='Yes' | Question2=='CantTell'), q2t=length(Question2[Question2!='-']),
              q3p=sum(Question3=='Yes' | Question3=='CantTell'), q3t=length(Question3[Question3!='-']),
              q4p=sum(Question4=='Yes' | Question4=='CantTell'), q4t=length(Question4[Question4!='-']),
              Relevant=mean(Relevant))
Question1Positive <- subset(Cres, q1p >= q1t/2)
Question1Negative <- subset(Cres, q1p <  pmax(1,q1t/2))
Question2Positive <- subset(Question1Positive, q2p >= q2t/2)
Question2Negative <- rbind(subset(Question1Positive, q2p < pmax(1,q2t/2)), Question1Negative)
Question3Positive <- subset(Question2Positive, q3p >= q3t/2)
Question3Negative <- rbind(subset(Question2Positive, q3p < pmax(1,q3t/2)), Question2Negative)
Question4Positive <- subset(Question3Positive, q4p >= q4t/2)
Question4Negative <- rbind(subset(Question3Positive, q4p < pmax(1,q4t/2)), Question3Negative)

###################
# Champion DR rule
###################
Cres <- ddply(Results, ~AbstractId, summarise,
              q1p=sum(Question1=='Yes' | Question1=='CantTell'), q1t=length(Question1[Question1!='-']),
              q2p=sum(Question2=='Yes' | Question2=='CantTell'), q2t=length(Question2[Question2!='-']),
              q3p=sum(Question3=='Yes' | Question3=='CantTell'), q3t=length(Question3[Question3!='-']),
              q4p=sum(Question4=='Yes' | Question4=='CantTell'), q4t=length(Question4[Question4!='-']),
              Relevant=mean(Relevant))
Question1Positive <- subset(Cres, q1p >= pmax(pmin(5/2,q1t/2),0))
Question1Negative <- subset(Cres, q1p <  pmax(pmin(5/2,q1t/2),0))
Question2Positive <- subset(Question1Positive, q2p >= pmax(pmin(4/2,q2t/2),0))
Question2Negative <- rbind(subset(Question1Positive, q2p < pmax(pmin(4/2,q2t/2),0)), Question1Negative)
Question3Positive <- subset(Question2Positive, q3p >= pmax(pmin(3/2,q3t/2),0))
Question3Negative <- rbind(subset(Question2Positive, q3p < pmax(pmin(3/2,q3t/2),0)), Question2Negative)
Question4Positive <- subset(Question3Positive, q4p >= 1)
Question4Negative <- rbind(subset(Question3Positive, q4p < 1), Question3Negative)

###################
# Sensitivity and Specificity
###################
nrow(subset(Question1Positive, Relevant==1)) /
  nrow(subset(ddply(Results, ~AbstractId, summarise, relevant=mean(Relevant)), relevant==1))
nrow(subset(Question1Negative, Relevant==0)) /
  (nrow(subset(Question1Negative, Relevant==0)) + nrow(subset(Question1Positive, Relevant==0)))
nrow(subset(Question2Positive, Relevant==1)) /
  nrow(subset(ddply(Results, ~AbstractId, summarise, relevant=mean(Relevant)), relevant==1))
nrow(subset(Question2Negative, Relevant==0)) /
  (nrow(subset(Question2Negative, Relevant==0)) + nrow(subset(Question2Positive, Relevant==0)))
nrow(subset(Question3Positive, Relevant==1)) /
  nrow(subset(ddply(Results, ~AbstractId, summarise, relevant=mean(Relevant)), relevant==1))
nrow(subset(Question3Negative, Relevant==0)) /
  (nrow(subset(Question3Negative, Relevant==0)) + nrow(subset(Question3Positive, Relevant==0)))
nrow(subset(Question4Positive, Relevant==1)) /
  nrow(subset(ddply(Results, ~AbstractId, summarise, relevant=mean(Relevant)), relevant==1))
nrow(subset(Question4Negative, Relevant==0)) /
  (nrow(subset(Question4Negative, Relevant==0)) + nrow(subset(Question4Positive, Relevant==0)))
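## A helper sketch (not in the original analysis) factoring out the repeated
## sensitivity/specificity arithmetic above; it assumes the QuestionNPositive /
## QuestionNNegative frames and `Results` as built earlier.
totalRelevant <- nrow(subset(ddply(Results, ~AbstractId, summarise,
                                   relevant = mean(Relevant)), relevant == 1))
sensSpec <- function(pos, neg) {
  tn <- nrow(subset(neg, Relevant == 0))   # true negatives
  fp <- nrow(subset(pos, Relevant == 0))   # false positives
  c(sensitivity = nrow(subset(pos, Relevant == 1)) / totalRelevant,
    specificity = tn / (tn + fp))
}
sensSpec(Question1Positive, Question1Negative)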
/Results/DST_processing.r
no_license
bwallace/crowd-sourced-ebm
R
false
false
10,925
r
#' @title Fill a forecasting directory with basic components.
#'
#' @description Fill the directory with foundational components. \cr \cr
#'  \code{fill_dir} combines \code{fill_raw}, \code{fill_casts},
#'  \code{fill_models}, and \code{fill_data}. \cr \cr
#'  \code{fill_raw} downloads the raw data and archive and updates the
#'  directory configuration metadata accordingly. \cr \cr
#'  \code{fill_casts} moves the historic casts from the archive into the
#'  current directory. \cr \cr
#'  \code{fill_models} writes out the model scripts to the models
#'  subdirectory. \cr \cr
#'  \code{fill_data} prepares model-ready data from the raw data.
#'
#' @details Arguments input directly here take precedence over those in the
#'  \code{downloads} \code{list}.
#'
#' @param main \code{character} value of the name of the main component of
#'  the directory tree.
#'
#' @param models \code{character} vector of name(s) of model(s) to
#'  include.
#'
#' @param end_moon \code{integer} (or integer \code{numeric}) newmoon number
#'  of the last sample to be included. Default value is \code{NULL}, which
#'  equates to the most recently included sample.
#'
#' @param lead_time \code{integer} (or integer \code{numeric}) value for the
#'  number of timesteps forward a cast will cover.
#'
#' @param cast_date \code{Date} from which the future is defined (the origin
#'  of the cast). In recurring forecasting, this is set to today's date
#'  using \code{\link{Sys.Date}}.
#'
#' @param start_moon \code{integer} (or integer \code{numeric}) newmoon number
#'  of the first sample to be included. Default value is \code{217},
#'  corresponding to \code{1995-01-01}.
#'
#' @param confidence_level \code{numeric} confidence level used in
#'  summarizing model output. Must be between \code{0} and \code{1}.
#'
#' @param controls_model Additional controls for models not in the prefab
#'  set. \cr
#'  A \code{list} of a single model's script-writing controls or a
#'  \code{list} of \code{list}s, each of which is a single model's
#'  script-writing controls. \cr
#'  Presently, each model's script-writing controls should include three
#'  elements: \code{name} (a \code{character} value of the model name),
#'  \code{covariates} (a \code{logical} indicator of if the model needs
#'  covariates), and \code{lag} (an \code{integer}-conformable value of the
#'  lag to use with the covariates or \code{NA} if
#'  \code{covariates = FALSE}). \cr
#'  If only a single model is added, the name of the model from the element
#'  \code{name} will be used to name the model's \code{list} in the larger
#'  \code{list}. If multiple models are added, each element \code{list} must
#'  be named according to the model and the \code{name} element. \cr
#'
#' @param controls_rodents Control \code{list} or \code{list} of \code{list}s
#'  (from \code{\link{rodents_controls}}) specifying the structuring of the
#'  rodents tables. See \code{\link{rodents_controls}} for details.
#'
#' @param control_climate_dl \code{list} of specifications for the download,
#'  which are sent to \code{\link{NMME_urls}} to create the specific URLs.
#'  See \code{\link{climate_dl_control}}.
#'
#' @param control_files \code{list} of names of the folders and files within
#'  the sub directories and saving strategies (save, overwrite, append,
#'  etc.). Generally shouldn't need to be edited. See
#'  \code{\link{files_control}}.
#'
#' @param downloads \code{list} of arguments to pass to \code{\link{download}}
#'  for raw file downloading.
#'
#' @param quiet \code{logical} indicator if progress messages should be
#'  quieted.
#'
#' @param verbose \code{logical} indicator of whether to print out all of the
#'  information (as opposed to just the tidy messages).
#'
#' @param arg_checks \code{logical} value of if the arguments should be
#'  checked using standard protocols via \code{\link{check_args}}. The
#'  default (\code{arg_checks = TRUE}) ensures that all inputs are
#'  formatted correctly and provides directed error messages if not.
#'
#' @return All \code{fill_} functions return \code{NULL}.
#'
#' @examples
#'  \donttest{
#'   create_dir()
#'   fill_dir()
#'   fill_raw()
#'   fill_casts()
#'   fill_models()
#'   fill_data()
#'  }
#'
#' @name fill_directory
#'
NULL

#' @rdname fill_directory
#'
#' @export
#'
fill_dir <- function(main = ".", models = prefab_models(), end_moon = NULL,
                     start_moon = 217, lead_time = 12,
                     confidence_level = 0.95, cast_date = Sys.Date(),
                     controls_model = NULL,
                     controls_rodents = rodents_controls(),
                     control_climate_dl = climate_dl_control(),
                     control_files = files_control(),
                     downloads = zenodo_downloads(c("1215988", "833438")),
                     quiet = FALSE, verbose = FALSE, arg_checks = TRUE){
  check_args(arg_checks)
  messageq("Filling directory with standard content", quiet)
  fill_raw(main = main, downloads = downloads, quiet = quiet,
           control_files = control_files, arg_checks = arg_checks)
  fill_casts(main = main, quiet = quiet, verbose = verbose,
             control_files = control_files, arg_checks = arg_checks)
  fill_models(main = main, models = models,
              controls_model = controls_model, quiet = quiet,
              verbose = verbose, control_files = control_files,
              arg_checks = arg_checks)
  fill_data(main = main, models = models, end_moon = end_moon,
            lead_time = lead_time, cast_date = cast_date,
            start_moon = start_moon, confidence_level = confidence_level,
            controls_rodents = controls_rodents,
            controls_model = controls_model,
            control_climate_dl = control_climate_dl,
            downloads = downloads, control_files = control_files,
            quiet = quiet, verbose = verbose, arg_checks = arg_checks)
}

#' @rdname fill_directory
#'
#' @export
#'
fill_data <- function(main = ".", models = prefab_models(), end_moon = NULL,
                      start_moon = 217, lead_time = 12,
                      confidence_level = 0.95, cast_date = Sys.Date(),
                      controls_model = NULL,
                      controls_rodents = rodents_controls(),
                      control_climate_dl = climate_dl_control(),
                      control_files = files_control(),
                      downloads = zenodo_downloads(c("1215988", "833438")),
                      quiet = FALSE, verbose = FALSE, arg_checks = TRUE){
  check_args(arg_checks = arg_checks)
  min_lag <- extract_min_lag(models = models,
                             controls_model = controls_model,
                             quiet = quiet, arg_checks = arg_checks)
  data_sets <- extract_data_sets(models = models,
                                 controls_model = controls_model,
                                 quiet = quiet, arg_checks = arg_checks)
  raw_data_present <- verify_raw_data(main = main,
                                      raw_data = control_files$raw_data,
                                      arg_checks = arg_checks)
  if(!raw_data_present){
    fill_raw(main = main, downloads = downloads, quiet = quiet,
             control_files = control_files, arg_checks = arg_checks)
  }
  messageq(" -Adding data files to data subdirectory", quiet)
  data_m <- prep_moons(main = main, lead_time = lead_time,
                       cast_date = cast_date, quiet = quiet,
                       verbose = verbose, control_files = control_files,
                       arg_checks = arg_checks)
  data_r <- prep_rodents(main = main, moons = data_m,
                         data_sets = data_sets, end_moon = end_moon,
                         start_moon = start_moon,
                         controls_rodents = controls_rodents,
                         quiet = quiet, verbose = verbose,
                         control_files = control_files,
                         arg_checks = arg_checks)
  data_c <- prep_covariates(main = main, moons = data_m,
                            end_moon = end_moon, lead_time = lead_time,
                            min_lag = min_lag, cast_date = cast_date,
                            control_climate_dl = control_climate_dl,
                            quiet = quiet, control_files = control_files,
                            arg_checks = arg_checks)
  prep_metadata(main = main, models = models, data_sets = data_sets,
                moons = data_m, rodents = data_r, covariates = data_c,
                end_moon = end_moon, lead_time = lead_time,
                min_lag = min_lag, cast_date = cast_date,
                start_moon = start_moon,
                confidence_level = confidence_level,
                controls_model = controls_model,
                controls_rodents = controls_rodents, quiet = quiet,
                control_files = control_files, arg_checks = arg_checks)
  invisible(NULL)
}

#' @rdname fill_directory
#'
#' @export
#'
fill_models <- function(main = ".", models = prefab_models(),
                        controls_model = NULL,
                        control_files = files_control(), quiet = FALSE,
                        verbose = FALSE, arg_checks = TRUE){
  check_args(arg_checks = arg_checks)
  return_if_null(models)
  controls_model <- model_controls(models = models,
                                   controls_model = controls_model,
                                   quiet = quiet, arg_checks = arg_checks)
  messageq(" -Writing model scripts", quiet)
  nmodels <- length(models)
  for(i in 1:nmodels){
    write_model(main = main, quiet = quiet, verbose = verbose,
                control_files = control_files,
                control_model = controls_model[[models[i]]],
                arg_checks = arg_checks)
  }
  invisible(NULL)
}

#' @rdname fill_directory
#'
#' @export
#'
fill_casts <- function(main = ".", control_files = files_control(),
                       quiet = FALSE, verbose = FALSE, arg_checks = TRUE){
  check_args(arg_checks = arg_checks)
  directory <- control_files$directory
  messageq(" -Filling casts folder with files from archive", quiet)
  path_casts <- paste0(directory, "/casts")
  archive <- file_path(main = main, sub = "raw", files = path_casts,
                       arg_checks = arg_checks)
  arch_files <- list.files(archive, full.names = TRUE)
  if(length(arch_files) == 0){
    path_casts <- paste0(directory, "/predictions")
    archive <- file_path(main = main, sub = "raw", files = path_casts,
                         arg_checks = arg_checks)
    arch_files <- list.files(archive, full.names = TRUE)
  }
  arch_files_local <- paste0(path_casts, arch_files)
  casts_folder <- casts_path(main = main, arg_checks = arg_checks)
  fc <- file.copy(arch_files, casts_folder, control_files$overwrite)
  casts_meta <- read_casts_metadata(main = main, quiet = quiet,
                                    arg_checks = arg_checks)
  fill_casts_message(files = arch_files, movedTF = fc, quiet = !verbose,
                     verbose = verbose, arg_checks = arg_checks)
  invisible(NULL)
}

#' @rdname fill_directory
#'
#' @export
#'
fill_raw <- function(main = ".",
                     downloads = zenodo_downloads(c("1215988", "833438")),
                     control_files = files_control(), quiet = FALSE,
                     arg_checks = TRUE){
  check_args(arg_checks = arg_checks)
  return_if_null(downloads)
  if(list_depth(downloads) == 1){
    downloads <- list(downloads)
  }
  messageq(" -Downloading raw files", quiet)
  ndl <- length(downloads)
  dl_vers <- rep(NA, ndl)
  for(i in 1:ndl){
    downloads[[i]]$cleanup <- ifnull(downloads[[i]]$cleanup,
                                     control_files$cleanup)
    downloads[[i]]$main <- ifnull(downloads[[i]]$main, main)
    downloads[[i]]$quiet <- ifnull(downloads[[i]]$quiet, quiet)
    downloads[[i]]$sub <- "raw"
    dl_vers[i] <- do.call(download, downloads[[i]])
  }
  update_directory_config(main = main, downloads_versions = dl_vers,
                          quiet = quiet, arg_checks = arg_checks)
  invisible(NULL)
}
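## A usage sketch (hypothetical, following the `controls_model` structure the
## roxygen block above documents): registering a non-prefab model, here named
## "newmod" with made-up settings, before filling the directory.
controls_newmod <- list(newmod = list(name = "newmod",
                                      covariates = TRUE,
                                      lag = 6))
fill_dir(models = c(prefab_models(), "newmod"),
         controls_model = controls_newmod)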
/R/fill_dir.R
permissive
ha0ye/portalcasting
R
false
false
12,211
r
library(rio)

### Name: import
### Title: Import
### Aliases: import

### ** Examples

# create CSV to import
export(iris, "iris1.csv")

# specify `format` to override default format
export(iris, "iris.tsv", format = "csv")
stopifnot(identical(import("iris1.csv"), import("iris.tsv", format = "csv")))

# import CSV as a `data.table`
stopifnot(inherits(import("iris1.csv", setclass = "data.table"), "data.table"))

# pass arguments to underlying import function
iris1 <- import("iris1.csv")
identical(names(iris), names(iris1))
export(iris, "iris2.csv", col.names = FALSE)
iris2 <- import("iris2.csv")
identical(names(iris), names(iris2))

# set class for the response data.frame as "tbl_df" (from dplyr)
stopifnot(inherits(import("iris1.csv", setclass = "tbl_df"), "tbl_df"))

# cleanup
unlink("iris.tsv")
unlink("iris1.csv")
unlink("iris2.csv")
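# A short companion sketch (not part of the extracted examples): rio also
# provides convert(), which chains import() and export() to translate a
# file between formats in one call.
export(iris, "iris3.csv")
convert("iris3.csv", "iris3.rds")   # CSV -> native RDS
stopifnot(nrow(import("iris3.rds")) == nrow(iris))
unlink(c("iris3.csv", "iris3.rds"))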
/data/genthat_extracted_code/rio/examples/import.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
933
r
# https://www.r-bloggers.com/explaining-complex-machine-learning-models-with-lime/

# LIBRARIES AND SOURCES ---------------------------------------------------
pacman::p_load(lime)
library(caret)
library(doParallel)
pacman::p_load(OneR)
library(janitor)
library(tidyverse)

# CONSTANTS ---------------------------------------------------------------

# DATA --------------------------------------------------------------------
data_16 <- read.table("data/2016.csv", sep = ",", header = TRUE)
data_15 <- read.table("data/2015.csv", sep = ",", header = TRUE)

common_feats <- colnames(data_16)[which(colnames(data_16) %in% colnames(data_15))]

# features and response variable for modeling
feats <- setdiff(common_feats, c("Country", "Happiness.Rank", "Happiness.Score"))
response <- "Happiness.Score"

# combine data from 2015 and 2016
data_15_16 <- rbind(select(data_15, one_of(c(feats, response))),
                    select(data_16, one_of(c(feats, response))))

data_15_16$Happiness.Score.l <- bin(data_15_16$Happiness.Score,
                                    nbins = 3, method = "content")

data_15_16 <- select(data_15_16, -Happiness.Score) %>%
  mutate(Happiness.Score.l = plyr::revalue(Happiness.Score.l,
                                           c("(2.83,4.79]" = "low",
                                             "(4.79,5.89]" = "medium",
                                             "(5.89,7.59]" = "high"))) %>%
  as_tibble() %>%
  clean_names()

# 1 - FIT A MODEL ---------------------------------------------------------

# configure multicore (register the cluster so caret can actually use it)
cl <- makeCluster(detectCores())
registerDoParallel(cl)

set.seed(42)
index <- createDataPartition(data_15_16$happiness_score_l, p = 0.7, list = FALSE)
train_data <- data_15_16[index, ]
test_data  <- data_15_16[-index, ]

set.seed(42)
model_mlp <- caret::train(happiness_score_l ~ .,
                          data = train_data,
                          method = "mlp",
                          trControl = trainControl(method = "repeatedcv",
                                                   number = 10,
                                                   repeats = 5,
                                                   verboseIter = FALSE))
stopCluster(cl)

# 2 - LIME ----------------------------------------------------------------

# 2.1 - lime() function ---------------------------------------------------
# The central function of lime is lime(). It creates the function that is used
# in the next step to explain the model's predictions.
#
# We can give a couple of options. Check the help ?lime for details, but the
# most important to think about are:
#
# - Should continuous features be binned? And if so, into how many bins?
# - How many features do we want to use in the explanatory function?
# - How do we want to choose these features?

explanation <- lime(train_data, model_mlp,
                    bin_continuous = TRUE, n_bins = 5, n_permutations = 1000)

# 2.2 - Cases to explain --------------------------------------------------
# Now, let's look at how the model is explained. Here, we are not going to look
# at all test cases but we'll randomly choose three cases with correct
# predictions and three with wrong predictions.
pred <- data.frame(sample_id = 1:nrow(test_data),
                   predict(model_mlp, test_data, type = "prob"),
                   actual = test_data$happiness_score_l)
pred$prediction <- colnames(pred)[2:4][apply(pred[, 2:4], 1, which.max)]
pred$correct <- ifelse(pred$actual == pred$prediction, "correct", "wrong")

# We need to give our test-set data table row names with the sample names or
# IDs to be displayed in the header of our explanatory plots below
pred_cor <- filter(pred, correct == "correct")
pred_wrong <- filter(pred, correct == "wrong")

test_data_cor <- test_data %>%
  mutate(sample_id = 1:nrow(test_data)) %>%
  filter(sample_id %in% pred_cor$sample_id) %>%
  sample_n(size = 3) %>%
  remove_rownames() %>%
  tibble::column_to_rownames(var = "sample_id") %>%
  select(-happiness_score_l)

test_data_wrong <- test_data %>%
  mutate(sample_id = 1:nrow(test_data)) %>%
  filter(sample_id %in% pred_wrong$sample_id) %>%
  sample_n(size = 3) %>%
  remove_rownames() %>%
  tibble::column_to_rownames(var = "sample_id") %>%
  select(-happiness_score_l)

# 2.3 - explain() function ------------------------------------------------
# n_features: We can choose how many features we want to look at with the
# n_features option
#
# feature_select: To specify how we want this subset of features to be found:
# the default, auto, uses forward selection if we chose n_features <= 6 and
# uses the features with highest weights otherwise. We can also directly choose
# feature_select = "forward_selection", feature_select = "highest_weights" or
# feature_select = "lasso_path".

# We also want to have explanations for all three class labels in the response
# variable (low, medium and high happiness), so we are choosing n_labels = 3.
explanation_cor <- lime::explain(test_data_cor, explanation,
                                 n_labels = 3, n_features = 5)
explanation_wrong <- lime::explain(test_data_wrong, explanation,
                                   n_labels = 3, n_features = 5)

# It will return a tidy tibble object that we can plot with plot_features()
plot_features(explanation_cor, ncol = 3)
plot_features(explanation_wrong, ncol = 3)

# The information in the output tibble is described in the help function ?lime
# and can be viewed with
tibble::glimpse(explanation_cor)

# 2.4 - Case analysis example ---------------------------------------------
# So, what does this tell us, now? Let's look at case 22 (the first row of our
# plot for correctly predicted classes):
# (Originally it was case 22, but something is wrong and we'll use case 10)
#
# This sample has been correctly predicted to come from the high happiness
# group because it has:
# - an economy_gdp_per_capita > 1.275
# - a trust_government_corruption > 0.1902
# - a dystopia_residual between 2.32 and 2.68
# - a family between 1.004 and 1.119
# - a region = Middle East and Northern Africa

# case_2_study <- 22
case_2_study <- 10

pred %>% filter(sample_id == case_2_study)

train_data %>%
  gather(x, y, economy_gdp_per_capita:dystopia_residual) %>%
  ggplot(aes(x = happiness_score_l, y = y)) +
  geom_boxplot(alpha = 0.8, color = "grey") +
  geom_point(data = gather(test_data[case_2_study, ], x, y,
                           economy_gdp_per_capita:dystopia_residual),
             color = "red", size = 3) +
  facet_wrap(~ x, scales = "free", ncol = 4)

# An overview of the top 5 explanatory features for case 10 is stored in:
as.data.frame(explanation_cor[1:9]) %>%
  filter(case == as.character(case_2_study))

# In a similar way, we can explore why some predictions were wrong.
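# As an aside to section 2.3 above (not part of the original walk-through):
# the feature_select option can also be set explicitly; a minimal sketch
# forcing highest-weights selection instead of the "auto" heuristic:
explanation_hw <- lime::explain(test_data_cor, explanation, n_labels = 3,
                                n_features = 8,
                                feature_select = "highest_weights")
plot_features(explanation_hw, ncol = 3)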
# Helper for the case analysis above: prints the prediction row for a given
# test case and plots its feature values (red points) against the training
# distributions.
analyse_case_boxplot <- function(case_2_study, pred, train_data, test_data) {

  print(pred %>% filter(sample_id == case_2_study))

  train_data %>%
    gather(x, y, economy_gdp_per_capita:dystopia_residual) %>%
    ggplot(aes(x = happiness_score_l, y = y)) +
    geom_boxplot(alpha = 0.8, color = "grey") +
    geom_point(data = gather(test_data[case_2_study, ], x, y,
                             economy_gdp_per_capita:dystopia_residual),
               color = "red", size = 3) +
    facet_wrap(~ x, scales = "free", ncol = 4)
}

analyse_case_boxplot(87, pred, train_data, test_data)
/lime_demo.R
no_license
miguel-conde/LIME_primer
R
false
false
7,761
r
# Load the NEI PM2.5 emissions summary and the source classification code table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
/load.R
no_license
Nickolay78/EDA-CaseStudy
R
false
false
86
r
#' Macaque transcripts to genes #' #' Lookup table for converting Macaque (*Macaca mulatta*) #' Ensembl transcript IDs to gene IDs based on genome assembly #' MMUL801 from Ensembl. #' #' @docType data #' @keywords datasets #' #' @details #' Variables: #' #' - enstxp #' - ensgene #' #' @source \url{http://ensembl.org/macaca_mulatta} #' #' @examples #' head(mmul801_tx2gene) "mmul801_tx2gene"
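A hedged usage sketch (not from the package documentation): joining a hypothetical transcript-level result table to this lookup to obtain gene IDs, assuming dplyr is available.

library(annotables)
library(dplyr)
# hypothetical transcript-level results keyed by Ensembl transcript ID
tx_results <- data.frame(enstxp = head(mmul801_tx2gene$enstxp), score = 1:6)
left_join(tx_results, mmul801_tx2gene, by = "enstxp")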
/R/mmul801_tx2gene.R
no_license
stephenturner/annotables
R
false
false
394
r
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/caseUtils.R
\name{makeSeriesData}
\alias{makeSeriesData}
\title{Make crime series data}
\usage{
makeSeriesData(crimedata, offenderTable, time = c("midpoint", "earliest",
  "latest"))
}
\arguments{
\item{crimedata}{data.frame of crime incident data. \code{crimedata} must have
columns named: \code{crimeID}, \code{DT.FROM}, and \code{DT.TO}. Note: if the
crime timing is known exactly (uncensored) then only \code{DT.FROM} is
required.}

\item{offenderTable}{offender table that indicates the offender(s) responsible
for solved crimes. \code{offenderTable} must have columns named:
\code{offenderID} and \code{crimeID}.}

\item{time}{the event time to be returned: 'midpoint', 'earliest', or 'latest'}
}
\value{
data frame representation of the crime series present in the \code{crimedata}.
It includes the crime ID (\code{crimeID}), index of that crimeID in the
original \code{crimedata} (\code{Index}), the crime series ID (\code{CS})
corresponding to each \code{offenderID}, and the event time (\code{TIME}).
}
\description{
Creates a data frame with an index to crimedata and offender information. It
is used to generate the linkage data.
}
\details{
This creates a crime series data object that is required for creating linkage
data. It creates a crime series ID (\code{CS}) for every offender. Because of
co-offending, a single crime (\code{crimeID}) can belong to multiple crime
series.
}
\examples{
data(crimes)
data(offenders)

seriesData = makeSeriesData(crimedata=crimes,offenderTable=offenders)
head(seriesData)

nCrimes = table(seriesData$offenderID)  # length of each crime series
table(nCrimes)                          # distribution of crime series length
mean(nCrimes>1)                 # proportion of offenders with multiple crimes

nCO = table(seriesData$crimeID) # number of co-offenders per crime
table(nCO)                      # distribution of number of co-offenders
mean(nCO>1)                     # proportion of crimes with multiple co-offenders
}
\seealso{
\code{\link{getCrimeSeries}}
}
/man/makeSeriesData.Rd
no_license
cran/crimelinkage
R
false
false
2,188
rd
library(Rssa)
library(matrixcalc)
library(pcaL1)

df <- read.csv(file = 'IMP5130.csv')
head(df)

cut <- 12
series <- df$IMP5130[cut:240]
# inject two artificial outliers to test robustness
series[95-cut] <- series[95-cut]*3
series[234-cut] <- series[234-cut]*2

plot(series, type='l')
title(main='Series plot')
length(series)

L <- 60
s <- ssa(series, L = L)
plot(wcor(s))
plot(s, type = "series", groups = 1:8)

X <- hankel(series, L=L)
rnk <- 5

Pr <- IRLS_mod(X, rnk, 'loess')   #IRLS modification (trend extraction with loess)
Pr0 <- hankL2(Pr)
#Pr <- IRLS_mod(X, rnk, 'median') #IRLS modification (trend extraction with median)
#Pr1 <- hankL2(Pr)
Pr <- IRLS_mod(X, rnk, 'lowess')  #IRLS modification (trend extraction with lowess)
Pr2 <- hankL2(Pr)
Pr <- IRLS_orig(X, rnk)           #IRLS original
Pr3 <- hankL2(Pr)

#s.L1svd <- l1pca(X, center=FALSE, projections="l1", projDim=rnk) #l1pca
#Pr <- s.L1svd$projPoints
#Pr.L1 <- hankL1(Pr)

rec <- reconstruct(s, groups = list(c(1:rnk)))
trend.season <- rec$F1

gseries <- series
gseries[95-cut] <- NA
gseries[234-cut] <- NA
s.temp <- ssa(gseries, L = L, force.decompose = FALSE)
#ig <- igapfill(s.temp, groups = list(1:rnk))
ig <- gapfill(s.temp, groups = list(1:rnk))
sg <- ssa(ig, L = L)
rec.g <- reconstruct(sg, groups = list(c(1:rnk)))
trend.season.g <- rec.g$F1

plot(series, col='black', type='l')
lines(trend.season, type='l', col='red', lwd=2)
lines(trend.season.g, type='l', col='magenta', lwd=2)
lines(Pr2, type='l', col='orange', lwd=2)
lines(Pr3, type='l', col='green', lwd=2)
lines(Pr0, type='l', col='blue', lwd=2)
legend('topleft',
       c("SSA", "SSA no gaps", "IRLS loess", "IRLS lowess", "IRLS orig"),
       col=c("red", "magenta", "blue", "orange", "green"),
       lty=1, cex=1.3, lwd=2)
title(main='Reconstructed series')
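The script relies on hankL2() and the IRLS_* rankers, which are defined elsewhere in this repository. A minimal sketch of the hankelization step, assuming it performs the standard anti-diagonal averaging of a trajectory matrix back into a series:

hankL2 <- function(M) {
  # average each anti-diagonal of the L x K matrix M,
  # yielding a series of length L + K - 1
  L <- nrow(M); K <- ncol(M); N <- L + K - 1
  s <- numeric(N)
  for (k in 1:N) {
    i <- max(1, k - K + 1):min(L, k)
    s[k] <- mean(M[cbind(i, k - i + 1)])
  }
  s
}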
/Examples/real_example_gaps.R
no_license
alextret536/RobustSSA
R
false
false
1,709
r
library(CUB) ### Name: varcovcub0q ### Title: Variance-covariance matrix of CUB models with covariates for the ### feeling component ### Aliases: varcovcub0q ### Keywords: internal ### ** Examples data(univer) m<-7 ordinal<-univer[,9] pai<-0.86 gama<-c(-1.94, -0.17) W<-univer[,4] varmat<-varcovcub0q(m, ordinal, W, pai, gama)
/data/genthat_extracted_code/CUB/examples/varcovcub0q.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
347
r
#' @title
#' somewhat enhanced print function for (G)LMM
#' @description
#' Prints the summary of an lmerMod or glmerMod plus:
#' which optimizer was used, how many iterations were allowed for
#' optimization, and any warnings that occurred.
#' @param model An lmerMod or glmerMod object
#' @return 0 if the summary was printed, 1 if the object was not of class
#' (g)lmerMod
#' @author Tobias Heed, 2013-09 or so
#' @export
#' @import Matrix

mPrint <- function( model ) {

  # check whether the correct class has been passed
  if ( ( class( model ) != "glmerMod" ) & ( class( model ) != "lmerMod" ) ) {
    cat( "The object passed to mPrint is not of class (g)lmerMod\n" )
    return( 1 )

  # if yes...
  } else {
    print( summary( model ) )

    # add info about the optimizer used for fitting and the number of iterations allowed
    cat( "\nmodel fitted with:", model@optinfo$optimizer, "\n" )
    if ( is.null( model@optinfo$control$maxfun ) == TRUE ) {
      cat( "no maxfun value found (probably means the default of 10000)\n" )
    } else {
      cat( "iterations (maxfun): ", model@optinfo$control$maxfun, "\n" )
    }

    # show warnings, if any are present
    if ( length( model@optinfo$warnings ) == 0 ) {
      cat( "no warnings" )
    } else {
      cat( "warnings:\n" )
      for ( i in 1 : length( model@optinfo$warnings ) ) {
        cat( i, ": ", model@optinfo$warnings[[ i ]], sep = "" )
      }
    }
    return( 0 )
  }
}
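A hypothetical usage sketch (assumes the lme4 package and its built-in sleepstudy data):

library(lme4)
fm <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
mPrint(fm)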
/R/mPrint.R
no_license
rttl/rttl
R
false
false
1,362
r
library("caret") library("e1071") library("ROCR") library("randomForest") library("WGCNA") set.seed(123) for(k in 1:1){ ##fold_change rankfc=seq(length=2842, from=0, to=0) log2fc2<-del_list(log2fc) for(i in 1:10){ rankfc=rankfc+log2fc2[[i]] names<-names(rankfc) } rankfc=abs(rankfc)/10 rankfc<-sort(rankfc,decreasing = TRUE) ##WGRFE-SVM ranking ranksvm=seq(length=2842, from=0, to=0) svm_w<-list() for(i in 1:10){ obj<-tune.svm(class~.,data=train2[[i]],gamma = 10^(0:0),cost = 10^(-2:2),kernel = "linear") #gamma=-3 best_gm <- obj$best.parameters$gamma best_ct <- obj$best.parameters$cost stepmodel <- svm(class~.,data=train2[[i]], kernel = "linear", gamma = best_gm, cost = best_ct, cross = 0,probability = TRUE) w<-t(find_SVM_weight(stepmodel)) cat(i) svm_w[[i]]=w rm(obj) rm(stepmodel) rm(w) } for(i in 1:10){ ranksvm=ranksvm+svm_w[[i]]*R2[[i]]*10000 } ranksvm<-as.vector(abs(ranksvm)) temp<-train2[[1]] names(ranksvm)<-colnames(temp[1:ncol(temp)-1]) ranksvm<-sort(ranksvm,decreasing = TRUE) rm(temp) rm(svm_w) ###T-testresult ranktest=seq(length=2842, from=0, to=0) testlist<-del_test(train2) for(i in 1:10){ ranktest=ranktest+testlist[[i]] } ranktest=abs(ranktest)/10 names(ranktest) <- names ranktest<-sort(ranktest,decreasing = TRUE) ###pearson pearson1<-list() for(i in 1:10){ temptrain=train[[i]] pearson1[[i]]<-cor(temptrain[1:ncol(temptrain)-1], temptrain[ncol(temptrain)],method = "pearson") rm(temptrain) } rankpearson=seq(length=2842, from=0, to=0) pearson2<-del_list(pearson1) for(i in 1:10){ rankpearson=rankpearson+pearson2[[i]] } rm(pearson1) rankpearson=abs(rankpearson)/10 rankpearson<-as.vector(rankpearson) names(rankpearson) <- names rankpearson<-sort(rankpearson,decreasing = TRUE) } rankingsvm<-list() rankingtest<-list() rankingfc<-list() rankingpearson<-list() #train the svm model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(ranksvm,j))### genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm3(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingsvm[[j-4]]=temp } #train the foldchange model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(rankfc,j))### genesnames =gsub("-", ".", genesnames, fixed = TRUE) genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm4(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingfc[[j-4]]=temp } #train the pearson model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(rankpearson,j))### genesnames =gsub("-", ".", genesnames, fixed = TRUE) genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm4(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingpearson[[j-4]]=temp } #train the t-test model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(ranktest,j))### genesnames =gsub("-", ".", genesnames, fixed = TRUE) genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm4(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingtest[[j-4]]=temp } for(k in 1:1){ result_svmranking<-vector() 
result_testranking <- vector()
  result_fcranking <- vector()
  result_pearsonranking <- vector()
  for(j in 1:16){
    meanacc <- mean(rankingsvm[[j]][,1])
    temp <- c(meanacc, j+4)
    result_svmranking <- cbind(result_svmranking, temp)
  }
  for(j in 1:16){
    meanacc <- mean(rankingtest[[j]][,1])  # mean accuracy across the 10 folds
    temp <- c(meanacc, j+4)
    result_testranking <- cbind(result_testranking, temp)
  }
  for(j in 1:16){
    meanacc <- mean(rankingfc[[j]][,1])
    temp <- c(meanacc, j+4)
    result_fcranking <- cbind(result_fcranking, temp)
  }
  for(j in 1:16){
    meanacc <- mean(rankingpearson[[j]][,1])
    temp <- c(meanacc, j+4)
    result_pearsonranking <- cbind(result_pearsonranking, temp)
  }
}

result2 <- vector()
result2 <- cbind(result2, result_svmranking[1,])
result2 <- cbind(result2, result_fcranking[1,])
result2 <- cbind(result2, result_testranking[1,])
result2 <- cbind(result2, result_pearsonranking[1,])

test_svm3 = function(train, test){
  traindata = train
  testdata = test
  obj <- tune.svm(class~., data = traindata, gamma = 10^0,
                  cost = 10^(-3:3), kernel = "linear")
  best_gm <- obj$best.parameters$gamma
  best_ct <- obj$best.parameters$cost
  model <- svm(class~., data = traindata, kernel = "linear", gamma = best_gm,
               cost = best_ct, cross = 0, probability = TRUE)
  pre_label <- predict(model, testdata, probability = TRUE)
  stat_res <- table(pre_label, testdata$class)
  accuracy <- (stat_res[1, 1] + stat_res[2, 2])/length(pre_label)
  sensitivity <- stat_res[2, 2]/(stat_res[1, 2] + stat_res[2, 2])
  specificity <- stat_res[1, 1]/(stat_res[1, 1] + stat_res[2, 1])
  result <- cbind(accuracy, sensitivity, specificity)
  return(result)
}

test_svm4 = function(train, test){
  traindata = train
  testdata = test
  obj <- tune.svm(class~., data = traindata, gamma = 10^0,
                  cost = 10^(-1:1), kernel = "linear")
  best_gm <- obj$best.parameters$gamma
  best_ct <- obj$best.parameters$cost
  model <- svm(class~., data = traindata, kernel = "linear", gamma = best_gm,
               cost = best_ct, cross = 0, probability = TRUE)
  pre_label <- predict(model, testdata, probability = TRUE)
  stat_res <- table(pre_label, testdata$class)
  accuracy <- (stat_res[1, 1] + stat_res[2, 2])/length(pre_label)
  sensitivity <- stat_res[2, 2]/(stat_res[1, 2] + stat_res[2, 2])
  specificity <- stat_res[1, 1]/(stat_res[1, 1] + stat_res[2, 1])
  result <- accuracy
  return(result)
}

# Get the t statistic for each gene across a set of training samples
del_test <- function(train){
  temp <- list()
  for(i in 1:10){
    temp2 <- vector()
    a <- train[[i]]
    a1 <- subset(a, class == "1")
    a2 <- subset(a, class == "2")
    for(j in 1:2842){
      a3 <- a1[,j]
      a4 <- a2[,j]
      a5 = t.test(a3, a4, paired = FALSE)
      temp2[j] <- a5$statistic
    }
    temp[[i]] = temp2
  }
  return(temp)
}
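The ranking loop above calls find_SVM_weight(), which is not defined in this script. A minimal sketch, assuming an e1071 linear SVM, where the primal weight vector is the coefficient-weighted sum of the support vectors:

find_SVM_weight <- function(model) {
  # w = sum_i (alpha_i * y_i) * x_i; e1071 stores alpha_i*y_i in model$coefs
  # and the support vectors in model$SV, so w is coefs' %*% SV
  t(model$coefs) %*% model$SV
}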
/4.0four_methods Comparison.R
no_license
mosaddek-hossain/NSCLC-subtype-classification
R
false
false
6,819
r
library("caret") library("e1071") library("ROCR") library("randomForest") library("WGCNA") set.seed(123) for(k in 1:1){ ##fold_change rankfc=seq(length=2842, from=0, to=0) log2fc2<-del_list(log2fc) for(i in 1:10){ rankfc=rankfc+log2fc2[[i]] names<-names(rankfc) } rankfc=abs(rankfc)/10 rankfc<-sort(rankfc,decreasing = TRUE) ##WGRFE-SVM ranking ranksvm=seq(length=2842, from=0, to=0) svm_w<-list() for(i in 1:10){ obj<-tune.svm(class~.,data=train2[[i]],gamma = 10^(0:0),cost = 10^(-2:2),kernel = "linear") #gamma=-3 best_gm <- obj$best.parameters$gamma best_ct <- obj$best.parameters$cost stepmodel <- svm(class~.,data=train2[[i]], kernel = "linear", gamma = best_gm, cost = best_ct, cross = 0,probability = TRUE) w<-t(find_SVM_weight(stepmodel)) cat(i) svm_w[[i]]=w rm(obj) rm(stepmodel) rm(w) } for(i in 1:10){ ranksvm=ranksvm+svm_w[[i]]*R2[[i]]*10000 } ranksvm<-as.vector(abs(ranksvm)) temp<-train2[[1]] names(ranksvm)<-colnames(temp[1:ncol(temp)-1]) ranksvm<-sort(ranksvm,decreasing = TRUE) rm(temp) rm(svm_w) ###T-testresult ranktest=seq(length=2842, from=0, to=0) testlist<-del_test(train2) for(i in 1:10){ ranktest=ranktest+testlist[[i]] } ranktest=abs(ranktest)/10 names(ranktest) <- names ranktest<-sort(ranktest,decreasing = TRUE) ###pearson pearson1<-list() for(i in 1:10){ temptrain=train[[i]] pearson1[[i]]<-cor(temptrain[1:ncol(temptrain)-1], temptrain[ncol(temptrain)],method = "pearson") rm(temptrain) } rankpearson=seq(length=2842, from=0, to=0) pearson2<-del_list(pearson1) for(i in 1:10){ rankpearson=rankpearson+pearson2[[i]] } rm(pearson1) rankpearson=abs(rankpearson)/10 rankpearson<-as.vector(rankpearson) names(rankpearson) <- names rankpearson<-sort(rankpearson,decreasing = TRUE) } rankingsvm<-list() rankingtest<-list() rankingfc<-list() rankingpearson<-list() #train the svm model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(ranksvm,j))### genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm3(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingsvm[[j-4]]=temp } #train the foldchange model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(rankfc,j))### genesnames =gsub("-", ".", genesnames, fixed = TRUE) genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm4(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingfc[[j-4]]=temp } #train the pearson model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(rankpearson,j))### genesnames =gsub("-", ".", genesnames, fixed = TRUE) genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm4(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingpearson[[j-4]]=temp } #train the t-test model for(j in 5:20){ temp<-vector() for(i in 1:10){ genesnames<-names(head(ranktest,j))### genesnames =gsub("-", ".", genesnames, fixed = TRUE) genesnames<-union(genesnames,"class") temptrain<-train[[i]] temptest<-test[[i]] temptrain<-temptrain[,genesnames] temptest<-temptest[,genesnames] #get the results cat("\n-->",j,i) tempmodel<-test_svm4(temptrain,temptest) temp<-rbind(temp,tempmodel) } rankingtest[[j-4]]=temp } for(k in 1:1){ result_svmranking<-vector() 
result_testranking<-vector() result_fcranking<-vector() result_pearsonranking<-vector() for(j in 1:16){ meanacc<-mean(rankingsvm[[j]][,1]) temp<-union(meanacc,j+4) result_svmranking<-cbind(result_svmranking,temp) } for(j in 1:16){ meanacc<-mean(rankingtest[[j]][6,]) temp<-union(meanacc,j+4) result_testranking<-cbind(result_testranking,temp) } for(j in 1:16){ meanacc<-mean(rankingfc[[j]][6,]) temp<-union(meanacc,j+4) result_fcranking<-cbind(result_fcranking,temp) } for(j in 1:16){ meanacc<-mean(rankingpearson[[j]][6,]) temp<-union(meanacc,j+4) result_pearsonranking<-cbind(result_pearsonranking,temp) } } result2<-vector() result2<-cbind(result2,result_svmranking[1,]) result2<-cbind(result2,result_fcranking[1,]) result2<-cbind(result2,result_testranking[1,]) result2<-cbind(result2,result_pearsonranking[1,]) test_svm3 = function(train,test){ result2<-list() result<-vector() traindata=train testdata=test obj<-tune.svm(class~.,data=traindata,gamma = 10^0,cost = 10^(-3:3),kernel = "linear") #gamma=-3 best_gm <- obj$best.parameters$gamma best_ct <- obj$best.parameters$cost model <- svm(class~.,data=traindata, kernel = "linear", gamma = best_gm, cost = best_ct, cross = 0,probability = TRUE) pre_label <- predict(model, testdata, probability = TRUE) stat_res <- table(pre_label, testdata$class) accurary <- (stat_res[1, 1] + stat_res[2, 2])/length(pre_label) sensitivity <- stat_res[2, 2]/(stat_res[1, 2] + stat_res[2, 2]) specificity <- stat_res[1, 1]/(stat_res[1, 1] + stat_res[2, 1]) result<-cbind(accurary,sensitivity,specificity) return(result) } test_svm4 = function(train,test){ result2<-list() result<-vector() traindata=train testdata=test obj<-tune.svm(class~.,data=traindata,gamma = 10^0,cost = 10^(-1:1),kernel = "linear") #gamma=-3 best_gm <- obj$best.parameters$gamma best_ct <- obj$best.parameters$cost model <- svm(class~.,data=traindata, kernel = "linear", gamma = best_gm, cost = best_ct, cross = 0,probability = TRUE) pre_label <- predict(model, testdata, probability = TRUE) stat_res <- table(pre_label, testdata$class) accurary <- (stat_res[1, 1] + stat_res[2, 2])/length(pre_label) sensitivity <- stat_res[2, 2]/(stat_res[1, 2] + stat_res[2, 2]) specificity <- stat_res[1, 1]/(stat_res[1, 1] + stat_res[2, 1]) result<-accurary return(result) } #Get the T statistic for a set of samples del_test<-function(train){ temp<-list() for(i in 1:10){ temp2<-vector() a<-train[[i]] a1<-subset(a,class=="1") a2<-subset(a,class=="2") for(j in 1:2842){ a3<-a1[,j] a4<-a2[,j] a5=t.test(a3,a4,paired = FALSE) temp2[j]<-a5$statistic } temp[[i]]=temp2 } return(temp) }
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{vector_read}
\alias{vector_read}
\alias{vector_read_eps}
\alias{vector_read_svg}
\alias{vector_read_xml}
\title{Load Vector Graphics as a Picture Object}
\usage{
vector_read(filename)

vector_read_xml(filename)

vector_read_eps(filename)

vector_read_svg(filename)
}
\arguments{
\item{filename}{filename of a vector object in svg, eps, or eps.xml format.}
}
\description{
Read in a vector file as a \code{grImport::Picture} object.
}
\details{
This is a bit of a Rube Goldberg machine because of a disconnect between how
most vector images are likely to be found (SVG, e.g.,
\url{http://phylopic.org}) and the formats that R is capable of reading.
There are a number of steps here that require external programs, and cause
potential platform non-independence. The process is:

\enumerate{
\item{Convert SVG to EPS. This can be done with Inkscape automatically, or
with other programs manually. The function
\code{forest:::inkscape_svg_to_eps} does this step, but requires Inkscape to
be installed.}

\item{Convert EPS to XML. \code{grImport::readPicture} has an internal XML
format that it uses, called RGML. To convert to this format, it uses
ghostscript to process the EPS file and then does some post-processing. The
function \code{forest:::ghostscript_eps_to_xml} does this step, but requires
ghostscript to be installed.}

\item{Read XML into R. This is done with \code{grImport::readPicture} and
creates an object of class \code{Picture} that can be drawn using
\code{grid.picture} and eventually some forest functions.}
}

The function \code{vector_read} is a high level wrapper to this process that
attempts to do as little work as possible. This means that if an XML file
that corresponds to an EPS/SVG file exists, that file will be read rather
than going back and recreating the XML file. This means that not all of the
conversion software needs to be installed if the processed files exist.

Alternatively, \code{vector_read_svg}, \code{vector_read_eps} and
\code{vector_read_xml} will do all the necessary processing and read the
resulting object in as a Picture. They will \emph{always} reprocess files
though.

The above functions use a set of conventions for filenames:
\code{picture.svg} becomes \code{picture.eps} becomes \code{picture.xml}. If
the starting point is an eps file then this is simply \code{picture.eps}
becomes \code{picture.xml}.

Reading in the XML files can be quite slow. I may add an additional step
here that serialises the object as RDS and reads from that preferentially.
In this case the processed \code{picture.xml} becomes \code{picture.rds}.
}
\author{
Rich FitzJohn
}
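A minimal usage sketch (not from the package documentation), assuming "picture.svg" exists and Inkscape and ghostscript are on the search path:

pic <- vector_read("picture.svg")
grid::grid.newpage()
grImport::grid.picture(pic)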
/man/vector_read.Rd
no_license
richfitz/forest
R
false
false
2,737
rd
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{install_github}
\alias{install_github}
\title{Attempts to install a package directly from github.}
\usage{
install_github(repo, username = getOption("github.user"), ref = "master",
  pull = NULL, subdir = NULL, branch = NULL, auth_user = NULL,
  password = NULL, ...)
}
\arguments{
\item{repo}{Repository address in the format
\code{[username/]repo[/subdir][@ref|#pull]}. Alternatively, you can specify
\code{username}, \code{subdir}, \code{ref} or \code{pull} using the
respective parameters (see below); if both are specified, the values in
\code{repo} take precedence.}

\item{username}{User name}

\item{ref}{Desired git reference. Could be a commit, tag, or branch name.
Defaults to \code{"master"}.}

\item{pull}{Desired pull request. A pull request refers to a branch, so you
can't specify both \code{branch} and \code{pull}; one of them must be
\code{NULL}.}

\item{subdir}{subdirectory within repo that contains the R package.}

\item{branch}{Deprecated. Use \code{ref} instead.}

\item{auth_user}{your account username if you're attempting to install a
package hosted in a private repository (and your username is different to
\code{username})}

\item{password}{your password}

\item{...}{Other arguments passed on to \code{\link{install}}.}
}
\description{
This function is vectorised on \code{repo} so you can install multiple
packages in a single command.
}
\examples{
\dontrun{
install_github("roxygen")
install_github("wch/ggplot2")
install_github(c("rstudio/httpuv", "rstudio/shiny"))
install_github(c("devtools@devtools-1.4", "klutometis/roxygen#142",
  "mfrasca/r-logging/pkg"))

# Update devtools to the latest version, on Linux and Mac
# On Windows, this won't work - see ?build_github_devtools
install_github("hadley/devtools")
}
}
\seealso{
Other package installation: \code{\link{install_bitbucket}};
\code{\link{install_gitorious}}; \code{\link{install_git}};
\code{\link{install_url}}; \code{\link{install_version}};
\code{\link{install}}
}
/man/install_github.Rd
no_license
BrunoVilela/devtools
R
false
false
2,073
rd
rm(list=ls())

library(raster)
library(rgdal)

setwd("DIRECTORY WITH PILEHEIGHT FILES")

#Get exceedence probability from ecdf (currently 0.1 m)
exceedenceRaster <- function(x){
  1-ecdf(x)(0.1)
}

#Read in and flip raster - Pileheights are reversed in the y axis.
processRasters <- function(rasterFile)
{
  r <- flip(raster(rasterFile), direction = 'y')
  r[is.na(r)] <- 0
  return(r)
}

#Get list of directories
dList <- list.dirs(recursive = FALSE)
Hexceed <- list() #Create list of exceedences

#Loop through directories (could run this as an lapply as well, but that seems a bit crazy)
for (i in 1:length(dList))
{
  #Get list of raster files
  fList <- list.files(path = dList[i], pattern="*.asc", full.names = TRUE, recursive = TRUE)
  #Process raster (flip, turn NA into 0)
  rList <- lapply(fList, processRasters)
  #Create raster stack
  heights <- stack(rList)
  #Create new raster (exceedence prob)
  r <- raster(heights)
  r <- overlay(heights, fun=exceedenceRaster)
  #Extract AEP (1/ARI)
  aep <- 1/as.numeric(strsplit(dList[i], split = "./ARI")[[1]][2])
  r <- r*aep
  Hexceed[[i]] <- r
}

#Now get exceedences
Exceedence <- overlay(stack(Hexceed), fun=sum)
writeRaster(Exceedence, "Exceedence0_1m", format="GTiff", overwrite=TRUE)
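The ecdf-based function above is applied per cell across the stack's layers; a quick check of the threshold logic on hypothetical values:

x <- c(0, 0.05, 0.2, 0.5, 1.5)  # simulated pile heights (m) at one cell
exceedenceRaster(x)             # 0.6: three of the five values exceed 0.1 m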
/Chapter 4 - Probabilistic lahar hazard modelling/Exceedence_rasters.R
no_license
stuartmead/laharthesis-sources
R
false
false
1,242
r
# Data Scientist Training (Formacao Cientista de Dados)
library(igraph)

dist = graph(edges=c("A","C","A","B","B","E","B","F","C","D","G","H","D","H","E","H","F","G"), directed=TRUE)
E(dist)$weight = c(2,1,2,1,2,1,1,3,1)
plot(dist, edge.label = E(dist)$weight)
tkplot(dist, edge.label = E(dist)$weight, vertex.color="white")

distances(dist,1,8)
distances(dist,V(dist)$name=="A",V(dist)$name=="H")

caminho = shortest_paths(dist,V(dist)$name=="A",V(dist)$name=="H", output=c("both"))
caminho$vpath

for(i in 1:length(V(dist))) {
  V(dist)$color[i] <- ifelse(i %in% as.vector(unlist(caminho$vpath)), "green", "gray")
}
for(i in 1:length(E(dist))) {
  E(dist)$color[i] <- ifelse(i %in% as.vector(unlist(caminho$epath)), "green", "gray")
}
plot(dist, edge.label = E(dist)$weight)

caminho = shortest_paths(dist,V(dist)$name=="H",V(dist)$name=="A", output=c("both"))
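A small consistency check (not in the original script): the summed edge weights along the A-to-H path should equal the reported shortest-path distance.

caminho_AH = shortest_paths(dist, "A", "H", output=c("both"))
sum(E(dist)$weight[unlist(caminho_AH$epath)])  # equals distances(dist, "A", "H")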
/Grafos/9.igraph4.R
no_license
samuelsoaress/Study-R
R
false
false
824
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit_model_dyn.R \name{fit_model_dyn} \alias{fit_model_dyn} \title{Fit a model after filtering data frame to particular geography and traning period.} \usage{ fit_model_dyn(area_sh_use, train_start_date, train_end_date, spec, data) } \arguments{ \item{data}{} } \value{ } \description{ Fit a model after filtering data frame to particular geography and traning period. }
/man/fit_model_dyn.Rd
permissive
aranryan/arlodr
R
false
true
449
rd
library(MASTA) #head(longitudinal) #head(follow_up_time) #head(survival) #--------------------------------------------------- # Error 2: #--------------------------------------------------- # Error in xj[i] : invalid subscript type 'list' #--------------------------------------------------- #---- create data sample for test --- nn = 200 ncode = 4 fu_month = 60 fu_rate = 0.1 set.seed(123) #-- follow-up-data id = 1:nn train_valid = rep(2, nn) ; train_valid[1:(nn/2)] = 1 fu_time = rexp(nn, rate=fu_rate) fu_time = pmax(fu_time, fu_month) ##pmax not pmin! follow_up_time_data = data.frame(id = id, fu_time = fu_time, train_valid = train_valid) #-- longitudinal data --- out=c() junk = 1:fu_month for (j in 1:ncode){ for (i in 1:nn){ rsize = rpois(1, lambda=20) tmp1=sort(sample(junk, size = rsize, replace=TRUE)) tmp2=cbind(rep(j,rsize),rep(i,rsize),tmp1) out=rbind(out, tmp2) } } out = data.frame(out) colnames(out)=c("code","id","time") longitudinal_data = out head(longitudinal_data) system.time(Z <- fpca.combine(longitudinal_data, follow_up_time_data, K.select = "PropVar"))
/check/check2.R
no_license
shufessm/MASTA
R
false
false
1,107
r
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427846e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L) result <- do.call(myTAI:::cpp_bootMatrix,testlist) str(result)
/myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615766732-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
1,803
r
\name{pisa.log.pv}
\alias{pisa.log.pv}
\alias{pisa2015.log.pv}
\title{Logistic regression analysis with plausible values}
\description{
pisa.log.pv performs logistic regression with plausible values and replicate weights.
Use pisa2015.log.pv() for data from the PISA 2015 study.
}
\usage{
pisa.log.pv(pvlabel="READ", x, by, cutoff, data, export=FALSE, name= "output",
  folder=getwd())

pisa2015.log.pv(pvlabel="READ", x, by, cutoff, data, export=FALSE,
  name= "output", folder=getwd())
}
\arguments{
  \item{pvlabel}{
The label corresponding to the achievement variable, for example, "READ" for overall reading performance.
}
  \item{x}{
Data labels of independent variables.
}
  \item{cutoff}{
The cut-off point at which the dependent plausible value scores are dichotomised (scores larger than the cut-off are coded 1).
}
  \item{by}{
The label for the categorical grouping variable (e.g., by="IDCNTRYL") or variables (e.g., by= c("IDCNTRYL", "ST79Q03")).
}
  \item{data}{
An R object, normally a data frame, containing the data from PISA.
}
  \item{export}{
A logical value. If TRUE, the output is exported to a file in comma-separated value format (.csv) that can be opened from LibreOffice or Excel.
}
  \item{name}{
The name of the exported file.
}
  \item{folder}{
The folder where the exported file is located.
}
}
\value{
pisa.log.pv returns a data frame with coefficients, standard errors, t-values, and odds ratios.
If "by" is specified, results are reported in a list.
}
\seealso{
timss.log.pv, pirls.log.pv
}
\examples{
\dontrun{
timss.log.pv(pvlabel="BSMMAT", cutoff= 550, x=c("ITSEX", "BSBGSLM"),
  by="IDCNTRYL", data=timss8g)

intsvy.log.pv(pvlabel="BSMMAT", cutoff= 550, x="ITSEX", by="IDCNTRYL",
  data=timss8g, config=timss8_conf)
}}
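# --- Added usage sketch (not part of the original Rd file) ---
# A hedged example of the documented interface; 'pisa' and the variable names
# below are placeholders, not data sets shipped with the package:
# pisa.log.pv(pvlabel = "READ", cutoff = 500, x = "ST04Q01",
#             by = "IDCNTRYL", data = pisa)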
/man/pisa.log.pv.Rd
no_license
dickli/intsvy2
R
false
false
1,756
rd
library(tidyverse)
library(moderndive)
str(evals)

xtabs(~ethnicity + language, data = evals) -> T1
T1
addmargins(T1)
chisq.test(T1)
chisq.test(T1)$exp

xtabs(~ethnicity + rank, data = evals) -> T2
T2
chisq.test(T2)
chisq.test(T2)$exp

xtabs(~gender + rank, data = evals) -> T3
T3
chisq.test(T3)
chisq.test(T3)$exp
obs_stat <- chisq.test(T3)$stat
obs_stat

B <- 10^4
x2v <- numeric(B)
for(i in 1:B){
  x2v[i] <- chisq.test(xtabs(~gender + sample(rank), data = evals))$stat
}
hist(x2v, freq = FALSE, col = "blue", ylim = c(0, 0.5))
curve(dchisq(x, 2), add = TRUE, col = "red")
(pvalue <- (sum(x2v >= obs_stat) + 1)/(B + 1))

library(infer)
evals %>%
  specify(gender ~ rank) %>%
  hypothesize(null = "independence") %>%
  calculate(stat = "Chisq") %>%
  pull() -> obs_stat_infer
obs_stat_infer
null_dist <- evals %>%
  specify(gender ~ rank) %>%
  hypothesize(null = "independence") %>%
  generate(reps = 10^3, type = "permute") %>%
  calculate(stat = "Chisq")
head(null_dist)
null_dist %>%
  visualize() +
  shade_p_value(obs_stat_infer, direction = "greater")

#############################################################
library(resampledata)
names(GSS2002)
T4 <- xtabs(~Marital + Happy, data = GSS2002)
T4
chisq.test(T4)
T5 <- xtabs(~Education + Gender, data = GSS2002)
T5
chisq.test(T5)

###########################################################
limitRange <- function(fun, df, min, max){
  function(x){
    y <- fun(x, df)
    y[x < min | x > max] <- NA
    return(y)
  }
}
limitRangeC <- function(fun, df, min, max){
  function(x){
    y <- fun(x, df)
    y[x < max & x > min] <- NA
    return(y)
  }
}
dlimit <- limitRange(fun = dchisq, df = 5, min = qchisq(.10/2, 5), max = qchisq(1 - .10/2, 5))
dlimitC <- limitRangeC(fun = dchisq, df = 5, min = qchisq(.10/2, 5), max = qchisq(1 - .10/2, 5))
dlimitC(0:(4*5))
p <- ggplot(data = data.frame(x = 0:(4*5)), aes(x = x))
p + stat_function(fun = dchisq, args = list(df = 5), n = 200) +
  stat_function(fun = dlimit, geom = "area", fill = "blue", alpha = .3, n = 1000) +
  stat_function(fun = dlimitC, geom = "area", fill = "green", alpha = .9, n = 1000) +
  theme_bw() +
  labs(x = "", y = "") +
  stat_function(fun = dchisq, args = list(df = 5), n = 1500)
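# --- Added sketch (not part of the original script) ---
# The permutation loop above can be written more compactly with replicate();
# this is an equivalent formulation of the same null distribution, not a
# change to the analysis:
x2v_alt <- replicate(10^3,
                     chisq.test(xtabs(~gender + sample(rank), data = evals))$stat)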
/Rscripts/CHISQ.R
no_license
alanarnholt/STT3850
R
false
false
2,331
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metab_day.R
\name{metab_day}
\alias{metab_day}
\alias{metab_day.default}
\title{Identify metabolic days in a time series}
\usage{
metab_day(dat_in, ...)

\method{metab_day}{default}(dat_in, tz, lat, long, ...)
}
\arguments{
\item{dat_in}{data.frame}

\item{...}{arguments passed to or from other methods}

\item{tz}{chr string for timezone, e.g., 'America/Chicago'}

\item{lat}{numeric for latitude}

\item{long}{numeric for longitude (negative west of prime meridian)}
}
\description{
Identify metabolic days in a time series based on sunrise and sunset times
for a location and date. The metabolic day is considered the 24 hour period
between sunsets for two adjacent calendar days. The function calls the
\code{\link[maptools]{sunriset}} function from the maptools package, which
uses algorithms from the National Oceanic and Atmospheric Administration
(\url{http://www.esrl.noaa.gov/gmd/grad/solcalc/}).
}
\details{
This function is only used within \code{\link{ecometab}} and should not be
called explicitly.
}
\seealso{
\code{\link{ecometab}}, \code{\link[maptools]{sunriset}}
}
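# --- Added sketch (not part of the original Rd file) ---
# metab_day() relies on maptools::sunriset(); a minimal stand-alone call with
# assumed coordinates (a Gulf-coast longitude/latitude chosen for
# illustration, not taken from the package):
# library(maptools)
# loc <- matrix(c(-88.0, 30.5), nrow = 1)   # long, lat in decimal degrees
# sunriset(loc, as.POSIXct("2012-06-01", tz = "America/Chicago"),
#          direction = "sunrise", POSIXct.out = TRUE)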
/man/metab_day.Rd
no_license
swmpkim/SWMPr
R
false
true
1,165
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/panelPomp_methods.R
\name{as}
\alias{as}
\title{Coercing \code{panelPomp} objects as a \code{list}}
\description{
Extracts the \code{unit.objects} slot of \code{panelPomp} objects.

Coerces a \code{panelPomp} into a data frame.
}
\seealso{
Other panelPomp methods: \code{\link{panelPomp_methods}}
}
\concept{panelPomp methods}
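# --- Added usage sketch (not part of the original Rd file) ---
# Hedged illustration of the two coercions named in the description; 'pp' is
# a placeholder panelPomp object, not shipped data:
# as(pp, "list")        # extracts the unit.objects slot
# as(pp, "data.frame")  # flattens the panel into a data frame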
/man/as.Rd
no_license
regicid/panelPomp
R
false
true
466
rd
#' Plot the pseudotime ordering of clusters
#'
#' Plot the pseudotime ordering of clusters using igraph tools.
#'
#' @param network an igraph network with weighted edges corresponding to the pseudotime distance
#' @param node_color an optional vector of colors for node labeling. If NULL, node colors are generated with ColorHue
#' @param alpha_color the gradient of the nodes
#' @param exaggerate whether to exponentiate then linearly scale the edge weights, thus emphasizing their differences
#' @param weight_scaler the weight scaler for edges
#' @param arrow_scaler the arrow width scaler for edges
#'
#' @return nothing
#'
#' @importFrom igraph edges E plot.igraph E<-
#'
#' @export
#'
PlotLineage <- function(network,
                        node_color = NULL,
                        alpha_color = 1,
                        exaggerate = TRUE,
                        weight_scaler = 0.2,
                        arrow_scaler = 0.8){
  set.seed(1)
  if(is.null(node_color)){
    node_color <- ColorHue(n = length(edges(network)[[1]][1]))
    node_color <- node_color$hex.1.n.
  }
  if(exaggerate){
    E(network)$width <- (exp(E(network)$weight)) * weight_scaler
    E(network)$arrow.width <- arrow_scaler * E(network)$weight
  }else{
    E(network)$width <- E(network)$weight
  }
  vertex_order <- as.numeric(names(edges(network)[[1]][1]))
  node_color <- node_color[vertex_order]
  plot.igraph(network, vertex.color = node_color)
}

#' Produce a plot of matlab DTree data and return the object
#'
#' If an output dir and filename are provided, a plot will be saved, otherwise
#' the function will just return the graph. This is used for internal analysis
#' to compare with the matlab implementation.
#'
#' @param edge_table a numeric matrix whose rows are directed edges of a tree:
#'     col 1 is v1, col2 is v2, col3 is weight
#' @param predecessors a vector of tree predecessors such that pred[i] = the predecessor of i
#' @param outputdir the output directory, relative to getwd()
#' @param outputfile the output file
#'
#' @importFrom grDevices pdf dev.off
#' @importFrom graphics plot
#' @importFrom igraph plot.igraph layout_with_kk
#'
#' @return an igraph representation of the tree
#'
PlotMatlabDtree <- function(edge_table,
                            predecessors,
                            outputdir = NULL,
                            outputfile = NULL){
  directed_edge_table <- ProcessMatlabDTree(edge_table, predecessors)
  directed_graph <- GetDGFromTable(directed_edge_table)
  if(!is.null(outputdir) && !is.null(outputfile)){
    file_path <- paste0(getwd(), .Platform$file.sep,
                        outputdir, .Platform$file.sep,
                        outputfile)
    pdf(file_path)
    plot(directed_graph, layout = layout_with_kk)
    dev.off()
  }
  return(directed_graph)
}

#' Produce a scatter plot of the cells
#'
#' Create a scatter plot of the data on a 2-dim embedding colored by feature.
#' If discrete (factor) data is passed, the colorscale parameter must be
#' provided, otherwise the plot will default to gradient shading.
#'
#' @param flat_embedding a low dim embedding of cells
#' @param feature a scalar representation of feature
#' @param title the title of the plot
#' @param subtitle the subtitle of the plot
#' @param featurename the name of the feature to title the legend
#' @param colorscale color scale for discrete data
#' @param pointsize the size of geom point
#' @param hide_legend hide the legend and return a list, containing the legendless plot and the legend
#' @param legsize the size of the legend
#' @param legt size of legend title
#' @param legtxt size of legend text
#' @param grad_lim the limits of the color gradient for continuous features, this should usually be in log-expression space, overrides autoscale_dropout
#' @param autoscale_dropout whether to set plots where the feature space is 0 to #102033, the default min for scale_color_gradient auto scaling, overrides dropout_NA
#' @param dropout_NA whether to set plots where the feature space is 0 to NA, note: the legend element in a hide_legend output will be NULL
#'
#' @return a ggplot2 object
#'
#' @import ggplot2
#' @importFrom cowplot get_legend
#'
#' @export
#'
FeatureScatterPlot <- function(flat_embedding,
                               feature,
                               title,
                               subtitle,
                               featurename,
                               colorscale = NULL,
                               pointsize = 1,
                               hide_legend = FALSE,
                               legsize = 5,
                               legt = 5,
                               legtxt = 5,
                               grad_lim = NULL,
                               autoscale_dropout = FALSE,
                               dropout_NA = TRUE){
  n_features <- length(unique(feature))
  p <- ggplot(flat_embedding,
              aes_(x = as.name(colnames(flat_embedding)[1]),
                   y = as.name(colnames(flat_embedding)[2]),
                   color = ~`feature`))
  if(is.null(colorscale)){
    p <- p + geom_point(size = pointsize) +
      labs(title = title, subtitle = subtitle, color = featurename) +
      theme_minimal() +
      theme(legend.title = element_text(size = legt),
            legend.text = element_text(size = legtxt))
    if(!is.null(grad_lim)){
      p <- p + scale_colour_gradient(limits = grad_lim)
    } else if (autoscale_dropout) {
      if(length(unique(feature)) == 1){
        p <- p + scale_colour_gradient(low = "#102033", high = "#102033")
      }
    } else if (dropout_NA){
      if(length(unique(feature)) == 1){
        feature[which(feature == 0)] <- NA
        p <- p + scale_color_gradient(na.value = "grey50")
      }
    }
  }else{
    p <- p + geom_point(size = pointsize) +
      labs(title = title, subtitle = subtitle, color = featurename) +
      theme_minimal() +
      scale_color_manual(values = colorscale) +
      guides(colour = guide_legend(override.aes = list(size = legsize))) +
      theme(legend.title = element_text(size = legt),
            legend.text = element_text(size = legtxt))
  }
  if(hide_legend){
    if(length(unique(feature)) == 1){
      # NAs have replaced the dropout 0's
      p <- p + theme_minimal()
      p <- p + theme(legend.position = "none")
      list(legend = NULL, plot = p)
    } else {
      leg <- get_legend(p)
      p <- p + theme_minimal()
      p <- p + theme(legend.position = "none")
      list(legend = leg, plot = p)
    }
  }else{
    p
  }
}

#' Heatmap of the top n markers
#'
#' Plot the heatmap of the top n inferred markers for each cluster.
#'
#' @param data expression data with genes x cells
#' @param gene_names a vector of symbolic gene names corresponding to the rows in the data matrix
#' @param cluster_labels a vector of cluster labels corresponding to the columns in the data matrix
#' @param markers a table of markers
#' @param n_features the top n features to plot
#' @param ... extra args to heatmap.2
#'
#' @return nothing
#'
#' @import dplyr
#' @import RColorBrewer
#' @import gplots
#' @importFrom tibble as_tibble
#' @importFrom Matrix as.matrix
#'
#' @export
#'
PlotTopN <- function(data, gene_names, cluster_labels, markers, n_features, ...){
  clusterId = geneScore = NULL # r cmd check pass
  n_cells <- ncol(data)
  n_clusters <- length(unique(cluster_labels))
  sorted_cell <- sort.int(cluster_labels, index.return = TRUE)
  markers_table <- as.data.frame(markers)
  sortedmarkers <- arrange(markers_table, clusterId, desc(geneScore))
  sorted_gene_table <- as_tibble(sortedmarkers) %>%
    group_by(clusterId) %>%
    top_n(n_features, geneScore)
  sorted_gene <- sorted_gene_table$geneID
  plot_data <- data[sorted_gene, sorted_cell$ix]
  rlabs <- gene_names[sorted_gene]
  clabs <- c(rep(NA, n_cells))
  counts <- as.matrix(table(cluster_labels))
  accumulator <- matrix(1, n_clusters, n_clusters)*lower.tri(matrix(1, n_clusters, n_clusters), diag = TRUE)
  last_cells <- accumulator %*% counts
  offset <- floor(counts/2)
  labeled_columns <- last_cells - offset
  clabs[labeled_columns] <- c(1:n_clusters)
  p <- heatmap.2(as.matrix(plot_data),
                 col = rev(brewer.pal(11, "RdBu")),
                 trace = 'none',
                 dendrogram = 'none',
                 Rowv = FALSE,
                 Colv = FALSE,
                 labCol = clabs,
                 labRow = rlabs,
                 srtCol = 0,
                 #cexCol = 1,
                 #cexRow = .7,
                 #lhei = c(1,3.5),
                 key.title = NA,
                 scale = "row",
                 ...)
}

#' Heatmap of specific genes
#'
#' Given a set of markers, plot their expression across clusters on a heatmap.
#'
#' @param data expression data with genes x cells
#' @param gene_names a vector of symbolic gene names corresponding to the rows in the data matrix
#' @param cluster_labels a vector of cluster labels corresponding to the columns in the data matrix
#' @param markers a vector of markers
#'
#' @return nothing
#'
#' @import RColorBrewer
#' @import gplots
#'
#' @export
#'
PlotClusterExpression <- function(data, gene_names, cluster_labels, markers){
  n_clusters <- length(unique(cluster_labels))
  sorted_cell <- sort.int(cluster_labels, index.return = TRUE)
  filtered <- ClusterAvg(M = data,
                         gene_names = gene_names,
                         cell_order = sorted_cell$ix,
                         cell_labels = cluster_labels,
                         gene_list = markers)
  p <- heatmap.2(log10(t(filtered) + 1),
                 col = rev(brewer.pal(11, "RdBu")),
                 trace = 'none',
                 dendrogram = 'none',
                 Rowv = FALSE,
                 Colv = FALSE,
                 labCol = c(1:n_clusters),
                 labRow = markers,
                 srtCol = 0,
                 cexCol = 1,
                 cexRow = .7,
                 lhei = c(1,2),
                 key.title = NA,
                 scale = "row")
}

#' Violin plot of gene expression
#'
#' Produce a violin plot of gene expression.
#'
#' @param data a matrix with genes in rows and cells in columns
#' @param gene_names a vector of names corresponding to the rows in data
#' @param labels a vector of cluster assignments for the cells
#' @param gene_name the name of the gene to plot
#' @param colorscale optionally provided color scale
#' @param jitsize the jitter size parameter
#'
#' @return a violin plot
#'
#' @import ggplot2
#'
#' @export
#'
ViolinPlotExpression <- function(data,
                                 gene_names,
                                 labels,
                                 gene_name,
                                 colorscale = NULL,
                                 jitsize = 0.2){
  n_features <- length(unique(labels))
  if(is.null(colorscale)){
    colorscale <- ColorHue(n = n_features)
    colorscale <- colorscale$hex.1.n.
  }
  gene_index <- which(tolower(gene_names) == tolower(gene_name))
  expression <- data[gene_index,]
  plot_data <- as.data.frame(cbind(exp = expression, cluster = labels))
  plot_data$cluster <- as.factor(plot_data$cluster)
  colors <- colorscale[sort(unique(labels))]
  p <- ggplot(plot_data, aes_string(x = 'cluster', y = 'exp', fill = 'cluster')) +
    geom_violin(alpha = 0.6) +
    theme(legend.position = 'none') +
    labs(title = paste0("Expression of ", gene_name),
         x = "Cluster",
         y = "Relative Target Expression") +
    geom_jitter(shape = 16, position = position_jitter(jitsize)) +
    scale_fill_manual(values = colors) +
    theme_minimal()
  p # return the plot visibly, matching the documented return value
}

#' Circos plot of signaling score
#'
#' Produce a plot of a cell signaling network according to the cell-normalized expression score.
#'
#' @param P signaling probabilities cells x cells
#' @param rgb_gap real percent of the inter-cluster spectrum gap to encompass cluster highlighting
#' @param cluster_labels labels of cells ordered from 1 to n
#' @param lig_cells_per_cluster int 0 to keep all
#' @param rec_cells_per_cluster int 0 to keep all
#' @param zero_threshold real value for zero cutoff (0 to keep all edges)
#' @param cD_reduce if the ratio of a circlize plot grid element (cell or cluster) to the whole circle is less than this value, the item is omitted from the plot. Default is zero; don't change this unless you are inspecting an unhighlighted chord diagram, since it breaks highlighting if one or more pairs above the zero_threshold happen to have a low grid/circle ratio.
#' @param highlight_clusters whether to label the plot with clusters. Default is TRUE.
#' @param title_text the title of the plot
#' @param n_clusters if n is higher than the number of unique labels (colorscale can be used for meta plotting)
#' @param n_cluster_cols for plots with different subpopulations, use this to reserve dropout clusters in individual plots. WARNING: this is strongly recommended if there are dropouts, i.e. unique(labels) != 1:length(labels)
#' @param receptor_chord_color color the chords according to the receptor-bearing cell they point to. Default is FALSE.
#'
#' @import dplyr
#' @import circlize
#' @importFrom reshape2 melt
#' @importFrom graphics legend title
#'
#' @export
#'
SigPlot <- function(P,
                    rgb_gap = 0.2,
                    cluster_labels,
                    lig_cells_per_cluster = 10,
                    rec_cells_per_cluster = 10,
                    zero_threshold = 0,
                    cD_reduce = 0,
                    highlight_clusters = TRUE,
                    title_text = NULL,
                    n_clusters = NULL,
                    n_cluster_cols = NULL,
                    receptor_chord_color = FALSE){
  if(max(P) <= zero_threshold){
    print('no signaling in P')
    return()
  }
  label = lig_cluster_number = lig_cell = rec_cell = rec_cluster_number =
    legend = title = link_weight = arbitrary_order = NULL # r cmd check pass
  circos.clear()

  # compute
  # ordering: int vector of indices corresponding to the labels
  # labels: int vector cluster labels of the cells ordered from cluster 1 to n
  sorted_cell <- sort.int(cluster_labels, index.return = TRUE)
  ordering <- sorted_cell$ix
  labels <- sorted_cell$x
  if(is.null(n_clusters)){
    n_clusters <- length(unique(labels))
  }
  if(is.null(n_cluster_cols)){
    n_cluster_cols <- max(labels)
  }
  n_cells <- length(labels)

  # find nonzero rows and columns of ordered P
  P <- P[drop=FALSE, ordering, ordering]
  nzrow <- which(rowSums(P) > zero_threshold)
  nzcol <- which(colSums(P) > zero_threshold)

  # prune the ordering and labels for rows and cols
  nzordering_lig <- ordering[nzrow]
  nzordering_rec <- ordering[nzcol]
  nzlabel_lig <- labels[nzrow]
  nzlabel_rec <- labels[nzcol]

  # prune P
  P_ordered <- P[drop=FALSE, nzrow, nzcol]
  rownames(P_ordered) <- nzordering_lig
  colnames(P_ordered) <- nzordering_rec

  # for each cluster, count the ligand and receptor cells
  lig_counts <- matrix(0, nrow = n_clusters, ncol = 1)
  rec_counts <- matrix(0, nrow = n_clusters, ncol = 1)
  rownames(lig_counts) <- rownames(rec_counts) <- as.character(c(1:n_clusters))
  for(c in 1:n_clusters){
    cc <- unique(labels)[c]
    lig_counts[c,1] <- length(which(nzlabel_lig == cc))
    rec_counts[c,1] <- length(which(nzlabel_rec == cc))
  }

  # for each cluster, get the location of the first and the last cell in the permutation of labels
  accumulator <- matrix(1, n_clusters, n_clusters)*lower.tri(matrix(1, n_clusters, n_clusters), diag = TRUE)
  lig_last_cells <- accumulator %*% lig_counts
  lig_first_cells <- lig_last_cells - lig_counts + 1
  accumulator <- matrix(1, n_clusters, n_clusters)*lower.tri(matrix(1, n_clusters, n_clusters), diag = TRUE)
  rec_last_cells <- accumulator %*% rec_counts
  rec_first_cells <- rec_last_cells - rec_counts + 1

  ### apply cell and edge thresholds ###
  # apply top cell per cluster lig
  if(lig_cells_per_cluster > 0){
    sums <- rowSums(P_ordered)
    tab <- data.frame(cell = nzordering_lig,
                      label = nzlabel_lig,
                      sum = sums,
                      arbitrary_order = 1:length(sums))
    grouped <- tab %>% group_by(label)
    filtered <- top_n(grouped, n = lig_cells_per_cluster, wt = arbitrary_order)
    P_ordered <- P_ordered[drop=FALSE, as.character(filtered$cell),]
  }
  # apply top cell per cluster rec
  if(rec_cells_per_cluster > 0){
    sums <- colSums(P_ordered)
    tab <- data.frame(cell = nzordering_rec,
                      label = nzlabel_rec,
                      sum = sums,
                      arbitrary_order = 1:length(sums))
    grouped <- tab %>% group_by(label)
    filtered <- top_n(grouped, n = rec_cells_per_cluster, wt = arbitrary_order)
    P_ordered <- P_ordered[drop=FALSE,, as.character(filtered$cell)]
  }

  # flatten the adjacency matrix:
  # col1 is the ligand cell
  # col2 is the receptor cell
  # col3 is the weight of the link
  P_table <- melt(P_ordered)
  colnames(P_table) <- c("lig_cell", "rec_cell", "link_weight")
  ind_lig <- match(P_table$lig_cell, ordering)
  lig_clust_num <- labels[ind_lig]
  P_table$lig_cluster_number <- lig_clust_num
  ind_rec <- match(P_table$rec_cell, ordering)
  rec_clust_num <- labels[ind_rec]
  P_table$rec_cluster_number <- rec_clust_num
  P_table <- arrange(P_table, lig_cluster_number, lig_cell)

  # get the ordering of the receptors
  lig_cell_order <- unique(P_table$lig_cell[which(P_table$link_weight > zero_threshold)])
  P_tab <- arrange(P_table[which(P_table$link_weight > zero_threshold),],
                   rec_cluster_number, rec_cell)
  rec_cell_unique <- unique(P_tab$rec_cell)
  rec_cell_unique <- paste(rec_cell_unique, "R", sep = "_")
  chord_plot_sector_order <- c(lig_cell_order, rec_cell_unique)

  # add a prefix to make the rec cells unique for sector highlighting
  P_table$rec_cell <- paste(P_table$rec_cell, "R", sep = "_")

  # get rgb codes and 0-360 hue map for the clusters
  cluster_colors <- ColorHue(n_cluster_cols)
  alt_cluster_colors <- ColorHue(n_cluster_cols, luminance = 100, chroma = 90)
  # rownames(cluster_colors) <- sort(unique(labels))
  # rownames(alt_cluster_colors) <- sort(unique(labels))
  # cluster_colors <- cluster_colors[sort(unique(cluster_labels)),]
  # alt_cluster_colors <- alt_cluster_colors[sort(unique(cluster_labels)),]

  # get the rgb codes for the sectors (cells), based on 20% of the spectrum starting from the cluster hue
  gap <- rgb_gap*(cluster_colors[2,1] - cluster_colors[1,1])
  if(!receptor_chord_color){
    # get the chord colors for the ligands
    cols <- ChordColors(P_table, cluster_colors, gap)

    # plot the chords
    circos.clear()
    chordDiagram(P_table[which(P_table$link_weight > zero_threshold), 1:3],
                 order = chord_plot_sector_order,
                 directional = TRUE,
                 direction.type = c("diffHeight", "arrows"),
                 link.arr.type = "big.arrow",
                 annotationTrack = "grid",
                 grid.col = cols,
                 preAllocateTracks = list(list(track.height = 0.05),
                                          list(track.height = 0.05)),
                 reduce = cD_reduce)

    # apply highlighting to the ligand signaling cells
    # Circlize only plots the P_table connections that are non-zero
    # In case zero_threshold is <= 0, find which clusters are being plotted
    # find highlightable pairs
    if(length(which(P_table$link_weight <= zero_threshold)) > 0){
      nz_lig_clust <- unique(P_table$lig_cluster_number[-(which(P_table$link_weight <= zero_threshold))])
      nz_rec_clust <- unique(P_table$rec_cluster_number[-(which(P_table$link_weight <= zero_threshold))])
    } else {
      nz_lig_clust <- unique(P_table$lig_cluster_number)
      nz_rec_clust <- unique(P_table$rec_cluster_number)
    }
    if(highlight_clusters){
      for(i in nz_lig_clust){
        ii <- which(rownames(cluster_colors) == i)
        lig_cells <- unique(P_table$lig_cell[which(P_table$lig_cluster_number == i)])
        highlight_col <- cluster_colors$hex.1.n.[ii]
        cluster_name <- paste0("C", i)
        highlight.sector(sector.index = lig_cells,
                         col = highlight_col,
                         #text = cluster_name, # these may be cramped
                         text.vjust = -1,
                         niceFacing = TRUE,
                         track.index = 2)
      }
      for(i in nz_rec_clust){
        ii <- which(rownames(cluster_colors) == i)
        rec_cells <- unique(P_table$rec_cell[which(P_table$rec_cluster_number == i)])
        highlight_col <- cluster_colors$hex.1.n.[ii]
        cluster_name <- paste0("C", i)
        highlight.sector(sector.index = rec_cells[which(rec_cells %in% get.all.sector.index())],
                         col = highlight_col,
                         #text = cluster_name, # these may be cramped
                         text.vjust = -1,
                         niceFacing = TRUE,
                         track.index = 2)
      }
      subset_cols <- cluster_colors$hex.1.n.[sort(unique(labels))]
      legend("topleft",
             legend = paste0("C", sort(unique(cluster_labels))),
             pch = 16, pt.cex = 1.5, cex = 1, bty = 'n',
             col = subset_cols)
      title(title_text, cex.main = 1.5, line = -0.5)
    }
  } else {
    P_table <- arrange(P_table, lig_cluster_number, rec_cluster_number,
                       desc(link_weight), rec_cell)
    P_table <- P_table[which(P_table$link_weight > zero_threshold),]
    #colnames(P_table) <- c("lig_link", "rec_cell", "link_weight", "lig_cluster_number", "rec_cluster_number")
    P_table$lig_cell <- 1:nrow(P_table) # these are not actually cells anymore, more like cell-extracted links
    chord_plot_sector_order <- c(P_table$lig_cell, rec_cell_unique)

    # get the chord colors for the ligands
    cols <- ChordColors(P_table, cluster_colors, gap, receptor_chord_color = TRUE)

    # plot the chords
    circos.clear()
    chordDiagram(P_table[which(P_table$link_weight > zero_threshold), 1:3],
                 order = chord_plot_sector_order,
                 directional = TRUE,
                 direction.type = c("diffHeight", "arrows"),
                 link.arr.type = "big.arrow",
                 annotationTrack = "grid",
                 grid.col = cols,
                 preAllocateTracks = list(list(track.height = 0.05),
                                          list(track.height = 0.05)),
                 reduce = cD_reduce)

    # apply highlighting to the ligand signaling cells
    # Circlize only plots the P_table connections that are non-zero
    # In case zero_threshold is <= 0, find which clusters are being plotted
    # find highlightable pairs
    if(length(which(P_table$link_weight <= zero_threshold)) > 0){
      nz_lig_clust <- unique(P_table$lig_cluster_number[-(which(P_table$link_weight <= zero_threshold))])
      nz_rec_clust <- unique(P_table$rec_cluster_number[-(which(P_table$link_weight <= zero_threshold))])
    } else {
      nz_lig_clust <- unique(P_table$lig_cluster_number)
      nz_rec_clust <- unique(P_table$rec_cluster_number)
    }
    if(highlight_clusters){
      for(i in nz_lig_clust){
        ii <- which(rownames(cluster_colors) == i)
        lig_cells <- unique(P_table$lig_cell[which(P_table$lig_cluster_number == i)])
        highlight_col <- cluster_colors$hex.1.n.[ii]
        cluster_name <- paste0("C", i)
        highlight.sector(sector.index = lig_cells,
                         col = highlight_col,
                         #text = cluster_name, # these may be cramped
                         text.vjust = -1,
                         niceFacing = TRUE,
                         track.index = 2)
      }
      for(i in nz_rec_clust){
        ii <- which(rownames(cluster_colors) == i)
        rec_cells <- unique(P_table$rec_cell[which(P_table$rec_cluster_number == i)])
        highlight_col <- cluster_colors$hex.1.n.[ii]
        cluster_name <- paste0("C", i)
        highlight.sector(sector.index = rec_cells[which(rec_cells %in% get.all.sector.index())],
                         col = highlight_col,
                         #text = cluster_name, # these may be cramped
                         text.vjust = -1,
                         niceFacing = TRUE,
                         track.index = 2)
      }
      subset_cols <- cluster_colors$hex.1.n.[sort(unique(labels))]
      legend("topleft",
             legend = paste0("C", sort(unique(cluster_labels))),
             pch = 16, pt.cex = 1.5, cex = 1, bty = 'n',
             col = subset_cols)
      title(title_text, cex.main = 1.5, line = -0.5)
    }
  }
}

#' Get a vector of n equally spaced rgb colors
#'
#' Get a vector of n equally spaced rgb colors.
#'
#' @param n integer number of hex codes to return
#' @param starthue real hue argument for grDevices::hcl(); it generates value 1
#' @param endhue real hue argument
#' @param luminance the luminance of the hcl value
#' @param chroma the chroma of the hcl value
#'
#' @importFrom grDevices hcl
#'
#' @export
#'
ColorHue <- function(n, starthue = 15, endhue = 360, luminance = 65, chroma = 100) {
  hues <- seq(starthue, endhue, length = n + 1)
  hex <- hcl(h = hues, l = luminance, c = chroma)[1:n]
  hue_color_map <- data.frame(hues[1:n], hex[1:n])
  hue_color_map[,2] <- as.character(hue_color_map[,2])
  return(hue_color_map)
}

#' Get chord colors for cells
#'
#' Get chord colors for cells.
#'
#' @param edge_table a table with lig, rec, score, lig_cluster, rec_cluster
#' @param cluster_cols colors for the clusters
#' @param gap the hue gap
#' @param receptor_chord_color color the chords according to the receptor-bearing cell they point to. Default is FALSE.
#'
ChordColors <- function(edge_table,
                        cluster_cols,
                        gap,
                        receptor_chord_color = FALSE) {
  chords <- c()
  if(!receptor_chord_color){
    for(i in unique(edge_table$lig_cluster_number)){
      cell_labels <- edge_table[which(edge_table$lig_cluster_number == i),]$lig_cell
      starthue <- cluster_cols[as.character(i),1]
      cols <- ColorHue(n = length(cell_labels),
                       starthue = starthue,
                       endhue = starthue + gap)
      cols <- cols$hex.1.n.
      names(cols) <- cell_labels
      chords <- c(chords, cols)
    }
    cols_rec <- rep("grey", length(edge_table$rec_cell))
    names(cols_rec) <- edge_table$rec_cell
    chords <- c(chords, cols_rec)
  } else {
    for(i in unique(edge_table$lig_cluster_number)){
      sub_table <- edge_table[which(edge_table$lig_cluster_number == i),]
      for(ii in unique(sub_table$rec_cluster_number)){
        sub_sub_table <- sub_table[which(sub_table$rec_cluster_number == ii),]
        starthue <- cluster_cols[as.character(ii),1]
        cols <- ColorHue(n = nrow(sub_sub_table),
                         starthue = starthue,
                         endhue = starthue + gap)
        cols <- cols$hex.1.n.
        names(cols) <- sub_sub_table$lig_cell
        chords <- c(chords, cols)
      }
    }
    cols_rec <- rep("grey", length(edge_table$rec_cell))
    names(cols_rec) <- edge_table$rec_cell
    chords <- c(chords, cols_rec)
  }
  chords # return the named color vector explicitly
}
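# --- Added usage sketch (not part of the original file) ---
# ColorHue() is self-contained, so its output can be inspected directly; this
# just demonstrates the hue/hex data frame it returns and its auto-generated
# column names:
pal <- ColorHue(n = 4)
pal                                      # columns "hues.1.n." and "hex.1.n."
barplot(rep(1, 4), col = pal$hex.1.n., main = "ColorHue(4)")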
/R/PlottingFunctions.R
no_license
adamlmaclean/RSoptSC
R
false
false
27,361
r
# load prereqs and download dataset
library(dplyr)

filename <- "Coursera_DS3_Final.zip"
if (!file.exists(filename)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method="curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}

# Read the raw data files so the objects merged below actually exist.
# (Assumption: the standard "UCI HAR Dataset" folder layout from the zip.)
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n", "functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")

# One: Merge training/test
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
Merged_Data <- cbind(Subject, Y, X)

# Two: STD/Mean extraction
Selection <- Merged_Data %>% select(subject, code, contains("mean"), contains("std"))

# Three: Descriptive activity names
Selection$code <- activities[Selection$code, 2]

# Four: Labels
names(Selection)[2] <- "activity"
names(Selection) <- gsub("Acc", "Accelerometer", names(Selection))
names(Selection) <- gsub("Gyro", "Gyroscope", names(Selection))
names(Selection) <- gsub("BodyBody", "Body", names(Selection))
names(Selection) <- gsub("Mag", "Magnitude", names(Selection))
names(Selection) <- gsub("^t", "Time", names(Selection))
names(Selection) <- gsub("^f", "Frequency", names(Selection))
names(Selection) <- gsub("tBody", "TimeBody", names(Selection))
names(Selection) <- gsub("-mean()", "Mean", names(Selection), ignore.case = TRUE)
names(Selection) <- gsub("-std()", "STD", names(Selection), ignore.case = TRUE)
names(Selection) <- gsub("-freq()", "Frequency", names(Selection), ignore.case = TRUE)
names(Selection) <- gsub("angle", "Angle", names(Selection))
names(Selection) <- gsub("gravity", "Gravity", names(Selection))

# Five: Average (funs() is deprecated in current dplyr, so pass mean directly)
ProjectOut <- Selection %>%
  group_by(subject, activity) %>%
  summarise_all(mean)
write.table(ProjectOut, "ProjectOut.txt", row.names = FALSE)

# Six: Check
str(ProjectOut)
print(ProjectOut)
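# A small verification sketch (not part of the original script): read the
# exported tidy data back in to confirm it round-trips. Assumes the script
# above has been run in the same working directory.
tidyCheck <- read.table("ProjectOut.txt", header = TRUE)
str(tidyCheck)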
/Run_analysis.R
no_license
merkasaur/Week-Four-Project
R
false
false
1,758
r
posture.reliability.df <- sqldf("
  select
    a.participantId, a.task, a.privacy, a.cleanliness,
    a.posture as 'test_posture',
    b.posture as 'retest_posture'
  from 'thought.experiment' as 'a'
  join 'thought.experiment' as 'b'
  where a.questionnaire_round = 'retest'
    and b.questionnaire_round = 'test'
    and a.participantId = b.participantId
    and a.task = b.task
    and a.privacy = b.privacy
    and a.cleanliness = b.cleanliness;",
  dbname = DB)
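# A minimal follow-up sketch (not part of the original file; assumes only the
# columns produced by the query above): test-retest agreement as the share of
# identical posture answers across the two questionnaire rounds.
agreement <- with(posture.reliability.df,
                  mean(test_posture == retest_posture, na.rm = TRUE))
agreement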
/analyze/data/20-posture_reliability.r
no_license
tlevine/toilet-posture-release
R
false
false
425
r
## R Programming Assignment 2
## Gerald McGarvin April 3, 2017
## Call makeCacheMatrix to store a matrix and its inverse, if setInv is called.
## makeCacheMatrix expects a square matrix as input.
## It returns an R list object containing get and set functions provided by makeCacheMatrix.
## setOrig() can be called to define the original matrix in makeCacheMatrix.
## setInv() can be called to store the inverse of the original matrix in makeCacheMatrix.
## getOrig() returns the original matrix.
## getInv() returns the inverse of the original matrix.

## Test using the following statements in RStudio:
## > source("cachematrix.R")
## > m1 <- matrix(c(1/2, -1/4, -1, 3/4), nrow = 2, ncol = 2)
## > my_matrix <- makeCacheMatrix(m1)  ## creates a makeCacheMatrix object containing the m1 matrix.
## > cacheSolve(my_matrix)
## The first time the last statement is executed the inverse matrix is displayed.
## If repeated for the same input matrix, the inverse matrix is displayed along with the following message:
## "Retrieved inverse of matrix from cache"

makeCacheMatrix <- function(origX = matrix()) {
  invX <- NULL                 ## clear variable when cache is instantiated.
  setOrig <- function(newX) {  ## set function used to change origX and clear invX in cache.
    origX <<- newX             ## override origX in the enclosing environment with the new value.
    invX <<- NULL              ## since origX has changed, previous value of invX must be removed.
  }
  getOrig <- function() origX  ## return value of the original matrix to caller.
                               ## was set when makeCacheMatrix object was instantiated or setOrig was called.
  setInv <- function(passedInv) invX <<- passedInv  ## set invX to value passed to setInv().
  getInv <- function() invX    ## return value of the inverse matrix to caller.
  ## Return named list of setter and getter functions to caller:
  list(setOrig = setOrig,
       getOrig = getOrig,
       setInv = setInv,
       getInv = getInv)
}

## Call cacheSolve passing in a makeCacheMatrix object.
## Return the inverse of the matrix origX already stored in the makeCacheMatrix object.
## Call get and set functions in makeCacheMatrix as needed.
cacheSolve <- function(x, ...) {  ## pass a makeCacheMatrix object.
  invX <- x$getInv()              ## attempt to get cached inverse of x, the original matrix.
  if(!is.null(invX)) {            ## not NULL means cache provided inverse of x.
    message("Retrieved inverse of matrix from cache")
    return(invX)                  ## exit function and return invX, the matrix inverse.
  }
  invX <- solve(x$getOrig())      ## only executed if invX is NULL. Determine inverse.
  x$setInv(invX)                  ## set inverse in cache in case needed later.
  invX                            ## return inverse of X to the caller.
}
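## A short verification sketch (not part of the original file): the product of
## a matrix and its cached inverse should be the identity matrix, and a second
## cacheSolve() call should hit the cache.
m1 <- matrix(c(1/2, -1/4, -1, 3/4), nrow = 2, ncol = 2)
my_matrix <- makeCacheMatrix(m1)
round(m1 %*% cacheSolve(my_matrix), 10)  ## expect the 2x2 identity matrix
cacheSolve(my_matrix)                    ## prints the "Retrieved ... from cache" message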
/cachematrix.R
no_license
gmcgarvin/ProgrammingAssignment2
R
false
false
2,642
r
library(na.tools)

### Name: na.unreplace
### Title: na.unreplace
### Aliases: na.unreplace na.unreplace.default na.unreplace.character
###   na.unreplace.factor na.implicit

### ** Examples

na.unreplace( c(1,2,3,4), 3 )
na.unreplace( c("A", "(NA)", "B", "C") )
na.unreplace( c("A", NA_explicit_, "B", "C") )
df <- data.frame( char=c('A', 'NA', 'C', NA_explicit_), num=1:4 )
na.unreplace(df)
/data/genthat_extracted_code/na.tools/examples/na.unreplace.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
410
r
## Test file for the Utilities object
library("GSFAnalytics")

test.getFincadDateAdjust <- function() {
  financialCenter <- "nyb"
  holidays <- HolidayDataLoader()
  holidayList <- holidays$getHolidays("financialcalendar",financialCenter)
  initialDate <- "2007/08/13"
  shouldBomb(getFincadDateAdjust("whatever","d",3,holidayList))
  shouldBomb(getFincadDateAdjust(initialDate,"blog",3,holidayList))
  shouldBomb(getFincadDateAdjust(initialDate,"d","yes",holidayList))
  shouldBomb(getFincadDateAdjust(initialDate,"d",3,"I like Holidays"))
  checkAdjust <- function(start, end, units, num, reverse = FALSE) {
    checkSame(as.POSIXct(end),getFincadDateAdjust(start,units,num,holidayList))
    if(reverse) checkSame(as.POSIXct(start),getFincadDateAdjust(end,units,-num,holidayList))
  }
  checkAdjust(initialDate, "2007/08/16", "d", 3, TRUE)
  checkAdjust("2007/08/31", "2007/09/06", "d", 3, TRUE)
  checkAdjust("2007/08/03", "2007/09/04", "m", 1, TRUE)
  checkAdjust("2007/08/20", "2007/09/04", "w", 2)
  checkAdjust("2006/12/29", "2008/01/02", "d", 252, TRUE)
  checkAdjust("2006/12/29", "2007/12/31", "y", 1)
  checkAdjust("2007/01/01", "2008/01/02", "y", 1)
  checkSame(as.POSIXct("2007/09/05"),getFincadDateAdjust("2007/08/31","d",3,NULL))
}

test.getDaysBetween <- function() {
  startDate <- "2007/07/13"
  endDate <- "2008/02/23"
  shouldBomb(getDaysBetween("BusinessTime",endDate))
  shouldBomb(getDaysBetween(startDate,"BusinessTime"))
  checkEquals(round(getDaysBetween(startDate,endDate),0),225)
}

test.getBusinessDaysBetween <- function(){
  checkSame(21, getBusinessDaysBetween("2007-12-01", "2008-01-02", "nyb"))
  checkSame(-21, getBusinessDaysBetween("2008-01-02","2007-12-01","nyb"))
  checkSame(0,getBusinessDaysBetween("2008-01-02","2008-01-02","nyb"))
}

test.getSystemDBFXMarketName <- function() {
  fxCurr <- FXCurr$setByCross("usd/jpy")
  shouldBomb(getSystemDBFXMarketName())
  shouldBomb(getSystemDBFXMarketName(fxCurr = "badmojo",tenor="1y",putCall="call",SystemNumber=1))
  # shouldBomb(getSystemDBFXMarketName(fxCurr = fxCurr,tenor="badmojo",putCall="call",SystemNumber=1))
  shouldBomb(getSystemDBFXMarketName(fxCurr = fxCurr,tenor="1y",putCall="businessTime",SystemNumber=1))
  shouldBomb(getSystemDBFXMarketName(fxCurr = fxCurr,tenor="1y",putCall="call",SystemNumber=-1))
  checkSame(getSystemDBFXMarketName(fxCurr = fxCurr,tenor="6m",putCall="call",SystemNumber=1),"USDJPY6MTRI.C1")
  checkSame(getSystemDBFXMarketName(fxCurr = fxCurr,tenor="1y",putCall="put",SystemNumber=1),"USDJPY1YTRI.P1")
}

test.getELDateFromRDate <- function() {
  shouldBomb(getELDateFromRDate())
  shouldBomb(getELDateFromRDate(dates = TRUE,timeStamps = 0))
  shouldBomb(getELDateFromRDate(dates = "2007-07-07",timeStamps = TRUE))
  checkEquals(getELDateFromRDate(dates = "2007-07-07",timeStamps = 0),1070707)
  checkEquals(getELDateFromRDate(dates = "2007-07-07",timeStamps = 1600),1070707.16)
  checkEquals(getELDateFromRDate(dates = c("2007-07-07","2007-07-08"),timeStamps = 1600),c(1070707.16,1070708.16))
  checkEquals(getELDateFromRDate(dates = c("2007-07-07","2007-07-08"),timeStamps = c(0,1600)),c(1070707,1070708.16))
}

# NOTE: this redefinition replaces the test of the same name above,
# so only this version runs when the file is sourced.
test.getELDateFromRDate <- function() {
  shouldBomb(getELDateFromRDate())
  shouldBomb(getELDateFromRDate(dates = TRUE))
  checkEquals(getELDateFromRDate(dates = "2007-07-07"),1070707)
  checkEquals(getELDateFromRDate(dates = "2007-07-07 16:00:00"),1070707.16)
  checkEquals(getELDateFromRDate(dates = c("2007-07-07 16:00:00","2007-07-08 16:00:00")),c(1070707.16,1070708.16))
  checkEquals(getELDateFromRDate(dates = c("2007-07-07 00:00:00","2007-07-08 16:00:00")),c(1070707,1070708.16))
  checkEquals(getELDateFromRDate(dates = c("2007-07-07","2007-07-08")),c(1070707,1070708))
}

test.getELTimeFromPOSIXlt <- function() {
  shouldBomb(getELTimeFromPOSIXlt())
  shouldBomb(getELTimeFromPOSIXlt(date = TRUE))
  checkEquals(getELTimeFromPOSIXlt(date = as.POSIXlt("2007-07-07 15:00:00")),1500)
  checkEquals(getELTimeFromPOSIXlt(date = "2007-07-07 15:00:00"),1500)
  checkEquals(getELTimeFromPOSIXlt(date = "2007-07-07 01:00:00"),100)
  checkEquals(getELTimeFromPOSIXlt(date = "2007-07-07 16:00:00"),1600)
  checkEquals(getELTimeFromPOSIXlt(date = "2007-07-07"),0)
  checkEquals(getELTimeFromPOSIXlt(date = c("2007-07-07","2007-07-08")),c(0,0))
  checkEquals(getELTimeFromPOSIXlt(date = c("2007-07-07 16:00:00","2007-07-08 05:00:00")),c(1600,500))
}

testGetHistoricalVolatility <- function(){
  triExample <- zoo(1:70)
  shouldBomb(getHistoricalVolatility())
  shouldBomb(getHistoricalVolatility(as.numeric(triExample)))
  shouldBomb(getHistoricalVolatility(-triExample))
  shouldBomb(getHistoricalVolatility(triExample,window = 0))
  shouldBomb(getHistoricalVolatility(triExample,dt = 0))
  shouldBomb(getHistoricalVolatility(triExample,method = "not_implemented"))
  # test standard_deviation
  result <- getHistoricalVolatility(triExample, 60, 1/252,"standard_deviation")
  checkEquals(0.3105343,last(result))
  result <- getHistoricalVolatility(triExample[1:60], 60, 1/252,"standard_deviation")
  checkEquals(NULL,result)
  result <- getHistoricalVolatility(triExample[1:61], 60, 1/252,"standard_deviation")
  checkEquals(1.703155421,last(result))
  # test EWMA
  result <- getHistoricalVolatility(triExample, 60, 1/252,"EWMA")
  checkEquals(7.475219715,last(result))
  triExample <- zoo(rep(-5:5, 8))
  shouldBombMatching(getHistoricalVolatility(triExample, 60, 1/252, 'standard_deviation'), 'The TRI series should always be positive')
  result <- getHistoricalVolatility(triExample, 60, 1, 'standard_deviation', logChanges = FALSE)
  checkEquals(3.065895874638324, last(result))
  result <- getHistoricalVolatility(triExample, 60, 1, 'EWMA', logChanges = FALSE)
  checkEquals(2.449786723182203, last(result))
}

testGetEWMA <- function(){
  shouldBomb(getEWMA())
  shouldBomb(getEWMA(c(1,5,"SD")))
  shouldBomb(getEWMA(c(1,5),0))
  checkEquals(getEWMA(c(1)),1)
  checkEquals(getEWMA(c(1,2,3,2),60),c(1,1.011485979647104,1.034326011212858,1.045417722993805))
}

testGetZScore <- function(){
  zooObj <- zoo(c(1,2,3,4),c("2007-01-01","2007-01-02","2007-01-03","2007-01-04"))
  checkEquals(
    getZScore(zooObj),
    zoo(c(-1.1618950038622251,-0.3872983346207417,0.3872983346207417,1.1618950038622251),c("2007-01-01","2007-01-02","2007-01-03","2007-01-04"))
  )
}

test.parseSimpleTenor <- function() {
  shouldBomb(parseSimpleTenor("No Way"))
  shouldBomb(parseSimpleTenor("1.5m"))
  test.list <- parseSimpleTenor("spot")
  checkSame(test.list$numUnits,0)
  checkSame(test.list$unit,"d")
  test.list <- parseSimpleTenor("18m")
  checkSame(test.list$numUnits,18)
  checkSame(test.list$unit,"m")
}

test.getSubList <- function() {
  DEACTIVATED("CDS_TICKER_UNIVERSE")
  library(QFCredit)
  tickerList <- CDSDataLoader$getScrubbedNames()
  shouldBomb(getSubList())
  shouldBomb(getSubList(tickerList,0,1))
  shouldBomb(getSubList(tickerList,2,0))
  shouldBomb(getSubList(tickerList,2,4))
  a = getSubList(tickerList[1:100],4,1)
  b = getSubList(tickerList[1:100],4,2)
  c = getSubList(tickerList[1:100],4,3)
  d = getSubList(tickerList[1:100],4,4)
  checkEquals(c(a,b,c,d),tickerList[1:100])
  checkEquals(a,tickerList[1:25])
  checkEquals(b,tickerList[26:50])
  checkEquals(c,tickerList[51:75])
  checkEquals(d,tickerList[76:100])
  a = getSubList(tickerList[1:105],4,1)
  b = getSubList(tickerList[1:105],4,2)
  c = getSubList(tickerList[1:105],4,3)
  d = getSubList(tickerList[1:105],4,4)
  checkEquals(c(a,b,c,d),tickerList[1:105])
  checkEquals(a,tickerList[1:26])
  checkEquals(b,tickerList[27:52])
  checkEquals(c,tickerList[53:78])
  checkEquals(d,tickerList[79:105])
}

test.removeCommasFromDataFrame <- function() {
  shouldBomb(removeCommasFromDataFrame('junk'))
  shouldBomb(removeCommasFromDataFrame(1))
  testData <- data.frame(col1 = c('497,190,785','3.73'), col2 = c('-100,000.100','2.11'), row.names = c('a','b'))
  output <- removeCommasFromDataFrame(testData, returnAsNumeric = TRUE)
  target <- data.frame(col1 = c(497190785, 3.73), col2 = c(-100000.100,2.11), stringsAsFactors = FALSE, row.names = c('a','b'))
  checkSame(output, target)
  testData <- data.frame(col1 = c('497,190,785','3.73'), col2 = c('-100,000.100','2.11'), row.names = c('a','b'))
  output <- removeCommasFromDataFrame(testData, returnAsNumeric = FALSE)
  target <- data.frame(col1 = c('497190785', '3.73'), col2 = c('-100000.100','2.11'), stringsAsFactors = FALSE, row.names = c('a','b'))
  checkSame(output, target)
}

test.mergeDataFrames <- function() {
  res <- mergeDataFrames(data.frame(a = '1'),b = data.frame(a = '5'))
  checkSame(res,data.frame(a = c('1','5')))
  res <- mergeDataFrames(data.frame(a = '1',b= 5),b = data.frame(a = '5'))
  checkSame(res,data.frame(a = c('1','5'),b = c(5,NA)))
  res <- mergeDataFrames(data.frame(a = '1'),b = data.frame(a = '5',b = 5))
  checkSame(res,data.frame(a = c('1','5'),b = c(NA,5)))
  res <- mergeDataFrames(data.frame(a = '1',e = 8),b = data.frame(a = '5',b = 5))
  checkSame(res,data.frame(a = c('1','5'),e = c(8,NA),b = c(NA,5)))
  res <- mergeDataFrames(data.frame(e = 8),b = data.frame(b = 5))
  checkSame(res,data.frame(e = c(8,NA),b = c(NA,5)))
  res <- mergeDataFrames(data.frame(a = '1',e = 8,k = TRUE),b = data.frame(a = '5',b = 5))
  checkSame(res,data.frame(a = c('1','5'),e = c(8,NA),k = c(TRUE,NA),b = c(NA,5)))
  shouldBomb(mergeDataFrames(1,data.frame(a = 5)))
  shouldBomb(mergeDataFrames(data.frame(a = 5),5))
  res <- mergeDataFrames(data.frame(a = c('1','5'),e = 7:8,k = c(TRUE,FALSE)),b = data.frame(a = '5',b = 5))
  checkSame(res,data.frame(a = c('1','5','5'),e = c(7,8,NA),k = c(TRUE,FALSE,NA),b = c(NA,NA,5)))
  res <- mergeDataFrames(data.frame(a = c('1','5'),e = 7:8,k = c(TRUE,FALSE),stringsAsFactors = FALSE),b = data.frame(a = c('5','10'),b = 5:6,stringsAsFactors = FALSE))
  checkSame(res,data.frame(a = c('1','5','5','10'),e = c(7,8,NA,NA),k = c(TRUE,FALSE,NA,NA),b = c(NA,NA,5,6),stringsAsFactors = FALSE))
  res <- mergeDataFrames(data.frame(a = '5',b = 5,stringsAsFactors = FALSE),data.frame(a = c('1','5'),e = 7:8,k = c(TRUE,FALSE),stringsAsFactors = FALSE))
  checkSame(res,data.frame(a = c('5','1','5'),b = c(5,NA,NA),e = c(NA,7,8),k = c(NA,TRUE,FALSE),stringsAsFactors = FALSE))
}

testAddSlashToDirectory <- function() {
  shouldBomb(addSlashToDirectory(1))
  checkSame(addSlashToDirectory('c:'), 'c:/')
  checkSame(addSlashToDirectory('c:/'), 'c:/')
}
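# A hedged sketch of running this file (not part of the original file).
# checkEquals/DEACTIVATED come from RUnit; checkSame/shouldBomb appear to be
# repo-specific helpers assumed to be loaded with GSFAnalytics. Run from the
# directory containing this file:
# library(RUnit)
# runTestFile("testUtilities.R", testFuncRegexp = "^test")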
/R/src/GSFAnalytics/tests/testUtilities.R
no_license
rsheftel/ratel
R
false
false
10,858
r
loadOdds <- function(matchIds, quelle, dbName = 'soccerlabdata') {
  matchIdStr <- paste(matchIds, sep = ',', collapse = ',')
  ### Reading bookie odds
  oddQuery <- paste('select spiel_id as matchId, quoteHeim as homeOdd, quoteAusw as visitorsOdd, quoteUnent as drawOdd from quote where quelle = \'', quelle, '\' and spiel_id in (', matchIdStr, ')', sep = '')
  con <- dbConnect(MySQL(), user="root", password="root", dbname=dbName)
  odds <- dbGetQuery(con, oddQuery)
  dbDisconnect(con)

  # Enrich odds with implied probabilities (reciprocal of the decimal odds)
  odds$HomeVictory <- 1 / odds$homeOdd
  odds$VisitorsVictory <- 1 / odds$visitorsOdd
  odds$Draw <- 1 / odds$drawOdd
  odds <- filter(odds, !is.infinite(Draw) & !is.infinite(VisitorsVictory) & !is.infinite(HomeVictory))
  odds <- filter(odds, matchId %in% matchIds)
  return(odds)
}

loadFinishedMatches <- function(toMatchday, seasons, leagues, dbName = 'soccerlabdata') {
  fullSeasons <- seasons[-length(seasons)]
  lastSeason <- seasons[length(seasons)]
  seasonsStr <- paste('\'', fullSeasons, '\'', sep = '')
  seasonsStr <- do.call(paste, c(as.list(seasonsStr), sep = ','))
  leaguesStr <- paste('\'', leagues, '\'', sep = '')
  leaguesStr <- do.call(paste, c(as.list(leaguesStr), sep = ','))
  lastSeasonStr <- paste('\'', lastSeason, '\'', sep = '')

  ## Reading matches
  matchQuery <- sprintf('select sp.id as matchId, sp.liga as league, sp.saison as season, sp.spieltag as matchday, sp.spielZeit as matchtime, sp.heimMan_id as homeTeamId, sp.auswMan_id as visitorsTeamId, heimAuf.transFormation as heimFormation, auswAuf.transFormation as auswFormation, sp.toreHeim as goalsHome, sp.toreAusw as goalsVisitors from spiel sp inner join aufstellung heimAuf on sp.heimAuf_id = heimAuf.id inner join aufstellung auswAuf on sp.auswAuf_id = auswAuf.id where (sp.saison in (%s) OR (sp.saison = %s and sp.spieltag <= %i)) and liga in (%s) and sp.toreHeim is not null and sp.toreAusw is not null order by sp.spielZeit desc, sp.id asc',
                        seasonsStr, lastSeasonStr, toMatchday, leaguesStr)
  con <- dbConnect(MySQL(), user="root", password="root", dbname=dbName)
  matches <- dbGetQuery(con, matchQuery)
  dbDisconnect(con)

  matches <- mutate(matches,
                    goalDiff = goalsHome - goalsVisitors,
                    matchResult = ifelse(goalDiff > 0, 'HomeVictory',
                                         ifelse(goalDiff < 0, 'VisitorsVictory', 'Draw')))
  matches <- mutate(matches,
                    matchResult = factor(matchResult,
                                         levels = c('VisitorsVictory', 'Draw', 'HomeVictory'),
                                         labels = c('VisitorsVictory', 'Draw', 'HomeVictory'),
                                         ordered = TRUE))
  matches <- arrange(matches, season, matchday, matchtime, matchId)
  return(matches)
}
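# A hypothetical usage sketch (not part of the original file). The season,
# league, and odds-source strings below are placeholders; RMySQL and dplyr
# must be loaded for the functions above to work.
# library(RMySQL)
# library(dplyr)
# matches <- loadFinishedMatches(toMatchday = 17,
#                                seasons = c("2013/2014", "2014/2015"),
#                                leagues = c("1. Bundesliga"))
# odds <- loadOdds(matches$matchId, quelle = "bwin")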
/dataProvider/matchesDao.R
no_license
Tobsen1x/BetLab
R
false
false
3,150
r
# overfitting and underfitting
# https://jjallaire.github.io/deep-learning-with-r-notebooks/notebooks/4.4-overfitting-and-underfitting.nb.html

# load data same as in ch03
library(keras)
imdb <- dataset_imdb(num_words = 10000)
c(c(train_data, train_labels), c(test_data, test_labels)) %<-% imdb

vectorize_sequences <- function(sequences, dimension = 10000) {
  # Create an all-zero matrix of shape (len(sequences), dimension)
  results <- matrix(0, nrow = length(sequences), ncol = dimension)
  for (i in 1:length(sequences))
    # Sets specific indices of results[i] to 1s
    results[i, sequences[[i]]] <- 1
  results
}

# Our vectorized training data
x_train <- vectorize_sequences(train_data)
# Our vectorized test data
x_test <- vectorize_sequences(test_data)
# Our vectorized labels
y_train <- as.numeric(train_labels)
y_test <- as.numeric(test_labels)

# Fighting overfitting
#------------------ Reducing the network's size---------------------------
original_model <- keras_model_sequential() %>%
  layer_dense(units = 16, activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 16, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")

original_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

# use smaller network
smaller_model <- keras_model_sequential() %>%
  layer_dense(units = 4, activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 4, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")

smaller_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

# plot losses from different models
library(ggplot2)
library(tidyr)
plot_training_losses <- function(losses) {
  loss_names <- names(losses)
  losses <- as.data.frame(losses)
  losses$epoch <- seq_len(nrow(losses))
  losses %>%
    gather(model, loss, loss_names[[1]], loss_names[[2]]) %>%
    ggplot(aes(x = epoch, y = loss, colour = model)) +
    geom_point()
}

# Validating our approach
val_indices <- 1:10000
x_val <- x_train[val_indices,]
partial_x_train <- x_train[-val_indices,]
y_val <- y_train[val_indices]
partial_y_train <- y_train[-val_indices]

# fit model using original model
original_hist <- original_model %>% fit(
  partial_x_train,
  partial_y_train,
  epochs = 20,
  batch_size = 512,
  validation_data = list(x_val, y_val))

# fit model using smaller model
smaller_hist <- smaller_model %>% fit(
  partial_x_train,
  partial_y_train,
  epochs = 20,
  batch_size = 512,
  validation_data = list(x_val, y_val))

# compare the losses over epochs
plot_training_losses(losses = list(
  original_model = original_hist$metrics$val_loss,
  smaller_model = smaller_hist$metrics$val_loss
))
# the smaller network shows less performance degradation when overfitting

# use a bigger model, for comparison
bigger_model <- keras_model_sequential() %>%
  layer_dense(units = 512, activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 512, activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")

bigger_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c('acc')
)

# fit data on bigger model
bigger_hist <- bigger_model %>% fit(
  partial_x_train,
  partial_y_train,
  epochs = 20,
  batch_size = 512,
  validation_data = list(x_val, y_val))

# plot bigger model vs original model
plot_training_losses(losses = list(
  original_model = original_hist$metrics$val_loss,
  bigger_model = bigger_hist$metrics$val_loss
))
# the bigger model starts overfitting very early

# plot training errors
plot_training_losses(losses = list(
  original_model = original_hist$metrics$loss,
  bigger_model = bigger_hist$metrics$loss
))
# the bigger model fits the training set almost perfectly; overfitting clearly occurs

# another method to reduce overfitting
# -------------------using weight regularization-------------------------
# add l2 regularization
l2_model <- keras_model_sequential() %>%
  layer_dense(units = 16, kernel_regularizer = regularizer_l2(0.001),
              activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 16, kernel_regularizer = regularizer_l2(0.001),
              activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")

l2_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("acc")
)

# fit the l2 model
l2_hist <- l2_model %>% fit(
  partial_x_train,
  partial_y_train,
  epochs = 20,
  batch_size = 512,
  validation_data = list(x_val, y_val))

# compare l2 with original
plot_training_losses(losses = list(
  original_model = original_hist$metrics$val_loss,
  l2_model = l2_hist$metrics$val_loss
))
# the l2 model is more resistant to overfitting than the original model

# L1 regularization
# regularizer_l1(0.001)
# L1 and L2 regularization at the same time
# regularizer_l1_l2(l1 = 0.001, l2 = 0.001)

# using dropout method
#------------------------dropout method-------------------------
dpt_model <- keras_model_sequential() %>%
  layer_dense(units = 16, activation = "relu", input_shape = c(10000)) %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 16, activation = "relu") %>%
  layer_dropout(rate = 0.5) %>%
  layer_dense(units = 1, activation = "sigmoid")

dpt_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("acc")
)

dpt_hist <- dpt_model %>% fit(
  x_train,
  y_train,
  epochs = 20,
  batch_size = 512,
  validation_data = list(x_test, y_test)
)

# compare dropout vs original model
plot_training_losses(losses = list(
  original_model = original_hist$metrics$val_loss,
  dpt_model = dpt_hist$metrics$val_loss
))
# lower losses for the dropout model on the validation dataset

#-------------most common ways to reduce overfitting----------------------
# 1---Getting more training data.
# 2---Reducing the capacity of the network.
# 3---Adding weight regularization.
# 4---Adding dropout.
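# A short sketch of the L1 variant mentioned above (not part of the original
# script): same architecture as l2_model, swapping in regularizer_l1. It can
# be fit and compared with plot_training_losses() exactly like l2_model.
l1_model <- keras_model_sequential() %>%
  layer_dense(units = 16, kernel_regularizer = regularizer_l1(0.001),
              activation = "relu", input_shape = c(10000)) %>%
  layer_dense(units = 16, kernel_regularizer = regularizer_l1(0.001),
              activation = "relu") %>%
  layer_dense(units = 1, activation = "sigmoid")

l1_model %>% compile(
  optimizer = "rmsprop",
  loss = "binary_crossentropy",
  metrics = c("acc")
)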
/DeepLearningR/Ch04_overfitting_imdb.R
no_license
PyRPy/Keras_R
R
false
false
6,138
r
x=c(12,32,54,33,21,65)
x=c(x,55,32)
x

A = matrix (c (1,0,2,-6,3,0), nrow=2, ncol=3); A
B = t (A); B;

A=matrix(c(54,49,49,41,26,43,49,50,58,71),nrow=5,ncol=2)
A
rowSums(A)
colSums(A)
rowMeans(A)
colMeans(A)
# max of each column
apply(A,2,max)
# min of each row
apply(A,1,min)

data (iris)
help (data)
ls();
iris
names (iris);
dim (iris);
plot (iris[ , 3], iris[ , 4]);
example(plot)

cars <- c(1, 3, 6, 4, 9)
trucks <- c(2, 5, 4, 5, 12)
plot(cars, type="o", col="blue", ylim=c(0,12))
lines(trucks, type="o", pch=22, lty=2, col="red")
title(main="Autos", col.main="red", font.main=4)

jpeg("myplotiris.jpg")
plot(iris[,3],iris[,4])
dev.off()

# Export data
write.table(mydata, "mydata.txt", sep="\t")
# Import data
a<-read.table("a.txt")

mydata <- read.csv("CSE413-T02 Weather.csv", header=TRUE, sep=",")
names(mydata)
str(mydata)
mydata
head(mydata, n=10)
tail(mydata, n=5)
mydata$Coldness <- factor(mydata$Coldness, levels = c(1,2,3), labels = c("red", "blue", "green"))

x = c(2,4,6,8)
x = c(x, 7, 9)
x
x = "abcdef"
x
# substr(x, 2, 4)
toupper(x)
x
substr(x,4,4) <- "g"
strsplit(x,"")

rep(1,2,4)
rep(1:2,4)
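# a small extra illustration in the same vein (not in the original notes):
# apply also accepts custom anonymous functions, using the matrix A above
apply(A, 2, function(col) max(col) - min(col)) # range of each column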
/class/L02.r
no_license
yaoshanliang/XJTLU-CSE-413
R
false
false
1,128
r
\name{snntsdensityplot}
\Rdversion{2.1}
\alias{snntsdensityplot}
\title{Plots a SNNTS density for spherical data}
\description{Computes the points needed to plot the SNNTS density function for spherical data}
\usage{snntsdensityplot(long, lat, cpars = 1, M = c(0,0))}
\arguments{
  \item{long}{Grid for longitude. Vector with values between zero and 2*pi}
  \item{lat}{Grid for latitude. Vector with values between zero and pi}
  \item{cpars}{Vector of complex numbers of dimension prod(M+1). The sum of the squared moduli of the c parameters must be equal to one}
  \item{M}{Vector with the number of components in the SNNTS for each dimension}
}
\value{The points needed to plot the SNNTS density function}
\references{Fernandez-Duran, J. J. and Gregorio-Dominguez, M. M. (2008) Spherical Distributions Based on Nonnegative Trigonometric Sums, Working Paper, Statistics Department, ITAM, DE-C08.6}
\author{Juan Jose Fernandez-Duran and Maria Mercedes Gregorio-Dominguez}
\note{The parameters cpars used by this function are the transformed parameters of the SNNTS density function, which lie on the surface of the unit hypersphere}
\examples{
set.seed(200)
data(Datab6fisher_ready)
data<-Datab6fisher_ready
M<-c(4,4)
cest<-snntsmanifoldnewtonestimation(data, M, iter=150)
cpars<-cest$cestimates[,3]
longitud<-seq(0,360,10)*(pi/180)
latitud<-seq(0,180,5)*(pi/180)
z<-outer(longitud,latitud,FUN="snntsdensityplot",cpars,M)
persp(longitud,latitud,z,theta=45,phi=30)
contour(longitud,latitud,z)
points(data[,1],data[,2])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/man/snntsdensityplot.Rd
no_license
cran/CircNNTSR
R
false
false
1,737
rd
setwd("/datascience/projects/statisticallyfit/github/learningmathstat/RStatistics/STAT210 Statistical Modelling and Experimental Design/ASSIGNMENTS/A2/") source('/datascience/projects/statisticallyfit/github/learningmathstat/RStatistics/Rfunctions.R') library(ggplot2) library(car) options(digits=10, show.signif.stars = F) # part a) bflowData <- read.table("bloodflow.txt", header=TRUE) # Scatterplot suggests there are 3 peaks and troughs ggplot(bflowData, aes(x=AOT, y=BF)) + geom_point(shape=19, size=3) + ggtitle("Arterial Oxygen Tension vs. Bloodflow") # Polynomial order = peaks/troughs - 1. Here is looks like peaks/troughs = 3 # so at most 3+1= 4th order could be fitted, perhaps. # There is definite curvature so at least a quadratic model could be fitted. # part b) # see: linear is definitely not appropriate: anova term for AOT # means model with AOT is not significant. That is the p-value of the # global F-test. bflow.1.lm <- lm(BF ~ AOT, data=bflowData) anova(bflow.1.lm) # diagnostics: there is curvature in residuals, which suggests the # quadratic term is missing. par(mfrow=c(1,1)) crPlot(bflow.1.lm, variable="AOT", main="Partial Residuals for AOT in Linear Model") summary(bflow.1.lm) # --------------------------------------------------------------------- bflow.2.lm <- lm(BF ~ AOT + I(AOT^2), data=bflowData) anova(bflow.2.lm) # quadratic model is significant, given linear # model has been fitted, so continue # Partial plot suggests curvature was removed. par(mfrow=c(1,1)) crPlot(bflow.2.lm, variable="AOT", main="Partial Residuals for AOT in Quadratic Model") summary(bflow.2.lm) betaCI(bflow.2.lm) # --------------------------------------------------------------------- bflow.3.lm <- lm(BF ~ AOT + I(AOT^2) + I(AOT^3), data=bflowData) anova(bflow.3.lm) # cubic model isn't significant so just use quadratic. # part c) from = min(bflowData$AOT) to = max(bflowData$AOT) xs <- data.frame(AOT=seq(from=from,to=to, len=nrow(bflowData))) # the levels are by default 95% for the CIs CI <- data.frame(predict(bflow.2.lm, interval="confidence", newdata=xs)) # placing also the AOT generated values here for plotting purposes. pred.df <- data.frame(AOT=xs$AOT, fit=CI$fit, lwr=CI$lwr, upr=CI$upr) # Plotting confidence bands p.data = ggplot(bflowData, aes(x=AOT, y=BF)) + geom_point(shape=19, size=3) p.fits = p.data + geom_line(data=pred.df, aes(y=fit, colour="a", linetype="a"),size=1) + geom_line(data=pred.df, aes(y=lwr, colour="b", linetype="b"),size=1) + geom_line(data=pred.df, aes(y=upr, colour="b", linetype="b"),size=1) p.plot <- p.fits + ggtitle("Predicted and Observed Values of BF vs AOT and 95% Confidence Bands") + scale_colour_manual(name="Legend", values=c("a"="red", "b"="dodgerblue"), labels=c("Fitted Line", "95%\nConfidence\nBands")) + scale_linetype_manual(name="Legend", values=c("a"="solid", "b"="dashed"), labels=c("Fitted Line", "95%\nConfidence\nBands")) p.plot # NOTE: my addition later - this works!!!! ------------------------------------ plotConfidenceBands.lm(bflow.3.lm) plotConfPredBands.lm(bflow.2.lm) plotConfPredBands.lm(bflow.3.lm) # ----------------------------------------------------------------------------- # part d) # Check residuals and normality par(mfrow=c(2,2)) plot(bflow.2.lm, add.smooth=FALSE, which=c(1,2,3,5), cook.levels=c(0.2,0.5,1.0)) # Checking normality assumption: formal test. shapiro.test(bflow.2.lm$residuals) # no deviation from normality. # Checking influential points # This is a function to calculate leverage values of all observations # and compare them to the mean. 
If any are greater than the h.mean # then they are influential. influence.leverageValues <- function(fit){ hs <- hatvalues(fit) k <- length(fit$model) - 1 n <- nrow(fit$model) h.mean <- 2*(k+1)/n isInfluential <- hs > h.mean return(data.frame(InfluentialPoints=hs, CutOffInflMean=h.mean, IsInfluential=isInfluential)) } # this is a function to compare the cooks distances with the critical value # at the cutoff point: if any cooks value is greater than the cooks critical # value at the 50th percentile on the F(k+1, n-k-1) distribution, then # that observation is influential. influence.cooksDistances <- function(fit) { cks <- cooks.distance(fit) k <- length(fit$model) - 1 n <- nrow(fit$model) Fcrit <- qf(0.5, df1=k+1, df2=n-k-1) isInfluential <- cks > Fcrit return(data.frame(CooksPoints=cks, CutOffFcrit=Fcrit, IsInfluential=isInfluential)) } leverageInfo <- influence.leverageValues(bflow.2.lm) obs14 <- which(leverageInfo$IsInfluential) # So observation 14 is influential # The leverage of the 14th observation is about 0.4508 > h.mean = 0.4 leverageInfo[obs14, ] cookInfo <- influence.cooksDistances(bflow.2.lm) which(cookInfo$IsInfluential) # integer(0) array so none are past the # cutoff cooks value. cookInfo$CutOffFcrit[1] cookInfo[obs14,] # its value is close to the cutoff, but not past it. cookInfo[2, ] # observation 2 is not close to the cooks cutoff. cookInfo[4, ] # observation 4 is not close to the cooks cutoff.
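# ----------------------------------------------------------------------------
# Note (added): betaCI() used above comes from the sourced Rfunctions.R, which
# is not shown in this file. A minimal sketch of what such a helper might look
# like, assuming it merely tabulates the coefficient estimates alongside their
# 95% confidence intervals via confint(), is:
betaCI.sketch <- function(fit, level = 0.95) {
   # Combine point estimates with the t-based interval bounds from confint()
   data.frame(Estimate = coef(fit), confint(fit, level = level),
              check.names = FALSE)
}
# Hypothetical usage: betaCI.sketch(bflow.2.lm) would list the quadratic
# model's coefficients with their lower and upper 95% bounds.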
/STAT210 Statistical Modelling and Experimental Design/ASSIGNMENTS/A2/Question3_bloodflow.R
no_license
statisticallyfit/RStatistics
R
false
false
5,305
r
setwd("/datascience/projects/statisticallyfit/github/learningmathstat/RStatistics/STAT210 Statistical Modelling and Experimental Design/ASSIGNMENTS/A2/") source('/datascience/projects/statisticallyfit/github/learningmathstat/RStatistics/Rfunctions.R') library(ggplot2) library(car) options(digits=10, show.signif.stars = F) # part a) bflowData <- read.table("bloodflow.txt", header=TRUE) # Scatterplot suggests there are 3 peaks and troughs ggplot(bflowData, aes(x=AOT, y=BF)) + geom_point(shape=19, size=3) + ggtitle("Arterial Oxygen Tension vs. Bloodflow") # Polynomial order = peaks/troughs - 1. Here is looks like peaks/troughs = 3 # so at most 3+1= 4th order could be fitted, perhaps. # There is definite curvature so at least a quadratic model could be fitted. # part b) # see: linear is definitely not appropriate: anova term for AOT # means model with AOT is not significant. That is the p-value of the # global F-test. bflow.1.lm <- lm(BF ~ AOT, data=bflowData) anova(bflow.1.lm) # diagnostics: there is curvature in residuals, which suggests the # quadratic term is missing. par(mfrow=c(1,1)) crPlot(bflow.1.lm, variable="AOT", main="Partial Residuals for AOT in Linear Model") summary(bflow.1.lm) # --------------------------------------------------------------------- bflow.2.lm <- lm(BF ~ AOT + I(AOT^2), data=bflowData) anova(bflow.2.lm) # quadratic model is significant, given linear # model has been fitted, so continue # Partial plot suggests curvature was removed. par(mfrow=c(1,1)) crPlot(bflow.2.lm, variable="AOT", main="Partial Residuals for AOT in Quadratic Model") summary(bflow.2.lm) betaCI(bflow.2.lm) # --------------------------------------------------------------------- bflow.3.lm <- lm(BF ~ AOT + I(AOT^2) + I(AOT^3), data=bflowData) anova(bflow.3.lm) # cubic model isn't significant so just use quadratic. # part c) from = min(bflowData$AOT) to = max(bflowData$AOT) xs <- data.frame(AOT=seq(from=from,to=to, len=nrow(bflowData))) # the levels are by default 95% for the CIs CI <- data.frame(predict(bflow.2.lm, interval="confidence", newdata=xs)) # placing also the AOT generated values here for plotting purposes. pred.df <- data.frame(AOT=xs$AOT, fit=CI$fit, lwr=CI$lwr, upr=CI$upr) # Plotting confidence bands p.data = ggplot(bflowData, aes(x=AOT, y=BF)) + geom_point(shape=19, size=3) p.fits = p.data + geom_line(data=pred.df, aes(y=fit, colour="a", linetype="a"),size=1) + geom_line(data=pred.df, aes(y=lwr, colour="b", linetype="b"),size=1) + geom_line(data=pred.df, aes(y=upr, colour="b", linetype="b"),size=1) p.plot <- p.fits + ggtitle("Predicted and Observed Values of BF vs AOT and 95% Confidence Bands") + scale_colour_manual(name="Legend", values=c("a"="red", "b"="dodgerblue"), labels=c("Fitted Line", "95%\nConfidence\nBands")) + scale_linetype_manual(name="Legend", values=c("a"="solid", "b"="dashed"), labels=c("Fitted Line", "95%\nConfidence\nBands")) p.plot # NOTE: my addition later - this works!!!! ------------------------------------ plotConfidenceBands.lm(bflow.3.lm) plotConfPredBands.lm(bflow.2.lm) plotConfPredBands.lm(bflow.3.lm) # ----------------------------------------------------------------------------- # part d) # Check residuals and normality par(mfrow=c(2,2)) plot(bflow.2.lm, add.smooth=FALSE, which=c(1,2,3,5), cook.levels=c(0.2,0.5,1.0)) # Checking normality assumption: formal test. shapiro.test(bflow.2.lm$residuals) # no deviation from normality. # Checking influential points # This is a function to calculate leverage values of all observations # and compare them to the mean. 
If any are greater than the h.mean # then they are influential. influence.leverageValues <- function(fit){ hs <- hatvalues(fit) k <- length(fit$model) - 1 n <- nrow(fit$model) h.mean <- 2*(k+1)/n isInfluential <- hs > h.mean return(data.frame(InfluentialPoints=hs, CutOffInflMean=h.mean, IsInfluential=isInfluential)) } # this is a function to compare the cooks distances with the critical value # at the cutoff point: if any cooks value is greater than the cooks critical # value at the 50th percentile on the F(k+1, n-k-1) distribution, then # that observation is influential. influence.cooksDistances <- function(fit) { cks <- cooks.distance(fit) k <- length(fit$model) - 1 n <- nrow(fit$model) Fcrit <- qf(0.5, df1=k+1, df2=n-k-1) isInfluential <- cks > Fcrit return(data.frame(CooksPoints=cks, CutOffFcrit=Fcrit, IsInfluential=isInfluential)) } leverageInfo <- influence.leverageValues(bflow.2.lm) obs14 <- which(leverageInfo$IsInfluential) # So observation 14 is influential # The leverage of the 14th observation is about 0.4508 > h.mean = 0.4 leverageInfo[obs14, ] cookInfo <- influence.cooksDistances(bflow.2.lm) which(cookInfo$IsInfluential) # integer(0) array so none are past the # cutoff cooks value. cookInfo$CutOffFcrit[1] cookInfo[obs14,] # its value is close to the cutoff, but not past it. cookInfo[2, ] # observation 2 is not close to the cooks cutoff. cookInfo[4, ] # observation 4 is not close to the cooks cutoff.
#' Decomposing scores into miscalibration, discrimination and uncertainty
#'
#' An object of class \code{reliabilitydiag} contains the observations, the
#' original forecasts, and recalibrated forecasts given by isotonic regression.
#' The function \code{summary.reliabilitydiag} calculates quantitative measures
#' of predictive performance, miscalibration, discrimination,
#' and uncertainty, for each of the prediction methods in relation to their
#' recalibrated version.
#'
#' Predictive performance is measured by the mean score of the original
#' forecast values, denoted by \eqn{S}.
#'
#' Uncertainty, denoted by \eqn{UNC}, is the mean score of a constant
#' prediction at the value of the average observation.
#' It is the highest possible mean score of a calibrated prediction method.
#'
#' Discrimination, denoted by \eqn{DSC}, is \eqn{UNC} minus the mean score
#' of the PAV-recalibrated forecast values.
#' A small value indicates a low information content (low signal) in the
#' original forecast values.
#'
#' Miscalibration, denoted by \eqn{MCB}, is \eqn{S} minus the mean score
#' of the PAV-recalibrated forecast values.
#' A high value indicates that the predictive performance of the prediction
#' method can be improved by recalibration.
#'
#' These measures are related by the following equation,
#' \deqn{S = MCB - DSC + UNC.}
#' Score decompositions of this type have been studied extensively, but the
#' optimality of the PAV solution ensures that \eqn{MCB} is nonnegative,
#' regardless of the chosen (admissible) scoring function.
#' This is a unique property achieved by choosing PAV-recalibration.
#'
#' If deviating from the Brier score as performance metric, make sure to choose
#' a proper scoring rule for binary events, or equivalently,
#' a scoring function with outcome space \{0, 1\} that is consistent for the
#' expectation functional.
#'
#' @param object an object inheriting from the class \code{'reliabilitydiag'}.
#' @param ... further arguments to be passed to or from methods.
#' @param score currently only \code{"brier"} or a vectorized scoring function,
#'   that is, \code{function(observation, prediction)}.
#'
#' @return
#' A \code{'summary.reliabilitydiag'} object, which is also a
#' tibble (see \code{\link[tibble:tibble]{tibble::tibble()}}) with columns:
#' \tabular{ll}{
#'   \code{forecast} \tab the name of the prediction method.\cr
#'   \code{mean_score} \tab the mean score of the original
#'     forecast values.\cr
#'   \code{miscalibration} \tab a measure of miscalibration
#'     (\emph{how reliable is the prediction method?}),
#'     smaller is better.\cr
#'   \code{discrimination} \tab a measure of discrimination
#'     (\emph{how variable are the recalibrated predictions?}),
#'     larger is better.\cr
#'   \code{uncertainty} \tab the mean score of a constant prediction at the
#'     value of the average observation.
#' }
#'
#' @examples
#' data("precip_Niamey_2016", package = "reliabilitydiag")
#' r <- reliabilitydiag(
#'   precip_Niamey_2016[c("Logistic", "EMOS", "ENS", "EPC")],
#'   y = precip_Niamey_2016$obs,
#'   region.level = NA
#' )
#' summary(r)
#' summary(r, score = function(y, x) (x - y)^2)
#'
#' @export
summary.reliabilitydiag <- function(object, ..., score = "brier") {
  r <- object
  if (identical(length(r), 0L)) {
    sr <- sprintf(
      "empty reliabilitydiag: 0 prediction methods for %i observations.",
      length(attr(r, "y")))
    class(sr) <- c("summary.reliabilitydiag", class(sr))
    return(sr)
  }
  score <- rlang::enquo(score)
  sr <- decomposition(r, score = rlang::eval_tidy(score))
  attr(sr, "score")$name <- if (is.character(rlang::eval_tidy(score))) {
    rlang::as_name(score)
  } else {
    rlang::as_label(score)
  }
  # Store the scoring function itself; a score given by name (e.g. "brier")
  # is resolved to the corresponding function, as in decomposition().
  attr(sr, "score")$fn <- rlang::eval_tidy(score) %>%
    (function(x) if (is.character(x)) get(x) else x)
  class(sr) <- c("summary.reliabilitydiag", class(sr))
  sr
}

decomposition <- function(r, score = "brier") {
  stopifnot(is.reliabilitydiag(r))
  # A score given by name is resolved to the corresponding function.
  if (is.character(score) && identical(length(score), 1L)) {
    score <- get(score)
  }
  lapply(r, function(l) {
    tibble::tibble(
      mean_score = with(l$cases, mean(score(y, x))),         # S
      uncertainty = with(l$cases, mean(score(y, mean(y)))),  # UNC
      Sc = with(l$cases, mean(score(y, CEP_pav))),           # recalibrated mean score
      discrimination = .data$uncertainty - .data$Sc,         # DSC
      miscalibration = .data$mean_score - .data$Sc           # MCB
    )
  }) %>%
    dplyr::bind_rows(.id = "forecast") %>%
    dplyr::select(.data$forecast, .data$mean_score, .data$miscalibration,
                  .data$discrimination, .data$uncertainty)
}

brier <- function(y, x) {
  (x - y)^2
}
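# ----------------------------------------------------------------------------
# Note (added): a minimal, self-contained numerical check of the identity
# S = MCB - DSC + UNC, sketched outside the package code. It is not the
# package's own machinery: stats::isoreg() stands in for the package's
# isotonic (PAV) recalibration, and brier() from above supplies the score.
local({
  set.seed(1)
  x <- runif(100)                          # hypothetical forecast probabilities
  y <- rbinom(100, 1, plogis(4 * x - 2))   # hypothetical binary outcomes
  cep <- isoreg(x, y)$yf[order(order(x))]  # PAV fit, mapped back to input order
  S   <- mean(brier(y, x))                 # mean score of original forecasts
  Sc  <- mean(brier(y, cep))               # mean score after recalibration
  UNC <- mean(brier(y, mean(y)))           # score of the constant climatology
  MCB <- S - Sc                            # miscalibration, nonnegative by PAV optimality
  DSC <- UNC - Sc                          # discrimination
  all.equal(S, MCB - DSC + UNC)            # TRUE: the decomposition is exact
})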
/R/summary.R
no_license
XuetongWang-DRR/reliabilitydiag
R
false
false
4,641
r
library(ape)

simtree <- function(nsamplesinpresenttime) {
  # Initiate the list of nodes and their heights in the tree (height is time,
  # measured from the present and going back in time), with one node per
  # sample in the present, each at height 0 since we start in the present.
  nodes <- rep(0, times=nsamplesinpresenttime)
  names(nodes) <- 1:nsamplesinpresenttime

  # Initiate the time of the last coalescence event (set to 0 before we begin).
  T <- 0

  # Simulate one coalescence event after the other until all samples have coalesced
  for(i in seq(1, nsamplesinpresenttime-1)) {
    nodecount <- length(nodes) # node count

    # Pick two random nodes to coalesce
    tocoalesce <- sample(nodecount, size=2)

    # Sample a random coalescence time (from the exponential distribution)
    coalescencerate = nodecount*(nodecount-1)/2
    coalescencetime <- rexp(1, rate=coalescencerate)
    print(paste("Time to coalescence when there are", nodecount, "nodes"))
    print(coalescencetime)

    # Set the time of the coalescence event, measured from the present
    T <- T+coalescencetime

    # Get the heights of the two nodes
    left <- nodes[tocoalesce[1]]
    right <- nodes[tocoalesce[2]]

    # Get branch lengths
    leftbranchlength <- T-left
    rightbranchlength <- T-right

    # Make the new node name (here slightly more complicated, to make plotting
    # later easier): we name it by the names of the two coalesced nodes, each
    # followed by its branch length = Newick tree format.
    cnode <- paste("(", names(left), ":", leftbranchlength, ",",
                   names(right), ":", rightbranchlength, ")", sep="")

    # Remove the two merged nodes
    nodes <- nodes[-tocoalesce]
    # Add the new composite node
    nodes <- c(nodes, T)                  # first the height,
    names(nodes)[length(nodes)] <- cnode  # then the name
  }

  # Add a semicolon, a Newick requirement
  return(paste0(names(nodes), ";"))
}

# Test:
# Simulate the tree:
#newicktree <- simtree(10)
#newicktree
## [1] "(((6:0.207031552610069,(2:0.0444964009647568,1:0.0444964009647568):0.162535151645312):0.3157373523820487

# Plot the tree:
#ct <- read.tree(text=newicktree); plot(ct)
#add.scale.bar(cex = 0.7, font = 2, col = "red")

#set.seed(8); newicktree <- simtree(5); ct <- read.tree(text=newicktree); plot(ct); add.scale.bar(cex = 0.7, font = 2, col = "red")
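# ----------------------------------------------------------------------------
# Note (added): a quick sanity check of the simulator, not part of the original
# tutorial. With time in coalescent units and rate choose(k, 2) while k
# lineages remain, as above, the expected tree height is
# E[T_MRCA] = 2 * (1 - 1/n). Averaging the root height over many simulated
# trees should come close to this value (left commented out, like the tests
# above, since simtree() prints at every coalescence):
#tmrca <- function(newick) max(node.depth.edgelength(read.tree(text = newick)))
#n <- 5
#heights <- replicate(500, tmrca(simtree(n)))
#mean(heights)     # should be close to the theoretical value below
#2 * (1 - 1/n)     # = 1.6 for n = 5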
/simulatecoalescencetrees.R
no_license
FerRacimo/CopenhagenTutorial
R
false
false
2,264
r