content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# COMPARE TO THE SAME FILE IN EZH2_final_MAPQ, HERE TAD HEADER FILE = TRUE
# Print numbers in fixed (not scientific) notation, e.g. for genomic coordinates
options(scipen=100)
startTime <- Sys.time()
suppressPackageStartupMessages(library(optparse, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))

# Command-line options; -s/-e limits are given in bp and converted to
# bin indices further below using -b/--bin_size.
# (fix: metavar now matches the declared type for the integer options)
option_list <- list(
make_option(c("-f", "--feature_file"), type="character", default=NULL,
help="input feature (gene) file", metavar="character"),
make_option(c("-m", "--matrix_file"), type="character", default=NULL,
help="input matrix file", metavar="character"),
make_option(c("-s", "--start_matrix"), type="integer", default=NULL,
help="draw from start_matrix (in bp !)", metavar="integer"),
make_option(c("-e", "--end_matrix"), type="integer", default=NULL,
help="draw to end_matrix (in bp !)", metavar="integer"),
make_option(c("-o", "--output_file"), type="character", default=NULL,
help="path to output file", metavar="character"),
make_option(c("-k", "--col_to_skip"), type="integer", default=NULL,
help="columns to skip", metavar="integer"),
make_option(c("-c", "--chromo"), type="character", default=NULL,
help="chromosome to draw", metavar="character"),
make_option(c("-b", "--bin_size"), type="integer", default=NULL,
help="binning size", metavar="integer")
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)
# Fail fast if any required argument is missing.
# (fix: use the scalar, short-circuiting '||' in an if() condition
# rather than the vectorized '|')
if(is.null(opt$matrix_file) || is.null(opt$bin_size) ||
is.null(opt$output_file) ) {
stop("Missing arguments \n")
}
chromo <- opt$chromo
featureFile <- opt$feature_file
matrixFile <- opt$matrix_file
binSize <- opt$bin_size
# default: skip the first 3 columns of the matrix file
# (scalar if/else instead of ifelse() on a length-1 condition)
skipcol <- if(is.null(opt$col_to_skip)) 3 else opt$col_to_skip
start_matrix <- opt$start_matrix
end_matrix <- opt$end_matrix
# dataset label = matrix file name stripped of its "_chr*_*" suffix
# (shown above the plot with mtext() below)
ds <- basename(matrixFile)
ds <- gsub("(.+)_chr.+_.+", "\\1", ds)
outFile <- opt$output_file
# create the output directory; portable replacement for
# system("mkdir -p ...") -- the original also called it twice, once is enough
dir.create(dirname(outFile), recursive = TRUE, showWarnings = FALSE)
stopifnot(file.exists(matrixFile))
if(!is.null(featureFile)) stopifnot(file.exists(featureFile))
# graphics device is chosen from the output file extension (pdf/svg/png/...)
plotType <- gsub(".+\\.(.+?)$", "\\1", basename(outFile))
# pdf/svg device sizes are in inches, bitmap devices in pixels
myHeight <- if(plotType == "pdf" || plotType == "svg") 7 else 480
myWidth <- myHeight
imageColPalette <- colorRampPalette(c("blue", "red"))( 12 )
matrixFormat <- "domaincaller"
########################################## HARD-CODED PARAMETERS
# matrixHeader: does the (non-dekker) matrix file have a header row?
matrixHeader <- FALSE
# featureHeader: does the feature (gene) file have a header row?
featureHeader <- FALSE
# color used to overlay feature positions on the heatmap
featureCol <- "cyan"
#### DROP THE FIRST COLUMNS OF THE MATRIX
cat(paste0("... load matrix data\t", Sys.time(), "\t"))
# NOTE(review): matrixFormat is hard-coded to "domaincaller" above, so the
# "dekker" branch is currently dead code kept for a second input format.
if(matrixFormat == "dekker") {
# "dekker" format: first line skipped, first column holds bin labels,
# followed by 'skipcol' annotation columns, then the square contact matrix
matrixDT <- read.delim(matrixFile, header=T, skip = 1, check.names = F)
cat(paste0(Sys.time(), "\n"))
rownames(matrixDT) <- matrixDT[,1]
matrixDT[,1] <- NULL
# after moving the label column to rownames there must remain exactly
# nrow + skipcol columns
stopifnot(ncol(matrixDT) == nrow(matrixDT) + skipcol)
stopifnot(!is.na(colnames(matrixDT)))
stopifnot(!is.na(rownames(matrixDT)))
if(skipcol > 0)
matrixDT <- matrixDT[,-c(1:skipcol)]
# sanity: square matrix with matching row/column labels, labels then dropped
stopifnot(colnames(matrixDT) == rownames(matrixDT))
stopifnot(nrow(matrixDT) == ncol(matrixDT) )
rownames(matrixDT) <- colnames(matrixDT) <- NULL
} else {
# e.g. "domaincaller" format: no header, 'skipcol' leading columns
# (presumably chromo/start/end -- TODO confirm) before the square matrix
matrixDT <- read.delim(matrixFile, header=matrixHeader, stringsAsFactors = FALSE)
cat(paste0(Sys.time(), "\n"))
stopifnot(ncol(matrixDT) == nrow(matrixDT) + skipcol)
if(skipcol > 0)
matrixDT <- matrixDT[,-c(1:skipcol)]
stopifnot(nrow(matrixDT) == ncol(matrixDT) )
}
cat("... discard data don't want to plot\n")
#### PREPARE THE MATRIX - SELECT FROM THE MATRIX THE AREA WE WANT TO PLOT
# Convert the requested bp limits into 1-based (inclusive) bin indices;
# missing limits default to the full matrix.
if(is.null(start_matrix)) {
start_matrix <- 1
} else {
# convert the start limit in bp to bin
start_matrix <- floor(start_matrix/binSize) + 1
}
if(start_matrix > nrow(matrixDT)) {
stop("... want to start plotting after the end of the matrix!\n")
}
if(is.null(end_matrix)) {
end_matrix <- nrow(matrixDT)
} else {
# convert the end limit in bp to an inclusive end bin
end_matrix <- ceiling(end_matrix/binSize)
if(end_matrix > ncol(matrixDT)){
cat("! WARNING: wanted end position is after end of the data, will plot up to the end\n")
end_matrix <- ncol(matrixDT)
}
}
stopifnot(end_matrix >= start_matrix)
stopifnot(start_matrix > 0 & end_matrix <= ncol(matrixDT))
cat("... will draw from bin:\t", start_matrix, "\tto:\t", end_matrix , "(inclusive)\n")
# keep only the requested square sub-matrix
matrixDT <- matrixDT[start_matrix:end_matrix, start_matrix:end_matrix]
# revert the matrix to have the plot from topleft to bottom right
# (transpose + reverse columns so row 1 ends up at the top of the image)
drawMatrixDT <- t(matrixDT)[,nrow(matrixDT):1]
#### PREPARE THE TADs - ADJUST POSITIONS
# offset (in bins) between genome-wide bin indices and the plotted sub-matrix
shift_bin <- start_matrix - 1
# open the graphics device whose function name matches the output extension
# (e.g. pdf()/svg() sized in inches, png() in pixels; see myHeight above)
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
totBin <- nrow(matrixDT) + 1
# axis positions: bin i is drawn centered at i + 0.5
axLab <- seq(1.5, length.out=nrow(matrixDT))
# image(x=axLab, y=axLab, as.matrix(drawMatrixDT),
# xlab="", ylab="",
# xaxt = "n", yaxt="n")
cat("... draw the image\n")
# log10 with a small pseudo-count so zero-contact bins stay finite
image(x=axLab, y=axLab, as.matrix(log10(drawMatrixDT+0.001)),
xlab="", ylab="",
xaxt = "n", yaxt="n",
col = imageColPalette)
mtext(ds, side=3)
# title: chromosome + plotted range in bp, with bin indices in parentheses
title(paste0(chromo, " - ", 1+binSize*(start_matrix-1), "(", start_matrix, "):", end_matrix*binSize, "(", end_matrix,")"))
### add starts for the genes if provided
if(!is.null(featureFile)){
cat("... add feature segments \n")
featureDT <- read.delim(featureFile, header=featureHeader, stringsAsFactors = FALSE)
# accepted formats: BED-like 3 columns (chromo/start/end)
# or 4 columns with an additional gene name
if(ncol(featureDT) == 3){
colnames(featureDT) <- c("chromo", "start", "end")
labelFeature <- FALSE
} else if(ncol(featureDT) == 4){
colnames(featureDT) <- c("chromo", "start", "end", "gene")
} else{
stop("unknown format feature file\n")
}
# keep only the features lying on the plotted chromosome
featureDT <- featureDT[featureDT$chromo == chromo,]
if(nrow(featureDT) > 0){
for(i in 1:nrow(featureDT)) {
# feature bp range -> bin range, shifted into sub-matrix coordinates
firstBin <- floor(featureDT$start[i]/binSize)+1 -shift_bin
lastBin <- ceiling(featureDT$end[i]/binSize) -shift_bin
stopifnot(lastBin >= firstBin)
# mark every covered bin on the heatmap diagonal
# NOTE(review): bins outside the plotted window yield coordinates
# beyond the plot region -- presumably clipped by the device; verify
for(feat_bin in firstBin:lastBin){
my_xpos <- (feat_bin + (feat_bin+1))*0.5
my_ypos <- (totBin-feat_bin+1 + totBin -feat_bin)*0.5
points(x=my_xpos, y=my_ypos, pch=16, cex = 1, adj=0.5, col = featureCol)
}
}
}
}
# assign dev.off() to a throwaway variable to suppress the printed device number
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
################################################################################################################################################
################################################################################################################################################
################################################################################################################################################
cat("*** DONE\n")
# report script start and end time
cat(paste0(startTime, "\n", Sys.time(), "\n"))
| /draw_matrix.R | no_license | marzuf/Cancer_HiC_data_TAD_DA | R | false | false | 6,720 | r | # COMPARE TO THE SAME FILE IN EZH2_final_MAPQ, HERE TAD HEADER FILE = TRUE
options(scipen=100)
startTime <- Sys.time()
suppressPackageStartupMessages(library(optparse, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
option_list = list(
make_option(c("-f", "--feature_file"), type="character", default=NULL,
help="input feature (gene) file", metavar="character"),
make_option(c("-m", "--matrix_file"), type="character", default=NULL,
help="input matrix file", metavar="character"),
make_option(c("-s", "--start_matrix"), type="integer", default=NULL,
help="draw from start_matrix (in bp !)", metavar="character"),
make_option(c("-e", "--end_matrix"), type="integer", default=NULL,
help="draw to end_matrix (in bp !)", metavar="character"),
make_option(c("-o", "--output_file"), type="character", default=NULL,
help="path to output file", metavar="character"),
make_option(c("-k", "--col_to_skip"), type="integer", default=NULL,
help="columns to skip", metavar="integer"),
make_option(c("-c", "--chromo"), type="character", default=NULL,
help="chromosome to draw", metavar="character"),
make_option(c("-b", "--bin_size"), type="integer", default=NULL,
help="binning size", metavar="integer")
);
opt_parser <- OptionParser(option_list=option_list);
opt <- parse_args(opt_parser);
if(is.null(opt$matrix_file) | is.null(opt$bin_size) |
is.null(opt$output_file) ) {
stop("Missing arguments \n")
}
chromo <- opt$chromo
featureFile <- opt$feature_file
matrixFile <- opt$matrix_file
binSize <- opt$bin_size
skipcol <- ifelse(is.null(opt$col_to_skip), 3, opt$col_to_skip)
start_matrix <- opt$start_matrix
end_matrix <- opt$end_matrix
ds = basename(matrixFile)
ds = gsub("(.+)_chr.+_.+", "\\1", ds)
outFile <- opt$output_file
system(paste0("mkdir -p ", dirname(outFile)))
stopifnot(file.exists(matrixFile))
if(!is.null(featureFile)) stopifnot(file.exists(featureFile))
system(paste0("mkdir -p ", dirname(outFile)))
plotType <- gsub(".+\\.(.+?)$", "\\1", basename(outFile))
myHeight <- ifelse(plotType == "pdf" | plotType == "svg", 7, 480)
myWidth <- myHeight
imageColPalette <- colorRampPalette(c("blue", "red"))( 12 )
matrixFormat <- "domaincaller"
########################################## HARD-CODED PARAMETERS
matrixHeader <- FALSE
featureHeader <- FALSE
featureCol <- "cyan"
#### DROP THE FIRST COLUMNS OF THE MATRIX
cat(paste0("... load matrix data\t", Sys.time(), "\t"))
if(matrixFormat == "dekker") {
matrixDT <- read.delim(matrixFile, header=T, skip = 1, check.names = F)
cat(paste0(Sys.time(), "\n"))
rownames(matrixDT) <- matrixDT[,1]
matrixDT[,1] <- NULL
stopifnot(ncol(matrixDT) == nrow(matrixDT) + skipcol)
stopifnot(!is.na(colnames(matrixDT)))
stopifnot(!is.na(rownames(matrixDT)))
if(skipcol > 0)
matrixDT <- matrixDT[,-c(1:skipcol)]
stopifnot(colnames(matrixDT) == rownames(matrixDT))
stopifnot(nrow(matrixDT) == ncol(matrixDT) )
rownames(matrixDT) <- colnames(matrixDT) <- NULL
} else {
matrixDT <- read.delim(matrixFile, header=matrixHeader, stringsAsFactors = FALSE)
cat(paste0(Sys.time(), "\n"))
stopifnot(ncol(matrixDT) == nrow(matrixDT) + skipcol)
if(skipcol > 0)
matrixDT <- matrixDT[,-c(1:skipcol)]
stopifnot(nrow(matrixDT) == ncol(matrixDT) )
}
cat("... discard data don't want to plot\n")
#### PREPARE THE MATRIX - SELECT FROM THE MATRIX THE AREA WE WANT TO PLOT
if(is.null(start_matrix)) {
start_matrix <- 1
} else {
# convert the start limit in bp to bin
start_matrix <- floor(start_matrix/binSize) + 1
}
if(start_matrix > nrow(matrixDT)) {
stop("... want to start plotting after the end of the matrix!\n")
}
if(is.null(end_matrix)) {
end_matrix <- nrow(matrixDT)
} else {
end_matrix <- ceiling(end_matrix/binSize)
if(end_matrix > ncol(matrixDT)){
cat("! WARNING: wanted end position is after end of the data, will plot up to the end\n")
end_matrix <- ncol(matrixDT)
}
}
stopifnot(end_matrix >= start_matrix)
stopifnot(start_matrix > 0 & end_matrix <= ncol(matrixDT))
cat("... will draw from bin:\t", start_matrix, "\tto:\t", end_matrix , "(inclusive)\n")
matrixDT <- matrixDT[start_matrix:end_matrix, start_matrix:end_matrix]
# revert the matrix to have the plot from topleft to bottom right
drawMatrixDT <- t(matrixDT)[,nrow(matrixDT):1]
#### PREPARE THE TADs - ADJUST POSITIONS
shift_bin <- start_matrix - 1
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
totBin <- nrow(matrixDT) + 1
axLab <- seq(1.5, length.out=nrow(matrixDT))
# image(x=axLab, y=axLab, as.matrix(drawMatrixDT),
# xlab="", ylab="",
# xaxt = "n", yaxt="n")
cat("... draw the image\n")
image(x=axLab, y=axLab, as.matrix(log10(drawMatrixDT+0.001)),
xlab="", ylab="",
xaxt = "n", yaxt="n",
col = imageColPalette)
mtext(ds, side=3)
title(paste0(chromo, " - ", 1+binSize*(start_matrix-1), "(", start_matrix, "):", end_matrix*binSize, "(", end_matrix,")"))
### add starts for the genes if provided
if(!is.null(featureFile)){
cat("... add feature segments \n")
featureDT <- read.delim(featureFile, header=featureHeader, stringsAsFactors = FALSE)
if(ncol(featureDT) == 3){
colnames(featureDT) <- c("chromo", "start", "end")
labelFeature <- FALSE
} else if(ncol(featureDT) == 4){
colnames(featureDT) <- c("chromo", "start", "end", "gene")
} else{
stop("unknown format feature file\n")
}
featureDT <- featureDT[featureDT$chromo == chromo,]
if(nrow(featureDT) > 0){
for(i in 1:nrow(featureDT)) {
firstBin <- floor(featureDT$start[i]/binSize)+1 -shift_bin
lastBin <- ceiling(featureDT$end[i]/binSize) -shift_bin
stopifnot(lastBin >= firstBin)
for(feat_bin in firstBin:lastBin){
my_xpos <- (feat_bin + (feat_bin+1))*0.5
my_ypos <- (totBin-feat_bin+1 + totBin -feat_bin)*0.5
points(x=my_xpos, y=my_ypos, pch=16, cex = 1, adj=0.5, col = featureCol)
}
}
}
}
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
################################################################################################################################################
################################################################################################################################################
################################################################################################################################################
cat("*** DONE\n")
cat(paste0(startTime, "\n", Sys.time(), "\n"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_outliers.R
\name{remove_outliers}
\alias{remove_outliers}
\title{Remove outliers}
\usage{
remove_outliers(vec, coef = 1.5)
}
\arguments{
\item{vec}{A vector of numeric values}
\item{coef}{A number specifying the maximum distance from the inter-quartile range of \code{vec} for which values in \code{vec} are not replaced with NA.}
}
\value{
A vector of numeric values of length \code{length(vec)} with all elements identical to those in \code{vec}, except that outliers are replaced by NA.
}
\description{
Removes outliers based on their distance from the inter-quartile range (IQR). Excludes all points beyond \code{coef} times the IQR.
The function uses the command \code{boxplot.stats()} which uses the Tukey's method to identify the outliers ranged above and below the \code{coef*}IQR.
}
\examples{
vec <- remove_outliers( vec, coef=3 )
}
| /man/remove_outliers.Rd | no_license | stineb/ingestr | R | false | true | 926 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remove_outliers.R
\name{remove_outliers}
\alias{remove_outliers}
\title{Remove outliers}
\usage{
remove_outliers(vec, coef = 1.5)
}
\arguments{
\item{vec}{A vector of numeric values}
\item{coef}{A number specifying the maximum distance from the inter-quartile range of \code{vec} for which values in \code{vec} are not replaced with NA.}
}
\value{
A vector of numeric values of length \code{length(vec)} whith all elements identical as in \code{vec} except that outliers are replaced by NA.
}
\description{
Removes outliers based on their distance from the inter-quartile range (IQR). Excludes all points beyond \code{coef} times the IQR.
The function uses the command \code{boxplot.stats()} which uses the Tukey's method to identify the outliers ranged above and below the \code{coef*}IQR.
}
\examples{
vec <- remove_outliers( vec, coef=3 )
}
|
## ----setup, message = FALSE, warning = FALSE----------------------------------
library(secrlinear) # also loads secr
options(digits = 4) # for more readable output
inputdir <- system.file("extdata", package = "secrlinear")
## ----readarvicola, eval = TRUE------------------------------------------------
captfile <- paste0(inputdir, "/Jun84capt.txt")
trapfile <- paste0(inputdir, "/glymetrap.txt")
arvicola <- read.capthist(captfile, trapfile, covname = "sex")
## ----readglyme, eval = TRUE---------------------------------------------------
habitatmap <- paste0(inputdir, "/glymemap.txt")
glymemask <- read.linearmask(file = habitatmap, spacing = 4)
## ----plotglyme, eval = TRUE, fig.width = 7, fig.height = 3.5------------------
par(mar = c(1,1,4,1))
plot(glymemask)
plot(arvicola, add = TRUE, tracks = TRUE)
plot(traps(arvicola), add = TRUE)
## ----fit1, eval = TRUE, warning = FALSE---------------------------------------
# 2-D habitat, Euclidean distance
fit2DEuc <- secr.fit(arvicola, buffer = 200, trace = FALSE)
# 1-D habitat, Euclidean distance
fit1DEuc <- secr.fit(arvicola, mask = glymemask, trace = FALSE)
# 1-D habitat, river distance
fit1DNet <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
details = list(userdist = networkdistance))
## ----predict, eval = TRUE-----------------------------------------------------
predict(fit2DEuc)
predict(fit1DEuc)
predict(fit1DNet)
## ----silvermask, eval = TRUE--------------------------------------------------
habitatmap <- paste0(inputdir, "/silverstream.shp")
silverstreammask <- read.linearmask(file = habitatmap, spacing = 50)
par(mar = c(1,1,1,1))
plot(silverstreammask)
## ----networklength, eval = TRUE-----------------------------------------------
sldf <- attr(silverstreammask, "SLDF")
networklength <- sum(sp::SpatialLinesLengths(sldf)) / 1000 # km
discrepancy <- networklength - masklength(silverstreammask) # km
## ----silvermask2, eval = FALSE------------------------------------------------
# habitatmap <- paste0(inputdir, "/silverstream.shp")
# silverstreamsf <- st_read(habitatmap)
# silverstreamSLDF <- as(silverstreamsf, 'Spatial')
# silverstreammask <- read.linearmask(data = silverstreamSLDF, spacing = 50)
## ----dataframemask, eval=TRUE-------------------------------------------------
x <- seq(0, 4*pi, length = 200)
xy <- data.frame(x = x*100, y = sin(x)*300)
linmask <- read.linearmask(data = xy, spacing = 20)
## ----plotlinmask, eval = TRUE-------------------------------------------------
plot(linmask)
## ----showpath, eval = FALSE---------------------------------------------------
# # start interactive session and click on two points
# showpath(silverstreammask, lwd = 3)
## ----makeline, eval = TRUE----------------------------------------------------
trps <- make.line(linmask, detector = "proximity", n = 40, startbuffer = 0, by = 300,
endbuffer = 80, cluster = c(0,40,80), type = 'randomstart')
## ----plotline, eval = TRUE, fig.width = 7, fig.height = 3.5-------------------
plot(linmask)
plot(trps, add = TRUE, detpar = list(pch = 16, cex = 1.2, col='red'))
## ----snappoints, eval = FALSE-------------------------------------------------
# plot(silverstreammask)
# loc <- locator(30)
# xy <- snapPointsToLinearMask(data.frame(loc), silverstreammask)
# tr <- read.traps(data = xy, detector = 'multi')
# plot(tr, add = TRUE)
## ----transect, eval = FALSE---------------------------------------------------
# transects <- read.traps('transectxy.txt', detector = 'transect')
# capt <- read.table('capt.txt')
# tempCH <- make.capthist(capt, transects, fmt = 'XY')
# tempCH <- snip(tempCH, by = 100) # for 100-m segments
# CH <- reduce(tempCH, outputdetector = "count")
## ----silvertrps, eval = TRUE, echo = FALSE------------------------------------
trapfile <- paste0(inputdir, "/silverstreamtraps.txt")
tr <- read.traps(trapfile, detector = "multi")
## ----simCH, eval = TRUE, cache = TRUE-----------------------------------------
# simulate population of 2 animals / km
pop <- sim.linearpopn(mask = silverstreammask, D = 2)
# simulate detections using network distances
CH <- sim.capthist(traps = tr, popn = pop, noccasions = 4,
detectpar = list(g0 = 0.25, sigma = 500),
userdist = networkdistance)
summary(CH) # detector spacing uses Euclidean distances
## ----plotsim, eval=TRUE-------------------------------------------------------
# and plot the simulated detections...
par(mar = c(1,1,1,1))
plot(silverstreammask)
plot(CH, add = TRUE, tracks = TRUE, varycol = TRUE, rad = 100, cappar = list(cex = 2))
plot(tr, add = TRUE)
## ----sfit, eval = FALSE-------------------------------------------------------
# userd <- networkdistance(tr, silverstreammask)
# userd[!is.finite(userd)] <- 1e8 # testing
# sfit <- secr.fit(CH, mask = silverstreammask, details = list(userdist = userd))
# predict(sfit)
## ----regionN, eval = TRUE-----------------------------------------------------
region.N(fit2DEuc)
region.N(fit1DNet)
## ----plotregion, eval = TRUE, fig.width = 6.5, fig.height=3-------------------
par(mfrow = c(1,2), mar = c(1,1,1,1))
plot(fit2DEuc$mask)
plot(traps(arvicola), add = TRUE)
mtext(side = 3,line = -1.8, "fit2DEuc$mask", cex = 0.9)
plot(fit1DNet$mask)
plot(traps(arvicola), add = TRUE)
mtext(side = 3,line = -1.8,"fit1DNet$mask", cex = 0.9)
## ----derived, eval = TRUE-----------------------------------------------------
derived(fit2DEuc)
derived(fit1DNet)
## ----covariates, eval = FALSE-------------------------------------------------
# # interactively obtain LineID for central 'spine' by clicking on
# # each component line in plot
# tmp <- getLineID(silverstreammask)
# # extract coordinates of 'spine'
# spine <- subset(silverstreammask, LineID = tmp$LineID)
# # obtain network distances to spine and save for later use
# netd <- networkdistance(spine, silverstreammask) # matrix dim = c(nrow(spine), nrow(mask))
# dfs <- apply(netd, 2, min) / 1000 # km
# covariates(silverstreammask)$dist.from.spine <- dfs
## ----plotcovariate, eval = FALSE----------------------------------------------
# par(mar=c(1,1,1,4))
# plot(silverstreammask, covariate = 'dist.from.spine', col = topo.colors(13),
# cex = 1.5, legend = FALSE)
# strip.legend('right', legend = seq(0, 6.5, 0.5), col = topo.colors(13),
# title = 'dist.from.spine km', height = 0.35)
# plot(spine, add = TRUE, linecol = NA, cex = 0.3)
## ----checkmoves, eval = FALSE, strip.white = TRUE-----------------------------
# # initially OK (no movement > 1000 m)--
# checkmoves(arvicola, mask = glymemask, accept = c(0,1000))
# # deliberately break graph of linear mask
# attr(glymemask, 'graph')[200:203,201:204] <- NULL
# # no longer OK --
# out <- checkmoves(arvicola, mask = glymemask, accept = c(0,1000))
# # display captures of animals 32 and 35 whose records span break
# out$df
## ----showedges, eval = FALSE--------------------------------------------------
# # problem shows up where voles recaptured either side of break:
# showedges(glymemask, col = 'red', lwd = 6)
# plot(out$CH, add = TRUE, tracks = TRUE, rad=8,cappar=list(cex=1.5))
# pos <- traps(arvicola)['560.B',]
# text(pos$x+5, pos$y+80, 'break', srt=90, cex=1.1)
## ----plotglymeedges, eval = FALSE---------------------------------------------
# plot(glymemask)
# replot(glymemask) # click on corners to zoom in
# showedges(glymemask, col = 'red', lwd = 2, add=T)
# glymemask <- addedges(glymemask)
## ----linearHR, eval = FALSE---------------------------------------------------
# par(mfrow = c(1,1), mar = c(1,1,1,5))
# plot(silverstreammask)
# centres <- data.frame(locator(4))
# OK <- networkdistance(centres, silverstreammask) < 1000
# for (i in 1:nrow(OK)) {
# m1 <- subset(silverstreammask, OK[i,])
# plot(m1, add = TRUE, col = 'red', cex = 1.7)
# ml <- masklength(m1)
# points(centres, pch = 16, col = 'yellow', cex = 1.4)
# text (1406000, mean(m1$y), paste(ml, 'km'), cex = 1.2)
# }
#
## ----secrdesign, eval = TRUE, warning = FALSE---------------------------------
library(secrdesign)
# create a habitat geometry
x <- seq(0, 4*pi, length = 200)
xy <- data.frame(x = x*100, y = sin(x)*300)
linmask <- read.linearmask(data = xy, spacing = 5)
# define two possible detector layouts
trp1 <- make.line(linmask, detector = "proximity", n = 80, start = 200, by = 30)
trp2 <- make.line(linmask, detector = "proximity", n = 40, start = 200, by = 60)
trplist <- list(spacing30 = trp1, spacing60 = trp2)
# create a scenarios dataframe
scen1 <- make.scenarios(D = c(50,200), trapsindex = 1:2, sigma = 25, g0 = 0.2)
# we specify the mask, rather than construct it 'on the fly',
# we will use a non-Euclidean distance function for both
# simulating detections and fitting the model...
det.arg <- list(userdist = networkdistance)
fit.arg <- list(details = list(userdist = networkdistance))
# run the scenarios and summarise results
sims1 <- run.scenarios(nrepl = 50, trapset = trplist, maskset = linmask,
det.args = list(det.arg), fit.args = list(fit.arg),
scenarios = scen1, seed = 345, fit = FALSE)
summary(sims1)
## ----sims2, eval = FALSE------------------------------------------------------
# sims2 <- run.scenarios(nrepl = 5, trapset = trplist, maskset = linmask,
# det.args = list(det.arg), scenarios = scen1, seed = 345, fit = TRUE)
# summary(sims2)
## ----appendix, eval = FALSE---------------------------------------------------
# # It is efficient to pre-compute a matrix of distances between traps (rows)
# # and mask points (columns)
# distmat <- networkdistance (traps(arvicola), glymemask, glymemask)
#
# # Morning and evening trap checks as a time covariate
# tcov <- data.frame(ampm = rep(c("am","pm"),3))
#
# glymefit1 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = g0~1, hcov = "sex")
# glymefit2 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = g0~ampm, timecov = tcov, hcov = "sex")
# glymefit3 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = g0~ampm + h2, timecov = tcov, hcov = "sex")
# glymefit4 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = list(sigma~h2, g0~ampm + h2),
# timecov = tcov, hcov = "sex")
#
# fitlist <- secrlist(glymefit1, glymefit2, glymefit3, glymefit4)
# # dropping the detectfn (halfnormal) column to save space...
# AIC(fitlist)[,-2]
# # model npar logLik AIC AICc dAICc AICcwt
# # glymefit4 D~1 g0~ampm + h2 sigma~h2 pmix~h2 7 -322.5 659.1 665.3 0.00 1
# # glymefit3 D~1 g0~ampm + h2 sigma~1 pmix~h2 6 -347.3 706.7 711.1 45.80 0
# # glymefit2 D~1 g0~ampm sigma~1 pmix~h2 5 -353.5 717.0 720.0 54.66 0
# # glymefit1 D~1 g0~1 sigma~1 pmix~h2 4 -356.8 721.6 723.5 58.20 0
#
# # summaries of estimated density and sex ratio under different models
# options(digits=3)
#
# # model does not affect density estimate
# collate(fitlist, perm = c(2,3,1,4))[,,1,"D"]
# # estimate SE.estimate lcl ucl
# # glymefit1 26.5 5.27 18.0 39.0
# # glymefit2 26.4 5.26 18.0 38.9
# # glymefit3 26.3 5.25 17.9 38.8
# # glymefit4 27.2 5.45 18.5 40.2
#
# # model does affect the estimate of sex ratio (here proportion female)
# collate(fitlist, perm=c(2,3,1,4))[,,1,"pmix"]
# # estimate SE.estimate lcl ucl
# # glymefit1 0.615 0.0954 0.421 0.779
# # glymefit2 0.615 0.0954 0.421 0.779
# # glymefit3 0.634 0.0938 0.439 0.793
# # glymefit4 0.669 0.0897 0.477 0.817
#
# # predictions from best model
# newdata <- expand.grid(ampm = c("am", "pm"), h2 = c("F", "M"))
# predict(glymefit4, newdata = newdata)
#
# # $`ampm = am, h2 = F`
# # link estimate SE.estimate lcl ucl
# # D log 27.239 5.4478 18.477 40.158
# # g0 logit 0.218 0.0463 0.141 0.322
# # sigma log 13.624 1.8764 10.414 17.823
# # pmix logit 0.669 0.0897 0.477 0.817
# #
# # $`ampm = pm, h2 = F`
# # link estimate SE.estimate lcl ucl
# # D log 27.239 5.4478 18.4768 40.158
# # g0 logit 0.116 0.0293 0.0694 0.186
# # sigma log 13.624 1.8764 10.4136 17.823
# # pmix logit 0.669 0.0897 0.4774 0.817
# #
# # $`ampm = am, h2 = M`
# # link estimate SE.estimate lcl ucl
# # D log 27.239 5.4478 18.4768 40.158
# # g0 logit 0.153 0.0392 0.0908 0.246
# # sigma log 70.958 10.0551 53.8247 93.545
# # pmix logit 0.331 0.0897 0.1829 0.523
# #
# # $`ampm = pm, h2 = M`
# # link estimate SE.estimate lcl ucl
# # D log 27.2394 5.4478 18.4768 40.158
# # g0 logit 0.0782 0.0201 0.0468 0.128
# # sigma log 70.9581 10.0551 53.8247 93.545
# # pmix logit 0.3311 0.0897 0.1829 0.523
## ----derivedapp, eval = FALSE-------------------------------------------------
# derived(glymefit4, distribution = 'binomial')
# # estimate SE.estimate lcl ucl CVn CVa CVD
# # esa 0.9545 NA NA NA NA NA NA
# # D 27.2396 2.867 22.17 33.46 0.1038 0.01747 0.1053
| /inst/doc/secrlinear-vignette.R | no_license | cran/secrlinear | R | false | false | 13,996 | r | ## ----setup, message = FALSE, warning = FALSE----------------------------------
library(secrlinear) # also loads secr
options(digits = 4) # for more readable output
inputdir <- system.file("extdata", package = "secrlinear")
## ----readarvicola, eval = TRUE------------------------------------------------
captfile <- paste0(inputdir, "/Jun84capt.txt")
trapfile <- paste0(inputdir, "/glymetrap.txt")
arvicola <- read.capthist(captfile, trapfile, covname = "sex")
## ----readglyme, eval = TRUE---------------------------------------------------
habitatmap <- paste0(inputdir, "/glymemap.txt")
glymemask <- read.linearmask(file = habitatmap, spacing = 4)
## ----plotglyme, eval = TRUE, fig.width = 7, fig.height = 3.5------------------
par(mar = c(1,1,4,1))
plot(glymemask)
plot(arvicola, add = TRUE, tracks = TRUE)
plot(traps(arvicola), add = TRUE)
## ----fit1, eval = TRUE, warning = FALSE---------------------------------------
# 2-D habitat, Euclidean distance
fit2DEuc <- secr.fit(arvicola, buffer = 200, trace = FALSE)
# 1-D habitat, Euclidean distance
fit1DEuc <- secr.fit(arvicola, mask = glymemask, trace = FALSE)
# 1-D habitat, river distance
fit1DNet <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
details = list(userdist = networkdistance))
## ----predict, eval = TRUE-----------------------------------------------------
predict(fit2DEuc)
predict(fit1DEuc)
predict(fit1DNet)
## ----silvermask, eval = TRUE--------------------------------------------------
habitatmap <- paste0(inputdir, "/silverstream.shp")
silverstreammask <- read.linearmask(file = habitatmap, spacing = 50)
par(mar = c(1,1,1,1))
plot(silverstreammask)
## ----networklength, eval = TRUE-----------------------------------------------
sldf <- attr(silverstreammask, "SLDF")
networklength <- sum(sp::SpatialLinesLengths(sldf)) / 1000 # km
discrepancy <- networklength - masklength(silverstreammask) # km
## ----silvermask2, eval = FALSE------------------------------------------------
# habitatmap <- paste0(inputdir, "/silverstream.shp")
# silverstreamsf <- st_read(habitatmap)
# silverstreamSLDF <- as(silverstreamsf, 'Spatial')
# silverstreammask <- read.linearmask(data = silverstreamSLDF, spacing = 50)
## ----dataframemask, eval=TRUE-------------------------------------------------
x <- seq(0, 4*pi, length = 200)
xy <- data.frame(x = x*100, y = sin(x)*300)
linmask <- read.linearmask(data = xy, spacing = 20)
## ----plotlinmask, eval = TRUE-------------------------------------------------
plot(linmask)
## ----showpath, eval = FALSE---------------------------------------------------
# # start interactive session and click on two points
# showpath(silverstreammask, lwd = 3)
## ----makeline, eval = TRUE----------------------------------------------------
trps <- make.line(linmask, detector = "proximity", n = 40, startbuffer = 0, by = 300,
endbuffer = 80, cluster = c(0,40,80), type = 'randomstart')
## ----plotline, eval = TRUE, fig.width = 7, fig.height = 3.5-------------------
plot(linmask)
plot(trps, add = TRUE, detpar = list(pch = 16, cex = 1.2, col='red'))
## ----snappoints, eval = FALSE-------------------------------------------------
# plot(silverstreammask)
# loc <- locator(30)
# xy <- snapPointsToLinearMask(data.frame(loc), silverstreammask)
# tr <- read.traps(data = xy, detector = 'multi')
# plot(tr, add = TRUE)
## ----transect, eval = FALSE---------------------------------------------------
# transects <- read.traps('transectxy.txt', detector = 'transect')
# capt <- read.table('capt.txt')
# tempCH <- make.capthist(capt, transects, fmt = 'XY')
# tempCH <- snip(tempCH, by = 100) # for 100-m segments
# CH <- reduce(tempCH, outputdetector = "count")
## ----silvertrps, eval = TRUE, echo = FALSE------------------------------------
trapfile <- paste0(inputdir, "/silverstreamtraps.txt")
tr <- read.traps(trapfile, detector = "multi")
## ----simCH, eval = TRUE, cache = TRUE-----------------------------------------
# simulate population of 2 animals / km
pop <- sim.linearpopn(mask = silverstreammask, D = 2)
# simulate detections using network distances
CH <- sim.capthist(traps = tr, popn = pop, noccasions = 4,
detectpar = list(g0 = 0.25, sigma = 500),
userdist = networkdistance)
summary(CH) # detector spacing uses Euclidean distances
## ----plotsim, eval=TRUE-------------------------------------------------------
# and plot the simulated detections...
par(mar = c(1,1,1,1))
plot(silverstreammask)
plot(CH, add = TRUE, tracks = TRUE, varycol = TRUE, rad = 100, cappar = list(cex = 2))
plot(tr, add = TRUE)
## ----sfit, eval = FALSE-------------------------------------------------------
# userd <- networkdistance(tr, silverstreammask)
# userd[!is.finite(userd)] <- 1e8 # testing
# sfit <- secr.fit(CH, mask = silverstreammask, details = list(userdist = userd))
# predict(sfit)
## ----regionN, eval = TRUE-----------------------------------------------------
region.N(fit2DEuc)
region.N(fit1DNet)
## ----plotregion, eval = TRUE, fig.width = 6.5, fig.height=3-------------------
par(mfrow = c(1,2), mar = c(1,1,1,1))
plot(fit2DEuc$mask)
plot(traps(arvicola), add = TRUE)
mtext(side = 3,line = -1.8, "fit2DEuc$mask", cex = 0.9)
plot(fit1DNet$mask)
plot(traps(arvicola), add = TRUE)
mtext(side = 3,line = -1.8,"fit1DNet$mask", cex = 0.9)
## ----derived, eval = TRUE-----------------------------------------------------
derived(fit2DEuc)
derived(fit1DNet)
## ----covariates, eval = FALSE-------------------------------------------------
# # interactively obtain LineID for central 'spine' by clicking on
# # each component line in plot
# tmp <- getLineID(silverstreammask)
# # extract coordinates of 'spine'
# spine <- subset(silverstreammask, LineID = tmp$LineID)
# # obtain network distances to spine and save for later use
# netd <- networkdistance(spine, silverstreammask) # matrix dim = c(nrow(spine), nrow(mask))
# dfs <- apply(netd, 2, min) / 1000 # km
# covariates(silverstreammask)$dist.from.spine <- dfs
## ----plotcovariate, eval = FALSE----------------------------------------------
# par(mar=c(1,1,1,4))
# plot(silverstreammask, covariate = 'dist.from.spine', col = topo.colors(13),
# cex = 1.5, legend = FALSE)
# strip.legend('right', legend = seq(0, 6.5, 0.5), col = topo.colors(13),
# title = 'dist.from.spine km', height = 0.35)
# plot(spine, add = TRUE, linecol = NA, cex = 0.3)
## ----checkmoves, eval = FALSE, strip.white = TRUE-----------------------------
# # initially OK (no movement > 1000 m)--
# checkmoves(arvicola, mask = glymemask, accept = c(0,1000))
# # deliberately break graph of linear mask
# attr(glymemask, 'graph')[200:203,201:204] <- NULL
# # no longer OK --
# out <- checkmoves(arvicola, mask = glymemask, accept = c(0,1000))
# # display captures of animals 32 and 35 whose records span break
# out$df
## ----showedges, eval = FALSE--------------------------------------------------
# # problem shows up where voles recaptured either side of break:
# showedges(glymemask, col = 'red', lwd = 6)
# plot(out$CH, add = TRUE, tracks = TRUE, rad=8,cappar=list(cex=1.5))
# pos <- traps(arvicola)['560.B',]
# text(pos$x+5, pos$y+80, 'break', srt=90, cex=1.1)
## ----plotglymeedges, eval = FALSE---------------------------------------------
# plot(glymemask)
# replot(glymemask) # click on corners to zoom in
# showedges(glymemask, col = 'red', lwd = 2, add=T)
# glymemask <- addedges(glymemask)
## ----linearHR, eval = FALSE---------------------------------------------------
# par(mfrow = c(1,1), mar = c(1,1,1,5))
# plot(silverstreammask)
# centres <- data.frame(locator(4))
# OK <- networkdistance(centres, silverstreammask) < 1000
# for (i in 1:nrow(OK)) {
# m1 <- subset(silverstreammask, OK[i,])
# plot(m1, add = TRUE, col = 'red', cex = 1.7)
# ml <- masklength(m1)
# points(centres, pch = 16, col = 'yellow', cex = 1.4)
# text (1406000, mean(m1$y), paste(ml, 'km'), cex = 1.2)
# }
#
## ----secrdesign, eval = TRUE, warning = FALSE---------------------------------
# Simulation study comparing two detector spacings on a sinusoidal linear
# habitat, using network (non-Euclidean) distances throughout.
library(secrdesign)
# create a habitat geometry: one sine period stretched to ~1.2 km x 600 m
# NOTE(review): `length = 200` partially matches seq()'s `length.out` -- works,
# but spell it out if this code is touched again.
x <- seq(0, 4*pi, length = 200)
xy <- data.frame(x = x*100, y = sin(x)*300)
linmask <- read.linearmask(data = xy, spacing = 5)
# define two possible detector layouts (same span, half the detectors at 60 m)
trp1 <- make.line(linmask, detector = "proximity", n = 80, start = 200, by = 30)
trp2 <- make.line(linmask, detector = "proximity", n = 40, start = 200, by = 60)
trplist <- list(spacing30 = trp1, spacing60 = trp2)
# create a scenarios dataframe: 2 densities x 2 layouts, fixed detection pars
scen1 <- make.scenarios(D = c(50,200), trapsindex = 1:2, sigma = 25, g0 = 0.2)
# we specify the mask, rather than construct it 'on the fly',
# we will use a non-Euclidean distance function for both
# simulating detections and fitting the model...
det.arg <- list(userdist = networkdistance)
fit.arg <- list(details = list(userdist = networkdistance))
# run the scenarios and summarise results (fit = FALSE: simulate counts only)
sims1 <- run.scenarios(nrepl = 50, trapset = trplist, maskset = linmask,
                       det.args = list(det.arg), fit.args = list(fit.arg),
                       scenarios = scen1, seed = 345, fit = FALSE)
summary(sims1)
## ----sims2, eval = FALSE------------------------------------------------------
# sims2 <- run.scenarios(nrepl = 5, trapset = trplist, maskset = linmask,
# det.args = list(det.arg), scenarios = scen1, seed = 345, fit = TRUE)
# summary(sims2)
## ----appendix, eval = FALSE---------------------------------------------------
# # It is efficient to pre-compute a matrix of distances between traps (rows)
# # and mask points (columns)
# distmat <- networkdistance (traps(arvicola), glymemask, glymemask)
#
# # Morning and evening trap checks as a time covariate
# tcov <- data.frame(ampm = rep(c("am","pm"),3))
#
# glymefit1 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = g0~1, hcov = "sex")
# glymefit2 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = g0~ampm, timecov = tcov, hcov = "sex")
# glymefit3 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = g0~ampm + h2, timecov = tcov, hcov = "sex")
# glymefit4 <- secr.fit(arvicola, mask = glymemask, trace = FALSE,
# details = list(userdist = distmat),
# model = list(sigma~h2, g0~ampm + h2),
# timecov = tcov, hcov = "sex")
#
# fitlist <- secrlist(glymefit1, glymefit2, glymefit3, glymefit4)
# # dropping the detectfn (halfnormal) column to save space...
# AIC(fitlist)[,-2]
# # model npar logLik AIC AICc dAICc AICcwt
# # glymefit4 D~1 g0~ampm + h2 sigma~h2 pmix~h2 7 -322.5 659.1 665.3 0.00 1
# # glymefit3 D~1 g0~ampm + h2 sigma~1 pmix~h2 6 -347.3 706.7 711.1 45.80 0
# # glymefit2 D~1 g0~ampm sigma~1 pmix~h2 5 -353.5 717.0 720.0 54.66 0
# # glymefit1 D~1 g0~1 sigma~1 pmix~h2 4 -356.8 721.6 723.5 58.20 0
#
# # summaries of estimated density and sex ratio under different models
# options(digits=3)
#
# # model does not affect density estimate
# collate(fitlist, perm = c(2,3,1,4))[,,1,"D"]
# # estimate SE.estimate lcl ucl
# # glymefit1 26.5 5.27 18.0 39.0
# # glymefit2 26.4 5.26 18.0 38.9
# # glymefit3 26.3 5.25 17.9 38.8
# # glymefit4 27.2 5.45 18.5 40.2
#
# # model does affect the estimate of sex ratio (here proportion female)
# collate(fitlist, perm=c(2,3,1,4))[,,1,"pmix"]
# # estimate SE.estimate lcl ucl
# # glymefit1 0.615 0.0954 0.421 0.779
# # glymefit2 0.615 0.0954 0.421 0.779
# # glymefit3 0.634 0.0938 0.439 0.793
# # glymefit4 0.669 0.0897 0.477 0.817
#
# # predictions from best model
# newdata <- expand.grid(ampm = c("am", "pm"), h2 = c("F", "M"))
# predict(glymefit4, newdata = newdata)
#
# # $`ampm = am, h2 = F`
# # link estimate SE.estimate lcl ucl
# # D log 27.239 5.4478 18.477 40.158
# # g0 logit 0.218 0.0463 0.141 0.322
# # sigma log 13.624 1.8764 10.414 17.823
# # pmix logit 0.669 0.0897 0.477 0.817
# #
# # $`ampm = pm, h2 = F`
# # link estimate SE.estimate lcl ucl
# # D log 27.239 5.4478 18.4768 40.158
# # g0 logit 0.116 0.0293 0.0694 0.186
# # sigma log 13.624 1.8764 10.4136 17.823
# # pmix logit 0.669 0.0897 0.4774 0.817
# #
# # $`ampm = am, h2 = M`
# # link estimate SE.estimate lcl ucl
# # D log 27.239 5.4478 18.4768 40.158
# # g0 logit 0.153 0.0392 0.0908 0.246
# # sigma log 70.958 10.0551 53.8247 93.545
# # pmix logit 0.331 0.0897 0.1829 0.523
# #
# # $`ampm = pm, h2 = M`
# # link estimate SE.estimate lcl ucl
# # D log 27.2394 5.4478 18.4768 40.158
# # g0 logit 0.0782 0.0201 0.0468 0.128
# # sigma log 70.9581 10.0551 53.8247 93.545
# # pmix logit 0.3311 0.0897 0.1829 0.523
## ----derivedapp, eval = FALSE-------------------------------------------------
# derived(glymefit4, distribution = 'binomial')
# # estimate SE.estimate lcl ucl CVn CVa CVD
# # esa 0.9545 NA NA NA NA NA NA
# # D 27.2396 2.867 22.17 33.46 0.1038 0.01747 0.1053
|
#' Helper function for detecting values out of the environmental range of M
#'
#' @description plot_out detects which environmental values in an area of projection are
#' out of the range of environmental values in the area where ecological niche models are
#' calibrated. This function is designed to be used specifically in the \code{\link{kuenm_mop}} function.
#'
#' @param M1 a numeric matrix (or Raster* object) containing values of all environmental
#' variables in the calibration area.
#' @param G1 a numeric matrix (or Raster* object) containing values of all environmental
#' variables in the full area of interest.
#'
#' @return An integer vector of row indices of \code{G1} whose value for at least one
#' environmental variable falls outside the range observed in the calibration area.
#'
#' @export
plot_out <- function (M1, G1) {
  # Coerce Raster* inputs to plain value matrices (one column per layer).
  # inherits() is preferred over comparing class(x)[1] against a set of names.
  if (inherits(M1, c("RasterBrick", "RasterLayer", "RasterStack"))) {
    M1 <- raster::values(M1)
  }
  if (inherits(G1, c("RasterBrick", "RasterLayer", "RasterStack"))) {
    G1 <- raster::values(G1)
  }
  out_idx <- integer(0)
  for (i in seq_len(ncol(M1))) {
    # Calibration range for variable i: computed once per column and with
    # na.rm = TRUE (the original recomputed it three times, once without na.rm).
    rng <- range(M1[, i], na.rm = TRUE)
    beyond <- which(G1[, i] < rng[1] | G1[, i] > rng[2])
    # Prepend to preserve the original accumulation (newest column first).
    out_idx <- c(beyond, out_idx)
  }
  unique(out_idx)
}
| /R/plot_out.R | no_license | marlonecobos/kuenm | R | false | false | 1,268 | r | #' Helper function for detecting values out of the environmental range of M
#'
#' @description plot.out detects which environmental values in an area of projection are
#' out of the range of environmental values in the area where ecological niche models are
#' calibrated. This function is designed to be used specifically in the \code{\link{kuenm_mop}} function.
#'
#' @param M1 a numeric matrix containing values of all environmental variables in the calibration area.
#' @param G1 a numeric matrix containing values of all environmental variables in the full area of interest.
#'
#' @return A vector of environmental values in a projection area that are outside the range of values
#' in the calibration area of an ecological niche model.
#'
#' @export
plot_out <- function (M1, G1) {
  # Unwrap Raster* objects into plain value matrices, one column per layer.
  raster_classes <- c("RasterBrick", "RasterLayer", "RasterStack")
  if (class(M1)[1] %in% raster_classes) {
    M1 <- raster::values(M1)
  }
  if (class(G1)[1] %in% raster_classes) {
    G1 <- raster::values(G1)
  }
  # For each environmental variable, collect the G1 row indices lying outside
  # the calibration range of that variable.
  per_var <- lapply(seq_len(dim(M1)[2]), function(i) {
    lims <- range(M1[, i], na.rm = TRUE)
    which(G1[, i] < lims[1] | G1[, i] > lims[2])
  })
  # The loop version accumulated newest-first, so reverse before flattening.
  unique(unlist(rev(per_var)))
}
|
# Azure ML Studio "Execute R Script" module: summarise per-position game
# appearances for each player-year, then tag each row with the player's
# primary position (the position with the most games played).
library(dplyr)
# Map 1-based optional input ports to variables
appearances <- maml.mapInputPort(1) # class: data.frame
# Contents of optional Zip port are in ./src/
# source("src/yourfile.R");
# load("src/yourData.rdata");
# Total the games played at every position within each player-year.
g = group_by(appearances, playerID, yearID)
app_grp = dplyr::summarise(g, G_p=sum(G_p),G_c=sum(G_c),G_1b=sum(G_1b),G_2b=sum(G_2b),G_3b=sum(G_3b),G_ss=sum(G_ss),G_lf=sum(G_lf),G_cf=sum(G_cf),G_rf=sum(G_rf),G_of=sum(G_of))
# Keep only the player id plus the per-position game counts.
# NOTE(review): app_grp is still grouped by playerID after summarise(), so
# select() silently retains the grouping column -- confirm gpbypos has the
# column layout expected by the positional indexing ([,-1]) below.
gpbypos = select(app_grp,playerID,G_p,G_c,G_1b,G_2b,G_3b,G_ss,G_lf,G_cf,G_rf,G_of)
# Per row, the column (position) with the largest game count wins;
# ties are broken by the first (leftmost) column.
pos = data.frame(position=colnames(gpbypos[,-1])[max.col(as.matrix(gpbypos[,-1]), ties.method = 'first')])
# Collapse the three specific outfield slots into the generic outfield label.
pos$position[pos$position %in% c("G_lf", "G_rf", "G_cf")] = "G_of"
pos$position = as.factor(pos$position)
pos = droplevels(pos)
# Attach the derived primary position back onto the summarised table,
# relying on row order matching between app_grp and pos.
app_grp$position = pos$position
# Select data.frame to be sent to the output Dataset port
maml.mapOutputPort("appearances"); | /snippet2.R | no_license | mikeydavison/bbasgdemo | R | false | false | 892 | r |
# Azure ML Studio "Execute R Script" module: aggregate baseball appearance
# counts per player-year and derive each player's primary fielding position.
library(dplyr)
# Map 1-based optional input ports to variables
appearances <- maml.mapInputPort(1) # class: data.frame
# Contents of optional Zip port are in ./src/
# source("src/yourfile.R");
# load("src/yourData.rdata");
# Sum games at each position within every (playerID, yearID) group.
g = group_by(appearances, playerID, yearID)
app_grp = dplyr::summarise(g, G_p=sum(G_p),G_c=sum(G_c),G_1b=sum(G_1b),G_2b=sum(G_2b),G_3b=sum(G_3b),G_ss=sum(G_ss),G_lf=sum(G_lf),G_cf=sum(G_cf),G_rf=sum(G_rf),G_of=sum(G_of))
# Reduce to player id + position counts.
# NOTE(review): the summarised table remains grouped, so select() keeps the
# grouping column automatically -- verify column positions before the [,-1]
# indexing below.
gpbypos = select(app_grp,playerID,G_p,G_c,G_1b,G_2b,G_3b,G_ss,G_lf,G_cf,G_rf,G_of)
# Primary position = column with the maximum game count (first wins on ties).
pos = data.frame(position=colnames(gpbypos[,-1])[max.col(as.matrix(gpbypos[,-1]), ties.method = 'first')])
# Merge left/center/right field into a single outfield category.
pos$position[pos$position %in% c("G_lf", "G_rf", "G_cf")] = "G_of"
pos$position = as.factor(pos$position)
pos = droplevels(pos)
# Row-order-aligned assignment of the derived position onto app_grp.
app_grp$position = pos$position
# Select data.frame to be sent to the output Dataset port
maml.mapOutputPort("appearances"); |
#----Libraries----
if(T){
library(rgdal)
library(proj4)
library(sp)
library(raster)
library(dplyr)
library(RColorBrewer)
library(classInt)
library(mgcv)
library(gamm4)
library(lme4)
library(predictmeans)
library(ggplot2)
}
#----Importing PFW data----
if(F){
raw.pfw = read.csv("C:/Users/itloaner/Desktop/WNV/UpdatedData/PFW_amecro_zerofill.csv") # BLJA and AMCR the same?
}
#----Converting to duplicate data----
if(T){
rawData = raw.pfw
}
#----Formatting effort----
if(T){
  # Zero-fill: absence records arrive as NA and are treated as zero.
  # NOTE(review): this zero-fills EVERY column in rawData, not just counts --
  # confirm there is no column where NA and 0 mean different things.
  rawData[is.na(rawData)] <- 0 # Makes all the NA cells be filled with 0
  # Observation effort in half-days: sum the four AM/PM indicator columns.
  rawData$effortDaysNumerical = rawData$DAY1_AM + rawData$DAY1_PM + rawData$DAY2_AM + rawData$DAY2_PM # Summing half days
  # Assigning Effort Hours to categorical levels: A = most effort (8+ h),
  # D = least (>0 h), "x" = no reported effort.
  # NOTE(review): comparisons are against strings ("0.001" etc.), which assumes
  # EFFORT_HRS_ATLEAST was read as character/factor -- confirm, since numeric
  # coercion would make these matches fragile.
  idx<- rawData$EFFORT_HRS_ATLEAST == "0"
  rawData$obshrs[idx] <- "x"
  idx<- rawData$EFFORT_HRS_ATLEAST == "0.001"
  rawData$obshrs[idx] <- "D"
  idx<- rawData$EFFORT_HRS_ATLEAST == "1"
  rawData$obshrs[idx] <- "C"
  idx<- rawData$EFFORT_HRS_ATLEAST == "4"
  rawData$obshrs[idx] <- "B"
  idx<- rawData$EFFORT_HRS_ATLEAST == "4.001"
  rawData$obshrs[idx] <- "B"
  idx<- rawData$EFFORT_HRS_ATLEAST == "8.001"
  rawData$obshrs[idx] <- "A"
}
#----Relevant data only----
if(T){
locID = rawData$LOC_ID
yr = rawData$FW_Year
maxFlock = rawData$nseen
lat = rawData$LATITUDE
long = rawData$LONGITUDE
effortHours = rawData$obshrs # empty = x, 0.001 = D, 1 = C, 4 = B, 8+ = A
effortDays = rawData$effortDaysNumerical
state = rawData$StatProv
#Final product:
dfEffHD = data.frame(locID, yr, maxFlock, lat, long, state, effortDays, effortHours) # just the necessary data
}
#----Removing observation seasons PFW_1991 through PFW_1995----
# NOTE(review): the original header said "pre-1995", but PFW_1995 itself is
# also dropped below, so only 1996 onward is retained -- confirm intent.
if(T){
  dfR91 = dfEffHD[dfEffHD$yr != 'PFW_1991',]
  dfR92 = dfR91[dfR91$yr != 'PFW_1992',]
  dfR93 = dfR92[dfR92$yr != 'PFW_1993',]
  dfR94 = dfR93[dfR93$yr != 'PFW_1994',]
  dfR95 = dfR94[dfR94$yr != 'PFW_1995',]
  dfR95 = droplevels(dfR95) # Dropping unused levels
}
#----Removing empty effort and high counts----
if(T){
  # NOTE(review): cutoff keeps flocks of 48 or fewer; the original comment said
  # "over 50 birds" but the code excludes counts of 49+ -- confirm the intended
  # threshold.
  dfRhigh = subset(dfR95, maxFlock < 49) #exclude high counts (49+ birds)
  dfEffRx = subset(dfRhigh, effortHours != "x") # excluding blank data
  dfDaysR0 = subset(dfEffRx, effortDays != 0) # excluding blank data
}
#----Caclulate percent removed by exluding high counts----
if(F){
100*(1-(length(dfRhigh$maxFlock))/(length(dfR95$maxFlock)))
}
#----Selecting power users----
if(T){
  ###Cleaning up the data with conditionals###
  # 1) Only include LocIDs active for at least 3 years ###
  # 2) Only include LocIDs with at least 10 checklists during those years ###
  # Contingency table: number of observations at each location in each year.
  yearly_list_count = table(dfDaysR0$locID, dfDaysR0$yr) # creates a table showing number of observations at each location each year.
  # Per location, count how many years reached the 10-checklist threshold.
  row_sums = rowSums(yearly_list_count >= 10) # rows where there are at least 10 observations
  threeyears = which(row_sums >=3) # for rows with 10 obs over at least 3 years
  # which() on a named vector keeps the locID row names.
  newIDs = names(threeyears) # just setting a new variable name
  #Final product: retain only observations from qualifying locations.
  dfPwrUsrs = dfDaysR0[dfDaysR0$locID %in% newIDs,]
}
#----Renaming main dataframe----
if(T){
df4Uniq = data.frame(dfPwrUsrs$locID, dfPwrUsrs$yr, dfPwrUsrs$lat, dfPwrUsrs$long, dfPwrUsrs$effortDays, dfPwrUsrs$effortHours, dfPwrUsrs$maxFlock) #this makes a new df so I can effectively use unique()
pfw = unique(df4Uniq)
names(pfw)[1] = 'locID'
names(pfw)[2] = 'yr'
names(pfw)[3] = 'lat'
names(pfw)[4] = 'long'
names(pfw)[5] = 'effortDays'
names(pfw)[6] = 'effortHours'
names(pfw)[7] = 'maxFlock'
}
#----Duplicate dataframe to conserve data, remove later----
if(T){
toSPDF = pfw
}
#----Spatially formatting main dataframe----
if(T){
# Tom Auer CRS" +init=epsg:4326
xy <- toSPDF[,c(3,4)]
SPDF <- SpatialPointsDataFrame(coords = toSPDF[,c("long", "lat")], data = toSPDF,
proj4string = CRS("+init=epsg:4326"))
}
#----Importing BCR shape file, no state borders----
if(T){
shp = shapefile("C:/Users/itloaner/Desktop/BCR/BCR_Terrestrial_master_International.shx")
BCRs = spTransform(shp, CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
}
#----Impoting BCR shapefile with states----
if(T){
stateshp = shapefile("C:/Users/itloaner/Desktop/BCR/BCR_Terrestrial_master.shx")
StateProv = spTransform(stateshp, CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
}
#----Checking for same projections (ask about this)----
if(T){
isTRUE(proj4string(BCRs) == proj4string(SPDF))
}
#----Overlay: spatially joining attributes (BCR & pts) by location----
if(T){
unattachedBCR <- over(SPDF, BCRs[,"BCRNAME"])
SPDF$BCR <- NA # This is to avoid replacement row length issue (+/-1)
SPDF$BCR <- unattachedBCR$BCRNAME
}
#----Convert from spdf to dataframe with BCRs----
if(T){
dfWithBCRS = as.data.frame(SPDF)
}
#----Assigning Hawaii and BadBCRs----
if(T){
idx<- dfWithBCRS$BCR == "NOT RATED"
dfWithBCRS$BCR[idx] <- "HAWAII"
idx<- is.na(dfWithBCRS$BCR)
dfWithBCRS$BCR[idx] <- "BadBCR"
}
#----Plotting the Bad BCRs----
if(F){
# dfNAs = dfWithBCRS[dfWithBCRS$BCR == "BadBCR",] # Accounts for about 0.6 % of the data
# qplot(
# y = dfNAs$lat,
# x = dfNAs$long,
# data = dfNAs,
# color = dfNAs$BCR
# )
#
# # Plotting all BCR data
# qplot(
# y = dfWithBCRS$lat,
# x = dfWithBCRS$long,
# data = dfWithBCRS,
# color = dfWithBCRS$BCR
# )
}
#----Removing pts with BadBCRs----
if(T){
dfCleanBCR = dfWithBCRS[dfWithBCRS$BCR != "BadBCR",] # Removing the points that are plotted too close to coastline.
dfCheckN = dfCleanBCR[dfCleanBCR$maxFlock > 0, ]
}
#----Removing data versions----
if(T){
rm(dfCleanBCR, dfWithBCRS, unattachedBCR,
SPDF, stateshp,StateProv,xy,toSPDF,pfw,df4Uniq,
rawData, dfEffHD,dfR91,dfR92,dfR93,dfR94,dfR95)
}
#----Dataframe for model testing - select BCRs----
if(T){
#bcr.regions = dfCheckN[dfCheckN$BCR == "NEW ENGLAND/MID-ATLANTIC COAST"|dfCheckN$BCR == "PIEDMONT"|dfCheckN$BCR == "SOUTHEASTERN COASTAL PLAIN"|dfCheckN$BCR == "ATLANTIC NORTHERN FOREST"|dfCheckN$BCR == "APPALACHIAN MOUNTAINS"|dfCheckN$BCR == "LOWER GREAT LAKES/ ST. LAWRENCE PLAIN",]
bcr.regions = dfCheckN[dfCheckN$BCR == "NEW ENGLAND/MID-ATLANTIC COAST",]
regionData = bcr.regions
#unique(regionData$BCR)
regionData$log.maxFlock = log(regionData$maxFlock)
qplot(regionData$lat, regionData$long, data = regionData, color = regionData$maxFlock, size = regionData$maxFlock)
}
#----Interval plot color pallette----
if(F){
pal = brewer.pal(5, "Reds")
q5 = classIntervals(regionData$maxFlock, n=5, style = "quantile")
q5Colours = findColours(q5,pal)
plot(regionData$lat, regionData$long, col = q5Colours, pch = 19, axes = T, cex = 0.3, main = "maxFlock")
legend("topleft", fill = attr(q5Colours, "palette"),
legend = names(attr(q5Colours, "table")),bty = "n")
}
#----Plotting pfw data by lat/lon----
# Scatter of flock size against latitude/longitude with a supsmu smoother.
if(F){
  # BUG FIX: the smoothers previously referenced `newEngland`, an object never
  # defined in this script; `regionData` is the data frame actually plotted.
  plot(maxFlock~lat, data = regionData, main = "maxFlock by Lat")
  lines(supsmu(regionData$lat, regionData$maxFlock),col=2,lwd=2)
  # Title corrected: this panel plots longitude, not latitude.
  plot(maxFlock~long, data = regionData, main = "maxFlock by Long")
  lines(supsmu(regionData$long, regionData$maxFlock),col=2,lwd=2)
}
#----First GAM----
if(F){
maxFlock.gam = gam(maxFlock~s(lat,long),data = regionData)
summary(maxFlock.gam)
}
#----Deviance smoothing----
# Scan the spline basis dimension k for s(lat, long) and record the model
# deviance (RSS) at each value, to guide the choice of smoothing parameter.
if(F){
  kval = seq(10, 130, by = 10)
  # Preallocated vapply replaces growing dev.rss/kval with c() inside a loop.
  dev.rss = vapply(kval, function(k) {
    deviance(gam(maxFlock ~ s(lat, long, k = k), data = regionData))
  }, numeric(1))
  plot(kval, dev.rss, xlab = "Parameters", ylab = "Deviance (RSS)", pch=15, main = "Smoothing Parameter Guide")
}
#----AIC smoothing----
# Same scan as the deviance section, but scoring each basis dimension by AIC.
if(F){
  kval = seq(10, 130, by = 10)
  # Preallocated vapply replaces growing dev.rss/kval with c() inside a loop.
  dev.rss = vapply(kval, function(k) {
    AIC(gam(maxFlock ~ s(lat, long, k = k), data = regionData))
  }, numeric(1))
  plot(kval, dev.rss, xlab = "Parameters", ylab = "AIC", pch=15, main = "Smoothing Parameter Guide")
}
#----GAM k=120----
if(F){
xy.maxFlock.gam = gam(log.maxFlock ~ s(lat,long ,k=120) + effortDays + effortHours, data = regionData)
xy.maxFlock.pred = predict(xy.maxFlock.gam,se.fit=T)
summary(xy.maxFlock.gam)
}
#----GAM k=120 predictions----
if(F){
maxFlock.120.gam.pred = data.frame(
x = regionData$lat,
y = regionData$long,
pred = fitted(xy.maxFlock.gam))
head(maxFlock.120.gam.pred)
coordinates(maxFlock.120.gam.pred) = c("x","y")
}
#----GAM k=120 pred. plot----
if(F){
pal = brewer.pal(5,"Reds")
q5 = classIntervals(maxFlock.120.gam.pred$pred, n=5, style = "quantile")
q5Colours = findColours(q5, pal)
plot(maxFlock.120.gam.pred, col=q5Colours,pch=19,cex=0.7,axes=T,main="GAM k=120")
legend("topleft", fill=attr(q5Colours, "palette"),
legend = names(attr(q5Colours,"table")),cex=0.7,bty="n")
}
#----Kriging----
if(F){
library(gstat)
xy2 <- regionData[,c(3,4)]
regionData.spdf <- SpatialPointsDataFrame(coords = regionData[,c("long", "lat")], data = regionData,
proj4string = CRS("+init=epsg:4326"))
logMf.vario = variogram(log.maxFlock~1,regionData.spdf)
plot(logMf.vario, pch=20,col=1,cex=2)
logMf.fit = fit.variogram(logMf.vario,
vgm(psill=10,"Sph",range=1.0,nugget=2))
plot(logMf.vario,logMf.fit,pch=20,col=2,cex=2,lwd=3,main="Log maxFlock Variogram")
#... see HW5 6700
}
#----lm model selection----
if(F){
lm1 = lm(maxFlock~yr+lat+long+effortDays+effortHours, data=regionData)
lm2 = lm(maxFlock~yr+lat*long+effortDays+effortHours, data=regionData)
lm3 = lm(maxFlock~yr+lat+long+effortHours, data=regionData)
lm4 = lm(maxFlock~yr+lat+long+effortDays, data=regionData)
lm5 = lm(maxFlock~yr+lat+effortDays+effortHours, data=regionData)
lm6 = lm(maxFlock~yr+long+effortDays+effortHours, data=regionData)
lm7 = lm(maxFlock~lat+long+effortDays+effortHours, data=regionData)
lm8 = lm(maxFlock~yr+lat+long+effortDays*effortHours, data=regionData)
lm9 = lm(maxFlock~yr+lat+long+effortDays*effortHours, data=regionData)
mdls = AIC(lm1,lm3,lm4,lm5,lm6,lm7)
(best = mdls[mdls$AIC == min(mdls$AIC),])
mdls = AIC(lm1,lm2,lm8,lm9) # Adding the model with the interaction term
(best = mdls[mdls$AIC == min(mdls$AIC),])
}
#----lm and predictions----
if(F){
pfw.lm = lm(maxFlock~yr+lat*long+effortDays+effortHours, data=regionData)
summary(pfw.lm)
# Predicted values by year
pfw.lm.pred = predictmeans(model = pfw.lm, modelterm = "yr", plot = F, newwd = F)
# Predicted means and standard error
pfw.lm.pred.pmeans = as.data.frame(pfw.lm.pred$`Predicted Means`)
pfw.lm.pred.pse = as.data.frame(pfw.lm.pred$`Standard Error of Means`)
# Pulling output
pfw.lm.pred.yr = as.character(pfw.lm.pred.pmeans$yr)
pfw.lm.pred.means = pfw.lm.pred.pmeans$Freq
pfw.lm.pred.se = pfw.lm.pred.pse$Freq
pfw.lm.pred.df = data.frame(pfw.lm.pred.yr, pfw.lm.pred.means, pfw.lm.pred.se)
}
#----Plotting predicted means from lm----
if(F){
pfw.lm.pred.df$pfw.lm.pred.yr = as.numeric(gsub(".*_","",pfw.lm.pred.df$pfw.lm.pred.yr))
ggplot(pfw.lm.pred.df, aes(factor(pfw.lm.pred.yr), pfw.lm.pred.means)) +
geom_point(color = 'red') +
geom_errorbar(aes(ymin = pfw.lm.pred.means - pfw.lm.pred.se,
ymax = pfw.lm.pred.means + pfw.lm.pred.se)) +
geom_line(aes(x=factor(pfw.lm.pred.yr), y=pfw.lm.pred.means, group=1),
linetype='dotted') +
ggtitle('New England/Mid-Atlantic Coasts') + theme(axis.title.x = element_text(color = 'blue'), axis.title.y = element_text(color = 'blue')) +
labs(x = 'Project FeederWatch', y = 'Maximum Flock') +
theme(panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(colour = 'aliceblue'),
panel.grid.minor = element_line(colour = 'white'),
plot.title = element_text(face = 'bold', hjust = 0.5, family = 'sans'))
}
#----Plotting BCR data----
if(F){
qplot(
y = dfCheckN$lat,
x = dfCheckN$long,
data = dfCheckN,
color = dfCheckN$BCR
) + labs(x = "Longitude", y = "Latitude", color = "Legend")
}
#----Cropping to show miss-IDs----
if(F){
e = extent(-126, -114, 32, 42.5)
cp = crop(SPDF[SPDF@data$maxFlock>0,], e)
cs = crop(StateProv, e)
plot(cs)
points(cp, col = "blue", pch = 20)
}
#----Plotting regions bbox----
if(T){
library(ggplot2)
regionSPDF = regionData
coordinates(regionSPDF) = c("lat","long")
bbox = data.frame(bbox(regionSPDF))
box = data.frame(maxlat = bbox$max[1], minlat = bbox$min[1], maxlong = bbox$max[2], minlong = bbox$min[2], id="1")
fortBCR = fortify(BCRs)
ggplot() +
geom_polygon(data=fortBCR, aes(x=long, y=lat, group=group), color="black", fill="white") +
geom_rect(data = box, aes(xmin=minlong, xmax = maxlong, ymin=minlat, ymax=maxlat), color="red", fill="transparent")
}
#----spplot of data----
if(T){
spplot(regionSPDF, zcol="maxFlock", colorkey=T,cex=2*regionSPDF$maxFlock/max(regionSPDF$maxFlock))
}
#----Exploring data by variable----
if(F){
# Days
ggplot() +
geom_point(aes(x=effortDays,y=maxFlock),data=regionData)
# Hours
ggplot() +
geom_point(aes(x=effortHours,y=maxFlock),data=regionData)
#Year
ggplot() +
geom_point(aes(x=yr,y=maxFlock),data=regionData)
# Latitude
ggplot() +
geom_point(aes(x=lat.1,y=maxFlock),data=regionData)
# Longitude
ggplot() +
geom_point(aes(x=long.1,y=maxFlock),data=regionData@data)
}
#----OLS regression----
if(F){
rownames(regionData) = NULL
#m.ols = lm()
}
#----Data exploration w Regression----
if(F){
lm = lm(log.maxFlock~yr+lat+long+effortDays+effortHours, data=regionData)
summary(lm)
par(mfrow = c(1,2))
plot(lm, which = c(1,2))
par(mfrow = c(1,1))
}
#----LAND USE----
if(F){
# Check working directory
#----Importing LCLU data as raster----
file_name='Downloads/na_landcover_2010_30m/na_landcover_2010_30m/NA_NALCMS_LC_30m_LAEA_mmu12_urb05/NA_NALCMS_LC_30m_LAEA_mmu12_urb05.tif'
nlcd=raster(file_name)
#----Importing nlcd legend metdata----
legend = read.csv("nlcd_2010_30m_metadata_legend.csv")
# Switching latlon to lonlat
o <- c(4,3,1,2,5:(length(colnames(regionData))))
test0 = regionData[,o]
#test = test0[1:3,]
test = test0
coordinates(test) = c("long", "lat")
proj4string(test) = CRS('+proj=longlat +datum=WGS84')
# Transform CRS of points to match that of NLCD
tp = spTransform(test, CRS(proj4string(nlcd))) # Do these need to be switched
#----Extracting the land cover classes----
date()
lclu.ext = extract(nlcd, tp, buffer = 1000)
date()
#View(lclu.ext)
#----Calculating proportions----
lclu.classes = sapply(lclu.ext, function(x) tabulate(x, 19))
lclu.classes = 100 * (lclu.classes / colSums(lclu.classes))
#----Flipping classes from rows to columns----
transpose.step = as.data.frame(t(lclu.classes))
names(transpose.step)[1:19] = as.character(c(1:19))
colnames(transpose.step) = as.character(legend$ClassType)
#----Combining the lclu classes product with original data----
final = cbind(test@data,transpose.step)
View(final)
} # This works, but will take 7 hrs to run and cause R to abort
| /WNV031318.R | no_license | GatesDupont/WestNileVirus | R | false | false | 15,564 | r | #----Libraries----
if(T){
library(rgdal)
library(proj4)
library(sp)
library(raster)
library(dplyr)
library(RColorBrewer)
library(classInt)
library(mgcv)
library(gamm4)
library(lme4)
library(predictmeans)
library(ggplot2)
}
#----Importing PFW data----
if(F){
raw.pfw = read.csv("C:/Users/itloaner/Desktop/WNV/UpdatedData/PFW_amecro_zerofill.csv") # BLJA and AMCR the same?
}
#----Converting to duplicate data----
if(T){
rawData = raw.pfw
}
#----Formatting effort----
if(T){
rawData[is.na(rawData)] <- 0 # Makes all the NA cells be filled with 0
rawData$effortDaysNumerical = rawData$DAY1_AM + rawData$DAY1_PM + rawData$DAY2_AM + rawData$DAY2_PM # Summing half days
# Assiging Effort Hours to categorical levels ###
idx<- rawData$EFFORT_HRS_ATLEAST == "0"
rawData$obshrs[idx] <- "x"
idx<- rawData$EFFORT_HRS_ATLEAST == "0.001"
rawData$obshrs[idx] <- "D"
idx<- rawData$EFFORT_HRS_ATLEAST == "1"
rawData$obshrs[idx] <- "C"
idx<- rawData$EFFORT_HRS_ATLEAST == "4"
rawData$obshrs[idx] <- "B"
idx<- rawData$EFFORT_HRS_ATLEAST == "4.001"
rawData$obshrs[idx] <- "B"
idx<- rawData$EFFORT_HRS_ATLEAST == "8.001"
rawData$obshrs[idx] <- "A"
}
#----Relevant data only----
if(T){
locID = rawData$LOC_ID
yr = rawData$FW_Year
maxFlock = rawData$nseen
lat = rawData$LATITUDE
long = rawData$LONGITUDE
effortHours = rawData$obshrs # empty = x, 0.001 = D, 1 = C, 4 = B, 8+ = A
effortDays = rawData$effortDaysNumerical
state = rawData$StatProv
#Final product:
dfEffHD = data.frame(locID, yr, maxFlock, lat, long, state, effortDays, effortHours) # just the necessary data
}
#----Removing observations pre-1995----
if(T){
dfR91 = dfEffHD[dfEffHD$yr != 'PFW_1991',]
dfR92 = dfR91[dfR91$yr != 'PFW_1992',]
dfR93 = dfR92[dfR92$yr != 'PFW_1993',]
dfR94 = dfR93[dfR93$yr != 'PFW_1994',]
dfR95 = dfR94[dfR94$yr != 'PFW_1995',]
dfR95 = droplevels(dfR95) # Dropping unused levels
}
#----Removing empty effort and high counts----
if(T){
dfRhigh = subset(dfR95, maxFlock < 49) #exclude high counts (over 50 birds)
dfEffRx = subset(dfRhigh, effortHours != "x") # excluding blank data
dfDaysR0 = subset(dfEffRx, effortDays != 0) # excluding blank data
}
#----Caclulate percent removed by exluding high counts----
if(F){
100*(1-(length(dfRhigh$maxFlock))/(length(dfR95$maxFlock)))
}
#----Selecting power users----
if(T){
###Cleaning up the data with conditionals###
# 1) Only include LocIDs active for at least 3 years ###
# 2) Only include LocIDs with at least 10 checklists during those years ###
yearly_list_count = table(dfDaysR0$locID, dfDaysR0$yr) # creates a table showing number of observations at each location each year.
row_sums = rowSums(yearly_list_count >= 10) # rows where there are at least 10 observations
threeyears = which(row_sums >=3) # for rows with 10 obs over at least 3 years
newIDs = names(threeyears) # just setting a new variable name
#Final product:
dfPwrUsrs = dfDaysR0[dfDaysR0$locID %in% newIDs,]
}
#----Renaming main dataframe----
if(T){
df4Uniq = data.frame(dfPwrUsrs$locID, dfPwrUsrs$yr, dfPwrUsrs$lat, dfPwrUsrs$long, dfPwrUsrs$effortDays, dfPwrUsrs$effortHours, dfPwrUsrs$maxFlock) #this makes a new df so I can effectively use unique()
pfw = unique(df4Uniq)
names(pfw)[1] = 'locID'
names(pfw)[2] = 'yr'
names(pfw)[3] = 'lat'
names(pfw)[4] = 'long'
names(pfw)[5] = 'effortDays'
names(pfw)[6] = 'effortHours'
names(pfw)[7] = 'maxFlock'
}
#----Duplicate dataframe to conserve data, remove later----
if(T){
toSPDF = pfw
}
#----Spatially formatting main dataframe----
if(T){
# Tom Auer CRS" +init=epsg:4326
xy <- toSPDF[,c(3,4)]
SPDF <- SpatialPointsDataFrame(coords = toSPDF[,c("long", "lat")], data = toSPDF,
proj4string = CRS("+init=epsg:4326"))
}
#----Importing BCR shape file, no state borders----
if(T){
shp = shapefile("C:/Users/itloaner/Desktop/BCR/BCR_Terrestrial_master_International.shx")
BCRs = spTransform(shp, CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
}
#----Impoting BCR shapefile with states----
if(T){
stateshp = shapefile("C:/Users/itloaner/Desktop/BCR/BCR_Terrestrial_master.shx")
StateProv = spTransform(stateshp, CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
}
#----Checking for same projections (ask about this)----
if(T){
isTRUE(proj4string(BCRs) == proj4string(SPDF))
}
#----Overlay: spatially joining attributes (BCR & pts) by location----
if(T){
unattachedBCR <- over(SPDF, BCRs[,"BCRNAME"])
SPDF$BCR <- NA # This is to avoid replacement row length issue (+/-1)
SPDF$BCR <- unattachedBCR$BCRNAME
}
#----Convert from spdf to dataframe with BCRs----
if(T){
dfWithBCRS = as.data.frame(SPDF)
}
#----Assigning Hawaii and BadBCRs----
if(T){
idx<- dfWithBCRS$BCR == "NOT RATED"
dfWithBCRS$BCR[idx] <- "HAWAII"
idx<- is.na(dfWithBCRS$BCR)
dfWithBCRS$BCR[idx] <- "BadBCR"
}
#----Plotting the Bad BCRs----
if(F){
# dfNAs = dfWithBCRS[dfWithBCRS$BCR == "BadBCR",] # Accounts for about 0.6 % of the data
# qplot(
# y = dfNAs$lat,
# x = dfNAs$long,
# data = dfNAs,
# color = dfNAs$BCR
# )
#
# # Plotting all BCR data
# qplot(
# y = dfWithBCRS$lat,
# x = dfWithBCRS$long,
# data = dfWithBCRS,
# color = dfWithBCRS$BCR
# )
}
#----Removing pts with BadBCRs----
if(T){
dfCleanBCR = dfWithBCRS[dfWithBCRS$BCR != "BadBCR",] # Removing the points that are plotted too close to coastline.
dfCheckN = dfCleanBCR[dfCleanBCR$maxFlock > 0, ]
}
#----Removing data versions----
# Free intermediate objects; only dfCheckN is needed from here on.
# NOTE(review): this also removes SPDF/StateProv, which the (disabled)
# "Cropping to show miss-IDs" section below still references.
if(T){
rm(dfCleanBCR, dfWithBCRS, unattachedBCR,
SPDF, stateshp,StateProv,xy,toSPDF,pfw,df4Uniq,
rawData, dfEffHD,dfR91,dfR92,dfR93,dfR94,dfR95)
}
#----Dataframe for model testing - select BCRs----
# Restrict to a single BCR for model development; the commented line shows the
# multi-region alternative. log(maxFlock) is safe because maxFlock > 0 was
# enforced above.
if(T){
#bcr.regions = dfCheckN[dfCheckN$BCR == "NEW ENGLAND/MID-ATLANTIC COAST"|dfCheckN$BCR == "PIEDMONT"|dfCheckN$BCR == "SOUTHEASTERN COASTAL PLAIN"|dfCheckN$BCR == "ATLANTIC NORTHERN FOREST"|dfCheckN$BCR == "APPALACHIAN MOUNTAINS"|dfCheckN$BCR == "LOWER GREAT LAKES/ ST. LAWRENCE PLAIN",]
bcr.regions = dfCheckN[dfCheckN$BCR == "NEW ENGLAND/MID-ATLANTIC COAST",]
regionData = bcr.regions
#unique(regionData$BCR)
regionData$log.maxFlock = log(regionData$maxFlock)
# NOTE(review): lat is on the x-axis here (transposed map) — confirm intended.
qplot(regionData$lat, regionData$long, data = regionData, color = regionData$maxFlock, size = regionData$maxFlock)
}
#----Interval plot color pallette----
# Disabled: choropleth-style point plot of maxFlock binned into quantile
# intervals (classInt + RColorBrewer).
if(F){
pal = brewer.pal(5, "Reds")
q5 = classIntervals(regionData$maxFlock, n=5, style = "quantile")
q5Colours = findColours(q5,pal)
# NOTE(review): lat on x / long on y — axes transposed vs. standard maps.
plot(regionData$lat, regionData$long, col = q5Colours, pch = 19, axes = T, cex = 0.3, main = "maxFlock")
legend("topleft", fill = attr(q5Colours, "palette"),
legend = names(attr(q5Colours, "table")),bty = "n")
}
#----Plotting pfw data by lat/lon----
# Scatter of maxFlock against latitude and longitude with supsmu trend lines.
if(F){
plot(maxFlock~lat, data = regionData, main = "maxFlock by Lat")
# BUG FIX: the smoothers previously referenced `newEngland`, an object that
# does not exist after the data was renamed to `regionData`.
lines(supsmu(regionData$lat, regionData$maxFlock),col=2,lwd=2)
# BUG FIX: the longitude plot was titled "by Lat".
plot(maxFlock~long, data = regionData, main = "maxFlock by Long")
lines(supsmu(regionData$long, regionData$maxFlock),col=2,lwd=2)
}
#----First GAM----
# Baseline spatial GAM (mgcv): maxFlock as a smooth surface over lat/long.
if(F){
maxFlock.gam = gam(maxFlock~s(lat,long),data = regionData)
summary(maxFlock.gam)
}
#----Deviance smoothing----
# Scan basis dimension k for the spatial smooth and plot model deviance (RSS).
# Rewritten: build the k grid with seq() and score it with vapply() instead of
# growing both vectors with c() inside a loop (quadratic-copy anti-pattern).
if(F){
kval = seq(10, 130, by = 10)
dev.rss = vapply(kval, function(k) deviance(gam(maxFlock~s(lat,long,k=k), data=regionData)), numeric(1))
plot(kval, dev.rss, xlab = "Parameters", ylab = "Deviance (RSS)", pch=15, main = "Smoothing Parameter Guide")
}
#----AIC smoothing----
# Same scan as above, scored by AIC instead of deviance.
if(F){
kval = seq(10, 130, by = 10)
aic.vals = vapply(kval, function(k) AIC(gam(maxFlock~s(lat,long,k=k), data=regionData)), numeric(1))
plot(kval, aic.vals, xlab = "Parameters", ylab = "AIC", pch=15, main = "Smoothing Parameter Guide")
}
#----GAM k=120----
# Final GAM: log(maxFlock) on a k=120 spatial smooth plus linear effort terms.
if(F){
xy.maxFlock.gam = gam(log.maxFlock ~ s(lat,long ,k=120) + effortDays + effortHours, data = regionData)
xy.maxFlock.pred = predict(xy.maxFlock.gam,se.fit=T)
summary(xy.maxFlock.gam)
}
#----GAM k=120 predictions----
# Fitted values paired with coordinates, promoted to a spatial object.
# NOTE(review): x=lat / y=long — transposed vs. the usual (long, lat) order;
# confirm this matches the plotting convention used elsewhere in the script.
if(F){
maxFlock.120.gam.pred = data.frame(
x = regionData$lat,
y = regionData$long,
pred = fitted(xy.maxFlock.gam))
head(maxFlock.120.gam.pred)
coordinates(maxFlock.120.gam.pred) = c("x","y")
}
#----GAM k=120 pred. plot----
# Quantile-binned map of the GAM predictions.
if(F){
pal = brewer.pal(5,"Reds")
q5 = classIntervals(maxFlock.120.gam.pred$pred, n=5, style = "quantile")
q5Colours = findColours(q5, pal)
plot(maxFlock.120.gam.pred, col=q5Colours,pch=19,cex=0.7,axes=T,main="GAM k=120")
legend("topleft", fill=attr(q5Colours, "palette"),
legend = names(attr(q5Colours,"table")),cex=0.7,bty="n")
}
#----Kriging----
# Empirical variogram of log(maxFlock) and a spherical model fit (gstat).
if(F){
library(gstat)
xy2 <- regionData[,c(3,4)]
regionData.spdf <- SpatialPointsDataFrame(coords = regionData[,c("long", "lat")], data = regionData,
proj4string = CRS("+init=epsg:4326"))
logMf.vario = variogram(log.maxFlock~1,regionData.spdf)
plot(logMf.vario, pch=20,col=1,cex=2)
# Starting values: partial sill 10, range 1 degree, nugget 2 — tuned by eye.
logMf.fit = fit.variogram(logMf.vario,
vgm(psill=10,"Sph",range=1.0,nugget=2))
plot(logMf.vario,logMf.fit,pch=20,col=2,cex=2,lwd=3,main="Log maxFlock Variogram")
#... see HW5 6700
}
#----lm model selection----
# Candidate linear models: drop-one-covariate variants (lm3–lm7), a lat:long
# interaction (lm2), and an effortDays:effortHours interaction (lm8).
# NOTE(review): lm8 and lm9 are identical formulas — lm9 is almost certainly a
# copy-paste slip; its intended formula should be confirmed before trusting the
# second AIC comparison.
if(F){
lm1 = lm(maxFlock~yr+lat+long+effortDays+effortHours, data=regionData)
lm2 = lm(maxFlock~yr+lat*long+effortDays+effortHours, data=regionData)
lm3 = lm(maxFlock~yr+lat+long+effortHours, data=regionData)
lm4 = lm(maxFlock~yr+lat+long+effortDays, data=regionData)
lm5 = lm(maxFlock~yr+lat+effortDays+effortHours, data=regionData)
lm6 = lm(maxFlock~yr+long+effortDays+effortHours, data=regionData)
lm7 = lm(maxFlock~lat+long+effortDays+effortHours, data=regionData)
lm8 = lm(maxFlock~yr+lat+long+effortDays*effortHours, data=regionData)
lm9 = lm(maxFlock~yr+lat+long+effortDays*effortHours, data=regionData)
mdls = AIC(lm1,lm3,lm4,lm5,lm6,lm7)
(best = mdls[mdls$AIC == min(mdls$AIC),])
mdls = AIC(lm1,lm2,lm8,lm9) # Adding the model with the interaction term
(best = mdls[mdls$AIC == min(mdls$AIC),])
}
#----lm and predictions----
# Refit the winning model and extract per-year predicted means + SEs via
# predictmeans(); results are reshaped into a plain data.frame for plotting.
if(F){
pfw.lm = lm(maxFlock~yr+lat*long+effortDays+effortHours, data=regionData)
summary(pfw.lm)
# Predicted values by year
pfw.lm.pred = predictmeans(model = pfw.lm, modelterm = "yr", plot = F, newwd = F)
# Predicted means and standard error
pfw.lm.pred.pmeans = as.data.frame(pfw.lm.pred$`Predicted Means`)
pfw.lm.pred.pse = as.data.frame(pfw.lm.pred$`Standard Error of Means`)
# Pulling output
pfw.lm.pred.yr = as.character(pfw.lm.pred.pmeans$yr)
pfw.lm.pred.means = pfw.lm.pred.pmeans$Freq
pfw.lm.pred.se = pfw.lm.pred.pse$Freq
pfw.lm.pred.df = data.frame(pfw.lm.pred.yr, pfw.lm.pred.means, pfw.lm.pred.se)
}
#----Plotting predicted means from lm----
# Time series of predicted means with +/- 1 SE error bars; the year label
# (e.g. "PFW_1991") is stripped to its numeric suffix first.
if(F){
pfw.lm.pred.df$pfw.lm.pred.yr = as.numeric(gsub(".*_","",pfw.lm.pred.df$pfw.lm.pred.yr))
ggplot(pfw.lm.pred.df, aes(factor(pfw.lm.pred.yr), pfw.lm.pred.means)) +
geom_point(color = 'red') +
geom_errorbar(aes(ymin = pfw.lm.pred.means - pfw.lm.pred.se,
ymax = pfw.lm.pred.means + pfw.lm.pred.se)) +
geom_line(aes(x=factor(pfw.lm.pred.yr), y=pfw.lm.pred.means, group=1),
linetype='dotted') +
ggtitle('New England/Mid-Atlantic Coasts') + theme(axis.title.x = element_text(color = 'blue'), axis.title.y = element_text(color = 'blue')) +
labs(x = 'Project FeederWatch', y = 'Maximum Flock') +
theme(panel.background = element_rect(fill = 'white'),
panel.grid.major = element_line(colour = 'aliceblue'),
panel.grid.minor = element_line(colour = 'white'),
plot.title = element_text(face = 'bold', hjust = 0.5, family = 'sans'))
}
#----Plotting BCR data----
# All cleaned points coloured by BCR membership.
if(F){
qplot(
y = dfCheckN$lat,
x = dfCheckN$long,
data = dfCheckN,
color = dfCheckN$BCR
) + labs(x = "Longitude", y = "Latitude", color = "Legend")
}
#----Cropping to show miss-IDs----
# Zoom on the US west coast to inspect possibly mis-identified points.
# NOTE(review): SPDF and StateProv were rm()'d in "Removing data versions";
# the upstream sections must be re-run before enabling this block.
if(F){
e = extent(-126, -114, 32, 42.5)
cp = crop(SPDF[SPDF@data$maxFlock>0,], e)
cs = crop(StateProv, e)
plot(cs)
points(cp, col = "blue", pch = 20)
}
#----Plotting regions bbox----
# Draws the BCR polygons with a red rectangle around the bounding box of the
# selected region's points.
# NOTE(review): coordinates() is given c("lat","long"), so x=lat / y=long in
# regionSPDF; the min/max indexing below is self-consistent with that swap,
# but confirm the axis order is intentional.
if(T){
library(ggplot2)
regionSPDF = regionData
coordinates(regionSPDF) = c("lat","long")
bbox = data.frame(bbox(regionSPDF))
box = data.frame(maxlat = bbox$max[1], minlat = bbox$min[1], maxlong = bbox$max[2], minlong = bbox$min[2], id="1")
fortBCR = fortify(BCRs)
ggplot() +
geom_polygon(data=fortBCR, aes(x=long, y=lat, group=group), color="black", fill="white") +
geom_rect(data = box, aes(xmin=minlong, xmax = maxlong, ymin=minlat, ymax=maxlat), color="red", fill="transparent")
}
#----spplot of data----
# Bubble plot of maxFlock; symbol size scaled to the regional maximum.
if(T){
spplot(regionSPDF, zcol="maxFlock", colorkey=T,cex=2*regionSPDF$maxFlock/max(regionSPDF$maxFlock))
}
#----Exploring data by variable----
# One scatter of maxFlock against each candidate covariate.
if(F){
# Days
ggplot() +
geom_point(aes(x=effortDays,y=maxFlock),data=regionData)
# Hours
ggplot() +
geom_point(aes(x=effortHours,y=maxFlock),data=regionData)
#Year
ggplot() +
geom_point(aes(x=yr,y=maxFlock),data=regionData)
# Latitude
ggplot() +
geom_point(aes(x=lat.1,y=maxFlock),data=regionData)
# Longitude
# NOTE(review): regionData@data implies regionData is a Spatial*DataFrame
# here, while the other panels use it as a plain data.frame — verify which
# version is in scope when this block is enabled.
ggplot() +
geom_point(aes(x=long.1,y=maxFlock),data=regionData@data)
}
#----OLS regression----
# Placeholder: reset row names before an OLS fit that was never written.
if(F){
rownames(regionData) = NULL
#m.ols = lm()
}
#----Data exploration w Regression----
# Quick diagnostic fit of log(maxFlock) with residual and Q-Q plots.
# NOTE(review): the result is named `lm`, shadowing stats::lm for the rest of
# the session — a different name would be safer.
if(F){
lm = lm(log.maxFlock~yr+lat+long+effortDays+effortHours, data=regionData)
summary(lm)
par(mfrow = c(1,2))
plot(lm, which = c(1,2))
par(mfrow = c(1,1))
}
#----LAND USE----
# Attaches 2010 NALCMS land-cover class percentages (1 km buffer) to each
# survey point. Disabled because extract() over all points takes ~7 hours.
if(F){
# Check working directory
#----Importing LCLU data as raster----
file_name='Downloads/na_landcover_2010_30m/na_landcover_2010_30m/NA_NALCMS_LC_30m_LAEA_mmu12_urb05/NA_NALCMS_LC_30m_LAEA_mmu12_urb05.tif'
nlcd=raster(file_name)
#----Importing nlcd legend metadata----
legend = read.csv("nlcd_2010_30m_metadata_legend.csv")
# Switching latlon to lonlat
o <- c(4,3,1,2,5:(length(colnames(regionData))))
test0 = regionData[,o]
#test = test0[1:3,]
test = test0
coordinates(test) = c("long", "lat")
proj4string(test) = CRS('+proj=longlat +datum=WGS84')
# Transform CRS of points to match that of NLCD
tp = spTransform(test, CRS(proj4string(nlcd)))
#----Extracting the land cover classes----
date()
lclu.ext = extract(nlcd, tp, buffer = 1000)
date()
#View(lclu.ext)
#----Calculating proportions----
# One length-19 count vector per point -> 19 x n_points matrix.
lclu.classes = sapply(lclu.ext, function(x) tabulate(x, 19))
# BUG FIX: `m / colSums(m)` recycles the length-ncol vector DOWN the columns
# of the matrix (column-major), dividing almost every count by the wrong
# point's total. sweep() divides each column by its own sum, giving true
# per-point percentages.
lclu.classes = 100 * sweep(lclu.classes, 2, colSums(lclu.classes), "/")
#----Flipping classes from rows to columns----
transpose.step = as.data.frame(t(lclu.classes))
names(transpose.step)[1:19] = as.character(c(1:19))
colnames(transpose.step) = as.character(legend$ClassType)
#----Combining the lclu classes product with original data----
final = cbind(test@data,transpose.step)
View(final)
} # This works, but will take 7 hrs to run and cause R to abort
|
# post-processing-make-five-panel-plot.R
#
# Builds per-state five-panel plots (Rt, cumulative cases, attack rate,
# infectious people, optionally infectious contacts) from Stan output.
###############################################################################
cat(" \n -------------------------------- \n \n Running post-processing-make-five-panel-plot.R \n \n -------------------------------- \n")
# Plotting/data dependencies; covid19AgeModel supplies the summarise_* and
# plot_* helpers used throughout this script.
suppressMessages(library(data.table, quietly = TRUE))
suppressMessages(library(bayesplot, quietly = TRUE))
suppressMessages(library(ggplot2, quietly = TRUE))
suppressMessages(library(tidyverse, quietly = TRUE))
suppressMessages(library(RColorBrewer, quietly = TRUE))
suppressMessages(library(scales, quietly = TRUE))
suppressMessages(library(ggpubr, quietly = TRUE))
suppressMessages(library(gridExtra, quietly = TRUE))
suppressMessages(library(cowplot, quietly = TRUE))
suppressMessages(library(magick, quietly = TRUE))
suppressMessages(library(viridis, quietly = TRUE))
suppressMessages(library(covid19AgeModel, quietly = TRUE))
# for dev purposes: defaults used when the script is run without command-line
# arguments; the runtime branch below overwrites them when args are supplied.
if(1)
{
args_dir <- list()
args_dir[['stanModelFile']] <- 'base_age_fsq_mobility_200821b2_cmdstanv'
args_dir[['out_dir']] <- '/rds/general/project/ratmann_covid19/live/age_renewal_usa/base_age_fsq_mobility_200821b2_cmdstanv-39states_Aug20'
args_dir[['job_tag']] <- '39states_Aug20'
args_dir[['overwrite']] <- 0
# BUG FIX: with_forecast was only set in the command-line branch, so running
# with these dev defaults made `if(!args_dir$with_forecast)` fail with
# "argument is of length zero". Default to 0 (exclude forecast period).
args_dir[['with_forecast']] <- 0
args_dir[["include_lambda_age"]] <- 0
}
# for runtime: parse positional flag/value pairs from the command line.
# Flags must appear in exactly this order; when present they replace the dev
# defaults above.
args_line <- as.list(commandArgs(trailingOnly=TRUE))
if(length(args_line) > 0)
{
stopifnot(args_line[[1]]=='-stanModelFile')
stopifnot(args_line[[3]]=='-out_dir')
stopifnot(args_line[[5]]=='-job_tag')
stopifnot(args_line[[7]]=='-overwrite')
stopifnot(args_line[[9]]=='-with_forecast')
args_dir <- list()
args_dir[['stanModelFile']] <- args_line[[2]]
args_dir[['out_dir']] <- args_line[[4]]
args_dir[['job_tag']] <- args_line[[6]]
args_dir[['overwrite']] <- as.integer(args_line[[8]])
args_dir[['with_forecast']] <- as.integer(args_line[[10]])
# include_lambda_age is not exposed on the command line; always off here.
args_dir[["include_lambda_age"]] <- 0
}
## start script
cat(" \n -------------------------------- \n with post-processing arguments \n -------------------------------- \n")
str(args_dir)
# Common prefix for all input/output files of this model run.
outfile.base <- paste0(args_dir$out_dir, "/", args_dir$stanModelFile , "-", args_dir$job_tag)
cat(" \n -------------------------------- \n summarise case samples: start \n -------------------------------- \n")
# load inputs for this script
file <- paste0(outfile.base,'-stanout-basic.RDS')
cat("\n read RDS:", file)
plot.pars.basic <- readRDS(file)
# map model age groups to report age groups (7 reporting bands)
age_cat_map <- make_age_cat_map_7(plot.pars.basic$pop_info)
#
# summarise Rt by age: cache the summary on disk and reuse it unless the
# caller forces a refresh via -overwrite.
file <- paste0(outfile.base,'-summary-Rt-age_averageover', "1", 'days.RDS')
# IDIOM FIX: use the scalar short-circuit || inside if() instead of the
# element-wise | (behaviour identical for these scalars, intent explicit).
if(!file.exists(file) || args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-E_effcasesByAge-gqs.RDS')
cat("\n read RDS:", file2)
E_effcasesByAge <- readRDS(file2)
file3 <- paste0(outfile.base,'-stanout-RtByAge-gqs.RDS')
cat("\n read RDS:", file3)
RtByAge <- readRDS(file3)
cat("\n ----------- summarise_Rt_instantaneous_byage_c ----------- \n")
Rt_byage_c <- summarise_Rt_instantaneous_byage_c(E_effcasesByAge,
RtByAge,
period_length = 1,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(Rt_byage_c, file=file)
}
# Always read back from disk so downstream code uses the persisted version.
if(file.exists(file))
{
Rt_byage_c <- readRDS(file)
}
# Drop the national aggregate; the panels below are per-state.
if(nrow(subset(Rt_byage_c, loc == 'US')) > 0)
{
Rt_byage_c = subset(Rt_byage_c, loc != 'US')
}
#
# summarise effectively infectious cases by age (cached on disk, same
# recompute-or-reuse pattern as the Rt summary above).
file <- paste0(outfile.base,'-summary-eff-infectious-cases-age.RDS')
# IDIOM FIX: scalar || instead of element-wise | inside if().
if(!file.exists(file) || args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-E_effcasesByAge-gqs.RDS')
cat("\n read RDS:", file2)
E_effcasesByAge <- readRDS(file2)
cat("\n ----------- summarise_e_acases_eff_byage_c ----------- \n")
e_acases_eff_byage_c <- summarise_e_acases_eff_byage_c(E_effcasesByAge,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(e_acases_eff_byage_c, file=file)
}
if(file.exists(file))
{
e_acases_eff_byage_c <- readRDS(file)
}
#
# summarise cases by age (cached on disk, same recompute-or-reuse pattern).
file <- paste0(outfile.base,'-summary-cases-age.RDS')
# IDIOM FIX: scalar || instead of element-wise | inside if().
if(!file.exists(file) || args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-E_casesByAge-gqs.RDS')
cat("\n read RDS:", file2)
E_casesByAge <- readRDS(file2)
cat("\n ----------- summarise_e_acases_byage_c ----------- \n")
e_acases_byage_c <- summarise_e_acases_byage_c(E_casesByAge,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(e_acases_byage_c, file=file)
}
if(file.exists(file))
{
e_acases_byage_c <- readRDS(file)
}
# Release the large posterior-sample arrays before the plotting stage.
E_effcasesByAge <- NULL
RtByAge <- NULL
E_casesByAge <- NULL
gc()
#
# summarise cumulative attack rate by age just for plotting
cat("\n ----------- summarise_attackrate_byage_c ----------- \n")
attackrate_byage_c <- summarise_attackrate_byage_c(e_acases_byage_c,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$regions)
#
# rescale for plotting: weight each reporting age band's attack rate by that
# band's share of the state population, so bands are comparable on one axis.
# Compute prop_c = (population in reporting band age.cat2) / (state total).
tmp <- subset(plot.pars.basic$pop_info, select=c( loc, age.cat, pop, pop_total))
tmp <- merge(tmp, subset(age_cat_map, select=c(age.cat2, age.cat)), by=c('age.cat'))
pop_c <- tmp[, list(prop_c=sum(pop)/pop_total), by=c('loc','age.cat2')]
pop_c <- unique(subset(pop_c,select=c(loc,age.cat2,prop_c)))
attackrate_byage_c <- subset(attackrate_byage_c, select=c(age_cat,age_band,date,M,time,loc,loc_label))
attackrate_byage_c <- merge( attackrate_byage_c, pop_c,by.x=c('age_cat','loc'),by.y=c('age.cat2','loc'))
# Replace the posterior median M by its population-weighted version in place
# (data.table := syntax); column name is restored to M afterwards.
attackrate_byage_c[, Mc:= M*prop_c]
attackrate_byage_c[, M:=NULL]
setnames(attackrate_byage_c,'Mc','M')
cat(" \n -------------------------------- \n summarise case samples: end \n -------------------------------- \n")
cat(" \n -------------------------------- \n summarise transmission par samples: start \n -------------------------------- \n")
#
# summarise force of infection (cached on disk, same recompute-or-reuse
# pattern as the case summaries above).
file <- paste0(outfile.base,'-summary-lambda-age.RDS')
# IDIOM FIX: scalar || instead of element-wise | inside if().
if(!file.exists(file) || args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-lambdaByAge-gqs.RDS')
cat("\n read RDS:", file2)
lambdaByAge <- readRDS(file2)
cat("\n ----------- summarise_lambda_byage_c ----------- \n")
lambda_byage_c <- summarise_lambda_byage_c(lambdaByAge,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(lambda_byage_c, file=file)
}
if(file.exists(file))
{
lambda_byage_c <- readRDS(file)
}
# Release the posterior-sample array.
lambdaByAge <- NULL
cat(" \n -------------------------------- \n summarise transmission par samples: end \n -------------------------------- \n")
cat(" \n -------------------------------- \n generating parameter plots \n -------------------------------- \n")
#
# handle if forecast period is to be included in plots: when -with_forecast
# is 0, truncate every summary at the last observed date across all states.
if(!args_dir$with_forecast)
{
date.max <- max( as.Date( sapply( plot.pars.basic$dates, function(x) max(as.character(x)) ) ) )
cat("\nExcluding forecast period from plotting, setting max date to ",as.character(date.max))
Rt_byage_c <- subset(Rt_byage_c, date<=date.max)
e_acases_eff_byage_c <- subset(e_acases_eff_byage_c, date<=date.max)
e_acases_byage_c <- subset(e_acases_byage_c, date<=date.max)
attackrate_byage_c <- subset(attackrate_byage_c, date<=date.max)
lambda_byage_c <- subset(lambda_byage_c, date<=date.max)
}
# Build one plot per state for each of the five panel components. All five
# loops share the same pattern: call the covid19AgeModel plot helper, then
# apply a common theme (rotated x labels, legend at the bottom).
# Panel A: instantaneous Rt by age band.
p_aRt <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_aRt[[c]] <- plot_Rt_byage_c(Rt_byage_c,
"aRt",
ylab='Rt\n(posterior median by age band)',
c,
outfile.base=NULL) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
# Panel D: number of effectively infectious people (non-scientific y labels).
p_eacases_eff <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_eacases_eff[[c]] <- plot_par_byage_c(e_acases_eff_byage_c,
"e_acases_eff",
ylab='Total number of \n infectious people \n(posterior median by age band)',
c,
outfile.base=NULL) +
scale_y_continuous(labels = function(x) format(x, scientific = FALSE)) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
# Panel B: cumulative cases.
p_acases <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_acases[[c]] <- plot_par_byage_c(e_acases_byage_c,
"e_acases",
ylab='Cumulative cases\n(posterior median by age band)',
c,
outfile.base=NULL) +
scale_y_continuous(labels = function(x) format(x, scientific = FALSE)) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
# Panel C: cumulative attack rate (population-weighted, see rescale above).
p_attrate <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_attrate[[c]] <- plot_par_byage_c(attackrate_byage_c,
"attrate",
ylab='Cumulative attack rate\n(posterior median by age band)',
c,
outfile.base=NULL) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
# Panel E (optional): infectious contacts / force of infection.
p_lambda <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_lambda[[c]] <- plot_par_byage_c(lambda_byage_c,
"lambda",
ylab='Infectious contacts \n(posterior median by age band)',
c,
outfile.base=NULL) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
cat(" \n -------------------------------- \n combinining plots to panel \n -------------------------------- \n")
# Assemble, per state: a bottom grid of panels B-D (plus E when
# include_lambda_age is set), then stack panel A (Rt) on top and save to PNG.
panel <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
if(args_dir$include_lambda_age){
panel[[c]] <- ggarrange( p_acases[[c]],
p_attrate[[c]],
p_eacases_eff[[c]],
p_lambda[[c]],
legend="bottom",
common.legend=TRUE,
labels=c('B','C','D','E'),
font.label=list(size=20),
hjust=0,
vjust=0.5,
heights=c(2,2,2,2),
widths=c(3,3,3,3))
} else
{
# NOTE(review): heights/widths still have length 4 with only 3 plots;
# ggarrange tolerates this but the extra entry is unused.
panel[[c]] <- ggarrange( p_acases[[c]],
p_attrate[[c]],
p_eacases_eff[[c]],
legend="bottom",
common.legend=TRUE,
labels=c('B','C','D'),
font.label=list(size=20),
hjust=0,
vjust=0.5,
heights=c(2,2,2,2),
widths=c(3,3,3,3))
}
# The Rt panel keeps no legend of its own; the shared legend sits below.
p_aRt[[c]] <- p_aRt[[c]] + theme(legend.position="none")
panel[[c]] <- ggarrange( p_aRt[[c]],
panel[[c]],
labels=c('A'),
ncol=1,
font.label=list(size=20),
hjust=0,
vjust=1,
heights=c(2,4),
widths=c(4,4))
ggsave(paste0(outfile.base,'-five_panel_plot_new-', c, '.png'), panel[[c]], w = 14, h=10)
}
cat(" \n -------------------------------- \n \n Completed post-processing-make-five-panel-plot.R \n \n -------------------------------- \n")
| /covid19AgeModel/inst/scripts/post-processing-make-five-panel-plot.R | permissive | viniciuszendron/covid19model | R | false | false | 11,698 | r | # post-processing-make-five-panel-plot.R
#
###############################################################################
cat(" \n -------------------------------- \n \n Running post-processing-make-five-panel-plot.R \n \n -------------------------------- \n")
suppressMessages(library(data.table, quietly = TRUE))
suppressMessages(library(bayesplot, quietly = TRUE))
suppressMessages(library(ggplot2, quietly = TRUE))
suppressMessages(library(tidyverse, quietly = TRUE))
suppressMessages(library(RColorBrewer, quietly = TRUE))
suppressMessages(library(scales, quietly = TRUE))
suppressMessages(library(ggpubr, quietly = TRUE))
suppressMessages(library(gridExtra, quietly = TRUE))
suppressMessages(library(cowplot, quietly = TRUE))
suppressMessages(library(magick, quietly = TRUE))
suppressMessages(library(viridis, quietly = TRUE))
suppressMessages(library(covid19AgeModel, quietly = TRUE))
# for dev purposes
if(1)
{
args_dir <- list()
args_dir[['stanModelFile']] <- 'base_age_fsq_mobility_200821b2_cmdstanv'
args_dir[['out_dir']] <- '/rds/general/project/ratmann_covid19/live/age_renewal_usa/base_age_fsq_mobility_200821b2_cmdstanv-39states_Aug20'
args_dir[['job_tag']] <- '39states_Aug20'
args_dir[['overwrite']] <- 0
args_dir[["include_lambda_age"]] <- 0
}
# for runtime
args_line <- as.list(commandArgs(trailingOnly=TRUE))
if(length(args_line) > 0)
{
stopifnot(args_line[[1]]=='-stanModelFile')
stopifnot(args_line[[3]]=='-out_dir')
stopifnot(args_line[[5]]=='-job_tag')
stopifnot(args_line[[7]]=='-overwrite')
stopifnot(args_line[[9]]=='-with_forecast')
args_dir <- list()
args_dir[['stanModelFile']] <- args_line[[2]]
args_dir[['out_dir']] <- args_line[[4]]
args_dir[['job_tag']] <- args_line[[6]]
args_dir[['overwrite']] <- as.integer(args_line[[8]])
args_dir[['with_forecast']] <- as.integer(args_line[[10]])
args_dir[["include_lambda_age"]] <- 0
}
## start script
cat(" \n -------------------------------- \n with post-processing arguments \n -------------------------------- \n")
str(args_dir)
outfile.base <- paste0(args_dir$out_dir, "/", args_dir$stanModelFile , "-", args_dir$job_tag)
cat(" \n -------------------------------- \n summarise case samples: start \n -------------------------------- \n")
# load inputs for this script
file <- paste0(outfile.base,'-stanout-basic.RDS')
cat("\n read RDS:", file)
plot.pars.basic <- readRDS(file)
# map model age groups to report age groups
age_cat_map <- make_age_cat_map_7(plot.pars.basic$pop_info)
#
# summarise Rt by age
file <- paste0(outfile.base,'-summary-Rt-age_averageover', "1", 'days.RDS')
if(!file.exists(file) | args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-E_effcasesByAge-gqs.RDS')
cat("\n read RDS:", file2)
E_effcasesByAge <- readRDS(file2)
file3 <- paste0(outfile.base,'-stanout-RtByAge-gqs.RDS')
cat("\n read RDS:", file3)
RtByAge <- readRDS(file3)
cat("\n ----------- summarise_Rt_instantaneous_byage_c ----------- \n")
Rt_byage_c <- summarise_Rt_instantaneous_byage_c(E_effcasesByAge,
RtByAge,
period_length = 1,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(Rt_byage_c, file=file)
}
if(file.exists(file))
{
Rt_byage_c <- readRDS(file)
}
if(nrow(subset(Rt_byage_c, loc == 'US')) > 0)
{
Rt_byage_c = subset(Rt_byage_c, loc != 'US')
}
#
# summarise effectively infectious cases by age
file <- paste0(outfile.base,'-summary-eff-infectious-cases-age.RDS')
if(!file.exists(file) | args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-E_effcasesByAge-gqs.RDS')
cat("\n read RDS:", file2)
E_effcasesByAge <- readRDS(file2)
cat("\n ----------- summarise_e_acases_eff_byage_c ----------- \n")
e_acases_eff_byage_c <- summarise_e_acases_eff_byage_c(E_effcasesByAge,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(e_acases_eff_byage_c, file=file)
}
if(file.exists(file))
{
e_acases_eff_byage_c <- readRDS(file)
}
#
# summarise cases by age
file <- paste0(outfile.base,'-summary-cases-age.RDS')
if(!file.exists(file) | args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-E_casesByAge-gqs.RDS')
cat("\n read RDS:", file2)
E_casesByAge <- readRDS(file2)
cat("\n ----------- summarise_e_acases_byage_c ----------- \n")
e_acases_byage_c <- summarise_e_acases_byage_c(E_casesByAge,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(e_acases_byage_c, file=file)
}
if(file.exists(file))
{
e_acases_byage_c <- readRDS(file)
}
E_effcasesByAge <- NULL
RtByAge <- NULL
E_casesByAge <- NULL
gc()
#
# summarise cumulative attack rate by age just for plotting
cat("\n ----------- summarise_attackrate_byage_c ----------- \n")
attackrate_byage_c <- summarise_attackrate_byage_c(e_acases_byage_c,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$regions)
#
# rescale for plotting
tmp <- subset(plot.pars.basic$pop_info, select=c( loc, age.cat, pop, pop_total))
tmp <- merge(tmp, subset(age_cat_map, select=c(age.cat2, age.cat)), by=c('age.cat'))
pop_c <- tmp[, list(prop_c=sum(pop)/pop_total), by=c('loc','age.cat2')]
pop_c <- unique(subset(pop_c,select=c(loc,age.cat2,prop_c)))
attackrate_byage_c <- subset(attackrate_byage_c, select=c(age_cat,age_band,date,M,time,loc,loc_label))
attackrate_byage_c <- merge( attackrate_byage_c, pop_c,by.x=c('age_cat','loc'),by.y=c('age.cat2','loc'))
attackrate_byage_c[, Mc:= M*prop_c]
attackrate_byage_c[, M:=NULL]
setnames(attackrate_byage_c,'Mc','M')
cat(" \n -------------------------------- \n summarise case samples: end \n -------------------------------- \n")
cat(" \n -------------------------------- \n summarise transmission par samples: start \n -------------------------------- \n")
#
# summarise force of infection
file <- paste0(outfile.base,'-summary-lambda-age.RDS')
if(!file.exists(file) | args_dir[['overwrite']])
{
file2 <- paste0(outfile.base,'-stanout-lambdaByAge-gqs.RDS')
cat("\n read RDS:", file2)
lambdaByAge <- readRDS(file2)
cat("\n ----------- summarise_lambda_byage_c ----------- \n")
lambda_byage_c <- summarise_lambda_byage_c(lambdaByAge,
age_cat_map,
plot.pars.basic$pop_info,
plot.pars.basic$dates,
plot.pars.basic$regions)
cat("\nWrite ",file," ... ")
saveRDS(lambda_byage_c, file=file)
}
if(file.exists(file))
{
lambda_byage_c <- readRDS(file)
}
lambdaByAge <- NULL
cat(" \n -------------------------------- \n summarise transmission par samples: end \n -------------------------------- \n")
cat(" \n -------------------------------- \n generating parameter plots \n -------------------------------- \n")
#
# handle if forecast period is to be included in plots
if(!args_dir$with_forecast)
{
date.max <- max( as.Date( sapply( plot.pars.basic$dates, function(x) max(as.character(x)) ) ) )
cat("\nExcluding forecast period from plotting, setting max date to ",as.character(date.max))
Rt_byage_c <- subset(Rt_byage_c, date<=date.max)
e_acases_eff_byage_c <- subset(e_acases_eff_byage_c, date<=date.max)
e_acases_byage_c <- subset(e_acases_byage_c, date<=date.max)
attackrate_byage_c <- subset(attackrate_byage_c, date<=date.max)
lambda_byage_c <- subset(lambda_byage_c, date<=date.max)
}
p_aRt <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_aRt[[c]] <- plot_Rt_byage_c(Rt_byage_c,
"aRt",
ylab='Rt\n(posterior median by age band)',
c,
outfile.base=NULL) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
p_eacases_eff <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_eacases_eff[[c]] <- plot_par_byage_c(e_acases_eff_byage_c,
"e_acases_eff",
ylab='Total number of \n infectious people \n(posterior median by age band)',
c,
outfile.base=NULL) +
scale_y_continuous(labels = function(x) format(x, scientific = FALSE)) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
p_acases <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_acases[[c]] <- plot_par_byage_c(e_acases_byage_c,
"e_acases",
ylab='Cumulative cases\n(posterior median by age band)',
c,
outfile.base=NULL) +
scale_y_continuous(labels = function(x) format(x, scientific = FALSE)) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
p_attrate <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_attrate[[c]] <- plot_par_byage_c(attackrate_byage_c,
"attrate",
ylab='Cumulative attack rate\n(posterior median by age band)',
c,
outfile.base=NULL) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
p_lambda <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
p_lambda[[c]] <- plot_par_byage_c(lambda_byage_c,
"lambda",
ylab='Infectious contacts \n(posterior median by age band)',
c,
outfile.base=NULL) +
theme_bw(base_size=14) +
theme(axis.text.x = element_text(angle = 45, hjust = 1),axis.title.y = element_text(size=12),
legend.position="bottom")
}
cat(" \n -------------------------------- \n combinining plots to panel \n -------------------------------- \n")
panel <- vector('list',length(plot.pars.basic$regions))
for(c in plot.pars.basic$regions)
{
if(args_dir$include_lambda_age){
panel[[c]] <- ggarrange( p_acases[[c]],
p_attrate[[c]],
p_eacases_eff[[c]],
p_lambda[[c]],
legend="bottom",
common.legend=TRUE,
labels=c('B','C','D','E'),
font.label=list(size=20),
hjust=0,
vjust=0.5,
heights=c(2,2,2,2),
widths=c(3,3,3,3))
} else
{
panel[[c]] <- ggarrange( p_acases[[c]],
p_attrate[[c]],
p_eacases_eff[[c]],
legend="bottom",
common.legend=TRUE,
labels=c('B','C','D'),
font.label=list(size=20),
hjust=0,
vjust=0.5,
heights=c(2,2,2,2),
widths=c(3,3,3,3))
}
p_aRt[[c]] <- p_aRt[[c]] + theme(legend.position="none")
panel[[c]] <- ggarrange( p_aRt[[c]],
panel[[c]],
labels=c('A'),
ncol=1,
font.label=list(size=20),
hjust=0,
vjust=1,
heights=c(2,4),
widths=c(4,4))
ggsave(paste0(outfile.base,'-five_panel_plot_new-', c, '.png'), panel[[c]], w = 14, h=10)
}
cat(" \n -------------------------------- \n \n Completed post-processing-make-five-panel-plot.R \n \n -------------------------------- \n")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/env.R
\name{env_unbind}
\alias{env_unbind}
\title{Remove bindings from an environment.}
\usage{
env_unbind(env = caller_env(), nms, inherit = FALSE)
}
\arguments{
\item{env}{An environment or an object with a S3 method for
\code{env()}. If missing, the environment of the current
evaluation frame is returned.}
\item{nms}{A character vector containing the names of the bindings
to remove.}
\item{inherit}{Whether to look for bindings in the parent
environments.}
}
\value{
The input object \code{env}, with its associated
environment modified in place.
}
\description{
\code{env_unbind()} is the complement of
\code{\link{env_bind}()}. Like \code{env_has()}, it ignores the
parent environments of \code{env} by default. Set \code{inherit} to
\code{TRUE} to track down bindings in parent environments.
}
\examples{
data <- stats::setNames(letters, letters)
env_bind(environment(), data)
env_has(environment(), letters)
# env_unbind() removes bindings:
env_unbind(environment(), letters)
env_has(environment(), letters)
# With inherit = TRUE, it removes bindings in parent environments
# as well:
parent <- new_env(empty_env(), list(foo = "a"))
env <- new_env(parent, list(foo = "b"))
env_unbind(env, "foo", inherit = TRUE)
env_has(env, "foo", inherit = TRUE)
}
| /man/env_unbind.Rd | no_license | jmpasmoi/rlang | R | false | true | 1,344 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/env.R
\name{env_unbind}
\alias{env_unbind}
\title{Remove bindings from an environment.}
\usage{
env_unbind(env = caller_env(), nms, inherit = FALSE)
}
\arguments{
\item{env}{An environment or an object with a S3 method for
\code{env()}. If missing, the environment of the current
evaluation frame is returned.}
\item{nms}{A character vector containing the names of the bindings
to remove.}
\item{inherit}{Whether to look for bindings in the parent
environments.}
}
\value{
The input object \code{env}, with its associated
environment modified in place.
}
\description{
\code{env_unbind()} is the complement of
\code{\link{env_bind}()}. Like \code{env_has()}, it ignores the
parent environments of \code{env} by default. Set \code{inherit} to
\code{TRUE} to track down bindings in parent environments.
}
\examples{
data <- stats::setNames(letters, letters)
env_bind(environment(), data)
env_has(environment(), letters)
# env_unbind() removes bindings:
env_unbind(environment(), letters)
env_has(environment(), letters)
# With inherit = TRUE, it removes bindings in parent environments
# as well:
parent <- new_env(empty_env(), list(foo = "a"))
env <- new_env(parent, list(foo = "b"))
env_unbind(env, "foo", inherit = TRUE)
env_has(env, "foo", inherit = TRUE)
}
|
# Package load hook: registers pkgconfig flags advertising which optional
# HDF5 compression filters (Blosc, LZF) the compiled code supports, then
# initializes the Blosc filter. has_blosc()/has_lzf()/start_blosc() are
# compiled helpers from this package.
.onLoad <- function(libname, pkgname) {
pkgconfig::set_config("EigenH5::use_blosc" = has_blosc())
pkgconfig::set_config("EigenH5::use_lzf" = has_lzf())
start_blosc()
}
# Join path components with "/", normalize the result, and collapse the
# first doubled slash. An empty result maps to the filesystem root "/".
fix_paths <- function(...) {
  joined <- normalizePath(paste(..., sep = "/"), mustWork = FALSE)
  cleaned <- stringr::str_replace(joined, "//", "/")
  if (length(cleaned) == 0) {
    cleaned <- "/"
  }
  cleaned
}
#' Convert an RLE-encoded vector to an offset+size dataframe
#'
#' @param x either an object of class `rle`, or a vector that can be
#'   converted to one via `rle(x)`
#' @param na_replace sentinel used in place of NA before run-length encoding
#'   (`rle` does not handle NA runs well); runs equal to this value are
#'   reported back as NA
#'
#' @return tibble with columns `value`, `offset` and `datasize`
#' @export
#'
#' @examples
#' x <- rev(rep(6:10, 1:5))
#' x_na <- c(NA,NA,NA,rev(rep(6:10, 1:5)))
#' x_n3 <- c(-3,-3,-3,rev(rep(6:10, 1:5)))
#' print(rle2offset(rle(x)))
#' stopifnot(
#'   identical(rle2offset(rle(x)),rle2offset(x)),
#'   identical(rle2offset(x_na,na_replace=-3),rle2offset(x_n3)))
rle2offset <- function(x, na_replace = -1L) {
  if (!inherits(x, "rle")) {
    x <- rle(replace(x, is.na(x), na_replace))
  }
  run_vals <- x$values
  run_lens <- x$lengths
  # Runs carrying the sentinel are surfaced as missing values again.
  run_vals[run_vals == na_replace] <- NA_integer_
  tibble::tibble(
    value = run_vals,
    # Offset of each run = cumulative length of all preceding runs.
    offset = cumsum(c(0, run_lens[-length(run_lens)])),
    datasize = run_lens
  )
}
# List the contents of an HDF5 group.
#
# With details = FALSE: returns the (normalized) object names under
# `groupname`, honoring `full_names`. With details = TRUE: returns a tibble
# with one row per object (name, dims, HDF5 type), obtained by querying each
# object individually via the compiled typeof_h5()/dim_h5() helpers.
ls_h5 <- function(filename,groupname="/",full_names=FALSE,details=FALSE){
if(!details){
fs::path_norm(ls_h5_exp(filename = fs::path_expand(filename),
groupname = groupname,
full_names = full_names))
}else{
# Always list full names first so the per-object metadata lookups resolve;
# relative names (if requested) are recomputed at the end.
full_n <- ls_h5_exp(filename = fs::path_expand(filename),
groupname = groupname,
full_names = TRUE)
id_type=purrr::map_chr(full_n,~typeof_h5(filename,.x))
id_dim=purrr::map(full_n,~dim_h5(filename,.x))
# If every object has the same rank, flatten dims into a plain integer
# vector; otherwise `dims` remains a list-column.
# NOTE(review): flatten_int() only yields one value per object when the
# shared rank is 1; higher shared ranks would break the tibble() call
# below -- confirm callers only hit this path with rank-1 datasets.
if(all(lengths(id_dim)==length(id_dim[[1]]))){
id_dim <- purrr::flatten_int(id_dim)
}
if(!full_names){
full_n <- fs::path_rel(full_n,start=groupname)
}
tibble::tibble(name=full_n,dims=id_dim,type=id_type)
}
}
# Build an HDF5 data path from components: join with "/", strip a leading
# slash, and collapse doubled slashes produced by empty components.
construct_data_path <- function(...) {
  joined <- paste(list(...), collapse = "/")
  no_lead <- gsub("^/", "", joined)
  gsub("//", "/", no_lead)
}
## lockf <- function(filename){
## return(paste0(filename,".lck"))
## }
# Test whether `datapath` names an existing object inside the HDF5 file
# `filename`. Errors if the file itself does not exist.
#
# @param filename path to an existing HDF5 file
# @param datapath object path to test within the file
# @return result of the compiled isObject() helper
isObject_h5 <- function(filename, datapath) {
  stopifnot(file.exists(filename))
  # The previous revision computed an unused `timeout` guarded by
  # hasArg(timeout); with no `timeout` formal and no `...` that branch could
  # never matter, so the dead code has been removed.
  isObject(filename, datapath)
}
# Build a (filenames, groupnames, datanames) dataframe describing one matrix
# slice per subgroup of `group_prefix`, ordered by the subgroups' integer
# names.
#
# @param filename HDF5 file to scan
# @param group_prefix group whose children each hold one slice
# @param dataname dataset name shared by every slice
# @return tibble with one row per subgroup, sorted numerically by group name
gen_matslice_df <- function(filename, group_prefix, dataname) {
  sub_grps <- ls_h5(filename, group_prefix)
  # dplyr::data_frame() is deprecated (and removed in recent dplyr);
  # tibble::tibble() is the supported equivalent.
  retdf <- tibble::tibble(filenames = filename,
                          groupnames = paste0(group_prefix, "/", sub_grps),
                          datanames = dataname) %>%
    dplyr::arrange(as.integer(sub_grps))
  return(retdf)
}
# Convenience wrapper: dimensions of the dataset addressed by the path
# pieces `...` (joined via construct_data_path()) inside file `f`.
get_dims_h5 <- function(f, ...) {
  dim_h5(f, construct_data_path(...))
}
# Dispatch a write to the appropriate typed writer based on the class of
# `data`: lists recurse via write_l_h5(), vectors go to write_vector_h5(),
# matrices to write_matrix_h5(). NULL is silently ignored; anything else is
# an error.
#
# @param data list, vector, matrix, or NULL
# @param filename target HDF5 file
# @param datapath destination path within the file
# @param offset write offset for vector data
# @param subsets list with `subset_rows`/`subset_cols` index vectors
write_h5 <- function(data, filename, datapath, offset = 0L,
                     subsets = list(subset_rows = integer(),
                                    subset_cols = integer())) {
  # BUGFIX: this function previously referenced an undefined `h5filepath`
  # (the parameter is `filename`) and passed nonexistent argument names
  # (`h5filepath=`, `datal=`) to write_l_h5().
  if (is.list(data)) {
    write_l_h5(data = data, filename = filename, datapath = datapath)
  } else if (is.vector(data)) {
    write_vector_h5(filename = filename, datapath = datapath, data = data,
                    offset = offset, subset = subsets[["subset_rows"]])
  } else if (is.matrix(data)) {
    write_matrix_h5(filename = filename, datapath = datapath, data = data,
                    subset_rows = subsets[["subset_rows"]],
                    subset_cols = subsets[["subset_cols"]])
  } else if (!is.null(data)) {
    stop("data is of unknown type!")
  }
}
# Back-compat alias for ls_h5() (f = file, gn = group name).
get_objs_h5 <- function(f, gn, full_names = FALSE) {
  # Use FALSE rather than the reassignable shorthand F.
  ls_h5(f, gn, full_names)
}
# Summarise a position-sorted dataframe into per-group offsets/chunksizes
# suitable for chunked HDF5 reads/writes.
#
# @param info_df dataframe holding per-row 1-based positions
# @param pos_id unquoted position column
# @param group_id unquoted grouping column
# @param rowsel,colsel whether to emit row_/col_ prefixed offset+chunksize
#   columns (both by default)
# @return one row per group with the requested offset/chunksize columns
split_chunk_df <- function(info_df, pos_id, group_id, rowsel = TRUE, colsel = TRUE) {
  q_pos <- dplyr::enquo(pos_id)
  q_group <- dplyr::enquo(group_id)
  # n() must be namespace-qualified: every other dplyr call here is, and in
  # package code dplyr is not attached.
  sel_df <- dplyr::group_by(info_df, !!q_group) %>%
    dplyr::summarise(offset = as.integer(min(!!q_pos) - 1),
                     chunksize = as.integer(dplyr::n()))
  if (rowsel) {
    sel_df <- dplyr::mutate(sel_df, row_offsets = offset, row_chunksizes = chunksize)
  }
  if (colsel) {
    sel_df <- dplyr::mutate(sel_df, col_offsets = offset, col_chunksizes = chunksize)
  }
  sel_df <- dplyr::select(sel_df, -offset, -chunksize)
  return(sel_df)
}
# read_h
# Recursively write a named list into an HDF5 file: each element is written
# under datapath/<name> via write_h5() (nested lists recurse back here).
#
# @param data named list of vectors/matrices/lists
# @param filename target HDF5 file
# @param datapath group under which elements are written ("/" means root)
write_l_h5 <- function(data, filename, datapath, ...) {
  stopifnot(is.list(data))
  if (datapath == "/") {
    datapath <- ""
  }
  # BUGFIX: iterate over the `data` argument; the previous revision
  # referenced an undefined `datal`.
  purrr::iwalk(data, ~write_h5(filename, fix_paths(datapath, .y), data = .x))
}
## path_exists_h5 <- function(h5filepath,datapath){
## retvec <- c(FALSE,FALSE)
## retvec[1] <- file.exists(h5filepath)
## if(retvec[1]){
## return(c(file.exists(h5filepath
# get_sub_obj <- function(h5filepath,tpath="/"){
# res <- purrr::possibly(get_objs_h5,otherwise=NULL,quiet = T)(h5filepath,tpath)
# if(is.null(res)){
# return(tpath)
# }
# return(paste0(ifelse(tpath=="/","",tpath),"/",res))
# }
# split_chunk_df<- function(info_df,pos_id,group_id,rowsel=T,colsel=T){
# q_pos <- dplyr::enquo(pos_id)
# q_group <- dplyr::enquo(group_id)
# sel_df <- dplyr::group_by(info_df,!!q_group) %>%
# dplyr::summarise(offset=as.integer(min(!!q_pos)-1),chunksize=as.integer(n()))
# if(rowsel){
# sel_df <- dplyr::mutate(sel_df,row_offsets=offset,row_chunksizes=chunksize)
# }
# if(colsel){
# sel_df <- dplyr::mutate(sel_df,col_offsets=offset,col_chunksizes=chunksize)
# }
# sel_df <- dplyr::select(sel_df,-offset,-chunksize)
# return(sel_df)
# }
# h5ls_df <- function(h5filepath){
# root_objs <- get_sub_obj(h5filepath =h5filepath)
# bg_objs <- purrr::possibly(get_objs_h5,otherwise = NULL)
#
# node_objs <- purrr::map(root_objs,~paste0(ifelse(.x=="/","",.x),"/",bg_objs(h5filepath=h5filepath,groupname = .x)))
#
# }
#
# read_mat_h5 <- function(filename,groupname,dataname,offset_rows=0,offset_cols=0,chunksize_rows=NULL,chunksize_cols=NULL){
# mat_dims <- get_dims_h5(filename,groupname,dataname)
# stopifnot(length(mat_dims)==2)
# if(is.null(chunksize_cols)){
# chunksize_cols <- mat_dims[2]-offset_cols
# }
# if(is.null(chunksize_rows)){
# chunksize_rows <- mat_dims[1]-offset_rows
# }
# return(read_matrix_h5(filename = filename,
# groupname = groupname,
# dataname = dataname,
# offsets = c(offset_rows,offset_cols),
# chunksizes = c(chunksize_rows,chunksize_cols)))
# }
## read_l_h5 <- function(filename,h5path="/",...){
## all_objs <- ls_h5(filename,h5path,full_names = T)
## names(all_objs) <- basename(all_objs)
## purrr::map(all_objs,function(fp){
## if(isGroup(filename,fp)){
## return(read_l_h5(filename,fp))
## }
## md <- dims_h5(filename,fp)
## if(length(md)>1){
## return(read_matrix(filename,fp))
## }
## return(read_vector(filename,datapath = fp))
## })
## }
# Create one (empty) HDF5 matrix per row of `dff`.
#
# Expected columns: filenames, groupnames, datanames, datatypes ("integer"
# or "numeric"), row_chunksizes/col_chunksizes (the matrix dimensions) and
# optionally row_c_chunksizes/col_c_chunksizes (the HDF5 chunk layout).
# NOTE(review): pwalk() is called for its side effects and returns `dff`
# invisibly, so this function's value is the input dataframe -- confirm
# callers do not expect anything else.
create_mat_l <- function(dff){
# Zero-length prototype vectors communicate the element type to
# create_matrix_h5(), keyed by the row's `datatypes` string.
tl <- list(integer=integer(),numeric=numeric())
return(purrr::pwalk(dff,function(filenames,
groupnames,
datanames,
datatypes,
row_chunksizes,
col_chunksizes,
row_c_chunksizes=NULL,
col_c_chunksizes=NULL,
...){
EigenH5::create_matrix_h5(
filenames,
groupnames,
datanames,
tl[[datatypes]],
doTranspose=F,
dims=c(row_chunksizes,col_chunksizes),
chunksizes=c(row_c_chunksizes,col_c_chunksizes))
}))
}
#' Convert delimited text to HDF5 with a custom callback
#'
#' Reads each input file in chunks with `readr::read_delim_chunked` and
#' appends every chunk to `output_file` via `callback_fun`.
#'
#' @param input_files one or more files able to be read by `readr::read_delim`
#' @param output_file output HDF5 file
#' @param h5_args list of extra args unpacked and passed to `callback_fun`
#'   (e.g. `datapath`); `append = TRUE` is always enforced
#' @param callback_fun function with signature matching
#'   function(df,filename,datapath,...) (defaults to `write_df_h5`)
#' @param id_col NULL/FALSE for no row-id column; TRUE to add a column named
#'   `id_col`; a length-1 character to add a column with that name holding the
#'   global (cross-file) 1-based row number
#' @param ... passed on to `readr::read_delim_chunked` (e.g. `delim`)
#'
#' @return called for its side effect on `output_file`
#' @export
#'
#' @examples
#'
#' temp_h5 <- fs::file_temp(ext="h5")
#' delim2h5(readr::readr_example("mtcars.csv"),temp_h5,delim="/")
#' new_data <- read_df_h5(temp_h5)
delim2h5 <- function(input_files,
                     output_file,
                     h5_args = list(datapath = "/"),
                     callback_fun = write_df_h5,
                     id_col = NULL, ...) {
  # Always append so successive chunks/files accumulate (set exactly once;
  # a duplicated assignment was removed).
  h5_args[["append"]] <- TRUE
  callback_args <- formalArgs(callback_fun)
  stopifnot(all(fs::file_exists(input_files)))
  stopifnot(all.equal(callback_args, formalArgs(write_df_h5)))
  # Rows already written, so row ids continue across input files.
  global_offset <- 0L
  if (is.null(id_col) || isFALSE(id_col)) {
    wf <- function(x, pos)
      rlang::exec(callback_fun, df = x, filename = output_file, !!!h5_args)
  } else if (is.character(id_col)) {
    stopifnot(length(id_col) == 1)
    wf <- function(x, pos) {
      pos_seq <- as.integer(seq.int(from = as.integer(pos) + global_offset,
                                    length.out = as.integer(nrow(x))))
      rlang::exec(callback_fun,
                  df = dplyr::mutate(x, {{id_col}} := pos_seq),
                  filename = output_file, !!!h5_args)
    }
  } else {
    stopifnot(isTRUE(id_col))
    wf <- function(x, pos) {
      pos_seq <- as.integer(seq(from = pos + global_offset, length.out = nrow(x)))
      rlang::exec(callback_fun,
                  df = dplyr::mutate(x, id_col = pos_seq),
                  filename = output_file, !!!h5_args)
    }
  }
  for (f in input_files) {
    readr::read_delim_chunked(file = f,
                              callback = readr::SideEffectChunkCallback$new(wf),
                              ...)
    # After each file, advance the offset to the current total row count.
    all_ds <- ls_h5(output_file, h5_args$datapath, full_names = TRUE)
    global_offset <- as.integer(dim_h5(output_file, all_ds[1])[1])
  }
}
| /R/utils.R | no_license | CreRecombinase/EigenH5 | R | false | false | 9,962 | r | .onLoad <- function(libname, pkgname) {
pkgconfig::set_config("EigenH5::use_blosc" = has_blosc())
pkgconfig::set_config("EigenH5::use_lzf" = has_lzf())
start_blosc()
}
# Join path components with "/", normalize the result, and collapse the
# first doubled slash. An empty result maps to the filesystem root "/".
fix_paths <- function(...) {
  joined <- normalizePath(paste(..., sep = "/"), mustWork = FALSE)
  cleaned <- stringr::str_replace(joined, "//", "/")
  if (length(cleaned) == 0) {
    cleaned <- "/"
  }
  cleaned
}
#' Convert an RLE-encoded vector to an offset+size dataframe
#'
#' @param x either an object of class `rle`, or a vector that can be
#'   converted to one via `rle(x)`
#' @param na_replace sentinel used in place of NA before run-length encoding
#'   (`rle` does not handle NA runs well); runs equal to this value are
#'   reported back as NA
#'
#' @return tibble with columns `value`, `offset` and `datasize`
#' @export
#'
#' @examples
#' x <- rev(rep(6:10, 1:5))
#' x_na <- c(NA,NA,NA,rev(rep(6:10, 1:5)))
#' x_n3 <- c(-3,-3,-3,rev(rep(6:10, 1:5)))
#' print(rle2offset(rle(x)))
#' stopifnot(
#'   identical(rle2offset(rle(x)),rle2offset(x)),
#'   identical(rle2offset(x_na,na_replace=-3),rle2offset(x_n3)))
rle2offset <- function(x, na_replace = -1L) {
  if (!inherits(x, "rle")) {
    x <- rle(replace(x, is.na(x), na_replace))
  }
  run_vals <- x$values
  run_lens <- x$lengths
  # Runs carrying the sentinel are surfaced as missing values again.
  run_vals[run_vals == na_replace] <- NA_integer_
  tibble::tibble(
    value = run_vals,
    # Offset of each run = cumulative length of all preceding runs.
    offset = cumsum(c(0, run_lens[-length(run_lens)])),
    datasize = run_lens
  )
}
# List the contents of an HDF5 group.
#
# With details = FALSE: returns the (normalized) object names under
# `groupname`, honoring `full_names`. With details = TRUE: returns a tibble
# with one row per object (name, dims, HDF5 type), obtained by querying each
# object individually via the compiled typeof_h5()/dim_h5() helpers.
ls_h5 <- function(filename,groupname="/",full_names=FALSE,details=FALSE){
if(!details){
fs::path_norm(ls_h5_exp(filename = fs::path_expand(filename),
groupname = groupname,
full_names = full_names))
}else{
# Always list full names first so the per-object metadata lookups resolve;
# relative names (if requested) are recomputed at the end.
full_n <- ls_h5_exp(filename = fs::path_expand(filename),
groupname = groupname,
full_names = TRUE)
id_type=purrr::map_chr(full_n,~typeof_h5(filename,.x))
id_dim=purrr::map(full_n,~dim_h5(filename,.x))
# If every object has the same rank, flatten dims into a plain integer
# vector; otherwise `dims` remains a list-column.
# NOTE(review): flatten_int() only yields one value per object when the
# shared rank is 1; higher shared ranks would break the tibble() call
# below -- confirm callers only hit this path with rank-1 datasets.
if(all(lengths(id_dim)==length(id_dim[[1]]))){
id_dim <- purrr::flatten_int(id_dim)
}
if(!full_names){
full_n <- fs::path_rel(full_n,start=groupname)
}
tibble::tibble(name=full_n,dims=id_dim,type=id_type)
}
}
# Build an HDF5 data path from components: join with "/", strip a leading
# slash, and collapse doubled slashes produced by empty components.
construct_data_path <- function(...) {
  joined <- paste(list(...), collapse = "/")
  no_lead <- gsub("^/", "", joined)
  gsub("//", "/", no_lead)
}
## lockf <- function(filename){
## return(paste0(filename,".lck"))
## }
# Test whether `datapath` names an existing object inside the HDF5 file
# `filename`. Errors if the file itself does not exist.
#
# @param filename path to an existing HDF5 file
# @param datapath object path to test within the file
# @return result of the compiled isObject() helper
isObject_h5 <- function(filename, datapath) {
  stopifnot(file.exists(filename))
  # The previous revision computed an unused `timeout` guarded by
  # hasArg(timeout); with no `timeout` formal and no `...` that branch could
  # never matter, so the dead code has been removed.
  isObject(filename, datapath)
}
# Build a (filenames, groupnames, datanames) dataframe describing one matrix
# slice per subgroup of `group_prefix`, ordered by the subgroups' integer
# names.
#
# @param filename HDF5 file to scan
# @param group_prefix group whose children each hold one slice
# @param dataname dataset name shared by every slice
# @return tibble with one row per subgroup, sorted numerically by group name
gen_matslice_df <- function(filename, group_prefix, dataname) {
  sub_grps <- ls_h5(filename, group_prefix)
  # dplyr::data_frame() is deprecated (and removed in recent dplyr);
  # tibble::tibble() is the supported equivalent.
  retdf <- tibble::tibble(filenames = filename,
                          groupnames = paste0(group_prefix, "/", sub_grps),
                          datanames = dataname) %>%
    dplyr::arrange(as.integer(sub_grps))
  return(retdf)
}
# Convenience wrapper: dimensions of the dataset addressed by the path
# pieces `...` (joined via construct_data_path()) inside file `f`.
get_dims_h5 <- function(f, ...) {
  dim_h5(f, construct_data_path(...))
}
# Dispatch a write to the appropriate typed writer based on the class of
# `data`: lists recurse via write_l_h5(), vectors go to write_vector_h5(),
# matrices to write_matrix_h5(). NULL is silently ignored; anything else is
# an error.
#
# @param data list, vector, matrix, or NULL
# @param filename target HDF5 file
# @param datapath destination path within the file
# @param offset write offset for vector data
# @param subsets list with `subset_rows`/`subset_cols` index vectors
write_h5 <- function(data, filename, datapath, offset = 0L,
                     subsets = list(subset_rows = integer(),
                                    subset_cols = integer())) {
  # BUGFIX: this function previously referenced an undefined `h5filepath`
  # (the parameter is `filename`) and passed nonexistent argument names
  # (`h5filepath=`, `datal=`) to write_l_h5().
  if (is.list(data)) {
    write_l_h5(data = data, filename = filename, datapath = datapath)
  } else if (is.vector(data)) {
    write_vector_h5(filename = filename, datapath = datapath, data = data,
                    offset = offset, subset = subsets[["subset_rows"]])
  } else if (is.matrix(data)) {
    write_matrix_h5(filename = filename, datapath = datapath, data = data,
                    subset_rows = subsets[["subset_rows"]],
                    subset_cols = subsets[["subset_cols"]])
  } else if (!is.null(data)) {
    stop("data is of unknown type!")
  }
}
# Back-compat alias for ls_h5() (f = file, gn = group name).
get_objs_h5 <- function(f, gn, full_names = FALSE) {
  # Use FALSE rather than the reassignable shorthand F.
  ls_h5(f, gn, full_names)
}
# Summarise a position-sorted dataframe into per-group offsets/chunksizes
# suitable for chunked HDF5 reads/writes.
#
# @param info_df dataframe holding per-row 1-based positions
# @param pos_id unquoted position column
# @param group_id unquoted grouping column
# @param rowsel,colsel whether to emit row_/col_ prefixed offset+chunksize
#   columns (both by default)
# @return one row per group with the requested offset/chunksize columns
split_chunk_df <- function(info_df, pos_id, group_id, rowsel = TRUE, colsel = TRUE) {
  q_pos <- dplyr::enquo(pos_id)
  q_group <- dplyr::enquo(group_id)
  # n() must be namespace-qualified: every other dplyr call here is, and in
  # package code dplyr is not attached.
  sel_df <- dplyr::group_by(info_df, !!q_group) %>%
    dplyr::summarise(offset = as.integer(min(!!q_pos) - 1),
                     chunksize = as.integer(dplyr::n()))
  if (rowsel) {
    sel_df <- dplyr::mutate(sel_df, row_offsets = offset, row_chunksizes = chunksize)
  }
  if (colsel) {
    sel_df <- dplyr::mutate(sel_df, col_offsets = offset, col_chunksizes = chunksize)
  }
  sel_df <- dplyr::select(sel_df, -offset, -chunksize)
  return(sel_df)
}
# read_h
# Recursively write a named list into an HDF5 file: each element is written
# under datapath/<name> via write_h5() (nested lists recurse back here).
#
# @param data named list of vectors/matrices/lists
# @param filename target HDF5 file
# @param datapath group under which elements are written ("/" means root)
write_l_h5 <- function(data, filename, datapath, ...) {
  stopifnot(is.list(data))
  if (datapath == "/") {
    datapath <- ""
  }
  # BUGFIX: iterate over the `data` argument; the previous revision
  # referenced an undefined `datal`.
  purrr::iwalk(data, ~write_h5(filename, fix_paths(datapath, .y), data = .x))
}
## path_exists_h5 <- function(h5filepath,datapath){
## retvec <- c(FALSE,FALSE)
## retvec[1] <- file.exists(h5filepath)
## if(retvec[1]){
## return(c(file.exists(h5filepath
# get_sub_obj <- function(h5filepath,tpath="/"){
# res <- purrr::possibly(get_objs_h5,otherwise=NULL,quiet = T)(h5filepath,tpath)
# if(is.null(res)){
# return(tpath)
# }
# return(paste0(ifelse(tpath=="/","",tpath),"/",res))
# }
# split_chunk_df<- function(info_df,pos_id,group_id,rowsel=T,colsel=T){
# q_pos <- dplyr::enquo(pos_id)
# q_group <- dplyr::enquo(group_id)
# sel_df <- dplyr::group_by(info_df,!!q_group) %>%
# dplyr::summarise(offset=as.integer(min(!!q_pos)-1),chunksize=as.integer(n()))
# if(rowsel){
# sel_df <- dplyr::mutate(sel_df,row_offsets=offset,row_chunksizes=chunksize)
# }
# if(colsel){
# sel_df <- dplyr::mutate(sel_df,col_offsets=offset,col_chunksizes=chunksize)
# }
# sel_df <- dplyr::select(sel_df,-offset,-chunksize)
# return(sel_df)
# }
# h5ls_df <- function(h5filepath){
# root_objs <- get_sub_obj(h5filepath =h5filepath)
# bg_objs <- purrr::possibly(get_objs_h5,otherwise = NULL)
#
# node_objs <- purrr::map(root_objs,~paste0(ifelse(.x=="/","",.x),"/",bg_objs(h5filepath=h5filepath,groupname = .x)))
#
# }
#
# read_mat_h5 <- function(filename,groupname,dataname,offset_rows=0,offset_cols=0,chunksize_rows=NULL,chunksize_cols=NULL){
# mat_dims <- get_dims_h5(filename,groupname,dataname)
# stopifnot(length(mat_dims)==2)
# if(is.null(chunksize_cols)){
# chunksize_cols <- mat_dims[2]-offset_cols
# }
# if(is.null(chunksize_rows)){
# chunksize_rows <- mat_dims[1]-offset_rows
# }
# return(read_matrix_h5(filename = filename,
# groupname = groupname,
# dataname = dataname,
# offsets = c(offset_rows,offset_cols),
# chunksizes = c(chunksize_rows,chunksize_cols)))
# }
## read_l_h5 <- function(filename,h5path="/",...){
## all_objs <- ls_h5(filename,h5path,full_names = T)
## names(all_objs) <- basename(all_objs)
## purrr::map(all_objs,function(fp){
## if(isGroup(filename,fp)){
## return(read_l_h5(filename,fp))
## }
## md <- dims_h5(filename,fp)
## if(length(md)>1){
## return(read_matrix(filename,fp))
## }
## return(read_vector(filename,datapath = fp))
## })
## }
# Create one (empty) HDF5 matrix per row of `dff`.
#
# Expected columns: filenames, groupnames, datanames, datatypes ("integer"
# or "numeric"), row_chunksizes/col_chunksizes (the matrix dimensions) and
# optionally row_c_chunksizes/col_c_chunksizes (the HDF5 chunk layout).
# NOTE(review): pwalk() is called for its side effects and returns `dff`
# invisibly, so this function's value is the input dataframe -- confirm
# callers do not expect anything else.
create_mat_l <- function(dff){
# Zero-length prototype vectors communicate the element type to
# create_matrix_h5(), keyed by the row's `datatypes` string.
tl <- list(integer=integer(),numeric=numeric())
return(purrr::pwalk(dff,function(filenames,
groupnames,
datanames,
datatypes,
row_chunksizes,
col_chunksizes,
row_c_chunksizes=NULL,
col_c_chunksizes=NULL,
...){
EigenH5::create_matrix_h5(
filenames,
groupnames,
datanames,
tl[[datatypes]],
doTranspose=F,
dims=c(row_chunksizes,col_chunksizes),
chunksizes=c(row_c_chunksizes,col_c_chunksizes))
}))
}
#' Convert delimited text to HDF5 with a custom callback
#'
#' Reads each input file in chunks with `readr::read_delim_chunked` and
#' appends every chunk to `output_file` via `callback_fun`.
#'
#' @param input_files one or more files able to be read by `readr::read_delim`
#' @param output_file output HDF5 file
#' @param h5_args list of extra args unpacked and passed to `callback_fun`
#'   (e.g. `datapath`); `append = TRUE` is always enforced
#' @param callback_fun function with signature matching
#'   function(df,filename,datapath,...) (defaults to `write_df_h5`)
#' @param id_col NULL/FALSE for no row-id column; TRUE to add a column named
#'   `id_col`; a length-1 character to add a column with that name holding the
#'   global (cross-file) 1-based row number
#' @param ... passed on to `readr::read_delim_chunked` (e.g. `delim`)
#'
#' @return called for its side effect on `output_file`
#' @export
#'
#' @examples
#'
#' temp_h5 <- fs::file_temp(ext="h5")
#' delim2h5(readr::readr_example("mtcars.csv"),temp_h5,delim="/")
#' new_data <- read_df_h5(temp_h5)
delim2h5 <- function(input_files,
                     output_file,
                     h5_args = list(datapath = "/"),
                     callback_fun = write_df_h5,
                     id_col = NULL, ...) {
  # Always append so successive chunks/files accumulate (set exactly once;
  # a duplicated assignment was removed).
  h5_args[["append"]] <- TRUE
  callback_args <- formalArgs(callback_fun)
  stopifnot(all(fs::file_exists(input_files)))
  stopifnot(all.equal(callback_args, formalArgs(write_df_h5)))
  # Rows already written, so row ids continue across input files.
  global_offset <- 0L
  if (is.null(id_col) || isFALSE(id_col)) {
    wf <- function(x, pos)
      rlang::exec(callback_fun, df = x, filename = output_file, !!!h5_args)
  } else if (is.character(id_col)) {
    stopifnot(length(id_col) == 1)
    wf <- function(x, pos) {
      pos_seq <- as.integer(seq.int(from = as.integer(pos) + global_offset,
                                    length.out = as.integer(nrow(x))))
      rlang::exec(callback_fun,
                  df = dplyr::mutate(x, {{id_col}} := pos_seq),
                  filename = output_file, !!!h5_args)
    }
  } else {
    stopifnot(isTRUE(id_col))
    wf <- function(x, pos) {
      pos_seq <- as.integer(seq(from = pos + global_offset, length.out = nrow(x)))
      rlang::exec(callback_fun,
                  df = dplyr::mutate(x, id_col = pos_seq),
                  filename = output_file, !!!h5_args)
    }
  }
  for (f in input_files) {
    readr::read_delim_chunked(file = f,
                              callback = readr::SideEffectChunkCallback$new(wf),
                              ...)
    # After each file, advance the offset to the current total row count.
    all_ds <- ls_h5(output_file, h5_args$datapath, full_names = TRUE)
    global_offset <- as.integer(dim_h5(output_file, all_ds[1])[1])
  }
}
|
#!/usr/bin/R
####################################################
### This Shiny app provides a means of interacting #
### with the results of a search in LIGO data for ##
### continuous gravitational waves from neutron ####
### star candidates in supernova remnants. See #####
### the following paper (ApJ):
### http://iopscience.iop.org/article/10.1088/0004-637X/813/1/39/meta
### Or browse it on the arXiv:
### https://arxiv.org/abs/1412.5942
###
###
####################################################
###
### Created: 16 June 2016, Ra Inta
### Last modified: 20161221, RI
###################################################
library(ggplot2)
library(Cairo)
library(XML)
library(scales)
library(ggthemes)
library(shiny)
###################################################
### Get metadata on all the search targets ###
###################################################
target_properties <- read.table('target_properties.dat', header=T, stringsAsFactors=F)
rownames(target_properties) <- target_properties$TargName
###
# Note: the header format of target_properties is:
#TargName D tau h_age
# We'll reference the h_age by the rowname later.
###################################################
##################################################
# Load search results from LIGO S6 data XMLs
# This is an appropriate place for a function...
###
# Load the upper-limit search results for one target from its LIGO S6 XML
# (<targName>/upper_limit_bands.xml). Returns a data frame with one row per
# upper-limit band: the loudest non-vetoed template's parameters plus the h0
# upper limit, with numeric columns coerced from the XML's character values.
load_ul_data <- function(targName = "G111.7") {
  old_xml <- paste(targName, "upper_limit_bands.xml", sep = "/")
  old_doc <- xmlParse(old_xml)
  old_data <- xmlToDataFrame(
    nodes = getNodeSet(old_doc, "//upper_limit_band/loudest_nonvetoed_template"),
    stringsAsFactors = FALSE)
  old_data_h0 <- xmlToDataFrame(
    nodes = getNodeSet(old_doc, "//upper_limit_band/upper_limit_h0"),
    stringsAsFactors = FALSE)
  names(old_data_h0) <- "upper_limit"
  old_data <- cbind(old_data, old_data_h0)
  # Return the coerced data frame explicitly: the previous revision ended in
  # an assignment, whose value is returned invisibly.
  transform(old_data,
            freq = as.numeric(freq),
            twoF = as.numeric(twoF),
            twoF_H1 = as.numeric(twoFH1),
            twoF_L1 = as.numeric(twoFL1),
            upper_limit = as.numeric(upper_limit),
            cover_freq = as.numeric(cover_freq),
            cover_band = as.numeric(cover_band),
            f1dot = as.numeric(f1dot),
            f2dot = as.numeric(f2dot))
}
##################################################
##################################################
# Some CSS to animate a spinner while loading
# Adapted from: https://github.com/daattali/advanced-shiny/blob/master/plot-spinner/app.R
### Note: this currently doesn't work as it should!
##################################################
# CSS injected into the page head: centers the loading spinner over the
# plot container and layers it behind the plot except while recalculating.
mycss <- "
#plot-container {
position: relative;
}
#loading-spinner {
position: absolute;
left: 50%;
top: 50%;
z-index: -1;
margin-top: -33px; /* half of the spinner's height */
margin-left: -33px; /* half of the spinner's width */
}
#plot.recalculating {
z-index: -2;
}
"
##################################################
# Page layout: a brushable "master" upper-limit plot (plot0) on top, whose
# brush selection drives the zoom window of the three linked plots below
# (upper limits, f1dot, f2dot). A CSS merger animation overlays plot1 while
# Shiny reports itself busy.
ui <- fluidPage(
# Plot animated merger gif while waiting to load...
tags$head(tags$style(HTML(mycss))),
theme = "bootstrap.css",
titlePanel("Interactive upper limit plots"),
fluidRow(
column(width = 10, class = "well", align="center",offset=1,
h4("Upper plot controls zoom for lower plots: upper limits, f1dot and f2dot"),
fluidRow(
column(width = 2,align="center",offset=0,
### Give a drop-down list of the targets to choose from.
selectInput("target", "Select target:", choices = target_properties$TargName )
)
),
fluidRow(
column(width = 8,align="center",offset=2,
### Make a place for the 'master' plot
plotOutput("plot0", height = 400,
brush = brushOpts(
id = "plot0_brush",
resetOnNew = TRUE
)
)
)
),
fluidRow(
column(width = 8,align="center",offset=2,
### Show the merger animation whenever the Shiny session is busy.
conditionalPanel(condition="$('html').hasClass('shiny-busy')",
tags$div(
id = "plot-container",
tags$img(src = "merger.gif",
id = "loading-spinner")
)
),
plotOutput("plot1", height = 400)
)
),
fluidRow(
column(width = 8,align="center",offset=2,
plotOutput("plot2", height = 400)
)
),
fluidRow(
column(width = 8,align="center",offset=2,
plotOutput("plot3", height = 400)
)
) # fluidRow #2
) # main column
) # fluidRow #1
) # fluidPage
# Server logic: plot0 is the master plot; brushing it updates `ranges2`,
# which the three lower plots use as their zoom window.
server <- function(input, output) {
  ranges2 <- reactiveValues(x = NULL, y = NULL)

  # PERF FIX: cache the parsed XML per target with reactive() instead of
  # calling load_ul_data() inside every renderPlot -- the original comment
  # noted that re-parsing for each plot gave "virtually unacceptable
  # latency". All four plots now share one load per target selection.
  ul_data <- reactive(load_ul_data(input$target))
  h_age <- reactive(target_properties[input$target, ]$h_age)

  # Shared text styling for the two h0 plots.
  xkcd_theme <- theme(
    axis.text = element_text(size = 12, family = "xkcd"),
    axis.title = element_text(size = 14, face = "bold", family = "xkcd"),
    plot.title = element_text(size = 16, face = "bold", family = "xkcd"))

  # Master plot: h0 upper limits over the full band, with the age-based
  # indirect limit drawn as a horizontal red line.
  output$plot0 <- renderPlot({
    df <- ul_data()
    ha <- h_age()
    ggplot(df, aes(x = freq, y = upper_limit)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      scale_y_log10(limits = c(
        10^(floor(log10(min(df$upper_limit, na.rm = TRUE)))),
        10^(ceiling(log10(max(df$upper_limit, na.rm = TRUE)))))) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      xlab("Frequency (Hz)") + ggtitle(input$target) + ylab("h0") +
      xkcd_theme +
      geom_line(aes(y = ha), size = 1.5, colour = "red", alpha = 0.5)
  })

  # Zoomed h0 plot, windowed by the brush on plot0.
  output$plot1 <- renderPlot({
    df <- ul_data()
    ha <- h_age()
    ggplot(df, aes(x = freq, y = upper_limit)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      scale_y_log10(breaks = pretty(ranges2$y, n = 5)) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      coord_cartesian(xlim = ranges2$x, ylim = ranges2$y) +
      xlab("Frequency (Hz)") + ggtitle(input$target) + ylab("h0") +
      xkcd_theme +
      geom_line(aes(y = ha), size = 1.5, colour = "red", alpha = 0.5)
  })

  # First spindown parameter of the loudest template, x-zoom linked to plot0.
  # (A stray na.rm=TRUE inside aes() was removed; it is not an aesthetic.)
  output$plot2 <- renderPlot({
    df <- ul_data()
    ggplot(df, aes(x = freq, y = f1dot)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      coord_cartesian(xlim = ranges2$x) +
      scale_y_continuous(breaks = pretty(range(df$f1dot), n = 5)) +
      xlab("Frequency (Hz)") + ylab("f1dot (Hz/s)") +
      theme(axis.text = element_text(size = 12, family = "xkcd"))
  })

  # Second spindown parameter, x-zoom linked to plot0.
  output$plot3 <- renderPlot({
    df <- ul_data()
    ggplot(df, aes(x = freq, y = f2dot)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      coord_cartesian(xlim = ranges2$x) +
      scale_y_continuous(breaks = pretty(df$f2dot, n = 5)) +
      xlab("Frequency (Hz)") + ylab("f2dot (Hz/s^{-2})") +
      theme(axis.text = element_text(size = 12, family = "xkcd"))
  })

  # Track the brush on plot0: set the shared zoom window when a brush
  # exists, reset it otherwise.
  observe({
    brush <- input$plot0_brush
    if (!is.null(brush)) {
      ranges2$x <- c(brush$xmin, brush$xmax)
      ranges2$y <- c(brush$ymin, brush$ymax)
    } else {
      ranges2$x <- NULL
      ranges2$y <- NULL
    }
  })
}
shinyApp(ui, server)
| /app.R | no_license | bbw7561135/shiny_9snr | R | false | false | 7,913 | r | #!/usr/bin/R
####################################################
### This Shiny app provides a means of interacting #
### with the results of a search in LIGO data for ##
### continuous gravitational waves from neutron ####
### star candidates in supernova remnants. See #####
### the following paper (ApJ):
### http://iopscience.iop.org/article/10.1088/0004-637X/813/1/39/meta
### Or browse it on the arXiv:
### https://arxiv.org/abs/1412.5942
###
###
####################################################
###
### Created: 16 June 2016, Ra Inta
### Last modified: 20161221, RI
###################################################
library(ggplot2)
library(Cairo)
library(XML)
library(scales)
library(ggthemes)
library(shiny)
###################################################
### Get metadata on all the search targets ###
###################################################
target_properties <- read.table('target_properties.dat', header=T, stringsAsFactors=F)
rownames(target_properties) <- target_properties$TargName
###
# Note: the header format of target_properties is:
#TargName D tau h_age
# We'll reference the h_age by the rowname later.
###################################################
##################################################
# Load search results from LIGO S6 data XMLs
# This is an appropriate place for a function...
###
# Load the upper-limit search results for one target from its LIGO S6 XML
# (<targName>/upper_limit_bands.xml). Returns a data frame with one row per
# upper-limit band: the loudest non-vetoed template's parameters plus the h0
# upper limit, with numeric columns coerced from the XML's character values.
load_ul_data <- function(targName = "G111.7") {
  old_xml <- paste(targName, "upper_limit_bands.xml", sep = "/")
  old_doc <- xmlParse(old_xml)
  old_data <- xmlToDataFrame(
    nodes = getNodeSet(old_doc, "//upper_limit_band/loudest_nonvetoed_template"),
    stringsAsFactors = FALSE)
  old_data_h0 <- xmlToDataFrame(
    nodes = getNodeSet(old_doc, "//upper_limit_band/upper_limit_h0"),
    stringsAsFactors = FALSE)
  names(old_data_h0) <- "upper_limit"
  old_data <- cbind(old_data, old_data_h0)
  # Return the coerced data frame explicitly: the previous revision ended in
  # an assignment, whose value is returned invisibly.
  transform(old_data,
            freq = as.numeric(freq),
            twoF = as.numeric(twoF),
            twoF_H1 = as.numeric(twoFH1),
            twoF_L1 = as.numeric(twoFL1),
            upper_limit = as.numeric(upper_limit),
            cover_freq = as.numeric(cover_freq),
            cover_band = as.numeric(cover_band),
            f1dot = as.numeric(f1dot),
            f2dot = as.numeric(f2dot))
}
##################################################
##################################################
# Some CSS to animate a spinner while loading
# Adapted from: https://github.com/daattali/advanced-shiny/blob/master/plot-spinner/app.R
### Note: this currently doesn't work as it should!
##################################################
# CSS injected into the page head: centers the loading spinner over the
# plot container and layers it behind the plot except while recalculating.
mycss <- "
#plot-container {
position: relative;
}
#loading-spinner {
position: absolute;
left: 50%;
top: 50%;
z-index: -1;
margin-top: -33px; /* half of the spinner's height */
margin-left: -33px; /* half of the spinner's width */
}
#plot.recalculating {
z-index: -2;
}
"
##################################################
# UI layout: a target selector, a brushable "master" plot (plot0), and three
# linked plots (plot1-plot3) whose x-zoom follows the brush on plot0.
ui <- fluidPage(
# Plot animated merger gif while waiting to load...
tags$head(tags$style(HTML(mycss))),
theme = "bootstrap.css",
titlePanel("Interactive upper limit plots"),
fluidRow(
column(width = 10, class = "well", align="center",offset=1,
h4("Upper plot controls zoom for lower plots: upper limits, f1dot and f2dot"),
fluidRow(
column(width = 2,align="center",offset=0,
### Give a drop-down list of the targets to choose from.
selectInput("target", "Select target:", choices = target_properties$TargName )
)
),
fluidRow(
column(width = 8,align="center",offset=2,
### Make a place for the 'master' plot.
# The brush (id "plot0_brush") drives the zoom ranges used by the
# plots below; resetOnNew clears it when a new brush is started.
plotOutput("plot0", height = 400,
brush = brushOpts(
id = "plot0_brush",
resetOnNew = TRUE
)
)
)
),
fluidRow(
column(width = 8,align="center",offset=2,
# Show the loading gif only while Shiny is busy recalculating.
conditionalPanel(condition="$('html').hasClass('shiny-busy')",
tags$div(
id = "plot-container",
tags$img(src = "merger.gif",
id = "loading-spinner")
)
),
plotOutput("plot1", height = 400)
)
),
fluidRow(
column(width = 8,align="center",offset=2,
plotOutput("plot2", height = 400)
)
),
fluidRow(
column(width = 8,align="center",offset=2,
plotOutput("plot3", height = 400)
)
) # fluidRow #2
) # main column
) # fluidRow #1
) # fluidPage
server <- function(input, output) {
  ##################################################
  # Load the selected target's upper-limit data once, in a reactive.
  # All four plots read this cached value instead of each renderPlot
  # re-parsing the XML, which caused the latency complained about in the
  # original implementation ("virtually unacceptable latency").
  ul_data <- reactive({
    load_ul_data(input$target)
  })
  # Age-based limit h_age for the selected target, looked up by rowname.
  h_age <- reactive({
    target_properties[input$target, ]$h_age
  })
  # Zoom ranges set by the brush on the master plot (plot0) and applied to
  # the linked plots below it.
  ranges2 <- reactiveValues(x = NULL, y = NULL)
  # Master plot: upper limit vs frequency over the full range, log-scaled y
  # snapped to whole decades; red line marks the age-based limit h_age.
  output$plot0 <- renderPlot({
    dat <- ul_data()
    ha <- h_age()
    ggplot(data = dat, aes(x = freq, y = upper_limit)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      scale_y_log10(limits = c(10^(floor(log10(min(dat$upper_limit, na.rm = TRUE)))),
                               10^(ceiling(log10(max(dat$upper_limit, na.rm = TRUE)))))) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      xlab("Frequency (Hz)") + ggtitle(input$target) + ylab("h0") +
      theme(axis.text = element_text(size = 12, family = "xkcd"),
            axis.title = element_text(size = 14, face = "bold", family = "xkcd"),
            plot.title = element_text(size = 16, face = "bold", family = "xkcd")) +
      geom_line(aes(x = freq, y = ha), size = 1.5, colour = "red", alpha = 0.5)
  })
  # Zoomed upper-limit plot: same data as plot0 but obeying the brush ranges.
  output$plot1 <- renderPlot({
    dat <- ul_data()
    ha <- h_age()
    ggplot(data = dat, aes(x = freq, y = upper_limit)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      scale_y_log10(breaks = pretty(ranges2$y, n = 5)) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      coord_cartesian(xlim = ranges2$x, ylim = ranges2$y) +
      xlab("Frequency (Hz)") + ggtitle(input$target) + ylab("h0") +
      theme(axis.text = element_text(size = 12, family = "xkcd"),
            axis.title = element_text(size = 14, face = "bold", family = "xkcd"),
            plot.title = element_text(size = 16, face = "bold", family = "xkcd")) +
      geom_line(aes(x = freq, y = ha), size = 1.5, colour = "red", alpha = 0.5)
  })
  # f1dot of the loudest template vs frequency; x-zoom follows the brush.
  output$plot2 <- renderPlot({
    dat <- ul_data()
    ggplot(data = dat, aes(x = freq, y = f1dot)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      coord_cartesian(xlim = ranges2$x) +
      scale_y_continuous(breaks = pretty(range(dat$f1dot), n = 5)) +
      xlab("Frequency (Hz)") + ylab("f1dot (Hz/s)") +
      theme(axis.text = element_text(size = 12, family = "xkcd"))
  })
  # f2dot of the loudest template vs frequency; x-zoom follows the brush.
  output$plot3 <- renderPlot({
    dat <- ul_data()
    ggplot(data = dat, aes(x = freq, y = f2dot)) +
      geom_point(colour = "skyblue", fill = "tan") +
      guides(fill = FALSE, colour = FALSE) +
      theme_solarized(light = FALSE) +
      scale_colour_solarized("blue") +
      coord_cartesian(xlim = ranges2$x) +
      scale_y_continuous(breaks = pretty(dat$f2dot, n = 5)) +
      xlab("Frequency (Hz)") + ylab("f2dot (Hz/s^{-2})") +
      theme(axis.text = element_text(size = 12, family = "xkcd"))
  })
  # When the brush on plot0 changes, zoom the linked plots to its bounds;
  # when the brush is cleared, reset the zoom to the full range.
  observe({
    brush <- input$plot0_brush
    if (!is.null(brush)) {
      ranges2$x <- c(brush$xmin, brush$xmax)
      ranges2$y <- c(brush$ymin, brush$ymax)
    } else {
      ranges2$x <- NULL
      ranges2$y <- NULL
    }
  })
}
# Assemble and launch the application.
shinyApp(ui, server)
|
#' cohort class
#'
#' This class creates a cohort object, which holds the information related to a
#' cohort: cohort ID, name, description, query, table columns. This class is used
#' in functions which carry out operations related to specific cohorts.
#' A cohort class object can be created using constructor functions
#' \code{\link{cb_create_cohort}} or \code{\link{cb_load_cohort}}.
#'
#' @slot id cohort ID.
#' @slot name cohort name.
#' @slot desc cohort description.
#' @slot phenoptype_filters phenotypes displayed in the cohort overview.
#'   (The slot name carries a historical typo -- "phenoptype" -- kept for
#'   backward compatibility with existing callers.)
#' @slot query applied query.
#' @slot columns all the table columns of the cohort.
#' @slot cb_version cohort browser version ("v1" or "v2").
#'
#' @name cohort-class
#' @rdname cohort-class
#' @export
setClass("cohort",
slots = list(id = "character",
name = "character",
desc = "character",
phenoptype_filters = "list", # renamed from 'fields' to match v2 naming
query = "list", # replaces v1 more_fields / moreFields with more flexible v2 structure
columns = "list", # v1 and v2 are structured differently
cb_version = "character")
)
# Dispatch cohort-info retrieval to the matching cohort browser API version.
# Any cb_version other than "v1"/"v2" is rejected with an error.
.get_cohort_info <- function(cohort_id, cb_version = "v2") {
  switch(cb_version,
    "v1" = .get_cohort_info_v1(cohort_id),
    "v2" = .get_cohort_info_v2(cohort_id),
    stop('Unknown cohort browser version string ("cb_version"). Choose either "v1" or "v2".')
  )
}
# Fetch cohort metadata from the v1 cohort browser endpoint.
# Raises via httr::stop_for_status on HTTP errors; otherwise returns the
# parsed response body.
.get_cohort_info_v1 <- function(cohort_id) {
  cloudos <- .check_and_load_all_cloudos_env_var()
  endpoint <- paste(cloudos$base_url, "v1/cohort", cohort_id, sep = "/")
  resp <- httr::GET(endpoint,
                    .get_httr_headers(cloudos$token),
                    query = list("teamId" = cloudos$team_id))
  httr::stop_for_status(resp, task = NULL)
  httr::content(resp)
}
# Fetch cohort metadata from the v2 cohort browser endpoint.
# Raises via httr::stop_for_status on HTTP errors; otherwise returns the
# parsed response body.
.get_cohort_info_v2 <- function(cohort_id) {
  cloudos <- .check_and_load_all_cloudos_env_var()
  endpoint <- paste(cloudos$base_url, "v2/cohort", cohort_id, sep = "/")
  resp <- httr::GET(endpoint,
                    .get_httr_headers(cloudos$token),
                    query = list("teamId" = cloudos$team_id))
  httr::stop_for_status(resp, task = NULL)
  httr::content(resp)
}
# Return the query value of a v1 field item: its explicit `value` when
# present, otherwise its `range` (NULL if the item carries neither).
.get_val_or_range <- function(field_item) {
  if (is.null(field_item$value)) field_item$range else field_item$value
}
#' Convert a v1 style query (moreFields) to v2 style (query).
#'
#' v2 queries are a superset of v1 queries. A list of v1 phenotype queries is
#' equivalent to a set of nested v2 AND operators containing those phenotypes:
#' \code{list(e1, e2, e3)} becomes \code{AND(e1, AND(e2, e3))} -- the last two
#' phenotypes share the innermost AND node and every earlier phenotype adds one
#' more level of nesting.
#'
#' @param cohort_more_fields query information ('moreFields') from
#'   .get_cohort_info(cohort_id, cb_version = "v1").
#' @return A nested v2 query list, or an empty list when there are no
#'   phenotypes.
.v1_query_to_v2 <- function(cohort_more_fields){
  # Normalise the "empty query" representations the backend can produce:
  # a non-list value, or a list containing a single empty string.
  if (!is.list(cohort_more_fields)) cohort_more_fields <- list()
  if (identical(cohort_more_fields, list(""))) cohort_more_fields <- list()
  l <- length(cohort_more_fields)
  if (l == 0) return(list())
  # Build one v2 query entry from a v1 field item. A v1 item carries either
  # a `value` or a `range`; v2 stores whichever is present under `value`.
  make_entry <- function(item) {
    list("field" = item$fieldId,
         "instance" = item$instance,
         "value" = if (!is.null(item$value)) item$value else item$range)
  }
  new_and <- function(queries) list("operator" = "AND", "queries" = queries)
  # Innermost AND holds the last phenotype (l == 1) or last two (l >= 2)...
  if (l == 1) {
    query <- new_and(list(make_entry(cohort_more_fields[[1]])))
  } else {
    query <- new_and(list(make_entry(cohort_more_fields[[l - 1]]),
                          make_entry(cohort_more_fields[[l]])))
  }
  # ...and each earlier phenotype wraps the accumulated query in another AND.
  if (l > 2) {
    for (i in (l - 2):1) {
      query <- new_and(list(make_entry(cohort_more_fields[[i]]), query))
    }
  }
  return(query)
}
#' @title Get cohort information
#'
#' @description Get all the details about a cohort including
#' applied query.
#'
#' @param cohort_id Cohort id (Required)
#' @param cb_version cohort browser version (Optional) \[ "v1" | "v2" \]
#'
#' @return A \linkS4class{cohort} object.
#'
#' @examples
#' \dontrun{
#' my_cohort <- cb_load_cohort(cohort_id = "5f9af3793dd2dc6091cd17cd")
#' }
#'
#' @seealso \code{\link{cb_create_cohort}} for creating a new cohort.
#'
#' @export
cb_load_cohort <- function(cohort_id, cb_version = "v2"){
my_cohort <- .get_cohort_info(cohort_id = cohort_id, cb_version = cb_version)
# v1 responses use 'fields' / 'moreFields'; rename and convert them to the
# v2 'phenotypeFilters' / 'query' shape so downstream code handles a single
# format.
if (cb_version == "v1"){
my_cohort$phenotypeFilters = my_cohort$fields
my_cohort$query = .v1_query_to_v2(my_cohort$moreFields)
}
# For empty fields the backend can return NULL; normalise to "" / list() so
# the S4 slot types (character / list) validate.
if(is.null(my_cohort$description)) my_cohort$description = ""
if(is.null(my_cohort$query)) my_cohort$query = list()
cohort_class_obj <- methods::new("cohort",
id = cohort_id,
name = my_cohort$name,
desc = my_cohort$description,
phenoptype_filters = my_cohort$phenotypeFilters,
query = my_cohort$query,
columns = my_cohort$columns,
cb_version = cb_version)
return(cohort_class_obj)
}
# S4 show method for cohort objects: invoked automatically when a cohort is
# printed at the console.
setMethod("show", "cohort",
function(object) {
cat("Cohort ID: ", object@id, "\n")
cat("Cohort Name: ", object@name, "\n")
cat("Cohort Description: ", object@desc, "\n")
# .unnest_query() (defined elsewhere in the package) flattens the nested
# query tree; its length is reported as the number of phenotypes queried.
cat("Number of phenotypes in query: ", length(.unnest_query(object@query)), "\n")
cat("Cohort Browser version: ", object@cb_version, "\n")
}
)
| /R/cb_class.R | permissive | abrahamlifebit/cloudos | R | false | false | 6,266 | r | #' cohort class
#'
#' This class creates a cohort object, which holds the information related to a
#' cohort: cohort ID, name, description, query, table columns. This class is used
#' in functions which carry out operations related to specific cohorts.
#' A cohort class object can be created using constructor functions
#' \code{\link{cb_create_cohort}} or \code{\link{cb_load_cohort}}.
#'
#' @slot id cohort ID.
#' @slot name cohort name.
#' @slot desc cohort description.
#' @slot phenoptype_filters phenotypes displayed in the cohort overview.
#' @slot query applied query.
#' @slot columns All the columns
#' @slot cb_version chort browser version
#'
#' @name cohort-class
#' @rdname cohort-class
#' @export
setClass("cohort",
slots = list(id = "character",
name = "character",
desc = "character",
phenoptype_filters = "list", # renamed from 'fields' to match v2 naming
query = "list", # replaces v1 more_fields / moreFields with more flexible v2 structure
columns = "list", # v1 and v2 are structured differently
cb_version = "character")
)
.get_cohort_info <- function(cohort_id, cb_version = "v2") {
if (cb_version == "v1") {
return(.get_cohort_info_v1(cohort_id))
} else if (cb_version == "v2") {
return(.get_cohort_info_v2(cohort_id))
} else {
stop('Unknown cohort browser version string ("cb_version"). Choose either "v1" or "v2".')
}
}
.get_cohort_info_v1 <- function(cohort_id) {
cloudos <- .check_and_load_all_cloudos_env_var()
url <- paste(cloudos$base_url, "v1/cohort", cohort_id, sep = "/")
r <- httr::GET(url,
.get_httr_headers(cloudos$token),
query = list("teamId" = cloudos$team_id)
)
httr::stop_for_status(r, task = NULL)
# parse the content
res <- httr::content(r)
return(res)
}
.get_cohort_info_v2 <- function(cohort_id) {
cloudos <- .check_and_load_all_cloudos_env_var()
url <- paste(cloudos$base_url, "v2/cohort", cohort_id, sep = "/")
r <- httr::GET(url,
.get_httr_headers(cloudos$token),
query = list("teamId" = cloudos$team_id)
)
httr::stop_for_status(r, task = NULL)
# parse the content
res <- httr::content(r)
return(res)
}
.get_val_or_range <- function(field_item){
if (!is.null(field_item$value)){
return(field_item$value)
}else{
return(field_item$range)
}
}
#' Convert a v1 style query (moreFields) to v2 style (query).
#' v2 queries are a superset of v1 queries. A list of v1 phenotype queries are equivalent to a
#' set of nested v2 AND operators containing those phenotypes. This function builds the nested
#' AND query from the flat list of v1 phenotypes.
#' @param cohort_more_fields query information ('moreFields') from .get_cohort_info(cohort_id, cb_version="v1)
.v1_query_to_v2 <- function(cohort_more_fields){
andop <- list("operator" = "AND",
"queries" = list())
# make empty query field better behaved by setting it as empty list
if (!is.list(cohort_more_fields)) cohort_more_fields <- list()
if (identical(cohort_more_fields, list(""))) cohort_more_fields <- list()
l <- length(cohort_more_fields)
query <- list()
if (l > 0){
query <- andop
query$queries <- list(list("field" = cohort_more_fields[[l]]$fieldId,
"instance" = cohort_more_fields[[l]]$instance,
"value" = .get_val_or_range(cohort_more_fields[[l]])))
}
if (l > 1){
query$queries <- list(list("field" = cohort_more_fields[[l-1]]$fieldId,
"instance" = cohort_more_fields[[l-1]]$instance,
"value" = .get_val_or_range(cohort_more_fields[[l-1]])),
query$queries[[1]])
}
if (l > 2){
for (i in (l-2):1){
new_query <- andop
new_query$queries <- list(list("field" = cohort_more_fields[[i]]$fieldId,
"instance" = cohort_more_fields[[i]]$instance,
"value" = .get_val_or_range(cohort_more_fields[[i]])),
query)
query <- new_query
}
}
return(query)
}
#' @title Get cohort information
#'
#' @description Get all the details about a cohort including
#' applied query.
#'
#' @param cohort_id Cohort id (Required)
#' @param cb_version cohort browser version (Optional) \[ "v1" | "v2" \]
#'
#' @return A \linkS4class{cohort} object.
#'
#' @example
#' \dontrun{
#' my_cohort <- cb_load_cohort(cohort_id = "5f9af3793dd2dc6091cd17cd")
#' }
#'
#' @seealso \code{\link{cb_create_cohort}} for creating a new cohort.
#'
#' @export
cb_load_cohort <- function(cohort_id, cb_version = "v2"){
my_cohort <- .get_cohort_info(cohort_id = cohort_id, cb_version = cb_version)
# convert v1 query to v2 query and rename objects to v2 style
if (cb_version == "v1"){
my_cohort$phenotypeFilters = my_cohort$fields
my_cohort$query = .v1_query_to_v2(my_cohort$moreFields)
}
# For empty fields backend can return NULL
if(is.null(my_cohort$description)) my_cohort$description = "" # change everything to ""
if(is.null(my_cohort$query)) my_cohort$query = list()
cohort_class_obj <- methods::new("cohort",
id = cohort_id,
name = my_cohort$name,
desc = my_cohort$description,
phenoptype_filters = my_cohort$phenotypeFilters,
query = my_cohort$query,
columns = my_cohort$columns,
cb_version = cb_version)
return(cohort_class_obj)
}
# method for cohort object
setMethod("show", "cohort",
function(object) {
cat("Cohort ID: ", object@id, "\n")
cat("Cohort Name: ", object@name, "\n")
cat("Cohort Description: ", object@desc, "\n")
cat("Number of phenotypes in query: ", length(.unnest_query(object@query)), "\n")
cat("Cohort Browser version: ", object@cb_version, "\n")
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/customvision_predict.R
\name{predict.customvision_model}
\alias{predict.customvision_model}
\alias{predict}
\alias{predict.classification_service}
\alias{predict.object_detection_service}
\title{Get predictions from a Custom Vision model}
\usage{
\method{predict}{customvision_model}(object, images, type = c("class", "prob", "list"), ...)
\method{predict}{classification_service}(object, images, type = c("class",
"prob", "list"), save_result = FALSE, ...)
\method{predict}{object_detection_service}(object, images, type = c("class",
"prob", "list"), save_result = FALSE, ...)
}
\arguments{
\item{object}{A Custom Vision object from which to get predictions. See 'Details' below.}
\item{images}{The images for which to get predictions.}
\item{type}{The type of prediction: either class membership (the default), the class probabilities, or a list containing all information returned by the prediction endpoint.}
\item{...}{Further arguments passed to lower-level functions; not used.}
\item{save_result}{For the predictive service methods, whether to store the predictions on the server for future use.}
}
\description{
Get predictions from a Custom Vision model
}
\details{
AzureVision defines prediction methods for both Custom Vision model training objects (of class \code{customvision_model}) and prediction services (\code{classification_service} and \code{object_detection_service}). The method for model training objects calls the "quick test" endpoint, and is meant only for testing purposes.
The prediction endpoints accept a single image per request, so supplying multiple images to these functions will call the endpoints multiple times, in sequence. The images can be specified as:
\itemize{
\item A vector of local filenames. All common image file formats are supported.
\item A vector of publicly accessible URLs.
\item A raw vector, or a list of raw vectors, holding the binary contents of the image files.
}
}
\examples{
\dontrun{
# predicting with the training endpoint
endp <- customvision_training_endpoint(url="endpoint_url", key="key")
myproj <- get_project(endp, "myproject")
mod <- get_model(myproj)
predict(mod, "testimage.jpg")
predict(mod, "https://mysite.example.com/testimage.jpg", type="prob")
imgraw <- readBin("testimage.jpg", "raw", file.size("testimage.jpg"))
predict(mod, imgraw, type="list")
# predicting with the prediction endpoint
# you'll need either the project object or the ID
proj_id <- myproj$project$id
pred_endp <- customvision_prediction_endpoint(url="endpoint_url", key="pred_key")
pred_svc <- classification_service(pred_endp, proj_id, "iteration1")
predict(pred_svc, "testimage.jpg")
}
}
\seealso{
\code{\link{train_model}}, \code{\link{publish_model}}, \code{\link{classification_service}}, \code{\link{object_detection_service}}
}
| /man/customvision_predict.Rd | no_license | cran/AzureVision | R | false | true | 2,945 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/customvision_predict.R
\name{predict.customvision_model}
\alias{predict.customvision_model}
\alias{predict}
\alias{predict.classification_service}
\alias{predict.object_detection_service}
\title{Get predictions from a Custom Vision model}
\usage{
\method{predict}{customvision_model}(object, images, type = c("class", "prob", "list"), ...)
\method{predict}{classification_service}(object, images, type = c("class",
"prob", "list"), save_result = FALSE, ...)
\method{predict}{object_detection_service}(object, images, type = c("class",
"prob", "list"), save_result = FALSE, ...)
}
\arguments{
\item{object}{A Custom Vision object from which to get predictions. See 'Details' below.}
\item{images}{The images for which to get predictions.}
\item{type}{The type of prediction: either class membership (the default), the class probabilities, or a list containing all information returned by the prediction endpoint.}
\item{...}{Further arguments passed to lower-level functions; not used.}
\item{save_result}{For the predictive service methods, whether to store the predictions on the server for future use.}
}
\description{
Get predictions from a Custom Vision model
}
\details{
AzureVision defines prediction methods for both Custom Vision model training objects (of class \code{customvision_model}) and prediction services (\code{classification_service} and \code{object_detection_service}). The method for model training objects calls the "quick test" endpoint, and is meant only for testing purposes.
The prediction endpoints accept a single image per request, so supplying multiple images to these functions will call the endpoints multiple times, in sequence. The images can be specified as:
\itemize{
\item A vector of local filenames. All common image file formats are supported.
\item A vector of publicly accessible URLs.
\item A raw vector, or a list of raw vectors, holding the binary contents of the image files.
}
}
\examples{
\dontrun{
# predicting with the training endpoint
endp <- customvision_training_endpoint(url="endpoint_url", key="key")
myproj <- get_project(endp, "myproject")
mod <- get_model(myproj)
predict(mod, "testimage.jpg")
predict(mod, "https://mysite.example.com/testimage.jpg", type="prob")
imgraw <- readBin("testimage.jpg", "raw", file.size("testimage.jpg"))
predict(mod, imgraw, type="list")
# predicting with the prediction endpoint
# you'll need either the project object or the ID
proj_id <- myproj$project$id
pred_endp <- customvision_prediction_endpoint(url="endpoint_url", key="pred_key")
pred_svc <- classification_service(pred_endp, proj_id, "iteration1")
predict(pred_svc, "testimage.jpg")
}
}
\seealso{
\code{\link{train_model}}, \code{\link{publish_model}}, \code{\link{classification_service}}, \code{\link{object_detection_service}}
}
|
# Install the packages this project depends on. Run from a shell with:
#   Rscript packages.R
packages <- c('R.utils', "Rcpp", "rvest", "XML", "XBRL")
install.packages(packages, repos="http://cran.rstudio.com/") | /packages.R | permissive | xbrlware/ernest | R | false | false | 154 | r | # Run with:
# rscript packages.R
packages <- c('R.utils', "Rcpp", "rvest", "XML", "XBRL")
install.packages(packages, repos="http://cran.rstudio.com/") |
#'
#' @name truncatedDistribution
#' @aliases dtrunc
#' @aliases ptrunc
#' @aliases qtrunc
#' @aliases rtrunc
#'
#' @title Truncated Distributions
#'
#' @description Truncated probability density function, truncated cumulative density function, inverse truncated cumulative density function, and random variates from a truncated distribution.
#'
#'
#' @param x Vector of quantiles.
#' @param q Vector of quantiles.
#' @param p Vector of probabilities.
#' @param n A positive integer specifying the desired number of random variates.
#' @param distribution Character value specifying the desired probability distribution.
#' @param tbound Numeric vector specifying the lower and upper truncation bounds. Default is \code{c(-Inf, Inf)}.
#' @param ... Additional arguments passed to the non-truncated distribution functions.
#' @param log Logical; if TRUE, log densities are returned.
#' @param lower.tail Logical; if TRUE (default), probabilities are P(X <= x) otherwise, P(X > x).
#' @param log.p Currently ignored.
#'
#'
#' @details The non truncated distribution functions are assumed to be available. For example if the normal distribution is desired then use \code{distribution='norm'}; the functions then look for 'qnorm', 'pnorm', etc.
#'
#' The \code{max(tbound)} and \code{min(tbound)} are considered the upper and lower truncation bounds, respectively.
#'
#' @return \code{dtrunc} returns a vector of densities.
#'
#' @export dtrunc
#'
#' @examples
#'
#' ## dtrunc
#' # not truncated
#' dnorm(5,mean=5)
#' # truncated
#' dtrunc(x=5,distribution='norm',tbound=c(4,5.5),mean=5)
#'
#'
dtrunc <- function(x, distribution, tbound=c(-Inf, Inf), ..., log=FALSE){
  ##############################################
  ### argument checking
  # Scalar conditions use || (short-circuit); the original used the
  # vectorized |, which warns/errors on length > 1 inputs in modern R.
  if(!is.character(distribution) || length(distribution) != 1){
    stop('argument distribution must be a single character string')
  }
  if(!is.numeric(tbound)){
    stop('arguments lowBound and highBound need to be numeric')
  }
  if(!is.logical(log) || length(log) != 1){
    stop('Argument log must be a single logical value.')
  }
  if(!is.numeric(x)){
    stop('Argument x must be numeric.')
  }
  ###############################################
  ## truncation bounds: min is the lower bound, max the upper
  low <- min(tbound, na.rm=TRUE)
  high <- max(tbound, na.rm=TRUE)
  if (low == high){
    stop("argument tbound must be a vector of at least two elements that are not the same")
  }
  ## look up the untruncated CDF ('p<dist>') and density ('d<dist>')
  pNonTrunc <- getDistributionFunction(type='p', dist=distribution)
  dNonTrunc <- getDistributionFunction(type='d', dist=distribution)
  pLow <- pNonTrunc(low, ...)
  pHigh <- pNonTrunc(high, ...)
  ## if the CDF could not be evaluated at the bounds, propagate NA
  pCheck <- c(pLow, pHigh)
  if(any(!is.finite(pCheck)) || any(is.na(pCheck))){
    return(rep(NA, length(x)))
  }
  ## truncated density: untruncated density renormalized by the probability
  ## mass inside [low, high]; zero outside the truncation bounds
  out <- dNonTrunc(x, ...)/(pHigh - pLow)
  out[x < low | x > high] <- 0
  if(log){
    out <- log(out)
  }
  return(out)
}
| /R/dtrunc.R | no_license | cran/windAC | R | false | false | 3,532 | r |
#'
#' @name truncatedDistribution
#' @aliases dtrunc
#' @aliases ptrunc
#' @aliases qtrunc
#' @aliases rtrunc
#'
#' @title Truncated Distributions
#'
#' @description Truncated probability density function, truncated cumulative density function, inverse truncated cumulative density function, and random variates from a truncated distribution.
#'
#'
#' @param x Vector of quantiles.
#' @param q Vector of quantiles.
#' @param p Vector of probabilities.
#' @param n A positive integer specifying the desired number of random variates.
#' @param distribution Character value specifying the desired probability distribution.
#' @param tbound Numeric vector specifying the lower and upper truncation bounds. Default is \code{c(-Inf, Inf)}.
#' @param ... Additional arguments passed to the non-truncated distribution functions.
#' @param log Logical; if TRUE, log densities are returned.
#' @param lower.tail Logical; if TRUE (default), probabilities are P(X <= x) otherwise, P(X > x).
#' @param log.p Currently ignored.
#'
#'
#' @details The non truncated distribution functions are assumed to be available. For example if the normal distribution is desired then used \code{distribution='norm'}, the functions then look for 'qnorm', 'pnorm', etc.
#'
#' The \code{max(tbound)} and \code{min(tbound)} are considered the upper and lower truncation bounds, respectively.
#'
#' @return \code{dtrunc} returns a vector of densities.
#'
#' @export dtrunc
#'
#' @examples
#'
#' ## dtrunc
#' # not truncted
#' dnorm(5,mean=5)
#' # truncated
#' dtrunc(x=5,distribution='norm',tbound=c(4,5.5),mean=5)
#'
#'
dtrunc <- function(x, distribution, tbound=c(-Inf, Inf), ...,log=FALSE){
##print('dtrunc:');print(as.list(match.call()))
##############################################
### argument checking
if(!is.character(distribution)|length(distribution)!=1){
stop('argument distribution must be a single character string')
}
if(!is.numeric(tbound)){
stop('arguments lowBound and highBound need to be numeric')
} #end if
if(!is.logical(log)|length(log)!=1){
stop('Argument log must be a single logical value.')
}#
if(!is.numeric(x)){
stop('Argument x must be numeric.')
} #end if
###############################################
## get truncation bounds
low <- min(tbound,na.rm=TRUE)
high <- max(tbound,na.rm=TRUE)
if (low == high){
stop("argument tbound must be a vector of at least two elements that are not the same")
}# end if
pNonTrunc <- getDistributionFunction(type='p',dist=distribution)##get(paste("p", distribution, sep = ""), mode = "function")
dNonTrunc <- getDistributionFunction(type='d',dist=distribution)##get(paste("d", distribution, sep = ""), mode = "function")
## for testing
##pLow <- pNonTrunc(low,shape=3,rate=2,lower.tail=FALSE)
##pHigh <- pNonTrunc(high,shape=3,rate=2,lower.tail=FALSE)
pLow <- pNonTrunc(low,...)
pHigh <- pNonTrunc(high,...)
(pCheck <- c(pLow,pHigh))
if(any(!is.finite(pCheck))| any(is.na(pCheck))){
## if pNonTrunc return NA, then return NA
return(rep(NA,length(x)))
}# end if
## calculate truncated density
out <- dNonTrunc(x,...)/(pHigh-pLow)
## make value zero when outside the truncation bounds
out[x<low | x>high] <- 0
if(log){
out <- log(out)
}# end if
return(out)
} #end function
|
## Show the taxonomic ranks ("levels") currently defined in the species list
levels(Easplist)
## Add "aggregate" as a new taxonomic level between "complex" and "genus";
## assigning the full vector redefines the rank hierarchy in this order
levels(Easplist) <- c("form", "variety", "subspecies", "species", "complex",
"aggregate", "genus", "family")
| /examples/levels.R | no_license | ropensci/taxlist | R | false | false | 219 | r | ## Get levels of species list
levels(Easplist)
## Add aggregate as new taxonomic level
levels(Easplist) <- c("form", "variety", "subspecies", "species", "complex",
"aggregate", "genus", "family")
summary(Easplist)
|
# Convert the user-provided information on parameters into a data frame.
# (Column 1 holds covariate names; column 2 encodes the distribution code:
# 0 = fixed, 1 = normal, 2 = log-normal, per the which() calls below.)
d$covariateSetup = getCovariateSetupDF(d)
# Identify which covariates are fixed and which are random
d$randomCovariateIDs = which(d$covariateSetup[,2] != 0)
d$fixedCovariateIDs = which(d$covariateSetup[,2] == 0)
d$fixedIDs = which(d$covariateSetup[,2] == 0)
d$normIDs = which(d$covariateSetup[,2] == 1)
d$logNormIDs = which(d$covariateSetup[,2] == 2)
d$numRandom = length(d$randomCovariateIDs)
d$numFixed = length(d$fixedCovariateIDs)
# Setup choice variables.
# NOTE(review): these assume d$choice / d$observationID initially hold the
# *column names* in choiceData; after these lines they hold the data itself.
d$choice = as.matrix(d$choiceData[d$choice])
d$observationID = as.matrix(d$choiceData[d$observationID])
d$numObs = length(unique(d$observationID))
# Setup attribute variables (P and X)
d$betaNames = d$covariateSetup[,1]
d$P = as.matrix(d$choiceData[d$priceVar])
d$X = as.matrix(d$choiceData[d$betaNames])
# In WTP space the price enters with a negative sign and is removed from
# the attribute matrix (it is handled separately).
if (d$modelSpace == 'wtp') {
d$P = -1*d$P
d$X = d$X[,which(colnames(d$X) != d$priceVar)]
}
# Setup weights: default to equal weights of 1 per row.
d$weights = matrix(1, nrow(d$X))
if (d$useWeights) {
# NOTE(review): d$weights was just overwritten with a matrix of 1s above,
# so indexing choiceData by it looks unintended -- presumably a weights
# *column name* was meant here; verify against the callers that set d$weights.
d$weights = as.matrix(d$choiceData[d$weights])
}
# Setup names of variables.
# For mixed logit, each random covariate contributes a mean ('_mu') and a
# standard deviation ('_sigma') parameter.
d$allParNames = d$betaNames
if (d$modelType == 'mxl') {
d$sigmaNames = paste(d$betaNames[d$randomCovariateIDs], 'sigma', sep='_')
d$betaNames[d$randomCovariateIDs] =
paste(d$betaNames[d$randomCovariateIDs], 'mu', sep='_')
d$allParNames = c(d$betaNames, d$sigmaNames)
}
# Set variables for some basic numbers
d$numBetas = nrow(d$covariateSetup)
d$numParams = length(d$allParNames)
# Scale P and X for optimization if desired; scaleFactors records the
# factor applied to each column so estimates can be unscaled later.
d$scaleFactors = rep(1, d$numBetas)
if (d$scaleParams) {
if (d$modelSpace == 'pref') {
Xout = scaleX(d$X, 1)
d$X = Xout[[1]]
d$scaleFactors = Xout[[2]]
} else {
# WTP space: price is scaled separately from the attributes.
Pout = scaleVar(d$P, 1)
Xout = scaleX(d$X, 1)
PscaleFactor = Pout[[2]]
XscaleFactors = Xout[[2]]
d$P = Pout[[1]]
d$X = Xout[[1]]
d$scaleFactors = c(PscaleFactor, XscaleFactors)
}
}
# Replicate scale factors for the sigma terms of the randomly distributed
# betas in the mxl models
if (d$modelType == 'mxl') {
randomSFs = d$scaleFactors[d$randomCovariateIDs]
d$scaleFactors = c(d$scaleFactors, randomSFs)
}
# Load the standard normal draws for the simulation; fixed covariates get
# zero draws so they contribute no random variation.
d$standardDraws = getStandardNormalHaltonDraws(d$numDraws, d$numBetas)
colnames(d$standardDraws) = d$betaNames
d$standardDraws[,d$fixedCovariateIDs] = rep(0, d$numDraws)
d$covariateSetup = getCovariateSetupDF(d)
# Identify which covariates are fixed and which are random
d$randomCovariateIDs = which(d$covariateSetup[,2] != 0)
d$fixedCovariateIDs = which(d$covariateSetup[,2] == 0)
d$fixedIDs = which(d$covariateSetup[,2] == 0)
d$normIDs = which(d$covariateSetup[,2] == 1)
d$logNormIDs = which(d$covariateSetup[,2] == 2)
d$numRandom = length(d$randomCovariateIDs)
d$numFixed = length(d$fixedCovariateIDs)
# Setup choice variables
d$choice = as.matrix(d$choiceData[d$choice])
d$observationID = as.matrix(d$choiceData[d$observationID])
d$numObs = length(unique(d$observationID))
# Setup attribute variables (P and X)
d$betaNames = d$covariateSetup[,1]
d$P = as.matrix(d$choiceData[d$priceVar])
d$X = as.matrix(d$choiceData[d$betaNames])
if (d$modelSpace == 'wtp') {
d$P = -1*d$P
d$X = d$X[,which(colnames(d$X) != d$priceVar)]
}
# Setup weights
d$weights = matrix(1, nrow(d$X))
if (d$useWeights) {
d$weights = as.matrix(d$choiceData[d$weights])
}
# Setup names of variables
d$allParNames = d$betaNames
if (d$modelType == 'mxl') {
d$sigmaNames = paste(d$betaNames[d$randomCovariateIDs], 'sigma', sep='_')
d$betaNames[d$randomCovariateIDs] =
paste(d$betaNames[d$randomCovariateIDs], 'mu', sep='_')
d$allParNames = c(d$betaNames, d$sigmaNames)
}
# Set variables for some basic numbers
d$numBetas = nrow(d$covariateSetup)
d$numParams = length(d$allParNames)
# Scale P and X for optimization if desired
d$scaleFactors = rep(1, d$numBetas)
if (d$scaleParams) {
if (d$modelSpace == 'pref') {
Xout = scaleX(d$X, 1)
d$X = Xout[[1]]
d$scaleFactors = Xout[[2]]
} else {
Pout = scaleVar(d$P, 1)
Xout = scaleX(d$X, 1)
PscaleFactor = Pout[[2]]
XscaleFactors = Xout[[2]]
d$P = Pout[[1]]
d$X = Xout[[1]]
d$scaleFactors = c(PscaleFactor, XscaleFactors)
}
}
# Replicate scale factors for the sigma terms of the randomly distributed
# betas in the mxl models
if (d$modelType == 'mxl') {
randomSFs = d$scaleFactors[d$randomCovariateIDs]
d$scaleFactors = c(d$scaleFactors, randomSFs)
}
# Load the standard normal draws for the simulation
d$standardDraws = getStandardNormalHaltonDraws(d$numDraws, d$numBetas)
colnames(d$standardDraws) = d$betaNames
d$standardDraws[,d$fixedCovariateIDs] = rep(0, d$numDraws)
|
#' Himmelblau Function
#'
#' Two-dimensional, multimodal test function defined as
#' \deqn{f(\mathbf{x}) = (\mathbf{x}_1^2 + \mathbf{x}_2 - 11)^2 + (\mathbf{x}_1 + \mathbf{x}_2^2 - 7)^2}
#' subject to the box constraints \eqn{\mathbf{x}_i \in [-5, 5], i = 1, 2}.
#'
#' @references D. M. Himmelblau, Applied Nonlinear Programming, McGraw-Hill, 1972.
#'
#' @template ret_smoof_single
#' @export
makeHimmelblauFunction = function() {
  makeSingleObjectiveFunction(
    name = "Himmelblau Function",
    fn = function(x) {
      # Exactly two finite numeric values are required.
      assertNumeric(x, len = 2L, any.missing = FALSE, all.missing = FALSE)
      x1 <- x[1]
      x2 <- x[2]
      (x1^2 + x2 - 11)^2 + (x1 + x2^2 - 7)^2
    },
    par.set = makeNumericParamSet(
      len = 2L,
      id = "x",
      lower = c(-5, -5),
      upper = c(5, 5),
      vector = TRUE
    ),
    tags = attr(makeHimmelblauFunction, "tags"),
    # One of four global optima; f(3, 2) = 0.
    global.opt.params = c(x1 = 3, x2 = 2),
    global.opt.value = 0
  )
}
# Generator metadata consumed by the smoof registry.
class(makeHimmelblauFunction) <- c("function", "smoof_generator")
attr(makeHimmelblauFunction, "name") <- c("Himmelblau")
attr(makeHimmelblauFunction, "type") <- c("single-objective")
| /R/sof.himmelblau.R | no_license | mllg/smoof | R | false | false | 1,240 | r | #' Himmelblau Function
#'
#' Two-dimensional test function based on the function defintion
#' \deqn{f(\mathbf{x}) = (\mathbf{x}_1^2 + \mathbf{x}_2 - 11)^2 + (\mathbf{x}_1 + \mathbf{x}_2^2 - 7)^2}
#' with box-constraings \eqn{\mathbf{x}_i \in [-5, 5], i = 1, 2}.
#'
#' @references D. M. Himmelblau, Applied Nonlinear Programming, McGraw-Hill, 1972.
#'
#' @template ret_smoof_single
#' @export
makeHimmelblauFunction = function() {
makeSingleObjectiveFunction(
name = "Himmelblau Function",
fn = function(x) {
assertNumeric(x, len = 2L, any.missing = FALSE, all.missing = FALSE)
(x[1]^2 + x[2] - 11)^2 + (x[1] + x[2]^2 - 7)^2
},
par.set = makeNumericParamSet(
len = 2L,
id = "x",
lower = c(-5, -5),
upper = c(5, 5),
vector = TRUE
),
tags = attr(makeHimmelblauFunction, "tags"),
global.opt.params = c(x1 = 3, x2 = 2),
global.opt.value = 0
)
}
class(makeHimmelblauFunction) = c("function", "smoof_generator")
attr(makeHimmelblauFunction, "name") = c("Himmelblau")
attr(makeHimmelblauFunction, "type") = c("single-objective")
attr(makeHimmelblauFunction, "tags") = c("single-objective", "continuous", "differentiable", "non-separable", "non-scalable", "multimodal")
|
### Name: inf.3D
### Title: Function to plot the information surface in three-dimensional
### style
### Aliases: inf.3D
### Keywords: MIRT information
### ** Examples
# Example item parameters for 15 items -- a1/a2 look like slopes on two
# latent dimensions and d like intercepts; confirm against the inf.3D help.
a1<-c(0.48 , 1.16 , 1.48 , 0.44 , 0.36 , 1.78 , 0.64 , 1.10 , 0.76 , 0.52 , 0.83 ,0.88, 0.34 , 0.74 , 0.66)
a2<-c( 0.54, 0.35, 0.44, 1.72, 0.69, 0.47, 1.21, 1.74, 0.89, 0.53, 0.41, 0.98, 0.59, 0.59, 0.70)
d<-c( -1.11,0.29, 1.51,-0.82,-1.89,-0.49,1.35,0.82,-0.21,-0.04,-0.68, 0.22,-0.86,-1.33, 1.21)
# Draw the information surface; first argument is an angle (pi/3).
inf.3D(pi/3, a1, a2, d)
| /Visualization of Multi-dimensional Item Response Theory Model/R-ex/inf.3D.R | no_license | zmeers/Visualization-of-Multidimensional-Item-Response-Theory- | R | false | false | 497 | r | ### Name: inf.3D
### Title: Function to plot the infromation surface in three-dimensional
### style
### Aliases: inf.3D
### Keywords: MIRT information
### ** Examples
a1<-c(0.48 , 1.16 , 1.48 , 0.44 , 0.36 , 1.78 , 0.64 , 1.10 , 0.76 , 0.52 , 0.83 ,0.88, 0.34 , 0.74 , 0.66)
a2<-c( 0.54, 0.35, 0.44, 1.72, 0.69, 0.47, 1.21, 1.74, 0.89, 0.53, 0.41, 0.98, 0.59, 0.59, 0.70)
d<-c( -1.11,0.29, 1.51,-0.82,-1.89,-0.49,1.35,0.82,-0.21,-0.04,-0.68, 0.22,-0.86,-1.33, 1.21)
inf.3D(pi/3, a1, a2, d)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 5985
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5984
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5984
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt50_51_167.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1856
c no.of clauses 5985
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 5984
c
c QBFLIB/Basler/terminator/stmt50_51_167.qdimacs 1856 5985 E1 [1] 0 147 1708 5984 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt50_51_167/stmt50_51_167.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 711 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 5985
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5984
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 5984
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt50_51_167.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1856
c no.of clauses 5985
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 5984
c
c QBFLIB/Basler/terminator/stmt50_51_167.qdimacs 1856 5985 E1 [1] 0 147 1708 5984 RED
|
## -- C-STAD: plotting results ------------ ##
## Date: 24/06/2019
## Author: Ugofilippo Basellini
## Comments:
## - FIGURE A1: Aligned distributions
##
## ------------------------------------------ ##
## clean the workspace
## NOTE(review): rm(list = ls()) and the absolute setwd() paths below make
## this script interactive/single-machine only.
rm(list = ls())
## load useful packages
library(MortalitySmooth)
library(colorspace)
library(viridis)
library(fields)
## -- DATA & FUNCTIONS ----------
## load C-STAD functions (presumably defines fx_shift, lifetable.ex,
## GINI_func, dx_from_mx used below)
setwd("~/Documents/Demography/Work/STADcohorts/99_Github/C-STAD/R/Functions")
source("C-STAD_Functions.R")
source("BootDxFUN.R")
## load data: provides cE, cMx, cZ matrices (ages x cohorts)
setwd("~/Documents/Demography/Work/STADcohorts/99_Github/C-STAD/R/Data")
cou <- "SWE" ## SWE or DNK
sex <- "F" ## only F
name <- paste0(cou,"coh",sex,".Rdata") ## Females
load(name)
## age dimensions
ages <- as.numeric(rownames(cE)) ## ages available in the data
cohorts <- as.numeric(colnames(cE)) ## cohorts available in the data
age.start <- 40
xo <- age.start:110 ## HMD ages
x <- age.start:120 ## expanded ages
mo <- length(xo)
m <- length(x)
delta <- 0.1 ## step of the fine age grid
xs <- seq(min(x), max(x), delta) ## ages at fine grid
ms <- length(xs)
## cohort dimensions
year.start <- 1835
year.end <- 2015 - age.start - 5
y <- year.start:year.end ## use for both SWE and DNK (to have same time-range of analysis + reliable data)
n <- length(y)
coly <- rainbow_hcl(n) ## one color per cohort
## (1835 = first cohort with data observed at all ages in DNK)
## (1970 = 5 years before last cohort with data observed at age 40)
## cohorts first Lexis parallelogram (c1)
## c_breve = cohort preceding the first cohort with missing data at the
## oldest age, i.e. the last fully observed cohort
c_breve <- cohorts[min(which(is.na(cE[nrow(cE),])))] - 1
c1 <- y[1]:c_breve ## 1905 = last cohort with fully observed data
n1 <- length(c1)
## starting data, restricted to ages xo and cohorts y
E <- cE[ages%in%xo,cohorts%in%y]
MX.act <- cMx[ages%in%xo,cohorts%in%y]
Z <- Zna <- cZ[ages%in%xo,cohorts%in%y]
Z[E==0] <- 0 ## no counts where there is no exposure
W <- matrix(1,mo,n)
W[is.na(E)] <- 0 ## zero weight where data is missing
## expand data for extrapolation to ages 111-120; the dummy value 100 is a
## placeholder whose use is controlled entirely by the weights
EA <- rbind(E,matrix(100,(m-mo),n))
ZA <- rbind(Z,matrix(100,(m-mo),n))
Wup <- cbind(matrix(1,(m-mo),n1),
matrix(0,(m-mo),(n-n1))) ## 1 to consider 110-120 of c1 for standard
WA <- rbind(W,Wup)
## Weights augmented for the smooth standard: each age-specific weight is
## repeated 1/delta times so the weight matrix matches the fine age grid xs.
## (Generalized from the hard-coded 10 so that it stays consistent if delta
## is ever changed.)
reps <- round(1/delta)
One <- matrix(rep(1, reps), reps, 1)
Ws <- kronecker(WA, One) ## expanded matrix of weights
Ws <- Ws[seq_len(ms), ] ## drop trailing rows beyond the fine grid (was "remove last 9 weights")
WA[EA==0] <- 0 ## zero weight where data equal to zero
WA[which(x>xo[mo]),] <- 0 ## zero weight for ages above 110
## log death rates
LMX.act <- log(MX.act)
matplot(xo,LMX.act,t="l",lty=1,col = coly,xlab="Age",
        main=paste("Observed Cohort Mortality,",cou,sex,y[1],"-",y[n]))
## B-splines parameters
## age/cohort domains are extended by 1% on each side before knot placement
xl <- min(x)
xr <- max(x)
xmin <- round(xl - 0.01 * (xr - xl),3)
xmax <- round(xr + 0.01 * (xr - xl),3)
ndx <- floor(m/5) ## number of internal knots over age
yl <- min(y)
yr <- max(y)
ymin <- round(yl - 0.01 * (yr - yl),3)
ymax <- round(yr + 0.01 * (yr - yl),3)
ndy <- floor(n/5) ## number of internal knots over cohort
deg <- 3 ## cubic B-splines
## B-splines bases: Bx on integer ages, Bxs on the fine age grid, By on cohorts
Bx <- MortSmooth_bbase(x, xmin, xmax, ndx, deg)
Bxs <- MortSmooth_bbase(xs, xmin, xmax, ndx, deg)
By <- MortSmooth_bbase(y, ymin, ymax, ndy, deg)
B <- kronecker(By,Bx)
Bs <- kronecker(By,Bxs)
## -- STANDARD ----------
## 2D smooth (optimal parameters)
## smoothing parameters are pre-selected per country -- TODO confirm how
## these lambdas were chosen (presumably by an earlier grid search)
if(cou=="DNK"){
lambdaX.hat <- 10^2.5
lambdaY.hat <- 10^3
}else if(cou=="SWE"){
lambdaX.hat <- 10^2.5
lambdaY.hat <- 10^3.5
}
## 2D P-spline smooth of counts ZA with exposure offset log(EA), weights WA
smooth2D <- Mort2Dsmooth(x=x,y=y,Z=ZA,offset=log(EA),W=WA,
ndx=c(ndx,ndy),method = 3,
lambdas = c(lambdaX.hat,lambdaY.hat))
plot(smooth2D, palette = "terrain.colors")
## fitted log-rates evaluated on the fine age grid (ms x n)
LMX.smo2D <- matrix(Bs %*% c(smooth2D$coefficients),ms,n)
LMX.smo2DW <- LMX.smo2D*Ws ## mask by 0/1 observation weights
LMX.smo2DW[LMX.smo2DW==0] <- NA
## diagnostics: observed vs smoothed (extrapolated and observed region)
par(mfrow=c(1,3))
matplot(xo,LMX.act,t="l",lty=1,col = coly,xlab="Age",ylim = range(LMX.act,LMX.smo2D,finite=T),
main=paste("Observed Mortality,",cou,y[1],"-",y[n]))
matplot(xs,LMX.smo2D,t="l",lty=1,col = coly,xlab="Age",ylim = range(LMX.act,LMX.smo2D,finite=T),
main=paste("Smooth lmx extrapolated"))
matplot(xs,LMX.smo2DW,t="l",lty=1,col = coly,xlab="Age",ylim = range(LMX.act,LMX.smo2D,finite=T),
main=paste("Smooth lmx observed"))
par(mfrow=c(1,1))
## actual and 2Dsmo e40, g40: lifetable.ex / GINI_func applied column-wise
## to rates at ages xo (fully observed cohorts c1 for the actual series)
e40.act <- apply(exp(LMX.act[,y%in%c1]),2,lifetable.ex,x=xo,sex=sex)
g40.act <- apply(exp(LMX.act[,y%in%c1]),2,GINI_func,ages=xo,sex=sex)
e40_2D <- apply(exp(LMX.smo2D[xs%in%xo,]),2,lifetable.ex,x=xo,sex=sex)
g40_2D <- apply(exp(LMX.smo2D[xs%in%xo,]),2,GINI_func,ages=xo,sex=sex)
par(mfrow=c(1,2))
plot(c1,e40.act,ylim=range(e40.act,e40_2D),pch=16,
main=paste0("E",xo[1]),ylab="",cex.lab=1.25,xlim=range(y),xlab="Cohort")
lines(y,e40_2D,col=5,lwd=2)
plot(c1,g40.act,ylim=range(g40.act,g40_2D),pch=16,
main=paste0("G",xo[1]),ylab="",cex.lab=1.25,xlim=range(y),xlab="Cohort")
lines(y,g40_2D,col=5,lwd=2)
par(mfrow=c(1,1))
## FX: age-at-death distributions derived column-wise from the smooth rates
FX.smo2D <- apply(exp(LMX.smo2D),2,dx_from_mx,age=xs)
FX.smo2DW <- FX.smo2D*Ws
FX.smo2DW[FX.smo2DW==0] <- NA
par(mfrow=c(1,2))
matplot(xs, FX.smo2D, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, extrapolated",cex.lab=1.25)
matplot(xs, FX.smo2DW, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, observed",cex.lab=1.25)
par(mfrow=c(1,1))
## compute modal age at death (it's pretty smooth here due to 2D smoothing);
## delta/2 centers the mode within its grid interval
M_2D <- xs[apply(FX.smo2D, 2, which.max)] + (delta/2)
plot(y, M_2D, t="o", lwd=2, pch=16, main="Modal Age at Death (smooth)")
## STANDARD DISTRIBUTION
## shift of each cohort's mode relative to the first cohort
s_2D <- M_2D - M_2D[1]
## derive aligned distributions: shift every density so all modes coincide
FX.align <- matrix(0, nrow=ms, ncol=n)
for(i in 1:n){
FX.align[,i] <- fx_shift(age=xs,fx=FX.smo2D[,i],shift=-s_2D[i],ndx = ndx,deg = deg)
}
FX.alignW <- FX.align*Ws ## observed region only
FX.alignW[FX.alignW==0] <- NA
par(mfrow=c(1,2))
matplot(xs, FX.align, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, extrapolated",cex.lab=1.25)
matplot(xs, FX.alignW, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, observed",cex.lab=1.25)
par(mfrow=c(1,1))
## Standard = mean of the aligned densities
## (geometric mean across cohorts: exp of the mean of the logs)
FXallmeanW <- exp(apply(log(FX.alignW), 1, mean, na.rm=T))
FXstand <- FXallmeanW
matplot(xs, FX.alignW, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, extrapolated",cex.lab=1.25)
lines(xs, FXallmeanW, lwd=3,col=1)
legend("bottomleft", c("Standard"),col=1, lwd=3,lty = 1,
bg="white", pt.cex=1.2,bty="n",cex = 1.5)
## common y-limits for both panels of the figure
ylim <- range(FX.smo2DW, na.rm = TRUE)
## colors
# display.brewer.pal(n=9, name = 'Purples')
# col.stad <- brewer.pal(n = 8, name = 'Blues')[8]
# col.stadT <- adjustcolor(col.stad, alpha=0.3)
# my.orange <- brewer.pal(n=9, name = 'Oranges')[6]
# my.green <- brewer.pal(n=9, name = 'Greens')[6]
# my.purple <- brewer.pal(n=9, name = 'Purples')[7]
# my.cols <- c(col.stad,my.orange,my.green,my.purple)
# my.colsT <- adjustcolor(my.cols, alpha=0.2)
## graphical parameters for Figure A1
cex.x.axis <- 1.1
cex.y.axis <- 1.1
cex.x.lab <- 2
cex.y.lab <- 1.75
cex.leg <- 1.15
cex.coh <- 1.5
cex.title <- 1.7
cex.obs <- 0.85
cex.age <- 0.9
lwd.pt <- 0.9
lwd.mean <- 2.5
## cohort color palette (one color per cohort).
## NOTE(review): the original assigned a colorRampPalette() result to my.col
## and immediately overwrote it with rainbow_hcl(n) (dead store); the unused
## alternative palette is kept below as a comment.
# my.col <- colorRampPalette(c("purple3",
#                              "blue2", "cyan2","green", "yellow","goldenrod2"))(n)
my.col <- rainbow_hcl(n)
## SAVE: write the two-panel Figure A1 to pdf
setwd("~/Documents/Demography/Work/STADcohorts/99_Github/C-STAD/Paper/Figures")
pdf("FA0.pdf",width = 10,height = 5.5)
par(mfrow = c(1,2),
oma = c(1.25,1.2,0.1,0.25),
mar = c(1.75,1.3,1.2,0.1))
## bottom, left, top, right
## Left panel: smooth distributions in the observed region
matplot(xs,FX.smo2DW,xlim=range(x),
ylim=ylim,xlab="",ylab="",t="n",axes=F)
axis(1,cex.axis=cex.x.axis,padj = -0.5)
axis(2,cex.axis=cex.y.axis,at=seq(0,0.04,0.01),
labels = c("0","0.01","0.02","0.03","0.04"))
grid();box()
title(main="Smooth distributions", cex.main=cex.title)
matlines(xs,FX.smo2DW,t="l",lty=1,col=my.col)
# lines(xs,apply(FX.smo2DW,1,mean,na.rm=T),lwd=3)
## horizontal color bar mapping colors to cohorts
image.plot(smallplot=c(.15,.45, .84,.88),axis.args=list(cex.axis=0.8),
legend.only=TRUE, zlim=range(y),
col=my.col, nlevel=n,
horizontal = TRUE)
## Right panel: aligned distributions with their mean
matplot(xs,FX.smo2DW,xlim=range(x),
ylim=ylim,xlab="",ylab="",t="n",axes=F)
axis(1,cex.axis=cex.x.axis,padj = -0.5)
axis(2,las=2,cex.axis=cex.y.axis,at=seq(0,0.04,0.01),
labels = rep("",5))
grid();box()
title(main="Aligned distributions", cex.main=cex.title)
## dashed line at the common (aligned) mode
abline(v=xs[which.max(apply(FX.alignW,1,mean,na.rm=T))],lty=2)
matlines(xs,FX.alignW,t="l",lty=1,col=my.col)
## NOTE(review): the line labelled "Standard" below is the arithmetic mean,
## while FXstand earlier is the geometric mean -- confirm which is intended.
lines(xs,apply(FX.alignW,1,mean,na.rm=T),lwd=3)
legend("topright", c("Standard"),col=1, lwd=3,lty = 1,
bg="white", pt.cex=1.2,bty="n",cex = 1.5)
cex.x.lab <- cex.y.lab ## reuse the smaller y-label size for the shared x label
title(xlab = "Ages",cex.lab=cex.x.lab,
outer = TRUE, line = 0.1)
dev.off()
| /R/Figures/FA1_Alignment.R | no_license | ubasellini/C-STAD | R | false | false | 8,546 | r | ## -- C-STAD: plotting results ------------ ##
## Date: 24/06/2019
## Author: Ugofilippo Basellini
## Comments:
## - FIGURE A1: Aligned distributions
##
## ------------------------------------------ ##
## clean the workspace
rm(list = ls())
## load useful packages
library(MortalitySmooth)
library(colorspace)
library(viridis)
library(fields)
## -- DATA & FUNCTIONS ----------
## load C-STAD functions
setwd("~/Documents/Demography/Work/STADcohorts/99_Github/C-STAD/R/Functions")
source("C-STAD_Functions.R")
source("BootDxFUN.R")
## load data
setwd("~/Documents/Demography/Work/STADcohorts/99_Github/C-STAD/R/Data")
cou <- "SWE" ## SWE or DNK
sex <- "F" ## only F
name <- paste0(cou,"coh",sex,".Rdata") ## Females
load(name)
## age dimensions
ages <- as.numeric(rownames(cE))
cohorts <- as.numeric(colnames(cE))
age.start <- 40
xo <- age.start:110 ## HMD ages
x <- age.start:120 ## expanded ages
mo <- length(xo)
m <- length(x)
delta <- 0.1
xs <- seq(min(x), max(x), delta) ## ages at fine grid
ms <- length(xs)
## cohort dimensions
year.start <- 1835
year.end <- 2015 - age.start - 5
y <- year.start:year.end ## use for both SWE and DNK (to have same time-range of analysis + reliable data)
n <- length(y)
coly <- rainbow_hcl(n)
## (1835 = first cohort with data observed at all ages in DNK)
## (1970 = 5 years before last cohort with data observed at age 40)
## cohorts first Lexis parallelogram (c1)
c_breve <- cohorts[min(which(is.na(cE[nrow(cE),])))] - 1
c1 <- y[1]:c_breve ## 1905 = last cohort with fully observed data
n1 <- length(c1)
## starting data
E <- cE[ages%in%xo,cohorts%in%y]
MX.act <- cMx[ages%in%xo,cohorts%in%y]
Z <- Zna <- cZ[ages%in%xo,cohorts%in%y]
Z[E==0] <- 0
W <- matrix(1,mo,n)
W[is.na(E)] <- 0 ## zero weight where data is missing
## expand data for extrapolation
EA <- rbind(E,matrix(100,(m-mo),n))
ZA <- rbind(Z,matrix(100,(m-mo),n))
Wup <- cbind(matrix(1,(m-mo),n1),
matrix(0,(m-mo),(n-n1))) ## 1 to consider 110-120 of c1 for standard
WA <- rbind(W,Wup)
## weights augmented (repeat each weight 10 times) for the smooth standard
One <- matrix(rep(1,10),1/delta,1)
Ws <- kronecker(WA,One) ## expanded matrix of weights
Ws <- Ws[c(1:ms),] ## remove last 9 weights
WA[EA==0] <- 0 ## zero weight where data equal to zero
WA[which(x>xo[mo]),] <- 0 ## zero weight for ages above 110
## log death rates
LMX.act <- log(MX.act)
matplot(xo,LMX.act,t="l",lty=1,col = coly,xlab="Age",
main=paste("Observed Cohort Mortality,",cou,sex,y[1],"-",y[n]))
## B-splines parameters
xl <- min(x)
xr <- max(x)
xmin <- round(xl - 0.01 * (xr - xl),3)
xmax <- round(xr + 0.01 * (xr - xl),3)
ndx <- floor(m/5)
yl <- min(y)
yr <- max(y)
ymin <- round(yl - 0.01 * (yr - yl),3)
ymax <- round(yr + 0.01 * (yr - yl),3)
ndy <- floor(n/5)
deg <- 3
## B-splines bases
Bx <- MortSmooth_bbase(x, xmin, xmax, ndx, deg)
Bxs <- MortSmooth_bbase(xs, xmin, xmax, ndx, deg)
By <- MortSmooth_bbase(y, ymin, ymax, ndy, deg)
B <- kronecker(By,Bx)
Bs <- kronecker(By,Bxs)
## -- STANDARD ----------
## 2D smooth (optimal parameters)
if(cou=="DNK"){
lambdaX.hat <- 10^2.5
lambdaY.hat <- 10^3
}else if(cou=="SWE"){
lambdaX.hat <- 10^2.5
lambdaY.hat <- 10^3.5
}
smooth2D <- Mort2Dsmooth(x=x,y=y,Z=ZA,offset=log(EA),W=WA,
ndx=c(ndx,ndy),method = 3,
lambdas = c(lambdaX.hat,lambdaY.hat))
plot(smooth2D, palette = "terrain.colors")
LMX.smo2D <- matrix(Bs %*% c(smooth2D$coefficients),ms,n)
LMX.smo2DW <- LMX.smo2D*Ws
LMX.smo2DW[LMX.smo2DW==0] <- NA
par(mfrow=c(1,3))
matplot(xo,LMX.act,t="l",lty=1,col = coly,xlab="Age",ylim = range(LMX.act,LMX.smo2D,finite=T),
main=paste("Observed Mortality,",cou,y[1],"-",y[n]))
matplot(xs,LMX.smo2D,t="l",lty=1,col = coly,xlab="Age",ylim = range(LMX.act,LMX.smo2D,finite=T),
main=paste("Smooth lmx extrapolated"))
matplot(xs,LMX.smo2DW,t="l",lty=1,col = coly,xlab="Age",ylim = range(LMX.act,LMX.smo2D,finite=T),
main=paste("Smooth lmx observed"))
par(mfrow=c(1,1))
## actual and 2Dsmo e40, g40
e40.act <- apply(exp(LMX.act[,y%in%c1]),2,lifetable.ex,x=xo,sex=sex)
g40.act <- apply(exp(LMX.act[,y%in%c1]),2,GINI_func,ages=xo,sex=sex)
e40_2D <- apply(exp(LMX.smo2D[xs%in%xo,]),2,lifetable.ex,x=xo,sex=sex)
g40_2D <- apply(exp(LMX.smo2D[xs%in%xo,]),2,GINI_func,ages=xo,sex=sex)
par(mfrow=c(1,2))
plot(c1,e40.act,ylim=range(e40.act,e40_2D),pch=16,
main=paste0("E",xo[1]),ylab="",cex.lab=1.25,xlim=range(y),xlab="Cohort")
lines(y,e40_2D,col=5,lwd=2)
plot(c1,g40.act,ylim=range(g40.act,g40_2D),pch=16,
main=paste0("G",xo[1]),ylab="",cex.lab=1.25,xlim=range(y),xlab="Cohort")
lines(y,g40_2D,col=5,lwd=2)
par(mfrow=c(1,1))
## FX
FX.smo2D <- apply(exp(LMX.smo2D),2,dx_from_mx,age=xs)
FX.smo2DW <- FX.smo2D*Ws
FX.smo2DW[FX.smo2DW==0] <- NA
par(mfrow=c(1,2))
matplot(xs, FX.smo2D, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, extrapolated",cex.lab=1.25)
matplot(xs, FX.smo2DW, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, observed",cex.lab=1.25)
par(mfrow=c(1,1))
## compute modal age at death (it's pretty smooth here due to 2D smoothing)
M_2D <- xs[apply(FX.smo2D, 2, which.max)] + (delta/2)
plot(y, M_2D, t="o", lwd=2, pch=16, main="Modal Age at Death (smooth)")
## STANDARD DISTRIBUTION
s_2D <- M_2D - M_2D[1]
## derive aligned distributions
FX.align <- matrix(0, nrow=ms, ncol=n)
for(i in 1:n){
FX.align[,i] <- fx_shift(age=xs,fx=FX.smo2D[,i],shift=-s_2D[i],ndx = ndx,deg = deg)
}
FX.alignW <- FX.align*Ws
FX.alignW[FX.alignW==0] <- NA
par(mfrow=c(1,2))
matplot(xs, FX.align, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, extrapolated",cex.lab=1.25)
matplot(xs, FX.alignW, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, observed",cex.lab=1.25)
par(mfrow=c(1,1))
## Standard = mean of the aligned densities
FXallmeanW <- exp(apply(log(FX.alignW), 1, mean, na.rm=T))
FXstand <- FXallmeanW
matplot(xs, FX.alignW, lty=1, t="l", col=coly,xlab="Age",ylab="fx",
main="Smooth Dx, extrapolated",cex.lab=1.25)
lines(xs, FXallmeanW, lwd=3,col=1)
legend("bottomleft", c("Standard"),col=1, lwd=3,lty = 1,
bg="white", pt.cex=1.2,bty="n",cex = 1.5)
ylim <- range(FX.smo2DW,na.rm=T)
## colors
# display.brewer.pal(n=9, name = 'Purples')
# col.stad <- brewer.pal(n = 8, name = 'Blues')[8]
# col.stadT <- adjustcolor(col.stad, alpha=0.3)
# my.orange <- brewer.pal(n=9, name = 'Oranges')[6]
# my.green <- brewer.pal(n=9, name = 'Greens')[6]
# my.purple <- brewer.pal(n=9, name = 'Purples')[7]
# my.cols <- c(col.stad,my.orange,my.green,my.purple)
# my.colsT <- adjustcolor(my.cols, alpha=0.2)
cex.x.axis <- 1.1
cex.y.axis <- 1.1
cex.x.lab <- 2
cex.y.lab <- 1.75
cex.leg <- 1.15
cex.coh <- 1.5
cex.title <- 1.7
cex.obs <- 0.85
cex.age <- 0.9
lwd.pt <- 0.9
lwd.mean <- 2.5
my.col <- colorRampPalette(c("purple3",
"blue2", "cyan2","green", "yellow","goldenrod2"))(n)
my.col <- rainbow_hcl(n)
## SAVE
setwd("~/Documents/Demography/Work/STADcohorts/99_Github/C-STAD/Paper/Figures")
pdf("FA0.pdf",width = 10,height = 5.5)
par(mfrow = c(1,2),
oma = c(1.25,1.2,0.1,0.25),
mar = c(1.75,1.3,1.2,0.1))
## bottom, left, top, right
## Smooth dx, observed
matplot(xs,FX.smo2DW,xlim=range(x),
ylim=ylim,xlab="",ylab="",t="n",axes=F)
axis(1,cex.axis=cex.x.axis,padj = -0.5)
axis(2,cex.axis=cex.y.axis,at=seq(0,0.04,0.01),
labels = c("0","0.01","0.02","0.03","0.04"))
grid();box()
title(main="Smooth distributions", cex.main=cex.title)
matlines(xs,FX.smo2DW,t="l",lty=1,col=my.col)
# lines(xs,apply(FX.smo2DW,1,mean,na.rm=T),lwd=3)
image.plot(smallplot=c(.15,.45, .84,.88),axis.args=list(cex.axis=0.8),
legend.only=TRUE, zlim=range(y),
col=my.col, nlevel=n,
horizontal = TRUE)
## Aligned dx
matplot(xs,FX.smo2DW,xlim=range(x),
ylim=ylim,xlab="",ylab="",t="n",axes=F)
axis(1,cex.axis=cex.x.axis,padj = -0.5)
axis(2,las=2,cex.axis=cex.y.axis,at=seq(0,0.04,0.01),
labels = rep("",5))
grid();box()
title(main="Aligned distributions", cex.main=cex.title)
abline(v=xs[which.max(apply(FX.alignW,1,mean,na.rm=T))],lty=2)
matlines(xs,FX.alignW,t="l",lty=1,col=my.col)
lines(xs,apply(FX.alignW,1,mean,na.rm=T),lwd=3)
legend("topright", c("Standard"),col=1, lwd=3,lty = 1,
bg="white", pt.cex=1.2,bty="n",cex = 1.5)
cex.x.lab <- cex.y.lab
title(xlab = "Ages",cex.lab=cex.x.lab,
outer = TRUE, line = 0.1)
dev.off()
|
# Attach all required packages, then drop the temporary helper vector.
packages <- c("CIMseq", "CIMseq.data", "tidyverse", "Seurat", "harmony", "future.apply")
purrr::walk(packages, library, character.only = TRUE)
rm(packages)
#check package version: require CIMseq >= 0.2.0.0
#NOTE: the original concatenated the last three version components into one
#integer (e.g. "0.1.9.9" -> 199), which wrongly passes a ">= 100" test even
#though the version is below 0.2.0.0; compareVersion() does a proper
#component-wise comparison.
algoV <- sessionInfo()$otherPkgs$CIMseq$Version
if (utils::compareVersion(algoV, "0.2.0.0") < 0) {
  stop("sp.scRNAseq package version too low. Must be >= 0.2.0.0")
}
currPath <- getwd()
#setup spCounts
#plates to keep for this analysis (SI per the variable name)
keep.plates.SI <- c(
"NJA01201", "NJA01202", "NJA01301", "NJA01302", "NJA01501"
)
#singlets are samples whose name starts with "s"; others are multiplets
s <- str_detect(colnames(MGA.Counts), "^s")
samples <- filter(MGA.Meta, !filtered & unique_key %in% keep.plates.SI)$sample
e <- colnames(MGA.Counts) %in% samples
boolSng <- s & e
boolMul <- !s & e
#unfiltered external (RSI) samples
boolRSI <- colnames(RSI.Counts) %in% filter(RSI.Meta, !filtered)$sample
#genes present in all three count matrices (MGA, RSI, TMD)
iGenes <- intersect(intersect(rownames(RSI.Counts), rownames(MGA.Counts)), rownames(TMD.Counts))
singlets <- cbind(MGA.Counts[iGenes, boolSng], RSI.Counts[iGenes, boolRSI])
singletERCC <- cbind(MGA.CountsERCC[, boolSng], RSI.CountsERCC[, boolRSI])
multiplets <- MGA.Counts[iGenes, boolMul]
multipletERCC <- MGA.CountsERCC[, boolMul]
#Dimensionality reduction and classification
print(paste0("Starting all cells analysis at ", Sys.time()))
#Seurat (v2 API) workflow on the combined singlet counts
mca <- CreateSeuratObject(raw.data = singlets)
#tag each cell with its data source, inferred from sample-name prefixes
mca@meta.data$source <- case_when(
str_detect(rownames(mca@meta.data), "SRR654") | str_detect(rownames(mca@meta.data), "SRR510") ~ "External",
str_detect(rownames(mca@meta.data), "NJA") | str_detect(rownames(mca@meta.data), "NJD") ~ "Enge",
TRUE ~ "error"
)
#log-normalize with a counts-per-million scale factor
mca <- NormalizeData(
object = mca, normalization.method = "LogNormalize", scale.factor = 1e6
)
mca <- FindVariableGenes(
object = mca, mean.function = ExpMean, dispersion.function = LogVMR,
do.plot = FALSE, x.low.cutoff = 1, y.cutoff = 1
)
mca <- ScaleData(
object = mca, genes.use = mca@var.genes, display.progress = FALSE, do.par = TRUE,
num.cores = 4
)
#compute 100 principal components on the variable genes
mca <- RunPCA(
object = mca, pc.genes = mca@var.genes, pcs.compute = 100, do.print = FALSE
)
# DimPlot(
#   object = mca, reduction.use = "pca", dim.1 = 1, dim.2 = 2,
#   no.legend = FALSE, do.return = TRUE, group.by = "source",
#   vector.friendly = FALSE, pt.size = 1
# )
# mca <- JackStraw(object = mca, num.replicate = 100, display.progress = TRUE, num.pc = 50)
# mca <- JackStrawPlot(object = mca, PCs = 1:50)
# PCp <- mca@dr$pca@jackstraw@overall.p.values
# pcs <- PCp[PCp[, 2] < 10^-6, 1]
# pcs <- c(
#   1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
#   18, 19, 20, 21, 22, 23, 24, 25, 26
# )
#use the first 50 PCs instead of the JackStraw-selected set above
pcs <- 1:50
######################################################################################
#check for PCs correlated with the data source (batch effect)
pc.cor <- cor(mca@dr$pca@cell.embeddings, as.numeric(as.factor(mca@meta.data$source)))
#PC9 seems to be the culprit
#drop any PC whose correlation with source exceeds 0.25
pcs <- pcs[!pcs %in% which(pc.cor > 0.25)]
######################################################################################
#NOTE(review): after dropping PCs, max(pcs) is no longer the number of PCs
#actually used; length(pcs) would be the accurate count for this message.
print(paste0("Using ", max(pcs), " principal components."))
#PCElbowPlot(object = mca, num.pc = 100) + scale_x_continuous(breaks = seq(0, 100, 5))
#UMAP embedding on the batch-filtered PCs (fixed seed for reproducibility)
mca <- RunUMAP(
object = mca, reduction.use = "pca", dims.use = pcs, min_dist = 0.3,
n_neighbors = 15, seed.use = 9823493
)
#mca <- RunTSNE(mca, dims.use = pcs, gene.use = mca@var.genes, seed.use = 78239)
#graph-based clustering on the same PCs, fixed seed
mca <- FindClusters(
object = mca, reduction.type = "pca", dims.use = pcs, resolution = 0.65,
n.start = 100, n.iter = 1000, nn.eps = 0, k.param = 30, prune.SNN = 1/15,
algorithm = 1, save.SNN = TRUE, print.output = FALSE, plot.SNN = FALSE,
force.recalc = TRUE, random.seed = 93820
)
# DimPlot(
# object = mca, reduction.use = "umap", no.legend = FALSE, do.return = TRUE,
# vector.friendly = FALSE, pt.size = 1
# ) + scale_colour_manual(values = col40())
#
# mca@meta.data %>%
# group_by(source, res.0.6) %>%
# summarize(n = n()) %>%
# ungroup() %>%
# group_by(source) %>%
# mutate(`%` = n / sum(n) * 100) %>%
# ggplot() +
# geom_bar(aes(res.0.6, `%`, fill = source), stat = "identity", position = position_dodge(width = 1)) +
# facet_wrap(~res.0.6, scales = "free") +
# labs(x = "Class", y = "% of dataset")
#
# mca@meta.data %>%
# count(source, res.0.6) %>%
# ggplot() +
# geom_bar(aes(res.0.6, n, fill = source), stat = "identity", position = position_dodge(width = 1)) +
# facet_wrap(~res.0.6, scales = "free") +
# labs(x = "Class", y = "Count")
#
# mca@dr$umap@cell.embeddings %>%
# matrix_to_tibble("sample") %>%
# mutate(source = case_when(
# str_detect(sample, "SRR654") ~ "Tabula Muris",
# str_detect(sample, "SRR510") ~ "Regev",
# TRUE ~ "Enge"
# )) %>%
# sample_n(nrow(.), FALSE) %>%
# ggplot() +
# geom_point(aes(UMAP1, UMAP2, colour = source), alpha = 0.75)
#
# FeaturePlot(
# mca,
# c("Lgr5", "Ptprc", "Chga", "Dclk1", "Atoh1", "Lyz1", "Alpi", "Mki67"),
# reduction.use = "umap", dark.theme = FALSE, pt.size = 0.1,
# vector.friendly = FALSE
# )
#
# FeaturePlot(
# mca,
# c("Lgr5", "Alpi", "Mki67"),
# reduction.use = "umap", dark.theme = FALSE, pt.size = 0.1,
# vector.friendly = FALSE
# )
#
# FeaturePlot(
# mca,
# c("Plet1"),
# reduction.use = "umap", dark.theme = FALSE, pt.size = 0.1,
# vector.friendly = FALSE
# )
#per-cluster marker genes: upregulated only, ROC test
markers <- FindAllMarkers(
object = mca, only.pos = TRUE, min.diff.pct = 0.25, logfc.threshold = log(1.5),
test.use = "roc"
)
# DoHeatmap(
#   object = mca, genes.use = unique(markers$gene), slim.col.label = TRUE, remove.key = TRUE,
#   group.label.rot = TRUE, cex.row = 1
# )
print(paste0("Done all cells analysis at ", Sys.time()))
#keep only cells retained by Seurat and align ERCC/class labels to them
singlets <- singlets[, colnames(singlets) %in% colnames(mca@data)]
#NOTE(review): %in% subsetting keeps singletERCC's own column order, not the
#order of singlets -- confirm the two matrices stay column-aligned.
singletERCC <- singletERCC[, colnames(singletERCC) %in% colnames(singlets)]
idx <- match(rownames(FetchData(mca, "ident")), colnames(singlets))
classes <- as.character(FetchData(mca, "ident")[[1]])[idx]
names(classes) <- rownames(FetchData(mca, "ident"))[idx]
#feature selection: row indices of the marker genes in the count matrix
var.genes <- unique(markers$gene)
select <- which(rownames(singlets) %in% var.genes)
dim.red <- mca@dr$umap@cell.embeddings
colnames(dim.red) <- NULL
#setup CIMseqData objects
cObjSng <- CIMseqSinglets(singlets, singletERCC, dim.red, classes)
cObjMul <- CIMseqMultiplets(multiplets, multipletERCC, select)
#save
if(!"data" %in% list.dirs(currPath, full.names = FALSE)) system('mkdir data')
print(paste0("saving data to ", currPath, "."))
save(cObjSng, cObjMul, file = file.path(currPath, "data/CIMseqData.rda"))
#write logs
writeLines(capture.output(sessionInfo()), file.path(currPath, "logs/sessionInfo_CIMseqData.txt")) | /inst/analysis/MGA.analysis_SI.engeRegev/scripts/CIMseqData.R | no_license | jasonserviss/CIMseq.testing | R | false | false | 6,463 | r | packages <- c("CIMseq", "CIMseq.data", "tidyverse", "Seurat", "harmony", "future.apply")
purrr::walk(packages, library, character.only = TRUE)
rm(packages)
#check package version
algoV <- sessionInfo()$otherPkgs$CIMseq$Version
last3 <- paste(strsplit(algoV, "\\.")[[1]][2:4], collapse = "")
if(!as.numeric(last3) >= 100) {
stop("sp.scRNAseq package version too low. Must be >= 0.2.0.0")
}
currPath <- getwd()
#setup spCounts
keep.plates.SI <- c(
"NJA01201", "NJA01202", "NJA01301", "NJA01302", "NJA01501"
)
s <- str_detect(colnames(MGA.Counts), "^s")
samples <- filter(MGA.Meta, !filtered & unique_key %in% keep.plates.SI)$sample
e <- colnames(MGA.Counts) %in% samples
boolSng <- s & e
boolMul <- !s & e
boolRSI <- colnames(RSI.Counts) %in% filter(RSI.Meta, !filtered)$sample
iGenes <- intersect(intersect(rownames(RSI.Counts), rownames(MGA.Counts)), rownames(TMD.Counts))
singlets <- cbind(MGA.Counts[iGenes, boolSng], RSI.Counts[iGenes, boolRSI])
singletERCC <- cbind(MGA.CountsERCC[, boolSng], RSI.CountsERCC[, boolRSI])
multiplets <- MGA.Counts[iGenes, boolMul]
multipletERCC <- MGA.CountsERCC[, boolMul]
#Dimensionality reduction and classification
print(paste0("Starting all cells analysis at ", Sys.time()))
mca <- CreateSeuratObject(raw.data = singlets)
mca@meta.data$source <- case_when(
str_detect(rownames(mca@meta.data), "SRR654") | str_detect(rownames(mca@meta.data), "SRR510") ~ "External",
str_detect(rownames(mca@meta.data), "NJA") | str_detect(rownames(mca@meta.data), "NJD") ~ "Enge",
TRUE ~ "error"
)
mca <- NormalizeData(
object = mca, normalization.method = "LogNormalize", scale.factor = 1e6
)
mca <- FindVariableGenes(
object = mca, mean.function = ExpMean, dispersion.function = LogVMR,
do.plot = FALSE, x.low.cutoff = 1, y.cutoff = 1
)
mca <- ScaleData(
object = mca, genes.use = mca@var.genes, display.progress = FALSE, do.par = TRUE,
num.cores = 4
)
mca <- RunPCA(
object = mca, pc.genes = mca@var.genes, pcs.compute = 100, do.print = FALSE
)
# DimPlot(
# object = mca, reduction.use = "pca", dim.1 = 1, dim.2 = 2,
# no.legend = FALSE, do.return = TRUE, group.by = "source",
# vector.friendly = FALSE, pt.size = 1
# )
# mca <- JackStraw(object = mca, num.replicate = 100, display.progress = TRUE, num.pc = 50)
# mca <- JackStrawPlot(object = mca, PCs = 1:50)
# PCp <- mca@dr$pca@jackstraw@overall.p.values
# pcs <- PCp[PCp[, 2] < 10^-6, 1]
# pcs <- c(
# 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
# 18, 19, 20, 21, 22, 23, 24, 25, 26
# )
pcs <- 1:50
######################################################################################
#check for PC
pc.cor <- cor(mca@dr$pca@cell.embeddings, as.numeric(as.factor(mca@meta.data$source)))
#PC9 seems to be the culprit
pcs <- pcs[!pcs %in% which(pc.cor > 0.25)]
######################################################################################
print(paste0("Using ", max(pcs), " principal components."))
#PCElbowPlot(object = mca, num.pc = 100) + scale_x_continuous(breaks = seq(0, 100, 5))
mca <- RunUMAP(
object = mca, reduction.use = "pca", dims.use = pcs, min_dist = 0.3,
n_neighbors = 15, seed.use = 9823493
)
#mca <- RunTSNE(mca, dims.use = pcs, gene.use = mca@var.genes, seed.use = 78239)
mca <- FindClusters(
object = mca, reduction.type = "pca", dims.use = pcs, resolution = 0.65,
n.start = 100, n.iter = 1000, nn.eps = 0, k.param = 30, prune.SNN = 1/15,
algorithm = 1, save.SNN = TRUE, print.output = FALSE, plot.SNN = FALSE,
force.recalc = TRUE, random.seed = 93820
)
# DimPlot(
# object = mca, reduction.use = "umap", no.legend = FALSE, do.return = TRUE,
# vector.friendly = FALSE, pt.size = 1
# ) + scale_colour_manual(values = col40())
#
# mca@meta.data %>%
# group_by(source, res.0.6) %>%
# summarize(n = n()) %>%
# ungroup() %>%
# group_by(source) %>%
# mutate(`%` = n / sum(n) * 100) %>%
# ggplot() +
# geom_bar(aes(res.0.6, `%`, fill = source), stat = "identity", position = position_dodge(width = 1)) +
# facet_wrap(~res.0.6, scales = "free") +
# labs(x = "Class", y = "% of dataset")
#
# mca@meta.data %>%
# count(source, res.0.6) %>%
# ggplot() +
# geom_bar(aes(res.0.6, n, fill = source), stat = "identity", position = position_dodge(width = 1)) +
# facet_wrap(~res.0.6, scales = "free") +
# labs(x = "Class", y = "Count")
#
# mca@dr$umap@cell.embeddings %>%
# matrix_to_tibble("sample") %>%
# mutate(source = case_when(
# str_detect(sample, "SRR654") ~ "Tabula Muris",
# str_detect(sample, "SRR510") ~ "Regev",
# TRUE ~ "Enge"
# )) %>%
# sample_n(nrow(.), FALSE) %>%
# ggplot() +
# geom_point(aes(UMAP1, UMAP2, colour = source), alpha = 0.75)
#
# FeaturePlot(
# mca,
# c("Lgr5", "Ptprc", "Chga", "Dclk1", "Atoh1", "Lyz1", "Alpi", "Mki67"),
# reduction.use = "umap", dark.theme = FALSE, pt.size = 0.1,
# vector.friendly = FALSE
# )
#
# FeaturePlot(
# mca,
# c("Lgr5", "Alpi", "Mki67"),
# reduction.use = "umap", dark.theme = FALSE, pt.size = 0.1,
# vector.friendly = FALSE
# )
#
# FeaturePlot(
# mca,
# c("Plet1"),
# reduction.use = "umap", dark.theme = FALSE, pt.size = 0.1,
# vector.friendly = FALSE
# )
# Identify per-cluster marker genes: positive markers only, requiring a minimum
# detection-fraction difference of 0.25 and a fold change of at least 1.5,
# scored with the ROC test.
markers <- FindAllMarkers(
  object = mca, only.pos = TRUE, min.diff.pct = 0.25, logfc.threshold = log(1.5),
  test.use = "roc"
)
# DoHeatmap(
#   object = mca, genes.use = unique(markers$gene), slim.col.label = TRUE, remove.key = TRUE,
#   group.label.rot = TRUE, cex.row = 1
# )
print(paste0("Done all cells analysis at ", Sys.time()))
# Keep only singlet cells that survived Seurat QC (i.e. present in mca@data),
# and keep the ERCC spike-in counts in sync with the retained singlets.
singlets <- singlets[, colnames(singlets) %in% colnames(mca@data)]
singletERCC <- singletERCC[, colnames(singletERCC) %in% colnames(singlets)]
# Cluster identities, named by cell barcode. FetchData is hoisted so it is
# called once instead of three times.
# NOTE(review): `idx` maps ident rows to singlet columns; values and names are
# subset with the same index, so the name/value pairing stays consistent even
# though the resulting order follows the ident table, not colnames(singlets) --
# confirm downstream CIMseqSinglets matches classes by name.
ident_df <- FetchData(mca, "ident")
idx <- match(rownames(ident_df), colnames(singlets))
classes <- as.character(ident_df[[1]])[idx]
names(classes) <- rownames(ident_df)[idx]
# Restrict deconvolution features to the marker genes found above.
var.genes <- unique(markers$gene)
# NOTE(review): `select` shadows dplyr::select if dplyr is attached; name kept
# for compatibility with any later use of this variable.
select <- which(rownames(singlets) %in% var.genes)
# UMAP embedding used as the dimensionality reduction for CIMseq.
dim.red <- mca@dr$umap@cell.embeddings
colnames(dim.red) <- NULL
# Setup CIMseqData objects.
cObjSng <- CIMseqSinglets(singlets, singletERCC, dim.red, classes)
cObjMul <- CIMseqMultiplets(multiplets, multipletERCC, select)
# Save. dir.create() replaces the original system('mkdir data'), which was not
# portable (fails on Windows) and created the directory in the working
# directory even though save() writes under currPath.
if (!dir.exists(file.path(currPath, "data"))) {
  dir.create(file.path(currPath, "data"), recursive = TRUE)
}
print(paste0("saving data to ", currPath, "."))
save(cObjSng, cObjMul, file = file.path(currPath, "data/CIMseqData.rda"))
# Ensure the logs directory exists for the sessionInfo written just below.
if (!dir.exists(file.path(currPath, "logs"))) {
  dir.create(file.path(currPath, "logs"), recursive = TRUE)
}
#write logs
writeLines(capture.output(sessionInfo()), file.path(currPath, "logs/sessionInfo_CIMseqData.txt")) |
#' Enframe a list of file paths into a tibble
#'
#' @param folder string, path to folder where the files are stored
#' @param pattern string, a pattern used to identify the files, see \code{\link[base]{list.files}}
#' @param recursive logical, should the listing recurse into directories?
#' @param sep string, separator that is used to split up the basename of the files into different columns.
#' If character, sep is interpreted as a regular expression. The default value is a regular expression that matches any sequence of non-alphanumeric values.
#' If numeric, sep is interpreted as character positions to split at. Positive values start at 1 at the far-left of the string; negative value start at -1 at the far-right of the string. The length of sep should be one less than sep_into.
#' see \code{\link[tidyr]{separate}}
#' @param sep_into string, names of new variables to create as character vector. Use NA to omit the variable in the output.
#'
#' @return A tibble with a \code{path} column and one column per filename component
#'   (named by \code{sep_into}, or \code{c1}, \code{c2}, ... when \code{sep_into} is \code{NULL});
#'   if \code{year}, \code{month} and \code{day} columns result from the split, a parsed
#'   \code{date} column is added.
#' @export
#'
#' @examples
#' folder <- 'data/files'
#' df <- rg_enframe(folder, pattern = '.csv$', sep = '_', sep_into = c(NA, 'year', 'type'))
rg_enframe <- function(folder, pattern = '.tif$', recursive = FALSE, sep = '_', sep_into = NULL) {
  files <- list.files(folder, pattern = pattern, full.names = TRUE, recursive = recursive)
  if (is.null(sep_into)) {
    # Fail loudly here: with zero files, max() below would warn (-Inf) and the
    # subsequent sequence construction would error with an unhelpful message.
    if (length(files) == 0) {
      stop("No files matching '", pattern, "' found in '", folder, "'", call. = FALSE)
    }
    # The widest split across all basenames determines how many placeholder
    # column names (c1, c2, ...) are needed.
    n_tokens <- lengths(str_split(basename(files), pattern = sep))
    sep_into <- str_c('c', seq_len(max(n_tokens)))
  }
  result <-
    files %>%
    enframe(NULL, 'path') %>%
    # Strip the matched extension pattern before splitting the basename.
    mutate(filename = str_replace(basename(path), pattern, '')) %>%
    separate(filename, into = sep_into, sep = sep)
  # When the split yields year/month/day components, also derive a Date column.
  if (length(setdiff(c('year', 'month', 'day'), colnames(result))) == 0) {
    result <- mutate(result, date = lubridate::ymd(str_c(year, month, day)))
  }
  return(result)
}
| /R/enframe.R | no_license | sitscholl/Rgadgets | R | false | false | 1,796 | r | #' Enframe a list of file paths into a tibble
#'
#' @param folder string, path to folder where the files are stored
#' @param pattern string, a pattern used to identify the files, see \code{\link[base]{list.files}}
#' @param recursive logical, should the listing recurse into directories?
#' @param sep string, separator that is used to split up the basename of the files into different columns.
#' If character, sep is interpreted as a regular expression. The default value is a regular expression that matches any sequence of non-alphanumeric values.
#' If numeric, sep is interpreted as character positions to split at. Positive values start at 1 at the far-left of the string; negative value start at -1 at the far-right of the string. The length of sep should be one less than sep_into.
#' see \code{\link[tidyr]{separate}}
#' @param sep_into string, names of new variables to create as character vector. Use NA to omit the variable in the output.
#'
#' @return
#' @export
#'
#' @examples
#' folder <- 'data/files'
#' df <- rg_enframe(folder, pattern = '.csv$', sep = '_', sep_into = c(NA, 'year', 'type'))
rg_enframe <- function(folder, pattern = '.tif$', recursive = FALSE, sep = '_', sep_into = NULL) {
  files <- list.files(folder, pattern = pattern, full.names = TRUE, recursive = recursive)
  if (is.null(sep_into)) {
    # Guard the empty case: max() over an empty vector would warn and then
    # error cryptically when building the placeholder names.
    if (length(files) == 0) {
      stop("No files matching '", pattern, "' found in '", folder, "'", call. = FALSE)
    }
    # Generate enough placeholder names (c1, c2, ...) for the widest basename split.
    n_tokens <- lengths(str_split(basename(files), pattern = sep))
    sep_into <- str_c('c', seq_len(max(n_tokens)))
  }
  result <-
    files %>%
    enframe(NULL, 'path') %>%
    # Remove the matched extension pattern, then split the remaining basename.
    mutate(filename = str_replace(basename(path), pattern, '')) %>%
    separate(filename, into = sep_into, sep = sep)
  # Derive a Date column whenever year/month/day components were produced.
  if (length(setdiff(c('year', 'month', 'day'), colnames(result))) == 0) {
    result <- mutate(result, date = lubridate::ymd(str_c(year, month, day)))
  }
  return(result)
}
|
context("metrics")
source("utils.R")
# Compiling a model with a list of metric functions should succeed.
test_succeeds("metrics can be used when compiling models", {
  model <- define_model()
  compile(
    model,
    loss = 'binary_crossentropy',
    optimizer = optimizer_sgd(),
    metrics = list(
      metric_binary_accuracy,
      metric_binary_crossentropy,
      metric_hinge
    )
  )
})
# Metric functions should also be callable directly on backend tensors,
# outside of a compile() call. The calls below only exercise the metrics;
# no values are asserted.
test_succeeds("metrics be can called directly", {
  K <- backend()
  # Two random 10x10 matrices lifted to backend constants stand in for
  # ground truth and predictions.
  y_true <- K$constant(matrix(runif(100), nrow = 10, ncol = 10))
  y_pred <- K$constant(matrix(runif(100), nrow = 10, ncol = 10))
  metric_binary_accuracy(y_true, y_pred)
  metric_binary_crossentropy(y_true, y_pred)
  metric_hinge(y_true, y_pred)
}) | /tests/testthat/test-metrics.R | no_license | martinstuder/keras-1 | R | false | false | 647 | r | context("metrics")
source("utils.R")
test_succeeds("metrics can be used when compiling models", {
define_model() %>%
compile(
loss='binary_crossentropy',
optimizer = optimizer_sgd(),
metrics=list(
metric_binary_accuracy,
metric_binary_crossentropy,
metric_hinge
)
)
})
test_succeeds("metrics be can called directly", {
K <- backend()
y_true <- K$constant(matrix(runif(100), nrow = 10, ncol = 10))
y_pred <- K$constant(matrix(runif(100), nrow = 10, ncol = 10))
metric_binary_accuracy(y_true, y_pred)
metric_binary_crossentropy(y_true, y_pred)
metric_hinge(y_true, y_pred)
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search.R
\name{fit_tuner}
\alias{fit_tuner}
\title{Start the search for the best hyperparameter configuration.
The call to search has the same signature as model.fit().}
\usage{
fit_tuner(
tuner = NULL,
x = NULL,
y = NULL,
steps_per_epoch = NULL,
epochs = NULL,
validation_data = NULL,
validation_steps = NULL,
...
)
}
\arguments{
\item{tuner}{A tuner object}
\item{x}{Vector, matrix, or array of training data (or list if the model has multiple inputs).
If all inputs in the model are named, you can also pass a list mapping input names to data. x can be NULL
(default) if feeding from framework-native tensors (e.g. TensorFlow data tensors).}
\item{y}{Vector, matrix, or array of target (label) data (or list if the model has multiple outputs).
If all outputs in the model are named, you can also pass a list mapping output names to data. y can be
NULL (default) if feeding from framework-native tensors (e.g. TensorFlow data tensors).}
\item{steps_per_epoch}{Integer. Total number of steps (batches of samples) to yield from generator before
declaring one epoch finished and starting the next epoch. It should typically be equal to
ceil(num_samples / batch_size). Optional for Sequence: if unspecified, will use the len(generator)
as a number of steps.}
\item{epochs}{Integer. Number of epochs to train the model. Note that in conjunction with initial_epoch,
epochs is to be understood as "final epoch". The model is not trained for a number of iterations
given by epochs, but merely until the epoch of index epochs is reached.}
\item{validation_data}{Data on which to evaluate the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. validation_data will override validation_split.
validation_data could be: - tuple (x_val, y_val) of Numpy arrays or
tensors - tuple (x_val, y_val, val_sample_weights) of Numpy arrays - dataset or a dataset iterator}
\item{validation_steps}{Only relevant if steps_per_epoch is specified. Total number of steps (batches of samples)
to validate before stopping.}
\item{...}{Some additional arguments}
}
\description{
Models are built iteratively by calling the model-building function, which populates the hyperparameter space
(search space) tracked by the hp object. The tuner progressively explores the space, recording metrics for
each configuration.
}
| /man/fit_tuner.Rd | no_license | dA505819/kerastuneR | R | false | true | 2,425 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search.R
\name{fit_tuner}
\alias{fit_tuner}
\title{Start the search for the best hyperparameter configuration.
The call to search has the same signature as model.fit().}
\usage{
fit_tuner(
tuner = NULL,
x = NULL,
y = NULL,
steps_per_epoch = NULL,
epochs = NULL,
validation_data = NULL,
validation_steps = NULL,
...
)
}
\arguments{
\item{tuner}{A tuner object}
\item{x}{Vector, matrix, or array of training data (or list if the model has multiple inputs).
If all inputs in the model are named, you can also pass a list mapping input names to data. x can be NULL
(default) if feeding from framework-native tensors (e.g. TensorFlow data tensors).}
\item{y}{Vector, matrix, or array of target (label) data (or list if the model has multiple outputs).
If all outputs in the model are named, you can also pass a list mapping output names to data. y can be
NULL (default) if feeding from framework-native tensors (e.g. TensorFlow data tensors).}
\item{steps_per_epoch}{Integer. Total number of steps (batches of samples) to yield from generator before
declaring one epoch finished and starting the next epoch. It should typically be equal to
ceil(num_samples / batch_size). Optional for Sequence: if unspecified, will use the len(generator)
as a number of steps.}
\item{epochs}{Integer. Number of epochs to train the model. Note that in conjunction with initial_epoch,
epochs is to be understood as "final epoch". The model is not trained for a number of iterations
given by epochs, but merely until the epoch of index epochs is reached.}
\item{validation_data}{Data on which to evaluate the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. validation_data will override validation_split.
validation_data could be: - tuple (x_val, y_val) of Numpy arrays or
tensors - tuple (x_val, y_val, val_sample_weights) of Numpy arrays - dataset or a dataset iterator}
\item{validation_steps}{Only relevant if steps_per_epoch is specified. Total number of steps (batches of samples)
to validate before stopping.}
\item{...}{Some additional arguments}
}
\description{
Models are built iteratively by calling the model-building function, which populates the hyperparameter space
(search space) tracked by the hp object. The tuner progressively explores the space, recording metrics for
each configuration.
}
|
# Exercise 5: dplyr grouped operations
# Install the `"nycflights13"` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
#install.packages("nycflights13") # should be done already
library("nycflights13")
library("dplyr")
# What was the average departure delay in each month?
# Save this as a data frame `dep_delay_by_month`
# Hint: you'll have to perform a grouping operation then summarizing your data
dep_delay_by_month <- flights %>%
  group_by(month) %>%
  summarize(
    # na.rm = TRUE drops the NA delays of cancelled flights; without it
    # every monthly mean would be NA
    avg_dep_delay = mean(dep_delay, na.rm = TRUE)
  )
dep_delay_by_month
View(flights)
# Which month had the greatest average departure delay?
# Keep only the row(s) whose average delay equals the maximum.
dep_delay_by_month %>%
  filter(avg_dep_delay == max(avg_dep_delay))
# If your above data frame contains just two columns (e.g., "month", and "delay"
# in that order), you can create a scatterplot by passing that data frame to the
# `plot()` function
plot(dep_delay_by_month)
# To which destinations were the average arrival delays the highest?
# Hint: you'll have to perform a grouping operation then summarize your data
# You can use the `head()` function to view just the first few rows
flights %>%
  group_by(dest) %>%
  summarize(
    avg_arr_delay = mean(arr_delay, na.rm = TRUE)
  ) %>%
  # arrange(-x) sorts in descending order of avg_arr_delay
  arrange(-avg_arr_delay)
# You can look up these airports in the `airports` data frame!
View(airports)
airports %>%
  filter(faa == "CAE")
# Which city was flown to with the highest average speed?
# The question asks for the highest *average* speed per destination, so compute
# the mean speed grouped by `dest` (the original kept only the single fastest
# flight), then look up the airport's name rather than its timezone. na.rm
# handles flights with missing air_time instead of na.omit(), which dropped
# rows on any NA column of the joined frame.
most_com_cty <- flights %>%
  mutate(speed = distance / air_time) %>%
  group_by(dest) %>%
  summarize(avg_speed = mean(speed, na.rm = TRUE)) %>%
  filter(avg_speed == max(avg_speed)) %>%
  left_join(airports, by = c("dest" = "faa")) %>%
  select(name)
most_com_cty
| /chapter-11-exercises/exercise-5/exercise.R | permissive | allykrinsky/book-exercises | R | false | false | 1,665 | r | # Exercise 5: dplyr grouped operations
# Install the `"nycflights13"` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
#install.packages("nycflights13") # should be done already
library("nycflights13")
library("dplyr")
# What was the average departure delay in each month?
# Save this as a data frame `dep_delay_by_month`
# Hint: you'll have to perform a grouping operation then summarizing your data
dep_delay_by_month <- flights %>%
group_by(month) %>%
summarize(
avg_dep_delay = mean(dep_delay, na.rm = TRUE)
)
dep_delay_by_month
View(flights)
# Which month had the greatest average departure delay?
dep_delay_by_month %>%
filter(avg_dep_delay == max(avg_dep_delay))
# If your above data frame contains just two columns (e.g., "month", and "delay"
# in that order), you can create a scatterplot by passing that data frame to the
# `plot()` function
plot(dep_delay_by_month)
# To which destinations were the average arrival delays the highest?
# Hint: you'll have to perform a grouping operation then summarize your data
# You can use the `head()` function to view just the first few rows
flights %>%
group_by(dest) %>%
summarize(
avg_arr_delay = mean(arr_delay, na.rm = TRUE)
) %>%
arrange(-avg_arr_delay)
# You can look up these airports in the `airports` data frame!
View(airports)
airports %>%
filter(faa == "CAE")
# Which city was flown to with the highest average speed?
# Compute the mean speed per destination (the original kept only the single
# fastest flight, which does not answer the "average speed" question), then
# join to `airports` to report the airport's name instead of its timezone.
most_com_cty <- flights %>%
  mutate(speed = distance / air_time) %>%
  group_by(dest) %>%
  summarize(avg_speed = mean(speed, na.rm = TRUE)) %>%
  filter(avg_speed == max(avg_speed)) %>%
  left_join(airports, by = c("dest" = "faa")) %>%
  select(name)
most_com_cty
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{sic_to_ff38}
\alias{sic_to_ff38}
\title{Convert SIC codes to Fama-French 38 industry codes}
\usage{
sic_to_ff38(SIC)
}
\arguments{
\item{SIC}{A numeric vector of SIC codes}
}
\value{
A numeric vector of Fama-French 38 industry portfolio codes
}
\description{
Converts SIC codes to their corresponding industry code using the Fama-French
38 industry portfolio classifications
}
\examples{
x <- c(800,2000,4537)
sic_to_ff38(x)
}
| /Merge Data Tools/indclass/man/sic_to_ff38.Rd | no_license | jandres01/Stock-Prediction- | R | false | true | 525 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{sic_to_ff38}
\alias{sic_to_ff38}
\title{Convert SIC codes to Fama French 38 industry codes}
\usage{
sic_to_ff38(SIC)
}
\arguments{
\item{SIC}{A numeric vector of SIC codes}
}
\value{
A numeric vector of Fama-French 38 industry portfolio codes
}
\description{
Converts SIC codes to their corresponding industry code using the Fama-French
38 industry portfolio classifications
}
\examples{
x <- c(800,2000,4537)
sic_to_ff38(x)
}
|
# setup
options(scipen=999)  # suppress scientific notation in printed output
library(mgcv)
library(gratia)
##################################################
# Fit a GAM of price over a 2-D smooth of latitude/longitude (a spatial
# trend surface), with smoothness selected by REML.
test <- mgcv::gam(price ~ s(lat,lng), data = data_gps_sample_dublin, method = "REML")
# ggplot-based partial-effect plot of the spatial smooth.
gratia::draw(test)
# Base mgcv visualisations of the fitted surface: default perspective,
# response-scale contours, perspective with +/- 2 SE surfaces, and contours
# masking regions far from any data point (too.far).
vis.gam(test)
vis.gam(test,type="response", plot.type="contour")
vis.gam(test, view = c("lat","lng"), plot.type = "persp", se = 2)
vis.gam(test, view = c("lat","lng"),
        plot.type = "contour", too.far = 0.05)
# Residual diagnostics on the response scale.
gratia::appraise(test, type = "response")
| /script/gam_gps.R | no_license | damien-dupre/accessibility_evolution | R | false | false | 474 | r | # setup
options(scipen=999)
library(mgcv)
library(gratia)
##################################################
test <- mgcv::gam(price ~ s(lat,lng), data = data_gps_sample_dublin, method = "REML")
gratia::draw(test)
vis.gam(test)
vis.gam(test,type="response", plot.type="contour")
vis.gam(test, view = c("lat","lng"), plot.type = "persp", se = 2)
vis.gam(test, view = c("lat","lng"),
plot.type = "contour", too.far = 0.05)
gratia::appraise(test, type = "response")
|
library(evolvability)
### Name: meanStdG
### Title: Mean standardize a G-matrix
### Aliases: meanStdG
### Keywords: array algebra
### ** Examples
# `<-` is the conventional R assignment operator; `=` at top level works but
# is discouraged by the style guides. Build a 3x3 G-matrix and the trait
# means, then mean-standardize.
G <- matrix(c(1, 1, 0, 1, 4, 1, 0, 1, 2), ncol = 3)
means <- c(1, 1.4, 2.1)
meanStdG(G, means)
| /data/genthat_extracted_code/evolvability/examples/meanStdG.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 247 | r | library(evolvability)
### Name: meanStdG
### Title: Mean standardize a G-matrix
### Aliases: meanStdG
### Keywords: array algebra
### ** Examples
G = matrix(c(1, 1, 0, 1, 4, 1, 0, 1, 2), ncol = 3)
means = c(1, 1.4, 2.1)
meanStdG(G, means)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/col_is_logical.R
\name{col_is_logical}
\alias{col_is_logical}
\alias{expect_col_is_logical}
\alias{test_col_is_logical}
\title{Do the columns contain logical values?}
\usage{
col_is_logical(
x,
columns,
actions = NULL,
step_id = NULL,
label = NULL,
brief = NULL,
active = TRUE
)
expect_col_is_logical(object, columns, threshold = 1)
test_col_is_logical(object, columns, threshold = 1)
}
\arguments{
\item{x}{A data frame, tibble (\code{tbl_df} or \code{tbl_dbi}), Spark DataFrame
(\code{tbl_spark}), or, an agent object of class \code{ptblank_agent} that is created
with \code{\link[=create_agent]{create_agent()}}.}
\item{columns}{The column (or a set of columns, provided as a character
vector) to which this validation should be applied.}
\item{actions}{A list containing threshold levels so that the validation step
can react accordingly when exceeding the set levels. This is to be created
with the \code{\link[=action_levels]{action_levels()}} helper function.}
\item{step_id}{One or more optional identifiers for the single or multiple
validation steps generated from calling a validation function. The use of
step IDs serves to distinguish validation steps from each other and provide
an opportunity for supplying a more meaningful label compared to the step
index. By default this is \code{NULL}, and \strong{pointblank} will automatically
generate the step ID value (based on the step index) in this case. One or
more values can be provided, and the exact number of ID values should (1)
match the number of validation steps that the validation function call will
produce (influenced by the number of \code{columns} provided), (2) be an ID
string not used in any previous validation step, and (3) be a vector with
unique values.}
\item{label}{An optional label for the validation step. This label appears in
the agent report and for the best appearance it should be kept short.}
\item{brief}{An optional, text-based description for the validation step. If
nothing is provided here then an \emph{autobrief} is generated by the agent,
using the language provided in \code{\link[=create_agent]{create_agent()}}'s \code{lang} argument (which
defaults to \code{"en"} or English). The \emph{autobrief} incorporates details of the
validation step so it's often the preferred option in most cases (where a
\code{label} might be better suited to succinctly describe the validation).}
\item{active}{A logical value indicating whether the validation step should
be active. If the step function is working with an agent, \code{FALSE} will make
the validation step inactive (still reporting its presence and keeping
indexes for the steps unchanged). If the step function will be operating
directly on data, then any step with \code{active = FALSE} will simply pass the
data through with no validation whatsoever. The default for this is \code{TRUE}.}
\item{object}{A data frame, tibble (\code{tbl_df} or \code{tbl_dbi}), or Spark
DataFrame (\code{tbl_spark}) that serves as the target table for the expectation
function or the test function.}
\item{threshold}{A simple failure threshold value for use with the
expectation (\code{expect_}) and the test (\code{test_}) function variants. By
default, this is set to \code{1} meaning that any single unit of failure in data
validation results in an overall test failure. Whole numbers beyond \code{1}
indicate that any failing units up to that absolute threshold value will
result in a succeeding \strong{testthat} test or evaluate to \code{TRUE}. Likewise,
fractional values (between \code{0} and \code{1}) act as a proportional failure
threshold, where \code{0.15} means that 15 percent of failing test units results
in an overall test failure.}
}
\value{
For the validation function, the return value is either a
\code{ptblank_agent} object or a table object (depending on whether an agent
object or a table was passed to \code{x}). The expectation function invisibly
returns its input but, in the context of testing data, the function is
called primarily for its potential side-effects (e.g., signaling failure).
The test function returns a logical value.
}
\description{
The \code{col_is_logical()} validation function, the \code{expect_col_is_logical()}
expectation function, and the \code{test_col_is_logical()} test function all check
whether one or more columns in a table is of the logical (\code{TRUE}/\code{FALSE})
type. Like many of the \verb{col_is_*()}-type functions in \strong{pointblank}, the
only requirement is a specification of the column names. The validation
function can be used directly on a data table or with an \emph{agent} object
(technically, a \code{ptblank_agent} object) whereas the expectation and test
functions can only be used with a data table. The types of data tables that
can be used include data frames, tibbles, database tables (\code{tbl_dbi}), and
Spark DataFrames (\code{tbl_spark}). Each validation step or expectation will
operate over a single test unit, which is whether the column is a
logical-type column or not.
}
\details{
If providing multiple column names, the result will be an expansion of
validation steps to that number of column names (e.g., \code{vars(col_a, col_b)}
will result in the entry of two validation steps). Aside from column names in
quotes and in \code{vars()}, \strong{tidyselect} helper functions are available for
specifying columns. They are: \code{starts_with()}, \code{ends_with()}, \code{contains()},
\code{matches()}, and \code{everything()}.
Often, we will want to specify \code{actions} for the validation. This argument,
present in every validation function, takes a specially-crafted list object
that is best produced by the \code{\link[=action_levels]{action_levels()}} function. Read that function's
documentation for the lowdown on how to create reactions to above-threshold
failure levels in validation. The basic gist is that you'll want at least a
single threshold level (specified as either the fraction of test units
failed, or, an absolute value), often using the \code{warn_at} argument. This is
especially true when \code{x} is a table object because, otherwise, nothing
happens. For the \verb{col_is_*()}-type functions, using
\code{action_levels(warn_at = 1)} or \code{action_levels(stop_at = 1)} are good choices
depending on the situation (the first produces a warning, the other
\code{stop()}s).
Want to describe this validation step in some detail? Keep in mind that this
is only useful if \code{x} is an \emph{agent}. If that's the case, \code{brief} the agent
with some text that fits. Don't worry if you don't want to do it. The
\emph{autobrief} protocol is kicked in when \code{brief = NULL} and a simple brief will
then be automatically generated.
}
\section{Function ID}{
2-19
}
\examples{
# The `small_table` dataset in the
# package has an `e` column which has
# logical values; the following examples
# will validate that that column is of
# the `logical` class
# A: Using an `agent` with validation
# functions and then `interrogate()`
# Validate that the column `e` has the
# `logical` class
agent <-
create_agent(small_table) \%>\%
col_is_logical(vars(e)) \%>\%
interrogate()
# Determine if this validation
# had no failing test units (1)
all_passed(agent)
# Calling `agent` in the console
# prints the agent's report; but we
# can get a `gt_tbl` object directly
# with `get_agent_report(agent)`
# B: Using the validation function
# directly on the data (no `agent`)
# This way of using validation functions
# acts as a data filter: data is passed
# through but should `stop()` if there
# is a single test unit failing; the
# behavior of side effects can be
# customized with the `actions` option
small_table \%>\%
col_is_logical(vars(e)) \%>\%
dplyr::slice(1:5)
# C: Using the expectation function
# With the `expect_*()` form, we would
# typically perform one validation at a
# time; this is primarily used in
# testthat tests
expect_col_is_logical(
small_table, vars(e)
)
# D: Using the test function
# With the `test_*()` form, we should
# get a single logical value returned
# to us
small_table \%>\%
test_col_is_logical(vars(e))
}
\seealso{
Other validation functions:
\code{\link{col_exists}()},
\code{\link{col_is_character}()},
\code{\link{col_is_date}()},
\code{\link{col_is_factor}()},
\code{\link{col_is_integer}()},
\code{\link{col_is_numeric}()},
\code{\link{col_is_posix}()},
\code{\link{col_schema_match}()},
\code{\link{col_vals_between}()},
\code{\link{col_vals_decreasing}()},
\code{\link{col_vals_equal}()},
\code{\link{col_vals_expr}()},
\code{\link{col_vals_gte}()},
\code{\link{col_vals_gt}()},
\code{\link{col_vals_in_set}()},
\code{\link{col_vals_increasing}()},
\code{\link{col_vals_lte}()},
\code{\link{col_vals_lt}()},
\code{\link{col_vals_not_between}()},
\code{\link{col_vals_not_equal}()},
\code{\link{col_vals_not_in_set}()},
\code{\link{col_vals_not_null}()},
\code{\link{col_vals_null}()},
\code{\link{col_vals_regex}()},
\code{\link{conjointly}()},
\code{\link{rows_distinct}()}
}
\concept{validation functions}
| /man/col_is_logical.Rd | permissive | lcreteig/pointblank | R | false | true | 9,146 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/col_is_logical.R
\name{col_is_logical}
\alias{col_is_logical}
\alias{expect_col_is_logical}
\alias{test_col_is_logical}
\title{Do the columns contain logical values?}
\usage{
col_is_logical(
x,
columns,
actions = NULL,
step_id = NULL,
label = NULL,
brief = NULL,
active = TRUE
)
expect_col_is_logical(object, columns, threshold = 1)
test_col_is_logical(object, columns, threshold = 1)
}
\arguments{
\item{x}{A data frame, tibble (\code{tbl_df} or \code{tbl_dbi}), Spark DataFrame
(\code{tbl_spark}), or, an agent object of class \code{ptblank_agent} that is created
with \code{\link[=create_agent]{create_agent()}}.}
\item{columns}{The column (or a set of columns, provided as a character
vector) to which this validation should be applied.}
\item{actions}{A list containing threshold levels so that the validation step
can react accordingly when exceeding the set levels. This is to be created
with the \code{\link[=action_levels]{action_levels()}} helper function.}
\item{step_id}{One or more optional identifiers for the single or multiple
validation steps generated from calling a validation function. The use of
step IDs serves to distinguish validation steps from each other and provide
an opportunity for supplying a more meaningful label compared to the step
index. By default this is \code{NULL}, and \strong{pointblank} will automatically
generate the step ID value (based on the step index) in this case. One or
more values can be provided, and the exact number of ID values should (1)
match the number of validation steps that the validation function call will
produce (influenced by the number of \code{columns} provided), (2) be an ID
string not used in any previous validation step, and (3) be a vector with
unique values.}
\item{label}{An optional label for the validation step. This label appears in
the agent report and for the best appearance it should be kept short.}
\item{brief}{An optional, text-based description for the validation step. If
nothing is provided here then an \emph{autobrief} is generated by the agent,
using the language provided in \code{\link[=create_agent]{create_agent()}}'s \code{lang} argument (which
defaults to \code{"en"} or English). The \emph{autobrief} incorporates details of the
validation step so it's often the preferred option in most cases (where a
\code{label} might be better suited to succinctly describe the validation).}
\item{active}{A logical value indicating whether the validation step should
be active. If the step function is working with an agent, \code{FALSE} will make
the validation step inactive (still reporting its presence and keeping
indexes for the steps unchanged). If the step function will be operating
directly on data, then any step with \code{active = FALSE} will simply pass the
data through with no validation whatsoever. The default for this is \code{TRUE}.}
\item{object}{A data frame, tibble (\code{tbl_df} or \code{tbl_dbi}), or Spark
DataFrame (\code{tbl_spark}) that serves as the target table for the expectation
function or the test function.}
\item{threshold}{A simple failure threshold value for use with the
expectation (\code{expect_}) and the test (\code{test_}) function variants. By
default, this is set to \code{1} meaning that any single unit of failure in data
validation results in an overall test failure. Whole numbers beyond \code{1}
indicate that any failing units up to that absolute threshold value will
result in a succeeding \strong{testthat} test or evaluate to \code{TRUE}. Likewise,
fractional values (between \code{0} and \code{1}) act as a proportional failure
threshold, where \code{0.15} means that 15 percent of failing test units results
in an overall test failure.}
}
\value{
For the validation function, the return value is either a
\code{ptblank_agent} object or a table object (depending on whether an agent
object or a table was passed to \code{x}). The expectation function invisibly
returns its input but, in the context of testing data, the function is
called primarily for its potential side-effects (e.g., signaling failure).
The test function returns a logical value.
}
\description{
The \code{col_is_logical()} validation function, the \code{expect_col_is_logical()}
expectation function, and the \code{test_col_is_logical()} test function all check
whether one or more columns in a table is of the logical (\code{TRUE}/\code{FALSE})
type. Like many of the \verb{col_is_*()}-type functions in \strong{pointblank}, the
only requirement is a specification of the column names. The validation
function can be used directly on a data table or with an \emph{agent} object
(technically, a \code{ptblank_agent} object) whereas the expectation and test
functions can only be used with a data table. The types of data tables that
can be used include data frames, tibbles, database tables (\code{tbl_dbi}), and
Spark DataFrames (\code{tbl_spark}). Each validation step or expectation will
operate over a single test unit, which is whether the column is a
logical-type column or not.
}
\details{
If providing multiple column names, the result will be an expansion of
validation steps to that number of column names (e.g., \code{vars(col_a, col_b)}
will result in the entry of two validation steps). Aside from column names in
quotes and in \code{vars()}, \strong{tidyselect} helper functions are available for
specifying columns. They are: \code{starts_with()}, \code{ends_with()}, \code{contains()},
\code{matches()}, and \code{everything()}.
Often, we will want to specify \code{actions} for the validation. This argument,
present in every validation function, takes a specially-crafted list object
that is best produced by the \code{\link[=action_levels]{action_levels()}} function. Read that function's
documentation for the lowdown on how to create reactions to above-threshold
failure levels in validation. The basic gist is that you'll want at least a
single threshold level (specified as either the fraction of test units
failed, or, an absolute value), often using the \code{warn_at} argument. This is
especially true when \code{x} is a table object because, otherwise, nothing
happens. For the \verb{col_is_*()}-type functions, using
\code{action_levels(warn_at = 1)} or \code{action_levels(stop_at = 1)} are good choices
depending on the situation (the first produces a warning, the other
\code{stop()}s).
Want to describe this validation step in some detail? Keep in mind that this
is only useful if \code{x} is an \emph{agent}. If that's the case, \code{brief} the agent
with some text that fits. Don't worry if you don't want to do it. The
\emph{autobrief} protocol is kicked in when \code{brief = NULL} and a simple brief will
then be automatically generated.
}
\section{Function ID}{
2-19
}
\examples{
# The `small_table` dataset in the
# package has an `e` column which has
# logical values; the following examples
# will validate that that column is of
# the `logical` class
# A: Using an `agent` with validation
# functions and then `interrogate()`
# Validate that the column `e` has the
# `logical` class
agent <-
create_agent(small_table) \%>\%
col_is_logical(vars(e)) \%>\%
interrogate()
# Determine if this validation
# had no failing test units (1)
all_passed(agent)
# Calling `agent` in the console
# prints the agent's report; but we
# can get a `gt_tbl` object directly
# with `get_agent_report(agent)`
# B: Using the validation function
# directly on the data (no `agent`)
# This way of using validation functions
# acts as a data filter: data is passed
# through but should `stop()` if there
# is a single test unit failing; the
# behavior of side effects can be
# customized with the `actions` option
small_table \%>\%
col_is_logical(vars(e)) \%>\%
dplyr::slice(1:5)
# C: Using the expectation function
# With the `expect_*()` form, we would
# typically perform one validation at a
# time; this is primarily used in
# testthat tests
expect_col_is_logical(
small_table, vars(e)
)
# D: Using the test function
# With the `test_*()` form, we should
# get a single logical value returned
# to us
small_table \%>\%
test_col_is_logical(vars(e))
}
\seealso{
Other validation functions:
\code{\link{col_exists}()},
\code{\link{col_is_character}()},
\code{\link{col_is_date}()},
\code{\link{col_is_factor}()},
\code{\link{col_is_integer}()},
\code{\link{col_is_numeric}()},
\code{\link{col_is_posix}()},
\code{\link{col_schema_match}()},
\code{\link{col_vals_between}()},
\code{\link{col_vals_decreasing}()},
\code{\link{col_vals_equal}()},
\code{\link{col_vals_expr}()},
\code{\link{col_vals_gte}()},
\code{\link{col_vals_gt}()},
\code{\link{col_vals_in_set}()},
\code{\link{col_vals_increasing}()},
\code{\link{col_vals_lte}()},
\code{\link{col_vals_lt}()},
\code{\link{col_vals_not_between}()},
\code{\link{col_vals_not_equal}()},
\code{\link{col_vals_not_in_set}()},
\code{\link{col_vals_not_null}()},
\code{\link{col_vals_null}()},
\code{\link{col_vals_regex}()},
\code{\link{conjointly}()},
\code{\link{rows_distinct}()}
}
\concept{validation functions}
|
x <- 5
#Hifoo.
foo <- x+5
jhgjh | /git_handout.R | no_license | ticklishgorilla13/Faido | R | false | false | 33 | r | x <- 5
#Hifoo.
foo <- x+5
jhgjh |
#' @title Traverse node for query alternatives and download data.
#'
#' @description Goes through the dataNode and asks the user for input for all
#' variables and then puts this together into a query for \link{get_pxweb_data}.
#'
#' @param dataNode Bottom node in the node tree: a list of the node itself and
#'   its display name.
#' @param test_input Vector of length 3 used to answer the first 3 questions
#'   (download / clean / print code) when testing; variable alternatives are
#'   then answered with "1".
#' @param ... further parameters. These are currently ignored.
#'
#' @return The downloaded (and optionally cleaned) data, or invisibly
#'   \code{NULL} if the user declined the download.
#'
download_pxweb <- function(dataNode, test_input = NULL, ...) {
  # Assertions: test_input is either absent (interactive) or answers exactly
  # the 3 yes/no prompts below. (Scalar condition, so use short-circuit ||.)
  stopifnot(length(test_input) == 0 || length(test_input) == 3)
  # Define test answers
  if (length(test_input) == 0) {
    testInputDown <- testInputClean <- character(0)
    testInputCode <- testInputVarAlt <- character(0)
  } else {
    testInputDown <- test_input[1]
    testInputClean <- test_input[2]
    testInputCode <- test_input[3]
    testInputVarAlt <- "1"
  }
  dataNodeName <- dataNode[[2]]
  dataNode <- dataNode[[1]]
  # Ask if the file should be downloaded
  inputDown <- findData.input(
    type = "yesno",
    input = str_c("Do you want to download '", dataNodeName, "'?", sep = ""),
    test_input = testInputDown)
  download <- inputDown == "y"
  inputClean <- findData.input(
    type = "yesno",
    input = "Do you want to clean and melt this file (to wide R format)?",
    test_input = testInputClean)
  cleanBool <- inputClean == "y"
  inputCode <- findData.input(
    type = "yesno",
    input = "Do you want to print the code for downloading this data?",
    test_input = testInputCode)
  # Choose variable values
  varList <- list()
  varListText <- character(0)
  # Present the alternatives for each query dimension and collect the choices
  for (i in seq_along(dataNode$variables$variables)) {
    listElem <- dataNode$variables$variables[[i]]
    # Skip variables without selectable values (scalar check: use ||)
    if (is.null(listElem$values) || is.null(listElem$valueTexts)) {
      next()
    }
    varDF <- data.frame(id = listElem$values,
                        text = listElem$valueTexts,
                        stringsAsFactors = FALSE)
    # Ask for input from the user
    varAlt <- findData.input(
      type = "alt",
      input = list(varDF, listElem$text),
      test_input = testInputVarAlt)
    # Convert the user's choices to the PX-WEB API format ("*" = select all)
    if (varAlt[1] != "*") {
      tempAlt <- listElem$values[as.numeric(varAlt)]
    } else {
      tempAlt <- "*"
    }
    # Save the alternative used to download data, plus its printable form
    varList[[listElem$code]] <- tempAlt
    varListText <- c(varListText,
                     str_c(ifelse(make.names(listElem$code) == listElem$code,
                                  listElem$code,
                                  str_c("\"", listElem$code, "\"", collapse = "")),
                           " = c('",
                           str_c(tempAlt, collapse = "', '"),
                           "')",
                           collapse = ""))
  }
  if (download) {
    cat("Downloading... ")
    tempData <- get_pxweb_data(dataNode$URL, varList, clean = cleanBool)
    cat("Done.\n")
  }
  # Print the code needed to repeat the download
  if (inputCode == "y") {
    findData.printCode(dataNode$URL,
                       varListText,
                       clean = cleanBool)
  }
  if (download) { return(tempData) } else { return(invisible(NULL)) }
}
findData.inputBaseCat <- function(alt, codedAlt) {
  # Render the rows 'alt' of 'codedAlt' as the legend of base commands shown
  # below each input prompt, e.g. "\n('esc' = Quit, 'b' = Back)".
  #
  # @param alt row indices into codedAlt to display.
  # @param codedAlt data.frame with columns abbr (column 1) and name (column 2).
  # @return a single string starting with "\n(" and ending with ")".
  #
  # Vectorized paste replaces the original grow-a-string loop (which was
  # quadratic in the number of alternatives and needed stringr for nothing).
  pairs <- paste0("'", codedAlt[alt, 1], "' = ", codedAlt[alt, 2])
  paste0("\n(", paste(pairs, collapse = ", "), ")")
}
#' Get user input that is consistent with the requested prompt type
#'
#' Prompts on the console until a valid answer is given, or consumes
#' \code{test_input} when running non-interactively.
#'
#' @param type type of input to get: one of "node", "yesno", "text", "alt",
#'   "db" or "api".
#' @param input data.frame (or, for "alt", a list of data.frame and variable
#'   name) with the data to present, or the prompt text for "yesno"/"text".
#' @param test_input input for test cases; when non-empty it is read instead
#'   of the console.
#' @param silent if TRUE, all console output is diverted to a temp file.
#'
findData.input <- function(type, input = NULL, test_input = character(0), silent = FALSE){
  # If silent, sink all output to a throw-away temp file
  if(silent){
    temp <- tempfile()
    sink(file=temp)
  }
  # Define the possible base commands the user can give (besides alternatives)
  codedAlt <- data.frame(abbr=c("esc", "b", "*", "y", "n", "a"),
                         name=c("Quit", "Back", "Select all", "Yes", "No", "Show all"),
                         stringsAsFactors = FALSE)
  textTitle <- alt <- character(0)
  baseCat <- numeric(0)
  max_cat <- NA
  # Configure title, header, valid alternatives per prompt type
  if (type == "node") {
    baseCat<-1:2
    alt <- rownames(input)
    textHead <- "\nEnter the data (number) you want to explore:"
  }
  if (type == "yesno") {
    baseCat <- c(1,4:5)
    textHead <- input
  }
  if (type == "text") {
    textHead <- input
  }
  if (type == "alt") {
    baseCat <- c(1,3,6)
    varDF <- input[[1]]
    alt <- rownames(varDF)
    max_cat <- length(alt)
    # Calculate a shortened list of alternatives (first 6 / last 5, row "."
    # marks the ellipsis) when there are more than 11 rows
    if (nrow(varDF) > 11) {
      varDFshort <- varDF[c(1:6, (nrow(varDF)-4):nrow(varDF)), ]
      rownames(varDFshort)[6] <- "."
    } else {
      varDFshort <- varDF }
    textTitle <- str_c("\nALTERNATIVES FOR VARIABLE: ",
                       toupper(input[[2]]),
                       " \n",
                       str_c(
                         rep("=", round(getOption("width")*0.9)), collapse = ""),
                       "\n", sep="")
    textHead <-
      str_c("\nChoose your alternative(s) by number:",
            "\nSeparate multiple choices by ',' and intervals by ':'", sep="")
  }
  if (type == "db") {
    baseCat <- c(1)
    toprint <- data.frame(id=1:nrow(input), text = input$text)
    alt <- rownames(toprint)
    max_cat <- 1
    textTitle <- str_c("\nCHOOSE DATABASE:\n",
                       str_c(
                         rep("=", round(getOption("width")*0.9)), collapse = ""),
                       "\n", sep="")
    textHead <-
      str_c("\nChoose database by number:", sep="")
  }
  if (type == "api") {
    baseCat <- c(1)
    toprint <- data.frame(id=input[,1], text = input[,2])
    alt <- rownames(toprint)
    max_cat <- 1
    textTitle <- str_c("\nCHOOSE API:\n",
                       str_c(
                         rep("=", round(getOption("width")*0.9)), collapse = ""),
                       "\n", sep="")
    textHead <-
      str_c("\nChoose api by number:", sep="")
  }
  inputOK <- FALSE
  inputScan <- ""
  # Keep prompting until a valid answer is obtained
  while(!inputOK) {
    # Print title, alternatives and so forth
    cat(textTitle)
    if (type == "alt") {
      # "a" (show all) switches from the shortened list to the full list
      if (inputScan == "a") {
        toprint <- varDF
      } else {
        toprint <- varDFshort
      }
      findData.printNode(xscb = toprint, print = TRUE)
    }
    if (type == "db" | type == "api") {
      findData.printNode(xscb = toprint, print = TRUE)
    }
    cat(textHead)
    if (type != "text") {
      cat(findData.inputBaseCat(baseCat, codedAlt), "\n")
    }
    # Get input from the user (or from test_input in a test run)
    if (length(test_input)==0) {
      inputScanRaw <- scan(what=character(), multi.line = FALSE, quiet=TRUE, nlines=1 , sep=",")
    } else {
      inputScanRaw <- scan(what=character(), quiet=TRUE, sep=",", text=test_input)
    }
    # If just an enter is entered -> start over
    if (length(inputScanRaw) == 0) { next() }
    # Normalize the input (lowercase, trimmed) as a character vector
    inputScan <- tolower(str_trim(inputScanRaw))
    # If a = "Show all", restart, but show all alternatives
    if (inputScan[1] == "a") { next() }
    # Text input stays case sensitive
    if (type == "text") inputScan <- inputScanRaw
    # Expand "a:b" ranges, de-duplicate and sort the selection
    inputScan <- findData.inputConvert(inputScan, max_value=max_cat)
    # Valid if it is a single base command, or every value is a known
    # alternative, or free text is allowed
    inputOK <-
      (length(inputScan) == 1 && inputScan %in% tolower(codedAlt$abbr[baseCat])) |
      all(inputScan %in% tolower(alt)) |
      type == "text"
    if(type != "alt" & length(inputScan) > 1) inputOK <- FALSE
    if(type == "text") {
      # Text answers must be valid R object names (they name a data.frame)
      if(make.names(inputScan) != inputScan) {
        inputOK <- FALSE
        cat("This is not a valid name of a data.frame object in R.\n")
        cat("You could change the name to '",
            make.names(inputScan),
            "'.\n", sep="")
      }
    }
    if(!inputOK){
      cat("Sorry, no such entry allowed. Please try again!\n\n")
    }
  }
  # Stop sinking and remove the temp file
  if(silent){
    sink()
    unlink(temp)
  }
  return(inputScan)
}
findData.printNode <- function(xscb, print=TRUE) {
  # Pretty-print a node table as "row. [id] text" lines, wrapping the text
  # column at word boundaries to ~90% of the console width.
  #
  # @param xscb data.frame with columns 'id' and 'text'; rownames are the
  #   selection numbers shown to the user (a rowname "." marks the ellipsis
  #   row of a shortened list and is printed as a blank line).
  # @param print if TRUE, cat() the result; otherwise return it as a string.
  xscb$text <- as.character(xscb$text)
  nSCBidlen <- max(str_length(as.character(xscb$id))) # Get max str length of id
  nSCBpos <- max(str_length(rownames(xscb))) # Get max str length of row number
  nSCBconsole <- round(getOption("width")*0.9)
  # Calculate where the text column starts and how much width it gets
  startPos <- nSCBpos+nSCBidlen+5
  scbTextSpace <- nSCBconsole-startPos
  finalText <- character(0)
  for (i in 1:nrow(xscb)) {
    # Ellipsis row of a shortened list of alternatives: print a blank line
    if (rownames(xscb)[i] == "."){
      finalText <- str_c(finalText,"\n")
      next()
    }
    # Prefix "<row>.<pad> [<id>]<pad> " aligned to startPos
    finalText <- str_c(
      finalText,
      rownames(xscb)[i],
      ".",
      str_c(
        rep(" ", nSCBpos - str_length(rownames(xscb)[i])), collapse=""),
      " [",
      xscb$id[i],
      "]",
      str_c(rep(" ", nSCBidlen - str_length(as.character(xscb$id[i]))), collapse=""),
      " ",collapse="")
    # Wrap the text if the console is too narrow for it
    first <- rerun <- TRUE
    tempText <- xscb$text[i]
    while(first | rerun){
      # Cut the text into pieces that fit the console width, breaking at the
      # last space that still fits
      tempTextSpaces <- str_locate_all(tempText,pattern=" ")[[1]][ , 1]
      if (str_length(tempText) > scbTextSpace){
        tempTextCut <- max(tempTextSpaces[tempTextSpaces < scbTextSpace]) - 1
      } else {
        tempTextCut <- str_length(tempText)
        rerun <- FALSE
      }
      # Continuation lines are indented to startPos (first line is not)
      finalText <-
        str_c(finalText,
              str_c(rep(" ", startPos*(1-as.numeric(first))), collapse=""),
              str_sub(tempText, 1, tempTextCut), "\n", collapse="")
      if (rerun) {
        # Drop the printed piece plus the following space and wrap the rest
        tempText <- str_sub(tempText, tempTextCut + 2)
      }
      first <- FALSE
    }
  }
  # Print the node text or return it as a character value
  if (print) {
    cat(finalText)
  } else {
    return(finalText)
  }
}
findData.printCode <- function(url, varListText, clean) {
  # Print reproducible R code (a get_pxweb_data() call) that repeats the
  # interactive download the user just performed.
  #
  # @param url API url of the chosen table.
  # @param varListText character vector of "code = c('v1', 'v2')" snippets,
  #   one per query dimension, already formatted by download_pxweb().
  # @param clean logical; the clean/melt choice to embed in the printed call.
  cat("To download the same data again, use the following code:\n(save code using UTF-8 encoding)\n\n")
  cat("myDataSetName",
      " <- \n get_pxweb_data(url = \"",
      url,
      "\",\n",
      rep(" ",13),
      "dims = list(", sep="")
  # Print the chosen alternatives for each data dimension, aligned under
  # "dims = list(" (25 spaces of indent for every line but the first)
  for (i in 1:length(varListText)){
    if(i != 1){
      cat(rep(" ", 25), sep="")
    }
    cat(varListText[i], sep="")
    if (i != length(varListText)) {
      cat(",\n",sep="")
    }
  }
  cat("),\n")
  # Print whether the data should be cleaned or not
  cat(rep(" ",13),
      "clean = ",
      as.character(clean), sep="")
  cat(")\n\n")
}
findData.inputConvert <- function(input, max_value=NA) {
  # Expand user input of the form "a:b" into explicit sequences and
  # de-duplicate/sort multi-valued selections.
  #
  # @param input character vector of raw user choices, e.g. c("1", "3:5").
  # @param max_value upper bound used when a range is open-ended ("3:");
  #   if NA, an open-ended range collapses to its start value.
  # @return character vector of individual choices; numerically sorted and
  #   de-duplicated whenever more than one value (or a range) was supplied.
  output <- input
  # A single plain value passes through untouched. || short-circuits, so
  # grepl() only ever sees a length-1 input here.
  if (length(input) > 1 || grepl(":", input, fixed = TRUE)) {
    output <- character(0)
    for (item in input) {
      if (grepl(":", item, fixed = TRUE)) {
        # Split "a:b"; missing endpoints become NA (":b" -> start defaults
        # to 1, "a:" -> end defaults to max_value).
        index <- as.numeric(strsplit(item, ":", fixed = TRUE)[[1]])
        if (is.na(index[1])) index[1] <- 1
        if (is.na(index[2])) {
          index[2] <- max_value
          if (is.na(max_value)) index[2] <- index[1]
        }
        output <- c(output, as.character(index[1]:index[2]))
      } else {
        # Otherwise just keep the value as-is
        output <- c(output, item)
      }
    }
    # Sort numerically and remove duplicates
    output <- unique(output)
    output <- output[order(as.numeric(output))]
  }
  output
}
#' Calculate a specific database to get data from
#'
#' @param baseURL The basic url to the pxweb api
#' @param pre_choice Predefined choice of database (row index in the metadata
#'   table); if NULL and several databases exist, the user is asked
#'   interactively.
#'
#' @return base url to the specific data base
#'
choose_pxweb_database_url <- function(baseURL, pre_choice = NULL){
  data_bases <- get_pxweb_metadata(baseURL = baseURL)
  # Only one database: nothing to choose
  if (nrow(data_bases) == 1) {
    return(paste0(baseURL, "/", text_to_url(data_bases$dbid)))
  }
  # No predefined choice: ask the user (the final `else if (!is.null(...))`
  # of the original was redundant; it is the plain alternative branch)
  if (is.null(pre_choice)) {
    pre_choice <- as.numeric(findData.input(type = "db", input = data_bases))
  }
  paste0(baseURL, "/", text_to_url(data_bases$dbid[pre_choice]))
}
#' Choose an api from api_catalogue
#'
#' @return character vector of length 3: the chosen api name, its language
#'   and its version.
#'
choose_pxweb_api <- function(){
  apis <- api_catalogue()
  # Present the available APIs and let the user pick one by number
  api_df <- data.frame(
    api_names = unlist(lapply(apis, function(X) X$api)),
    text = unlist(lapply(apis, function(X) X$description))
  )
  chosen <- as.numeric(findData.input(type = "api", input = api_df))
  selection <- character(3)
  selection[1] <- apis[[chosen]]$api
  pos <- 1
  for (field in c("languages", "versions")) {
    pos <- pos + 1
    alternatives <- if (field == "languages") {
      apis[[chosen]]$languages
    } else {
      apis[[chosen]]$versions
    }
    if (length(alternatives) > 1) {
      # Several alternatives: ask the user to pick one by number
      pick <- as.numeric(findData.input(
        type = "api",
        input = data.frame(id = seq_along(alternatives),
                           text = alternatives)))
      pick <- alternatives[pick]
    } else {
      # A single alternative is selected automatically
      pick <- alternatives
    }
    selection[pos] <- pick
  }
  return(selection)
}
| /R/interactive_pxweb_internal.R | no_license | krose/pxweb | R | false | false | 14,161 | r | #' @title Traverse node for query alternatives and download data.
#'
#' @description Goes through the dataNode and asks the user for input for all
#' variables and then puts this together into a query for \link{get_pxweb_data}.
#'
#' @param dataNode Bottom node in the node tree: a list of the node itself and
#'   its display name.
#' @param test_input Vector of length 3 used to answer the first 3 questions
#'   (download / clean / print code) when testing; variable alternatives are
#'   then answered with "1".
#' @param ... further parameters. These are currently ignored.
#'
#' @return The downloaded (and optionally cleaned) data, or invisibly
#'   \code{NULL} if the user declined the download.
#'
download_pxweb <- function(dataNode, test_input = NULL, ...) {
  # Assertions: test_input is either absent (interactive) or answers exactly
  # the 3 yes/no prompts below. (Scalar condition, so use short-circuit ||.)
  stopifnot(length(test_input) == 0 || length(test_input) == 3)
  # Define test answers
  if (length(test_input) == 0) {
    testInputDown <- testInputClean <- character(0)
    testInputCode <- testInputVarAlt <- character(0)
  } else {
    testInputDown <- test_input[1]
    testInputClean <- test_input[2]
    testInputCode <- test_input[3]
    testInputVarAlt <- "1"
  }
  dataNodeName <- dataNode[[2]]
  dataNode <- dataNode[[1]]
  # Ask if the file should be downloaded
  inputDown <- findData.input(
    type = "yesno",
    input = str_c("Do you want to download '", dataNodeName, "'?", sep = ""),
    test_input = testInputDown)
  download <- inputDown == "y"
  inputClean <- findData.input(
    type = "yesno",
    input = "Do you want to clean and melt this file (to wide R format)?",
    test_input = testInputClean)
  cleanBool <- inputClean == "y"
  inputCode <- findData.input(
    type = "yesno",
    input = "Do you want to print the code for downloading this data?",
    test_input = testInputCode)
  # Choose variable values
  varList <- list()
  varListText <- character(0)
  # Present the alternatives for each query dimension and collect the choices
  for (i in seq_along(dataNode$variables$variables)) {
    listElem <- dataNode$variables$variables[[i]]
    # Skip variables without selectable values (scalar check: use ||)
    if (is.null(listElem$values) || is.null(listElem$valueTexts)) {
      next()
    }
    varDF <- data.frame(id = listElem$values,
                        text = listElem$valueTexts,
                        stringsAsFactors = FALSE)
    # Ask for input from the user
    varAlt <- findData.input(
      type = "alt",
      input = list(varDF, listElem$text),
      test_input = testInputVarAlt)
    # Convert the user's choices to the PX-WEB API format ("*" = select all)
    if (varAlt[1] != "*") {
      tempAlt <- listElem$values[as.numeric(varAlt)]
    } else {
      tempAlt <- "*"
    }
    # Save the alternative used to download data, plus its printable form
    varList[[listElem$code]] <- tempAlt
    varListText <- c(varListText,
                     str_c(ifelse(make.names(listElem$code) == listElem$code,
                                  listElem$code,
                                  str_c("\"", listElem$code, "\"", collapse = "")),
                           " = c('",
                           str_c(tempAlt, collapse = "', '"),
                           "')",
                           collapse = ""))
  }
  if (download) {
    cat("Downloading... ")
    tempData <- get_pxweb_data(dataNode$URL, varList, clean = cleanBool)
    cat("Done.\n")
  }
  # Print the code needed to repeat the download
  if (inputCode == "y") {
    findData.printCode(dataNode$URL,
                       varListText,
                       clean = cleanBool)
  }
  if (download) { return(tempData) } else { return(invisible(NULL)) }
}
findData.inputBaseCat <- function(alt, codedAlt) {
  # Render the rows 'alt' of 'codedAlt' as the legend of base commands shown
  # below each input prompt, e.g. "\n('esc' = Quit, 'b' = Back)".
  #
  # @param alt row indices into codedAlt to display.
  # @param codedAlt data.frame with columns abbr (column 1) and name (column 2).
  # @return a single string starting with "\n(" and ending with ")".
  #
  # Vectorized paste replaces the original grow-a-string loop (which was
  # quadratic in the number of alternatives and needed stringr for nothing).
  pairs <- paste0("'", codedAlt[alt, 1], "' = ", codedAlt[alt, 2])
  paste0("\n(", paste(pairs, collapse = ", "), ")")
}
#' Get user input that is consistent with the requested prompt type
#'
#' Prompts on the console until a valid answer is given, or consumes
#' \code{test_input} when running non-interactively.
#'
#' @param type type of input to get: one of "node", "yesno", "text", "alt",
#'   "db" or "api".
#' @param input data.frame (or, for "alt", a list of data.frame and variable
#'   name) with the data to present, or the prompt text for "yesno"/"text".
#' @param test_input input for test cases; when non-empty it is read instead
#'   of the console.
#' @param silent if TRUE, all console output is diverted to a temp file.
#'
findData.input <- function(type, input = NULL, test_input = character(0), silent = FALSE){
  # If silent, sink all output to a throw-away temp file
  if(silent){
    temp <- tempfile()
    sink(file=temp)
  }
  # Define the possible base commands the user can give (besides alternatives)
  codedAlt <- data.frame(abbr=c("esc", "b", "*", "y", "n", "a"),
                         name=c("Quit", "Back", "Select all", "Yes", "No", "Show all"),
                         stringsAsFactors = FALSE)
  textTitle <- alt <- character(0)
  baseCat <- numeric(0)
  max_cat <- NA
  # Configure title, header, valid alternatives per prompt type
  if (type == "node") {
    baseCat<-1:2
    alt <- rownames(input)
    textHead <- "\nEnter the data (number) you want to explore:"
  }
  if (type == "yesno") {
    baseCat <- c(1,4:5)
    textHead <- input
  }
  if (type == "text") {
    textHead <- input
  }
  if (type == "alt") {
    baseCat <- c(1,3,6)
    varDF <- input[[1]]
    alt <- rownames(varDF)
    max_cat <- length(alt)
    # Calculate a shortened list of alternatives (first 6 / last 5, row "."
    # marks the ellipsis) when there are more than 11 rows
    if (nrow(varDF) > 11) {
      varDFshort <- varDF[c(1:6, (nrow(varDF)-4):nrow(varDF)), ]
      rownames(varDFshort)[6] <- "."
    } else {
      varDFshort <- varDF }
    textTitle <- str_c("\nALTERNATIVES FOR VARIABLE: ",
                       toupper(input[[2]]),
                       " \n",
                       str_c(
                         rep("=", round(getOption("width")*0.9)), collapse = ""),
                       "\n", sep="")
    textHead <-
      str_c("\nChoose your alternative(s) by number:",
            "\nSeparate multiple choices by ',' and intervals by ':'", sep="")
  }
  if (type == "db") {
    baseCat <- c(1)
    toprint <- data.frame(id=1:nrow(input), text = input$text)
    alt <- rownames(toprint)
    max_cat <- 1
    textTitle <- str_c("\nCHOOSE DATABASE:\n",
                       str_c(
                         rep("=", round(getOption("width")*0.9)), collapse = ""),
                       "\n", sep="")
    textHead <-
      str_c("\nChoose database by number:", sep="")
  }
  if (type == "api") {
    baseCat <- c(1)
    toprint <- data.frame(id=input[,1], text = input[,2])
    alt <- rownames(toprint)
    max_cat <- 1
    textTitle <- str_c("\nCHOOSE API:\n",
                       str_c(
                         rep("=", round(getOption("width")*0.9)), collapse = ""),
                       "\n", sep="")
    textHead <-
      str_c("\nChoose api by number:", sep="")
  }
  inputOK <- FALSE
  inputScan <- ""
  # Keep prompting until a valid answer is obtained
  while(!inputOK) {
    # Print title, alternatives and so forth
    cat(textTitle)
    if (type == "alt") {
      # "a" (show all) switches from the shortened list to the full list
      if (inputScan == "a") {
        toprint <- varDF
      } else {
        toprint <- varDFshort
      }
      findData.printNode(xscb = toprint, print = TRUE)
    }
    if (type == "db" | type == "api") {
      findData.printNode(xscb = toprint, print = TRUE)
    }
    cat(textHead)
    if (type != "text") {
      cat(findData.inputBaseCat(baseCat, codedAlt), "\n")
    }
    # Get input from the user (or from test_input in a test run)
    if (length(test_input)==0) {
      inputScanRaw <- scan(what=character(), multi.line = FALSE, quiet=TRUE, nlines=1 , sep=",")
    } else {
      inputScanRaw <- scan(what=character(), quiet=TRUE, sep=",", text=test_input)
    }
    # If just an enter is entered -> start over
    if (length(inputScanRaw) == 0) { next() }
    # Normalize the input (lowercase, trimmed) as a character vector
    inputScan <- tolower(str_trim(inputScanRaw))
    # If a = "Show all", restart, but show all alternatives
    if (inputScan[1] == "a") { next() }
    # Text input stays case sensitive
    if (type == "text") inputScan <- inputScanRaw
    # Expand "a:b" ranges, de-duplicate and sort the selection
    inputScan <- findData.inputConvert(inputScan, max_value=max_cat)
    # Valid if it is a single base command, or every value is a known
    # alternative, or free text is allowed
    inputOK <-
      (length(inputScan) == 1 && inputScan %in% tolower(codedAlt$abbr[baseCat])) |
      all(inputScan %in% tolower(alt)) |
      type == "text"
    if(type != "alt" & length(inputScan) > 1) inputOK <- FALSE
    if(type == "text") {
      # Text answers must be valid R object names (they name a data.frame)
      if(make.names(inputScan) != inputScan) {
        inputOK <- FALSE
        cat("This is not a valid name of a data.frame object in R.\n")
        cat("You could change the name to '",
            make.names(inputScan),
            "'.\n", sep="")
      }
    }
    if(!inputOK){
      cat("Sorry, no such entry allowed. Please try again!\n\n")
    }
  }
  # Stop sinking and remove the temp file
  if(silent){
    sink()
    unlink(temp)
  }
  return(inputScan)
}
findData.printNode <- function(xscb, print=TRUE) {
  # Pretty-print a node table as "row. [id] text" lines, wrapping the text
  # column at word boundaries to ~90% of the console width.
  #
  # @param xscb data.frame with columns 'id' and 'text'; rownames are the
  #   selection numbers shown to the user (a rowname "." marks the ellipsis
  #   row of a shortened list and is printed as a blank line).
  # @param print if TRUE, cat() the result; otherwise return it as a string.
  xscb$text <- as.character(xscb$text)
  nSCBidlen <- max(str_length(as.character(xscb$id))) # Get max str length of id
  nSCBpos <- max(str_length(rownames(xscb))) # Get max str length of row number
  nSCBconsole <- round(getOption("width")*0.9)
  # Calculate where the text column starts and how much width it gets
  startPos <- nSCBpos+nSCBidlen+5
  scbTextSpace <- nSCBconsole-startPos
  finalText <- character(0)
  for (i in 1:nrow(xscb)) {
    # Ellipsis row of a shortened list of alternatives: print a blank line
    if (rownames(xscb)[i] == "."){
      finalText <- str_c(finalText,"\n")
      next()
    }
    # Prefix "<row>.<pad> [<id>]<pad> " aligned to startPos
    finalText <- str_c(
      finalText,
      rownames(xscb)[i],
      ".",
      str_c(
        rep(" ", nSCBpos - str_length(rownames(xscb)[i])), collapse=""),
      " [",
      xscb$id[i],
      "]",
      str_c(rep(" ", nSCBidlen - str_length(as.character(xscb$id[i]))), collapse=""),
      " ",collapse="")
    # Wrap the text if the console is too narrow for it
    first <- rerun <- TRUE
    tempText <- xscb$text[i]
    while(first | rerun){
      # Cut the text into pieces that fit the console width, breaking at the
      # last space that still fits
      tempTextSpaces <- str_locate_all(tempText,pattern=" ")[[1]][ , 1]
      if (str_length(tempText) > scbTextSpace){
        tempTextCut <- max(tempTextSpaces[tempTextSpaces < scbTextSpace]) - 1
      } else {
        tempTextCut <- str_length(tempText)
        rerun <- FALSE
      }
      # Continuation lines are indented to startPos (first line is not)
      finalText <-
        str_c(finalText,
              str_c(rep(" ", startPos*(1-as.numeric(first))), collapse=""),
              str_sub(tempText, 1, tempTextCut), "\n", collapse="")
      if (rerun) {
        # Drop the printed piece plus the following space and wrap the rest
        tempText <- str_sub(tempText, tempTextCut + 2)
      }
      first <- FALSE
    }
  }
  # Print the node text or return it as a character value
  if (print) {
    cat(finalText)
  } else {
    return(finalText)
  }
}
findData.printCode <- function(url, varListText, clean) {
  # Print reproducible R code (a get_pxweb_data() call) that repeats the
  # interactive download the user just performed.
  #
  # @param url API url of the chosen table.
  # @param varListText character vector of "code = c('v1', 'v2')" snippets,
  #   one per query dimension, already formatted by download_pxweb().
  # @param clean logical; the clean/melt choice to embed in the printed call.
  cat("To download the same data again, use the following code:\n(save code using UTF-8 encoding)\n\n")
  cat("myDataSetName",
      " <- \n get_pxweb_data(url = \"",
      url,
      "\",\n",
      rep(" ",13),
      "dims = list(", sep="")
  # Print the chosen alternatives for each data dimension, aligned under
  # "dims = list(" (25 spaces of indent for every line but the first)
  for (i in 1:length(varListText)){
    if(i != 1){
      cat(rep(" ", 25), sep="")
    }
    cat(varListText[i], sep="")
    if (i != length(varListText)) {
      cat(",\n",sep="")
    }
  }
  cat("),\n")
  # Print whether the data should be cleaned or not
  cat(rep(" ",13),
      "clean = ",
      as.character(clean), sep="")
  cat(")\n\n")
}
findData.inputConvert <- function(input, max_value=NA) {
  # Expand user input of the form "a:b" into explicit sequences and
  # de-duplicate/sort multi-valued selections.
  #
  # @param input character vector of raw user choices, e.g. c("1", "3:5").
  # @param max_value upper bound used when a range is open-ended ("3:");
  #   if NA, an open-ended range collapses to its start value.
  # @return character vector of individual choices; numerically sorted and
  #   de-duplicated whenever more than one value (or a range) was supplied.
  output <- input
  # A single plain value passes through untouched. || short-circuits, so
  # grepl() only ever sees a length-1 input here.
  if (length(input) > 1 || grepl(":", input, fixed = TRUE)) {
    output <- character(0)
    for (item in input) {
      if (grepl(":", item, fixed = TRUE)) {
        # Split "a:b"; missing endpoints become NA (":b" -> start defaults
        # to 1, "a:" -> end defaults to max_value).
        index <- as.numeric(strsplit(item, ":", fixed = TRUE)[[1]])
        if (is.na(index[1])) index[1] <- 1
        if (is.na(index[2])) {
          index[2] <- max_value
          if (is.na(max_value)) index[2] <- index[1]
        }
        output <- c(output, as.character(index[1]:index[2]))
      } else {
        # Otherwise just keep the value as-is
        output <- c(output, item)
      }
    }
    # Sort numerically and remove duplicates
    output <- unique(output)
    output <- output[order(as.numeric(output))]
  }
  output
}
#' Calculate a specific database to get data from
#'
#' @param baseURL The basic url to the pxweb api
#' @param pre_choice Predefined choice of database (row index in the metadata
#'   table); if NULL and several databases exist, the user is asked
#'   interactively.
#'
#' @return base url to the specific data base
#'
choose_pxweb_database_url <- function(baseURL, pre_choice = NULL){
  data_bases <- get_pxweb_metadata(baseURL = baseURL)
  # Only one database: nothing to choose
  if (nrow(data_bases) == 1) {
    return(paste0(baseURL, "/", text_to_url(data_bases$dbid)))
  }
  # No predefined choice: ask the user (the final `else if (!is.null(...))`
  # of the original was redundant; it is the plain alternative branch)
  if (is.null(pre_choice)) {
    pre_choice <- as.numeric(findData.input(type = "db", input = data_bases))
  }
  paste0(baseURL, "/", text_to_url(data_bases$dbid[pre_choice]))
}
#' Choose an api from api_catalogue
#'
#' @return character vector of length 3: the chosen api name, its language
#'   and its version.
#'
choose_pxweb_api <- function(){
  apis <- api_catalogue()
  # Present the available APIs and let the user pick one by number
  api_df <- data.frame(
    api_names = unlist(lapply(apis, function(X) X$api)),
    text = unlist(lapply(apis, function(X) X$description))
  )
  chosen <- as.numeric(findData.input(type = "api", input = api_df))
  selection <- character(3)
  selection[1] <- apis[[chosen]]$api
  pos <- 1
  for (field in c("languages", "versions")) {
    pos <- pos + 1
    alternatives <- if (field == "languages") {
      apis[[chosen]]$languages
    } else {
      apis[[chosen]]$versions
    }
    if (length(alternatives) > 1) {
      # Several alternatives: ask the user to pick one by number
      pick <- as.numeric(findData.input(
        type = "api",
        input = data.frame(id = seq_along(alternatives),
                           text = alternatives)))
      pick <- alternatives[pick]
    } else {
      # A single alternative is selected automatically
      pick <- alternatives
    }
    selection[pos] <- pick
  }
  return(selection)
}
|
# define string formatting: "fmt" %--% c(args) behaves like sprintf(fmt, args...)
# (Python-style interpolation operator, adapted from Stack Overflow:
# https://stackoverflow.com/questions/46085274)
`%--%` <- function(x, y)
{
  # Prepend the format string to the argument list and hand off to sprintf.
  do.call(sprintf, c(x, as.list(y)))
}
# Summarise absolute errors of BART-Int / GPBQ / MI over 20 CV replicates of
# the fisher function, for dimensions 1-3 with 20 design points per dimension.
# (The original triplicated the loop body verbatim for dim = 1, 2, 3 and
# allocated `results` with nrow = 2 while writing 3 rows.)
dims <- c(1, 2, 3)
num_data <- 20
n_cv <- 20
results <- data.frame(matrix(0, ncol = 8, nrow = length(dims)))
colnames(results) <- c("BART-Int", "BART-Int-se", "GPBQ", "GPBQ-se",
                       "MI", "MI-se", "dim", "n")
l <- 1
for (dim in dims) {
  bart <- c()
  gp <- c()
  mi <- c()
  # Pool the per-replicate posterior means across all 20 cross-validation runs
  for (num_cv in 1:n_cv) {
    df <- read.csv("results/fisher_function/fisher_function/PaperDim%sUniform_%s_%s.csv"
                   %--% c(dim, num_data, num_cv))
    bart <- c(bart, df$BARTMean)
    gp <- c(gp, df$GPMean)
    mi <- c(mi, df$MIMean)
  }
  # Mean absolute error and its standard error across the replicates
  results[l, 1] <- mean(abs(bart - df$actual))
  results[l, 2] <- sd(abs(bart - df$actual)) / sqrt(n_cv)
  results[l, 3] <- mean(abs(gp - df$actual))
  results[l, 4] <- sd(abs(gp - df$actual)) / sqrt(n_cv)
  results[l, 5] <- mean(abs(mi - df$actual))
  results[l, 6] <- sd(abs(mi - df$actual)) / sqrt(n_cv)
  results[l, 7] <- dim
  results[l, 8] <- num_data * dim
  l <- l + 1
}
# NOTE(review): the original piped the path through %--% with c(dim, num_data),
# but it contains no format specifiers, so sprintf ignored the arguments.
write.csv(results, "figures_code/non_stationarity.csv")
| /figures_code/plot_fisher.r | permissive | XingLLiu/BO-BART | R | false | false | 2,650 | r | # define string formatting
`%--%` <- function(x, y)
{
  # Python-style string interpolation: "fmt" %--% c(args) == sprintf(fmt, ...).
  # Prepend the format string to the argument list and hand off to sprintf.
  do.call(sprintf, c(x, as.list(y)))
}
# Summarise absolute errors of BART-Int / GPBQ / MI over 20 CV replicates of
# the fisher function, for dimensions 1-3 with 20 design points per dimension.
# (The original triplicated the loop body verbatim for dim = 1, 2, 3 and
# allocated `results` with nrow = 2 while writing 3 rows.)
dims <- c(1, 2, 3)
num_data <- 20
n_cv <- 20
results <- data.frame(matrix(0, ncol = 8, nrow = length(dims)))
colnames(results) <- c("BART-Int", "BART-Int-se", "GPBQ", "GPBQ-se",
                       "MI", "MI-se", "dim", "n")
l <- 1
for (dim in dims) {
  bart <- c()
  gp <- c()
  mi <- c()
  # Pool the per-replicate posterior means across all 20 cross-validation runs
  for (num_cv in 1:n_cv) {
    df <- read.csv("results/fisher_function/fisher_function/PaperDim%sUniform_%s_%s.csv"
                   %--% c(dim, num_data, num_cv))
    bart <- c(bart, df$BARTMean)
    gp <- c(gp, df$GPMean)
    mi <- c(mi, df$MIMean)
  }
  # Mean absolute error and its standard error across the replicates
  results[l, 1] <- mean(abs(bart - df$actual))
  results[l, 2] <- sd(abs(bart - df$actual)) / sqrt(n_cv)
  results[l, 3] <- mean(abs(gp - df$actual))
  results[l, 4] <- sd(abs(gp - df$actual)) / sqrt(n_cv)
  results[l, 5] <- mean(abs(mi - df$actual))
  results[l, 6] <- sd(abs(mi - df$actual)) / sqrt(n_cv)
  results[l, 7] <- dim
  results[l, 8] <- num_data * dim
  l <- l + 1
}
# NOTE(review): the original piped the path through %--% with c(dim, num_data),
# but it contains no format specifiers, so sprintf ignored the arguments.
write.csv(results, "figures_code/non_stationarity.csv")
|
# Standard entry point for R CMD check: run the package's testthat suite.
library(testthat)
library(exPrior)
test_check("exPrior")
| /tests/testthat.R | permissive | GeoStat-Bayesian/exPrior | R | false | false | 58 | r | library(testthat)
library(exPrior)
# Run the full testthat suite for the exPrior package.
test_check("exPrior")
|
# Load the preprocessed hsb2 dataset from an SPSS file (requires `foreign`).
library(foreign)
f <- read.spss("R.sav", to.data.frame = TRUE)
names(f)
# Check each predictor for normality with normal Q-Q plots:
# points near the reference line indicate approximate normality.
qqnorm(f$read, pch = 1, frame = FALSE)
qqline(f$read, col = "steelblue", lwd = 2)
qqnorm(f$math, pch = 1, frame = FALSE)
qqline(f$math, col = "steelblue", lwd = 2)
qqnorm(f$science, pch = 1, frame = FALSE)
qqline(f$science, col = "steelblue", lwd = 2)
qqnorm(f$socst, pch = 1, frame = FALSE)
qqline(f$socst, col = "steelblue", lwd = 2)
# Fit a linear regression of writing score on gender and the four test scores.
# NOTE(review): despite the `glm.` prefix this is an ordinary `lm()` fit.
glm.fit1 <- lm(write ~ female + read + math + science + socst, data = f)
summary(glm.fit1)
# Check for multicollinearity via variance inflation factors (requires `car`).
library(car)
vif(glm.fit1)
# Predict the writing score for a single new observation.
predict(glm.fit1, data.frame(female=0, read=49, math=45, science=57, socst=52), type="response")
| /Assignment 2/R script.R | no_license | assemkussainova/MATH-540-Statistical-Learning | R | false | false | 778 | r | #load preprocessed hsb2 dataset from SPSS file
library(foreign)
f <- read.spss("R.sav", to.data.frame = TRUE)
names(f)
#check for normality
qqnorm(f$read, pch = 1, frame = FALSE)
qqline(f$read, col = "steelblue", lwd = 2)
qqnorm(f$math, pch = 1, frame = FALSE)
qqline(f$math, col = "steelblue", lwd = 2)
qqnorm(f$science, pch = 1, frame = FALSE)
qqline(f$science, col = "steelblue", lwd = 2)
qqnorm(f$socst, pch = 1, frame = FALSE)
qqline(f$socst, col = "steelblue", lwd = 2)
#build regression model
glm.fit1 <- lm(write ~ female + read + math + science + socst, data = f)
summary(glm.fit1)
#check for multicollinearity
library(car)
vif(glm.fit1)
predict(glm.fit1, data.frame(female=0, read=49, math=45, science=57, socst=52), type="response")
|
#### Use this script to plot features and prediction combines
####$ import libraries
library(psych)
library(ggplot2)
library(plotly)
library(dplyr)
library(MASS)
#### Interactive plot for acceleration 1 hour file
accPlot <- plot_ly(accHour, x = ~HEADER_TIME_STAMP, y = ~X_ACCELERATION_METERS_PER_SECOND_SQUARED,
name = 'X_acc', type = 'scatter', legendgroup = "RAW", mode = 'lines') %>%
add_trace(y = ~Y_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Y_acc', mode = 'lines') %>%
add_trace(y = ~Z_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Z_acc', mode = 'lines')
#accPlot
# c("white", "grey", "cyan", "blue", "green", "yellow", "orange", "red")
## High Signal = 8
## Ambulation = 7
## Other = 6
## Sedentary = 5
## Sleep = 4
## Non-wear = 3
## Low signal = 2
## No label = 1
featuretest <- featureHour
featuretest$MDCAS_PREDICTION <- factor(featuretest$MDCAS_PREDICTION , levels =c("Nonwear","sleep", "sedentary", "notthese", "ambulation"))
featureCol <- c("thistle", "skyblue", "navy", "green2", "gold3", "orangered3")
featurePlot <- plot_ly(featuretest, x = ~START_TIME, y = ~MDCAS_PREDICTION_PROB,
name = 'Prediction', type = 'bar', legendgroup = "ALGO", color = ~MDCAS_PREDICTION,
colors = featureCol) ##%>%
## add_trace(y = ~Y_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Y_acc', mode = 'lines') %>%
##add_trace(y = ~Z_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Z_acc', mode = 'lines')
#featurePlot
#saveHTMLPath = "C:/Users/Dharam/Downloads/MDCAS Files/MDCAS_ALGO_RAW_VIZ/SEDENTARY_GROUND_TRUTH.html"
subP <- subplot(style(accPlot, showlegend = TRUE), style(featurePlot, showlegend = TRUE), nrows = 2, margin = 0.05, shareX = TRUE)
### Save the plot as HTML and skip pandoc execution
# htmlwidgets::saveWidget(subP, saveHTMLPath, selfcontained = FALSE)
# transforms = list(
# list(
# type = 'groupby',
# groups = featureHour$cyl,
# styles = list(
# list(target = 4, value = list(marker =list(color = 'blue'))),
# list(target = 6, value = list(marker =list(color = 'red'))),
# list(target = 8, value = list(marker =list(color = 'black')))
# )))
saveCombo = "C:/Users/Dharam/Downloads/MDCAS Files/MDCAS_ALGO_RAW_VIZ/AMB_SLEEP_NONWEAR_2/AMB_SLEEP_NONWEAR_2.html"
htmlwidgets::saveWidget(subP, saveCombo, selfcontained = FALSE)
| /plotRawAndAlgorithm.R | no_license | adityaponnada/accelerometerPredictionVisualizer | R | false | false | 2,366 | r | #### Use this script to plot features and prediction combines
####$ import libraries
library(psych)
library(ggplot2)
library(plotly)
library(dplyr)
library(MASS)
#### Interactive plot for acceleration 1 hour file
accPlot <- plot_ly(accHour, x = ~HEADER_TIME_STAMP, y = ~X_ACCELERATION_METERS_PER_SECOND_SQUARED,
name = 'X_acc', type = 'scatter', legendgroup = "RAW", mode = 'lines') %>%
add_trace(y = ~Y_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Y_acc', mode = 'lines') %>%
add_trace(y = ~Z_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Z_acc', mode = 'lines')
#accPlot
# c("white", "grey", "cyan", "blue", "green", "yellow", "orange", "red")
## High Signal = 8
## Ambulation = 7
## Other = 6
## Sedentary = 5
## Sleep = 4
## Non-wear = 3
## Low signal = 2
## No label = 1
featuretest <- featureHour
featuretest$MDCAS_PREDICTION <- factor(featuretest$MDCAS_PREDICTION , levels =c("Nonwear","sleep", "sedentary", "notthese", "ambulation"))
featureCol <- c("thistle", "skyblue", "navy", "green2", "gold3", "orangered3")
featurePlot <- plot_ly(featuretest, x = ~START_TIME, y = ~MDCAS_PREDICTION_PROB,
name = 'Prediction', type = 'bar', legendgroup = "ALGO", color = ~MDCAS_PREDICTION,
colors = featureCol) ##%>%
## add_trace(y = ~Y_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Y_acc', mode = 'lines') %>%
##add_trace(y = ~Z_ACCELERATION_METERS_PER_SECOND_SQUARED, name = 'Z_acc', mode = 'lines')
#featurePlot
#saveHTMLPath = "C:/Users/Dharam/Downloads/MDCAS Files/MDCAS_ALGO_RAW_VIZ/SEDENTARY_GROUND_TRUTH.html"
subP <- subplot(style(accPlot, showlegend = TRUE), style(featurePlot, showlegend = TRUE), nrows = 2, margin = 0.05, shareX = TRUE)
### Save the plot as HTML and skip pandoc execution
# htmlwidgets::saveWidget(subP, saveHTMLPath, selfcontained = FALSE)
# transforms = list(
# list(
# type = 'groupby',
# groups = featureHour$cyl,
# styles = list(
# list(target = 4, value = list(marker =list(color = 'blue'))),
# list(target = 6, value = list(marker =list(color = 'red'))),
# list(target = 8, value = list(marker =list(color = 'black')))
# )))
saveCombo = "C:/Users/Dharam/Downloads/MDCAS Files/MDCAS_ALGO_RAW_VIZ/AMB_SLEEP_NONWEAR_2/AMB_SLEEP_NONWEAR_2.html"
htmlwidgets::saveWidget(subP, saveCombo, selfcontained = FALSE)
|
# Exploratory plots of the UCI household power consumption dataset.
# NOTE(review): setwd() uses a relative, machine-specific path -- this only
# works when run from the user's home directory; consider removing.
getwd()
setwd("Documents/github/DataScience/exploratory_data_analysis/week1")
# Read the full dataset (~2M rows); "?" marks missing values in this file.
data <- read.csv("./data/household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# Keep only observations from 2007-02-01 and 2007-02-02.
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
chosen <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
# Combine Date and Time into a POSIXct timestamp column (`weekdays`).
converting <- paste(as.Date(chosen$Date), chosen$Time)
chosen$weekdays <- as.POSIXct(converting)
# Plot 1: histogram of global active power.
hist(chosen$Global_active_power, main="Global Active Power", xlab="Global Active Power (kW)", ylab="Frequency", col="#de9b95")
# Plot 2: global active power over time.
plot(chosen$Global_active_power~chosen$weekdays, main="Global Active Power Thu to Sat", type="l",ylab="Global Active Power (kW)", xlab="", col = "#de9b95")
# Plot 3: the three sub-metering series overlaid on one time axis.
with(chosen, {plot(Sub_metering_1~weekdays, type="l", main = "Energy Sub-Metering", col="#de9b95",ylab="Global Active Power (kW)", xlab="")
# Add the remaining two sub-metering series to the same graph.
lines(Sub_metering_2~weekdays,col='black')
lines(Sub_metering_3~weekdays,col='#95b0de')
})
legend("topright", col=c("#de9b95", "black", "#95b0de"), lwd=1, legend=c("Kitchen", "Laundry Room", "Heater and AC"))
# Plot 4: 2x2 panel combining the previous series plus voltage and
# global reactive power.
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(1,1,1,1))
with(chosen, {
# Panel 1: global active power.
plot(Global_active_power~weekdays, type="l", ylab="Global Active Power (kW)", col="#de9b95", xlab="")
# Panel 2: voltage.
plot(Voltage~weekdays, type="l", ylab="Voltage (V)", xlab="datetime", col="#95b0de")
# Panel 3: sub-metering series.
plot(Sub_metering_1~weekdays, type="l", ylab="Global Active Power (kW)", col="#de9b95", xlab="")
# Overlay the other two sub-metering series on panel 3.
lines(Sub_metering_2~weekdays,col='black')
lines(Sub_metering_3~weekdays,col='#95b0de')
legend("topright", col=c("#de9b95", "black", "#95b0de"), lwd=1, bty="l",
legend=c("Kitchen", "Laundry Room", "Heater and AC"), cex = 0.65)
# Panel 4: global reactive power.
plot(Global_reactive_power~weekdays, type="l", ylab="Global Rective Power (kW)", col = "#a7c4bb", xlab="datetime")
})
| /exploratory_data_analysis/week1/project1/plots.R | no_license | yeshancqcq/DataScience | R | false | false | 2,190 | r | #Checking and setting working directory
getwd()
setwd("Documents/github/DataScience/exploratory_data_analysis/week1")
data <- read.csv("./data/household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# Choosing data between Feb 1 and Feb 2
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
chosen <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
# Converting to weekdays
converting <- paste(as.Date(chosen$Date), chosen$Time)
chosen$weekdays <- as.POSIXct(converting)
# making plot 1
hist(chosen$Global_active_power, main="Global Active Power", xlab="Global Active Power (kW)", ylab="Frequency", col="#de9b95")
# making plot 2
plot(chosen$Global_active_power~chosen$weekdays, main="Global Active Power Thu to Sat", type="l",ylab="Global Active Power (kW)", xlab="", col = "#de9b95")
# making plot 3
with(chosen, {plot(Sub_metering_1~weekdays, type="l", main = "Energy Sub-Metering", col="#de9b95",ylab="Global Active Power (kW)", xlab="")
#Adding two more lines to the same graph
lines(Sub_metering_2~weekdays,col='black')
lines(Sub_metering_3~weekdays,col='#95b0de')
})
legend("topright", col=c("#de9b95", "black", "#95b0de"), lwd=1, legend=c("Kitchen", "Laundry Room", "Heater and AC"))
# making plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(1,1,1,1))
with(chosen, {
# 1st plot
plot(Global_active_power~weekdays, type="l", ylab="Global Active Power (kW)", col="#de9b95", xlab="")
# 2nd plot
plot(Voltage~weekdays, type="l", ylab="Voltage (V)", xlab="datetime", col="#95b0de")
# 3rd plot
plot(Sub_metering_1~weekdays, type="l", ylab="Global Active Power (kW)", col="#de9b95", xlab="")
# Adding 2 more lines to 3rd plot
lines(Sub_metering_2~weekdays,col='black')
lines(Sub_metering_3~weekdays,col='#95b0de')
legend("topright", col=c("#de9b95", "black", "#95b0de"), lwd=1, bty="l",
legend=c("Kitchen", "Laundry Room", "Heater and AC"), cex = 0.65)
# 4th plot
plot(Global_reactive_power~weekdays, type="l", ylab="Global Rective Power (kW)", col = "#a7c4bb", xlab="datetime")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.quantregRanger}
\alias{predict.quantregRanger}
\title{quantregRanger prediction}
\usage{
\method{predict}{quantregRanger}(object, data = NULL, quantiles = c(0.1,
0.5, 0.9), all = TRUE, obs = 1, ...)
}
\arguments{
\item{object}{\code{quantregRanger} object.}
\item{data}{New test data of class \code{data.frame}}
\item{quantiles}{Numeric vector of quantiles that should be estimated}
\item{all}{A logical value. all=TRUE uses all observations for prediction.
all=FALSE uses only a certain number of observations per node for prediction
(set with argument obs). The default is all=TRUE}
\item{obs}{An integer number. Determines the maximal number of observations per node
to use for prediction. The input is ignored for all=TRUE. The default is obs=1}
\item{...}{Currently ignored.}
}
\value{
A matrix. The first column contains the conditional quantile
estimates for the first entry in the vector quantiles. The second
column contains the estimates for the second entry of quantiles and so on.
}
\description{
Predicts quantiles for a quantile regression forest trained with quantregRanger.
}
| /quantregRanger/man/predict.quantregRanger.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,238 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.quantregRanger}
\alias{predict.quantregRanger}
\title{quantregRanger prediction}
\usage{
\method{predict}{quantregRanger}(object, data = NULL, quantiles = c(0.1,
0.5, 0.9), all = TRUE, obs = 1, ...)
}
\arguments{
\item{object}{\code{quantregRanger} object.}
\item{data}{New test data of class \code{data.frame}}
\item{quantiles}{Numeric vector of quantiles that should be estimated}
\item{all}{A logical value. all=TRUE uses all observations for prediction.
all=FALSE uses only a certain number of observations per node for prediction
(set with argument obs). The default is all=TRUE}
\item{obs}{An integer number. Determines the maximal number of observations per node
to use for prediction. The input is ignored for all=TRUE. The default is obs=1}
\item{...}{Currently ignored.}
}
\value{
A matrix. The first column contains the conditional quantile
estimates for the first entry in the vector quantiles. The second
column contains the estimates for the second entry of quantiles and so on.
}
\description{
Predicts quantiles for a quantile regression forest trained with quantregRanger.
}
|
#############################
# Set the working directory #
#############################
# NOTE(review): machine-specific path; this script is not portable as-is.
setwd('~/Dropbox/videogame-archetypes/R')
###################
# Import libraries.
###################
library('archetypes')
library('vcd') # for ternaryplot
library('foreign') # Used to read .arff files.
library('xtable')
###################
# Tutorial
###################
# vignette("archetypes", package = "archetypes")
# edit(vignette("archetypes", package = "archetypes"))
###################
# Main Code
###################
raw = read.csv('../csv/ultimaiv_party.csv', header= T, sep =",")
# Save non-numeric data (character/factor columns kept aside for labelling).
characters <- raw$Character
classes <- raw$Class
weapons <- raw$Weapon
armors <- raw$Armor
gender <- raw$Gender
# Remove non-numeric data so only stat columns feed the archetypal analysis.
data <- raw[, -which(names(raw) %in% c("Character","Class","Weapon","Armor","Gender"))]
# Remove any columns with zero standard-deviation (constant columns break AA).
data <- subset(data, select = sapply(data,sd)!=0)
# Set seed for reproducible archetype fits.
set.seed(2013)
# Perform archetypal analysis for k = 1..10, 3 restarts each.
# NOTE(review): `as` and `data` shadow base/methods functions of the same
# name within this script; consider renaming.
as <- stepArchetypes(data, k=1:10, verbose =T, nrep = 3)
# Scree-plot of RSS vs number of archetypes (used to pick k).
screeplot(as, main="RSS Values Across Varying Number of Archetypes", cex.main=1.5, cex.axis=1.2, cex.lab=1.5)
# Start PDF output
# pdf('../pdf/lol_champions_base.pdf')
# Select the best of the 3 restarts for k = 3.
model <- bestModel(as[[3]])
# Transpose the representation of the model for readability.
params <- t(parameters(model))
params.table <- xtable(params)
# Barplot of the archetypes in the model.
barplot(model, data, percentiles=T)
# Get the alpha coefficients (archetype mixture weights) per character.
alphas <- cbind(cbind(data.frame(characters), classes), model$alphas)
alphas_table <- xtable(alphas)
# Characters sorted by weight on the first archetype, descending.
sort1 <- alphas[order(-alphas$'1'),]
# Ternary plot of the three archetype weights, colored by gender.
ternaryplot(coef(model, 'alphas'), col = 6*as.numeric(gender), id = characters, dimnames=c("1","2", "3"), cex=0.8, dimnames_position = c('corner'), labels = c('inside'), main="Archetypal Party Members in Ultima IV")
# Parallel Coordinates plot of the fitted archetypes over the data.
pcplot(model, data)
# End PDF output
# dev.off() | /R/ultimaiv_party.R | permissive | chongdashu/videogame-archetypes | R | false | false | 2,019 | r | #############################
# Set the working directory #
#############################
setwd('~/Dropbox/videogame-archetypes/R')
###################
# Import libraries.
###################
library('archetypes')
library('vcd') # for ternaryplot
library('foreign') # Used to read .arff files.
library('xtable')
###################
# Tutorial
###################
# vignette("archetypes", package = "archetypes")
# edit(vignette("archetypes", package = "archetypes"))
###################
# Main Code
###################
raw = read.csv('../csv/ultimaiv_party.csv', header= T, sep =",")
# Save non-numeric data.
characters <- raw$Character
classes <- raw$Class
weapons <- raw$Weapon
armors <- raw$Armor
gender <- raw$Gender
# Remove non-numeric data.
data <- raw[, -which(names(raw) %in% c("Character","Class","Weapon","Armor","Gender"))]
# Remove any columns with zero standard-deviation.
data <- subset(data, select = sapply(data,sd)!=0)
# Set seed.
set.seed(2013)
# Perform AA.
as <- stepArchetypes(data, k=1:10, verbose =T, nrep = 3)
# Scree-plot
screeplot(as, main="RSS Values Across Varying Number of Archetypes", cex.main=1.5, cex.axis=1.2, cex.lab=1.5)
# Start PDF output
# pdf('../pdf/lol_champions_base.pdf')
# Select the best model.
model <- bestModel(as[[3]])
# Transpose the representation of the model for readibility.
params <- t(parameters(model))
params.table <- xtable(params)
# Barplot of the archetypes in the model.
barplot(model, data, percentiles=T)
# Get the alpha coefficients of the data.
alphas <- cbind(cbind(data.frame(characters), classes), model$alphas)
alphas_table <- xtable(alphas)
# Sorted alphas
sort1 <- alphas[order(-alphas$'1'),]
# Graph the ternary-plot.
ternaryplot(coef(model, 'alphas'), col = 6*as.numeric(gender), id = characters, dimnames=c("1","2", "3"), cex=0.8, dimnames_position = c('corner'), labels = c('inside'), main="Archetypal Party Members in Ultima IV")
# Graph the Parallel Coordinates plot.
pcplot(model, data)
# End PDF output
# dev.off() |
\name{multi.mantel}
\alias{multi.mantel}
\title{Multiple matrix regression (partial Mantel test)}
\usage{
multi.mantel(Y, X, nperm=1000)
}
\arguments{
\item{Y}{single "dependent" square matrix. Can be either a symmetric matrix of class \code{"matrix"} or a distance matrix of class \code{"dist"}.}
\item{X}{a single independent matrix or multiple independent matrices in a list. As with \code{Y} can be a object of class \code{"matrix"} or class \code{"dist"}, or a list of such objects.}
\item{nperm}{number of Mantel permutations to be used to compute a P-value of the test.}
}
\description{
This function conducts a multiple matrix regression (partial Mantel test) and uses Mantel (1967) permutations to test the significance of the model and individual coefficients. It also returns the residual and predicted matrices.
}
\value{
An object of class \code{"multi.mantel"} consisting of the following elements:
\item{r.squared}{multiple R-squared.}
\item{coefficients}{model coefficients, including intercept.}
\item{tstatistic}{t-statistics for model coefficients.}
\item{fstatistic}{F-statistic for the overall model.}
\item{probt}{vector of probabilities, based on permutations, for \code{tstatistic}.}
\item{probF}{probability of F, based on Mantel permutations.}
\item{residuals}{matrix of residuals.}
\item{predicted}{matrix of predicted values.}
\item{nperm}{the number of permutations used.}
}
\details{
Printing the object to screen will result in a summary of the analysis similar to \code{summary.lm}, but with p-values derived from Mantel permutations.
Methods \code{residuals} and \code{fitted} can be used to return residual and fitted matrices, respectively.
}
\references{
Mantel, N. (1967) The detection of disease clustering and a generalized regression approach. \emph{Cancer Research}, \bold{27}, 209--220.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\keyword{comparative method}
\keyword{statistics}
\keyword{least squares}
\keyword{distance matrix}
| /man/multi.mantel.Rd | no_license | Phyo-Khine/phytools | R | false | false | 2,193 | rd | \name{multi.mantel}
\alias{multi.mantel}
\title{Multiple matrix regression (partial Mantel test)}
\usage{
multi.mantel(Y, X, nperm=1000)
}
\arguments{
\item{Y}{single "dependent" square matrix. Can be either a symmetric matrix of class \code{"matrix"} or a distance matrix of class \code{"dist"}.}
\item{X}{a single independent matrix or multiple independent matrices in a list. As with \code{Y} can be a object of class \code{"matrix"} or class \code{"dist"}, or a list of such objects.}
\item{nperm}{number of Mantel permutations to be used to compute a P-value of the test.}
}
\description{
This function conducts a multiple matrix regression (partial Mantel test) and uses Mantel (1967) permutations to test the significance of the model and individual coefficients. It also returns the residual and predicted matrices.
}
\value{
An object of class \code{"multi.mantel"} consisting of the following elements:
\item{r.squared}{multiple R-squared.}
\item{coefficients}{model coefficients, including intercept.}
\item{tstatistic}{t-statistics for model coefficients.}
\item{fstatistic}{F-statistic for the overall model.}
\item{probt}{vector of probabilities, based on permutations, for \code{tstatistic}.}
\item{probF}{probability of F, based on Mantel permutations.}
\item{residuals}{matrix of residuals.}
\item{predicted}{matrix of predicted values.}
\item{nperm}{the number of permutations used.}
}
\details{
Printing the object to screen will result in a summary of the analysis similar to \code{summary.lm}, but with p-values derived from Mantel permutations.
Methods \code{residuals} and \code{fitted} can be used to return residual and fitted matrices, respectively.
}
\references{
Mantel, N. (1967) The detection of disease clustering and a generalized regression approach. \emph{Cancer Research}, \bold{27}, 209--220.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\keyword{comparative method}
\keyword{statistics}
\keyword{least squares}
\keyword{distance matrix}
|
# Unit tests for DYNPROG_interface: fit up to 3 segments on the first
# column of the iris dataset.
test_that("DYNPROG returns a matrix with the same number of rows as the input data set", {
expect_equal(nrow(DYNPROG_interface(iris[,1],3)), length(iris[,1]))
})
# Cross-check the first changepoint against the reference implementation
# jointseg::Fpsn on the same data.
test_that("DYNPROG returns the same changepoint as jointseg::Fpsn for the first segment",{
jointseg.result<-jointseg::Fpsn(iris[,1],3)
DYNPROG.result <- DYNPROG_interface(iris[,1],3)
expect_equal(jointseg.result$t.est[1,1],which.min(DYNPROG.result[,1]))
})
| /tests/testthat/test-DYNPROG.R | no_license | TsChala/CS599Changepoint | R | false | false | 427 | r | test_that("DYNPROG returns a matrix with the same number of rows as the input data set", {
expect_equal(nrow(DYNPROG_interface(iris[,1],3)), length(iris[,1]))
})
test_that("DYNPROG returns the same changepoint as jointseg::Fpsn for the first segment",{
jointseg.result<-jointseg::Fpsn(iris[,1],3)
DYNPROG.result <- DYNPROG_interface(iris[,1],3)
expect_equal(jointseg.result$t.est[1,1],which.min(DYNPROG.result[,1]))
})
|
# Functions used for Data Analysis and Visualization Creations
# Open a trusted connection to a SQL Server database, execute the given
# stored-procedure / query text, and return the result as a tibble.
#
# Args:
#   srv.nm:       server name passed to the ODBC driver.
#   db.nm:        database name.
#   procedure.nm: SQL text to execute (e.g. "EXEC dbo.my_proc").
#
# Returns: a tibble with the query results.
stored.procedure.from.db <- function(srv.nm, db.nm, procedure.nm) {
  db.con <- dbConnect(odbc::odbc(),
                      driver = "SQL Server",
                      server = srv.nm,
                      database = db.nm,
                      trusted_connection = "yes")
  # Ensure the connection is released even if the query errors.
  on.exit(odbc::dbDisconnect(db.con), add = TRUE)
  w.tbl <- DBI::dbGetQuery(db.con, procedure.nm)
  # Bug fix: the original called as_tibble(w.tbl) without assigning the
  # result, so a plain data.frame was returned despite the tibble intent.
  w.tbl <- as_tibble(w.tbl)
  return(w.tbl)
}
# Download one Census API table (via censusapi::getCensus) for the four
# central Puget Sound counties, attach human-readable variable labels, and
# append a "Central Puget Sound" regional total whose margins of error are
# aggregated with moe_sum.
#
# Args:
#   c.type:  Census API dataset name (e.g. "acs/acs5").
#   c.yr:    data vintage year.
#   c.table: table/group id (e.g. "B03002").
#   c.geo:   geography filter; defaults to the four PSRC counties.
#   c.state: state filter; defaults to Washington (FIPS 53).
#   l.yr:    vintage used when fetching variable labels (global `label.yr`).
#
# Returns: a wide data frame, one row per geography plus the regional total.
get.county.census <- function(c.type, c.yr, c.table, c.geo="county:033,035,053,061", c.state="state:53", l.yr = label.yr) {
# Download Table from API, keep estimate (E) / margin-of-error (M) columns,
# and reshape to long format for joining with labels.
tbl.values <- suppressWarnings(getCensus(name = c.type, vintage = c.yr, vars = c("NAME",paste0("group(",c.table,")")),region = c.geo, regionin = c.state) %>%
select(ends_with(c("E","M"))) %>%
select(-state) %>%
rename(Geography=NAME) %>%
pivot_longer(cols=contains("_"), names_to="name", values_to="value") %>%
mutate(Geography = str_replace(Geography, ", Washington", "")))
# Get variable labels and clean them up for display.
tbl.vars <- listCensusMetadata(name = c.type, vintage = l.yr, type = "variables", group = c.table) %>%
filter(grepl("(E|M)$", name)) %>%
select(name,label) %>%
mutate(label = gsub("!!"," ", label), label = gsub("Margin of Error","MoE", label), label = gsub(" Total:",":", label))
# Join values and labels, then pivot back to wide (one column per label).
tbl.values <- inner_join(tbl.values, tbl.vars, by="name") %>%
select(-name) %>%
pivot_wider(names_from = label)
# -555555555 appears to be the Census API placeholder for an unavailable
# MoE -- zero it out. TODO confirm against the API annotation docs.
tbl.values[tbl.values == -555555555 ] <- 0
# Add total for region with calculated MoE for county to region aggregation
region.moe <- suppressWarnings(tbl.values %>% select(contains("MoE")) %>% mutate(PSRC=1) %>% group_by(PSRC) %>% summarise_all(moe_sum))
region.tot <- tbl.values %>% select(!contains("MOE"),-Geography) %>% mutate(PSRC=1) %>% group_by(PSRC) %>% summarise_all(sum)
region <- inner_join(region.tot,region.moe,by="PSRC") %>% mutate(Geography="Central Puget Sound") %>% select(-PSRC)
# Append Region Total to table
tbl.values <- bind_rows(tbl.values,region)
return(tbl.values)
}
# Pull a single column out of the nested `results` structure for the given
# geographies / year / ACS type / table and sum it across the selected rows.
# NOTE(review): the defaults (data=results, c.geo=c, c.year=c.yr) reference
# global objects -- in particular `c.geo=c` would resolve to base R's `c()`
# function unless a global `c` exists at call time. Confirm callers always
# supply these arguments explicitly.
return.value <-function(data=results, c.geo=c, c.year=c.yr, acs.typ, c.tbl, c.val ) {
r <- data[[c.year]][['tables']][[acs.typ]][[c.tbl]] %>%
filter(Geography %in% c.geo) %>%
pull(c.val) %>%
sum()
return(r)
}
# Functions ---------------------------------------------------------------
# Download one ACS table (via tidycensus::get_acs) for PSRC counties, places,
# MSAs and tracts, add a computed regional total, and attach/clean variable
# labels. `t.type` selects the table family: "subject" tables are filtered to
# RACE rows; "detailed" tables are split into Hispanic-origin / race columns.
#
# Args:
#   c.yr:  ACS year (defaults to global `yr`).
#   c.tbl: ACS table id (e.g. "B03002" or "S0201").
#   c.acs: survey, e.g. "acs5" (defaults to global `acs`).
#   t.type: "subject" or "detailed".
#
# Relies on globals: psrc.county, psrc.cities, msa.list.
# Returns: a long data frame of estimates/MoEs with parsed label columns.
download.equity.data.acs <- function(c.yr=yr, c.tbl, c.acs=acs, t.type) {
results <- NULL
# Subject tables live under a separate API endpoint suffix.
if (t.type=="subject") {c.var<-paste0(c.acs,"/subject")} else {c.var<-paste0(c.acs)}
# Load labels for all variables in the dataset
variable.labels <- load_variables(c.yr, c.var, cache = TRUE) %>% rename(variable = name)
# Download the data for all counties
county.tbl <- get_acs(geography = "county", state="53", year=c.yr, survey = c.acs, table = c.tbl) %>%
mutate(NAME = gsub(", Washington", "", NAME)) %>%
filter(NAME %in% psrc.county) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="County")
# Download the data for all places (drop unincorporated CDPs)
place.tbl <- get_acs(geography = "place", state="53", year=c.yr, survey = c.acs, table = c.tbl) %>%
filter(!grepl('CDP', NAME)) %>%
mutate(NAME = gsub(" city, Washington", "", NAME)) %>%
mutate(NAME = gsub(" town, Washington", "", NAME)) %>%
filter(NAME %in% psrc.cities) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="Place")
# Download the data for all msa's (metro areas only, no micro areas)
msa.tbl <- get_acs(geography = "metropolitan statistical area/micropolitan statistical area", year=c.yr, survey = c.acs, table = c.tbl) %>%
filter(!grepl('Micro Area', NAME))
msa.tbl <- msa.tbl %>%
filter(GEOID %in% msa.list) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="MSA")
# Download Tract data for the four PSRC counties
tract.tbl <- get_acs(geography = "tract", state="53", year=c.yr, survey = c.acs, table = c.tbl) %>%
filter(str_detect(NAME, 'King County|Kitsap County|Pierce County|Snohomish County')) %>%
mutate(NAME = gsub(", Washington", "", NAME)) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="Tract")
# Get a region total (county sum; MoE aggregated with moe_sum)
region <- county.tbl %>%
select(variable, estimate, moe) %>%
group_by(variable) %>%
summarize(sumest = sum(estimate), summoe = moe_sum(moe, estimate)) %>%
rename(estimate=sumest, moe=summoe) %>%
mutate(GEOID="53033035053061", NAME="Region",ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="Region")
# Stack all geographies and attach variable labels.
results <- bind_rows(list(county.tbl, region, place.tbl, msa.tbl, tract.tbl))
results <- left_join(results,variable.labels,by=c("variable"))
if (t.type=="subject") {
# Keep count (not percent) RACE rows; split the variable id into parts.
results <- results %>%
filter(!grepl('Percent', label), grepl('RACE', label)) %>%
separate(variable, c("ACS_Table", "ACS_Subject","ACS_Variable"), "_")
}
if (t.type=="detailed") {
# Parse the "!!"-delimited label into Hispanic-origin / race columns,
# keeping only top-level ("All") race categories.
results <- results %>%
separate(variable, c("ACS_Table", "ACS_Variable"), "_") %>%
mutate(ACS_Subject="C01") %>%
mutate(label = gsub("Estimate!!","",label)) %>%
mutate(label = gsub(":","",label)) %>%
separate(label, c("temp", "ACS_Hispanic_Origin","ACS_Race","ACS_Race_Category"), "!!") %>%
select(-temp) %>%
mutate(ACS_Hispanic_Origin = replace_na(ACS_Hispanic_Origin,"Total"), ACS_Race = replace_na(ACS_Race,"Total"), ACS_Race_Category = replace_na(ACS_Race_Category,"All")) %>%
filter(ACS_Race_Category=="All") %>%
select(-ACS_Race_Category,-concept) %>%
mutate(ACS_Category="Population")
}
return(results)
} | /asian-pacific-heritage/functions.R | no_license | psrc/equity-data-tools | R | false | false | 5,603 | r | # Functions used for Data Analysis and Visualization Creations
# Open a trusted connection to a SQL Server database, execute the given
# stored-procedure / query text, and return the result as a tibble.
#
# Args:
#   srv.nm:       server name passed to the ODBC driver.
#   db.nm:        database name.
#   procedure.nm: SQL text to execute (e.g. "EXEC dbo.my_proc").
#
# Returns: a tibble with the query results.
stored.procedure.from.db <- function(srv.nm, db.nm, procedure.nm) {
  db.con <- dbConnect(odbc::odbc(),
                      driver = "SQL Server",
                      server = srv.nm,
                      database = db.nm,
                      trusted_connection = "yes")
  # Ensure the connection is released even if the query errors.
  on.exit(odbc::dbDisconnect(db.con), add = TRUE)
  w.tbl <- DBI::dbGetQuery(db.con, procedure.nm)
  # Bug fix: the original called as_tibble(w.tbl) without assigning the
  # result, so a plain data.frame was returned despite the tibble intent.
  w.tbl <- as_tibble(w.tbl)
  return(w.tbl)
}
get.county.census <- function(c.type, c.yr, c.table, c.geo="county:033,035,053,061", c.state="state:53", l.yr = label.yr) {
# Download Table from API
tbl.values <- suppressWarnings(getCensus(name = c.type, vintage = c.yr, vars = c("NAME",paste0("group(",c.table,")")),region = c.geo, regionin = c.state) %>%
select(ends_with(c("E","M"))) %>%
select(-state) %>%
rename(Geography=NAME) %>%
pivot_longer(cols=contains("_"), names_to="name", values_to="value") %>%
mutate(Geography = str_replace(Geography, ", Washington", "")))
# Get variable labels
tbl.vars <- listCensusMetadata(name = c.type, vintage = l.yr, type = "variables", group = c.table) %>%
filter(grepl("(E|M)$", name)) %>%
select(name,label) %>%
mutate(label = gsub("!!"," ", label), label = gsub("Margin of Error","MoE", label), label = gsub(" Total:",":", label))
# JOin values and labels
tbl.values <- inner_join(tbl.values, tbl.vars, by="name") %>%
select(-name) %>%
pivot_wider(names_from = label)
tbl.values[tbl.values == -555555555 ] <- 0
# Add total for region with calculated MoE for county to region aggregation
region.moe <- suppressWarnings(tbl.values %>% select(contains("MoE")) %>% mutate(PSRC=1) %>% group_by(PSRC) %>% summarise_all(moe_sum))
region.tot <- tbl.values %>% select(!contains("MOE"),-Geography) %>% mutate(PSRC=1) %>% group_by(PSRC) %>% summarise_all(sum)
region <- inner_join(region.tot,region.moe,by="PSRC") %>% mutate(Geography="Central Puget Sound") %>% select(-PSRC)
# Append Region Total to table
tbl.values <- bind_rows(tbl.values,region)
return(tbl.values)
}
return.value <-function(data=results, c.geo=c, c.year=c.yr, acs.typ, c.tbl, c.val ) {
r <- data[[c.year]][['tables']][[acs.typ]][[c.tbl]] %>%
filter(Geography %in% c.geo) %>%
pull(c.val) %>%
sum()
return(r)
}
# Functions ---------------------------------------------------------------
download.equity.data.acs <- function(c.yr=yr, c.tbl, c.acs=acs, t.type) {
results <- NULL
if (t.type=="subject") {c.var<-paste0(c.acs,"/subject")} else {c.var<-paste0(c.acs)}
# Load labels for all variables in the dataset
variable.labels <- load_variables(c.yr, c.var, cache = TRUE) %>% rename(variable = name)
# Download the data for all counties
county.tbl <- get_acs(geography = "county", state="53", year=c.yr, survey = c.acs, table = c.tbl) %>%
mutate(NAME = gsub(", Washington", "", NAME)) %>%
filter(NAME %in% psrc.county) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="County")
# Download the data for all places
place.tbl <- get_acs(geography = "place", state="53", year=c.yr, survey = c.acs, table = c.tbl) %>%
filter(!grepl('CDP', NAME)) %>%
mutate(NAME = gsub(" city, Washington", "", NAME)) %>%
mutate(NAME = gsub(" town, Washington", "", NAME)) %>%
filter(NAME %in% psrc.cities) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="Place")
# Download the data for all msa's
msa.tbl <- get_acs(geography = "metropolitan statistical area/micropolitan statistical area", year=c.yr, survey = c.acs, table = c.tbl) %>%
filter(!grepl('Micro Area', NAME))
msa.tbl <- msa.tbl %>%
filter(GEOID %in% msa.list) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="MSA")
# Download Tract data
tract.tbl <- get_acs(geography = "tract", state="53", year=c.yr, survey = c.acs, table = c.tbl) %>%
filter(str_detect(NAME, 'King County|Kitsap County|Pierce County|Snohomish County')) %>%
mutate(NAME = gsub(", Washington", "", NAME)) %>%
mutate(ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="Tract")
# Get a region total and add it to the county and place table
region <- county.tbl %>%
select(variable, estimate, moe) %>%
group_by(variable) %>%
summarize(sumest = sum(estimate), summoe = moe_sum(moe, estimate)) %>%
rename(estimate=sumest, moe=summoe) %>%
mutate(GEOID="53033035053061", NAME="Region",ACS_Year=c.yr, ACS_Type=c.acs, ACS_Geography="Region")
results <- bind_rows(list(county.tbl, region, place.tbl, msa.tbl, tract.tbl))
results <- left_join(results,variable.labels,by=c("variable"))
if (t.type=="subject") {
results <- results %>%
filter(!grepl('Percent', label), grepl('RACE', label)) %>%
separate(variable, c("ACS_Table", "ACS_Subject","ACS_Variable"), "_")
}
if (t.type=="detailed") {
results <- results %>%
separate(variable, c("ACS_Table", "ACS_Variable"), "_") %>%
mutate(ACS_Subject="C01") %>%
mutate(label = gsub("Estimate!!","",label)) %>%
mutate(label = gsub(":","",label)) %>%
separate(label, c("temp", "ACS_Hispanic_Origin","ACS_Race","ACS_Race_Category"), "!!") %>%
select(-temp) %>%
mutate(ACS_Hispanic_Origin = replace_na(ACS_Hispanic_Origin,"Total"), ACS_Race = replace_na(ACS_Race,"Total"), ACS_Race_Category = replace_na(ACS_Race_Category,"All")) %>%
filter(ACS_Race_Category=="All") %>%
select(-ACS_Race_Category,-concept) %>%
mutate(ACS_Category="Population")
}
return(results)
} |
library(timereg)
### Name: pc.hazard
### Title: Simulation of Piecewise constant hazard model (Cox).
### Aliases: pc.hazard pchazard.sim
### Keywords: survival
### ** Examples
rates <- c(0,0.01,0.052,0.01,0.04)
breaks <- c(0,10, 20, 30, 40)
haz <- cbind(breaks,rates)
n <- 1000
X <- rbinom(n,1,0.5)
beta <- 0.2
rrcox <- exp(X * beta)
cumhaz <- cumsum(c(0,diff(breaks)*rates[-1]))
cumhaz <- cbind(breaks,cumhaz)
pctime <- pc.hazard(haz,1000,cum.hazard=FALSE)
par(mfrow=c(1,2))
ss <- aalen(Surv(time,status)~+1,data=pctime,robust=0)
plot(ss)
lines(cumhaz,col=2,lwd=2)
pctimecox <- pc.hazard(cumhaz,rrcox)
pctime <- cbind(pctime,X)
ssx <- cox.aalen(Surv(time,status)~+prop(X),data=pctimecox,robust=0)
plot(ssx)
lines(cumhaz,col=2,lwd=2)
### simulating data with hazard as real data
data(TRACE)
par(mfrow=c(1,2))
ss <- cox.aalen(Surv(time,status==9)~+prop(vf),data=TRACE,robust=0)
par(mfrow=c(1,2))
plot(ss)
###
pctime <- pc.hazard(ss$cum,1000)
###
sss <- aalen(Surv(time,status)~+1,data=pctime,robust=0)
lines(sss$cum,col=2,lwd=2)
pctime <- pc.hazard(ss$cum,rrcox)
pctime <- cbind(pctime,X)
###
sss <- cox.aalen(Surv(time,status)~+prop(X),data=pctime,robust=0)
summary(sss)
plot(ss)
lines(sss$cum,col=3,lwd=3)
| /data/genthat_extracted_code/timereg/examples/pc.hazard.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,229 | r | library(timereg)
### Name: pc.hazard
### Title: Simulation of Piecewise constant hazard model (Cox).
### Aliases: pc.hazard pchazard.sim
### Keywords: survival
### ** Examples
rates <- c(0,0.01,0.052,0.01,0.04)
breaks <- c(0,10, 20, 30, 40)
haz <- cbind(breaks,rates)
n <- 1000
X <- rbinom(n,1,0.5)
beta <- 0.2
rrcox <- exp(X * beta)
cumhaz <- cumsum(c(0,diff(breaks)*rates[-1]))
cumhaz <- cbind(breaks,cumhaz)
pctime <- pc.hazard(haz,1000,cum.hazard=FALSE)
par(mfrow=c(1,2))
ss <- aalen(Surv(time,status)~+1,data=pctime,robust=0)
plot(ss)
lines(cumhaz,col=2,lwd=2)
pctimecox <- pc.hazard(cumhaz,rrcox)
pctime <- cbind(pctime,X)
ssx <- cox.aalen(Surv(time,status)~+prop(X),data=pctimecox,robust=0)
plot(ssx)
lines(cumhaz,col=2,lwd=2)
### simulating data with hazard as real data
data(TRACE)
par(mfrow=c(1,2))
ss <- cox.aalen(Surv(time,status==9)~+prop(vf),data=TRACE,robust=0)
par(mfrow=c(1,2))
plot(ss)
###
pctime <- pc.hazard(ss$cum,1000)
###
sss <- aalen(Surv(time,status)~+1,data=pctime,robust=0)
lines(sss$cum,col=2,lwd=2)
pctime <- pc.hazard(ss$cum,rrcox)
pctime <- cbind(pctime,X)
###
sss <- cox.aalen(Surv(time,status)~+prop(X),data=pctime,robust=0)
summary(sss)
plot(ss)
lines(sss$cum,col=3,lwd=3)
|
## version: 1.33
## method: get
## path: /plugins
## code: 200
NULL
# Build a data.frame without converting character columns to factors,
# regardless of the R version's default for stringsAsFactors.
data_frame <- function(...) {
  cols <- list(...)
  do.call(data.frame, c(cols, list(stringsAsFactors = FALSE)))
}
# Sample "settings" payload for one Docker plugin, mirroring the /plugins
# API response fields (mounts, env, args, devices) for this test fixture.
settings <- list(
mounts = data_frame(
name = "some-mount",
description = "This is a mount that's used by the plugin.",
settable = structure(list("string"), class = "AsIs"),
source = "/var/lib/docker/plugins/",
destination = "/mnt/state",
type = "bind",
options = I(list(c("rbind", "rw")))),
env = "DEBUG=0",
args = "string",
devices = data_frame(
name = "string",
description = "string",
settable = I(list("string")),
path = "/dev/fuse"))
config <- list(
docker_version = "17.06.0-ce",
description = "A sample volume plugin for Docker",
documentation = "https://docs.docker.com/engine/extend/plugins/",
interface = list(
types = data_frame(
prefix = NA_character_,
capability = NA_character_,
version = NA_character_),
socket = "plugins.sock"),
entrypoint = c("/usr/bin/sample-volume-plugin", "/data"),
work_dir = "/bin/",
user = list(uid = 1000L, gid = 1000L),
network = list(type = "host"),
linux = list(
capabilities = c("CAP_SYS_ADMIN", "CAP_SYSLOG"),
allow_all_devices = FALSE,
devices = data_frame(
name = "string",
description = "string",
settable = I(list("string")),
path = "/dev/fuse")),
propagated_mount = "/mnt/volumes",
ipc_host = FALSE,
pid_host = FALSE,
mounts = data_frame(
name = "some-mount",
description = "This is a mount that's used by the plugin.",
settable = structure(list("string"), class = "AsIs"),
source = "/var/lib/docker/plugins/",
destination = "/mnt/state",
type = "bind",
options = I(list(c("rbind", "rw")))),
env = data_frame(
name = "DEBUG",
description = "If set, prints debug messages",
settable = I(list(character(0))),
value = "0"),
args = list(
name = "args",
description = "command line arguments",
settable = "string",
value = "string"),
rootfs = list(
type = "layers",
diff_ids = c(
"sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887",
"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
)))
data_frame(
id = "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078",
name = "tiborvass/sample-volume-plugin",
enabled = TRUE,
settings = I(list(settings)),
plugin_reference = "localhost:5000/tiborvass/sample-volume-plugin:latest",
config = I(list(config)))
| /tests/testthat/sample_responses/v1.33/plugin_list.R | no_license | cran/stevedore | R | false | false | 2,543 | r | ## version: 1.33
## method: get
## path: /plugins
## code: 200
NULL
data_frame <- function(...) {
data.frame(..., stringsAsFactors = FALSE)
}
settings <- list(
mounts = data_frame(
name = "some-mount",
description = "This is a mount that's used by the plugin.",
settable = structure(list("string"), class = "AsIs"),
source = "/var/lib/docker/plugins/",
destination = "/mnt/state",
type = "bind",
options = I(list(c("rbind", "rw")))),
env = "DEBUG=0",
args = "string",
devices = data_frame(
name = "string",
description = "string",
settable = I(list("string")),
path = "/dev/fuse"))
config <- list(
docker_version = "17.06.0-ce",
description = "A sample volume plugin for Docker",
documentation = "https://docs.docker.com/engine/extend/plugins/",
interface = list(
types = data_frame(
prefix = NA_character_,
capability = NA_character_,
version = NA_character_),
socket = "plugins.sock"),
entrypoint = c("/usr/bin/sample-volume-plugin", "/data"),
work_dir = "/bin/",
user = list(uid = 1000L, gid = 1000L),
network = list(type = "host"),
linux = list(
capabilities = c("CAP_SYS_ADMIN", "CAP_SYSLOG"),
allow_all_devices = FALSE,
devices = data_frame(
name = "string",
description = "string",
settable = I(list("string")),
path = "/dev/fuse")),
propagated_mount = "/mnt/volumes",
ipc_host = FALSE,
pid_host = FALSE,
mounts = data_frame(
name = "some-mount",
description = "This is a mount that's used by the plugin.",
settable = structure(list("string"), class = "AsIs"),
source = "/var/lib/docker/plugins/",
destination = "/mnt/state",
type = "bind",
options = I(list(c("rbind", "rw")))),
env = data_frame(
name = "DEBUG",
description = "If set, prints debug messages",
settable = I(list(character(0))),
value = "0"),
args = list(
name = "args",
description = "command line arguments",
settable = "string",
value = "string"),
rootfs = list(
type = "layers",
diff_ids = c(
"sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887",
"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
)))
data_frame(
id = "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078",
name = "tiborvass/sample-volume-plugin",
enabled = TRUE,
settings = I(list(settings)),
plugin_reference = "localhost:5000/tiborvass/sample-volume-plugin:latest",
config = I(list(config)))
|
##
## Examples: Completely Randomized Design (CRD)
##
## The parameters can be: vectors, design matrix and the response variable,
## data.frame or aov
## Example 1
library(ScottKnott)
data(CRD1)
## From: vectors x and y
sk1 <- with(CRD1,
SK(x=x,
y=y,
model='y ~ x',
which='x'))
summary(sk1)
plot(sk1)
## From: design matrix (dm) and response variable (y)
sk2 <- with(CRD1,
SK(x=dm,
y=y,
model='y ~ x',
which='x',
dispersion='s'))
summary(sk2)
plot(sk2,
pch=15,
col=c('blue', 'red'),
mm.lty=4,
ylab='Response',
title=NULL)
## From: data.frame (dfm)
sk3 <- with(CRD1,
SK(x=dfm,
model='y ~ x',
which='x',
dispersion='se'))
summary(sk3)
plot(sk3,
mm.lty=3,
id.col=FALSE,
title=NULL)
## From: aov
av1 <- with(CRD1,
aov(y ~ x,
data=dfm))
summary(av1)
sk4 <- SK(x=av1,
which='x')
summary(sk4)
plot(sk4, title=NULL)
## Example 2
library(ScottKnott)
data(CRD2)
## From: vectors x and y
sk5 <- with(CRD2,
SK(x=x,
y=y,
model='y ~ x',
which='x'))
summary(sk5)
plot(sk5,
id.las=2,
rl=FALSE)
## From: design matrix (dm) and response variable (y)
sk6 <- with(CRD2,
SK(x=dm,
y=y,
model='y ~ x',
which='x',
sig.level=0.005))
summary(sk6)
plot(sk6,
col=rainbow(max(sk6$groups)),
mm.lty=3,
id.las=2,
rl=FALSE,
title='sig.level=0.005', )
## From: data.frame (dfm)
sk7 <- with(CRD2,
SK(x=dfm,
model='y ~ x',
which='x'))
summary(sk7)
plot(sk7,
col=rainbow(max(sk7$groups)),
id.las=2,
id.col=FALSE,
rl=FALSE)
## From: aov
av2 <- with(CRD2,
aov(y ~ x,
data=dfm))
summary(av2)
sk8 <- SK(x=av2,
which='x')
summary(sk8)
plot(sk8,
col=rainbow(max(sk8$groups)),
rl=FALSE,
id.las=2,
id.col=FALSE,
title=NULL)
| /demo/CRD.R | no_license | klainfo/ScottKnott | R | false | false | 2,263 | r | ##
## Examples: Completely Randomized Design (CRD)
##
## The parameters can be: vectors, design matrix and the response variable,
## data.frame or aov
## Example 1
library(ScottKnott)
data(CRD1)
## From: vectors x and y
sk1 <- with(CRD1,
SK(x=x,
y=y,
model='y ~ x',
which='x'))
summary(sk1)
plot(sk1)
## From: design matrix (dm) and response variable (y)
sk2 <- with(CRD1,
SK(x=dm,
y=y,
model='y ~ x',
which='x',
dispersion='s'))
summary(sk2)
plot(sk2,
pch=15,
col=c('blue', 'red'),
mm.lty=4,
ylab='Response',
title=NULL)
## From: data.frame (dfm)
sk3 <- with(CRD1,
SK(x=dfm,
model='y ~ x',
which='x',
dispersion='se'))
summary(sk3)
plot(sk3,
mm.lty=3,
id.col=FALSE,
title=NULL)
## From: aov
av1 <- with(CRD1,
aov(y ~ x,
data=dfm))
summary(av1)
sk4 <- SK(x=av1,
which='x')
summary(sk4)
plot(sk4, title=NULL)
## Example 2
library(ScottKnott)
data(CRD2)
## From: vectors x and y
sk5 <- with(CRD2,
SK(x=x,
y=y,
model='y ~ x',
which='x'))
summary(sk5)
plot(sk5,
id.las=2,
rl=FALSE)
## From: design matrix (dm) and response variable (y)
sk6 <- with(CRD2,
SK(x=dm,
y=y,
model='y ~ x',
which='x',
sig.level=0.005))
summary(sk6)
plot(sk6,
col=rainbow(max(sk6$groups)),
mm.lty=3,
id.las=2,
rl=FALSE,
title='sig.level=0.005', )
## From: data.frame (dfm)
sk7 <- with(CRD2,
SK(x=dfm,
model='y ~ x',
which='x'))
summary(sk7)
plot(sk7,
col=rainbow(max(sk7$groups)),
id.las=2,
id.col=FALSE,
rl=FALSE)
## From: aov
av2 <- with(CRD2,
aov(y ~ x,
data=dfm))
summary(av2)
sk8 <- SK(x=av2,
which='x')
summary(sk8)
plot(sk8,
col=rainbow(max(sk8$groups)),
rl=FALSE,
id.las=2,
id.col=FALSE,
title=NULL)
|
#' @import rlang
#' @importFrom httr GET http_type http_error status_code content
#' @importFrom purrr modify_if map
NULL
#' Get articles from NYT Archieve
#'
#' The function returns a tibble as default, which contains all New York Times articles of the specified
#' year and month. Articles are available from 1851 and up to the present year and month.
#'
#' @param month the desired month provided as a wholenumber
#' @param year the desired year provided as a wholenumber
#' @param tibble if TRUE the API result is formatted and returned as a tibble, else a complete response
#' is provided
#' @seealso \link[Rnewyorktimes]{nyt_token}
#' @export
#' @examples
#' \dontrun{nyt_archieve(month = 1, year = 1970) #Remember to set token}
nyt_archieve <- function(month = 1, year = 1970, tibble = TRUE ) {
  # Fail fast: a token must be configured and month/year must be valid.
  api_key <- is_token_set()
  archieve_input_success(month = month, year = year)

  url <- "http://api.nytimes.com/"
  path <- sprintf("svc/archive/v1/%s/%s.json",
                  year,
                  month)

  # Fetch and parse the archive for the requested month.
  response <- GET(url, path = path, query = list(`api-key` = api_key))
  is_json(response)
  payload <- jsonlite::fromJSON(content(response, "text", encoding = "UTF-8"),
                                simplifyVector = FALSE)
  request_failed(response, payload)

  # Either a formatted tibble of articles, or the raw parsed content
  # together with the full HTTP response.
  res <- if (tibble) {
    archieve_tibble(payload$response$docs)
  } else {
    list(content = payload,
         response = response)
  }

  # Metadata consumed by the print method.
  attr(res, "tibble") <- tibble
  attr(res, "path") <- path
  attr(res, "date") <- sprintf("%s-%s", year, month)
  attr(res, "size") <- bytes(length(response$content))
  class(res) <- c("NewYorkTimesAPI", "NewYorkTimesAPI_archieve", class(res))
  res
}
# Validate the month/year pair before hitting the API: both must be whole
# numbers, month in 1..12, year from 1851 (first NYT archive year) up to the
# current year, and not a future month of the current year.  Aborts (via
# rlang::abort) on the first violation; returns NULL invisibly otherwise.
archieve_input_success <- function(month, year) {
  # All conditions below are scalar, so the short-circuiting `||`/`&&`
  # operators are correct here (the original elementwise `|`/`&` was an
  # idiom slip; since R 4.3 `||` also errors loudly on vector input).
  if (!is.numeric(month) || !is.numeric(year)) {
    abort("month and year must be whole numbers provided as integers or double")
  }
  if (!is_whole_number(month) || !is_whole_number(year)) {
    abort("month and year must be whole numbers provided as integers or double")
  }
  if (!(month %in% 1:12)) {
    abort("month can only take values from 1 to 12")
  }
  if (!(year %in% 1851:as.numeric(format(Sys.time(), "%Y")))) {
    abort(
      sprintf("year can only take values from 1851 to %s",
              format(Sys.time(), "%Y"))
    )
  }
  # Reject months of the current year that have not happened yet.
  if (year == sys_time(TRUE, "%Y") && month > sys_time(TRUE, "%m")) {
    abort(
      sprintf(
        "month must be equal to or smaller than %s",
        sys_time(TRUE, format = "%m")
      )
    )
  }
}
#' @export
print.NewYorkTimesAPI_archieve <- function(x, ...) {
  # Header built from the request metadata that nyt_archieve() stored
  # as attributes on the result.
  cat(sprintf("<New York Times - Archieve>\n Date: %s\n Path: %s\n Size: %s\n",
              attr(x, "date"),
              attr(x, "path"),
              attr(x, "size")))
  # isTRUE() is the robust form of `== TRUE` (handles NULL/NA attributes).
  if (isTRUE(attr(x, "tibble"))) {
    # Strip the API classes so the tibble/data.frame print method is used.
    res <- x
    class(res) <- c("tbl_df", "tbl", "data.frame")
    print(res)
  } else {
    # Compact one-level structure overview of the raw content/response list.
    cat(utils::str(x, max.level = 1, give.attr = FALSE))
  }
  # Print methods conventionally return their argument invisibly so the
  # object can keep flowing through a pipe.
  invisible(x)
}
# Convert the parsed list of raw article records (parsed$response$docs) into
# a tibble with one row per article.  Fields missing from a record are
# replaced by NA so every column has length(x) entries.
archieve_tibble <- function(x) {
  # Extract one field from every record, keeping it as a list-column
  # (records may hold nested lists such as headline/keywords/byline).
  pull_list <- function(field) {
    modify_if(map(x, ~ .x[[field]]), is_null, ~ NA)
  }
  # Extract one field and flatten it to a character vector.
  pull_chr <- function(field) {
    as.character(pull_list(field))
  }

  tibble::tibble(
    web_url = pull_chr("web_url"),
    snippet = pull_chr("snippet"),
    lead_paragraph = pull_chr("lead_paragraph"),
    print_page = pull_chr("print_page"),
    blog = pull_list("blog"),
    source = pull_chr("source"),
    multimedia = pull_list("multimedia"),
    headline = pull_list("headline"),
    keywords = pull_list("keywords"),
    # NOTE: the column name "pub_data" (not "pub_date") is a historical typo
    # kept as-is so existing callers keep working.
    pub_data = pull_list("pub_date"),
    document_type = pull_list("document_type"),
    news_desk = pull_list("news_desk"),
    section_name = pull_list("section_name"),
    subsection_name = pull_list("subsection_name"),
    byline = pull_list("byline"),
    type_of_material = pull_list("type_of_material"),
    id = pull_list("_id"),
    word_count = pull_list("word_count"),
    slideshow_credits = pull_list("slideshow_credits")
  )
}
| /R/archieve.R | no_license | elben10/Rnewyorktimes | R | false | false | 4,719 | r | #' @import rlang
#' @importFrom httr GET http_type http_error status_code content
#' @importFrom purrr modify_if map
NULL
#' Get articles from NYT Archieve
#'
#' The function returns a tibble as default, which contains all New York Times articles of the specified
#' year and month. Articles are available from 1851 and up to the present year and month.
#'
#' @param month the desired month provided as a wholenumber
#' @param year the desired year provided as a wholenumber
#' @param tibble if TRUE the API result is formatted and returned as a tibble, else a complete response
#' is provided
#' @seealso \link[Rnewyorktimes]{nyt_token}
#' @export
#' @examples
#' \dontrun{nyt_archieve(month = 1, year = 1970) #Remember to set token}
nyt_archieve <- function(month = 1, year = 1970, tibble = TRUE ) {
token <- is_token_set()
archieve_input_success(month = month, year = year)
url <- "http://api.nytimes.com/"
path <- sprintf("svc/archive/v1/%s/%s.json",
year,
month)
resp <- GET(url, path = path, query = list(`api-key` = token))
is_json(resp)
parsed <- jsonlite::fromJSON(content(resp, "text", encoding = "UTF-8"), simplifyVector = FALSE)
request_failed(resp, parsed)
if (tibble) {
res <- archieve_tibble(parsed$response$docs)
} else {
res <- list(content = parsed,
response = resp)
}
attr(res, "tibble") <- tibble
attr(res, "path") <- path
attr(res, "date") <- sprintf("%s-%s", year, month)
attr(res, "size") <- bytes(length(resp$content))
class(res) <- c("NewYorkTimesAPI", "NewYorkTimesAPI_archieve", class(res))
res
}
archieve_input_success <- function(month, year) {
if (!is.numeric(month) | !is.numeric(year)) {
abort("month and year must be whole numbers provided as integers or double")
}
if (!is_whole_number(month) | !is_whole_number(year)) {
abort("month and year must be whole numbers provided as integers or double")
}
if (!(month %in% 1:12)) {
abort("month can only take values from 1 to 12")
}
if (!(year %in% 1851:as.numeric(format(Sys.time(), "%Y")))) {
abort(
sprintf("year can only take values from 1851 to %s",
format(Sys.time(), "%Y"))
)
}
if (year == sys_time(TRUE, "%Y") & month > sys_time(TRUE, "%m")) {
abort(
sprintf(
"month must be equal to or smaller than %s",
sys_time(TRUE, format = "%m")
)
)
}
}
#' @export
print.NewYorkTimesAPI_archieve <- function(x, ...) {
cat(sprintf("<New York Times - Archieve>\n Date: %s\n Path: %s\n Size: %s\n",
attr(x, "date"),
attr(x, "path"),
attr(x, "size")))
if(attr(x, "tibble") == TRUE) {
res <- x
class(res) <- c("tbl_df", "tbl", "data.frame")
print(res)
} else {
cat(utils::str(x, max.level = 1, give.attr = FALSE))
}
}
archieve_tibble <- function(x) {
col01 = as.character(modify_if(map(x, ~.x[["web_url"]]), is_null, ~NA))
col02 = as.character(modify_if(map(x, ~.x[["snippet"]]), is_null, ~NA))
col03 = as.character(modify_if(map(x, ~.x[["lead_paragraph"]]), is_null, ~NA))
col04 = as.character(modify_if(map(x, ~.x[["print_page"]]), is_null, ~NA))
col05 = modify_if(map(x, ~.x[["blog"]]), is_null, ~NA)
col06 = as.character(modify_if(map(x, ~.x[["source"]]), is_null, ~NA))
col07 = modify_if(map(x, ~.x[["multimedia"]]), is_null, ~NA)
col08 = modify_if(map(x, ~.x[["headline"]]), is_null, ~NA)
col09 = modify_if(map(x, ~.x[["keywords"]]), is_null, ~NA)
col10 = modify_if(map(x, ~.x[["pub_date"]]), is_null, ~NA)
col11 = modify_if(map(x, ~.x[["document_type"]]), is_null, ~NA)
col12 = modify_if(map(x, ~.x[["news_desk"]]), is_null, ~NA)
col13 = modify_if(map(x, ~.x[["section_name"]]), is_null, ~NA)
col14 = modify_if(map(x, ~.x[["subsection_name"]]), is_null, ~NA)
col15 = modify_if(map(x, ~.x[["byline"]]), is_null, ~NA)
col16 = modify_if(map(x, ~.x[["type_of_material"]]), is_null, ~NA)
col17 = modify_if(map(x, ~.x[["_id"]]), is_null, ~NA)
col18 = modify_if(map(x, ~.x[["word_count"]]), is_null, ~NA)
col19 = modify_if(map(x, ~.x[["slideshow_credits"]]), is_null, ~NA)
tibble::tibble(web_url = col01,
snippet = col02,
lead_paragraph = col03,
print_page = col04,
blog = col05,
source = col06,
multimedia = col07,
headline = col08,
keywords = col09,
pub_data = col10,
document_type = col11,
news_desk = col12,
section_name = col13,
subsection_name = col14,
byline = col15,
type_of_material = col16,
id = col17,
word_count = col18,
slideshow_credits = col19
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsql_table.R
\docType{methods}
\name{$,rsql_table-method}
\alias{$,rsql_table-method}
\title{Overloading the $ operator for access to column references}
\usage{
\S4method{$}{rsql_table}(x, name)
}
\description{
Overloading the $ operator for access to column references
}
| /pkg/man/cash-rsql_table-method.Rd | permissive | AlephbetResearch/rsql | R | false | true | 350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsql_table.R
\docType{methods}
\name{$,rsql_table-method}
\alias{$,rsql_table-method}
\title{Overloading the $ operator for access to column references}
\usage{
\S4method{$}{rsql_table}(x, name)
}
\description{
Overloading the $ operator for access to column references
}
|
# Plot 4 of the Exploratory Data Analysis course project: a 2x2 grid of line
# plots drawn from the UCI "Individual household electric power consumption"
# data set for 2007-02-01 and 2007-02-02.

# NOTE(review): the original call was read.table("", sep = ";"), i.e. an empty
# file path and no header, so the named columns used below could never exist.
# "household_power_consumption.txt" is the course data file -- confirm path.
# The file is ';'-separated, has a header row, and codes missing values as '?'.
epc <- read.table("household_power_consumption.txt", sep = ";",
                  header = TRUE, na.strings = "?")

# Combine Date and Time into a timestamp, then keep Date as a Date object.
epc$Time <- strptime(paste(epc$Date, epc$Time), format = "%d/%m/%Y %H:%M:%S", tz = "")
epc$Date <- as.Date(epc$Date, "%d/%m/%Y")
sapply(epc, class)

# Restrict to the two days of interest.
epcmod <- epc[which(epc$Date == "2007-02-01" | epc$Date == "2007-02-02"), ]
epcmod$Global_active_power <- as.numeric(epcmod$Global_active_power)
sapply(epcmod, class)

# 2x2 grid of line plots with tight margins.
par(mfrow = c(2, 2), mar = c(2, 2, 2, 2))
plot(epcmod$Time, epcmod$Global_active_power, type = "l",
     ylab = "Global Active Power (Kilowatts)")
plot(epcmod$Time, epcmod$Voltage, type = "l", ylab = "Voltage")
plot(epcmod$Time, epcmod$Sub_metering_3, type = "l",
     ylab = "Energy sub metering", col = "blue")
plot(epcmod$Time, epcmod$Global_reactive_power, type = "l",
     ylab = "Global_reactive_power")

# Copy the on-screen plot to a PNG file and close the PNG device.
dev.copy(png, file = "plot4.png")
dev.off()
dev.off() | /plot4.R | no_license | RajMitra/datasciencecoursera | R | false | false | 775 | r | epc <- read.table("", sep = ";")
epc$Time <- strptime(paste(epc$Date,epc$Time),format = "%d/%m/%Y %H:%M:%S", tz = "")
epc$Date <- as.Date(epc$Date,"%d/%m/%Y")
sapply(epc,class)
epcmod <- epc[which(epc$Date == "2007-02-01" | epc$Date == "2007-02-02"),]
epcmod$Global_active_power <- as.numeric(epcmod$Global_active_power)
sapply(epcmod,class)
par(mfrow = c(2,2), mar = c(2,2,2,2))
plot(epcmod$Time,epcmod$Global_active_power, type = "l", ylab = "Global Active Power (Kilowatts)")
plot(epcmod$Time,epcmod$Voltage, type = "l", ylab = "Voltage")
plot(epcmod$Time,epcmod$Sub_metering_3, type = "l", ylab = "Energy sub metering", col = "blue")
plot(epcmod$Time,epcmod$Global_reactive_power, type = "l", ylab = "Global_reactive_power")
dev.copy(png, file = "plot4.png")
dev.off() |
# Plot 2: line chart of Global Active Power over the selected time window.
source("functions.R")  # provides read_data() -- see that file for details
data <- read_data()
# Transparent-background 480x480 px PNG (course submission format).
png("plot2.png", bg = "transparent", width = 480, height = 480, units = "px")
plot(
  data$Datetime,
  data$Global_active_power,
  type = "l",
  ylab = "Global Active Power (kilowatts)",
  xlab = ""
)
# Close the PNG device so the file is written.
dev.off()
| /plot2.R | no_license | paphlagonia/ExData_Plotting1 | R | false | false | 264 | r | source("functions.R")
data <- read_data()
png("plot2.png", bg = "transparent", width = 480, height = 480, units = "px")
plot(
data$Datetime,
data$Global_active_power,
type = "l",
ylab = "Global Active Power (kilowatts)",
xlab = ""
)
dev.off()
|
library(dplyr)
library(ggplot2)

# Plot 4: how have PM2.5 emissions from coal-combustion-related sources
# changed in the US across the NEI reporting years?
NEIds <- readRDS(file = "summarySCC_PM25.rds")
SCCds <- readRDS(file = "Source_Classification_Code.rds")
NEIds$year <- factor(NEIds$year)

# Identify coal-related source codes by matching "coal" in SCC level three.
coal <- SCCds[grep(pattern = "coal", x = SCCds$SCC.Level.Three, ignore.case = TRUE), ]
coal.scc <- coal$SCC
coal.emission <- NEIds[NEIds$SCC %in% coal.scc, ]

# Total emissions per year.  Plain formula terms (rather than `$` inside the
# formula) let aggregate() name the result columns sensibly.
coal.emission2 <- aggregate(Emissions ~ year, data = coal.emission, FUN = sum)
names(coal.emission2) <- c("Year", "Emissions")
# Convert the factor back to numeric years per row.  The original
# as.numeric(levels(...)) would mismatch in length if any factor level had
# no coal observations.
coal.emission2$Year <- as.numeric(as.character(coal.emission2$Year))

ggplot(data = coal.emission2, aes(x = Year, y = Emissions)) +
  geom_point() +
  geom_line() +
  ggtitle("Emissions from Coal Combustion")
# Copy the on-screen plot to a PNG file and close the device.
dev.copy(device = png, file = "plot4.png", width = 1366, height = 768, units = "px")
dev.off()
dev.off() | /plot4.R | no_license | divydeep/ExData_Plotting2 | R | false | false | 763 | r | library(dplyr)
library(ggplot2)
NEIds <- readRDS(file = "summarySCC_PM25.rds")
SCCds <- readRDS(file = "Source_Classification_Code.rds")
NEIds$year <- factor(NEIds$year)
coal <- SCCds[grep(pattern = "coal",x = SCCds$SCC.Level.Three, ignore.case = T),]
coal.scc <- coal$SCC
coal.emission <- NEIds[NEIds$SCC %in% coal.scc,]
coal.emission2 <- aggregate(coal.emission$Emissions ~ coal.emission$year, data = coal.emission, FUN = sum)
names(coal.emission2) <- c("Year", "Emissions")
coal.emission2$Year <- as.numeric(levels(coal.emission2$Year))
ggplot(data = coal.emission2, aes(x = Year, y = Emissions)) + geom_point() + geom_line() + ggtitle("Emissions from Coal Combustion")
dev.copy(device = png, file="plot4.png", width=1366, height=768, units="px")
dev.off() |
##' Test connection to database
##'
##' Useful to only run tests that depend on database when a connection exists
##' @title db.exists
##' @param ... currently ignored.  NOTE(review): arguments are accepted but
##'   never used -- confirm whether callers rely on passing anything here.
##' @return TRUE if database connection works; else FALSE
##' @export
##' @author David LeBauer
db.exists <- function(...){
  # Fall back to the default local BETY credentials when no `settings` object
  # is in scope.  NOTE(review): exists() searches the calling/global
  # environment, so behaviour depends on external state -- confirm intended.
  if(!exists("settings")){
    settings <- list(database =
                     list(userid = "bety",
                          passwd = "bety",
                          location = "localhost",
                          name = "bety"))
  }
  # tryl()/query.base.con() are package helpers defined elsewhere; tryl()
  # presumably returns TRUE when the connection attempt succeeds.
  ans <- tryl(query.base.con(settings))
  return(ans)
}
| /db/R/utils.R | permissive | dwng/pecan | R | false | false | 568 | r | ##' Test connection to database
##'
##' Useful to only run tests that depend on database when a connection exists
##' @title db.exists
##' @return TRUE if database connection works; else FALSE
##' @export
##' @author David LeBauer
db.exists <- function(...){
if(!exists("settings")){
settings <- list(database =
list(userid = "bety",
passwd = "bety",
location = "localhost",
name = "bety"))
}
ans <- tryl(query.base.con(settings))
return(ans)
}
|
## loading source/packages needed application server.R

# Install any required packages that are not already present, then load them.
packs <- c("shiny", "lattice", "plyr", "EnvStats", "Metrics", "reshape2", "NADA")
packs <- packs[!packs %in% rownames(installed.packages())]
if (length(packs) > 0) {
  # install.packages() accepts a character vector, so one call installs all
  # missing packages (the original looped with sapply, one call per package).
  install.packages(packs)
}

# load needed libraries:
library(shiny)
library(lattice)
library(plyr)
library(EnvStats)
library(Metrics)
library(reshape2)
library(NADA)

# Shared helpers used by ui.R/server.R.
source("./utilityFunctions.R")
source("./EH_Bayes.R")
|
# Data: Credit dataset
#install.packages("ISLR")
library(ISLR)
# check sample of data
head(Credit)
# check data structure
str(Credit)
# check summary
summary(Credit)
df <- Credit
head(df)
str(df)
#transform column to factor
df$Cards <- factor(df$Cards)
# structure of dataframe
str(df)
##########################################
### plot functions in ggplot2 package ####
### Introduction ####
##########################################
#install.packages("ggplot2")
library(ggplot2)
## GGPLOT CHEATSHEET: https://github.com/rstudio/cheatsheets/blob/master/data-visualization-2.1.pdf
## https://ggplot2.tidyverse.org/reference/ggplot.html
## Data visualisation with ggplot2 - Chapter 3###
## ggplot is based on the philosophy of grammar of graphics
## the idea is to add layers to visualisation
## layers 1-3
## layer 1: data , layer 2: aesthetics (data columns to use in plotting),
## layer 3 : geometries (type of plot)
head(Credit)
str(Credit)
#### scatterplot ###
## First steps - ##
## Aesthetic mappings (data columns to use in plotting) ##
p1 <- ggplot(data=Credit, aes(x=Income, y=Balance))
## Geometric objects (type of plot) ##
p1 + geom_point(color='blue',alpha=0.5) + labs(x="Income", y="Balance", title="Balance vs. Income")
# p1 + geom_point(color='blue',alpha=0.5)
ggplot(data=Credit, aes(x=Income, y=Balance)) + geom_point(color='blue',alpha=0.5)
#change axis labels
ggplot(data=Credit, aes(x=Income, y=Balance)) + geom_point(color='blue',alpha=0.5) + labs(x="Income", y="Balance", title="Balance vs. Income")
#### histogram 1###
### in ggplot histogram is used to get frequency by band for for one continious variable
## data and aesthetics##
p <- ggplot(data=Credit, aes(x=Income))
# geometry
p + geom_histogram() + labs(x='Income band',y='Count',title="Income distribution")
# p + geom_histogram()
ggplot(data=Credit, aes(x=Income)) + geom_histogram()
ggplot(data=Credit, aes(x=Income)) + geom_histogram(fill='blue',alpha=0.5,binwidth=10) +
labs(x='Income band',y='Count',title="Income distribution")
## Barplots ##
## in ggplot barplot is used to get frequency by category for a categorical variable##
## data and aesthetics
p <- ggplot(data=Credit,aes(x=Gender))
# geometry
# barplot - categorical data, bars separated by spaces
p + geom_bar(fill='blue', alpha=0.5)
ggplot(data=Credit,aes(x=Gender)) + geom_bar(fill='blue', alpha=0.5)
ggplot(data=Credit,aes(x=Gender)) + geom_bar(fill='blue', alpha=0.5) +
labs(x='Gender',y='Count',title="Gender distribution")
str(Credit)
## Boxplots ##
## quartiles, end of whiskers 1.5 IQL - check wiki
## data and aesthetics
# discrete x, continuous y
## quartiles, end of whiskers 1.5 IQL - check wiki
## data and aesthetics
p <- ggplot(data=Credit, aes(x=Student, y=Balance))
# geometry - boxplot
p + geom_boxplot()
# flip coords
p + geom_boxplot() + coord_flip()
ggplot(data=Credit, aes(x=Student, y=Balance)) + geom_boxplot() +
labs(x='Student',y='Balance',title="Balance Distribution by Student")
ggplot(data=Credit, aes(x=Student, y=Balance)) + geom_boxplot(aes(fill=Student)) +
labs(x='Student',y='Balance',title="Balance Distribution by Student")
head(Credit)
#install.packages('dplyr')
library(dplyr)
# balance by Married flag
df.married.balance <- Credit %>% group_by(Married) %>% summarise(sum.balance=sum(Balance))
head(df.married.balance)
str(df.married.balance)
# balance by Married flag
# geom_col() : x = discrete, y=continious
ggplot(data=df.married.balance, aes(x=Married, y=sum.balance)) + geom_col() +
labs(x='Married',y='Balance',title="Balance Distribution by Married")
# balance by cards
df.cards.balance <- Credit %>% group_by(Cards) %>% summarise(sum.balance=sum(Balance)) %>% arrange(desc(Cards))
head(df.cards.balance)
str(df.cards.balance)
# balance by cards
# geom_col() : x = discrete, y=continious
ggplot(data=df.cards.balance, aes(x=factor(Cards), y=sum.balance)) + geom_col() +
labs(x='Cards',y='Balance',title="Balance Distribution by Cards")
#################################
### plot functions in Base R ####
#################################
# https://cran.r-project.org/doc/contrib/Short-refcard.pdf
# plot(x) plot of the values of x (on the y-axis) ordered on the x-axis
plot(Credit$Income)
#barplot
plot(Credit$Married)
#barplot
plot(factor(Credit$Education))
#barplot
plot(Credit$Gender)
# plot(x) plot of the values of x (on the y-axis) ordered on the x-axis
plot(Credit$Cards)
#barplot
plot(factor(Credit$Cards))
#histogram of cards
hist(Credit$Cards)
#boxplot
boxplot(Credit$Cards)
| /1/Rdatviz_intro.R | no_license | uhaz1/rbusinessanalytics | R | false | false | 4,587 | r |
# Data: Credit dataset
#install.packages("ISLR")
library(ISLR)
# check sample of data
head(Credit)
# check data structure
str(Credit)
# check summary
summary(Credit)
df <- Credit
head(df)
str(df)
#transform column to factor
df$Cards <- factor(df$Cards)
# structure of dataframe
str(df)
##########################################
### plot functions in ggplot2 package ####
### Introduction ####
##########################################
#install.packages("ggplot2")
library(ggplot2)
## GGPLOT CHEATSHEET: https://github.com/rstudio/cheatsheets/blob/master/data-visualization-2.1.pdf
## https://ggplot2.tidyverse.org/reference/ggplot.html
## Data visualisation with ggplot2 - Chapter 3###
## ggplot is based on the philosophy of grammar of graphics
## the idea is to add layers to visualisation
## layers 1-3
## layer 1: data , layer 2: aesthetics (data columns to use in plotting),
## layer 3 : geometries (type of plot)
head(Credit)
str(Credit)
#### scatterplot ###
## First steps - ##
## Aesthetic mappings (data columns to use in plotting) ##
p1 <- ggplot(data=Credit, aes(x=Income, y=Balance))
## Geometric objects (type of plot) ##
p1 + geom_point(color='blue',alpha=0.5) + labs(x="Income", y="Balance", title="Balance vs. Income")
# p1 + geom_point(color='blue',alpha=0.5)
ggplot(data=Credit, aes(x=Income, y=Balance)) + geom_point(color='blue',alpha=0.5)
#change axis labels
ggplot(data=Credit, aes(x=Income, y=Balance)) + geom_point(color='blue',alpha=0.5) + labs(x="Income", y="Balance", title="Balance vs. Income")
#### histogram 1###
### in ggplot histogram is used to get frequency by band for for one continious variable
## data and aesthetics##
p <- ggplot(data=Credit, aes(x=Income))
# geometry
p + geom_histogram() + labs(x='Income band',y='Count',title="Income distribution")
# p + geom_histogram()
ggplot(data=Credit, aes(x=Income)) + geom_histogram()
ggplot(data=Credit, aes(x=Income)) + geom_histogram(fill='blue',alpha=0.5,binwidth=10) +
labs(x='Income band',y='Count',title="Income distribution")
## Barplots ##
## in ggplot barplot is used to get frequency by category for a categorical variable##
## data and aesthetics
p <- ggplot(data=Credit,aes(x=Gender))
# geometry
# barplot - categorical data, bars separated by spaces
p + geom_bar(fill='blue', alpha=0.5)
ggplot(data=Credit,aes(x=Gender)) + geom_bar(fill='blue', alpha=0.5)
ggplot(data=Credit,aes(x=Gender)) + geom_bar(fill='blue', alpha=0.5) +
labs(x='Gender',y='Count',title="Gender distribution")
str(Credit)
## Boxplots ##
## box = quartiles; whiskers extend to 1.5 * IQR (interquartile range)
## start from data and aesthetics
# boxplot wants discrete x, continuous y
## box = quartiles; whiskers extend to 1.5 * IQR
## data and aesthetics layer first, geometry added afterwards
p <- ggplot(data=Credit, aes(x=Student, y=Balance))
# geometry - boxplot
p + geom_boxplot()
# flip coords (horizontal boxplots)
p + geom_boxplot() + coord_flip()
# same plot built in one expression, with axis labels and a title
ggplot(data=Credit, aes(x=Student, y=Balance)) + geom_boxplot() +
labs(x='Student',y='Balance',title="Balance Distribution by Student")
# fill the boxes by the Student factor
ggplot(data=Credit, aes(x=Student, y=Balance)) + geom_boxplot(aes(fill=Student)) +
labs(x='Student',y='Balance',title="Balance Distribution by Student")
head(Credit)
#install.packages('dplyr')
library(dplyr)
# total balance by Married flag
df.married.balance <- Credit %>% group_by(Married) %>% summarise(sum.balance=sum(Balance))
head(df.married.balance)
str(df.married.balance)
# total balance by Married flag
# geom_col() : x = discrete, y = continuous (bar height = value, not count)
ggplot(data=df.married.balance, aes(x=Married, y=sum.balance)) + geom_col() +
labs(x='Married',y='Balance',title="Balance Distribution by Married")
# total balance by number of cards, sorted descending by Cards
df.cards.balance <- Credit %>% group_by(Cards) %>% summarise(sum.balance=sum(Balance)) %>% arrange(desc(Cards))
head(df.cards.balance)
str(df.cards.balance)
# total balance by number of cards
# geom_col() : x = discrete, y = continuous; factor() forces a discrete axis
ggplot(data=df.cards.balance, aes(x=factor(Cards), y=sum.balance)) + geom_col() +
labs(x='Cards',y='Balance',title="Balance Distribution by Cards")
#################################
### plot functions in Base R ####
#################################
# https://cran.r-project.org/doc/contrib/Short-refcard.pdf
# plot(x): values of x on the y-axis, ordered by index on the x-axis
plot(Credit$Income)
# factor input -> barplot of level counts
plot(Credit$Married)
# coerce to factor first to get a barplot
plot(factor(Credit$Education))
# factor input -> barplot
plot(Credit$Gender)
# numeric input -> index scatter plot, not a barplot
plot(Credit$Cards)
# coerced to factor -> barplot of counts per number of cards
plot(factor(Credit$Cards))
# histogram of cards
hist(Credit$Cards)
# boxplot of cards
boxplot(Credit$Cards)
|
######################################################################
#
# zzz.R
#
# Edited by Zack Almquist
# Written by Carter T. Butts <buttsc@uci.edu>; based on an original by
# Carter T. Butts <buttsc@uci.edu>, David Hunter <dhunter@stat.psu.edu>,
# and Mark S. Handcock <handcock@u.washington.edu>.
# Last Modified 7/14/10
# Licensed under the GNU General Public License version 3 or later
#
# Part of the R/census package
#
# .onLoad is run when the package is loaded with library(UScensus2010)
#
######################################################################
# Package load hook, called automatically when the UScensus2010 namespace is
# loaded. Prints the package title/version/date plus citation and help hints.
# @param libname path of the library the package was loaded from (unused)
# @param pkgname name of the package being loaded (unused)
.onLoad <- function(libname, pkgname){
# read Package/Title/Version/Date fields from the installed DESCRIPTION
dscr <- utils::packageDescription('UScensus2010')
packageStartupMessage("\n")
packageStartupMessage(paste('Package ',dscr$Package,': ',dscr$Title,"\n",
"Version ",dscr$Version,
" created on ", dscr$Date ,".\n", sep=""))
# NOTE(review): the affiliation string below is garbled ("...Irvine\nne\n");
# looks like a copy/paste artifact -- confirm the intended author/affiliation text.
packageStartupMessage(paste("Zack Almquist, University of California-Irvine
ne\n",sep=""))
packageStartupMessage('For citation information, type citation("UScensus2010").')
packageStartupMessage('Type help(package=UScensus2010) to get started.')
}
| /UScensus2010/R/zzz.R | no_license | ingted/R-Examples | R | false | false | 1,139 | r | ######################################################################
#
# zzz.R
#
# Edited by Zack Almquist
# Written by Carter T. Butts <buttsc@uci.edu>; based on an original by
# Carter T. Butts <buttsc@uci.edu>, David Hunter <dhunter@stat.psu.edu>,
# and Mark S. Handcock <handcock@u.washington.edu>.
# Last Modified 7/14/10
# Licensed under the GNU General Public License version 3 or later
#
# Part of the R/census package
#
# .First.lib is run when the package is loaded with library(UScensus2000)
#
######################################################################
.onLoad <- function(libname, pkgname){
dscr <- utils::packageDescription('UScensus2010')
packageStartupMessage("\n")
packageStartupMessage(paste('Package ',dscr$Package,': ',dscr$Title,"\n",
"Version ",dscr$Version,
" created on ", dscr$Date ,".\n", sep=""))
packageStartupMessage(paste("Zack Almquist, University of California-Irvine
ne\n",sep=""))
packageStartupMessage('For citation information, type citation("UScensus2010").')
packageStartupMessage('Type help(package=UScensus2010) to get started.')
}
|
# Configuration for the phylowgs analysis: resolves all project paths relative
# to the RStudio project root, then sources the shared helper functions.
library(rprojroot)
root_dir = rprojroot::find_rstudio_root_file()
# paths to the helper scripts sourced at the bottom of this file
src.path <- paste(root_dir, "/src/phylowgs/ssSignature_Local_Functions.R",sep="" )
utils.path <- paste(root_dir, "/src/phylowgs/utils.R",sep="" )
# standard project directory layout
input.dir <- paste(root_dir, "/inputs",sep="" )
output.dir <- paste(root_dir, "/outputs",sep="" )
ref.dir <- paste(root_dir, "/refs",sep="" )
samples <- paste(input.dir,'/samples.txt',sep="")
cancer.genes.path <- paste(ref.dir,'/cancer.bed',sep="")
sample.prefix <- "samples.pt"
patient.prefix <- "Patient"
# phylowgs best tree indices, one per patient (named below via samplelist)
index <- c("589","2144","2097","2197","1971","2499","1250")
recalculate_indecies <- TRUE
samplelist<-c("Patient1","Patient2","Patient3","Patient4","Patient5","Patient6","Patient7")
names(index) <- samplelist
# per-patient mutation (MAF) and cancer-cell-fraction (CCF) input directories
maf.dir <- paste(input.dir, "/mafs",sep="" )
ccf.dir <- paste(input.dir, "/ccfs",sep="" )
witness.data <- "~/witness_dec6" #hard-coded for now
source(src.path, local = TRUE)
source(utils.path, local = TRUE) | /src/config_phylowgs.R | permissive | pughlab/braf_rapid_autopsy | R | false | false | 971 | r | library(rprojroot)
root_dir = rprojroot::find_rstudio_root_file()
src.path <- paste(root_dir, "/src/phylowgs/ssSignature_Local_Functions.R",sep="" )
utils.path <- paste(root_dir, "/src/phylowgs/utils.R",sep="" )
input.dir <- paste(root_dir, "/inputs",sep="" )
output.dir <- paste(root_dir, "/outputs",sep="" )
ref.dir <- paste(root_dir, "/refs",sep="" )
samples <- paste(input.dir,'/samples.txt',sep="")
cancer.genes.path <- paste(ref.dir,'/cancer.bed',sep="")
sample.prefix <- "samples.pt"
patient.prefix <- "Patient"
# phylowgs best indecies
index <- c("589","2144","2097","2197","1971","2499","1250")
recalculate_indecies <- TRUE
samplelist<-c("Patient1","Patient2","Patient3","Patient4","Patient5","Patient6","Patient7")
names(index) <- samplelist
maf.dir <- paste(input.dir, "/mafs",sep="" )
ccf.dir <- paste(input.dir, "/ccfs",sep="" )
witness.data <- "~/witness_dec6" #hard-coded for now
source(src.path, local = TRUE)
source(utils.path, local = TRUE) |
# Keep the lower triangle (and diagonal) of a square matrix,
# masking every strictly upper-triangular entry with NA.
get_lower_tri <- function(cormat) {
  masked <- cormat
  masked[upper.tri(masked)] <- NA
  masked
}
# Keep the upper triangle (and diagonal) of a square matrix,
# masking every strictly lower-triangular entry with NA.
get_upper_tri <- function(cormat) {
  masked <- cormat
  masked[lower.tri(masked)] <- NA
  masked
}
# Build a model formula "dependent ~ ..." from a character vector of
# predictor names.
#
# @param variables.vec character vector of predictor names
# @param dependent     name of the response (empty string -> one-sided RHS)
# @param interactions  if TRUE, join predictors with '*' (all interactions);
#                      otherwise with '+'
# @param quadratics    if TRUE, append I(v^2) terms for every predictor not
#                      listed in non.num.vars
# @param non.num.vars  predictor names to exclude from the quadratic terms
# @return a formula object
lm_formula <- function(variables.vec,
                       dependent = '',
                       interactions = F,
                       quadratics = F,
                       non.num.vars = NA) {
  # main-effect part of the right-hand side
  sep <- if (interactions) '*' else '+'
  main.part <- paste0(variables.vec, collapse = sep)

  # optional quadratic terms for the non-excluded variables
  quad.part <- ''
  if (quadratics) {
    quad.vars <- setdiff(variables.vec, non.num.vars)
    quad.part <- paste0('+', paste0('I(', quad.vars, '^2)', collapse = '+'))
  }

  as.formula(paste0(dependent, '~ ', paste0(main.part, quad.part)))
}
# Fit a truncated-power-basis regression spline of y on x.
#
# Basis: x, x^2, ..., x^poly_order plus (x - k)_+^poly_order for each of
# n_knots equally spaced knots over range(x). Coefficients are estimated by
# OLS via lm(); NA coefficients (rank-deficient basis) are dropped before
# evaluating the fit on a 250-point grid.
#
# @return list(evaluation grid, fitted values on the grid, fitted lm object)
get_truncated_spline = function(y, x, n_knots, poly_order){
  knots <- seq(min(x), max(x), length=n_knots)
  # 250-point grid used only for evaluating/plotting the fitted curve
  x_eval = seq(min(x), max(x), length=250)
  coef_list = list()
  knot_coef_list = list()
  x_eval_list = list()
  knot_x_eval_list = list()
  # polynomial part of the basis: x, x^2, ..., x^poly_order
  for (i in 1:poly_order) {
    coef_list[[i]] = x^i
    x_eval_list[[i]] = x_eval^i
  }
  # truncated-power part: (x - knot_i)^poly_order past knot_i, else 0
  for (i in 1:(n_knots)) {
    knot_coef_list[[i]] = ((x - knots[i])^poly_order) * (x > knots[i])
    knot_x_eval_list[[i]] = ((x_eval - knots[i])^poly_order) * (x_eval > knots[i])
  }
  # first column of ones mirrors lm()'s implicit intercept, so the columns of
  # x_eval_df line up one-to-one with coef(model_trunc)
  x_eval_df = data.frame(rep(1,250), x_eval_list, knot_x_eval_list)
  temp_df = data.frame(y, coef_list, knot_coef_list)
  # response column is named 'profit' regardless of what y actually is
  colnames(x_eval_df) <- c('c', as.character(1:(poly_order + n_knots)))
  colnames(temp_df) <- c('profit', as.character(1:(poly_order + n_knots)))
  model_trunc <- lm(profit~., data=temp_df)
  # drop basis columns whose coefficients lm() set to NA (collinear terms)
  a = x_eval_df[, !is.na(coef(model_trunc))]
  b = coef(model_trunc)[!is.na(coef(model_trunc))]
  fitted_trunc <- as.matrix(a)%*%b
  return(list(x_eval, fitted_trunc, model_trunc))
}
# Fit an unpenalized quadratic B-spline (ord=3) of y on x by least squares.
# Requires splines::spline.des() to be available (library(splines)).
#
# @return list(100-point x grid, fitted values on that grid,
#              hand-computed AIC, AIC on R's -2*logLik scale)
get_bsplines <- function(x, y, nrknots){
  # widen the range slightly so min(x)/max(x) fall strictly inside the knots
  minx <- min(x)-0.001
  maxx <- max(x)+0.001
  step <- (maxx-minx)/(nrknots-1)
  inner.knots <- seq(minx,maxx,length=nrknots)
  # extend the knot sequence beyond the data range for the B-spline basis
  knots <- seq(minx-2*step,maxx+2*step,by=step)
  xseq <- seq(min(x),max(x),length=100)
  B <- spline.des(knots=knots, x, ord=3)$design
  Bfit <- spline.des(knots=knots, xseq, ord=3)$design
  # ordinary least squares on the basis: beta = (B'B)^{-1} B'y
  betahat <- solve(t(B)%*%B)%*%t(B)%*%y
  fitted <- Bfit%*%betahat
  n <- length(x)
  # hat (smoother) matrix; its trace gives the effective degrees of freedom
  S <- B%*%solve(t(B)%*%B)%*%t(B)
  fit <- as.vector(B%*%betahat)
  diags <- diag(S)
  df <- sum(diags)
  sigma2 <- sum((y-fit)^2)/n
  my_aic <- n*log(sigma2) + 2*(df+1)
  # NOTE(review): "++" parses as "+ (+...)" so the value is unaffected,
  # but it looks like a typo for a single "+".
  r_aic <- n*(log(2*pi)+1+log(sigma2))++ 2*(df+1)
  return(list(xseq, fitted, my_aic, r_aic))
}
# Grid-search the smoothing parameter for a cubic P-spline fit of y on x.
# For each lambda on a fixed grid of 250 values in [1, 200], computes GCV,
# AIC and BIC from the smoother matrix S = B (B'B + lambda*K2)^{-1} B'.
# Requires splines::spline.des() (library(splines)).
#
# @return list(lambda grid, GCV, AIC, BIC), one value per grid point
get_lambda <- function(x, y, nrknots) {
  minx <- min(x)-0.001
  maxx <- max(x)+0.001
  step <- (maxx-minx)/(nrknots-1)
  inner.knots <- seq(minx,maxx,length=nrknots)
  knots <- seq(minx-3*step,maxx+3*step,by=step)
  # second-difference matrix; penalty is K2 = D2' D2
  D2 <- matrix(0,nrknots,nrknots+2)
  for(i in 1:nrknots) {
    D2[i,i] <- 1
    D2[i,i+1] <- -2
    D2[i,i+2] <- 1
  }
  K2 <- t(D2)%*%D2
  B <- spline.des(knots=knots,x,ord=4)$design
  lambda <- seq(1, 200, length=250)
  #lambda <- c(1:10 %o% 10^(-1:1.5))
  gcv <- rep(0,length(lambda))
  aic <- rep(0,length(lambda))
  bic <- rep(0,length(lambda))
  n <- length(x)
  for(i in 1:length(lambda)) {
    # smoother (hat) matrix for the current lambda
    S <- B%*%solve(t(B)%*%B + lambda[i]*K2)%*%t(B)
    diags <- diag(S)
    trs <- mean(diags)  # tr(S)/n, used in the GCV denominator
    df <- sum(diags)    # effective degrees of freedom
    fit <- as.vector(S%*%y)
    # generalized cross-validation score
    gcv[i] <- mean(((y-fit)/(1-trs))^2)
    # aic[i] <- n*log(sigma2) + sum((data$z-fit)^2)/sigma2 + 2*df
    # bic[i] <- n*log(sigma2) + sum((data$z-fit)^2)/sigma2 + log(n)*df
    sigma2 <- sum((y-fit)^2)/n
    aic[i] <- n*log(sigma2) + 2*(df+1)
    bic[i] <- n*log(sigma2) + log(n)*(df+1)
  }
  return(list('lambda' = lambda,
              'gcv' = gcv,
              'AIC' = aic,
              'BIC' = bic))
}
# Fit a cubic P-spline (penalized B-spline, ord=4) of y on x with a fixed
# smoothing parameter lambda and a second-difference penalty.
# Requires splines::spline.des() (library(splines)).
#
# @return list(500-point x grid, fitted values on that grid,
#              hand-computed AIC, AIC on R's -2*logLik scale)
get_cubic_psplines <- function(x, y, nrknots, lambda) {
  minx <- min(x)-0.001
  maxx <- max(x)+0.001
  step <- (maxx-minx)/(nrknots-1)
  inner.knots <- seq(minx,maxx,length=nrknots)
  knots <- seq(minx-3*step,maxx+3*step,by=step)
  # dense grid for plotting the fitted curve
  xplot <- seq(min(x),max(x),length=500)
  xobs <- unique(x)
  nunique <- length(xobs)
  # second-difference matrix; penalty is K2 = D2' D2
  D2 <- matrix(0,nrknots,nrknots+2)
  for(i in 1:nrknots)
  {
    D2[i,i] <- 1
    D2[i,i+1] <- -2
    D2[i,i+2] <- 1
  }
  K2 <- t(D2)%*%D2
  B <- spline.des(knots=knots, x , ord=4)$design
  Bobs <- spline.des(knots=knots, xobs, ord=4)$design
  Bplot <- spline.des(knots=knots, xplot, ord=4)$design
  # penalized least squares: beta = (B'B + lambda*K2)^{-1} B'y
  betahat <- solve(t(B)%*%B + lambda*K2)%*%t(B)%*%y
  fitted <- B%*%betahat
  fittedplot <- Bplot%*%betahat
  n <- length(x)
  # smoother matrix; trace gives the effective degrees of freedom
  S <- B%*%solve(t(B)%*%B + lambda*K2)%*%t(B)
  fit <- as.vector(B%*%betahat)
  diags <- diag(S)
  df <- sum(diags)
  sigma2 <- sum((y-fit)^2)/n
  my_aic <- n*log(sigma2) + 2*(df+1)
  # NOTE(review): "++" parses as "+ (+...)" -- numerically harmless typo.
  r_aic <- n*(log(2*pi)+1+log(sigma2))++ 2*(df+1)
  return(list(xplot, fittedplot, my_aic, r_aic))
}
| /glm_functions.R | no_license | thduvivier/GLM | R | false | false | 4,993 | r | get_lower_tri<-function(cormat){
cormat[upper.tri(cormat)] <- NA
return(cormat)
}
get_upper_tri <- function(cormat){
cormat[lower.tri(cormat)]<- NA
return(cormat)
}
lm_formula <- function(variables.vec,
dependent = '',
interactions = F,
quadratics = F,
non.num.vars = NA) {
if (interactions) {
collapse.symbol <- '*'
} else {
collapse.symbol <- '+'
}
if (quadratics) {
quadratic.formula <- paste0('+' , paste0('I(',
setdiff(variables.vec,
non.num.vars),
'^2)',
collapse = '+'))
} else {
quadratic.formula <- ''
}
as.formula(paste0(dependent, '~ ',
paste0(paste0(variables.vec, collapse = collapse.symbol),
quadratic.formula)))
}
get_truncated_spline = function(y, x, n_knots, poly_order){
knots <- seq(min(x), max(x), length=n_knots)
x_eval = seq(min(x), max(x), length=250)
coef_list = list()
knot_coef_list = list()
x_eval_list = list()
knot_x_eval_list = list()
for (i in 1:poly_order) {
coef_list[[i]] = x^i
x_eval_list[[i]] = x_eval^i
}
for (i in 1:(n_knots)) {
knot_coef_list[[i]] = ((x - knots[i])^poly_order) * (x > knots[i])
knot_x_eval_list[[i]] = ((x_eval - knots[i])^poly_order) * (x_eval > knots[i])
}
x_eval_df = data.frame(rep(1,250), x_eval_list, knot_x_eval_list)
temp_df = data.frame(y, coef_list, knot_coef_list)
colnames(x_eval_df) <- c('c', as.character(1:(poly_order + n_knots)))
colnames(temp_df) <- c('profit', as.character(1:(poly_order + n_knots)))
model_trunc <- lm(profit~., data=temp_df)
a = x_eval_df[, !is.na(coef(model_trunc))]
b = coef(model_trunc)[!is.na(coef(model_trunc))]
fitted_trunc <- as.matrix(a)%*%b
return(list(x_eval, fitted_trunc, model_trunc))
}
get_bsplines <- function(x, y, nrknots){
minx <- min(x)-0.001
maxx <- max(x)+0.001
step <- (maxx-minx)/(nrknots-1)
inner.knots <- seq(minx,maxx,length=nrknots)
knots <- seq(minx-2*step,maxx+2*step,by=step)
xseq <- seq(min(x),max(x),length=100)
B <- spline.des(knots=knots, x, ord=3)$design
Bfit <- spline.des(knots=knots, xseq, ord=3)$design
betahat <- solve(t(B)%*%B)%*%t(B)%*%y
fitted <- Bfit%*%betahat
n <- length(x)
S <- B%*%solve(t(B)%*%B)%*%t(B)
fit <- as.vector(B%*%betahat)
diags <- diag(S)
df <- sum(diags)
sigma2 <- sum((y-fit)^2)/n
my_aic <- n*log(sigma2) + 2*(df+1)
r_aic <- n*(log(2*pi)+1+log(sigma2))++ 2*(df+1)
return(list(xseq, fitted, my_aic, r_aic))
}
get_lambda <- function(x, y, nrknots) {
minx <- min(x)-0.001
maxx <- max(x)+0.001
step <- (maxx-minx)/(nrknots-1)
inner.knots <- seq(minx,maxx,length=nrknots)
knots <- seq(minx-3*step,maxx+3*step,by=step)
D2 <- matrix(0,nrknots,nrknots+2)
for(i in 1:nrknots) {
D2[i,i] <- 1
D2[i,i+1] <- -2
D2[i,i+2] <- 1
}
K2 <- t(D2)%*%D2
B <- spline.des(knots=knots,x,ord=4)$design
lambda <- seq(1, 200, length=250)
#lambda <- c(1:10 %o% 10^(-1:1.5))
gcv <- rep(0,length(lambda))
aic <- rep(0,length(lambda))
bic <- rep(0,length(lambda))
n <- length(x)
for(i in 1:length(lambda)) {
S <- B%*%solve(t(B)%*%B + lambda[i]*K2)%*%t(B)
diags <- diag(S)
trs <- mean(diags)
df <- sum(diags)
fit <- as.vector(S%*%y)
gcv[i] <- mean(((y-fit)/(1-trs))^2)
# aic[i] <- n*log(sigma2) + sum((data$z-fit)^2)/sigma2 + 2*df
# bic[i] <- n*log(sigma2) + sum((data$z-fit)^2)/sigma2 + log(n)*df
sigma2 <- sum((y-fit)^2)/n
aic[i] <- n*log(sigma2) + 2*(df+1)
bic[i] <- n*log(sigma2) + log(n)*(df+1)
}
return(list('lambda' = lambda,
'gcv' = gcv,
'AIC' = aic,
'BIC' = bic))
}
get_cubic_psplines <- function(x, y, nrknots, lambda) {
minx <- min(x)-0.001
maxx <- max(x)+0.001
step <- (maxx-minx)/(nrknots-1)
inner.knots <- seq(minx,maxx,length=nrknots)
knots <- seq(minx-3*step,maxx+3*step,by=step)
xplot <- seq(min(x),max(x),length=500)
xobs <- unique(x)
nunique <- length(xobs)
D2 <- matrix(0,nrknots,nrknots+2)
for(i in 1:nrknots)
{
D2[i,i] <- 1
D2[i,i+1] <- -2
D2[i,i+2] <- 1
}
K2 <- t(D2)%*%D2
B <- spline.des(knots=knots, x , ord=4)$design
Bobs <- spline.des(knots=knots, xobs, ord=4)$design
Bplot <- spline.des(knots=knots, xplot, ord=4)$design
betahat <- solve(t(B)%*%B + lambda*K2)%*%t(B)%*%y
fitted <- B%*%betahat
fittedplot <- Bplot%*%betahat
n <- length(x)
S <- B%*%solve(t(B)%*%B + lambda*K2)%*%t(B)
fit <- as.vector(B%*%betahat)
diags <- diag(S)
df <- sum(diags)
sigma2 <- sum((y-fit)^2)/n
my_aic <- n*log(sigma2) + 2*(df+1)
r_aic <- n*(log(2*pi)+1+log(sigma2))++ 2*(df+1)
return(list(xplot, fittedplot, my_aic, r_aic))
}
|
# Tests for the corpus() show() method's singular/plural wording
# (testthat 2e idioms: expect_that() + prints_text()).
context('test corpus.R')
test_that("test show.corpus", {
  # one document -> singular "document."
  testcorpus <- corpus(c('The'))
  expect_that(
    show(testcorpus),
    prints_text('Corpus consisting of 1 document.')
  )
  # several documents -> plural "documents."
  testcorpus <- corpus(
    c('The', 'quick', 'brown', 'fox')
  )
  expect_that(
    show(testcorpus),
    prints_text('Corpus consisting of 4 documents.')
  )
  # one docvar -> singular "docvar."
  testcorpus <- corpus(
    c('The', 'quick', 'brown', 'fox'),
    docvars=data.frame(list(test=1:4))
  )
  expect_that(
    show(testcorpus),
    prints_text('Corpus consisting of 4 documents and 1 docvar.')
  )
  # several docvars -> plural "docvars."
  testcorpus <- corpus(
    c('The', 'quick', 'brown', 'fox'),
    docvars=data.frame(list(test=1:4, test2=1:4))
  )
  expect_that(
    show(testcorpus),
    prints_text('Corpus consisting of 4 documents and 2 docvars.')
  )
})
test_that("test c.corpus", {
concat.corpus <- c(data_corpus_inaugural, data_corpus_inaugural, data_corpus_inaugural)
expected_docvars <-rbind(docvars(data_corpus_inaugural), docvars(data_corpus_inaugural), docvars(data_corpus_inaugural))
rownames(expected_docvars) <- make.unique(rep(rownames(docvars(data_corpus_inaugural)), 3), sep='')
expect_equal(
docvars(concat.corpus),
expected_docvars
)
expect_is(
docvars(concat.corpus),
'data.frame'
)
expected_texts <- c(texts(data_corpus_inaugural), texts(data_corpus_inaugural), texts(data_corpus_inaugural))
names(expected_texts) <- make.unique(rep(names(texts(data_corpus_inaugural)), 3), sep='')
expect_equal(
texts(concat.corpus),
expected_texts
)
expect_is(
texts(concat.corpus),
'character'
)
expect_true(
grepl('Concatenation by c.corpus', metacorpus(concat.corpus)$source)
)
})
test_that("test corpus constructors works for kwic", {
kwiccorpus <- corpus(kwic(data_corpus_inaugural, "christmas"))
expect_that(kwiccorpus, is_a("corpus"))
expect_equal(names(docvars(kwiccorpus)),
c("docname", "from", "to", "keyword", "context"))
})
test_that("test corpus constructors works for character", {
expect_that(corpus(data_char_ukimmig2010), is_a("corpus"))
})
test_that("test corpus constructors works for data.frame", {
mydf <- data.frame(letter_factor = factor(rep(letters[1:3], each = 2)),
some_ints = 1L:6L,
some_text = paste0("This is text number ", 1:6, "."),
some_logical = rep(c(TRUE, FALSE), 3),
stringsAsFactors = FALSE,
row.names = paste0("fromDf_", 1:6))
mycorp <- corpus(mydf, text_field = "some_text",
metacorpus = list(source = "From a data.frame called mydf."))
expect_equal(docnames(mycorp),
paste("fromDf", 1:6, sep = "_"))
expect_equal(mycorp[["letter_factor"]][3,1],
factor("b", levels = c("a", "b", "c")))
mydf2 <- mydf
names(mydf2)[3] <- "text"
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf2))
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf, text_field = 3))
expect_error(corpus(mydf, text_field = "some_ints"),
"text_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = c(1,3)),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = c("some_text", "letter_factor")),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = 0),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = -1),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "notfound"),
"column name notfound not found")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "some_ints"),
"docid_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c(1,3)),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c("some_text", "letter_factor")),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = 0),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = -1),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "notfound"),
"column name notfound not found")
})
test_that("test corpus constructor works for tm objects", {
skip_if_not_installed("tm")
require(tm)
# VCorpus
data(crude, package = "tm") # load in a tm example VCorpus
mytmCorpus <- corpus(crude)
expect_equal(substring(texts(mytmCorpus)[1], 1, 21),
c("127" = "Diamond Shamrock Corp"))
data(acq, package = "tm")
mytmCorpus2 <- corpus(acq)
expect_equal(dim(docvars(mytmCorpus2)), c(50,12))
# SimpleCorpus
txt <- system.file("texts", "txt", package = "tm")
mytmCorpus3 <- SimpleCorpus(DirSource(txt, encoding = "UTF-8"),
control = list(language = "lat"))
qcorpus3 <- corpus(mytmCorpus3)
expect_equal(content(mytmCorpus3), texts(qcorpus3))
expect_equal(unclass(meta(mytmCorpus3, type = "corpus")[1]),
metacorpus(qcorpus3)[names(meta(mytmCorpus3, type = "corpus"))])
# any other type
mytmCorpus4 <- mytmCorpus3
class(mytmCorpus4)[1] <- "OtherCorpus"
expect_error(
corpus(mytmCorpus4),
"Cannot construct a corpus from this tm OtherCorpus object"
)
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for VCorpus with one document (#445)", {
skip_if_not_installed("tm")
require(tm)
tmCorpus_length1 <- VCorpus(VectorSource(data_corpus_inaugural[2]))
expect_silent(qcorpus <- corpus(tmCorpus_length1))
expect_equivalent(texts(qcorpus)[1], data_corpus_inaugural[2])
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for complex VCorpus (#849)", {
skip_if_not_installed("tm")
load("../data/corpora/complex_Corpus.rda")
qc <- corpus(complex_Corpus)
expect_equal(
head(docnames(qc), 3),
c("41113_201309.1", "41113_201309.2", "41113_201309.3")
)
expect_equal(
tail(docnames(qc), 3),
c("41223_201309.2553", "41223_201309.2554", "41223_201309.2555")
)
expect_output(
print(qc),
"Corpus consisting of 8,230 documents and 16 docvars\\."
)
})
test_that("corpus_subset works", {
txt <- c(doc1 = "This is a sample text.\nIt has three lines.\nThe third line.",
doc2 = "one\ntwo\tpart two\nthree\nfour.",
doc3 = "A single sentence.",
doc4 = "A sentence with \"escaped quotes\".")
dv <- data.frame(varnumeric = 10:13, varfactor = factor(c("A", "B", "A", "B")), varchar = letters[1:4])
data_corpus_test <- corpus(txt, docvars = dv, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test, varfactor == "B")), 2)
expect_equal(docnames(corpus_subset(data_corpus_test, varfactor == "B")), c("doc2", "doc4"))
data_corpus_test_nodv <- corpus(txt, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), 1)
expect_equal(docnames(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), c("doc2"))
})
test_that("summary method works for corpus", {
expect_output(summary(print(data_corpus_irishbudget2010)), regexp = "^Corpus consisting of 14 documents")
})
test_that("corpus works for texts with duplicate filenames", {
txt <- c(one = "Text one.", two = "text two", one = "second first text")
cor <- corpus(txt)
expect_equal(docnames(cor), c("one", "two", "one.1"))
})
test_that("create a corpus on a corpus", {
expect_identical(
data_corpus_irishbudget2010,
corpus(data_corpus_irishbudget2010)
)
tmpcorp <- data_corpus_irishbudget2010
docnames(tmpcorp) <- paste0("d", seq_len(ndoc(tmpcorp)))
expect_identical(
tmpcorp,
corpus(data_corpus_irishbudget2010, docnames = paste0("d", seq_len(ndoc(tmpcorp))))
)
expect_identical(
corpus(data_corpus_irishbudget2010, compress = TRUE),
corpus(texts(data_corpus_irishbudget2010),
docvars = docvars(data_corpus_irishbudget2010),
metacorpus = metacorpus(data_corpus_irishbudget2010),
compress = TRUE)
)
})
test_that("summary.corpus with verbose prints warning", {
expect_warning(
summary(data_corpus_irishbudget2010, verbose = FALSE),
"verbose argument is defunct"
)
})
test_that("head, tail.corpus work as expected", {
crp <- corpus_subset(data_corpus_inaugural, Year < 2018)
expect_equal(
docnames(head(crp, 3)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(head(crp, -55)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(tail(crp, 3)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
expect_equal(
docnames(tail(crp, -55)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
})
test_that("internal documents fn works", {
mydfm <- dfm(corpus_subset(data_corpus_inaugural, Year < 1800))
expect_is(quanteda:::documents.dfm(mydfm), "data.frame")
expect_equal(
dim(quanteda:::documents.dfm(mydfm)),
c(3, 3)
)
})
test_that("corpus constructor works with tibbles", {
skip_if_not_installed("tibble")
dd <- tibble::data_frame(a=1:3, text=c("Hello", "quanteda", "world"))
expect_is(
corpus(dd),
"corpus"
)
expect_equal(
texts(corpus(dd)),
c(text1 = "Hello", text2 = "quanteda", text3 = "world")
)
})
test_that("print.summary.corpus work", {
  # summary of a doubled corpus: 116 docs total, only 100 shown by default
  summ1 <- summary(data_corpus_inaugural + data_corpus_inaugural)
  expect_output(
    print(summ1),
    "Corpus consisting of 116 documents, showing 100 documents:"
  )
  # a row-subset of the summary still prints the header columns
  expect_output(
    print(summ1[1:5, ]),
    "\\s+Text Types Tokens"
  )
  # a column-subset prints only the selected columns
  expect_output(
    print(summ1[, c("Types", "Tokens")]),
    "^\\s+Types Tokens\\n1\\s+625\\s+1538"
  )
})
| /tests/testthat/test-corpus.R | no_license | TalkStats/quanteda | R | false | false | 10,895 | r | context('test corpus.R')
test_that("test show.corpus", {
testcorpus <- corpus(c('The'))
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 1 document.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox')
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox'),
docvars=data.frame(list(test=1:4))
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents and 1 docvar.')
)
testcorpus <- corpus(
c('The', 'quick', 'brown', 'fox'),
docvars=data.frame(list(test=1:4, test2=1:4))
)
expect_that(
show(testcorpus),
prints_text('Corpus consisting of 4 documents and 2 docvars.')
)
})
test_that("test c.corpus", {
concat.corpus <- c(data_corpus_inaugural, data_corpus_inaugural, data_corpus_inaugural)
expected_docvars <-rbind(docvars(data_corpus_inaugural), docvars(data_corpus_inaugural), docvars(data_corpus_inaugural))
rownames(expected_docvars) <- make.unique(rep(rownames(docvars(data_corpus_inaugural)), 3), sep='')
expect_equal(
docvars(concat.corpus),
expected_docvars
)
expect_is(
docvars(concat.corpus),
'data.frame'
)
expected_texts <- c(texts(data_corpus_inaugural), texts(data_corpus_inaugural), texts(data_corpus_inaugural))
names(expected_texts) <- make.unique(rep(names(texts(data_corpus_inaugural)), 3), sep='')
expect_equal(
texts(concat.corpus),
expected_texts
)
expect_is(
texts(concat.corpus),
'character'
)
expect_true(
grepl('Concatenation by c.corpus', metacorpus(concat.corpus)$source)
)
})
test_that("test corpus constructors works for kwic", {
kwiccorpus <- corpus(kwic(data_corpus_inaugural, "christmas"))
expect_that(kwiccorpus, is_a("corpus"))
expect_equal(names(docvars(kwiccorpus)),
c("docname", "from", "to", "keyword", "context"))
})
test_that("test corpus constructors works for character", {
expect_that(corpus(data_char_ukimmig2010), is_a("corpus"))
})
test_that("test corpus constructors works for data.frame", {
mydf <- data.frame(letter_factor = factor(rep(letters[1:3], each = 2)),
some_ints = 1L:6L,
some_text = paste0("This is text number ", 1:6, "."),
some_logical = rep(c(TRUE, FALSE), 3),
stringsAsFactors = FALSE,
row.names = paste0("fromDf_", 1:6))
mycorp <- corpus(mydf, text_field = "some_text",
metacorpus = list(source = "From a data.frame called mydf."))
expect_equal(docnames(mycorp),
paste("fromDf", 1:6, sep = "_"))
expect_equal(mycorp[["letter_factor"]][3,1],
factor("b", levels = c("a", "b", "c")))
mydf2 <- mydf
names(mydf2)[3] <- "text"
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf2))
expect_equal(corpus(mydf, text_field = "some_text"),
corpus(mydf, text_field = 3))
expect_error(corpus(mydf, text_field = "some_ints"),
"text_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = c(1,3)),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = c("some_text", "letter_factor")),
"text_field must refer to a single column")
expect_error(corpus(mydf, text_field = 0),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = -1),
"text_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "notfound"),
"column name notfound not found")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "some_ints"),
"docid_field must refer to a character mode column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c(1,3)),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = c("some_text", "letter_factor")),
"docid_field must refer to a single column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = 0),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = -1),
"docid_field index refers to an invalid column")
expect_error(corpus(mydf, text_field = "some_text", docid_field = "notfound"),
"column name notfound not found")
})
test_that("test corpus constructor works for tm objects", {
skip_if_not_installed("tm")
require(tm)
# VCorpus
data(crude, package = "tm") # load in a tm example VCorpus
mytmCorpus <- corpus(crude)
expect_equal(substring(texts(mytmCorpus)[1], 1, 21),
c("127" = "Diamond Shamrock Corp"))
data(acq, package = "tm")
mytmCorpus2 <- corpus(acq)
expect_equal(dim(docvars(mytmCorpus2)), c(50,12))
# SimpleCorpus
txt <- system.file("texts", "txt", package = "tm")
mytmCorpus3 <- SimpleCorpus(DirSource(txt, encoding = "UTF-8"),
control = list(language = "lat"))
qcorpus3 <- corpus(mytmCorpus3)
expect_equal(content(mytmCorpus3), texts(qcorpus3))
expect_equal(unclass(meta(mytmCorpus3, type = "corpus")[1]),
metacorpus(qcorpus3)[names(meta(mytmCorpus3, type = "corpus"))])
# any other type
mytmCorpus4 <- mytmCorpus3
class(mytmCorpus4)[1] <- "OtherCorpus"
expect_error(
corpus(mytmCorpus4),
"Cannot construct a corpus from this tm OtherCorpus object"
)
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for VCorpus with one document (#445)", {
skip_if_not_installed("tm")
require(tm)
tmCorpus_length1 <- VCorpus(VectorSource(data_corpus_inaugural[2]))
expect_silent(qcorpus <- corpus(tmCorpus_length1))
expect_equivalent(texts(qcorpus)[1], data_corpus_inaugural[2])
detach("package:tm", unload = TRUE)
detach("package:NLP", unload = TRUE)
})
test_that("test corpus constructor works for complex VCorpus (#849)", {
skip_if_not_installed("tm")
load("../data/corpora/complex_Corpus.rda")
qc <- corpus(complex_Corpus)
expect_equal(
head(docnames(qc), 3),
c("41113_201309.1", "41113_201309.2", "41113_201309.3")
)
expect_equal(
tail(docnames(qc), 3),
c("41223_201309.2553", "41223_201309.2554", "41223_201309.2555")
)
expect_output(
print(qc),
"Corpus consisting of 8,230 documents and 16 docvars\\."
)
})
test_that("corpus_subset works", {
txt <- c(doc1 = "This is a sample text.\nIt has three lines.\nThe third line.",
doc2 = "one\ntwo\tpart two\nthree\nfour.",
doc3 = "A single sentence.",
doc4 = "A sentence with \"escaped quotes\".")
dv <- data.frame(varnumeric = 10:13, varfactor = factor(c("A", "B", "A", "B")), varchar = letters[1:4])
data_corpus_test <- corpus(txt, docvars = dv, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test, varfactor == "B")), 2)
expect_equal(docnames(corpus_subset(data_corpus_test, varfactor == "B")), c("doc2", "doc4"))
data_corpus_test_nodv <- corpus(txt, metacorpus = list(source = "From test-corpus.R"))
expect_equal(ndoc(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), 1)
expect_equal(docnames(corpus_subset(data_corpus_test_nodv, LETTERS[1:4] == "B")), c("doc2"))
})
test_that("summary method works for corpus", {
expect_output(summary(print(data_corpus_irishbudget2010)), regexp = "^Corpus consisting of 14 documents")
})
test_that("corpus works for texts with duplicate filenames", {
txt <- c(one = "Text one.", two = "text two", one = "second first text")
cor <- corpus(txt)
expect_equal(docnames(cor), c("one", "two", "one.1"))
})
test_that("create a corpus on a corpus", {
expect_identical(
data_corpus_irishbudget2010,
corpus(data_corpus_irishbudget2010)
)
tmpcorp <- data_corpus_irishbudget2010
docnames(tmpcorp) <- paste0("d", seq_len(ndoc(tmpcorp)))
expect_identical(
tmpcorp,
corpus(data_corpus_irishbudget2010, docnames = paste0("d", seq_len(ndoc(tmpcorp))))
)
expect_identical(
corpus(data_corpus_irishbudget2010, compress = TRUE),
corpus(texts(data_corpus_irishbudget2010),
docvars = docvars(data_corpus_irishbudget2010),
metacorpus = metacorpus(data_corpus_irishbudget2010),
compress = TRUE)
)
})
test_that("summary.corpus with verbose prints warning", {
expect_warning(
summary(data_corpus_irishbudget2010, verbose = FALSE),
"verbose argument is defunct"
)
})
test_that("head, tail.corpus work as expected", {
crp <- corpus_subset(data_corpus_inaugural, Year < 2018)
expect_equal(
docnames(head(crp, 3)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(head(crp, -55)),
c("1789-Washington", "1793-Washington", "1797-Adams")
)
expect_equal(
docnames(tail(crp, 3)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
expect_equal(
docnames(tail(crp, -55)),
c("2009-Obama", "2013-Obama", "2017-Trump")
)
})
test_that("internal documents fn works", {
mydfm <- dfm(corpus_subset(data_corpus_inaugural, Year < 1800))
expect_is(quanteda:::documents.dfm(mydfm), "data.frame")
expect_equal(
dim(quanteda:::documents.dfm(mydfm)),
c(3, 3)
)
})
test_that("corpus constructor works with tibbles", {
skip_if_not_installed("tibble")
dd <- tibble::data_frame(a=1:3, text=c("Hello", "quanteda", "world"))
expect_is(
corpus(dd),
"corpus"
)
expect_equal(
texts(corpus(dd)),
c(text1 = "Hello", text2 = "quanteda", text3 = "world")
)
})
test_that("print.summary.corpus work", {
summ1 <- summary(data_corpus_inaugural + data_corpus_inaugural)
expect_output(
print(summ1),
"Corpus consisting of 116 documents, showing 100 documents:"
)
expect_output(
print(summ1[1:5, ]),
"\\s+Text Types Tokens"
)
expect_output(
print(summ1[, c("Types", "Tokens")]),
"^\\s+Types Tokens\\n1\\s+625\\s+1538"
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TriIndex.R
\name{TriIndex}
\alias{TriIndex}
\title{Get the row and column indices of upper and lower triangles of a matrix}
\usage{
TriIndex(Nrow, which = "lower")
}
\arguments{
\item{Nrow}{The number of rows}
\item{which}{Specify \code{which = "lower"} or \code{which = "upper"}.
Defaults to \code{"lower"}.}
}
\value{
A two-column matrix.
}
\description{
Given the number of rows in a symmetric matrix, calculate the row and column
indices of the upper or lower triangles.
}
\note{
A straightforward way to do this is to use
\code{which(lower.tri(YourMatrix), arr.ind = TRUE)}, however, this can be
quite slow as the number of rows increases.
}
\examples{
TriIndex(4)
TriIndex(4, "upper")
m <- matrix(0, nrow = 4, ncol = 4)
which(lower.tri(m), arr.ind = TRUE)
}
\references{
\url{http://stackoverflow.com/a/20899060/1270695}
}
\author{
Ananda Mahto
}
| /man/TriIndex.Rd | no_license | mrdwab/SOfun | R | false | true | 934 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TriIndex.R
\name{TriIndex}
\alias{TriIndex}
\title{Get the row and column indices of upper and lower triangles of a matrix}
\usage{
TriIndex(Nrow, which = "lower")
}
\arguments{
\item{Nrow}{The number of rows}
\item{which}{Specify \code{which = "lower"} or \code{which = "upper"}.
Defaults to \code{"lower"}.}
}
\value{
A two-column matrix.
}
\description{
Given the number of rows in a symmetric matrix, calculate the row and column
indices of the upper or lower triangles.
}
\note{
A straightforward way to do this is to use
\code{which(lower.tri(YourMatrix), arr.ind = TRUE)}, however, this can be
quite slow as the number of rows increases.
}
\examples{
TriIndex(4)
TriIndex(4, "upper")
m <- matrix(0, nrow = 4, ncol = 4)
which(lower.tri(m), arr.ind = TRUE)
}
\references{
\url{http://stackoverflow.com/a/20899060/1270695}
}
\author{
Ananda Mahto
}
|
### Standardizing data (example using Tokyo climate data)
### Columns: 気温 = temperature, 降水量 = precipitation, 日射量 = solar radiation, 風速 = wind speed
myData <- subset(read.csv("data/tokyo_weather.csv",
fileEncoding="utf8"),
select=c(気温,降水量,日射量,風速))
### Inspect the raw data
### NOTE(review): the original comment here said "basic boxplot", but the code
### below only prints the first rows — confirm whether a boxplot was intended.
head(myData)
myData.std <- scale(myData) # standardize each variable (center to mean 0, scale to sd 1)
head(myData.std)
colMeans(myData.std) # verify that each variable's mean is 0
apply(myData.std, 2, "sd") # verify that each variable's standard deviation is 1
| /docs/code/summary-scale.r | no_license | noboru-murata/sda | R | false | false | 459 | r | ### データの標準化 (気候データによる例)
### (Standardizing data — Tokyo climate example; columns: 気温 = temperature,
### 降水量 = precipitation, 日射量 = solar radiation, 風速 = wind speed)
myData <- subset(read.csv("data/tokyo_weather.csv",
fileEncoding="utf8"),
select=c(気温,降水量,日射量,風速))
### Inspect the raw data
### NOTE(review): the original comment said "basic boxplot", but the code below
### only prints the first rows — confirm intent.
head(myData)
myData.std <- scale(myData) # standardize each variable (center to mean 0, scale to sd 1)
head(myData.std)
colMeans(myData.std) # verify that each variable's mean is 0
apply(myData.std, 2, "sd") # verify that each variable's standard deviation is 1
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248236410559e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615833057-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248236410559e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
/Lab 5/lab_referencia/Lab5.r | no_license | kevin-alvarez/LabAnalisisDeDatos2_2017 | R | false | false | 3,382 | r | ||
#' @title
#' Write data frame to plain text delimited files
#'
#' @description
#' Write a data frame to a plain text file via \code{dput}, retaining
#' attributes such as factor levels so it can be restored with \code{dget}.
#'
#' @usage
#' df_write(df, df_file_name)
#'
#' @param df
#' a data frame to be stored.
#'
#' @param df_file_name
#' txt file name (must end in ".txt") to store the data frame.
#'
#' @export
df_write <- function(df, df_file_name) {
  # Validate the input type first so the error message is specific.
  if (!is.data.frame(df)) {
    stop("This is not a data frame")
  }
  # Require a literal ".txt" extension. The dot is escaped here: the previous
  # pattern ".*(.txt)$" treated the dot as a wildcard, so names like "atxt"
  # were incorrectly accepted.
  pattern <- "\\.txt$"
  if (!grepl(pattern, df_file_name)) {
    stop("Input file is not in valid txt format")
  }
  # dput() serializes the object, preserving factor levels and other
  # attributes, which is the documented reason for this writer.
  dput(df, df_file_name)
}
| /foofactors/R/df_write.R | no_license | STAT545-UBC-hw-2018-19/hw07-yihaoz | R | false | false | 720 | r | #' @title
#' Write data frame to plain text delimited files
#'
#' @description
#' Write data frames to plain text delimited files while retaining factor levels
#'
#' @usage
#' df_write(df, df_name, file_path = "plain.txt")
#'
#' @param df
#' a data frame to be stored.
#'
#' @param df_file_name
#' txt file name to store the file of the data frame.
#'
#' @export
df_write <- function(df, df_file_name) {
# check if the input is a data frame or not
if (!is.data.frame(df)) {
stop("This is not a data frame")
}
# Check if name is valid .txt
pattern <- ".*(.txt)$"
if (!grepl(pattern, df_file_name)) {
stop("Input file is not in valid txt format")
}
# write data frame
dput(df, df_file_name)
}
|
#' Sample a connected sub-network by greedy modularity maximisation
#'
#' Starts from one randomly chosen seed vertex and repeatedly adds the
#' neighbouring vertex whose inclusion maximises modularity, until
#' \code{num_nodes} vertices have been collected.
#'
#' @param net an igraph network object with named vertices.
#' @param num_nodes number of vertices to sample.
#' @return a character vector of sampled vertex names (empty when
#'   \code{num_nodes} is 0).
#' @export
modular_sample_net <- function(net, num_nodes) {
  # seq_len() guards against the 1:0 footgun: the original
  # `for (i in 1:num_nodes)` still iterated (over c(1, 0)) when num_nodes
  # was 0. For num_nodes >= 1 the behaviour is unchanged.
  sampled_nodes <- character(0)
  for (i in seq_len(num_nodes)) {
    if (i == 1) {
      # the first node is a seed sampled at random from the network
      sampled_nodes <- c(sample(V(net)$name, size = 1))
    } else {
      # grow the set with the modularity-maximising neighbour
      node_to_add <- find_node_to_add(net, sampled_nodes)
      sampled_nodes <- c(sampled_nodes, node_to_add)
    }
  }
  sampled_nodes
}
#' Collect the neighbours of a set of vertices
#'
#' @param net an igraph network object with named vertices.
#' @param sampled_nodes character vector of vertex names whose neighbours
#'   are collected.
#' @param mode edge direction passed to \code{igraph::adjacent_vertices}
#'   ('out', 'in', or 'all'); defaults to 'out'.
#' @return character vector of neighbouring vertex names (may contain
#'   duplicates and members of \code{sampled_nodes}, as before).
#' @export
get_nieghboring_nodes <- function(net, sampled_nodes, mode='out') {
  # Bug fix: `mode` was previously ignored — the call below was hard-coded
  # to mode='out'. The default keeps existing callers' behaviour unchanged.
  neighbors <- igraph::adjacent_vertices(net, sampled_nodes, mode = mode)
  neighboring_nodes <- character(0)
  for (node in sampled_nodes) {
    node_neighbors <- neighbors[[node]]
    # seq_len() avoids iterating over c(1, 0) when a vertex has no neighbours
    for (i in seq_len(length(node_neighbors))) {
      neighboring_nodes <- c(neighboring_nodes, node_neighbors[i]$name)
    }
  }
  neighboring_nodes
}
# Pick the neighbouring vertex whose addition maximises modularity.
find_node_to_add <- function(net, sampled_nodes) {
  candidates <- get_nieghboring_nodes(net, sampled_nodes)
  scores <- calculate_modularity(net, sampled_nodes, candidates)
  get_node_with_max_modularity(scores)
}
# Score each candidate neighbour by the modularity of the network when the
# candidate is added to the sampled community (a two-community split).
#
# @param net an igraph network object with named vertices.
# @param sampled_nodes vertex names already in the community.
# @param neighboring_nodes candidate vertex names to score.
# @return data.frame with columns `nodes` (candidate) and `modularity` (score).
calculate_modularity <- function(net, sampled_nodes, neighboring_nodes) {
  nodes <- V(net)$name
  # vapply preallocates the result instead of growing `q` with c() on every
  # iteration (quadratic copying in the original); scores are unchanged.
  q <- vapply(neighboring_nodes, function(neigbor) {
    proposed_nodes <- c(sampled_nodes, neigbor)
    membership_ids <- get_membership(proposed_nodes, nodes)
    igraph::modularity(net, membership_ids)
  }, numeric(1), USE.NAMES = FALSE)
  data.frame(nodes = neighboring_nodes, modularity = as.double(q),
             stringsAsFactors = FALSE)
}
# Return the candidate with the highest modularity score; ties are broken
# by picking one of the tied candidates at random.
get_node_with_max_modularity <- function(modularity_df) {
  best_score <- max(modularity_df$modularity)
  best_nodes <- modularity_df %>%
    filter(modularity == best_score) %>%
    pull(nodes)
  # multiple candidates can share the maximum; choose one at random
  if (length(best_nodes) > 1) {
    best_nodes <- sample(best_nodes, size = 1)
  }
  best_nodes
}
# Build the two-community membership vector for igraph::modularity():
# 2 marks vertices belonging to `proposed_nodes`, 1 marks all others.
#
# @param proposed_nodes vertex names in the candidate community.
# @param net_nodes all vertex names of the network, in network order.
# @return numeric vector parallel to `net_nodes` (2 = member, 1 = non-member).
#   Note: returns numeric(0) instead of NULL for an empty `net_nodes`.
get_membership <- function(proposed_nodes, net_nodes) {
  # Vectorized replacement for the original element-by-element c() loop:
  # %in% performs the membership test for every vertex at once.
  membership_id <- rep(1, length(net_nodes))
  membership_id[net_nodes %in% proposed_nodes] <- 2
  membership_id
}
# Sample a sub-network by greedy modularity maximisation: start from one
# random seed vertex, then repeatedly add the neighbouring vertex that
# maximises modularity until num_nodes vertices are collected.
# NOTE(review): `1:num_nodes` iterates over c(1, 0) when num_nodes == 0 —
# consider seq_len().
modular_sample_net <- function(net, num_nodes) {
# adding nodes to the extracted sub network until i read users specified size
for (i in 1:num_nodes) {
if (i == 1) {
# first node is the seed node with is randomly sampled forom TF network
sampled_nodes <- c(sample(V(net)$name, size=1))
} else {
node_to_add <- find_node_to_add(net, sampled_nodes)
sampled_nodes <- c(sampled_nodes, node_to_add)
}
}
return (sampled_nodes)
}
# Collect the neighbours of the sampled vertices.
# NOTE(review): the `mode` argument is ignored — the adjacent_vertices() call
# below hard-codes mode='out'; confirm which behaviour is intended.
#' @export
get_nieghboring_nodes <- function(net, sampled_nodes, mode='out') {
neighboring_nodes <- c()
neighbors <- igraph::adjacent_vertices(net, sampled_nodes, mode='out')
for (node in sampled_nodes) {
node_neighbors <- neighbors[[node]]
num_of_neighbors <- length(node_neighbors)
# NOTE(review): 1:num_of_neighbors iterates over c(1, 0) for a vertex with
# no neighbours — consider seq_len().
for (i in 1:num_of_neighbors) {
neighbor <- node_neighbors[i]$name
neighboring_nodes <- c(neighboring_nodes, neighbor)
}
}
return (neighboring_nodes)
}
# Pick the neighbouring vertex whose addition maximises modularity.
find_node_to_add <- function(net, sampled_nodes) {
neighboring_nodes <- get_nieghboring_nodes(net, sampled_nodes)
modularity_df <- calculate_modularity(net, sampled_nodes, neighboring_nodes)
node_to_add <- get_node_with_max_modularity(modularity_df)
return (node_to_add)
}
# Score each candidate neighbour by the modularity obtained when it is added
# to the sampled community (two-community split of the network).
calculate_modularity <- function(net, sampled_nodes, neighboring_nodes) {
q <- c()
nodes <- V(net)$name
for (neigbor in neighboring_nodes) {
proposed_nodes <- c(sampled_nodes, neigbor)
membership_ids <- get_membership(proposed_nodes, nodes)
proposed_q <- igraph::modularity(net, membership_ids)
q <- c(q, proposed_q)
}
modularity_df <- data.frame(nodes=neighboring_nodes, modularity=as.double(q), stringsAsFactors=FALSE)
return (modularity_df)
}
# Return the candidate with the highest modularity score.
get_node_with_max_modularity <- function(modularity_df) {
node_to_add <- modularity_df %>% filter(modularity == max(modularity)) %>% pull(nodes)
# if there are multiple nodes with the same modularity randomly select one.
if (length(node_to_add) > 1){
node_to_add <- sample(node_to_add, size=1)
}
return (node_to_add)
}
# Build the membership vector for igraph::modularity(): 2 marks vertices in
# proposed_nodes, 1 marks the rest; result is parallel to net_nodes.
get_membership <- function(proposed_nodes, net_nodes) {
membership_id <- c()
for (node in net_nodes){
if (node %in% proposed_nodes){
membership_id <- c(membership_id, 2)
} else {
membership_id <- c(membership_id, 1)
}
}
return (membership_id)
} |
context("uvhydrograph-render tests")
# Run the tests in a temporary directory.
# NOTE(review): the original working directory is saved in `wd` but never
# restored in this chunk — confirm teardown happens elsewhere.
wd <- getwd()
setwd(dir = tempdir())
# A normal Q hydrograph fixture should yield one month entry ('1510')
# containing both plots, both corrections tables, and no status messages.
test_that("uvhydrographPlot correctly includes list of months with rendering items for a normal Q hydrograph",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-minimal.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 1) #1 item for the month of 1510
expect_equal(length(renderList[['1510']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
expect_false(is.null(renderList[['1510']][['plot1']]))
expect_false(is.null(renderList[['1510']][['plot2']]))
expect_false(is.null(renderList[['1510']][['table1']]))
expect_true(is.null(renderList[['1510']][['ratingShiftTable']])) #no rating shift table
expect_false(is.null(renderList[['1510']][['table2']]))
expect_true(is.null(renderList[['1510']][['status_msg1']]))
expect_true(is.null(renderList[['1510']][['status_msg2']]))
})
# With no primary series points in the fixture, nothing is rendered at all.
test_that("uvhydrographPlot correctly skips rendering all if no primary series exists",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-no-primary-pts.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 0)
})
# A Q hydrograph without an upchain series keeps the primary plot/table but
# skips the secondary plot and its corrections table.
# NOTE(review): "secondard" in the test description is a typo, but changing
# it would change the reported test name.
test_that("uvhydrographPlot correctly skips secondard plot if an upchain series is not provided for Q hydrographs",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-Q-no-upchain.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 1) #1 item for the month of 1510
expect_equal(length(renderList[['1510']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
expect_false(is.null(renderList[['1510']][['plot1']]))
expect_true(is.null(renderList[['1510']][['plot2']])) #skipped
expect_false(is.null(renderList[['1510']][['table1']]))
expect_true(is.null(renderList[['1510']][['ratingShiftTable']])) #no rating shift table
expect_true(is.null(renderList[['1510']][['table2']])) #skipped
expect_true(is.null(renderList[['1510']][['status_msg1']])) #no error message
expect_true(is.null(renderList[['1510']][['status_msg2']])) #no error message
})
# A non-Q (groundwater) hydrograph WITH a reference series renders both the
# primary and secondary plots for month '1206'.
test_that("uvhydrographPlot correctly renders secondary plot if a reference series is provided for non-Q hydrographs",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-with-ref.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 1) #1 item; NOTE(review): month key here is 1206, not 1510 as originally commented
expect_equal(length(renderList[['1206']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
expect_false(is.null(renderList[['1206']][['plot1']]))
expect_false(is.null(renderList[['1206']][['plot2']]))
expect_false(is.null(renderList[['1206']][['table1']]))
expect_true(is.null(renderList[['1206']][['ratingShiftTable']])) #no rating shift table
expect_false(is.null(renderList[['1206']][['table2']]))
expect_true(is.null(renderList[['1206']][['status_msg1']])) #no error message
expect_true(is.null(renderList[['1206']][['status_msg2']])) #no error message
})
# A non-Q (groundwater) hydrograph WITHOUT a reference series skips the
# secondary plot and its corrections table.
test_that("uvhydrographPlot correctly skips secondary plot if a reference series is not provided for non-Q hydrographs",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-no-ref.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 1) #1 item; NOTE(review): month key here is 1206, not 1510 as originally commented
expect_equal(length(renderList[['1206']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
expect_false(is.null(renderList[['1206']][['plot1']]))
expect_true(is.null(renderList[['1206']][['plot2']])) #skipped
expect_false(is.null(renderList[['1206']][['table1']]))
expect_true(is.null(renderList[['1206']][['ratingShiftTable']])) #no rating shift table
expect_true(is.null(renderList[['1206']][['table2']])) #skipped
expect_true(is.null(renderList[['1206']][['status_msg1']]))
expect_true(is.null(renderList[['1206']][['status_msg2']]))
})
# useSecondaryPlot: TRUE only when a secondary/reference series is available
# (Q-without-upchain and gw-without-reference fixtures must be FALSE).
test_that("useSecondaryPlot correctly flags when to use a secondary plot",{
expect_false(repgen:::useSecondaryPlot(fromJSON(system.file('extdata','testsnippets','test-uvhydro-Q-no-upchain.json', package = 'repgen'))))
expect_true(repgen:::useSecondaryPlot(fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-with-ref.json', package = 'repgen'))))
expect_false(repgen:::useSecondaryPlot(fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-no-ref.json', package = 'repgen'))))
})
# getPrimaryReportElements: a fixture with no primary points yields only a
# status message; a valid Q fixture yields a gsplot and corrections table.
# NOTE(review): this test_that description is identical to the one below —
# consider distinct names so failures are distinguishable.
test_that("getPrimaryReportElements correctly configured gsplot, a corrections table, and/or failure message depending on report config",{
reportEls <- repgen:::getPrimaryReportElements(
fromJSON(system.file('extdata','testsnippets','test-uvhydro-no-primary-pts.json', package = 'repgen'))
, "1510", "Etc/GMT", TRUE)
expect_equal(reportEls[['plot']], NULL)
expect_equal(reportEls[['table']], NULL)
expect_equal(reportEls[['status_msg']], "Corrected data missing for Discharge.ft^3/s@01047200")
reportEls <- repgen:::getPrimaryReportElements(
fromJSON(system.file('extdata','testsnippets','test-uvhydro-Q-no-upchain.json', package = 'repgen'))
, "1510", "Etc/GMT", TRUE)
expect_is(reportEls[['plot']], "gsplot")
expect_is(reportEls[['table']], "data.frame")
expect_equal(reportEls[['table']][1,][["Time"]], "2015-10-06")
expect_equal(reportEls[['table']][1,][["Correction Comments"]], "End : Approval period copy paste from Ref")
expect_equal(reportEls[['status_msg']], NULL)
})
# getPrimaryReportElements with the groundwater fixture: requesting a month
# with no data ('1510') yields only a status message; the populated month
# ('1206') yields a gsplot and a two-row corrections table.
test_that("getPrimaryReportElements correctly configured gsplot, a corrections table, and/or failure message depending on report config",{
reportEls <- repgen:::getPrimaryReportElements(
fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-with-ref.json', package = 'repgen'))
, "1510", "Etc/GMT", TRUE) #wrong month
expect_equal(reportEls[['plot']], NULL)
expect_equal(reportEls[['table']], NULL)
expect_equal(reportEls[['status_msg']], "Corrected data missing for WaterLevel, BelowLSD.ft@353922083345600")
reportEls <- repgen:::getPrimaryReportElements(
fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-with-ref.json', package = 'repgen'))
, "1206", "Etc/GMT", TRUE)
expect_is(reportEls[['plot']], "gsplot")
expect_is(reportEls[['table']], "data.frame")
expect_equal(reportEls[['table']][1,][["Time"]], "2012-06-29 10:17:00")
expect_equal(reportEls[['table']][1,][["Correction Comments"]], "Start : Example primary series correction")
expect_equal(reportEls[['table']][2,][["Time"]], "2012-06-30 22:59:00")
expect_equal(reportEls[['table']][2,][["Correction Comments"]], "End : Example primary series correction")
expect_equal(reportEls[['status_msg']], NULL)
})
# createPrimaryPlot with ONLY a corrected series (all other inputs empty):
# the plot still spans the full month and its y-limits track the data.
test_that("createPrimaryPlot only can handle minimal requirements (just corrected series)",{
Sys.setenv(TZ = "UTC")
#minimal case should plot (only corrected series)
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# empty placeholders (na.omit of all-NA frames gives 0-row data frames)
plot_object <- repgen:::createPrimaryPlot(
list(label="Primary Test Series", units="ft", type="Test"),
NULL,
NULL,
NULL,
list(corrected=testSeries, estimated=NULL, uncorrected=NULL, corrected_reference=NULL,
estimated_reference=NULL,
comparison=NULL,inverted=FALSE,loggedAxis=FALSE),
list(),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), minQ=as.numeric(NA), maxQ=as.numeric(NA), n=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE)),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
list(),
list(),
na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE)),
na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE)),
TRUE,
"Etc/GMT",
FALSE)
expect_is(plot_object[['side.1']], "list")
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_is(plot_object[['side.2']], "list")
expect_equal(ylim(plot_object)[['side.2']][1], 10)
expect_equal(ylim(plot_object)[['side.2']][2], 20)
expect_is(plot_object[['legend']], "list")
expect_equal(plot_object[['legend']][['legend.auto']][['legend']], "Corrected UV Primary Test Series")
})
# createPrimaryPlot with every input populated (corrected/estimated/
# uncorrected/reference/comparison series, DVs, Q measurements, WQ, GW,
# readings, approval bars, corrections, rating shifts). The same fixture is
# rendered twice: excludeZeroNegative TRUE, then FALSE.
test_that("createPrimaryPlot correctly configured gsplot",{
Sys.setenv(TZ = "UTC")
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-02 17:00:00"), as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(-1, 10, 20),
month=c("1605", "1605", "1605"),
stringsAsFactors=FALSE)
)
testSeriesEst <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-02 17:00:00"), as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(-1, 10, 20),
month=c("1605", "1605", "1605"),
stringsAsFactors=FALSE)
)
testSeriesUnc <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-02 17:00:00"), as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(-1, 10, 20),
month=c("1605", "1605", "1605"),
stringsAsFactors=FALSE)
)
testSeriesRef <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(4, 15),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
testSeriesEstRef <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-24 17:15:00"), as.POSIXct("2016-05-28 17:45:00")),
value=c(7, 16),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
testSeriesComp <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(9, 12),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# daily values in each approval state
dvs <- list(
approved_dv=data.frame(
time=c(as.POSIXct("2016-05-03"), as.POSIXct("2016-05-04")),
value=c(10, 11),
month=c("1605", "1605"),
point_type=c(21, 21),
legend.name=c("Test DV", "Test DV"),
stringsAsFactors=FALSE),
inreview_dv=data.frame(
time=c(as.POSIXct("2016-05-05"), as.POSIXct("2016-05-06")),
value=c(12, 14),
month=c("1605", "1605"),
point_type=c(21, 21),
legend.name=c("In Review Test DV", "In Review Test DV"),
stringsAsFactors=FALSE),
working_dv=data.frame(
time=c(as.POSIXct("2016-05-20"), as.POSIXct("2016-05-22")),
value=c(15, 16),
month=c("1605", "1605"),
point_type=c(21, 21),
legend.name=c("Working Test DV", "Working Test DV"),
stringsAsFactors=FALSE)
)
# discharge measurements with error bars (minQ/maxQ); maxQ=50 drives the
# upper y-limit assertion below
qMeas <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(7, 8),
minQ=c(6, 18),
maxQ=c(12, 50),
n=c("33", "44"),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
wq <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(14, 10),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
gw <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(13, 9),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
readings <- list(
reference=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(6, 7),
uncertainty=c(1, 3),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
crest_stage_gage=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(8, 9),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
high_water_mark=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(4, 5),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
approvalBars <- list(
appr_working_uv=list(x0=as.POSIXct("2016-05-01 00:00:00"), x1=as.POSIXct("2016-05-06 00:00:00"), legend.name="Working Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_inreview_uv=list(x0=as.POSIXct("2016-05-06 00:00:00"), x1=as.POSIXct("2016-05-20 00:00:00"), legend.name="In Review Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_approved_uv=list(x0=as.POSIXct("2016-05-20 00:00:00"), x1=as.POSIXct("2016-06-30 00:00:00"), legend.name="Approved Test Series", time=as.POSIXct("2016-05-01 00:00:00"))
)
testCorrections <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(NA, NA, NA),
month=c("1605", "1605", "1605"),
comment=c("correction 1", "correction 2", "correction 3"),
stringsAsFactors=FALSE)
testRatingShifts <- data.frame(
time=c(as.POSIXct("2016-05-04 17:00:00"), as.POSIXct("2016-05-15 17:45:00"), as.POSIXct("2016-05-20 17:45:00")),
value=c(NA, NA, NA),
month=c("1605", "1605", "1605"),
comment=c("Prrorate on over ice-out rise for scour to control.", "Based on Qms 403-406.", "Based on Qms 403-406. Carried over from previous period."),
stringsAsFactors=FALSE)
plot_object <- repgen:::createPrimaryPlot(
list(label="Primary Test Series", units="ft", type="Test"),
list(label="Reference Test Series", units="ft", type="Test"),
list(label="Comparison Test Series", units="ft", type="Test"),
"testComparisonStationId",
list(corrected=testSeries, estimated=testSeriesEst, uncorrected=testSeriesUnc, corrected_reference=testSeriesRef,
estimated_reference=testSeriesEstRef,
comparison=testSeriesComp,inverted=FALSE,loggedAxis=FALSE),
dvs,
qMeas,
wq,
gw,
readings,
approvalBars,
testCorrections,
testRatingShifts,
TRUE,
"Etc/GMT",
TRUE)
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_equal(ylim(plot_object)[['side.2']][1], -1)
expect_equal(ylim(plot_object)[['side.2']][2], 50) #The high matches the top of the Q error bar
expect_equal(plot_object[['global']][['title']][['xlab']], "UV Series: 2016-05-02 17:00:00 through 2016-05-23 17:45:00")
expect_is(plot_object[['view.1.2']], "list")
expect_equal(length(plot_object[['view.1.2']]), 27) #all plot calls are there
#do not exclude negatives
plot_object <- repgen:::createPrimaryPlot(
list(label="Primary Test Series", units="ft", type="Test"),
list(label="Reference Test Series", units="ft", type="Test"),
list(label="Comparison Test Series", units="ft", type="Test"),
"testComparisonStationId",
list(corrected=testSeries, estimated=testSeriesEst, uncorrected=testSeriesUnc, corrected_reference=testSeriesRef,
estimated_reference=testSeriesEstRef,
comparison=testSeriesComp,inverted=FALSE,loggedAxis=FALSE),
dvs,
qMeas,
wq,
gw,
readings,
approvalBars,
testCorrections,
testRatingShifts,
TRUE,
"Etc/GMT",
FALSE)
#TODO need an assertion to test if zeros/negatives are excluded
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_equal(ylim(plot_object)[['side.2']][1], -1)
expect_equal(ylim(plot_object)[['side.2']][2], 50) #The high matches the top of the Q error bar
expect_equal(plot_object[['global']][['title']][['xlab']], "UV Series: 2016-05-02 17:00:00 through 2016-05-23 17:45:00")
expect_is(plot_object[['view.1.2']], "list")
expect_equal(length(plot_object[['view.1.2']]), 27) #all plot calls are there
})
# createSecondaryPlot with ONLY a corrected series: plot spans the full
# month and y-limits track the corrected data.
test_that("createSecondaryPlot only can handle minimal requirements (just corrected series)",{
Sys.setenv(TZ = "UTC")
#minimal case should plot (only corrected series)
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# empty placeholders (na.omit of all-NA frames gives 0-row data frames)
plot_object <- repgen:::createSecondaryPlot(
list(label="Test Series", units="ft", type="Test"),
list(corrected=testSeries, estimated=NULL, uncorrected=NULL, inverted=FALSE),
list(),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), minShift=as.numeric(NA), maxShift=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE)),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE)),
list(),
"Etc/GMT",
FALSE,
tertiary_label="")
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_equal(ylim(plot_object)[['side.2']][1], 10)
expect_equal(ylim(plot_object)[['side.2']][2], 20)
expect_is(plot_object[['legend']], "list")
expect_equal(plot_object[['legend']][['legend.auto']][['legend']], "Corrected UV Test Series")
})
# createSecondaryPlot fully populated: corrected/estimated/uncorrected
# series, approval bars, effective & measured shifts (with error bars),
# gage heights, readings, corrections, and a tertiary axis label.
test_that("createSecondaryPlot more tests",{
Sys.setenv(TZ = "UTC")
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
testSeriesEst <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:15:00"), as.POSIXct("2016-05-23 17:15:00")),
value=c(11, 22),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
testSeriesUnc <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(20, 30),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
approvalBars <- list(
appr_working_uv=list(x0=as.POSIXct("2016-05-01 00:00:00"), x1=as.POSIXct("2016-05-06 00:00:00"), legend.name="Working Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_inreview_uv=list(x0=as.POSIXct("2016-05-06 00:00:00"), x1=as.POSIXct("2016-05-20 00:00:00"), legend.name="In Review Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_approved_uv=list(x0=as.POSIXct("2016-05-20 00:00:00"), x1=as.POSIXct("2016-06-30 00:00:00"), legend.name="Approved Test Series", time=as.POSIXct("2016-05-01 00:00:00"))
)
# effective shift (low end of side.4 limits) and measured shifts with
# min/max error bars (maxShift=44 drives the side.4 upper limit)
effShift <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(2, 3),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
measShift <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
minShift=c(9, 18),
maxShift=c(12, 44),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
gageHeight <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
n=c("1222", "22"),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
readings <- list(
reference=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(6, 7),
uncertainty=c(1, 3),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
crest_stage_gage=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(8, 9),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
high_water_mark=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(4, 5),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
testCorrections <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(NA, NA, NA),
month=c("1605", "1605", "1605"),
comment=c("correction 1", "correction 2", "correction 3"),
stringsAsFactors=FALSE)
plot_object <- repgen:::createSecondaryPlot(
list(label="Test Series", units="ft", type="Test"),
list(corrected=testSeries, estimated=testSeriesEst, uncorrected=testSeriesUnc, inverted=FALSE),
approvalBars,
effShift,
measShift,
gageHeight,
readings,
testCorrections,
"Etc/GMT",
FALSE,
tertiary_label="Tertiary Label")
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_equal(ylim(plot_object)[['side.2']][1], 2)
expect_equal(ylim(plot_object)[['side.2']][2], 29)
expect_equal(ylim(plot_object)[['side.4']][1], 2) # low of effective shift series
expect_equal(ylim(plot_object)[['side.4']][2], 44) # high of top of meas shift error
expect_equal(plot_object[['global']][['title']][['ylab']], "Test Series")
expect_equal(plot_object[['global']][['title']][['xlab']], "UV Series: 2016-05-03 17:00:00 through 2016-05-23 17:45:00")
expect_is(plot_object[['view.1.2']], "list")
expect_equal(length(plot_object[['view.1.2']]), 17) #all plot calls are there
expect_is(plot_object[['view.1.4']], "list")
expect_equal(length(plot_object[['view.1.4']]), 6) #all plot calls are there
expect_is(plot_object[['view.7.2']], "list")
expect_equal(length(plot_object[['view.7.2']]), 6) #all plot calls are there
})
test_that("calculateYLim returns y-lim which covers corrected points and most (possibly not all) of the uncorrected points ",{
yVals1 <- c(10, 15, 16, 17, 40)
#this series within 30% on both ends, will use as lims
yVals2 <- c(5, 15, 16, 17, 45)
#this series much larger range on both ends and will not be used
yVals3 <- c(-5, 15, 16, 17, 50)
#this series much larger range on only one end, will use lims on one end
yVals4 <- c(8, 15, 16, 17, 52)
#this is a smaller lims, won't use lims
yVals5 <- c(15, 16, 17)
limsSeries1 <- repgen:::calculateYLim(yVals1, yVals2)
limsSeries2 <- repgen:::calculateYLim(yVals1, yVals3)
limsSeries3 <- repgen:::calculateYLim(yVals1, yVals4)
limsSeries4 <- repgen:::calculateYLim(yVals1, yVals5)
#lims expanded on both ends
expect_equal(limsSeries1[1], 5)
expect_equal(limsSeries1[2], 45)
#lims not expanded at all
expect_equal(limsSeries2[1], 10)
expect_equal(limsSeries2[2], 40)
#lims allowed to expanded only on 1 side
expect_equal(limsSeries3[1], 8)
expect_equal(limsSeries3[2], 40)
#lims not allowed to contract
expect_equal(limsSeries4[1], 10)
expect_equal(limsSeries4[2], 40)
})
test_that("getPrimaryPlotConfig correctly creates lines for 6 possible types of series for gsplot",{
testSeries <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
testLimits <- c(10,20)
asCorrected <- repgen:::getPrimaryPlotConfig(testSeries, "corrected", "Test Series", testLimits)
asEstimated <- repgen:::getPrimaryPlotConfig(testSeries, "estimated", "Test Series", testLimits)
asUncorrected <- repgen:::getPrimaryPlotConfig(testSeries, "uncorrected", "Test Series", testLimits)
asComparisonSharedAxis <- repgen:::getPrimaryPlotConfig(testSeries, "comparison", "Test Series", testLimits, dataSide=4)
asComparisonIndependentAxis <- repgen:::getPrimaryPlotConfig(testSeries, "comparison", "Test Series", testLimits, dataSide=6, comparisonOnIndependentAxes=FALSE)
asCorrectedReference <- repgen:::getPrimaryPlotConfig(testSeries, "corrected_reference", "Test Series", testLimits, dataSide=4)
asEstimatedReference <- repgen:::getPrimaryPlotConfig(testSeries, "estimated_reference", "Test Series", testLimits, dataSide=4)
#corrected lines
expect_equal(length(asCorrected$lines$x), 2)
expect_equal(length(asCorrected$lines$y), 2)
expect_equal(asCorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Corrected", asCorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asCorrected$lines[['legend.name']]))
#estimated lines
expect_equal(length(asEstimated$lines$x), 2)
expect_equal(length(asEstimated$lines$y), 2)
expect_equal(asEstimated$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asEstimated$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$col[1])) #only care that color was set
expect_true(grepl("Estimated", asEstimated$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asEstimated$lines[['legend.name']]))
#uncorrected lines
expect_equal(length(asUncorrected$lines$x), 2)
expect_equal(length(asUncorrected$lines$y), 2)
expect_equal(asUncorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asUncorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Uncorrected", asUncorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asUncorrected$lines[['legend.name']]))
#comparison lines
expect_equal(length(asComparisonSharedAxis$lines$x), 2)
expect_equal(length(asComparisonSharedAxis$lines$y), 2)
expect_equal(asComparisonSharedAxis$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asComparisonSharedAxis$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asComparisonSharedAxis$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asComparisonSharedAxis$lines$col[1])) #only care that color was set
expect_equal("Test Series", asComparisonSharedAxis$lines[['legend.name']])
expect_equal("Test Series", asComparisonSharedAxis$lines[['ylab']])
expect_false(asComparisonSharedAxis$lines[['ann']])
expect_false(asComparisonSharedAxis$lines[['axes']])
#comparison (independent) lines
expect_equal(length(asComparisonIndependentAxis$lines$x), 2)
expect_equal(length(asComparisonIndependentAxis$lines$y), 2)
expect_equal(asComparisonIndependentAxis$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asComparisonIndependentAxis$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asComparisonIndependentAxis$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asComparisonIndependentAxis$lines$col[1])) #only care that color was set
expect_equal("Test Series", asComparisonIndependentAxis$lines[['legend.name']])
expect_equal("Test Series", asComparisonIndependentAxis$lines[['ylab']])
expect_true(asComparisonIndependentAxis$lines[['ann']])
expect_true(asComparisonIndependentAxis$lines[['axes']])
#corrected ref lines
expect_equal(length(asCorrectedReference$lines$x), 2)
expect_equal(length(asCorrectedReference$lines$y), 2)
expect_equal(asCorrectedReference$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCorrectedReference$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asCorrectedReference$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asCorrectedReference$lines$col[1])) #only care that color was set
expect_true(grepl("Corrected", asCorrectedReference$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asCorrectedReference$lines[['legend.name']]))
#estimated ref lines
expect_equal(length(asEstimatedReference$lines$x), 2)
expect_equal(length(asEstimatedReference$lines$y), 2)
expect_equal(asEstimatedReference$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asEstimatedReference$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asEstimatedReference$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asEstimatedReference$lines$col[1])) #only care that color was set
expect_true(grepl("Estimated", asEstimatedReference$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asEstimatedReference$lines[['legend.name']]))
#ensure estimated and corrected have different line type
expect_false(asCorrected$lines$lty[1] == asEstimated$lines$lty[1])
expect_false(asCorrectedReference$lines$lty[1] == asEstimatedReference$lines$lty[1])
#ensure color is different for different series types
expect_false(asCorrected$lines$col[1] == asEstimated$lines$col[1])
expect_false(asCorrected$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asEstimated$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asComparisonSharedAxis$lines$col[1] == asCorrected$lines$col[1])
expect_false(asComparisonSharedAxis$lines$col[1] == asEstimated$lines$col[1])
expect_false(asComparisonSharedAxis$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asComparisonIndependentAxis$lines$col[1] == asCorrected$lines$col[1])
expect_false(asComparisonIndependentAxis$lines$col[1] == asEstimated$lines$col[1])
expect_false(asComparisonIndependentAxis$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asComparisonSharedAxis$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asCorrected$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asEstimated$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asCorrected$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asEstimated$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asUncorrected$lines$col[1])
})
test_that("getSecondaryPlotConfig correctly creates lines for 3 possible types of series for gsplot",{
testSeries <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
asCorrected <- repgen:::getSecondaryPlotConfig(testSeries, "corrected", "Test Series", c(10, 20))
asEstimated <- repgen:::getSecondaryPlotConfig(testSeries, "estimated", "Test Series", c(10, 20))
asUncorrected <- repgen:::getSecondaryPlotConfig(testSeries, "uncorrected", "Test Series", c(10, 20))
#corrected lines
expect_equal(length(asCorrected$lines$x), 2)
expect_equal(length(asCorrected$lines$y), 2)
expect_equal(asCorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Corrected", asCorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asCorrected$lines[['legend.name']]))
#estimated lines
expect_equal(length(asEstimated$lines$x), 2)
expect_equal(length(asEstimated$lines$y), 2)
expect_equal(asEstimated$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asEstimated$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$col[1])) #only care that color was set
expect_true(grepl("Estimated", asEstimated$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asEstimated$lines[['legend.name']]))
#uncorrected lines
expect_equal(length(asUncorrected$lines$x), 2)
expect_equal(length(asUncorrected$lines$y), 2)
expect_equal(asUncorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asUncorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Uncorrected", asUncorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asUncorrected$lines[['legend.name']]))
#ensure color is different for different series types
expect_false(asCorrected$lines$col[1] == asEstimated$lines$col[1])
expect_false(asCorrected$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asEstimated$lines$col[1] == asUncorrected$lines$col[1])
})
test_that("getWqPlotConfig correctly creates a points for gsplot",{
testData <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
wqConfig <- repgen:::getWqPlotConfig(testData)
expect_equal(length(wqConfig$points$x), 2)
expect_equal(length(wqConfig$points$y), 2)
#points correct
expect_equal(wqConfig$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(wqConfig$points$y[1], 10)
expect_equal(wqConfig$points$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(wqConfig$points$y[2], 20)
})
test_that("getMeasQPlotConfig correctly creates a points, error bars, and callouts calls for gsplot",{
testData <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
minQ=c(9, 18),
maxQ=c(12, 23),
n=c("33", "44"),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
measuredQConfig <- repgen:::getMeasQPlotConfig(testData)
expect_equal(length(measuredQConfig$points$x), 2)
expect_equal(length(measuredQConfig$points$y), 2)
expect_equal(length(measuredQConfig$callouts$x), 2)
expect_equal(length(measuredQConfig$callouts$y), 2)
expect_equal(length(measuredQConfig$callouts$labels), 2)
expect_equal(length(measuredQConfig$points$y), 2)
expect_equal(length(measuredQConfig$error_bar$x), 2)
expect_equal(length(measuredQConfig$error_bar$y), 2)
expect_equal(length(measuredQConfig$error_bar$y.low), 2)
expect_equal(length(measuredQConfig$error_bar$y.high), 2)
#points correct
expect_equal(measuredQConfig$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(measuredQConfig$points$y[1], 10)
expect_equal(measuredQConfig$points$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(measuredQConfig$points$y[2], 20)
#bars correct
expect_equal(measuredQConfig$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(measuredQConfig$error_bar$y[1], 10)
expect_equal(measuredQConfig$error_bar$y.low[1], 1)
expect_equal(measuredQConfig$error_bar$y.high[1], 2)
expect_equal(measuredQConfig$error_bar$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(measuredQConfig$error_bar$y[2], 20)
expect_equal(measuredQConfig$error_bar$y.low[2], 2)
expect_equal(measuredQConfig$error_bar$y.high[2], 3)
#callouts correct
expect_equal(measuredQConfig$callouts$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(measuredQConfig$callouts$y[1], 10)
expect_equal(measuredQConfig$callouts$labels[1], "33")
expect_equal(measuredQConfig$callouts$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(measuredQConfig$callouts$y[2], 20)
expect_equal(measuredQConfig$callouts$labels[2], "44")
})
test_that("getGwPlotConfig correctly creates a points call for gsplot",{
testData <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
gwConfig <- repgen:::getGwPlotConfig(testData)
expect_equal(length(gwConfig$points$x), 2)
expect_equal(length(gwConfig$points$y), 2)
#points correct
expect_equal(gwConfig$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(gwConfig$points$y[1], 10)
expect_equal(gwConfig$points$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(gwConfig$points$y[2], 20)
})
test_that("getReadingsPlotConfig correctly creates points and erorr bar calls for gsplot with different styles for different reading types",{
testReadings <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(1, 3),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
asCsg <- repgen:::getReadingsPlotConfig("csg", testReadings)
asRef <- repgen:::getReadingsPlotConfig("ref", testReadings)
asHwm <- repgen:::getReadingsPlotConfig("hwm", testReadings)
#csg points
expect_equal(length(asCsg$points$x), 2)
expect_equal(length(asCsg$points$y), 2)
expect_equal(asCsg$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCsg$points$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asCsg$points$pch[1])) #only care that pch was set
expect_false(repgen:::isEmptyOrBlank(asCsg$points$col[1])) #only care that color was set
#csg error_bar
expect_equal(length(asCsg$error_bar$x), 2)
expect_equal(length(asCsg$error_bar$y), 2)
expect_equal(asCsg$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCsg$error_bar$y[1], 10)
expect_equal(asCsg$error_bar$y.low[1], 1)
expect_equal(asCsg$error_bar$y.high[1], 1)
expect_false(repgen:::isEmptyOrBlank(asCsg$error_bar$col[1])) #only care that color was set
#ref points
expect_equal(length(asRef$points$x), 2)
expect_equal(length(asRef$points$y), 2)
expect_equal(asRef$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asRef$points$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asRef$points$pch[1])) #only care that pch was set
expect_false(repgen:::isEmptyOrBlank(asRef$points$col[1])) #only care that color was set
#ref error_bar
expect_equal(length(asRef$error_bar$x), 2)
expect_equal(length(asRef$error_bar$y), 2)
expect_equal(asRef$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asRef$error_bar$y[1], 10)
expect_equal(asRef$error_bar$y.low[1], 1)
expect_equal(asRef$error_bar$y.high[1], 1)
expect_false(repgen:::isEmptyOrBlank(asRef$error_bar$col[1])) #only care that color was set
#hwm points
expect_equal(length(asHwm$points$x), 2)
expect_equal(length(asHwm$points$y), 2)
expect_equal(asHwm$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asHwm$points$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asHwm$points$pch[1])) #only care that pch was set
expect_false(repgen:::isEmptyOrBlank(asHwm$points$col[1])) #only care that color was set
#hwm error_bar
expect_equal(length(asHwm$error_bar$x), 2)
expect_equal(length(asHwm$error_bar$y), 2)
expect_equal(asHwm$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asHwm$error_bar$y[1], 10)
expect_equal(asHwm$error_bar$y.low[1], 1)
expect_equal(asHwm$error_bar$y.high[1], 1)
expect_false(repgen:::isEmptyOrBlank(asHwm$error_bar$col[1])) #only care that color was set
#ensure pch and color are different for different reading types
expect_false(asCsg$points$pch[1] == asRef$points$pch[1])
expect_false(asCsg$points$pch[1] == asHwm$points$pch[1])
expect_false(asRef$points$pch[1] == asHwm$points$pch[1])
expect_false(asCsg$points$col[1] == asRef$points$col[1])
expect_false(asCsg$points$col[1] == asHwm$points$col[1])
expect_false(asRef$points$col[1] == asHwm$points$col[1])
expect_false(asCsg$error_bar$col[1] == asRef$error_bar$col[1])
expect_false(asCsg$error_bar$col[1] == asHwm$error_bar$col[1])
expect_false(asRef$error_bar$col[1] == asHwm$error_bar$col[1])
})
test_that("getDvPlotConfig correctly creates points calls for gsplot with different styles for different approval levels",{
dvPoints <- data.frame(
time=c(as.POSIXct("2016-05-03"), as.POSIXct("2016-05-23")),
value=c(10, 20),
month=c("1605", "1605"),
point_type=c(21, 21),
legend.name=c("Test DV", "Test DV"),
stringsAsFactors=FALSE)
asApproved <- repgen:::getDvPlotConfig("approved_dv", dvPoints)
asInReview <- repgen:::getDvPlotConfig("inreview_dv", dvPoints)
asWorking <- repgen:::getDvPlotConfig("working_dv", dvPoints)
#approved points
expect_equal(length(asApproved$points$x), 2)
expect_equal(length(asApproved$points$y), 2)
expect_equal(asApproved$points$x[1], as.POSIXct("2016-05-03"))
expect_equal(asApproved$points$y[1], 10)
expect_equal(asApproved$points$legend.name[1], "Test DV")
expect_equal(asApproved$points$pch[1], 21)
expect_false(repgen:::isEmptyOrBlank(asApproved$points$bg[1])) #only care that color was set
expect_equal(asApproved$points$legend.name[1], "Test DV")
expect_equal(asApproved$points$x[2], as.POSIXct("2016-05-23"))
expect_equal(asApproved$points$legend.name[2], "Test DV")
expect_equal(asApproved$points$y[2], 20)
expect_equal(asApproved$points$pch[2], 21)
#in-review points
expect_equal(length(asInReview$points$x), 2)
expect_equal(length(asInReview$points$y), 2)
expect_equal(asInReview$points$x[1], as.POSIXct("2016-05-03"))
expect_equal(asInReview$points$y[1], 10)
expect_equal(asInReview$points$legend.name[1], "Test DV")
expect_equal(asInReview$points$pch[1], 21)
expect_false(repgen:::isEmptyOrBlank(asInReview$points$bg[1])) #only care that bg was set
expect_equal(asInReview$points$legend.name[1], "Test DV")
expect_equal(asInReview$points$x[2], as.POSIXct("2016-05-23"))
expect_equal(asInReview$points$legend.name[2], "Test DV")
expect_equal(asInReview$points$y[2], 20)
expect_equal(asInReview$points$pch[2], 21)
#working points
expect_equal(length(asWorking$points$x), 2)
expect_equal(length(asWorking$points$y), 2)
expect_equal(asWorking$points$x[1], as.POSIXct("2016-05-03"))
expect_equal(asWorking$points$y[1], 10)
expect_equal(asWorking$points$legend.name[1], "Test DV")
expect_equal(asWorking$points$pch[1], 21)
expect_false(repgen:::isEmptyOrBlank(asWorking$points$bg[1])) #only care that bg was set
expect_equal(asWorking$points$legend.name[1], "Test DV")
expect_equal(asWorking$points$x[2], as.POSIXct("2016-05-23"))
expect_equal(asWorking$points$legend.name[2], "Test DV")
expect_equal(asWorking$points$y[2], 20)
expect_equal(asWorking$points$pch[2], 21)
#ensure background color are different accross levels
expect_false(asApproved$points$bg[1] == asInReview$points$bg[1])
expect_false(asApproved$points$bg[1] == asWorking$points$bg[1])
expect_false(asInReview$points$bg[1] == asWorking$points$bg[1])
})
test_that("getEffectiveShiftPlotConfig correctly creates lines with correct legend name for gsplot",{
#empty case returns empty list
emptyConfigs <- repgen:::getEffectiveShiftPlotConfig(
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE))
, "label1", "label2"
)
expect_equal(length(emptyConfigs$lines$x), 0)
expect_equal(length(emptyConfigs$lines$y), 0)
testData <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
effShiftConfig <- repgen:::getEffectiveShiftPlotConfig(testData, "label1", "label2")
expect_equal(length(effShiftConfig$lines$x), 2)
expect_equal(length(effShiftConfig$lines$y), 2)
#points correct
expect_equal(effShiftConfig$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(effShiftConfig$lines$y[1], 10)
expect_equal(effShiftConfig$lines$legend.name[1], "label1 label2")
expect_equal(effShiftConfig$lines$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(effShiftConfig$lines$y[2], 20)
#a text entry exists to ensure axis shows, BUT this might be removed, remove from test if that happens
expect_equal(length(effShiftConfig$text$x), 1)
expect_equal(length(effShiftConfig$text$y), 1)
expect_equal(effShiftConfig$text$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(effShiftConfig$text$y[1], 10)
})
test_that("getGageHeightPlotConfig correctly creates points and call out labels for gsplot",{
#empty case returns empty list
emptyConfigs <- repgen:::getGageHeightPlotConfig(
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), n=as.character(NA), month=as.character(NA), stringsAsFactors=FALSE))
)
expect_equal(length(emptyConfigs$points$x), 0)
expect_equal(length(emptyConfigs$points$y), 0)
expect_equal(length(emptyConfigs$callouts$x), 0)
expect_equal(length(emptyConfigs$callouts$y), 0)
testData <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
n=c("1222", "22"),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
ghConfig <- repgen:::getGageHeightPlotConfig(testData)
expect_equal(length(ghConfig$points$x), 2)
expect_equal(length(ghConfig$points$y), 2)
expect_equal(length(ghConfig$callouts$x), 2)
expect_equal(length(ghConfig$callouts$y), 2)
expect_equal(length(ghConfig$callouts$labels), 2)
#points correct
expect_equal(ghConfig$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(ghConfig$points$y[1], 10)
expect_equal(ghConfig$points$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(ghConfig$points$y[2], 20)
#callouts correct
expect_equal(ghConfig$callouts$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(ghConfig$callouts$y[1], 10)
expect_equal(ghConfig$callouts$labels[1], "1222")
expect_equal(ghConfig$callouts$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(ghConfig$callouts$y[2], 20)
expect_equal(ghConfig$callouts$labels[2], "22")
})
test_that("getMeasuredShiftPlotConfig correctly creates points and error bars calls for gsplot",{
#empty case returns empty list
emptyConfigs <- repgen:::getMeasuredShiftPlotConfig(
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), minShift=as.numeric(NA), maxShift=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE))
)
expect_equal(length(emptyConfigs$points$x), 0)
expect_equal(length(emptyConfigs$points$y), 0)
expect_equal(length(emptyConfigs$error_bar$x), 0)
expect_equal(length(emptyConfigs$error_bar$y), 0)
testData <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
minShift=c(9, 18),
maxShift=c(12, 23),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
measShiftConfig <- repgen:::getMeasuredShiftPlotConfig(testData)
expect_equal(length(measShiftConfig$points$x), 2)
expect_equal(length(measShiftConfig$points$y), 2)
expect_equal(length(measShiftConfig$error_bar$x), 2)
expect_equal(length(measShiftConfig$error_bar$y), 2)
expect_equal(length(measShiftConfig$error_bar$y.low), 2)
expect_equal(length(measShiftConfig$error_bar$y.high), 2)
#points correct
expect_equal(measShiftConfig$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(measShiftConfig$points$y[1], 10)
expect_equal(measShiftConfig$points$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(measShiftConfig$points$y[2], 20)
#bars correct
expect_equal(measShiftConfig$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(measShiftConfig$error_bar$y[1], 10)
expect_equal(measShiftConfig$error_bar$y.low[1], 1)
expect_equal(measShiftConfig$error_bar$y.high[1], 2)
expect_equal(measShiftConfig$error_bar$x[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(measShiftConfig$error_bar$y[2], 20)
expect_equal(measShiftConfig$error_bar$y.low[2], 2)
expect_equal(measShiftConfig$error_bar$y.high[2], 3)
})
test_that("getCorrectionsPlotConfig correctly returns a list of gsplot calls with needed corrections elements",{
#NULL case returns empty list
expect_equal(length(repgen:::getCorrectionsPlotConfig(NULL, NULL, NULL, NULL, NULL)), 0)
expect_equal(length(repgen:::getCorrectionsPlotConfig(list(), NULL, NULL, NULL, NULL)), 0)
#empty data frame case returns empty list
expect_equal(length(repgen:::getCorrectionsPlotConfig(
na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE))
, NULL, NULL, NULL, NULL)), 0)
testCorrections <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(NA, NA, NA),
month=c("1605", "1605", "1605"),
comment=c("correction 1", "correction 2", "correction 3"),
stringsAsFactors=FALSE)
starteDate <- as.POSIXct("2016-05-01 17:00:00");
endDate <- as.POSIXct("2016-05-30 17:00:00");
testLims <- list(xlim=c(as.POSIXct("2016-05-01 00:00:00"), as.POSIXct("2016-05-31 00:00:00")), ylim=c(1, 2))
correctionsPlotConfigs <- repgen:::getCorrectionsPlotConfig(testCorrections, starteDate, endDate, "TEST", testLims)
#lines call constructed
expect_equal(correctionsPlotConfigs$lines$x, 0)
expect_equal(correctionsPlotConfigs$lines$y, 0)
expect_equal(correctionsPlotConfigs$lines$xlim[1], as.POSIXct("2016-05-01 17:00:00"))
expect_equal(correctionsPlotConfigs$lines$xlim[2], as.POSIXct("2016-05-30 17:00:00"))
#two vertical lines for corrections (of the 3, two are on the same datetime)
expect_equal(correctionsPlotConfigs$abline$v[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(correctionsPlotConfigs$abline$v[2], as.POSIXct("2016-05-23 17:45:00"))
expect_equal(grep(".*TEST.*", correctionsPlotConfigs$abline$legend.name), 1) #legend entry contains the passed in label
# horizontal arrows for connecting the vertical correction lines to their boxed labels
expect_equal(correctionsPlotConfigs$arrows$x0[1], as.POSIXct("2016-05-03 17:00:00")) #starts at correction line
expect_true(as.integer(correctionsPlotConfigs$arrows$x1[1]) > as.integer(as.POSIXct("2016-05-03 17:00:00"))) #in millis form, shifted to the right of x0
expect_equal(correctionsPlotConfigs$arrows$y0[1], correctionsPlotConfigs$arrows$y1[1]) #y vals are equal for horizontal line
expect_equal(correctionsPlotConfigs$arrows$x0[2], as.POSIXct("2016-05-23 17:45:00")) #starts at correction line
expect_true(as.integer(correctionsPlotConfigs$arrows$x1[2]) > as.integer(as.POSIXct("2016-05-23 17:45:00"))) #in millis form, shifted to the right of x0
expect_equal(correctionsPlotConfigs$arrows$y0[2], correctionsPlotConfigs$arrows$y1[2]) #y vals are equal for horizontal line
expect_equal(correctionsPlotConfigs$arrows$x0[3], as.POSIXct("2016-05-23 17:45:00")) #starts at correction line
expect_true(as.integer(correctionsPlotConfigs$arrows$x1[3]) > as.integer(as.POSIXct("2016-05-23 17:45:00"))) #in millis form, shifted to the right of x0
expect_equal(correctionsPlotConfigs$arrows$y0[3], correctionsPlotConfigs$arrows$y1[3]) #y vals are equal for horizontal line
expect_equal(correctionsPlotConfigs$arrows$x0[2], correctionsPlotConfigs$arrows$x0[3]) #2nd and 3rd correction line are the same
expect_true(correctionsPlotConfigs$arrows$y0[3] < correctionsPlotConfigs$arrows$y0[2]) #arrow for 3rd correction is lower than 2nd to not overlap
#3 points as boxes around labels for each correction (these tests are "fuzzy" since exact distances may change depending on styling requests)
expect_true(correctionsPlotConfigs$points$x[1] > as.integer(correctionsPlotConfigs$abline$v[1])) #x shifted to the right of correction line
expect_true(correctionsPlotConfigs$points$x[1] - as.integer(correctionsPlotConfigs$abline$v[1]) < 50000) #but not by too much
expect_true(correctionsPlotConfigs$points$x[2] > as.integer(correctionsPlotConfigs$abline$v[2])) #x shifted to the right of correction line
expect_true(correctionsPlotConfigs$points$x[2] - as.integer(correctionsPlotConfigs$abline$v[2]) < 50000) #but not by too much
expect_true(correctionsPlotConfigs$points$x[3] > as.integer(correctionsPlotConfigs$abline$v[2])) #x shifted to the right of correction line
expect_true(correctionsPlotConfigs$points$x[3] - as.integer(correctionsPlotConfigs$abline$v[2]) < 50000) #but not by too much
expect_equal(correctionsPlotConfigs$points$x[2], correctionsPlotConfigs$points$x[2]) #at same x for the duplicate time
expect_equal(correctionsPlotConfigs$points$y[1], correctionsPlotConfigs$points$y[2]) #corr 1 and 2 are at same y since they are far enough apart and won't overlap
expect_true(correctionsPlotConfigs$points$y[3] < correctionsPlotConfigs$points$y[2]) #corr 3 is lower than 2 since it is at the same x and we don't want it to overlap
#4 positioning of actual labels should match points above and be numbered labels instead of full comment
expect_equal(correctionsPlotConfigs$text$x[1], correctionsPlotConfigs$points$x[1])
expect_equal(correctionsPlotConfigs$text$x[2], correctionsPlotConfigs$points$x[2])
expect_equal(correctionsPlotConfigs$text$x[3], correctionsPlotConfigs$points$x[3])
expect_equal(correctionsPlotConfigs$text$y[1], correctionsPlotConfigs$points$y[1])
expect_equal(correctionsPlotConfigs$text$y[2], correctionsPlotConfigs$points$y[2])
expect_equal(correctionsPlotConfigs$text$y[3], correctionsPlotConfigs$points$y[3])
expect_equal(correctionsPlotConfigs$text$label[1], 1)
expect_equal(correctionsPlotConfigs$text$label[2], 3) #looks like the ordering of dupes is backward on labeling, but that's ok. This could change though
expect_equal(correctionsPlotConfigs$text$label[3], 2)
})
# Restore the working directory captured before the test run (tests above
# switch to tempdir() so plot rendering does not write into the checkout)
setwd(dir = wd)
context("uvhydrograph-render tests")
# Remember the current working directory, then run the tests from a temp dir
# so any files produced while rendering plots do not pollute the repo checkout
wd <- getwd()
setwd(dir = tempdir())
# Normal discharge (Q) hydrograph: a single month of data should yield the
# full render set (both plots, both corrections tables) with no rating shift
# table and no error status messages.
test_that("uvhydrographPlot correctly includes list of months with rendering items for a normal Q hydrograph",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-minimal.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 1) #1 item for the month of 1510
expect_equal(length(renderList[['1510']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
expect_false(is.null(renderList[['1510']][['plot1']]))
expect_false(is.null(renderList[['1510']][['plot2']]))
expect_false(is.null(renderList[['1510']][['table1']]))
expect_true(is.null(renderList[['1510']][['ratingShiftTable']])) #no rating shift table
expect_false(is.null(renderList[['1510']][['table2']]))
expect_true(is.null(renderList[['1510']][['status_msg1']])) #no error status expected for a complete report
expect_true(is.null(renderList[['1510']][['status_msg2']])) #no error status expected for a complete report
})
test_that("uvhydrographPlot correctly skips rendering all if no primary series exists",{
  # When the report JSON contains no corrected primary series points,
  # nothing at all should be rendered for any month.
  library(jsonlite)
  snippetPath <- system.file("extdata", "testsnippets", "test-uvhydro-no-primary-pts.json", package = "repgen")
  report <- fromJSON(snippetPath)
  rendered <- repgen:::uvhydrographPlot(report)
  expect_equal(length(rendered), 0)
})
# Q hydrograph without an upchain series: the primary plot and its corrections
# table render, but the secondary plot (plot2/table2) is skipped entirely.
# Fix: test description previously misspelled "secondary" as "secondard".
test_that("uvhydrographPlot correctly skips secondary plot if an upchain series is not provided for Q hydrographs",{
  library('jsonlite')
  reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-Q-no-upchain.json', package = 'repgen'))
  renderList <- repgen:::uvhydrographPlot(reportObject)
  expect_equal(length(renderList), 1) #1 item for the month of 1510
  expect_equal(length(renderList[['1510']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
  expect_false(is.null(renderList[['1510']][['plot1']]))
  expect_true(is.null(renderList[['1510']][['plot2']])) #skipped: no upchain series
  expect_false(is.null(renderList[['1510']][['table1']]))
  expect_true(is.null(renderList[['1510']][['ratingShiftTable']])) #no rating shift table
  expect_true(is.null(renderList[['1510']][['table2']])) #skipped: no upchain series
  expect_true(is.null(renderList[['1510']][['status_msg1']])) #no error message
  expect_true(is.null(renderList[['1510']][['status_msg2']])) #no error message
})
# Non-Q (groundwater) hydrograph WITH a reference series: the secondary plot
# and its corrections table should both be rendered.
test_that("uvhydrographPlot correctly renders secondary plot if a reference series is provided for non-Q hydrographs",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-with-ref.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 1) #1 item for the month of 1206
expect_equal(length(renderList[['1206']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
expect_false(is.null(renderList[['1206']][['plot1']]))
expect_false(is.null(renderList[['1206']][['plot2']]))
expect_false(is.null(renderList[['1206']][['table1']]))
expect_true(is.null(renderList[['1206']][['ratingShiftTable']])) #no rating shift table
expect_false(is.null(renderList[['1206']][['table2']]))
expect_true(is.null(renderList[['1206']][['status_msg1']])) #no error message
expect_true(is.null(renderList[['1206']][['status_msg2']])) #no error message
})
# Non-Q (groundwater) hydrograph WITHOUT a reference series: the secondary
# plot and its corrections table should be skipped, with no error messages.
test_that("uvhydrographPlot correctly skips secondary plot if a reference series is not provided for non-Q hydrographs",{
library('jsonlite')
reportObject <- fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-no-ref.json', package = 'repgen'))
renderList <- repgen:::uvhydrographPlot(reportObject)
expect_equal(length(renderList), 1) #1 item for the month of 1206
expect_equal(length(renderList[['1206']]), 7) #2 plots, 2 corrections tables, 1 rating shift table, and 2 status messages
expect_false(is.null(renderList[['1206']][['plot1']]))
expect_true(is.null(renderList[['1206']][['plot2']])) #skipped: no reference series
expect_false(is.null(renderList[['1206']][['table1']]))
expect_true(is.null(renderList[['1206']][['ratingShiftTable']])) #no rating shift table
expect_true(is.null(renderList[['1206']][['table2']])) #skipped: no reference series
expect_true(is.null(renderList[['1206']][['status_msg1']]))
expect_true(is.null(renderList[['1206']][['status_msg2']]))
})
test_that("useSecondaryPlot correctly flags when to use a secondary plot",{
  # Helper: load a bundled test snippet and ask repgen whether it warrants
  # a secondary plot. (jsonlite is already attached by an earlier test.)
  flagFor <- function(snippetName) {
    jsonPath <- system.file("extdata", "testsnippets", snippetName, package = "repgen")
    repgen:::useSecondaryPlot(fromJSON(jsonPath))
  }
  expect_false(flagFor("test-uvhydro-Q-no-upchain.json")) # Q series without upchain
  expect_true(flagFor("test-uvhydro-gw-with-ref.json"))   # non-Q with a reference series
  expect_false(flagFor("test-uvhydro-gw-no-ref.json"))    # non-Q without a reference series
})
# getPrimaryReportElements for Q hydrographs: a month with no corrected data
# returns only a status message; a month with data returns a gsplot and a
# populated corrections table with no status message.
test_that("getPrimaryReportElements correctly configured gsplot, a corrections table, and/or failure message depending on report config",{
# no primary points at all -> no plot, no table, only the failure message
reportEls <- repgen:::getPrimaryReportElements(
fromJSON(system.file('extdata','testsnippets','test-uvhydro-no-primary-pts.json', package = 'repgen'))
, "1510", "Etc/GMT", TRUE)
expect_equal(reportEls[['plot']], NULL)
expect_equal(reportEls[['table']], NULL)
expect_equal(reportEls[['status_msg']], "Corrected data missing for Discharge.ft^3/s@01047200")
# month with data -> plot + corrections table, no status message
reportEls <- repgen:::getPrimaryReportElements(
fromJSON(system.file('extdata','testsnippets','test-uvhydro-Q-no-upchain.json', package = 'repgen'))
, "1510", "Etc/GMT", TRUE)
expect_is(reportEls[['plot']], "gsplot")
expect_is(reportEls[['table']], "data.frame")
expect_equal(reportEls[['table']][1,][["Time"]], "2015-10-06")
expect_equal(reportEls[['table']][1,][["Correction Comments"]], "End : Approval period copy paste from Ref")
expect_equal(reportEls[['status_msg']], NULL)
})
# getPrimaryReportElements for groundwater (non-Q) hydrographs.
# Fix: this test previously carried the exact same description as the Q test
# above, which makes testthat failure output ambiguous; renamed to state that
# it exercises the GW snippet. Behavior of the assertions is unchanged.
test_that("getPrimaryReportElements correctly configured gsplot, a corrections table, and/or failure message for GW report config",{
  # requesting a month with no corrected data -> no plot/table, only a status message
  reportEls <- repgen:::getPrimaryReportElements(
    fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-with-ref.json', package = 'repgen'))
    , "1510", "Etc/GMT", TRUE) #wrong month
  expect_equal(reportEls[['plot']], NULL)
  expect_equal(reportEls[['table']], NULL)
  expect_equal(reportEls[['status_msg']], "Corrected data missing for WaterLevel, BelowLSD.ft@353922083345600")
  # month with data -> gsplot plus a two-row corrections table, no status message
  reportEls <- repgen:::getPrimaryReportElements(
    fromJSON(system.file('extdata','testsnippets','test-uvhydro-gw-with-ref.json', package = 'repgen'))
    , "1206", "Etc/GMT", TRUE)
  expect_is(reportEls[['plot']], "gsplot")
  expect_is(reportEls[['table']], "data.frame")
  expect_equal(reportEls[['table']][1,][["Time"]], "2012-06-29 10:17:00")
  expect_equal(reportEls[['table']][1,][["Correction Comments"]], "Start : Example primary series correction")
  expect_equal(reportEls[['table']][2,][["Time"]], "2012-06-30 22:59:00")
  expect_equal(reportEls[['table']][2,][["Correction Comments"]], "End : Example primary series correction")
  expect_equal(reportEls[['status_msg']], NULL)
})
# createPrimaryPlot with the bare minimum input: only a corrected primary
# series; every other argument is NULL, an empty list, or a zero-row
# data.frame (na.omit() of all-NA rows yields zero rows).
test_that("createPrimaryPlot only can handle minimal requirements (just corrected series)",{
Sys.setenv(TZ = "UTC")
#minimal case should plot (only corrected series)
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# NOTE(review): arguments below are positional; their meanings are assumed
# from the fuller test further down (dvs, Q measurements, wq, gw, readings,
# approval bars, corrections, rating shifts) -- confirm against createPrimaryPlot
plot_object <- repgen:::createPrimaryPlot(
list(label="Primary Test Series", units="ft", type="Test"),
NULL,
NULL,
NULL,
list(corrected=testSeries, estimated=NULL, uncorrected=NULL, corrected_reference=NULL,
estimated_reference=NULL,
comparison=NULL,inverted=FALSE,loggedAxis=FALSE),
list(),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), minQ=as.numeric(NA), maxQ=as.numeric(NA), n=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE)),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
list(),
list(),
na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE)),
na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE)),
TRUE,
"Etc/GMT",
FALSE)
expect_is(plot_object[['side.1']], "list")
#full month on plot (x axis spans the whole calendar month of the data)
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_is(plot_object[['side.2']], "list")
# y axis tracks the corrected series' data range exactly
expect_equal(ylim(plot_object)[['side.2']][1], 10)
expect_equal(ylim(plot_object)[['side.2']][2], 20)
expect_is(plot_object[['legend']], "list")
expect_equal(plot_object[['legend']][['legend.auto']][['legend']], "Corrected UV Primary Test Series")
})
# createPrimaryPlot with every optional input supplied: corrected/estimated/
# uncorrected primary series, reference and estimated-reference series, a
# comparison series, DVs at three approval levels, Q measurements with error
# bars, WQ and GW points, field readings, approval bars, corrections, and
# rating shifts. The same fixture is plotted twice: once excluding
# zero/negative values (final arg TRUE) and once including them (FALSE).
test_that("createPrimaryPlot correctly configured gsplot",{
Sys.setenv(TZ = "UTC")
# corrected primary series (includes a negative value, -1)
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-02 17:00:00"), as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(-1, 10, 20),
month=c("1605", "1605", "1605"),
stringsAsFactors=FALSE)
)
# estimated primary series (same points as corrected here)
testSeriesEst <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-02 17:00:00"), as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(-1, 10, 20),
month=c("1605", "1605", "1605"),
stringsAsFactors=FALSE)
)
# uncorrected primary series
testSeriesUnc <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-02 17:00:00"), as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(-1, 10, 20),
month=c("1605", "1605", "1605"),
stringsAsFactors=FALSE)
)
# corrected reference series
testSeriesRef <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(4, 15),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# estimated reference series
testSeriesEstRef <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-24 17:15:00"), as.POSIXct("2016-05-28 17:45:00")),
value=c(7, 16),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# comparison series (from another station)
testSeriesComp <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(9, 12),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# daily values at each of the three approval levels
dvs <- list(
approved_dv=data.frame(
time=c(as.POSIXct("2016-05-03"), as.POSIXct("2016-05-04")),
value=c(10, 11),
month=c("1605", "1605"),
point_type=c(21, 21),
legend.name=c("Test DV", "Test DV"),
stringsAsFactors=FALSE),
inreview_dv=data.frame(
time=c(as.POSIXct("2016-05-05"), as.POSIXct("2016-05-06")),
value=c(12, 14),
month=c("1605", "1605"),
point_type=c(21, 21),
legend.name=c("In Review Test DV", "In Review Test DV"),
stringsAsFactors=FALSE),
working_dv=data.frame(
time=c(as.POSIXct("2016-05-20"), as.POSIXct("2016-05-22")),
value=c(15, 16),
month=c("1605", "1605"),
point_type=c(21, 21),
legend.name=c("Working Test DV", "Working Test DV"),
stringsAsFactors=FALSE)
)
# discharge measurements with min/max error bounds (maxQ=50 drives the y-lim)
qMeas <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(7, 8),
minQ=c(6, 18),
maxQ=c(12, 50),
n=c("33", "44"),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
# water quality measurement points
wq <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(14, 10),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
# ground water level points
gw <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(13, 9),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
# field readings, each with an uncertainty band
readings <- list(
reference=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(6, 7),
uncertainty=c(1, 3),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
crest_stage_gage=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(8, 9),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
high_water_mark=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(4, 5),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# approval bars spanning working -> in review -> approved periods
approvalBars <- list(
appr_working_uv=list(x0=as.POSIXct("2016-05-01 00:00:00"), x1=as.POSIXct("2016-05-06 00:00:00"), legend.name="Working Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_inreview_uv=list(x0=as.POSIXct("2016-05-06 00:00:00"), x1=as.POSIXct("2016-05-20 00:00:00"), legend.name="In Review Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_approved_uv=list(x0=as.POSIXct("2016-05-20 00:00:00"), x1=as.POSIXct("2016-06-30 00:00:00"), legend.name="Approved Test Series", time=as.POSIXct("2016-05-01 00:00:00"))
)
# corrections (note: two share the same timestamp)
testCorrections <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(NA, NA, NA),
month=c("1605", "1605", "1605"),
comment=c("correction 1", "correction 2", "correction 3"),
stringsAsFactors=FALSE)
testRatingShifts <- data.frame(
time=c(as.POSIXct("2016-05-04 17:00:00"), as.POSIXct("2016-05-15 17:45:00"), as.POSIXct("2016-05-20 17:45:00")),
value=c(NA, NA, NA),
month=c("1605", "1605", "1605"),
comment=c("Prrorate on over ice-out rise for scour to control.", "Based on Qms 403-406.", "Based on Qms 403-406. Carried over from previous period."),
stringsAsFactors=FALSE)
# first render: final argument TRUE (exclude-zero/negative mode)
plot_object <- repgen:::createPrimaryPlot(
list(label="Primary Test Series", units="ft", type="Test"),
list(label="Reference Test Series", units="ft", type="Test"),
list(label="Comparison Test Series", units="ft", type="Test"),
"testComparisonStationId",
list(corrected=testSeries, estimated=testSeriesEst, uncorrected=testSeriesUnc, corrected_reference=testSeriesRef,
estimated_reference=testSeriesEstRef,
comparison=testSeriesComp,inverted=FALSE,loggedAxis=FALSE),
dvs,
qMeas,
wq,
gw,
readings,
approvalBars,
testCorrections,
testRatingShifts,
TRUE,
"Etc/GMT",
TRUE)
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_equal(ylim(plot_object)[['side.2']][1], -1)
expect_equal(ylim(plot_object)[['side.2']][2], 50) #The high matches the top of the Q error bar
expect_equal(plot_object[['global']][['title']][['xlab']], "UV Series: 2016-05-02 17:00:00 through 2016-05-23 17:45:00")
expect_is(plot_object[['view.1.2']], "list")
expect_equal(length(plot_object[['view.1.2']]), 27) #all plot calls are there
#do not exclude negatives (second render identical except final arg FALSE)
plot_object <- repgen:::createPrimaryPlot(
list(label="Primary Test Series", units="ft", type="Test"),
list(label="Reference Test Series", units="ft", type="Test"),
list(label="Comparison Test Series", units="ft", type="Test"),
"testComparisonStationId",
list(corrected=testSeries, estimated=testSeriesEst, uncorrected=testSeriesUnc, corrected_reference=testSeriesRef,
estimated_reference=testSeriesEstRef,
comparison=testSeriesComp,inverted=FALSE,loggedAxis=FALSE),
dvs,
qMeas,
wq,
gw,
readings,
approvalBars,
testCorrections,
testRatingShifts,
TRUE,
"Etc/GMT",
FALSE)
#TODO need an assertion to test if zeros/negatives are excluded
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_equal(ylim(plot_object)[['side.2']][1], -1)
expect_equal(ylim(plot_object)[['side.2']][2], 50) #The high matches the top of the Q error bar
expect_equal(plot_object[['global']][['title']][['xlab']], "UV Series: 2016-05-02 17:00:00 through 2016-05-23 17:45:00")
expect_is(plot_object[['view.1.2']], "list")
expect_equal(length(plot_object[['view.1.2']]), 27) #all plot calls are there
})
# createSecondaryPlot with the bare minimum input: only a corrected series;
# all other inputs are empty lists or zero-row data.frames (na.omit() of
# all-NA rows yields zero rows).
test_that("createSecondaryPlot only can handle minimal requirements (just corrected series)",{
Sys.setenv(TZ = "UTC")
#minimal case should plot (only corrected series)
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
plot_object <- repgen:::createSecondaryPlot(
list(label="Test Series", units="ft", type="Test"),
list(corrected=testSeries, estimated=NULL, uncorrected=NULL, inverted=FALSE),
list(),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), minShift=as.numeric(NA), maxShift=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE)),
na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA))),
na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE)),
list(),
"Etc/GMT",
FALSE,
tertiary_label="")
#full month on plot (x axis spans the whole calendar month of the data)
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
# y axis tracks the corrected series' data range exactly
expect_equal(ylim(plot_object)[['side.2']][1], 10)
expect_equal(ylim(plot_object)[['side.2']][2], 20)
expect_is(plot_object[['legend']], "list")
expect_equal(plot_object[['legend']][['legend.auto']][['legend']], "Corrected UV Test Series")
})
# createSecondaryPlot with all inputs supplied: corrected/estimated/
# uncorrected series, approval bars, effective and measured shifts (side 4),
# gage heights, and field readings. Checks axis limits on sides 2 and 4,
# titles, and the number of plot calls per view.
test_that("createSecondaryPlot more tests",{
Sys.setenv(TZ = "UTC")
# corrected series
testSeries <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# estimated series (offset 15 min from corrected)
testSeriesEst <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:15:00"), as.POSIXct("2016-05-23 17:15:00")),
value=c(11, 22),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# uncorrected series (higher values than corrected)
testSeriesUnc <- list(
points=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(20, 30),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# approval bars spanning working -> in review -> approved periods
approvalBars <- list(
appr_working_uv=list(x0=as.POSIXct("2016-05-01 00:00:00"), x1=as.POSIXct("2016-05-06 00:00:00"), legend.name="Working Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_inreview_uv=list(x0=as.POSIXct("2016-05-06 00:00:00"), x1=as.POSIXct("2016-05-20 00:00:00"), legend.name="In Review Test Series", time=as.POSIXct("2016-05-01 00:00:00")),
appr_approved_uv=list(x0=as.POSIXct("2016-05-20 00:00:00"), x1=as.POSIXct("2016-06-30 00:00:00"), legend.name="Approved Test Series", time=as.POSIXct("2016-05-01 00:00:00"))
)
# effective shift series (low of 2 drives the side-4 y-lim minimum)
effShift <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(2, 3),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
# measured shifts with error bounds (maxShift=44 drives the side-4 y-lim maximum)
measShift <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
minShift=c(9, 18),
maxShift=c(12, 44),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
gageHeight <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
n=c("1222", "22"),
month=c("1605", "1605"),
stringsAsFactors=FALSE
)
# field readings, each with an uncertainty band
readings <- list(
reference=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(6, 7),
uncertainty=c(1, 3),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
crest_stage_gage=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(8, 9),
month=c("1605", "1605"),
stringsAsFactors=FALSE),
high_water_mark=data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
uncertainty=c(4, 5),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
)
# corrections (note: two share the same timestamp)
testCorrections <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(NA, NA, NA),
month=c("1605", "1605", "1605"),
comment=c("correction 1", "correction 2", "correction 3"),
stringsAsFactors=FALSE)
plot_object <- repgen:::createSecondaryPlot(
list(label="Test Series", units="ft", type="Test"),
list(corrected=testSeries, estimated=testSeriesEst, uncorrected=testSeriesUnc, inverted=FALSE),
approvalBars,
effShift,
measShift,
gageHeight,
readings,
testCorrections,
"Etc/GMT",
FALSE,
tertiary_label="Tertiary Label")
#full month on plot
expect_equal(xlim(plot_object)[['side.1']][1], as.POSIXct("2016-05-01 00:00:00"))
expect_equal(xlim(plot_object)[['side.1']][2], as.POSIXct("2016-05-31 23:45:00"))
expect_equal(ylim(plot_object)[['side.2']][1], 2)
expect_equal(ylim(plot_object)[['side.2']][2], 29)
expect_equal(ylim(plot_object)[['side.4']][1], 2) # low of effective shift series
expect_equal(ylim(plot_object)[['side.4']][2], 44) # high of top of meas shift error
expect_equal(plot_object[['global']][['title']][['ylab']], "Test Series")
expect_equal(plot_object[['global']][['title']][['xlab']], "UV Series: 2016-05-03 17:00:00 through 2016-05-23 17:45:00")
expect_is(plot_object[['view.1.2']], "list")
expect_equal(length(plot_object[['view.1.2']]), 17) #all plot calls are there
expect_is(plot_object[['view.1.4']], "list")
expect_equal(length(plot_object[['view.1.4']]), 6) #all plot calls are there
expect_is(plot_object[['view.7.2']], "list")
expect_equal(length(plot_object[['view.7.2']]), 6) #all plot calls are there
})
test_that("calculateYLim returns y-lim which covers corrected points and most (possibly not all) of the uncorrected points ",{
  corrected <- c(10, 15, 16, 17, 40)
  # within 30% of the corrected range on both ends: adopted as the limits
  slightlyWider <- c(5, 15, 16, 17, 45)
  # much larger range on both ends: ignored entirely
  muchWiderBoth <- c(-5, 15, 16, 17, 50)
  # much larger range on the top end only: adopted on the bottom end only
  muchWiderTop <- c(8, 15, 16, 17, 52)
  # narrower range: limits must never contract below the corrected range
  narrower <- c(15, 16, 17)

  limsBothExpanded <- repgen:::calculateYLim(corrected, slightlyWider)
  limsUnchanged <- repgen:::calculateYLim(corrected, muchWiderBoth)
  limsOneSide <- repgen:::calculateYLim(corrected, muchWiderTop)
  limsNoContract <- repgen:::calculateYLim(corrected, narrower)

  # limits expanded on both ends
  expect_equal(limsBothExpanded[1], 5)
  expect_equal(limsBothExpanded[2], 45)
  # limits not expanded at all
  expect_equal(limsUnchanged[1], 10)
  expect_equal(limsUnchanged[2], 40)
  # limits allowed to expand on one side only
  expect_equal(limsOneSide[1], 8)
  expect_equal(limsOneSide[2], 40)
  # limits never contract
  expect_equal(limsNoContract[1], 10)
  expect_equal(limsNoContract[2], 40)
})
# getPrimaryPlotConfig: verifies the line configuration produced for each of
# the series kinds drawn on the primary plot (corrected, estimated,
# uncorrected, comparison on shared/independent axes, corrected reference,
# estimated reference), then checks that line types and colors distinguish
# the series from one another.
# Fix: the final three color assertions were exact byte-for-byte duplicates
# of the three immediately preceding them (asCorrectedReference vs
# corrected/estimated/uncorrected) and have been removed.
test_that("getPrimaryPlotConfig correctly creates lines for 6 possible types of series for gsplot",{
testSeries <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
testLimits <- c(10,20)
asCorrected <- repgen:::getPrimaryPlotConfig(testSeries, "corrected", "Test Series", testLimits)
asEstimated <- repgen:::getPrimaryPlotConfig(testSeries, "estimated", "Test Series", testLimits)
asUncorrected <- repgen:::getPrimaryPlotConfig(testSeries, "uncorrected", "Test Series", testLimits)
asComparisonSharedAxis <- repgen:::getPrimaryPlotConfig(testSeries, "comparison", "Test Series", testLimits, dataSide=4)
asComparisonIndependentAxis <- repgen:::getPrimaryPlotConfig(testSeries, "comparison", "Test Series", testLimits, dataSide=6, comparisonOnIndependentAxes=FALSE)
asCorrectedReference <- repgen:::getPrimaryPlotConfig(testSeries, "corrected_reference", "Test Series", testLimits, dataSide=4)
asEstimatedReference <- repgen:::getPrimaryPlotConfig(testSeries, "estimated_reference", "Test Series", testLimits, dataSide=4)
#corrected lines
expect_equal(length(asCorrected$lines$x), 2)
expect_equal(length(asCorrected$lines$y), 2)
expect_equal(asCorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Corrected", asCorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asCorrected$lines[['legend.name']]))
#estimated lines
expect_equal(length(asEstimated$lines$x), 2)
expect_equal(length(asEstimated$lines$y), 2)
expect_equal(asEstimated$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asEstimated$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$col[1])) #only care that color was set
expect_true(grepl("Estimated", asEstimated$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asEstimated$lines[['legend.name']]))
#uncorrected lines
expect_equal(length(asUncorrected$lines$x), 2)
expect_equal(length(asUncorrected$lines$y), 2)
expect_equal(asUncorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asUncorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Uncorrected", asUncorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asUncorrected$lines[['legend.name']]))
#comparison lines (shared axis: annotations and axes suppressed)
expect_equal(length(asComparisonSharedAxis$lines$x), 2)
expect_equal(length(asComparisonSharedAxis$lines$y), 2)
expect_equal(asComparisonSharedAxis$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asComparisonSharedAxis$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asComparisonSharedAxis$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asComparisonSharedAxis$lines$col[1])) #only care that color was set
expect_equal("Test Series", asComparisonSharedAxis$lines[['legend.name']])
expect_equal("Test Series", asComparisonSharedAxis$lines[['ylab']])
expect_false(asComparisonSharedAxis$lines[['ann']])
expect_false(asComparisonSharedAxis$lines[['axes']])
#comparison (independent) lines: annotations and axes enabled
expect_equal(length(asComparisonIndependentAxis$lines$x), 2)
expect_equal(length(asComparisonIndependentAxis$lines$y), 2)
expect_equal(asComparisonIndependentAxis$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asComparisonIndependentAxis$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asComparisonIndependentAxis$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asComparisonIndependentAxis$lines$col[1])) #only care that color was set
expect_equal("Test Series", asComparisonIndependentAxis$lines[['legend.name']])
expect_equal("Test Series", asComparisonIndependentAxis$lines[['ylab']])
expect_true(asComparisonIndependentAxis$lines[['ann']])
expect_true(asComparisonIndependentAxis$lines[['axes']])
#corrected ref lines
expect_equal(length(asCorrectedReference$lines$x), 2)
expect_equal(length(asCorrectedReference$lines$y), 2)
expect_equal(asCorrectedReference$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCorrectedReference$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asCorrectedReference$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asCorrectedReference$lines$col[1])) #only care that color was set
expect_true(grepl("Corrected", asCorrectedReference$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asCorrectedReference$lines[['legend.name']]))
#estimated ref lines
expect_equal(length(asEstimatedReference$lines$x), 2)
expect_equal(length(asEstimatedReference$lines$y), 2)
expect_equal(asEstimatedReference$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asEstimatedReference$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asEstimatedReference$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asEstimatedReference$lines$col[1])) #only care that color was set
expect_true(grepl("Estimated", asEstimatedReference$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asEstimatedReference$lines[['legend.name']]))
#ensure estimated and corrected have different line type
expect_false(asCorrected$lines$lty[1] == asEstimated$lines$lty[1])
expect_false(asCorrectedReference$lines$lty[1] == asEstimatedReference$lines$lty[1])
#ensure color is different for different series types
expect_false(asCorrected$lines$col[1] == asEstimated$lines$col[1])
expect_false(asCorrected$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asEstimated$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asComparisonSharedAxis$lines$col[1] == asCorrected$lines$col[1])
expect_false(asComparisonSharedAxis$lines$col[1] == asEstimated$lines$col[1])
expect_false(asComparisonSharedAxis$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asComparisonIndependentAxis$lines$col[1] == asCorrected$lines$col[1])
expect_false(asComparisonIndependentAxis$lines$col[1] == asEstimated$lines$col[1])
expect_false(asComparisonIndependentAxis$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asComparisonSharedAxis$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asCorrected$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asEstimated$lines$col[1])
expect_false(asCorrectedReference$lines$col[1] == asUncorrected$lines$col[1])
# NOTE(review): three duplicated asCorrectedReference color comparisons were
# removed here; the original author may have intended asEstimatedReference
# comparisons instead -- confirm against the styles before adding them.
})
# getSecondaryPlotConfig: verifies the line configuration for the three
# series kinds drawn on the secondary plot (corrected, estimated,
# uncorrected), then checks that each kind gets a distinct color.
test_that("getSecondaryPlotConfig correctly creates lines for 3 possible types of series for gsplot",{
testSeries <- data.frame(
time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
value=c(10, 20),
month=c("1605", "1605"),
stringsAsFactors=FALSE)
asCorrected <- repgen:::getSecondaryPlotConfig(testSeries, "corrected", "Test Series", c(10, 20))
asEstimated <- repgen:::getSecondaryPlotConfig(testSeries, "estimated", "Test Series", c(10, 20))
asUncorrected <- repgen:::getSecondaryPlotConfig(testSeries, "uncorrected", "Test Series", c(10, 20))
#corrected lines
expect_equal(length(asCorrected$lines$x), 2)
expect_equal(length(asCorrected$lines$y), 2)
expect_equal(asCorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asCorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asCorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Corrected", asCorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asCorrected$lines[['legend.name']]))
#estimated lines
expect_equal(length(asEstimated$lines$x), 2)
expect_equal(length(asEstimated$lines$y), 2)
expect_equal(asEstimated$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asEstimated$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asEstimated$lines$col[1])) #only care that color was set
expect_true(grepl("Estimated", asEstimated$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asEstimated$lines[['legend.name']]))
#uncorrected lines
expect_equal(length(asUncorrected$lines$x), 2)
expect_equal(length(asUncorrected$lines$y), 2)
expect_equal(asUncorrected$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
expect_equal(asUncorrected$lines$y[1], 10)
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$lty[1])) #only care that lty was set
expect_false(repgen:::isEmptyOrBlank(asUncorrected$lines$col[1])) #only care that color was set
expect_true(grepl("Uncorrected", asUncorrected$lines[['legend.name']])) #note this depends on uvhydrograph-style
expect_true(grepl("Test Series", asUncorrected$lines[['legend.name']]))
#ensure color is different for different series types
expect_false(asCorrected$lines$col[1] == asEstimated$lines$col[1])
expect_false(asCorrected$lines$col[1] == asUncorrected$lines$col[1])
expect_false(asEstimated$lines$col[1] == asUncorrected$lines$col[1])
})
# Verifies that getWqPlotConfig emits one gsplot point per water-quality sample,
# mirroring the input time/value pairs exactly.
test_that("getWqPlotConfig correctly creates a points for gsplot",{
  sampleTimes <- c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"))
  sampleValues <- c(10, 20)
  wqData <- data.frame(
      time=sampleTimes,
      value=sampleValues,
      month=c("1605", "1605"),
      stringsAsFactors=FALSE
  )
  wqConfig <- repgen:::getWqPlotConfig(wqData)
  # one point per input row
  expect_equal(length(wqConfig$points$x), 2)
  expect_equal(length(wqConfig$points$y), 2)
  # each point mirrors its time/value pair
  for (i in 1:2) {
    expect_equal(wqConfig$points$x[i], sampleTimes[i])
    expect_equal(wqConfig$points$y[i], sampleValues[i])
  }
})
# Verifies that getMeasQPlotConfig produces, per measurement row: a point at
# (time, value), an error bar whose offsets are value-minQ below and
# maxQ-value above the point, and a callout labeled with the measurement
# number `n`.
test_that("getMeasQPlotConfig correctly creates a points, error bars, and callouts calls for gsplot",{
  testData <- data.frame(
      time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
      value=c(10, 20),
      minQ=c(9, 18),
      maxQ=c(12, 23),
      n=c("33", "44"),
      month=c("1605", "1605"),
      stringsAsFactors=FALSE
  )
  measuredQConfig <- repgen:::getMeasQPlotConfig(testData)
  expect_equal(length(measuredQConfig$points$x), 2)
  expect_equal(length(measuredQConfig$points$y), 2)
  expect_equal(length(measuredQConfig$callouts$x), 2)
  expect_equal(length(measuredQConfig$callouts$y), 2)
  expect_equal(length(measuredQConfig$callouts$labels), 2)
  # NOTE(review): duplicate of the points$y length check above — harmless
  expect_equal(length(measuredQConfig$points$y), 2)
  expect_equal(length(measuredQConfig$error_bar$x), 2)
  expect_equal(length(measuredQConfig$error_bar$y), 2)
  expect_equal(length(measuredQConfig$error_bar$y.low), 2)
  expect_equal(length(measuredQConfig$error_bar$y.high), 2)
  #points correct
  expect_equal(measuredQConfig$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(measuredQConfig$points$y[1], 10)
  expect_equal(measuredQConfig$points$x[2], as.POSIXct("2016-05-23 17:45:00"))
  expect_equal(measuredQConfig$points$y[2], 20)
  #bars correct
  # y.low/y.high are offsets from the point: value - minQ and maxQ - value
  expect_equal(measuredQConfig$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(measuredQConfig$error_bar$y[1], 10)
  expect_equal(measuredQConfig$error_bar$y.low[1], 1)
  expect_equal(measuredQConfig$error_bar$y.high[1], 2)
  expect_equal(measuredQConfig$error_bar$x[2], as.POSIXct("2016-05-23 17:45:00"))
  expect_equal(measuredQConfig$error_bar$y[2], 20)
  expect_equal(measuredQConfig$error_bar$y.low[2], 2)
  expect_equal(measuredQConfig$error_bar$y.high[2], 3)
  #callouts correct
  expect_equal(measuredQConfig$callouts$x[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(measuredQConfig$callouts$y[1], 10)
  expect_equal(measuredQConfig$callouts$labels[1], "33")
  expect_equal(measuredQConfig$callouts$x[2], as.POSIXct("2016-05-23 17:45:00"))
  expect_equal(measuredQConfig$callouts$y[2], 20)
  expect_equal(measuredQConfig$callouts$labels[2], "44")
})
# Verifies that getGwPlotConfig emits one gsplot point per groundwater reading,
# mirroring the input time/value pairs exactly.
test_that("getGwPlotConfig correctly creates a points call for gsplot",{
  readingTimes <- c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"))
  readingValues <- c(10, 20)
  gwData <- data.frame(
      time=readingTimes,
      value=readingValues,
      month=c("1605", "1605"),
      stringsAsFactors=FALSE
  )
  gwConfig <- repgen:::getGwPlotConfig(gwData)
  # one point per input row
  expect_equal(length(gwConfig$points$x), 2)
  expect_equal(length(gwConfig$points$y), 2)
  # each point mirrors its time/value pair
  for (i in 1:2) {
    expect_equal(gwConfig$points$x[i], readingTimes[i])
    expect_equal(gwConfig$points$y[i], readingValues[i])
  }
})
# Verifies that getReadingsPlotConfig produces points and error bars for each
# of the three reading types (csg, ref, hwm); the structural expectations are
# identical for every type, while pch and colors must differ between types.
# FIX: corrected "erorr" -> "error" in the test description; the per-type
# assertion triplication is collapsed into loops.
test_that("getReadingsPlotConfig correctly creates points and error bar calls for gsplot with different styles for different reading types",{
  testReadings <- data.frame(
      time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
      value=c(10, 20),
      uncertainty=c(1, 3),
      month=c("1605", "1605"),
      stringsAsFactors=FALSE)
  readingTypes <- c("csg", "ref", "hwm")
  configs <- lapply(readingTypes, function(readingType) {
    repgen:::getReadingsPlotConfig(readingType, testReadings)
  })
  names(configs) <- readingTypes
  # identical structural expectations for every reading type
  for (config in configs) {
    #points
    expect_equal(length(config$points$x), 2)
    expect_equal(length(config$points$y), 2)
    expect_equal(config$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
    expect_equal(config$points$y[1], 10)
    expect_false(repgen:::isEmptyOrBlank(config$points$pch[1])) #only care that pch was set
    expect_false(repgen:::isEmptyOrBlank(config$points$col[1])) #only care that color was set
    #error_bar: offsets equal the reading uncertainty on both sides
    expect_equal(length(config$error_bar$x), 2)
    expect_equal(length(config$error_bar$y), 2)
    expect_equal(config$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
    expect_equal(config$error_bar$y[1], 10)
    expect_equal(config$error_bar$y.low[1], 1)
    expect_equal(config$error_bar$y.high[1], 1)
    expect_false(repgen:::isEmptyOrBlank(config$error_bar$col[1])) #only care that color was set
  }
  #ensure pch and color are different for different reading types
  for (pair in combn(readingTypes, 2, simplify = FALSE)) {
    first <- configs[[pair[1]]]
    second <- configs[[pair[2]]]
    expect_false(first$points$pch[1] == second$points$pch[1])
    expect_false(first$points$col[1] == second$points$col[1])
    expect_false(first$error_bar$col[1] == second$error_bar$col[1])
  }
})
# Verifies that getDvPlotConfig produces identical point structures for the
# three approval levels while the background fill color differs per level.
# FIX: removed the duplicated legend.name expectations present for each level
# in the original, corrected "accross" -> "across", and collapsed the
# per-level triplication into loops.
test_that("getDvPlotConfig correctly creates points calls for gsplot with different styles for different approval levels",{
  dvPoints <- data.frame(
      time=c(as.POSIXct("2016-05-03"), as.POSIXct("2016-05-23")),
      value=c(10, 20),
      month=c("1605", "1605"),
      point_type=c(21, 21),
      legend.name=c("Test DV", "Test DV"),
      stringsAsFactors=FALSE)
  dvTypes <- c("approved_dv", "inreview_dv", "working_dv")
  configs <- lapply(dvTypes, function(dvType) {
    repgen:::getDvPlotConfig(dvType, dvPoints)
  })
  names(configs) <- dvTypes
  # identical structural expectations for every approval level
  for (config in configs) {
    expect_equal(length(config$points$x), 2)
    expect_equal(length(config$points$y), 2)
    expect_equal(config$points$x[1], as.POSIXct("2016-05-03"))
    expect_equal(config$points$y[1], 10)
    expect_equal(config$points$legend.name[1], "Test DV")
    expect_equal(config$points$pch[1], 21)
    expect_false(repgen:::isEmptyOrBlank(config$points$bg[1])) #only care that bg was set
    expect_equal(config$points$x[2], as.POSIXct("2016-05-23"))
    expect_equal(config$points$legend.name[2], "Test DV")
    expect_equal(config$points$y[2], 20)
    expect_equal(config$points$pch[2], 21)
  }
  #ensure background color is different across levels
  for (pair in combn(dvTypes, 2, simplify = FALSE)) {
    expect_false(configs[[pair[1]]]$points$bg[1] == configs[[pair[2]]]$points$bg[1])
  }
})
# Verifies that getEffectiveShiftPlotConfig: returns empty lines for an empty
# frame, mirrors time/value pairs in its lines, concatenates the two labels
# into the legend name, and emits a single text entry used to force the axis
# to render.
test_that("getEffectiveShiftPlotConfig correctly creates lines with correct legend name for gsplot",{
  #empty case returns empty list
  emptyConfigs <- repgen:::getEffectiveShiftPlotConfig(
      na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE))
      , "label1", "label2"
  )
  expect_equal(length(emptyConfigs$lines$x), 0)
  expect_equal(length(emptyConfigs$lines$y), 0)
  testData <- data.frame(
      time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
      value=c(10, 20),
      month=c("1605", "1605"),
      stringsAsFactors=FALSE
  )
  effShiftConfig <- repgen:::getEffectiveShiftPlotConfig(testData, "label1", "label2")
  expect_equal(length(effShiftConfig$lines$x), 2)
  expect_equal(length(effShiftConfig$lines$y), 2)
  #points correct
  expect_equal(effShiftConfig$lines$x[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(effShiftConfig$lines$y[1], 10)
  # legend name is the space-joined pair of labels
  expect_equal(effShiftConfig$lines$legend.name[1], "label1 label2")
  expect_equal(effShiftConfig$lines$x[2], as.POSIXct("2016-05-23 17:45:00"))
  expect_equal(effShiftConfig$lines$y[2], 20)
  #a text entry exists to ensure axis shows, BUT this might be removed, remove from test if that happens
  expect_equal(length(effShiftConfig$text$x), 1)
  expect_equal(length(effShiftConfig$text$y), 1)
  expect_equal(effShiftConfig$text$x[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(effShiftConfig$text$y[1], 10)
})
# Verifies that getGageHeightPlotConfig: returns empty points/callouts for an
# empty frame, and otherwise places one point per measurement with a callout
# at the same position labeled with the measurement count `n`.
test_that("getGageHeightPlotConfig correctly creates points and call out labels for gsplot",{
  #empty case returns empty list
  noRows <- na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), n=as.character(NA), month=as.character(NA), stringsAsFactors=FALSE))
  emptyConfigs <- repgen:::getGageHeightPlotConfig(noRows)
  for (element in c("points", "callouts")) {
    expect_equal(length(emptyConfigs[[element]]$x), 0)
    expect_equal(length(emptyConfigs[[element]]$y), 0)
  }
  gageTimes <- c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"))
  gageValues <- c(10, 20)
  gageCounts <- c("1222", "22")
  gageData <- data.frame(
      time=gageTimes,
      value=gageValues,
      n=gageCounts,
      month=c("1605", "1605"),
      stringsAsFactors=FALSE
  )
  ghConfig <- repgen:::getGageHeightPlotConfig(gageData)
  # one point and one callout per measurement
  expect_equal(length(ghConfig$points$x), 2)
  expect_equal(length(ghConfig$points$y), 2)
  expect_equal(length(ghConfig$callouts$x), 2)
  expect_equal(length(ghConfig$callouts$y), 2)
  expect_equal(length(ghConfig$callouts$labels), 2)
  for (i in 1:2) {
    # points mirror the measurement times/values
    expect_equal(ghConfig$points$x[i], gageTimes[i])
    expect_equal(ghConfig$points$y[i], gageValues[i])
    # callouts sit on the points and are labeled with the measurement count
    expect_equal(ghConfig$callouts$x[i], gageTimes[i])
    expect_equal(ghConfig$callouts$y[i], gageValues[i])
    expect_equal(ghConfig$callouts$labels[i], gageCounts[i])
  }
})
# Verifies that getMeasuredShiftPlotConfig: returns empty points/error bars
# for an empty frame, and otherwise places one point per shift with an error
# bar whose offsets are value-minShift below and maxShift-value above.
test_that("getMeasuredShiftPlotConfig correctly creates points and error bars calls for gsplot",{
  #empty case returns empty list
  emptyConfigs <- repgen:::getMeasuredShiftPlotConfig(
      na.omit(data.frame(time=as.POSIXct(NA), value=as.numeric(NA), minShift=as.numeric(NA), maxShift=as.numeric(NA), month=as.character(NA), stringsAsFactors=FALSE))
  )
  expect_equal(length(emptyConfigs$points$x), 0)
  expect_equal(length(emptyConfigs$points$y), 0)
  expect_equal(length(emptyConfigs$error_bar$x), 0)
  expect_equal(length(emptyConfigs$error_bar$y), 0)
  testData <- data.frame(
      time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00")),
      value=c(10, 20),
      minShift=c(9, 18),
      maxShift=c(12, 23),
      month=c("1605", "1605"),
      stringsAsFactors=FALSE
  )
  measShiftConfig <- repgen:::getMeasuredShiftPlotConfig(testData)
  expect_equal(length(measShiftConfig$points$x), 2)
  expect_equal(length(measShiftConfig$points$y), 2)
  expect_equal(length(measShiftConfig$error_bar$x), 2)
  expect_equal(length(measShiftConfig$error_bar$y), 2)
  expect_equal(length(measShiftConfig$error_bar$y.low), 2)
  expect_equal(length(measShiftConfig$error_bar$y.high), 2)
  #points correct
  expect_equal(measShiftConfig$points$x[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(measShiftConfig$points$y[1], 10)
  expect_equal(measShiftConfig$points$x[2], as.POSIXct("2016-05-23 17:45:00"))
  expect_equal(measShiftConfig$points$y[2], 20)
  #bars correct
  # y.low/y.high are offsets from the point: value - minShift and maxShift - value
  expect_equal(measShiftConfig$error_bar$x[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(measShiftConfig$error_bar$y[1], 10)
  expect_equal(measShiftConfig$error_bar$y.low[1], 1)
  expect_equal(measShiftConfig$error_bar$y.high[1], 2)
  expect_equal(measShiftConfig$error_bar$x[2], as.POSIXct("2016-05-23 17:45:00"))
  expect_equal(measShiftConfig$error_bar$y[2], 20)
  expect_equal(measShiftConfig$error_bar$y.low[2], 2)
  expect_equal(measShiftConfig$error_bar$y.high[2], 3)
})
# Verifies the corrections overlay: NULL/empty inputs yield an empty list;
# otherwise vertical ablines mark each distinct correction time, horizontal
# arrows connect those lines to boxed numbered labels, and duplicate-time
# corrections are stacked vertically so their labels do not overlap.
# FIX: the "at same x for the duplicate time" assertion compared points$x[2]
# to itself; it now compares the 2nd and 3rd correction labels as intended.
# Also renamed the misspelled local `starteDate` -> `startDate` and dropped
# stray trailing semicolons.
test_that("getCorrectionsPlotConfig correctly returns a list of gsplot calls with needed corrections elements",{
  #NULL case returns empty list
  expect_equal(length(repgen:::getCorrectionsPlotConfig(NULL, NULL, NULL, NULL, NULL)), 0)
  expect_equal(length(repgen:::getCorrectionsPlotConfig(list(), NULL, NULL, NULL, NULL)), 0)
  #empty data frame case returns empty list
  expect_equal(length(repgen:::getCorrectionsPlotConfig(
      na.omit(data.frame(time=as.POSIXct(NA), value=NA, month=as.character(NA), comment=as.character(NA), stringsAsFactors=FALSE))
      , NULL, NULL, NULL, NULL)), 0)
  # three corrections; the 2nd and 3rd share the same datetime on purpose
  testCorrections <- data.frame(
      time=c(as.POSIXct("2016-05-03 17:00:00"), as.POSIXct("2016-05-23 17:45:00"), as.POSIXct("2016-05-23 17:45:00")),
      value=c(NA, NA, NA),
      month=c("1605", "1605", "1605"),
      comment=c("correction 1", "correction 2", "correction 3"),
      stringsAsFactors=FALSE)
  startDate <- as.POSIXct("2016-05-01 17:00:00")
  endDate <- as.POSIXct("2016-05-30 17:00:00")
  testLims <- list(xlim=c(as.POSIXct("2016-05-01 00:00:00"), as.POSIXct("2016-05-31 00:00:00")), ylim=c(1, 2))
  correctionsPlotConfigs <- repgen:::getCorrectionsPlotConfig(testCorrections, startDate, endDate, "TEST", testLims)
  #lines call constructed
  expect_equal(correctionsPlotConfigs$lines$x, 0)
  expect_equal(correctionsPlotConfigs$lines$y, 0)
  expect_equal(correctionsPlotConfigs$lines$xlim[1], as.POSIXct("2016-05-01 17:00:00"))
  expect_equal(correctionsPlotConfigs$lines$xlim[2], as.POSIXct("2016-05-30 17:00:00"))
  #two vertical lines for corrections (of the 3, two are on the same datetime)
  expect_equal(correctionsPlotConfigs$abline$v[1], as.POSIXct("2016-05-03 17:00:00"))
  expect_equal(correctionsPlotConfigs$abline$v[2], as.POSIXct("2016-05-23 17:45:00"))
  expect_equal(grep(".*TEST.*", correctionsPlotConfigs$abline$legend.name), 1) #legend entry contains the passed in label
  # horizontal arrows for connecting the vertical correction lines to their boxed labels
  expect_equal(correctionsPlotConfigs$arrows$x0[1], as.POSIXct("2016-05-03 17:00:00")) #starts at correction line
  expect_true(as.integer(correctionsPlotConfigs$arrows$x1[1]) > as.integer(as.POSIXct("2016-05-03 17:00:00"))) #in millis form, shifted to the right of x0
  expect_equal(correctionsPlotConfigs$arrows$y0[1], correctionsPlotConfigs$arrows$y1[1]) #y vals are equal for horizontal line
  expect_equal(correctionsPlotConfigs$arrows$x0[2], as.POSIXct("2016-05-23 17:45:00")) #starts at correction line
  expect_true(as.integer(correctionsPlotConfigs$arrows$x1[2]) > as.integer(as.POSIXct("2016-05-23 17:45:00"))) #in millis form, shifted to the right of x0
  expect_equal(correctionsPlotConfigs$arrows$y0[2], correctionsPlotConfigs$arrows$y1[2]) #y vals are equal for horizontal line
  expect_equal(correctionsPlotConfigs$arrows$x0[3], as.POSIXct("2016-05-23 17:45:00")) #starts at correction line
  expect_true(as.integer(correctionsPlotConfigs$arrows$x1[3]) > as.integer(as.POSIXct("2016-05-23 17:45:00"))) #in millis form, shifted to the right of x0
  expect_equal(correctionsPlotConfigs$arrows$y0[3], correctionsPlotConfigs$arrows$y1[3]) #y vals are equal for horizontal line
  expect_equal(correctionsPlotConfigs$arrows$x0[2], correctionsPlotConfigs$arrows$x0[3]) #2nd and 3rd correction line are the same
  expect_true(correctionsPlotConfigs$arrows$y0[3] < correctionsPlotConfigs$arrows$y0[2]) #arrow for 3rd correction is lower than 2nd to not overlap
  #3 points as boxes around labels for each correction (these tests are "fuzzy" since exact distances may change depending on styling requests)
  expect_true(correctionsPlotConfigs$points$x[1] > as.integer(correctionsPlotConfigs$abline$v[1])) #x shifted to the right of correction line
  expect_true(correctionsPlotConfigs$points$x[1] - as.integer(correctionsPlotConfigs$abline$v[1]) < 50000) #but not by too much
  expect_true(correctionsPlotConfigs$points$x[2] > as.integer(correctionsPlotConfigs$abline$v[2])) #x shifted to the right of correction line
  expect_true(correctionsPlotConfigs$points$x[2] - as.integer(correctionsPlotConfigs$abline$v[2]) < 50000) #but not by too much
  expect_true(correctionsPlotConfigs$points$x[3] > as.integer(correctionsPlotConfigs$abline$v[2])) #x shifted to the right of correction line
  expect_true(correctionsPlotConfigs$points$x[3] - as.integer(correctionsPlotConfigs$abline$v[2]) < 50000) #but not by too much
  expect_equal(correctionsPlotConfigs$points$x[2], correctionsPlotConfigs$points$x[3]) #at same x for the duplicate time
  expect_equal(correctionsPlotConfigs$points$y[1], correctionsPlotConfigs$points$y[2]) #corr 1 and 2 are at same y since they are far enough apart and won't overlap
  expect_true(correctionsPlotConfigs$points$y[3] < correctionsPlotConfigs$points$y[2]) #corr 3 is lower than 2 since it is at the same x and we don't want it to overlap
  #4 positioning of actual labels should match points above and be numbered labels instead of full comment
  expect_equal(correctionsPlotConfigs$text$x[1], correctionsPlotConfigs$points$x[1])
  expect_equal(correctionsPlotConfigs$text$x[2], correctionsPlotConfigs$points$x[2])
  expect_equal(correctionsPlotConfigs$text$x[3], correctionsPlotConfigs$points$x[3])
  expect_equal(correctionsPlotConfigs$text$y[1], correctionsPlotConfigs$points$y[1])
  expect_equal(correctionsPlotConfigs$text$y[2], correctionsPlotConfigs$points$y[2])
  expect_equal(correctionsPlotConfigs$text$y[3], correctionsPlotConfigs$points$y[3])
  expect_equal(correctionsPlotConfigs$text$label[1], 1)
  expect_equal(correctionsPlotConfigs$text$label[2], 3) #looks like the ordering of dupes is backward on labeling, but that's ok. This could change though
  expect_equal(correctionsPlotConfigs$text$label[3], 2)
})
setwd(dir = wd)
|
##' Logistic model
##'
##' Computes theta[1] + theta[2] * logistic(theta[3] + theta[4] * x),
##' i.e. a baseline plus an amplitude scaled by the standard logistic curve.
##'
##' @param theta parameter vector of length 4:
##'   c(baseline, amplitude, logit intercept, logit slope)
##' @param x vector of x values
##' @return vector of model predictions, same length as x
pred.logistic <- function(theta,x){
  ## plogis(q) equals exp(q)/(1 + exp(q)) but is numerically stable:
  ## the original exp()/(1 + exp()) form overflows to Inf/Inf = NaN
  ## once theta[3] + theta[4]*x exceeds ~709
  Ey <- theta[1] + theta[2]*plogis(theta[3] + theta[4]*x)
  return(Ey)
}
##' Fit logistic model
##'
##' Finds the parameters minimising the Gaussian negative log-likelihood of
##' the observed gcc_mean values, with per-observation standard deviations
##' taken from gcc_std.
##'
##' @param dat dataframe of day of year (doy), gcc_mean, gcc_std
##' @param par vector of initial parameter guess
##' @return output from numerical optimization
fit.logistic <- function(dat,par){
  ## negative log likelihood under Normal(pred.logistic(theta, doy), gcc_std)
  negLogLik <- function(theta,dat){
    predicted <- pred.logistic(theta, dat$doy)
    -sum(dnorm(dat$gcc_mean, predicted, dat$gcc_std, log = TRUE))
  }
  ## minimise by numerical optimization (optim's default method)
  optim(par, fn = negLogLik, dat = dat)
}
| /03_logistic.R | permissive | ashiklom/forecasting_activity4 | R | false | false | 714 | r | ##' Logistic model
##'
##' Evaluates a four-parameter logistic curve:
##' theta[1] + theta[2] * exp(eta) / (1 + exp(eta)), eta = theta[3] + theta[4]*x.
##'
##' @param theta parameter vector (baseline, amplitude, intercept, slope)
##' @param x vector of x values
##' @return vector of model predictions
pred.logistic <- function(theta,x){
  ## linear predictor on the logit scale
  eta <- theta[3] + theta[4] * x
  odds <- exp(eta)
  ## baseline plus amplitude scaled by the logistic of eta
  theta[1] + theta[2] * odds / (1 + odds)
}
##' Fit logistic model
##'
##' Maximum-likelihood fit of pred.logistic via optim, assuming Gaussian
##' observation error with per-point standard deviations gcc_std.
##'
##' @param dat dataframe of day of year (doy), gcc_mean, gcc_std
##' @param par vector of initial parameter guess
##' @return output from numerical optimization
fit.logistic <- function(dat,par){
  ## define log likelihood
  ## NOTE: despite the name, lnL.logistic returns the NEGATIVE log likelihood,
  ## which is what optim minimises by default
  lnL.logistic <- function(theta,dat){
    -sum(dnorm(dat$gcc_mean,pred.logistic(theta,dat$doy),dat$gcc_std,log=TRUE))
  }
  ## fit by numerical optimization
  optim(par,fn = lnL.logistic,dat=dat)
}
|
# Imports the data
# NOTE(review): relative paths assume the script is run from a directory that
# is a sibling of ../Data — confirm the intended working directory
MyData <- as.matrix(read.csv("../Data/PoundHillData.csv",header = F, stringsAsFactors = F))
MyMetaData <- read.csv("../Data/PoundHillMetaData.csv",header = T, sep=";", stringsAsFactors = F)
head(MyData)
MyData[MyData == ""] = 0 # Suitable in this case only because area was exhaustively sampled
MyData <- t(MyData) # Turns rows into columns and adds them to the data set
TempData <- as.data.frame(MyData[-1,], stringsAsFactors = F) # Turns it into a data frame minus the first row
head(TempData)
colnames(TempData) <- MyData[1,] # Assigns column names
rownames(TempData) <- NULL
head(TempData)
# NOTE(review): require() returns FALSE rather than erroring when reshape2 is
# missing — library() would fail fast here
require (reshape2)
# Sorts the data into a long format
# id columns are kept per-row; all remaining columns become Species/Count pairs
MyWrangledData <- melt(TempData, id=c("Cultivation", "Block", "Plot", "Quadrat"), variable.name = "Species", value.name = "Count")
head(MyWrangledData); tail(MyWrangledData)
# Block to ensure that all data is the correct type
MyWrangledData[, "Cultivation"] <- as.factor(MyWrangledData[, "Cultivation"])
MyWrangledData[, "Block"] <- as.factor(MyWrangledData[, "Block"])
MyWrangledData[, "Plot"] <- as.factor(MyWrangledData[, "Plot"])
MyWrangledData[, "Quadrat"] <- as.factor(MyWrangledData[, "Quadrat"])
MyWrangledData[, "Count"] <- as.integer(MyWrangledData[, "Count"])
str(MyWrangledData) # Displays the structure
| /Week3/Sandbox/Pound_hill_data.R | no_license | RLBat/CMEECourseWork | R | false | false | 1,299 | r |
# Imports the data
# NOTE(review): relative paths assume the script is run next to ../Data —
# confirm the intended working directory
MyData <- as.matrix(read.csv("../Data/PoundHillData.csv",header = F, stringsAsFactors = F))
MyMetaData <- read.csv("../Data/PoundHillMetaData.csv",header = T, sep=";", stringsAsFactors = F)
head(MyData)
MyData[MyData == ""] = 0 # Suitable in this case only because area was exhaustively sampled
MyData <- t(MyData) # Turns rows into columns and adds them to the data set
TempData <- as.data.frame(MyData[-1,], stringsAsFactors = F) # Turns it into a data frame minus the first row
head(TempData)
colnames(TempData) <- MyData[1,] # Assigns column names
rownames(TempData) <- NULL
head(TempData)
# NOTE(review): require() silently returns FALSE if reshape2 is absent;
# library() would raise an error immediately
require (reshape2)
# Sorts the data into a long format
MyWrangledData <- melt(TempData, id=c("Cultivation", "Block", "Plot", "Quadrat"), variable.name = "Species", value.name = "Count")
head(MyWrangledData); tail(MyWrangledData)
# Block to ensure that all data is the correct type
MyWrangledData[, "Cultivation"] <- as.factor(MyWrangledData[, "Cultivation"])
MyWrangledData[, "Block"] <- as.factor(MyWrangledData[, "Block"])
MyWrangledData[, "Plot"] <- as.factor(MyWrangledData[, "Plot"])
MyWrangledData[, "Quadrat"] <- as.factor(MyWrangledData[, "Quadrat"])
MyWrangledData[, "Count"] <- as.integer(MyWrangledData[, "Count"])
str(MyWrangledData) # Displays the structure
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init_config_mat.R
\name{init_config_mat}
\alias{init_config_mat}
\title{initializes the bookkeeping object for population level counts}
\usage{
init_config_mat(epimodel, init_state, t0, tmax)
}
\arguments{
\item{epimodel}{an epimodel list}
\item{init_state}{named vector of initial compartment counts (e.g. c(S = 45, I = 5, R = 0))}
\item{t0}{the first observation time}
\item{tmax}{the final observation time}
}
\value{
initialized matrix with columns to store event times and counts of
individuals in each compartment. The first and last rows of the matrix
always correspond to time 0 and tmax. The state at tmax is initialized to
init_state.
}
\description{
initializes the bookkeeping object for population level counts
}
\examples{
init_state <- c(S = 45, I = 5, R = 0)
tmax <- 5
\dontrun{init_config_mat(epimodel, init_state, t0 = 0, tmax = tmax)}
}
| /man/init_config_mat.Rd | no_license | fintzij/BDAepimodel | R | false | true | 813 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init_config_mat.R
\name{init_config_mat}
\alias{init_config_mat}
\title{initializes the bookkeeping object for population level counts}
\usage{
init_config_mat(epimodel, init_state, t0, tmax)
}
\arguments{
\item{epimodel}{an epimodel list}
\item{init_state}{named vector of initial compartment counts (e.g. c(S = 45, I = 5, R = 0))}
\item{t0}{the first observation time}
\item{tmax}{the final observation time}
}
\value{
initialized matrix with columns to store event times and counts of
individuals in each compartment. The first and last rows of the matrix
always correspond to time 0 and tmax. The state at tmax is initialized to
init_state.
}
\description{
initializes the bookkeeping object for population level counts
}
\examples{
init_state <- c(S = 45, I = 5, R = 0)
tmax <- 5
\dontrun{init_config_mat(epimodel, init_state, t0 = 0, tmax = tmax)}
}
|
library(ggplot2)
library(Rtsne)
set.seed(13)
# expression matrix (genes x samples on disk); log2-transform then transpose
# so that rows are samples, as Rtsne expects
exprMat <- read.table("113CTC_tumor_cellline.m1n13.exp.matrix", header = T)
anno <- read.table("anno.txt", header = T)
exprMat <- t(log2(exprMat + 1))
# BUG FIX: the original called Rtsne(test, ...) where `test` is undefined in
# this script; the transformed expression matrix is the intended input
tsne <- Rtsne(exprMat, check_duplicates = FALSE,
           pca = TRUE, perplexity=30, theta=0, dims=2)
# 2-D embedding plus the sample group annotation for coloring
embedding <- as.data.frame(tsne$Y)
embedding$Group <- as.factor(anno$Group)
pdf("CTC_CL_TA.TSNE.pdf", 5.5, 4.5)
ggplot(embedding, aes(x=V1, y=V2, color=Group),alpha=0.30) +
    geom_point(size=1.5) +
    scale_color_manual(values=c(CTC="blue", "cell_line"="green", "primary_tumor"="red")) +
    theme_bw() +
    theme(panel.grid =element_blank()) +
    labs(x= "tSNE.1",y = "tSNE.2")
dev.off() | /R/CTC_CL_TA.TSNE.R | permissive | Cacti-Jiang/CTC | R | false | false | 678 | r | library(ggplot2)
library(Rtsne)
set.seed(13)
# expression matrix (genes x samples on disk); log2-transform then transpose
# so that rows are samples, as Rtsne expects
exprMat <- read.table("113CTC_tumor_cellline.m1n13.exp.matrix", header = T)
anno <- read.table("anno.txt", header = T)
exprMat <- t(log2(exprMat + 1))
# BUG FIX: the original called Rtsne(test, ...) where `test` is undefined in
# this script; the transformed expression matrix is the intended input
tsne <- Rtsne(exprMat, check_duplicates = FALSE,
           pca = TRUE, perplexity=30, theta=0, dims=2)
# 2-D embedding plus the sample group annotation for coloring
embedding <- as.data.frame(tsne$Y)
embedding$Group <- as.factor(anno$Group)
pdf("CTC_CL_TA.TSNE.pdf", 5.5, 4.5)
ggplot(embedding, aes(x=V1, y=V2, color=Group),alpha=0.30) +
    geom_point(size=1.5) +
    scale_color_manual(values=c(CTC="blue", "cell_line"="green", "primary_tumor"="red")) +
    theme_bw() +
    theme(panel.grid =element_blank()) +
    labs(x= "tSNE.1",y = "tSNE.2")
dev.off() |
library(ggplot2)
library(dplyr)
library(phenoScreen)
library(phenoDist)
library(platetools)
library(Smisc)
library(caret)
library(reshape2)
library(viridis)
library(dplyr)
# load data
df <- read.csv("data/df_cell_subclass.csv")
# principal components of the feature data columns
# get_featuredata() presumably selects non-Metadata feature columns
# (from phenoScreen/platetools) — confirm against that package
pca <- prcomp(df[, get_featuredata(df)])
# create dataframe of the first 2 principal components and metadata
# NOTE(review): pca$x[,0:2] works because index 0 is silently dropped in R,
# leaving columns 1:2 — pca$x[, 1:2] would be clearer
pca_df <- data.frame(pca$x[,0:2], # first 2 prin comps
                     df[, grep("Metadata_", colnames(df))]) # metadata
# calculate the multivariate z-factor for each cell lines
# (all principal components retained here, not just the first two)
pca_df_z <- data.frame(pca$x,
                       df[, grep("Metadata_", colnames(df))])
# multi_z compares the STS (positive) and DMSO (negative) control compounds
# within each cell line's subset of the data
cl_z_factor <- sapply(split(pca_df_z, pca_df_z$Metadata_CellLine),
             function(x){
                 multi_z(x,
                     feature_cols = get_featuredata(x),
                     cmpd_col = "Metadata_compound",
                     pos = "STS",
                     neg = "DMSO")})
# dataframe of cell lines and z-factor values
cl_z_df <- data.frame(cell_line = rownames(data.frame(cl_z_factor)),
                      z_prime = cl_z_factor)
# sort by values of z_prime
cl_z_df <- transform(cl_z_df, cell_line = reorder(cell_line, - z_prime))
# dotchart of z-prime values
ggplot(data = cl_z_df,
       aes()) +
    geom_segment(aes(x = 0,
                     xend = z_prime,
                     y = cell_line,
                     yend = cell_line),
                 col = "gray40") +
    geom_point(aes(z_prime, cell_line), size = 2.5) +
    xlab("multivariate Z'") +
    ylab("") +
    theme(axis.text.y = element_text(face = "bold"))
ggsave("figures/z_factor.eps", width = 6, height = 4)
#########################################################################
# centre principal components so that the DMSO centroid is centered
# at co-ordinates 0,0
pca_df <- centre_control(pca_df,
                         cols = get_featuredata(pca_df),
                         cmpd_col = "Metadata_compound",
                         cmpd = "DMSO")
# euclidean distance function
# Returns the length of the vector (x, y) from the origin; vectorised over
# x and y element-wise.
distance <- function(x, y){
  sqrt(x * x + y * y)
}
# calculate norm (length) of each vector
# distance() operates element-wise on whole vectors, so the original
# row-by-row for loop (O(n) scalar calls plus repeated data.frame writes)
# is replaced by a single vectorised call with identical results
pca_df$dist <- distance(pca_df$PC1, pca_df$PC2)
# select a single cell line
df_mda231 <- filter(pca_df, Metadata_CellLine == "MDA231")
# select single compound data within that cell line
df_mda231_barasertib <- filter(df_mda231, Metadata_compound == "barasertib")
# scatter plot of first 2 principal components
# barasertib datapoints coloured by concentration
# (all other MDA231 points drawn first in gray as background)
ggplot() +
    geom_point(data = df_mda231,
               colour = "gray50",
               aes(x = PC1,
                   y = PC2)) +
    geom_point(size = 3,
               data = df_mda231_barasertib,
               aes(x = PC1,
                   y = PC2,
                   colour = Metadata_concentration)) +
    geom_line(data = df_mda231_barasertib,
              size = 1,
              aes(x = PC1,
                  y = PC2,
                  colour = Metadata_concentration)) +
    scale_color_viridis(name = "Concentration (nM)",
                        trans = "log10")
ggsave("figures/increasing_barasertib_mda231.eps", width = 8, height = 6)
# select a single cell line
# NOTE(review): df_mda231 is recomputed identically to the version above
df_mda231 <- filter(pca_df, Metadata_CellLine == "MDA231")
# select single compound data within that cell line
df_mda231_cycloheximide <- filter(df_mda231,
                                  Metadata_compound == "cycloheximide")
# scatter plot of first 2 principal components of cycloheximide
# points coloured by concentration
ggplot() +
    geom_point(data = df_mda231,
               colour = "gray50",
               aes(x = PC1,
                   y = PC2)) +
    geom_point(size = 3,
               data = df_mda231_cycloheximide,
               aes(x = PC1,
                   y = PC2,
                   colour = Metadata_concentration)) +
    geom_line(data = df_mda231_cycloheximide,
              size = 1,
              aes(x = PC1,
                  y = PC2,
                  colour = Metadata_concentration)) +
    scale_color_viridis(name = "Concentration (nM)",
                        trans = "log10")
ggsave("figures/increasing_cycloheximide_mda231.eps", width = 8, height = 6)
pca_df$theta <- NA # initialise empty column for loop
# loop through rows of data calculating theta for each vector(PC1, PC2)
for (i in 1:nrow(pca_df)){
pca_df$theta[i] <- theta0(c(pca_df$PC1[i], pca_df$PC2[i]))
}
# filter just barasertib data
df_barasertib <- filter(pca_df, Metadata_compound == "barasertib")
# circular hisotgram of batasertib theta values
ggplot(data = df_barasertib,
aes(x = theta,
group = Metadata_concentration)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_concentration)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
xlab("") + ylab("") +
scale_fill_viridis(name = "concentration (nM)",
trans = "log10")
ggsave("figures/directional_histogram_barasertib.eps", width = 8, height = 6)
# circular histogram of barasertib theta values
# small plot for each cell line
ggplot(data = df_barasertib,
aes(x = theta,
group = Metadata_concentration)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_concentration)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
scale_fill_viridis(name = "concentration (nM)",
trans = "log10") +
xlab("") + ylab("") +
facet_wrap(~Metadata_CellLine, ncol = 2) +
theme(axis.text.x = element_text(size = 6))
ggsave("figures/directional_histogram_barasertib_split.eps", width = 8, height = 12)
# filter barasertib and monastrol data
wanted_compounds <- c("monastrol", "barasertib")
df_two <- filter(pca_df, Metadata_compound %in% wanted_compounds, Metadata_CellLine == "MDA231")
# NOTE(review): "cycloheximide" is not one of `wanted_compounds`, so this
# relevel only moves an unused factor level to the front and has no effect on
# the plotted data -- it looks copy-pasted from the cycloheximide analysis
# above; "barasertib" was probably intended. Confirm before changing.
df_two$Metadata_compound <- relevel(df_two$Metadata_compound, "cycloheximide")
# circular histogram of barasetib and monastrol data
ggplot(data = df_two,
aes(x = theta,
group = Metadata_compound)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_compound)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
scale_fill_brewer(name = "compound", palette = "Set2")
ggsave("figures/barasertib_monastrol_hist.eps", width = 8, height = 6)
# Function to calculate the "average" vector from replicates.
# dat: numeric matrix, rows = replicate vectors, columns = components.
# Returns the component-wise column MEDIAN as a plain numeric vector. (The
# original comment said "means" but called Smisc::colMedians; base
# apply(dat, 2, median) computes the same column medians without the
# package dependence.)
average_vector <- function(dat){
  as.vector(apply(dat, 2, stats::median))
}
# filter just barasertib data from MDA-231 cell line
barasertib_data <- filter(pca_df, Metadata_compound == "barasertib" &
Metadata_CellLine == "MDA231")
# calculate the average vector from the replicates of baraserib in MDA231
# from the vector(PC1, PC2)
vector_info_barasertib <- matrix(c(barasertib_data$PC1, barasertib_data$PC2), ncol = 2)
vector_barasertib <- average_vector(vector_info_barasertib)
# filter monastrol data from MDA-231 cell line
monastrol_data <- filter(pca_df, Metadata_compound == "monastrol" &
Metadata_CellLine == "MDA231")
# calculate the average vector from the replicates of monastrol in MDA231
# from the vector(PC1, PC2)
vector_info_monastrol <- matrix(c(monastrol_data$PC1, monastrol_data$PC2), ncol = 2)
vector_monastrol <- average_vector(vector_info_monastrol)
# calculate theta between two the averaged vectors of baraserib and monastrol
theta_out <- theta(vector_barasertib, vector_monastrol)
# circular histogram of theta values from monastrol and barasertib
# this time labelled with the average vector and calculated theta value
# between the two vectors
ggplot(data = df_two,
aes(x = theta,
group = Metadata_compound)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_compound)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
geom_vline(xintercept = theta0(vector_monastrol)) +
geom_vline(xintercept = theta0(vector_barasertib)) +
geom_text(data = NULL, size = 4, x = 225, y = 10,
label = paste("theta =", format(round(theta_out, 2), nsmall = 2))) +
scale_fill_brewer(name = "compound", palette = "Set2")
ggsave("figures/barasertib_monastrol_hist_ann.eps", width = 8, height = 6)
# calculate theta value between two cell lines (MDA231 & HCC1569) from their
# average PC1/2 vectors
# select barasertib data for MDA231 and HCC1569 lines:
data_comp_cells <- filter(pca_df, Metadata_compound == "barasertib",
Metadata_CellLine == "MDA231" | Metadata_CellLine == "HCC1569")
# mean vector MDA231
just_mda <- filter(data_comp_cells, Metadata_CellLine == "MDA231")
vector_mda <- average_vector(matrix(c(just_mda$PC1, just_mda$PC2), ncol = 2))
# mean vector HCC1569
just_hcc <- filter(data_comp_cells, Metadata_CellLine == "HCC1569")
vector_hcc <- average_vector(matrix(c(just_hcc$PC1, just_hcc$PC2), ncol = 2))
# theta value between the 2 cell line's averaged vectors
theta_out <- theta(vector_mda, vector_hcc)
# circular histogram of MDA-231 and HCC1569 treated with barasertib, with
# labelled average vectors and theta value between the two cell lines
ggplot(data = data_comp_cells,
aes(x = theta,
group = Metadata_CellLine)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_CellLine)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
geom_vline(xintercept = theta0(vector_mda)) +
geom_vline(xintercept = theta0(vector_hcc)) +
geom_text(data = NULL, size = 4, x = 175, y = 15,
label = paste("theta =", format(round(theta_out, 2), nsmall = 2))) +
scale_fill_brewer(name = "Cell line", palette = "Pastel1")
ggsave("figures/hcc1569_231_hist_ann.eps", width = 8, height = 6)
##########################################
#--- Cosine analysis ---#
##########################################
# filter single concentration (1000 nM)
concentration <- 1000
df_1000 <- filter(df, Metadata_concentration == concentration)
controls <- c("DMSO", "STS")
# get control data (DMSO & staurosporine)
df_dmso <- filter(df, Metadata_compound %in% controls)
# row-bind 1000 nM and control data into a single dataframe
df_new <- rbind(df_1000, df_dmso)
# calculate the first two principal components of the featuredata
pca_out <- prcomp(df_new[, get_featuredata(df_new)])$x[,1:2]
# create dataframe of first two principal components and metadata
pca_df <- data.frame(pca_out,
df_new[, grep("Metadata_", colnames(df))])
# calculate theta and vector distances
# initialise empty columns for loop
pca_df$theta <- NA
pca_df$vector_norm <- NA
# loop through rows of principal components, calculating the theta value
# against a place-holder vector (1, 0), and the norm from the origin
for (i in 1:nrow(pca_df)){
pca_df$theta[i] <- theta0(c(pca_df$PC1[i], pca_df$PC2[i]))
pca_df$vector_norm[i] <- norm_vector(c(pca_df$PC1[i], pca_df$PC2[i]))
}
# %notin%: vectorised complement of %in% -- TRUE where elements of x are
# absent from y
`%notin%` <- Negate(`%in%`)
# create cutoff constants
cutoff_n <- 1
max_cutoff_n <- 100
# calculate cutoff from standard deviations of the norms from the origin
cutoff <- cutoff_n * sd(pca_df$vector_norm)
max_cutoff <- max_cutoff_n * sd(pca_df$vector_norm)
pca_df$cutoff <- paste("<", cutoff_n)
pca_df$cutoff[pca_df$vector_norm > cutoff] <- paste(max_cutoff_n, "> x >", cutoff_n)
pca_df$cutoff[pca_df$vector_norm > max_cutoff] <- paste(">", max_cutoff_n)
# scatter plot of first two principal components, coloured by whether they are
# beyond the cut-off value or not
ggplot(data = pca_df,
aes(x = PC1,
y = PC2,
col = as.factor(cutoff))) +
geom_point() +
coord_fixed() +
scale_color_brewer("Standard deviations", palette = "Set1")
ggsave("figures/cutoff.eps", width = 8, height = 6)
# unwanted control data labels
unwanted <- c("DMSO", "STS")
cell_lines <- c("MDA231",
"SKBR3",
"MDA157",
"T47D",
"KPL4",
"MCF7",
"HCC1569",
"HCC1954")
# Filter rows of pca_df that lie beyond the vector-norm cutoff for cell
# line `x`, excluding control compounds, and return the distinct compound
# names. Relies on globals defined above: pca_df, cutoff, unwanted, %notin%.
cell_line_cutoff <- function(x){
  beyond <- filter(pca_df,
                   Metadata_CellLine == x,
                   vector_norm > cutoff,
                   Metadata_compound %notin% unwanted)
  distinct(beyond, Metadata_compound)
}
# convert cell-line names to lower case for eval() to match variable names
for (i in cell_lines){
assign(tolower(i),
cell_line_cutoff(i))
}
# as compounds may be within the cut-off in some cell lines and beyond the
# cut-off in other cell lines, only the compounds that are beyond the cutoff in
# all eight cell lines are used in the further analyses
common_compounds <- Reduce(intersect, list(mda231$Metadata_compound,
skbr3$Metadata_compound,
mda157$Metadata_compound,
t47d$Metadata_compound,
kpl4$Metadata_compound,
mcf7$Metadata_compound,
hcc1569$Metadata_compound,
hcc1954$Metadata_compound))
# Fetch a cell-line data frame by its (lower-case) name and keep only the
# compounds common to all eight cell lines.
# get(x) retrieves the object by name directly; the original
# eval(parse(text = x)) is an anti-pattern (slower, and executes arbitrary
# code if x were ever untrusted).
filter_common_compounds <- function(x){
  filter(get(x), Metadata_compound %in% common_compounds)
}
# convert cell lines names back to lower-case (again)
for (i in cell_lines){
assign(tolower(i),
filter_common_compounds(tolower(i)))
}
th_A <- mda231$theta
th_B <- kpl4$theta
out_test <- sapply(th_A, function(x, y = th_B){abs(x - y)})
out_test <- apply(out_test, 1:2, fold_180)
dimnames(out_test) <- list(mda231$Metadata_compound, mda231$Metadata_compound)
# can use diag() to extract the diagonal of the matrix, which returns the angle
# between the drugs between the two cell-lines
diag_out <- as.data.frame(diag(out_test))
diag_out <- cbind(drug = rownames(diag_out), diag_out)
rownames(diag_out) <- NULL
names(diag_out)[2] <- "difference"
diag_out$drug <- with(diag_out, reorder(drug, difference))
cell_lines <- c("mda231",
"skbr3",
"mda157",
"t47d",
"kpl4",
"mcf7",
"hcc1569",
"hcc1954")
# dataframe of all combinations of cell-lines:
clb <- expand.grid(cell_lines, cell_lines)
# PITA factors
clb <- sapply(clb, as.character)
# start empty data frame to place results into
# will contain all combinations of cell-lines, drugs and their dt values
df_delta_theta <- data.frame(A = NA,
B = NA,
drug = NA,
difference = NA)
# Function to find delta-theta values between two cell-lines.
# a, b: lower-case cell-line names matching the per-cell-line data frames
#       created by assign() above (e.g. "mda231", "kpl4").
# Returns a data frame with one row per drug: the angle (folded to [0, 180]
# via fold_180) between that drug's vector in cell-line `a` and in `b`,
# plus the two cell-line names.
find_delta_theta <- function(a, b){
  a_ <- get(a)
  b_ <- get(b)
  th_A <- a_$theta
  th_B <- b_$theta
  # pairwise |theta_A - theta_B|: rows follow th_B, columns follow th_A
  out_test <- sapply(th_A, function(x, y = th_B){abs(x - y)})
  out_test <- apply(out_test, 1:2, fold_180)
  # compound vectors are identical across all cell lines
  # use any one of them (mda231 in this case)
  dimnames(out_test) <- list(mda231$Metadata_compound, mda231$Metadata_compound)
  # diag() extracts the matched-drug entries, i.e. the angle for each drug
  # between the two cell-lines
  diag_out <- as.data.frame(diag(out_test))
  diag_out <- cbind(drug = rownames(diag_out), diag_out)
  rownames(diag_out) <- NULL
  names(diag_out)[2] <- "difference"
  # the original used eval(substitute(a)) -- a no-op on a plain function
  # argument; the bare argument is equivalent and clearer
  diag_out$A <- a # add cell-line name
  diag_out$B <- b # add cell-line name
  # refactor 'drug' so in numerical order according to difference
  diag_out$drug <- with(diag_out, reorder(drug, difference))
  diag_out
}
# loop through all possible combinations of cell-lines and store as a list of
# dfs
list_of_df <- list()
for (row in 1:nrow(clb)){
list_of_df[row] <- list(find_delta_theta(clb[row, 1], clb[row, 2]))
}
# row-wise bind of list into single df
df_delta_theta <- do.call(rbind, list_of_df)
# make cell lines uppercase for figures
df_delta_theta[, 3:4] <- apply(df_delta_theta[, 3:4], 2, toupper)
saveRDS(df_delta_theta, file = "data/df_delta_theta")
| /analysis/analysis_figures.R | no_license | Swarchal/TCCS_paper | R | false | false | 17,043 | r | library(ggplot2)
library(dplyr)
library(phenoScreen)
library(phenoDist)
library(platetools)
library(Smisc)
library(caret)
library(reshape2)
library(viridis)
library(dplyr)
# load data
df <- read.csv("data/df_cell_subclass.csv")
# principal components of the feature data columns
pca <- prcomp(df[, get_featuredata(df)])
# create dataframe of the first 2 principal components and metadata
pca_df <- data.frame(pca$x[,0:2], # first 2 prin comps
df[, grep("Metadata_", colnames(df))]) # metadata
# calculate the multivariate z-factor for each cell lines
pca_df_z <- data.frame(pca$x,
df[, grep("Metadata_", colnames(df))])
cl_z_factor <- sapply(split(pca_df_z, pca_df_z$Metadata_CellLine),
function(x){
multi_z(x,
feature_cols = get_featuredata(x),
cmpd_col = "Metadata_compound",
pos = "STS",
neg = "DMSO")})
# dataframe of cell lines and z-factor values
cl_z_df <- data.frame(cell_line = rownames(data.frame(cl_z_factor)),
z_prime = cl_z_factor)
# sort by values of z_prime
cl_z_df <- transform(cl_z_df, cell_line = reorder(cell_line, - z_prime))
# dotchart of z-prime values
ggplot(data = cl_z_df,
aes()) +
geom_segment(aes(x = 0,
xend = z_prime,
y = cell_line,
yend = cell_line),
col = "gray40") +
geom_point(aes(z_prime, cell_line), size = 2.5) +
xlab("multivariate Z'") +
ylab("") +
theme(axis.text.y = element_text(face = "bold"))
ggsave("figures/z_factor.eps", width = 6, height = 4)
#########################################################################
# centre principal components so that the DMSO centroid is centered
# at co-ordinates 0,0
pca_df <- centre_control(pca_df,
cols = get_featuredata(pca_df),
cmpd_col = "Metadata_compound",
cmpd = "DMSO")
# euclidean distance function
distance <- function(x, y){
dist <- sqrt(x^2 + y^2)
return(dist)
}
# calculate norm (length) of each vector
pca_df$dist <- NA
for (row in 1:nrow(pca_df)){
pca_df$dist[row] <- distance(pca_df$PC1[row], pca_df$PC2[row])
}
# select a single cell line
df_mda231 <- filter(pca_df, Metadata_CellLine == "MDA231")
# select single compound data within that cell line
df_mda231_barasertib <- filter(df_mda231, Metadata_compound == "barasertib")
# scatter plot of first 2 principal components
# barasertib datapoints coloured by concentration
ggplot() +
geom_point(data = df_mda231,
colour = "gray50",
aes(x = PC1,
y = PC2)) +
geom_point(size = 3,
data = df_mda231_barasertib,
aes(x = PC1,
y = PC2,
colour = Metadata_concentration)) +
geom_line(data = df_mda231_barasertib,
size = 1,
aes(x = PC1,
y = PC2,
colour = Metadata_concentration)) +
scale_color_viridis(name = "Concentration (nM)",
trans = "log10")
ggsave("figures/increasing_barasertib_mda231.eps", width = 8, height = 6)
# select a single cell line
df_mda231 <- filter(pca_df, Metadata_CellLine == "MDA231")
# select single compound data within that cell line
df_mda231_cycloheximide <- filter(df_mda231,
Metadata_compound == "cycloheximide")
# scatter plot of first 2 principal components of cycloheximide
# points coloured by concentration
ggplot() +
geom_point(data = df_mda231,
colour = "gray50",
aes(x = PC1,
y = PC2)) +
geom_point(size = 3,
data = df_mda231_cycloheximide,
aes(x = PC1,
y = PC2,
colour = Metadata_concentration)) +
geom_line(data = df_mda231_cycloheximide,
size = 1,
aes(x = PC1,
y = PC2,
colour = Metadata_concentration)) +
scale_color_viridis(name = "Concentration (nM)",
trans = "log10")
ggsave("figures/increasing_cycloheximide_mda231.eps", width = 8, height = 6)
pca_df$theta <- NA # initialise empty column for loop
# loop through rows of data calculating theta for each vector(PC1, PC2)
for (i in 1:nrow(pca_df)){
pca_df$theta[i] <- theta0(c(pca_df$PC1[i], pca_df$PC2[i]))
}
# filter just barasertib data
df_barasertib <- filter(pca_df, Metadata_compound == "barasertib")
# circular hisotgram of batasertib theta values
ggplot(data = df_barasertib,
aes(x = theta,
group = Metadata_concentration)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_concentration)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
xlab("") + ylab("") +
scale_fill_viridis(name = "concentration (nM)",
trans = "log10")
ggsave("figures/directional_histogram_barasertib.eps", width = 8, height = 6)
# circular histogram of batasertib theta values
# small plot for each cell line
ggplot(data = df_barasertib,
aes(x = theta,
group = Metadata_concentration)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_concentration)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
scale_fill_viridis(name = "concentration (nM)",
trans = "log10") +
xlab("") + ylab("") +
facet_wrap(~Metadata_CellLine, ncol = 2) +
theme(axis.text.x = element_text(size = 6))
ggsave("figures/directional_histogram_barasertib_split.eps", width = 8, height = 12)
# filter barasertib and monastrol data
wanted_compounds <- c("monastrol", "barasertib")
df_two <- filter(pca_df, Metadata_compound %in% wanted_compounds, Metadata_CellLine == "MDA231")
df_two$Metadata_compound <- relevel(df_two$Metadata_compound, "cycloheximide")
# circular histogram of barasetib and monastrol data
ggplot(data = df_two,
aes(x = theta,
group = Metadata_compound)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_compound)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
scale_fill_brewer(name = "compound", palette = "Set2")
ggsave("figures/barasertib_monastrol_hist.eps", width = 8, height = 6)
# Function to calculate the "average" vector from replicates.
# dat: numeric matrix, rows = replicate vectors, columns = components.
# Returns the component-wise column MEDIAN as a plain numeric vector. (The
# original comment said "means" but called Smisc::colMedians; base
# apply(dat, 2, median) computes the same column medians without the
# package dependence.)
average_vector <- function(dat){
  as.vector(apply(dat, 2, stats::median))
}
# filter just barasertib data from MDA-231 cell line
barasertib_data <- filter(pca_df, Metadata_compound == "barasertib" &
Metadata_CellLine == "MDA231")
# calculate the average vector from the replicates of baraserib in MDA231
# from the vector(PC1, PC2)
vector_info_barasertib <- matrix(c(barasertib_data$PC1, barasertib_data$PC2), ncol = 2)
vector_barasertib <- average_vector(vector_info_barasertib)
# filter monastrol data from MDA-231 cell line
monastrol_data <- filter(pca_df, Metadata_compound == "monastrol" &
Metadata_CellLine == "MDA231")
# calculate the average vector from the replicates of monastrol in MDA231
# from the vector(PC1, PC2)
vector_info_monastrol <- matrix(c(monastrol_data$PC1, monastrol_data$PC2), ncol = 2)
vector_monastrol <- average_vector(vector_info_monastrol)
# calculate theta between two the averaged vectors of baraserib and monastrol
theta_out <- theta(vector_barasertib, vector_monastrol)
# circular histogram of theta values from monastrol and barasertib
# this time labelled with the average vector and calcualted theta value
# between the two vectors
ggplot(data = df_two,
aes(x = theta,
group = Metadata_compound)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_compound)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
geom_vline(xintercept = theta0(vector_monastrol)) +
geom_vline(xintercept = theta0(vector_barasertib)) +
geom_text(data = NULL, size = 4, x = 225, y = 10,
label = paste("theta =", format(round(theta_out, 2), nsmall = 2))) +
scale_fill_brewer(name = "compound", palette = "Set2")
ggsave("figures/barasertib_monastrol_hist_ann.eps", width = 8, height = 6)
# calculate theta value between two cell lines (MDA231 & HCC1569) from their
# average PC1/2 vectors
# select barasertib data for MDA231 and HCC1569 lines:
data_comp_cells <- filter(pca_df, Metadata_compound == "barasertib",
Metadata_CellLine == "MDA231" | Metadata_CellLine == "HCC1569")
# mean vector MDA231
just_mda <- filter(data_comp_cells, Metadata_CellLine == "MDA231")
vector_mda <- average_vector(matrix(c(just_mda$PC1, just_mda$PC2), ncol = 2))
# mean vector HCC1569
just_hcc <- filter(data_comp_cells, Metadata_CellLine == "HCC1569")
vector_hcc <- average_vector(matrix(c(just_hcc$PC1, just_hcc$PC2), ncol = 2))
# theta value between the 2 cell line's averaged vectors
theta_out <- theta(vector_mda, vector_hcc)
# circular histogram of MDA-231 and HCC1569 treated with barasertib, with
# labelled average vectors and theta value between the two cell lines
ggplot(data = data_comp_cells,
aes(x = theta,
group = Metadata_CellLine)) +
geom_histogram(binwidth = 15,
aes(fill = Metadata_CellLine)) +
coord_polar(start = -1.57, direction = -1) +
scale_x_continuous(breaks = seq(0, 360, by = 45), expand = c(0,0), lim = c(0, 360)) +
scale_size_area() +
geom_vline(xintercept = theta0(vector_mda)) +
geom_vline(xintercept = theta0(vector_hcc)) +
geom_text(data = NULL, size = 4, x = 175, y = 15,
label = paste("theta =", format(round(theta_out, 2), nsmall = 2))) +
scale_fill_brewer(name = "Cell line", palette = "Pastel1")
ggsave("figures/hcc1569_231_hist_ann.eps", width = 8, height = 6)
##########################################
#--- Cosine analysis ---#
##########################################
# filter single concentration (1000 nM)
concentration <- 1000
df_1000 <- filter(df, Metadata_concentration == concentration)
controls <- c("DMSO", "STS")
# get control data (DMSO & staurosporine)
df_dmso <- filter(df, Metadata_compound %in% controls)
# row-bind 100nM and control data into a single dataframe
df_new <- rbind(df_1000, df_dmso)
# calculate the first two principal components of the featuredata
pca_out <- prcomp(df_new[, get_featuredata(df_new)])$x[,1:2]
# create dataframe of first two principal components and metadata
pca_df <- data.frame(pca_out,
df_new[, grep("Metadata_", colnames(df))])
# calculate theta and vector distances
# initialise empty columns for loop
pca_df$theta <- NA
pca_df$vector_norm <- NA
# loop through rows of principal components, calculating the theta value
# against a place-holder vector (1, 0), and the norm from the origin
for (i in 1:nrow(pca_df)){
pca_df$theta[i] <- theta0(c(pca_df$PC1[i], pca_df$PC2[i]))
pca_df$vector_norm[i] <- norm_vector(c(pca_df$PC1[i], pca_df$PC2[i]))
}
# %notin%: vectorised complement of %in% -- TRUE where elements of x are
# absent from y
`%notin%` <- Negate(`%in%`)
# create cutoff constants
cutoff_n <- 1
max_cutoff_n <- 100
# calculate cutoff from standard deviations of the norms from the origin
cutoff <- cutoff_n * sd(pca_df$vector_norm)
max_cutoff <- max_cutoff_n * sd(pca_df$vector_norm)
pca_df$cutoff <- paste("<", cutoff_n)
pca_df$cutoff[pca_df$vector_norm > cutoff] <- paste(max_cutoff_n, "> x >", cutoff_n)
pca_df$cutoff[pca_df$vector_norm > max_cutoff] <- paste(">", max_cutoff_n)
# scatter plot of first two principal components, coloured by whether they are
# beyond the cut-off value or not
ggplot(data = pca_df,
aes(x = PC1,
y = PC2,
col = as.factor(cutoff))) +
geom_point() +
coord_fixed() +
scale_color_brewer("Standard deviations", palette = "Set1")
ggsave("figures/cutoff.eps", width = 8, height = 6)
# unwanted control data labels
unwanted <- c("DMSO", "STS")
cell_lines <- c("MDA231",
"SKBR3",
"MDA157",
"T47D",
"KPL4",
"MCF7",
"HCC1569",
"HCC1954")
# Filter rows of pca_df that lie beyond the vector-norm cutoff for cell
# line `x`, excluding control compounds, and return the distinct compound
# names. Relies on globals defined above: pca_df, cutoff, unwanted, %notin%.
cell_line_cutoff <- function(x){
  beyond <- filter(pca_df,
                   Metadata_CellLine == x,
                   vector_norm > cutoff,
                   Metadata_compound %notin% unwanted)
  distinct(beyond, Metadata_compound)
}
# convert cell-line names to lower case for eval() to match variable names
for (i in cell_lines){
assign(tolower(i),
cell_line_cutoff(i))
}
# as compounds may be within the cut-off in some cell lines and beyond the
# cut-off in other cell lines, only the compounds that are beyond the cutoff in
# all eight cell lines are used in the futher analyses
common_compounds <- Reduce(intersect, list(mda231$Metadata_compound,
skbr3$Metadata_compound,
mda157$Metadata_compound,
t47d$Metadata_compound,
kpl4$Metadata_compound,
mcf7$Metadata_compound,
hcc1569$Metadata_compound,
hcc1954$Metadata_compound))
# Fetch a cell-line data frame by its (lower-case) name and keep only the
# compounds common to all eight cell lines.
# get(x) retrieves the object by name directly; the original
# eval(parse(text = x)) is an anti-pattern (slower, and executes arbitrary
# code if x were ever untrusted).
filter_common_compounds <- function(x){
  filter(get(x), Metadata_compound %in% common_compounds)
}
# convert cell lines names back to lower-case (again)
for (i in cell_lines){
assign(tolower(i),
filter_common_compounds(tolower(i)))
}
th_A <- mda231$theta
th_B <- kpl4$theta
out_test <- sapply(th_A, function(x, y = th_B){abs(x - y)})
out_test <- apply(out_test, 1:2, fold_180)
dimnames(out_test) <- list(mda231$Metadata_compound, mda231$Metadata_compound)
# can use diag() to extract the diagonal of the matrix, which returns the angle
# between the drugs between the two cell-lines
diag_out <- as.data.frame(diag(out_test))
diag_out <- cbind(drug = rownames(diag_out), diag_out)
rownames(diag_out) <- NULL
names(diag_out)[2] <- "difference"
diag_out$drug <- with(diag_out, reorder(drug, difference))
cell_lines <- c("mda231",
"skbr3",
"mda157",
"t47d",
"kpl4",
"mcf7",
"hcc1569",
"hcc1954")
# dataframe of all combinations of cell-lines:
clb <- expand.grid(cell_lines, cell_lines)
# PITA factors
clb <- sapply(clb, as.character)
# start empty data frame to place results into
# will contain all combinations of cell-lines, drugs and their dt values
df_delta_theta <- data.frame(A = NA,
B = NA,
drug = NA,
difference = NA)
# function to find delta-theta values between two cell-lines
find_delta_theta <- function(a, b){
a_ <- get(a)
b_ <- get(b)
th_A <- a_$theta
th_B <- b_$theta
out_test <- sapply(th_A, function(x, y = th_B){abs(x - y)})
out_test <- apply(out_test, 1:2, fold_180)
# compound vectors are identical across all cell lines
# use any one of them (mda231 in this case)
dimnames(out_test) <- list(mda231$Metadata_compound, mda231$Metadata_compound)
# can use diag() to extract the diagonal of the matrix, which returns the angle
# between the drugs between the two cell-lines
diag_out <- as.data.frame(diag(out_test))
diag_out <- cbind(drug = rownames(diag_out), diag_out)
rownames(diag_out) <- NULL
names(diag_out)[2] <- "difference"
diag_out$A <- eval(substitute(a)) # add cell-line name
diag_out$B <- eval(substitute(b)) # add cell-line name
# refactor 'drug' so in numerical order according to difference
diag_out$drug <- with(diag_out, reorder(drug, difference))
diag_out
}
# loop through all possible combinations of cell-lines and store as a list of
# dfs
list_of_df <- list()
for (row in 1:nrow(clb)){
list_of_df[row] <- list(find_delta_theta(clb[row, 1], clb[row, 2]))
}
# row-wise bind of list into single df
df_delta_theta <- do.call(rbind, list_of_df)
# make cell lines uppercase for figures
df_delta_theta[, 3:4] <- apply(df_delta_theta[, 3:4], 2, toupper)
saveRDS(df_delta_theta, file = "data/df_delta_theta")
|
## Purpose: Quantify forest degradation 2011-2020
## Project: Mature Forest Decline
## Upstream: dashboard.R
## Downstream: plot_deg.R; spat_anal.R; plot_wifri.R
degrade = function(path #user defined path to spatial files
)
{
library(tidyverse)
library(terra)
library(sf)
## Read in first cc layer for projection
cc11 = rast(paste0(path, '/cc_2011.tif'))
## Read in SSierra Scenes and creates study area
sa = vect(paste0(path, '/ss_scenes.shp')) %>%
project(crs(cc11))
## Bring in fire perimeter data
pers = vect(paste0(path, '/firep11_20_2.shp')) %>%
project(crs(sa)) %>%
buffer(width = 0) %>%
crop(sa)
rm(sa)
pers.sf = st_as_sf(pers) %>%
group_by(year = YEAR_) %>%
summarise()
rm(pers)
## Function to calculate forest and old forest area.
## x: canopy-cover SpatRaster (percent, 0-100)
## low/high: canopy-cover bounds defining the class of interest
## oldr: optional 0/1 mask raster (e.g. mature forest); NULL = no mask
## Returns area in hectares (30 m pixels: count * 900 m^2 / 10000 m^2/ha).
f1 = function(x, low, high, oldr = NULL) {
  tmp = classify(x, rbind(c(0, low, 0),          # below thresholds
                          c(low, high + 1, 1),   # within range
                          c(high + 1, 101, 0)))  # above
  if (!is.null(oldr)) {tmp = tmp * oldr}
  n_cells = freq(tmp) %>%
    as.data.frame() %>%
    filter(value == 1) %>%
    pull(count)
  ## Guard: when no pixel falls in the class, freq() has no value == 1 row
  ## and the original returned numeric(0), which breaks the downstream
  ## data.frame() construction (only three variables had ad-hoc length-0
  ## patches). Return 0 ha instead.
  if (length(n_cells) == 0) return(0)
  n_cells * 900 / 10000
}
## Function to calculate area within the reference FRI (fire-return
## interval). Same classification as f1, then intersected with a binary
## "within rFRI" raster loaded for the year after cc_year.
## x: canopy-cover SpatRaster; low/high: canopy-cover bounds;
## oldr: optional 0/1 mask; cc_year: year of the canopy-cover layer.
## Reads `path` from the enclosing degrade() scope. Returns hectares.
f2 = function(x, low, high, oldr = NULL, cc_year = 2011) {
  tmp = classify(x, rbind(c(0, low, 0),          # below thresholds
                          c(low, high + 1, 1),   # within range
                          c(high + 1, 101, 0)))  # above
  if (!is.null(oldr)) {tmp = tmp * oldr}
  ## pull ref fri condition as of Jan 1 of the next year
  wi_year = cc_year + 1
  ## bring in binary within (1) rfri or greater than (0)
  rfri = rast(
    paste0(path, "/ss_wi_2mnrfri_",
           wi_year, ".tif")) %>%
    project(tmp, method = 'near')
  ## Get where within forest and rfri
  tmp_wi = tmp * rfri
  n_cells = freq(tmp_wi) %>%
    as.data.frame() %>%
    filter(value == 1) %>%
    pull(count)
  ## Guard: no matching pixels -> 0 ha instead of numeric(0), which would
  ## break the downstream data.frame() construction.
  if (length(n_cells) == 0) return(0)
  n_cells * 900 / 10000
}
## Bring in height
ht11 = rast('local/ss_ht2011.tif') %>%
mask(cc11) #only conifer (previously masked in cc_subtract)
## convert meters to feet for height
ht_ft = 30 * 3.28
## define mature forests
## default is right = T, which means interval is 'closed' on the right (does not include the last value)
## in this case ht_ft is not included in the first interval, but is in the second; thus 1 indicates >= ht_ft
ht_old = classify(ht11,
rbind(c(0,ht_ft,0), c(ht_ft,200,1)))
## Everything above 40% is potentially mature; will split later
cc_old = classify(cc11,
rbind(c(0,40,0), c(40,101,1)))
old = ht_old * cc_old
## moderate density mature
old_mm = classify(cc11,
rbind(c(0,40,0), c(40,61,1), c(61,101,0))) * old
## high density mature
old_hm = classify(cc11,
rbind(c(0,61,0), c(61,101,1))) * old
## Save for later
writeRaster(old, paste0(path,'/mature11.tif'), overwrite = T)
writeRaster(old_mm, paste0(path, '/mmature11.tif'), overwrite = T)
writeRaster(old_hm, paste0(path, '/hmature11.tif'), overwrite = T)
## Need a big sample because of many NAs
## NOTE(review): `cc11_for` and `cc11_old` are not defined anywhere in this
## function (only `cc11`, `ht11` and the `old*` rasters exist), so both
## spatSample() calls will fail with "object not found". Presumably
## `cc11_for` should be a forest-masked canopy-cover raster and `cc11_old`
## the mature-forest raster (`old` or `cc11 * old`) -- confirm intended
## inputs against the original repository.
ds = spatSample(cc11_for, 50000, method = "regular",
as.points = T, na.rm = T)
names(ds) = "cc2011"
ds_old = spatSample(cc11_old, 500000, method = "regular",
as.points = T, na.rm = T)
names(ds_old) = "cc2011_old"
samples11 = extract(cc11, ds) %>%
rename(conifer = 2) %>%
mutate(year = 2011) %>%
pivot_longer(cols = conifer, names_to = "class", values_to = "cc")
samples11_old = extract(cc11, ds_old) %>%
rename(mature = 2) %>%
mutate(year = 2011) %>%
pivot_longer(cols = mature, names_to = "class", values_to = "cc")
## Start sample dataframe
d2 = bind_rows(samples11, samples11_old)
## Calculate areas
for_ha = f1(cc11, low = 25, high = 100)
old_ha = f1(cc11, low = 40, high = 100, oldr = old)
mm_ha = f1(cc11, low = 40, high = 60, oldr = old)
hm_ha = f1(cc11, low = 61, high = 100, oldr = old)
for_rfri = f2(cc11, low = 25, high = 100, cc_year = 2011)
old_rfri = f2(cc11, low = 40, high = 100, oldr = old, cc_year = 2011)
mm_rfri = f2(cc11, low = 40, high = 60, oldr = old, cc_year = 2011)
hm_rfri = f2(cc11, low = 61, high = 100, oldr = old, cc_year = 2011)
## Set up dataframe to populate
d = data.frame(year = 2011,
for_ha = for_ha,
mature_ha = old_ha,
mmature_ha = mm_ha,
hmature_ha = hm_ha,
for_rfri = for_rfri,
mature_rfri = old_rfri,
mmature_rfri = mm_rfri,
hmature_rfri = hm_rfri,
for_loss = NA,
mature_loss = NA,
mmature_loss = NA,
hmature_loss = NA,
for_bloss = NA,
mature_bloss = NA,
mmature_bloss = NA,
hmature_bloss = NA)
## clean up RAM
gc()
## Years to iterate through
years = 2012:2020
## Loop through each year and subtract cc loss
## Takes about an hour
for(year in years) {
print(year)
## Previous & current year canopy cover
cc_prev = rast(paste0(path, '/cc_', year-1, '.tif'))
cc = rast(paste0(path, '/cc_', year, '.tif'))
## Change
mmi = cc_prev - cc
## Subset burned area
burn = filter(pers.sf, year == {{year}})
mmi_b = mask(mmi, vect(burn))
cc_prev_b = mask(cc_prev, vect(burn))
cc_b = mask(cc, vect(burn))
## Calculate current area in each category
fa_new = f1(cc, low = 25, high = 100)
ma_new = f1(cc, low = 40, high = 100, oldr = old)
mma_new = f1(cc, low = 40, high = 60, oldr = old)
hma_new = f1(cc, low = 61, high = 100, oldr = old)
## Get area within rfri
for_rfri_new = f2(cc, low = 25, high = 100, cc_year = year)
old_rfri_new = f2(cc, low = 40, high = 100, oldr = old, cc_year = year)
mm_rfri_new = f2(cc, low = 40, high = 60, oldr = old, cc_year = year)
hm_rfri_new = f2(cc, low = 61, high = 100, oldr = old, cc_year = year)
## Get area lost
fa_lost = d[d$year == year - 1, "for_ha"] - fa_new
ma_lost = d[d$year == year - 1, "mature_ha"] - ma_new
mma_lost = d[d$year == year - 1, "mmature_ha"] - mma_new
hma_lost = d[d$year == year - 1, "hmature_ha"] - hma_new
fa_bl = f1(cc_prev_b, low = 25, high = 100) - f1(cc_b, low = 25, high = 100)
ma_bl = f1(cc_prev_b, low = 40, high = 100, oldr = old) - f1(cc_b, low = 40, high = 100, oldr = old)
mma_bl = f1(cc_prev_b, low = 40, high = 60, oldr = old) - f1(cc_b, low = 40, high = 60, oldr = old)
hma_bl = f1(cc_prev_b, low = 61, high = 100, oldr = old) - f1(cc_b, low = 61, high = 100, oldr = old)
## some issues with zero values
if(length(ma_bl) == 0) {ma_bl = 0}
if(length(mma_bl) == 0) {mma_bl = 0}
if(length(hma_bl) == 0) {hma_bl = 0}
d = bind_rows(d, data.frame(year = year,
for_ha = fa_new,
mature_ha = ma_new,
mmature_ha = mma_new,
hmature_ha = hma_new,
for_rfri = for_rfri_new,
mature_rfri = old_rfri_new,
mmature_rfri = mm_rfri_new,
hmature_rfri = hm_rfri_new,
for_loss = fa_lost,
mature_loss = ma_lost,
mmature_loss = mma_lost,
hmature_loss = hma_lost,
for_bloss = fa_bl,
mature_bloss = ma_bl,
mmature_bloss = mma_bl,
hmature_bloss = hma_bl))
print(d)
## Save some samples
samples = extract(cc, ds) %>%
rename(conifer = 2) %>%
mutate(year = year) %>%
pivot_longer(cols = conifer, names_to = "class", values_to = "cc")
samples_old = extract(cc, ds_old) %>%
rename(mature = 2) %>%
mutate(year = year) %>%
pivot_longer(cols = mature, names_to = "class", values_to = "cc")
d2 = bind_rows(d2, samples, samples_old)
## clean house
gc()
}
write_csv(d, 'results/ann_chg.csv')
write_csv(d2, 'results/samples.csv')
}
| /code/functions/degrade.R | no_license | zacksteel/MatureForestDecline | R | false | false | 8,541 | r | ## Purpose: Quantify forest degradation 2011-2020
## Project: Mature Forest Decline
## Upstream: dashboard.R
## Downstream: plot_deg.R; spat_anal.R; plot_wifri.R
## degrade(): quantify forest degradation in the southern Sierra, 2011-2020.
## Reads annual canopy-cover (cc) rasters and fire perimeters from `path`,
## classifies forest / mature-forest area each year, tracks area lost (total
## and within burned perimeters), and writes results/ann_chg.csv (annual
## summary) and results/samples.csv (point samples of canopy cover).
degrade = function(path #user defined path to spatial files
)
{
library(tidyverse)
library(terra)
library(sf)
## Read in first cc layer for projection
cc11 = rast(paste0(path, '/cc_2011.tif'))
## Read in SSierra Scenes and creates study area
sa = vect(paste0(path, '/ss_scenes.shp')) %>%
project(crs(cc11))
## Bring in fire perimeter data (buffer(width = 0) repairs invalid polygons)
pers = vect(paste0(path, '/firep11_20_2.shp')) %>%
project(crs(sa)) %>%
buffer(width = 0) %>%
crop(sa)
rm(sa)
## Dissolve perimeters to one (multi)polygon per fire year
pers.sf = st_as_sf(pers) %>%
group_by(year = YEAR_) %>%
summarise()
rm(pers)
## Function to calculate forest and old forest area.
## f1: area (ha) with canopy cover in [low, high], optionally restricted by a
## 0/1 mature-forest mask (`oldr`). freq() counts cells classified as 1;
## count * 900 / 10000 converts cells to hectares (assumes 30 m cells --
## TODO confirm resolution). Returns numeric(0) when no cell is in class.
f1 = function(x, low, high, oldr = NULL) {
tmp = classify(x, rbind(c(0,low,0), #below thresholds
c(low,high+1,1), #within range
c(high+1, 101, 0))) #above
if(!is.null(oldr)) {tmp = tmp * oldr}
freq(tmp) %>%
as.data.frame() %>%
filter(value == 1) %>%
pull(count) * 900 / 10000
}
## Function to calculate area within the reference FRI.
## Same classification as f1, then intersected with the binary
## "within 2x median reference fire return interval" raster for cc_year + 1.
f2 = function(x, low, high, oldr = NULL, cc_year = 2011) {
tmp = classify(x, rbind(c(0,low,0), #below thresholds
c(low,high+1,1), #within range
c(high+1, 101, 0))) #above
if(!is.null(oldr)) {tmp = tmp * oldr}
## pull ref fri condition as of Jan 1 of the next year
wi_year = cc_year + 1
## bring in binary within (1) rfri or greater than (0)
rfri = rast(
paste0(path, "/ss_wi_2mnrfri_",
wi_year, ".tif")) %>%
project(tmp, method = 'near')
## Get where within forest and rfri
tmp_wi = tmp * rfri
freq(tmp_wi) %>%
as.data.frame() %>%
filter(value == 1) %>%
pull(count) * 900 / 10000
}
## Bring in height.
## NOTE(review): this path is hardcoded to 'local/' while every other input
## uses `path` -- confirm whether it should be paste0(path, '/ss_ht2011.tif').
ht11 = rast('local/ss_ht2011.tif') %>%
mask(cc11) #only conifer (previously masked in cc_subtract)
## convert meters to feet for height
ht_ft = 30 * 3.28
## define mature forests
## default is right = T, which means interval is 'closed' on the right (does not include the last value)
## in this case ht_ft is not included in the first interval, but is in the second; thus 1 indicates >= ht_ft
ht_old = classify(ht11,
rbind(c(0,ht_ft,0), c(ht_ft,200,1)))
## Everything above 40% is potentially mature; will split later
cc_old = classify(cc11,
rbind(c(0,40,0), c(40,101,1)))
old = ht_old * cc_old
## moderate density mature
old_mm = classify(cc11,
rbind(c(0,40,0), c(40,61,1), c(61,101,0))) * old
## high density mature
old_hm = classify(cc11,
rbind(c(0,61,0), c(61,101,1))) * old
## Save for later
writeRaster(old, paste0(path,'/mature11.tif'), overwrite = T)
writeRaster(old_mm, paste0(path, '/mmature11.tif'), overwrite = T)
writeRaster(old_hm, paste0(path, '/hmature11.tif'), overwrite = T)
## Need a big sample because of many NAs.
## NOTE(review): cc11_for and cc11_old are not defined anywhere in this
## function -- presumably forest- and mature-masked versions of cc11 from an
## earlier step; this will error as written. Confirm intended rasters.
ds = spatSample(cc11_for, 50000, method = "regular",
as.points = T, na.rm = T)
names(ds) = "cc2011"
ds_old = spatSample(cc11_old, 500000, method = "regular",
as.points = T, na.rm = T)
names(ds_old) = "cc2011_old"
## Canopy cover at the sample points, long format, tagged with the year
samples11 = extract(cc11, ds) %>%
rename(conifer = 2) %>%
mutate(year = 2011) %>%
pivot_longer(cols = conifer, names_to = "class", values_to = "cc")
samples11_old = extract(cc11, ds_old) %>%
rename(mature = 2) %>%
mutate(year = 2011) %>%
pivot_longer(cols = mature, names_to = "class", values_to = "cc")
## Start sample dataframe
d2 = bind_rows(samples11, samples11_old)
## Calculate 2011 baseline areas (forest >= 25% cc; mature split at 61% cc)
for_ha = f1(cc11, low = 25, high = 100)
old_ha = f1(cc11, low = 40, high = 100, oldr = old)
mm_ha = f1(cc11, low = 40, high = 60, oldr = old)
hm_ha = f1(cc11, low = 61, high = 100, oldr = old)
for_rfri = f2(cc11, low = 25, high = 100, cc_year = 2011)
old_rfri = f2(cc11, low = 40, high = 100, oldr = old, cc_year = 2011)
mm_rfri = f2(cc11, low = 40, high = 60, oldr = old, cc_year = 2011)
hm_rfri = f2(cc11, low = 61, high = 100, oldr = old, cc_year = 2011)
## Set up dataframe to populate (losses undefined for the first year)
d = data.frame(year = 2011,
for_ha = for_ha,
mature_ha = old_ha,
mmature_ha = mm_ha,
hmature_ha = hm_ha,
for_rfri = for_rfri,
mature_rfri = old_rfri,
mmature_rfri = mm_rfri,
hmature_rfri = hm_rfri,
for_loss = NA,
mature_loss = NA,
mmature_loss = NA,
hmature_loss = NA,
for_bloss = NA,
mature_bloss = NA,
mmature_bloss = NA,
hmature_bloss = NA)
## clean up RAM
gc()
## Years to iterate through
years = 2012:2020
## Loop through each year and subtract cc loss
## Takes about an hour
for(year in years) {
print(year)
## Previous & current year canopy cover
cc_prev = rast(paste0(path, '/cc_', year-1, '.tif'))
cc = rast(paste0(path, '/cc_', year, '.tif'))
## Change.
## NOTE(review): mmi and mmi_b are computed but never used below.
mmi = cc_prev - cc
## Subset burned area; {{year}} injects the loop variable so it is not
## captured by the `year` column on the left-hand side
burn = filter(pers.sf, year == {{year}})
mmi_b = mask(mmi, vect(burn))
cc_prev_b = mask(cc_prev, vect(burn))
cc_b = mask(cc, vect(burn))
## Calculate current area in each category
fa_new = f1(cc, low = 25, high = 100)
ma_new = f1(cc, low = 40, high = 100, oldr = old)
mma_new = f1(cc, low = 40, high = 60, oldr = old)
hma_new = f1(cc, low = 61, high = 100, oldr = old)
## Get area within rfri
for_rfri_new = f2(cc, low = 25, high = 100, cc_year = year)
old_rfri_new = f2(cc, low = 40, high = 100, oldr = old, cc_year = year)
mm_rfri_new = f2(cc, low = 40, high = 60, oldr = old, cc_year = year)
hm_rfri_new = f2(cc, low = 61, high = 100, oldr = old, cc_year = year)
## Get area lost (previous year minus current; _bl = loss within burns)
fa_lost = d[d$year == year - 1, "for_ha"] - fa_new
ma_lost = d[d$year == year - 1, "mature_ha"] - ma_new
mma_lost = d[d$year == year - 1, "mmature_ha"] - mma_new
hma_lost = d[d$year == year - 1, "hmature_ha"] - hma_new
fa_bl = f1(cc_prev_b, low = 25, high = 100) - f1(cc_b, low = 25, high = 100)
ma_bl = f1(cc_prev_b, low = 40, high = 100, oldr = old) - f1(cc_b, low = 40, high = 100, oldr = old)
mma_bl = f1(cc_prev_b, low = 40, high = 60, oldr = old) - f1(cc_b, low = 40, high = 60, oldr = old)
hma_bl = f1(cc_prev_b, low = 61, high = 100, oldr = old) - f1(cc_b, low = 61, high = 100, oldr = old)
## some issues with zero values: f1 returns numeric(0) when no cells fall in
## class (e.g. a year without fire).
## NOTE(review): fa_bl lacks the same guard -- confirm it can never be empty.
if(length(ma_bl) == 0) {ma_bl = 0}
if(length(mma_bl) == 0) {mma_bl = 0}
if(length(hma_bl) == 0) {hma_bl = 0}
d = bind_rows(d, data.frame(year = year,
for_ha = fa_new,
mature_ha = ma_new,
mmature_ha = mma_new,
hmature_ha = hma_new,
for_rfri = for_rfri_new,
mature_rfri = old_rfri_new,
mmature_rfri = mm_rfri_new,
hmature_rfri = hm_rfri_new,
for_loss = fa_lost,
mature_loss = ma_lost,
mmature_loss = mma_lost,
hmature_loss = hma_lost,
for_bloss = fa_bl,
mature_bloss = ma_bl,
mmature_bloss = mma_bl,
hmature_bloss = hma_bl))
print(d)
## Save some samples (same points as 2011, current-year canopy cover)
samples = extract(cc, ds) %>%
rename(conifer = 2) %>%
mutate(year = year) %>%
pivot_longer(cols = conifer, names_to = "class", values_to = "cc")
samples_old = extract(cc, ds_old) %>%
rename(mature = 2) %>%
mutate(year = year) %>%
pivot_longer(cols = mature, names_to = "class", values_to = "cc")
d2 = bind_rows(d2, samples, samples_old)
## clean house
gc()
}
write_csv(d, 'results/ann_chg.csv')
write_csv(d2, 'results/samples.csv')
}
|
## Multiple linear regression of used-car Price on vehicle attributes
## (ToyotaCorolla data set).
## NOTE(review): data("mttoyotacorolla") will fail unless an attached package
## provides that data set; the script actually works from the CSV chosen below.
data("mttoyotacorolla")
View(mttoyotacorolla)
toyotacorolla <- read.csv(file.choose()) # choose the toyotacorolla.csv data set
View(toyotacorolla)
## NOTE(review): attach() is relied on below so lm(Price ~ Age_08_04) etc. can
## see the columns without a data= argument; prefer data= in new code.
attach(toyotacorolla)
### Partial correlation matrix - pure correlation b/n the variables
#install.packages("corpcor")
library(corpcor)
cor2pcor(cor(toyotacorolla))
# The linear model of interest: all candidate predictors at once
model.toyotacorolla <- lm(Price~Age_08_04+KM+HP+cc+Doors+Gears+Quarterly_Tax+Weight,data = toyotacorolla)
summary(model.toyotacorolla)
### r-squared: 0.78 -- reasonably strong fit for the full model.
# Simple regressions below check each predictor on its own.
# NOTE(review): the "computer_data" prefix in these model names looks like a
# copy/paste from another exercise; they are Toyota models.
# Prediction based on only Age_08_04
model.computer_dataA<-lm(Price~Age_08_04)
summary(model.computer_dataA) # age_08_04 became significant
# r-squared: 0.76, moderately correlated
# Prediction based on only KM
model.computer_dataKM<-lm(Price~KM)
summary(model.computer_dataKM) # km became significant
# Prediction based on only HP
model.computer_dataHP<-lm(Price~HP)
summary(model.computer_dataHP) # HP became significant
# Prediction based on only cc
model.computer_datacc<-lm(Price~cc)
summary(model.computer_datacc) # cc became significant
# Prediction based on only Doors
model.computer_dataD<-lm(Price~Doors)
summary(model.computer_dataD) # doors significant
## Prediction based on only Gears
model.computer_dataP<-lm(Price~Gears)
summary(model.computer_dataP) # Gears became significant
# Prediction based on only Quarterly_Tax
model.computer_dataQ<-lm(Price~Quarterly_Tax)
summary(model.computer_dataQ) # quarterly_tax became significant
# Prediction based on only Weight
model.computer_dataW<-lm(Price~Weight)
summary(model.computer_dataW) # weight became significant
#### Final model: drop cc and Doors, keep the rest
model.toyotacorollaf <- lm(Price~Age_08_04+KM+HP+Gears+Quarterly_Tax+Weight,data = toyotacorolla)
summary(model.toyotacorollaf)
library(psych)
pairs.panels(toyotacorolla)
library(car)
## Variance Inflation Factor to check collinearity b/n variables
vif(model.toyotacorollaf)
## vif > 10 indicates collinearity among the variables
## Added-variable plot to check correlation b/n predictors and the response
avPlots(model.toyotacorollaf)
## VIF and AV plot has given us an indication to delete "wt" variable
## Panel function for pairs(): writes the correlation between x and y at the
## centre of the panel.
##   digits : significant digits for the coefficient
##   prefix : text printed before the coefficient
##   cex.cor: optional text size; when missing, the label is scaled to fill
##            the panel (0.4 / strwidth).
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor)
{
  ## Remember the user coordinates and restore them when the panel is done,
  ## then switch to a unit square so (0.5, 0.5) is the panel centre.
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- cor(x, y)
  ## Formatting together with a reference value keeps 'digits' consistent.
  txt <- format(c(r, 0.123456789), digits = digits)[1]
  txt <- paste(prefix, txt, sep = "")
  ## BUG FIX: the original only defined 'cex' when cex.cor was missing, so any
  ## caller that supplied cex.cor failed with "object 'cex' not found".
  ## Honour a supplied cex.cor; keep the auto-scaling default otherwise.
  if (missing(cex.cor)) cex.cor <- 0.4 / strwidth(txt)
  text(0.5, 0.5, txt, cex = cex.cor)
}
## Scatter plot matrix with correlation coefficients in the upper panels
pairs(toyotacorolla, upper.panel = panel.cor,
      main = "Scatter plot matrix with Correlation coefficients")
# It is better to delete influential observations than to drop a whole
# predictor column, which throws away far more information.
# Deletion diagnostics for identifying influential observations
influence.measures(model.toyotacorollaf)
library(car)
## Plotting influence measures
## NOTE(review): windows() only works on Windows; use dev.new() for portability.
windows()
influenceIndexPlot(model.toyotacorollaf, id.n = 3) # index plots for influence measures
influencePlot(model.toyotacorollaf, id.n = 3) # a user-friendly view of the above
## Refit after removing influential observations.
## BUG FIX: the response column is 'Price' (capital P), and rows must be
## dropped with df[-rows, ] -- the original df[-c(961)] indexed COLUMNS, which
## for an out-of-range negative index silently returned the data unchanged.
model_1 <- lm(Price ~ ., data = toyotacorolla[-c(961), ])
summary(model_1)
model_2 <- lm(Price ~ ., data = toyotacorolla[-c(222), ])
summary(model_2)
model_3 <- lm(Price ~ ., data = toyotacorolla[-c(602, 222), ])
summary(model_3)
######## Final model
## BUG FIX: 'computer_data' was never defined in this script (copy/paste from
## another exercise); the intended data set is toyotacorolla.
plot(lm(Price ~ ., data = toyotacorolla[-c(602, 222), ]))
summary(lm(Price ~ ., data = toyotacorolla[-c(602, 222), ]))
# Evaluate model LINE assumptions:
# residual plots, QQ plot, std residuals vs fitted, Cook's distance
qqPlot(model.toyotacorollaf, id.n = 4)
# QQ plot of studentized residuals helps in identifying outliers
hist(residuals(model_3)) # close to normal distribution
| /toyotacorolla.R | no_license | monika2612/multi-linear-regression | R | false | false | 3,701 | r |
## Multiple linear regression of used-car Price on vehicle attributes
## (ToyotaCorolla data set).
## NOTE(review): data("mttoyotacorolla") will fail unless an attached package
## provides that data set; the script actually works from the CSV chosen below.
data("mttoyotacorolla")
View(mttoyotacorolla)
toyotacorolla <- read.csv(file.choose()) # choose the toyotacorolla.csv data set
View(toyotacorolla)
## NOTE(review): attach() is relied on below so lm(Price ~ Age_08_04) etc. can
## see the columns without a data= argument; prefer data= in new code.
attach(toyotacorolla)
### Partial correlation matrix - pure correlation b/n the variables
#install.packages("corpcor")
library(corpcor)
cor2pcor(cor(toyotacorolla))
# The linear model of interest: all candidate predictors at once
model.toyotacorolla <- lm(Price~Age_08_04+KM+HP+cc+Doors+Gears+Quarterly_Tax+Weight,data = toyotacorolla)
summary(model.toyotacorolla)
### r-squared: 0.78 -- reasonably strong fit for the full model.
# Simple regressions below check each predictor on its own.
# NOTE(review): the "computer_data" prefix in these model names looks like a
# copy/paste from another exercise; they are Toyota models.
# Prediction based on only Age_08_04
model.computer_dataA<-lm(Price~Age_08_04)
summary(model.computer_dataA) # age_08_04 became significant
# r-squared: 0.76, moderately correlated
# Prediction based on only KM
model.computer_dataKM<-lm(Price~KM)
summary(model.computer_dataKM) # km became significant
# Prediction based on only HP
model.computer_dataHP<-lm(Price~HP)
summary(model.computer_dataHP) # HP became significant
# Prediction based on only cc
model.computer_datacc<-lm(Price~cc)
summary(model.computer_datacc) # cc became significant
# Prediction based on only Doors
model.computer_dataD<-lm(Price~Doors)
summary(model.computer_dataD) # doors significant
## Prediction based on only Gears
model.computer_dataP<-lm(Price~Gears)
summary(model.computer_dataP) # Gears became significant
# Prediction based on only Quarterly_Tax
model.computer_dataQ<-lm(Price~Quarterly_Tax)
summary(model.computer_dataQ) # quarterly_tax became significant
# Prediction based on only Weight
model.computer_dataW<-lm(Price~Weight)
summary(model.computer_dataW) # weight became significant
#### Final model: drop cc and Doors, keep the rest
model.toyotacorollaf <- lm(Price~Age_08_04+KM+HP+Gears+Quarterly_Tax+Weight,data = toyotacorolla)
summary(model.toyotacorollaf)
library(psych)
pairs.panels(toyotacorolla)
library(car)
## Variance Inflation Factor to check collinearity b/n variables
vif(model.toyotacorollaf)
## vif > 10 indicates collinearity among the variables
## Added-variable plot to check correlation b/n predictors and the response
avPlots(model.toyotacorollaf)
## VIF and AV plot has given us an indication to delete "wt" variable
## Panel function for pairs(): writes the correlation between x and y at the
## centre of the panel.
##   digits : significant digits for the coefficient
##   prefix : text printed before the coefficient
##   cex.cor: optional text size; when missing, the label is scaled to fill
##            the panel (0.4 / strwidth).
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor)
{
  ## Remember the user coordinates and restore them when the panel is done,
  ## then switch to a unit square so (0.5, 0.5) is the panel centre.
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- cor(x, y)
  ## Formatting together with a reference value keeps 'digits' consistent.
  txt <- format(c(r, 0.123456789), digits = digits)[1]
  txt <- paste(prefix, txt, sep = "")
  ## BUG FIX: the original only defined 'cex' when cex.cor was missing, so any
  ## caller that supplied cex.cor failed with "object 'cex' not found".
  ## Honour a supplied cex.cor; keep the auto-scaling default otherwise.
  if (missing(cex.cor)) cex.cor <- 0.4 / strwidth(txt)
  text(0.5, 0.5, txt, cex = cex.cor)
}
## Scatter plot matrix with correlation coefficients in the upper panels
pairs(toyotacorolla, upper.panel = panel.cor,
      main = "Scatter plot matrix with Correlation coefficients")
# It is better to delete influential observations than to drop a whole
# predictor column, which throws away far more information.
# Deletion diagnostics for identifying influential observations
influence.measures(model.toyotacorollaf)
library(car)
## Plotting influence measures
## NOTE(review): windows() only works on Windows; use dev.new() for portability.
windows()
influenceIndexPlot(model.toyotacorollaf, id.n = 3) # index plots for influence measures
influencePlot(model.toyotacorollaf, id.n = 3) # a user-friendly view of the above
## Refit after removing influential observations.
## BUG FIX: the response column is 'Price' (capital P), and rows must be
## dropped with df[-rows, ] -- the original df[-c(961)] indexed COLUMNS, which
## for an out-of-range negative index silently returned the data unchanged.
model_1 <- lm(Price ~ ., data = toyotacorolla[-c(961), ])
summary(model_1)
model_2 <- lm(Price ~ ., data = toyotacorolla[-c(222), ])
summary(model_2)
model_3 <- lm(Price ~ ., data = toyotacorolla[-c(602, 222), ])
summary(model_3)
######## Final model
## BUG FIX: 'computer_data' was never defined in this script (copy/paste from
## another exercise); the intended data set is toyotacorolla.
plot(lm(Price ~ ., data = toyotacorolla[-c(602, 222), ]))
summary(lm(Price ~ ., data = toyotacorolla[-c(602, 222), ]))
# Evaluate model LINE assumptions:
# residual plots, QQ plot, std residuals vs fitted, Cook's distance
qqPlot(model.toyotacorollaf, id.n = 4)
# QQ plot of studentized residuals helps in identifying outliers
hist(residuals(model_3)) # close to normal distribution
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trends.R
\name{cancer_trends}
\alias{cancer_trends}
\title{cancer_trends}
\usage{
cancer_trends(trend = "incidence", state = NULL)
}
\arguments{
\item{trend}{"incidence" or "mortality"}
\item{state}{state abbreviation to download data for, e.g. "MA"}
}
\description{
Trends over time in cancer incidence or mortality (selected via the trend argument).
}
| /man/cancer_trends.Rd | permissive | SilentSpringInstitute/RStateCancerProfiles | R | false | true | 386 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trends.R
\name{cancer_trends}
\alias{cancer_trends}
\title{cancer_trends}
\usage{
cancer_trends(trend = "incidence", state = NULL)
}
\arguments{
\item{trend}{"incidence" or "mortality"}
\item{state}{state abbreviation to download data for, e.g. "MA"}
}
\description{
Trends over time in cancer incidence or mortality (selected via the trend argument).
}
|
## Build and plot a co-occurrence ("coincidence") network of exam subjects:
## how often pairs of basic/extended ("podstawowa"/"rozszerzona") subjects
## were taken together by the same student.
library(ggplot2)
library(dplyr)
library(igraph)
## Exam results: one row per student, one column per subject score (NA = not taken)
wyniki <- read.csv("../dane/przetworzone/sumy_laureaty.csv")
## NOTE(review): 'kolumny' is computed but never used below.
kolumny = grep("podstawowa|rozszerzona", colnames(wyniki))
## Co-occurrence counts: crossprod of the "took this subject" indicator matrix.
## The compulsory basic subjects (Polish, maths, English) are excluded.
koincydencje <- wyniki %>% select(matches("podstawowa|rozszerzona")) %>%
select(-matches("polski_p|matematyka_p|angielski_p")) %>%
is.na %>% `!` %>%
as.matrix %>% crossprod
write.csv(koincydencje, "../dane/przetworzone/koincydencje.csv")
## Undirected weighted graph; the diagonal (per-subject counts) sizes vertices
g <- graph.adjacency(koincydencje, weighted=TRUE, mode="undirected", diag = FALSE)
V(g)$size <- sqrt(diag(koincydencje)/1000)
E(g)$width <- 2*sqrt(E(g)$weight/1000)
# Alternative layouts considered:
# layout.kamada.kawai
# layout.fruchterman.reingold
# Could not get a nice, weight-driven attraction out of these -- TODO revisit.
# NOTE(review): layout.spring was removed in igraph >= 1.0; confirm the igraph
# version in use, or switch to layout_with_fr(g, weights = ...).
pozycje <- layout.spring(g, params=list(weights=sqrt(E(g)$weight/100), niter=10000))
plot(g,
layout=pozycje,
vertex.label.dist=0.5)
| /eksploracje/wspoludzial.r | no_license | stared/delab-matury | R | false | false | 858 | r | library(ggplot2)
## Build and plot a co-occurrence ("coincidence") network of exam subjects:
## how often pairs of basic/extended ("podstawowa"/"rozszerzona") subjects
## were taken together by the same student.
library(dplyr)
library(igraph)
## Exam results: one row per student, one column per subject score (NA = not taken)
wyniki <- read.csv("../dane/przetworzone/sumy_laureaty.csv")
## NOTE(review): 'kolumny' is computed but never used below.
kolumny = grep("podstawowa|rozszerzona", colnames(wyniki))
## Co-occurrence counts: crossprod of the "took this subject" indicator matrix.
## The compulsory basic subjects (Polish, maths, English) are excluded.
koincydencje <- wyniki %>% select(matches("podstawowa|rozszerzona")) %>%
select(-matches("polski_p|matematyka_p|angielski_p")) %>%
is.na %>% `!` %>%
as.matrix %>% crossprod
write.csv(koincydencje, "../dane/przetworzone/koincydencje.csv")
## Undirected weighted graph; the diagonal (per-subject counts) sizes vertices
g <- graph.adjacency(koincydencje, weighted=TRUE, mode="undirected", diag = FALSE)
V(g)$size <- sqrt(diag(koincydencje)/1000)
E(g)$width <- 2*sqrt(E(g)$weight/1000)
# Alternative layouts considered:
# layout.kamada.kawai
# layout.fruchterman.reingold
# Could not get a nice, weight-driven attraction out of these -- TODO revisit.
# NOTE(review): layout.spring was removed in igraph >= 1.0; confirm the igraph
# version in use, or switch to layout_with_fr(g, weights = ...).
pozycje <- layout.spring(g, params=list(weights=sqrt(E(g)$weight/100), niter=10000))
plot(g,
layout=pozycje,
vertex.label.dist=0.5)
|
library(tidyverse)
library("gridExtra")
library(waffle)
library(scales)
library(gt)
library(webshot)
source(file = "code/functions.R")
source(file = "1_process_data.R")
# waffle ------------------------------------------------------------------
## Proportional waffle chart of 2020 record counts by sector (one square per
## ~100 records; make_proportional = TRUE normalises to 100 squares).
waf <- ggplot(
secsum_20[order(secsum_20$sec_frq), ],
aes(
fill = reorder(sector, rank),
values = round(sec_frq / 100)
)
) +
expand_limits(
x = c(0, 0),
y = c(0, 0)
) +
coord_equal() +
labs(
fill = NULL,
color = NULL
)
waf <- waf + geom_waffle(
n_rows = 10,
size = .5,
make_proportional = TRUE,
flip = TRUE,
color = "#f8f2e4",
radius = unit(9, "pt")
) +
ggtitle("Sector Count 2020") +
guides(fill = guide_legend(nrow = 9))
## theme_plt() is the project theme helper from code/functions/functions.R
waf <- theme_plt(waf, "waf") + theme(plot.title = element_text(size = 32))
# 2020 Sector Summary -----------------------------------------------------
## Horizontal bars of 2020 average earnings per sector, ordered by rank.
p_secsum_d_20 <- ggplot(
secsum_20,
aes(
y = reorder(
factor(sector),
rank
),
x = money,
fill = reorder(
factor(sector),
rank
)
)
) +
geom_bar(stat = "identity") +
ggtitle("Sector Avg Earnings 2020") +
xlab("Avg Earnings") +
ylab(element_blank()) +
scale_x_continuous(labels = scales::dollar_format())
p_secsum_d_20 <- theme_plt(p_secsum_d_20, "nl")
# Change from 19-20 -------------------------------------------------------
## Lollipop chart: change in record count per sector between 2019 and 2020
## (point + stem from zero, labelled with the raw delta).
p_change_1920 <- ggplot(
change_1920,
aes(
x = reorder(
sector,
rank
),
y = delta_count,
label = delta_count,
color = reorder(
sector,
rank
)
)
) +
geom_point(aes(fill = reorder(
sector,
rank
)),
stat = "identity",
size = 8,
pch = 21
) +
geom_segment(aes(
y = 0,
x = sector,
yend = delta_count,
xend = sector,
color = reorder(
sector,
rank
)
),
size = 2
) +
geom_text(
color = "black",
size = 5,
nudge_x = .25
) +
scale_y_continuous(
labels = scales::number_format(),
limits = c(-500, 17500)
) +
coord_flip() +
ggtitle("Sector Change (19-20) in Count") +
ylab("Delta Count") +
xlab("")
p_change_1920 <- theme_plt(p_change_1920, "nl")
# --- # Average Salaries
## Same lollipop layout as above, for the change in average earnings.
p_change_1920_d <- ggplot(
change_1920,
aes(
x = reorder(
sector,
rank
),
y = delta_money,
label = dollar(round(delta_money)),
color = reorder(
sector,
rank
)
)
) +
geom_point(aes(fill = reorder(
sector,
rank
)),
stat = "identity",
size = 8,
pch = 21
) +
geom_segment(aes(
y = 0,
x = sector,
yend = delta_money,
xend = sector,
color = reorder(
sector,
rank
)
),
size = 2
) +
geom_text(
color = "black",
size = 5,
nudge_x = .25
) +
coord_flip() +
ggtitle("Sector Change (19 - 20) in Avg Earnings") +
ylab("Delta Avg Earnings") +
xlab("Count") +
scale_y_continuous(
labels = scales::dollar_format(),
limits = c(-6000, 6000)
)
p_change_1920_d <- theme_plt(p_change_1920_d, "nl")
# Analysis through time ---------------------------------------------------
## Annual record counts and mean earnings, stacked into a two-row grob.
p_longsum <- ggplot(
master,
aes(x = lbl_year)
) +
scale_y_continuous(labels = scales::number_format()) +
geom_bar() +
ggtitle("Count Across Time") +
ylab("Count") +
xlab("Year")
## Bar height is the per-year summary of total_income (stat_summary's
## default summary, mean_se, supplies y).
## BUG FIX: removed the empty trailing argument in
## geom_bar(stat = "summary", ), which only parsed because rlang's dots
## tolerate trailing commas -- fragile and non-idiomatic.
p_longsum_d <- ggplot(
master,
aes(y = total_income, x = lbl_year)
) +
geom_bar(stat = "summary") +
scale_y_continuous(labels = scales::dollar_format()) +
ggtitle("Avg Earnings Across Time") +
ylab("Avg Earnings") +
xlab("Year")
## arrangeGrob() returns a grob; it is written to disk by save_plt() later.
p_longsum_grid <- arrangeGrob(theme_plt(p_longsum, "nl"),
theme_plt(p_longsum_d, "nl"),
nrow = 2
)
# --- # Including Sectors
## Stacked per-year histogram of counts by sector, plus a line chart of
## sector average earnings through time.
p_longsecsum <- ggplot(master, aes(x = lbl_year)) +
geom_histogram(aes(fill = reorder(factor(sector), rank)),
binwidth = .5
) +
scale_y_continuous(labels = scales::number_format()) +
ggtitle("Sector Count Across Time") +
xlab("Year") +
ylab("Count")
## NOTE(review): the y limits (100k-175k) silently drop any sector/year mean
## outside that range (scale limits convert to NA) -- confirm that is intended.
p_secsum_d<- ggplot(
secsum,
aes(
x = lbl_year,
y = money
)
) +
geom_line(aes(color = reorder(factor(sector), rank)), lwd = 1.3) +
scale_y_continuous(
labels = scales::dollar_format(),
limits = c(100000, 175000)
) +
ggtitle("Sector Avg Earnings Across Time")
p_longsecsum <- theme_plt(p_longsecsum, "l") +
theme(legend.justification = "left")
p_secsum_d<- theme_plt(p_secsum_d, "nl")
# Percent Change Through Time ---------------------------------------------
## Year-over-year percent change per sector: counts (top) and earnings
## (bottom), combined into one grob.
p_secsum_perc <- ggplot(secsum_perc, aes(x = lbl_year, y = sec_frq_chg)) +
geom_line(aes(color = reorder(factor(sector), rank))) +
scale_y_continuous(labels = scales::percent_format())
p_secsum_perc <- theme_plt(p_secsum_perc, "grid")
p_secsum_perc_d <- ggplot(secsum_perc, aes(x = lbl_year, y = money_chg)) +
geom_line(aes(color = reorder(factor(sector), rank))) +
scale_y_continuous(labels = scales::percent_format())
p_secsum_perc_d <- theme_plt(p_secsum_perc_d, "line") +
theme(legend.justification = "left")
p_secsum_perc_grid <- arrangeGrob(p_secsum_perc,
p_secsum_perc_d,
nrow = 2
)
# Infl - Violins ----------------------------------------------------------
## Violin plot of inflation-adjusted salaries per year; the horizontal line
## marks the $100k disclosure threshold in 1996 dollars.
## NOTE(review): salaries outside 50k-500k are dropped by the scale limits.
p_violins <- ggplot(master_inf, aes(x = factor(lbl_year), y = salary_inf)) +
geom_violin() +
geom_hline(yintercept = 100000, color = "#2270b5") +
ggtitle("Count Across Time (1996 Dollars)") +
xlab("Year") +
ylab("Salary in Real Dollars (1996)") +
scale_y_continuous(
labels = scales::dollar_format(),
limits = c(50000, 500000)
)
p_violins <- theme_plt(p_violins, "nl")
# Infl - Sector Summary 2020 ----------------------------------------------
## Inflation-adjusted (1996 dollars) versions of the 2020 sector bars and the
## longitudinal sector plots.
p_secsum_20_adj <- ggplot(
secsum_20_adj,
aes(
y = reorder(factor(sector), -rank),
x = sec_frq,
fill = reorder(factor(sector), rank)
)
) +
geom_bar(stat = "identity") +
ggtitle("Sector Count 2020 (1996 Dollars)") +
xlab("Count") +
ylab("") +
scale_x_continuous(labels = scales::number_format())
p_secsum_d_20_adj <- ggplot(
secsum_20_adj,
aes(
y = reorder(factor(sector), -rank),
x = money,
fill = reorder(factor(sector), rank)
)
) +
geom_bar(stat = "identity") +
ggtitle("Sector Avg Earnings 2020 (1996 Dollars)") +
xlab("Avg Earnings") +
ylab("") +
scale_x_continuous(labels = scales::dollar_format())
p_secsum_20_adj <- theme_plt(p_secsum_20_adj, "nl")
p_secsum_d_20_adj <- theme_plt(p_secsum_d_20_adj, "nl")
p_longsecsum_adj <- ggplot(master_adj, aes(x = lbl_year)) +
geom_histogram(aes(fill = reorder(factor(sector), rank)),
binwidth = .5
) +
scale_y_continuous(labels = scales::number_format()) +
ggtitle("Sector Count (1996 Dollars)")
p_longsecsum_adj <- theme_plt(p_longsecsum_adj, "l")
## Attach sector ranks so the legend ordering matches the other plots
secsum_adj = left_join(secsum_adj, rank_secs, "sector")
p_longsecsum_adj_d <- ggplot(
secsum_adj,
aes(
x = lbl_year,
y = money
)
) +
geom_line(aes(color = reorder(factor(sector), rank)), lwd = 1.3) +
scale_y_continuous(
labels = scales::dollar_format(),
limits = c(100000, 175000)
) +
ggtitle("Sector Avg Earnings (1996 Dollars)")
p_longsecsum_adj_d <- theme_plt(p_longsecsum_adj_d, "l")
# Modeling ----------------------------------------------------------------
## GAM fit on 1996-2019 counts, extrapolated to 2020 (fullrange = TRUE);
## the observed 2020 value is overlaid in orange for comparison.
p_longsum_pred <- ggplot(
longsum %>% filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(
method = "gam",
formula = y ~ s(x, k=9),
fullrange = TRUE,
color = "brown") +
geom_point(
data = longsum %>% filter(lbl_year > 2019),
aes(x = lbl_year, y = sec_frq),
color = "orange",
size = 3
) +
ggtitle("Forecasting 2020") +
xlab("Year") +
ylab("Count")
## NOTE(review): this bare expression prints the un-themed plot when run
## interactively; it has no effect when the script is source()d.
p_longsum_pred
## Per-sector version; the three sectors of interest get a brown frame.
p_longsum_pred_sec <- ggplot(
secsum %>% filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(method = "gam",
formula = y ~ s(x, k=9),
fullrange = TRUE,
color = "brown") +
geom_point(
data = secsum %>% filter(lbl_year == 2020),
aes(x = lbl_year, y = sec_frq),
color = "orange"
) +
ggtitle("Sector Forecasting 2020") +
xlab("Year") +
ylab("Count") +
geom_rect(
data = subset(
secsum,
sector %in% c(
"Crown Agencies",
"School Boards",
"Hospitals And Boards Of Public Health"
)
),
fill = NA, colour = "brown", xmin = -Inf, xmax = Inf,
ymin = -Inf, ymax = Inf
) +
facet_wrap(~sector, scales = "free_y")
p_longsum_pred <- theme_plt(p_longsum_pred, "nl")
p_longsum_pred_sec <- theme_plt(p_longsum_pred_sec, "nl")
# Adjusted Models ---------------------------------------------------------
## Inflation-adjusted (1996 dollars) versions of the forecasting plots.
## NOTE(review): unlike the unadjusted versions above, these smooths omit the
## explicit formula = y ~ s(x, k=9) -- confirm the default basis is intended.
p_longsum_pred_adj <- ggplot(
longsum_adj %>% filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(method = "gam", fullrange = TRUE, color = "blue") +
geom_point(
data = longsum_adj %>% filter(lbl_year == 2020),
aes(x = lbl_year, y = sec_frq),
color = "red",
size = 3
) +
ggtitle("Forecasting 2020 (1996 Dollars)") +
xlab("Year") +
ylab("Count")
p_longsum_pred_sec_adj <- ggplot(
secsum_adj %>%
filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(method = "gam", fullrange = TRUE) +
geom_point(
data = secsum_adj %>%
filter(lbl_year == 2020),
aes(x = lbl_year, y = sec_frq),
color = "red"
) +
ggtitle("Sector Forecasting 2020 (1996 Dollars)") +
xlab("Year") +
ylab("Count") +
facet_wrap(~sector, scales = "free_y")
p_longsum_pred_sec_adj <- theme_plt(p_longsum_pred_sec_adj, "nl")
p_longsum_pred_adj <- theme_plt(p_longsum_pred_adj, "nl")
# Management versus Professionals ------------------------------------------
## Side-by-side bars of counts by job title, nominal vs 1996 dollars.
p_jobsum_20 <- ggplot(mgmt_20, aes(x = job_title, y = n)) +
geom_bar(stat = "summary") +
scale_y_continuous(labels = number_format()) +
ggtitle("(2020 Dollars)")
p_jobsum_20 <- theme_plt(p_jobsum_20, "nl")
p_jobsum_20_adj <- ggplot(mgmt_20_adj, aes(x = job_title, y = n)) +
geom_bar(stat = "summary") +
scale_y_continuous(labels = number_format()) +
ggtitle("(1996 Dollars)")
p_jobsum_20_adj <- theme_plt(p_jobsum_20_adj, "nl")
p_jobsum_20_grid <- arrangeGrob(p_jobsum_20, p_jobsum_20_adj, ncol = 2)
# Supplemental ------------------------------------------------------------
## Which sectors appear in the raw data in which years (one line per sector).
sector_db <- master_raw %>% group_by(calendar_year, sector) %>% summarise(n=n())
p_sector_db <- ggplot(sector_db, aes(x=calendar_year, y=sector)) +
geom_line(size=2) +
ggtitle("Sectors Change Through Years")
p_sector_db <- theme_plt(p_sector_db, "nl")
# Creating Tables ---------------------------------------------------------
# Top 5 Jobs: gt table of the five most common 2020 job titles with counts
# and average salaries, saved as a PNG.
top_5_jobs = top_jobs %>%
slice(1:5)
top_5_jobs = top_5_jobs %>% mutate(job_title = stringr::str_to_sentence(job_title))
## BUG FIX: removed the stray double comma in fmt_currency(... "USD",,
## decimals = 0), which passed an empty (missing) argument into the call.
top_5_table = top_5_jobs %>% gt() %>%
fmt_number(columns = vars(n), decimals = 0) %>%
fmt_currency(columns = vars(mean_sal), currency = "USD", decimals = 0) %>%
tab_header(
title = md("**The 5 Most Popular Job Titles in 2020**")) %>%
cols_label(
job_title = md("**Job Title**"),
n = md("**Count**"),
mean_sal = md("**Avg Salary**")) %>%
tab_options(table.width = pct(100)) %>%
cols_align(align = "center")
top_5_table %>% gtsave(filename = "top_5_table.png", path="tables/")
# --- # Relevant Sector Tables
## Top 5 job titles by growth (delta) within each sector of interest.
jobs = dd_delta %>%
group_by(sector) %>%
arrange(-delta, .by_group = TRUE) %>% slice(1:5)
## Sentence-case long titles; keep short abbreviations (<= 2 chars) upper-case
jobs = jobs %>% mutate(job_title = ifelse(nchar(job_title)>2,
stringr::str_to_sentence(job_title),
stringr::str_to_upper(job_title)))
jobs_ch = change_1920 %>%
filter(sector %in% c("Crown Agencies",
"Hospitals And Boards Of Public Health",
"School Boards")) %>%
select(sector, delta_count)
## Overall 2019 -> 2020 growth in record count
change_20 = nrow(master_20) - nrow(master_19)
## Per-title share of the sector's growth (secinc) and of total growth (totinc)
jobs_tab = left_join(jobs, jobs_ch, by="sector") %>%
mutate(secinc = delta/delta_count) %>%
mutate(totinc = delta/change_20)
## make_table() is defined in code/functions/functions.R.
## NOTE(review): "Project Manger" in the source note below is a typo in the
## rendered table text ("Manager") -- fix in the string when convenient.
crown_change = make_table("Crown Agencies" )%>%
tab_source_note(
source_note = "Specialists increased from 2 in 2019 to 216 in 2020."
) %>%
tab_source_note(
source_note = "CM = Case Manager PM = Project Manger"
)
hosp_change = make_table("Hospitals And Boards Of Public Health")
school_change = make_table("School Boards")
crown_change %>% gtsave(filename = "crown_change.png", path="tables/")
hosp_change %>% gtsave(filename = "hosp_change.png", path="tables/")
school_change %>% gtsave(filename = "school_change.png", path="tables/")
# Saves -------------------------------------------------------------------
## Write every figure to disk via the project helper save_plt(name, w, h).
save_plt(waf, "waffle", 12, 8)
save_plt(p_secsum_d_20, "p_secsum_d_20", 15, 8.5)
save_plt(p_change_1920, "p_change_1920", 15, 8.5)
save_plt(p_change_1920_d, "p_change_1920_d", 15, 8.5)
save_plt(p_longsum_grid, "p_longsum_grid", 12, 10)
save_plt(p_secsum_perc_grid, "p_secsum_perc_grid", 12, 10)
save_plt(p_longsecsum, "p_longsecsum", 15, 7.5)
save_plt(p_secsum_d, "p_secsum_d", 15, 7.5)
save_plt(p_longsecsum_adj, "p_longsecsum_adj", 15, 7.5)
save_plt(p_longsecsum_adj_d, "p_longsecsum_adj_d", 15, 7.5)
## BUG FIX: the title-size tweak was previously added to save_plt()'s RETURN
## value (save_plt(...) + theme(...)), i.e. after the file was written, so it
## could not affect the saved plot. Apply it to the plot before saving.
save_plt(p_violins + theme(plot.title = element_text(size = 18)),
"p_violins", 16, 10)
save_plt(p_secsum_20_adj, "p_secsum_20_adj", 15, 10)
save_plt(p_secsum_d_20_adj, "p_secsum_d_20_adj", 18, 10)
save_plt(p_longsum_pred, "p_longsum_pred", 15, 8.5)
save_plt(p_longsum_pred_sec, "p_longsum_pred_sec", 15, 8.5)
save_plt(p_longsum_pred_adj, "p_longsum_pred_adj", 15, 8.5)
save_plt(p_longsum_pred_sec_adj, "p_longsum_pred_sec_adj", 15, 8.5)
save_plt(p_jobsum_20_grid, "p_jobsum_20_grid", 12, 8.5)
save_plt(p_sector_db, "p_sector_db", 12, 6)
| /2_plot_data.R | no_license | ricardochejfec/BreakingDown_OSL20 | R | false | false | 13,762 | r | library(tidyverse)
library("gridExtra")
library(waffle)
library(scales)
library(gt)
library(webshot)
source(file = "code/functions.R")
source(file = "1_process_data.R")
# waffle ------------------------------------------------------------------
waf <- ggplot(
secsum_20[order(secsum_20$sec_frq), ],
aes(
fill = reorder(sector, rank),
values = round(sec_frq / 100)
)
) +
expand_limits(
x = c(0, 0),
y = c(0, 0)
) +
coord_equal() +
labs(
fill = NULL,
color = NULL
)
waf <- waf + geom_waffle(
n_rows = 10,
size = .5,
make_proportional = TRUE,
flip = TRUE,
color = "#f8f2e4",
radius = unit(9, "pt")
) +
ggtitle("Sector Count 2020") +
guides(fill = guide_legend(nrow = 9))
waf <- theme_plt(waf, "waf") + theme(plot.title = element_text(size = 32))
# 2020 Sector Summary -----------------------------------------------------
p_secsum_d_20 <- ggplot(
secsum_20,
aes(
y = reorder(
factor(sector),
rank
),
x = money,
fill = reorder(
factor(sector),
rank
)
)
) +
geom_bar(stat = "identity") +
ggtitle("Sector Avg Earnings 2020") +
xlab("Avg Earnings") +
ylab(element_blank()) +
scale_x_continuous(labels = scales::dollar_format())
p_secsum_d_20 <- theme_plt(p_secsum_d_20, "nl")
# Change from 19-20 -------------------------------------------------------
p_change_1920 <- ggplot(
change_1920,
aes(
x = reorder(
sector,
rank
),
y = delta_count,
label = delta_count,
color = reorder(
sector,
rank
)
)
) +
geom_point(aes(fill = reorder(
sector,
rank
)),
stat = "identity",
size = 8,
pch = 21
) +
geom_segment(aes(
y = 0,
x = sector,
yend = delta_count,
xend = sector,
color = reorder(
sector,
rank
)
),
size = 2
) +
geom_text(
color = "black",
size = 5,
nudge_x = .25
) +
scale_y_continuous(
labels = scales::number_format(),
limits = c(-500, 17500)
) +
coord_flip() +
ggtitle("Sector Change (19-20) in Count") +
ylab("Delta Count") +
xlab("")
p_change_1920 <- theme_plt(p_change_1920, "nl")
# --- # Average Salaries
p_change_1920_d <- ggplot(
change_1920,
aes(
x = reorder(
sector,
rank
),
y = delta_money,
label = dollar(round(delta_money)),
color = reorder(
sector,
rank
)
)
) +
geom_point(aes(fill = reorder(
sector,
rank
)),
stat = "identity",
size = 8,
pch = 21
) +
geom_segment(aes(
y = 0,
x = sector,
yend = delta_money,
xend = sector,
color = reorder(
sector,
rank
)
),
size = 2
) +
geom_text(
color = "black",
size = 5,
nudge_x = .25
) +
coord_flip() +
ggtitle("Sector Change (19 - 20) in Avg Earnings") +
ylab("Delta Avg Earnings") +
xlab("Count") +
scale_y_continuous(
labels = scales::dollar_format(),
limits = c(-6000, 6000)
)
p_change_1920_d <- theme_plt(p_change_1920_d, "nl")
# Analysis through time ---------------------------------------------------
p_longsum <- ggplot(
master,
aes(x = lbl_year)
) +
scale_y_continuous(labels = scales::number_format()) +
geom_bar() +
ggtitle("Count Across Time") +
ylab("Count") +
xlab("Year")
# Average earnings across time: bar height is the per-year summary of
# total_income. stat = "summary" uses its default aggregate (the mean);
# NOTE(review): confirm the mean is the intended statistic here.
p_longsum_d <- ggplot(
  master,
  aes(y = total_income, x = lbl_year)
) +
  geom_bar(stat = "summary") + # removed stray trailing comma (empty argument)
  scale_y_continuous(labels = scales::dollar_format()) +
  ggtitle("Avg Earnings Across Time") +
  ylab("Avg Earnings") +
  xlab("Year")
p_longsum_grid <- arrangeGrob(theme_plt(p_longsum, "nl"),
theme_plt(p_longsum_d, "nl"),
nrow = 2
)
# --- # Including Sectors
p_longsecsum <- ggplot(master, aes(x = lbl_year)) +
geom_histogram(aes(fill = reorder(factor(sector), rank)),
binwidth = .5
) +
scale_y_continuous(labels = scales::number_format()) +
ggtitle("Sector Count Across Time") +
xlab("Year") +
ylab("Count")
p_secsum_d<- ggplot(
secsum,
aes(
x = lbl_year,
y = money
)
) +
geom_line(aes(color = reorder(factor(sector), rank)), lwd = 1.3) +
scale_y_continuous(
labels = scales::dollar_format(),
limits = c(100000, 175000)
) +
ggtitle("Sector Avg Earnings Across Time")
p_longsecsum <- theme_plt(p_longsecsum, "l") +
theme(legend.justification = "left")
p_secsum_d<- theme_plt(p_secsum_d, "nl")
# Percent Change Through Time ---------------------------------------------
p_secsum_perc <- ggplot(secsum_perc, aes(x = lbl_year, y = sec_frq_chg)) +
geom_line(aes(color = reorder(factor(sector), rank))) +
scale_y_continuous(labels = scales::percent_format())
p_secsum_perc <- theme_plt(p_secsum_perc, "grid")
p_secsum_perc_d <- ggplot(secsum_perc, aes(x = lbl_year, y = money_chg)) +
geom_line(aes(color = reorder(factor(sector), rank))) +
scale_y_continuous(labels = scales::percent_format())
p_secsum_perc_d <- theme_plt(p_secsum_perc_d, "line") +
theme(legend.justification = "left")
p_secsum_perc_grid <- arrangeGrob(p_secsum_perc,
p_secsum_perc_d,
nrow = 2
)
# Infl - Violins ----------------------------------------------------------
# Violin plot of inflation-adjusted salaries per year, with a horizontal
# reference line at $100,000 (1996 dollars) and the y-axis clipped to
# $50k-$500k.
p_violins <- ggplot(master_inf, aes(x = factor(lbl_year), y = salary_inf)) +
  geom_violin() +
  geom_hline(yintercept = 100000, color = "#2270b5") +
  scale_y_continuous(
    labels = scales::dollar_format(),
    limits = c(50000, 500000)
  ) +
  labs(
    title = "Count Across Time (1996 Dollars)",
    x = "Year",
    y = "Salary in Real Dollars (1996)"
  )
p_violins <- theme_plt(p_violins, "nl")
# Infl - Sector Summary 2020 ----------------------------------------------
p_secsum_20_adj <- ggplot(
secsum_20_adj,
aes(
y = reorder(factor(sector), -rank),
x = sec_frq,
fill = reorder(factor(sector), rank)
)
) +
geom_bar(stat = "identity") +
ggtitle("Sector Count 2020 (1996 Dollars)") +
xlab("Count") +
ylab("") +
scale_x_continuous(labels = scales::number_format())
p_secsum_d_20_adj <- ggplot(
secsum_20_adj,
aes(
y = reorder(factor(sector), -rank),
x = money,
fill = reorder(factor(sector), rank)
)
) +
geom_bar(stat = "identity") +
ggtitle("Sector Avg Earnings 2020 (1996 Dollars)") +
xlab("Avg Earnings") +
ylab("") +
scale_x_continuous(labels = scales::dollar_format())
p_secsum_20_adj <- theme_plt(p_secsum_20_adj, "nl")
p_secsum_d_20_adj <- theme_plt(p_secsum_d_20_adj, "nl")
p_longsecsum_adj <- ggplot(master_adj, aes(x = lbl_year)) +
geom_histogram(aes(fill = reorder(factor(sector), rank)),
binwidth = .5
) +
scale_y_continuous(labels = scales::number_format()) +
ggtitle("Sector Count (1996 Dollars)")
p_longsecsum_adj <- theme_plt(p_longsecsum_adj, "l")
secsum_adj = left_join(secsum_adj, rank_secs, "sector")
p_longsecsum_adj_d <- ggplot(
secsum_adj,
aes(
x = lbl_year,
y = money
)
) +
geom_line(aes(color = reorder(factor(sector), rank)), lwd = 1.3) +
scale_y_continuous(
labels = scales::dollar_format(),
limits = c(100000, 175000)
) +
ggtitle("Sector Avg Earnings (1996 Dollars)")
p_longsecsum_adj_d <- theme_plt(p_longsecsum_adj_d, "l")
# Modeling ----------------------------------------------------------------
p_longsum_pred <- ggplot(
longsum %>% filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(
method = "gam",
formula = y ~ s(x, k=9),
fullrange = TRUE,
color = "brown") +
geom_point(
data = longsum %>% filter(lbl_year > 2019),
aes(x = lbl_year, y = sec_frq),
color = "orange",
size = 3
) +
ggtitle("Forecasting 2020") +
xlab("Year") +
ylab("Count")
p_longsum_pred
p_longsum_pred_sec <- ggplot(
secsum %>% filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(method = "gam",
formula = y ~ s(x, k=9),
fullrange = TRUE,
color = "brown") +
geom_point(
data = secsum %>% filter(lbl_year == 2020),
aes(x = lbl_year, y = sec_frq),
color = "orange"
) +
ggtitle("Sector Forecasting 2020") +
xlab("Year") +
ylab("Count") +
geom_rect(
data = subset(
secsum,
sector %in% c(
"Crown Agencies",
"School Boards",
"Hospitals And Boards Of Public Health"
)
),
fill = NA, colour = "brown", xmin = -Inf, xmax = Inf,
ymin = -Inf, ymax = Inf
) +
facet_wrap(~sector, scales = "free_y")
p_longsum_pred <- theme_plt(p_longsum_pred, "nl")
p_longsum_pred_sec <- theme_plt(p_longsum_pred_sec, "nl")
# Adjusted Models ---------------------------------------------------------
p_longsum_pred_adj <- ggplot(
longsum_adj %>% filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(method = "gam", fullrange = TRUE, color = "blue") +
geom_point(
data = longsum_adj %>% filter(lbl_year == 2020),
aes(x = lbl_year, y = sec_frq),
color = "red",
size = 3
) +
ggtitle("Forecasting 2020 (1996 Dollars)") +
xlab("Year") +
ylab("Count")
p_longsum_pred_sec_adj <- ggplot(
secsum_adj %>%
filter(lbl_year <= 2019),
aes(x = lbl_year, y = sec_frq)
) +
geom_point() +
xlim(1996, 2020) +
stat_smooth(method = "gam", fullrange = TRUE) +
geom_point(
data = secsum_adj %>%
filter(lbl_year == 2020),
aes(x = lbl_year, y = sec_frq),
color = "red"
) +
ggtitle("Sector Forecasting 2020 (1996 Dollars)") +
xlab("Year") +
ylab("Count") +
facet_wrap(~sector, scales = "free_y")
p_longsum_pred_sec_adj <- theme_plt(p_longsum_pred_sec_adj, "nl")
p_longsum_pred_adj <- theme_plt(p_longsum_pred_adj, "nl")
# Management versus Professionals ------------------------------------------
p_jobsum_20 <- ggplot(mgmt_20, aes(x = job_title, y = n)) +
geom_bar(stat = "summary") +
scale_y_continuous(labels = number_format()) +
ggtitle("(2020 Dollars)")
p_jobsum_20 <- theme_plt(p_jobsum_20, "nl")
p_jobsum_20_adj <- ggplot(mgmt_20_adj, aes(x = job_title, y = n)) +
geom_bar(stat = "summary") +
scale_y_continuous(labels = number_format()) +
ggtitle("(1996 Dollars)")
p_jobsum_20_adj <- theme_plt(p_jobsum_20_adj, "nl")
p_jobsum_20_grid <- arrangeGrob(p_jobsum_20, p_jobsum_20_adj, ncol = 2)
# Supplemental ------------------------------------------------------------
sector_db <- master_raw %>% group_by(calendar_year, sector) %>% summarise(n=n())
p_sector_db <- ggplot(sector_db, aes(x=calendar_year, y=sector)) +
geom_line(size=2) +
ggtitle("Sectors Change Through Years")
p_sector_db <- theme_plt(p_sector_db, "nl")
# Creating Tables ---------------------------------------------------------
# Top 5 Jobs: build and save a gt table of the five most common job titles.
top_5_jobs <- top_jobs %>%
  slice(1:5) %>%
  mutate(job_title = stringr::str_to_sentence(job_title))
top_5_table <- top_5_jobs %>%
  gt() %>%
  fmt_number(columns = vars(n), decimals = 0) %>%
  # removed duplicated comma (",,") which produced an empty argument
  fmt_currency(columns = vars(mean_sal), currency = "USD", decimals = 0) %>%
  tab_header(
    title = md("**The 5 Most Popular Job Titles in 2020**")) %>%
  cols_label(
    job_title = md("**Job Title**"),
    n = md("**Count**"),
    mean_sal = md("**Avg Salary**")) %>%
  tab_options(table.width = pct(100)) %>%
  cols_align(align = "center")
top_5_table %>% gtsave(filename = "top_5_table.png", path = "tables/")
# --- # Relevant Sector Tables
jobs = dd_delta %>%
group_by(sector) %>%
arrange(-delta, .by_group = TRUE) %>% slice(1:5)
jobs = jobs %>% mutate(job_title = ifelse(nchar(job_title)>2,
stringr::str_to_sentence(job_title),
stringr::str_to_upper(job_title)))
jobs_ch = change_1920 %>%
filter(sector %in% c("Crown Agencies",
"Hospitals And Boards Of Public Health",
"School Boards")) %>%
select(sector, delta_count)
change_20 = nrow(master_20) - nrow(master_19)
jobs_tab = left_join(jobs, jobs_ch, by="sector") %>%
mutate(secinc = delta/delta_count) %>%
mutate(totinc = delta/change_20)
# Crown Agencies change table with explanatory footnotes.
crown_change <- make_table("Crown Agencies") %>%
  tab_source_note(
    source_note = "Specialists increased from 2 in 2019 to 216 in 2020."
  ) %>%
  tab_source_note(
    # fixed typo ("Manger" -> "Manager") and added a separator between the
    # two abbreviation definitions
    source_note = "CM = Case Manager; PM = Project Manager"
  )
hosp_change = make_table("Hospitals And Boards Of Public Health")
school_change = make_table("School Boards")
crown_change %>% gtsave(filename = "crown_change.png", path="tables/")
hosp_change %>% gtsave(filename = "hosp_change.png", path="tables/")
school_change %>% gtsave(filename = "school_change.png", path="tables/")
# Saves -------------------------------------------------------------------
save_plt(waf, "waffle", 12, 8)
save_plt(p_secsum_d_20, "p_secsum_d_20", 15, 8.5)
save_plt(p_change_1920, "p_change_1920", 15, 8.5)
save_plt(p_change_1920_d, "p_change_1920_d", 15, 8.5)
save_plt(p_longsum_grid, "p_longsum_grid", 12, 10)
save_plt(p_secsum_perc_grid, "p_secsum_perc_grid", 12, 10)
save_plt(p_longsecsum, "p_longsecsum", 15, 7.5)
save_plt(p_secsum_d, "p_secsum_d", 15, 7.5)
save_plt(p_longsecsum_adj, "p_longsecsum_adj", 15, 7.5)
save_plt(p_longsecsum_adj_d, "p_longsecsum_adj_d", 15, 7.5)
# Apply the larger title size BEFORE saving. The original code added
# theme() to the return value of save_plt(), so the styling never reached
# the saved file.
p_violins <- p_violins + theme(plot.title = element_text(size = 18))
save_plt(p_violins, "p_violins", 16, 10)
save_plt(p_secsum_20_adj, "p_secsum_20_adj", 15, 10)
save_plt(p_secsum_d_20_adj, "p_secsum_d_20_adj", 18, 10)
save_plt(p_longsum_pred, "p_longsum_pred", 15, 8.5)
save_plt(p_longsum_pred_sec, "p_longsum_pred_sec", 15, 8.5)
save_plt(p_longsum_pred_adj, "p_longsum_pred_adj", 15, 8.5)
save_plt(p_longsum_pred_sec_adj, "p_longsum_pred_sec_adj", 15, 8.5)
save_plt(p_jobsum_20_grid, "p_jobsum_20_grid", 12, 8.5)
save_plt(p_sector_db, "p_sector_db", 12, 6)
|
/SAR_function_biome_power_model.R | no_license | laurajkehoe/SAR_resampling_approach | R | false | false | 6,444 | r | ||
#' Extract data and iterate over batches to estimate zero probability models
#'
#' @param s.data,cpm.data raw and transformed data
#' @param batch the batch vector
#' @param n.mean.class see zeroProbModel
#' @param minFracZeroes minimum fraction of zeroes before zero-inflation is applied
#'
#' @return a list of binomial regression parameters
fracZeroLogitModel <- function(s.data, batch, cpm.data, n.mean.class,
                               minFracZeroes) {
  # Library size (total count) of every cell/sample
  lib.sizes <- colSums(s.data)
  # Logical matrix flagging zero counts in the raw data
  is.zero <- s.data == 0
  # Fit one zero-probability model per batch
  zeroModels <- tapply(colnames(s.data), batch, function(cols) {
    fit <- zeroProbModel(cpm.data = cpm.data[, cols],
                         logL = log(lib.sizes[cols]),
                         is.zero[, cols], n.mean.class = n.mean.class)
    # Per-gene fraction of zero counts within this batch
    frac.zero <- rowMeans(is.zero[, cols])
    # Per-gene mean expression within this batch
    gene.means <- rowMeans(cpm.data[, cols])
    # Keep the means of genes exceeding the zero-fraction threshold
    means.large <- gene.means[frac.zero > minFracZeroes]
    list(zeroModel = fit, meansLarge = means.large)
  })
}
} | /R/fracZeroLogitModel.R | no_license | CenterForStatistics-UGent/SPsimSeq | R | false | false | 1,086 | r | #' Extract data and iterate over batches to estimate zero probability models
#'
#' @param s.data,cpm.data raw and transformed data
#' @param batch the batch vector
#' @param n.mean.class see zeroProbModel
#' @param minFracZeroes minimum fraction of zeroes before zero-inflation is applied
#'
#' @return a list of binomial regression parameters
fracZeroLogitModel <- function(s.data, batch, cpm.data, n.mean.class,
minFracZeroes){
LS = colSums(s.data)
zeroMat = s.data == 0
zeroModels = tapply(colnames(s.data), batch, function(coln){
zeroModel = zeroProbModel(cpm.data = cpm.data[, coln], logL = log(LS[coln]),
zeroMat[, coln], n.mean.class = n.mean.class)
#Calculate zero fractions of each gene within the batches
zeroFrac = rowMeans(zeroMat[, coln])
#Calculate gene-wise means
geneMeans = rowMeans(cpm.data[, coln])
#Retain the means of the genes exceeding the zero fraction threshold
meansLarge = geneMeans[zeroFrac > minFracZeroes]
list(zeroModel = zeroModel, meansLarge = meansLarge)
})
} |
## README: If you want to see how your input data works here, you can run:
#-----------------------------
# 0. Setup environment & load dataset
# NOTE(review): setwd() to an absolute, user-specific path makes this script
# non-portable; consider a project-relative working directory instead.
getwd();
workingDir = "/Users/jylee43/Google Drive/coursework_2017SPRING/ProblemSolving/4_ReadDataWork/Microarray";
setwd(workingDir);
getwd();
# All averaged "top variance" expression tables in the working directory
fileList=list.files(workingDir, pattern="_ave_TopVar.csv")
fileList
#-----------------------------
# 1. load dataset
# NOTE(review): the hard-coded file name below is immediately overwritten by
# fileList[1]; keep one or the other.
InputFileName="GSE10670_ave_TopVar.csv"
InputFileName=fileList[1]
InputFileName
# header = T / stringsAsFactors = F: prefer TRUE/FALSE (T and F are reassignable)
ExprData <- read.table(InputFileName, sep = "," , header = T, na.strings ="", stringsAsFactors= F)
dim(ExprData)
head(ExprData)
#--------
# sort
head(ExprData[,1])
#ExprData[order(ExprData[,1]),]
ExprDataOrdered= ExprData[order(ExprData[,1]),]
head(ExprDataOrdered)
#--------
exp.data = ExprDataOrdered[, 2: (dim(ExprDataOrdered)[2]) ]
rownames(exp.data) = ExprDataOrdered[,1]
dim(exp.data)
head(exp.data)
#-----------------------------
# 1. load dataset
DapDataFileName="AllUniq.chr1-5_GEM_events.nS_targets.tab"
DapData <- read.table(paste0("/Users/jylee43/Google Drive/coursework_2017SPRING/ProblemSolving/4_ReadDataWork/", DapDataFileName), sep = "\t" , header = T, na.strings ="", stringsAsFactors= F)
dim(DapData)
head(DapData)
head(rownames(exp.data) )
head(DapData[1])
head(DapData[2])
DapData[1,1] %in% rownames(exp.data)
DapData[1,2] %in% rownames(exp.data)
DapData[1,1] %in% rownames(exp.data) && DapData[1,2] %in% rownames(exp.data)
(DapData[2,] %in% rownames(exp.data) )[2] * (DapData[2,] %in% rownames(exp.data) )[1]
head(DapData[,1] %in% rownames(exp.data))
head(DapData[,2] %in% rownames(exp.data))
cbind(head(DapData), (as.matrix(head(DapData[,1] %in% rownames(exp.data)) * head(DapData[,2] %in% rownames(exp.data)))))
DapFiltered=as.matrix(DapData[,1] %in% rownames(exp.data) * DapData[,2] %in% rownames(exp.data))
DapData_Filtered= cbind(DapData, DAP_RNA=DapFiltered)
head(DapData_Filtered)
head(DapData_Filtered[,3])
head( DapData_Filtered[which(DapData_Filtered[,3]==1), ] )
DapFilteredData= DapData_Filtered[which(DapData_Filtered[,3]==1), c(1,2) ]
dim(DapFilteredData)
head(DapFilteredData)
dim(DapData)
#-----------------------------
DapDataFileName
InputFileName
OutputFileName=paste0(gsub(".csv","",InputFileName),"_", gsub(".tab",".csv", DapDataFileName))
OutputFileName
write.csv(DapFilteredData, file = OutputFileName, row.names=FALSE)
#-----------------------------#-----------------------------
format(Sys.time(), "%Y/%m/%d_%H:%M:%S")
# Keep DAP edges whose BOTH endpoints are present in the expression matrix.
# Vectorized replacement for the original row-by-row loop, which grew
# DapFilteredList with rbind() on every hit (O(n^2)) and printed each index.
# NOTE(review): this recomputes, as a cross-check, the same filter already
# applied above to produce DapFilteredData.
keep <- DapData[, 1] %in% rownames(exp.data) &
  DapData[, 2] %in% rownames(exp.data)
DapFilteredList <- DapData[keep, ]
format(Sys.time(), "%Y/%m/%d_%H:%M:%S")
#-----------------------------#-----------------------------
#http://stackoverflow.com/questions/16584948/how-to-create-weighted-adjacency-list-matrix-from-edge-list
library(igraph)
head(DapFilteredData)
DapFilteredDataList=c(DapFilteredData[,1],DapFilteredData[,2])
NumUniqGenes= length(unique(DapFilteredDataList))
NumUniqGenes
DapFilteredDataGraph=graph.data.frame(DapFilteredData)
DapFilteredDataGraph
DapFilteredDataGraphMatrix=get.adjacency(DapFilteredDataGraph,sparse=FALSE)
dim(DapFilteredDataGraphMatrix)
sum(DapFilteredDataGraphMatrix)
#-----------------------------#-----------------------------
format(Sys.time(), "%Y/%m/%d_%H:%M:%S")
DapFilteredList=list()
DapFilteredList
for ( i in c(1:length(DapData[,1])) )
{
print(i)
if (DapData[i,1] %in% rownames(exp.data) && DapData[i,2] %in% rownames(exp.data))
{
#print(DapData[i,])
DapFilteredList=rbind(DapFilteredList, DapData[i,])
}
}
#-----------------------------#-----------------------------
#-----------------------------
library(iRafNet)
dim(exp.data)
DapFilteredDataUniqueGenes =unique(DapFilteredDataList)
head(DapFilteredDataUniqueGenes)
length(DapFilteredDataUniqueGenes)
exp.data_Filtered = exp.data[rownames(exp.data) %in% DapFilteredDataUniqueGenes, ]
dim(exp.data_Filtered)
head(exp.data_Filtered)
#-------
head(rownames(DapFilteredDataGraphMatrix))
head(rownames(exp.data_Filtered))
indx=match(rownames(DapFilteredDataGraphMatrix), rownames(exp.data_Filtered))
head(indx)
head(exp.data_Filtered[c(indx),])
exp.data_FilteredMatched=exp.data_Filtered[c(indx),]
head(exp.data_FilteredMatched)
head(rownames(DapFilteredDataGraphMatrix))
OutputFileName=paste0(gsub(".csv","",InputFileName),"_FilteredSortedFor_", gsub("tab","csv", DapDataFileName))
OutputFileName
write.csv(exp.data_FilteredMatched, file = OutputFileName)
#-----------------------------
exp.data=t(exp.data_FilteredMatched)
dim(exp.data)
# 2. Standardize variables to mean 0 and variance 1
exp.data.st1 =(apply(exp.data, 1, function(x) { (x - mean(x)) / sd(x) } )) #st for column, genes
exp.data.st2 =(apply(exp.data.st1, 2, function(x) { (x - mean(x)) / sd(x) } )) #st for column, genes
par(mar=c(8, 4, 4, 4) + 0.1)
par(mfrow = c(1,3));
boxplot(t(exp.data), las = 2)
boxplot((exp.data.st1), las = 2)
boxplot((exp.data.st2), las = 2)
#-----------------------------#-----------------------------
# 2-1. Visualization of Standardized variables
par(mfrow = c(1,2));
boxplot(data, ylim=c(-4, 3))
abline(h=c(-1,1), lty=2,col="blue"); abline(h=c(0), lty=1,col="red");
boxplot(data.st, ylim=c(-4, 3))
abline(h=c(-1,1), lty=2,col="blue"); abline(h=c(0), lty=1,col="red");
summary(data)
summary(data.st)
#-----------------------------#-----------------------------
# 3. Run iRafNet and obtain importance score of regulatory relationships
library(iRafNet)
data.st=(exp.data.st2)
dim(data.st)
W=DapFilteredDataGraphMatrix
dim(W)
NumUniqGenes
MTRY=round(sqrt(NumUniqGenes-1))
MTRY
genes.name=rownames(data.st)
head(genes.name)
out.iRafNet<-iRafNet(data.st, W, mtry=round(sqrt(NumUniqGenes-1)), ntree=1000, genes.name)
#-----------------------------
str(out.iRafNet)
head(out.iRafNet)
dim(out.iRafNet)
min(out.iRafNet[,3])
max(out.iRafNet[,3])
summary(out.iRafNet[,3])
boxplot(out.iRafNet[,3])
hist(out.iRafNet[,3])
dim(out.iRafNet)
dim(out.iRafNet[(out.iRafNet[,3]>0), ])
out.iRafNet_Filtered=out.iRafNet[(out.iRafNet[,3]>0), ]
head(out.iRafNet_Filtered)
min(out.iRafNet_Filtered[,3])
max(out.iRafNet_Filtered[,3])
summary(out.iRafNet_Filtered[,3])
boxplot(out.iRafNet_Filtered[,3])
hist(out.iRafNet_Filtered[,3])
# 4. Run iRafNet for M permuted data sets
dim(data.st)
head(data.st)
data.st=t(data.st)
dim(W)
out.perm<-Run_permutation(data.st, W, mtry=round(sqrt(NumUniqGenes-1)), ntree=1000, genes.name, 10)
#-----------------------------
# Z. Save nets into RData
# Save module colors and labels for use in subsequent parts
RDataFileName=paste0(gsub(".csv","",InputFileName),"_FilteredSortedFor_", gsub(".tab","", DapDataFileName), "__iRafNet2.RData")
RDataFileName
save(out.perm, out.iRafNet,data.st, W, MTRY,genes.name, file = RDataFileName)
#-----------------------------
# Load network data saved in the second part.
lnames = load(file = RDataFileName);
#The variable lnames contains the names of loaded variables.
lnames
#-----------------------------
head(out.perm)
dim(out.perm)
head(out.iRafNet)
dim(out.iRafNet)
# 5. Derive final networks
final.net<-iRafNet_network(out.iRafNet,out.perm, 100)
?iRafNet_network
dim(final.net)
head(final.net)
# 6. Matrix of true regulations
# NOTE(review): the original referenced an undefined object `out`; the
# importance-score table produced earlier in this script is `out.iRafNet`.
# The "G1"/"G2" gene labels look copied from the iRafNet package example --
# confirm they are meaningful for this dataset.
truth <- out.iRafNet[, seq(1, 2)]
truth <- cbind(
  as.character(truth[, 1]),
  as.character(truth[, 2]),
  # was rep(0,,dim(out)[1]): empty argument plus undefined `out`
  as.data.frame(rep(0, dim(out.iRafNet)[1]))
)
truth[(truth[, 1] == "G2" & truth[, 2] == "G1") |
  (truth[, 1] == "G1" & truth[, 2] == "G2"), 3] <- 1
# 6-1. Plot ROC curve and compute AUC
auc <- roc_curve(out.iRafNet, truth)
| /script/randomforest/iRafNet_Microarray_2017-05-06_JL.R | no_license | LiLabAtVT/RegulatoryNetwork | R | false | false | 7,607 | r | ## README: If you want to see how your input data works here, you can run:
#-----------------------------
# 0. Setup environment & load dataset
getwd();
workingDir = "/Users/jylee43/Google Drive/coursework_2017SPRING/ProblemSolving/4_ReadDataWork/Microarray";
setwd(workingDir);
getwd();
fileList=list.files(workingDir, pattern="_ave_TopVar.csv")
fileList
#-----------------------------
# 1. load dataset
InputFileName="GSE10670_ave_TopVar.csv"
InputFileName=fileList[1]
InputFileName
ExprData <- read.table(InputFileName, sep = "," , header = T, na.strings ="", stringsAsFactors= F)
dim(ExprData)
head(ExprData)
#--------
# sort
head(ExprData[,1])
#ExprData[order(ExprData[,1]),]
ExprDataOrdered= ExprData[order(ExprData[,1]),]
head(ExprDataOrdered)
#--------
exp.data = ExprDataOrdered[, 2: (dim(ExprDataOrdered)[2]) ]
rownames(exp.data) = ExprDataOrdered[,1]
dim(exp.data)
head(exp.data)
#-----------------------------
# 1. load dataset
DapDataFileName="AllUniq.chr1-5_GEM_events.nS_targets.tab"
DapData <- read.table(paste0("/Users/jylee43/Google Drive/coursework_2017SPRING/ProblemSolving/4_ReadDataWork/", DapDataFileName), sep = "\t" , header = T, na.strings ="", stringsAsFactors= F)
dim(DapData)
head(DapData)
head(rownames(exp.data) )
head(DapData[1])
head(DapData[2])
DapData[1,1] %in% rownames(exp.data)
DapData[1,2] %in% rownames(exp.data)
DapData[1,1] %in% rownames(exp.data) && DapData[1,2] %in% rownames(exp.data)
(DapData[2,] %in% rownames(exp.data) )[2] * (DapData[2,] %in% rownames(exp.data) )[1]
head(DapData[,1] %in% rownames(exp.data))
head(DapData[,2] %in% rownames(exp.data))
cbind(head(DapData), (as.matrix(head(DapData[,1] %in% rownames(exp.data)) * head(DapData[,2] %in% rownames(exp.data)))))
DapFiltered=as.matrix(DapData[,1] %in% rownames(exp.data) * DapData[,2] %in% rownames(exp.data))
DapData_Filtered= cbind(DapData, DAP_RNA=DapFiltered)
head(DapData_Filtered)
head(DapData_Filtered[,3])
head( DapData_Filtered[which(DapData_Filtered[,3]==1), ] )
DapFilteredData= DapData_Filtered[which(DapData_Filtered[,3]==1), c(1,2) ]
dim(DapFilteredData)
head(DapFilteredData)
dim(DapData)
#-----------------------------
DapDataFileName
InputFileName
OutputFileName=paste0(gsub(".csv","",InputFileName),"_", gsub(".tab",".csv", DapDataFileName))
OutputFileName
write.csv(DapFilteredData, file = OutputFileName, row.names=FALSE)
#-----------------------------#-----------------------------
format(Sys.time(), "%Y/%m/%d_%H:%M:%S")
DapFilteredList=list()
DapFilteredList
for ( i in c(1:length(DapData[,1])) )
{
print(i)
if (DapData[i,1] %in% rownames(exp.data) && DapData[i,2] %in% rownames(exp.data))
{
#print(DapData[i,])
DapFilteredList=rbind(DapFilteredList, DapData[i,])
}
}
format(Sys.time(), "%Y/%m/%d_%H:%M:%S")
#-----------------------------#-----------------------------
#http://stackoverflow.com/questions/16584948/how-to-create-weighted-adjacency-list-matrix-from-edge-list
library(igraph)
head(DapFilteredData)
DapFilteredDataList=c(DapFilteredData[,1],DapFilteredData[,2])
NumUniqGenes= length(unique(DapFilteredDataList))
NumUniqGenes
DapFilteredDataGraph=graph.data.frame(DapFilteredData)
DapFilteredDataGraph
DapFilteredDataGraphMatrix=get.adjacency(DapFilteredDataGraph,sparse=FALSE)
dim(DapFilteredDataGraphMatrix)
sum(DapFilteredDataGraphMatrix)
#-----------------------------#-----------------------------
format(Sys.time(), "%Y/%m/%d_%H:%M:%S")
DapFilteredList=list()
DapFilteredList
for ( i in c(1:length(DapData[,1])) )
{
print(i)
if (DapData[i,1] %in% rownames(exp.data) && DapData[i,2] %in% rownames(exp.data))
{
#print(DapData[i,])
DapFilteredList=rbind(DapFilteredList, DapData[i,])
}
}
#-----------------------------#-----------------------------
#-----------------------------
library(iRafNet)
dim(exp.data)
DapFilteredDataUniqueGenes =unique(DapFilteredDataList)
head(DapFilteredDataUniqueGenes)
length(DapFilteredDataUniqueGenes)
exp.data_Filtered = exp.data[rownames(exp.data) %in% DapFilteredDataUniqueGenes, ]
dim(exp.data_Filtered)
head(exp.data_Filtered)
#-------
head(rownames(DapFilteredDataGraphMatrix))
head(rownames(exp.data_Filtered))
indx=match(rownames(DapFilteredDataGraphMatrix), rownames(exp.data_Filtered))
head(indx)
head(exp.data_Filtered[c(indx),])
exp.data_FilteredMatched=exp.data_Filtered[c(indx),]
head(exp.data_FilteredMatched)
head(rownames(DapFilteredDataGraphMatrix))
OutputFileName=paste0(gsub(".csv","",InputFileName),"_FilteredSortedFor_", gsub("tab","csv", DapDataFileName))
OutputFileName
write.csv(exp.data_FilteredMatched, file = OutputFileName)
#-----------------------------
exp.data=t(exp.data_FilteredMatched)
dim(exp.data)
# 2. Standardize variables to mean 0 and variance 1
exp.data.st1 =(apply(exp.data, 1, function(x) { (x - mean(x)) / sd(x) } )) #st for column, genes
exp.data.st2 =(apply(exp.data.st1, 2, function(x) { (x - mean(x)) / sd(x) } )) #st for column, genes
par(mar=c(8, 4, 4, 4) + 0.1)
par(mfrow = c(1,3));
boxplot(t(exp.data), las = 2)
boxplot((exp.data.st1), las = 2)
boxplot((exp.data.st2), las = 2)
#-----------------------------#-----------------------------
# 2-1. Visualization of Standardized variables
par(mfrow = c(1,2));
boxplot(data, ylim=c(-4, 3))
abline(h=c(-1,1), lty=2,col="blue"); abline(h=c(0), lty=1,col="red");
boxplot(data.st, ylim=c(-4, 3))
abline(h=c(-1,1), lty=2,col="blue"); abline(h=c(0), lty=1,col="red");
summary(data)
summary(data.st)
#-----------------------------#-----------------------------
# 3. Run iRafNet and obtain importance score of regulatory relationships
library(iRafNet)
data.st=(exp.data.st2)
dim(data.st)
W=DapFilteredDataGraphMatrix
dim(W)
NumUniqGenes
MTRY=round(sqrt(NumUniqGenes-1))
MTRY
genes.name=rownames(data.st)
head(genes.name)
out.iRafNet<-iRafNet(data.st, W, mtry=round(sqrt(NumUniqGenes-1)), ntree=1000, genes.name)
#-----------------------------
str(out.iRafNet)
head(out.iRafNet)
dim(out.iRafNet)
min(out.iRafNet[,3])
max(out.iRafNet[,3])
summary(out.iRafNet[,3])
boxplot(out.iRafNet[,3])
hist(out.iRafNet[,3])
dim(out.iRafNet)
dim(out.iRafNet[(out.iRafNet[,3]>0), ])
out.iRafNet_Filtered=out.iRafNet[(out.iRafNet[,3]>0), ]
head(out.iRafNet_Filtered)
min(out.iRafNet_Filtered[,3])
max(out.iRafNet_Filtered[,3])
summary(out.iRafNet_Filtered[,3])
boxplot(out.iRafNet_Filtered[,3])
hist(out.iRafNet_Filtered[,3])
# 4. Run iRafNet for M permuted data sets
dim(data.st)
head(data.st)
data.st=t(data.st)
dim(W)
out.perm<-Run_permutation(data.st, W, mtry=round(sqrt(NumUniqGenes-1)), ntree=1000, genes.name, 10)
#-----------------------------
# Z. Save nets into RData
# Save module colors and labels for use in subsequent parts
RDataFileName=paste0(gsub(".csv","",InputFileName),"_FilteredSortedFor_", gsub(".tab","", DapDataFileName), "__iRafNet2.RData")
RDataFileName
save(out.perm, out.iRafNet,data.st, W, MTRY,genes.name, file = RDataFileName)
#-----------------------------
# Load network data saved in the second part.
lnames = load(file = RDataFileName);
#The variable lnames contains the names of loaded variables.
lnames
#-----------------------------
head(out.perm)
dim(out.perm)
head(out.iRafNet)
dim(out.iRafNet)
# 5. Derive final networks
final.net<-iRafNet_network(out.iRafNet,out.perm, 100)
?iRafNet_network
dim(final.net)
head(final.net)
# 6. Matrix of true regulations
# NOTE(review): fixed undefined object `out` -> `out.iRafNet`, and the
# empty argument in rep(0,,...).
truth <- out.iRafNet[, seq(1, 2)]
truth <- cbind(
  as.character(truth[, 1]),
  as.character(truth[, 2]),
  as.data.frame(rep(0, dim(out.iRafNet)[1]))
)
truth[(truth[, 1] == "G2" & truth[, 2] == "G1") |
  (truth[, 1] == "G1" & truth[, 2] == "G2"), 3] <- 1
# 6-1. Plot ROC curve and compute AUC
auc <- roc_curve(out.iRafNet, truth)
|
# Instrumented wrapper around hetGP's compiled Matern-5/2 Wijs routine.
# Before delegating to the C++ implementation it appends this call's
# arguments to the global `data.env` environment -- presumably so a test
# harness can replay recorded inputs (TODO confirm against the harness).
function (Mu1, Mu2, sigma)
{
# Record the arguments of this invocation in the global call log
e <- get("data.env", .GlobalEnv)
e[["Wijs_mat52_cpp"]][[length(e[["Wijs_mat52_cpp"]]) + 1]] <- list(Mu1 = Mu1,
Mu2 = Mu2, sigma = sigma)
# Delegate to the compiled implementation; its result is the return value
.Call("_hetGP_Wijs_mat52_cpp", PACKAGE = "hetGP", Mu1, Mu2,
sigma)
}
| /valgrind_test_dir/Wijs_mat52_cpp-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 266 | r | function (Mu1, Mu2, sigma)
{
e <- get("data.env", .GlobalEnv)
e[["Wijs_mat52_cpp"]][[length(e[["Wijs_mat52_cpp"]]) + 1]] <- list(Mu1 = Mu1,
Mu2 = Mu2, sigma = sigma)
.Call("_hetGP_Wijs_mat52_cpp", PACKAGE = "hetGP", Mu1, Mu2,
sigma)
}
|
###################################
### Author:Eduardo Clark
### Project: Homicides and Futbol
### Date: September 2013
### For mediotiempo.com
###################################
### Run all for project
# NOTE: stages are order-dependent -- each sourced script consumes objects
# created by the previous one, so run this file top to bottom from the
# project root (all paths are relative to it).
source("src/loadLibraries.R") #Load Libraries
#Get and clean data
source("src/GameDates.R") ## Get Game Dates from 2007-2011
source("src/CleanMatches.R") ## Clean Game Dates and create complete DF of Game Dates
source("src/HomicideData.R") ## Load Homicide Data and merge with Game Dates
#Game Day effect estimation
source("src/EffectsEstimation.R") #Some estimation scripts and more data cleaning
source("src/NegativeBinomial.R") #Estimation results from the negative binomial and ZI models
#Summary Statistics for tables
source("src/SummaryStatistics.R") ##Latex Text outputs to latex-plots
| /RunAll.R | no_license | EduardoClark/Football-Homicides | R | false | false | 805 | r | ###################################
### Author:Eduardo Clark
### Project: Homicides and Fútbol
### Date: September 2013
### For mediotiempo.com
###################################
### Run all for project
source("src/loadLibraries.R") #Load Libraries
#Get and clean data
source("src/GameDates.R") ## Get Game Dates from 2007-2011
source("src/CleanMatches.R") ## Clean Game Dates and create complete DF of Game Dates
source("src/HomicideData.R") ## Load Homicide Data and merge with Game Dates
#Game Day effect estimation
source("src/EffectsEstimation.R") #Some estimation scripts and more data cleaning
source("src/NegativeBinomial.R") #Estimation results from the negative binomial and ZI models
#Summary Statistics for tables
source("src/SummaryStatistics.R") ##Latex Text outputs to latex-plots
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_confirm_device}
\alias{cognitoidentityprovider_confirm_device}
\title{Confirms tracking of the device}
\usage{
cognitoidentityprovider_confirm_device(AccessToken, DeviceKey,
DeviceSecretVerifierConfig, DeviceName)
}
\arguments{
\item{AccessToken}{[required] The access token.}
\item{DeviceKey}{[required] The device key.}
\item{DeviceSecretVerifierConfig}{The configuration of the device secret verifier.}
\item{DeviceName}{The device name.}
}
\description{
Confirms tracking of the device. This API call is the call that begins
device tracking.
}
\section{Request syntax}{
\preformatted{svc$confirm_device(
AccessToken = "string",
DeviceKey = "string",
DeviceSecretVerifierConfig = list(
PasswordVerifier = "string",
Salt = "string"
),
DeviceName = "string"
)
}
}
\keyword{internal}
| /cran/paws.security.identity/man/cognitoidentityprovider_confirm_device.Rd | permissive | johnnytommy/paws | R | false | true | 953 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_confirm_device}
\alias{cognitoidentityprovider_confirm_device}
\title{Confirms tracking of the device}
\usage{
cognitoidentityprovider_confirm_device(AccessToken, DeviceKey,
DeviceSecretVerifierConfig, DeviceName)
}
\arguments{
\item{AccessToken}{[required] The access token.}
\item{DeviceKey}{[required] The device key.}
\item{DeviceSecretVerifierConfig}{The configuration of the device secret verifier.}
\item{DeviceName}{The device name.}
}
\description{
Confirms tracking of the device. This API call is the call that begins
device tracking.
}
\section{Request syntax}{
\preformatted{svc$confirm_device(
AccessToken = "string",
DeviceKey = "string",
DeviceSecretVerifierConfig = list(
PasswordVerifier = "string",
Salt = "string"
),
DeviceName = "string"
)
}
}
\keyword{internal}
|
### Estuary methods
# Bring in estuary layer
# Read the estuarine polygon WQS layer from the file geodatabase, give each
# polygon a unique ID built from its basin and OBJECTID, and reproject to
# EPSG:4326 (presumably to match the CRS of distinctSites_sf -- confirm).
estuarinePolys <- st_read('GIS/WQS_layers_05072020.gdb', layer = 'estuarinepolygons_05072020' , fid_column_name = "OBJECTID")%>%
mutate(UID = paste0('EP_', as.character(BASIN), "_", sprintf("%06d",as.numeric(as.character(OBJECTID))))) %>% # using OBEJCTID as row number for now
st_transform(4326)
# Identify which subbasins needs esturine work
subB <- c("Potomac River", "Rappahannock River", "Atlantic Ocean Coastal", "Chesapeake Bay Tributaries",
"Chesapeake Bay - Mainstem", "James River - Lower", "Appomattox River" , "Chowan River",
"Atlantic Ocean - South" , "Dismal Swamp/Albemarle Sound")
# Identify sites in said subB that will have estuary WQS attempted to be joined
distinctSites_sf_e <- filter(distinctSites_sf, SUBBASIN %in% subB)
# First work with Polygons since computationally faster
# Spatially join to polygon layer and create table to store links
# (rows with NA OBJECTID did not intersect any polygon and are dropped)
estuaryPolyWQS <- st_join(distinctSites_sf_e, estuarinePolys, join = st_intersects) %>%
filter(!is.na(OBJECTID))
# Append the StationID -> UID links to the running WQS lookup table
WQStable <- bind_rows(WQStable,
dplyr::select(estuaryPolyWQS, FDT_STA_ID, UID) %>%
st_drop_geometry() %>%
rename('StationID' = 'FDT_STA_ID')) %>%
drop_na()
# Now remove the sites that fell into estuary polygons from list of sites to test against estuary lines
distinctSites_sf_e <- filter(distinctSites_sf_e, ! FDT_STA_ID %in% estuaryPolyWQS$FDT_STA_ID)
# clean up workspace
rm(estuarinePolys); rm(estuaryPolyWQS)
## now work with estuarine lines
# NOTE(review): the bare expression below only prints the function body to
# the console -- it looks like leftover debugging; confirm and remove.
snapAndOrganizeWQS
snapAndOrganizeAU_ListOutput(irData_join, riverineAUs,
bufferDistances = seq(10,50,by=10),
outDir = 'data/preAnalyzedRegionalAUdata/BRRO/Riverine/')
| /1.preprocessData/preprocessingModules/estuaryWQS.R | no_license | EmmaVJones/IR2022 | R | false | false | 1,822 | r | ### Estuary methods
# Bring in estuary layer
estuarinePolys <- st_read('GIS/WQS_layers_05072020.gdb', layer = 'estuarinepolygons_05072020' , fid_column_name = "OBJECTID")%>%
mutate(UID = paste0('EP_', as.character(BASIN), "_", sprintf("%06d",as.numeric(as.character(OBJECTID))))) %>% # using OBEJCTID as row number for now
st_transform(4326)
# Identify which subbasins needs esturine work
subB <- c("Potomac River", "Rappahannock River", "Atlantic Ocean Coastal", "Chesapeake Bay Tributaries",
"Chesapeake Bay - Mainstem", "James River - Lower", "Appomattox River" , "Chowan River",
"Atlantic Ocean - South" , "Dismal Swamp/Albemarle Sound")
# Identify sites in said subB that will have estuary WQS attempted to be joined
distinctSites_sf_e <- filter(distinctSites_sf, SUBBASIN %in% subB)
# First work with Polygons since computationally faster
# Spatially join to polygon layer and create table to store links
estuaryPolyWQS <- st_join(distinctSites_sf_e, estuarinePolys, join = st_intersects) %>%
filter(!is.na(OBJECTID))
WQStable <- bind_rows(WQStable,
dplyr::select(estuaryPolyWQS, FDT_STA_ID, UID) %>%
st_drop_geometry() %>%
rename('StationID' = 'FDT_STA_ID')) %>%
drop_na()
# Now remove the sites that fell into estuary polygons from list of sites to test against estuary lines
distinctSites_sf_e <- filter(distinctSites_sf_e, ! FDT_STA_ID %in% estuaryPolyWQS$FDT_STA_ID)
# clean up workspace
rm(estuarinePolys); rm(estuaryPolyWQS)
## now work with estuarine lines
snapAndOrganizeWQS
snapAndOrganizeAU_ListOutput(irData_join, riverineAUs,
bufferDistances = seq(10,50,by=10),
outDir = 'data/preAnalyzedRegionalAUdata/BRRO/Riverine/')
|
# Created on : 29-06-2021
# Course work:
# @author: Harsha Vardhan
# Source:
# Demonstration: subsetting a vector via character (name) indices.
x <- setNames(c(3, 0, 9), c("first", "second", "third"))
x
x["second"]
x[c("first", "third")] | /harsha1/character-vector.R | no_license | tactlabs/r-samples | R | false | false | 190 | r | # Created on : 29-06-2021
# Course work:
# @author: Harsha Vardhan
# Source:
#Using character vector as index
# Named-vector indexing demo: build the vector, attach names, then look an
# element up by name.
x <- c(3, 0, 9)
names(x) <- c("first", "second", "third")
x
x["second"]
x[c("first", "third")] |
# ---------------------------------------------------------------------------
# Integrated single-cell RNA + protein (SCBC) analysis across seven chips.
# NOTE(review): written against the Seurat v2-era API (@ slots, FilterCells,
# FindVariableGenes); will not run unmodified under Seurat >= 3.
# ---------------------------------------------------------------------------
library(Seurat)
library(dplyr)
library(viridis)
library(reshape2)
library(extrafont)
# NOTE(review): machine-specific absolute path; every load()/read below is
# resolved relative to this directory. Consider a project-relative path.
setwd("C:/Users/alexm/Documents/git/Protein Analysis/")
# Keep a gene only if it is detected in at least this many chambers.
mingeneappearancethreshold <- 5
# Discard chambers with fewer total UMIs than this.
lowUMIpercellthreshold <- 500
# Discard chambers with fewer detected genes than this.
lowgenepercellthreshold <- 100
# load("AX206genes")
# AX206 <- Genes
# load("AX207genes")
# AX207 <- Genes
# load("AX208genes")
# AX208 <- Genes
# load("AX206Redogenes")
# AX206Redo <- Genes
# load("AX208Redogenes")
# AX208Redo <- Genes
# load("AX218genes")
# AX218 <- Genes
# load("AX219genes")
# AX219 <- Genes
# colnames(IntegratedData) <- gsub("X", "AX219X", colnames(IntegratedData))
# colnames(NoCellIntegratedData) <- gsub("X", "AX219X", colnames(NoCellIntegratedData))
# save(list = c("IntegratedData", "NoCellIntegratedData"), file = "AX219alldata")
# Load the per-chip integrated workspaces. Each "AX*alldata" file is expected
# to define two objects -- IntegratedData (chambers with cells) and
# NoCellIntegratedData (empty-chamber controls) -- which are copied to
# chip-specific names before the next load() overwrites them.
print("Loading data")
load("AX206alldata")
AX206all <- IntegratedData
AX206NoCell <- NoCellIntegratedData
load("AX207alldata")
AX207all <- IntegratedData
AX207NoCell <- NoCellIntegratedData
load("AX208alldata")
AX208all <- IntegratedData
AX208NoCell <- NoCellIntegratedData
load("AX206Redoalldata")
AX206Redoall <- IntegratedData
AX206RedoNoCell <- NoCellIntegratedData
load("AX208Redoalldata")
AX208Redoall <- IntegratedData
AX208RedoNoCell <- NoCellIntegratedData
load("AX218alldata")
AX218all <- IntegratedData
AX218NoCell <- NoCellIntegratedData
load("AX219alldata")
AX219all <- IntegratedData
AX219NoCell <- NoCellIntegratedData
# Provenance: how the AllProteinValues workspace was originally assembled.
# save(list=c("AX206Vals","AX207Vals","AX208Vals","AX218Vals","AX219Vals","AX206Zeros","AX207Zeros","AX208Zeros","AX218Zeros","AX219Zeros","AX206RedoVals","AX208RedoVals","AX206RedoZeros","AX208RedoZeros"), file = "AllProteinValues")
# Defines AX*Vals (per-chamber protein tables; columns 1:3 appear to be the
# protein channels, column 4 the cell count -- see the Zeros subsetting below)
# and AX*Zeros (the zero-cell subset of each Vals table).
load("AllProteinValues")
# AX206Vals <- data.frame(t(ProteinsPerBeads))
# rownames(AX219Vals) <- gsub("X","AX219X",rownames(AX219Vals))
# AX219Zeros <- AX219Vals[which(AX219Vals[,4]==0),]
print("Applying normalization and background subtraction")

# Per-chip fluorescence background: mean of the three protein channels
# (columns 1:3) over that chip's zero-cell chambers.
chipBackground <- function(zeros) {
  apply(zeros, 2, mean)[1:3]
}

# Background-subtract and rescale one chip's protein table.
# `conversion` is background/100, so the transformed channels equal the
# percent signal above background: 100 * (raw - background) / background.
# Columns beyond 1:3 (cell/bead counts) are left untouched.
normalizeChipProteins <- function(vals, background, conversion) {
  vals[, 1:3] <- t(apply(vals[, 1:3], 1, function(x) (x - background) / conversion))
  vals
}

# Per-chip globals are kept (names unchanged) in case downstream/interactive
# code references them; only the 7x duplicated arithmetic is deduplicated.
AX206Background <- chipBackground(AX206Zeros)
AX207Background <- chipBackground(AX207Zeros)
AX208Background <- chipBackground(AX208Zeros)
AX218Background <- chipBackground(AX218Zeros)
AX219Background <- chipBackground(AX219Zeros)
AX206RedoBackground <- chipBackground(AX206RedoZeros)
AX208RedoBackground <- chipBackground(AX208RedoZeros)
AX206ConversionFactors <- AX206Background/100
AX207ConversionFactors <- AX207Background/100
AX208ConversionFactors <- AX208Background/100
AX218ConversionFactors <- AX218Background/100
AX219ConversionFactors <- AX219Background/100
AX206RedoConversionFactors <- AX206RedoBackground/100
AX208RedoConversionFactors <- AX208RedoBackground/100
AX206NormalizedProteins <- normalizeChipProteins(AX206Vals, AX206Background, AX206ConversionFactors)
AX207NormalizedProteins <- normalizeChipProteins(AX207Vals, AX207Background, AX207ConversionFactors)
AX208NormalizedProteins <- normalizeChipProteins(AX208Vals, AX208Background, AX208ConversionFactors)
AX218NormalizedProteins <- normalizeChipProteins(AX218Vals, AX218Background, AX218ConversionFactors)
AX219NormalizedProteins <- normalizeChipProteins(AX219Vals, AX219Background, AX219ConversionFactors)
AX206RedoNormalizedProteins <- normalizeChipProteins(AX206RedoVals, AX206RedoBackground, AX206RedoConversionFactors)
AX208RedoNormalizedProteins <- normalizeChipProteins(AX208RedoVals, AX208RedoBackground, AX208RedoConversionFactors)
# AX206NormalizedProteins[,"Chip"] <- "AX206"
# AX207NormalizedProteins[,"Chip"] <- "AX207"
# AX208NormalizedProteins[,"Chip"] <- "AX208"
# AX218NormalizedProteins[,"Chip"] <- "AX218"
# AX219NormalizedProteins[,"Chip"] <- "AX219"
# Drop the last 12 rows of each chip's integrated matrix to obtain the
# gene-only count table. NOTE(review): the trailing rows appear to hold
# non-gene features appended below the genes (per the commented-out SCBC
# extraction elsewhere, the protein channels sit in rows nrow-3 .. nrow-1);
# confirm the block is always exactly 12 rows.
dropTrailingRows <- function(x, n = 12) {
  x[-((nrow(x) - n + 1):nrow(x)), , drop = FALSE]
}
AX206 <- dropTrailingRows(AX206all)
AX207 <- dropTrailingRows(AX207all)
AX208 <- dropTrailingRows(AX208all)
AX206Redo <- dropTrailingRows(AX206Redoall)
AX208Redo <- dropTrailingRows(AX208Redoall)
AX218 <- dropTrailingRows(AX218all)
AX219 <- dropTrailingRows(AX219all)
print("Creating Seurat objects")

# Build, QC-filter, normalize and scale one chip's Seurat (v2) object.
#   raw      gene x chamber count table (trailing non-gene rows removed)
#   project  chip identifier, stored as orig.ident
#   celltype cell line run on the chip ("U87" or "HEK")
# Returns the object with variable genes computed and a percent.mito metadata
# column (fraction of raw counts on "MT-" genes). Replaces seven near-identical
# copy-pasted stanzas; the loop temporaries mito.genes/percent.mito no longer
# leak into the global environment (they were unused downstream).
makeChipSeurat <- function(raw, project, celltype) {
  obj <- CreateSeuratObject(raw.data = raw, project = project,
                            min.cells = mingeneappearancethreshold)
  obj@meta.data$celltype <- celltype
  obj <- FilterCells(obj, subset.names = c("nUMI", "nGene"),
                     low.thresholds = c(lowUMIpercellthreshold, lowgenepercellthreshold),
                     high.thresholds = c(Inf, Inf))
  obj <- NormalizeData(obj, display.progress = FALSE)
  obj <- ScaleData(obj, display.progress = FALSE)
  obj <- FindVariableGenes(obj, do.plot = FALSE, display.progress = FALSE)
  # Mito genes are taken from the filtered @data rownames, but the fraction is
  # computed over @raw.data counts -- preserved from the original code.
  mito.genes <- grep(pattern = "^MT-", x = rownames(x = obj@data), value = TRUE)
  percent.mito <- Matrix::colSums(obj@raw.data[mito.genes, ]) /
    Matrix::colSums(obj@raw.data)
  AddMetaData(object = obj, metadata = percent.mito, col.name = "percent.mito")
}

AX206S <- makeChipSeurat(AX206, "AX206", "U87")
AX207S <- makeChipSeurat(AX207, "AX207", "HEK")
AX208S <- makeChipSeurat(AX208, "AX208", "HEK")
AX218S <- makeChipSeurat(AX218, "AX218", "U87")
AX219S <- makeChipSeurat(AX219, "AX219", "U87")
AX206RedoS <- makeChipSeurat(AX206Redo, "AX206Redo", "U87")
AX208RedoS <- makeChipSeurat(AX208Redo, "AX208Redo", "HEK")
# U871 <- read.csv("GSM2794663_U87_con_1_Genes_ReadCount.txt", sep = "\t", row.names = 1)
# colnames(U871) <- "U87Control1"
# U872 <- read.csv("GSM2794664_U87_con_2_Genes_ReadCount.txt", sep = "\t", row.names = 1)
# colnames(U872) <- "U87Control2"
#
# HEKCombinedSingleCell <- CombinedGenesbyMerge@raw.data[,CombinedGenesbyMerge@meta.data$celltype=="HEK"]
# U87CombinedSingleCell <- CombinedGenesbyMerge@raw.data[,CombinedGenesbyMerge@meta.data$celltype=="U87"]
# U87CombinedSingleCell <- apply(U87CombinedSingleCell,1,mean)
# HEKCombinedSingleCell <- apply(HEKCombinedSingleCell,1,mean)
#
# BulkComp <- data.frame(cbind(U87CombinedSingleCell, HEKCombinedSingleCell))
# CombinedS <- CreateSeuratObject(raw.data=BulkComp, project="CombinedCells")
# CombinedS@meta.data$celltype <- "U87"
# CombinedS <- FilterCells(CombinedS, subset.names=c("nUMI","nGene"), low.thresholds=c(lowUMIpercellthreshold,lowgenepercellthreshold), high.thresholds=c(Inf,Inf))
# CombinedS <- NormalizeData(CombinedS, display.progress=F)
# CombinedS <- ScaleData(CombinedS, display.progress=F)
# CombinedS <- FindVariableGenes(CombinedS, do.plot = F, display.progress=F)
# # AX208RedoS <- SetAssayData(AX208RedoS, assay.type = "SCBC", slot = "raw.data", new.data = AX208Redoall[((nrow(AX208Redoall)-3):(nrow(AX208Redoall)-1)),])
# # AX208RedoS <- NormalizeData(AX208RedoS, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
# # AX208RedoS <- ScaleData(AX208RedoS, assay.type = "SCBC", display.progress = F)
# mito.genes <- grep(pattern = "^MT-", x = rownames(x = CombinedS@data), value = TRUE)
# percent.mito <- Matrix::colSums(CombinedS@raw.data[mito.genes, ])/Matrix::colSums(CombinedS@raw.data)
# CombinedS <- AddMetaData(object = CombinedS, metadata = percent.mito, col.name = "percent.mito")
# # GSM2794664
# U87S <- CreateSeuratObject(raw.data=U87BulkControls, project="U87Control1")
# U87S@meta.data$celltype <- "U87"
# U87S <- FilterCells(U87S, subset.names=c("nUMI","nGene"), low.thresholds=c(lowUMIpercellthreshold,lowgenepercellthreshold), high.thresholds=c(Inf,Inf))
# U87S <- NormalizeData(U87S, display.progress=F)
# U87S <- ScaleData(U87S, display.progress=F)
# U87S <- FindVariableGenes(U87S, do.plot = F, display.progress=F)
# # AX208RedoS <- SetAssayData(AX208RedoS, assay.type = "SCBC", slot = "raw.data", new.data = AX208Redoall[((nrow(AX208Redoall)-3):(nrow(AX208Redoall)-1)),])
# # AX208RedoS <- NormalizeData(AX208RedoS, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
# # AX208RedoS <- ScaleData(AX208RedoS, assay.type = "SCBC", display.progress = F)
# mito.genes <- grep(pattern = "^MT-", x = rownames(x = U87S@data), value = TRUE)
# percent.mito <- Matrix::colSums(U87S@raw.data[mito.genes, ])/Matrix::colSums(U87S@raw.data)
# U87S <- AddMetaData(object = U87S, metadata = percent.mito, col.name = "percent.mito")
#
# # GSM2599702
# HEKS <- CreateSeuratObject(raw.data=UMI_count, project="HEK")
# HEKS@meta.data$celltype <- "HEK"
# HEKS <- FilterCells(HEKS, subset.names=c("nUMI","nGene"), low.thresholds=c(lowUMIpercellthreshold,lowgenepercellthreshold), high.thresholds=c(Inf,Inf))
# HEKS <- NormalizeData(HEKS, display.progress=F)
# HEKS <- ScaleData(HEKS, display.progress=F)
# HEKS <- FindVariableGenes(HEKS, do.plot = F, display.progress=F)
# # AX208RedoS <- SetAssayData(AX208RedoS, assay.type = "SCBC", slot = "raw.data", new.data = AX208Redoall[((nrow(AX208Redoall)-3):(nrow(AX208Redoall)-1)),])
# # AX208RedoS <- NormalizeData(AX208RedoS, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
# # AX208RedoS <- ScaleData(AX208RedoS, assay.type = "SCBC", display.progress = F)
# mito.genes <- grep(pattern = "^MT-", x = rownames(x = HEKS@data), value = TRUE)
# percent.mito <- Matrix::colSums(HEKS@raw.data[mito.genes, ])/Matrix::colSums(HEKS@raw.data)
# HEKS <- AddMetaData(object = HEKS, metadata = percent.mito, col.name = "percent.mito")
print("Adding protein values to Seurat")
# For each chip: keep only chambers that survived the RNA QC filter (rownames
# matched against the Seurat object's cell.names), stash the pre-division
# table in *AllProts, then convert the three protein channels to per-cell
# values by dividing by the chamber's cell count (column 4).
# NOTE(review): a retained chamber with cell count 0 would yield Inf/NaN here
# -- confirm zero-cell chambers cannot pass the RNA filter.
AX206NormalizedProteins <- AX206NormalizedProteins[rownames(AX206NormalizedProteins) %in% AX206S@cell.names,]
AX206AllProts <- AX206NormalizedProteins
AX206NormalizedProteins[,1:3] <- AX206NormalizedProteins[,1:3]/AX206NormalizedProteins[,4]
AX207NormalizedProteins <- AX207NormalizedProteins[rownames(AX207NormalizedProteins) %in% AX207S@cell.names,]
AX207AllProts <- AX207NormalizedProteins
AX207NormalizedProteins[,1:3] <- AX207NormalizedProteins[,1:3]/AX207NormalizedProteins[,4]
AX208NormalizedProteins <- AX208NormalizedProteins[rownames(AX208NormalizedProteins) %in% AX208S@cell.names,]
AX208AllProts <- AX208NormalizedProteins
AX208NormalizedProteins[,1:3] <- AX208NormalizedProteins[,1:3]/AX208NormalizedProteins[,4]
AX218NormalizedProteins <- AX218NormalizedProteins[rownames(AX218NormalizedProteins) %in% AX218S@cell.names,]
AX218AllProts <- AX218NormalizedProteins
AX218NormalizedProteins[,1:3] <- AX218NormalizedProteins[,1:3]/AX218NormalizedProteins[,4]
AX219NormalizedProteins <- AX219NormalizedProteins[rownames(AX219NormalizedProteins) %in% AX219S@cell.names,]
AX219AllProts <- AX219NormalizedProteins
AX219NormalizedProteins[,1:3] <- AX219NormalizedProteins[,1:3]/AX219NormalizedProteins[,4]
AX206RedoNormalizedProteins <- AX206RedoNormalizedProteins[rownames(AX206RedoNormalizedProteins) %in% AX206RedoS@cell.names,]
AX206RedoAllProts <- AX206RedoNormalizedProteins
AX206RedoNormalizedProteins[,1:3] <- AX206RedoNormalizedProteins[,1:3]/AX206RedoNormalizedProteins[,4]
AX208RedoNormalizedProteins <- AX208RedoNormalizedProteins[rownames(AX208RedoNormalizedProteins) %in% AX208RedoS@cell.names,]
AX208RedoAllProts <- AX208RedoNormalizedProteins
AX208RedoNormalizedProteins[,1:3] <- AX208RedoNormalizedProteins[,1:3]/AX208RedoNormalizedProteins[,4]
# Attach a chip's per-cell protein measurements to its Seurat object as the
# "SCBC" assay, then CLR-normalize and scale that assay.
#   prot columns: 1:3 protein channels, 4 cell count, 5 bead count.
# NOTE(review): AddMetaData receives a 2-column data.frame; whether col.name
# overrides the frame's own column names depends on the Seurat v2
# implementation -- downstream code indexes meta.data positionally. Confirm.
# Replaces seven identical copy-pasted stanzas.
attachSCBC <- function(obj, prot) {
  obj <- SetAssayData(obj, assay.type = "SCBC", slot = "raw.data",
                      new.data = t(prot[, 1:3]))
  obj <- AddMetaData(object = obj, metadata = prot[, 4:5],
                     col.name = c("cells", "beads"))
  obj <- NormalizeData(obj, assay.type = "SCBC",
                       normalization.method = "genesCLR", display.progress = FALSE)
  ScaleData(obj, assay.type = "SCBC", display.progress = FALSE)
}

AX206S <- attachSCBC(AX206S, AX206NormalizedProteins)
AX207S <- attachSCBC(AX207S, AX207NormalizedProteins)
AX208S <- attachSCBC(AX208S, AX208NormalizedProteins)
AX218S <- attachSCBC(AX218S, AX218NormalizedProteins)
AX219S <- attachSCBC(AX219S, AX219NormalizedProteins)
AX206RedoS <- attachSCBC(AX206RedoS, AX206RedoNormalizedProteins)
AX208RedoS <- attachSCBC(AX208RedoS, AX208RedoNormalizedProteins)
# AX206SGeneNames <- head(rownames(AX206S@hvg.info), 1000)
# AX207SGeneNames <- head(rownames(AX207S@hvg.info), 1000)
# AX208SGeneNames <- head(rownames(AX208S@hvg.info), 1000)
# AX218SGeneNames <- head(rownames(AX218S@hvg.info), 1000)
# AX219SGeneNames <- head(rownames(AX219S@hvg.info), 1000)
# AX206RedoSGeneNames <- head(rownames(AX206RedoS@hvg.info), 1000)
# AX208RedoSGeneNames <- head(rownames(AX208RedoS@hvg.info), 1000)
print("Integrating multiple chips")
# Tag each chip's per-cell protein table with its chip ID before stacking.
AX206NormalizedProteins[,"Chip"] <- "AX206"
AX207NormalizedProteins[,"Chip"] <- "AX207"
AX208NormalizedProteins[,"Chip"] <- "AX208"
AX218NormalizedProteins[,"Chip"] <- "AX218"
AX219NormalizedProteins[,"Chip"] <- "AX219"
AX206RedoNormalizedProteins[,"Chip"] <- "AX206Redo"
AX208RedoNormalizedProteins[,"Chip"] <- "AX208Redo"
# Stack all chips: per-cell normalized values and raw (pre-division) values.
Allprotsnormalized <- rbind(AX206NormalizedProteins,AX207NormalizedProteins,AX208NormalizedProteins,AX218NormalizedProteins,AX219NormalizedProteins, AX206RedoNormalizedProteins, AX208RedoNormalizedProteins)
Allprotsall <- rbind(AX206AllProts,AX207AllProts,AX208AllProts,AX218AllProts,AX219AllProts, AX206RedoAllProts, AX208RedoAllProts)
# Derive a chip label from the chamber rowname: "AX..." -> "A...", then strip
# everything from the first "X" onward (e.g. "AX206X3Y4" -> "A206").
# NOTE(review): the leading "*" in the pattern looks unintended; verify the
# regex produces the expected labels.
Allprotsall["Chip"] <- gsub(pattern = "*X(.*)", replacement="", x=gsub(pattern = "AX",replacement="A", x=rownames(Allprotsall)))
# Human-readable protein names for the three channels.
colnames(Allprotsall)[1:3] <- c("PKM2","c-MYC","PDHK1")
# Long format for plotting; assumes columns 4:5 are named "Cells"/"Beads" --
# TODO confirm against the Vals tables loaded from AllProteinValues.
AllprotsallPlot <- melt(Allprotsall, id=c("Cells","Beads", "Chip"))
AllprotsallPlot[,1] <- as.factor(AllprotsallPlot[,1])
Allprotsnormalizedplot <- melt(Allprotsnormalized, id=c("Cells", "Beads", "Chip"))
Allprotsnormalizedplot$Chip <- as.factor(Allprotsnormalizedplot$Chip)
Allprotsnormalizedplot$Cells <- as.factor(Allprotsnormalizedplot$Cells)
# Map each chip to the cell line it carried.
Allprotsnormalizedplot["Celltype"] <- NA
Allprotsnormalizedplot$Celltype[Allprotsnormalizedplot$Chip %in% c("AX206", "AX206Redo", "AX218", "AX219")] <- "U87"
Allprotsnormalizedplot$Celltype[Allprotsnormalizedplot$Chip %in% c("AX208", "AX208Redo", "AX207")] <- "HEK"
# Quick per-chip boxplot of normalized protein values, points sized by cells.
ggplot(Allprotsnormalizedplot) + geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3)+geom_point(aes(x=variable, y=value, fill=Chip, size=Cells), position=position_dodge(width = 0.75), alpha=0.5)+scale_size_discrete(range = c(1,5))
print("Choosing variable genes")
# Per-chip variable gene sets (kept as individual globals, as before).
AX206SGeneNames <- AX206S@var.genes
AX207SGeneNames <- AX207S@var.genes
AX208SGeneNames <- AX208S@var.genes
AX218SGeneNames <- AX218S@var.genes
AX219SGeneNames <- AX219S@var.genes
AX206RedoSGeneNames <- AX206RedoS@var.genes
AX208RedoSGeneNames <- AX208RedoS@var.genes
# Pool the per-chip sets (concatenation order preserved from the original),
# then restrict to genes present in every chip's raw matrix. AX207 is omitted
# from the intersection step, as in the original run -- NOTE(review): confirm
# that omission is intentional.
GenestoUse <- unique(c(AX206SGeneNames, AX207SGeneNames, AX208SGeneNames,
                       AX206RedoSGeneNames, AX208RedoSGeneNames,
                       AX218SGeneNames, AX219SGeneNames))
GenestoUse <- Reduce(intersect,
                     list(rownames(AX206S@raw.data),
                          rownames(AX208S@raw.data),
                          rownames(AX208RedoS@raw.data),
                          rownames(AX206RedoS@raw.data),
                          rownames(AX218S@raw.data),
                          rownames(AX219S@raw.data)),
                     GenestoUse)
# Pairwise merges of per-chip objects. HEKOnly / U87Only are built here but
# not used again in the visible script (kept for interactive inspection?).
HEKOnly <- MergeSeurat(AX207S, AX208S)
HEKOnly <- MergeSeurat(HEKOnly, AX208RedoS)
U87Only <- MergeSeurat(AX206S, AX206RedoS)
U87Only <- MergeSeurat(U87Only, AX218S)
U87Only <- MergeSeurat(U87Only, AX219S)
# All seven chips merged into one object for the joint analysis below.
CombinedGenesbyMerge <- MergeSeurat(AX206S, AX207S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX208S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX218S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX219S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX206RedoS)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX208RedoS)
# Allprotein <- cbind(AX206all[((nrow(AX206all)-3):(nrow(AX206all)-1)),], AX207all[((nrow(AX207all)-3):(nrow(AX207all)-1)),], AX208all[((nrow(AX208all)-3):(nrow(AX208all)-1)),],
#                     AX218all[((nrow(AX218all)-3):(nrow(AX218all)-1)),], AX219all[((nrow(AX219all)-3):(nrow(AX219all)-1)),], AX206Redoall[((nrow(AX206Redoall)-3):(nrow(AX206Redoall)-1)),])
# Re-attach the stacked protein table as the SCBC assay of the merged object.
# NOTE(review): this relies on the rows of Allprotsnormalized lining up with
# (or being matched by name to) the merged object's cells -- verify, since
# SetAssayData is given the full stacked table directly.
CombinedGenesbyMerge <- SetAssayData(CombinedGenesbyMerge, assay.type = "SCBC", slot = "raw.data", new.data = t(Allprotsnormalized[,1:3]))
CombinedGenesbyMerge <- NormalizeData(CombinedGenesbyMerge, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
CombinedGenesbyMerge <- ScaleData(CombinedGenesbyMerge, assay.type = "SCBC", display.progress = F)
print("Analyzing combined data")
# External dependency: BulkComp.R must define TestBulkvar (the variable gene
# set used below) and, presumably, NineColScheme used in later plots -- it is
# not defined anywhere in this file. TODO confirm.
source("BulkComp.R")
# CombinedGenesbyMerge@var.genes <- GenestoUse
# CombinedGenesbyMerge@var.genes <- rownames(CombinedGenesbyMerge@raw.data)[rownames(CombinedGenesbyMerge@raw.data) %in% rownames(resOrdered)]
# NOTE(review): GenestoUse computed above is superseded here by the
# bulk-derived gene list; the earlier computation is effectively dead code.
CombinedGenesbyMerge@var.genes <- TestBulkvar
CombinedGenesbyMerge <- NormalizeData(CombinedGenesbyMerge, display.progress = F)
# Regress out sequencing depth before PCA.
CombinedGenesbyMerge <- ScaleData(CombinedGenesbyMerge, vars.to.regress = c("nUMI"), display.progress = F)
CombinedGenesbyMerge <- RunPCA(object = CombinedGenesbyMerge, pc.genes = CombinedGenesbyMerge@var.genes, do.print = TRUE, pcs.print = 1:5, genes.print = 5)
# CombinedGenesbyMergePlusBulks <- MergeSeurat(CombinedS, U87S)
# # CombinedGenesbyMergePlusBulks <- MergeSeurat(CombinedGenesbyMergePlusBulks, HEKS)
# CombinedGenesbyMergePlusBulks@var.genes <- GenestoUse
# CombinedGenesbyMergePlusBulks <- ScaleData(CombinedGenesbyMergePlusBulks, vars.to.regress = c("nUMI", "orig.ident"))
# CombinedGenesbyMergePlusBulks <- RunPCA(object = CombinedGenesbyMergePlusBulks, pc.genes = CombinedGenesbyMergePlusBulks@var.genes, do.print = TRUE, pcs.print = 1:5, genes.print = 5)
# CombinedGenesbyMergePlusBulks <- ProjectPCA(object = CombinedGenesbyMergePlusBulks)
# CombinedGenesbyMergePlusBulks <- JackStraw(object = CombinedGenesbyMergePlusBulks, num.replicate = 50, display.progress = FALSE)
# CombinedGenesbyMergePlusBulks <- FindClusters(object = CombinedGenesbyMergePlusBulks, reduction.type = "pca", dims.use = 1:20, resolution = 1.1, print.output = 0, save.SNN = TRUE, force.recalc=TRUE)
# CombinedGenesbyMergePlusBulks <- RunTSNE(object = CombinedGenesbyMergePlusBulks, dims.use = 1:20, do.fast = TRUE)
# cluster1.markers <- FindMarkers(object = CombinedGenesbyMergePlusBulks, ident.1 = 1, min.pct = 0.25)
# CombinedGenesbyMergePlusBulks.markers <- FindAllMarkers(object = CombinedGenesbyMergePlusBulks, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)
# PCA diagnostics.
VizPCA(object = CombinedGenesbyMerge, pcs.use = 1:2)
PCAPlot(object = CombinedGenesbyMerge, dim.1 = 1, dim.2 = 2, group.by = "celltype")
CombinedGenesbyMerge <- ProjectPCA(object = CombinedGenesbyMerge)
PCHeatmap(object = CombinedGenesbyMerge, pc.use = 1, do.balanced = TRUE, label.columns = FALSE)
# JackStraw is computed but its plot below is commented out -- result unused.
CombinedGenesbyMerge <- JackStraw(object = CombinedGenesbyMerge, num.replicate = 50, display.progress = FALSE)
# JackStrawPlot(object = CombinedGenesbyMerge, PCs = 1:20)
# PCElbowPlot(object = CombinedGenesbyMerge)
# Graph clustering on the first 20 PCs, then tSNE on the same dims.
CombinedGenesbyMerge <- FindClusters(object = CombinedGenesbyMerge, reduction.type = "pca", dims.use = 1:20, resolution = 1.1, print.output = 0, save.SNN = TRUE, force.recalc=TRUE)
PrintFindClustersParams(object = CombinedGenesbyMerge)
CombinedGenesbyMerge <- RunTSNE(object = CombinedGenesbyMerge, dims.use = 1:20, do.fast = TRUE)
cluster1.markers <- FindMarkers(object = CombinedGenesbyMerge, ident.1 = 1, min.pct = 0.25)
print(x = head(x = cluster1.markers, n = 5))
CombinedGenesbyMerge.markers <- FindAllMarkers(object = CombinedGenesbyMerge, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)
# Prints top 5 markers per cluster (result intentionally not assigned).
CombinedGenesbyMerge.markers %>% group_by(cluster) %>% top_n(5, avg_logFC)
# Derived QC ratios. NOTE(review): positional meta.data indexing -- assumes
# column 1 is nGene, 6 the cell count, 7 the bead count (names suggest so,
# given the ratio names below); fragile, confirm and prefer column names.
Metadata <- CombinedGenesbyMerge@meta.data
Metadata[,"GeneCellRatio"] <- Metadata[,1]/Metadata[,6]
Metadata[,"GeneBeadRatio"] <- Metadata[,1]/Metadata[,7]
Metadata[,"CellBeadRatio"] <- Metadata[,7]/Metadata[,6]
# Last 6 rows of each chip's no-cell matrix, stacked as chamber metadata.
NoCellIncludedMetadata <- data.frame(t(cbind(tail(AX206NoCell,6),tail(AX206RedoNoCell,6),tail(AX207NoCell,6),tail(AX208NoCell,6),tail(AX208RedoNoCell,6),tail(AX218NoCell,6),tail(AX219NoCell,6))))
TSNEPlot(object = CombinedGenesbyMerge, group.by = "orig.ident", pt.size = 3)
# Figure 3 B
# tSNE colored by cell line. NOTE(review): NineColScheme is not defined in
# this file -- presumably supplied by BulkComp.R; confirm.
TSNEPlot(object = CombinedGenesbyMerge, group.by = "celltype", pt.size = 4, colors.use = c(NineColScheme[1], NineColScheme[6]), no.legend = TRUE)
# Ridge plots of the SCBC channels by cell line. NOTE(review): "B","C","D"
# appear to be the raw channel feature names (renamed to PKM2/c-MYC/PDHK1
# only in the plotting tables) -- confirm they match the SCBC assay rownames.
RidgePlot(CombinedGenesbyMerge, features.plot = c("B","C","D"), nCol = 2, group.by = "celltype")
# Detected genes vs bead count / cell count, with linear fits.
ggplot(Metadata, aes(x=Beads, y=nGene))+geom_point()+geom_smooth(method='lm',formula=y~x) + scale_x_continuous(breaks=seq(0,11,1)) + coord_fixed(ratio = 11/4000) + theme(text=element_text(family="Calibri"))
ggplot(Metadata, aes(x=Cells, y=nGene))+geom_point()+geom_smooth(method='lm',formula=y~x) + scale_x_continuous(breaks=seq(0,11,1)) + coord_fixed(ratio = 9/4000) + theme(text=element_text(family="Calibri"))
# cbmc_cite <- RunPCA(CombinedGenesbyMerge, pc.genes = c("B","C","D"), assay.type = "SCBC", pcs.print = 0, pcs.compute = 1:5)
# PCAPlot(cbmc_cite, pt.size = 3, group.by="celltype")
FileName <- "AllCells"
GenesofInterest <- list()
ProteinNames <- c()
U87cells <- CombinedGenesbyMerge@meta.data[,"celltype"]=="U87"
HEKcells <- CombinedGenesbyMerge@meta.data[,"celltype"]=="HEK"
# IntegratedSeuratDataset <- data.frame(as.matrix(t(rbind(CombinedGenesbyMerge@scale.data[CombinedGenesbyMerge@var.genes,U87cells], CombinedGenesbyMerge@assay$SCBC@raw.data[,U87cells]))))
IntegratedSeuratDataset <- data.frame(as.matrix(t(rbind(CombinedGenesbyMerge@scale.data, CombinedGenesbyMerge@assay$SCBC@scale.data))))
# For each of the three SCBC proteins (assumed to occupy the LAST 3 columns of
# IntegratedSeuratDataset), regress/correlate every gene column against that
# protein and collect "hit" genes. Results are written to the global
# environment via assign(); FileName, ProteinNames and GenesofInterest must
# already exist in the calling environment.
for (n in 1:3)
{
# Protein name taken from the first three columns of Allprotsnormalized.
Target <- colnames(Allprotsnormalized[,1:3])[n]
print(Target)
# ProteinNames <- c(ProteinNames, Target)
# One lm() per gene column: gene expression ~ protein level.
PairwiseMatrixLinearRegression <- apply(IntegratedSeuratDataset[ , 1:(ncol(IntegratedSeuratDataset)-3)], 2,
function(x) lm(x ~ IntegratedSeuratDataset[ , ncol(IntegratedSeuratDataset)-3+n],
data = IntegratedSeuratDataset))
assign(paste0(Target,"PairwiseLinearRegression"), PairwiseMatrixLinearRegression)
Coefficients <- sapply(PairwiseMatrixLinearRegression,coef)
assign(paste0(Target,"Coefficients"), Coefficients)
# NOTE(review): [8,,] picks element 8 of each summary.lm list, i.e. r.squared,
# by POSITION — fragile; summary(m)$r.squared would be the robust form.
Rsquared <- sapply(PairwiseMatrixLinearRegression,summary)[8,,drop=FALSE]
assign(paste0(Target,"Rsquared"), Rsquared)
assign(paste0(FileName,Target,"LinearModel"), t(rbind(Coefficients,unlist(Rsquared))))
# Spearman (rank) correlation of each gene with the protein.
SpearmanMatrix <- apply(IntegratedSeuratDataset[ , 1:(ncol(IntegratedSeuratDataset)-3)], 2,
function(x) cor.test(x,IntegratedSeuratDataset[ , ncol(IntegratedSeuratDataset)-3+n], method="spearman"))
assign(paste0(FileName,Target,"Spearman"), SpearmanMatrix)
SpearmanPValues <- sapply(SpearmanMatrix, function(x) x$p.value)
# Pearson (linear) correlation of each gene with the protein.
PearsonMatrix <- apply(IntegratedSeuratDataset[ , 1:(ncol(IntegratedSeuratDataset)-3)], 2,
function(x) cor.test(x,IntegratedSeuratDataset[ , ncol(IntegratedSeuratDataset)-3+n], method="pearson"))
assign(paste0(FileName,Target,"Pearson"), PearsonMatrix)
PearsonPValues <- sapply(PearsonMatrix, function(x) x$p.value)
# Per-gene summary with fixed thresholds: R^2 > 0.4, p < 0.05 for each test.
SignificanceTable <- data.frame(cbind(Rsquared=unlist(Rsquared), SpearmanPValues, PearsonPValues))
SignificanceTable <- cbind(SignificanceTable, RsquaredThres=SignificanceTable[,"Rsquared"]>0.4,
SpearmanPValuesThres=SignificanceTable[,"SpearmanPValues"]<0.05,
PearsonPValuesThres=SignificanceTable[,"PearsonPValues"]<0.05)
# SoftHit: any criterion passes; HardHit: all three pass.
SignificanceTable <- cbind(SignificanceTable, SoftHit=SignificanceTable[,"RsquaredThres"]|SignificanceTable[,"SpearmanPValuesThres"]|SignificanceTable[,"PearsonPValuesThres"],
HardHit=SignificanceTable[,"RsquaredThres"]&SignificanceTable[,"SpearmanPValuesThres"]&SignificanceTable[,"PearsonPValuesThres"])
assign(paste0(FileName,Target,"SignificanceTable"), SignificanceTable)
# Gene names flagged as soft hits, accumulated (as one-row data frames) into
# the pre-existing GenesofInterest list for the cross-protein table below.
SoftHits <- rownames(SignificanceTable[which(SignificanceTable["SoftHit"]==1),])
names(SoftHits) <- SoftHits
SoftHits <- list(data.frame(t(SoftHits)))
GenesofInterest <- c(GenesofInterest, SoftHits)
}
library(plyr)
# rbind.fill pads the unequal-length per-protein hit lists with NA so they can
# be combined; transpose gives one column of hit genes per protein.
GenesofInterest <- t(do.call(rbind.fill, GenesofInterest))
colnames(GenesofInterest) <- ProteinNames
# Blank out the NA padding for a clean spreadsheet.
GenesofInterest[is.na(GenesofInterest)] <- ""
library(xlsx)
write.xlsx(GenesofInterest, paste0(FileName, "GenesofInterest.xlsx"), row.names = FALSE)
# QC violin + jitter of per-cell UMI and gene counts.
ggplot(Metadata) +
geom_violin(aes(x="nUMI", y=nUMI), width=0.7, fill="red") +
geom_jitter(aes(x="nUMI", y=nUMI), width=0.2, size=4, alpha=0.6) +
geom_violin(aes(x="nGene", y=nGene), width=0.7) +
geom_jitter(aes(x="nGene", y=nGene), width=0.2, size=4, alpha=0.6) +
theme(text=element_text(family="Calibri")) +
labs(x = "Counts", y = "Metric")
# Example protein-vs-gene scatter with a least-squares fit line.
ggplot(IntegratedSeuratDataset, aes(x=B, y=ITGA10))+geom_point()+geom_smooth(method='lm',formula=y~x)
# Raw protein fluorescence per well, colored by cell count.
ggplot(AllprotsallPlot, aes(x=variable, y=value, color=Cells)) +
geom_jitter(width=0.3, size=4, alpha=0.6) +
scale_color_manual(values=rev(viridis(9))) +
ggtitle("Proteins") +
ylab("Fluorescence (arbitrary units)") +
xlab("Protein") +
theme(legend.position = c(0.8,0.8), text=element_text(family="Calibri"))
# Normalized protein levels per chip (outliers drawn as '+').
ggplot(Allprotsnormalizedplot) +
geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3) +
# geom_point(aes(x=variable, y=value, fill=Chip, size=Cells), position=position_dodge(width = 0.75), alpha=0.5) +
scale_size_discrete(range = c(1,5)) +
theme(text=element_text(family="Calibri"))
# viridis(9)
# Normalized protein table EXCLUDING the "Redo" replicate chips ("NoRep").
AllprotsnormalizedNoRep <- rbind(AX206NormalizedProteins,AX207NormalizedProteins,AX208NormalizedProteins,AX218NormalizedProteins,AX219NormalizedProteins)
# Allprotsall <- rbind(AX206AllProts,AX207AllProts,AX208AllProts,AX218AllProts,AX219AllProts, AX206RedoAllProts, AX208RedoAllProts)
# Allprotsall["Chip"] <- gsub(pattern = "*X(.*)", replacement="", x=gsub(pattern = "AX",replacement="A", x=rownames(Allprotsall)))
# colnames(Allprotsall)[1:3] <- c("PKM2","c-MYC","PDHK1")
# AllprotsallPlot <- melt(Allprotsall, id=c("Cells","Beads", "Chip"))
# AllprotsallPlot[,1] <- as.factor(AllprotsallPlot[,1])
# Label the three antibody channels, then reshape to long format for ggplot.
colnames(AllprotsnormalizedNoRep)[1:3] <- c("PKM2", "c-MYC", "PDHK1")
AllprotsnormalizedplotNoRep <- melt(AllprotsnormalizedNoRep, id=c("Cells", "Beads", "Chip"))
AllprotsnormalizedplotNoRep$Chip <- as.factor(AllprotsnormalizedplotNoRep$Chip)
AllprotsnormalizedplotNoRep$Cells <- as.factor(AllprotsnormalizedplotNoRep$Cells)
# Map each chip to its cell line (AX206/218/219 are U87; AX207/208 are HEK).
AllprotsnormalizedplotNoRep["Celltype"] <- NA
AllprotsnormalizedplotNoRep$Celltype[AllprotsnormalizedplotNoRep$Chip %in% c("AX206", "AX218", "AX219")] <- "U87"
AllprotsnormalizedplotNoRep$Celltype[AllprotsnormalizedplotNoRep$Chip %in% c("AX208", "AX207")] <- "HEK"
# Per-protein subsets of the normalized, no-replicate measurements.
BProts <- AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$variable=="PKM2",]
CProts <- AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$variable=="c-MYC",]
DProts <- AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$variable=="PDHK1",]
# Welch t-test of each protein's level between U87 and HEK cells.
# BUG FIX: the PKM2 test previously grouped BProts$value by CProts$Celltype
# (the c-MYC subset's labels). Because melt() emits the variables in parallel
# blocks the rows likely align and the numbers were probably unchanged, but
# each test must use its own subset's labels.
t.test(BProts$value ~ BProts$Celltype)
t.test(CProts$value ~ CProts$Celltype)
t.test(DProts$value ~ DProts$Celltype)
# Figure 3 A Set width to 500
# Protein levels by cell line; fixed aspect ratio for publication sizing.
ggplot(AllprotsnormalizedplotNoRep) +
geom_boxplot(aes(x=variable, y=value, fill=Celltype), outlier.shape = 3, width = 0.5) +
scale_size_discrete(range = c(1,5)) +
scale_fill_manual(values=c(NineColScheme[1], NineColScheme[6])) +
theme(text = element_text(family = "Arial"), legend.position = c(0, 0.9)) +
coord_fixed(ratio = 1/80) +
labs(x="Protein", y="Fluorescence (a.u.)") +
theme(text=element_text(family="Arial", size = 15))
# Supfig 3 A
# Per-chip protein boxplots, U87 chips only.
ggplot(AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$Celltype=="U87",]) +
geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3, width = 0.5) +
scale_fill_manual(values=c(NineColScheme[1],NineColScheme[5],NineColScheme[6])) +
theme(text = element_text(family = "Arial"), legend.position = "none") +
scale_y_continuous(limits = c(-8,240)) +
coord_fixed(ratio = 1/100) +
labs(x="Protein", y="Fluorescence (a.u.)") +
theme(text=element_text(family="Arial", size = 15))
# (A stray duplicate of t.test(CProts$value ~ CProts$Celltype) — already run
# above with the other per-protein tests — was removed from here.)
# Per-chip protein boxplots, HEK chips only.
ggplot(AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$Celltype=="HEK",]) +
geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3, width = 0.5) +
scale_fill_manual(values=c(NineColScheme[1],NineColScheme[6])) +
theme(text = element_text(family = "Arial"), legend.position = "none") +
scale_y_continuous(limits = c(-5,240)) +
coord_fixed(ratio = 1/80) +
labs(x="Protein", y="Fluorescence (a.u.)") +
theme(text=element_text(family="Arial", size = 15))
# Raw per-well values from all seven chips (including the Redo replicates);
# jittered cells-vs-beads occupancy scatter.
Allcellrawmeta <- rbind(AX206Vals, AX207Vals, AX208Vals, AX218Vals, AX219Vals, AX206RedoVals, AX208RedoVals)
ggplot(Allcellrawmeta) + geom_jitter(aes(x=Cells, y=Beads), width = 0.2, height = 0.1, alpha = 0.3)
# Figure 2 b. Reduce width by 33%
# Restrict to wells with fewer than 7 cells. (The original applied this filter
# twice on consecutive lines; a single application is equivalent.)
NoCellIncludedMetadata <- NoCellIncludedMetadata[NoCellIncludedMetadata$Cells<7,]
NoCellIncludedMetadata$Cells <- factor(NoCellIncludedMetadata$Cells)
# Reads-per-bead distribution by cell count.
ggplot(NoCellIncludedMetadata, aes(x=Cells, fill=Cells, y=TotalReads/Beads)) +
geom_boxplot(aes(group = Cells), alpha = 0.4, outlier.color = NA) +
geom_jitter(width=0.1) +
scale_fill_manual(values = NineColScheme) +
labs(x="Cells", y="Reads per bead") +
theme(text=element_text(family="Arial", size = 15), legend.position = "none")
# Frequency of each (Cells, Beads) combination for the occupancy heatmap.
CountHeatmap <- data.frame(as.matrix(NoCellIncludedMetadata[,c("Cells", "Beads")] %>% table)) #%>% group_by(Digital, Physical)
# table() yields factor labels; convert back to numeric for continuous axes.
CountHeatmap$Cells <- as.numeric(as.character(CountHeatmap$Cells))
CountHeatmap$Beads <- as.numeric(as.character(CountHeatmap$Beads))
ggplot(CountHeatmap, aes(y=Cells, x=Beads, color=Freq))+geom_point(size = 9)+scale_color_gradientn(colors = c("#FFFFFF",NineColScheme[1:5]))+scale_x_continuous(breaks=0:max(CountHeatmap$Beads),limits = c(0,max(CountHeatmap$Beads)))+scale_y_continuous(breaks = 0:max(CountHeatmap$Cells), limits = c(0,max(CountHeatmap$Cells)))+theme(text=element_text(family="Arial", size = 15))+coord_fixed(ratio=1)
ggplot(NoCellIncludedMetadata) + geom_point(aes(x=Beads, y=TotalReads))
ggplot(NoCellIncludedMetadata) + geom_point(aes(x=Beads, y=TotalReads))
# SupFig 2 B
TestS <- AX206RedoS
colnames(TestS@scale.data) <- gsub("AX206Redo", "", colnames(TestS@scale.data))
colnames(TestS@scale.data) <- gsub("-.", " ", colnames(TestS@scale.data))
colnames(TestS@scale.data) <- gsub("Y", "Y-", colnames(TestS@scale.data))
colnames(TestS@scale.data) <- gsub("X", "X-", colnames(TestS@scale.data))
heatmap.2(as.matrix(TestS@scale.data), trace = "none", margins = c(5,2), labRow = FALSE)
heatmap.2(as.matrix(TestS@scale.data[TestS@var.genes,]), trace = "none", margins = c(5,2), labRow = FALSE)
heatmap.2(as.matrix(AX206RedoS@scale.data), trace="none", margins = c(8,5), labRow = FALSE)
VlnPlot(object = CombinedGenesbyMerge, features.plot = c("nGene", "nUMI", "percent.mito"), nCol = 3, group.by = "orig.ident", y.lab.rot = TRUE)
FeaturePlot(CombinedGenesbyMerge, features.plot = c("B","C","D"), cols.use = c("lightgrey", "blue"), pt.size = 2, nCol = 1)
# SupFig 2
# Gene and transcript count violins by cell type, area scaled to cell count.
ggplot(Metadata, aes(fill=celltype)) +
geom_violin(aes(x="Genes", y=nGene), scale = "count") +
geom_violin(aes(x="Transcripts", y=nUMI), scale = "count") +
coord_fixed(ratio = 1/10000) +
scale_fill_manual(values = c(NineColScheme[1],NineColScheme[6])) +
labs(y="Counts") +
theme(text=element_text(family="Arial", size = 15), legend.position = "none")
# NOTE(review): identical VlnPlot call also appears above (SupFig 2 B) —
# possibly an intentional re-render, possibly a leftover duplicate.
VlnPlot(object = CombinedGenesbyMerge, features.plot = c("nGene", "nUMI", "percent.mito"), nCol = 3, group.by = "orig.ident", y.lab.rot = TRUE)
#SupFig3
FeaturePlot(CombinedGenesbyMerge, features.plot = c("D"), cols.use = c("lightgrey", NineColScheme[6]), pt.size = 4)
library(Seurat)
library(dplyr)
library(viridis)
library(reshape2)
library(extrafont)
# NOTE(review): hard-coded absolute path makes the script machine-specific.
setwd("C:/Users/alexm/Documents/git/Protein Analysis/")
# QC thresholds used below: a gene must appear in >= 5 cells (min.cells), and
# a cell must have >= 500 UMIs and >= 100 detected genes to be kept.
mingeneappearancethreshold <- 5
lowUMIpercellthreshold <- 500
lowgenepercellthreshold <- 100
# load("AX206genes")
# AX206 <- Genes
# load("AX207genes")
# AX207 <- Genes
# load("AX208genes")
# AX208 <- Genes
# load("AX206Redogenes")
# AX206Redo <- Genes
# load("AX208Redogenes")
# AX208Redo <- Genes
# load("AX218genes")
# AX218 <- Genes
# load("AX219genes")
# AX219 <- Genes
# colnames(IntegratedData) <- gsub("X", "AX219X", colnames(IntegratedData))
# colnames(NoCellIntegratedData) <- gsub("X", "AX219X", colnames(NoCellIntegratedData))
# save(list = c("IntegratedData", "NoCellIntegratedData"), file = "AX219alldata")
print("Loading data")
# Each "<chip>alldata" .RData file provides two objects — IntegratedData
# (wells with cells) and NoCellIntegratedData (empty wells) — which are
# renamed to chip-specific variables before the next load() overwrites them.
load("AX206alldata")
AX206all <- IntegratedData
AX206NoCell <- NoCellIntegratedData
load("AX207alldata")
AX207all <- IntegratedData
AX207NoCell <- NoCellIntegratedData
load("AX208alldata")
AX208all <- IntegratedData
AX208NoCell <- NoCellIntegratedData
load("AX206Redoalldata")
AX206Redoall <- IntegratedData
AX206RedoNoCell <- NoCellIntegratedData
load("AX208Redoalldata")
AX208Redoall <- IntegratedData
AX208RedoNoCell <- NoCellIntegratedData
load("AX218alldata")
AX218all <- IntegratedData
AX218NoCell <- NoCellIntegratedData
load("AX219alldata")
AX219all <- IntegratedData
AX219NoCell <- NoCellIntegratedData
# save(list=c("AX206Vals","AX207Vals","AX208Vals","AX218Vals","AX219Vals","AX206Zeros","AX207Zeros","AX208Zeros","AX218Zeros","AX219Zeros","AX206RedoVals","AX208RedoVals","AX206RedoZeros","AX208RedoZeros"), file = "AllProteinValues")
# Provides <chip>Vals (all wells) and <chip>Zeros (zero-cell wells) tables.
load("AllProteinValues")
# AX206Vals <- data.frame(t(ProteinsPerBeads))
# rownames(AX219Vals) <- gsub("X","AX219X",rownames(AX219Vals))
# AX219Zeros <- AX219Vals[which(AX219Vals[,4]==0),]
print("Applying normalization and background subtraction")
# For each chip: estimate per-antibody background as the column means of the
# zero-cell wells (columns 1:3 are the three protein channels), then rescale
# all wells so background maps to 0 in units of 1% of background. Non-protein
# columns (cell/bead counts) pass through untouched.
# The original repeated this verbatim for all seven chips; this loop produces
# the identical <chip>Background, <chip>ConversionFactors and
# <chip>NormalizedProteins globals in the same order.
chips <- c("AX206", "AX207", "AX208", "AX218", "AX219", "AX206Redo", "AX208Redo")
for (chip in chips) {
  zeros <- get(paste0(chip, "Zeros"))  # background (zero-cell) wells
  vals <- get(paste0(chip, "Vals"))    # all measured wells
  background <- apply(zeros, 2, mean)[1:3]
  conversion <- background / 100
  normalized <- vals
  normalized[, 1:3] <- t(apply(vals[, 1:3], 1, function(x) (x - background) / conversion))
  assign(paste0(chip, "Background"), background)
  assign(paste0(chip, "ConversionFactors"), conversion)
  assign(paste0(chip, "NormalizedProteins"), normalized)
}
# AX206NormalizedProteins[,"Chip"] <- "AX206"
# AX207NormalizedProteins[,"Chip"] <- "AX207"
# AX208NormalizedProteins[,"Chip"] <- "AX208"
# AX218NormalizedProteins[,"Chip"] <- "AX218"
# AX219NormalizedProteins[,"Chip"] <- "AX219"
# Drop the last 12 rows of each chip's combined matrix to leave only gene
# counts — presumably the appended non-gene rows (protein channels + QC
# metadata; earlier comments reference proteins at nrow-3:nrow-1) —
# TODO confirm against the upstream file format.
AX206 <- AX206all[-((nrow(AX206all)-11):nrow(AX206all)),]
AX207 <- AX207all[-((nrow(AX207all)-11):nrow(AX207all)),]
AX208 <- AX208all[-((nrow(AX208all)-11):nrow(AX208all)),]
AX206Redo <- AX206Redoall[-((nrow(AX206Redoall)-11):nrow(AX206Redoall)),]
AX208Redo <- AX208Redoall[-((nrow(AX208Redoall)-11):nrow(AX208Redoall)),]
AX218 <- AX218all[-((nrow(AX218all)-11):nrow(AX218all)),]
AX219 <- AX219all[-((nrow(AX219all)-11):nrow(AX219all)),]
print("Creating Seurat objects")
# Build one Seurat (v2 API) object per chip. The original repeated this
# pipeline verbatim seven times; build_chip_seurat() applies the identical
# steps: create the object, tag the cell line, filter low-quality cells using
# the global thresholds, log-normalize, scale, find variable genes, and record
# each cell's mitochondrial read fraction as "percent.mito" metadata.
# NOTE(review): unlike the original, the intermediate mito.genes/percent.mito
# vectors stay local instead of lingering in the global environment; nothing
# downstream in this script reads them.
build_chip_seurat <- function(raw_counts, project_name, celltype) {
  obj <- CreateSeuratObject(raw.data = raw_counts, project = project_name, min.cells = mingeneappearancethreshold)
  obj@meta.data$celltype <- celltype
  obj <- FilterCells(obj, subset.names = c("nUMI", "nGene"),
                     low.thresholds = c(lowUMIpercellthreshold, lowgenepercellthreshold),
                     high.thresholds = c(Inf, Inf))
  obj <- NormalizeData(obj, display.progress = F)
  obj <- ScaleData(obj, display.progress = F)
  obj <- FindVariableGenes(obj, do.plot = F, display.progress = F)
  mito.genes <- grep(pattern = "^MT-", x = rownames(x = obj@data), value = TRUE)
  percent.mito <- Matrix::colSums(obj@raw.data[mito.genes, ])/Matrix::colSums(obj@raw.data)
  AddMetaData(object = obj, metadata = percent.mito, col.name = "percent.mito")
}
# Chip -> cell line mapping, in the original creation order.
chip_celltypes <- c(AX206 = "U87", AX207 = "HEK", AX208 = "HEK",
                    AX218 = "U87", AX219 = "U87",
                    AX206Redo = "U87", AX208Redo = "HEK")
for (chip in names(chip_celltypes)) {
  assign(paste0(chip, "S"), build_chip_seurat(get(chip), chip, chip_celltypes[[chip]]))
}
# U871 <- read.csv("GSM2794663_U87_con_1_Genes_ReadCount.txt", sep = "\t", row.names = 1)
# colnames(U871) <- "U87Control1"
# U872 <- read.csv("GSM2794664_U87_con_2_Genes_ReadCount.txt", sep = "\t", row.names = 1)
# colnames(U872) <- "U87Control2"
#
# HEKCombinedSingleCell <- CombinedGenesbyMerge@raw.data[,CombinedGenesbyMerge@meta.data$celltype=="HEK"]
# U87CombinedSingleCell <- CombinedGenesbyMerge@raw.data[,CombinedGenesbyMerge@meta.data$celltype=="U87"]
# U87CombinedSingleCell <- apply(U87CombinedSingleCell,1,mean)
# HEKCombinedSingleCell <- apply(HEKCombinedSingleCell,1,mean)
#
# BulkComp <- data.frame(cbind(U87CombinedSingleCell, HEKCombinedSingleCell))
# CombinedS <- CreateSeuratObject(raw.data=BulkComp, project="CombinedCells")
# CombinedS@meta.data$celltype <- "U87"
# CombinedS <- FilterCells(CombinedS, subset.names=c("nUMI","nGene"), low.thresholds=c(lowUMIpercellthreshold,lowgenepercellthreshold), high.thresholds=c(Inf,Inf))
# CombinedS <- NormalizeData(CombinedS, display.progress=F)
# CombinedS <- ScaleData(CombinedS, display.progress=F)
# CombinedS <- FindVariableGenes(CombinedS, do.plot = F, display.progress=F)
# # AX208RedoS <- SetAssayData(AX208RedoS, assay.type = "SCBC", slot = "raw.data", new.data = AX208Redoall[((nrow(AX208Redoall)-3):(nrow(AX208Redoall)-1)),])
# # AX208RedoS <- NormalizeData(AX208RedoS, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
# # AX208RedoS <- ScaleData(AX208RedoS, assay.type = "SCBC", display.progress = F)
# mito.genes <- grep(pattern = "^MT-", x = rownames(x = CombinedS@data), value = TRUE)
# percent.mito <- Matrix::colSums(CombinedS@raw.data[mito.genes, ])/Matrix::colSums(CombinedS@raw.data)
# CombinedS <- AddMetaData(object = CombinedS, metadata = percent.mito, col.name = "percent.mito")
# # GSM2794664
# U87S <- CreateSeuratObject(raw.data=U87BulkControls, project="U87Control1")
# U87S@meta.data$celltype <- "U87"
# U87S <- FilterCells(U87S, subset.names=c("nUMI","nGene"), low.thresholds=c(lowUMIpercellthreshold,lowgenepercellthreshold), high.thresholds=c(Inf,Inf))
# U87S <- NormalizeData(U87S, display.progress=F)
# U87S <- ScaleData(U87S, display.progress=F)
# U87S <- FindVariableGenes(U87S, do.plot = F, display.progress=F)
# # AX208RedoS <- SetAssayData(AX208RedoS, assay.type = "SCBC", slot = "raw.data", new.data = AX208Redoall[((nrow(AX208Redoall)-3):(nrow(AX208Redoall)-1)),])
# # AX208RedoS <- NormalizeData(AX208RedoS, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
# # AX208RedoS <- ScaleData(AX208RedoS, assay.type = "SCBC", display.progress = F)
# mito.genes <- grep(pattern = "^MT-", x = rownames(x = U87S@data), value = TRUE)
# percent.mito <- Matrix::colSums(U87S@raw.data[mito.genes, ])/Matrix::colSums(U87S@raw.data)
# U87S <- AddMetaData(object = U87S, metadata = percent.mito, col.name = "percent.mito")
#
# # GSM2599702
# HEKS <- CreateSeuratObject(raw.data=UMI_count, project="HEK")
# HEKS@meta.data$celltype <- "HEK"
# HEKS <- FilterCells(HEKS, subset.names=c("nUMI","nGene"), low.thresholds=c(lowUMIpercellthreshold,lowgenepercellthreshold), high.thresholds=c(Inf,Inf))
# HEKS <- NormalizeData(HEKS, display.progress=F)
# HEKS <- ScaleData(HEKS, display.progress=F)
# HEKS <- FindVariableGenes(HEKS, do.plot = F, display.progress=F)
# # AX208RedoS <- SetAssayData(AX208RedoS, assay.type = "SCBC", slot = "raw.data", new.data = AX208Redoall[((nrow(AX208Redoall)-3):(nrow(AX208Redoall)-1)),])
# # AX208RedoS <- NormalizeData(AX208RedoS, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
# # AX208RedoS <- ScaleData(AX208RedoS, assay.type = "SCBC", display.progress = F)
# mito.genes <- grep(pattern = "^MT-", x = rownames(x = HEKS@data), value = TRUE)
# percent.mito <- Matrix::colSums(HEKS@raw.data[mito.genes, ])/Matrix::colSums(HEKS@raw.data)
# HEKS <- AddMetaData(object = HEKS, metadata = percent.mito, col.name = "percent.mito")
print("Adding protein values to Seurat")
# For each chip: (1) keep only wells whose barcode survived Seurat cell
# filtering, (2) snapshot that table as <chip>AllProts, then (3) divide the
# three protein channels by column 4 ("Cells") to get per-cell levels in
# <chip>NormalizedProteins. The original repeated this verbatim for all seven
# chips; the loop produces the identical globals in the same order.
chips <- c("AX206", "AX207", "AX208", "AX218", "AX219", "AX206Redo", "AX208Redo")
for (chip in chips) {
  prots <- get(paste0(chip, "NormalizedProteins"))
  sobj <- get(paste0(chip, "S"))
  prots <- prots[rownames(prots) %in% sobj@cell.names, ]
  assign(paste0(chip, "AllProts"), prots)
  prots[, 1:3] <- prots[, 1:3] / prots[, 4]
  assign(paste0(chip, "NormalizedProteins"), prots)
}
# Attach each chip's per-cell protein measurements to its Seurat object as a
# separate "SCBC" assay (proteins x cells, hence the transpose), record the
# cell and bead counts (columns 4:5) as metadata, then CLR-normalize and scale
# the protein data. The original repeated this verbatim for all seven chips.
chips <- c("AX206", "AX207", "AX208", "AX218", "AX219", "AX206Redo", "AX208Redo")
for (chip in chips) {
  sobj <- get(paste0(chip, "S"))
  prots <- get(paste0(chip, "NormalizedProteins"))
  sobj <- SetAssayData(sobj, assay.type = "SCBC", slot = "raw.data", new.data = t(prots[, 1:3]))
  sobj <- AddMetaData(object = sobj, metadata = prots[, 4:5], col.name = c("cells", "beads"))
  sobj <- NormalizeData(sobj, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
  sobj <- ScaleData(sobj, assay.type = "SCBC", display.progress = F)
  assign(paste0(chip, "S"), sobj)
}
# AX206SGeneNames <- head(rownames(AX206S@hvg.info), 1000)
# AX207SGeneNames <- head(rownames(AX207S@hvg.info), 1000)
# AX208SGeneNames <- head(rownames(AX208S@hvg.info), 1000)
# AX218SGeneNames <- head(rownames(AX218S@hvg.info), 1000)
# AX219SGeneNames <- head(rownames(AX219S@hvg.info), 1000)
# AX206RedoSGeneNames <- head(rownames(AX206RedoS@hvg.info), 1000)
# AX208RedoSGeneNames <- head(rownames(AX208RedoS@hvg.info), 1000)
print("Integrating multiple chips")
# Tag each chip's table with its chip ID, then stack all seven.
AX206NormalizedProteins[,"Chip"] <- "AX206"
AX207NormalizedProteins[,"Chip"] <- "AX207"
AX208NormalizedProteins[,"Chip"] <- "AX208"
AX218NormalizedProteins[,"Chip"] <- "AX218"
AX219NormalizedProteins[,"Chip"] <- "AX219"
AX206RedoNormalizedProteins[,"Chip"] <- "AX206Redo"
AX208RedoNormalizedProteins[,"Chip"] <- "AX208Redo"
Allprotsnormalized <- rbind(AX206NormalizedProteins,AX207NormalizedProteins,AX208NormalizedProteins,AX218NormalizedProteins,AX219NormalizedProteins, AX206RedoNormalizedProteins, AX208RedoNormalizedProteins)
Allprotsall <- rbind(AX206AllProts,AX207AllProts,AX208AllProts,AX218AllProts,AX219AllProts, AX206RedoAllProts, AX208RedoAllProts)
# Derive the chip ID from row names ("AX206X..." etc.).
# NOTE(review): the pattern "*X(.*)" starts with a bare '*' — likely intended
# as "X(.*)" or ".*X(.*)"; verify the resulting Chip labels.
Allprotsall["Chip"] <- gsub(pattern = "*X(.*)", replacement="", x=gsub(pattern = "AX",replacement="A", x=rownames(Allprotsall)))
colnames(Allprotsall)[1:3] <- c("PKM2","c-MYC","PDHK1")
# Long-format tables for ggplot.
AllprotsallPlot <- melt(Allprotsall, id=c("Cells","Beads", "Chip"))
AllprotsallPlot[,1] <- as.factor(AllprotsallPlot[,1])
Allprotsnormalizedplot <- melt(Allprotsnormalized, id=c("Cells", "Beads", "Chip"))
Allprotsnormalizedplot$Chip <- as.factor(Allprotsnormalizedplot$Chip)
Allprotsnormalizedplot$Cells <- as.factor(Allprotsnormalizedplot$Cells)
# Map chips to cell lines (AX206/206Redo/218/219 = U87; AX207/208/208Redo = HEK).
Allprotsnormalizedplot["Celltype"] <- NA
Allprotsnormalizedplot$Celltype[Allprotsnormalizedplot$Chip %in% c("AX206", "AX206Redo", "AX218", "AX219")] <- "U87"
Allprotsnormalizedplot$Celltype[Allprotsnormalizedplot$Chip %in% c("AX208", "AX208Redo", "AX207")] <- "HEK"
# Per-chip protein boxplots with per-well points sized by cell count.
ggplot(Allprotsnormalizedplot) + geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3)+geom_point(aes(x=variable, y=value, fill=Chip, size=Cells), position=position_dodge(width = 0.75), alpha=0.5)+scale_size_discrete(range = c(1,5))
print("Choosing variable genes")
# Union of each chip's variable genes...
AX206SGeneNames <- AX206S@var.genes
AX207SGeneNames <- AX207S@var.genes
AX208SGeneNames <- AX208S@var.genes
AX218SGeneNames <- AX218S@var.genes
AX219SGeneNames <- AX219S@var.genes
AX206RedoSGeneNames <- AX206RedoS@var.genes
AX208RedoSGeneNames <- AX208RedoS@var.genes
GenestoUse <- unique(c(AX206SGeneNames, AX207SGeneNames, AX208SGeneNames, AX206RedoSGeneNames, AX208RedoSGeneNames, AX218SGeneNames, AX219SGeneNames))
# ...restricted to genes present in every chip's raw matrix.
# NOTE(review): the AX207 intersection is commented out, so genes absent from
# AX207 can remain in GenestoUse — confirm whether that is intentional.
GenestoUse <- intersect(GenestoUse, rownames(AX206S@raw.data))
# GenestoUse <- intersect(GenestoUse, rownames(AX207S@raw.data))
GenestoUse <- intersect(GenestoUse, rownames(AX208S@raw.data))
GenestoUse <- intersect(GenestoUse, rownames(AX208RedoS@raw.data))
GenestoUse <- intersect(GenestoUse, rownames(AX206RedoS@raw.data))
GenestoUse <- intersect(GenestoUse, rownames(AX218S@raw.data))
GenestoUse <- intersect(GenestoUse, rownames(AX219S@raw.data))
# Merge chips pairwise (MergeSeurat takes two objects) into per-cell-line
# objects and one combined object covering all seven chips.
HEKOnly <- MergeSeurat(AX207S, AX208S)
HEKOnly <- MergeSeurat(HEKOnly, AX208RedoS)
U87Only <- MergeSeurat(AX206S, AX206RedoS)
U87Only <- MergeSeurat(U87Only, AX218S)
U87Only <- MergeSeurat(U87Only, AX219S)
CombinedGenesbyMerge <- MergeSeurat(AX206S, AX207S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX208S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX218S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX219S)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX206RedoS)
CombinedGenesbyMerge <- MergeSeurat(CombinedGenesbyMerge, AX208RedoS)
# Allprotein <- cbind(AX206all[((nrow(AX206all)-3):(nrow(AX206all)-1)),], AX207all[((nrow(AX207all)-3):(nrow(AX207all)-1)),], AX208all[((nrow(AX208all)-3):(nrow(AX208all)-1)),],
# AX218all[((nrow(AX218all)-3):(nrow(AX218all)-1)),], AX219all[((nrow(AX219all)-3):(nrow(AX219all)-1)),], AX206Redoall[((nrow(AX206Redoall)-3):(nrow(AX206Redoall)-1)),],
# AX208Redoall[((nrow(AX208Redoall)-3):(nrow(AX208Redoall)-1)),])
# Attach the stacked protein table as the combined object's SCBC assay and
# CLR-normalize/scale it (mirrors the per-chip SCBC processing above).
CombinedGenesbyMerge <- SetAssayData(CombinedGenesbyMerge, assay.type = "SCBC", slot = "raw.data", new.data = t(Allprotsnormalized[,1:3]))
CombinedGenesbyMerge <- NormalizeData(CombinedGenesbyMerge, assay.type = "SCBC", normalization.method = "genesCLR", display.progress = F)
CombinedGenesbyMerge <- ScaleData(CombinedGenesbyMerge, assay.type = "SCBC", display.progress = F)
print("Analyzing combined data")
# BulkComp.R is expected to define TestBulkvar (variable genes chosen from the
# bulk comparison) used as the variable-gene set below.
source("BulkComp.R")
# CombinedGenesbyMerge@var.genes <- GenestoUse
# CombinedGenesbyMerge@var.genes <- rownames(CombinedGenesbyMerge@raw.data)[rownames(CombinedGenesbyMerge@raw.data) %in% rownames(resOrdered)]
CombinedGenesbyMerge@var.genes <- TestBulkvar
# RNA assay: normalize, regress out library size (nUMI), then PCA on the
# chosen variable genes.
CombinedGenesbyMerge <- NormalizeData(CombinedGenesbyMerge, display.progress = F)
CombinedGenesbyMerge <- ScaleData(CombinedGenesbyMerge, vars.to.regress = c("nUMI"), display.progress = F)
CombinedGenesbyMerge <- RunPCA(object = CombinedGenesbyMerge, pc.genes = CombinedGenesbyMerge@var.genes, do.print = TRUE, pcs.print = 1:5, genes.print = 5)
# CombinedGenesbyMergePlusBulks <- MergeSeurat(CombinedS, U87S)
# # CombinedGenesbyMergePlusBulks <- MergeSeurat(CombinedGenesbyMergePlusBulks, HEKS)
# CombinedGenesbyMergePlusBulks@var.genes <- GenestoUse
# CombinedGenesbyMergePlusBulks <- ScaleData(CombinedGenesbyMergePlusBulks, vars.to.regress = c("nUMI", "orig.ident"))
# CombinedGenesbyMergePlusBulks <- RunPCA(object = CombinedGenesbyMergePlusBulks, pc.genes = CombinedGenesbyMergePlusBulks@var.genes, do.print = TRUE, pcs.print = 1:5, genes.print = 5)
# CombinedGenesbyMergePlusBulks <- ProjectPCA(object = CombinedGenesbyMergePlusBulks)
# CombinedGenesbyMergePlusBulks <- JackStraw(object = CombinedGenesbyMergePlusBulks, num.replicate = 50, display.progress = FALSE)
# CombinedGenesbyMergePlusBulks <- FindClusters(object = CombinedGenesbyMergePlusBulks, reduction.type = "pca", dims.use = 1:20, resolution = 1.1, print.output = 0, save.SNN = TRUE, force.recalc=TRUE)
# CombinedGenesbyMergePlusBulks <- RunTSNE(object = CombinedGenesbyMergePlusBulks, dims.use = 1:20, do.fast = TRUE)
# cluster1.markers <- FindMarkers(object = CombinedGenesbyMergePlusBulks, ident.1 = 1, min.pct = 0.25)
# CombinedGenesbyMergePlusBulks.markers <- FindAllMarkers(object = CombinedGenesbyMergePlusBulks, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)
# Diagnostics: PC loadings, PCA scatter by cell type, PC heatmap, JackStraw.
VizPCA(object = CombinedGenesbyMerge, pcs.use = 1:2)
PCAPlot(object = CombinedGenesbyMerge, dim.1 = 1, dim.2 = 2, group.by = "celltype")
CombinedGenesbyMerge <- ProjectPCA(object = CombinedGenesbyMerge)
PCHeatmap(object = CombinedGenesbyMerge, pc.use = 1, do.balanced = TRUE, label.columns = FALSE)
CombinedGenesbyMerge <- JackStraw(object = CombinedGenesbyMerge, num.replicate = 50, display.progress = FALSE)
# JackStrawPlot(object = CombinedGenesbyMerge, PCs = 1:20)
# PCElbowPlot(object = CombinedGenesbyMerge)
# Cluster on PCs 1-20, then embed with tSNE and find cluster markers.
CombinedGenesbyMerge <- FindClusters(object = CombinedGenesbyMerge, reduction.type = "pca", dims.use = 1:20, resolution = 1.1, print.output = 0, save.SNN = TRUE, force.recalc=TRUE)
PrintFindClustersParams(object = CombinedGenesbyMerge)
CombinedGenesbyMerge <- RunTSNE(object = CombinedGenesbyMerge, dims.use = 1:20, do.fast = TRUE)
cluster1.markers <- FindMarkers(object = CombinedGenesbyMerge, ident.1 = 1, min.pct = 0.25)
print(x = head(x = cluster1.markers, n = 5))
CombinedGenesbyMerge.markers <- FindAllMarkers(object = CombinedGenesbyMerge, only.pos = TRUE, min.pct = 0.25, thresh.use = 0.25)
CombinedGenesbyMerge.markers %>% group_by(cluster) %>% top_n(5, avg_logFC)
# Per-cell metadata plus derived loading ratios.
# NOTE(review): columns are addressed by position here — assumes col 1 = nGene,
# col 6 and col 7 hold the cell/bead counts; confirm against the meta.data layout.
Metadata <- CombinedGenesbyMerge@meta.data
Metadata[,"GeneCellRatio"] <- Metadata[,1]/Metadata[,6]
Metadata[,"GeneBeadRatio"] <- Metadata[,1]/Metadata[,7]
Metadata[,"CellBeadRatio"] <- Metadata[,7]/Metadata[,6]
# Last 6 metadata rows of each per-chip table, including zero-cell chambers.
NoCellIncludedMetadata <- data.frame(t(cbind(tail(AX206NoCell,6),tail(AX206RedoNoCell,6),tail(AX207NoCell,6),tail(AX208NoCell,6),tail(AX208RedoNoCell,6),tail(AX218NoCell,6),tail(AX219NoCell,6))))
TSNEPlot(object = CombinedGenesbyMerge, group.by = "orig.ident", pt.size = 3)
# Figure 3 B
TSNEPlot(object = CombinedGenesbyMerge, group.by = "celltype", pt.size = 4, colors.use = c(NineColScheme[1], NineColScheme[6]), no.legend = TRUE)
RidgePlot(CombinedGenesbyMerge, features.plot = c("B","C","D"), nCol = 2, group.by = "celltype")
ggplot(Metadata, aes(x=Beads, y=nGene))+geom_point()+geom_smooth(method='lm',formula=y~x) + scale_x_continuous(breaks=seq(0,11,1)) + coord_fixed(ratio = 11/4000) + theme(text=element_text(family="Calibri"))
ggplot(Metadata, aes(x=Cells, y=nGene))+geom_point()+geom_smooth(method='lm',formula=y~x) + scale_x_continuous(breaks=seq(0,11,1)) + coord_fixed(ratio = 9/4000) + theme(text=element_text(family="Calibri"))
# cbmc_cite <- RunPCA(CombinedGenesbyMerge, pc.genes = c("B","C","D"), assay.type = "SCBC", pcs.print = 0, pcs.compute = 1:5)
# PCAPlot(cbmc_cite, pt.size = 3, group.by="celltype")
# --- Gene/protein correlation screen -----------------------------------------
# For each of the three SCBC proteins, fit a per-gene linear model and compute
# Spearman/Pearson correlations between the gene's scaled expression and the
# protein's scaled level, then collect "soft hit" genes per protein.
FileName <- "AllCells"
GenesofInterest <- list()
ProteinNames <- c()
U87cells <- CombinedGenesbyMerge@meta.data[,"celltype"]=="U87"
HEKcells <- CombinedGenesbyMerge@meta.data[,"celltype"]=="HEK"
# IntegratedSeuratDataset <- data.frame(as.matrix(t(rbind(CombinedGenesbyMerge@scale.data[CombinedGenesbyMerge@var.genes,U87cells], CombinedGenesbyMerge@assay$SCBC@raw.data[,U87cells]))))
# Cells x (genes + 3 proteins); the last 3 columns are the SCBC proteins.
IntegratedSeuratDataset <- data.frame(as.matrix(t(rbind(CombinedGenesbyMerge@scale.data, CombinedGenesbyMerge@assay$SCBC@scale.data))))
for (n in 1:3)
{
Target <- colnames(Allprotsnormalized[,1:3])[n]
print(Target)
# FIX: this append was commented out, leaving ProteinNames empty; it is used
# below as colnames(GenesofInterest) on a 3-column matrix, which cannot work
# with a zero-length name vector.
ProteinNames <- c(ProteinNames, Target)
# Per-gene simple linear model: gene ~ protein n.
PairwiseMatrixLinearRegression <- apply(IntegratedSeuratDataset[ , 1:(ncol(IntegratedSeuratDataset)-3)], 2,
function(x) lm(x ~ IntegratedSeuratDataset[ , ncol(IntegratedSeuratDataset)-3+n],
data = IntegratedSeuratDataset))
assign(paste0(Target,"PairwiseLinearRegression"), PairwiseMatrixLinearRegression)
Coefficients <- sapply(PairwiseMatrixLinearRegression,coef)
assign(paste0(Target,"Coefficients"), Coefficients)
# Element 8 of a summary.lm object is r.squared.
Rsquared <- sapply(PairwiseMatrixLinearRegression,summary)[8,,drop=FALSE]
assign(paste0(Target,"Rsquared"), Rsquared)
assign(paste0(FileName,Target,"LinearModel"), t(rbind(Coefficients,unlist(Rsquared))))
# Rank (Spearman) and linear (Pearson) correlation tests per gene.
SpearmanMatrix <- apply(IntegratedSeuratDataset[ , 1:(ncol(IntegratedSeuratDataset)-3)], 2,
function(x) cor.test(x,IntegratedSeuratDataset[ , ncol(IntegratedSeuratDataset)-3+n], method="spearman"))
assign(paste0(FileName,Target,"Spearman"), SpearmanMatrix)
SpearmanPValues <- sapply(SpearmanMatrix, function(x) x$p.value)
PearsonMatrix <- apply(IntegratedSeuratDataset[ , 1:(ncol(IntegratedSeuratDataset)-3)], 2,
function(x) cor.test(x,IntegratedSeuratDataset[ , ncol(IntegratedSeuratDataset)-3+n], method="pearson"))
assign(paste0(FileName,Target,"Pearson"), PearsonMatrix)
PearsonPValues <- sapply(PearsonMatrix, function(x) x$p.value)
# Soft hit: any criterion met (R^2 > 0.4 or either p < 0.05); hard hit: all three.
SignificanceTable <- data.frame(cbind(Rsquared=unlist(Rsquared), SpearmanPValues, PearsonPValues))
SignificanceTable <- cbind(SignificanceTable, RsquaredThres=SignificanceTable[,"Rsquared"]>0.4,
SpearmanPValuesThres=SignificanceTable[,"SpearmanPValues"]<0.05,
PearsonPValuesThres=SignificanceTable[,"PearsonPValues"]<0.05)
SignificanceTable <- cbind(SignificanceTable, SoftHit=SignificanceTable[,"RsquaredThres"]|SignificanceTable[,"SpearmanPValuesThres"]|SignificanceTable[,"PearsonPValuesThres"],
HardHit=SignificanceTable[,"RsquaredThres"]&SignificanceTable[,"SpearmanPValuesThres"]&SignificanceTable[,"PearsonPValuesThres"])
assign(paste0(FileName,Target,"SignificanceTable"), SignificanceTable)
SoftHits <- rownames(SignificanceTable[which(SignificanceTable["SoftHit"]==1),])
names(SoftHits) <- SoftHits
SoftHits <- list(data.frame(t(SoftHits)))
GenesofInterest <- c(GenesofInterest, SoftHits)
}
library(plyr)
# Pad the per-protein hit lists to equal length (rbind.fill), one column per protein.
GenesofInterest <- t(do.call(rbind.fill, GenesofInterest))
colnames(GenesofInterest) <- ProteinNames
GenesofInterest[is.na(GenesofInterest)] <- ""
library(xlsx)
write.xlsx(GenesofInterest, paste0(FileName, "GenesofInterest.xlsx"), row.names = FALSE)
# Violin + jitter of per-cell UMI and gene counts.
# NOTE(review): axis labels look swapped (y holds the counts) — confirm.
ggplot(Metadata) +
geom_violin(aes(x="nUMI", y=nUMI), width=0.7, fill="red") +
geom_jitter(aes(x="nUMI", y=nUMI), width=0.2, size=4, alpha=0.6) +
geom_violin(aes(x="nGene", y=nGene), width=0.7) +
geom_jitter(aes(x="nGene", y=nGene), width=0.2, size=4, alpha=0.6) +
theme(text=element_text(family="Calibri")) +
labs(x = "Counts", y = "Metric")
# Example single gene-vs-protein regression (protein B vs ITGA10).
ggplot(IntegratedSeuratDataset, aes(x=B, y=ITGA10))+geom_point()+geom_smooth(method='lm',formula=y~x)
# Raw protein fluorescence per well, colored by cell count.
ggplot(AllprotsallPlot, aes(x=variable, y=value, color=Cells)) +
geom_jitter(width=0.3, size=4, alpha=0.6) +
scale_color_manual(values=rev(viridis(9))) +
ggtitle("Proteins") +
ylab("Fluorescence (arbitrary units)") +
xlab("Protein") +
theme(legend.position = c(0.8,0.8), text=element_text(family="Calibri"))
# Normalized protein distributions per chip (points layer disabled).
ggplot(Allprotsnormalizedplot) +
geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3) +
# geom_point(aes(x=variable, y=value, fill=Chip, size=Cells), position=position_dodge(width = 0.75), alpha=0.5) +
scale_size_discrete(range = c(1,5)) +
theme(text=element_text(family="Calibri"))
# viridis(9)
# --- Per-celltype protein comparisons, excluding the "Redo" replicate chips ---
AllprotsnormalizedNoRep <- rbind(AX206NormalizedProteins,AX207NormalizedProteins,AX208NormalizedProteins,AX218NormalizedProteins,AX219NormalizedProteins)
# Allprotsall <- rbind(AX206AllProts,AX207AllProts,AX208AllProts,AX218AllProts,AX219AllProts, AX206RedoAllProts, AX208RedoAllProts)
# Allprotsall["Chip"] <- gsub(pattern = "*X(.*)", replacement="", x=gsub(pattern = "AX",replacement="A", x=rownames(Allprotsall)))
# colnames(Allprotsall)[1:3] <- c("PKM2","c-MYC","PDHK1")
# AllprotsallPlot <- melt(Allprotsall, id=c("Cells","Beads", "Chip"))
# AllprotsallPlot[,1] <- as.factor(AllprotsallPlot[,1])
colnames(AllprotsnormalizedNoRep)[1:3] <- c("PKM2", "c-MYC", "PDHK1")
# Long format + factor coercion for plotting, then chip -> cell line mapping.
AllprotsnormalizedplotNoRep <- melt(AllprotsnormalizedNoRep, id=c("Cells", "Beads", "Chip"))
AllprotsnormalizedplotNoRep$Chip <- as.factor(AllprotsnormalizedplotNoRep$Chip)
AllprotsnormalizedplotNoRep$Cells <- as.factor(AllprotsnormalizedplotNoRep$Cells)
AllprotsnormalizedplotNoRep["Celltype"] <- NA
AllprotsnormalizedplotNoRep$Celltype[AllprotsnormalizedplotNoRep$Chip %in% c("AX206", "AX218", "AX219")] <- "U87"
AllprotsnormalizedplotNoRep$Celltype[AllprotsnormalizedplotNoRep$Chip %in% c("AX208", "AX207")] <- "HEK"
# One data frame per protein for the per-protein U87-vs-HEK t-tests.
BProts <- AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$variable=="PKM2",]
CProts <- AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$variable=="c-MYC",]
DProts <- AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$variable=="PDHK1",]
# FIX: the PKM2 test grouped BProts$value by CProts$Celltype (copy-paste from
# the c-MYC line); each protein must be grouped by its own Celltype column.
t.test(BProts$value ~ BProts$Celltype)
t.test(CProts$value ~ CProts$Celltype)
t.test(DProts$value ~ DProts$Celltype)
# Figure 3 A Set width to 500
# Normalized protein levels per cell type.
ggplot(AllprotsnormalizedplotNoRep) +
geom_boxplot(aes(x=variable, y=value, fill=Celltype), outlier.shape = 3, width = 0.5) +
scale_size_discrete(range = c(1,5)) +
scale_fill_manual(values=c(NineColScheme[1], NineColScheme[6])) +
theme(text = element_text(family = "Arial"), legend.position = c(0, 0.9)) +
coord_fixed(ratio = 1/80) +
labs(x="Protein", y="Fluorescence (a.u.)") +
theme(text=element_text(family="Arial", size = 15))
# Supfig 3 A
# Chip-to-chip reproducibility within the U87 chips.
ggplot(AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$Celltype=="U87",]) +
geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3, width = 0.5) +
scale_fill_manual(values=c(NineColScheme[1],NineColScheme[5],NineColScheme[6])) +
theme(text = element_text(family = "Arial"), legend.position = "none") +
scale_y_continuous(limits = c(-8,240)) +
coord_fixed(ratio = 1/100) +
labs(x="Protein", y="Fluorescence (a.u.)") +
theme(text=element_text(family="Arial", size = 15))
# NOTE(review): this repeats the c-MYC t-test already run above — remove if
# not needed for this figure.
t.test(CProts$value ~ CProts$Celltype)
# Same reproducibility view for the HEK chips.
ggplot(AllprotsnormalizedplotNoRep[AllprotsnormalizedplotNoRep$Celltype=="HEK",]) +
geom_boxplot(aes(x=variable, y=value, fill=Chip), outlier.shape = 3, width = 0.5) +
scale_fill_manual(values=c(NineColScheme[1],NineColScheme[6])) +
theme(text = element_text(family = "Arial"), legend.position = "none") +
scale_y_continuous(limits = c(-5,240)) +
coord_fixed(ratio = 1/80) +
labs(x="Protein", y="Fluorescence (a.u.)") +
theme(text=element_text(family="Arial", size = 15))
# --- Bead/cell loading QC ----------------------------------------------------
Allcellrawmeta <- rbind(AX206Vals, AX207Vals, AX208Vals, AX218Vals, AX219Vals, AX206RedoVals, AX208RedoVals)
ggplot(Allcellrawmeta) + geom_jitter(aes(x=Cells, y=Beads), width = 0.2, height = 0.1, alpha = 0.3)
# Figure 2 b. Reduce width by 33%
# Keep chambers with fewer than 7 cells.
# FIX: this filter statement appeared twice in a row; the second, identical
# call was redundant (the filter is idempotent) and has been removed.
NoCellIncludedMetadata <- NoCellIncludedMetadata[NoCellIncludedMetadata$Cells<7,]
NoCellIncludedMetadata$Cells <- factor(NoCellIncludedMetadata$Cells)
# Reads-per-bead by cell count.
ggplot(NoCellIncludedMetadata, aes(x=Cells, fill=Cells, y=TotalReads/Beads)) +
geom_boxplot(aes(group = Cells), alpha = 0.4, outlier.color = NA) +
geom_jitter(width=0.1) +
scale_fill_manual(values = NineColScheme) +
labs(x="Cells", y="Reads per bead") +
theme(text=element_text(family="Arial", size = 15), legend.position = "none")
# Occupancy heatmap: chamber counts per (cells, beads) combination.
CountHeatmap <- data.frame(as.matrix(NoCellIncludedMetadata[,c("Cells", "Beads")] %>% table)) #%>% group_by(Digital, Physical)
CountHeatmap$Cells <- as.numeric(as.character(CountHeatmap$Cells))
CountHeatmap$Beads <- as.numeric(as.character(CountHeatmap$Beads))
ggplot(CountHeatmap, aes(y=Cells, x=Beads, color=Freq))+geom_point(size = 9)+scale_color_gradientn(colors = c("#FFFFFF",NineColScheme[1:5]))+scale_x_continuous(breaks=0:max(CountHeatmap$Beads),limits = c(0,max(CountHeatmap$Beads)))+scale_y_continuous(breaks = 0:max(CountHeatmap$Cells), limits = c(0,max(CountHeatmap$Cells)))+theme(text=element_text(family="Arial", size = 15))+coord_fixed(ratio=1)
ggplot(NoCellIncludedMetadata) + geom_point(aes(x=Beads, y=TotalReads))
# SupFig 2 B
# Cosmetic relabeling of one chip's cell barcodes for the expression heatmaps.
TestS <- AX206RedoS
colnames(TestS@scale.data) <- gsub("AX206Redo", "", colnames(TestS@scale.data))
colnames(TestS@scale.data) <- gsub("-.", " ", colnames(TestS@scale.data))
colnames(TestS@scale.data) <- gsub("Y", "Y-", colnames(TestS@scale.data))
colnames(TestS@scale.data) <- gsub("X", "X-", colnames(TestS@scale.data))
heatmap.2(as.matrix(TestS@scale.data), trace = "none", margins = c(5,2), labRow = FALSE)
heatmap.2(as.matrix(TestS@scale.data[TestS@var.genes,]), trace = "none", margins = c(5,2), labRow = FALSE)
heatmap.2(as.matrix(AX206RedoS@scale.data), trace="none", margins = c(8,5), labRow = FALSE)
VlnPlot(object = CombinedGenesbyMerge, features.plot = c("nGene", "nUMI", "percent.mito"), nCol = 3, group.by = "orig.ident", y.lab.rot = TRUE)
FeaturePlot(CombinedGenesbyMerge, features.plot = c("B","C","D"), cols.use = c("lightgrey", "blue"), pt.size = 2, nCol = 1)
# SupFig 2
ggplot(Metadata, aes(fill=celltype)) +
geom_violin(aes(x="Genes", y=nGene), scale = "count") +
geom_violin(aes(x="Transcripts", y=nUMI), scale = "count") +
coord_fixed(ratio = 1/10000) +
scale_fill_manual(values = c(NineColScheme[1],NineColScheme[6])) +
labs(y="Counts") +
theme(text=element_text(family="Arial", size = 15), legend.position = "none")
VlnPlot(object = CombinedGenesbyMerge, features.plot = c("nGene", "nUMI", "percent.mito"), nCol = 3, group.by = "orig.ident", y.lab.rot = TRUE)
#SupFig3
FeaturePlot(CombinedGenesbyMerge, features.plot = c("D"), cols.use = c("lightgrey", NineColScheme[6]), pt.size = 4) |
library(RSQLite)
library(dplyr)
library(ggvis)
library(shiny)
library(magrittr)
library(ggplot2)
library(tidyr)
library(leaflet)  # used by plotMAP()/renderLeaflet() but was never attached
# connect to the database
db <- dbConnect(dbDriver("SQLite"), "database.sqlite")
dbGetQuery(db, "PRAGMA temp_store=2;")
# read csv file
df <- read.csv("MERGED2013_PP.csv", na.strings = "NULL")
getSAT <- function() {
  # Median SAT section scores per college for 2013; rows with missing or
  # privacy-suppressed values are filtered out in SQL.
  sat_sql <- "
SELECT INSTNM College,
SATMTMID Math,
SATVRMID Verbal,
SATWRMID Writing
FROM Scorecard
WHERE Year=2013
AND SATMTMID IS NOT NULL
AND SATMTMID != 'PrivacySuppressed'
AND SATVRMID IS NOT NULL
AND SATVRMID != 'PrivacySuppressed'
AND SATWRMID IS NOT NULL
AND SATWRMID != 'PrivacySuppressed'"
  dbGetQuery(db, sat_sql)
}
plotSAT <- function(sat) {
  # Density of each SAT section's scores across colleges.
  scores_long <- sat %>% gather(Section, Score, -College)
  base <- ggplot(scores_long, aes(x=Score, color=Section, fill=Section, group=Section))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("SAT Score") +
    ylab("")
}
printSAT <- function(sat) {
  # Print a five-number summary for each SAT section column.
  for (section in c("Math", "Verbal", "Writing")) {
    print(section)
    print(summary(sat[[section]]))
  }
}
getACT <- function() {
  # Median ACT section scores per college for 2013; rows with missing or
  # privacy-suppressed values are filtered out in SQL.
  act <- dbGetQuery(db, "
SELECT INSTNM College,
ACTCMMID Cumulative,
ACTENMID English,
ACTMTMID Math,
ACTWRMID Writing
FROM Scorecard
WHERE Year=2013
AND ACTCMMID IS NOT NULL
AND ACTCMMID != 'PrivacySuppressed'
AND ACTENMID IS NOT NULL
AND ACTENMID != 'PrivacySuppressed'
AND ACTMTMID IS NOT NULL
AND ACTMTMID != 'PrivacySuppressed'
AND ACTWRMID IS NOT NULL
AND ACTWRMID != 'PrivacySuppressed'
")
  # FIX: the original ended on the assignment, which returns the value
  # invisibly; return explicitly for consistency with getSAT()/getADM().
  return(act)
}
plotACT <- function(act) {
  # Density of each ACT section's scores across colleges.
  scores_long <- act %>% gather(Section, Score, -College)
  base <- ggplot(scores_long, aes(x=Score, color=Section, fill=Section, group=Section))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("ACT Score") +
    ylab("")
}
printACT <- function(act) {
  # Print a five-number summary for each ACT section column.
  for (section in c("Cumulative", "English", "Math", "Writing")) {
    print(section)
    print(summary(act[[section]]))
  }
}
getADM <- function() {
  # Admission rates per college for 2013 (non-null only).
  adm_sql <- "
SELECT INSTNM College,
ADM_RATE Admission
FROM Scorecard
WHERE Year = 2013
AND ADM_RATE IS NOT NULL
"
  dbGetQuery(db, adm_sql)
}
plotADM <- function(adm) {
  # Density of admission rates across colleges.
  rates_long <- adm %>% gather(Section, Score, -College)
  base <- ggplot(rates_long, aes(x=Score, color=Section, fill=Section, group=Section))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("Admission Rate") +
    ylab("")
}
printADM <- function(adm) {
  # Print a five-number summary of the admission-rate column.
  # FIX: use `<-` for assignment instead of `=`, per R style.
  rate <- summary(adm$Admission)
  print("Admission Rate")
  print(rate)
}
getENRO <- function() {
  # 2013 undergraduate enrollment for predominantly bachelor's-granting,
  # non-special-focus colleges, ranked largest first.
  enro_sql <- "
SELECT INSTNM College,
UGDS UndergradEnrollment,
CONTROL CollegeType
FROM Scorecard
WHERE Year=2013
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
ORDER BY UGDS DESC"
  enro <- dbGetQuery(db, enro_sql)
  enro <- cbind(Rank = 1:nrow(enro), enro)
  # Prefix each name with its rank and freeze the ordering as factor levels so
  # downstream ggplot output keeps the descending-enrollment order.
  enro$College <- paste0(enro$Rank, ". ", enro$College)
  enro$College <- factor(enro$College, levels = rev(enro$College))
  enro
}
plotENRO <- function(enro) {
  # Enrollment density by college control type, clipped at 20k for readability.
  base <- ggplot(enro, aes(x=UndergradEnrollment, color=CollegeType, fill=CollegeType, group=CollegeType))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("Undergraduate Enrollment") +
    ylab("") +
    xlim(0, 20000)
}
printENRO <- function() {
  # Print enrollment five-number summaries for each college control type.
  public <- dbGetQuery(db, "
SELECT INSTNM College,
UGDS UndergradEnrollment
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Public'
AND PREDDEG = 'Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
")
  private_p <- dbGetQuery(db, "
SELECT INSTNM College,
UGDS UndergradEnrollment
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private for-profit'
AND PREDDEG = 'Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
")
  private_np <- dbGetQuery(db, "
SELECT INSTNM College,
UGDS UndergradEnrollment
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private nonprofit'
AND PREDDEG = 'Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
")
  # FIX: summary() was applied to the whole data frame (which also summarises
  # the College name column); summarise the enrollment column instead, matching
  # the pattern used in printSAT()/printACT(). Also `<-` instead of `=`.
  p <- summary(public$UndergradEnrollment)
  pp <- summary(private_p$UndergradEnrollment)
  pnp <- summary(private_np$UndergradEnrollment)
  print("Public")
  print(p)
  print("Private for-profit")
  print(pp)
  print("Private nonprofit")
  print(pnp)
}
getCOST <- function() {
  # 2013 average cost of attendance, ranked most expensive first, for
  # predominantly bachelor's-granting, non-special-focus colleges.
  cost_sql <- "
SELECT INSTNM College,
COSTT4_A Cost,
CONTROL CollegeType
FROM Scorecard
WHERE Year=2013
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
ORDER BY COSTT4_A DESC"
  cost <- dbGetQuery(db, cost_sql)
  cost <- cbind(Rank = 1:nrow(cost), cost)
  # Rank-prefixed names with frozen factor order for plotting.
  cost$College <- paste0(cost$Rank, ". ", cost$College)
  cost$College <- factor(cost$College, levels = rev(cost$College))
  cost
}
plotCOST <- function(cost) {
  # Cost-of-attendance density by college control type.
  base <- ggplot(cost, aes(x=Cost, color=CollegeType, fill=CollegeType, group=CollegeType))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("Cost of Attendance") +
    ylab("")
}
printCOST <- function() {
  # Print cost-of-attendance five-number summaries per college control type.
  public <- dbGetQuery(db, "
SELECT INSTNM College,
COSTT4_A Cost
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Public'
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
")
  private_p <- dbGetQuery(db, "
SELECT INSTNM College,
COSTT4_A Cost
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private for-profit'
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
")
  private_np <- dbGetQuery(db, "
SELECT INSTNM College,
COSTT4_A Cost
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private nonprofit'
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
")
  # FIX: summary() was applied to the whole data frame (including the College
  # name column); summarise the Cost column instead, matching printSAT()/
  # printACT(). Also `<-` instead of `=`.
  p <- summary(public$Cost)
  pp <- summary(private_p$Cost)
  pnp <- summary(private_np$Cost)
  print("Public")
  print(p)
  print("Private for-profit")
  print(pp)
  print("Private nonprofit")
  print(pnp)
}
plotEARN <- function() {
# Boxplot (10/25/50/75/90th percentiles) of 10-years-out earnings for the top
# 15 colleges by 75th-percentile earnings. 2011 rows carry the earnings data;
# the self-join to 2013 rows supplies the CCBASIC classification filter.
earnings <- dbGetQuery(db, "
SELECT s11.INSTNM College,
s11.CONTROL CollegeType,
s11.md_earn_wne_p10 e50,
s11.pct10_earn_wne_p10 e10,
s11.pct25_earn_wne_p10 e25,
s11.pct75_earn_wne_p10 e75,
s11.pct90_earn_wne_p10 e90
FROM Scorecard s11
-- We have to do a self-join because the CCBASIC metadata is only attached to 2013 data
-- And 2013 data has no 10 year out earnings data
INNER JOIN Scorecard s13 ON s11.UNITID=s13.UNITID
WHERE s11.Year=2011
AND s13.Year=2013
AND s11.pct75_earn_wne_p10 IS NOT NULL
AND s11.pct75_earn_wne_p10 != 'PrivacySuppressed'
AND s11.PREDDEG = 'Predominantly bachelor''s-degree granting'
--Filter out medical schools and the like that are mislabeled as predominantly bachelor's-degree granting
AND s13.CCBASIC NOT LIKE '%Special%'
ORDER BY s11.pct75_earn_wne_p10 DESC")
# Rank-prefix the names and freeze factor order so the flipped axis lists
# colleges in descending-earnings order.
earnings <- cbind(Rank=1:nrow(earnings), earnings)
earnings$College <- paste(earnings$Rank, earnings$College, sep=". ")
earnings$College <- factor(earnings$College, levels=rev(earnings$College))
# Pre-computed box stats via stat="identity"; labels show the 75th percentile.
# NOTE(review): ymax inside geom_text's aes() looks redundant with the label
# positioning — confirm it is needed on the installed ggplot2 version.
ggplot(earnings[1:15,], aes(x=College, ymin=e10, lower=e25, middle=e50, upper=e75, ymax=e90)) +
geom_boxplot(stat="identity", fill="#0099ff") +
geom_text(aes(x=College, y=e75-2000, ymax=e75, hjust=0.95, label=paste0("$", e75)), size=4) +
theme_light(base_size=16) +
theme(axis.text.y = element_text(hjust=0, color="black")) +
coord_flip() +
xlab("") + ylab("")
}
fetchDATA <- function() {
  # Subset of the merged scorecard CSV (global `df`) used by the map view.
  wanted <- c(
    "INSTNM",         ## institution name
    "CITY",           ## institution city
    "STABBR",         ## institution state abbrev
    "ZIP",            ## institution zip
    "PREDDEG",        ## predominate degree
    "CURROPER",       ## currently operating flag
    "CONTROL",        ## type of school
    "TUITIONFEE_IN",  ## in-state tuition and fees
    "DISTANCEONLY",   ## distance only flag
    "LATITUDE",       ## latitude
    "LONGITUDE",      ## longitude
    "GRAD_DEBT_MDN"   ## median debt
  )
  dplyr::select(df, dplyr::all_of(wanted))
}
plotMAP <- function(schools) {
# Clustered leaflet map of bachelor's-granting schools in the contiguous US,
# with a popup showing name, location, and median graduate debt.
# NOTE(review): leaflet() / addTiles() / addMarkers() require the leaflet
# package to be attached — it is not loaded in this file's library() calls;
# verify it is attached elsewhere (e.g. in ui.R or global.R).
uniInfo <- paste(schools[['INSTNM']], "<br>", schools[['CITY']], ", ",
schools[['STABBR']], schools[['ZIP']], "<br> Median debt: $",
schools[['GRAD_DEBT_MDN']], sep='')
schools$info <- uniInfo
## filter data
schools<-filter(schools,
PREDDEG==3 & ## Predominate degree is BS
CURROPER==1 & ## Currently operating
DISTANCEONLY==0 & ## Not distance
is.na(TUITIONFEE_IN)==FALSE & ## Key measurements aren't missing
is.na(LATITUDE)==FALSE &
is.na(LONGITUDE)==FALSE &
LATITUDE>20 & LATITUDE<50 & ## Location is US 48
LONGITUDE>(-130) & LONGITUDE<(-60)
)
# Initial view centered on the continental US; markers cluster when zoomed out.
map <- leaflet(schools) %>%
setView(-93.65, 42.0285, zoom = 4) %>%
addTiles() %>%
addMarkers(~LONGITUDE, ~LATITUDE, popup=~info,
options = popupOptions(closeButton = TRUE),
clusterOptions = markerClusterOptions())
map
}
# Shiny server: renders the plot/summary pair selected by input$plot, plus the
# school map. Each get*() result is stashed in a global via <<- so the matching
# print*() call in renderPrint can reuse it without re-querying the database.
shinyServer(function(input, output) {
schools <- fetchDATA()
output$plot <- renderPlot({
switch(input$plot,
"SAT Scores" = {
sat <<- getSAT()
plotSAT(sat)
},
"ACT Scores" = {
act <<- getACT()
plotACT(act)
},
"Admission Rate" = {
adm <<- getADM()
plotADM(adm)
},
"Undergraduate Enrollment" = {
enro <<- getENRO()
plotENRO(enro)
},
"Cost of Attendance" = {
cost <<- getCOST()
plotCOST(cost)
},
# NOTE(review): "meadian" is misspelled, but this string must match the
# choice label defined in the UI — fix both together or not at all.
"Top 15 meadian earnings" = {
plotEARN()
}
)})
# No summary branch exists for the earnings view; renderPrint yields nothing
# for that selection.
output$summary <- renderPrint({
switch(input$plot,
"SAT Scores" = printSAT(sat),
"ACT Scores" = printACT(act),
"Admission Rate" = printADM(adm),
"Undergraduate Enrollment" = printENRO(),
"Cost of Attendance" = printCOST()
)
})
output$map <- renderLeaflet({
plotMAP(schools)
})
}) | /server.R | no_license | luciferpop/DATA-MINING-IN-US-EDUCATION-DATASETS | R | false | false | 11,662 | r | library(RSQLite)
library(dplyr)
library(ggvis)
library(shiny)
library(magrittr)
library(ggplot2)
library(tidyr)
# connect to the database
db <- dbConnect(dbDriver("SQLite"), "database.sqlite")
dbGetQuery(db, "PRAGMA temp_store=2;")
# read csv file
df <- read.csv("MERGED2013_PP.csv", na.strings = "NULL")
getSAT <- function() {
  # Median SAT section scores per college for 2013; rows with missing or
  # privacy-suppressed values are filtered out in SQL.
  sat_sql <- "
SELECT INSTNM College,
SATMTMID Math,
SATVRMID Verbal,
SATWRMID Writing
FROM Scorecard
WHERE Year=2013
AND SATMTMID IS NOT NULL
AND SATMTMID != 'PrivacySuppressed'
AND SATVRMID IS NOT NULL
AND SATVRMID != 'PrivacySuppressed'
AND SATWRMID IS NOT NULL
AND SATWRMID != 'PrivacySuppressed'"
  dbGetQuery(db, sat_sql)
}
plotSAT <- function(sat) {
  # Density of each SAT section's scores across colleges.
  scores_long <- sat %>% gather(Section, Score, -College)
  base <- ggplot(scores_long, aes(x=Score, color=Section, fill=Section, group=Section))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("SAT Score") +
    ylab("")
}
printSAT <- function(sat) {
  # Print a five-number summary for each SAT section column.
  for (section in c("Math", "Verbal", "Writing")) {
    print(section)
    print(summary(sat[[section]]))
  }
}
getACT <- function() {
  # Median ACT section scores per college for 2013; rows with missing or
  # privacy-suppressed values are filtered out in SQL.
  act <- dbGetQuery(db, "
SELECT INSTNM College,
ACTCMMID Cumulative,
ACTENMID English,
ACTMTMID Math,
ACTWRMID Writing
FROM Scorecard
WHERE Year=2013
AND ACTCMMID IS NOT NULL
AND ACTCMMID != 'PrivacySuppressed'
AND ACTENMID IS NOT NULL
AND ACTENMID != 'PrivacySuppressed'
AND ACTMTMID IS NOT NULL
AND ACTMTMID != 'PrivacySuppressed'
AND ACTWRMID IS NOT NULL
AND ACTWRMID != 'PrivacySuppressed'
")
  # FIX: the original ended on the assignment, which returns the value
  # invisibly; return explicitly for consistency with getSAT()/getADM().
  return(act)
}
plotACT <- function(act) {
  # Density of each ACT section's scores across colleges.
  scores_long <- act %>% gather(Section, Score, -College)
  base <- ggplot(scores_long, aes(x=Score, color=Section, fill=Section, group=Section))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("ACT Score") +
    ylab("")
}
printACT <- function(act) {
  # Print a five-number summary for each ACT section column.
  for (section in c("Cumulative", "English", "Math", "Writing")) {
    print(section)
    print(summary(act[[section]]))
  }
}
getADM <- function() {
  # Admission rates per college for 2013 (non-null only).
  adm_sql <- "
SELECT INSTNM College,
ADM_RATE Admission
FROM Scorecard
WHERE Year = 2013
AND ADM_RATE IS NOT NULL
"
  dbGetQuery(db, adm_sql)
}
plotADM <- function(adm) {
  # Density of admission rates across colleges.
  rates_long <- adm %>% gather(Section, Score, -College)
  base <- ggplot(rates_long, aes(x=Score, color=Section, fill=Section, group=Section))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("Admission Rate") +
    ylab("")
}
printADM <- function(adm) {
  # Print a five-number summary of the admission-rate column.
  # FIX: use `<-` for assignment instead of `=`, per R style.
  rate <- summary(adm$Admission)
  print("Admission Rate")
  print(rate)
}
getENRO <- function() {
  # 2013 undergraduate enrollment for predominantly bachelor's-granting,
  # non-special-focus colleges, ranked largest first.
  enro_sql <- "
SELECT INSTNM College,
UGDS UndergradEnrollment,
CONTROL CollegeType
FROM Scorecard
WHERE Year=2013
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
ORDER BY UGDS DESC"
  enro <- dbGetQuery(db, enro_sql)
  enro <- cbind(Rank = 1:nrow(enro), enro)
  # Prefix each name with its rank and freeze the ordering as factor levels so
  # downstream ggplot output keeps the descending-enrollment order.
  enro$College <- paste0(enro$Rank, ". ", enro$College)
  enro$College <- factor(enro$College, levels = rev(enro$College))
  enro
}
plotENRO <- function(enro) {
  # Enrollment density by college control type, clipped at 20k for readability.
  base <- ggplot(enro, aes(x=UndergradEnrollment, color=CollegeType, fill=CollegeType, group=CollegeType))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("Undergraduate Enrollment") +
    ylab("") +
    xlim(0, 20000)
}
printENRO <- function() {
  # Print enrollment five-number summaries for each college control type.
  public <- dbGetQuery(db, "
SELECT INSTNM College,
UGDS UndergradEnrollment
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Public'
AND PREDDEG = 'Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
")
  private_p <- dbGetQuery(db, "
SELECT INSTNM College,
UGDS UndergradEnrollment
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private for-profit'
AND PREDDEG = 'Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
")
  private_np <- dbGetQuery(db, "
SELECT INSTNM College,
UGDS UndergradEnrollment
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private nonprofit'
AND PREDDEG = 'Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND UGDS IS NOT NULL
AND UGDS>0
")
  # FIX: summary() was applied to the whole data frame (which also summarises
  # the College name column); summarise the enrollment column instead, matching
  # the pattern used in printSAT()/printACT(). Also `<-` instead of `=`.
  p <- summary(public$UndergradEnrollment)
  pp <- summary(private_p$UndergradEnrollment)
  pnp <- summary(private_np$UndergradEnrollment)
  print("Public")
  print(p)
  print("Private for-profit")
  print(pp)
  print("Private nonprofit")
  print(pnp)
}
getCOST <- function() {
  # 2013 average cost of attendance, ranked most expensive first, for
  # predominantly bachelor's-granting, non-special-focus colleges.
  cost_sql <- "
SELECT INSTNM College,
COSTT4_A Cost,
CONTROL CollegeType
FROM Scorecard
WHERE Year=2013
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
ORDER BY COSTT4_A DESC"
  cost <- dbGetQuery(db, cost_sql)
  cost <- cbind(Rank = 1:nrow(cost), cost)
  # Rank-prefixed names with frozen factor order for plotting.
  cost$College <- paste0(cost$Rank, ". ", cost$College)
  cost$College <- factor(cost$College, levels = rev(cost$College))
  cost
}
plotCOST <- function(cost) {
  # Cost-of-attendance density by college control type.
  base <- ggplot(cost, aes(x=Cost, color=CollegeType, fill=CollegeType, group=CollegeType))
  base +
    geom_density(alpha=0.3) +
    theme_light(base_size=16) +
    xlab("Cost of Attendance") +
    ylab("")
}
printCOST <- function() {
  # Print cost-of-attendance five-number summaries per college control type.
  public <- dbGetQuery(db, "
SELECT INSTNM College,
COSTT4_A Cost
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Public'
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
")
  private_p <- dbGetQuery(db, "
SELECT INSTNM College,
COSTT4_A Cost
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private for-profit'
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
")
  private_np <- dbGetQuery(db, "
SELECT INSTNM College,
COSTT4_A Cost
FROM Scorecard
WHERE Year = 2013
AND CONTROL = 'Private nonprofit'
AND PREDDEG='Predominantly bachelor''s-degree granting'
AND CCBASIC NOT LIKE '%Special Focus%'
AND COSTT4_A IS NOT NULL
")
  # FIX: summary() was applied to the whole data frame (including the College
  # name column); summarise the Cost column instead, matching printSAT()/
  # printACT(). Also `<-` instead of `=`.
  p <- summary(public$Cost)
  pp <- summary(private_p$Cost)
  pnp <- summary(private_np$Cost)
  print("Public")
  print(p)
  print("Private for-profit")
  print(pp)
  print("Private nonprofit")
  print(pnp)
}
# Box-and-whisker plot of 10-years-out earnings percentiles (10/25/50/75/90)
# for the 15 institutions with the highest 75th-percentile earnings.
plotEARN <- function() {
  earnings <- dbGetQuery(db, "
SELECT s11.INSTNM College,
s11.CONTROL CollegeType,
s11.md_earn_wne_p10 e50,
s11.pct10_earn_wne_p10 e10,
s11.pct25_earn_wne_p10 e25,
s11.pct75_earn_wne_p10 e75,
s11.pct90_earn_wne_p10 e90
FROM Scorecard s11
-- We have to do a self-join because the CCBASIC metadata is only attached to 2013 data
-- And 2013 data has no 10 year out earnings data
INNER JOIN Scorecard s13 ON s11.UNITID=s13.UNITID
WHERE s11.Year=2011
AND s13.Year=2013
AND s11.pct75_earn_wne_p10 IS NOT NULL
AND s11.pct75_earn_wne_p10 != 'PrivacySuppressed'
AND s11.PREDDEG = 'Predominantly bachelor''s-degree granting'
--Filter out medical schools and the like that are mislabeled as predominantly bachelor's-degree granting
AND s13.CCBASIC NOT LIKE '%Special%'
ORDER BY s11.pct75_earn_wne_p10 DESC")
  # NOTE(review): pct75_earn_wne_p10 can contain text ('PrivacySuppressed'),
  # so if the column has TEXT affinity this ORDER BY may sort values
  # lexicographically rather than numerically -- confirm the column type or
  # CAST it to a number in the query.
  # seq_len() replaces 1:nrow(), which would yield c(1, 0) on an empty result.
  earnings <- cbind(Rank = seq_len(nrow(earnings)), earnings)
  earnings$College <- paste(earnings$Rank, earnings$College, sep = ". ")
  earnings$College <- factor(earnings$College, levels = rev(earnings$College))
  # Top 15 rows (highest e75); percentiles are drawn as a pre-computed
  # boxplot (stat = "identity"), labelled with the 75th-percentile value.
  ggplot(earnings[1:15, ], aes(x = College, ymin = e10, lower = e25,
                               middle = e50, upper = e75, ymax = e90)) +
    geom_boxplot(stat = "identity", fill = "#0099ff") +
    geom_text(aes(x = College, y = e75 - 2000, ymax = e75, hjust = 0.95,
                  label = paste0("$", e75)), size = 4) +
    theme_light(base_size = 16) +
    theme(axis.text.y = element_text(hjust = 0, color = "black")) +
    coord_flip() +
    xlab("") + ylab("")
}
# Pull the subset of Scorecard columns needed by the map view from the
# globally loaded data frame `df`.
fetchDATA <- function() {
  wanted <- c(
    "INSTNM",         ## institution name
    "CITY",           ## institution city
    "STABBR",         ## institution state abbrev
    "ZIP",            ## institution zip
    "PREDDEG",        ## predominate degree
    "CURROPER",       ## currently operating flag
    "CONTROL",        ## type of school
    "TUITIONFEE_IN",  ## in-state tuition and fees
    "DISTANCEONLY",   ## distance only flag
    "LATITUDE",       ## latitude
    "LONGITUDE",      ## longitude
    "GRAD_DEBT_MDN"   ## median debt
  )
  dplyr::select(df, dplyr::all_of(wanted))
}
# Render a clustered leaflet map of continental-US bachelor's institutions;
# each marker popup shows name, location and median graduate debt.
# Idiom fix: `is.na(x)==FALSE` comparisons replaced with `!is.na(x)`.
plotMAP <- function(schools) {
  # Assemble the popup HTML before filtering so rows stay aligned.
  uniInfo <- paste(schools[['INSTNM']], "<br>", schools[['CITY']], ", ",
                   schools[['STABBR']], schools[['ZIP']], "<br> Median debt: $",
                   schools[['GRAD_DEBT_MDN']], sep='')
  schools$info <- uniInfo
  ## filter data
  schools <- filter(schools,
                    PREDDEG == 3 &             ## Predominate degree is BS
                    CURROPER == 1 &            ## Currently operating
                    DISTANCEONLY == 0 &        ## Not distance-only
                    !is.na(TUITIONFEE_IN) &    ## Key measurements aren't missing
                    !is.na(LATITUDE) &
                    !is.na(LONGITUDE) &
                    LATITUDE > 20 & LATITUDE < 50 &    ## Location is US 48
                    LONGITUDE > (-130) & LONGITUDE < (-60))
  # Cluster nearby markers; popups use the assembled info string.
  map <- leaflet(schools) %>%
    setView(-93.65, 42.0285, zoom = 4) %>%
    addTiles() %>%
    addMarkers(~LONGITUDE, ~LATITUDE, popup = ~info,
               options = popupOptions(closeButton = TRUE),
               clusterOptions = markerClusterOptions())
  map
}
# Shiny server: renders the selected plot, its text summary, and the campus
# map.  (A stray " |" token left after the closing paren by file
# concatenation has been removed.)
shinyServer(function(input, output) {
  schools <- fetchDATA()

  # Main plot.  Each branch stashes its data frame in the global
  # environment with `<<-` so output$summary below can summarise the same
  # data; this is render-order dependent -- a reactive value would be the
  # safer idiom.
  output$plot <- renderPlot({
    switch(input$plot,
           "SAT Scores" = {
             sat <<- getSAT()
             plotSAT(sat)
           },
           "ACT Scores" = {
             act <<- getACT()
             plotACT(act)
           },
           "Admission Rate" = {
             adm <<- getADM()
             plotADM(adm)
           },
           "Undergraduate Enrollment" = {
             enro <<- getENRO()
             plotENRO(enro)
           },
           "Cost of Attendance" = {
             cost <<- getCOST()
             plotCOST(cost)
           },
           # NOTE(review): the "meadian" typo must match the choice label
           # defined in ui.R exactly; correct both files together.
           "Top 15 meadian earnings" = {
             plotEARN()
           }
    )})

  # Text summary for the current selection (the earnings plot has none).
  output$summary <- renderPrint({
    switch(input$plot,
           "SAT Scores" = printSAT(sat),
           "ACT Scores" = printACT(act),
           "Admission Rate" = printADM(adm),
           "Undergraduate Enrollment" = printENRO(),
           "Cost of Attendance" = printCOST()
    )
  })

  # Leaflet map of campuses with debt popups.
  output$map <- renderLeaflet({
    plotMAP(schools)
  })
})
# ---- Data loading ----
# Load per-well aggregate statistics (Aggregate.Rdata supplies CD45_Agg,
# PE_Agg and FITC_Agg used below), then the raw well intensities for this
# run.  Idiom fixes: library() instead of require() (errors loudly if the
# package is missing) and `<-` for assignment.
# NOTE(review): absolute setwd() paths make this script machine-specific;
# robustbase is attached but no robustbase function appears in this script
# -- confirm it is actually needed.
library(robustbase)
setwd("C:/Lab/Patient Samples Nanowell/New Code for Median Hypothesis Testing")
load("Aggregate.Rdata")
setwd("C:/Lab/Patient Samples Nanowell/New Code for Median Hypothesis Testing/20150406 Diehn 4")
CD45 <- read.csv("HEX_Before.csv", header = TRUE)
PE <- read.csv("HEX_After.csv", header = TRUE)
FITC <- read.csv("FAM_After.csv", header = TRUE)
# Drop wells with no CD45 75th-percentile reading from all three frames.
# CD45 itself must be filtered LAST: PE and FITC are masked against the
# unfiltered CD45 rows so the three frames stay row-aligned.
PE <- subset(PE, !(is.na(CD45$O_75th)))
FITC <- subset(FITC, !(is.na(CD45$O_75th)))
CD45 <- subset(CD45, !(is.na(CD45$O_75th)))
# ---- Pre-PCR CD45 gating ----
# One-sided Bonferroni-style cutoff: |qt(0.05/n, n)| with n = well count.
# NOTE(review): degrees of freedom are nrow(CD45) rather than nrow - 1 --
# confirm which is intended.
Sig_Value <- abs(qt(0.05/nrow(CD45), nrow(CD45)))
# Background-normalised CD45 signal per well.
CD45.corr <- (CD45$O_75th-CD45$Sq_Mean)/CD45$Sq_Mean
Med.corr <- CD45_Agg$CD45_Mean
SD.corr <- CD45_Agg$CD45_SD
# Plot code: 2 = above threshold (red triangles), 1 = below (black circles).
CD45.possible <- as.numeric((CD45.corr-Med.corr-Sig_Value*SD.corr)>0)+1
#tiff("Pre_PCR.tiff",width=421,height=355)
plot(CD45.corr,ylab = "CD45-PE Intensity (A.U.)",xlab = "Well Index",main="Pre-PCR",col=CD45.possible,pch=CD45.possible)
abline(h=Med.corr + Sig_Value*SD.corr,col=3)
# Fix: the original legend supplied one label for two symbol entries
# (col/pch/lty of length 2); the threshold line is now labelled too,
# matching the 'Bonferroni' naming of the later post-PCR legend.
legend("topright",c('High Mean','Bonferroni'),col=c(2,3),pch=c(2,NA),lty=c(NA,1),cex=0.7,ncol=2)
#dev.off()
# Wells flagged CD45-high (presumed WBCs) by the Bonferroni cutoff.
CTC_Bonferroni_TF <- (CD45.corr>(Med.corr + Sig_Value*SD.corr))
table(CTC_Bonferroni_TF)
# CD45-subtracted by Bonferroni
# Keep only wells NOT flagged CD45-high above, then recompute a
# Bonferroni-corrected one-sided cutoff for the remaining well count.
PE_B = subset(PE,!(CTC_Bonferroni_TF))
FITC_B = subset(FITC,!(CTC_Bonferroni_TF))
Sig_Value_B = abs(qt(0.05/nrow(PE_B),nrow(PE_B)))
# Background-normalised PE signal: (75th percentile - square mean) relative
# to the square mean.
PE_B.corr = (PE_B$O_75th-PE_B$Sq_Mean)/PE_B$Sq_Mean
# Location/scale from the pre-computed aggregates loaded from
# Aggregate.Rdata.  The spread term averages the SD and SN columns --
# presumably two alternative spread estimates; confirm against the script
# that builds the aggregates.
PE_B.Med.corr = PE_Agg$PE_Mean
PE_B.SD.corr = (PE_Agg$PE_SD+PE_Agg$PE_SN)/2
# 1 if the well exceeds the PE threshold, else 0.
PE_B.possible = as.numeric((PE_B.corr-PE_B.Med.corr-Sig_Value_B*PE_B.SD.corr)>0)*1
# Same normalisation and test for the FITC channel, weighted by 2 so the
# two flags combine into a unique code per well.
FITC_B.corr = (FITC_B$O_75th-FITC_B$Sq_Mean)/FITC_B$Sq_Mean
FITC_B.Med.corr = FITC_Agg$FITC_Mean
FITC_B.SD.corr = (FITC_Agg$FITC_SD+FITC_Agg$FITC_SN)/2
FITC_B.possible = as.numeric((FITC_B.corr-FITC_B.Med.corr-Sig_Value_B*FITC_B.SD.corr)>0)*2
# Colour code for the scatter: 1 = neither, 2 = PE high, 3 = FITC high,
# 4 = both high.
All_B.possible = PE_B.possible + FITC_B.possible + 1
#tiff("PostPCR_MeanSub.tiff",width=421,height=355)
# Scatter of the two background-normalised channels, coloured by which
# thresholds each well exceeds.  Fix: a stray empty argument ",," in the
# original plot() call has been removed.
plot(x=FITC_B.corr,y=PE_B.corr,ylab = "PE Intensity (A.U.)",xlab = "FITC Intensity (A.U.)",main="Post-PCR (Mean subtracted)",col=All_B.possible)
# Green threshold lines for each channel.
abline(h=(PE_B.Med.corr + Sig_Value_B*PE_B.SD.corr),col=3)
abline(v=(FITC_B.Med.corr + Sig_Value_B*FITC_B.SD.corr),col=3)
legend("topright",c('PE High','FITC High','PE/FITC High','Bonferroni'),col=c(2,3,4,3),pch=c(1,1,1,NA),lty=c(NA,NA,NA,1),cex=0.6,ncol=2,bg='transparent')
#dev.off()
Corr_Int_B <- data.frame(FITC_B.corr,PE_B.corr)
# Fix(review): in the original script the PE flag tested the FITC column
# and vice versa; the comparisons are re-paired here so each flag's name
# matches the channel it thresholds.  Verify against downstream consumers
# of the table output.
CTC_B_PE_TF_NoWBC <- (Corr_Int_B$PE_B.corr>(PE_B.Med.corr + Sig_Value_B*PE_B.SD.corr))
CTC_B_FITC_TF_NoWBC <- (Corr_Int_B$FITC_B.corr>(FITC_B.Med.corr + Sig_Value_B*FITC_B.SD.corr))
# Cross-tabulate PE-high against FITC-high well counts.
table(CTC_B_PE_TF_NoWBC,CTC_B_FITC_TF_NoWBC)
| /20150406 Diehn 5/ImageAnalysisMedianAgg.R | no_license | ooichinchun/Nanowell-Code | R | false | false | 2,547 | r | require(robustbase)
# ---- Data loading ----
# Load per-well aggregate statistics (Aggregate.Rdata supplies CD45_Agg,
# PE_Agg and FITC_Agg used below), then the raw well intensities for this
# run.  Idiom fix: `<-` for assignment.
# NOTE(review): absolute setwd() paths make this script machine-specific.
setwd("C:/Lab/Patient Samples Nanowell/New Code for Median Hypothesis Testing")
load("Aggregate.Rdata")
setwd("C:/Lab/Patient Samples Nanowell/New Code for Median Hypothesis Testing/20150406 Diehn 4")
CD45 <- read.csv("HEX_Before.csv", header = TRUE)
PE <- read.csv("HEX_After.csv", header = TRUE)
FITC <- read.csv("FAM_After.csv", header = TRUE)
# Drop wells with no CD45 75th-percentile reading from all three frames.
# CD45 itself must be filtered LAST: PE and FITC are masked against the
# unfiltered CD45 rows so the three frames stay row-aligned.
PE <- subset(PE, !(is.na(CD45$O_75th)))
FITC <- subset(FITC, !(is.na(CD45$O_75th)))
CD45 <- subset(CD45, !(is.na(CD45$O_75th)))
# ---- Pre-PCR CD45 gating ----
# One-sided Bonferroni-style cutoff: |qt(0.05/n, n)| with n = well count.
# NOTE(review): degrees of freedom are nrow(CD45) rather than nrow - 1 --
# confirm which is intended.
Sig_Value <- abs(qt(0.05/nrow(CD45), nrow(CD45)))
# Background-normalised CD45 signal per well.
CD45.corr <- (CD45$O_75th-CD45$Sq_Mean)/CD45$Sq_Mean
Med.corr <- CD45_Agg$CD45_Mean
SD.corr <- CD45_Agg$CD45_SD
# Plot code: 2 = above threshold (red triangles), 1 = below (black circles).
CD45.possible <- as.numeric((CD45.corr-Med.corr-Sig_Value*SD.corr)>0)+1
#tiff("Pre_PCR.tiff",width=421,height=355)
plot(CD45.corr,ylab = "CD45-PE Intensity (A.U.)",xlab = "Well Index",main="Pre-PCR",col=CD45.possible,pch=CD45.possible)
abline(h=Med.corr + Sig_Value*SD.corr,col=3)
# Fix: the original legend supplied one label for two symbol entries
# (col/pch/lty of length 2); the threshold line is now labelled too,
# matching the 'Bonferroni' naming of the later post-PCR legend.
legend("topright",c('High Mean','Bonferroni'),col=c(2,3),pch=c(2,NA),lty=c(NA,1),cex=0.7,ncol=2)
#dev.off()
# Wells flagged CD45-high (presumed WBCs) by the Bonferroni cutoff.
CTC_Bonferroni_TF <- (CD45.corr>(Med.corr + Sig_Value*SD.corr))
table(CTC_Bonferroni_TF)
# CD45-subtracted by Bonferroni
# Keep only wells NOT flagged CD45-high above, then recompute a
# Bonferroni-corrected one-sided cutoff for the remaining well count.
PE_B = subset(PE,!(CTC_Bonferroni_TF))
FITC_B = subset(FITC,!(CTC_Bonferroni_TF))
Sig_Value_B = abs(qt(0.05/nrow(PE_B),nrow(PE_B)))
# Background-normalised PE signal: (75th percentile - square mean) relative
# to the square mean.
PE_B.corr = (PE_B$O_75th-PE_B$Sq_Mean)/PE_B$Sq_Mean
# Location/scale from the pre-computed aggregates loaded from
# Aggregate.Rdata.  The spread term averages the SD and SN columns --
# presumably two alternative spread estimates; confirm against the script
# that builds the aggregates.
PE_B.Med.corr = PE_Agg$PE_Mean
PE_B.SD.corr = (PE_Agg$PE_SD+PE_Agg$PE_SN)/2
# 1 if the well exceeds the PE threshold, else 0.
PE_B.possible = as.numeric((PE_B.corr-PE_B.Med.corr-Sig_Value_B*PE_B.SD.corr)>0)*1
# Same normalisation and test for the FITC channel, weighted by 2 so the
# two flags combine into a unique code per well.
FITC_B.corr = (FITC_B$O_75th-FITC_B$Sq_Mean)/FITC_B$Sq_Mean
FITC_B.Med.corr = FITC_Agg$FITC_Mean
FITC_B.SD.corr = (FITC_Agg$FITC_SD+FITC_Agg$FITC_SN)/2
FITC_B.possible = as.numeric((FITC_B.corr-FITC_B.Med.corr-Sig_Value_B*FITC_B.SD.corr)>0)*2
# Colour code for the scatter: 1 = neither, 2 = PE high, 3 = FITC high,
# 4 = both high.
All_B.possible = PE_B.possible + FITC_B.possible + 1
#tiff("PostPCR_MeanSub.tiff",width=421,height=355)
# Scatter of the two background-normalised channels, coloured by which
# thresholds each well exceeds.  Fix: a stray empty argument ",," in the
# original plot() call has been removed.
plot(x=FITC_B.corr,y=PE_B.corr,ylab = "PE Intensity (A.U.)",xlab = "FITC Intensity (A.U.)",main="Post-PCR (Mean subtracted)",col=All_B.possible)
# Green threshold lines for each channel.
abline(h=(PE_B.Med.corr + Sig_Value_B*PE_B.SD.corr),col=3)
abline(v=(FITC_B.Med.corr + Sig_Value_B*FITC_B.SD.corr),col=3)
legend("topright",c('PE High','FITC High','PE/FITC High','Bonferroni'),col=c(2,3,4,3),pch=c(1,1,1,NA),lty=c(NA,NA,NA,1),cex=0.6,ncol=2,bg='transparent')
#dev.off()
Corr_Int_B <- data.frame(FITC_B.corr,PE_B.corr)
# Fix(review): in the original script the PE flag tested the FITC column
# and vice versa; the comparisons are re-paired here so each flag's name
# matches the channel it thresholds.  Verify against downstream consumers
# of the table output.
CTC_B_PE_TF_NoWBC <- (Corr_Int_B$PE_B.corr>(PE_B.Med.corr + Sig_Value_B*PE_B.SD.corr))
CTC_B_FITC_TF_NoWBC <- (Corr_Int_B$FITC_B.corr>(FITC_B.Med.corr + Sig_Value_B*FITC_B.SD.corr))
# Cross-tabulate PE-high against FITC-high well counts.
table(CTC_B_PE_TF_NoWBC,CTC_B_FITC_TF_NoWBC)
|
################################################################################
# #
# Script for preparing the raw NLSY79 data                                     #
# for incorporation into the analysis dataset.                                 #
# This code was automatically generated by the NLSY Investigator and #
# modified slightly to use here() for relative file paths rather #
# than setting a working directory with an absolute path. #
# #
# This script is called by analysis.r #
# It is not meant to be independently executed. #
# #
# Project: Challenging the Link Between Early Childhood Television Exposure #
# and Later Attention Problems: A Multiverse Analysis #
# Investigators: Matt McBee, Wallace Dixon, & Rebecca Brand #
# Programmer: Matt McBee #
# mcbeem@etsu.edu #
# #
################################################################################
library(here)
# Raw NLSY79 Investigator extract: whitespace-delimited with no header row
# (column names are assigned immediately below), located relative to the
# project root via here().
new_data <- read.table(here("Data", "NLSY_raw.dat"))
# Attach the NLSY Investigator reference numbers as column names, in the
# column order of the raw extract.
names(new_data) <- c(
  "A0002600", "R0000100", "R0173600", "R0214700", "R0214800",
  "R2350020", "R2509000", "R2722500", "R2724700", "R2724701",
  "R2726800", "R2727300", "R2731700", "R2870200", "R2872700",
  "R2872800", "R3110200", "R3400700", "R3403100", "R3403200",
  "R3710200", "R3896830", "R4006600", "R4009000", "R4009100",
  "R4526500", "R5080700", "R5083100", "R5083200", "R5166000",
  "R5168400", "R5168500", "R5221800", "R5821800", "R6478700",
  "R6481200", "R6481300", "R6540400", "R7006500", "R7008900",
  "R7009000"
)
# Handle missing values: recode every NLSY sentinel code to NA.
# -1 Refused | -2 Don't know | -3 Invalid missing | -4 Valid missing |
# -5 Non-interview.
for (sentinel in c(-1, -2, -3, -4, -5)) {
  new_data[new_data == sentinel] <- NA
}
# In the factor recodes below, values not matching a listed level are
# represented as NA.
vallabels = function(data) {
data$A0002600[1.0 <= data$A0002600 & data$A0002600 <= 999.0] <- 1.0
data$A0002600[1000.0 <= data$A0002600 & data$A0002600 <= 1999.0] <- 1000.0
data$A0002600[2000.0 <= data$A0002600 & data$A0002600 <= 2999.0] <- 2000.0
data$A0002600[3000.0 <= data$A0002600 & data$A0002600 <= 3999.0] <- 3000.0
data$A0002600[4000.0 <= data$A0002600 & data$A0002600 <= 4999.0] <- 4000.0
data$A0002600[5000.0 <= data$A0002600 & data$A0002600 <= 5999.0] <- 5000.0
data$A0002600[6000.0 <= data$A0002600 & data$A0002600 <= 6999.0] <- 6000.0
data$A0002600[7000.0 <= data$A0002600 & data$A0002600 <= 7999.0] <- 7000.0
data$A0002600[8000.0 <= data$A0002600 & data$A0002600 <= 8999.0] <- 8000.0
data$A0002600[9000.0 <= data$A0002600 & data$A0002600 <= 9999.0] <- 9000.0
data$A0002600[10000.0 <= data$A0002600 & data$A0002600 <= 10999.0] <- 10000.0
data$A0002600[11000.0 <= data$A0002600 & data$A0002600 <= 11999.0] <- 11000.0
data$A0002600[12000.0 <= data$A0002600 & data$A0002600 <= 12999.0] <- 12000.0
data$A0002600 <- factor(data$A0002600,
levels=c(1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,11000.0,12000.0),
labels=c("1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 10999",
"11000 TO 11999",
"12000 TO 12999"))
data$R0173600 <- factor(data$R0173600,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0),
labels=c("CROSS MALE WHITE",
"CROSS MALE WH. POOR",
"CROSS MALE BLACK",
"CROSS MALE HISPANIC",
"CROSS FEMALE WHITE",
"CROSS FEMALE WH POOR",
"CROSS FEMALE BLACK",
"CROSS FEMALE HISPANIC",
"SUP MALE WH POOR",
"SUP MALE BLACK",
"SUP MALE HISPANIC",
"SUP FEM WH POOR",
"SUP FEMALE BLACK",
"SUP FEMALE HISPANIC",
"MIL MALE WHITE",
"MIL MALE BLACK",
"MIL MALE HISPANIC",
"MIL FEMALE WHITE",
"MIL FEMALE BLACK",
"MIL FEMALE HISPANIC"))
data$R0214700 <- factor(data$R0214700,
levels=c(1.0,2.0,3.0),
labels=c("HISPANIC",
"BLACK",
"NON-BLACK, NON-HISPANIC"))
data$R0214800 <- factor(data$R0214800,
levels=c(1.0,2.0),
labels=c("MALE",
"FEMALE"))
data$R2350020[1.0 <= data$R2350020 & data$R2350020 <= 49.0] <- 1.0
data$R2350020[50.0 <= data$R2350020 & data$R2350020 <= 99.0] <- 50.0
data$R2350020[100.0 <= data$R2350020 & data$R2350020 <= 149.0] <- 100.0
data$R2350020[150.0 <= data$R2350020 & data$R2350020 <= 199.0] <- 150.0
data$R2350020[200.0 <= data$R2350020 & data$R2350020 <= 249.0] <- 200.0
data$R2350020[250.0 <= data$R2350020 & data$R2350020 <= 299.0] <- 250.0
data$R2350020[300.0 <= data$R2350020 & data$R2350020 <= 349.0] <- 300.0
data$R2350020[350.0 <= data$R2350020 & data$R2350020 <= 399.0] <- 350.0
data$R2350020[400.0 <= data$R2350020 & data$R2350020 <= 449.0] <- 400.0
data$R2350020[450.0 <= data$R2350020 & data$R2350020 <= 499.0] <- 450.0
data$R2350020[500.0 <= data$R2350020 & data$R2350020 <= 549.0] <- 500.0
data$R2350020[550.0 <= data$R2350020 & data$R2350020 <= 599.0] <- 550.0
data$R2350020[600.0 <= data$R2350020 & data$R2350020 <= 649.0] <- 600.0
data$R2350020[650.0 <= data$R2350020 & data$R2350020 <= 699.0] <- 650.0
data$R2350020[700.0 <= data$R2350020 & data$R2350020 <= 749.0] <- 700.0
data$R2350020[750.0 <= data$R2350020 & data$R2350020 <= 799.0] <- 750.0
data$R2350020[800.0 <= data$R2350020 & data$R2350020 <= 9999999.0] <- 800.0
data$R2350020 <- factor(data$R2350020,
levels=c(0.0,1.0,50.0,100.0,150.0,200.0,250.0,300.0,350.0,400.0,450.0,500.0,550.0,600.0,650.0,700.0,750.0,800.0),
labels=c("0",
"1 TO 49",
"50 TO 99",
"100 TO 149",
"150 TO 199",
"200 TO 249",
"250 TO 299",
"300 TO 349",
"350 TO 399",
"400 TO 449",
"450 TO 499",
"500 TO 549",
"550 TO 599",
"600 TO 649",
"650 TO 699",
"700 TO 749",
"750 TO 799",
"800 TO 9999999: 800+"))
data$R2509000 <- factor(data$R2509000,
levels=c(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("NONE",
"1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YR COL",
"2ND YR COL",
"3RD YR COL",
"4TH YR COL",
"5TH YR COL",
"6TH YR COL",
"7TH YR COL",
"8TH YR COL OR MORE",
"UNGRADED"))
data$R2722500[1.0 <= data$R2722500 & data$R2722500 <= 999.0] <- 1.0
data$R2722500[1000.0 <= data$R2722500 & data$R2722500 <= 1999.0] <- 1000.0
data$R2722500[2000.0 <= data$R2722500 & data$R2722500 <= 2999.0] <- 2000.0
data$R2722500[3000.0 <= data$R2722500 & data$R2722500 <= 3999.0] <- 3000.0
data$R2722500[4000.0 <= data$R2722500 & data$R2722500 <= 4999.0] <- 4000.0
data$R2722500[5000.0 <= data$R2722500 & data$R2722500 <= 5999.0] <- 5000.0
data$R2722500[6000.0 <= data$R2722500 & data$R2722500 <= 6999.0] <- 6000.0
data$R2722500[7000.0 <= data$R2722500 & data$R2722500 <= 7999.0] <- 7000.0
data$R2722500[8000.0 <= data$R2722500 & data$R2722500 <= 8999.0] <- 8000.0
data$R2722500[9000.0 <= data$R2722500 & data$R2722500 <= 9999.0] <- 9000.0
data$R2722500[10000.0 <= data$R2722500 & data$R2722500 <= 14999.0] <- 10000.0
data$R2722500[15000.0 <= data$R2722500 & data$R2722500 <= 19999.0] <- 15000.0
data$R2722500[20000.0 <= data$R2722500 & data$R2722500 <= 24999.0] <- 20000.0
data$R2722500[25000.0 <= data$R2722500 & data$R2722500 <= 49999.0] <- 25000.0
data$R2722500[50000.0 <= data$R2722500 & data$R2722500 <= 9999999.0] <- 50000.0
data$R2722500 <- factor(data$R2722500,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2724700[1.0 <= data$R2724700 & data$R2724700 <= 999.0] <- 1.0
data$R2724700[1000.0 <= data$R2724700 & data$R2724700 <= 1999.0] <- 1000.0
data$R2724700[2000.0 <= data$R2724700 & data$R2724700 <= 2999.0] <- 2000.0
data$R2724700[3000.0 <= data$R2724700 & data$R2724700 <= 3999.0] <- 3000.0
data$R2724700[4000.0 <= data$R2724700 & data$R2724700 <= 4999.0] <- 4000.0
data$R2724700[5000.0 <= data$R2724700 & data$R2724700 <= 5999.0] <- 5000.0
data$R2724700[6000.0 <= data$R2724700 & data$R2724700 <= 6999.0] <- 6000.0
data$R2724700[7000.0 <= data$R2724700 & data$R2724700 <= 7999.0] <- 7000.0
data$R2724700[8000.0 <= data$R2724700 & data$R2724700 <= 8999.0] <- 8000.0
data$R2724700[9000.0 <= data$R2724700 & data$R2724700 <= 9999.0] <- 9000.0
data$R2724700[10000.0 <= data$R2724700 & data$R2724700 <= 14999.0] <- 10000.0
data$R2724700[15000.0 <= data$R2724700 & data$R2724700 <= 19999.0] <- 15000.0
data$R2724700[20000.0 <= data$R2724700 & data$R2724700 <= 24999.0] <- 20000.0
data$R2724700[25000.0 <= data$R2724700 & data$R2724700 <= 49999.0] <- 25000.0
data$R2724700[50000.0 <= data$R2724700 & data$R2724700 <= 9999999.0] <- 50000.0
data$R2724700 <- factor(data$R2724700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2724701[1.0 <= data$R2724701 & data$R2724701 <= 999.0] <- 1.0
data$R2724701[1000.0 <= data$R2724701 & data$R2724701 <= 1999.0] <- 1000.0
data$R2724701[2000.0 <= data$R2724701 & data$R2724701 <= 2999.0] <- 2000.0
data$R2724701[3000.0 <= data$R2724701 & data$R2724701 <= 3999.0] <- 3000.0
data$R2724701[4000.0 <= data$R2724701 & data$R2724701 <= 4999.0] <- 4000.0
data$R2724701[5000.0 <= data$R2724701 & data$R2724701 <= 5999.0] <- 5000.0
data$R2724701[6000.0 <= data$R2724701 & data$R2724701 <= 6999.0] <- 6000.0
data$R2724701[7000.0 <= data$R2724701 & data$R2724701 <= 7999.0] <- 7000.0
data$R2724701[8000.0 <= data$R2724701 & data$R2724701 <= 8999.0] <- 8000.0
data$R2724701[9000.0 <= data$R2724701 & data$R2724701 <= 9999.0] <- 9000.0
data$R2724701[10000.0 <= data$R2724701 & data$R2724701 <= 14999.0] <- 10000.0
data$R2724701[15000.0 <= data$R2724701 & data$R2724701 <= 19999.0] <- 15000.0
data$R2724701[20000.0 <= data$R2724701 & data$R2724701 <= 24999.0] <- 20000.0
data$R2724701[25000.0 <= data$R2724701 & data$R2724701 <= 49999.0] <- 25000.0
data$R2724701[50000.0 <= data$R2724701 & data$R2724701 <= 9999999.0] <- 50000.0
data$R2724701 <- factor(data$R2724701,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2726800[1.0 <= data$R2726800 & data$R2726800 <= 499.0] <- 1.0
data$R2726800[500.0 <= data$R2726800 & data$R2726800 <= 999.0] <- 500.0
data$R2726800[1000.0 <= data$R2726800 & data$R2726800 <= 1499.0] <- 1000.0
data$R2726800[1500.0 <= data$R2726800 & data$R2726800 <= 1999.0] <- 1500.0
data$R2726800[2000.0 <= data$R2726800 & data$R2726800 <= 2499.0] <- 2000.0
data$R2726800[2500.0 <= data$R2726800 & data$R2726800 <= 2999.0] <- 2500.0
data$R2726800[3000.0 <= data$R2726800 & data$R2726800 <= 3499.0] <- 3000.0
data$R2726800[3500.0 <= data$R2726800 & data$R2726800 <= 3999.0] <- 3500.0
data$R2726800[4000.0 <= data$R2726800 & data$R2726800 <= 4499.0] <- 4000.0
data$R2726800[4500.0 <= data$R2726800 & data$R2726800 <= 4999.0] <- 4500.0
data$R2726800[5000.0 <= data$R2726800 & data$R2726800 <= 9999999.0] <- 5000.0
data$R2726800 <- factor(data$R2726800,
levels=c(0.0,1.0,500.0,1000.0,1500.0,2000.0,2500.0,3000.0,3500.0,4000.0,4500.0,5000.0),
labels=c("0",
"1 TO 499",
"500 TO 999",
"1000 TO 1499",
"1500 TO 1999",
"2000 TO 2499",
"2500 TO 2999",
"3000 TO 3499",
"3500 TO 3999",
"4000 TO 4499",
"4500 TO 4999",
"5000 TO 9999999: 5000+"))
data$R2727300[1.0 <= data$R2727300 & data$R2727300 <= 499.0] <- 1.0
data$R2727300[500.0 <= data$R2727300 & data$R2727300 <= 999.0] <- 500.0
data$R2727300[1000.0 <= data$R2727300 & data$R2727300 <= 1499.0] <- 1000.0
data$R2727300[1500.0 <= data$R2727300 & data$R2727300 <= 1999.0] <- 1500.0
data$R2727300[2000.0 <= data$R2727300 & data$R2727300 <= 2499.0] <- 2000.0
data$R2727300[2500.0 <= data$R2727300 & data$R2727300 <= 2999.0] <- 2500.0
data$R2727300[3000.0 <= data$R2727300 & data$R2727300 <= 3499.0] <- 3000.0
data$R2727300[3500.0 <= data$R2727300 & data$R2727300 <= 3999.0] <- 3500.0
data$R2727300[4000.0 <= data$R2727300 & data$R2727300 <= 4499.0] <- 4000.0
data$R2727300[4500.0 <= data$R2727300 & data$R2727300 <= 4999.0] <- 4500.0
data$R2727300[5000.0 <= data$R2727300 & data$R2727300 <= 9999999.0] <- 5000.0
data$R2727300 <- factor(data$R2727300,
levels=c(0.0,1.0,500.0,1000.0,1500.0,2000.0,2500.0,3000.0,3500.0,4000.0,4500.0,5000.0),
labels=c("0",
"1 TO 499",
"500 TO 999",
"1000 TO 1499",
"1500 TO 1999",
"2000 TO 2499",
"2500 TO 2999",
"3000 TO 3499",
"3500 TO 3999",
"4000 TO 4499",
"4500 TO 4999",
"5000 TO 9999999: 5000+"))
data$R2731700[1.0 <= data$R2731700 & data$R2731700 <= 99.0] <- 1.0
data$R2731700[100.0 <= data$R2731700 & data$R2731700 <= 199.0] <- 100.0
data$R2731700[200.0 <= data$R2731700 & data$R2731700 <= 299.0] <- 200.0
data$R2731700[300.0 <= data$R2731700 & data$R2731700 <= 399.0] <- 300.0
data$R2731700[400.0 <= data$R2731700 & data$R2731700 <= 499.0] <- 400.0
data$R2731700[500.0 <= data$R2731700 & data$R2731700 <= 599.0] <- 500.0
data$R2731700[600.0 <= data$R2731700 & data$R2731700 <= 699.0] <- 600.0
data$R2731700[700.0 <= data$R2731700 & data$R2731700 <= 799.0] <- 700.0
data$R2731700[800.0 <= data$R2731700 & data$R2731700 <= 899.0] <- 800.0
data$R2731700[900.0 <= data$R2731700 & data$R2731700 <= 999.0] <- 900.0
data$R2731700[1000.0 <= data$R2731700 & data$R2731700 <= 9999999.0] <- 1000.0
data$R2731700 <- factor(data$R2731700,
levels=c(0.0,1.0,100.0,200.0,300.0,400.0,500.0,600.0,700.0,800.0,900.0,1000.0),
labels=c("0",
"1 TO 99",
"100 TO 199",
"200 TO 299",
"300 TO 399",
"400 TO 499",
"500 TO 599",
"600 TO 699",
"700 TO 799",
"800 TO 899",
"900 TO 999",
"1000 TO 9999999: 1000+"))
data$R2870200[1.0 <= data$R2870200 & data$R2870200 <= 999.0] <- 1.0
data$R2870200[1000.0 <= data$R2870200 & data$R2870200 <= 1999.0] <- 1000.0
data$R2870200[2000.0 <= data$R2870200 & data$R2870200 <= 2999.0] <- 2000.0
data$R2870200[3000.0 <= data$R2870200 & data$R2870200 <= 3999.0] <- 3000.0
data$R2870200[4000.0 <= data$R2870200 & data$R2870200 <= 4999.0] <- 4000.0
data$R2870200[5000.0 <= data$R2870200 & data$R2870200 <= 5999.0] <- 5000.0
data$R2870200[6000.0 <= data$R2870200 & data$R2870200 <= 6999.0] <- 6000.0
data$R2870200[7000.0 <= data$R2870200 & data$R2870200 <= 7999.0] <- 7000.0
data$R2870200[8000.0 <= data$R2870200 & data$R2870200 <= 8999.0] <- 8000.0
data$R2870200[9000.0 <= data$R2870200 & data$R2870200 <= 9999.0] <- 9000.0
data$R2870200[10000.0 <= data$R2870200 & data$R2870200 <= 14999.0] <- 10000.0
data$R2870200[15000.0 <= data$R2870200 & data$R2870200 <= 19999.0] <- 15000.0
data$R2870200[20000.0 <= data$R2870200 & data$R2870200 <= 24999.0] <- 20000.0
data$R2870200[25000.0 <= data$R2870200 & data$R2870200 <= 49999.0] <- 25000.0
data$R2870200[50000.0 <= data$R2870200 & data$R2870200 <= 9999999.0] <- 50000.0
data$R2870200 <- factor(data$R2870200,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2872700 <- factor(data$R2872700,
levels=c(0.0,1.0),
labels=c("RURAL",
"URBAN"))
data$R2872800 <- factor(data$R2872800,
levels=c(0.0,1.0,2.0,3.0),
labels=c("NOT IN SMSA",
"SMSA, NOT CENTRAL CITY",
"SMSA, CENTRAL CITY NOT KNOWN",
"SMSA, IN CENTRAL CITY"))
data$R3110200 <- factor(data$R3110200,
levels=c(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("NONE",
"1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YR COL",
"2ND YR COL",
"3RD YR COL",
"4TH YR COL",
"5TH YR COL",
"6TH YR COL",
"7TH YR COL",
"8TH YR COL OR MORE",
"UNGRADED"))
data$R3400700[1.0 <= data$R3400700 & data$R3400700 <= 999.0] <- 1.0
data$R3400700[1000.0 <= data$R3400700 & data$R3400700 <= 1999.0] <- 1000.0
data$R3400700[2000.0 <= data$R3400700 & data$R3400700 <= 2999.0] <- 2000.0
data$R3400700[3000.0 <= data$R3400700 & data$R3400700 <= 3999.0] <- 3000.0
data$R3400700[4000.0 <= data$R3400700 & data$R3400700 <= 4999.0] <- 4000.0
data$R3400700[5000.0 <= data$R3400700 & data$R3400700 <= 5999.0] <- 5000.0
data$R3400700[6000.0 <= data$R3400700 & data$R3400700 <= 6999.0] <- 6000.0
data$R3400700[7000.0 <= data$R3400700 & data$R3400700 <= 7999.0] <- 7000.0
data$R3400700[8000.0 <= data$R3400700 & data$R3400700 <= 8999.0] <- 8000.0
data$R3400700[9000.0 <= data$R3400700 & data$R3400700 <= 9999.0] <- 9000.0
data$R3400700[10000.0 <= data$R3400700 & data$R3400700 <= 14999.0] <- 10000.0
data$R3400700[15000.0 <= data$R3400700 & data$R3400700 <= 19999.0] <- 15000.0
data$R3400700[20000.0 <= data$R3400700 & data$R3400700 <= 24999.0] <- 20000.0
data$R3400700[25000.0 <= data$R3400700 & data$R3400700 <= 49999.0] <- 25000.0
data$R3400700[50000.0 <= data$R3400700 & data$R3400700 <= 9999999.0] <- 50000.0
data$R3400700 <- factor(data$R3400700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R3403100 <- factor(data$R3403100,
levels=c(0.0,1.0),
labels=c("RURAL",
"URBAN"))
data$R3403200 <- factor(data$R3403200,
levels=c(0.0,1.0,2.0,3.0),
labels=c("NOT IN SMSA",
"SMSA, NOT CENTRAL CITY",
"SMSA, CENTRAL CITY NOT KNOWN",
"SMSA, IN CENTRAL CITY"))
data$R3710200 <- factor(data$R3710200,
levels=c(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("NONE",
"1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YR COL",
"2ND YR COL",
"3RD YR COL",
"4TH YR COL",
"5TH YR COL",
"6TH YR COL",
"7TH YR COL",
"8TH YR COL OR MORE",
"UNGRADED"))
data$R3896830[1.0 <= data$R3896830 & data$R3896830 <= 49.0] <- 1.0
data$R3896830[50.0 <= data$R3896830 & data$R3896830 <= 99.0] <- 50.0
data$R3896830[100.0 <= data$R3896830 & data$R3896830 <= 149.0] <- 100.0
data$R3896830[150.0 <= data$R3896830 & data$R3896830 <= 199.0] <- 150.0
data$R3896830[200.0 <= data$R3896830 & data$R3896830 <= 249.0] <- 200.0
data$R3896830[250.0 <= data$R3896830 & data$R3896830 <= 299.0] <- 250.0
data$R3896830[300.0 <= data$R3896830 & data$R3896830 <= 349.0] <- 300.0
data$R3896830[350.0 <= data$R3896830 & data$R3896830 <= 399.0] <- 350.0
data$R3896830[400.0 <= data$R3896830 & data$R3896830 <= 449.0] <- 400.0
data$R3896830[450.0 <= data$R3896830 & data$R3896830 <= 499.0] <- 450.0
data$R3896830[500.0 <= data$R3896830 & data$R3896830 <= 549.0] <- 500.0
data$R3896830[550.0 <= data$R3896830 & data$R3896830 <= 599.0] <- 550.0
data$R3896830[600.0 <= data$R3896830 & data$R3896830 <= 649.0] <- 600.0
data$R3896830[650.0 <= data$R3896830 & data$R3896830 <= 699.0] <- 650.0
data$R3896830[700.0 <= data$R3896830 & data$R3896830 <= 749.0] <- 700.0
data$R3896830[750.0 <= data$R3896830 & data$R3896830 <= 799.0] <- 750.0
data$R3896830[800.0 <= data$R3896830 & data$R3896830 <= 9999999.0] <- 800.0
data$R3896830 <- factor(data$R3896830,
levels=c(0.0,1.0,50.0,100.0,150.0,200.0,250.0,300.0,350.0,400.0,450.0,500.0,550.0,600.0,650.0,700.0,750.0,800.0),
labels=c("0",
"1 TO 49",
"50 TO 99",
"100 TO 149",
"150 TO 199",
"200 TO 249",
"250 TO 299",
"300 TO 349",
"350 TO 399",
"400 TO 449",
"450 TO 499",
"500 TO 549",
"550 TO 599",
"600 TO 649",
"650 TO 699",
"700 TO 749",
"750 TO 799",
"800 TO 9999999: 800+"))
data$R4006600[1.0 <= data$R4006600 & data$R4006600 <= 999.0] <- 1.0
data$R4006600[1000.0 <= data$R4006600 & data$R4006600 <= 1999.0] <- 1000.0
data$R4006600[2000.0 <= data$R4006600 & data$R4006600 <= 2999.0] <- 2000.0
data$R4006600[3000.0 <= data$R4006600 & data$R4006600 <= 3999.0] <- 3000.0
data$R4006600[4000.0 <= data$R4006600 & data$R4006600 <= 4999.0] <- 4000.0
data$R4006600[5000.0 <= data$R4006600 & data$R4006600 <= 5999.0] <- 5000.0
data$R4006600[6000.0 <= data$R4006600 & data$R4006600 <= 6999.0] <- 6000.0
data$R4006600[7000.0 <= data$R4006600 & data$R4006600 <= 7999.0] <- 7000.0
data$R4006600[8000.0 <= data$R4006600 & data$R4006600 <= 8999.0] <- 8000.0
data$R4006600[9000.0 <= data$R4006600 & data$R4006600 <= 9999.0] <- 9000.0
data$R4006600[10000.0 <= data$R4006600 & data$R4006600 <= 14999.0] <- 10000.0
data$R4006600[15000.0 <= data$R4006600 & data$R4006600 <= 19999.0] <- 15000.0
data$R4006600[20000.0 <= data$R4006600 & data$R4006600 <= 24999.0] <- 20000.0
data$R4006600[25000.0 <= data$R4006600 & data$R4006600 <= 49999.0] <- 25000.0
data$R4006600[50000.0 <= data$R4006600 & data$R4006600 <= 9999999.0] <- 50000.0
data$R4006600 <- factor(data$R4006600,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R4009000 <- factor(data$R4009000,
levels=c(0.0,1.0),
labels=c("RURAL",
"URBAN"))
data$R4009100 <- factor(data$R4009100,
levels=c(0.0,1.0,2.0,3.0),
labels=c("NOT IN SMSA",
"SMSA, NOT CENTRAL CITY",
"SMSA, CENTRAL CITY NOT KNOWN",
"SMSA, IN CENTRAL CITY"))
data$R4526500 <- factor(data$R4526500,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
data$R5080700[1.0 <= data$R5080700 & data$R5080700 <= 999.0] <- 1.0
data$R5080700[1000.0 <= data$R5080700 & data$R5080700 <= 1999.0] <- 1000.0
data$R5080700[2000.0 <= data$R5080700 & data$R5080700 <= 2999.0] <- 2000.0
data$R5080700[3000.0 <= data$R5080700 & data$R5080700 <= 3999.0] <- 3000.0
data$R5080700[4000.0 <= data$R5080700 & data$R5080700 <= 4999.0] <- 4000.0
data$R5080700[5000.0 <= data$R5080700 & data$R5080700 <= 5999.0] <- 5000.0
data$R5080700[6000.0 <= data$R5080700 & data$R5080700 <= 6999.0] <- 6000.0
data$R5080700[7000.0 <= data$R5080700 & data$R5080700 <= 7999.0] <- 7000.0
data$R5080700[8000.0 <= data$R5080700 & data$R5080700 <= 8999.0] <- 8000.0
data$R5080700[9000.0 <= data$R5080700 & data$R5080700 <= 9999.0] <- 9000.0
data$R5080700[10000.0 <= data$R5080700 & data$R5080700 <= 14999.0] <- 10000.0
data$R5080700[15000.0 <= data$R5080700 & data$R5080700 <= 19999.0] <- 15000.0
data$R5080700[20000.0 <= data$R5080700 & data$R5080700 <= 24999.0] <- 20000.0
data$R5080700[25000.0 <= data$R5080700 & data$R5080700 <= 49999.0] <- 25000.0
data$R5080700[50000.0 <= data$R5080700 & data$R5080700 <= 9.9999999E7] <- 50000.0
data$R5080700 <- factor(data$R5080700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
data$R5083100 <- factor(data$R5083100,
levels=c(0.0,1.0),
labels=c("0: 0 RURAL",
"1: 1 URBAN"))
data$R5083200 <- factor(data$R5083200,
levels=c(0.0,1.0,2.0,3.0),
labels=c("0: 0 NOT IN SMSA",
"1: 1 SMSA, NOT CENTRAL CITY",
"2: 2 SMSA, CENTRAL CITY NOT KNOWN",
"3: 3 SMSA, IN CENTRAL CITY"))
data$R5166000[1.0 <= data$R5166000 & data$R5166000 <= 999.0] <- 1.0
data$R5166000[1000.0 <= data$R5166000 & data$R5166000 <= 1999.0] <- 1000.0
data$R5166000[2000.0 <= data$R5166000 & data$R5166000 <= 2999.0] <- 2000.0
data$R5166000[3000.0 <= data$R5166000 & data$R5166000 <= 3999.0] <- 3000.0
data$R5166000[4000.0 <= data$R5166000 & data$R5166000 <= 4999.0] <- 4000.0
data$R5166000[5000.0 <= data$R5166000 & data$R5166000 <= 5999.0] <- 5000.0
data$R5166000[6000.0 <= data$R5166000 & data$R5166000 <= 6999.0] <- 6000.0
data$R5166000[7000.0 <= data$R5166000 & data$R5166000 <= 7999.0] <- 7000.0
data$R5166000[8000.0 <= data$R5166000 & data$R5166000 <= 8999.0] <- 8000.0
data$R5166000[9000.0 <= data$R5166000 & data$R5166000 <= 9999.0] <- 9000.0
data$R5166000[10000.0 <= data$R5166000 & data$R5166000 <= 14999.0] <- 10000.0
data$R5166000[15000.0 <= data$R5166000 & data$R5166000 <= 19999.0] <- 15000.0
data$R5166000[20000.0 <= data$R5166000 & data$R5166000 <= 24999.0] <- 20000.0
data$R5166000[25000.0 <= data$R5166000 & data$R5166000 <= 49999.0] <- 25000.0
data$R5166000[50000.0 <= data$R5166000 & data$R5166000 <= 9.9999999E7] <- 50000.0
data$R5166000 <- factor(data$R5166000,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
data$R5168400 <- factor(data$R5168400,
levels=c(0.0,1.0),
labels=c("0: RURAL",
"1: URBAN"))
data$R5168500 <- factor(data$R5168500,
levels=c(0.0,1.0,2.0,3.0),
labels=c("0: NOT IN SMSA",
"1: SMSA, NOT CENTRAL CITY",
"2: SMSA, CENTRAL CITY NOT KNOWN",
"3: SMSA, IN CENTRAL CITY"))
data$R5221800 <- factor(data$R5221800,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
data$R5821800 <- factor(data$R5821800,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
data$R6478700[1.0 <= data$R6478700 & data$R6478700 <= 999.0] <- 1.0
data$R6478700[1000.0 <= data$R6478700 & data$R6478700 <= 1999.0] <- 1000.0
data$R6478700[2000.0 <= data$R6478700 & data$R6478700 <= 2999.0] <- 2000.0
data$R6478700[3000.0 <= data$R6478700 & data$R6478700 <= 3999.0] <- 3000.0
data$R6478700[4000.0 <= data$R6478700 & data$R6478700 <= 4999.0] <- 4000.0
data$R6478700[5000.0 <= data$R6478700 & data$R6478700 <= 5999.0] <- 5000.0
data$R6478700[6000.0 <= data$R6478700 & data$R6478700 <= 6999.0] <- 6000.0
data$R6478700[7000.0 <= data$R6478700 & data$R6478700 <= 7999.0] <- 7000.0
data$R6478700[8000.0 <= data$R6478700 & data$R6478700 <= 8999.0] <- 8000.0
data$R6478700[9000.0 <= data$R6478700 & data$R6478700 <= 9999.0] <- 9000.0
data$R6478700[10000.0 <= data$R6478700 & data$R6478700 <= 14999.0] <- 10000.0
data$R6478700[15000.0 <= data$R6478700 & data$R6478700 <= 19999.0] <- 15000.0
data$R6478700[20000.0 <= data$R6478700 & data$R6478700 <= 24999.0] <- 20000.0
data$R6478700[25000.0 <= data$R6478700 & data$R6478700 <= 49999.0] <- 25000.0
data$R6478700[50000.0 <= data$R6478700 & data$R6478700 <= 9.9999999E7] <- 50000.0
data$R6478700 <- factor(data$R6478700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
data$R6481200 <- factor(data$R6481200,
levels=c(0.0,1.0),
labels=c("0: 0 RURAL",
"1: 1 URBAN"))
data$R6481300 <- factor(data$R6481300,
levels=c(0.0,1.0,2.0,3.0),
labels=c("0: NOT IN SMSA",
"1: SMSA, NOT CENTRAL CITY",
"2: SMSA, CENTRAL CITY NOT KNOWN",
"3: SMSA, IN CENTRAL CITY"))
data$R6540400 <- factor(data$R6540400,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
data$R7006500[1.0 <= data$R7006500 & data$R7006500 <= 999.0] <- 1.0
data$R7006500[1000.0 <= data$R7006500 & data$R7006500 <= 1999.0] <- 1000.0
data$R7006500[2000.0 <= data$R7006500 & data$R7006500 <= 2999.0] <- 2000.0
data$R7006500[3000.0 <= data$R7006500 & data$R7006500 <= 3999.0] <- 3000.0
data$R7006500[4000.0 <= data$R7006500 & data$R7006500 <= 4999.0] <- 4000.0
data$R7006500[5000.0 <= data$R7006500 & data$R7006500 <= 5999.0] <- 5000.0
data$R7006500[6000.0 <= data$R7006500 & data$R7006500 <= 6999.0] <- 6000.0
data$R7006500[7000.0 <= data$R7006500 & data$R7006500 <= 7999.0] <- 7000.0
data$R7006500[8000.0 <= data$R7006500 & data$R7006500 <= 8999.0] <- 8000.0
data$R7006500[9000.0 <= data$R7006500 & data$R7006500 <= 9999.0] <- 9000.0
data$R7006500[10000.0 <= data$R7006500 & data$R7006500 <= 14999.0] <- 10000.0
data$R7006500[15000.0 <= data$R7006500 & data$R7006500 <= 19999.0] <- 15000.0
data$R7006500[20000.0 <= data$R7006500 & data$R7006500 <= 24999.0] <- 20000.0
data$R7006500[25000.0 <= data$R7006500 & data$R7006500 <= 49999.0] <- 25000.0
data$R7006500[50000.0 <= data$R7006500 & data$R7006500 <= 9.9999999E7] <- 50000.0
data$R7006500 <- factor(data$R7006500,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
data$R7008900 <- factor(data$R7008900,
levels=c(0.0,1.0,2.0),
labels=c("0: RURAL",
"1: URBAN",
"2: UNKNOWN"))
data$R7009000 <- factor(data$R7009000,
levels=c(1.0,2.0,3.0,4.0),
labels=c("1: NOT IN MSA",
"2: IN MSA, NOT IN CENTRAL CITY",
"3: IN MSA, IN CENTRAL CITY",
"4: IN MSA, CENTRAL CITY NOT KNOWN"))
return(data)
}
# Human-readable descriptions for each raw NLSY79 column, in the same order
# as the 41 R-number column names assigned to new_data when it was read in.
# The trailing number in each label appears to be the NLSY survey year the
# item was collected (e.g. "79" = 1979) — TODO confirm against the codebook.
varlabels <- c("VERSION_R26_1 2014",
"ID# (1-12686) 79",
"SAMPLE ID  79 INT",
"RACL/ETHNIC COHORT /SCRNR 79",
"SEX OF R 79",
"ROSENBERG ESTEEM ITEM RESPONSE SCORE 87",
"HGC 88",
"TOT INC WAGES AND SALRY P-C YR 88",
"TOT INC SP WAGE AND SALRY P-C YR 88",
"TOT INC SP WAGE AND SALRY P-C YR 88 (TRUNC)",
"TOT INC ALIMONY RCVD 87 88",
"TOT INC CHILD SUPP RCVD 87 88",
"AVG MO INC SSI RCVD IN 87 88",
"TOT NET FAMILY INC P-C YR 88",
"RS CURRENT RESIDENCE URBAN/RURAL 88",
"RS CURRENT RESIDENCE IN SMSA 88",
"HGC 90",
"TOT NET FAMILY INC P-C YR 90",
"RS CURRENT RESIDENCE URBAN/RURAL 90",
"RS CURRENT RESIDENCE IN SMSA 90",
"HGC 92",
"20-ITEM CES-D ITEM RESPONSE SCORE 92",
"TOT NET FAMILY INC P-C YR 92",
"RS CURRENT RESIDENCE URBAN/RURAL 92",
"RS CURRENT RESIDENCE IN SMSA 92",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 94",
"TOTAL NET FAMILY INCOME 94",
"RS RESIDENCE URBAN OR RURAL 94",
"RS RESIDENCE IN SMSA 94",
"TOTAL NET FAMILY INCOME 96",
"RS RESIDENCE URBAN OR RURAL 96",
"RS RESIDENCE IN SMSA 96",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 96",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 1998",
"TOTAL NET FAMILY INCOME 1998",
"RS RESIDENCE URBAN OR RURAL 1998",
"RS RESIDENCE IN SMSA 1998",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 2000",
"TOTAL NET FAMILY INCOME 2000",
"RS RESIDENCE URBAN OR RURAL 2000",
"RS RESIDENCE IN SMSA 2000"
)
# Use qnames rather than rnums
#
# Replace the raw NLSY "R-number" column names of `data` with mnemonic
# question names (e.g. the sex-of-respondent column becomes
# "SAMPLE_SEX_1979"). The vector below must stay in the same order as the
# R-number names assigned when the data were read in: one name per column,
# 41 names in total.
#
# @param data A data.frame with exactly 41 columns, ordered as in the raw
#   NLSY extract.
# @return The same data.frame with mnemonic column names.
qnames <- function(data) {
  # Fixed using `<-` instead of `=` for assignment (tidyverse style), and
  # returning the last expression rather than an explicit return().
  names(data) <- c("VERSION_R26_2014",
                   "CASEID_1979",
                   "SAMPLE_ID_1979",
                   "SAMPLE_RACE_78SCRN",
                   "SAMPLE_SEX_1979",
                   "ROSENBERG_IRT_SCORE_1987",
                   "Q3-4_1988",
                   "Q13-5_1988",
                   "Q13-18_1988",
                   "Q13-18_TRUNC_REVISED_1988",
                   "INCOME-2C_1988",
                   "INCOME-5D_1988",
                   "INCOME-9C_1988",
                   "TNFI_TRUNC_1988",
                   "URBAN-RURAL_1988",
                   "SMSARES_1988",
                   "Q3-4_1990",
                   "TNFI_TRUNC_1990",
                   "URBAN-RURAL_1990",
                   "SMSARES_1990",
                   "Q3-4_1992",
                   "CESD_IRT_SCORE_20_ITEM_1992",
                   "TNFI_TRUNC_1992",
                   "URBAN-RURAL_1992",
                   "SMSARES_1992",
                   "Q3-4_1994",
                   "TNFI_TRUNC_1994",
                   "URBAN-RURAL_1994",
                   "SMSARES_1994",
                   "TNFI_TRUNC_1996",
                   "URBAN-RURAL_1996",
                   "SMSARES_1996",
                   "Q3-4_1996",
                   "Q3-4_1998",
                   "TNFI_TRUNC_1998",
                   "URBAN-RURAL_1998",
                   "SMSARES_1998",
                   "Q3-4_2000",
                   "TNFI_TRUNC_2000",
                   "URBAN-RURAL_2000",
                   "SMSARES_2000")
  data
}
#********************************************************************************************************
# Remove the '#' before the following line to create a data file called "categories" with value labels.
# NOTE(review): in this copy the line below is already active (not commented
# out), so `categories` holds the factor-labeled copy of new_data produced by
# the vallabels() function defined earlier in this script.
categories <- vallabels(new_data)
| /Code/NLSY_rawdata_import.R | no_license | mcbeem/TVAttention | R | false | false | 38,320 | r | ################################################################################
# #
# Script for preparting the raw NLSY79 data #
# for incorportation into the analysis dataset. # #
# This code was automatically generated by the NLSY Investigator and #
# modified slightly to use here() for relative file paths rather #
# than setting a working directory with an absolute path. #
# #
# This script is called by analysis.r #
# It is not meant to be independently executed. #
# #
# Project: Challenging the Link Between Early Childhood Television Exposure #
# and Later Attention Problems: A Multiverse Analysis #
# Investigators: Matt McBee, Wallace Dixon, & Rebecca Brand #
# Programmer: Matt McBee #
# mcbeem@etsu.edu #
# #
################################################################################
# here() resolves paths relative to the project root, so this script works
# regardless of the caller's working directory (see header comment above).
library(here)
# Raw NLSY79 extract. read.table() is called with defaults, so the file is
# read as whitespace-delimited with no header row; the 41 columns arrive as
# V1..V41 and are renamed to NLSY R-numbers immediately below.
new_data <- read.table(here("Data", "NLSY_raw.dat"))
# Assign the NLSY "R-number" reference codes as column names for the raw
# extract, in the exact column order of the .dat file (41 columns).
names(new_data) <- c(
  "A0002600", "R0000100", "R0173600", "R0214700", "R0214800",
  "R2350020", "R2509000", "R2722500", "R2724700", "R2724701",
  "R2726800", "R2727300", "R2731700", "R2870200", "R2872700",
  "R2872800", "R3110200", "R3400700", "R3403100", "R3403200",
  "R3710200", "R3896830", "R4006600", "R4009000", "R4009100",
  "R4526500", "R5080700", "R5083100", "R5083200", "R5166000",
  "R5168400", "R5168500", "R5221800", "R5821800", "R6478700",
  "R6481200", "R6481300", "R6540400", "R7006500", "R7008900",
  "R7009000"
)
# Handle missing values.
# NLSY encodes the different kinds of missingness as negative sentinel
# codes; convert each of them to NA. (Fixed: use `<-` rather than `=` for
# assignment, per R style conventions.)
new_data[new_data == -1] <- NA  # Refused
new_data[new_data == -2] <- NA  # Don't know
new_data[new_data == -3] <- NA  # Invalid missing
new_data[new_data == -4] <- NA  # Valid missing
new_data[new_data == -5] <- NA  # Non-interview
# If there are values not categorized they will be represented as NA
vallabels = function(data) {
data$A0002600[1.0 <= data$A0002600 & data$A0002600 <= 999.0] <- 1.0
data$A0002600[1000.0 <= data$A0002600 & data$A0002600 <= 1999.0] <- 1000.0
data$A0002600[2000.0 <= data$A0002600 & data$A0002600 <= 2999.0] <- 2000.0
data$A0002600[3000.0 <= data$A0002600 & data$A0002600 <= 3999.0] <- 3000.0
data$A0002600[4000.0 <= data$A0002600 & data$A0002600 <= 4999.0] <- 4000.0
data$A0002600[5000.0 <= data$A0002600 & data$A0002600 <= 5999.0] <- 5000.0
data$A0002600[6000.0 <= data$A0002600 & data$A0002600 <= 6999.0] <- 6000.0
data$A0002600[7000.0 <= data$A0002600 & data$A0002600 <= 7999.0] <- 7000.0
data$A0002600[8000.0 <= data$A0002600 & data$A0002600 <= 8999.0] <- 8000.0
data$A0002600[9000.0 <= data$A0002600 & data$A0002600 <= 9999.0] <- 9000.0
data$A0002600[10000.0 <= data$A0002600 & data$A0002600 <= 10999.0] <- 10000.0
data$A0002600[11000.0 <= data$A0002600 & data$A0002600 <= 11999.0] <- 11000.0
data$A0002600[12000.0 <= data$A0002600 & data$A0002600 <= 12999.0] <- 12000.0
data$A0002600 <- factor(data$A0002600,
levels=c(1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,11000.0,12000.0),
labels=c("1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 10999",
"11000 TO 11999",
"12000 TO 12999"))
data$R0173600 <- factor(data$R0173600,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0),
labels=c("CROSS MALE WHITE",
"CROSS MALE WH. POOR",
"CROSS MALE BLACK",
"CROSS MALE HISPANIC",
"CROSS FEMALE WHITE",
"CROSS FEMALE WH POOR",
"CROSS FEMALE BLACK",
"CROSS FEMALE HISPANIC",
"SUP MALE WH POOR",
"SUP MALE BLACK",
"SUP MALE HISPANIC",
"SUP FEM WH POOR",
"SUP FEMALE BLACK",
"SUP FEMALE HISPANIC",
"MIL MALE WHITE",
"MIL MALE BLACK",
"MIL MALE HISPANIC",
"MIL FEMALE WHITE",
"MIL FEMALE BLACK",
"MIL FEMALE HISPANIC"))
data$R0214700 <- factor(data$R0214700,
levels=c(1.0,2.0,3.0),
labels=c("HISPANIC",
"BLACK",
"NON-BLACK, NON-HISPANIC"))
data$R0214800 <- factor(data$R0214800,
levels=c(1.0,2.0),
labels=c("MALE",
"FEMALE"))
data$R2350020[1.0 <= data$R2350020 & data$R2350020 <= 49.0] <- 1.0
data$R2350020[50.0 <= data$R2350020 & data$R2350020 <= 99.0] <- 50.0
data$R2350020[100.0 <= data$R2350020 & data$R2350020 <= 149.0] <- 100.0
data$R2350020[150.0 <= data$R2350020 & data$R2350020 <= 199.0] <- 150.0
data$R2350020[200.0 <= data$R2350020 & data$R2350020 <= 249.0] <- 200.0
data$R2350020[250.0 <= data$R2350020 & data$R2350020 <= 299.0] <- 250.0
data$R2350020[300.0 <= data$R2350020 & data$R2350020 <= 349.0] <- 300.0
data$R2350020[350.0 <= data$R2350020 & data$R2350020 <= 399.0] <- 350.0
data$R2350020[400.0 <= data$R2350020 & data$R2350020 <= 449.0] <- 400.0
data$R2350020[450.0 <= data$R2350020 & data$R2350020 <= 499.0] <- 450.0
data$R2350020[500.0 <= data$R2350020 & data$R2350020 <= 549.0] <- 500.0
data$R2350020[550.0 <= data$R2350020 & data$R2350020 <= 599.0] <- 550.0
data$R2350020[600.0 <= data$R2350020 & data$R2350020 <= 649.0] <- 600.0
data$R2350020[650.0 <= data$R2350020 & data$R2350020 <= 699.0] <- 650.0
data$R2350020[700.0 <= data$R2350020 & data$R2350020 <= 749.0] <- 700.0
data$R2350020[750.0 <= data$R2350020 & data$R2350020 <= 799.0] <- 750.0
data$R2350020[800.0 <= data$R2350020 & data$R2350020 <= 9999999.0] <- 800.0
data$R2350020 <- factor(data$R2350020,
levels=c(0.0,1.0,50.0,100.0,150.0,200.0,250.0,300.0,350.0,400.0,450.0,500.0,550.0,600.0,650.0,700.0,750.0,800.0),
labels=c("0",
"1 TO 49",
"50 TO 99",
"100 TO 149",
"150 TO 199",
"200 TO 249",
"250 TO 299",
"300 TO 349",
"350 TO 399",
"400 TO 449",
"450 TO 499",
"500 TO 549",
"550 TO 599",
"600 TO 649",
"650 TO 699",
"700 TO 749",
"750 TO 799",
"800 TO 9999999: 800+"))
data$R2509000 <- factor(data$R2509000,
levels=c(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("NONE",
"1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YR COL",
"2ND YR COL",
"3RD YR COL",
"4TH YR COL",
"5TH YR COL",
"6TH YR COL",
"7TH YR COL",
"8TH YR COL OR MORE",
"UNGRADED"))
data$R2722500[1.0 <= data$R2722500 & data$R2722500 <= 999.0] <- 1.0
data$R2722500[1000.0 <= data$R2722500 & data$R2722500 <= 1999.0] <- 1000.0
data$R2722500[2000.0 <= data$R2722500 & data$R2722500 <= 2999.0] <- 2000.0
data$R2722500[3000.0 <= data$R2722500 & data$R2722500 <= 3999.0] <- 3000.0
data$R2722500[4000.0 <= data$R2722500 & data$R2722500 <= 4999.0] <- 4000.0
data$R2722500[5000.0 <= data$R2722500 & data$R2722500 <= 5999.0] <- 5000.0
data$R2722500[6000.0 <= data$R2722500 & data$R2722500 <= 6999.0] <- 6000.0
data$R2722500[7000.0 <= data$R2722500 & data$R2722500 <= 7999.0] <- 7000.0
data$R2722500[8000.0 <= data$R2722500 & data$R2722500 <= 8999.0] <- 8000.0
data$R2722500[9000.0 <= data$R2722500 & data$R2722500 <= 9999.0] <- 9000.0
data$R2722500[10000.0 <= data$R2722500 & data$R2722500 <= 14999.0] <- 10000.0
data$R2722500[15000.0 <= data$R2722500 & data$R2722500 <= 19999.0] <- 15000.0
data$R2722500[20000.0 <= data$R2722500 & data$R2722500 <= 24999.0] <- 20000.0
data$R2722500[25000.0 <= data$R2722500 & data$R2722500 <= 49999.0] <- 25000.0
data$R2722500[50000.0 <= data$R2722500 & data$R2722500 <= 9999999.0] <- 50000.0
data$R2722500 <- factor(data$R2722500,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2724700[1.0 <= data$R2724700 & data$R2724700 <= 999.0] <- 1.0
data$R2724700[1000.0 <= data$R2724700 & data$R2724700 <= 1999.0] <- 1000.0
data$R2724700[2000.0 <= data$R2724700 & data$R2724700 <= 2999.0] <- 2000.0
data$R2724700[3000.0 <= data$R2724700 & data$R2724700 <= 3999.0] <- 3000.0
data$R2724700[4000.0 <= data$R2724700 & data$R2724700 <= 4999.0] <- 4000.0
data$R2724700[5000.0 <= data$R2724700 & data$R2724700 <= 5999.0] <- 5000.0
data$R2724700[6000.0 <= data$R2724700 & data$R2724700 <= 6999.0] <- 6000.0
data$R2724700[7000.0 <= data$R2724700 & data$R2724700 <= 7999.0] <- 7000.0
data$R2724700[8000.0 <= data$R2724700 & data$R2724700 <= 8999.0] <- 8000.0
data$R2724700[9000.0 <= data$R2724700 & data$R2724700 <= 9999.0] <- 9000.0
data$R2724700[10000.0 <= data$R2724700 & data$R2724700 <= 14999.0] <- 10000.0
data$R2724700[15000.0 <= data$R2724700 & data$R2724700 <= 19999.0] <- 15000.0
data$R2724700[20000.0 <= data$R2724700 & data$R2724700 <= 24999.0] <- 20000.0
data$R2724700[25000.0 <= data$R2724700 & data$R2724700 <= 49999.0] <- 25000.0
data$R2724700[50000.0 <= data$R2724700 & data$R2724700 <= 9999999.0] <- 50000.0
data$R2724700 <- factor(data$R2724700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2724701[1.0 <= data$R2724701 & data$R2724701 <= 999.0] <- 1.0
data$R2724701[1000.0 <= data$R2724701 & data$R2724701 <= 1999.0] <- 1000.0
data$R2724701[2000.0 <= data$R2724701 & data$R2724701 <= 2999.0] <- 2000.0
data$R2724701[3000.0 <= data$R2724701 & data$R2724701 <= 3999.0] <- 3000.0
data$R2724701[4000.0 <= data$R2724701 & data$R2724701 <= 4999.0] <- 4000.0
data$R2724701[5000.0 <= data$R2724701 & data$R2724701 <= 5999.0] <- 5000.0
data$R2724701[6000.0 <= data$R2724701 & data$R2724701 <= 6999.0] <- 6000.0
data$R2724701[7000.0 <= data$R2724701 & data$R2724701 <= 7999.0] <- 7000.0
data$R2724701[8000.0 <= data$R2724701 & data$R2724701 <= 8999.0] <- 8000.0
data$R2724701[9000.0 <= data$R2724701 & data$R2724701 <= 9999.0] <- 9000.0
data$R2724701[10000.0 <= data$R2724701 & data$R2724701 <= 14999.0] <- 10000.0
data$R2724701[15000.0 <= data$R2724701 & data$R2724701 <= 19999.0] <- 15000.0
data$R2724701[20000.0 <= data$R2724701 & data$R2724701 <= 24999.0] <- 20000.0
data$R2724701[25000.0 <= data$R2724701 & data$R2724701 <= 49999.0] <- 25000.0
data$R2724701[50000.0 <= data$R2724701 & data$R2724701 <= 9999999.0] <- 50000.0
data$R2724701 <- factor(data$R2724701,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2726800[1.0 <= data$R2726800 & data$R2726800 <= 499.0] <- 1.0
data$R2726800[500.0 <= data$R2726800 & data$R2726800 <= 999.0] <- 500.0
data$R2726800[1000.0 <= data$R2726800 & data$R2726800 <= 1499.0] <- 1000.0
data$R2726800[1500.0 <= data$R2726800 & data$R2726800 <= 1999.0] <- 1500.0
data$R2726800[2000.0 <= data$R2726800 & data$R2726800 <= 2499.0] <- 2000.0
data$R2726800[2500.0 <= data$R2726800 & data$R2726800 <= 2999.0] <- 2500.0
data$R2726800[3000.0 <= data$R2726800 & data$R2726800 <= 3499.0] <- 3000.0
data$R2726800[3500.0 <= data$R2726800 & data$R2726800 <= 3999.0] <- 3500.0
data$R2726800[4000.0 <= data$R2726800 & data$R2726800 <= 4499.0] <- 4000.0
data$R2726800[4500.0 <= data$R2726800 & data$R2726800 <= 4999.0] <- 4500.0
data$R2726800[5000.0 <= data$R2726800 & data$R2726800 <= 9999999.0] <- 5000.0
data$R2726800 <- factor(data$R2726800,
levels=c(0.0,1.0,500.0,1000.0,1500.0,2000.0,2500.0,3000.0,3500.0,4000.0,4500.0,5000.0),
labels=c("0",
"1 TO 499",
"500 TO 999",
"1000 TO 1499",
"1500 TO 1999",
"2000 TO 2499",
"2500 TO 2999",
"3000 TO 3499",
"3500 TO 3999",
"4000 TO 4499",
"4500 TO 4999",
"5000 TO 9999999: 5000+"))
data$R2727300[1.0 <= data$R2727300 & data$R2727300 <= 499.0] <- 1.0
data$R2727300[500.0 <= data$R2727300 & data$R2727300 <= 999.0] <- 500.0
data$R2727300[1000.0 <= data$R2727300 & data$R2727300 <= 1499.0] <- 1000.0
data$R2727300[1500.0 <= data$R2727300 & data$R2727300 <= 1999.0] <- 1500.0
data$R2727300[2000.0 <= data$R2727300 & data$R2727300 <= 2499.0] <- 2000.0
data$R2727300[2500.0 <= data$R2727300 & data$R2727300 <= 2999.0] <- 2500.0
data$R2727300[3000.0 <= data$R2727300 & data$R2727300 <= 3499.0] <- 3000.0
data$R2727300[3500.0 <= data$R2727300 & data$R2727300 <= 3999.0] <- 3500.0
data$R2727300[4000.0 <= data$R2727300 & data$R2727300 <= 4499.0] <- 4000.0
data$R2727300[4500.0 <= data$R2727300 & data$R2727300 <= 4999.0] <- 4500.0
data$R2727300[5000.0 <= data$R2727300 & data$R2727300 <= 9999999.0] <- 5000.0
data$R2727300 <- factor(data$R2727300,
levels=c(0.0,1.0,500.0,1000.0,1500.0,2000.0,2500.0,3000.0,3500.0,4000.0,4500.0,5000.0),
labels=c("0",
"1 TO 499",
"500 TO 999",
"1000 TO 1499",
"1500 TO 1999",
"2000 TO 2499",
"2500 TO 2999",
"3000 TO 3499",
"3500 TO 3999",
"4000 TO 4499",
"4500 TO 4999",
"5000 TO 9999999: 5000+"))
data$R2731700[1.0 <= data$R2731700 & data$R2731700 <= 99.0] <- 1.0
data$R2731700[100.0 <= data$R2731700 & data$R2731700 <= 199.0] <- 100.0
data$R2731700[200.0 <= data$R2731700 & data$R2731700 <= 299.0] <- 200.0
data$R2731700[300.0 <= data$R2731700 & data$R2731700 <= 399.0] <- 300.0
data$R2731700[400.0 <= data$R2731700 & data$R2731700 <= 499.0] <- 400.0
data$R2731700[500.0 <= data$R2731700 & data$R2731700 <= 599.0] <- 500.0
data$R2731700[600.0 <= data$R2731700 & data$R2731700 <= 699.0] <- 600.0
data$R2731700[700.0 <= data$R2731700 & data$R2731700 <= 799.0] <- 700.0
data$R2731700[800.0 <= data$R2731700 & data$R2731700 <= 899.0] <- 800.0
data$R2731700[900.0 <= data$R2731700 & data$R2731700 <= 999.0] <- 900.0
data$R2731700[1000.0 <= data$R2731700 & data$R2731700 <= 9999999.0] <- 1000.0
data$R2731700 <- factor(data$R2731700,
levels=c(0.0,1.0,100.0,200.0,300.0,400.0,500.0,600.0,700.0,800.0,900.0,1000.0),
labels=c("0",
"1 TO 99",
"100 TO 199",
"200 TO 299",
"300 TO 399",
"400 TO 499",
"500 TO 599",
"600 TO 699",
"700 TO 799",
"800 TO 899",
"900 TO 999",
"1000 TO 9999999: 1000+"))
data$R2870200[1.0 <= data$R2870200 & data$R2870200 <= 999.0] <- 1.0
data$R2870200[1000.0 <= data$R2870200 & data$R2870200 <= 1999.0] <- 1000.0
data$R2870200[2000.0 <= data$R2870200 & data$R2870200 <= 2999.0] <- 2000.0
data$R2870200[3000.0 <= data$R2870200 & data$R2870200 <= 3999.0] <- 3000.0
data$R2870200[4000.0 <= data$R2870200 & data$R2870200 <= 4999.0] <- 4000.0
data$R2870200[5000.0 <= data$R2870200 & data$R2870200 <= 5999.0] <- 5000.0
data$R2870200[6000.0 <= data$R2870200 & data$R2870200 <= 6999.0] <- 6000.0
data$R2870200[7000.0 <= data$R2870200 & data$R2870200 <= 7999.0] <- 7000.0
data$R2870200[8000.0 <= data$R2870200 & data$R2870200 <= 8999.0] <- 8000.0
data$R2870200[9000.0 <= data$R2870200 & data$R2870200 <= 9999.0] <- 9000.0
data$R2870200[10000.0 <= data$R2870200 & data$R2870200 <= 14999.0] <- 10000.0
data$R2870200[15000.0 <= data$R2870200 & data$R2870200 <= 19999.0] <- 15000.0
data$R2870200[20000.0 <= data$R2870200 & data$R2870200 <= 24999.0] <- 20000.0
data$R2870200[25000.0 <= data$R2870200 & data$R2870200 <= 49999.0] <- 25000.0
data$R2870200[50000.0 <= data$R2870200 & data$R2870200 <= 9999999.0] <- 50000.0
data$R2870200 <- factor(data$R2870200,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R2872700 <- factor(data$R2872700,
levels=c(0.0,1.0),
labels=c("RURAL",
"URBAN"))
data$R2872800 <- factor(data$R2872800,
levels=c(0.0,1.0,2.0,3.0),
labels=c("NOT IN SMSA",
"SMSA, NOT CENTRAL CITY",
"SMSA, CENTRAL CITY NOT KNOWN",
"SMSA, IN CENTRAL CITY"))
data$R3110200 <- factor(data$R3110200,
levels=c(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("NONE",
"1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YR COL",
"2ND YR COL",
"3RD YR COL",
"4TH YR COL",
"5TH YR COL",
"6TH YR COL",
"7TH YR COL",
"8TH YR COL OR MORE",
"UNGRADED"))
data$R3400700[1.0 <= data$R3400700 & data$R3400700 <= 999.0] <- 1.0
data$R3400700[1000.0 <= data$R3400700 & data$R3400700 <= 1999.0] <- 1000.0
data$R3400700[2000.0 <= data$R3400700 & data$R3400700 <= 2999.0] <- 2000.0
data$R3400700[3000.0 <= data$R3400700 & data$R3400700 <= 3999.0] <- 3000.0
data$R3400700[4000.0 <= data$R3400700 & data$R3400700 <= 4999.0] <- 4000.0
data$R3400700[5000.0 <= data$R3400700 & data$R3400700 <= 5999.0] <- 5000.0
data$R3400700[6000.0 <= data$R3400700 & data$R3400700 <= 6999.0] <- 6000.0
data$R3400700[7000.0 <= data$R3400700 & data$R3400700 <= 7999.0] <- 7000.0
data$R3400700[8000.0 <= data$R3400700 & data$R3400700 <= 8999.0] <- 8000.0
data$R3400700[9000.0 <= data$R3400700 & data$R3400700 <= 9999.0] <- 9000.0
data$R3400700[10000.0 <= data$R3400700 & data$R3400700 <= 14999.0] <- 10000.0
data$R3400700[15000.0 <= data$R3400700 & data$R3400700 <= 19999.0] <- 15000.0
data$R3400700[20000.0 <= data$R3400700 & data$R3400700 <= 24999.0] <- 20000.0
data$R3400700[25000.0 <= data$R3400700 & data$R3400700 <= 49999.0] <- 25000.0
data$R3400700[50000.0 <= data$R3400700 & data$R3400700 <= 9999999.0] <- 50000.0
data$R3400700 <- factor(data$R3400700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
data$R3403100 <- factor(data$R3403100,
levels=c(0.0,1.0),
labels=c("RURAL",
"URBAN"))
data$R3403200 <- factor(data$R3403200,
levels=c(0.0,1.0,2.0,3.0),
labels=c("NOT IN SMSA",
"SMSA, NOT CENTRAL CITY",
"SMSA, CENTRAL CITY NOT KNOWN",
"SMSA, IN CENTRAL CITY"))
data$R3710200 <- factor(data$R3710200,
levels=c(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("NONE",
"1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YR COL",
"2ND YR COL",
"3RD YR COL",
"4TH YR COL",
"5TH YR COL",
"6TH YR COL",
"7TH YR COL",
"8TH YR COL OR MORE",
"UNGRADED"))
data$R3896830[1.0 <= data$R3896830 & data$R3896830 <= 49.0] <- 1.0
# --- R3896830: collapse raw values into 50-unit brackets, then label. ---
# Each assignment maps a closed range onto its lower bound; the 9999999
# upper limit acts as an open-ended sentinel, so 800+ is top-coded.  The
# 0 and "1 TO 49" recodes precede this excerpt (hence levels 0.0 and 1.0
# in the factor() call below).
data$R3896830[50.0 <= data$R3896830 & data$R3896830 <= 99.0] <- 50.0
data$R3896830[100.0 <= data$R3896830 & data$R3896830 <= 149.0] <- 100.0
data$R3896830[150.0 <= data$R3896830 & data$R3896830 <= 199.0] <- 150.0
data$R3896830[200.0 <= data$R3896830 & data$R3896830 <= 249.0] <- 200.0
data$R3896830[250.0 <= data$R3896830 & data$R3896830 <= 299.0] <- 250.0
data$R3896830[300.0 <= data$R3896830 & data$R3896830 <= 349.0] <- 300.0
data$R3896830[350.0 <= data$R3896830 & data$R3896830 <= 399.0] <- 350.0
data$R3896830[400.0 <= data$R3896830 & data$R3896830 <= 449.0] <- 400.0
data$R3896830[450.0 <= data$R3896830 & data$R3896830 <= 499.0] <- 450.0
data$R3896830[500.0 <= data$R3896830 & data$R3896830 <= 549.0] <- 500.0
data$R3896830[550.0 <= data$R3896830 & data$R3896830 <= 599.0] <- 550.0
data$R3896830[600.0 <= data$R3896830 & data$R3896830 <= 649.0] <- 600.0
data$R3896830[650.0 <= data$R3896830 & data$R3896830 <= 699.0] <- 650.0
data$R3896830[700.0 <= data$R3896830 & data$R3896830 <= 749.0] <- 700.0
data$R3896830[750.0 <= data$R3896830 & data$R3896830 <= 799.0] <- 750.0
data$R3896830[800.0 <= data$R3896830 & data$R3896830 <= 9999999.0] <- 800.0
# Convert the bracket codes to a labelled factor (unmatched codes become NA).
data$R3896830 <- factor(data$R3896830,
levels=c(0.0,1.0,50.0,100.0,150.0,200.0,250.0,300.0,350.0,400.0,450.0,500.0,550.0,600.0,650.0,700.0,750.0,800.0),
labels=c("0",
"1 TO 49",
"50 TO 99",
"100 TO 149",
"150 TO 199",
"200 TO 249",
"250 TO 299",
"300 TO 349",
"350 TO 399",
"400 TO 449",
"450 TO 499",
"500 TO 549",
"550 TO 599",
"600 TO 649",
"650 TO 699",
"700 TO 749",
"750 TO 799",
"800 TO 9999999: 800+"))
# --- R4006600: income-style variable (see varlabels below) binned into
# $1000 brackets up to 10k, $5000 brackets to 25k, one wide bracket to
# 50k, then top-coded at 50000+.  NOTE(review): the open-ended sentinel
# here is 9999999.0 while the analogous recodes further down use
# 9.9999999E7 — presumably both mean "no upper limit"; confirm against
# the NLSY codebook.
data$R4006600[1.0 <= data$R4006600 & data$R4006600 <= 999.0] <- 1.0
data$R4006600[1000.0 <= data$R4006600 & data$R4006600 <= 1999.0] <- 1000.0
data$R4006600[2000.0 <= data$R4006600 & data$R4006600 <= 2999.0] <- 2000.0
data$R4006600[3000.0 <= data$R4006600 & data$R4006600 <= 3999.0] <- 3000.0
data$R4006600[4000.0 <= data$R4006600 & data$R4006600 <= 4999.0] <- 4000.0
data$R4006600[5000.0 <= data$R4006600 & data$R4006600 <= 5999.0] <- 5000.0
data$R4006600[6000.0 <= data$R4006600 & data$R4006600 <= 6999.0] <- 6000.0
data$R4006600[7000.0 <= data$R4006600 & data$R4006600 <= 7999.0] <- 7000.0
data$R4006600[8000.0 <= data$R4006600 & data$R4006600 <= 8999.0] <- 8000.0
data$R4006600[9000.0 <= data$R4006600 & data$R4006600 <= 9999.0] <- 9000.0
data$R4006600[10000.0 <= data$R4006600 & data$R4006600 <= 14999.0] <- 10000.0
data$R4006600[15000.0 <= data$R4006600 & data$R4006600 <= 19999.0] <- 15000.0
data$R4006600[20000.0 <= data$R4006600 & data$R4006600 <= 24999.0] <- 20000.0
data$R4006600[25000.0 <= data$R4006600 & data$R4006600 <= 49999.0] <- 25000.0
data$R4006600[50000.0 <= data$R4006600 & data$R4006600 <= 9999999.0] <- 50000.0
data$R4006600 <- factor(data$R4006600,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 9999999: 50000+"))
# --- R4009000 / R4009100: residence indicators, straight factor labels. ---
data$R4009000 <- factor(data$R4009000,
levels=c(0.0,1.0),
labels=c("RURAL",
"URBAN"))
data$R4009100 <- factor(data$R4009100,
levels=c(0.0,1.0,2.0,3.0),
labels=c("NOT IN SMSA",
"SMSA, NOT CENTRAL CITY",
"SMSA, CENTRAL CITY NOT KNOWN",
"SMSA, IN CENTRAL CITY"))
# --- R4526500: educational attainment (grade/year codes 1-20, 95 = ungraded)
# as a labelled factor.  No recode needed: the raw codes are already the
# factor levels. ---
data$R4526500 <- factor(data$R4526500,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
# --- R5080700: income bracket recode, same scheme as R4006600 but with the
# 9.9999999E7 open-ended sentinel (label "50000 TO 99999999: 50000+"). ---
data$R5080700[1.0 <= data$R5080700 & data$R5080700 <= 999.0] <- 1.0
data$R5080700[1000.0 <= data$R5080700 & data$R5080700 <= 1999.0] <- 1000.0
data$R5080700[2000.0 <= data$R5080700 & data$R5080700 <= 2999.0] <- 2000.0
data$R5080700[3000.0 <= data$R5080700 & data$R5080700 <= 3999.0] <- 3000.0
data$R5080700[4000.0 <= data$R5080700 & data$R5080700 <= 4999.0] <- 4000.0
data$R5080700[5000.0 <= data$R5080700 & data$R5080700 <= 5999.0] <- 5000.0
data$R5080700[6000.0 <= data$R5080700 & data$R5080700 <= 6999.0] <- 6000.0
data$R5080700[7000.0 <= data$R5080700 & data$R5080700 <= 7999.0] <- 7000.0
data$R5080700[8000.0 <= data$R5080700 & data$R5080700 <= 8999.0] <- 8000.0
data$R5080700[9000.0 <= data$R5080700 & data$R5080700 <= 9999.0] <- 9000.0
data$R5080700[10000.0 <= data$R5080700 & data$R5080700 <= 14999.0] <- 10000.0
data$R5080700[15000.0 <= data$R5080700 & data$R5080700 <= 19999.0] <- 15000.0
data$R5080700[20000.0 <= data$R5080700 & data$R5080700 <= 24999.0] <- 20000.0
data$R5080700[25000.0 <= data$R5080700 & data$R5080700 <= 49999.0] <- 25000.0
data$R5080700[50000.0 <= data$R5080700 & data$R5080700 <= 9.9999999E7] <- 50000.0
data$R5080700 <- factor(data$R5080700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
# --- R5083100 / R5083200: residence indicators.  NOTE(review): the label
# prefixes ("0: 0 RURAL") differ from other survey years' labels
# ("RURAL", "0: RURAL") — harmless but inconsistent across years. ---
data$R5083100 <- factor(data$R5083100,
levels=c(0.0,1.0),
labels=c("0: 0 RURAL",
"1: 1 URBAN"))
data$R5083200 <- factor(data$R5083200,
levels=c(0.0,1.0,2.0,3.0),
labels=c("0: 0 NOT IN SMSA",
"1: 1 SMSA, NOT CENTRAL CITY",
"2: 2 SMSA, CENTRAL CITY NOT KNOWN",
"3: 3 SMSA, IN CENTRAL CITY"))
# --- R5166000: income bracket recode (same scheme as R5080700). ---
data$R5166000[1.0 <= data$R5166000 & data$R5166000 <= 999.0] <- 1.0
data$R5166000[1000.0 <= data$R5166000 & data$R5166000 <= 1999.0] <- 1000.0
data$R5166000[2000.0 <= data$R5166000 & data$R5166000 <= 2999.0] <- 2000.0
data$R5166000[3000.0 <= data$R5166000 & data$R5166000 <= 3999.0] <- 3000.0
data$R5166000[4000.0 <= data$R5166000 & data$R5166000 <= 4999.0] <- 4000.0
data$R5166000[5000.0 <= data$R5166000 & data$R5166000 <= 5999.0] <- 5000.0
data$R5166000[6000.0 <= data$R5166000 & data$R5166000 <= 6999.0] <- 6000.0
data$R5166000[7000.0 <= data$R5166000 & data$R5166000 <= 7999.0] <- 7000.0
data$R5166000[8000.0 <= data$R5166000 & data$R5166000 <= 8999.0] <- 8000.0
data$R5166000[9000.0 <= data$R5166000 & data$R5166000 <= 9999.0] <- 9000.0
data$R5166000[10000.0 <= data$R5166000 & data$R5166000 <= 14999.0] <- 10000.0
data$R5166000[15000.0 <= data$R5166000 & data$R5166000 <= 19999.0] <- 15000.0
data$R5166000[20000.0 <= data$R5166000 & data$R5166000 <= 24999.0] <- 20000.0
data$R5166000[25000.0 <= data$R5166000 & data$R5166000 <= 49999.0] <- 25000.0
data$R5166000[50000.0 <= data$R5166000 & data$R5166000 <= 9.9999999E7] <- 50000.0
data$R5166000 <- factor(data$R5166000,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
# --- R5168400 / R5168500: residence indicators. ---
data$R5168400 <- factor(data$R5168400,
levels=c(0.0,1.0),
labels=c("0: RURAL",
"1: URBAN"))
data$R5168500 <- factor(data$R5168500,
levels=c(0.0,1.0,2.0,3.0),
labels=c("0: NOT IN SMSA",
"1: SMSA, NOT CENTRAL CITY",
"2: SMSA, CENTRAL CITY NOT KNOWN",
"3: SMSA, IN CENTRAL CITY"))
# --- R5221800: educational attainment (codes 1-20, 95 = ungraded). ---
data$R5221800 <- factor(data$R5221800,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
# --- R5821800: educational attainment (same coding as R5221800). ---
data$R5821800 <- factor(data$R5821800,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
# --- R6478700: income bracket recode (same scheme as R5080700/R5166000). ---
data$R6478700[1.0 <= data$R6478700 & data$R6478700 <= 999.0] <- 1.0
data$R6478700[1000.0 <= data$R6478700 & data$R6478700 <= 1999.0] <- 1000.0
data$R6478700[2000.0 <= data$R6478700 & data$R6478700 <= 2999.0] <- 2000.0
data$R6478700[3000.0 <= data$R6478700 & data$R6478700 <= 3999.0] <- 3000.0
data$R6478700[4000.0 <= data$R6478700 & data$R6478700 <= 4999.0] <- 4000.0
data$R6478700[5000.0 <= data$R6478700 & data$R6478700 <= 5999.0] <- 5000.0
data$R6478700[6000.0 <= data$R6478700 & data$R6478700 <= 6999.0] <- 6000.0
data$R6478700[7000.0 <= data$R6478700 & data$R6478700 <= 7999.0] <- 7000.0
data$R6478700[8000.0 <= data$R6478700 & data$R6478700 <= 8999.0] <- 8000.0
data$R6478700[9000.0 <= data$R6478700 & data$R6478700 <= 9999.0] <- 9000.0
data$R6478700[10000.0 <= data$R6478700 & data$R6478700 <= 14999.0] <- 10000.0
data$R6478700[15000.0 <= data$R6478700 & data$R6478700 <= 19999.0] <- 15000.0
data$R6478700[20000.0 <= data$R6478700 & data$R6478700 <= 24999.0] <- 20000.0
data$R6478700[25000.0 <= data$R6478700 & data$R6478700 <= 49999.0] <- 25000.0
data$R6478700[50000.0 <= data$R6478700 & data$R6478700 <= 9.9999999E7] <- 50000.0
data$R6478700 <- factor(data$R6478700,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
# --- R6481200 / R6481300: residence indicators. ---
data$R6481200 <- factor(data$R6481200,
levels=c(0.0,1.0),
labels=c("0: 0 RURAL",
"1: 1 URBAN"))
data$R6481300 <- factor(data$R6481300,
levels=c(0.0,1.0,2.0,3.0),
labels=c("0: NOT IN SMSA",
"1: SMSA, NOT CENTRAL CITY",
"2: SMSA, CENTRAL CITY NOT KNOWN",
"3: SMSA, IN CENTRAL CITY"))
# --- R6540400: educational attainment (codes 1-20, 95 = ungraded). ---
data$R6540400 <- factor(data$R6540400,
levels=c(1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0,19.0,20.0,95.0),
labels=c("1ST GRADE",
"2ND GRADE",
"3RD GRADE",
"4TH GRADE",
"5TH GRADE",
"6TH GRADE",
"7TH GRADE",
"8TH GRADE",
"9TH GRADE",
"10TH GRADE",
"11TH GRADE",
"12TH GRADE",
"1ST YEAR COLLEGE",
"2ND YEAR COLLEGE",
"3RD YEAR COLLEGE",
"4TH YEAR COLLEGE",
"5TH YEAR COLLEGE",
"6TH YEAR COLLEGE",
"7TH YEAR COLLEGE",
"8TH YEAR COLLEGE OR MORE",
"UNGRADED"))
# --- R7006500: income bracket recode (same scheme as R6478700). ---
data$R7006500[1.0 <= data$R7006500 & data$R7006500 <= 999.0] <- 1.0
data$R7006500[1000.0 <= data$R7006500 & data$R7006500 <= 1999.0] <- 1000.0
data$R7006500[2000.0 <= data$R7006500 & data$R7006500 <= 2999.0] <- 2000.0
data$R7006500[3000.0 <= data$R7006500 & data$R7006500 <= 3999.0] <- 3000.0
data$R7006500[4000.0 <= data$R7006500 & data$R7006500 <= 4999.0] <- 4000.0
data$R7006500[5000.0 <= data$R7006500 & data$R7006500 <= 5999.0] <- 5000.0
data$R7006500[6000.0 <= data$R7006500 & data$R7006500 <= 6999.0] <- 6000.0
data$R7006500[7000.0 <= data$R7006500 & data$R7006500 <= 7999.0] <- 7000.0
data$R7006500[8000.0 <= data$R7006500 & data$R7006500 <= 8999.0] <- 8000.0
data$R7006500[9000.0 <= data$R7006500 & data$R7006500 <= 9999.0] <- 9000.0
data$R7006500[10000.0 <= data$R7006500 & data$R7006500 <= 14999.0] <- 10000.0
data$R7006500[15000.0 <= data$R7006500 & data$R7006500 <= 19999.0] <- 15000.0
data$R7006500[20000.0 <= data$R7006500 & data$R7006500 <= 24999.0] <- 20000.0
data$R7006500[25000.0 <= data$R7006500 & data$R7006500 <= 49999.0] <- 25000.0
data$R7006500[50000.0 <= data$R7006500 & data$R7006500 <= 9.9999999E7] <- 50000.0
data$R7006500 <- factor(data$R7006500,
levels=c(0.0,1.0,1000.0,2000.0,3000.0,4000.0,5000.0,6000.0,7000.0,8000.0,9000.0,10000.0,15000.0,20000.0,25000.0,50000.0),
labels=c("0",
"1 TO 999",
"1000 TO 1999",
"2000 TO 2999",
"3000 TO 3999",
"4000 TO 4999",
"5000 TO 5999",
"6000 TO 6999",
"7000 TO 7999",
"8000 TO 8999",
"9000 TO 9999",
"10000 TO 14999",
"15000 TO 19999",
"20000 TO 24999",
"25000 TO 49999",
"50000 TO 99999999: 50000+"))
# --- R7008900 / R7009000: residence indicators.  Note the 2000 coding adds
# an explicit "UNKNOWN" urban/rural level and switches to 1-4 MSA codes. ---
data$R7008900 <- factor(data$R7008900,
levels=c(0.0,1.0,2.0),
labels=c("0: RURAL",
"1: URBAN",
"2: UNKNOWN"))
data$R7009000 <- factor(data$R7009000,
levels=c(1.0,2.0,3.0,4.0),
labels=c("1: NOT IN MSA",
"2: IN MSA, NOT IN CENTRAL CITY",
"3: IN MSA, IN CENTRAL CITY",
"4: IN MSA, CENTRAL CITY NOT KNOWN"))
# Return the data frame with all value labels applied.
return(data)
}
# Human-readable variable labels, listed in the same fixed column order as
# the data extract (and as the QNAMEs assigned by qnames() below).
varlabels <- c("VERSION_R26_1 2014",
"ID# (1-12686) 79",
"SAMPLE ID 79 INT",
"RACL/ETHNIC COHORT /SCRNR 79",
"SEX OF R 79",
"ROSENBERG ESTEEM ITEM RESPONSE SCORE 87",
"HGC 88",
"TOT INC WAGES AND SALRY P-C YR 88",
"TOT INC SP WAGE AND SALRY P-C YR 88",
"TOT INC SP WAGE AND SALRY P-C YR 88 (TRUNC)",
"TOT INC ALIMONY RCVD 87 88",
"TOT INC CHILD SUPP RCVD 87 88",
"AVG MO INC SSI RCVD IN 87 88",
"TOT NET FAMILY INC P-C YR 88",
"RS CURRENT RESIDENCE URBAN/RURAL 88",
"RS CURRENT RESIDENCE IN SMSA 88",
"HGC 90",
"TOT NET FAMILY INC P-C YR 90",
"RS CURRENT RESIDENCE URBAN/RURAL 90",
"RS CURRENT RESIDENCE IN SMSA 90",
"HGC 92",
"20-ITEM CES-D ITEM RESPONSE SCORE 92",
"TOT NET FAMILY INC P-C YR 92",
"RS CURRENT RESIDENCE URBAN/RURAL 92",
"RS CURRENT RESIDENCE IN SMSA 92",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 94",
"TOTAL NET FAMILY INCOME 94",
"RS RESIDENCE URBAN OR RURAL 94",
"RS RESIDENCE IN SMSA 94",
"TOTAL NET FAMILY INCOME 96",
"RS RESIDENCE URBAN OR RURAL 96",
"RS RESIDENCE IN SMSA 96",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 96",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 1998",
"TOTAL NET FAMILY INCOME 1998",
"RS RESIDENCE URBAN OR RURAL 1998",
"RS RESIDENCE IN SMSA 1998",
"HGHST GRADE/YR COMPLTD & GOT CREDIT 2000",
"TOTAL NET FAMILY INCOME 2000",
"RS RESIDENCE URBAN OR RURAL 2000",
"RS RESIDENCE IN SMSA 2000"
)
# Use qnames rather than rnums
# Rename the columns of the NLSY extract from reference numbers (RNUMs) to
# question names (QNAMEs).
#
# data: a data frame with exactly 41 columns, in the fixed extract order
#       (the same order as the `varlabels` vector above).
# Returns the same data frame with its columns renamed.
qnames <- function(data) {
  question_names <- c(
    "VERSION_R26_2014",
    "CASEID_1979",
    "SAMPLE_ID_1979",
    "SAMPLE_RACE_78SCRN",
    "SAMPLE_SEX_1979",
    "ROSENBERG_IRT_SCORE_1987",
    "Q3-4_1988",
    "Q13-5_1988",
    "Q13-18_1988",
    "Q13-18_TRUNC_REVISED_1988",
    "INCOME-2C_1988",
    "INCOME-5D_1988",
    "INCOME-9C_1988",
    "TNFI_TRUNC_1988",
    "URBAN-RURAL_1988",
    "SMSARES_1988",
    "Q3-4_1990",
    "TNFI_TRUNC_1990",
    "URBAN-RURAL_1990",
    "SMSARES_1990",
    "Q3-4_1992",
    "CESD_IRT_SCORE_20_ITEM_1992",
    "TNFI_TRUNC_1992",
    "URBAN-RURAL_1992",
    "SMSARES_1992",
    "Q3-4_1994",
    "TNFI_TRUNC_1994",
    "URBAN-RURAL_1994",
    "SMSARES_1994",
    "TNFI_TRUNC_1996",
    "URBAN-RURAL_1996",
    "SMSARES_1996",
    "Q3-4_1996",
    "Q3-4_1998",
    "TNFI_TRUNC_1998",
    "URBAN-RURAL_1998",
    "SMSARES_1998",
    "Q3-4_2000",
    "TNFI_TRUNC_2000",
    "URBAN-RURAL_2000",
    "SMSARES_2000"
  )
  names(data) <- question_names
  data
}
#********************************************************************************************************
# Remove the '#' before the following line to create a data file called "categories" with value labels.
# NOTE(review): the line below is executed as shipped (it is not commented
# out, despite the instruction above).  `vallabels` and `new_data` are
# defined earlier in this file, outside this excerpt.
categories <- vallabels(new_data)
|
# Kernel PCA
# PCA and LDA (above) work on linear problems, i.e. when the data are
# linearly separable.  Kernel PCA handles non-linear problems: it is the
# kernelised version of PCA, where the data are implicitly mapped to a
# higher-dimensional space via the kernel trick and the new principal
# components are extracted there.  A logistic regression model (as in the
# previous section) is then fitted on the extracted components.
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[, 3:5]
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling: standardise the two predictor columns
training_set[, 1:2] = scale(training_set[, 1:2])
test_set[, 1:2] = scale(test_set[, 1:2])
# Applying Kernel PCA
# install.packages('kernlab')
library(kernlab)
kpca = kpca(~., data = training_set[-3], kernel = 'rbfdot', features = 2) # [-3] drops the dependent variable; rbfdot = Gaussian (RBF) kernel; 2 components so the result can be visualised in 2-D
training_set_pca = as.data.frame(predict(kpca, training_set)) # project the data onto the extracted components; predict() returns a matrix, hence the data-frame wrap
training_set_pca$Purchased = training_set$Purchased # re-attach the dependent variable to the transformed training set
test_set_pca = as.data.frame(predict(kpca, test_set))
test_set_pca$Purchased = test_set$Purchased
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Purchased ~ ., # linear classifier on the kernel-PCA components
family = binomial,
data = training_set_pca) # note: fitted on the transformed set, training_set_pca
# Predicting the Test set results
prob_pred = predict(classifier, type = 'response', newdata = test_set_pca[-3])
y_pred = ifelse(prob_pred > 0.5, 1, 0)
# Making the Confusion Matrix
cm = table(test_set_pca[, 3], y_pred)
cm
# Visualising the Training set results
# NOTE(review): ElemStatLearn has been archived from CRAN; install from the
# archive if needed.
#install.packages('ElemStatLearn')
library(ElemStatLearn)
set = training_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')# 'V1' and 'V2' are the column names of the kernel-PCA training set
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Training set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
# install.packages('ElemStatLearn')
library(ElemStatLearn)
set = test_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')# 'V1' and 'V2' are the column names of the kernel-PCA test set
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
# NOTE(review): the axis labels below say 'Age'/'Estimated Salary', but the
# plotted columns are the two kernel-PCA components ('PC1'/'PC2', as in the
# training plot) — the labels look stale.
plot(set[, -3],
main = 'Logistic Regression (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
| /Part 9 - Dimensionality Reduction/Section 45 - Kernel PCA/kernel_pca.R | no_license | celestialized/Machine-Learning | R | false | false | 3,598 | r | # Kernel PCA
# PCA and LDA (above) work on linear problems, i.e. when the data are
# linearly separable.  Kernel PCA handles non-linear problems: it is the
# kernelised version of PCA, where the data are implicitly mapped to a
# higher-dimensional space via the kernel trick and the new principal
# components are extracted there.  A logistic regression model (as in the
# previous section) is then fitted on the extracted components.
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[, 3:5]
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling: standardise the two predictor columns
training_set[, 1:2] = scale(training_set[, 1:2])
test_set[, 1:2] = scale(test_set[, 1:2])
# Applying Kernel PCA
# install.packages('kernlab')
library(kernlab)
kpca = kpca(~., data = training_set[-3], kernel = 'rbfdot', features = 2) # [-3] drops the dependent variable; rbfdot = Gaussian (RBF) kernel; 2 components so the result can be visualised in 2-D
training_set_pca = as.data.frame(predict(kpca, training_set)) # project the data onto the extracted components; predict() returns a matrix, hence the data-frame wrap
training_set_pca$Purchased = training_set$Purchased # re-attach the dependent variable to the transformed training set
test_set_pca = as.data.frame(predict(kpca, test_set))
test_set_pca$Purchased = test_set$Purchased
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Purchased ~ ., # linear classifier on the kernel-PCA components
family = binomial,
data = training_set_pca) # note: fitted on the transformed set, training_set_pca
# Predicting the Test set results
prob_pred = predict(classifier, type = 'response', newdata = test_set_pca[-3])
y_pred = ifelse(prob_pred > 0.5, 1, 0)
# Making the Confusion Matrix
cm = table(test_set_pca[, 3], y_pred)
cm
# Visualising the Training set results
# NOTE(review): ElemStatLearn has been archived from CRAN; install from the
# archive if needed.
#install.packages('ElemStatLearn')
library(ElemStatLearn)
set = training_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')# 'V1' and 'V2' are the column names of the kernel-PCA training set
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Training set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
# install.packages('ElemStatLearn')
library(ElemStatLearn)
set = test_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')# 'V1' and 'V2' are the column names of the kernel-PCA test set
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
# NOTE(review): the axis labels below say 'Age'/'Estimated Salary', but the
# plotted columns are the two kernel-PCA components — the labels look stale.
plot(set[, -3],
main = 'Logistic Regression (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
# libFuzzer/valgrind regression case: a minimal argument list (a = 0L,
# b = 0L, x = one large integer) for the internal grattan:::anyOutside
# routine, invoked via do.call.
testlist <- list(a = 0L, b = 0L, x = 439418879L)
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131460-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 109 | r | testlist <- list(a = 0L, b = 0L, x = 439418879L)
# Invoke the fuzzed call and print the structure of the result so the
# valgrind log records what was returned.
result <- do.call(grattan:::anyOutside,testlist)
str(result)
# Glossary of the QC metrics extracted below (description => column name):
# variable: (coverage)"coverage.QC.bases",
#(Not N coverage) 'genome_w.o_N.coverage.QC.bases',
#(QC reads/total reads) 'X.QC.bases..total.bases',
#(QC reads/total not N reads)'X.QC.bases..total.not_N.bases',
#(% incorrect PE orientation) 'X.incorrect.PE.orientation',
#(incorrect proper pair) 'X.incorrect.proper.pair',
#(Read 1 mapq = 0)'mapq.0.read1',
#(Read 1 mapq > 0 base quality median < base cutoff) 'mapq.0.BaseQualityMedian.basequalCutoff.read1',
#(Read 1 mapq > 0 base quality median > base cutoff) 'mapq.0.BaseQualityMedian..basequalCutoff.read1',
#(Read 1 mapq > 0 read length < min length) 'mapq.0.readlength.minlength.read1',
#(Read 1 duplicates) 'X.duplicates.read1..excluded.from.coverage.analysis.',
#(Read 2 mapq = 0)'mapq.0.read2',
#(Read 2 mapq > 0 base quality median < base cutoff) 'mapq.0.BaseQualityMedian.basequalCutoff.read2',
#(Read 2 mapq > 0 base quality median > base cutoff) 'mapq.0.BaseQualityMedian..basequalCutoff.read2',
#(Read 2 mapq > 0 read length < min length) 'mapq.0.readlength.minlength.read2',
#(Read 2 duplicates)'X.duplicates.read2..excluded.from.coverage.analysis.'
# Root of the OTP view-by-pid WGBS results, and the local output directory.
otp_path <- '/icgc/dkfzlsdf/project/hipo/hipo_016/sequencing/whole_genome_bisulfite_sequencing/view-by-pid/'
path <- '/data/'
# df_retrival() is defined in this sourced helper file.
source('/function/depthofcoveragefunction.R')
library(pryr)
starttime <- Sys.time()
# Metrics to extract (see glossary above), processed one at a time.
variables <- c("coverage.QC.bases", 'genome_w.o_N.coverage.QC.bases','X.QC.bases..total.bases','X.QC.bases..total.not_N.bases',
               'X.incorrect.PE.orientation', 'X.incorrect.proper.pair', 'mapq.0.read1','mapq.0.BaseQualityMedian.basequalCutoff.read1',
               'mapq.0.BaseQualityMedian..basequalCutoff.read1', 'mapq.0.readlength.minlength.read1',
               'X.duplicates.read1..excluded.from.coverage.analysis.', 'mapq.0.read2', 'mapq.0.BaseQualityMedian.basequalCutoff.read2',
               'mapq.0.BaseQualityMedian..basequalCutoff.read2', 'mapq.0.readlength.minlength.read2', 'X.duplicates.read2..excluded.from.coverage.analysis.')
# NOTE(review): `df` is overwritten on every iteration, so presumably
# df_retrival() writes its output under `path` as a side effect — confirm
# against depthofcoveragefunction.R.
for ( variable in variables ){
	df <- df_retrival(otp_path, variable, path)
}
# Record memory use and wall-clock runtime of the whole extraction.
mem <- mem_used()
endtime <- Sys.time()
runtime <- endtime - starttime
write(c(mem, runtime), paste0(path,'depthofcov_info.txt'))
| /process/depthofcoverage.R | no_license | leungman426/MultideepQC | R | false | false | 2,203 | r |
# Glossary of the QC metrics extracted below (description => column name):
# variable: (coverage)"coverage.QC.bases",
#(Not N coverage) 'genome_w.o_N.coverage.QC.bases',
#(QC reads/total reads) 'X.QC.bases..total.bases',
#(QC reads/total not N reads)'X.QC.bases..total.not_N.bases',
#(% incorrect PE orientation) 'X.incorrect.PE.orientation',
#(incorrect proper pair) 'X.incorrect.proper.pair',
#(Read 1 mapq = 0)'mapq.0.read1',
#(Read 1 mapq > 0 base quality median < base cutoff) 'mapq.0.BaseQualityMedian.basequalCutoff.read1',
#(Read 1 mapq > 0 base quality median > base cutoff) 'mapq.0.BaseQualityMedian..basequalCutoff.read1',
#(Read 1 mapq > 0 read length < min length) 'mapq.0.readlength.minlength.read1',
#(Read 1 duplicates) 'X.duplicates.read1..excluded.from.coverage.analysis.',
#(Read 2 mapq = 0)'mapq.0.read2',
#(Read 2 mapq > 0 base quality median < base cutoff) 'mapq.0.BaseQualityMedian.basequalCutoff.read2',
#(Read 2 mapq > 0 base quality median > base cutoff) 'mapq.0.BaseQualityMedian..basequalCutoff.read2',
#(Read 2 mapq > 0 read length < min length) 'mapq.0.readlength.minlength.read2',
#(Read 2 duplicates)'X.duplicates.read2..excluded.from.coverage.analysis.'
# Root of the OTP view-by-pid WGBS results, and the local output directory.
otp_path <- '/icgc/dkfzlsdf/project/hipo/hipo_016/sequencing/whole_genome_bisulfite_sequencing/view-by-pid/'
path <- '/data/'
# df_retrival() is defined in this sourced helper file.
source('/function/depthofcoveragefunction.R')
library(pryr)
starttime <- Sys.time()
# Metrics to extract (see glossary above), processed one at a time.
variables <- c("coverage.QC.bases", 'genome_w.o_N.coverage.QC.bases','X.QC.bases..total.bases','X.QC.bases..total.not_N.bases',
               'X.incorrect.PE.orientation', 'X.incorrect.proper.pair', 'mapq.0.read1','mapq.0.BaseQualityMedian.basequalCutoff.read1',
               'mapq.0.BaseQualityMedian..basequalCutoff.read1', 'mapq.0.readlength.minlength.read1',
               'X.duplicates.read1..excluded.from.coverage.analysis.', 'mapq.0.read2', 'mapq.0.BaseQualityMedian.basequalCutoff.read2',
               'mapq.0.BaseQualityMedian..basequalCutoff.read2', 'mapq.0.readlength.minlength.read2', 'X.duplicates.read2..excluded.from.coverage.analysis.')
# NOTE(review): `df` is overwritten on every iteration, so presumably
# df_retrival() writes its output under `path` as a side effect — confirm
# against depthofcoveragefunction.R.
for ( variable in variables ){
	df <- df_retrival(otp_path, variable, path)
}
# Record memory use and wall-clock runtime of the whole extraction.
mem <- mem_used()
endtime <- Sys.time()
runtime <- endtime - starttime
write(c(mem, runtime), paste0(path,'depthofcov_info.txt'))
|
#' Extract several latent variables with sparse redundancy analysis (sRDA),
#' using iterative deflation of the predictor block.
#'
#' For each component a single-latent-variable sRDA model is fitted on the
#' current (deflated) predictors, its loadings and scores are stored, and the
#' predictor block is deflated with the obtained XI scores before the next
#' component is extracted.  Extraction stops after `nr_latent` components or
#' as soon as the absolute change in the sum of squared betas between two
#' consecutive components falls below `stop_criterium`.
#'
#' Changes from the previous version (behaviour otherwise identical):
#' * dropped `reg_coeff <- results$inverse_of_XIXI %*% as.matrix(Res_X)`,
#'   which performed a full matrix product per component but was never used;
#' * dropped the never-used `corr_v` accumulator;
#' * simplified the loop condition `!(i > nr_latent)` to `i <= nr_latent`.
#'
#' @param X predictor block (deflated across components).
#' @param Y predicted (outcome) block.
#' @param penalization penalization type, passed through to sRDA().
#' @param lambda ridge penalty, passed to sRDA() as `ridge_penalty`.
#' @param nonzero number of non-zero coefficients, passed through to sRDA().
#' @param nr_latent maximum number of latent variables to extract.
#' @param stop_criterium tolerance used both inside sRDA() and for the outer
#'   early-stopping rule.
#' @param max_iterations maximum sRDA iterations per component.
#' @param cross_validate whether sRDA() cross-validates for every component.
#' @return a list of per-component results (XI/ETA scores, ALPHA/BETA
#'   loadings, iteration counts, penalties, CV results).  The element name
#'   `sum_absolute_betas` is kept for backward compatibility although it
#'   holds the per-component sums of *squared* betas.
get_multiple_LVs <- function(X,
                             Y,
                             penalization,
                             lambda,
                             nonzero,
                             nr_latent=1,
                             stop_criterium = 1 * 10^-5,
                             max_iterations,
                             cross_validate) {

  Res_X <- X

  # Per-component accumulators; each grows as a list indexed by component.
  alphas <- c()
  betas <- c()
  xis <- c()
  etas <- c()
  iterations <- c()
  s_cond_v <- c()
  red_indexs <- c()
  ridge_penaltys<- c()
  nr_nonzeros <- c()
  CV_results <- c()
  iterations_crts <- c()
  sum_of_sq_betas <- c()
  sum_of_sq_alphas <- c()

  i <- 1
  WeCarryOn <- TRUE

  cat("Multiple latent variables scenario,
      number of latent variables calculated:",nr_latent, "\n")

  while (i <= nr_latent && WeCarryOn) {

    # Fit a single-LV sparse RDA on the current residual predictors.
    results <- sRDA(predictor = Res_X,
                    predicted = Y,
                    penalization = penalization,
                    ridge_penalty = lambda,
                    nonzero = nonzero,
                    tolerance = stop_criterium,
                    # cross validate for every latent variable
                    cross_validate = cross_validate,
                    multiple_LV = FALSE,
                    max_iterations = max_iterations)

    alphas[[i]] <- results$ALPHA
    betas[[i]] <- results$BETA
    xis[[i]] <- results$XI
    etas[[i]] <- results$ETA
    iterations[[i]] <- results$nr_iterations
    red_indexs[[i]] <- results$redundancy_index
    iterations_crts[[i]] <- results$iterations_crts
    ridge_penaltys[[i]] <- results$ridge_penalty
    nr_nonzeros[[i]] <- results$nr_nonzeros

    if(cross_validate){
      CV_results[[i]] <- results$CV_results
    }

    # Deflate: remove from every predictor column the part explained by the
    # current XI scores, so the next component is extracted from residuals.
    calcres = function(Xcol)
      Xcol - results$inverse_of_XIXI %*% Xcol %*% t(xis[[i]])
    Res_X = apply(Res_X, 2,calcres)

    sum_of_sq_betas[[i]] <- sum(betas[[i]]^2)
    sum_of_sq_alphas[[i]] <- sum(alphas[[i]]^2)

    # Early stop once consecutive components change the sum of squared
    # betas by less than the tolerance.
    if (i>1){
      stop_condition <- abs(sum_of_sq_betas[[i]] - sum_of_sq_betas[[i-1]])
      s_cond_v[[i]] <- stop_condition
      if (stop_condition < stop_criterium){
        WeCarryOn <- FALSE
      }
    }

    i <- i +1
  }

  result <- list(
    XI = xis,
    ETA = etas,
    ALPHA = alphas,
    BETA= betas,
    nr_iterations = iterations,
    iterations_crts = iterations_crts,
    sum_absolute_betas = sum_of_sq_betas,   # sums of squared betas (legacy name)
    ridge_penalty = ridge_penaltys,
    nr_nonzeros = nr_nonzeros,
    nr_latent_variables = nr_latent,
    CV_results = CV_results
  )
  result
}
| /R/get_multiple_LVs.R | permissive | acsala/sRDA | R | false | false | 3,145 | r | get_multiple_LVs <- function(X,
Y,
penalization,
lambda,
nonzero,
nr_latent=1,
stop_criterium = 1 * 10^-5,
max_iterations,
cross_validate) {
Res_X <- X
alphas <- c()
betas <- c()
xis <- c()
etas <- c()
iterations <- c()
corr_v <- c()
s_cond_v <- c()
red_indexs <- c()
ridge_penaltys<- c()
nr_nonzeros <- c()
CV_results <- c()
iterations_crts <- c()
sum_of_sq_betas <- c()
sum_of_sq_alphas <- c()
i <- 1
WeCarryOn <- TRUE
cat("Multiple latent variables scenario,
number of latent variables calculated:",nr_latent, "\n")
while ( !(i > nr_latent) && WeCarryOn ){
results <- sRDA(predictor = Res_X,
predicted = Y,
penalization = penalization,
ridge_penalty = lambda,
nonzero = nonzero,
tolerance = stop_criterium,
# cross validate for every latent variables
cross_validate = cross_validate,
multiple_LV = FALSE,
max_iterations = max_iterations)
alphas[[i]] <- results$ALPHA
betas[[i]] <- results$BETA
xis[[i]] <- results$XI
etas[[i]] <- results$ETA
iterations[[i]] <- results$nr_iterations
red_indexs[[i]] <- results$redundancy_index
iterations_crts[[i]] <- results$iterations_crts
ridge_penaltys[[i]] <- results$ridge_penalty
nr_nonzeros[[i]] <- results$nr_nonzeros
if(cross_validate){
CV_results[[i]] <- results$CV_results
}
reg_coeff <- results$inverse_of_XIXI %*% as.matrix(Res_X)
# calculate the residuals
calcres = function(Xcol)
Xcol - results$inverse_of_XIXI %*% Xcol %*% t(xis[[i]])
Res_X = apply(Res_X, 2,calcres)
sum_of_sq_betas[[i]] <- sum(betas[[i]]^2)
sum_of_sq_alphas[[i]] <- sum(alphas[[i]]^2)
if (i>1){
stop_condition <- abs(sum_of_sq_betas[[i]] - sum_of_sq_betas[[i-1]])
s_cond_v[[i]] <- stop_condition
if (stop_condition < stop_criterium){
WeCarryOn <- FALSE
}
}
i <- i +1
}
result <- list(
XI = xis,
ETA = etas,
ALPHA = alphas,
BETA= betas,
nr_iterations = iterations,
# inverse_of_XIXI = SOLVE_XIXI,
iterations_crts = iterations_crts,
sum_absolute_betas = sum_of_sq_betas,
ridge_penalty = ridge_penaltys,
nr_nonzeros = nr_nonzeros,
nr_latent_variables = nr_latent,
CV_results = CV_results
)
result
}
|
### R libraries
.libPaths('/groups/umcg-lld/tmp03/umcg-agulyaeva/R_LIB')
library('optparse')
sessionInfo()
### input parameters
option_list = list(
    make_option('--metadata_file'),
    make_option('--cg_counts_file'),
    make_option('--vc_counts_file'),
    make_option('--fm_counts_file'))
opt_parser = OptionParser(option_list = option_list)
opt = parse_args(opt_parser)
### read files
# Sample metadata; row names are sample IDs, `Sample_real` maps samples to
# individuals.
metadata <- read.table(
    opt$metadata_file,
    sep = '\t',
    header = TRUE,
    row.names = 1)
# Samples excluded from the analysis (reason not stated here — TODO confirm).
excl <- c('GFDR2_11.3', 'GFDR2_11.7', 'GFDR_10.1', 'GFDR_10.3', 'GFDR_10.5')
metadata <- metadata[!(rownames(metadata) %in% excl), ]
# Count tables: rows = contigs / virus clusters (VCs) / families,
# columns = samples.
cg_counts <- read.table(
    opt$cg_counts_file,
    sep = '\t',
    header = TRUE,
    row.names = 1)
vc_counts <- read.table(
    opt$vc_counts_file,
    sep = '\t',
    header = TRUE,
    row.names = 1)
fm_counts <- read.table(
    opt$fm_counts_file,
    sep = '\t',
    header = TRUE,
    row.names = 1)
### calculate
indiv <- sort(unique(metadata$Sample_real))
L <- list(
    contigs = cg_counts,
    VCs = vc_counts,
    families = fm_counts
)
for (x in names(L)) {
    t <- L[[x]]
    # Presence/absence matrix: one row per entity, one column per individual.
    DF <- as.data.frame(
        matrix(
            NA,
            nrow = nrow(t),
            ncol = length(indiv),
            dimnames = list(rownames(t), paste0('Individual_', indiv))
        ),
        stringsAsFactors = FALSE)
    for (y in indiv) {
        # All (non-excluded) samples belonging to this individual; the entity
        # is present (1) if any of them has a positive count.
        # NOTE(review): if `samples` has length 1, t[, samples] drops to a
        # vector and apply() fails; t[, samples, drop = FALSE] would be safer.
        samples <- rownames(metadata)[metadata$Sample_real == y]
        DF[, paste0('Individual_', y)] <- apply(t[, samples], 1, function (v) ifelse(any(v > 0), 1, 0))
    }
    # Number of individuals each entity is present in, and the percentage of
    # entities present in more than 5 individuals.
    # NOTE(review): the "> 5" threshold is hard-coded; the "more than half"
    # wording implies ~10 individuals — confirm, or derive the threshold
    # from length(indiv).
    DF$SUM <- apply(DF, 1, sum)
    N <- sum(DF$SUM > 5) / nrow(DF) * 100
    cat(N, '% of ', x, ' were shared among more than half of the individual virus pools.\n\n\n', sep='')
}
| /numbers_for_paper/calculate_numbers_for_paper.R | no_license | aag1/GFD_vConTACT_based_analysis | R | false | false | 1,813 | r | ### R libraries
.libPaths('/groups/umcg-lld/tmp03/umcg-agulyaeva/R_LIB')
library('optparse')
sessionInfo()
### input parameters
option_list = list(
make_option('--metadata_file'),
make_option('--cg_counts_file'),
make_option('--vc_counts_file'),
make_option('--fm_counts_file'))
opt_parser = OptionParser(option_list = option_list)
opt = parse_args(opt_parser)
### read files
metadata <- read.table(
opt$metadata_file,
sep = '\t',
header = TRUE,
row.names = 1)
excl <- c('GFDR2_11.3', 'GFDR2_11.7', 'GFDR_10.1', 'GFDR_10.3', 'GFDR_10.5')
metadata <- metadata[!(rownames(metadata) %in% excl), ]
cg_counts <- read.table(
opt$cg_counts_file,
sep = '\t',
header = TRUE,
row.names = 1)
vc_counts <- read.table(
opt$vc_counts_file,
sep = '\t',
header = TRUE,
row.names = 1)
fm_counts <- read.table(
opt$fm_counts_file,
sep = '\t',
header = TRUE,
row.names = 1)
### calculate
indiv <- sort(unique(metadata$Sample_real))
L <- list(
contigs = cg_counts,
VCs = vc_counts,
families = fm_counts
)
for (x in names(L)) {
t <- L[[x]]
DF <- as.data.frame(
matrix(
NA,
nrow = nrow(t),
ncol = length(indiv),
dimnames = list(rownames(t), paste0('Individual_', indiv))
),
stringsAsFactors = FALSE)
for (y in indiv) {
samples <- rownames(metadata)[metadata$Sample_real == y]
DF[, paste0('Individual_', y)] <- apply(t[, samples], 1, function (v) ifelse(any(v > 0), 1, 0))
}
DF$SUM <- apply(DF, 1, sum)
N <- sum(DF$SUM > 5) / nrow(DF) * 100
cat(N, '% of ', x, ' were shared among more than half of the individual virus pools.\n\n\n', sep='')
}
|
# Load the precomputed combined ("Total") network: adjacency matrix and the
# matching igraph object.
load("~/prioritization/Combined_Network/New/Total/Total_matrix.RData")
load("~/prioritization/Combined_Network/New/Total/Total_graph.RData")
# Candidate gene list; only column 2 (gene identifiers) is kept.
# NOTE(review): 'cregiyg' and 'a' (Total_matrix) are not used further in this
# script -- likely leftovers from a template shared across topology scripts.
cregiyg=read.table("~/prioritization/Combined_Network/New/Filtering_list/cregiyg.txt")
cregiyg=cregiyg[,2]
a=Total_matrix
a1=Total_graph
library(Matrix)
library(igraph)
# Closeness centrality of every node, treating edges as undirected ("all").
clos=closeness(a1,mode=c("all")) #Closeness
save(clos,file="~/prioritization/Leave-one-out/New/Total_topology/closeness.RData") | /Prioritization/Total/Topological properties/closeness.r | no_license | ehsanbiostat/PhD-thesis | R | false | false | 439 | r | load("~/prioritization/Combined_Network/New/Total/Total_matrix.RData")
# igraph object of the combined ("Total") network (the matching adjacency
# matrix is loaded on the preceding line).
load("~/prioritization/Combined_Network/New/Total/Total_graph.RData")
# Candidate gene list; only column 2 (gene identifiers) is kept.
# NOTE(review): 'cregiyg' and 'a' (Total_matrix) are not used further in this
# script -- likely leftovers from a shared template.
cregiyg=read.table("~/prioritization/Combined_Network/New/Filtering_list/cregiyg.txt")
cregiyg=cregiyg[,2]
a=Total_matrix
a1=Total_graph
library(Matrix)
library(igraph)
# Closeness centrality of every node, treating edges as undirected ("all").
clos=closeness(a1,mode=c("all")) #Closeness
# Persist the centrality vector for the leave-one-out prioritization step.
save(clos,file="~/prioritization/Leave-one-out/New/Total_topology/closeness.RData")
context("LaTeX -- Ensuring that the `fmt_percent()` function works as expected")
# Each test renders a gt table to LaTeX and compares the formatted `num_1`
# column against literal LaTeX strings: values are wrapped in `$...$` (math
# mode) with an escaped `\%`, and fmt_percent() multiplies inputs by 100.
# NOTE(review): `render_formats_test()` appears to be an internal gt testing
# helper -- confirm its availability before reusing this pattern elsewhere.
test_that("the `fmt_percent()` function works correctly", {
# Create an input data frame four columns: two
# character-based and two that are numeric
data_tbl <-
data.frame(
char_1 = c("saturday", "sunday", "monday", "tuesday",
"wednesday", "thursday", "friday"),
char_2 = c("june", "july", "august", "september",
"october", "november", "december"),
num_1 = c(1836.23, 2763.39, 937.29, 643.00, 212.232, 0, -23.24),
num_2 = c(34, 74, 23, 93, 35, 76, 57),
stringsAsFactors = FALSE
)
# Create a `tbl_latex` object with `gt()` and the
# `data_tbl` dataset
tbl_latex <- gt(data = data_tbl)
# Format the `num_1` column to 2 decimal places, use all
# other defaults; extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623.00\\%$", "$276,339.00\\%$", "$93,729.00\\%$",
"$64,300.00\\%$", "$21,223.20\\%$", "$0.00\\%$", "$-2,324.00\\%$")
)
# Format the `num_1` column to 5 decimal places, use all
# other defaults; extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 5) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623.00000\\%$", "$276,339.00000\\%$", "$93,729.00000\\%$",
"$64,300.00000\\%$", "$21,223.20000\\%$", "$0.00000\\%$",
"$-2,324.00000\\%$")
)
# Format the `num_1` column to 2 decimal places, drop the trailing
# zeros, use all other defaults; extract `output_df` and compare to
# expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 2,
drop_trailing_zeros = TRUE
) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623\\%$", "$276,339\\%$", "$93,729\\%$", "$64,300\\%$",
"$21,223.2\\%$", "$0\\%$", "$-2,324\\%$")
)
# Format the `num_1` column to 2 decimal places, don't use digit
# grouping separators, use all other defaults; extract `output_df`
# and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, use_seps = FALSE) %>%
render_formats_test("latex"))[["num_1"]],
c("$183623.00\\%$", "$276339.00\\%$", "$93729.00\\%$", "$64300.00\\%$",
"$21223.20\\%$", "$0.00\\%$", "$-2324.00\\%$")
)
# Format the `num_1` column to 2 decimal places, use a single space
# character as digit grouping separators, use all other defaults;
# extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, sep_mark = " ") %>%
render_formats_test("latex"))[["num_1"]],
c("$183 623.00\\%$", "$276 339.00\\%$", "$93 729.00\\%$", "$64 300.00\\%$",
"$21 223.20\\%$", "$0.00\\%$", "$-2 324.00\\%$")
)
# Format the `num_1` column to 2 decimal places, use a period for the
# digit grouping separators and a comma for the decimal mark, use
# all other defaults; extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 2,
sep_mark = ".", dec_mark = ","
) %>%
render_formats_test("latex"))[["num_1"]],
c("$183.623,00\\%$", "$276.339,00\\%$", "$93.729,00\\%$", "$64.300,00\\%$",
"$21.223,20\\%$", "$0,00\\%$", "$-2.324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, prepend and append
# all values by 2 different literals, use all other defaults; extract
# `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, pattern = "a {x}:n") %>%
render_formats_test("latex"))[["num_1"]],
c("a $183,623.00\\%$:n", "a $276,339.00\\%$:n", "a $93,729.00\\%$:n",
"a $64,300.00\\%$:n", "a $21,223.20\\%$:n", "a $0.00\\%$:n",
"a $-2,324.00\\%$:n")
)
# Format the `num_1` column to 0 decimal places, place a space between
# the percent sign (on the right) and the value, use all other defaults;
# extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 0,
placement = "right", incl_space = TRUE
) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623 \\%$", "$276,339 \\%$", "$93,729 \\%$", "$64,300 \\%$",
"$21,223 \\%$", "$0 \\%$", "$-2,324 \\%$")
)
# Format the `num_1` column to 0 decimal places, place a space between
# the percent sign (on the left) and the value, use all other defaults;
# extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 0,
placement = "left", incl_space = TRUE
) %>%
render_formats_test("latex"))[["num_1"]],
c("$\\% 183,623$", "$\\% 276,339$", "$\\% 93,729$", "$\\% 64,300$",
"$\\% 21,223$", "$\\% 0$", "$-\\% 2,324$")
)
# Format the `num_1` column to 2 decimal places, apply the `en_US`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "en_US") %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623.00\\%$", "$276,339.00\\%$", "$93,729.00\\%$",
"$64,300.00\\%$", "$21,223.20\\%$", "$0.00\\%$", "$-2,324.00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `da_DK`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "da_DK") %>%
render_formats_test("latex"))[["num_1"]],
c("$183.623,00\\%$", "$276.339,00\\%$", "$93.729,00\\%$",
"$64.300,00\\%$", "$21.223,20\\%$", "$0,00\\%$", "$-2.324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `de_AT`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "de_AT") %>%
render_formats_test("latex"))[["num_1"]],
c("$183 623,00\\%$", "$276 339,00\\%$", "$93 729,00\\%$",
"$64 300,00\\%$", "$21 223,20\\%$", "$0,00\\%$", "$-2 324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `et_EE`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "et_EE") %>%
render_formats_test("latex"))[["num_1"]],
c("$183 623,00\\%$", "$276 339,00\\%$", "$93 729,00\\%$",
"$64 300,00\\%$", "$21 223,20\\%$", "$0,00\\%$", "$-2 324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `gl_ES`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "gl_ES") %>%
render_formats_test("latex"))[["num_1"]],
c("$183.623,00\\%$", "$276.339,00\\%$", "$93.729,00\\%$",
"$64.300,00\\%$", "$21.223,20\\%$", "$0,00\\%$", "$-2.324,00\\%$")
)
})
| /tests/testthat/test-l_fmt_percent.R | permissive | marinamerlo/gt | R | false | false | 7,483 | r | context("LaTeX -- Ensuring that the `fmt_percent()` function works as expected")
# Renders gt tables to LaTeX and compares the formatted `num_1` column to
# literal LaTeX strings: values are wrapped in `$...$` (math mode) with an
# escaped `\%`, and fmt_percent() multiplies inputs by 100.
# NOTE(review): `render_formats_test()` appears to be an internal gt testing
# helper -- confirm its availability before reusing this pattern elsewhere.
test_that("the `fmt_percent()` function works correctly", {
# Create an input data frame four columns: two
# character-based and two that are numeric
data_tbl <-
data.frame(
char_1 = c("saturday", "sunday", "monday", "tuesday",
"wednesday", "thursday", "friday"),
char_2 = c("june", "july", "august", "september",
"october", "november", "december"),
num_1 = c(1836.23, 2763.39, 937.29, 643.00, 212.232, 0, -23.24),
num_2 = c(34, 74, 23, 93, 35, 76, 57),
stringsAsFactors = FALSE
)
# Create a `tbl_latex` object with `gt()` and the
# `data_tbl` dataset
tbl_latex <- gt(data = data_tbl)
# Format the `num_1` column to 2 decimal places, use all
# other defaults; extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623.00\\%$", "$276,339.00\\%$", "$93,729.00\\%$",
"$64,300.00\\%$", "$21,223.20\\%$", "$0.00\\%$", "$-2,324.00\\%$")
)
# Format the `num_1` column to 5 decimal places, use all
# other defaults; extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 5) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623.00000\\%$", "$276,339.00000\\%$", "$93,729.00000\\%$",
"$64,300.00000\\%$", "$21,223.20000\\%$", "$0.00000\\%$",
"$-2,324.00000\\%$")
)
# Format the `num_1` column to 2 decimal places, drop the trailing
# zeros, use all other defaults; extract `output_df` and compare to
# expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 2,
drop_trailing_zeros = TRUE
) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623\\%$", "$276,339\\%$", "$93,729\\%$", "$64,300\\%$",
"$21,223.2\\%$", "$0\\%$", "$-2,324\\%$")
)
# Format the `num_1` column to 2 decimal places, don't use digit
# grouping separators, use all other defaults; extract `output_df`
# and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, use_seps = FALSE) %>%
render_formats_test("latex"))[["num_1"]],
c("$183623.00\\%$", "$276339.00\\%$", "$93729.00\\%$", "$64300.00\\%$",
"$21223.20\\%$", "$0.00\\%$", "$-2324.00\\%$")
)
# Format the `num_1` column to 2 decimal places, use a single space
# character as digit grouping separators, use all other defaults;
# extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, sep_mark = " ") %>%
render_formats_test("latex"))[["num_1"]],
c("$183 623.00\\%$", "$276 339.00\\%$", "$93 729.00\\%$", "$64 300.00\\%$",
"$21 223.20\\%$", "$0.00\\%$", "$-2 324.00\\%$")
)
# Format the `num_1` column to 2 decimal places, use a period for the
# digit grouping separators and a comma for the decimal mark, use
# all other defaults; extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 2,
sep_mark = ".", dec_mark = ","
) %>%
render_formats_test("latex"))[["num_1"]],
c("$183.623,00\\%$", "$276.339,00\\%$", "$93.729,00\\%$", "$64.300,00\\%$",
"$21.223,20\\%$", "$0,00\\%$", "$-2.324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, prepend and append
# all values by 2 different literals, use all other defaults; extract
# `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, pattern = "a {x}:n") %>%
render_formats_test("latex"))[["num_1"]],
c("a $183,623.00\\%$:n", "a $276,339.00\\%$:n", "a $93,729.00\\%$:n",
"a $64,300.00\\%$:n", "a $21,223.20\\%$:n", "a $0.00\\%$:n",
"a $-2,324.00\\%$:n")
)
# Format the `num_1` column to 0 decimal places, place a space between
# the percent sign (on the right) and the value, use all other defaults;
# extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 0,
placement = "right", incl_space = TRUE
) %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623 \\%$", "$276,339 \\%$", "$93,729 \\%$", "$64,300 \\%$",
"$21,223 \\%$", "$0 \\%$", "$-2,324 \\%$")
)
# Format the `num_1` column to 0 decimal places, place a space between
# the percent sign (on the left) and the value, use all other defaults;
# extract `output_df` and compare to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(
columns = "num_1", decimals = 0,
placement = "left", incl_space = TRUE
) %>%
render_formats_test("latex"))[["num_1"]],
c("$\\% 183,623$", "$\\% 276,339$", "$\\% 93,729$", "$\\% 64,300$",
"$\\% 21,223$", "$\\% 0$", "$-\\% 2,324$")
)
# Format the `num_1` column to 2 decimal places, apply the `en_US`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "en_US") %>%
render_formats_test("latex"))[["num_1"]],
c("$183,623.00\\%$", "$276,339.00\\%$", "$93,729.00\\%$",
"$64,300.00\\%$", "$21,223.20\\%$", "$0.00\\%$", "$-2,324.00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `da_DK`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "da_DK") %>%
render_formats_test("latex"))[["num_1"]],
c("$183.623,00\\%$", "$276.339,00\\%$", "$93.729,00\\%$",
"$64.300,00\\%$", "$21.223,20\\%$", "$0,00\\%$", "$-2.324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `de_AT`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "de_AT") %>%
render_formats_test("latex"))[["num_1"]],
c("$183 623,00\\%$", "$276 339,00\\%$", "$93 729,00\\%$",
"$64 300,00\\%$", "$21 223,20\\%$", "$0,00\\%$", "$-2 324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `et_EE`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "et_EE") %>%
render_formats_test("latex"))[["num_1"]],
c("$183 623,00\\%$", "$276 339,00\\%$", "$93 729,00\\%$",
"$64 300,00\\%$", "$21 223,20\\%$", "$0,00\\%$", "$-2 324,00\\%$")
)
# Format the `num_1` column to 2 decimal places, apply the `gl_ES`
# locale and use all other defaults; extract `output_df` and compare
# to expected values
expect_equal(
(tbl_latex %>%
fmt_percent(columns = "num_1", decimals = 2, locale = "gl_ES") %>%
render_formats_test("latex"))[["num_1"]],
c("$183.623,00\\%$", "$276.339,00\\%$", "$93.729,00\\%$",
"$64.300,00\\%$", "$21.223,20\\%$", "$0,00\\%$", "$-2.324,00\\%$")
)
})
|
\name{equijoin}
\alias{equijoin}
\title{
Equijoins using map reduce
}
\description{
A generalized form of equijoin: a hybrid between its SQL namesakes and a mapreduce job
}
\usage{
equijoin(
left.input = NULL,
right.input = NULL,
input = NULL,
output = NULL,
input.format = "native",
output.format = "native",
outer = c("", "left", "right", "full"),
map.left = to.map(identity),
map.right = to.map(identity),
reduce = reduce.default)}
\arguments{\item{left.input}{The left side input to the join.}
\item{right.input}{The right side input to the join.}
\item{input}{The only input in case of a self join. Mutually exclusive with the previous two.}
\item{output}{Where to write the output.}
\item{input.format}{Input format specification, see \code{\link{make.input.format}}}
\item{output.format}{Output format specification, see \code{\link{make.output.format}}}
\item{outer}{Whether to perform an outer join, one of the usual three types, left, right or full.}
\item{map.left}{Function to apply to each record from the left input, follows same conventions as any map function. The returned keys
will become join keys.}
\item{map.right}{Function to apply to each record from the right input, follows same conventions as any map function. The returned keys
will become join keys.}
\item{reduce}{Function to be applied, key by key, on the values associated with that key. Those values are in the arguments \code{vl} (left side) and \code{vr} (right side) and their type is determined by the type returned by the map functions, separately for the left side and the right side. The allowable return values are like those of any reduce function, see \code{\link{mapreduce}}. The default performs a \code{merge} with \code{by = NULL} which performs a cartesian product, unless lists are involved in which case the arguments are simply returned in a list.}}
\value{If output is specified, returns output itself. Otherwise, a \code{\link{big.data.object}}}
\section{Warning}{Doesn't work with multiple inputs like \code{mapreduce}}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
from.dfs(equijoin(left.input = to.dfs(keyval(1:10, 1:10^2)), right.input = to.dfs(keyval(1:10, 1:10^3))))
}
| /pkg/man/equijoin.Rd | no_license | beedata-analytics/rmr2 | R | false | false | 2,330 | rd | \name{equijoin}
\alias{equijoin}
\title{
Equijoins using map reduce
}
\description{
A generalized form of equijoin: a hybrid between its SQL namesakes and a mapreduce job
}
\usage{
equijoin(
left.input = NULL,
right.input = NULL,
input = NULL,
output = NULL,
input.format = "native",
output.format = "native",
outer = c("", "left", "right", "full"),
map.left = to.map(identity),
map.right = to.map(identity),
reduce = reduce.default)}
\arguments{\item{left.input}{The left side input to the join.}
\item{right.input}{The right side input to the join.}
\item{input}{The only input in case of a self join. Mutually exclusive with the previous two.}
\item{output}{Where to write the output.}
\item{input.format}{Input format specification, see \code{\link{make.input.format}}}
\item{output.format}{Output format specification, see \code{\link{make.output.format}}}
\item{outer}{Whether to perform an outer join, one of the usual three types, left, right or full.}
\item{map.left}{Function to apply to each record from the left input, follows same conventions as any map function. The returned keys
will become join keys.}
\item{map.right}{Function to apply to each record from the right input, follows same conventions as any map function. The returned keys
will become join keys.}
\item{reduce}{Function to be applied, key by key, on the values associated with that key. Those values are in the arguments \code{vl} (left side) and \code{vr} (right side) and their type is determined by the type returned by the map functions, separately for the left side and the right side. The allowable return values are like those of any reduce function, see \code{\link{mapreduce}}. The default performs a \code{merge} with \code{by = NULL} which performs a cartesian product, unless lists are involved in which case the arguments are simply returned in a list.}}
\value{If output is specified, returns output itself. Otherwise, a \code{\link{big.data.object}}}
\section{Warning}{Doesn't work with multiple inputs like \code{mapreduce}}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
from.dfs(equijoin(left.input = to.dfs(keyval(1:10, 1:10^2)), right.input = to.dfs(keyval(1:10, 1:10^3))))
}
|
########################
# remove_singularities #
########################
# function to remove the singular variable using a linear model
# before the computation of a mixed model
#
# d: data frame with the response in column 'trait', a 'cross_env' factor,
#    and the candidate predictor columns from column 5 onwards (columns 1-4
#    are assumed to be trait/identifier/cross_env columns).
# Fits trait ~ -1 + cross_env + <predictors>; predictors whose lm()
# coefficient comes back NA (aliased / rank-deficient design) are dropped so
# the subsequent mixed model does not hit a singular design matrix.
remove_singularities <- function(d){
fix_form <- paste0('trait~-1 + cross_env+', paste(colnames(d)[5:ncol(d)], collapse = '+'))
m_sg <- lm(as.formula(fix_form), data = d)
coeff <- coefficients(m_sg)
if(any(is.na(coeff))){
# NOTE(review): if an NA coefficient name matches no column (e.g. a factor
# level coefficient such as 'cross_envA'), which() returns integer(0) and
# d[, -integer(0)] silently drops EVERY column -- confirm callers guarantee
# that NA coefficients always correspond to plain predictor columns.
d <- d[, -which(colnames(d) %in% names(coeff[is.na(coeff)]))]
}
return(d)
} | /R/remove_singularities.R | no_license | vincentgarin/mppR | R | false | false | 508 | r | ########################
# remove_singularities #
########################
# Drop predictors that make the fixed-effect design matrix singular before
# fitting a mixed model.
#
# d: data frame with the response in column 'trait', a 'cross_env' factor,
#    and the candidate predictor columns from column 5 onwards.
# Returns d with every predictor whose lm() coefficient is NA (aliased /
# rank-deficient in trait ~ -1 + cross_env + <predictors>) removed.
remove_singularities <- function(d){
  fix_form <- paste0('trait~-1 + cross_env+', paste(colnames(d)[5:ncol(d)], collapse = '+'))
  m_sg <- lm(as.formula(fix_form), data = d)
  coeff <- coefficients(m_sg)
  # Match NA coefficients back to data columns BY NAME: factor-level
  # coefficients (e.g. 'cross_envA') never match a column and are ignored.
  sing <- intersect(colnames(d), names(coeff)[is.na(coeff)])
  if(length(sing) > 0){
    # Select the surviving columns instead of negative indexing: the original
    # d[, -which(...)] dropped EVERY column when which() returned integer(0)
    # (an NA coefficient with no matching column). drop = FALSE keeps a data
    # frame even if only one column survives.
    d <- d[, setdiff(colnames(d), sing), drop = FALSE]
  }
  return(d)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/similarity.R
\name{score.apd_similarity}
\alias{score.apd_similarity}
\title{Score new samples using similarity methods}
\usage{
\method{score}{apd_similarity}(object, new_data, type = "numeric", add_percentile = TRUE, ...)
}
\arguments{
\item{object}{A \code{apd_similarity} object.}
\item{new_data}{A data frame or matrix of new predictors.}
\item{type}{A single character. The type of predictions to generate.
Valid options are:
\itemize{
\item \code{"numeric"} for a numeric value that summarizes the similarity values for
each sample across the training set.
}}
\item{add_percentile}{A single logical; should the percentile of the
similarity score \emph{relative to the training set values} be computed?}
\item{...}{Not used, but required for extensibility.}
}
\value{
A tibble of predictions. The number of rows in the tibble is guaranteed
to be the same as the number of rows in \code{new_data}. For \code{type = "numeric"},
the tibble contains a column called "similarity". If \code{add_percentile = TRUE},
an additional column called \code{similarity_pctl} will be added. These values are
in percent units so that a value of 11.5 indicates that, in the training set,
11.5 percent of the training set samples had smaller values than the sample
being scored.
}
\description{
Score new samples using similarity methods
}
\examples{
\donttest{
data(qsar_binary)
jacc_sim <- apd_similarity(binary_tr)
mean_sim <- score(jacc_sim, new_data = binary_unk)
mean_sim
}
}
| /man/score.apd_similarity.Rd | permissive | tidymodels/applicable | R | false | true | 1,553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/similarity.R
\name{score.apd_similarity}
\alias{score.apd_similarity}
\title{Score new samples using similarity methods}
\usage{
\method{score}{apd_similarity}(object, new_data, type = "numeric", add_percentile = TRUE, ...)
}
\arguments{
\item{object}{A \code{apd_similarity} object.}
\item{new_data}{A data frame or matrix of new predictors.}
\item{type}{A single character. The type of predictions to generate.
Valid options are:
\itemize{
\item \code{"numeric"} for a numeric value that summarizes the similarity values for
each sample across the training set.
}}
\item{add_percentile}{A single logical; should the percentile of the
similarity score \emph{relative to the training set values} be computed?}
\item{...}{Not used, but required for extensibility.}
}
\value{
A tibble of predictions. The number of rows in the tibble is guaranteed
to be the same as the number of rows in \code{new_data}. For \code{type = "numeric"},
the tibble contains a column called "similarity". If \code{add_percentile = TRUE},
an additional column called \code{similarity_pctl} will be added. These values are
in percent units so that a value of 11.5 indicates that, in the training set,
11.5 percent of the training set samples had smaller values than the sample
being scored.
}
\description{
Score new samples using similarity methods
}
\examples{
\donttest{
data(qsar_binary)
jacc_sim <- apd_similarity(binary_tr)
mean_sim <- score(jacc_sim, new_data = binary_unk)
mean_sim
}
}
|
# genthat-extracted example for astrolibR::aitoff -- converts a (longitude,
# latitude) pair in degrees to X,Y map coordinates under the Aitoff projection.
library(astrolibR)
### Name: aitoff
### Title: Convert longitude, latitude to X,Y using an AITOFF projection
### Aliases: aitoff
### Keywords: misc
### ** Examples
aitoff(227.23,-8.890) # celestial location of Sirius in Galactic coordinates
| /data/genthat_extracted_code/astrolibR/examples/aitoff.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 250 | r | library(astrolibR)
### Name: aitoff
### Title: Convert longitude, latitude to X,Y using an AITOFF projection
### Aliases: aitoff
### Keywords: misc
### ** Examples
# Projects Galactic (lon, lat) in degrees to Aitoff X,Y map coordinates.
aitoff(227.23,-8.890) # celestial location of Sirius in Galactic coordinates
|
rm(list=ls(all=T)) #clear workspace
# Script version string; stamped into every ModelSearch() output table.
v='DSS_InOutModelSelection20121003.r'
# Read data-****Make Sure the Path Is Correct****
require(RODBC) #Packages robustbase & RODBC must be installed
require(robustbase)
#Load DSS load data n=18,016
# SPARROW nutrient loads (kg/yr) and annual flow per waterbody from Access.
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/MRB1.mdb")
DSS<- sqlQuery(con, "
SELECT tblWBID_SparrowLoadsDSS.WB_ID, tblWBID_SparrowLoadsDSS.FlowM3_yr, tblWBID_SparrowLoadsDSS.Ninput, tblWBID_SparrowLoadsDSS.Noutput, tblWBID_SparrowLoadsDSS.Pinput, tblWBID_SparrowLoadsDSS.Poutput
FROM tblWBID_SparrowLoadsDSS;
")
close(con)
str(DSS)
#Load Area, Depth & Volume data n=27,942
# GIS-estimated lake morphometry (volume, max depth, surface area, location).
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/WaterbodyDatabase.mdb")
z<- sqlQuery(con, "
SELECT MRB1_PredictedVolumeDepth.WB_ID, MRB1_PredictedVolumeDepth.distvol AS Volume, MRB1_PredictedVolumeDepth.maxdepth_corrected AS Zmax, MRB1_WBIDLakes.AlbersAreaM AS Area, MRB1_WBIDLakes.AlbersX, MRB1_WBIDLakes.AlbersY
FROM MRB1_PredictedVolumeDepth INNER JOIN MRB1_WBIDLakes ON MRB1_PredictedVolumeDepth.WB_ID = MRB1_WBIDLakes.WB_ID;
")
close(con)
str(z)
#Load NLA data n=155
# National Lakes Assessment water quality: first visit, sampled lakes only.
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/WaterbodyDatabase.mdb")
NLA<- sqlQuery(con, "
SELECT tblJoinNLAID_WBID.WB_ID, tblJoinNLAID_WBID.NLA_ID, NLA2007Sites_DesignInfo.SITE_TYPE, tblNLA_WaterQualityData.VISIT_NO, NLA2007Sites_DesignInfo.LAKE_SAMP, tblJoinNLAID_WBID.Rank, NLA2007Sites_DesignInfo.WGT_NLA, tblNLA_WaterQualityData.NTL, tblNLA_WaterQualityData.PTL, tblNLA_WaterQualityData.CHLA, tblNLA_WaterQualityData.SECMEAN, tblNLA_WaterQualityData.CLEAR_TO_BOTTOM
FROM (tblJoinNLAID_WBID INNER JOIN NLA2007Sites_DesignInfo ON tblJoinNLAID_WBID.NLA_ID = NLA2007Sites_DesignInfo.SITE_ID) INNER JOIN tblNLA_WaterQualityData ON (NLA2007Sites_DesignInfo.VISIT_NO = tblNLA_WaterQualityData.VISIT_NO) AND (NLA2007Sites_DesignInfo.SITE_ID = tblNLA_WaterQualityData.SITE_ID)
WHERE (((tblNLA_WaterQualityData.VISIT_NO)=1) AND ((NLA2007Sites_DesignInfo.LAKE_SAMP)='Target_Sampled') AND ((tblJoinNLAID_WBID.Rank)=1));
")
close(con)
str(NLA)
#Method detection limit Updates
# Values below the method detection limit are replaced with half the MDL.
NLA$PTL[NLA$PTL<4]<-2 #MDL for PTL is 4 assign to .5MDL=2
NLA$CHLA[NLA$CHLA<.1]<-0.05 #MDL for ChlA is .1 assign to .5MDL=.05
#Merge all
One<-merge(DSS,z,by='WB_ID',all.x=F) #n=18,014 two lakes do not have depth/volume data
One<-merge(One, NLA,by='WB_ID',all.x=T) #n=18,014
str(One)
#Calculated Fields
# Convert loads (kg/yr) to flow-weighted concentrations (mg/l): kg*1000/m3.
One$TN=One$NTL/1000 #(mg/l)=Total Nitrogen from NLA
One$TP=One$PTL/1000 #(mg/l)=Total Phosphorus from NLA
One$Nin=One$Ninput*1000/One$FlowM3_yr #(mg/l) Nitrogen inflow load concentration from sparrow
One$Nout=One$Noutput*1000/One$FlowM3_yr #(mg/l) Nitrogen outflow load concentration from sparrow
One$Pin=One$Pinput*1000/One$FlowM3_yr #(mg/l) Phosphorus inflow load concentration from sparrow
One$Pout=One$Poutput*1000/One$FlowM3_yr #(mg/l) Phosphorus outflow load concentration from sparrow
One$hrt=One$Volume/One$FlowM3_yr # (yr) Hydraulic retention time for GIS estimated max depth and volume
One$Zmean=One$Volume/One$Area #(m) Mean Depth for GIS estimated max depth and volume
#Eliminate Lake Champlain lakes where SPARROW predictions Nin doesn't equal Nout (within 0.5kg i.e., rounded to 3 places)
# this also eliminates Lake Champlain; n=17,792
MRB1<-One[round(One$Ninput)==round(One$Noutput),]
# NOTE(review): column selection below is positional -- fragile if the SQL
# queries or merge order change; verify indices against str(One).
MRB1<-MRB1[,c(1:13,17,20:30)] #eliminate unnecessary fields
#Select the NLA data only from MRB1 n=132
NLA<-MRB1[!is.na(MRB1$NLA_ID),]
#model search functions
#subroutine to return regression stats
# Summarise the fit of one candidate model as a one-row data.frame.
#
# Model: fitted model object (lm or robustbase::nlrob) whose residuals are on
#        the log10 scale of the response.
# In:    data frame of observations; must carry the response in column 'Y'
#        plus whatever predictors predict(Model, newdata = In) needs.
# y, x:  character labels for the response/predictor columns (reported only).
# Label: model identifier (e.g. 'H0', 'H6se') reported in the output row.
# Returns columns: model, Y, X, rmse, R2, adjR2, N, aic.
Stats <- function(Model, In, y, x, Label) {
  res <- na.exclude(Model$residuals)
  rmse <- round(sqrt(sum(res^2)/length(res)), 3)
  aic <- round(AIC(Model), 3)
  Yhat <- predict(Model, newdata = In)
  # R2 of observed (log10) vs predicted; fit the auxiliary regression once and
  # reuse its summary (the original refit the identical lm twice).
  fit_sum <- summary(lm(log10(In$Y) ~ Yhat))
  R2 <- round(fit_sum$r.squared, 3)
  adjR2 <- round(fit_sum$adj.r.squared, 3)
  N <- length(na.exclude(In$Y))
  data.frame(model = Label, Y = y, X = x, rmse, R2, adjR2, N, aic)
}
#main Model search function
# Fit the suite of candidate nutrient-retention models (H0-H8) for one
# nutrient and summarise them in an AIC model-comparison table.
#
# MRB1In:  name of the inflow-concentration column in Data (e.g. 'Nin').
# MRB1Out: name of the outflow-concentration column in Data (e.g. 'Nout').
# NLAobs:  name of the observed in-lake concentration column (e.g. 'TN').
# Data:    data frame also holding 'hrt' (residence time, yr) and 'Zmean' (m).
#
# Each candidate is fit inside tryCatch so a non-converging nlrob() fit does
# not abort the whole search; a failed fit just prints its label and is
# omitted from the table. Relies on Stats() defined above and on the global
# script-version string 'v'.
# Returns a data.frame with one row per successfully fitted model:
# model, Y, X, rmse, R2, adjR2, N, aic, dAIC, AICwt, Version.
ModelSearch <- function(MRB1In, MRB1Out, NLAobs, Data) {
  #Rename the analysis columns so the model formulas below are generic
  A <- Data
  tmp <- names(A)
  tmp[tmp == NLAobs] <- 'Y'
  tmp[tmp == MRB1In] <- 'Xin'
  tmp[tmp == MRB1Out] <- 'Xout'
  names(A) <- tmp
  # Accumulate one summary row per fitted model. Starting from NULL makes
  # rbind() safe even when the first (H0) fit fails -- the original left
  # 'keep' undefined in that case and every subsequent rbind() errored.
  keep <- NULL
  #Linear regression
  tryCatch({a <- lm(log10(Y)~log10(Xout), data=A)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1Out,'H0'))
  } , error = function(e) { print("H0") })
  #B&B2008 H1 log10(TP)=log10(Pin/(1+(.45*hrt)))
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt))),
  start=list(c1 = .45),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H1'))
  } , error = function(e) { print("H1") })
  #B&B2008 H2 log10(TP)=log10(Pin/(1+ 1.06))
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+c1)),
  start=list(c1 = 1.06),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H2'))
  } , error = function(e) { print("H2") })
  #B&B2008 H3 log10(TP)=log10(Pin/(1+((5.1/z)*hrt)))
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+((c1/Zmean)*hrt))),
  start=list(c1 = 5.1),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H3'))
  } , error = function(e) { print("H3") })
  #B&B2008 H4 log10(TP)=log10(Pin/(1+(1.12*hrt^-.53)))
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2))),
  start=list(c1 = 1.12,c2=-.53),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H4'))
  } , error = function(e) { print("H4") })
  #Reckhow(Bachmann) Pers. Comm. H4se log10(TN)=log10(Nin/(1+(0.693*hrt^0.45))) #NE
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2))),
  start=list(c1 = 0.693,c2=0.45),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H4se'))
  } , error = function(e) { print("H4se") })
  #Reckhow(Bachmann) Pers. Comm. H4ne log10(TN)=log10(Nin/(1+(0.67*hrt^0.25))) #SE
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2))),
  start=list(c1 = 0.67,c2=0.25),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H4ne'))
  } , error = function(e) { print("H4ne") })
  #B&B2008 H5 log10(TP)=log10((.65*Pin)/(1+(.17*hrt)))
  tryCatch({a <- nlrob(log10(Y) ~ log10((c1*Xin)/(1+(c2*hrt))),
  start=list(c1 = .65,c2=.17),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H5'))
  } , error = function(e) { print("H5") })
  #Ken Reckhow Eutromod H6ne: log10(TP)=log10(Pin/(1+(12.26*hrt^.45*z^-.16*Pin^.5))) see Reckhow_NE lakes - Eutromod - page1.pdf
  #mg/l
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2*Zmean^c3*Xin^c4))),
  start=list(c1 = 12.26, c2 = .45, c3=-.16,c4=.5),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H6ne'))
  } , error = function(e) { print("H6ne") })
  #Ken Reckhow Eutromod H6se: log10(TP)=log10(Pin/(1+(3.0*hrt^0.25*z^0.58*Pin^0.53))) see Reckhow 1988
  #mg/l
  tryCatch({a <- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2*Zmean^c3*Xin^c4))),
  start=list(c1 = 3.0, c2 = .25, c3=.58,c4=.53),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H6se'))
  } , error = function(e) { print("H6se") })
  #Windolf1996 Table 4 Model 1 H7: log10(TN)=log10(0.32*Nin*hrt^-0.18)
  tryCatch({a <- nlrob(log10(Y) ~ log10(c1*Xin*hrt^c2),
  start=list(c1 =.32, c2 = -.18),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H7'))
  } , error = function(e) { print("H7") })
  #Windolf1996 Table 4 Model 2 H8: log10(TN)=log10(0.27*Nin*hrt^-0.22*z^0.12)
  tryCatch({a <- nlrob(log10(Y) ~ log10(c1*Xin*hrt^c2*Zmean^c3),
  start=list(c1 =.27, c2 = -.22, c3=.12),
  data=A, algorithm = "default", trace=FALSE, na.action = na.exclude)
  keep <- rbind(keep, Stats(a,A,NLAobs,MRB1In,'H8'))
  } , error = function(e) { print("H8") })
  #Assemble the comparison table
  if (is.null(keep)) stop("ModelSearch: no candidate model could be fitted", call. = FALSE)
  Results <- data.frame(keep)
  a <- as.numeric(as.character(Results$aic)) #convert AIC stored as factor to numeric level
  Results$dAIC <- a - min(a, na.rm=TRUE) #get delta AIC
  Results$AICwt <- round(exp(-Results$dAIC/2)/sum(exp(-Results$dAIC/2), na.rm=TRUE), 3) #get AIC weight
  Results[is.na(Results$dAIC), 4:10] <- NA # convert all output to NA for nl models that failed to converge
  Results$Version <- v #add R script version to output file
  Results
}
##############################
#Select Best model for N and P
#ModelSearch() fits retention models H0-H8 and returns an AIC comparison table
P<- ModelSearch('Pin','Pout','TP',NLA)
P
# Export Table
#write.table(P, file='//AA.AD.EPA.GOV/ORD/NAR/USERS/EC2/wmilstea/Net MyDocuments/tempMD/tempP.csv',row.names=F,sep=',')
N<- ModelSearch('Nin','Nout','TN',NLA)
N
# Export Table
#write.table(N, file='//AA.AD.EPA.GOV/ORD/NAR/USERS/EC2/wmilstea/Net MyDocuments/tempMD/tempN.csv',row.names=F,sep=',')
#Reckhow Eutromod H6se is the best model for both N and P
########### linear model for N and P
#Linear model for N
LMN<-lm(log10(TN)~log10(Nout),data=NLA)
MRB1$TNlm<-10**predict(LMN, newdata = MRB1) #get predicted values; 10** back-transforms from log10
#Linear model for P
LMP<-lm(log10(TP)~log10(Pout),data=NLA)
MRB1$TPlm<-10**predict(LMP, newdata = MRB1) #get predicted values
########### Best nonlinear model for N and P
#Ken Reckhow Eutromod H6se: log10(TP)=log10(Pin/(1+(3.0*hrt^0.25*z^0.58*Pin^0.53))) see Reckhow 1988
#nonlinear model for N (robust fit; start values are the published H6se coefficients)
nln<-nlrob(log10(TN) ~ log10(Nin/(1+(c1*hrt^c2*Zmean^c3*Nin^c4))),
start=list(c1 = 3.0, c2 = .25, c3=.58,c4=.53),
data=NLA,algorithm = "default", trace=F,na.action = na.exclude)
MRB1$TNvv<-10**predict(nln, newdata = MRB1) #get predicted values
#nonlinear model for P
nlp<-nlrob(log10(TP) ~ log10(Pin/(1+(c1*hrt^c2*Zmean^c3*Pin^c4))),
start=list(c1 = 3.0, c2 = .25, c3=.58,c4=.53),
data=NLA,algorithm = "default", trace=F,na.action = na.exclude)
MRB1$TPvv<-10**predict(nlp, newdata = MRB1) #get predicted values
#Load State data n=28,122
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/WaterbodyDatabase.mdb")
St<- sqlQuery(con, "
SELECT tblWBIDbyState.WB_ID, tblWBIDbyState.ST1, tblWBIDbyState.ST2
FROM tblWBIDbyState
GROUP BY tblWBIDbyState.WB_ID, tblWBIDbyState.ST1, tblWBIDbyState.ST2;
")
close(con)
str(St)
#Add State Data to MRB1 (inner join on lake id; lakes without state info are dropped)
MRB1<-merge(MRB1,St,by='WB_ID')
nrow(MRB1)
#Resave NLA data n=132
NLA<-MRB1[!is.na(MRB1$NLA_ID),]
#########################
#save the data
#NOTE(review): the script version string is 20121003 but the .rda below is
#dated 20120912 -- confirm the intended output filename.
#
save(LMN,nln,LMP,nlp,MRB1,NLA,file='C:/Bryan/EPA/Data/RData/InOutModelSelection20120912.rda')
#load(file='C:/Bryan/EPA/Data/RData/InOutModelSelection20120808.rda')
#files: MRB1, NLA, LMN (linear model nitrogen), LMP (lm Phosphorus), nln (nonlinear model N), nlp (nl P)
#Data Definitions MRB1 n=17,982 NLA n=134
# WB_ID: unique lake identification number
# FlowM3_yr: (m3/yr) flow into and out of lake
# Volume: lake volume estimated from Zmax
# Ninput (kg/yr): Sum of nitrogen from SPARROW for all upstream flowlines plus the incremental load.
# Noutput: (kg/yr) Sparrow estimate of Nitrogen Load
# Pinput (kg/yr): Sum of phosphorus from SPARROW for all upstream flowlines plus incremental load.
# Poutput: (kg/yr) Sparrow estimate of Phosphorus Load
# Zmax: estimated Maximum depth of the lake
# Area (m2): [AlbersAreaM] Lake Surface Area calculated from NHDPlus derived waterbody polygons in Albers projection
# AlbersX: (m) X coordinate of lake Albers projection
# AlbersY: (m) Y coordinate of lake Albers projection
# NLA_ID: National Lake Assessment (NLA) Lake Identification Number
# CHLA (ug/l): Chorophyll A concentration in waterbody from NLA
# SECMEAN (m): Secchi Disk Transparency from NLA
# CLEAR_TO_BOTTOM (Y/NA): Y=lake is clear to bottom so SECMEAN is not valid
# TN: (mg/l) Total Nitrogen from NLA
# TP: (mg/l) Total Phosphorus from NLA
# Nin:(mg/l) Nitrogen inflow load concentration from sparrow
# Nout:(mg/l) Nitrogen outflow load concentration from sparrow
# Pin:(mg/l) Phosphorus inflow load concentration from sparrow
# Pout:(mg/l) Phosphorus outflow load concentration from sparrow
# hrt:(yr) Hydraulic retention time for GIS estimated max depth and volume
# Zmean:(m) Mean Depth for GIS estimated max depth and volume
# TNlm: (mg/l) Predicted Total Nitrogen based on the linear model for NLA~SPARROW (LMN)
# TPlm: (mg/l) Predicted Total Phosphorus based on the linear model for NLA~SPARROW (LMP)
# TNvv: (mg/l) Predicted Total Nitrogen based on the nonlinear Eutromod model (H6) for NLA~SPARROW (nln)
# TPvv: (mg/l) Predicted Total Phosphorus based on the nonlinear Eutromod model (H6) for NLA~SPARROW (nlp)
# ST1: State where the majority of the lake (by area) is located
# ST2: If the lake is in two states, State where the minority of the lake (by area) is located
| /r/Old/DSS_InOutModelSelection20121003.r | no_license | willbmisled/MRB1 | R | false | false | 13,580 | r | rm(list=ls(all=T)) #clear workspace
v='DSS_InOutModelSelection20121003.r'
# Read data-****Make Sure the Path Is Correct****
require(RODBC) #Packages robustbase & RODBC must be installed
require(robustbase)
#Load DSS load data n=18,016
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/MRB1.mdb")
DSS<- sqlQuery(con, "
SELECT tblWBID_SparrowLoadsDSS.WB_ID, tblWBID_SparrowLoadsDSS.FlowM3_yr, tblWBID_SparrowLoadsDSS.Ninput, tblWBID_SparrowLoadsDSS.Noutput, tblWBID_SparrowLoadsDSS.Pinput, tblWBID_SparrowLoadsDSS.Poutput
FROM tblWBID_SparrowLoadsDSS;
")
close(con)
str(DSS)
#Load Area, Depth & Volume data n=27,942
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/WaterbodyDatabase.mdb")
z<- sqlQuery(con, "
SELECT MRB1_PredictedVolumeDepth.WB_ID, MRB1_PredictedVolumeDepth.distvol AS Volume, MRB1_PredictedVolumeDepth.maxdepth_corrected AS Zmax, MRB1_WBIDLakes.AlbersAreaM AS Area, MRB1_WBIDLakes.AlbersX, MRB1_WBIDLakes.AlbersY
FROM MRB1_PredictedVolumeDepth INNER JOIN MRB1_WBIDLakes ON MRB1_PredictedVolumeDepth.WB_ID = MRB1_WBIDLakes.WB_ID;
")
close(con)
str(z)
#Load NLA data n=155
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/WaterbodyDatabase.mdb")
NLA<- sqlQuery(con, "
SELECT tblJoinNLAID_WBID.WB_ID, tblJoinNLAID_WBID.NLA_ID, NLA2007Sites_DesignInfo.SITE_TYPE, tblNLA_WaterQualityData.VISIT_NO, NLA2007Sites_DesignInfo.LAKE_SAMP, tblJoinNLAID_WBID.Rank, NLA2007Sites_DesignInfo.WGT_NLA, tblNLA_WaterQualityData.NTL, tblNLA_WaterQualityData.PTL, tblNLA_WaterQualityData.CHLA, tblNLA_WaterQualityData.SECMEAN, tblNLA_WaterQualityData.CLEAR_TO_BOTTOM
FROM (tblJoinNLAID_WBID INNER JOIN NLA2007Sites_DesignInfo ON tblJoinNLAID_WBID.NLA_ID = NLA2007Sites_DesignInfo.SITE_ID) INNER JOIN tblNLA_WaterQualityData ON (NLA2007Sites_DesignInfo.VISIT_NO = tblNLA_WaterQualityData.VISIT_NO) AND (NLA2007Sites_DesignInfo.SITE_ID = tblNLA_WaterQualityData.SITE_ID)
WHERE (((tblNLA_WaterQualityData.VISIT_NO)=1) AND ((NLA2007Sites_DesignInfo.LAKE_SAMP)='Target_Sampled') AND ((tblJoinNLAID_WBID.Rank)=1));
")
close(con)
str(NLA)
#Method detection limit Updates
#Values below the method detection limit (MDL) are replaced with half the MDL.
NLA$PTL[NLA$PTL<4]<-2 #MDL for PTL is 4 assign to .5MDL=2
NLA$CHLA[NLA$CHLA<.1]<-0.05 #MDL for ChlA is .1 assign to .5MDL=.05
#Merge all
One<-merge(DSS,z,by='WB_ID',all.x=F) #n=18,014 two lakes do not have depth/volume data
One<-merge(One, NLA,by='WB_ID',all.x=T) #n=18,014
str(One)
#Calculated Fields (assignment standardized to <- for consistency with the rest of the script)
One$TN<-One$NTL/1000 #(mg/l)=Total Nitrogen from NLA
One$TP<-One$PTL/1000 #(mg/l)=Total Phosphorus from NLA
One$Nin<-One$Ninput*1000/One$FlowM3_yr #(mg/l) Nitrogen inflow load concentration from sparrow
One$Nout<-One$Noutput*1000/One$FlowM3_yr #(mg/l) Nitrogen outflow load concentration from sparrow
One$Pin<-One$Pinput*1000/One$FlowM3_yr #(mg/l) Phosphorus inflow load concentration from sparrow
One$Pout<-One$Poutput*1000/One$FlowM3_yr #(mg/l) Phosphorus outflow load concentration from sparrow
One$hrt<-One$Volume/One$FlowM3_yr # (yr) Hydraulic retention time for GIS estimated max depth and volume
One$Zmean<-One$Volume/One$Area #(m) Mean Depth for GIS estimated max depth and volume
#Eliminate Lake Champlain lakes where SPARROW predictions Nin doesn't equal Nout
#(round() with no digits compares loads to the nearest whole kg, not to 3 places
#as previously stated); this also eliminates Lake Champlain; n=17,792
MRB1<-One[round(One$Ninput)==round(One$Noutput),]
MRB1<-MRB1[,c(1:13,17,20:30)] #eliminate unnecessary fields
#Select the NLA data only from MRB1 n=132
NLA<-MRB1[!is.na(MRB1$NLA_ID),]
#model search functions
#Helper: assemble one row of goodness-of-fit statistics for a fitted model.
#  Model: fitted lm/nlrob object (must expose $residuals and support predict()).
#  In:    the data the model was fit to; must contain column Y (observed values).
#  y, x:  character labels identifying the observed and predictor variables.
#  Label: short model name (e.g. 'H6se').
#Returns a one-row data.frame: model, Y, X, rmse, R2, adjR2, N, aic.
Stats<-function(Model,In,y,x,Label){
  res <- na.exclude(Model$residuals)
  rmse <- round(sqrt(sum(res^2) / length(res)), 3)
  aic <- round(AIC(Model), 3)
  Yhat <- predict(Model, newdata = In)
  # R2/adjR2 come from regressing the observed log10 values on the predictions
  obs_fit <- summary(lm(log10(In$Y) ~ Yhat))
  R2 <- round(obs_fit$r.squared, 3)
  adjR2 <- round(obs_fit$adj.r.squared, 3)
  N <- length(na.exclude(In$Y))
  data.frame(model = Label, Y = y, X = x, rmse, R2, adjR2, N, aic)
}
#main Model search function
#Fits the suite of published nutrient input/output retention models (H0-H8)
#to the NLA/SPARROW data and tabulates fit statistics so the best model can
#be selected by AIC.
#  MRB1In:  name of the inflow concentration column (e.g. 'Pin')
#  MRB1Out: name of the outflow concentration column (e.g. 'Pout')
#  NLAobs:  name of the observed NLA concentration column (e.g. 'TP')
#  Data:    data.frame containing those columns plus hrt and Zmean
#Returns a data.frame with one row per model: rmse, R2, adjR2, N, aic,
#dAIC, AICwt and the script version. Depends on the sibling Stats()
#helper, robustbase::nlrob(), and the script-level version string `v`.
ModelSearch<-function(MRB1In,MRB1Out,NLAobs,Data){
#Rename Data to automate the anlysis below
A<-Data
tmp<-names(A)
tmp[tmp==NLAobs]<-'Y'
tmp[tmp==MRB1In]<-'Xin'
tmp[tmp==MRB1Out]<-'Xout'
names(A)<-tmp
#Start from an empty results table: if the H0 fit failed, `keep` would
#otherwise be undefined here and R's scoping could silently pick up a
#stale global `keep`.  rbind(NULL, x) simply returns x.
keep<-NULL
#Linear regression
tryCatch({a<-lm(log10(Y)~log10(Xout),data=A)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1Out,'H0'))
} , error = function(e) { print("H0") })
#B&B2008 H1 log10(TP)=log10(Pin/(1+(.45*hrt)))
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt))),
start=list(c1 = .45),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H1'))
} , error = function(e) { print("H1") })
#B&B2008 H2 log10(TP)=log10(Pin/(1+ 1.06))
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+c1)),
start=list(c1 = 1.06),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H2'))
} , error = function(e) { print("H2") })
#B&B2008 H3 log10(TP)=log10(Pin/(1+((5.1/z)*hrt)))
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+((c1/Zmean)*hrt))),
start=list(c1 = 5.1),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H3'))
} , error = function(e) { print("H3") })
#B&B2008 H4 log10(TP)=log10(Pin/(1+(1.12*hrt^-.53)))
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2))),
start=list(c1 = 1.12,c2=-.53),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H4'))
} , error = function(e) { print("H4") })
#Reckhow(Bachmann) Pers. Comm. H4se log10(TN)=log10(Nin/(1+(0.693*hrt^0.45))) #NE
#NOTE(review): the 'se'/'ne' model labels and the trailing #NE/#SE region
#tags look swapped here and in H4ne below -- confirm against the source
#coefficients before relying on the labels.
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2))),
start=list(c1 = 0.693,c2=0.45),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H4se'))
} , error = function(e) { print("H4se") })
#Reckhow(Bachmann) Pers. Comm. H4ne log10(TN)=log10(Nin/(1+(0.67*hrt^0.25))) #SE
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2))),
start=list(c1 = 0.67,c2=0.25),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H4ne'))
} , error = function(e) { print("H4ne") })
#B&B2008 H5 log10(TP)=log10((.65*Pin)/(1+(.17*hrt)))
tryCatch({a<- nlrob(log10(Y) ~ log10((c1*Xin)/(1+(c2*hrt))),
start=list(c1 = .65,c2=.17),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H5'))
} , error = function(e) { print("H5") })
#Ken Reckhow Eutromod H6ne: log10(TP)=log10(Pin/(1+(12.26*hrt^.45*z^-.16*Pin^.5))) see Reckhow_NE lakes - Eutromod - page1.pdf
#mg/l
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2*Zmean^c3*Xin^c4))),
start=list(c1 = 12.26, c2 = .45, c3=-.16,c4=.5),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H6ne'))
} , error = function(e) { print("H6ne") })
#Ken Reckhow Eutromod H6se: log10(TP)=log10(Pin/(1+(3.0*hrt^0.25*z^0.58*Pin^0.53))) see Reckhow 1988
#mg/l
tryCatch({a<- nlrob(log10(Y) ~ log10(Xin/(1+(c1*hrt^c2*Zmean^c3*Xin^c4))),
start=list(c1 = 3.0, c2 = .25, c3=.58,c4=.53),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H6se'))
} , error = function(e) { print("H6se") })
#Windolf1996 Table 4 Model 1 H7: log10(TN)=log10(0.32*Nin*hrt^-0.18)
tryCatch({a<- nlrob(log10(Y) ~ log10(c1*Xin*hrt^c2),
start=list(c1 =.32, c2 = -.18),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H7'))
} , error = function(e) { print("H7") })
#Windolf1996 Table 4 Model 2 H8: log10(TN)=log10(0.27*Nin*hrt^-0.22*z^0.12)
tryCatch({a<- nlrob(log10(Y) ~ log10(c1*Xin*hrt^c2*Zmean^c3),
start=list(c1 =.27, c2 = -.22, c3=.12),
data=A,algorithm = "default", trace=FALSE,na.action = na.exclude)
keep<-rbind(keep,Stats(a,A,NLAobs,MRB1In,'H8'))
} , error = function(e) { print("H8") })
#Assemble the results table with AIC model-selection statistics
Results<-data.frame(keep)
a<-as.numeric(as.character(Results$aic)) #convert AIC stored as factor to numeric level
Results$dAIC<-a-min(a,na.rm=TRUE) #get delta AIC
Results$AICwt<-round(exp(-Results$dAIC/2)/sum(exp(-Results$dAIC/2),na.rm=TRUE),3) #get AIC weight
Results[is.na(Results$dAIC),4:10]<-NA # convert all output to NA for nl models that failed to converge
Results$Version<-v #add R script version to output file
Results
}
##############################
#Select Best model for N and P
#ModelSearch() fits retention models H0-H8 and returns an AIC comparison table
P<- ModelSearch('Pin','Pout','TP',NLA)
P
# Export Table
#write.table(P, file='//AA.AD.EPA.GOV/ORD/NAR/USERS/EC2/wmilstea/Net MyDocuments/tempMD/tempP.csv',row.names=F,sep=',')
N<- ModelSearch('Nin','Nout','TN',NLA)
N
# Export Table
#write.table(N, file='//AA.AD.EPA.GOV/ORD/NAR/USERS/EC2/wmilstea/Net MyDocuments/tempMD/tempN.csv',row.names=F,sep=',')
#Reckhow Eutromod H6se is the best model for both N and P
########### linear model for N and P
#Linear model for N
LMN<-lm(log10(TN)~log10(Nout),data=NLA)
MRB1$TNlm<-10**predict(LMN, newdata = MRB1) #get predicted values; 10** back-transforms from log10
#Linear model for P
LMP<-lm(log10(TP)~log10(Pout),data=NLA)
MRB1$TPlm<-10**predict(LMP, newdata = MRB1) #get predicted values
########### Best nonlinear model for N and P
#Ken Reckhow Eutromod H6se: log10(TP)=log10(Pin/(1+(3.0*hrt^0.25*z^0.58*Pin^0.53))) see Reckhow 1988
#nonlinear model for N (robust fit; start values are the published H6se coefficients)
nln<-nlrob(log10(TN) ~ log10(Nin/(1+(c1*hrt^c2*Zmean^c3*Nin^c4))),
start=list(c1 = 3.0, c2 = .25, c3=.58,c4=.53),
data=NLA,algorithm = "default", trace=F,na.action = na.exclude)
MRB1$TNvv<-10**predict(nln, newdata = MRB1) #get predicted values
#nonlinear model for P
nlp<-nlrob(log10(TP) ~ log10(Pin/(1+(c1*hrt^c2*Zmean^c3*Pin^c4))),
start=list(c1 = 3.0, c2 = .25, c3=.58,c4=.53),
data=NLA,algorithm = "default", trace=F,na.action = na.exclude)
MRB1$TPvv<-10**predict(nlp, newdata = MRB1) #get predicted values
#Load State data n=28,122
con <- odbcConnectAccess("C:/Bryan/EPA/Data/WaterbodyDatabase/WaterbodyDatabase.mdb")
St<- sqlQuery(con, "
SELECT tblWBIDbyState.WB_ID, tblWBIDbyState.ST1, tblWBIDbyState.ST2
FROM tblWBIDbyState
GROUP BY tblWBIDbyState.WB_ID, tblWBIDbyState.ST1, tblWBIDbyState.ST2;
")
close(con)
str(St)
#Add State Data to MRB1 (inner join on lake id; lakes without state info are dropped)
MRB1<-merge(MRB1,St,by='WB_ID')
nrow(MRB1)
#Resave NLA data n=132
NLA<-MRB1[!is.na(MRB1$NLA_ID),]
#########################
#save the data
#NOTE(review): the script version string is 20121003 but the .rda below is
#dated 20120912 -- confirm the intended output filename.
#
save(LMN,nln,LMP,nlp,MRB1,NLA,file='C:/Bryan/EPA/Data/RData/InOutModelSelection20120912.rda')
#load(file='C:/Bryan/EPA/Data/RData/InOutModelSelection20120808.rda')
#files: MRB1, NLA, LMN (linear model nitrogen), LMP (lm Phosphorus), nln (nonlinear model N), nlp (nl P)
#Data Definitions MRB1 n=17,982 NLA n=134
# WB_ID: unique lake identification number
# FlowM3_yr: (m3/yr) flow into and out of lake
# Volume: lake volume estimated from Zmax
# Ninput (kg/yr): Sum of nitrogen from SPARROW for all upstream flowlines plus the incremental load.
# Noutput: (kg/yr) Sparrow estimate of Nitrogen Load
# Pinput (kg/yr): Sum of phosphorus from SPARROW for all upstream flowlines plus incremental load.
# Poutput: (kg/yr) Sparrow estimate of Phosphorus Load
# Zmax: estimated Maximum depth of the lake
# Area (m2): [AlbersAreaM] Lake Surface Area calculated from NHDPlus derived waterbody polygons in Albers projection
# AlbersX: (m) X coordinate of lake Albers projection
# AlbersY: (m) Y coordinate of lake Albers projection
# NLA_ID: National Lake Assessment (NLA) Lake Identification Number
# CHLA (ug/l): Chorophyll A concentration in waterbody from NLA
# SECMEAN (m): Secchi Disk Transparency from NLA
# CLEAR_TO_BOTTOM (Y/NA): Y=lake is clear to bottom so SECMEAN is not valid
# TN: (mg/l) Total Nitrogen from NLA
# TP: (mg/l) Total Phosphorus from NLA
# Nin:(mg/l) Nitrogen inflow load concentration from sparrow
# Nout:(mg/l) Nitrogen outflow load concentration from sparrow
# Pin:(mg/l) Phosphorus inflow load concentration from sparrow
# Pout:(mg/l) Phosphorus outflow load concentration from sparrow
# hrt:(yr) Hydraulic retention time for GIS estimated max depth and volume
# Zmean:(m) Mean Depth for GIS estimated max depth and volume
# TNlm: (mg/l) Predicted Total Nitrogen based on the linear model for NLA~SPARROW (LMN)
# TPlm: (mg/l) Predicted Total Phosphorus based on the linear model for NLA~SPARROW (LMP)
# TNvv: (mg/l) Predicted Total Nitrogen based on the nonlinear Eutromod model (H6) for NLA~SPARROW (nln)
# TPvv: (mg/l) Predicted Total Phosphorus based on the nonlinear Eutromod model (H6) for NLA~SPARROW (nlp)
# ST1: State where the majority of the lake (by area) is located
# ST2: If the lake is in two states, State where the minority of the lake (by area) is located
|
library(RCurl)
library(XML)
library(RJSONIO)
# Query the Douban movie-search API for a keyword and return the first
# hit's title and average rating as list(name = <title>, score = <rating>).
# NOTE(review): the keyword is wrapped in literal braces and is not
# percent-encoded -- presumably the API tolerates raw UTF-8; confirm.
movieScoreapi <- function(x) {
  base_url <- "https://api.douban.com/v2/movie/search?q={"
  query_url <- paste0(base_url, x, "}")
  raw_json <- getURL(query_url)          # fetch raw JSON (RCurl)
  parsed <- fromJSON(raw_json)           # parse into an R list (RJSONIO)
  first_hit <- parsed$subjects[[1]]      # first search result only
  list(name = first_hit$title, score = first_hit$rating$average)
}
movieScoreapi("僵尸世界大战")
| /r/r3.R | no_license | lyuehh/program_exercise | R | false | false | 397 | r | library(RCurl)
library(XML)
library(RJSONIO)
# Query the Douban movie-search API for a keyword and return the first
# hit's title and average rating as list(name, score).
# NOTE(review): the keyword is wrapped in literal braces and is not
# percent-encoded -- presumably the API tolerates raw UTF-8; confirm.
movieScoreapi <- function(x) {
api <- "https://api.douban.com/v2/movie/search?q={" # base search URL
url <- paste(api, x, "}", sep = "") # append keyword and closing brace
res <- getURL(url) # fetch raw JSON (RCurl)
reslist <- fromJSON(res) # parse JSON into an R list (RJSONIO)
name <- reslist$subjects[[1]]$title # first search result only
score <- reslist$subjects[[1]]$rating$average
return(list(name = name, score = score))
}
movieScoreapi("僵尸世界大战")
|
# Week 4.2 Assignment
# Name: Vinay Nagaraj
# Scatterplot, Bubble chart and Density plot

# Load libraries first: dplyr must be attached before the %>% pipe and
# filter() are used below (the original attached them after the first
# pipe, which errors in a fresh R session).
library(ggplot2)
library(dplyr)
library(tidyr)

# Set working directory, location where my data file is saved along with my .R files
setwd("/Users/vinaynagaraj/My Docs/Masters/Sem 6, Data Presentation & Visualization/Week 7-8")

# Import "crimerates-by-state-2005.csv" for analysis
# (read.csv2 defaults are overridden by sep=','/dec='.', i.e. standard CSV)
crime_rate_raw <- read.csv2(file = "crimerates-by-state-2005.csv", header = TRUE, sep = ',', dec = '.')

# Drop the national "United States" summary row to keep state-level data only
crime_rate <- crime_rate_raw %>% filter(state != 'United States')

# Show the data sample
head(crime_rate)

# Scatterplot: motor vehicle theft vs robbery
ggplot(crime_rate, aes(x = motor_vehicle_theft, y = robbery)) +
  geom_point(color = "red", alpha = 0.5) +
  xlab('Motor Vehicle Theft Incidents') +
  ylab('Robbery Incidents') +
  ggtitle('Motor Vehicle Theft Incidents and Robbery Incidents') +
  theme(plot.title = element_text(hjust = 0.5, size = 18))

# Bubble chart: same scatter with point size mapped to murder counts
ggplot(crime_rate, aes(x = motor_vehicle_theft, y = robbery, size = murder)) +
  geom_point(color = "red", alpha = 0.5) +
  xlab('Motor Vehicle Theft Incidents') +
  ylab('Robbery Incidents') +
  ggtitle('Motor Vehicle Theft Incidents and Robbery Incidents') +
  theme(plot.title = element_text(hjust = 0.5, size = 18))
# Density Plot
ggplot(crime_rate, aes(x=burglary)) +
geom_density(color="red", fill="blue") +
xlab('Burglary Incident Counts') +
ylab('Burglary Count Frequency/Density') +
ggtitle('Density Plot: Burglary Incidents') +
theme(plot.title = element_text(hjust = 0.5, size = 14)) | /Assignments/Assignment 4.2 - Vinay Nagaraj.R | no_license | vinaynagaraj88/DSC640---Data-Presentation-Visualization | R | false | false | 1,583 | r | # Week 4.2 Assignment
# Name: Vinay Nagaraj
# Scatterplot, Bubble chart and Density plot

# Load libraries first: dplyr must be attached before the %>% pipe and
# filter() are used below (the original attached them after the first
# pipe, which errors in a fresh R session).
library(ggplot2)
library(dplyr)
library(tidyr)

# Set working directory, location where my data file is saved along with my .R files
setwd("/Users/vinaynagaraj/My Docs/Masters/Sem 6, Data Presentation & Visualization/Week 7-8")

# Import "crimerates-by-state-2005.csv" for analysis
# (read.csv2 defaults are overridden by sep=','/dec='.', i.e. standard CSV)
crime_rate_raw <- read.csv2(file = "crimerates-by-state-2005.csv", header = TRUE, sep = ',', dec = '.')

# Drop the national "United States" summary row to keep state-level data only
crime_rate <- crime_rate_raw %>% filter(state != 'United States')

# Show the data sample
head(crime_rate)

# Scatterplot: motor vehicle theft vs robbery
ggplot(crime_rate, aes(x = motor_vehicle_theft, y = robbery)) +
  geom_point(color = "red", alpha = 0.5) +
  xlab('Motor Vehicle Theft Incidents') +
  ylab('Robbery Incidents') +
  ggtitle('Motor Vehicle Theft Incidents and Robbery Incidents') +
  theme(plot.title = element_text(hjust = 0.5, size = 18))

# Bubble chart: same scatter with point size mapped to murder counts
ggplot(crime_rate, aes(x = motor_vehicle_theft, y = robbery, size = murder)) +
  geom_point(color = "red", alpha = 0.5) +
  xlab('Motor Vehicle Theft Incidents') +
  ylab('Robbery Incidents') +
  ggtitle('Motor Vehicle Theft Incidents and Robbery Incidents') +
  theme(plot.title = element_text(hjust = 0.5, size = 18))

# Density plot of burglary incident counts
ggplot(crime_rate, aes(x = burglary)) +
  geom_density(color = "red", fill = "blue") +
  xlab('Burglary Incident Counts') +
  ylab('Burglary Count Frequency/Density') +
  ggtitle('Density Plot: Burglary Incidents') +
  theme(plot.title = element_text(hjust = 0.5, size = 14))
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting.R
\name{cor_diff_report}
\alias{cor_diff_report}
\title{Report cocor's difference of correlations}
\usage{
cor_diff_report(cor_p)
}
\arguments{
\item{cor_p}{a cocor object}
}
\description{
Report cocor's difference of correlations
}
| /man/cor_diff_report.Rd | no_license | pinusm/Mmisc | R | false | true | 333 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporting.R
\name{cor_diff_report}
\alias{cor_diff_report}
\title{Report cocor's difference of correlations}
\usage{
cor_diff_report(cor_p)
}
\arguments{
\item{cor_p}{a cocor object}
}
\description{
Report cocor's difference of correlations
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.