content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
/impl/02_data_preparation/30_returns&mood/ret_day_quart_v03.R
no_license
yangboyubyron/thesis-google-crypto-trading
R
false
false
2,267
r
suppressPackageStartupMessages(c( library(shiny), library(tm), library(stringr))) source("C:/Users/Usuario/Documents/final/en_US/WordLogic.R") # LoadData() shinyServer(function(input, output) { output$NextWord <- renderPrint({ result <- predictWord(input$inputText) result }); output$inputWords <- renderText({ input$inputText}); } )
/server.R
no_license
Quijote2015/Data_Science_Capstone
R
false
false
383
r
suppressPackageStartupMessages(c( library(shiny), library(tm), library(stringr))) source("C:/Users/Usuario/Documents/final/en_US/WordLogic.R") # LoadData() shinyServer(function(input, output) { output$NextWord <- renderPrint({ result <- predictWord(input$inputText) result }); output$inputWords <- renderText({ input$inputText}); } )
library(glmnet) mydata = read.table("./TrainingSet/AvgRank/autonomic_ganglia.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.8,family="gaussian",standardize=TRUE) sink('./Model/EN/AvgRank/autonomic_ganglia/autonomic_ganglia_081.txt',append=TRUE) print(glm$glmnet.fit) sink()
/Model/EN/AvgRank/autonomic_ganglia/autonomic_ganglia_081.R
no_license
leon1003/QSMART
R
false
false
384
r
library(glmnet) mydata = read.table("./TrainingSet/AvgRank/autonomic_ganglia.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.8,family="gaussian",standardize=TRUE) sink('./Model/EN/AvgRank/autonomic_ganglia/autonomic_ganglia_081.txt',append=TRUE) print(glm$glmnet.fit) sink()
# Used in BASiCS_VarianceDecomp HiddenVarDecomp <- function(Chain) { if (!is(Chain, "BASiCS_Chain")) { stop("'Chain' is not a BASiCS_Chain class object.") } N <- nrow(Chain@parameters$delta) q.bio <- ncol(Chain@parameters$delta) UniqueBatch <- colnames(Chain@parameters$theta) nBatch <- length(UniqueBatch) CellName <- colnames(Chain@parameters$s) if (nBatch > 1) { Theta <- matrixStats::rowMedians(Chain@parameters$theta) } else { Theta <- as.vector(Chain@parameters$theta) } # To store global values (uses median values across all cells) if("phi" %in% names(Chain@parameters)) { PhiS <- matrixStats::rowMedians(Chain@parameters$phi * Chain@parameters$s) } else { PhiS <- matrixStats::rowMedians(Chain@parameters$s) } Aux <- (1 / (PhiS * Chain@parameters$mu)) + Chain@parameters$delta * (Theta + 1) TechVarGlobal <- Theta / (Aux + Theta) BioVarGlobal <- (Chain@parameters$delta * (Theta + 1)) / (Aux + Theta) # To store batch specific values (in arrays) TechVarBatch <- array(0, dim = c(N, q.bio, nBatch)) # Technical BioVarBatch <- array(0, dim = c(N, q.bio, nBatch)) # Biological if (nBatch > 1) { for (Batch in seq_len(nBatch)) { SBatch <- Chain@parameters$s[, grep(UniqueBatch[Batch], CellName)] if("phi" %in% names(Chain@parameters)) { PhiBatch <- Chain@parameters$phi[, grep(UniqueBatch[Batch], CellName)] PhiSBatch <- matrixStats::rowMedians(PhiBatch * SBatch) } else { PhiSBatch <- matrixStats::rowMedians(SBatch) } Aux <- (1 / (PhiSBatch * Chain@parameters$mu)) + Chain@parameters$delta * (Chain@parameters$theta[, Batch] + 1) TechVarBatch[, , Batch] <- Chain@parameters$theta[,Batch] / (Aux + Chain@parameters$theta[, Batch]) BioVarBatch[, , Batch] <- (Chain@parameters$delta * (Chain@parameters$theta[, Batch] + 1)) / (Aux + Chain@parameters$theta[, Batch]) } } if (nBatch > 1) { list(TechVarGlobal = TechVarGlobal, BioVarGlobal = BioVarGlobal, TechVarBatch = TechVarBatch, BioVarBatch = BioVarBatch) } else { list(TechVarGlobal = TechVarGlobal, BioVarGlobal = BioVarGlobal) } }
/R/HiddenVarDecomp.R
no_license
nilseling/BASiCS
R
false
false
2,230
r
# Used in BASiCS_VarianceDecomp HiddenVarDecomp <- function(Chain) { if (!is(Chain, "BASiCS_Chain")) { stop("'Chain' is not a BASiCS_Chain class object.") } N <- nrow(Chain@parameters$delta) q.bio <- ncol(Chain@parameters$delta) UniqueBatch <- colnames(Chain@parameters$theta) nBatch <- length(UniqueBatch) CellName <- colnames(Chain@parameters$s) if (nBatch > 1) { Theta <- matrixStats::rowMedians(Chain@parameters$theta) } else { Theta <- as.vector(Chain@parameters$theta) } # To store global values (uses median values across all cells) if("phi" %in% names(Chain@parameters)) { PhiS <- matrixStats::rowMedians(Chain@parameters$phi * Chain@parameters$s) } else { PhiS <- matrixStats::rowMedians(Chain@parameters$s) } Aux <- (1 / (PhiS * Chain@parameters$mu)) + Chain@parameters$delta * (Theta + 1) TechVarGlobal <- Theta / (Aux + Theta) BioVarGlobal <- (Chain@parameters$delta * (Theta + 1)) / (Aux + Theta) # To store batch specific values (in arrays) TechVarBatch <- array(0, dim = c(N, q.bio, nBatch)) # Technical BioVarBatch <- array(0, dim = c(N, q.bio, nBatch)) # Biological if (nBatch > 1) { for (Batch in seq_len(nBatch)) { SBatch <- Chain@parameters$s[, grep(UniqueBatch[Batch], CellName)] if("phi" %in% names(Chain@parameters)) { PhiBatch <- Chain@parameters$phi[, grep(UniqueBatch[Batch], CellName)] PhiSBatch <- matrixStats::rowMedians(PhiBatch * SBatch) } else { PhiSBatch <- matrixStats::rowMedians(SBatch) } Aux <- (1 / (PhiSBatch * Chain@parameters$mu)) + Chain@parameters$delta * (Chain@parameters$theta[, Batch] + 1) TechVarBatch[, , Batch] <- Chain@parameters$theta[,Batch] / (Aux + Chain@parameters$theta[, Batch]) BioVarBatch[, , Batch] <- (Chain@parameters$delta * (Chain@parameters$theta[, Batch] + 1)) / (Aux + Chain@parameters$theta[, Batch]) } } if (nBatch > 1) { list(TechVarGlobal = TechVarGlobal, BioVarGlobal = BioVarGlobal, TechVarBatch = TechVarBatch, BioVarBatch = BioVarBatch) } else { list(TechVarGlobal = TechVarGlobal, BioVarGlobal = BioVarGlobal) } }
library(ggplot2) library(gstat) library(sp) library(maptools) bDir <- "D:/cenavarro/hnd_usaid/04_interpolation/stations-averages/yearly_pseudost" sY <- 1990 fY <- 2014 oDir <- "D:/cenavarro/hnd_usaid/04_interpolation/outputs_yearly/average" var <- "dter" # List of months mthLs <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec") msk <- raster(paste0("W:/04_interpolation/region/v3/mask")) xt <- extent(msk) for (y in sY:fY){ yrdata_info <- read.csv(paste(bDir,"/", var, "_", y, ".csv" ,sep="")) cols <- colnames(yrdata_info) %in% toupper(mthLs) # Read all files yrdata <- subset(read.csv(paste(bDir,"/", var,"_", y, ".csv" ,sep="")),,cols) for (i in 1:12){ yrdata_2 <- as.data.frame(yrdata[,i]) colnames(yrdata_2) <- "DATA" yrdata_2$x <- yrdata_info$LONG yrdata_2$y <- yrdata_info$LAT coordinates(yrdata_2) = ~x + y x.range <- as.numeric(c(xt@xmin, xt@xmax)) # min/max longitude of the interpolation area y.range <- as.numeric(c(xt@ymin, xt@ymax)) # min/max latitude of the interpolation area # expand points to grid grd <- expand.grid(x = seq(from = x.range[1], to = x.range[2], by = res(msk)[1]), y = seq(from = y.range[1], to = y.range[2], by = res(msk)[2])) coordinates(grd) <- ~x + y gridded(grd) <- TRUE # Interpolate surface and fix the output: idw <- idw(formula = DATA ~ 1, locations = yrdata_2, newdata = grd, idp = 2.0) # kridge <- krige(formula = DATA ~ 1, locations = yrdata_2, newdata = grd) ## [inverse distance weighted interpolation] idw.output = as.data.frame(idw) # output is defined as a data table # kridge.output = as.data.frame(kridge) outRs <- resample(rasterFromXYZ(idw.output[,1:3], res=res(msk), crs=NA, digits=2), msk) outRs <- writeRaster(outRs, paste0(oDir, "/dtridw_", y, "_", i, ".asc")) # plot(outRs) } }
/02_interpolations/01_interpolation_idw.R
no_license
CIAT-DAPA/usaid_hnd
R
false
false
1,950
r
library(ggplot2) library(gstat) library(sp) library(maptools) bDir <- "D:/cenavarro/hnd_usaid/04_interpolation/stations-averages/yearly_pseudost" sY <- 1990 fY <- 2014 oDir <- "D:/cenavarro/hnd_usaid/04_interpolation/outputs_yearly/average" var <- "dter" # List of months mthLs <- c("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec") msk <- raster(paste0("W:/04_interpolation/region/v3/mask")) xt <- extent(msk) for (y in sY:fY){ yrdata_info <- read.csv(paste(bDir,"/", var, "_", y, ".csv" ,sep="")) cols <- colnames(yrdata_info) %in% toupper(mthLs) # Read all files yrdata <- subset(read.csv(paste(bDir,"/", var,"_", y, ".csv" ,sep="")),,cols) for (i in 1:12){ yrdata_2 <- as.data.frame(yrdata[,i]) colnames(yrdata_2) <- "DATA" yrdata_2$x <- yrdata_info$LONG yrdata_2$y <- yrdata_info$LAT coordinates(yrdata_2) = ~x + y x.range <- as.numeric(c(xt@xmin, xt@xmax)) # min/max longitude of the interpolation area y.range <- as.numeric(c(xt@ymin, xt@ymax)) # min/max latitude of the interpolation area # expand points to grid grd <- expand.grid(x = seq(from = x.range[1], to = x.range[2], by = res(msk)[1]), y = seq(from = y.range[1], to = y.range[2], by = res(msk)[2])) coordinates(grd) <- ~x + y gridded(grd) <- TRUE # Interpolate surface and fix the output: idw <- idw(formula = DATA ~ 1, locations = yrdata_2, newdata = grd, idp = 2.0) # kridge <- krige(formula = DATA ~ 1, locations = yrdata_2, newdata = grd) ## [inverse distance weighted interpolation] idw.output = as.data.frame(idw) # output is defined as a data table # kridge.output = as.data.frame(kridge) outRs <- resample(rasterFromXYZ(idw.output[,1:3], res=res(msk), crs=NA, digits=2), msk) outRs <- writeRaster(outRs, paste0(oDir, "/dtridw_", y, "_", i, ".asc")) # plot(outRs) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SaemixData.R \name{subset.SaemixData} \alias{subset.SaemixData} \alias{subset-methods} \alias{subset} \title{Data subsetting} \arguments{ \item{x}{saemixData object} \item{subset}{logical expression indicating elements or rows to keep: missing values are taken as false} \item{...}{additional parameters (ignored)} } \value{ an object of class \code{"\linkS4class{SaemixData}"} } \description{ Return an SaemixData object containing the subset of data which meets conditions. } \examples{ # TODO } \keyword{methods}
/Parallel-MCMC/man/subset.SaemixData.Rd
no_license
BelhalK/AccelerationTrainingAlgorithms
R
false
true
596
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SaemixData.R \name{subset.SaemixData} \alias{subset.SaemixData} \alias{subset-methods} \alias{subset} \title{Data subsetting} \arguments{ \item{x}{saemixData object} \item{subset}{logical expression indicating elements or rows to keep: missing values are taken as false} \item{...}{additional parameters (ignored)} } \value{ an object of class \code{"\linkS4class{SaemixData}"} } \description{ Return an SaemixData object containing the subset of data which meets conditions. } \examples{ # TODO } \keyword{methods}
download <- download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "temp") unzip("temp") unlink("temp") #reading data hpc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?") # add a column of datetime hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S") hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y") #subset the data by the date range hpc_sub <- subset(hpc, (hpc$Date == "2007-02-01" | hpc$Date== "2007-02-02")) #Plot1: building historgram png("plot1.png", width = 480, height = 480) hist(hpc_sub$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", main = "Global Active Power", breaks = 13, ylim = c(0,1200), xlim = c(0, 6)) dev.off()
/Plot1.R
no_license
cypressville/ExData_Plotting1
R
false
false
860
r
download <- download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "temp") unzip("temp") unlink("temp") #reading data hpc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?") # add a column of datetime hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S") hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y") #subset the data by the date range hpc_sub <- subset(hpc, (hpc$Date == "2007-02-01" | hpc$Date== "2007-02-02")) #Plot1: building historgram png("plot1.png", width = 480, height = 480) hist(hpc_sub$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", main = "Global Active Power", breaks = 13, ylim = c(0,1200), xlim = c(0, 6)) dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lists2df.R \name{lists2df} \alias{lists2df} \title{Combine multiple lists element wise to a single data frame} \usage{ lists2df(...) } \arguments{ \item{...}{Lists of the same length} } \value{ A data frame. With one column for each vector present in the lists, e.g., if the lists have two vectors then the resulting data frame will have two columns. An additional column will be added called \code{"index"} that specifies which vector the information was taken from within each list. For example, if each list has two vectors and the first vector is of length 3 and the second is of length 4, then the index column will be \code{c(rep(1,3), rep(2,4))}. } \description{ Take the ith element from each list and combine into a data frame and then rbind the resulting data frames to form a single data frame. } \examples{ ss3sim:::lists2df(list(1:2, 5:10), list(3:4, 5:10)) ss3sim:::lists2df(years = list(1:2, 5:10), fvals = list(3:4, 5:10)) ss3sim:::lists2df(years = list(1:2, 5:10)) \dontshow{ testthat::expect_error( ss3sim:::lists2df(years = list(1:10, 5:10), fisheries = 1:2) ) } } \author{ Kelli Faye Johnson } \keyword{internal}
/man/lists2df.Rd
no_license
realsmak88/ss3sim
R
false
true
1,214
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lists2df.R \name{lists2df} \alias{lists2df} \title{Combine multiple lists element wise to a single data frame} \usage{ lists2df(...) } \arguments{ \item{...}{Lists of the same length} } \value{ A data frame. With one column for each vector present in the lists, e.g., if the lists have two vectors then the resulting data frame will have two columns. An additional column will be added called \code{"index"} that specifies which vector the information was taken from within each list. For example, if each list has two vectors and the first vector is of length 3 and the second is of length 4, then the index column will be \code{c(rep(1,3), rep(2,4))}. } \description{ Take the ith element from each list and combine into a data frame and then rbind the resulting data frames to form a single data frame. } \examples{ ss3sim:::lists2df(list(1:2, 5:10), list(3:4, 5:10)) ss3sim:::lists2df(years = list(1:2, 5:10), fvals = list(3:4, 5:10)) ss3sim:::lists2df(years = list(1:2, 5:10)) \dontshow{ testthat::expect_error( ss3sim:::lists2df(years = list(1:10, 5:10), fisheries = 1:2) ) } } \author{ Kelli Faye Johnson } \keyword{internal}
#######Detection of Disruptors - Genes located in the gap between two DCEs that were united in the Healthy group######## find_genes <- function(chr, start, end, coordinates, DA_group){ data <- data_normalized[,which(DA == DA_group)] z <- rowSums(data) genes <- intersect(rownames(coordinates)[coordinates[, "start"] >= start & coordinates[, "start"] <= end & coordinates[, "chromosome"] == chr], rownames(data)[z > 0]) coordinates[genes,] } find_genes_of_split_cods <- function(overlap_cutoff=0){ genes <- list() for (group in names(rearrangements)){ print(group) gene_name <- character() cod_control <- integer() cod_test <- integer() start <- integer() end <- integer() chromosome <- character() for (chr in names(rearrangements[[group]])){ cod_idxs_control <- unique(rearrangements[[group]][[chr]]$split[, "control"]) if (length(cod_idxs_control) > 0){ print(chr) for (j in cod_idxs_control){ cod_idxs_test <- rearrangements[[group]][[chr]]$split[ rearrangements[[group]][[chr]]$split[,"control"] == j ,] if (all(cod_idxs_test[, "overlap"] < overlap_cutoff)) next cod_idxs_test <- cod_idxs_test[, "test"] split_cods_control <- c_CODs[["healthy"]][[chr]][j, , drop = F] split_cods_test <- c_CODs[[group]][[chr]][cod_idxs_test, , drop = F] genes_control <- find_genes(chr, split_cods_control[,1], split_cods_control[,2], coordinates$coordinates, "Healthy") test_cod <- rep(0, nrow(genes_control)) for (i in 1:nrow(split_cods_test)){ gene_test <- rownames(find_genes(chr, split_cods_test[i,1], split_cods_test[i,2], coordinates$coordinates, toupper(group))) common <- intersect(rownames(genes_control), gene_test) test_cod[match(common, rownames(genes_control))] <- cod_idxs_test[i] } gene_name <- c(gene_name, rownames(genes_control)) cod_control <- c(cod_control, rep(j, nrow(genes_control))) cod_test <- c(cod_test, test_cod) chromosome <- c(chromosome, genes_control[,"chromosome"]) start <- c(start, genes_control[,"start"]) end <- c(end, genes_control[,"end"]) } } } if (!(length(gene_name) > 
0)) next genes[[group]] <- data.frame("gene_name" = gene_name, "cod_control" = cod_control, "cod_test" = cod_test, "chromosome" = chromosome, "start" = start, "end" = end, stringsAsFactors = F) } genes } genes_of_split_cods <- find_genes_of_split_cods(0.1) trim <- function(l){ repeat if (l[[1]][1] == 0) l <- l[-1,] else break l } disruptors <- genes_of_split_cods %>% lapply(FUN = function(x){ unique(x$chromosome) %>% lapply(FUN = function(k) {x %>% filter(chromosome == k) %$% unique(cod_control) %>% lapply(FUN = function(t){ x %>% filter(chromosome == k) %>% filter(.$cod_control == t) %$% data.frame(cod_test, gene_name, stringsAsFactors = F) %>% trim %>% .[rev(rownames(.)),] %>% trim %>% .[rev(rownames(.)),] %$% gene_name[cod_test == 0] })}) %>% unlist })
/scripts/DCE_analysis/Disruptor_detection.R
permissive
Skourtis/SLE_spatial_gene_expression
R
false
false
3,797
r
#######Detection of Disruptors - Genes located in the gap between two DCEs that were united in the Healthy group######## find_genes <- function(chr, start, end, coordinates, DA_group){ data <- data_normalized[,which(DA == DA_group)] z <- rowSums(data) genes <- intersect(rownames(coordinates)[coordinates[, "start"] >= start & coordinates[, "start"] <= end & coordinates[, "chromosome"] == chr], rownames(data)[z > 0]) coordinates[genes,] } find_genes_of_split_cods <- function(overlap_cutoff=0){ genes <- list() for (group in names(rearrangements)){ print(group) gene_name <- character() cod_control <- integer() cod_test <- integer() start <- integer() end <- integer() chromosome <- character() for (chr in names(rearrangements[[group]])){ cod_idxs_control <- unique(rearrangements[[group]][[chr]]$split[, "control"]) if (length(cod_idxs_control) > 0){ print(chr) for (j in cod_idxs_control){ cod_idxs_test <- rearrangements[[group]][[chr]]$split[ rearrangements[[group]][[chr]]$split[,"control"] == j ,] if (all(cod_idxs_test[, "overlap"] < overlap_cutoff)) next cod_idxs_test <- cod_idxs_test[, "test"] split_cods_control <- c_CODs[["healthy"]][[chr]][j, , drop = F] split_cods_test <- c_CODs[[group]][[chr]][cod_idxs_test, , drop = F] genes_control <- find_genes(chr, split_cods_control[,1], split_cods_control[,2], coordinates$coordinates, "Healthy") test_cod <- rep(0, nrow(genes_control)) for (i in 1:nrow(split_cods_test)){ gene_test <- rownames(find_genes(chr, split_cods_test[i,1], split_cods_test[i,2], coordinates$coordinates, toupper(group))) common <- intersect(rownames(genes_control), gene_test) test_cod[match(common, rownames(genes_control))] <- cod_idxs_test[i] } gene_name <- c(gene_name, rownames(genes_control)) cod_control <- c(cod_control, rep(j, nrow(genes_control))) cod_test <- c(cod_test, test_cod) chromosome <- c(chromosome, genes_control[,"chromosome"]) start <- c(start, genes_control[,"start"]) end <- c(end, genes_control[,"end"]) } } } if (!(length(gene_name) > 
0)) next genes[[group]] <- data.frame("gene_name" = gene_name, "cod_control" = cod_control, "cod_test" = cod_test, "chromosome" = chromosome, "start" = start, "end" = end, stringsAsFactors = F) } genes } genes_of_split_cods <- find_genes_of_split_cods(0.1) trim <- function(l){ repeat if (l[[1]][1] == 0) l <- l[-1,] else break l } disruptors <- genes_of_split_cods %>% lapply(FUN = function(x){ unique(x$chromosome) %>% lapply(FUN = function(k) {x %>% filter(chromosome == k) %$% unique(cod_control) %>% lapply(FUN = function(t){ x %>% filter(chromosome == k) %>% filter(.$cod_control == t) %$% data.frame(cod_test, gene_name, stringsAsFactors = F) %>% trim %>% .[rev(rownames(.)),] %>% trim %>% .[rev(rownames(.)),] %$% gene_name[cod_test == 0] })}) %>% unlist })
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/bayessir.R \name{bayessir} \alias{bayessir} \title{PMMH algorithm for time-varying SIRS model} \usage{ bayessir(obscholLIST, obsdaysLIST, COVMATLIST, th, trans, invtrans, pmean, psd, betaIIndex, betaWIndex, gammaIndex, kappaIndex, etaIndex, muIndex, alphasIndex, rhoIndex, startmeansIndex, nu1Index, nu2Index, nu3Index, nu4Index, burn, prelim, iters, thin, tune, ll, psigma, deltavalue, critical, PopSize, theMU, numParticles, resultspath, UseGill, UseSIWR, setBetaW, setKappa, setEta, setRatio, setval, setAlpha0, setAlpha0val, setstartmeansval, usetprior, alphadf, uselaplaceprior, maxWval) } \arguments{ \item{obscholLIST}{List containing the cholera counts for each phase of data} \item{obsdaysLIST}{List containing the observation days for each phase of data} \item{COVMATLIST}{List containing the matricies of daily covariates for each phase of data} \item{th}{Starting value for parameter vales} \item{trans}{function to transform the parameter values} \item{invtrans}{inverse transformation function} \item{pmean}{Prior means for Normal prior distributions} \item{psd}{Prior standard deviations for Normal prior distributions} \item{betaIIndex}{Index of which \code{th} values correspond to \eqn{\beta_I}} \item{betaWIndex}{Index of which \code{th} values correspond to \eqn{\beta_W}} \item{gammaIndex}{Index of which \code{th} values correspond to \eqn{\gamma}} \item{kappaIndex}{Index of which \code{th} values correspond to \eqn{\kappa}} \item{etaIndex}{Index of which \code{th} values correspond to \eqn{\eta}} \item{muIndex}{Index of which \code{th} values correspond to \eqn{\mu}} \item{alphasIndex}{Index of which \code{th} values correspond to \eqn{\alpha} parameters} \item{rhoIndex}{Vector of length equal to number of phases of data, Index of which \code{th} values correspond to \eqn{\rho}, the probability of infected individuals seeking treatment} \item{startmeansIndex}{Index of which 
\code{th} values correspond to means of initial distributions for the numbers of susceptible and infected individuals respectively} \item{nu1Index}{Index of which \code{th} value corresponds to \eqn{\nu_1} power} \item{nu2Index}{Index of which \code{th} value corresponds to \eqn{\nu_2} power} \item{nu3Index}{Index of which \code{th} value corresponds to \eqn{\nu_3} power} \item{nu4Index}{Index of which \code{th} value corresponds to \eqn{\nu_4} power} \item{burn}{Number of iterations for burn-in run} \item{prelim}{Number of total iterations for preliminary run, preliminary run = burn-in run + secondary run} \item{iters}{Number of iterations for final run} \item{thin}{Amount to thin the chain; only every \code{thin}th iteration of the PMMH algorithm is saved} \item{tune}{Tuning parameter for the covariance of the multivariate normal proposal distribution in the final run of the PMMH algorithm} \item{ll}{Starting value for log-likelihood} \item{psigma}{Standard deviation for independent normal proposal distribution in preliminary run} \item{deltavalue}{Initial value to use for tau in tau-leaping algorithm} \item{critical}{Critical size for modified tau-leaping algorithm; if the population of a compartment is lower than this number a single step algorithm is used until the population gets above the critical size.} \item{PopSize}{Population size} \item{theMU}{The rate at which immunity is lost, if setting this value} \item{numParticles}{Number of particles} \item{resultspath}{File path for results} \item{UseGill}{boolian; if 1, uses the gillespie algorithm. If 0, uses tau-leaping algorithm} \item{UseSIWR}{boolian; if 1, uses the SIWR model. If 0, uses SIRS model} \item{setBetaW}{boolian; if 1, sets BetaW parameter. If 0, estimates BetaW parameter.} \item{setKappa}{boolian; if 1, sets Kappa parameter. If 0, estimates Kappa parameter.} \item{setEta}{boolian; if 1, sets Eta parameter. 
If 0, estimates Eta parameter.} \item{setRatio}{boolian; if 1, sets ratio of kappa and eta.} \item{setval}{} \item{setAlpha0}{boolian; if 1, sets alpha0 parameter. If 0, estimates alpha0 parameter.} \item{setAlpha0val}{} \item{setstartmeansval}{} \item{usetprior}{} \item{alphadf}{} \item{uselaplaceprior}{} \item{maxWval}{Upper bound for W compartment. If 0, no bounding.} } \value{ Posterior samples from the final run of the PMMH algorithm Also, writes 4 files which are updated every 100th iteration: 1. prelimpmcmctimes.csv: times and acceptance ratios for preliminary PMMH run 2. prelimthmat.csv: preliminary PMMH output 3. FINALpmcmctimes: times and acceptance ratios for final PMMH run 4. FINALthmat.csv: final PMMH output } \description{ PMMH algorithm for time-varying SIRS model } \examples{ \dontrun{ library(bayessir) ############################ ## simulate data ############################ SimTimes=seq(0,365*4.5, by=14) ############################ # environmental force of infection ############################ int<- -6 A<-2 sincovAmp<- c(2.1,1.8,2,2.2,2) wave<-pi/(365/2) t<-0:(max(SimTimes)) sincov<-sin(wave*t) allsincov=matrix(NA,nrow=length(t),ncol=length(sincovAmp)) for(i in 1:length(sincovAmp)){ allsincov[,i]<-sincovAmp[i]*sincov } sincov[1:365]=allsincov[1:365,1] sincov[366:(365*2)]=allsincov[366:(365*2),2] sincov[(365*2+1):(365*3)]=allsincov[(365*2+1):(365*3),3] sincov[(365*3+1):(365*4)]=allsincov[(365*3+1):(365*4),4] sincov[(365*4+1):length(sincov)]=allsincov[(365*4+1):length(sincov),5] alpha<-exp(int+A*sincov) ########### pop=10000 #population size phiS=2900 phiI=84 th1=.5/10000 #beta th2=0.12 #gamma th3=.0018 #mu rho=90/10000 #reporting rate nu1=1 nu2=1 nu3=0 nu4=0 set.seed(10) sus0=rpois(1,phiS) inf0=rpois(1,phiI) shortstart<-as.matrix(c(sus0,inf0)) allcovs<-enviforce(as.matrix(c(sincov)),SimTimes,c(int,A)) simstates<-matrix(NA,nrow=(length(SimTimes)),ncol=2) simstates[1,]<-shortstart for (i in 2:length(SimTimes)){ 
simstates[i,]<-inhomoSIRSGillespie(simstates[i-1,],pop,SimTimes[i-1],SimTimes[i]-SimTimes[i-1], c(th1,th2,th3,nu1,nu2,nu3,nu4),allcovs[[i-1]][,2],allcovs[[i-1]][,1]) } set.seed(9) SimData<-c() for(i in 1:dim(simstates)[1]) SimData[i]<-rbinom(1,simstates[i,2],rho) ################################################## ##### # data for inference ##### COVMATLIST=list(as.matrix(sincov)) obscholLIST=list(SimData) obsdaysLIST=list(SimTimes) numofcovs=1 ################################################# trans=function(p){ c(log(p[1]), #beta log(p[2]), #gamma log(p[3]), #mu p[4], #alpha0 p[5], #alpha1 logit(p[6]))#rho } invtrans=function(p){ c(exp(p[1]), #beta exp(p[2]), #gamma exp(p[3]), #mu p[4], #alpha0 p[5], #alpha1 expit(p[6]))#rho } #prior means pbetaI=log(1.25e-04) pgamma=log(.1) pmu=log(.0009) palpha0=-8 palphas=rep(0,numofcovs) prho=logit(.03) pmean=c(pbetaI,pgamma,pmu,palpha0,palphas,prho) #prior standard deviations psd=c(5, #beta .09,#gamma .3, #mu 5, #alpha0 5, #alpha1 2) #rho betaIIndex=1 #need one for each phase of data collection, we only simulated one phase gammaIndex=2 muIndex=3 alphasIndex=4:5 rhoIndex=6 startmeansIndex=nu1Index=nu2Index=nu3Index=nu4Index=NA betaWIndex=kappaIndex=etaIndex=NA # Iterations set small for example purposes; increase for applications burn = 0 prelim = 10 iters =10 thin =1 tune=1 psigma<-diag(c(0.012, #beta 0.012, #gamma 0.180, #mu 0.120, #alpha0 0.120, #alpha1 0.012)) #rho #start values #Names of th input are used for the column names in the matrix output th=c( betaI=abs(rnorm(1,th1,th1/3)), gamma=abs(rnorm(1,th2,th2/10)), mu=abs(rnorm(1,th3,th3/10)), alpha0=rnorm(1,int,1), alpha1=rnorm(1,A,1), rho=abs(rnorm(1,rho,rho))) resultspath<-getwd() deltavalue=1 critical=10 numParticles=100 UseGill=0 UseSIWR=0 #Set the population size for inference PopSize=10000 #Set the rate immunity is lost theMU=NA #doesn't matter what this is since we are estimating mu in this example setstartmeansval=list(c(10000*.21,10000*.0015)) 
setBetaW=setKappa=setEta=setRatio=setAlpha0=0 #don't want to set these right now setval=setAlpha0val=NA #don't want to set these right now uset=uselaplace=0 # not using shrinkage priors for alpha parameters alphadf=5 maxW=50000 ll=-50000 bayessirOUT=bayessir(obscholLIST,obsdaysLIST,COVMATLIST, th,trans,invtrans,pmean,psd, betaIIndex,betaWIndex,gammaIndex,kappaIndex,etaIndex,muIndex,alphasIndex,rhoIndex,startmeansIndex,nu1Index,nu2Index,nu3Index,nu4Index, burn,prelim,iters,thin,tune,ll,psigma, deltavalue,critical,PopSize,theMU,numParticles,resultspath,UseGill,UseSIWR,setBetaW,setKappa,setEta,setRatio,setval,setAlpha0, setAlpha0val,setstartmeansval,uset,alphadf,uselaplace,maxW) #Output columns are posterior samples for parameters in th, in addition to the log-likelihood and accepted values of the hidden states susT and infT at the final observation time T #Posterior histograms for parameter values nvars=dim(bayessirOUT)[2] par(mfrow=c(1,nvars-3)) for(i in 1:(nvars-3)) hist(bayessirOUT[,i],main="",xlab=colnames(bayessirOUT)[i]) #Trace plots for all output par(mfrow=c(1,nvars)) for(i in 1:(nvars)) plot(ts(bayessirOUT[,i]),xlab=colnames(bayessirOUT)[i],ylab="") } }
/man/bayessir.Rd
no_license
standardgalactic/bayessir
R
false
false
9,459
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/bayessir.R \name{bayessir} \alias{bayessir} \title{PMMH algorithm for time-varying SIRS model} \usage{ bayessir(obscholLIST, obsdaysLIST, COVMATLIST, th, trans, invtrans, pmean, psd, betaIIndex, betaWIndex, gammaIndex, kappaIndex, etaIndex, muIndex, alphasIndex, rhoIndex, startmeansIndex, nu1Index, nu2Index, nu3Index, nu4Index, burn, prelim, iters, thin, tune, ll, psigma, deltavalue, critical, PopSize, theMU, numParticles, resultspath, UseGill, UseSIWR, setBetaW, setKappa, setEta, setRatio, setval, setAlpha0, setAlpha0val, setstartmeansval, usetprior, alphadf, uselaplaceprior, maxWval) } \arguments{ \item{obscholLIST}{List containing the cholera counts for each phase of data} \item{obsdaysLIST}{List containing the observation days for each phase of data} \item{COVMATLIST}{List containing the matricies of daily covariates for each phase of data} \item{th}{Starting value for parameter vales} \item{trans}{function to transform the parameter values} \item{invtrans}{inverse transformation function} \item{pmean}{Prior means for Normal prior distributions} \item{psd}{Prior standard deviations for Normal prior distributions} \item{betaIIndex}{Index of which \code{th} values correspond to \eqn{\beta_I}} \item{betaWIndex}{Index of which \code{th} values correspond to \eqn{\beta_W}} \item{gammaIndex}{Index of which \code{th} values correspond to \eqn{\gamma}} \item{kappaIndex}{Index of which \code{th} values correspond to \eqn{\kappa}} \item{etaIndex}{Index of which \code{th} values correspond to \eqn{\eta}} \item{muIndex}{Index of which \code{th} values correspond to \eqn{\mu}} \item{alphasIndex}{Index of which \code{th} values correspond to \eqn{\alpha} parameters} \item{rhoIndex}{Vector of length equal to number of phases of data, Index of which \code{th} values correspond to \eqn{\rho}, the probability of infected individuals seeking treatment} \item{startmeansIndex}{Index of which 
\code{th} values correspond to means of initial distributions for the numbers of susceptible and infected individuals respectively} \item{nu1Index}{Index of which \code{th} value corresponds to \eqn{\nu_1} power} \item{nu2Index}{Index of which \code{th} value corresponds to \eqn{\nu_2} power} \item{nu3Index}{Index of which \code{th} value corresponds to \eqn{\nu_3} power} \item{nu4Index}{Index of which \code{th} value corresponds to \eqn{\nu_4} power} \item{burn}{Number of iterations for burn-in run} \item{prelim}{Number of total iterations for preliminary run, preliminary run = burn-in run + secondary run} \item{iters}{Number of iterations for final run} \item{thin}{Amount to thin the chain; only every \code{thin}th iteration of the PMMH algorithm is saved} \item{tune}{Tuning parameter for the covariance of the multivariate normal proposal distribution in the final run of the PMMH algorithm} \item{ll}{Starting value for log-likelihood} \item{psigma}{Standard deviation for independent normal proposal distribution in preliminary run} \item{deltavalue}{Initial value to use for tau in tau-leaping algorithm} \item{critical}{Critical size for modified tau-leaping algorithm; if the population of a compartment is lower than this number a single step algorithm is used until the population gets above the critical size.} \item{PopSize}{Population size} \item{theMU}{The rate at which immunity is lost, if setting this value} \item{numParticles}{Number of particles} \item{resultspath}{File path for results} \item{UseGill}{boolian; if 1, uses the gillespie algorithm. If 0, uses tau-leaping algorithm} \item{UseSIWR}{boolian; if 1, uses the SIWR model. If 0, uses SIRS model} \item{setBetaW}{boolian; if 1, sets BetaW parameter. If 0, estimates BetaW parameter.} \item{setKappa}{boolian; if 1, sets Kappa parameter. If 0, estimates Kappa parameter.} \item{setEta}{boolian; if 1, sets Eta parameter. 
If 0, estimates Eta parameter.} \item{setRatio}{boolian; if 1, sets ratio of kappa and eta.} \item{setval}{} \item{setAlpha0}{boolian; if 1, sets alpha0 parameter. If 0, estimates alpha0 parameter.} \item{setAlpha0val}{} \item{setstartmeansval}{} \item{usetprior}{} \item{alphadf}{} \item{uselaplaceprior}{} \item{maxWval}{Upper bound for W compartment. If 0, no bounding.} } \value{ Posterior samples from the final run of the PMMH algorithm Also, writes 4 files which are updated every 100th iteration: 1. prelimpmcmctimes.csv: times and acceptance ratios for preliminary PMMH run 2. prelimthmat.csv: preliminary PMMH output 3. FINALpmcmctimes: times and acceptance ratios for final PMMH run 4. FINALthmat.csv: final PMMH output } \description{ PMMH algorithm for time-varying SIRS model } \examples{ \dontrun{ library(bayessir) ############################ ## simulate data ############################ SimTimes=seq(0,365*4.5, by=14) ############################ # environmental force of infection ############################ int<- -6 A<-2 sincovAmp<- c(2.1,1.8,2,2.2,2) wave<-pi/(365/2) t<-0:(max(SimTimes)) sincov<-sin(wave*t) allsincov=matrix(NA,nrow=length(t),ncol=length(sincovAmp)) for(i in 1:length(sincovAmp)){ allsincov[,i]<-sincovAmp[i]*sincov } sincov[1:365]=allsincov[1:365,1] sincov[366:(365*2)]=allsincov[366:(365*2),2] sincov[(365*2+1):(365*3)]=allsincov[(365*2+1):(365*3),3] sincov[(365*3+1):(365*4)]=allsincov[(365*3+1):(365*4),4] sincov[(365*4+1):length(sincov)]=allsincov[(365*4+1):length(sincov),5] alpha<-exp(int+A*sincov) ########### pop=10000 #population size phiS=2900 phiI=84 th1=.5/10000 #beta th2=0.12 #gamma th3=.0018 #mu rho=90/10000 #reporting rate nu1=1 nu2=1 nu3=0 nu4=0 set.seed(10) sus0=rpois(1,phiS) inf0=rpois(1,phiI) shortstart<-as.matrix(c(sus0,inf0)) allcovs<-enviforce(as.matrix(c(sincov)),SimTimes,c(int,A)) simstates<-matrix(NA,nrow=(length(SimTimes)),ncol=2) simstates[1,]<-shortstart for (i in 2:length(SimTimes)){ 
simstates[i,]<-inhomoSIRSGillespie(simstates[i-1,],pop,SimTimes[i-1],SimTimes[i]-SimTimes[i-1], c(th1,th2,th3,nu1,nu2,nu3,nu4),allcovs[[i-1]][,2],allcovs[[i-1]][,1]) } set.seed(9) SimData<-c() for(i in 1:dim(simstates)[1]) SimData[i]<-rbinom(1,simstates[i,2],rho) ################################################## ##### # data for inference ##### COVMATLIST=list(as.matrix(sincov)) obscholLIST=list(SimData) obsdaysLIST=list(SimTimes) numofcovs=1 ################################################# trans=function(p){ c(log(p[1]), #beta log(p[2]), #gamma log(p[3]), #mu p[4], #alpha0 p[5], #alpha1 logit(p[6]))#rho } invtrans=function(p){ c(exp(p[1]), #beta exp(p[2]), #gamma exp(p[3]), #mu p[4], #alpha0 p[5], #alpha1 expit(p[6]))#rho } #prior means pbetaI=log(1.25e-04) pgamma=log(.1) pmu=log(.0009) palpha0=-8 palphas=rep(0,numofcovs) prho=logit(.03) pmean=c(pbetaI,pgamma,pmu,palpha0,palphas,prho) #prior standard deviations psd=c(5, #beta .09,#gamma .3, #mu 5, #alpha0 5, #alpha1 2) #rho betaIIndex=1 #need one for each phase of data collection, we only simulated one phase gammaIndex=2 muIndex=3 alphasIndex=4:5 rhoIndex=6 startmeansIndex=nu1Index=nu2Index=nu3Index=nu4Index=NA betaWIndex=kappaIndex=etaIndex=NA # Iterations set small for example purposes; increase for applications burn = 0 prelim = 10 iters =10 thin =1 tune=1 psigma<-diag(c(0.012, #beta 0.012, #gamma 0.180, #mu 0.120, #alpha0 0.120, #alpha1 0.012)) #rho #start values #Names of th input are used for the column names in the matrix output th=c( betaI=abs(rnorm(1,th1,th1/3)), gamma=abs(rnorm(1,th2,th2/10)), mu=abs(rnorm(1,th3,th3/10)), alpha0=rnorm(1,int,1), alpha1=rnorm(1,A,1), rho=abs(rnorm(1,rho,rho))) resultspath<-getwd() deltavalue=1 critical=10 numParticles=100 UseGill=0 UseSIWR=0 #Set the population size for inference PopSize=10000 #Set the rate immunity is lost theMU=NA #doesn't matter what this is since we are estimating mu in this example setstartmeansval=list(c(10000*.21,10000*.0015)) 
setBetaW=setKappa=setEta=setRatio=setAlpha0=0 #don't want to set these right now setval=setAlpha0val=NA #don't want to set these right now uset=uselaplace=0 # not using shrinkage priors for alpha parameters alphadf=5 maxW=50000 ll=-50000 bayessirOUT=bayessir(obscholLIST,obsdaysLIST,COVMATLIST, th,trans,invtrans,pmean,psd, betaIIndex,betaWIndex,gammaIndex,kappaIndex,etaIndex,muIndex,alphasIndex,rhoIndex,startmeansIndex,nu1Index,nu2Index,nu3Index,nu4Index, burn,prelim,iters,thin,tune,ll,psigma, deltavalue,critical,PopSize,theMU,numParticles,resultspath,UseGill,UseSIWR,setBetaW,setKappa,setEta,setRatio,setval,setAlpha0, setAlpha0val,setstartmeansval,uset,alphadf,uselaplace,maxW) #Output columns are posterior samples for parameters in th, in addition to the log-likelihood and accepted values of the hidden states susT and infT at the final observation time T #Posterior histograms for parameter values nvars=dim(bayessirOUT)[2] par(mfrow=c(1,nvars-3)) for(i in 1:(nvars-3)) hist(bayessirOUT[,i],main="",xlab=colnames(bayessirOUT)[i]) #Trace plots for all output par(mfrow=c(1,nvars)) for(i in 1:(nvars)) plot(ts(bayessirOUT[,i]),xlab=colnames(bayessirOUT)[i],ylab="") } }
## Auto-generated fuzz/valgrind regression case for meteor:::ET0_Makkink.
## NOTE(review): inputs are deliberately degenerate (zero-length Rs, denormal
## and astronomically large temperatures) -- assumes the compiled backend
## tolerates such values; confirm against the meteor package sources.
rs_in   <- numeric(0)
atmp_in <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
relh_in <- -1.72131968218895e+83
temp_in <- c(8.5728629954997e-312, 1.56898424065867e+82,
             8.96970809549085e-158, -1.3258495253834e-113,
             2.79620616433656e-119, -6.80033518839696e+41,
             2.68298522855314e-211, 1444042902784.06,
             6.68889884134308e+51, -4.05003163986346e-308,
             -3.52601820453991e+43, -1.49815227045093e+197,
             -2.61605817623304e+76, -1.18078903777423e-90,
             1.86807199752012e+112, -5.58551357556946e+160,
             2.00994342527714e-162, 1.81541609400943e-79,
             7.89363005545926e+139, 2.3317908961407e-93,
             6.76053925883897e+305)

testlist <- list(Rs = rs_in, atmp = atmp_in, relh = relh_in, temp = temp_in)
result <- do.call(meteor:::ET0_Makkink, testlist)
str(result)
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615846096-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
736
r
## Auto-generated fuzz/valgrind regression case for meteor:::ET0_Makkink.
## NOTE(review): inputs are deliberately degenerate (zero-length Rs, denormal
## and astronomically large temperatures) -- assumes the compiled backend
## tolerates such values; confirm against the meteor package sources.
rs_in   <- numeric(0)
atmp_in <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
relh_in <- -1.72131968218895e+83
temp_in <- c(8.5728629954997e-312, 1.56898424065867e+82,
             8.96970809549085e-158, -1.3258495253834e-113,
             2.79620616433656e-119, -6.80033518839696e+41,
             2.68298522855314e-211, 1444042902784.06,
             6.68889884134308e+51, -4.05003163986346e-308,
             -3.52601820453991e+43, -1.49815227045093e+197,
             -2.61605817623304e+76, -1.18078903777423e-90,
             1.86807199752012e+112, -5.58551357556946e+160,
             2.00994342527714e-162, 1.81541609400943e-79,
             7.89363005545926e+139, 2.3317908961407e-93,
             6.76053925883897e+305)

testlist <- list(Rs = rs_in, atmp = atmp_in, relh = relh_in, temp = temp_in)
result <- do.call(meteor:::ET0_Makkink, testlist)
str(result)
#' Read and process SAEB (up to 2005) microdata files.
#'
#' Scans `data_folder` recursively for fixed-width ".txt"/".TXT" files, reads
#' each one with the column positions returned by `dict_saeb_ate_2005()`, and
#' writes the result as an .rds file under
#' `dest_folder_prefix/<year>/<clean_name>.rds`, where <year> is the first
#' four-digit sequence found in the file path.
#'
#' @param data_folder Folder containing the raw SAEB text files.
#' @param dest_folder_prefix Root folder for the processed .rds output
#'   (default 'data/processed').
#' @return NULL, invisibly; called for its side effects (directory creation
#'   and .rds files written to disk).
read_and_process_saeb_ate_2005 <- function(data_folder,
                                           dest_folder_prefix = 'data/processed') {

  `%>%` <- magrittr::`%>%`

  ## Anchor and escape the extension: the original pattern ".txt|.TXT" matched
  ## those four characters anywhere in a file name ("." = any character).
  list_of_files <- list.files(
    data_folder,
    pattern = "\\.txt$|\\.TXT$",
    recursive = TRUE,
    full.names = TRUE
  )

  purrr::walk(
    list_of_files,
    ## Note: the original declared `function(filename = .x, ...)`; purrr
    ## passes the element positionally, so the `.x` default (an undefined
    ## name) was dead and has been dropped.
    function(filename, dest_folder_prefix) {

      message(filename)

      ## The first four-digit run in the path is taken as the survey year.
      year <- stringr::str_extract(filename, "[0-9]{4}")

      dir.create(
        file.path(dest_folder_prefix, year),
        showWarnings = FALSE,
        recursive = TRUE
      )

      ## Basename -> snake_case -> ".rds". make_clean_names() typically
      ## lowercases and turns the "." before the extension into "_", so the
      ## pattern accepts either separator and either case, anchored at the
      ## end of the name.
      file_save_name <-
        strsplit(filename, split = "/") %>%
        unlist() %>%
        dplyr::last() %>%
        janitor::make_clean_names() %>%
        stringr::str_replace("[._][Tt][Xx][Tt]$", ".rds")

      tbl_read <- readr::read_fwf(
        filename,
        col_positions = dict_saeb_ate_2005(filename)
      )

      readr::write_rds(
        x = tbl_read,
        path = file.path(dest_folder_prefix, year, file_save_name)
      )
    },
    dest_folder_prefix = dest_folder_prefix
  )
}
/functions/read_and_process_saeb_ate_2005.R
no_license
paeselhz/microdados_saeb_ate_2005
R
false
false
1,330
r
#' Read and process SAEB (up to 2005) microdata files.
#'
#' Scans `data_folder` recursively for fixed-width ".txt"/".TXT" files, reads
#' each one with the column positions returned by `dict_saeb_ate_2005()`, and
#' writes the result as an .rds file under
#' `dest_folder_prefix/<year>/<clean_name>.rds`, where <year> is the first
#' four-digit sequence found in the file path.
#'
#' @param data_folder Folder containing the raw SAEB text files.
#' @param dest_folder_prefix Root folder for the processed .rds output
#'   (default 'data/processed').
#' @return NULL, invisibly; called for its side effects (directory creation
#'   and .rds files written to disk).
read_and_process_saeb_ate_2005 <- function(data_folder,
                                           dest_folder_prefix = 'data/processed') {

  `%>%` <- magrittr::`%>%`

  ## Anchor and escape the extension: the original pattern ".txt|.TXT" matched
  ## those four characters anywhere in a file name ("." = any character).
  list_of_files <- list.files(
    data_folder,
    pattern = "\\.txt$|\\.TXT$",
    recursive = TRUE,
    full.names = TRUE
  )

  purrr::walk(
    list_of_files,
    ## Note: the original declared `function(filename = .x, ...)`; purrr
    ## passes the element positionally, so the `.x` default (an undefined
    ## name) was dead and has been dropped.
    function(filename, dest_folder_prefix) {

      message(filename)

      ## The first four-digit run in the path is taken as the survey year.
      year <- stringr::str_extract(filename, "[0-9]{4}")

      dir.create(
        file.path(dest_folder_prefix, year),
        showWarnings = FALSE,
        recursive = TRUE
      )

      ## Basename -> snake_case -> ".rds". make_clean_names() typically
      ## lowercases and turns the "." before the extension into "_", so the
      ## pattern accepts either separator and either case, anchored at the
      ## end of the name.
      file_save_name <-
        strsplit(filename, split = "/") %>%
        unlist() %>%
        dplyr::last() %>%
        janitor::make_clean_names() %>%
        stringr::str_replace("[._][Tt][Xx][Tt]$", ".rds")

      tbl_read <- readr::read_fwf(
        filename,
        col_positions = dict_saeb_ate_2005(filename)
      )

      readr::write_rds(
        x = tbl_read,
        path = file.path(dest_folder_prefix, year, file_save_name)
      )
    },
    dest_folder_prefix = dest_folder_prefix
  )
}
#' Create the base ggplot object for a topology plot.
#'
#' Builds a blank-canvas ggplot (no panel background, axis text, ticks, or
#' titles) mapping the `xlvl`/`ylvl` columns of `db` to x/y. Callers add
#' their own layers on top of the returned object.
#'
#' @param db A data.frame (or similar) with `xlvl` and `ylvl` columns giving
#'   the plot coordinates.
#' @return A ggplot object with a blank theme.
makeTopologyPlotBase <- function(db) {
  ## Fail loudly if ggplot2 is unavailable: a bare require() returns FALSE
  ## and execution would continue to a cryptic "could not find function"
  ## error at the ggplot() call below.
  if (!require("ggplot2", character.only = TRUE, quietly = TRUE)) {
    stop("makeTopologyPlotBase() requires the 'ggplot2' package.",
         call. = FALSE)
  }
  blank_theme <- theme(panel.background = element_blank(),
                       axis.text = element_blank(),
                       axis.ticks = element_blank(),
                       axis.title = element_blank())
  ggplot(db, aes(y = ylvl, x = xlvl)) + blank_theme
}
/R/makeTopologyPlotBase.R
no_license
catherine-hhs/bbeaR
R
false
false
275
r
#' Create the base ggplot object for a topology plot.
#'
#' Builds a blank-canvas ggplot (no panel background, axis text, ticks, or
#' titles) mapping the `xlvl`/`ylvl` columns of `db` to x/y. Callers add
#' their own layers on top of the returned object.
#'
#' @param db A data.frame (or similar) with `xlvl` and `ylvl` columns giving
#'   the plot coordinates.
#' @return A ggplot object with a blank theme.
makeTopologyPlotBase <- function(db) {
  ## Fail loudly if ggplot2 is unavailable: a bare require() returns FALSE
  ## and execution would continue to a cryptic "could not find function"
  ## error at the ggplot() call below.
  if (!require("ggplot2", character.only = TRUE, quietly = TRUE)) {
    stop("makeTopologyPlotBase() requires the 'ggplot2' package.",
         call. = FALSE)
  }
  blank_theme <- theme(panel.background = element_blank(),
                       axis.text = element_blank(),
                       axis.ticks = element_blank(),
                       axis.title = element_blank())
  ggplot(db, aes(y = ylvl, x = xlvl)) + blank_theme
}
\name{write.EcoNet} \alias{write.EcoNet} \title{ Write enaR models to an EcoNet formatted file. } \description{ Creates an EcoNet model from an enaR network object that can be used with the online interface for EcoNet. } \usage{ write.EcoNet(x='model',file='file path',mn='ena_model') } \arguments{ \item{x}{ Network object. } \item{file}{ The file name or path. If a simple file name is given, this function uses the current working directory by default. } \item{mn}{ The model name that EcoNet will use. The DEFAULT is 'ena_model'. } } \value{ An EcoNet formatted text file is created from the model, which can be input at http://eco.engr.uga.edu. } \references{ About EcoNet (http://eco.engr.uga.edu/DOC/econet1.html) Kazanci, C. 2009. Handbook of Ecological Modelling and Informatics, by WIT Press. } \author{ Matthew K. Lau (enaR.maintainer@gmail.com) }
/man/write.EcoNet.Rd
no_license
PatrickPata/enaR
R
false
false
909
rd
\name{write.EcoNet} \alias{write.EcoNet} \title{ Write enaR models to an EcoNet formatted file. } \description{ Creates an EcoNet model from an enaR network object that can be used with the online interface for EcoNet. } \usage{ write.EcoNet(x='model',file='file path',mn='ena_model') } \arguments{ \item{x}{ Network object. } \item{file}{ The file name or path. If a simple file name is given, this function uses the current working directory by default. } \item{mn}{ The model name that EcoNet will use. The DEFAULT is 'ena_model'. } } \value{ An EcoNet formatted text file is created from the model, which can be input at http://eco.engr.uga.edu. } \references{ About EcoNet (http://eco.engr.uga.edu/DOC/econet1.html) Kazanci, C. 2009. Handbook of Ecological Modelling and Informatics, by WIT Press. } \author{ Matthew K. Lau (enaR.maintainer@gmail.com) }
load("~/workspace/lits/attrib/attrib_year2013/data/lits.RData") tbl <- macro[, c(1,8:14)] tbl[,c(2:7)] <- round(tbl[,c(2:7)], 2) tbl <- tbl[order(tbl$undp_hdi), ] names(tbl) <- c("country","GDP Change 2007-1010","Failed States Index", "Voice And Accountability (WB)","HDI","Democracy (Freedom House)", "Gini UNU Wider","Country group")
/code/iv_macro_table.R
permissive
muuankarski/attributions
R
false
false
367
r
load("~/workspace/lits/attrib/attrib_year2013/data/lits.RData") tbl <- macro[, c(1,8:14)] tbl[,c(2:7)] <- round(tbl[,c(2:7)], 2) tbl <- tbl[order(tbl$undp_hdi), ] names(tbl) <- c("country","GDP Change 2007-1010","Failed States Index", "Voice And Accountability (WB)","HDI","Democracy (Freedom House)", "Gini UNU Wider","Country group")
library("qualityTools") pdf("ppplot.pdf", bg="transparent") ppPlot(iris$Petal.Length, main="Normal P-P Plot iris$Petal.Length", pch=19, cex=0.5) dev.off()
/inst/examples/data/distribution/ppplot.R
no_license
Kale14/mmstat4
R
false
false
155
r
library("qualityTools") pdf("ppplot.pdf", bg="transparent") ppPlot(iris$Petal.Length, main="Normal P-P Plot iris$Petal.Length", pch=19, cex=0.5) dev.off()
\name{workbook-class}
\Rdversion{1.1}
\docType{class}
\alias{workbook-class}
\title{Class "workbook"}
\description{
This is \pkg{XLConnect}'s main entity representing a Microsoft Excel workbook.
S4 objects of this class and corresponding methods are used to manipulate the
underlying Excel workbook instances.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{\link{loadWorkbook}(filename, create)}.
This is a shortcut form of \code{new("workbook", filename, create)} with some
additional error checking.
}
\section{Slots}{
  \describe{
    \item{\code{filename}:}{Object of class \code{character} which represents
    the filename of the underlying Microsoft Excel workbook.}
    \item{\code{jobj}:}{Object of class \code{jobjRef} (see package \pkg{rJava})
    which represents a Java object reference that is used in the back-end to
    manipulate the underlying Excel workbook instance.}
  }
  Note: The \code{jobj} slot should not be accessed directly. \code{workbook}
  objects should only be manipulated via the corresponding methods.
}
\references{
Wikipedia: Office Open XML\cr
\url{http://en.wikipedia.org/wiki/Office_Open_XML}
}
\author{
Martin Studer\cr
Mirai Solutions GmbH \url{http://www.mirai-solutions.com}
}
\note{
\pkg{XLConnect} supports both Excel 97-2003 (*.xls) and OOXML (Excel 2007+, *.xlsx)
file formats.\cr\cr
A \code{workbook}'s underlying Excel file is not saved (or being created in case
the file did not exist and \code{create = TRUE} has been specified) unless the
\code{\link[=saveWorkbook-methods]{saveWorkbook}} method has been called on the
object. This provides more flexibility to the user to decide when changes are
saved and also provides better performance in that several changes can be
written in one go (normally at the end, rather than after every operation
causing the file to be rewritten again completely each time).
This is due to the fact that workbooks are manipulated in-memory and are only
written to disk by specifically calling
\code{\link[=saveWorkbook-methods]{saveWorkbook}}.
}
\seealso{
\code{\link{loadWorkbook}}, \code{\link[=saveWorkbook-methods]{saveWorkbook}}
}
\examples{
# Create a new workbook 'myWorkbook.xlsx'
# (assuming the file to not exist already)
wb <- loadWorkbook("myWorkbook.xlsx", create = TRUE)

# Create a worksheet called 'mtcars'
createSheet(wb, name = "mtcars")

# Write built-in dataset 'mtcars' to sheet 'mtcars' created above
writeWorksheet(wb, mtcars, sheet = "mtcars")

# Save workbook - this actually writes the file 'myWorkbook.xlsx' to disk
saveWorkbook(wb)
}
\keyword{classes}
/man/workbook-class.Rd
no_license
HouYonghui/xlconnect
R
false
false
2,606
rd
\name{workbook-class}
\Rdversion{1.1}
\docType{class}
\alias{workbook-class}
\title{Class "workbook"}
\description{
This is \pkg{XLConnect}'s main entity representing a Microsoft Excel workbook.
S4 objects of this class and corresponding methods are used to manipulate the
underlying Excel workbook instances.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{\link{loadWorkbook}(filename, create)}.
This is a shortcut form of \code{new("workbook", filename, create)} with some
additional error checking.
}
\section{Slots}{
  \describe{
    \item{\code{filename}:}{Object of class \code{character} which represents
    the filename of the underlying Microsoft Excel workbook.}
    \item{\code{jobj}:}{Object of class \code{jobjRef} (see package \pkg{rJava})
    which represents a Java object reference that is used in the back-end to
    manipulate the underlying Excel workbook instance.}
  }
  Note: The \code{jobj} slot should not be accessed directly. \code{workbook}
  objects should only be manipulated via the corresponding methods.
}
\references{
Wikipedia: Office Open XML\cr
\url{http://en.wikipedia.org/wiki/Office_Open_XML}
}
\author{
Martin Studer\cr
Mirai Solutions GmbH \url{http://www.mirai-solutions.com}
}
\note{
\pkg{XLConnect} supports both Excel 97-2003 (*.xls) and OOXML (Excel 2007+, *.xlsx)
file formats.\cr\cr
A \code{workbook}'s underlying Excel file is not saved (or being created in case
the file did not exist and \code{create = TRUE} has been specified) unless the
\code{\link[=saveWorkbook-methods]{saveWorkbook}} method has been called on the
object. This provides more flexibility to the user to decide when changes are
saved and also provides better performance in that several changes can be
written in one go (normally at the end, rather than after every operation
causing the file to be rewritten again completely each time).
This is due to the fact that workbooks are manipulated in-memory and are only
written to disk by specifically calling
\code{\link[=saveWorkbook-methods]{saveWorkbook}}.
}
\seealso{
\code{\link{loadWorkbook}}, \code{\link[=saveWorkbook-methods]{saveWorkbook}}
}
\examples{
# Create a new workbook 'myWorkbook.xlsx'
# (assuming the file to not exist already)
wb <- loadWorkbook("myWorkbook.xlsx", create = TRUE)

# Create a worksheet called 'mtcars'
createSheet(wb, name = "mtcars")

# Write built-in dataset 'mtcars' to sheet 'mtcars' created above
writeWorksheet(wb, mtcars, sheet = "mtcars")

# Save workbook - this actually writes the file 'myWorkbook.xlsx' to disk
saveWorkbook(wb)
}
\keyword{classes}
library(HK80)

## Example for HK1980GRID_TO_WGS84GEO: convert HK1980GRID coordinates to
## WGS84 geographic coordinates (latitude/longitude).
## Keywords: HK1980GRID, WGS84GEO
options(digits = 15)

wgs84 <- HK1980GRID_TO_WGS84GEO(820351.389, 832591.320)
wgs84
## Expected output:
##   $latitude  : 22.3221739419203
##   $longitude : 114.141179433862
## Reference answer from the online conversion tool
## (http://www.geodetic.gov.hk/smo/tform/tform.aspx):
##   22.322172084  114.141187917
/data/genthat_extracted_code/HK80/examples/HK1980GRID_TO_WGS84GEO.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
497
r
library(HK80)

## Example for HK1980GRID_TO_WGS84GEO: convert HK1980GRID coordinates to
## WGS84 geographic coordinates (latitude/longitude).
## Keywords: HK1980GRID, WGS84GEO
options(digits = 15)

wgs84 <- HK1980GRID_TO_WGS84GEO(820351.389, 832591.320)
wgs84
## Expected output:
##   $latitude  : 22.3221739419203
##   $longitude : 114.141179433862
## Reference answer from the online conversion tool
## (http://www.geodetic.gov.hk/smo/tform/tform.aspx):
##   22.322172084  114.141187917
\name{mh25} \alias{mh25} \title{Function to return the MH25 hydrologic indicator statistic for a given data frame} \usage{ mh25(qfiletempf) } \arguments{ \item{qfiletempf}{data frame containing a "discharge" column containing daily flow values} } \value{ mh25 numeric value of MH25 for the given data frame } \description{ This function accepts a data frame that contains a column named "discharge" and calculates MH25, high peak flow. Compute the average peak-flow value for flow events above a threshold equal to three times the median flow for the entire record. MH25 is the average peak flow divided by the median flow for the entire record (dimensionless-temporal). } \examples{ qfiletempf<-sampleData mh25(qfiletempf) }
/man/mh25.Rd
no_license
tsangyp/EflowStats
R
false
false
731
rd
\name{mh25} \alias{mh25} \title{Function to return the MH25 hydrologic indicator statistic for a given data frame} \usage{ mh25(qfiletempf) } \arguments{ \item{qfiletempf}{data frame containing a "discharge" column containing daily flow values} } \value{ mh25 numeric value of MH25 for the given data frame } \description{ This function accepts a data frame that contains a column named "discharge" and calculates MH25, high peak flow. Compute the average peak-flow value for flow events above a threshold equal to three times the median flow for the entire record. MH25 is the average peak flow divided by the median flow for the entire record (dimensionless-temporal). } \examples{ qfiletempf<-sampleData mh25(qfiletempf) }
#' Read outputs of A_Level.out
#'
#' Parses the "A_Level.out" table written by a HYDRUS project and returns it
#' as a data.frame. When the file contains several concatenated runs (each
#' run restarts the "A-level" counter at 1), the cumulative "sum(...)"
#' columns of every later run are offset by the final totals of the
#' preceding run so that the returned table is cumulative across the whole
#' record.
#'
#' @param project.path Path of the HYDRUS project directory.
#' @param out.file Name of the A-level file; "A_Level.out" is the default.
#' @param output Vector of output column names that should be read.
#'   NOTE(review): this argument is currently not used to subset the result;
#'   it is kept for interface compatibility.
#' @param warn Logical; if FALSE (the default), warnings raised while parsing
#'   are suppressed for the duration of this call only. (The original set
#'   options(warn = -1) globally, ignored this flag, and never restored the
#'   caller's setting.)
#' @param ... Unused; kept for interface compatibility.
#' @return A data.frame with one row per A-level record.
read.alevel.out <- function(project.path, out.file = "A_Level.out",
                            output = NULL, warn = FALSE, ...) {
  if (is.null(output) || missing(output)) {
    output <- c("sum(rTop)", "sum(rRoot)", "sum(vTop)", "sum(vRoot)",
                "sum(vBot)", "hTop", "hRoot", "hBot", "A-level")
  }

  ## Honor the 'warn' flag, and restore the caller's warning option on exit.
  if (!isTRUE(warn)) {
    old_opts <- options(warn = -1)
    on.exit(options(old_opts), add = TRUE)
  }

  alevel_out <- data.table::fread(input = file.path(project.path, out.file),
                                  fill = TRUE,
                                  blank.lines.skip = TRUE,
                                  skip = 2,
                                  header = TRUE)

  ## Coerce every column to numeric; non-numeric trailer lines become NA and
  ## are dropped.
  alevel_out <- apply(alevel_out, MARGIN = 2, FUN = as.numeric)
  alevel_out <- na.omit(alevel_out)
  alevel_out <- data.frame(alevel_out, check.names = FALSE, row.names = NULL)

  ## Rows where the A-level counter restarts at 1 mark the start of a run.
  astart_ind <- which(alevel_out$`A-level` == 1)
  sum_cols_ind <- grep("sum", names(alevel_out))
  sum_col_names <- names(alevel_out)[sum_cols_ind]

  ## Make the cumulative columns continuous across runs. Guarded so that a
  ## file containing a single run is returned unchanged -- the original
  ## 2:length(astart_ind) would iterate over c(2, 1) in that case.
  if (length(astart_ind) >= 2) {
    for (i in 2:length(astart_ind)) {
      ## Final cumulative totals of the run immediately before run i.
      run1_totals <- alevel_out[(astart_ind[i] - 1), sum_cols_ind]
      if (i == length(astart_ind)) {
        run_i_ind <- astart_ind[i]:nrow(alevel_out)
      } else {
        run_i_ind <- astart_ind[i]:(astart_ind[i + 1] - 1)
      }
      aout_j <- alevel_out[run_i_ind, ]
      for (j in sum_col_names) {
        alevel_out[run_i_ind, j] <- aout_j[, j] + run1_totals[[j]]
      }
    }
  }

  return(alevel_out)
}
/R/read_alevel_out.R
no_license
hydrosphag/hydrusR
R
false
false
2,352
r
#' Read outputs of A_Level.out
#'
#' Parses the "A_Level.out" table written by a HYDRUS project and returns it
#' as a data.frame. When the file contains several concatenated runs (each
#' run restarts the "A-level" counter at 1), the cumulative "sum(...)"
#' columns of every later run are offset by the final totals of the
#' preceding run so that the returned table is cumulative across the whole
#' record.
#'
#' @param project.path Path of the HYDRUS project directory.
#' @param out.file Name of the A-level file; "A_Level.out" is the default.
#' @param output Vector of output column names that should be read.
#'   NOTE(review): this argument is currently not used to subset the result;
#'   it is kept for interface compatibility.
#' @param warn Logical; if FALSE (the default), warnings raised while parsing
#'   are suppressed for the duration of this call only. (The original set
#'   options(warn = -1) globally, ignored this flag, and never restored the
#'   caller's setting.)
#' @param ... Unused; kept for interface compatibility.
#' @return A data.frame with one row per A-level record.
read.alevel.out <- function(project.path, out.file = "A_Level.out",
                            output = NULL, warn = FALSE, ...) {
  if (is.null(output) || missing(output)) {
    output <- c("sum(rTop)", "sum(rRoot)", "sum(vTop)", "sum(vRoot)",
                "sum(vBot)", "hTop", "hRoot", "hBot", "A-level")
  }

  ## Honor the 'warn' flag, and restore the caller's warning option on exit.
  if (!isTRUE(warn)) {
    old_opts <- options(warn = -1)
    on.exit(options(old_opts), add = TRUE)
  }

  alevel_out <- data.table::fread(input = file.path(project.path, out.file),
                                  fill = TRUE,
                                  blank.lines.skip = TRUE,
                                  skip = 2,
                                  header = TRUE)

  ## Coerce every column to numeric; non-numeric trailer lines become NA and
  ## are dropped.
  alevel_out <- apply(alevel_out, MARGIN = 2, FUN = as.numeric)
  alevel_out <- na.omit(alevel_out)
  alevel_out <- data.frame(alevel_out, check.names = FALSE, row.names = NULL)

  ## Rows where the A-level counter restarts at 1 mark the start of a run.
  astart_ind <- which(alevel_out$`A-level` == 1)
  sum_cols_ind <- grep("sum", names(alevel_out))
  sum_col_names <- names(alevel_out)[sum_cols_ind]

  ## Make the cumulative columns continuous across runs. Guarded so that a
  ## file containing a single run is returned unchanged -- the original
  ## 2:length(astart_ind) would iterate over c(2, 1) in that case.
  if (length(astart_ind) >= 2) {
    for (i in 2:length(astart_ind)) {
      ## Final cumulative totals of the run immediately before run i.
      run1_totals <- alevel_out[(astart_ind[i] - 1), sum_cols_ind]
      if (i == length(astart_ind)) {
        run_i_ind <- astart_ind[i]:nrow(alevel_out)
      } else {
        run_i_ind <- astart_ind[i]:(astart_ind[i + 1] - 1)
      }
      aout_j <- alevel_out[run_i_ind, ]
      for (j in sum_col_names) {
        alevel_out[run_i_ind, j] <- aout_j[, j] + run1_totals[[j]]
      }
    }
  }

  return(alevel_out)
}
## Exploratory data preparation for the ad-fraud click-detection task.
library(dplyr)
library(xgboost)
library(caret)
library(data.table)

sm_data <- read.csv("train_sample.csv")
summary(sm_data)
summary(sm_data[sm_data$is_attributed == 1, ])

pacman::p_load(knitr, tidyverse, highcharter, data.table, lubridate, pROC,
               tictoc, DescTools, lightgbm)

head(sm_data[sm_data$is_attributed == 1, ], n = 10)
summary(sm_data)

## Checking number of rows.
n_r <- nrow(sm_data)
# n_r_os = aggregate(sm_data$device, by = list(c(0, 1, 2)), FUN = nrow)
grp_os <- group_by(sm_data, is_attributed)
summarise(grp_os, nr = n())
## (The original file contained a stray "%n_r_os=" token here -- an R parse
## error, apparently a mistyped comment -- kept only as this note.)

## Count per device group.
sm_data %>% group_by(device) %>% summarise(n = n())

count(sm_data[sm_data$is_attributed == 1, ])

## Data preparation, take 2.
trainData <- fread('train_sample.csv', drop = c(1, 6, 7))
# trainData <- fread('../input/train.csv', drop = c(1, 6, 7), nrows = 80000000)

## Balance the classes.
trainDataDwnSmpl <- downSample(trainData[, -5], as.factor(trainData$is_attributed))

## We rbind trainDataDwnSmpl and z further below, so remember where the
## training data ends.
endOfTrainData <- dim(trainDataDwnSmpl)[1]

## Discard the "ip" (2) and "click time" (7) columns.
testData <- fread('test_supplement.csv', drop = c(2, 7))

## All columns of testData except the first ("click_id").
z <- testData[, -1]

## allData avoids mismatches in the dummy variables between train and test
## when we one-hot encode.
allData <- rbind(trainDataDwnSmpl, z, fill = TRUE)

## One-hot encode app.
apps <- as.factor(allData$app)
apps_dummy <- Matrix::sparse.model.matrix(~ 0 + apps)

## One-hot encode devices.
devices <- as.factor(allData$device)
devices_dummy <- Matrix::sparse.model.matrix(~ 0 + devices)
## NOTE(review): dplyr::count() expects a data frame; calling it on a logical
## sparse matrix is suspect -- possibly sum(devices_dummy == 2) was intended.
count(devices_dummy == 2)

## One-hot encode oss.
oss <- as.factor(allData$os)
oss_dummy <- Matrix::sparse.model.matrix(~ 0 + oss)

## One-hot encode channels.
channels <- as.factor(allData$channel)
channels_dummy <- Matrix::sparse.model.matrix(~ 0 + channels)
/Data_manipulation.R
permissive
Jinchili/Fraud-click-detection
R
false
false
1,832
r
## Exploratory data preparation for the ad-fraud click-detection task.
library(dplyr)
library(xgboost)
library(caret)
library(data.table)

sm_data <- read.csv("train_sample.csv")
summary(sm_data)
summary(sm_data[sm_data$is_attributed == 1, ])

pacman::p_load(knitr, tidyverse, highcharter, data.table, lubridate, pROC,
               tictoc, DescTools, lightgbm)

head(sm_data[sm_data$is_attributed == 1, ], n = 10)
summary(sm_data)

## Checking number of rows.
n_r <- nrow(sm_data)
# n_r_os = aggregate(sm_data$device, by = list(c(0, 1, 2)), FUN = nrow)
grp_os <- group_by(sm_data, is_attributed)
summarise(grp_os, nr = n())
## (The original file contained a stray "%n_r_os=" token here -- an R parse
## error, apparently a mistyped comment -- kept only as this note.)

## Count per device group.
sm_data %>% group_by(device) %>% summarise(n = n())

count(sm_data[sm_data$is_attributed == 1, ])

## Data preparation, take 2.
trainData <- fread('train_sample.csv', drop = c(1, 6, 7))
# trainData <- fread('../input/train.csv', drop = c(1, 6, 7), nrows = 80000000)

## Balance the classes.
trainDataDwnSmpl <- downSample(trainData[, -5], as.factor(trainData$is_attributed))

## We rbind trainDataDwnSmpl and z further below, so remember where the
## training data ends.
endOfTrainData <- dim(trainDataDwnSmpl)[1]

## Discard the "ip" (2) and "click time" (7) columns.
testData <- fread('test_supplement.csv', drop = c(2, 7))

## All columns of testData except the first ("click_id").
z <- testData[, -1]

## allData avoids mismatches in the dummy variables between train and test
## when we one-hot encode.
allData <- rbind(trainDataDwnSmpl, z, fill = TRUE)

## One-hot encode app.
apps <- as.factor(allData$app)
apps_dummy <- Matrix::sparse.model.matrix(~ 0 + apps)

## One-hot encode devices.
devices <- as.factor(allData$device)
devices_dummy <- Matrix::sparse.model.matrix(~ 0 + devices)
## NOTE(review): dplyr::count() expects a data frame; calling it on a logical
## sparse matrix is suspect -- possibly sum(devices_dummy == 2) was intended.
count(devices_dummy == 2)

## One-hot encode oss.
oss <- as.factor(allData$os)
oss_dummy <- Matrix::sparse.model.matrix(~ 0 + oss)

## One-hot encode channels.
channels <- as.factor(allData$channel)
channels_dummy <- Matrix::sparse.model.matrix(~ 0 + channels)
# VariabilityFunctions.R
# Helpers for quantifying per-cell signal variability along pseudotime
# trajectories of LDA-imputed single-cell chromatin data: linear-fit slopes,
# pseudotime binning, spatial autocorrelation along chromosomes, and a large
# diagnostic plotting routine.
#
# NOTE(review): this file assumes dplyr/tidyr/ggplot2 are attached and relies
# on project helpers defined elsewhere (GetStart, GetEnd, GetChromo,
# multiplot) plus, in places flagged below, globals from the calling
# environment (mat.sub.merge, trajs, dat.trajs.long, jmerge, pos.max,
# ci.interval). Confirm those exist before sourcing in a fresh session.

FitGetPval <- function(jsub, jform){
  # Fit linear model `jform` on data `jsub` and report the second
  # coefficient's (the slope term's) p-value, estimate, and std. error.
  jfit <- lm(formula = jform, data = jsub)
  coefs <- summary(jfit)$coefficients  # hoisted: summary() was called 3x
  return(data.frame(pval = coefs[2, "Pr(>|t|)"],
                    slope.val = coefs[2, "Estimate"],
                    slope.se = coefs[2, "Std. Error"]))
}

SumSqrDev <- function(x){
  # Sum of squared deviations of x from its mean.
  return(sum((x - mean(x)) ^ 2))
}

BinTrajectory <- function(trajs.spring.lst, jtraj, nearest = 0.1){
  # Bin pseudotime (lambda) to the nearest `nearest` and average exprs per
  # (traj, bin, mark, coord, pos).
  # BUGFIX: the original iterated over the global `trajs.spring` instead of
  # the `trajs.spring.lst` argument, and terminated the pipe with
  # `%>% return(trajs.sum)`, which errors at runtime (multi-argument return).
  # NOTE(review): still joins against `mat.sub.merge` from the calling
  # environment -- confirm that object exists before calling.
  round.int <- 1 / nearest
  trajs.sum <- lapply(trajs.spring.lst, function(x) x[[jtraj]] %>% mutate(traj = jtraj)) %>%
    bind_rows() %>%
    rowwise() %>%
    mutate(mark = strsplit(cell, "_")[[1]][[2]]) %>%
    left_join(., mat.sub.merge) %>%
    rowwise() %>%
    mutate(lambda.bin = floor(lambda * round.int) / round.int) %>%
    group_by(traj, lambda.bin, mark, coord, pos) %>%
    summarise(exprs = mean(exprs))
  return(trajs.sum)
}

GetTrajSum <- function(tm.result.lst, trajs.mixed, jmarks, jstr, jpseudo, jfac, jtraj){
  # Average exprs per pseudotime bin (fixed width 0.1) for trajectory `jtraj`
  # across all marks, restricted to bins matching region string `jstr`.
  mat.sub.merge <- lapply(jmarks, function(jmark){
    GetMatSub(tm.result.lst, jmark, jstr, jpseudo, jfac) %>% mutate(mark = jmark)
  }) %>% bind_rows()
  trajs.long <- lapply(trajs.mixed, function(x) x[[jtraj]]) %>%
    bind_rows() %>%
    rowwise() %>%
    mutate(mark = strsplit(cell, "_")[[1]][[2]]) %>%  # mark encoded in cell name
    left_join(., mat.sub.merge) %>%
    rowwise() %>%
    mutate(lambda.bin = floor(lambda * 10) / 10)
  trajs.sum <- trajs.long %>%
    group_by(lambda.bin, mark, coord, pos) %>%
    summarise(exprs = mean(exprs)) %>%
    mutate(chromo = jstr)
  return(trajs.sum)
}

FitSlope <- function(dat.sub){
  # Fit exprs ~ lambda.bin (pseudotime) and return slope, intercept, p-value.
  fit <- lm(formula = exprs ~ lambda.bin, data = dat.sub)
  slope <- fit$coefficients[['lambda.bin']]
  int <- fit$coefficients[['(Intercept)']]
  pval <- summary(fit)$coefficients["lambda.bin", "Pr(>|t|)"]
  return(data.frame(slope = slope, int = int, pval = pval))
}

HexPlot <- function(data, mapping, ...){
  # ggpairs-style panel: hexbin plus 2D density contours.
  p <- ggplot(data, mapping) +
    geom_hex() +
    geom_density2d() +
    theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
  return(p)
}

MakeLowerPlot <- function(data, mapping, ..., jbins = 150){
  # ggpairs-style lower panel: rasterized 10% point sample + density contours.
  # `jbins` is currently unused; kept for interface compatibility.
  m <- ggplot(data = data, mapping = mapping) +
    geom_point_rast(data = data %>% sample_frac(size = 0.1), alpha = 0.3) +
    geom_density_2d()
  return(m)
}

GetCellsAlongTraj <- function(trajs.spring, jmark, ctype, n.sections = 3, thres = 0.05, jfrom = 0, jto = 1, show.plots = TRUE){
  # Pick `n.sections` cells evenly spaced along pseudotime [jfrom, jto] on
  # trajectory `ctype` of mark `jmark` (nearest cell to each lambda cutoff).
  # `thres` is currently unused; kept for interface compatibility.
  lambda.cutoffs <- seq(from = jfrom, to = jto, length.out = n.sections)
  cells.vec <- lapply(lambda.cutoffs, function(jlambda){
    (trajs.spring[[jmark]][[ctype]] %>% arrange(abs(lambda - jlambda)))$cell[[1]]
  })
  cells <- unlist(cells.vec)
  if (show.plots){
    # NOTE(review): plotting reads `dat.trajs.long` and `jmerge` from the
    # calling environment -- confirm they exist.
    # BUGFIX: highlight the cells actually selected above (original
    # referenced undefined cell0/cell1/cell2).
    m <- ggplot(dat.trajs.long %>% filter(mark == jmark) %>% mutate(highlighted = cell %in% cells),
                aes(x = X1, y = X2, color = highlighted)) +
      geom_point() +
      theme_bw() +
      theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
    print(m)
    # signal along the chromosome for each selected cell, ordered by lambda
    m <- ggplot(jmerge %>% filter(cell %in% cells),
                aes(x = pos, y = exprs, group = lambda, color = lambda)) +
      geom_line() +
      theme_bw() +
      facet_wrap(~lambda, ncol = 1) +
      theme(aspect.ratio = 0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
      ggtitle(jmark, ctype)
    print(m)
  }
  return(cells)
}

GetMatSub <- function(tm.result.lst, jmark, gstr, jpseudo, jfac, cells.vec = NULL){
  # Long-format log2 imputed matrix (terms %*% topics, scaled by jfac with
  # pseudocount jpseudo) for rows matching `gstr`, dropping start/end columns.
  imputed.dat <- log2((t(tm.result.lst[[jmark]]$terms) %*% t(tm.result.lst[[jmark]]$topics) * jfac) + jpseudo)
  mat.sub <- MatToLong(imputed.dat, gstr = gstr, cells.vec = cells.vec) %>%
    dplyr::select(-start, -end)
  return(mat.sub)
}

MakeVariabilityPlots <- function(jmark, trajname, tm.result.lst, dat.umap.long.trajs,
                                 jcol = c("gray85", "gray50", "darkblue"),
                                 grep.strs = paste("chr", c(seq(21)), ":", sep = ""),
                                 jalpha = 0.5, pseudo = 0, jscale = 1,
                                 mdpt.sd = 1, ms.sd = c(0, 3),
                                 mdpt.fc = 0.75, lims.fc = c(0, 3),
                                 jsize.facet = 0.2, gw.size.facet = 2,
                                 lagmax = 2500, ci.interval = 1.96,
                                 chromogstr = "chr15:", pdfout = FALSE,
                                 jstep = 20000){
  # Produce a battery of variability diagnostics for one mark/trajectory:
  # per-chromosome and genome-wide cell SD on the UMAP, spatial profiles and
  # autocorrelation for a progenitor vs a differentiated cell, and median
  # |log2 FC| vs the progenitor, per chromosome and along pseudotime.
  # Returns the genome-wide per-cell log2-FC table for downstream analysis.
  #
  # Selected args:
  #   ms.sd   color-scale limits for the SD panels. BUGFIX: body referenced
  #           the undefined name `lims.sd`; now aliased from `ms.sd`.
  #   pdfout  PDF path, or FALSE/NULL to draw on the current device.
  #           BUGFIX: the default FALSE used to be passed to pdf().
  #   jstep   grid step in bp for CalculateACF. BUGFIX: was an undefined
  #           free variable; now a parameter with the CalculateACF default.
  #   lagmax  currently unused; kept for interface compatibility.
  # NOTE(review): also reads the global `trajs` and helper `multiplot()`.
  lims.sd <- ms.sd
  do.pdf <- !is.null(pdfout) && !isFALSE(pdfout)
  if (do.pdf){
    pdf(pdfout, useDingbats = FALSE)
  }
  names(grep.strs) <- grep.strs  # SummarizeACF() requires a named list
  print(paste("Making plots for", jmark, trajname))
  imputed.dat <- t(tm.result.lst[[jmark]]$terms) %*% t(tm.result.lst[[jmark]]$topics)
  dat.umap.long <- dat.umap.long.trajs[[jmark]]

  # Per-chromosome cell SD, faceted on the UMAP
  cell.sd.df.long <- lapply(grep.strs, function(grep.str){
    GetCellSd(jscale * (imputed.dat + pseudo), grep.str, log2.scale = TRUE, fn = sd)
  }) %>% bind_rows()
  dat.umap.filt <- left_join(dat.umap.long, cell.sd.df.long)
  m.chr <- ggplot(dat.umap.filt, aes(x = umap1, y = umap2, color = cell.sd)) +
    geom_point(size = jsize.facet) +
    theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    facet_wrap(~label) +
    scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.sd, limits = lims.sd) +
    ggtitle(jmark, paste0(deparse(substitute(sd)), " across chromosome"))
  print(m.chr)

  # Genome-wide cell SD (empty grep string matches all rows)
  cell.sd.genomewide <- GetCellSd(imputed.dat, "", log2.scale = TRUE)
  dat.umap.filt.gw <- left_join(dat.umap.long, cell.sd.genomewide)
  m.gw <- ggplot(dat.umap.filt.gw, aes(x = umap1, y = umap2, color = cell.sd)) +
    geom_point(size = gw.size.facet) +  # BUGFIX: was undefined `gw.jsize.facet`
    theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    facet_wrap(~label) +
    scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.sd, limits = lims.sd) +
    ggtitle(jmark, paste0(deparse(substitute(sd)), " genome wide"))
  print(m.gw)

  # Trajectory endpoints: progenitor (min lambda) vs differentiated (max lambda)
  hsc.cell <- (trajs[[jmark]][[trajname]] %>% arrange(lambda) %>% dplyr::top_n(-1))$cell[[1]]
  diff.cell <- (trajs[[jmark]][[trajname]] %>% arrange(lambda) %>% dplyr::top_n(1))$cell[[1]]
  gstr <- paste0(chromogstr)

  jsub <- MatToLong(imputed.dat, gstr, cells.vec = c(hsc.cell, diff.cell))
  m.spatial <- ggplot(jsub, aes(x = pos / 10^6, y = log2(exprs))) +
    geom_line(alpha = jalpha) + facet_wrap(~cell) + ggtitle(paste(jmark, gstr)) + xlab("MB") +
    theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
  m.spatial.merged <- ggplot(jsub, aes(x = pos / 10^6, y = log2(exprs), group = cell, color = cell)) +
    geom_line(alpha = jalpha) + ggtitle(paste(jmark, gstr)) + xlab("MB") +
    theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank())
  m.spatial.log2fc <- ggplot(jsub %>% group_by(pos) %>% summarise(exprs = diff(log2(exprs))),
                             aes(x = pos / 10^6, y = exprs)) +
    geom_line(alpha = jalpha) + ggtitle(paste(jmark, gstr)) + xlab("MB") +
    theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    ylab("log2 Fold Change")
  print(m.spatial)
  print(m.spatial.merged)
  print(m.spatial.log2fc)

  # Spatial autocorrelation for the two cells on the example chromosome
  jsub.hsc <- jsub %>% filter(cell == hsc.cell) %>% arrange(pos)
  jsub.myeloid <- jsub %>% filter(cell == diff.cell) %>% arrange(pos)
  acf.out.hsc <- CalculateACF(jsub.hsc, jstep = jstep, jtype = "correlation",
                              jmain = paste(jmark, trajname, "Prog Cell", gstr), show.plot = TRUE)
  acf.out.hsc <- CalculateACF(jsub.hsc, jstep = jstep, jtype = "partial",
                              jmain = paste(jmark, trajname, "Prog Cell", gstr), show.plot = TRUE)
  acf.out.myeloid <- CalculateACF(jsub.myeloid, jstep = jstep, jtype = "correlation",
                                  jmain = paste(jmark, trajname, "Diff Cell", gstr), show.plot = TRUE)
  acf.out.myeloid <- CalculateACF(jsub.myeloid, jstep = jstep, jtype = "partial",
                                  jmain = paste(jmark, trajname, "Diff Cell", gstr), show.plot = TRUE)

  # Genome wide: one long table per chromosome per cell
  jsub.hsc.lst <- lapply(grep.strs, function(g) MatToLong(imputed.dat, g, cells.vec = c(hsc.cell)))
  jsub.myeloid.lst <- lapply(grep.strs, function(g) MatToLong(imputed.dat, g, cells.vec = c(diff.cell)))

  m.hsc.chromo.all <- ggplot(jsub.hsc.lst %>% bind_rows(), aes(x = pos / 10^6, y = log2(exprs))) +
    geom_line() + facet_wrap(~chromo, scales = "free_x", ncol = 7) + theme_bw(8) +
    theme(aspect.ratio = 0.5, panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          strip.text.x = element_text(size = 4)) +
    xlab("MB") + ylab("log2(imputed counts)") + ggtitle(paste(jmark, trajname, "Prog Cell"))
  print(m.hsc.chromo.all)
  m.myeloid.chromo.all <- ggplot(jsub.myeloid.lst %>% bind_rows(), aes(x = pos / 10^6, y = log2(exprs))) +
    geom_line() + facet_wrap(~chromo, scales = "free_x", ncol = 7) + theme_bw(8) +
    theme(aspect.ratio = 0.5, panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          strip.text.x = element_text(size = 4)) +
    xlab("MB") + ylab("log2(imputed counts)") + ggtitle(paste(jmark, trajname, "Diff Cell"))
  print(m.myeloid.chromo.all)

  acf.out.hsc.lst <- lapply(jsub.hsc.lst, function(jsub.chromo){
    CalculateACF(jsub.chromo, jstep = jstep, jtype = "correlation",
                 jmain = paste(jmark, trajname, "Prog Cell", gstr), show.plot = FALSE)
  })
  acf.out.myeloid.lst <- lapply(jsub.myeloid.lst, function(jsub.chromo){
    CalculateACF(jsub.chromo, jstep = jstep, jtype = "correlation",
                 jmain = paste(jmark, trajname, "Diff Cell", gstr), show.plot = FALSE)
  })

  # Base-graphics ACF panels, one Prog/Diff pair per chromosome, with +/- CI
  par(mfrow = c(1, 2), mar = c(5.1, 4.1, 4.1, 2.1), mgp = c(3, 1, 0), las = 0, pty = "s")
  for (i in seq_along(acf.out.hsc.lst)){
    plot(acf.out.hsc.lst[[i]]$lag.stepadj / 10^6, acf.out.hsc.lst[[i]]$acf,
         main = paste(jmark, trajname, "Prog", grep.strs[[i]]), type = "h",
         xlab = "Step Size (MB)", ylab = "Autocorrelation")
    abline(h = ci.interval / sqrt(length(acf.out.hsc.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue")
    abline(h = -ci.interval / sqrt(length(acf.out.hsc.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue")
    abline(h = 0, cex = 2)
    plot(acf.out.myeloid.lst[[i]]$lag.stepadj / 10^6, acf.out.myeloid.lst[[i]]$acf,
         main = paste(jmark, trajname, "Diff", grep.strs[[i]]), type = "h",
         xlab = "Step Size (MB)", ylab = "Autocorrelation")
    abline(h = ci.interval / sqrt(length(acf.out.myeloid.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue")
    abline(h = -ci.interval / sqrt(length(acf.out.myeloid.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue")
    abline(h = 0, cex = 2)
  }
  par(mfrow = c(1, 1), mar = c(5.1, 4.1, 4.1, 2.1), mgp = c(3, 1, 0), las = 0)

  # Averages across chromosomes.
  # NOTE(review): SummarizeACF reads globals `pos.max` and `ci.interval`.
  acf.hsc.sum.lst <- SummarizeACF(acf.out.hsc.lst)
  m.acf.hsc.gw <- ggplot(acf.hsc.sum.lst$acf.out.sum, aes(x = dx / 10^6, y = acfval)) +
    geom_area() + theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    xlab("Lag (MB)") + ylab("ACF avg over chromosomes") +
    geom_hline(yintercept = acf.hsc.sum.lst$acf.ci, linetype = "dashed", col = "blue") +
    geom_hline(yintercept = -acf.hsc.sum.lst$acf.ci, linetype = "dashed", col = "blue") +
    ggtitle(paste(jmark, trajname, "Prog Genome-wide"))
  print(m.acf.hsc.gw)
  acf.myeloid.sum.lst <- SummarizeACF(acf.out.myeloid.lst)
  m.acf.myeloid.gw <- ggplot(acf.myeloid.sum.lst$acf.out.sum, aes(x = dx / 10^6, y = acfval)) +
    geom_area() + theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    xlab("Lag (MB)") + ylab("ACF avg over chromosomes") +
    geom_hline(yintercept = acf.myeloid.sum.lst$acf.ci, linetype = "dashed", col = "blue") +
    geom_hline(yintercept = -acf.myeloid.sum.lst$acf.ci, linetype = "dashed", col = "blue") +
    ggtitle(paste(jmark, trajname, "Diff Genome-wide"))
  print(m.acf.myeloid.gw)
  multiplot(m.acf.hsc.gw, m.acf.myeloid.gw, cols = 1)

  # Median |log2 FC| relative to the progenitor: per chromosome
  jsub.ref.merge <- lapply(grep.strs, function(gstr){
    GetDiffRelToCell(imputed.dat, gstr = gstr, trajs, trajname = trajname,
                     dat.umap.long = dat.umap.long, jmark = jmark)
  }) %>% bind_rows()
  m.mad <- ggplot(jsub.ref.merge, aes(x = umap1, y = umap2, color = exprs.diff.med)) +
    geom_point(size = jsize.facet) + theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.fc, limits = lims.fc) +
    facet_wrap(~label) + ggtitle(jmark, "Median log2 fold change with HSC")
  print(m.mad)

  # ... and genome wide ("" matches every bin)
  jsub.ref.merge.gw <- lapply(c(""), function(gstr){
    GetDiffRelToCell(imputed.dat, gstr = gstr, trajs, trajname = trajname,
                     dat.umap.long = dat.umap.long, jmark = jmark)
  }) %>% bind_rows()
  m.mad.gw <- ggplot(jsub.ref.merge.gw, aes(x = umap1, y = umap2, color = exprs.diff.med)) +
    geom_point(size = gw.size.facet) +  # BUGFIX: was undefined `gw.jsize.facet`
    theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.fc, limits = lims.fc) +
    facet_wrap(~label) + ggtitle(jmark, "Median log2 fold change with HSC")
  print(m.mad.gw)
  print(range(jsub.ref.merge.gw$exprs.diff.med))

  # ... and along pseudotime
  traj.sub <- trajs[[jmark]][[trajname]]
  traj.sub <- left_join(traj.sub, jsub.ref.merge.gw %>% dplyr::select(cell, exprs.diff.med), by = c("cell"))
  m.mad.traj <- ggplot(traj.sub, aes(x = lambda, y = exprs.diff.med)) +
    geom_point(alpha = 0.1) + theme_bw() +
    theme(aspect.ratio = 1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    xlab("Pseudotime") + ylab("Median Log2 FC from Prog Cell") +
    ggtitle(jmark, paste(trajname, "Genome-wide"))
  print(m.mad.traj)
  m.mad.traj.fixscale <- m.mad.traj + ylim(lims.fc)  # same plot, fixed y scale
  print(m.mad.traj.fixscale)

  if (do.pdf){
    dev.off()
  }
  # return something that can be used later for integrated analysis
  return(jsub.ref.merge.gw)
}

GetDiffRelToCell2 <- function(jmerge.long, cell.ref){
  # Median absolute expression difference of every cell vs reference cell
  # `cell.ref`, per (mark, ctype); re-attaches each cell's lambda (pseudotime).
  lambda.dat <- jmerge.long %>%
    group_by(cell) %>%
    filter(row_number() == 1) %>%
    ungroup() %>%
    dplyr::select(cell, lambda)
  jsub <- jmerge.long %>%
    filter(cell == cell.ref) %>%
    dplyr::select(coord, mark, ctype, exprs) %>%
    dplyr::rename(exprs.ref = exprs)
  jmerge.med.diff <- left_join(jmerge.long, jsub) %>%
    group_by(mark, ctype, cell) %>%
    mutate(exprs.diff = exprs - exprs.ref) %>%
    summarise(exprs.diff.med = median(abs(exprs.diff))) %>%
    left_join(., lambda.dat)  # add back lambda info, lost by summarise()
  return(jmerge.med.diff)
}

GetDiffRelToCell <- function(imputed.dat, gstr, trajs, trajname, dat.umap.long, jmark){
  # Median |log2 FC| of every cell vs the progenitor (min-lambda) cell of
  # trajectory `trajname`, over bins matching `gstr`; joined with UMAP coords.
  hsc.cell <- (trajs[[jmark]][[trajname]] %>% arrange(lambda) %>% dplyr::top_n(-1))$cell[[1]]
  jsub.all <- MatToLong(imputed.dat, gstr = gstr, cells.vec = NULL)
  jsub.hsc <- jsub.all %>% filter(cell == hsc.cell)
  # express everything relative to the progenitor cell
  jsub.hsc.ref <- jsub.hsc %>%
    rename(exprs.ref = exprs) %>%
    select(-cell, -start, -end, -pos, -chromo)
  jsub.ref <- left_join(jsub.all, jsub.hsc.ref)
  jsub.ref$exprs.diff <- log2(jsub.ref$exprs) - log2(jsub.ref$exprs.ref)
  jsub.ref.sum <- jsub.ref %>%
    group_by(cell) %>%
    summarise(exprs.diff.med = median(abs(exprs.diff)))
  jsub.ref.merge <- left_join(jsub.ref.sum %>% dplyr::select(cell, exprs.diff.med), dat.umap.long) %>%
    mutate(label = gstr)
  return(jsub.ref.merge)
}

GetCellSd <- function(dat.mat, grep.str, log2.scale = TRUE, fn = sd){
  # Per-cell (column) dispersion, via `fn`, across rows whose rownames match
  # `grep.str`; optionally on the log2 scale.
  # BUGFIX: drop = FALSE keeps a matrix when exactly one row matches
  # (apply() on a dropped vector used to error).
  jsub <- dat.mat[grepl(grep.str, rownames(dat.mat)), , drop = FALSE]
  if (log2.scale){
    jsub <- log2(jsub)
  }
  cell.sd.df <- data.frame(cell = colnames(jsub), cell.sd = apply(jsub, 2, fn))
  cell.sd.df$label <- grep.str
  return(cell.sd.df)
}

TotalVar <- function(x){
  # Elementwise squared deviations from the mean. NOTE: despite the original
  # "sum of squares" comment, this returns the per-element vector, NOT its sum
  # (see SumSqrDev for the summed version); behavior kept for compatibility.
  return((x - mean(x)) ^ 2)
}

MatToLong <- function(imputed.dat, gstr, cells.vec = NULL){
  # Long-format view of rows of `imputed.dat` whose rownames (genomic coords,
  # e.g. "chr15:3000000-3050000") match `gstr`, optionally restricted to the
  # columns in `cells.vec`. Requires project helpers GetChromo/GetStart/GetEnd.
  if (!is.null(cells.vec)){
    jsub <- as.data.frame(imputed.dat[grepl(gstr, rownames(imputed.dat)), cells.vec])
    colnames(jsub) <- cells.vec  # single-column subsets lose names otherwise
  } else {
    jsub <- as.data.frame(imputed.dat[grepl(gstr, rownames(imputed.dat)), ])
  }
  if (nrow(jsub) == 0){
    print(paste("Warning: grepstr", gstr, "found no matches, returning empty dataframe"))
    return(data.frame(NULL))
  }
  jsub$coord <- rownames(jsub)
  jsub$start <- as.numeric(sapply(jsub$coord, GetStart))
  # BUGFIX: `end` was parsed with GetStart (copy-paste), which made
  # pos == start for every bin.
  jsub$end <- as.numeric(sapply(jsub$coord, GetEnd))
  jsub$pos <- jsub$start + (jsub$end - jsub$start) / 2  # bin midpoint
  jsub$chromo <- sapply(jsub$coord, GetChromo)
  jsub <- gather(jsub, key = "cell", value = "exprs", c(-coord, -start, -end, -pos, -chromo))
  jsub <- jsub %>% arrange(desc(pos))
  return(jsub)
}

MergeSdWithPseudotime <- function(dat.umap.long.trajs, tm.result.lst, trajs, jmark, jtraj, grep.strs, jscale = TRUE, jfn = mad){
  # Per-chromosome cell dispersion (via `jfn`) joined with UMAP coordinates
  # and the pseudotime (lambda) of trajectory `jtraj`.
  imputed.dat <- t(tm.result.lst[[jmark]]$terms) %*% t(tm.result.lst[[jmark]]$topics)
  dat.umap.long <- dat.umap.long.trajs[[jmark]]
  cell.sd.df.long <- lapply(grep.strs, function(grep.str){
    GetCellSd(imputed.dat, grep.str, log2.scale = jscale, fn = jfn)
  }) %>% bind_rows()
  dat.umap.filt <- left_join(dat.umap.long, cell.sd.df.long)
  # attach pseudotime for the requested trajectory
  dat.umap.filt <- left_join(trajs[[jmark]][[jtraj]] %>% dplyr::select(cell, lambda),
                             dat.umap.filt, by = "cell")
  return(dat.umap.filt)
}

CalculateACF <- function(jsub.hsc, jstep = 20000, jtype = "correlation", jmain = "Title", show.plot = TRUE, maxlag = "full"){
  # Autocorrelation of log2 exprs along a chromosome, computed on a regular
  # position grid of step `jstep` bp; positions absent from `jsub.hsc` become
  # NA and are handled by na.pass. Adds `lag.stepadj` (lags in bp).
  jcells <- unique(jsub.hsc$cell)
  pos.start <- min(jsub.hsc$pos)
  pos.end <- max(jsub.hsc$pos)
  pos.vec <- seq(pos.start, pos.end, jstep)
  jsub.impute.vec <- data.frame(pos = rep(pos.vec, length(jcells)),
                                cell = rep(jcells, each = length(pos.vec)))
  jsub.impute.vec <- left_join(jsub.impute.vec, jsub.hsc %>% dplyr::select(c(chromo, pos, cell, exprs)))
  if (maxlag == "full"){
    maxlag <- nrow(jsub.impute.vec)
  }
  # BUGFIX: lag.max was hard-coded to nrow(jsub.impute.vec), silently
  # ignoring the `maxlag` argument.
  acf.out <- acf(log2(jsub.impute.vec$exprs), type = jtype, lag.max = maxlag,
                 na.action = na.pass, main = jmain, plot = show.plot)
  acf.out$lag.stepadj <- acf.out$lag * jstep
  return(acf.out)
}

SummarizeACF <- function(acf.out.hsc.lst){
  # Average ACF curves across chromosomes (named list of CalculateACF outputs)
  # and return the mean per-chromosome confidence bound.
  # NOTE(review): reads `pos.max` and `ci.interval` from the calling
  # environment -- they must be defined before calling.
  assertthat::assert_that(!is.null(names(acf.out.hsc.lst)))
  acf.out.hsc.long <- lapply(names(acf.out.hsc.lst), function(g){
    lst <- acf.out.hsc.lst[[g]]
    data.frame(acfval = lst$acf, dx = lst$lag.stepadj, label = g)
  }) %>% bind_rows()
  acf.out.hsc.sum <- acf.out.hsc.long %>%
    filter(dx < pos.max) %>%
    group_by(dx) %>%
    summarise(acfval = mean(acfval))
  # error bar per chromosome, then averaged
  acf.hsc.ci <- acf.out.hsc.long %>%
    group_by(label) %>%
    summarise(ci = ci.interval / sqrt(length(acfval))) %>%
    ungroup() %>%
    summarise(ci = mean(ci))
  acf.hsc.ci <- acf.hsc.ci$ci[[1]]
  return(list(acf.out.sum = acf.out.hsc.sum, acf.ci = acf.hsc.ci))
}
/scripts/Rfunctions/VariabilityFunctions.R
no_license
jakeyeung/sortchicAllScripts
R
false
false
20,571
r
FitGetPval <- function(jsub, jform){ jfit <- lm(formula = jform, data = jsub) pval <- summary(jfit)$coefficients[2, "Pr(>|t|)"] slope.val <- summary(jfit)$coefficients[2, "Estimate"] slope.se <- summary(jfit)$coefficients[2, "Std. Error"] return(data.frame(pval = pval, slope.val = slope.val, slope.se = slope.se)) } SumSqrDev <- function(x){ return( sum( (x - mean(x)) ^ 2 )) } BinTrajectory <- function(trajs.spring.lst, jtraj, nearest = 0.1){ round.int <- 1 / nearest trajs.sum <- lapply(trajs.spring, function(x) x[[jtraj]] %>% mutate(traj = jtraj)) %>% bind_rows() %>% rowwise() %>% mutate(mark = strsplit(cell, "_")[[1]][[2]]) %>% left_join(., mat.sub.merge) %>% rowwise() %>% mutate(lambda.bin = floor(lambda * round.int) / round.int) %>% group_by(traj, lambda.bin, mark, coord, pos) %>% summarise(exprs = mean(exprs)) %>% return(trajs.sum) } GetTrajSum <- function(tm.result.lst, trajs.mixed, jmarks, jstr, jpseudo, jfac, jtraj){ mat.sub.merge <- lapply(jmarks, function(jmark) GetMatSub(tm.result.lst, jmark, jstr, jpseudo, jfac) %>% mutate(mark = jmark)) %>% bind_rows() trajs.long <- lapply(trajs.mixed, function(x) x[[jtraj]]) %>% bind_rows() %>% rowwise() %>% mutate(mark = strsplit(cell, "_")[[1]][[2]]) %>% left_join(., mat.sub.merge) %>% rowwise() %>% mutate(lambda.bin = floor(lambda * 10) / 10) trajs.sum <- trajs.long %>% group_by(lambda.bin, mark, coord, pos) %>% summarise(exprs = mean(exprs)) %>% mutate(chromo = jstr) return(trajs.sum) } FitSlope <- function(dat.sub){ # fit exprs to trajectory to find slope fit <- lm(formula = exprs ~ lambda.bin, data = dat.sub) slope <- fit$coefficients[['lambda.bin']] int <- fit$coefficients[['(Intercept)']] pval <- summary(fit)$coefficients["lambda.bin", "Pr(>|t|)"] return(data.frame(slope = slope, int = int, pval = pval)) } HexPlot <- function(data, mapping, ...){ p <- ggplot(data, mapping) + geom_hex() + geom_density2d() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) 
return(p) } MakeLowerPlot <- function(data, mapping, ..., jbins = 150){ m <- ggplot(data = data, mapping = mapping) + geom_point_rast(data = data %>% sample_frac(size = 0.1), alpha = 0.3) + geom_density_2d() # geom_point(data = data %>% sample_frac(size = 0.1), alpha = 0.1) + return(m) } GetCellsAlongTraj <- function(trajs.spring, jmark, ctype, n.sections = 3, thres = 0.05, jfrom = 0, jto = 1, show.plots = TRUE){ lambda.cutoffs <- seq(from = jfrom, to = jto, length.out = n.sections) cells.vec <- lapply(lambda.cutoffs, function(jlambda) (trajs.spring[[jmark]][[ctype]] %>% arrange(abs(lambda - jlambda)))$cell[[1]]) if (show.plots){ m <- ggplot(dat.trajs.long %>% filter(mark == jmark) %>% mutate(highlighted = cell %in% c(cell0, cell1, cell2)), aes(x = X1, y = X2, color = highlighted)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) print(m) # plot chromosome 15 over time m <- ggplot(jmerge %>% filter(cell %in% c(cell0, cell1, cell2)), aes(x = pos, y = exprs, group = lambda, color = lambda)) + geom_line() + theme_bw() + facet_wrap(~lambda, ncol = 1) + theme(aspect.ratio=0.2, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + ggtitle(jmark, ctype) print(m) } return(unlist(cells.vec)) } GetMatSub <- function(tm.result.lst, jmark, gstr, jpseudo, jfac, cells.vec = NULL){ imputed.dat <- log2((t(tm.result.lst[[jmark]]$terms) %*% t(tm.result.lst[[jmark]]$topics) * jfac) + jpseudo) mat.sub <- MatToLong(imputed.dat, gstr = gstr, cells.vec = cells.vec) %>% dplyr::select(-start, -end) # mat.sub <- MatToLong(imputed.dat, gstr = gstr, cells.vec = NULL) return(mat.sub) } MakeVariabilityPlots <- function(jmark, trajname, tm.result.lst, dat.umap.long.trajs, jcol = c("gray85", "gray50", "darkblue"), grep.strs = paste("chr", c(seq(21)), ":", sep = ""), jalpha = 0.5, pseudo = 0, jscale = 1, mdpt.sd = 1, ms.sd = c(0, 3), mdpt.fc = 0.75, lims.fc = c(0, 3), jsize.facet = 0.2, gw.size.facet = 
2, lagmax = 2500, ci.interval = 1.96, chromogstr="chr15:", pdfout = FALSE){ # jmark <- "H3K27me3" # trajname <- "myeloid" if (!is.null(pdfout)){ pdf(pdfout, useDingbats=FALSE) } names(grep.strs) <- grep.strs # SummarizeACF() uses a named list that fails if you don't do this print(paste("Making plots for", jmark, trajname)) imputed.dat <- t(tm.result.lst[[jmark]]$terms) %*% t(tm.result.lst[[jmark]]$topics) dat.umap.long <- dat.umap.long.trajs[[jmark]] cell.sd.df.long <- lapply(grep.strs, function(grep.str){ return(GetCellSd(jscale * (imputed.dat + pseudo), grep.str, log2.scale = TRUE, fn = sd)) }) %>% bind_rows() dat.umap.filt <- left_join(dat.umap.long, cell.sd.df.long) m.chr <- ggplot(dat.umap.filt, aes(x = umap1, y = umap2, color = cell.sd)) + geom_point(size = jsize.facet) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + facet_wrap(~label) + scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.sd, limits = lims.sd) + ggtitle(jmark, paste0(deparse(substitute(sd)), " across chromosome")) print(m.chr) # what about genome wide cell.sd.genomewide <- GetCellSd(imputed.dat, "", log2.scale=TRUE) dat.umap.filt.gw <- left_join(dat.umap.long, cell.sd.genomewide) m.gw <- ggplot(dat.umap.filt.gw, aes(x = umap1, y = umap2, color = cell.sd)) + geom_point(size = gw.jsize.facet) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + facet_wrap(~label) + scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.sd, limits = lims.sd) + ggtitle(jmark, paste0(deparse(substitute(sd)), " genome wide")) print(m.gw) # Highlight differences for two representative cells on a representative chromosome hsc.cell <- (trajs[[jmark]][[trajname]] %>% arrange(lambda) %>% dplyr::top_n(-1))$cell[[1]] diff.cell <- (trajs[[jmark]][[trajname]] %>% arrange(lambda) %>% dplyr::top_n(1))$cell[[1]] gstr <- 
paste0(chromogstr) jsub <- MatToLong(imputed.dat, gstr, cells.vec = c(hsc.cell, diff.cell)) m.spatial <- ggplot(jsub, aes(x = pos / 10^6, y = log2(exprs))) + geom_line(alpha = jalpha) + facet_wrap(~cell) + ggtitle(paste(jmark, gstr)) + xlab("MB") + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) m.spatial.merged <- ggplot(jsub, aes(x = pos / 10^6, y = log2(exprs), group = cell, color = cell)) + geom_line(alpha = jalpha) + ggtitle(paste(jmark, gstr)) + xlab("MB") + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) m.spatial.log2fc <- ggplot(jsub %>% group_by(pos) %>% summarise(exprs = diff(log2(exprs))), aes(x = pos / 10^6, y = exprs)) + geom_line(alpha = jalpha) + ggtitle(paste(jmark, gstr)) + xlab("MB") + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + ylab("log2 Fold Change") print(m.spatial) print(m.spatial.merged) print(m.spatial.log2fc) # spatial pattern? 
jsub.hsc <- jsub %>% filter(cell == hsc.cell) %>% arrange(pos) jsub.myeloid <- jsub %>% filter(cell == diff.cell) %>% arrange(pos) # jmain <- "hsc" acf.out.hsc <- CalculateACF(jsub.hsc, jstep = jstep, jtype = "correlation", jmain = paste(jmark, trajname, "Prog Cell", gstr), show.plot = TRUE) acf.out.hsc <- CalculateACF(jsub.hsc, jstep = jstep, jtype = "partial", jmain = paste(jmark, trajname, "Prog Cell", gstr), show.plot = TRUE) acf.out.myeloid <- CalculateACF(jsub.myeloid, jstep = jstep, jtype = "correlation", jmain = paste(jmark, trajname, "Diff Cell", gstr), show.plot = TRUE) acf.out.myeloid <- CalculateACF(jsub.myeloid, jstep = jstep, jtype = "partial", jmain = paste(jmark, trajname, "Diff Cell", gstr), show.plot = TRUE) # do it genome wide jsub.hsc.lst <- lapply(grep.strs, function(g) MatToLong(imputed.dat, g, cells.vec = c(hsc.cell))) jsub.myeloid.lst <- lapply(grep.strs, function(g) MatToLong(imputed.dat, g, cells.vec = c(diff.cell))) # plot chromosome over space for all chromosomes m.hsc.chromo.all <- ggplot(jsub.hsc.lst %>% bind_rows(), aes(x = pos / 10^6, y = log2(exprs))) + geom_line() + facet_wrap(~chromo, scales = "free_x", ncol = 7) + theme_bw(8) + theme(aspect.ratio=0.5, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.text.x = element_text(size = 4)) + xlab("MB") + ylab("log2(imputed counts)") + ggtitle(paste(jmark, trajname, "Prog Cell")) print(m.hsc.chromo.all) m.myeloid.chromo.all <- ggplot(jsub.myeloid.lst %>% bind_rows(), aes(x = pos / 10^6, y = log2(exprs))) + geom_line() + facet_wrap(~chromo, scales = "free_x", ncol = 7) + theme_bw(8) + theme(aspect.ratio=0.5, panel.grid.major = element_blank(), panel.grid.minor = element_blank(), strip.text.x = element_text(size = 4)) + xlab("MB") + ylab("log2(imputed counts)") + ggtitle(paste(jmark, trajname, "Diff Cell")) print(m.myeloid.chromo.all) acf.out.hsc.lst <- lapply(jsub.hsc.lst, function(jsub.hsc) CalculateACF(jsub.hsc, jstep = jstep, jtype = "correlation", jmain = 
paste(jmark, trajname, "Prog Cell", gstr), show.plot = FALSE)) acf.out.myeloid.lst <- lapply(jsub.myeloid.lst, function(jsub.hsc) CalculateACF(jsub.hsc, jstep = jstep, jtype = "correlation", jmain = paste(jmark, trajname, "Diff Cell", gstr), show.plot = FALSE)) # average out the plots for different lags # plot all chromosomes par(mfrow=c(1, 2), mar=c(5.1, 4.1, 4.1, 2.1), mgp=c(3, 1, 0), las=0, pty = "s") for (i in seq(length(acf.out.hsc.lst))){ plot(acf.out.hsc.lst[[i]]$lag.stepadj / 10^6, acf.out.hsc.lst[[i]]$acf, main = paste(jmark, trajname, "Prog", grep.strs[[i]]), type = "h", xlab = "Step Size (MB)", ylab = "Autocorrelation") abline(h = ci.interval / sqrt(length(acf.out.hsc.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue") abline(h = -ci.interval / sqrt(length(acf.out.hsc.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue") abline(h = 0, cex = 2) plot(acf.out.myeloid.lst[[i]]$lag.stepadj / 10^6, acf.out.myeloid.lst[[i]]$acf, main = paste(jmark, trajname, "Diff", grep.strs[[i]]), type = "h", xlab = "Step Size (MB)", ylab = "Autocorrelation") abline(h = ci.interval / sqrt(length(acf.out.myeloid.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue") abline(h = -ci.interval / sqrt(length(acf.out.myeloid.lst[[i]]$lag.stepadj)), lty = "dotted", col = "blue") abline(h = 0, cex = 2) } par(mfrow=c(1,1), mar=c(5.1, 4.1, 4.1, 2.1), mgp=c(3, 1, 0), las=0) # summarize across chromosomes acf.hsc.sum.lst <- SummarizeACF(acf.out.hsc.lst) m.acf.hsc.gw <- ggplot(acf.hsc.sum.lst$acf.out.sum, aes(x = dx / 10^6, y = acfval)) + geom_area() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + xlab("Lag (MB)") + ylab("ACF avg over chromosomes") + geom_hline(yintercept = acf.hsc.sum.lst$acf.ci, linetype = "dashed", col = "blue") + geom_hline(yintercept = -acf.hsc.sum.lst$acf.ci, linetype = "dashed", col = "blue") + ggtitle(paste(jmark, trajname, "Prog Genome-wide")) print(m.acf.hsc.gw) acf.myeloid.sum.lst <- 
SummarizeACF(acf.out.myeloid.lst) m.acf.myeloid.gw <- ggplot(acf.myeloid.sum.lst$acf.out.sum, aes(x = dx / 10^6, y = acfval)) + geom_area() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + xlab("Lag (MB)") + ylab("ACF avg over chromosomes") + geom_hline(yintercept = acf.myeloid.sum.lst$acf.ci, linetype = "dashed", col = "blue") + geom_hline(yintercept = -acf.myeloid.sum.lst$acf.ci, linetype = "dashed", col = "blue") + ggtitle(paste(jmark, trajname, "Diff Genome-wide")) print(m.acf.myeloid.gw) multiplot(m.acf.hsc.gw, m.acf.myeloid.gw, cols = 1) # Plot the median log2 fold change relative to HSC cell: for one chromo jsub.ref.merge <- lapply(grep.strs, function(gstr) GetDiffRelToCell(imputed.dat, gstr = gstr, trajs, trajname = trajname, dat.umap.long = dat.umap.long, jmark = jmark)) %>% bind_rows() m.mad <- ggplot(jsub.ref.merge, aes(x = umap1, y = umap2, color = exprs.diff.med)) + geom_point(size = jsize.facet) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.fc, limits = lims.fc) + facet_wrap(~label) + ggtitle(jmark, "Median log2 fold change with HSC") print(m.mad) # do genome-wide? 
jsub.ref.merge.gw <- lapply(c(""), function(gstr) GetDiffRelToCell(imputed.dat, gstr = gstr, trajs, trajname = trajname, dat.umap.long = dat.umap.long, jmark = jmark)) %>% bind_rows() m.mad.gw <- ggplot(jsub.ref.merge.gw, aes(x = umap1, y = umap2, color = exprs.diff.med)) + geom_point(size = gw.jsize.facet) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_gradient2(low = jcol[[1]], mid = jcol[[2]], high = jcol[[3]], midpoint = mdpt.fc, limits = lims.fc) + facet_wrap(~label) + ggtitle(jmark, "Median log2 fold change with HSC") print(m.mad.gw) print(range(jsub.ref.merge.gw$exprs.diff.med)) # plot along pseudotime? traj.sub <- trajs[[jmark]][[trajname]] # add exprs.diff.med traj.sub <- left_join(traj.sub, jsub.ref.merge.gw %>% dplyr::select(cell, exprs.diff.med), by = c("cell")) m.mad.traj <- ggplot(traj.sub, aes(x = lambda, y = exprs.diff.med)) + geom_point(alpha = 0.1) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + xlab("Pseudotime") + ylab("Median Log2 FC from Prog Cell") + ggtitle(jmark, paste(trajname, "Genome-wide")) print(m.mad.traj) m.mad.traj.fixscale <- ggplot(traj.sub, aes(x = lambda, y = exprs.diff.med)) + geom_point(alpha = 0.1) + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + ylim(lims.fc) + xlab("Pseudotime") + ylab("Median Log2 FC from Prog Cell") + ggtitle(jmark, paste(trajname, "Genome-wide")) print(m.mad.traj.fixscale) if (!is.null(pdfout)){ dev.off() } # return something that can be used later for integrated analysis return(jsub.ref.merge.gw) } GetDiffRelToCell2 <- function(jmerge.long, cell.ref){ # add reference cell expression as exprs.ref column, then calculate median log2 fold change lambda.dat <- jmerge.long %>% group_by(cell) %>% filter(row_number()==1) %>% ungroup() %>% dplyr::select(cell, lambda) jsub <- jmerge.long %>% filter(cell == cell.ref) 
%>% dplyr::select(coord, mark, ctype, exprs) %>% dplyr::rename(exprs.ref = exprs) jmerge.med.diff <- left_join(jmerge.long, jsub) %>% group_by(mark, ctype, cell) %>% mutate(exprs.diff = exprs - exprs.ref) %>% summarise(exprs.diff.med = median(abs(exprs.diff))) %>% left_join(., lambda.dat) # add back lambda info, which is lost return(jmerge.med.diff) } GetDiffRelToCell <- function(imputed.dat, gstr, trajs, trajname, dat.umap.long, jmark){ hsc.cell <- (trajs[[jmark]][[trajname]] %>% arrange(lambda) %>% dplyr::top_n(-1))$cell[[1]] jsub.all <- MatToLong(imputed.dat, gstr = gstr, cells.vec=NULL) jsub.hsc <- jsub.all %>% filter(cell == hsc.cell) # plot by reference to stem cell jsub.hsc.ref <- jsub.hsc %>% rename(exprs.ref = exprs) %>% select(-cell, -start, -end, -pos, -chromo) jsub.ref <- left_join(jsub.all, jsub.hsc.ref) # do the difference over pseudotime?? jsub.ref$exprs.diff <- log2(jsub.ref$exprs) - log2(jsub.ref$exprs.ref) jsub.ref.sum <- jsub.ref %>% group_by(cell) %>% summarise(exprs.diff.med = median(abs(exprs.diff))) # join to UMAP jsub.ref.merge <- left_join(jsub.ref.sum %>% dplyr::select(cell, exprs.diff.med), dat.umap.long) %>% mutate(label = gstr) return(jsub.ref.merge) } GetCellSd <- function(dat.mat, grep.str, log2.scale = TRUE, fn = sd){ # calculate standard deviation from matrix row.filt.indx <- grepl(grep.str, rownames(dat.mat)) if (log2.scale){ cell.sd.df <- data.frame(cell = colnames(dat.mat[row.filt.indx, ]), cell.sd = apply(log2(dat.mat[row.filt.indx, ]), 2, fn)) } else { cell.sd.df <- data.frame(cell = colnames(dat.mat[row.filt.indx, ]), cell.sd = apply(dat.mat[row.filt.indx, ], 2, fn)) } cell.sd.df$label <- grep.str return(cell.sd.df) } TotalVar <- function(x){ # sum of squares return((x - mean(x)) ^ 2) } MatToLong <- function(imputed.dat, gstr, cells.vec = NULL){ if (!is.null(cells.vec)){ jsub <- as.data.frame(imputed.dat[grepl(gstr, rownames(imputed.dat)), cells.vec]) colnames(jsub) <- cells.vec } else { jsub <- 
as.data.frame(imputed.dat[grepl(gstr, rownames(imputed.dat)), ]) } if (nrow(jsub) == 0){ print(paste("Warning: grepstr", gstr, "found no matches, returning empty dataframe")) return(data.frame(NULL)) } jsub$coord <- rownames(jsub) jsub$start <- as.numeric(sapply(jsub$coord, GetStart)) jsub$end <- as.numeric(sapply(jsub$coord, GetStart)) jsub$pos <- jsub$start + (jsub$end - jsub$start) / 2 jsub$chromo <- sapply(jsub$coord, GetChromo) jsub <- gather(jsub, key = "cell", value = "exprs", c(-coord, -start, -end, -pos, -chromo)) jsub <- jsub %>% arrange(desc(pos)) return(jsub) } MergeSdWithPseudotime <- function(dat.umap.long.trajs, tm.result.lst, trajs, jmark, jtraj, grep.strs, jscale=TRUE, jfn = mad){ imputed.dat <- t(tm.result.lst[[jmark]]$terms) %*% t(tm.result.lst[[jmark]]$topics) dat.umap.long <- dat.umap.long.trajs[[jmark]] cell.sd.df.long <- lapply(grep.strs, function(grep.str){ return(GetCellSd(imputed.dat, grep.str, log2.scale = jscale, fn = jfn)) }) %>% bind_rows() dat.umap.filt <- left_join(dat.umap.long, cell.sd.df.long) # add a trajectory dat.umap.filt <- left_join(trajs[[jmark]][[jtraj]] %>% dplyr::select(cell, lambda), dat.umap.filt, by = "cell") return(dat.umap.filt) } CalculateACF <- function(jsub.hsc, jstep = 20000, jtype = "correlation", jmain = "Title", show.plot = TRUE, maxlag = "full"){ # impute missing positions with minimum value # impute missing bins with minimum value jcells <- unique(jsub.hsc$cell) pos.start <- min(jsub.hsc$pos) pos.end <- max(jsub.hsc$pos) # print(head(jsub.hsc)) # print(tail(jsub.hsc)) # print(paste(pos.start, pos.end)) # jstep <- 20000 pos.vec <- seq(pos.start, pos.end, jstep) jsub.impute.vec <- data.frame(pos = rep(pos.vec, length(jcells)), cell = rep(jcells, each = length(pos.vec))) jsub.impute.vec <- left_join(jsub.impute.vec, jsub.hsc %>% dplyr::select(c(chromo, pos, cell, exprs))) # jsub.impute.vec$exprs[which(is.na(jsub.impute.vec$exprs))] <- min(jsub.hsc$exprs) if (maxlag == "full"){ maxlag <- nrow(jsub.impute.vec) } 
acf.out <- acf(log2(jsub.impute.vec$exprs), type = jtype, lag.max = nrow(jsub.impute.vec), na.action = na.pass, main = jmain, plot = show.plot) acf.out$lag.stepadj <- acf.out$lag * jstep return(acf.out) } SummarizeACF <- function(acf.out.hsc.lst){ assertthat::assert_that(!is.null(names(acf.out.hsc.lst))) acf.out.hsc.long <- lapply(names(acf.out.hsc.lst), function(g){ lst <- acf.out.hsc.lst[[g]] dat.tmp <- data.frame(acfval = lst$acf, dx = lst$lag.stepadj, label = g) return(dat.tmp) }) %>% bind_rows() acf.out.hsc.sum <- acf.out.hsc.long %>% filter(dx < pos.max) %>% group_by(dx) %>% summarise(acfval = mean(acfval)) # get error bar for each chromose acf.hsc.ci <- acf.out.hsc.long %>% group_by(label) %>% summarise(ci = ci.interval / sqrt(length(acfval))) %>% ungroup() %>% summarise(ci = mean(ci)) acf.hsc.ci <- acf.hsc.ci$ci[[1]] return(list(acf.out.sum = acf.out.hsc.sum, acf.ci = acf.hsc.ci)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{defecttype} \alias{defecttype} \title{공정변수-불량 종류 데이터} \format{ 18개의 행과 4개의 열을 지닌 데이터 프레임: \describe{ \item{id}{관측 고유번호} \item{x1, x2}{두 공정변수} \item{y}{불량 종류 (1, 2, 3)} } } \usage{ defecttype } \description{ 3종류의 불량을 2개의 공정변수로 예측하는 모형을 학습하기 위한 데이터. } \keyword{datasets}
/man/defecttype.Rd
no_license
youngroklee-ml/dmtr
R
false
true
523
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{defecttype} \alias{defecttype} \title{공정변수-불량 종류 데이터} \format{ 18개의 행과 4개의 열을 지닌 데이터 프레임: \describe{ \item{id}{관측 고유번호} \item{x1, x2}{두 공정변수} \item{y}{불량 종류 (1, 2, 3)} } } \usage{ defecttype } \description{ 3종류의 불량을 2개의 공정변수로 예측하는 모형을 학습하기 위한 데이터. } \keyword{datasets}
# plot2.R -- draw the "Global Active Power vs. time" line chart for the UCI
# household power consumption data, restricted to 2007-02-01 and 2007-02-02.
#
# Input : household_power_consumption.txt in the working directory
#         (semicolon-separated, one header row).
# Output: plot2.png (480 x 480 px) in the working directory.

# Read the full data set.  Strings are kept as character so the Date/Time
# columns can be parsed explicitly below.  TRUE/FALSE are spelled out
# (original used T/F, which are reassignable bindings).
powerdata <- read.table("household_power_consumption.txt",
                        header = TRUE, sep = ";",
                        stringsAsFactors = FALSE, dec = ".")

# Keep only the two days of interest (dates are stored as d/m/Y strings).
subSetData <- powerdata[powerdata$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combine the Date and Time columns into one timestamp per observation.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")

# Missing readings are coded as non-numeric text in the raw file; as.numeric()
# turns those into NA (with a coercion warning), which plot() skips.
globalActivePower <- as.numeric(subSetData$Global_active_power)

# Render the time series to a PNG device.
png("plot2.png", width = 480, height = 480)
plot(datetime, globalActivePower, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
/Plot2.R
no_license
tiagovazsoares/ExData_Plotting1
R
false
false
480
r
powerdata <- read.table("household_power_consumption.txt", header=T, sep=";", stringsAsFactors=F, dec=".") subSetData <- powerdata[powerdata$Date %in% c("1/2/2007","2/2/2007") ,] datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S") globalActivePower <- as.numeric(subSetData$Global_active_power) png("plot2.png", width=480, height=480) plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)") dev.off()
# CROMO MNTD Analysis
# Performed on Brazelton Computing Cluster -- code pasted into an sbatch job file.
# Run using:
#   srun --partition highmem --mem 80GB Rscript CRO_MNTD-analysis.r &
# (job ID 27051)
#
# Computes abundance-weighted Mean Nearest Taxon Distance (MNTD) and its
# standardised effect size (ses.mntd, a.k.a. NTI) for the CROMO time-series
# community table, writing each result component to CSV.

# Load libraries.
library(vegan)
library(ape)
library(picante)
library(readr)

# Read in OTU ids -- BEFORE READING IN, REPLACE SPACES WITH AN "_".
otu.ids <- read.delim("CROMO_TS_filtered_FINAL_otu.ids.txt", header = FALSE)
otu.ids <- as.data.frame(otu.ids)

# Upload phylogenetic tree (file read as one string, parsed as Newick).
CROMO_Tree <- read_file("CROMO_TS_FINAL.tre")
phy <- read.tree(text = CROMO_Tree)

# Tip labels in the order used by the tree.
reorg.otu.ids <- as.data.frame(phy$tip.label)

# Read in the count table.
comm <- read.delim("CROMO_TS_filtered_FINAL_counts.txt", header = TRUE)
comm <- as.data.frame(comm)

# Label rows with OTU ids.
row.names(comm) <- otu.ids[, 1]

# Reorder the count table rows to match the tree's tip order.  The temporary
# first column (V1) carries the OTU id used for matching.
# NOTE(review): `order` shadows base::order() from here on; harmless in this
# script but worth renaming.
comm <- cbind(otu.ids, comm)
order <- reorg.otu.ids[, 1]
comm <- comm[match(order, comm$V1), ]

# Remove the organisational column and transpose so samples become rows.
comm[, 1] <- NULL
comm <- t(comm)

# Keep only the taxa present in both the tree and the community matrix.
combined <- match.phylo.comm(phy, comm)
phy <- combined$phy
comm <- combined$comm

# Pairwise tip-to-tip (cophenetic) distance matrix.  Computed ONCE and reused
# below: cophenetic() is quadratic in the number of tips and the original
# script recomputed it for both mntd() and ses.mntd().
phy.dist <- cophenetic(phy)

# Calculate abundance-weighted MNTD per sample.
CRO_weighted.mntd <- mntd(comm, phy.dist, abundance.weighted = TRUE)

# Write out MNTD matrix.
write.csv(CRO_weighted.mntd, 'CRO_TS_FINAL_MNTD_weighted.csv', quote = FALSE)

# Calculate NTI: standardised effect size of MNTD vs the "taxa.labels" null
# model.
# NOTE(review): runs = 2 is very low for a null distribution -- presumably a
# test setting; confirm before drawing conclusions from the z-scores.
CRO_ses.mntd <- ses.mntd(comm, phy.dist, null.model = "taxa.labels",
                         abundance.weighted = TRUE, runs = 2, iterations = 1000)

# Write out the full NTI result and each component separately.
write.csv(CRO_ses.mntd, "CRO_TS_FINAL_ses.mntd.csv", quote = FALSE)
write.csv(CRO_ses.mntd$ntaxa, "CRO_TS_FINAL_ntaxa.csv", quote = FALSE)
write.csv(CRO_ses.mntd$mntd.obs, "CRO_TS_FINAL_mntd.obs.csv", quote = FALSE)
write.csv(CRO_ses.mntd$mntd.rand.mean, "CRO_TS_FINAL_mntd.rand.mean.csv", quote = FALSE)
write.csv(CRO_ses.mntd$mntd.rand.sd, "CRO_TS_FINAL_mntd.rand.sd.csv", quote = FALSE)
write.csv(CRO_ses.mntd$mntd.obs.rank, "CRO_TS_FINAL_mntd.obs.rank.csv", quote = FALSE)
write.csv(CRO_ses.mntd$mntd.obs.z, "CRO_TS_FINAL_mntd.obs.z.csv", quote = FALSE)
write.csv(CRO_ses.mntd$mntd.obs.p, "CRO_TS_FINAL_mntd.obs.p.csv", quote = FALSE)
/CROMO_MNTD_NTI_Script.R
no_license
SchrenkLab/PAPER_Putman_CommunityAssembly
R
false
false
2,302
r
#CROMO MNTD Analysis #Performed on Brazelton Computing Cluster --Code Pasted into sbatch job file #Run using: #srun --partition highmem --mem 80GB Rscript CRO_MNTD-analysis.r & #job ID 27051 #load librarys library(vegan) library(ape) library(picante) library(readr) #Read in Otu.ids -- BEFORE READING IN REPLACE SPACES WITH AN "_" otu.ids=read.delim("CROMO_TS_filtered_FINAL_otu.ids.txt", header=FALSE) otu.ids=as.data.frame(otu.ids) #Upload phylogenetic tree CROMO_Tree=read_file("CROMO_TS_FINAL.tre") phy=read.tree(text=CROMO_Tree) #Extract reordered otu.ids reorg.otu.ids=as.data.frame(phy$tip.label) #Read in count table comm=read.delim("CROMO_TS_filtered_FINAL_counts.txt", header=TRUE) comm=as.data.frame(comm) #Set rownames remove column 1 and transpose row.names(comm)=otu.ids[,1] #Reorganize count table by tree otu.ids comm=cbind(otu.ids, comm) order=reorg.otu.ids[,1] comm=comm[match(order, comm$V1),] #Remove organizational column and transpose comm[,1]<-NULL comm=t(comm) #Create combined object combined=match.phylo.comm(phy, comm) phy <- combined$phy comm <- combined$comm #Normalize Branch Lengths #branch.sum=sum(phy$edge.length) #phy$edge.length=phy$edge.length/branch.sum #Calculate phylogenetic distance #phy.dist=cophenetic.phylo(phy) #phy.dist1=cophenetic(phy) #Calculate MNTD CRO_weighted.mntd=mntd(comm, cophenetic(phy), abundance.weighted = TRUE) #Write out MNTD matrix write.csv(CRO_weighted.mntd,'CRO_TS_FINAL_MNTD_weighted.csv', quote=F) #Caluclate NTI CRO_ses.mntd=ses.mntd(comm, cophenetic(phy), null.model="taxa.labels", abundance.weighted = TRUE, runs=2, iterations=1000) #Write out NTI info write.csv(CRO_ses.mntd, "CRO_TS_FINAL_ses.mntd.csv", quote=F) write.csv(CRO_ses.mntd$ntaxa, "CRO_TS_FINAL_ntaxa.csv", quote=F) write.csv(CRO_ses.mntd$mntd.obs, "CRO_TS_FINAL_mntd.obs.csv", quote=F) write.csv(CRO_ses.mntd$mntd.rand.mean, "CRO_TS_FINAL_mntd.rand.mean.csv", quote=F) write.csv(CRO_ses.mntd$mntd.rand.sd, "CRO_TS_FINAL_mntd.rand.sd.csv", quote=F) 
write.csv(CRO_ses.mntd$mntd.obs.rank, "CRO_TS_FINAL_mntd.obs.rank.csv", quote=F) write.csv(CRO_ses.mntd$mntd.obs.z, "CRO_TS_FINAL_mntd.obs.z.csv", quote=F) write.csv(CRO_ses.mntd$mntd.obs.p, "CRO_TS_FINAL_mntd.obs.p.csv", quote=F)
# Null-distribution exploration of sample means for the femaleControlsPopulation
# body-weight data: for several (simulation count, sample size) combinations,
# estimate the proportion of random-sample means lying more than 1 unit away
# from the population mean.
library(downloader)

url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/femaleControlsPopulation.csv"
filename <- basename(url)
download(url, destfile = filename)
x <- unlist(read.csv(filename))

# Proportion of `n_sims` sample means (samples of size `sample_size` drawn
# from `x` without replacement) that fall more than `cutoff` from mean(x).
# Extracted helper: the original script repeated this exact loop three times.
# RNG draws occur in the same order as the original, so results are identical
# for the same seed.
null_tail_prop <- function(x, n_sims, sample_size, cutoff = 1) {
  sample_means <- vector("numeric", n_sims)  # preallocated, no growing
  for (i in seq_len(n_sims)) {
    sample_means[i] <- mean(sample(x, sample_size))
  }
  sum(abs(mean(x) - sample_means) > cutoff) / n_sims
}

# 1,000 simulations, samples of 5 (result assigned but not printed, as before).
set.seed(1)
p <- null_tail_prop(x, 1000, 5)

##############
# 10,000 simulations, samples of 5.
set.seed(1)
p <- null_tail_prop(x, 10000, 5)
p

######
# 10,000 simulations, samples of 50 -- larger samples concentrate the means.
set.seed(1)
p <- null_tail_prop(x, 10000, 50)
p
/Wk2_nullDist.R
no_license
subsoontornlab/RedX
R
false
false
697
r
library(downloader) url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/femaleControlsPopulation.csv" filename <- basename(url) download(url, destfile=filename) x <- unlist( read.csv(filename) ) set.seed(1) N = 1000 meanList = vector("numeric", N) for(n in 1:N){ meanList[n] = mean(sample(x, 5)) } p = sum(abs(mean(x) - meanList) > 1)/N ############## set.seed(1) N = 10000 meanList = vector("numeric", N) for(n in 1:N){ meanList[n] = mean(sample(x, 5)) } p = sum(abs(mean(x) - meanList) > 1)/N p ###### set.seed(1) N = 10000 meanList = vector("numeric", N) for(n in 1:N){ meanList[n] = mean(sample(x, 50)) } p = sum(abs(mean(x) - meanList) > 1)/N p
# Youden-index-based optimal thresholds for a covariate-specific ROC curve
# fitted with the Bayesian nonparametric model of cROC.bnp().
#
# For every row of `newdata` and every saved MCMC draw, the conditional CDFs
# of the test in the healthy (h) and diseased (d) groups are evaluated on a
# common grid of test values; the Youden index is max|F_h - F_d| and the
# YI-optimal threshold is the grid value attaining it.  Posterior summaries
# (mean and 2.5%/97.5% quantiles) of threshold, YI, TPF and FPF are returned
# per covariate value.
#
# Arguments:
#   object   -- fitted object of class "cROC.bnp".
#   newdata  -- optional data frame holding the model covariates; if missing,
#               a default covariate grid is built by the package-internal
#               helper cROCData() (not visible in this file).
#   parallel -- "no" (default), "multicore" (forking; disabled on Windows),
#               or "snow" (PSOCK cluster).
#   ncpus    -- number of worker processes when parallelising.
#   cl       -- optional pre-built 'cluster' object for parallel = "snow";
#               when supplied, the caller remains responsible for stopping it.
#
# Returns a list with the matched call, the (possibly constructed) newdata,
# and matrices `thresholds`, `YI`, `FPF`, `TPF` with columns est/ql/qh.
compute.threshold.YI.cROC.bnp <- function(object, newdata, parallel = c("no", "multicore", "snow"), ncpus = 1, cl = NULL) {
    # Work done for one MCMC iteration `k`:
    #   res0, res1 -- posterior draws for the healthy/diseased regressions
    #                 (components beta, sd and, for mixtures, probs).
    #   Xhp, Xdp   -- design matrices of newdata under each group's model.
    #   Lh, Ld     -- number of normal mixture components per group
    #                 (1 = single normal); the four branches below cover every
    #                 normal/mixture combination.
    #   grid       -- common grid of test values on which CDFs are evaluated.
    doMCMCTH <- function(k, res0, res1, Xhp, Xdp, Lh, Ld, grid) {
        npred <- nrow(Xhp)    # number of covariate rows (assumes Xhp/Xdp built from the same newdata)
        ngrid <- length(grid)
        # Conditional CDFs on the grid, one column per covariate value.
        F0 <- F1 <- matrix(0, nrow = ngrid, ncol = npred)
        thresholds.s <- YI.s <- TPF.s <- FPF.s <- vector(length = npred)
        # Case 1: single normal in both groups -- closed-form pnorm().
        if(Ld == 1 & Lh == 1){
            mu.h <- Xhp%*%res0$beta[k,]
            mu.d <- Xdp%*%res1$beta[k,]
            for(l in 1:npred){
                F0[,l] <- pnorm(grid, mu.h[l], res0$sd[k])
                F1[,l] <- pnorm(grid, mu.d[l], res1$sd[k])
                dif <- abs(F0[,l] - F1[,l])
                # Ties at the maximum are broken by averaging all maximisers.
                thresholds.s[l] <- mean(grid[which(dif == max(dif))])
                YI.s[l] <- max(dif)
                TPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.d[l], res1$sd[k])
                FPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.h[l], res0$sd[k])
            }
        }
        # Case 2: mixture in healthy, single normal in diseased.
        # norMix()/pnorMix() construct and evaluate a normal-mixture CDF
        # (presumably imported from the nor1mix package -- confirm in NAMESPACE).
        if(Ld == 1 & Lh > 1){
            mu.d <- Xdp%*%res1$beta[k,]
            mu.h <- Xhp%*%t(res0$beta[k,,])
            for(l in 1:npred){
                aux0 <- norMix(mu = c(mu.h[l,]), sigma = res0$sd[k,], w = res0$probs[k,])
                F0[,l] <- pnorMix(grid, aux0)
                F1[,l] <- pnorm(grid, mu.d[l], res1$sd[k])
                dif <- abs(F0[,l] - F1[,l])
                thresholds.s[l] <- mean(grid[which(dif == max(dif))])
                YI.s[l] <- max(dif)
                TPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.d[l], res1$sd[k])
                FPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux0)
            }
        }
        # Case 3: single normal in healthy, mixture in diseased.
        if(Ld > 1 & Lh == 1){
            mu.h <- Xhp%*%res0$beta[k,]
            mu.d <- Xdp%*%t(res1$beta[k,,])
            for(l in 1:npred){
                aux1 <- norMix(mu = c(mu.d[l,]), sigma = res1$sd[k,], w = res1$probs[k,])
                F0[,l] <- pnorm(grid, mu.h[l], res0$sd[k])
                F1[,l] <- pnorMix(grid, aux1)
                dif <- abs(F0[,l] - F1[,l])
                thresholds.s[l] <- mean(grid[which(dif == max(dif))])
                YI.s[l] <- max(dif)
                TPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux1)
                FPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.h[l], res0$sd[k])
            }
        }
        # Case 4: mixtures in both groups.
        if(Ld > 1 & Lh > 1){
            mu.h <- Xhp%*%t(res0$beta[k,,])
            mu.d <- Xdp%*%t(res1$beta[k,,])
            for(l in 1:npred) {
                aux0 <- norMix(mu = c(mu.h[l,]), sigma = res0$sd[k,], w = res0$probs[k,])
                aux1 <- norMix(mu = c(mu.d[l,]), sigma = res1$sd[k,], w = res1$probs[k,])
                F0[,l] <- pnorMix(grid, aux0)
                F1[,l] <- pnorMix(grid, aux1)
                difbb <- abs(F0[,l] - F1[,l])
                thresholds.s[l] <- mean(grid[which(difbb == max(difbb))])
                YI.s[l] <- max(difbb)
                TPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux1)
                FPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux0)
            }
        }
        # Per-iteration results, one entry per covariate value.
        res <- list()
        res$thresholds.s <- thresholds.s
        res$YI.s <- YI.s
        res$TPF.s <- TPF.s
        res$FPF.s <- FPF.s
        res
    }
    # ---- input validation --------------------------------------------------
    # NOTE(review): inherits(object, "cROC.bnp") would be more robust than
    # comparing the first class name.
    if(class(object)[1] != "cROC.bnp") {
        stop(paste0("This function can not be used for this object class: ", class(object)[1]))
    }
    parallel <- match.arg(parallel)
    #names.cov.h <- all.vars(object$fit$h$formula)[-1]
    #names.cov.d <- all.vars(object$fit$d$formula)[-1]
    #names.cov <- c(names.cov.h, names.cov.d[is.na(match(names.cov.d, names.cov.h))])
    # Union of covariate names appearing in either group's formula (h first).
    # get_vars_formula() is a package-internal helper (not visible here).
    names.cov.h <- get_vars_formula(object$fit$h$formula)
    names.cov.d <- get_vars_formula(object$fit$d$formula)
    names.cov <- c(names.cov.h, names.cov.d[is.na(match(names.cov.d, names.cov.h))])
    if(!missing(newdata) && !inherits(newdata, "data.frame"))
        stop("Newdata must be a data frame")
    if(!missing(newdata) && length(names.cov) != 0 && sum(is.na(match(names.cov, names(newdata)))))
        stop("Not all needed variables are supplied in newdata")
    if(missing(newdata)) {
        newdata <- cROCData(object$data, names.cov, object$group)
    } else {
        # Keep only the model covariates; drop incomplete rows.
        newdata <- na.omit(newdata[,names.cov,drop=FALSE])
    }
    # Compute F_D|X and F_{\bar{D}}|X
    # Design matrices of newdata under each group's stored model-matrix object.
    X0p <- predict(object$fit$h$mm, newdata = newdata)$X
    X1p <- predict(object$fit$d$mm, newdata = newdata)$X
    Lh <- object$prior$h$L
    Ld <- object$prior$d$L
    y0 <- object$data_model$y$h
    y1 <- object$data_model$y$d
    n0 <- length(y0)
    n1 <- length(y1)
    # Grid of test values spanning both groups, padded by 1 on each side,
    # with at least 500 points (more if either sample is larger).
    grid <- seq(min(c(y0, y1), na.rm = TRUE) - 1, max(c(y0, y1), na.rm = TRUE) + 1, length = max(500, c(n0,n1)))
    if(object$mcmc$nsave > 0) {
        do_mc <- do_snow <- FALSE
        if (parallel != "no" && ncpus > 1L) {
            if (parallel == "multicore") {
                do_mc <- .Platform$OS.type != "windows"   # forking unavailable on Windows
            } else if (parallel == "snow") {
                do_snow <- TRUE
            }
            if (!do_mc && !do_snow) {
                ncpus <- 1L   # fall back to sequential execution
            }
            loadNamespace("parallel") # get this out of the way before recording seed
        }
        # Seed
        #if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1)
        #seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE)
        # Apply function
        # Run doMCMCTH over every saved MCMC iteration, sequentially or in
        # parallel depending on the settings above.
        resBoot <- if (ncpus > 1L && (do_mc || do_snow)) {
            if (do_mc) {
                parallel::mclapply(seq_len(object$mcmc$nsave), doMCMCTH,
                    res0 = object$fit$h, res1 = object$fit$d,
                    Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid,
                    mc.cores = ncpus)
            } else if (do_snow) {
                if (is.null(cl)) {
                    # Build a local PSOCK cluster and tear it down afterwards.
                    cl <- parallel::makePSOCKcluster(rep("localhost", ncpus))
                    if(RNGkind()[1L] == "L'Ecuyer-CMRG") {
                        parallel::clusterSetRNGStream(cl)   # reproducible parallel RNG
                    }
                    res <- parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH,
                        res0 = object$fit$h, res1 = object$fit$d,
                        Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid)
                    parallel::stopCluster(cl)
                    res
                } else {
                    if(!inherits(cl, "cluster")) {
                        stop("Class of object 'cl' is not correct")
                    } else {
                        # User-supplied cluster: the caller stops it.
                        parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH,
                            res0 = object$fit$h, res1 = object$fit$d,
                            Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid)
                    }
                }
            }
        } else {
            lapply(seq_len(object$mcmc$nsave), doMCMCTH,
                res0 = object$fit$h, res1 = object$fit$d,
                Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid)
        }
        # Collect the per-iteration vectors into (npred x nsave) matrices.
        resBoot <- simplify2array(resBoot)
        thresholds.s <- simplify2array(resBoot["thresholds.s",])
        YI.s <- simplify2array(resBoot["YI.s",])
        TPF.s <- simplify2array(resBoot["TPF.s",])
        FPF.s <- simplify2array(resBoot["FPF.s",])
    } else {
        stop("nsave should be larger than zero.")
    }
    # Posterior summaries per covariate value: mean plus pointwise 95%
    # credible bounds across MCMC iterations.
    res <- list()
    res$call <- match.call()
    res$newdata <- newdata
    res$thresholds <- cbind(est = apply(thresholds.s, 1, mean),
        ql = apply(thresholds.s, 1, quantile, 0.025),
        qh = apply(thresholds.s, 1, quantile, 0.975))
    res$YI <- cbind(est = apply(YI.s, 1, mean),
        ql = apply(YI.s, 1, quantile, 0.025),
        qh = apply(YI.s, 1, quantile, 0.975))
    res$FPF <- cbind(est = apply(FPF.s, 1, mean),
        ql = apply(FPF.s, 1, quantile, 0.025),
        qh = apply(FPF.s, 1, quantile, 0.975))
    res$TPF <- cbind(est = apply(TPF.s, 1, mean),
        ql = apply(TPF.s, 1, quantile, 0.025),
        qh = apply(TPF.s, 1, quantile, 0.975))
    res
}
no_license
albrizre/spatstat.revdep
R
false
false
8,024
r
compute.threshold.YI.cROC.bnp <- function(object, newdata, parallel = c("no", "multicore", "snow"), ncpus = 1, cl = NULL) { doMCMCTH <- function(k, res0, res1, Xhp, Xdp, Lh, Ld, grid) { npred <- nrow(Xhp) ngrid <- length(grid) F0 <- F1 <- matrix(0, nrow = ngrid, ncol = npred) thresholds.s <- YI.s <- TPF.s <- FPF.s <- vector(length = npred) if(Ld == 1 & Lh == 1){ mu.h <- Xhp%*%res0$beta[k,] mu.d <- Xdp%*%res1$beta[k,] for(l in 1:npred){ F0[,l] <- pnorm(grid, mu.h[l], res0$sd[k]) F1[,l] <- pnorm(grid, mu.d[l], res1$sd[k]) dif <- abs(F0[,l] - F1[,l]) thresholds.s[l] <- mean(grid[which(dif == max(dif))]) YI.s[l] <- max(dif) TPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.d[l], res1$sd[k]) FPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.h[l], res0$sd[k]) } } if(Ld == 1 & Lh > 1){ mu.d <- Xdp%*%res1$beta[k,] mu.h <- Xhp%*%t(res0$beta[k,,]) for(l in 1:npred){ aux0 <- norMix(mu = c(mu.h[l,]), sigma = res0$sd[k,], w = res0$probs[k,]) F0[,l] <- pnorMix(grid, aux0) F1[,l] <- pnorm(grid, mu.d[l], res1$sd[k]) dif <- abs(F0[,l] - F1[,l]) thresholds.s[l] <- mean(grid[which(dif == max(dif))]) YI.s[l] <- max(dif) TPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.d[l], res1$sd[k]) FPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux0) } } if(Ld > 1 & Lh == 1){ mu.h <- Xhp%*%res0$beta[k,] mu.d <- Xdp%*%t(res1$beta[k,,]) for(l in 1:npred){ aux1 <- norMix(mu = c(mu.d[l,]), sigma = res1$sd[k,], w = res1$probs[k,]) F0[,l] <- pnorm(grid, mu.h[l], res0$sd[k]) F1[,l] <- pnorMix(grid, aux1) dif <- abs(F0[,l] - F1[,l]) thresholds.s[l] <- mean(grid[which(dif == max(dif))]) YI.s[l] <- max(dif) TPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux1) FPF.s[l] <- 1 - pnorm(thresholds.s[l], mu.h[l], res0$sd[k]) } } if(Ld > 1 & Lh > 1){ mu.h <- Xhp%*%t(res0$beta[k,,]) mu.d <- Xdp%*%t(res1$beta[k,,]) for(l in 1:npred) { aux0 <- norMix(mu = c(mu.h[l,]), sigma = res0$sd[k,], w = res0$probs[k,]) aux1 <- norMix(mu = c(mu.d[l,]), sigma = res1$sd[k,], w = res1$probs[k,]) F0[,l] <- pnorMix(grid, aux0) F1[,l] <- pnorMix(grid, aux1) difbb <- 
abs(F0[,l] - F1[,l]) thresholds.s[l] <- mean(grid[which(difbb == max(difbb))]) YI.s[l] <- max(difbb) TPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux1) FPF.s[l] <- 1 - pnorMix(thresholds.s[l], aux0) } } res <- list() res$thresholds.s <- thresholds.s res$YI.s <- YI.s res$TPF.s <- TPF.s res$FPF.s <- FPF.s res } if(class(object)[1] != "cROC.bnp") { stop(paste0("This function can not be used for this object class: ", class(object)[1])) } parallel <- match.arg(parallel) #names.cov.h <- all.vars(object$fit$h$formula)[-1] #names.cov.d <- all.vars(object$fit$d$formula)[-1] #names.cov <- c(names.cov.h, names.cov.d[is.na(match(names.cov.d, names.cov.h))]) names.cov.h <- get_vars_formula(object$fit$h$formula) names.cov.d <- get_vars_formula(object$fit$d$formula) names.cov <- c(names.cov.h, names.cov.d[is.na(match(names.cov.d, names.cov.h))]) if(!missing(newdata) && !inherits(newdata, "data.frame")) stop("Newdata must be a data frame") if(!missing(newdata) && length(names.cov) != 0 && sum(is.na(match(names.cov, names(newdata))))) stop("Not all needed variables are supplied in newdata") if(missing(newdata)) { newdata <- cROCData(object$data, names.cov, object$group) } else { newdata <- na.omit(newdata[,names.cov,drop=FALSE]) } # Compute F_D|X and F_{\bar{D}}|X X0p <- predict(object$fit$h$mm, newdata = newdata)$X X1p <- predict(object$fit$d$mm, newdata = newdata)$X Lh <- object$prior$h$L Ld <- object$prior$d$L y0 <- object$data_model$y$h y1 <- object$data_model$y$d n0 <- length(y0) n1 <- length(y1) grid <- seq(min(c(y0, y1), na.rm = TRUE) - 1, max(c(y0, y1), na.rm = TRUE) + 1, length = max(500, c(n0,n1))) if(object$mcmc$nsave > 0) { do_mc <- do_snow <- FALSE if (parallel != "no" && ncpus > 1L) { if (parallel == "multicore") { do_mc <- .Platform$OS.type != "windows" } else if (parallel == "snow") { do_snow <- TRUE } if (!do_mc && !do_snow) { ncpus <- 1L } loadNamespace("parallel") # get this out of the way before recording seed } # Seed #if (!exists(".Random.seed", envir = 
.GlobalEnv, inherits = FALSE)) runif(1) #seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE) # Apply function resBoot <- if (ncpus > 1L && (do_mc || do_snow)) { if (do_mc) { parallel::mclapply(seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid, mc.cores = ncpus) } else if (do_snow) { if (is.null(cl)) { cl <- parallel::makePSOCKcluster(rep("localhost", ncpus)) if(RNGkind()[1L] == "L'Ecuyer-CMRG") { parallel::clusterSetRNGStream(cl) } res <- parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid) parallel::stopCluster(cl) res } else { if(!inherits(cl, "cluster")) { stop("Class of object 'cl' is not correct") } else { parallel::parLapply(cl, seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid) } } } } else { lapply(seq_len(object$mcmc$nsave), doMCMCTH, res0 = object$fit$h, res1 = object$fit$d, Lh = Lh, Ld = Ld, Xhp = X0p, Xdp = X1p, grid = grid) } resBoot <- simplify2array(resBoot) thresholds.s <- simplify2array(resBoot["thresholds.s",]) YI.s <- simplify2array(resBoot["YI.s",]) TPF.s <- simplify2array(resBoot["TPF.s",]) FPF.s <- simplify2array(resBoot["FPF.s",]) } else { stop("nsave should be larger than zero.") } res <- list() res$call <- match.call() res$newdata <- newdata res$thresholds <- cbind(est = apply(thresholds.s, 1, mean), ql = apply(thresholds.s, 1, quantile, 0.025), qh = apply(thresholds.s, 1, quantile, 0.975)) res$YI <- cbind(est = apply(YI.s, 1, mean), ql = apply(YI.s, 1, quantile, 0.025), qh = apply(YI.s, 1, quantile, 0.975)) res$FPF <- cbind(est = apply(FPF.s, 1, mean), ql = apply(FPF.s, 1, quantile, 0.025), qh = apply(FPF.s, 1, quantile, 0.975)) res$TPF <- cbind(est = apply(TPF.s, 1, mean), ql = apply(TPF.s, 1, quantile, 0.025), qh = apply(TPF.s, 1, quantile, 0.975)) res }
# Average reported case counts (AnzahlFall) per weekday over the most recent
# `backweek` weeks of `timeline` (German RKI-style column names).
# NOTE(review): depends on globals `timeline` and `backweek` defined elsewhere,
# and the result is bound to `weekdays`, shadowing base::weekdays() from this
# point on -- consider renaming the result object.
weekdays <- timeline %>%
  mutate(WD = weekdays(Meldedatum)) %>%   # weekday label (locale-dependent)
  mutate(DoW = wday(Meldedatum)) %>%      # numeric day of week, for sorting
  # keep only the last backweek*7 days of reports
  filter(Meldedatum > max(Meldedatum - backweek * 7)) %>%
  group_by(DoW, WD) %>%
  summarize(Fallzahl_Durchschnitt = round(mean(AnzahlFall), 2),
            sd = sd(AnzahlFall),
            .groups = "keep") %>%
  arrange(DoW) %>%
  as.data.frame() %>%
  # NOTE(review): `sd` is computed above but dropped by this select --
  # confirm whether it was meant to be reported.
  select(Wochentag = WD, Fallzahl_Durchschnitt, DoW)
/scripts/weekdays.R
no_license
klabrx/Corona
R
false
false
384
r
weekdays <- timeline %>% mutate(WD = weekdays(Meldedatum)) %>% mutate(DoW = wday(Meldedatum)) %>% filter(Meldedatum > max(Meldedatum - backweek * 7)) %>% group_by(DoW, WD) %>% summarize(Fallzahl_Durchschnitt = round(mean(AnzahlFall), 2), sd = sd(AnzahlFall), .groups = "keep") %>% arrange(DoW) %>% as.data.frame() %>% select(Wochentag = WD, Fallzahl_Durchschnitt, DoW)
#' Find Labels from rset Object
#'
#' Produce a vector of resampling labels (e.g. "Fold1") from
#' an `rset` object. Currently, `nested_cv`
#' is not supported.
#'
#' @param object An `rset` object
#' @param make_factor A logical for whether the results should be
#'  character or a factor.
#' @param ... Not currently used.
#' @return A single character or factor vector.
#' @export
#' @examples
#' labels(vfold_cv(mtcars))
labels.rset <- function(object, make_factor = FALSE, ...) {
  if (inherits(object, "nested_cv")) {
    stop("`labels` not implemented for nested resampling", call. = FALSE)
  }
  if (make_factor) {
    as.factor(object$id)
  } else {
    as.character(object$id)
  }
}

#' @rdname labels.rset
#' @export
labels.vfold_cv <- function(object, make_factor = FALSE, ...) {
  if (inherits(object, "nested_cv")) {
    stop("`labels` not implemented for nested resampling", call. = FALSE)
  }
  # Repeated CV carries two id columns; join them as "Repeat.Fold".
  is_repeated <- attr(object, "repeats") > 1
  if (is_repeated) {
    out <- as.character(paste(object$id, object$id2, sep = "."))
  } else {
    out <- as.character(object$id)
  }
  if (make_factor) {
    out <- as.factor(out)
  }
  out
}

#' Find Labels from rsplit Object
#'
#' Produce a tibble of identification variables so that single
#' splits can be linked to a particular resample.
#'
#' @param object An `rsplit` object
#' @param ... Not currently used.
#' @return A tibble.
#' @seealso add_resample_id
#' @export
#' @examples
#' cv_splits <- vfold_cv(mtcars)
#' labels(cv_splits$splits[[1]])
labels.rsplit <- function(object, ...) {
  # Splits created inside an rset carry their id tibble; otherwise return an
  # empty tibble.
  out <- if ("id" %in% names(object)) object$id else tibble()
  out
}

## The `pretty` methods below are good for when you need to
## textually describe the resampling procedure. Note that they
## can have more than one element (in the case of nesting)

#' Short Descriptions of rsets
#'
#' Produce a character vector describing the resampling method.
#'
#' @param x An `rset` object
#' @param ... Not currently used.
#' @return A character vector.
#' @export pretty.vfold_cv
#' @export
#' @method pretty vfold_cv
#' @keywords internal
pretty.vfold_cv <- function(x, ...) {
  details <- attributes(x)
  res <- paste0(details$v, "-fold cross-validation")
  if (details$repeats > 1) {
    res <- paste(res, "repeated", details$repeats, "times")
  }
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  res
}

#' @export pretty.loo_cv
#' @export
#' @method pretty loo_cv
#' @rdname pretty.vfold_cv
pretty.loo_cv <- function(x, ...) "Leave-one-out cross-validation"

#' @export pretty.apparent
#' @export
#' @method pretty apparent
#' @rdname pretty.vfold_cv
pretty.apparent <- function(x, ...) "Apparent sampling"

#' @export pretty.rolling_origin
#' @export
#' @method pretty rolling_origin
#' @rdname pretty.vfold_cv
pretty.rolling_origin <- function(x, ...) "Rolling origin forecast resampling"

#' @export pretty.sliding_window
#' @export
#' @method pretty sliding_window
#' @rdname pretty.vfold_cv
pretty.sliding_window <- function(x, ...) "Sliding window resampling"

#' @export pretty.sliding_index
#' @export
#' @method pretty sliding_index
#' @rdname pretty.vfold_cv
pretty.sliding_index <- function(x, ...) "Sliding index resampling"

#' @export pretty.sliding_period
#' @export
#' @method pretty sliding_period
#' @rdname pretty.vfold_cv
pretty.sliding_period <- function(x, ...) "Sliding period resampling"

#' @export pretty.mc_cv
#' @export
#' @method pretty mc_cv
#' @rdname pretty.vfold_cv
pretty.mc_cv <- function(x, ...) {
  details <- attributes(x)
  res <- paste0(
    "Monte Carlo cross-validation (",
    signif(details$prop, 2), "/",
    signif(1 - details$prop, 2), ") with ",
    details$times, " resamples "
  )
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  res
}

#' @export pretty.validation_split
#' @export
#' @method pretty validation_split
#' @rdname pretty.vfold_cv
pretty.validation_split <- function(x, ...) {
  details <- attributes(x)
  res <- paste0(
    "Validation Set Split (",
    signif(details$prop, 2), "/",
    signif(1 - details$prop, 2), ") "
  )
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  res
}

#' @export pretty.nested_cv
#' @export
#' @method pretty nested_cv
#' @rdname pretty.vfold_cv
pretty.nested_cv <- function(x, ...) {
  details <- attributes(x)
  # The outer/inner schemes may be stored either as quoted calls (describe
  # via the corresponding pretty() method) or as expressions (deparse them).
  if (is_call(details$outside)) {
    class(x) <- class(x)[!(class(x) == "nested_cv")]
    outer_label <- pretty(x)
  } else {
    outer_label <- paste0("`", deparse(details$outside), "`")
  }
  inner_label <- if (is_call(details$inside)) {
    pretty(x$inner_resamples[[1]])
  } else {
    paste0("`", deparse(details$inside), "`")
  }
  res <- c(
    "Nested resampling:",
    paste(" outer:", outer_label),
    paste(" inner:", inner_label)
  )
  res
}

#' @export pretty.bootstraps
#' @export
#' @method pretty bootstraps
#' @rdname pretty.vfold_cv
pretty.bootstraps <- function(x, ...) {
  details <- attributes(x)
  res <- "Bootstrap sampling"
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  if (details$apparent) {
    res <- paste(res, "with apparent sample")
  }
  res
}

#' @export pretty.group_vfold_cv
#' @export
#' @method pretty group_vfold_cv
#' @rdname pretty.vfold_cv
pretty.group_vfold_cv <- function(x, ...) {
  details <- attributes(x)
  paste0("Group ", details$v, "-fold cross-validation")
}

#' Augment a data set with resampling identifiers
#'
#' For a data set, `add_resample_id()` will add at least one new column that
#' identifies which resample that the data came from. In most cases, a single
#' column is added but for some resampling methods two or more are added.
#' @param .data A data frame
#' @param split A single `rset` object.
#' @param dots A single logical: should the id columns be prefixed with a "."
#' to avoid name conflicts with `.data`?
#' @return An updated data frame.
#' @examples
#' library(dplyr)
#'
#' set.seed(363)
#' car_folds <- vfold_cv(mtcars, repeats = 3)
#'
#' analysis(car_folds$splits[[1]]) %>%
#'   add_resample_id(car_folds$splits[[1]]) %>%
#'   head()
#'
#' car_bt <- bootstraps(mtcars)
#'
#' analysis(car_bt$splits[[1]]) %>%
#'   add_resample_id(car_bt$splits[[1]]) %>%
#'   head()
#' @seealso labels.rsplit
#' @export
add_resample_id <- function(.data, split, dots = FALSE) {
  # `!= 1` (not `> 1`): logical(0) previously slipped past this guard and
  # produced a confusing "argument is of length zero" error at `if (dots)`.
  if (!inherits(dots, "logical") || length(dots) != 1) {
    stop("`dots` should be a single logical.", call. = FALSE)
  }
  if (!inherits(.data, "data.frame")) {
    stop("`.data` should be a data frame.", call. = FALSE)
  }
  if (!inherits(split, "rsplit")) {
    stop("`split` should be a single 'rset' object.", call. = FALSE)
  }
  labs <- labels(split)
  # A single rsplit must yield a one-row tibble of labels.  Fixed inverted
  # condition: the original `!is_tibble(labs) && nrow(labs) == 1` only fired
  # for a non-tibble with exactly one row, so invalid inputs slipped through
  # to a confusing downstream failure (or a silent no-op cbind).
  if (!tibble::is_tibble(labs) || nrow(labs) != 1) {
    stop("`split` should be a single 'rset' object.", call. = FALSE)
  }
  if (dots) {
    colnames(labs) <- paste0(".", colnames(labs))
  }
  cbind(.data, labs)
}
/R/lables.R
no_license
kmdupr33/rsample
R
false
false
6,892
r
#' Find Labels from rset Object #' #' Produce a vector of resampling labels (e.g. "Fold1") from #' an `rset` object. Currently, `nested_cv` #' is not supported. #' #' @param object An `rset` object #' @param make_factor A logical for whether the results should be #' character or a factor. #' @param ... Not currently used. #' @return A single character or factor vector. #' @export #' @examples #' labels(vfold_cv(mtcars)) labels.rset <- function(object, make_factor = FALSE, ...) { if (inherits(object, "nested_cv")) stop("`labels` not implemented for nested resampling", call. = FALSE) if (make_factor) as.factor(object$id) else as.character(object$id) } #' @rdname labels.rset #' @export labels.vfold_cv <- function(object, make_factor = FALSE, ...) { if (inherits(object, "nested_cv")) stop("`labels` not implemented for nested resampling", call. = FALSE) is_repeated <- attr(object, "repeats") > 1 if (is_repeated) { out <- as.character(paste(object$id, object$id2, sep = ".")) } else out <- as.character(object$id) if (make_factor) out <- as.factor(out) out } #' Find Labels from rsplit Object #' #' Produce a tibble of identification variables so that single #' splits can be linked to a particular resample. #' #' @param object An `rsplit` object #' @param ... Not currently used. #' @return A tibble. #' @seealso add_resample_id #' @export #' @examples #' cv_splits <- vfold_cv(mtcars) #' labels(cv_splits$splits[[1]]) labels.rsplit <- function(object, ...) { out <- if ("id" %in% names(object)) object$id else tibble() out } ## The `pretty` methods below are good for when you need to ## textually describe the resampling procedure. Note that they ## can have more than one element (in the case of nesting) #' Short Decriptions of rsets #' #' Produce a chracter vector of describing the resampling method. #' #' @param x An `rset` object #' @param ... Not currently used. #' @return A character vector. 
#' @export pretty.vfold_cv
#' @export
#' @method pretty vfold_cv
#' @keywords internal
pretty.vfold_cv <- function(x, ...) {
  details <- attributes(x)
  res <- paste0(details$v, "-fold cross-validation")
  # Mention repeats/stratification only when they were actually used.
  if (details$repeats > 1) {
    res <- paste(res, "repeated", details$repeats, "times")
  }
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  res
}

#' @export pretty.loo_cv
#' @export
#' @method pretty loo_cv
#' @rdname pretty.vfold_cv
pretty.loo_cv <- function(x, ...) "Leave-one-out cross-validation"

#' @export pretty.apparent
#' @export
#' @method pretty apparent
#' @rdname pretty.vfold_cv
pretty.apparent <- function(x, ...) "Apparent sampling"

#' @export pretty.rolling_origin
#' @export
#' @method pretty rolling_origin
#' @rdname pretty.vfold_cv
pretty.rolling_origin <- function(x, ...) "Rolling origin forecast resampling"

#' @export pretty.sliding_window
#' @export
#' @method pretty sliding_window
#' @rdname pretty.vfold_cv
pretty.sliding_window <- function(x, ...) "Sliding window resampling"

#' @export pretty.sliding_index
#' @export
#' @method pretty sliding_index
#' @rdname pretty.vfold_cv
pretty.sliding_index <- function(x, ...) "Sliding index resampling"

#' @export pretty.sliding_period
#' @export
#' @method pretty sliding_period
#' @rdname pretty.vfold_cv
pretty.sliding_period <- function(x, ...) "Sliding period resampling"

#' @export pretty.mc_cv
#' @export
#' @method pretty mc_cv
#' @rdname pretty.vfold_cv
pretty.mc_cv <- function(x, ...) {
  details <- attributes(x)
  res <- paste0(
    "Monte Carlo cross-validation (",
    signif(details$prop, 2),
    "/",
    signif(1 - details$prop, 2),
    ") with ",
    details$times,
    " resamples "
  )
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  res
}

#' @export pretty.validation_split
#' @export
#' @method pretty validation_split
#' @rdname pretty.vfold_cv
pretty.validation_split <- function(x, ...) {
  details <- attributes(x)
  res <- paste0(
    "Validation Set Split (",
    signif(details$prop, 2),
    "/",
    signif(1 - details$prop, 2),
    ") "
  )
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  res
}

#' @export pretty.nested_cv
#' @export
#' @method pretty nested_cv
#' @rdname pretty.vfold_cv
pretty.nested_cv <- function(x, ...) {
  details <- attributes(x)

  # The outer/inner specifications can each be an unevaluated call (in which
  # case the corresponding resamples are pretty-printed) or a symbol (shown
  # back-ticked as supplied).
  if (is_call(details$outside)) {
    # Drop the nested_cv class so dispatch reaches the outer scheme's method.
    class(x) <- class(x)[!(class(x) == "nested_cv")]
    outer_label <- pretty(x)
  } else {
    outer_label <- paste0("`", deparse(details$outside), "`")
  }

  inner_label <- if (is_call(details$inside)) {
    pretty(x$inner_resamples[[1]])
  } else {
    paste0("`", deparse(details$inside), "`")
  }

  c(
    "Nested resampling:",
    paste(" outer:", outer_label),
    paste(" inner:", inner_label)
  )
}

#' @export pretty.bootstraps
#' @export
#' @method pretty bootstraps
#' @rdname pretty.vfold_cv
pretty.bootstraps <- function(x, ...) {
  details <- attributes(x)
  res <- "Bootstrap sampling"
  if (details$strata) {
    res <- paste(res, "using stratification")
  }
  if (details$apparent) {
    res <- paste(res, "with apparent sample")
  }
  res
}

#' @export pretty.group_vfold_cv
#' @export
#' @method pretty group_vfold_cv
#' @rdname pretty.vfold_cv
pretty.group_vfold_cv <- function(x, ...) {
  details <- attributes(x)
  paste0("Group ", details$v, "-fold cross-validation")
}

#' Augment a data set with resampling identifiers
#'
#' For a data set, `add_resample_id()` will add at least one new column that
#'  identifies which resample that the data came from. In most cases, a single
#'  column is added but for some resampling methods two or more are added.
#' @param .data A data frame
#' @param split A single `rsplit` object.
#' @param dots A single logical: should the id columns be prefixed with a "."
#'  to avoid name conflicts with `.data`?
#' @return An updated data frame.
#' @examples
#' library(dplyr)
#'
#' set.seed(363)
#' car_folds <- vfold_cv(mtcars, repeats = 3)
#'
#' analysis(car_folds$splits[[1]]) %>%
#'   add_resample_id(car_folds$splits[[1]]) %>%
#'   head()
#'
#' car_bt <- bootstraps(mtcars)
#'
#' analysis(car_bt$splits[[1]]) %>%
#'   add_resample_id(car_bt$splits[[1]]) %>%
#'   head()
#' @seealso labels.rsplit
#' @export
add_resample_id <- function(.data, split, dots = FALSE) {
  if (!inherits(dots, "logical") || length(dots) > 1) {
    stop("`dots` should be a single logical.", call. = FALSE)
  }
  if (!inherits(.data, "data.frame")) {
    stop("`.data` should be a data frame.", call. = FALSE)
  }
  if (!inherits(split, "rsplit")) {
    stop("`split` should be a single 'rsplit' object.", call. = FALSE)
  }

  labs <- labels(split)

  # Bug fix: the original condition (`!is_tibble(labs) && nrow(labs) == 1`)
  # only failed when the labels were NOT a tibble AND had exactly one row, so
  # a malformed `split` almost always slipped through.  The intended
  # invariant is "labels must be a one-row tibble".
  if (!tibble::is_tibble(labs) || nrow(labs) != 1) {
    stop("`split` should be a single 'rsplit' object.", call. = FALSE)
  }

  if (dots) {
    colnames(labs) <- paste0(".", colnames(labs))
  }

  cbind(.data, labs)
}
#+++++++++++++++++++++++++++
# xlsx.writeMultipleData
#+++++++++++++++++++++++++++++
# Write several data sets to one Excel workbook, one sheet per data set,
# with each sheet named after the argument expression that supplied it.
#
# file   : the path to the output workbook
# d1, d2 : the data to write (data frames or matrices accepted by write.xlsx)
#
# Bug fix: the original built `objects <- list(mtcars, d1, d2)` while the
# sheet names were derived only from the `d1`/`d2` arguments, so mtcars was
# written under the name "d1", d1 under "d2", and d2 under a sheet named NA.
# The stray `mtcars` is dropped so data and sheet names line up.
xlsx.writeMultipleData <- function(file, d1, d2) {
  require(xlsx, quietly = TRUE)
  objects <- list(d1, d2)
  # match.call() yields (fn, file, d1, d2); drop the first two so only the
  # expressions supplied for the data arguments remain as sheet names.
  fargs <- as.list(match.call(expand.dots = TRUE))
  objnames <- as.character(fargs)[-c(1, 2)]
  for (i in seq_along(objects)) {
    # The first sheet creates the workbook; subsequent sheets are appended.
    write.xlsx(objects[[i]], file, sheetName = objnames[i],
               append = (i > 1))
  }
}
/functionUtils.R
no_license
momokeith1123/AFDBPRDVAR
R
false
false
640
r
#+++++++++++++++++++++++++++
# xlsx.writeMultipleData
#+++++++++++++++++++++++++++++
# Write several data sets to one Excel workbook, one sheet per data set,
# with each sheet named after the argument expression that supplied it.
#
# file   : the path to the output workbook
# d1, d2 : the data to write (data frames or matrices accepted by write.xlsx)
#
# Bug fix: the original built `objects <- list(mtcars, d1, d2)` while the
# sheet names were derived only from the `d1`/`d2` arguments, so mtcars was
# written under the name "d1", d1 under "d2", and d2 under a sheet named NA.
# The stray `mtcars` is dropped so data and sheet names line up.
xlsx.writeMultipleData <- function(file, d1, d2) {
  require(xlsx, quietly = TRUE)
  objects <- list(d1, d2)
  # match.call() yields (fn, file, d1, d2); drop the first two so only the
  # expressions supplied for the data arguments remain as sheet names.
  fargs <- as.list(match.call(expand.dots = TRUE))
  objnames <- as.character(fargs)[-c(1, 2)]
  for (i in seq_along(objects)) {
    # The first sheet creates the workbook; subsequent sheets are appended.
    write.xlsx(objects[[i]], file, sheetName = objnames[i],
               append = (i > 1))
  }
}
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# Fix the RNG seed before any resampling so the CV folds are reproducible.
set.seed(1)

#:# data
# Pull the "electricity" benchmark data set from OpenML.
oml_data <- getOMLDataSet(data.name = "electricity")
head(oml_data$data)

#:# preprocessing
# No preprocessing is applied; the raw data is simply inspected again.
head(oml_data$data)

#:# model
# Binary classification task with a MARS ("earth") learner that produces
# class probabilities.  The task id must remain "task" so the digest below
# is unchanged.
clf_task <- makeClassifTask(id = "task", data = oml_data$data, target = "class")
mars_learner <- makeLearner(
  "classif.earth",
  par.vals = list(thresh = 0.001, minspan = 30),
  predict.type = "prob"
)

#:# hash
#:# c32d8211a6df67a5095ea6ba110efce6
# Fingerprint of the task/learner pair, used to identify this experiment.
hash <- digest(list(clf_task, mars_learner))
hash

#:# audit
# 5-fold cross-validation with a panel of classification metrics.
cv_desc <- makeResampleDesc("CV", iters = 5)
cv_result <- mlr::resample(
  mars_learner, clf_task, cv_desc,
  measures = list(acc, auc, tnr, tpr, ppv, f1)
)
ACC <- cv_result$aggr
ACC

#:# session info
# Record the session for reproducibility.
sink("sessionInfo.txt")
sessionInfo()
sink()
/models/openml_electricity/classification_class/c32d8211a6df67a5095ea6ba110efce6/code.R
no_license
pysiakk/CaseStudies2019S
R
false
false
712
r
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
# Fix the RNG seed before any resampling so the CV folds are reproducible.
set.seed(1)

#:# data
# Pull the "electricity" benchmark data set from OpenML.
oml_data <- getOMLDataSet(data.name = "electricity")
head(oml_data$data)

#:# preprocessing
# No preprocessing is applied; the raw data is simply inspected again.
head(oml_data$data)

#:# model
# Binary classification task with a MARS ("earth") learner that produces
# class probabilities.  The task id must remain "task" so the digest below
# is unchanged.
clf_task <- makeClassifTask(id = "task", data = oml_data$data, target = "class")
mars_learner <- makeLearner(
  "classif.earth",
  par.vals = list(thresh = 0.001, minspan = 30),
  predict.type = "prob"
)

#:# hash
#:# c32d8211a6df67a5095ea6ba110efce6
# Fingerprint of the task/learner pair, used to identify this experiment.
hash <- digest(list(clf_task, mars_learner))
hash

#:# audit
# 5-fold cross-validation with a panel of classification metrics.
cv_desc <- makeResampleDesc("CV", iters = 5)
cv_result <- mlr::resample(
  mars_learner, clf_task, cv_desc,
  measures = list(acc, auc, tnr, tpr, ppv, f1)
)
ACC <- cv_result$aggr
ACC

#:# session info
# Record the session for reproducibility.
sink("sessionInfo.txt")
sessionInfo()
sink()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_daily_stats.R \name{plot_daily_stats} \alias{plot_daily_stats} \title{Plot daily summary statistics} \usage{ plot_daily_stats( data, dates = Date, values = Value, groups = STATION_NUMBER, station_number, roll_days = 1, roll_align = "right", water_year_start = 1, start_year, end_year, exclude_years, complete_years = FALSE, months = 1:12, ignore_missing = FALSE, include_extremes = TRUE, inner_percentiles = c(25, 75), outer_percentiles = c(5, 95), add_year, log_discharge = TRUE, include_title = FALSE ) } \arguments{ \item{data}{Data frame of daily data that contains columns of dates, flow values, and (optional) groups (e.g. station numbers). Leave blank if using \code{station_number} argument.} \item{dates}{Name of column in \code{data} that contains dates formatted YYYY-MM-DD. Only required if dates column name is not 'Date' (default). Leave blank if using \code{station_number} argument.} \item{values}{Name of column in \code{data} that contains numeric flow values, in units of cubic metres per second. Only required if values column name is not 'Value' (default). Leave blank if using \code{station_number} argument.} \item{groups}{Name of column in \code{data} that contains unique identifiers for different data sets, if applicable. Only required if groups column name is not 'STATION_NUMBER'. Function will automatically group by a column named 'STATION_NUMBER' if present. Remove the 'STATION_NUMBER' column beforehand to remove this grouping. Leave blank if using \code{station_number} argument.} \item{station_number}{Character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of which to extract daily streamflow data from a HYDAT database. Requires \code{tidyhydat} package and a HYDAT database. Leave blank if using \code{data} argument.} \item{roll_days}{Numeric value of the number of days to apply a rolling mean. 
Default \code{1}.} \item{roll_align}{Character string identifying the direction of the rolling mean from the specified date, either by the first (\code{'left'}), last (\code{'right'}), or middle (\code{'center'}) day of the rolling n-day group of observations. Default \code{'right'}.} \item{water_year_start}{Numeric value indicating the month (\code{1} through \code{12}) of the start of water year for analysis. Default \code{1}.} \item{start_year}{Numeric value of the first year to consider for analysis. Leave blank to use the first year of the source data.} \item{end_year}{Numeric value of the last year to consider for analysis. Leave blank to use the last year of the source data.} \item{exclude_years}{Numeric vector of years to exclude from analysis. Leave blank to include all years.} \item{complete_years}{Logical values indicating whether to include only years with complete data in analysis. Default \code{FALSE}.} \item{months}{Numeric vector of months to include in analysis (e.g. \code{6:8} for Jun-Aug). Leave blank to summarize all months (default \code{1:12}).} \item{ignore_missing}{Logical value indicating whether dates with missing values should be included in the calculation. If \code{TRUE} then a statistic will be calculated regardless of missing dates. If \code{FALSE} then only those statistics from time periods with no missing dates will be returned. Default \code{FALSE}.} \item{include_extremes}{Logical value to indicate plotting a ribbon with the range of daily minimum and maximum flows. Default \code{TRUE}.} \item{inner_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the inner percentiles ribbon for plotting. Default \code{c(25,75)}, set to \code{NULL} for no inner ribbon.} \item{outer_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the outer percentiles ribbon for plotting. 
Default \code{c(5,95)}, set to \code{NULL} for no outer ribbon.}

\item{add_year}{Numeric value indicating a year of daily flows to add to
the daily statistics plot. Leave blank for no years.}

\item{log_discharge}{Logical value to indicate plotting the discharge axis
(Y-axis) on a logarithmic scale. Default \code{TRUE}.}

\item{include_title}{Logical value to indicate adding the group/station
number to the plot, if provided. Default \code{FALSE}.}
}
\value{
A list of ggplot2 objects with the following for each station provided:
\item{Daily_Stats}{a plot that contains daily flow statistics}
Default plots on each object:
\item{Mean}{daily mean}
\item{Median}{daily median}
\item{25-75 Percentiles}{a ribbon showing the range of data between the
daily 25th and 75th percentiles}
\item{5-95 Percentiles}{a ribbon showing the range of data between the
daily 5th and 95th percentiles}
\item{Minimum-Maximum}{a ribbon showing the range of data between the
daily minimum and maximums}
\item{'Year'}{(on annual plots) the daily flows for the designated year}
}
\description{
Plots means, medians, maximums, minimums, and percentiles for each day of
the year of flow values from a daily streamflow data set. Can determine
statistics of rolling mean days (e.g. 7-day flows) using the \code{roll_days}
argument. Calculates statistics from all values, unless specified. The
Maximum-Minimum band can be removed using the \code{include_extremes}
argument and the percentile bands can be customized using the
\code{inner_percentiles} and \code{outer_percentiles} arguments. Data
calculated using \code{calc_daily_stats()} function. Returns a list of
plots.
} \examples{ # Run if HYDAT database has been downloaded (using tidyhydat::download_hydat()) if (file.exists(tidyhydat::hy_downloaded_db())) { # Plot daily statistics using a data frame and data argument with defaults flow_data <- tidyhydat::hy_daily_flows(station_number = "08NM116") plot_daily_stats(data = flow_data, start_year = 1980) # Plot daily statistics using only years with no missing data plot_daily_stats(station_number = "08NM116", complete_years = TRUE) # Plot daily statistics and add a specific year's daily flows plot_daily_stats(station_number = "08NM116", start_year = 1980, add_year = 1985) # Plot daily statistics for 7-day flows for July-September months only plot_daily_stats(station_number = "08NM116", start_year = 1980, roll_days = 7, months = 7:9) # Plot daily statistics without a log-scale Discharge axis plot_daily_stats(station_number = "08NM116", start_year = 1981, end_year = 2010, log_discharge = FALSE) } } \seealso{ \code{\link{calc_daily_stats}} }
/man/plot_daily_stats.Rd
permissive
Rain3498/fasstr
R
false
true
6,878
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_daily_stats.R \name{plot_daily_stats} \alias{plot_daily_stats} \title{Plot daily summary statistics} \usage{ plot_daily_stats( data, dates = Date, values = Value, groups = STATION_NUMBER, station_number, roll_days = 1, roll_align = "right", water_year_start = 1, start_year, end_year, exclude_years, complete_years = FALSE, months = 1:12, ignore_missing = FALSE, include_extremes = TRUE, inner_percentiles = c(25, 75), outer_percentiles = c(5, 95), add_year, log_discharge = TRUE, include_title = FALSE ) } \arguments{ \item{data}{Data frame of daily data that contains columns of dates, flow values, and (optional) groups (e.g. station numbers). Leave blank if using \code{station_number} argument.} \item{dates}{Name of column in \code{data} that contains dates formatted YYYY-MM-DD. Only required if dates column name is not 'Date' (default). Leave blank if using \code{station_number} argument.} \item{values}{Name of column in \code{data} that contains numeric flow values, in units of cubic metres per second. Only required if values column name is not 'Value' (default). Leave blank if using \code{station_number} argument.} \item{groups}{Name of column in \code{data} that contains unique identifiers for different data sets, if applicable. Only required if groups column name is not 'STATION_NUMBER'. Function will automatically group by a column named 'STATION_NUMBER' if present. Remove the 'STATION_NUMBER' column beforehand to remove this grouping. Leave blank if using \code{station_number} argument.} \item{station_number}{Character string vector of seven digit Water Survey of Canada station numbers (e.g. \code{"08NM116"}) of which to extract daily streamflow data from a HYDAT database. Requires \code{tidyhydat} package and a HYDAT database. Leave blank if using \code{data} argument.} \item{roll_days}{Numeric value of the number of days to apply a rolling mean. 
Default \code{1}.} \item{roll_align}{Character string identifying the direction of the rolling mean from the specified date, either by the first (\code{'left'}), last (\code{'right'}), or middle (\code{'center'}) day of the rolling n-day group of observations. Default \code{'right'}.} \item{water_year_start}{Numeric value indicating the month (\code{1} through \code{12}) of the start of water year for analysis. Default \code{1}.} \item{start_year}{Numeric value of the first year to consider for analysis. Leave blank to use the first year of the source data.} \item{end_year}{Numeric value of the last year to consider for analysis. Leave blank to use the last year of the source data.} \item{exclude_years}{Numeric vector of years to exclude from analysis. Leave blank to include all years.} \item{complete_years}{Logical values indicating whether to include only years with complete data in analysis. Default \code{FALSE}.} \item{months}{Numeric vector of months to include in analysis (e.g. \code{6:8} for Jun-Aug). Leave blank to summarize all months (default \code{1:12}).} \item{ignore_missing}{Logical value indicating whether dates with missing values should be included in the calculation. If \code{TRUE} then a statistic will be calculated regardless of missing dates. If \code{FALSE} then only those statistics from time periods with no missing dates will be returned. Default \code{FALSE}.} \item{include_extremes}{Logical value to indicate plotting a ribbon with the range of daily minimum and maximum flows. Default \code{TRUE}.} \item{inner_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the inner percentiles ribbon for plotting. Default \code{c(25,75)}, set to \code{NULL} for no inner ribbon.} \item{outer_percentiles}{Numeric vector of two percentile values indicating the lower and upper limits of the outer percentiles ribbon for plotting. 
Default \code{c(5,95)}, set to \code{NULL} for no outer ribbon.}

\item{add_year}{Numeric value indicating a year of daily flows to add to
the daily statistics plot. Leave blank for no years.}

\item{log_discharge}{Logical value to indicate plotting the discharge axis
(Y-axis) on a logarithmic scale. Default \code{TRUE}.}

\item{include_title}{Logical value to indicate adding the group/station
number to the plot, if provided. Default \code{FALSE}.}
}
\value{
A list of ggplot2 objects with the following for each station provided:
\item{Daily_Stats}{a plot that contains daily flow statistics}
Default plots on each object:
\item{Mean}{daily mean}
\item{Median}{daily median}
\item{25-75 Percentiles}{a ribbon showing the range of data between the
daily 25th and 75th percentiles}
\item{5-95 Percentiles}{a ribbon showing the range of data between the
daily 5th and 95th percentiles}
\item{Minimum-Maximum}{a ribbon showing the range of data between the
daily minimum and maximums}
\item{'Year'}{(on annual plots) the daily flows for the designated year}
}
\description{
Plots means, medians, maximums, minimums, and percentiles for each day of
the year of flow values from a daily streamflow data set. Can determine
statistics of rolling mean days (e.g. 7-day flows) using the \code{roll_days}
argument. Calculates statistics from all values, unless specified. The
Maximum-Minimum band can be removed using the \code{include_extremes}
argument and the percentile bands can be customized using the
\code{inner_percentiles} and \code{outer_percentiles} arguments. Data
calculated using \code{calc_daily_stats()} function. Returns a list of
plots.
} \examples{ # Run if HYDAT database has been downloaded (using tidyhydat::download_hydat()) if (file.exists(tidyhydat::hy_downloaded_db())) { # Plot daily statistics using a data frame and data argument with defaults flow_data <- tidyhydat::hy_daily_flows(station_number = "08NM116") plot_daily_stats(data = flow_data, start_year = 1980) # Plot daily statistics using only years with no missing data plot_daily_stats(station_number = "08NM116", complete_years = TRUE) # Plot daily statistics and add a specific year's daily flows plot_daily_stats(station_number = "08NM116", start_year = 1980, add_year = 1985) # Plot daily statistics for 7-day flows for July-September months only plot_daily_stats(station_number = "08NM116", start_year = 1980, roll_days = 7, months = 7:9) # Plot daily statistics without a log-scale Discharge axis plot_daily_stats(station_number = "08NM116", start_year = 1981, end_year = 2010, log_discharge = FALSE) } } \seealso{ \code{\link{calc_daily_stats}} }
# Homogeneous Poisson process arrivals (typical call: BH = 30, L = 0.01, mu = 180)
#
# BH : length of the booking horizon
# L  : length of a single decision period, giving BH / L periods in total
# mu : expected total number of arrivals over the whole horizon
# Returns a vector of BH / L independent Poisson counts whose means sum to mu.
arrivals <- function(BH, L, mu) {
  rpois(BH / L, mu / (BH / L))
}
arrivals(30, 0.01, 180)

# Inhomogeneous Poisson process arrivals with intensity of the form a*t + b.
# Period i receives a Poisson count whose mean is the integral of the
# intensity over (i - 1, i], scaled by the number of periods.
ipparrivals <- function(BH, L, a, b) {
  i <- seq_len(BH / L)
  # Vectorised over the per-period means; replaces the original grow-by-c()
  # loop, which copied the whole vector on every iteration.
  rpois(length(i), ((a / 2) * (i^2 - (i - 1)^2) + b * (i - (i - 1))) / (BH / L))
}
ipparrivals(30, 0.01, 1/3, 1)

# Piecewise homogeneous Poisson process arrivals.
# segments : lengths of the homogeneous pieces (must sum to BH)
# rates    : expected total arrivals within each corresponding segment
pharrivals <- function(BH, L, segments, rates) {
  # Bug fix: the original only print()-ed this diagnostic and then carried on
  # computing with inconsistent inputs; invalid input is now a proper error.
  if (sum(segments) != BH) {
    stop("Segments do not sum to length of booking horizon")
  }
  cust <- numeric(0)
  for (i in seq_along(segments)) {
    cust <- c(cust, rpois(segments[i] / L, rates[i] / (segments[i] / L)))
  }
  cust
}
pharrivals(30, 0.01, c(10, 10, 10), c(20, 30, 40))

# Aggregate per-period counts into per-day (per-DCP) totals.
# Bug fix: the original returned an error *string* from an otherwise numeric
# function, which downstream calls such as cumsum() would then choke on;
# invalid input is now a proper error.
aggBH <- function(data, BH) {
  k <- length(data) / BH
  if (k %% 1 != 0) {
    stop("vector not an integer multiple of booking horizon")
  }
  unname(tapply(data, (seq_along(data) - 1) %/% k, sum))
}
aggBH(arrivals(30, 0.01, 180), 30)

# Generate two types of customer: type 1 and type 2
Cust1 <- pharrivals(BH = 30, L = 0.01, segments = c(10, 10, 10), rates = c(20, 30, 40))
Cust2 <- pharrivals(BH = 30, L = 0.01, segments = c(15, 10, 5), rates = c(5, 35, 50))

# Save to files and read in
write.csv(Cust1, "Cust1.csv", row.names = FALSE)
write.csv(Cust2, "Cust2.csv", row.names = FALSE)
Cust1 <- read.csv("Cust1.csv", header = TRUE, stringsAsFactors = FALSE)
Cust2 <- read.csv("Cust2.csv", header = TRUE, stringsAsFactors = FALSE)

# Plot cumulative customer arrivals of type 1 and type 2 customers over the
# (reversed) booking horizon.
library(ggplot2)
d <- data.frame(
  x = 30:1,
  y1 = cumsum(aggBH(Cust1[, 1], 30)),
  y2 = cumsum(aggBH(Cust2[, 1], 30)),
  y3 = cumsum(aggBH(Cust1[, 1], 30) + aggBH(Cust2[, 1], 30))
)
p <- ggplot(data = d, aes(x = x)) +
  geom_step(size = 1, aes(y = y1, colour = "Type 1 (Business) Arrivals")) +
  geom_step(size = 1, aes(y = y2, colour = "Type 2 (Tourist) Arrivals")) +
  geom_step(size = 1, aes(y = y3, colour = "Total Customer Arrivals")) +
  scale_colour_manual("", values = c("Type 1 (Business) Arrivals" = "red",
                                     "Type 2 (Tourist) Arrivals" = "blue",
                                     "Total Customer Arrivals" = "black")) +
  labs(x = "Booking Horizon (DCPs before Departure)") +
  scale_x_reverse(lim = c(30, 0)) +
  scale_y_continuous("Cumulative Customer Arrivals", limits = c(0, 200)) +
  theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 14),
        legend.text = element_text(size = 14),
        plot.background = element_rect(fill = "transparent", color = NA),
        legend.background = element_rect(color = NA, fill = "transparent"),
        legend.box.background = element_rect(fill = "transparent", color = NA),
        legend.position = c(0, 1),
        legend.justification = c(0, 1),
        legend.title = element_blank(),
        legend.key = element_blank())
ggsave(p, filename = "CustomerArrivals.png", bg = "transparent")
/CustomerArrivals.R
no_license
nmarwen/ARM
R
false
false
2,962
r
# Homogeneous Poisson process arrivals (typical call: BH = 30, L = 0.01, mu = 180)
#
# BH : length of the booking horizon
# L  : length of a single decision period, giving BH / L periods in total
# mu : expected total number of arrivals over the whole horizon
# Returns a vector of BH / L independent Poisson counts whose means sum to mu.
arrivals <- function(BH, L, mu) {
  rpois(BH / L, mu / (BH / L))
}
arrivals(30, 0.01, 180)

# Inhomogeneous Poisson process arrivals with intensity of the form a*t + b.
# Period i receives a Poisson count whose mean is the integral of the
# intensity over (i - 1, i], scaled by the number of periods.
ipparrivals <- function(BH, L, a, b) {
  i <- seq_len(BH / L)
  # Vectorised over the per-period means; replaces the original grow-by-c()
  # loop, which copied the whole vector on every iteration.
  rpois(length(i), ((a / 2) * (i^2 - (i - 1)^2) + b * (i - (i - 1))) / (BH / L))
}
ipparrivals(30, 0.01, 1/3, 1)

# Piecewise homogeneous Poisson process arrivals.
# segments : lengths of the homogeneous pieces (must sum to BH)
# rates    : expected total arrivals within each corresponding segment
pharrivals <- function(BH, L, segments, rates) {
  # Bug fix: the original only print()-ed this diagnostic and then carried on
  # computing with inconsistent inputs; invalid input is now a proper error.
  if (sum(segments) != BH) {
    stop("Segments do not sum to length of booking horizon")
  }
  cust <- numeric(0)
  for (i in seq_along(segments)) {
    cust <- c(cust, rpois(segments[i] / L, rates[i] / (segments[i] / L)))
  }
  cust
}
pharrivals(30, 0.01, c(10, 10, 10), c(20, 30, 40))

# Aggregate per-period counts into per-day (per-DCP) totals.
# Bug fix: the original returned an error *string* from an otherwise numeric
# function, which downstream calls such as cumsum() would then choke on;
# invalid input is now a proper error.
aggBH <- function(data, BH) {
  k <- length(data) / BH
  if (k %% 1 != 0) {
    stop("vector not an integer multiple of booking horizon")
  }
  unname(tapply(data, (seq_along(data) - 1) %/% k, sum))
}
aggBH(arrivals(30, 0.01, 180), 30)

# Generate two types of customer: type 1 and type 2
Cust1 <- pharrivals(BH = 30, L = 0.01, segments = c(10, 10, 10), rates = c(20, 30, 40))
Cust2 <- pharrivals(BH = 30, L = 0.01, segments = c(15, 10, 5), rates = c(5, 35, 50))

# Save to files and read in
write.csv(Cust1, "Cust1.csv", row.names = FALSE)
write.csv(Cust2, "Cust2.csv", row.names = FALSE)
Cust1 <- read.csv("Cust1.csv", header = TRUE, stringsAsFactors = FALSE)
Cust2 <- read.csv("Cust2.csv", header = TRUE, stringsAsFactors = FALSE)

# Plot cumulative customer arrivals of type 1 and type 2 customers over the
# (reversed) booking horizon.
library(ggplot2)
d <- data.frame(
  x = 30:1,
  y1 = cumsum(aggBH(Cust1[, 1], 30)),
  y2 = cumsum(aggBH(Cust2[, 1], 30)),
  y3 = cumsum(aggBH(Cust1[, 1], 30) + aggBH(Cust2[, 1], 30))
)
p <- ggplot(data = d, aes(x = x)) +
  geom_step(size = 1, aes(y = y1, colour = "Type 1 (Business) Arrivals")) +
  geom_step(size = 1, aes(y = y2, colour = "Type 2 (Tourist) Arrivals")) +
  geom_step(size = 1, aes(y = y3, colour = "Total Customer Arrivals")) +
  scale_colour_manual("", values = c("Type 1 (Business) Arrivals" = "red",
                                     "Type 2 (Tourist) Arrivals" = "blue",
                                     "Total Customer Arrivals" = "black")) +
  labs(x = "Booking Horizon (DCPs before Departure)") +
  scale_x_reverse(lim = c(30, 0)) +
  scale_y_continuous("Cumulative Customer Arrivals", limits = c(0, 200)) +
  theme(axis.text = element_text(size = 14),
        axis.title = element_text(size = 14),
        legend.text = element_text(size = 14),
        plot.background = element_rect(fill = "transparent", color = NA),
        legend.background = element_rect(color = NA, fill = "transparent"),
        legend.box.background = element_rect(fill = "transparent", color = NA),
        legend.position = c(0, 1),
        legend.justification = c(0, 1),
        legend.title = element_blank(),
        legend.key = element_blank())
ggsave(p, filename = "CustomerArrivals.png", bg = "transparent")
# Compare how distance-to-edge is distributed inside a simple shape (a
# circle) versus an irregular polygon (one North Carolina county), then fit
# the circle's theoretical quadratic relationship to both.
library(tidyverse)
library(sf)

# build some shapes -------------------------------------------------------

# a circle: radius-30 buffer around the origin point
pt <- st_point(x = c(0, 0))
radius <- 30
circle <- st_buffer(pt, radius)
circle_linestring <- st_cast(circle, "MULTILINESTRING")

# something more abstract: the 4th county polygon from sf's bundled NC data
nc <- st_read(system.file("shape/nc.shp", package="sf"))[4, ]
st_area(nc)
plot(nc$geometry)

# convert each to a multilinestring ---------------------------------------
# NOTE(review): circle_linestring is recomputed here — identical to the
# assignment above; one of the two is redundant.
circle_linestring <- st_cast(circle, "MULTILINESTRING")
nc_linestring <- st_cast(nc, "MULTILINESTRING")

# sample from each polygons -----------------------------------------------
# Uniform random points inside each polygon.  NOTE(review): st_sample may
# return slightly fewer/more than `size` points — confirm if exact counts
# matter downstream.
circle_samps <- st_sample(circle, size = 1000)
nc_samps <- st_sample(nc, size = 1000)

# calculate distance to edge ----------------------------------------------
# For each sampled point, distance to the polygon boundary; sorted so the
# empirical CDF below lines up with the distances.
circle_dist_to_edge <- st_distance(circle_samps, circle_linestring)[,1] %>% sort()
nc_dist_to_edge <- st_distance(nc_samps, nc_linestring)[, 1] %>% sort() %>% as.numeric()

# create cumulative distribution function ---------------------------------
# Pr(a random interior point is FARTHER from the edge than d) = 1 - CDF(d).
circle_cdf <- ecdf(circle_dist_to_edge)
circle_pr_X_farther_from_edge <- 1 - circle_cdf(circle_dist_to_edge)

nc_cdf <- ecdf(nc_dist_to_edge)
nc_pr_X_farther_from_edge <- 1 - nc_cdf(nc_dist_to_edge)

# some plots --------------------------------------------------------------

# geometry plots: each shape with its sampled points overlaid
par(mfrow = c(1, 2))
plot(circle)
plot(circle_samps, add = TRUE, pch = 19)

plot(nc$geometry)
plot(nc_samps, add = TRUE, pch = 19)
dev.off()

# cdf plots: survival curves of distance-to-edge for each shape
par(mfrow = c(1, 2))
plot(circle_dist_to_edge, circle_pr_X_farther_from_edge)
plot(nc_dist_to_edge, nc_pr_X_farther_from_edge)
dev.off()

# theoretical relationships plots
# For a circle, Pr(farther than d) = (1 - d / R)^2 exactly (area ratio of the
# shrunken disc); overlay that curve on both empirical survival plots.
par(mfrow = c(1, 2))
circle_prop_rad <- circle_dist_to_edge / max(circle_dist_to_edge)
plot(circle_dist_to_edge, circle_pr_X_farther_from_edge)
lines(circle_dist_to_edge, y = (circle_prop_rad - 1)^2, col = "red", lwd = 3)

nc_prop_rad <- as.numeric(nc_dist_to_edge / max(nc_dist_to_edge))
plot(nc_dist_to_edge, nc_pr_X_farther_from_edge)
lines(nc_dist_to_edge, y = (nc_prop_rad - 1)^2, col = "red", lwd = 3)
# dev.off()

# fit some models ---------------------------------------------------------
# Fit the circle-style quadratic with the "effective radius" max_rad as the
# single free parameter, for each shape.
circle_fm <- nls(circle_pr_X_farther_from_edge ~ ((circle_dist_to_edge - max_rad) / max_rad) ^ 2, start = list(max_rad = 25))

nc_fm <- nls(nc_pr_X_farther_from_edge ~ ((nc_dist_to_edge - max_rad) / max_rad) ^ 2, start = list(max_rad = 5000))
# Overlay the fitted curve (uses the estimated max_rad) on the last plot.
lines(nc_dist_to_edge, y = ((nc_dist_to_edge / coef(nc_fm)) - 1)^2, col = "blue", lwd = 3)

# original SDC calculations -----------------------------------------------
# Kept for reference: the earlier approach computed the same survival curve
# analytically by shrinking the polygon with negative buffers.
# buffer_dist <- 0:30
#
# buffered_geoms <- lapply(buffer_dist, FUN = function(x) st_buffer(circle, -x))
# props <- lapply(buffered_geoms, FUN = function(x) 1 - (st_area(x) / st_area(circle))) %>% do.call("c", .)
#
# lines(buffer_dist, props, col = "blue", lwd = 3)
/analyses/circle-noodle-comparison.R
no_license
mikoontz/sdc-clarifying-concepts
R
false
false
2,856
r
# Compare how distance-to-edge is distributed inside a simple shape (a
# circle) versus an irregular polygon (one North Carolina county), then fit
# the circle's theoretical quadratic relationship to both.
library(tidyverse)
library(sf)

# build some shapes -------------------------------------------------------

# a circle: radius-30 buffer around the origin point
pt <- st_point(x = c(0, 0))
radius <- 30
circle <- st_buffer(pt, radius)
circle_linestring <- st_cast(circle, "MULTILINESTRING")

# something more abstract: the 4th county polygon from sf's bundled NC data
nc <- st_read(system.file("shape/nc.shp", package="sf"))[4, ]
st_area(nc)
plot(nc$geometry)

# convert each to a multilinestring ---------------------------------------
# NOTE(review): circle_linestring is recomputed here — identical to the
# assignment above; one of the two is redundant.
circle_linestring <- st_cast(circle, "MULTILINESTRING")
nc_linestring <- st_cast(nc, "MULTILINESTRING")

# sample from each polygons -----------------------------------------------
# Uniform random points inside each polygon.  NOTE(review): st_sample may
# return slightly fewer/more than `size` points — confirm if exact counts
# matter downstream.
circle_samps <- st_sample(circle, size = 1000)
nc_samps <- st_sample(nc, size = 1000)

# calculate distance to edge ----------------------------------------------
# For each sampled point, distance to the polygon boundary; sorted so the
# empirical CDF below lines up with the distances.
circle_dist_to_edge <- st_distance(circle_samps, circle_linestring)[,1] %>% sort()
nc_dist_to_edge <- st_distance(nc_samps, nc_linestring)[, 1] %>% sort() %>% as.numeric()

# create cumulative distribution function ---------------------------------
# Pr(a random interior point is FARTHER from the edge than d) = 1 - CDF(d).
circle_cdf <- ecdf(circle_dist_to_edge)
circle_pr_X_farther_from_edge <- 1 - circle_cdf(circle_dist_to_edge)

nc_cdf <- ecdf(nc_dist_to_edge)
nc_pr_X_farther_from_edge <- 1 - nc_cdf(nc_dist_to_edge)

# some plots --------------------------------------------------------------

# geometry plots: each shape with its sampled points overlaid
par(mfrow = c(1, 2))
plot(circle)
plot(circle_samps, add = TRUE, pch = 19)

plot(nc$geometry)
plot(nc_samps, add = TRUE, pch = 19)
dev.off()

# cdf plots: survival curves of distance-to-edge for each shape
par(mfrow = c(1, 2))
plot(circle_dist_to_edge, circle_pr_X_farther_from_edge)
plot(nc_dist_to_edge, nc_pr_X_farther_from_edge)
dev.off()

# theoretical relationships plots
# For a circle, Pr(farther than d) = (1 - d / R)^2 exactly (area ratio of the
# shrunken disc); overlay that curve on both empirical survival plots.
par(mfrow = c(1, 2))
circle_prop_rad <- circle_dist_to_edge / max(circle_dist_to_edge)
plot(circle_dist_to_edge, circle_pr_X_farther_from_edge)
lines(circle_dist_to_edge, y = (circle_prop_rad - 1)^2, col = "red", lwd = 3)

nc_prop_rad <- as.numeric(nc_dist_to_edge / max(nc_dist_to_edge))
plot(nc_dist_to_edge, nc_pr_X_farther_from_edge)
lines(nc_dist_to_edge, y = (nc_prop_rad - 1)^2, col = "red", lwd = 3)
# dev.off()

# fit some models ---------------------------------------------------------
# Fit the circle-style quadratic with the "effective radius" max_rad as the
# single free parameter, for each shape.
circle_fm <- nls(circle_pr_X_farther_from_edge ~ ((circle_dist_to_edge - max_rad) / max_rad) ^ 2, start = list(max_rad = 25))

nc_fm <- nls(nc_pr_X_farther_from_edge ~ ((nc_dist_to_edge - max_rad) / max_rad) ^ 2, start = list(max_rad = 5000))
# Overlay the fitted curve (uses the estimated max_rad) on the last plot.
lines(nc_dist_to_edge, y = ((nc_dist_to_edge / coef(nc_fm)) - 1)^2, col = "blue", lwd = 3)

# original SDC calculations -----------------------------------------------
# Kept for reference: the earlier approach computed the same survival curve
# analytically by shrinking the polygon with negative buffers.
# buffer_dist <- 0:30
#
# buffered_geoms <- lapply(buffer_dist, FUN = function(x) st_buffer(circle, -x))
# props <- lapply(buffered_geoms, FUN = function(x) 1 - (st_area(x) / st_area(circle))) %>% do.call("c", .)
#
# lines(buffer_dist, props, col = "blue", lwd = 3)
library(leaflet)
library(maptools)
library(maps)
library(stringr)

# Capitalize the first word of every word in each string element of a vector.
# Each element is split on whitespace, the first letter of every word is
# upper-cased, and the words are re-joined with single spaces.
properCaps <- function(x) {
  words <- str_split(x, "\\s")
  out <- sapply(words, function(word) {
    paste0(
      toupper(str_sub(word, 1, 1)),
      str_sub(word, start = 2),
      collapse = " "
    )
  })
  return(out)
}

# Get SpatialPolygonsDataFrame of countries
world <- rworldmap::getMap()
# Get maps class of American states
states <- map("state", fill = TRUE, plot = FALSE)
# Properly capitalized IDs for states ("new york:manhattan" -> "New York")
state_names <- sapply(str_split(states$names, ":"), function(x) x[1]) %>%
  properCaps()
# Deal with Georgia country vs Georgia, USA
state_names[state_names == "Georgia"] <- "Georgia state"
states <- map2SpatialPolygons(states, IDs = state_names,
                              proj4string = world@proj4string)
world <- SpatialPolygons(world@polygons, proj4string = world@proj4string)
sp_polys <- rbind(world, states)

# Bug fix: the original piped through dplyr::select(), but dplyr was never
# attached (only leaflet/maptools/maps/stringr are loaded), so the pipeline
# errored at run time.  Base subsetting needs no extra dependency.
city_coords <- read.csv("data/filtered_world_cities.csv",
                        stringsAsFactors = FALSE, row.names = 1)
sp_centroids <- rbind(as.matrix(city_coords[, c("long", "lat")]),
                      coordinates(sp_polys))

# NOTE(review): sp_polys / sp_centroids are built but unused below — the map
# still draws only the country polygons and their centroids; presumably work
# in progress.
leaflet() %>%
  addPolygons(data = world, fillColor = "white", weight = 0.5) %>%
  addCircles(data = coordinates(world)) %>%
  addTiles()
/leaflet.R
no_license
amarnathprabhakar/CNN-stories
R
false
false
1,274
r
library(leaflet)
library(maptools)
library(maps)
library(stringr)

# Capitalize the first word of every word in each string element of a vector.
# Each element is split on whitespace, the first letter of every word is
# upper-cased, and the words are re-joined with single spaces.
properCaps <- function(x) {
  words <- str_split(x, "\\s")
  out <- sapply(words, function(word) {
    paste0(
      toupper(str_sub(word, 1, 1)),
      str_sub(word, start = 2),
      collapse = " "
    )
  })
  return(out)
}

# Get SpatialPolygonsDataFrame of countries
world <- rworldmap::getMap()
# Get maps class of American states
states <- map("state", fill = TRUE, plot = FALSE)
# Properly capitalized IDs for states ("new york:manhattan" -> "New York")
state_names <- sapply(str_split(states$names, ":"), function(x) x[1]) %>%
  properCaps()
# Deal with Georgia country vs Georgia, USA
state_names[state_names == "Georgia"] <- "Georgia state"
states <- map2SpatialPolygons(states, IDs = state_names,
                              proj4string = world@proj4string)
world <- SpatialPolygons(world@polygons, proj4string = world@proj4string)
sp_polys <- rbind(world, states)

# Bug fix: the original piped through dplyr::select(), but dplyr was never
# attached (only leaflet/maptools/maps/stringr are loaded), so the pipeline
# errored at run time.  Base subsetting needs no extra dependency.
city_coords <- read.csv("data/filtered_world_cities.csv",
                        stringsAsFactors = FALSE, row.names = 1)
sp_centroids <- rbind(as.matrix(city_coords[, c("long", "lat")]),
                      coordinates(sp_polys))

# NOTE(review): sp_polys / sp_centroids are built but unused below — the map
# still draws only the country polygons and their centroids; presumably work
# in progress.
leaflet() %>%
  addPolygons(data = world, fillColor = "white", weight = 0.5) %>%
  addCircles(data = coordinates(world)) %>%
  addTiles()
# Exploratory clustering of store locations by climate: PCA-reduce groups of
# climate variables, then compare k-means / hierarchical clusterings and map
# the resulting clusters across the US.

# Libraries-----
library(tidyverse)
library(janitor)
library(magrittr)
library(caret)
library(factoextra)
library(gridExtra)
library(NbClust)
library(cluster)

# Data Prep----
data_sample <- read_csv('climate-data-sample.csv') %>% clean_names()
# Center the two freeze dates on their medians so they become signed
# day-offsets (positive = earlier than the typical station).
first_winter_median <- median(data_sample$date_first_winter_freeze)
last_winter_median <- median(data_sample$date_last_winter_freeze)
data_sample %<>%
  mutate(first_winter_diff = as.integer(first_winter_median - date_first_winter_freeze),
         last_winter_diff = as.integer(last_winter_median - date_last_winter_freeze)) %>%
  select(-one_of('date_first_winter_freeze', 'date_last_winter_freeze' ))
which(is.na(data_sample), arr.ind=TRUE)
# NOTE(review): hard-coded single-cell imputation — fills the one NA found
# above with the previous row's value; fragile if the input file changes.
data_sample[1330, 50] <- data_sample[1329, 50]

# PCA Analisys----
# Group columns by variable family via name matching.
prec_var <- which(str_detect(colnames(data_sample), 'prec'))
rain_var <- which(str_detect(colnames(data_sample), 'rain'))
snow_var <- which(str_detect(colnames(data_sample), 'snow'))
temp_var <- which(str_detect(colnames(data_sample), 'temp'))
# One PCA per family.  NOTE(review): only the snow PCA is unscaled — confirm
# this asymmetry is intentional.
pca_prec <- prcomp(data_sample[prec_var], scale = TRUE)
pca_rain <- prcomp(data_sample[rain_var], scale = TRUE)
pca_snow <- prcomp(data_sample[snow_var], scale = FALSE)
pca_temp <- prcomp(data_sample[temp_var], scale = TRUE)
# Scree plots of explained variance, one panel per family.
list_scree_plot <- list(fviz_eig(pca_prec, addlabels = TRUE, choice = 'variance', main = 'Precipitation Effect'),
                        fviz_eig(pca_rain, addlabels = TRUE, choice = 'variance', main = 'Rain Effect'),
                        fviz_eig(pca_snow, addlabels = TRUE, choice = 'variance', main = 'Snow Effect'),
                        fviz_eig(pca_temp, addlabels = TRUE, choice = 'variance', main = 'Temp Effect') )
grid.arrange(grobs = list_scree_plot)
# Assemble the reduced feature set (leading PCs plus three raw variables),
# then standardize every column.
# NOTE(review): rain_1 is assigned pca_rain$x[,2] — identical to rain_2;
# this looks like a copy-paste slip and was presumably meant to be
# pca_rain$x[,1].  Confirm before relying on these features.
reduced_data <- data.frame(prec_1 = pca_prec$x[,1], prec_2 = pca_prec$x[,2], prec_3 = pca_prec$x[,3],
                           rain_1 = pca_rain$x[,2], rain_2 = pca_rain$x[,2], rain_3 = pca_rain$x[,3],
                           snow = pca_snow$x[,1], temp = pca_temp$x[,1]) %>%
  cbind(data_sample[,c('growing_season_length', 'first_winter_diff', 'last_winter_diff')]) %>%
  lapply(scale) %>%
  as.data.frame()

# Clustering-----
stores_map <- data_sample[,c('longitude', 'latitude')]
# Plot all station locations over a US outline.
ggplot() +
  geom_polygon(data = map_data("usa"), aes(x=long, y = lat, group = group), fill = NA, col = 'black') +
  coord_fixed(1.3) +
  geom_point(data = data_sample, aes(x = longitude, y = latitude), color = "red", size = 1)

# Kmean
# Elbow curve: between/total sum-of-squares ratio for k = 2..15.
kmresult <- rep(0, 15)
for(i in 2:15){
  .km <- reduced_data %>% kmeans(centers = i, nstart = 25, iter.max = 25)
  kmresult[i] <- .km$betweenss/.km$totss
}
plot(kmresult, type = 'b', col = 'blue')
# Alternative k-selection diagnostics: WSS, silhouette, and gap statistic.
fviz_nbclust(reduced_data, kmeans, method = "wss", k.max = 12)
fviz_nbclust(reduced_data, kmeans, method = "silhouette", k.max = 12)
gap_stat <- clusGap(reduced_data, FUN = kmeans, nstart = 25, K.max = 18, B = 50, verbose = TRUE)
fviz_gap_stat(gap_stat)

# Hierarchical Clustering
# NOTE(review): hc_complete is overwritten twice — only the single-linkage
# fit survives, yet the dendrogram below is titled "Complete Linkage".
# These were presumably meant to be three separate objects (or the author
# swapped methods while exploring).
hc_complete <- hclust (dist(reduced_data), method = "complete")
hc_complete <- hclust (dist(reduced_data), method = "average")
hc_complete <- hclust (dist(reduced_data), method = "single")
plot(hc_complete, main = "Complete Linkage", xlab = "", sub = "", cex =.9)

# Visualization
# Final k-means fit with k = 15; bar plot of the cluster centroids.
kmeanfit <- reduced_data %>% kmeans(centers = 15, nstart = 25, iter.max = 25)
# NOTE(review): this pipeline's result (the centroid column names) is
# discarded — likely a leftover inspection line.
kmeanfit$centers %>% as.data.frame() %>% names()
barplot(t(kmeanfit$centers), beside = TRUE, col = heat.colors(11),legend=FALSE, args.legend=list(cex = 0.8))
# Map each station colored by its assigned cluster.
clustered_data <- data_sample %>% cbind(cluster = as.character(kmeanfit$cluster))
ggplot() +
  geom_polygon(data = map_data("usa"), aes(x=long, y = lat, group = group), fill = NA, col = 'black') +
  coord_fixed(1.3) +
  geom_point(data = clustered_data, aes(x = longitude, y = latitude, col = cluster), size = 2)

# Another try w/ less PC
# Re-cluster with a trimmed feature set (drop the lower-order PCs) and k = 6.
kmeanfit_PC <- reduced_data %>%
  select(-one_of('prec_2', 'prec_3', 'rain_2', 'rain_3')) %>%
  kmeans(centers = 6, nstart = 25, iter.max = 25)
# NOTE(review): result discarded, as above.
kmeanfit_PC$centers %>% as.data.frame() %>% names()
barplot(t(kmeanfit_PC$centers), beside = TRUE, col = heat.colors(11),legend = TRUE, args.legend = list(cex = 0.3, horiz = TRUE))
clustered_data_PC <- data_sample %>% cbind(cluster = as.character(kmeanfit_PC$cluster))
ggplot() +
  geom_polygon(data = map_data("usa"), aes(x=long, y = lat, group = group), fill = NA, col = 'black') +
  coord_fixed(1.3) +
  geom_point(data = clustered_data_PC, aes(x = longitude, y = latitude, col = cluster), size = 2)
/climate_cluster.R
no_license
rodserr/climate-cluster-Udacity-course
R
false
false
4,499
r
# Libraries----- library(tidyverse) library(janitor) library(magrittr) library(caret) library(factoextra) library(gridExtra) library(NbClust) library(cluster) # Data Prep---- data_sample <- read_csv('climate-data-sample.csv') %>% clean_names() first_winter_median <- median(data_sample$date_first_winter_freeze) last_winter_median <- median(data_sample$date_last_winter_freeze) data_sample %<>% mutate(first_winter_diff = as.integer(first_winter_median - date_first_winter_freeze), last_winter_diff = as.integer(last_winter_median - date_last_winter_freeze)) %>% select(-one_of('date_first_winter_freeze', 'date_last_winter_freeze' )) which(is.na(data_sample), arr.ind=TRUE) data_sample[1330, 50] <- data_sample[1329, 50] # PCA Analisys---- prec_var <- which(str_detect(colnames(data_sample), 'prec')) rain_var <- which(str_detect(colnames(data_sample), 'rain')) snow_var <- which(str_detect(colnames(data_sample), 'snow')) temp_var <- which(str_detect(colnames(data_sample), 'temp')) pca_prec <- prcomp(data_sample[prec_var], scale = TRUE) pca_rain <- prcomp(data_sample[rain_var], scale = TRUE) pca_snow <- prcomp(data_sample[snow_var], scale = FALSE) pca_temp <- prcomp(data_sample[temp_var], scale = TRUE) list_scree_plot <- list(fviz_eig(pca_prec, addlabels = TRUE, choice = 'variance', main = 'Precipitation Effect'), fviz_eig(pca_rain, addlabels = TRUE, choice = 'variance', main = 'Rain Effect'), fviz_eig(pca_snow, addlabels = TRUE, choice = 'variance', main = 'Snow Effect'), fviz_eig(pca_temp, addlabels = TRUE, choice = 'variance', main = 'Temp Effect') ) grid.arrange(grobs = list_scree_plot) reduced_data <- data.frame(prec_1 = pca_prec$x[,1], prec_2 = pca_prec$x[,2], prec_3 = pca_prec$x[,3], rain_1 = pca_rain$x[,2], rain_2 = pca_rain$x[,2], rain_3 = pca_rain$x[,3], snow = pca_snow$x[,1], temp = pca_temp$x[,1]) %>% cbind(data_sample[,c('growing_season_length', 'first_winter_diff', 'last_winter_diff')]) %>% lapply(scale) %>% as.data.frame() # Clustering----- stores_map <- 
data_sample[,c('longitude', 'latitude')] ggplot() + geom_polygon(data = map_data("usa"), aes(x=long, y = lat, group = group), fill = NA, col = 'black') + coord_fixed(1.3) + geom_point(data = data_sample, aes(x = longitude, y = latitude), color = "red", size = 1) # Kmean kmresult <- rep(0, 15) for(i in 2:15){ .km <- reduced_data %>% kmeans(centers = i, nstart = 25, iter.max = 25) kmresult[i] <- .km$betweenss/.km$totss } plot(kmresult, type = 'b', col = 'blue') fviz_nbclust(reduced_data, kmeans, method = "wss", k.max = 12) fviz_nbclust(reduced_data, kmeans, method = "silhouette", k.max = 12) gap_stat <- clusGap(reduced_data, FUN = kmeans, nstart = 25, K.max = 18, B = 50, verbose = TRUE) fviz_gap_stat(gap_stat) # Hierarchical Clustering hc_complete <- hclust (dist(reduced_data), method = "complete") hc_complete <- hclust (dist(reduced_data), method = "average") hc_complete <- hclust (dist(reduced_data), method = "single") plot(hc_complete, main = "Complete Linkage", xlab = "", sub = "", cex =.9) # Visualization kmeanfit <- reduced_data %>% kmeans(centers = 15, nstart = 25, iter.max = 25) kmeanfit$centers %>% as.data.frame() %>% names() barplot(t(kmeanfit$centers), beside = TRUE, col = heat.colors(11),legend=FALSE, args.legend=list(cex = 0.8)) clustered_data <- data_sample %>% cbind(cluster = as.character(kmeanfit$cluster)) ggplot() + geom_polygon(data = map_data("usa"), aes(x=long, y = lat, group = group), fill = NA, col = 'black') + coord_fixed(1.3) + geom_point(data = clustered_data, aes(x = longitude, y = latitude, col = cluster), size = 2) # Another try w/ less PC kmeanfit_PC <- reduced_data %>% select(-one_of('prec_2', 'prec_3', 'rain_2', 'rain_3')) %>% kmeans(centers = 6, nstart = 25, iter.max = 25) kmeanfit_PC$centers %>% as.data.frame() %>% names() barplot(t(kmeanfit_PC$centers), beside = TRUE, col = heat.colors(11),legend = TRUE, args.legend = list(cex = 0.3, horiz = TRUE)) clustered_data_PC <- data_sample %>% cbind(cluster = 
as.character(kmeanfit_PC$cluster)) ggplot() + geom_polygon(data = map_data("usa"), aes(x=long, y = lat, group = group), fill = NA, col = 'black') + coord_fixed(1.3) + geom_point(data = clustered_data_PC, aes(x = longitude, y = latitude, col = cluster), size = 2)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/checks.R \name{checkarg_fetch_id_data} \alias{checkarg_fetch_id_data} \title{Check if data for fetch_id() is correct} \usage{ checkarg_fetch_id_data(.data) } \description{ Check if data for fetch_id() is correct } \keyword{internal}
/man/checkarg_fetch_id_data.Rd
permissive
ropensci/qualtRics
R
false
true
311
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/checks.R \name{checkarg_fetch_id_data} \alias{checkarg_fetch_id_data} \title{Check if data for fetch_id() is correct} \usage{ checkarg_fetch_id_data(.data) } \description{ Check if data for fetch_id() is correct } \keyword{internal}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gadgets.R \name{ask_builder} \alias{ask_builder} \title{ask if go to cheat sheet building menu} \usage{ ask_builder() } \description{ ask if go to cheat sheet building menu }
/man/ask_builder.Rd
no_license
wnk4242/Rcheatsheet
R
false
true
253
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gadgets.R \name{ask_builder} \alias{ask_builder} \title{ask if go to cheat sheet building menu} \usage{ ask_builder() } \description{ ask if go to cheat sheet building menu }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{prml} \alias{prml} \title{Declares that I read a famous textbook.} \usage{ prml() } \description{ Declares that I read a famous textbook. }
/man/prml.Rd
no_license
kota8/rpacktest
R
false
true
238
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{prml} \alias{prml} \title{Declares that I read a famous textbook.} \usage{ prml() } \description{ Declares that I read a famous textbook. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/directoryName.R \name{directoryName} \alias{directoryName} \title{Safe Version of Base Function dirname()} \usage{ directoryName(x) } \arguments{ \item{x}{a file path of which to get the path to the directory only} } \value{ path to directory of file path given in \code{x} } \description{ The base function \code{\link{dirname}} may fail if the path passed is too long. This version checks if the call of \code{dirname()} failed and gives a clear error message. } \examples{ \dontrun{ directoryName(repeated("verylongpath/", 50)) } }
/man/directoryName.Rd
permissive
KWB-R/kwb.utils
R
false
true
614
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/directoryName.R \name{directoryName} \alias{directoryName} \title{Safe Version of Base Function dirname()} \usage{ directoryName(x) } \arguments{ \item{x}{a file path of which to get the path to the directory only} } \value{ path to directory of file path given in \code{x} } \description{ The base function \code{\link{dirname}} may fail if the path passed is too long. This version checks if the call of \code{dirname()} failed and gives a clear error message. } \examples{ \dontrun{ directoryName(repeated("verylongpath/", 50)) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is.R \name{is_app} \alias{is_app} \alias{is_service_principal} \alias{is_user} \alias{is_group} \alias{is_directory_object} \title{Informational functions} \usage{ is_app(object) is_service_principal(object) is_user(object) is_group(object) is_directory_object(object) } \arguments{ \item{object}{An R object.} } \value{ A boolean. } \description{ These functions return whether the object is of the corresponding class. }
/man/info.Rd
permissive
PennState/AzureGraph
R
false
true
505
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/is.R \name{is_app} \alias{is_app} \alias{is_service_principal} \alias{is_user} \alias{is_group} \alias{is_directory_object} \title{Informational functions} \usage{ is_app(object) is_service_principal(object) is_user(object) is_group(object) is_directory_object(object) } \arguments{ \item{object}{An R object.} } \value{ A boolean. } \description{ These functions return whether the object is of the corresponding class. }
ponies <- c( "Twilight Sparkle", "Rainbow Dash", "Pinkie Pie", "Applejack", "Rarity", "Fluttershy", "Fluttershy"
/R/rpony.R
no_license
adriennewood/mylittlepony
R
false
false
128
r
ponies <- c( "Twilight Sparkle", "Rainbow Dash", "Pinkie Pie", "Applejack", "Rarity", "Fluttershy", "Fluttershy"
library(glmnet) mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/urinary_tract.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.65,family="gaussian",standardize=TRUE) sink('./urinary_tract_069.txt',append=TRUE) print(glm$glmnet.fit) sink()
/Model/EN/ReliefF/urinary_tract/urinary_tract_069.R
no_license
esbgkannan/QSMART
R
false
false
360
r
library(glmnet) mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/urinary_tract.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.65,family="gaussian",standardize=TRUE) sink('./urinary_tract_069.txt',append=TRUE) print(glm$glmnet.fit) sink()
cat("\014") # Clear your console rm(list = ls()) #clear your environment ########################## Load in header file ######################## # source(file.path("C:/Users/Nick/git/of-dollars-and-data/header.R")) ########################## Load in Libraries ########################## # library(dplyr) library(tidyr) library(scales) library(ggplot2) library(scales) library(grid) library(gridExtra) library(gtable) library(ggrepel) library(stringr) ########################## Start Program Here ######################### # # Set program paramaters n_years_working <- 40 income <- 50000 savings_rate <- 0.15 sample_mean <- 0.05 sample_sd <- 0.09 n_simulations <- 1 # Create a custom palette with black using COlorBrewer # From here: http://colorbrewer2.org/#type=qualitative&scheme=Set1&n=7 my_palette <- c("#E41A1C", "#4DAF4A", "#000000", "#377EB8", "#984EA3", "#FF7F00", "#A65628") # This seed allows us to have reproducible random sampling set.seed(12345) # Create asset and return matrices returns_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_working+1) asset_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_working+1) savings_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_working+1) # Create a for loop for each year you work for (i in 1:(n_years_working + 1)){ returns <- rnorm(n_simulations, sample_mean, sample_sd) if (i == 1){ savings_matrix[, i] <- income * savings_rate asset_matrix[, i] <- savings_matrix[, i] returns_matrix[, i] <- rep(0, n_simulations) } else { savings_matrix[, i] <- income * savings_rate returns_matrix[, i] <- returns * asset_matrix[, (i-1)] asset_matrix[, i] <- savings_matrix[, i] + returns_matrix[, i] + asset_matrix[, (i-1)] } } # Convert the matrices to a long data frame for plotting convert_to_df <- function(matrix, type){ out <- as.data.frame(matrix) %>% gather(key = "year", value = "value", 1:(n_years_working+1)) out$simulation <- seq(1, n_simulations) out$year <- rep(seq(1, (n_years_working + 1)), each = 
n_simulations) out$type <- type return(out) } savings_df <- convert_to_df(savings_matrix, "savings") returns_df <- convert_to_df(returns_matrix, "returns") # Bind the two data frames to_plot <- bind_rows(savings_df, returns_df) # Define the y_unit for the y-axis y_unit <- 10^floor(min(log10(abs(max(to_plot$value))), log10(abs(min(to_plot$value))))) # Function to find a rounded max/min based on the specifications of y_unit create_max_min <- function(x, unit, ceilfloor) { ceilfloor(x/unit)*unit } y_max <- create_max_min(max(to_plot$value), y_unit, ceiling) y_min <- create_max_min(min(to_plot$value), y_unit, floor) # If the distance between the max and min is too large, increase y_unit # until the distance is less than 10 ticks away while (ceiling(abs(y_max - y_min))/y_unit > 10){ y_unit <- y_unit * 2 } # Define a new y_max if the y_unit has changed y_max <- create_max_min(y_max, y_unit, ceiling) ## Create 1st plot # Set the file path file_path = paste0(exportdir, "16-investing-vs-saving/saving-vs-investing.jpeg") # Create plot plot <- ggplot(data = to_plot, aes(x = year, fill = type, weight = value)) + geom_bar(position = "stack") + geom_hline(yintercept = 0) + geom_text_repel(data = filter(to_plot, year == 1, type == "savings"), aes(x = year, y = value, col = type, label = str_wrap("Savings Matter Early in Life", width = 10), family = "my_font"), nudge_y = 120000, nudge_x = 2) + geom_text_repel(data = filter(to_plot, year == 29, type == "returns"), aes(x = year, y = value, col = type, label = str_wrap("Investments Dominate Later in Life", width = 10), family = "my_font"), nudge_y = 130000, nudge_x = -2) + scale_color_manual(values = my_palette, guide = FALSE) + scale_fill_manual(values = my_palette, guide = FALSE) + scale_y_continuous(labels = dollar, limits = c(y_min, y_max), breaks = seq(y_min, y_max, y_unit)) + scale_x_continuous(breaks = seq(0, n_years_working, 5)) + of_dollars_and_data_theme + labs(x = "Years" , y = "Change in Value") + ggtitle(paste0("Savings 
and Investment Returns Have\nVarying Impact Over Time")) # Add a source and note string for the plots source_string <- "Source: Simulated data (OfDollarsAndData.com)" note_string <- paste0("Note: Assumes an annual mean return of ", sample_mean*100, "% with a ", sample_sd*100, "% standard deviation.") # Turn plot into a gtable for adding text grobs my_gtable <- ggplot_gtable(ggplot_build(plot)) # Make the source and note text grobs source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) # Add the text grobs to the bototm of the gtable my_gtable <- arrangeGrob(my_gtable, bottom = source_grob) my_gtable <- arrangeGrob(my_gtable, bottom = note_grob) # Save the gtable ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm") ## Create additional plot to show percentage of total assets attributable to savings vs investment assets_df <- convert_to_df(asset_matrix, "total_assets") assets_df$pct <- 1 - ((seq(1, n_years_working + 1) * (income * savings_rate)) / assets_df$value) assets_df$type <- "investment_pct" ## Create 2nd plot # Get the maximum percentage for the data frame ymax <- max(assets_df$pct) # Set the file path file_path = paste0(exportdir, "16-investing-vs-saving/pct-of-total-assets.jpeg") # Create plot plot <- ggplot(data = assets_df, aes(x = year, y = pct, fill = type)) + geom_area() + geom_hline(yintercept = ymax, col = my_palette[1], linetype = 2) + scale_color_manual(values = my_palette, guide = FALSE) + scale_fill_manual(values = my_palette, guide = FALSE) + scale_y_continuous(labels = percent, limits = c(0, 1), breaks = seq(0, 1, 0.1)) + scale_x_continuous(breaks = seq(0, n_years_working, 5)) + of_dollars_and_data_theme + labs(x = "Years" , y = 
"Percentage of Total Assets") + ggtitle(paste0("Percentage of Total Assets That\nCome From Investment Gains")) # Add a source and note string for the plots source_string <- "Source: Simulated data (OfDollarsAndData.com)" note_string <- paste0("Note: Assumes an annual mean return of ", sample_mean*100, "% with a ", sample_sd*100, "% standard deviation.") # Turn plot into a gtable for adding text grobs my_gtable <- ggplot_gtable(ggplot_build(plot)) # Make the source and note text grobs source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) # Add the text grobs to the bototm of the gtable my_gtable <- arrangeGrob(my_gtable, bottom = source_grob) my_gtable <- arrangeGrob(my_gtable, bottom = note_grob) # Save the gtable ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm") # ############################ End ################################## #
/analysis/16-investing-vs-saving-plots.R
no_license
intelliBrain/of-dollars-and-data
R
false
false
8,631
r
cat("\014") # Clear your console rm(list = ls()) #clear your environment ########################## Load in header file ######################## # source(file.path("C:/Users/Nick/git/of-dollars-and-data/header.R")) ########################## Load in Libraries ########################## # library(dplyr) library(tidyr) library(scales) library(ggplot2) library(scales) library(grid) library(gridExtra) library(gtable) library(ggrepel) library(stringr) ########################## Start Program Here ######################### # # Set program paramaters n_years_working <- 40 income <- 50000 savings_rate <- 0.15 sample_mean <- 0.05 sample_sd <- 0.09 n_simulations <- 1 # Create a custom palette with black using COlorBrewer # From here: http://colorbrewer2.org/#type=qualitative&scheme=Set1&n=7 my_palette <- c("#E41A1C", "#4DAF4A", "#000000", "#377EB8", "#984EA3", "#FF7F00", "#A65628") # This seed allows us to have reproducible random sampling set.seed(12345) # Create asset and return matrices returns_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_working+1) asset_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_working+1) savings_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_working+1) # Create a for loop for each year you work for (i in 1:(n_years_working + 1)){ returns <- rnorm(n_simulations, sample_mean, sample_sd) if (i == 1){ savings_matrix[, i] <- income * savings_rate asset_matrix[, i] <- savings_matrix[, i] returns_matrix[, i] <- rep(0, n_simulations) } else { savings_matrix[, i] <- income * savings_rate returns_matrix[, i] <- returns * asset_matrix[, (i-1)] asset_matrix[, i] <- savings_matrix[, i] + returns_matrix[, i] + asset_matrix[, (i-1)] } } # Convert the matrices to a long data frame for plotting convert_to_df <- function(matrix, type){ out <- as.data.frame(matrix) %>% gather(key = "year", value = "value", 1:(n_years_working+1)) out$simulation <- seq(1, n_simulations) out$year <- rep(seq(1, (n_years_working + 1)), each = 
n_simulations) out$type <- type return(out) } savings_df <- convert_to_df(savings_matrix, "savings") returns_df <- convert_to_df(returns_matrix, "returns") # Bind the two data frames to_plot <- bind_rows(savings_df, returns_df) # Define the y_unit for the y-axis y_unit <- 10^floor(min(log10(abs(max(to_plot$value))), log10(abs(min(to_plot$value))))) # Function to find a rounded max/min based on the specifications of y_unit create_max_min <- function(x, unit, ceilfloor) { ceilfloor(x/unit)*unit } y_max <- create_max_min(max(to_plot$value), y_unit, ceiling) y_min <- create_max_min(min(to_plot$value), y_unit, floor) # If the distance between the max and min is too large, increase y_unit # until the distance is less than 10 ticks away while (ceiling(abs(y_max - y_min))/y_unit > 10){ y_unit <- y_unit * 2 } # Define a new y_max if the y_unit has changed y_max <- create_max_min(y_max, y_unit, ceiling) ## Create 1st plot # Set the file path file_path = paste0(exportdir, "16-investing-vs-saving/saving-vs-investing.jpeg") # Create plot plot <- ggplot(data = to_plot, aes(x = year, fill = type, weight = value)) + geom_bar(position = "stack") + geom_hline(yintercept = 0) + geom_text_repel(data = filter(to_plot, year == 1, type == "savings"), aes(x = year, y = value, col = type, label = str_wrap("Savings Matter Early in Life", width = 10), family = "my_font"), nudge_y = 120000, nudge_x = 2) + geom_text_repel(data = filter(to_plot, year == 29, type == "returns"), aes(x = year, y = value, col = type, label = str_wrap("Investments Dominate Later in Life", width = 10), family = "my_font"), nudge_y = 130000, nudge_x = -2) + scale_color_manual(values = my_palette, guide = FALSE) + scale_fill_manual(values = my_palette, guide = FALSE) + scale_y_continuous(labels = dollar, limits = c(y_min, y_max), breaks = seq(y_min, y_max, y_unit)) + scale_x_continuous(breaks = seq(0, n_years_working, 5)) + of_dollars_and_data_theme + labs(x = "Years" , y = "Change in Value") + ggtitle(paste0("Savings 
and Investment Returns Have\nVarying Impact Over Time")) # Add a source and note string for the plots source_string <- "Source: Simulated data (OfDollarsAndData.com)" note_string <- paste0("Note: Assumes an annual mean return of ", sample_mean*100, "% with a ", sample_sd*100, "% standard deviation.") # Turn plot into a gtable for adding text grobs my_gtable <- ggplot_gtable(ggplot_build(plot)) # Make the source and note text grobs source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) # Add the text grobs to the bototm of the gtable my_gtable <- arrangeGrob(my_gtable, bottom = source_grob) my_gtable <- arrangeGrob(my_gtable, bottom = note_grob) # Save the gtable ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm") ## Create additional plot to show percentage of total assets attributable to savings vs investment assets_df <- convert_to_df(asset_matrix, "total_assets") assets_df$pct <- 1 - ((seq(1, n_years_working + 1) * (income * savings_rate)) / assets_df$value) assets_df$type <- "investment_pct" ## Create 2nd plot # Get the maximum percentage for the data frame ymax <- max(assets_df$pct) # Set the file path file_path = paste0(exportdir, "16-investing-vs-saving/pct-of-total-assets.jpeg") # Create plot plot <- ggplot(data = assets_df, aes(x = year, y = pct, fill = type)) + geom_area() + geom_hline(yintercept = ymax, col = my_palette[1], linetype = 2) + scale_color_manual(values = my_palette, guide = FALSE) + scale_fill_manual(values = my_palette, guide = FALSE) + scale_y_continuous(labels = percent, limits = c(0, 1), breaks = seq(0, 1, 0.1)) + scale_x_continuous(breaks = seq(0, n_years_working, 5)) + of_dollars_and_data_theme + labs(x = "Years" , y = 
"Percentage of Total Assets") + ggtitle(paste0("Percentage of Total Assets That\nCome From Investment Gains")) # Add a source and note string for the plots source_string <- "Source: Simulated data (OfDollarsAndData.com)" note_string <- paste0("Note: Assumes an annual mean return of ", sample_mean*100, "% with a ", sample_sd*100, "% standard deviation.") # Turn plot into a gtable for adding text grobs my_gtable <- ggplot_gtable(ggplot_build(plot)) # Make the source and note text grobs source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"), gp =gpar(fontfamily = "my_font", fontsize = 8)) # Add the text grobs to the bototm of the gtable my_gtable <- arrangeGrob(my_gtable, bottom = source_grob) my_gtable <- arrangeGrob(my_gtable, bottom = note_grob) # Save the gtable ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm") # ############################ End ################################## #
#### ----------------------------------------------------------------------------- #### IMPORTANT: This app is deployed as appName = mock. So, after setting up the account info #### using rsconnect::setAcountInfo() with the token and secret in shinyapps.io, #### you have to run the command like this> #### rsconnect::deployApp(appName = "fitmock", account = "iese") #### #### ----------------------------------------------------------------------------- #### ----- PACKAGES AND CODES #### Load these packages for the functions you need. library(shiny) library(googlesheets) library(dplyr) library(gmailr) library(googleAuthR) library(mailR) library(plotly) library(ggplot2) library(highcharter) library(googledrive) source("global.R") #### google authentification token... #gm_auth_configure(path = "www/FitMockProject.json") #gtoken<-gs_auth(new_user = TRUE, cache = FALSE) #saveRDS(gtoken,"www/gtoken30Oct.rds") gs_auth(token = "www/gtoken30Oct.rds") #### ----------------------------------------------------------------------------- #### ----- LOADING STATIC DATA #### I will load the data that does not change with the app. #### It is data already stored in googlesheets. #### VERY IMPORTANT: DO NOT CHANGE THE NAMES OF COLUMNS OR WORKSHEETS IN EACH SHEET!!! #### here are the keys for each googlesheet we will use. #### To get a key> go to the sheet and get the share link, then #### do> googlesheets::gs_url("url...") #### that will give you a key to use... key_interviewers <- "1fcz_yJBUiHMGsnibUjCHsfbmUF8M6vF5TeDounbIAAU" key_results <- "13wYFf2y5r5G5NO4UcdVpvVpT24grYkcjeS0o5bh2MJM" button_choices <- c("Overall Structure", "Situation description", "Task description", "Action description", "Results", "Learnings", "Energy and Enthusiasm", "Rambling") #### Now, I am using the googlesheets package to load the interviewers sheet mentors <- gs_key(key_interviewers, lookup = FALSE, visibility = "private") %>% gs_read(ws = "Mentors") # only accessing one worksheet... 
mentees <- gs_key(key_interviewers, lookup = FALSE, visibility = "private") %>% gs_read(ws = "Mentees") # accessing the other worksheet... #### loading the results sheet (not reading, but keeping the connection open.) results <- gs_key(key_results, lookup = FALSE, visibility = "private") #### ----------------------------------------------------------------------------- ##### ----------- SERVER ##### This is the collection of data that is "transmitted" in the shiny app to the ##### user interface file (ui.R) shinyServer(function(input, output) { #### ----- RENDERING PARTS OF THE UI #### In this part, we will render some of the UI objects which depend on the information in the #### google sheets (like interviewer email for example). #### Once it is "rendered" in this server side, it will be shown in the UI side. #### I will name them "e+number" for element... #### ------------------- #### e1 = interviewers drop-down menu output$e1 <- renderUI({ shiny::selectizeInput(inputId = 'interviewer', choices = c("", mentors$Email), label = "Interviewer (person conducting the mock)", selected = NULL) }) #### e2 = interviewees drop-down menu output$e2 <- renderUI({ shiny::selectizeInput(inputId = 'interviewee', choices = c("", mentees$Email), label = "Interviewee", selected = NULL) }) #### e3 = button choices, I take from here so that when we filter to find exact matches to store, they are the same exact phrases output$e3 <- renderUI({ shiny::checkboxGroupInput(inputId = "improvements", label = "Improvements", choices = button_choices, selected = ",") }) #### ----------------------------------------------------------------------------- #### ----- SAVING RESULTS #### When the submit botton is pressed, all the results in each UI element #### are recorded in a vector. Then, that is added as a row in the results googlesheet. #### The observeEvent changes whenever the submit button is pushed. The results vector is what is stored. 
observeEvent(input$submit, { # People were pressing twice because of the lag between pressing and sending email, # so I added this BEFORE the email to keep them waiting... Interview_Questions_vector<-c(input$Tell_Me,input$Strength,input$Weakness,input$CBI,input$Why_Company) if(sum(Interview_Questions_vector)==0){ showNotification("Please select the type of interview") } else{ withProgress(message = 'Sending email ... ',{ # button_vector <- button_choices %in% c(unlist(input$improvements)) general_data_vector <- c(as.character(Sys.time()),input$interviewee,input$interviewer, input$type_interviewer) Tell_me_vector <-c(input$Straight,input$New,input$Energy,input$TellMeInput) Strength_vector <-c(input$StrengthVivid,input$StrengthClear,input$StrengthInput) Weakness_vector <-c(input$WeaknessVivid,input$WeaknessClear,input$WeaknessInput) CBI_vector <-c(input$STAR,input$impact,input$succinct,input$CBIInput) why_vector<-c(input$Specific,input$Robust,input$ToThePoint,input$WhyInput) general_attitude_vector <-c(input$Posture, input$Pause, input$BodyLang,input$AttitudeInput) # adding a new row here using the googlesheets categories_vector <- c("Time","Interviewer","Interviewee", "Type of Interviewer") #This line is used in conjunction with the results_vector for e-mail purpose results_vector <- c(general_data_vector) #This helps to compile the information to be send via e-mail #The If-Statement helps to input the data at its respective tab(s) if(input$Tell_Me ==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,Tell_me_vector))) results_vector <- c(results_vector,Tell_me_vector) categories_vector <- c(categories_vector, "<strong> Tell Me About Yourself </strong></br>Straight to the Point", "Something New", "Energy","Tell Me About Yourself Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Tell_Me", input = data_to_write )} if(input$Strength==TRUE){ data_to_write <- 
as.data.frame(t(c(general_data_vector,Strength_vector))) results_vector <- c(results_vector,Strength_vector) categories_vector <-c (categories_vector, "<strong> Strength Questions </strong></br>Strength - Vivid Example", "Strength - Clear Conclusion", "Strength Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Strength", input = data_to_write )} if(input$Weakness==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,Weakness_vector))) results_vector <- c(results_vector,Weakness_vector) categories_vector <-c (categories_vector, "<strong> Weakness Questions </strong></br>Weakness - Vivid Example", "Weakness - Clear Conclusion", "Weakness Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Weakness", input = data_to_write )} if(input$CBI==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,CBI_vector))) results_vector <- c(results_vector,CBI_vector) categories_vector <-c (categories_vector, "<strong> Competency Based Questions </strong></br>STAR", "Impact", "To the Point Storytelling", "CBI Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "CBI", input = data_to_write )} if(input$Why_Company==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,why_vector))) results_vector <- c(results_vector,why_vector) categories_vector <-c (categories_vector, "<strong> Why Company Questions</strong></br>Sector Specific","Robustness","Succinct","Why Company Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Why_Company", input = data_to_write )} #if-statements end here googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "General_attitude", input = as.data.frame(t(c(general_data_vector,general_attitude_vector)))) results_vector <- c(results_vector,general_attitude_vector) categories_vector <-c(categories_vector, "<strong> 
General Attitude </strong></br>Posture", "Pause", "Body Language", "Attitude and Presence input") ####### GOOGLESHEETS OLD CODE ########### # # results_vector <- c(as.character(Sys.time()), input$interviewee,input$interviewer, # input$type_interviewer, # input$Straight, input$New, # input$Energy, input$TellMeInput ,input$vivid, # input$clear, input$SnWInput, input$STAR, # input$impact, input$succinct, input$CBIInput, # input$Posture, input$Pause, input$BodyLang, # input$AttitudeInput) # # # adding a new row here using the googlesheets # googlesheets::gs_add_row(gs_key(key_results, # lookup = FALSE, # visibility = "private"), # ws = "Results", # input = results_vector) # # sending the email (this function is stored in global.R) send_fit_mail(interviewer = as.character(input$interviewee), # sending to this guy interviewee = as.character(input$interviewer), # also sending to this guy data_vector = results_vector, categories = categories_vector) }) } }) # Success text (when the email is successfully sent, you get a nice success message) # ts <- eventReactive(input$submit, {"Sucess! Data stored sent!"}) output$submitsucess <- renderText(ts()) #### ----------------------------------------------------------------------------- #### ------- OUTPUTS FOR THE COMPARISON CHARTS #### This is to compare results, on the second tab. #### First, we start importing data when the second action button is clicked # ch <- eventReactive(input$compare, { # gs_key(key_results, lookup = FALSE, visibility = "private") %>% # gs_read(ws = "Results") # }) # ##### Second, we will render the highchart object # output$hc_comparison <- renderHighchart({ # # # here are the results for this mock, taken from the inputs # real_results <- c(input$Straight, input$New, # input$Energy, input$communication) # # # now, we are going to average out all the results for all the mocks. 
# # we "select" the columns we want, then summarise them all by mean (average) # results_df <- ch() %>% # dplyr::select("Structure", "Impact", "Succent", # "Synthesis", "Communication") %>% # dplyr::summarise_all(.funs = "mean") # # # now I transpose the data.frame, so that columns are rows (just makes it easier to manipulate). # results_df <- as.data.frame(t(results_df)) # names(results_df) <- "score" # # # now, we build the highchart # highchart() %>% # hc_chart(type = "column") %>% # hc_add_series(data = results_df$score, # name = "Average", # color = "#9C2625") %>% # hc_add_series(data = real_results, # name = "This Mock", # color = "#c35f33") %>% # hc_xAxis(categories = as.character(row.names(results_df))) %>% # hc_exporting(enabled = TRUE) # }) })
/server.R
no_license
Fabricioholgado/fit-mock-app
R
false
false
13,027
r
#### ----------------------------------------------------------------------------- #### IMPORTANT: This app is deployed as appName = mock. So, after setting up the account info #### using rsconnect::setAcountInfo() with the token and secret in shinyapps.io, #### you have to run the command like this> #### rsconnect::deployApp(appName = "fitmock", account = "iese") #### #### ----------------------------------------------------------------------------- #### ----- PACKAGES AND CODES #### Load these packages for the functions you need. library(shiny) library(googlesheets) library(dplyr) library(gmailr) library(googleAuthR) library(mailR) library(plotly) library(ggplot2) library(highcharter) library(googledrive) source("global.R") #### google authentification token... #gm_auth_configure(path = "www/FitMockProject.json") #gtoken<-gs_auth(new_user = TRUE, cache = FALSE) #saveRDS(gtoken,"www/gtoken30Oct.rds") gs_auth(token = "www/gtoken30Oct.rds") #### ----------------------------------------------------------------------------- #### ----- LOADING STATIC DATA #### I will load the data that does not change with the app. #### It is data already stored in googlesheets. #### VERY IMPORTANT: DO NOT CHANGE THE NAMES OF COLUMNS OR WORKSHEETS IN EACH SHEET!!! #### here are the keys for each googlesheet we will use. #### To get a key> go to the sheet and get the share link, then #### do> googlesheets::gs_url("url...") #### that will give you a key to use... key_interviewers <- "1fcz_yJBUiHMGsnibUjCHsfbmUF8M6vF5TeDounbIAAU" key_results <- "13wYFf2y5r5G5NO4UcdVpvVpT24grYkcjeS0o5bh2MJM" button_choices <- c("Overall Structure", "Situation description", "Task description", "Action description", "Results", "Learnings", "Energy and Enthusiasm", "Rambling") #### Now, I am using the googlesheets package to load the interviewers sheet mentors <- gs_key(key_interviewers, lookup = FALSE, visibility = "private") %>% gs_read(ws = "Mentors") # only accessing one worksheet... 
mentees <- gs_key(key_interviewers, lookup = FALSE, visibility = "private") %>% gs_read(ws = "Mentees") # accessing the other worksheet... #### loading the results sheet (not reading, but keeping the connection open.) results <- gs_key(key_results, lookup = FALSE, visibility = "private") #### ----------------------------------------------------------------------------- ##### ----------- SERVER ##### This is the collection of data that is "transmitted" in the shiny app to the ##### user interface file (ui.R) shinyServer(function(input, output) { #### ----- RENDERING PARTS OF THE UI #### In this part, we will render some of the UI objects which depend on the information in the #### google sheets (like interviewer email for example). #### Once it is "rendered" in this server side, it will be shown in the UI side. #### I will name them "e+number" for element... #### ------------------- #### e1 = interviewers drop-down menu output$e1 <- renderUI({ shiny::selectizeInput(inputId = 'interviewer', choices = c("", mentors$Email), label = "Interviewer (person conducting the mock)", selected = NULL) }) #### e2 = interviewees drop-down menu output$e2 <- renderUI({ shiny::selectizeInput(inputId = 'interviewee', choices = c("", mentees$Email), label = "Interviewee", selected = NULL) }) #### e3 = button choices, I take from here so that when we filter to find exact matches to store, they are the same exact phrases output$e3 <- renderUI({ shiny::checkboxGroupInput(inputId = "improvements", label = "Improvements", choices = button_choices, selected = ",") }) #### ----------------------------------------------------------------------------- #### ----- SAVING RESULTS #### When the submit botton is pressed, all the results in each UI element #### are recorded in a vector. Then, that is added as a row in the results googlesheet. #### The observeEvent changes whenever the submit button is pushed. The results vector is what is stored. 
observeEvent(input$submit, { # People were pressing twice because of the lag between pressing and sending email, # so I added this BEFORE the email to keep them waiting... Interview_Questions_vector<-c(input$Tell_Me,input$Strength,input$Weakness,input$CBI,input$Why_Company) if(sum(Interview_Questions_vector)==0){ showNotification("Please select the type of interview") } else{ withProgress(message = 'Sending email ... ',{ # button_vector <- button_choices %in% c(unlist(input$improvements)) general_data_vector <- c(as.character(Sys.time()),input$interviewee,input$interviewer, input$type_interviewer) Tell_me_vector <-c(input$Straight,input$New,input$Energy,input$TellMeInput) Strength_vector <-c(input$StrengthVivid,input$StrengthClear,input$StrengthInput) Weakness_vector <-c(input$WeaknessVivid,input$WeaknessClear,input$WeaknessInput) CBI_vector <-c(input$STAR,input$impact,input$succinct,input$CBIInput) why_vector<-c(input$Specific,input$Robust,input$ToThePoint,input$WhyInput) general_attitude_vector <-c(input$Posture, input$Pause, input$BodyLang,input$AttitudeInput) # adding a new row here using the googlesheets categories_vector <- c("Time","Interviewer","Interviewee", "Type of Interviewer") #This line is used in conjunction with the results_vector for e-mail purpose results_vector <- c(general_data_vector) #This helps to compile the information to be send via e-mail #The If-Statement helps to input the data at its respective tab(s) if(input$Tell_Me ==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,Tell_me_vector))) results_vector <- c(results_vector,Tell_me_vector) categories_vector <- c(categories_vector, "<strong> Tell Me About Yourself </strong></br>Straight to the Point", "Something New", "Energy","Tell Me About Yourself Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Tell_Me", input = data_to_write )} if(input$Strength==TRUE){ data_to_write <- 
as.data.frame(t(c(general_data_vector,Strength_vector))) results_vector <- c(results_vector,Strength_vector) categories_vector <-c (categories_vector, "<strong> Strength Questions </strong></br>Strength - Vivid Example", "Strength - Clear Conclusion", "Strength Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Strength", input = data_to_write )} if(input$Weakness==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,Weakness_vector))) results_vector <- c(results_vector,Weakness_vector) categories_vector <-c (categories_vector, "<strong> Weakness Questions </strong></br>Weakness - Vivid Example", "Weakness - Clear Conclusion", "Weakness Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Weakness", input = data_to_write )} if(input$CBI==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,CBI_vector))) results_vector <- c(results_vector,CBI_vector) categories_vector <-c (categories_vector, "<strong> Competency Based Questions </strong></br>STAR", "Impact", "To the Point Storytelling", "CBI Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "CBI", input = data_to_write )} if(input$Why_Company==TRUE){ data_to_write <- as.data.frame(t(c(general_data_vector,why_vector))) results_vector <- c(results_vector,why_vector) categories_vector <-c (categories_vector, "<strong> Why Company Questions</strong></br>Sector Specific","Robustness","Succinct","Why Company Input") googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "Why_Company", input = data_to_write )} #if-statements end here googlesheets::gs_add_row(gs_key(key_results, lookup = FALSE, visibility = "private"), ws = "General_attitude", input = as.data.frame(t(c(general_data_vector,general_attitude_vector)))) results_vector <- c(results_vector,general_attitude_vector) categories_vector <-c(categories_vector, "<strong> 
General Attitude </strong></br>Posture", "Pause", "Body Language", "Attitude and Presence input") ####### GOOGLESHEETS OLD CODE ########### # # results_vector <- c(as.character(Sys.time()), input$interviewee,input$interviewer, # input$type_interviewer, # input$Straight, input$New, # input$Energy, input$TellMeInput ,input$vivid, # input$clear, input$SnWInput, input$STAR, # input$impact, input$succinct, input$CBIInput, # input$Posture, input$Pause, input$BodyLang, # input$AttitudeInput) # # # adding a new row here using the googlesheets # googlesheets::gs_add_row(gs_key(key_results, # lookup = FALSE, # visibility = "private"), # ws = "Results", # input = results_vector) # # sending the email (this function is stored in global.R) send_fit_mail(interviewer = as.character(input$interviewee), # sending to this guy interviewee = as.character(input$interviewer), # also sending to this guy data_vector = results_vector, categories = categories_vector) }) } }) # Success text (when the email is successfully sent, you get a nice success message) # ts <- eventReactive(input$submit, {"Sucess! Data stored sent!"}) output$submitsucess <- renderText(ts()) #### ----------------------------------------------------------------------------- #### ------- OUTPUTS FOR THE COMPARISON CHARTS #### This is to compare results, on the second tab. #### First, we start importing data when the second action button is clicked # ch <- eventReactive(input$compare, { # gs_key(key_results, lookup = FALSE, visibility = "private") %>% # gs_read(ws = "Results") # }) # ##### Second, we will render the highchart object # output$hc_comparison <- renderHighchart({ # # # here are the results for this mock, taken from the inputs # real_results <- c(input$Straight, input$New, # input$Energy, input$communication) # # # now, we are going to average out all the results for all the mocks. 
# # we "select" the columns we want, then summarise them all by mean (average) # results_df <- ch() %>% # dplyr::select("Structure", "Impact", "Succent", # "Synthesis", "Communication") %>% # dplyr::summarise_all(.funs = "mean") # # # now I transpose the data.frame, so that columns are rows (just makes it easier to manipulate). # results_df <- as.data.frame(t(results_df)) # names(results_df) <- "score" # # # now, we build the highchart # highchart() %>% # hc_chart(type = "column") %>% # hc_add_series(data = results_df$score, # name = "Average", # color = "#9C2625") %>% # hc_add_series(data = real_results, # name = "This Mock", # color = "#c35f33") %>% # hc_xAxis(categories = as.character(row.names(results_df))) %>% # hc_exporting(enabled = TRUE) # }) })
\name{like.sst} \alias{like.sst} %- Also NEED an '\alias' for EACH other topic documented here. \title{ %% ~~function to do ... ~~ } \description{ %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ like.sst(ingrid, datax = as.numeric(vec[11]), sigma = as.numeric(vec[12])) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{ingrid}{ %% ~~Describe \code{ingrid} here~~ } \item{datax}{ %% ~~Describe \code{datax} here~~ } \item{sigma}{ %% ~~Describe \code{sigma} here~~ } } \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ##---- Should be DIRECTLY executable !! ---- ##-- ==> Define data, use random, ##-- or do help(data=index) for the standard data sets. ## The function is currently defined as function (ingrid, datax = as.numeric(vec[11]), sigma = as.numeric(vec[12])) { if (is.na(sigma)) sigma = 5 tempL = ingrid * 0 tdim = dim(ingrid) tempL = dnorm(datax, ingrid, sigma) tempL } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/man/like.sst.Rd
no_license
galuardi/hmmwoa
R
false
false
1,619
rd
\name{like.sst} \alias{like.sst} %- Also NEED an '\alias' for EACH other topic documented here. \title{ %% ~~function to do ... ~~ } \description{ %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ like.sst(ingrid, datax = as.numeric(vec[11]), sigma = as.numeric(vec[12])) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{ingrid}{ %% ~~Describe \code{ingrid} here~~ } \item{datax}{ %% ~~Describe \code{datax} here~~ } \item{sigma}{ %% ~~Describe \code{sigma} here~~ } } \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% ~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ##---- Should be DIRECTLY executable !! ---- ##-- ==> Define data, use random, ##-- or do help(data=index) for the standard data sets. ## The function is currently defined as function (ingrid, datax = as.numeric(vec[11]), sigma = as.numeric(vec[12])) { if (is.na(sigma)) sigma = 5 tempL = ingrid * 0 tdim = dim(ingrid) tempL = dnorm(datax, ingrid, sigma) tempL } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/평가과제.R
no_license
oneday14/my_R
R
false
false
2,869
r
\name{species.tree} \alias{species.tree} \docType{data} \title{Species Phylogenetic tree} \description{This is a simulated phylogenetic trees with 200 tips.} \keyword{datasets}
/man/species.tree.Rd
no_license
cran/evobiR
R
false
false
177
rd
\name{species.tree} \alias{species.tree} \docType{data} \title{Species Phylogenetic tree} \description{This is a simulated phylogenetic trees with 200 tips.} \keyword{datasets}
context("Testing GenoGAMList-class") test_that("GenoGAM builds without parameters", { expect_true(is(GenoGAMList(), "GenoGAMList")) }) test_that("The accessor functions work correctly", { ggl <- makeTestGenoGAMList() expect_error(design()) expect_true(is(design(ggl), "formula")) expect_error(sizeFactors()) expect_true(is(sizeFactors(ggl), "numeric")) expect_error(getSettings()) expect_true(is(getSettings(ggl), "GenoGAMSettings")) expect_error(getFamily()) expect_true(is(getFamily(ggl), "character")) expect_error(colData()) expect_true(is(colData(ggl), "DataFrame")) expect_true(all(rownames(colData(ggl)) != rownames(colData(ggl@data[[1]])))) expect_error(getParams()) expect_true(is(getParams(ggl), "list")) expect_true(is(getCoefs(ggl), "matrix") | is(getCoefs(ggl), "HDF5Matrix")) expect_true(is(getKnots(ggl), "numeric")) expect_error(rowRanges()) expect_true(is(rowRanges(ggl), "list")) expect_true(is(rowRanges(ggl)[[1]], "GPos")) expect_error(assay()) expect_true(is(assay(ggl), "list")) expect_true(is(assay(ggl)[[1]], "DataFrame")) expect_true(all(colnames(ggl) != rownames(colData(ggl@data[[1]])), na.rm = TRUE)) dnames <- c(names(ggl), rownames(colData(ggl@data[[1]]))) expect_true(all(dimnames(ggl) != dnames, na.rm = TRUE)) expect_error(fits()) expect_error(se()) ggfits <- fits(ggl) ggses <- se(ggl) expect_true(is(ggfits, "list")) expect_true(is(ggfits[[1]], "DataFrame")) expect_true(is(ggses, "list")) expect_true(is(ggses[[1]], "DataFrame")) expect_true(all(names(ggfits[[1]]) == names(ggses[[1]]))) for(ii in 1:length(ggl@data)) { assays(ggl@data[[ii]])[["fits"]] <- NULL assays(ggl@data[[ii]])[["se"]] <- NULL } expect_true(all(sapply(assays(ggl), length) == 0)) expect_true(all(sapply(fits(ggl), is.null))) expect_true(all(sapply(se(ggl), is.null))) }) test_that("Subset on GenoGAM class works correctly", { ggl <- makeTestGenoGAMList() gr <- GRanges("chrX", IRanges(101, 1000)) subggl <- ggl[gr] subrange <- range(rowRanges(subggl)[[1]]) expect_true(all(dim(subggl) == 
c(900, dim(ggl)[2]))) expect_true(start(subrange) == 101) expect_true(end(subrange) == 1000) subggl <- subset(subggl, pos >= 901) subrange <- range(rowRanges(subggl)[[1]]) expect_true(all(dim(subggl) == c(100, dim(subggl)[2]))) expect_true(start(subrange) == 901) expect_true(end(subrange) == 1000) })
/tests/testthat/test_GenoGAMList-class.R
no_license
bedatadriven/fastGenoGAM
R
false
false
2,557
r
context("Testing GenoGAMList-class") test_that("GenoGAM builds without parameters", { expect_true(is(GenoGAMList(), "GenoGAMList")) }) test_that("The accessor functions work correctly", { ggl <- makeTestGenoGAMList() expect_error(design()) expect_true(is(design(ggl), "formula")) expect_error(sizeFactors()) expect_true(is(sizeFactors(ggl), "numeric")) expect_error(getSettings()) expect_true(is(getSettings(ggl), "GenoGAMSettings")) expect_error(getFamily()) expect_true(is(getFamily(ggl), "character")) expect_error(colData()) expect_true(is(colData(ggl), "DataFrame")) expect_true(all(rownames(colData(ggl)) != rownames(colData(ggl@data[[1]])))) expect_error(getParams()) expect_true(is(getParams(ggl), "list")) expect_true(is(getCoefs(ggl), "matrix") | is(getCoefs(ggl), "HDF5Matrix")) expect_true(is(getKnots(ggl), "numeric")) expect_error(rowRanges()) expect_true(is(rowRanges(ggl), "list")) expect_true(is(rowRanges(ggl)[[1]], "GPos")) expect_error(assay()) expect_true(is(assay(ggl), "list")) expect_true(is(assay(ggl)[[1]], "DataFrame")) expect_true(all(colnames(ggl) != rownames(colData(ggl@data[[1]])), na.rm = TRUE)) dnames <- c(names(ggl), rownames(colData(ggl@data[[1]]))) expect_true(all(dimnames(ggl) != dnames, na.rm = TRUE)) expect_error(fits()) expect_error(se()) ggfits <- fits(ggl) ggses <- se(ggl) expect_true(is(ggfits, "list")) expect_true(is(ggfits[[1]], "DataFrame")) expect_true(is(ggses, "list")) expect_true(is(ggses[[1]], "DataFrame")) expect_true(all(names(ggfits[[1]]) == names(ggses[[1]]))) for(ii in 1:length(ggl@data)) { assays(ggl@data[[ii]])[["fits"]] <- NULL assays(ggl@data[[ii]])[["se"]] <- NULL } expect_true(all(sapply(assays(ggl), length) == 0)) expect_true(all(sapply(fits(ggl), is.null))) expect_true(all(sapply(se(ggl), is.null))) }) test_that("Subset on GenoGAM class works correctly", { ggl <- makeTestGenoGAMList() gr <- GRanges("chrX", IRanges(101, 1000)) subggl <- ggl[gr] subrange <- range(rowRanges(subggl)[[1]]) expect_true(all(dim(subggl) == 
c(900, dim(ggl)[2]))) expect_true(start(subrange) == 101) expect_true(end(subrange) == 1000) subggl <- subset(subggl, pos >= 901) subrange <- range(rowRanges(subggl)[[1]]) expect_true(all(dim(subggl) == c(100, dim(subggl)[2]))) expect_true(start(subrange) == 901) expect_true(end(subrange) == 1000) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/androidenterprise_functions.R \name{managedconfigurationsforuser.delete} \alias{managedconfigurationsforuser.delete} \title{Removes a per-user managed configuration for an app for the specified user.} \usage{ managedconfigurationsforuser.delete(enterpriseId, userId, managedConfigurationForUserId) } \arguments{ \item{enterpriseId}{The ID of the enterprise} \item{userId}{The ID of the user} \item{managedConfigurationForUserId}{The ID of the managed configuration (a product ID), e} } \description{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}} } \details{ Authentication scopes used by this function are: \itemize{ \item https://www.googleapis.com/auth/androidenterprise } Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/androidenterprise)} Then run \code{googleAuthR::gar_auth()} to authenticate. See \code{\link[googleAuthR]{gar_auth}} for details. } \seealso{ \href{https://developers.google.com/android/work/play/emm-api}{Google Documentation} }
/googleandroidenterprisev1.auto/man/managedconfigurationsforuser.delete.Rd
permissive
Phippsy/autoGoogleAPI
R
false
true
1,095
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/androidenterprise_functions.R \name{managedconfigurationsforuser.delete} \alias{managedconfigurationsforuser.delete} \title{Removes a per-user managed configuration for an app for the specified user.} \usage{ managedconfigurationsforuser.delete(enterpriseId, userId, managedConfigurationForUserId) } \arguments{ \item{enterpriseId}{The ID of the enterprise} \item{userId}{The ID of the user} \item{managedConfigurationForUserId}{The ID of the managed configuration (a product ID), e} } \description{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}} } \details{ Authentication scopes used by this function are: \itemize{ \item https://www.googleapis.com/auth/androidenterprise } Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/androidenterprise)} Then run \code{googleAuthR::gar_auth()} to authenticate. See \code{\link[googleAuthR]{gar_auth}} for details. } \seealso{ \href{https://developers.google.com/android/work/play/emm-api}{Google Documentation} }
#' Make fixed-width bins #' #' Make fixed-width bins based on given bin size. #' #' @param bamfile A BAM file from which the header is read to determine the chromosome lengths. If a \code{bamfile} is specified, option \code{assembly} is ignored. #' @param assembly An assembly from which the chromosome lengths are determined. Please see \code{\link[GenomeInfoDb]{fetchExtendedChromInfoFromUCSC}} for available assemblies. This option is ignored if \code{bamfile} is specified. Alternatively a data.frame generated by \code{\link[GenomeInfoDb]{fetchExtendedChromInfoFromUCSC}}. #' @param chrom.lengths A named character vector with chromosome lengths. Names correspond to chromosomes. #' @param chromosome.format A character specifying the format of the chromosomes if \code{assembly} is specified. Either 'NCBI' for (1,2,3 ...) or 'UCSC' for (chr1,chr2,chr3 ...). If a \code{bamfile} or \code{chrom.lengths} is supplied, the format will be chosen automatically. #' @param binsizes A vector of bin sizes in base pairs. #' @param chromosomes A subset of chromosomes for which the bins are generated. #' @return A \code{list()} of \code{\link{GRanges}} objects with fixed-width bins. 
#' @author Aaron Taudt #' @importFrom Rsamtools scanBamHeader #' @export #' #'@examples #'## Make fixed-width bins of size 500kb and 1Mb #'bins <- fixedWidthBins(assembly='mm10', chromosome.format='NCBI', binsizes=c(5e5,1e6)) #'bins #' fixedWidthBins <- function(bamfile=NULL, assembly=NULL, chrom.lengths=NULL, chromosome.format, binsizes=1e6, chromosomes=NULL) { ### Check user input ### if (is.null(bamfile) & is.null(assembly) & is.null(chrom.lengths)) { stop("Please specify either a 'bamfile', 'assembly' or 'chrom.lengths'") } if (is.null(bamfile) & is.null(chrom.lengths)) { trigger.error <- chromosome.format } ### Get chromosome lengths ### if (!is.null(bamfile)) { ptm <- startTimedMessage(paste0("Reading header from ", bamfile, " ...")) file.header <- Rsamtools::scanBamHeader(bamfile)[[1]] chrom.lengths <- file.header$targets stopTimedMessage(ptm) } else if (!is.null(assembly)) { if (is.character(assembly)) { ptm <- startTimedMessage("Fetching chromosome lengths from UCSC ...") df <- GenomeInfoDb::fetchExtendedChromInfoFromUCSC(assembly) stopTimedMessage(ptm) } else if (is.data.frame(assembly)) { df <- assembly } else { stop("Unknown assembly") } chrom.lengths <- df$UCSC_seqlength if (chromosome.format=='UCSC') { names(chrom.lengths) <- df$UCSC_seqlevel } else if (chromosome.format=='NCBI') { names(chrom.lengths) <- df$NCBI_seqlevel } chrom.lengths <- chrom.lengths[!is.na(names(chrom.lengths))] chrom.lengths <- chrom.lengths[!is.na(chrom.lengths)] } else if (!is.null(chrom.lengths)) { chrom.lengths <- chrom.lengths[!is.na(names(chrom.lengths))] chrom.lengths <- chrom.lengths[!is.na(chrom.lengths)] } chroms.in.data <- names(chrom.lengths) if (is.null(chromosomes)) { chromosomes <- chroms.in.data } chroms2use <- intersect(chromosomes, chroms.in.data) ## Stop if none of the specified chromosomes exist if (length(chroms2use)==0) { chrstring <- paste0(chromosomes, collapse=', ') stop('Could not find length information for any of the specified chromosomes: ', 
chrstring) } ## Issue warning for non-existent chromosomes diff <- setdiff(chromosomes, chroms.in.data) if (length(diff)>0) { diffs <- paste0(diff, collapse=', ') warning('Could not find length information for the following chromosomes: ', diffs) } ### Making fixed-width bins ### bins.list <- list() for (binsize in binsizes) { ptm <- startTimedMessage("Making fixed-width bins for bin size ", binsize, " ...") bins <- GenomicRanges::GRangesList() skipped.chroms <- character() ## Loop over chromosomes for (chromosome in chroms2use) { ## Check last incomplete bin incomplete.bin <- chrom.lengths[chromosome] %% binsize > 0 if (incomplete.bin) { numbin <- floor(chrom.lengths[chromosome]/binsize) # floor: we don't want incomplete bins, ceiling: we want incomplete bins at the end } else { numbin <- chrom.lengths[chromosome]/binsize } if (numbin == 0) { skipped.chroms[chromosome] <- chromosome next } ## Initialize vectors chroms <- rep(chromosome,numbin) reads <- rep(0,numbin) start <- seq(from=1, by=binsize, length.out=numbin) end <- seq(from=binsize, by=binsize, length.out=numbin) # end[length(end)] <- seqlengths(data)[chromosome] # last ending coordinate is size of chromosome, only if incomplete bins are desired ## Create binned chromosome as GRanges object bins.chr <- GenomicRanges::GRanges(seqnames = rep(chromosome, numbin), ranges = IRanges(start=start, end=end), strand = rep(strand("*"), numbin) ) suppressWarnings( bins[[chromosome]] <- bins.chr ) } ## end loop chromosomes ### Concatenate all chromosomes bins <- unlist(bins, use.names=FALSE) seqlengths(bins) <- as.integer(chrom.lengths[names(seqlengths(bins))]) bins.list[[as.character(binsize)]] <- bins stopTimedMessage(ptm) if (length(skipped.chroms)>0) { warning("The following chromosomes were skipped because they are smaller than binsize ", binsize, ": ", paste0(skipped.chroms, collapse=', ')) } } return(bins.list) } #' Make variable-width bins #' #' Make variable-width bins based on a reference BAM file. 
#' This can be a simulated file (produced by TODO: insert link and aligned
#' with your favourite aligner) or a real reference.
#'
#' Variable-width bins are produced by first binning the reference BAM file
#' with fixed-width bins and selecting the desired number of reads per bin as
#' the (non-zero) maximum of the histogram. A new set of bins is then
#' generated such that every bin contains the desired number of reads.
#'
#' @param reads A \code{\link{GRanges}} with reads. See \code{\link{bam2GRanges}} and \code{\link{bed2GRanges}}.
#' @param binsizes A vector with binsizes. Resulting bins will be close to the specified binsizes.
#' @param chromosomes A subset of chromosomes for which the bins are generated.
#' @return A \code{list()} of \code{\link{GRanges}} objects with variable-width bins.
#' @author Aaron Taudt
#' @export
#'
#'@examples
#'## Get an example BAM file with ChIP-seq reads
#'bamfile <- system.file("extdata", "liver-H3K4me3-BN-male-bio2-tech1.bam",
#'                        package="chromstaRData")
#'## Read the file into a GRanges object
#'reads <- bam2GRanges(bamfile, chromosomes='chr12', pairedEndReads=FALSE,
#'                     min.mapq=10, remove.duplicate.reads=TRUE)
#'## Make variable-width bins of size 1000bp
#'bins <- variableWidthBins(reads, binsizes=1000)
#'## Plot the distribution of binsizes
#'hist(width(bins[['1000']]), breaks=50)
#'
variableWidthBins <- function(reads, binsizes, chromosomes=NULL) {

    ### Check user input ###
    chroms.in.data <- seqlevels(reads)
    if (is.null(chromosomes)) {
        chromosomes <- chroms.in.data
    }
    chroms2use <- intersect(chromosomes, chroms.in.data)
    ## Stop if none of the specified chromosomes exist
    if (length(chroms2use)==0) {
        chrstring <- paste0(chromosomes, collapse=', ')
        stop('Could not find length information for any of the specified chromosomes: ', chrstring)
    }
    ## Issue warning for non-existent chromosomes.
    ## Renamed from 'diff' to avoid shadowing base::diff().
    missing.chroms <- setdiff(chromosomes, chroms.in.data)
    if (length(missing.chroms)>0) {
        warning('Could not find length information for the following chromosomes: ', paste0(missing.chroms, collapse=', '))
    }
    ## Drop unwanted seqlevels
    reads <- reads[seqnames(reads) %in% chroms2use]
    reads <- keepSeqlevels(reads, chroms2use)

    ## Make fixed width bins as a first pass to estimate the typical coverage
    ptm <- startTimedMessage("Binning reads in fixed-width windows ...")
    binned.list <- suppressMessages( binReads(reads, assembly=NULL, binsizes=binsizes, chromosomes=chromosomes) )
    stopTimedMessage(ptm)

    ## Sort the reads (strand is irrelevant for binning)
    strand(reads) <- '*'
    reads <- sort(reads)

    ## Loop over binsizes
    bins.list <- list()
    for (i1 in seq_along(binsizes)) {  # seq_along() is safe for empty input, unlike 1:length()
        binsize <- binsizes[i1]
        ptm <- startTimedMessage("Making variable-width windows for bin size ", binsize, " ...")
        ## inherits() instead of class(x)=='GRanges': robust when the object
        ## carries more than one class attribute.
        if (inherits(binned.list, 'GRanges')) {
            binned <- binned.list
        } else {
            binned <- binned.list[[i1]]
        }
        ## Get mode of histogram: the most frequent non-zero read count per bin
        tab <- table(binned$counts)
        modecount <- as.integer(names(which.max(tab[names(tab)!=0])))
        ## Pick only every modecount-th read; chromosomes with fewer reads
        ## than one full bin are skipped.
        subreads <- GRangesList()
        skipped.chroms <- character()
        for (chrom in chroms2use) {
            reads.chr <- reads[seqnames(reads)==chrom]
            if (length(reads.chr) >= modecount) {
                idx <- seq(modecount, length(reads.chr), by=modecount)
                subreads[[chrom]] <- reads.chr[idx]
            } else {
                skipped.chroms[chrom] <- chrom
            }
        }
        if (length(skipped.chroms)>0) {
            warning("The following chromosomes were skipped because they are smaller than binsize ", binsize, ": ", paste0(skipped.chroms, collapse=', '))
        }
        subreads <- unlist(subreads, use.names=FALSE)
        ## Adjust length of reads to get consecutive bins
        subreads <- resize(subreads, width=1)
        ## Make new bins
        bins <- gaps(subreads, start=1L, end=seqlengths(subreads)-1L) # gaps until seqlengths-1 because we have to add 1 later to get consecutive bins
        bins <- bins[strand(bins)=='*']
        end(bins) <- end(bins) + 1
        ## We don't want incomplete bins at the end
        bins.split <- split(bins, seqnames(bins))
        bins.split <- endoapply(bins.split, function(x) { x[-length(x)] })
        bins <- unlist(bins.split, use.names=FALSE)
        ## Remove skipped chromosomes
        bins <- bins[!seqnames(bins) %in% skipped.chroms]
        bins <- keepSeqlevels(bins, setdiff(seqlevels(bins), skipped.chroms))
        bins.list[[as.character(binsize)]] <- bins
        stopTimedMessage(ptm)
    }
    return(bins.list)
}
/R/makeBins.R
no_license
ustcahwry/chromstaR
R
false
false
10,773
r
#' Make fixed-width bins
#'
#' Make fixed-width bins based on given bin size.
#'
#' @param bamfile A BAM file from which the header is read to determine the
#'   chromosome lengths. If a \code{bamfile} is specified, option
#'   \code{assembly} is ignored.
#' @param assembly An assembly from which the chromosome lengths are
#'   determined. Please see
#'   \code{\link[GenomeInfoDb]{fetchExtendedChromInfoFromUCSC}} for available
#'   assemblies. This option is ignored if \code{bamfile} is specified.
#'   Alternatively a data.frame generated by
#'   \code{\link[GenomeInfoDb]{fetchExtendedChromInfoFromUCSC}}.
#' @param chrom.lengths A named character vector with chromosome lengths.
#'   Names correspond to chromosomes.
#' @param chromosome.format A character specifying the format of the
#'   chromosomes if \code{assembly} is specified. Either 'NCBI' for
#'   (1,2,3 ...) or 'UCSC' for (chr1,chr2,chr3 ...). If a \code{bamfile} or
#'   \code{chrom.lengths} is supplied, the format will be chosen
#'   automatically.
#' @param binsizes A vector of bin sizes in base pairs.
#' @param chromosomes A subset of chromosomes for which the bins are
#'   generated.
#' @return A \code{list()} of \code{\link{GRanges}} objects with fixed-width
#'   bins.
#' @author Aaron Taudt
#' @importFrom Rsamtools scanBamHeader
#' @export
#'
#'@examples
#'## Make fixed-width bins of size 500kb and 1Mb
#'bins <- fixedWidthBins(assembly='mm10', chromosome.format='NCBI', binsizes=c(5e5,1e6))
#'bins
#'
fixedWidthBins <- function(bamfile=NULL, assembly=NULL, chrom.lengths=NULL, chromosome.format, binsizes=1e6, chromosomes=NULL) {

    ### Check user input ###
    ## Scalar conditions use '&&' (the elementwise '&' works here but is not
    ## the idiomatic operator for 'if').
    if (is.null(bamfile) && is.null(assembly) && is.null(chrom.lengths)) {
        stop("Please specify either a 'bamfile', 'assembly' or 'chrom.lengths'")
    }
    if (is.null(bamfile) && is.null(chrom.lengths)) {
        ## Force evaluation of 'chromosome.format' so that a missing argument
        ## raises an error here, before any expensive work is done.
        trigger.error <- chromosome.format
    }

    ### Get chromosome lengths ###
    if (!is.null(bamfile)) {
        ptm <- startTimedMessage(paste0("Reading header from ", bamfile, " ..."))
        file.header <- Rsamtools::scanBamHeader(bamfile)[[1]]
        chrom.lengths <- file.header$targets
        stopTimedMessage(ptm)
    } else if (!is.null(assembly)) {
        if (is.character(assembly)) {
            ptm <- startTimedMessage("Fetching chromosome lengths from UCSC ...")
            df <- GenomeInfoDb::fetchExtendedChromInfoFromUCSC(assembly)
            stopTimedMessage(ptm)
        } else if (is.data.frame(assembly)) {
            df <- assembly
        } else {
            stop("Unknown assembly")
        }
        chrom.lengths <- df$UCSC_seqlength
        if (chromosome.format=='UCSC') {
            names(chrom.lengths) <- df$UCSC_seqlevel
        } else if (chromosome.format=='NCBI') {
            names(chrom.lengths) <- df$NCBI_seqlevel
        }
        ## Drop entries without a usable name or length
        chrom.lengths <- chrom.lengths[!is.na(names(chrom.lengths))]
        chrom.lengths <- chrom.lengths[!is.na(chrom.lengths)]
    } else if (!is.null(chrom.lengths)) {
        chrom.lengths <- chrom.lengths[!is.na(names(chrom.lengths))]
        chrom.lengths <- chrom.lengths[!is.na(chrom.lengths)]
    }
    chroms.in.data <- names(chrom.lengths)
    if (is.null(chromosomes)) {
        chromosomes <- chroms.in.data
    }
    chroms2use <- intersect(chromosomes, chroms.in.data)
    ## Stop if none of the specified chromosomes exist
    if (length(chroms2use)==0) {
        chrstring <- paste0(chromosomes, collapse=', ')
        stop('Could not find length information for any of the specified chromosomes: ', chrstring)
    }
    ## Issue warning for non-existent chromosomes
    diff <- setdiff(chromosomes, chroms.in.data)
    if (length(diff)>0) {
        diffs <- paste0(diff, collapse=', ')
        warning('Could not find length information for the following chromosomes: ', diffs)
    }

    ### Making fixed-width bins ###
    bins.list <- list()
    for (binsize in binsizes) {
        ptm <- startTimedMessage("Making fixed-width bins for bin size ", binsize, " ...")
        bins <- GenomicRanges::GRangesList()
        skipped.chroms <- character()
        ## Loop over chromosomes
        for (chromosome in chroms2use) {
            ## floor() covers both the complete and incomplete last-bin case
            ## (floor is exact when the length divides evenly); incomplete
            ## bins at the chromosome end are dropped.
            numbin <- floor(chrom.lengths[chromosome]/binsize)
            if (numbin == 0) {
                skipped.chroms[chromosome] <- chromosome
                next
            }
            start <- seq(from=1, by=binsize, length.out=numbin)
            end <- seq(from=binsize, by=binsize, length.out=numbin)
            ## Create binned chromosome as GRanges object
            bins.chr <- GenomicRanges::GRanges(seqnames = rep(chromosome, numbin),
                                               ranges = IRanges(start=start, end=end),
                                               strand = rep(strand("*"), numbin)
                                               )
            suppressWarnings( bins[[chromosome]] <- bins.chr )
        } ## end loop chromosomes

        ### Concatenate all chromosomes
        bins <- unlist(bins, use.names=FALSE)
        seqlengths(bins) <- as.integer(chrom.lengths[names(seqlengths(bins))])
        bins.list[[as.character(binsize)]] <- bins
        stopTimedMessage(ptm)
        if (length(skipped.chroms)>0) {
            warning("The following chromosomes were skipped because they are smaller than binsize ", binsize, ": ", paste0(skipped.chroms, collapse=', '))
        }
    }
    return(bins.list)
}

#' Make variable-width bins
#'
#' Make variable-width bins based on a reference BAM file.
#' This can be a simulated file (produced by TODO: insert link and aligned
#' with your favourite aligner) or a real reference.
#'
#' Variable-width bins are produced by first binning the reference BAM file
#' with fixed-width bins and selecting the desired number of reads per bin as
#' the (non-zero) maximum of the histogram. A new set of bins is then
#' generated such that every bin contains the desired number of reads.
#'
#' @param reads A \code{\link{GRanges}} with reads. See \code{\link{bam2GRanges}} and \code{\link{bed2GRanges}}.
#' @param binsizes A vector with binsizes. Resulting bins will be close to the specified binsizes.
#' @param chromosomes A subset of chromosomes for which the bins are generated.
#' @return A \code{list()} of \code{\link{GRanges}} objects with variable-width bins.
#' @author Aaron Taudt
#' @export
#'
#'@examples
#'## Get an example BAM file with ChIP-seq reads
#'bamfile <- system.file("extdata", "liver-H3K4me3-BN-male-bio2-tech1.bam",
#'                        package="chromstaRData")
#'## Read the file into a GRanges object
#'reads <- bam2GRanges(bamfile, chromosomes='chr12', pairedEndReads=FALSE,
#'                     min.mapq=10, remove.duplicate.reads=TRUE)
#'## Make variable-width bins of size 1000bp
#'bins <- variableWidthBins(reads, binsizes=1000)
#'## Plot the distribution of binsizes
#'hist(width(bins[['1000']]), breaks=50)
#'
variableWidthBins <- function(reads, binsizes, chromosomes=NULL) {

    ### Check user input ###
    chroms.in.data <- seqlevels(reads)
    if (is.null(chromosomes)) {
        chromosomes <- chroms.in.data
    }
    chroms2use <- intersect(chromosomes, chroms.in.data)
    ## Stop if none of the specified chromosomes exist
    if (length(chroms2use)==0) {
        chrstring <- paste0(chromosomes, collapse=', ')
        stop('Could not find length information for any of the specified chromosomes: ', chrstring)
    }
    ## Issue warning for non-existent chromosomes.
    ## Renamed from 'diff' to avoid shadowing base::diff().
    missing.chroms <- setdiff(chromosomes, chroms.in.data)
    if (length(missing.chroms)>0) {
        warning('Could not find length information for the following chromosomes: ', paste0(missing.chroms, collapse=', '))
    }
    ## Drop unwanted seqlevels
    reads <- reads[seqnames(reads) %in% chroms2use]
    reads <- keepSeqlevels(reads, chroms2use)

    ## Make fixed width bins as a first pass to estimate the typical coverage
    ptm <- startTimedMessage("Binning reads in fixed-width windows ...")
    binned.list <- suppressMessages( binReads(reads, assembly=NULL, binsizes=binsizes, chromosomes=chromosomes) )
    stopTimedMessage(ptm)

    ## Sort the reads (strand is irrelevant for binning)
    strand(reads) <- '*'
    reads <- sort(reads)

    ## Loop over binsizes
    bins.list <- list()
    for (i1 in seq_along(binsizes)) {  # seq_along() is safe for empty input, unlike 1:length()
        binsize <- binsizes[i1]
        ptm <- startTimedMessage("Making variable-width windows for bin size ", binsize, " ...")
        ## inherits() instead of class(x)=='GRanges': robust when the object
        ## carries more than one class attribute.
        if (inherits(binned.list, 'GRanges')) {
            binned <- binned.list
        } else {
            binned <- binned.list[[i1]]
        }
        ## Get mode of histogram: the most frequent non-zero read count per bin
        tab <- table(binned$counts)
        modecount <- as.integer(names(which.max(tab[names(tab)!=0])))
        ## Pick only every modecount-th read; chromosomes with fewer reads
        ## than one full bin are skipped.
        subreads <- GRangesList()
        skipped.chroms <- character()
        for (chrom in chroms2use) {
            reads.chr <- reads[seqnames(reads)==chrom]
            if (length(reads.chr) >= modecount) {
                idx <- seq(modecount, length(reads.chr), by=modecount)
                subreads[[chrom]] <- reads.chr[idx]
            } else {
                skipped.chroms[chrom] <- chrom
            }
        }
        if (length(skipped.chroms)>0) {
            warning("The following chromosomes were skipped because they are smaller than binsize ", binsize, ": ", paste0(skipped.chroms, collapse=', '))
        }
        subreads <- unlist(subreads, use.names=FALSE)
        ## Adjust length of reads to get consecutive bins
        subreads <- resize(subreads, width=1)
        ## Make new bins
        bins <- gaps(subreads, start=1L, end=seqlengths(subreads)-1L) # gaps until seqlengths-1 because we have to add 1 later to get consecutive bins
        bins <- bins[strand(bins)=='*']
        end(bins) <- end(bins) + 1
        ## We don't want incomplete bins at the end
        bins.split <- split(bins, seqnames(bins))
        bins.split <- endoapply(bins.split, function(x) { x[-length(x)] })
        bins <- unlist(bins.split, use.names=FALSE)
        ## Remove skipped chromosomes
        bins <- bins[!seqnames(bins) %in% skipped.chroms]
        bins <- keepSeqlevels(bins, setdiff(seqlevels(bins), skipped.chroms))
        bins.list[[as.character(binsize)]] <- bins
        stopTimedMessage(ptm)
    }
    return(bins.list)
}
## Independent component analysis (ICA) of the iris measurements.
## Loads the four numeric iris columns, runs fastICA, and plots the
## estimated source matrix as a heatmap.
##
## NOTE(review): removed `rm(list = ls())` -- wiping the caller's global
## environment from inside a script is an anti-pattern (use a fresh
## session instead).

library(ggplot2)
library(tidyverse)
library(ggfortify)
library(fastICA)
library(datasets)

## Sepal.Length, Sepal.Width, Petal.Length, Petal.Width (drop Species).
df <- iris[c(1, 2, 3, 4)]

## NOTE(review): 7 components are requested from 4-dimensional data;
## fastICA cannot extract more components than input columns -- confirm
## the intended n.comp.
a <- fastICA(df, 7,
             alg.typ = "parallel", fun = "logcosh", alpha = 1,
             method = "R", row.norm = FALSE, maxit = 200,
             tol = 0.0001, verbose = TRUE)

## Visualise the estimated independent source matrix.
heatmap(a$S)
/solution3.R
no_license
NYUMachineLearning/hw1-Ying_Liao
R
false
false
330
r
## Independent component analysis (ICA) of the iris measurements.
## Loads the four numeric iris columns, runs fastICA, and plots the
## estimated source matrix as a heatmap.
##
## NOTE(review): removed `rm(list = ls())` -- wiping the caller's global
## environment from inside a script is an anti-pattern (use a fresh
## session instead).

library(ggplot2)
library(tidyverse)
library(ggfortify)
library(fastICA)
library(datasets)

## Sepal.Length, Sepal.Width, Petal.Length, Petal.Width (drop Species).
df <- iris[c(1, 2, 3, 4)]

## NOTE(review): 7 components are requested from 4-dimensional data;
## fastICA cannot extract more components than input columns -- confirm
## the intended n.comp.
a <- fastICA(df, 7,
             alg.typ = "parallel", fun = "logcosh", alpha = 1,
             method = "R", row.norm = FALSE, maxit = 200,
             tol = 0.0001, verbose = TRUE)

## Visualise the estimated independent source matrix.
heatmap(a$S)
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/itis.R \name{getcoveragefromtsn} \alias{getcoveragefromtsn} \title{Get coverage from tsn} \usage{ getcoveragefromtsn(tsn, ...) } \arguments{ \item{tsn}{TSN for a taxonomic group (numeric)} \item{...}{optional additional curl options (debugging tools mostly)} } \description{ Get coverage from tsn } \examples{ \dontrun{ getcoveragefromtsn(tsn=28727, config=timeout(4)) # coverage data getcoveragefromtsn(526852, config=timeout(4)) # no coverage data } } \keyword{internal}
/man/getcoveragefromtsn.Rd
permissive
MadeleineMcGreer/taxize
R
false
false
561
rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/itis.R \name{getcoveragefromtsn} \alias{getcoveragefromtsn} \title{Get coverage from tsn} \usage{ getcoveragefromtsn(tsn, ...) } \arguments{ \item{tsn}{TSN for a taxonomic group (numeric)} \item{...}{optional additional curl options (debugging tools mostly)} } \description{ Get coverage from tsn } \examples{ \dontrun{ getcoveragefromtsn(tsn=28727, config=timeout(4)) # coverage data getcoveragefromtsn(526852, config=timeout(4)) # no coverage data } } \keyword{internal}
## Gower distance matrix for mixed-type data.
## Requires the 'cluster' package (daisy).
distCat <- function(data) {
  as.matrix(daisy(data, metric = "gower", stand = TRUE, warnType = FALSE))  # was F -> FALSE
}

## N1 complexity measure: fraction of boundary points on the minimum
## spanning tree built from the distance matrix 'dst', plus a per-class
## breakdown. 'data' is expected to contain a factor column named 'class'.
## Requires the 'igraph' package.
##
## Returns list(overall_fraction, per_class_matrix) -- elements are
## deliberately unnamed to preserve the original interface.
##
## NOTE(review): graph.adjacency()/as_adj() are superseded by
## graph_from_adjacency_matrix()/as_adjacency_matrix() in newer igraph --
## confirm the igraph version before upgrading these calls.
n1byClass <- function(dst, data) {
  classes <- levels(data$class)

  ## Build the MST of the complete weighted graph defined by the distances.
  g <- graph.adjacency(dst, weighted = TRUE)
  tree <- as.matrix(as_adj(mst(as.undirected(g))))

  ## Edges of the tree as (row, col) index pairs.
  tmp <- which(tree != 0, arr.ind = TRUE)

  ## Edges connecting points of different classes; their unique endpoints
  ## are the class-boundary points.
  cons <- data[tmp[, 1], ]$class != data[tmp[, 2], ]$class
  aux <- length(unique(tmp[cons, 1]))

  ## Per-class matrix: entry [i, j] = (# tree edges from class i to class j)
  ## divided by the size of class i. Preallocated instead of grown with
  ## rbind() inside the loop; unused 'nam' accumulator removed.
  n1class <- matrix(0, nrow = length(classes), ncol = length(classes),
                    dimnames = list(classes, classes))
  for (i in classes) {
    tam <- sum(data[, "class"] == i)
    for (j in classes) {
      aux2 <- sum(data[tmp[, 1], ]$class == i & data[tmp[, 2], ]$class == j)
      n1class[i, j] <- aux2 / tam
    }
  }

  return(list(aux / nrow(data), n1class))
}
/n1byClass.R
no_license
everlandio/EVINCI
R
false
false
931
r
## Gower distance matrix for mixed-type data.
## Requires the 'cluster' package (daisy).
distCat <- function(data) {
  as.matrix(daisy(data, metric = "gower", stand = TRUE, warnType = FALSE))  # was F -> FALSE
}

## N1 complexity measure: fraction of boundary points on the minimum
## spanning tree built from the distance matrix 'dst', plus a per-class
## breakdown. 'data' is expected to contain a factor column named 'class'.
## Requires the 'igraph' package.
##
## Returns list(overall_fraction, per_class_matrix) -- elements are
## deliberately unnamed to preserve the original interface.
##
## NOTE(review): graph.adjacency()/as_adj() are superseded by
## graph_from_adjacency_matrix()/as_adjacency_matrix() in newer igraph --
## confirm the igraph version before upgrading these calls.
n1byClass <- function(dst, data) {
  classes <- levels(data$class)

  ## Build the MST of the complete weighted graph defined by the distances.
  g <- graph.adjacency(dst, weighted = TRUE)
  tree <- as.matrix(as_adj(mst(as.undirected(g))))

  ## Edges of the tree as (row, col) index pairs.
  tmp <- which(tree != 0, arr.ind = TRUE)

  ## Edges connecting points of different classes; their unique endpoints
  ## are the class-boundary points.
  cons <- data[tmp[, 1], ]$class != data[tmp[, 2], ]$class
  aux <- length(unique(tmp[cons, 1]))

  ## Per-class matrix: entry [i, j] = (# tree edges from class i to class j)
  ## divided by the size of class i. Preallocated instead of grown with
  ## rbind() inside the loop; unused 'nam' accumulator removed.
  n1class <- matrix(0, nrow = length(classes), ncol = length(classes),
                    dimnames = list(classes, classes))
  for (i in classes) {
    tam <- sum(data[, "class"] == i)
    for (j in classes) {
      aux2 <- sum(data[tmp[, 1], ]$class == i & data[tmp[, 2], ]$class == j)
      n1class[i, j] <- aux2 / tam
    }
  }

  return(list(aux / nrow(data), n1class))
}
## Intervention configuration for the TB model.
## Model constants (sigmaLBase, fBase, transBase, incLTBIBase) come from:
source('hillConstants.R')

directory <- 'data/'
baseFile <- paste0(directory, 'baseData.csv')  # was paste(c(...), collapse="")
intFilePrefix <- directory
intFileSuffix <- '.csv'

# Intervention string details:
#   inc/red --> increase/reduce
#   EnLTBI/Imm/LTBItrmt/Trans --> Entering LTBI, Immigration, LTBI Treatment,
#     Transmission.
#   Magnitude (the numbers) --> Interpret these by placing a 'by' prior to the
#     number and adding a percent at the end.
#   & --> 'and'. This chains interventions.
# Ex:
#   redImm25&incLTBItrmt100 --> reduce immigration by 25% and increase LTBI
#   treatment by 100%
# NOTE(review): "redImm75&incLTBItrmt100" appears twice below; the second
# occurrence was possibly meant to be "redImm75&incLTBItrmt300" -- confirm
# with the original authors before changing it.
allInterventions <- c("redEnLTBI100","redEnLTBI75","redEnLTBI50","redTrans100",
                      "redImm75","redImm50","incLTBItrmt100","incLTBItrmt300",
                      "redImm75&incLTBItrmt100","redImm50&incLTBItrmt100",
                      "redImm75&incLTBItrmt100","redImm50&incLTBItrmt300",
                      "incLTBItrmt100&redEnLTBI100","incLTBItrmt100&redEnLTBI75",
                      "incLTBItrmt100&redEnLTBI50","incLTBItrmt300&redEnLTBI100",
                      "incLTBItrmt300&redEnLTBI75","incLTBItrmt300&redEnLTBI50")
curInterventions <- c("redEnLTBI10","redEnLTBI25","redEnLTBI50","redEnLTBI100")

## Parse an intervention string (e.g. "redImm50&incLTBItrmt100") into the
## model parameters and bookkeeping costs it implies.
##
## interventionStr: '&'-chained intervention codes as documented above.
## Returns list(costs = c(newCases, totPop, LTBIEn),
##              params = c(sigmaL, f, trans, incLTBI)).
## Stops with an informative error on an unrecognized code/magnitude.
interventionConfig <- function(interventionStr) {
  error <- FALSE                # was T/F -- always spell out TRUE/FALSE
  sigmaL <- sigmaLBase          # define me (from hillConstants.R)
  f <- fBase
  trans <- transBase
  incLTBI <- incLTBIBase
  newCases <- 0
  totPop <- 0
  LTBIEn <- 0
  interVector <- strsplit(interventionStr, '&')[[1]]
  for (intervention in interVector) {
    interventionType <- sub("\\d+", "", intervention)               # sub empty str for digits
    interventionMag <- as.numeric(sub("\\D+", "", intervention))    # sub empty str for non-digits
    if (interventionType == "redEnLTBI") {
      incLTBI <- incLTBI * (100 - interventionMag) / 100
      LTBIEn <- 800 # LTBIEn + 400 + 600*(interventionMag/100) + x*100
    } else if (interventionType == "redImm") { # No Costs!
      if (interventionMag == 75) {
        f <- f * 0.25
      } else if (interventionMag == 50) {
        f <- f * 0.5
      } else {
        error <- TRUE           # was 'error = T'
      }
    } else if (interventionType == "incLTBItrmt") {
      if (interventionMag == 100) {
        totPop <- totPop + 0.05 # THESE ARE SO TOTALLY MADE UP.
        sigmaL <- sigmaL * 2
      } else if (interventionMag == 300) {
        totPop <- totPop + 0.1  # THESE ARE SO TOTALLY MADE UP.
        sigmaL <- sigmaL * 4
      } else {
        error <- TRUE
      }
    } else if (interventionType == "redTrans") {
      if (interventionMag == 100) {
        newCases <- newCases + 1000
        trans <- 0
      } else {
        error <- TRUE
      }
    } else {
      error <- TRUE
    }
    if (error) {
      print("I didn't recognize that intervention strategy. I'm sorry.")
      print("Here's the intervention you gave me: ")
      print(intervention)
      ## Bare stop() carried no message; include the offending code.
      stop("Unrecognized intervention: ", intervention, call. = FALSE)
    }
  }
  costs <- c(newCases = newCases, totPop = totPop, LTBIEn = LTBIEn)
  params <- c(sigmaL = sigmaL, f = f, trans = trans, incLTBI = incLTBI)
  return(list(costs = costs, params = params))
}
/deliverables/costBenefitAnalysis/interventionConfig.R
no_license
mmcdermott/disease-modeling
R
false
false
3,159
r
## Intervention configuration for the TB model.
## Model constants (sigmaLBase, fBase, transBase, incLTBIBase) come from:
source('hillConstants.R')

directory <- 'data/'
baseFile <- paste0(directory, 'baseData.csv')  # was paste(c(...), collapse="")
intFilePrefix <- directory
intFileSuffix <- '.csv'

# Intervention string details:
#   inc/red --> increase/reduce
#   EnLTBI/Imm/LTBItrmt/Trans --> Entering LTBI, Immigration, LTBI Treatment,
#     Transmission.
#   Magnitude (the numbers) --> Interpret these by placing a 'by' prior to the
#     number and adding a percent at the end.
#   & --> 'and'. This chains interventions.
# Ex:
#   redImm25&incLTBItrmt100 --> reduce immigration by 25% and increase LTBI
#   treatment by 100%
# NOTE(review): "redImm75&incLTBItrmt100" appears twice below; the second
# occurrence was possibly meant to be "redImm75&incLTBItrmt300" -- confirm
# with the original authors before changing it.
allInterventions <- c("redEnLTBI100","redEnLTBI75","redEnLTBI50","redTrans100",
                      "redImm75","redImm50","incLTBItrmt100","incLTBItrmt300",
                      "redImm75&incLTBItrmt100","redImm50&incLTBItrmt100",
                      "redImm75&incLTBItrmt100","redImm50&incLTBItrmt300",
                      "incLTBItrmt100&redEnLTBI100","incLTBItrmt100&redEnLTBI75",
                      "incLTBItrmt100&redEnLTBI50","incLTBItrmt300&redEnLTBI100",
                      "incLTBItrmt300&redEnLTBI75","incLTBItrmt300&redEnLTBI50")
curInterventions <- c("redEnLTBI10","redEnLTBI25","redEnLTBI50","redEnLTBI100")

## Parse an intervention string (e.g. "redImm50&incLTBItrmt100") into the
## model parameters and bookkeeping costs it implies.
##
## interventionStr: '&'-chained intervention codes as documented above.
## Returns list(costs = c(newCases, totPop, LTBIEn),
##              params = c(sigmaL, f, trans, incLTBI)).
## Stops with an informative error on an unrecognized code/magnitude.
interventionConfig <- function(interventionStr) {
  error <- FALSE                # was T/F -- always spell out TRUE/FALSE
  sigmaL <- sigmaLBase          # define me (from hillConstants.R)
  f <- fBase
  trans <- transBase
  incLTBI <- incLTBIBase
  newCases <- 0
  totPop <- 0
  LTBIEn <- 0
  interVector <- strsplit(interventionStr, '&')[[1]]
  for (intervention in interVector) {
    interventionType <- sub("\\d+", "", intervention)               # sub empty str for digits
    interventionMag <- as.numeric(sub("\\D+", "", intervention))    # sub empty str for non-digits
    if (interventionType == "redEnLTBI") {
      incLTBI <- incLTBI * (100 - interventionMag) / 100
      LTBIEn <- 800 # LTBIEn + 400 + 600*(interventionMag/100) + x*100
    } else if (interventionType == "redImm") { # No Costs!
      if (interventionMag == 75) {
        f <- f * 0.25
      } else if (interventionMag == 50) {
        f <- f * 0.5
      } else {
        error <- TRUE           # was 'error = T'
      }
    } else if (interventionType == "incLTBItrmt") {
      if (interventionMag == 100) {
        totPop <- totPop + 0.05 # THESE ARE SO TOTALLY MADE UP.
        sigmaL <- sigmaL * 2
      } else if (interventionMag == 300) {
        totPop <- totPop + 0.1  # THESE ARE SO TOTALLY MADE UP.
        sigmaL <- sigmaL * 4
      } else {
        error <- TRUE
      }
    } else if (interventionType == "redTrans") {
      if (interventionMag == 100) {
        newCases <- newCases + 1000
        trans <- 0
      } else {
        error <- TRUE
      }
    } else {
      error <- TRUE
    }
    if (error) {
      print("I didn't recognize that intervention strategy. I'm sorry.")
      print("Here's the intervention you gave me: ")
      print(intervention)
      ## Bare stop() carried no message; include the offending code.
      stop("Unrecognized intervention: ", intervention, call. = FALSE)
    }
  }
  costs <- c(newCases = newCases, totPop = totPop, LTBIEn = LTBIEn)
  params <- c(sigmaL = sigmaL, f = f, trans = trans, incLTBI = incLTBI)
  return(list(costs = costs, params = params))
}
## Auto-extracted example code for the 'lspls' package help page.
## The package's own example section is empty upstream (marked FIXME),
## so this script only attaches the package.
library(lspls)

### Name: lspls-package
### Title: LS-PLS Models
### Aliases: lspls-package
### Keywords: package multivariate regression

### ** Examples

## FIXME
/data/genthat_extracted_code/lspls/examples/lspls-package.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
169
r
## Auto-extracted example code for the 'lspls' package help page.
## The package's own example section is empty upstream (marked FIXME),
## so this script only attaches the package.
library(lspls)

### Name: lspls-package
### Title: LS-PLS Models
### Aliases: lspls-package
### Keywords: package multivariate regression

### ** Examples

## FIXME
## Exploratory analysis of SureScripts fixed drug-detail records.
## Reads the pipe-delimited export and plots distributions of a few columns.
## Top-level assignments use '<-' (were '=').
X <- read.csv("~/Desktop/local/surescript-code/fixed_drug_details.csv",
              sep="|", quote="",
              colClasses=c("character", "integer", "character", "integer",
                           "character", "character", "factor", "character",
                           "numeric", "Date", "character", "factor", "numeric",
                           "character", "factor", "integer", "Date", "character"))
# Takes around 90 sec on my laptop.
# Quantity and Refills seem to be unused.
# The only one that definitely is non-integer is QuantityValue.
# Boring columns are DrugCodedProductCodeQualifier, RefillQualifier.

## library() fails loudly if ggplot2 is missing; require() only returns FALSE.
library(ggplot2)

qplot(X$DaySupply[X$DaySupply < 110])
qplot(X$FillNumber[X$FillNumber < 15])
qplot(X$FillDate)
qplot(X$QuantityQualifier)
qplot(X$QuantityValue[X$QuantityValue < 500]) # plotting all QuantityValue conflates mL and pills.
qplot(X$RefillQuantity[X$RefillQuantity < 15])
qplot(X$DateWritten)

val_vs_qual <- ggplot(X[X$QuantityValue < 300,], aes(QuantityQualifier, QuantityValue))
val_vs_qual + geom_boxplot()

## NOTE(review): 'subset' shadows base::subset() in this session -- kept
## for compatibility, but consider renaming.
subset <- X[X$QuantityValue < 300,][1:10000,]
val10k <- ggplot(subset, aes(x = QuantityValue))
val10k + geom_density(aes(color=QuantityQualifier))

## Restrict to mass/volume qualifiers and compare their densities.
ggm <- rbind(X[X$QuantityQualifier == "GM",],
             X[X$QuantityQualifier == "GR",],
             X[X$QuantityQualifier == "ML",])
valgm <- ggplot(ggm[ggm$QuantityValue < 300,], aes(x = QuantityValue))
valgm + geom_density(aes(color=QuantityQualifier))

kruskal.test(QuantityValue ~ QuantityQualifier, data = X)  # P < 2.2e-16. Thus proving that milliliters are a different unit from pills. Well, duh.
/import.R
no_license
jmandel/bch-rx-challenge
R
false
false
1,481
r
## Exploratory analysis of SureScripts fixed drug-detail records.
## Reads the pipe-delimited export and plots distributions of a few columns.
## Top-level assignments use '<-' (were '=').
X <- read.csv("~/Desktop/local/surescript-code/fixed_drug_details.csv",
              sep="|", quote="",
              colClasses=c("character", "integer", "character", "integer",
                           "character", "character", "factor", "character",
                           "numeric", "Date", "character", "factor", "numeric",
                           "character", "factor", "integer", "Date", "character"))
# Takes around 90 sec on my laptop.
# Quantity and Refills seem to be unused.
# The only one that definitely is non-integer is QuantityValue.
# Boring columns are DrugCodedProductCodeQualifier, RefillQualifier.

## library() fails loudly if ggplot2 is missing; require() only returns FALSE.
library(ggplot2)

qplot(X$DaySupply[X$DaySupply < 110])
qplot(X$FillNumber[X$FillNumber < 15])
qplot(X$FillDate)
qplot(X$QuantityQualifier)
qplot(X$QuantityValue[X$QuantityValue < 500]) # plotting all QuantityValue conflates mL and pills.
qplot(X$RefillQuantity[X$RefillQuantity < 15])
qplot(X$DateWritten)

val_vs_qual <- ggplot(X[X$QuantityValue < 300,], aes(QuantityQualifier, QuantityValue))
val_vs_qual + geom_boxplot()

## NOTE(review): 'subset' shadows base::subset() in this session -- kept
## for compatibility, but consider renaming.
subset <- X[X$QuantityValue < 300,][1:10000,]
val10k <- ggplot(subset, aes(x = QuantityValue))
val10k + geom_density(aes(color=QuantityQualifier))

## Restrict to mass/volume qualifiers and compare their densities.
ggm <- rbind(X[X$QuantityQualifier == "GM",],
             X[X$QuantityQualifier == "GR",],
             X[X$QuantityQualifier == "ML",])
valgm <- ggplot(ggm[ggm$QuantityValue < 300,], aes(x = QuantityValue))
valgm + geom_density(aes(color=QuantityQualifier))

kruskal.test(QuantityValue ~ QuantityQualifier, data = X)  # P < 2.2e-16. Thus proving that milliliters are a different unit from pills. Well, duh.
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/user_interface.R \name{assert_valid_output_dir} \alias{assert_valid_output_dir} \title{Check that a directory is a valid FPEMglobal output directory} \usage{ assert_valid_output_dir( output_folder_path, post_processed = TRUE, countrytrajectories = post_processed, made_results = post_processed, adjusted_medians = post_processed, age_ratios = NULL, verbose = FALSE ) } \arguments{ \item{output_folder_path}{Path to directory to validate.} \item{post_processed}{Logical; has \code{\link{post_process_mcmc}} been run on the directory?} \item{countrytrajectories}{Logical; check for \file{countrytrajectories} or \file{aggregatetrajectories} directories?} \item{made_results}{Logical; has \code{\link{make_results}} been run on the directory?} \item{adjusted_medians}{Logical; check for adjusted median results?} \item{age_ratios}{Logical; check for age ratio results? Defaults to \code{FALSE} if \code{output_folder_path} points to a \dQuote{15-49} run, otherwise the value of \code{post_processed}.} } \value{ If all checks pass, \code{output_folder_path} is returned invisibly, otherwise an error is thrown. } \description{ Checks the content of \code{output_dir} to make sure certain directories and files are present. If some are missing, an error is returned, otherwise \code{output_folder_path} is returned invisibly. } \details{ The default for 'post_processed' is 'TRUE' because an un-processed directory doesn't even have 'mcmc.array.rda', which means it's unlikely to be used. } \author{ Mark Wheldon }
/man/assert_valid_output_dir.Rd
permissive
FPcounts/FPEMglobal
R
false
true
1,611
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/user_interface.R \name{assert_valid_output_dir} \alias{assert_valid_output_dir} \title{Check that a directory is a valid FPEMglobal output directory} \usage{ assert_valid_output_dir( output_folder_path, post_processed = TRUE, countrytrajectories = post_processed, made_results = post_processed, adjusted_medians = post_processed, age_ratios = NULL, verbose = FALSE ) } \arguments{ \item{output_folder_path}{Path to directory to validate.} \item{post_processed}{Logical; has \code{\link{post_process_mcmc}} been run on the directory?} \item{countrytrajectories}{Logical; check for \file{countrytrajectories} or \file{aggregatetrajectories} directories?} \item{made_results}{Logical; has \code{\link{make_results}} been run on the directory?} \item{adjusted_medians}{Logical; check for adjusted median results?} \item{age_ratios}{Logical; check for age ratio results? Defaults to \code{FALSE} if \code{output_folder_path} points to a \dQuote{15-49} run, otherwise the value of \code{post_processed}.} } \value{ If all checks pass, \code{output_folder_path} is returned invisibly, otherwise an error is thrown. } \description{ Checks the content of \code{output_dir} to make sure certain directories and files are present. If some are missing, an error is returned, otherwise \code{output_folder_path} is returned invisibly. } \details{ The default for 'post_processed' is 'TRUE' because an un-processed directory doesn't even have 'mcmc.array.rda', which means it's unlikely to be used. } \author{ Mark Wheldon }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/C3FURV.R \name{subdaily_out_of_range} \alias{subdaily_out_of_range} \title{Sub-daily big errors test} \usage{ subdaily_out_of_range( subdailydata, meta = NULL, outpath, time_offset = 0, ta_day_upper = 45, ta_day_lower = -35, ta_night_upper = 40, ta_night_lower = -40, rr_upper = 100, rr_lower = 0, w_upper = 50, w_lower = 0, dd_upper = 360, dd_lower = 0, sc_upper = 100, sc_lower = 0, sd_upper = 200, sd_lower = 0, fs_upper = 100, fs_lower = 0 ) } \arguments{ \item{subdailydata}{A character string giving the path of the input file, or a 7-column matrix with following columns: variable code, year, month, day, hour, minute, value.} \item{meta}{A character vector with 6 elements: station ID, latitude, longitude, altitude, variable code, units. If \code{subdailydata} is a path, \code{meta} is ignored.} \item{outpath}{Character string giving the path for the QC results.} \item{time_offset}{Offset in hours to add to the time to obtain local time. By default, time_offset = 0.} \item{ta_day_upper}{is the ta maximum day threshold in degrees Celsius. By default, ta_day_upper = 45 C.} \item{ta_day_lower}{is the ta minimum day threshold in degrees Celsius. By default, ta_day_lower = -35 C.} \item{ta_night_upper}{is the ta maximum night threshold in degrees Celsius. By default, ta_night_upper = 40 C.} \item{ta_night_lower}{is the ta minimum night threshold in degrees Celsius. By default, ta_night_lower = -40 C.} \item{rr_upper}{is the rr maximum threshold in millimetres. By default, rr_upper = 100 mm.} \item{rr_lower}{is the rr minimum threshold in millimetres. By default, rr_lower = 0 mm.} \item{w_upper}{is the w maximum threshold in metres per second. By default, w_upper = 50 m/s.} \item{w_lower}{is the w minimum threshold in metres per second. By default, w_lower = 0 m/s.} \item{dd_upper}{is the dd maximum threshold in degrees North.
By default, dd_upper = 360.} \item{dd_lower}{is the dd minimum threshold in degrees North. By default, dd_lower = 0.} \item{sc_upper}{is the sc maximum threshold in percent. By default, sc_upper = 100\%.} \item{sc_lower}{is the sc minimum threshold in percent. By default, sc_lower = 0\%.} \item{sd_upper}{is the sd maximum threshold in centimetres. By default, sd_upper = 200 cm.} \item{sd_lower}{is the sd minimum threshold in centimetres. By default, sd_lower = 0 cm.} \item{fs_upper}{is the fs maximum threshold in centimetres. By default, fs_upper = 100 cm.} \item{fs_lower}{is the fs minimum threshold in centimetres. By default, fs_lower = 0 cm.} } \description{ Find the subdaily temperature (ta), wind speed (w), wind direction (dd), snow cover (sc), snow depth (sd) and fresh snow (fs) values that exceed thresholds selected by the user. The output is a list with the days in which ta, rr, dd, w, sc, sd or fs exceeds some threshold. } \details{ The input file must follow the Copernicus Station Exchange Format (SEF). } \examples{ subdaily_out_of_range(Rosario$ta, Meta$ta[which(Meta$ta$id=="Rosario"),], outpath = tempdir(), time_offset = -4.28, ta_day_upper = 35) } \author{ Alba Gilabert, Yuri Brugnara }
/man/subdaily_out_of_range.Rd
no_license
cran/dataresqc
R
false
true
3,256
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/C3FURV.R \name{subdaily_out_of_range} \alias{subdaily_out_of_range} \title{Sub-daily big errors test} \usage{ subdaily_out_of_range( subdailydata, meta = NULL, outpath, time_offset = 0, ta_day_upper = 45, ta_day_lower = -35, ta_night_upper = 40, ta_night_lower = -40, rr_upper = 100, rr_lower = 0, w_upper = 50, w_lower = 0, dd_upper = 360, dd_lower = 0, sc_upper = 100, sc_lower = 0, sd_upper = 200, sd_lower = 0, fs_upper = 100, fs_lower = 0 ) } \arguments{ \item{subdailydata}{A character string giving the path of the input file, or a 7-column matrix with following columns: variable code, year, month, day, hour, minute, value.} \item{meta}{A character vector with 6 elements: station ID, latitude, longitude, altitude, variable code, units. If \code{subdailydata} is a path, \code{meta} is ignored.} \item{outpath}{Character string giving the path for the QC results.} \item{time_offset}{Offset in hours to add to the time to obtain local time. By default, time_offset = 0.} \item{ta_day_upper}{is the ta maximum day threshold in degrees Celsius. By default, ta_day_upper = 45 C.} \item{ta_day_lower}{is the ta minimum day threshold in degrees Celsius. By default, ta_day_lower = -35 C.} \item{ta_night_upper}{is the ta maximum night threshold in degrees Celsius. By default, ta_night_upper = 40 C.} \item{ta_night_lower}{is the ta minimum night threshold in degrees Celsius. By default, ta_night_lower = -40 C.} \item{rr_upper}{is the rr maximum threshold in millimetres. By default, rr_upper = 100 mm.} \item{rr_lower}{is the rr minimum threshold in millimetres. By default, rr_lower = 0 mm.} \item{w_upper}{is the w maximum threshold in metres per second. By default, w_upper = 50 m/s.} \item{w_lower}{is the w minimum threshold in metres per second. By default, w_lower = 0 m/s.} \item{dd_upper}{is the dd maximum threshold in degrees North.
By default, dd_upper = 360.} \item{dd_lower}{is the dd minimum threshold in degrees North. By default, dd_lower = 0.} \item{sc_upper}{is the sc maximum threshold in percent. By default, sc_upper = 100\%.} \item{sc_lower}{is the sc minimum threshold in percent. By default, sc_lower = 0\%.} \item{sd_upper}{is the sd maximum threshold in centimetres. By default, sd_upper = 200 cm.} \item{sd_lower}{is the sd minimum threshold in centimetres. By default, sd_lower = 0 cm.} \item{fs_upper}{is the fs maximum threshold in centimetres. By default, fs_upper = 100 cm.} \item{fs_lower}{is the fs minimum threshold in centimetres. By default, fs_lower = 0 cm.} } \description{ Find the subdaily temperature (ta), wind speed (w), wind direction (dd), snow cover (sc), snow depth (sd) and fresh snow (fs) values that exceed thresholds selected by the user. The output is a list with the days in which ta, rr, dd, w, sc, sd or fs exceeds some threshold. } \details{ The input file must follow the Copernicus Station Exchange Format (SEF). } \examples{ subdaily_out_of_range(Rosario$ta, Meta$ta[which(Meta$ta$id=="Rosario"),], outpath = tempdir(), time_offset = -4.28, ta_day_upper = 35) } \author{ Alba Gilabert, Yuri Brugnara }
class(c(TRUE,FALSE)) class(sqrt(1:10)) class(3+1i) class(1) class(1L) class(0.5:4.5) class(1:5) class(c("she","he","the")) (gender <- factor(c("male","female","male","female"))) levels(gender) nlevels(gender) as.integer(gender) gender_char <- sample(c("female","male"),1000,replace = TRUE) gender_fac <- as.factor(gender_char) object.size(gender_char) object.size(gender_fac) as.character(gender) if(!is(x,"some_class")) is.character("red lorry,yellow lorry") is.logical(FALSE) is.list(list(a=1,b=2)) is(gender,"factor") is.factor(gender) is(pattern="^is",baseenv()) is.numeric(1) is.numeric(1L) is.integer(1) is.integer(1L) is.double(1) is.double(1L) x <- "123.456" as(x,"numeric") as.numeric(x) y<-c(2,12,343,34997) as(y, "data.frame") as.data.frame(y) x <- "123.456" class(x) <- "numeric" is.numeric(x) y <- c(1,2,3,4,5,6,7) for(i in y)i for(i in y)print(i) num <- runif(30) summary(num) letters letters[1:5] LETTERS fac <- factor(sample(letters[1:5],30,replace = TRUE)) summary(fac) bool <- sample(c(TRUE,FALSE,NA),30,replace = TRUE) summary(bool) attributes(fac) view(dfr) new_dfr <- edit(dfr) peach <- 1 plum <- "fruity" pear <- TRUE ls() ls(pattern="ea") ls() ls.str() browseEnv() ls() rm(FA) ls() rm(list=ls) ls()
/罗斐练习2.R
no_license
ROFFI-06/R_01
R
false
false
1,291
r
class(c(TRUE,FALSE)) class(sqrt(1:10)) class(3+1i) class(1) class(1L) class(0.5:4.5) class(1:5) class(c("she","he","the")) (gender <- factor(c("male","female","male","female"))) levels(gender) nlevels(gender) as.integer(gender) gender_char <- sample(c("female","male"),1000,replace = TRUE) gender_fac <- as.factor(gender_char) object.size(gender_char) object.size(gender_fac) as.character(gender) if(!is(x,"some_class")) is.character("red lorry,yellow lorry") is.logical(FALSE) is.list(list(a=1,b=2)) is(gender,"factor") is.factor(gender) is(pattern="^is",baseenv()) is.numeric(1) is.numeric(1L) is.integer(1) is.integer(1L) is.double(1) is.double(1L) x <- "123.456" as(x,"numeric") as.numeric(x) y<-c(2,12,343,34997) as(y, "data.frame") as.data.frame(y) x <- "123.456" class(x) <- "numeric" is.numeric(x) y <- c(1,2,3,4,5,6,7) for(i in y)i for(i in y)print(i) num <- runif(30) summary(num) letters letters[1:5] LETTERS fac <- factor(sample(letters[1:5],30,replace = TRUE)) summary(fac) bool <- sample(c(TRUE,FALSE,NA),30,replace = TRUE) summary(bool) attributes(fac) view(dfr) new_dfr <- edit(dfr) peach <- 1 plum <- "fruity" pear <- TRUE ls() ls(pattern="ea") ls() ls.str() browseEnv() ls() rm(FA) ls() rm(list=ls) ls()
# fit multivariate model to experimental and perturbed time series source('~/Dropbox/1current/multidimensionalChangeMS/multiComponentChange/r_scripts/00_init_dirs_load_packages.R') # these data were compiled and wrangled by Dr Alban Sagious # code to download and wrangle data available at: https://github.com/sablowes/MulticomponentBioChange/temporal_change_data_preparation dat <- read_csv(paste0(path2wd, 'multiComponentChange/data/long_table.csv')) good_dat <- dat %>% filter(minN > 5) %>% unite(study_trt, c(dataset_id, treatment), remove = F) %>% mutate(cYear = year - mean(year)) # remove treatments with only a single observation (i.e., not time series) remove <- good_dat %>% group_by(study_trt) %>% summarise(nyrs = n_distinct(cYear)) %>% filter(nyrs==1) good_dat <- good_dat %>% filter(!study_trt %in% remove$study_trt) %>% # combine site and block to a single covariate mutate(site = str_replace(site, ' ', '_')) %>% unite(sb, c(site, block)) # model with non-varying intercept and slope S_model_s2 <- bf(S ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) N_model_s2 <- bf(N ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) Sn_model_s2 <- bf(Sn ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) S_PIE_model_s2 <- bf(ENSPIE ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) btx_multi4_fit_global <- brm(S_model_s2 + N_model_s2 + S_PIE_model_s2 + Sn_model_s2 + set_rescor(FALSE), data= good_dat, prior = c(prior('normal(0,1)', class = 'sigma', resp = 'S'), prior('normal(0,1)', class = 'sigma', resp = 'N'), prior('normal(0,1)', class = 'sigma', resp = 'ENSPIE'), prior('normal(0,1)', class = 'sigma', resp = 'Sn'), # sd of varying intercepts and slopes prior('normal(0,1)', class = 'sd', resp = 'S'), prior('normal(0,1)', class = 'sd', resp = 'N'), prior('normal(0,1)', class = 'sd', resp = 'ENSPIE'), prior('normal(0,1)', class = 'sd', resp 
= 'Sn'), # priors for non-varying slopes prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'S'), prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'N'), prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'ENSPIE'), prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'Sn'), # non-varying intercepts prior('normal(0,1)', class = 'Intercept', resp = 'S'), prior('normal(0,1)', class = 'Intercept', resp = 'N'), prior('normal(0,1)', class = 'Intercept', resp = 'ENSPIE'), prior('normal(0,1)', class = 'Intercept', resp = 'Sn')), # control = list(adapt_delta=0.99), # prior predictive # sample_prior = 'only', # init_r = 0.5, iter = 3000, cores = 4, chains = 4) save(btx_multi4_fit_global, file = '~/Dropbox/1current/multidimensionalChangeMS/multiComponentChange/results/btx_multi4_fit_global.Rdata')
/r_scripts/02_btx_multi4_fit.R
permissive
sablowes/MulticomponentBioChange
R
false
false
3,752
r
# fit multivariate model to experimental and perturbed time series source('~/Dropbox/1current/multidimensionalChangeMS/multiComponentChange/r_scripts/00_init_dirs_load_packages.R') # these data were compiled and wrangled by Dr Alban Sagious # code to download and wrangle data available at: https://github.com/sablowes/MulticomponentBioChange/temporal_change_data_preparation dat <- read_csv(paste0(path2wd, 'multiComponentChange/data/long_table.csv')) good_dat <- dat %>% filter(minN > 5) %>% unite(study_trt, c(dataset_id, treatment), remove = F) %>% mutate(cYear = year - mean(year)) # remove treatments with only a single observation (i.e., not time series) remove <- good_dat %>% group_by(study_trt) %>% summarise(nyrs = n_distinct(cYear)) %>% filter(nyrs==1) good_dat <- good_dat %>% filter(!study_trt %in% remove$study_trt) %>% # combine site and block to a single covariate mutate(site = str_replace(site, ' ', '_')) %>% unite(sb, c(site, block)) # model with non-varying intercept and slope S_model_s2 <- bf(S ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) N_model_s2 <- bf(N ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) Sn_model_s2 <- bf(Sn ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) S_PIE_model_s2 <- bf(ENSPIE ~ cYear + (1 | dataset_id) + (cYear | p | study_trt) + (1| sb), family = lognormal()) btx_multi4_fit_global <- brm(S_model_s2 + N_model_s2 + S_PIE_model_s2 + Sn_model_s2 + set_rescor(FALSE), data= good_dat, prior = c(prior('normal(0,1)', class = 'sigma', resp = 'S'), prior('normal(0,1)', class = 'sigma', resp = 'N'), prior('normal(0,1)', class = 'sigma', resp = 'ENSPIE'), prior('normal(0,1)', class = 'sigma', resp = 'Sn'), # sd of varying intercepts and slopes prior('normal(0,1)', class = 'sd', resp = 'S'), prior('normal(0,1)', class = 'sd', resp = 'N'), prior('normal(0,1)', class = 'sd', resp = 'ENSPIE'), prior('normal(0,1)', class = 'sd', resp 
= 'Sn'), # priors for non-varying slopes prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'S'), prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'N'), prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'ENSPIE'), prior('normal(0,0.2)', class = 'b', coef = 'cYear', resp = 'Sn'), # non-varying intercepts prior('normal(0,1)', class = 'Intercept', resp = 'S'), prior('normal(0,1)', class = 'Intercept', resp = 'N'), prior('normal(0,1)', class = 'Intercept', resp = 'ENSPIE'), prior('normal(0,1)', class = 'Intercept', resp = 'Sn')), # control = list(adapt_delta=0.99), # prior predictive # sample_prior = 'only', # init_r = 0.5, iter = 3000, cores = 4, chains = 4) save(btx_multi4_fit_global, file = '~/Dropbox/1current/multidimensionalChangeMS/multiComponentChange/results/btx_multi4_fit_global.Rdata')
generate.year_summary.O3 <- function(all_years = FALSE, single_year = NULL, year_range = NULL, file_path = NULL, station, quarter_bounds = c("04-01", "09-30")) { # Example: # year_summary_O3(single_year = 2001, file_path = "~/Documents/R (Working)") measure <- "O3" file_path <- ifelse(is.null(file_path), getwd(), file_path) # Add require statement require(lubridate) # Generate the appropriate file list depending on the options chosen # # Generate file list for selected pollutant for all years if (all_years == TRUE & is.null(single_year) & is.null(year_range)) file_list <- list.files(path = file_path, pattern = "^[0-9][0-9][0-9][0-9][0-9A-Z]*O3\\.csv") # If a year range of years is provided, capture start and end year boundaries if (all_years == FALSE & is.null(single_year) & !is.null(year_range)) { start_year_range <- substr(as.character(year_range), 1, 4) end_year_range <- substr(as.character(year_range), 6, 9) for (i in start_year_range:end_year_range) { nam <- paste("file_list", i, sep = ".") assign(nam, list.files(path = file_path, pattern = paste("^",i,"[0-9A-Z]*O3\\.csv", sep = ''))) } # Combine vector lists list <- vector("list", length(ls(pattern = "file_list."))) for (j in 1:length(ls(pattern = "file_list."))) { list[j] <- list(get(ls(pattern = "file_list.")[j])) } file_list <- unlist(list) # Remove temp objects rm(list) rm(i) rm(j) rm(nam) rm(list = ls(pattern = "file_list.")) } # If 'single_year' specified, filter the list to only include objects of the specified year if (all_years == FALSE & !is.null(single_year) & is.null(year_range)) { assign("file_list", list.files(path = file_path, pattern = paste("^",single_year,"[0-9A-Z]*O3\\.csv", sep = ''))) } # Loop through reading in CSV files; convert time column back to POSIXct time objects for (i in 1:length(file_list)){ df <- read.csv(file = paste(file_path, "/", file_list[i], sep = ''), header = TRUE, stringsAsFactors = FALSE) if (exists("station")){ df <- subset(df, df$STATION == station) } df$time <- 
as.POSIXct(df$time) # get number of stations no_stations <- length(unique(df$STATION)) # inspect dataset to verify the year year <- round(mean(year(df$time))) # Determine number of days in year days_in_year <- yday(as.POSIXct(paste(year, "-12-31", sep = ''), origin = "1970-01-01", tz = "GMT")) # Determine number of hours in year hours_in_year <- days_in_year * 24 # get vector list of stations for analysis station_list <- mat.or.vec(nr = no_stations, nc = 1) station_list <- unique(df$STATION) # Initialize the output file for writing if (i == 1) { cat("Year,Pollutant,NapsID,", "Annual_O3_Average_Daily_8_hr_Max,", "Annual_O3_4th_Highest,", "Q2_Q3.Complete_%,", "Is_Annual_O3_4th_Highest_Valid,Annual_O3_4th_Highest_Exceed,", "Annual_O3_4th_Highest_Flag", file = paste(measure,"_data_summary.csv", sep = ''), sep = '') cat("", file = paste(measure,"_data_summary.csv", sep = ''), sep = "\n", append = TRUE) } # The data required to calculate the annual 4th highest daily 8hr-O3-max value for a #station includes: # i. The daily maximum 8-hour average ozone concentration for each day of the year # ii. 
The annual 4th highest daily 8hr-O3-max for a given year # # Initialize matrix with (1) year, (2) day of year, (3) the date, # (4) number of dataset rows in a day, (5) number of NA values in a day, # (6) number of valid observations in a day, and (7) maximum of ozone daily 8-hour # rolling averages O3_max_daily_8hr_rolling_averages <- as.data.frame(mat.or.vec(nr = days_in_year, nc = 7)) colnames(O3_max_daily_8hr_rolling_averages) <- c("year", "day_of_year", "date", "rows_in_day", "NA_in_day", "valid_obs_in_day", "O3_max_daily_8hr_rolling_average") # Loop through all stations in each file for (j in 1:length(station_list)){ df.station <- subset(df, df$STATION == station_list[j]) completeness_year <- round(((nrow(df.station) - sum(is.na(df.station[,3])))/ ifelse(leap_year(year), 8784, 8760)) *100, digits = 2) # Initialize data frame for ozone daily 8-hr rolling averages O3_8hr_rolling_averages <- as.data.frame(mat.or.vec(nr = hours_in_year, nc = 8)) colnames(O3_8hr_rolling_averages) <- c("year", "day_of_year", "hour_of_day", "date", "rows_in_8hr_period", "NA_in_8hr_period", "valid_obs_in_8hr_period", "O3_8hr_rolling_average") class(O3_8hr_rolling_averages$date) = c('POSIXt','POSIXct') for (m in 1:7) { # Get year of 8-hour averaging period O3_8hr_rolling_averages[m, 1] <- year # Get day of year for 8-hour averaging period O3_8hr_rolling_averages[m, 2] <- yday(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get hour of day for 8-hour averaging period O3_8hr_rolling_averages[m, 3] <- hour(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get date for 8-hour averaging period O3_8hr_rolling_averages[m, 4] <- as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) # Set to NA: (1) count of rows in dataset for a given 8-hour averaging period, # (2) count of NA values in dataset for a given 8-hour averaging period # (3) count of valid measurements for a given 8-hour averaging period # (4) average ozone concentration for a given 8-hour 
averaging period O3_8hr_rolling_averages[m, 5] <- NA O3_8hr_rolling_averages[m, 6] <- NA O3_8hr_rolling_averages[m, 7] <- NA O3_8hr_rolling_averages[m, 8] <- NA } for (m in 8:hours_in_year) { # Get year of 8-hour averaging period O3_8hr_rolling_averages[m, 1] <- year(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get day of year for 8-hour averaging period O3_8hr_rolling_averages[m, 2] <- yday(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get hour of day for 8-hour averaging period O3_8hr_rolling_averages[m, 3] <- hour(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get date for 8-hour averaging period O3_8hr_rolling_averages[m, 4] <- as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) # Count the number of rows in dataset for a given 8-hour averaging period O3_8hr_rolling_averages[m, 5] <- nrow(subset(df.station, time <= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) & time >= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 8) * 3600))) # Count the number of NA values in dataset for a given 8-hour averaging period O3_8hr_rolling_averages[m, 6] <- sum(is.na(subset(df.station, time <= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) & time >= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 8) * 3600) )[,3])) # Calculate the number of valid measurements for a given 8-hour averaging period O3_8hr_rolling_averages[m, 7] <- O3_8hr_rolling_averages[m, 5] - O3_8hr_rolling_averages[m, 6] # Calculate the average ozone concentration for a given 8-hour averaging period O3_8hr_rolling_averages[m, 8] <- ifelse(O3_8hr_rolling_averages[m, 7] >= 6, round(mean(subset(df.station, time <= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) & time >= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 8) * 3600) )[,3], na.rm = TRUE), digits = 1), NA) } # Initialize matrix with (1) year, (2) day of year, (3) the date, # (4) number of dataset rows in a day, (5) number of 
NA values in a day, # (6) number of valid observations in a day, and (7) maximum of ozone daily 8-hour # rolling averages O3_max_daily_8hr_rolling_averages <- as.data.frame(mat.or.vec(nr = days_in_year, nc = 7)) colnames(O3_max_daily_8hr_rolling_averages) <- c("year", "day_of_year", "date", "rows_in_day", "NA_in_day", "valid_obs_in_day", "O3_max_daily_8hr_rolling_average") class(O3_max_daily_8hr_rolling_averages$date) = c('Date') # Loop through all days in year and put calculated values in initialized data frame for (k in 1:days_in_year) { # Insert the year in the 'year' column O3_max_daily_8hr_rolling_averages[k,1] <- year # Insert the day of year in the 'day_of_year' column O3_max_daily_8hr_rolling_averages[k,2] <- k # Insert the date in the 'date' column O3_max_daily_8hr_rolling_averages[k,3] <- as.Date(subset(O3_8hr_rolling_averages, day_of_year == k)[,4][1]) # Count the number of rows in dataset for a day O3_max_daily_8hr_rolling_averages[k,4] <- nrow(subset(O3_8hr_rolling_averages, day_of_year == k)) # Count the number of NA values for the O3 8hr rolling average in the dataset # for a given day O3_max_daily_8hr_rolling_averages[k,5] <- sum(is.na(subset(O3_8hr_rolling_averages, day_of_year == k)[,8])) # Calculate the number of valid measurements for a given day O3_max_daily_8hr_rolling_averages[k,6] <- O3_max_daily_8hr_rolling_averages[k,4] - O3_max_daily_8hr_rolling_averages[k,5] # Calculate the maximum of 8-hour daily average, put into column 7 # ('O3_max_daily_8hr_rolling_average') O3_max_daily_8hr_rolling_averages[k,7] <- ifelse(O3_max_daily_8hr_rolling_averages[k,6] >= 18, round(mean(subset(O3_8hr_rolling_averages, day_of_year == k)[,8], na.rm = TRUE), digits = 1), NA) # Close inner loop for station days } # Convert any NaN values in the data frame to NA for consistency O3_max_daily_8hr_rolling_averages <- as.data.frame(rapply(O3_max_daily_8hr_rolling_averages, f = function(x) ifelse(is.nan(x), NA, x), how = "replace")) # Calculate the annual average of 
the highest daily 8hr-O3-max for the year average_annual_of_daily_8hr_O3_max <- round(mean(O3_8hr_rolling_averages$O3_8hr_rolling_average, na.rm = TRUE), digits = 1) # Calculate the 4th highest daily 8hr-O3-max for the year by conducting a decreasing # sort of the maximum of daily rolling 8-hr averages and then accessing the 4th item in # that vector list O3_8hr_rolling_averages_sort_descending <- sort(O3_8hr_rolling_averages$O3_8hr_rolling_average, decreasing = TRUE, na.last = NA) annual_4th_highest_daily_8hr_O3_max <- O3_8hr_rolling_averages_sort_descending[4] # Determine number of valid daily 8hr-O3-max in the combined 2nd and 3rd quarters # (April 1 to September 30) number_of_valid_O3_daily_averages <- sum(!is.na(subset( O3_max_daily_8hr_rolling_averages, O3_max_daily_8hr_rolling_averages$day_of_year >= 91 & O3_max_daily_8hr_rolling_averages$day_of_year <= 274)[,7])) # Determine the percentage of days with valid daily 8hr-O3-max values in the # April 1 to September 30 period percent_valid_O3_daily_averages <- (number_of_valid_O3_daily_averages / 184) * 100 # Set the data completeness boolean to TRUE if the percentage of valid days in the # specified period is greater than or equal to 75% data_complete_year <- ifelse(percent_valid_O3_daily_averages >= 75, TRUE, FALSE) does_annual_4th_highest_daily_8hr_O3_max_exceed <- ifelse(annual_4th_highest_daily_8hr_O3_max > 30, TRUE, FALSE) annual_4th_highest_daily_8hr_O3_max_flag <- ifelse(does_annual_4th_highest_daily_8hr_O3_max_exceed == TRUE & data_complete_year == FALSE, "based on incomplete data", "") # Place values in row of output CSV file #cat(year,",",measure,",",station_list[j],",", cat(year,",",measure,",",station_list[j],",", average_annual_of_daily_8hr_O3_max,",", annual_4th_highest_daily_8hr_O3_max,",", percent_valid_O3_daily_averages,",", data_complete_year,",", does_annual_4th_highest_daily_8hr_O3_max_exceed,",", annual_4th_highest_daily_8hr_O3_max_flag, file = paste(measure,"_data_summary.csv", sep = ''), 
sep = "", append = TRUE) # Add linebreak to CSV file after writing line cat("", file = paste(measure,"_data_summary.csv", sep = ''), sep = "\n", append = TRUE) # Close inner for loop, looping through stations in a CSV file } # Close outer for loop, looping through reads of CSV files } # Close function }
/R/year_summary_O3.R
permissive
arturochian/stationaRy
R
false
false
15,026
r
generate.year_summary.O3 <- function(all_years = FALSE, single_year = NULL, year_range = NULL, file_path = NULL, station, quarter_bounds = c("04-01", "09-30")) { # Example: # year_summary_O3(single_year = 2001, file_path = "~/Documents/R (Working)") measure <- "O3" file_path <- ifelse(is.null(file_path), getwd(), file_path) # Add require statement require(lubridate) # Generate the appropriate file list depending on the options chosen # # Generate file list for selected pollutant for all years if (all_years == TRUE & is.null(single_year) & is.null(year_range)) file_list <- list.files(path = file_path, pattern = "^[0-9][0-9][0-9][0-9][0-9A-Z]*O3\\.csv") # If a year range of years is provided, capture start and end year boundaries if (all_years == FALSE & is.null(single_year) & !is.null(year_range)) { start_year_range <- substr(as.character(year_range), 1, 4) end_year_range <- substr(as.character(year_range), 6, 9) for (i in start_year_range:end_year_range) { nam <- paste("file_list", i, sep = ".") assign(nam, list.files(path = file_path, pattern = paste("^",i,"[0-9A-Z]*O3\\.csv", sep = ''))) } # Combine vector lists list <- vector("list", length(ls(pattern = "file_list."))) for (j in 1:length(ls(pattern = "file_list."))) { list[j] <- list(get(ls(pattern = "file_list.")[j])) } file_list <- unlist(list) # Remove temp objects rm(list) rm(i) rm(j) rm(nam) rm(list = ls(pattern = "file_list.")) } # If 'single_year' specified, filter the list to only include objects of the specified year if (all_years == FALSE & !is.null(single_year) & is.null(year_range)) { assign("file_list", list.files(path = file_path, pattern = paste("^",single_year,"[0-9A-Z]*O3\\.csv", sep = ''))) } # Loop through reading in CSV files; convert time column back to POSIXct time objects for (i in 1:length(file_list)){ df <- read.csv(file = paste(file_path, "/", file_list[i], sep = ''), header = TRUE, stringsAsFactors = FALSE) if (exists("station")){ df <- subset(df, df$STATION == station) } df$time <- 
as.POSIXct(df$time) # get number of stations no_stations <- length(unique(df$STATION)) # inspect dataset to verify the year year <- round(mean(year(df$time))) # Determine number of days in year days_in_year <- yday(as.POSIXct(paste(year, "-12-31", sep = ''), origin = "1970-01-01", tz = "GMT")) # Determine number of hours in year hours_in_year <- days_in_year * 24 # get vector list of stations for analysis station_list <- mat.or.vec(nr = no_stations, nc = 1) station_list <- unique(df$STATION) # Initialize the output file for writing if (i == 1) { cat("Year,Pollutant,NapsID,", "Annual_O3_Average_Daily_8_hr_Max,", "Annual_O3_4th_Highest,", "Q2_Q3.Complete_%,", "Is_Annual_O3_4th_Highest_Valid,Annual_O3_4th_Highest_Exceed,", "Annual_O3_4th_Highest_Flag", file = paste(measure,"_data_summary.csv", sep = ''), sep = '') cat("", file = paste(measure,"_data_summary.csv", sep = ''), sep = "\n", append = TRUE) } # The data required to calculate the annual 4th highest daily 8hr-O3-max value for a #station includes: # i. The daily maximum 8-hour average ozone concentration for each day of the year # ii. 
The annual 4th highest daily 8hr-O3-max for a given year # # Initialize matrix with (1) year, (2) day of year, (3) the date, # (4) number of dataset rows in a day, (5) number of NA values in a day, # (6) number of valid observations in a day, and (7) maximum of ozone daily 8-hour # rolling averages O3_max_daily_8hr_rolling_averages <- as.data.frame(mat.or.vec(nr = days_in_year, nc = 7)) colnames(O3_max_daily_8hr_rolling_averages) <- c("year", "day_of_year", "date", "rows_in_day", "NA_in_day", "valid_obs_in_day", "O3_max_daily_8hr_rolling_average") # Loop through all stations in each file for (j in 1:length(station_list)){ df.station <- subset(df, df$STATION == station_list[j]) completeness_year <- round(((nrow(df.station) - sum(is.na(df.station[,3])))/ ifelse(leap_year(year), 8784, 8760)) *100, digits = 2) # Initialize data frame for ozone daily 8-hr rolling averages O3_8hr_rolling_averages <- as.data.frame(mat.or.vec(nr = hours_in_year, nc = 8)) colnames(O3_8hr_rolling_averages) <- c("year", "day_of_year", "hour_of_day", "date", "rows_in_8hr_period", "NA_in_8hr_period", "valid_obs_in_8hr_period", "O3_8hr_rolling_average") class(O3_8hr_rolling_averages$date) = c('POSIXt','POSIXct') for (m in 1:7) { # Get year of 8-hour averaging period O3_8hr_rolling_averages[m, 1] <- year # Get day of year for 8-hour averaging period O3_8hr_rolling_averages[m, 2] <- yday(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get hour of day for 8-hour averaging period O3_8hr_rolling_averages[m, 3] <- hour(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get date for 8-hour averaging period O3_8hr_rolling_averages[m, 4] <- as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) # Set to NA: (1) count of rows in dataset for a given 8-hour averaging period, # (2) count of NA values in dataset for a given 8-hour averaging period # (3) count of valid measurements for a given 8-hour averaging period # (4) average ozone concentration for a given 8-hour 
averaging period O3_8hr_rolling_averages[m, 5] <- NA O3_8hr_rolling_averages[m, 6] <- NA O3_8hr_rolling_averages[m, 7] <- NA O3_8hr_rolling_averages[m, 8] <- NA } for (m in 8:hours_in_year) { # Get year of 8-hour averaging period O3_8hr_rolling_averages[m, 1] <- year(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get day of year for 8-hour averaging period O3_8hr_rolling_averages[m, 2] <- yday(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get hour of day for 8-hour averaging period O3_8hr_rolling_averages[m, 3] <- hour(as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600)) # Get date for 8-hour averaging period O3_8hr_rolling_averages[m, 4] <- as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) # Count the number of rows in dataset for a given 8-hour averaging period O3_8hr_rolling_averages[m, 5] <- nrow(subset(df.station, time <= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) & time >= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 8) * 3600))) # Count the number of NA values in dataset for a given 8-hour averaging period O3_8hr_rolling_averages[m, 6] <- sum(is.na(subset(df.station, time <= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) & time >= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 8) * 3600) )[,3])) # Calculate the number of valid measurements for a given 8-hour averaging period O3_8hr_rolling_averages[m, 7] <- O3_8hr_rolling_averages[m, 5] - O3_8hr_rolling_averages[m, 6] # Calculate the average ozone concentration for a given 8-hour averaging period O3_8hr_rolling_averages[m, 8] <- ifelse(O3_8hr_rolling_averages[m, 7] >= 6, round(mean(subset(df.station, time <= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 1) * 3600) & time >= as.POSIXct(paste(year, "-01-01", sep = '')) + ((m - 8) * 3600) )[,3], na.rm = TRUE), digits = 1), NA) } # Initialize matrix with (1) year, (2) day of year, (3) the date, # (4) number of dataset rows in a day, (5) number of 
NA values in a day, # (6) number of valid observations in a day, and (7) maximum of ozone daily 8-hour # rolling averages O3_max_daily_8hr_rolling_averages <- as.data.frame(mat.or.vec(nr = days_in_year, nc = 7)) colnames(O3_max_daily_8hr_rolling_averages) <- c("year", "day_of_year", "date", "rows_in_day", "NA_in_day", "valid_obs_in_day", "O3_max_daily_8hr_rolling_average") class(O3_max_daily_8hr_rolling_averages$date) = c('Date') # Loop through all days in year and put calculated values in initialized data frame for (k in 1:days_in_year) { # Insert the year in the 'year' column O3_max_daily_8hr_rolling_averages[k,1] <- year # Insert the day of year in the 'day_of_year' column O3_max_daily_8hr_rolling_averages[k,2] <- k # Insert the date in the 'date' column O3_max_daily_8hr_rolling_averages[k,3] <- as.Date(subset(O3_8hr_rolling_averages, day_of_year == k)[,4][1]) # Count the number of rows in dataset for a day O3_max_daily_8hr_rolling_averages[k,4] <- nrow(subset(O3_8hr_rolling_averages, day_of_year == k)) # Count the number of NA values for the O3 8hr rolling average in the dataset # for a given day O3_max_daily_8hr_rolling_averages[k,5] <- sum(is.na(subset(O3_8hr_rolling_averages, day_of_year == k)[,8])) # Calculate the number of valid measurements for a given day O3_max_daily_8hr_rolling_averages[k,6] <- O3_max_daily_8hr_rolling_averages[k,4] - O3_max_daily_8hr_rolling_averages[k,5] # Calculate the maximum of 8-hour daily average, put into column 7 # ('O3_max_daily_8hr_rolling_average') O3_max_daily_8hr_rolling_averages[k,7] <- ifelse(O3_max_daily_8hr_rolling_averages[k,6] >= 18, round(mean(subset(O3_8hr_rolling_averages, day_of_year == k)[,8], na.rm = TRUE), digits = 1), NA) # Close inner loop for station days } # Convert any NaN values in the data frame to NA for consistency O3_max_daily_8hr_rolling_averages <- as.data.frame(rapply(O3_max_daily_8hr_rolling_averages, f = function(x) ifelse(is.nan(x), NA, x), how = "replace")) # Calculate the annual average of 
the highest daily 8hr-O3-max for the year average_annual_of_daily_8hr_O3_max <- round(mean(O3_8hr_rolling_averages$O3_8hr_rolling_average, na.rm = TRUE), digits = 1) # Calculate the 4th highest daily 8hr-O3-max for the year by conducting a decreasing # sort of the maximum of daily rolling 8-hr averages and then accessing the 4th item in # that vector list O3_8hr_rolling_averages_sort_descending <- sort(O3_8hr_rolling_averages$O3_8hr_rolling_average, decreasing = TRUE, na.last = NA) annual_4th_highest_daily_8hr_O3_max <- O3_8hr_rolling_averages_sort_descending[4] # Determine number of valid daily 8hr-O3-max in the combined 2nd and 3rd quarters # (April 1 to September 30) number_of_valid_O3_daily_averages <- sum(!is.na(subset( O3_max_daily_8hr_rolling_averages, O3_max_daily_8hr_rolling_averages$day_of_year >= 91 & O3_max_daily_8hr_rolling_averages$day_of_year <= 274)[,7])) # Determine the percentage of days with valid daily 8hr-O3-max values in the # April 1 to September 30 period percent_valid_O3_daily_averages <- (number_of_valid_O3_daily_averages / 184) * 100 # Set the data completeness boolean to TRUE if the percentage of valid days in the # specified period is greater than or equal to 75% data_complete_year <- ifelse(percent_valid_O3_daily_averages >= 75, TRUE, FALSE) does_annual_4th_highest_daily_8hr_O3_max_exceed <- ifelse(annual_4th_highest_daily_8hr_O3_max > 30, TRUE, FALSE) annual_4th_highest_daily_8hr_O3_max_flag <- ifelse(does_annual_4th_highest_daily_8hr_O3_max_exceed == TRUE & data_complete_year == FALSE, "based on incomplete data", "") # Place values in row of output CSV file #cat(year,",",measure,",",station_list[j],",", cat(year,",",measure,",",station_list[j],",", average_annual_of_daily_8hr_O3_max,",", annual_4th_highest_daily_8hr_O3_max,",", percent_valid_O3_daily_averages,",", data_complete_year,",", does_annual_4th_highest_daily_8hr_O3_max_exceed,",", annual_4th_highest_daily_8hr_O3_max_flag, file = paste(measure,"_data_summary.csv", sep = ''), 
sep = "", append = TRUE) # Add linebreak to CSV file after writing line cat("", file = paste(measure,"_data_summary.csv", sep = ''), sep = "\n", append = TRUE) # Close inner for loop, looping through stations in a CSV file } # Close outer for loop, looping through reads of CSV files } # Close function }
#
#   Copyright 2007-2016 The OpenMx Project
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

# library() errors immediately if OpenMx is missing (require() only warns).
library(OpenMx)

# Mark every element of an MxMatrix as fixed (free = FALSE).
# Vectorized replacement for the original double for-loop over
# dim(m$labels); in an MxMatrix, $labels and $free share dimensions, so
# assigning the whole $free matrix is equivalent and also safe for
# zero-extent matrices, where 1:dim(...) would have produced c(1, 0).
# Returns the modified copy. (Utility only; not called below.)
fix_matrix <- function(m) {
  m$free[] <- FALSE
  m
}

# (2)
# Generate random normal data Y ~ N(0, 1).
N <- 1000
Y <- rnorm(N, 0, 1)
data <- data.frame(Y)
names(data) <- c("Y")

# (3)
# Create a dummy RAM model which estimates the mean of an observed
# variable whose variance is fixed at 1 (FALSE on the two-headed path).
model <- mxModel(
  "One Factor", type = "RAM",
  manifestVars = c("Y"),
  latentVars = c(),
  mxPath(from = c("Y"), arrows = 2, free = FALSE, values = 1.0,
         label = c("lat_var")),
  mxPath(from = "one", to = c("Y"), arrows = 1, free = TRUE,
         values = c(1), labels = c("mean")),
  mxData(data, type = "raw", numObs = dim(data)[1])
)

# (4)
# Run the model (with a single free parameter) and estimate the mean.
run <- mxRun(model)

#################
# rewrite version
#################
# NOTE(review): the estimate is read from run$A$values[1,1]; in a RAM
# model the mean normally lives in the M matrix — confirm intent.
m <- run$A$values[1, 1]

# Rebuild the same one-variable model with explicit RAM matrices, fixing
# the mean matrix M at the value read above.
test <- mxModel(
  "test",
  mxData(data, "raw"),
  mxMatrix("Symm", 1, 1, FALSE, 1, name = "S"),
  mxMatrix("Full", 1, 1, name = "A"),
  mxMatrix("Iden", 1, name = "F"),
  mxMatrix("Full", 1, 1, FALSE, m, name = "M"),
  mxFitFunctionML(),
  mxExpectationRAM("A", "S", "F", "M", dimnames = c("Y"))
)

# Runs fine.
test2 <- mxRun(test)

# Flipping parameters: fix the previously free mean and re-run twice,
# the second time with the stale fit output still attached to test3.
test3 <- run
test3$M$free[1, 1] <- FALSE
test4 <- mxRun(test3)
# test3$output <- list()
test5 <- mxRun(test3)
/inst/models/passing/ModelTransformTest.R
permissive
falkcarl/OpenMx
R
false
false
2,010
r
#
#   Copyright 2007-2016 The OpenMx Project
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

require(OpenMx)

# Mark every element of an MxMatrix as fixed: walks the dimensions of the
# $labels matrix and sets the corresponding entries of $free to FALSE.
# Returns the modified copy. (Utility only; not called below.)
fix_matrix <- function(m) {
	for (i in 1:dim(m$labels)[1]) {
		for (j in 1:dim(m$labels)[2]) {
			m$free[i,j] <- FALSE;
		}
	}
	return(m);
}

# (2)
# Generate random normal data Y ~ N(0, 1).
N <- 1000
Y <- rnorm(N,0,1)
data <- data.frame(Y)
names(data) <- c("Y")

# (3)
# Create a dummy RAM model which estimates the mean of an observed
# variable whose variance is fixed at 1 (free=F on the two-headed path).
model <- mxModel("One Factor", type="RAM",
	manifestVars = c("Y"),
	latentVars = c(),
	mxPath(from=c("Y"), arrows=2, free=F, values=1.0, label=c("lat_var")),
	mxPath( from="one", to=c("Y"), arrows=1, free=TRUE, values=c(1), labels=c("mean") ),
	mxData( data, type="raw", numObs=dim(data)[1])
);

# (4)
# Run model (with a single free param) and estimate the mean.
run <- mxRun(model);

#################
# rewrite version
#################
# NOTE(review): the estimate is read from run$A$values[1,1]; in a RAM
# model the mean normally lives in the M matrix — confirm intent.
m <- run$A$values[1,1]

# Rebuild the same one-variable model with explicit RAM matrices, fixing
# the mean matrix M at the value read above.
test <- mxModel("test", mxData(data, "raw"),
	mxMatrix("Symm", 1, 1, FALSE, 1, name="S"),
	mxMatrix("Full", 1, 1, name="A"),
	mxMatrix("Iden", 1, name="F"),
	mxMatrix("Full", 1, 1, FALSE, m, name="M"),
	mxFitFunctionML(),mxExpectationRAM("A", "S", "F", "M", dimnames=c("Y"))
)

# runs fine
test2 <- mxRun(test)

# flipping parameters: fix the previously free mean and re-run twice,
# the second time with the stale fit output still attached to test3.
test3 <- run
test3$M$free[1,1] <- F
test4 <- mxRun(test3)
# test3$output <- list()
test5 <- mxRun(test3)
# Simulate a causal model and, if necessary, sample data from it.
#
# Returns list(M = true model, D = list of per-experiment data sets).
#
# Arguments:
#   simulationConfig - list with fields n, topology, exconf, N, pedge,
#       restrict, confounder_proportion and (for the "int" simulator)
#       numInts.
#   samples  - NULL unless observational data is already provided.
#   model    - NULL unless the true model is already provided.
#   indPath  - NULL unless independence tests have already been run; if
#       set, nothing needs to be generated or sampled.
#   isOracle - FALSE for noisy sampled data, TRUE for oracle data
#       (manipulated true graphs instead of samples).
#   typeOfSimulator - "random" (default), "int", "int_example" or
#       "int_example2".
#   returnData - if FALSE, only the model is generated (D stays empty).
#   verbose  - print progress messages when nonzero.
simulateData <- function(simulationConfig, samples, model, indPath,
                         isOracle = FALSE, typeOfSimulator = "random",
                         returnData = TRUE, verbose = 0) {
  # Independence tests already done: nothing to simulate.
  if (!is.null(indPath)) {
    return(list(M = model, D = NULL))
  }

  # Data already provided: wrap it as a single purely observational
  # experiment (e = all-zero intervention vector). Assumes only
  # observational data (needs revision otherwise).
  if (!is.null(samples)) {
    D <- list()
    D[[1]] <- list()
    D[[1]]$M <- model
    D[[1]]$data <- samples
    D[[1]]$e <- rep(0, ncol(samples))
    D[[1]]$N <- nrow(samples)
    return(list(M = model, D = D))
  }

  # Get the true model: either the one passed in, or generate a new one.
  if (!is.null(model)) {
    M <- model
  } else {
    cat("\n* Generating the model: n=", simulationConfig$n,
        ", restrict=", simulationConfig$restrict,
        ", topology=", simulationConfig$topology,
        ", probability of edge = ", simulationConfig$pedge,
        ", confounder proportion=", simulationConfig$confounder_proportion,
        ".\n", sep = '')
    if (typeOfSimulator == "int") {
      M <- simul_interventions.generateModel(
        p = simulationConfig$n,
        numConf = as.integer(simulationConfig$n *
                               simulationConfig$confounder_proportion),
        numInts = simulationConfig$numInts)
    } else if (typeOfSimulator == "int_example") {
      # Tiny fixed example: 2 variables, one deterministic intervention.
      M <- simul_interventions.generateModel(
        p = 2, prob = c(0, 0, 0, 0), probInts = c(0, 1),
        numConf = 0, numInts = 1)
    } else if (typeOfSimulator == "int_example2") {
      M <- simul_interventions.generateModel(
        p = 5, prob = c(0.25, 0.25, 0.25, 0.25),
        probInts = c(0.25, 0.5, 0.25), numConf = 1, numInts = 2)
    } else {
      M <- generateModel(
        n = simulationConfig$n, restrict = simulationConfig$restrict,
        topology = simulationConfig$topology, model = model,
        samples = samples, pedge = simulationConfig$pedge,
        confounder_proportion = simulationConfig$confounder_proportion,
        verbose = verbose)
    }
  }

  if (returnData) {
    if (isOracle) {
      if (verbose) cat(" - Skipping sample data generation, since using oracle.\n", sep = '')
      D <- list()
      E <- experimentConfiguration(simulationConfig$n, simulationConfig$exconf)
      # One oracle "data set" per experiment row: the manipulated true
      # graph in which edge heads into the intervened variables are cut.
      # seq_len() instead of 1:nrow(E) so a 0-row E yields no iterations
      # (1:0 would have iterated over c(1, 0)).
      for (i in seq_len(nrow(E))) {
        D[[i]] <- list(e = E[i, ], M = M)
        # Indexes of the intervened variables in this experiment.
        J <- which(D[[i]]$e == 1)
        D[[i]]$M$G[J, ] <- 0
        D[[i]]$M$Ge[J, ] <- 0
        D[[i]]$M$Ge[, J] <- 0
      }
    } else if (grepl("int", typeOfSimulator)) {
      # Matches "int", "int_example" and "int_example2".
      MD <- simul_interventions.generateData(M = M, nObs = simulationConfig$N)
      D <- MD$D
    } else {
      if (verbose) {
        cat("\n* Generating the sample data using the experiment configuration: exconf=",
            simulationConfig$exconf, ", number of samples=",
            simulationConfig$N, ".\n", sep = '')
      }
      # NOTE(review): 'test' is not an argument of simulateData and is
      # resolved from the enclosing/global environment here — confirm
      # callers always define it before reaching this branch.
      D <- generateSampleData(n = simulationConfig$n,
                              exconf = simulationConfig$exconf,
                              test = test, N = simulationConfig$N,
                              M = M, verbose = verbose)
    }
  } else {
    D <- list()
  }

  # For compatibility with results from the learn step: recode
  # "not a cause" entries from 0 to -1 in all relation matrices.
  M$G[(M$G == 0)] <- -1
  M$Ge[(M$Ge == 0)] <- -1
  M$Gs[(M$Gs == 0)] <- -1
  M$C[(M$C == 0)] <- -1

  list(M = M, D = D)
}

# Smoke test: random acyclic model, passive observational sampling.
simulateData._test1 <- function() {
  simulationConfig <- list(n = 4, topology = "random", exconf = "passive",
                           N = 500, pedge = 1/3, restrict = c('acyclic'))
  simulateData(simulationConfig = simulationConfig, samples = NULL,
               model = NULL, indPath = NULL, isOracle = 0, verbose = 0)
}

# Smoke test: single-intervention experiment configuration, noisy data.
simulateData._test2 <- function() {
  simulationConfig <- list(n = 4, topology = "random", exconf = "single",
                           N = 500, pedge = 1/3, restrict = c('acyclic'))
  simulateData(simulationConfig = simulationConfig, samples = NULL,
               model = NULL, indPath = NULL, isOracle = 0, verbose = 0)
}

# Smoke test: single-intervention experiment configuration, oracle data.
simulateData._test3 <- function() {
  simulationConfig <- list(n = 4, topology = "random", exconf = "single",
                           N = 500, pedge = 1/3, restrict = c('acyclic'))
  simulateData(simulationConfig = simulationConfig, samples = NULL,
               model = NULL, indPath = NULL, isOracle = 1, verbose = 0)
}
/R/simulateData.R
no_license
caus-am/dom_adapt
R
false
false
5,200
r
simulateData <- function(simulationConfig, samples, model, indPath, isOracle = FALSE, typeOfSimulator="random", returnData=TRUE, verbose=0) {
  # Simulate a causal model and then sample data from it, if necessary.
  # Returns a list with M = true model, D = list of per-experiment data sets.
  # - simulationConfig$n
  # - simulationConfig$topology
  # - simulationConfig$exconf
  # - simulationConfig$N
  # - simulationConfig$pedge
  # - simulationConfig$restrict
  # - samples: NULL unless the data is already provided.
  # - model: NULL unless the true model is already provided.
  # - indPath: NULL unless the independence tests have already been run.
  #   In that case, we don't need to generate anything.
  # - isOracle: FALSE for noisy data, TRUE for oracle data.

  # Independence tests are already done; there is no need to simulate anything.
  if (!is.null(indPath)) {
    return (list(M=model, D=NULL))
  }

  # Data already provided: no model generation or sampling necessary.
  # Wrap the samples as a single purely observational experiment
  # (e = all-zero intervention vector). Assumes only observational data
  # (needs revision otherwise).
  if (!is.null(samples)) {
    D <- list()
    D[[1]] <- list()
    D[[1]]$M <- model
    D[[1]]$data <- samples
    D[[1]]$e <- rep(0, ncol(samples))
    D[[1]]$N <- nrow(samples)
    return (list(M=model, D=D))
  }

  # Get the true model, either from 'model' or by generating a new one randomly.
  if (!is.null(model) ) {
    M <- model
  } else {
    cat("\n* Generating the model: n=", simulationConfig$n,", restrict=", simulationConfig$restrict,
        ", topology=",simulationConfig$topology, ", probability of edge = ", simulationConfig$pedge,
        ", confounder proportion=",simulationConfig$confounder_proportion, ".\n",sep='')
    if (typeOfSimulator == "int") {
      M <- simul_interventions.generateModel(p=simulationConfig$n,
          numConf = as.integer(simulationConfig$n*simulationConfig$confounder_proportion),
          numInts = simulationConfig$numInts)
    } else if (typeOfSimulator == "int_example") {
      # Tiny fixed example: 2 variables, one deterministic intervention.
      M <- simul_interventions.generateModel(p=2, prob=c(0, 0, 0, 0), probInts = c(0, 1),
          numConf = 0, numInts = 1)
    } else if (typeOfSimulator == "int_example2") {
      M <- simul_interventions.generateModel(p=5, prob=c(0.25, 0.25, 0.25, 0.25),
          probInts = c(0.25, 0.5, 0.25), numConf = 1, numInts = 2)
    } else {
      M <- generateModel(n=simulationConfig$n, restrict=simulationConfig$restrict,
          topology=simulationConfig$topology, model=model, samples=samples,
          pedge=simulationConfig$pedge,
          confounder_proportion=simulationConfig$confounder_proportion, verbose=verbose)
    }
  }

  if (returnData) {
    if (isOracle) {
      if (verbose) cat(" - Skipping sample data generation, since using oracle.\n",sep='')
      D <- list()
      E <- experimentConfiguration(simulationConfig$n, simulationConfig$exconf)
      # For each experiment vector e in E:
      for ( i in 1:nrow(E)) {
        # Create the tuple that will be stored in D, storing the vector e.
        D[[i]]<-list(e=E[i,],M=M)
        # Get indexes of intervened variables in this particular experimental setting.
        J<-which( D[[i]]$e==1 )
        # The oracle "data" consist of manipulated graphs where
        # edge heads into the intervened variables are cut.
        D[[i]]$M$G[J,]<-0
        D[[i]]$M$Ge[J,]<-0
        D[[i]]$M$Ge[,J]<-0
      }
    } else if (grepl("int", typeOfSimulator)) {
      # Matches "int", "int_example" and "int_example2".
      MD <- simul_interventions.generateData(M=M, nObs=simulationConfig$N)
      D <- MD$D
    } else {
      if (verbose) {
        cat("\n* Generating the sample data using the experiment configuration: exconf=",
            simulationConfig$exconf, ", number of samples=", simulationConfig$N, ".\n",sep='')
      }
      # NOTE(review): 'test' is not an argument of simulateData and is
      # resolved from the enclosing/global environment here — confirm.
      D <- generateSampleData(n=simulationConfig$n, exconf=simulationConfig$exconf, test=test,
          N=simulationConfig$N, M=M, verbose=verbose)
    }
  } else {
    D <- list()
  }

  # Change for compatibility with results from the learn step: not causes = -1.
  M$G[(M$G==0)] <- -1
  M$Ge[(M$Ge==0)] <- -1
  M$Gs[(M$Gs==0)] <- -1
  M$C[(M$C==0)] <- -1

  list(M=M, D=D)
}

# Smoke test: random acyclic model, passive observational sampling.
simulateData._test1 <- function() {
  simulationConfig <- list(n=4, topology="random", exconf="passive", N=500, pedge = 1/3,
      restrict = c('acyclic'))
  simulateData(simulationConfig=simulationConfig, samples=NULL, model=NULL, indPath=NULL,
      isOracle=0, verbose=0)
}

# Smoke test: single-intervention experiment configuration, noisy data.
simulateData._test2 <- function() {
  simulationConfig <- list(n=4, topology="random", exconf="single", N=500, pedge = 1/3,
      restrict = c('acyclic'))
  simulateData(simulationConfig=simulationConfig, samples=NULL, model=NULL, indPath=NULL,
      isOracle=0, verbose=0)
}

# Smoke test: single-intervention experiment configuration, oracle data.
simulateData._test3 <- function() {
  simulationConfig <- list(n=4, topology="random", exconf="single", N=500, pedge = 1/3,
      restrict = c('acyclic'))
  simulateData(simulationConfig=simulationConfig, samples=NULL, model=NULL, indPath=NULL,
      isOracle=1, verbose=0)
}
library(SECP)

### Name: fds3s
### Title: Mass fractal dimension of sampling 3D clusters
### Aliases: fds3s

### ** Examples

# # # # # # # # # # # # # # # # #
# Example 1: Isotropic set cover
# # # # # # # # # # # # # # # # #
# Two relative frequencies of 3D clusters just below (p1) and just above
# (p2) the site percolation threshold pc on a lx^3 lattice; ss is the
# central slice index used for plotting.
pc <- .311608
p1 <- pc - .01
p2 <- pc + .01
lx <- 33; ss <- (lx+1)/2
rf1 <- fssi30(n=100, x=lx, p=p1)
rf2 <- fssi30(n=100, x=lx, p=p2)
# Isotropic set cover over the sample, then fit the mass fractal
# dimension model for each frequency field.
bnd <- isc3s(k=9, x=dim(rf1))
fd1 <- fds3s(rfq=rf1, bnd=bnd)
fd2 <- fds3s(rfq=rf2, bnd=bnd)
# Extract log-mass (w) and log-radius (r) points from the fitted models.
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
# Prediction grid slightly wider than the observed radius range, with
# pointwise confidence bands.
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
# 0.95 confidence intervals for the slope (mass fractal dimension),
# formatted for the plot titles.
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- z <- seq(lx)
y1 <- rf1[,ss,]; y2 <- rf2[,ss,]
# 2x2 panel: two frequency slices with the cover rectangles, then the
# two log-log fits with their confidence bands.
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, z, y1, zlim=c(0, 3*mean(y1)), cex.main=1,
      main=paste("Isotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p1, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
image(x, z, y2, zlim=c(0, 3*mean(y2)), cex.main=1,
      main=paste("Isotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))

# # # # # # # # # # # # # # # # #
# Example 2: Anisotropic set cover, dir=3
# # # # # # # # # # # # # # # # #
# Same comparison, but the percolation is seeded from the middle slab
# only (ssz: linear indexes of the central xy-layer), and the cover is
# anisotropic along direction 3 (z).
pc <- .311608
p1 <- pc - .01
p2 <- pc + .01
lx <- 33; ss <- (lx+1)/2
ssz <- seq(lx^2+lx+2, 2*lx^2-lx-1)
rf1 <- fssi30(n=100, x=lx, p=p1, set=ssz, all=FALSE)
rf2 <- fssi30(n=100, x=lx, p=p2, set=ssz, all=FALSE)
bnd <- asc3s(k=9, x=dim(rf1), dir=3)
fd1 <- fds3s(rfq=rf1, bnd=bnd)
fd2 <- fds3s(rfq=rf2, bnd=bnd)
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- z <- seq(lx)
y1 <- rf1[,ss,]; y2 <- rf2[,ss,]
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, z, y1, zlim=c(0, .3), cex.main=1,
      main=paste("Anisotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p1, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(v=ss, lty=2)
image(x, z, y2, zlim=c(0, .3), cex.main=1,
      main=paste("Anisotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(v=ss, lty=2)
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))
/data/genthat_extracted_code/SECP/examples/fds3s.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
4,131
r
library(SECP)

### Name: fds3s
### Title: Mass fractal dimension of sampling 3D clusters
### Aliases: fds3s

### ** Examples

# # # # # # # # # # # # # # # # #
# Example 1: Isotropic set cover
# # # # # # # # # # # # # # # # #
# Cluster frequencies just below (p1) and above (p2) the percolation
# threshold pc on a lx^3 lattice; ss is the central slice for plotting.
pc <- .311608
p1 <- pc - .01
p2 <- pc + .01
lx <- 33; ss <- (lx+1)/2
rf1 <- fssi30(n=100, x=lx, p=p1)
rf2 <- fssi30(n=100, x=lx, p=p2)
# Isotropic set cover, then fit the mass fractal dimension models.
bnd <- isc3s(k=9, x=dim(rf1))
fd1 <- fds3s(rfq=rf1, bnd=bnd)
fd2 <- fds3s(rfq=rf2, bnd=bnd)
# Log-mass (w) vs log-radius (r) points from the fitted models.
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
# Prediction grid with pointwise confidence bands.
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
# 0.95 CIs for the slope (the fractal dimension), for the titles.
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- z <- seq(lx)
y1 <- rf1[,ss,]; y2 <- rf2[,ss,]
# 2x2 panel: two frequency slices with cover rectangles, two fits.
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, z, y1, zlim=c(0, 3*mean(y1)), cex.main=1,
      main=paste("Isotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p1, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
image(x, z, y2, zlim=c(0, 3*mean(y2)), cex.main=1,
      main=paste("Isotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(h=ss, lty=2); abline(v=ss, lty=2)
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))

# # # # # # # # # # # # # # # # #
# Example 2: Anisotropic set cover, dir=3
# # # # # # # # # # # # # # # # #
# Same comparison, seeded from the central xy-layer only (ssz holds its
# linear indexes) with an anisotropic cover along direction 3 (z).
pc <- .311608
p1 <- pc - .01
p2 <- pc + .01
lx <- 33; ss <- (lx+1)/2
ssz <- seq(lx^2+lx+2, 2*lx^2-lx-1)
rf1 <- fssi30(n=100, x=lx, p=p1, set=ssz, all=FALSE)
rf2 <- fssi30(n=100, x=lx, p=p2, set=ssz, all=FALSE)
bnd <- asc3s(k=9, x=dim(rf1), dir=3)
fd1 <- fds3s(rfq=rf1, bnd=bnd)
fd2 <- fds3s(rfq=rf2, bnd=bnd)
w1 <- fd1$model[,"w"]; w2 <- fd2$model[,"w"]
r1 <- fd1$model[,"r"]; r2 <- fd2$model[,"r"]
rr <- seq(min(r1)-.2, max(r1)+.2, length=100)
ww1 <- predict(fd1, newdata=list(r=rr), interval="conf")
ww2 <- predict(fd2, newdata=list(r=rr), interval="conf")
s1 <- paste(round(confint(fd1)[2,], digits=3), collapse=", ")
s2 <- paste(round(confint(fd2)[2,], digits=3), collapse=", ")
x <- z <- seq(lx)
y1 <- rf1[,ss,]; y2 <- rf2[,ss,]
par(mfrow=c(2,2), mar=c(3,3,3,1), mgp=c(2,1,0))
image(x, z, y1, zlim=c(0, .3), cex.main=1,
      main=paste("Anisotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p1, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(v=ss, lty=2)
image(x, z, y2, zlim=c(0, .3), cex.main=1,
      main=paste("Anisotropic set cover and a 3D clusters\n",
                 "frequency in the y=",ss," slice with\n",
                 "(1,0)-neighborhood and p=", round(p2, digits=3), sep=""))
rect(bnd["x1",], bnd["z1",], bnd["x2",], bnd["z2",])
abline(v=ss, lty=2)
plot(r1, w1, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s1,")", sep=""))
matlines(rr, ww1, lty=c(1,2,2), col=c("black","red","red"))
plot(r2, w2, pch=3, ylim=range(c(w1,w2)), cex.main=1,
     main=paste("0.95 confidence interval for the mass\n",
                "fractal dimension is (",s2,")", sep=""))
matlines(rr, ww2, lty=c(1,2,2), col=c("black","red","red"))
################
# Intro to ggplot
# eco-data-sci workshop
# 1/17/2018
################

# We'll be using data for National Parks visitation to practice basic data
# visualization using ggplot (from the National Park Service at
# <https://irma.nps.gov/Stats/SSRSReports>).
# Make sure to check out the Ocean Health Index Data Science Training book at
# <http://ohi-science.org/data-science-training/>, which includes more detail,
# examples and resources for using ggplot in Chapter 5.

################
## What is ggplot?
# ggplot2 is a graphics package specifically built to help you iteratively
# create customized graphs. It exists solo, or within the *tidyverse* - a
# collection of data wrangling, visualizing, and presenting packages that
# play nicely together.
################

################
## 1. Load tidyverse (install if necessary)
################

# If you do NOT have the tidyverse installed, you may need to install it
# first using **install.packages("tidyverse")**. When you install a package,
# it exists in R's brain but is dormant.

# install.packages("tidyverse")

# Then load it to "activate" it in R's brain
library(tidyverse)

################
## 2. Get the data (np_visit.csv)
################

# NOTE(review): user-specific absolute path — workshop attendees will need
# to adjust this to wherever they saved np_visit.csv.
np_visit <- read_csv("~/github/data-vis/np_visit.csv")

################
## 3. Single series scatterplot (Dinosaur National Monument visitors)
################

# First, we'll explore visitation at Dinosaur National Monument.
# Let's make a subset of our data that only contains information for
# Dinosaur (I'll call my subset dino_nm). We'll use this subset to create
# our graph.

dino_nm <- np_visit %>%
  filter(park_name == "Dinosaur National Monument")

# Take a look at that data frame (View() or just click on the blue circle
# with arrow next to it in the Environment tab). We'll make a scatterplot of
# year (x-axis) versus visitors (y-axis).

# How do we make that graph in ggplot?
# To make the most basic graph, you need to tell R three things:
# 1. You're using ggplot
# 2. What data is used to create the graph
# 3. What type of graph you want to create
# ...everything beyond that is optional customization.

# So code to make the most basic scatterplot for Dinosaur NM might look
# something like this:

ggplot(data = dino_nm, aes(x = year, y = visitors)) +
  geom_point(color = "blue", pch = 2, size = 3)

# Note that we used the aes() - aesthetics - argument here. Whenever you are
# referencing a variable in ggplot code, it needs to be within an aes()
# argument.

# You'd want to customize that graph - we'll get back to customization later
# on. For now, focus on the structure of the ggplot code.

# What if we have more than one series we're trying to plot?

################
## 4. Multi-series graph of California National Parks visitation
################

# Make a new subset (I'll store as data frame 'np_ca') that only includes
# annual visitors in California National Parks, and arrange by park name
# (alphabetical) then year (increasing). *Note: data wrangling using dplyr
# and tidyr will be covered in another eco-data-sci workshop.*

np_ca <- np_visit %>% # introduce pipe operator?
  filter(state == "CA" & type == "National Park") %>%
  arrange(park_name, year)

# Go exploring a little bit. How many parks are there in California, and
# what are they?

summary(np_ca) # Useful to see the class of each variable (column)
unique(np_ca$park_name) # If a factor, can use 'levels' - but this is just a character
length(unique(np_ca$park_name)) # To see how many there are

# Now let's make a scatterplot graph (year v. visitors for the 9 California NPs):

ggplot(data = np_ca, aes(x = year, y = visitors)) + # There are (1) and (2)
  geom_point() # This is (3) - what type of graph do you want to create?

# Now you have made a totally hideous and useless graph - but it DOES
# contain all of the correct data. We just need to figure out how to clean
# it up a little bit to make it useful.

# How do we do that?

#################
## 5. Updating graph characteristics by VARIABLE
#################

# We would like for each CA National Park series to be shown in a different
# color. We can do that by updating within the geom_point() layer (that's
# the layer where the points are added...). Since we're referencing a
# variable (park_name), we'll need to use the aes() argument. Anything that
# is not variable specified can be added outside of an aes() argument.

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point(aes(color = park_name))

##################
## 6. Customization - updating labels and titles
##################

# We customize graphs in ggplot *iteratively* by adding layers (using the
# plus sign '+') to a base graphic and/or adding arguments within layers.

# Use xlab() and ylab() layers to update x- and y-axis labels, and
# ggtitle() to add a graph title

# graph +
#   xlab("This is my x-label") +
#   ylab("This is my y-label") +
#   ggtitle("This is my graph title")

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point(aes(color = park_name)) +
  xlab("Year") +
  ylab("Annual Visitors") +
  ggtitle("California National Parks Visitation")# +
  # theme(legend.title=element_blank()) # This is just if you want to remove the legend title

##################
## 7. ggplot themes
##################

# One way to make major changes to the overall aesthetic of your graph is
# using *themes* (that may exist in ggplot, or in other packages that you
# can load and install like 'ggthemes')

# Some examples to try:
# - theme_bw()
# - theme_minimal()
# - theme_classic()

# Using themes doesn't finalize your graph, but it can give you a better
# "starting point" for customization.

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point(aes(color = park_name)) +
  xlab("Year") +
  ylab("Annual Visitors") +
  ggtitle("California National Parks Visitation") +
  theme_classic() # +
  #theme(legend.title=element_blank()) # Again, just to remove the legend title (optional)

###################
## 8. ggplot geoms (types of graphics)
###################

# We just made a few scatterplot graphs. But what if we wanted to make a
# line graph? Then do we have to start over? No...as long as the *type* of
# data is compatible with the new geom that you choose, then all you'd have
# to change is that layer.

# Notice all of the types of geoms that exist when you start typing it in.
# And there are even other packages with **more** geom types that you can get.

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_line(aes(color = park_name)) +
  xlab("Year") +
  ylab("Annual Visitors") +
  ggtitle("California National Parks Visitation") +
  theme_classic() +
  theme(legend.title=element_blank())

# Keep in mind that the type of graph you're trying to create needs to be
# compatible with the data you're telling it to use.

# For example, if I want to make a histogram of visitation, then I couldn't
# give it both an x- and y- quantitative variable (since the y-axis on a
# histogram is always just the frequency of events within a bin). So a
# histogram only asks for one variable. A boxplot typically has one
# categorical variable and one quantitative variable. For example:

ggplot(data = np_ca, aes(x = park_name, y = visitors)) +
  geom_boxplot(aes(fill = park_name))

# And you'd obviously want to customize (e.g. x-axis labels)

ggplot(data = np_ca, aes(x = park_name, y = visitors)) +
  geom_jitter(aes(color = park_name), width = 0.1, alpha = 0.4) +
  coord_flip() # Flips x- and y-variable visually

# ...and then you can continue to customize.

# Example customization:
ggplot(data = np_ca, aes(x = park_name, y = visitors)) +
  geom_jitter(aes(color = park_name), width = 0.1, alpha = 0.4) +
  theme_bw() +
  ylab("Annual Visitors") +
  xlab("") +
  ggtitle("California NP Annual Visitors (1904 - 2016)") +
  coord_flip() +
  theme(legend.position = "none",
        axis.text.x = element_text(angle = 45, hjust = 1))

# You can also *combine* different types of compatible graphs. For example,
# you can create a graph with lines and points by using both geom_line and
# geom_point.

# Using the Dinosaur National Monument subset we created (dino_nm):

ggplot(data = dino_nm, aes(x = year, y = visitors)) +
  geom_point() +
  geom_line() +
  geom_smooth() # Use 'span' argument to update "wiggliness" in loess smoothing

####################
## 9. faceting
####################

# Considering our CA National Parks Visitation, what if we wanted each
# National Park to exist in its own graphics space? Would we need to create
# a new graph for each? No - we can use facet_wrap to split up the graph by
# a variable that we pick (here, park_name).

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point() +
  xlab("Year") +
  ylab("Annual Visitors") +
  theme_bw() +
  ggtitle("California National Parks Visitation") +
  facet_wrap(~park_name)

####################
## 10. Bar plots
####################

# First, let's make a subset only of data for visitor counts from 2016 for
# all of the National Parks and National Monuments in the original np_visit
# dataset. I'll call my subset 'visit_16'.

visit_16 <- np_visit %>%
  filter(year == 2016) %>%
  filter(type == "National Park" | type == "National Monument")

# So we have 134 parks that are designated as either National Parks or
# National Monuments.

# We can use geom_bar() in ggplot to *count* and display (as a bar) the
# number of times a certain outcome or character string appears in a column.

# Here, we'll create a bar graph showing how many NPs and NMs had recorded
# visitors in 2016. Note that you do NOT give it the counts - that's what
# geom_bar does for us.

ggplot(visit_16, aes(x = type)) +
  geom_bar()

# What if we would like to know where (in what region) each of these exist?
# We can use the fill() argument to create a stacked bar graph, where the
# different colors indicate counts within each region.

ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region)) +
  theme_bw()

# Just a couple more things. The graph we just made shows the actual counts
# as a stacked bar graph. We might just be interested in the *proportions*
# of each monument type that exist in each region, or we might like an
# "unstacked" version.

# Use the position argument to adjust geom_bar appearance. Set to "fill" if
# you want proportions shown (so all bars will be from 0 to 1), and "dodge"
# to have bars appear side-by-side for each group.

# *Note that updating the position doesn't automatically correct the y-axis
# label, as you'll see below.*

# Using position = "fill":
ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region), position = "fill") +
  theme_bw() +
  ylab("Proportion")

# Using position = "dodge":
ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region), position = "dodge") +
  theme_bw()

###################
## 11. Exporting your gg-graphs
###################

# To export a hi-res version of your beautiful graph, make sure that you
# store your graph (assign it a name...we haven't been doing that so far).

# I'll copy and paste the code for the bar graph I made above, and store it
# as 'park_graph'.

park_graph <- ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region), position = "dodge") +
  theme_bw()

# Then use ggsave() to export, including the size (width = , height = ) and
# resolution (dpi = ) as desired.

ggsave("my_park_graph.png", park_graph, width = 5, height = 5, dpi = 300)
/ggplot visualization exercise/data_vis_script.R
no_license
LilianYou/Geography_Analytics
R
false
false
11,534
r
################
# Intro to ggplot
# eco-data-sci workshop
# 1/17/2018
################

# We'll be using data for National Parks visitation to practice basic data visualization using ggplot (from the National Park Service at <https://irma.nps.gov/Stats/SSRSReports>).

# Make sure to check out the Ocean Health Index Data Science Training book at <http://ohi-science.org/data-science-training/>, which includes more detail, examples and resources for using ggplot in Chapter 5.

################
##What is ggplot?

# ggplot2 is a graphics package specifically built to help you iteratively create customized graphs. It exists solo, or within the *tidyverse* - a collection of data wrangling, visualizing, and presenting packages that play nicely together.
################

################
##1. Load tidyverse (install if necessary)
################

# If you do NOT have the tidyverse installed, you may need to install it first using **install.packages("tidyverse")**. When you install a package, then it exists in R's brain but is dormant.

# install.packages("tidyverse")

# Then load it to "activate" in R's brain

library(tidyverse)

################
##2. Get the data (np_visit.csv)
################

# NOTE(review): hard-coded home-directory path — workshop participants will
# need to point this at their own copy of np_visit.csv.
np_visit <- read_csv("~/github/data-vis/np_visit.csv")

################
##3. Single series scatterplot (Dinosaur National Monument visitors)
################

# First, we'll explore visitation at Dinosaur National Monument.

# Let's make a subset of our data that only contains information for Dinosaur (I'll call my subset dino_nm). We'll use this subset to create our graph.

dino_nm <- np_visit %>%
  filter(park_name == "Dinosaur National Monument")

# Take a look at that data frame (View() or just click on the blue circle with arrow next to it in the Environment tab). We'll make a scatterplot of year (x-axis) versus visitors (y-axis).

# How do we make that graph in ggplot?

# To make the most basic graph, you need to tell R three things:
# 1. You're using ggplot
# 2. What data is used to create the graph
# 3. What type of graph you want to create
# ...everything beyond that is optional customization.

# So code to make the most basic scatterplot for Dinosaur NM might look something like this:

ggplot(data = dino_nm, aes(x = year, y = visitors)) +
  geom_point(color = "blue", pch = 2, size = 3)

# Note that we used the aes() - aesthetics - argument here. Whenever you are referencing a variable in ggplot code, it needs to be within an aes() argument.

# You'd want to customize that graph - we'll get back to customization later on. For now, focus on the structure of the ggplot code.

# What if we have more than one series we're trying to plot?

################
##4. Multi-series graph of California National Parks visitation
################

# Make a new subset (I'll store as data frame 'np_ca') that only includes annual visitors in California National Parks, and arrange by park name (alphabetical) then year (increasing). *Note: data wrangling using dplyr and tidyr will be covered in another eco-data-sci workshop.*

np_ca <- np_visit %>% # introduce pipe operator?
  filter(state == "CA" & type == "National Park") %>%
  arrange(park_name, year)

# Go exploring a little bit. How many parks are there in California, and what are they?

summary(np_ca) # Useful to see the class of each variable (column)
unique(np_ca$park_name) # If a factor, can use 'levels' - but this is just a character
length(unique(np_ca$park_name)) # To see how many there are

# Now let's make a scatterplot graph (year v. visitors for the 9 California NPs):

ggplot(data = np_ca, aes(x = year, y = visitors)) + # There are (1) and (2)
  geom_point() # This is (3) - what type of graph do you want to create?

# Now you have made a totally hideous and useless graph - but it DOES contain all of the correct data. We just need to figure out how to clean it up a little bit to make it useful.

# How do we do that?

#################
##5. Updating graph characteristics by VARIABLE
#################

# We would like for each CA National Park series to be shown in a different color. We can do that by updating within the geom_point() layer (that's the layer where the points are added...). Since we're referencing a variable (park_name), we'll need to use the aes() argument. Anything that is not specified by a variable can be added outside of an aes() argument.

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point(aes(color = park_name))

##################
##6. Customization - updating labels and titles
##################

# We customize graphs in ggplot *iteratively* by adding layers (using the plus sign '+') to a base graphic and/or adding arguments within layers.

# Use xlab() and ylab() layers to update x- and y-axis labels, and ggtitle() to add a graph title

# graph +
#   xlab("This is my x-label") +
#   ylab("This is my y-label") +
#   ggtitle("This is my graph title")

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point(aes(color = park_name)) +
  xlab("Year") +
  ylab("Annual Visitors") +
  ggtitle("California National Parks Visitation")# +
  # theme(legend.title=element_blank()) # This is just if you want to remove the legend title

##################
##7. ggplot themes
##################

# One way to make major changes to the overall aesthetic of your graph is using *themes* (that may exist in ggplot, or in other packages that you can load and install like 'ggthemes')

# Some examples to try:
# - theme_bw()
# - theme_minimal()
# - theme_classic()

# Using themes doesn't finalize your graph, but it can give you a better "starting point" for customization.

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point(aes(color = park_name)) +
  xlab("Year") +
  ylab("Annual Visitors") +
  ggtitle("California National Parks Visitation") +
  theme_classic() # +
  #theme(legend.title=element_blank()) # Again, just to remove the legend title (optional)

###################
##8. ggplot geoms (types of graphics)
###################

# We just made a few scatterplot graphs. But what if we wanted to make a line graph? Then do we have to start over? No...as long as the *type* of data is compatible with the new geom that you choose, then all you'd have to change is that layer.

# Notice all of the types of geoms that exist when you start typing it in. And there are even other packages with **more** geom types that you can get.

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_line(aes(color = park_name)) +
  xlab("Year") +
  ylab("Annual Visitors") +
  ggtitle("California National Parks Visitation") +
  theme_classic() +
  theme(legend.title=element_blank())

# Keep in mind that the type of graph you're trying to create needs to be compatible with the data you're telling it to use.

# For example, if I want to make a histogram of visitation, then I couldn't give it both an x- and y- quantitative variable (since the y-axis on a histogram is always just the frequency of events within a bin). So a histogram only asks for one variable. A boxplot typically has one categorical variable and one quantitative variable. For example:

ggplot(data = np_ca, aes(x = park_name, y = visitors)) +
  geom_boxplot(aes(fill = park_name))

# And you'd obviously want to customize (e.g. x-axis labels)

ggplot(data = np_ca, aes(x = park_name, y = visitors)) +
  geom_jitter(aes(color = park_name), width = 0.1, alpha = 0.4) +
  coord_flip() # Flips x- and y-variable visually

# ...and then you can continue to customize.

# Example customization:

ggplot(data = np_ca, aes(x = park_name, y = visitors)) +
  geom_jitter(aes(color = park_name), width = 0.1, alpha = 0.4) +
  theme_bw() +
  ylab("Annual Visitors") +
  xlab("") +
  ggtitle("California NP Annual Visitors (1904 - 2016)") +
  coord_flip() +
  theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))

# You can also *combine* different types of compatible graphs. For example, you can create a graph with lines and points by using both geom_line and geom_point.

# Using the Dinosaur National Monument subset we created (dino_nm):

ggplot(data = dino_nm, aes(x = year, y = visitors)) +
  geom_point() +
  geom_line() +
  geom_smooth() # Use 'span' argument to update "wiggliness" in loess smoothing

####################
##9. faceting
####################

# Considering our CA National Parks Visitation, what if we wanted each National Park to exist in its own graphics space? Would we need to create a new graph for each? No - we can use facet_wrap to split up the graph by a variable that we pick (here, park_name).

ggplot(data = np_ca, aes(x = year, y = visitors)) +
  geom_point() +
  xlab("Year") +
  ylab("Annual Visitors") +
  theme_bw() +
  ggtitle("California National Parks Visitation") +
  facet_wrap(~park_name)

####################
##10. Bar plots
####################

# First, let's make a subset only of data for visitor counts from 2016 for all of the National Parks and National Monuments in the original np_visit dataset. I'll call my subset 'visit_16'.

visit_16 <- np_visit %>%
  filter(year == 2016) %>%
  filter(type == "National Park" | type == "National Monument")

# So we have 134 parks that are designated as either National Parks or National Monuments.

# We can use geom_bar() in ggplot to *count* and display (as a bar) the number of times a certain outcome or character string appears in a column.

# Here, we'll create a bar graph showing how many NPs and NMs had recorded visitors in 2016. Note that you do NOT give it the counts - that's what geom_bar does for us.

ggplot(visit_16, aes(x = type)) +
  geom_bar()

# What if we would like to know where (in what region) each of these exist? We can use the fill aesthetic (inside aes()) to create a stacked bar graph, where the different colors indicate counts within each region.

ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region)) +
  theme_bw()

# Just a couple more things. The graph we just made shows the actual counts as a stacked bar graph. We might just be interested in the *proportions* of each monument type that exist in each region, or we might like an "unstacked" version.

# Use the position argument to adjust geom_bar appearance. Set to "fill" if you want proportions shown (so all bars will be from 0 to 1), and "dodge" to have bars appear side-by-side for each group.

# *Note that updating the position doesn't automatically correct the y-axis label, as you'll see below.*

# Using position = "fill":

ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region), position = "fill") +
  theme_bw() +
  ylab("Proportion")

# Using position = "dodge":

ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region), position = "dodge") +
  theme_bw()

###################
##11. Exporting your gg-graphs
###################

# To export a hi-res version of your beautiful graph, make sure that you store your graph (assign it a name...we haven't been doing that so far).

# I'll copy and paste the code for the bar graph I made above, and store it as 'park_graph'.

park_graph <- ggplot(visit_16, aes(x = type)) +
  geom_bar(aes(fill = region), position = "dodge") +
  theme_bw()

# Then use ggsave() to export, including the size (width = , height = ) and resolution (dpi = ) as desired.

ggsave("my_park_graph.png", park_graph, width = 5, height = 5, dpi = 300)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/load.R \docType{methods} \name{load,ModelRef-method} \alias{load,ModelRef-method} \title{Load a ModelRef} \usage{ \S4method{load}{ModelRef}(file) } \arguments{ \item{file}{object to load} } \description{ Load a ModelRef }
/man/load-ModelRef-method.Rd
no_license
patrickvossler18/simulator
R
false
true
300
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/load.R \docType{methods} \name{load,ModelRef-method} \alias{load,ModelRef-method} \title{Load a ModelRef} \usage{ \S4method{load}{ModelRef}(file) } \arguments{ \item{file}{object to load} } \description{ Load a ModelRef }
# Plot modelled barramundi growth (weight through time) for two model runs
# (sept08 and end-sept08 starts) and overlay field validation data; the
# figure is written to a PNG in the output directory.
library(SDMTools)

out.dir <- 'E:/Barra Work Directory/outputs/daily_validation/trials/'
setwd(out.dir)
cols <- rainbow(9)

# Load a saved model run (an .rData file containing an object named `out`)
# into a private environment -- the original loaded into the global
# workspace and had to NULL-out `out` afterwards -- and attach a 1-based
# day index used as the x-axis.
load_run <- function(path) {
  run_env <- new.env()
  load(file = path, envir = run_env)
  run <- run_env$out
  run$day <- seq_along(run$julian.day)
  run
}

# Draw one growth trajectory: dotted line over the whole run, solid line
# for the portion up to 3000 g (as in the original script).
draw_growth <- function(run, col) {
  lines(run$day, run$wt, lty = 3, col = col)
  keep <- which(run$wt <= 3000)
  lines(run$day[keep], run$wt[keep], col = col)
}

sept08    <- load_run(paste0(out.dir, 'dtr.sept08.rData'))
endsept08 <- load_run(paste0(out.dir, 'dtr.end-sept08.rData'))

png(paste0(out.dir, 'dtr.test2.png'), width = 7, height = 7, units = 'cm',
    res = 300, pointsize = 5, bg = 'white')
par(mar = c(3, 5, 3, 2))
# Empty plot frame (type = 'n'); the series are added with lines() below.
plot(sept08$day, sept08$wt, ylim = c(0, 5000),
     main = "Weight of Barramundi over time", xaxt = 'n', xlab = NA,
     ylab = "Weight (g)", type = 'n', cex.main = 1.5, cex.lab = 1.3,
     cex.axis = 1, col = cols[1])
draw_growth(sept08, cols[2])
draw_growth(endsept08, cols[2])

#compare with real data
# Vertical segments at day 122 and day 670 -- presumably observed weight
# ranges from field data; TODO confirm source of these values.
lines(x = c(122, 122), y = c(500, 1000))
lines(x = c(670, 670), y = c(4000, 5000))
#points(x=c(274, 456, 639), y=c(1200, 1500, 3000), pch=8)
axis(1, at = c(1, 182.5, 365, 547.5, 730),
     labels = c(NA, '6 months', NA, '18 months', NA),
     lwd = 1, lwd.ticks = 1.2, cex.axis = 1)
dev.off()
/barra/barra_optimum_temp/glencross/validation.image.daintree.r
no_license
LaurenHodgson/Projects
R
false
false
1,332
r
# Plot modelled barramundi growth (weight through time) for two model runs
# (sept08 and end-sept08 starts) and overlay field validation data; the
# figure is written to a PNG in the output directory.
library(SDMTools)

out.dir <- 'E:/Barra Work Directory/outputs/daily_validation/trials/'
setwd(out.dir)
cols <- rainbow(9)

# Load a saved model run (an .rData file containing an object named `out`)
# into a private environment -- the original loaded into the global
# workspace and had to NULL-out `out` afterwards -- and attach a 1-based
# day index used as the x-axis.
load_run <- function(path) {
  run_env <- new.env()
  load(file = path, envir = run_env)
  run <- run_env$out
  run$day <- seq_along(run$julian.day)
  run
}

# Draw one growth trajectory: dotted line over the whole run, solid line
# for the portion up to 3000 g (as in the original script).
draw_growth <- function(run, col) {
  lines(run$day, run$wt, lty = 3, col = col)
  keep <- which(run$wt <= 3000)
  lines(run$day[keep], run$wt[keep], col = col)
}

sept08    <- load_run(paste0(out.dir, 'dtr.sept08.rData'))
endsept08 <- load_run(paste0(out.dir, 'dtr.end-sept08.rData'))

png(paste0(out.dir, 'dtr.test2.png'), width = 7, height = 7, units = 'cm',
    res = 300, pointsize = 5, bg = 'white')
par(mar = c(3, 5, 3, 2))
# Empty plot frame (type = 'n'); the series are added with lines() below.
plot(sept08$day, sept08$wt, ylim = c(0, 5000),
     main = "Weight of Barramundi over time", xaxt = 'n', xlab = NA,
     ylab = "Weight (g)", type = 'n', cex.main = 1.5, cex.lab = 1.3,
     cex.axis = 1, col = cols[1])
draw_growth(sept08, cols[2])
draw_growth(endsept08, cols[2])

#compare with real data
# Vertical segments at day 122 and day 670 -- presumably observed weight
# ranges from field data; TODO confirm source of these values.
lines(x = c(122, 122), y = c(500, 1000))
lines(x = c(670, 670), y = c(4000, 5000))
#points(x=c(274, 456, 639), y=c(1200, 1500, 3000), pch=8)
axis(1, at = c(1, 182.5, 365, 547.5, 730),
     labels = c(NA, '6 months', NA, '18 months', NA),
     lwd = 1, lwd.ticks = 1.2, cex.axis = 1)
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_logit.R
\name{feature_logit}
\alias{feature_logit}
\title{Generate, format and save a tabulation of the most important
predictors in a logit model (glm/glmnet/speedglm).}
\usage{
feature_logit(model, cluster_var_vector = NA, feat_lim = 300, output_path,
  add_dt = NULL, mode = "default")
}
\arguments{
\item{model}{}

\item{cluster_var_vector}{}

\item{output_path}{}
}
\description{
Generate, format and save a tabulation of the most important predictors in a
logit model (glm/glmnet/speedglm).
}
/man/feature_logit.Rd
no_license
ClaraMarquardt/huhn
R
false
true
474
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature_logit.R
\name{feature_logit}
\alias{feature_logit}
\title{Generate, format and save a tabulation of the most important
predictors in a logit model (glm/glmnet/speedglm).}
\usage{
feature_logit(model, cluster_var_vector = NA, feat_lim = 300, output_path,
  add_dt = NULL, mode = "default")
}
\arguments{
\item{model}{}

\item{cluster_var_vector}{}

\item{output_path}{}
}
\description{
Generate, format and save a tabulation of the most important predictors in a
logit model (glm/glmnet/speedglm).
}
# Build a world choropleth of GDP by country and save it as a PNG.
library(rworldmap)
library(RColorBrewer)
library(ggplot2)

# Read the country table; GDP arrives as text (missing values coded " NA"),
# so coerce it to numeric via character.
country_df <- read.csv("countries.csv", na.strings = " NA")
country_df$GDP <- as.numeric(as.character(country_df$GDP))

# Join the country rows onto rworldmap's polygons by country name.
world_map <- joinCountryData2Map(country_df,
                                 joinCode = "NAME",
                                 nameJoinColumn = "Country",
                                 verbose = TRUE)

# Render the map to a PNG device, add the legend, then close the device.
png(filename = "CountryGDP.png", width = 800, height = 600, units = "px")
map_params <- mapCountryData(world_map,
                             nameColumnToPlot = "GDP",
                             missingCountryCol = "dark grey",
                             addLegend = FALSE,
                             oceanCol = "lightsteelblue2",
                             numCats = 15,
                             catMethod = "logFixedWidth",
                             colourPalette = brewer.pal(9, "RdPu"))
do.call(addMapLegend, c(map_params, legendWidth = 0.5, legendMar = 2))
dev.off()
/country.R
no_license
tejaykodali/CountryGDP
R
false
false
733
r
# Build a world choropleth of GDP by country and save it as a PNG.
library(rworldmap)
library(RColorBrewer)
library(ggplot2)

# Read the country table; GDP arrives as text (missing values coded " NA"),
# so coerce it to numeric via character.
country_df <- read.csv("countries.csv", na.strings = " NA")
country_df$GDP <- as.numeric(as.character(country_df$GDP))

# Join the country rows onto rworldmap's polygons by country name.
world_map <- joinCountryData2Map(country_df,
                                 joinCode = "NAME",
                                 nameJoinColumn = "Country",
                                 verbose = TRUE)

# Render the map to a PNG device, add the legend, then close the device.
png(filename = "CountryGDP.png", width = 800, height = 600, units = "px")
map_params <- mapCountryData(world_map,
                             nameColumnToPlot = "GDP",
                             missingCountryCol = "dark grey",
                             addLegend = FALSE,
                             oceanCol = "lightsteelblue2",
                             numCats = 15,
                             catMethod = "logFixedWidth",
                             colourPalette = brewer.pal(9, "RdPu"))
do.call(addMapLegend, c(map_params, legendWidth = 0.5, legendMar = 2))
dev.off()
# Plot estimated false-discovery-rate curves for the T and LPC statistics.
# `frac` is the fraction of genes (smallest FDRs) shown on the plot.
PlotFDRs <- function(lpcfdr.out, frac = .25){
  # Validate the input structure before touching its components.
  CheckPlotFDRsFormat(lpcfdr.out, frac)
  lpc_fdr <- lpcfdr.out$fdrlpc
  t_fdr <- lpcfdr.out$fdrt
  # Keep only the smallest `frac` fraction of each FDR vector.
  t_keep <- t_fdr[t_fdr < quantile(t_fdr, frac)]
  lpc_keep <- lpc_fdr[lpc_fdr < quantile(lpc_fdr, frac)]
  # Common y-range so both curves fit on the same axes.
  y_range <- range(c(t_keep, lpc_keep))
  plot(t_keep[order(t_keep, decreasing = FALSE)], type = "l", ylim = y_range,
       main = "Estimated FDRs for T and LPC",
       xlab = "Num Genes Called Significant", ylab = "Estimated FDR")
  points(lpc_keep[order(lpc_keep, decreasing = FALSE)], type = "l", col = "red")
  legend("topleft", pch = 15, col = c("black", "red"), c("T", "LPC"))
}
/R/PlotFDRs.R
no_license
cran/lpc
R
false
false
599
r
# Plot estimated false-discovery-rate curves for the T and LPC statistics.
# `frac` is the fraction of genes (smallest FDRs) shown on the plot.
PlotFDRs <- function(lpcfdr.out, frac = .25){
  # Validate the input structure before touching its components.
  CheckPlotFDRsFormat(lpcfdr.out, frac)
  lpc_fdr <- lpcfdr.out$fdrlpc
  t_fdr <- lpcfdr.out$fdrt
  # Keep only the smallest `frac` fraction of each FDR vector.
  t_keep <- t_fdr[t_fdr < quantile(t_fdr, frac)]
  lpc_keep <- lpc_fdr[lpc_fdr < quantile(lpc_fdr, frac)]
  # Common y-range so both curves fit on the same axes.
  y_range <- range(c(t_keep, lpc_keep))
  plot(t_keep[order(t_keep, decreasing = FALSE)], type = "l", ylim = y_range,
       main = "Estimated FDRs for T and LPC",
       xlab = "Num Genes Called Significant", ylab = "Estimated FDR")
  points(lpc_keep[order(lpc_keep, decreasing = FALSE)], type = "l", col = "red")
  legend("topleft", pch = 15, col = c("black", "red"), c("T", "LPC"))
}
# DeepState test-harness wrapper: record the call's arguments, then forward
# to the compiled routine from the `accelerometry` package.
function (x, window) 
{
    # `data.env` is looked up in the global workspace; its modification
    # below relies on environment reference semantics -- TODO confirm
    # `data.env` is an environment (a plain list copy would be discarded).
    e <- get("data.env", .GlobalEnv)
    # Append this call's inputs to the capture log for later replay.
    e[["blocksums_n_max"]][[length(e[["blocksums_n_max"]]) + 1]] <- list(x = x, window = window)
    # Invoke the registered C routine and return its result.
    .Call("_accelerometry_blocksums_n_max", x, window)
}
/valgrind_test_dir/blocksums_n_max-test.R
no_license
akhikolla/RcppDeepStateTest
R
false
false
224
r
function (x, window) { e <- get("data.env", .GlobalEnv) e[["blocksums_n_max"]][[length(e[["blocksums_n_max"]]) + 1]] <- list(x = x, window = window) .Call("_accelerometry_blocksums_n_max", x, window) }
#============ #validation.R #============ #This script defines functions that are used to validate data prior to #reading or writing to the datastore. The functions check whether the datastore #contains the group/table/dataset requested and whether the data match #specifications. Unlike the functions defined in the "hdf5.R" script, these #functions don't directly interact with the datastore. Instead, they rely on the #datastore listing (Datastore) that is maintained in the model state file. #CHECK DATASET EXISTENCE #======================= #' Check dataset existence #' #' \code{checkDataset} checks whether a dataset exists in the datastore and #' returns a TRUE or FALSE value with an attribute of the full path to where the #' dataset should be located in the datastore. #' #' This function checks whether a dataset exists. The dataset is identified by #' its name and the table and group names it is in. If the dataset is not in the #' datastore, an error is thrown. If it is located in the datastore, the full #' path name to the dataset is returned. #' #' @param Name a string identifying the dataset name. #' @param Table a string identifying the table the dataset is a part of. #' @param Group a string or numeric representation of the group the table is a #' part of. #' @param DstoreListing_df a dataframe which lists the contents of the datastore #' as contained in the model state file. #' @return A logical identifying whether the dataset is in the datastore. It has #' an attribute that is a string of the full path to where the dataset should be #' in the datastore. 
#' @export checkDataset <- function(Name, Table, Group, DstoreListing_df) { Name <- as.character(Name) Table <- as.character(Table) Group <- as.character(Group) #TableName <- checkTable(Table, Group, DstoreListing_df)[[2]] DatasetName <- file.path(Group, Table, Name) DatasetExists <- DatasetName %in% DstoreListing_df$groupname Result <- ifelse (DatasetExists, TRUE, FALSE) attributes(Result) <- list(DatasetName = DatasetName) Result } #GET ATTRIBUTES OF A DATASET #=========================== #' Get attributes of a dataset #' #' \code{getDatasetAttr} retrieves the attributes for a dataset in the datastore #' #' This function extracts the listed attributes for a specific dataset from the #' datastore listing. #' #' @param Name a string identifying the dataset name. #' @param Table a string identifying the table the dataset is a part of. #' @param Group a string or numeric representation of the group the table is a #' part of. #' @param DstoreListing_df a dataframe which lists the contents of the datastore #' as contained in the model state file. #' @return A named list of the dataset attributes. #' @export getDatasetAttr <- function(Name, Table, Group, DstoreListing_df) { DatasetName <- file.path(Group, Table, Name) #checkDataset(Name, Table, Group, DstoreListing_df)[[2]] DatasetIdx <- which(DstoreListing_df$groupname == DatasetName) DstoreListing_df$attributes[[DatasetIdx]] } #CHECK WHETHER TABLE EXISTS #========================== #' Check whether table exists in the datastore #' #' \code{checkTableExistence} checks whether a table is present in the #' datastore. #' #' This function checks whether a table is present in the datastore. #' #' @param Table a string identifying the table. #' @param Group a string or numeric representation of the group the table is a #' part of. #' @param DstoreListing_df a dataframe which lists the contents of the datastore #' as contained in the model state file. 
#' @return A logical identifying whether a table is present in the datastore. #' @export checkTableExistence <- function(Table, Group, DstoreListing_df) { TableName <- file.path(Group, Table) TableName %in% DstoreListing_df$groupname } #CHECK SPECIFICATION CONSISTENCY #=============================== #' Check specification consistency #' #' \code{checkSpecConsistency} checks whether the specifications for a dataset #' are consistent with the data attributes in the datastore #' #' This function compares the specifications for a dataset identified in a #' module "Get" or "Set" are consistent with the attributes for that data in the #' datastore. #' #' @param Spec_ls a list of data specifications consistent with a module "Get" #' or "Set" specifications. #' @param DstoreAttr_ a named list where the components are the attributes of a #' dataset. #' @return A list containing two components, Errors and Warnings. If no #' inconsistencies are found, both components will have zero-length character #' vectors. If there are one or more inconsistencies, then these components #' will hold vectors of error and warning messages. Mismatch between UNITS #' will produce a warning message. All other inconsistencies will produce #' error messages. #' @export checkSpecConsistency <- function(Spec_ls, DstoreAttr_) { Errors_ <- character(0) Warnings_ <- character(0) if (Spec_ls$TYPE != DstoreAttr_$TYPE) { Message <- paste0( "TYPE mismatch for ", Spec_ls$NAME, ". ", "Module ", Spec_ls$MODULE, " asks for TYPE = (", Spec_ls$TYPE, "). ", "Datastore contains TYPE = (", DstoreAttr_$TYPE, ")." ) Errors_ <- c(Errors_, Message) } #With code that allows unit conversions, can't expect units to be the same # if (Spec_ls$UNITS != DstoreAttr_$UNITS) { # Message <- paste0( # "UNITS mismatch for ", Spec_ls$NAME, ". ", # "Module ", Spec_ls$MODULE, "asks for UNITS = (", Spec_ls$UNITS, "). ", # "Datastore contains UNITS = (", DstoreAttr_$UNITS, ")." 
# ) # Warnings_ <- c(Warnings_, Message) # } if (!is.null(Spec_ls$PROHIBIT) & !is.null(DstoreAttr_$PROHIBIT)) { if (!all(Spec_ls$PROHIBIT %in% DstoreAttr_$PROHIBIT) | !all(DstoreAttr_$PROHIBIT %in% Spec_ls$PROHIBIT)) { SpecProhibit <- paste(Spec_ls$PROHIBIT, collapse = ", ") DstoreProhibit <- paste(DstoreAttr_$PROHIBIT, collapse = ", ") Message <- paste0( "PROHIBIT mismatch for ", Spec_ls$NAME, ". ", "Module ", Spec_ls$MODULE, " specifies PROHIBIT as (", SpecProhibit, "). ", "Datastore specifies PROHIBIT as (", DstoreProhibit, ")." ) Errors_ <- c(Errors_, Message) } } if (!is.null(Spec_ls$ISELEMENTOF) & !is.null(DstoreAttr_$ISELEMENTOF)) { if (!all(Spec_ls$ISELEMENTOF %in% DstoreAttr_$ISELEMENTOF) | !all(DstoreAttr_$ISELEMENTOF %in% Spec_ls$ISELEMENTOF)) { SpecElements <- paste(Spec_ls$ISELEMENTOF, collapse = ", ") DstoreElements <- paste(DstoreAttr_$ISELEMENTOF, collapse = ", ") Message <- paste0( "ISELEMENTOF mismatch for ", Spec_ls$NAME, ". ", "Module ", Spec_ls$MODULE, " specifies ISELEMENTOF as (", SpecElements, "). ", "Datastore specifies ISELEMENTOF as (", DstoreElements, ")." ) Errors_ <- c(Errors_, Message) } } list(Errors = Errors_, Warnings = Warnings_) } #CHECK DATA TYPE #=============== #' Check data type #' #' \code{checkMatchType} checks whether the data type of a data vector is #' consistent with specifications. #' #' This function checks whether the data type of a data vector is consistent #' with a specified data type. An error message is generated if data can't be #' coerced into the specified data type without the possibility of error or loss #' of information (e.g. if a double is coerced to an integer). A warning message #' is generated if the specified type is 'character' but the input data type is #' 'integer', 'double' or 'logical' since these can be coerced correctly, but #' that may not be what is intended (e.g. zone names may be input as numbers). 
#' Note that some modules may use NA inputs as a flag to identify case when #' result does not need to match a target. In this case, R will read in the type #' of data as logical. In this case, the function sets the data type to be the #' same as the specification for the data type so the function not flag a #' data type error. #' #' @param Data_ A data vector. #' @param Type A string identifying the specified data type. #' @param DataName A string identifying the field name of the data being #' compared (used for composing message identifying non-compliant fields). #' @return A list having 2 components, Errors and Warnings. If no error or #' warning is identified, both components will contain a zero-length character #' string. If either an error or warning is identified, the relevant component #' will contain a character string that identifies the data field and the type #' mismatch. #' @export checkMatchType <- function(Data_, Type, DataName) { DataType <- typeof(Data_) #Because some modules allow NA values as flag instead of target values if (all(is.na(Data_))) DataType <- Type Types <- paste0(Type, DataType) makeMessage <- function() { paste0("Type of data in field '", DataName, "' is ", DataType, " but is specified as ", Type) } makeError <- function() { list(Error = makeMessage(), Warning = character(0)) } makeWarning <- function() { list(Error = character(0), Warning = makeMessage()) } makeOk <- function() { list(Error = character(0), Warning = character(0)) } switch( Types, integerdouble = makeError(), integercharacter = makeError(), integerlogical = makeError(), doublecharacter = makeError(), doublelogical = makeError(), characterinteger = makeWarning(), characterdouble = makeWarning(), characterlogical = makeWarning(), logicalinteger = makeError(), logicaldouble = makeError(), logicalcharacter = makeError(), makeOk() ) } #CHECK VALUES WITH CONDITIONS #============================ #' Check values with conditions. 
#' #' \code{checkMatchConditions} checks whether a data vector contains any #' elements that match a set of conditions. #' #' This function checks whether any of the values in a data vector match one or #' more conditions. The conditions are specified in a character vector where #' each element is either "NA" (to match for the existence of NA values) or a #' character representation of a valid R comparison expression for comparing #' each element with a specified value (e.g. "< 0", "> 1", "!= 10"). This #' function is used both for checking for the presence of prohibited values and #' for the presence of unlikely values. #' #' @param Data_ A vector of data of type integer, double, character, or logical. #' @param Conditions_ A character vector of valid R comparison expressions or an #' empty vector if there are no conditions. #' @param DataName A string identifying the field name of the data being #' compared (used for composing message identifying non-compliant fields). #' @param ConditionType A string having a value of either "PROHIBIT" or #' "UNLIKELY", the two data specifications which use conditions. #' @return A character vector of messages which identify the data field and the #' condition that is not met. A zero-length vector is returned if none of the #' conditions are met. 
#' @export checkMatchConditions <- function(Data_, Conditions_, DataName, ConditionType) { if (length(Conditions_) == 1) { if (Conditions_ == "") { return(character(0)) } } makeMessage <- function(Cond) { paste0("Data in data name '", DataName, "' includes values matching ", ConditionType, " condition (", Cond, ").") } Results_ <- character(0) DataChecks_ <- list() for (i in 1:length(Conditions_)) { Cond <- Conditions_[i] if (Cond == "NA") { DataChecks_[[i]] <- any(is.na(Data_)) } else { TempData_ <- Data_[!is.na(Data_)] DataChecks_[[i]] <- any(eval(parse(text = paste("TempData_", Cond)))) rm(TempData_) } } TrueConditions_ <- Conditions_[unlist(DataChecks_)] for (Condition in TrueConditions_) { Results_ <- c(Results_, makeMessage(Condition)) } Results_ } #CHECK IF DATA VALUES ARE IN A SPECIFIED SET OF VALUES #===================================================== #' Check if data values are in a specified set of values #' #' \code{checkIsElementOf} checks whether a data vector contains any elements #' that are not in an allowed set of values. #' #' This function is used to check whether categorical data values are consistent #' with the defined set of allowed values. #' #' @param Data_ A vector of data of type integer, double, character, or logical. #' @param SetElements_ A vector of allowed values. #' @param DataName A string identifying the field name of the data being #' compared (used for composing message identifying non-compliant fields). #' @return A character vector of messages which identify the data field and the #' condition that is not met. A zero-length vector is returned if none of the #' conditions are met. #' @export checkIsElementOf <- function(Data_, SetElements_, DataName){ if (length(SetElements_) == 1) { if (SetElements_ == "") { return(character(0)) } } makeMessage <- function(El) { paste0("Data in data name '", DataName, "' includes value (", El, ") not in allowed set." 
) } Results_ <- character(0) DataChecks_ <- list() IsElement_ <- is.element(Data_, SetElements_) ProhibitedElements_ <- unique(Data_[!IsElement_]) for (Element in ProhibitedElements_) { Results_ <- c(Results_, makeMessage(Element)) } Results_ } #CHECK DATA CONSISTENCY WITH SPECIFICATION #========================================= #' Check data consistency with specification #' #' \code{checkDataConsistency} checks whether data to be written to a dataset is #' consistent with the dataset attributes. #' #' This function compares characteristics of data to be written to a dataset to #' the dataset attributes to determine whether they are consistent. #' #' @param DatasetName A string identifying the dataset that is being checked. #' @param Data_ A vector of values that may be of type integer, double, #' character, or logical. #' @param DstoreAttr_ A named list where the components are the attributes of a #' dataset. #' @return A list containing two components, Errors and Warnings. If no #' inconsistencies are found, both components will have zero-length character #' vectors. If there are one or more inconsistencies, then these components #' will hold vectors of error and warning messages. Mismatch between UNITS #' will produce a warning message. All other inconsistencies will produce #' error messages. 
#' @export checkDataConsistency <- function(DatasetName, Data_, DstoreAttr_) { Errors_ <- character(0) Warnings_ <- character(0) #Check data TYPE TypeCheckResult_ <- checkMatchType(Data_, DstoreAttr_$TYPE, DatasetName) if (length(TypeCheckResult_$Error) != 0) { Message <- paste0("The storage mode of the data (", typeof(Data_), ") does not match the storage mode of datastore (", DstoreAttr_["TYPE"], ") for dataset ", DatasetName) Errors_ <- c(Errors_, Message) } if (length(TypeCheckResult_$Warning) != 0) { Message <- paste0("The storage mode of the data (", typeof(Data_), ") does not match the storage mode of datastore (", DstoreAttr_["TYPE"], ") for dataset ", DatasetName) } #Check if character and SIZE is adequate if (typeof(Data_) == "character") { MaxSize <- max(nchar(Data_)) if (MaxSize > DstoreAttr_$SIZE) { Message <- paste0("Attempting to write character data of length (", MaxSize, ") which is longer than specified in datastore (", DstoreAttr_["SIZE"], ") for dataset ", DatasetName) Errors_ <- c(Errors_, Message) } } #Check if any values in PROHIBIT if (!is.null(DstoreAttr_$PROHIBIT)) { if (DstoreAttr_$PROHIBIT[1] != "") { Message <- checkMatchConditions( Data_, DstoreAttr_$PROHIBIT, DatasetName, "PROHIBIT") Errors_ <- c(Errors_, Message) } } #Check if all values in ISELEMENTOF if (!is.null(DstoreAttr_$ISELEMENTOF)) { if (DstoreAttr_$ISELEMENTOF[1] != "") { Message <- checkIsElementOf( Data_, DstoreAttr_$ISELEMENTOF, DatasetName) Errors_ <- c(Errors_, Message) } } #Check if any values in UNLIKELY if (!is.null(DstoreAttr_$UNLIKELY)) { if (DstoreAttr_$UNLIKELY != "") { Message <- checkMatchConditions( Data_, DstoreAttr_$UNLIKELY, DatasetName, "UNLIKELY") Warnings_ <- c(Warnings_, Message) } } #Check whether the sum of values equals the value specified in TOTAL if (!is.null(DstoreAttr_$TOTAL)) { if (DstoreAttr_$TOTAL != "") { if (sum(Data_) != DstoreAttr_$TOTAL) { Message <- paste("Sum of", DatasetName, "does not match specified total.") Errors_ <- c(Errors_, 
Message) } } } #Return list of errors and warnings list(Errors = Errors_, Warnings = Warnings_) } #PARSE UNITS SPECIFICATION #========================= #' Parse units specification into components and add to specifications list. #' #' \code{parseUnitsSpec} parses the UNITS attribute of a standard Inp, Get, or #' Set specification for a dataset to identify the units name, multiplier, and #' year for currency data. Returns a modified specifications list whose UNITS #' value is only the units name, and includes a MULTIPLIER attribute and YEAR #' attribute. #' #' The UNITS component of a specifications list can encode information in #' addition to the units name. This includes a value units multiplier and in #' the case of currency values the year for the currency measurement. The #' multiplier element can only be expressed in scientific notation where the #' number before the 'e' can only be 1. #' #' @param Spec_ls A standard specifications list for a Inp, Get, or Set item. #' @return a list that is a standard specifications list with the addition of #' a MULTIPLIER component and a YEAR component as well as a modification of the #' UNIT component. The MULTIPLIER component can have the value of NA, a number, #' or NaN. The value is NA if the multiplier is missing. It is a number if the #' multiplier is a valid number. The value is NaN if the multiplier is not a #' valid number. The YEAR component is a character string that is a 4-digit #' representation of a year or NA if the component is missing or not a proper #' year. The UNITS component is modified to only be the units name. #' @export parseUnitsSpec <- function(Spec_ls) { #Define function to return a multiplier value from a multiplier string #NA if none, NaN if not a properly specified scientic notation (e.g. 
1e3) getMultiplier <- function(String) { if (is.na(String)) { Result <- NA } else { SciTest_ <- as.numeric(unlist(strsplit(String, "e"))) if (length(SciTest_) != 2 | any(is.na(SciTest_)) | SciTest_[1] != 1) { Result <- NaN } else { Result <- as.numeric(String) } } Result } #Define function to return a year value from a year string #NA if none or not a correct year getYear <- function(String) { CurrentString <- unlist(strsplit(as.character(Sys.Date()), "-"))[1] if (is.na(as.numeric(String)) | is.na(String)) { Result <- NA } else { if (as.numeric(String) < 1900 | as.numeric(String) > CurrentString) { Result <- NA } else { Result <- String } } Result } #Split the parts of the units specification UnitsSplit_ <- unlist(strsplit(Spec_ls$UNITS, "\\.")) #The units name is the first element Spec_ls$UNITS <- UnitsSplit_[1] #If currency type, the year is the 2nd element and multiplier is 3rd element if (Spec_ls$TYPE == "currency") { Year <- UnitsSplit_[2] Multiplier <- UnitsSplit_[3] } else { Year <- NA Multiplier <- UnitsSplit_[2] } #Process the multiplier element Spec_ls$MULTIPLIER <- getMultiplier(Multiplier) #Process the year element Spec_ls$YEAR <- getYear(Year) #Return the result Spec_ls } #RECOGNIZED TYPES AND UNITS ATTRIBUTES FOR SPECIFICATIONS #======================================================== #' Returns a list of returns a list of recognized data types, the units for each #' type, and storage mode of each type. #' #' \code{Types} returns a list of returns a list of recognized data types, the #' units for each type, and storage mode of each type. #' #' This function stores a listing of the dataset types recognized by the #' visioneval framework, the units recognized for each type, and the storage #' mode used for each type. Types include simple types (e.g. integer, double, #' character, logical) as well as complex types (e.g. distance, time, mass). For #' the complex types, units are specified as well. 
For example for the distance #' type, allowed units are MI (miles), FT (feet), KM (kilometers), M (meters). #' The listing includes conversion factors between units of each complex type. #' The listing also contains the storage mode (i.e. integer, double, character, #' logical of each type. For simple types, the type and the storage mode are the #' same). #' #' @return A list containing a component for each recognized type. Each #' component lists the recognized units for the type and the storage mode. There #' are currently 4 simple types and 10 complex type. The simple types are #' integer, double, character and logical. The complex types are currency, #' distance, area, mass, volume, time, speed, vehicle_distance, #' passenger_distance, and payload_distance. #' @export Types <- function(){ list( double = list(units = NA, mode = "double"), integer = list(units = NA, mode = "integer"), character = list(units = NA, mode = "character"), logical = list(units = NA, mode = "logical"), compound = list(units = NA, mode = "double"), currency = list( units = list( USD = c(USD = 1) ), mode = "double"), distance = list( units = list( MI = c(MI = 1, FT = 5280, KM = 1.60934, M = 1609.34), FT = c(MI = 0.000189394, FT = 1, KM = 0.0003048, M = 0.3048), KM = c(MI = 0.621371, FT = 3280.84, KM = 1, M = 1000), M = c(MI = 0.000621371, FT = 3.28084, KM = 0.001, M = 1)), mode = "double"), area = list( units = list( SQMI = c(SQMI = 1, ACRE = 640, SQFT = 2.788e+7, SQM = 2.59e+6, HA = 258.999, SQKM = 2.58999 ), ACRE = c(SQMI = 0.0015625, ACRE = 1, SQFT = 43560, SQM = 4046.86, HA = 0.404686, SQKM = 0.00404686), SQFT = c(SQMI = 3.587e-8, ACRE = 2.2957e-5, SQFT = 1, SQM = 0.092903, HA = 9.2903e-6, SQKM = 9.2903e-8), SQM = c(SQMI = 3.861e-7, ACRE = 0.000247105, SQFT = 10.7639, SQM = 1, HA = 1e-4, SQKM = 1e-6), HA = c(SQMI = 0.00386102, ACRE = 2.47105, SQFT = 107639, SQM = 0.00386102, HA = 1, SQKM = 0.01), SQKM = c(SQMI = 0.386102, ACRE = 247.105, SQFT = 1.076e+7, SQM = 1e+6, HA = 100, SQKM = 
1)), mode = "double" ), mass = list( units = list( LB = c(LB = 1, TON = 0.0005, MT = 0.000453592, KG = 0.453592, GM = 453.592), TON = c(LB = 2000, TON = 1, MT = 0.907185, KG = 907.185, GM = 907185), MT = c(LB = 2204.62, TON = 1.10231, MT = 1, KG = 1000, M = 1e+6), KG = c(LB = 2.20462, TON = 0.00110231, MT = 0.001, KG = 1, GM = 1000), GM = c(LB = 0.00220462, TON = 1.1023e-6, MT = 1e-6, KG = 0.001, GM = 1)), mode = "double" ), volume = list( units = list( GAL = c(GAL = 1, L = 3.78541), L = c(GAL = 0.264172, L = 1)), mode = "double" ), time = list( units = list( YR = c(YR = 1, DAY = 365, HR = 8760, MIN = 525600, SEC = 3.154e+7), DAY = c(YR = 0.00273973, DAY = 1, HR = 24, MIN = 1440, SEC = 86400), HR = c(YR = 0.000114155, DAY = 0.0416667, HR = 1, MIN = 60, SEC = 3600), MIN = c(YR = 1.9026e-6, DAY = 0.000694444, HR = 0.0166667, MIN = 1, SEC = 60), SEC = c(YR = 3.171e-8, DAY = 1.1574e-5, HR = 0.000277778, MIN = 0.0166667, SEC = 1)), mode = "double" ), people = list( units = list( PRSN = c(PRSN = 1) ), mode = "integer" ), vehicles = list( units = list( VEH = c(VEH = 1) ), mode = "integer" ), trips = list( units = list( TRIP = c(TRIP = 1) ), mode = "integer" ), households = list( units = list( HH = c(HH = 1) ), mode = "integer" ), employment = list( units = list( JOB = c(JOB = 1) ), mode = "integer" ), activity = list( units = list( HHJOB = c(HHJOB = 1) ) ) ) } #CHECK MEASUREMENT UNITS #======================= #' Check measurement units for consistency with recognized units for stated type. #' #' \code{checkUnits}checks the specified UNITS for a dataset for consistency #' with the recognized units for the TYPE specification for the dataset. It also #' splits compound units into elements. #' #' The visioneval code recognizes 4 simple data types (integer, double, logical, #' and character) and 9 complex data types (e.g. distance, time, mass). 
#' The simple data types can have any units of measure, but the complex data #' types must use units of measure that are declared in the Types() function. In #' addition, there is a compound data type that can have units that are composed #' of the units of two or more complex data types. For example, speed is a #' compound data type composed of distance divided by speed. With this example, #' speed in miles per hour would be represented as MI/HR. This function checks #' the UNITS specification for a dataset for consistency with the recognized #' units for the given data TYPE. To check the units of a compound data type, #' the function splits the units into elements and the operators that separate #' the elements. It identifies the element units, the complex data type for each #' element and the operators that separate the elements. #' #' @param DataType a string which identifies the data type as specified in the #' TYPE attribute for a data set. #' @param Units a string identifying the measurement units as specified in the #' UNITS attribute for a data set after processing with the parseUnitsSpec #' function. #' @return A list which contains the following elements: #' DataType: a string identifying the data type. #' UnitType: a string identifying whether the units correspond to a 'simple' #' data type, a 'complex' data type, or a 'compound' data type. #' Units: a string identifying the units. #' Elements: a list containing the elements of a compound units. Components of #' this list are: #' Types: the complex type of each element, #' Units: the units of each element, #' Operators: the operators that separate the units. #' Errors: a string containing an error message or character(0) if no error. 
#' @import stringr #' @export checkUnits <- function(DataType, Units) { #Define return value template Result_ls <- list( DataType = DataType, UnitType = character(0), Units = character(0), Elements = list(), Errors = character(0) ) #Identify recognized data types and check if DataType is one of them DT_ <- names(Types()) if (!(DataType %in% DT_)) { Msg <- paste0( "Data type is not a recognized data type. ", "Must be one of the following: ", paste(DT_, collapse = ", "), ".") Result_ls$Errors <- Msg return(Result_ls) } #Check if Units is a character type and has length equal to one if (length(Units) != 1 | typeof(Units) != "character") { Msg <- paste0( "Units value is not correctly specified. ", "Must be a string and must not be a vector." ) Result_ls$Errors <- Msg Result_ls$Units <- Units return(Result_ls) } #Identify the units type (either simple, complex, or compound) UT_ <- character(length(DT_)) names(UT_) <- DT_ UT_[DT_ %in% c("double", "integer", "character", "logical")] <- "simple" UT_[DT_ %in% "compound"] <- "compound" UT_[!UT_ %in% c("simple", "compound")] <- "complex" UnitType <- UT_[DataType] Result_ls$UnitType <- unname(UnitType) #Check Simple Type if (UnitType == "simple") { #No check necessary, assign units and return the result Result_ls$Units <- Units return(Result_ls) } #Check complex type if (UnitType == "complex") { Result_ls$Units <- Units #Check that Units are recognized for the specified data type AllowedUnits_ <- names(Types()[[DataType]]$units) if (!(Units %in% AllowedUnits_)) { Msg <- paste0( "Units specified for ", DataType, " are not correctly specified. 
", "Must be one of the following: ", paste(AllowedUnits_, collapse = ", "), ".") Result_ls$Errors <- Msg } #Return the result return(Result_ls) } #Check compound type #Define function to identify the data type from a unit findTypeFromUnit <- function(Units_) { Complex_ls <- Types()[DT_[UT_ == "complex"]] AllUnits_ <- unlist(lapply(Complex_ls, function(x) names(x$units))) UnitsToTypes_ <- gsub("[0-9]", "", names(AllUnits_)) names(UnitsToTypes_) <- AllUnits_ UnitsToTypes_[Units_] } #Define function to split units from compound type splitUnits <- function(Units){ OperatorLoc_ <- str_locate_all(Units, "[*/]")[[1]][,1] Operators_ <- sapply(OperatorLoc_, function(x) substr(Units, x, x)) UnitParts_ <- unlist(str_split(Units, "[*/]")) list(units = unname(UnitParts_), types = unname(findTypeFromUnit(UnitParts_)), operators = unname(Operators_)) } #Extract the units, types, and operators from the compound units string Result_ls$Units <- Units Units_ls <- splitUnits(Units) Result_ls$Elements$Types <- Units_ls$types Result_ls$Elements$Units <- Units_ls$units Result_ls$Elements$Operators <- Units_ls$operators #Check whether all element units are correct UnitsNotFound <- Units_ls$units[is.na(Units_ls$types)] if (length(UnitsNotFound) != 0) { Msg <- paste0( "One or more of the component units of the compound unit ", Units, " can't be resolved into units of recognized complex data types. ", "The following units elements are not recognized: ", paste(UnitsNotFound, collapse = ", "), ".") Result_ls$Errors <- Msg return(Result_ls) } #Check whether any duplication of data types for element units IsDupType_ <- duplicated(Units_ls$types) if (any(IsDupType_)) { DupTypes_ <- Units_ls$types[IsDupType_] DupUnits_ <- Units_ls$units[Units_ls$types %in% DupTypes_] Msg <- paste0( "Two or more of the component units of the compound unit ", Units, " are units in the same complex data type. ", "It does not make sense to have two units of the same complex type ", "in the same compound expression. 
The following units have the same type: ", paste(DupUnits_, collapse = ", "), ".")
    Result_ls$Errors <- Msg
    return(Result_ls)
  }
  #Return the result
  return(Result_ls)
}
# checkUnits("double", "person")
# checkUnits("Bogus", "person")
# checkUnits("people", "HH")
# checkUnits("distance", "MI")
# checkUnits("compound", "MI+KM")
# checkUnits("compound", "MI/KM")
# checkUnits("compound", "MI/HR")
# checkUnits("compound", "TRIP/PRSN/DAY")

#CHECK SPECIFICATION TYPE AND UNITS
#==================================
#' Checks the TYPE and UNITS and associated MULTIPLIER and YEAR attributes of a
#' Inp, Get, or Set specification for consistency.
#'
#' \code{checkSpecTypeUnits} checks correctness of TYPE, UNITS, MULTIPLIER and
#' YEAR attributes of a specification that has been processed with the
#' parseUnitsSpec function.
#'
#' This function checks whether the TYPE and UNITS of a module's specification
#' contain errors. The check is done on a module specification in which the
#' module's UNITS attribute has been parsed by the parseUnitsSpec function to
#' split the name, multiplier, and years parts of the UNITS attribute. The TYPE
#' is checked against the types catalogued in the Types function. The units name
#' in the UNITS attribute is checked against the units names corresponding to
#' each type catalogued in the Types function. The MULTIPLIER is checked to
#' determine whether a value is a valid number, NA, or not a number (NaN). A NA
#' value means that no multiplier was specified (this is OK); a NaN value means
#' that a multiplier that is not a number was specified, which is an error. The
#' YEAR attribute is checked to determine whether there is a proper
#' specification if the specified TYPE is currency. If the TYPE is currency, a
#' YEAR must be specified for Get and Set specifications.
#'
#' @param Spec_ls a list for a single specification (e.g. a Get specification
#' for a dataset) that has been processed with the parseUnitsSpec function to
#' split the name, multiplier, and year elements of the UNITS specification.
#' @param SpecGroup a string identifying the group that this specification
#' comes from (e.g. Inp, Get, Set).
#' @param SpecNum a number identifying which specification in the order of the
#' SpecGroup. This is used to identify the subject specification if an error
#' is identified.
#' @return A vector containing messages identifying any errors that are found.
#' @export
checkSpecTypeUnits <- function(Spec_ls, SpecGroup, SpecNum) {
  Errors_ <- character(0)
  #Name and Table are extracted here but only Type and Units drive the checks
  Name <- Spec_ls$NAME
  Table <- Spec_ls$TABLE
  Type <- Spec_ls$TYPE
  Units <- Spec_ls$UNITS
  #Types() is a framework catalog function defined elsewhere in this file
  AllowedTypes_ <- names(Types())
  #Check if type is an allowed type
  if (Type %in% AllowedTypes_) {
    #Check if units are correct for the type
    UnitsCheck <- checkUnits(Type, Units)
    if (length(UnitsCheck$Errors) == 0) {
      #Check that there is a valid year specification if type is currency
      #NOTE(review): the scalar conditions below use elementwise '&' where the
      #short-circuiting '&&' would be the idiomatic scalar operator
      if (Type == "currency") {
        if (is.na(Spec_ls$YEAR) & SpecGroup %in% c("Get", "Set")) {
          Msg <- paste0("The TYPE specified for the ", SpecGroup, " specification ",
                        "number ", SpecNum, " is 'currency' but the UNITS ",
                        "specification does not contain a valid year element. ",
                        "A valid year element must be specified so that the ",
                        "framework knows how to convert currency values to and from ",
                        "the proper year for the module. ",
                        "See the user documentation for how to properly specify a ",
                        "year in the UNITS specification.")
          Errors_ <- c(Errors_, Msg)
        }
        if (!is.na(Spec_ls$YEAR) & SpecGroup == "Inp") {
          Msg <- paste0("The TYPE specified for the ", SpecGroup, " specification ",
                        "number ", SpecNum, " is 'currency' and the UNITS ",
                        "specification contains a year element. ",
                        "A year element must NOT be part of the UNITS ",
                        "specification for a ", SpecGroup, " specification because ",
                        "the input file has to specify the nominal year for the ",
                        "input data. For ", SpecGroup, " specifications, the UNITS ",
                        "specification must only include a units name.")
          Errors_ <- c(Errors_, Msg)
        }
      }
      #Check that multiplier is correct
      if (is.nan(Spec_ls$MULTIPLIER) & SpecGroup %in% c("Get", "Set")) {
        Msg <- paste0("The UNITS specified for the ", SpecGroup, " specification ",
                      "number ", SpecNum, " does not contain a valid multiplier ",
                      "element. The multiplier element, if present, must use ",
                      "scientific notation with a coefficient of 1. ",
                      "See the user documentation for how to properly specify a ",
                      "multiplier in the UNITS attribute.")
        Errors_ <- c(Errors_, Msg)
      }
      if (!is.na(Spec_ls$MULTIPLIER) & SpecGroup == "Inp") {
        #NOTE(review): '"number ", SpecNum, "incorrectly' is missing a space
        #after the number in the assembled message — left unchanged here
        Msg <- paste0("The UNITS attribute for the ", SpecGroup, " specification ",
                      "number ", SpecNum, "incorrectly contains a multiplier element. ",
                      "A multiplier element must NOT be part of the UNITS ",
                      "specification for a ", SpecGroup, " specification because ",
                      "the input file has to specify the multiplier for the ",
                      "input data, if there is one. For ", SpecGroup,
                      " specifications, the UNITS specification must only include ",
                      "a units name.")
        Errors_ <- c(Errors_, Msg)
      }
    } else {
      #Units name did not match the units allowed for the type
      Msg <- paste0("UNITS specified for the ", SpecGroup, " specification ",
                    "number ", SpecNum, " are incorrect as follows: ",
                    UnitsCheck$Errors)
      Errors_ <- c(Errors_, Msg)
    }
  } else {
    Msg <- paste0("TYPE specified for the ", SpecGroup, " specification ",
                  "number ", SpecNum, " has an incorrect type. ",
                  "Check user documentation for list of allowed types.")
    Errors_ <- c(Errors_, Msg)
  }
  Errors_
}

#DEFINITION OF BASIC MODULE SPECIFICATIONS REQUIREMENTS
#======================================================
#' List basic module specifications to check for correctness
#'
#' \code{SpecRequirements} returns a list of basic requirements for module
#' specifications to be used for checking correctness of specifications.
#'
#' This function returns a list of the basic requirements for module
#' specifications.
#' The main components of the list are the components of module
#' specifications: RunBy, NewInpTable, NewSetTable, Inp, Get, Set. For each
#' item of each module specifications component, the list identifies the
#' required data type of the attribute entry and the allowed values for the
#' attribute entry.
#'
#' @return A list comprised of six named components: RunBy, NewInpTable,
#' NewSetTable, Inp, Get, Set. Each main component is a list that has a
#' component for each specification item that has values to be checked. For each
#' such item there is a list having two components: ValueType and ValuesAllowed.
#' The ValueType component identifies the data type that the data entry for the
#' item must have (e.g. character, integer). The ValuesAllowed item identifies
#' what values the item may have.
#' @export
SpecRequirements <- function(){
  #All specification attributes are character valued; this local constructor
  #builds the {ValueType, ValuesAllowed} requirement entry for one attribute
  charReq <- function(ValuesAllowed) {
    list(ValueType = "character", ValuesAllowed = ValuesAllowed)
  }
  #Regular expression for allowable name-like attribute values
  NamePattern <- "[0-9a-zA-Z_]"
  #Groups that a new table may be created in
  TableGroups_ <- c("Global", "Year")
  list(
    RunBy = charReq(c("Region", "Azone", "Bzone", "Czone", "Marea")),
    NewInpTable = list(
      TABLE = charReq(NamePattern),
      GROUP = charReq(TableGroups_)
    ),
    NewSetTable = list(
      TABLE = charReq(NamePattern),
      GROUP = charReq(TableGroups_)
    ),
    Inp = list(
      NAME = charReq(NamePattern),
      FILE = charReq("[0-9a-zA-Z_][.csv]"),
      TABLE = charReq(NamePattern),
      GROUP = charReq(TableGroups_),
      TYPE = charReq(NamePattern),
      UNITS = charReq(NamePattern)
    ),
    Get = list(
      NAME = charReq(NamePattern),
      TABLE = charReq(NamePattern),
      GROUP = charReq(c("Global", "BaseYear", "Year")),
      TYPE = charReq(NamePattern),
      UNITS = charReq(NamePattern)
    ),
    Set = list(
      NAME = charReq(NamePattern),
      TABLE = charReq(NamePattern),
      GROUP = charReq(TableGroups_),
      TYPE = charReq(NamePattern),
      UNITS = charReq(NamePattern)
    )
  )
}

#CHECK A MODULE SPECIFICATION
#============================
#' Checks a module specification for completeness and for incorrect entries
#'
#' \code{checkSpec} checks a single module specification for
#' completeness and for proper values.
#'
#' This function checks whether a single module specification (i.e. the
#' specification for a single dataset) contains the minimum required
#' attributes and that the values of the attributes are correct.
#'
#' @param Spec_ls a list containing the specifications for a single item in
#' a module specifications list.
#' @param SpecGroup a string identifying the specifications group the
#' specification is in (e.g. RunBy, NewInpTable, NewSetTable, Inp, Get, Set).
#' This is used in the error messages to identify which specification has
#' errors.
#' @param SpecNum an integer identifying which specification in the
#' specifications group has errors.
#' @return A vector containing messages identifying any errors that are found.
#' @import stringr
#' @export
checkSpec <- function(Spec_ls, SpecGroup, SpecNum) {
  Require_ls <- SpecRequirements()[[SpecGroup]]
  Errors_ <- character(0)
  #Check one specification requirement against the requirements catalog.
  #ReqName is the requirement name (e.g. TYPE); it is NULL for the RunBy
  #specification group, which is a bare value rather than a list of attributes.
  checkRequirement <- function(ReqName = NULL){
    if (is.null(ReqName)) {
      Spec <- Spec_ls
      Req_ls <- Require_ls
      Name <- ""
    } else {
      Spec <- Spec_ls[[ReqName]]
      Req_ls <- Require_ls[[ReqName]]
      Name <- paste0(ReqName, " ")
    }
    Errors_ <- character(0)
    #Check the storage type of the attribute value
    if (typeof(Spec) != Req_ls$ValueType) {
      Msg <- paste0("The type of the ", Name, "attribute of the ", SpecGroup,
                    " specification number ", SpecNum, " is incorrect. ",
                    "The attribute must be a ", Req_ls$ValueType, " type.")
      Errors_ <- c(Errors_, Msg)
    }
    #Check the attribute value against the allowed values (regex or value set)
    if (!any(str_detect(Spec, Req_ls$ValuesAllowed))) {
      Msg <- paste0("The value of the ", Name, "attribute of the ", SpecGroup,
                    " specification number ", SpecNum, " is incorrect. ",
                    "The attribute value must be one of the following: ",
                    paste(Req_ls$ValuesAllowed, collapse = ", "), ".")
      Errors_ <- c(Errors_, Msg)
    }
    Errors_
  }
  #Check a specification
  if (SpecGroup == "RunBy") {
    Errors_ <- c(Errors_, checkRequirement())
  } else {
    for (nm in names(Require_ls)) {
      Errors_ <- c(Errors_, checkRequirement(nm))
    }
    #Inp, Get and Set specifications also get TYPE/UNITS consistency checks
    if (SpecGroup %in% c("Inp", "Get", "Set")) {
      Errors_ <- c(Errors_, checkSpecTypeUnits(Spec_ls, SpecGroup, SpecNum))
    }
  }
  Errors_
}

#CHECK THE SPECIFICATIONS FOR A MODULE
#=====================================
#' Checks all module specifications for completeness and for incorrect entries
#'
#' \code{checkModuleSpecs} checks all module specifications for
#' completeness and for proper values.
#'
#' This function iterates through all the specifications for a module and
#' calls the checkSpec function to check each specification for completeness and
#' for proper values.
#'
#' @param Specs_ls a module specifications list.
#' @param ModuleName a string identifying the name of the module. This is used in
#' the error messages to identify which module has errors.
#' @return A vector containing messages identifying any errors that are found.
#' @export
checkModuleSpecs <- function(Specs_ls, ModuleName) {
  Errors_ <- character(0)
  #Check RunBy (mandatory, single specification)
  Err_ <- checkSpec(Specs_ls$RunBy, "RunBy", 1)
  if (length(Err_) != 0) {
    Msg <- paste0(
      "'RunBy' specification for module '", ModuleName,
      "' has one or more errors as follows.")
    Errors_ <- c(Errors_, Msg, Err_)
  }
  #Check each optional multi-item component if present. seq_along (rather than
  #1:length) correctly produces an empty loop for a present-but-empty component
  #instead of iterating over indices 1 and 0.
  for (Component in c("NewInpTable", "NewSetTable", "Inp", "Get", "Set")) {
    if (!is.null(Specs_ls[[Component]])) {
      Err_ <- character(0)
      for (i in seq_along(Specs_ls[[Component]])) {
        Err_ <- c(Err_, checkSpec(Specs_ls[[Component]][[i]], Component, i))
      }
      if (length(Err_) != 0) {
        Msg <- paste0(
          "'", Component, "' specification for module '", ModuleName,
          "' has one or more errors as follows.")
        Errors_ <- c(Errors_, Msg, Err_)
      }
    }
  }
  #Prepend a module-level summary message if any errors were found
  if (length(Errors_) != 0) {
    Msg <- paste0(
      "Module ", ModuleName, " has one or more errors as follow:"
    )
    Errors_ <- c(Msg, Errors_)
  }
  Errors_
}

#CHECK YEARS AND GEOGRAPHY OF INPUT FILE
#=======================================
#' Check years and geography of input file
#'
#' \code{checkInputYearGeo} checks the 'Year' and 'Geo' columns of an input file
#' to determine whether they are complete and have no duplications.
#'
#' This function checks the 'Year' and 'Geo' columns of an input file to
#' determine whether there are records for all run years specified for the
#' model and for all geographic areas for the level of geography. It also checks
#' for redundant year and geography entries.
#'
#' @param Year_ the vector extract of the 'Year' column from the input data.
#' @param Geo_ the vector extract of the 'Geo' column from the input data.
#' @param Group a string identifying the 'GROUP' specification for the data sets
#' contained in the input file.
#' @param Table a string identifying the 'TABLE' specification for the data sets
#' contained in the input file.
#' @return A list containing the results of the check. The list has two
#' mandatory components and two optional components. 'CompleteInput' is a
#' logical that identifies whether records are present for all years and
#' geographic areas. 'DupInput' identifies where are any redundant year and
#' geography entries. If 'CompleteInput' is FALSE, the list contains a
#' 'MissingInputs' component that is a string identifying the missing year and
#' geography records.
#' If 'DupInput' is TRUE, the list contains a component that
#' is a string identifying the duplicated year and geography records.
#' @export
checkInputYearGeo <- function(Year_, Geo_, Group, Table) {
  Result_ls <- list()
  #getModelState() is a framework accessor (defined elsewhere) supplying the
  #model run years (Years) and the geography table (Geo_df)
  G <- getModelState()
  #Make a vector of required year and geography combinations
  #NOTE(review): Required_df is only assigned when Group == "Year"; for any
  #other Group value the names() call below fails with an object-not-found
  #error — confirm callers only invoke this with Group == "Year"
  if (Group == "Year") {
    Required_df <-
      expand.grid(G$Years, unique(G$Geo_df[[Table]]), stringsAsFactors = FALSE)
  }
  names(Required_df) <- c("Year", "Geo")
  #Canonical "Year/Geo" keys for the required and the supplied combinations
  RequiredNames_ <- sort(paste(Required_df$Year, Required_df$Geo, sep = "/"))
  #Make a vector of year and geography combinations in the inputs
  InputNames_ <- sort(paste(Year_, Geo_, sep = "/"))
  #Check that there are missing records
  CompleteInputCheck_ <- RequiredNames_ %in% InputNames_
  Result_ls$CompleteInput <- all(CompleteInputCheck_)
  if (!all(CompleteInputCheck_)) {
    MissingNames_ <- RequiredNames_[!CompleteInputCheck_]
    Result_ls$MissingInputs <- paste(MissingNames_, collapse = ", ")
  }
  #Check whether there are duplicated records
  DuplicatedInputCheck_ <- duplicated(InputNames_)
  Result_ls$DupInput <- any(DuplicatedInputCheck_)
  if (any(DuplicatedInputCheck_)) {
    DuplicateNames_ <- InputNames_[DuplicatedInputCheck_]
    Result_ls$DuplicatedInputs <- paste(DuplicateNames_, collapse = ", ")
  }
  #Return the result
  Result_ls
}

#FIND SPECIFICATION CORRESPONDING TO A NAME, TABLE, AND GROUP
#============================================================
#' Find the full specification corresponding to a defined NAME, TABLE, and GROUP
#'
#' \code{findSpec} returns the full dataset specification for defined NAME,
#' TABLE, and GROUP.
#'
#' This function finds and returns the full specification from a specifications
#' list whose NAME, TABLE and GROUP values correspond to the Name, Table, and
#' Group argument values. The specifications list must be in standard format and
#' must be for only 'Inp', 'Get', or 'Set' specifications.
#'
#' @param Specs_ls a standard specifications list for 'Inp', 'Get', or 'Set'
#' @param Name a string for the name of the dataset
#' @param Table a string for the table that the dataset resides in
#' @param Group a string for the generic group that the table resides in
#' @return A list containing the full specifications for the dataset
#' @export
findSpec <- function(Specs_ls, Name, Table, Group) {
  #Index of the specification matching all three identifiers; assumes exactly
  #one match — TODO confirm duplicates cannot occur in a specifications list
  SpecIdx <- which(unlist(lapply(Specs_ls, function(x) {
    x$NAME == Name & x$TABLE == Table & x$GROUP == Group
  })))
  Specs_ls[[SpecIdx]]
}

#SORT DATA FRAME TO MATCH ORDER OF GEOGRAPHY IN DATASTORE TABLE
#==============================================================
#' Sort a data frame so that the order of rows matches the geography in a
#' datastore table.
#'
#' \code{sortGeoTable} returns a data frame whose rows are sorted to match the
#' geography in a specified table in the datastore.
#'
#' This function sorts the rows of a data frame so that the 'Geo' field in the
#' data frame matches the corresponding geography names in the specified table
#' in the datastore. The function returns the sorted table.
#'
#' @param Data_df a data frame that contains a 'Geo' field containing the names
#' of the geographic areas to sort by and any number of additional data fields.
#' @param Table a string for the table that is to be matched against.
#' @param Group a string for the generic group that the table resides in.
#' @return The data frame which has been sorted to match the order of geography
#' in the specified table in the datastore.
#' @export
sortGeoTable <- function(Data_df, Table, Group) {
  if (!("Geo" %in% names(Data_df))) {
    Msg <- paste0(
      "Data frame does not have a 'Geo' field. ",
      "A 'Geo' field must be included in order for the table to be sorted ",
      "to match the geography of the specified table in the datastore."
)
    stop(Msg)
  }
  #readFromTable() is a framework function defined elsewhere; reading the
  #dataset named the same as the table returns the geography names in
  #datastore order
  DstoreNames_ <- readFromTable(Table, Table, Group)
  #Reorder rows so that Data_df$Geo matches the datastore order
  Order_ <- match(DstoreNames_, Data_df$Geo)
  Data_df[Order_,]
}

#PARSE INPUT FILE FIELD NAMES
#============================
#' Parse field names of input file to separate out the field name, currency
#' year, and multiplier.
#'
#' \code{parseInputFieldNames} parses the field names of an input file to
#' separate out the field name, currency year (if data is
#' currency type), and value multiplier.
#'
#' The field names of input files can be used to encode more information than
#' the name itself. It can also encode the currency year for currency type data
#' and also if the values are in multiples (e.g. thousands of dollars). For
#' currency type data it is mandatory that the currency year be specified so
#' that the data can be converted to base year currency values (e.g. dollars in
#' base year dollars). The multiplier is optional, but needless to say, it can
#' only be applied to numeric data. The function returns a list with a component
#' for each field. Each component identifies the field name, year, multiplier,
#' and error status for the result of parsing the field name. If the field name
#' was parsed successfully, the error status is character(0). If the field name
#' was not successfully parsed, the error status contains an error message,
#' identifying the problem.
#'
#' @param FieldNames_ A character vector containing the field names of an
#' input file.
#' @param Specs_ls A list of specifications for fields in the input file.
#' @param FileName A string identifying the name of the file that the field
#' names are from. This is used for writing error messages.
#' @return A named list with one component for each field. Each component is a list
#' having 4 named components: Error, Name, Year, Multiplier. The Error
#' component has a value of character(0) if there are no errors or a character
#' vector of error messages if there are errors. The Name component is a string
#' with the name of the field. The Year component is a string with the year
#' component if the data type is currency or NA if the data type is not currency
#' or if the Year component has an invalid value. The Multiplier is a number if
#' the multiplier component is present and is valid. It is NA if there is no
#' multiplier component and NaN if the multiplier is invalid. Each component of
#' the list is named with the value of the Name component (i.e. the field name
#' without the year and multiplier elements.)
#' @export
parseInputFieldNames <- function(FieldNames_, Specs_ls, FileName) {
  #Define function to return a multiplier value from a multiplier string
  #NA if none, NaN if not a properly specified scientic notation (e.g. 1e3)
  getMultiplier <- function(String) {
    if (is.na(String)) {
      Result <- NA
    } else {
      #A valid multiplier splits on 'e' into exactly two numbers with a
      #coefficient of 1 (e.g. "1e3")
      SciTest_ <- as.numeric(unlist(strsplit(String, "e")))
      if (length(SciTest_) != 2 | any(is.na(SciTest_)) | SciTest_[1] != 1) {
        Result <- NaN
      } else {
        Result <- as.numeric(String)
      }
    }
    Result
  }
  #Define function to return a year value from a year string
  #NA if none or not a correct year
  #NOTE(review): as.numeric() on a non-numeric string emits a coercion warning,
  #and the upper bound compares a number to the character year taken from
  #Sys.Date() (relies on implicit coercion) — confirm intended
  getYear <- function(String) {
    CurrentString <- unlist(strsplit(as.character(Sys.Date()), "-"))[1]
    if (is.na(as.numeric(String)) | is.na(String)) {
      Result <- NA
    } else {
      if (as.numeric(String) < 1900 | as.numeric(String) > CurrentString) {
        Result <- NA
      } else {
        Result <- String
      }
    }
    Result
  }
  #Make a list to store results
  Fields_ls <- list()
  #Make an index to the specified field names
  SpecdNames_ <- unlist(lapply(Specs_ls, function(x) x$NAME))
  for (i in 1:length(FieldNames_)) {
    Fields_ls[[i]] <- list()
    FieldName <- FieldNames_[i]
    Fields_ls[[i]]$Error <- character(0)
    #Split the parts of the units specification
    NameSplit_ <- unlist(strsplit(FieldName, "\\."))
    #The field name is the first element
    Name <- NameSplit_[1]
    Fields_ls[[i]]$Name <- Name
    #If the field name is "Geo" or "Year" move on to next field
    if (Name %in% c("Geo", "Year")) next()
    #Check that the parsed name is one of the specified field names
    if (!(Name %in% SpecdNames_)) {
      Fields_ls[[i]]$Year <- NA
      Fields_ls[[i]]$Multiplier <- NA
      Msg <- paste0("Field name ", FieldName, " does not parse to a name that ",
                    "can be recognized as one of the names specified for the ",
                    "input file ", FileName)
      Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
      rm(Msg)
      next()
    }
    #Decode the Year and Multiplier portions
    FieldType <- Specs_ls[[which(SpecdNames_ == Name)]]$TYPE
    if (FieldType == "currency") {
      #Currency fields are Name.Year.Multiplier; others are Name.Multiplier
      Fields_ls[[i]]$Year <- getYear(NameSplit_[2])
      Fields_ls[[i]]$Multiplier <- getMultiplier(NameSplit_[3])
    } else {
      Fields_ls[[i]]$Year <- NA
      Fields_ls[[i]]$Multiplier <- getMultiplier(NameSplit_[2])
    }
    #If currency type, check that value is correct or give an error
    if (FieldType == "currency") {
      #Deflator years come from the framework model state (defined elsewhere)
      AllowedYears_ <- as.character(getModelState()$Deflators$Year)
      if (is.na(Fields_ls[[i]]$Year)) {
        Msg <- paste0("Field name ", FieldName, " in input file ", FileName,
                      " has a specification TYPE of currency, but the parsed year ",
                      "component is missing or is not a valid year. ",
                      "See documentation for details on how to properly name ",
                      "a field name that has a year component. ")
        Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
        rm(Msg)
      } else {
        if (!(Fields_ls[[i]]$Year %in% AllowedYears_)) {
          Msg <- paste0("Field name ", FieldName, " in input file ", FileName,
                        " has a specification TYPE of currency, but the parsed year ",
                        "component is not one for which there is a deflator. ",
                        "If the year component is correct, then the deflators file ",
                        "must be corrected to include a deflator for the year. ",
                        "See documentation for details on the deflator file requirements.")
          Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
          rm(Msg)
        }
      }
    }
    #Check whether multiplier is correct or give an error
    if (is.nan(Fields_ls[[i]]$Multiplier)) {
      Msg <- paste0("Field name ", FieldName, " in input file ", FileName,
                    " has parsed multiplier component that is not valid. ",
                    "See documentation for details on how to properly name ",
                    "a field name that has a multiplier component. ")
      Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
      rm(Msg)
    }
  }
  names(Fields_ls) <- unlist(lapply(Fields_ls, function(x) x$Name))
  Fields_ls
}
# items <- item <- list
# Specs_ls <-
#   items(
#     item(
#       NAME = "TotHhPop",
#       TYPE = "double",
#       UNITS = "persons"
#     ),
#     item(
#       NAME = "TotHhIncome",
#       TYPE = "currency",
#       UNITS = "USD"
#     )
#   )
# FieldNames_ <- c("Geo", "Year", "TotHhPop.1e3", "TotHhIncome.2000")
# temp_ls <- parseInputFieldNames(FieldNames_, Specs_ls, "test.csv")
# FieldNames_ <- c("Geo", "Year", "TotHhPop.1000", "TotHhIncome.1998.1000")
# temp_ls <- parseInputFieldNames(FieldNames_, Specs_ls, "test.csv")
# FieldNames_ <- c("Geo", "Year", "TotHhPop.1000", "TotHhIncome.hello.1000")
# parseInputFieldNames(FieldNames_, Specs_ls, "test.csv")
# unlist(lapply(temp_ls, function(x) x$Error))

#PROCESS MODULE INPUT FILES
#==========================
#' Process module input files
#'
#' \code{processModuleInputs} processes input files identified in a module's
#' 'Inp' specifications in preparation for saving in the datastore.
#'
#' This function processes the input files identified in a module's 'Inp'
#' specifications in preparation for saving the data in the datastore. Several
#' processes are carried out. The existence of each specified input file is
#' checked. Files that are not global, are checked to determine that they have
#' 'Year' and 'Geo' columns. The entries in the 'Year' and 'Geo' columns are
#' checked to make sure they are complete and there are no duplicates. The data
#' in each column are checked against specifications to determine conformance.
#' The function returns a list which contains a list of error messages and a
#' list of the data inputs. The function also writes error messages and warnings
#' to the log file.
#'
#' @param ModuleSpec_ls a list of module specifications that is consistent with
#' the VisionEval requirements.
#' @param ModuleName a string identifying the name of the module (used to document
#' module in error messages).
#' @param Dir a string identifying the relative path to the directory where the
#' model inputs are contained.
#' @return A list containing the results of the input processing. The list has
#' two components. The first (Errors) is a vector of identified file and data
#' errors. The second (Data) is a list containing the data in the input files
#' organized in the standard format for data exchange with the datastore.
#' @export
processModuleInputs <- function(ModuleSpec_ls, ModuleName, Dir = "inputs") {
  #getModelState() is a framework accessor defined elsewhere in the package
  G <- getModelState()
  FileErr_ <- character(0)
  FileWarn_ <- character(0)
  InpSpec_ls <- ModuleSpec_ls$Inp
  #ORGANIZE THE SPECIFICATIONS BY INPUT FILE AND NAME
  SortSpec_ls <- list()
  for (i in 1:length(InpSpec_ls)) {
    Spec_ls <- InpSpec_ls[[i]]
    File <- Spec_ls$FILE
    Name <- Spec_ls$NAME
    if (is.null(SortSpec_ls[[File]])) {
      SortSpec_ls[[File]] <- list()
    }
    SortSpec_ls[[File]][[Name]] <- Spec_ls
    rm(Spec_ls, File, Name)
  }
  #Initialize a list to store all the input data
  #initDataList() is a framework helper defined elsewhere
  Data_ls <- initDataList()
  #ITERATE THROUGH SORTED SPECIFICATIONS AND LOAD DATA INTO LIST
  Files_ <- names(SortSpec_ls)
  for (File in Files_) {
    #Extract the specifications
    Spec_ls <- SortSpec_ls[[File]]
    #Check that file exists
    if (!file.exists(file.path(Dir, File))) {
      Msg <- paste(
        "Input file error.",
        "File '", File, "' required by '", ModuleName,
        "' is not present in the 'inputs' directory."
      )
      FileErr_ <- c(FileErr_, Msg)
      next()
    }
    #Read in the data file
    Data_df <- read.csv(file.path(Dir, File), as.is = TRUE)
    #Parse the field names of the data file
    ParsedNames_ls <- parseInputFieldNames(names(Data_df), Spec_ls, File)
    ParsingErrors_ <- unlist(lapply(ParsedNames_ls, function(x) x$Error))
    #Replace file column names with the parsed (bare) dataset names
    names(Data_df) <- names(ParsedNames_ls)
    if (length(ParsingErrors_) != 0) {
      writeLog(
        c("Input file field name errors as follows:", ParsingErrors_))
      FileErr_ <- c(FileErr_, ParsingErrors_)
    } else {
      rm(ParsingErrors_)
    }
    #Identify the group and table the data is to be placed in
    #All datasets in one file must share a single GROUP and a single TABLE
    Group <- unique(unlist(lapply(Spec_ls, function(x) x$GROUP)))
    if (length(Group) != 1) {
      Msg <- paste0(
        "Input specification error for module '", ModuleName,
        "' for input file '", File, "'. ",
        "All datasets must have the same 'Group' specification."
      )
      FileErr_ <- c(FileErr_, Msg)
      Group <- Group[1]
    }
    Table <- unique(unlist(lapply(Spec_ls, function(x) x$TABLE)))
    if (length(Table) != 1) {
      Msg <- paste0(
        "Input specification error for module '", ModuleName,
        "' for input file '", File, "'. ",
        "All datasets must have the same 'Table' specification."
      )
      FileErr_ <- c(FileErr_, Msg)
      Table <- Table[1]
    }
    #Add Table and table attributes to data list if not already there
    if (is.null(Data_ls[[Group]][[Table]])) {
      Data_ls[[Group]][[Table]] <- list()
    }
    #If Group is Year, check that Geo and Year fields are correct
    if (Group == "Year") {
      #Check that there are 'Year' and 'Geo' fields
      HasYearField <- "Year" %in% names(Data_df)
      HasGeoField <- "Geo" %in% names(Data_df)
      if (!(HasYearField & HasGeoField)) {
        Msg <- paste0(
          "Input file error for module '", ModuleName,
          "' for input file '", File, "'. ",
          "'Group' specification is 'Year' or 'RunYear' ",
          "but the input file is missing required 'Year' ",
          "and/or 'Geo' fields."
        )
        FileErr_ <- c(FileErr_, Msg)
        next()
      }
      #Check that the file thas inputs for all years and geographic units
      #If so, save Year and Geo to table
      CorrectYearGeo <- checkInputYearGeo(Data_df$Year, Data_df$Geo, Group, Table)
      if (CorrectYearGeo$CompleteInput & !CorrectYearGeo$DupInput) {
        Data_ls[[Group]][[Table]]$Year <- Data_df$Year
        Data_ls[[Group]][[Table]]$Geo <- Data_df$Geo
      } else {
        if (!CorrectYearGeo$CompleteInput) {
          Msg <- paste0(
            "Input file error for module '", ModuleName,
            "' for input file '", File, "'. ",
            "Is missing inputs for the following Year/", Table,
            " combinations: ", CorrectYearGeo$MissingInputs
          )
          FileErr_ <- c(FileErr_, Msg)
        }
        if(CorrectYearGeo$DupInput){
          Msg <- paste0(
            "Input file error for module '", ModuleName,
            "' for input file '", File, "'. ",
            "Has duplicate inputs for the following Year/", Table,
            " combinations: ", CorrectYearGeo$DuplicatedInputs
          )
          FileErr_ <- c(FileErr_, Msg)
        }
        next()
      }
    }
    #Check and load data into list
    DataErr_ls <- list(Errors = character(0), Warnings = character(0))
    for (Name in names(Spec_ls)) {
      ThisSpec_ls <- Spec_ls[[Name]]
      Data_ <- Data_df[[Name]]
      #checkDataConsistency() is a framework check function defined elsewhere
      DataCheck_ls <- checkDataConsistency(Name, Data_, ThisSpec_ls)
      if (length(DataCheck_ls$Errors) != 0) {
        writeLog(DataCheck_ls$Errors)
        DataErr_ls$Errors <- c(DataErr_ls$Errors, DataCheck_ls$Errors)
        next()
      }
      if (length(DataCheck_ls$Warnings) != 0) {
        writeLog(DataCheck_ls$Warnings)
        DataErr_ls$Warnings <- c(DataErr_ls$Warnings, DataCheck_ls$Warnings)
      }
      #Convert currency from the nominal year encoded in the field name to
      #base year values using the framework deflateCurrency() function
      if (ThisSpec_ls$TYPE == "currency") {
        FromYear <- ParsedNames_ls[[Name]]$Year
        ToYear <- G$BaseYear
        if (!is.na(FromYear) & FromYear != ToYear) {
          Data_ <- deflateCurrency(Data_, FromYear, ToYear)
          rm(FromYear, ToYear)
        }
      }
      #Convert units for complex (non-primitive) types to the datastore units
      SimpleTypes_ <- c("integer", "double", "character", "logical")
      ComplexTypes_ <- names(Types())[!(names(Types()) %in% SimpleTypes_)]
      if (ThisSpec_ls$TYPE %in% ComplexTypes_) {
        FromUnits <- ThisSpec_ls$UNITS
        Conversion_ls <- convertUnits(Data_, ThisSpec_ls$TYPE, FromUnits)
        Data_ <-
          Conversion_ls$Values
        #Update UNITS to reflect datastore units
        ThisSpec_ls$UNITS <- Conversion_ls$ToUnits
        rm(FromUnits, Conversion_ls)
      }
      rm(SimpleTypes_, ComplexTypes_)
      #Convert magnitude
      Multiplier <- ParsedNames_ls[[Name]]$Multiplier
      if (!is.na(Multiplier)) {
        Data_ <- convertMagnitude(Data_, Multiplier, 1)
      }
      rm(Multiplier)
      #Assign UNITS attribute to Data_ because storage units may be different
      #than the input data UNITS
      attributes(Data_) <- list(UNITS = ThisSpec_ls$UNITS)
      #Assign Data_ to Data_ls
      Data_ls[[Group]][[Table]][[Name]] <- Data_
    }
    if (length(DataErr_ls$Errors) != 0) {
      Msg <- paste0(
        "Input file error for module '", ModuleName,
        "' for input file '", File, "'. ",
        "Has one or more errors in the data inputs as follows:"
      )
      FileErr_ <- c(FileErr_, Msg, DataErr_ls$Errors)
      writeLog(FileErr_)
    }
    if (length(DataErr_ls$Warnings) != 0) {
      Msg <- paste0(
        "Input file warnings for module '", ModuleName,
        "' for input file '", File, "'. ",
        "Has one or more warnings for the data inputs as follows:"
      )
      FileWarn_ <- c(FileWarn_, Msg, DataErr_ls$Warnings)
      writeLog(FileWarn_)
    }
  }#End loop through input files
  #RETURN THE RESULTS
  list(Errors = FileErr_, Data = Data_ls)
}
/sources/framework/visioneval/R/validation.R
permissive
cities-lab/VisionEval
R
false
false
71,266
r
#============
#validation.R
#============
#This script defines functions that are used to validate data prior to
#reading or writing to the datastore. The functions check whether the datastore
#contains the group/table/dataset requested and whether the data match
#specifications. Unlike the functions defined in the "hdf5.R" script, these
#functions don't directly interact with the datastore. Instead, they rely on the
#datastore listing (Datastore) that is maintained in the model state file.

#CHECK DATASET EXISTENCE
#=======================
#' Check dataset existence
#'
#' \code{checkDataset} checks whether a dataset exists in the datastore and
#' returns a TRUE or FALSE value with an attribute of the full path to where the
#' dataset should be located in the datastore.
#'
#' The dataset is identified by its name and by the table and group it belongs
#' to. The function builds the 'Group/Table/Name' path, looks it up in the
#' datastore listing, and reports whether it is present. The path is attached
#' to the returned logical as the 'DatasetName' attribute so that callers can
#' reuse it without rebuilding it.
#'
#' @param Name a string identifying the dataset name.
#' @param Table a string identifying the table the dataset is a part of.
#' @param Group a string or numeric representation of the group the table is a
#' part of.
#' @param DstoreListing_df a dataframe which lists the contents of the datastore
#' as contained in the model state file.
#' @return A logical identifying whether the dataset is in the datastore, with
#' a 'DatasetName' attribute holding the full datastore path of the dataset.
#' @export
checkDataset <- function(Name, Table, Group, DstoreListing_df) {
  #Build the full 'Group/Table/Name' datastore path for the dataset
  DatasetPath <-
    file.path(as.character(Group), as.character(Table), as.character(Name))
  #Flag whether the path appears in the datastore listing
  IsPresent <- DatasetPath %in% DstoreListing_df$groupname
  attr(IsPresent, "DatasetName") <- DatasetPath
  IsPresent
}

#GET ATTRIBUTES OF A DATASET
#===========================
#' Get attributes of a dataset
#'
#' \code{getDatasetAttr} retrieves the attributes for a dataset in the datastore
#'
#' This function extracts the listed attributes for a specific dataset from the
#' datastore listing.
#'
#' @param Name a string identifying the dataset name.
#' @param Table a string identifying the table the dataset is a part of.
#' @param Group a string or numeric representation of the group the table is a
#' part of.
#' @param DstoreListing_df a dataframe which lists the contents of the datastore
#' as contained in the model state file.
#' @return A named list of the dataset attributes.
#' @export
getDatasetAttr <- function(Name, Table, Group, DstoreListing_df) {
  #Locate the listing row whose path matches 'Group/Table/Name' and return
  #the attribute list stored for that row
  IsMatch_ <- DstoreListing_df$groupname == file.path(Group, Table, Name)
  DstoreListing_df$attributes[[which(IsMatch_)]]
}

#CHECK WHETHER TABLE EXISTS
#==========================
#' Check whether table exists in the datastore
#'
#' \code{checkTableExistence} checks whether a table is present in the
#' datastore.
#'
#' This function checks whether a table is present in the datastore.
#'
#' @param Table a string identifying the table.
#' @param Group a string or numeric representation of the group the table is a
#' part of.
#' @param DstoreListing_df a dataframe which lists the contents of the datastore
#' as contained in the model state file.
#' @return A logical identifying whether a table is present in the datastore. #' @export checkTableExistence <- function(Table, Group, DstoreListing_df) { TableName <- file.path(Group, Table) TableName %in% DstoreListing_df$groupname } #CHECK SPECIFICATION CONSISTENCY #=============================== #' Check specification consistency #' #' \code{checkSpecConsistency} checks whether the specifications for a dataset #' are consistent with the data attributes in the datastore #' #' This function compares the specifications for a dataset identified in a #' module "Get" or "Set" are consistent with the attributes for that data in the #' datastore. #' #' @param Spec_ls a list of data specifications consistent with a module "Get" #' or "Set" specifications. #' @param DstoreAttr_ a named list where the components are the attributes of a #' dataset. #' @return A list containing two components, Errors and Warnings. If no #' inconsistencies are found, both components will have zero-length character #' vectors. If there are one or more inconsistencies, then these components #' will hold vectors of error and warning messages. Mismatch between UNITS #' will produce a warning message. All other inconsistencies will produce #' error messages. #' @export checkSpecConsistency <- function(Spec_ls, DstoreAttr_) { Errors_ <- character(0) Warnings_ <- character(0) if (Spec_ls$TYPE != DstoreAttr_$TYPE) { Message <- paste0( "TYPE mismatch for ", Spec_ls$NAME, ". ", "Module ", Spec_ls$MODULE, " asks for TYPE = (", Spec_ls$TYPE, "). ", "Datastore contains TYPE = (", DstoreAttr_$TYPE, ")." ) Errors_ <- c(Errors_, Message) } #With code that allows unit conversions, can't expect units to be the same # if (Spec_ls$UNITS != DstoreAttr_$UNITS) { # Message <- paste0( # "UNITS mismatch for ", Spec_ls$NAME, ". ", # "Module ", Spec_ls$MODULE, "asks for UNITS = (", Spec_ls$UNITS, "). ", # "Datastore contains UNITS = (", DstoreAttr_$UNITS, ")." 
# ) # Warnings_ <- c(Warnings_, Message) # } if (!is.null(Spec_ls$PROHIBIT) & !is.null(DstoreAttr_$PROHIBIT)) { if (!all(Spec_ls$PROHIBIT %in% DstoreAttr_$PROHIBIT) | !all(DstoreAttr_$PROHIBIT %in% Spec_ls$PROHIBIT)) { SpecProhibit <- paste(Spec_ls$PROHIBIT, collapse = ", ") DstoreProhibit <- paste(DstoreAttr_$PROHIBIT, collapse = ", ") Message <- paste0( "PROHIBIT mismatch for ", Spec_ls$NAME, ". ", "Module ", Spec_ls$MODULE, " specifies PROHIBIT as (", SpecProhibit, "). ", "Datastore specifies PROHIBIT as (", DstoreProhibit, ")." ) Errors_ <- c(Errors_, Message) } } if (!is.null(Spec_ls$ISELEMENTOF) & !is.null(DstoreAttr_$ISELEMENTOF)) { if (!all(Spec_ls$ISELEMENTOF %in% DstoreAttr_$ISELEMENTOF) | !all(DstoreAttr_$ISELEMENTOF %in% Spec_ls$ISELEMENTOF)) { SpecElements <- paste(Spec_ls$ISELEMENTOF, collapse = ", ") DstoreElements <- paste(DstoreAttr_$ISELEMENTOF, collapse = ", ") Message <- paste0( "ISELEMENTOF mismatch for ", Spec_ls$NAME, ". ", "Module ", Spec_ls$MODULE, " specifies ISELEMENTOF as (", SpecElements, "). ", "Datastore specifies ISELEMENTOF as (", DstoreElements, ")." ) Errors_ <- c(Errors_, Message) } } list(Errors = Errors_, Warnings = Warnings_) } #CHECK DATA TYPE #=============== #' Check data type #' #' \code{checkMatchType} checks whether the data type of a data vector is #' consistent with specifications. #' #' This function checks whether the data type of a data vector is consistent #' with a specified data type. An error message is generated if data can't be #' coerced into the specified data type without the possibility of error or loss #' of information (e.g. if a double is coerced to an integer). A warning message #' is generated if the specified type is 'character' but the input data type is #' 'integer', 'double' or 'logical' since these can be coerced correctly, but #' that may not be what is intended (e.g. zone names may be input as numbers). 
#' Note that some modules may use NA inputs as a flag to identify case when
#' result does not need to match a target. In this case, R will read in the type
#' of data as logical. In this case, the function sets the data type to be the
#' same as the specification for the data type so the function does not flag a
#' data type error.
#'
#' @param Data_ A data vector.
#' @param Type A string identifying the specified data type.
#' @param DataName A string identifying the field name of the data being
#' compared (used for composing message identifying non-compliant fields).
#' @return A list having 2 components, Errors and Warnings. If no error or
#' warning is identified, both components will contain a zero-length character
#' string. If either an error or warning is identified, the relevant component
#' will contain a character string that identifies the data field and the type
#' mismatch.
#' @export
checkMatchType <- function(Data_, Type, DataName) {
  DataType <- typeof(Data_)
  #An all-NA vector is read as logical; treat it as matching the specified
  #type so that NA flag values are not reported as a type error
  if (all(is.na(Data_))) DataType <- Type
  #Spec-type/data-type pairs that cannot be coerced without possible loss of
  #information (e.g. double data where an integer is specified)
  ErrCombos_ <- c(
    "integerdouble", "integercharacter", "integerlogical",
    "doublecharacter", "doublelogical",
    "logicalinteger", "logicaldouble", "logicalcharacter"
  )
  #Pairs that coerce correctly but may not be what is intended
  #(e.g. numeric zone names where character is specified)
  WarnCombos_ <- c("characterinteger", "characterdouble", "characterlogical")
  Combo <- paste0(Type, DataType)
  Msg <- paste0(
    "Type of data in field '", DataName, "' is ", DataType,
    " but is specified as ", Type
  )
  if (Combo %in% ErrCombos_) {
    list(Error = Msg, Warning = character(0))
  } else if (Combo %in% WarnCombos_) {
    list(Error = character(0), Warning = Msg)
  } else {
    #All other combinations (including exact matches) are acceptable
    list(Error = character(0), Warning = character(0))
  }
}


#CHECK VALUES WITH CONDITIONS
#============================
#' Check values with conditions.
#'
#' \code{checkMatchConditions} checks whether a data vector contains any
#' elements that match a set of conditions.
#'
#' This function checks whether any of the values in a data vector match one or
#' more conditions. The conditions are specified in a character vector where
#' each element is either "NA" (to match for the existence of NA values) or a
#' character representation of a valid R comparison expression for comparing
#' each element with a specified value (e.g. "< 0", "> 1", "!= 10"). This
#' function is used both for checking for the presence of prohibited values and
#' for the presence of unlikely values.
#'
#' @param Data_ A vector of data of type integer, double, character, or logical.
#' @param Conditions_ A character vector of valid R comparison expressions or an
#' empty vector if there are no conditions.
#' @param DataName A string identifying the field name of the data being
#' compared (used for composing message identifying non-compliant fields).
#' @param ConditionType A string having a value of either "PROHIBIT" or
#' "UNLIKELY", the two data specifications which use conditions.
#' @return A character vector of messages which identify the data field and the
#' condition that is not met. A zero-length vector is returned if none of the
#' conditions are met.
#' @export
checkMatchConditions <- function(Data_, Conditions_, DataName, ConditionType) {
  #No conditions to check: either a zero-length vector or a single empty
  #string. (The original guard only handled the single empty string; a
  #zero-length vector caused an error when the 1:length() loop below indexed
  #a missing element.)
  if (length(Conditions_) == 0) {
    return(character(0))
  }
  if (length(Conditions_) == 1 && Conditions_ == "") {
    return(character(0))
  }
  makeMessage <- function(Cond) {
    paste0("Data in data name '", DataName, "' includes values matching ",
           ConditionType, " condition (", Cond, ").")
  }
  Results_ <- character(0)
  DataChecks_ <- list()
  for (i in seq_along(Conditions_)) {
    Cond <- Conditions_[i]
    if (Cond == "NA") {
      #"NA" condition matches the presence of any missing value
      DataChecks_[[i]] <- any(is.na(Data_))
    } else {
      #Remove NA values so comparisons don't yield NA results; missing values
      #are only flagged by the explicit "NA" condition above
      TempData_ <- Data_[!is.na(Data_)]
      #NOTE: conditions come from module specifications (a trusted source);
      #eval(parse()) would be unsafe if conditions came from external input
      DataChecks_[[i]] <- any(eval(parse(text = paste("TempData_", Cond))))
      rm(TempData_)
    }
  }
  #Compose one message for each condition that matched
  TrueConditions_ <- Conditions_[unlist(DataChecks_)]
  for (Condition in TrueConditions_) {
    Results_ <- c(Results_, makeMessage(Condition))
  }
  Results_
}


#CHECK IF DATA VALUES ARE IN A SPECIFIED SET OF VALUES
#=====================================================
#' Check if data values are in a specified set of values
#'
#' \code{checkIsElementOf} checks whether a data vector contains any elements
#' that are not in an allowed set of values.
#'
#' This function is used to check whether categorical data values are consistent
#' with the defined set of allowed values.
#'
#' @param Data_ A vector of data of type integer, double, character, or logical.
#' @param SetElements_ A vector of allowed values.
#' @param DataName A string identifying the field name of the data being
#' compared (used for composing message identifying non-compliant fields).
#' @return A character vector of messages which identify the data field and the
#' condition that is not met. A zero-length vector is returned if none of the
#' conditions are met.
#' @export
checkIsElementOf <- function(Data_, SetElements_, DataName){
  #No set to check against: either a zero-length vector or a single empty
  #string means there is no ISELEMENTOF restriction (mirrors the guard in
  #checkMatchConditions; previously a zero-length set flagged every value)
  if (length(SetElements_) == 0) {
    return(character(0))
  }
  if (length(SetElements_) == 1 && SetElements_ == "") {
    return(character(0))
  }
  makeMessage <- function(El) {
    paste0("Data in data name '", DataName,
           "' includes value (", El, ") not in allowed set."
    )
  }
  Results_ <- character(0)
  IsElement_ <- is.element(Data_, SetElements_)
  #One message per distinct disallowed value
  ProhibitedElements_ <- unique(Data_[!IsElement_])
  for (Element in ProhibitedElements_) {
    Results_ <- c(Results_, makeMessage(Element))
  }
  Results_
}


#CHECK DATA CONSISTENCY WITH SPECIFICATION
#=========================================
#' Check data consistency with specification
#'
#' \code{checkDataConsistency} checks whether data to be written to a dataset is
#' consistent with the dataset attributes.
#'
#' This function compares characteristics of data to be written to a dataset to
#' the dataset attributes to determine whether they are consistent.
#'
#' @param DatasetName A string identifying the dataset that is being checked.
#' @param Data_ A vector of values that may be of type integer, double,
#' character, or logical.
#' @param DstoreAttr_ A named list where the components are the attributes of a
#' dataset.
#' @return A list containing two components, Errors and Warnings. If no
#' inconsistencies are found, both components will have zero-length character
#' vectors. If there are one or more inconsistencies, then these components
#' will hold vectors of error and warning messages. Mismatch between UNITS
#' will produce a warning message. All other inconsistencies will produce
#' error messages.
#' @export
checkDataConsistency <- function(DatasetName, Data_, DstoreAttr_) {
  Errors_ <- character(0)
  Warnings_ <- character(0)
  #Check data TYPE
  TypeCheckResult_ <- checkMatchType(Data_, DstoreAttr_$TYPE, DatasetName)
  if (length(TypeCheckResult_$Error) != 0) {
    Message <- paste0("The storage mode of the data (", typeof(Data_),
                      ") does not match the storage mode of datastore (",
                      DstoreAttr_["TYPE"], ") for dataset ", DatasetName)
    Errors_ <- c(Errors_, Message)
  }
  if (length(TypeCheckResult_$Warning) != 0) {
    Message <- paste0("The storage mode of the data (", typeof(Data_),
                      ") does not match the storage mode of datastore (",
                      DstoreAttr_["TYPE"], ") for dataset ", DatasetName)
    #Fix: the original composed this message but never added it to Warnings_,
    #so type warnings were silently dropped
    Warnings_ <- c(Warnings_, Message)
  }
  #Check if character and SIZE is adequate
  if (typeof(Data_) == "character") {
    MaxSize <- max(nchar(Data_))
    if (MaxSize > DstoreAttr_$SIZE) {
      Message <- paste0("Attempting to write character data of length (",
                        MaxSize,
                        ") which is longer than specified in datastore (",
                        DstoreAttr_["SIZE"], ") for dataset ", DatasetName)
      Errors_ <- c(Errors_, Message)
    }
  }
  #Check if any values in PROHIBIT
  if (!is.null(DstoreAttr_$PROHIBIT)) {
    if (DstoreAttr_$PROHIBIT[1] != "") {
      Message <- checkMatchConditions(
        Data_, DstoreAttr_$PROHIBIT, DatasetName, "PROHIBIT")
      Errors_ <- c(Errors_, Message)
    }
  }
  #Check if all values in ISELEMENTOF
  if (!is.null(DstoreAttr_$ISELEMENTOF)) {
    if (DstoreAttr_$ISELEMENTOF[1] != "") {
      Message <- checkIsElementOf(
        Data_, DstoreAttr_$ISELEMENTOF, DatasetName)
      Errors_ <- c(Errors_, Message)
    }
  }
  #Check if any values in UNLIKELY
  if (!is.null(DstoreAttr_$UNLIKELY)) {
    #Index with [1] as the sibling checks do; a multi-element UNLIKELY vector
    #previously made this a length > 1 condition (an error in R >= 4.2)
    if (DstoreAttr_$UNLIKELY[1] != "") {
      Message <- checkMatchConditions(
        Data_, DstoreAttr_$UNLIKELY, DatasetName, "UNLIKELY")
      Warnings_ <- c(Warnings_, Message)
    }
  }
  #Check whether the sum of values equals the value specified in TOTAL
  if (!is.null(DstoreAttr_$TOTAL)) {
    if (DstoreAttr_$TOTAL != "") {
      #NOTE(review): exact equality on a possibly floating point sum; a
      #tolerance (all.equal) may be intended -- confirm before changing
      if (sum(Data_) != DstoreAttr_$TOTAL) {
        Message <-
          paste("Sum of", DatasetName, "does not match specified total.")
        Errors_ <- c(Errors_, Message)
      }
    }
  }
  #Return list of errors and warnings
  list(Errors = Errors_, Warnings = Warnings_)
}


#PARSE UNITS SPECIFICATION
#=========================
#' Parse units specification into components and add to specifications list.
#'
#' \code{parseUnitsSpec} parses the UNITS attribute of a standard Inp, Get, or
#' Set specification for a dataset to identify the units name, multiplier, and
#' year for currency data. Returns a modified specifications list whose UNITS
#' value is only the units name, and includes a MULTIPLIER attribute and YEAR
#' attribute.
#'
#' The UNITS component of a specifications list can encode information in
#' addition to the units name. This includes a value units multiplier and in
#' the case of currency values the year for the currency measurement. The
#' multiplier element can only be expressed in scientific notation where the
#' number before the 'e' can only be 1.
#'
#' @param Spec_ls A standard specifications list for a Inp, Get, or Set item.
#' @return a list that is a standard specifications list with the addition of
#' a MULTIPLIER component and a YEAR component as well as a modification of the
#' UNIT component. The MULTIPLIER component can have the value of NA, a number,
#' or NaN. The value is NA if the multiplier is missing. It is a number if the
#' multiplier is a valid number. The value is NaN if the multiplier is not a
#' valid number. The YEAR component is a character string that is a 4-digit
#' representation of a year or NA if the component is missing or not a proper
#' year. The UNITS component is modified to only be the units name.
#' @export
parseUnitsSpec <- function(Spec_ls) {
  #Define function to return a multiplier value from a multiplier string
  #NA if none, NaN if not a properly specified scientific notation (e.g. 1e3)
  getMultiplier <- function(String) {
    if (is.na(String)) {
      return(NA)
    }
    #suppressWarnings: a non-numeric element would otherwise emit a spurious
    #coercion warning; the NaN return value reports the problem instead
    SciParts_ <- suppressWarnings(as.numeric(unlist(strsplit(String, "e"))))
    if (length(SciParts_) != 2 || any(is.na(SciParts_)) || SciParts_[1] != 1) {
      NaN
    } else {
      as.numeric(String)
    }
  }
  #Define function to return a year value from a year string
  #NA if none or not a correct year (before 1900 or after the current year)
  getYear <- function(String) {
    if (is.na(String)) {
      return(NA)
    }
    YearNum <- suppressWarnings(as.numeric(String))
    if (is.na(YearNum)) {
      return(NA)
    }
    CurrentYear <-
      as.numeric(unlist(strsplit(as.character(Sys.Date()), "-"))[1])
    if (YearNum < 1900 || YearNum > CurrentYear) {
      NA
    } else {
      String
    }
  }
  #Split the parts of the units specification
  UnitsSplit_ <- unlist(strsplit(Spec_ls$UNITS, "\\."))
  #The units name is the first element
  Spec_ls$UNITS <- UnitsSplit_[1]
  #If currency type, the year is the 2nd element and multiplier is 3rd element
  if (Spec_ls$TYPE == "currency") {
    Year <- UnitsSplit_[2]
    Multiplier <- UnitsSplit_[3]
  } else {
    Year <- NA
    Multiplier <- UnitsSplit_[2]
  }
  #Process the multiplier element
  Spec_ls$MULTIPLIER <- getMultiplier(Multiplier)
  #Process the year element
  Spec_ls$YEAR <- getYear(Year)
  #Return the result
  Spec_ls
}


#RECOGNIZED TYPES AND UNITS ATTRIBUTES FOR SPECIFICATIONS
#========================================================
#' Returns a list of recognized data types, the units for each
#' type, and storage mode of each type.
#'
#' \code{Types} returns a list of returns a list of recognized data types, the
#' units for each type, and storage mode of each type.
#'
#' This function stores a listing of the dataset types recognized by the
#' visioneval framework, the units recognized for each type, and the storage
#' mode used for each type. Types include simple types (e.g. integer, double,
#' character, logical) as well as complex types (e.g. distance, time, mass).
#' For example for the distance
#' type, allowed units are MI (miles), FT (feet), KM (kilometers), M (meters).
#' The listing includes conversion factors between units of each complex type.
#' The listing also contains the storage mode (i.e. integer, double, character,
#' logical of each type. For simple types, the type and the storage mode are the
#' same).
#'
#' @return A list containing a component for each recognized type. Each
#' component lists the recognized units for the type and the storage mode. There
#' are currently 4 simple types, a compound type, and 12 complex types. The
#' simple types are integer, double, character and logical. The complex types
#' are currency, distance, area, mass, volume, time, people, vehicles, trips,
#' households, employment, and activity.
#' @export
Types <- function(){
  list(
    #Simple types: units are not applicable
    double = list(units = NA, mode = "double"),
    integer = list(units = NA, mode = "integer"),
    character = list(units = NA, mode = "character"),
    logical = list(units = NA, mode = "logical"),
    #Compound type: units are composed from complex type units (e.g. MI/HR)
    compound = list(units = NA, mode = "double"),
    #Complex types: each named vector gives the factors for converting one
    #unit of the vector's name into each of the other units of the type
    currency = list(
      units = list(
        USD = c(USD = 1)
      ),
      mode = "double"),
    distance = list(
      units = list(
        MI = c(MI = 1, FT = 5280, KM = 1.60934, M = 1609.34),
        FT = c(MI = 0.000189394, FT = 1, KM = 0.0003048, M = 0.3048),
        KM = c(MI = 0.621371, FT = 3280.84, KM = 1, M = 1000),
        M = c(MI = 0.000621371, FT = 3.28084, KM = 0.001, M = 1)),
      mode = "double"),
    area = list(
      units = list(
        SQMI = c(SQMI = 1, ACRE = 640, SQFT = 2.788e+7, SQM = 2.59e+6,
                 HA = 258.999, SQKM = 2.58999),
        ACRE = c(SQMI = 0.0015625, ACRE = 1, SQFT = 43560, SQM = 4046.86,
                 HA = 0.404686, SQKM = 0.00404686),
        SQFT = c(SQMI = 3.587e-8, ACRE = 2.2957e-5, SQFT = 1, SQM = 0.092903,
                 HA = 9.2903e-6, SQKM = 9.2903e-8),
        SQM = c(SQMI = 3.861e-7, ACRE = 0.000247105, SQFT = 10.7639, SQM = 1,
                HA = 1e-4, SQKM = 1e-6),
        #Fix: the SQM factor was 0.00386102 (a copy of the SQMI factor);
        #1 hectare = 10,000 square meters
        HA = c(SQMI = 0.00386102, ACRE = 2.47105, SQFT = 107639, SQM = 10000,
               HA = 1, SQKM = 0.01),
        SQKM = c(SQMI = 0.386102, ACRE = 247.105, SQFT = 1.076e+7, SQM = 1e+6,
                 HA = 100, SQKM = 1)),
      mode = "double"
    ),
    mass = list(
      units = list(
        LB = c(LB = 1, TON = 0.0005, MT = 0.000453592, KG = 0.453592,
               GM = 453.592),
        TON = c(LB = 2000, TON = 1, MT = 0.907185, KG = 907.185, GM = 907185),
        #Fix: the gram factor was named 'M' (a distance unit name); it must be
        #named 'GM' to be consistent with the other mass unit vectors
        MT = c(LB = 2204.62, TON = 1.10231, MT = 1, KG = 1000, GM = 1e+6),
        KG = c(LB = 2.20462, TON = 0.00110231, MT = 0.001, KG = 1, GM = 1000),
        GM = c(LB = 0.00220462, TON = 1.1023e-6, MT = 1e-6, KG = 0.001,
               GM = 1)),
      mode = "double"
    ),
    volume = list(
      units = list(
        GAL = c(GAL = 1, L = 3.78541),
        L = c(GAL = 0.264172, L = 1)),
      mode = "double"
    ),
    time = list(
      units = list(
        YR = c(YR = 1, DAY = 365, HR = 8760, MIN = 525600, SEC = 3.154e+7),
        DAY = c(YR = 0.00273973, DAY = 1, HR = 24, MIN = 1440, SEC = 86400),
        HR = c(YR = 0.000114155, DAY = 0.0416667, HR = 1, MIN = 60,
               SEC = 3600),
        MIN = c(YR = 1.9026e-6, DAY = 0.000694444, HR = 0.0166667, MIN = 1,
                SEC = 60),
        SEC = c(YR = 3.171e-8, DAY = 1.1574e-5, HR = 0.000277778,
                MIN = 0.0166667, SEC = 1)),
      mode = "double"
    ),
    #Count-based complex types: single unit, integer storage
    people = list(
      units = list(
        PRSN = c(PRSN = 1)
      ),
      mode = "integer"
    ),
    vehicles = list(
      units = list(
        VEH = c(VEH = 1)
      ),
      mode = "integer"
    ),
    trips = list(
      units = list(
        TRIP = c(TRIP = 1)
      ),
      mode = "integer"
    ),
    households = list(
      units = list(
        HH = c(HH = 1)
      ),
      mode = "integer"
    ),
    employment = list(
      units = list(
        JOB = c(JOB = 1)
      ),
      mode = "integer"
    ),
    activity = list(
      units = list(
        HHJOB = c(HHJOB = 1)
      ),
      #mode added for structural consistency: every other type supplies a
      #storage mode; activity is a count (households + jobs) so integer
      mode = "integer"
    )
  )
}


#CHECK MEASUREMENT UNITS
#=======================
#' Check measurement units for consistency with recognized units for stated type.
#'
#' \code{checkUnits}checks the specified UNITS for a dataset for consistency
#' with the recognized units for the TYPE specification for the dataset. It also
#' splits compound units into elements.
#'
#' The visioneval code recognizes 4 simple data types (integer, double, logical,
#' and character) and 12 complex data types (e.g. distance, time, mass).
#' The simple data types can have any units of measure, but the complex data
#' types must use units of measure that are declared in the Types() function. In
#' addition, there is a compound data type that can have units that are composed
#' of the units of two or more complex data types. For example, speed is a
#' compound data type composed of distance divided by speed. With this example,
#' speed in miles per hour would be represented as MI/HR. This function checks
#' the UNITS specification for a dataset for consistency with the recognized
#' units for the given data TYPE. To check the units of a compound data type,
#' the function splits the units into elements and the operators that separate
#' the elements. It identifies the element units, the complex data type for each
#' element and the operators that separate the elements.
#'
#' @param DataType a string which identifies the data type as specified in the
#' TYPE attribute for a data set.
#' @param Units a string identifying the measurement units as specified in the
#' UNITS attribute for a data set after processing with the parseUnitsSpec
#' function.
#' @return A list which contains the following elements:
#' DataType: a string identifying the data type.
#' UnitType: a string identifying whether the units correspond to a 'simple'
#' data type, a 'complex' data type, or a 'compound' data type.
#' Units: a string identifying the units.
#' Elements: a list containing the elements of a compound units. Components of
#' this list are:
#' Types: the complex type of each element,
#' Units: the units of each element,
#' Operators: the operators that separate the units.
#' Errors: a string containing an error message or character(0) if no error.
#' @import stringr
#' @export
checkUnits <- function(DataType, Units) {
  #Define return value template
  #Errors stays character(0) unless a problem is found; each early return
  #below fills in as much of the template as is known at that point
  Result_ls <- list(
    DataType = DataType,
    UnitType = character(0),
    Units = character(0),
    Elements = list(),
    Errors = character(0)
  )
  #Identify recognized data types and check if DataType is one of them
  DT_ <- names(Types())
  if (!(DataType %in% DT_)) {
    Msg <- paste0(
      "Data type is not a recognized data type. ",
      "Must be one of the following: ",
      paste(DT_, collapse = ", "),
      ".")
    Result_ls$Errors <- Msg
    return(Result_ls)
  }
  #Check if Units is a character type and has length equal to one
  #NOTE(review): scalar condition uses `|`; `||` would short-circuit --
  #behavior is identical for these length-1 operands
  if (length(Units) != 1 | typeof(Units) != "character") {
    Msg <- paste0(
      "Units value is not correctly specified. ",
      "Must be a string and must not be a vector."
    )
    Result_ls$Errors <- Msg
    Result_ls$Units <- Units
    return(Result_ls)
  }
  #Identify the units type (either simple, complex, or compound)
  #Build a lookup vector mapping every recognized data type to its unit class
  UT_ <- character(length(DT_))
  names(UT_) <- DT_
  UT_[DT_ %in% c("double", "integer", "character", "logical")] <- "simple"
  UT_[DT_ %in% "compound"] <- "compound"
  UT_[!UT_ %in% c("simple", "compound")] <- "complex"
  UnitType <- UT_[DataType]
  Result_ls$UnitType <- unname(UnitType)
  #Check Simple Type
  if (UnitType == "simple") {
    #No check necessary, assign units and return the result
    Result_ls$Units <- Units
    return(Result_ls)
  }
  #Check complex type
  if (UnitType == "complex") {
    Result_ls$Units <- Units
    #Check that Units are recognized for the specified data type
    AllowedUnits_ <- names(Types()[[DataType]]$units)
    if (!(Units %in% AllowedUnits_)) {
      Msg <- paste0(
        "Units specified for ", DataType, " are not correctly specified. ",
        "Must be one of the following: ",
        paste(AllowedUnits_, collapse = ", "),
        ".")
      Result_ls$Errors <- Msg
    }
    #Return the result
    return(Result_ls)
  }
  #Check compound type
  #Define function to identify the data type from a unit
  #Builds a units-name -> type-name map from all complex types; unlist()
  #auto-numbers duplicated list names, so the digits are stripped with gsub
  findTypeFromUnit <- function(Units_) {
    Complex_ls <- Types()[DT_[UT_ == "complex"]]
    AllUnits_ <- unlist(lapply(Complex_ls, function(x) names(x$units)))
    UnitsToTypes_ <- gsub("[0-9]", "", names(AllUnits_))
    names(UnitsToTypes_) <- AllUnits_
    UnitsToTypes_[Units_]
  }
  #Define function to split units from compound type
  #Splits on '*' and '/' while also recording which operator appeared where
  splitUnits <- function(Units){
    OperatorLoc_ <- str_locate_all(Units, "[*/]")[[1]][,1]
    Operators_ <- sapply(OperatorLoc_, function(x) substr(Units, x, x))
    UnitParts_ <- unlist(str_split(Units, "[*/]"))
    list(units = unname(UnitParts_),
         types = unname(findTypeFromUnit(UnitParts_)),
         operators = unname(Operators_))
  }
  #Extract the units, types, and operators from the compound units string
  Result_ls$Units <- Units
  Units_ls <- splitUnits(Units)
  Result_ls$Elements$Types <- Units_ls$types
  Result_ls$Elements$Units <- Units_ls$units
  Result_ls$Elements$Operators <- Units_ls$operators
  #Check whether all element units are correct
  #An element that maps to NA is not a unit of any recognized complex type
  UnitsNotFound <- Units_ls$units[is.na(Units_ls$types)]
  if (length(UnitsNotFound) != 0) {
    Msg <- paste0(
      "One or more of the component units of the compound unit ", Units,
      " can't be resolved into units of recognized complex data types. ",
      "The following units elements are not recognized: ",
      paste(UnitsNotFound, collapse = ", "),
      ".")
    Result_ls$Errors <- Msg
    return(Result_ls)
  }
  #Check whether any duplication of data types for element units
  #e.g. MI/KM mixes two distance units in one expression, which is ambiguous
  IsDupType_ <- duplicated(Units_ls$types)
  if (any(IsDupType_)) {
    DupTypes_ <- Units_ls$types[IsDupType_]
    DupUnits_ <- Units_ls$units[Units_ls$types %in% DupTypes_]
    Msg <- paste0(
      "Two or more of the component units of the compound unit ", Units,
      " are units in the same complex data type. ",
      "It does not make sense to have two units of the same complex type ",
      "in the same compound expression. The following units have the same type: ",
      paste(DupUnits_, collapse = ", "),
      ".")
    Result_ls$Errors <- Msg
    return(Result_ls)
  }
  #Return the result
  return(Result_ls)
}
# checkUnits("double", "person")
# checkUnits("Bogus", "person")
# checkUnits("people", "HH")
# checkUnits("distance", "MI")
# checkUnits("compound", "MI+KM")
# checkUnits("compound", "MI/KM")
# checkUnits("compound", "MI/HR")
# checkUnits("compound", "TRIP/PRSN/DAY")


#CHECK SPECIFICATION TYPE AND UNITS
#==================================
#' Checks the TYPE and UNITS and associated MULTIPLIER and YEAR attributes of a
#' Inp, Get, or Set specification for consistency.
#'
#' \code{checkSpecTypeUnits}Checks correctness of TYPE, UNITS, MULTIPLIER and
#' YEAR attributes of a specification that has been processed with the
#' parseUnitsSpec function.
#'
#' This function checks whether the TYPE and UNITS of a module's specification
#' contain errors. The check is done on a module specification in which the
#' module's UNITS attribute has been parsed by the parseUnitsSpec function to
#' split the name, multiplier, and years parts of the UNITS attribute. The TYPE
#' is checked against the types catalogued in the Types function. The units name
#' in the UNITS attribute is checked against the units names corresponding to
#' each type catalogued in the Types function. The MULTIPLIER is checked to
#' determine whether a value is a valid number, NA, or not a number (NaN). A NA
#' value means that no multiplier was specified (this is OK) a NaN value means
#' that a multiplier that is not a number was specified which is an error. The
#' YEAR attribute is checked to determine whether there is a proper
#' specification if the specified TYPE is currency. If the TYPE is currency, a
#' YEAR must be specified for Get and Set specifications.
#'
#' @param Spec_ls a list for a single specification (e.g.
#' a Get specification
#' for a dataset) that has been processed with the parseUnitsSpec function to
#' split the name, multiplier, and year elements of the UNITS specification.
#' @param SpecGroup a string identifying the group that this specification
#' comes from (e.g. Inp, Get, Set).
#' @param SpecNum a number identifying which specification in the order of the
#' SpecGroup. This is used to identify the subject specification if an error
#' is identified.
#' @return A vector containing messages identifying any errors that are found.
#' @export
checkSpecTypeUnits <- function(Spec_ls, SpecGroup, SpecNum) {
  Errors_ <- character(0)
  #(Unused locals Name and Table removed; only TYPE and UNITS are checked)
  Type <- Spec_ls$TYPE
  Units <- Spec_ls$UNITS
  AllowedTypes_ <- names(Types())
  #Check if type is an allowed type
  if (Type %in% AllowedTypes_) {
    #Check if units are correct for the type
    UnitsCheck <- checkUnits(Type, Units)
    if (length(UnitsCheck$Errors) == 0) {
      #Check that there is a valid year specification if type is currency:
      #Get/Set specs must have a year; Inp specs must not (the input file
      #declares the nominal year of the data)
      if (Type == "currency") {
        if (is.na(Spec_ls$YEAR) && SpecGroup %in% c("Get", "Set")) {
          Msg <-
            paste0("The TYPE specified for the ", SpecGroup, " specification ",
                   "number ", SpecNum, " is 'currency' but the UNITS ",
                   "specification does not contain a valid year element. ",
                   "A valid year element must be specified so that the ",
                   "framework knows how to convert currency values to and from ",
                   "the proper year for the module. ",
                   "See the user documentation for how to properly specify a ",
                   "year in the UNITS specification.")
          Errors_ <- c(Errors_, Msg)
        }
        if (!is.na(Spec_ls$YEAR) && SpecGroup == "Inp") {
          Msg <-
            paste0("The TYPE specified for the ", SpecGroup, " specification ",
                   "number ", SpecNum, " is 'currency' and the UNITS ",
                   "specification contains a year element. ",
                   "A year element must NOT be part of the UNITS ",
                   "specification for a ", SpecGroup, " specification because ",
                   "the input file has to specify the nominal year for the ",
                   "input data. For ", SpecGroup, " specifications, the UNITS ",
                   "specification must only include a units name.")
          Errors_ <- c(Errors_, Msg)
        }
      }
      #Check that multiplier is correct: NaN means a malformed multiplier in
      #Get/Set specs; any multiplier at all is an error in Inp specs
      if (is.nan(Spec_ls$MULTIPLIER) && SpecGroup %in% c("Get", "Set")) {
        Msg <-
          paste0("The UNITS specified for the ", SpecGroup, " specification ",
                 "number ", SpecNum, " does not contain a valid multiplier ",
                 "element. The multiplier element, if present, must use ",
                 "scientific notation with a coefficient of 1. ",
                 "See the user documentation for how to properly specify a ",
                 "multiplier in the UNITS attribute.")
        Errors_ <- c(Errors_, Msg)
      }
      if (!is.na(Spec_ls$MULTIPLIER) && SpecGroup == "Inp") {
        #Fix: the original message omitted the space after the specification
        #number (rendered e.g. "number 3incorrectly contains ...")
        Msg <-
          paste0("The UNITS attribute for the ", SpecGroup, " specification ",
                 "number ", SpecNum, " incorrectly contains a multiplier element. ",
                 "A multiplier element must NOT be part of the UNITS ",
                 "specification for a ", SpecGroup, " specification because ",
                 "the input file has to specify the multiplier for the ",
                 "input data, if there is one. For ", SpecGroup,
                 " specifications, the UNITS specification must only include ",
                 "a units name.")
        Errors_ <- c(Errors_, Msg)
      }
    } else {
      Msg <-
        paste0("UNITS specified for the ", SpecGroup, " specification ",
               "number ", SpecNum, " are incorrect as follows: ",
               UnitsCheck$Errors)
      Errors_ <- c(Errors_, Msg)
    }
  } else {
    Msg <-
      paste0("TYPE specified for the ", SpecGroup, " specification ",
             "number ", SpecNum, " has an incorrect type. ",
             "Check user documentation for list of allowed types.")
    Errors_ <- c(Errors_, Msg)
  }
  Errors_
}


#DEFINITION OF BASIC MODULE SPECIFICATIONS REQUIREMENTS
#======================================================
#' List basic module specifications to check for correctness
#'
#' \code{SpecRequirements}returns a list of basic requirements for module
#' specifications to be used for checking correctness of specifications.
#'
#' This function returns a list of the basic requirements for module
#' specifications.
The main components of the list are the specification groups:
#' RunBy, NewInpTable, NewSetTable, Inp, Get, Set. For each
#' item of each module specifications component, the list identifies the
#' required data type of the attribute entry and the allowed values for the
#' attribute entry.
#'
#' @return A list comprised of six named components: RunBy, NewInpTable,
#' NewSetTable, Inp, Get, Set. Each main component is a list that has a
#' component for each specification item that has values to be checked. For each
#' such item there is a list having two components: ValueType and ValuesAllowed.
#' The ValueType component identifies the data type that the data entry for the
#' item must have (e.g. character, integer). The ValuesAllowed item identifies
#' what values the item may have.
#' @export
SpecRequirements <- function(){
  #NOTE: "[0-9a-zA-Z_]" entries are regular expressions consumed by
  #stringr::str_detect() in checkSpec(), so a value passes when it contains at
  #least one allowed character; literal vectors (e.g. c("Global", "Year"))
  #enumerate the exact values allowed.
  list(
    RunBy = list(
      ValueType = "character",
      ValuesAllowed = c("Region", "Azone", "Bzone", "Czone", "Marea")
    ),
    NewInpTable = list(
      TABLE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      GROUP = list(ValueType = "character", ValuesAllowed = c("Global", "Year"))
    ),
    NewSetTable = list(
      TABLE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      GROUP = list(ValueType = "character", ValuesAllowed = c("Global", "Year"))
    ),
    Inp = list(
      NAME = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      FILE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_][.csv]"),
      TABLE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      GROUP = list(ValueType = "character", ValuesAllowed = c("Global", "Year")),
      TYPE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      UNITS = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]")
    ),
    Get = list(
      NAME = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      TABLE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      GROUP = list(ValueType = "character",
                   ValuesAllowed = c("Global", "BaseYear", "Year")),
      TYPE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      UNITS = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]")
    ),
    Set = list(
      NAME = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      TABLE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      GROUP = list(ValueType = "character", ValuesAllowed = c("Global", "Year")),
      TYPE = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]"),
      UNITS = list(ValueType = "character", ValuesAllowed = "[0-9a-zA-Z_]")
    )
  )
}

#CHECK A MODULE SPECIFICATION
#============================
#' Checks a module specification for completeness and for incorrect entries
#'
#' \code{checkSpec} checks a single module specification for
#' completeness and for proper values.
#'
#' This function checks whether a single module specification (i.e. the
#' specification for a single dataset) contains the minimum required
#' attributes and that the values of the attributes are correct.
#'
#' @param Spec_ls a list containing the specifications for a single item in
#' a module specifications list.
#' @param SpecGroup a string identifying the specifications group the
#' specification is in (e.g. RunBy, NewInpTable, NewSetTable, Inp, Get, Set).
#' This is used in the error messages to identify which specification has
#' errors.
#' @param SpecNum an integer identifying which specification in the
#' specifications group has errors.
#' @return A vector containing messages identifying any errors that are found.
#' @import stringr
#' @export
checkSpec <- function(Spec_ls, SpecGroup, SpecNum) {
  #Requirements (ValueType and ValuesAllowed) defined for this group
  Require_ls <- SpecRequirements()[[SpecGroup]]
  Errors_ <- character(0)
  #Define function to check one specification requirement
  #ReqName argument is the requirement name (e.g. TYPE). Is NULL for RunBy
  #specification group.
  checkRequirement <- function(ReqName = NULL){
    if (is.null(ReqName)) {
      #RunBy: the specification itself is the value to check
      Spec <- Spec_ls
      Req_ls <- Require_ls
      Name <- ""
    } else {
      #Other groups: check the named attribute of the specification
      Spec <- Spec_ls[[ReqName]]
      Req_ls <- Require_ls[[ReqName]]
      Name <- paste0(ReqName, " ")
    }
    Errors_ <- character(0)
    #Check that the attribute has the required storage type
    if (typeof(Spec) != Req_ls$ValueType) {
      Msg <- paste0("The type of the ", Name, "attribute of the ", SpecGroup,
                    " specification number ", SpecNum, " is incorrect. ",
                    "The attribute must be a ", Req_ls$ValueType, " type.")
      Errors_ <- c(Errors_, Msg)
    }
    #Check that the attribute value matches at least one allowed pattern
    #(ValuesAllowed entries are treated as regular expressions)
    if (!any(str_detect(Spec, Req_ls$ValuesAllowed))) {
      Msg <- paste0("The value of the ", Name, "attribute of the ", SpecGroup,
                    " specification number ", SpecNum, " is incorrect. ",
                    "The attribute value must be one of the following: ",
                    paste(Req_ls$ValuesAllowed, collapse = ", "), ".")
      Errors_ <- c(Errors_, Msg)
    }
    Errors_
  }
  #Check a specification
  if (SpecGroup == "RunBy") {
    Errors_ <- c(Errors_, checkRequirement())
  } else {
    for (nm in names(Require_ls)) {
      Errors_ <- c(Errors_, checkRequirement(nm))
    }
    #Inp, Get, and Set specifications also require TYPE/UNITS cross-checks
    #(checkSpecTypeUnits is defined earlier in this file)
    if (SpecGroup %in% c("Inp", "Get", "Set")) {
      Errors_ <- c(Errors_, checkSpecTypeUnits(Spec_ls, SpecGroup, SpecNum))
    }
  }
  Errors_
}

#CHECK THE SPECIFICATIONS FOR A MODULE
#=====================================
#' Checks all module specifications for completeness and for incorrect entries
#'
#' \code{checkModuleSpecs} checks all module specifications for
#' completeness and for proper values.
#'
#' This function iterates through all the specifications for a module and
#' calls the checkSpec function to check each specification for completeness and
#' for proper values.
#'
#' @param Specs_ls a module specifications list.
#' @param ModuleName a string identifying the name of the module. This is used in
#' the error messages to identify which module has errors.
#' @return A vector containing messages identifying any errors that are found.
#' @export
checkModuleSpecs <- function(Specs_ls, ModuleName) {
  Errors_ <- character(0)
  #Check every specification in a list-type specifications group
  #(NewInpTable, NewSetTable, Inp, Get, Set) and prepend a module-level
  #message if any errors are found. This helper replaces five nearly
  #identical copies of the same loop in the original implementation.
  checkSpecGroup <- function(GroupSpecs_ls, GroupName) {
    Err_ <- character(0)
    #seq_along handles an empty specifications group safely
    #(the original 1:length() would iterate over c(1, 0))
    for (i in seq_along(GroupSpecs_ls)) {
      Err_ <- c(Err_, checkSpec(GroupSpecs_ls[[i]], GroupName, i))
    }
    if (length(Err_) != 0) {
      Msg <- paste0(
        "'", GroupName, "' specification for module '", ModuleName,
        "' has one or more errors as follows.")
      Err_ <- c(Msg, Err_)
    }
    Err_
  }
  #Check RunBy (a single specification, not a list of specifications)
  #------------------------------------------------------------------
  Err_ <- checkSpec(Specs_ls$RunBy, "RunBy", 1)
  if (length(Err_) != 0) {
    Msg <- paste0(
      "'RunBy' specification for module '", ModuleName,
      "' has one or more errors as follows.")
    Errors_ <- c(Errors_, Msg, Err_)
  }
  rm(Err_)
  #Check each optional list-type specifications group if it exists
  #---------------------------------------------------------------
  for (GroupName in c("NewInpTable", "NewSetTable", "Inp", "Get", "Set")) {
    if (!is.null(Specs_ls[[GroupName]])) {
      Errors_ <- c(Errors_, checkSpecGroup(Specs_ls[[GroupName]], GroupName))
    }
  }
  #Return errors
  #-------------
  if (length(Errors_) != 0) {
    Msg <- paste0(
      "Module ", ModuleName, " has one or more errors as follow:"
    )
    Errors_ <- c(Msg, Errors_)
  }
  Errors_
}

#CHECK YEARS AND GEOGRAPHY OF INPUT FILE
#=======================================
#' Check years and geography of input file
#'
#' \code{checkInputYearGeo} checks the 'Year' and 'Geo' columns of an input file
#' to determine whether they are complete and have no duplications.
#'
#' This function checks the 'Year' and 'Geo' columns of an input file to
#' determine whether there are records for all run years specified for the
#' model and for all geographic areas for the level of geography. It also checks
#' for redundant year and geography entries.
#'
#' @param Year_ the vector extract of the 'Year' column from the input data.
#' @param Geo_ the vector extract of the 'Geo' column from the input data.
#' @param Group a string identifying the 'GROUP' specification for the data sets
#' contained in the input file.
#' @param Table a string identifying the 'TABLE' specification for the data sets
#' contained in the input file.
#' @return A list containing the results of the check. The list has two
#' mandatory components and two optional components. 'CompleteInput' is a
#' logical that identifies whether records are present for all years and
#' geographic areas. 'DupInput' identifies where are any redundant year and
#' geography entries. If 'CompleteInput' is FALSE, the list contains a
#' 'MissingInputs' component that is a string identifying the missing year and
#' geography records. 
If 'DupInput' is TRUE, the list contains a component that
#' is a string identifying the duplicated year and geography records.
#' @export
checkInputYearGeo <- function(Year_, Geo_, Group, Table) {
  Result_ls <- list()
  #Model state supplies the run years and the geography definitions
  G <- getModelState()
  #Make a data frame of all required year and geography combinations
  if (Group == "Year") {
    Required_df <- expand.grid(G$Years, unique(G$Geo_df[[Table]]),
                               stringsAsFactors = FALSE)
    names(Required_df) <- c("Year", "Geo")
  } else {
    #BUG FIX: Required_df was only created when Group == "Year" but was used
    #unconditionally below, so any other Group value produced an
    #uninformative "object 'Required_df' not found" error. Fail fast with a
    #clear message instead.
    stop("checkInputYearGeo only supports Group = 'Year'; got '", Group, "'.",
         call. = FALSE)
  }
  RequiredNames_ <- sort(paste(Required_df$Year, Required_df$Geo, sep = "/"))
  #Make a vector of year and geography combinations in the inputs
  InputNames_ <- sort(paste(Year_, Geo_, sep = "/"))
  #Check whether any required records are missing
  CompleteInputCheck_ <- RequiredNames_ %in% InputNames_
  Result_ls$CompleteInput <- all(CompleteInputCheck_)
  if (!all(CompleteInputCheck_)) {
    MissingNames_ <- RequiredNames_[!CompleteInputCheck_]
    Result_ls$MissingInputs <- paste(MissingNames_, collapse = ", ")
  }
  #Check whether there are duplicated records
  DuplicatedInputCheck_ <- duplicated(InputNames_)
  Result_ls$DupInput <- any(DuplicatedInputCheck_)
  if (any(DuplicatedInputCheck_)) {
    DuplicateNames_ <- InputNames_[DuplicatedInputCheck_]
    Result_ls$DuplicatedInputs <- paste(DuplicateNames_, collapse = ", ")
  }
  #Return the result
  Result_ls
}

#FIND SPECIFICATION CORRESPONDING TO A NAME, TABLE, AND GROUP
#============================================================
#' Find the full specification corresponding to a defined NAME, TABLE, and GROUP
#'
#' \code{findSpec} returns the full dataset specification for defined NAME,
#' TABLE, and GROUP.
#'
#' This function finds and returns the full specification from a specifications
#' list whose NAME, TABLE and GROUP values correspond to the Name, Table, and
#' Group argument values. The specifications list must be in standard format and
#' must be for only 'Inp', 'Get', or 'Set' specifications.
#'
#' @param Specs_ls a standard specifications list for 'Inp', 'Get', or 'Set'
#' @param Name a string for the name of the dataset
#' @param Table a string for the table that the dataset resides in
#' @param Group a string for the generic group that the table resides in
#' @return A list containing the full specifications for the dataset
#' @export
findSpec <- function(Specs_ls, Name, Table, Group) {
  #Index of every specification whose NAME, TABLE, and GROUP all match
  SpecIdx <- which(unlist(lapply(Specs_ls, function(x) {
    x$NAME == Name & x$TABLE == Table & x$GROUP == Group
  })))
  #ROBUSTNESS FIX: the original indexed with whatever which() returned, so a
  #missing specification produced a cryptic subscript error and multiple
  #matches produced recursive indexing. Report both conditions explicitly.
  if (length(SpecIdx) == 0) {
    stop("No specification found for NAME = '", Name, "', TABLE = '", Table,
         "', GROUP = '", Group, "'.", call. = FALSE)
  }
  if (length(SpecIdx) > 1) {
    warning("Multiple specifications match NAME = '", Name, "', TABLE = '",
            Table, "', GROUP = '", Group, "'; using the first.", call. = FALSE)
    SpecIdx <- SpecIdx[1]
  }
  Specs_ls[[SpecIdx]]
}

#SORT DATA FRAME TO MATCH ORDER OF GEOGRAPHY IN DATASTORE TABLE
#==============================================================
#' Sort a data frame so that the order of rows matches the geography in a
#' datastore table.
#'
#' \code{sortGeoTable} returns a data frame whose rows are sorted to match the
#' geography in a specified table in the datastore.
#'
#' This function sorts the rows of a data frame so that the 'Geo' field in the
#' data frame matches the corresponding geography names in the specified table
#' in the datastore. The function returns the sorted table.
#'
#' @param Data_df a data frame that contains a 'Geo' field containing the names
#' of the geographic areas to sort by and any number of additional data fields.
#' @param Table a string for the table that is to be matched against.
#' @param Group a string for the generic group that the table resides in.
#' @return The data frame which has been sorted to match the order of geography
#' in the specified table in the datastore.
#' @export
sortGeoTable <- function(Data_df, Table, Group) {
  if (!("Geo" %in% names(Data_df))) {
    Msg <- paste0(
      "Data frame does not have a 'Geo' field. ",
      "A 'Geo' field must be included in order for the table to be sorted ",
      "to match the geography of the specified table in the datastore."
    )
    stop(Msg)
  }
  #Geography names stored in the datastore define the target row order
  #(readFromTable is defined elsewhere in this package)
  DstoreNames_ <- readFromTable(Table, Table, Group)
  Order_ <- match(DstoreNames_, Data_df$Geo)
  #ROBUSTNESS FIX: the original silently produced NA rows when a datastore
  #geography name was missing from Data_df$Geo. Report the problem instead.
  if (anyNA(Order_)) {
    stop("Data frame is missing rows for the following geographic areas: ",
         paste(DstoreNames_[is.na(Order_)], collapse = ", "), call. = FALSE)
  }
  #drop = FALSE guards against collapse to a vector for a one-column frame
  Data_df[Order_, , drop = FALSE]
}

#PARSE INPUT FILE FIELD NAMES
#============================
#' Parse field names of input file to separate out the field name, currency
#' year, and multiplier.
#'
#' \code{parseInputFieldNames} parses the field names of an input file to
#' separate out the field name, currency year (if data is
#' currency type), and value multiplier.
#'
#' The field names of input files can be used to encode more information than
#' the name itself. It can also encode the currency year for currency type data
#' and also if the values are in multiples (e.g. thousands of dollars). For
#' currency type data it is mandatory that the currency year be specified so
#' that the data can be converted to base year currency values (e.g. dollars in
#' base year dollars). The multiplier is optional, but needless to say, it can
#' only be applied to numeric data. The function returns a list with a component
#' for each field. Each component identifies the field name, year, multiplier,
#' and error status for the result of parsing the field name. If the field name
#' was parsed successfully, the error status is character(0). If the field name
#' was not successfully parsed, the error status contains an error message,
#' identifying the problem.
#'
#' @param FieldNames_ A character vector containing the field names of an
#' input file.
#' @param Specs_ls A list of specifications for fields in the input file.
#' @param FileName A string identifying the name of the file that the field
#' names are from. This is used for writing error messages.
#' @return A named list with one component for each field. Each component is a list
#' having 4 named components: Error, Name, Year, Multiplier. The Error
#' component has a value of character(0) if there are no errors or a character
#' vector of error messages if there are errors. 
The Name component is a string
#' with the name of the field. The Year component is a string with the year
#' component if the data type is currency or NA if the data type is not currency
#' or if the Year component has an invalid value. The Multiplier is a number if
#' the multiplier component is present and is valid. It is NA if there is no
#' multiplier component and NaN if the multiplier is invalid. Each component of
#' the list is named with the value of the Name component (i.e. the field name
#' without the year and multiplier elements.)
#' @export
parseInputFieldNames <- function(FieldNames_, Specs_ls, FileName) {
  #Define function to return a multiplier value from a multiplier string
  #NA if none, NaN if not a properly specified scientific notation (e.g. 1e3)
  getMultiplier <- function(String) {
    if (is.na(String)) {
      Result <- NA
    } else {
      #FIX: suppressWarnings - non-numeric pieces are expected and handled
      #through the NA check below, so the coercion warning is just noise
      SciTest_ <- suppressWarnings(as.numeric(unlist(strsplit(String, "e"))))
      #FIX: use short-circuiting || in this scalar context so SciTest_[1] is
      #not evaluated (possibly to NA) when an earlier test already failed
      if (length(SciTest_) != 2 || any(is.na(SciTest_)) || SciTest_[1] != 1) {
        Result <- NaN
      } else {
        Result <- as.numeric(String)
      }
    }
    Result
  }
  #Define function to return a year value from a year string
  #NA if none or not a correct year (1900 through the current year)
  getYear <- function(String) {
    CurrentYear <- unlist(strsplit(as.character(Sys.Date()), "-"))[1]
    #FIX: test is.na(String) first with || so as.numeric() is never applied
    #to NA, and suppress the coercion warning for non-numeric strings
    if (is.na(String) || is.na(suppressWarnings(as.numeric(String)))) {
      Result <- NA
    } else {
      #FIX: compare years numerically; the original compared a number to a
      #character string, which relies on lexical ordering after coercion
      if (as.numeric(String) < 1900 || as.numeric(String) > as.numeric(CurrentYear)) {
        Result <- NA
      } else {
        Result <- String
      }
    }
    Result
  }
  #Make a list to store results
  Fields_ls <- list()
  #Make an index to the specified field names
  SpecdNames_ <- unlist(lapply(Specs_ls, function(x) x$NAME))
  #FIX: seq_along handles a zero-length FieldNames_ safely (1:length did not)
  for (i in seq_along(FieldNames_)) {
    Fields_ls[[i]] <- list()
    FieldName <- FieldNames_[i]
    Fields_ls[[i]]$Error <- character(0)
    #Split the parts of the units specification
    NameSplit_ <- unlist(strsplit(FieldName, "\\."))
    #The field name is the first element
    Name <- NameSplit_[1]
    Fields_ls[[i]]$Name <- Name
    #If the field name is "Geo" or "Year" move on to next field
    if (Name %in% c("Geo", "Year")) next
    #Check that the parsed name is one of the specified field names
    if (!(Name %in% SpecdNames_)) {
      Fields_ls[[i]]$Year <- NA
      Fields_ls[[i]]$Multiplier <- NA
      Msg <- paste0("Field name ", FieldName, " does not parse to a name that ",
                    "can be recognized as one of the names specified for the ",
                    "input file ", FileName)
      Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
      rm(Msg)
      next
    }
    #Decode the Year and Multiplier portions. For currency fields the name is
    #Name.Year.Multiplier; for other types it is Name.Multiplier
    FieldType <- Specs_ls[[which(SpecdNames_ == Name)]]$TYPE
    if (FieldType == "currency") {
      Fields_ls[[i]]$Year <- getYear(NameSplit_[2])
      Fields_ls[[i]]$Multiplier <- getMultiplier(NameSplit_[3])
    } else {
      Fields_ls[[i]]$Year <- NA
      Fields_ls[[i]]$Multiplier <- getMultiplier(NameSplit_[2])
    }
    #If currency type, check that value is correct or give an error
    if (FieldType == "currency") {
      #Deflator years come from the model state (getModelState is defined
      #elsewhere in this package)
      AllowedYears_ <- as.character(getModelState()$Deflators$Year)
      if (is.na(Fields_ls[[i]]$Year)) {
        Msg <- paste0("Field name ", FieldName, " in input file ", FileName,
                      " has a specification TYPE of currency, but the parsed year ",
                      "component is missing or is not a valid year. ",
                      "See documentation for details on how to properly name ",
                      "a field name that has a year component. ")
        Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
        rm(Msg)
      } else {
        if (!(Fields_ls[[i]]$Year %in% AllowedYears_)) {
          Msg <- paste0("Field name ", FieldName, " in input file ", FileName,
                        " has a specification TYPE of currency, but the parsed year ",
                        "component is not one for which there is a deflator. ",
                        "If the year component is correct, then the deflators file ",
                        "must be corrected to include a deflator for the year. ",
                        "See documentation for details on the deflator file requirements.")
          Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
          rm(Msg)
        }
      }
    }
    #Check whether multiplier is correct or give an error
    #(is.nan is FALSE for the no-multiplier NA case)
    if (is.nan(Fields_ls[[i]]$Multiplier)) {
      Msg <- paste0("Field name ", FieldName, " in input file ", FileName,
                    " has parsed multiplier component that is not valid. ",
                    "See documentation for details on how to properly name ",
                    "a field name that has a multiplier component. ")
      Fields_ls[[i]]$Error <- c(Fields_ls[[i]]$Error, Msg)
      rm(Msg)
    }
  }
  names(Fields_ls) <- unlist(lapply(Fields_ls, function(x) x$Name))
  Fields_ls
}
# items <- item <- list
# Specs_ls <-
#   items(
#     item(
#       NAME = "TotHhPop",
#       TYPE = "double",
#       UNITS = "persons"
#     ),
#     item(
#       NAME = "TotHhIncome",
#       TYPE = "currency",
#       UNITS = "USD"
#     )
#   )
# FieldNames_ <- c("Geo", "Year", "TotHhPop.1e3", "TotHhIncome.2000")
# temp_ls <- parseInputFieldNames(FieldNames_, Specs_ls, "test.csv")
# FieldNames_ <- c("Geo", "Year", "TotHhPop.1000", "TotHhIncome.1998.1000")
# temp_ls <- parseInputFieldNames(FieldNames_, Specs_ls, "test.csv")
# FieldNames_ <- c("Geo", "Year", "TotHhPop.1000", "TotHhIncome.hello.1000")
# parseInputFieldNames(FieldNames_, Specs_ls, "test.csv")
# unlist(lapply(temp_ls, function(x) x$Error))

#PROCESS MODULE INPUT FILES
#==========================
#' Process module input files
#'
#' \code{processModuleInputs} processes input files identified in a module's
#' 'Inp' specifications in preparation for saving in the datastore.
#'
#' This function processes the input files identified in a module's 'Inp'
#' specifications in preparation for saving the data in the datastore. Several
#' processes are carried out. The existence of each specified input file is
#' checked. Files that are not global, are checked to determine that they have
#' 'Year' and 'Geo' columns. The entries in the 'Year' and 'Geo' columns are
#' checked to make sure they are complete and there are no duplicates. The data
#' in each column are checked against specifications to determine conformance.
#' The function returns a list which contains a list of error messages and a
#' list of the data inputs. The function also writes error messages and warnings
#' to the log file.
#'
#' @param ModuleSpec_ls a list of module specifications that is consistent with
#' the VisionEval requirements.
#' @param ModuleName a string identifying the name of the module (used to document
#' module in error messages).
#' @param Dir a string identifying the relative path to the directory where the
#' model inputs are contained.
#' @return A list containing the results of the input processing. The list has
#' two components. The first (Errors) is a vector of identified file and data
#' errors. The second (Data) is a list containing the data in the input files
#' organized in the standard format for data exchange with the datastore.
#' @export
processModuleInputs <- function(ModuleSpec_ls, ModuleName, Dir = "inputs") {
  #Model state (base year, deflators, geography) used during conversions
  G <- getModelState()
  FileErr_ <- character(0)
  FileWarn_ <- character(0)
  InpSpec_ls <- ModuleSpec_ls$Inp
  #ORGANIZE THE SPECIFICATIONS BY INPUT FILE AND NAME
  #SortSpec_ls[[FILE]][[NAME]] holds the specification for each dataset so
  #each input file can be read and validated once
  SortSpec_ls <- list()
  for (i in 1:length(InpSpec_ls)) {
    Spec_ls <- InpSpec_ls[[i]]
    File <- Spec_ls$FILE
    Name <- Spec_ls$NAME
    if (is.null(SortSpec_ls[[File]])) {
      SortSpec_ls[[File]] <- list()
    }
    SortSpec_ls[[File]][[Name]] <- Spec_ls
    rm(Spec_ls, File, Name)
  }
  #Initialize a list to store all the input data
  #(initDataList is defined elsewhere in this package)
  Data_ls <- initDataList()
  #ITERATE THROUGH SORTED SPECIFICATIONS AND LOAD DATA INTO LIST
  Files_ <- names(SortSpec_ls)
  for (File in Files_) {
    #Extract the specifications
    Spec_ls <- SortSpec_ls[[File]]
    #Check that file exists; if not, record the error and skip the file
    #NOTE(review): paste() with the default sep inserts spaces around the
    #quoted file name in this message; looks unintended -- confirm
    if (!file.exists(file.path(Dir, File))) {
      Msg <- paste(
        "Input file error.",
        "File '", File, "' required by '", ModuleName,
        "' is not present in the 'inputs' directory."
      )
      FileErr_ <- c(FileErr_, Msg)
      next()
    }
    #Read in the data file
    Data_df <- read.csv(file.path(Dir, File), as.is = TRUE)
    #Parse the field names of the data file to extract name/year/multiplier
    #and rename the data frame columns to the bare dataset names
    ParsedNames_ls <- parseInputFieldNames(names(Data_df), Spec_ls, File)
    ParsingErrors_ <- unlist(lapply(ParsedNames_ls, function(x) x$Error))
    names(Data_df) <- names(ParsedNames_ls)
    if (length(ParsingErrors_) != 0) {
      writeLog(
        c("Input file field name errors as follows:", ParsingErrors_))
      FileErr_ <- c(FileErr_, ParsingErrors_)
    } else {
      rm(ParsingErrors_)
    }
    #Identify the group and table the data is to be placed in
    #All datasets in one file must share a single GROUP and TABLE
    Group <- unique(unlist(lapply(Spec_ls, function(x) x$GROUP)))
    if (length(Group) != 1) {
      Msg <- paste0(
        "Input specification error for module '", ModuleName,
        "' for input file '", File, "'. ",
        "All datasets must have the same 'Group' specification."
      )
      FileErr_ <- c(FileErr_, Msg)
      Group <- Group[1]
    }
    Table <- unique(unlist(lapply(Spec_ls, function(x) x$TABLE)))
    if (length(Table) != 1) {
      Msg <- paste0(
        "Input specification error for module '", ModuleName,
        "' for input file '", File, "'. ",
        "All datasets must have the same 'Table' specification."
      )
      FileErr_ <- c(FileErr_, Msg)
      Table <- Table[1]
    }
    #Add Table and table attributes to data list if not already there
    if (is.null(Data_ls[[Group]][[Table]])) {
      Data_ls[[Group]][[Table]] <- list()
    }
    #If Group is Year, check that Geo and Year fields are correct
    if (Group == "Year") {
      #Check that there are 'Year' and 'Geo' fields
      HasYearField <- "Year" %in% names(Data_df)
      HasGeoField <- "Geo" %in% names(Data_df)
      if (!(HasYearField & HasGeoField)) {
        Msg <- paste0(
          "Input file error for module '", ModuleName,
          "' for input file '", File, "'. ",
          "'Group' specification is 'Year' or 'RunYear' ",
          "but the input file is missing required 'Year' ",
          "and/or 'Geo' fields."
        )
        FileErr_ <- c(FileErr_, Msg)
        next()
      }
      #Check that the file has inputs for all years and geographic units
      #If so, save Year and Geo to table; otherwise record what is missing
      #or duplicated and skip the file
      CorrectYearGeo <- checkInputYearGeo(Data_df$Year, Data_df$Geo, Group, Table)
      if (CorrectYearGeo$CompleteInput & !CorrectYearGeo$DupInput) {
        Data_ls[[Group]][[Table]]$Year <- Data_df$Year
        Data_ls[[Group]][[Table]]$Geo <- Data_df$Geo
      } else {
        if (!CorrectYearGeo$CompleteInput) {
          Msg <- paste0(
            "Input file error for module '", ModuleName,
            "' for input file '", File, "'. ",
            "Is missing inputs for the following Year/", Table,
            " combinations: ", CorrectYearGeo$MissingInputs
          )
          FileErr_ <- c(FileErr_, Msg)
        }
        if(CorrectYearGeo$DupInput){
          Msg <- paste0(
            "Input file error for module '", ModuleName,
            "' for input file '", File, "'. ",
            "Has duplicate inputs for the following Year/", Table,
            " combinations: ", CorrectYearGeo$DuplicatedInputs
          )
          FileErr_ <- c(FileErr_, Msg)
        }
        next()
      }
    }
    #Check and load data into list
    #Each dataset is validated against its specification and then converted
    #to base-year currency, datastore units, and unit magnitude
    DataErr_ls <- list(Errors = character(0), Warnings = character(0))
    for (Name in names(Spec_ls)) {
      ThisSpec_ls <- Spec_ls[[Name]]
      Data_ <- Data_df[[Name]]
      #checkDataConsistency is defined elsewhere in this package
      DataCheck_ls <- checkDataConsistency(Name, Data_, ThisSpec_ls)
      if (length(DataCheck_ls$Errors) != 0) {
        writeLog(DataCheck_ls$Errors)
        DataErr_ls$Errors <- c(DataErr_ls$Errors, DataCheck_ls$Errors)
        next()
      }
      if (length(DataCheck_ls$Warnings) != 0) {
        writeLog(DataCheck_ls$Warnings)
        DataErr_ls$Warnings <- c(DataErr_ls$Warnings, DataCheck_ls$Warnings)
      }
      #Convert currency to base-year values using the year parsed from the
      #field name (skipped when the year is absent or already the base year)
      if (ThisSpec_ls$TYPE == "currency") {
        FromYear <- ParsedNames_ls[[Name]]$Year
        ToYear <- G$BaseYear
        if (!is.na(FromYear) & FromYear != ToYear) {
          Data_ <- deflateCurrency(Data_, FromYear, ToYear)
          rm(FromYear, ToYear)
        }
      }
      #Convert units for complex types (any TYPE other than the four simple
      #storage types) to the default datastore units
      SimpleTypes_ <- c("integer", "double", "character", "logical")
      ComplexTypes_ <- names(Types())[!(names(Types()) %in% SimpleTypes_)]
      if (ThisSpec_ls$TYPE %in% ComplexTypes_) {
        FromUnits <- ThisSpec_ls$UNITS
        Conversion_ls <- convertUnits(Data_, ThisSpec_ls$TYPE, FromUnits)
        Data_ <- Conversion_ls$Values
        #Update UNITS to reflect datastore units
        ThisSpec_ls$UNITS <- Conversion_ls$ToUnits
        rm(FromUnits, Conversion_ls)
      }
      rm(SimpleTypes_, ComplexTypes_)
      #Convert magnitude (e.g. thousands) parsed from the field name
      Multiplier <- ParsedNames_ls[[Name]]$Multiplier
      if (!is.na(Multiplier)) {
        Data_ <- convertMagnitude(Data_, Multiplier, 1)
      }
      rm(Multiplier)
      #Assign UNITS attribute to Data_ because storage units may be different
      #than the input data UNITS
      attributes(Data_) <- list(UNITS = ThisSpec_ls$UNITS)
      #Assign Data_ to Data_ls
      Data_ls[[Group]][[Table]][[Name]] <- Data_
    }
    if (length(DataErr_ls$Errors) != 0) {
      Msg <- paste0(
        "Input file error for module '", ModuleName,
        "' for input file '", File, "'. ",
        "Has one or more errors in the data inputs as follows:"
      )
      FileErr_ <- c(FileErr_, Msg, DataErr_ls$Errors)
      writeLog(FileErr_)
    }
    if (length(DataErr_ls$Warnings) != 0) {
      Msg <- paste0(
        "Input file warnings for module '", ModuleName,
        "' for input file '", File, "'. ",
        "Has one or more warnings for the data inputs as follows:"
      )
      FileWarn_ <- c(FileWarn_, Msg, DataErr_ls$Warnings)
      writeLog(FileWarn_)
    }
  }#End loop through input files
  #RETURN THE RESULTS
  list(Errors = FileErr_, Data = Data_ls)
}
d8a8c8159f3c471cf90e97a7218ea0f6 ttt_4x4-shape-7-GTTT-2-2-torus-1.qdimacs 756 2648
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_4x4-shape-7-GTTT-2-2-torus-1/ttt_4x4-shape-7-GTTT-2-2-torus-1.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
82
r
d8a8c8159f3c471cf90e97a7218ea0f6 ttt_4x4-shape-7-GTTT-2-2-torus-1.qdimacs 756 2648
test_that(".file_extdata", { expect_error(.file_extdata("foo")) expect_true(file.exists(.file_extdata("labcodes.csv"))) })
/tests/testthat/test-utils.R
permissive
ampel-leipzig/sbcdata
R
false
false
131
r
test_that(".file_extdata", { expect_error(.file_extdata("foo")) expect_true(file.exists(.file_extdata("labcodes.csv"))) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/covidestim.R \name{covidestim_add.priors} \alias{covidestim_add.priors} \title{When adding priors, we want to be sure that a new 'modelconfig' object is created, in order to check these priors} \usage{ \method{covidestim_add}{priors}(rightside, leftside) } \description{ When adding priors, we want to be sure that a new 'modelconfig' object is created, in order to check these priors }
/man/covidestim_add.priors.Rd
no_license
sechan9999/covidestim
R
false
true
465
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/covidestim.R \name{covidestim_add.priors} \alias{covidestim_add.priors} \title{When adding priors, we want to be sure that a new 'modelconfig' object is created, in order to check these priors} \usage{ \method{covidestim_add}{priors}(rightside, leftside) } \description{ When adding priors, we want to be sure that a new 'modelconfig' object is created, in order to check these priors }
library(ggplot2) setwd("C:\\Users\\letic\\OneDrive\\Área de Trabalho\\PESQUISA\\mini artigo") df_activity <- read.table("activityWD.data", header = TRUE, sep=",") Dias<-c("Segunda", "Terca", "Quarta", "Quinta", "Sexta", "Sabado","Domingo") df_activity pl <- ggplot(df_activity,aes(x = Dia, y=Frequencia)) + ggtitle("Commits em função do dia da semana")+ theme_bw(base_size = 11) + geom_point(shape=1,size=3) + ylab("Quantidade (commits)") + geom_line(lwd=1.5) + scale_x_continuous(breaks=(0:6),labels= Dias) + scale_y_continuous(limits = c(0, 1200)) + theme(text = element_text(size=25), axis.text.x = element_text(angle=45, hjust=1)) print(pl)
/graphActivitySemanas.R
no_license
pauloangelodb/IPI-mini-artigo
R
false
false
691
r
library(ggplot2) setwd("C:\\Users\\letic\\OneDrive\\Área de Trabalho\\PESQUISA\\mini artigo") df_activity <- read.table("activityWD.data", header = TRUE, sep=",") Dias<-c("Segunda", "Terca", "Quarta", "Quinta", "Sexta", "Sabado","Domingo") df_activity pl <- ggplot(df_activity,aes(x = Dia, y=Frequencia)) + ggtitle("Commits em função do dia da semana")+ theme_bw(base_size = 11) + geom_point(shape=1,size=3) + ylab("Quantidade (commits)") + geom_line(lwd=1.5) + scale_x_continuous(breaks=(0:6),labels= Dias) + scale_y_continuous(limits = c(0, 1200)) + theme(text = element_text(size=25), axis.text.x = element_text(angle=45, hjust=1)) print(pl)
# boundary_condition_check.R -- QC comparison of two CPEL differential-ASM runs.
# Compares results computed WITH reads overlapping the region boundary against
# results computed WITHOUT them, then plots the minimum significant dMML/dNME
# score as a function of the number of CpG sites per region.
source('mainFunctions_sub.R')
# Record-count / file-size notes for the two runs being compared:
# HUES64_mesoderm_23_paired_phased_tmml_pvals.bedGraph CPEL_version2 4.2M, Apr 28 (filter 5, no boundary), 98230
# HUES64_mesoderm_23_paired_phased_tmml_pvals.bedGraph newrun3 9.4M, May 7 (filter 5, boundary), 171985

# Run that CONTAINS reads at the boundary condition (imported from bedGraphs).
GR_contain_boundary=import.subject('../downstream/data/ASM_run3/bedGraph_diff/')#2,221,880 unique, total 28164174
GR_contain_boundary=convert_GR(GR_contain_boundary,direction="DT")
# Run WITHOUT boundary reads; GR_file is defined in mainFunctions_sub.R.
GR_no_boundary=readRDS(GR_file) #3,332,744 unique, total 16601685
GR_no_boundary = convert_GR(GR_no_boundary,direction="DT")
# Drop the H1 replicate samples so both runs cover the same sample set.
GR_no_boundary = GR_no_boundary[!Sample%in%c("rep1 - H1","rep2 - H1")]
# Region annotation; column N holds the number of CpG sites per region.
gff_in=readRDS(gff_in_file)
gff_in=convert_GR(gff_in,direction="DT")
# Attach CpG counts to each result table by matching on the region id.
GR_contain_boundary$NCpG = gff_in[match(GR_contain_boundary$region,region)]$N
GR_no_boundary$NCpG = gff_in[match(GR_no_boundary$region,region)]$N
# Counts of significant regions (p <= 0.1, >= 2 CpGs) per statistic and run;
# trailing comments record the values observed when this was last run.
sum(GR_contain_boundary[NCpG>=2&Statistic=="dMML" & Sample %in% GR_no_boundary$Sample]$pvalue<=0.1)#9696
sum(GR_contain_boundary[NCpG>=2&Statistic=="dNME"& Sample %in% GR_no_boundary$Sample]$pvalue<=0.1)#10497
sum(GR_no_boundary[NCpG>=2&Statistic=="dMML"]$pvalue<=0.1)#6803
sum(GR_no_boundary[NCpG>=2&Statistic=="dNME"]$pvalue<=0.1)#29826

# Per-Sample variant of the summary below; kept for reference but disabled.
# #Get min dNME or dMML at pval<0.1 with each N
# GR_no_boundary_summary=GR_no_boundary[pvalue<=0.1 & NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG,Sample)][order(NCpG)]
# #GR_no_boundary_summary=GR_no_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)]
# GR_no_boundary_summary$CPEL = "Without reads at boundary condition"
# GR_contain_boundary_summary=GR_contain_boundary[pvalue<=0.1& NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG,Sample)][order(NCpG)]
# #GR_contain_boundary_summary=GR_contain_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)]
# GR_contain_boundary_summary$CPEL = "With reads at boundary condition"
# dMML_comparison = rbind(GR_no_boundary_summary[Statistic=="dMML"],GR_contain_boundary_summary[Statistic=="dMML"])
# dNME_comparison = rbind(GR_no_boundary_summary[Statistic=="dNME"],GR_contain_boundary_summary[Statistic=="dNME"])
# pdf("../downstream/output/human_analysis/QC/dMML_cutoff_boundary.pdf",width = 6, height=5)
# print(ggplot(dMML_comparison,aes(x = NCpG,y=minScore,color=CPEL))+geom_smooth()+ylim(c(0,1))+
#         ggtitle("dMML")+ theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank()))
# dev.off()
# pdf("../downstream/output/human_analysis/QC/dNME_cutoff_boundary.pdf",width = 6, height=5)
# print(ggplot(dNME_comparison,aes(x = NCpG,y=minScore,color=CPEL))+geom_smooth()+ylim(c(0,1))+
#         ggtitle("dNME")+ theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank()))
# dev.off()

# For each statistic and CpG count, the minimum score among regions that
# reached significance (p <= 0.1) -- i.e. the effective detection cutoff.
#Get min dNME or dMML at pval<0.1 with each N
GR_no_boundary_summary=GR_no_boundary[pvalue<=0.1 & NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG)][order(NCpG)]
#GR_no_boundary_summary=GR_no_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)]
GR_no_boundary_summary$CPEL = "Without reads at boundary condition"
GR_contain_boundary_summary=GR_contain_boundary[pvalue<=0.1& NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG)][order(NCpG)]
#GR_contain_boundary_summary=GR_contain_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)]
GR_contain_boundary_summary$CPEL = "With reads at boundary condition"
# Stack the two runs so ggplot can dodge the bars by CPEL condition.
dMML_comparison = rbind(GR_no_boundary_summary[Statistic=="dMML"],GR_contain_boundary_summary[Statistic=="dMML"])
dNME_comparison = rbind(GR_no_boundary_summary[Statistic=="dNME"],GR_contain_boundary_summary[Statistic=="dNME"])
# Dodged-bar plots of the minimum significant score vs CpG count, one PDF each.
pdf("../downstream/output/human_analysis/QC/dMML_cutoff_boundary.pdf",width = 6, height=5)
print(ggplot(dMML_comparison,aes(x = NCpG,y=minScore,fill=CPEL))+geom_bar(stat="identity", position=position_dodge())+ylim(c(0,1))+
        ggtitle("dMML")+ theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank()))
dev.off()
pdf("../downstream/output/human_analysis/QC/dNME_cutoff_boundary.pdf",width = 6, height=5)
print(ggplot(dNME_comparison,aes(x = NCpG,y=minScore,fill=CPEL))+geom_bar(stat="identity", position=position_dodge())+ylim(c(0,1))+
        ggtitle("dNME")+ theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank()))
dev.off()
/code_not_in_paper/QC/boundary_condition_check.R
no_license
yuqifang94/ASE
R
false
false
4,381
r
source('mainFunctions_sub.R') # HUES64_mesoderm_23_paired_phased_tmml_pvals.bedGraph CPEL_version2 4.2M, Apr 28 (filter 5, no boudnary), 98230 # HUES64_mesoderm_23_paired_phased_tmml_pvals.bedGraph newrun3 9.4M, May 7 (filter 5, boundary), 171985 #Contains boundary condition GR_contain_boundary=import.subject('../downstream/data/ASM_run3/bedGraph_diff/')#2,221,880 unique, total 28164174 GR_contain_boundary=convert_GR(GR_contain_boundary,direction="DT") GR_no_boundary=readRDS(GR_file) #3,332,744 unique, total 16601685 GR_no_boundary = convert_GR(GR_no_boundary,direction="DT") GR_no_boundary = GR_no_boundary[!Sample%in%c("rep1 - H1","rep2 - H1")] gff_in=readRDS(gff_in_file) gff_in=convert_GR(gff_in,direction="DT") GR_contain_boundary$NCpG = gff_in[match(GR_contain_boundary$region,region)]$N GR_no_boundary$NCpG = gff_in[match(GR_no_boundary$region,region)]$N sum(GR_contain_boundary[NCpG>=2&Statistic=="dMML" & Sample %in% GR_no_boundary$Sample]$pvalue<=0.1)#9696 sum(GR_contain_boundary[NCpG>=2&Statistic=="dNME"& Sample %in% GR_no_boundary$Sample]$pvalue<=0.1)#10497 sum(GR_no_boundary[NCpG>=2&Statistic=="dMML"]$pvalue<=0.1)#6803 sum(GR_no_boundary[NCpG>=2&Statistic=="dNME"]$pvalue<=0.1)#29826 # #Get min dNME or dMML at pval<0.1 with each N # GR_no_boundary_summary=GR_no_boundary[pvalue<=0.1 & NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG,Sample)][order(NCpG)] # #GR_no_boundary_summary=GR_no_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)] # GR_no_boundary_summary$CPEL = "Without reads at boundary condition" # GR_contain_boundary_summary=GR_contain_boundary[pvalue<=0.1& NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG,Sample)][order(NCpG)] # #GR_contain_boundary_summary=GR_contain_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)] # GR_contain_boundary_summary$CPEL = "With reads at boundary condition" # dMML_comparison = 
rbind(GR_no_boundary_summary[Statistic=="dMML"],GR_contain_boundary_summary[Statistic=="dMML"]) # dNME_comparison = rbind(GR_no_boundary_summary[Statistic=="dNME"],GR_contain_boundary_summary[Statistic=="dNME"]) # pdf("../downstream/output/human_analysis/QC/dMML_cutoff_boundary.pdf",width = 6, height=5) # print(ggplot(dMML_comparison,aes(x = NCpG,y=minScore,color=CPEL))+geom_smooth()+ylim(c(0,1))+ # ggtitle("dMML")+ theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank())) # dev.off() # pdf("../downstream/output/human_analysis/QC/dNME_cutoff_boundary.pdf",width = 6, height=5) # print(ggplot(dNME_comparison,aes(x = NCpG,y=minScore,color=CPEL))+geom_smooth()+ylim(c(0,1))+ # ggtitle("dNME")+ theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank())) # dev.off() #Get min dNME or dMML at pval<0.1 with each N GR_no_boundary_summary=GR_no_boundary[pvalue<=0.1 & NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG)][order(NCpG)] #GR_no_boundary_summary=GR_no_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)] GR_no_boundary_summary$CPEL = "Without reads at boundary condition" GR_contain_boundary_summary=GR_contain_boundary[pvalue<=0.1& NCpG>1,list(minScore = min(score)),by=list(Statistic,NCpG)][order(NCpG)] #GR_contain_boundary_summary=GR_contain_boundary_summary[,list(minScore=mean(minScore)),by=list(Statistic,NCpG)] GR_contain_boundary_summary$CPEL = "With reads at boundary condition" dMML_comparison = rbind(GR_no_boundary_summary[Statistic=="dMML"],GR_contain_boundary_summary[Statistic=="dMML"]) dNME_comparison = rbind(GR_no_boundary_summary[Statistic=="dNME"],GR_contain_boundary_summary[Statistic=="dNME"]) pdf("../downstream/output/human_analysis/QC/dMML_cutoff_boundary.pdf",width = 6, height=5) print(ggplot(dMML_comparison,aes(x = NCpG,y=minScore,fill=CPEL))+geom_bar(stat="identity", position=position_dodge())+ylim(c(0,1))+ ggtitle("dMML")+ 
theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank())) dev.off() pdf("../downstream/output/human_analysis/QC/dNME_cutoff_boundary.pdf",width = 6, height=5) print(ggplot(dNME_comparison,aes(x = NCpG,y=minScore,fill=CPEL))+geom_bar(stat="identity", position=position_dodge())+ylim(c(0,1))+ ggtitle("dNME")+ theme(legend.position="bottom",plot.title = element_text(hjust = 0.5),legend.title = element_blank())) dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geneXplain.R \name{gx.trackToGeneSet} \alias{gx.trackToGeneSet} \title{Track to gene set} \usage{ gx.trackToGeneSet(tracks, species, from, to, resultTypes = c("Count"), allGenes = F, destPath, wait = T, verbose = T) } \arguments{ \item{tracks}{List of track paths} \item{species}{Name of the species} \item{from}{Gene region start relative to 5' end of Ensembl gene} \item{to}{Gene region end relative to 3' end of Ensembl gene} \item{resultTypes}{List of statistics to report (Schematic, + or -, Count, Count in exons, Count in introns, Count in 5', Count in 3', Structure, Positions)} \item{allGenes}{True if all genes shall be reported regardless of hit} \item{destPath}{output path} \item{wait}{True to wait for job to complete} \item{verbose}{True for more progress info} } \value{ a string containing the status of the request in JSON format } \description{ Maps one or more tracks to genes of the most recent Ensembl release }
/man/gx.trackToGeneSet.Rd
permissive
genexplain/geneXplainR
R
false
true
1,022
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geneXplain.R \name{gx.trackToGeneSet} \alias{gx.trackToGeneSet} \title{Track to gene set} \usage{ gx.trackToGeneSet(tracks, species, from, to, resultTypes = c("Count"), allGenes = F, destPath, wait = T, verbose = T) } \arguments{ \item{tracks}{List of track paths} \item{species}{Name of the species} \item{from}{Gene region start relative to 5' end of Ensembl gene} \item{to}{Gene region end relative to 3' end of Ensembl gene} \item{resultTypes}{List of statistics to report (Schematic, + or -, Count, Count in exons, Count in introns, Count in 5', Count in 3', Structure, Positions)} \item{allGenes}{True if all genes shall be reported regardless of hit} \item{destPath}{output path} \item{wait}{True to wait for job to complete} \item{verbose}{True for more progress info} } \value{ a string containing the status of the request in JSON format } \description{ Maps one or more tracks to genes of the most recent Ensembl release }
# germination.R -- Fucus germination and survival analysis.
# Cleans per-slide zygote counts, explores the effect of subsampling 50 vs 100
# zygotes per slide, then plots and models survival/germination by site x temp.

# Data cleaning ----------------------------------------------------------

# load packages
library(tidyverse)

# read in data
counts <- read_csv("./data_raw/germination/TPC_germination.csv")

# separate slides into site, temp and replicate columns; fold "tufts" into the
# dead count and compute the total number of zygotes scored per slide
counts <- counts %>%
  rename(sample = slide) %>%
  separate(sample, into = c("site", "temperature"), sep = 2, remove = FALSE) %>%
  separate(temperature, into = c("temp", "replicate"), sep = "_") %>%
  mutate(dead = (dead + tufts),
         total = (dead + germinated + ungerminated)) %>%
  select(-tufts)

# Mean percent survival per site x temperature.
survival <- counts %>%
  select(site, temp, replicate, dead, total) %>%
  mutate(percent_live = (((total - dead) / total))*100) %>%
  group_by(site, temp) %>%
  summarise(avg_live = mean(percent_live))

# Per-slide germination percentages (NA-count slides dropped).
germination <- counts %>%
  select(-notes) %>%
  mutate(percent_germ = (germinated / total)*100) %>%
  mutate(percent_ungerm = (ungerminated / total)*100) %>%
  na.omit() %>%
  mutate(temp = as.integer(temp))

# Playing around with subsetting data -------------------------------------
# Slides with at least 50 scored zygotes; "other" = not germinated.
sub <- counts %>%
  filter(total >=50) %>%
  mutate(other = ungerminated + dead) %>%
  select(sample, germinated, other) %>%
  na.omit()

#initialize dataframe: one row of resampled counts per slide
dum <- as.data.frame(matrix(0, ncol = 2, nrow = nrow(sub)))
names(dum) <- c("germinated", "other")

# Draw 50 zygotes without replacement from each slide and tally the outcomes.
# NOTE(review): `1:nrow(sub)` would iterate c(1, 0) on an empty `sub`;
# seq_len(nrow(sub)) is the safe form -- confirm `sub` is never empty.
for(i in 1:nrow(sub)) {
  test <- (c(rep("germinated", sub[i,2]), rep("ungerminated", sub[i,3]))) # making a list of germinated and ungerminated that exists in each sample
  test2 <- sample(test, size = 50, replace = FALSE, prob = NULL) # sampling from each 50 times
  test3 <- as.data.frame(table(test2)) # create dataframe with above results
  if(test3[1,1] == "germinated") { # trying to store above results in a dataframe
    dum[i,1] <- test3[1,2]
    dum[i,2] <- test3[2,2]
  } else dum[i,2] <- test3[1,2]
}

# Tag the resampled (n=50) and full (n=100) count tables, then stack them.
dum$sample <- sub$sample
dum$number <- c(rep(50, nrow(dum)))
sub$number <- c(rep(100, nrow(dum)))
test <- full_join(sub, dum)
test[is.na(test)] <- 0
test <- test %>%
  mutate(percent_germ = germinated / (germinated + other)) %>%
  separate(sample, into = c("site", "temperature"), sep = 2, remove = FALSE) %>%
  separate(temperature, into = c("temp", "replicate"), sep = "_")

# Does sample size (50 vs 100) interact with site/temp effects on germination?
anova(lm(data = test, percent_germ ~ site * temp * number))

test100 <- test %>% filter(number == 100)
test50 <- test %>% filter(number == 50)

anova(lm(data = test100, percent_germ ~ site * temp))
anova(lm(data = test50, percent_germ ~ site * temp))

# Compare the extreme germination estimates per slide across sample sizes.
dog <- test %>%
  group_by(sample) %>%
  summarise(max_germ = max(percent_germ), min_germ = min(percent_germ))

t.test(dog$max_germ, dog$min_germ)

# Plots - Survival -------------------------------------------------------------------

# plotting survival with averages as a scatterplot
ggplot(data = survival, aes(x = temp, y = avg_live, color = site)) +
  geom_point()

# plotting survival without averages as a scatterplot
# NOTE(review): `survival` is redefined here (ungrouped, per-replicate).
survival <- counts %>%
  select(site, temp, replicate, dead, total) %>%
  mutate(percent_live = (((total - dead) / total))*100) %>%
  na.omit()

ggplot(data = survival, aes(x = temp, y = percent_live, color = site)) +
  geom_point() +
  geom_smooth()

# raw date with boxplots
ggplot(data = survival, aes(x = temp, y = percent_live, color = site)) +
  geom_boxplot() +
  geom_jitter(position = position_jitter(width = 0.1, height = 0), alpha = 1/4) +
  xlab("temperature") + ylab("percentage live")

# Ad-hoc duplicate-slide checks.
# NOTE(review): `slide` was renamed to `sample` above, so these `counts$slide`
# references return NULL / likely error -- confirm intent (probably `counts$sample`).
length(unique(counts$slide)) == length(counts$slide)
sum((survival$site == "BI") & (survival$temp == "30"))
sum(survival$site == "RP")
sum(survival$site == "NP")
counts$slide[duplicated(counts$slide)]
counts$slide == "NP20_05"
counts$slide == "RP10_05"

# Plots - Germination ------------------------------------------------------------

# plotting percent germination of all zygotes
ggplot(data = germination, aes(x = temp, y = percent_germ, color = site)) +
  geom_boxplot() +
  geom_jitter(position = position_jitter(width = 0.1, height = 0), alpha = 1/2) +
  xlab("temperature") + ylab("percentage germinated") +
  scale_y_continuous(limits = c(0, 100))

ggplot(data = germination, aes(x = temp, y = percent_germ, color = site)) +
  geom_point() +
  xlab("temperature") + ylab("percentage germinated") +
  geom_smooth(lwd = 1, se = TRUE, method = "loess") +
  scale_y_continuous(limits = c(0, 100))

# Models ------------------------------------------------------------------

mod1 <- lm(germination$percent_germ ~ germination$temp * germination$site)
par(mfrow = c(2,2))
plot(mod1)
summary(mod1)
anova(mod1)
/scripts/germination.R
no_license
sandraemry/fucus-temp
R
false
false
4,568
r
# Data cleaning ---------------------------------------------------------- # load packages library(tidyverse) # read in data counts <- read_csv("./data_raw/germination/TPC_germination.csv") # separate slides into site, temp and replicate columns counts <- counts %>% rename(sample = slide) %>% separate(sample, into = c("site", "temperature"), sep = 2, remove = FALSE) %>% separate(temperature, into = c("temp", "replicate"), sep = "_") %>% mutate(dead = (dead + tufts), total = (dead + germinated + ungerminated)) %>% select(-tufts) survival <- counts %>% select(site, temp, replicate, dead, total) %>% mutate(percent_live = (((total - dead) / total))*100) %>% group_by(site, temp) %>% summarise(avg_live = mean(percent_live)) germination <- counts %>% select(-notes) %>% mutate(percent_germ = (germinated / total)*100) %>% mutate(percent_ungerm = (ungerminated / total)*100) %>% na.omit() %>% mutate(temp = as.integer(temp)) # Playing around with subsetting data ------------------------------------- sub <- counts %>% filter(total >=50) %>% mutate(other = ungerminated + dead) %>% select(sample, germinated, other) %>% na.omit() #initialize dataframe dum <- as.data.frame(matrix(0, ncol = 2, nrow = nrow(sub))) names(dum) <- c("germinated", "other") for(i in 1:nrow(sub)) { test <- (c(rep("germinated", sub[i,2]), rep("ungerminated", sub[i,3]))) # making a list of germinated and ungerminated that exists in each sample test2 <- sample(test, size = 50, replace = FALSE, prob = NULL) # sampling from each 50 times test3 <- as.data.frame(table(test2)) # create dataframe with above results if(test3[1,1] == "germinated") { # trying to store above results in a dataframe dum[i,1] <- test3[1,2] dum[i,2] <- test3[2,2] } else dum[i,2] <- test3[1,2] } dum$sample <- sub$sample dum$number <- c(rep(50, nrow(dum))) sub$number <- c(rep(100, nrow(dum))) test <- full_join(sub, dum) test[is.na(test)] <- 0 test <- test %>% mutate(percent_germ = germinated / (germinated + other)) %>% separate(sample, into = 
c("site", "temperature"), sep = 2, remove = FALSE) %>% separate(temperature, into = c("temp", "replicate"), sep = "_") anova(lm(data = test, percent_germ ~ site * temp * number)) test100 <- test %>% filter(number == 100) test50 <- test %>% filter(number == 50) anova(lm(data = test100, percent_germ ~ site * temp)) anova(lm(data = test50, percent_germ ~ site * temp)) dog <- test %>% group_by(sample) %>% summarise(max_germ = max(percent_germ), min_germ = min(percent_germ)) t.test(dog$max_germ, dog$min_germ) # Plots - Survival ------------------------------------------------------------------- # plotting survival with averages as a scatterplot ggplot(data = survival, aes(x = temp, y = avg_live, color = site)) + geom_point() # plotting survival without averages as a scatterplot survival <- counts %>% select(site, temp, replicate, dead, total) %>% mutate(percent_live = (((total - dead) / total))*100) %>% na.omit() ggplot(data = survival, aes(x = temp, y = percent_live, color = site)) + geom_point() + geom_smooth() # raw date with boxplots ggplot(data = survival, aes(x = temp, y = percent_live, color = site)) + geom_boxplot() + geom_jitter(position = position_jitter(width = 0.1, height = 0), alpha = 1/4) + xlab("temperature") + ylab("percentage live") length(unique(counts$slide)) == length(counts$slide) sum((survival$site == "BI") & (survival$temp == "30")) sum(survival$site == "RP") sum(survival$site == "NP") counts$slide[duplicated(counts$slide)] counts$slide == "NP20_05" counts$slide == "RP10_05" # Plots - Germination ------------------------------------------------------------ # plotting percent germination of all zygotes ggplot(data = germination, aes(x = temp, y = percent_germ, color = site)) + geom_boxplot() + geom_jitter(position = position_jitter(width = 0.1, height = 0), alpha = 1/2) + xlab("temperature") + ylab("percentage germinated") + scale_y_continuous(limits = c(0, 100)) ggplot(data = germination, aes(x = temp, y = percent_germ, color = site)) + 
geom_point() + xlab("temperature") + ylab("percentage germinated") + geom_smooth(lwd = 1, se = TRUE, method = "loess") + scale_y_continuous(limits = c(0, 100)) # Models ------------------------------------------------------------------ mod1 <- lm(germination$percent_germ ~ germination$temp * germination$site) par(mfrow = c(2,2)) plot(mod1) summary(mod1) anova(mod1)
fill_sample <- function(session) { updateTextInput(session, 's_name', value = 'Matthew Webb') updateTextInput(session, 's_email', value = 'mwebb@channelswimmingassocation.com') updateSelectInput(session, 's_gender', selected = 'male') updateTextInput(session, 's_phone', value = '505-867-5309') updateTextAreaInput(session, 's_mailing', value = '121 Main St, Dover, England') updateTextInput(session, 'team_name', value = 'Chunky Dunkers') updateSelectInput(session, 'team_size', selected = 6) updateSelectInput(session, 'boat_known', selected = 'Tuna Thumper') updateTextInput(session, 'cc_name', value = "Tabloid Terry") updateTextInput(session, 'cc_email', value = "tterry@hotmail.com") }
/relay-leader/fill_sample.R
no_license
evmo/sbcsa-apply
R
false
false
711
r
fill_sample <- function(session) { updateTextInput(session, 's_name', value = 'Matthew Webb') updateTextInput(session, 's_email', value = 'mwebb@channelswimmingassocation.com') updateSelectInput(session, 's_gender', selected = 'male') updateTextInput(session, 's_phone', value = '505-867-5309') updateTextAreaInput(session, 's_mailing', value = '121 Main St, Dover, England') updateTextInput(session, 'team_name', value = 'Chunky Dunkers') updateSelectInput(session, 'team_size', selected = 6) updateSelectInput(session, 'boat_known', selected = 'Tuna Thumper') updateTextInput(session, 'cc_name', value = "Tabloid Terry") updateTextInput(session, 'cc_email', value = "tterry@hotmail.com") }
# Build the understorey aboveground C production flux table for EucFACE.
#
# Combines Pathare's monthly harvest data (HIEv) with Matthias's one-off
# grass-strip harvest, converts dry biomass to carbon per unit ground area,
# and expresses production as a per-day flux between consecutive harvests.
#
# Args:
#   c_frac: assumed carbon fraction of dry biomass (dimensionless).
# Returns:
#   data.frame with Start_date, End_date, Date, Ring,
#   understorey_production_flux (mg C m-2 d-1) and Days.
# Relies on globals defined elsewhere in the project: getToPath(),
# download_understorey_aboveground_data(), count_ndays(), strip_area, g_to_mg.
make_understorey_aboveground_production_flux <- function(c_frac) {

    ### currently only Varsha's harvest data on HIEv
    download_understorey_aboveground_data()

    ### read in the data
    inDF1 <- read.csv(file.path(getToPath(),
                                "FACE_P0061_RA_PATHARE_UNDERSTORY_ABOVEGROUND_BIOMASS_L2_20150201_20160730.csv"))

    ### read in Matthias's harvest data
    ### NOTE(review): absolute home-directory path -- not portable; confirm location.
    inDF2 <- read.csv("~/Documents/Research/Projects/EucFACE_C_Balance/R_repo/temp_files/EucFACE_GrassStrip_Harvest_20170523.csv")

    ### process inDFs
    # Turn the "yy-Mon" month column into a Date (first of the month).
    inDF1$Date <- paste0(as.character(inDF1$month), "-1")
    inDF1$Date <- as.Date(inDF1$Date, "%y-%b-%d")
    inDF1$live_g <- inDF1$grasses_live_g + inDF1$forbs_g

    # Harmonize both sources to Ring / Date / Live_g / Dead_g / Total_g.
    tempDF1 <- data.frame(inDF1$ring, inDF1$Date, inDF1$live_g, inDF1$dead_g, inDF1$total_g)
    colnames(tempDF1) <- c("Ring", "Date", "Live_g", "Dead_g", "Total_g")

    # Matthias's harvest has a single fixed date.
    tempDF2 <- data.frame(inDF2$Ring, "2017-05-01", inDF2$LiveBiomassDW, inDF2$DeadBiomassDW,
                          inDF2$LiveBiomassDW + inDF2$DeadBiomassDW)
    colnames(tempDF2) <- c("Ring", "Date", "Live_g", "Dead_g", "Total_g")

    ### combine data
    myDF <- rbind(tempDF1, tempDF2)

    ### average across rings and dates
    liveDF <- summaryBy(Live_g~Date+Ring,data=myDF,FUN=mean,keep.names=T,na.rm=T)
    deadDF <- summaryBy(Dead_g~Date+Ring,data=myDF,FUN=mean,keep.names=T,na.rm=T)
    totDF <- summaryBy(Total_g~Date+Ring,data=myDF,FUN=mean,keep.names=T,na.rm=T)

    ### convert from g per 0.1 m-2 to g/m2, and make an assumption for C fraction
    outDF <- cbind(liveDF, deadDF$Dead_g, totDF$Total_g)
    names(outDF) <- c("Date", "Ring", "Live_g", "Dead_g", "Total_g")
    outDF$Live_g_C_m2 <- outDF$Live_g / strip_area * c_frac
    outDF$Dead_g_C_m2 <- outDF$Dead_g / strip_area * c_frac
    outDF$Total_g_C_m2 <- outDF$Live_g_C_m2 + outDF$Dead_g_C_m2

    ### count number of days between two dates
    d <- unique(outDF$Date)
    b <- count_ndays(d)

    ### convert into mg m-2 d-1
    # `each = 6` assumes exactly 6 rings per date -- TODO confirm this holds
    # for every harvest date in the combined table.
    outDF$ndays <- rep(b, each = 6)
    out <- dplyr::mutate(outDF,
                      Date = as.Date(outDF$Date, format = "%d/%m/%Y"),
                      Start_date = Date - ndays,
                      End_date = Date,
                      understorey_production_flux = Total_g_C_m2 * g_to_mg / ndays)

    ### drop NA rows
    out <- out[complete.cases(out),]
    # Also drop rows with non-finite flux (e.g. ndays == 0 -> Inf).
    df <- out[Reduce(`&`, lapply(out, is.finite)),]
    # Recompute interval length inclusively from the retained rows.
    df$ndays <- as.numeric(df$End_date - df$Start_date) + 1

    ### format dataframe to return
    out <-df[,c("Start_date", "End_date", "Date", "Ring","understorey_production_flux", "ndays")]
    colnames(out) <- c("Start_date", "End_date", "Date", "Ring",
                       "understorey_production_flux", "Days")

    return(out)
}
/modules/c_variables/understorey_c_production/make_understorey_production_flux.R
no_license
SoilTSSM/EucFACE_P_synthesis
R
false
false
2,877
r
make_understorey_aboveground_production_flux <- function(c_frac) { ### currently only Varsha's harvest data on HIEv download_understorey_aboveground_data() ### read in the data inDF1 <- read.csv(file.path(getToPath(), "FACE_P0061_RA_PATHARE_UNDERSTORY_ABOVEGROUND_BIOMASS_L2_20150201_20160730.csv")) ### read in Matthias's harvest data inDF2 <- read.csv("~/Documents/Research/Projects/EucFACE_C_Balance/R_repo/temp_files/EucFACE_GrassStrip_Harvest_20170523.csv") ### process inDFs inDF1$Date <- paste0(as.character(inDF1$month), "-1") inDF1$Date <- as.Date(inDF1$Date, "%y-%b-%d") inDF1$live_g <- inDF1$grasses_live_g + inDF1$forbs_g tempDF1 <- data.frame(inDF1$ring, inDF1$Date, inDF1$live_g, inDF1$dead_g, inDF1$total_g) colnames(tempDF1) <- c("Ring", "Date", "Live_g", "Dead_g", "Total_g") tempDF2 <- data.frame(inDF2$Ring, "2017-05-01", inDF2$LiveBiomassDW, inDF2$DeadBiomassDW, inDF2$LiveBiomassDW + inDF2$DeadBiomassDW) colnames(tempDF2) <- c("Ring", "Date", "Live_g", "Dead_g", "Total_g") ### combine data myDF <- rbind(tempDF1, tempDF2) ### average across rings and dates liveDF <- summaryBy(Live_g~Date+Ring,data=myDF,FUN=mean,keep.names=T,na.rm=T) deadDF <- summaryBy(Dead_g~Date+Ring,data=myDF,FUN=mean,keep.names=T,na.rm=T) totDF <- summaryBy(Total_g~Date+Ring,data=myDF,FUN=mean,keep.names=T,na.rm=T) ### convert from g per 0.1 m-2 to g/m2, and make an assumption for C fraction outDF <- cbind(liveDF, deadDF$Dead_g, totDF$Total_g) names(outDF) <- c("Date", "Ring", "Live_g", "Dead_g", "Total_g") outDF$Live_g_C_m2 <- outDF$Live_g / strip_area * c_frac outDF$Dead_g_C_m2 <- outDF$Dead_g / strip_area * c_frac outDF$Total_g_C_m2 <- outDF$Live_g_C_m2 + outDF$Dead_g_C_m2 ### count number of days between two dates d <- unique(outDF$Date) b <- count_ndays(d) ### convert into mg m-2 d-1 outDF$ndays <- rep(b, each = 6) out <- dplyr::mutate(outDF, Date = as.Date(outDF$Date, format = "%d/%m/%Y"), Start_date = Date - ndays, End_date = Date, understorey_production_flux = Total_g_C_m2 * 
g_to_mg / ndays) ### drop NA rows out <- out[complete.cases(out),] df <- out[Reduce(`&`, lapply(out, is.finite)),] df$ndays <- as.numeric(df$End_date - df$Start_date) + 1 ### format dataframe to return out <-df[,c("Start_date", "End_date", "Date", "Ring","understorey_production_flux", "ndays")] colnames(out) <- c("Start_date", "End_date", "Date", "Ring", "understorey_production_flux", "Days") return(out) }
utils::globalVariables(c("input_table", "color.df", "dict.combine","edge_matrix_full"))
/R/data.R
permissive
xinxiong0238/CUInetwork
R
false
false
88
r
utils::globalVariables(c("input_table", "color.df", "dict.combine","edge_matrix_full"))
\name{fW.Gompertz} \alias{fW.Gompertz} \title{Effects of moisture on decomposition rates according to the Gompertz function} \description{Calculates the effects of water content on decomposition rates.} \usage{fW.Gompertz(theta, a = 0.824, b = 0.308)}\arguments{ \item{theta}{A scalar or vector containing values of volumetric soil water content.} \item{a}{Empirical parameter} \item{b}{Empirical parameter} } \references{I. Janssens, S. Dore, D. Epron, H. Lankreijer, N. Buchmann, B. Longdoz, J. Brossaud, L. Montagnani. 2003. Climatic Influences on Seasonal and Spatial Differences in Soil CO2 Efflux. In Valentini, R. (Ed.) Fluxes of Carbon, Water and Energy of European Forests. pp 235-253. Springer. } \author{Carlos A. Sierra <csierra@bgc-jena.mpg.de>, Markus Mueller <mamueller@bgc-jena.mpg.de>} \examples{ th=seq(0,1,0.01) xi=fW.Gompertz(theta=th) plot(th,xi,type="l",main="Effects of soil water content on decomposition rates", xlab="Volumetric soil water content (cm3 cm-3)",ylab=expression(xi)) }
/man/131.Rd
no_license
haiyangzhang798/SoilR
R
false
false
1,051
rd
\name{fW.Gompertz} \alias{fW.Gompertz} \title{Effects of moisture on decomposition rates according to the Gompertz function} \description{Calculates the effects of water content on decomposition rates.} \usage{fW.Gompertz(theta, a = 0.824, b = 0.308)}\arguments{ \item{theta}{A scalar or vector containing values of volumetric soil water content.} \item{a}{Empirical parameter} \item{b}{Empirical parameter} } \references{I. Janssens, S. Dore, D. Epron, H. Lankreijer, N. Buchmann, B. Longdoz, J. Brossaud, L. Montagnani. 2003. Climatic Influences on Seasonal and Spatial Differences in Soil CO2 Efflux. In Valentini, R. (Ed.) Fluxes of Carbon, Water and Energy of European Forests. pp 235-253. Springer. } \author{Carlos A. Sierra <csierra@bgc-jena.mpg.de>, Markus Mueller <mamueller@bgc-jena.mpg.de>} \examples{ th=seq(0,1,0.01) xi=fW.Gompertz(theta=th) plot(th,xi,type="l",main="Effects of soil water content on decomposition rates", xlab="Volumetric soil water content (cm3 cm-3)",ylab=expression(xi)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/date_classify.R \name{classify_quarter} \alias{classify_quarter} \title{classify by quarter} \usage{ classify_quarter(ts) } \arguments{ \item{ts}{vector of Date} } \value{ vector of Date } \description{ Makes a label for classifying dates by quarter. } \examples{ classify_quarter(as.Date("2020-01-01")) }
/man/classify_quarter.Rd
permissive
rea-osaka/retiex
R
false
true
372
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/date_classify.R \name{classify_quarter} \alias{classify_quarter} \title{classify by quarter} \usage{ classify_quarter(ts) } \arguments{ \item{ts}{vector of Date} } \value{ vector of Date } \description{ make label for classify by quarter. } \examples{ classify_quarter(as.Date("2020-01-01")) }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Do not modify this file since it was automatically generated from: % % findAsciiDoc.R % % by the Rdoc compiler part of the R.oo package. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \name{findAsciiDoc} \alias{findAsciiDoc.default} \alias{findAsciiDoc} \title{Locates the asciidoc executable} \description{ Locates the asciidoc executable on the current system. } \usage{ \method{findAsciiDoc}{default}(mustExist=TRUE, ..., verbose=FALSE) } \arguments{ \item{mustExist}{If \code{\link[base:logical]{TRUE}}, an exception is thrown if the executable could not be located.} \item{...}{Not used.} \item{verbose}{See \code{\link[R.utils]{Verbose}}.} } \value{ Returns the pathname to the executable, or \code{\link[base]{NULL}} if not found. } \details{ The 'asciidoc' executable is searched for as follows: \enumerate{ \item \code{Sys.which("asciidoc")} } } \author{Henrik Bengtsson} \keyword{file} \keyword{IO} \keyword{internal}
/man/findAsciiDoc.Rd
no_license
HenrikBengtsson/R.rsp
R
false
false
1,067
rd
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Do not modify this file since it was automatically generated from: % % findAsciiDoc.R % % by the Rdoc compiler part of the R.oo package. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \name{findAsciiDoc} \alias{findAsciiDoc.default} \alias{findAsciiDoc} \title{Locates the asciidoc executable} \description{ Locates the asciidoc executable on the current system. } \usage{ \method{findAsciiDoc}{default}(mustExist=TRUE, ..., verbose=FALSE) } \arguments{ \item{mustExist}{If \code{\link[base:logical]{TRUE}}, an exception is thrown if the executable could not be located.} \item{...}{Not used.} \item{verbose}{See \code{\link[R.utils]{Verbose}}.} } \value{ Returns the pathname to the executable, or \code{\link[base]{NULL}} if not found. } \details{ The 'asciidoc' executable is searched for as follows: \enumerate{ \item \code{Sys.which("asciidoc")} } } \author{Henrik Bengtsson} \keyword{file} \keyword{IO} \keyword{internal}
# day3.R -- GLM exercises: Titanic survival (binary and proportion data)
# and UN infant-mortality vs GDP, including overdispersion handling.
library(tidyverse)
library(performance)  # provides binned_residuals() and compare_performance(), used below

#GLMs####
titanic<-read.csv("datasets/titanic_long.csv")
table(titanic$class, titanic$survived)
titanic %>%
  group_by(class, survived) %>%
  summarise(count = n())

##class####
# Logistic regression: survival as a function of passenger class.
tit.glm <- glm(survived ~ class, data = titanic, family = binomial)
library(effects)
allEffects(tit.glm)
summary(allEffects(tit.glm))
library(modelbased)
estimate_means(tit.glm)
estimate_contrasts(tit.glm)
#kable(xtable::xtable(tit.glm), digits = 2) #this is for r markdown. Will produce a table
library(visreg)
visreg(tit.glm, scale = "response", rug = FALSE)
library(sjPlot)
sjPlot::plot_model(tit.glm, type = "eff")

#check if residuals are inside the error bounds.
binned_residuals(tit.glm)
library(DHARMa)
simulateResiduals(tit.glm, plot = TRUE)

##sex####
tit.sex<-glm(formula = survived ~ sex, family = binomial, data = titanic)
summary(tit.sex)
#model checking
simulateResiduals(tit.sex, plot = TRUE)

##sex and class#####
## Did women have higher survival because they travelled more in first class?
#lets look at data
table(titanic$class, titanic$survived, titanic$sex)

#two ways to look at it:
##additive models and
##interactive (multiplication) models

#additive: class and sex effects are independent on the link scale
tit.sex.class.add<-glm(formula = survived ~ class + sex, family = binomial, data = titanic)
plot_model(tit.sex.class.add, type = "est")
par(mfrow=c(2,2))
plot(tit.sex.class.add)
visreg(tit.sex.class.add)

#interactive: the sex effect may differ by class
tit.sex.class.int<-glm(formula = survived ~ class * sex, family = binomial, data = titanic)
plot_model(tit.sex.class.int, type = "int")#type is interactive
par(mfrow=c(2,2))
plot(tit.sex.class.int)
# FIX: was visreg(tit.seq.class.int) -- a typo for an object that doesn't exist.
visreg(tit.sex.class.int)

##compare additive vs interactive fits
compare_performance(tit.sex.class.add, tit.sex.class.int)

#Other questions:
#Is survival related to age?
#Are age effects dependent on sex?

#Logistic regression for proportion data ie moves from 0-1
tit.prop<-read.csv("./datasets/titanic_prop.csv")
#Yes is people who survived and No is those that did not
prop.glm <- glm(cbind(Yes, No) ~ Class, data = tit.prop, family = binomial)
visreg(prop.glm)
cbind(`tit.prop`$Yes, `tit.prop`$No)

###UN gdp data
gdp<-read.csv("datasets/UN_GDP_infantmortality.csv")
# Binomial GLM on deaths per 1000 births as successes/failures.
gdp.glm <- glm(cbind(infant.mortality, 1000 - infant.mortality) ~ gdp, data = gdp, family = binomial)
#note:we are using binomial although both variables are continous, because lm isn't doing well
#dont be afrid to try differnt models for your data , then compare
allEffects(gdp.glm)
visreg(gdp.glm, scale = "response")
points(infant.mortality/1000 ~ gdp, data = gdp)
simulateResiduals(gdp.glm, plot = TRUE)

##Overdispersion
#Testing for overdispersion
library(DHARMa)
simres <- simulateResiduals(gdp.glm, refit = TRUE)
testDispersion(simres, plot = FALSE)
#quasibinomial allows us to model overdispersed binomial data
gdp.overdisp <- glm(cbind(infant.mortality, 1000 - infant.mortality) ~ gdp,
                    data = gdp, family = quasibinomial)
coef(gdp.overdisp)
coef(gdp.glm)
#Mean estimates do not change after accounting for overdispersion
#standard error in relation to estimate(dispersion)
#But standard errors (uncertainty) do!

#Not everything has to be linear…
#Residuals show non-linear pattern

#Assignment
soccer <- read.csv("datasets/soccer.csv")
soccer
lm(Nshots~Scored,data=soccer)
#data too small

##Two
seedset <- read.csv("datasets/seedset.csv")
lm(pcmass~seeds, data=seedset)
glm(cbind(infant.mortality, 1000 - infant.mortality) ~ gdp, data = gdp, family = binomial)
/scripts/day3.R
no_license
marthaluka/GLMs-in-R
R
false
false
3,557
r
library(tidyverse) #GLMs#### titanic<-read.csv("datasets/titanic_long.csv") table(titanic$class, titanic$survived) titanic %>% group_by(class, survived) %>% summarise(count = n()) ##class#### tit.glm <- glm(survived ~ class, data = titanic, family = binomial) library(effects) allEffects(tit.glm) summary(allEffects(tit.glm)) library(modelbased) estimate_means(tit.glm) estimate_contrasts(tit.glm) #kable(xtable::xtable(tit.glm), digits = 2) #this is for r markdown. Will produce a table library(visreg) visreg(tit.glm, scale = "response", rug = FALSE) library(sjPlot) sjPlot::plot_model(tit.glm, type = "eff") #check if residuals are inside the error bounds. binned_residuals(tit.glm) library(DHARMa) simulateResiduals(tit.glm, plot = TRUE) ##sex#### tit.sex<-glm(formula = survived ~ sex, family = binomial, data = titanic) summary(tit.sex) #model checking simulateResiduals(tit.sex, plot = TRUE) ##sex and class##### ## Did women have higher survival because they travelled more in first class? #lets look at data table(titanic$class, titanic$survived, titanic$sex) #two ways to look at it ##additive models and ##interactive (multiplication) models #additive tit.sex.class.add<-glm(formula = survived ~ class + sex, family = binomial, data = titanic) plot_model(tit.sex.class.add, type = "est") #not sure what this does. par(mfrow=c(2,2)) plot(tit.sex.class.add) visreg(tit.sex.class.add) #interactive tit.sex.class.int<-glm(formula = survived ~ class * sex, family = binomial, data = titanic) plot_model(tit.sex.class.int, type = "int")#type is interactive par(mfrow=c(2,2)) plot(tit.sex.class.int) visreg(tit.seq.class.int) ##compare compare_performance(tit.sex.class.add, tit.sex.class.int) #Other questions: #Is survival related to age? #Are age effects dependent on sex? 
#Logistic regression for proportion data ie moves from 0-1 tit.prop<-read.csv("./datasets/titanic_prop.csv") #Yes is people who survived and No is those that did not prop.glm <- glm(cbind(Yes, No) ~ Class, data = tit.prop, family = binomial) visreg(prop.glm) cbind(`tit.prop`$Yes, `tit.prop`$No) ###UN gdp data gdp<-read.csv("datasets/UN_GDP_infantmortality.csv") gdp.glm <- glm(cbind(infant.mortality, 1000 - infant.mortality) ~ gdp, data = gdp, family = binomial) #note:we are using binomial although both variables are continous, because lm isn't doing well #dont be afrid to try differnt models for your data , then compare allEffects(gdp.glm) visreg(gdp.glm, scale = "response") points(infant.mortality/1000 ~ gdp, data = gdp) simulateResiduals(gdp.glm, plot = TRUE) ##Overdispersion #Testing for overdispersion library(DHARMa) simres <- simulateResiduals(gdp.glm, refit = TRUE) testDispersion(simres, plot = FALSE) #quasibinomial allows us to model overdispersed binomial data gdp.overdisp <- glm(cbind(infant.mortality, 1000 - infant.mortality) ~ gdp, data = gdp, family = quasibinomial) coef(gdp.overdisp) coef(gdp.glm) #Mean estimates do not change after accounting for overdispersion #standard error in relation to estimate(dispersion) #But standard errors (uncertainty) do! #Not everything has to be linear… #Residuals show non-linear pattern #Assignment soccer <- read.csv("datasets/soccer.csv") soccer lm(Nshots~Scored,data=soccer) #data too small ##Two seedset <- read.csv("datasets/seedset.csv") lm(pcmass~seeds, data=seedset) glm(cbind(infant.mortality, 1000 - infant.mortality) ~ gdp, data = gdp, family = binomial)
% Generated by roxygen2 (4.0.0): do not edit by hand \name{reload} \alias{reload} \title{Unload and reload package.} \usage{ reload(pkg = ".", quiet = FALSE) } \arguments{ \item{pkg}{package description, can be path or package name. See \code{\link{as.package}} for more information} \item{quiet}{if \code{TRUE} suppresses output from this function.} } \description{ This attempts to unload and reload a package. If the package is not loaded already, it does nothing. It's not always possible to cleanly unload a package: see the caveats in \code{\link{unload}} for the some of the potential failure points. If in doubt, restart R and reload the package with \code{\link{library}}. } \examples{ \dontrun{ # Reload package that is in current directory reload(".") # Reload package that is in ./ggplot2/ reload("ggplot2/") # Can use inst() to find the package path # This will reload the installed ggplot2 package reload(inst("ggplot2")) } }
/man/reload.Rd
no_license
kingo55/devtools
R
false
false
945
rd
% Generated by roxygen2 (4.0.0): do not edit by hand \name{reload} \alias{reload} \title{Unload and reload package.} \usage{ reload(pkg = ".", quiet = FALSE) } \arguments{ \item{pkg}{package description, can be path or package name. See \code{\link{as.package}} for more information} \item{quiet}{if \code{TRUE} suppresses output from this function.} } \description{ This attempts to unload and reload a package. If the package is not loaded already, it does nothing. It's not always possible to cleanly unload a package: see the caveats in \code{\link{unload}} for the some of the potential failure points. If in doubt, restart R and reload the package with \code{\link{library}}. } \examples{ \dontrun{ # Reload package that is in current directory reload(".") # Reload package that is in ./ggplot2/ reload("ggplot2/") # Can use inst() to find the package path # This will reload the installed ggplot2 package reload(inst("ggplot2")) } }
# Assignment: ASSIGNMENT 4 # Name: Holdman, Sarah # Date: 2020-09-28 ## Load the ggplot2 package library(ggplot2) theme_set(theme_minimal()) ## Set the working directory to the root of your DSC 520 directory setwd("C:/Users/shold/Documents/GITHUB/HOLDMAN_DSC520/dsc520") ## Load the `data/r4ds/heights.csv` to heights_df <- read.csv("data/r4ds/heights.csv") # https://ggplot2.tidyverse.org/reference/geom_boxplot.html ## Create boxplots of sex vs. earn and race vs. earn using `geom_point()` and `geom_boxplot()` ## sex vs. earn ggplot(heights_df, aes(x=sex, y=earn)) + geom_point()+ geom_boxplot() ## race vs. earn ggplot(heights_df, aes(x=race, y=earn)) + geom_point()+ geom_boxplot() # https://ggplot2.tidyverse.org/reference/geom_bar.html ## Using `geom_bar()` plot a bar chart of the number of records for each `sex` ggplot(heights_df, aes(sex)) + geom_bar() ## Using `geom_bar()` plot a bar chart of the number of records for each race ggplot(heights_df, aes(race)) + geom_bar() ## Create a horizontal bar chart by adding `coord_flip()` to the previous plot ggplot(heights_df, aes(race)) + geom_bar() + coord_flip() # https://www.rdocumentation.org/packages/ggplot2/versions/3.3.0/topics/geom_path ## Load the file `"data/nytimes/covid-19-data/us-states.csv"` and ## assign it to the `covid_df` dataframe covid_df <- read.csv("data/nytimes/covid-19-data/us-states.csv") ## Parse the date column using `as.Date()`` covid_df$date <- as.Date(covid_df$date) ## Create three dataframes named `california_df`, `ny_df`, and `florida_df` ## containing the data from California, New York, and Florida california_df <- covid_df[ which( covid_df$state == "California"), ] ny_df <- covid_df[which( covid_df$state == "New York"), ] florida_df <- covid_df[ which( covid_df$state == "Florida"), ] ## Plot the number of cases in Florida using `geom_line()` ggplot(data=florida_df, aes(x=date, y=cases, group=1)) + geom_line() ## Add lines for New York and California to the plot ggplot(data=florida_df, 
aes(x=date, group=1)) + geom_line(aes(y = cases)) + geom_line(data=ny_df, aes(y = cases)) + geom_line(data=california_df, aes(y = cases)) ## Use the colors "darkred", "darkgreen", and "steelblue" for Florida, New York, and California ggplot(data=florida_df, aes(x=date, group=1)) + geom_line(aes(y = cases), color = "darkred") + geom_line(data=ny_df, aes(y = cases), color="darkgreen") + geom_line(data=california_df, aes(y = cases), color="steelblue") ## Add a legend to the plot using `scale_colour_manual` ## Add a blank (" ") label to the x-axis and the label "Cases" to the y axis ggplot(data=florida_df, aes(x=date, group=1)) + geom_line(aes(y = cases, colour = "Florida")) + geom_line(data=ny_df, aes(y = cases,colour="New York")) + geom_line(data=california_df, aes(y = cases, colour="California")) + scale_colour_manual("", breaks = c("Florida", "New York", "California"), values = c("darkred", "darkgreen", "Steelblue")) + xlab(" ") + ylab("Cases") ## Scale the y axis using `scale_y_log10()` ggplot(data=florida_df, aes(x=date, group=1)) + geom_line(aes(y = cases, colour = "Florida")) + geom_line(data=ny_df, aes(y = cases,colour="New York")) + geom_line(data=california_df, aes(y = cases, colour="California")) + scale_colour_manual("", breaks = c("Florida", "New York", "California"), values = c("darkred", "darkgreen", "Steelblue")) + xlab(" ") + ylab("Cases") + scale_y_log10()
/assignments/assignment04/assignment_04_HoldmanSarah.R
permissive
sjholdman/dsc520
R
false
false
3,522
r
# Assignment: ASSIGNMENT 4 # Name: Holdman, Sarah # Date: 2020-09-28 ## Load the ggplot2 package library(ggplot2) theme_set(theme_minimal()) ## Set the working directory to the root of your DSC 520 directory setwd("C:/Users/shold/Documents/GITHUB/HOLDMAN_DSC520/dsc520") ## Load the `data/r4ds/heights.csv` to heights_df <- read.csv("data/r4ds/heights.csv") # https://ggplot2.tidyverse.org/reference/geom_boxplot.html ## Create boxplots of sex vs. earn and race vs. earn using `geom_point()` and `geom_boxplot()` ## sex vs. earn ggplot(heights_df, aes(x=sex, y=earn)) + geom_point()+ geom_boxplot() ## race vs. earn ggplot(heights_df, aes(x=race, y=earn)) + geom_point()+ geom_boxplot() # https://ggplot2.tidyverse.org/reference/geom_bar.html ## Using `geom_bar()` plot a bar chart of the number of records for each `sex` ggplot(heights_df, aes(sex)) + geom_bar() ## Using `geom_bar()` plot a bar chart of the number of records for each race ggplot(heights_df, aes(race)) + geom_bar() ## Create a horizontal bar chart by adding `coord_flip()` to the previous plot ggplot(heights_df, aes(race)) + geom_bar() + coord_flip() # https://www.rdocumentation.org/packages/ggplot2/versions/3.3.0/topics/geom_path ## Load the file `"data/nytimes/covid-19-data/us-states.csv"` and ## assign it to the `covid_df` dataframe covid_df <- read.csv("data/nytimes/covid-19-data/us-states.csv") ## Parse the date column using `as.Date()`` covid_df$date <- as.Date(covid_df$date) ## Create three dataframes named `california_df`, `ny_df`, and `florida_df` ## containing the data from California, New York, and Florida california_df <- covid_df[ which( covid_df$state == "California"), ] ny_df <- covid_df[which( covid_df$state == "New York"), ] florida_df <- covid_df[ which( covid_df$state == "Florida"), ] ## Plot the number of cases in Florida using `geom_line()` ggplot(data=florida_df, aes(x=date, y=cases, group=1)) + geom_line() ## Add lines for New York and California to the plot ggplot(data=florida_df, 
aes(x=date, group=1)) + geom_line(aes(y = cases)) + geom_line(data=ny_df, aes(y = cases)) + geom_line(data=california_df, aes(y = cases)) ## Use the colors "darkred", "darkgreen", and "steelblue" for Florida, New York, and California ggplot(data=florida_df, aes(x=date, group=1)) + geom_line(aes(y = cases), color = "darkred") + geom_line(data=ny_df, aes(y = cases), color="darkgreen") + geom_line(data=california_df, aes(y = cases), color="steelblue") ## Add a legend to the plot using `scale_colour_manual` ## Add a blank (" ") label to the x-axis and the label "Cases" to the y axis ggplot(data=florida_df, aes(x=date, group=1)) + geom_line(aes(y = cases, colour = "Florida")) + geom_line(data=ny_df, aes(y = cases,colour="New York")) + geom_line(data=california_df, aes(y = cases, colour="California")) + scale_colour_manual("", breaks = c("Florida", "New York", "California"), values = c("darkred", "darkgreen", "Steelblue")) + xlab(" ") + ylab("Cases") ## Scale the y axis using `scale_y_log10()` ggplot(data=florida_df, aes(x=date, group=1)) + geom_line(aes(y = cases, colour = "Florida")) + geom_line(data=ny_df, aes(y = cases,colour="New York")) + geom_line(data=california_df, aes(y = cases, colour="California")) + scale_colour_manual("", breaks = c("Florida", "New York", "California"), values = c("darkred", "darkgreen", "Steelblue")) + xlab(" ") + ylab("Cases") + scale_y_log10()
#' who_cities #' List cities currently being analysed #' @export who_cities <- function () { c ("accra", "bristol", "kathmandu") } match_city <- function (city = NULL) { cities <- c ("accra", "kathmandu") if (!is.null (city)) city <- match.arg (tolower (city), cities) else city <- cities return (city) } #' upload_popdens_nodes #' @export upload_popdens_nodes <- function () { flist <- file.path (c ("accra", "kathmandu"), "osm", "popdens_nodes.Rds") junk <- lapply (flist, function (i) { message ("uploading ", i) piggyback::pb_upload (i, repo = "ATFutures/who-data", tag = "v0.0.3-osmdata") }) } #' download_popdens_nodes #' @param city Default uploads both cities, otherwise one of Accra or Kathmandu. #' @export download_popdens_nodes <- function (city = NULL) { flist <- file.path (match_city (city), "osm", "popdens_nodes.Rds") junk <- lapply (flist, function (i) { message ("downloading ", i) piggyback::pb_download (i, repo = "ATFutures/who-data", tag = "v0.0.3-osmdata") }) } #' upload_flows #' @param city Default uploads both cities, otherwise one of Accra or Kathmandu. #' @param overwrite Should generally be set to \code{TRUE}. 
#' @export upload_flows <- function (city = NULL, overwrite = TRUE) { flist <- list.files (file.path (match_city (city), "flows"), full.names = TRUE) junk <- lapply (flist, function (i) { message ("uploading ", i) piggyback::pb_upload (i, repo = "ATFutures/who-data", tag = "v0.0.4-flowlayers", overwrite = overwrite) }) } #' download_flows #' @export download_flows <- function () { flist <- file.path ("flows", c ("flow_foot_act_bus_k15.Rds", "flow_foot_bus_act_k15.Rds", "flow_foot_bus_res_k15.Rds", "flow_foot_res_bus_k15.Rds")) #flist <- unlist (lapply (flist, function (i) # c (paste0 ("accra/", i), # paste0 ("kathmandu/", i)))) # TODO: Reinstate the above once Kathmandu flows have been redone # MP: Nov 2018 flist <- unlist (lapply (flist, function (i) (paste0 ("accra/", i)))) junk <- lapply (flist, function (i) { message ("downloading ", i) piggyback::pb_download (i, repo = "ATFutures/who-data", tag = "v0.0.4-flowlayers") }) } #' download_who_data #' #' Download all WHO data from github repo via \pkg{piggyback}. This function can #' be used both for initial download, and for periodic refreshment of data. Only #' those data which have been updated will be downloaded anew. #' @export download_who_data <- function () { download_worldpop_tiffiles () download_popdens_nodes () download_osm () download_bristol_pop () download_flows () }
/R/utils.R
no_license
ATFutures/who-data
R
false
false
3,349
r
#' who_cities #' List cities currently being analysed #' @export who_cities <- function () { c ("accra", "bristol", "kathmandu") } match_city <- function (city = NULL) { cities <- c ("accra", "kathmandu") if (!is.null (city)) city <- match.arg (tolower (city), cities) else city <- cities return (city) } #' upload_popdens_nodes #' @export upload_popdens_nodes <- function () { flist <- file.path (c ("accra", "kathmandu"), "osm", "popdens_nodes.Rds") junk <- lapply (flist, function (i) { message ("uploading ", i) piggyback::pb_upload (i, repo = "ATFutures/who-data", tag = "v0.0.3-osmdata") }) } #' download_popdens_nodes #' @param city Default uploads both cities, otherwise one of Accra or Kathmandu. #' @export download_popdens_nodes <- function (city = NULL) { flist <- file.path (match_city (city), "osm", "popdens_nodes.Rds") junk <- lapply (flist, function (i) { message ("downloading ", i) piggyback::pb_download (i, repo = "ATFutures/who-data", tag = "v0.0.3-osmdata") }) } #' upload_flows #' @param city Default uploads both cities, otherwise one of Accra or Kathmandu. #' @param overwrite Should generally be set to \code{TRUE}. 
#' @export upload_flows <- function (city = NULL, overwrite = TRUE) { flist <- list.files (file.path (match_city (city), "flows"), full.names = TRUE) junk <- lapply (flist, function (i) { message ("uploading ", i) piggyback::pb_upload (i, repo = "ATFutures/who-data", tag = "v0.0.4-flowlayers", overwrite = overwrite) }) } #' download_flows #' @export download_flows <- function () { flist <- file.path ("flows", c ("flow_foot_act_bus_k15.Rds", "flow_foot_bus_act_k15.Rds", "flow_foot_bus_res_k15.Rds", "flow_foot_res_bus_k15.Rds")) #flist <- unlist (lapply (flist, function (i) # c (paste0 ("accra/", i), # paste0 ("kathmandu/", i)))) # TODO: Reinstate the above once Kathmandu flows have been redone # MP: Nov 2018 flist <- unlist (lapply (flist, function (i) (paste0 ("accra/", i)))) junk <- lapply (flist, function (i) { message ("downloading ", i) piggyback::pb_download (i, repo = "ATFutures/who-data", tag = "v0.0.4-flowlayers") }) } #' download_who_data #' #' Download all WHO data from github repo via \pkg{piggyback}. This function can #' be used both for initial download, and for periodic refreshment of data. Only #' those data which have been updated will be downloaded anew. #' @export download_who_data <- function () { download_worldpop_tiffiles () download_popdens_nodes () download_osm () download_bristol_pop () download_flows () }
# Purpose: Create figure 1 for manuscript. It's the presentation of the results from the Cox processes # Packages library(plyr) library(dplyr) library(ggplot2) # Load data alpha_hat <- tbl_df(read.table(file = "../analysis/alpha-hat.txt", sep = "\t", header = TRUE)) alpha_hat_cox <- filter(alpha_hat, process == "cox") # Generate plot p <- ggplot(alpha_hat_cox, aes(x = tests, y = alpha_hat)) + geom_pointrange(aes(ymin = ci_low, ymax = ci_high), size = 0.2) + geom_path() + ggtitle("Family wise error rates (FWE), with 95% confidence intervals") + xlab("Number of ranges tested") + ylab("FWE") + theme_classic() + theme(plot.title = element_text(size = 8, family = ""), axis.title = element_text(size = 6, family = ""), axis.text = element_text(size = 6, family = "")) png(file = "forest-plot.png", res = 300, width = 3.3, height = 2, units = 'in') p dev.off()
/docs/forest-plot.R
no_license
mloop/kdiff-type1-error-rate
R
false
false
920
r
# Purpose: Create figure 1 for manuscript. It's the presentation of the results from the Cox processes # Packages library(plyr) library(dplyr) library(ggplot2) # Load data alpha_hat <- tbl_df(read.table(file = "../analysis/alpha-hat.txt", sep = "\t", header = TRUE)) alpha_hat_cox <- filter(alpha_hat, process == "cox") # Generate plot p <- ggplot(alpha_hat_cox, aes(x = tests, y = alpha_hat)) + geom_pointrange(aes(ymin = ci_low, ymax = ci_high), size = 0.2) + geom_path() + ggtitle("Family wise error rates (FWE), with 95% confidence intervals") + xlab("Number of ranges tested") + ylab("FWE") + theme_classic() + theme(plot.title = element_text(size = 8, family = ""), axis.title = element_text(size = 6, family = ""), axis.text = element_text(size = 6, family = "")) png(file = "forest-plot.png", res = 300, width = 3.3, height = 2, units = 'in') p dev.off()
## ================================================================ ## HEADER ## ================================================================ ## Following two functions can be easily used to calculate inverse ## of a square invertible matrix ## - "efficiency" feature being the caching of the inverse ## while repeatedly feeding identical input matrix ## ================================================================ ## Example of application: ## ================================================================ ## abc <- makeCacheMatrix(matrix(1:4,2,2)) ## cacheSolve(abc) ## ================================================================ ## "makeCacheMatrix" ## ================================================================ ## Creates vector of all necessary functions, which are being later ## called by the "cacheSolve" function makeCacheMatrix <- function(x = matrix()) { Inv <- NULL set <- function(y) { x <<- y Inv <<- NULL } get <- function() x setinverse <- function(inverse) Inv <<- inverse getinverse <- function() Inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## ================================================================ ## "cacheSolve" ## ================================================================ ## Returns inverse of a matrix, either by retrieving it from cache ## (if available) or by applying the built-in "solve" function cacheSolve <- function(x, ...) { Inv <- x$getinverse() if(!is.null(Inv)) { message("getting cached data") return(Inv) } data <- x$get() Inv <- solve(data) x$setinverse(Inv) Inv }
/cachematrix.R
no_license
arehurek/ProgrammingAssignment2
R
false
false
1,804
r
## ================================================================ ## HEADER ## ================================================================ ## Following two functions can be easily used to calculate inverse ## of a square invertible matrix ## - "efficiency" feature being the caching of the inverse ## while repeatedly feeding identical input matrix ## ================================================================ ## Example of application: ## ================================================================ ## abc <- makeCacheMatrix(matrix(1:4,2,2)) ## cacheSolve(abc) ## ================================================================ ## "makeCacheMatrix" ## ================================================================ ## Creates vector of all necessary functions, which are being later ## called by the "cacheSolve" function makeCacheMatrix <- function(x = matrix()) { Inv <- NULL set <- function(y) { x <<- y Inv <<- NULL } get <- function() x setinverse <- function(inverse) Inv <<- inverse getinverse <- function() Inv list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## ================================================================ ## "cacheSolve" ## ================================================================ ## Returns inverse of a matrix, either by retrieving it from cache ## (if available) or by applying the built-in "solve" function cacheSolve <- function(x, ...) { Inv <- x$getinverse() if(!is.null(Inv)) { message("getting cached data") return(Inv) } data <- x$get() Inv <- solve(data) x$setinverse(Inv) Inv }
source("internal_v4.R") foldernames=c("Cell 1") sapply(foldernames, function(foldername){ r = readLines(con=file.path(paste(foldername, "/config.txt", sep=""))) get <- function(type){i = grep(type,r); strsplit(r[i], "=")[[1]][2]} as.v <- function(ch){as.numeric(strsplit(ch,",")[[1]])} model=get("model") {if (model=="Gaussian(prec)"){ xlim = as.v(get("xlim")) ylim = as.v(get("ylim")) histbins = as.v(get("histbins")) histvalues = as.v(get("histvalues")) if (length(grep("pbackground",r))==0 | length(grep("alpha",r))==0){ useplabel=FALSE; pb=NULL; alpha=NULL } else { useplabel=TRUE; pb=as.numeric(get("pbackground")) alpha=as.numeric(get("alpha")) } if (length(grep("bestonly",r))==0) bestonly=FALSE else bestonly=as.numeric(get("bestonly"))>0 if (length(grep("rseq",r))==0) rseq=seq(10, 50, by=10) else { rparams=as.v(get("rseq")) rseq=seq(rparams[1], rparams[2], by=rparams[3]) } if (length(grep("thseq",r))==0) thseq=seq(100, 200, by=10) else { thparams=as.v(get("thseq")) thseq=seq(thparams[1], thparams[2], by=thparams[3]) } if (length(grep("clustermethod",r))==0) clustermethod="K" else { method=as.numeric(get("clustermethod")) if (method==1) clustermethod="K" else clustermethod="DBSCAN" } } else {stop("Haven't implemented anything else!")}} o = order(histbins); histbins=histbins[o]; histvalues=histvalues[o] f = approxfun(histbins, histvalues, yleft=histvalues[1], yright=histvalues[length(histvalues)]) cst=integrate(f, lower=histbins[o],upper=histbins[length(histbins)])$value psd <- function(sd){ log(f(sd))-log(cst) } minsd = histbins[1]; maxsd = histbins[length(histbins)] ld=list.dirs(foldername, recursive=FALSE) ld=ld[ld!=foldername] sapply(file.path(ld), function(foldername){ data= read.table(file.path(paste(foldername, "/ClusterCentroids_Ch2_v1.txt", sep=""))) pts = data[,1:2]; sds = data[,3]; res=Kclust(pts=pts, sds=sds, xlim=xlim, ylim=ylim, psd=psd, minsd=minsd, maxsd=maxsd, useplabel=useplabel, alpha=alpha, pb=pb, score=TRUE, rlabel=TRUE, report=TRUE, rseq=rseq, 
thseq=thseq, clustermethod=clustermethod) writeRes(res, file.path(paste(foldername, "/r_vs_thresh_ClusterCentroids_Ch2_v1.txt", sep="")), file.path(paste(foldername, "/labels", sep="")), bestonly=bestonly) }) })
/R code - Bayesian-based cluster analysis/Bayesian cluster analysis of molecular coordinates data for 1 color data/run_ClusterCentroids_Ch2_v4.R
permissive
Owenlab-UoB/DNA-PAINT-analysis
R
false
false
2,320
r
source("internal_v4.R") foldernames=c("Cell 1") sapply(foldernames, function(foldername){ r = readLines(con=file.path(paste(foldername, "/config.txt", sep=""))) get <- function(type){i = grep(type,r); strsplit(r[i], "=")[[1]][2]} as.v <- function(ch){as.numeric(strsplit(ch,",")[[1]])} model=get("model") {if (model=="Gaussian(prec)"){ xlim = as.v(get("xlim")) ylim = as.v(get("ylim")) histbins = as.v(get("histbins")) histvalues = as.v(get("histvalues")) if (length(grep("pbackground",r))==0 | length(grep("alpha",r))==0){ useplabel=FALSE; pb=NULL; alpha=NULL } else { useplabel=TRUE; pb=as.numeric(get("pbackground")) alpha=as.numeric(get("alpha")) } if (length(grep("bestonly",r))==0) bestonly=FALSE else bestonly=as.numeric(get("bestonly"))>0 if (length(grep("rseq",r))==0) rseq=seq(10, 50, by=10) else { rparams=as.v(get("rseq")) rseq=seq(rparams[1], rparams[2], by=rparams[3]) } if (length(grep("thseq",r))==0) thseq=seq(100, 200, by=10) else { thparams=as.v(get("thseq")) thseq=seq(thparams[1], thparams[2], by=thparams[3]) } if (length(grep("clustermethod",r))==0) clustermethod="K" else { method=as.numeric(get("clustermethod")) if (method==1) clustermethod="K" else clustermethod="DBSCAN" } } else {stop("Haven't implemented anything else!")}} o = order(histbins); histbins=histbins[o]; histvalues=histvalues[o] f = approxfun(histbins, histvalues, yleft=histvalues[1], yright=histvalues[length(histvalues)]) cst=integrate(f, lower=histbins[o],upper=histbins[length(histbins)])$value psd <- function(sd){ log(f(sd))-log(cst) } minsd = histbins[1]; maxsd = histbins[length(histbins)] ld=list.dirs(foldername, recursive=FALSE) ld=ld[ld!=foldername] sapply(file.path(ld), function(foldername){ data= read.table(file.path(paste(foldername, "/ClusterCentroids_Ch2_v1.txt", sep=""))) pts = data[,1:2]; sds = data[,3]; res=Kclust(pts=pts, sds=sds, xlim=xlim, ylim=ylim, psd=psd, minsd=minsd, maxsd=maxsd, useplabel=useplabel, alpha=alpha, pb=pb, score=TRUE, rlabel=TRUE, report=TRUE, rseq=rseq, 
thseq=thseq, clustermethod=clustermethod) writeRes(res, file.path(paste(foldername, "/r_vs_thresh_ClusterCentroids_Ch2_v1.txt", sep="")), file.path(paste(foldername, "/labels", sep="")), bestonly=bestonly) }) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllGenerics.R, R/CoreMethods.R \docType{methods} \name{findCellTypes} \alias{findCellTypes} \alias{findCellTypes.geneList} \alias{findCellTypes,SCFind,character-method} \title{queries cells that contain all the genes from the list} \usage{ findCellTypes(object, gene.list, datasets) findCellTypes.geneList(object, gene.list, datasets) \S4method{findCellTypes}{SCFind,character}(object, gene.list, datasets) } \arguments{ \item{object}{the \code{SCFind} object} \item{gene.list}{genes to be searched in the gene.index (Operators: "-gene" to exclude a gene | "*gene" either gene is expressed "*-gene" either gene is expressed to be excluded)} \item{datasets}{the datasets that will be considered} } \value{ a named numeric vector containing p-values } \description{ queries cells that contain all the genes from the list Find cell types associated with a given gene list. All cells returned express all of the genes in the given gene list }
/man/findCellTypes.Rd
permissive
thjimmylee/scfind
R
false
true
1,023
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllGenerics.R, R/CoreMethods.R \docType{methods} \name{findCellTypes} \alias{findCellTypes} \alias{findCellTypes.geneList} \alias{findCellTypes,SCFind,character-method} \title{queries cells that contain all the genes from the list} \usage{ findCellTypes(object, gene.list, datasets) findCellTypes.geneList(object, gene.list, datasets) \S4method{findCellTypes}{SCFind,character}(object, gene.list, datasets) } \arguments{ \item{object}{the \code{SCFind} object} \item{gene.list}{genes to be searched in the gene.index (Operators: "-gene" to exclude a gene | "*gene" either gene is expressed "*-gene" either gene is expressed to be excluded)} \item{datasets}{the datasets that will be considered} } \value{ a named numeric vector containing p-values } \description{ queries cells that contain all the genes from the list Find cell types associated with a given gene list. All cells returned express all of the genes in the given gene list }
## version: 1.34 ## method: get ## path: /plugins/{name}/json ## code: 200 NULL data_frame <- function(...) { data.frame(..., stringsAsFactors = FALSE) } settings <- list( mounts = data_frame( name = "some-mount", description = "This is a mount that's used by the plugin.", settable = structure(list("string"), class = "AsIs"), source = "/var/lib/docker/plugins/", destination = "/mnt/state", type = "bind", options = I(list(c("rbind", "rw")))), env = "DEBUG=0", args = "string", devices = data_frame( name = "string", description = "string", settable = I(list("string")), path = "/dev/fuse")) config <- list( docker_version = "17.06.0-ce", description = "A sample volume plugin for Docker", documentation = "https://docs.docker.com/engine/extend/plugins/", interface = list( types = data_frame( prefix = NA_character_, capability = NA_character_, version = NA_character_), socket = "plugins.sock"), entrypoint = c("/usr/bin/sample-volume-plugin", "/data"), work_dir = "/bin/", user = list(uid = 1000L, gid = 1000L), network = list(type = "host"), linux = list( capabilities = c("CAP_SYS_ADMIN", "CAP_SYSLOG"), allow_all_devices = FALSE, devices = data_frame( name = "string", description = "string", settable = I(list("string")), path = "/dev/fuse")), propagated_mount = "/mnt/volumes", ipc_host = FALSE, pid_host = FALSE, mounts = data_frame( name = "some-mount", description = "This is a mount that's used by the plugin.", settable = structure(list("string"), class = "AsIs"), source = "/var/lib/docker/plugins/", destination = "/mnt/state", type = "bind", options = I(list(c("rbind", "rw")))), env = data_frame( name = "DEBUG", description = "If set, prints debug messages", settable = I(list(character(0))), value = "0"), args = list( name = "args", description = "command line arguments", settable = "string", value = "string"), rootfs = list( type = "layers", diff_ids = c( "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887", 
"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ))) list( id = "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", name = "tiborvass/sample-volume-plugin", enabled = TRUE, settings = settings, plugin_reference = "localhost:5000/tiborvass/sample-volume-plugin:latest", config = config)
/tests/testthat/sample_responses/v1.34/plugin_inspect.R
no_license
cran/stevedore
R
false
false
2,532
r
## version: 1.34 ## method: get ## path: /plugins/{name}/json ## code: 200 NULL data_frame <- function(...) { data.frame(..., stringsAsFactors = FALSE) } settings <- list( mounts = data_frame( name = "some-mount", description = "This is a mount that's used by the plugin.", settable = structure(list("string"), class = "AsIs"), source = "/var/lib/docker/plugins/", destination = "/mnt/state", type = "bind", options = I(list(c("rbind", "rw")))), env = "DEBUG=0", args = "string", devices = data_frame( name = "string", description = "string", settable = I(list("string")), path = "/dev/fuse")) config <- list( docker_version = "17.06.0-ce", description = "A sample volume plugin for Docker", documentation = "https://docs.docker.com/engine/extend/plugins/", interface = list( types = data_frame( prefix = NA_character_, capability = NA_character_, version = NA_character_), socket = "plugins.sock"), entrypoint = c("/usr/bin/sample-volume-plugin", "/data"), work_dir = "/bin/", user = list(uid = 1000L, gid = 1000L), network = list(type = "host"), linux = list( capabilities = c("CAP_SYS_ADMIN", "CAP_SYSLOG"), allow_all_devices = FALSE, devices = data_frame( name = "string", description = "string", settable = I(list("string")), path = "/dev/fuse")), propagated_mount = "/mnt/volumes", ipc_host = FALSE, pid_host = FALSE, mounts = data_frame( name = "some-mount", description = "This is a mount that's used by the plugin.", settable = structure(list("string"), class = "AsIs"), source = "/var/lib/docker/plugins/", destination = "/mnt/state", type = "bind", options = I(list(c("rbind", "rw")))), env = data_frame( name = "DEBUG", description = "If set, prints debug messages", settable = I(list(character(0))), value = "0"), args = list( name = "args", description = "command line arguments", settable = "string", value = "string"), rootfs = list( type = "layers", diff_ids = c( "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887", 
"sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" ))) list( id = "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078", name = "tiborvass/sample-volume-plugin", enabled = TRUE, settings = settings, plugin_reference = "localhost:5000/tiborvass/sample-volume-plugin:latest", config = config)
# Pseudo-3D surface plots for bivariate (empirical) CDFs ("mecdf" objects).
# All drawing works in a unit cube (u, v, w); .project() maps cube
# coordinates onto the 2D plotting region.

# Generic entry point: dispatch on the class of m.
plotbcdf <- function (m, ...) UseMethod ("plotbcdf")

# Plot a bivariate empirical CDF ("mecdf") object.
# regular=TRUE: evaluate the CDF on a regular res-by-res grid over
# (ulim, vlim). Otherwise evaluate at the observed data coordinates,
# giving an irregular grid (continuous case) or a block/step surface
# (discrete case).
plotbcdf.mecdf <- function (m, regular = TRUE, res = 16, ulim, vlim, ...)
{
    x <- m$x
    if (missing (ulim) ) ulim <- range (x [, 1])
    if (missing (vlim) ) vlim <- range (x [, 2])
    if (regular)
    {
        u <- seq (ulim [1], ulim [2], length = res)
        v <- seq (vlim [1], vlim [2], length = res)
        # evaluate the CDF at every grid node
        mst <- matrix (numeric (), nrow = res, ncol = res)
        for (i in seq_len (res) )
            for (j in seq_len (res) )
                mst [i, j] <- m (c (u [i], v [j]) )
        plotbcdf (mst, ..., labs = colnames (x) )
    }
    else
    {
        u <- sort (unique (x [, 1]) )
        v <- sort (unique (x [, 2]) )
        if (!m$continuous)
        {
            # discrete case: pad the grid by 10% on each side so the
            # outermost steps are visible
            uo <- 0.1 * diff (range (u) )
            vo <- 0.1 * diff (range (v) )
            u <- c (u [1] - uo, u, u [length (u)] + uo)
            v <- c (v [1] - vo, v, v [length (v)] + vo)
        }
        nu <- length (u)
        nv <- length (v)
        mst <- matrix (numeric (), nrow = nu, ncol = nv)
        for (i in seq_len (nu) )
            for (j in seq_len (nv) )
                mst [i, j] <- m (c (u [i], v [j]) )
        # rescale the evaluation points to [0, 1] for plotting
        urng <- range (u)
        vrng <- range (v)
        u <- (u - urng [1]) / diff (urng)
        v <- (v - vrng [1]) / diff (vrng)
        # draw the frame/axes only (m = NULL), then add the surface
        plotbcdf.matrix (NULL, ..., labs = colnames (x) )
        if (m$continuous) .plotbcdf.irregulargrid (mst, u, v)
        else .plotbcdf.bsf (mst, u, v)
    }
}

# Plot a square matrix of CDF values on a regular grid.
# mmin/mmax give the value range used to rescale m to [0, 1].
# With m = NULL only the frame/axis planes (and labels) are drawn.
plotbcdf.matrix <- function (m, mmin = 0, mmax = 1, ..., labs = NULL)
{
    p0 <- par (mar = c (1.75, 0.175, 0.9, 0.175) )
    # restore the graphics state even if drawing fails part-way
    on.exit (par (p0), add = TRUE)
    plot.new ()
    plot.window (c (-0.75, 0.75), c (0, 1.5) )
    .plotbcdf.plane.uv ()
    .plotbcdf.plane.u2 ()
    .plotbcdf.plane.v2 ()
    # ifo() is presumably a package helper ("if object", i.e. not NULL)
    # -- TODO confirm against the mecdf package
    if (ifo (labs) ) .plotbcdf.labs (rev (labs) )
    if (ifo (m) )
    {
        n <- nrow (m)
        if (n != ncol (m) ) stop ("square matrix required")
        m <- (m - mmin) / (mmax - mmin)
        .plotbcdf.regulargrid (m, n)
    }
}

# Draw the surface over a regular grid: one quadrilateral facet per
# grid cell, coloured by the slope along the facet's main diagonal.
.plotbcdf.regulargrid <- function (m, n)
{
    n2 <- n - 1
    incr <- 1 / n2
    for (i in seq_len (n2) )
        for (j in seq_len (n2) )
        {
            u1 <- (i - 1) * incr
            u2 <- u1 + incr
            v1 <- (j - 1) * incr
            v2 <- v1 + incr
            u <- c (u1, u1, u2, u2)
            v <- c (v1, v2, v2, v1)
            w <- c (m [i, j], m [i, j + 1], m [i + 1, j + 1], m [i + 1, j])
            dir <- (w [3] - w [1]) / sqrt (2 * incr^2)
            if (dir < 0) dir <- 0
            # scaling to interval (0, 1), tangent=1 -> dir=0.5
            dir <- 2 * atan (dir) / pi
            .plotbcdf.poly (u, v, w,
                getOption ("mecdf.surface")$line, .colinterp (dir) )
        }
}

# As .plotbcdf.regulargrid, but for grid lines at arbitrary
# (already rescaled) positions u, v.
.plotbcdf.irregulargrid <- function (m, u, v)
{
    nu <- length (u) - 1
    nv <- length (v) - 1
    for (i in seq_len (nu) )
        for (j in seq_len (nv) )
        {
            u1 <- u [i]
            u2 <- u [i + 1]
            v1 <- v [j]
            v2 <- v [j + 1]
            up <- c (u1, u1, u2, u2)
            vp <- c (v1, v2, v2, v1)
            w <- c (m [i, j], m [i, j + 1], m [i + 1, j + 1], m [i + 1, j])
            dir <- (w [3] - w [1]) / sqrt ( (u2 - u1)^2 + (v2 - v1)^2)
            if (dir < 0) dir <- 0
            dir <- 2 * atan (dir) / pi
            .plotbcdf.poly (up, vp, w,
                getOption ("mecdf.surface")$line, col = .colinterp (dir) )
        }
}

# Block step-function surface ("bsf") for the discrete case: each cell
# is drawn as a flat-topped box (top face plus two side walls);
# cells on the grid boundary use a lighter colour scheme.
.plotbcdf.bsf <- function (m, u, v)
{
    nu <- length (u) - 1
    nv <- length (v) - 1
    bc <- rgb (0.08, 0.5, 0.5)   # border colour
    fc <- rgb (0, 0.8, 0.1)      # side-wall fill colour (interior cells)
    ec <- rgb (0.7, 0.8, 0.9)    # fill colour for boundary cells
    # draw back-to-front so nearer boxes overpaint farther ones
    for (i in nu:1)
        for (j in nv:1)
        {
            u1 <- u [i]
            u2 <- u [i + 1]
            v1 <- v [j]
            v2 <- v [j + 1]
            up <- c (u1, u1, u2, u2)
            vp <- c (v1, v2, v2, v1)
            w0 <- m [i, j]
            w <- c (w0, w0, w0, w0)     # flat top at the cell's CDF value
            up1 <- c (u1, u1, u1, u1)   # side wall in the plane u = u1
            vp1 <- c (v1, v1, v2, v2)
            w1 <- c (0, w0, w0, 0)
            up2 <- c (u1, u1, u2, u2)   # side wall in the plane v = v1
            vp2 <- c (v1, v1, v1, v1)
            if (i == 1 || i == nu || j == 1 || j == nv)
            {
                .plotbcdf.poly (up1, vp1, w1, border = bc, col = ec)
                .plotbcdf.poly (up2, vp2, w1, border = bc, col = ec)
                .plotbcdf.poly (up, vp, w, border = bc, col = ec)
            }
            else
            {
                .plotbcdf.poly (up1, vp1, w1, border = bc, col = fc)
                .plotbcdf.poly (up2, vp2, w1, border = bc, col = fc)
                .plotbcdf.poly (up, vp, w, border = bc, col = "darkgreen")
            }
        }
}

# Base (u, v) plane, with small axis arrows starting at the origin.
.plotbcdf.plane.uv <- function ()
{
    .plotbcdf.poly (c (0, 0, 1, 1), c (0, 1, 1, 0), 0)
    p1 <- .project (0, 0, 0)
    p2 <- .project (0, 1, 0)
    p3 <- .project (1, 0, 0)
    z <- 0.03
    col <- getOption ("mecdf.frame")$line
    arrows (p1 [1] - z, p1 [2] - z, p2 [1] - z, p2 [2] - z,
        col = col, length = 0.12)
    arrows (p1 [1] + z, p1 [2] - z, p3 [1] + z, p3 [2] - z,
        col = col, length = 0.12)
}

# Back wall in the plane u = 1, with quarter grid lines.
.plotbcdf.plane.u2 <- function ()
{
    .plotbcdf.poly (1, c (0, 0, 1, 1), c (0, 1, 1, 0) )
    .plotbcdf.lines (1, 0:1, 0.25)
    .plotbcdf.lines (1, 0:1, 0.5)
    .plotbcdf.lines (1, 0:1, 0.75)
    .plotbcdf.lines (1, 0.25, 0:1)
    .plotbcdf.lines (1, 0.5, 0:1)
    .plotbcdf.lines (1, 0.75, 0:1)
}

# Back wall in the plane v = 1, with quarter grid lines.
.plotbcdf.plane.v2 <- function ()
{
    .plotbcdf.poly (c (0, 0, 1, 1), 1, c (0, 1, 1, 0) )
    .plotbcdf.lines (0:1, 1, 0.25)
    .plotbcdf.lines (0:1, 1, 0.5)
    .plotbcdf.lines (0:1, 1, 0.75)
    .plotbcdf.lines (0.25, 1, 0:1)
    .plotbcdf.lines (0.5, 1, 0:1)
    .plotbcdf.lines (0.75, 1, 0:1)
}

# Axis labels below the front corners of the base plane.
.plotbcdf.labs <- function (labs)
{
    x <- c (-0.525, 0.525)
    y <- 0.16
    text (x, y, labs)
}

# Project a polygon from cube coordinates and draw it.
.plotbcdf.poly <- function (u, v, w,
    border = getOption ("mecdf.frame")$line,
    col = getOption ("mecdf.frame")$fill)
{
    m <- .project (u, v, w)
    polygon (m [, 1], m [, 2], border = border, col = col)
}

# Project a polyline from cube coordinates and draw it.
.plotbcdf.lines <- function (u, v, w, col = getOption ("mecdf.frame")$line)
{
    m <- .project (u, v, w)
    lines (m [, 1], m [, 2], col = col)
}

# todo, fix this...
# Oblique projection of cube coordinates (u, v, w) onto the plot plane:
# u and v are rotated by +/- 45 degrees in the page, the result is
# vertically compressed by 0.71, and half the height w is added.
.project <- function (u, v, w)
{
    x <- u * cos (pi / 4) + v * cos (pi * 3 / 4)
    y <- u * sin (pi / 4) + v * sin (pi * 3 / 4)
    y <- 0.71 * y + 0.5 * w
    cbind (x, y)
}

# Linear interpolation between the two surface fill colours for
# x in [0, 1]; the colour options are stored as RGB triples.
.colinterp <- function (x)
{
    col1 <- getOption ("mecdf.surface")$fill1
    col2 <- getOption ("mecdf.surface")$fill2
    col <- col1 + x * (col2 - col1)
    rgb (col [1], col [2], col [3])
}
/bcdfplot.r
no_license
zeke75/PCA-bootstrap
R
false
false
5,241
r
# Pseudo-3D surface plots for bivariate (empirical) CDFs ("mecdf" objects).
# All drawing works in a unit cube (u, v, w); .project() maps cube
# coordinates onto the 2D plotting region.

# Generic entry point: dispatch on the class of m.
plotbcdf <- function (m, ...) UseMethod ("plotbcdf")

# Plot a bivariate empirical CDF ("mecdf") object.
# regular=TRUE: evaluate the CDF on a regular res-by-res grid over
# (ulim, vlim). Otherwise evaluate at the observed data coordinates,
# giving an irregular grid (continuous case) or a block/step surface
# (discrete case).
plotbcdf.mecdf <- function (m, regular = TRUE, res = 16, ulim, vlim, ...)
{
    x <- m$x
    if (missing (ulim) ) ulim <- range (x [, 1])
    if (missing (vlim) ) vlim <- range (x [, 2])
    if (regular)
    {
        u <- seq (ulim [1], ulim [2], length = res)
        v <- seq (vlim [1], vlim [2], length = res)
        # evaluate the CDF at every grid node
        mst <- matrix (numeric (), nrow = res, ncol = res)
        for (i in seq_len (res) )
            for (j in seq_len (res) )
                mst [i, j] <- m (c (u [i], v [j]) )
        plotbcdf (mst, ..., labs = colnames (x) )
    }
    else
    {
        u <- sort (unique (x [, 1]) )
        v <- sort (unique (x [, 2]) )
        if (!m$continuous)
        {
            # discrete case: pad the grid by 10% on each side so the
            # outermost steps are visible
            uo <- 0.1 * diff (range (u) )
            vo <- 0.1 * diff (range (v) )
            u <- c (u [1] - uo, u, u [length (u)] + uo)
            v <- c (v [1] - vo, v, v [length (v)] + vo)
        }
        nu <- length (u)
        nv <- length (v)
        mst <- matrix (numeric (), nrow = nu, ncol = nv)
        for (i in seq_len (nu) )
            for (j in seq_len (nv) )
                mst [i, j] <- m (c (u [i], v [j]) )
        # rescale the evaluation points to [0, 1] for plotting
        urng <- range (u)
        vrng <- range (v)
        u <- (u - urng [1]) / diff (urng)
        v <- (v - vrng [1]) / diff (vrng)
        # draw the frame/axes only (m = NULL), then add the surface
        plotbcdf.matrix (NULL, ..., labs = colnames (x) )
        if (m$continuous) .plotbcdf.irregulargrid (mst, u, v)
        else .plotbcdf.bsf (mst, u, v)
    }
}

# Plot a square matrix of CDF values on a regular grid.
# mmin/mmax give the value range used to rescale m to [0, 1].
# With m = NULL only the frame/axis planes (and labels) are drawn.
plotbcdf.matrix <- function (m, mmin = 0, mmax = 1, ..., labs = NULL)
{
    p0 <- par (mar = c (1.75, 0.175, 0.9, 0.175) )
    # restore the graphics state even if drawing fails part-way
    on.exit (par (p0), add = TRUE)
    plot.new ()
    plot.window (c (-0.75, 0.75), c (0, 1.5) )
    .plotbcdf.plane.uv ()
    .plotbcdf.plane.u2 ()
    .plotbcdf.plane.v2 ()
    # ifo() is presumably a package helper ("if object", i.e. not NULL)
    # -- TODO confirm against the mecdf package
    if (ifo (labs) ) .plotbcdf.labs (rev (labs) )
    if (ifo (m) )
    {
        n <- nrow (m)
        if (n != ncol (m) ) stop ("square matrix required")
        m <- (m - mmin) / (mmax - mmin)
        .plotbcdf.regulargrid (m, n)
    }
}

# Draw the surface over a regular grid: one quadrilateral facet per
# grid cell, coloured by the slope along the facet's main diagonal.
.plotbcdf.regulargrid <- function (m, n)
{
    n2 <- n - 1
    incr <- 1 / n2
    for (i in seq_len (n2) )
        for (j in seq_len (n2) )
        {
            u1 <- (i - 1) * incr
            u2 <- u1 + incr
            v1 <- (j - 1) * incr
            v2 <- v1 + incr
            u <- c (u1, u1, u2, u2)
            v <- c (v1, v2, v2, v1)
            w <- c (m [i, j], m [i, j + 1], m [i + 1, j + 1], m [i + 1, j])
            dir <- (w [3] - w [1]) / sqrt (2 * incr^2)
            if (dir < 0) dir <- 0
            # scaling to interval (0, 1), tangent=1 -> dir=0.5
            dir <- 2 * atan (dir) / pi
            .plotbcdf.poly (u, v, w,
                getOption ("mecdf.surface")$line, .colinterp (dir) )
        }
}

# As .plotbcdf.regulargrid, but for grid lines at arbitrary
# (already rescaled) positions u, v.
.plotbcdf.irregulargrid <- function (m, u, v)
{
    nu <- length (u) - 1
    nv <- length (v) - 1
    for (i in seq_len (nu) )
        for (j in seq_len (nv) )
        {
            u1 <- u [i]
            u2 <- u [i + 1]
            v1 <- v [j]
            v2 <- v [j + 1]
            up <- c (u1, u1, u2, u2)
            vp <- c (v1, v2, v2, v1)
            w <- c (m [i, j], m [i, j + 1], m [i + 1, j + 1], m [i + 1, j])
            dir <- (w [3] - w [1]) / sqrt ( (u2 - u1)^2 + (v2 - v1)^2)
            if (dir < 0) dir <- 0
            dir <- 2 * atan (dir) / pi
            .plotbcdf.poly (up, vp, w,
                getOption ("mecdf.surface")$line, col = .colinterp (dir) )
        }
}

# Block step-function surface ("bsf") for the discrete case: each cell
# is drawn as a flat-topped box (top face plus two side walls);
# cells on the grid boundary use a lighter colour scheme.
.plotbcdf.bsf <- function (m, u, v)
{
    nu <- length (u) - 1
    nv <- length (v) - 1
    bc <- rgb (0.08, 0.5, 0.5)   # border colour
    fc <- rgb (0, 0.8, 0.1)      # side-wall fill colour (interior cells)
    ec <- rgb (0.7, 0.8, 0.9)    # fill colour for boundary cells
    # draw back-to-front so nearer boxes overpaint farther ones
    for (i in nu:1)
        for (j in nv:1)
        {
            u1 <- u [i]
            u2 <- u [i + 1]
            v1 <- v [j]
            v2 <- v [j + 1]
            up <- c (u1, u1, u2, u2)
            vp <- c (v1, v2, v2, v1)
            w0 <- m [i, j]
            w <- c (w0, w0, w0, w0)     # flat top at the cell's CDF value
            up1 <- c (u1, u1, u1, u1)   # side wall in the plane u = u1
            vp1 <- c (v1, v1, v2, v2)
            w1 <- c (0, w0, w0, 0)
            up2 <- c (u1, u1, u2, u2)   # side wall in the plane v = v1
            vp2 <- c (v1, v1, v1, v1)
            if (i == 1 || i == nu || j == 1 || j == nv)
            {
                .plotbcdf.poly (up1, vp1, w1, border = bc, col = ec)
                .plotbcdf.poly (up2, vp2, w1, border = bc, col = ec)
                .plotbcdf.poly (up, vp, w, border = bc, col = ec)
            }
            else
            {
                .plotbcdf.poly (up1, vp1, w1, border = bc, col = fc)
                .plotbcdf.poly (up2, vp2, w1, border = bc, col = fc)
                .plotbcdf.poly (up, vp, w, border = bc, col = "darkgreen")
            }
        }
}

# Base (u, v) plane, with small axis arrows starting at the origin.
.plotbcdf.plane.uv <- function ()
{
    .plotbcdf.poly (c (0, 0, 1, 1), c (0, 1, 1, 0), 0)
    p1 <- .project (0, 0, 0)
    p2 <- .project (0, 1, 0)
    p3 <- .project (1, 0, 0)
    z <- 0.03
    col <- getOption ("mecdf.frame")$line
    arrows (p1 [1] - z, p1 [2] - z, p2 [1] - z, p2 [2] - z,
        col = col, length = 0.12)
    arrows (p1 [1] + z, p1 [2] - z, p3 [1] + z, p3 [2] - z,
        col = col, length = 0.12)
}

# Back wall in the plane u = 1, with quarter grid lines.
.plotbcdf.plane.u2 <- function ()
{
    .plotbcdf.poly (1, c (0, 0, 1, 1), c (0, 1, 1, 0) )
    .plotbcdf.lines (1, 0:1, 0.25)
    .plotbcdf.lines (1, 0:1, 0.5)
    .plotbcdf.lines (1, 0:1, 0.75)
    .plotbcdf.lines (1, 0.25, 0:1)
    .plotbcdf.lines (1, 0.5, 0:1)
    .plotbcdf.lines (1, 0.75, 0:1)
}

# Back wall in the plane v = 1, with quarter grid lines.
.plotbcdf.plane.v2 <- function ()
{
    .plotbcdf.poly (c (0, 0, 1, 1), 1, c (0, 1, 1, 0) )
    .plotbcdf.lines (0:1, 1, 0.25)
    .plotbcdf.lines (0:1, 1, 0.5)
    .plotbcdf.lines (0:1, 1, 0.75)
    .plotbcdf.lines (0.25, 1, 0:1)
    .plotbcdf.lines (0.5, 1, 0:1)
    .plotbcdf.lines (0.75, 1, 0:1)
}

# Axis labels below the front corners of the base plane.
.plotbcdf.labs <- function (labs)
{
    x <- c (-0.525, 0.525)
    y <- 0.16
    text (x, y, labs)
}

# Project a polygon from cube coordinates and draw it.
.plotbcdf.poly <- function (u, v, w,
    border = getOption ("mecdf.frame")$line,
    col = getOption ("mecdf.frame")$fill)
{
    m <- .project (u, v, w)
    polygon (m [, 1], m [, 2], border = border, col = col)
}

# Project a polyline from cube coordinates and draw it.
.plotbcdf.lines <- function (u, v, w, col = getOption ("mecdf.frame")$line)
{
    m <- .project (u, v, w)
    lines (m [, 1], m [, 2], col = col)
}

# todo, fix this...
# Oblique projection of cube coordinates (u, v, w) onto the plot plane:
# u and v are rotated by +/- 45 degrees in the page, the result is
# vertically compressed by 0.71, and half the height w is added.
.project <- function (u, v, w)
{
    x <- u * cos (pi / 4) + v * cos (pi * 3 / 4)
    y <- u * sin (pi / 4) + v * sin (pi * 3 / 4)
    y <- 0.71 * y + 0.5 * w
    cbind (x, y)
}

# Linear interpolation between the two surface fill colours for
# x in [0, 1]; the colour options are stored as RGB triples.
.colinterp <- function (x)
{
    col1 <- getOption ("mecdf.surface")$fill1
    col2 <- getOption ("mecdf.surface")$fill2
    col <- col1 + x * (col2 - col1)
    rgb (col [1], col [2], col [3])
}
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. x should be an invertible square matrix. Returns a list of
## accessors: setm/getm for the matrix, setinv/getinv for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL until computed
  setm <- function(y) {
    x <<- y
    m <<- NULL  # a new matrix invalidates any cached inverse
  }
  getm <- function() x
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(setm = setm, getm = getm, setinv = setinv, getinv = getinv)
}

## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), the cached inverse is returned instead of
## being recomputed.
## Fixes: the original called xM$getinverse(), which does not exist in the
## accessor list (only getinv), so every call errored; it also re-set the
## matrix via setm(), needlessly clearing the cache it was about to fill.
cacheSolve <- function(xM, ...) {
  m <- xM$getinv()
  if (!is.null(m)) {
    message("Getting Data ......................")
    return(m)
  }
  y <- xM$getm()
  m <- solve(y, ...)  # extra arguments are forwarded to solve()
  xM$setinv(m)
  m
}
/Assignment 2 - Lexical Scoping.R
no_license
Shubham-Nagle/Assignment-2-Lexical-Scoping
R
false
false
946
r
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. x should be an invertible square matrix. Returns a list of
## accessors: setm/getm for the matrix, setinv/getinv for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL until computed
  setm <- function(y) {
    x <<- y
    m <<- NULL  # a new matrix invalidates any cached inverse
  }
  getm <- function() x
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(setm = setm, getm = getm, setinv = setinv, getinv = getinv)
}

## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), the cached inverse is returned instead of
## being recomputed.
## Fixes: the original called xM$getinverse(), which does not exist in the
## accessor list (only getinv), so every call errored; it also re-set the
## matrix via setm(), needlessly clearing the cache it was about to fill.
cacheSolve <- function(xM, ...) {
  m <- xM$getinv()
  if (!is.null(m)) {
    message("Getting Data ......................")
    return(m)
  }
  y <- xM$getm()
  m <- solve(y, ...)  # extra arguments are forwarded to solve()
  xM$setinv(m)
  m
}
# Fuzz-test case for gjam's internal truncated-normal sampler tnormRcpp:
# invoke it with one extreme-magnitude argument set and print the
# structure of whatever it returns.
fuzz_args <- list(
  hi  = -2.14555482385487e+110,
  lo  = -2.1455548238548e+110,
  mu  = -2.14555482385487e+110,
  sig = -2.14555482385487e+110
)
result <- do.call(gjam:::tnormRcpp, fuzz_args)
str(result)
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610045255-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
194
r
# Fuzz-test case for gjam's internal truncated-normal sampler tnormRcpp:
# invoke it with one extreme-magnitude argument set and print the
# structure of whatever it returns.
fuzz_args <- list(
  hi  = -2.14555482385487e+110,
  lo  = -2.1455548238548e+110,
  mu  = -2.14555482385487e+110,
  sig = -2.14555482385487e+110
)
result <- do.call(gjam:::tnormRcpp, fuzz_args)
str(result)
################limma-voom
# Shiny dashboard tab for differential gene expression (DGE) analysis with
# limma-voom. Pure UI definition: every "limma_*" output ID is rendered
# server-side; the slider input IDs feed the volcano-plot thresholds.
limma_dge <- tabItem(tabName = "limma_dge",
                      br(), br(),
                      h2("Differential Gene Analysis with limma-voom"), br()
                      # control to launch the DGE run (rendered server-side)
                      ,uiOutput("limma_runDGE")
                      ,hr()
                      # summary box: p-value histogram, MA plot, text summary
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "DGE analysis Summary with limma-voom"
                               ,status = "primary", solidHeader = TRUE
                               ,fluidRow(column(width=4, plotOutput("limma_pvalues"))
                                         ,column(width=4, plotOutput("limma_plotMA"))
                                         ,column(width=4, uiOutput("limma_sumout"))
                               )
                      )
                      # full DGE results table plus a download control
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "limma-voom DGE results"
                               ,status = "primary", solidHeader = TRUE
                               ,dataTableOutput("limma_dge_res")
                               ,uiOutput("limma_dge_res_dld")
                      )
                      # volcano plot; footer sliders set the adjusted p-value
                      # cutoff, the log2 fold-change cutoff, and the x-limits
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "Volcano Plot"
                               ,status = "primary", solidHeader = TRUE
                               ,plotOutput("limma_volcano", height = "800px")
                               ,footer = tagList(fluidRow(column(2)
                                                          ,column(3, sliderInput("limma_vp_pval", "Define threshold for adjusted p-value:"
                                                                                 ,min = 0
                                                                                 ,max = 1
                                                                                 ,value = 0.05
                                                                                 ,step = 0.01))
                                                          ,column(3, sliderInput("limma_vp_lfc", "Define threshold for log2FC:"
                                                                                 ,min = 0
                                                                                 ,max = 10
                                                                                 ,value = 2
                                                                                 ,step = 0.1))
                                                          ,column(3, sliderInput("limma_vp_limit", "Define plot x-limit:"
                                                                                 ,min = -15
                                                                                 ,max = 15
                                                                                 ,value = c(-3, 3)
                                                                                 ,step = 1))
                               ))
                      )
                      # interactive (plotly) heatmap; limma_settings supplies
                      # its server-rendered configuration controls
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "Heatmap"
                               ,status = "primary", solidHeader = TRUE
                               ,uiOutput("limma_settings")
                               # ,verbatimTextOutput("limma_num_dge"), hr()
                               ,plotlyOutput("limma_heatmap", height = '800px')#,plotOutput("limma_heatmap", height = '800px')
                               # ,d3heatmapOutput("DESeq2_heatmap")
                      )
)
/shared/limma_dgeUI.R
no_license
rosericazondekon/irnaa
R
false
false
2,684
r
################limma-voom
# Shiny dashboard tab for differential gene expression (DGE) analysis with
# limma-voom. Pure UI definition: every "limma_*" output ID is rendered
# server-side; the slider input IDs feed the volcano-plot thresholds.
limma_dge <- tabItem(tabName = "limma_dge",
                      br(), br(),
                      h2("Differential Gene Analysis with limma-voom"), br()
                      # control to launch the DGE run (rendered server-side)
                      ,uiOutput("limma_runDGE")
                      ,hr()
                      # summary box: p-value histogram, MA plot, text summary
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "DGE analysis Summary with limma-voom"
                               ,status = "primary", solidHeader = TRUE
                               ,fluidRow(column(width=4, plotOutput("limma_pvalues"))
                                         ,column(width=4, plotOutput("limma_plotMA"))
                                         ,column(width=4, uiOutput("limma_sumout"))
                               )
                      )
                      # full DGE results table plus a download control
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "limma-voom DGE results"
                               ,status = "primary", solidHeader = TRUE
                               ,dataTableOutput("limma_dge_res")
                               ,uiOutput("limma_dge_res_dld")
                      )
                      # volcano plot; footer sliders set the adjusted p-value
                      # cutoff, the log2 fold-change cutoff, and the x-limits
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "Volcano Plot"
                               ,status = "primary", solidHeader = TRUE
                               ,plotOutput("limma_volcano", height = "800px")
                               ,footer = tagList(fluidRow(column(2)
                                                          ,column(3, sliderInput("limma_vp_pval", "Define threshold for adjusted p-value:"
                                                                                 ,min = 0
                                                                                 ,max = 1
                                                                                 ,value = 0.05
                                                                                 ,step = 0.01))
                                                          ,column(3, sliderInput("limma_vp_lfc", "Define threshold for log2FC:"
                                                                                 ,min = 0
                                                                                 ,max = 10
                                                                                 ,value = 2
                                                                                 ,step = 0.1))
                                                          ,column(3, sliderInput("limma_vp_limit", "Define plot x-limit:"
                                                                                 ,min = -15
                                                                                 ,max = 15
                                                                                 ,value = c(-3, 3)
                                                                                 ,step = 1))
                               ))
                      )
                      # interactive (plotly) heatmap; limma_settings supplies
                      # its server-rendered configuration controls
                      ,boxPlus(collapsible=T, closable=F, width = 10, title = "Heatmap"
                               ,status = "primary", solidHeader = TRUE
                               ,uiOutput("limma_settings")
                               # ,verbatimTextOutput("limma_num_dge"), hr()
                               ,plotlyOutput("limma_heatmap", height = '800px')#,plotOutput("limma_heatmap", height = '800px')
                               # ,d3heatmapOutput("DESeq2_heatmap")
                      )
)
###############################################################################
###############################################################################
###############################################################################

# Shiny UI for a simple "R calculator": a text input takes an expression,
# a submit button triggers evaluation, and the result is shown in the main
# panel. The matching server must render the "my_result" output from the
# "my_input" input. (UI strings are intentionally in Czech.)

library("shiny")

## user interface -------------------------------------------------------------

fluidPage(
    
    # application "title" ------------------------------------------------------
    
    titlePanel("R kalkulátor"),
    
    sidebarLayout(
        
        # application controls (inputs; left panel) ---------------------------
        
        sidebarPanel(
            
            textInput(
                inputId = "my_input",
                label = "Sem vložte výraz, který má být spočítán",
                placeholder = "2+1"
            ),
            
            # submitButton delays all reactivity until the user clicks
            submitButton(text = "Spočítej!")
            
        ),
        
        ## outputs; right panel ------------------------------------------------
        
        mainPanel(
            
            strong("Výsledek výrazu:"),
            tags$br(),
            tags$br(),
            textOutput(outputId = "my_result")
            
        )
        
    )
    
)

## ----------------------------------------------------------------------------

###############################################################################
###############################################################################
###############################################################################
/akademicky_rok_2019_2020/r_kalkulator/ui.R
no_license
LStepanek/17VSADR_Skriptovani_a_analyza_dat_v_jazyce_R
R
false
false
1,606
r
###############################################################################
###############################################################################
###############################################################################

# Shiny UI for a simple "R calculator": a text input takes an expression,
# a submit button triggers evaluation, and the result is shown in the main
# panel. The matching server must render the "my_result" output from the
# "my_input" input. (UI strings are intentionally in Czech.)

library("shiny")

## user interface -------------------------------------------------------------

fluidPage(
    
    # application "title" ------------------------------------------------------
    
    titlePanel("R kalkulátor"),
    
    sidebarLayout(
        
        # application controls (inputs; left panel) ---------------------------
        
        sidebarPanel(
            
            textInput(
                inputId = "my_input",
                label = "Sem vložte výraz, který má být spočítán",
                placeholder = "2+1"
            ),
            
            # submitButton delays all reactivity until the user clicks
            submitButton(text = "Spočítej!")
            
        ),
        
        ## outputs; right panel ------------------------------------------------
        
        mainPanel(
            
            strong("Výsledek výrazu:"),
            tags$br(),
            tags$br(),
            textOutput(outputId = "my_result")
            
        )
        
    )
    
)

## ----------------------------------------------------------------------------

###############################################################################
###############################################################################
###############################################################################