content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
### ### ### script parameters correlation ### ### no_cores = 12 optimisation.table <- read.table(paste(path.list$optimisation.results, "otimisation_fit.csv", sep = "/" ), header = TRUE, sep = ",") optimisation.initiate <- InitiateOptimisation( path.list = path.list) parameters.conditions <- optimisation.initiate$parameters.conditions parameters.conditions$variable <- paste("p", 1:nrow(parameters.conditions), sep ="") registerDoParallel(no_cores) parameters.estimated.list <- foreach( i = 1:length(optimisation.table$par.id)) %dopar% { par_id <- optimisation.table[i,]$par.id parameters.tmp <- read.table( file = paste(path.list$optimisation.data, par_id, "parameters.csv", sep = "/"), header = TRUE, sep = "," ) parameters.tmp <- parameters.tmp %>% dplyr::mutate(data.id = id) %>% dplyr::mutate(par.id = par_id) %>% dplyr::select(-id) } stopImplicitCluster() parameters.estimated <- do.call(what = rbind, args = parameters.estimated.list) #install.packages("PerformanceAnalytics") library("PerformanceAnalytics") parameters.estimated <- parameters.estimated %>% dplyr::filter(likelihood < -50000) parameters.estimated.2 <- parameters.estimated %>% #dplyr::mutate(p1p6 = p11*p21*p1/(p6+p19*p11)) dplyr::mutate(p1p6 = p1/p6) %>% dplyr::arrange(likelihood) %>% dplyr::filter(likelihood < -54278.41) %>% dplyr::summarise(p1p6 = mean(p1p6)) parameters.estimated.2$p1p6[1] df <- parameters.estimated[, c(paste("p",c(2,3,8,9,10), sep = ""), "p1p6")] M <- cor(df) #M <- M[-c(4,5,7),-c(4,5,7)] do.call(pdf, args = append(plot.args.ggsave, list(file = paste(path.list$optimisation.results, "parameters_correlations.pdf", sep ="/")))) chart.Correlation(log(df)) corrplot(M, method = "color") corrplot(M, method = "number") dev.off() parameters.estimated <- parameters.estimated %>% dplyr::mutate(p1p6 = p1/p6) %>% dplyr::mutate(p9p10 = p9/p10) %>% dplyr::mutate(p8p9 = p8*p9*p10) %>% dplyr::mutate(p2p8 = -42237642*p2+ 3.752908*p8) %>% dplyr::mutate(p9p10 = -0.183*p9+ 259*p10) parameters.estimated.melt <- 
parameters.estimated %>% melt(id.vars = c("likelihood", "par.id", "data.id")) %>% data.table() parameters.estimated.melt <- parameters.estimated.melt %>% dplyr::mutate(variable = as.character(variable)) par <- "p9p10" q <- quantile(parameters.estimated.melt$likelihood, probs = c(0.75))[[1]] cols <- c(colnames(df), "p2p8") gplot.list <- list() for(par in colnames(df)){ gplot.list[[par]] <- ggplot( data = parameters.estimated.melt %>% dplyr::filter(variable %in% c(par), likelihood < q), aes( x = log(value), y = log(-likelihood)) ) + geom_point() + #xlim(c(-16, -15)) + xlab(paste("log(",par,")")) + ggtitle(paste("Dependence of likelihood on parameters (",par,")")) } do.call(what = ggsave, args = append(plot.args.ggsave, list(filename = paste(path.list$optimisation.results, "parameters_dependence.pdf", sep ="/"), plot = marrangeGrob(grobs = gplot.list, ncol = 1, nrow = 1)))) #### canoncial correlations #### df.params <- as.matrix(parameters.estimated[, c(paste("p",c(2,3,8,9,10), sep = ""), "p1p6")]) df.likelihood <- matrix(parameters.estimated[, "likelihood"],ncol = 1) cov(x = df.params, y = df.likelihood) # install.packages("CCA") # require("CCA")acepack ccor <- cc(X = df.params[,c(4,5)], Y = df.likelihood) ccor$cor ?cc est
/R/computations/computations_summary_parameters.R
no_license
stork119/OSigA
R
false
false
3,785
r
### ### ### script parameters correlation ### ### no_cores = 12 optimisation.table <- read.table(paste(path.list$optimisation.results, "otimisation_fit.csv", sep = "/" ), header = TRUE, sep = ",") optimisation.initiate <- InitiateOptimisation( path.list = path.list) parameters.conditions <- optimisation.initiate$parameters.conditions parameters.conditions$variable <- paste("p", 1:nrow(parameters.conditions), sep ="") registerDoParallel(no_cores) parameters.estimated.list <- foreach( i = 1:length(optimisation.table$par.id)) %dopar% { par_id <- optimisation.table[i,]$par.id parameters.tmp <- read.table( file = paste(path.list$optimisation.data, par_id, "parameters.csv", sep = "/"), header = TRUE, sep = "," ) parameters.tmp <- parameters.tmp %>% dplyr::mutate(data.id = id) %>% dplyr::mutate(par.id = par_id) %>% dplyr::select(-id) } stopImplicitCluster() parameters.estimated <- do.call(what = rbind, args = parameters.estimated.list) #install.packages("PerformanceAnalytics") library("PerformanceAnalytics") parameters.estimated <- parameters.estimated %>% dplyr::filter(likelihood < -50000) parameters.estimated.2 <- parameters.estimated %>% #dplyr::mutate(p1p6 = p11*p21*p1/(p6+p19*p11)) dplyr::mutate(p1p6 = p1/p6) %>% dplyr::arrange(likelihood) %>% dplyr::filter(likelihood < -54278.41) %>% dplyr::summarise(p1p6 = mean(p1p6)) parameters.estimated.2$p1p6[1] df <- parameters.estimated[, c(paste("p",c(2,3,8,9,10), sep = ""), "p1p6")] M <- cor(df) #M <- M[-c(4,5,7),-c(4,5,7)] do.call(pdf, args = append(plot.args.ggsave, list(file = paste(path.list$optimisation.results, "parameters_correlations.pdf", sep ="/")))) chart.Correlation(log(df)) corrplot(M, method = "color") corrplot(M, method = "number") dev.off() parameters.estimated <- parameters.estimated %>% dplyr::mutate(p1p6 = p1/p6) %>% dplyr::mutate(p9p10 = p9/p10) %>% dplyr::mutate(p8p9 = p8*p9*p10) %>% dplyr::mutate(p2p8 = -42237642*p2+ 3.752908*p8) %>% dplyr::mutate(p9p10 = -0.183*p9+ 259*p10) parameters.estimated.melt <- 
parameters.estimated %>% melt(id.vars = c("likelihood", "par.id", "data.id")) %>% data.table() parameters.estimated.melt <- parameters.estimated.melt %>% dplyr::mutate(variable = as.character(variable)) par <- "p9p10" q <- quantile(parameters.estimated.melt$likelihood, probs = c(0.75))[[1]] cols <- c(colnames(df), "p2p8") gplot.list <- list() for(par in colnames(df)){ gplot.list[[par]] <- ggplot( data = parameters.estimated.melt %>% dplyr::filter(variable %in% c(par), likelihood < q), aes( x = log(value), y = log(-likelihood)) ) + geom_point() + #xlim(c(-16, -15)) + xlab(paste("log(",par,")")) + ggtitle(paste("Dependence of likelihood on parameters (",par,")")) } do.call(what = ggsave, args = append(plot.args.ggsave, list(filename = paste(path.list$optimisation.results, "parameters_dependence.pdf", sep ="/"), plot = marrangeGrob(grobs = gplot.list, ncol = 1, nrow = 1)))) #### canoncial correlations #### df.params <- as.matrix(parameters.estimated[, c(paste("p",c(2,3,8,9,10), sep = ""), "p1p6")]) df.likelihood <- matrix(parameters.estimated[, "likelihood"],ncol = 1) cov(x = df.params, y = df.likelihood) # install.packages("CCA") # require("CCA")acepack ccor <- cc(X = df.params[,c(4,5)], Y = df.likelihood) ccor$cor ?cc est
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fns.R \name{cholsolve} \alias{cholsolve} \title{Solve the equation Qx = y} \usage{ cholsolve(Q, y, perm = F, cholQ = matrix(1, 0, 0), cholQp = matrix(1, 0, 0), P = NA) } \arguments{ \item{Q}{matrix (sparse or dense), the Cholesky factor of which needs to be found} \item{y}{matrix with the same number of rows as Q} \item{perm}{if F no permutation is carried out, if T permuted Cholesky factors are used} \item{cholQ}{the Cholesky factor of Q (if known already)} \item{cholQp}{the permuted Cholesky factor of Q (if known already)} \item{P}{the pivot matrix (if known already)} } \value{ x solution to Qx = y } \description{ This function is similar to \code{solve(Q,y)} but with the added benefit that it allows for permuted matrices. This function does the job in order to minimise user error when attempting to re-permute the matrices prior or after solving. The user also has an option for the permuted Cholesky factorisation of Q to be carried out internally. } \examples{ require(Matrix) Q = sparseMatrix(i=c(1,1,2,2),j=c(1,2,1,2),x=c(0.1,0.2,0.2,1)) y = matrix(c(1,2),2,1) cholsolve(Q,y) } \references{ Havard Rue and Leonhard Held (2005). Gaussian Markov Random Fields: Theory and Applications. Chapman & Hall/CRC Press } \keyword{Cholesky} \keyword{factor,} \keyword{linear} \keyword{solve}
/man/cholsolve.Rd
no_license
jeffwong-nflx/sparseinv
R
false
true
1,384
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fns.R \name{cholsolve} \alias{cholsolve} \title{Solve the equation Qx = y} \usage{ cholsolve(Q, y, perm = F, cholQ = matrix(1, 0, 0), cholQp = matrix(1, 0, 0), P = NA) } \arguments{ \item{Q}{matrix (sparse or dense), the Cholesky factor of which needs to be found} \item{y}{matrix with the same number of rows as Q} \item{perm}{if F no permutation is carried out, if T permuted Cholesky factors are used} \item{cholQ}{the Cholesky factor of Q (if known already)} \item{cholQp}{the permuted Cholesky factor of Q (if known already)} \item{P}{the pivot matrix (if known already)} } \value{ x solution to Qx = y } \description{ This function is similar to \code{solve(Q,y)} but with the added benefit that it allows for permuted matrices. This function does the job in order to minimise user error when attempting to re-permute the matrices prior or after solving. The user also has an option for the permuted Cholesky factorisation of Q to be carried out internally. } \examples{ require(Matrix) Q = sparseMatrix(i=c(1,1,2,2),j=c(1,2,1,2),x=c(0.1,0.2,0.2,1)) y = matrix(c(1,2),2,1) cholsolve(Q,y) } \references{ Havard Rue and Leonhard Held (2005). Gaussian Markov Random Fields: Theory and Applications. Chapman & Hall/CRC Press } \keyword{Cholesky} \keyword{factor,} \keyword{linear} \keyword{solve}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/area_of_sq.R \name{sq_a} \alias{sq_a} \title{Square Area} \usage{ sq_a(l, w) } \arguments{ \item{l}{the length of rectangular} \item{w}{the width of rectangular} } \description{ Calculates area of a rectangular } \author{ M Usman Mirza }
/man/sq_a.Rd
no_license
muhusmanmirza/myfirstpackage
R
false
true
317
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/area_of_sq.R \name{sq_a} \alias{sq_a} \title{Square Area} \usage{ sq_a(l, w) } \arguments{ \item{l}{the length of rectangular} \item{w}{the width of rectangular} } \description{ Calculates area of a rectangular } \author{ M Usman Mirza }
#' Access files in the current app #' #' @param ... Character vector specifying directory and or file to #' point to inside the current package. #' #' @noRd app_sys <- function(...){ system.file(..., package = "sporecounter") } #' Read App Config #' #' @param value Value to retrieve from the config file. #' @param config R_CONFIG_ACTIVE value. #' @param use_parent Logical, scan the parent directory for config file. #' #' @importFrom config get #' #' @noRd get_golem_config <- function( value, config = Sys.getenv("R_CONFIG_ACTIVE", "default"), use_parent = TRUE ){ config::get( value = value, config = config, # Modify this if your config file is somewhere else: file = app_sys("golem-config.yml"), use_parent = use_parent ) }
/R/app_config.R
permissive
astrzalka/sporecounter
R
false
false
786
r
#' Access files in the current app #' #' @param ... Character vector specifying directory and or file to #' point to inside the current package. #' #' @noRd app_sys <- function(...){ system.file(..., package = "sporecounter") } #' Read App Config #' #' @param value Value to retrieve from the config file. #' @param config R_CONFIG_ACTIVE value. #' @param use_parent Logical, scan the parent directory for config file. #' #' @importFrom config get #' #' @noRd get_golem_config <- function( value, config = Sys.getenv("R_CONFIG_ACTIVE", "default"), use_parent = TRUE ){ config::get( value = value, config = config, # Modify this if your config file is somewhere else: file = app_sys("golem-config.yml"), use_parent = use_parent ) }
############################################################################################# # Analyze Time Series ratio for each year # ############################################################################################# #remove old objects for safety resons rm(list=ls(all=TRUE)) #set seed to make analysis reproducible if any pseudo random number generator is used by any function set.seed(123) #utility function to glue together text without separator glue<-function(...){paste(...,sep="")} #read the local paths to different directories from an external file source("workingDir.R") #change to the data directory setwd(dataDir) d<-read.table("timeSeries.txt", header=TRUE,sep=";") names(d)[1]<-"year" with(d, plot(year,num.ips)) setwd(plotDir) jpeg("ObservationsIPs.jpeg") with(d,plot(year,num.obs/num.ips, pch="+",type="b", xlab="Year", ylab="Number of observations / IP")) dev.off() jpeg("BadExitProportion.jpeg") with(d,plot(year,num.ips.flag.bad/num.ips, pch="+",type="b", xlab="Year", ylab="Ratio of nodes with \"BadExit\" flag")) dev.off() jpeg("NumSybils.jpeg") with(d,plot(year,num.sybills, pch="+",type="b", xlab="Year", ylab="Number of sybils")) dev.off() #Extraordinary peak of IPs with flag "BadExit" in 2012 with(d, plot(year,num.obs.fingerprint/num.ips)) with(d, plot(year,num.obs.fingerprint)) #Why are there so few IPs sampled in the files from which we extracted the fingerprints? 
with(d, plot(year, num.obs)) #The number of observations per increases over time with(d, plot(year, num.obs/num.ips)) #The number of observations /IP increases with(d, plot(year, num.obs.fingerprint/num.obs)) with(d, plot(year, num.sybills)) with(d, plot(year,num.obs.fingerprint)) #scale to view all the different values in one plot ds<-as.data.frame(scale(d[,-1])) ds<-cbind(year=d$year,ds) #we will use melt from reshape2 to create a better plot require(ggplot2) require(reshape2) ds.melt<-melt(ds,id="year") #function to add new values to the dataframe calculate.value<-function(dataframe,operation){ temp.dataframe<-NULL temp.dataframe$year<-dataframe$year temp.dataframe$variable<-as.factor(rep(deparse(substitute(operation)),nrow(dataframe))) temp.dataframe$value<-eval(parse(text=glue("with(",deparse(substitute(dataframe)),",",deparse(substitute(operation)),")")))#with(dataframe,operation) return(as.data.frame(temp.dataframe)) } #finally we row join the values ds.melt<-rbind(ds.melt, calculate.value(ds,num.ips.flag.bad/num.ips), calculate.value(ds,num.obs/num.ips), calculate.value(ds,num.obs.fingerprint/num.obs)) #drop values that are not needed in plot ds.reduced<-(droplevels( ds.melt[-which(ds.melt$variable %in% c("num.ips.flag.bad","num.obs","num.ips","num.obs.known.fingerprint")), ] )) #and the plot is grouped by variable ggplot(ds.reduced, aes(x=year, y=value, color=variable)) + geom_line(aes(linetype=variable), size=1) + geom_point(aes(shape=variable, size=4)) + scale_linetype_manual(values =sample(1:10,nlevels(ds.reduced$variable),replace=T)) + scale_shape_manual(values=sample(1:10,nlevels(ds.reduced$variable),replace=T)) levels(ds.melt$variable) #drop values that are not needed in plot ds.reduced<-(droplevels( ds.melt[-which(ds.melt$variable %in% c("num.obs.fingerprint/num.obs","num.ips.flag.bad","num.obs","num.ips","num.obs.known.fingerprint")), ] )) #and the plot is grouped by variable ggplot(ds.reduced, aes(x=year, y=value, color=variable)) + 
geom_line(aes(linetype=variable), size=1) + geom_point(aes(shape=variable, size=4)) + scale_linetype_manual(values =sample(1:10,nlevels(ds.reduced$variable),replace=T)) + scale_shape_manual(values=sample(1:10,nlevels(ds.reduced$variable),replace=T)) cbPalette <- c("#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7") jpeg("BadExitProportion.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d, aes(x=year, y=num.ips.flag.bad/num.ips))+ geom_line(size=0.9,color=cbPalette[1])+ geom_point(aes(shape=as.factor(d$year), size=4,color=as.factor(d$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d$year))))+ labs(x="Year", y="Ratio of nodes with \"BadExit\" flag",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d$year) dev.off() jpeg("NumSybils.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d, aes(x=year, y=num.sybills))+ geom_line(size=0.9,color=cbPalette[5])+ geom_point(aes(shape=as.factor(d$year), size=4,color=as.factor(d$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d$year))))+ labs(x="Year", y="Number of sybils",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d$year) dev.off() jpeg("ObservationsIPs.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d, aes(x=year, y=num.obs/num.ips))+ geom_line(size=0.9,color=cbPalette[3])+ geom_point(aes(shape=as.factor(d$year), size=4,color=as.factor(d$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d$year))))+ labs(x="Year", y="Number of observations / IP",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d$year) dev.off() # Cross information of IP from counted fingerprints and each year setwd(dataDir) lines<-readLines("CountFingerprints.txt") 
trim.leading <- function (x) sub("^\\s+", "", x) getFirstColumn<-function(x){ unlist(trim.leading(substr(trim.leading(x), 1, 2))) } getSecondColumn<-function(x){ unlist(substr(trim.leading(x), 3, 16)) } count <- as.numeric(sapply(lines,getFirstColumn)) IP <- as.character(sapply(lines,getSecondColumn)) d.fp<-data.frame(count,IP) d.fpy<-NULL # Get all ips by year for(year in seq(2008,2017)){ setwd(dataDir) fileName<-glue("AggregatedDataSet",year,".txt") d<-read.table(fileName, header=FALSE, sep=" ", stringsAsFactors=FALSE,comment.char="") names(d)<-header d<-d[,-1] d$IP<- as.factor(d$IP) d<-cbind.data.frame(IP=d$IP) d.aux<-merge(d,d.fp,by="IP") d.aux<-cbind(d.aux,year) d.fpy<-rbind(d.fpy,d.aux) # outputFileName<-glue("IPCountFingerPrintByYear.txt") # setwd(dataDir) # write.table(d.aux, file=outputFileName,append=TRUE,col.names=FALSE, row.names = F) } library(dplyr) set.seed(1) d.count.fpy<-d.fpy %>% group_by(year) %>% summarise("IP Count" = length(year)) setwd(plotDir) jpeg("ObservationsFingerprintsYear.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d.count.fpy, aes(x=d.count.fpy$year, y=d.count.fpy$`IP Count`))+ geom_line(size=0.9,color=cbPalette[3])+ geom_point(aes(shape=as.factor(d.count.fpy$year), size=4,color=as.factor(d.count.fpy$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d.count.fpy$year))))+ labs(x="Year", y="Number of observations changed fingerprints",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d.count.fpy$year) dev.off()
/analyzeTimeSeries.R
no_license
svana1/RottenOnions
R
false
false
7,303
r
############################################################################################# # Analyze Time Series ratio for each year # ############################################################################################# #remove old objects for safety resons rm(list=ls(all=TRUE)) #set seed to make analysis reproducible if any pseudo random number generator is used by any function set.seed(123) #utility function to glue together text without separator glue<-function(...){paste(...,sep="")} #read the local paths to different directories from an external file source("workingDir.R") #change to the data directory setwd(dataDir) d<-read.table("timeSeries.txt", header=TRUE,sep=";") names(d)[1]<-"year" with(d, plot(year,num.ips)) setwd(plotDir) jpeg("ObservationsIPs.jpeg") with(d,plot(year,num.obs/num.ips, pch="+",type="b", xlab="Year", ylab="Number of observations / IP")) dev.off() jpeg("BadExitProportion.jpeg") with(d,plot(year,num.ips.flag.bad/num.ips, pch="+",type="b", xlab="Year", ylab="Ratio of nodes with \"BadExit\" flag")) dev.off() jpeg("NumSybils.jpeg") with(d,plot(year,num.sybills, pch="+",type="b", xlab="Year", ylab="Number of sybils")) dev.off() #Extraordinary peak of IPs with flag "BadExit" in 2012 with(d, plot(year,num.obs.fingerprint/num.ips)) with(d, plot(year,num.obs.fingerprint)) #Why are there so few IPs sampled in the files from which we extracted the fingerprints? 
with(d, plot(year, num.obs)) #The number of observations per increases over time with(d, plot(year, num.obs/num.ips)) #The number of observations /IP increases with(d, plot(year, num.obs.fingerprint/num.obs)) with(d, plot(year, num.sybills)) with(d, plot(year,num.obs.fingerprint)) #scale to view all the different values in one plot ds<-as.data.frame(scale(d[,-1])) ds<-cbind(year=d$year,ds) #we will use melt from reshape2 to create a better plot require(ggplot2) require(reshape2) ds.melt<-melt(ds,id="year") #function to add new values to the dataframe calculate.value<-function(dataframe,operation){ temp.dataframe<-NULL temp.dataframe$year<-dataframe$year temp.dataframe$variable<-as.factor(rep(deparse(substitute(operation)),nrow(dataframe))) temp.dataframe$value<-eval(parse(text=glue("with(",deparse(substitute(dataframe)),",",deparse(substitute(operation)),")")))#with(dataframe,operation) return(as.data.frame(temp.dataframe)) } #finally we row join the values ds.melt<-rbind(ds.melt, calculate.value(ds,num.ips.flag.bad/num.ips), calculate.value(ds,num.obs/num.ips), calculate.value(ds,num.obs.fingerprint/num.obs)) #drop values that are not needed in plot ds.reduced<-(droplevels( ds.melt[-which(ds.melt$variable %in% c("num.ips.flag.bad","num.obs","num.ips","num.obs.known.fingerprint")), ] )) #and the plot is grouped by variable ggplot(ds.reduced, aes(x=year, y=value, color=variable)) + geom_line(aes(linetype=variable), size=1) + geom_point(aes(shape=variable, size=4)) + scale_linetype_manual(values =sample(1:10,nlevels(ds.reduced$variable),replace=T)) + scale_shape_manual(values=sample(1:10,nlevels(ds.reduced$variable),replace=T)) levels(ds.melt$variable) #drop values that are not needed in plot ds.reduced<-(droplevels( ds.melt[-which(ds.melt$variable %in% c("num.obs.fingerprint/num.obs","num.ips.flag.bad","num.obs","num.ips","num.obs.known.fingerprint")), ] )) #and the plot is grouped by variable ggplot(ds.reduced, aes(x=year, y=value, color=variable)) + 
geom_line(aes(linetype=variable), size=1) + geom_point(aes(shape=variable, size=4)) + scale_linetype_manual(values =sample(1:10,nlevels(ds.reduced$variable),replace=T)) + scale_shape_manual(values=sample(1:10,nlevels(ds.reduced$variable),replace=T)) cbPalette <- c("#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7") jpeg("BadExitProportion.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d, aes(x=year, y=num.ips.flag.bad/num.ips))+ geom_line(size=0.9,color=cbPalette[1])+ geom_point(aes(shape=as.factor(d$year), size=4,color=as.factor(d$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d$year))))+ labs(x="Year", y="Ratio of nodes with \"BadExit\" flag",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d$year) dev.off() jpeg("NumSybils.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d, aes(x=year, y=num.sybills))+ geom_line(size=0.9,color=cbPalette[5])+ geom_point(aes(shape=as.factor(d$year), size=4,color=as.factor(d$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d$year))))+ labs(x="Year", y="Number of sybils",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d$year) dev.off() jpeg("ObservationsIPs.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d, aes(x=year, y=num.obs/num.ips))+ geom_line(size=0.9,color=cbPalette[3])+ geom_point(aes(shape=as.factor(d$year), size=4,color=as.factor(d$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d$year))))+ labs(x="Year", y="Number of observations / IP",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d$year) dev.off() # Cross information of IP from counted fingerprints and each year setwd(dataDir) lines<-readLines("CountFingerprints.txt") 
trim.leading <- function (x) sub("^\\s+", "", x) getFirstColumn<-function(x){ unlist(trim.leading(substr(trim.leading(x), 1, 2))) } getSecondColumn<-function(x){ unlist(substr(trim.leading(x), 3, 16)) } count <- as.numeric(sapply(lines,getFirstColumn)) IP <- as.character(sapply(lines,getSecondColumn)) d.fp<-data.frame(count,IP) d.fpy<-NULL # Get all ips by year for(year in seq(2008,2017)){ setwd(dataDir) fileName<-glue("AggregatedDataSet",year,".txt") d<-read.table(fileName, header=FALSE, sep=" ", stringsAsFactors=FALSE,comment.char="") names(d)<-header d<-d[,-1] d$IP<- as.factor(d$IP) d<-cbind.data.frame(IP=d$IP) d.aux<-merge(d,d.fp,by="IP") d.aux<-cbind(d.aux,year) d.fpy<-rbind(d.fpy,d.aux) # outputFileName<-glue("IPCountFingerPrintByYear.txt") # setwd(dataDir) # write.table(d.aux, file=outputFileName,append=TRUE,col.names=FALSE, row.names = F) } library(dplyr) set.seed(1) d.count.fpy<-d.fpy %>% group_by(year) %>% summarise("IP Count" = length(year)) setwd(plotDir) jpeg("ObservationsFingerprintsYear.jpeg",width = 900,height = 600,units = "px",res = 150) ggplot(d.count.fpy, aes(x=d.count.fpy$year, y=d.count.fpy$`IP Count`))+ geom_line(size=0.9,color=cbPalette[3])+ geom_point(aes(shape=as.factor(d.count.fpy$year), size=4,color=as.factor(d.count.fpy$year))) + scale_shape_manual(values=sample(1:10,nlevels(as.factor(d.count.fpy$year))))+ labs(x="Year", y="Number of observations changed fingerprints",shape="Year", colour="Year") + theme(axis.text=element_text(size=10), axis.title=element_text(size=12,face="bold"))+ scale_x_continuous(breaks=d.count.fpy$year) dev.off()
setGeneric( name = "directionCluster", def = function(track, minD, minT , tolerance) { .loadPackages() standardGeneric("directionCluster") } ) setMethod( f = "directionCluster", signature = c("Track","numeric", "numeric", "numeric"), definition = function(track, minD, minT , tolerance) { if (is.null(track)|| length(track) < 2){ return (0)} cl<-list() clusterId = 1 clusterOpen = FALSE tracksize <- length(track@connections$direction) tol = tolerance clusterini = 0 lastindex=1 for(n in 1:(tracksize-1)){ dirc = track@connections$direction[n]-track@connections$direction[n+1] if(dirc<0){ dirc = dirc*(-1) } if(dirc >= minD){ cl[n]<-clusterId if(!clusterOpen){ clusterini = n } clusterOpen = TRUE } else{ if(clusterOpen){ i=1 tol=tolerance while(tol >0 && (n+i)<(tracksize+1)){ dirc = track@connections$direction[n+i]-track@connections$direction[n+i+1] if(dirc<0){ dirc = dirc*(-1) } if(dirc>=minD){ i = i+1 lastindex = n+i break } else{ tol = tol-1 if(tol==0){ lastindex = n+i } } } } if(lastindex<(n+tolerance)){ for(j in n:lastindex){ cl[j]<-clusterId } n=lastindex } else{ ctime=0 for (j in clusterini:n){ ctime = ctime + track@connections$duration[j] } if(ctime>minT){ n=lastindex for (j in clusterini:n){ cl[j]<-clusterId } clusterId=clusterId+1 } else{ for (j in clusterini:n){ # print("j e clusterId") print(j) print(clusterId) cl[j]<--1 } } clusterOpen = FALSE } } } return (cl) } )
/R/DirectionCluster.R
no_license
dvm1607/TrajDataMining
R
false
false
2,135
r
setGeneric( name = "directionCluster", def = function(track, minD, minT , tolerance) { .loadPackages() standardGeneric("directionCluster") } ) setMethod( f = "directionCluster", signature = c("Track","numeric", "numeric", "numeric"), definition = function(track, minD, minT , tolerance) { if (is.null(track)|| length(track) < 2){ return (0)} cl<-list() clusterId = 1 clusterOpen = FALSE tracksize <- length(track@connections$direction) tol = tolerance clusterini = 0 lastindex=1 for(n in 1:(tracksize-1)){ dirc = track@connections$direction[n]-track@connections$direction[n+1] if(dirc<0){ dirc = dirc*(-1) } if(dirc >= minD){ cl[n]<-clusterId if(!clusterOpen){ clusterini = n } clusterOpen = TRUE } else{ if(clusterOpen){ i=1 tol=tolerance while(tol >0 && (n+i)<(tracksize+1)){ dirc = track@connections$direction[n+i]-track@connections$direction[n+i+1] if(dirc<0){ dirc = dirc*(-1) } if(dirc>=minD){ i = i+1 lastindex = n+i break } else{ tol = tol-1 if(tol==0){ lastindex = n+i } } } } if(lastindex<(n+tolerance)){ for(j in n:lastindex){ cl[j]<-clusterId } n=lastindex } else{ ctime=0 for (j in clusterini:n){ ctime = ctime + track@connections$duration[j] } if(ctime>minT){ n=lastindex for (j in clusterini:n){ cl[j]<-clusterId } clusterId=clusterId+1 } else{ for (j in clusterini:n){ # print("j e clusterId") print(j) print(clusterId) cl[j]<--1 } } clusterOpen = FALSE } } } return (cl) } )
% Generated by roxygen2 (4.0.2): do not edit by hand \docType{data} \name{napkins} \alias{napkins} \title{Napkin Use} \format{A data frame with 86 observations on the following 2 variables. \describe{ \item{napkins}{number of napkins used by the subject during the meal.} \item{sex}{a factor with levels \code{female} \code{male} Sex of the person being observed}}} \source{ MAT 111 at Georgetown College } \description{ Students at GC observed their fellow students in the Cafe at lunch. } \keyword{datasets}
/man/napkins.Rd
no_license
arturochian/tigerstats
R
false
false
511
rd
% Generated by roxygen2 (4.0.2): do not edit by hand \docType{data} \name{napkins} \alias{napkins} \title{Napkin Use} \format{A data frame with 86 observations on the following 2 variables. \describe{ \item{napkins}{number of napkins used by the subject during the meal.} \item{sex}{a factor with levels \code{female} \code{male} Sex of the person being observed}}} \source{ MAT 111 at Georgetown College } \description{ Students at GC observed their fellow students in the Cafe at lunch. } \keyword{datasets}
/Exercícios_R/Features_scalling.R
no_license
olimpiojunior/Estudos_R
R
false
false
275
r
source(file='code/packages.R') load(file='data_clean/cluster_ConHum.Rdata') load(file='data_clean/cluster_FactInf.Rdata') load(file='data_clean/data_clean.Rdata') clustersConHum2 <- read.csv(file='data_clean/cluster_ConHum.csv') clustersFactInf2 <- read.csv(file='data_clean/cluster_FactInf.csv') names(index_all)[1]<- "Formulario" all <- clustersConHum2 %>% full_join(ConHum , by="Formulario") %>% full_join(clustersFactInf2 , by="Formulario") %>% left_join(FactInf, by="Formulario") %>% left_join(vars, by="Formulario") %>% left_join(index_all, by="Formulario") %>% select(Formulario:groups5.x, groups5.y, BuenEstado,IV, CI, HI, VI, asp,tipo_hum,nat_art,Perimetro_m,Area_Has) names(all) <- c("ID","Name","cCH5", "cFI5", "BE", "IV","CI", "HI", "VI","asp","tipo_hum", "nat_art","Perimetro_m","Area_Has") head(all) all %>% group_by(cCH5, cFI5)%>% summarise(mean=mean(VI)) %>% spread(cCH5, mean)%>% kable() all%>% group_by(cCH5, cFI5)%>% summarise(n=n())%>% spread(cCH5, n)%>% kable() all %>% filter(cCH5 ==1) %>% group_by(cFI5, BE)%>% summarise(n = n()) %>% spread(cFI5,n)%>% kable() dataset <- ConHum %>% select(Formulario, BuenEstado) %>% full_join(clustersConHum2, by="Formulario") %>% full_join(clustersFactInf2, by="Formulario") %>% left_join(FactInf, by="Formulario") %>% left_join(vars, by="Formulario") %>% left_join(index_all, by="Formulario") %>% select(Formulario, BuenEstado, groups5.x, groups5.y, IV, CI, HI, VI, asp, tipo_hum,nat_art,Perimetro_m,Area_Has, Bosques, Gan_Extens, Sabanas,Pesca, Acuacult,Moluscos,Turism_Com) names(dataset) <- c("ID","IBE","cCH5", "cFI5", "IV","CI", "HI", "VI","asp","tipo_hum", "nat_art","Perimetro_m","Area_Has","Bosques","Gan_Extens", "Sabanas","Pesca","Acuacult","Moluscos" , "Turism_Com") mod1 <- lm(VI ~ as.factor(cCH5) + as.factor(cFI5),data = dataset) summary(mod1) plot(mod1) extract_eq(mod1) aa<-aov(mod1) plot(TukeyHSD(aa, "as.factor(cCH5)")) plot(TukeyHSD(aa, "as.factor(cFI5)")) mod2 <- lm(IV ~ IBE,data = dataset) summary(mod2) mod3 <- lm(IV 
~ scale(Area_Has),data = dataset) summary(mod3) mod4 <- lm(IV ~ scale(Perimetro_m),data = dataset) summary(mod4) mod5 <- glm(IBE ~ scale(Area_Has)+ Bosques + Gan_Extens + Sabanas + Pesca, data = dataset, family = "binomial") mod6 <- glm(IBE ~ Bosques + Gan_Extens + Sabanas + Pesca, data = dataset, family = "binomial") mod7 <- glm(IBE ~ Gan_Extens + Sabanas + Pesca, data = dataset, family = "binomial") mod8 <- glm(IBE ~ Sabanas + Pesca, data = dataset, family = "binomial") summary(mod5)$aic;summary(mod6)$aic;summary(mod7)$aic;summary(mod8)$aic confint(mod5) exp(coef(mod5)) exp(cbind(OR = coef(mod5), confint(mod5))) #Now we can say that having forest (influenciado por) a wetland, #increases the odds of a wetland area being in good condition #(versus not being in good condition) by a factor of 5.99. #Sabana's factor is 2.54 and fishing 4.31.The area, and # extensive cattle raising decrease that same odds. mod9 <- glm(VI ~ scale(Area_Has)+ Bosques + Gan_Extens + Sabanas + Pesca + asp+ tipo_hum + nat_art, data = dataset, family = "gaussian") mod10 <- glm(VI ~ scale(Area_Has)+ Bosques + Gan_Extens + Sabanas + Pesca + asp+ nat_art, data = dataset, family = "gaussian") mod11 <- glm(VI ~ Bosques + Gan_Extens + Sabanas + Pesca + asp+ tipo_hum + nat_art, data = dataset, family = "gaussian") mod12 <- glm(VI ~ Bosques + Gan_Extens + Sabanas + Pesca + asp+ nat_art, data = dataset, family = "gaussian") summary(mod9)$aic;summary(mod10)$aic;summary(mod11)$aic;summary(mod12)$aic ### changes missing in here: set up as a baseline: Inside protected area, ### palustrine and natural dataset <- dataset %>% mutate(tipo_hum = factor(tipo_hum, levels=c("Palustre","Lacustre","Estuarino"), labels=c("0Palustre","1Lacustre","2Estuarino")), nat_art = factor(nat_art, levels=c("Natural", "Artificial"), labels=c("0Natural", "1Artificial"))) mod11 <- glm(VI ~ Bosques + Gan_Extens + Sabanas + Pesca + asp+ tipo_hum + nat_art, data = dataset, family = "gaussian") summary(mod11) plot(mod11)
/code/analysis.R
no_license
malfaro2/humedales
R
false
false
4,388
r
source(file='code/packages.R') load(file='data_clean/cluster_ConHum.Rdata') load(file='data_clean/cluster_FactInf.Rdata') load(file='data_clean/data_clean.Rdata') clustersConHum2 <- read.csv(file='data_clean/cluster_ConHum.csv') clustersFactInf2 <- read.csv(file='data_clean/cluster_FactInf.csv') names(index_all)[1]<- "Formulario" all <- clustersConHum2 %>% full_join(ConHum , by="Formulario") %>% full_join(clustersFactInf2 , by="Formulario") %>% left_join(FactInf, by="Formulario") %>% left_join(vars, by="Formulario") %>% left_join(index_all, by="Formulario") %>% select(Formulario:groups5.x, groups5.y, BuenEstado,IV, CI, HI, VI, asp,tipo_hum,nat_art,Perimetro_m,Area_Has) names(all) <- c("ID","Name","cCH5", "cFI5", "BE", "IV","CI", "HI", "VI","asp","tipo_hum", "nat_art","Perimetro_m","Area_Has") head(all) all %>% group_by(cCH5, cFI5)%>% summarise(mean=mean(VI)) %>% spread(cCH5, mean)%>% kable() all%>% group_by(cCH5, cFI5)%>% summarise(n=n())%>% spread(cCH5, n)%>% kable() all %>% filter(cCH5 ==1) %>% group_by(cFI5, BE)%>% summarise(n = n()) %>% spread(cFI5,n)%>% kable() dataset <- ConHum %>% select(Formulario, BuenEstado) %>% full_join(clustersConHum2, by="Formulario") %>% full_join(clustersFactInf2, by="Formulario") %>% left_join(FactInf, by="Formulario") %>% left_join(vars, by="Formulario") %>% left_join(index_all, by="Formulario") %>% select(Formulario, BuenEstado, groups5.x, groups5.y, IV, CI, HI, VI, asp, tipo_hum,nat_art,Perimetro_m,Area_Has, Bosques, Gan_Extens, Sabanas,Pesca, Acuacult,Moluscos,Turism_Com) names(dataset) <- c("ID","IBE","cCH5", "cFI5", "IV","CI", "HI", "VI","asp","tipo_hum", "nat_art","Perimetro_m","Area_Has","Bosques","Gan_Extens", "Sabanas","Pesca","Acuacult","Moluscos" , "Turism_Com") mod1 <- lm(VI ~ as.factor(cCH5) + as.factor(cFI5),data = dataset) summary(mod1) plot(mod1) extract_eq(mod1) aa<-aov(mod1) plot(TukeyHSD(aa, "as.factor(cCH5)")) plot(TukeyHSD(aa, "as.factor(cFI5)")) mod2 <- lm(IV ~ IBE,data = dataset) summary(mod2) mod3 <- lm(IV 
~ scale(Area_Has),data = dataset) summary(mod3) mod4 <- lm(IV ~ scale(Perimetro_m),data = dataset) summary(mod4) mod5 <- glm(IBE ~ scale(Area_Has)+ Bosques + Gan_Extens + Sabanas + Pesca, data = dataset, family = "binomial") mod6 <- glm(IBE ~ Bosques + Gan_Extens + Sabanas + Pesca, data = dataset, family = "binomial") mod7 <- glm(IBE ~ Gan_Extens + Sabanas + Pesca, data = dataset, family = "binomial") mod8 <- glm(IBE ~ Sabanas + Pesca, data = dataset, family = "binomial") summary(mod5)$aic;summary(mod6)$aic;summary(mod7)$aic;summary(mod8)$aic confint(mod5) exp(coef(mod5)) exp(cbind(OR = coef(mod5), confint(mod5))) #Now we can say that having forest (influenciado por) a wetland, #increases the odds of a wetland area being in good condition #(versus not being in good condition) by a factor of 5.99. #Sabana's factor is 2.54 and fishing 4.31.The area, and # extensive cattle raising decrease that same odds. mod9 <- glm(VI ~ scale(Area_Has)+ Bosques + Gan_Extens + Sabanas + Pesca + asp+ tipo_hum + nat_art, data = dataset, family = "gaussian") mod10 <- glm(VI ~ scale(Area_Has)+ Bosques + Gan_Extens + Sabanas + Pesca + asp+ nat_art, data = dataset, family = "gaussian") mod11 <- glm(VI ~ Bosques + Gan_Extens + Sabanas + Pesca + asp+ tipo_hum + nat_art, data = dataset, family = "gaussian") mod12 <- glm(VI ~ Bosques + Gan_Extens + Sabanas + Pesca + asp+ nat_art, data = dataset, family = "gaussian") summary(mod9)$aic;summary(mod10)$aic;summary(mod11)$aic;summary(mod12)$aic ### changes missing in here: set up as a baseline: Inside protected area, ### palustrine and natural dataset <- dataset %>% mutate(tipo_hum = factor(tipo_hum, levels=c("Palustre","Lacustre","Estuarino"), labels=c("0Palustre","1Lacustre","2Estuarino")), nat_art = factor(nat_art, levels=c("Natural", "Artificial"), labels=c("0Natural", "1Artificial"))) mod11 <- glm(VI ~ Bosques + Gan_Extens + Sabanas + Pesca + asp+ tipo_hum + nat_art, data = dataset, family = "gaussian") summary(mod11) plot(mod11)
# install course dependencies devtools::install_version("tibble", "1.2") devtools::install_version("dplyr", "0.5.0") devtools::install_version("data.table", "1.10.0") devtools::install_version("hflights", "0.1") devtools::install_version("DBI", "0.5-1") devtools::install_version("RMySQL", "0.10.9") devtools::install_version("ggplot2", "2.2.1")
/requirements.r
no_license
ramnathv/test-course-r2
R
false
false
346
r
# install course dependencies devtools::install_version("tibble", "1.2") devtools::install_version("dplyr", "0.5.0") devtools::install_version("data.table", "1.10.0") devtools::install_version("hflights", "0.1") devtools::install_version("DBI", "0.5-1") devtools::install_version("RMySQL", "0.10.9") devtools::install_version("ggplot2", "2.2.1")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{model_gam_ex} \alias{model_gam_ex} \title{Model output tibble from the \code{\link{model_gam}} function} \format{ A data frame with 84 rows and 17 variables: \describe{ \item{\code{id}}{Numerical IDs for the IND~press combinations.} \item{\code{ind}}{Indicator names.} \item{\code{press}}{Pressure names.} \item{\code{model_type}}{Specification of the model type; at this stage containing only "gam" (Generalized Additive Model).} \item{\code{corrstruc}}{Specification of the correlation structure; at this stage containing only "none".} \item{\code{aic}}{AIC of the fitted models} \item{\code{edf}}{Estimated degrees of freedom for the model terms.} \item{\code{p_val}}{The p values for the smoothing term (the pressure).} \item{\code{signif_code}}{The significance codes for the p-values.} \item{\code{r_sq}}{The adjusted r-squared for the models. Defined as the proportion of variance explained, where original variance and residual variance are both estimated using unbiased estimators. This quantity can be negative if your model is worse than a one parameter constant model, and can be higher for the smaller of two nested models.} \item{\code{expl_dev}}{The proportion of the null deviance explained by the models.} \item{\code{nrmse}}{Absolute values of the root mean square error normalized by the standard deviation (NRMSE) using no back-transformation.} \item{\code{ks_test}}{The p-values from a Kolmogorov-Smirnov Test applied on the model residuals to test for normal distribution. P-values > 0.05 indicate normally distributed residuals.} \item{\code{tac}}{logical; indicates whether temporal autocorrelation (TAC) was detected in the residuals. TRUE if model residuals show TAC.} \item{\code{pres_outlier}}{A list-column with outliers identified for each model (i.e. Cook`s distance > 1). 
The indices present the position in the training data, including NAs.} \item{\code{excl_outlier}}{A list-column listing all outliers per model that have been excluded in the GAM fitting} \item{\code{model}}{A list-column of IND~press-specific gam objects.} } } \usage{ model_gam_ex } \description{ This is an example output tibble from the \code{model_gam} function applied on the Central Baltic Sea food web indicator demonstration data. } \keyword{datasets}
/man/model_gam_ex.Rd
no_license
saskiaotto/INDperform
R
false
true
2,584
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{model_gam_ex} \alias{model_gam_ex} \title{Model output tibble from the \code{\link{model_gam}} function} \format{ A data frame with 84 rows and 17 variables: \describe{ \item{\code{id}}{Numerical IDs for the IND~press combinations.} \item{\code{ind}}{Indicator names.} \item{\code{press}}{Pressure names.} \item{\code{model_type}}{Specification of the model type; at this stage containing only "gam" (Generalized Additive Model).} \item{\code{corrstruc}}{Specification of the correlation structure; at this stage containing only "none".} \item{\code{aic}}{AIC of the fitted models} \item{\code{edf}}{Estimated degrees of freedom for the model terms.} \item{\code{p_val}}{The p values for the smoothing term (the pressure).} \item{\code{signif_code}}{The significance codes for the p-values.} \item{\code{r_sq}}{The adjusted r-squared for the models. Defined as the proportion of variance explained, where original variance and residual variance are both estimated using unbiased estimators. This quantity can be negative if your model is worse than a one parameter constant model, and can be higher for the smaller of two nested models.} \item{\code{expl_dev}}{The proportion of the null deviance explained by the models.} \item{\code{nrmse}}{Absolute values of the root mean square error normalized by the standard deviation (NRMSE) using no back-transformation.} \item{\code{ks_test}}{The p-values from a Kolmogorov-Smirnov Test applied on the model residuals to test for normal distribution. P-values > 0.05 indicate normally distributed residuals.} \item{\code{tac}}{logical; indicates whether temporal autocorrelation (TAC) was detected in the residuals. TRUE if model residuals show TAC.} \item{\code{pres_outlier}}{A list-column with outliers identified for each model (i.e. Cook`s distance > 1). 
The indices present the position in the training data, including NAs.} \item{\code{excl_outlier}}{A list-column listing all outliers per model that have been excluded in the GAM fitting} \item{\code{model}}{A list-column of IND~press-specific gam objects.} } } \usage{ model_gam_ex } \description{ This is an example output tibble from the \code{model_gam} function applied on the Central Baltic Sea food web indicator demonstration data. } \keyword{datasets}
chi_emps <- read.csv("data/ChicagoEmployees.csv", stringsAsFactors = FALSE) ## Alternatively, we can use read_csv from the readr library library(readr) chi_emps <- read_csv("data/ChicagoEmployees.csv") ## summary to give summary statistics summary(chi_emps) ## info about the data types str(chi_emps) ### get counts of values using the table command table(chi_emps$Dept) ### get the counts in order using sort sort(table(chi_emps$Dept)) sort(table(chi_emps$Dept), decreasing = TRUE) police <- chi_emps[chi_emps$Dept == "POLICE", ] hist(police$AnnualSalary, xlab = "Annual Salary", ylab = "Count", main = "Salaries of Chicago Police Officers", col = "royalblue", breaks = 50) big2 <- c("POLICE", "FIRE") chi_emps$Dept2 <- ifelse(chi_emps$Dept %in% big2, chi_emps$Dept, "OTHER") table(chi_emps$Dept2, chi_emps$SalHour) boxplot(AnnualSalary~Dept2, data = chi_emps) #### Get a dataframe of all individuals who make more than $150k highly_paid <- chi_emps[chi_emps$AnnualSalary > 150000 & !is.na(chi_emps$AnnualSalary), ]
/.Rproj.user/9D0D6351/sources/per/t/49D5B43A-contents
no_license
thisisdaryn/saturday
R
false
false
1,075
chi_emps <- read.csv("data/ChicagoEmployees.csv", stringsAsFactors = FALSE) ## Alternatively, we can use read_csv from the readr library library(readr) chi_emps <- read_csv("data/ChicagoEmployees.csv") ## summary to give summary statistics summary(chi_emps) ## info about the data types str(chi_emps) ### get counts of values using the table command table(chi_emps$Dept) ### get the counts in order using sort sort(table(chi_emps$Dept)) sort(table(chi_emps$Dept), decreasing = TRUE) police <- chi_emps[chi_emps$Dept == "POLICE", ] hist(police$AnnualSalary, xlab = "Annual Salary", ylab = "Count", main = "Salaries of Chicago Police Officers", col = "royalblue", breaks = 50) big2 <- c("POLICE", "FIRE") chi_emps$Dept2 <- ifelse(chi_emps$Dept %in% big2, chi_emps$Dept, "OTHER") table(chi_emps$Dept2, chi_emps$SalHour) boxplot(AnnualSalary~Dept2, data = chi_emps) #### Get a dataframe of all individuals who make more than $150k highly_paid <- chi_emps[chi_emps$AnnualSalary > 150000 & !is.na(chi_emps$AnnualSalary), ]
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/robCov.R \name{robCov} \alias{robCov} \title{Robust covariance matrix estimation} \usage{ robCov(sY, alpha = 2, beta = 1.25) } \arguments{ \item{sY}{A matrix, where each column is a replicate observation on a multivariate r.v.} \item{alpha}{tuning parameter, see details.} \item{beta}{tuning parameter, see details.} } \value{ A list where: \itemize{ \item{\code{COV}}{ The estimated covariance matrix.} \item{\code{E}}{ A square root of the inverse covariance matrix. i.e. the inverse cov matrix is \code{t(E)\%*\%E};} \item{\code{half.ldet.V}}{ Half the log of the determinant of the covariance matrix;} \item{\code{mY}}{ The estimated mean;} \item{\code{sd}}{ The estimated standard deviations of each variable.} \item{\code{weights}}{ This is \code{w1/sum(w1)*ncol(sY)}, where \code{w1} are the weights of Campbell (1980).} \item{\code{lowVar}}{ The indexes of the columns of \code{sY} whose variance is zero (if any). These variable were removed and excluded from the covariance matrix. } } } \description{ Obtains a robust estimate of the covariance matrix of a sample of multivariate data, using Campbell's (1980) method as described on p231-235 of Krzanowski (1988). } \details{ Campbell (1980) suggests an estimator of the covariance matrix which downweights observations at more than some Mahalanobis distance \code{d.0} from the mean. \code{d.0} is \code{sqrt(nrow(sY))+alpha/sqrt(2)}. Weights are one for observations with Mahalanobis distance, \code{d}, less than \code{d.0}. Otherwise weights are \code{d.0*exp(-.5*(d-d.0)^2/beta^2)/d}. The defaults are as recommended by Campbell. This routine also uses pre-conditioning to ensure good scaling and stable numerical calculations. If some of the columns of \code{sY} has zero variance, these are removed. } \examples{ p <- 5;n <- 100 Y <- matrix(runif(p*n),p,n) robCov(Y) } \references{ Krzanowski, W.J. (1988) Principles of Multivariate Analysis. Oxford. 
Campbell, N.A. (1980) Robust procedures in multivariate analysis I: robust covariance estimation. JRSSC 29, 231-237. } \author{ Simon N. Wood, maintained by Matteo Fasiolo <matteo.fasiolo@gmail.com>. }
/fuzzedpackages/esaddle/man/robCov.Rd
no_license
akhikolla/testpackages
R
false
true
2,536
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/robCov.R \name{robCov} \alias{robCov} \title{Robust covariance matrix estimation} \usage{ robCov(sY, alpha = 2, beta = 1.25) } \arguments{ \item{sY}{A matrix, where each column is a replicate observation on a multivariate r.v.} \item{alpha}{tuning parameter, see details.} \item{beta}{tuning parameter, see details.} } \value{ A list where: \itemize{ \item{\code{COV}}{ The estimated covariance matrix.} \item{\code{E}}{ A square root of the inverse covariance matrix. i.e. the inverse cov matrix is \code{t(E)\%*\%E};} \item{\code{half.ldet.V}}{ Half the log of the determinant of the covariance matrix;} \item{\code{mY}}{ The estimated mean;} \item{\code{sd}}{ The estimated standard deviations of each variable.} \item{\code{weights}}{ This is \code{w1/sum(w1)*ncol(sY)}, where \code{w1} are the weights of Campbell (1980).} \item{\code{lowVar}}{ The indexes of the columns of \code{sY} whose variance is zero (if any). These variable were removed and excluded from the covariance matrix. } } } \description{ Obtains a robust estimate of the covariance matrix of a sample of multivariate data, using Campbell's (1980) method as described on p231-235 of Krzanowski (1988). } \details{ Campbell (1980) suggests an estimator of the covariance matrix which downweights observations at more than some Mahalanobis distance \code{d.0} from the mean. \code{d.0} is \code{sqrt(nrow(sY))+alpha/sqrt(2)}. Weights are one for observations with Mahalanobis distance, \code{d}, less than \code{d.0}. Otherwise weights are \code{d.0*exp(-.5*(d-d.0)^2/beta^2)/d}. The defaults are as recommended by Campbell. This routine also uses pre-conditioning to ensure good scaling and stable numerical calculations. If some of the columns of \code{sY} has zero variance, these are removed. } \examples{ p <- 5;n <- 100 Y <- matrix(runif(p*n),p,n) robCov(Y) } \references{ Krzanowski, W.J. (1988) Principles of Multivariate Analysis. Oxford. 
Campbell, N.A. (1980) Robust procedures in multivariate analysis I: robust covariance estimation. JRSSC 29, 231-237. } \author{ Simon N. Wood, maintained by Matteo Fasiolo <matteo.fasiolo@gmail.com>. }
# Class specification and constructors for mizer base parameters class # Class has members to store parameters of size based model # Copyright 2012 Finlay Scott and Julia Blanchard. # Copyright 2018 Gustav Delius and Richard Southwell. # Development has received funding from the European Commission's Horizon 2020 # Research and Innovation Programme under Grant Agreement No. 634495 # for the project MINOUW (http://minouw-project.eu/). # Distributed under the GPL 3 or later # Maintainer: Gustav Delius, University of York, <gustav.delius@york.ac.uk> # Naming conventions: # S4 classes and constructors: AClass # Functions: aFunction # Variables: a_variable # Validity function --------------------------------------------------------- # Not documented as removed later on validMizerParams <- function(object) { errors <- character() # grab some dims no_w <- length(object@w) no_w_full <- length(object@w_full) w_idx <- (no_w_full - no_w + 1):no_w_full # Check weight grids ---- # Check dw and dw_full are correct length if (length(object@dw) != no_w) { msg <- paste("dw is length ", length(object@dw), " and w is length ", no_w, ". These should be the same length", sep = "") errors <- c(errors, msg) } if (length(object@dw_full) != no_w_full) { msg <- paste("dw_full is length ", length(object@dw_full), " and w_full is length ", no_w_full, ". These should be the same length", sep = "") errors <- c(errors, msg) } # Check that the last entries of w_full and dw_full agree with w and dw if (any(object@w[] != object@w_full[w_idx])) { msg <- "The later entries of w_full should be equal to those of w." errors <- c(errors, msg) } if (any(object@dw[] != object@dw_full[w_idx])) { msg <- "The later entries of dw_full should be equal to those of dw." 
errors <- c(errors, msg) } # Check the array dimensions are good ---- # 2D arrays if (!all(c(length(dim(object@psi)), length(dim(object@intake_max)), length(dim(object@search_vol)), length(dim(object@metab)), length(dim(object@mu_b)), length(dim(object@interaction)), length(dim(object@catchability))) == 2)) { msg <- "psi, intake_max, search_vol, metab, mu_b, interaction and catchability must all be two dimensions" errors <- c(errors, msg) } # 3D arrays if (length(dim(object@selectivity)) != 3) { msg <- "selectivity must be three dimensions" errors <- c(errors, msg) } # Check number of species is equal across relevant slots if (!all(c( dim(object@psi)[1], dim(object@intake_max)[1], dim(object@search_vol)[1], dim(object@metab)[1], dim(object@mu_b)[1], dim(object@selectivity)[2], dim(object@catchability)[2], dim(object@interaction)[1], dim(object@interaction)[2]) == dim(object@species_params)[1])) { msg <- "The number of species in the model must be consistent across the species_params, psi, intake_max, search_vol, mu_b, interaction (dim 1), selectivity, catchability and interaction (dim 2) slots" errors <- c(errors, msg) } # Check number of size groups if (!all(c( dim(object@psi)[2], dim(object@intake_max)[2], dim(object@search_vol)[2], dim(object@metab)[2], dim(object@selectivity)[3]) == no_w)) { msg <- "The number of size bins in the model must be consistent across the w, psi, intake_max, search_vol, and selectivity (dim 3) slots" errors <- c(errors, msg) } # Check numbe of gears if (!isTRUE(all.equal(dim(object@selectivity)[1], dim(object@catchability)[1]))) { msg <- "The number of fishing gears must be consistent across the catchability and selectivity (dim 1) slots" errors <- c(errors, msg) } # Check names of dimnames of arrays ---- # sp dimension if (!all(c( names(dimnames(object@psi))[1], names(dimnames(object@intake_max))[1], names(dimnames(object@search_vol))[1], names(dimnames(object@metab))[1], names(dimnames(object@mu_b))[1], 
names(dimnames(object@selectivity))[2], names(dimnames(object@catchability))[2]) == "sp")) { msg <- "Name of first dimension of psi, intake_max, search_vol, metab, mu_b, and the second dimension of selectivity and catchability must be 'sp'" errors <- c(errors, msg) } #interaction dimension names if (names(dimnames(object@interaction))[1] != "predator") { msg <- "The first dimension of interaction must be called 'predator'" errors <- c(errors, msg) } if (names(dimnames(object@interaction))[2] != "prey") { msg <- "The first dimension of interaction must be called 'prey'" errors <- c(errors, msg) } # w dimension if (!all(c( names(dimnames(object@psi))[2], names(dimnames(object@intake_max))[2], names(dimnames(object@search_vol))[2], names(dimnames(object@metab))[2], names(dimnames(object@selectivity))[3]) == "w")) { msg <- "Name of second dimension of psi, intake_max, search_vol, metab and third dimension of selectivity must be 'w'" errors <- c(errors, msg) } if (!all(c( names(dimnames(object@selectivity))[1], names(dimnames(object@catchability))[1]) == "gear")) { msg <- "Name of first dimension of selectivity and catchability must be 'gear'" errors <- c(errors, msg) } # Check dimnames of species are identical # Bit tricky this one as I don't know of a way to compare lots of vectors # at the same time. 
Just use == and the recycling rule if (!all(c( dimnames(object@psi)[[1]], dimnames(object@intake_max)[[1]], dimnames(object@search_vol)[[1]], dimnames(object@metab)[[1]], dimnames(object@mu_b)[[1]], dimnames(object@selectivity)[[2]], dimnames(object@catchability)[[2]], dimnames(object@interaction)[[1]], dimnames(object@interaction)[[2]]) == object@species_params$species)) { msg <- "The species names of species_params, psi, intake_max, search_vol, metab, mu_b, selectivity, catchability and interaction must all be the same" errors <- c(errors, msg) } # Check dimnames of w if (!all(c( dimnames(object@psi)[[2]], dimnames(object@intake_max)[[2]], dimnames(object@search_vol)[[2]], dimnames(object@metab)[[2]]) == dimnames(object@selectivity)[[3]])) { msg <- "The size names of psi, intake_max, search_vol, metab and selectivity must all be the same" errors <- c(errors, msg) } # Check dimnames of gear if (!isTRUE(all.equal( dimnames(object@catchability)[[1]], dimnames(object@selectivity)[[1]]))) { msg <- "The gear names of selectivity and catchability must all be the same" errors <- c(errors, msg) } # Check the vector slots ---- if (length(object@rr_pp) != length(object@w_full)) { msg <- "rr_pp must be the same length as w_full" errors <- c(errors, msg) } if (length(object@cc_pp) != length(object@w_full)) { msg <- "cc_pp must be the same length as w_full" errors <- c(errors, msg) } # TODO: Rewrite the following into a test of the @rates_funcs slot ---- # SRR # if (!is.string(object@srr)) { # msg <- "srr needs to be specified as a string giving the name of the function" # errors <- c(errors, msg) # } else { # if (!exists(object@srr)) { # msg <- paste0("The stock-recruitment function ", # object@srr, # "does not exist.") # errors <- c(errors, msg) # } else { # srr <- get(object@srr) # if (!is.function(srr)) { # msg <- "The specified srr is not a function." 
# errors <- c(errors, msg) # } else { # # Must have two arguments: rdi amd species_params # if (!isTRUE(all.equal(names(formals(srr)), c("rdi", "species_params")))) { # msg <- "Arguments of srr function must be 'rdi' and 'species_params'" # errors <- c(errors, msg) # } # } # } # } # Should not have legacy r_max column (has been renamed to R_max) if ("r_max" %in% names(object@species_params)) { msg <- "The 'r_max' column in species_params should be called 'R_max'. You can use 'upgradeParams()' to upgrade your params object." errors <- c(errors, msg) } # # species_params data.frame must have columns: # # species, z0, alpha, eRepro # species_params_cols <- c("species","z0","alpha","erepro") # if (!all(species_params_cols %in% names(object@species_params))) { # msg <- "species_params data.frame must have 'species', 'z0', 'alpha' and 'erepro' columms" # errors <- c(errors,msg) # } # must also have SRR params but not sorted out yet # species_params # Column check done in constructor # If everything is OK if (length(errors) == 0) TRUE else errors } #### Class definition #### #' A class to hold the parameters for a size based model. #' #' Although it is possible to build a `MizerParams` object by hand it is #' not recommended and several constructors are available. Dynamic simulations #' are performed using [project()] function on objects of this class. As a #' user you should never need to access the slots inside a `MizerParams` object #' directly. #' #' @slot w The size grid for the fish part of the spectrum. An increasing #' vector of weights (in grams) running from the smallest egg size to the #' largest asymptotic size. #' @slot dw The widths (in grams) of the size bins #' @slot w_full The size grid for the full size range including the resource #' spectrum. An increasing vector of weights (in grams) running from the #' smallest resource size to the largest asymptotic size of fish. The #' last entries of the vector have to be equal to the content of the w slot. 
#' @slot dw_full The width of the size bins for the full spectrum. The last #' entries have to be equal to the content of the dw slot. #' @slot w_min_idx A vector holding the index of the weight of the egg size #' of each species #' @slot maturity An array (species x size) that holds the proportion of #' individuals of each species at size that are mature. This enters in the #' calculation of the spawning stock biomass with [getSSB()]. Set #' with [setReproduction()]. #' @slot psi An array (species x size) that holds the allocation to reproduction #' for each species at size, \eqn{\psi_i(w)}. Changed with #' [setReproduction()]. #' @slot intake_max An array (species x size) that holds the maximum intake for #' each species at size. Changed with [setMaxIntakeRate()]. #' @slot search_vol An array (species x size) that holds the search volume for #' each species at size. Changed with [setSearchVolume()]. #' @slot metab An array (species x size) that holds the metabolism #' for each species at size. Changed with [setMetabolicRate()]. #' @slot mu_b An array (species x size) that holds the external mortality rate #' \eqn{\mu_{b.i}(w)}. Changed with [setExtMort()]. #' @slot pred_kernel An array (species x predator size x prey size) that holds #' the predation coefficient of each predator at size on each prey size. If #' this is NA then the following two slots will be used. Changed with #' [setPredKernel()]. #' @slot ft_pred_kernel_e An array (species x log of predator/prey size ratio) #' that holds the Fourier transform of the feeding kernel in a form #' appropriate for evaluating the encounter rate integral. If this is NA #' then the `pred_kernel` will be used to calculate the available #' energy integral. Changed with [setPredKernel()]. #' @slot ft_pred_kernel_p An array (species x log of predator/prey size ratio) #' that holds the Fourier transform of the feeding kernel in a form #' appropriate for evaluating the predation mortality integral. 
If this is NA #' then the `pred_kernel` will be used to calculate the integral. #' Changed with [setPredKernel()]. #' @slot rr_pp A vector the same length as the w_full slot. The size specific #' growth rate of the resource spectrum. Changed with [setResource()]. #' @slot cc_pp A vector the same length as the w_full slot. The size specific #' carrying capacity of the resource spectrum. Changed with #' [setResource()]. #' @slot resource_dynamics Name of the function for projecting the resource abundance #' density by one timestep. The default is #' [resource_semichemostat()]. #' Changed with [setResource()]. #' @slot other_dynamics A named list of functions for projecting the #' values of other dynamical components of the ecosystem that may be modelled #' by a mizer extensions you have installed. The names of the list entries #' are the names of those components. #' @slot other_encounter A named list of functions for calculating the #' contribution to the encounter rate from each other dynamical component. #' @slot other_mort A named list of functions for calculating the #' contribution to the mortality rate from each other dynamical components. #' @slot other_params A list containing the parameters needed by any mizer #' extensions you may have installed to model other dynamical components of #' the ecosystem. #' @slot rates_funcs A named list with the names of the functions that should be #' used to calculate the rates needed by `project()`. By default this will be #' set to the names of the built-in rate functions. #' @slot sc The community abundance of the scaling community #' @slot species_params A data.frame to hold the species specific parameters. #' See [newMultispeciesParams()] for details. #' @slot gear_params Data frame with parameters for gear selectivity. See #' [setFishing()] for details. #' @slot interaction The species specific interaction matrix, \eqn{\theta_{ij}}. #' Changed with [setInteraction()]. 
#' @slot selectivity An array (gear x species x w) that holds the selectivity of #' each gear for species and size, \eqn{S_{g,i,w}}. Changed with #' [setFishing()]. #' @slot catchability An array (gear x species) that holds the catchability of #' each species by each gear, \eqn{Q_{g,i}}. Changed with #' [setFishing()]. #' @slot initial_effort A vector containing the initial fishing effort for each #' gear. Changed with [setFishing()]. #' @slot initial_n An array (species x size) that holds the initial abundance of #' each species at each weight. #' @slot initial_n_pp A vector the same length as the w_full slot that describes #' the initial resource abundance at each weight. #' @slot initial_n_other A list with the initial abundances of all other #' ecosystem components. Has length zero if there are no other components. #' @slot resource_params List with parameters for resource. See [setResource()]. #' @slot A Abundance multipliers. #' @slot linecolour A named vector of colour values, named by species. #' Used to give consistent colours in plots. #' @slot linetype A named vector of linetypes, named by species. #' Used to give consistent line types in plots. #' @slot ft_mask An array (species x w_full) with zeros for weights larger than #' the asymptotic weight of each species. Used to efficiently minimize #' wrap-around errors in Fourier transform calculations. #' #' The \linkS4class{MizerParams} class is fairly complex with a large number of #' slots, many of which are multidimensional arrays. The dimensions of these #' arrays is strictly enforced so that `MizerParams` objects are consistent #' in terms of number of species and number of size classes. #' #' The `MizerParams` class does not hold any dynamic information, e.g. #' abundances or harvest effort through time. These are held in #' \linkS4class{MizerSim} objects. 
#'
#' @seealso [project()] [MizerSim()]
#' [emptyParams()] [newMultispeciesParams()]
#' [newCommunityParams()]
#' [newTraitParams()]
#' @export
setClass(
    "MizerParams",
    slots = c(
        w = "numeric",
        dw = "numeric",
        w_full = "numeric",
        dw_full = "numeric",
        w_min_idx = "numeric",
        maturity = "array",
        psi = "array",
        initial_n = "array",
        intake_max = "array",
        search_vol = "array",
        metab = "array",
        pred_kernel = "array",
        ft_pred_kernel_e = "array",
        ft_pred_kernel_p = "array",
        mu_b = "array",
        rr_pp = "numeric",
        cc_pp = "numeric",
        resource_dynamics = "character",
        resource_params = "list",
        other_dynamics = "list",
        other_params = "list",
        other_encounter = "list",
        other_mort = "list",
        rates_funcs = "list",
        sc = "numeric",
        initial_n_pp = "numeric",
        initial_n_other = "list",
        species_params = "data.frame",
        interaction = "array",
        gear_params = "data.frame",
        selectivity = "array",
        catchability = "array",
        initial_effort = "numeric",
        A = "numeric",
        linecolour = "character",
        linetype = "character",
        ft_mask = "array"
    )
    # NOTE(review): a stray trailing comma used to follow `slots = c(...)`,
    # passing an empty positional argument to setClass(); removed.
)

# Attach the validity function and then remove it from the namespace, since
# it is only ever called through validObject() on MizerParams objects.
setValidity("MizerParams", validMizerParams)
remove(validMizerParams)

#' Create empty MizerParams object of the right size
#'
#' An internal function.
#' Sets up a valid \linkS4class{MizerParams} object with all the slots
#' initialised and given dimension names, but with some slots left empty. This
#' function is to be used by other functions to set up full parameter objects.
#'
#' @section Size grid:
# Some code is commented out that would allow the user to
# specify a grid with a non-constant log spacing. But we comment this out
# for now because of the fft.
# #' When the `w_full` argument is not given, then
#' A size grid is created so that
#' the log-sizes are equally spaced. The spacing is chosen so that there will be
#' `no_w` fish size bins, with the smallest starting at `min_w` and the largest
#' starting at `max_w`. For `w_full` additional size bins are added below
#' `min_w`, with the same log size. The number of extra bins is such that
#' `min_w_pp` comes to lie within the smallest bin.
#'
#' @section Changes to species params:
#' The `species_params` slot of the returned MizerParams object may differ
#' slightly from the data frame supplied as argument to this function in the
#' following ways:
#' \itemize{
#'   \item Default values are set for \code{w_min, w_inf, alpha, gear, interaction_p}.
#'   \item The egg sizes in `w_min` are rounded down to lie on a grid point.
#' }
#' Note that the other characteristic sizes of the species, like `w_mat` and
#' `w_inf`, are not modified to lie on grid points.
#'
#' @param species_params A data frame of species-specific parameter values.
#' @param gear_params A data frame with gear-specific parameter values.
#' @param no_w The number of size bins in the consumer spectrum.
#' @param min_w Sets the size of the eggs of all species for which this is not
#'   given in the `w_min` column of the `species_params` dataframe.
# #' @param w_full Increasing vector of weights giving the boundaries of size
# #' classes. Must include the value min_w. Has one more entry than the number
# #' of size bins. The last entry is the upper end of the largest size class. It
# #' be used to calculate the sizes of the size bins but will not be stored in
# #' the w_full slot of the returned MizerParams object. If this argument is not
# #' provided then size classes are set by the other arguments as described in
# #' the Details.
#' @param max_w The largest size of the consumer spectrum. By default this is
#'   set to the largest `w_inf` specified in the `species_params` data
#'   frame.
#' @param min_w_pp The smallest size of the resource spectrum.
# #' Ignored if w_full is specified.
#'
#' @return An empty but valid MizerParams object
#' @seealso See [newMultispeciesParams()] for a function that fills
#' the slots left empty by this function.
#' @export
emptyParams <- function(species_params,
                        gear_params = data.frame(),
                        no_w = 100,
                        min_w = 0.001,
                        # w_full = NA,
                        max_w = NA,
                        min_w_pp = 1e-12) {
    assert_that(is.data.frame(species_params),
                is.data.frame(gear_params),
                no_w > 10)

    ## Set defaults ----
    if (is.na(min_w_pp)) min_w_pp <- 1e-12
    species_params <- set_species_param_default(species_params, "w_min", min_w)
    min_w <- min(species_params$w_min)
    species_params <- validSpeciesParams(species_params)
    gear_params <- validGearParams(gear_params, species_params)
    if (is.na(max_w)) {
        max_w <- max(species_params$w_inf)
    } else {
        if (max(species_params$w_inf) > max_w * (1 + 1e-9)) {
            # The fudge factor is there to avoid false alerts due to
            # rounding errors.
            too_large <- species_params$species[max_w < species_params$w_inf]
            stop("Some of your species have a maximum size larger than max_w: ",
                 toString(too_large))
        }
    }

    # Set up grids ----
    # The following code anticipates that in future we might allow the user to
    # specify a grid with a non-constant log spacing. But we comment this out
    # for now because of the fft.
    # if (missing(w_full)) {
    # set up logarithmic grids
    dx <- log10(max_w / min_w) / (no_w - 1)
    # Community grid
    w <- 10^(seq(from = log10(min_w), by = dx, length.out = no_w))
    # dw[i] = w[i+1] - w[i]. Following formula works also for last entry dw[no_w]
    dw <- (10^dx - 1) * w
    # To avoid issues due to numerical imprecision
    min_w <- w[1]

    # For fft methods we need a constant log bin size throughout.
    # Therefore we use as many steps as are necessary so that the first size
    # class includes min_w_pp.
    x_pp <- rev(seq(from = log10(min_w),
                    to = log10(min_w_pp),
                    by = -dx)) - dx
    w_full <- c(10^x_pp, w)
    # If min_w_pp happened to lie exactly on a grid point, we now added
    # one grid point too much which we need to remove again
    # NOTE(review): exact floating-point comparison; relies on min_w_pp being
    # reproduced exactly by 10^x_pp -- confirm if grid code is ever changed.
    if (w_full[2] == min_w_pp) {
        w_full <- w_full[2:length(w_full)]
    }
    no_w_full <- length(w_full)
    dw_full <- (10^dx - 1) * w_full
    # } else {
    #     # use supplied w_full
    #     no_w_full <- length(w_full) - 1
    #     dw_full <- diff(w_full)
    #     w_full <- w_full[seq_along(dw_full)]
    #     # Check that sizes are increasing
    #     if (any(dw_full <= 0)) {
    #         stop("w_full must be increasing.")
    #     }
    #     w_min_idx <- match(min_w, w_full)
    #     if (is.na(w_min_idx)) {
    #         stop("w_min must be contained in w_full.")
    #     }
    #     w <- w_full[w_min_idx:no_w_full]
    #     dw <- dw_full[w_min_idx:no_w_full]
    #     no_w <- length(w)
    #     min_w_pp <- w_full[1]
    # }

    # Basic arrays for templates ----
    no_sp <- nrow(species_params)
    species_names <- as.character(species_params$species)
    gear_names <- unique(gear_params$gear)
    mat1 <- array(NA, dim = c(no_sp, no_w),
                  dimnames = list(sp = species_names, w = signif(w, 3)))
    ft_pred_kernel <- array(NA, dim = c(no_sp, no_w_full),
                            dimnames = list(sp = species_names,
                                            k = 1:no_w_full))
    # Mask that zeroes out weights above each species' asymptotic size, used
    # to limit wrap-around errors in fft-based integrals.
    ft_mask <- plyr::aaply(species_params$w_inf, 1,
                           function(x) w_full < x, .drop = FALSE)
    selectivity <- array(0, dim = c(length(gear_names), no_sp, no_w),
                         dimnames = list(gear = gear_names,
                                         sp = species_names,
                                         w = signif(w, 3)))
    catchability <- array(0, dim = c(length(gear_names), no_sp),
                          dimnames = list(gear = gear_names,
                                          sp = species_names))
    initial_effort <- rep(0, length(gear_names))
    names(initial_effort) <- gear_names
    interaction <- array(1, dim = c(no_sp, no_sp),
                         dimnames = list(predator = species_names,
                                         prey = species_names))
    vec1 <- as.numeric(rep(NA, no_w_full))
    names(vec1) <- signif(w_full, 3)

    # Round down w_min to lie on grid points and store the indices of these
    # grid points in w_min_idx
    w_min_idx <- as.vector(suppressWarnings(
        tapply(species_params$w_min, seq_len(no_sp),
               function(w_min, wx) max(which(wx <= w_min)), wx = w)))
    # Due to rounding errors this might happen:
    w_min_idx[w_min_idx == -Inf] <- 1
    names(w_min_idx) <- species_names
    species_params$w_min <- w[w_min_idx]

    # Colour and linetype scales ----
    # for use in plots
    # Colour-blind-friendly palettes
    # From http://dr-k-lo.blogspot.co.uk/2013/07/a-color-blind-friendly-palette-for-r.html
    # cbbPalette <- c("#000000", "#009E73", "#e79f00", "#9ad0f3", "#0072B2", "#D55E00",
    #                 "#CC79A7", "#F0E442")
    # From http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette
    # cbbPalette <- c("#E69F00", "#56B4E9", "#009E73",
    #                 "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
    # Random palette generated on https://medialab.github.io/iwanthue/
    colour_palette <- c("#815f00", "#6237e2", "#8da600", "#de53ff",
                        "#0e4300", "#430079", "#6caa72", "#ee0053",
                        "#007957", "#b42979", "#142300", "#a08dfb",
                        "#644500", "#04004c", "#b79955", "#0060a8",
                        "#dc8852", "#007ca9", "#ab003c", "#9796d9",
                        "#472c00", "#b492b0", "#140000", "#dc8488",
                        "#005c67", "#5c585a")
    # type_palette <- c("solid", "dashed", "dotdash", "longdash",
    #                   "twodash")
    type_palette <- c("solid")

    if ("linecolour" %in% names(species_params)) {
        linecolour <- species_params$linecolour
        # If any NA's first fill them with unused colours
        linecolour[is.na(linecolour)] <-
            setdiff(colour_palette, linecolour)[1:sum(is.na(linecolour))]
        # if there are still NAs, start from beginning of palette again
        linecolour[is.na(linecolour)] <-
            colour_palette[1:sum(is.na(linecolour))]
    } else {
        linecolour <- rep(colour_palette, length.out = no_sp)
    }
    names(linecolour) <- as.character(species_names)
    linecolour <- c(linecolour, "Total" = "black", "Resource" = "green",
                    "Background" = "grey", "Fishing" = "red")

    if ("linetype" %in% names(species_params)) {
        linetype <- species_params$linetype
        linetype[is.na(linetype)] <- "solid"
    } else {
        linetype <- rep(type_palette, length.out = no_sp)
    }
    names(linetype) <- as.character(species_names)
    linetype <- c(linetype, "Total" = "solid", "Resource" = "solid",
                  "Background" = "solid", "Fishing" = "solid")

    # Make object ----
    # Should Z0, rrPP and ccPP have names (species names etc)?
    params <- new(
        "MizerParams",
        w = w,
        dw = dw,
        w_full = w_full,
        dw_full = dw_full,
        w_min_idx = w_min_idx,
        maturity = mat1,
        psi = mat1,
        initial_n = mat1,
        intake_max = mat1,
        search_vol = mat1,
        metab = mat1,
        mu_b = mat1,
        ft_pred_kernel_e = ft_pred_kernel,
        ft_pred_kernel_p = ft_pred_kernel,
        pred_kernel = array(),
        gear_params = gear_params,
        selectivity = selectivity,
        catchability = catchability,
        initial_effort = initial_effort,
        rr_pp = vec1,
        cc_pp = vec1,
        sc = w,
        initial_n_pp = vec1,
        species_params = species_params,
        interaction = interaction,
        other_dynamics = list(),
        other_encounter = list(),
        other_mort = list(),
        rates_funcs = list(
            Rates = "mizerRates",
            Encounter = "mizerEncounter",
            FeedingLevel = "mizerFeedingLevel",
            EReproAndGrowth = "mizerEReproAndGrowth",
            PredRate = "mizerPredRate",
            PredMort = "mizerPredMort",
            FMort = "mizerFMort",
            Mort = "mizerMort",
            ERepro = "mizerERepro",
            EGrowth = "mizerEGrowth",
            ResourceMort = "mizerResourceMort",
            RDI = "mizerRDI",
            RDD = "BevertonHoltRDD"),
        resource_dynamics = "resource_semichemostat",
        other_params = list(),
        initial_n_other = list(),
        A = as.numeric(rep(NA, no_sp)),
        linecolour = linecolour,
        linetype = linetype,
        ft_mask = ft_mask
    )
    return(params)
}

#' Set line colours to be used in mizer plots
#'
#' @param params A MizerParams object
#' @param colours A named list or named vector of line colours.
#'
#' @return The MizerParams object with updated line colours
#' @export
#' @examples
#' params <- NS_params
#' params <- setColours(params, list("Cod" = "red", "Haddock" = "#00ff00"))
#' plotSpectra(params)
#' getColours(params)
setColours <- function(params, colours) {
    assert_that(is(params, "MizerParams"),
                all(validColour(colours)))
    # Merge the supplied colours into the existing named vector: entries with
    # matching names are overwritten, new names are appended.
    params@linecolour <- unlist(
        modifyList(as.list(params@linecolour), as.list(colours)))
    params
}

#' @rdname setColours
#' @export
getColours <- function(params) {
    # NOTE(review): returns a named character vector, whereas getLinetypes()
    # returns a list; kept as-is for backward compatibility.
    params@linecolour
}

# Check which entries of a character vector (or list) are valid R colour
# specifications. Returns a named logical vector, one entry per input, TRUE
# when grDevices::col2rgb() can interpret the value. Not exported.
validColour <- function(colour) {
    # vapply instead of sapply so the return type is a logical vector even
    # for empty input.
    vapply(colour, function(X) {
        tryCatch(is.matrix(col2rgb(X)),
                 error = function(e) FALSE)
    }, logical(1))
}

#' Set linetypes to be used in mizer plots
#'
#' @param params A MizerParams object
#' @param linetypes A named list or named vector of linetypes.
#'
#' @return The MizerParams object with updated linetypes
#' @export
#' @examples
#' params <- NS_params
#' params <- setLinetypes(params, list("Cod" = "solid"))
#' plotSpectra(params)
#' getLinetypes(params)
setLinetypes <- function(params, linetypes) {
    # NOTE(review): unlike setColours(), the supplied linetypes are not
    # validated here; an invalid linetype only surfaces when plotting.
    assert_that(is(params, "MizerParams"))
    params@linetype <- unlist(
        modifyList(as.list(params@linetype), as.list(linetypes)))
    params
}

#' @rdname setLinetypes
#' @export
getLinetypes <- function(params) {
    as.list(params@linetype)
}

#' Size bins
#'
#' This is a good place to explain how mizer discretises the size
#'
#' @param params A MizerParams object
#'
#' @export
w <- function(params) {
    params@w
}

#' @rdname w
#' @export
w_full <- function(params) {
    params@w_full
}

#' @rdname w
#' @export
dw <- function(params) {
    params@dw
}

#' @rdname w
#' @export
dw_full <- function(params) {
    params@dw_full
}
/R/MizerParams-class.R
no_license
ReneevanDorst/mizer
R
false
false
31,828
r
# Class specification and constructors for mizer base parameters class
# Class has members to store parameters of size based model

# Copyright 2012 Finlay Scott and Julia Blanchard.
# Copyright 2018 Gustav Delius and Richard Southwell.
# Development has received funding from the European Commission's Horizon 2020
# Research and Innovation Programme under Grant Agreement No. 634495
# for the project MINOUW (http://minouw-project.eu/).
# Distributed under the GPL 3 or later
# Maintainer: Gustav Delius, University of York, <gustav.delius@york.ac.uk>

# Naming conventions:
# S4 classes and constructors: AClass
# Functions: aFunction
# Variables: a_variable

# Validity function ---------------------------------------------------------
# Not documented as removed later on (it is attached via setValidity() and
# then deleted from the namespace).
# Returns TRUE when `object` is consistent, otherwise a character vector of
# error messages, as required by the S4 validity protocol.
validMizerParams <- function(object) {
    errors <- character()
    # grab some dims
    no_w <- length(object@w)
    no_w_full <- length(object@w_full)
    # Indices of the consumer part of the full grid (its last no_w entries)
    w_idx <- (no_w_full - no_w + 1):no_w_full

    # Check weight grids ----
    # Check dw and dw_full are correct length
    if (length(object@dw) != no_w) {
        msg <- paste("dw is length ", length(object@dw),
                     " and w is length ", no_w,
                     ". These should be the same length", sep = "")
        errors <- c(errors, msg)
    }
    if (length(object@dw_full) != no_w_full) {
        msg <- paste("dw_full is length ", length(object@dw_full),
                     " and w_full is length ", no_w_full,
                     ". These should be the same length", sep = "")
        errors <- c(errors, msg)
    }
    # Check that the last entries of w_full and dw_full agree with w and dw
    if (any(object@w[] != object@w_full[w_idx])) {
        msg <- "The later entries of w_full should be equal to those of w."
        errors <- c(errors, msg)
    }
    if (any(object@dw[] != object@dw_full[w_idx])) {
        msg <- "The later entries of dw_full should be equal to those of dw."
        errors <- c(errors, msg)
    }

    # Check the array dimensions are good ----
    # 2D arrays
    if (!all(c(length(dim(object@psi)),
               length(dim(object@intake_max)),
               length(dim(object@search_vol)),
               length(dim(object@metab)),
               length(dim(object@mu_b)),
               length(dim(object@interaction)),
               length(dim(object@catchability))) == 2)) {
        msg <- "psi, intake_max, search_vol, metab, mu_b, interaction and catchability must all be two dimensions"
        errors <- c(errors, msg)
    }
    # 3D arrays
    if (length(dim(object@selectivity)) != 3) {
        msg <- "selectivity must be three dimensions"
        errors <- c(errors, msg)
    }
    # Check number of species is equal across relevant slots
    if (!all(c(
        dim(object@psi)[1],
        dim(object@intake_max)[1],
        dim(object@search_vol)[1],
        dim(object@metab)[1],
        dim(object@mu_b)[1],
        dim(object@selectivity)[2],
        dim(object@catchability)[2],
        dim(object@interaction)[1],
        dim(object@interaction)[2]) ==
        dim(object@species_params)[1])) {
        msg <- "The number of species in the model must be consistent across the species_params, psi, intake_max, search_vol, mu_b, interaction (dim 1), selectivity, catchability and interaction (dim 2) slots"
        errors <- c(errors, msg)
    }
    # Check number of size groups
    if (!all(c(
        dim(object@psi)[2],
        dim(object@intake_max)[2],
        dim(object@search_vol)[2],
        dim(object@metab)[2],
        dim(object@selectivity)[3]) == no_w)) {
        msg <- "The number of size bins in the model must be consistent across the w, psi, intake_max, search_vol, and selectivity (dim 3) slots"
        errors <- c(errors, msg)
    }
    # Check number of gears
    if (!isTRUE(all.equal(dim(object@selectivity)[1],
                          dim(object@catchability)[1]))) {
        msg <- "The number of fishing gears must be consistent across the catchability and selectivity (dim 1) slots"
        errors <- c(errors, msg)
    }

    # Check names of dimnames of arrays ----
    # sp dimension
    if (!all(c(
        names(dimnames(object@psi))[1],
        names(dimnames(object@intake_max))[1],
        names(dimnames(object@search_vol))[1],
        names(dimnames(object@metab))[1],
        names(dimnames(object@mu_b))[1],
        names(dimnames(object@selectivity))[2],
        names(dimnames(object@catchability))[2]) == "sp")) {
        msg <- "Name of first dimension of psi, intake_max, search_vol, metab, mu_b, and the second dimension of selectivity and catchability must be 'sp'"
        errors <- c(errors, msg)
    }
    # interaction dimension names
    if (names(dimnames(object@interaction))[1] != "predator") {
        msg <- "The first dimension of interaction must be called 'predator'"
        errors <- c(errors, msg)
    }
    if (names(dimnames(object@interaction))[2] != "prey") {
        # Fixed message: this checks dimension 2, not dimension 1.
        msg <- "The second dimension of interaction must be called 'prey'"
        errors <- c(errors, msg)
    }
    # w dimension
    if (!all(c(
        names(dimnames(object@psi))[2],
        names(dimnames(object@intake_max))[2],
        names(dimnames(object@search_vol))[2],
        names(dimnames(object@metab))[2],
        names(dimnames(object@selectivity))[3]) == "w")) {
        msg <- "Name of second dimension of psi, intake_max, search_vol, metab and third dimension of selectivity must be 'w'"
        errors <- c(errors, msg)
    }
    if (!all(c(
        names(dimnames(object@selectivity))[1],
        names(dimnames(object@catchability))[1]) == "gear")) {
        msg <- "Name of first dimension of selectivity and catchability must be 'gear'"
        errors <- c(errors, msg)
    }
    # Check dimnames of species are identical
    # Bit tricky this one as I don't know of a way to compare lots of vectors
    # at the same time. Just use == and the recycling rule
    if (!all(c(
        dimnames(object@psi)[[1]],
        dimnames(object@intake_max)[[1]],
        dimnames(object@search_vol)[[1]],
        dimnames(object@metab)[[1]],
        dimnames(object@mu_b)[[1]],
        dimnames(object@selectivity)[[2]],
        dimnames(object@catchability)[[2]],
        dimnames(object@interaction)[[1]],
        dimnames(object@interaction)[[2]]) ==
        object@species_params$species)) {
        msg <- "The species names of species_params, psi, intake_max, search_vol, metab, mu_b, selectivity, catchability and interaction must all be the same"
        errors <- c(errors, msg)
    }
    # Check dimnames of w
    if (!all(c(
        dimnames(object@psi)[[2]],
        dimnames(object@intake_max)[[2]],
        dimnames(object@search_vol)[[2]],
        dimnames(object@metab)[[2]]) ==
        dimnames(object@selectivity)[[3]])) {
        msg <- "The size names of psi, intake_max, search_vol, metab and selectivity must all be the same"
        errors <- c(errors, msg)
    }
    # Check dimnames of gear
    if (!isTRUE(all.equal(
        dimnames(object@catchability)[[1]],
        dimnames(object@selectivity)[[1]]))) {
        msg <- "The gear names of selectivity and catchability must all be the same"
        errors <- c(errors, msg)
    }

    # Check the vector slots ----
    if (length(object@rr_pp) != length(object@w_full)) {
        msg <- "rr_pp must be the same length as w_full"
        errors <- c(errors, msg)
    }
    if (length(object@cc_pp) != length(object@w_full)) {
        msg <- "cc_pp must be the same length as w_full"
        errors <- c(errors, msg)
    }

    # TODO: Rewrite the following into a test of the @rates_funcs slot ----
    # SRR
    # if (!is.string(object@srr)) {
    #     msg <- "srr needs to be specified as a string giving the name of the function"
    #     errors <- c(errors, msg)
    # } else {
    #     if (!exists(object@srr)) {
    #         msg <- paste0("The stock-recruitment function ",
    #                       object@srr,
    #                       "does not exist.")
    #         errors <- c(errors, msg)
    #     } else {
    #         srr <- get(object@srr)
    #         if (!is.function(srr)) {
    #             msg <- "The specified srr is not a function."
    #             errors <- c(errors, msg)
    #         } else {
    #             # Must have two arguments: rdi amd species_params
    #             if (!isTRUE(all.equal(names(formals(srr)), c("rdi", "species_params")))) {
    #                 msg <- "Arguments of srr function must be 'rdi' and 'species_params'"
    #                 errors <- c(errors, msg)
    #             }
    #         }
    #     }
    # }

    # Should not have legacy r_max column (has been renamed to R_max)
    if ("r_max" %in% names(object@species_params)) {
        msg <- "The 'r_max' column in species_params should be called 'R_max'. You can use 'upgradeParams()' to upgrade your params object."
        errors <- c(errors, msg)
    }

    # # species_params data.frame must have columns:
    # # species, z0, alpha, eRepro
    # species_params_cols <- c("species","z0","alpha","erepro")
    # if (!all(species_params_cols %in% names(object@species_params))) {
    #     msg <- "species_params data.frame must have 'species', 'z0', 'alpha' and 'erepro' columms"
    #     errors <- c(errors,msg)
    # }
    # must also have SRR params but not sorted out yet

    # species_params
    # Column check done in constructor

    # If everything is OK
    if (length(errors) == 0) TRUE else errors
}

#### Class definition ####

#' A class to hold the parameters for a size based model.
#'
#' Although it is possible to build a `MizerParams` object by hand it is
#' not recommended and several constructors are available. Dynamic simulations
#' are performed using [project()] function on objects of this class. As a
#' user you should never need to access the slots inside a `MizerParams` object
#' directly.
#'
#' @slot w The size grid for the fish part of the spectrum. An increasing
#'   vector of weights (in grams) running from the smallest egg size to the
#'   largest asymptotic size.
#' @slot dw The widths (in grams) of the size bins
#' @slot w_full The size grid for the full size range including the resource
#'   spectrum. An increasing vector of weights (in grams) running from the
#'   smallest resource size to the largest asymptotic size of fish. The
#'   last entries of the vector have to be equal to the content of the w slot.
#' @slot dw_full The width of the size bins for the full spectrum. The last #' entries have to be equal to the content of the dw slot. #' @slot w_min_idx A vector holding the index of the weight of the egg size #' of each species #' @slot maturity An array (species x size) that holds the proportion of #' individuals of each species at size that are mature. This enters in the #' calculation of the spawning stock biomass with [getSSB()]. Set #' with [setReproduction()]. #' @slot psi An array (species x size) that holds the allocation to reproduction #' for each species at size, \eqn{\psi_i(w)}. Changed with #' [setReproduction()]. #' @slot intake_max An array (species x size) that holds the maximum intake for #' each species at size. Changed with [setMaxIntakeRate()]. #' @slot search_vol An array (species x size) that holds the search volume for #' each species at size. Changed with [setSearchVolume()]. #' @slot metab An array (species x size) that holds the metabolism #' for each species at size. Changed with [setMetabolicRate()]. #' @slot mu_b An array (species x size) that holds the external mortality rate #' \eqn{\mu_{b.i}(w)}. Changed with [setExtMort()]. #' @slot pred_kernel An array (species x predator size x prey size) that holds #' the predation coefficient of each predator at size on each prey size. If #' this is NA then the following two slots will be used. Changed with #' [setPredKernel()]. #' @slot ft_pred_kernel_e An array (species x log of predator/prey size ratio) #' that holds the Fourier transform of the feeding kernel in a form #' appropriate for evaluating the encounter rate integral. If this is NA #' then the `pred_kernel` will be used to calculate the available #' energy integral. Changed with [setPredKernel()]. #' @slot ft_pred_kernel_p An array (species x log of predator/prey size ratio) #' that holds the Fourier transform of the feeding kernel in a form #' appropriate for evaluating the predation mortality integral. 
If this is NA #' then the `pred_kernel` will be used to calculate the integral. #' Changed with [setPredKernel()]. #' @slot rr_pp A vector the same length as the w_full slot. The size specific #' growth rate of the resource spectrum. Changed with [setResource()]. #' @slot cc_pp A vector the same length as the w_full slot. The size specific #' carrying capacity of the resource spectrum. Changed with #' [setResource()]. #' @slot resource_dynamics Name of the function for projecting the resource abundance #' density by one timestep. The default is #' [resource_semichemostat()]. #' Changed with [setResource()]. #' @slot other_dynamics A named list of functions for projecting the #' values of other dynamical components of the ecosystem that may be modelled #' by a mizer extensions you have installed. The names of the list entries #' are the names of those components. #' @slot other_encounter A named list of functions for calculating the #' contribution to the encounter rate from each other dynamical component. #' @slot other_mort A named list of functions for calculating the #' contribution to the mortality rate from each other dynamical components. #' @slot other_params A list containing the parameters needed by any mizer #' extensions you may have installed to model other dynamical components of #' the ecosystem. #' @slot rates_funcs A named list with the names of the functions that should be #' used to calculate the rates needed by `project()`. By default this will be #' set to the names of the built-in rate functions. #' @slot sc The community abundance of the scaling community #' @slot species_params A data.frame to hold the species specific parameters. #' See [newMultispeciesParams()] for details. #' @slot gear_params Data frame with parameters for gear selectivity. See #' [setFishing()] for details. #' @slot interaction The species specific interaction matrix, \eqn{\theta_{ij}}. #' Changed with [setInteraction()]. 
#' @slot selectivity An array (gear x species x w) that holds the selectivity of #' each gear for species and size, \eqn{S_{g,i,w}}. Changed with #' [setFishing()]. #' @slot catchability An array (gear x species) that holds the catchability of #' each species by each gear, \eqn{Q_{g,i}}. Changed with #' [setFishing()]. #' @slot initial_effort A vector containing the initial fishing effort for each #' gear. Changed with [setFishing()]. #' @slot initial_n An array (species x size) that holds the initial abundance of #' each species at each weight. #' @slot initial_n_pp A vector the same length as the w_full slot that describes #' the initial resource abundance at each weight. #' @slot initial_n_other A list with the initial abundances of all other #' ecosystem components. Has length zero if there are no other components. #' @slot resource_params List with parameters for resource. See [setResource()]. #' @slot A Abundance multipliers. #' @slot linecolour A named vector of colour values, named by species. #' Used to give consistent colours in plots. #' @slot linetype A named vector of linetypes, named by species. #' Used to give consistent line types in plots. #' @slot ft_mask An array (species x w_full) with zeros for weights larger than #' the asymptotic weight of each species. Used to efficiently minimize #' wrap-around errors in Fourier transform calculations. #' #' The \linkS4class{MizerParams} class is fairly complex with a large number of #' slots, many of which are multidimensional arrays. The dimensions of these #' arrays is strictly enforced so that `MizerParams` objects are consistent #' in terms of number of species and number of size classes. #' #' The `MizerParams` class does not hold any dynamic information, e.g. #' abundances or harvest effort through time. These are held in #' \linkS4class{MizerSim} objects. 
#'
#' @seealso [project()] [MizerSim()]
#' [emptyParams()] [newMultispeciesParams()]
#' [newCommunityParams()]
#' [newTraitParams()]
#' @export
setClass(
    "MizerParams",
    slots = c(
        w = "numeric",
        dw = "numeric",
        w_full = "numeric",
        dw_full = "numeric",
        w_min_idx = "numeric",
        maturity = "array",
        psi = "array",
        initial_n = "array",
        intake_max = "array",
        search_vol = "array",
        metab = "array",
        pred_kernel = "array",
        ft_pred_kernel_e = "array",
        ft_pred_kernel_p = "array",
        mu_b = "array",
        rr_pp = "numeric",
        cc_pp = "numeric",
        resource_dynamics = "character",
        resource_params = "list",
        other_dynamics = "list",
        other_params = "list",
        other_encounter = "list",
        other_mort = "list",
        rates_funcs = "list",
        sc = "numeric",
        initial_n_pp = "numeric",
        initial_n_other = "list",
        species_params = "data.frame",
        interaction = "array",
        gear_params = "data.frame",
        selectivity = "array",
        catchability = "array",
        initial_effort = "numeric",
        A = "numeric",
        linecolour = "character",
        linetype = "character",
        ft_mask = "array"
    )
    # NOTE(review): a stray trailing comma used to follow `slots = c(...)`,
    # passing an empty positional argument to setClass(); removed.
)

# Attach the validity function and then remove it from the namespace, since
# it is only ever called through validObject() on MizerParams objects.
setValidity("MizerParams", validMizerParams)
remove(validMizerParams)

#' Create empty MizerParams object of the right size
#'
#' An internal function.
#' Sets up a valid \linkS4class{MizerParams} object with all the slots
#' initialised and given dimension names, but with some slots left empty. This
#' function is to be used by other functions to set up full parameter objects.
#'
#' @section Size grid:
# Some code is commented out that would allow the user to
# specify a grid with a non-constant log spacing. But we comment this out
# for now because of the fft.
# #' When the `w_full` argument is not given, then
#' A size grid is created so that
#' the log-sizes are equally spaced. The spacing is chosen so that there will be
#' `no_w` fish size bins, with the smallest starting at `min_w` and the largest
#' starting at `max_w`. For `w_full` additional size bins are added below
#' `min_w`, with the same log size. The number of extra bins is such that
#' `min_w_pp` comes to lie within the smallest bin.
#'
#' @section Changes to species params:
#' The `species_params` slot of the returned MizerParams object may differ
#' slightly from the data frame supplied as argument to this function in the
#' following ways:
#' \itemize{
#'   \item Default values are set for \code{w_min, w_inf, alpha, gear, interaction_p}.
#'   \item The egg sizes in `w_min` are rounded down to lie on a grid point.
#' }
#' Note that the other characteristic sizes of the species, like `w_mat` and
#' `w_inf`, are not modified to lie on grid points.
#'
#' @param species_params A data frame of species-specific parameter values.
#' @param gear_params A data frame with gear-specific parameter values.
#' @param no_w The number of size bins in the consumer spectrum.
#' @param min_w Sets the size of the eggs of all species for which this is not
#'   given in the `w_min` column of the `species_params` dataframe.
# #' @param w_full Increasing vector of weights giving the boundaries of size
# #' classes. Must include the value min_w. Has one more entry than the number
# #' of size bins. The last entry is the upper end of the largest size class. It
# #' be used to calculate the sizes of the size bins but will not be stored in
# #' the w_full slot of the returned MizerParams object. If this argument is not
# #' provided then size classes are set by the other arguments as described in
# #' the Details.
#' @param max_w The largest size of the consumer spectrum. By default this is
#'   set to the largest `w_inf` specified in the `species_params` data
#'   frame.
#' @param min_w_pp The smallest size of the resource spectrum.
# #' Ignored if w_full is specified.
#'
#' @return An empty but valid MizerParams object
#' @seealso See [newMultispeciesParams()] for a function that fills
#' the slots left empty by this function.
#' @export
emptyParams <- function(species_params,
                        gear_params = data.frame(),
                        no_w = 100,
                        min_w = 0.001,
                        # w_full = NA,
                        max_w = NA,
                        min_w_pp = 1e-12) {
    assert_that(is.data.frame(species_params),
                is.data.frame(gear_params),
                no_w > 10)

    ## Set defaults ----
    if (is.na(min_w_pp)) min_w_pp <- 1e-12
    species_params <- set_species_param_default(species_params, "w_min", min_w)
    # The community grid must start at the smallest egg size of any species.
    min_w <- min(species_params$w_min)

    species_params <- validSpeciesParams(species_params)
    gear_params <- validGearParams(gear_params, species_params)

    if (is.na(max_w)) {
        max_w <- max(species_params$w_inf)
    } else {
        if (max(species_params$w_inf) > max_w * (1 + 1e-9)) {
            # The fudge factor is there to avoid false alerts due to rounding
            # errors.
            too_large <- species_params$species[max_w < species_params$w_inf]
            # (Fixed grammar in user-facing message: "an maximum" -> "a maximum")
            stop("Some of your species have a maximum size larger than max_w: ",
                 toString(too_large))
        }
    }

    # Set up grids ----
    # The following code anticipates that in future we might allow the user to
    # specify a grid with a non-constant log spacing. But we comment this out
    # for now because of the fft.
    # if (missing(w_full)) {
    # set up logarithmic grids
    dx <- log10(max_w / min_w) / (no_w - 1)
    # Community grid
    w <- 10^(seq(from = log10(min_w), by = dx, length.out = no_w))
    # dw[i] = w[i+1] - w[i]. Following formula works also for last entry dw[no_w]
    dw <- (10^dx - 1) * w
    # To avoid issues due to numerical imprecision
    min_w <- w[1]

    # For fft methods we need a constant log bin size throughout.
    # Therefore we use as many steps as are necessary so that the first size
    # class includes min_w_pp.
    x_pp <- rev(seq(from = log10(min_w),
                    to = log10(min_w_pp),
                    by = -dx)) - dx
    w_full <- c(10^x_pp, w)
    # If min_w_pp happened to lie exactly on a grid point, we now added
    # one grid point too much which we need to remove again
    if (w_full[2] == min_w_pp) {
        w_full <- w_full[2:length(w_full)]
    }
    no_w_full <- length(w_full)
    dw_full <- (10^dx - 1) * w_full
    # } else {
    #     # use supplied w_full
    #     no_w_full <- length(w_full) - 1
    #     dw_full <- diff(w_full)
    #     w_full <- w_full[seq_along(dw_full)]
    #     # Check that sizes are increasing
    #     if (any(dw_full <= 0)) {
    #         stop("w_full must be increasing.")
    #     }
    #     w_min_idx <- match(min_w, w_full)
    #     if (is.na(w_min_idx)) {
    #         stop("w_min must be contained in w_full.")
    #     }
    #     w <- w_full[w_min_idx:no_w_full]
    #     dw <- dw_full[w_min_idx:no_w_full]
    #     no_w <- length(w)
    #     min_w_pp <- w_full[1]
    # }

    # Basic arrays for templates ----
    no_sp <- nrow(species_params)
    species_names <- as.character(species_params$species)
    gear_names <- unique(gear_params$gear)
    # mat1: species x consumer-size template, used for all per-species slots.
    mat1 <- array(NA, dim = c(no_sp, no_w),
                  dimnames = list(sp = species_names, w = signif(w, 3)))
    ft_pred_kernel <- array(NA, dim = c(no_sp, no_w_full),
                            dimnames = list(sp = species_names,
                                            k = 1:no_w_full))
    # TRUE for size classes below each species' w_inf.
    ft_mask <- plyr::aaply(species_params$w_inf, 1,
                           function(x) w_full < x, .drop = FALSE)
    selectivity <- array(0, dim = c(length(gear_names), no_sp, no_w),
                         dimnames = list(gear = gear_names,
                                         sp = species_names,
                                         w = signif(w, 3)))
    catchability <- array(0, dim = c(length(gear_names), no_sp),
                          dimnames = list(gear = gear_names,
                                          sp = species_names))
    initial_effort <- rep(0, length(gear_names))
    names(initial_effort) <- gear_names
    interaction <- array(1, dim = c(no_sp, no_sp),
                         dimnames = list(predator = species_names,
                                         prey = species_names))
    # vec1: template vector over the full (resource + consumer) grid.
    vec1 <- as.numeric(rep(NA, no_w_full))
    names(vec1) <- signif(w_full, 3)

    # Round down w_min to lie on grid points and store the indices of these
    # grid points in w_min_idx
    w_min_idx <- as.vector(suppressWarnings(
        tapply(species_params$w_min, 1:no_sp,
               function(w_min, wx) max(which(wx <= w_min)), wx = w)))
    # Due to rounding errors this might happen:
    w_min_idx[w_min_idx == -Inf] <- 1
    names(w_min_idx) <- species_names
    species_params$w_min <- w[w_min_idx]

    # Colour and linetype scales ----
    # for use in plots
    # Colour-blind-friendly palettes
    # From http://dr-k-lo.blogspot.co.uk/2013/07/a-color-blind-friendly-palette-for-r.html
    # cbbPalette <- c("#000000", "#009E73", "#e79f00", "#9ad0f3", "#0072B2",
    #                 "#D55E00", "#CC79A7", "#F0E442")
    # From http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette
    # cbbPalette <- c("#E69F00", "#56B4E9", "#009E73",
    #                 "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
    # Random palette generated at https://medialab.github.io/iwanthue/
    colour_palette <- c("#815f00", "#6237e2", "#8da600", "#de53ff", "#0e4300",
                        "#430079", "#6caa72", "#ee0053", "#007957", "#b42979",
                        "#142300", "#a08dfb", "#644500", "#04004c", "#b79955",
                        "#0060a8", "#dc8852", "#007ca9", "#ab003c", "#9796d9",
                        "#472c00", "#b492b0", "#140000", "#dc8488", "#005c67",
                        "#5c585a")
    # type_palette <- c("solid", "dashed", "dotdash", "longdash",
    #                   "twodash")
    type_palette <- c("solid")

    if ("linecolour" %in% names(species_params)) {
        linecolour <- species_params$linecolour
        # If any NA's first fill them with unused colours
        linecolour[is.na(linecolour)] <-
            setdiff(colour_palette, linecolour)[1:sum(is.na(linecolour))]
        # if there are still NAs, start from beginning of palette again
        linecolour[is.na(linecolour)] <-
            colour_palette[1:sum(is.na(linecolour))]
    } else {
        linecolour <- rep(colour_palette, length.out = no_sp)
    }
    names(linecolour) <- as.character(species_names)
    linecolour <- c(linecolour, "Total" = "black", "Resource" = "green",
                    "Background" = "grey", "Fishing" = "red")
    if ("linetype" %in% names(species_params)) {
        linetype <- species_params$linetype
        linetype[is.na(linetype)] <- "solid"
    } else {
        linetype <- rep(type_palette, length.out = no_sp)
    }
    names(linetype) <- as.character(species_names)
    linetype <- c(linetype, "Total" = "solid", "Resource" = "solid",
                  "Background" = "solid", "Fishing" = "solid")

    # Make object ----
    # Should Z0, rrPP and ccPP have names (species names etc)?
    params <- new(
        "MizerParams",
        w = w,
        dw = dw,
        w_full = w_full,
        dw_full = dw_full,
        w_min_idx = w_min_idx,
        maturity = mat1,
        psi = mat1,
        initial_n = mat1,
        intake_max = mat1,
        search_vol = mat1,
        metab = mat1,
        mu_b = mat1,
        ft_pred_kernel_e = ft_pred_kernel,
        ft_pred_kernel_p = ft_pred_kernel,
        pred_kernel = array(),
        gear_params = gear_params,
        selectivity = selectivity,
        catchability = catchability,
        initial_effort = initial_effort,
        rr_pp = vec1,
        cc_pp = vec1,
        sc = w,
        initial_n_pp = vec1,
        species_params = species_params,
        interaction = interaction,
        other_dynamics = list(),
        other_encounter = list(),
        other_mort = list(),
        rates_funcs = list(
            Rates = "mizerRates",
            Encounter = "mizerEncounter",
            FeedingLevel = "mizerFeedingLevel",
            EReproAndGrowth = "mizerEReproAndGrowth",
            PredRate = "mizerPredRate",
            PredMort = "mizerPredMort",
            FMort = "mizerFMort",
            Mort = "mizerMort",
            ERepro = "mizerERepro",
            EGrowth = "mizerEGrowth",
            ResourceMort = "mizerResourceMort",
            RDI = "mizerRDI",
            RDD = "BevertonHoltRDD"),
        resource_dynamics = "resource_semichemostat",
        other_params = list(),
        initial_n_other = list(),
        A = as.numeric(rep(NA, no_sp)),
        linecolour = linecolour,
        linetype = linetype,
        ft_mask = ft_mask
    )
    return(params)
}

#' Set line colours to be used in mizer plots
#'
#' @param params A MizerParams object
#' @param colours A named list or named vector of line colours.
#'
#' @return The MizerParams object with updated line colours
#' @export
#' @examples
#' params <- NS_params
#' params <- setColours(params, list("Cod" = "red", "Haddock" = "#00ff00"))
#' plotSpectra(params)
#' getColours(params)
setColours <- function(params, colours) {
    assert_that(is(params, "MizerParams"),
                all(validColour(colours)))
    # Merge the new colours into the existing named vector; entries with
    # names not yet present are appended, existing names are overwritten.
    params@linecolour <- unlist(
        modifyList(as.list(params@linecolour), as.list(colours)))
    params
}

#' @rdname setColours
#' @export
getColours <- function(params) {
    params@linecolour
}

#' Check colour specifications
#'
#' Internal helper. Returns a logical vector, one entry per element of
#' `colour`, that is TRUE where `grDevices::col2rgb()` accepts the value as
#' a colour and FALSE otherwise (invalid values do not raise an error).
#'
#' @param colour A vector or list of colour specifications.
#' @return A logical vector the same length as `colour`.
validColour <- function(colour) {
    # vapply rather than sapply so the result is always a logical vector
    # (sapply would return an empty list for zero-length input).
    vapply(colour, function(X) {
        tryCatch(is.matrix(col2rgb(X)),
                 error = function(e) FALSE)
    }, logical(1))
}

#' Set linetypes to be used in mizer plots
#'
#' @param params A MizerParams object
#' @param linetypes A named list or named vector of linetypes.
#'
#' @return The MizerParams object with updated linetypes
#' @export
#' @examples
#' params <- NS_params
#' params <- setLinetypes(params, list("Cod" = "solid"))
#' plotSpectra(params)
#' getLinetypes(params)
setLinetypes <- function(params, linetypes) {
    assert_that(is(params, "MizerParams"))
    params@linetype <- unlist(
        modifyList(as.list(params@linetype), as.list(linetypes)))
    params
}

#' @rdname setLinetypes
#' @export
getLinetypes <- function(params) {
    as.list(params@linetype)
}

#' Size bins
#'
#' This is a good place to explain how mizer discretises the size
#'
#' @param params A MizerParams object
#'
#' @export
w <- function(params) {
    params@w
}

#' @rdname w
#' @export
w_full <- function(params) {
    params@w_full
}

#' @rdname w
#' @export
dw <- function(params) {
    params@dw
}

#' @rdname w
#' @export
dw_full <- function(params) {
    params@dw_full
}
# Shiny dashboard for the NY Adult Care Facility Statistical Report
# (2013-2018): gender, count and organization-type views plus a data table,
# filtered by county, organization type and reporting-year range.
library(shiny)
library(shinydashboard)
library(reshape2)
library(dplyr)
library(plotly)
library(shinythemes)

# Load Data
adult <- read.csv("AdultCare_StatisticalReport.csv")

# Avoid plotly issues ----------------------------------------------
pdf(NULL)

# Application header & title ----------------------------------------------
header <- dashboardHeader(title = "Adult Care Facility Statistical Report: 2013-2018",
                          titleWidth = 450)

# Dashboard Sidebar ----------------------------------------------
sidebar <- dashboardSidebar(
  sidebarMenu(
    id = "tabs",

    # Menu Items ----------------------------------------------
    menuItem("Gender Stats", icon = icon("bar-chart"), tabName = "genders"),
    menuItem("Count Stats", icon = icon("bar-chart"), tabName = "counts"),
    menuItem("Organization Type Stats", icon = icon("bar-chart"),
             tabName = "organizations"),
    menuItem("Table", icon = icon("th"), tabName = "table",
             badgeLabel = "new", badgeColor = "green"),

    # Inputs: select county to plot --------------------------------------
    selectInput(inputId = "county",
                label = "Select County",
                choices = sort(unique(adult$County)),
                selected = "Albany"),

    # Inputs: select organization type to plot ----------------------------
    # (typo fixed in user-facing label: "Organziation" -> "Organization")
    checkboxGroupInput(inputId = "type",
                       label = "Select Organization Type",
                       choices = sort(unique(adult$Certified.Type)),
                       selected = c("S", "F", "E", "N")),

    # Reporting year Selection --------------------------------------------
    sliderInput(inputId = "year",
                label = "Year Range",
                min = min(adult$Reporting.Year),
                max = max(adult$Reporting.Year),
                value = c(min(adult$Reporting.Year),
                          max(adult$Reporting.Year)),
                step = 1,
                sep = "")
  )
)

# Dashboard body ----------------------------------------------
body <- dashboardBody(tabItems(

  # Gender Stats page ----------------------------------------------
  tabItem("genders",
          # Info Boxes ----------------------------------------------
          fluidRow(
            infoBoxOutput("female"),
            infoBoxOutput("male")
          ),
          # Plot ----------------------------------------------
          fluidRow(
            tabBox(width = 12,
                   tabPanel("Female Resident Distribution By Reporting Years",
                            plotlyOutput("plot_gender")),
                   tabPanel("Male Resident Distribution By Reporting Years",
                            plotlyOutput("plot_gender2")))
          )),

  # Count Stats page ----------------------------------------------
  # (Bug fix: the plot row was accidentally nested inside the value-box
  # fluidRow; they are now two sibling rows as the comments intended.)
  tabItem("counts",
          # Value Box ----------------------------------------------
          fluidRow(
            valueBoxOutput("total")
          ),
          # Plot ----------------------------------------------
          fluidRow(
            tabBox(width = 8,
                   tabPanel("Overall Distribution by the Number of Resident",
                            plotlyOutput("plot_count")))
          )),

  # Organization Stats page ----------------------------------------------
  tabItem("organizations",
          # Plot ----------------------------------------------
          fluidRow(
            tabBox(width = 8,
                   tabPanel("Resident Distribution by Organization Type",
                            plotlyOutput("plot_type")))
          )),

  # Data Table Page ----------------------------------------------
  tabItem("table",
          fluidPage(
            box(title = "Statistical Report for Your Selection",
                DT::dataTableOutput("table"), width = 12)))
))

ui <- dashboardPage(header, sidebar, body, skin = "purple")

# Define server function required to create plots and value boxes -----
server <- function(input, output) {

  # Reactive data function -------------------------------------------
  # Applies the year-range, county and organization-type filters to `adult`.
  adultInput <- reactive({
    # Year Filter
    adult <- adult %>%
      filter(Reporting.Year >= input$year[1] &
               Reporting.Year <= input$year[2])
    # County Filter
    adult <- subset(adult, County %in% input$county)
    # Organization Type Filter
    adult <- subset(adult, Certified.Type %in% input$type)
    # Return dataframe ----------------------------------------------
    return(adult)
  })

  # Plots showing the Gender stats -----------------------------------
  # Female
  output$plot_gender <- renderPlotly({
    ggplot(data = adultInput(), aes(Female.Census)) +
      geom_histogram(binwidth = 20) +
      facet_wrap(~Reporting.Year) +
      labs(x = "The Number of Female Residents", y = "Count") +
      theme_classic()
  })

  # Male
  output$plot_gender2 <- renderPlotly({
    ggplot(data = adultInput(), aes(Male.Census)) +
      geom_histogram(binwidth = 20) +
      facet_wrap(~Reporting.Year) +
      labs(x = "The Number of Male Residents", y = "Count") +
      theme_classic()
  })

  # A plot showing the Resident count stats ---------------------------
  output$plot_count <- renderPlotly({
    ggplot(data = adultInput(), aes(x = End.Census)) +
      geom_histogram(aes(y = ..density..), colour = "black", fill = "white") +
      geom_density(color = "darkblue", fill = "lightblue", alpha = 0.4) +
      labs(x = "Number of Residents per Organization") +
      theme_classic()
  })

  # A plot showing the Organization Type stats ------------------------
  output$plot_type <- renderPlotly({
    ggplot(data = adultInput(), aes(x = Certified.Type, y = End.Census)) +
      geom_boxplot(aes(fill = Certified.Type)) +
      labs(fill = "Type", x = "Organization Type",
           y = "The number of residents") +
      scale_fill_brewer(palette = "Blues") +
      theme_classic()
  })

  # Data table of characters ----------------------------------------------
  output$table <- DT::renderDataTable({
    ad <- subset(adultInput(),
                 select = c(Reporting.Year, Reporting.Quarter,
                            Reporting.Organization, Certified.Type, County,
                            Certified.Capacity, End.Census, Male.Census,
                            Female.Census))
    DT::datatable(ad, options = list(scrollX = TRUE))
  })

  # Male Average box ----------------------------------------------
  # (Bug fix: infoBoxOutput() must be paired with renderInfoBox(), not
  # renderValueBox(), per shinydashboard's output/render pairing.)
  output$male <- renderInfoBox({
    ai <- adultInput()
    infoBox("Avg Male Count:", value = round(mean(ai$Male.Census), 0),
            color = "blue", width = 6)
  })

  # Female Average box ----------------------------------------------
  output$female <- renderInfoBox({
    ai <- adultInput()
    infoBox("Avg Female Count:", value = round(mean(ai$Female.Census), 0),
            color = "maroon", width = 6)
  })

  # Total Census box ----------------------------------------------
  output$total <- renderValueBox({
    ai <- adultInput()
    valueBox("Total Number of Residents", value = sum(ai$End.Census),
             color = "olive", width = 4)
  })
}

# Run the application
shinyApp(ui = ui, server = server)
/app.R
no_license
rforoperations2019/project1_jiaying2
R
false
false
6,998
r
library(shiny) library(shinydashboard) library(reshape2) library(dplyr) library(plotly) library(shinythemes) #Load Data adult <- read.csv("AdultCare_StatisticalReport.csv") # Avoid plotly issues ---------------------------------------------- pdf(NULL) # Application header & title ---------------------------------------------- header <- dashboardHeader(title = "Adult Care Facility Statistical Report: 2013-2018", titleWidth = 450) # Dashboard Sidebar ---------------------------------------------- sidebar <- dashboardSidebar( sidebarMenu( id = "tabs", # Menu Items ---------------------------------------------- menuItem("Gender Stats", icon = icon("bar-chart"), tabName = "genders"), menuItem("Count Stats", icon = icon("bar-chart"), tabName = "counts"), menuItem("Organization Type Stats", icon = icon("bar-chart"), tabName = "organizations"), menuItem("Table", icon = icon("th"), tabName = "table", badgeLabel = "new", badgeColor = "green"), # Inputs: select county to plot ---------------------------------------------- selectInput(inputId = "county", label = "Select County", choices = sort(unique(adult$County)), selected = "Albany"), # Inputs: select quarter to plot ---------------------------------------------- checkboxGroupInput(inputId = "type", label = "Select Organziation Type", choices = sort(unique(adult$Certified.Type)), selected = c("S","F","E","N")), # Reporting year Selection ---------------------------------------------- sliderInput(inputId = "year", label = "Year Range", min = min(adult$Reporting.Year), max = max(adult$Reporting.Year), value = c(min(adult$Reporting.Year), max(adult$Reporting.Year)), step = 1, sep = "") ) ) # Dashboard body ---------------------------------------------- body <- dashboardBody(tabItems( # Quarter Stats page ---------------------------------------------- tabItem("genders", # Value Boxes ---------------------------------------------- fluidRow( infoBoxOutput("female"), infoBoxOutput("male") ), # Plot 
---------------------------------------------- fluidRow( tabBox(width = 12, tabPanel("Female Resident Distribution By Reporting Years", plotlyOutput("plot_gender")), tabPanel("Male Resident Distribution By Reporting Years", plotlyOutput("plot_gender2"))) )), # Quarter Stats page ---------------------------------------------- tabItem("counts", # Input Boxes ---------------------------------------------- fluidRow( valueBoxOutput("total"), # Plot ---------------------------------------------- fluidRow( tabBox(width = 8, tabPanel("Overall Distribution by the Number of Resident", plotlyOutput("plot_count"))) ))), # Organization Stats page ---------------------------------------------- tabItem("organizations", # Plot ---------------------------------------------- fluidRow( tabBox(width = 8, tabPanel("Resident Distribution by Organzaition Type", plotlyOutput("plot_type"))) )), # Data Table Page ---------------------------------------------- tabItem("table", fluidPage( box(title = "Statisical Report for Your Selection", DT::dataTableOutput("table"), width = 12))) ) ) ui <- dashboardPage(header, sidebar, body, skin = "purple") # Define server function required to create plots and value boxes ----- server <- function(input, output) { # Reactive data function ------------------------------------------- adultInput <- reactive({ adult <- adult %>% #Year Filter filter(Reporting.Year >= input$year[1] & Reporting.Year <= input$year[2]) #County Filter adult <- subset(adult, County %in% input$county) #Quarter Filter adult <- subset(adult, Certified.Type %in% input$type) # Return dataframe ---------------------------------------------- return(adult) }) # Plots showing the Gender stats ----------------------------------- # Female output$plot_gender <- renderPlotly({ ggplot(data = adultInput(), aes(Female.Census)) + geom_histogram(binwidth = 20) + facet_wrap(~Reporting.Year) + labs(x = "The Number of Female Residents", y = "Count")+ theme_classic() }) #Male output$plot_gender2 <- 
renderPlotly({ ggplot(data = adultInput(), aes (Male.Census)) + geom_histogram(binwidth = 20) + facet_wrap(~Reporting.Year) + labs(x = "The Number of Male Residents", y = "Count") + theme_classic() }) # A plot showing the Resident count stats ----------------------------------- output$plot_count <- renderPlotly({ ggplot(data = adultInput(), aes(x = End.Census)) + geom_histogram(aes(y = ..density..), colour = "black", fill = "white")+ geom_density(color = "darkblue", fill = "lightblue", alpha = 0.4) + labs (x = "Number of Residents per Organization") + theme_classic() }) # A plot showing the Organization Type stats ----------------------------------- output$plot_type <- renderPlotly({ ggplot(data = adultInput(), aes(x = Certified.Type, y = End.Census)) + geom_boxplot(aes(fill = Certified.Type)) + labs(fill="Type" , x = "Organization Type", y = "The number of residents") + scale_fill_brewer(palette = "Blues") + theme_classic() }) # Data table of characters ---------------------------------------------- output$table <- DT::renderDataTable({ ad <-subset(adultInput(), select = c(Reporting.Year, Reporting.Quarter, Reporting.Organization, Certified.Type, County, Certified.Capacity, End.Census, Male.Census, Female.Census)) DT::datatable(ad, options = list(scrollX = TRUE)) }) # Male Average box ---------------------------------------------- output$male <- renderValueBox({ ai <- adultInput() infoBox("Avg Male Count:", value = round(mean(ai$Male.Census), 0), color = "blue", width = 6) }) # Female Average box ---------------------------------------------- output$female <- renderValueBox({ ai <- adultInput() infoBox("Avg Female Count:", value = round(mean(ai$Female.Census), 0), color = "maroon", width = 6) }) # Total Census box ---------------------------------------------- output$total <- renderValueBox({ ai <- adultInput() valueBox("Total Number of Residents", value = sum(ai$End.Census), color = "olive", width = 4) }) } # Run the application shinyApp(ui = ui, server = server)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trajectory.R
\name{TrajScale}
\alias{TrajScale}
\title{Scale a trajectory}
\usage{
TrajScale(trj, scale, units, yScale = scale)
}
\arguments{
\item{trj}{The trajectory to be scaled.}

\item{scale}{Scaling factor to be applied to the trajectory coordinates.}

\item{units}{Character specifying the spatial units after scaling, e.g. "m" or "metres"}

\item{yScale}{Optional scaling factor to be applied to the y-axis, which may be specified if the original coordinates are not square. Defaults to \code{scale}.}
}
\value{
new scaled trajectory.
}
\description{
Scales the cartesian coordinates in a trajectory, for example, to convert units from pixels to metres.
}
\examples{
set.seed(42)
trj <- TrajGenerate()

# original trajectory units are pixels, measured as having
# 47 pixels in 10 mm, so to convert to metres, scale the
# trajectory by the appropriate factor, i.e. (size in metres) / (size in pixels).
scale <- .01 / 47
scaled <- TrajScale(trj, scale, "m")
}
/man/TrajScale.Rd
no_license
cran/trajr
R
false
true
1,080
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/trajectory.R \name{TrajScale} \alias{TrajScale} \title{Scale a trajectory} \usage{ TrajScale(trj, scale, units, yScale = scale) } \arguments{ \item{trj}{The trajectory to be scaled.} \item{scale}{Scaling factor to be applied to the trajectory coordinates.} \item{units}{Character specifying the spatial units after scaling, e.g. "m" or "metres"} \item{yScale}{Optional scaling factor to be applied to the y-axis, which may be specified if the original coordinates are not square. Defaults to \code{scale}.} } \value{ new scaled trajectory. } \description{ Scales the cartesian coordinates in a trajectory, for example, to convert units from pixels to metres. } \examples{ set.seed(42) trj <- TrajGenerate() # original trajectory units are pixels, measured as having # 47 pixels in 10 mm, so to convert to metres, scale the # trajectory by the approriate factor, i.e. (size in metres) / (size in pixels). scale <- .01 / 47 scaled <- TrajScale(trj, scale, "m") }
# PhenoCombinedBrassicaDec13data.R
# R version 3.3.1 (2016-06-21)
# January 12, 2017. Mallory B. Lai.
# Reviewed by: TODO (Mallory B. Lai) : Find reviewer to proofread
# Combining Brassica phenotype data for analysis
# This "data clean-up" is meant to combine separate files and get rid
# of columns and data that are not needed for analysis.
#-----------------------------------------------------------------------
library(data.table)
#-----------------------------------------------------------------------

# Read in file 1.
Starch <- fread(file = "4Mall_NSC_Starchdec2013_edit.csv")
Starch$NSC <- as.numeric(Starch$NSC)
Starch$Starch <- as.numeric(Starch$Starch)

# Read in file 2.
Soil <- fread(file = "4Mall_Soil_Moisture_dec_2013_edit.csv")
Soil$Timepoint <- as.numeric(Soil$Timepoint)

# Read in file 3.
Photo <- fread(file = "4Mall_PhotoFv'Fm'gs_dec_2013_edit.csv")

# File1 doesn't have replicate values. Since we're only interested
# in leaf tissue so we'll want to remove any readings taken from the
# roots.
Starch <- Starch[Tissue == "leaves"]

# For each file, remove columns that aren't "Treatment",
# "Timepoint", and output values.
Starch <- Starch[, c("Treatment", "Timepoint", "NSC", "Starch")]
Soil <- Soil[, c("Treatment", "Timepoint", "SM(%)")]
Photo <- Photo[, c("Treatment", "Timepoint", "Photo", "gs", "Fv'Fm'")]

# Examine the number of replicates at each Timepoint.
replicates <- data.table(Photo = Photo[, .N, by = Timepoint],
                         Soil = Soil[, .N, by = Timepoint],
                         Starch = Starch[, .N, by = Timepoint])
# Note, the mismatched values have been recycled.
# We can see that Photo is missing one timepoint and Soil and Starch
# are missing two.
# Photo is missing eight replicates for the first three timepoints.
# Starch has an extra replicate for timepoint 4 and a missing replicate
# for timepoint 12.

# Append one timepoint's combined Photo/Starch/Soil records to `pheno`.
# `trt = NULL` means "do not filter by treatment" (used for timepoints 7-12,
# which are appended for both treatments at once).
# Note: `Starch[i, NSC, Starch]` is the original data.table j/by idiom that
# returns the Starch and NSC columns for the selected rows.
bind_timepoint <- function(pheno, tp, trt = NULL) {
  if (is.null(trt)) {
    block <- cbind(Photo[Timepoint == tp],
                   Starch[Timepoint == tp, NSC, Starch],
                   Soil[Timepoint == tp, "SM(%)"])
  } else {
    block <- cbind(Photo[Timepoint == tp & Treatment == trt],
                   Starch[Timepoint == tp & Treatment == trt, NSC, Starch],
                   Soil[Timepoint == tp & Treatment == trt, "SM(%)"])
  }
  rbind(pheno, block)
}

# Build dataframe for combined data.
# For now, missing replicates for Photo will be recycled.
# Timepoints 1 and 2, both treatments.
Pheno <- bind_timepoint(NULL, 1, "WW")
Pheno <- bind_timepoint(Pheno, 1, "Dry")
Pheno <- bind_timepoint(Pheno, 2, "WW")
Pheno <- bind_timepoint(Pheno, 2, "Dry")

# Timepoint 3: starch measurements missing; fill with NA's.
tp3ww <- Photo[Timepoint == 3 & Treatment == "WW"]
tp3ww <- cbind(tp3ww, Starch = NA, NSC = NA,
               Soil[Timepoint == 3 & Treatment == "WW", "SM(%)"])
Pheno <- rbind(Pheno, tp3ww)
rm(tp3ww)
tp3dry <- Photo[Timepoint == 3 & Treatment == "Dry"]
tp3dry <- cbind(tp3dry, Starch = NA, NSC = NA,
                Soil[Timepoint == 3 & Treatment == "Dry", "SM(%)"])
Pheno <- rbind(Pheno, tp3dry)
rm(tp3dry)

# Timepoint 4, WW: remove extra starch value.
# The repeated value in the first row of "WW" looks like a human error.
starch24 <- Starch[Timepoint == 4 & Treatment == "WW", NSC, Starch]
starch24 <- starch24[-1]
tp4ww <- Photo[Timepoint == 4 & Treatment == "WW"]
tp4ww <- cbind(tp4ww, starch24,
               Soil[Timepoint == 4 & Treatment == "WW", "SM(%)"])
Pheno <- rbind(Pheno, tp4ww)
rm(tp4ww)
rm(starch24)
# Timepoint 4, Dry.
Pheno <- bind_timepoint(Pheno, 4, "Dry")

# Timepoint 5: soil moisture missing; fill with NA's.
tp5 <- Photo[Timepoint == 5]
tp5 <- cbind(tp5, Starch[Timepoint == 5, NSC, Starch], "SM(%)" = NA)
Pheno <- rbind(Pheno, tp5)
rm(tp5)

# Timepoint 6: soil and Photo missing; build from the Starch table instead.
tp6 <- Starch[Timepoint == 6, Timepoint, Treatment]
tp6 <- cbind(tp6, Photo = NA, gs = NA, "Fv'Fm'" = 0,
             Starch[Timepoint == 6, NSC, Starch], "SM(%)" = NA)
Pheno <- rbind(Pheno, tp6)
rm(tp6)

# Reorder Soil moisture rows to match Photo and Starch.
# NOTE(review): `-Soil[, 1]` applies unary minus to the character Treatment
# column -- confirm the intended sort order; this may error on a data.table.
Soil <- Soil[order(Soil[, 2], -Soil[, 1]), ]

# Timepoints 7 and 8.
Pheno <- bind_timepoint(Pheno, 7)
Pheno <- bind_timepoint(Pheno, 8)

# Timepoint 9: starch missing; fill with NA's.
tp9 <- Photo[Timepoint == 9]
tp9 <- cbind(tp9, Starch = NA, NSC = NA, Soil[Timepoint == 9, "SM(%)"])
Pheno <- rbind(Pheno, tp9)
rm(tp9)

# Timepoints 10-12.
Pheno <- bind_timepoint(Pheno, 10)
Pheno <- bind_timepoint(Pheno, 11)
Pheno <- bind_timepoint(Pheno, 12)

# Convert to data.frame.
Pheno <- as.data.frame(Pheno)

# Remove data points containing errors.
# Starch should never have a negative value.
sum(Pheno$Starch < 0, na.rm = TRUE)
# Only one value is negative. Replace negative value with NA.
Pheno[which(Pheno$Starch < 0), "Starch"] <- NA
# NSC should also never be negative.
sum(Pheno$NSC < 0, na.rm = TRUE)
# Only one value is negative. Replace negative value with NA.
Pheno[which(Pheno$NSC < 0), "NSC"] <- NA
# The Soil Moisture value of zero also seems to be an error.
Pheno[which(Pheno$`SM(%)` == 0), "SM(%)"] <- NA

# Check that Photo is negative at timepoints 5, 6, 11, & 12:
# the result should have zero rows.
# (Bug fix: the original `Pheno[which(...), "Timepoint" == 5]` compared the
# string "Timepoint" to a number, selecting zero columns instead of
# filtering rows by timepoint.)
Pheno[which(Pheno$Photo > 0 & Pheno$Timepoint %in% c(5, 6, 11, 12)), ]
# Check that Fv'Fm' is zero at timepoints 5, 6, 11, 12:
# the result should have zero rows.
Pheno[which(Pheno$`Fv'Fm'` != 0 & Pheno$Timepoint %in% c(5, 6, 11, 12)), ]
# Check that Fv'Fm' is positive at all timepoints.
sum(Pheno$`Fv'Fm'` < 0, na.rm = TRUE)

# Write file to csv.
write.csv(Pheno, file = "PhenotypeBrassica.csv")
/DataWrangling/PhenoCombinedBrassicaDec13data.R
no_license
mblai35/BrassicaDBN_Neo4j
R
false
false
7,392
r
# PhenoCombinedBrassicaDec13data.R # R version 3.3.1 (2016-06-21) # January 12, 2017. Mallory B. Lai. # Reviewed by: TODO (Mallory B. Lai) : Find reviewer to proofread # Combining Brassica phenotype data for analysis # This "data clean-up" is meant to combine separate files and get rid # of columns and data that are not needed for analysis. #----------------------------------------------------------------------- library(data.table) #----------------------------------------------------------------------- # Read in file 1. Starch <-fread(file = "4Mall_NSC_Starchdec2013_edit.csv") Starch$NSC <- as.numeric(Starch$NSC) Starch$Starch <- as.numeric(Starch$Starch) # Read in file 2. Soil <-fread(file = "4Mall_Soil_Moisture_dec_2013_edit.csv") Soil$Timepoint <- as.numeric(Soil$Timepoint) # Read in file 3. Photo <-fread(file = "4Mall_PhotoFv'Fm'gs_dec_2013_edit.csv") # File1 doesn't have replicate values. Since we're only interested # in leaf tissue so we'll want to remove any readings taken from the # roots. Starch <- Starch[Tissue == "leaves"] # For each file, remove columns that aren't "Treatment", # "Timepoint", and output values. Starch <- Starch[, c("Treatment", "Timepoint", "NSC", "Starch")] Soil <- Soil[, c("Treatment", "Timepoint", "SM(%)")] Photo <- Photo[, c("Treatment", "Timepoint", "Photo", "gs", "Fv'Fm'")] # Examine the number of replicates at each Timepoint. replicates <- data.table(Photo = Photo[ , .N, by = Timepoint], Soil = Soil[ , .N, by = Timepoint], Starch = Starch[ , .N, by = Timepoint]) # Note, the mismatched values have been recycled. # We can see that Photo is missing one timepoint and Soil and Starch # are missing two. # Photo is missing eight replicates for the first three timepoints. # Starch has an extra replicate for timepoint 4 and a missing replicate # for timepoint 12. # Build dataframe for combined data. # Start with timepoint 1, treatment "WW". 
Pheno <- Photo[Timepoint == 1 & Treatment == "WW"] Pheno <- cbind(Pheno, Starch[Timepoint == 1 & Treatment == "WW", NSC, Starch], Soil[Timepoint == 1 & Treatment == "WW", "SM(%)"]) # For now, missing replicates for Photo will be recycled. # Timepoint 1, Dry. tp1dry <- Photo[Timepoint == 1 & Treatment == "Dry"] tp1dry <- cbind(tp1dry, Starch[Timepoint == 1 & Treatment == "Dry", NSC, Starch], Soil[Timepoint == 1 & Treatment == "Dry", "SM(%)"]) Pheno <- rbind(Pheno, tp1dry) rm(tp1dry) # Timepoint 2, WW. tp2ww <- Photo[Timepoint == 2 & Treatment == "WW"] tp2ww <- cbind(tp2ww, Starch[Timepoint == 2 & Treatment == "WW", NSC, Starch], Soil[Timepoint == 2 & Treatment == "WW", "SM(%)"]) Pheno <- rbind(Pheno, tp2ww) rm(tp2ww) # Timepoint 2, Dry. tp2dry <- Photo[Timepoint == 2 & Treatment == "Dry"] tp2dry <- cbind(tp2dry, Starch[Timepoint == 2 & Treatment == "Dry", NSC, Starch], Soil[Timepoint == 2 & Treatment == "Dry", "SM(%)"]) Pheno <- rbind(Pheno, tp2dry) rm(tp2dry) # Timepoint 3, WW. Fill in missing starch timepoint with NA's. tp3ww <- Photo[Timepoint == 3 & Treatment == "WW"] tp3ww <- cbind(tp3ww, Starch = NA, NSC = NA, Soil[Timepoint == 3 & Treatment == "WW", "SM(%)"]) Pheno <- rbind(Pheno, tp3ww) rm(tp3ww) # Timepoint 3, Dry. tp3dry <- Photo[Timepoint == 3 & Treatment == "Dry"] tp3dry <- cbind(tp3dry, Starch = NA, NSC = NA, Soil[Timepoint == 3 & Treatment == "Dry", "SM(%)"]) Pheno <- rbind(Pheno, tp3dry) rm(tp3dry) # Timepoint 4, WW. Remove extra starch value. # The repeated value in the first row of "WW" looks like a human error. starch24 <- Starch[Timepoint == 4 & Treatment == "WW", NSC, Starch] starch24 <- starch24[-1] tp4ww <- Photo[Timepoint == 4 & Treatment == "WW"] tp4ww <- cbind(tp4ww, starch24, Soil[Timepoint == 4 & Treatment == "WW", "SM(%)"]) Pheno <- rbind(Pheno, tp4ww) rm(tp4ww) rm(starch24) # Timepoint 4, Dry. 
tp4dry <- Photo[Timepoint == 4 & Treatment == "Dry"] tp4dry <- cbind(tp4dry, Starch[Timepoint == 4 & Treatment == "Dry", NSC, Starch], Soil[Timepoint == 4 & Treatment == "Dry", "SM(%)"]) Pheno <- rbind(Pheno, tp4dry) rm(tp4dry) # Timepoint 5. Fill in missing soil values with NA's. tp5 <- Photo[Timepoint == 5] tp5 <- cbind(tp5, Starch[Timepoint == 5, NSC, Starch], "SM(%)" = NA) Pheno <- rbind(Pheno, tp5) rm(tp5) # Timepoint 6. Fill in missing soil and Photo values with NA's. tp6 <- Starch[Timepoint == 6, Timepoint, Treatment] tp6 <- cbind(tp6, Photo = NA, gs = NA, "Fv'Fm'" = 0, Starch[Timepoint == 6, NSC, Starch], "SM(%)" = NA) Pheno <- rbind(Pheno, tp6) rm(tp6) # Reorder Soil moisture columns to match Photo and Starch. Soil <- Soil[order(Soil[, 2], -Soil[, 1]), ] # Timepoint 7. tp7 <- Photo[Timepoint == 7] tp7 <- cbind(tp7, Starch[Timepoint == 7, NSC, Starch], Soil[Timepoint == 7, "SM(%)"]) Pheno <- rbind(Pheno, tp7) rm(tp7) # Timepoint 8. tp8 <- Photo[Timepoint == 8] tp8 <- cbind(tp8, Starch[Timepoint == 8, NSC, Starch], Soil[Timepoint == 8, "SM(%)"]) Pheno <- rbind(Pheno, tp8) rm(tp8) # Timepoint 9. Fill in missing starch timepoint with NA's. tp9 <- Photo[Timepoint == 9] tp9 <- cbind(tp9, Starch = NA, NSC = NA, Soil[Timepoint == 9, "SM(%)"]) Pheno <- rbind(Pheno, tp9) rm(tp9) # Timepoint 10. tp10 <- Photo[Timepoint == 10] tp10 <- cbind(tp10, Starch[Timepoint == 10, NSC, Starch], Soil[Timepoint == 10, "SM(%)"]) Pheno <- rbind(Pheno, tp10) rm(tp10) # Timepoint 11. tp11 <- Photo[Timepoint == 11] tp11 <- cbind(tp11, Starch[Timepoint == 11, NSC, Starch], Soil[Timepoint == 11, "SM(%)"]) Pheno <- rbind(Pheno, tp11) rm(tp11) # Timepoint 12. tp12 <- Photo[Timepoint == 12] tp12 <- cbind(tp12, Starch[Timepoint == 12, NSC, Starch], Soil[Timepoint == 12, "SM(%)"]) Pheno <- rbind(Pheno, tp12) rm(tp12) # Convert to data.frame. Pheno <- as.data.frame(Pheno) # Remove data points containing errors. # Starch should never have a negative value. 
# Clean impossible values out of `Pheno`, run sanity checks, and write the
# final phenotype table.  Operates on the `Pheno` data.frame assembled above.
sum(Pheno$Starch < 0, na.rm = TRUE)
# Only one value is negative. Replace negative value with NA.
Pheno[which(Pheno$Starch < 0), "Starch"] <- NA
# NSC should also never be negative.
sum(Pheno$NSC < 0, na.rm = TRUE)
# Only one value is negative. Replace negative value with NA.
Pheno[which(Pheno$NSC < 0), "NSC"] <- NA
# The Soil Moisture value of zero also seems to be an error.
Pheno[which(Pheno$`SM(%)` == 0), "SM(%)"] <- NA
# Check that Photo is negative at timepoints 5, 6, 11, & 12 (each should
# print zero rows).
# fix: the original wrote `Pheno[which(...), "Timepoint" == 5]`; the j
# expression `"Timepoint" == 5` is just FALSE, so no columns were ever
# selected and the check was a no-op.  The timepoint filter belongs in i.
Pheno[which(Pheno$Photo > 0 & Pheno$Timepoint == 5), ]
Pheno[which(Pheno$Photo > 0 & Pheno$Timepoint == 6), ]
Pheno[which(Pheno$Photo > 0 & Pheno$Timepoint == 11), ]
Pheno[which(Pheno$Photo > 0 & Pheno$Timepoint == 12), ]
# Check that Fv'Fm' is zero at timepoints 5, 6, 11, 12 (same fix as above).
Pheno[which(Pheno$`Fv'Fm'` != 0 & Pheno$Timepoint == 5), ]
Pheno[which(Pheno$`Fv'Fm'` != 0 & Pheno$Timepoint == 6), ]
Pheno[which(Pheno$`Fv'Fm'` != 0 & Pheno$Timepoint == 11), ]
Pheno[which(Pheno$`Fv'Fm'` != 0 & Pheno$Timepoint == 12), ]
# Check that Fv'Fm' is positive at all timepoints.
sum(Pheno$`Fv'Fm'` < 0, na.rm = TRUE)
# Write file to csv.
write.csv(Pheno, file = "PhenotypeBrassica.csv")
# Per-population nucleotide diversity (pi) and between-population divergence
# (dxy) over one VCF window, using PopGenome.  Writes pi.csv and dxy.csv.
library(PopGenome)

# Read positions 1-10049 of contig "1" from the compressed VCF.
gen <- readVCF("vcf/sub3.vcf.gz", include.unknown = TRUE, frompos = 1,
               topos = 10049, tid = "1", numcols = 10000)

# Read one population's sample IDs (a single comma-separated row per file).
read_pop <- function(path) {
  as.character(read.table(path, sep = ",")[[1]])
}

# The 14 populations, in the order expected downstream.
pop_names <- c(
  "bhhz", "hznearmofo3003", "hznearmost005", "mofo3001nn", "mofo3002",
  "mofo3003", "mofo3009", "lfo", "mosh", "most001", "most003",
  "most004", "most005", "rhhz"
)
pop_files <- file.path("populations", paste0(pop_names, ".txt"))
# NOTE(review): the original script filled the `mofo3003` slot from
# populations/mofo3005.txt; preserved here to keep behavior identical --
# confirm whether that file name was intentional or a copy-paste error.
pop_files[pop_names == "mofo3003"] <- "populations/mofo3005.txt"
poplist <- lapply(pop_files, read_pop)

gen <- set.populations(gen, poplist, diploid = TRUE)

# Within- and between-population diversity statistics.
gen <- diversity.stats(gen, pi = TRUE)
gen <- diversity.stats.between(gen)

# Normalize by window length to get per-site values.
Pi.per.site <- data.frame(gen@Pi / gen@n.sites)
dxy.per.site <- data.frame(gen@nuc.diversity.between / gen@n.sites)

# Print absolute and per-site statistics for inspection.
gen@Pi
gen@nuc.diversity.between
Pi.per.site
dxy.per.site

# Persist the per-site tables.
write.table(dxy.per.site, file = "dxy.csv", sep = ",")
write.table(Pi.per.site, file = "pi.csv", sep = ",")
/monardella_pop_genome.R
no_license
brettasmi/biology
R
false
false
2,034
r
# Per-population nucleotide diversity (pi) and between-population divergence
# (dxy) over one VCF window, using PopGenome.  Writes pi.csv and dxy.csv.
library(PopGenome)

# Read positions 1-10049 of contig "1" from the compressed VCF.
gen <- readVCF("vcf/sub3.vcf.gz", include.unknown = TRUE, frompos = 1,
               topos = 10049, tid = "1", numcols = 10000)

# Read one population's sample IDs (a single comma-separated row per file).
read_pop <- function(path) {
  as.character(read.table(path, sep = ",")[[1]])
}

# The 14 populations, in the order expected downstream.
pop_names <- c(
  "bhhz", "hznearmofo3003", "hznearmost005", "mofo3001nn", "mofo3002",
  "mofo3003", "mofo3009", "lfo", "mosh", "most001", "most003",
  "most004", "most005", "rhhz"
)
pop_files <- file.path("populations", paste0(pop_names, ".txt"))
# NOTE(review): the original script filled the `mofo3003` slot from
# populations/mofo3005.txt; preserved here to keep behavior identical --
# confirm whether that file name was intentional or a copy-paste error.
pop_files[pop_names == "mofo3003"] <- "populations/mofo3005.txt"
poplist <- lapply(pop_files, read_pop)

gen <- set.populations(gen, poplist, diploid = TRUE)

# Within- and between-population diversity statistics.
gen <- diversity.stats(gen, pi = TRUE)
gen <- diversity.stats.between(gen)

# Normalize by window length to get per-site values.
Pi.per.site <- data.frame(gen@Pi / gen@n.sites)
dxy.per.site <- data.frame(gen@nuc.diversity.between / gen@n.sites)

# Print absolute and per-site statistics for inspection.
gen@Pi
gen@nuc.diversity.between
Pi.per.site
dxy.per.site

# Persist the per-site tables.
write.table(dxy.per.site, file = "dxy.csv", sep = ",")
write.table(Pi.per.site, file = "pi.csv", sep = ",")
## Load and cache the activity dataset, then produce the first set of
## figures/statistics for the Reproducible Research assignment.
## fix: ddply()/summarize/count() come from plyr and ymd() from lubridate,
## but neither was ever loaded -- the script could not run from a fresh
## session.
library(plyr)
library(lubridate)

## Reset the global cache to NULL.
setglobalVariable <- function() {
    activity <<- NULL ## setting global variables to null
}

## Read the activity CSV (headers present, blanks become NA), parse the
## date column, and cache the result in the global `activity`.
readFile <- function(fname) {
    x <- read.csv(fname, header = TRUE)
    x$date <- ymd(x$date)
    activity <<- x
}

## Unzip the bundled archive, read the extracted CSV, then delete it.
getfile <- function() {
    temp <- "repdata-data-activity.zip"
    temp <- unzip(temp, files = "activity.csv")
    readFile(temp)
    unlink(temp)
}

## Populate the cache only if it is absent or NULL.
if (exists("activity", mode = "any")) {
    if (is.null(activity)) {
        getfile()
    }
} else {
    setglobalVariable()
    getfile()
}

## Make a histogram of the total number of steps taken each day.
y <- ddply(activity, c("date"), summarize, steps = sum(steps))
png(filename = "plot1.png", width = 480, height = 480, units = "px",
    bg = "transparent")
hist(y$steps, xlab = "Steps", ylab = "daily", main = "Steps per day",
     col = "red")
## fix: the PNG device was never closed, so the later time-series plot
## replaced the histogram inside plot1.png instead of going to the screen.
dev.off()

## What is mean total number of steps taken per day?
mean(y$steps, na.rm = TRUE)
## Median total number of steps taken per day.
median(y$steps, na.rm = TRUE)

## What is the average daily activity pattern?
## Time series (type = "l") of the 5-minute interval (x-axis) vs the
## average number of steps taken across all days (y-axis).
z <- ddply(activity, c("interval"), summarize,
           steps = sum(steps, na.rm = TRUE))
plot(z$interval, z$steps, type = "l", xlab = "Interval", ylab = "steps")

## Which 5-minute interval, on average across all the days in the dataset,
## contains the maximum number of steps?
subset(z, z$steps == max(z$steps), select = interval)

## Imputing missing values
## Note that there are a number of days/intervals where there are missing values (coded as NA).
## The presence of missing days may introduce bias into some calculations
## or summaries of the data.
## (fix: the sentence above was bare prose with no `##` prefix, which made
## the whole script fail to parse.)
## Calculate and report the total number of missing values in the dataset
## (i.e. the total number of rows with NAs).  count() comes from plyr.
count(activity[!complete.cases(activity), ])
## Devise a strategy for filling in all of the missing values in the dataset.
## The strategy does not need to be sophisticated. For example, you could use
## the mean/median for that day, or the mean for that 5-minute interval, etc.
## remove the missing values from the dataset
## x$steps[is.na(x$steps)] <- 0
## Create a new dataset that is equal to the original dataset but with the
## missing data filled in.
## Make a histogram of the total number of steps taken each day and Calculate
## and report the mean and median total number of steps taken per day. Do
## these values differ from the estimates from the first part of the
## assignment? What is the impact of imputing missing data on the estimates
## of the total daily number of steps?
## Are there differences in activity patterns between weekdays and weekends?
/getData.R
no_license
Sasha299/RepData_PeerAssessment1
R
false
false
3,387
r
## Load and cache the activity dataset, then produce the first set of
## figures/statistics for the Reproducible Research assignment.
## fix: ddply()/summarize/count() come from plyr and ymd() from lubridate,
## but neither was ever loaded -- the script could not run from a fresh
## session.
library(plyr)
library(lubridate)

## Reset the global cache to NULL.
setglobalVariable <- function() {
    activity <<- NULL ## setting global variables to null
}

## Read the activity CSV (headers present, blanks become NA), parse the
## date column, and cache the result in the global `activity`.
readFile <- function(fname) {
    x <- read.csv(fname, header = TRUE)
    x$date <- ymd(x$date)
    activity <<- x
}

## Unzip the bundled archive, read the extracted CSV, then delete it.
getfile <- function() {
    temp <- "repdata-data-activity.zip"
    temp <- unzip(temp, files = "activity.csv")
    readFile(temp)
    unlink(temp)
}

## Populate the cache only if it is absent or NULL.
if (exists("activity", mode = "any")) {
    if (is.null(activity)) {
        getfile()
    }
} else {
    setglobalVariable()
    getfile()
}

## Make a histogram of the total number of steps taken each day.
y <- ddply(activity, c("date"), summarize, steps = sum(steps))
png(filename = "plot1.png", width = 480, height = 480, units = "px",
    bg = "transparent")
hist(y$steps, xlab = "Steps", ylab = "daily", main = "Steps per day",
     col = "red")
## fix: the PNG device was never closed, so the later time-series plot
## replaced the histogram inside plot1.png instead of going to the screen.
dev.off()

## What is mean total number of steps taken per day?
mean(y$steps, na.rm = TRUE)
## Median total number of steps taken per day.
median(y$steps, na.rm = TRUE)

## What is the average daily activity pattern?
## Time series (type = "l") of the 5-minute interval (x-axis) vs the
## average number of steps taken across all days (y-axis).
z <- ddply(activity, c("interval"), summarize,
           steps = sum(steps, na.rm = TRUE))
plot(z$interval, z$steps, type = "l", xlab = "Interval", ylab = "steps")

## Which 5-minute interval, on average across all the days in the dataset,
## contains the maximum number of steps?
subset(z, z$steps == max(z$steps), select = interval)

## Imputing missing values
## Note that there are a number of days/intervals where there are missing values (coded as NA).
## The presence of missing days may introduce bias into some calculations
## or summaries of the data.
## (fix: the sentence above was bare prose with no `##` prefix, which made
## the whole script fail to parse.)
## Calculate and report the total number of missing values in the dataset
## (i.e. the total number of rows with NAs).  count() comes from plyr.
count(activity[!complete.cases(activity), ])
## Devise a strategy for filling in all of the missing values in the dataset.
## The strategy does not need to be sophisticated. For example, you could use
## the mean/median for that day, or the mean for that 5-minute interval, etc.
## remove the missing values from the dataset
## x$steps[is.na(x$steps)] <- 0
## Create a new dataset that is equal to the original dataset but with the
## missing data filled in.
## Make a histogram of the total number of steps taken each day and Calculate
## and report the mean and median total number of steps taken per day. Do
## these values differ from the estimates from the first part of the
## assignment? What is the impact of imputing missing data on the estimates
## of the total daily number of steps?
## Are there differences in activity patterns between weekdays and weekends?
## Sampling-date histograms and sample counts per chemical.
## Expects `data` (sample table with STD_CON_LONG_NAME and SAMP_DATE_TIME
## columns) and `chemical_name` (character vector) in the workspace,
## e.g. via load("chemical.r").
## rm(list=ls())
## load("chemical.r")

## Per-chemical histogram of sampling dates, binned in 5-year intervals.
for (ichemical in chemical_name) {
  # "/" in a chemical name would break the path, so replace it.
  fname <- paste0("chemistry/", gsub("/", "_", ichemical), ".jpg")
  sample_date <- as.Date(
    data[which(data[, "STD_CON_LONG_NAME"] == ichemical), "SAMP_DATE_TIME"]
  )
  jpeg(fname, units = "in", width = 6, height = 5, quality = 100, res = 600)
  hist(sample_date,
       breaks = seq(as.Date("1970-01-01"), as.Date("2020-01-01"),
                    by = "5 years"),
       freq = TRUE, ylim = c(0, 2000), main = ichemical,
       xlab = "year", format = "%Y")
  dev.off()
}

## Number of samples per chemical.
## fix: replaces the `rep(NA)` + `1:length()` index loop with vapply()
## (type-stable, keeps names, no off-by-one risk on empty input).
count_data <- vapply(
  chemical_name,
  function(ch) sum(data[, "STD_CON_LONG_NAME"] == ch),
  numeric(1)
)

jpeg("chemistry.jpg", units = "in", width = 10, height = 15, quality = 100,
     res = 600)
par(mar = c(5, 15, 1, 2))
barplot(sort(count_data), horiz = TRUE, las = 2, xlab = "Number of samples")
dev.off()

## Pooled histogram of sampling dates across all chemicals.
## fix: the original grew a character vector with c() inside a loop
## (quadratic); collect per-chemical dates with lapply() and flatten once.
total_sample_date <- as.Date(unlist(lapply(chemical_name, function(ch) {
  as.character(as.Date(
    data[which(data[, "STD_CON_LONG_NAME"] == ch), "SAMP_DATE_TIME"]
  ))
})))
jpeg("chemistry2.jpg", units = "in", width = 6, height = 5, quality = 100,
     res = 600)
hist(total_sample_date,
     breaks = seq(as.Date("1970-01-01"), as.Date("2020-01-01"),
                  by = "5 years"),
     freq = TRUE, ylim = c(0, 70000), main = "All Major Chemicals",
     xlab = "year", format = "%Y")
dev.off()
/homework_for_2016_ESS_PI_meeting/plot_chemist.R
no_license
mrubayet/archived_codes_for_sfa_modeling
R
false
false
1,689
r
## Sampling-date histograms and sample counts per chemical.
## Expects `data` (sample table with STD_CON_LONG_NAME and SAMP_DATE_TIME
## columns) and `chemical_name` (character vector) in the workspace,
## e.g. via load("chemical.r").
## rm(list=ls())
## load("chemical.r")

## Per-chemical histogram of sampling dates, binned in 5-year intervals.
for (ichemical in chemical_name) {
  # "/" in a chemical name would break the path, so replace it.
  fname <- paste0("chemistry/", gsub("/", "_", ichemical), ".jpg")
  sample_date <- as.Date(
    data[which(data[, "STD_CON_LONG_NAME"] == ichemical), "SAMP_DATE_TIME"]
  )
  jpeg(fname, units = "in", width = 6, height = 5, quality = 100, res = 600)
  hist(sample_date,
       breaks = seq(as.Date("1970-01-01"), as.Date("2020-01-01"),
                    by = "5 years"),
       freq = TRUE, ylim = c(0, 2000), main = ichemical,
       xlab = "year", format = "%Y")
  dev.off()
}

## Number of samples per chemical.
## fix: replaces the `rep(NA)` + `1:length()` index loop with vapply()
## (type-stable, keeps names, no off-by-one risk on empty input).
count_data <- vapply(
  chemical_name,
  function(ch) sum(data[, "STD_CON_LONG_NAME"] == ch),
  numeric(1)
)

jpeg("chemistry.jpg", units = "in", width = 10, height = 15, quality = 100,
     res = 600)
par(mar = c(5, 15, 1, 2))
barplot(sort(count_data), horiz = TRUE, las = 2, xlab = "Number of samples")
dev.off()

## Pooled histogram of sampling dates across all chemicals.
## fix: the original grew a character vector with c() inside a loop
## (quadratic); collect per-chemical dates with lapply() and flatten once.
total_sample_date <- as.Date(unlist(lapply(chemical_name, function(ch) {
  as.character(as.Date(
    data[which(data[, "STD_CON_LONG_NAME"] == ch), "SAMP_DATE_TIME"]
  ))
})))
jpeg("chemistry2.jpg", units = "in", width = 6, height = 5, quality = 100,
     res = 600)
hist(total_sample_date,
     breaks = seq(as.Date("1970-01-01"), as.Date("2020-01-01"),
                  by = "5 years"),
     freq = TRUE, ylim = c(0, 70000), main = "All Major Chemicals",
     xlab = "year", format = "%Y")
dev.off()
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 53006 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 53006 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query48_query44_1344n.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 9935 c no.of clauses 53006 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 53006 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query48_query44_1344n.qdimacs 9935 53006 E1 [] 0 180 9755 53006 NONE
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query48_query44_1344n/query48_query44_1344n.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
720
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 53006 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 53006 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query48_query44_1344n.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 9935 c no.of clauses 53006 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 53006 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query48_query44_1344n.qdimacs 9935 53006 E1 [] 0 180 9755 53006 NONE
library(DiagrammeR)

### Name: colorize_edge_attrs
### Title: Apply colors based on edge attribute values
### Aliases: colorize_edge_attrs

### ** Examples

# Build a 5-node path graph; its 4 edges get a numeric
# `weight` attribute.
graph <- create_graph()
graph <- add_path(graph, n = 5)
graph <- set_edge_attrs(
  graph,
  edge_attr = weight,
  values = c(3.7, 6.3, 9.2, 1.6)
)

# Bucket the `weight` values at the given cut points and map
# each bucket to a color from the RColorBrewer
# Red-Yellow-Green palette; values falling outside every
# bucket default to gray.
graph <- colorize_edge_attrs(
  graph,
  edge_attr_from = weight,
  edge_attr_to = color,
  cut_points = c(0, 2, 4, 6, 8, 10),
  palette = "RdYlGn"
)

# The edge data frame now carries a distinct `color` column.
get_edge_df(graph)
/data/genthat_extracted_code/DiagrammeR/examples/colorize_edge_attrs.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
870
r
library(DiagrammeR)

### Name: colorize_edge_attrs
### Title: Apply colors based on edge attribute values
### Aliases: colorize_edge_attrs

### ** Examples

# Build a 5-node path graph; its 4 edges get a numeric
# `weight` attribute.
graph <- create_graph()
graph <- add_path(graph, n = 5)
graph <- set_edge_attrs(
  graph,
  edge_attr = weight,
  values = c(3.7, 6.3, 9.2, 1.6)
)

# Bucket the `weight` values at the given cut points and map
# each bucket to a color from the RColorBrewer
# Red-Yellow-Green palette; values falling outside every
# bucket default to gray.
graph <- colorize_edge_attrs(
  graph,
  edge_attr_from = weight,
  edge_attr_to = color,
  cut_points = c(0, 2, 4, 6, 8, 10),
  palette = "RdYlGn"
)

# The edge data frame now carries a distinct `color` column.
get_edge_df(graph)
# Main_Part1: pan-cancer single-cell figures -- sample counts, UMAPs,
# per-cluster composition, DC / endothelium / C24 sub-clustering and the
# FABP4+ macrophage validation.  Expects "final_input1.Rds",
# "Validation_Results/input.Rds" and the tab-separated summary tables
# referenced below to exist in the working directory.
pkgs <- c(
    "Seurat", "SeuratWrappers", "ggplot2", "batchelor", "dplyr",
    "optparse", "reshape2", "data.table", "magrittr", "patchwork",
    "scales", "GSVA", "RColorBrewer", "ggridges", "clusterProfiler",
    "survminer", "monocle", "psych", "ggrepel", "pheatmap",
    "escape", "multcomp", "agricolae",
    # fix: these two are used throughout but were never loaded
    # ("survminer" was also listed twice; duplicate removed)
    "foreach", # %do% loops
    "ggsci"    # pal_npg()
)
lapply(pkgs, function(x) require(package = x, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)) # nolint

# lymphocyte--9
yr <- c(
    "#fafac0", "#f5eca6", "#fee391", "#fec44f", "#fe9929",
    "#ec7014", "#cc4c02", "#8c2d04", "#611f03"
)
# endothelium--5
gr <- c(
    "#b9e9e0", "#7dc9b7", "#59b898", "#41ae76", "#16d355",
    "#238b45", "#116d37", "#025826", "#003516"
)
# fibroblast--9
bl <- c(
    "#82cbf5", "#7ba7e0", "#5199cc", "#488dbe", "#3690c0",
    "#0570b0", "#0d71aa", "#045a8d", "#023858"
)
# myeloid--7
pur <- c(
    "#8c97c6", "#a28abd", "#997abd", "#9362cc", "#88419d",
    "#810f7c", "#4d004b"
)
# plasma--6
bro <- c(
    "#8c510a", "#995401", "#be7816", "#be9430", "#ad8d36", "#a07540"
)
# Per-cluster palette (34 clusters + gray) and per-celltype palette.
color1 <- c(bl[1:8], yr, pur, gr[c(3, 5, 6, 7, 9)], bro, "#A9A9A9")
color2 <- c(
    "#E0D39B", "#D05146", "#748EAE", "#567161", "#574F84", "#967447"
)

data <- readRDS("final_input1.Rds")

## Tissue and sample number analysis
tissue_sample_number <- read.table("number.txt", header = TRUE, sep = "\t")
tissue_sample_number <- melt(tissue_sample_number)
tissue_sample_number$variable <- factor(
    tissue_sample_number$variable,
    levels = c("Normal", "Adjacent", "Tumor_1", "Tumor_2")
)
pdf("F2B.pdf", width = 6, height = 3)
ggplot(tissue_sample_number, aes(x = tissue, y = value, fill = variable)) +
    geom_bar(stat = "identity") +
    scale_fill_manual(values = c("#9362cc", "#5199cc", "#fe9929", "#fcbf47")) +
    theme_bw() +
    labs(x = "", y = "Number of samples") +
    theme(
        legend.title = element_blank(),
        axis.text.x = element_text(angle = 45, hjust = 1, color = "black", size = 10), # nolint
        axis.text.y = element_text(color = "black", size = 10),
        axis.title = element_text(color = "black", size = 12)
    )
dev.off()

## UMAP of cell types (Epithelium drawn first so other types plot on top).
mat <- data.frame(data@reductions$umap@cell.embeddings, group = data$celltype)
mt <- mat[order(mat$group != "Epithelium"), ]
pdf("umap_subtype.pdf", width = 10, height = 9)
ggplot(mt, aes(x = UMAP_1, y = UMAP_2, color = group)) +
    geom_point(size = 1e-5) +
    theme_classic() +
    scale_color_manual(values = color2) +
    theme(
        legend.title = element_blank(),
        legend.text = element_text(size = 20, color = "black"),
        axis.text = element_blank(),
        axis.title = element_blank(),
        axis.ticks = element_blank(),
        axis.line = element_blank()
    ) +
    guides(colour = guide_legend(override.aes = list(size = 8)))
dev.off()

## UMAP of sub-clusters.
pdf("umap_subcluster.pdf", width = 10, height = 10)
DimPlot(data, label = FALSE) + NoLegend() +
    scale_color_manual(values = color1) +
    theme(
        axis.text = element_blank(),
        axis.title = element_blank(),
        axis.ticks = element_blank(),
        axis.line = element_blank()
    )
dev.off()

## Proportion of cells from each source (Normal/Adjacent/Tumor) per tissue.
percent_result <- read.table("percent_result.txt", header = TRUE, sep = "\t")
percent_result <- melt(percent_result)
percent_result$variable <- factor(
    percent_result$variable,
    levels = c("Normal", "Adjacent", "Tumor")
)
pdf("Percent_types.pdf", width = 4, height = 3)
ggplot(percent_result, aes(x = tissue, y = value, fill = variable)) +
    geom_bar(stat = "identity", position = "fill") +
    labs(y = "Proportion (%)") +
    scale_fill_manual(values = c("#9362cc", "#5199cc", "#fe9929")) +
    coord_flip() +
    theme(
        axis.line = element_blank(),
        legend.title = element_blank(),
        panel.grid = element_blank(),
        legend.text = element_text(size = 12),
        axis.text.y = element_text(size = 10, color = "black"),
        axis.title.x = element_text(size = 12),
        axis.title.y = element_blank(),
        legend.key.size = unit(0.6, "cm"),
        axis.text.x = element_text(
            angle = 45, hjust = 1, color = "black", size = 7
        ),
        plot.title = element_text(hjust = 0.5)
    ) +
    scale_y_continuous(expand = c(0, 0.01), labels = percent)
dev.off()

## Per-cluster composition, one panel per group.
percent_subtypes <- read.table("percent_subtype.txt", header = TRUE, sep = "\t")
percent_subtypes <- melt(percent_subtypes)
percent_subtypes$variable <- factor(
    percent_subtypes$variable,
    levels = c("Normal", "Adjacent", "Tumor")
)
percent_subtypes$cluster <- factor(
    percent_subtypes$cluster,
    levels = c(paste0("c", 34:1), "total")
)
plot_list <- foreach::foreach(gp = unique(percent_subtypes$group)) %do% {
    plot_dat <- percent_subtypes[percent_subtypes$group == gp, ]
    ggplot(plot_dat, aes(x = cluster, y = value, fill = variable)) +
        geom_bar(stat = "identity", position = "fill") +
        labs(y = "Proportion (%)", title = gp) + # fix: was `title = i` (undefined)
        scale_fill_manual(values = c("#9362cc", "#5199cc", "#fe9929")) +
        coord_flip() +
        theme(
            axis.line = element_blank(),
            legend.title = element_blank(),
            panel.grid = element_blank(),
            legend.text = element_text(size = 12),
            axis.text.y = element_text(size = 10, color = "black"),
            axis.title.x = element_text(size = 12),
            axis.title.y = element_blank(),
            legend.key.size = unit(0.6, "cm"),
            axis.text.x = element_text(
                angle = 45, hjust = 1, color = "black", size = 7
            ),
            plot.title = element_text(hjust = 0.5)
        ) +
        scale_y_continuous(expand = c(0, 0.01), labels = percent)
}
final <- wrap_plots(plot_list, ncol = length(plot_list), guides = "collect")
ggsave("percent_subtype.pdf", final, width = 10, height = 3)

## FeaturePlot for each tissue: one UMAP panel highlighting each tissue.
subtype <- c(
    "Bladder", "Breast", "Colorectal", "Gastric", "Intrahepatic duct",
    "Lung", "Ovarian", "Pancreas", "Prostate", "Thyroid"
)
# NOTE(review): the original iterated unique(data$tissue) while indexing
# `subtype[k]` with an undefined `k`, and its plotting code was a pasted
# copy of the proportion bar chart (referencing columns that do not exist
# in the UMAP data).  Rewritten to iterate the tissue labels directly and
# draw the highlight scatter used by the surrounding figures -- confirm
# the intended highlight colour.
plot_tissue <- foreach::foreach(ts = subtype) %do% {
    data$co <- ifelse(
        data$tissue != ts | data$clusters == 34,
        "Others", ts
    )
    mat <- data.frame(data@reductions$umap@cell.embeddings, group = data$co)
    # Draw "Others" first so the highlighted tissue sits on top.
    mt <- rbind(mat[mat$group == "Others", ], mat[mat$group == ts, ])
    mt$group <- factor(mt$group, levels = c("Others", ts))
    ggplot(mt, aes(x = UMAP_1, y = UMAP_2, color = group)) +
        geom_point(size = 0.1) +
        scale_color_manual(values = c("#D9D9D9", "#9362cc")) +
        labs(title = ts) +
        theme_classic() +
        theme(
            axis.text = element_blank(),
            axis.title = element_blank(),
            axis.ticks = element_blank(),
            axis.line = element_blank(),
            plot.title = element_text(hjust = 0.5, size = 25, color = "black"),
            legend.position = "none"
        )
}
final_plot_tissue <- wrap_plots(plot_tissue, ncol = length(plot_tissue) / 2, guides = "collect") # nolint
ggsave("featureplot_each_cluster.pdf", final_plot_tissue, width = 30, height = 13) # nolint

## Subcluster analysis for DC (cluster 13).
dc_clusters <- subset(data, ident = 13)
dc_clusters %<>%
    NormalizeData(object = ., normalization.method = "LogNormalize") %>%
    FindVariableFeatures(selection.method = "vst")
all.genes <- rownames(dc_clusters)
dc_clusters %<>%
    ScaleData(object = ., features = all.genes) %>%
    ScaleData(object = ., vars.to.regress = "percent.mt")
dc_clusters %<>%
    RunPCA(object = ., features = VariableFeatures(object = .)) %>%
    RunFastMNN(object = ., object.list = SplitObject(., split.by = "SampleID"))
# fix: the original chained `... %>% FindClusters(...) %<>% RunTSNE(...)`;
# the compound pipe (%<>%) is only valid as the first operator of a chain,
# so clustering and embedding are split into two statements.
dc_clusters %<>%
    FindNeighbors(reduction = "mnn", dims = 1:30) %>%
    FindClusters(resolution = 0.7)
dc_clusters %<>%
    RunTSNE(reduction = "mnn", dims = 1:30) %>%
    RunUMAP(reduction = "mnn", dims = 1:30)
# Collapse the raw Louvain clusters into 6 annotated DC sub-clusters.
dc_clusters <- RenameIdents(dc_clusters,
    "0" = "1", "1" = "1", "2" = "1", "3" = "1", "8" = "1",
    "7" = "5", "5" = "2", "4" = "4", "6" = "3", "10" = "3", "9" = "6"
)
dc_clusters$seurat_clusters <- dc_clusters@active.ident
dc_clusters$seurat_clusters <- factor(dc_clusters$seurat_clusters, levels = 1:6)
saveRDS(dc_clusters, "DC_sub-clusters.Rds")
pdf("DC_sub-clusters_VlnPlot.pdf", width = 3, height = 6)
VlnPlot(dc_clusters,
    features = c("CD3D", "CD3E"), pt.size = 0,
    cols = pal_npg()(6), ncol = 1
) & theme(
    axis.text.x = element_text(angle = 0, hjust = 0.5, color = "black"),
    axis.title.x = element_blank()
)
dev.off()
dot_plot_genes <- c(
    "CLEC10A", "FCGR2B", "FCER1G", "FCGR2A", "S100B", "LTB", "CD1A",
    "CD1E", "STMN1", "TUBB", "TYMS", "TOP2A", "CLEC9A", "LGALS2",
    "CPVL", "XCR1", "CCL22", "BIRC3", "CCL19", "CCR7", "TRAC",
    "CD3D", "CD3E", "CD2"
)
dc_clusters@active.ident <- factor(dc_clusters@active.ident, levels = 6:1)
pdf("DC_sub-clusters_DotPlot.pdf", width = 8, height = 3.5)
DotPlot(dc_clusters,
    features = dot_plot_genes,
    cols = c("#FAFCFA", "#4D9157")
) + theme_bw() +
    theme(
        axis.title = element_blank(),
        axis.text.x = element_text(angle = 45, hjust = 1, color = "black"),
        axis.text.y = element_text(color = "black"),
        panel.grid.major = element_blank()
    )
dev.off()

## Subcluster analysis for Endothelium (clusters 25, 26, 28).
endothelium_clusters <- subset(data, ident = c(25, 26, 28))
# Cluster 25 = tumor endothelium (TEC); 26/28 = normal (NEC).
endothelium_clusters$div <- ifelse(endothelium_clusters@active.ident == 25, "TEC", "NEC") # nolint
endothelium_clusters@active.ident <- factor(endothelium_clusters$div, levels = c("NEC", "TEC")) # nolint
pdf("endothelium_cluster_vln_plot.pdf", width = 4.5, height = 7)
VlnPlot(endothelium_clusters,
    features = c("IGFBP2", "IGFBP4", "INSR", "SPRY1", "CD320", "IGHG4"),
    pt.size = 0, cols = pal_npg()(6), ncol = 2
) & theme(
    axis.text.x = element_text(angle = 0, hjust = 0.5, color = "black"),
    axis.title.x = element_blank()
)
dev.off()
TEC_NEC_DEG_Marker <- FindAllMarkers(endothelium_clusters, min.pct = 0.25, only.pos = TRUE) # nolint
# Signed fold change: negative for NEC markers, positive for TEC markers.
TEC_NEC_DEG_Marker$fc <- ifelse(
    TEC_NEC_DEG_Marker$cluster == "NEC",
    -1 * TEC_NEC_DEG_Marker$avg_log2FC,
    TEC_NEC_DEG_Marker$avg_log2FC
)
TEC_NEC_DEG_Marker$q_value <- -log(TEC_NEC_DEG_Marker$p_val_adj, 10)
TEC_NEC_DEG_Marker$group <- ifelse(
    TEC_NEC_DEG_Marker$fc > 0.5 & TEC_NEC_DEG_Marker$p_val_adj < 0.05,
    "sig up",
    ifelse(TEC_NEC_DEG_Marker$fc < -0.5 & TEC_NEC_DEG_Marker$p_val_adj < 0.05,
        "sig down", "not sig"
    )
)
# Label the top 10 genes in each direction.
select_genes <- c(
    TEC_NEC_DEG_Marker[which(TEC_NEC_DEG_Marker$group == "sig up"), "gene"][1:10], # nolint
    TEC_NEC_DEG_Marker[which(TEC_NEC_DEG_Marker$group == "sig down"), "gene"][1:10] # nolint
)
TEC_NEC_DEG_Marker$label <- ifelse(
    TEC_NEC_DEG_Marker$gene %in% select_genes,
    TEC_NEC_DEG_Marker$gene, NA
)
# NOTE(review): the title "CAMR vs stable" looks copied from a different
# project; confirm it should not read "TEC vs NEC".
p <- ggplot(TEC_NEC_DEG_Marker, aes(x = fc, y = q_value)) +
    geom_point(aes(color = group), size = 1) +
    scale_color_manual(values = c("gray", "blue", "red")) +
    labs(x = "log2 fold change", y = "-log10 padj", title = "CAMR vs stable") +
    geom_text_repel(aes(label = label), size = 2) +
    geom_hline(yintercept = 1.30103, linetype = "dotted") +
    geom_vline(xintercept = c(-0.5, 0.5), linetype = "dotted") +
    theme(plot.title = element_text(hjust = 0.5))
ggsave("TEC_NEC_DEG_Volcano_Plot.pdf", p, width = 6.5, height = 6)

## Cluster analysis of tissue origin specificity.
Percent_Calculated <- read.table(
    "Percent_Calculated.txt",
    header = TRUE, sep = "\t"
)
Percent_Calculated <- melt(Percent_Calculated)
Percent_Calculated$variable <- factor(
    Percent_Calculated$variable,
    levels = c("c21", "c17", "c6", "c5", "c4")
)
npg_color <- pal_npg(alpha = 0.8)(10)
# fix: the original referenced `colo[3]`, which is not defined anywhere in
# this script; `color2[3]` is the closest defined palette -- confirm intent.
color_for_use <- rev(
    c(
        "#9269A2", "#7dc9b7", color2[3], "#5199cc", "#6479cc",
        npg_color[c(10, 1, 8, 2, 6, 9)], "#EEC877", npg_color[5]
    )
)
p <- ggplot(Percent_Calculated, aes(x = variable, y = value, fill = cluster)) +
    geom_bar(stat = "identity", position = "fill") +
    labs(y = "Proportion (%)") +
    scale_fill_manual(values = color_for_use) +
    coord_flip() +
    theme(
        axis.line = element_blank(),
        legend.title = element_blank(),
        panel.grid = element_blank(),
        legend.text = element_text(size = 12),
        axis.text.y = element_text(size = 10, color = "black"),
        axis.title.x = element_text(size = 12),
        axis.title.y = element_blank(),
        legend.key.size = unit(0.6, "cm"),
        axis.text.x = element_text(
            angle = 45, hjust = 1, color = "black", size = 10
        ),
        plot.title = element_text(hjust = 0.5)
    ) +
    scale_y_continuous(expand = c(0, 0.01), labels = percent)
ggsave(
    "Tissue_Origin_Specificity_Percent_Calculated.pdf",
    p, width = 5, height = 3.8
)

## Distribution of cells from tumor, adjacent and normal tissue origin.
plot_list <- foreach::foreach(gp = c("Normal", "Adjacent", "Tumor")) %do% {
    select_subsets <- data
    select_subsets$group_for_color <- ifelse(
        select_subsets$clusters != 35 & select_subsets$group == gp,
        gp, "Others"
    )
    plot_data <- data.frame(
        select_subsets@reductions$umap@cell.embeddings,
        group = select_subsets$group_for_color
    )
    plot_data$group <- factor(plot_data$group, levels = c("Others", gp))
    plot_data <- plot_data[order(plot_data$group), ]
    # fix: the original plotted an undefined `tt`; use `plot_data`.
    ggplot(plot_data, aes(x = UMAP_1, y = UMAP_2, color = group)) +
        geom_point(size = 0.1) +
        scale_color_manual(values = c("#D9D9D9", "#9362cc")) +
        labs(title = gp) +
        theme_classic() +
        theme(
            axis.text = element_blank(),
            axis.title = element_blank(),
            axis.ticks = element_blank(),
            axis.line = element_blank(),
            plot.title = element_text(hjust = 0.5, size = 25, color = "black"),
            legend.position = "none"
        )
}
final <- wrap_plots(plot_list, ncol = length(plot_list), guides = "collect")
# fix: ggsave() needs a file extension to choose a graphics device.
ggsave("Subtype_Umap_Plot.pdf", final, width = 24, height = 8)

## Subcluster analysis for C24.
C24_cluster <- subset(data, ident = 24)
# fix: the original assigned to the typo'd name `C24_cluster_clusters`.
C24_cluster %<>%
    NormalizeData(object = ., normalization.method = "LogNormalize") %>%
    FindVariableFeatures(selection.method = "vst")
all.genes <- rownames(C24_cluster)
C24_cluster %<>%
    ScaleData(object = ., features = all.genes) %>%
    ScaleData(object = ., vars.to.regress = "percent.mt")
C24_cluster %<>%
    RunPCA(object = ., features = VariableFeatures(object = .)) %>%
    RunFastMNN(object = ., object.list = SplitObject(., split.by = "SampleID"))
# fix: same invalid mid-chain %<>% as the DC section; split in two.
C24_cluster %<>%
    FindNeighbors(reduction = "mnn", dims = 1:30) %>%
    FindClusters(resolution = 0.7)
C24_cluster %<>%
    RunTSNE(reduction = "mnn", dims = 1:30) %>%
    RunUMAP(reduction = "mnn", dims = 1:30)
pdf("C24_Sub_Cluster_Umap.pdf", width = 4, height = 3.5)
DimPlot(C24_cluster, label = FALSE) + scale_color_manual(values = pal_npg()(6))
dev.off()
pdf("C24_Sub_Cluster_Vln_Plot.pdf", width = 3, height = 4.5)
VlnPlot(C24_cluster,
    features = "RGS13", pt.size = 0,
    cols = pal_npg()(6), ncol = 1
) & theme(
    axis.text.x = element_text(angle = 0, hjust = 0.5, color = "black"),
    axis.title.x = element_blank()
)
dev.off()

## Analysis of FABP4+ macrophage (c20) proportion in the validation cohort.
Validation_Results <- readRDS("Validation_Results/input.Rds") # nolint
Validation_Matrix <- table(Validation_Results@meta.data[, c("sampleid", "seurat_clusters")]) # nolint
# Per-sample percentage of cells falling in clusters 4 and 32.
Validation_Matrix_Sum <- 100 * rowSums(Validation_Matrix[, c(4, 32)]) / as.vector(table(Validation_Results$sampleid)) # nolint
Validation_Matrix_Value <- data.frame(name = names(Validation_Matrix_Sum), value = as.numeric(Validation_Matrix_Sum)) # nolint
Reference <- unique(Validation_Results@meta.data[, c("sampleid", "group")]) # nolint
Validation_Matrix_Draw <- merge(Validation_Matrix_Value, Reference, by.x = "name", by.y = "sampleid") # nolint
Validation_Matrix_Draw <- Validation_Matrix_Draw[
    which(Validation_Matrix_Draw$group %in% c("Lung_N", "Lung_T", "tLB", "mBrain")), # nolint
]
Validation_Matrix_Draw$group <- factor(
    Validation_Matrix_Draw$group,
    levels = c("Lung_N", "Lung_T", "tLB", "mBrain")
)
# fix: the original fitted/plotted an undefined `tt`; the assembled table
# is Validation_Matrix_Draw.  Also "two.side" -> "two.sided" (the original
# only worked via partial argument matching).
model <- aov(value ~ group, data = Validation_Matrix_Draw)
rht <- glht(model, linfct = mcp(group = "Dunnett"), alternative = "two.sided")
summary(rht)
p <- ggplot(Validation_Matrix_Draw, aes(x = group, y = value)) +
    geom_boxplot(
        size = 0.8, fill = "white", outlier.fill = NA,
        outlier.color = NA, outlier.size = 0
    ) +
    geom_point(aes(fill = group, color = group), shape = 21, size = 3) +
    scale_color_manual(values = pal_npg()(4)) +
    scale_fill_manual(values = pal_npg()(4)) +
    theme_classic() +
    labs(y = "Proportion (%)") +
    theme(
        axis.title.x = element_blank(),
        axis.title.y = element_text(size = 12, color = "black"),
        axis.text.x = element_text(
            size = 10, color = "black", angle = 45, hjust = 1
        ),
        legend.title = element_blank(),
        legend.position = "none",
        axis.text.y = element_text(size = 10, color = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank()
    )
ggsave("Validation_FABP4_Macrophage_Proportion.pdf", p, width = 2.3, height = 3)
/Main_Part1.R
no_license
Xiaxy-XuLab/PanCAF
R
false
false
17,831
r
pkgs <- c( "Seurat", "SeuratWrappers", "ggplot2", "batchelor", "dplyr", "optparse", "reshape2", "data.table", "magrittr", "patchwork", "scales", "GSVA", "RColorBrewer", "ggridges", "clusterProfiler", "survminer", "survminer", "monocle", "psych", "ggrepel", "pheatmap", "escape", "multcomp", "agricolae" ) lapply(pkgs, function(x) require(package = x, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)) # nolint # lymphocyte--9 yr <- c( "#fafac0", "#f5eca6", "#fee391", "#fec44f", "#fe9929", "#ec7014", "#cc4c02", "#8c2d04", "#611f03" ) # endothelium--5 gr <- c( "#b9e9e0", "#7dc9b7", "#59b898", "#41ae76", "#16d355", "#238b45", "#116d37", "#025826", "#003516" ) # fibroblast--9 bl <- c( "#82cbf5", "#7ba7e0", "#5199cc", "#488dbe", "#3690c0", "#0570b0", "#0d71aa", "#045a8d", "#023858" ) # myeloid--7 pur <- c( "#8c97c6", "#a28abd", "#997abd", "#9362cc", "#88419d", "#810f7c", "#4d004b" ) # plasma--6 bro <- c( "#8c510a", "#995401", "#be7816", "#be9430", "#ad8d36", "#a07540" ) color1 <- c(bl[1:8], yr, pur, gr[c(3, 5, 6, 7, 9)], bro, "#A9A9A9") color2 <- c( "#E0D39B", "#D05146", "#748EAE", "#567161", "#574F84", "#967447" ) data <- readRDS("final_input1.Rds") ## Tissue and sample number analysis tissue_sample_number <- read.table("number.txt", header = T, sep = "\t") tissue_sample_number <- melt(tissue_sample_number) tissue_sample_number$variable <- factor( tissue_sample_number$variable, levels = c("Normal", "Adjacent", "Tumor_1", "Tumor_2") ) pdf("F2B.pdf", width = 6, height = 3) ggplot(tissue_sample_number, aes(x = tissue, y = value, fill = variable)) + geom_bar(stat = "identity") + scale_fill_manual(values = c("#9362cc", "#5199cc", "#fe9929", "#fcbf47")) + theme_bw() + labs(x = "", y = "Number of samples") + theme( legend.title = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, color = "black", size = 10), # nolint axis.text.y = element_text(color = "black", size = 10), axis.title = element_text(color = "black", size = 12) ) dev.off() ## The Umap 
of clusters and celltype mat <- data.frame(data@reductions$umap@cell.embeddings, group = data$celltype) mt <- mat[order(mat$group != "Epithelium"), ] pdf("umap_subtype.pdf", width = 10, height = 9) ggplot(mt, aes(x = UMAP_1, y = UMAP_2, color = group)) + geom_point(size = 1e-5) + theme_classic() + scale_color_manual(values = color2) + theme( legend.title = element_blank(), legend.text = element_text(size = 20, color = "black"), axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(), axis.line = element_blank() ) + guides(colour = guide_legend(override.aes = list(size = 8))) dev.off() pdf("umap_subcluster.pdf", width = 10, height = 10) DimPlot(data, label = F) + NoLegend() + scale_color_manual(values = color1) + theme( axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(), axis.line = element_blank() ) dev.off() ## Proportion chart of cells from different sources in each cluster percent_result <- read.table("percent_result.txt", header = T, sep = "\t") percent_result <- melt(percent_result) percent_result$variable <- factor( percent_result$variable, levels = c("Normal", "Adjacent", "Tumor") ) pdf("Percent_types.pdf", width = 4, height = 3) ggplot(percent_result, aes(x = tissue, y = value, fill = variable)) + geom_bar(stat = "identity", position = "fill") + labs(y = "Proportion (%)") + scale_fill_manual(values = c("#9362cc", "#5199cc", "#fe9929")) + coord_flip() + theme( axis.line = element_blank(), legend.title = element_blank(), panel.grid = element_blank(), legend.text = element_text(size = 12), axis.text.y = element_text(size = 10, color = "black"), axis.title.x = element_text(size = 12), axis.title.y = element_blank(), legend.key.size = unit(0.6, "cm"), axis.text.x = element_text( angle = 45, hjust = 1, color = "black", size = 7 ), plot.title = element_text(hjust = 0.5) ) + scale_y_continuous(expand = c(0, 0.01), labels = percent) dev.off() percent_subtypes <- read.table("percent_subtype.txt", 
header = T, sep = "\t") percent_subtypes <- melt(percent_subtypes) percent_subtypes$variable <- factor( percent_subtypes$variable, levels = c("Normal", "Adjacent", "Tumor") ) percent_subtypes$cluster <- factor( percent_subtypes$cluster, levels = c(paste0("c", 34:1), "total") ) plot_list <- foreach::foreach(gp = unique(percent_subtypes$group)) %do% { plot_dat <- percent_subtypes[percent_subtypes$group == gp, ] ggplot(plot_dat, aes(x = cluster, y = value, fill = variable)) + geom_bar(stat = "identity", position = "fill") + labs(y = "Proportion (%)", title = i) + scale_fill_manual(values = c("#9362cc", "#5199cc", "#fe9929")) + coord_flip() + theme( axis.line = element_blank(), legend.title = element_blank(), panel.grid = element_blank(), legend.text = element_text(size = 12), axis.text.y = element_text(size = 10, color = "black"), axis.title.x = element_text(size = 12), axis.title.y = element_blank(), legend.key.size = unit(0.6, "cm"), axis.text.x = element_text( angle = 45, hjust = 1, color = "black", size = 7 ), plot.title = element_text(hjust = 0.5) ) + scale_y_continuous(expand = c(0, 0.01), labels = percent) } final <- wrap_plots(plot_list, ncol = length(plot_list), guides = "collect") ggsave("percent_subtype.pdf", final, width = 10, height = 3) ## FeaturePlot or each tissue subtype <- c( "Bladder", "Breast", "Colorectal", "Gastric", "Intrahepatic duct", "Lung", "Ovarian", "Pancreas", "Prostate", "Thyroid" ) plot_tissue <- foreach::foreach(ts = unique(data$tissue)) %do% { data$co <- ifelse( data$tissue != ts | data$clusters == 34, "Others", subtype[k] ) mat <- data.frame(data@reductions$umap@cell.embeddings, group = data$co) mt <- rbind(mat[which(mat$group == "Others"), ], mat[which(mat$group == subtype[k]), ]) # nolint mt$group <- factor(mt$group, levels = c("Others", subtype[k])) ggplot(mt, aes(x = cluster, y = value, fill = variable)) + geom_bar(stat = "identity", position = "fill") + labs(y = "Proportion (%)", title = i) + scale_fill_manual(values = 
c("#9362cc", "#5199cc", "#fe9929")) + coord_flip() + theme( axis.line = element_blank(), legend.title = element_blank(), panel.grid = element_blank(), legend.text = element_text(size = 12), axis.text.y = element_text(size = 10, color = "black"), axis.title.x = element_text(size = 12), axis.title.y = element_blank(), legend.key.size = unit(0.6, "cm"), axis.text.x = element_text( angle = 45, hjust = 1, color = "black", size = 7 ), plot.title = element_text(hjust = 0.5) ) + scale_y_continuous(expand = c(0, 0.01), labels = percent) } final_plot_tissue <- wrap_plots(plot_tissue, ncol = length(plot_tissue) / 2, guides = "collect") # nolint ggsave("featureplot_each_cluster.pdf", final_plot_tissue, width = 30, height = 13) # nolint ## Subcluster analysis for DC dc_clusters <- subset(data, ident = 13) dc_clusters %<>% NormalizeData(object = ., normalization.method = "LogNormalize") %>% # nolint FindVariableFeatures(selection.method = "vst") all.genes <- rownames(dc_clusters) dc_clusters %<>% ScaleData(object = ., features = all.genes) %>% ScaleData(object = ., vars.to.regress = "percent.mt") dc_clusters %<>% RunPCA(object = ., features = VariableFeatures(object = .)) %>% RunFastMNN(object = ., object.list = SplitObject(., split.by = "SampleID")) dc_clusters %<>% FindNeighbors(reduction = "mnn", dims = 1:30) %>% FindClusters(resolution = 0.7) %<>% RunTSNE(reduction = "mnn", dims = 1:30) %>% RunUMAP(reduction = "mnn", dims = 1:30) dc_clusters <- RenameIdents(dc_clusters, "0" = "1", "1" = "1", "2" = "1", "3" = "1", "8" = "1", "7" = "5", "5" = "2", "4" = "4", "6" = "3", "10" = "3", "9" = "6" ) dc_clusters$seurat_clusters <- dc_clusters@active.ident dc_clusters$seurat_clusters <- factor(dc_clusters$seurat_clusters, levels = 1:6) saveRDS(dc_clusters, "DC_sub-clusters.Rds") pdf("DC_sub-clusters_VlnPlot.pdf", width = 3, height = 6) VlnPlot(dc_clusters, features = c("CD3D", "CD3E"), pt.size = 0, cols = pal_npg()(6), ncol = 1 ) & theme( axis.text.x = element_text(angle = 0, hjust = 
0.5, color = "black"), axis.title.x = element_blank() ) dev.off() dot_plot_genes <- c( "CLEC10A", "FCGR2B", "FCER1G", "FCGR2A", "S100B", "LTB", "CD1A", "CD1E", "STMN1", "TUBB", "TYMS", "TOP2A", "CLEC9A", "LGALS2", "CPVL", "XCR1", "CCL22", "BIRC3", "CCL19", "CCR7", "TRAC", "CD3D", "CD3E", "CD2" ) dc_clusters@active.ident <- factor(dc_clusters@active.ident, levels = 6:1) pdf("DC_sub-clusters_DotPlot.pdf", width = 8, height = 3.5) DotPlot(dc_clusters, features = dot_plot_genes, cols = c( "#FAFCFA", "#4D9157" ) ) + theme_bw() + theme( axis.title = element_blank(), axis.text.x = element_text(angle = 45, hjust = 1, color = "black"), axis.text.y = element_text(color = "black"), panel.grid.major = element_blank() ) dev.off() ## Subcluster analysis for Endothelium endothelium_clusters <- subset(data, ident = c(25, 26, 28)) endothelium_clusters$div <- ifelse(endothelium_clusters@active.ident == 25, "TEC", "NEC") # nolint endothelium_clusters@active.ident <- factor(endothelium_clusters$div, levels = c("NEC", "TEC")) # nolint pdf("endothelium_cluster_vln_plot.pdf", width = 4.5, height = 7) VlnPlot(endothelium_clusters, features = c( "IGFBP2", "IGFBP4", "INSR", "SPRY1", "CD320", "IGHG4" ), pt.size = 0, cols = pal_npg()(6), ncol = 2) & theme( axis.text.x = element_text(angle = 0, hjust = 0.5, color = "black"), axis.title.x = element_blank() ) dev.off() TEC_NEC_DEG_Marker <- FindAllMarkers(endothelium_clusters, min.pct = 0.25, only.pos = T) # nolint TEC_NEC_DEG_Marker$fc <- ifelse( TEC_NEC_DEG_Marker$cluster == "NEC", -1 * TEC_NEC_DEG_Marker$avg_log2FC, TEC_NEC_DEG_Marker$avg_log2FC ) # nolint TEC_NEC_DEG_Marker$q_value <- -log(TEC_NEC_DEG_Marker$p_val_adj, 10) TEC_NEC_DEG_Marker$group <- ifelse( TEC_NEC_DEG_Marker$fc > 0.5 & TEC_NEC_DEG_Marker$p_val_adj < 0.05, "sig up", ifelse(TEC_NEC_DEG_Marker$fc < -0.5 & TEC_NEC_DEG_Marker$p_val_adj < 0.05, "sig down", "not sig" ) ) select_genes <- c( TEC_NEC_DEG_Marker[which(TEC_NEC_DEG_Marker$group == "sig up"), "gene"][1:10], # nolint 
TEC_NEC_DEG_Marker[which(TEC_NEC_DEG_Marker$group == "sig down"), "gene"][1:10] # nolint ) TEC_NEC_DEG_Marker$label <- ifelse( TEC_NEC_DEG_Marker$gene %in% select_genes, TEC_NEC_DEG_Marker$gene, NA ) p <- ggplot(TEC_NEC_DEG_Marker, aes(x = fc, y = q_value)) + geom_point(aes(color = group), size = 1) + scale_color_manual(values = c("gray", "blue", "red")) + labs(x = "log2 fold change", y = "-log10 padj", title = "CAMR vs stable") + geom_text_repel(aes(label = label), size = 2) + geom_hline(yintercept = 1.30103, linetype = "dotted") + geom_vline(xintercept = c(-0.5, 0.5), linetype = "dotted") + theme(plot.title = element_text(hjust = 0.5)) ggsave("TEC_NEC_DEG_Volcano_Plot.pdf", p, width = 6.5, height = 6) ## Cluster analysis of tissue origin specificity Percent_Calculated <- read.table( "Percent_Calculated.txt", header = T, sep = "\t" ) Percent_Calculated <- melt(Percent_Calculated) Percent_Calculated$variable <- factor( Percent_Calculated$variable, levels = c("c21", "c17", "c6", "c5", "c4") ) npg_color <- pal_npg(alpha = 0.8)(10) color_for_use <- rev( c( "#9269A2", "#7dc9b7", colo[3], "#5199cc", "#6479cc", npg_color[c(10, 1, 8, 2, 6, 9)], "#EEC877", npg_color[5] ) ) p <- ggplot(Percent_Calculated, aes(x = variable, y = value, fill = cluster)) + geom_bar(stat = "identity", position = "fill") + labs(y = "Proportion (%)") + scale_fill_manual(values = color_for_use) + # nolint coord_flip() + theme( axis.line = element_blank(), legend.title = element_blank(), panel.grid = element_blank(), legend.text = element_text(size = 12), axis.text.y = element_text(size = 10, color = "black"), axis.title.x = element_text(size = 12), axis.title.y = element_blank(), legend.key.size = unit(0.6, "cm"), axis.text.x = element_text( angle = 45, hjust = 1, color = "black", size = 10 ), plot.title = element_text(hjust = 0.5) ) + scale_y_continuous(expand = c(0, 0.01), labels = percent) ggsave( "Tissue_Origin_Specificity_Percent_Calculated.pdf", p, width = 5, height = 3.8 ) ## Analysis of the 
distribution of cells of tumor, adjacent and normal tissue origin, respectively plot_list <- foreach::foreach(gp = c("Normal", "Adjacent", "Tumor")) %do% { select_subsets <- data select_subsets$group_for_color <- ifelse( select_subsets$clusters != 35 & select_subsets$group == gp, gp, "Others" ) plot_data <- data.frame( select_subsets@reductions$umap@cell.embeddings, group = select_subsets$group_for_color ) plot_data$group <- factor(plot_data$group, levels = c("Others", gp)) plot_data <- plot_data[order(plot_data$group), ] ggplot(tt, aes(x = UMAP_1, y = UMAP_2, color = group)) + geom_point(size = 0.1) + scale_color_manual(values = c("#D9D9D9", "#9362cc")) + labs(title = gp) + theme_classic() + theme( axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(), axis.line = element_blank(), plot.title = element_text(hjust = 0.5, size = 25, color = "black"), legend.position = "none" ) } final <- wrap_plots(plot_list, ncol = length(plot_list), guides = "collect") ggsave("Subtype_Umap_Plot", final, width = 24, height = 8) ## Subcluster analysis for C24 C24_cluster <- subset(data, ident = 24) C24_cluster_clusters %<>% NormalizeData(object = ., normalization.method = "LogNormalize") %>% # nolint FindVariableFeatures(selection.method = "vst") all.genes <- rownames(C24_cluster) C24_cluster %<>% ScaleData(object = ., features = all.genes) %>% ScaleData(object = ., vars.to.regress = "percent.mt") C24_cluster %<>% RunPCA(object = ., features = VariableFeatures(object = .)) %>% RunFastMNN(object = ., object.list = SplitObject(., split.by = "SampleID")) C24_cluster %<>% FindNeighbors(reduction = "mnn", dims = 1:30) %>% FindClusters(resolution = 0.7) %<>% RunTSNE(reduction = "mnn", dims = 1:30) %>% RunUMAP(reduction = "mnn", dims = 1:30) pdf("C24_Sub_Cluster_Umap.pdf", width = 4, height = 3.5) DimPlot(C24_cluster, label = F) + scale_color_manual(values = pal_npg()(6)) dev.off() pdf("C24_Sub_Cluster_Vln_Plot.pdf", width = 3, height = 4.5) 
VlnPlot(C24_cluster, features = "RGS13", pt.size = 0, cols = pal_npg()(6), ncol = 1 ) & theme( axis.text.x = element_text(angle = 0, hjust = 0.5, color = "black"), axis.title.x = element_blank() ) dev.off() ## Analysis of FABP4+ macrophage (c20) proportion Validation_Results <- readRDS("Validation_Results/input.Rds") # nolint Validation_Matrix <- table(Validation_Results@meta.data[, c("sampleid", "seurat_clusters")]) # nolint Validation_Matrix_Sum <- 100 * rowSums(Validation_Matrix[, c(4, 32)]) / as.vector(table(Validation_Results$sampleid)) # nolint Validation_Matrix_Value <- data.frame(name = names(Validation_Matrix_Sum), value = as.numeric(Validation_Matrix_Sum)) # nolint Reference <- unique(Validation_Results@meta.data[, c("sampleid", "group")]) # nolint Validation_Matrix_Draw <- merge(Validation_Matrix_Value, Reference, by.x = "name", by.y = "sampleid") # nolint Validation_Matrix_Draw <- Validation_Matrix_Draw[ which(Validation_Matrix_Draw$group %in% c("Lung_N", "Lung_T", "tLB", "mBrain")), # nolint ] Validation_Matrix_Draw$group <- factor( Validation_Matrix_Draw$group, levels = c("Lung_N", "Lung_T", "tLB", "mBrain") ) model <- aov(value ~ group, data = tt) rht <- glht(model, linfct = mcp(group = "Dunnett"), alternative = "two.side") summary(rht) p <- ggplot(tt, aes(x = group, y = value)) + geom_boxplot( size = 0.8, fill = "white", outlier.fill = NA, outlier.color = NA, outlier.size = 0 ) + geom_point(aes(fill = group, color = group), shape = 21, size = 3) + scale_color_manual(values = pal_npg()(4)) + scale_fill_manual(values = pal_npg()(4)) + theme_classic() + labs(y = "Proportion (%)") + theme( axis.title.x = element_blank(), axis.title.y = element_text(size = 12, color = "black"), axis.text.x = element_text( size = 10, color = "black", angle = 45, hjust = 1 ), legend.title = element_blank(), legend.position = "none", axis.text.y = element_text(size = 10, color = "black"), panel.grid.major = element_blank(), panel.grid.minor = element_blank() ) 
ggsave("Validation_FABP4_Macrophage_Proportion.pdf", width = 2.3, height = 3)
## Functions Caching the Inverse of a Matrix ## Code written for R 3.1.0 - Platform: x86_64-w64-mingw32/x64 (64-bit) ## makeCacheMatrix function creates a special "matrix" ## object that can cache its inverse. ## ## Args: ## myMatrix: The square Matrix which has to be inverted. ## ## Returns: ## a list containing ## set: function to "store" the original matrix ## get: function to "retrieve" the original matrix ## setInverse: function to "store" the inverse matrix ## getInverse: function to "retrieve" the inverse matrix makeCacheMatrix <- function(myMatrix = matrix()) { invMatrix <- NULL set <- function(y) { myMatrix <<- y invMatrix <<- NULL } get <- function() myMatrix setInverse <- function(solve) invMatrix <<- solve getInverse <- function() invMatrix list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## cacheSolve function computes the inverse of the special "matrix" ## returned by makeCacheMatrix above. ## If already calculated & no change in matrix, retrieves it from cache. ## ## Args: ## x: The special "Matrix" returned by makeCacheMatrix function ## containing list of special functions (get, set, setInverse, getInverse) ## ## Returns: ## a matrix that is the inverse of original matrix cacheSolve <- function(x, ...) { invMatrix <- x$getInverse() if(!is.null(invMatrix)) { message("getting cached data") return(invMatrix) } data <- x$get() invMatrix <- solve(data, ...) x$setInverse(invMatrix) invMatrix }
/cachematrix.R
no_license
andreataglioni/ProgrammingAssignment2
R
false
false
1,703
r
## Functions Caching the Inverse of a Matrix ## Code written for R 3.1.0 - Platform: x86_64-w64-mingw32/x64 (64-bit) ## makeCacheMatrix function creates a special "matrix" ## object that can cache its inverse. ## ## Args: ## myMatrix: The square Matrix which has to be inverted. ## ## Returns: ## a list containing ## set: function to "store" the original matrix ## get: function to "retrieve" the original matrix ## setInverse: function to "store" the inverse matrix ## getInverse: function to "retrieve" the inverse matrix makeCacheMatrix <- function(myMatrix = matrix()) { invMatrix <- NULL set <- function(y) { myMatrix <<- y invMatrix <<- NULL } get <- function() myMatrix setInverse <- function(solve) invMatrix <<- solve getInverse <- function() invMatrix list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## cacheSolve function computes the inverse of the special "matrix" ## returned by makeCacheMatrix above. ## If already calculated & no change in matrix, retrieves it from cache. ## ## Args: ## x: The special "Matrix" returned by makeCacheMatrix function ## containing list of special functions (get, set, setInverse, getInverse) ## ## Returns: ## a matrix that is the inverse of original matrix cacheSolve <- function(x, ...) { invMatrix <- x$getInverse() if(!is.null(invMatrix)) { message("getting cached data") return(invMatrix) } data <- x$get() invMatrix <- solve(data, ...) x$setInverse(invMatrix) invMatrix }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{setcolor} \alias{setcolor} \title{return colors with given a vector} \usage{ setcolor(x) } \arguments{ \item{x}{Number of color} } \value{ color vector } \description{ Setcolor will provide a list of color vectors based on the number used as an input. } \examples{ mycol <- setcolor(10) mycol } \author{ Kai Guo }
/man/setcolor.Rd
no_license
sridhar0605/VennDetail
R
false
true
405
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/misc.R \name{setcolor} \alias{setcolor} \title{return colors with given a vector} \usage{ setcolor(x) } \arguments{ \item{x}{Number of color} } \value{ color vector } \description{ Setcolor will provide a list of color vectors based on the number used as an input. } \examples{ mycol <- setcolor(10) mycol } \author{ Kai Guo }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{ori_et_al_complex_ppis} \alias{ori_et_al_complex_ppis} \title{Data frame of eukaryotic protein-protein interactions inferred from annotated protein complexes by Ori et al. and StringDB interations with a combined score of at least 900} \format{ data frame with columns complex_name, x, y, pair (unique pair id) } \usage{ data("ori_et_al_complex_ppis") } \description{ data frame assigning proteins to (in)directly interacting proteins within protein complexes } \examples{ data("ori_et_al_complex_ppis") } \references{ Ori et al. (2016), Genome Biology, 17, 47; Jensen et al. (2009), Nucleic Acids Research, 37, D412–D416 } \keyword{datasets}
/man/ori_et_al_complex_ppis.Rd
no_license
nkurzaw/Rtpca
R
false
true
755
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{ori_et_al_complex_ppis} \alias{ori_et_al_complex_ppis} \title{Data frame of eukaryotic protein-protein interactions inferred from annotated protein complexes by Ori et al. and StringDB interations with a combined score of at least 900} \format{ data frame with columns complex_name, x, y, pair (unique pair id) } \usage{ data("ori_et_al_complex_ppis") } \description{ data frame assigning proteins to (in)directly interacting proteins within protein complexes } \examples{ data("ori_et_al_complex_ppis") } \references{ Ori et al. (2016), Genome Biology, 17, 47; Jensen et al. (2009), Nucleic Acids Research, 37, D412–D416 } \keyword{datasets}
power <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE) power_subset <- power[power$Date %in% c("1/2/2007","2/2/2007"),] date <- strptime(paste(power_subset$Date, power_subset$Time, sep=" "), "%d/%m/%Y %H:%M:%S") esm1 <- as.numeric(power_subset$Sub_metering_1) esm2 <- as.numeric(power_subset$Sub_metering_2) esm3 <- as.numeric(power_subset$Sub_metering_3) png("plot3.png", width=480, height=480) with(power_subset, { plot(date,esm1, type="S", ylab="Energy Sub Metering", xlab="") lines(date,esm2,col='Red') lines(date,esm3,col='Blue') }) #plot(date,esm1, col = "black", type= "1", ylab ="Energy Sub Meeting", xlab= "") #plot(date,esm2, col = "red", type= "S") #plot(date,esm3, col = "blue", type= "S") legend ("topright", col=c("black","red","blue") , lty=1, lwd=2,c("Sub_metering_1","Sub_metering_2","Sub_metering_3") )
/Explonatory Analysis/Assignment 1/plot3.R
no_license
Kamaldass/datasciencecoursera
R
false
false
908
r
power <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE) power_subset <- power[power$Date %in% c("1/2/2007","2/2/2007"),] date <- strptime(paste(power_subset$Date, power_subset$Time, sep=" "), "%d/%m/%Y %H:%M:%S") esm1 <- as.numeric(power_subset$Sub_metering_1) esm2 <- as.numeric(power_subset$Sub_metering_2) esm3 <- as.numeric(power_subset$Sub_metering_3) png("plot3.png", width=480, height=480) with(power_subset, { plot(date,esm1, type="S", ylab="Energy Sub Metering", xlab="") lines(date,esm2,col='Red') lines(date,esm3,col='Blue') }) #plot(date,esm1, col = "black", type= "1", ylab ="Energy Sub Meeting", xlab= "") #plot(date,esm2, col = "red", type= "S") #plot(date,esm3, col = "blue", type= "S") legend ("topright", col=c("black","red","blue") , lty=1, lwd=2,c("Sub_metering_1","Sub_metering_2","Sub_metering_3") )
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536289265e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613111875-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
257
r
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536289265e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
#' @method ggplot summary.margarita.sim.rl #' @export #' @importFrom scales comma ggplot.summary.margarita.sim.rl <- function(data=NULL, trans="log10", labels=comma, xlab="Return level", ylab="", main=NULL, xbreaks = waiver(), ptcol="blue", linecol=c("blue", "blue"), ptsize=4, linesize=c(.5, 1.5), ncol=1, as.table=TRUE, ...){ data <- as.data.frame(data) data$M <- factor(data$M, levels=unique(data$M)) ng <- length(unique(data$groups)) if (ng == 1) data$groups <- data$M # <------------------ Redundant now??? nint <- ncol(data)/2 - .5 # Number of intervals names(data)[(ncol(data)-2)/2 + .5] <- "median" # Middle column (could be mean or median or something else) # data$group <- factor(rownames(data), levels=rownames(data)) seg <- getSegmentData(data) seg[[1]]$M <- seg[[2]]$M <- data$M if (ng > 1){ p <- ggplot(data=data, aes(median, groups)) + geom_point(size=ptsize, color=ptcol) + facet_wrap(~M, ncol=ncol, as.table=as.table) + scale_x_continuous(xlab, trans=trans, labels=labels, breaks=xbreaks) + scale_y_discrete(ylab) + ggtitle(main) + geom_segment(data=seg[[1]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[1], color=linecol[1]) + if (!is.null(seg[[2]])){ geom_segment(data=seg[[2]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[2], color=linecol[2]) } # Close if } # Close if ng > 1 else{ p <- ggplot(data=data, aes(median, groups)) + geom_point(size=ptsize, color=ptcol) + scale_x_continuous(xlab, trans=trans, labels=labels, breaks=xbreaks) + scale_y_discrete(ylab) + ggtitle(main) + geom_segment(data=seg[[1]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[1], color=linecol[1]) + if (!is.null(seg[[2]])){ geom_segment(data=seg[[2]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[2], color=linecol[2]) } } p } #' @method ggplot summary.margarita.sim.prob #' @export ggplot.summary.margarita.sim.prob <- function(data=NULL, ptcol="blue", linecol=c("blue", "blue"), ptsize=4, linesize=c(.5, 1.5), scales="free", ncol=NULL, as.table=TRUE, 
xlab="P( > RL)", ylab="", M, main=NULL, ...){ g <- names(data) data <- unclass(data) nM <- nrow(data[[1]]) g <- rep(g, each=nM) # Add M to each data.frame if (missing(M)) M <- factor(rownames(data[[1]]), levels=rownames(data[[1]])) data <- lapply(1:length(data), function(x, data, M) { data <- as.data.frame(data[[x]]) data$M <- M data }, data=data, M=M) # Make groups to trellis on data <- do.call("rbind", data) data$groups <- factor(g, levels=unique(g)) if (ncol(data) == 7){ names(data)[3] <- "mid" } else if (ncol(data) == 5){ names(data)[2] <- "mid" } else { stop("data object has wrong number of columns") } seg <- getSegmentData(data) seg <- lapply(seg, function(x, M){ if (!is.null(x)){ x$M <- M }; x }, M=data$M) p <- ggplot(data, aes(mid, groups)) + geom_point(size=ptsize, color=ptcol) + facet_wrap(~M, scales=scales, ncol=ncol, as.table=as.table) + scale_x_continuous(xlab) + scale_y_discrete(ylab) + ggtitle(main) + geom_segment(data=seg[[1]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[1], color=linecol[1]) + if (!is.null(seg[[2]])){ geom_segment(data=seg[[2]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[2], color=linecol[2]) } p } #' @method ggplot margarita.sim.prob #' @export ggplot.margarita.sim.prob <- function(data=NULL, mapping = aes(), ..., environment = parent.frame()){ stop("No method available. You need to call 'summary' on the simulated margarita object first.") } #' @method ggplot margarita.sim.rl #' @export ggplot.margarita.sim.rl <- ggplot.margarita.sim.prob
/R/ggplot.sim.R
no_license
harrysouthworth/margarita
R
false
false
4,820
r
#' @method ggplot summary.margarita.sim.rl #' @export #' @importFrom scales comma ggplot.summary.margarita.sim.rl <- function(data=NULL, trans="log10", labels=comma, xlab="Return level", ylab="", main=NULL, xbreaks = waiver(), ptcol="blue", linecol=c("blue", "blue"), ptsize=4, linesize=c(.5, 1.5), ncol=1, as.table=TRUE, ...){ data <- as.data.frame(data) data$M <- factor(data$M, levels=unique(data$M)) ng <- length(unique(data$groups)) if (ng == 1) data$groups <- data$M # <------------------ Redundant now??? nint <- ncol(data)/2 - .5 # Number of intervals names(data)[(ncol(data)-2)/2 + .5] <- "median" # Middle column (could be mean or median or something else) # data$group <- factor(rownames(data), levels=rownames(data)) seg <- getSegmentData(data) seg[[1]]$M <- seg[[2]]$M <- data$M if (ng > 1){ p <- ggplot(data=data, aes(median, groups)) + geom_point(size=ptsize, color=ptcol) + facet_wrap(~M, ncol=ncol, as.table=as.table) + scale_x_continuous(xlab, trans=trans, labels=labels, breaks=xbreaks) + scale_y_discrete(ylab) + ggtitle(main) + geom_segment(data=seg[[1]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[1], color=linecol[1]) + if (!is.null(seg[[2]])){ geom_segment(data=seg[[2]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[2], color=linecol[2]) } # Close if } # Close if ng > 1 else{ p <- ggplot(data=data, aes(median, groups)) + geom_point(size=ptsize, color=ptcol) + scale_x_continuous(xlab, trans=trans, labels=labels, breaks=xbreaks) + scale_y_discrete(ylab) + ggtitle(main) + geom_segment(data=seg[[1]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[1], color=linecol[1]) + if (!is.null(seg[[2]])){ geom_segment(data=seg[[2]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[2], color=linecol[2]) } } p } #' @method ggplot summary.margarita.sim.prob #' @export ggplot.summary.margarita.sim.prob <- function(data=NULL, ptcol="blue", linecol=c("blue", "blue"), ptsize=4, linesize=c(.5, 1.5), scales="free", ncol=NULL, as.table=TRUE, 
xlab="P( > RL)", ylab="", M, main=NULL, ...){ g <- names(data) data <- unclass(data) nM <- nrow(data[[1]]) g <- rep(g, each=nM) # Add M to each data.frame if (missing(M)) M <- factor(rownames(data[[1]]), levels=rownames(data[[1]])) data <- lapply(1:length(data), function(x, data, M) { data <- as.data.frame(data[[x]]) data$M <- M data }, data=data, M=M) # Make groups to trellis on data <- do.call("rbind", data) data$groups <- factor(g, levels=unique(g)) if (ncol(data) == 7){ names(data)[3] <- "mid" } else if (ncol(data) == 5){ names(data)[2] <- "mid" } else { stop("data object has wrong number of columns") } seg <- getSegmentData(data) seg <- lapply(seg, function(x, M){ if (!is.null(x)){ x$M <- M }; x }, M=data$M) p <- ggplot(data, aes(mid, groups)) + geom_point(size=ptsize, color=ptcol) + facet_wrap(~M, scales=scales, ncol=ncol, as.table=as.table) + scale_x_continuous(xlab) + scale_y_discrete(ylab) + ggtitle(main) + geom_segment(data=seg[[1]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[1], color=linecol[1]) + if (!is.null(seg[[2]])){ geom_segment(data=seg[[2]], aes(x=lo, xend=hi, y=group, yend=group), size=linesize[2], color=linecol[2]) } p } #' @method ggplot margarita.sim.prob #' @export ggplot.margarita.sim.prob <- function(data=NULL, mapping = aes(), ..., environment = parent.frame()){ stop("No method available. You need to call 'summary' on the simulated margarita object first.") } #' @method ggplot margarita.sim.rl #' @export ggplot.margarita.sim.rl <- ggplot.margarita.sim.prob
get_result_cache <- function () { if (!iatlas.data:::present(.GlobalEnv$result_cache)) .GlobalEnv$result_cache <- new.env() .GlobalEnv$result_cache } result_cached <- function (key, value) { result_cache <- get_result_cache() if (iatlas.data:::present(result_cache[[key]])) result_cache[[key]] else result_cache[[key]] <- value } reset_results_cache <- function () { if (iatlas.data:::present(.GlobalEnv$result_cache)) { rm(result_cache, pos = .GlobalEnv) } gc() } reset_results_cache()
/R/result_cache.R
no_license
CRI-iAtlas/iatlas-data
R
false
false
514
r
get_result_cache <- function () { if (!iatlas.data:::present(.GlobalEnv$result_cache)) .GlobalEnv$result_cache <- new.env() .GlobalEnv$result_cache } result_cached <- function (key, value) { result_cache <- get_result_cache() if (iatlas.data:::present(result_cache[[key]])) result_cache[[key]] else result_cache[[key]] <- value } reset_results_cache <- function () { if (iatlas.data:::present(.GlobalEnv$result_cache)) { rm(result_cache, pos = .GlobalEnv) } gc() } reset_results_cache()
%[dont read] \name{Rfast2-package} \alias{Rfast2-package} \docType{package} \title{ Really fast R functions } \description{ A collection of Rfast2 functions for data analysis. Note 1: The vast majority of the functions accept matrices only, not data.frames. Note 2: Do not use matrices or vectors which have missing data (i.e. NAs). We do not check for them, and C++ internally transforms them into zeros (0), so you may get wrong results. Note 3: In general, make sure you give the correct input in order to get the correct output. We do not perform checks, and this is one of the many reasons we are fast. } \details{ \tabular{ll}{ Package: \tab Rfast2\cr Type: \tab Package\cr Version: \tab 0.0.7 \cr Date: \tab 2020-10-19\cr License: \tab GPL-2\cr } } \author{ Manos Papadakis <papadakm95@gmail.com>, Michail Tsagris <mtsagris@yahoo.gr>, Stefanos Fafalios <stefanosfafalios@gmail.com>, Marios Dimitriadis <kmdimitriadis@gmail.com>. } \section{Maintainers }{ Manos Papadakis \email{rfastofficial@gmail.com} } %\note{ %Acknowledgments: %}
/man/Rfast2-package.Rd
no_license
minghao2016/Rfast2
R
false
false
1,043
rd
%[dont read] \name{Rfast2-package} \alias{Rfast2-package} \docType{package} \title{ Really fast R functions } \description{ A collection of Rfast2 functions for data analysis. Note 1: The vast majority of the functions accept matrices only, not data.frames. Note 2: Do not use matrices or vectors which have missing data (i.e. NAs). We do not check for them, and C++ internally transforms them into zeros (0), so you may get wrong results. Note 3: In general, make sure you give the correct input in order to get the correct output. We do not perform checks, and this is one of the many reasons we are fast. } \details{ \tabular{ll}{ Package: \tab Rfast2\cr Type: \tab Package\cr Version: \tab 0.0.7 \cr Date: \tab 2020-10-19\cr License: \tab GPL-2\cr } } \author{ Manos Papadakis <papadakm95@gmail.com>, Michail Tsagris <mtsagris@yahoo.gr>, Stefanos Fafalios <stefanosfafalios@gmail.com>, Marios Dimitriadis <kmdimitriadis@gmail.com>. } \section{Maintainers }{ Manos Papadakis \email{rfastofficial@gmail.com} } %\note{ %Acknowledgments: %}
#'@import futile.logger #'@import hashmap library(utils) library(hashmap) #'Read gRNA counts given sample and gene specification files #' #'\code{read_counts_from_spec_files()} takes a file containing raw read counts, a replicate to sample specification file, and a gRNA ID to gene specification file, and returns a \code{\link{SummarizedExperiment}} #'object of normalized log2-scale read counts and standard deviations. #' #'@param count_file String path to read count file. See \code{read_count_table_from_spec} for requirements. #'@param sample_spec_file String path to sample specification file. Has to contain \code{replicate_col} and \code{sample_col} columns. #'@param replicate_col String Name of the column in \code{sample_spec_file} that contains identifiers for each experimental replicate, as used as the column header for that replicate in the corresponding count file (globally unique identifiers per line, treatment, replicate, etc., not just '1','2', 'A', or 'R1'). #'@param sample_col String Name of the column in \code{sample_spec_file} that contains the sample identifiers. For example, if three columns in the count file are replicate measurements of the same sample, they should all have the same sample id here. #'@param reference_sample String Sample name of the single reference sample all other samples will be compared against #'@param gene_spec_file String path to gRNA-gene link specification file. Has to contain \code{grna_col} and \code{gene_col} columns. #'@param grna_col String Name of the column in \code{gene_spec_file} that contains the gRNA identifiers. #'@param gene_col String Name of the column in \code{gene_spec_file} that contains the gene identifiers (to identify which gRNAs map to the same gene). #'@param count_prior Scalar double pseudocounts for each gRNA added to observations. Default: 32. #'@param normalization String per-sample normalization method. 
Default: 'median' [sets median log2-scale count to 0] #'@param window Scalar integer Length of smoothing window used in standard deviation estimation. Default: 800. #'@return \code{\link{SummarizedExperiment}} object of log2-scale read count changes compared to reference, and estimated standard deviations across replicates #'@export read_counts_from_spec_files <- function(count_file, sample_spec_file, replicate_col, sample_col, reference_sample, gene_spec_file, grna_col, gene_col, count_prior=32., normalization='median', window=800){ sample_spec = .read_sample_spec(count_file, sample_spec_file, replicate_col, sample_col) gene_spec = .read_gene_spec(gene_spec_file, grna_col, gene_col) logf = read_count_table_from_spec(sample_spec, gene_spec, count_prior=count_prior, normalization=normalization, window=window) colData(logf)$Condition = "TEST" colData(logf)$Condition = as.character(colData(logf)$Condition) colData(logf)$Condition[as.character(colData(logf)$Name) == reference_sample] = "CTRL" colData(logf)$Condition = as.factor(colData(logf)$Condition) return(calc_logfc(logf, reference="CTRL", condition="TEST")) } .read_sample_spec <- function(count_file, sample_spec_file, replicate_col, sample_col){ sample_spec = utils::read.table(sample_spec_file, header=TRUE, stringsAsFactors=FALSE, check.names=FALSE) if(!(replicate_col %in% colnames(sample_spec))){ flog.error(paste("Designated replicate column '", replicate_col, "' not found in columns of sample specification file ", sample_spec_file)) } else if (!(sample_col %in% colnames(sample_spec))){ flog.error(paste("Designated sample column '", sample_col, "' not found in columns of sample specification file ", sample_spec_file)) } sample_spec = sample_spec[,c(which(colnames(sample_spec) == replicate_col), which(colnames(sample_spec) == sample_col))] sample_spec = cbind(rep(count_file, dim(sample_spec)[1]), sample_spec) # add count file info colnames(sample_spec) = c("Filename", "Replicate", "Sample") return(sample_spec) } 
# Read and validate the gRNA -> gene specification table.
#
# Reads `gene_spec_file` (whitespace-delimited, header row required) and
# returns a two-column data frame: gRNA IDs (`grna_col`) first, the genes
# they map to (`gene_col`) second.
#
# Bug fix: futile.logger's flog.error only *logs*; it does not signal a
# condition. Previously a misnamed column was logged and execution fell
# through, producing a zero-column subset downstream. We now stop()
# explicitly after logging.
.read_gene_spec <- function(gene_spec_file, grna_col, gene_col){
    gene_spec = utils::read.table(gene_spec_file, header=TRUE, stringsAsFactors=FALSE, check.names=FALSE)
    if (!(grna_col %in% colnames(gene_spec))){
        msg = paste("Designated gRNA ID column '", grna_col, "' not found in columns of gRNA gene specification file ", gene_spec_file)
        flog.error(msg)
        stop(msg, call. = FALSE)
    } else if (!(gene_col %in% colnames(gene_spec))){
        msg = paste("Designated gene column '", gene_col, "' not found in columns of gRNA gene specification file ", gene_spec_file)
        flog.error(msg)
        stop(msg, call. = FALSE)
    }
    # Keep only the two columns of interest, gRNA ID first, gene second.
    gene_spec = gene_spec[,c(which(colnames(gene_spec) == grna_col), which(colnames(gene_spec) == gene_col))]
    return(gene_spec)
}
#'Read gRNA counts given sample and gene specification
#'
#'\code{read_count_table_from_spec()} takes a sample specification, and returns a \code{\link{SummarizedExperiment}}
#'object of read counts.
#'
#'The sample specification table provides names of count files to read, columns in those files to retain, and the samples these correspond to.
#'For example,
#'\code{}
#'JACKS assumes that each count file has the same number of rows ordered in the same way, corresponding to the same gRNAs.
#'Every count file has one header line, and the rest of the lines give raw gRNA read counts for one gRNA each, with each column being a different measurement.
#'Only columns with headers as given in the "Replicate" (2nd column of the specification) are stored from each count file.
#'Count file columns that map to the same "Sample" (3rd column of the specification) are treated as replicate measurements for that sample.
#'
#'@param sample_spec Data frame of at least three columns: "Filename": input count file, "Replicate": column names in count file, "Sample": corresponding sample ID to allow combining of replicates within samples.
#'@param gene_spec Data frame of at least two columns: gRNA ID (1st col) and corresponding gene (2nd col)
#'@param count_prior Scalar double pseudocounts for each gRNA added to observations. Default: 32.
#'@param normalization String per-sample normalization method. Default: 'median' [sets median log2-scale count to 0]
#'@param window Scalar integer smoothing window used in standard deviation estimation. Default: 800.
#'@return \code{\link{SummarizedExperiment}} object of log-scale sample read counts and estimated standard deviations across replicates
#'@export
read_count_table_from_spec <- function(sample_spec, gene_spec, count_prior=32., normalization='median', window=800){
    all_samples = c()
    all_counts = c()
    # gRNA ID -> gene lookup, used to filter out unmapped guides.
    genehash = hashmap(as.character(gene_spec[,1]), as.character(gene_spec[,2]))
    for(filename in unique(sample_spec[,1])){ # column 1 of sample spec. is filename
        flog.debug(paste("Reading samples from", filename))
        counts = c()
        meta = c()
        d = .read_sample_counts(filename, sample_spec)
        grnas = rownames(d)
        # Keep only the gRNAs that can be mapped to a gene.
        I = rep(TRUE, dim(d)[1])
        for(i in 1:dim(d)[1]){
            I[i] = genehash$has_key(grnas[i])
        }
        # Retain gRNA metadata; gene column filled in below.
        # NOTE: meta is rebuilt on every file; all files are assumed to
        # contain the same gRNAs in the same order (see function docs).
        meta = cbind(grnas[I], rep(NA, sum(I)))
        for(i in 1:dim(meta)[1]){
            meta[i,2] = genehash[[meta[i,1]]]
        }
        # drop = FALSE keeps single-gRNA / single-column results as data
        # frames (a bare vector would lose rownames and break cbind below).
        if(is.null(all_counts)) { # first file: initialize the result
            all_counts = d[I, , drop = FALSE]
            all_samples = .get_sample_ids(colnames(d), sample_spec, filename)
        } else { # append further files; assume all files are row-aligned
            all_counts = cbind(all_counts, d[I, , drop = FALSE])
            all_samples = c(all_samples, .get_sample_ids(colnames(d), sample_spec, filename))
        }
    }
    return(.generate_logfc_summarized_experiment(meta, all_counts, all_samples, count_prior, normalization, window))
}

# Read one raw count file and keep only the replicate columns that the
# sample specification assigns to this file. Row names are gRNA IDs.
.read_sample_counts <- function(filename, sample_spec){
    # Infer the field separator from the file extension.
    # Bug fix: the original test was `".csv" %in% filename`, which checks set
    # membership against the whole string and is always FALSE, so .csv files
    # were silently parsed as tab-separated.
    sep = "\t"
    if(grepl("\\.csv$", filename)){
        sep = ","
    }
    d = utils::read.table(filename, sep=sep, header=TRUE, stringsAsFactors=FALSE, row.names=1, check.names=FALSE)
    # Specification column 1 = file name, column 2 = replicate columns to retain from this file
    file_samples = unique(sample_spec[filename == sample_spec[,1], 2])
    I_sample = rep(FALSE, dim(d)[2])
    for(j in 1:length(I_sample)){
        I_sample[j] = colnames(d)[j] %in% file_samples
    }
    # drop = FALSE keeps a one-column result as a data frame; otherwise a
    # single retained replicate collapses to a bare vector downstream.
    return(d[, I_sample, drop = FALSE])
}

# Map count-file column (replicate) names to sample IDs via the spec.
# Returns one sample ID per matched column, in column order; replicate
# names absent from the spec are silently skipped.
.get_sample_ids <- function(colnames, sample_spec, filename){
    sample_ids = c()
    spec = sample_spec[sample_spec[,1] == filename,] # column 1 is filename
    for(i in 1:length(colnames)){
        for(j in 1:dim(spec)[1]){
            if(colnames[i] == spec[j,2]){ # Spec. column 2 is sample name in input file
                sample_ids = c(sample_ids, spec[j,3]) # Spec. column 3 is sample ID
            }
        }
    }
    return(sample_ids)
}

# Condense replicate-level counts into per-sample mean/SD log-count
# matrices and assemble them into a SummarizedExperiment.
.generate_logfc_summarized_experiment <- function(grna_meta, counts, count_samples, count_prior=32., normalization='median', window=800){
    # typecast vectors of vectors to matrices, add names
    grna_meta = matrix(unlist(grna_meta), ncol=2, dimnames=list(NULL, c("gRNA", "gene")))
    counts = matrix(unlist(counts), nrow=dim(grna_meta)[1], dimnames=list(grna_meta[,1], count_samples))
    samples = unique(count_samples)
    # per-sample matrices of log-scale centered means and standard deviations
    meanmat = matrix(ncol=length(samples), nrow=dim(counts)[1], dimnames=list(grna_meta[,1], samples))
    sdmat = matrix(ncol=length(samples), nrow=dim(counts)[1], dimnames=list(grna_meta[,1], samples))
    for (i in 1:length(samples)){ # for each sample
        flog.debug(paste("Calculating variance estimates for", samples[i]))
        I = (count_samples == samples[i]) # pick corresponding counts from replicates
        d = .calc_mean_sd(counts[,I], count_prior, window) # calculate
        meanmat[,i] = as.vector(d$mean) # and store
        sdmat[,i] = as.vector(d$sd)
    }
    # assemble object - metadata of matrices, row, and column entries
    colnames(grna_meta) = c("gRNA", "gene")
    sample_meta = data.frame(list(Name=samples, Condition=rep("NA", length(samples))), check.names=FALSE)
    rownames(sample_meta) = samples
    result = SummarizedExperiment(assays=list("logf_mean"=meanmat, "logf_sd"=sdmat), rowData=grna_meta, colData=sample_meta)
    validate_logf_table(result)
    flog.info(paste("Done condensing replicates; went from", length(count_samples), "experiments down to", length(samples), "samples"))
    return(result)
}

.read_precomputed_x
<- function(library){ supported = c("avana","gecko2","yusa_v10") if( library %in% supported ){ ref = url(paste0("https://raw.githubusercontent.com/felicityallen/JACKS/master/reference_grna_efficacies/", library, "_grna_JACKS_results.txt"))} else{ ref = library } x = utils::read.table(ref, header=TRUE, sep="\t", check.names=FALSE, stringsAsFactors=FALSE, row.names = 1) return(x) } #'Extract JACKS output for a gene #' #'\code{jacks_w_gene()} takes JACKS output and gene name, and returns a \code{data.frame} #'that has columns for statistics of JACKS inferred posterior for gene essentiality w - mean #'and standard deviation. Each row is one cell line, name in row.names. #' #'@param expt Output of JACKS inference - SummarizedExperiment endowed with posteriors. #'@param gene Gene name to extract information for. #'@return \code{data.table} object estimated means and standard deviations of gene essentiality. #'@export jacks_w_gene <- function(expt, gene){ data.frame( row.names = rownames(colData(expt)), w = metadata(expt)$jacks_w[[gene]], sd_w = metadata(expt)$jacks_sdw[[gene]], neg_pval = metadata(expt)$jacks_neg_pval[[gene]], pos_pval = metadata(expt)$jacks_pos_pval[[gene]], neg_fdr = metadata(expt)$jacks_neg_fdr[[gene]], pos_fdr = metadata(expt)$jacks_pos_fdr[[gene]] ) } #'Extract JACKS output for a sample #' #'\code{jacks_w_sample()} takes JACKS output and sample name, and returns a \code{data.frame} #'that has columns for statistics of JACKS inferred posterior for gene essentiality w - mean #'and standard deviation. Each row is one gene (name in row.names). #' #'@param expt Output of JACKS inference - SummarizedExperiment endowed with posteriors. #'@param sample Sample name to extract information for. #'@return \code{data.table} object estimated means and standard deviations of gene essentiality. 
#'@export jacks_w_sample <- function(expt, sample){ i = which(row.names(colData(expt)) == sample) m = metadata(expt) data.frame( row.names = colnames(m$jacks_w), w = m$jacks_w[i,], sd_w = m$jacks_sdw[i,], neg_pval = m$jacks_neg_pval[i,], pos_pval = m$jacks_pos_pval[i,], neg_fdr = m$jacks_neg_fdr[i,], pos_fdr = m$jacks_pos_fdr[i,] ) } # #write_jacks_output <- function(){ # #} #'Pre-computed gRNA efficacy estimates for the Avana library. #'Two vectors are provided, both sorted according to the gRNA sequence. #' \itemize{ #' \item x: Estimated gRNA efficacy for each gRNA. x = 1 means the guide works roughly as an average gRNA would. x = 0 means guide is ineffective. #' \item sdx: Standard deviation of x estimate. #' } #' #' @format Two numeric vectors. #' @source \url{http://www.diamondse.info/} #' @name avana #' @docType data #' @author Leopold Parts \email{lp2@sanger.ac.uk} #' @keywords data NULL #'Test #' @name example_repmap #' @docType data #' @keywords data NULL #'Test #' @name example_count_data #' @docType data #' @keywords data NULL #'Test #' @name avana_head #' @docType data #' @keywords data NULL #'Test #' @name data #' @docType data #' @keywords data NULL #'Test #' @name data_err #' @docType data #' @keywords data NULL #'Test #' @name pyvals #' @docType data #' @keywords data NULL #'Test #' @name x #' @docType data #' @keywords data NULL #'Test #' @name sdx #' @docType data #' @keywords data NULL
/rjacks/jacks/R/io.R
permissive
goedel-gang/JACKS
R
false
false
13,845
r
#'@import futile.logger #'@import hashmap library(utils) library(hashmap) #'Read gRNA counts given sample and gene specification files #' #'\code{read_counts_from_spec_files()} takes a file containing raw read counts, a replicate to sample specification file, and a gRNA ID to gene specification file, and returns a \code{\link{SummarizedExperiment}} #'object of normalized log2-scale read counts and standard deviations. #' #'@param count_file String path to read count file. See \code{read_count_table_from_spec} for requirements. #'@param sample_spec_file String path to sample specification file. Has to contain \code{replicate_col} and \code{sample_col} columns. #'@param replicate_col String Name of the column in \code{sample_spec_file} that contains identifiers for each experimental replicate, as used as the column header for that replicate in the corresponding count file (globally unique identifiers per line, treatment, replicate, etc., not just '1','2', 'A', or 'R1'). #'@param sample_col String Name of the column in \code{sample_spec_file} that contains the sample identifiers. For example, if three columns in the count file are replicate measurements of the same sample, they should all have the same sample id here. #'@param reference_sample String Sample name of the single reference sample all other samples will be compared against #'@param gene_spec_file String path to gRNA-gene link specification file. Has to contain \code{grna_col} and \code{gene_col} columns. #'@param grna_col String Name of the column in \code{gene_spec_file} that contains the gRNA identifiers. #'@param gene_col String Name of the column in \code{gene_spec_file} that contains the gene identifiers (to identify which gRNAs map to the same gene). #'@param count_prior Scalar double pseudocounts for each gRNA added to observations. Default: 32. #'@param normalization String per-sample normalization method. 
Default: 'median' [sets median log2-scale count to 0] #'@param window Scalar integer Length of smoothing window used in standard deviation estimation. Default: 800. #'@return \code{\link{SummarizedExperiment}} object of log2-scale read count changes compared to reference, and estimated standard deviations across replicates #'@export read_counts_from_spec_files <- function(count_file, sample_spec_file, replicate_col, sample_col, reference_sample, gene_spec_file, grna_col, gene_col, count_prior=32., normalization='median', window=800){ sample_spec = .read_sample_spec(count_file, sample_spec_file, replicate_col, sample_col) gene_spec = .read_gene_spec(gene_spec_file, grna_col, gene_col) logf = read_count_table_from_spec(sample_spec, gene_spec, count_prior=count_prior, normalization=normalization, window=window) colData(logf)$Condition = "TEST" colData(logf)$Condition = as.character(colData(logf)$Condition) colData(logf)$Condition[as.character(colData(logf)$Name) == reference_sample] = "CTRL" colData(logf)$Condition = as.factor(colData(logf)$Condition) return(calc_logfc(logf, reference="CTRL", condition="TEST")) } .read_sample_spec <- function(count_file, sample_spec_file, replicate_col, sample_col){ sample_spec = utils::read.table(sample_spec_file, header=TRUE, stringsAsFactors=FALSE, check.names=FALSE) if(!(replicate_col %in% colnames(sample_spec))){ flog.error(paste("Designated replicate column '", replicate_col, "' not found in columns of sample specification file ", sample_spec_file)) } else if (!(sample_col %in% colnames(sample_spec))){ flog.error(paste("Designated sample column '", sample_col, "' not found in columns of sample specification file ", sample_spec_file)) } sample_spec = sample_spec[,c(which(colnames(sample_spec) == replicate_col), which(colnames(sample_spec) == sample_col))] sample_spec = cbind(rep(count_file, dim(sample_spec)[1]), sample_spec) # add count file info colnames(sample_spec) = c("Filename", "Replicate", "Sample") return(sample_spec) } 
.read_gene_spec <- function(gene_spec_file, grna_col, gene_col){ gene_spec = utils::read.table(gene_spec_file, header=TRUE, stringsAsFactors=FALSE, check.names=FALSE) if (!(grna_col %in% colnames(gene_spec))){ flog.error(paste("Designated gRNA ID column '", grna_col, "' not found in columns of gRNA gene specification file ", gene_spec_file)) } else if (!(gene_col %in% colnames(gene_spec))){ flog.error(paste("Designated gene column '", gene_col, "' not found in columns of gRNA gene specification file ", gene_spec_file)) } gene_spec = gene_spec[,c(which(colnames(gene_spec) == grna_col), which(colnames(gene_spec) == gene_col))] return(gene_spec) } #'Read gRNA counts given sample and gene specification #' #'\code{read_count_table_from_spec()} takes a sample specification, and returns a \code{\link{SummarizedExperiment}} #'object of read counts. #' #'The sample specification table provides names of count files to read, columns in those files to retain, and the samples these correspond to. #'For example, #'\code{} #'JACKS assumes that each count file has the same number of rows ordered in the same way, corresponding to the same gRNAs. #'Every count file has one header line, and the rest of the lines give raw gRNA read counts for one gRNA each, with each column being a different measurement. #'Only columns with headers as given in the "Replicate" (2nd column of the specification) are stored from each count file. #'Count file columns that map to the same "Sample" (3rd column of the specification) are treated as replicate measurements for that sample. #' #'@param sample_spec Data frame of at least three columns: "Filename": input count file, "Replicate": column names in count file, "Sample": corresponding sample ID to allow combining of replicates within samples. #'@param gene_spec Data frame of at least two columns: gRNA ID (1st col) and corresponding gene (2nd col) #'@param count_prior Scalar double pseudocounts for each gRNA added to observations. Default: 32. 
#'@param normalization String per-sample normalization method. Default: 'median' [sets median log2-scale count to 0]
#'@param window Scalar integer smoothing window used in standard deviation estimation. Default: 800.
#'@return \code{\link{SummarizedExperiment}} object of log-scale sample read counts and estimated standard deviations across replicates
#'@export
read_count_table_from_spec <- function(sample_spec, gene_spec, count_prior=32., normalization='median', window=800){
    all_samples = c()
    all_counts = c()
    # gRNA ID -> gene lookup, used to filter out unmapped guides.
    genehash = hashmap(as.character(gene_spec[,1]), as.character(gene_spec[,2]))
    for(filename in unique(sample_spec[,1])){ # column 1 of sample spec. is filename
        flog.debug(paste("Reading samples from", filename))
        counts = c()
        meta = c()
        d = .read_sample_counts(filename, sample_spec)
        grnas = rownames(d)
        # Keep only the gRNAs that can be mapped to a gene.
        I = rep(TRUE, dim(d)[1])
        for(i in 1:dim(d)[1]){
            I[i] = genehash$has_key(grnas[i])
        }
        # Retain gRNA metadata; gene column filled in below.
        # NOTE: meta is rebuilt on every file; all files are assumed to
        # contain the same gRNAs in the same order (see function docs).
        meta = cbind(grnas[I], rep(NA, sum(I)))
        for(i in 1:dim(meta)[1]){
            meta[i,2] = genehash[[meta[i,1]]]
        }
        # drop = FALSE keeps single-gRNA / single-column results as data
        # frames (a bare vector would lose rownames and break cbind below).
        if(is.null(all_counts)) { # first file: initialize the result
            all_counts = d[I, , drop = FALSE]
            all_samples = .get_sample_ids(colnames(d), sample_spec, filename)
        } else { # append further files; assume all files are row-aligned
            all_counts = cbind(all_counts, d[I, , drop = FALSE])
            all_samples = c(all_samples, .get_sample_ids(colnames(d), sample_spec, filename))
        }
    }
    return(.generate_logfc_summarized_experiment(meta, all_counts, all_samples, count_prior, normalization, window))
}

# Read one raw count file and keep only the replicate columns that the
# sample specification assigns to this file. Row names are gRNA IDs.
.read_sample_counts <- function(filename, sample_spec){
    # Infer the field separator from the file extension.
    # Bug fix: the original test was `".csv" %in% filename`, which checks set
    # membership against the whole string and is always FALSE, so .csv files
    # were silently parsed as tab-separated.
    sep = "\t"
    if(grepl("\\.csv$", filename)){
        sep = ","
    }
    d = utils::read.table(filename, sep=sep, header=TRUE, stringsAsFactors=FALSE, row.names=1, check.names=FALSE)
    # Specification column 1 = file name, column 2 = replicate columns to retain from this file
    file_samples = unique(sample_spec[filename == sample_spec[,1], 2])
    I_sample = rep(FALSE, dim(d)[2])
    for(j in 1:length(I_sample)){
        I_sample[j] = colnames(d)[j] %in% file_samples
    }
    # drop = FALSE keeps a one-column result as a data frame; otherwise a
    # single retained replicate collapses to a bare vector downstream.
    return(d[, I_sample, drop = FALSE])
}

# Map count-file column (replicate) names to sample IDs via the spec.
# Returns one sample ID per matched column, in column order; replicate
# names absent from the spec are silently skipped.
.get_sample_ids <- function(colnames, sample_spec, filename){
    sample_ids = c()
    spec = sample_spec[sample_spec[,1] == filename,] # column 1 is filename
    for(i in 1:length(colnames)){
        for(j in 1:dim(spec)[1]){
            if(colnames[i] == spec[j,2]){ # Spec. column 2 is sample name in input file
                sample_ids = c(sample_ids, spec[j,3]) # Spec. column 3 is sample ID
            }
        }
    }
    return(sample_ids)
}

# Condense replicate-level counts into per-sample mean/SD log-count
# matrices and assemble them into a SummarizedExperiment.
.generate_logfc_summarized_experiment <- function(grna_meta, counts, count_samples, count_prior=32., normalization='median', window=800){
    # typecast vectors of vectors to matrices, add names
    grna_meta = matrix(unlist(grna_meta), ncol=2, dimnames=list(NULL, c("gRNA", "gene")))
    counts = matrix(unlist(counts), nrow=dim(grna_meta)[1], dimnames=list(grna_meta[,1], count_samples))
    samples = unique(count_samples)
    # per-sample matrices of log-scale centered means and standard deviations
    meanmat = matrix(ncol=length(samples), nrow=dim(counts)[1], dimnames=list(grna_meta[,1], samples))
    sdmat = matrix(ncol=length(samples), nrow=dim(counts)[1], dimnames=list(grna_meta[,1], samples))
    for (i in 1:length(samples)){ # for each sample
        flog.debug(paste("Calculating variance estimates for", samples[i]))
        I = (count_samples == samples[i]) # pick corresponding counts from replicates
        d = .calc_mean_sd(counts[,I], count_prior, window) # calculate
        meanmat[,i] = as.vector(d$mean) # and store
        sdmat[,i] = as.vector(d$sd)
    }
    # assemble object - metadata of matrices, row, and column entries
    colnames(grna_meta) = c("gRNA", "gene")
    sample_meta = data.frame(list(Name=samples, Condition=rep("NA", length(samples))), check.names=FALSE)
    rownames(sample_meta) = samples
    result = SummarizedExperiment(assays=list("logf_mean"=meanmat, "logf_sd"=sdmat), rowData=grna_meta, colData=sample_meta)
    validate_logf_table(result)
    flog.info(paste("Done condensing replicates; went from", length(count_samples), "experiments down to", length(samples), "samples"))
    return(result)
}

.read_precomputed_x
<- function(library){ supported = c("avana","gecko2","yusa_v10") if( library %in% supported ){ ref = url(paste0("https://raw.githubusercontent.com/felicityallen/JACKS/master/reference_grna_efficacies/", library, "_grna_JACKS_results.txt"))} else{ ref = library } x = utils::read.table(ref, header=TRUE, sep="\t", check.names=FALSE, stringsAsFactors=FALSE, row.names = 1) return(x) } #'Extract JACKS output for a gene #' #'\code{jacks_w_gene()} takes JACKS output and gene name, and returns a \code{data.frame} #'that has columns for statistics of JACKS inferred posterior for gene essentiality w - mean #'and standard deviation. Each row is one cell line, name in row.names. #' #'@param expt Output of JACKS inference - SummarizedExperiment endowed with posteriors. #'@param gene Gene name to extract information for. #'@return \code{data.table} object estimated means and standard deviations of gene essentiality. #'@export jacks_w_gene <- function(expt, gene){ data.frame( row.names = rownames(colData(expt)), w = metadata(expt)$jacks_w[[gene]], sd_w = metadata(expt)$jacks_sdw[[gene]], neg_pval = metadata(expt)$jacks_neg_pval[[gene]], pos_pval = metadata(expt)$jacks_pos_pval[[gene]], neg_fdr = metadata(expt)$jacks_neg_fdr[[gene]], pos_fdr = metadata(expt)$jacks_pos_fdr[[gene]] ) } #'Extract JACKS output for a sample #' #'\code{jacks_w_sample()} takes JACKS output and sample name, and returns a \code{data.frame} #'that has columns for statistics of JACKS inferred posterior for gene essentiality w - mean #'and standard deviation. Each row is one gene (name in row.names). #' #'@param expt Output of JACKS inference - SummarizedExperiment endowed with posteriors. #'@param sample Sample name to extract information for. #'@return \code{data.table} object estimated means and standard deviations of gene essentiality. 
#'@export jacks_w_sample <- function(expt, sample){ i = which(row.names(colData(expt)) == sample) m = metadata(expt) data.frame( row.names = colnames(m$jacks_w), w = m$jacks_w[i,], sd_w = m$jacks_sdw[i,], neg_pval = m$jacks_neg_pval[i,], pos_pval = m$jacks_pos_pval[i,], neg_fdr = m$jacks_neg_fdr[i,], pos_fdr = m$jacks_pos_fdr[i,] ) } # #write_jacks_output <- function(){ # #} #'Pre-computed gRNA efficacy estimates for the Avana library. #'Two vectors are provided, both sorted according to the gRNA sequence. #' \itemize{ #' \item x: Estimated gRNA efficacy for each gRNA. x = 1 means the guide works roughly as an average gRNA would. x = 0 means guide is ineffective. #' \item sdx: Standard deviation of x estimate. #' } #' #' @format Two numeric vectors. #' @source \url{http://www.diamondse.info/} #' @name avana #' @docType data #' @author Leopold Parts \email{lp2@sanger.ac.uk} #' @keywords data NULL #'Test #' @name example_repmap #' @docType data #' @keywords data NULL #'Test #' @name example_count_data #' @docType data #' @keywords data NULL #'Test #' @name avana_head #' @docType data #' @keywords data NULL #'Test #' @name data #' @docType data #' @keywords data NULL #'Test #' @name data_err #' @docType data #' @keywords data NULL #'Test #' @name pyvals #' @docType data #' @keywords data NULL #'Test #' @name x #' @docType data #' @keywords data NULL #'Test #' @name sdx #' @docType data #' @keywords data NULL
library(imp4p)

### Name: prob.mcar
### Title: Estimation of a vector of probabilities that missing values are
###   MCAR.
### Aliases: prob.mcar
### Keywords: Missing value analysis

### ** Examples

## No test:
# Simulate a two-condition proteomics data set with missing values
sim_res <- sim.data(nb.pept = 2000, nb.miss = 600, pi.mcar = 0.2, para = 10,
                    nb.cond = 2, nb.repbio = 3, nb.sample = 5,
                    m.c = 25, sd.c = 2, sd.rb = 0.5, sd.r = 0.2)

# Impute the missing values with the SLSA algorithm
slsa_imputed <- impute.slsa(tab = sim_res$dat.obs,
                            conditions = sim_res$condition,
                            repbio = sim_res$repbio)

# Estimate the mixture model from the observed and imputed data
mix_fit <- estim.mix(tab = sim_res$dat.obs, tab.imp = slsa_imputed,
                     conditions = sim_res$condition)

# Estimate the bounds needed for the MCAR probabilities
bounds <- estim.bound(tab = sim_res$dat.obs, conditions = sim_res$condition)

# Probabilities that missing values in the first column of result$tab.mod are MCAR
mcar_prob <- prob.mcar(b.l = bounds$tab.lower[, 1], b.u = bounds$tab.upper[, 1],
                       absc = mix_fit$abs.mod, pi.mcar = mix_fit$pi.mcar[1],
                       F.tot = mix_fit$F.tot[, 1], F.na = mix_fit$F.na[, 1])
## End(No test)
/data/genthat_extracted_code/imp4p/examples/prob_mcar.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
992
r
library(imp4p)

### Name: prob.mcar
### Title: Estimation of a vector of probabilities that missing values are
###   MCAR.
### Aliases: prob.mcar
### Keywords: Missing value analysis

### ** Examples

## No test:
# Simulate a two-condition proteomics data set with missing values
sim_res <- sim.data(nb.pept = 2000, nb.miss = 600, pi.mcar = 0.2, para = 10,
                    nb.cond = 2, nb.repbio = 3, nb.sample = 5,
                    m.c = 25, sd.c = 2, sd.rb = 0.5, sd.r = 0.2)

# Impute the missing values with the SLSA algorithm
slsa_imputed <- impute.slsa(tab = sim_res$dat.obs,
                            conditions = sim_res$condition,
                            repbio = sim_res$repbio)

# Estimate the mixture model from the observed and imputed data
mix_fit <- estim.mix(tab = sim_res$dat.obs, tab.imp = slsa_imputed,
                     conditions = sim_res$condition)

# Estimate the bounds needed for the MCAR probabilities
bounds <- estim.bound(tab = sim_res$dat.obs, conditions = sim_res$condition)

# Probabilities that missing values in the first column of result$tab.mod are MCAR
mcar_prob <- prob.mcar(b.l = bounds$tab.lower[, 1], b.u = bounds$tab.upper[, 1],
                       absc = mix_fit$abs.mod, pi.mcar = mix_fit$pi.mcar[1],
                       F.tot = mix_fit$F.tot[, 1], F.na = mix_fit$F.na[, 1])
## End(No test)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interpolateImageValues.R \name{interpolateImageValues} \alias{interpolateImageValues} \title{interpolateImageValues} \usage{ interpolateImageValues(img, points, type = "point", interpolation = "linear") } \arguments{ \item{img}{an antsImage} \item{points}{the locations of interest} \item{type}{'point' or 'index'} \item{interpolation}{options are: 'linear'} } \description{ return image values at points or indices }
/man/interpolateImageValues.Rd
no_license
jeffduda/DANTsR
R
false
true
501
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interpolateImageValues.R \name{interpolateImageValues} \alias{interpolateImageValues} \title{interpolateImageValues} \usage{ interpolateImageValues(img, points, type = "point", interpolation = "linear") } \arguments{ \item{img}{an antsImage} \item{points}{the locations of interest} \item{type}{'point' or 'index'} \item{interpolation}{options are: 'linear'} } \description{ return image values at points or indices }
rm(list = ls())

# XXX Manuscript uses asreml to do models (must pay for license)
## used asreml because easiest way to get 95% CIs on variance estimates
have_asreml <- FALSE #<-- change to TRUE if computer has the asreml software

# If do not have asreml, can still get same models with nlme, just can't do
## profile likelihood method to get 95% CIs on variance estimates.
library(nlme)

if(have_asreml){
  library(asreml)
  # create function to extract 95% CIs from profile likelihood
  ## uses nadiv:::proLik4 to profile the likelihood for a single variance
  proCI <- function(x){
    # Bug fix: original used `is(x) != "proLik"` (compares the whole class
    # vector) and the nonexistent `error()`; use inherits-style is() and stop().
    if(!is(x, "proLik")) stop("x is not a profile likelihood/nadiv::proLik")
    unlist(x[c("LCL", "UCL")])
  } #<-- end function
}

# need nadiv for use with asreml (need LRT function)
library(nadiv)

#FIXME set to your own path here
#setwd("<< Insert path on local computer >>")

# load data
microhab <- read.table(file = "microhabitat.txt",
  header = TRUE, sep = "\t") #<-- XXX important to include tab-separated

## Create subset of just artificial nests
art <- microhab[which(microhab$NestRand == 1), ]

################################################################################
# BEFORE ANALYSES, need to make categorical variables into factors
## Do this so won't make mistake using an integer and R interprets this as a
## covariate when you want it to be a categorical factor
str(microhab)

# Just doing the main ones, others *could* be added if used later
# Also, mean center and standardize iButton depth and canopy openness: use as covariates
microhab <- within(microhab, {
  NestRandFac <- as.factor(NestRand) #<-- create new column/don't write over
  SiteTypeFac <- as.factor(SiteType)
  NestClusterFac <- as.factor(NestCluster)
  scibdepth <- scale(ibdepth)
  scCanopy <- scale(Canopy)
})
# order based on nest type for consistency (and for asreml)
microhab <- microhab[order(microhab$NestRandFac), ]

# now for artificial nest subset
art <- within(art, {
  NestRandFac <- as.factor(NestRand) #<-- create new column/don't write over
  SiteTypeFac <- as.factor(SiteType)
  NestClusterFac <- as.factor(NestCluster)
  scibdepth <- scale(ibdepth)
  scCanopy <- scale(Canopy)
})

############################################################################
##################################
# ARTIFICIAL NEST SUBSET ANALYSES
##################################
# Simple one-way models of site type for each habitat variable
artmod.Slope <- lm(Slope ~ SiteTypeFac, data = art, na.action = na.omit)
summary(artmod.Slope)
anova(artmod.Slope)

artmod.Canopy <- lm(Canopy ~ SiteTypeFac, data = art, na.action = na.omit)
summary(artmod.Canopy)
anova(artmod.Canopy)

# Temperature variables also include continuous covariate of iButton depth
artmod.dailyMean_C <- lm(dailyMean_C ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit)
summary(artmod.dailyMean_C)
anova(artmod.dailyMean_C)

artmod.dailyMax_C <- lm(dailyMax_C ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit)
summary(artmod.dailyMax_C)
anova(artmod.dailyMax_C)

artmod.dailyMin_C <- lm(dailyMin_C ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit)
summary(artmod.dailyMin_C)
anova(artmod.dailyMin_C)

artmod.range <- lm(range ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit)
summary(artmod.range)
anova(artmod.range)

############################################################################
############################################################################
############################################################################
############################################################################
################################
# MICROHABITAT ANALYSES
################################
#### Real vs. random locations for each microhabitat variable, plus urbanization covar.
#### ...and urb*NestRand interaction
### random effect of nest cluster

##################################################
# Distance to water
##################################################
modDistTW <- lme(DistTW ~ urbPC1*NestRandFac,
                 random = ~ 1 | NestClusterFac,
                 weights = varIdent(form = ~ 1 | NestRandFac),
                 data = microhab, na.action = na.omit)
# use `modDistTW` (separate residual variances)
## but now see whether slopes differ or should we use a model with a single slope
anova(modDistTW)  #<-- no significant interaction

modDistTWb <- lme(DistTW ~ urbPC1 + NestRandFac,
                  random = ~ 1 | NestClusterFac,
                  weights = varIdent(form = ~ 1 | NestRandFac),
                  data = microhab, na.action = na.omit)

# Test for different residual variances
modDistTWc <- lme(DistTW ~ urbPC1*NestRandFac,
                  random = ~ 1 | NestClusterFac,
                  data = microhab, na.action = na.omit)
anova(modDistTW, modDistTWc)

summary(modDistTWb)  #<-- XXX use no interaction but separate residual variances

##############################
# asreml
if (have_asreml) {
  asrDistTW <- asreml(DistTW ~ urbPC1*NestRandFac,
                      random = ~ NestClusterFac,
                      residual = ~ dsum( ~ units | NestRandFac),
                      data = microhab,
                      na.action = list(y = "omit", x = "omit"), maxit = 20)
  asrDistTWb <- asreml(DistTW ~ urbPC1*NestRandFac,
                       random = ~ NestClusterFac,
                       residual = ~ idv(units),  #<-- necessary so model NOT parameterized ratios: var/sigma
                       data = microhab,
                       na.action = list(y = "omit", x = "omit"), maxit = 20)

  # convert asreml variances into lme SD and SD ratio
  ## use delta method to approximate std. errors on calculated SD and SD ratio
  rbind(vpredict(asrDistTW, ~ sqrt(V1)),
        vpredict(asrDistTW, ~ sqrt(V2)),
        vpredict(asrDistTW, ~ sqrt(V3) / sqrt(V2)))
  summary(modDistTW)

  # Profile likelihood confidence intervals
  DistTW.v1 <- proLik4(asrDistTW, component = ~ V1,
                       parallel = TRUE, ncores = 8)
  DistTW.v2 <- proLik4(asrDistTW, component = ~ V2, G = FALSE,
                       parallel = TRUE, ncores = 8)
  DistTW.v3 <- proLik4(asrDistTW, component = ~ V3, G = FALSE,
                       parallel = TRUE, ncores = 8)

  rbind(vpredict(asrDistTWb, ~ sqrt(V1)),
        vpredict(asrDistTWb, ~ sqrt(V2)))
  summary(modDistTWc)

  # do the likelihood ratio test of asreml models to compare to anova() results
  LRTest(asrDistTW$loglik, asrDistTWb$loglik, df = 1)
  anova(modDistTW, modDistTWc)
  #XXX asreml agrees with variance components of lme
}
##############################

##################################################
# Slope
##################################################
modSlope <- lme(Slope ~ urbPC1*NestRandFac,
                random = ~ 1 | NestClusterFac,
                weights = varIdent(form = ~ 1 | NestRandFac),
                data = microhab, na.action = na.omit)
## see whether slopes differ or should use model with single slope
anova(modSlope)  #<-- no significant interaction

modSlopeb <- lme(Slope ~ urbPC1 + NestRandFac,
                 random = ~ 1 | NestClusterFac,
                 weights = varIdent(form = ~ 1 | NestRandFac),
                 data = microhab, na.action = na.omit)

# Test for different residual variances
modSlopec <- lme(Slope ~ urbPC1*NestRandFac,
                 random = ~ 1 | NestClusterFac,
                 data = microhab, na.action = na.omit)
anova(modSlope, modSlopec)  #<-- no significant diff: use 1 residual variance

summary(modSlope)
summary(modSlopec)  #<-- XXX Use this model

##############################
# asreml
if (have_asreml) {
  asrSlope <- asreml(Slope ~ urbPC1*NestRandFac,
                     random = ~ NestClusterFac,
                     residual = ~ dsum( ~ units | NestRandFac),
                     data = microhab,
                     na.action = list(y = "omit", x = "omit"), maxit = 20)
  asrSlopeb <- asreml(Slope ~ urbPC1*NestRandFac,
                      random = ~ NestClusterFac,
                      residual = ~ idv(units),  #<-- necessary so model NOT parameterized ratios: var/sigma
                      data = microhab,
                      na.action = list(y = "omit", x = "omit"), maxit = 20)

  # convert asreml variances into lme SD and SD ratio
  ## use delta method to approximate std. errors on calculated SD and SD ratio
  rbind(vpredict(asrSlope, ~ sqrt(V1)),
        vpredict(asrSlope, ~ sqrt(V2)),
        vpredict(asrSlope, ~ sqrt(V3) / sqrt(V2)))
  summary(modSlope)

  # Profile likelihood confidence intervals
  Slope.v1 <- proLik4(asrSlope, component = ~ V1,
                      parallel = TRUE, ncores = 8)
  Slope.v2 <- proLik4(asrSlope, component = ~ V2, G = FALSE,
                      parallel = TRUE, ncores = 8)
  Slope.v3 <- proLik4(asrSlope, component = ~ V3, G = FALSE,
                      parallel = TRUE, ncores = 8)

  rbind(vpredict(asrSlopeb, ~ sqrt(V1)),
        vpredict(asrSlopeb, ~ sqrt(V2)))
  summary(modSlopec)

  # do the likelihood ratio test of asreml models to compare to anova() results
  LRTest(asrSlope$loglik, asrSlopeb$loglik, df = 1)
  anova(modSlope, modSlopec)
  #XXX asreml agrees with variance components of lme
}
##############################

##################################################
# Canopy Openness
##################################################
modCanopy <- lme(Canopy ~ urbPC1*NestRandFac,
                 random = ~ 1 | NestClusterFac,
                 weights = varIdent(form = ~1 | NestRandFac),
                 data = microhab, na.action = na.omit)
summary(modCanopy)
## but now see whether slopes differ or should we use a model with a single slope
anova(modCanopy)  #<-- NO significant interaction

modCanopyb <- lme(Canopy ~ urbPC1 + NestRandFac,
                  random = ~ 1 | NestClusterFac,
                  weights = varIdent(form = ~ 1 | NestRandFac),
                  data = microhab, na.action = na.omit)
summary(modCanopyb)  #<-- XXX Use this model

# Test for different residual variances
modCanopyc <- lme(Canopy ~ urbPC1*NestRandFac,
                  random = ~ 1 | NestClusterFac,
                  data = microhab, na.action = na.omit)
summary(modCanopyc)
anova(modCanopy, modCanopyc)  #<-- XXX significant diff: use 2 residual variance

##############################
# asreml
if (have_asreml) {
  asrCanopy <- asreml(Canopy ~ urbPC1*NestRandFac,
                      random = ~ NestClusterFac,
                      residual = ~ dsum( ~ units | NestRandFac),
                      data = microhab,
                      na.action = list(y = "omit", x = "omit"), maxit = 20)
  asrCanopyb <- asreml(Canopy ~ urbPC1*NestRandFac,
                       random = ~ NestClusterFac,
                       residual = ~ idv(units),  #<-- necessary so model NOT parameterized ratios: var/sigma
                       data = microhab,
                       na.action = list(y = "omit", x = "omit"), maxit = 20)

  # convert asreml variances into lme SD and SD ratio
  ## use delta method to approximate std. errors on calculated SD and SD ratio
  rbind(vpredict(asrCanopy, ~ sqrt(V1)),
        vpredict(asrCanopy, ~ sqrt(V2)),
        vpredict(asrCanopy, ~ sqrt(V3) / sqrt(V2)))
  summary(modCanopy)

  # Profile likelihood confidence intervals
  Canopy.v1 <- proLik4(asrCanopy, component = ~ V1,
                       parallel = TRUE, ncores = 8)
  Canopy.v2 <- proLik4(asrCanopy, component = ~ V2, G = FALSE,
                       parallel = TRUE, ncores = 8)
  Canopy.v3 <- proLik4(asrCanopy, component = ~ V3, G = FALSE,
                       parallel = TRUE, ncores = 8)

  rbind(vpredict(asrCanopyb, ~ sqrt(V1)),
        vpredict(asrCanopyb, ~ sqrt(V2)))
  summary(modCanopyc)

  # do the likelihood ratio test of asreml models to compare to anova() results
  LRTest(asrCanopy$loglik, asrCanopyb$loglik, df = 1)
  anova(modCanopy, modCanopyc)
  #XXX asreml agrees with variance components of lme
}
##############################
# use `modCanopyb` (separate residual variances)

##################################################
# Daily Mean Temperature
##################################################
moddailyMean_C <- lme(dailyMean_C ~ urbPC1*NestRandFac + scibdepth,
                      random = ~ 1 | NestClusterFac,
                      weights = varIdent(form = ~ 1 | NestRandFac),
                      data = microhab, na.action = na.omit)
summary(moddailyMean_C)
## but now see whether slopes differ or should we use a model with a single slope
anova(moddailyMean_C)  #<-- NO significant interaction

moddailyMean_Cb <- lme(dailyMean_C ~ urbPC1 + NestRandFac + scibdepth,
                       random = ~ 1 | NestClusterFac,
                       weights = varIdent(form = ~ 1 | NestRandFac),
                       data = microhab, na.action = na.omit)
summary(moddailyMean_Cb)

# See if separate residual variances needed
moddailyMean_Cc <- lme(dailyMean_C ~ urbPC1*NestRandFac + scibdepth,
                       random = ~ 1 | NestClusterFac,
                       data = microhab, na.action = na.omit)
summary(moddailyMean_Cc)  #<-- XXX Use this model
anova(moddailyMean_C, moddailyMean_Cc)  #<-- XXX No significant diff.

##############################
# asreml
if (have_asreml) {
  asrdailyMean_C <- asreml(dailyMean_C ~ urbPC1*NestRandFac + scibdepth,
                           random = ~ NestClusterFac,
                           residual = ~ dsum( ~ units | NestRandFac),
                           data = microhab,
                           na.action = list(y = "omit", x = "omit"), maxit = 20)
  # re-run from the previous solution (twice) to make sure converged
  asrdailyMean_C <- update(asrdailyMean_C, maxit = 10)
  asrdailyMean_C <- update(asrdailyMean_C, maxit = 10)
  asrdailyMean_Cb <- asreml(dailyMean_C ~ urbPC1*NestRandFac + scibdepth,
                            random = ~ NestClusterFac,
                            residual = ~ idv(units),  #<-- necessary so model NOT parameterized ratios: var/sigma
                            data = microhab,
                            na.action = list(y = "omit", x = "omit"), maxit = 20)

  # convert asreml variances into lme SD and SD ratio
  ## use delta method to approximate std. errors on calculated SD and SD ratio
  rbind(vpredict(asrdailyMean_C, ~ sqrt(V1)),
        vpredict(asrdailyMean_C, ~ sqrt(V3)),
        vpredict(asrdailyMean_C, ~ sqrt(V2) / sqrt(V3)))
  summary(moddailyMean_C)

  # Profile likelihood confidence intervals
  dailyMean_C.v1 <- proLik4(asrdailyMean_C, component = ~ V1,
                            parallel = TRUE, ncores = 8)
  dailyMean_C.v2 <- proLik4(asrdailyMean_C, component = ~ V2, G = FALSE,
                            parallel = TRUE, ncores = 8)
  dailyMean_C.v3 <- proLik4(asrdailyMean_C, component = ~ V3, G = FALSE,
                            parallel = TRUE, ncores = 8)
  par(mfrow = c(3, 1))
  plot(dailyMean_C.v1)
  plot(dailyMean_C.v2)
  plot(dailyMean_C.v3)

  rbind(vpredict(asrdailyMean_Cb, ~ sqrt(V1)),
        vpredict(asrdailyMean_Cb, ~ sqrt(V2)))
  summary(moddailyMean_Cc)

  # do the likelihood ratio test of asreml models to compare to anova() results
  LRTest(asrdailyMean_C$loglik, asrdailyMean_Cb$loglik, df = 1)
  anova(moddailyMean_C, moddailyMean_Cc)
  ## the test of whether or not the variances differ yields the same interpretation
  ### across ALL software
}
##############################
# use `moddailyMean_C` (2 residual variances) based on profile likelihood CIs

##################################################
# Daily Max Temperature
##################################################
moddailyMax_C <- lme(dailyMax_C ~ urbPC1*NestRandFac + scibdepth,
                     random = ~ 1 | NestClusterFac,
                     weights = varIdent(form = ~ 1 | NestRandFac),
                     data = microhab, na.action = na.omit)
summary(moddailyMax_C)
## see whether slopes differ or should use model with single slope
anova(moddailyMax_C)  #<-- NO significant interaction

moddailyMax_Cb <- lme(dailyMax_C ~ urbPC1 + NestRandFac + scibdepth,
                      random = ~ 1 | NestClusterFac,
                      weights = varIdent(form = ~ 1 | NestRandFac),
                      data = microhab, na.action = na.omit)
summary(moddailyMax_Cb)

# Test for separate residual variances
moddailyMax_Cc <- lme(dailyMax_C ~ urbPC1*NestRandFac + scibdepth,
                      random = ~ 1 | NestClusterFac,
                      data = microhab, na.action = na.omit)
summary(moddailyMax_Cc)
anova(moddailyMax_C, moddailyMax_Cc)
# use `moddailyMax_Cc` (No difference between residual variances)
summary(moddailyMax_Cc)  #<-- XXX Use this model

##############################
# asreml
if (have_asreml) {
  asrdailyMax_C <- asreml(dailyMax_C ~ urbPC1*NestRandFac + scibdepth,
                          random = ~ NestClusterFac,
                          residual = ~ dsum( ~ units | NestRandFac),
                          data = microhab,
                          na.action = list(y = "omit", x = "omit"), maxit = 20)
  asrdailyMax_C <- update(asrdailyMax_C, maxit = 10)
  asrdailyMax_Cb <- asreml(dailyMax_C ~ urbPC1*NestRandFac + scibdepth,
                           random = ~ NestClusterFac,
                           residual = ~ idv(units),  #<-- necessary so model NOT parameterized ratios: var/sigma
                           data = microhab,
                           na.action = list(y = "omit", x = "omit"), maxit = 20)

  # convert asreml variances into lme SD and SD ratio
  ## use delta method to approximate std. errors on calculated SD and SD ratio
  rbind(vpredict(asrdailyMax_C, ~ sqrt(V1)),
        vpredict(asrdailyMax_C, ~ sqrt(V3)),
        vpredict(asrdailyMax_C, ~ sqrt(V2) / sqrt(V3)))
  summary(moddailyMax_C)

  # Profile likelihood confidence intervals
  dailyMax_C.v1 <- proLik4(asrdailyMax_C, component = ~ V1,
                           parallel = TRUE, ncores = 8,
                           nsample.units = 1, nse = 4)
  dailyMax_C.v2 <- proLik4(asrdailyMax_C, component = ~ V2, G = FALSE,
                           parallel = TRUE, ncores = 8)
  dailyMax_C.v3 <- proLik4(asrdailyMax_C, component = ~ V3, G = FALSE,
                           parallel = TRUE, ncores = 8,
                           nsample.units = 1, nse = 1.5)

  # Have to find upper CI limit manually
  #XXX MUST do var.estimate replacement FIRST XXX
  dailyMax_C.v3$var.estimates <- dailyMax_C.v3$var.estimates[!is.na(dailyMax_C.v3$lambdas)]  #XXX
  dailyMax_C.v3$lambdas <- dailyMax_C.v3$lambdas[!is.na(dailyMax_C.v3$lambdas)]

  # chi-square cutoff defining the 95% profile likelihood limit
  chi.val <- 0.5 * qchisq(1 - 0.05, df = 1)
  # grab the variance parameter table so V3 can be constrained ("F"=fixed)
  fllmd <- asreml::update.asreml(object = asrdailyMax_C,
                                 start.values = TRUE)$vparameters.table
  fllmd2 <- fllmd
  fllmd2[3, 3] <- "F"

  # coarse grid search for the constrained-likelihood crossing point
  for (v in seq(17.05, 17.15, by = 0.005)) {
    fllmd2[3, 2] <- v
    conMod <- update.asreml(asrdailyMax_C, random = ~., R.param = fllmd2)
    # FIX: original passed `maxiter = 10`; asreml-R 4's argument is `maxit`
    # (as used everywhere else in this script), so `maxiter` was ignored
    conMod <- update(conMod, maxit = 10)
    lout <- asreml::lrt(conMod, asrdailyMax_C)$"LR-statistic"
    dailyMax_C.v3 <- within(dailyMax_C.v3, {
      lambdas <- c(lambdas, lout)
      var.estimates <- c(var.estimates, v)})
  }
  dailyMax_C.v3
  chi.val

  # finer grid search near the crossing point
  for (v in seq(17.14, 17.145, by = 0.0001)) {
    fllmd2[3, 2] <- v
    conMod <- update.asreml(asrdailyMax_C, random = ~., R.param = fllmd2)
    conMod <- update(conMod, maxit = 10)  #<-- FIX: `maxit`, not `maxiter`
    lout <- asreml::lrt(conMod, asrdailyMax_C)$"LR-statistic"
    dailyMax_C.v3 <- within(dailyMax_C.v3, {
      lambdas <- c(lambdas, lout)
      var.estimates <- c(var.estimates, v)})
  }
  dailyMax_C.v3
  chi.val

  # final single evaluation at the located limit
  v <- 17.14016
  fllmd2[3, 2] <- v
  conMod <- update.asreml(asrdailyMax_C, random = ~., R.param = fllmd2)
  conMod <- update(conMod, maxit = 10)  #<-- FIX: `maxit`, not `maxiter`
  lout <- asreml::lrt(conMod, asrdailyMax_C)$"LR-statistic"
  dailyMax_C.v3 <- within(dailyMax_C.v3, {
    lambdas <- c(lambdas, lout)
    var.estimates <- c(var.estimates, v)})
  dailyMax_C.v3
  chi.val

  # re-order profile by variance estimate, then record the upper limit
  dailyMax_C.v3 <- within(dailyMax_C.v3, {
    lambdas <- lambdas[order(var.estimates)]
    var.estimates <- var.estimates[order(var.estimates)]})
  dailyMax_C.v3$UCL <- with(dailyMax_C.v3,
    var.estimates[var.estimates > 10][which.min(abs(lambdas[var.estimates > 10] - chi.val))])

  par(mfrow = c(3, 1))
  plot(dailyMax_C.v1)
  plot(dailyMax_C.v2)
  plot(dailyMax_C.v3)

  rbind(vpredict(asrdailyMax_Cb, ~ sqrt(V1)),
        vpredict(asrdailyMax_Cb, ~ sqrt(V2)))
  summary(moddailyMax_Cc)

  # do the likelihood ratio test of asreml models to compare to anova() results
  LRTest(asrdailyMax_C$loglik, asrdailyMax_Cb$loglik, df = 1)
  anova(moddailyMax_C, moddailyMax_Cc)
  #XXX asreml MOSTLY agrees with variance components of lme
  ## the test of whether or not the variances differ yields the same interpretation
  ### across ALL software
}
##############################

##################################################
# Daily Min Temperature
##################################################
moddailyMin_C <- lme(dailyMin_C ~ urbPC1*NestRandFac + scibdepth,
                     random = ~ 1 | NestClusterFac,
                     weights = varIdent(form = ~ 1 | NestRandFac),
                     data = microhab, na.action = na.omit)
summary(moddailyMin_C)
anova(moddailyMin_C)  #<-- NO significant interaction

moddailyMin_Cb <- lme(dailyMin_C ~ urbPC1 + NestRandFac + scibdepth,
                      random = ~ 1 | NestClusterFac,
                      weights = varIdent(form = ~ 1 | NestRandFac),
                      data = microhab, na.action = na.omit)

# Test for separate residual variances
## drop separate residual variances
moddailyMin_Cc <- lme(dailyMin_C ~ urbPC1*NestRandFac + scibdepth,
                      random = ~ 1 | NestClusterFac,
                      data = microhab, na.action = na.omit)
summary(moddailyMin_Cc)
anova(moddailyMin_C, moddailyMin_Cc)  #<-- no significant diff. XXX p=0.08 XXX

# use `moddailyMin_C` (2 different residual variances since profile 95%CI do NOT overlap)
summary(moddailyMin_C)  #<-- XXX Use this model

##############################
# asreml
if (have_asreml) {
  asrdailyMin_C <- asreml(dailyMin_C ~ urbPC1*NestRandFac + scibdepth,
                          random = ~ NestClusterFac,
                          residual = ~ dsum( ~ units | NestRandFac),
                          data = microhab,
                          na.action = list(y = "omit", x = "omit"), maxit = 20)
  asrdailyMin_C <- update(asrdailyMin_C, maxit = 10)
  asrdailyMin_Cb <- asreml(dailyMin_C ~ urbPC1*NestRandFac + scibdepth,
                           random = ~ NestClusterFac,
                           residual = ~ idv(units),  #<-- necessary so model NOT parameterized ratios: var/sigma
                           data = microhab,
                           na.action = list(y = "omit", x = "omit"), maxit = 20)

  # convert asreml variances into lme SD and SD ratio
  ## use delta method to approximate std. errors on calculated SD and SD ratio
  rbind(vpredict(asrdailyMin_C, ~ sqrt(V1)),
        vpredict(asrdailyMin_C, ~ sqrt(V3)),
        vpredict(asrdailyMin_C, ~ sqrt(V2) / sqrt(V3)))
  summary(moddailyMin_C)

  # Profile likelihood confidence intervals
  dailyMin_C.v1 <- proLik4(asrdailyMin_C, component = ~ V1,
                           parallel = TRUE, ncores = 8)
  dailyMin_C.v2 <- proLik4(asrdailyMin_C, component = ~ V2, G = FALSE,
                           parallel = TRUE, ncores = 8)
  dailyMin_C.v3 <- proLik4(asrdailyMin_C, component = ~ V3, G = FALSE,
                           parallel = TRUE, ncores = 8)
  par(mfrow = c(3, 1))
  plot(dailyMin_C.v1)
  plot(dailyMin_C.v2)
  plot(dailyMin_C.v3)

  rbind(vpredict(asrdailyMin_Cb, ~ sqrt(V1)),
        vpredict(asrdailyMin_Cb, ~ sqrt(V2)))
  summary(moddailyMin_Cc)

  # do the likelihood ratio test of asreml models to compare to anova() results
  LRTest(asrdailyMin_C$loglik, asrdailyMin_Cb$loglik, df = 1)
  anova(moddailyMin_C, moddailyMin_Cc)
  #XXX asreml MOSTLY agrees with variance components of lme
  ## the test of whether or not the variances differ yields the same interpretation
  ### across ALL software
}
##############################

##################################################
# Daily Temperature Range
##################################################
modrange <- lme(range ~ urbPC1*NestRandFac + scibdepth,
                random = ~ 1 | NestClusterFac,
                weights = varIdent(form = ~ 1 | NestRandFac),
                data = microhab, na.action = na.omit)
summary(modrange)
anova(modrange)  #<-- NO significant interaction

modrangeb <- lme(range ~ urbPC1 + NestRandFac + scibdepth,
                 random = ~ 1 | NestClusterFac,
                 weights = varIdent(form = ~ 1 | NestRandFac),
                 data = microhab, na.action = na.omit)
summary(modrangeb)

# Now test for separate residual variances
modrangec <- lme(range ~ urbPC1*NestRandFac + scibdepth,
                 random = ~ 1 | NestClusterFac,
                 data = microhab, na.action = na.omit)
summary(modrangec)  #<-- XXX Use this model
anova(modrange, modrangec)  #<-- XXX Non-significance difference between residuals
# use `modrangec` (No difference between residual variances)
##############################
# asreml
if (have_asreml) {
  asrrange <- asreml(range ~ urbPC1*NestRandFac + scibdepth,
                     random = ~ NestClusterFac,
                     residual = ~ dsum( ~ units | NestRandFac),
                     data = microhab,
                     na.action = list(y = "omit", x = "omit"), maxit = 20)
  asrrangeb <- asreml(range ~ urbPC1*NestRandFac + scibdepth,
                      random = ~ NestClusterFac,
                      residual = ~ idv(units),  #<-- necessary so model NOT parameterized ratios: var/sigma
                      data = microhab,
                      na.action = list(y = "omit", x = "omit"), maxit = 20)

  # convert asreml variances into lme SD and SD ratio
  ## use delta method to approximate std. errors on calculated SD and SD ratio
  rbind(vpredict(asrrange, ~ sqrt(V1)),
        vpredict(asrrange, ~ sqrt(V3)),
        vpredict(asrrange, ~ sqrt(V2) / sqrt(V3)))
  summary(modrange)

  # Profile likelihood confidence intervals
  range.v1 <- proLik4(asrrange, component = ~ V1,
                      parallel = TRUE, ncores = 8)
  range.v2 <- proLik4(asrrange, component = ~ V2, G = FALSE,
                      parallel = TRUE, ncores = 8)
  range.v3 <- proLik4(asrrange, component = ~ V3, G = FALSE,
                      parallel = TRUE, ncores = 8)
  par(mfrow = c(3, 1))
  plot(range.v1)
  plot(range.v2)
  plot(range.v3)

  rbind(vpredict(asrrangeb, ~ sqrt(V1)),
        vpredict(asrrangeb, ~ sqrt(V2)))
  summary(modrangec)

  # do the likelihood ratio test of asreml models to compare to anova() results
  LRTest(asrrange$loglik, asrrangeb$loglik, df = 1)
  anova(modrange, modrangec)
  #XXX asreml MOSTLY agrees with variance components of lme
  ## the test of whether or not the variances differ yields the same interpretation
  ### across ALL software
}
##############################

###############################################################################
###############################################################################
#### Save asreml results if necessary
if (have_asreml) {
  ciBaseNms <- c("Canopy", "dailyMax_C", "dailyMean_C", "dailyMin_C",
                 "DistTW", "range", "Slope")
  # Save raw profile likelihoods
  save(list = c(paste0(ciBaseNms, ".v1"),
                paste0(ciBaseNms, ".v2"),
                paste0(ciBaseNms, ".v3")),
       file = "./microhabProfileCIs.rda")
}

###############################################################################
###############################################################################
# Figure with urbanization scores by location/location type
###############################################################################
# First, make simple dataframe with sites (one row per unique site)
locations <- microhab[which(!duplicated(microhab$siteAbbr)),
                      match(c("SiteName", "siteAbbr", "SiteType", "HumDist",
                              "urbPC1", "urbPC2"), names(microhab))]

# Give each location a number corresponding to table in main text of Manuscript
## Missing the Agricultural Heritage park (4) and Notasulga pond (15)
locations$ind <- c(3, 2, 11, 10, 5, 7, 6, 14, 1, 13, 12, 9, 8)

# now make small dataframe for plotting the indices at the urbPC1 scores
## and condense to a single unique urbPC1
### sort first, so indices will increase when paste duplicated ones back in
locTcks <- locations[order(locations$ind), c("urbPC1", "ind")]
locTcks <- locTcks[which(!duplicated(locTcks$urbPC1)), ]
locTcks$ind <- as.character(locTcks$ind)

# Now find matches and paste those indices in with existing ones
# FIX: seq_len() instead of 1:nrow() so a zero-row locTcks cannot
# produce the degenerate 1:0 loop
for (i in seq_len(nrow(locTcks))) {
  locTcks$ind[i] <- paste(locations$ind[which(locTcks$urbPC1[i] == locations$urbPC1)],
                          collapse = ",")
}
## order by urbPC1
locTcks <- locTcks[order(locTcks$urbPC1), ]

# Add a vertical adjustment to space out text on figure
# FIX: rep_len() replaces rep(...)[1:n] -- same values, no 1:n footgun
locTcks$vadj <- rep_len(c(0, 0.2), nrow(locTcks))

# Snag some colors from base R's `Okabe-Ito` palette
## suitable palette for color-related vision accessibility
### first have a look
palOI <- palette.colors(NULL, "Okabe-Ito")
pie(rep(1, length(palOI)), col = palOI, labels = names(palOI))
# grab subset of 3 colors for figure
palOI3 <- palOI[c("skyblue", "bluishgreen", "vermillion")]
dev.off()  #<-- turn off pie chart

# Need multiple "rug" plots to do different colors
## set rug plot arguments here
tcksz <- 0.05  #<-- "ticksize" positive then ticks plot in towards center of fig.
tcklwd <- 4    #<-- "lwd"
rpos <- 0.26   #<-- "pos" to make sure end of line doesn't extend below axis
xpos <- 0.25   #<-- "pos" of axis(1) to bring down to make room for indices

#XXX for saving figures - use:
pdf(file = "./Fig1_HumDisturbCat_vs_urb.pdf", width = 9, height = 5)
# use `par()` to set up some features of the entire figure
par(mar = c(4.5, 5.5, 2.2, 0.1),  #<-- space around panel (bottom, left, top, right)
    mgp = c(3, 1, 0),             #<-- adjustment of axis title, labels, and line
    cex.axis = 1.25, cex.lab = 1.6)  #<-- scaling for axis labels and axis title

boxplot(urbPC1 ~ SiteType, data = locations,
        horizontal = TRUE, axes = FALSE, col = palOI3,
        xlab = "Urbanization level (PC1)",
        ylab = "Study area disturbance level",
        xlim = c(0.5, 3.5),   #<-- switched: for final y-axis when `horizontal = TRUE`
        ylim = c(-1.6, 5.3))  #<-- switched so eventual x-axis

rug(locations$urbPC1[locations$HumDist == "High"],
    side = 1,  #<-- side = 1 is bottom
    ticksize = tcksz, lwd = tcklwd, pos = rpos,
    col = palOI3[3])  #<-- color for ticks
rug(locations$urbPC1[locations$HumDist == "Intermediate"],
    side = 1,  #<-- side = 1 is bottom
    ticksize = tcksz, lwd = tcklwd, pos = rpos,
    col = palOI3[2])  #<-- color for ticks
rug(locations$urbPC1[locations$HumDist == "Low"],
    side = 1,  #<-- side = 1 is bottom
    ticksize = tcksz, lwd = tcklwd, pos = rpos,
    col = palOI3[1])  #<-- color for ticks

# indices to associate rug plot ticks with table in main manuscript
#text(x = locTcks$urbPC1, y = 0.5 + locTcks$vadj, labels = locTcks$ind, cex = 0.8)

axis(1, at = seq(-1.6, 5.2, 0.4), labels = FALSE, lwd = 1.6, pos = xpos)  #<-- just the axis
axis(1, at = seq(-1.6, 5.2, 0.8), lwd = 0, pos = xpos)  #<-- just the labels
axis(2, at = seq.int(3), labels = c("Low", "Intermediate", "High"), lwd = 1.6)

dev.off()  #<-- XXX MUST do this to close pdf file connection

###############################################################################
7-panel Figure of all above variables ############################################################################### ## First, to create prediction lines need a new dataset that gives values over which we want predictions ### choose even spacing between minimum and maximum urbanization scores ### assign this to both random and nest sites ndata <- with(microhab, #<--avoid `microhab$` each time data.frame(urbPC1 = rep(seq(from = min(urbPC1, na.rm = TRUE), to = max(urbPC1, na.rm = TRUE), length.out = 100), 2), NestRandFac = as.factor(rep(c(0, 1), each = 100)), scibdepth = rep(seq(from = min(scibdepth, na.rm = TRUE), to = max(scibdepth, na.rm = TRUE), length.out = 100), 2))) # Set up a few things that will be used over and over again among all 8 plots xlab_in <- "Urbanization level (PC1)" xaxis <- seq(from = -2, to = 5, by = 1) # what should we set x-axis limits at range(microhab$urbPC1, na.rm = TRUE) xlim_in <- c(-2.05, 5.2) degCexpr <- "(\u00B0C)" #<-- degrees Celsius expression to paste in nestPtSymb <- 21 nestPtCols <- c(bg = "#03244d", brd = "grey40") #<-- "BLUE" nestPtCx <- 1.7 randPtSymb <- 22 randPtCols <- c(bg = "#e86823", brd = "grey20") #<-- "ORANGE" randPtCx <- 1.1 jitfac <- 3.6 #<-- jitter factor reglinewd <- 3.2 #<-- line width of all regression lines ptLwd <- 1.0 #<-- line width of point border #XXX for saving figures - use: pdf(file = "./Fig3_microhab_vs_urb.pdf", width = 9, height = 12) # use `par()` to set up some features of the entire figure ## `mfrow` designates the number of rows x columns to create in the figure par(mfrow = c(4, 2), #<-- (rows, columns) mar = c(5, 6.2, 1.9, 0.5), #<-- space around each panel (bottom, left, top, right) mgp = c(3, 1, 0), #<-- adjustment of axis title, labels, and line cex.axis = 1.5, cex.lab = 1.6) #<-- scaling for axis labels and axis title ################################ # Distance to Water (DistTW) ################################ plot(DistTW ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = 
FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 265), xlab = xlab_in, ylab = "Distance to water (m)") # points first (to put in background) ## Nest first points(DistTW ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(DistTW ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modDistTW, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) #<-- make thicker so not covered ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 250, 50)) mtext(text = expression((bold(A))), side = 3, line = -0.4, adj = -0.25, cex = 1.3) ################################ # Slope ################################ plot(Slope ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 65), xlab = xlab_in, ylab = "Slope (degrees)") # points first (to put in background) ## Nest first points(Slope ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(Slope ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modSlope, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, 
data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 60, 20)) mtext(text = expression((bold(B))), side = 3, line = -0.4, adj = -0.25, cex = 1.3) ################################ # Canopy ################################ plot(Canopy ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 100), xlab = xlab_in, ylab = "Canopy openness (%)") # points first (to put in background) ## Nest first points(Canopy ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(Canopy ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modCanopy, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 100, 20)) mtext(text = expression((bold(C))), side = 3, line = -0.4, adj = -0.25, cex = 1.3) ################################ # dailyMean_C ################################ plot(dailyMean_C ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(22.5, 34), xlab = xlab_in, ylab = paste("Daily mean temp. 
", degCexpr)) # points first (to put in background) ## Nest first points(dailyMean_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(dailyMean_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(moddailyMean_C, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(24, 34, 2)) mtext(text = expression((bold(D))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) ################################ # dailyMax_C ################################ plot(dailyMax_C ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(23, 45), xlab = xlab_in, ylab = paste("Daily max. temp. 
", degCexpr)) # points first (to put in background) ## Nest first points(dailyMax_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(dailyMax_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(moddailyMax_C, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(25, 45, 5)) mtext(text = expression((bold(E))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) ################################ # dailyMin_C ################################ plot(dailyMin_C ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, xlab = xlab_in, ylab = paste("Daily min. temp. 
", degCexpr)) # points first (to put in background) ## Nest first points(dailyMin_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(dailyMin_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(moddailyMin_C, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2) mtext(text = expression((bold(F))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) ################################ # range ################################ plot(range ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 16), xlab = xlab_in, ylab = paste("Daily temp. 
range ", degCexpr)) # points first (to put in background) ## Nest first points(range ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(range ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modrange, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 16, 4)) mtext(text = expression((bold(G))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) # END OF PLOTTING ############################# dev.off() #<-- XXX MUST do this to close pdf file connection ################################################################################ ################################################################################ # For results tables Using `asreml` ## "NestRandFac0" == natural and "NestRandFac1" == artifical # Create quick function to extract fixed effect estimates and standard errors from asreml fxdFun <- function(mod){ fxdtmp <- cbind(rev(mod$coefficients$fixed), rev(sqrt(mod$vcoeff$fixed))) dimnames(fxdtmp) <- list(rev(rownames(mod$coefficients$fixed)), c("Est", "Std.Err")) fxdtmp[which(fxdtmp[, 1] != 0.00 & fxdtmp[, 2] != 0.00), ] } ####################### fxdFun(asrDistTW) wald(asrDistTW) summary(asrDistTW)$varcomp rbind(proCI(DistTW.v2), proCI(DistTW.v3)) lrt(asrDistTWb, asrDistTW) fxdFun(asrSlope) wald(asrSlope) summary(asrSlope)$varcomp rbind(proCI(Slope.v2), proCI(Slope.v3)) lrt(asrSlopeb, asrSlope) fxdFun(asrCanopy) 
wald(asrCanopy) summary(asrCanopy)$varcomp rbind(proCI(Canopy.v2), proCI(Canopy.v3)) lrt(asrCanopyb, asrCanopy) fxdFun(asrdailyMean_C) wald(asrdailyMean_C) summary(asrdailyMean_C)$varcomp rbind(proCI(dailyMean_C.v2), proCI(dailyMean_C.v3)) lrt(asrdailyMean_Cb, asrdailyMean_C) fxdFun(asrdailyMax_C) wald(asrdailyMax_C) summary(asrdailyMax_C)$varcomp rbind(proCI(dailyMax_C.v2), proCI(dailyMax_C.v3)) lrt(asrdailyMax_Cb, asrdailyMax_C) fxdFun(asrdailyMin_C) wald(asrdailyMin_C) summary(asrdailyMin_C)$varcomp rbind(proCI(dailyMin_C.v2), proCI(dailyMin_C.v3)) lrt(asrdailyMin_Cb, asrdailyMin_C) fxdFun(asrrange) wald(asrrange) summary(asrrange)$varcomp rbind(proCI(range.v2), proCI(range.v3)) lrt(asrrangeb, asrrange)
/microhabitat_ANALYSES.R
permissive
qgevoeco/Caldwell_turtle_nest-choice-predation
R
false
false
41,700
r
rm(list = ls()) # XXX Manuscript uses asreml to do models (must pay for license) ## used asreml because easiest way to get 95% CIs on variance estimates have_asreml <- FALSE #<-- change to TRUE if computer has the asreml software # If do not have asreml, can still get same models with nlme, just can't do ## profile likelihood method to get 95% CIs on variance estimates. library(nlme) if(have_asreml){ library(asreml) # create function to extract 95% CIs from profile likelihood ## uses nadiv:::proLik4 to profile the likelihood for a single variance proCI <- function(x){ if(is(x) != "proLik") error("x is not a profile likelihood/nadiv::proLik") unlist(x[c("LCL", "UCL")]) } #<-- end function } # need nadiv for use with asreml (need LRT function) library(nadiv) #FIXME set to your own path here #setwd("<< Insert path on local computer >>") # load data microhab <- read.table(file = "microhabitat.txt", header = TRUE, sep = "\t") #<-- XXX important to include tab-separated ## Create subset of just artificial nests art <- microhab[which(microhab$NestRand == 1), ] ################################################################################ # BEFORE ANALYSES, need to make categorical variables into factors ## Do this so won't make mistake using an integer and R interprets this as a covariate when you want it to be a categorical factor str(microhab) # Just doing the main ones, others *could* be added if used later # Also, Mean center and standardize iButton depth and canopy openness: use as covariates microhab <- within(microhab, { NestRandFac <- as.factor(NestRand) #<-- create new column/don't write over SiteTypeFac <- as.factor(SiteType) NestClusterFac <- as.factor(NestCluster) scibdepth <- scale(ibdepth) scCanopy <- scale(Canopy) }) # order based on nest type for consistency (and for asreml) microhab <- microhab[order(microhab$NestRandFac), ] # now for artificial nest subset art <- within(art, { NestRandFac <- as.factor(NestRand) #<-- create new column/don't write over 
SiteTypeFac <- as.factor(SiteType) NestClusterFac <- as.factor(NestCluster) scibdepth <- scale(ibdepth) scCanopy <- scale(Canopy) }) ############################################################################ ################################## # ARTIFICIAL NEST SUBSET ANALYSES ################################## artmod.Slope <- lm(Slope ~ SiteTypeFac, data = art, na.action = na.omit) summary(artmod.Slope) anova(artmod.Slope) artmod.Canopy <- lm(Canopy ~ SiteTypeFac, data = art, na.action = na.omit) summary(artmod.Canopy) anova(artmod.Canopy) # Temperature variables also include continuous covariate of iButton depth artmod.dailyMean_C <- lm(dailyMean_C ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit) summary(artmod.dailyMean_C) anova(artmod.dailyMean_C) artmod.dailyMax_C <- lm(dailyMax_C ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit) summary(artmod.dailyMax_C) anova(artmod.dailyMax_C) artmod.dailyMin_C <- lm(dailyMin_C ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit) summary(artmod.dailyMin_C) anova(artmod.dailyMin_C) artmod.range <- lm(range ~ SiteTypeFac + ibdepth, data = art, na.action = na.omit) summary(artmod.range) anova(artmod.range) ############################################################################ ############################################################################ ############################################################################ ############################################################################ ################################ # MICROHABITAT ANALYSES ################################ #### Real vs. random locations for each microhabitat variable, plus urbanization covar. 
and urb*nestrand interaction ###random effect of nest cluster ################################################## # Distance to water ################################################## modDistTW <- lme(DistTW ~ urbPC1*NestRandFac, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) # use `modDistTW` (separate residual variances) ## but now see whether slopes differ or should we use a model with a single slope anova(modDistTW) #<-- no significant interaction modDistTWb <- lme(DistTW ~ urbPC1 + NestRandFac, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) # Test for different residual variances modDistTWc <- lme(DistTW ~ urbPC1*NestRandFac, random = ~ 1 | NestClusterFac, data = microhab, na.action = na.omit) anova(modDistTW, modDistTWc) summary(modDistTWb) #<-- XXX use no interaction but separate residual variances ############################## # asreml if(have_asreml){ asrDistTW <- asreml(DistTW ~ urbPC1*NestRandFac, random = ~ NestClusterFac, residual = ~ dsum( ~ units | NestRandFac), data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) asrDistTWb <- asreml(DistTW ~ urbPC1*NestRandFac, random = ~ NestClusterFac, residual = ~ idv(units), #<--necessary so model NOT parameterized ratios: var/sigma data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) # convert asreml variances into lme SD and SD ratio ## use delta method to approximate std. 
errors on calculated SD and SD ratio rbind(vpredict(asrDistTW, ~ sqrt(V1)), vpredict(asrDistTW, ~ sqrt(V2)), vpredict(asrDistTW, ~ sqrt(V3) / sqrt(V2))) summary(modDistTW) # Profile likelihood confidence intervals DistTW.v1 <- proLik4(asrDistTW, component = ~ V1, parallel = TRUE, ncores = 8) DistTW.v2 <- proLik4(asrDistTW, component = ~ V2, G = FALSE, parallel = TRUE, ncores = 8) DistTW.v3 <- proLik4(asrDistTW, component = ~ V3, G = FALSE, parallel = TRUE, ncores = 8) rbind(vpredict(asrDistTWb, ~ sqrt(V1)), vpredict(asrDistTWb, ~ sqrt(V2))) summary(modDistTWc) # do the likelihood ratio test of asreml models to compare to anova() results LRTest(asrDistTW$loglik, asrDistTWb$loglik, df = 1) anova(modDistTW, modDistTWc) #XXX asreml agrees with variance components of lme } ############################## ################################################## # Slope ################################################## modSlope <- lme(Slope ~ urbPC1*NestRandFac, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) ## see whether slopes differ or should use model with single slope anova(modSlope) #<-- no significant interaction modSlopeb <- lme(Slope ~ urbPC1 + NestRandFac, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) # Test for different residual variances modSlopec <- lme(Slope ~ urbPC1*NestRandFac, random = ~ 1 | NestClusterFac, data = microhab, na.action = na.omit) anova(modSlope,modSlopec) #<-- no significant diff: use 1 residual variance summary(modSlope) summary(modSlopec) #<-- XXX Use this model ############################## # asreml if(have_asreml){ asrSlope <- asreml(Slope ~ urbPC1*NestRandFac, random = ~ NestClusterFac, residual = ~ dsum( ~ units | NestRandFac), data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) asrSlopeb <- asreml(Slope ~ urbPC1*NestRandFac, random = ~ NestClusterFac, residual = ~ idv(units), 
#<--necessary so model NOT parameterized ratios: var/sigma data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) # convert asreml variances into lme SD and SD ratio ## use delta method to approximate std. errors on calculated SD and SD ratio rbind(vpredict(asrSlope, ~ sqrt(V1)), vpredict(asrSlope, ~ sqrt(V2)), vpredict(asrSlope, ~ sqrt(V3) / sqrt(V2))) summary(modSlope) # Profile likelihood confidence intervals Slope.v1 <- proLik4(asrSlope, component = ~ V1, parallel = TRUE, ncores = 8) Slope.v2 <- proLik4(asrSlope, component = ~ V2, G = FALSE, parallel = TRUE, ncores = 8) Slope.v3 <- proLik4(asrSlope, component = ~ V3, G = FALSE, parallel = TRUE, ncores = 8) rbind(vpredict(asrSlopeb, ~ sqrt(V1)), vpredict(asrSlopeb, ~ sqrt(V2))) summary(modSlopec) # do the likelihood ratio test of asreml models to compare to anova() results LRTest(asrSlope$loglik, asrSlopeb$loglik, df = 1) anova(modSlope, modSlopec) #XXX asreml agrees with variance components of lme } ############################## ################################################## # Canopy Openness ################################################## modCanopy <- lme(Canopy ~ urbPC1*NestRandFac, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~1 | NestRandFac), data = microhab, na.action = na.omit) summary(modCanopy) ## but now see whether slopes differ or should we use a model with a single slope anova(modCanopy) #<-- NO significant interaction modCanopyb <- lme(Canopy ~ urbPC1 + NestRandFac, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) summary(modCanopyb) #<-- XXX Use this model # Test for different residual variances modCanopyc <- lme(Canopy ~ urbPC1*NestRandFac, random = ~ 1 | NestClusterFac, data = microhab, na.action = na.omit) summary(modCanopyc) anova(modCanopy,modCanopyc) #<-- XXX significant diff: use 2 residual variance ############################## # asreml if(have_asreml){ asrCanopy <- asreml(Canopy ~ 
urbPC1*NestRandFac, random = ~ NestClusterFac, residual = ~ dsum( ~ units | NestRandFac), data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) asrCanopyb <- asreml(Canopy ~ urbPC1*NestRandFac, random = ~ NestClusterFac, residual = ~ idv(units), #<--necessary so model NOT parameterized ratios: var/sigma data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) # convert asreml variances into lme SD and SD ratio ## use delta method to approximate std. errors on calculated SD and SD ratio rbind(vpredict(asrCanopy, ~ sqrt(V1)), vpredict(asrCanopy, ~ sqrt(V2)), vpredict(asrCanopy, ~ sqrt(V3) / sqrt(V2))) summary(modCanopy) # Profile likelihood confidence intervals Canopy.v1 <- proLik4(asrCanopy, component = ~ V1, parallel = TRUE, ncores = 8) Canopy.v2 <- proLik4(asrCanopy, component = ~ V2, G = FALSE, parallel = TRUE, ncores = 8) Canopy.v3 <- proLik4(asrCanopy, component = ~ V3, G = FALSE, parallel = TRUE, ncores = 8) rbind(vpredict(asrCanopyb, ~ sqrt(V1)), vpredict(asrCanopyb, ~ sqrt(V2))) summary(modCanopyc) # do the likelihood ratio test of asreml models to compare to anova() results LRTest(asrCanopy$loglik, asrCanopyb$loglik, df = 1) anova(modCanopy, modCanopyc) #XXX asreml agrees with variance components of lme } ############################## # use `modCanopyb` (separate residual variances) ################################################## # Daily Mean Temperature ################################################## moddailyMean_C <- lme(dailyMean_C ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) summary(moddailyMean_C) ## but now see whether slopes differ or should we use a model with a single slope anova(moddailyMean_C) #<-- NO significant interaction moddailyMean_Cb <- lme(dailyMean_C ~ urbPC1 + NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) 
summary(moddailyMean_Cb) # See if separate residual variances needed moddailyMean_Cc <- lme(dailyMean_C ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, data = microhab, na.action = na.omit) summary(moddailyMean_Cc) #<-- XXX Use this model anova(moddailyMean_C,moddailyMean_Cc) #<--XXX No significant diff. ############################## # asreml if(have_asreml){ asrdailyMean_C <- asreml(dailyMean_C ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ dsum( ~ units | NestRandFac), data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) asrdailyMean_C <- update(asrdailyMean_C, maxit = 10) asrdailyMean_C <- update(asrdailyMean_C, maxit = 10) asrdailyMean_Cb <- asreml(dailyMean_C ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ idv(units), #<--necessary so model NOT parameterized ratios: var/sigma data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) # convert asreml variances into lme SD and SD ratio ## use delta method to approximate std. 
errors on calculated SD and SD ratio rbind(vpredict(asrdailyMean_C, ~ sqrt(V1)), vpredict(asrdailyMean_C, ~ sqrt(V3)), vpredict(asrdailyMean_C, ~ sqrt(V2) / sqrt(V3))) summary(moddailyMean_C) # Profile likelihood confidence intervals dailyMean_C.v1 <- proLik4(asrdailyMean_C, component = ~ V1, parallel = TRUE, ncores = 8) dailyMean_C.v2 <- proLik4(asrdailyMean_C, component = ~ V2, G = FALSE, parallel = TRUE, ncores = 8) dailyMean_C.v3 <- proLik4(asrdailyMean_C, component = ~ V3, G = FALSE, parallel = TRUE, ncores = 8) par(mfrow = c(3, 1)) plot(dailyMean_C.v1) plot(dailyMean_C.v2) plot(dailyMean_C.v3) rbind(vpredict(asrdailyMean_Cb, ~ sqrt(V1)), vpredict(asrdailyMean_Cb, ~ sqrt(V2))) summary(moddailyMean_Cc) # do the likelihood ratio test of asreml models to compare to anova() results LRTest(asrdailyMean_C$loglik, asrdailyMean_Cb$loglik, df = 1) anova(moddailyMean_C, moddailyMean_Cc) ## the test of whether or not the variances differ yields the same interpretation ### across ALL software } ############################## # use `moddailyMean_C` (2 residual variances) based on profile likelihood CIs ################################################## # Daily Max Temperature ################################################## moddailyMax_C <- lme(dailyMax_C ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) summary(moddailyMax_C) ## see whether slopes differ or should use model with single slope anova(moddailyMax_C) #<-- NO significant interaction moddailyMax_Cb <- lme(dailyMax_C ~ urbPC1 + NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) summary(moddailyMax_Cb) # Test for separate residual variances moddailyMax_Cc <- lme(dailyMax_C ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, data = microhab, na.action = na.omit) summary(moddailyMax_Cc) anova(moddailyMax_C, 
moddailyMax_Cc) # use `moddailyMax_Cc` (No difference between residual variances) summary(moddailyMax_Cc) #<-- XXX Use this model ############################## # asreml if(have_asreml){ asrdailyMax_C <- asreml(dailyMax_C ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ dsum( ~ units | NestRandFac), data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) asrdailyMax_C <- update(asrdailyMax_C, maxit = 10) asrdailyMax_Cb <- asreml(dailyMax_C ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ idv(units), #<--necessary so model NOT parameterized ratios: var/sigma data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) # convert asreml variances into lme SD and SD ratio ## use delta method to approximate std. errors on calculated SD and SD ratio rbind(vpredict(asrdailyMax_C, ~ sqrt(V1)), vpredict(asrdailyMax_C, ~ sqrt(V3)), vpredict(asrdailyMax_C, ~ sqrt(V2) / sqrt(V3))) summary(moddailyMax_C) # Profile likelihood confidence intervals dailyMax_C.v1 <- proLik4(asrdailyMax_C, component = ~ V1, parallel = TRUE, ncores = 8, nsample.units = 1, nse = 4) dailyMax_C.v2 <- proLik4(asrdailyMax_C, component = ~ V2, G = FALSE, parallel = TRUE, ncores = 8) dailyMax_C.v3 <- proLik4(asrdailyMax_C, component = ~ V3, G = FALSE, parallel = TRUE, ncores = 8, nsample.units = 1, nse = 1.5) # Have to find upper CI limit manually #XXX MUST do var.estimate replacement FIRST XXX dailyMax_C.v3$var.estimates <- dailyMax_C.v3$var.estimates[!is.na(dailyMax_C.v3$lambdas)] #XXX dailyMax_C.v3$lambdas <- dailyMax_C.v3$lambdas[!is.na(dailyMax_C.v3$lambdas)] chi.val <- 0.5 * qchisq(1 - 0.05, df = 1) fllmd <- asreml::update.asreml(object = asrdailyMax_C, start.values = TRUE)$vparameters.table fllmd2 <- fllmd fllmd2[3, 3] <- "F" for(v in seq(17.05, 17.15, by = 0.005)){ fllmd2[3, 2] <- v conMod <- update.asreml(asrdailyMax_C, random = ~., R.param = fllmd2) conMod <- update(conMod, maxiter = 10) lout <- asreml::lrt(conMod, 
asrdailyMax_C)$"LR-statistic" dailyMax_C.v3 <- within(dailyMax_C.v3, { lambdas <- c(lambdas, lout) var.estimates <- c(var.estimates, v)}) } dailyMax_C.v3 chi.val for(v in seq(17.14, 17.145, by = 0.0001)){ fllmd2[3, 2] <- v conMod <- update.asreml(asrdailyMax_C, random = ~., R.param = fllmd2) conMod <- update(conMod, maxiter = 10) lout <- asreml::lrt(conMod, asrdailyMax_C)$"LR-statistic" dailyMax_C.v3 <- within(dailyMax_C.v3, { lambdas <- c(lambdas, lout) var.estimates <- c(var.estimates, v)}) } dailyMax_C.v3 chi.val v <- 17.14016 fllmd2[3, 2] <- v conMod <- update.asreml(asrdailyMax_C, random = ~., R.param = fllmd2) conMod <- update(conMod, maxiter = 10) lout <- asreml::lrt(conMod, asrdailyMax_C)$"LR-statistic" dailyMax_C.v3 <- within(dailyMax_C.v3, { lambdas <- c(lambdas, lout) var.estimates <- c(var.estimates, v)}) dailyMax_C.v3 chi.val dailyMax_C.v3 <- within(dailyMax_C.v3, { lambdas <- lambdas[order(var.estimates)] var.estimates <- var.estimates[order(var.estimates)]}) dailyMax_C.v3$UCL <- with(dailyMax_C.v3, var.estimates[var.estimates > 10][which.min(abs(lambdas[var.estimates > 10] - chi.val))]) par(mfrow = c(3, 1)) plot(dailyMax_C.v1) plot(dailyMax_C.v2) plot(dailyMax_C.v3) rbind(vpredict(asrdailyMax_Cb, ~ sqrt(V1)), vpredict(asrdailyMax_Cb, ~ sqrt(V2))) summary(moddailyMax_Cc) # do the likelihood ratio test of asreml models to compare to anova() results LRTest(asrdailyMax_C$loglik, asrdailyMax_Cb$loglik, df = 1) anova(moddailyMax_C, moddailyMax_Cc) #XXX asreml MOSTLY agrees with variance components of lme ## the test of whether or not the variances differ yields the same interpretation ### across ALL software } ############################## ################################################## # Daily Min Temperature ################################################## moddailyMin_C <- lme(dailyMin_C ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) 
summary(moddailyMin_C) anova(moddailyMin_C) #<-- NO significant interaction moddailyMin_Cb <- lme(dailyMin_C ~ urbPC1 + NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) # Test for separate residual variances ## drop separate residual variances moddailyMin_Cc <- lme(dailyMin_C ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, data = microhab, na.action = na.omit) summary(moddailyMin_Cc) anova(moddailyMin_C,moddailyMin_Cc) #<-- no significant diff. XXX p=0.08 XXX # use `moddailyMin_C` (2 different residual variances since profile 95%CI do NOT overalp) summary(moddailyMin_C) #<-- XXX Use this model ############################## # asreml if(have_asreml){ asrdailyMin_C <- asreml(dailyMin_C ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ dsum( ~ units | NestRandFac), data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) asrdailyMin_C <- update(asrdailyMin_C, maxit = 10) asrdailyMin_Cb <- asreml(dailyMin_C ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ idv(units), #<--necessary so model NOT parameterized ratios: var/sigma data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) # convert asreml variances into lme SD and SD ratio ## use delta method to approximate std. 
errors on calculated SD and SD ratio rbind(vpredict(asrdailyMin_C, ~ sqrt(V1)), vpredict(asrdailyMin_C, ~ sqrt(V3)), vpredict(asrdailyMin_C, ~ sqrt(V2) / sqrt(V3))) summary(moddailyMin_C) # Profile likelihood confidence intervals dailyMin_C.v1 <- proLik4(asrdailyMin_C, component = ~ V1, parallel = TRUE, ncores = 8) dailyMin_C.v2 <- proLik4(asrdailyMin_C, component = ~ V2, G = FALSE, parallel = TRUE, ncores = 8) dailyMin_C.v3 <- proLik4(asrdailyMin_C, component = ~ V3, G = FALSE, parallel = TRUE, ncores = 8) par(mfrow = c(3, 1)) plot(dailyMin_C.v1) plot(dailyMin_C.v2) plot(dailyMin_C.v3) rbind(vpredict(asrdailyMin_Cb, ~ sqrt(V1)), vpredict(asrdailyMin_Cb, ~ sqrt(V2))) summary(moddailyMin_Cc) # do the likelihood ratio test of asreml models to compare to anova() results LRTest(asrdailyMin_C$loglik, asrdailyMin_Cb$loglik, df = 1) anova(moddailyMin_C, moddailyMin_Cc) #XXX asreml MOSTLY agrees with variance components of lme ## the test of whether or not the variances differ yields the same interpretation ### across ALL software } ############################## ################################################## # Daily Temperature Range ################################################## modrange <- lme(range ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) summary(modrange) anova(modrange) #<-- NO significant interaction modrangeb <- lme(range ~ urbPC1 + NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, weights = varIdent(form = ~ 1 | NestRandFac), data = microhab, na.action = na.omit) summary(modrangeb) # Now test for separate residual variances modrangec <- lme(range ~ urbPC1*NestRandFac + scibdepth, random = ~ 1 | NestClusterFac, data = microhab, na.action = na.omit) summary(modrangec) #<-- XXX Use this model anova(modrange,modrangec) #<-- XXX Non-significance difference between residuals # use `modrangec` (No difference between residual variances) 
############################## # asreml if(have_asreml){ asrrange <- asreml(range ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ dsum( ~ units | NestRandFac), data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) asrrangeb <- asreml(range ~ urbPC1*NestRandFac + scibdepth, random = ~ NestClusterFac, residual = ~ idv(units), #<--necessary so model NOT parameterized ratios: var/sigma data = microhab, na.action = list(y = "omit", x = "omit"), maxit = 20) # convert asreml variances into lme SD and SD ratio ## use delta method to approximate std. errors on calculated SD and SD ratio rbind(vpredict(asrrange, ~ sqrt(V1)), vpredict(asrrange, ~ sqrt(V3)), vpredict(asrrange, ~ sqrt(V2) / sqrt(V3))) summary(modrange) # Profile likelihood confidence intervals range.v1 <- proLik4(asrrange, component = ~ V1, parallel = TRUE, ncores = 8) range.v2 <- proLik4(asrrange, component = ~ V2, G = FALSE, parallel = TRUE, ncores = 8) range.v3 <- proLik4(asrrange, component = ~ V3, G = FALSE, parallel = TRUE, ncores = 8) par(mfrow = c(3, 1)) plot(range.v1) plot(range.v2) plot(range.v3) rbind(vpredict(asrrangeb, ~ sqrt(V1)), vpredict(asrrangeb, ~ sqrt(V2))) summary(modrangec) # do the likelihood ratio test of asreml models to compare to anova() results LRTest(asrrange$loglik, asrrangeb$loglik, df = 1) anova(modrange, modrangec) #XXX asreml MOSTLY agrees with variance components of lme ## the test of whether or not the variances differ yields the same interpretation ### across ALL software } ############################## ############################################################################### ############################################################################### #### Save asreml results if necessary if(have_asreml){ ciBaseNms <- c("Canopy", "dailyMax_C", "dailyMean_C", "dailyMin_C", "DistTW", "range", "Slope") # Save raw profile likelihoods save(list = c(paste0(ciBaseNms, ".v1"), paste0(ciBaseNms, ".v2"), paste0(ciBaseNms, ".v3")), 
file = "./microhabProfileCIs.rda") } ############################################################################### ############################################################################### # Figure with urbanization scores by location/location type ############################################################################### # First, make simple dataframe with sites locations <- microhab[which(!duplicated(microhab$siteAbbr)), match(c("SiteName", "siteAbbr", "SiteType", "HumDist", "urbPC1", "urbPC2"), names(microhab))] # Give each location a number corresponding to table in main text of Manuscript ## Missing the Agricultural Heritage park (4) and Notasulga pond (15) locations$ind <- c(3, 2, 11, 10, 5, 7, 6, 14, 1, 13, 12, 9, 8) # now make small dataframe for plotting the indices at the urbPC1 scores ## and condense to a single unique urbPC1 ### sort first, so indices will increase when paste duplicated ones back in locTcks <- locations[order(locations$ind), c("urbPC1", "ind")] locTcks <- locTcks[which(!duplicated(locTcks$urbPC1)), ] locTcks$ind <- as.character(locTcks$ind) # Now find matches and paste those indices in with existing ones for(i in 1:nrow(locTcks)){ locTcks$ind[i] <- paste(locations$ind[which(locTcks$urbPC1[i] == locations$urbPC1)], collapse = ",") } ## order by urbPC1 locTcks <- locTcks[order(locTcks$urbPC1), ] # Add a vertical adjustment to space out text on figure locTcks$vadj <- rep(c(0, 0.2), nrow(locTcks))[1:nrow(locTcks)] # Snag some colors from base R's `Okabe-Ito` palette ## suitable palette for color-related vision accessibility ### first have a look palOI <- palette.colors(NULL, "Okabe-Ito") pie(rep(1, length(palOI)), col = palOI, labels = names(palOI)) # grab subset of 3 colors for figure palOI3 <- palOI[c("skyblue", "bluishgreen", "vermillion")] dev.off() #<-- turn off pie chart # Need multiple "rug" plots to do different colors ## set rug plot arguments here tcksz <- 0.05 #<-- "ticksize" positive then ticks plot in towards 
center of fig. tcklwd <- 4 #<-- "lwd" rpos <- 0.26 #<-- "pos" to make sure end of line doesn't extend below axis xpos <- 0.25 #<-- "pos" of axis(1) to bring down to make room for indnices #XXX for saving figures - use: pdf(file = "./Fig1_HumDisturbCat_vs_urb.pdf", width = 9, height = 5) # use `par()` to set up some features of the entire figure ## `mfrow` designates the number of rows x columns to create in the figure par(mar = c(4.5, 5.5, 2.2, 0.1), #<-- space around panel (bottom, left, top, right) mgp = c(3, 1, 0), #<-- adjustment of axis title, labels, and line cex.axis = 1.25, cex.lab = 1.6) #<-- scaling for axis labels and axis title boxplot(urbPC1 ~ SiteType, data = locations, horizontal = TRUE, axes = FALSE, col = palOI3, xlab = "Urbanization level (PC1)", ylab = "Study area disturbance level", xlim = c(0.5, 3.5), #<-- switched: for final y-axis when `horizontal = TRUE` ylim = c(-1.6, 5.3)) #<-- switched so eventual x-axis rug(locations$urbPC1[locations$HumDist == "High"], side = 1, #<-- side = 1 is bottom ticksize = tcksz, lwd = tcklwd, pos = rpos, col = palOI3[3]) #<-- color for ticks rug(locations$urbPC1[locations$HumDist == "Intermediate"], side = 1, #<-- side = 1 is bottom ticksize = tcksz, lwd = tcklwd, pos = rpos, col = palOI3[2]) #<-- color for ticks rug(locations$urbPC1[locations$HumDist == "Low"], side = 1, #<-- side = 1 is bottom ticksize = tcksz, lwd = tcklwd, pos = rpos, col = palOI3[1]) #<-- color for ticks # indices to associate rug plot ticks with table in main manuscript #text(x = locTcks$urbPC1, y = 0.5 + locTcks$vadj, labels = locTcks$ind, cex = 0.8) axis(1, at = seq(-1.6, 5.2, 0.4), labels = FALSE, lwd = 1.6, pos = xpos) #<-- just the axis axis(1, at = seq(-1.6, 5.2, 0.8), lwd = 0, pos = xpos) #<-- just the labels axis(2, at = seq.int(3), labels = c("Low", "Intermediate", "High"), lwd = 1.6) dev.off() #<-- XXX MUST do this to close pdf file connection ############################################################################### # 
7-panel Figure of all above variables ############################################################################### ## First, to create prediction lines need a new dataset that gives values over which we want predictions ### choose even spacing between minimum and maximum urbanization scores ### assign this to both random and nest sites ndata <- with(microhab, #<--avoid `microhab$` each time data.frame(urbPC1 = rep(seq(from = min(urbPC1, na.rm = TRUE), to = max(urbPC1, na.rm = TRUE), length.out = 100), 2), NestRandFac = as.factor(rep(c(0, 1), each = 100)), scibdepth = rep(seq(from = min(scibdepth, na.rm = TRUE), to = max(scibdepth, na.rm = TRUE), length.out = 100), 2))) # Set up a few things that will be used over and over again among all 8 plots xlab_in <- "Urbanization level (PC1)" xaxis <- seq(from = -2, to = 5, by = 1) # what should we set x-axis limits at range(microhab$urbPC1, na.rm = TRUE) xlim_in <- c(-2.05, 5.2) degCexpr <- "(\u00B0C)" #<-- degrees Celsius expression to paste in nestPtSymb <- 21 nestPtCols <- c(bg = "#03244d", brd = "grey40") #<-- "BLUE" nestPtCx <- 1.7 randPtSymb <- 22 randPtCols <- c(bg = "#e86823", brd = "grey20") #<-- "ORANGE" randPtCx <- 1.1 jitfac <- 3.6 #<-- jitter factor reglinewd <- 3.2 #<-- line width of all regression lines ptLwd <- 1.0 #<-- line width of point border #XXX for saving figures - use: pdf(file = "./Fig3_microhab_vs_urb.pdf", width = 9, height = 12) # use `par()` to set up some features of the entire figure ## `mfrow` designates the number of rows x columns to create in the figure par(mfrow = c(4, 2), #<-- (rows, columns) mar = c(5, 6.2, 1.9, 0.5), #<-- space around each panel (bottom, left, top, right) mgp = c(3, 1, 0), #<-- adjustment of axis title, labels, and line cex.axis = 1.5, cex.lab = 1.6) #<-- scaling for axis labels and axis title ################################ # Distance to Water (DistTW) ################################ plot(DistTW ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = 
FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 265), xlab = xlab_in, ylab = "Distance to water (m)") # points first (to put in background) ## Nest first points(DistTW ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(DistTW ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modDistTW, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) #<-- make thicker so not covered ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 250, 50)) mtext(text = expression((bold(A))), side = 3, line = -0.4, adj = -0.25, cex = 1.3) ################################ # Slope ################################ plot(Slope ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 65), xlab = xlab_in, ylab = "Slope (degrees)") # points first (to put in background) ## Nest first points(Slope ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(Slope ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modSlope, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, 
data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 60, 20)) mtext(text = expression((bold(B))), side = 3, line = -0.4, adj = -0.25, cex = 1.3) ################################ # Canopy ################################ plot(Canopy ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 100), xlab = xlab_in, ylab = "Canopy openness (%)") # points first (to put in background) ## Nest first points(Canopy ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(Canopy ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modCanopy, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 100, 20)) mtext(text = expression((bold(C))), side = 3, line = -0.4, adj = -0.25, cex = 1.3) ################################ # dailyMean_C ################################ plot(dailyMean_C ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(22.5, 34), xlab = xlab_in, ylab = paste("Daily mean temp. 
", degCexpr)) # points first (to put in background) ## Nest first points(dailyMean_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(dailyMean_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(moddailyMean_C, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(24, 34, 2)) mtext(text = expression((bold(D))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) ################################ # dailyMax_C ################################ plot(dailyMax_C ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(23, 45), xlab = xlab_in, ylab = paste("Daily max. temp. 
", degCexpr)) # points first (to put in background) ## Nest first points(dailyMax_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(dailyMax_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(moddailyMax_C, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(25, 45, 5)) mtext(text = expression((bold(E))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) ################################ # dailyMin_C ################################ plot(dailyMin_C ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, xlab = xlab_in, ylab = paste("Daily min. temp. 
", degCexpr)) # points first (to put in background) ## Nest first points(dailyMin_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(dailyMin_C ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(moddailyMin_C, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2) mtext(text = expression((bold(F))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) ################################ # range ################################ plot(range ~ urbPC1, data = microhab, type = "n", #<-- just set up axes = FALSE, #<-- make our own fancy ones xlim = xlim_in, ylim = c(0, 16), xlab = xlab_in, ylab = paste("Daily temp. 
range ", degCexpr)) # points first (to put in background) ## Nest first points(range ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 0, pch = nestPtSymb, bg = nestPtCols["bg"], col = nestPtCols["brd"], cex = nestPtCx, lwd = ptLwd) ## Random second points(range ~ jitter(urbPC1, jitfac), data = microhab, subset = NestRand == 1, pch = randPtSymb, bg = randPtCols["bg"], col = randPtCols["brd"], cex = randPtCx, lwd = ptLwd) # Lines from model ## predict from the model ndata$pred <- predict(modrange, #<-- XXX change for each response variable newdata = ndata, level = 0) ## Nest first lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "0", lwd = reglinewd * 1.2, col = nestPtCols["bg"]) ## Random second lines(pred ~ urbPC1, data = ndata, subset = NestRandFac == "1", lwd = reglinewd, lty = "dashed", col = randPtCols["bg"]) # X-axis followed by Y-axis axis(1, xaxis) axis(2, seq(0, 16, 4)) mtext(text = expression((bold(G))), side = 3, line = -0.2, adj = -0.24, cex = 1.3) # END OF PLOTTING ############################# dev.off() #<-- XXX MUST do this to close pdf file connection ################################################################################ ################################################################################ # For results tables Using `asreml` ## "NestRandFac0" == natural and "NestRandFac1" == artifical # Create quick function to extract fixed effect estimates and standard errors from asreml fxdFun <- function(mod){ fxdtmp <- cbind(rev(mod$coefficients$fixed), rev(sqrt(mod$vcoeff$fixed))) dimnames(fxdtmp) <- list(rev(rownames(mod$coefficients$fixed)), c("Est", "Std.Err")) fxdtmp[which(fxdtmp[, 1] != 0.00 & fxdtmp[, 2] != 0.00), ] } ####################### fxdFun(asrDistTW) wald(asrDistTW) summary(asrDistTW)$varcomp rbind(proCI(DistTW.v2), proCI(DistTW.v3)) lrt(asrDistTWb, asrDistTW) fxdFun(asrSlope) wald(asrSlope) summary(asrSlope)$varcomp rbind(proCI(Slope.v2), proCI(Slope.v3)) lrt(asrSlopeb, asrSlope) fxdFun(asrCanopy) 
wald(asrCanopy) summary(asrCanopy)$varcomp rbind(proCI(Canopy.v2), proCI(Canopy.v3)) lrt(asrCanopyb, asrCanopy) fxdFun(asrdailyMean_C) wald(asrdailyMean_C) summary(asrdailyMean_C)$varcomp rbind(proCI(dailyMean_C.v2), proCI(dailyMean_C.v3)) lrt(asrdailyMean_Cb, asrdailyMean_C) fxdFun(asrdailyMax_C) wald(asrdailyMax_C) summary(asrdailyMax_C)$varcomp rbind(proCI(dailyMax_C.v2), proCI(dailyMax_C.v3)) lrt(asrdailyMax_Cb, asrdailyMax_C) fxdFun(asrdailyMin_C) wald(asrdailyMin_C) summary(asrdailyMin_C)$varcomp rbind(proCI(dailyMin_C.v2), proCI(dailyMin_C.v3)) lrt(asrdailyMin_Cb, asrdailyMin_C) fxdFun(asrrange) wald(asrrange) summary(asrrange)$varcomp rbind(proCI(range.v2), proCI(range.v3)) lrt(asrrangeb, asrrange)
library(dplyr) ## read and parse data (same for all plots) allData <- read.table("household_power_consumption.txt", na.strings="?", stringsAsFactors=FALSE, sep=";", header=TRUE) allData$Date<-as.Date(allData$Date,"%d/%m/%Y") interestingdata<-allData %>% subset(Date<=as.Date("2007-02-02","%Y-%m-%d")) %>% subset(Date>=as.Date("2007-02-01","%Y-%m-%d")) rm(allData) interestingdata<-mutate(interestingdata,datetime=paste(interestingdata$Date,interestingdata$Time)) interestingdata$datetime<-strptime(interestingdata$datetime,"%Y-%m-%d %H:%M:%S") ## draw Plot 4 - four subplots png(file="plot4.png",width = 480, height = 480, units = "px") par(mfcol = c(2, 2)) ##Subplot 1 = old plot2 (almost) with(interestingdata,plot(datetime,Global_active_power,type="l",xlab="",ylab="Global Active Power")) ##Subplot 2 = old plot3 (almost) with(interestingdata,plot(datetime,Sub_metering_1,type="l",xlab="",ylab="Energy sub metering",col="black")) lines(interestingdata$datetime,interestingdata$Sub_metering_2,type="l",col="red") lines(interestingdata$datetime,interestingdata$Sub_metering_3,type="l",col="blue") legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lwd=1,bty="n") ##Subplot 3 - Voltage with(interestingdata,plot(datetime,Voltage,type="l",xlab="datetime",ylab="Voltage")) ##Subplot 4 - Reactive power with(interestingdata,plot(datetime,Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")) dev.off() rm(interestingdata)
/plot4.R
no_license
MikeCrookenden/ExData_Plotting1
R
false
false
1,551
r
library(dplyr) ## read and parse data (same for all plots) allData <- read.table("household_power_consumption.txt", na.strings="?", stringsAsFactors=FALSE, sep=";", header=TRUE) allData$Date<-as.Date(allData$Date,"%d/%m/%Y") interestingdata<-allData %>% subset(Date<=as.Date("2007-02-02","%Y-%m-%d")) %>% subset(Date>=as.Date("2007-02-01","%Y-%m-%d")) rm(allData) interestingdata<-mutate(interestingdata,datetime=paste(interestingdata$Date,interestingdata$Time)) interestingdata$datetime<-strptime(interestingdata$datetime,"%Y-%m-%d %H:%M:%S") ## draw Plot 4 - four subplots png(file="plot4.png",width = 480, height = 480, units = "px") par(mfcol = c(2, 2)) ##Subplot 1 = old plot2 (almost) with(interestingdata,plot(datetime,Global_active_power,type="l",xlab="",ylab="Global Active Power")) ##Subplot 2 = old plot3 (almost) with(interestingdata,plot(datetime,Sub_metering_1,type="l",xlab="",ylab="Energy sub metering",col="black")) lines(interestingdata$datetime,interestingdata$Sub_metering_2,type="l",col="red") lines(interestingdata$datetime,interestingdata$Sub_metering_3,type="l",col="blue") legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lwd=1,bty="n") ##Subplot 3 - Voltage with(interestingdata,plot(datetime,Voltage,type="l",xlab="datetime",ylab="Voltage")) ##Subplot 4 - Reactive power with(interestingdata,plot(datetime,Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")) dev.off() rm(interestingdata)
wine<-read.csv("wine.csv") set.seed(1234) str(wine) library(class) ##descriptive summary(wine) hist(wine$fixedacidity) hist(wine$volatileacidity) hist(wine$citricacid) hist(wine$residualsugar) hist(wine$chlorides) hist(wine$freesulfurdioxide) hist(wine$totalsulfurdioxide) ##outliers boxplot(wine$fixedacidity) boxplot(wine$volatileacidity) boxplot(wine$citricacid) boxplot(wine$residualsugar) boxplot(wine$chlorides) boxplot(wine$freesulfurdioxide) boxplot(wine$totalsulfurdioxide) ##splitting t<-floor(0.7*nrow(wine)) t1<-sample(seq_len(nrow(wine)),size = t) train<-wine[t1,] test<-wine[-t1,] ##min max and knn minmax<-function(x){ xnew<-(x-min(x))/(max(x)-min(x)) } train[,1:11]<-apply(train[,-12],2,minmax) test[,1:11]<-apply(test[,-12],2,minmax) data<-knn1(train[,-12], test[,-12], as.factor(train[,12])) ac<-length(which(data==as.factor(test[,12]),T))/length((test[,12])) cv1<-c() for(i in 1 :35){ cvr<-knn.cv(train[,-12], as.factor(train[,12]),k=i,l=0,prob=F,use.all = T) cv1<-c(cv1,length(which(cvr==as.factor(train[,12]),T))/length(train[,12])) } data1<-knn(train[,-12], test[,-12], as.factor(train[,12]),k=1,l=0,prob=F,use.all = T) ac1<-length(which(data1==as.factor(test[,12]),T))/length((test[,12])) keep<-condense(train[,-12],as.factor(train[,12])) keep1<-reduce.nn(train[,-12], keep,as.factor(train[,12])) data2<-knn(train[keep1,-12], test[,-12], as.factor(train[keep1,12]),k=1,l=0,prob=F,use.all = T) ac2<-length(which(data2==as.factor(test[,12]),T))/length((test[,12])) data3<-knn(train[keep,-12], test[,-12], as.factor(train[keep,12]),k=1,l=0,prob=F,use.all = T) ac3<-length(which(data3==as.factor(test[,12]),T))/length((test[,12])) ##quality wise splitting q5<-subset(wine,wine$quality==5) q6<-subset(wine,wine$quality==6) q7<-subset(wine,wine$quality==7) q4<-subset(wine,wine$quality==4) q8<-subset(wine,wine$quality==8) q3<-subset(wine,wine$quality==3) ##splitting quality wise data for train and test train5<-floor(seq_len(.8*nrow(q5))) test5<-q5[-train5,] 
train6<-floor(seq_len(.8*nrow(q6))) test6<-q6[-train6,] train7<-floor(seq_len(.8*nrow(q7))) test7<-q7[-train7,] train8<-floor(seq_len(.8*nrow(q8))) test8<-q8[-train8,] train3<-floor(seq_len(.8*nrow(q3))) test3<-q3[-train3,] train4<-floor(seq_len(.8*nrow(q4))) test4<-q4[-train4,] traini5<-q5[train5,] traini6<-q6[train6,] traini7<-q7[train7,] traini8<-q8[train8,] traini3<-q3[train3,] traini4<-q4[train4,] ##combine quality wise data library(class) trainbig<-rbind(traini5,traini6,traini7,traini8,traini3,traini4) testbig<-rbind(test5,test6,test7,test8,test3,test4) knn4<-knn(train=trainbig[,-12],test=testbig[,-12],as.factor(trainbig[,12]),k=5,l=5) ac2<-length(which(knn4==as.factor(testbig[,12]),T))/length((testbig[,12])) qv<-matrix(0,10,10) for(i in 1:10){ for(j in 1:10){ knn9<-knn.cv(trainbig[,-12],as.factor(trainbig[,12]),k=i,l=j-1) qv[i,j]<-length(which(knn9==as.factor(trainbig[,12]),T))/length(trainbig[,12]) } } qv
/wine data k neaest.R
no_license
drblur/Academic-Projects
R
false
false
3,038
r
wine<-read.csv("wine.csv") set.seed(1234) str(wine) library(class) ##descriptive summary(wine) hist(wine$fixedacidity) hist(wine$volatileacidity) hist(wine$citricacid) hist(wine$residualsugar) hist(wine$chlorides) hist(wine$freesulfurdioxide) hist(wine$totalsulfurdioxide) ##outliers boxplot(wine$fixedacidity) boxplot(wine$volatileacidity) boxplot(wine$citricacid) boxplot(wine$residualsugar) boxplot(wine$chlorides) boxplot(wine$freesulfurdioxide) boxplot(wine$totalsulfurdioxide) ##splitting t<-floor(0.7*nrow(wine)) t1<-sample(seq_len(nrow(wine)),size = t) train<-wine[t1,] test<-wine[-t1,] ##min max and knn minmax<-function(x){ xnew<-(x-min(x))/(max(x)-min(x)) } train[,1:11]<-apply(train[,-12],2,minmax) test[,1:11]<-apply(test[,-12],2,minmax) data<-knn1(train[,-12], test[,-12], as.factor(train[,12])) ac<-length(which(data==as.factor(test[,12]),T))/length((test[,12])) cv1<-c() for(i in 1 :35){ cvr<-knn.cv(train[,-12], as.factor(train[,12]),k=i,l=0,prob=F,use.all = T) cv1<-c(cv1,length(which(cvr==as.factor(train[,12]),T))/length(train[,12])) } data1<-knn(train[,-12], test[,-12], as.factor(train[,12]),k=1,l=0,prob=F,use.all = T) ac1<-length(which(data1==as.factor(test[,12]),T))/length((test[,12])) keep<-condense(train[,-12],as.factor(train[,12])) keep1<-reduce.nn(train[,-12], keep,as.factor(train[,12])) data2<-knn(train[keep1,-12], test[,-12], as.factor(train[keep1,12]),k=1,l=0,prob=F,use.all = T) ac2<-length(which(data2==as.factor(test[,12]),T))/length((test[,12])) data3<-knn(train[keep,-12], test[,-12], as.factor(train[keep,12]),k=1,l=0,prob=F,use.all = T) ac3<-length(which(data3==as.factor(test[,12]),T))/length((test[,12])) ##quality wise splitting q5<-subset(wine,wine$quality==5) q6<-subset(wine,wine$quality==6) q7<-subset(wine,wine$quality==7) q4<-subset(wine,wine$quality==4) q8<-subset(wine,wine$quality==8) q3<-subset(wine,wine$quality==3) ##splitting quality wise data for train and test train5<-floor(seq_len(.8*nrow(q5))) test5<-q5[-train5,] 
train6<-floor(seq_len(.8*nrow(q6))) test6<-q6[-train6,] train7<-floor(seq_len(.8*nrow(q7))) test7<-q7[-train7,] train8<-floor(seq_len(.8*nrow(q8))) test8<-q8[-train8,] train3<-floor(seq_len(.8*nrow(q3))) test3<-q3[-train3,] train4<-floor(seq_len(.8*nrow(q4))) test4<-q4[-train4,] traini5<-q5[train5,] traini6<-q6[train6,] traini7<-q7[train7,] traini8<-q8[train8,] traini3<-q3[train3,] traini4<-q4[train4,] ##combine quality wise data library(class) trainbig<-rbind(traini5,traini6,traini7,traini8,traini3,traini4) testbig<-rbind(test5,test6,test7,test8,test3,test4) knn4<-knn(train=trainbig[,-12],test=testbig[,-12],as.factor(trainbig[,12]),k=5,l=5) ac2<-length(which(knn4==as.factor(testbig[,12]),T))/length((testbig[,12])) qv<-matrix(0,10,10) for(i in 1:10){ for(j in 1:10){ knn9<-knn.cv(trainbig[,-12],as.factor(trainbig[,12]),k=i,l=j-1) qv[i,j]<-length(which(knn9==as.factor(trainbig[,12]),T))/length(trainbig[,12]) } } qv
library(igraph) # Source: A tutorial by Esteban Moro # <http://estebanmoro.org/2012/11/temporal-networks-with-igraph-and-r-with-20-lines-of-code/> #load the edges with time stamp #there are three columns in edges: id1,id2,time #edges <- read.table("edges.csv",header=T) #generate the full graph #g <- graph.edgelist(as.matrix(edges[,c(1,2)]),directed=F) # Initial part of "Fur Ellise" g <- graph.formula(75-+76, 76-+75, 75-+76, 76-+71, 71-+74, 74-+72, 72-+69, 69-+45, 45-+52, 52-+57, 57-+60, 60-+64, 64-+69, 69-+71, 71-+52, 52-+56, 56-+59, 59-+64, 64-+68, 68-+71, 71-+72, 72-+45, 45-+52, 52-+57, 57-+64, 64-+76, 76-+75, 75-+76, 76-+75, 75-+76, 76-+71, 71-+74, 74-+72, 72-+69, 69-+45, 45-+52, 52-+57, 57-+60, 60-+64, 64-+69, 69-+71, 71-+52, 52-+56, 56-+59, 59-+64, 64-+72, 72-+71, 71-+69, 69-+45, 45-+52, 52-+57, 57-+76, 76-+75, 75-+76, 76-+75, 75-+76, 76-+71, 71-+74, 74-+72, 72-+69, 69-+45, 45-+52, 52-+57, 57-+60, 60-+64, 64-+69, 69-+71) E(g)$time <- 1:68 #generate a cool palette for the graph YlOrBr <- c("#FFFFD4", "#FED98E", "#FE9929", "#D95F0E", "#993404") YlOrBr.Lab <- colorRampPalette(YlOrBr, space = "Lab") #colors for the nodes are chosen from the very beginning vcolor <- rev(YlOrBr.Lab(vcount(g))) #time in the edges goes from 1 to 300. We kick off at time 3 ti <- 3 #weights of edges formed up to time ti is 1. Future edges are weighted 0 E(g)$weight <- ifelse(E(g)$time < ti,1,0) #generate first layout using weights. layout.old <- layout.fruchterman.reingold(g,params=list(weights=E(g)$weight)) #total time of the dynamics total_time <- max(E(g)$time) #This is the time interval for the animation. In this case is taken to be 1/10 #of the time (i.e. 10 snapshots) between adding two consecutive nodes dt <- 0.1 #Output for each frame will be a png with HD size 1600x900 :) png(file="example%03d.png", width=1600,height=900) nsteps <- max(E(g)$time) #Time loop starts for(ti in seq(3,total_time,dt)){ #define weight for edges present up to time ti. 
E(g)$weight <- ifelse(E(g)$time < ti,1,0) #Edges with non-zero weight are in gray. The rest are transparent E(g)$color <- ifelse(E(g)$time < ti,"gray",rgb(0,0,0,0)) #Nodes with at least a non-zero weighted edge are in color. The rest are transparent V(g)$color <- ifelse(graph.strength(g)==0,rgb(0,0,0,0),vcolor) #given the new weights, we update the layout a little bit layout.new <- layout.fruchterman.reingold(g,params=list(niter=10,start=layout.old,weights=E(g)$weight,maxdelta=1)) #plot the new graph plot(g,layout=layout.new,vertex.label="",vertex.size=1+2*log(graph.strength(g)),vertex.frame.color=V(g)$color,edge.width=1.5,asp=9/16,margin=-0.15) #use the new layout in the next round layout.old <- layout.new } dev.off()
/playground/net.R
no_license
abaghan/music.as.network
R
false
false
2,904
r
library(igraph) # Source: A tutorial by Esteban Moro # <http://estebanmoro.org/2012/11/temporal-networks-with-igraph-and-r-with-20-lines-of-code/> #load the edges with time stamp #there are three columns in edges: id1,id2,time #edges <- read.table("edges.csv",header=T) #generate the full graph #g <- graph.edgelist(as.matrix(edges[,c(1,2)]),directed=F) # Initial part of "Fur Ellise" g <- graph.formula(75-+76, 76-+75, 75-+76, 76-+71, 71-+74, 74-+72, 72-+69, 69-+45, 45-+52, 52-+57, 57-+60, 60-+64, 64-+69, 69-+71, 71-+52, 52-+56, 56-+59, 59-+64, 64-+68, 68-+71, 71-+72, 72-+45, 45-+52, 52-+57, 57-+64, 64-+76, 76-+75, 75-+76, 76-+75, 75-+76, 76-+71, 71-+74, 74-+72, 72-+69, 69-+45, 45-+52, 52-+57, 57-+60, 60-+64, 64-+69, 69-+71, 71-+52, 52-+56, 56-+59, 59-+64, 64-+72, 72-+71, 71-+69, 69-+45, 45-+52, 52-+57, 57-+76, 76-+75, 75-+76, 76-+75, 75-+76, 76-+71, 71-+74, 74-+72, 72-+69, 69-+45, 45-+52, 52-+57, 57-+60, 60-+64, 64-+69, 69-+71) E(g)$time <- 1:68 #generate a cool palette for the graph YlOrBr <- c("#FFFFD4", "#FED98E", "#FE9929", "#D95F0E", "#993404") YlOrBr.Lab <- colorRampPalette(YlOrBr, space = "Lab") #colors for the nodes are chosen from the very beginning vcolor <- rev(YlOrBr.Lab(vcount(g))) #time in the edges goes from 1 to 300. We kick off at time 3 ti <- 3 #weights of edges formed up to time ti is 1. Future edges are weighted 0 E(g)$weight <- ifelse(E(g)$time < ti,1,0) #generate first layout using weights. layout.old <- layout.fruchterman.reingold(g,params=list(weights=E(g)$weight)) #total time of the dynamics total_time <- max(E(g)$time) #This is the time interval for the animation. In this case is taken to be 1/10 #of the time (i.e. 10 snapshots) between adding two consecutive nodes dt <- 0.1 #Output for each frame will be a png with HD size 1600x900 :) png(file="example%03d.png", width=1600,height=900) nsteps <- max(E(g)$time) #Time loop starts for(ti in seq(3,total_time,dt)){ #define weight for edges present up to time ti. 
E(g)$weight <- ifelse(E(g)$time < ti,1,0) #Edges with non-zero weight are in gray. The rest are transparent E(g)$color <- ifelse(E(g)$time < ti,"gray",rgb(0,0,0,0)) #Nodes with at least a non-zero weighted edge are in color. The rest are transparent V(g)$color <- ifelse(graph.strength(g)==0,rgb(0,0,0,0),vcolor) #given the new weights, we update the layout a little bit layout.new <- layout.fruchterman.reingold(g,params=list(niter=10,start=layout.old,weights=E(g)$weight,maxdelta=1)) #plot the new graph plot(g,layout=layout.new,vertex.label="",vertex.size=1+2*log(graph.strength(g)),vertex.frame.color=V(g)$color,edge.width=1.5,asp=9/16,margin=-0.15) #use the new layout in the next round layout.old <- layout.new } dev.off()
# Testing code for the RCMIP5 scripts in 'RCMIP5.R' # Uses the testthat package # See http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf library(testthat) # To run this code: # source("RCMIP5.R") # library(testthat) # test_file("tests/testthat/test_internalHelpers.R") context("cmip5data") implementations <- c("data.frame", "array") test_that("cmip5data handles bad input", { expect_error(cmip5data("hi")) expect_error(cmip5data(1, monthly=123)) expect_error(cmip5data(1, Z=123)) expect_error(cmip5data(1, lev=123)) expect_error(cmip5data(1, randomize="hi")) }) # helper function: test basic structural integrity structuretest <- function(d, i) { # d is cmip5data object, i is info expect_is(d, "cmip5data") expect_false(xor(is.null(d$lon), is.null(d$lat))) # both NULL, or both not if(!is.null(d$lon)) { expect_is(d$lon, "matrix", info=i) expect_is(d$lat, "matrix", info=i) } expect_is(d$model, "character", info=i) expect_is(d$variable, "character", info=i) expect_is(d$experiment, "character", info=i) expect_is(d$valUnit, "character", info=i) expect_is(d$debug, "list", info=i) if(!is.null(d$time)) { expect_is(d$debug$timeFreqStr, "character", info=i) expect_is(d$debug$calendarStr, "character", info=i) expect_is(d$debug$timeUnit, "character", info=i) } if(is.data.frame(d$val)) { # Data-frame specific tests expect_equal(ncol(d$val), 5, info=i) if(is.null(d$lon)) { expect_true(all(is.na(d$val$lon))) expect_true(all(is.na(d$val$lat))) } else { expect_equal(length(unique(d$val$lon)), length(unique(as.numeric(d$lon))), info=i) expect_equal(length(unique(d$val$lat)), length(unique(as.numeric(d$lat))), info=i) } if(is.null(d$Z)) { expect_true(all(is.na(d$val$Z)), info=i) } else { expect_equal(length(unique(d$val$Z)), length(unique(as.numeric(d$Z))), info=i) } if(is.null(d$time)) { expect_true(all(is.na(d$val$time)), info=i) } else { expect_equal(length(unique(d$val$time)), length(unique(as.numeric(d$time))), info=i) } } else if(is.array(d$val)) { # Array-frame 
specific tests dm <- dim(d$val) expect_equal(length(dm), 4, info=i) if(is.null(d$lon)) { expect_equal(dm[1], 1, info=i) expect_equal(dm[2], 1, info=i) } else { expect_equal(dm[1], length(unique(as.numeric(d$lon))), info=i) expect_equal(dm[2], length(unique(as.numeric(d$lat))), info=i) } if(is.null(d$Z)) { expect_equal(dm[3], 1, info=i) } else { expect_equal(dm[3], length(unique(as.numeric(d$Z))), info=i) } if(is.null(d$time)) { expect_equal(dm[4], 1, info=i) } else { expect_equal(dm[4], length(unique(as.numeric(d$time))), info=i) } } else stop("Unknown val type") } test_that("cmip5data generates annual and monthly data", { for(i in implementations) { d <- cmip5data(1, loadAs=i) structuretest(d, paste("A1", i)) expect_equal(length(dim(d$lon)), 2, info=i) expect_equal(length(dim(d$lat)), 2, info=i) expect_equal(length(d$time), 12, info=i) expect_equal(d$debug$timeFreqStr, "mon", info=i) d <- cmip5data(1, monthly=F, loadAs=i) structuretest(d, paste("A2", i)) expect_equal(length(dim(d$lon)), 2, info=i) expect_equal(length(dim(d$lat)), 2, info=i) expect_equal(length(d$time), 1, info=i) expect_equal(d$debug$timeFreqStr, "yr", info=i) } }) test_that("cmip5data obeys randomize", { for(i in implementations) { d1 <- cmip5data(1, randomize=T, loadAs=i) d2 <- cmip5data(1, randomize=T, loadAs=i) expect_false(all(RCMIP5:::vals(d1) == RCMIP5:::vals(d2)), info=i) } }) test_that("cmip5data creates area-only data", { lonsize <- 10 latsize <- 10 for(i in implementations) { d <- cmip5data(1, lonsize=lonsize, latsize=latsize, time=F, verbose=F, loadAs=i) structuretest(d, paste("D1", i)) expect_is(d$lon, "matrix", info=i) expect_is(d$lat, "matrix", info=i) expect_null(d$Z, info=i) expect_null(d$time, info=i) } }) test_that("cmip5data creates area and Z data", { lonsize <- 10 latsize <- 10 Zsize <- 5 for(i in implementations) { d <- cmip5data(1, lonsize=lonsize, latsize=latsize, Z=T, Zsize=Zsize, time=F, verbose=F, loadAs=i) structuretest(d, paste("E1", i)) expect_is(d$lon, "matrix", 
info=i) expect_is(d$lat, "matrix", info=i) expect_is(d$Z, "integer", info=i) expect_null(d$time, info=i) } }) test_that("cmip5data creates time-only data", { for(i in implementations) { d <- cmip5data(1, lonlat=F, Z=F, verbose=F, loadAs=i) structuretest(d, paste("F1", i)) expect_null(d$lon, info=i) expect_null(d$lat, info=i) expect_null(d$Z, info=i) expect_is(d$time, "numeric", info=i) } }) test_that("cmip5data creates irregular grids", { for(i in implementations) { d <- cmip5data(1, irregular=FALSE, verbose=F, loadAs=i) # test that no rows/columns exhibit varying values expect_true(all(apply(d$lon, 1, function(x) duplicated(x)[-1])), info=i) expect_true(all(apply(d$lat, 2, function(x) duplicated(x)[-1])), info=i) d <- cmip5data(1, irregular=TRUE, verbose=F, loadAs=i) # test that rows/columns have varying values expect_true(sum(apply(d$lon, 1, function(x) duplicated(x)[-1])) > 0, info=i) expect_true(sum(apply(d$lat, 2, function(x) duplicated(x)[-1])) > 0, info=i) } })
/tests/testthat/test_cmip5data.R
no_license
cran/RCMIP5
R
false
false
6,058
r
# Testing code for the RCMIP5 scripts in 'RCMIP5.R' # Uses the testthat package # See http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf library(testthat) # To run this code: # source("RCMIP5.R") # library(testthat) # test_file("tests/testthat/test_internalHelpers.R") context("cmip5data") implementations <- c("data.frame", "array") test_that("cmip5data handles bad input", { expect_error(cmip5data("hi")) expect_error(cmip5data(1, monthly=123)) expect_error(cmip5data(1, Z=123)) expect_error(cmip5data(1, lev=123)) expect_error(cmip5data(1, randomize="hi")) }) # helper function: test basic structural integrity structuretest <- function(d, i) { # d is cmip5data object, i is info expect_is(d, "cmip5data") expect_false(xor(is.null(d$lon), is.null(d$lat))) # both NULL, or both not if(!is.null(d$lon)) { expect_is(d$lon, "matrix", info=i) expect_is(d$lat, "matrix", info=i) } expect_is(d$model, "character", info=i) expect_is(d$variable, "character", info=i) expect_is(d$experiment, "character", info=i) expect_is(d$valUnit, "character", info=i) expect_is(d$debug, "list", info=i) if(!is.null(d$time)) { expect_is(d$debug$timeFreqStr, "character", info=i) expect_is(d$debug$calendarStr, "character", info=i) expect_is(d$debug$timeUnit, "character", info=i) } if(is.data.frame(d$val)) { # Data-frame specific tests expect_equal(ncol(d$val), 5, info=i) if(is.null(d$lon)) { expect_true(all(is.na(d$val$lon))) expect_true(all(is.na(d$val$lat))) } else { expect_equal(length(unique(d$val$lon)), length(unique(as.numeric(d$lon))), info=i) expect_equal(length(unique(d$val$lat)), length(unique(as.numeric(d$lat))), info=i) } if(is.null(d$Z)) { expect_true(all(is.na(d$val$Z)), info=i) } else { expect_equal(length(unique(d$val$Z)), length(unique(as.numeric(d$Z))), info=i) } if(is.null(d$time)) { expect_true(all(is.na(d$val$time)), info=i) } else { expect_equal(length(unique(d$val$time)), length(unique(as.numeric(d$time))), info=i) } } else if(is.array(d$val)) { # Array-frame 
specific tests dm <- dim(d$val) expect_equal(length(dm), 4, info=i) if(is.null(d$lon)) { expect_equal(dm[1], 1, info=i) expect_equal(dm[2], 1, info=i) } else { expect_equal(dm[1], length(unique(as.numeric(d$lon))), info=i) expect_equal(dm[2], length(unique(as.numeric(d$lat))), info=i) } if(is.null(d$Z)) { expect_equal(dm[3], 1, info=i) } else { expect_equal(dm[3], length(unique(as.numeric(d$Z))), info=i) } if(is.null(d$time)) { expect_equal(dm[4], 1, info=i) } else { expect_equal(dm[4], length(unique(as.numeric(d$time))), info=i) } } else stop("Unknown val type") } test_that("cmip5data generates annual and monthly data", { for(i in implementations) { d <- cmip5data(1, loadAs=i) structuretest(d, paste("A1", i)) expect_equal(length(dim(d$lon)), 2, info=i) expect_equal(length(dim(d$lat)), 2, info=i) expect_equal(length(d$time), 12, info=i) expect_equal(d$debug$timeFreqStr, "mon", info=i) d <- cmip5data(1, monthly=F, loadAs=i) structuretest(d, paste("A2", i)) expect_equal(length(dim(d$lon)), 2, info=i) expect_equal(length(dim(d$lat)), 2, info=i) expect_equal(length(d$time), 1, info=i) expect_equal(d$debug$timeFreqStr, "yr", info=i) } }) test_that("cmip5data obeys randomize", { for(i in implementations) { d1 <- cmip5data(1, randomize=T, loadAs=i) d2 <- cmip5data(1, randomize=T, loadAs=i) expect_false(all(RCMIP5:::vals(d1) == RCMIP5:::vals(d2)), info=i) } }) test_that("cmip5data creates area-only data", { lonsize <- 10 latsize <- 10 for(i in implementations) { d <- cmip5data(1, lonsize=lonsize, latsize=latsize, time=F, verbose=F, loadAs=i) structuretest(d, paste("D1", i)) expect_is(d$lon, "matrix", info=i) expect_is(d$lat, "matrix", info=i) expect_null(d$Z, info=i) expect_null(d$time, info=i) } }) test_that("cmip5data creates area and Z data", { lonsize <- 10 latsize <- 10 Zsize <- 5 for(i in implementations) { d <- cmip5data(1, lonsize=lonsize, latsize=latsize, Z=T, Zsize=Zsize, time=F, verbose=F, loadAs=i) structuretest(d, paste("E1", i)) expect_is(d$lon, "matrix", 
info=i) expect_is(d$lat, "matrix", info=i) expect_is(d$Z, "integer", info=i) expect_null(d$time, info=i) } }) test_that("cmip5data creates time-only data", { for(i in implementations) { d <- cmip5data(1, lonlat=F, Z=F, verbose=F, loadAs=i) structuretest(d, paste("F1", i)) expect_null(d$lon, info=i) expect_null(d$lat, info=i) expect_null(d$Z, info=i) expect_is(d$time, "numeric", info=i) } }) test_that("cmip5data creates irregular grids", { for(i in implementations) { d <- cmip5data(1, irregular=FALSE, verbose=F, loadAs=i) # test that no rows/columns exhibit varying values expect_true(all(apply(d$lon, 1, function(x) duplicated(x)[-1])), info=i) expect_true(all(apply(d$lat, 2, function(x) duplicated(x)[-1])), info=i) d <- cmip5data(1, irregular=TRUE, verbose=F, loadAs=i) # test that rows/columns have varying values expect_true(sum(apply(d$lon, 1, function(x) duplicated(x)[-1])) > 0, info=i) expect_true(sum(apply(d$lat, 2, function(x) duplicated(x)[-1])) > 0, info=i) } })
table = read.delim('C:/Users/rhirsch/Desktop/rasmc_enhancer_promoter/brd4_tables/subpeak_to_region_motif_density_table.txt', header=TRUE) scatter_table = cbind(table[,1:5],table[,10:14],log2(table[,12]/table[,11])) colnames(scatter_table)=c("SUBPEAK_ID","CHROM","START","STOP","REGION_ID","BRD4_UNSTIM","BRD4_2H","BRD4_24H","LOG2FC_2v0","LOG2FC_24v0",'LOG2FC_24v2') plot(scatter_table[,9],scatter_table[,11], xlim=c(-3,3),ylim=c(-3,3),xlab="Log2FC 2H versus Unstim", ylab="Log2FC 24H versus 2H", pch=1, cex=1) abline(h=0, v=0, lwd=2) #================================================================== #===========================DEPENDENCIES=========================== #================================================================== library(ggplot2) vector1 = scatter_table[,9] vector2 = scatter_table[,11] #================================================================== #=========================DENSITY PLOTS============================ #================================================================== #for some reason ggplot doesn't like looping print('Plotting contour scatter plot for RASMC subpeak regions') pdf(file='C:/Users/rhirsch/Desktop/rasmc_enhancer_promoter/brd4_tables/subpeak_to_region_scatter_plot_2v0_vs24v2.pdf',width =6.5,height =5) dataset = structure(list(x= vector1,y=vector2),class = "data.frame") ggplot(dataset, aes(x, y)) + geom_point(data = dataset,cex=0.5,pch=16,col=rgb(0.5,0.5,0.5,0.5)) + geom_hline(yintercept = 0) + geom_vline(xintercept = 0) + stat_density2d(aes(alpha=..level.., fill=..level..), size=2, bins=50, geom="polygon") + scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) + geom_density2d(colour=rgb(0.5,0.5,0.5,.8)) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) dev.off() vector1 = scatter_table[,9] vector2 = scatter_table[,10] print('Plotting contour scatter plot for RASMC 
subpeak regions') pdf(file='C:/Users/rhirsch/Desktop/rasmc_enhancer_promoter/brd4_tables/subpeak_to_region_scatter_plot_2v0_vs24v0.pdf',width =6.5,height =5) dataset = structure(list(x= vector1,y=vector2),class = "data.frame") ggplot(dataset, aes(x, y)) + geom_point(data = dataset,cex=0.5,pch=16,col=rgb(0.5,0.5,0.5,0.5)) + geom_hline(yintercept = 0) + geom_vline(xintercept = 0) + stat_density2d(aes(alpha=..level.., fill=..level..), size=2, bins=50, geom="polygon") + scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) + geom_density2d(colour=rgb(0.5,0.5,0.5,.8)) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) dev.off()
/r_scripts/subpeak_regions_scatter_plot.R
permissive
linlabbcm/RASMC_Phenotypic_Switching
R
false
false
2,901
r
table = read.delim('C:/Users/rhirsch/Desktop/rasmc_enhancer_promoter/brd4_tables/subpeak_to_region_motif_density_table.txt', header=TRUE) scatter_table = cbind(table[,1:5],table[,10:14],log2(table[,12]/table[,11])) colnames(scatter_table)=c("SUBPEAK_ID","CHROM","START","STOP","REGION_ID","BRD4_UNSTIM","BRD4_2H","BRD4_24H","LOG2FC_2v0","LOG2FC_24v0",'LOG2FC_24v2') plot(scatter_table[,9],scatter_table[,11], xlim=c(-3,3),ylim=c(-3,3),xlab="Log2FC 2H versus Unstim", ylab="Log2FC 24H versus 2H", pch=1, cex=1) abline(h=0, v=0, lwd=2) #================================================================== #===========================DEPENDENCIES=========================== #================================================================== library(ggplot2) vector1 = scatter_table[,9] vector2 = scatter_table[,11] #================================================================== #=========================DENSITY PLOTS============================ #================================================================== #for some reason ggplot doesn't like looping print('Plotting contour scatter plot for RASMC subpeak regions') pdf(file='C:/Users/rhirsch/Desktop/rasmc_enhancer_promoter/brd4_tables/subpeak_to_region_scatter_plot_2v0_vs24v2.pdf',width =6.5,height =5) dataset = structure(list(x= vector1,y=vector2),class = "data.frame") ggplot(dataset, aes(x, y)) + geom_point(data = dataset,cex=0.5,pch=16,col=rgb(0.5,0.5,0.5,0.5)) + geom_hline(yintercept = 0) + geom_vline(xintercept = 0) + stat_density2d(aes(alpha=..level.., fill=..level..), size=2, bins=50, geom="polygon") + scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) + geom_density2d(colour=rgb(0.5,0.5,0.5,.8)) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) dev.off() vector1 = scatter_table[,9] vector2 = scatter_table[,10] print('Plotting contour scatter plot for RASMC 
subpeak regions') pdf(file='C:/Users/rhirsch/Desktop/rasmc_enhancer_promoter/brd4_tables/subpeak_to_region_scatter_plot_2v0_vs24v0.pdf',width =6.5,height =5) dataset = structure(list(x= vector1,y=vector2),class = "data.frame") ggplot(dataset, aes(x, y)) + geom_point(data = dataset,cex=0.5,pch=16,col=rgb(0.5,0.5,0.5,0.5)) + geom_hline(yintercept = 0) + geom_vline(xintercept = 0) + stat_density2d(aes(alpha=..level.., fill=..level..), size=2, bins=50, geom="polygon") + scale_fill_gradient(low = "yellow", high = "red") + scale_alpha(range = c(0.00, 0.5), guide = FALSE) + geom_density2d(colour=rgb(0.5,0.5,0.5,.8)) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) dev.off()
#data clean setwd("/Users/Ryan/Desktop/EDAV/") survey <- read.csv("Survey+Response.csv") #naming vars and removing empty columns names(survey) <- c("waitlist","program","tools","exp.Rmodeling","b5","b6","b7","b8","b9","b10","b11","gender","primaryeditor","exp.Rgraphics","exp.Radvanced","exp.documentation","exp.Matlab","exp.Github","b19","b20","b21","b22","b23","b24","b25","b26","b27","b28","b29","b30","b31","b32","b33","b34","b35","b36","b37","b38") survey <- survey[,c(-5:-11,-19:-38)] #dummy variables for each language/tool in tools tooldummies = c() toolList <- c("Matlab","lattice","Github","Excel","SQL","RStudio","ggplot2","shell", "C/C","Python","LaTeX","(grep)","Sweave/knitr","XML","Web: html css js","dropbox","google drive","SPSS","Stata") for(t in toolList){ tooldummies <- cbind(tooldummies,grepl(t,survey$tools)) } tooldummies <- cbind(tooldummies,(grepl("R,",survey$tools)==TRUE | (grepl("R",survey$tools)==TRUE & grepl("RStudio",survey$tools)==FALSE))) colnames(tooldummies) <- c("Matlab","lattice","GitHub","Excel","SQL","RStudio","ggplot2","shell", "C","Python","LaTeX","grep","Sweave","XML","Web","dropbox","googledrive","SPSS","Stata","R") survey <- cbind(survey,tooldummies) #quick summary info summary(survey) par(las = 2) par(mar=c(5,5,5,5)) barplot(apply(survey[,12:31],2,mean),ylab = "Proportion of Class",ylim = c(0,1)) library(corrplot) corrplot(cor(survey[,12:31]),method='square')
/data_clean.R
no_license
rwalsh03/EDAV_HW1
R
false
false
1,420
r
#data clean setwd("/Users/Ryan/Desktop/EDAV/") survey <- read.csv("Survey+Response.csv") #naming vars and removing empty columns names(survey) <- c("waitlist","program","tools","exp.Rmodeling","b5","b6","b7","b8","b9","b10","b11","gender","primaryeditor","exp.Rgraphics","exp.Radvanced","exp.documentation","exp.Matlab","exp.Github","b19","b20","b21","b22","b23","b24","b25","b26","b27","b28","b29","b30","b31","b32","b33","b34","b35","b36","b37","b38") survey <- survey[,c(-5:-11,-19:-38)] #dummy variables for each language/tool in tools tooldummies = c() toolList <- c("Matlab","lattice","Github","Excel","SQL","RStudio","ggplot2","shell", "C/C","Python","LaTeX","(grep)","Sweave/knitr","XML","Web: html css js","dropbox","google drive","SPSS","Stata") for(t in toolList){ tooldummies <- cbind(tooldummies,grepl(t,survey$tools)) } tooldummies <- cbind(tooldummies,(grepl("R,",survey$tools)==TRUE | (grepl("R",survey$tools)==TRUE & grepl("RStudio",survey$tools)==FALSE))) colnames(tooldummies) <- c("Matlab","lattice","GitHub","Excel","SQL","RStudio","ggplot2","shell", "C","Python","LaTeX","grep","Sweave","XML","Web","dropbox","googledrive","SPSS","Stata","R") survey <- cbind(survey,tooldummies) #quick summary info summary(survey) par(las = 2) par(mar=c(5,5,5,5)) barplot(apply(survey[,12:31],2,mean),ylab = "Proportion of Class",ylim = c(0,1)) library(corrplot) corrplot(cor(survey[,12:31]),method='square')
#' @title PCA on a distributed dataset #' @description This function is similar to the R function princomp applied on the covariance matrix of the distributed dataset. #' It has the side effect of creating a scores dataframe on each node - that can be used by subsequent calls to 'biplot'. #' @param df a character name of the dataframe. The dataframe can contain character columns or factors in which case only the numeric columns will be considered. #' @param type a character which represents the type of analysis to carry out. #' If type is set to 'combine', global column means are calculated if type is set to 'split', the column means are #' calculated separately for each node. #' @param center a logical, should the columns be centered? Default TRUE. #' @param scale a logical, should the columns be scaled? Default FALSE. #' @param scores.suffix a character. The name of the scores dataframe will be the concatenation between df and scores.suffix. #' @param async a logical, see datashield.aggregate #' @param datasources a list of opal objects obtained after logging into the opal servers (see datashield.login) #' @return a list with one element for each node (or one $global element if type='combine'). 
Each element contains #' a stripped down princomp object (the 'scores' element is replaced with the name of the scores dataframe on the remote nodes) #' @export #' dssPrincomp <- function(df, type = 'combine', center = TRUE, scale = FALSE, scores.suffix = '_scores', async = TRUE, datasources = NULL){ if(!(type %in% c('combine', 'split'))){ stop('Function argument "type" has to be either "combine" or "split"') } if(is.null(datasources)){ datasources <- datashield.connections_find() } covlist <- dssCov(df, type = type, async = async, datasources = datasources) pca.builder <- function(x){ pca <- princomp(covmat = x$vcov) # to get an exact value of the loadings we need to recalculate the eigen vectors without assuming a symmetric matrix # then replace the loadings with the result # this allows correct matching of directions between the distributed and the local versions of the same dataset (e.g 'iris') xx <- eigen(x$vcov, symmetric = FALSE) pca$loadings[] <-xx$vectors[] pca$n.obs <- x$nrows pca$lam <- pca$sdev * sqrt(x$nrows) # for biplot pca$scores <- paste0(df, scores.suffix) class(pca) <- append( 'dssPrincomp', class(pca)) pca } pcalist <- Map(pca.builder, covlist) scores.builder <- function(x){ loadings <- .encode.arg(unname(pcalist[[x]]$loadings[])) means <- .encode.arg(unname(covlist[[x]]$means)) sds <- .encode.arg(pcalist[[x]]$sdev) lam <- .encode.arg(pcalist[[x]]$lam) # expr <- paste0('pca.scores(',df, ',"', loadings, '",', center, ',', scale, ',', FALSE, ',"' , means, '","', sds, '","', lam, '")') #build expr as a list to be sent as.call expr <- list(as.symbol('pcaScores'), as.symbol(df), loadings, center, scale, FALSE, means, sds, lam) if(x == 'global'){ nodes <- datasources } else { nodes <- datasources[x] } #datashield.assign(nodes, paste0(df, scores.suffix), as.symbol(expr), async = async) #send expr as.call and not as.symbol (10 thousand char limit): datashield.assign(nodes, paste0(df, scores.suffix), as.call(expr), async = async) } if(type == 'combine'){ 
mappee <- c('global') } else { mappee <- names(datasources) } Map(scores.builder, mappee) pcalist } #' @title Biplot a dssPrincomp object #' @description Biplot implementation for PCA on distributed datasets #' @param x an object of class dssPrincomp. #' @param choices length 2 vector specifying the components to plot (same as for biplot.princomp) #' @param type a character which represents the type of analysis to carry out. #' If type is set to 'combine', global column means are calculated if type is set to 'split', the column means are #' calculated separately for each node. #' @param levels a character the name of a factor. If provided, the plot will be colour coded by the levels of this factor #' @param draw.arrows a logical, should I draw arrows representing the underlying variables? Default TRUE. #' @param ... further arguments to be passed to dssSmooth2d (see doc) #' @param datasources a list of opal objects obtained after logging into the opal servers (see datashield.login) #' @export #' biplot.dssPrincomp <- function (x, choices = 1L:2L, type = 'combine', levels = NULL, draw.arrows = TRUE, ..., datasources = NULL){ if(!(type %in% c('combine', 'split'))){ stop('Function argument "type" has to be either "combine" or "split"') } if(is.null(datasources)){ datasources <- datashield.connections_find() } if (length(choices) != 2L){ stop("length of choices must be 2") } y <- t(t(x$loadings[,choices])* x$lam[choices]) v1 <- paste0(x$scores, '$Comp.', choices[1]) v2 <- paste0(x$scores, '$Comp.', choices[2]) ylabs <- dimnames(y)[[1L]] if(!is.null(levels)){ if(length(levels) >1){ warn("Only one column allowed in 'levels'. 
Will use the first one.") levels <- levels[1] } pl <- dssSmooth2d(v1, v2, categories = levels, draw.image = TRUE, ..., datasources = datasources) } else { pl <- dssSmooth2d(v1, v2, draw.image = TRUE, ..., datasources = datasources) } if(draw.arrows){ rangx1 <- dssRange(v1, datasources = datasources ) rangx2 <- dssRange(v2, datasources = datasources) rangy1 <- range(y[, 1L]) rangy2 <- range(y[, 2L]) xlim <- ylim <- rangx1 <- rangx2 <- range(rangx1, rangx2) ratio <- max(rangy1/rangx1, rangy2/rangx2) par(new = TRUE) plot(y, axes = FALSE, type = "n", xlim = xlim * ratio, ylim = ylim * ratio, xlab = "", ylab = "", col = 'red') axis(3, col = 'red') axis(4, col = 'red') text(y, labels = ylabs, cex = 1, col = 'red') arrows(0,0,y[,1] *0.8 , y[,2] *0.8, col = 'red', length = 0.1) } invisible() }
/R/dssPrincomp.R
no_license
neelsoumya/dsSwissKnifeClient
R
false
false
6,007
r
#' @title PCA on a distributed dataset #' @description This function is similar to the R function princomp applied on the covariance matrix of the distributed dataset. #' It has the side effect of creating a scores dataframe on each node - that can be used by subsequent calls to 'biplot'. #' @param df a character name of the dataframe. The dataframe can contain character columns or factors in which case only the numeric columns will be considered. #' @param type a character which represents the type of analysis to carry out. #' If type is set to 'combine', global column means are calculated if type is set to 'split', the column means are #' calculated separately for each node. #' @param center a logical, should the columns be centered? Default TRUE. #' @param scale a logical, should the columns be scaled? Default FALSE. #' @param scores.suffix a character. The name of the scores dataframe will be the concatenation between df and scores.suffix. #' @param async a logical, see datashield.aggregate #' @param datasources a list of opal objects obtained after logging into the opal servers (see datashield.login) #' @return a list with one element for each node (or one $global element if type='combine'). 
Each element contains #' a stripped down princomp object (the 'scores' element is replaced with the name of the scores dataframe on the remote nodes) #' @export #' dssPrincomp <- function(df, type = 'combine', center = TRUE, scale = FALSE, scores.suffix = '_scores', async = TRUE, datasources = NULL){ if(!(type %in% c('combine', 'split'))){ stop('Function argument "type" has to be either "combine" or "split"') } if(is.null(datasources)){ datasources <- datashield.connections_find() } covlist <- dssCov(df, type = type, async = async, datasources = datasources) pca.builder <- function(x){ pca <- princomp(covmat = x$vcov) # to get an exact value of the loadings we need to recalculate the eigen vectors without assuming a symmetric matrix # then replace the loadings with the result # this allows correct matching of directions between the distributed and the local versions of the same dataset (e.g 'iris') xx <- eigen(x$vcov, symmetric = FALSE) pca$loadings[] <-xx$vectors[] pca$n.obs <- x$nrows pca$lam <- pca$sdev * sqrt(x$nrows) # for biplot pca$scores <- paste0(df, scores.suffix) class(pca) <- append( 'dssPrincomp', class(pca)) pca } pcalist <- Map(pca.builder, covlist) scores.builder <- function(x){ loadings <- .encode.arg(unname(pcalist[[x]]$loadings[])) means <- .encode.arg(unname(covlist[[x]]$means)) sds <- .encode.arg(pcalist[[x]]$sdev) lam <- .encode.arg(pcalist[[x]]$lam) # expr <- paste0('pca.scores(',df, ',"', loadings, '",', center, ',', scale, ',', FALSE, ',"' , means, '","', sds, '","', lam, '")') #build expr as a list to be sent as.call expr <- list(as.symbol('pcaScores'), as.symbol(df), loadings, center, scale, FALSE, means, sds, lam) if(x == 'global'){ nodes <- datasources } else { nodes <- datasources[x] } #datashield.assign(nodes, paste0(df, scores.suffix), as.symbol(expr), async = async) #send expr as.call and not as.symbol (10 thousand char limit): datashield.assign(nodes, paste0(df, scores.suffix), as.call(expr), async = async) } if(type == 'combine'){ 
mappee <- c('global') } else { mappee <- names(datasources) } Map(scores.builder, mappee) pcalist } #' @title Biplot a dssPrincomp object #' @description Biplot implementation for PCA on distributed datasets #' @param x an object of class dssPrincomp. #' @param choices length 2 vector specifying the components to plot (same as for biplot.princomp) #' @param type a character which represents the type of analysis to carry out. #' If type is set to 'combine', global column means are calculated if type is set to 'split', the column means are #' calculated separately for each node. #' @param levels a character the name of a factor. If provided, the plot will be colour coded by the levels of this factor #' @param draw.arrows a logical, should I draw arrows representing the underlying variables? Default TRUE. #' @param ... further arguments to be passed to dssSmooth2d (see doc) #' @param datasources a list of opal objects obtained after logging into the opal servers (see datashield.login) #' @export #' biplot.dssPrincomp <- function (x, choices = 1L:2L, type = 'combine', levels = NULL, draw.arrows = TRUE, ..., datasources = NULL){ if(!(type %in% c('combine', 'split'))){ stop('Function argument "type" has to be either "combine" or "split"') } if(is.null(datasources)){ datasources <- datashield.connections_find() } if (length(choices) != 2L){ stop("length of choices must be 2") } y <- t(t(x$loadings[,choices])* x$lam[choices]) v1 <- paste0(x$scores, '$Comp.', choices[1]) v2 <- paste0(x$scores, '$Comp.', choices[2]) ylabs <- dimnames(y)[[1L]] if(!is.null(levels)){ if(length(levels) >1){ warn("Only one column allowed in 'levels'. 
Will use the first one.") levels <- levels[1] } pl <- dssSmooth2d(v1, v2, categories = levels, draw.image = TRUE, ..., datasources = datasources) } else { pl <- dssSmooth2d(v1, v2, draw.image = TRUE, ..., datasources = datasources) } if(draw.arrows){ rangx1 <- dssRange(v1, datasources = datasources ) rangx2 <- dssRange(v2, datasources = datasources) rangy1 <- range(y[, 1L]) rangy2 <- range(y[, 2L]) xlim <- ylim <- rangx1 <- rangx2 <- range(rangx1, rangx2) ratio <- max(rangy1/rangx1, rangy2/rangx2) par(new = TRUE) plot(y, axes = FALSE, type = "n", xlim = xlim * ratio, ylim = ylim * ratio, xlab = "", ylab = "", col = 'red') axis(3, col = 'red') axis(4, col = 'red') text(y, labels = ylabs, cex = 1, col = 'red') arrows(0,0,y[,1] *0.8 , y[,2] *0.8, col = 'red', length = 0.1) } invisible() }
wk1d <- read.table("household_power_consumption.txt", sep=";",header=T) data <- wk1d[wk1d$Date=='1/2/2007'| wk1d$Date=='2/2/2007',] data$dt<-paste(data$Date,data$Time) data$datetime<-strptime(data$dt,"%d/%m/%Y %H:%M:%S") par(mfcol=c(2,2)) with(data, { plot(data$datetime,as.numeric(as.character(data$Global_active_power)), type='l', xlab="",ylab="Global Active Power(kilowatts)") plot(data$datetime,as.numeric(as.character(data$Sub_metering_1)), type='l', xlab="",ylab="Energy sub metering") lines(data$datetime,as.numeric(as.character(data$Sub_metering_2)), col="red") lines(data$datetime,as.numeric(as.character(data$Sub_metering_3)), col="green") legend("topright",c("Sub metering 1","Sub metering 2","Sub metering 3"), lty=1, col=c("black","red","green"),cex=0.6) plot(data$datetime,as.numeric(as.character(data$Voltage)), type='l', xlab="datetime",ylab="voltage") plot(data$datetime,as.numeric(as.character(data$Global_reactive_power)), type='l', xlab="datetime",ylab="Global reactive power") }) dev.copy(png, file="Plot4.png")
/plot4.R
no_license
blsingh/ExpAnalysis
R
false
false
1,066
r
wk1d <- read.table("household_power_consumption.txt", sep=";",header=T) data <- wk1d[wk1d$Date=='1/2/2007'| wk1d$Date=='2/2/2007',] data$dt<-paste(data$Date,data$Time) data$datetime<-strptime(data$dt,"%d/%m/%Y %H:%M:%S") par(mfcol=c(2,2)) with(data, { plot(data$datetime,as.numeric(as.character(data$Global_active_power)), type='l', xlab="",ylab="Global Active Power(kilowatts)") plot(data$datetime,as.numeric(as.character(data$Sub_metering_1)), type='l', xlab="",ylab="Energy sub metering") lines(data$datetime,as.numeric(as.character(data$Sub_metering_2)), col="red") lines(data$datetime,as.numeric(as.character(data$Sub_metering_3)), col="green") legend("topright",c("Sub metering 1","Sub metering 2","Sub metering 3"), lty=1, col=c("black","red","green"),cex=0.6) plot(data$datetime,as.numeric(as.character(data$Voltage)), type='l', xlab="datetime",ylab="voltage") plot(data$datetime,as.numeric(as.character(data$Global_reactive_power)), type='l', xlab="datetime",ylab="Global reactive power") }) dev.copy(png, file="Plot4.png")
cov.dist.cat <- function(vars, dataset, exposure) { out_list1 <- as.list(NULL) for(i in 1:length(vars)) { print(vars[i]) dataset[[vars[i]]] <- as.factor(dataset[[vars[i]]]) dataset[[vars[i]]] <- droplevels(dataset[[vars[i]]]) ds0 <- dataset[dataset[[exposure]]==0,] ds1 <- dataset[dataset[[exposure]]==1,] n_all <- as.numeric(table(dataset[[vars[i]]])) n0 <- as.numeric(table(ds0[[vars[i]]])) n1 <- as.numeric(table(ds1[[vars[i]]])) p_all <- n_all / nrow(dataset) p0 <- n0 / nrow(ds0) p1 <- n1 / nrow(ds1) var_all <- p_all*(1-p_all) var0 <- p0*(1-p0) var1 <- p1*(1-p1) K = nlevels(dataset[[vars[i]]]) if(K==1) {abs_std_diff <- 0} if(K>1) { diff <- p1[-1] - p0[-1] k=rep(2:K,times=K-1) l=rep(2:K,each=K-1) s <- ifelse(k==l, (var0[k] + var1[l]) / 2, (p0[k]*p0[l] + p1[k]*p1[l]) / 2) s <- matrix(s, nrow=K-1, byrow=FALSE) abs_std_diff <- abs(as.numeric(sqrt(diff %*% solve(s) %*% diff))) abs_std_diff <- c(abs_std_diff, rep("",K-1)) } out_list1[[i]] <- as.data.frame(cbind(c(vars[i], rep("", nlevels(dataset[[vars[i]]])-1)), levels(dataset[[vars[i]]]), n_all, p_all, n0, p0, n1, p1, abs_std_diff)) } out_df1 <- out_list1[[1]] if(length(vars)>1) { for(i in 2:length(vars)) {out_df1 <- rbind(out_df1, out_list1[[i]])} } rownames(out_df1) <- NULL colnames(out_df1)[1:2] <- c("characteristic", "level") return(out_df1) }
/analysis/cov_dist_cat.R
permissive
opensafely/MH_pandemic
R
false
false
1,606
r
cov.dist.cat <- function(vars, dataset, exposure) { out_list1 <- as.list(NULL) for(i in 1:length(vars)) { print(vars[i]) dataset[[vars[i]]] <- as.factor(dataset[[vars[i]]]) dataset[[vars[i]]] <- droplevels(dataset[[vars[i]]]) ds0 <- dataset[dataset[[exposure]]==0,] ds1 <- dataset[dataset[[exposure]]==1,] n_all <- as.numeric(table(dataset[[vars[i]]])) n0 <- as.numeric(table(ds0[[vars[i]]])) n1 <- as.numeric(table(ds1[[vars[i]]])) p_all <- n_all / nrow(dataset) p0 <- n0 / nrow(ds0) p1 <- n1 / nrow(ds1) var_all <- p_all*(1-p_all) var0 <- p0*(1-p0) var1 <- p1*(1-p1) K = nlevels(dataset[[vars[i]]]) if(K==1) {abs_std_diff <- 0} if(K>1) { diff <- p1[-1] - p0[-1] k=rep(2:K,times=K-1) l=rep(2:K,each=K-1) s <- ifelse(k==l, (var0[k] + var1[l]) / 2, (p0[k]*p0[l] + p1[k]*p1[l]) / 2) s <- matrix(s, nrow=K-1, byrow=FALSE) abs_std_diff <- abs(as.numeric(sqrt(diff %*% solve(s) %*% diff))) abs_std_diff <- c(abs_std_diff, rep("",K-1)) } out_list1[[i]] <- as.data.frame(cbind(c(vars[i], rep("", nlevels(dataset[[vars[i]]])-1)), levels(dataset[[vars[i]]]), n_all, p_all, n0, p0, n1, p1, abs_std_diff)) } out_df1 <- out_list1[[1]] if(length(vars)>1) { for(i in 2:length(vars)) {out_df1 <- rbind(out_df1, out_list1[[i]])} } rownames(out_df1) <- NULL colnames(out_df1)[1:2] <- c("characteristic", "level") return(out_df1) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generics.R \docType{class} \name{intsp-class} \alias{intsp-class} \alias{intsp} \title{An interval extension of a SpatialPointsDataFrame} \description{ An interval extension of a SpatialPointsDataFrame } \section{Slots}{ \describe{ \item{\code{interval}}{A matrix of two columns representing the lower and upper endpoints of an interval.} }}
/man/intsp-class.Rd
no_license
beanb2/intkrige
R
false
true
422
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generics.R \docType{class} \name{intsp-class} \alias{intsp-class} \alias{intsp} \title{An interval extension of a SpatialPointsDataFrame} \description{ An interval extension of a SpatialPointsDataFrame } \section{Slots}{ \describe{ \item{\code{interval}}{A matrix of two columns representing the lower and upper endpoints of an interval.} }}
#' Run a Python REPL #' #' This function provides a Python REPL in the \R session, which can be used #' to interactively run Python code. All code executed within the REPL is #' run within the Python main module, and any generated Python objects will #' persist in the Python session after the REPL is detached. #' #' When working with R and Python scripts interactively, one can activate #' the Python REPL with `repl_python()`, run Python code, and later run `exit` #' to return to the \R console. #' #' @examples \dontrun{ #' #' # enter the Python REPL, create a dictionary, and exit #' repl_python() #' dictionary = {'alpha': 1, 'beta': 2} #' exit #' #' # access the created dictionary from R #' py$dictionary #' # $alpha #' # [1] 1 #' # #' # $beta #' # [1] 2 #' #' } #' #' @param module An (optional) Python module to be imported before #' the REPL is launched. #' #' @param quiet Boolean; print a startup banner when launching the REPL? If #' `TRUE`, the banner will be suppressed. #' #' @seealso [py], for accessing objects created using the Python REPL. 
#' #' @importFrom utils packageVersion #' @export repl_python <- function( module = NULL, quiet = getOption("reticulate.repl.quiet", default = FALSE)) { # load module if requested if (is.character(module)) import(module) # run hooks for initialize, teardown initialize <- getOption("reticulate.repl.initialize") if (is.function(initialize)) { initialize() } teardown <- getOption("reticulate.repl.teardown") if (is.function(teardown)) { on.exit(teardown(), add = TRUE) } # import other required modules for the REPL builtins <- import_builtins(convert = FALSE) sys <- import("sys", convert = TRUE) codeop <- import("codeop", convert = TRUE) # grab references to the locals, globals of the main module locals <- py_run_string("locals()") globals <- py_run_string("globals()") # check to see if the current environment supports history # (check for case where working directory not writable) use_history <- !"--vanilla" %in% commandArgs() && !"--no-save" %in% commandArgs() && !is.null(getwd()) && tryCatch( { utils::savehistory(tempfile()); TRUE }, error = function(e) FALSE ) if (use_history) { # if we have history, save and then restore the current # R history utils::savehistory() on.exit(utils::loadhistory(), add = TRUE) # file to be used for command history during session histfile <- getOption("reticulate.repl.histfile") if (is.null(histfile)) histfile <- file.path(tempdir(), ".reticulatehistory") # load history (create empty file if none exists yet) if (!file.exists(histfile)) file.create(histfile) utils::loadhistory(histfile) } # buffer of pending console input (we don't evaluate code # until the user has submitted a complete Python statement) # # we return an environment of functions bound in a local environment # so that hook can manipulate the buffer if required buffer <- new_stack() # command compiler (used to check if we've received a complete piece # of Python input) compiler <- codeop$CommandCompiler() # record whether the used has requested a quit quit_requested <- 
FALSE # inform others that the reticulate REPL is active .globals$py_repl_active <- TRUE on.exit(.globals$py_repl_active <- FALSE, add = TRUE) # handle errors produced during REPL actions handle_error <- function(output) { failed <- inherits(output, "error") if (failed) { error <- py_last_error() message(paste(error$type, error$value, sep = ": ")) } failed } # submit code for evaluation. return TRUE if evaluation succeeded process <- function(code) { # Python's command compiler complains if the only thing you submit # is a comment, so detect that case first if (grepl("^\\s*#", code)) return(TRUE) # Python is picky about trailing whitespace, so ensure only a single # newline follows the code to be submitted code <- sub("\\s*$", "\n", code) # now compile and run the code. we use 'single' mode to ensure that # python auto-prints the statement as it is evaluated. compiled <- tryCatch(builtins$compile(code, '<string>', 'single'), error = identity) if (handle_error(compiled)) return(FALSE) output <- tryCatch(builtins$eval(compiled, globals, locals), error = identity) if (handle_error(output)) return(FALSE) # ensure stdout, stderr flushed (required for Python 3) sys$stdout$flush() sys$stderr$flush() TRUE } repl <- function() { # read user input prompt <- if (buffer$empty()) ">>> " else "... 
" contents <- readline(prompt = prompt) # NULL implies the user sent EOF -- time to leave if (is.null(contents)) { writeLines("exit", con = stdout()) quit_requested <<- TRUE return() } # trim whitespace for handling of special commands trimmed <- gsub("^\\s*|\\s*$", "", contents) # run hook provided by front-end (in case special actions # need to be taken in response to console input) hook <- getOption("reticulate.repl.hook") if (is.function(hook)) { status <- tryCatch(hook(buffer, contents, trimmed), error = identity) # report errors to the user if (inherits(status, "error")) { message(paste("Error:", conditionMessage(status))) return() } # a TRUE return implies the hook handled this input if (isTRUE(status)) return() } # special handling for top-level commands (when buffer is empty) if (buffer$empty()) { # handle user requests to quit if (trimmed %in% c("quit", "exit")) { quit_requested <<- TRUE return() } # special handling for help requests prefixed with '?' if (regexpr("?", trimmed, fixed = TRUE) == 1) { code <- sprintf("help(\"%s\")", substring(trimmed, 2)) py_run_string(code) return() } # similar handling for help requests postfixed with '?' 
if (grepl("[?]\\s*$", trimmed)) { replaced <- sub("[?]\\s*$", "", trimmed) code <- sprintf("help(\"%s\")", replaced) py_run_string(code) return() } # if the user submitted a blank line at the top level, # ignore it (note that we intentionally submit whitespace-only # lines that might terminate a block) if (!nzchar(trimmed)) return() } # update history file if (use_history) { write(contents, file = histfile, append = TRUE) utils::loadhistory(histfile) } # update buffer previous <- buffer$data() buffer$push(contents) # generate code to be sent to command interpreter code <- paste(buffer$data(), collapse = "\n") ready <- tryCatch(compiler(code), condition = identity) # a NULL return implies that we can accept more input if (is.null(ready)) return() # on error, attempt to submit the previous buffer and then handle # the newest line of code independently. this allows us to handle # python constructs such as: # # def foo(): # return 42 # foo() # # try: # print 1 # except: # print 2 # print 3 # # which would otherwise fail if (length(previous) && inherits(ready, "error")) { # submit previous code process(paste(previous, collapse = "\n")) # now, handle the newest line of code submitted buffer$set(contents) code <- contents ready <- tryCatch(compiler(code), condition = identity) # a NULL return implies that we can accept more input if (is.null(ready)) return() } # otherwise, we should have received a code output object # so we can just run the code submitted thus far buffer$clear() process(code) } # notify the user we're entering the REPL (when requested) if (!quiet) { version <- paste( sys$version_info$major, sys$version_info$minor, sys$version_info$micro, sep = "." ) # NOTE: we used to use sys.executable but that would report # the R process rather than the Python process config <- py_config() executable <- config$python fmt <- c( "Python %s (%s)", "Reticulate %s REPL -- A Python interpreter in R." 
) msg <- sprintf( paste(fmt, collapse = "\n"), version, executable, utils::packageVersion("reticulate") ) message(msg) } # enter the REPL loop repeat { if (quit_requested) break repl() } } # Check Whether the Python REPL is Active # # Check to see whether the Python REPL is active. This is primarily # for use by R front-ends, which might want to toggle or affect # the state of the Python REPL while it is running. py_repl_active <- function() { .globals$py_repl_active }
/R/repl.R
permissive
hitfuture/reticulate
R
false
false
8,944
r
#' Run a Python REPL #' #' This function provides a Python REPL in the \R session, which can be used #' to interactively run Python code. All code executed within the REPL is #' run within the Python main module, and any generated Python objects will #' persist in the Python session after the REPL is detached. #' #' When working with R and Python scripts interactively, one can activate #' the Python REPL with `repl_python()`, run Python code, and later run `exit` #' to return to the \R console. #' #' @examples \dontrun{ #' #' # enter the Python REPL, create a dictionary, and exit #' repl_python() #' dictionary = {'alpha': 1, 'beta': 2} #' exit #' #' # access the created dictionary from R #' py$dictionary #' # $alpha #' # [1] 1 #' # #' # $beta #' # [1] 2 #' #' } #' #' @param module An (optional) Python module to be imported before #' the REPL is launched. #' #' @param quiet Boolean; print a startup banner when launching the REPL? If #' `TRUE`, the banner will be suppressed. #' #' @seealso [py], for accessing objects created using the Python REPL. 
#' #' @importFrom utils packageVersion #' @export repl_python <- function( module = NULL, quiet = getOption("reticulate.repl.quiet", default = FALSE)) { # load module if requested if (is.character(module)) import(module) # run hooks for initialize, teardown initialize <- getOption("reticulate.repl.initialize") if (is.function(initialize)) { initialize() } teardown <- getOption("reticulate.repl.teardown") if (is.function(teardown)) { on.exit(teardown(), add = TRUE) } # import other required modules for the REPL builtins <- import_builtins(convert = FALSE) sys <- import("sys", convert = TRUE) codeop <- import("codeop", convert = TRUE) # grab references to the locals, globals of the main module locals <- py_run_string("locals()") globals <- py_run_string("globals()") # check to see if the current environment supports history # (check for case where working directory not writable) use_history <- !"--vanilla" %in% commandArgs() && !"--no-save" %in% commandArgs() && !is.null(getwd()) && tryCatch( { utils::savehistory(tempfile()); TRUE }, error = function(e) FALSE ) if (use_history) { # if we have history, save and then restore the current # R history utils::savehistory() on.exit(utils::loadhistory(), add = TRUE) # file to be used for command history during session histfile <- getOption("reticulate.repl.histfile") if (is.null(histfile)) histfile <- file.path(tempdir(), ".reticulatehistory") # load history (create empty file if none exists yet) if (!file.exists(histfile)) file.create(histfile) utils::loadhistory(histfile) } # buffer of pending console input (we don't evaluate code # until the user has submitted a complete Python statement) # # we return an environment of functions bound in a local environment # so that hook can manipulate the buffer if required buffer <- new_stack() # command compiler (used to check if we've received a complete piece # of Python input) compiler <- codeop$CommandCompiler() # record whether the used has requested a quit quit_requested <- 
FALSE # inform others that the reticulate REPL is active .globals$py_repl_active <- TRUE on.exit(.globals$py_repl_active <- FALSE, add = TRUE) # handle errors produced during REPL actions handle_error <- function(output) { failed <- inherits(output, "error") if (failed) { error <- py_last_error() message(paste(error$type, error$value, sep = ": ")) } failed } # submit code for evaluation. return TRUE if evaluation succeeded process <- function(code) { # Python's command compiler complains if the only thing you submit # is a comment, so detect that case first if (grepl("^\\s*#", code)) return(TRUE) # Python is picky about trailing whitespace, so ensure only a single # newline follows the code to be submitted code <- sub("\\s*$", "\n", code) # now compile and run the code. we use 'single' mode to ensure that # python auto-prints the statement as it is evaluated. compiled <- tryCatch(builtins$compile(code, '<string>', 'single'), error = identity) if (handle_error(compiled)) return(FALSE) output <- tryCatch(builtins$eval(compiled, globals, locals), error = identity) if (handle_error(output)) return(FALSE) # ensure stdout, stderr flushed (required for Python 3) sys$stdout$flush() sys$stderr$flush() TRUE } repl <- function() { # read user input prompt <- if (buffer$empty()) ">>> " else "... 
" contents <- readline(prompt = prompt) # NULL implies the user sent EOF -- time to leave if (is.null(contents)) { writeLines("exit", con = stdout()) quit_requested <<- TRUE return() } # trim whitespace for handling of special commands trimmed <- gsub("^\\s*|\\s*$", "", contents) # run hook provided by front-end (in case special actions # need to be taken in response to console input) hook <- getOption("reticulate.repl.hook") if (is.function(hook)) { status <- tryCatch(hook(buffer, contents, trimmed), error = identity) # report errors to the user if (inherits(status, "error")) { message(paste("Error:", conditionMessage(status))) return() } # a TRUE return implies the hook handled this input if (isTRUE(status)) return() } # special handling for top-level commands (when buffer is empty) if (buffer$empty()) { # handle user requests to quit if (trimmed %in% c("quit", "exit")) { quit_requested <<- TRUE return() } # special handling for help requests prefixed with '?' if (regexpr("?", trimmed, fixed = TRUE) == 1) { code <- sprintf("help(\"%s\")", substring(trimmed, 2)) py_run_string(code) return() } # similar handling for help requests postfixed with '?' 
if (grepl("[?]\\s*$", trimmed)) { replaced <- sub("[?]\\s*$", "", trimmed) code <- sprintf("help(\"%s\")", replaced) py_run_string(code) return() } # if the user submitted a blank line at the top level, # ignore it (note that we intentionally submit whitespace-only # lines that might terminate a block) if (!nzchar(trimmed)) return() } # update history file if (use_history) { write(contents, file = histfile, append = TRUE) utils::loadhistory(histfile) } # update buffer previous <- buffer$data() buffer$push(contents) # generate code to be sent to command interpreter code <- paste(buffer$data(), collapse = "\n") ready <- tryCatch(compiler(code), condition = identity) # a NULL return implies that we can accept more input if (is.null(ready)) return() # on error, attempt to submit the previous buffer and then handle # the newest line of code independently. this allows us to handle # python constructs such as: # # def foo(): # return 42 # foo() # # try: # print 1 # except: # print 2 # print 3 # # which would otherwise fail if (length(previous) && inherits(ready, "error")) { # submit previous code process(paste(previous, collapse = "\n")) # now, handle the newest line of code submitted buffer$set(contents) code <- contents ready <- tryCatch(compiler(code), condition = identity) # a NULL return implies that we can accept more input if (is.null(ready)) return() } # otherwise, we should have received a code output object # so we can just run the code submitted thus far buffer$clear() process(code) } # notify the user we're entering the REPL (when requested) if (!quiet) { version <- paste( sys$version_info$major, sys$version_info$minor, sys$version_info$micro, sep = "." ) # NOTE: we used to use sys.executable but that would report # the R process rather than the Python process config <- py_config() executable <- config$python fmt <- c( "Python %s (%s)", "Reticulate %s REPL -- A Python interpreter in R." 
) msg <- sprintf( paste(fmt, collapse = "\n"), version, executable, utils::packageVersion("reticulate") ) message(msg) } # enter the REPL loop repeat { if (quit_requested) break repl() } } # Check Whether the Python REPL is Active # # Check to see whether the Python REPL is active. This is primarily # for use by R front-ends, which might want to toggle or affect # the state of the Python REPL while it is running. py_repl_active <- function() { .globals$py_repl_active }
library("vars") data("Canada") summary(Canada) str(Canada) plot(Canada, nc = 2, xlab = "") adf1 <- summary(ur.df(Canada[, "prod"], type = "trend", lags = 2)) adf1 Canada <- Canada[, c("prod", "e", "U", "rw")] p1ct <- VAR(Canada, p = 1, type = "both") p1ct summary(p1ct, equation = "e") plot(p1ct, names = "e") ser11 <- serial.test(p1ct, lags.pt = 16, type = "PT.asymptotic") ser11$serial norm1 <- normality.test(p1ct) norm1$jb.mul arch1 <- arch.test(p1ct, lags.multi = 5) arch1$arch.mul plot(arch1, names = "e") plot(stability(p1ct), nc = 2) summary(ca.jo(Canada, type = "trace", ecdet = "trend", K = 3, spec = "transitory")) summary(ca.jo(Canada, type = "trace", ecdet = "trend", K = 2, spec = "transitory")) vecm <- ca.jo(Canada[, c("rw", "prod", "e", "U")], type = "trace", ecdet = "trend", K = 3, spec = "transitory") vecm.r1 <- cajorls(vecm, r = 1) vecm <- ca.jo(Canada[, c("prod", "e", "U", "rw")], type = "trace", ecdet = "trend", K = 3, spec = "transitory") SR <- matrix(NA, nrow = 4, ncol = 4) SR[4, 2] <- 0 LR <- matrix(NA, nrow = 4, ncol = 4) LR[1, 2:4] <- 0 LR[2:4, 4] <- 0 svec <- SVEC(vecm, LR = LR, SR = SR, r = 1, lrtest = FALSE, boot = TRUE, runs = 100) summary(svec) LR[3, 3] <- 0 svec.oi <- update(svec, LR = LR, lrtest = TRUE, boot = FALSE) svec.oi$LRover svec.irf <- irf(svec, response = "U", n.ahead = 48, boot = TRUE) plot(svec.irf) fevd.U <- fevd(svec, n.ahead = 48)$U
/vars.R
no_license
jhuato/postwar_global_capitalism
R
false
false
1,393
r
library("vars") data("Canada") summary(Canada) str(Canada) plot(Canada, nc = 2, xlab = "") adf1 <- summary(ur.df(Canada[, "prod"], type = "trend", lags = 2)) adf1 Canada <- Canada[, c("prod", "e", "U", "rw")] p1ct <- VAR(Canada, p = 1, type = "both") p1ct summary(p1ct, equation = "e") plot(p1ct, names = "e") ser11 <- serial.test(p1ct, lags.pt = 16, type = "PT.asymptotic") ser11$serial norm1 <- normality.test(p1ct) norm1$jb.mul arch1 <- arch.test(p1ct, lags.multi = 5) arch1$arch.mul plot(arch1, names = "e") plot(stability(p1ct), nc = 2) summary(ca.jo(Canada, type = "trace", ecdet = "trend", K = 3, spec = "transitory")) summary(ca.jo(Canada, type = "trace", ecdet = "trend", K = 2, spec = "transitory")) vecm <- ca.jo(Canada[, c("rw", "prod", "e", "U")], type = "trace", ecdet = "trend", K = 3, spec = "transitory") vecm.r1 <- cajorls(vecm, r = 1) vecm <- ca.jo(Canada[, c("prod", "e", "U", "rw")], type = "trace", ecdet = "trend", K = 3, spec = "transitory") SR <- matrix(NA, nrow = 4, ncol = 4) SR[4, 2] <- 0 LR <- matrix(NA, nrow = 4, ncol = 4) LR[1, 2:4] <- 0 LR[2:4, 4] <- 0 svec <- SVEC(vecm, LR = LR, SR = SR, r = 1, lrtest = FALSE, boot = TRUE, runs = 100) summary(svec) LR[3, 3] <- 0 svec.oi <- update(svec, LR = LR, lrtest = TRUE, boot = FALSE) svec.oi$LRover svec.irf <- irf(svec, response = "U", n.ahead = 48, boot = TRUE) plot(svec.irf) fevd.U <- fevd(svec, n.ahead = 48)$U
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fun1.R \name{fun1} \alias{fun1} \title{MLR Assumptions Function} \usage{ fun1(lm, k) } \arguments{ \item{lm}{a linear model} \item{k}{the number of independent variables in the model} } \value{ Shapiro-Wilk Normality Test Breusch-Pagan Test Durbin-Watson Test Variance Inflation Factor plots of the residuals against each independent variable a plot of the fitted values vs. residuals a QQ plot } \description{ MLR Assumptions Function } \examples{ y <- rnorm(100, 1, 0) x1 <- rnorm(100, 2, 1) x2 <- rnorm(100, 3, 2) ylm <- lm(y ~ x1 + x2) fun1(ylm, 2) }
/man/fun1.Rd
no_license
leahpom/F2020Projt3LP
R
false
true
640
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fun1.R \name{fun1} \alias{fun1} \title{MLR Assumptions Function} \usage{ fun1(lm, k) } \arguments{ \item{lm}{a linear model} \item{k}{the number of independent variables in the model} } \value{ Shapiro-Wilk Normality Test Breusch-Pagan Test Durbin-Watson Test Variance Inflation Factor plots of the residuals against each independent variable a plot of the fitted values vs. residuals a QQ plot } \description{ MLR Assumptions Function } \examples{ y <- rnorm(100, 1, 0) x1 <- rnorm(100, 2, 1) x2 <- rnorm(100, 3, 2) ylm <- lm(y ~ x1 + x2) fun1(ylm, 2) }
## Load in command args: args <- commandArgs(TRUE) args <- as.list(args) args[[2]] <- as.numeric(args[[2]]) print(args) rm(list=ls(all=TRUE)) setwd("~/repos/bmr/R/") args <- list("tetrapods_gs", 10000, "_fixed_r5") ## Load tree and data into R require(ape) require(aRbor) require(devtools) require(mvnfast) require(Matrix) #load_all("~/repos/bayou/bayou_1.0") #install_github("uyedaj/bayou", ref="dev") load_all("~/repos/bayou/bayou_1.0") #require(bayou) td <- readRDS(paste("../output/data/", args[[1]], ".rds", sep="")) tree <- td$phy dat <- td$dat tree <- multi2di(tree) tree$edge.length[tree$edge.length==0] <- .Machine$double.eps td <- make.treedata(tree, dat) td <- reorder(td, "postorder") #td_gs <- filter(td, !is.na(lnGenSize)) #td <- mutate(td, lnMass = log(mean.mass), lnBMR = log(q10smr)) #td <- filter(td, !is.na(lnMass), !is.na(lnBMR), !is.na(lnGenSize)) tree <- td$phy dat <- td$dat ## BM likelihood function for genome size #gs.lik <- bm.lik(td_gs$phy, setNames(td_gs$dat[[3]], td_gs$phy$tip.label), SE=0, model="BM") #par(mfrow=c(1,2)) #plot(tree, show.tip.label=FALSE) #plot(dat$lnMass, dat$lnBMR) lnBMR <- setNames(dat[['lnBMR']], tree$tip.label) lnMass <- setNames(dat[['lnMass']], tree$tip.label) pred <- cbind(setNames(dat[['lnMass']], tree$tip.label)) #pred <- cbind(setNames(dat[['lnMass']], tree$tip.label),setNames(dat[['lnMass']]^2, tree$tip.label),setNames(dat[['lnGenSize']], tree$tip.label)) cache <- bayou:::.prepare.ou.univariate(tree, setNames(dat$lnBMR, tree$tip.label)) #identifyBranches(cache$phy, 2) endonodes <- cache$edge[c(841, 1703),2] endo <- unlist(cache$desc$tips[endonodes]) endo <- cache$phy$tip.label[endo[endo <= cache$n]] pred <- cbind(pred, as.numeric(cache$phy$tip.label %in% endo)) cache <- bayou:::.prepare.ou.univariate(tree, setNames(dat$lnBMR, tree$tip.label), pred = pred) #missing <- which(is.na(cache$pred[,3])) getPreValues <- function(cache){ V <- vcvPhylo(cache$phy, anc.nodes=FALSE) X <- cache$pred[,3] unknown <- is.na(X) known <- 
!unknown Vkk <- V[known, known] Vuu <- V[unknown, unknown] Vku <- V[known, unknown] Vuk <- V[unknown, known] iVkk <- solve(Vkk) sigmabar <- as.matrix(forceSymmetric(Vuu - Vuk%*%iVkk%*%Vku)) cholSigmabar <- chol(sigmabar) mubarmat <- Vuk%*%iVkk return(list(V=V, X=X, unknown=unknown, known=known, Vkk=Vkk, Vuu=Vuu, Vku=Vku, Vuk=Vuk, iVkk=iVkk, sigmabar=sigmabar, mubarmat=mubarmat, cholSigmabar=cholSigmabar)) } #pv <- getPreValues(cache) cMVNorm <- function(cache, pars, prevalues=pv, known=FALSE){ X <- prevalues$X known <- prevalues$known unknown <- prevalues$unknown mu <- rep(pars$pred.root, cache$n) muk <- mu[known] muu <- mu[unknown] mubar <- t(muu + prevalues$mubarmat%*%(X[known]-muk)) #sigmabar <- pars$pred.sig2*prevalues$sigmabar myChol <-sqrt(pars$pred.sig2)*prevalues$cholSigmabar res <- dmvn(pars$missing.pred, mu=mubar, sigma = myChol, log=TRUE, isChol=TRUE) return(res) } ## Proposal function to simulate conditional draws from a multivariate normal distribution .imputePredBM <- function(cache, pars, d, move,ct=NULL, prevalues=pv){ #(tree, dat, sig2, plot=TRUE, ...){ X <- prevalues$X Vuk <- pars$pred.sig2*prevalues$Vuk iVkk <- (1/pars$pred.sig2)*prevalues$iVkk Vku <- pars$pred.sig2*prevalues$Vku Vuu <- pars$pred.sig2*prevalues$Vuu known <- prevalues$known unknown <- prevalues$unknown mu <- rep(pars$pred.root, cache$n) muk <- mu[known] muu <- mu[unknown] mubar <- t(muu + Vuk%*%iVkk%*%(X[known]-muk)) sigmabar <- Vuu - Vuk%*%iVkk%*%Vku res <- MASS::mvrnorm(1, mubar, sigmabar) pars.new <- pars pars.new$missing.pred <- res hr=Inf type="impute" return(list(pars=pars.new, hr=hr, decision = type)) } ## We're going to define a custom likelihood function; this is nearly identical to bayou.lik (bayou's standard ## OU function), except we use pars$beta1 to calculate the residuals after accounting for lnMass. 
custom.lik <- function(pars, cache, X, model="Custom"){ n <- cache$n X <- cache$dat pred <- cache$pred #pred[is.na(pred[,3]),3] <- pars$missing.pred map <- bayou:::.pars2map(pars,cache) tipreg <- rev(map$theta) ntipreg <- rev(map$branch) #ntipreg <- names(map$theta) dups <- !duplicated(ntipreg) & ntipreg %in% (1:nrow(cache$edge))[cache$externalEdge] tipreg <- tipreg[which(dups)] ntipreg <- ntipreg[which(dups)] o <- order(cache$edge[as.numeric(ntipreg), 2]) betaID <- tipreg[o] #betaID <- sapply(tipreglist[cache$externalEdge][o], function(x) x[length(x)]) #beta <- cbind(sapply(c("beta1", "beta2"), function(x) pars[[x]][betaID]), pars$beta3) X = X - pars$beta1[betaID]*pred[,1] cache$dat <- X pars$theta[c(15,16)] <- pars$endo + pars$theta[c(15,16)] X.c <- bayou:::C_weightmatrix(cache, pars)$resid transf.phy <- bayou:::C_transf_branch_lengths(cache, 1, X.c, pars$alpha) transf.phy$edge.length[cache$externalEdge] <- transf.phy$edge[cache$externalEdge] + cache$SE[cache$phy$edge[cache$externalEdge, 2]]^2*(2*pars$alpha)/pars$sig2 comp <- bayou:::C_threepoint(list(n=n, N=cache$N, anc=cache$phy$edge[, 1], des=cache$phy$edge[, 2], diagMatrix=transf.phy$diagMatrix, P=X.c, root=transf.phy$root.edge, len=transf.phy$edge.length)) if(pars$alpha==0){ inv.yVy <- comp$PP detV <- comp$logd } else { inv.yVy <- comp$PP*(2*pars$alpha)/(pars$sig2) detV <- comp$logd+n*log(pars$sig2/(2*pars$alpha)) } llh <- -0.5*(n*log(2*pi)+detV+inv.yVy) #llh <- llh + gs.lik(c(pars$pred.sig2, pars$pred.root), root=ROOT.GIVEN) return(list(loglik=llh, theta=pars$theta,resid=X.c, comp=comp, transf.phy=transf.phy)) } #plotSimmap(pars2simmap(pars, cache$phy)$tree, colors=pars2simmap(pars, cache$phy)$col, ftype="off") sb <- c("Tetrapods"=1707, "Amniota"=1705,"Varanus_exanthematicus"=563, "Pseudoeurycea_brunnata"=277, "Sceloporus_variabilis"=515, "Sceloporus_occidentalis"=509, "Uta_stansburiana"=518, "Masticophis_flagellum"=443, "Typhlogobius_californiensis"=54, "Sebastolobus_altivelis"=85, "Lacertidae"=614, 
"Boidae"=501, "Hyla_arborea"=159, "Aves"=841, "Mammals"=1703, "Lichanura_trivirgata"=495, "Bunopus_tuberculatus"=655, "Salamandridae"=378, "Fishes"=115, "Scaphiophidae"=261, "Euphlyctis"=227, "Labeo"=7, "Bolitoglossinae"=294, "Salmonidae"=48, "Plethodontidae"=344, "Cricetidae"=1222, "Ambystoma_mexicanum"=368) k <- length(sb) startpar <- list(alpha=0.1, sig2=3, beta1=rnorm(k+1, 0.7, 0.1), endo=3 , k=k, ntheta=k+1, theta=rnorm(k+1, 0, 1), sb=sb, loc=rep(0, k), t2=2:(k+1)) ## Get optimized starting values and trait evolutionary models for genome size. #require(phylolm) #tdgs <- filter(td, !is.na(lnGenSize)) #fits <- lapply(c("BM", "OUrandomRoot", "OUfixedRoot", "EB"), function(x) phylolm(lnGenSize~1, data=tdgs$dat, phy=tdgs$phy, model=x)) #aics <- sapply(fits, function(x) x$aic) #bestfit <- fits[[which(aics == min(aics))]] #phenogram(tdgs$phy, setNames(tdgs$dat$lnGenSize, tdgs$phy$tip.label), fsize=0.5) #startpar$pred.root <- unname(bestfit$coeff) #startpar$pred.sig2 <- unname(bestfit$sigma2) #phenogram(td$phy, setNames(tmppred3[[1]], td$phy$tip.label), ftype="off", colors = makeTransparent("black", 0)) #startpar <- .imputePredBM(cache, startpar, d=1, NULL, ct=NULL, prevalues=pv)$pars #tmppred3 <- as.vector(td$dat[,3]) #tmppred3[is.na(tmppred3), ] <- startpar$missing.pred #phenogram(td$phy, setNames(tmppred3[[1]], td$phy$tip.label), ftype="off", colors = makeTransparent("black", 5), add=TRUE) ## This is a function to monitor the output of bayou for our custom model BetaBMR.monitor = function(i, lik, pr, pars, accept, accept.type, j){ names <- c("gen", "lnL", "prior", "alpha","sig2", "rbeta1", "endo", "k") string <- "%-8i%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8i" acceptratios <- tapply(accept, accept.type, mean) names <- c(names, names(acceptratios)) if(j==0){ cat(sprintf("%-7.7s", names), "\n", sep=" ") } cat(sprintf(string, i, lik, pr, pars$alpha, pars$sig2, pars$beta1[1], pars$endo, pars$k), sprintf("%-8.2f", acceptratios),"\n", sep="") } ## We're going to define a 
custom model with variable slopes (beta1) model.BetaBMR <- list(moves = list(alpha=".multiplierProposal", sig2=".multiplierProposal", beta1=".vectorMultiplier", endo=".slidingWindowProposal", theta=".adjustTheta", slide=".slide"), control.weights = list("alpha"=5,"sig2"=2,"beta1"=10,"endo"=3, "theta"=10,"slide"=2,"k"=0), D = list(alpha=0.5, sig2= 0.5, beta1=0.75, endo=0.25, theta=2, slide=1), parorder = c("alpha", "sig2", "beta1", "endo", "k", "ntheta", "theta"), rjpars = c("beta1", "theta"), shiftpars = c("sb", "loc", "t2"), monitor.fn = BetaBMR.monitor, lik.fn = custom.lik) ## Now we define the prior: prior <- make.prior(tree, plot.prior = TRUE, dists=list(dalpha="dhalfcauchy", dsig2="dhalfcauchy", dbeta1="dnorm", dendo="dnorm", dsb="fixed", dk="fixed", dtheta="dnorm"), param=list(dalpha=list(scale=1), dsig2=list(scale=1), dbeta1=list(mean=0.7, sd=0.1), dendo=list(mean=0, sd=4), dk="fixed", dsb="fixed", dtheta=list(mean=0, sd=4)), model="ffancova", fixed=list(sb=startpar$sb, k=startpar$k)) fixed.pars <- list(k=length(sb), ntheta=length(sb)+1, sb=unname(sb), t2=2:(length(sb)+1), loc=rep(0, length(sb))) tr <- pars2simmap(fixed.pars, cache$phy) plotSimmap(tr$tree, colors=setNames(rainbow(length(sb))[sample(1:(length(sb)),length(sb), replace=FALSE)], 1:length(sb)), fsize=0.5) prior(startpar) custom.lik(startpar, cache, cache$dat)$loglik #tr <- pars2simmap(startpar, tree) #plotSimmap(tr$tree, colors=tr$col) mymcmc <- bayou.makeMCMC(tree, lnBMR, pred=pred, SE=0, model=model.BetaBMR, prior=prior, startpar=startpar, new.dir=paste("../output/runs/",args[[1]],sep=""), outname=paste(args[[1]],args[[3]],sep=""), plot.freq=NULL, ticker.freq=1000, samp = 100) ## Now run the chain for 50,000 generations. This will write to a set of files in your temporary directory. 
#sink(paste(mymcmc$dir,mymcmc$outname,".log",sep=""), append=TRUE) mymcmc$run(args[[2]]) #sink(NULL) chain <- mymcmc$load() chain <- set.burnin(chain, 0.3) out <- summary(chain) print(out) par(mar=c(10, 3, 1,1)) beta1 <- do.call(rbind, chain$beta1) colnames(beta1) <- c("root", names(sb)) o <- order(apply(beta1,2, mean)) beta1 <- beta1[,o] boxplot(beta1, las=2) par(mar=c(10, 3, 1,1)) theta <- do.call(rbind, chain$theta) colnames(theta) <- c("root", names(sb)) o <- order(apply(theta,2, mean)) theta <- theta[,o] boxplot(theta, las=2) plot(c(0, length(chain$gen)), c(-5, 5), type="n") lapply(1:ncol(theta), function(x) lines(theta[,x], col=x)) summary(coda::mcmc(theta)) summary(coda::mcmc(beta1)) apply(coda::mcmc(theta), 2, effectiveSize) apply(coda::mcmc(beta1), 2, effectiveSize) require(foreach) require(doParallel) registerDoParallel(cores=10) Bk <- seq(0, 1, length.out=10) ss <- mymcmc$steppingstone(args[[2]], chain, Bk, burnin=0.3, plot=TRUE) saveRDS(chain, file=paste("../output/runs/",args[[1]],"/",args[[1]],"_",args[[3]],".chain.rds",sep="")) saveRDS(mymcmc, file=paste("../output/runs/",args[[1]],"/",args[[1]],"_",args[[3]],".mcmc.rds",sep="")) saveRDS(ss, file=paste("../output/runs/",args[[1]],"/",args[[1]],"_",args[[3]],".ss.rds",sep="")) #plotSimmap.mcmc(tree, chain, burnin=0.3) #chain <- mymcmc$load() #out <- list(tree=mymcmc$tree, dat=mymcmc$dat, outname="newmammals_quadr1", model="custom", model.pars=mymcmc$model.pars, dir="~/repos/bmr/output/runs/newmammals/") #chain <- load.bayou(out,save.Rdata = TRUE, file="../output/runs/newmammals/newmammals_quad.rds") #pdf("../output/figures/QuadraticMammals.pdf") #plotSimmap.mcmc(tree, chain, fsize=0.25, burnin=0.3) #postburn <- round(0.3*length(chain$gen), 0):length(chain$gen) #plot(density(sapply(chain$beta2[postburn], function(x) x[1])))# #par(mfrow=c(2,1)) #plot(c(0, length(chain$gen)), c(0.5, 1), type="n", xlab="Sample", ylab="beta1") #dum <- lapply(1:length(chain$gen), function(x) points(rep(x, 
length(chain$beta1[[x]])), chain$beta1[[x]], pch=".", col=1:length(chain$beta2[[x]]))) #plot(c(0, length(chain$gen)), c(-0.03, 0.03), type="n", xlab="Sample", ylab="beta2") #dum <- lapply(1:length(chain$gen), function(x) points(rep(x, length(chain$beta2[[x]])), chain$beta2[[x]], pch=".", col=1:length(chain$beta2[[x]]))) #par(mfrow=c(1,1)) #samp <- round(seq(postburn[1], postburn[length(postburn)], length.out=1000),0) #{burnin=0.3 #LP <- Lposterior(chain, tree, burnin=burnin) #focalSB <- c(0, which(LP[,1] > cutoff)) #PostSB <- lapply(focalSB, function (y) getBranchPosterior(y, chain, burnin=burnin, thin=thin)) #postBetas <- lapply(1:length(PostSB), function(x) cbind(x, PostSB[[x]][[1]])) #postThetas <- lapply(1:length(PostSB), function(x) cbind(x, PostSB[[x]][[2]])) #postBetas <- data.frame(do.call(rbind, postBetas)) #postBetas$var <- rep("Beta", nrow(postBetas)) #postThetas <- data.frame(do.call(rbind, postThetas)) #postThetas$var <- rep("Theta", nrow(postThetas)) #allpar <- rbind(postBetas, postThetas) #allpar$x <- factor(allpar$x) #focalpars <- list(k=length(focalSB[-1]), # ntheta=length(focalSB), # sb=focalSB[-1], # loc=rep(0, length(focalSB[-1])), # t2=2:length(focalSB))# #tr <- pars2simmap(focalpars, tree) #tipreg <- bayou:::.tipregime(focalpars, tree) #plot(lnMass, lnBMR, type="n") #beta1s <- chain$beta1[samp] #beta2s <- chain$beta2[samp] #thetas <- chain$theta[samp] #lapply(1:length(samp), function(j) sapply(1:length(beta1s[[j]]), function(y) curve(thetas[[j]][y]+beta1s[[j]][y]*x + beta2s[[j]][y]*x^2, add=TRUE, col=makeTransparent(y, 5)))) #points(lnMass, lnBMR, pch=21, bg=makeTransparent(tipreg,alpha=50), col=makeTransparent(tipreg,alpha=50))# #} #dev.off()
/R/runBetaBayou_tetrapods_1pred_eFixedShifts.R
permissive
uyedaj/bmr
R
false
false
13,877
r
## Load in command args: args <- commandArgs(TRUE) args <- as.list(args) args[[2]] <- as.numeric(args[[2]]) print(args) rm(list=ls(all=TRUE)) setwd("~/repos/bmr/R/") args <- list("tetrapods_gs", 10000, "_fixed_r5") ## Load tree and data into R require(ape) require(aRbor) require(devtools) require(mvnfast) require(Matrix) #load_all("~/repos/bayou/bayou_1.0") #install_github("uyedaj/bayou", ref="dev") load_all("~/repos/bayou/bayou_1.0") #require(bayou) td <- readRDS(paste("../output/data/", args[[1]], ".rds", sep="")) tree <- td$phy dat <- td$dat tree <- multi2di(tree) tree$edge.length[tree$edge.length==0] <- .Machine$double.eps td <- make.treedata(tree, dat) td <- reorder(td, "postorder") #td_gs <- filter(td, !is.na(lnGenSize)) #td <- mutate(td, lnMass = log(mean.mass), lnBMR = log(q10smr)) #td <- filter(td, !is.na(lnMass), !is.na(lnBMR), !is.na(lnGenSize)) tree <- td$phy dat <- td$dat ## BM likelihood function for genome size #gs.lik <- bm.lik(td_gs$phy, setNames(td_gs$dat[[3]], td_gs$phy$tip.label), SE=0, model="BM") #par(mfrow=c(1,2)) #plot(tree, show.tip.label=FALSE) #plot(dat$lnMass, dat$lnBMR) lnBMR <- setNames(dat[['lnBMR']], tree$tip.label) lnMass <- setNames(dat[['lnMass']], tree$tip.label) pred <- cbind(setNames(dat[['lnMass']], tree$tip.label)) #pred <- cbind(setNames(dat[['lnMass']], tree$tip.label),setNames(dat[['lnMass']]^2, tree$tip.label),setNames(dat[['lnGenSize']], tree$tip.label)) cache <- bayou:::.prepare.ou.univariate(tree, setNames(dat$lnBMR, tree$tip.label)) #identifyBranches(cache$phy, 2) endonodes <- cache$edge[c(841, 1703),2] endo <- unlist(cache$desc$tips[endonodes]) endo <- cache$phy$tip.label[endo[endo <= cache$n]] pred <- cbind(pred, as.numeric(cache$phy$tip.label %in% endo)) cache <- bayou:::.prepare.ou.univariate(tree, setNames(dat$lnBMR, tree$tip.label), pred = pred) #missing <- which(is.na(cache$pred[,3])) getPreValues <- function(cache){ V <- vcvPhylo(cache$phy, anc.nodes=FALSE) X <- cache$pred[,3] unknown <- is.na(X) known <- 
!unknown Vkk <- V[known, known] Vuu <- V[unknown, unknown] Vku <- V[known, unknown] Vuk <- V[unknown, known] iVkk <- solve(Vkk) sigmabar <- as.matrix(forceSymmetric(Vuu - Vuk%*%iVkk%*%Vku)) cholSigmabar <- chol(sigmabar) mubarmat <- Vuk%*%iVkk return(list(V=V, X=X, unknown=unknown, known=known, Vkk=Vkk, Vuu=Vuu, Vku=Vku, Vuk=Vuk, iVkk=iVkk, sigmabar=sigmabar, mubarmat=mubarmat, cholSigmabar=cholSigmabar)) } #pv <- getPreValues(cache) cMVNorm <- function(cache, pars, prevalues=pv, known=FALSE){ X <- prevalues$X known <- prevalues$known unknown <- prevalues$unknown mu <- rep(pars$pred.root, cache$n) muk <- mu[known] muu <- mu[unknown] mubar <- t(muu + prevalues$mubarmat%*%(X[known]-muk)) #sigmabar <- pars$pred.sig2*prevalues$sigmabar myChol <-sqrt(pars$pred.sig2)*prevalues$cholSigmabar res <- dmvn(pars$missing.pred, mu=mubar, sigma = myChol, log=TRUE, isChol=TRUE) return(res) } ## Proposal function to simulate conditional draws from a multivariate normal distribution .imputePredBM <- function(cache, pars, d, move,ct=NULL, prevalues=pv){ #(tree, dat, sig2, plot=TRUE, ...){ X <- prevalues$X Vuk <- pars$pred.sig2*prevalues$Vuk iVkk <- (1/pars$pred.sig2)*prevalues$iVkk Vku <- pars$pred.sig2*prevalues$Vku Vuu <- pars$pred.sig2*prevalues$Vuu known <- prevalues$known unknown <- prevalues$unknown mu <- rep(pars$pred.root, cache$n) muk <- mu[known] muu <- mu[unknown] mubar <- t(muu + Vuk%*%iVkk%*%(X[known]-muk)) sigmabar <- Vuu - Vuk%*%iVkk%*%Vku res <- MASS::mvrnorm(1, mubar, sigmabar) pars.new <- pars pars.new$missing.pred <- res hr=Inf type="impute" return(list(pars=pars.new, hr=hr, decision = type)) } ## We're going to define a custom likelihood function; this is nearly identical to bayou.lik (bayou's standard ## OU function), except we use pars$beta1 to calculate the residuals after accounting for lnMass. 
custom.lik <- function(pars, cache, X, model="Custom"){ n <- cache$n X <- cache$dat pred <- cache$pred #pred[is.na(pred[,3]),3] <- pars$missing.pred map <- bayou:::.pars2map(pars,cache) tipreg <- rev(map$theta) ntipreg <- rev(map$branch) #ntipreg <- names(map$theta) dups <- !duplicated(ntipreg) & ntipreg %in% (1:nrow(cache$edge))[cache$externalEdge] tipreg <- tipreg[which(dups)] ntipreg <- ntipreg[which(dups)] o <- order(cache$edge[as.numeric(ntipreg), 2]) betaID <- tipreg[o] #betaID <- sapply(tipreglist[cache$externalEdge][o], function(x) x[length(x)]) #beta <- cbind(sapply(c("beta1", "beta2"), function(x) pars[[x]][betaID]), pars$beta3) X = X - pars$beta1[betaID]*pred[,1] cache$dat <- X pars$theta[c(15,16)] <- pars$endo + pars$theta[c(15,16)] X.c <- bayou:::C_weightmatrix(cache, pars)$resid transf.phy <- bayou:::C_transf_branch_lengths(cache, 1, X.c, pars$alpha) transf.phy$edge.length[cache$externalEdge] <- transf.phy$edge[cache$externalEdge] + cache$SE[cache$phy$edge[cache$externalEdge, 2]]^2*(2*pars$alpha)/pars$sig2 comp <- bayou:::C_threepoint(list(n=n, N=cache$N, anc=cache$phy$edge[, 1], des=cache$phy$edge[, 2], diagMatrix=transf.phy$diagMatrix, P=X.c, root=transf.phy$root.edge, len=transf.phy$edge.length)) if(pars$alpha==0){ inv.yVy <- comp$PP detV <- comp$logd } else { inv.yVy <- comp$PP*(2*pars$alpha)/(pars$sig2) detV <- comp$logd+n*log(pars$sig2/(2*pars$alpha)) } llh <- -0.5*(n*log(2*pi)+detV+inv.yVy) #llh <- llh + gs.lik(c(pars$pred.sig2, pars$pred.root), root=ROOT.GIVEN) return(list(loglik=llh, theta=pars$theta,resid=X.c, comp=comp, transf.phy=transf.phy)) } #plotSimmap(pars2simmap(pars, cache$phy)$tree, colors=pars2simmap(pars, cache$phy)$col, ftype="off") sb <- c("Tetrapods"=1707, "Amniota"=1705,"Varanus_exanthematicus"=563, "Pseudoeurycea_brunnata"=277, "Sceloporus_variabilis"=515, "Sceloporus_occidentalis"=509, "Uta_stansburiana"=518, "Masticophis_flagellum"=443, "Typhlogobius_californiensis"=54, "Sebastolobus_altivelis"=85, "Lacertidae"=614, 
"Boidae"=501, "Hyla_arborea"=159, "Aves"=841, "Mammals"=1703, "Lichanura_trivirgata"=495, "Bunopus_tuberculatus"=655, "Salamandridae"=378, "Fishes"=115, "Scaphiophidae"=261, "Euphlyctis"=227, "Labeo"=7, "Bolitoglossinae"=294, "Salmonidae"=48, "Plethodontidae"=344, "Cricetidae"=1222, "Ambystoma_mexicanum"=368) k <- length(sb) startpar <- list(alpha=0.1, sig2=3, beta1=rnorm(k+1, 0.7, 0.1), endo=3 , k=k, ntheta=k+1, theta=rnorm(k+1, 0, 1), sb=sb, loc=rep(0, k), t2=2:(k+1)) ## Get optimized starting values and trait evolutionary models for genome size. #require(phylolm) #tdgs <- filter(td, !is.na(lnGenSize)) #fits <- lapply(c("BM", "OUrandomRoot", "OUfixedRoot", "EB"), function(x) phylolm(lnGenSize~1, data=tdgs$dat, phy=tdgs$phy, model=x)) #aics <- sapply(fits, function(x) x$aic) #bestfit <- fits[[which(aics == min(aics))]] #phenogram(tdgs$phy, setNames(tdgs$dat$lnGenSize, tdgs$phy$tip.label), fsize=0.5) #startpar$pred.root <- unname(bestfit$coeff) #startpar$pred.sig2 <- unname(bestfit$sigma2) #phenogram(td$phy, setNames(tmppred3[[1]], td$phy$tip.label), ftype="off", colors = makeTransparent("black", 0)) #startpar <- .imputePredBM(cache, startpar, d=1, NULL, ct=NULL, prevalues=pv)$pars #tmppred3 <- as.vector(td$dat[,3]) #tmppred3[is.na(tmppred3), ] <- startpar$missing.pred #phenogram(td$phy, setNames(tmppred3[[1]], td$phy$tip.label), ftype="off", colors = makeTransparent("black", 5), add=TRUE) ## This is a function to monitor the output of bayou for our custom model BetaBMR.monitor = function(i, lik, pr, pars, accept, accept.type, j){ names <- c("gen", "lnL", "prior", "alpha","sig2", "rbeta1", "endo", "k") string <- "%-8i%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8i" acceptratios <- tapply(accept, accept.type, mean) names <- c(names, names(acceptratios)) if(j==0){ cat(sprintf("%-7.7s", names), "\n", sep=" ") } cat(sprintf(string, i, lik, pr, pars$alpha, pars$sig2, pars$beta1[1], pars$endo, pars$k), sprintf("%-8.2f", acceptratios),"\n", sep="") } ## We're going to define a 
custom model with variable slopes (beta1) model.BetaBMR <- list(moves = list(alpha=".multiplierProposal", sig2=".multiplierProposal", beta1=".vectorMultiplier", endo=".slidingWindowProposal", theta=".adjustTheta", slide=".slide"), control.weights = list("alpha"=5,"sig2"=2,"beta1"=10,"endo"=3, "theta"=10,"slide"=2,"k"=0), D = list(alpha=0.5, sig2= 0.5, beta1=0.75, endo=0.25, theta=2, slide=1), parorder = c("alpha", "sig2", "beta1", "endo", "k", "ntheta", "theta"), rjpars = c("beta1", "theta"), shiftpars = c("sb", "loc", "t2"), monitor.fn = BetaBMR.monitor, lik.fn = custom.lik) ## Now we define the prior: prior <- make.prior(tree, plot.prior = TRUE, dists=list(dalpha="dhalfcauchy", dsig2="dhalfcauchy", dbeta1="dnorm", dendo="dnorm", dsb="fixed", dk="fixed", dtheta="dnorm"), param=list(dalpha=list(scale=1), dsig2=list(scale=1), dbeta1=list(mean=0.7, sd=0.1), dendo=list(mean=0, sd=4), dk="fixed", dsb="fixed", dtheta=list(mean=0, sd=4)), model="ffancova", fixed=list(sb=startpar$sb, k=startpar$k)) fixed.pars <- list(k=length(sb), ntheta=length(sb)+1, sb=unname(sb), t2=2:(length(sb)+1), loc=rep(0, length(sb))) tr <- pars2simmap(fixed.pars, cache$phy) plotSimmap(tr$tree, colors=setNames(rainbow(length(sb))[sample(1:(length(sb)),length(sb), replace=FALSE)], 1:length(sb)), fsize=0.5) prior(startpar) custom.lik(startpar, cache, cache$dat)$loglik #tr <- pars2simmap(startpar, tree) #plotSimmap(tr$tree, colors=tr$col) mymcmc <- bayou.makeMCMC(tree, lnBMR, pred=pred, SE=0, model=model.BetaBMR, prior=prior, startpar=startpar, new.dir=paste("../output/runs/",args[[1]],sep=""), outname=paste(args[[1]],args[[3]],sep=""), plot.freq=NULL, ticker.freq=1000, samp = 100) ## Now run the chain for 50,000 generations. This will write to a set of files in your temporary directory. 
#sink(paste(mymcmc$dir,mymcmc$outname,".log",sep=""), append=TRUE) mymcmc$run(args[[2]]) #sink(NULL) chain <- mymcmc$load() chain <- set.burnin(chain, 0.3) out <- summary(chain) print(out) par(mar=c(10, 3, 1,1)) beta1 <- do.call(rbind, chain$beta1) colnames(beta1) <- c("root", names(sb)) o <- order(apply(beta1,2, mean)) beta1 <- beta1[,o] boxplot(beta1, las=2) par(mar=c(10, 3, 1,1)) theta <- do.call(rbind, chain$theta) colnames(theta) <- c("root", names(sb)) o <- order(apply(theta,2, mean)) theta <- theta[,o] boxplot(theta, las=2) plot(c(0, length(chain$gen)), c(-5, 5), type="n") lapply(1:ncol(theta), function(x) lines(theta[,x], col=x)) summary(coda::mcmc(theta)) summary(coda::mcmc(beta1)) apply(coda::mcmc(theta), 2, effectiveSize) apply(coda::mcmc(beta1), 2, effectiveSize) require(foreach) require(doParallel) registerDoParallel(cores=10) Bk <- seq(0, 1, length.out=10) ss <- mymcmc$steppingstone(args[[2]], chain, Bk, burnin=0.3, plot=TRUE) saveRDS(chain, file=paste("../output/runs/",args[[1]],"/",args[[1]],"_",args[[3]],".chain.rds",sep="")) saveRDS(mymcmc, file=paste("../output/runs/",args[[1]],"/",args[[1]],"_",args[[3]],".mcmc.rds",sep="")) saveRDS(ss, file=paste("../output/runs/",args[[1]],"/",args[[1]],"_",args[[3]],".ss.rds",sep="")) #plotSimmap.mcmc(tree, chain, burnin=0.3) #chain <- mymcmc$load() #out <- list(tree=mymcmc$tree, dat=mymcmc$dat, outname="newmammals_quadr1", model="custom", model.pars=mymcmc$model.pars, dir="~/repos/bmr/output/runs/newmammals/") #chain <- load.bayou(out,save.Rdata = TRUE, file="../output/runs/newmammals/newmammals_quad.rds") #pdf("../output/figures/QuadraticMammals.pdf") #plotSimmap.mcmc(tree, chain, fsize=0.25, burnin=0.3) #postburn <- round(0.3*length(chain$gen), 0):length(chain$gen) #plot(density(sapply(chain$beta2[postburn], function(x) x[1])))# #par(mfrow=c(2,1)) #plot(c(0, length(chain$gen)), c(0.5, 1), type="n", xlab="Sample", ylab="beta1") #dum <- lapply(1:length(chain$gen), function(x) points(rep(x, 
length(chain$beta1[[x]])), chain$beta1[[x]], pch=".", col=1:length(chain$beta2[[x]]))) #plot(c(0, length(chain$gen)), c(-0.03, 0.03), type="n", xlab="Sample", ylab="beta2") #dum <- lapply(1:length(chain$gen), function(x) points(rep(x, length(chain$beta2[[x]])), chain$beta2[[x]], pch=".", col=1:length(chain$beta2[[x]]))) #par(mfrow=c(1,1)) #samp <- round(seq(postburn[1], postburn[length(postburn)], length.out=1000),0) #{burnin=0.3 #LP <- Lposterior(chain, tree, burnin=burnin) #focalSB <- c(0, which(LP[,1] > cutoff)) #PostSB <- lapply(focalSB, function (y) getBranchPosterior(y, chain, burnin=burnin, thin=thin)) #postBetas <- lapply(1:length(PostSB), function(x) cbind(x, PostSB[[x]][[1]])) #postThetas <- lapply(1:length(PostSB), function(x) cbind(x, PostSB[[x]][[2]])) #postBetas <- data.frame(do.call(rbind, postBetas)) #postBetas$var <- rep("Beta", nrow(postBetas)) #postThetas <- data.frame(do.call(rbind, postThetas)) #postThetas$var <- rep("Theta", nrow(postThetas)) #allpar <- rbind(postBetas, postThetas) #allpar$x <- factor(allpar$x) #focalpars <- list(k=length(focalSB[-1]), # ntheta=length(focalSB), # sb=focalSB[-1], # loc=rep(0, length(focalSB[-1])), # t2=2:length(focalSB))# #tr <- pars2simmap(focalpars, tree) #tipreg <- bayou:::.tipregime(focalpars, tree) #plot(lnMass, lnBMR, type="n") #beta1s <- chain$beta1[samp] #beta2s <- chain$beta2[samp] #thetas <- chain$theta[samp] #lapply(1:length(samp), function(j) sapply(1:length(beta1s[[j]]), function(y) curve(thetas[[j]][y]+beta1s[[j]][y]*x + beta2s[[j]][y]*x^2, add=TRUE, col=makeTransparent(y, 5)))) #points(lnMass, lnBMR, pch=21, bg=makeTransparent(tipreg,alpha=50), col=makeTransparent(tipreg,alpha=50))# #} #dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ratios.R \name{pw_ratios_matrix} \alias{pw_ratios_matrix} \title{Pairwise ratios of taxa from matrix of multiple samples} \usage{ pw_ratios_matrix(x, na.rm = FALSE) } \description{ Pairwise ratios of taxa from matrix of multiple samples }
/man/pw_ratios_matrix.Rd
permissive
marcelladane/metacal
R
false
true
317
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ratios.R \name{pw_ratios_matrix} \alias{pw_ratios_matrix} \title{Pairwise ratios of taxa from matrix of multiple samples} \usage{ pw_ratios_matrix(x, na.rm = FALSE) } \description{ Pairwise ratios of taxa from matrix of multiple samples }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ML_PLSModel.R \name{PLSModel} \alias{PLSModel} \title{Partial Least Squares Model} \usage{ PLSModel(ncomp = 1, scale = FALSE) } \arguments{ \item{ncomp}{number of components to include in the model.} \item{scale}{logical indicating whether to scale the predictors by the sample standard deviation.} } \value{ \code{MLModel} class object. } \description{ Function to perform partial least squares regression. } \details{ \describe{ \item{Response Types:}{\code{factor}, \code{numeric}} \item{\link[=TunedModel]{Automatic Tuning} of Grid Parameters:}{ \code{ncomp} } } Further model details can be found in the source link below. } \examples{ fit(sale_amount ~ ., data = ICHomes, model = PLSModel) } \seealso{ \code{\link[pls]{mvr}}, \code{\link{fit}}, \code{\link{resample}} }
/man/PLSModel.Rd
no_license
chen061218/MachineShop
R
false
true
867
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ML_PLSModel.R \name{PLSModel} \alias{PLSModel} \title{Partial Least Squares Model} \usage{ PLSModel(ncomp = 1, scale = FALSE) } \arguments{ \item{ncomp}{number of components to include in the model.} \item{scale}{logical indicating whether to scale the predictors by the sample standard deviation.} } \value{ \code{MLModel} class object. } \description{ Function to perform partial least squares regression. } \details{ \describe{ \item{Response Types:}{\code{factor}, \code{numeric}} \item{\link[=TunedModel]{Automatic Tuning} of Grid Parameters:}{ \code{ncomp} } } Further model details can be found in the source link below. } \examples{ fit(sale_amount ~ ., data = ICHomes, model = PLSModel) } \seealso{ \code{\link[pls]{mvr}}, \code{\link{fit}}, \code{\link{resample}} }
############################################################################################## ############################################################################################## ##### CREATED 1/28/2018 ##### #install.packages("lqa") ###predictors correlation of 0.2 rm(list=ls()) library("lqa") # version 1.0-3 library(MASS) # version 3.3.1 library("mgcv") require(glmnet) ######################################################### ######################################################### DIREC="/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2/" DIRECOUT="/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2/sampleSize200" funLoc = "Functions/" source(paste0(DIREC, funLoc, "addFun.R")) source(paste0(DIREC, funLoc, "formulaConstruct.R")) source(paste0(DIREC, funLoc, "simDataAll.R")) source(paste0(DIREC, funLoc, "variableSelectY.R")) source(paste0(DIREC, funLoc, "variableSelectT.R")) source(paste0(DIREC, funLoc, "variableSelectT2.R")) source(paste0(DIREC, funLoc, "Stepwise.R")) source(paste0(DIREC, funLoc, "pencompFit.R")) numRun=500 ############IPTW, AIPTW and PENCOMP estimators############### ###standard CI estFinal_iptw=matrix(NA, nrow=numRun, ncol=4) estFinal_aiptw=matrix(NA, nrow=numRun, ncol=4) estFinal_pencomp=matrix(NA, nrow=numRun, ncol=4) ###bagging estimator estFinal_iptw_bag=matrix(NA, nrow=numRun, ncol=4) estFinal_aiptw_bag=matrix(NA, nrow=numRun, ncol=4) estFinal_pencomp_bag=matrix(NA, nrow=numRun, ncol=4) ###percentiles estFinal_iptw_per=matrix(NA, nrow=numRun, ncol=4) estFinal_aiptw_per=matrix(NA, nrow=numRun, ncol=4) estFinal_pencomp_per=matrix(NA, nrow=numRun, ncol=4) ###Rubin's combining rule estFinal_pencomp_rubin=matrix(NA, nrow=numRun, ncol=4) varSelPropY=matrix(NA, nrow=numRun, ncol = 20) varSelPropT=matrix(NA, nrow=numRun, ncol = 20) start=201 end=300 for(d in start:end) { tryCatch ( { numT=1000 sampleSize=200 numPred=20 ##number of predictors level="low" 
simdatG=simulateDate(sampleSize=sampleSize, numPred=numPred, overlapL=level, seed.num=d, rho=0, treatEff=2) simdat=simdatG[[1]] varList=simdatG[[2]] outcome.varname="Y" treat.varname="A" splineTerm="s(pslogit, bs=\"ps\", k=15)" ### firstNum="outcome" ###adaptive lasso on the outcome first and then propensity score model Method="REML" ##for both propensity and prediction models modelType="stepwise" ###seperate adaptive lasso on the propensity and prediction models outcomeVarList0=NULL outcomeVarList1=NULL propenVarList=NULL ############################################################################################################################## #################################################################################################################### # print out IPTW, AIPTW and PENCOMP estimates corresponding to smallest wAMD value estimate.out=NULL estimate.out=pencompStepwise(dataOR=simdat, data=simdat, varList=varList, propenVarList=propenVarList, outcomeVarList0=outcomeVarList0, outcomeVarList1=outcomeVarList1, treat.varname=treat.varname, outcome.varname=outcome.varname) if( typeof(estimate.out) == "list" ){ ###if output is list, should be right estFinal_iptw[d,1]=estimate.out$out[1] estFinal_aiptw[d,1]=estimate.out$out[2] estFinal_pencomp[d,1]=estimate.out$out[3] } estimate.boot=matrix(NA, nrow=numT, ncol=3) ###IPTW, AIPTW and PENCOMP estimates from each bootstrap sample pencomp.rubin=matrix(NA, nrow=numT, ncol=2) ###variance of PENCOMP pencomp.numKnot=matrix(NA, nrow=numT, ncol=2) ###number of knots in PENCOMP varSelectTreat=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in propensity model varSelectY1=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in the outcome model Y0 varSelectY0=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in the outcome modely1 countMat=matrix(NA, ncol=nrow(simdat), nrow=numT) for(ind in 1:numT){ tryCatch ( { set.seed(ind) bootSample = 
simdat[sample(1:nrow(simdat),replace=T),] ###random bootstraps tempCount=numeric(nrow(bootSample)) for(countIndex in 1:length(tempCount)){ tempCount[countIndex] = sum(bootSample$id2==countIndex) } mulResult = pencompStepwise(dataOR=simdat, data=bootSample, varList=varList, propenVarList=propenVarList, outcomeVarList0=outcomeVarList0, outcomeVarList1=outcomeVarList1, treat.varname=treat.varname, outcome.varname=outcome.varname ) if( typeof(mulResult) == "list" ){ ###if output is list, should be right estimate.boot[ind,] = (mulResult$out)[c(1, 2, 3)] pencomp.rubin[ind,] = (mulResult$out)[c(4, 5)] varSelectTreat[ind,]=mulResult$varTreat varSelectY1[ind,]=mulResult$varY1 varSelectY0[ind,]=mulResult$varY0 countMat[ind,]=tempCount pencomp.numKnot[ind,]=mulResult$numK ###number of knots in PENCOMP } } , error=function(e) { } ) } if(d < 10){ ####store bootstrap estimates bootResult=cbind(estimate.out$out[1], estimate.out$out[2], estimate.out$out[3], estimate.boot, pencomp.rubin, pencomp.numKnot) write.table(bootResult, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, ".txt",sep=""), row.name=F, quote=F, sep="\t", col.names = c("iptwOR", "aiptwOR","pencompOR", "iptw", "aiptw","pencompBoot", "pencompRubin", "pencompRubinVar", "K0", "K1")) } ####store counts of each datapoint in each bootstrap (for calculating Brad Efron's CI) #write.table(countMat, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "countMat.txt",sep=""), row.name=F, quote=F, sep="\t") varSelPropY[d,]=colMeans(varSelectY0, na.rm = T) varSelPropT[d,]=colMeans(varSelectTreat, na.rm = T) write.table(varSelPropY, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectY.txt",sep=""), row.name=F, quote=F, col.names = varList, sep="\t") write.table(varSelPropT, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectTreat.txt",sep=""), row.name=F, quote=F, col.names = varList, sep="\t") ####store coefficients of outcome model Y1 
#write.table(varSelectY1, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "varSelectY1.txt",sep=""), row.name=F, quote=F, col.names = varList, # sep="\t") #########standard confidence interval, Rubin's combining rule for PENCOMP estFinal_pencomp_rubin[d,]=processPENCOMP(t(pencomp.rubin)) estFinal_iptw[d, 2:4]=c( sd(estimate.boot[,1], na.rm = T), estFinal_iptw[d,1] + c(-1, 1)*1.96*sd(estimate.boot[, 1], na.rm = T) ) estFinal_aiptw[d, 2:4]=c( sd(estimate.boot[,2], na.rm = T), estFinal_aiptw[d,1] + c(-1, 1)*1.96*sd(estimate.boot[,2], na.rm = T) ) estFinal_pencomp[d, 2:4]=c( sd(estimate.boot[,3], na.rm = T), estFinal_pencomp[d,1] + c(-1, 1)*1.96*sd(estimate.boot[,3], na.rm = T) ) ############################################## #### bagging estimator accounting for model selection estFinal_iptw_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,1], sampleSize=sampleSize) estFinal_aiptw_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,2], sampleSize=sampleSize) estFinal_pencomp_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,3], sampleSize=sampleSize) ############################################## #### confidence interval based on quantiles estFinal_iptw_per[d,]=percentile(estimate=estimate.boot[,1]) estFinal_aiptw_per[d,]=percentile(estimate=estimate.boot[,2]) estFinal_pencomp_per[d,]=percentile(estimate=estimate.boot[,3]) resultTable=NULL resultTable=data.frame(estFinal_pencomp, estFinal_pencomp_bag, estFinal_pencomp_per, estFinal_pencomp_rubin) write.table(resultTable, paste(DIRECOUT, "/Results/pencomp_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t") resultTable=NULL resultTable=data.frame(estFinal_iptw, estFinal_iptw_bag, estFinal_iptw_per) write.table(resultTable, paste(DIRECOUT, "/Results/iptw_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t") resultTable=NULL resultTable=data.frame(estFinal_aiptw, estFinal_aiptw_bag, estFinal_aiptw_per) 
write.table(resultTable, paste(DIRECOUT, "/Results/aiptw_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t") } , error=function(e) { } ) }
/simulation/case2/sampleSize200/stepwise_low_start_201.R
no_license
TingtingKayla/Stats_Robust_Causal_Estimation
R
false
false
8,715
r
############################################################################################## ############################################################################################## ##### CREATED 1/28/2018 ##### #install.packages("lqa") ###predictors correlation of 0.2 rm(list=ls()) library("lqa") # version 1.0-3 library(MASS) # version 3.3.1 library("mgcv") require(glmnet) ######################################################### ######################################################### DIREC="/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2/" DIRECOUT="/home/tkzhou/PSPP_Project/PENCOMP_OneTimePoint/variableSelection5/case2/sampleSize200" funLoc = "Functions/" source(paste0(DIREC, funLoc, "addFun.R")) source(paste0(DIREC, funLoc, "formulaConstruct.R")) source(paste0(DIREC, funLoc, "simDataAll.R")) source(paste0(DIREC, funLoc, "variableSelectY.R")) source(paste0(DIREC, funLoc, "variableSelectT.R")) source(paste0(DIREC, funLoc, "variableSelectT2.R")) source(paste0(DIREC, funLoc, "Stepwise.R")) source(paste0(DIREC, funLoc, "pencompFit.R")) numRun=500 ############IPTW, AIPTW and PENCOMP estimators############### ###standard CI estFinal_iptw=matrix(NA, nrow=numRun, ncol=4) estFinal_aiptw=matrix(NA, nrow=numRun, ncol=4) estFinal_pencomp=matrix(NA, nrow=numRun, ncol=4) ###bagging estimator estFinal_iptw_bag=matrix(NA, nrow=numRun, ncol=4) estFinal_aiptw_bag=matrix(NA, nrow=numRun, ncol=4) estFinal_pencomp_bag=matrix(NA, nrow=numRun, ncol=4) ###percentiles estFinal_iptw_per=matrix(NA, nrow=numRun, ncol=4) estFinal_aiptw_per=matrix(NA, nrow=numRun, ncol=4) estFinal_pencomp_per=matrix(NA, nrow=numRun, ncol=4) ###Rubin's combining rule estFinal_pencomp_rubin=matrix(NA, nrow=numRun, ncol=4) varSelPropY=matrix(NA, nrow=numRun, ncol = 20) varSelPropT=matrix(NA, nrow=numRun, ncol = 20) start=201 end=300 for(d in start:end) { tryCatch ( { numT=1000 sampleSize=200 numPred=20 ##number of predictors level="low" 
simdatG=simulateDate(sampleSize=sampleSize, numPred=numPred, overlapL=level, seed.num=d, rho=0, treatEff=2) simdat=simdatG[[1]] varList=simdatG[[2]] outcome.varname="Y" treat.varname="A" splineTerm="s(pslogit, bs=\"ps\", k=15)" ### firstNum="outcome" ###adaptive lasso on the outcome first and then propensity score model Method="REML" ##for both propensity and prediction models modelType="stepwise" ###seperate adaptive lasso on the propensity and prediction models outcomeVarList0=NULL outcomeVarList1=NULL propenVarList=NULL ############################################################################################################################## #################################################################################################################### # print out IPTW, AIPTW and PENCOMP estimates corresponding to smallest wAMD value estimate.out=NULL estimate.out=pencompStepwise(dataOR=simdat, data=simdat, varList=varList, propenVarList=propenVarList, outcomeVarList0=outcomeVarList0, outcomeVarList1=outcomeVarList1, treat.varname=treat.varname, outcome.varname=outcome.varname) if( typeof(estimate.out) == "list" ){ ###if output is list, should be right estFinal_iptw[d,1]=estimate.out$out[1] estFinal_aiptw[d,1]=estimate.out$out[2] estFinal_pencomp[d,1]=estimate.out$out[3] } estimate.boot=matrix(NA, nrow=numT, ncol=3) ###IPTW, AIPTW and PENCOMP estimates from each bootstrap sample pencomp.rubin=matrix(NA, nrow=numT, ncol=2) ###variance of PENCOMP pencomp.numKnot=matrix(NA, nrow=numT, ncol=2) ###number of knots in PENCOMP varSelectTreat=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in propensity model varSelectY1=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in the outcome model Y0 varSelectY0=matrix( NA, nrow=numT, ncol=length(varList) ) ### coefficients of variables in the outcome modely1 countMat=matrix(NA, ncol=nrow(simdat), nrow=numT) for(ind in 1:numT){ tryCatch ( { set.seed(ind) bootSample = 
simdat[sample(1:nrow(simdat),replace=T),] ###random bootstraps tempCount=numeric(nrow(bootSample)) for(countIndex in 1:length(tempCount)){ tempCount[countIndex] = sum(bootSample$id2==countIndex) } mulResult = pencompStepwise(dataOR=simdat, data=bootSample, varList=varList, propenVarList=propenVarList, outcomeVarList0=outcomeVarList0, outcomeVarList1=outcomeVarList1, treat.varname=treat.varname, outcome.varname=outcome.varname ) if( typeof(mulResult) == "list" ){ ###if output is list, should be right estimate.boot[ind,] = (mulResult$out)[c(1, 2, 3)] pencomp.rubin[ind,] = (mulResult$out)[c(4, 5)] varSelectTreat[ind,]=mulResult$varTreat varSelectY1[ind,]=mulResult$varY1 varSelectY0[ind,]=mulResult$varY0 countMat[ind,]=tempCount pencomp.numKnot[ind,]=mulResult$numK ###number of knots in PENCOMP } } , error=function(e) { } ) } if(d < 10){ ####store bootstrap estimates bootResult=cbind(estimate.out$out[1], estimate.out$out[2], estimate.out$out[3], estimate.boot, pencomp.rubin, pencomp.numKnot) write.table(bootResult, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, ".txt",sep=""), row.name=F, quote=F, sep="\t", col.names = c("iptwOR", "aiptwOR","pencompOR", "iptw", "aiptw","pencompBoot", "pencompRubin", "pencompRubinVar", "K0", "K1")) } ####store counts of each datapoint in each bootstrap (for calculating Brad Efron's CI) #write.table(countMat, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "countMat.txt",sep=""), row.name=F, quote=F, sep="\t") varSelPropY[d,]=colMeans(varSelectY0, na.rm = T) varSelPropT[d,]=colMeans(varSelectTreat, na.rm = T) write.table(varSelPropY, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectY.txt",sep=""), row.name=F, quote=F, col.names = varList, sep="\t") write.table(varSelPropT, paste(DIRECOUT, "/Results/", modelType, "_", level, "_start_", start, "varSelectTreat.txt",sep=""), row.name=F, quote=F, col.names = varList, sep="\t") ####store coefficients of outcome model Y1 
#write.table(varSelectY1, paste(DIRECOUT, "/bootResults/sample", d, modelType, "_", level, "varSelectY1.txt",sep=""), row.name=F, quote=F, col.names = varList, # sep="\t") #########standard confidence interval, Rubin's combining rule for PENCOMP estFinal_pencomp_rubin[d,]=processPENCOMP(t(pencomp.rubin)) estFinal_iptw[d, 2:4]=c( sd(estimate.boot[,1], na.rm = T), estFinal_iptw[d,1] + c(-1, 1)*1.96*sd(estimate.boot[, 1], na.rm = T) ) estFinal_aiptw[d, 2:4]=c( sd(estimate.boot[,2], na.rm = T), estFinal_aiptw[d,1] + c(-1, 1)*1.96*sd(estimate.boot[,2], na.rm = T) ) estFinal_pencomp[d, 2:4]=c( sd(estimate.boot[,3], na.rm = T), estFinal_pencomp[d,1] + c(-1, 1)*1.96*sd(estimate.boot[,3], na.rm = T) ) ############################################## #### bagging estimator accounting for model selection estFinal_iptw_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,1], sampleSize=sampleSize) estFinal_aiptw_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,2], sampleSize=sampleSize) estFinal_pencomp_bag[d,]=bagging2(countMat=countMat, estimate=estimate.boot[,3], sampleSize=sampleSize) ############################################## #### confidence interval based on quantiles estFinal_iptw_per[d,]=percentile(estimate=estimate.boot[,1]) estFinal_aiptw_per[d,]=percentile(estimate=estimate.boot[,2]) estFinal_pencomp_per[d,]=percentile(estimate=estimate.boot[,3]) resultTable=NULL resultTable=data.frame(estFinal_pencomp, estFinal_pencomp_bag, estFinal_pencomp_per, estFinal_pencomp_rubin) write.table(resultTable, paste(DIRECOUT, "/Results/pencomp_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t") resultTable=NULL resultTable=data.frame(estFinal_iptw, estFinal_iptw_bag, estFinal_iptw_per) write.table(resultTable, paste(DIRECOUT, "/Results/iptw_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t") resultTable=NULL resultTable=data.frame(estFinal_aiptw, estFinal_aiptw_bag, estFinal_aiptw_per) 
write.table(resultTable, paste(DIRECOUT, "/Results/aiptw_", modelType, "_", level, "_start_", start, ".txt",sep=""), row.name=F, quote=F, sep="\t") } , error=function(e) { } ) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilhisse.R \name{m_scatterplot_cp} \alias{m_scatterplot_cp} \title{Plot diversification rates estimated by a HiSSE model with means and standard deviations across tips and a two-dimensional colorplane for color} \usage{ m_scatterplot_cp( processed_recon, parameter = "turnover", focal_character = c("prob_0x", "prob_x0"), focal_character_label, second_character_label, colors, plot_as_waiting_time = FALSE ) } \arguments{ \item{processed_recon}{An object produced with \code{m_process_recon}} \item{parameter}{The diversification parameter to be plotted on the y axis. Possible options are turnover, extinct.frac, net.div, speciation, extinction} \item{focal_character}{Specifies the x axis. Either \code{prob_0x} to plot the probability of state 0 for the first character, or \code{prob_x0} to plot the probability for state 0 for the second character.} \item{focal_character_label}{Label for the x axis of the scatterplot and two-dimensional color gradient. This should match the focal probability.} \item{second_character_label}{Label for the y axis of the scatterplot and two-dimensional color gradient.} \item{colors}{A vector of three colors in the order: (1) zero color (color when the two traits are in state 0), (2) horizontal_color (color to interpolate towards state 1 of the focal character) and (2) vertical_color (color to interpolate towards state 1 of the second character). See \code{?colorplaner::color_projections} for details.} \item{plot_as_waiting_time}{Logical, whether to convert the rate to waiting time (1/rate)} } \value{ A scatterplot with focal probability (0 or 1) on the x axis and the chosen diversification parameter on the y axis with means and error bars (mean +/- SD) for each state color coded with in two-dimensional colorplane. } \description{ A function to plot a jittered scatterplot of (model-averaged) diversification rates in the alternative states. 
} \examples{ library("colorplaner") data("diatoms") processed_muhisse <- m_process_recon(muhisse_recon=diatoms$muhisse_recon) m_scatterplot_cp( processed_recon = processed_muhisse, parameter = "turnover", focal_character = "prob_0x", focal_character_label = "p(mar)", second_character_label = "p(pla)", colors = c("#21908CFF", "#440154FF", "#FDE725FF"), plot_as_waiting_time = TRUE) + labs(y="Net turnover\n(waiting time in millions of years)") }
/man/m_scatterplot_cp.Rd
no_license
discindo/gghisse
R
false
true
2,450
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilhisse.R \name{m_scatterplot_cp} \alias{m_scatterplot_cp} \title{Plot diversification rates estimated by a HiSSE model with means and standard deviations across tips and a two-dimensional colorplane for color} \usage{ m_scatterplot_cp( processed_recon, parameter = "turnover", focal_character = c("prob_0x", "prob_x0"), focal_character_label, second_character_label, colors, plot_as_waiting_time = FALSE ) } \arguments{ \item{processed_recon}{An object produced with \code{m_process_recon}} \item{parameter}{The diversification parameter to be plotted on the y axis. Possible options are turnover, extinct.frac, net.div, speciation, extinction} \item{focal_character}{Specifies the x axis. Either \code{prob_0x} to plot the probability of state 0 for the first character, or \code{prob_x0} to plot the probability for state 0 for the second character.} \item{focal_character_label}{Label for the x axis of the scatterplot and two-dimensional color gradient. This should match the focal probability.} \item{second_character_label}{Label for the y axis of the scatterplot and two-dimensional color gradient.} \item{colors}{A vector of three colors in the order: (1) zero color (color when the two traits are in state 0), (2) horizontal_color (color to interpolate towards state 1 of the focal character) and (2) vertical_color (color to interpolate towards state 1 of the second character). See \code{?colorplaner::color_projections} for details.} \item{plot_as_waiting_time}{Logical, whether to convert the rate to waiting time (1/rate)} } \value{ A scatterplot with focal probability (0 or 1) on the x axis and the chosen diversification parameter on the y axis with means and error bars (mean +/- SD) for each state color coded with in two-dimensional colorplane. } \description{ A function to plot a jittered scatterplot of (model-averaged) diversification rates in the alternative states. 
} \examples{ library("colorplaner") data("diatoms") processed_muhisse <- m_process_recon(muhisse_recon=diatoms$muhisse_recon) m_scatterplot_cp( processed_recon = processed_muhisse, parameter = "turnover", focal_character = "prob_0x", focal_character_label = "p(mar)", second_character_label = "p(pla)", colors = c("#21908CFF", "#440154FF", "#FDE725FF"), plot_as_waiting_time = TRUE) + labs(y="Net turnover\n(waiting time in millions of years)") }
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#

library(shiny)
library(rCharts)  # provides showOutput() bindings for the highcharts widgets below

# Top-level UI: a two-tab page -- "Main" (summary dashboard) and
# "Upload" (data intake). Sidebar content is swapped with the active tab.
shinyUI(fluidPage(

  # Application title
  titlePanel("Overview"),

  # Sidebar with a slider input for number of bins
  # NOTE(review): the comment above is a leftover from the Shiny template;
  # the sidebar actually holds a site picker and a file chooser.
  sidebarLayout(
    sidebarPanel(
      # Site-selection controls, visible only while the "Main" tab is
      # active (input.tabs is the id of the tabsetPanel below).
      conditionalPanel(condition="input.tabs=='Main'",
        "Below we list all of the available sites - users can select which sites to focus on or leave all available",
        uiOutput('sites'),  # site choices are rendered server-side
        checkboxInput('selected','Select All',value=TRUE)
      ),
      # File chooser, visible only while the "Upload" tab is active.
      conditionalPanel(condition="input.tabs=='Upload'",
        fileInput('file1', 'Choose file to upload')
      )),

    # Show a plot of the generated distribution
    mainPanel(
      tabsetPanel(id="tabs",
        tabPanel("Main",
          "This web-application provides high level summary data to the user for a selected list of sites. BELOW we provide an option to provide cumulative data rather than for individual time points.",
          checkboxInput('cumsum','Check for cumulative sums'),
          "Data can be shown in simple tabular format or as a graphic via the toggle below",
          radioButtons('table','Show Table or Graphic?',list("Table"='table',"Graphic"='graphic')),
          # Graphic view: a metric selector plus two highcharts plots.
          conditionalPanel(condition="input.table=='graphic'",
            "With Graphic selected we provide a number of options for which metrics to show in the graph. The graphics summarize the selected data visually",
            selectInput('measure','Select Measurement',list('Inquiries'='Inquiries','Referrals'='Referrals','Screens'='Screens')),
            showOutput("day2",'highcharts'),
            showOutput("day3",'highcharts')),
          # Tabular view.
          conditionalPanel(condition="input.table=='table'",
            dataTableOutput("table"))
        ),
        tabPanel("Upload",
          "This second tab provides a user-interface for uploading new data that powers the visualizations in the first page. This currently only accepts CSVs and requires a previously defined file layout. This restriction could be relaxed in the future. 
A preview of the uploaded data is shown below for verification before submitting to the database.",
          dataTableOutput("preview"))
      )
    )
  )
))
/review/ui.R
no_license
danieltgustafson/gidb
R
false
false
2,355
r
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#

library(shiny)
library(rCharts)  # provides showOutput() bindings for the highcharts widgets below

# Top-level UI: a two-tab page -- "Main" (summary dashboard) and
# "Upload" (data intake). Sidebar content is swapped with the active tab.
shinyUI(fluidPage(

  # Application title
  titlePanel("Overview"),

  # Sidebar with a slider input for number of bins
  # NOTE(review): the comment above is a leftover from the Shiny template;
  # the sidebar actually holds a site picker and a file chooser.
  sidebarLayout(
    sidebarPanel(
      # Site-selection controls, visible only while the "Main" tab is
      # active (input.tabs is the id of the tabsetPanel below).
      conditionalPanel(condition="input.tabs=='Main'",
        "Below we list all of the available sites - users can select which sites to focus on or leave all available",
        uiOutput('sites'),  # site choices are rendered server-side
        checkboxInput('selected','Select All',value=TRUE)
      ),
      # File chooser, visible only while the "Upload" tab is active.
      conditionalPanel(condition="input.tabs=='Upload'",
        fileInput('file1', 'Choose file to upload')
      )),

    # Show a plot of the generated distribution
    mainPanel(
      tabsetPanel(id="tabs",
        tabPanel("Main",
          "This web-application provides high level summary data to the user for a selected list of sites. BELOW we provide an option to provide cumulative data rather than for individual time points.",
          checkboxInput('cumsum','Check for cumulative sums'),
          "Data can be shown in simple tabular format or as a graphic via the toggle below",
          radioButtons('table','Show Table or Graphic?',list("Table"='table',"Graphic"='graphic')),
          # Graphic view: a metric selector plus two highcharts plots.
          conditionalPanel(condition="input.table=='graphic'",
            "With Graphic selected we provide a number of options for which metrics to show in the graph. The graphics summarize the selected data visually",
            selectInput('measure','Select Measurement',list('Inquiries'='Inquiries','Referrals'='Referrals','Screens'='Screens')),
            showOutput("day2",'highcharts'),
            showOutput("day3",'highcharts')),
          # Tabular view.
          conditionalPanel(condition="input.table=='table'",
            dataTableOutput("table"))
        ),
        tabPanel("Upload",
          "This second tab provides a user-interface for uploading new data that powers the visualizations in the first page. This currently only accepts CSVs and requires a previously defined file layout. This restriction could be relaxed in the future. 
A preview of the uploaded data is shown below for verification before submitting to the database.",
          dataTableOutput("preview"))
      )
    )
  )
))
###
### Claire Kelling
### BIGSSS Computational Social Science Summer School on Migration
###
### Created: 05/24/19
### Last Updated: 06/12/19
###

# Start clean
# NOTE(review): rm(list = ls()) wipes the user's workspace -- acceptable for
# a workshop script, but avoid in reusable code.
rm(list = ls())

# Packages
#use install.packages("package_name") if you do not have the package installed
#install.packages("maptools") #for example
library(maptools)
library(sp)
library(rgdal)
library(spatstat)
library(ggplot2)
library(dplyr)
library(spdep)
library(CARBayes)

# Set working directory to bigsss_spatial_stats folder path
# Note: directories have to include "/" not "\"
setwd("~/spatial_stats_workshop")

###
### 1.) Load spatial polygon data data (Source: US Census)
###
load(file = "data/det_bg.Rdata")   # loads `det_bg_geog` into the workspace
det_bg <- det_bg_geog
rm(det_bg_geog)
plot(det_bg, main = "Block Groups in Detroit")

###
### 2.) Load spatial points/events (Source: Police Data Initiative)
###
load(file = "data/detroit_data.Rdata")  # loads `detroit_data`
events <- detroit_data
rm(detroit_data)

# Only use a subset of the data, for simplicity (otherwise, takes a while to plot)
# NOTE(review): runif() draws real numbers, so the indices are truncated and
# may repeat; sample() would give distinct integer indices -- confirm intent.
set.seed(2)
rand_ind <- runif(10000, 1, nrow(events))
events <- events[rand_ind,]

# Converting to spatial points dataframe (WGS84 lon/lat coordinates)
sp_point <- cbind(events$Longitude, events$Latitude)
colnames(sp_point) <- c("LONG","LAT")
proj <- CRS("+proj=longlat +datum=WGS84")
data.sp <- SpatialPointsDataFrame(coords=sp_point, data=events, proj4string=proj)
plot(data.sp, pch=16, cex=.5, axes=T)

# try overlaying the plots
plot(det_bg)
plot(data.sp, col = "blue", pch=16, cex=0.6, add = T)
#some points actually lie outside of Detroit

#clean up
rm(proj, sp_point, rand_ind)

###
### 3.) Count events per areal unit
###
# Build a dataset with event counts per block group.
# Needs to have same projection as the spatial points.
det_bg <- spTransform(det_bg, proj4string(data.sp))
overlap_set <- over(data.sp, det_bg)  # block-group attributes for each event
nrow(data.sp)
nrow(overlap_set)
#it has classified each of the points in the dataset into a block group
sum(is.na(overlap_set$STATEFP))
#there are some events that actually occur outside of city boundaries
detroit_df <- as.data.frame(data.sp)
det_dat_over <- cbind(detroit_df, overlap_set)
#det_dat_over <- det_dat_over[!is.na(over(domv_dat_detroit,det_bg)),]
det_dat_ov <- det_dat_over[!is.na(det_dat_over$GEOID),]  # drop events outside the city
agg_dat <- plyr::count(det_dat_ov, c('GEOID'))           # event count per block group
agg_dat$GEOID <- as.factor(agg_dat$GEOID)

# now I would like to create a plot that illustrates how many events are occurring per block group
num_per_bg <- as.numeric(agg_dat$freq)

# Now I will create the data structure that I need to create a plot
sp_f <- fortify(det_bg)
det_bg$id <- row.names(det_bg)
# NOTE(review): `by = (GEOID = "GEOID")` is a parenthesized assignment, not
# c(GEOID = "GEOID"); it passes "GEOID" and, as a side effect, creates a
# global `GEOID` object (removed by the rm() below) -- confirm intent.
det_bg@data <- left_join(det_bg@data, agg_dat, by = (GEOID = "GEOID"))
sp_f <- left_join(sp_f, det_bg@data)

# make a color or grayscale plot to illustrate this
count_by_bg <- ggplot() + geom_polygon(data = sp_f, aes(long, lat, group = group, fill = freq)) +
  coord_equal() + labs(fill = "No. of \nEvents")+
  geom_polygon(data=sp_f,aes(long,lat, group = group), fill = NA, col = "black") +
  ggtitle("Number of Events per Block Group")+
  scale_fill_gradient(low = "lightblue", high = "navyblue")
count_by_bg
rm(count_by_bg, sp_f, overlap_set, detroit_df, det_dat_over, det_dat_ov, agg_dat, num_per_bg, GEOID)

###
### 4.) Areal Unit modeling
###
length(which(is.na(det_bg@data$freq))) #some block groups with no crime
det_bg$freq[which(is.na(det_bg@data$freq))] <- 0 #replace with 0, instead of NA

#Create neighborhood matrix from shape file
#Create neighborhood matrix in different formats for different functions
W.nb <- poly2nb(det_bg, row.names = rownames(det_bg@data))  # neighbours list
W.list <- nb2listw(W.nb, style="B")  # listw form for moran.mc()
W.mat <- nb2mat(W.nb, style="B")     # binary matrix form for S.CARbym()
View(head(W.mat[,1:10], n =10))

#Plot neighborhood matrix
coords <- coordinates(det_bg)
plot(det_bg, border = "gray", main = "Neighorhood Matrix")
plot(W.nb, coords, pch = 1, cex = 0.6, col="blue", add = TRUE)

# Preliminary test using Moran's I on the residuals of a
# non-spatial model (just a linear model)
form <- freq ~ median_income + upemp_rate+total_pop+perc_male+med_age+herf_index
model <- lm(formula=form, data=det_bg@data)
resid.model <- residuals(model)
moran.mc(x=resid.model, listw=W.list, nsim=5000)

#Fit model using neighborhood matrix
#rownames(W.mat) <- NULL #need this for test if matrix is symmetric
# Takes a couple of minutes to run
# *** Should increase n.sample and burnin when running for your data***
model.bym <- S.CARbym(formula=form, data=det_bg@data, family="poisson", W=W.mat, burnin=2000, n.sample=5000, thin=10)
summary(model.bym)
model.bym$modelfit
model.bym$summary.results[,1:3]
#should consider standardization of variables
rm(W.list, W.mat, W.nb, model, coords, form, resid.model, model.bym)

###
### 5.) Point Process Modeling
###
# Need to make smaller dataset for point process modeling
# NOTE(review): runif() indices again -- see the note in section 2.
data.sp <- data.sp[runif(500, 1, length(data.sp)),]

## Preliminary test using Ripley's K
# Transform our data into ppp object
bg_owin <- as.owin(det_bg)
xyzt <- as.matrix(coordinates(data.sp))
event_ppp <- as.ppp(xyzt, bg_owin)
#some lie outside of the specified window

#Get Ripley's K plot with bootstrapped CIs
# simultaneous (takes a VERY long time)
# sig level = (nrank/(1+nsim))
#k_sim <- envelope(event_ppp, fun = Kest, global = FALSE, nrank = 20, nsim = 800)

#F test using ECDF's
# *** Should use a larger "nsim" when using on your data ***
# Takes a couple of minutes to run
f_sim <- envelope(event_ppp, fun = Fest, global = FALSE, nrank = 20, nsim = 100)
plot(f_sim, main = "F function Envelope, Pointwise", ylab = "F function")

# Plot ppp object
plot(event_ppp)

# More visualizations
plot(density(event_ppp))
persp(density(event_ppp))
/src/bigsss_preliminaries.R
no_license
ckelling/spatial_stats_workshop
R
false
false
5,670
r
###
### Claire Kelling
### BIGSSS Computational Social Science Summer School on Migration
###
### Created: 05/24/19
### Last Updated: 06/12/19
###

# Start clean
# NOTE(review): rm(list = ls()) wipes the user's workspace -- acceptable for
# a workshop script, but avoid in reusable code.
rm(list = ls())

# Packages
#use install.packages("package_name") if you do not have the package installed
#install.packages("maptools") #for example
library(maptools)
library(sp)
library(rgdal)
library(spatstat)
library(ggplot2)
library(dplyr)
library(spdep)
library(CARBayes)

# Set working directory to bigsss_spatial_stats folder path
# Note: directories have to include "/" not "\"
setwd("~/spatial_stats_workshop")

###
### 1.) Load spatial polygon data data (Source: US Census)
###
load(file = "data/det_bg.Rdata")   # loads `det_bg_geog` into the workspace
det_bg <- det_bg_geog
rm(det_bg_geog)
plot(det_bg, main = "Block Groups in Detroit")

###
### 2.) Load spatial points/events (Source: Police Data Initiative)
###
load(file = "data/detroit_data.Rdata")  # loads `detroit_data`
events <- detroit_data
rm(detroit_data)

# Only use a subset of the data, for simplicity (otherwise, takes a while to plot)
# NOTE(review): runif() draws real numbers, so the indices are truncated and
# may repeat; sample() would give distinct integer indices -- confirm intent.
set.seed(2)
rand_ind <- runif(10000, 1, nrow(events))
events <- events[rand_ind,]

# Converting to spatial points dataframe (WGS84 lon/lat coordinates)
sp_point <- cbind(events$Longitude, events$Latitude)
colnames(sp_point) <- c("LONG","LAT")
proj <- CRS("+proj=longlat +datum=WGS84")
data.sp <- SpatialPointsDataFrame(coords=sp_point, data=events, proj4string=proj)
plot(data.sp, pch=16, cex=.5, axes=T)

# try overlaying the plots
plot(det_bg)
plot(data.sp, col = "blue", pch=16, cex=0.6, add = T)
#some points actually lie outside of Detroit

#clean up
rm(proj, sp_point, rand_ind)

###
### 3.) Count events per areal unit
###
# Build a dataset with event counts per block group.
# Needs to have same projection as the spatial points.
det_bg <- spTransform(det_bg, proj4string(data.sp))
overlap_set <- over(data.sp, det_bg)  # block-group attributes for each event
nrow(data.sp)
nrow(overlap_set)
#it has classified each of the points in the dataset into a block group
sum(is.na(overlap_set$STATEFP))
#there are some events that actually occur outside of city boundaries
detroit_df <- as.data.frame(data.sp)
det_dat_over <- cbind(detroit_df, overlap_set)
#det_dat_over <- det_dat_over[!is.na(over(domv_dat_detroit,det_bg)),]
det_dat_ov <- det_dat_over[!is.na(det_dat_over$GEOID),]  # drop events outside the city
agg_dat <- plyr::count(det_dat_ov, c('GEOID'))           # event count per block group
agg_dat$GEOID <- as.factor(agg_dat$GEOID)

# now I would like to create a plot that illustrates how many events are occurring per block group
num_per_bg <- as.numeric(agg_dat$freq)

# Now I will create the data structure that I need to create a plot
sp_f <- fortify(det_bg)
det_bg$id <- row.names(det_bg)
# NOTE(review): `by = (GEOID = "GEOID")` is a parenthesized assignment, not
# c(GEOID = "GEOID"); it passes "GEOID" and, as a side effect, creates a
# global `GEOID` object (removed by the rm() below) -- confirm intent.
det_bg@data <- left_join(det_bg@data, agg_dat, by = (GEOID = "GEOID"))
sp_f <- left_join(sp_f, det_bg@data)

# make a color or grayscale plot to illustrate this
count_by_bg <- ggplot() + geom_polygon(data = sp_f, aes(long, lat, group = group, fill = freq)) +
  coord_equal() + labs(fill = "No. of \nEvents")+
  geom_polygon(data=sp_f,aes(long,lat, group = group), fill = NA, col = "black") +
  ggtitle("Number of Events per Block Group")+
  scale_fill_gradient(low = "lightblue", high = "navyblue")
count_by_bg
rm(count_by_bg, sp_f, overlap_set, detroit_df, det_dat_over, det_dat_ov, agg_dat, num_per_bg, GEOID)

###
### 4.) Areal Unit modeling
###
length(which(is.na(det_bg@data$freq))) #some block groups with no crime
det_bg$freq[which(is.na(det_bg@data$freq))] <- 0 #replace with 0, instead of NA

#Create neighborhood matrix from shape file
#Create neighborhood matrix in different formats for different functions
W.nb <- poly2nb(det_bg, row.names = rownames(det_bg@data))  # neighbours list
W.list <- nb2listw(W.nb, style="B")  # listw form for moran.mc()
W.mat <- nb2mat(W.nb, style="B")     # binary matrix form for S.CARbym()
View(head(W.mat[,1:10], n =10))

#Plot neighborhood matrix
coords <- coordinates(det_bg)
plot(det_bg, border = "gray", main = "Neighorhood Matrix")
plot(W.nb, coords, pch = 1, cex = 0.6, col="blue", add = TRUE)

# Preliminary test using Moran's I on the residuals of a
# non-spatial model (just a linear model)
form <- freq ~ median_income + upemp_rate+total_pop+perc_male+med_age+herf_index
model <- lm(formula=form, data=det_bg@data)
resid.model <- residuals(model)
moran.mc(x=resid.model, listw=W.list, nsim=5000)

#Fit model using neighborhood matrix
#rownames(W.mat) <- NULL #need this for test if matrix is symmetric
# Takes a couple of minutes to run
# *** Should increase n.sample and burnin when running for your data***
model.bym <- S.CARbym(formula=form, data=det_bg@data, family="poisson", W=W.mat, burnin=2000, n.sample=5000, thin=10)
summary(model.bym)
model.bym$modelfit
model.bym$summary.results[,1:3]
#should consider standardization of variables
rm(W.list, W.mat, W.nb, model, coords, form, resid.model, model.bym)

###
### 5.) Point Process Modeling
###
# Need to make smaller dataset for point process modeling
# NOTE(review): runif() indices again -- see the note in section 2.
data.sp <- data.sp[runif(500, 1, length(data.sp)),]

## Preliminary test using Ripley's K
# Transform our data into ppp object
bg_owin <- as.owin(det_bg)
xyzt <- as.matrix(coordinates(data.sp))
event_ppp <- as.ppp(xyzt, bg_owin)
#some lie outside of the specified window

#Get Ripley's K plot with bootstrapped CIs
# simultaneous (takes a VERY long time)
# sig level = (nrank/(1+nsim))
#k_sim <- envelope(event_ppp, fun = Kest, global = FALSE, nrank = 20, nsim = 800)

#F test using ECDF's
# *** Should use a larger "nsim" when using on your data ***
# Takes a couple of minutes to run
f_sim <- envelope(event_ppp, fun = Fest, global = FALSE, nrank = 20, nsim = 100)
plot(f_sim, main = "F function Envelope, Pointwise", ylab = "F function")

# Plot ppp object
plot(event_ppp)

# More visualizations
plot(density(event_ppp))
persp(density(event_ppp))
## Caching the inverse of a matrix
##
## Matrix inversion can be expensive, so these two functions let the result
## of solve() be computed once and reused. makeCacheMatrix() wraps a matrix
## together with storage for its inverse; cacheSolve() returns the cached
## inverse when available and computes (and stores) it otherwise.

## Build a caching wrapper around the matrix `x`.
##
## Returns a list of four accessor functions:
##   setmatrix(y)   - replace the wrapped matrix and drop any cached inverse
##   getmatrix()    - return the wrapped matrix
##   setinverse(v)  - store `v` as the cached inverse
##   getinverse()   - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    setmatrix = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    getmatrix = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}

## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object).
##
## On the first call the inverse is computed with solve() and stored in the
## wrapper; later calls return the stored value (and emit a message).
## Any extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute, then remember for next time.
    inv <- solve(x$getmatrix(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
/cachematrix.R
no_license
hfarsi/ProgrammingAssignment2
R
false
false
2,026
r
## Caching the inverse of a matrix
##
## Matrix inversion can be expensive, so these two functions let the result
## of solve() be computed once and reused. makeCacheMatrix() wraps a matrix
## together with storage for its inverse; cacheSolve() returns the cached
## inverse when available and computes (and stores) it otherwise.

## Build a caching wrapper around the matrix `x`.
##
## Returns a list of four accessor functions:
##   setmatrix(y)   - replace the wrapped matrix and drop any cached inverse
##   getmatrix()    - return the wrapped matrix
##   setinverse(v)  - store `v` as the cached inverse
##   getinverse()   - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    setmatrix = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    getmatrix = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}

## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object).
##
## On the first call the inverse is computed with solve() and stored in the
## wrapper; later calls return the stored value (and emit a message).
## Any extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute, then remember for next time.
    inv <- solve(x$getmatrix(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
# Module UI

#' @title mod_select_substances_ui and mod_select_substances_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_select_substances
#'
#' @keywords internal
#' @export
#' @import dplyr
#' @import shiny
mod_select_substances_ui <- function(id){
  ns <- NS(id)
  tagList(
    # Placeholder filled in by the server: either an informational message
    # or a slider, depending on how many substances match the filters.
    uiOutput(ns("substanceSelector"))
  )
}

# Module Server

#' @rdname mod_select_substances
#' @export
#' @keywords internal
mod_select_substances_server <- function(input, output, session, data, departement, classe_substance){
  # Number of distinct substances in `data` for the currently selected
  # departement(s) and substance classification(s).
  # NOTE(review): `departement` and `classe_substance` appear to be
  # reactives supplied by the caller (they are called with ()) -- confirm.
  n_top <- reactive({
    filter(data,
           departement %in% departement(),
           classification %in% classe_substance()) %>%
      pull(substance) %>%
      n_distinct()
  })

  # Render the selector: a message when 0 or 1 substances match, otherwise
  # a slider letting the user choose how many substances to display.
  output$substanceSelector <- renderUI({
    ns <- session$ns

    if (n_top() == 0) {
      HTML("<p style=\"color:grey; font-size:24px\"><br>Pas de données de ventes de substances pesticides</p>")
    } else {
      if (n_top() == 1) {
        HTML("<p style=\"color:#39a9dc; font-size:24px\"><br>Une seule substance représente plus de 50% des ventes</p>")
      } else {
        sliderInput(inputId = ns("nb_subst"),
                    label = "Nombre de substances à représenter",
                    value = 5, min = 1, max = n_top(),
                    step = 1, ticks = FALSE)
      }
    }
  })

  # Return the chosen count as a reactive so the caller can consume it.
  reactive(input$nb_subst)
}

## To be copied in the UI
# mod_select_substances_ui("select_substances_ui_1")

## To be copied in the server
# callModule(mod_select_substances_server, "select_substances_ui_1")
/R/01.06_mod_select_substances.R
permissive
CedricMondy/dataviz_bnvd
R
false
false
1,639
r
# Module UI

#' @title mod_select_substances_ui and mod_select_substances_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_select_substances
#'
#' @keywords internal
#' @export
#' @import dplyr
#' @import shiny
mod_select_substances_ui <- function(id){
  ns <- NS(id)
  tagList(
    # Placeholder filled in by the server: either an informational message
    # or a slider, depending on how many substances match the filters.
    uiOutput(ns("substanceSelector"))
  )
}

# Module Server

#' @rdname mod_select_substances
#' @export
#' @keywords internal
mod_select_substances_server <- function(input, output, session, data, departement, classe_substance){
  # Number of distinct substances in `data` for the currently selected
  # departement(s) and substance classification(s).
  # NOTE(review): `departement` and `classe_substance` appear to be
  # reactives supplied by the caller (they are called with ()) -- confirm.
  n_top <- reactive({
    filter(data,
           departement %in% departement(),
           classification %in% classe_substance()) %>%
      pull(substance) %>%
      n_distinct()
  })

  # Render the selector: a message when 0 or 1 substances match, otherwise
  # a slider letting the user choose how many substances to display.
  output$substanceSelector <- renderUI({
    ns <- session$ns

    if (n_top() == 0) {
      HTML("<p style=\"color:grey; font-size:24px\"><br>Pas de données de ventes de substances pesticides</p>")
    } else {
      if (n_top() == 1) {
        HTML("<p style=\"color:#39a9dc; font-size:24px\"><br>Une seule substance représente plus de 50% des ventes</p>")
      } else {
        sliderInput(inputId = ns("nb_subst"),
                    label = "Nombre de substances à représenter",
                    value = 5, min = 1, max = n_top(),
                    step = 1, ticks = FALSE)
      }
    }
  })

  # Return the chosen count as a reactive so the caller can consume it.
  reactive(input$nb_subst)
}

## To be copied in the UI
# mod_select_substances_ui("select_substances_ui_1")

## To be copied in the server
# callModule(mod_select_substances_server, "select_substances_ui_1")
library(ggplot2) library(scales) library(reshape2) library(cowplot) theme_set(theme_minimal_grid()) library(png) library(grid) library(RColorBrewer) #exportar 1400 x 1400 C1_line = "solid" C2_line = "solid" C1 <- "#007849" C2 <- "#82b135" #FigureS4A sugarcane4mo9mo_Valine_timecourses <- read.csv("4mo9mo_Valine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Valine_timecourses, aes(x=CT, y=Valine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Valine <- ggplot(data=sugarcane4mo9mo_Valine_timecourses, aes(x=CT, y=Valine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = 
-2.75, label = "Valine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = element_text(size=18), #axis.text.y = element_blank(), #axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Valine FinalMaxMin #FigureS4B sugarcane4mo9mo_Leucine_timecourses <- read.csv("4mo9mo_Leucine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Leucine_timecourses, aes(x=CT, y=Leucine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Leucine <- ggplot(data=sugarcane4mo9mo_Leucine_timecourses, aes(x=CT, y=Leucine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = 
Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Leucine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Leucine FinalMaxMin #FigureS4C sugarcane4mo9mo_Isoleucine_timecourses <- read.csv("4mo9mo_Isoleucine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Isoleucine_timecourses, aes(x=CT, y=Isoleucine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- 
data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Isoleucine <- ggplot(data=sugarcane4mo9mo_Isoleucine_timecourses, aes(x=CT, y=Isoleucine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Isoleucine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Isoleucine FinalMaxMin #Figure4D sugarcane4mo9mo_Citrate_timecourses <- read.csv("4mo9mo_Citrate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Citrate_timecourses, aes(x=CT, y=Citrate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, 
outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[8:80, ] fit4 <- fit2[82:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*23.66/71)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*25.85/77)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*23.66/71)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*25.85/77)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Citrate <- ggplot(data=sugarcane4mo9mo_Citrate_timecourses, aes(x=CT, y=Citrate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Citrate", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = 
element_text(size=18), #axis.text.y = element_blank(), #axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Citrate FinalMaxMin #Figure4E sugarcane4mo9mo_Glycerate_timecourses <- read.csv("4mo9mo_Glycerate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Glycerate_timecourses, aes(x=CT, y=Glycerate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Glycerate <- ggplot(data=sugarcane4mo9mo_Glycerate_timecourses, aes(x=CT, y=Glycerate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Glycerate", size = 6, parse=TRUE)+ 
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Glycerate FinalMaxMin #Figure4F sugarcane4mo9mo_Malate_timecourses <- read.csv("4mo9mo_Malate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Malate_timecourses, aes(x=CT, y=Malate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Malate <- ggplot(data=sugarcane4mo9mo_Malate_timecourses, aes(x=CT, y=Malate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = 
Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Malate", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Malate FinalMaxMin #Figure4G #changed interval sugarcane4mo9mo_Aspartate_timecourses <- read.csv("4mo9mo_Aspartate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Aspartate_timecourses, aes(x=CT, y=Aspartate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[6:72, ] fit4 <- fit2[86:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*21.69/65)+0.203 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*24.52/73)-0.293 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*21.69/65)+0.203 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- 
((minC20$row-1)*26.18/73)-0.293 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Aspartate <- ggplot(data=sugarcane4mo9mo_Aspartate_timecourses, aes(x=CT, y=Aspartate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Aspartate", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), axis.text.x = element_text(size=18), #axis.text.x = element_blank(), #axis.title.x = element_blank(), axis.text.y = element_text(size=18), #axis.text.y = element_blank(), #axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Aspartate FinalMaxMin #Figure4H sugarcane4mo9mo_Glycine_timecourses <- read.csv("4mo9mo_Glycine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Glycine_timecourses, aes(x=CT, y=Glycine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] 
fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Glycine <- ggplot(data=sugarcane4mo9mo_Glycine_timecourses, aes(x=CT, y=Glycine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Glycine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), axis.text.x = element_text(size=18), #axis.text.x = element_blank(), #axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) 
sugarcane4mo9mo_Glycine FinalMaxMin #Figure4I sugarcane4mo9mo_GABA_timecourses <- read.csv("4mo9mo_GABA.txt", sep="\t") ggplot(data=sugarcane4mo9mo_GABA_timecourses, aes(x=CT, y=GABA, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_GABA <- ggplot(data=sugarcane4mo9mo_GABA_timecourses, aes(x=CT, y=GABA, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "GABA", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = 
scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), axis.text.x = element_text(size=18), #axis.text.x = element_blank(), #axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_GABA FinalMaxMin plot_grid(sugarcane4mo9mo_Valine, sugarcane4mo9mo_Leucine, sugarcane4mo9mo_Isoleucine, sugarcane4mo9mo_Citrate, sugarcane4mo9mo_Glycerate, sugarcane4mo9mo_Malate, sugarcane4mo9mo_Aspartate, sugarcane4mo9mo_Glycine, sugarcane4mo9mo_GABA, labels = c("A","B","C","D","E","F","G","H","I"), ncol = 3, align = "none", rel_widths = c(1.15, 1, 1),rel_heights = c(1,1,1.15),label_size = 20)
/02_figures/FigureS4/FigureS4.R
no_license
Jovanderson/Microenvironments
R
false
false
24,257
r
library(ggplot2) library(scales) library(reshape2) library(cowplot) theme_set(theme_minimal_grid()) library(png) library(grid) library(RColorBrewer) #exportar 1400 x 1400 C1_line = "solid" C2_line = "solid" C1 <- "#007849" C2 <- "#82b135" #FigureS4A sugarcane4mo9mo_Valine_timecourses <- read.csv("4mo9mo_Valine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Valine_timecourses, aes(x=CT, y=Valine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Valine <- ggplot(data=sugarcane4mo9mo_Valine_timecourses, aes(x=CT, y=Valine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = 
-2.75, label = "Valine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = element_text(size=18), #axis.text.y = element_blank(), #axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Valine FinalMaxMin #FigureS4B sugarcane4mo9mo_Leucine_timecourses <- read.csv("4mo9mo_Leucine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Leucine_timecourses, aes(x=CT, y=Leucine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Leucine <- ggplot(data=sugarcane4mo9mo_Leucine_timecourses, aes(x=CT, y=Leucine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = 
Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Leucine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Leucine FinalMaxMin #FigureS4C sugarcane4mo9mo_Isoleucine_timecourses <- read.csv("4mo9mo_Isoleucine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Isoleucine_timecourses, aes(x=CT, y=Isoleucine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- 
data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Isoleucine <- ggplot(data=sugarcane4mo9mo_Isoleucine_timecourses, aes(x=CT, y=Isoleucine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Isoleucine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Isoleucine FinalMaxMin #Figure4D sugarcane4mo9mo_Citrate_timecourses <- read.csv("4mo9mo_Citrate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Citrate_timecourses, aes(x=CT, y=Citrate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, 
outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[8:80, ] fit4 <- fit2[82:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*23.66/71)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*25.85/77)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*23.66/71)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*25.85/77)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Citrate <- ggplot(data=sugarcane4mo9mo_Citrate_timecourses, aes(x=CT, y=Citrate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Citrate", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = 
element_text(size=18), #axis.text.y = element_blank(), #axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Citrate FinalMaxMin #Figure4E sugarcane4mo9mo_Glycerate_timecourses <- read.csv("4mo9mo_Glycerate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Glycerate_timecourses, aes(x=CT, y=Glycerate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Glycerate <- ggplot(data=sugarcane4mo9mo_Glycerate_timecourses, aes(x=CT, y=Glycerate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Glycerate", size = 6, parse=TRUE)+ 
scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Glycerate FinalMaxMin #Figure4F sugarcane4mo9mo_Malate_timecourses <- read.csv("4mo9mo_Malate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Malate_timecourses, aes(x=CT, y=Malate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Malate <- ggplot(data=sugarcane4mo9mo_Malate_timecourses, aes(x=CT, y=Malate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = 
Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Malate", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), #axis.text.x = element_text(size=18), axis.text.x = element_blank(), axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Malate FinalMaxMin #Figure4G #changed interval sugarcane4mo9mo_Aspartate_timecourses <- read.csv("4mo9mo_Aspartate.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Aspartate_timecourses, aes(x=CT, y=Aspartate, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[6:72, ] fit4 <- fit2[86:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*21.69/65)+0.203 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*24.52/73)-0.293 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*21.69/65)+0.203 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- 
((minC20$row-1)*26.18/73)-0.293 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Aspartate <- ggplot(data=sugarcane4mo9mo_Aspartate_timecourses, aes(x=CT, y=Aspartate, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Aspartate", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), axis.text.x = element_text(size=18), #axis.text.x = element_blank(), #axis.title.x = element_blank(), axis.text.y = element_text(size=18), #axis.text.y = element_blank(), #axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_Aspartate FinalMaxMin #Figure4H sugarcane4mo9mo_Glycine_timecourses <- read.csv("4mo9mo_Glycine.txt", sep="\t") ggplot(data=sugarcane4mo9mo_Glycine_timecourses, aes(x=CT, y=Glycine, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] 
fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_Glycine <- ggplot(data=sugarcane4mo9mo_Glycine_timecourses, aes(x=CT, y=Glycine, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "Glycine", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), axis.text.x = element_text(size=18), #axis.text.x = element_blank(), #axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) 
sugarcane4mo9mo_Glycine FinalMaxMin #Figure4I sugarcane4mo9mo_GABA_timecourses <- read.csv("4mo9mo_GABA.txt", sep="\t") ggplot(data=sugarcane4mo9mo_GABA_timecourses, aes(x=CT, y=GABA, group=Campo)) + geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5) fit2 <- data.frame(fit2) fit3 <- fit2[1:80, ] fit4 <- fit2[81:160, ] fit5 <- cbind(fit3, fit4) fit5 <- data.frame(fit5) maxC10 <- data.frame(which(fit5 == max(fit5$fit3), arr.ind=TRUE)) maxC1 <- ((maxC10$row-1)*25.96/79)-1.44 maxC20 <- data.frame(which(fit5 == max(fit5$fit4), arr.ind=TRUE)) maxC2 <- ((maxC20$row-1)*26.18/79)-1.95 minC10 <- data.frame(which(fit5 == min(fit5$fit3), arr.ind=TRUE)) minC1 <- ((minC10$row-1)*25.96/79)-1.44 minC20 <- data.frame(which(fit5 == min(fit5$fit4), arr.ind=TRUE)) minC2 <- ((minC20$row-1)*26.18/79)-1.95 FinalMaxMin <- cbind(maxC1, minC1, maxC2, minC2) sugarcane4mo9mo_GABA <- ggplot(data=sugarcane4mo9mo_GABA_timecourses, aes(x=CT, y=GABA, group=Campo)) + annotate("rect", xmin = -2, xmax = 0, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ annotate("rect", xmin = 12, xmax = 24, ymin = -3, ymax = 3, alpha = .5, fill = "#e3e3e3")+ geom_jitter(aes(col=Campo, shape = Campo),position=position_jitter(0.2), size = 3) + stat_smooth(aes(group = Campo, colour = Campo, fill=Campo, outfit=fit2<<-..y..),method="loess", span = 0.5)+ scale_colour_manual(values=c(C1, C2))+ scale_fill_manual(values=c(C1, C2))+ scale_linetype_manual(values=c(C1_line, C2_line)) + annotate("text", x = maxC1, y = 2.75, label = "\u25bc", size = 6, colour=C1)+ annotate("text", x = maxC2, y = 2.75, label = "\u25bc", size = 6, colour=C2)+ annotate("text", x = 22, y = -2.75, label = "GABA", size = 6, parse=TRUE)+ scale_x_continuous(breaks=seq(0,24,6), name="ZT (h)", limits=c(-2, 24.75))+ scale_y_continuous(breaks=seq(-3,3,1.5), name="Normalized Expression", limits=c(-3,3), labels = 
scales::number_format(accuracy = 0.1))+ theme(panel.grid.major = element_line(colour = "#efefef", size = 0.75), text = element_text(size=18), axis.ticks = element_blank(), axis.line = element_blank(), axis.text.x = element_text(size=18), #axis.text.x = element_blank(), #axis.title.x = element_blank(), #axis.text.y = element_text(size=18), axis.text.y = element_blank(), axis.title.y = element_blank(), legend.position = "none" ) sugarcane4mo9mo_GABA FinalMaxMin plot_grid(sugarcane4mo9mo_Valine, sugarcane4mo9mo_Leucine, sugarcane4mo9mo_Isoleucine, sugarcane4mo9mo_Citrate, sugarcane4mo9mo_Glycerate, sugarcane4mo9mo_Malate, sugarcane4mo9mo_Aspartate, sugarcane4mo9mo_Glycine, sugarcane4mo9mo_GABA, labels = c("A","B","C","D","E","F","G","H","I"), ncol = 3, align = "none", rel_widths = c(1.15, 1, 1),rel_heights = c(1,1,1.15),label_size = 20)
#! /usr/bin/Rscript # Plot graphics showing how well (or not) each primer set kept the community as determined by the Universal (Earth Microbiome Project) primers) # Libraries library(argparse) library(ggplot2) library(phyloseq) options(stringsAsFactors=F) # Command-line arguments parser=ArgumentParser() parser$add_argument("-i", "--infile", help="RDS file containing the phyloseq object to analyze (from step 2a)") parser$add_argument("-o", "--outprefix", help="Prefix for all output files") args=parser$parse_args() # setwd('/home/jgwall/Projects/Microbiomes/MicrobiomeMethodsDevelopment/CompareSampleExtractionAndAmplification_Mohsen_Cecelia/2019 07 Cecelia Final Data/2_Analysis/') # args=parser$parse_args(c("-i", "2b_filtered_data.phyloseq.Rdata", "-o", "99_tmp")) # Load data cat("Loading phyloseq data\n") mydata=readRDS(args$infile) # Split data into plant and non-plant samples is_plant = get_variable(mydata, "sample.type") %in% c("leaf-maize", "leaf-soybean") non_plant = get_variable(mydata, "sample.type") %in% c("defined-community", "soil-clay", "soil-flowerbed") plants = prune_samples(is_plant, mydata) nonplants = prune_samples(non_plant, mydata) # Make my own function to collapse by levels, since taxa_glom() keeps giving integer overflow errors collapse_taxa=function(phylo, level){ # Check that the requested phylogenetic level is actually in the data if(! 
level %in% rank_names(phylo)){ warning("Illegal taxonomic rank requested; returning unchanged object") return(phylo) } # Filter out OTUs with 0 reads has_reads = which(taxa_sums(phylo)>0) phylo = prune_taxa(names(has_reads), phylo) # Iteratively merge groups tempdata = phylo mytaxa = unique(as.character(tax_table(tempdata)[,level])) for(taxon in sort(mytaxa)){ # cat("Collapsing", taxon,"\n") classifications = as.character(tax_table(tempdata)[,level]) tomerge = which(classifications == taxon) tempdata = merge_taxa(tempdata, tomerge, archetype=1) } return(tempdata) } # Look over some high-level taxonomy to display results for(level in c("Domain", "Phylum", "Class", "Order")){ cat("\tPlotting distortion barplots at level",level,"\n") # Plant community data subplants = collapse_taxa(plants, level=level) subplants = transform_sample_counts(subplants, fun=function(x){x/sum(x)}) # Convert to relative abundance p1 = plot_bar(subplants, fill=level, title=paste("Plant community distortion at",level,"level")) + facet_grid(.~sample.type, scales="free_x") # Nonplant community data subnonplants = collapse_taxa(nonplants, level=level) subnonplants = transform_sample_counts(subnonplants, fun=function(x){x/sum(x)}) # Convert to relative abundance p2 = plot_bar(subnonplants, fill=level, title=paste("Non-plant community distortion at",level,"level")) + facet_grid(.~sample.type, scales="free_x") ggsave(paste(args$outprefix, level, "plant", "png", sep="."), plot=p1, width=12, height=8) ggsave(paste(args$outprefix, level, "nonplant", "png", sep="."), plot=p2, width=12, height=8) }
/TestPrimers/2d_AssessCommunityDistortion_taxonomy.r
no_license
wallacelab/paper-giangacomo-16s-methods
R
false
false
3,151
r
#! /usr/bin/Rscript # Plot graphics showing how well (or not) each primer set kept the community as determined by the Universal (Earth Microbiome Project) primers) # Libraries library(argparse) library(ggplot2) library(phyloseq) options(stringsAsFactors=F) # Command-line arguments parser=ArgumentParser() parser$add_argument("-i", "--infile", help="RDS file containing the phyloseq object to analyze (from step 2a)") parser$add_argument("-o", "--outprefix", help="Prefix for all output files") args=parser$parse_args() # setwd('/home/jgwall/Projects/Microbiomes/MicrobiomeMethodsDevelopment/CompareSampleExtractionAndAmplification_Mohsen_Cecelia/2019 07 Cecelia Final Data/2_Analysis/') # args=parser$parse_args(c("-i", "2b_filtered_data.phyloseq.Rdata", "-o", "99_tmp")) # Load data cat("Loading phyloseq data\n") mydata=readRDS(args$infile) # Split data into plant and non-plant samples is_plant = get_variable(mydata, "sample.type") %in% c("leaf-maize", "leaf-soybean") non_plant = get_variable(mydata, "sample.type") %in% c("defined-community", "soil-clay", "soil-flowerbed") plants = prune_samples(is_plant, mydata) nonplants = prune_samples(non_plant, mydata) # Make my own function to collapse by levels, since taxa_glom() keeps giving integer overflow errors collapse_taxa=function(phylo, level){ # Check that the requested phylogenetic level is actually in the data if(! 
level %in% rank_names(phylo)){ warning("Illegal taxonomic rank requested; returning unchanged object") return(phylo) } # Filter out OTUs with 0 reads has_reads = which(taxa_sums(phylo)>0) phylo = prune_taxa(names(has_reads), phylo) # Iteratively merge groups tempdata = phylo mytaxa = unique(as.character(tax_table(tempdata)[,level])) for(taxon in sort(mytaxa)){ # cat("Collapsing", taxon,"\n") classifications = as.character(tax_table(tempdata)[,level]) tomerge = which(classifications == taxon) tempdata = merge_taxa(tempdata, tomerge, archetype=1) } return(tempdata) } # Look over some high-level taxonomy to display results for(level in c("Domain", "Phylum", "Class", "Order")){ cat("\tPlotting distortion barplots at level",level,"\n") # Plant community data subplants = collapse_taxa(plants, level=level) subplants = transform_sample_counts(subplants, fun=function(x){x/sum(x)}) # Convert to relative abundance p1 = plot_bar(subplants, fill=level, title=paste("Plant community distortion at",level,"level")) + facet_grid(.~sample.type, scales="free_x") # Nonplant community data subnonplants = collapse_taxa(nonplants, level=level) subnonplants = transform_sample_counts(subnonplants, fun=function(x){x/sum(x)}) # Convert to relative abundance p2 = plot_bar(subnonplants, fill=level, title=paste("Non-plant community distortion at",level,"level")) + facet_grid(.~sample.type, scales="free_x") ggsave(paste(args$outprefix, level, "plant", "png", sep="."), plot=p1, width=12, height=8) ggsave(paste(args$outprefix, level, "nonplant", "png", sep="."), plot=p2, width=12, height=8) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/blob_copyurl.R, R/transfer_generics.R \name{copy_url_to_storage} \alias{copy_url_to_storage} \alias{multicopy_url_to_storage} \alias{copy_url_to_storage.blob_container} \alias{multicopy_url_to_storage.blob_container} \alias{storage_upload} \alias{storage_upload.blob_container} \alias{storage_upload.file_share} \alias{storage_upload.adls_filesystem} \alias{storage_multiupload} \alias{storage_multiupload.blob_container} \alias{storage_multiupload.file_share} \alias{storage_multiupload.adls_filesystem} \alias{storage_download} \alias{storage_download.blob_container} \alias{storage_download.file_share} \alias{storage_download.adls_filesystem} \alias{storage_multidownload} \alias{storage_multidownload.blob_container} \alias{storage_multidownload.file_share} \alias{storage_multidownload.adls_filesystem} \alias{download_from_url} \alias{upload_to_url} \title{Upload and download generics} \usage{ copy_url_to_storage(container, src, dest, ...) multicopy_url_to_storage(container, src, dest, ...) \method{copy_url_to_storage}{blob_container}(container, src, dest, ...) \method{multicopy_url_to_storage}{blob_container}(container, src, dest, ...) storage_upload(container, ...) \method{storage_upload}{blob_container}(container, ...) \method{storage_upload}{file_share}(container, ...) \method{storage_upload}{adls_filesystem}(container, ...) storage_multiupload(container, ...) \method{storage_multiupload}{blob_container}(container, ...) \method{storage_multiupload}{file_share}(container, ...) \method{storage_multiupload}{adls_filesystem}(container, ...) storage_download(container, ...) \method{storage_download}{blob_container}(container, ...) \method{storage_download}{file_share}(container, ...) \method{storage_download}{adls_filesystem}(container, ...) storage_multidownload(container, ...) \method{storage_multidownload}{blob_container}(container, ...) 
\method{storage_multidownload}{file_share}(container, ...) \method{storage_multidownload}{adls_filesystem}(container, ...) download_from_url(src, dest, key = NULL, token = NULL, sas = NULL, ..., overwrite = FALSE) upload_to_url(src, dest, key = NULL, token = NULL, sas = NULL, ...) } \arguments{ \item{container}{A storage container object.} \item{src, dest}{For \code{upload_to_url} and \code{download_from_url}, the source and destination files to transfer.} \item{...}{Further arguments to pass to lower-level functions.} \item{key, token, sas}{Authentication arguments: an access key, Azure Active Directory (AAD) token or a shared access signature (SAS). If multiple arguments are supplied, a key takes priority over a token, which takes priority over a SAS. For \code{upload_to_url} and \code{download_to_url}, you can also provide a SAS as part of the URL itself.} \item{overwrite}{For downloading, whether to overwrite any destination files that exist.} } \description{ Upload and download generics } \details{ \code{copy_url_to_storage} transfers the contents of the file at the specified HTTP[S] URL directly to storage, without requiring a temporary local copy to be made. \code{multicopy_url_to_storage} does the same, for multiple URLs at once. Currently methods for these are only implemented for blob storage. These functions allow you to transfer files to and from a storage account. \code{storage_upload}, \code{storage_download}, \code{storage_multiupload} and \code{storage_multidownload} take as first argument a storage container, either for blob storage, file storage, or ADLSgen2. They dispatch to the corresponding file transfer functions for the given storage type. \code{upload_to_url} and \code{download_to_url} allow you to transfer a file to or from Azure storage, given the URL of the source or destination. The storage details (endpoint, container name, and so on) are obtained from the URL. 
By default, the upload and download functions will display a progress bar while they are downloading. To turn this off, use \code{options(azure_storage_progress_bar=FALSE)}. To turn the progress bar back on, use \code{options(azure_storage_progress_bar=TRUE)}. } \examples{ \dontrun{ # download from blob storage bl <- storage_endpoint("https://mystorage.blob.core.windows.net/", key="access_key") cont <- storage_container(bl, "mycontainer") storage_download(cont, "bigfile.zip", "~/bigfile.zip") # same download but directly from the URL download_from_url("https://mystorage.blob.core.windows.net/mycontainer/bigfile.zip", "~/bigfile.zip", key="access_key") # upload to ADLSgen2 ad <- storage_endpoint("https://myadls.dfs.core.windows.net/", token=mytoken) cont <- storage_container(ad, "myfilesystem") create_storage_dir(cont, "newdir") storage_upload(cont, "files.zip", "newdir/files.zip") # same upload but directly to the URL upload_to_url("files.zip", "https://myadls.dfs.core.windows.net/myfilesystem/newdir/files.zip", token=mytoken) } } \seealso{ \link{storage_container}, \link{blob_container}, \link{file_share}, \link{adls_filesystem} \link{download_blob}, \link{download_azure_file}, \link{download_adls_file}, \link{call_azcopy} }
/man/file_transfer.Rd
permissive
Azure/AzureStor
R
false
true
5,215
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/blob_copyurl.R, R/transfer_generics.R \name{copy_url_to_storage} \alias{copy_url_to_storage} \alias{multicopy_url_to_storage} \alias{copy_url_to_storage.blob_container} \alias{multicopy_url_to_storage.blob_container} \alias{storage_upload} \alias{storage_upload.blob_container} \alias{storage_upload.file_share} \alias{storage_upload.adls_filesystem} \alias{storage_multiupload} \alias{storage_multiupload.blob_container} \alias{storage_multiupload.file_share} \alias{storage_multiupload.adls_filesystem} \alias{storage_download} \alias{storage_download.blob_container} \alias{storage_download.file_share} \alias{storage_download.adls_filesystem} \alias{storage_multidownload} \alias{storage_multidownload.blob_container} \alias{storage_multidownload.file_share} \alias{storage_multidownload.adls_filesystem} \alias{download_from_url} \alias{upload_to_url} \title{Upload and download generics} \usage{ copy_url_to_storage(container, src, dest, ...) multicopy_url_to_storage(container, src, dest, ...) \method{copy_url_to_storage}{blob_container}(container, src, dest, ...) \method{multicopy_url_to_storage}{blob_container}(container, src, dest, ...) storage_upload(container, ...) \method{storage_upload}{blob_container}(container, ...) \method{storage_upload}{file_share}(container, ...) \method{storage_upload}{adls_filesystem}(container, ...) storage_multiupload(container, ...) \method{storage_multiupload}{blob_container}(container, ...) \method{storage_multiupload}{file_share}(container, ...) \method{storage_multiupload}{adls_filesystem}(container, ...) storage_download(container, ...) \method{storage_download}{blob_container}(container, ...) \method{storage_download}{file_share}(container, ...) \method{storage_download}{adls_filesystem}(container, ...) storage_multidownload(container, ...) \method{storage_multidownload}{blob_container}(container, ...) 
\method{storage_multidownload}{file_share}(container, ...) \method{storage_multidownload}{adls_filesystem}(container, ...) download_from_url(src, dest, key = NULL, token = NULL, sas = NULL, ..., overwrite = FALSE) upload_to_url(src, dest, key = NULL, token = NULL, sas = NULL, ...) } \arguments{ \item{container}{A storage container object.} \item{src, dest}{For \code{upload_to_url} and \code{download_from_url}, the source and destination files to transfer.} \item{...}{Further arguments to pass to lower-level functions.} \item{key, token, sas}{Authentication arguments: an access key, Azure Active Directory (AAD) token or a shared access signature (SAS). If multiple arguments are supplied, a key takes priority over a token, which takes priority over a SAS. For \code{upload_to_url} and \code{download_to_url}, you can also provide a SAS as part of the URL itself.} \item{overwrite}{For downloading, whether to overwrite any destination files that exist.} } \description{ Upload and download generics } \details{ \code{copy_url_to_storage} transfers the contents of the file at the specified HTTP[S] URL directly to storage, without requiring a temporary local copy to be made. \code{multicopy_url_to_storage} does the same, for multiple URLs at once. Currently methods for these are only implemented for blob storage. These functions allow you to transfer files to and from a storage account. \code{storage_upload}, \code{storage_download}, \code{storage_multiupload} and \code{storage_multidownload} take as first argument a storage container, either for blob storage, file storage, or ADLSgen2. They dispatch to the corresponding file transfer functions for the given storage type. \code{upload_to_url} and \code{download_to_url} allow you to transfer a file to or from Azure storage, given the URL of the source or destination. The storage details (endpoint, container name, and so on) are obtained from the URL. 
By default, the upload and download functions will display a progress bar while they are downloading. To turn this off, use \code{options(azure_storage_progress_bar=FALSE)}. To turn the progress bar back on, use \code{options(azure_storage_progress_bar=TRUE)}. } \examples{ \dontrun{ # download from blob storage bl <- storage_endpoint("https://mystorage.blob.core.windows.net/", key="access_key") cont <- storage_container(bl, "mycontainer") storage_download(cont, "bigfile.zip", "~/bigfile.zip") # same download but directly from the URL download_from_url("https://mystorage.blob.core.windows.net/mycontainer/bigfile.zip", "~/bigfile.zip", key="access_key") # upload to ADLSgen2 ad <- storage_endpoint("https://myadls.dfs.core.windows.net/", token=mytoken) cont <- storage_container(ad, "myfilesystem") create_storage_dir(cont, "newdir") storage_upload(cont, "files.zip", "newdir/files.zip") # same upload but directly to the URL upload_to_url("files.zip", "https://myadls.dfs.core.windows.net/myfilesystem/newdir/files.zip", token=mytoken) } } \seealso{ \link{storage_container}, \link{blob_container}, \link{file_share}, \link{adls_filesystem} \link{download_blob}, \link{download_azure_file}, \link{download_adls_file}, \link{call_azcopy} }
#================================================== # DESCRIPCIoN: Estimación de datos faltantes en la estacion miraflores #Udep y Senamhi # AUTOR(ES): Nohelia e Isabella #================================================== ## Establecer el Working Directory setwd("C:/R/Tesis") #installed.packages("installr") ##Cargar librerias library(tibble) library(openxlsx) library(dplyr) library(ggplot2) library(assertive) library(naniar) library(lubridate) library(MASS) library(mosaicData) library(simputation) library(gtools) #############Modelos de miraflores######################################### miss_var_summary (data_temp_max) model_Mtmax_1 <- lm(miraflores_temp_max ~ Esperanza_temp_max + mallares_temp_max + chusis_temp_max+ miguel_temp_max + UDEP_temp_max, data = data_temp_max)#0.9214 #model_Mtmax_2 <- lm(miraflores_temp_max ~ Esperanza_temp_max + mallares_temp_max + chusis_temp_max+ # miguel_temp_max, # data = data_temp_max) model_Mtmax_2 <- lm(miraflores_temp_max ~ Esperanza_temp_max + chusis_temp_max+ miguel_temp_max, data = data_temp_max) # 0.9065 model_Mtmax_3 <-lm(miraflores_temp_max ~ Esperanza_temp_max + chusis_temp_max+ mallares_temp_max, data = data_temp_max) # 0.8865 model_Mtmax_4 <-lm(miraflores_temp_max ~ Esperanza_temp_max + miguel_temp_max+ mallares_temp_max, data = data_temp_max) #0.9033 model_Mtmax_5 <-lm(miraflores_temp_max ~ miguel_temp_max + chusis_temp_max+ mallares_temp_max, data = data_temp_max) #0.8961 model_Mtmax_6 <-lm(miraflores_temp_max ~ Esperanza_temp_max + mallares_temp_max, data = data_temp_max)# 0.8498 model_Mtmax_7 <-lm(miraflores_temp_max ~ Esperanza_temp_max + chusis_temp_max, data = data_temp_max) #0.8745 model_Mtmax_8 <-lm(miraflores_temp_max ~ Esperanza_temp_max + miguel_temp_max, data = data_temp_max)#0.9021 model_Mtmax_9 <-lm(miraflores_temp_max ~ mallares_temp_max + chusis_temp_max, data = data_temp_max) #0.8811 model_Mtmax_10 <-lm(miraflores_temp_max ~ mallares_temp_max + miguel_temp_max, data = data_temp_max) #0.8739 
model_Mtmax_11 <-lm(miraflores_temp_max ~ chusis_temp_max + miguel_temp_max, data = data_temp_max) #0.9043 model_Mtmax_12 <-lm(miraflores_temp_max ~ chusis_temp_max + UDEP_temp_max, data = data_temp_max)#0.9085 summary(model_Mtmax_13) #Estimaciones con Cerritos ############Código de estimación################### n=0 n <- c() m <- 0 k=1 for (j in 1971:2019){ cont=0 for (i in 1:length(miraflores_bymonth$year)){ if(miraflores_bymonth$year[i] == j){ if(is.na(miraflores_bymonth$max_temp_max[i]) == TRUE){ cont = cont +1 }else { cont=cont } print(cont) } } n[k]=cont k=k+1 } #n = numero de meses faltantes por año (lluvia) n m data_faltante <- c(1:588) #data_faltante #library(dplyr) n <- data.frame(n) #n valor<-c() n_datos <- data.frame(meses_faltantes = n) #n_datos for (i in 1:49) { if (n[i,]>=1) { valor[i]="yes" }else{ valor[i]="no" } } valor <-data.frame(valor) valor n_datos <- data.frame(año=1971:2019, valor) #n_datos #s es los meses year<-0 h= c() k=1 j=0 g = c() jian = c() yue = c() est = c() as.data.frame(faltantes) data_predictora <- 0 for (i in 1:49) { for (r in 1:672) { #Filas en los meses de lluvia if (n_datos$valor[i]=="yes"){ for (s in 1:12){ if (data_temp_max$year[r]==n_datos$año[i] && data_temp_max$month[r]== s #&& is.na(data_temp_max$miguel_temp_max[r])==FALSE && is.na(data_temp_max$UDEP_temp_max[r])==FALSE &&is.na(data_temp_max$chusis_temp_max[r])==FALSE #&& is.na(data_temp_max$Bernal_temp_max[r])==FALSE #&& is.na(data_temp_max$Esperanza_temp_max[r])==FALSE #&& is.na(data_temp_max$mallares_temp_max[r])==FALSE && is.na(data_temp_max$miraflores_temp_max[r])==TRUE){ j=j+1 model_Mtmax_12 <-lm(miraflores_temp_max ~ chusis_temp_max + UDEP_temp_max, data = data_temp_max) data_predictora <- tibble(UDEP_temp_max = data_temp_max[r,]$UDEP_temp_max, #miguel_temp_max = data_temp_max[r,]$miguel_temp_max, chusis_temp_max = data_temp_max[r,]$chusis_temp_max, #Bernal_temp_max = data_temp_max[r,]$Bernal_temp_max, #mallares_temp_max = data_temp_max[r,]$mallares_temp_max, 
#Esperanza_temp_max = data_temp_max[r,]$Esperanza_temp_max, ) g[j] = predict(model_Mtmax_12, data_predictora) print (g) #}else{ jian[j] = n_datos$año[i] yue[j] = s est[j] = g[j] faltantes <- data.frame(jian, yue, est) } } } } #k=k+1 #print(h) } print(data_predictora) g faltantes
/Script/Miraflores_estimado_temp_max.R
no_license
NohePS/Tesis
R
false
false
5,217
r
#================================================== # DESCRIPCIoN: Estimación de datos faltantes en la estacion miraflores #Udep y Senamhi # AUTOR(ES): Nohelia e Isabella #================================================== ## Establecer el Working Directory setwd("C:/R/Tesis") #installed.packages("installr") ##Cargar librerias library(tibble) library(openxlsx) library(dplyr) library(ggplot2) library(assertive) library(naniar) library(lubridate) library(MASS) library(mosaicData) library(simputation) library(gtools) #############Modelos de miraflores######################################### miss_var_summary (data_temp_max) model_Mtmax_1 <- lm(miraflores_temp_max ~ Esperanza_temp_max + mallares_temp_max + chusis_temp_max+ miguel_temp_max + UDEP_temp_max, data = data_temp_max)#0.9214 #model_Mtmax_2 <- lm(miraflores_temp_max ~ Esperanza_temp_max + mallares_temp_max + chusis_temp_max+ # miguel_temp_max, # data = data_temp_max) model_Mtmax_2 <- lm(miraflores_temp_max ~ Esperanza_temp_max + chusis_temp_max+ miguel_temp_max, data = data_temp_max) # 0.9065 model_Mtmax_3 <-lm(miraflores_temp_max ~ Esperanza_temp_max + chusis_temp_max+ mallares_temp_max, data = data_temp_max) # 0.8865 model_Mtmax_4 <-lm(miraflores_temp_max ~ Esperanza_temp_max + miguel_temp_max+ mallares_temp_max, data = data_temp_max) #0.9033 model_Mtmax_5 <-lm(miraflores_temp_max ~ miguel_temp_max + chusis_temp_max+ mallares_temp_max, data = data_temp_max) #0.8961 model_Mtmax_6 <-lm(miraflores_temp_max ~ Esperanza_temp_max + mallares_temp_max, data = data_temp_max)# 0.8498 model_Mtmax_7 <-lm(miraflores_temp_max ~ Esperanza_temp_max + chusis_temp_max, data = data_temp_max) #0.8745 model_Mtmax_8 <-lm(miraflores_temp_max ~ Esperanza_temp_max + miguel_temp_max, data = data_temp_max)#0.9021 model_Mtmax_9 <-lm(miraflores_temp_max ~ mallares_temp_max + chusis_temp_max, data = data_temp_max) #0.8811 model_Mtmax_10 <-lm(miraflores_temp_max ~ mallares_temp_max + miguel_temp_max, data = data_temp_max) #0.8739 
model_Mtmax_11 <-lm(miraflores_temp_max ~ chusis_temp_max + miguel_temp_max, data = data_temp_max) #0.9043 model_Mtmax_12 <-lm(miraflores_temp_max ~ chusis_temp_max + UDEP_temp_max, data = data_temp_max)#0.9085 summary(model_Mtmax_13) #Estimaciones con Cerritos ############Código de estimación################### n=0 n <- c() m <- 0 k=1 for (j in 1971:2019){ cont=0 for (i in 1:length(miraflores_bymonth$year)){ if(miraflores_bymonth$year[i] == j){ if(is.na(miraflores_bymonth$max_temp_max[i]) == TRUE){ cont = cont +1 }else { cont=cont } print(cont) } } n[k]=cont k=k+1 } #n = numero de meses faltantes por año (lluvia) n m data_faltante <- c(1:588) #data_faltante #library(dplyr) n <- data.frame(n) #n valor<-c() n_datos <- data.frame(meses_faltantes = n) #n_datos for (i in 1:49) { if (n[i,]>=1) { valor[i]="yes" }else{ valor[i]="no" } } valor <-data.frame(valor) valor n_datos <- data.frame(año=1971:2019, valor) #n_datos #s es los meses year<-0 h= c() k=1 j=0 g = c() jian = c() yue = c() est = c() as.data.frame(faltantes) data_predictora <- 0 for (i in 1:49) { for (r in 1:672) { #Filas en los meses de lluvia if (n_datos$valor[i]=="yes"){ for (s in 1:12){ if (data_temp_max$year[r]==n_datos$año[i] && data_temp_max$month[r]== s #&& is.na(data_temp_max$miguel_temp_max[r])==FALSE && is.na(data_temp_max$UDEP_temp_max[r])==FALSE &&is.na(data_temp_max$chusis_temp_max[r])==FALSE #&& is.na(data_temp_max$Bernal_temp_max[r])==FALSE #&& is.na(data_temp_max$Esperanza_temp_max[r])==FALSE #&& is.na(data_temp_max$mallares_temp_max[r])==FALSE && is.na(data_temp_max$miraflores_temp_max[r])==TRUE){ j=j+1 model_Mtmax_12 <-lm(miraflores_temp_max ~ chusis_temp_max + UDEP_temp_max, data = data_temp_max) data_predictora <- tibble(UDEP_temp_max = data_temp_max[r,]$UDEP_temp_max, #miguel_temp_max = data_temp_max[r,]$miguel_temp_max, chusis_temp_max = data_temp_max[r,]$chusis_temp_max, #Bernal_temp_max = data_temp_max[r,]$Bernal_temp_max, #mallares_temp_max = data_temp_max[r,]$mallares_temp_max, 
#Esperanza_temp_max = data_temp_max[r,]$Esperanza_temp_max, ) g[j] = predict(model_Mtmax_12, data_predictora) print (g) #}else{ jian[j] = n_datos$año[i] yue[j] = s est[j] = g[j] faltantes <- data.frame(jian, yue, est) } } } } #k=k+1 #print(h) } print(data_predictora) g faltantes
\name{Wang_Chen_Sim} \alias{Wang_Chen_Sim} \docType{data} \title{ Simulated process data from a plastics manufacturer. } \description{ Fifty observations where 'D' represents depth, 'L' represents length, and 'W' represents width. } \usage{Wang_Chen_Sim} \format{ A simulated data frame with 50 observations and the following 3 variables. \describe{ \item{\code{D}}{depth} \item{\code{L}}{length} \item{\code{W}}{width} } } \source{ Data simulated by Nelson Lee Afanador from average and covariance estimates provided in Wang F, Chen J (1998). "Capability index using principal components analysis." Quality Engineering, 11, 21-27. }
/man/Wang_Chen_Sim.Rd
no_license
cran/mvdalab
R
false
false
653
rd
\name{Wang_Chen_Sim} \alias{Wang_Chen_Sim} \docType{data} \title{ Simulated process data from a plastics manufacturer. } \description{ Fifty observations where 'D' represents depth, 'L' represents length, and 'W' represents width. } \usage{Wang_Chen_Sim} \format{ A simulated data frame with 50 observations and the following 3 variables. \describe{ \item{\code{D}}{depth} \item{\code{L}}{length} \item{\code{W}}{width} } } \source{ Data simulated by Nelson Lee Afanador from average and covariance estimates provided in Wang F, Chen J (1998). "Capability index using principal components analysis." Quality Engineering, 11, 21-27. }
with(a949c7f65ee664349a8371e2024f67407, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/1c4fa71c-191c-4da9-8102-b247ffddc5d3';options(digits.secs=NULL);FRAME950866[,(c('DATA_COLLECTION_TIME')) := lapply(.SD, function(x) as.POSIXct(fast_strptime(x, format='%Y-%m-%d %H:%M:%S'))), .SDcols = c('DATA_COLLECTION_TIME')]});
/1c4fa71c-191c-4da9-8102-b247ffddc5d3/R/Temp/arTlu2CPsgV3M.R
no_license
ayanmanna8/test
R
false
false
388
r
with(a949c7f65ee664349a8371e2024f67407, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/1c4fa71c-191c-4da9-8102-b247ffddc5d3';options(digits.secs=NULL);FRAME950866[,(c('DATA_COLLECTION_TIME')) := lapply(.SD, function(x) as.POSIXct(fast_strptime(x, format='%Y-%m-%d %H:%M:%S'))), .SDcols = c('DATA_COLLECTION_TIME')]});
#!/usr/bin/env Rscript #' Makes sure the R_LIBS_USER directory is installed #' R_LIBS_USER is set when R is executed dir.create(Sys.getenv("R_LIBS_USER"), showWarnings = FALSE, recursive = TRUE) #' installs packages which are not yet installed #' @param pkg list of package names ipak <- function(pkg){ new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])] if (length(new.pkg)) install.packages(new.pkg, dependencies = TRUE, repos = "https://cran.rstudio.com") sapply(pkg, require, character.only = TRUE) } ### install base packages base_packages <- c( "devtools", "tidyverse", "reshape2" ) ipak(base_packages) ### install tools from github devtools::install_github("klutometis/roxygen") # for package generation
/Install.R
permissive
riethmayer/incusyte
R
false
false
784
r
#!/usr/bin/env Rscript #' Makes sure the R_LIBS_USER directory is installed #' R_LIBS_USER is set when R is executed dir.create(Sys.getenv("R_LIBS_USER"), showWarnings = FALSE, recursive = TRUE) #' installs packages which are not yet installed #' @param pkg list of package names ipak <- function(pkg){ new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])] if (length(new.pkg)) install.packages(new.pkg, dependencies = TRUE, repos = "https://cran.rstudio.com") sapply(pkg, require, character.only = TRUE) } ### install base packages base_packages <- c( "devtools", "tidyverse", "reshape2" ) ipak(base_packages) ### install tools from github devtools::install_github("klutometis/roxygen") # for package generation
summary.eaemg <- function(object, ...) { res <- list(empirical = object$empirical, level = object$level, gp = dim(object$intervals)[1]) class(res) <- "summary.eaemg" return(res) }
/biosignalEMG/R/summary.eaemg.R
no_license
ingted/R-Examples
R
false
false
198
r
summary.eaemg <- function(object, ...) { res <- list(empirical = object$empirical, level = object$level, gp = dim(object$intervals)[1]) class(res) <- "summary.eaemg" return(res) }
#' Overlaid Normal QQ Plot #' #' Produces an overlaid normal QQ plot. #' #' #' @param x a \code{fit.models} object. #' @param fun a function to extract the desired quantity from \code{x}. #' @param \dots additional arguments are passed to #' \code{\link[lattice]{qqmath}}. #' @return the \code{trellis} object is invisibly returned. #' @keywords hplot #' @importFrom lattice qqmath strip.default #' @export overlaidQQPlot <- function(x, fun, ...) { n.models <- length(x) mod.names <- names(x) y <- lapply(x, fun) n.y <- sapply(y, length) mod <- factor(rep(mod.names, n.y), levels = mod.names) tdf <- data.frame(y = unlist(y), mod = mod) p <- qqmath(~ y | "", groups = mod, data = tdf, distribution = qnorm, strip = function(...) strip.default(..., style = 1), auto.key = list(corner = c(0.05, 0.95)), ...) print(p) invisible(p) }
/R/overlaidQQPlot.R
no_license
cran/fit.models
R
false
false
944
r
#' Overlaid Normal QQ Plot
#'
#' Produces an overlaid normal QQ plot.
#'
#' @param x a \code{fit.models} object.
#' @param fun a function to extract the desired quantity from \code{x}.
#' @param \dots additional arguments are passed to
#'   \code{\link[lattice]{qqmath}}.
#' @return the \code{trellis} object is invisibly returned.
#' @keywords hplot
#' @importFrom lattice qqmath strip.default
#' @export
overlaidQQPlot <- function(x, fun, ...) {
  n.models <- length(x)
  mod.names <- names(x)

  # Pull the requested quantity out of each fitted model.
  extracted <- lapply(x, fun)
  counts <- sapply(extracted, length)

  # Long-format frame: one row per value, labelled by model name.
  mod <- factor(rep(mod.names, counts), levels = mod.names)
  tdf <- data.frame(y = unlist(extracted), mod = mod)

  fig <- qqmath(~ y | "", groups = mod, data = tdf, distribution = qnorm,
                strip = function(...) strip.default(..., style = 1),
                auto.key = list(corner = c(0.05, 0.95)), ...)

  print(fig)
  invisible(fig)
}
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 162900 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 154980 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 154980 c c Input Parameter (command line, file): c input filename QBFLIB/Gent-Rowley/Connect9/cf_9_9x9_d_.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 910981 c no.of clauses 162900 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 154980 c c QBFLIB/Gent-Rowley/Connect9/cf_9_9x9_d_.qdimacs 910981 162900 E1 [10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141 10142 10143 10144 10145 10146 10147 10148 10149 10150 10151 10152 10153 10154 10155 10156 10157 10158 10159 10160 10161 10162 10163 10164 10165 10166 10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178 10179 10180 10181 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10243 10244 10245 10246 10247 10248 10249 10250 10251 10252 10253 10254 10255 10256 10257 10258 10259 10260 10261 10262 10263 10264 10265 10266 10267 10268 10269 10270 10271 10272 10273 10274 10275 10276 10277 10278 10279 10280 10281 10301 10302 10303 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10343 10344 10345 10346 10347 10348 10349 10350 10351 10352 10353 10354 10355 10356 10357 10358 
10359 10360 10361 10362 10363 10364 10365 10366 10367 10368 10369 10370 10371 10372 10373 10374 10375 10376 10377 10378 10379 10380 10381 10401 10402 10403 10404 10405 10406 10407 10408 10409 10410 10411 10412 10413 10414 10415 10416 10417 10418 10419 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10443 10444 10445 10446 10447 10448 10449 10450 10451 10452 10453 10454 10455 10456 10457 10458 10459 10460 10461 10462 10463 10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478 10479 10480 10481 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10543 10544 10545 10546 10547 10548 10549 10550 10551 10552 10553 10554 10555 10556 10557 10558 10559 10560 10561 10562 10563 10564 10565 10566 10567 10568 10569 10570 10571 10572 10573 10574 10575 10576 10577 10578 10579 10580 10581 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10643 10644 10645 10646 10647 10648 10649 10650 10651 10652 10653 10654 10655 10656 10657 10658 10659 10660 10661 10662 10663 10664 10665 10666 10667 10668 10669 10670 10671 10672 10673 10674 10675 10676 10677 10678 10679 10680 10681 10701 10702 10703 10704 10705 10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716 10717 10718 10719 10720 10721 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 10743 10744 10745 10746 10747 10748 10749 10750 10751 10752 10753 10754 10755 10756 10757 10758 10759 10760 10761 10762 10763 10764 10765 10766 10767 
10768 10769 10770 10771 10772 10773 10774 10775 10776 10777 10778 10779 10780 10781 10801 10802 10803 10804 10805 10806 10807 10808 10809 10810 10811 10812 10813 10814 10815 10816 10817 10818 10819 10820 10821 10822 10823 10824 10825 10826 10827 10828 10829 10830 10831 10832 10833 10834 10835 10836 10837 10838 10839 10840 10841 10842 10843 10844 10845 10846 10847 10848 10849 10850 10851 10852 10853 10854 10855 10856 10857 10858 10859 10860 10861 10862 10863 10864 10865 10866 10867 10868 10869 10870 10871 10872 10873 10874 10875 10876 10877 10878 10879 10880 10881 10901 10902 10903 10904 10905 10906 10907 10908 10909 10910 10911 10912 10913 10914 10915 10916 10917 10918 10919 10920 10921 10922 10923 10924 10925 10926 10927 10928 10929 10930 10931 10932 10933 10934 10935 10936 10937 10938 10939 10940 10941 10942 10943 10944 10945 10946 10947 10948 10949 10950 10951 10952 10953 10954 10955 10956 10957 10958 10959 10960 10961 10962 10963 10964 10965 10966 10967 10968 10969 10970 10971 10972 10973 10974 10975 10976 10977 10978 10979 10980 10981 20101 20102 20103 20104 20105 20106 20107 20108 20109 20110 20111 20112 20113 20114 20115 20116 20117 20118 20119 20120 20121 20122 20123 20124 20125 20126 20127 20128 20129 20130 20131 20132 20133 20134 20135 20136 20137 20138 20139 20140 20141 20142 20143 20144 20145 20146 20147 20148 20149 20150 20151 20152 20153 20154 20155 20156 20157 20158 20159 20160 20161 20162 20163 20164 20165 20166 20167 20168 20169 20170 20171 20172 20173 20174 20175 20176 20177 20178 20179 20180 20181 20201 20202 20203 20204 20205 20206 20207 20208 20209 20210 20211 20212 20213 20214 20215 20216 20217 20218 20219 20220 20221 20222 20223 20224 20225 20226 20227 20228 20229 20230 20231 20232 20233 20234 20235 20236 20237 20238 20239 20240 20241 20242 20243 20244 20245 20246 20247 20248 20249 20250 20251 20252 20253 20254 20255 20256 20257 20258 20259 20260 20261 20262 20263 20264 20265 20266 20267 20268 20269 20270 20271 20272 20273 20274 20275 20276 
20277 20278 20279 20280 20281 20301 20302 20303 20304 20305 20306 20307 20308 20309 20310 20311 20312 20313 20314 20315 20316 20317 20318 20319 20320 20321 20322 20323 20324 20325 20326 20327 20328 20329 20330 20331 20332 20333 20334 20335 20336 20337 20338 20339 20340 20341 20342 20343 20344 20345 20346 20347 20348 20349 20350 20351 20352 20353 20354 20355 20356 20357 20358 20359 20360 20361 20362 20363 20364 20365 20366 20367 20368 20369 20370 20371 20372 20373 20374 20375 20376 20377 20378 20379 20380 20381 20401 20402 20403 20404 20405 20406 20407 20408 20409 20410 20411 20412 20413 20414 20415 20416 20417 20418 20419 20420 20421 20422 20423 20424 20425 20426 20427 20428 20429 20430 20431 20432 20433 20434 20435 20436 20437 20438 20439 20440 20441 20442 20443 20444 20445 20446 20447 20448 20449 20450 20451 20452 20453 20454 20455 20456 20457 20458 20459 20460 20461 20462 20463 20464 20465 20466 20467 20468 20469 20470 20471 20472 20473 20474 20475 20476 20477 20478 20479 20480 20481 20501 20502 20503 20504 20505 20506 20507 20508 20509 20510 20511 20512 20513 20514 20515 20516 20517 20518 20519 20520 20521 20522 20523 20524 20525 20526 20527 20528 20529 20530 20531 20532 20533 20534 20535 20536 20537 20538 20539 20540 20541 20542 20543 20544 20545 20546 20547 20548 20549 20550 20551 20552 20553 20554 20555 20556 20557 20558 20559 20560 20561 20562 20563 20564 20565 20566 20567 20568 20569 20570 20571 20572 20573 20574 20575 20576 20577 20578 20579 20580 20581 20601 20602 20603 20604 20605 20606 20607 20608 20609 20610 20611 20612 20613 20614 20615 20616 20617 20618 20619 20620 20621 20622 20623 20624 20625 20626 20627 20628 20629 20630 20631 20632 20633 20634 20635 20636 20637 20638 20639 20640 20641 20642 20643 20644 20645 20646 20647 20648 20649 20650 20651 20652 20653 20654 20655 20656 20657 20658 20659 20660 20661 20662 20663 20664 20665 20666 20667 20668 20669 20670 20671 20672 20673 20674 20675 20676 20677 20678 20679 20680 20681 20701 20702 20703 20704 
20705 20706 20707 20708 20709 20710 20711 20712 20713 20714 20715 20716 20717 20718 20719 20720 20721 20722 20723 20724 20725 20726 20727 20728 20729 20730 20731 20732 20733 20734 20735 20736 20737 20738 20739 20740 20741 20742 20743 20744 20745 20746 20747 20748 20749 20750 20751 20752 20753 20754 20755 20756 20757 20758 20759 20760 20761 20762 20763 20764 20765 20766 20767 20768 20769 20770 20771 20772 20773 20774 20775 20776 20777 20778 20779 20780 20781 20801 20802 20803 20804 20805 20806 20807 20808 20809 20810 20811 20812 20813 20814 20815 20816 20817 20818 20819 20820 20821 20822 20823 20824 20825 20826 20827 20828 20829 20830 20831 20832 20833 20834 20835 20836 20837 20838 20839 20840 20841 20842 20843 20844 20845 20846 20847 20848 20849 20850 20851 20852 20853 20854 20855 20856 20857 20858 20859 20860 20861 20862 20863 20864 20865 20866 20867 20868 20869 20870 20871 20872 20873 20874 20875 20876 20877 20878 20879 20880 20881 20901 20902 20903 20904 20905 20906 20907 20908 20909 20910 20911 20912 20913 20914 20915 20916 20917 20918 20919 20920 20921 20922 20923 20924 20925 20926 20927 20928 20929 20930 20931 20932 20933 20934 20935 20936 20937 20938 20939 20940 20941 20942 20943 20944 20945 20946 20947 20948 20949 20950 20951 20952 20953 20954 20955 20956 20957 20958 20959 20960 20961 20962 20963 20964 20965 20966 20967 20968 20969 20970 20971 20972 20973 20974 20975 20976 20977 20978 20979 20980 20981 80101 80102 80103 80104 80105 80106 80107 80108 80109 80110 80111 80112 80113 80114 80115 80116 80117 80118 80119 80120 80121 80122 80123 80124 80125 80126 80127 80128 80129 80130 80131 80132 80133 80134 80135 80136 80137 80138 80139 80140 80141 80142 80143 80144 80145 80146 80147 80148 80149 80150 80151 80152 80153 80154 80155 80156 80157 80158 80159 80160 80161 80162 80163 80164 80165 80166 80167 80168 80169 80170 80171 80172 80173 80174 80175 80176 80177 80178 80179 80180 80201 80202 80203 80204 80205 80206 80207 80208 80209 80210 80211 80212 80213 80214 
80215 80216 80217 80218 80219 80220 80221 80222 80223 80224 80225 80226 80227 80228 80229 80230 80231 80232 80233 80234 80235 80236 80237 80238 80239 80240 80241 80242 80243 80244 80245 80246 80247 80248 80249 80250 80251 80252 80253 80254 80255 80256 80257 80258 80259 80260 80261 80262 80263 80264 80265 80266 80267 80268 80269 80270 80271 80272 80273 80274 80275 80276 80277 80278 80279 80280 80301 80302 80303 80304 80305 80306 80307 80308 80309 80310 80311 80312 80313 80314 80315 80316 80317 80318 80319 80320 80321 80322 80323 80324 80325 80326 80327 80328 80329 80330 80331 80332 80333 80334 80335 80336 80337 80338 80339 80340 80341 80342 80343 80344 80345 80346 80347 80348 80349 80350 80351 80352 80353 80354 80355 80356 80357 80358 80359 80360 80361 80362 80363 80364 80365 80366 80367 80368 80369 80370 80371 80372 80373 80374 80375 80376 80377 80378 80379 80380 80401 80402 80403 80404 80405 80406 80407 80408 80409 80410 80411 80412 80413 80414 80415 80416 80417 80418 80419 80420 80421 80422 80423 80424 80425 80426 80427 80428 80429 80430 80431 80432 80433 80434 80435 80436 80437 80438 80439 80440 80441 80442 80443 80444 80445 80446 80447 80448 80449 80450 80451 80452 80453 80454 80455 80456 80457 80458 80459 80460 80461 80462 80463 80464 80465 80466 80467 80468 80469 80470 80471 80472 80473 80474 80475 80476 80477 80478 80479 80480 80501 80502 80503 80504 80505 80506 80507 80508 80509 80510 80511 80512 80513 80514 80515 80516 80517 80518 80519 80520 80521 80522 80523 80524 80525 80526 80527 80528 80529 80530 80531 80532 80533 80534 80535 80536 80537 80538 80539 80540 80541 80542 80543 80544 80545 80546 80547 80548 80549 80550 80551 80552 80553 80554 80555 80556 80557 80558 80559 80560 80561 80562 80563 80564 80565 80566 80567 80568 80569 80570 80571 80572 80573 80574 80575 80576 80577 80578 80579 80580 80601 80602 80603 80604 80605 80606 80607 80608 80609 80610 80611 80612 80613 80614 80615 80616 80617 80618 80619 80620 80621 80622 80623 80624 80625 80626 80627 
80628 80629 80630 80631 80632 80633 80634 80635 80636 80637 80638 80639 80640 80641 80642 80643 80644 80645 80646 80647 80648 80649 80650 80651 80652 80653 80654 80655 80656 80657 80658 80659 80660 80661 80662 80663 80664 80665 80666 80667 80668 80669 80670 80671 80672 80673 80674 80675 80676 80677 80678 80679 80680 80701 80702 80703 80704 80705 80706 80707 80708 80709 80710 80711 80712 80713 80714 80715 80716 80717 80718 80719 80720 80721 80722 80723 80724 80725 80726 80727 80728 80729 80730 80731 80732 80733 80734 80735 80736 80737 80738 80739 80740 80741 80742 80743 80744 80745 80746 80747 80748 80749 80750 80751 80752 80753 80754 80755 80756 80757 80758 80759 80760 80761 80762 80763 80764 80765 80766 80767 80768 80769 80770 80771 80772 80773 80774 80775 80776 80777 80778 80779 80780 80801 80802 80803 80804 80805 80806 80807 80808 80809 80810 80811 80812 80813 80814 80815 80816 80817 80818 80819 80820 80821 80822 80823 80824 80825 80826 80827 80828 80829 80830 80831 80832 80833 80834 80835 80836 80837 80838 80839 80840 80841 80842 80843 80844 80845 80846 80847 80848 80849 80850 80851 80852 80853 80854 80855 80856 80857 80858 80859 80860 80861 80862 80863 80864 80865 80866 80867 80868 80869 80870 80871 80872 80873 80874 80875 80876 80877 80878 80879 80880 80901 80902 80903 80904 80905 80906 80907 80908 80909 80910 80911 80912 80913 80914 80915 80916 80917 80918 80919 80920 80921 80922 80923 80924 80925 80926 80927 80928 80929 80930 80931 80932 80933 80934 80935 80936 80937 80938 80939 80940 80941 80942 80943 80944 80945 80946 80947 80948 80949 80950 80951 80952 80953 80954 80955 80956 80957 80958 80959 80960 80961 80962 80963 80964 80965 80966 80967 80968 80969 80970 80971 80972 80973 80974 80975 80976 80977 80978 80979 80980 81102 81104 81106 81108 81110 81112 81114 81116 81118 81120 81122 81124 81126 81128 81130 81132 81134 81136 81138 81140 81142 81144 81146 81148 81150 81152 81154 81156 81158 81160 81162 81164 81166 81168 81170 81172 81174 81176 81178 81180 
81202 81204 81206 81208 81210 81212 81214 81216 81218 81220 81222 81224 81226 81228 81230 81232 81234 81236 81238 81240 81242 81244 81246 81248 81250 81252 81254 81256 81258 81260 81262 81264 81266 81268 81270 81272 81274 81276 81278 81280 81302 81304 81306 81308 81310 81312 81314 81316 81318 81320 81322 81324 81326 81328 81330 81332 81334 81336 81338 81340 81342 81344 81346 81348 81350 81352 81354 81356 81358 81360 81362 81364 81366 81368 81370 81372 81374 81376 81378 81380 81402 81404 81406 81408 81410 81412 81414 81416 81418 81420 81422 81424 81426 81428 81430 81432 81434 81436 81438 81440 81442 81444 81446 81448 81450 81452 81454 81456 81458 81460 81462 81464 81466 81468 81470 81472 81474 81476 81478 81480 81502 81504 81506 81508 81510 81512 81514 81516 81518 81520 81522 81524 81526 81528 81530 81532 81534 81536 81538 81540 81542 81544 81546 81548 81550 81552 81554 81556 81558 81560 81562 81564 81566 81568 81570 81572 81574 81576 81578 81580 81602 81604 81606 81608 81610 81612 81614 81616 81618 81620 81622 81624 81626 81628 81630 81632 81634 81636 81638 81640 81642 81644 81646 81648 81650 81652 81654 81656 81658 81660 81662 81664 81666 81668 81670 81672 81674 81676 81678 81680 81702 81704 81706 81708 81710 81712 81714 81716 81718 81720 81722 81724 81726 81728 81730 81732 81734 81736 81738 81740 81742 81744 81746 81748 81750 81752 81754 81756 81758 81760 81762 81764 81766 81768 81770 81772 81774 81776 81778 81780 81802 81804 81806 81808 81810 81812 81814 81816 81818 81820 81822 81824 81826 81828 81830 81832 81834 81836 81838 81840 81842 81844 81846 81848 81850 81852 81854 81856 81858 81860 81862 81864 81866 81868 81870 81872 81874 81876 81878 81880 81902 81904 81906 81908 81910 81912 81914 81916 81918 81920 81922 81924 81926 81928 81930 81932 81934 81936 81938 81940 81942 81944 81946 81948 81950 81952 81954 81956 81958 81960 81962 81964 81966 81968 81970 81972 81974 81976 81978 81980 82102 82104 82106 82108 82110 82112 82114 82116 82118 82120 82122 82124 82126 
82128 82130 82132 82134 82136 82138 82140 82142 82144 82146 82148 82150 82152 82154 82156 82158 82160 82162 82164 82166 82168 82170 82172 82174 82176 82178 82180 82202 82204 82206 82208 82210 82212 82214 82216 82218 82220 82222 82224 82226 82228 82230 82232 82234 82236 82238 82240 82242 82244 82246 82248 82250 82252 82254 82256 82258 82260 82262 82264 82266 82268 82270 82272 82274 82276 82278 82280 82302 82304 82306 82308 82310 82312 82314 82316 82318 82320 82322 82324 82326 82328 82330 82332 82334 82336 82338 82340 82342 82344 82346 82348 82350 82352 82354 82356 82358 82360 82362 82364 82366 82368 82370 82372 82374 82376 82378 82380 82402 82404 82406 82408 82410 82412 82414 82416 82418 82420 82422 82424 82426 82428 82430 82432 82434 82436 82438 82440 82442 82444 82446 82448 82450 82452 82454 82456 82458 82460 82462 82464 82466 82468 82470 82472 82474 82476 82478 82480 82502 82504 82506 82508 82510 82512 82514 82516 82518 82520 82522 82524 82526 82528 82530 82532 82534 82536 82538 82540 82542 82544 82546 82548 82550 82552 82554 82556 82558 82560 82562 82564 82566 82568 82570 82572 82574 82576 82578 82580 82602 82604 82606 82608 82610 82612 82614 82616 82618 82620 82622 82624 82626 82628 82630 82632 82634 82636 82638 82640 82642 82644 82646 82648 82650 82652 82654 82656 82658 82660 82662 82664 82666 82668 82670 82672 82674 82676 82678 82680 82702 82704 82706 82708 82710 82712 82714 82716 82718 82720 82722 82724 82726 82728 82730 82732 82734 82736 82738 82740 82742 82744 82746 82748 82750 82752 82754 82756 82758 82760 82762 82764 82766 82768 82770 82772 82774 82776 82778 82780 82802 82804 82806 82808 82810 82812 82814 82816 82818 82820 82822 82824 82826 82828 82830 82832 82834 82836 82838 82840 82842 82844 82846 82848 82850 82852 82854 82856 82858 82860 82862 82864 82866 82868 82870 82872 82874 82876 82878 82880 82902 82904 82906 82908 82910 82912 82914 82916 82918 82920 82922 82924 82926 82928 82930 82932 82934 82936 82938 82940 82942 82944 82946 82948 82950 82952 
82954 82956 82958 82960 82962 82964 82966 82968 82970 82972 82974 82976 82978 82980 83102 83104 83106 83108 83110 83112 83114 83116 83118 83120 83122 83124 83126 83128 83130 83132 83134 83136 83138 83140 83142 83144 83146 83148 83150 83152 83154 83156 83158 83160 83162 83164 83166 83168 83170 83172 83174 83176 83178 83180 83202 83204 83206 83208 83210 83212 83214 83216 83218 83220 83222 83224 83226 83228 83230 83232 83234 83236 83238 83240 83242 83244 83246 83248 83250 83252 83254 83256 83258 83260 83262 83264 83266 83268 83270 83272 83274 83276 83278 83280 83302 83304 83306 83308 83310 83312 83314 83316 83318 83320 83322 83324 83326 83328 83330 83332 83334 83336 83338 83340 83342 83344 83346 83348 83350 83352 83354 83356 83358 83360 83362 83364 83366 83368 83370 83372 83374 83376 83378 83380 83402 83404 83406 83408 83410 83412 83414 83416 83418 83420 83422 83424 83426 83428 83430 83432 83434 83436 83438 83440 83442 83444 83446 83448 83450 83452 83454 83456 83458 83460 83462 83464 83466 83468 83470 83472 83474 83476 83478 83480 83502 83504 83506 83508 83510 83512 83514 83516 83518 83520 83522 83524 83526 83528 83530 83532 83534 83536 83538 83540 83542 83544 83546 83548 83550 83552 83554 83556 83558 83560 83562 83564 83566 83568 83570 83572 83574 83576 83578 83580 83602 83604 83606 83608 83610 83612 83614 83616 83618 83620 83622 83624 83626 83628 83630 83632 83634 83636 83638 83640 83642 83644 83646 83648 83650 83652 83654 83656 83658 83660 83662 83664 83666 83668 83670 83672 83674 83676 83678 83680 83702 83704 83706 83708 83710 83712 83714 83716 83718 83720 83722 83724 83726 83728 83730 83732 83734 83736 83738 83740 83742 83744 83746 83748 83750 83752 83754 83756 83758 83760 83762 83764 83766 83768 83770 83772 83774 83776 83778 83780 83802 83804 83806 83808 83810 83812 83814 83816 83818 83820 83822 83824 83826 83828 83830 83832 83834 83836 83838 83840 83842 83844 83846 83848 83850 83852 83854 83856 83858 83860 83862 83864 83866 83868 83870 83872 83874 83876 83878 
83880 83902 83904 83906 83908 83910 83912 83914 83916 83918 83920 83922 83924 83926 83928 83930 83932 83934 83936 83938 83940 83942 83944 83946 83948 83950 83952 83954 83956 83958 83960 83962 83964 83966 83968 83970 83972 83974 83976 83978 83980 84102 84104 84106 84108 84110 84112 84114 84116 84118 84120 84122 84124 84126 84128 84130 84132 84134 84136 84138 84140 84142 84144 84146 84148 84150 84152 84154 84156 84158 84160 84162 84164 84166 84168 84170 84172 84174 84176 84178 84180 84202 84204 84206 84208 84210 84212 84214 84216 84218 84220 84222 84224 84226 84228 84230 84232 84234 84236 84238 84240 84242 84244 84246 84248 84250 84252 84254 84256 84258 84260 84262 84264 84266 84268 84270 84272 84274 84276 84278 84280 84302 84304 84306 84308 84310 84312 84314 84316 84318 84320 84322 84324 84326 84328 84330 84332 84334 84336 84338 84340 84342 84344 84346 84348 84350 84352 84354 84356 84358 84360 84362 84364 84366 84368 84370 84372 84374 84376 84378 84380 84402 84404 84406 84408 84410 84412 84414 84416 84418 84420 84422 84424 84426 84428 84430 84432 84434 84436 84438 84440 84442 84444 84446 84448 84450 84452 84454 84456 84458 84460 84462 84464 84466 84468 84470 84472 84474 84476 84478 84480 84502 84504 84506 84508 84510 84512 84514 84516 84518 84520 84522 84524 84526 84528 84530 84532 84534 84536 84538 84540 84542 84544 84546 84548 84550 84552 84554 84556 84558 84560 84562 84564 84566 84568 84570 84572 84574 84576 84578 84580 84602 84604 84606 84608 84610 84612 84614 84616 84618 84620 84622 84624 84626 84628 84630 84632 84634 84636 84638 84640 84642 84644 84646 84648 84650 84652 84654 84656 84658 84660 84662 84664 84666 84668 84670 84672 84674 84676 84678 84680 84702 84704 84706 84708 84710 84712 84714 84716 84718 84720 84722 84724 84726 84728 84730 84732 84734 84736 84738 84740 84742 84744 84746 84748 84750 84752 84754 84756 84758 84760 84762 84764 84766 84768 84770 84772 84774 84776 84778 84780 84802 84804 84806 84808 84810 84812 84814 84816 84818 84820 84822 84824 
84826 84828 84830 84832 84834 84836 84838 84840 84842 84844 84846 84848 84850 84852 84854 84856 84858 84860 84862 84864 84866 84868 84870 84872 84874 84876 84878 84880 84902 84904 84906 84908 84910 84912 84914 84916 84918 84920 84922 84924 84926 84928 84930 84932 84934 84936 84938 84940 84942 84944 84946 84948 84950 84952 84954 84956 84958 84960 84962 84964 84966 84968 84970 84972 84974 84976 84978 84980 85102 85104 85106 85108 85110 85112 85114 85116 85118 85120 85122 85124 85126 85128 85130 85132 85134 85136 85138 85140 85142 85144 85146 85148 85150 85152 85154 85156 85158 85160 85162 85164 85166 85168 85170 85172 85174 85176 85178 85180 85202 85204 85206 85208 85210 85212 85214 85216 85218 85220 85222 85224 85226 85228 85230 85232 85234 85236 85238 85240 85242 85244 85246 85248 85250 85252 85254 85256 85258 85260 85262 85264 85266 85268 85270 85272 85274 85276 85278 85280 85302 85304 85306 85308 85310 85312 85314 85316 85318 85320 85322 85324 85326 85328 85330 85332 85334 85336 85338 85340 85342 85344 85346 85348 85350 85352 85354 85356 85358 85360 85362 85364 85366 85368 85370 85372 85374 85376 85378 85380 85402 85404 85406 85408 85410 85412 85414 85416 85418 85420 85422 85424 85426 85428 85430 85432 85434 85436 85438 85440 85442 85444 85446 85448 85450 85452 85454 85456 85458 85460 85462 85464 85466 85468 85470 85472 85474 85476 85478 85480 85502 85504 85506 85508 85510 85512 85514 85516 85518 85520 85522 85524 85526 85528 85530 85532 85534 85536 85538 85540 85542 85544 85546 85548 85550 85552 85554 85556 85558 85560 85562 85564 85566 85568 85570 85572 85574 85576 85578 85580 85602 85604 85606 85608 85610 85612 85614 85616 85618 85620 85622 85624 85626 85628 85630 85632 85634 85636 85638 85640 85642 85644 85646 85648 85650 85652 85654 85656 85658 85660 85662 85664 85666 85668 85670 85672 85674 85676 85678 85680 85702 85704 85706 85708 85710 85712 85714 85716 85718 85720 85722 85724 85726 85728 85730 85732 85734 85736 85738 85740 85742 85744 85746 85748 85750 
85752 85754 85756 85758 85760 85762 85764 85766 85768 85770 85772 85774 85776 85778 85780 85802 85804 85806 85808 85810 85812 85814 85816 85818 85820 85822 85824 85826 85828 85830 85832 85834 85836 85838 85840 85842 85844 85846 85848 85850 85852 85854 85856 85858 85860 85862 85864 85866 85868 85870 85872 85874 85876 85878 85880 85902 85904 85906 85908 85910 85912 85914 85916 85918 85920 85922 85924 85926 85928 85930 85932 85934 85936 85938 85940 85942 85944 85946 85948 85950 85952 85954 85956 85958 85960 85962 85964 85966 85968 85970 85972 85974 85976 85978 85980 86102 86104 86106 86108 86110 86112 86114 86116 86118 86120 86122 86124 86126 86128 86130 86132 86134 86136 86138 86140 86142 86144 86146 86148 86150 86152 86154 86156 86158 86160 86162 86164 86166 86168 86170 86172 86174 86176 86178 86180 86202 86204 86206 86208 86210 86212 86214 86216 86218 86220 86222 86224 86226 86228 86230 86232 86234 86236 86238 86240 86242 86244 86246 86248 86250 86252 86254 86256 86258 86260 86262 86264 86266 86268 86270 86272 86274 86276 86278 86280 86302 86304 86306 86308 86310 86312 86314 86316 86318 86320 86322 86324 86326 86328 86330 86332 86334 86336 86338 86340 86342 86344 86346 86348 86350 86352 86354 86356 86358 86360 86362 86364 86366 86368 86370 86372 86374 86376 86378 86380 86402 86404 86406 86408 86410 86412 86414 86416 86418 86420 86422 86424 86426 86428 86430 86432 86434 86436 86438 86440 86442 86444 86446 86448 86450 86452 86454 86456 86458 86460 86462 86464 86466 86468 86470 86472 86474 86476 86478 86480 86502 86504 86506 86508 86510 86512 86514 86516 86518 86520 86522 86524 86526 86528 86530 86532 86534 86536 86538 86540 86542 86544 86546 86548 86550 86552 86554 86556 86558 86560 86562 86564 86566 86568 86570 86572 86574 86576 86578 86580 86602 86604 86606 86608 86610 86612 86614 86616 86618 86620 86622 86624 86626 86628 86630 86632 86634 86636 86638 86640 86642 86644 86646 86648 86650 86652 86654 86656 86658 86660 86662 86664 86666 86668 86670 86672 86674 86676 
86678 86680 86702 86704 86706 86708 86710 86712 86714 86716 86718 86720 86722 86724 86726 86728 86730 86732 86734 86736 86738 86740 86742 86744 86746 86748 86750 86752 86754 86756 86758 86760 86762 86764 86766 86768 86770 86772 86774 86776 86778 86780 86802 86804 86806 86808 86810 86812 86814 86816 86818 86820 86822 86824 86826 86828 86830 86832 86834 86836 86838 86840 86842 86844 86846 86848 86850 86852 86854 86856 86858 86860 86862 86864 86866 86868 86870 86872 86874 86876 86878 86880 86902 86904 86906 86908 86910 86912 86914 86916 86918 86920 86922 86924 86926 86928 86930 86932 86934 86936 86938 86940 86942 86944 86946 86948 86950 86952 86954 86956 86958 86960 86962 86964 86966 86968 86970 86972 86974 86976 86978 86980 87102 87104 87106 87108 87110 87112 87114 87116 87118 87120 87122 87124 87126 87128 87130 87132 87134 87136 87138 87140 87142 87144 87146 87148 87150 87152 87154 87156 87158 87160 87162 87164 87166 87168 87170 87172 87174 87176 87178 87180 87202 87204 87206 87208 87210 87212 87214 87216 87218 87220 87222 87224 87226 87228 87230 87232 87234 87236 87238 87240 87242 87244 87246 87248 87250 87252 87254 87256 87258 87260 87262 87264 87266 87268 87270 87272 87274 87276 87278 87280 87302 87304 87306 87308 87310 87312 87314 87316 87318 87320 87322 87324 87326 87328 87330 87332 87334 87336 87338 87340 87342 87344 87346 87348 87350 87352 87354 87356 87358 87360 87362 87364 87366 87368 87370 87372 87374 87376 87378 87380 87402 87404 87406 87408 87410 87412 87414 87416 87418 87420 87422 87424 87426 87428 87430 87432 87434 87436 87438 87440 87442 87444 87446 87448 87450 87452 87454 87456 87458 87460 87462 87464 87466 87468 87470 87472 87474 87476 87478 87480 87502 87504 87506 87508 87510 87512 87514 87516 87518 87520 87522 87524 87526 87528 87530 87532 87534 87536 87538 87540 87542 87544 87546 87548 87550 87552 87554 87556 87558 87560 87562 87564 87566 87568 87570 87572 87574 87576 87578 87580 87602 87604 87606 87608 87610 87612 87614 87616 87618 87620 87622 
87624 87626 87628 87630 87632 87634 87636 87638 87640 87642 87644 87646 87648 87650 87652 87654 87656 87658 87660 87662 87664 87666 87668 87670 87672 87674 87676 87678 87680 87702 87704 87706 87708 87710 87712 87714 87716 87718 87720 87722 87724 87726 87728 87730 87732 87734 87736 87738 87740 87742 87744 87746 87748 87750 87752 87754 87756 87758 87760 87762 87764 87766 87768 87770 87772 87774 87776 87778 87780 87802 87804 87806 87808 87810 87812 87814 87816 87818 87820 87822 87824 87826 87828 87830 87832 87834 87836 87838 87840 87842 87844 87846 87848 87850 87852 87854 87856 87858 87860 87862 87864 87866 87868 87870 87872 87874 87876 87878 87880 87902 87904 87906 87908 87910 87912 87914 87916 87918 87920 87922 87924 87926 87928 87930 87932 87934 87936 87938 87940 87942 87944 87946 87948 87950 87952 87954 87956 87958 87960 87962 87964 87966 87968 87970 87972 87974 87976 87978 87980 88102 88104 88106 88108 88110 88112 88114 88116 88118 88120 88122 88124 88126 88128 88130 88132 88134 88136 88138 88140 88142 88144 88146 88148 88150 88152 88154 88156 88158 88160 88162 88164 88166 88168 88170 88172 88174 88176 88178 88180 88202 88204 88206 88208 88210 88212 88214 88216 88218 88220 88222 88224 88226 88228 88230 88232 88234 88236 88238 88240 88242 88244 88246 88248 88250 88252 88254 88256 88258 88260 88262 88264 88266 88268 88270 88272 88274 88276 88278 88280 88302 88304 88306 88308 88310 88312 88314 88316 88318 88320 88322 88324 88326 88328 88330 88332 88334 88336 88338 88340 88342 88344 88346 88348 88350 88352 88354 88356 88358 88360 88362 88364 88366 88368 88370 88372 88374 88376 88378 88380 88402 88404 88406 88408 88410 88412 88414 88416 88418 88420 88422 88424 88426 88428 88430 88432 88434 88436 88438 88440 88442 88444 88446 88448 88450 88452 88454 88456 88458 88460 88462 88464 88466 88468 88470 88472 88474 88476 88478 88480 88502 88504 88506 88508 88510 88512 88514 88516 88518 88520 88522 88524 88526 88528 88530 88532 88534 88536 88538 88540 88542 88544 88546 88548 
88550 88552 88554 88556 88558 88560 88562 88564 88566 88568 88570 88572 88574 88576 88578 88580 88602 88604 88606 88608 88610 88612 88614 88616 88618 88620 88622 88624 88626 88628 88630 88632 88634 88636 88638 88640 88642 88644 88646 88648 88650 88652 88654 88656 88658 88660 88662 88664 88666 88668 88670 88672 88674 88676 88678 88680 88702 88704 88706 88708 88710 88712 88714 88716 88718 88720 88722 88724 88726 88728 88730 88732 88734 88736 88738 88740 88742 88744 88746 88748 88750 88752 88754 88756 88758 88760 88762 88764 88766 88768 88770 88772 88774 88776 88778 88780 88802 88804 88806 88808 88810 88812 88814 88816 88818 88820 88822 88824 88826 88828 88830 88832 88834 88836 88838 88840 88842 88844 88846 88848 88850 88852 88854 88856 88858 88860 88862 88864 88866 88868 88870 88872 88874 88876 88878 88880 88902 88904 88906 88908 88910 88912 88914 88916 88918 88920 88922 88924 88926 88928 88930 88932 88934 88936 88938 88940 88942 88944 88946 88948 88950 88952 88954 88956 88958 88960 88962 88964 88966 88968 88970 88972 88974 88976 88978 88980 89102 89104 89106 89108 89110 89112 89114 89116 89118 89120 89122 89124 89126 89128 89130 89132 89134 89136 89138 89140 89142 89144 89146 89148 89150 89152 89154 89156 89158 89160 89162 89164 89166 89168 89170 89172 89174 89176 89178 89180 89202 89204 89206 89208 89210 89212 89214 89216 89218 89220 89222 89224 89226 89228 89230 89232 89234 89236 89238 89240 89242 89244 89246 89248 89250 89252 89254 89256 89258 89260 89262 89264 89266 89268 89270 89272 89274 89276 89278 89280 89302 89304 89306 89308 89310 89312 89314 89316 89318 89320 89322 89324 89326 89328 89330 89332 89334 89336 89338 89340 89342 89344 89346 89348 89350 89352 89354 89356 89358 89360 89362 89364 89366 89368 89370 89372 89374 89376 89378 89380 89402 89404 89406 89408 89410 89412 89414 89416 89418 89420 89422 89424 89426 89428 89430 89432 89434 89436 89438 89440 89442 89444 89446 89448 89450 89452 89454 89456 89458 89460 89462 89464 89466 89468 89470 89472 89474 
89476 89478 89480 89502 89504 89506 89508 89510 89512 89514 89516 89518 89520 89522 89524 89526 89528 89530 89532 89534 89536 89538 89540 89542 89544 89546 89548 89550 89552 89554 89556 89558 89560 89562 89564 89566 89568 89570 89572 89574 89576 89578 89580 89602 89604 89606 89608 89610 89612 89614 89616 89618 89620 89622 89624 89626 89628 89630 89632 89634 89636 89638 89640 89642 89644 89646 89648 89650 89652 89654 89656 89658 89660 89662 89664 89666 89668 89670 89672 89674 89676 89678 89680 89702 89704 89706 89708 89710 89712 89714 89716 89718 89720 89722 89724 89726 89728 89730 89732 89734 89736 89738 89740 89742 89744 89746 89748 89750 89752 89754 89756 89758 89760 89762 89764 89766 89768 89770 89772 89774 89776 89778 89780 89802 89804 89806 89808 89810 89812 89814 89816 89818 89820 89822 89824 89826 89828 89830 89832 89834 89836 89838 89840 89842 89844 89846 89848 89850 89852 89854 89856 89858 89860 89862 89864 89866 89868 89870 89872 89874 89876 89878 89880 89902 89904 89906 89908 89910 89912 89914 89916 89918 89920 89922 89924 89926 89928 89930 89932 89934 89936 89938 89940 89942 89944 89946 89948 89950 89952 89954 89956 89958 89960 89962 89964 89966 89968 89970 89972 89974 89976 89978 89980 90101 90102 90103 90104 90105 90106 90107 90108 90109 90110 90111 90112 90113 90114 90115 90116 90117 90118 90119 90120 90121 90122 90123 90124 90125 90126 90127 90128 90129 90130 90131 90132 90133 90134 90135 90136 90137 90138 90139 90140 90141 90142 90143 90144 90145 90146 90147 90148 90149 90150 90151 90152 90153 90154 90155 90156 90157 90158 90159 90160 90161 90162 90163 90164 90165 90166 90167 90168 90169 90170 90171 90172 90173 90174 90175 90176 90177 90178 90179 90180 90201 90202 90203 90204 90205 90206 90207 90208 90209 90210 90211 90212 90213 90214 90215 90216 90217 90218 90219 90220 90221 90222 90223 90224 90225 90226 90227 90228 90229 90230 90231 90232 90233 90234 90235 90236 90237 90238 90239 90240 90241 90242 90243 90244 90245 90246 90247 90248 90249 90250 
90251 90252 90253 90254 90255 90256 90257 90258 90259 90260 90261 90262 90263 90264 90265 90266 90267 90268 90269 90270 90271 90272 90273 90274 90275 90276 90277 90278 90279 90280 90301 90302 90303 90304 90305 90306 90307 90308 90309 90310 90311 90312 90313 90314 90315 90316 90317 90318 90319 90320 90321 90322 90323 90324 90325 90326 90327 90328 90329 90330 90331 90332 90333 90334 90335 90336 90337 90338 90339 90340 90341 90342 90343 90344 90345 90346 90347 90348 90349 90350 90351 90352 90353 90354 90355 90356 90357 90358 90359 90360 90361 90362 90363 90364 90365 90366 90367 90368 90369 90370 90371 90372 90373 90374 90375 90376 90377 90378 90379 90380 90401 90402 90403 90404 90405 90406 90407 90408 90409 90410 90411 90412 90413 90414 90415 90416 90417 90418 90419 90420 90421 90422 90423 90424 90425 90426 90427 90428 90429 90430 90431 90432 90433 90434 90435 90436 90437 90438 90439 90440 90441 90442 90443 90444 90445 90446 90447 90448 90449 90450 90451 90452 90453 90454 90455 90456 90457 90458 90459 90460 90461 90462 90463 90464 90465 90466 90467 90468 90469 90470 90471 90472 90473 90474 90475 90476 90477 90478 90479 90480 90501 90502 90503 90504 90505 90506 90507 90508 90509 90510 90511 90512 90513 90514 90515 90516 90517 90518 90519 90520 90521 90522 90523 90524 90525 90526 90527 90528 90529 90530 90531 90532 90533 90534 90535 90536 90537 90538 90539 90540 90541 90542 90543 90544 90545 90546 90547 90548 90549 90550 90551 90552 90553 90554 90555 90556 90557 90558 90559 90560 90561 90562 90563 90564 90565 90566 90567 90568 90569 90570 90571 90572 90573 90574 90575 90576 90577 90578 90579 90580 90601 90602 90603 90604 90605 90606 90607 90608 90609 90610 90611 90612 90613 90614 90615 90616 90617 90618 90619 90620 90621 90622 90623 90624 90625 90626 90627 90628 90629 90630 90631 90632 90633 90634 90635 90636 90637 90638 90639 90640 90641 90642 90643 90644 90645 90646 90647 90648 90649 90650 90651 90652 90653 90654 90655 90656 90657 90658 90659 90660 90661 90662 90663 
90664 90665 90666 90667 90668 90669 90670 90671 90672 90673 90674 90675 90676 90677 90678 90679 90680 90701 90702 90703 90704 90705 90706 90707 90708 90709 90710 90711 90712 90713 90714 90715 90716 90717 90718 90719 90720 90721 90722 90723 90724 90725 90726 90727 90728 90729 90730 90731 90732 90733 90734 90735 90736 90737 90738 90739 90740 90741 90742 90743 90744 90745 90746 90747 90748 90749 90750 90751 90752 90753 90754 90755 90756 90757 90758 90759 90760 90761 90762 90763 90764 90765 90766 90767 90768 90769 90770 90771 90772 90773 90774 90775 90776 90777 90778 90779 90780 90801 90802 90803 90804 90805 90806 90807 90808 90809 90810 90811 90812 90813 90814 90815 90816 90817 90818 90819 90820 90821 90822 90823 90824 90825 90826 90827 90828 90829 90830 90831 90832 90833 90834 90835 90836 90837 90838 90839 90840 90841 90842 90843 90844 90845 90846 90847 90848 90849 90850 90851 90852 90853 90854 90855 90856 90857 90858 90859 90860 90861 90862 90863 90864 90865 90866 90867 90868 90869 90870 90871 90872 90873 90874 90875 90876 90877 90878 90879 90880 90901 90902 90903 90904 90905 90906 90907 90908 90909 90910 90911 90912 90913 90914 90915 90916 90917 90918 90919 90920 90921 90922 90923 90924 90925 90926 90927 90928 90929 90930 90931 90932 90933 90934 90935 90936 90937 90938 90939 90940 90941 90942 90943 90944 90945 90946 90947 90948 90949 90950 90951 90952 90953 90954 90955 90956 90957 90958 90959 90960 90961 90962 90963 90964 90965 90966 90967 90968 90969 90970 90971 90972 90973 90974 90975 90976 90977 90978 90979 90980 91101 91103 91105 91107 91109 91111 91113 91115 91117 91119 91121 91123 91125 91127 91129 91131 91133 91135 91137 91139 91141 91143 91145 91147 91149 91151 91153 91155 91157 91159 91161 91163 91165 91167 91169 91171 91173 91175 91177 91179 91201 91203 91205 91207 91209 91211 91213 91215 91217 91219 91221 91223 91225 91227 91229 91231 91233 91235 91237 91239 91241 91243 91245 91247 91249 91251 91253 91255 91257 91259 91261 91263 91265 91267 91269 91271 
91273 91275 91277 91279 91301 91303 91305 91307 91309 91311 91313 91315 91317 91319 91321 91323 91325 91327 91329 91331 91333 91335 91337 91339 91341 91343 91345 91347 91349 91351 91353 91355 91357 91359 91361 91363 91365 91367 91369 91371 91373 91375 91377 91379 91401 91403 91405 91407 91409 91411 91413 91415 91417 91419 91421 91423 91425 91427 91429 91431 91433 91435 91437 91439 91441 91443 91445 91447 91449 91451 91453 91455 91457 91459 91461 91463 91465 91467 91469 91471 91473 91475 91477 91479 91501 91503 91505 91507 91509 91511 91513 91515 91517 91519 91521 91523 91525 91527 91529 91531 91533 91535 91537 91539 91541 91543 91545 91547 91549 91551 91553 91555 91557 91559 91561 91563 91565 91567 91569 91571 91573 91575 91577 91579 91601 91603 91605 91607 91609 91611 91613 91615 91617 91619 91621 91623 91625 91627 91629 91631 91633 91635 91637 91639 91641 91643 91645 91647 91649 91651 91653 91655 91657 91659 91661 91663 91665 91667 91669 91671 91673 91675 91677 91679 91701 91703 91705 91707 91709 91711 91713 91715 91717 91719 91721 91723 91725 91727 91729 91731 91733 91735 91737 91739 91741 91743 91745 91747 91749 91751 91753 91755 91757 91759 91761 91763 91765 91767 91769 91771 91773 91775 91777 91779 91801 91803 91805 91807 91809 91811 91813 91815 91817 91819 91821 91823 91825 91827 91829 91831 91833 91835 91837 91839 91841 91843 91845 91847 91849 91851 91853 91855 91857 91859 91861 91863 91865 91867 91869 91871 91873 91875 91877 91879 91901 91903 91905 91907 91909 91911 91913 91915 91917 91919 91921 91923 91925 91927 91929 91931 91933 91935 91937 91939 91941 91943 91945 91947 91949 91951 91953 91955 91957 91959 91961 91963 91965 91967 91969 91971 91973 91975 91977 91979 92101 92103 92105 92107 92109 92111 92113 92115 92117 92119 92121 92123 92125 92127 92129 92131 92133 92135 92137 92139 92141 92143 92145 92147 92149 92151 92153 92155 92157 92159 92161 92163 92165 92167 92169 92171 92173 92175 92177 92179 92201 92203 92205 92207 92209 92211 92213 92215 92217 
92219 92221 92223 92225 92227 92229 92231 92233 92235 92237 92239 92241 92243 92245 92247 92249 92251 92253 92255 92257 92259 92261 92263 92265 92267 92269 92271 92273 92275 92277 92279 92301 92303 92305 92307 92309 92311 92313 92315 92317 92319 92321 92323 92325 92327 92329 92331 92333 92335 92337 92339 92341 92343 92345 92347 92349 92351 92353 92355 92357 92359 92361 92363 92365 92367 92369 92371 92373 92375 92377 92379 92401 92403 92405 92407 92409 92411 92413 92415 92417 92419 92421 92423 92425 92427 92429 92431 92433 92435 92437 92439 92441 92443 92445 92447 92449 92451 92453 92455 92457 92459 92461 92463 92465 92467 92469 92471 92473 92475 92477 92479 92501 92503 92505 92507 92509 92511 92513 92515 92517 92519 92521 92523 92525 92527 92529 92531 92533 92535 92537 92539 92541 92543 92545 92547 92549 92551 92553 92555 92557 92559 92561 92563 92565 92567 92569 92571 92573 92575 92577 92579 92601 92603 92605 92607 92609 92611 92613 92615 92617 92619 92621 92623 92625 92627 92629 92631 92633 92635 92637 92639 92641 92643 92645 92647 92649 92651 92653 92655 92657 92659 92661 92663 92665 92667 92669 92671 92673 92675 92677 92679 92701 92703 92705 92707 92709 92711 92713 92715 92717 92719 92721 92723 92725 92727 92729 92731 92733 92735 92737 92739 92741 92743 92745 92747 92749 92751 92753 92755 92757 92759 92761 92763 92765 92767 92769 92771 92773 92775 92777 92779 92801 92803 92805 92807 92809 92811 92813 92815 92817 92819 92821 92823 92825 92827 92829 92831 92833 92835 92837 92839 92841 92843 92845 92847 92849 92851 92853 92855 92857 92859 92861 92863 92865 92867 92869 92871 92873 92875 92877 92879 92901 92903 92905 92907 92909 92911 92913 92915 92917 92919 92921 92923 92925 92927 92929 92931 92933 92935 92937 92939 92941 92943 92945 92947 92949 92951 92953 92955 92957 92959 92961 92963 92965 92967 92969 92971 92973 92975 92977 92979 93101 93103 93105 93107 93109 93111 93113 93115 93117 93119 93121 93123 93125 93127 93129 93131 93133 93135 93137 93139 93141 93143 
93145 93147 93149 93151 93153 93155 93157 93159 93161 93163 93165 93167 93169 93171 93173 93175 93177 93179 93201 93203 93205 93207 93209 93211 93213 93215 93217 93219 93221 93223 93225 93227 93229 93231 93233 93235 93237 93239 93241 93243 93245 93247 93249 93251 93253 93255 93257 93259 93261 93263 93265 93267 93269 93271 93273 93275 93277 93279 93301 93303 93305 93307 93309 93311 93313 93315 93317 93319 93321 93323 93325 93327 93329 93331 93333 93335 93337 93339 93341 93343 93345 93347 93349 93351 93353 93355 93357 93359 93361 93363 93365 93367 93369 93371 93373 93375 93377 93379 93401 93403 93405 93407 93409 93411 93413 93415 93417 93419 93421 93423 93425 93427 93429 93431 93433 93435 93437 93439 93441 93443 93445 93447 93449 93451 93453 93455 93457 93459 93461 93463 93465 93467 93469 93471 93473 93475 93477 93479 93501 93503 93505 93507 93509 93511 93513 93515 93517 93519 93521 93523 93525 93527 93529 93531 93533 93535 93537 93539 93541 93543 93545 93547 93549 93551 93553 93555 93557 93559 93561 93563 93565 93567 93569 93571 93573 93575 93577 93579 93601 93603 93605 93607 93609 93611 93613 93615 93617 93619 93621 93623 93625 93627 93629 93631 93633 93635 93637 93639 93641 93643 93645 93647 93649 93651 93653 93655 93657 93659 93661 93663 93665 93667 93669 93671 93673 93675 93677 93679 93701 93703 93705 93707 93709 93711 93713 93715 93717 93719 93721 93723 93725 93727 93729 93731 93733 93735 93737 93739 93741 93743 93745 93747 93749 93751 93753 93755 93757 93759 93761 93763 93765 93767 93769 93771 93773 93775 93777 93779 93801 93803 93805 93807 93809 93811 93813 93815 93817 93819 93821 93823 93825 93827 93829 93831 93833 93835 93837 93839 93841 93843 93845 93847 93849 93851 93853 93855 93857 93859 93861 93863 93865 93867 93869 93871 93873 93875 93877 93879 93901 93903 93905 93907 93909 93911 93913 93915 93917 93919 93921 93923 93925 93927 93929 93931 93933 93935 93937 93939 93941 93943 93945 93947 93949 93951 93953 93955 93957 93959 93961 93963 93965 93967 93969 
93971 93973 93975 93977 93979 94101 94103 94105 94107 94109 94111 94113 94115 94117 94119 94121 94123 94125 94127 94129 94131 94133 94135 94137 94139 94141 94143 94145 94147 94149 94151 94153 94155 94157 94159 94161 94163 94165 94167 94169 94171 94173 94175 94177 94179 94201 94203 94205 94207 94209 94211 94213 94215 94217 94219 94221 94223 94225 94227 94229 94231 94233 94235 94237 94239 94241 94243 94245 94247 94249 94251 94253 94255 94257 94259 94261 94263 94265 94267 94269 94271 94273 94275 94277 94279 94301 94303 94305 94307 94309 94311 94313 94315 94317 94319 94321 94323 94325 94327 94329 94331 94333 94335 94337 94339 94341 94343 94345 94347 94349 94351 94353 94355 94357 94359 94361 94363 94365 94367 94369 94371 94373 94375 94377 94379 94401 94403 94405 94407 94409 94411 94413 94415 94417 94419 94421 94423 94425 94427 94429 94431 94433 94435 94437 94439 94441 94443 94445 94447 94449 94451 94453 94455 94457 94459 94461 94463 94465 94467 94469 94471 94473 94475 94477 94479 94501 94503 94505 94507 94509 94511 94513 94515 94517 94519 94521 94523 94525 94527 94529 94531 94533 94535 94537 94539 94541 94543 94545 94547 94549 94551 94553 94555 94557 94559 94561 94563 94565 94567 94569 94571 94573 94575 94577 94579 94601 94603 94605 94607 94609 94611 94613 94615 94617 94619 94621 94623 94625 94627 94629 94631 94633 94635 94637 94639 94641 94643 94645 94647 94649 94651 94653 94655 94657 94659 94661 94663 94665 94667 94669 94671 94673 94675 94677 94679 94701 94703 94705 94707 94709 94711 94713 94715 94717 94719 94721 94723 94725 94727 94729 94731 94733 94735 94737 94739 94741 94743 94745 94747 94749 94751 94753 94755 94757 94759 94761 94763 94765 94767 94769 94771 94773 94775 94777 94779 94801 94803 94805 94807 94809 94811 94813 94815 94817 94819 94821 94823 94825 94827 94829 94831 94833 94835 94837 94839 94841 94843 94845 94847 94849 94851 94853 94855 94857 94859 94861 94863 94865 94867 94869 94871 94873 94875 94877 94879 94901 94903 94905 94907 94909 94911 94913 94915 
94917 94919 94921 94923 94925 94927 94929 94931 94933 94935 94937 94939 94941 94943 94945 94947 94949 94951 94953 94955 94957 94959 94961 94963 94965 94967 94969 94971 94973 94975 94977 94979 95101 95103 95105 95107 95109 95111 95113 95115 95117 95119 95121 95123 95125 95127 95129 95131 95133 95135 95137 95139 95141 95143 95145 95147 95149 95151 95153 95155 95157 95159 95161 95163 95165 95167 95169 95171 95173 95175 95177 95179 95201 95203 95205 95207 95209 95211 95213 95215 95217 95219 95221 95223 95225 95227 95229 95231 95233 95235 95237 95239 95241 95243 95245 95247 95249 95251 95253 95255 95257 95259 95261 95263 95265 95267 95269 95271 95273 95275 95277 95279 95301 95303 95305 95307 95309 95311 95313 95315 95317 95319 95321 95323 95325 95327 95329 95331 95333 95335 95337 95339 95341 95343 95345 95347 95349 95351 95353 95355 95357 95359 95361 95363 95365 95367 95369 95371 95373 95375 95377 95379 95401 95403 95405 95407 95409 95411 95413 95415 95417 95419 95421 95423 95425 95427 95429 95431 95433 95435 95437 95439 95441 95443 95445 95447 95449 95451 95453 95455 95457 95459 95461 95463 95465 95467 95469 95471 95473 95475 95477 95479 95501 95503 95505 95507 95509 95511 95513 95515 95517 95519 95521 95523 95525 95527 95529 95531 95533 95535 95537 95539 95541 95543 95545 95547 95549 95551 95553 95555 95557 95559 95561 95563 95565 95567 95569 95571 95573 95575 95577 95579 95601 95603 95605 95607 95609 95611 95613 95615 95617 95619 95621 95623 95625 95627 95629 95631 95633 95635 95637 95639 95641 95643 95645 95647 95649 95651 95653 95655 95657 95659 95661 95663 95665 95667 95669 95671 95673 95675 95677 95679 95701 95703 95705 95707 95709 95711 95713 95715 95717 95719 95721 95723 95725 95727 95729 95731 95733 95735 95737 95739 95741 95743 95745 95747 95749 95751 95753 95755 95757 95759 95761 95763 95765 95767 95769 95771 95773 95775 95777 95779 95801 95803 95805 95807 95809 95811 95813 95815 95817 95819 95821 95823 95825 95827 95829 95831 95833 95835 95837 95839 95841 
95843 95845 95847 95849 95851 95853 95855 95857 95859 95861 95863 95865 95867 95869 95871 95873 95875 95877 95879 95901 95903 95905 95907 95909 95911 95913 95915 95917 95919 95921 95923 95925 95927 95929 95931 95933 95935 95937 95939 95941 95943 95945 95947 95949 95951 95953 95955 95957 95959 95961 95963 95965 95967 95969 95971 95973 95975 95977 95979 96101 96103 96105 96107 96109 96111 96113 96115 96117 96119 96121 96123 96125 96127 96129 96131 96133 96135 96137 96139 96141 96143 96145 96147 96149 96151 96153 96155 96157 96159 96161 96163 96165 96167 96169 96171 96173 96175 96177 96179 96201 96203 96205 96207 96209 96211 96213 96215 96217 96219 96221 96223 96225 96227 96229 96231 96233 96235 96237 96239 96241 96243 96245 96247 96249 96251 96253 96255 96257 96259 96261 96263 96265 96267 96269 96271 96273 96275 96277 96279 96301 96303 96305 96307 96309 96311 96313 96315 96317 96319 96321 96323 96325 96327 96329 96331 96333 96335 96337 96339 96341 96343 96345 96347 96349 96351 96353 96355 96357 96359 96361 96363 96365 96367 96369 96371 96373 96375 96377 96379 96401 96403 96405 96407 96409 96411 96413 96415 96417 96419 96421 96423 96425 96427 96429 96431 96433 96435 96437 96439 96441 96443 96445 96447 96449 96451 96453 96455 96457 96459 96461 96463 96465 96467 96469 96471 96473 96475 96477 96479 96501 96503 96505 96507 96509 96511 96513 96515 96517 96519 96521 96523 96525 96527 96529 96531 96533 96535 96537 96539 96541 96543 96545 96547 96549 96551 96553 96555 96557 96559 96561 96563 96565 96567 96569 96571 96573 96575 96577 96579 96601 96603 96605 96607 96609 96611 96613 96615 96617 96619 96621 96623 96625 96627 96629 96631 96633 96635 96637 96639 96641 96643 96645 96647 96649 96651 96653 96655 96657 96659 96661 96663 96665 96667 96669 96671 96673 96675 96677 96679 96701 96703 96705 96707 96709 96711 96713 96715 96717 96719 96721 96723 96725 96727 96729 96731 96733 96735 96737 96739 96741 96743 96745 96747 96749 96751 96753 96755 96757 96759 96761 96763 96765 96767 
96769 96771 96773 96775 96777 96779 96801 96803 96805 96807 96809 96811 96813 96815 96817 96819 96821 96823 96825 96827 96829 96831 96833 96835 96837 96839 96841 96843 96845 96847 96849 96851 96853 96855 96857 96859 96861 96863 96865 96867 96869 96871 96873 96875 96877 96879 96901 96903 96905 96907 96909 96911 96913 96915 96917 96919 96921 96923 96925 96927 96929 96931 96933 96935 96937 96939 96941 96943 96945 96947 96949 96951 96953 96955 96957 96959 96961 96963 96965 96967 96969 96971 96973 96975 96977 96979 97101 97103 97105 97107 97109 97111 97113 97115 97117 97119 97121 97123 97125 97127 97129 97131 97133 97135 97137 97139 97141 97143 97145 97147 97149 97151 97153 97155 97157 97159 97161 97163 97165 97167 97169 97171 97173 97175 97177 97179 97201 97203 97205 97207 97209 97211 97213 97215 97217 97219 97221 97223 97225 97227 97229 97231 97233 97235 97237 97239 97241 97243 97245 97247 97249 97251 97253 97255 97257 97259 97261 97263 97265 97267 97269 97271 97273 97275 97277 97279 97301 97303 97305 97307 97309 97311 97313 97315 97317 97319 97321 97323 97325 97327 97329 97331 97333 97335 97337 97339 97341 97343 97345 97347 97349 97351 97353 97355 97357 97359 97361 97363 97365 97367 97369 97371 97373 97375 97377 97379 97401 97403 97405 97407 97409 97411 97413 97415 97417 97419 97421 97423 97425 97427 97429 97431 97433 97435 97437 97439 97441 97443 97445 97447 97449 97451 97453 97455 97457 97459 97461 97463 97465 97467 97469 97471 97473 97475 97477 97479 97501 97503 97505 97507 97509 97511 97513 97515 97517 97519 97521 97523 97525 97527 97529 97531 97533 97535 97537 97539 97541 97543 97545 97547 97549 97551 97553 97555 97557 97559 97561 97563 97565 97567 97569 97571 97573 97575 97577 97579 97601 97603 97605 97607 97609 97611 97613 97615 97617 97619 97621 97623 97625 97627 97629 97631 97633 97635 97637 97639 97641 97643 97645 97647 97649 97651 97653 97655 97657 97659 97661 97663 97665 97667 97669 97671 97673 97675 97677 97679 97701 97703 97705 97707 97709 97711 97713 
97715 97717 97719 97721 97723 97725 97727 97729 97731 97733 97735 97737 97739 97741 97743 97745 97747 97749 97751 97753 97755 97757 97759 97761 97763 97765 97767 97769 97771 97773 97775 97777 97779 97801 97803 97805 97807 97809 97811 97813 97815 97817 97819 97821 97823 97825 97827 97829 97831 97833 97835 97837 97839 97841 97843 97845 97847 97849 97851 97853 97855 97857 97859 97861 97863 97865 97867 97869 97871 97873 97875 97877 97879 97901 97903 97905 97907 97909 97911 97913 97915 97917 97919 97921 97923 97925 97927 97929 97931 97933 97935 97937 97939 97941 97943 97945 97947 97949 97951 97953 97955 97957 97959 97961 97963 97965 97967 97969 97971 97973 97975 97977 97979 98101 98103 98105 98107 98109 98111 98113 98115 98117 98119 98121 98123 98125 98127 98129 98131 98133 98135 98137 98139 98141 98143 98145 98147 98149 98151 98153 98155 98157 98159 98161 98163 98165 98167 98169 98171 98173 98175 98177 98179 98201 98203 98205 98207 98209 98211 98213 98215 98217 98219 98221 98223 98225 98227 98229 98231 98233 98235 98237 98239 98241 98243 98245 98247 98249 98251 98253 98255 98257 98259 98261 98263 98265 98267 98269 98271 98273 98275 98277 98279 98301 98303 98305 98307 98309 98311 98313 98315 98317 98319 98321 98323 98325 98327 98329 98331 98333 98335 98337 98339 98341 98343 98345 98347 98349 98351 98353 98355 98357 98359 98361 98363 98365 98367 98369 98371 98373 98375 98377 98379 98401 98403 98405 98407 98409 98411 98413 98415 98417 98419 98421 98423 98425 98427 98429 98431 98433 98435 98437 98439 98441 98443 98445 98447 98449 98451 98453 98455 98457 98459 98461 98463 98465 98467 98469 98471 98473 98475 98477 98479 98501 98503 98505 98507 98509 98511 98513 98515 98517 98519 98521 98523 98525 98527 98529 98531 98533 98535 98537 98539 98541 98543 98545 98547 98549 98551 98553 98555 98557 98559 98561 98563 98565 98567 98569 98571 98573 98575 98577 98579 98601 98603 98605 98607 98609 98611 98613 98615 98617 98619 98621 98623 98625 98627 98629 98631 98633 98635 98637 98639 
98641 98643 98645 98647 98649 98651 98653 98655 98657 98659 98661 98663 98665 98667 98669 98671 98673 98675 98677 98679 98701 98703 98705 98707 98709 98711 98713 98715 98717 98719 98721 98723 98725 98727 98729 98731 98733 98735 98737 98739 98741 98743 98745 98747 98749 98751 98753 98755 98757 98759 98761 98763 98765 98767 98769 98771 98773 98775 98777 98779 98801 98803 98805 98807 98809 98811 98813 98815 98817 98819 98821 98823 98825 98827 98829 98831 98833 98835 98837 98839 98841 98843 98845 98847 98849 98851 98853 98855 98857 98859 98861 98863 98865 98867 98869 98871 98873 98875 98877 98879 98901 98903 98905 98907 98909 98911 98913 98915 98917 98919 98921 98923 98925 98927 98929 98931 98933 98935 98937 98939 98941 98943 98945 98947 98949 98951 98953 98955 98957 98959 98961 98963 98965 98967 98969 98971 98973 98975 98977 98979 99101 99103 99105 99107 99109 99111 99113 99115 99117 99119 99121 99123 99125 99127 99129 99131 99133 99135 99137 99139 99141 99143 99145 99147 99149 99151 99153 99155 99157 99159 99161 99163 99165 99167 99169 99171 99173 99175 99177 99179 99201 99203 99205 99207 99209 99211 99213 99215 99217 99219 99221 99223 99225 99227 99229 99231 99233 99235 99237 99239 99241 99243 99245 99247 99249 99251 99253 99255 99257 99259 99261 99263 99265 99267 99269 99271 99273 99275 99277 99279 99301 99303 99305 99307 99309 99311 99313 99315 99317 99319 99321 99323 99325 99327 99329 99331 99333 99335 99337 99339 99341 99343 99345 99347 99349 99351 99353 99355 99357 99359 99361 99363 99365 99367 99369 99371 99373 99375 99377 99379 99401 99403 99405 99407 99409 99411 99413 99415 99417 99419 99421 99423 99425 99427 99429 99431 99433 99435 99437 99439 99441 99443 99445 99447 99449 99451 99453 99455 99457 99459 99461 99463 99465 99467 99469 99471 99473 99475 99477 99479 99501 99503 99505 99507 99509 99511 99513 99515 99517 99519 99521 99523 99525 99527 99529 99531 99533 99535 99537 99539 99541 99543 99545 99547 99549 99551 99553 99555 99557 99559 99561 99563 99565 
99567 99569 99571 99573 99575 99577 99579 99601 99603 99605 99607 99609 99611 99613 99615 99617 99619 99621 99623 99625 99627 99629 99631 99633 99635 99637 99639 99641 99643 99645 99647 99649 99651 99653 99655 99657 99659 99661 99663 99665 99667 99669 99671 99673 99675 99677 99679 99701 99703 99705 99707 99709 99711 99713 99715 99717 99719 99721 99723 99725 99727 99729 99731 99733 99735 99737 99739 99741 99743 99745 99747 99749 99751 99753 99755 99757 99759 99761 99763 99765 99767 99769 99771 99773 99775 99777 99779 99801 99803 99805 99807 99809 99811 99813 99815 99817 99819 99821 99823 99825 99827 99829 99831 99833 99835 99837 99839 99841 99843 99845 99847 99849 99851 99853 99855 99857 99859 99861 99863 99865 99867 99869 99871 99873 99875 99877 99879 99901 99903 99905 99907 99909 99911 99913 99915 99917 99919 99921 99923 99925 99927 99929 99931 99933 99935 99937 99939 99941 99943 99945 99947 99949 99951 99953 99955 99957 99959 99961 99963 99965 99967 99969 99971 99973 99975 99977 99979] 0 360 36994 154980 RED
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Gent-Rowley/Connect9/cf_9_9x9_d_/cf_9_9x9_d_.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
56,998
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 162900 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 154980 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 154980 c c Input Parameter (command line, file): c input filename QBFLIB/Gent-Rowley/Connect9/cf_9_9x9_d_.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 910981 c no.of clauses 162900 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 154980 c c QBFLIB/Gent-Rowley/Connect9/cf_9_9x9_d_.qdimacs 910981 162900 E1 [10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141 10142 10143 10144 10145 10146 10147 10148 10149 10150 10151 10152 10153 10154 10155 10156 10157 10158 10159 10160 10161 10162 10163 10164 10165 10166 10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178 10179 10180 10181 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10243 10244 10245 10246 10247 10248 10249 10250 10251 10252 10253 10254 10255 10256 10257 10258 10259 10260 10261 10262 10263 10264 10265 10266 10267 10268 10269 10270 10271 10272 10273 10274 10275 10276 10277 10278 10279 10280 10281 10301 10302 10303 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10343 10344 10345 10346 10347 10348 10349 10350 10351 10352 10353 10354 10355 10356 10357 10358 
10359 10360 10361 10362 10363 10364 10365 10366 10367 10368 10369 10370 10371 10372 10373 10374 10375 10376 10377 10378 10379 10380 10381 10401 10402 10403 10404 10405 10406 10407 10408 10409 10410 10411 10412 10413 10414 10415 10416 10417 10418 10419 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10443 10444 10445 10446 10447 10448 10449 10450 10451 10452 10453 10454 10455 10456 10457 10458 10459 10460 10461 10462 10463 10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478 10479 10480 10481 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10543 10544 10545 10546 10547 10548 10549 10550 10551 10552 10553 10554 10555 10556 10557 10558 10559 10560 10561 10562 10563 10564 10565 10566 10567 10568 10569 10570 10571 10572 10573 10574 10575 10576 10577 10578 10579 10580 10581 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10643 10644 10645 10646 10647 10648 10649 10650 10651 10652 10653 10654 10655 10656 10657 10658 10659 10660 10661 10662 10663 10664 10665 10666 10667 10668 10669 10670 10671 10672 10673 10674 10675 10676 10677 10678 10679 10680 10681 10701 10702 10703 10704 10705 10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716 10717 10718 10719 10720 10721 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 10743 10744 10745 10746 10747 10748 10749 10750 10751 10752 10753 10754 10755 10756 10757 10758 10759 10760 10761 10762 10763 10764 10765 10766 10767 
10768 10769 10770 10771 10772 10773 10774 10775 10776 10777 10778 10779 10780 10781 10801 10802 10803 10804 10805 10806 10807 10808 10809 10810 10811 10812 10813 10814 10815 10816 10817 10818 10819 10820 10821 10822 10823 10824 10825 10826 10827 10828 10829 10830 10831 10832 10833 10834 10835 10836 10837 10838 10839 10840 10841 10842 10843 10844 10845 10846 10847 10848 10849 10850 10851 10852 10853 10854 10855 10856 10857 10858 10859 10860 10861 10862 10863 10864 10865 10866 10867 10868 10869 10870 10871 10872 10873 10874 10875 10876 10877 10878 10879 10880 10881 10901 10902 10903 10904 10905 10906 10907 10908 10909 10910 10911 10912 10913 10914 10915 10916 10917 10918 10919 10920 10921 10922 10923 10924 10925 10926 10927 10928 10929 10930 10931 10932 10933 10934 10935 10936 10937 10938 10939 10940 10941 10942 10943 10944 10945 10946 10947 10948 10949 10950 10951 10952 10953 10954 10955 10956 10957 10958 10959 10960 10961 10962 10963 10964 10965 10966 10967 10968 10969 10970 10971 10972 10973 10974 10975 10976 10977 10978 10979 10980 10981 20101 20102 20103 20104 20105 20106 20107 20108 20109 20110 20111 20112 20113 20114 20115 20116 20117 20118 20119 20120 20121 20122 20123 20124 20125 20126 20127 20128 20129 20130 20131 20132 20133 20134 20135 20136 20137 20138 20139 20140 20141 20142 20143 20144 20145 20146 20147 20148 20149 20150 20151 20152 20153 20154 20155 20156 20157 20158 20159 20160 20161 20162 20163 20164 20165 20166 20167 20168 20169 20170 20171 20172 20173 20174 20175 20176 20177 20178 20179 20180 20181 20201 20202 20203 20204 20205 20206 20207 20208 20209 20210 20211 20212 20213 20214 20215 20216 20217 20218 20219 20220 20221 20222 20223 20224 20225 20226 20227 20228 20229 20230 20231 20232 20233 20234 20235 20236 20237 20238 20239 20240 20241 20242 20243 20244 20245 20246 20247 20248 20249 20250 20251 20252 20253 20254 20255 20256 20257 20258 20259 20260 20261 20262 20263 20264 20265 20266 20267 20268 20269 20270 20271 20272 20273 20274 20275 20276 
20277 20278 20279 20280 20281 20301 20302 20303 20304 20305 20306 20307 20308 20309 20310 20311 20312 20313 20314 20315 20316 20317 20318 20319 20320 20321 20322 20323 20324 20325 20326 20327 20328 20329 20330 20331 20332 20333 20334 20335 20336 20337 20338 20339 20340 20341 20342 20343 20344 20345 20346 20347 20348 20349 20350 20351 20352 20353 20354 20355 20356 20357 20358 20359 20360 20361 20362 20363 20364 20365 20366 20367 20368 20369 20370 20371 20372 20373 20374 20375 20376 20377 20378 20379 20380 20381 20401 20402 20403 20404 20405 20406 20407 20408 20409 20410 20411 20412 20413 20414 20415 20416 20417 20418 20419 20420 20421 20422 20423 20424 20425 20426 20427 20428 20429 20430 20431 20432 20433 20434 20435 20436 20437 20438 20439 20440 20441 20442 20443 20444 20445 20446 20447 20448 20449 20450 20451 20452 20453 20454 20455 20456 20457 20458 20459 20460 20461 20462 20463 20464 20465 20466 20467 20468 20469 20470 20471 20472 20473 20474 20475 20476 20477 20478 20479 20480 20481 20501 20502 20503 20504 20505 20506 20507 20508 20509 20510 20511 20512 20513 20514 20515 20516 20517 20518 20519 20520 20521 20522 20523 20524 20525 20526 20527 20528 20529 20530 20531 20532 20533 20534 20535 20536 20537 20538 20539 20540 20541 20542 20543 20544 20545 20546 20547 20548 20549 20550 20551 20552 20553 20554 20555 20556 20557 20558 20559 20560 20561 20562 20563 20564 20565 20566 20567 20568 20569 20570 20571 20572 20573 20574 20575 20576 20577 20578 20579 20580 20581 20601 20602 20603 20604 20605 20606 20607 20608 20609 20610 20611 20612 20613 20614 20615 20616 20617 20618 20619 20620 20621 20622 20623 20624 20625 20626 20627 20628 20629 20630 20631 20632 20633 20634 20635 20636 20637 20638 20639 20640 20641 20642 20643 20644 20645 20646 20647 20648 20649 20650 20651 20652 20653 20654 20655 20656 20657 20658 20659 20660 20661 20662 20663 20664 20665 20666 20667 20668 20669 20670 20671 20672 20673 20674 20675 20676 20677 20678 20679 20680 20681 20701 20702 20703 20704 
20705 20706 20707 20708 20709 20710 20711 20712 20713 20714 20715 20716 20717 20718 20719 20720 20721 20722 20723 20724 20725 20726 20727 20728 20729 20730 20731 20732 20733 20734 20735 20736 20737 20738 20739 20740 20741 20742 20743 20744 20745 20746 20747 20748 20749 20750 20751 20752 20753 20754 20755 20756 20757 20758 20759 20760 20761 20762 20763 20764 20765 20766 20767 20768 20769 20770 20771 20772 20773 20774 20775 20776 20777 20778 20779 20780 20781 20801 20802 20803 20804 20805 20806 20807 20808 20809 20810 20811 20812 20813 20814 20815 20816 20817 20818 20819 20820 20821 20822 20823 20824 20825 20826 20827 20828 20829 20830 20831 20832 20833 20834 20835 20836 20837 20838 20839 20840 20841 20842 20843 20844 20845 20846 20847 20848 20849 20850 20851 20852 20853 20854 20855 20856 20857 20858 20859 20860 20861 20862 20863 20864 20865 20866 20867 20868 20869 20870 20871 20872 20873 20874 20875 20876 20877 20878 20879 20880 20881 20901 20902 20903 20904 20905 20906 20907 20908 20909 20910 20911 20912 20913 20914 20915 20916 20917 20918 20919 20920 20921 20922 20923 20924 20925 20926 20927 20928 20929 20930 20931 20932 20933 20934 20935 20936 20937 20938 20939 20940 20941 20942 20943 20944 20945 20946 20947 20948 20949 20950 20951 20952 20953 20954 20955 20956 20957 20958 20959 20960 20961 20962 20963 20964 20965 20966 20967 20968 20969 20970 20971 20972 20973 20974 20975 20976 20977 20978 20979 20980 20981 80101 80102 80103 80104 80105 80106 80107 80108 80109 80110 80111 80112 80113 80114 80115 80116 80117 80118 80119 80120 80121 80122 80123 80124 80125 80126 80127 80128 80129 80130 80131 80132 80133 80134 80135 80136 80137 80138 80139 80140 80141 80142 80143 80144 80145 80146 80147 80148 80149 80150 80151 80152 80153 80154 80155 80156 80157 80158 80159 80160 80161 80162 80163 80164 80165 80166 80167 80168 80169 80170 80171 80172 80173 80174 80175 80176 80177 80178 80179 80180 80201 80202 80203 80204 80205 80206 80207 80208 80209 80210 80211 80212 80213 80214 
80215 80216 80217 80218 80219 80220 80221 80222 80223 80224 80225 80226 80227 80228 80229 80230 80231 80232 80233 80234 80235 80236 80237 80238 80239 80240 80241 80242 80243 80244 80245 80246 80247 80248 80249 80250 80251 80252 80253 80254 80255 80256 80257 80258 80259 80260 80261 80262 80263 80264 80265 80266 80267 80268 80269 80270 80271 80272 80273 80274 80275 80276 80277 80278 80279 80280 80301 80302 80303 80304 80305 80306 80307 80308 80309 80310 80311 80312 80313 80314 80315 80316 80317 80318 80319 80320 80321 80322 80323 80324 80325 80326 80327 80328 80329 80330 80331 80332 80333 80334 80335 80336 80337 80338 80339 80340 80341 80342 80343 80344 80345 80346 80347 80348 80349 80350 80351 80352 80353 80354 80355 80356 80357 80358 80359 80360 80361 80362 80363 80364 80365 80366 80367 80368 80369 80370 80371 80372 80373 80374 80375 80376 80377 80378 80379 80380 80401 80402 80403 80404 80405 80406 80407 80408 80409 80410 80411 80412 80413 80414 80415 80416 80417 80418 80419 80420 80421 80422 80423 80424 80425 80426 80427 80428 80429 80430 80431 80432 80433 80434 80435 80436 80437 80438 80439 80440 80441 80442 80443 80444 80445 80446 80447 80448 80449 80450 80451 80452 80453 80454 80455 80456 80457 80458 80459 80460 80461 80462 80463 80464 80465 80466 80467 80468 80469 80470 80471 80472 80473 80474 80475 80476 80477 80478 80479 80480 80501 80502 80503 80504 80505 80506 80507 80508 80509 80510 80511 80512 80513 80514 80515 80516 80517 80518 80519 80520 80521 80522 80523 80524 80525 80526 80527 80528 80529 80530 80531 80532 80533 80534 80535 80536 80537 80538 80539 80540 80541 80542 80543 80544 80545 80546 80547 80548 80549 80550 80551 80552 80553 80554 80555 80556 80557 80558 80559 80560 80561 80562 80563 80564 80565 80566 80567 80568 80569 80570 80571 80572 80573 80574 80575 80576 80577 80578 80579 80580 80601 80602 80603 80604 80605 80606 80607 80608 80609 80610 80611 80612 80613 80614 80615 80616 80617 80618 80619 80620 80621 80622 80623 80624 80625 80626 80627 
80628 80629 80630 80631 80632 80633 80634 80635 80636 80637 80638 80639 80640 80641 80642 80643 80644 80645 80646 80647 80648 80649 80650 80651 80652 80653 80654 80655 80656 80657 80658 80659 80660 80661 80662 80663 80664 80665 80666 80667 80668 80669 80670 80671 80672 80673 80674 80675 80676 80677 80678 80679 80680 80701 80702 80703 80704 80705 80706 80707 80708 80709 80710 80711 80712 80713 80714 80715 80716 80717 80718 80719 80720 80721 80722 80723 80724 80725 80726 80727 80728 80729 80730 80731 80732 80733 80734 80735 80736 80737 80738 80739 80740 80741 80742 80743 80744 80745 80746 80747 80748 80749 80750 80751 80752 80753 80754 80755 80756 80757 80758 80759 80760 80761 80762 80763 80764 80765 80766 80767 80768 80769 80770 80771 80772 80773 80774 80775 80776 80777 80778 80779 80780 80801 80802 80803 80804 80805 80806 80807 80808 80809 80810 80811 80812 80813 80814 80815 80816 80817 80818 80819 80820 80821 80822 80823 80824 80825 80826 80827 80828 80829 80830 80831 80832 80833 80834 80835 80836 80837 80838 80839 80840 80841 80842 80843 80844 80845 80846 80847 80848 80849 80850 80851 80852 80853 80854 80855 80856 80857 80858 80859 80860 80861 80862 80863 80864 80865 80866 80867 80868 80869 80870 80871 80872 80873 80874 80875 80876 80877 80878 80879 80880 80901 80902 80903 80904 80905 80906 80907 80908 80909 80910 80911 80912 80913 80914 80915 80916 80917 80918 80919 80920 80921 80922 80923 80924 80925 80926 80927 80928 80929 80930 80931 80932 80933 80934 80935 80936 80937 80938 80939 80940 80941 80942 80943 80944 80945 80946 80947 80948 80949 80950 80951 80952 80953 80954 80955 80956 80957 80958 80959 80960 80961 80962 80963 80964 80965 80966 80967 80968 80969 80970 80971 80972 80973 80974 80975 80976 80977 80978 80979 80980 81102 81104 81106 81108 81110 81112 81114 81116 81118 81120 81122 81124 81126 81128 81130 81132 81134 81136 81138 81140 81142 81144 81146 81148 81150 81152 81154 81156 81158 81160 81162 81164 81166 81168 81170 81172 81174 81176 81178 81180 
81202 81204 81206 81208 81210 81212 81214 81216 81218 81220 81222 81224 81226 81228 81230 81232 81234 81236 81238 81240 81242 81244 81246 81248 81250 81252 81254 81256 81258 81260 81262 81264 81266 81268 81270 81272 81274 81276 81278 81280 81302 81304 81306 81308 81310 81312 81314 81316 81318 81320 81322 81324 81326 81328 81330 81332 81334 81336 81338 81340 81342 81344 81346 81348 81350 81352 81354 81356 81358 81360 81362 81364 81366 81368 81370 81372 81374 81376 81378 81380 81402 81404 81406 81408 81410 81412 81414 81416 81418 81420 81422 81424 81426 81428 81430 81432 81434 81436 81438 81440 81442 81444 81446 81448 81450 81452 81454 81456 81458 81460 81462 81464 81466 81468 81470 81472 81474 81476 81478 81480 81502 81504 81506 81508 81510 81512 81514 81516 81518 81520 81522 81524 81526 81528 81530 81532 81534 81536 81538 81540 81542 81544 81546 81548 81550 81552 81554 81556 81558 81560 81562 81564 81566 81568 81570 81572 81574 81576 81578 81580 81602 81604 81606 81608 81610 81612 81614 81616 81618 81620 81622 81624 81626 81628 81630 81632 81634 81636 81638 81640 81642 81644 81646 81648 81650 81652 81654 81656 81658 81660 81662 81664 81666 81668 81670 81672 81674 81676 81678 81680 81702 81704 81706 81708 81710 81712 81714 81716 81718 81720 81722 81724 81726 81728 81730 81732 81734 81736 81738 81740 81742 81744 81746 81748 81750 81752 81754 81756 81758 81760 81762 81764 81766 81768 81770 81772 81774 81776 81778 81780 81802 81804 81806 81808 81810 81812 81814 81816 81818 81820 81822 81824 81826 81828 81830 81832 81834 81836 81838 81840 81842 81844 81846 81848 81850 81852 81854 81856 81858 81860 81862 81864 81866 81868 81870 81872 81874 81876 81878 81880 81902 81904 81906 81908 81910 81912 81914 81916 81918 81920 81922 81924 81926 81928 81930 81932 81934 81936 81938 81940 81942 81944 81946 81948 81950 81952 81954 81956 81958 81960 81962 81964 81966 81968 81970 81972 81974 81976 81978 81980 82102 82104 82106 82108 82110 82112 82114 82116 82118 82120 82122 82124 82126 
82128 82130 82132 82134 82136 82138 82140 82142 82144 82146 82148 82150 82152 82154 82156 82158 82160 82162 82164 82166 82168 82170 82172 82174 82176 82178 82180 82202 82204 82206 82208 82210 82212 82214 82216 82218 82220 82222 82224 82226 82228 82230 82232 82234 82236 82238 82240 82242 82244 82246 82248 82250 82252 82254 82256 82258 82260 82262 82264 82266 82268 82270 82272 82274 82276 82278 82280 82302 82304 82306 82308 82310 82312 82314 82316 82318 82320 82322 82324 82326 82328 82330 82332 82334 82336 82338 82340 82342 82344 82346 82348 82350 82352 82354 82356 82358 82360 82362 82364 82366 82368 82370 82372 82374 82376 82378 82380 82402 82404 82406 82408 82410 82412 82414 82416 82418 82420 82422 82424 82426 82428 82430 82432 82434 82436 82438 82440 82442 82444 82446 82448 82450 82452 82454 82456 82458 82460 82462 82464 82466 82468 82470 82472 82474 82476 82478 82480 82502 82504 82506 82508 82510 82512 82514 82516 82518 82520 82522 82524 82526 82528 82530 82532 82534 82536 82538 82540 82542 82544 82546 82548 82550 82552 82554 82556 82558 82560 82562 82564 82566 82568 82570 82572 82574 82576 82578 82580 82602 82604 82606 82608 82610 82612 82614 82616 82618 82620 82622 82624 82626 82628 82630 82632 82634 82636 82638 82640 82642 82644 82646 82648 82650 82652 82654 82656 82658 82660 82662 82664 82666 82668 82670 82672 82674 82676 82678 82680 82702 82704 82706 82708 82710 82712 82714 82716 82718 82720 82722 82724 82726 82728 82730 82732 82734 82736 82738 82740 82742 82744 82746 82748 82750 82752 82754 82756 82758 82760 82762 82764 82766 82768 82770 82772 82774 82776 82778 82780 82802 82804 82806 82808 82810 82812 82814 82816 82818 82820 82822 82824 82826 82828 82830 82832 82834 82836 82838 82840 82842 82844 82846 82848 82850 82852 82854 82856 82858 82860 82862 82864 82866 82868 82870 82872 82874 82876 82878 82880 82902 82904 82906 82908 82910 82912 82914 82916 82918 82920 82922 82924 82926 82928 82930 82932 82934 82936 82938 82940 82942 82944 82946 82948 82950 82952 
82954 82956 82958 82960 82962 82964 82966 82968 82970 82972 82974 82976 82978 82980 83102 83104 83106 83108 83110 83112 83114 83116 83118 83120 83122 83124 83126 83128 83130 83132 83134 83136 83138 83140 83142 83144 83146 83148 83150 83152 83154 83156 83158 83160 83162 83164 83166 83168 83170 83172 83174 83176 83178 83180 83202 83204 83206 83208 83210 83212 83214 83216 83218 83220 83222 83224 83226 83228 83230 83232 83234 83236 83238 83240 83242 83244 83246 83248 83250 83252 83254 83256 83258 83260 83262 83264 83266 83268 83270 83272 83274 83276 83278 83280 83302 83304 83306 83308 83310 83312 83314 83316 83318 83320 83322 83324 83326 83328 83330 83332 83334 83336 83338 83340 83342 83344 83346 83348 83350 83352 83354 83356 83358 83360 83362 83364 83366 83368 83370 83372 83374 83376 83378 83380 83402 83404 83406 83408 83410 83412 83414 83416 83418 83420 83422 83424 83426 83428 83430 83432 83434 83436 83438 83440 83442 83444 83446 83448 83450 83452 83454 83456 83458 83460 83462 83464 83466 83468 83470 83472 83474 83476 83478 83480 83502 83504 83506 83508 83510 83512 83514 83516 83518 83520 83522 83524 83526 83528 83530 83532 83534 83536 83538 83540 83542 83544 83546 83548 83550 83552 83554 83556 83558 83560 83562 83564 83566 83568 83570 83572 83574 83576 83578 83580 83602 83604 83606 83608 83610 83612 83614 83616 83618 83620 83622 83624 83626 83628 83630 83632 83634 83636 83638 83640 83642 83644 83646 83648 83650 83652 83654 83656 83658 83660 83662 83664 83666 83668 83670 83672 83674 83676 83678 83680 83702 83704 83706 83708 83710 83712 83714 83716 83718 83720 83722 83724 83726 83728 83730 83732 83734 83736 83738 83740 83742 83744 83746 83748 83750 83752 83754 83756 83758 83760 83762 83764 83766 83768 83770 83772 83774 83776 83778 83780 83802 83804 83806 83808 83810 83812 83814 83816 83818 83820 83822 83824 83826 83828 83830 83832 83834 83836 83838 83840 83842 83844 83846 83848 83850 83852 83854 83856 83858 83860 83862 83864 83866 83868 83870 83872 83874 83876 83878 
83880 83902 83904 83906 83908 83910 83912 83914 83916 83918 83920 83922 83924 83926 83928 83930 83932 83934 83936 83938 83940 83942 83944 83946 83948 83950 83952 83954 83956 83958 83960 83962 83964 83966 83968 83970 83972 83974 83976 83978 83980 84102 84104 84106 84108 84110 84112 84114 84116 84118 84120 84122 84124 84126 84128 84130 84132 84134 84136 84138 84140 84142 84144 84146 84148 84150 84152 84154 84156 84158 84160 84162 84164 84166 84168 84170 84172 84174 84176 84178 84180 84202 84204 84206 84208 84210 84212 84214 84216 84218 84220 84222 84224 84226 84228 84230 84232 84234 84236 84238 84240 84242 84244 84246 84248 84250 84252 84254 84256 84258 84260 84262 84264 84266 84268 84270 84272 84274 84276 84278 84280 84302 84304 84306 84308 84310 84312 84314 84316 84318 84320 84322 84324 84326 84328 84330 84332 84334 84336 84338 84340 84342 84344 84346 84348 84350 84352 84354 84356 84358 84360 84362 84364 84366 84368 84370 84372 84374 84376 84378 84380 84402 84404 84406 84408 84410 84412 84414 84416 84418 84420 84422 84424 84426 84428 84430 84432 84434 84436 84438 84440 84442 84444 84446 84448 84450 84452 84454 84456 84458 84460 84462 84464 84466 84468 84470 84472 84474 84476 84478 84480 84502 84504 84506 84508 84510 84512 84514 84516 84518 84520 84522 84524 84526 84528 84530 84532 84534 84536 84538 84540 84542 84544 84546 84548 84550 84552 84554 84556 84558 84560 84562 84564 84566 84568 84570 84572 84574 84576 84578 84580 84602 84604 84606 84608 84610 84612 84614 84616 84618 84620 84622 84624 84626 84628 84630 84632 84634 84636 84638 84640 84642 84644 84646 84648 84650 84652 84654 84656 84658 84660 84662 84664 84666 84668 84670 84672 84674 84676 84678 84680 84702 84704 84706 84708 84710 84712 84714 84716 84718 84720 84722 84724 84726 84728 84730 84732 84734 84736 84738 84740 84742 84744 84746 84748 84750 84752 84754 84756 84758 84760 84762 84764 84766 84768 84770 84772 84774 84776 84778 84780 84802 84804 84806 84808 84810 84812 84814 84816 84818 84820 84822 84824 
84826 84828 84830 84832 84834 84836 84838 84840 84842 84844 84846 84848 84850 84852 84854 84856 84858 84860 84862 84864 84866 84868 84870 84872 84874 84876 84878 84880 84902 84904 84906 84908 84910 84912 84914 84916 84918 84920 84922 84924 84926 84928 84930 84932 84934 84936 84938 84940 84942 84944 84946 84948 84950 84952 84954 84956 84958 84960 84962 84964 84966 84968 84970 84972 84974 84976 84978 84980 85102 85104 85106 85108 85110 85112 85114 85116 85118 85120 85122 85124 85126 85128 85130 85132 85134 85136 85138 85140 85142 85144 85146 85148 85150 85152 85154 85156 85158 85160 85162 85164 85166 85168 85170 85172 85174 85176 85178 85180 85202 85204 85206 85208 85210 85212 85214 85216 85218 85220 85222 85224 85226 85228 85230 85232 85234 85236 85238 85240 85242 85244 85246 85248 85250 85252 85254 85256 85258 85260 85262 85264 85266 85268 85270 85272 85274 85276 85278 85280 85302 85304 85306 85308 85310 85312 85314 85316 85318 85320 85322 85324 85326 85328 85330 85332 85334 85336 85338 85340 85342 85344 85346 85348 85350 85352 85354 85356 85358 85360 85362 85364 85366 85368 85370 85372 85374 85376 85378 85380 85402 85404 85406 85408 85410 85412 85414 85416 85418 85420 85422 85424 85426 85428 85430 85432 85434 85436 85438 85440 85442 85444 85446 85448 85450 85452 85454 85456 85458 85460 85462 85464 85466 85468 85470 85472 85474 85476 85478 85480 85502 85504 85506 85508 85510 85512 85514 85516 85518 85520 85522 85524 85526 85528 85530 85532 85534 85536 85538 85540 85542 85544 85546 85548 85550 85552 85554 85556 85558 85560 85562 85564 85566 85568 85570 85572 85574 85576 85578 85580 85602 85604 85606 85608 85610 85612 85614 85616 85618 85620 85622 85624 85626 85628 85630 85632 85634 85636 85638 85640 85642 85644 85646 85648 85650 85652 85654 85656 85658 85660 85662 85664 85666 85668 85670 85672 85674 85676 85678 85680 85702 85704 85706 85708 85710 85712 85714 85716 85718 85720 85722 85724 85726 85728 85730 85732 85734 85736 85738 85740 85742 85744 85746 85748 85750 
85752 85754 85756 85758 85760 85762 85764 85766 85768 85770 85772 85774 85776 85778 85780 85802 85804 85806 85808 85810 85812 85814 85816 85818 85820 85822 85824 85826 85828 85830 85832 85834 85836 85838 85840 85842 85844 85846 85848 85850 85852 85854 85856 85858 85860 85862 85864 85866 85868 85870 85872 85874 85876 85878 85880 85902 85904 85906 85908 85910 85912 85914 85916 85918 85920 85922 85924 85926 85928 85930 85932 85934 85936 85938 85940 85942 85944 85946 85948 85950 85952 85954 85956 85958 85960 85962 85964 85966 85968 85970 85972 85974 85976 85978 85980 86102 86104 86106 86108 86110 86112 86114 86116 86118 86120 86122 86124 86126 86128 86130 86132 86134 86136 86138 86140 86142 86144 86146 86148 86150 86152 86154 86156 86158 86160 86162 86164 86166 86168 86170 86172 86174 86176 86178 86180 86202 86204 86206 86208 86210 86212 86214 86216 86218 86220 86222 86224 86226 86228 86230 86232 86234 86236 86238 86240 86242 86244 86246 86248 86250 86252 86254 86256 86258 86260 86262 86264 86266 86268 86270 86272 86274 86276 86278 86280 86302 86304 86306 86308 86310 86312 86314 86316 86318 86320 86322 86324 86326 86328 86330 86332 86334 86336 86338 86340 86342 86344 86346 86348 86350 86352 86354 86356 86358 86360 86362 86364 86366 86368 86370 86372 86374 86376 86378 86380 86402 86404 86406 86408 86410 86412 86414 86416 86418 86420 86422 86424 86426 86428 86430 86432 86434 86436 86438 86440 86442 86444 86446 86448 86450 86452 86454 86456 86458 86460 86462 86464 86466 86468 86470 86472 86474 86476 86478 86480 86502 86504 86506 86508 86510 86512 86514 86516 86518 86520 86522 86524 86526 86528 86530 86532 86534 86536 86538 86540 86542 86544 86546 86548 86550 86552 86554 86556 86558 86560 86562 86564 86566 86568 86570 86572 86574 86576 86578 86580 86602 86604 86606 86608 86610 86612 86614 86616 86618 86620 86622 86624 86626 86628 86630 86632 86634 86636 86638 86640 86642 86644 86646 86648 86650 86652 86654 86656 86658 86660 86662 86664 86666 86668 86670 86672 86674 86676 
86678 86680 86702 86704 86706 86708 86710 86712 86714 86716 86718 86720 86722 86724 86726 86728 86730 86732 86734 86736 86738 86740 86742 86744 86746 86748 86750 86752 86754 86756 86758 86760 86762 86764 86766 86768 86770 86772 86774 86776 86778 86780 86802 86804 86806 86808 86810 86812 86814 86816 86818 86820 86822 86824 86826 86828 86830 86832 86834 86836 86838 86840 86842 86844 86846 86848 86850 86852 86854 86856 86858 86860 86862 86864 86866 86868 86870 86872 86874 86876 86878 86880 86902 86904 86906 86908 86910 86912 86914 86916 86918 86920 86922 86924 86926 86928 86930 86932 86934 86936 86938 86940 86942 86944 86946 86948 86950 86952 86954 86956 86958 86960 86962 86964 86966 86968 86970 86972 86974 86976 86978 86980 87102 87104 87106 87108 87110 87112 87114 87116 87118 87120 87122 87124 87126 87128 87130 87132 87134 87136 87138 87140 87142 87144 87146 87148 87150 87152 87154 87156 87158 87160 87162 87164 87166 87168 87170 87172 87174 87176 87178 87180 87202 87204 87206 87208 87210 87212 87214 87216 87218 87220 87222 87224 87226 87228 87230 87232 87234 87236 87238 87240 87242 87244 87246 87248 87250 87252 87254 87256 87258 87260 87262 87264 87266 87268 87270 87272 87274 87276 87278 87280 87302 87304 87306 87308 87310 87312 87314 87316 87318 87320 87322 87324 87326 87328 87330 87332 87334 87336 87338 87340 87342 87344 87346 87348 87350 87352 87354 87356 87358 87360 87362 87364 87366 87368 87370 87372 87374 87376 87378 87380 87402 87404 87406 87408 87410 87412 87414 87416 87418 87420 87422 87424 87426 87428 87430 87432 87434 87436 87438 87440 87442 87444 87446 87448 87450 87452 87454 87456 87458 87460 87462 87464 87466 87468 87470 87472 87474 87476 87478 87480 87502 87504 87506 87508 87510 87512 87514 87516 87518 87520 87522 87524 87526 87528 87530 87532 87534 87536 87538 87540 87542 87544 87546 87548 87550 87552 87554 87556 87558 87560 87562 87564 87566 87568 87570 87572 87574 87576 87578 87580 87602 87604 87606 87608 87610 87612 87614 87616 87618 87620 87622 
87624 87626 87628 87630 87632 87634 87636 87638 87640 87642 87644 87646 87648 87650 87652 87654 87656 87658 87660 87662 87664 87666 87668 87670 87672 87674 87676 87678 87680 87702 87704 87706 87708 87710 87712 87714 87716 87718 87720 87722 87724 87726 87728 87730 87732 87734 87736 87738 87740 87742 87744 87746 87748 87750 87752 87754 87756 87758 87760 87762 87764 87766 87768 87770 87772 87774 87776 87778 87780 87802 87804 87806 87808 87810 87812 87814 87816 87818 87820 87822 87824 87826 87828 87830 87832 87834 87836 87838 87840 87842 87844 87846 87848 87850 87852 87854 87856 87858 87860 87862 87864 87866 87868 87870 87872 87874 87876 87878 87880 87902 87904 87906 87908 87910 87912 87914 87916 87918 87920 87922 87924 87926 87928 87930 87932 87934 87936 87938 87940 87942 87944 87946 87948 87950 87952 87954 87956 87958 87960 87962 87964 87966 87968 87970 87972 87974 87976 87978 87980 88102 88104 88106 88108 88110 88112 88114 88116 88118 88120 88122 88124 88126 88128 88130 88132 88134 88136 88138 88140 88142 88144 88146 88148 88150 88152 88154 88156 88158 88160 88162 88164 88166 88168 88170 88172 88174 88176 88178 88180 88202 88204 88206 88208 88210 88212 88214 88216 88218 88220 88222 88224 88226 88228 88230 88232 88234 88236 88238 88240 88242 88244 88246 88248 88250 88252 88254 88256 88258 88260 88262 88264 88266 88268 88270 88272 88274 88276 88278 88280 88302 88304 88306 88308 88310 88312 88314 88316 88318 88320 88322 88324 88326 88328 88330 88332 88334 88336 88338 88340 88342 88344 88346 88348 88350 88352 88354 88356 88358 88360 88362 88364 88366 88368 88370 88372 88374 88376 88378 88380 88402 88404 88406 88408 88410 88412 88414 88416 88418 88420 88422 88424 88426 88428 88430 88432 88434 88436 88438 88440 88442 88444 88446 88448 88450 88452 88454 88456 88458 88460 88462 88464 88466 88468 88470 88472 88474 88476 88478 88480 88502 88504 88506 88508 88510 88512 88514 88516 88518 88520 88522 88524 88526 88528 88530 88532 88534 88536 88538 88540 88542 88544 88546 88548 
88550 88552 88554 88556 88558 88560 88562 88564 88566 88568 88570 88572 88574 88576 88578 88580 88602 88604 88606 88608 88610 88612 88614 88616 88618 88620 88622 88624 88626 88628 88630 88632 88634 88636 88638 88640 88642 88644 88646 88648 88650 88652 88654 88656 88658 88660 88662 88664 88666 88668 88670 88672 88674 88676 88678 88680 88702 88704 88706 88708 88710 88712 88714 88716 88718 88720 88722 88724 88726 88728 88730 88732 88734 88736 88738 88740 88742 88744 88746 88748 88750 88752 88754 88756 88758 88760 88762 88764 88766 88768 88770 88772 88774 88776 88778 88780 88802 88804 88806 88808 88810 88812 88814 88816 88818 88820 88822 88824 88826 88828 88830 88832 88834 88836 88838 88840 88842 88844 88846 88848 88850 88852 88854 88856 88858 88860 88862 88864 88866 88868 88870 88872 88874 88876 88878 88880 88902 88904 88906 88908 88910 88912 88914 88916 88918 88920 88922 88924 88926 88928 88930 88932 88934 88936 88938 88940 88942 88944 88946 88948 88950 88952 88954 88956 88958 88960 88962 88964 88966 88968 88970 88972 88974 88976 88978 88980 89102 89104 89106 89108 89110 89112 89114 89116 89118 89120 89122 89124 89126 89128 89130 89132 89134 89136 89138 89140 89142 89144 89146 89148 89150 89152 89154 89156 89158 89160 89162 89164 89166 89168 89170 89172 89174 89176 89178 89180 89202 89204 89206 89208 89210 89212 89214 89216 89218 89220 89222 89224 89226 89228 89230 89232 89234 89236 89238 89240 89242 89244 89246 89248 89250 89252 89254 89256 89258 89260 89262 89264 89266 89268 89270 89272 89274 89276 89278 89280 89302 89304 89306 89308 89310 89312 89314 89316 89318 89320 89322 89324 89326 89328 89330 89332 89334 89336 89338 89340 89342 89344 89346 89348 89350 89352 89354 89356 89358 89360 89362 89364 89366 89368 89370 89372 89374 89376 89378 89380 89402 89404 89406 89408 89410 89412 89414 89416 89418 89420 89422 89424 89426 89428 89430 89432 89434 89436 89438 89440 89442 89444 89446 89448 89450 89452 89454 89456 89458 89460 89462 89464 89466 89468 89470 89472 89474 
89476 89478 89480 89502 89504 89506 89508 89510 89512 89514 89516 89518 89520 89522 89524 89526 89528 89530 89532 89534 89536 89538 89540 89542 89544 89546 89548 89550 89552 89554 89556 89558 89560 89562 89564 89566 89568 89570 89572 89574 89576 89578 89580 89602 89604 89606 89608 89610 89612 89614 89616 89618 89620 89622 89624 89626 89628 89630 89632 89634 89636 89638 89640 89642 89644 89646 89648 89650 89652 89654 89656 89658 89660 89662 89664 89666 89668 89670 89672 89674 89676 89678 89680 89702 89704 89706 89708 89710 89712 89714 89716 89718 89720 89722 89724 89726 89728 89730 89732 89734 89736 89738 89740 89742 89744 89746 89748 89750 89752 89754 89756 89758 89760 89762 89764 89766 89768 89770 89772 89774 89776 89778 89780 89802 89804 89806 89808 89810 89812 89814 89816 89818 89820 89822 89824 89826 89828 89830 89832 89834 89836 89838 89840 89842 89844 89846 89848 89850 89852 89854 89856 89858 89860 89862 89864 89866 89868 89870 89872 89874 89876 89878 89880 89902 89904 89906 89908 89910 89912 89914 89916 89918 89920 89922 89924 89926 89928 89930 89932 89934 89936 89938 89940 89942 89944 89946 89948 89950 89952 89954 89956 89958 89960 89962 89964 89966 89968 89970 89972 89974 89976 89978 89980 90101 90102 90103 90104 90105 90106 90107 90108 90109 90110 90111 90112 90113 90114 90115 90116 90117 90118 90119 90120 90121 90122 90123 90124 90125 90126 90127 90128 90129 90130 90131 90132 90133 90134 90135 90136 90137 90138 90139 90140 90141 90142 90143 90144 90145 90146 90147 90148 90149 90150 90151 90152 90153 90154 90155 90156 90157 90158 90159 90160 90161 90162 90163 90164 90165 90166 90167 90168 90169 90170 90171 90172 90173 90174 90175 90176 90177 90178 90179 90180 90201 90202 90203 90204 90205 90206 90207 90208 90209 90210 90211 90212 90213 90214 90215 90216 90217 90218 90219 90220 90221 90222 90223 90224 90225 90226 90227 90228 90229 90230 90231 90232 90233 90234 90235 90236 90237 90238 90239 90240 90241 90242 90243 90244 90245 90246 90247 90248 90249 90250 
90251 90252 90253 90254 90255 90256 90257 90258 90259 90260 90261 90262 90263 90264 90265 90266 90267 90268 90269 90270 90271 90272 90273 90274 90275 90276 90277 90278 90279 90280 90301 90302 90303 90304 90305 90306 90307 90308 90309 90310 90311 90312 90313 90314 90315 90316 90317 90318 90319 90320 90321 90322 90323 90324 90325 90326 90327 90328 90329 90330 90331 90332 90333 90334 90335 90336 90337 90338 90339 90340 90341 90342 90343 90344 90345 90346 90347 90348 90349 90350 90351 90352 90353 90354 90355 90356 90357 90358 90359 90360 90361 90362 90363 90364 90365 90366 90367 90368 90369 90370 90371 90372 90373 90374 90375 90376 90377 90378 90379 90380 90401 90402 90403 90404 90405 90406 90407 90408 90409 90410 90411 90412 90413 90414 90415 90416 90417 90418 90419 90420 90421 90422 90423 90424 90425 90426 90427 90428 90429 90430 90431 90432 90433 90434 90435 90436 90437 90438 90439 90440 90441 90442 90443 90444 90445 90446 90447 90448 90449 90450 90451 90452 90453 90454 90455 90456 90457 90458 90459 90460 90461 90462 90463 90464 90465 90466 90467 90468 90469 90470 90471 90472 90473 90474 90475 90476 90477 90478 90479 90480 90501 90502 90503 90504 90505 90506 90507 90508 90509 90510 90511 90512 90513 90514 90515 90516 90517 90518 90519 90520 90521 90522 90523 90524 90525 90526 90527 90528 90529 90530 90531 90532 90533 90534 90535 90536 90537 90538 90539 90540 90541 90542 90543 90544 90545 90546 90547 90548 90549 90550 90551 90552 90553 90554 90555 90556 90557 90558 90559 90560 90561 90562 90563 90564 90565 90566 90567 90568 90569 90570 90571 90572 90573 90574 90575 90576 90577 90578 90579 90580 90601 90602 90603 90604 90605 90606 90607 90608 90609 90610 90611 90612 90613 90614 90615 90616 90617 90618 90619 90620 90621 90622 90623 90624 90625 90626 90627 90628 90629 90630 90631 90632 90633 90634 90635 90636 90637 90638 90639 90640 90641 90642 90643 90644 90645 90646 90647 90648 90649 90650 90651 90652 90653 90654 90655 90656 90657 90658 90659 90660 90661 90662 90663 
90664 90665 90666 90667 90668 90669 90670 90671 90672 90673 90674 90675 90676 90677 90678 90679 90680 90701 90702 90703 90704 90705 90706 90707 90708 90709 90710 90711 90712 90713 90714 90715 90716 90717 90718 90719 90720 90721 90722 90723 90724 90725 90726 90727 90728 90729 90730 90731 90732 90733 90734 90735 90736 90737 90738 90739 90740 90741 90742 90743 90744 90745 90746 90747 90748 90749 90750 90751 90752 90753 90754 90755 90756 90757 90758 90759 90760 90761 90762 90763 90764 90765 90766 90767 90768 90769 90770 90771 90772 90773 90774 90775 90776 90777 90778 90779 90780 90801 90802 90803 90804 90805 90806 90807 90808 90809 90810 90811 90812 90813 90814 90815 90816 90817 90818 90819 90820 90821 90822 90823 90824 90825 90826 90827 90828 90829 90830 90831 90832 90833 90834 90835 90836 90837 90838 90839 90840 90841 90842 90843 90844 90845 90846 90847 90848 90849 90850 90851 90852 90853 90854 90855 90856 90857 90858 90859 90860 90861 90862 90863 90864 90865 90866 90867 90868 90869 90870 90871 90872 90873 90874 90875 90876 90877 90878 90879 90880 90901 90902 90903 90904 90905 90906 90907 90908 90909 90910 90911 90912 90913 90914 90915 90916 90917 90918 90919 90920 90921 90922 90923 90924 90925 90926 90927 90928 90929 90930 90931 90932 90933 90934 90935 90936 90937 90938 90939 90940 90941 90942 90943 90944 90945 90946 90947 90948 90949 90950 90951 90952 90953 90954 90955 90956 90957 90958 90959 90960 90961 90962 90963 90964 90965 90966 90967 90968 90969 90970 90971 90972 90973 90974 90975 90976 90977 90978 90979 90980 91101 91103 91105 91107 91109 91111 91113 91115 91117 91119 91121 91123 91125 91127 91129 91131 91133 91135 91137 91139 91141 91143 91145 91147 91149 91151 91153 91155 91157 91159 91161 91163 91165 91167 91169 91171 91173 91175 91177 91179 91201 91203 91205 91207 91209 91211 91213 91215 91217 91219 91221 91223 91225 91227 91229 91231 91233 91235 91237 91239 91241 91243 91245 91247 91249 91251 91253 91255 91257 91259 91261 91263 91265 91267 91269 91271 
91273 91275 91277 91279 91301 91303 91305 91307 91309 91311 91313 91315 91317 91319 91321 91323 91325 91327 91329 91331 91333 91335 91337 91339 91341 91343 91345 91347 91349 91351 91353 91355 91357 91359 91361 91363 91365 91367 91369 91371 91373 91375 91377 91379 91401 91403 91405 91407 91409 91411 91413 91415 91417 91419 91421 91423 91425 91427 91429 91431 91433 91435 91437 91439 91441 91443 91445 91447 91449 91451 91453 91455 91457 91459 91461 91463 91465 91467 91469 91471 91473 91475 91477 91479 91501 91503 91505 91507 91509 91511 91513 91515 91517 91519 91521 91523 91525 91527 91529 91531 91533 91535 91537 91539 91541 91543 91545 91547 91549 91551 91553 91555 91557 91559 91561 91563 91565 91567 91569 91571 91573 91575 91577 91579 91601 91603 91605 91607 91609 91611 91613 91615 91617 91619 91621 91623 91625 91627 91629 91631 91633 91635 91637 91639 91641 91643 91645 91647 91649 91651 91653 91655 91657 91659 91661 91663 91665 91667 91669 91671 91673 91675 91677 91679 91701 91703 91705 91707 91709 91711 91713 91715 91717 91719 91721 91723 91725 91727 91729 91731 91733 91735 91737 91739 91741 91743 91745 91747 91749 91751 91753 91755 91757 91759 91761 91763 91765 91767 91769 91771 91773 91775 91777 91779 91801 91803 91805 91807 91809 91811 91813 91815 91817 91819 91821 91823 91825 91827 91829 91831 91833 91835 91837 91839 91841 91843 91845 91847 91849 91851 91853 91855 91857 91859 91861 91863 91865 91867 91869 91871 91873 91875 91877 91879 91901 91903 91905 91907 91909 91911 91913 91915 91917 91919 91921 91923 91925 91927 91929 91931 91933 91935 91937 91939 91941 91943 91945 91947 91949 91951 91953 91955 91957 91959 91961 91963 91965 91967 91969 91971 91973 91975 91977 91979 92101 92103 92105 92107 92109 92111 92113 92115 92117 92119 92121 92123 92125 92127 92129 92131 92133 92135 92137 92139 92141 92143 92145 92147 92149 92151 92153 92155 92157 92159 92161 92163 92165 92167 92169 92171 92173 92175 92177 92179 92201 92203 92205 92207 92209 92211 92213 92215 92217 
92219 92221 92223 92225 92227 92229 92231 92233 92235 92237 92239 92241 92243 92245 92247 92249 92251 92253 92255 92257 92259 92261 92263 92265 92267 92269 92271 92273 92275 92277 92279 92301 92303 92305 92307 92309 92311 92313 92315 92317 92319 92321 92323 92325 92327 92329 92331 92333 92335 92337 92339 92341 92343 92345 92347 92349 92351 92353 92355 92357 92359 92361 92363 92365 92367 92369 92371 92373 92375 92377 92379 92401 92403 92405 92407 92409 92411 92413 92415 92417 92419 92421 92423 92425 92427 92429 92431 92433 92435 92437 92439 92441 92443 92445 92447 92449 92451 92453 92455 92457 92459 92461 92463 92465 92467 92469 92471 92473 92475 92477 92479 92501 92503 92505 92507 92509 92511 92513 92515 92517 92519 92521 92523 92525 92527 92529 92531 92533 92535 92537 92539 92541 92543 92545 92547 92549 92551 92553 92555 92557 92559 92561 92563 92565 92567 92569 92571 92573 92575 92577 92579 92601 92603 92605 92607 92609 92611 92613 92615 92617 92619 92621 92623 92625 92627 92629 92631 92633 92635 92637 92639 92641 92643 92645 92647 92649 92651 92653 92655 92657 92659 92661 92663 92665 92667 92669 92671 92673 92675 92677 92679 92701 92703 92705 92707 92709 92711 92713 92715 92717 92719 92721 92723 92725 92727 92729 92731 92733 92735 92737 92739 92741 92743 92745 92747 92749 92751 92753 92755 92757 92759 92761 92763 92765 92767 92769 92771 92773 92775 92777 92779 92801 92803 92805 92807 92809 92811 92813 92815 92817 92819 92821 92823 92825 92827 92829 92831 92833 92835 92837 92839 92841 92843 92845 92847 92849 92851 92853 92855 92857 92859 92861 92863 92865 92867 92869 92871 92873 92875 92877 92879 92901 92903 92905 92907 92909 92911 92913 92915 92917 92919 92921 92923 92925 92927 92929 92931 92933 92935 92937 92939 92941 92943 92945 92947 92949 92951 92953 92955 92957 92959 92961 92963 92965 92967 92969 92971 92973 92975 92977 92979 93101 93103 93105 93107 93109 93111 93113 93115 93117 93119 93121 93123 93125 93127 93129 93131 93133 93135 93137 93139 93141 93143 
93145 93147 93149 93151 93153 93155 93157 93159 93161 93163 93165 93167 93169 93171 93173 93175 93177 93179 93201 93203 93205 93207 93209 93211 93213 93215 93217 93219 93221 93223 93225 93227 93229 93231 93233 93235 93237 93239 93241 93243 93245 93247 93249 93251 93253 93255 93257 93259 93261 93263 93265 93267 93269 93271 93273 93275 93277 93279 93301 93303 93305 93307 93309 93311 93313 93315 93317 93319 93321 93323 93325 93327 93329 93331 93333 93335 93337 93339 93341 93343 93345 93347 93349 93351 93353 93355 93357 93359 93361 93363 93365 93367 93369 93371 93373 93375 93377 93379 93401 93403 93405 93407 93409 93411 93413 93415 93417 93419 93421 93423 93425 93427 93429 93431 93433 93435 93437 93439 93441 93443 93445 93447 93449 93451 93453 93455 93457 93459 93461 93463 93465 93467 93469 93471 93473 93475 93477 93479 93501 93503 93505 93507 93509 93511 93513 93515 93517 93519 93521 93523 93525 93527 93529 93531 93533 93535 93537 93539 93541 93543 93545 93547 93549 93551 93553 93555 93557 93559 93561 93563 93565 93567 93569 93571 93573 93575 93577 93579 93601 93603 93605 93607 93609 93611 93613 93615 93617 93619 93621 93623 93625 93627 93629 93631 93633 93635 93637 93639 93641 93643 93645 93647 93649 93651 93653 93655 93657 93659 93661 93663 93665 93667 93669 93671 93673 93675 93677 93679 93701 93703 93705 93707 93709 93711 93713 93715 93717 93719 93721 93723 93725 93727 93729 93731 93733 93735 93737 93739 93741 93743 93745 93747 93749 93751 93753 93755 93757 93759 93761 93763 93765 93767 93769 93771 93773 93775 93777 93779 93801 93803 93805 93807 93809 93811 93813 93815 93817 93819 93821 93823 93825 93827 93829 93831 93833 93835 93837 93839 93841 93843 93845 93847 93849 93851 93853 93855 93857 93859 93861 93863 93865 93867 93869 93871 93873 93875 93877 93879 93901 93903 93905 93907 93909 93911 93913 93915 93917 93919 93921 93923 93925 93927 93929 93931 93933 93935 93937 93939 93941 93943 93945 93947 93949 93951 93953 93955 93957 93959 93961 93963 93965 93967 93969 
93971 93973 93975 93977 93979 94101 94103 94105 94107 94109 94111 94113 94115 94117 94119 94121 94123 94125 94127 94129 94131 94133 94135 94137 94139 94141 94143 94145 94147 94149 94151 94153 94155 94157 94159 94161 94163 94165 94167 94169 94171 94173 94175 94177 94179 94201 94203 94205 94207 94209 94211 94213 94215 94217 94219 94221 94223 94225 94227 94229 94231 94233 94235 94237 94239 94241 94243 94245 94247 94249 94251 94253 94255 94257 94259 94261 94263 94265 94267 94269 94271 94273 94275 94277 94279 94301 94303 94305 94307 94309 94311 94313 94315 94317 94319 94321 94323 94325 94327 94329 94331 94333 94335 94337 94339 94341 94343 94345 94347 94349 94351 94353 94355 94357 94359 94361 94363 94365 94367 94369 94371 94373 94375 94377 94379 94401 94403 94405 94407 94409 94411 94413 94415 94417 94419 94421 94423 94425 94427 94429 94431 94433 94435 94437 94439 94441 94443 94445 94447 94449 94451 94453 94455 94457 94459 94461 94463 94465 94467 94469 94471 94473 94475 94477 94479 94501 94503 94505 94507 94509 94511 94513 94515 94517 94519 94521 94523 94525 94527 94529 94531 94533 94535 94537 94539 94541 94543 94545 94547 94549 94551 94553 94555 94557 94559 94561 94563 94565 94567 94569 94571 94573 94575 94577 94579 94601 94603 94605 94607 94609 94611 94613 94615 94617 94619 94621 94623 94625 94627 94629 94631 94633 94635 94637 94639 94641 94643 94645 94647 94649 94651 94653 94655 94657 94659 94661 94663 94665 94667 94669 94671 94673 94675 94677 94679 94701 94703 94705 94707 94709 94711 94713 94715 94717 94719 94721 94723 94725 94727 94729 94731 94733 94735 94737 94739 94741 94743 94745 94747 94749 94751 94753 94755 94757 94759 94761 94763 94765 94767 94769 94771 94773 94775 94777 94779 94801 94803 94805 94807 94809 94811 94813 94815 94817 94819 94821 94823 94825 94827 94829 94831 94833 94835 94837 94839 94841 94843 94845 94847 94849 94851 94853 94855 94857 94859 94861 94863 94865 94867 94869 94871 94873 94875 94877 94879 94901 94903 94905 94907 94909 94911 94913 94915 
94917 94919 94921 94923 94925 94927 94929 94931 94933 94935 94937 94939 94941 94943 94945 94947 94949 94951 94953 94955 94957 94959 94961 94963 94965 94967 94969 94971 94973 94975 94977 94979 95101 95103 95105 95107 95109 95111 95113 95115 95117 95119 95121 95123 95125 95127 95129 95131 95133 95135 95137 95139 95141 95143 95145 95147 95149 95151 95153 95155 95157 95159 95161 95163 95165 95167 95169 95171 95173 95175 95177 95179 95201 95203 95205 95207 95209 95211 95213 95215 95217 95219 95221 95223 95225 95227 95229 95231 95233 95235 95237 95239 95241 95243 95245 95247 95249 95251 95253 95255 95257 95259 95261 95263 95265 95267 95269 95271 95273 95275 95277 95279 95301 95303 95305 95307 95309 95311 95313 95315 95317 95319 95321 95323 95325 95327 95329 95331 95333 95335 95337 95339 95341 95343 95345 95347 95349 95351 95353 95355 95357 95359 95361 95363 95365 95367 95369 95371 95373 95375 95377 95379 95401 95403 95405 95407 95409 95411 95413 95415 95417 95419 95421 95423 95425 95427 95429 95431 95433 95435 95437 95439 95441 95443 95445 95447 95449 95451 95453 95455 95457 95459 95461 95463 95465 95467 95469 95471 95473 95475 95477 95479 95501 95503 95505 95507 95509 95511 95513 95515 95517 95519 95521 95523 95525 95527 95529 95531 95533 95535 95537 95539 95541 95543 95545 95547 95549 95551 95553 95555 95557 95559 95561 95563 95565 95567 95569 95571 95573 95575 95577 95579 95601 95603 95605 95607 95609 95611 95613 95615 95617 95619 95621 95623 95625 95627 95629 95631 95633 95635 95637 95639 95641 95643 95645 95647 95649 95651 95653 95655 95657 95659 95661 95663 95665 95667 95669 95671 95673 95675 95677 95679 95701 95703 95705 95707 95709 95711 95713 95715 95717 95719 95721 95723 95725 95727 95729 95731 95733 95735 95737 95739 95741 95743 95745 95747 95749 95751 95753 95755 95757 95759 95761 95763 95765 95767 95769 95771 95773 95775 95777 95779 95801 95803 95805 95807 95809 95811 95813 95815 95817 95819 95821 95823 95825 95827 95829 95831 95833 95835 95837 95839 95841 
95843 95845 95847 95849 95851 95853 95855 95857 95859 95861 95863 95865 95867 95869 95871 95873 95875 95877 95879 95901 95903 95905 95907 95909 95911 95913 95915 95917 95919 95921 95923 95925 95927 95929 95931 95933 95935 95937 95939 95941 95943 95945 95947 95949 95951 95953 95955 95957 95959 95961 95963 95965 95967 95969 95971 95973 95975 95977 95979 96101 96103 96105 96107 96109 96111 96113 96115 96117 96119 96121 96123 96125 96127 96129 96131 96133 96135 96137 96139 96141 96143 96145 96147 96149 96151 96153 96155 96157 96159 96161 96163 96165 96167 96169 96171 96173 96175 96177 96179 96201 96203 96205 96207 96209 96211 96213 96215 96217 96219 96221 96223 96225 96227 96229 96231 96233 96235 96237 96239 96241 96243 96245 96247 96249 96251 96253 96255 96257 96259 96261 96263 96265 96267 96269 96271 96273 96275 96277 96279 96301 96303 96305 96307 96309 96311 96313 96315 96317 96319 96321 96323 96325 96327 96329 96331 96333 96335 96337 96339 96341 96343 96345 96347 96349 96351 96353 96355 96357 96359 96361 96363 96365 96367 96369 96371 96373 96375 96377 96379 96401 96403 96405 96407 96409 96411 96413 96415 96417 96419 96421 96423 96425 96427 96429 96431 96433 96435 96437 96439 96441 96443 96445 96447 96449 96451 96453 96455 96457 96459 96461 96463 96465 96467 96469 96471 96473 96475 96477 96479 96501 96503 96505 96507 96509 96511 96513 96515 96517 96519 96521 96523 96525 96527 96529 96531 96533 96535 96537 96539 96541 96543 96545 96547 96549 96551 96553 96555 96557 96559 96561 96563 96565 96567 96569 96571 96573 96575 96577 96579 96601 96603 96605 96607 96609 96611 96613 96615 96617 96619 96621 96623 96625 96627 96629 96631 96633 96635 96637 96639 96641 96643 96645 96647 96649 96651 96653 96655 96657 96659 96661 96663 96665 96667 96669 96671 96673 96675 96677 96679 96701 96703 96705 96707 96709 96711 96713 96715 96717 96719 96721 96723 96725 96727 96729 96731 96733 96735 96737 96739 96741 96743 96745 96747 96749 96751 96753 96755 96757 96759 96761 96763 96765 96767 
96769 96771 96773 96775 96777 96779 96801 96803 96805 96807 96809 96811 96813 96815 96817 96819 96821 96823 96825 96827 96829 96831 96833 96835 96837 96839 96841 96843 96845 96847 96849 96851 96853 96855 96857 96859 96861 96863 96865 96867 96869 96871 96873 96875 96877 96879 96901 96903 96905 96907 96909 96911 96913 96915 96917 96919 96921 96923 96925 96927 96929 96931 96933 96935 96937 96939 96941 96943 96945 96947 96949 96951 96953 96955 96957 96959 96961 96963 96965 96967 96969 96971 96973 96975 96977 96979 97101 97103 97105 97107 97109 97111 97113 97115 97117 97119 97121 97123 97125 97127 97129 97131 97133 97135 97137 97139 97141 97143 97145 97147 97149 97151 97153 97155 97157 97159 97161 97163 97165 97167 97169 97171 97173 97175 97177 97179 97201 97203 97205 97207 97209 97211 97213 97215 97217 97219 97221 97223 97225 97227 97229 97231 97233 97235 97237 97239 97241 97243 97245 97247 97249 97251 97253 97255 97257 97259 97261 97263 97265 97267 97269 97271 97273 97275 97277 97279 97301 97303 97305 97307 97309 97311 97313 97315 97317 97319 97321 97323 97325 97327 97329 97331 97333 97335 97337 97339 97341 97343 97345 97347 97349 97351 97353 97355 97357 97359 97361 97363 97365 97367 97369 97371 97373 97375 97377 97379 97401 97403 97405 97407 97409 97411 97413 97415 97417 97419 97421 97423 97425 97427 97429 97431 97433 97435 97437 97439 97441 97443 97445 97447 97449 97451 97453 97455 97457 97459 97461 97463 97465 97467 97469 97471 97473 97475 97477 97479 97501 97503 97505 97507 97509 97511 97513 97515 97517 97519 97521 97523 97525 97527 97529 97531 97533 97535 97537 97539 97541 97543 97545 97547 97549 97551 97553 97555 97557 97559 97561 97563 97565 97567 97569 97571 97573 97575 97577 97579 97601 97603 97605 97607 97609 97611 97613 97615 97617 97619 97621 97623 97625 97627 97629 97631 97633 97635 97637 97639 97641 97643 97645 97647 97649 97651 97653 97655 97657 97659 97661 97663 97665 97667 97669 97671 97673 97675 97677 97679 97701 97703 97705 97707 97709 97711 97713 
97715 97717 97719 97721 97723 97725 97727 97729 97731 97733 97735 97737 97739 97741 97743 97745 97747 97749 97751 97753 97755 97757 97759 97761 97763 97765 97767 97769 97771 97773 97775 97777 97779 97801 97803 97805 97807 97809 97811 97813 97815 97817 97819 97821 97823 97825 97827 97829 97831 97833 97835 97837 97839 97841 97843 97845 97847 97849 97851 97853 97855 97857 97859 97861 97863 97865 97867 97869 97871 97873 97875 97877 97879 97901 97903 97905 97907 97909 97911 97913 97915 97917 97919 97921 97923 97925 97927 97929 97931 97933 97935 97937 97939 97941 97943 97945 97947 97949 97951 97953 97955 97957 97959 97961 97963 97965 97967 97969 97971 97973 97975 97977 97979 98101 98103 98105 98107 98109 98111 98113 98115 98117 98119 98121 98123 98125 98127 98129 98131 98133 98135 98137 98139 98141 98143 98145 98147 98149 98151 98153 98155 98157 98159 98161 98163 98165 98167 98169 98171 98173 98175 98177 98179 98201 98203 98205 98207 98209 98211 98213 98215 98217 98219 98221 98223 98225 98227 98229 98231 98233 98235 98237 98239 98241 98243 98245 98247 98249 98251 98253 98255 98257 98259 98261 98263 98265 98267 98269 98271 98273 98275 98277 98279 98301 98303 98305 98307 98309 98311 98313 98315 98317 98319 98321 98323 98325 98327 98329 98331 98333 98335 98337 98339 98341 98343 98345 98347 98349 98351 98353 98355 98357 98359 98361 98363 98365 98367 98369 98371 98373 98375 98377 98379 98401 98403 98405 98407 98409 98411 98413 98415 98417 98419 98421 98423 98425 98427 98429 98431 98433 98435 98437 98439 98441 98443 98445 98447 98449 98451 98453 98455 98457 98459 98461 98463 98465 98467 98469 98471 98473 98475 98477 98479 98501 98503 98505 98507 98509 98511 98513 98515 98517 98519 98521 98523 98525 98527 98529 98531 98533 98535 98537 98539 98541 98543 98545 98547 98549 98551 98553 98555 98557 98559 98561 98563 98565 98567 98569 98571 98573 98575 98577 98579 98601 98603 98605 98607 98609 98611 98613 98615 98617 98619 98621 98623 98625 98627 98629 98631 98633 98635 98637 98639 
98641 98643 98645 98647 98649 98651 98653 98655 98657 98659 98661 98663 98665 98667 98669 98671 98673 98675 98677 98679 98701 98703 98705 98707 98709 98711 98713 98715 98717 98719 98721 98723 98725 98727 98729 98731 98733 98735 98737 98739 98741 98743 98745 98747 98749 98751 98753 98755 98757 98759 98761 98763 98765 98767 98769 98771 98773 98775 98777 98779 98801 98803 98805 98807 98809 98811 98813 98815 98817 98819 98821 98823 98825 98827 98829 98831 98833 98835 98837 98839 98841 98843 98845 98847 98849 98851 98853 98855 98857 98859 98861 98863 98865 98867 98869 98871 98873 98875 98877 98879 98901 98903 98905 98907 98909 98911 98913 98915 98917 98919 98921 98923 98925 98927 98929 98931 98933 98935 98937 98939 98941 98943 98945 98947 98949 98951 98953 98955 98957 98959 98961 98963 98965 98967 98969 98971 98973 98975 98977 98979 99101 99103 99105 99107 99109 99111 99113 99115 99117 99119 99121 99123 99125 99127 99129 99131 99133 99135 99137 99139 99141 99143 99145 99147 99149 99151 99153 99155 99157 99159 99161 99163 99165 99167 99169 99171 99173 99175 99177 99179 99201 99203 99205 99207 99209 99211 99213 99215 99217 99219 99221 99223 99225 99227 99229 99231 99233 99235 99237 99239 99241 99243 99245 99247 99249 99251 99253 99255 99257 99259 99261 99263 99265 99267 99269 99271 99273 99275 99277 99279 99301 99303 99305 99307 99309 99311 99313 99315 99317 99319 99321 99323 99325 99327 99329 99331 99333 99335 99337 99339 99341 99343 99345 99347 99349 99351 99353 99355 99357 99359 99361 99363 99365 99367 99369 99371 99373 99375 99377 99379 99401 99403 99405 99407 99409 99411 99413 99415 99417 99419 99421 99423 99425 99427 99429 99431 99433 99435 99437 99439 99441 99443 99445 99447 99449 99451 99453 99455 99457 99459 99461 99463 99465 99467 99469 99471 99473 99475 99477 99479 99501 99503 99505 99507 99509 99511 99513 99515 99517 99519 99521 99523 99525 99527 99529 99531 99533 99535 99537 99539 99541 99543 99545 99547 99549 99551 99553 99555 99557 99559 99561 99563 99565 
99567 99569 99571 99573 99575 99577 99579 99601 99603 99605 99607 99609 99611 99613 99615 99617 99619 99621 99623 99625 99627 99629 99631 99633 99635 99637 99639 99641 99643 99645 99647 99649 99651 99653 99655 99657 99659 99661 99663 99665 99667 99669 99671 99673 99675 99677 99679 99701 99703 99705 99707 99709 99711 99713 99715 99717 99719 99721 99723 99725 99727 99729 99731 99733 99735 99737 99739 99741 99743 99745 99747 99749 99751 99753 99755 99757 99759 99761 99763 99765 99767 99769 99771 99773 99775 99777 99779 99801 99803 99805 99807 99809 99811 99813 99815 99817 99819 99821 99823 99825 99827 99829 99831 99833 99835 99837 99839 99841 99843 99845 99847 99849 99851 99853 99855 99857 99859 99861 99863 99865 99867 99869 99871 99873 99875 99877 99879 99901 99903 99905 99907 99909 99911 99913 99915 99917 99919 99921 99923 99925 99927 99929 99931 99933 99935 99937 99939 99941 99943 99945 99947 99949 99951 99953 99955 99957 99959 99961 99963 99965 99967 99969 99971 99973 99975 99977 99979] 0 360 36994 154980 RED
# MEPS (Medical Expenditure Panel Survey) summary-table template:
# median expenditure per event type by age group.
# NOTE(review): this is generated template code -- the tokens '.yy.' and
# '.year.' are placeholders substituted with a concrete year before the
# script is actually run; it is not runnable as-is.

# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)

# The MEPS helper package is installed from GitHub, not CRAN.
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)

# survey-package option: adjust variance estimation for strata that end up
# with a single PSU instead of raising an error.
options(survey.lonely.psu="adjust")

# Load FYC (Full-Year Consolidated) file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.

# Harmonize variable names across survey years:
#  - files up to 2001 carry year-suffixed PSU/stratum variables
#  - files up to 1998 name the person weight WTDPERyy instead of PERWTyyF
#  - the 1996 file uses AGE1X/AGE2X instead of AGE31X/AGE42X
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)

# Negative age codes are missing-value sentinels; AGELAST is the most
# recent non-missing age available for the person.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))

# Constant indicator, usable as an 'overall' grouping variable.
FYC$ind = 1

# Add aggregate event variables
FYC <- FYC %>% mutate(
  HHTEXP.yy. = HHAEXP.yy. + HHNEXP.yy., # Home Health Agency + Independent providers
  ERTEXP.yy. = ERFEXP.yy. + ERDEXP.yy., # Doctor + Facility Expenses for OP, ER, IP events
  IPTEXP.yy. = IPFEXP.yy. + IPDEXP.yy.,
  OPTEXP.yy. = OPFEXP.yy. + OPDEXP.yy., # All Outpatient
  OPYEXP.yy. = OPVEXP.yy. + OPSEXP.yy., # Physician only
  OPZEXP.yy. = OPOEXP.yy. + OPPEXP.yy., # Non-physician only
  OMAEXP.yy. = VISEXP.yy. + OTHEXP.yy.) # Other medical equipment and services

# Count how many distinct event types the person used during the year
# (each logical comparison contributes 0 or 1 to the sum).
FYC <- FYC %>% mutate(
  TOTUSE.yy. = ((DVTOT.yy. > 0) + (RXTOT.yy. > 0) + (OBTOTV.yy. > 0) +
                (OPTOTV.yy. > 0) + (ERTOT.yy. > 0) + (IPDIS.yy. > 0) +
                (HHTOTD.yy. > 0) + (OMAEXP.yy. > 0))
)

# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function
# with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
    breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
    labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
    breaks = c(-1, 17.5 ,64.5, Inf),
    labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
    breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
    labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29", "30-34", "35-44", "45-54", "55-64", "65+")))

# Complex-survey design object: PSU ids, strata and person-level weights.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)

# Loop over event types: median (0.5 quantile) expenditure per age group,
# among persons with positive expenditure for that event type.
events <- c("TOT", "DVT", "RX", "OBV", "OBD", "OBO", "OPT", "OPY", "OPZ", "ERT", "IPT", "HHT", "OMA")
results <- list()
for(ev in events) {
  key <- paste0(ev, "EXP", ".yy.")
  formula <- as.formula(sprintf("~%s", key))
  # subset() keeps the full design structure while restricting the
  # estimation to persons with any expense of this event type.
  results[[key]] <- svyby(formula, FUN = svyquantile, by = ~agegrps,
    design = subset(FYCdsgn, FYC[[key]] > 0),
    quantiles=c(0.5), ci=T, method="constant")
}
print(results)
/mepstrends/hc_use/json/code/r/medEXP__agegrps__event__.r
permissive
RandomCriticalAnalysis/MEPS-summary-tables
R
false
false
2,807
r
# MEPS (Medical Expenditure Panel Survey) summary-table template:
# median expenditure per event type by age group.
# NOTE(review): this is generated template code -- the tokens '.yy.' and
# '.year.' are placeholders substituted with a concrete year before the
# script is actually run; it is not runnable as-is.

# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)

# The MEPS helper package is installed from GitHub, not CRAN.
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)

# survey-package option: adjust variance estimation for strata that end up
# with a single PSU instead of raising an error.
options(survey.lonely.psu="adjust")

# Load FYC (Full-Year Consolidated) file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.

# Harmonize variable names across survey years:
#  - files up to 2001 carry year-suffixed PSU/stratum variables
#  - files up to 1998 name the person weight WTDPERyy instead of PERWTyyF
#  - the 1996 file uses AGE1X/AGE2X instead of AGE31X/AGE42X
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)

# Negative age codes are missing-value sentinels; AGELAST is the most
# recent non-missing age available for the person.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))

# Constant indicator, usable as an 'overall' grouping variable.
FYC$ind = 1

# Add aggregate event variables
FYC <- FYC %>% mutate(
  HHTEXP.yy. = HHAEXP.yy. + HHNEXP.yy., # Home Health Agency + Independent providers
  ERTEXP.yy. = ERFEXP.yy. + ERDEXP.yy., # Doctor + Facility Expenses for OP, ER, IP events
  IPTEXP.yy. = IPFEXP.yy. + IPDEXP.yy.,
  OPTEXP.yy. = OPFEXP.yy. + OPDEXP.yy., # All Outpatient
  OPYEXP.yy. = OPVEXP.yy. + OPSEXP.yy., # Physician only
  OPZEXP.yy. = OPOEXP.yy. + OPPEXP.yy., # Non-physician only
  OMAEXP.yy. = VISEXP.yy. + OTHEXP.yy.) # Other medical equipment and services

# Count how many distinct event types the person used during the year
# (each logical comparison contributes 0 or 1 to the sum).
FYC <- FYC %>% mutate(
  TOTUSE.yy. = ((DVTOT.yy. > 0) + (RXTOT.yy. > 0) + (OBTOTV.yy. > 0) +
                (OPTOTV.yy. > 0) + (ERTOT.yy. > 0) + (IPDIS.yy. > 0) +
                (HHTOTD.yy. > 0) + (OMAEXP.yy. > 0))
)

# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function
# with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
    breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
    labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
    breaks = c(-1, 17.5 ,64.5, Inf),
    labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
    breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
    labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29", "30-34", "35-44", "45-54", "55-64", "65+")))

# Complex-survey design object: PSU ids, strata and person-level weights.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)

# Loop over event types: median (0.5 quantile) expenditure per age group,
# among persons with positive expenditure for that event type.
events <- c("TOT", "DVT", "RX", "OBV", "OBD", "OBO", "OPT", "OPY", "OPZ", "ERT", "IPT", "HHT", "OMA")
results <- list()
for(ev in events) {
  key <- paste0(ev, "EXP", ".yy.")
  formula <- as.formula(sprintf("~%s", key))
  # subset() keeps the full design structure while restricting the
  # estimation to persons with any expense of this event type.
  results[[key]] <- svyby(formula, FUN = svyquantile, by = ~agegrps,
    design = subset(FYCdsgn, FYC[[key]] > 0),
    quantiles=c(0.5), ci=T, method="constant")
}
print(results)
# feature selection #

#' @title Features which are used to build a tree
#' @description Returns the names of the covariates that are used to
#'   partition the covariate space in a fitted tree.
#' @param tree Tree structure made by the getTree() function.
#' @return A character vector of the covariate names used in the tree.
#' @details Each name appears only once, even when the same covariate is
#'   used to split two or more nodes.
#' @examples
#' data(Lung,package="compound.Cox")
#' train_Lung=Lung[which(Lung[,"train"]==TRUE),] #select training data
#' t.vec=train_Lung[,1]
#' d.vec=train_Lung[,2]
#' x.mat=train_Lung[,-c(1,2,3)]
#' res=uni.tree(t.vec,d.vec,x.mat,P.value=0.01,d0=0.01,S.plot=FALSE,score=TRUE)
#' feature.selected(res)
#' @export
feature.selected=function(tree){
  # Pre-order walk over the tree, emitting the splitting covariate of every
  # internal node. A node is list(info, left, right): node[[1]]$Information
  # describes the node, node[[2]]/node[[3]] are the child subtrees.
  walk_node <- function(node) {
    info <- as.character(node[[1]]$Information)
    if (info[1] == "terminal node") {
      # Leaves perform no split, so they contribute no covariate name.
      return(NULL)
    }
    # info[4] holds the name of the covariate used for this split.
    c(info[4], walk_node(node[[2]]), walk_node(node[[3]]))
  }
  # Keep each covariate once, in first-use (pre-order) order.
  unique(walk_node(tree))
}
/R/feature.selected.R
no_license
lichkeam/uni.survival.tree
R
false
false
1,549
r
# feature selection #

#' @title Features which are used to build a tree
#' @description The names of the covariates used to partition the covariate space.
#' @param tree :Tree structure made by getTree() function
#' @return A character vector with the names of the covariates used in the tree
#' @details The sequence only contains unique names (each covariate is counted
#'   once, even when it is used to split two or more nodes).
#' @examples
#' data(Lung,package="compound.Cox")
#' train_Lung=Lung[which(Lung[,"train"]==TRUE),] #select training data
#' t.vec=train_Lung[,1]
#' d.vec=train_Lung[,2]
#' x.mat=train_Lung[,-c(1,2,3)]
#' res=uni.tree(t.vec,d.vec,x.mat,P.value=0.01,d0=0.01,S.plot=FALSE,score=TRUE)
#' feature.selected(res)
#' @export
feature.selected=function(tree){
  # split.covariate records the covariate used to split at the current node,
  # carrying the names accumulated so far (covariate.seq) down the recursion.
  split.covariate=function(tree,covariate.seq=NULL){
    if(as.character(tree[[1]]$Information)[1] == "terminal node"){
      # Leaf: no split happens here; return the names accumulated on the path.
      return(covariate.seq)
    }else{
      covariate.name=as.character(tree[[1]]$Information)[4] # [4] holds the name of the splitting covariate
      covariate.seq=c(covariate.seq,covariate.name) # append the newly selected covariate
      left_subtree = tree[[2]] # [[2]] is the left child
      right_subtree = tree[[3]] # [[3]] is the right child
      return(c(split.covariate(left_subtree,covariate.seq=covariate.seq),split.covariate(right_subtree,covariate.seq=covariate.seq)))
    }
  }
  # Each leaf contributes its whole root path, so names repeat;
  # unique() keeps each covariate once, in first-use order.
  return(unique(split.covariate(tree,covariate.seq=NULL)))
}
# Admissions (admitidos) dashboard charts: builds tables, time series and
# bar/pie charts of admitted students under several breakdowns, using the
# helper functions (tabla, series, torta, barra_vertical, barra_horizontal,
# tablaall) defined in funciones.R and the 'Consolidado' data frame created
# by admitidos-pregrado.R.
library(tidyverse) # version 1.2.1
library(readxl) # version 1.0.0
library(DT) # version 0.4
library(highcharter) # version 0.5.0.9999
library(treemap) # version 2.4-2

source("admitidos-pregrado.R", encoding = 'UTF-8')
source("funciones.R", encoding = 'UTF-8')

# Default chart palette.
col <- c(
  "#8cc63f", # green
  "#f15a24", # orange
  "#0071bc", # bright blue
  "#6d6666", # gray
  "#fbb03b", # yellow
  "#93278f", # purple
  "#29abe2", # light blue
  "#c1272d", # red
  "#8b7355", # brown
  "#855b5b", # wine red
  "#ed1e79") # pink

# Reporting period.
ano <- 2018
semestre <- 1 # 1 or 2 as appropriate
periodo_actual_titulo <- " 2018-I"

# Thematic breakdowns:

############### Age (Edad): ###############
col <- c(
  "#8cc63f", # green, 17 or younger
  "#f15a24", # orange, 18 to 20
  "#0071bc", # bright blue, 21 to 25
  "#6d6666", # gray, 26 or older
  "#fbb03b", # yellow, no information
  "#93278f", # purple
  "#29abe2", # light blue
  "#c1272d", # red
  "#8b7355", # brown
  "#855b5b", # wine red
  "#ed1e79") # pink

################ 1. Table
CAT_EDAD_TABLA <- tabla(
  datos = Consolidado,
  categoria = "CAT_EDAD",
  variable = 'Rango de edad - en años - del admitido',
  mensaje = "Número de admitidos por grupos de edad",
  titulo = "Admitidos por grupos de edad"
);CAT_EDAD_TABLA
# saveWidget(CAT_EDAD_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "CAT_EDAD_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 2. Series
CAT_EDAD_SERIE <- series(
  datos = Consolidado,
  categoria = "CAT_EDAD",
  colores = col,
  titulo = "Número de admitidos por grupos de edad (en años)",
  eje = "Número de admitidos (k: miles)"
);CAT_EDAD_SERIE
# saveWidget(CAT_EDAD_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "CAT_EDAD_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 3. Current period
CAT_EDAD_BARRA <- barra_vertical(
  datos = Consolidado,
  categoria = "CAT_EDAD",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por grupos de edad",
  eje = "Número de admitidos (k: miles)"
); CAT_EDAD_BARRA
# saveWidget(CAT_EDAD_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "CAT_EDAD_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")

############### Sex (Sexo): ###############
col <- c(
  "#8cc63f", # green, men
  "#f15a24", # orange, women
  "#0071bc", # bright blue
  "#6d6666", # gray
  "#fbb03b", # yellow
  "#93278f", # purple
  "#29abe2", # light blue
  "#c1272d", # red
  "#8b7355", # brown
  "#855b5b", # wine red
  "#ed1e79") # pink

################ 1. Table
SEXO_TABLA <- tabla(
  datos = Consolidado,
  categoria = "SEXO",
  variable = 'Sexo del admitido',
  mensaje = "Número de admitidos por sexo",
  titulo = "Admitidos por sexo"
);SEXO_TABLA
# saveWidget(SEXO_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "SEXO_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 2. Series
SEXO_SERIE <- series(
  datos = Consolidado,
  categoria = "SEXO",
  colores = col,
  titulo = "Número de admitidos por sexo",
  eje = "Número de admitidos (k: miles)"
);SEXO_SERIE
# saveWidget(SEXO_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "SEXO_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 3. Current period
SEXO_TORTA <- torta(
  datos = Consolidado,
  variable = "SEXO",
  colores = col,
  titulo = "Admitidos por sexo",
  etiqueta = "Número de admitidos",
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo
);SEXO_TORTA
# saveWidget(SEXO_TORTA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "SEXO_TORTA.html"),
#            selfcontained = F, libdir = "libraryjs")

############### Socioeconomic stratum (Estrato socioeconómico): ###############
col <- c(
  "#8cc63f", # green, stratum 2 or lower
  "#f15a24", # orange, stratum 3
  "#0071bc", # bright blue, stratum 4 or higher
  "#6d6666", # gray, ND/NE
  "#fbb03b", # yellow
  "#93278f", # purple
  "#29abe2", # light blue
  "#c1272d", # red
  "#8b7355", # brown
  "#855b5b", # wine red
  "#ed1e79") # pink

################ 1. Table
ESTRATO_TABLA <- tabla(
  datos = Consolidado,
  categoria = "ESTRATO",
  variable = 'Estrato socioeconómico del admitido',
  mensaje = "Número de admitidos según el estrato socioeconómico",
  titulo = "Admitidos según el estrato socioeconómico"
);ESTRATO_TABLA
# saveWidget(ESTRATO_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 2. Series
ESTRATO_SERIE <- series(
  datos = Consolidado,
  categoria = "ESTRATO",
  colores = col,
  titulo = "Número de admitidos por estrato socioeconómico",
  eje = "Número de admitidos (k: miles)"
);ESTRATO_SERIE
# saveWidget(ESTRATO_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 3. Current period
ESTRATO_TORTA <- torta(
  datos = Consolidado,
  variable = "ESTRATO",
  colores = col,
  titulo = "Admitidos por estrato socioeconómico",
  etiqueta = "Número de admitidos",
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo
);ESTRATO_TORTA
# saveWidget(ESTRATO_TORTA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_TORTA.html"),
#            selfcontained = F, libdir = "libraryjs")

ESTRATO_BARRA <- barra_vertical(
  datos = Consolidado,
  categoria = "ESTRATO",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por estrato socioeconómico",
  eje = "Número de admitidos (k: miles)"
); ESTRATO_BARRA
# saveWidget(ESTRATO_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "ESTRATO_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")

############### Field of knowledge, SNIES classification (Área de conocimiento SNIES): ###############
col <- c(
  "#93278f", # purple, agronomy...
  "#29abe2", # light blue, fine arts
  "#fbb03b", # yellow, sciences of...
  "#f15a24", # orange, social sciences...
  "#0071bc", # bright blue, economics...
  "#8cc63f", # green, engineering...
  "#6d6666", # gray, mathematics...
  "#c1272d", # red, no information
  "#8b7355", # brown
  "#855b5b", # wine red
  "#ed1e79") # pink

################ 1. Table
AREAC_SNIES_TABLA <- tabla(
  datos = Consolidado,
  categoria = "AREAC_SNIES",
  variable = 'Modalidades de los admitidos por área de conocimiento (SNIES)',
  mensaje = "Número de admitidos por área de conocimiento (SNIES)",
  titulo = "Admitidos por área de conocimiento (SNIES)"
);AREAC_SNIES_TABLA
# saveWidget(AREAC_SNIES_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREAC_SNIES_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 2. Series
AREAC_SNIES_SERIE <- series(
  datos = Consolidado,
  categoria = "AREAC_SNIES",
  colores = col,
  titulo = "Número de admitidos por área de conocimiento (SNIES)",
  eje = "Número de admitidos"
);AREAC_SNIES_SERIE
# saveWidget(AREAC_SNIES_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREAC_SNIES_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 3. Current period
AREAC_SNIES_BARRA <- barra_horizontal(
  datos = Consolidado,
  categoria = "AREAC_SNIES",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por área de conocimiento (SNIES)",
  eje = "Número de admitidos"
); AREAC_SNIES_BARRA
# saveWidget(AREAC_SNIES_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREAC_SNIES_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")

############### Field of knowledge, CINE classification (Área de conocimiento CINE): ###############
col <- c(
  "#29abe2", # light blue, administration...
  "#f15a24", # orange, agriculture...
  "#fbb03b", # yellow, arts and humanities
  "#0071bc", # bright blue, natural sciences...
  "#93278f", # purple, social sciences...
  "#8cc63f", # green, engineering...
  "#6d6666", # gray, health and...
  "#8b7355", # brown, no information
  "#c1272d", # red, ICT
  "#855b5b", # wine red
  "#ed1e79") # pink

################ 1. Table
# Rows with missing 'Clase' are excluded from the CINE breakdown.
AREA_CINE_TABLA <- tabla(
  datos = Consolidado %>% filter(is.na(Clase)==FALSE),
  categoria = "AREA_CINE",
  variable = 'Modalidades de los admitidos por área de conocimiento (CINE)',
  mensaje = "Número de admitidos por área de conocimiento (CINE)",
  titulo = "Admitidos por área de conocimiento (CINE)"
);AREA_CINE_TABLA
# saveWidget(AREA_CINE_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREA_CINE_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 2. Series
AREA_CINE_SERIE <- series(
  datos = Consolidado %>% filter(is.na(Clase)==FALSE),
  categoria = "AREA_CINE",
  colores = col,
  titulo = "Número de admitidos por área de conocimiento (CINE)",
  eje = "Número de admitidos"
);AREA_CINE_SERIE
# saveWidget(AREA_CINE_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREA_CINE_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")

################ 3. Current period
AREA_CINE_BARRA <- barra_horizontal(
  datos = Consolidado %>% filter(is.na(Clase)==FALSE),
  categoria = "AREA_CINE",
  colores = col,
  ano = ano,
  periodo = semestre,
  periodo_titulo = periodo_actual_titulo,
  titulo = "Admitidos por área de conocimiento (CINE)",
  eje = "Número de admitidos"
); AREA_CINE_BARRA
# saveWidget(AREA_CINE_BARRA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "AREA_CINE_BARRA.html"),
#            selfcontained = F, libdir = "libraryjs")

############### Series: historical evolution of total admissions ###############
col <- c(
  "#8cc63f", # green, total
  "#f15a24", # orange
  "#0071bc", # bright blue
  "#6d6666", # gray
  "#fbb03b", # yellow
  "#93278f", # purple
  "#29abe2", # light blue
  "#c1272d", # red
  "#8b7355", # brown
  "#855b5b", # wine red
  "#ed1e79") # pink

EVOLUCION_TABLA <- tablaall(
  datos = Consolidado,
  categoria = "TOTAL",
  mensaje = "Número de admitidos",
  titulo = "Admitidos"
);EVOLUCION_TABLA
# saveWidget(EVOLUCION_TABLA,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "EVOLUCION_TABLA.html"),
#            selfcontained = F, libdir = "libraryjs")

EVOLUCION_SERIE <- series(
  datos = Consolidado,
  categoria = "TOTAL",
  colores = col,
  titulo = "Evolución histórica del número total de admitidos a pregrado",
  eje = "Número de admitidos (k: miles)"
);EVOLUCION_SERIE
# saveWidget(EVOLUCION_SERIE,
#            file = file.path(getwd(), "Resultados/Admitidos",
#                             "EVOLUCION_SERIE.html"),
#            selfcontained = F, libdir = "libraryjs")
/public/slides/admitidos-graphs.R
no_license
mamaciasq/martin
R
false
false
12,238
r
library(tidyverse) # version 1.2.1 library(readxl) # version 1.0.0 library(DT) # version 0.4 library(highcharter) # version 0.5.0.9999 library(treemap) # version 2.4-2 source("admitidos-pregrado.R", encoding = 'UTF-8') source("funciones.R", encoding = 'UTF-8') col <- c( "#8cc63f", # verde "#f15a24", # naranja "#0071bc", # azul vivo "#6d6666", # gris "#fbb03b", # amarillo "#93278f", # morado "#29abe2", # azul claro "#c1272d", # rojo "#8b7355", # cafe "#855b5b", # vinotinto "#ed1e79") # rosado ano <- 2018 semestre <- 1 # 1 o 2 según corresponda periodo_actual_titulo <- " 2018-I" # Desagregaciones temáticas: ############### Edad: ############### col <- c( "#8cc63f", # verde, 17 o menos "#f15a24", # naranja, 18 a 20 "#0071bc", # azul vivo, 21 a 25 "#6d6666", # gris, 26 o más "#fbb03b", # amarillo, sin información "#93278f", # morado "#29abe2", # azul claro "#c1272d", # rojo "#8b7355", # cafe "#855b5b", # vinotinto "#ed1e79") # rosado ################ 1. Tabla CAT_EDAD_TABLA <- tabla( datos = Consolidado, categoria = "CAT_EDAD", variable = 'Rango de edad - en años - del admitido', mensaje = "Número de admitidos por grupos de edad", titulo = "Admitidos por grupos de edad" );CAT_EDAD_TABLA # saveWidget(CAT_EDAD_TABLA, # file = file.path(getwd(), "Resultados/Admitidos", # "CAT_EDAD_TABLA.html"), # selfcontained = F, libdir = "libraryjs") ################ 2. Serie CAT_EDAD_SERIE <- series( datos = Consolidado, categoria = "CAT_EDAD", colores = col, titulo = "Número de admitidos por grupos de edad (en años)", eje = "Número de admitidos (k: miles)" );CAT_EDAD_SERIE # saveWidget(CAT_EDAD_SERIE, # file = file.path(getwd(), "Resultados/Admitidos", # "CAT_EDAD_SERIE.html"), # selfcontained = F, libdir = "libraryjs") ################ 3. 
Actual CAT_EDAD_BARRA <- barra_vertical( datos = Consolidado, categoria = "CAT_EDAD", colores = col, ano = ano, periodo = semestre, periodo_titulo = periodo_actual_titulo, titulo = "Admitidos por grupos de edad", eje = "Número de admitidos (k: miles)" ); CAT_EDAD_BARRA # saveWidget(CAT_EDAD_BARRA, # file = file.path(getwd(), "Resultados/Admitidos", # "CAT_EDAD_BARRA.html"), # selfcontained = F, libdir = "libraryjs") ############### Sexo: ############### col <- c( "#8cc63f", # verde, hombres "#f15a24", # naranja, mujeres "#0071bc", # azul vivo "#6d6666", # gris "#fbb03b", # amarillo "#93278f", # morado "#29abe2", # azul claro "#c1272d", # rojo "#8b7355", # cafe "#855b5b", # vinotinto "#ed1e79") # rosado ################ 1. Tabla SEXO_TABLA <- tabla( datos = Consolidado, categoria = "SEXO", variable = 'Sexo del admitido', mensaje = "Número de admitidos por sexo", titulo = "Admitidos por sexo" );SEXO_TABLA # saveWidget(SEXO_TABLA, # file = file.path(getwd(), "Resultados/Admitidos", # "SEXO_TABLA.html"), # selfcontained = F, libdir = "libraryjs") ################ 2. Serie SEXO_SERIE <- series( datos = Consolidado, categoria = "SEXO", colores = col, titulo = "Número de admitidos por sexo", eje = "Número de admitidos (k: miles)" );SEXO_SERIE # saveWidget(SEXO_SERIE, # file = file.path(getwd(), "Resultados/Admitidos", # "SEXO_SERIE.html"), # selfcontained = F, libdir = "libraryjs") ################ 3. 
Actual SEXO_TORTA <- torta( datos = Consolidado, variable = "SEXO", colores = col, titulo = "Admitidos por sexo", etiqueta = "Número de admitidos", ano = ano, periodo = semestre, periodo_titulo = periodo_actual_titulo );SEXO_TORTA # saveWidget(SEXO_TORTA, # file = file.path(getwd(), "Resultados/Admitidos", # "SEXO_TORTA.html"), # selfcontained = F, libdir = "libraryjs") ############### Estrato socioeconómico: ############### col <- c( "#8cc63f", # verde, estrato 2 o menos "#f15a24", # naranja, estrato 3 "#0071bc", # azul vivo, estrato 4 o más "#6d6666", # gris, ND/NE "#fbb03b", # amarillo "#93278f", # morado "#29abe2", # azul claro "#c1272d", # rojo "#8b7355", # cafe "#855b5b", # vinotinto "#ed1e79") # rosado ################ 1. Tabla ESTRATO_TABLA <- tabla( datos = Consolidado, categoria = "ESTRATO", variable = 'Estrato socioeconómico del admitido', mensaje = "Número de admitidos según el estrato socioeconómico", titulo = "Admitidos según el estrato socioeconómico" );ESTRATO_TABLA # saveWidget(ESTRATO_TABLA, # file = file.path(getwd(), "Resultados/Admitidos", # "ESTRATO_TABLA.html"), # selfcontained = F, libdir = "libraryjs") ################ 2. Serie ESTRATO_SERIE <- series( datos = Consolidado, categoria = "ESTRATO", colores = col, titulo = "Número de admitidos por estrato socioeconómico", eje = "Número de admitidos (k: miles)" );ESTRATO_SERIE # saveWidget(ESTRATO_SERIE, # file = file.path(getwd(), "Resultados/Admitidos", # "ESTRATO_SERIE.html"), # selfcontained = F, libdir = "libraryjs") ################ 3. 
Actual ESTRATO_TORTA <- torta( datos = Consolidado, variable = "ESTRATO", colores = col, titulo = "Admitidos por estrato socioeconómico", etiqueta = "Número de admitidos", ano = ano, periodo = semestre, periodo_titulo = periodo_actual_titulo );ESTRATO_TORTA # saveWidget(ESTRATO_TORTA, # file = file.path(getwd(), "Resultados/Admitidos", # "ESTRATO_TORTA.html"), # selfcontained = F, libdir = "libraryjs") ESTRATO_BARRA <- barra_vertical( datos = Consolidado, categoria = "ESTRATO", colores = col, ano = ano, periodo = semestre, periodo_titulo = periodo_actual_titulo, titulo = "Admitidos por estrato socioeconómico", eje = "Número de admitidos (k: miles)" ); ESTRATO_BARRA # saveWidget(CAT_EDAD_BARRA, # file = file.path(getwd(), "Resultados/Admitidos", # "ESTRATO_BARRA.html"), # selfcontained = F, libdir = "libraryjs") ############### Área de conocimiento SNIES: ############### col <- c( "#93278f", # morado, agronomia.. "#29abe2", # azul claro, bellas artes "#fbb03b", # amarillo, ciencias de... "#f15a24", # naranja, ciencias sociales... "#0071bc", # azul vivo, economia... "#8cc63f", # verde, ingenieria... "#6d6666", # gris, matemáticas... "#c1272d", # rojo, sin informacion "#8b7355", # cafe "#855b5b", # vinotinto "#ed1e79") # rosado ################ 1. Tabla AREAC_SNIES_TABLA <- tabla( datos = Consolidado, categoria = "AREAC_SNIES", variable = 'Modalidades de los admitidos por área de conocimiento (SNIES)', mensaje = "Número de admitidos por área de conocimiento (SNIES)", titulo = "Admitidos por área de conocimiento (SNIES)" );AREAC_SNIES_TABLA # saveWidget(AREAC_SNIES_TABLA, # file = file.path(getwd(), "Resultados/Admitidos", # "AREAC_SNIES_TABLA.html"), # selfcontained = F, libdir = "libraryjs") ################ 2. 
Serie AREAC_SNIES_SERIE <- series( datos = Consolidado, categoria = "AREAC_SNIES", colores = col, titulo = "Número de admitidos por área de conocimiento (SNIES)", eje = "Número de admitidos" );AREAC_SNIES_SERIE # # saveWidget(AREAC_SNIES_SERIE, # file = file.path(getwd(), "Resultados/Admitidos", # "AREAC_SNIES_SERIE.html"), # selfcontained = F, libdir = "libraryjs") ################ 3. Actual AREAC_SNIES_BARRA <- barra_horizontal( datos = Consolidado, categoria = "AREAC_SNIES", colores = col, ano = ano, periodo = semestre, periodo_titulo = periodo_actual_titulo, titulo = "Admitidos por área de conocimiento (SNIES)", eje = "Número de admitidos" ); AREAC_SNIES_BARRA # saveWidget(AREAC_SNIES_BARRA, # file = file.path(getwd(), "Resultados/Admitidos", # "AREAC_SNIES_BARRA.html"), # selfcontained = F, libdir = "libraryjs") ############### Área de conocimiento CINE: ############### col <- c( "#29abe2", # azul claro, Administración... "#f15a24", # naranja, Agricultura... "#fbb03b", # amarillo, Artes y humanidades "#0071bc", # azul vivo, Ciencias naturales... "#93278f", # morado, Ciencias sociales... "#8cc63f", # verde, ingenieria... "#6d6666", # gris, Salud y ... "#8b7355", # cafe, sin información "#c1272d", # rojo, TIC "#855b5b", # vinotinto "#ed1e79") # rosado ################ 1. Tabla AREA_CINE_TABLA <- tabla( datos = Consolidado %>% filter(is.na(Clase)==FALSE), categoria = "AREA_CINE", variable = 'Modalidades de los admitidos por área de conocimiento (CINE)', mensaje = "Número de admitidos por área de conocimiento (CINE)", titulo = "Admitidos por área de conocimiento (CINE)" );AREA_CINE_TABLA # saveWidget(AREA_CINE_TABLA, # file = file.path(getwd(), "Resultados/Admitidos", # "AREA_CINE_TABLA.html"), # selfcontained = F, libdir = "libraryjs") ################ 2. 
Serie AREA_CINE_SERIE <- series( datos = Consolidado %>% filter(is.na(Clase)==FALSE), categoria = "AREA_CINE", colores = col, titulo = "Número de admitidos por área de conocimiento (CINE)", eje = "Número de admitidos" );AREA_CINE_SERIE # saveWidget(AREA_CINE_SERIE, # file = file.path(getwd(), "Resultados/Admitidos", # "AREA_CINE_SERIE.html"), # selfcontained = F, libdir = "libraryjs") ################ 3. Actual AREA_CINE_BARRA <- barra_horizontal( datos = Consolidado %>% filter(is.na(Clase)==FALSE), categoria = "AREA_CINE", colores = col, ano = ano, periodo = semestre, periodo_titulo = periodo_actual_titulo, titulo = "Admitidos por área de conocimiento (CINE)", eje = "Número de admitidos" ); AREA_CINE_BARRA # saveWidget(AREA_CINE_BARRA, # file = file.path(getwd(), "Resultados/Admitidos", # "AREA_CINE_BARRA.html"), # selfcontained = F, libdir = "libraryjs") ############### Serie de: Evolución Histórica Total de Admitidos ############### col <- c( "#8cc63f", # verde, Total "#f15a24", # naranja "#0071bc", # azul vivo "#6d6666", # gris "#fbb03b", # amarillo "#93278f", # morado "#29abe2", # azul claro "#c1272d", # rojo "#8b7355", # cafe "#855b5b", # vinotinto "#ed1e79") # rosado EVOLUCION_TABLA <- tablaall( datos = Consolidado, categoria = "TOTAL", mensaje = "Número de admitidos", titulo = "Admitidos" );EVOLUCION_TABLA # saveWidget(ADMITIDO_TABLA, # file = file.path(getwd(), "Resultados/Admitidos", # "EVOLUCION_TABLA.html"), # selfcontained = F, libdir = "libraryjs") EVOLUCION_SERIE <- series( datos = Consolidado, categoria = "TOTAL", colores = col, titulo = "Evolución histórica del número total de admitidos a pregrado", eje = "Número de admitidos (k: miles)" );EVOLUCION_SERIE # saveWidget(EVOLUCION_SERIE, # file = file.path(getwd(), "Resultados/Admitidos", # "EVOLUCION_SERIE.html"), # selfcontained = F, libdir = "libraryjs")
library(stringr) setwd("D:/code/Parser/Parser_for_Biodiversity_Checklists_Formal") getSciname <- function(x){ sciName_index <- grep("^[0-9]", x[, 1]) sciName <- x[sciName_index,] sciName <- gsub("^[0-9]+. ", "", sciName) return(sciName) } getDis <- function(x,length){ Distribution = c() n = 0 cur_dis = "" for (i in 1:length) { if (str_detect(x[i, 1], "Distribution:") && (str_detect(x[i, 1], "[*. ]$"))) { n = n + 1 cur_dis = str_split(x[i, 1], "Distribution: ")[[1]][2] } Distribution[n] = cur_dis if ((str_detect(x[i, 1], "Distribution:")) && (str_detect(x[i, 1], "[*. ]$") == FALSE)) { n = n + 1 dstr_line = "" dstr_line = x[i, 1] if (i < length - 1) { j = i + 1 } while (j < length - 1) { if (grepl("^[0-9]", x[j + 1, 1]) | grepl("^[[:alpha:]]*$", x[j + 1, 1])) { dstr_line <- paste(dstr_line, x[j, 1]) break } else{ dstr_line <- paste(dstr_line, x[j, 1]) j = j + 1 } } cur_dis <- str_split(dstr_line, "Distribution: ")[[1]][2] dstr_line <- "" } Distribution[n] <- cur_dis } return(Distribution) } getFamily <- function(x,sciName){ family_index = grep("^[[:alpha:]]*$", x[, 1]) family_name = x[family_index,] total_num = length(sciName) cur_index = c() cur_num = c() for (i in 1:length(family_index)) { cur_index[i] <- (str_match(as.character(x[family_index[i] + 1, 1]), "^[0-9]+. 
")) cur_index[i] <- str_split(cur_index[i], "[\\.]")[[1]][1] cur_num[i] <- as.integer(cur_index[i]) } cur_num <- c(cur_num, total_num + 1) Family <- rep(family_name, diff(cur_num)) return(Family) } parse_taxolist <- function(filepath,filename,type,sep,output){ # read input file file <- read_file(filepath, filename,type,sep) length = length(file[, 1]) sciName = getSciname(file) Distribution = getDis(file,length) Family = getFamily(file,sciName) full_scientific_name = sciName genus = str_extract_all(full_scientific_name,"^[[:blank:]]?[A-z]+") rest1 = gsub("^[[:blank:]]?[A-z]+","",full_scientific_name) author = str_extract_all(rest1,"[A-Z]{1}.?\\s?&?\\s?[A-z]+.?(.*?)?,") year = str_extract_all(full_scientific_name,"\\s?[0-9]+") table = cbind(as.data.frame(Family),as.data.frame(Distribution)) table$genus = 1 table$author = 1 table$year = 1 for (i in 1:nrow(table)){ table[i,3] = genus[i] table[i,4] = author[i] table[i,5] = year[i] } write.csv(table, file = output, row.names = F) return(table) } read_file < -function(filepath, filename,type,sep){ if (tolower(type) == "txt") { file_path_name<-paste(filepath,"/",filename,sep="") file <- read.table(file_path_name, sep = sep, header = FALSE) } if (tolower(type) == "csv") { file_path_name<-paste(filepath,"/",filename,sep="") file <- read.csv(file_path_name, sep = sep, header = FALSE) } if (tolower(type) == 'pdf') { pdf_file <- file.path(filepath,filename) context <- pdf_text(pdf_file) file <- cat(context) } } # a = parse_taxolist("taxo01.txt","txt","\t","taxo_out01.csv") # b = parse_taxolist("testest.csv","csv","\t","taxo_out02.csv")
/parse_taxolist.R
no_license
XingXiong/Parser_for_Biodiversity_Checklists_2017
R
false
false
3,239
r
library(stringr) setwd("D:/code/Parser/Parser_for_Biodiversity_Checklists_Formal") getSciname <- function(x){ sciName_index <- grep("^[0-9]", x[, 1]) sciName <- x[sciName_index,] sciName <- gsub("^[0-9]+. ", "", sciName) return(sciName) } getDis <- function(x,length){ Distribution = c() n = 0 cur_dis = "" for (i in 1:length) { if (str_detect(x[i, 1], "Distribution:") && (str_detect(x[i, 1], "[*. ]$"))) { n = n + 1 cur_dis = str_split(x[i, 1], "Distribution: ")[[1]][2] } Distribution[n] = cur_dis if ((str_detect(x[i, 1], "Distribution:")) && (str_detect(x[i, 1], "[*. ]$") == FALSE)) { n = n + 1 dstr_line = "" dstr_line = x[i, 1] if (i < length - 1) { j = i + 1 } while (j < length - 1) { if (grepl("^[0-9]", x[j + 1, 1]) | grepl("^[[:alpha:]]*$", x[j + 1, 1])) { dstr_line <- paste(dstr_line, x[j, 1]) break } else{ dstr_line <- paste(dstr_line, x[j, 1]) j = j + 1 } } cur_dis <- str_split(dstr_line, "Distribution: ")[[1]][2] dstr_line <- "" } Distribution[n] <- cur_dis } return(Distribution) } getFamily <- function(x,sciName){ family_index = grep("^[[:alpha:]]*$", x[, 1]) family_name = x[family_index,] total_num = length(sciName) cur_index = c() cur_num = c() for (i in 1:length(family_index)) { cur_index[i] <- (str_match(as.character(x[family_index[i] + 1, 1]), "^[0-9]+. 
")) cur_index[i] <- str_split(cur_index[i], "[\\.]")[[1]][1] cur_num[i] <- as.integer(cur_index[i]) } cur_num <- c(cur_num, total_num + 1) Family <- rep(family_name, diff(cur_num)) return(Family) } parse_taxolist <- function(filepath,filename,type,sep,output){ # read input file file <- read_file(filepath, filename,type,sep) length = length(file[, 1]) sciName = getSciname(file) Distribution = getDis(file,length) Family = getFamily(file,sciName) full_scientific_name = sciName genus = str_extract_all(full_scientific_name,"^[[:blank:]]?[A-z]+") rest1 = gsub("^[[:blank:]]?[A-z]+","",full_scientific_name) author = str_extract_all(rest1,"[A-Z]{1}.?\\s?&?\\s?[A-z]+.?(.*?)?,") year = str_extract_all(full_scientific_name,"\\s?[0-9]+") table = cbind(as.data.frame(Family),as.data.frame(Distribution)) table$genus = 1 table$author = 1 table$year = 1 for (i in 1:nrow(table)){ table[i,3] = genus[i] table[i,4] = author[i] table[i,5] = year[i] } write.csv(table, file = output, row.names = F) return(table) } read_file < -function(filepath, filename,type,sep){ if (tolower(type) == "txt") { file_path_name<-paste(filepath,"/",filename,sep="") file <- read.table(file_path_name, sep = sep, header = FALSE) } if (tolower(type) == "csv") { file_path_name<-paste(filepath,"/",filename,sep="") file <- read.csv(file_path_name, sep = sep, header = FALSE) } if (tolower(type) == 'pdf') { pdf_file <- file.path(filepath,filename) context <- pdf_text(pdf_file) file <- cat(context) } } # a = parse_taxolist("taxo01.txt","txt","\t","taxo_out01.csv") # b = parse_taxolist("testest.csv","csv","\t","taxo_out02.csv")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/noTouch.R \name{noTouch} \alias{noTouch} \title{Extract and parse YAML metadata} \usage{ noTouch(file = NULL) } \arguments{ \item{file}{Path to the metadata.yaml file} } \description{ Process metadata.yml content for use in a template } \details{ This function is only intended to be called from within the YAML header of a Rmarkdown template provided by the DODschools package. When this function in invoked, the YAML metadata is read from \code{file}, processed and then inserted in the appropriate field as needed by the template. Calls to this function made within the YAML header of an Rmarkdown document should not be touched (hence the name). Changes to a document's metadata should be made in the metadata.yml file. }
/man/noTouch.Rd
no_license
Auburngrads/DODschools
R
false
true
871
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/noTouch.R \name{noTouch} \alias{noTouch} \title{Extract and parse YAML metadata} \usage{ noTouch(file = NULL) } \arguments{ \item{file}{Path to the metadata.yaml file} } \description{ Process metadata.yml content for use in a template } \details{ This function is only intended to be called from within the YAML header of a Rmarkdown template provided by the DODschools package. When this function in invoked, the YAML metadata is read from \code{file}, processed and then inserted in the appropriate field as needed by the template. Calls to this function made within the YAML header of an Rmarkdown document should not be touched (hence the name). Changes to a document's metadata should be made in the metadata.yml file. }
sci_figure <- function (experiments, hide_stages = NULL, names_of_stages = TRUE) { if (!all(unlist(lapply(experiments, function(x) { x %in% c("observed", "different", "unobserved", "incorrect") })))) { stop("Invalid cell value in experiments data frame.") } if (ncol(experiments) > 20) { experiments <- experiments[, 1:20] warning("Only showing the first 20 experiments for ease of plotting.") } idx <- !(rownames(experiments) %in% hide_stages) stage_names <- c("Población", "Pregunta", "Hipótesis", "Diseño Exp.", "Experimentador", "Datos", "Plan de Análisis", "Analista", "Código", "Estimador", "Resultados") stage_names <- stage_names[idx] experiments <- experiments[idx, , drop = FALSE] grid::grid.newpage() gptext <- grid::gpar(fontsize = 16 - min(nrow(experiments), 7)) yht <- seq(0.95, 0.05, length = nrow(experiments)) if (names_of_stages) { vp1 <- grid::viewport(x = 0.1, y = 0.5, width = 0.2, height = 0.9) grid::pushViewport(vp1) grid::grid.text(stage_names, x = 0.9, y = yht, gp = gptext) grid::upViewport() } icons <- scifigure::icons vp2 <- grid::viewport(x = 0.5, y = 0.5, width = 0.6, height = 0.9) grid::pushViewport(vp2) for (j in 1:ncol(experiments)) { for (i in 1:nrow(experiments)) { grid::grid.raster(icons[[paste(rownames(experiments)[i], experiments[i, j], sep = "_")]], x = j/(ncol(experiments) + 1), y = yht[i], height = 0.08 - 0.03 * (ncol(experiments) > 4), width = grid::unit(max(0.05, min(0.1, 1/((ncol(experiments) * 3)))), "snpc")) } } grid::upViewport() vp3 <- grid::viewport(x = 0.5, y = 0.95, width = 0.6, height = 0.1) grid::pushViewport(vp3) grid::grid.text(colnames(experiments), x = (1:ncol(experiments))/(ncol(experiments) + 1), y = 0.7, gp = gptext, rot = ifelse(ncol(experiments) > 12, 90, 0)) grid::upViewport() vp4 <- grid::viewport(x = 0.9, y = 0.5, width = 0.2, height = 0.6) grid::pushViewport(vp4) cols <- c("#D20000", "#007888", "#CDCDCD", "black") grid::grid.rect(width = 0.25, height = 0.1, x = 0.3, y = c(0.2, 0.4, 0.6, 0.8), gp = grid::gpar(fill 
= cols)) grid::grid.text(c("Incorrecto", "Diferente", "No Observado", "Original"), x = 0.3, y = c(0.1, 0.3, 0.5, 0.7), gp = grid::gpar(fontsize = 14)) }
/src/sci_figure_ES.R
no_license
EstadisticaUNTDF/EACN-2018
R
false
false
3,048
r
sci_figure <- function (experiments, hide_stages = NULL, names_of_stages = TRUE) { if (!all(unlist(lapply(experiments, function(x) { x %in% c("observed", "different", "unobserved", "incorrect") })))) { stop("Invalid cell value in experiments data frame.") } if (ncol(experiments) > 20) { experiments <- experiments[, 1:20] warning("Only showing the first 20 experiments for ease of plotting.") } idx <- !(rownames(experiments) %in% hide_stages) stage_names <- c("Población", "Pregunta", "Hipótesis", "Diseño Exp.", "Experimentador", "Datos", "Plan de Análisis", "Analista", "Código", "Estimador", "Resultados") stage_names <- stage_names[idx] experiments <- experiments[idx, , drop = FALSE] grid::grid.newpage() gptext <- grid::gpar(fontsize = 16 - min(nrow(experiments), 7)) yht <- seq(0.95, 0.05, length = nrow(experiments)) if (names_of_stages) { vp1 <- grid::viewport(x = 0.1, y = 0.5, width = 0.2, height = 0.9) grid::pushViewport(vp1) grid::grid.text(stage_names, x = 0.9, y = yht, gp = gptext) grid::upViewport() } icons <- scifigure::icons vp2 <- grid::viewport(x = 0.5, y = 0.5, width = 0.6, height = 0.9) grid::pushViewport(vp2) for (j in 1:ncol(experiments)) { for (i in 1:nrow(experiments)) { grid::grid.raster(icons[[paste(rownames(experiments)[i], experiments[i, j], sep = "_")]], x = j/(ncol(experiments) + 1), y = yht[i], height = 0.08 - 0.03 * (ncol(experiments) > 4), width = grid::unit(max(0.05, min(0.1, 1/((ncol(experiments) * 3)))), "snpc")) } } grid::upViewport() vp3 <- grid::viewport(x = 0.5, y = 0.95, width = 0.6, height = 0.1) grid::pushViewport(vp3) grid::grid.text(colnames(experiments), x = (1:ncol(experiments))/(ncol(experiments) + 1), y = 0.7, gp = gptext, rot = ifelse(ncol(experiments) > 12, 90, 0)) grid::upViewport() vp4 <- grid::viewport(x = 0.9, y = 0.5, width = 0.2, height = 0.6) grid::pushViewport(vp4) cols <- c("#D20000", "#007888", "#CDCDCD", "black") grid::grid.rect(width = 0.25, height = 0.1, x = 0.3, y = c(0.2, 0.4, 0.6, 0.8), gp = grid::gpar(fill 
= cols)) grid::grid.text(c("Incorrecto", "Diferente", "No Observado", "Original"), x = 0.3, y = c(0.1, 0.3, 0.5, 0.7), gp = grid::gpar(fontsize = 14)) }
i_row <- function (x, i) x[i, ] splt_REs <- function (b_mat, ind_RE) { n <- length(ind_RE) out <- vector("list", n) for (i in seq_len(n)) out[[i]] <- b_mat[, ind_RE[[i]], drop = FALSE] out } linpred_long <- function (X, betas, Z, b, id, type) { out <- vector("list", length(X)) for (i in seq_along(X)) { out[[i]] <- c(X[[i]] %*% betas[[i]]) if (type == "subject_specific") { out[[i]] <- out[[i]] + as.vector(rowSums(Z[[i]] * b[[i]][id[[i]], , drop = FALSE])) } } out } mu_fun <- function (eta, link) { switch (link, "identity" = eta, "inverse" = 1 / eta, "logit" = plogis(eta), "probit" = pnorm(eta), "cloglog" = - exp(- exp(eta)) + 1.0, "log" = exp(eta)) } fix_NAs_preds <- function (preds, NAs, n) { if (is.null(NAs)) preds else { r <- rep(as.numeric(NA), n) r[-NAs] <- preds r } } get_components_newdata <- function (object, newdata, n_samples, n_mcmc, cores, seed) { if (!exists(".Random.seed", envir = .GlobalEnv)) { runif(1L) } RNGstate <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", RNGstate, envir = .GlobalEnv)) # control control <- object$control # check for tibbles if (inherits(newdata, "tbl_df") || inherits(newdata, "tbl")) { newdata <- as.data.frame(newdata) } # extract idVar and time_var idVar <- object$model_info$var_names$idVar time_var <- object$model_info$var_names$time_var # set dataL as newdata; almost the same code as in jm() dataL <- if (!is.data.frame(newdata)) newdata[["newdataL"]] else newdata idL <- dataL[[idVar]] nY <- length(unique(idL)) # order data by idL and time_var if (is.null(dataL[[time_var]])) { stop("the variable specified in agument 'time_var' cannot be found ", "in the database of the longitudinal models.") } dataL <- dataL[order(idL, dataL[[time_var]]), ] # extract terms respVars <- object$model_info$var_names$respVars terms_FE <- object$model_info$terms$terms_FE terms_FE_noResp <- object$model_info$terms$terms_FE_noResp terms_RE <- object$model_info$terms$terms_RE terms_Surv <- 
object$model_info$terms$terms_Surv_noResp Xbar <- object$model_data$Xbar # create model frames mf_FE_dataL <- lapply(terms_FE, model.frame.default, data = dataL) mf_RE_dataL <- lapply(terms_RE, model.frame.default, data = dataL) # we need to account for missing data in the fixed and random effects model frames, # in parallel across outcomes (i.e., we will allow that some subjects may have no data # for some outcomes) NAs_FE_dataL <- lapply(mf_FE_dataL, attr, "na.action") NAs_RE_dataL <- lapply(mf_RE_dataL, attr, "na.action") mf_FE_dataL <- mapply2(fix_NAs_fixed, mf_FE_dataL, NAs_FE_dataL, NAs_RE_dataL) mf_RE_dataL <- mapply2(fix_NAs_random, mf_RE_dataL, NAs_RE_dataL, NAs_FE_dataL) # create response vectors y <- lapply(mf_FE_dataL, model.response) y <- lapply(y, function (yy) { if (is.factor(yy)) as.numeric(yy != levels(yy)[1L]) else yy }) y[] <- lapply(y, as.matrix) NAs <- mapply2(c, NAs_FE_dataL, NAs_RE_dataL) times_y <- lapply(NAs, function (ind) if (!is.null(ind)) dataL[[time_var]][-ind] else dataL[[time_var]]) families <- object$model_info$families family_names <- sapply(families, "[[", "family") links <- sapply(families, "[[", "link") # for family = binomial and when y has two columns, set the second column # to the number of trials instead the number of failures binomial_data <- family_names %in% c("binomial", "beta binomial") trials_fun <- function (y) { if (NCOL(y) == 2L) y[, 2L] <- y[, 1L] + y[, 2L] y } y[binomial_data] <- lapply(y[binomial_data], trials_fun) unq_id <- unique(idL) idL <- mapply2(exclude_NAs, NAs_FE_dataL, NAs_RE_dataL, MoreArgs = list(id = idL)) idL <- lapply(idL, match, table = unq_id) idL_lp <- lapply(idL, function (x) match(x, unique(x))) unq_idL <- lapply(idL, unique) X <- mapply2(model.matrix.default, terms_FE, mf_FE_dataL) Z <- mapply2(model.matrix.default, terms_RE, mf_RE_dataL) ################################ # extract terms terms_Surv <- object$model_info$terms$terms_Surv terms_Surv_noResp <- 
object$model_info$terms$terms_Surv_noResp type_censoring <- object$model_info$type_censoring dataS <- if (!is.data.frame(newdata)) newdata[["newdataE"]] else newdata CR_MS <- object$model_info$CR_MS if (!CR_MS) { idT <- dataS[[idVar]] dataS <- dataS[tapply(row.names(dataS), factor(idT, unique(idT)), tail, 1L), ] } idT <- dataS[[idVar]] mf_surv_dataS <- model.frame.default(terms_Surv, data = dataS) if (!is.null(NAs_surv <- attr(mf_surv_dataS, "na.action"))) { idT <- idT[-NAs_surv] dataS <- dataS[-NAs_surv, ] } idT <- factor(idT, levels = unique(idT)) nT <- length(unique(idT)) if (nY != nT) { stop("the number of groups/subjects in the longitudinal and survival datasets ", "do not seem to match. A potential reason why this may be happening is ", "missing data in some covariates used in the individual models.") } Surv_Response <- model.response(mf_surv_dataS) if (type_censoring == "right") { Time_right <- unname(Surv_Response[, "time"]) Time_left <- Time_start <- trunc_Time <- rep(0.0, nrow(dataS)) delta <- unname(Surv_Response[, "status"]) } else if (type_censoring == "counting") { Time_start <- unname(Surv_Response[, "start"]) Time_stop <- unname(Surv_Response[, "stop"]) delta <- unname(Surv_Response[, "status"]) Time_right <- Time_stop trunc_Time <- Time_start # possible left truncation time Time_left <- rep(0.0, nrow(dataS)) } else if (type_censoring == "interval") { Time1 <- unname(Surv_Response[, "time1"]) Time2 <- unname(Surv_Response[, "time2"]) trunc_Time <- Time_start <- rep(0.0, nrow(dataS)) delta <- unname(Surv_Response[, "status"]) Time_right <- Time1 Time_right[delta == 3] <- Time2[delta == 3] Time_right[delta == 2] <- 0.0 Time_left <- Time1 Time_left[delta <= 1] <- 0.0 } if (type_censoring != "counting") { names(Time_right) <- names(Time_left) <- names(Time_start) <- idT } which_event <- which(delta == 1) which_right <- which(delta == 0) which_left <- which(delta == 2) which_interval <- which(delta == 3) # extract strata if present otherwise all subjects 
in one stratum ind_strata <- attr(terms_Surv, "specials")$strata strata <- if (is.null(ind_strata)) { rep(1, nrow(mf_surv_dataS)) } else { unclass(mf_surv_dataS[[ind_strata]]) } Time_integration <- Time_right Time_integration[which_left] <- Time_left[which_left] Time_integration[which_interval] <- Time_left[which_interval] Time_integration2 <- rep(0.0, length(Time_integration)) if (length(which_interval)) { Time_integration2[which_interval] <- Time_right[which_interval] } last_times <- switch(type_censoring, "right" = unname(Surv_Response[, "time"]), "counting" = unname(Surv_Response[, "stop"]), "interval" = unname(Surv_Response[, "time1"])) # create Gauss Kronrod points and weights GK <- gaussKronrod(control$GK_k) sk <- GK$sk P <- c(Time_integration - trunc_Time) / 2 st <- outer(P, sk) + (c(Time_integration + trunc_Time) / 2) log_Pwk <- unname(rep(log(P), each = length(sk)) + rep_len(log(GK$wk), length.out = length(st))) if (length(which_interval)) { # we take the absolute value because for the subjects for whom we do not have # interval censoring P2 will be negative and this will produce a NA when we take # the log in 'log_Pwk2' P2 <- abs(Time_integration2 - Time_integration) / 2 st2 <- outer(P2, sk) + (c(Time_integration2 + Time_integration) / 2) log_Pwk2 <- rep(log(P2), each = length(sk)) + rep_len(log(GK$wk), length.out = length(st2)) } else { P2 <- st2 <- log_Pwk2 <- rep(0.0, nT * control$GK_k) } # knots for the log baseline hazard function knots <- control$knots # indices ni_event <- tapply(idT, idT, length) ni_event <- cbind(c(0, head(cumsum(ni_event), -1)), cumsum(ni_event)) id_H <- rep(paste0(idT, "_", unlist(tapply(idT, idT, seq_along))), each = control$GK_k) id_H <- match(id_H, unique(id_H)) # id_H_ repeats each unique idT the number of quadrature points id_H_ <- rep(idT, each = control$GK_k) id_H_ <- match(id_H_, unique(id_H_)) id_h <- unclass(idT) # Functional forms functional_forms <- object$model_info$functional_forms FunForms_per_outcome <- 
object$model_info$FunForms_per_outcome collapsed_functional_forms <- object$model_info$collapsed_functional_forms FunForms_cpp <- object$model_info$FunForms_cpp FunForms_ind <- object$model_info$FunForms_ind Funs_FunForms <- object$model_info$Funs_FunForms eps <- object$model_info$eps direction <- object$model_info$direction # Design matrices strata_H <- rep(strata, each = control$GK_k) W0_H <- create_W0(c(t(st)), knots, control$Bsplines_degree + 1, strata_H) dataS_H <- SurvData_HazardModel(st, dataS, Time_start, paste0(idT, "_", strata), time_var) mf <- model.frame.default(terms_Surv_noResp, data = dataS_H) W_H <- construct_Wmat(terms_Surv_noResp, mf) any_gammas <- as.logical(ncol(W_H)) if (!any_gammas) { W_H <- matrix(0.0, nrow = nrow(W_H), ncol = 1L) } attr <- lapply(functional_forms, extract_attributes, data = dataS_H) eps <- lapply(attr, "[[", 1L) direction <- lapply(attr, "[[", 2L) X_H <- design_matrices_functional_forms(st, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_H <- design_matrices_functional_forms(st, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_H <- lapply(functional_forms, construct_Umat, dataS = dataS_H) if (length(which_event)) { W0_h <- create_W0(Time_right, knots, control$Bsplines_degree + 1, strata) dataS_h <- SurvData_HazardModel(Time_right, dataS, Time_start, paste0(idT, "_", strata), time_var) mf <- model.frame.default(terms_Surv_noResp, data = dataS_h) W_h <- construct_Wmat(terms_Surv_noResp, mf) if (!any_gammas) { W_h <- matrix(0.0, nrow = nrow(W_h), ncol = 1L) } X_h <- design_matrices_functional_forms(Time_right, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_h <- design_matrices_functional_forms(Time_right, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_h <- lapply(functional_forms, construct_Umat, dataS = dataS_h) } else { W0_h <- W_h <- 
matrix(0.0) X_h <- Z_h <- U_h <- rep(list(matrix(0.0)), length(respVars)) } if (length(which_interval)) { W0_H2 <- create_W0(c(t(st2)), knots, control$Bsplines_degree + 1, strata_H) dataS_H2 <- SurvData_HazardModel(st2, dataS, Time_start, paste0(idT, "_", strata), time_var) mf2 <- model.frame.default(terms_Surv_noResp, data = dataS_H2) W_h <- construct_Wmat(terms_Surv_noResp, mf2) if (!any_gammas) { W_H2 <- matrix(0.0, nrow = nrow(W_H2), ncol = 1L) } X_H2 <- design_matrices_functional_forms(st, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_H2 <- design_matrices_functional_forms(st, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_H2 <- lapply(functional_forms, construct_Umat, dataS = dataS_H2) } else { W0_H2 <- W_H2 <- matrix(0.0) X_H2 <- Z_H2 <- U_H2 <- rep(list(matrix(0.0)), length(respVars)) } X_H[] <- lapply(X_H, docall_cbind) X_h[] <- lapply(X_h, docall_cbind) X_H2[] <- lapply(X_H2, docall_cbind) Z_H[] <- lapply(Z_H, docall_cbind) Z_h[] <- lapply(Z_h, docall_cbind) Z_H2[] <- lapply(Z_H2, docall_cbind) W_bar <- object$W_bar W_sds <- object$W_sds W_H <- center_fun(W_H, W_bar, W_sds) W_h <- center_fun(W_h, W_bar, W_sds) W_H2 <- center_fun(W_H2, W_bar, W_sds) # MCMC sample b <- lapply(sapply(Z, ncol), function (nc) matrix(0.0, nY, nc)) M <- sum(sapply(object$mcmc$bs_gammas, nrow)) get_param <- function (nam) { tht <- object$mcmc[[nam]] if (!is.null(tht)) docall_rbind(tht) else matrix(0.0, M, 1) } ind_betas <- grep("^betas", names(object$mcmc)) mcmc <- list( b = b, bs_gammas = get_param("bs_gammas"), gammas = get_param("gammas"), alphas = get_param("alphas"), Wlong_std_alphas = get_param("Wlong_std_alphas"), W_std_gammas = get_param("W_std_gammas"), betas = lapply(object$mcmc[ind_betas], docall_rbind) ) has_sigmas <- object$model_data$has_sigmas mcmc$sigmas <- matrix(0.0, M, length(has_sigmas)) mcmc$sigmas[, has_sigmas > 0] <- get_param("sigmas") D <- get_param("D") mcmc$D 
<- array(0.0, c(dim(lowertri2mat(D[1L, ])), M)) for (i in seq_len(M)) { mcmc$D[, , i] <- lowertri2mat(D[i, ]) } Data <- list( ind_RE = object$model_data$ind_RE, W0_H = W0_H, W0_h = W0_h, W0_H2 = W0_H2, W_H = W_H, W_h = W_h, W_H2 = W_H2, X_H = X_H, X_h = X_h, X_H2 = X_H2, Z_H = Z_H, Z_h = Z_h, Z_H2 = Z_H2, U_H = U_H, U_h = U_h, U_H2 = U_H2, Wlong_bar = object$Wlong_bar, Wlong_sds = object$Wlong_sds, idT = match(idT, unique(idT)), log_Pwk = log_Pwk, log_Pwk2 = log_Pwk2, id_H = id_H, id_H_ = id_H_, id_h = id_h, any_gammas = any_gammas, which_event = which_event, which_right = which_right, which_left = which_left, which_interval = which_interval, ni_event = ni_event, FunForms_cpp = FunForms_cpp, FunForms_ind = FunForms_ind, Funs_FunForms = Funs_FunForms, X = X, Z = Z, y = y, family_names = family_names, links = links, extra_parms = object$model_data$extra_parms, unq_idL = unq_idL, idL_lp = idL_lp, idL = idL ) if (n_samples > M) { warning("the number of samples cannot be greater than the number of ", "MCMC iterations in the fitted model.") n_samples <- M } control <- list(GK_k = object$control$GK_k, n_samples = n_samples, n_iter = n_mcmc) id_samples <- split(seq_len(control$n_samples), rep(seq_len(cores), each = ceiling(control$n_samples / cores), length.out = control$n_samples)) sample_parallel <- function (id_samples, Data, mcmc, control) { # keep only the samples from the MCMC used in the sampling # of the random effects mcmc$bs_gammas <- mcmc$bs_gammas[id_samples, , drop = FALSE] mcmc$gammas <- mcmc$gammas[id_samples, , drop = FALSE] mcmc$alphas <- mcmc$alphas[id_samples, , drop = FALSE] mcmc$betas[] <- lapply(mcmc$betas, function (m, ind) m[ind, , drop = FALSE], "ind" = id_samples) mcmc$sigmas <- mcmc$sigmas[id_samples, , drop = FALSE] mcmc$D <- mcmc$D[, , id_samples, drop = FALSE] # update control n_samples control$n_samples <- length(id_samples) # update random effects mcmc[["b"]] <- simulate_REs(Data, mcmc, control) mcmc$Wlong_std_alphas <- 
mcmc$Wlong_std_alphas[id_samples, , drop = FALSE] mcmc$W_std_gammas <- mcmc$W_std_gammas[id_samples, , drop = FALSE] mcmc } if (cores > 1L) { cl <- parallel::makeCluster(cores) parallel::clusterSetRNGStream(cl = cl, iseed = seed) out <- parallel::parLapply(cl, id_samples, sample_parallel, Data = Data, mcmc = mcmc, control = control) parallel::stopCluster(cl) } else { set.seed(seed) out <- list(sample_parallel(id_samples[[1L]], Data = Data, mcmc = mcmc, control = control)) } combine <- function (x) { n <- length(x) res <- x[[1L]] if (n > 1L) { for (i in 2:n) { res$bs_gammas <- rbind(res$bs_gammas, x[[i]][["bs_gammas"]]) res$gammas <- rbind(res$gammas, x[[i]][["gammas"]]) res$alphas <- rbind(res$alphas, x[[i]][["alphas"]]) res$sigmas <- rbind(res$sigmas, x[[i]][["sigmas"]]) res$Wlong_std_alphas <- rbind(res$Wlong_std_alphas, x[[i]][["Wlong_std_alphas"]]) res$W_std_gammas <- rbind(res$W_std_gammas, x[[i]][["W_std_gammas"]]) d1 <- dim(res$D)[3L] d2 <- dim(x[[i]][["D"]])[3L] a <- array(0.0, dim = c(dim(res$D)[1:2], d1 + d2)) a[, , seq(1, d1)] <- res$D a[, , seq(d1 + 1, d1 + d2)] <- x[[i]][["D"]] res$D <- a d1 <- dim(res$b)[3L] d2 <- dim(x[[i]][["b"]])[3L] a <- array(0.0, dim = c(dim(res$b)[1:2], d1 + d2)) a[, , seq(1, d1)] <- res$b a[, , seq(d1 + 1, d1 + d2)] <- x[[i]][["b"]] res$b <- a for (j in seq_along(res$betas)) { res$betas[[j]] <- rbind(res$betas[[j]], x[[i]][["betas"]][[j]]) } } } res } list(mcmc = combine(out), X = X, Z = Z, y = y, times_y = times_y, id = idL, ind_RE = object$model_data$ind_RE, links = links, respVars = lapply(respVars, "[", 1L), NAs = mapply2(c, NAs_FE_dataL, NAs_RE_dataL), last_times = last_times) } predict_Long <- function (object, components_newdata, newdata, newdata2, times, type, type_pred, level, return_newdata) { # Predictions for newdata betas <- components_newdata$mcmc[["betas"]] b_mat <- components_newdata$mcmc[["b"]] ind_RE <- components_newdata$ind_RE links <- components_newdata$links K <- length(ind_RE) M <- dim(b_mat)[3L] out <- 
lapply(components_newdata$X, function (x) matrix(0.0, nrow(x), M)) names(out) <- components_newdata$respVars for (i in seq_len(M)) { eta_i <- linpred_long(components_newdata$X, lapply(betas, i_row, i), components_newdata$Z, splt_REs(rbind(b_mat[, , i]), ind_RE), components_newdata$id, type = type) for (j in seq_len(K)) { out[[j]][, i] <- if (type_pred == "response") { mu_fun(eta_i[[j]], links[j]) } else eta_i[[j]] } } res1 <- list(preds = lapply(out, rowMeans, na.rm = TRUE), low = lapply(out, rowQuantiles, probs = (1 - level) / 2), upp = lapply(out, rowQuantiles, probs = (1 + level) / 2)) if (return_newdata) { n <- nrow(newdata) preds <- mapply2(fix_NAs_preds, res1$preds, components_newdata$NAs, MoreArgs = list(n = n)) names(preds) <- paste0("pred_", components_newdata$respVars) low <- mapply2(fix_NAs_preds, res1$low, components_newdata$NAs, MoreArgs = list(n = n)) names(low) <- paste0("low_", components_newdata$respVars) upp <- mapply2(fix_NAs_preds, res1$upp, components_newdata$NAs, MoreArgs = list(n = n)) names(upp) <- paste0("upp_", components_newdata$respVars) l <- c(preds, low, upp) l <- l[c(matrix(seq_along(l), ncol = length(preds), byrow = TRUE))] res1 <- cbind(newdata, as.data.frame(do.call("cbind", l))) } ############################################################################ ############################################################################ # Predictions for newdata2 if (is.null(newdata2) && !is.null(times) && is.numeric(times)) { last_times <- components_newdata$last_times t_max <- max(object$model_data$Time_right) test <- sapply(last_times, function (lt, tt) all(tt <= lt), tt = times) if (any(test)) { stop("according to the definition of argument 'times', for some ", "subjects the last available time is\n\t larger than the ", "maximum time to predict; redefine 'times' accordingly.") } f <- function (lt, tt, tm) c(lt, sort(tt[tt > lt & tt <= tm])) times <- lapply(last_times, f, tt = times, tm = t_max) n_times <- sapply(times, length) 
newdata2 <- newdata idVar <- object$model_info$var_names$idVar time_var <- object$model_info$var_names$time_var idT <- newdata2[[idVar]] newdata2 <- newdata2[tapply(row.names(newdata2), factor(idT, unique(idT)), tail, 1L), ] newdata2 <- newdata2[rep(seq_along(times), n_times), ] newdata2[[time_var]] <- unlist(times, use.names = FALSE) } if (!is.null(newdata2)) { terms_FE_noResp <- object$model_info$terms$terms_FE_noResp terms_RE <- object$model_info$terms$terms_RE mf_FE <- lapply(terms_FE_noResp, model.frame.default, data = newdata2) mf_RE <- lapply(terms_RE, model.frame.default, data = newdata2) NAs_FE <- lapply(mf_FE, attr, "na.action") NAs_RE <- lapply(mf_RE, attr, "na.action") mf_FE <- mapply2(fix_NAs_fixed, mf_FE, NAs_FE, NAs_RE) mf_RE <- mapply2(fix_NAs_random, mf_RE, NAs_RE, NAs_FE) X <- mapply2(model.matrix.default, terms_FE_noResp, mf_FE) Z <- mapply2(model.matrix.default, terms_RE, mf_RE) NAs <- mapply2(c, NAs_FE, NAs_RE) idL <- newdata2[[object$model_info$var_names$idVar]] unq_id <- unique(idL) idL <- mapply2(exclude_NAs, NAs_FE, NAs_RE, MoreArgs = list(id = idL)) idL <- lapply(idL, match, table = unq_id) out <- lapply(X, function (x) matrix(0.0, nrow(x), M)) names(out) <- components_newdata$respVars for (i in seq_len(M)) { eta_i <- linpred_long(X, lapply(betas, i_row, i), Z, splt_REs(rbind(b_mat[, , i]), ind_RE), idL, type = type) for (j in seq_len(K)) { out[[j]][, i] <- if (type_pred == "response") { mu_fun(eta_i[[j]], links[j]) } else eta_i[[j]] } } res2 <- list(preds = lapply(out, rowMeans, na.rm = TRUE), low = lapply(out, rowQuantiles, probs = (1 - level) / 2), upp = lapply(out, rowQuantiles, probs = (1 + level) / 2)) if (return_newdata) { n <- nrow(newdata2) preds <- mapply2(fix_NAs_preds, res2$preds, NAs, MoreArgs = list(n = n)) names(preds) <- paste0("pred_", components_newdata$respVars) low <- mapply2(fix_NAs_preds, res2$low, NAs, MoreArgs = list(n = n)) names(low) <- paste0("low_", components_newdata$respVars) upp <- mapply2(fix_NAs_preds, 
res2$upp, NAs, MoreArgs = list(n = n)) names(upp) <- paste0("upp_", components_newdata$respVars) l <- c(preds, low, upp) l <- l[c(matrix(seq_along(l), ncol = length(preds), byrow = TRUE))] res2 <- cbind(newdata2, as.data.frame(do.call("cbind", l))) } } out <- if (is.null(newdata2)) { res1 } else { list(newdata = res1, newdata2 = res2) } class(out) <- c("predict_jm", class(out)) attr(out, "id_var") <- object$model_info$var_names$idVar attr(out, "time_var") <- object$model_info$var_names$time_var attr(out, "resp_vars") <- object$model_info$var_names$respVars_form attr(out, "ranges") <- ranges <- lapply(object$model_data$y, range, na.rm = TRUE) attr(out, "last_times") <- components_newdata$last_times attr(out, "y") <- components_newdata$y attr(out, "times_y") <- components_newdata$times_y attr(out, "id") <- components_newdata$id attr(out, "process") <- "longitudinal" out } predict_Event <- function (object, components_newdata, newdata, times, level, return_newdata) { control <- object$control terms_FE <- object$model_info$terms$terms_FE terms_FE_noResp <- object$model_info$terms$terms_FE_noResp terms_RE <- object$model_info$terms$terms_RE idVar <- object$model_info$var_names$idVar time_var <- object$model_info$var_names$time_var terms_Surv <- object$model_info$terms$terms_Surv terms_Surv_noResp <- object$model_info$terms$terms_Surv_noResp type_censoring <- object$model_info$type_censoring dataL <- newdata Xbar <- object$model_data$Xbar data_pred <- newdata idT <- data_pred[[idVar]] data_pred <- data_pred[tapply(row.names(data_pred), factor(idT, unique(idT)), tail, 1L), ] mf_data_pred <- model.frame.default(terms_Surv, data = data_pred) Surv_Response <- model.response(mf_data_pred) ind_strata <- attr(terms_Surv, "specials")$strata strata <- if (is.null(ind_strata)) { rep(1, nrow(mf_data_pred)) } else { unclass(mf_data_pred[[ind_strata]]) } # The definition of last_times needs to be checked for counting and interval last_times <- switch(type_censoring, "right" = 
unname(Surv_Response[, "time"]), "counting" = unname(Surv_Response[, "stop"]), "interval" = unname(Surv_Response[, "time1"])) t_max <- quantile(object$model_data$Time_right, probs = 0.9) if (is.null(times) || !is.numeric(times)) { times <- lapply(last_times, seq, to = t_max, length.out = 21L) } else { t_max <- max(object$model_data$Time_right) test <- sapply(last_times, function (lt, tt) all(tt <= lt), tt = times) if (any(test)) { stop("according to the definition of argument 'times', for some ", "subjects the last available time is\n\t larger than the ", "maximum time to predict; redefine 'times' accordingly.") } f <- function (lt, tt, tm) c(lt, sort(tt[tt > lt & tt <= tm])) times <- lapply(last_times, f, tt = times, tm = t_max) } n_times <- sapply(times, length) data_pred <- data_pred[rep(seq_along(times), n_times), ] data_pred[[time_var]] <- unlist(times, use.names = FALSE) idT <- data_pred[[idVar]] idT <- factor(idT, levels = unique(idT)) strata <- rep(strata, n_times) upp_limit <- data_pred[[time_var]] Time_start <- last_times[unclass(idT)] g <- function (t0, t) c(t0, head(t, -1)) low_limit <- unlist(mapply2(g, last_times, times), use.names = FALSE) GK <- gaussKronrod(k = 7L) sk <- GK$sk P <- c(upp_limit - low_limit) / 2 st <- outer(P, sk) + (c(upp_limit + low_limit) / 2) log_Pwk <- unname(rep(log(P), each = length(sk)) + rep_len(log(GK$wk), length.out = length(st))) # knots knots <- control$knots # indices ni_event <- tapply(idT, idT, length) ni_event <- cbind(c(0, head(cumsum(ni_event), -1)), cumsum(ni_event)) id_H <- rep(paste0(idT, "_", unlist(tapply(idT, idT, seq_along))), each = 7L) id_H <- match(id_H, unique(id_H)) # id_H_ repeats each unique idT the number of quadrature points id_H_ <- rep(idT, each = 7L) id_H_ <- match(id_H_, unique(id_H_)) id_h <- unclass(idT) # Functional forms functional_forms <- object$model_info$functional_forms FunForms_per_outcome <- object$model_info$FunForms_per_outcome collapsed_functional_forms <- 
object$model_info$collapsed_functional_forms FunForms_cpp <- object$model_info$FunForms_cpp FunForms_ind <- object$model_info$FunForms_ind Funs_FunForms <- object$model_info$Funs_FunForms eps <- object$model_info$eps direction <- object$model_info$direction strata_H <- rep(strata, each = 7L) W0_H <- create_W0(c(t(st)), knots, control$Bsplines_degree + 1, strata_H) dataS_H <- SurvData_HazardModel(st, data_pred, Time_start, paste0(idT, "_", strata), time_var) mf <- model.frame.default(terms_Surv_noResp, data = dataS_H) W_H <- construct_Wmat(terms_Surv_noResp, mf) any_gammas <- as.logical(ncol(W_H)) if (!any_gammas) { W_H <- matrix(0.0, nrow = nrow(W_H), ncol = 1L) } attr <- lapply(functional_forms, extract_attributes, data = dataS_H) eps <- lapply(attr, "[[", 1L) direction <- lapply(attr, "[[", 2L) X_H <- design_matrices_functional_forms(st, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_H <- design_matrices_functional_forms(st, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_H <- lapply(functional_forms, construct_Umat, dataS = dataS_H) X_H[] <- lapply(X_H, docall_cbind) Z_H[] <- lapply(Z_H, docall_cbind) Data <- list( log_Pwk = log_Pwk, id_H = id_H, id_h = id_h, id_H_ = id_H_, ind_RE = object$model_data$ind_RE, W0_H = W0_H, W_H = W_H, U_H = U_H, X_H = X_H, Z_H = Z_H, Wlong_bar = object$Wlong_bar, Wlong_sds = object$Wlong_sds, any_gammas = any_gammas, FunForms_cpp = FunForms_cpp, FunForms_ind = FunForms_ind, Funs_FunForms = Funs_FunForms ) H <- cum_haz(Data, components_newdata$mcmc) index <- rep(seq_along(times), n_times) for (i in seq_along(times)) { H[index == i, ] <- colCumsums(H[index == i, ]) } CIF <- 1.0 - pmax(exp(- H), .Machine$double.eps) res <- list(pred = rowMeans(CIF), low = rowQuantiles(CIF, probs = (1 - level) / 2), upp = rowQuantiles(CIF, probs = (1 + level) / 2), times = unlist(times, use.names = FALSE), id = rep(levels(idT), n_times)) if (return_newdata) { 
data_pred[["pred_CIF"]] <- res$pred data_pred[["low_CIF"]] <- res$low data_pred[["upp_CIF"]] <- res$upp res <- data_pred } class(res) <- c("predict_jm", class(res)) attr(res, "id_var") <- object$model_info$var_names$idVar attr(res, "time_var") <- object$model_info$var_names$time_var attr(res, "resp_vars") <- object$model_info$var_names$respVars_form attr(res, "ranges") <- ranges <- lapply(object$model_data$y, range, na.rm = TRUE) attr(res, "last_times") <- components_newdata$last_times attr(res, "y") <- components_newdata$y attr(res, "times_y") <- components_newdata$times_y attr(res, "id") <- components_newdata$id attr(res, "process") <- "event" res }
/R/predict_funs.R
no_license
DaanNieboer/JMbayes2
R
false
false
32,748
r
i_row <- function (x, i) x[i, ] splt_REs <- function (b_mat, ind_RE) { n <- length(ind_RE) out <- vector("list", n) for (i in seq_len(n)) out[[i]] <- b_mat[, ind_RE[[i]], drop = FALSE] out } linpred_long <- function (X, betas, Z, b, id, type) { out <- vector("list", length(X)) for (i in seq_along(X)) { out[[i]] <- c(X[[i]] %*% betas[[i]]) if (type == "subject_specific") { out[[i]] <- out[[i]] + as.vector(rowSums(Z[[i]] * b[[i]][id[[i]], , drop = FALSE])) } } out } mu_fun <- function (eta, link) { switch (link, "identity" = eta, "inverse" = 1 / eta, "logit" = plogis(eta), "probit" = pnorm(eta), "cloglog" = - exp(- exp(eta)) + 1.0, "log" = exp(eta)) } fix_NAs_preds <- function (preds, NAs, n) { if (is.null(NAs)) preds else { r <- rep(as.numeric(NA), n) r[-NAs] <- preds r } } get_components_newdata <- function (object, newdata, n_samples, n_mcmc, cores, seed) { if (!exists(".Random.seed", envir = .GlobalEnv)) { runif(1L) } RNGstate <- get(".Random.seed", envir = .GlobalEnv) on.exit(assign(".Random.seed", RNGstate, envir = .GlobalEnv)) # control control <- object$control # check for tibbles if (inherits(newdata, "tbl_df") || inherits(newdata, "tbl")) { newdata <- as.data.frame(newdata) } # extract idVar and time_var idVar <- object$model_info$var_names$idVar time_var <- object$model_info$var_names$time_var # set dataL as newdata; almost the same code as in jm() dataL <- if (!is.data.frame(newdata)) newdata[["newdataL"]] else newdata idL <- dataL[[idVar]] nY <- length(unique(idL)) # order data by idL and time_var if (is.null(dataL[[time_var]])) { stop("the variable specified in agument 'time_var' cannot be found ", "in the database of the longitudinal models.") } dataL <- dataL[order(idL, dataL[[time_var]]), ] # extract terms respVars <- object$model_info$var_names$respVars terms_FE <- object$model_info$terms$terms_FE terms_FE_noResp <- object$model_info$terms$terms_FE_noResp terms_RE <- object$model_info$terms$terms_RE terms_Surv <- 
object$model_info$terms$terms_Surv_noResp Xbar <- object$model_data$Xbar # create model frames mf_FE_dataL <- lapply(terms_FE, model.frame.default, data = dataL) mf_RE_dataL <- lapply(terms_RE, model.frame.default, data = dataL) # we need to account for missing data in the fixed and random effects model frames, # in parallel across outcomes (i.e., we will allow that some subjects may have no data # for some outcomes) NAs_FE_dataL <- lapply(mf_FE_dataL, attr, "na.action") NAs_RE_dataL <- lapply(mf_RE_dataL, attr, "na.action") mf_FE_dataL <- mapply2(fix_NAs_fixed, mf_FE_dataL, NAs_FE_dataL, NAs_RE_dataL) mf_RE_dataL <- mapply2(fix_NAs_random, mf_RE_dataL, NAs_RE_dataL, NAs_FE_dataL) # create response vectors y <- lapply(mf_FE_dataL, model.response) y <- lapply(y, function (yy) { if (is.factor(yy)) as.numeric(yy != levels(yy)[1L]) else yy }) y[] <- lapply(y, as.matrix) NAs <- mapply2(c, NAs_FE_dataL, NAs_RE_dataL) times_y <- lapply(NAs, function (ind) if (!is.null(ind)) dataL[[time_var]][-ind] else dataL[[time_var]]) families <- object$model_info$families family_names <- sapply(families, "[[", "family") links <- sapply(families, "[[", "link") # for family = binomial and when y has two columns, set the second column # to the number of trials instead the number of failures binomial_data <- family_names %in% c("binomial", "beta binomial") trials_fun <- function (y) { if (NCOL(y) == 2L) y[, 2L] <- y[, 1L] + y[, 2L] y } y[binomial_data] <- lapply(y[binomial_data], trials_fun) unq_id <- unique(idL) idL <- mapply2(exclude_NAs, NAs_FE_dataL, NAs_RE_dataL, MoreArgs = list(id = idL)) idL <- lapply(idL, match, table = unq_id) idL_lp <- lapply(idL, function (x) match(x, unique(x))) unq_idL <- lapply(idL, unique) X <- mapply2(model.matrix.default, terms_FE, mf_FE_dataL) Z <- mapply2(model.matrix.default, terms_RE, mf_RE_dataL) ################################ # extract terms terms_Surv <- object$model_info$terms$terms_Surv terms_Surv_noResp <- 
object$model_info$terms$terms_Surv_noResp type_censoring <- object$model_info$type_censoring dataS <- if (!is.data.frame(newdata)) newdata[["newdataE"]] else newdata CR_MS <- object$model_info$CR_MS if (!CR_MS) { idT <- dataS[[idVar]] dataS <- dataS[tapply(row.names(dataS), factor(idT, unique(idT)), tail, 1L), ] } idT <- dataS[[idVar]] mf_surv_dataS <- model.frame.default(terms_Surv, data = dataS) if (!is.null(NAs_surv <- attr(mf_surv_dataS, "na.action"))) { idT <- idT[-NAs_surv] dataS <- dataS[-NAs_surv, ] } idT <- factor(idT, levels = unique(idT)) nT <- length(unique(idT)) if (nY != nT) { stop("the number of groups/subjects in the longitudinal and survival datasets ", "do not seem to match. A potential reason why this may be happening is ", "missing data in some covariates used in the individual models.") } Surv_Response <- model.response(mf_surv_dataS) if (type_censoring == "right") { Time_right <- unname(Surv_Response[, "time"]) Time_left <- Time_start <- trunc_Time <- rep(0.0, nrow(dataS)) delta <- unname(Surv_Response[, "status"]) } else if (type_censoring == "counting") { Time_start <- unname(Surv_Response[, "start"]) Time_stop <- unname(Surv_Response[, "stop"]) delta <- unname(Surv_Response[, "status"]) Time_right <- Time_stop trunc_Time <- Time_start # possible left truncation time Time_left <- rep(0.0, nrow(dataS)) } else if (type_censoring == "interval") { Time1 <- unname(Surv_Response[, "time1"]) Time2 <- unname(Surv_Response[, "time2"]) trunc_Time <- Time_start <- rep(0.0, nrow(dataS)) delta <- unname(Surv_Response[, "status"]) Time_right <- Time1 Time_right[delta == 3] <- Time2[delta == 3] Time_right[delta == 2] <- 0.0 Time_left <- Time1 Time_left[delta <= 1] <- 0.0 } if (type_censoring != "counting") { names(Time_right) <- names(Time_left) <- names(Time_start) <- idT } which_event <- which(delta == 1) which_right <- which(delta == 0) which_left <- which(delta == 2) which_interval <- which(delta == 3) # extract strata if present otherwise all subjects 
in one stratum ind_strata <- attr(terms_Surv, "specials")$strata strata <- if (is.null(ind_strata)) { rep(1, nrow(mf_surv_dataS)) } else { unclass(mf_surv_dataS[[ind_strata]]) } Time_integration <- Time_right Time_integration[which_left] <- Time_left[which_left] Time_integration[which_interval] <- Time_left[which_interval] Time_integration2 <- rep(0.0, length(Time_integration)) if (length(which_interval)) { Time_integration2[which_interval] <- Time_right[which_interval] } last_times <- switch(type_censoring, "right" = unname(Surv_Response[, "time"]), "counting" = unname(Surv_Response[, "stop"]), "interval" = unname(Surv_Response[, "time1"])) # create Gauss Kronrod points and weights GK <- gaussKronrod(control$GK_k) sk <- GK$sk P <- c(Time_integration - trunc_Time) / 2 st <- outer(P, sk) + (c(Time_integration + trunc_Time) / 2) log_Pwk <- unname(rep(log(P), each = length(sk)) + rep_len(log(GK$wk), length.out = length(st))) if (length(which_interval)) { # we take the absolute value because for the subjects for whom we do not have # interval censoring P2 will be negative and this will produce a NA when we take # the log in 'log_Pwk2' P2 <- abs(Time_integration2 - Time_integration) / 2 st2 <- outer(P2, sk) + (c(Time_integration2 + Time_integration) / 2) log_Pwk2 <- rep(log(P2), each = length(sk)) + rep_len(log(GK$wk), length.out = length(st2)) } else { P2 <- st2 <- log_Pwk2 <- rep(0.0, nT * control$GK_k) } # knots for the log baseline hazard function knots <- control$knots # indices ni_event <- tapply(idT, idT, length) ni_event <- cbind(c(0, head(cumsum(ni_event), -1)), cumsum(ni_event)) id_H <- rep(paste0(idT, "_", unlist(tapply(idT, idT, seq_along))), each = control$GK_k) id_H <- match(id_H, unique(id_H)) # id_H_ repeats each unique idT the number of quadrature points id_H_ <- rep(idT, each = control$GK_k) id_H_ <- match(id_H_, unique(id_H_)) id_h <- unclass(idT) # Functional forms functional_forms <- object$model_info$functional_forms FunForms_per_outcome <- 
object$model_info$FunForms_per_outcome collapsed_functional_forms <- object$model_info$collapsed_functional_forms FunForms_cpp <- object$model_info$FunForms_cpp FunForms_ind <- object$model_info$FunForms_ind Funs_FunForms <- object$model_info$Funs_FunForms eps <- object$model_info$eps direction <- object$model_info$direction # Design matrices strata_H <- rep(strata, each = control$GK_k) W0_H <- create_W0(c(t(st)), knots, control$Bsplines_degree + 1, strata_H) dataS_H <- SurvData_HazardModel(st, dataS, Time_start, paste0(idT, "_", strata), time_var) mf <- model.frame.default(terms_Surv_noResp, data = dataS_H) W_H <- construct_Wmat(terms_Surv_noResp, mf) any_gammas <- as.logical(ncol(W_H)) if (!any_gammas) { W_H <- matrix(0.0, nrow = nrow(W_H), ncol = 1L) } attr <- lapply(functional_forms, extract_attributes, data = dataS_H) eps <- lapply(attr, "[[", 1L) direction <- lapply(attr, "[[", 2L) X_H <- design_matrices_functional_forms(st, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_H <- design_matrices_functional_forms(st, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_H <- lapply(functional_forms, construct_Umat, dataS = dataS_H) if (length(which_event)) { W0_h <- create_W0(Time_right, knots, control$Bsplines_degree + 1, strata) dataS_h <- SurvData_HazardModel(Time_right, dataS, Time_start, paste0(idT, "_", strata), time_var) mf <- model.frame.default(terms_Surv_noResp, data = dataS_h) W_h <- construct_Wmat(terms_Surv_noResp, mf) if (!any_gammas) { W_h <- matrix(0.0, nrow = nrow(W_h), ncol = 1L) } X_h <- design_matrices_functional_forms(Time_right, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_h <- design_matrices_functional_forms(Time_right, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_h <- lapply(functional_forms, construct_Umat, dataS = dataS_h) } else { W0_h <- W_h <- 
matrix(0.0) X_h <- Z_h <- U_h <- rep(list(matrix(0.0)), length(respVars)) } if (length(which_interval)) { W0_H2 <- create_W0(c(t(st2)), knots, control$Bsplines_degree + 1, strata_H) dataS_H2 <- SurvData_HazardModel(st2, dataS, Time_start, paste0(idT, "_", strata), time_var) mf2 <- model.frame.default(terms_Surv_noResp, data = dataS_H2) W_h <- construct_Wmat(terms_Surv_noResp, mf2) if (!any_gammas) { W_H2 <- matrix(0.0, nrow = nrow(W_H2), ncol = 1L) } X_H2 <- design_matrices_functional_forms(st, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_H2 <- design_matrices_functional_forms(st, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_H2 <- lapply(functional_forms, construct_Umat, dataS = dataS_H2) } else { W0_H2 <- W_H2 <- matrix(0.0) X_H2 <- Z_H2 <- U_H2 <- rep(list(matrix(0.0)), length(respVars)) } X_H[] <- lapply(X_H, docall_cbind) X_h[] <- lapply(X_h, docall_cbind) X_H2[] <- lapply(X_H2, docall_cbind) Z_H[] <- lapply(Z_H, docall_cbind) Z_h[] <- lapply(Z_h, docall_cbind) Z_H2[] <- lapply(Z_H2, docall_cbind) W_bar <- object$W_bar W_sds <- object$W_sds W_H <- center_fun(W_H, W_bar, W_sds) W_h <- center_fun(W_h, W_bar, W_sds) W_H2 <- center_fun(W_H2, W_bar, W_sds) # MCMC sample b <- lapply(sapply(Z, ncol), function (nc) matrix(0.0, nY, nc)) M <- sum(sapply(object$mcmc$bs_gammas, nrow)) get_param <- function (nam) { tht <- object$mcmc[[nam]] if (!is.null(tht)) docall_rbind(tht) else matrix(0.0, M, 1) } ind_betas <- grep("^betas", names(object$mcmc)) mcmc <- list( b = b, bs_gammas = get_param("bs_gammas"), gammas = get_param("gammas"), alphas = get_param("alphas"), Wlong_std_alphas = get_param("Wlong_std_alphas"), W_std_gammas = get_param("W_std_gammas"), betas = lapply(object$mcmc[ind_betas], docall_rbind) ) has_sigmas <- object$model_data$has_sigmas mcmc$sigmas <- matrix(0.0, M, length(has_sigmas)) mcmc$sigmas[, has_sigmas > 0] <- get_param("sigmas") D <- get_param("D") mcmc$D 
<- array(0.0, c(dim(lowertri2mat(D[1L, ])), M)) for (i in seq_len(M)) { mcmc$D[, , i] <- lowertri2mat(D[i, ]) } Data <- list( ind_RE = object$model_data$ind_RE, W0_H = W0_H, W0_h = W0_h, W0_H2 = W0_H2, W_H = W_H, W_h = W_h, W_H2 = W_H2, X_H = X_H, X_h = X_h, X_H2 = X_H2, Z_H = Z_H, Z_h = Z_h, Z_H2 = Z_H2, U_H = U_H, U_h = U_h, U_H2 = U_H2, Wlong_bar = object$Wlong_bar, Wlong_sds = object$Wlong_sds, idT = match(idT, unique(idT)), log_Pwk = log_Pwk, log_Pwk2 = log_Pwk2, id_H = id_H, id_H_ = id_H_, id_h = id_h, any_gammas = any_gammas, which_event = which_event, which_right = which_right, which_left = which_left, which_interval = which_interval, ni_event = ni_event, FunForms_cpp = FunForms_cpp, FunForms_ind = FunForms_ind, Funs_FunForms = Funs_FunForms, X = X, Z = Z, y = y, family_names = family_names, links = links, extra_parms = object$model_data$extra_parms, unq_idL = unq_idL, idL_lp = idL_lp, idL = idL ) if (n_samples > M) { warning("the number of samples cannot be greater than the number of ", "MCMC iterations in the fitted model.") n_samples <- M } control <- list(GK_k = object$control$GK_k, n_samples = n_samples, n_iter = n_mcmc) id_samples <- split(seq_len(control$n_samples), rep(seq_len(cores), each = ceiling(control$n_samples / cores), length.out = control$n_samples)) sample_parallel <- function (id_samples, Data, mcmc, control) { # keep only the samples from the MCMC used in the sampling # of the random effects mcmc$bs_gammas <- mcmc$bs_gammas[id_samples, , drop = FALSE] mcmc$gammas <- mcmc$gammas[id_samples, , drop = FALSE] mcmc$alphas <- mcmc$alphas[id_samples, , drop = FALSE] mcmc$betas[] <- lapply(mcmc$betas, function (m, ind) m[ind, , drop = FALSE], "ind" = id_samples) mcmc$sigmas <- mcmc$sigmas[id_samples, , drop = FALSE] mcmc$D <- mcmc$D[, , id_samples, drop = FALSE] # update control n_samples control$n_samples <- length(id_samples) # update random effects mcmc[["b"]] <- simulate_REs(Data, mcmc, control) mcmc$Wlong_std_alphas <- 
mcmc$Wlong_std_alphas[id_samples, , drop = FALSE] mcmc$W_std_gammas <- mcmc$W_std_gammas[id_samples, , drop = FALSE] mcmc } if (cores > 1L) { cl <- parallel::makeCluster(cores) parallel::clusterSetRNGStream(cl = cl, iseed = seed) out <- parallel::parLapply(cl, id_samples, sample_parallel, Data = Data, mcmc = mcmc, control = control) parallel::stopCluster(cl) } else { set.seed(seed) out <- list(sample_parallel(id_samples[[1L]], Data = Data, mcmc = mcmc, control = control)) } combine <- function (x) { n <- length(x) res <- x[[1L]] if (n > 1L) { for (i in 2:n) { res$bs_gammas <- rbind(res$bs_gammas, x[[i]][["bs_gammas"]]) res$gammas <- rbind(res$gammas, x[[i]][["gammas"]]) res$alphas <- rbind(res$alphas, x[[i]][["alphas"]]) res$sigmas <- rbind(res$sigmas, x[[i]][["sigmas"]]) res$Wlong_std_alphas <- rbind(res$Wlong_std_alphas, x[[i]][["Wlong_std_alphas"]]) res$W_std_gammas <- rbind(res$W_std_gammas, x[[i]][["W_std_gammas"]]) d1 <- dim(res$D)[3L] d2 <- dim(x[[i]][["D"]])[3L] a <- array(0.0, dim = c(dim(res$D)[1:2], d1 + d2)) a[, , seq(1, d1)] <- res$D a[, , seq(d1 + 1, d1 + d2)] <- x[[i]][["D"]] res$D <- a d1 <- dim(res$b)[3L] d2 <- dim(x[[i]][["b"]])[3L] a <- array(0.0, dim = c(dim(res$b)[1:2], d1 + d2)) a[, , seq(1, d1)] <- res$b a[, , seq(d1 + 1, d1 + d2)] <- x[[i]][["b"]] res$b <- a for (j in seq_along(res$betas)) { res$betas[[j]] <- rbind(res$betas[[j]], x[[i]][["betas"]][[j]]) } } } res } list(mcmc = combine(out), X = X, Z = Z, y = y, times_y = times_y, id = idL, ind_RE = object$model_data$ind_RE, links = links, respVars = lapply(respVars, "[", 1L), NAs = mapply2(c, NAs_FE_dataL, NAs_RE_dataL), last_times = last_times) } predict_Long <- function (object, components_newdata, newdata, newdata2, times, type, type_pred, level, return_newdata) { # Predictions for newdata betas <- components_newdata$mcmc[["betas"]] b_mat <- components_newdata$mcmc[["b"]] ind_RE <- components_newdata$ind_RE links <- components_newdata$links K <- length(ind_RE) M <- dim(b_mat)[3L] out <- 
lapply(components_newdata$X, function (x) matrix(0.0, nrow(x), M)) names(out) <- components_newdata$respVars for (i in seq_len(M)) { eta_i <- linpred_long(components_newdata$X, lapply(betas, i_row, i), components_newdata$Z, splt_REs(rbind(b_mat[, , i]), ind_RE), components_newdata$id, type = type) for (j in seq_len(K)) { out[[j]][, i] <- if (type_pred == "response") { mu_fun(eta_i[[j]], links[j]) } else eta_i[[j]] } } res1 <- list(preds = lapply(out, rowMeans, na.rm = TRUE), low = lapply(out, rowQuantiles, probs = (1 - level) / 2), upp = lapply(out, rowQuantiles, probs = (1 + level) / 2)) if (return_newdata) { n <- nrow(newdata) preds <- mapply2(fix_NAs_preds, res1$preds, components_newdata$NAs, MoreArgs = list(n = n)) names(preds) <- paste0("pred_", components_newdata$respVars) low <- mapply2(fix_NAs_preds, res1$low, components_newdata$NAs, MoreArgs = list(n = n)) names(low) <- paste0("low_", components_newdata$respVars) upp <- mapply2(fix_NAs_preds, res1$upp, components_newdata$NAs, MoreArgs = list(n = n)) names(upp) <- paste0("upp_", components_newdata$respVars) l <- c(preds, low, upp) l <- l[c(matrix(seq_along(l), ncol = length(preds), byrow = TRUE))] res1 <- cbind(newdata, as.data.frame(do.call("cbind", l))) } ############################################################################ ############################################################################ # Predictions for newdata2 if (is.null(newdata2) && !is.null(times) && is.numeric(times)) { last_times <- components_newdata$last_times t_max <- max(object$model_data$Time_right) test <- sapply(last_times, function (lt, tt) all(tt <= lt), tt = times) if (any(test)) { stop("according to the definition of argument 'times', for some ", "subjects the last available time is\n\t larger than the ", "maximum time to predict; redefine 'times' accordingly.") } f <- function (lt, tt, tm) c(lt, sort(tt[tt > lt & tt <= tm])) times <- lapply(last_times, f, tt = times, tm = t_max) n_times <- sapply(times, length) 
newdata2 <- newdata idVar <- object$model_info$var_names$idVar time_var <- object$model_info$var_names$time_var idT <- newdata2[[idVar]] newdata2 <- newdata2[tapply(row.names(newdata2), factor(idT, unique(idT)), tail, 1L), ] newdata2 <- newdata2[rep(seq_along(times), n_times), ] newdata2[[time_var]] <- unlist(times, use.names = FALSE) } if (!is.null(newdata2)) { terms_FE_noResp <- object$model_info$terms$terms_FE_noResp terms_RE <- object$model_info$terms$terms_RE mf_FE <- lapply(terms_FE_noResp, model.frame.default, data = newdata2) mf_RE <- lapply(terms_RE, model.frame.default, data = newdata2) NAs_FE <- lapply(mf_FE, attr, "na.action") NAs_RE <- lapply(mf_RE, attr, "na.action") mf_FE <- mapply2(fix_NAs_fixed, mf_FE, NAs_FE, NAs_RE) mf_RE <- mapply2(fix_NAs_random, mf_RE, NAs_RE, NAs_FE) X <- mapply2(model.matrix.default, terms_FE_noResp, mf_FE) Z <- mapply2(model.matrix.default, terms_RE, mf_RE) NAs <- mapply2(c, NAs_FE, NAs_RE) idL <- newdata2[[object$model_info$var_names$idVar]] unq_id <- unique(idL) idL <- mapply2(exclude_NAs, NAs_FE, NAs_RE, MoreArgs = list(id = idL)) idL <- lapply(idL, match, table = unq_id) out <- lapply(X, function (x) matrix(0.0, nrow(x), M)) names(out) <- components_newdata$respVars for (i in seq_len(M)) { eta_i <- linpred_long(X, lapply(betas, i_row, i), Z, splt_REs(rbind(b_mat[, , i]), ind_RE), idL, type = type) for (j in seq_len(K)) { out[[j]][, i] <- if (type_pred == "response") { mu_fun(eta_i[[j]], links[j]) } else eta_i[[j]] } } res2 <- list(preds = lapply(out, rowMeans, na.rm = TRUE), low = lapply(out, rowQuantiles, probs = (1 - level) / 2), upp = lapply(out, rowQuantiles, probs = (1 + level) / 2)) if (return_newdata) { n <- nrow(newdata2) preds <- mapply2(fix_NAs_preds, res2$preds, NAs, MoreArgs = list(n = n)) names(preds) <- paste0("pred_", components_newdata$respVars) low <- mapply2(fix_NAs_preds, res2$low, NAs, MoreArgs = list(n = n)) names(low) <- paste0("low_", components_newdata$respVars) upp <- mapply2(fix_NAs_preds, 
res2$upp, NAs, MoreArgs = list(n = n)) names(upp) <- paste0("upp_", components_newdata$respVars) l <- c(preds, low, upp) l <- l[c(matrix(seq_along(l), ncol = length(preds), byrow = TRUE))] res2 <- cbind(newdata2, as.data.frame(do.call("cbind", l))) } } out <- if (is.null(newdata2)) { res1 } else { list(newdata = res1, newdata2 = res2) } class(out) <- c("predict_jm", class(out)) attr(out, "id_var") <- object$model_info$var_names$idVar attr(out, "time_var") <- object$model_info$var_names$time_var attr(out, "resp_vars") <- object$model_info$var_names$respVars_form attr(out, "ranges") <- ranges <- lapply(object$model_data$y, range, na.rm = TRUE) attr(out, "last_times") <- components_newdata$last_times attr(out, "y") <- components_newdata$y attr(out, "times_y") <- components_newdata$times_y attr(out, "id") <- components_newdata$id attr(out, "process") <- "longitudinal" out } predict_Event <- function (object, components_newdata, newdata, times, level, return_newdata) { control <- object$control terms_FE <- object$model_info$terms$terms_FE terms_FE_noResp <- object$model_info$terms$terms_FE_noResp terms_RE <- object$model_info$terms$terms_RE idVar <- object$model_info$var_names$idVar time_var <- object$model_info$var_names$time_var terms_Surv <- object$model_info$terms$terms_Surv terms_Surv_noResp <- object$model_info$terms$terms_Surv_noResp type_censoring <- object$model_info$type_censoring dataL <- newdata Xbar <- object$model_data$Xbar data_pred <- newdata idT <- data_pred[[idVar]] data_pred <- data_pred[tapply(row.names(data_pred), factor(idT, unique(idT)), tail, 1L), ] mf_data_pred <- model.frame.default(terms_Surv, data = data_pred) Surv_Response <- model.response(mf_data_pred) ind_strata <- attr(terms_Surv, "specials")$strata strata <- if (is.null(ind_strata)) { rep(1, nrow(mf_data_pred)) } else { unclass(mf_data_pred[[ind_strata]]) } # The definition of last_times needs to be checked for counting and interval last_times <- switch(type_censoring, "right" = 
unname(Surv_Response[, "time"]), "counting" = unname(Surv_Response[, "stop"]), "interval" = unname(Surv_Response[, "time1"])) t_max <- quantile(object$model_data$Time_right, probs = 0.9) if (is.null(times) || !is.numeric(times)) { times <- lapply(last_times, seq, to = t_max, length.out = 21L) } else { t_max <- max(object$model_data$Time_right) test <- sapply(last_times, function (lt, tt) all(tt <= lt), tt = times) if (any(test)) { stop("according to the definition of argument 'times', for some ", "subjects the last available time is\n\t larger than the ", "maximum time to predict; redefine 'times' accordingly.") } f <- function (lt, tt, tm) c(lt, sort(tt[tt > lt & tt <= tm])) times <- lapply(last_times, f, tt = times, tm = t_max) } n_times <- sapply(times, length) data_pred <- data_pred[rep(seq_along(times), n_times), ] data_pred[[time_var]] <- unlist(times, use.names = FALSE) idT <- data_pred[[idVar]] idT <- factor(idT, levels = unique(idT)) strata <- rep(strata, n_times) upp_limit <- data_pred[[time_var]] Time_start <- last_times[unclass(idT)] g <- function (t0, t) c(t0, head(t, -1)) low_limit <- unlist(mapply2(g, last_times, times), use.names = FALSE) GK <- gaussKronrod(k = 7L) sk <- GK$sk P <- c(upp_limit - low_limit) / 2 st <- outer(P, sk) + (c(upp_limit + low_limit) / 2) log_Pwk <- unname(rep(log(P), each = length(sk)) + rep_len(log(GK$wk), length.out = length(st))) # knots knots <- control$knots # indices ni_event <- tapply(idT, idT, length) ni_event <- cbind(c(0, head(cumsum(ni_event), -1)), cumsum(ni_event)) id_H <- rep(paste0(idT, "_", unlist(tapply(idT, idT, seq_along))), each = 7L) id_H <- match(id_H, unique(id_H)) # id_H_ repeats each unique idT the number of quadrature points id_H_ <- rep(idT, each = 7L) id_H_ <- match(id_H_, unique(id_H_)) id_h <- unclass(idT) # Functional forms functional_forms <- object$model_info$functional_forms FunForms_per_outcome <- object$model_info$FunForms_per_outcome collapsed_functional_forms <- 
object$model_info$collapsed_functional_forms FunForms_cpp <- object$model_info$FunForms_cpp FunForms_ind <- object$model_info$FunForms_ind Funs_FunForms <- object$model_info$Funs_FunForms eps <- object$model_info$eps direction <- object$model_info$direction strata_H <- rep(strata, each = 7L) W0_H <- create_W0(c(t(st)), knots, control$Bsplines_degree + 1, strata_H) dataS_H <- SurvData_HazardModel(st, data_pred, Time_start, paste0(idT, "_", strata), time_var) mf <- model.frame.default(terms_Surv_noResp, data = dataS_H) W_H <- construct_Wmat(terms_Surv_noResp, mf) any_gammas <- as.logical(ncol(W_H)) if (!any_gammas) { W_H <- matrix(0.0, nrow = nrow(W_H), ncol = 1L) } attr <- lapply(functional_forms, extract_attributes, data = dataS_H) eps <- lapply(attr, "[[", 1L) direction <- lapply(attr, "[[", 2L) X_H <- design_matrices_functional_forms(st, terms_FE_noResp, dataL, time_var, idVar, idT, collapsed_functional_forms, Xbar, eps, direction) Z_H <- design_matrices_functional_forms(st, terms_RE, dataL, time_var, idVar, idT, collapsed_functional_forms, NULL, eps, direction) U_H <- lapply(functional_forms, construct_Umat, dataS = dataS_H) X_H[] <- lapply(X_H, docall_cbind) Z_H[] <- lapply(Z_H, docall_cbind) Data <- list( log_Pwk = log_Pwk, id_H = id_H, id_h = id_h, id_H_ = id_H_, ind_RE = object$model_data$ind_RE, W0_H = W0_H, W_H = W_H, U_H = U_H, X_H = X_H, Z_H = Z_H, Wlong_bar = object$Wlong_bar, Wlong_sds = object$Wlong_sds, any_gammas = any_gammas, FunForms_cpp = FunForms_cpp, FunForms_ind = FunForms_ind, Funs_FunForms = Funs_FunForms ) H <- cum_haz(Data, components_newdata$mcmc) index <- rep(seq_along(times), n_times) for (i in seq_along(times)) { H[index == i, ] <- colCumsums(H[index == i, ]) } CIF <- 1.0 - pmax(exp(- H), .Machine$double.eps) res <- list(pred = rowMeans(CIF), low = rowQuantiles(CIF, probs = (1 - level) / 2), upp = rowQuantiles(CIF, probs = (1 + level) / 2), times = unlist(times, use.names = FALSE), id = rep(levels(idT), n_times)) if (return_newdata) { 
data_pred[["pred_CIF"]] <- res$pred data_pred[["low_CIF"]] <- res$low data_pred[["upp_CIF"]] <- res$upp res <- data_pred } class(res) <- c("predict_jm", class(res)) attr(res, "id_var") <- object$model_info$var_names$idVar attr(res, "time_var") <- object$model_info$var_names$time_var attr(res, "resp_vars") <- object$model_info$var_names$respVars_form attr(res, "ranges") <- ranges <- lapply(object$model_data$y, range, na.rm = TRUE) attr(res, "last_times") <- components_newdata$last_times attr(res, "y") <- components_newdata$y attr(res, "times_y") <- components_newdata$times_y attr(res, "id") <- components_newdata$id attr(res, "process") <- "event" res }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/app.r \name{appReadyToRun} \alias{appReadyToRun} \title{set the app ready to run} \usage{ appReadyToRun(app = getApp(), ui = app$ui) } \description{ set the app ready to run }
/man/appReadyToRun.Rd
no_license
skranz/shinyEvents
R
false
true
255
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/app.r \name{appReadyToRun} \alias{appReadyToRun} \title{set the app ready to run} \usage{ appReadyToRun(app = getApp(), ui = app$ui) } \description{ set the app ready to run }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MARX_functions.R \name{mixed} \alias{mixed} \alias{mixed.default} \alias{print.mixed} \alias{summary.mixed} \alias{mixed.default} \alias{print.mixed} \alias{summary.mixed} \title{The MARX estimation function} \usage{ mixed(y, x, p_C, p_NC) \method{mixed}{default}(y, x, p_C, p_NC) \method{print}{mixed}(x, ...) \method{summary}{mixed}(object, ...) } \arguments{ \item{y}{Data vector of time series observations.} \item{x}{Matrix of data (every column represents one time series). Specify NULL or "not" if not wanted.} \item{p_C}{Number of lags to be included.} \item{p_NC}{Number of leads to be included.} \item{...}{Other parameters.} \item{object}{An object of the class "mixed".} } \value{ An object of class \code{"mixed"} is a list containing the following components: \item{coefficients}{Vector of estimated coefficients.} \item{se}{Standard errors of estimated coefficients.} \item{df.residual}{Degrees of freedom residuals.} \item{residuals}{Residuals.} \item{fitted.values}{Fitted values.} \item{order}{Vector containing (r,s,q), i.e. causal order r, noncausal order s, number of exogenous regressors q.} } \description{ This function allows you to estimate mixed causal-noncausal MARX models by t-MLE (compatible with most functions in lm() class). } \examples{ data <- sim.marx(c('t',1,1), c('t',1,1),100,0.5,0.4,0.3) object <- mixed(data$y, data$x, 1, 1) class(object) <- "mixed" summary(object) } \keyword{causal-noncausal} \keyword{estimation}
/man/mixed.Rd
no_license
alexstihi/MARX
R
false
true
1,609
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MARX_functions.R \name{mixed} \alias{mixed} \alias{mixed.default} \alias{print.mixed} \alias{summary.mixed} \alias{mixed.default} \alias{print.mixed} \alias{summary.mixed} \title{The MARX estimation function} \usage{ mixed(y, x, p_C, p_NC) \method{mixed}{default}(y, x, p_C, p_NC) \method{print}{mixed}(x, ...) \method{summary}{mixed}(object, ...) } \arguments{ \item{y}{Data vector of time series observations.} \item{x}{Matrix of data (every column represents one time series). Specify NULL or "not" if not wanted.} \item{p_C}{Number of lags to be included.} \item{p_NC}{Number of leads to be included.} \item{...}{Other parameters.} \item{object}{An object of the class "mixed".} } \value{ An object of class \code{"mixed"} is a list containing the following components: \item{coefficients}{Vector of estimated coefficients.} \item{se}{Standard errors of estimated coefficients.} \item{df.residual}{Degrees of freedom residuals.} \item{residuals}{Residuals.} \item{fitted.values}{Fitted values.} \item{order}{Vector containing (r,s,q), i.e. causal order r, noncausal order s, number of exogenous regressors q.} } \description{ This function allows you to estimate mixed causal-noncausal MARX models by t-MLE (compatible with most functions in lm() class). } \examples{ data <- sim.marx(c('t',1,1), c('t',1,1),100,0.5,0.4,0.3) object <- mixed(data$y, data$x, 1, 1) class(object) <- "mixed" summary(object) } \keyword{causal-noncausal} \keyword{estimation}
dataFileName <- "data\\household_power_consumption.txt" powerData <- read.table(dataFileName, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") #head(powerData) subData <- powerData[powerData$Date %in% c("1/2/2007","2/2/2007"),] #head(subData) day <- strptime(paste(subData$Date,subData$Time,sep=" "),"%d/%m/%Y %H:%M:%S") globalActivePower <- as.numeric(subData$Global_active_power) sub_metering_1 <- as.numeric(subData$Sub_metering_1) sub_metering_2 <- as.numeric(subData$Sub_metering_2) sub_metering_3 <- as.numeric(subData$Sub_metering_3) png("plot3.png", width=480, height=480) #create line plot 1 for sub metering 1 plot(day, sub_metering_1, type="l", ylab="Energy Submetering", xlab="") #create line plot 1 for sub metering 2 lines(day, sub_metering_2, type="l", col="red") #create line plot 1 for sub metering 3 lines(day, sub_metering_3, type="l", col="blue") #add legend for sub metering legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue")) #shutdown graphic device dev.off()
/plot3.R
no_license
minesweeper222/ExData_Plotting1
R
false
false
1,073
r
dataFileName <- "data\\household_power_consumption.txt" powerData <- read.table(dataFileName, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") #head(powerData) subData <- powerData[powerData$Date %in% c("1/2/2007","2/2/2007"),] #head(subData) day <- strptime(paste(subData$Date,subData$Time,sep=" "),"%d/%m/%Y %H:%M:%S") globalActivePower <- as.numeric(subData$Global_active_power) sub_metering_1 <- as.numeric(subData$Sub_metering_1) sub_metering_2 <- as.numeric(subData$Sub_metering_2) sub_metering_3 <- as.numeric(subData$Sub_metering_3) png("plot3.png", width=480, height=480) #create line plot 1 for sub metering 1 plot(day, sub_metering_1, type="l", ylab="Energy Submetering", xlab="") #create line plot 1 for sub metering 2 lines(day, sub_metering_2, type="l", col="red") #create line plot 1 for sub metering 3 lines(day, sub_metering_3, type="l", col="blue") #add legend for sub metering legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue")) #shutdown graphic device dev.off()
## Programming Assignment #2 Lexical Scoping ## FUNCTION: makeCacheMatrix ## DESCRIPTION:creates a special "matrix" object that can cache its inverse makeCacheMatrix <- function(v_matrix = matrix()) { v_inverse <- NULL set <- function(x) { v_matrix <<- x ; v_inverse <<- NULL ; } get <- function() return(v_matrix) ; setinv <- function(inv) v_inverse <<- inv ; getinv <- function() return(v_inverse) ; return(list(set = set, get = get, setinv = setinv, getinv = getinv)) } ## Programming Assignment #2 Lexical Scoping ## FUNCTION: cacheSolve ## DESCRIPTION:This function computes the inverse of the special ## "matrix" returned by makeCacheMatrix. ## If the inverse has already been calculated ## (and the matrix has not changed), then the cachesolve ## should retrieve the inverse from the cache. cacheSolve <- function(v_matrix, ...) { v_inverse <- v_matrix$getinv() if(!is.null(v_inverse)) { message("get the data in the cache") ## Return a matrix that is the inverse of 'x' return(v_inverse) } data <- v_matrix$get() v_inverse <- solve(data, ...) v_matrix$setinv(v_inverse) return(v_inverse) }
/cachematrix.R
no_license
timsharp/ProgrammingAssignment2
R
false
false
1,282
r
## Programming Assignment #2 Lexical Scoping ## FUNCTION: makeCacheMatrix ## DESCRIPTION:creates a special "matrix" object that can cache its inverse makeCacheMatrix <- function(v_matrix = matrix()) { v_inverse <- NULL set <- function(x) { v_matrix <<- x ; v_inverse <<- NULL ; } get <- function() return(v_matrix) ; setinv <- function(inv) v_inverse <<- inv ; getinv <- function() return(v_inverse) ; return(list(set = set, get = get, setinv = setinv, getinv = getinv)) } ## Programming Assignment #2 Lexical Scoping ## FUNCTION: cacheSolve ## DESCRIPTION:This function computes the inverse of the special ## "matrix" returned by makeCacheMatrix. ## If the inverse has already been calculated ## (and the matrix has not changed), then the cachesolve ## should retrieve the inverse from the cache. cacheSolve <- function(v_matrix, ...) { v_inverse <- v_matrix$getinv() if(!is.null(v_inverse)) { message("get the data in the cache") ## Return a matrix that is the inverse of 'x' return(v_inverse) } data <- v_matrix$get() v_inverse <- solve(data, ...) v_matrix$setinv(v_inverse) return(v_inverse) }
# dat <- read.csv('RenegadesHistoryFormatted.csv') # weekly = F # Season = c(2016, 2017) # statcat = 'HR' # playoffs = F # best = T # numshow = 10 # ownerfilter = 'All' # franchisefilter = 'All' records.func <- function(dat = dat, weekly = T, season = c(2011:2017), statcat = 'All', playoffs = F, best = T, numshow = 5, ownerfilter = 'All', franchisefilter = 'All'){ dat <- dat %>% filter(AllStar == 0) %>% mutate(Luck = Wins - xWins) dat <- dat %>% filter(ifelse(rep(ownerfilter, nrow(dat)) == 'All', TRUE, TeamOwner == ownerfilter)) dat <- dat %>% filter(ifelse(rep(franchisefilter, nrow(dat)) == 'All', TRUE, CurrentName == franchisefilter)) if(!weekly){ dat <- dat %>% filter(Playoffs == as.numeric(playoffs)) %>% group_by(Season, Team) %>% summarize(Owner = TeamOwner[1], R = sum(R), HR = sum(HR), RBI = sum(RBI), SB = sum(SB), OBP = mean(OBP), SLG = mean(SLG), K = sum(K), QS = sum(QS), W = sum(W), SV = sum(SV), ERA = mean(ERA), WHIP = mean(WHIP), Luck = sum(Luck)) %>% ungroup() return(dat %>% select_(.dots = c('Team', 'Season', 'Owner', statcat)) %>% arrange_(ifelse((statcat %in% c('WHIP', 'ERA') & best) | (!(statcat %in% c('WHIP', 'ERA')) & !best), statcat, paste0('desc(', statcat, ')'))) %>% filter(Season %in% season) %>% filter(row_number() <= numshow)) } else{ if(statcat == 'All'){ if(playoffs){ ranks <- data.frame(Team = dat$Team[dat$Playoffs == 1 & dat$Season %in% season], Season = dat$Season[dat$Playoffs == 1 & dat$Season %in% season], Week = dat$Week[dat$Playoffs == 1 & dat$Season %in% season], sapply(dat %>% filter(Playoffs == 1 & Season %in% season) %>% select(R:SV), rank), sapply(dat %>% filter(dat$Playoffs == 1) %>% select(ERA, WHIP), function(x) nrow(dat) + 1 - rank(x))) ranks[['Score']] <- apply(ranks %>% select(R:WHIP), 1, sum) return(ranks %>% arrange_(ifelse(best, 'desc(Score)', 'Score')) %>% filter(row_number() <= numshow) %>% select(Team, Season, Week, Score) %>% left_join(dat %>% select(Team, Season, Week, Owner = TeamOwner, R:WHIP), c('Team', 
'Season', 'Week')) %>% mutate(WinPct = Score / (nrow(dat[dat$Playoffs == as.numeric(playoffs) & dat$Season %in% season,]) * 12)) %>% select(-Score)) }else{ ranks <- data.frame(Team = dat$Team[dat$Playoffs == 0 & dat$Season %in% season], Season = dat$Season[dat$Playoffs == 0 & dat$Season %in% season], Week = dat$Week[dat$Playoffs == 0 & dat$Season %in% season], sapply(dat %>% filter(Playoffs == 0 & Season %in% season) %>% select(R:SV), rank), sapply(dat %>% filter(Playoffs == 0 & Season %in% season) %>% select(ERA, WHIP), function(x) nrow(dat[dat$Playoffs == as.numeric(playoffs) & dat$Season %in% season,]) + 1 - rank(x))) ranks[['Score']] <- apply(ranks %>% select(R:WHIP), 1, sum) return(ranks %>% arrange_(ifelse(best, 'desc(Score)', 'Score')) %>% filter(row_number() <= numshow) %>% select(Team, Season, Week, Score) %>% left_join(dat %>% select(Team, Season, Week, Owner = TeamOwner, R:WHIP), c('Team', 'Season', 'Week')) %>% mutate(WinPct = Score / (nrow(dat[dat$Playoffs == as.numeric(playoffs) & dat$Season %in% season,]) * 12)) %>% select(-Score)) } } else{ if(playoffs){ return(dat %>% filter(Playoffs == 1 & Season %in% season) %>% select_(.dots = c('Team', 'Season', 'Week', statcat)) %>% arrange_(ifelse((statcat %in% c('WHIP', 'ERA') & best) | (!(statcat %in% c('WHIP', 'ERA')) & !best), statcat, paste0('desc(', statcat, ')'))) %>% filter(row_number() <= numshow)) }else{ return(dat %>% filter(Playoffs == 0 & Season %in% season) %>% select_(.dots = c('Team', 'Season', 'Week', statcat)) %>% arrange_(ifelse((statcat %in% c('WHIP', 'ERA') & best) | (!(statcat %in% c('WHIP', 'ERA')) & !best), statcat, paste0('desc(', statcat, ')'))) %>% filter(row_number() <= numshow)) } } } } # records.func(dat = dat, # weekly = T, # Season = 2016, # statcat = 'R', # playoffs = F, # best = T, # numshow = 5)
/RecordsFunction.R
no_license
mattyanselmo/RenegadesHistoryApp
R
false
false
5,516
r
# dat <- read.csv('RenegadesHistoryFormatted.csv') # weekly = F # Season = c(2016, 2017) # statcat = 'HR' # playoffs = F # best = T # numshow = 10 # ownerfilter = 'All' # franchisefilter = 'All' records.func <- function(dat = dat, weekly = T, season = c(2011:2017), statcat = 'All', playoffs = F, best = T, numshow = 5, ownerfilter = 'All', franchisefilter = 'All'){ dat <- dat %>% filter(AllStar == 0) %>% mutate(Luck = Wins - xWins) dat <- dat %>% filter(ifelse(rep(ownerfilter, nrow(dat)) == 'All', TRUE, TeamOwner == ownerfilter)) dat <- dat %>% filter(ifelse(rep(franchisefilter, nrow(dat)) == 'All', TRUE, CurrentName == franchisefilter)) if(!weekly){ dat <- dat %>% filter(Playoffs == as.numeric(playoffs)) %>% group_by(Season, Team) %>% summarize(Owner = TeamOwner[1], R = sum(R), HR = sum(HR), RBI = sum(RBI), SB = sum(SB), OBP = mean(OBP), SLG = mean(SLG), K = sum(K), QS = sum(QS), W = sum(W), SV = sum(SV), ERA = mean(ERA), WHIP = mean(WHIP), Luck = sum(Luck)) %>% ungroup() return(dat %>% select_(.dots = c('Team', 'Season', 'Owner', statcat)) %>% arrange_(ifelse((statcat %in% c('WHIP', 'ERA') & best) | (!(statcat %in% c('WHIP', 'ERA')) & !best), statcat, paste0('desc(', statcat, ')'))) %>% filter(Season %in% season) %>% filter(row_number() <= numshow)) } else{ if(statcat == 'All'){ if(playoffs){ ranks <- data.frame(Team = dat$Team[dat$Playoffs == 1 & dat$Season %in% season], Season = dat$Season[dat$Playoffs == 1 & dat$Season %in% season], Week = dat$Week[dat$Playoffs == 1 & dat$Season %in% season], sapply(dat %>% filter(Playoffs == 1 & Season %in% season) %>% select(R:SV), rank), sapply(dat %>% filter(dat$Playoffs == 1) %>% select(ERA, WHIP), function(x) nrow(dat) + 1 - rank(x))) ranks[['Score']] <- apply(ranks %>% select(R:WHIP), 1, sum) return(ranks %>% arrange_(ifelse(best, 'desc(Score)', 'Score')) %>% filter(row_number() <= numshow) %>% select(Team, Season, Week, Score) %>% left_join(dat %>% select(Team, Season, Week, Owner = TeamOwner, R:WHIP), c('Team', 
'Season', 'Week')) %>% mutate(WinPct = Score / (nrow(dat[dat$Playoffs == as.numeric(playoffs) & dat$Season %in% season,]) * 12)) %>% select(-Score)) }else{ ranks <- data.frame(Team = dat$Team[dat$Playoffs == 0 & dat$Season %in% season], Season = dat$Season[dat$Playoffs == 0 & dat$Season %in% season], Week = dat$Week[dat$Playoffs == 0 & dat$Season %in% season], sapply(dat %>% filter(Playoffs == 0 & Season %in% season) %>% select(R:SV), rank), sapply(dat %>% filter(Playoffs == 0 & Season %in% season) %>% select(ERA, WHIP), function(x) nrow(dat[dat$Playoffs == as.numeric(playoffs) & dat$Season %in% season,]) + 1 - rank(x))) ranks[['Score']] <- apply(ranks %>% select(R:WHIP), 1, sum) return(ranks %>% arrange_(ifelse(best, 'desc(Score)', 'Score')) %>% filter(row_number() <= numshow) %>% select(Team, Season, Week, Score) %>% left_join(dat %>% select(Team, Season, Week, Owner = TeamOwner, R:WHIP), c('Team', 'Season', 'Week')) %>% mutate(WinPct = Score / (nrow(dat[dat$Playoffs == as.numeric(playoffs) & dat$Season %in% season,]) * 12)) %>% select(-Score)) } } else{ if(playoffs){ return(dat %>% filter(Playoffs == 1 & Season %in% season) %>% select_(.dots = c('Team', 'Season', 'Week', statcat)) %>% arrange_(ifelse((statcat %in% c('WHIP', 'ERA') & best) | (!(statcat %in% c('WHIP', 'ERA')) & !best), statcat, paste0('desc(', statcat, ')'))) %>% filter(row_number() <= numshow)) }else{ return(dat %>% filter(Playoffs == 0 & Season %in% season) %>% select_(.dots = c('Team', 'Season', 'Week', statcat)) %>% arrange_(ifelse((statcat %in% c('WHIP', 'ERA') & best) | (!(statcat %in% c('WHIP', 'ERA')) & !best), statcat, paste0('desc(', statcat, ')'))) %>% filter(row_number() <= numshow)) } } } } # records.func(dat = dat, # weekly = T, # Season = 2016, # statcat = 'R', # playoffs = F, # best = T, # numshow = 5)
# PLOT 1 # STEP 1: reading the data # Dataset: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip # Missing values are coded as ?. url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" temp <- tempfile() download.file(url, temp) unzip(temp, "household_power_consumption.txt") data <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings = "?") unlink(temp) names(data) # STEP 2: Optimize and subset dataset # Convert the Date and Time variables to Date/Time classes in R using the 𝚜𝚝𝚛𝚙𝚝𝚒𝚖𝚎() and 𝚊𝚜.𝙳𝚊𝚝𝚎()functions. data$Date <- as.Date(data$Date, "%d/%m/%Y") data$Time <- strptime(data$Time, "%H:%M:%S") # data$Time <- sub(".* ", "", data$Time) # head(data) # class(data$Time) # Use only data from the dates 2007-02-01 and 2007-02-02. data.02.07 <- subset(data, data$Date == "2007-02-01" | data$Date == "2007-02-02") # STEP 3: Create plot # Create PNG file with a width of 480 pixels and a height of 480 pixels # Name each of the plot files as plot1.png, plot2.png, etc. dev.copy(png,'plot1.png', width = 480, height = 480) # Histogram: Frequency ~ Global Active Power (kilowatts), col = red. "Global Active Power" par(mfrow = c(1,1)) hist(data.02.07$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency") dev.off()
/plot1.R
no_license
lisahlmsch/ExData_Plotting1
R
false
false
1,447
r
# PLOT 1 # STEP 1: reading the data # Dataset: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip # Missing values are coded as ?. url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip" temp <- tempfile() download.file(url, temp) unzip(temp, "household_power_consumption.txt") data <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings = "?") unlink(temp) names(data) # STEP 2: Optimize and subset dataset # Convert the Date and Time variables to Date/Time classes in R using the 𝚜𝚝𝚛𝚙𝚝𝚒𝚖𝚎() and 𝚊𝚜.𝙳𝚊𝚝𝚎()functions. data$Date <- as.Date(data$Date, "%d/%m/%Y") data$Time <- strptime(data$Time, "%H:%M:%S") # data$Time <- sub(".* ", "", data$Time) # head(data) # class(data$Time) # Use only data from the dates 2007-02-01 and 2007-02-02. data.02.07 <- subset(data, data$Date == "2007-02-01" | data$Date == "2007-02-02") # STEP 3: Create plot # Create PNG file with a width of 480 pixels and a height of 480 pixels # Name each of the plot files as plot1.png, plot2.png, etc. dev.copy(png,'plot1.png', width = 480, height = 480) # Histogram: Frequency ~ Global Active Power (kilowatts), col = red. "Global Active Power" par(mfrow = c(1,1)) hist(data.02.07$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency") dev.off()
library(slam) ### Name: crossprod ### Title: Matrix Crossproduct ### Aliases: tcrossprod_simple_triplet_matrix ### crossprod_simple_triplet_matrix matprod_simple_triplet_matrix ### Keywords: algebra array ### ** Examples ## x <- matrix(c(1, 0, 0, 2, 1, 0), nrow = 3) x s <- as.simple_triplet_matrix(x) tcrossprod_simple_triplet_matrix(s, x) ## tcrossprod_simple_triplet_matrix(s) ## tcrossprod_simple_triplet_matrix(s[1L, ], s[2:3, ])
/data/genthat_extracted_code/slam/examples/crossprod.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
445
r
library(slam) ### Name: crossprod ### Title: Matrix Crossproduct ### Aliases: tcrossprod_simple_triplet_matrix ### crossprod_simple_triplet_matrix matprod_simple_triplet_matrix ### Keywords: algebra array ### ** Examples ## x <- matrix(c(1, 0, 0, 2, 1, 0), nrow = 3) x s <- as.simple_triplet_matrix(x) tcrossprod_simple_triplet_matrix(s, x) ## tcrossprod_simple_triplet_matrix(s) ## tcrossprod_simple_triplet_matrix(s[1L, ], s[2:3, ])
program <- function(D_matrix, k=5, cancer_type = cancer_type) { ## ## YOUR CODE BEGINS HERE ## if ( !{ "NMF" %in% installed.packages( ) } ) { install.packages(pkgs = "NMF") } ## we compute the estimation of A for the data set : A_matrix <- NULL if (!is.null(x = D_matrix) ) { if (nrow(D_matrix) < 5000){ print("number of features < 5000") D = D_matrix } else { print("number of features > 5000, variance based feature selection is applied") D = medepir::feature_selection(D_matrix) } res <- NMF::nmf(x = D, rank = k, method = "snmf/r", seed = 1) A <- apply( X = res@fit@H , MARGIN = 2 , FUN = function( x ) { x / sum( x ) } ) A_matrix <- A T_matrix <- res@fit@W remove(list = "res") } ## ## YOUR CODE ENDS HERE ## return( list(A_matrix = A_matrix,T_matrix = T_matrix) ) }
/meteor_files/algorithm_744.r
no_license
cancer-heterogeneity/cancer-heterogeneity.github.io
R
false
false
1,039
r
program <- function(D_matrix, k=5, cancer_type = cancer_type) { ## ## YOUR CODE BEGINS HERE ## if ( !{ "NMF" %in% installed.packages( ) } ) { install.packages(pkgs = "NMF") } ## we compute the estimation of A for the data set : A_matrix <- NULL if (!is.null(x = D_matrix) ) { if (nrow(D_matrix) < 5000){ print("number of features < 5000") D = D_matrix } else { print("number of features > 5000, variance based feature selection is applied") D = medepir::feature_selection(D_matrix) } res <- NMF::nmf(x = D, rank = k, method = "snmf/r", seed = 1) A <- apply( X = res@fit@H , MARGIN = 2 , FUN = function( x ) { x / sum( x ) } ) A_matrix <- A T_matrix <- res@fit@W remove(list = "res") } ## ## YOUR CODE ENDS HERE ## return( list(A_matrix = A_matrix,T_matrix = T_matrix) ) }
library("car") sqrt(vif(z)) > 2 - фактор инфляции дисперсии, если больше 2, то есть вероятность мультиколлинеарности
/multicollinear-check.R
no_license
savraska/Kata
R
false
false
191
r
library("car") sqrt(vif(z)) > 2 - фактор инфляции дисперсии, если больше 2, то есть вероятность мультиколлинеарности
needs("jsonlite"); #When running the code from RStudio, you need to comment the lines above and uncomment the lines bellow # library(jsonlite); getDocumentsWithNgram <-function(ngram, path_users){ con <- file(sprintf("%s/coordinates.json", path_users), encoding = "latin1"); test <- as.character(readLines(con, warn = FALSE)); close(con) test <- iconv(test, to = "utf8") coordinates <- fromJSON(test); doclist <- c(); k <- 1; for (i in 1:length(coordinates[,1])){ if ((grepl(ngram, coordinates[i,'body_preprocessed']))){ doclist[k] <- coordinates[i, 'name']; k <- k +1; } } return(doclist); } setRelevantNgramBatch <- function(ngram, path_users, corpus){ #retrieve docs with ngram doclist <- getDocumentsWithNgram(ngram, path_users); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on focus list - check # #if document is on suggestion list - remove from suggestion + add to focus list # #else if document is on not relevant list - remove from not relevant + add to focus list k <- 1; sugg <- 1; title <- c(); label <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% focus[,'docname']))&&(!(doclist[i] %in% notrelevant[,'docname']))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, 
doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } label[k] <- 1; docsnames[k] <- doclist[i]; close(con); k <- k + 1; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title, "isbase" = FALSE, "label" = label); focus <- rbind(focus, df); write(toJSON(focus, pretty=TRUE), sprintf("%s/focuslist.json", path_users)); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } } setNotRelevantNgramBatch <- function(ngram, path_users, corpus){ #retrieve docs with ngram doclist <- getDocumentsWithNgram(ngram, path_users); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); close(con) con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); close(con) con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on focus list - check # #if document is on suggestion list - remove from suggestion + add to focus list # #else if document is on not relevant list - remove from not relevant + add to focus list k <- 1; sugg <- 1; title <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% 
notrelevant[,'docname'])&&(!(doclist[i] %in% focus[,'docname'])))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } docsnames[k] <- doclist[i]; close(con); k <- k + 1; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title); notrelevant <- rbind(notrelevant, df); write(toJSON(notrelevant, pretty=TRUE), sprintf("%s/notrelevant.json", path_users)); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } } setRelevantSimilarBatch <- function(document, path_users, path_core, corpus, embtech){ source("init.R") doclist <- c(); doclist <- getSimilarDocuments(document, path_core, path_users, embtech); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on focus list - check # #if document is on suggestion list - 
remove from suggestion + add to focus list # #else if document is on not relevant list - remove from not relevant + add to focus list k <- 1; sugg <- 1; title <- c(); label <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% focus[,'docname']))&&(!(doclist[i] %in% notrelevant[,'docname']))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } docsnames[k] <- doclist[i]; label[k] <- 0.5; close(con); k <- k + 1; } } for (i in 1:length(focus[,1])){ if (focus[i,'docname'] == document){ focus[i, 'isbase'] = TRUE; focus[i, 'label'] = 2; break; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title, "isbase" = FALSE, "label" = label); focus <- rbind(focus, df); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } write(toJSON(focus, pretty=TRUE), sprintf("%s/focuslist.json", path_users)); resdf <- data.frame("response" = "success"); return(doclist); } setNotRelevantSimilarBatch <- function(document, path_users, path_core, corpus, embtech){ source("init.R") doclist <- c(); doclist <- getSimilarDocuments(document, path_core, path_users, embtech); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); 
jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on not relevant list - check # #if document is on suggestion list - remove from suggestion + add to not relevant list # #else if document is on not focus list - remove from focus + add to not relevant k <- 1; sugg <- 1; title <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% notrelevant[,'docname']))&&(!(doclist[i] %in% focus[,'docname']))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } docsnames[k] <- doclist[i]; close(con); k <- k + 1; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title); notrelevant <- rbind(notrelevant, df); write(toJSON(notrelevant, pretty=TRUE), sprintf("%s/notrelevant.json", path_users)); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } }
/Server/scripts/labelDocuments.R
no_license
amandagdias/TRIVIR
R
false
false
11,116
r
needs("jsonlite"); #When running the code from RStudio, you need to comment the lines above and uncomment the lines bellow # library(jsonlite); getDocumentsWithNgram <-function(ngram, path_users){ con <- file(sprintf("%s/coordinates.json", path_users), encoding = "latin1"); test <- as.character(readLines(con, warn = FALSE)); close(con) test <- iconv(test, to = "utf8") coordinates <- fromJSON(test); doclist <- c(); k <- 1; for (i in 1:length(coordinates[,1])){ if ((grepl(ngram, coordinates[i,'body_preprocessed']))){ doclist[k] <- coordinates[i, 'name']; k <- k +1; } } return(doclist); } setRelevantNgramBatch <- function(ngram, path_users, corpus){ #retrieve docs with ngram doclist <- getDocumentsWithNgram(ngram, path_users); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on focus list - check # #if document is on suggestion list - remove from suggestion + add to focus list # #else if document is on not relevant list - remove from not relevant + add to focus list k <- 1; sugg <- 1; title <- c(); label <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% focus[,'docname']))&&(!(doclist[i] %in% notrelevant[,'docname']))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, 
doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } label[k] <- 1; docsnames[k] <- doclist[i]; close(con); k <- k + 1; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title, "isbase" = FALSE, "label" = label); focus <- rbind(focus, df); write(toJSON(focus, pretty=TRUE), sprintf("%s/focuslist.json", path_users)); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } } setNotRelevantNgramBatch <- function(ngram, path_users, corpus){ #retrieve docs with ngram doclist <- getDocumentsWithNgram(ngram, path_users); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); close(con) con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); close(con) con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on focus list - check # #if document is on suggestion list - remove from suggestion + add to focus list # #else if document is on not relevant list - remove from not relevant + add to focus list k <- 1; sugg <- 1; title <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% 
notrelevant[,'docname'])&&(!(doclist[i] %in% focus[,'docname'])))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } docsnames[k] <- doclist[i]; close(con); k <- k + 1; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title); notrelevant <- rbind(notrelevant, df); write(toJSON(notrelevant, pretty=TRUE), sprintf("%s/notrelevant.json", path_users)); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } } setRelevantSimilarBatch <- function(document, path_users, path_core, corpus, embtech){ source("init.R") doclist <- c(); doclist <- getSimilarDocuments(document, path_core, path_users, embtech); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on focus list - check # #if document is on suggestion list - 
remove from suggestion + add to focus list # #else if document is on not relevant list - remove from not relevant + add to focus list k <- 1; sugg <- 1; title <- c(); label <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% focus[,'docname']))&&(!(doclist[i] %in% notrelevant[,'docname']))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } docsnames[k] <- doclist[i]; label[k] <- 0.5; close(con); k <- k + 1; } } for (i in 1:length(focus[,1])){ if (focus[i,'docname'] == document){ focus[i, 'isbase'] = TRUE; focus[i, 'label'] = 2; break; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title, "isbase" = FALSE, "label" = label); focus <- rbind(focus, df); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } write(toJSON(focus, pretty=TRUE), sprintf("%s/focuslist.json", path_users)); resdf <- data.frame("response" = "success"); return(doclist); } setNotRelevantSimilarBatch <- function(document, path_users, path_core, corpus, embtech){ source("init.R") doclist <- c(); doclist <- getSimilarDocuments(document, path_core, path_users, embtech); con <- file(sprintf("%s/focuslist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") focus <- fromJSON(jsondata); con <- file(sprintf("%s/notrelevant.json", path_users), encoding = "latin1"); 
jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") notrelevant <- fromJSON(jsondata); con <- file(sprintf("%s/suggestionlist.json", path_users), encoding = "latin1"); jsondata <- as.character(readLines(con, warn = FALSE)); jsondata <- iconv(jsondata, to = "utf8") suggestion <- fromJSON(jsondata); close(con) # #run over each document and check # #if document is not on not relevant list - check # #if document is on suggestion list - remove from suggestion + add to not relevant list # #else if document is on not focus list - remove from focus + add to not relevant k <- 1; sugg <- 1; title <- c(); docsnames <- c(); for (i in 1:length(doclist)){ if ((!(doclist[i] %in% notrelevant[,'docname']))&&(!(doclist[i] %in% focus[,'docname']))){ if (length(suggestion) > 0){ if (doclist[i] %in% suggestion[,'docname']){ suggestion <- suggestion[suggestion$docname != doclist[i],]; sugg <- sugg + 1; } } path <- sprintf("%s/%s", corpus, doclist[i]); con <- file(path); lines <- as.character(readLines(con, warn = FALSE)); for (j in 1:length(lines)){ if (lines[j] == "") break; } if ((basename(corpus) == "cbr ilp ir son")||(basename(corpus) == "demo")){ titlelines <- lines[1:j]; title[k] <- concatenate(titlelines); }else if (basename(corpus) == "WOS all"){ title[k] <- doclist[i]; title[k] <- substr(title[k], 1, nchar(title[k]) - 4) }else{ title[k] <- lines[1]; } docsnames[k] <- doclist[i]; close(con); k <- k + 1; } } if (k > 1){ df <- data.frame("docname" = docsnames, "title" = title); notrelevant <- rbind(notrelevant, df); write(toJSON(notrelevant, pretty=TRUE), sprintf("%s/notrelevant.json", path_users)); if (sugg > 1){ write(toJSON(suggestion, pretty=TRUE), sprintf("%s/suggestionlist.json", path_users)); } } }
#' Rectangles #' #' `geom_rect()` and `geom_tile()` do the same thing, but are #' parameterised differently: `geom_rect()` uses the locations of the four #' corners (`xmin`, `xmax`, `ymin` and `ymax`), while #' `geom_tile()` uses the center of the tile and its size (`x`, #' `y`, `width`, `height`). `geom_raster()` is a high #' performance special case for when all the tiles are the same size. #' #' @eval rd_aesthetics("geom", "tile") #' @inheritParams layer #' @inheritParams geom_point #' @inheritParams geom_segment #' @export #' @examples #' # The most common use for rectangles is to draw a surface. You always want #' # to use geom_raster here because it's so much faster, and produces #' # smaller output when saving to PDF #' ggplot(faithfuld, aes(waiting, eruptions)) + #' geom_raster(aes(fill = density)) #' #' # Interpolation smooths the surface & is most helpful when rendering images. #' ggplot(faithfuld, aes(waiting, eruptions)) + #' geom_raster(aes(fill = density), interpolate = TRUE) #' #' # If you want to draw arbitrary rectangles, use geom_tile() or geom_rect() #' df <- data.frame( #' x = rep(c(2, 5, 7, 9, 12), 2), #' y = rep(c(1, 2), each = 5), #' z = factor(rep(1:5, each = 2)), #' w = rep(diff(c(0, 4, 6, 8, 10, 14)), 2) #' ) #' ggplot(df, aes(x, y)) + #' geom_tile(aes(fill = z), colour = "grey50") #' ggplot(df, aes(x, y, width = w)) + #' geom_tile(aes(fill = z), colour = "grey50") #' ggplot(df, aes(xmin = x - w / 2, xmax = x + w / 2, ymin = y, ymax = y + 1)) + #' geom_rect(aes(fill = z), colour = "grey50") #' #' \donttest{ #' # Justification controls where the cells are anchored #' df <- expand.grid(x = 0:5, y = 0:5) #' set.seed(1) #' df$z <- runif(nrow(df)) #' # default is compatible with geom_tile() #' ggplot(df, aes(x, y, fill = z)) + #' geom_raster() #' # zero padding #' ggplot(df, aes(x, y, fill = z)) + #' geom_raster(hjust = 0, vjust = 0) #' #' # Inspired by the image-density plots of Ken Knoblauch #' cars <- ggplot(mtcars, aes(mpg, factor(cyl))) #' 
cars + geom_point() #' cars + stat_bin2d(aes(fill = after_stat(count)), binwidth = c(3,1)) #' cars + stat_bin2d(aes(fill = after_stat(density)), binwidth = c(3,1)) #' #' cars + #' stat_density( #' aes(fill = after_stat(density)), #' geom = "raster", #' position = "identity" #' ) #' cars + #' stat_density( #' aes(fill = after_stat(count)), #' geom = "raster", #' position = "identity" #' ) #' } geom_tile <- function(mapping = NULL, data = NULL, stat = "identity", position = "identity", ..., linejoin = "mitre", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE) { layer( data = data, mapping = mapping, stat = stat, geom = GeomTile, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list2( linejoin = linejoin, na.rm = na.rm, ... ) ) } #' @rdname ggplot2-ggproto #' @format NULL #' @usage NULL #' @export #' @include geom-rect.r GeomTile <- ggproto("GeomTile", GeomRect, extra_params = c("na.rm"), setup_data = function(data, params) { data$width <- data$width %||% params$width %||% resolution(data$x, FALSE) data$height <- data$height %||% params$height %||% resolution(data$y, FALSE) transform(data, xmin = x - width / 2, xmax = x + width / 2, width = NULL, ymin = y - height / 2, ymax = y + height / 2, height = NULL ) }, default_aes = aes(fill = "grey20", colour = NA, linewidth = 0.1, linetype = 1, alpha = NA, width = NA, height = NA), required_aes = c("x", "y"), # These aes columns are created by setup_data(). They need to be listed here so # that GeomRect$handle_na() properly removes any bars that fall outside the defined # limits, not just those for which x and y are outside the limits non_missing_aes = c("xmin", "xmax", "ymin", "ymax"), draw_key = draw_key_polygon )
/R/geom-tile.r
permissive
zawkzaw/ggplot2
R
false
false
4,027
r
#' Rectangles #' #' `geom_rect()` and `geom_tile()` do the same thing, but are #' parameterised differently: `geom_rect()` uses the locations of the four #' corners (`xmin`, `xmax`, `ymin` and `ymax`), while #' `geom_tile()` uses the center of the tile and its size (`x`, #' `y`, `width`, `height`). `geom_raster()` is a high #' performance special case for when all the tiles are the same size. #' #' @eval rd_aesthetics("geom", "tile") #' @inheritParams layer #' @inheritParams geom_point #' @inheritParams geom_segment #' @export #' @examples #' # The most common use for rectangles is to draw a surface. You always want #' # to use geom_raster here because it's so much faster, and produces #' # smaller output when saving to PDF #' ggplot(faithfuld, aes(waiting, eruptions)) + #' geom_raster(aes(fill = density)) #' #' # Interpolation smooths the surface & is most helpful when rendering images. #' ggplot(faithfuld, aes(waiting, eruptions)) + #' geom_raster(aes(fill = density), interpolate = TRUE) #' #' # If you want to draw arbitrary rectangles, use geom_tile() or geom_rect() #' df <- data.frame( #' x = rep(c(2, 5, 7, 9, 12), 2), #' y = rep(c(1, 2), each = 5), #' z = factor(rep(1:5, each = 2)), #' w = rep(diff(c(0, 4, 6, 8, 10, 14)), 2) #' ) #' ggplot(df, aes(x, y)) + #' geom_tile(aes(fill = z), colour = "grey50") #' ggplot(df, aes(x, y, width = w)) + #' geom_tile(aes(fill = z), colour = "grey50") #' ggplot(df, aes(xmin = x - w / 2, xmax = x + w / 2, ymin = y, ymax = y + 1)) + #' geom_rect(aes(fill = z), colour = "grey50") #' #' \donttest{ #' # Justification controls where the cells are anchored #' df <- expand.grid(x = 0:5, y = 0:5) #' set.seed(1) #' df$z <- runif(nrow(df)) #' # default is compatible with geom_tile() #' ggplot(df, aes(x, y, fill = z)) + #' geom_raster() #' # zero padding #' ggplot(df, aes(x, y, fill = z)) + #' geom_raster(hjust = 0, vjust = 0) #' #' # Inspired by the image-density plots of Ken Knoblauch #' cars <- ggplot(mtcars, aes(mpg, factor(cyl))) #' 
cars + geom_point() #' cars + stat_bin2d(aes(fill = after_stat(count)), binwidth = c(3,1)) #' cars + stat_bin2d(aes(fill = after_stat(density)), binwidth = c(3,1)) #' #' cars + #' stat_density( #' aes(fill = after_stat(density)), #' geom = "raster", #' position = "identity" #' ) #' cars + #' stat_density( #' aes(fill = after_stat(count)), #' geom = "raster", #' position = "identity" #' ) #' } geom_tile <- function(mapping = NULL, data = NULL, stat = "identity", position = "identity", ..., linejoin = "mitre", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE) { layer( data = data, mapping = mapping, stat = stat, geom = GeomTile, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list2( linejoin = linejoin, na.rm = na.rm, ... ) ) } #' @rdname ggplot2-ggproto #' @format NULL #' @usage NULL #' @export #' @include geom-rect.r GeomTile <- ggproto("GeomTile", GeomRect, extra_params = c("na.rm"), setup_data = function(data, params) { data$width <- data$width %||% params$width %||% resolution(data$x, FALSE) data$height <- data$height %||% params$height %||% resolution(data$y, FALSE) transform(data, xmin = x - width / 2, xmax = x + width / 2, width = NULL, ymin = y - height / 2, ymax = y + height / 2, height = NULL ) }, default_aes = aes(fill = "grey20", colour = NA, linewidth = 0.1, linetype = 1, alpha = NA, width = NA, height = NA), required_aes = c("x", "y"), # These aes columns are created by setup_data(). They need to be listed here so # that GeomRect$handle_na() properly removes any bars that fall outside the defined # limits, not just those for which x and y are outside the limits non_missing_aes = c("xmin", "xmax", "ymin", "ymax"), draw_key = draw_key_polygon )
library(caret) library(mlbench) library(magrittr) data(Sonar) set.seed(107) in_train <- createDataPartition(y = Sonar$Class, p = 0.75, list = FALSE) training <- Sonar[in_train, ] testing <- Sonar[-in_train, ] ctrl <- trainControl(method = "repeatedcv", repeats = 3, classProbs = TRUE, summaryFunction = twoClassSummary ) # See names(getModelInfo()) for potential models # or http://topepo.github.io/caret/bytag.html model <- train(Class ~ ., data = training, method = "pls", preProc = c("center", "scale"), trControl = ctrl, metric = "ROC", tuneLength = 15 ) model plot(model) predictions <- predict(model, newdata = testing) predictions_prob <- predict(model, newdata = testing, type = "prob") accuracy <- confusionMatrix(data = predictions, testing$Class)
/classifier.R
no_license
GCDigitalFellows/WebScraping
R
false
false
956
r
library(caret) library(mlbench) library(magrittr) data(Sonar) set.seed(107) in_train <- createDataPartition(y = Sonar$Class, p = 0.75, list = FALSE) training <- Sonar[in_train, ] testing <- Sonar[-in_train, ] ctrl <- trainControl(method = "repeatedcv", repeats = 3, classProbs = TRUE, summaryFunction = twoClassSummary ) # See names(getModelInfo()) for potential models # or http://topepo.github.io/caret/bytag.html model <- train(Class ~ ., data = training, method = "pls", preProc = c("center", "scale"), trControl = ctrl, metric = "ROC", tuneLength = 15 ) model plot(model) predictions <- predict(model, newdata = testing) predictions_prob <- predict(model, newdata = testing, type = "prob") accuracy <- confusionMatrix(data = predictions, testing$Class)
str(HBAT) designMat <- HBAT[,c(2, 4, 14:18)] str(designMat) attach(designMat) sex = factor(sex) #factor 지정 JP = factor(JP) #일변량 이원배치분산분석 summary(aov(JS1 ~ sex+JP+sex:JP, data=designMat)) summary(aov(JS2 ~ sex+JP+sex:JP, data=designMat)) summary(aov(JS3 ~ sex+JP+sex:JP, data=designMat)) summary(aov(JS4 ~ sex+JP+sex:JP, data=designMat)) summary(aov(JS5 ~ sex+JP+sex:JP, data=designMat)) # 상호작용 그림 par(mfrow=c(1,2)) interaction.plot(sex, JP, JS1, type="b", col=c(1:3), leg.bty = "o", leg.bg = "beige", lwd=2, pch = c(18, 24, 22), xlab = "Sex", ylab = "Job Performance", main = "Interaction Plot : JS1") interaction.plot(JP.f, SEX.f, JS1, type="b", col=c(1:3), leg.bty = "o", leg.bg = "beige", lwd=2, pch = c(18, 24, 22), ylab = "Sex", xlab = "Job Performance", main = "Interaction Plot : JS1") interaction.plot(SEX.f, JP.f, JS2, type="b", col=c(1:3), leg.bty = "o", leg.bg = "beige", lwd=2, pch = c(18, 24, 22), xlab = "Sex", ylab = "Job Performance", main = "Interaction Plot : JS2") interaction.plot(SEX.f, JP.f, JS3, type="b", col=c(1:3), leg.bty = "o", leg.bg = "beige", lwd=2, pch = c(18, 24, 22), xlab = "Sex", ylab = "Job Performance", main = "Interaction Plot : JS3") interaction.plot(SEX.f, JP.f, JS4, type="b", col=c(1:3), leg.bty = "o", leg.bg = "beige", lwd=2, pch = c(18, 24, 22), xlab = "Sex", ylab = "Job Performance", main = "Interaction Plot : JS4") interaction.plot(SEX.f, JP.f, JS5, type="b", col=c(1:3), leg.bty = "o", leg.bg = "beige", lwd=2, pch = c(18, 24, 22), xlab = "Sex", ylab = "Job Performance", main = "Interaction Plot : JS5") # 다중비교 실시 TukeyHSD(aov(JS2 ~ factor(SEX.f)+factor(JP.f)+factor(SEX.f)*factor(JP.f), data=designMat)) # 다변량 이원배치분산분석 JS = cbind(JS1, JS2, JS3, JS4, JS5) #응답벡터 fit = manova(JS ~ SEX.f + JP.f + SEX.f:JP.f) summary(fit, test="Wilks") # Wilks' lambda summary(fit, test="Pillai") # Pillai's trace summary(fit, test="Roy") # Roy's grestest root summary(fit, test="Hotelling") # Hotelling-Lawley trace #공분산행렬의 동질성 검정 install.packages("biotools") 
install.packages("rpanel") install.packages("tcltk") install.packages("BWidget") library(biotools) .libPaths() install.packages("psych") install.packages("GPArotation") library(psych) library(GPArotation) data(Thurstone) fit1 <- fa(Thurstone, nfactors = 3, rotate = "oblimin", fm="ml") print(fit1) install.packages("EFAutilities") library(EFAutilities) Thurstone res1 <- efa(x=NULL, covmat=Thurstone, dist="normal", factors=3, n.obs=213, fm="ml", rtype = 'oblique', rotation = 'CF-varimax', merror="YES", mnames=row.names(Thurstone)) print(res1) res1$ModelF res1$rotatedlow res1$rotatedupper res1$Residual Target1 <- matrix(c(9, 0, 0, 9, 0, 0, 9, 0, 0, 0, 9, 0, 0, 9, 0, 0, 9, 0, 0, 0, 9, 0, 0, 9, 0, 0, 9), ncol=3, byrow=TURE) MWeight1 <- matrix(0, ncol=3, nrow=9) MWeight1[Target1==0] <- 1
/TwoWayMANOVA.R
no_license
SungjiCho/R
R
false
false
3,290
r
# Two-way (sex x job performance) ANOVA / MANOVA of the HBAT job-satisfaction
# items JS1-JS5, followed by exploratory factor analysis examples on the
# Thurstone correlation matrix.
# NOTE(review): assumes the `HBAT` data frame is already loaded -- confirm.

str(HBAT)
designMat <- HBAT[, c(2, 4, 14:18)]
str(designMat)
attach(designMat)  # NOTE: attach() is fragile; kept for parity with the course material

# FIX: the original created factors named `sex`/`JP` but the rest of the
# script referenced undefined `SEX.f`/`JP.f`; define the factors once under
# the names actually used.  Distinct workspace names also avoid being masked
# by the raw columns when `data = designMat` is supplied to aov(), which
# silently undid the factor conversion in the original.
SEX.f <- factor(sex)
JP.f  <- factor(JP)

# Univariate two-way ANOVAs with interaction, one per satisfaction item
summary(aov(JS1 ~ SEX.f + JP.f + SEX.f:JP.f, data = designMat))
summary(aov(JS2 ~ SEX.f + JP.f + SEX.f:JP.f, data = designMat))
summary(aov(JS3 ~ SEX.f + JP.f + SEX.f:JP.f, data = designMat))
summary(aov(JS4 ~ SEX.f + JP.f + SEX.f:JP.f, data = designMat))
summary(aov(JS5 ~ SEX.f + JP.f + SEX.f:JP.f, data = designMat))

# Interaction plots: cell means of each JS item by sex and job performance
par(mfrow = c(1, 2))
interaction.plot(SEX.f, JP.f, JS1, type = "b", col = c(1:3),
                 leg.bty = "o", leg.bg = "beige", lwd = 2,
                 pch = c(18, 24, 22),
                 xlab = "Sex", ylab = "Job Performance",
                 main = "Interaction Plot : JS1")
interaction.plot(JP.f, SEX.f, JS1, type = "b", col = c(1:3),
                 leg.bty = "o", leg.bg = "beige", lwd = 2,
                 pch = c(18, 24, 22),
                 ylab = "Sex", xlab = "Job Performance",
                 main = "Interaction Plot : JS1")
interaction.plot(SEX.f, JP.f, JS2, type = "b", col = c(1:3),
                 leg.bty = "o", leg.bg = "beige", lwd = 2,
                 pch = c(18, 24, 22),
                 xlab = "Sex", ylab = "Job Performance",
                 main = "Interaction Plot : JS2")
interaction.plot(SEX.f, JP.f, JS3, type = "b", col = c(1:3),
                 leg.bty = "o", leg.bg = "beige", lwd = 2,
                 pch = c(18, 24, 22),
                 xlab = "Sex", ylab = "Job Performance",
                 main = "Interaction Plot : JS3")
interaction.plot(SEX.f, JP.f, JS4, type = "b", col = c(1:3),
                 leg.bty = "o", leg.bg = "beige", lwd = 2,
                 pch = c(18, 24, 22),
                 xlab = "Sex", ylab = "Job Performance",
                 main = "Interaction Plot : JS4")
interaction.plot(SEX.f, JP.f, JS5, type = "b", col = c(1:3),
                 leg.bty = "o", leg.bg = "beige", lwd = 2,
                 pch = c(18, 24, 22),
                 xlab = "Sex", ylab = "Job Performance",
                 main = "Interaction Plot : JS5")

# Multiple comparisons (Tukey HSD); SEX.f/JP.f are already factors, so the
# original's redundant factor(factor(...)) wrappers are unnecessary.
TukeyHSD(aov(JS2 ~ SEX.f + JP.f + SEX.f:JP.f, data = designMat))

# Multivariate two-way MANOVA on all five items at once
JS <- cbind(JS1, JS2, JS3, JS4, JS5)  # response matrix
fit <- manova(JS ~ SEX.f + JP.f + SEX.f:JP.f)
summary(fit, test = "Wilks")      # Wilks' lambda
summary(fit, test = "Pillai")     # Pillai's trace
summary(fit, test = "Roy")        # Roy's greatest root
summary(fit, test = "Hotelling")  # Hotelling-Lawley trace

# Homogeneity of covariance matrices (Box's M is in biotools)
install.packages("biotools")
install.packages("rpanel")
install.packages("tcltk")
install.packages("BWidget")
library(biotools)
.libPaths()

# Exploratory factor analysis ----
install.packages("psych")
install.packages("GPArotation")
library(psych)
library(GPArotation)
data(Thurstone)
fit1 <- fa(Thurstone, nfactors = 3, rotate = "oblimin", fm = "ml")
print(fit1)

install.packages("EFAutilities")
library(EFAutilities)
Thurstone
res1 <- efa(x = NULL, covmat = Thurstone, dist = "normal", factors = 3,
            n.obs = 213, fm = "ml", rtype = 'oblique',
            rotation = 'CF-varimax', merror = "YES",
            mnames = row.names(Thurstone))
print(res1)
res1$ModelF
res1$rotatedlow
res1$rotatedupper
res1$Residual

# Target rotation setup: 9 = free loading, 0 = fixed to zero.
# FIX: `byrow = TURE` -> `byrow = TRUE` (typo; raised
# "object 'TURE' not found" at runtime).
Target1 <- matrix(c(9, 0, 0, 9, 0, 0, 9, 0, 0,
                    0, 9, 0, 0, 9, 0, 0, 9, 0,
                    0, 0, 9, 0, 0, 9, 0, 0, 9),
                  ncol = 3, byrow = TRUE)
MWeight1 <- matrix(0, ncol = 3, nrow = 9)
MWeight1[Target1 == 0] <- 1
## File Name: tam_args_replace_value.R ## File Version: 0.01 tam_args_replace_value <- function( args , variable=NULL , value=NULL) { if ( ! is.null(variable) ){ args[[ variable ]] <- value } return(args) }
/R/tam_args_replace_value.R
no_license
yaozeyang90/TAM
R
false
false
213
r
## File Name: tam_args_replace_value.R ## File Version: 0.01 tam_args_replace_value <- function( args , variable=NULL , value=NULL) { if ( ! is.null(variable) ){ args[[ variable ]] <- value } return(args) }
# Parse consecutive "name: value" header lines from the rdf line matrix
# `con`, starting at row `pos`, until the `end` keyword is reached.
# When `end == 1` exactly one line is read and parsed (used for the
# single-line units/scale headers).  Values containing ':' are re-joined;
# a line with no value yields NA.  Returns list(data = pairs, position =
# next unread row).
read_rdf_header <- function(con, pos, end) {
  obj <- list()
  repeat {
    line <- con[pos, 1]
    pos <- pos + 1 # advancing line to read
    if(line == end) break
    splitLine <- strsplit(line, ':', fixed = TRUE)[[1]]
    name <- splitLine[1]
    if (length(splitLine) > 1) {
      # drop the single leading space after the colon, if present
      if (substr(splitLine[2], 1, 1) == ' ') {
        splitLine[2] <- substr(splitLine[2], 2, nchar(splitLine[2]))
      }
      # re-join the remainder in case the value itself contained colons
      contents <- paste(splitLine[2:length(splitLine)], collapse = ':')
    } else {
      contents <- NA
    }
    obj[[name]] <- contents
    # 1 passed to this function sometimes; when it is, it forces it to read
    # one line and parse
    if (end == 1) break
  }
  #returns the object
  return(list(data = obj, position = pos))
}

#' Read the initial meta data from the rdf file; this is the descriptor:pair
#' data up through the END_PACKAGE_PREAMBLE keyword. These are read once for
#' each rdf file and there is only one set of meta data regardless of the
#' number of traces.
#' @noRd
read_rdf_meta <- function(rdf.mat, rdf.obj) {
  rdf.tmp <- read_rdf_header(rdf.mat, rdf.obj$position, 'END_PACKAGE_PREAMBLE')
  rdf.obj[['meta']] <- rdf.tmp$data
  rdf.obj$position <- rdf.tmp$position
  return(rdf.obj)
}

# Parse one run (trace) from the rdf line matrix and append it to
# rdf.obj$runs: the run preamble, the vector of timestep labels, then every
# slot ("object") with its units, scale, and numeric values.  Relies on
# rdf.obj$position (current row) and rdf.obj$end_col_i (precomputed row
# indices of the END_COLUMN keywords) as parser state; both are updated and
# returned inside rdf.obj.
read_rdf_run <- function(rdf.mat, rdf.obj) {
  this.run <- length(rdf.obj$runs) + 1
  rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position,'END_RUN_PREAMBLE')
  rdf.obj$runs[[this.run]] <- rdf.tmp$data
  rdf.obj$position <- rdf.tmp$position
  #time steps
  nts <- as.integer(rdf.obj$runs[[this.run]]$time_steps)
  #for non-mrm files (which use "timesteps" rather than "time_steps")
  if (length(nts) == 0) {
    nts <- as.integer(rdf.obj$runs[[this.run]]$timesteps)
  }
  rr <- rdf.obj$position:(rdf.obj$position + nts -1)
  rdf.obj$runs[[this.run]][['times']] <- rdf.mat[rr, 1]
  rdf.obj$position <- rdf.obj$position + nts
  #Series: loop over slots until END_RUN is seen
  nob <- 0
  repeat {
    nob <- nob + 1
    rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position, 'END_SLOT_PREAMBLE')
    rdf.obj$runs[[this.run]][['objects']][[nob]] <- rdf.tmp$data
    rdf.obj$position <- rdf.tmp$position
    # name the object after their object.slot name
    obj.name <- rdf.obj$runs[[this.run]][['objects']][[nob]]$object_name
    slot.name <- rdf.obj$runs[[this.run]][['objects']][[nob]]$slot_name
    name <- paste(obj.name, slot.name, sep = '.')
    names(rdf.obj$runs[[this.run]][['objects']])[nob] <- name
    # read in the extra two single-line header pieces (units, then scale)
    rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position, 1)
    rdf.obj$runs[[this.run]][['objects']][[nob]]$units <- rdf.tmp$data[[1]]
    rdf.obj$position <- rdf.tmp$position
    rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position, 1)
    rdf.obj$runs[[this.run]][['objects']][[nob]]$scale <- rdf.tmp$data[[1]]
    rdf.obj$position <- rdf.tmp$position
    # Figure out when the END_COLUMN keyword shows up
    #rdf_tmp <- read_rdf_header(rdf.mat, rdf.obj$position, "END_COLUMN")
    ec_pos <- Position(function(x) x > rdf.obj$position, rdf.obj$end_col_i)
    ec_i <- rdf.obj$end_col_i[ec_pos] + 1
    # remove the already used indeces so next Position call doesn't have to
    # search for indeces that are already used
    rdf.obj$end_col_i <- rdf.obj$end_col_i[
      (ec_pos + 1):length(rdf.obj$end_col_i)
    ]
    if (ec_i == rdf.obj$position + 2) {
      # must be a scalar slot
      row_nums <- rdf.obj$position
    } else if (ec_i - rdf.obj$position - 1 == nts) {
      # series slot: one value per timestep
      row_nums <- rdf.obj$position:(ec_i - 2)
    } else {
      stop(
        "rdf includes an unexpected number of data points.\n",
        "`read.rdf()` expects the data entries to either be 1, or\n",
        "the number of time steps."
      )
    }
    rdf.obj$runs[[this.run]][['objects']][[nob]]$values <- as.numeric(
      rdf.mat[row_nums, 1]
    )
    rdf.obj$position <- rdf.obj$position + length(row_nums)
    #END_COLUMN,END_SLOT, table slots need support here
    #dummy <- readLines(rdf.con,n=2) # just advances position by 2??
    # skip the END_COLUMN/END_SLOT keyword pair; an extra row is consumed
    # when END_RUN follows
    if (rdf.mat[rdf.obj$position+2,1] == 'END_RUN') {
      rdf.obj$position <- rdf.obj$position + 3
      break
    } else {
      rdf.obj$position <- rdf.obj$position + 2
    }
  }
  return(rdf.obj)
}

#' Read an rdf file into R.
#'
#' `read.rdf()` reads an rdf file into R and formats it as a multi-level list
#' containing all of the metadata included in the rdf file. rdf files are
#' generated by RiverWare and are documented in the
#' [RiverWare documentation](http://riverware.org/PDF/RiverWare/documentation/).
#'
#' `read.rdf()`uses [data.table::fread()] to read in the file, which provides
#' performance benefits as compared to earlier versions of the function.
#'
#' `read.rdf2()` is deprecated and will be removed in a future release.
#'
#' @param iFile The input rdf file that will be read into R.
#' @param rdf Boolean; if `TRUE`, then an rdf object is returned. If `FALSE`,
#'   then a character vector is returned.
#'
#' @return An rdf object or character vector.
#'
#' @examples
#' zz <- read_rdf(system.file(
#'   'extdata/Scenario/ISM1988_2014,2007Dems,IG,Most',
#'   "KeySlots.rdf",
#'   package = "RWDataPlyr"
#' ))
#'
#' @export
read.rdf <- function(iFile, rdf = TRUE) {
  check_rdf_file(iFile)
  rdf.obj <- list()
  # read entire file into memory; one file line per matrix row
  rdf.mat <- as.matrix(data.table::fread(
    iFile,
    sep = '\t',
    header = FALSE,
    data.table = FALSE
  ))
  if (!rdf) {
    return(rdf.mat)
  }
  rdf.obj$position <- 1 # initialize where to read from
  rdf.obj <- read_rdf_meta(rdf.mat, rdf.obj)
  # precompute END_COLUMN locations so read_rdf_run can size each slot
  rdf.obj$end_col_i <- which(rdf.mat == "END_COLUMN")
  # Read each trace/run
  for (i in 1:as.numeric(rdf.obj$meta$number_of_runs)) {
    rdf.obj <- read_rdf_run(rdf.mat, rdf.obj)
  }
  rdf.obj$position <- NULL # remove parser state before returning
  rdf.obj$end_col_i <- NULL
  structure(
    rdf.obj,
    class = "rdf"
  )
}

#' @describeIn read.rdf Deprecated version of `read.rdf()`
#' @export
read.rdf2 <- function(iFile) {
  .Deprecated("read.rdf")
  read.rdf(iFile, rdf = TRUE)
}

#' @rdname read.rdf
#' @export
read_rdf <- read.rdf

# Validate that `file` has an "rdf" extension and exists on disk; errors
# otherwise.  Returns `file` invisibly.
check_rdf_file <- function(file) {
  if (tools::file_ext(file) != "rdf") {
    stop(
      file,
      " does not appear to be an rdf file.",
      call. = FALSE
    )
  }
  if (!file.exists(file)) {
    stop(
      file,
      " does not exist.",
      call. = FALSE
    )
  }
  invisible(file)
}
/R/read_rdf.R
permissive
romainfrancois/RWDataPlyr
R
false
false
6,407
r
# Parse consecutive "name: value" header lines from the rdf line matrix
# `con`, starting at row `pos`, until the `end` keyword is reached.
# When `end == 1` exactly one line is read and parsed (used for the
# single-line units/scale headers).  Values containing ':' are re-joined;
# a line with no value yields NA.  Returns list(data = pairs, position =
# next unread row).
read_rdf_header <- function(con, pos, end) {
  obj <- list()
  repeat {
    line <- con[pos, 1]
    pos <- pos + 1 # advancing line to read
    if(line == end) break
    splitLine <- strsplit(line, ':', fixed = TRUE)[[1]]
    name <- splitLine[1]
    if (length(splitLine) > 1) {
      # drop the single leading space after the colon, if present
      if (substr(splitLine[2], 1, 1) == ' ') {
        splitLine[2] <- substr(splitLine[2], 2, nchar(splitLine[2]))
      }
      # re-join the remainder in case the value itself contained colons
      contents <- paste(splitLine[2:length(splitLine)], collapse = ':')
    } else {
      contents <- NA
    }
    obj[[name]] <- contents
    # 1 passed to this function sometimes; when it is, it forces it to read
    # one line and parse
    if (end == 1) break
  }
  #returns the object
  return(list(data = obj, position = pos))
}

#' Read the initial meta data from the rdf file; this is the descriptor:pair
#' data up through the END_PACKAGE_PREAMBLE keyword. These are read once for
#' each rdf file and there is only one set of meta data regardless of the
#' number of traces.
#' @noRd
read_rdf_meta <- function(rdf.mat, rdf.obj) {
  rdf.tmp <- read_rdf_header(rdf.mat, rdf.obj$position, 'END_PACKAGE_PREAMBLE')
  rdf.obj[['meta']] <- rdf.tmp$data
  rdf.obj$position <- rdf.tmp$position
  return(rdf.obj)
}

# Parse one run (trace) from the rdf line matrix and append it to
# rdf.obj$runs: the run preamble, the vector of timestep labels, then every
# slot ("object") with its units, scale, and numeric values.  Relies on
# rdf.obj$position (current row) and rdf.obj$end_col_i (precomputed row
# indices of the END_COLUMN keywords) as parser state; both are updated and
# returned inside rdf.obj.
read_rdf_run <- function(rdf.mat, rdf.obj) {
  this.run <- length(rdf.obj$runs) + 1
  rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position,'END_RUN_PREAMBLE')
  rdf.obj$runs[[this.run]] <- rdf.tmp$data
  rdf.obj$position <- rdf.tmp$position
  #time steps
  nts <- as.integer(rdf.obj$runs[[this.run]]$time_steps)
  #for non-mrm files (which use "timesteps" rather than "time_steps")
  if (length(nts) == 0) {
    nts <- as.integer(rdf.obj$runs[[this.run]]$timesteps)
  }
  rr <- rdf.obj$position:(rdf.obj$position + nts -1)
  rdf.obj$runs[[this.run]][['times']] <- rdf.mat[rr, 1]
  rdf.obj$position <- rdf.obj$position + nts
  #Series: loop over slots until END_RUN is seen
  nob <- 0
  repeat {
    nob <- nob + 1
    rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position, 'END_SLOT_PREAMBLE')
    rdf.obj$runs[[this.run]][['objects']][[nob]] <- rdf.tmp$data
    rdf.obj$position <- rdf.tmp$position
    # name the object after their object.slot name
    obj.name <- rdf.obj$runs[[this.run]][['objects']][[nob]]$object_name
    slot.name <- rdf.obj$runs[[this.run]][['objects']][[nob]]$slot_name
    name <- paste(obj.name, slot.name, sep = '.')
    names(rdf.obj$runs[[this.run]][['objects']])[nob] <- name
    # read in the extra two single-line header pieces (units, then scale)
    rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position, 1)
    rdf.obj$runs[[this.run]][['objects']][[nob]]$units <- rdf.tmp$data[[1]]
    rdf.obj$position <- rdf.tmp$position
    rdf.tmp <- read_rdf_header(rdf.mat,rdf.obj$position, 1)
    rdf.obj$runs[[this.run]][['objects']][[nob]]$scale <- rdf.tmp$data[[1]]
    rdf.obj$position <- rdf.tmp$position
    # Figure out when the END_COLUMN keyword shows up
    #rdf_tmp <- read_rdf_header(rdf.mat, rdf.obj$position, "END_COLUMN")
    ec_pos <- Position(function(x) x > rdf.obj$position, rdf.obj$end_col_i)
    ec_i <- rdf.obj$end_col_i[ec_pos] + 1
    # remove the already used indeces so next Position call doesn't have to
    # search for indeces that are already used
    rdf.obj$end_col_i <- rdf.obj$end_col_i[
      (ec_pos + 1):length(rdf.obj$end_col_i)
    ]
    if (ec_i == rdf.obj$position + 2) {
      # must be a scalar slot
      row_nums <- rdf.obj$position
    } else if (ec_i - rdf.obj$position - 1 == nts) {
      # series slot: one value per timestep
      row_nums <- rdf.obj$position:(ec_i - 2)
    } else {
      stop(
        "rdf includes an unexpected number of data points.\n",
        "`read.rdf()` expects the data entries to either be 1, or\n",
        "the number of time steps."
      )
    }
    rdf.obj$runs[[this.run]][['objects']][[nob]]$values <- as.numeric(
      rdf.mat[row_nums, 1]
    )
    rdf.obj$position <- rdf.obj$position + length(row_nums)
    #END_COLUMN,END_SLOT, table slots need support here
    #dummy <- readLines(rdf.con,n=2) # just advances position by 2??
    # skip the END_COLUMN/END_SLOT keyword pair; an extra row is consumed
    # when END_RUN follows
    if (rdf.mat[rdf.obj$position+2,1] == 'END_RUN') {
      rdf.obj$position <- rdf.obj$position + 3
      break
    } else {
      rdf.obj$position <- rdf.obj$position + 2
    }
  }
  return(rdf.obj)
}

#' Read an rdf file into R.
#'
#' `read.rdf()` reads an rdf file into R and formats it as a multi-level list
#' containing all of the metadata included in the rdf file. rdf files are
#' generated by RiverWare and are documented in the
#' [RiverWare documentation](http://riverware.org/PDF/RiverWare/documentation/).
#'
#' `read.rdf()`uses [data.table::fread()] to read in the file, which provides
#' performance benefits as compared to earlier versions of the function.
#'
#' `read.rdf2()` is deprecated and will be removed in a future release.
#'
#' @param iFile The input rdf file that will be read into R.
#' @param rdf Boolean; if `TRUE`, then an rdf object is returned. If `FALSE`,
#'   then a character vector is returned.
#'
#' @return An rdf object or character vector.
#'
#' @examples
#' zz <- read_rdf(system.file(
#'   'extdata/Scenario/ISM1988_2014,2007Dems,IG,Most',
#'   "KeySlots.rdf",
#'   package = "RWDataPlyr"
#' ))
#'
#' @export
read.rdf <- function(iFile, rdf = TRUE) {
  check_rdf_file(iFile)
  rdf.obj <- list()
  # read entire file into memory; one file line per matrix row
  rdf.mat <- as.matrix(data.table::fread(
    iFile,
    sep = '\t',
    header = FALSE,
    data.table = FALSE
  ))
  if (!rdf) {
    return(rdf.mat)
  }
  rdf.obj$position <- 1 # initialize where to read from
  rdf.obj <- read_rdf_meta(rdf.mat, rdf.obj)
  # precompute END_COLUMN locations so read_rdf_run can size each slot
  rdf.obj$end_col_i <- which(rdf.mat == "END_COLUMN")
  # Read each trace/run
  for (i in 1:as.numeric(rdf.obj$meta$number_of_runs)) {
    rdf.obj <- read_rdf_run(rdf.mat, rdf.obj)
  }
  rdf.obj$position <- NULL # remove parser state before returning
  rdf.obj$end_col_i <- NULL
  structure(
    rdf.obj,
    class = "rdf"
  )
}

#' @describeIn read.rdf Deprecated version of `read.rdf()`
#' @export
read.rdf2 <- function(iFile) {
  .Deprecated("read.rdf")
  read.rdf(iFile, rdf = TRUE)
}

#' @rdname read.rdf
#' @export
read_rdf <- read.rdf

# Validate that `file` has an "rdf" extension and exists on disk; errors
# otherwise.  Returns `file` invisibly.
check_rdf_file <- function(file) {
  if (tools::file_ext(file) != "rdf") {
    stop(
      file,
      " does not appear to be an rdf file.",
      call. = FALSE
    )
  }
  if (!file.exists(file)) {
    stop(
      file,
      " does not exist.",
      call. = FALSE
    )
  }
  invisible(file)
}
# Plot t-SNE embeddings of bodily-sensation ratings, colored by three
# clustering solutions (DBSCAN, k-means, hierarchical clustering).

# Initialize environment ----
# library() rather than require() so a missing package fails loudly.
libraryBooks <- c("knitr", "tidyverse", "cowplot")
invisible(lapply(libraryBooks, library, character.only = TRUE))
rm(libraryBooks)

# scriptPath <- getwd()
scriptPath <- "~/Documents/GitHub/PHP2511_emotion_project/Analysis"

# Graph aesthetics ----
pnas_theme <- theme_bw(base_size = 10) +
  theme(text = element_text(size = 10),  # increase the font size
        panel.grid = element_blank(),
        axis.ticks = element_blank())    # remove x & y ticks

# Read in data ----
# Map the human-readable sensation labels onto the CamelCase names used in
# the t-SNE coordinate file so the two tables can be joined.  (Replaces 17
# copy-pasted replace() calls; unmatched labels pass through unchanged.)
sensation_renames <- c(
  "Being conscious"                 = "BeingConscious",
  "Being dazzled"                   = "BeingDazzled",
  "Closeness (in social relations)" = "ClosenessInSocialRelations",
  "Feeling nauseous"                = "FeelingNauseous",
  "Feeling pain"                    = "FeelingPain",
  "Feeling touch"                   = "FeelingTouch",
  "Having cold"                     = "HavingCold",
  "Having fever"                    = "HavingFever",
  "Having flu"                      = "HavingFlu",
  "Having headache"                 = "HavingHeadache",
  "Having stomach flu"              = "HavingStomachFlu",
  "Having toothache"                = "HavingToothache",
  "Longing for"                     = "LongingFor",
  "Self-regulation"                 = "SelfRegulation",
  "Sexual arousal"                  = "SexualArousal",
  "Social exclusion"                = "SocialExclusion",
  "Social longing"                  = "SocialLonging"
)

d1 <- read.csv(paste0(scriptPath, "/Data/Exp2Classifications.csv"),
               header = TRUE, stringsAsFactors = FALSE) %>%
  select(-DBSCAN_type) %>%
  mutate(sensations = recode(sensations, !!!sensation_renames))

d2 <- read_csv(paste0(scriptPath, "/Data/Fig2_tSNE_coords.csv")) %>%
  rename(xcoord = Var1, ycoord = Var2, sensations = Row)

# Join coordinates onto classifications and relabel each clustering column
# as an ordered factor of descriptive cluster names.
d0 <- inner_join(d1, d2) %>%
  select(-DBSCAN_labels) %>%
  rename(dbscan = DBSCAN_class, kmeans = KMEANS_class, hc = HC_class) %>%
  mutate(dbscan = as.character(dbscan),
         dbscan = recode(dbscan,
                         "-1" = "Between",
                         "1" = "Negative Emotions",
                         "2" = "Positive Emotions",
                         "3" = "Illness",
                         "4" = "Cognition",
                         "5" = "Homeostasis"),
         dbscan = factor(dbscan,
                         levels = c("Between", "Negative Emotions",
                                    "Positive Emotions", "Illness",
                                    "Cognition", "Homeostasis"))) %>%
  mutate(kmeans = as.character(kmeans),
         kmeans = recode(kmeans,
                         "1" = "Positive Emotions & Cognition",
                         "2" = "Illness",
                         "3" = "Negative Emotions",
                         "4" = "Between",
                         "5" = "Homeostasis"),
         kmeans = factor(kmeans,
                         levels = c("Between", "Negative Emotions",
                                    "Positive Emotions & Cognition",
                                    "Illness", "Homeostasis"))) %>%
  mutate(hc = as.character(hc),
         hc = recode(hc,
                     "1" = "Homeostasis",
                     "2" = "Cognition",
                     "3" = "Illness",
                     "4" = "Negative Emotions",
                     "5" = "Positive Emotions"),
         hc = factor(hc,
                     levels = c("Negative Emotions", "Positive Emotions",
                                "Illness", "Cognition", "Homeostasis")))

# Cluster colors, in factor-level order for each method
feelcolors.dbscan <- c("#636363", "#4292c6", "#de2d26", "#756bb1", "#31a354", "#fdae6b")
feelcolors.kmeans <- c("#636363", "#4292c6", "#de2d26", "#756bb1", "#fdae6b")
feelcolors.hc <- c("#4292c6", "#de2d26", "#756bb1", "#31a354", "#fdae6b")

# Build one labelled t-SNE scatter plot colored by a cluster assignment.
# (Replaces three near-identical copy-pasted ggplot blocks.)
# data    - data frame with xcoord, ycoord, sensations and the cluster column
# cluster - unquoted name of the cluster column
# title   - legend title naming the clustering method
# colors  - vector of cluster colors (in factor-level order)
plot_clusters <- function(data, cluster, title, colors) {
  ggplot(data, aes(x = xcoord, y = ycoord,
                   group = {{ cluster }}, color = {{ cluster }})) +
    geom_point(size = 2) +
    geom_label(aes(label = sensations), nudge_x = 0.25, nudge_y = 0.2,
               size = 2, show.legend = FALSE) +
    scale_color_manual(name = title, values = colors) +
    xlab("t-SNE1") + ylab("t-SNE2") +
    pnas_theme +
    guides(color = guide_legend(override.aes = list(size = 4))) +
    theme(legend.title = element_text(size = 12),
          legend.text = element_text(size = 10),
          axis.text = element_blank(),
          legend.position = "bottom")
}

plot.dbscan <- plot_clusters(d0, dbscan, "DBSCAN", feelcolors.dbscan)
plot.kmeans <- plot_clusters(d0, kmeans, "K-Means", feelcolors.kmeans)
plot.hc <- plot_clusters(d0, hc, "Hierarchical Clustering", feelcolors.hc)

plot_grid(plot.dbscan, plot.kmeans, plot.hc, ncol = 1)
/Analysis/ScratchCode/pt3_JYS_scratch.R
no_license
jpw3/PHP2511_emotion_project
R
false
false
6,426
r
# Plot t-SNE embeddings of bodily-sensation ratings, colored by three
# clustering solutions (DBSCAN, k-means, hierarchical clustering).

# Initialize environment ----
# library() rather than require() so a missing package fails loudly.
libraryBooks <- c("knitr", "tidyverse", "cowplot")
invisible(lapply(libraryBooks, library, character.only = TRUE))
rm(libraryBooks)

# scriptPath <- getwd()
scriptPath <- "~/Documents/GitHub/PHP2511_emotion_project/Analysis"

# Graph aesthetics ----
pnas_theme <- theme_bw(base_size = 10) +
  theme(text = element_text(size = 10),  # increase the font size
        panel.grid = element_blank(),
        axis.ticks = element_blank())    # remove x & y ticks

# Read in data ----
# Map the human-readable sensation labels onto the CamelCase names used in
# the t-SNE coordinate file so the two tables can be joined.  (Replaces 17
# copy-pasted replace() calls; unmatched labels pass through unchanged.)
sensation_renames <- c(
  "Being conscious"                 = "BeingConscious",
  "Being dazzled"                   = "BeingDazzled",
  "Closeness (in social relations)" = "ClosenessInSocialRelations",
  "Feeling nauseous"                = "FeelingNauseous",
  "Feeling pain"                    = "FeelingPain",
  "Feeling touch"                   = "FeelingTouch",
  "Having cold"                     = "HavingCold",
  "Having fever"                    = "HavingFever",
  "Having flu"                      = "HavingFlu",
  "Having headache"                 = "HavingHeadache",
  "Having stomach flu"              = "HavingStomachFlu",
  "Having toothache"                = "HavingToothache",
  "Longing for"                     = "LongingFor",
  "Self-regulation"                 = "SelfRegulation",
  "Sexual arousal"                  = "SexualArousal",
  "Social exclusion"                = "SocialExclusion",
  "Social longing"                  = "SocialLonging"
)

d1 <- read.csv(paste0(scriptPath, "/Data/Exp2Classifications.csv"),
               header = TRUE, stringsAsFactors = FALSE) %>%
  select(-DBSCAN_type) %>%
  mutate(sensations = recode(sensations, !!!sensation_renames))

d2 <- read_csv(paste0(scriptPath, "/Data/Fig2_tSNE_coords.csv")) %>%
  rename(xcoord = Var1, ycoord = Var2, sensations = Row)

# Join coordinates onto classifications and relabel each clustering column
# as an ordered factor of descriptive cluster names.
d0 <- inner_join(d1, d2) %>%
  select(-DBSCAN_labels) %>%
  rename(dbscan = DBSCAN_class, kmeans = KMEANS_class, hc = HC_class) %>%
  mutate(dbscan = as.character(dbscan),
         dbscan = recode(dbscan,
                         "-1" = "Between",
                         "1" = "Negative Emotions",
                         "2" = "Positive Emotions",
                         "3" = "Illness",
                         "4" = "Cognition",
                         "5" = "Homeostasis"),
         dbscan = factor(dbscan,
                         levels = c("Between", "Negative Emotions",
                                    "Positive Emotions", "Illness",
                                    "Cognition", "Homeostasis"))) %>%
  mutate(kmeans = as.character(kmeans),
         kmeans = recode(kmeans,
                         "1" = "Positive Emotions & Cognition",
                         "2" = "Illness",
                         "3" = "Negative Emotions",
                         "4" = "Between",
                         "5" = "Homeostasis"),
         kmeans = factor(kmeans,
                         levels = c("Between", "Negative Emotions",
                                    "Positive Emotions & Cognition",
                                    "Illness", "Homeostasis"))) %>%
  mutate(hc = as.character(hc),
         hc = recode(hc,
                     "1" = "Homeostasis",
                     "2" = "Cognition",
                     "3" = "Illness",
                     "4" = "Negative Emotions",
                     "5" = "Positive Emotions"),
         hc = factor(hc,
                     levels = c("Negative Emotions", "Positive Emotions",
                                "Illness", "Cognition", "Homeostasis")))

# Cluster colors, in factor-level order for each method
feelcolors.dbscan <- c("#636363", "#4292c6", "#de2d26", "#756bb1", "#31a354", "#fdae6b")
feelcolors.kmeans <- c("#636363", "#4292c6", "#de2d26", "#756bb1", "#fdae6b")
feelcolors.hc <- c("#4292c6", "#de2d26", "#756bb1", "#31a354", "#fdae6b")

# Build one labelled t-SNE scatter plot colored by a cluster assignment.
# (Replaces three near-identical copy-pasted ggplot blocks.)
# data    - data frame with xcoord, ycoord, sensations and the cluster column
# cluster - unquoted name of the cluster column
# title   - legend title naming the clustering method
# colors  - vector of cluster colors (in factor-level order)
plot_clusters <- function(data, cluster, title, colors) {
  ggplot(data, aes(x = xcoord, y = ycoord,
                   group = {{ cluster }}, color = {{ cluster }})) +
    geom_point(size = 2) +
    geom_label(aes(label = sensations), nudge_x = 0.25, nudge_y = 0.2,
               size = 2, show.legend = FALSE) +
    scale_color_manual(name = title, values = colors) +
    xlab("t-SNE1") + ylab("t-SNE2") +
    pnas_theme +
    guides(color = guide_legend(override.aes = list(size = 4))) +
    theme(legend.title = element_text(size = 12),
          legend.text = element_text(size = 10),
          axis.text = element_blank(),
          legend.position = "bottom")
}

plot.dbscan <- plot_clusters(d0, dbscan, "DBSCAN", feelcolors.dbscan)
plot.kmeans <- plot_clusters(d0, kmeans, "K-Means", feelcolors.kmeans)
plot.hc <- plot_clusters(d0, hc, "Hierarchical Clustering", feelcolors.hc)

plot_grid(plot.dbscan, plot.kmeans, plot.hc, ncol = 1)
#' Iowa Class E Liquor Sales Summary #' #' @description Monthly summary of the different Class E liquor sales in State of Iowa #' #' @details This dataset contains an aggregated view (aggregated by multiple attributes) of the sales data for Class E liquor. The dataset has been pre-processed to remove NULL values from the county variable. See vignette for more details #' #' @format a data frame with 10 variables. #' \describe{ #' \item{year}{The year in which the sale occurred.} #' \item{year_month}{This is an aggregated value indicating the month and year in YYYY-MM-DD format.} #' \item{county}{The county in which the sale occurred.} #' \item{population}{The population of the county of the year of sale as recorded by the US Census Bureau. NA values indicate no census data were found on the Iowa Data Portal.} #' \item{type}{A high level grouping of the liquor. This was derived separately.} #' \item{category}{A grouping variable used by the State of Iowa.} #' \item{state_cost}{The cost (in US$) to the state to purchase the liquor from a vendor. Not adjusted for inflation.} #' \item{state_revenue}{The revenue (in US$) the state earned from the sale of the liquor to retailers. Not adjusted for inflation.} #' \item{bottles_sold}{The number of bottles sold by the state to a retailer.} #' \item{volume}{The volume sold (in liters) by the state to a retailer.} #' } #' #' @source State of Iowa Data \href{https://data.iowa.gov/resource/m3tr-qhgy.csv}{API} #' @keywords datasets timeseries liquor revenue #' "liquor_sales"
/R/data.R
permissive
nikdata/ialiquor
R
false
false
1,559
r
#' Iowa Class E Liquor Sales Summary #' #' @description Monthly summary of the different Class E liquor sales in State of Iowa #' #' @details This dataset contains an aggregated view (aggregated by multiple attributes) of the sales data for Class E liquor. The dataset has been pre-processed to remove NULL values from the county variable. See vignette for more details #' #' @format a data frame with 10 variables. #' \describe{ #' \item{year}{The year in which the sale occurred.} #' \item{year_month}{This is an aggregated value indicating the month and year in YYYY-MM-DD format.} #' \item{county}{The county in which the sale occurred.} #' \item{population}{The population of the county of the year of sale as recorded by the US Census Bureau. NA values indicate no census data were found on the Iowa Data Portal.} #' \item{type}{A high level grouping of the liquor. This was derived separately.} #' \item{category}{A grouping variable used by the State of Iowa.} #' \item{state_cost}{The cost (in US$) to the state to purchase the liquor from a vendor. Not adjusted for inflation.} #' \item{state_revenue}{The revenue (in US$) the state earned from the sale of the liquor to retailers. Not adjusted for inflation.} #' \item{bottles_sold}{The number of bottles sold by the state to a retailer.} #' \item{volume}{The volume sold (in liters) by the state to a retailer.} #' } #' #' @source State of Iowa Data \href{https://data.iowa.gov/resource/m3tr-qhgy.csv}{API} #' @keywords datasets timeseries liquor revenue #' "liquor_sales"
#' geom_barH
#'
#' Bar geom with hatching support.  Mirrors `ggplot2::geom_bar()` but draws
#' through `GeomBarH` so a `hatch` density parameter can be supplied.
#'
#' @param hatch Hatching density forwarded to the layer (default 3).
#' @inheritParams ggplot2::geom_bar
geom_barh <- function(mapping = NULL, data = NULL,
                      stat = "count",
                      # stat = "barH",
                      position = "stack",
                      ...,
                      width = NULL,
                      binwidth = NULL,
                      na.rm = FALSE,
                      orientation = NA,
                      hatch = 3,
                      show.legend = NA,
                      inherit.aes = TRUE) {
  # `binwidth` belongs to the histogram variant; redirect with a warning.
  if (!is.null(binwidth)) {
    warn("`geom_barh()` no longer has a `binwidth` parameter. Please use `geom_histogramh()` instead.")
    return(geom_histogramh(mapping = mapping, data = data,
                           position = position, width = width,
                           binwidth = binwidth, hatch = hatch, ...,
                           na.rm = na.rm, show.legend = show.legend,
                           inherit.aes = inherit.aes))
  }

  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomBarH,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      width = width,
      na.rm = na.rm,
      orientation = orientation,
      hatch = hatch,
      ...
    )
  )
}

#' @format NULL
#' @usage NULL
#' @export
#' @include geom-rect.r
GeomBarH <- ggplot2::ggproto(
  "GeomBarH", GeomRectH,
  required_aes = c("x", "y"),
  non_missing_aes = c("xmin", "xmax", "ymin", "ymax"),

  setup_params = function(data, params) {
    params$flipped_aes <- has_flipped_aes(data, params)
    params
  },

  extra_params = c("na.rm", "orientation"),

  # Convert each bar's x/y into the rectangle corners that GeomRectH draws:
  # the bar spans [x - width/2, x + width/2] and runs from 0 to y.
  setup_data = function(data, params) {
    data$flipped_aes <- params$flipped_aes
    data <- flip_data(data, params$flipped_aes)
    data$width <- data$width %||%
      params$width %||% (resolution(data$x, FALSE) * 0.9)
    transform(
      data,
      ymin = pmin(y, 0), ymax = pmax(y, 0),
      xmin = x - width / 2, xmax = x + width / 2, width = NULL
    )
  },

  draw_panel = function(self, data, panel_params, coord, width = NULL,
                        flipped_aes = FALSE) {
    # `width` kept in the signature so ggplot2 detects it as a parameter;
    # actual drawing is delegated to the parent rect geom.
    ggplot2::ggproto_parent(GeomRectH, self)$draw_panel(
      data, panel_params, coord
    )
  }
)

#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
#' @include stat-.r
StatBarH <- ggplot2::ggproto(
  "StatBarH", ggplot2::Stat,
  required_aes = "x",
  default_aes = ggplot2::aes(y = ..count.., colour = "white", hatch = 3),

  setup_params = function(data, params) {
    if (!is.null(data$y) || !is.null(params$y)) {
      stop("stat_barh() must not be used with a y aesthetic.", call. = FALSE)
    }
    params
  },

  # Count observations per x value, weighted by the `weight` aesthetic.
  # FIX: the original referenced `hatch` without it being defined anywhere
  # in this function's scope (runtime "object 'hatch' not found"); it is
  # now a defaulted parameter matching `default_aes` (hatch = 3), which is
  # backward compatible.
  compute_group = function(self, data, scales, width = NULL, hatch = 3) {
    x <- data$x
    weight <- data$weight %||% rep(1, length(x))
    width <- width %||% (resolution(x) * 0.9)

    count <- as.numeric(tapply(weight, x, sum, na.rm = TRUE))
    count[is.na(count)] <- 0

    data.frame(
      count = count,
      prop = count / sum(abs(count)),
      x = unique(x),
      width = width,
      hatch = hatch
    )
  }
)
/R/geom-barh.R
no_license
joker8phoenix/statds
R
false
false
3,501
r
#' geom_barH
#'
#' Bar geom with hatching support.  Mirrors `ggplot2::geom_bar()` but draws
#' through `GeomBarH` so a `hatch` density parameter can be supplied.
#'
#' @param hatch Hatching density forwarded to the layer (default 3).
#' @inheritParams ggplot2::geom_bar
geom_barh <- function(mapping = NULL, data = NULL,
                      stat = "count",
                      # stat = "barH",
                      position = "stack",
                      ...,
                      width = NULL,
                      binwidth = NULL,
                      na.rm = FALSE,
                      orientation = NA,
                      hatch = 3,
                      show.legend = NA,
                      inherit.aes = TRUE) {
  # `binwidth` belongs to the histogram variant; redirect with a warning.
  if (!is.null(binwidth)) {
    warn("`geom_barh()` no longer has a `binwidth` parameter. Please use `geom_histogramh()` instead.")
    return(geom_histogramh(mapping = mapping, data = data,
                           position = position, width = width,
                           binwidth = binwidth, hatch = hatch, ...,
                           na.rm = na.rm, show.legend = show.legend,
                           inherit.aes = inherit.aes))
  }

  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomBarH,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      width = width,
      na.rm = na.rm,
      orientation = orientation,
      hatch = hatch,
      ...
    )
  )
}

#' @format NULL
#' @usage NULL
#' @export
#' @include geom-rect.r
GeomBarH <- ggplot2::ggproto(
  "GeomBarH", GeomRectH,
  required_aes = c("x", "y"),
  non_missing_aes = c("xmin", "xmax", "ymin", "ymax"),

  setup_params = function(data, params) {
    params$flipped_aes <- has_flipped_aes(data, params)
    params
  },

  extra_params = c("na.rm", "orientation"),

  # Convert each bar's x/y into the rectangle corners that GeomRectH draws:
  # the bar spans [x - width/2, x + width/2] and runs from 0 to y.
  setup_data = function(data, params) {
    data$flipped_aes <- params$flipped_aes
    data <- flip_data(data, params$flipped_aes)
    data$width <- data$width %||%
      params$width %||% (resolution(data$x, FALSE) * 0.9)
    transform(
      data,
      ymin = pmin(y, 0), ymax = pmax(y, 0),
      xmin = x - width / 2, xmax = x + width / 2, width = NULL
    )
  },

  draw_panel = function(self, data, panel_params, coord, width = NULL,
                        flipped_aes = FALSE) {
    # `width` kept in the signature so ggplot2 detects it as a parameter;
    # actual drawing is delegated to the parent rect geom.
    ggplot2::ggproto_parent(GeomRectH, self)$draw_panel(
      data, panel_params, coord
    )
  }
)

#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
#' @include stat-.r
StatBarH <- ggplot2::ggproto(
  "StatBarH", ggplot2::Stat,
  required_aes = "x",
  default_aes = ggplot2::aes(y = ..count.., colour = "white", hatch = 3),

  setup_params = function(data, params) {
    if (!is.null(data$y) || !is.null(params$y)) {
      stop("stat_barh() must not be used with a y aesthetic.", call. = FALSE)
    }
    params
  },

  # Count observations per x value, weighted by the `weight` aesthetic.
  # FIX: the original referenced `hatch` without it being defined anywhere
  # in this function's scope (runtime "object 'hatch' not found"); it is
  # now a defaulted parameter matching `default_aes` (hatch = 3), which is
  # backward compatible.
  compute_group = function(self, data, scales, width = NULL, hatch = 3) {
    x <- data$x
    weight <- data$weight %||% rep(1, length(x))
    width <- width %||% (resolution(x) * 0.9)

    count <- as.numeric(tapply(weight, x, sum, na.rm = TRUE))
    count[is.na(count)] <- 0

    data.frame(
      count = count,
      prop = count / sum(abs(count)),
      x = unique(x),
      width = width,
      hatch = hatch
    )
  }
)
plotMapPoints<-function(area=area, xlim=c(19.5,29.5), ylim=c(34.8,41.8), col="grey60", fill=T, type ="p", col_points="red",cex=0.5, plotclr=c("pink1","red", "red4","black"), symbol.size=1.1){ library(gstat) library(mgcv) library(maps) library(mapdata) library(RColorBrewer) library(akima) library(maptools) library(classInt) library(scales) #area<-area[area$Abundance<5000,] # map(database = "worldHires", xlim=xlim, ylim = ylim, resolution = 0, col=col, fill=fill) #-------Display observations on a map------------ #let see the data we have #pdf(name) min(area$Abundance) max(area$Abundance) min(log(area$Abundance)) max(log(area$Abundance)) #Map the raw data - bubble plots. i want to view where is the most abandant area plotvar <- area$Abundance #plotvar <- log(area$Abundance) #number of color is 4 (quite good choice) #plotclr<-palette(gray(seq(0.8,0,len=4))) #choose four different colors. wants red color for high values let's say plotclr<-plotclr nclr <- length(plotclr) #display.brewer.all(n=NULL, type="all", select=NULL, exact.n=TRUE)# display palettes #plotclr <- brewer.pal(nclr,"BuPu") #plotclr <- brewer.pal(nclr,"Reds") #plotclr <- plotclr[nclr:1] # reorder colors if appropriate #max.symbol.size=3 #min.symbol.size=1 #class <- classIntervals(plotvar, style="equal",n=4) #very important command. 
we ask R to get our dataset and difine different classes so that it will give different colors to different classes #style is how to spit the data in my classes class <- classIntervals(plotvar, n=nclr, style="quantile") #let's view the classes class #the next is to put colors in the observations colcode <- findColours(class, plotclr) #size of the bullets #symbol.size <- ((plotvar-min(plotvar))/(max(plotvar)-min(plotvar))*(max.symbol.size-min.symbol.size)+min.symbol.size) map(database = "world", xlim=xlim, ylim = ylim, resolution = 0, col=col, fill=fill,bg="white", xlab="Longitude",ylab="Latitude") map.axes() points(area$LON, area$LAT, pch=16, type="p",col=colcode,cex=symbol.size,xlab="Longitude",ylab="Latitude") #plot(area$LON, area$LAT, pch=16, type="p",col="red",cex=symbol.size,xlab="Longitude",ylab="Latitude",xlim=c(22,29),ylim=c(35,41),axes=F) #vazei to parathema legend<-names(attr(colcode, "table")) # legend2<-c(); i<-1; # for(i in 1:length(legend)){ # legend2[i]<- # paste("[",strsplit(legend[i],"[[:punct:]]")[[1]][2],",", # strsplit(legend[i],"[[:punct:]]")[[1]][3],")",sep="") # if(i==length(legend)) legend2[i]<-paste("[",strsplit(legend[i],"[[:punct:]]")[[1]][2],",", # strsplit(legend[i],"[[:punct:]]")[[1]][3],"]",sep="") # } # legend("topright", legend=legend, fill=attr(colcode, "palette"), cex=1.5, bty="n") text(34.5,41,"Turkey", cex = 1) text(29.7,46.2,"Ukraine", cex = 1) text(40.5,45.0,"Russia", cex = 1) text(42.4,42.4,"Georgia", cex = 1) text(27.7,43.6,"Bulgaria", cex = 1) text(27.8,44.6,"Romania", cex = 1) text(29,45.18,"Danube", cex = 2.0,font=2, col="#ADD8E6") #text(34.3,45.4,"Crimea", cex = 1) # title(input$mzrt[1]) #abline(44,0) #abline(0,32) #dev.off() }
/Dataset generation/Helper_plotmappoints.R
no_license
gbouzioto/chemical_polution
R
false
false
3,419
r
plotMapPoints<-function(area=area, xlim=c(19.5,29.5), ylim=c(34.8,41.8), col="grey60", fill=T, type ="p", col_points="red",cex=0.5, plotclr=c("pink1","red", "red4","black"), symbol.size=1.1){ library(gstat) library(mgcv) library(maps) library(mapdata) library(RColorBrewer) library(akima) library(maptools) library(classInt) library(scales) #area<-area[area$Abundance<5000,] # map(database = "worldHires", xlim=xlim, ylim = ylim, resolution = 0, col=col, fill=fill) #-------Display observations on a map------------ #let see the data we have #pdf(name) min(area$Abundance) max(area$Abundance) min(log(area$Abundance)) max(log(area$Abundance)) #Map the raw data - bubble plots. i want to view where is the most abandant area plotvar <- area$Abundance #plotvar <- log(area$Abundance) #number of color is 4 (quite good choice) #plotclr<-palette(gray(seq(0.8,0,len=4))) #choose four different colors. wants red color for high values let's say plotclr<-plotclr nclr <- length(plotclr) #display.brewer.all(n=NULL, type="all", select=NULL, exact.n=TRUE)# display palettes #plotclr <- brewer.pal(nclr,"BuPu") #plotclr <- brewer.pal(nclr,"Reds") #plotclr <- plotclr[nclr:1] # reorder colors if appropriate #max.symbol.size=3 #min.symbol.size=1 #class <- classIntervals(plotvar, style="equal",n=4) #very important command. 
we ask R to get our dataset and difine different classes so that it will give different colors to different classes #style is how to spit the data in my classes class <- classIntervals(plotvar, n=nclr, style="quantile") #let's view the classes class #the next is to put colors in the observations colcode <- findColours(class, plotclr) #size of the bullets #symbol.size <- ((plotvar-min(plotvar))/(max(plotvar)-min(plotvar))*(max.symbol.size-min.symbol.size)+min.symbol.size) map(database = "world", xlim=xlim, ylim = ylim, resolution = 0, col=col, fill=fill,bg="white", xlab="Longitude",ylab="Latitude") map.axes() points(area$LON, area$LAT, pch=16, type="p",col=colcode,cex=symbol.size,xlab="Longitude",ylab="Latitude") #plot(area$LON, area$LAT, pch=16, type="p",col="red",cex=symbol.size,xlab="Longitude",ylab="Latitude",xlim=c(22,29),ylim=c(35,41),axes=F) #vazei to parathema legend<-names(attr(colcode, "table")) # legend2<-c(); i<-1; # for(i in 1:length(legend)){ # legend2[i]<- # paste("[",strsplit(legend[i],"[[:punct:]]")[[1]][2],",", # strsplit(legend[i],"[[:punct:]]")[[1]][3],")",sep="") # if(i==length(legend)) legend2[i]<-paste("[",strsplit(legend[i],"[[:punct:]]")[[1]][2],",", # strsplit(legend[i],"[[:punct:]]")[[1]][3],"]",sep="") # } # legend("topright", legend=legend, fill=attr(colcode, "palette"), cex=1.5, bty="n") text(34.5,41,"Turkey", cex = 1) text(29.7,46.2,"Ukraine", cex = 1) text(40.5,45.0,"Russia", cex = 1) text(42.4,42.4,"Georgia", cex = 1) text(27.7,43.6,"Bulgaria", cex = 1) text(27.8,44.6,"Romania", cex = 1) text(29,45.18,"Danube", cex = 2.0,font=2, col="#ADD8E6") #text(34.3,45.4,"Crimea", cex = 1) # title(input$mzrt[1]) #abline(44,0) #abline(0,32) #dev.off() }
\name{smooth.construct.tescv.smooth.spec} \alias{smooth.construct.tescv.smooth.spec} %- Also NEED an '\alias' for EACH other topic documented here. \title{Tensor product smoothing constructor for a bivariate function concave in the second covariate } \description{This is a special method function for creating tensor product bivariate smooths concave in the second covariate which is built by the \code{mgcv} constructor function for smooth terms, \code{smooth.construct}. It is constructed from a pair of single penalty marginal smooths. This tensor product is specified by model terms such as \code{s(x1,x2,k=c(q1,q2),bs="tescv",m=c(2,2))}, where the basis for the first marginal smooth is specified in the second element of \code{bs}. } \usage{ \method{smooth.construct}{tescv.smooth.spec}(object, data, knots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{A smooth specification object, generated by an \code{s} term in a GAM formula.} \item{data}{A data frame or list containing the values of the elements of \code{object$term}, with names given by \code{object$term}.} \item{knots}{An optional list containing the knots corresponding to \code{object$term}. If it is \code{NULL} then the knot locations are generated automatically.} } %\details{ %% ~~ If necessary, more details than the description above ~~ %} \value{An object of class \code{"tescv.smooth"}. In addition to the usual elements of a smooth class documented under \code{smooth.construct} of the \code{mgcv} library, this object contains: \item{p.ident}{A vector of 0's and 1's for model parameter identification: 1's indicate parameters which will be exponentiated, 0's - otherwise.} \item{Zc}{A matrix of identifiability constraints.} %\item{margin.bs}{ } } \references{ Pya, N. and Wood, S.N. (2015) Shape constrained additive models. 
Statistics and Computing, 25(3), 543-559 } \author{ Natalya Pya <nat.pya@gmail.com> } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{smooth.construct.temicv.smooth.spec}} \code{\link{smooth.construct.temicx.smooth.spec}} \code{\link{smooth.construct.tedecv.smooth.spec}} \code{\link{smooth.construct.tedecx.smooth.spec}} \code{\link{smooth.construct.tescx.smooth.spec}} } \examples{ \dontrun{ ## tensor product `tescv' example ## simulating data... set.seed(5) n <- 30 x1 <- sort(runif(n)) x2 <- sort(2*runif(n)-1) f1 <- matrix(0,n,n) for (i in 1:n) for (j in 1:n) f1[i,j] <- sin(2*x1[i]) - 4*x2[j]^2 f1 <- as.vector(t(f1)) f <- (f1-min(f1))/(max(f1)-min(f1)) y <- f+rnorm(length(f))*0.1 x11 <- matrix(0,n,n) x11[,1:n] <- x1 x11 <- as.vector(t(x11)) x22 <- rep(x2,n) dat <- list(x1=x11,x2=x22,y=y) ## fit model ... b <- scam(y~s(x1,x2,k=c(10,10),bs="tescv",m=2), family=gaussian(), data=dat) ## plot results ... par(mfrow=c(2,2),mar=c(4,4,2,2)) plot(b,se=TRUE) plot(b,pers=TRUE, theta = 50, phi = 20) plot(y,b$fitted.values,xlab="Simulated data",ylab="Fitted data") x11() vis.scam(b, theta = 50, phi = 20) ## plotting the truth... x11() x1 <- seq(min(x1),max(x1),length.out=30) x2 <- seq(min(x2),max(x2),length.out=30) f1 <- matrix(0,n,n) for (i in 1:n) for (j in 1:n) f1[i,j] <- sin(2*x1[i]) - 4*x2[j]^2 persp(x1,x2,f1,theta = 50, phi = 20) } } \keyword{models} \keyword{regression}%-- one or more ..
/man/smooth.construct.tescv.smooth.spec.Rd
no_license
cran/scam
R
false
false
3,575
rd
\name{smooth.construct.tescv.smooth.spec} \alias{smooth.construct.tescv.smooth.spec} %- Also NEED an '\alias' for EACH other topic documented here. \title{Tensor product smoothing constructor for a bivariate function concave in the second covariate } \description{This is a special method function for creating tensor product bivariate smooths concave in the second covariate which is built by the \code{mgcv} constructor function for smooth terms, \code{smooth.construct}. It is constructed from a pair of single penalty marginal smooths. This tensor product is specified by model terms such as \code{s(x1,x2,k=c(q1,q2),bs="tescv",m=c(2,2))}, where the basis for the first marginal smooth is specified in the second element of \code{bs}. } \usage{ \method{smooth.construct}{tescv.smooth.spec}(object, data, knots) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{A smooth specification object, generated by an \code{s} term in a GAM formula.} \item{data}{A data frame or list containing the values of the elements of \code{object$term}, with names given by \code{object$term}.} \item{knots}{An optional list containing the knots corresponding to \code{object$term}. If it is \code{NULL} then the knot locations are generated automatically.} } %\details{ %% ~~ If necessary, more details than the description above ~~ %} \value{An object of class \code{"tescv.smooth"}. In addition to the usual elements of a smooth class documented under \code{smooth.construct} of the \code{mgcv} library, this object contains: \item{p.ident}{A vector of 0's and 1's for model parameter identification: 1's indicate parameters which will be exponentiated, 0's - otherwise.} \item{Zc}{A matrix of identifiability constraints.} %\item{margin.bs}{ } } \references{ Pya, N. and Wood, S.N. (2015) Shape constrained additive models. 
Statistics and Computing, 25(3), 543-559 } \author{ Natalya Pya <nat.pya@gmail.com> } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{smooth.construct.temicv.smooth.spec}} \code{\link{smooth.construct.temicx.smooth.spec}} \code{\link{smooth.construct.tedecv.smooth.spec}} \code{\link{smooth.construct.tedecx.smooth.spec}} \code{\link{smooth.construct.tescx.smooth.spec}} } \examples{ \dontrun{ ## tensor product `tescv' example ## simulating data... set.seed(5) n <- 30 x1 <- sort(runif(n)) x2 <- sort(2*runif(n)-1) f1 <- matrix(0,n,n) for (i in 1:n) for (j in 1:n) f1[i,j] <- sin(2*x1[i]) - 4*x2[j]^2 f1 <- as.vector(t(f1)) f <- (f1-min(f1))/(max(f1)-min(f1)) y <- f+rnorm(length(f))*0.1 x11 <- matrix(0,n,n) x11[,1:n] <- x1 x11 <- as.vector(t(x11)) x22 <- rep(x2,n) dat <- list(x1=x11,x2=x22,y=y) ## fit model ... b <- scam(y~s(x1,x2,k=c(10,10),bs="tescv",m=2), family=gaussian(), data=dat) ## plot results ... par(mfrow=c(2,2),mar=c(4,4,2,2)) plot(b,se=TRUE) plot(b,pers=TRUE, theta = 50, phi = 20) plot(y,b$fitted.values,xlab="Simulated data",ylab="Fitted data") x11() vis.scam(b, theta = 50, phi = 20) ## plotting the truth... x11() x1 <- seq(min(x1),max(x1),length.out=30) x2 <- seq(min(x2),max(x2),length.out=30) f1 <- matrix(0,n,n) for (i in 1:n) for (j in 1:n) f1[i,j] <- sin(2*x1[i]) - 4*x2[j]^2 persp(x1,x2,f1,theta = 50, phi = 20) } } \keyword{models} \keyword{regression}%-- one or more ..
## Data can be found at this link, was originally accessed on August 10, 2014 ## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip ## File was then unzipped on the desktop ## Read data into R data <- read.table("/Users/Mario/Desktop/household_power_consumption.txt",sep=";",header=TRUE) ## Give the columns their appropriate names names(data) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3") ## Create new column, DateTime, with formatted date and time data$DateTime <- paste(data$Date,data$Time) data$DateTime <- strptime(data$DateTime, format="%d/%m/%Y %T") ## Subset the dates we want (this could have been done sooner to save time) data2 <- subset(data, (as.Date(data$DateTime)=="2007-02-01" | as.Date(data$DateTime)=="2007-02-02")) ## Correct some data types (for some reason, just using as.numeric led to some errors) ## We will not be using columns 1 and 2 ("Date" and "Time") again so those can be left alone data2$Global_active_power <- as.numeric(as.character(data2$Global_active_power)) data2$Global_reactive_power <- as.numeric(as.character(data2$Global_reactive_power)) data2$Voltage <- as.numeric(as.character(data2$Voltage)) data2$Global_intensity <- as.numeric(as.character(data2$Global_intensity)) data2$Sub_metering_1 <- as.numeric(as.character(data2$Sub_metering_1)) data2$Sub_metering_2 <- as.numeric(as.character(data2$Sub_metering_2)) data2$Sub_metering_3 <- as.numeric(as.character(data2$Sub_metering_3)) ### --------------------------------------------------------------------------### ### Now we make the FOURTH plot and save it ### --------------------------------------------------------------------------### png("/Users/Mario/ExData_Plotting1/plot4.png", width=480, height=480) par(mfrow=c(2,2)) ## Plot A ## --------------------------------------------------------------------------------- plot(x=data2$DateTime, 
y=data2$Global_active_power, type="l", ylab="Global Active Power", xlab="", lwd=1.25, ## Make a little thicker cex.lab=1) ## --------------------------------------------------------------------------------- ## Plot B ## --------------------------------------------------------------------------------- plot(x=data2$DateTime, y=data2$Voltage, xlab="datetime", ylab="Voltage", type="l", lwd=1.25) ## make lines a tad thicker ## --------------------------------------------------------------------------------- ## Plot C ## --------------------------------------------------------------------------------- ## Add the first set of points, Sub_metering_1, and put in the x and y axis labels plot(x=data2$DateTime, y=data2$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering") ## Add in the second set of points, Sub_metering_2 points(x=data2$DateTime, y=data2$Sub_metering_2, type="l", col="red") ## Add in the third set of points, Sub_metering_3 points(x=data2$DateTime, y=data2$Sub_metering_3, type="l", col="blue") ## Add legend legend("topright", col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd=c(2,2,2), ## make it a little darker so it looks better bty="n", ## Remove that border!!! cex=1) ## --------------------------------------------------------------------------------- ## Plot D ## --------------------------------------------------------------------------------- plot(x=data2$DateTime, y=data2$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type="l", lwd=1.2, ## Make lines a little thicker cex=.1) ## --------------------------------------------------------------------------------- dev.off()
/plot4.R
no_license
ibanmd/ExData_Plotting1
R
false
false
3,880
r
## Data can be found at this link, was originally accessed on August 10, 2014 ## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip ## File was then unzipped on the desktop ## Read data into R data <- read.table("/Users/Mario/Desktop/household_power_consumption.txt",sep=";",header=TRUE) ## Give the columns their appropriate names names(data) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3") ## Create new column, DateTime, with formatted date and time data$DateTime <- paste(data$Date,data$Time) data$DateTime <- strptime(data$DateTime, format="%d/%m/%Y %T") ## Subset the dates we want (this could have been done sooner to save time) data2 <- subset(data, (as.Date(data$DateTime)=="2007-02-01" | as.Date(data$DateTime)=="2007-02-02")) ## Correct some data types (for some reason, just using as.numeric led to some errors) ## We will not be using columns 1 and 2 ("Date" and "Time") again so those can be left alone data2$Global_active_power <- as.numeric(as.character(data2$Global_active_power)) data2$Global_reactive_power <- as.numeric(as.character(data2$Global_reactive_power)) data2$Voltage <- as.numeric(as.character(data2$Voltage)) data2$Global_intensity <- as.numeric(as.character(data2$Global_intensity)) data2$Sub_metering_1 <- as.numeric(as.character(data2$Sub_metering_1)) data2$Sub_metering_2 <- as.numeric(as.character(data2$Sub_metering_2)) data2$Sub_metering_3 <- as.numeric(as.character(data2$Sub_metering_3)) ### --------------------------------------------------------------------------### ### Now we make the FOURTH plot and save it ### --------------------------------------------------------------------------### png("/Users/Mario/ExData_Plotting1/plot4.png", width=480, height=480) par(mfrow=c(2,2)) ## Plot A ## --------------------------------------------------------------------------------- plot(x=data2$DateTime, 
y=data2$Global_active_power, type="l", ylab="Global Active Power", xlab="", lwd=1.25, ## Make a little thicker cex.lab=1) ## --------------------------------------------------------------------------------- ## Plot B ## --------------------------------------------------------------------------------- plot(x=data2$DateTime, y=data2$Voltage, xlab="datetime", ylab="Voltage", type="l", lwd=1.25) ## make lines a tad thicker ## --------------------------------------------------------------------------------- ## Plot C ## --------------------------------------------------------------------------------- ## Add the first set of points, Sub_metering_1, and put in the x and y axis labels plot(x=data2$DateTime, y=data2$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering") ## Add in the second set of points, Sub_metering_2 points(x=data2$DateTime, y=data2$Sub_metering_2, type="l", col="red") ## Add in the third set of points, Sub_metering_3 points(x=data2$DateTime, y=data2$Sub_metering_3, type="l", col="blue") ## Add legend legend("topright", col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd=c(2,2,2), ## make it a little darker so it looks better bty="n", ## Remove that border!!! cex=1) ## --------------------------------------------------------------------------------- ## Plot D ## --------------------------------------------------------------------------------- plot(x=data2$DateTime, y=data2$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type="l", lwd=1.2, ## Make lines a little thicker cex=.1) ## --------------------------------------------------------------------------------- dev.off()
# Load data source("load_data.R") # Open PNG device png("plot3.png") # Create Plot plot(hpcSubset$DateTime, hpcSubset$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering") lines(hpcSubset$DateTime, hpcSubset$Sub_metering_2, col="red") lines(hpcSubset$DateTime, hpcSubset$Sub_metering_3, col="blue") legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty="solid") # Close PNG device dev.off() message("Plot 3 created successfully.")
/plot3.r
no_license
TowkayNew/ExData_Plotting1
R
false
false
577
r
# Load data source("load_data.R") # Open PNG device png("plot3.png") # Create Plot plot(hpcSubset$DateTime, hpcSubset$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering") lines(hpcSubset$DateTime, hpcSubset$Sub_metering_2, col="red") lines(hpcSubset$DateTime, hpcSubset$Sub_metering_3, col="blue") legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty="solid") # Close PNG device dev.off() message("Plot 3 created successfully.")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/locate_site.R \name{locate_site} \alias{locate_site} \title{Find a site folder on ScienceBase} \arguments{ \item{site_name}{the site ID, e.g. "nwis_02322688", whose folder you want} \item{format}{character indicating whether the folder should be returned as an ID or as a full URL} \item{by}{character indicating how to search for the item: using tags ("tag", the default and recommended option), by scanning the parent directory for the desired title ("dir"), or both in combination ("either")?} \item{browser}{logical. Should the URL be opened in a browser?} } \description{ Find a site folder on ScienceBase } \examples{ \dontrun{ locate_site("nwis_02322688", format="url") locate_site(c("nwis_02322688", "nwis_03259813", "nwis_04024000")) locate_site("nwis_notasite", format="url") testthat::expect_error(locate_site("notasite", format="url")) } }
/man/locate_site.Rd
permissive
ehstanley/powstreams
R
false
true
936
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/locate_site.R \name{locate_site} \alias{locate_site} \title{Find a site folder on ScienceBase} \arguments{ \item{site_name}{the site ID, e.g. "nwis_02322688", whose folder you want} \item{format}{character indicating whether the folder should be returned as an ID or as a full URL} \item{by}{character indicating how to search for the item: using tags ("tag", the default and recommended option), by scanning the parent directory for the desired title ("dir"), or both in combination ("either")?} \item{browser}{logical. Should the URL be opened in a browser?} } \description{ Find a site folder on ScienceBase } \examples{ \dontrun{ locate_site("nwis_02322688", format="url") locate_site(c("nwis_02322688", "nwis_03259813", "nwis_04024000")) locate_site("nwis_notasite", format="url") testthat::expect_error(locate_site("notasite", format="url")) } }
x=seq(1, 10, len=1) y=40*2 + rnorm(10,0,5) plot(x,y) summary(x) mean(x)
/analysis.R
no_license
camilamtl/TOTALJUNK
R
false
false
72
r
x=seq(1, 10, len=1) y=40*2 + rnorm(10,0,5) plot(x,y) summary(x) mean(x)
\name{GetAssignment} \alias{GetAssignment} \alias{GetAssignments} \alias{assignment} \alias{assignments} \title{Get Assignment(s)} \description{Get an assignment or multiple assignments for one or more HITs (or a HITType) as a dataframe.} \usage{ GetAssignment( assignment = NULL, hit = NULL, hit.type = NULL, status = NULL, return.all = FALSE, pagenumber = "1", pagesize = "10", sortproperty = "SubmitTime", sortdirection = "Ascending", response.group = NULL, keypair = credentials(), print = TRUE, browser = FALSE, log.requests = TRUE, sandbox = FALSE, return.assignment.dataframe = TRUE) } \arguments{ \item{assignment}{An optional character string specifying the AssignmentId of an assignment to return.} \item{hit}{An optional character string specifying the HITId whose assignments are to be returned, or a vector of character strings specifying multiple HITIds all of whose assignments are to be returned.} \item{hit.type}{An optional character string specifying the HITTypeId of one or more HITs whose assignments are to be returned.} \item{status}{An optional character string (of \dQuote{Approved},\dQuote{Rejected},\dQuote{Submitted}), specifying whether only a subset of assignments should be returned. If \code{NULL}, all assignments are returned (the default). Only applies when \code{hit} or \code{hit.type} are specified; ignored otherwise.} \item{return.all}{If \code{TRUE}, all available assignments are returned. Otherwise, only assignments falling within the specified \code{pagenumber} and \code{pagesize} search results are returned.} \item{pagenumber}{An optional character string indicating which page of search results should be returned (only appropriate when specifying a single HITId). Most users can ignore this.} \item{pagesize}{An optional character string indicating how many search results should be returned by each request (only appropriate when specifying a single HITId), between 1 and 100. 
Most users can ignore this.} \item{sortproperty}{One of \dQuote{AcceptTime}, \dQuote{SubmitTime}, \dQuote{AssignmentStatus}. Ignored if \code{return.all=TRUE}. Most users can ignore this.} \item{sortdirection}{Either \dQuote{Ascending} or \dQuote{Descending}. Ignored if \code{return.all=TRUE}. Most users can ignore this.} \item{response.group}{An optional character string (or vector of character strings) specifying what details to return. If \code{assignment} is specified, \code{response.group} can include any of \dQuote{Request}, \dQuote{Minimal}, \dQuote{AssignmentFeedback}, \dQuote{HITDetail}, and/or \dQuote{HITQuestion}. If \code{hit} or \code{hit.type} is specified, \code{response.group} can include \dQuote{Request}, \dQuote{Minimal}, and/or \dQuote{AssignmentFeedback}. For more information, see \url{http://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_CommonParametersArticle.html}.} \item{keypair}{A two-item character vector containing an AWS Access Key ID in the first position and the corresponding Secret Access Key in the second position. Set default with \code{\link{credentials}}.} \item{print}{Optionally print the results of the API request to the standard output. Default is \code{TRUE}.} \item{browser}{Optionally open the request in the default web browser, rather than opening in R. Default is \code{FALSE}.} \item{log.requests}{A logical specifying whether API requests should be logged. Default is \code{TRUE}. See \code{\link{readlogfile}} for details.} \item{sandbox}{Optionally execute the request in the MTurk sandbox rather than the live server. Default is \code{FALSE}.} \item{return.assignment.dataframe}{A logical specifying whether the Assignment dataframe should be returned. Default is \code{TRUE}.} } \details{This function returns the requested assignments. The function must specify an AssignmentId xor a HITId xor a HITTypeId. If an AssignmentId is specified, only that assignment is returned. 
If a HIT or HITType is specified, default behavior is to return all assignments through a series of sequential (but invisible) API calls meaning that returning large numbers of assignments (or assignments for a large number of HITs in a single request) may be time consuming. \code{GetAssignments()}, \code{assignment()}, and \code{assignments()} are aliases. } \value{Optionally a dataframe containing Assignment data, including workers responses to any questions specified in the \code{question} parameter of the \code{CreateHIT} function.} \references{ \href{http://docs.amazonwebservices.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_GetAssignmentOperation.html}{API Reference: GetAssignment} \href{http://docs.amazonwebservices.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_GetAssignmentsForHITOperation.html}{API Reference: GetAssignmentsForHIT } } \author{Thomas J. Leeper} %\note{} \seealso{ \code{\link{GetHIT}} \code{\link{ApproveAssignment}} \code{\link{ApproveAllAssignments}} \code{\link{RejectAssignment}} } \examples{ \dontrun{ GetAssignment(assignments="26XXH0JPPSI23H54YVG7BKLEXAMPLE") GetAssignment(hit="2MQB727M0IGF304GJ16S1F4VE3AYDQ",return.all=TRUE) GetAssignment(hit.type="2FFNCWYB49F9BBJWA4SJUNST5OFSOW",return.all=FALSE,pagenumber="1",pagesize="50") } } \keyword{Assignments}
/man/GetAssignment.Rd
no_license
SolomonMg/MTurkR
R
false
false
5,309
rd
\name{GetAssignment} \alias{GetAssignment} \alias{GetAssignments} \alias{assignment} \alias{assignments} \title{Get Assignment(s)} \description{Get an assignment or multiple assignments for one or more HITs (or a HITType) as a dataframe.} \usage{ GetAssignment( assignment = NULL, hit = NULL, hit.type = NULL, status = NULL, return.all = FALSE, pagenumber = "1", pagesize = "10", sortproperty = "SubmitTime", sortdirection = "Ascending", response.group = NULL, keypair = credentials(), print = TRUE, browser = FALSE, log.requests = TRUE, sandbox = FALSE, return.assignment.dataframe = TRUE) } \arguments{ \item{assignment}{An optional character string specifying the AssignmentId of an assignment to return.} \item{hit}{An optional character string specifying the HITId whose assignments are to be returned, or a vector of character strings specifying multiple HITIds all of whose assignments are to be returned.} \item{hit.type}{An optional character string specifying the HITTypeId of one or more HITs whose assignments are to be returned.} \item{status}{An optional character string (of \dQuote{Approved},\dQuote{Rejected},\dQuote{Submitted}), specifying whether only a subset of assignments should be returned. If \code{NULL}, all assignments are returned (the default). Only applies when \code{hit} or \code{hit.type} are specified; ignored otherwise.} \item{return.all}{If \code{TRUE}, all available assignments are returned. Otherwise, only assignments falling within the specified \code{pagenumber} and \code{pagesize} search results are returned.} \item{pagenumber}{An optional character string indicating which page of search results should be returned (only appropriate when specifying a single HITId). Most users can ignore this.} \item{pagesize}{An optional character string indicating how many search results should be returned by each request (only appropriate when specifying a single HITId), between 1 and 100. 
Most users can ignore this.} \item{sortproperty}{One of \dQuote{AcceptTime}, \dQuote{SubmitTime}, \dQuote{AssignmentStatus}. Ignored if \code{return.all=TRUE}. Most users can ignore this.} \item{sortdirection}{Either \dQuote{Ascending} or \dQuote{Descending}. Ignored if \code{return.all=TRUE}. Most users can ignore this.} \item{response.group}{An optional character string (or vector of character strings) specifying what details to return. If \code{assignment} is specified, \code{response.group} can include any of \dQuote{Request}, \dQuote{Minimal}, \dQuote{AssignmentFeedback}, \dQuote{HITDetail}, and/or \dQuote{HITQuestion}. If \code{hit} or \code{hit.type} is specified, \code{response.group} can include \dQuote{Request}, \dQuote{Minimal}, and/or \dQuote{AssignmentFeedback}. For more information, see \url{http://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_CommonParametersArticle.html}.} \item{keypair}{A two-item character vector containing an AWS Access Key ID in the first position and the corresponding Secret Access Key in the second position. Set default with \code{\link{credentials}}.} \item{print}{Optionally print the results of the API request to the standard output. Default is \code{TRUE}.} \item{browser}{Optionally open the request in the default web browser, rather than opening in R. Default is \code{FALSE}.} \item{log.requests}{A logical specifying whether API requests should be logged. Default is \code{TRUE}. See \code{\link{readlogfile}} for details.} \item{sandbox}{Optionally execute the request in the MTurk sandbox rather than the live server. Default is \code{FALSE}.} \item{return.assignment.dataframe}{A logical specifying whether the Assignment dataframe should be returned. Default is \code{TRUE}.} } \details{This function returns the requested assignments. The function must specify an AssignmentId xor a HITId xor a HITTypeId. If an AssignmentId is specified, only that assignment is returned. 
If a HIT or HITType is specified, default behavior is to return all assignments through a series of sequential (but invisible) API calls meaning that returning large numbers of assignments (or assignments for a large number of HITs in a single request) may be time consuming. \code{GetAssignments()}, \code{assignment()}, and \code{assignments()} are aliases. } \value{Optionally a dataframe containing Assignment data, including workers responses to any questions specified in the \code{question} parameter of the \code{CreateHIT} function.} \references{ \href{http://docs.amazonwebservices.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_GetAssignmentOperation.html}{API Reference: GetAssignment} \href{http://docs.amazonwebservices.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_GetAssignmentsForHITOperation.html}{API Reference: GetAssignmentsForHIT } } \author{Thomas J. Leeper} %\note{} \seealso{ \code{\link{GetHIT}} \code{\link{ApproveAssignment}} \code{\link{ApproveAllAssignments}} \code{\link{RejectAssignment}} } \examples{ \dontrun{ GetAssignment(assignments="26XXH0JPPSI23H54YVG7BKLEXAMPLE") GetAssignment(hit="2MQB727M0IGF304GJ16S1F4VE3AYDQ",return.all=TRUE) GetAssignment(hit.type="2FFNCWYB49F9BBJWA4SJUNST5OFSOW",return.all=FALSE,pagenumber="1",pagesize="50") } } \keyword{Assignments}
\name{clear_cache} \alias{clear_cache} \title{Clear file cache.} \usage{ clear_cache() } \description{ Clear file cache. } \keyword{internal}
/man/clear_cache.Rd
no_license
andrie/devtools
R
false
false
147
rd
\name{clear_cache} \alias{clear_cache} \title{Clear file cache.} \usage{ clear_cache() } \description{ Clear file cache. } \keyword{internal}