content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
###
### quick way to pick multinomial data
###
## Draw n single-trial multinomial samples and return, for each draw, the
## index of the chosen category (an integer in 1:length(prob)).
quick_multinom.apply.r <- function(n, prob) {
  draws <- rmultinom(n, 1, prob)
  if (n > 1) {
    ## one column per draw; find the row holding the single 1 in each
    apply(draws == 1, 2, which)
  } else {
    which(draws == 1)
  }
}
## Faster multinomial sampler: one rmultinom() call of n trials yields the
## count per category; inverse.rle() then expands those counts into a sorted
## vector of n category indices (1:length(prob)).
quick_multinom.r <- function(n, prob) {
  ## drop() collapses the k x 1 count matrix to a plain vector so that
  ## inverse.rle() receives a well-formed lengths component.
  counts <- drop(rmultinom(1, n, prob))
  inverse.rle(list(values = seq_along(prob), lengths = counts))
}
## Byte-compiled versions of the samplers above; cmpfun() comes from the
## base 'compiler' package, which must be attached elsewhere in the package.
quick_multinom.apply <- cmpfun(quick_multinom.apply.r)
quick_multinom <- cmpfun(quick_multinom.r)
####
# fast reproduction using vectorized functions
# (trying to avoid rmetasim)
#
## Pure-R reimplementation of the rmetasim reproduction step.  Expands the
## local reproduction matrix along the diagonal of the landscape matrix,
## draws a Poisson number of offspring per reproducing individual, and
## assigns each offspring a class via multinomial draws over its mother's
## column of rates.  Returns the landscape with offspring rows appended.
landscape.reproduce.Rversion <- function(rland)
{
###put the repro matrix in a nice form
Rloc <- rland$demography$localdem[[1]]$LocalR
## NOTE(review): this reads $epochs while landscape.survive.Rversion reads
## $epoch -- confirm which field name the rland structure actually uses.
R <- rland$demography$epochs[[1]]$R #landscape reproduction
md <- dim(Rloc)[1]
## Copy the local md x md matrix into each diagonal block of R.
for (i in seq(1,dim(R)[1],by=md))
R[i:(i+(md-1)),i:(i+(md-1))] <- Rloc
## Long format: one row per nonzero rate with 0-based (row, col) indices.
Rlong <- data.frame(cbind((which(R>0,arr.ind=T)-1),R[which(R>0,arr.ind=T)]))
colnames(Rlong)[3] <- "rate"
## Total reproductive rate per source class (0-based column index).
Rlong.tot=with(Rlong,aggregate(cbind(rate=rate),list(col=col),sum))
# print(dim(Rlong.tot))
#print(str(Rlong.tot))
# print(Rlong.tot$col)
# print(fac2num(Rlong.tot$col))
# Rlong.tot$col=fac2num(Rlong.tot$col) #convert factor to number as fast as possible
# Rlong.tot$col=as.numeric(as.character(Rlong.tot$col))
###calculate the number of offspring per ind
## Keep only individuals whose class (column 1) carries a nonzero rate.
repind <- rland$individuals[rland$individuals[,1]%in%Rlong[,2],]
if (dim(repind)[1]>0)
{
## Per-individual mean offspring number; the merge fills classes with no
## rate as NA, and indexing is class+1 because classes are 0-based.
repindmn <- merge(Rlong.tot,data.frame(col=0:(dim(R)[1]-1)),all.y=T)[repind[,1]+1,2]
repnoff <- repindmn
## One rpois() call per distinct mean rather than per individual.
for (i in unique(repindmn))
{
repnoff[repindmn==i] <- rpois(length(repindmn[repindmn==i]),i)
}
## Repeat each mother's row index once per offspring she produced.
mother.indx <- inverse.rle(list(lengths=repnoff,values=1:dim(repind)[1]))
newind <- matrix(0,nrow=length(mother.indx),ncol=6)
### newind[,1] <- merge(Rlong[,2:1],data.frame(col=0:(dim(R)[1]-1)),all.y=T)[repind[mother.indx,1]+1,2]
fromcols=repind[mother.indx,1]
rlst = lapply(unique(fromcols),function(x) #divide the individuals up based on among pop probs
{
### v=rmultinom(sum(fromcols==x),1,R[,x+1]/sum(R[,x+1]))
### if (is.matrix(v))
### {
### apply(v==1,2,which)-1
### } else {
### which(v==1)-1
### }
quick_multinom(sum(fromcols==x),prob=R[,x+1]/sum(R[,x+1]))-1
})
names(rlst) <- unique(fromcols)
newrows = fromcols #just makingin a vector to overwrite
for (nm in names(rlst))
{
newrows[fromcols==as.numeric(nm)]=rlst[[nm]]
}
## Column 1: offspring class (0-based); column 5: copied from the mother's
## column 4 (presumably her id -- TODO confirm); column 6: father untracked.
newind[,1] <- newrows
newind[,5] <- repind[mother.indx,4]
newind[,6] <- repind[1,6] #not tracking fathers id
newind[,4] <- rland$intparam$nextid:(rland$intparam$nextid+dim(newind)[1]-1) #set id #s
rland$intparam$nextid <- (rland$intparam$nextid+dim(newind)[1])
#now decide where the offspring go:
## Zero-pad the 6 demographic columns out to the full width of the
## individuals matrix (remaining columns) before appending the new rows.
rland$individuals <- rbind(rland$individuals,
cbind(newind,
matrix(0,nrow=dim(newind)[1],
ncol=dim(rland$individuals)[2]-dim(newind)[2])
)
)
}
rland
}
#####################################################################################################
## Pure-R reimplementation of the rmetasim survival step.  Each individual's
## class (column 1, 0-based) is redrawn from its column of the survival
## matrix S; draws landing in the appended residual category (index >=
## dim(S)[1]) represent death and those rows are removed.
landscape.survive.Rversion <- function(rland)
{
Sloc <- rland$demography$localdem[[1]]$LocalS
S <- rland$demography$epoch[[1]]$S
md <- dim(Sloc)[1]
## Copy the local survival matrix into each diagonal block of S.
for (i in seq(1,dim(S)[1],by=md))
S[i:(i+(md-1)),i:(i+(md-1))] <- Sloc
inds <- rland$individuals
for (col in (unique(inds[,1])))
{
## If the column sums to < 1, the remainder is the death probability and
## is appended as an extra final category; otherwise just normalize.
if (sum(S[,col+1])>=1)
{
p = S[,col+1]/sum(S[,col+1])
} else {
p=c(S[,col+1],1-sum(S[,col+1]))
}
### v=rmultinom(sum(inds[,1]==col),1,p)
### if (is.matrix(v))
### {
### fates=apply(v==1,2,which)-1
### } else {fates=which(v==1)-1}
### inds[inds[,1]==col,1] <- fates
## Redraw 0-based classes for everyone currently in this class.
inds[inds[,1]==col,1] <- quick_multinom(sum(inds[,1]==col),p)-1
}
## Individuals drawn into the appended death category exceed the valid
## class range and are dropped here.
inds <- inds[inds[,1]<(dim(S)[1]),]
rland$individuals <- inds
rland
}
## Advance the landscape by one generation: increment the generation counter
## and return the updated landscape.
landscape.advance.Rversion <- function(rland) {
  rland[["intparam"]][["currentgen"]] <- rland[["intparam"]][["currentgen"]] + 1
  rland
}
## Enforce per-population carrying capacity: populations above their
## capacity k keep a random subsample of k individuals; populations at or
## below capacity keep everyone.  Returns the landscape with the surviving
## rows of rland$individuals.
landscape.carry.Rversion <- function(rland) {
  k = rland$demography$epoch[[1]]$Carry
  names(k) <- 1:length(k)
  lpops <- landscape.populations(rland)
  ps = table(lpops)
  ## TRUE for populations over capacity.
  lrg = c(ps) > k[names(ps)]
  keeplst.big = unlist(sapply(names(lrg)[lrg], function(nm) {
    cand <- which(lpops == as.numeric(nm))
    ## Index into cand rather than calling sample(cand, k) directly:
    ## sample() treats a length-one numeric first argument as sample.int(x),
    ## which would silently draw from 1:cand instead of keeping cand itself.
    cand[sample.int(length(cand), k[nm])]
  }))
  keeplst.small = unlist(sapply(names(lrg)[!lrg], function(nm) {
    which(lpops == as.numeric(nm))
  }))
  keeplst <- c(keeplst.big, keeplst.small)
  rland$individuals = rland$individuals[keeplst, ]
  rland
}
#"typelookup" <-
#function(type)
# {
# type <- type + 251
# type
# }
## Convert a factor (or character vector) to numeric without the lossy
## as.numeric(factor) pitfall; numeric input passes through unchanged.
## Factors are converted via their levels, which avoids the cost of a full
## as.character() round-trip on long vectors.
fac2num <- function(f)
{
if (!is.numeric(f))
{
if((is.character(f)))
{
## BUG FIX: previously referenced undefined variable `v` instead of `f`,
## so the character branch always failed with an error.
as.numeric(as.character(f))
} else {
l=levels(f)
i=unclass(f)
as.numeric(l[i])
}
} else {f}
}
## Byte-compiled entry points for the three landscape steps (cmpfun from the
## base 'compiler' package, assumed attached elsewhere in the package).
landscape.reproduce.cmp <- cmpfun(landscape.reproduce.Rversion)
landscape.survive.cmp <- cmpfun(landscape.survive.Rversion)
landscape.carry.cmp <- cmpfun(landscape.carry.Rversion)
| /landscape-functions.R | no_license | stranda/holoSim | R | false | false | 5,788 | r | ###
### quick way to pick multinomial data
###
quick_multinom.apply.r <- function(n,prob)
{
v=rmultinom(n,1,prob)
if (n>1)
{
apply(v==1,2,which)
} else {
which(v==1)
}
}
quick_multinom.r <- function(n,prob)
{
v=rmultinom(1,n,prob)
inverse.rle(list(values=1:length(prob),lengths=v))
}
quick_multinom.apply <- cmpfun(quick_multinom.apply.r)
quick_multinom <- cmpfun(quick_multinom.r)
####
# fast reproduction using vectorized functions
# (trying to avoid rmetasim)
#
landscape.reproduce.Rversion <- function(rland)
{
###put the repro matrix in a nice form
Rloc <- rland$demography$localdem[[1]]$LocalR
R <- rland$demography$epochs[[1]]$R #landscape reproduction
md <- dim(Rloc)[1]
for (i in seq(1,dim(R)[1],by=md))
R[i:(i+(md-1)),i:(i+(md-1))] <- Rloc
Rlong <- data.frame(cbind((which(R>0,arr.ind=T)-1),R[which(R>0,arr.ind=T)]))
colnames(Rlong)[3] <- "rate"
Rlong.tot=with(Rlong,aggregate(cbind(rate=rate),list(col=col),sum))
# print(dim(Rlong.tot))
#print(str(Rlong.tot))
# print(Rlong.tot$col)
# print(fac2num(Rlong.tot$col))
# Rlong.tot$col=fac2num(Rlong.tot$col) #convert factor to number as fast as possible
# Rlong.tot$col=as.numeric(as.character(Rlong.tot$col))
###calculate the number of offspring per ind
repind <- rland$individuals[rland$individuals[,1]%in%Rlong[,2],]
if (dim(repind)[1]>0)
{
repindmn <- merge(Rlong.tot,data.frame(col=0:(dim(R)[1]-1)),all.y=T)[repind[,1]+1,2]
repnoff <- repindmn
for (i in unique(repindmn))
{
repnoff[repindmn==i] <- rpois(length(repindmn[repindmn==i]),i)
}
mother.indx <- inverse.rle(list(lengths=repnoff,values=1:dim(repind)[1]))
newind <- matrix(0,nrow=length(mother.indx),ncol=6)
### newind[,1] <- merge(Rlong[,2:1],data.frame(col=0:(dim(R)[1]-1)),all.y=T)[repind[mother.indx,1]+1,2]
fromcols=repind[mother.indx,1]
rlst = lapply(unique(fromcols),function(x) #divide the individuals up based on among pop probs
{
### v=rmultinom(sum(fromcols==x),1,R[,x+1]/sum(R[,x+1]))
### if (is.matrix(v))
### {
### apply(v==1,2,which)-1
### } else {
### which(v==1)-1
### }
quick_multinom(sum(fromcols==x),prob=R[,x+1]/sum(R[,x+1]))-1
})
names(rlst) <- unique(fromcols)
newrows = fromcols #just makingin a vector to overwrite
for (nm in names(rlst))
{
newrows[fromcols==as.numeric(nm)]=rlst[[nm]]
}
newind[,1] <- newrows
newind[,5] <- repind[mother.indx,4]
newind[,6] <- repind[1,6] #not tracking fathers id
newind[,4] <- rland$intparam$nextid:(rland$intparam$nextid+dim(newind)[1]-1) #set id #s
rland$intparam$nextid <- (rland$intparam$nextid+dim(newind)[1])
#now decide where the offspring go:
rland$individuals <- rbind(rland$individuals,
cbind(newind,
matrix(0,nrow=dim(newind)[1],
ncol=dim(rland$individuals)[2]-dim(newind)[2])
)
)
}
rland
}
#####################################################################################################
landscape.survive.Rversion <- function(rland)
{
Sloc <- rland$demography$localdem[[1]]$LocalS
S <- rland$demography$epoch[[1]]$S
md <- dim(Sloc)[1]
for (i in seq(1,dim(S)[1],by=md))
S[i:(i+(md-1)),i:(i+(md-1))] <- Sloc
inds <- rland$individuals
for (col in (unique(inds[,1])))
{
if (sum(S[,col+1])>=1)
{
p = S[,col+1]/sum(S[,col+1])
} else {
p=c(S[,col+1],1-sum(S[,col+1]))
}
### v=rmultinom(sum(inds[,1]==col),1,p)
### if (is.matrix(v))
### {
### fates=apply(v==1,2,which)-1
### } else {fates=which(v==1)-1}
### inds[inds[,1]==col,1] <- fates
inds[inds[,1]==col,1] <- quick_multinom(sum(inds[,1]==col),p)-1
}
inds <- inds[inds[,1]<(dim(S)[1]),]
rland$individuals <- inds
rland
}
landscape.advance.Rversion <- function(rland)
{
rland$intparam$currentgen=rland$intparam$currentgen+1
rland
}
## Enforce per-population carrying capacity: populations above their
## capacity k keep a random subsample of k individuals; populations at or
## below capacity keep everyone.  Returns the landscape with the surviving
## rows of rland$individuals.
landscape.carry.Rversion <- function(rland) {
  k = rland$demography$epoch[[1]]$Carry
  names(k) <- 1:length(k)
  lpops <- landscape.populations(rland)
  ps = table(lpops)
  ## TRUE for populations over capacity.
  lrg = c(ps) > k[names(ps)]
  keeplst.big = unlist(sapply(names(lrg)[lrg], function(nm) {
    cand <- which(lpops == as.numeric(nm))
    ## Index into cand rather than calling sample(cand, k) directly:
    ## sample() treats a length-one numeric first argument as sample.int(x),
    ## which would silently draw from 1:cand instead of keeping cand itself.
    cand[sample.int(length(cand), k[nm])]
  }))
  keeplst.small = unlist(sapply(names(lrg)[!lrg], function(nm) {
    which(lpops == as.numeric(nm))
  }))
  keeplst <- c(keeplst.big, keeplst.small)
  rland$individuals = rland$individuals[keeplst, ]
  rland
}
#"typelookup" <-
#function(type)
# {
# type <- type + 251
# type
# }
## Convert a factor (or character vector) to numeric without the lossy
## as.numeric(factor) pitfall; numeric input passes through unchanged.
## Factors are converted via their levels, which avoids the cost of a full
## as.character() round-trip on long vectors.
fac2num <- function(f)
{
if (!is.numeric(f))
{
if((is.character(f)))
{
## BUG FIX: previously referenced undefined variable `v` instead of `f`,
## so the character branch always failed with an error.
as.numeric(as.character(f))
} else {
l=levels(f)
i=unclass(f)
as.numeric(l[i])
}
} else {f}
}
landscape.reproduce.cmp <- cmpfun(landscape.reproduce.Rversion)
landscape.survive.cmp <- cmpfun(landscape.survive.Rversion)
landscape.carry.cmp <- cmpfun(landscape.carry.Rversion)
|
#' Print a labelled value
#'
#' Prints \code{x} to the console as the string \code{"x = <x>"}.
#'
#' @param r Unused by the current implementation -- TODO confirm intent.
#' @param x Value to print; pasted into the label via \code{paste0}.
#' @param y Unused by the current implementation -- TODO confirm intent.
#'
#' @return The printed string, invisibly (the return value of \code{print}).
#' @export
#'
#' @examples
#' printer(NULL, 2, NULL)
printer = function(r, x, y) {
  print(paste0("x = ", x))
}
| /R/printer.R | no_license | os2137/ggreporting | R | false | false | 148 | r | #' Title
#'
#' @param r
#' @param x
#' @param y
#'
#' @return
#' @export
#'
#' @examples
printer = function(r, x, y) {
print(paste0("x = ", x))
}
|
#' Test modularity hypothesis
#'
#' Tests modularity hypothesis using cor.matrix matrix and trait groupings
#' @param cor.matrix Correlation matrix
#' @param modularity.hipot Matrix of hypothesis. Each line represents a trait and each column a module.
#' if modularity.hipot[i,j] == 1, trait i is in module j.
#' @param iterations Number of iterations, to be passed to MantelCor
#' @return Returns mantel correlation and associated probability for each modularity hypothesis, along with AVG+, AVG-, AVG Ratio for each module.
#' A total hypothesis combining all hypotesis is also tested.
#' @author Diogo Melo, Guilherme Garcia
#' @seealso \code{\link{MantelCor}}
#' @export
#' @rdname TestModularity
#' @references Porto, Arthur, Felipe B. Oliveira, Leila T. Shirai, Valderes Conto, and Gabriel Marroig. 2009. "The Evolution of Modularity in the Mammalian Skull I: Morphological Integration Patterns and Magnitudes." Evolutionary Biology 36 (1): 118-35. doi:10.1007/s11692-008-9038-3.
#' @examples
#' cor.matrix <- RandomMatrix(10)
#' rand.hipots <- matrix(sample(c(1, 0), 30, replace=TRUE), 10, 3)
#' mod.test <- TestModularity(cor.matrix, rand.hipots)
#' @keywords mantel
#' @keywords modularity
TestModularity <- function (cor.matrix, modularity.hipot, iterations = 100) {
## Expand each 0/1 hypothesis column into a trait x trait binary matrix;
## the list's last element is the combined "Full Integration" hypothesis.
m.hip.list <- CreateHipotMatrix(as.matrix(modularity.hipot))
## Default hypothesis names to their column positions when unnamed.
if(is.null(colnames(modularity.hipot))) colnames(modularity.hipot) <- 1:dim (modularity.hipot) [2]
names(m.hip.list) <- c(colnames (modularity.hipot),"Full Integration")
## MantelCor (defined elsewhere) runs the Mantel test of each hypothesis
## matrix against the correlation matrix; mod = TRUE adds the module stats.
output <- MantelCor (m.hip.list, cor.matrix, iterations = iterations, mod = TRUE)
names(output)[1] <- 'hypothesis'
return (output)
}
#' @export
#' @rdname TestModularity
CreateHipotMatrix <- function(modularity.hipot) {
## Hypotheses are columns, traits are rows of the 0/1 membership matrix.
num.hip <- dim (modularity.hipot) [2]
num.traits <- dim (modularity.hipot) [1]
## outer(x, x) turns each membership column into a trait x trait matrix
## with 1 where both traits belong to the module (plyr::alply over cols).
m.hip.list <- alply(modularity.hipot, 2, function(x) outer(x, x))
## Append the elementwise union of all hypotheses, coerced back to a 0/1
## integer matrix, as the "full integration" hypothesis.
m.hip.list[[num.hip+1]] <- matrix(as.integer (as.logical (Reduce ("+", m.hip.list[1:num.hip]))),
num.traits, num.traits, byrow=T)
return(m.hip.list[1:(num.hip+1)])
}
| /R/TestModularity.R | permissive | aivuk/Morphometrics | R | false | false | 2,098 | r | #' Test modularity hypothesis
#'
#' Tests modularity hypothesis using cor.matrix matrix and trait groupings
#' @param cor.matrix Correlation matrix
#' @param modularity.hipot Matrix of hypothesis. Each line represents a trait and each column a module.
#' if modularity.hipot[i,j] == 1, trait i is in module j.
#' @param iterations Number of iterations, to be passed to MantelCor
#' @return Returns mantel correlation and associated probability for each modularity hypothesis, along with AVG+, AVG-, AVG Ratio for each module.
#' A total hypothesis combining all hypotesis is also tested.
#' @author Diogo Melo, Guilherme Garcia
#' @seealso \code{\link{MantelCor}}
#' @export
#' @rdname TestModularity
#' @references Porto, Arthur, Felipe B. Oliveira, Leila T. Shirai, Valderes Conto, and Gabriel Marroig. 2009. "The Evolution of Modularity in the Mammalian Skull I: Morphological Integration Patterns and Magnitudes." Evolutionary Biology 36 (1): 118-35. doi:10.1007/s11692-008-9038-3.
#' @examples
#' cor.matrix <- RandomMatrix(10)
#' rand.hipots <- matrix(sample(c(1, 0), 30, replace=TRUE), 10, 3)
#' mod.test <- TestModularity(cor.matrix, rand.hipots)
#' @keywords mantel
#' @keywords modularity
TestModularity <- function (cor.matrix, modularity.hipot, iterations = 100) {
m.hip.list <- CreateHipotMatrix(as.matrix(modularity.hipot))
if(is.null(colnames(modularity.hipot))) colnames(modularity.hipot) <- 1:dim (modularity.hipot) [2]
names(m.hip.list) <- c(colnames (modularity.hipot),"Full Integration")
output <- MantelCor (m.hip.list, cor.matrix, iterations = iterations, mod = TRUE)
names(output)[1] <- 'hypothesis'
return (output)
}
#' @export
#' @rdname TestModularity
CreateHipotMatrix <- function(modularity.hipot) {
num.hip <- dim (modularity.hipot) [2]
num.traits <- dim (modularity.hipot) [1]
m.hip.list <- alply(modularity.hipot, 2, function(x) outer(x, x))
m.hip.list[[num.hip+1]] <- matrix(as.integer (as.logical (Reduce ("+", m.hip.list[1:num.hip]))),
num.traits, num.traits, byrow=T)
return(m.hip.list[1:(num.hip+1)])
}
|
#' Analytics, Data Mining & Machine Learning Sidekick
#'
#' R library for better/faster analytics, visualization, data mining, and machine learning tasks.
#'
#' @md
#' @name lares
#' @docType package
#' @author Bernardo Lares (laresbernardo@@gmail.com)
#' @import dplyr
#' @import ggplot2
#' @importFrom graphics box hist plot points rasterImage rect grid legend mtext
#' @importFrom grDevices graphics.off dev.off png dev.size
#' @importFrom h2o as.h2o h2o.automl h2o.accuracy h2o.getModel h2o.performance h2o.init
#' h2o.removeAll h2o.download_mojo h2o.download_pojo h2o.loadModel h2o.no_progress h2o.predict
#' h2o.predict_json h2o.saveModel h2o.varimp h2o.getVersion h2o.glm
#' predict_contributions.H2OModel h2o.import_mojo h2o.no_progress h2o.show_progress
#' @importFrom httr GET POST oauth_endpoint oauth_app oauth1.0_token authenticate
#' stop_for_status upload_file add_headers content http_error set_config config
#' @importFrom jsonlite fromJSON toJSON flatten
#' @importFrom lubridate date day week weeks month year wday dmy_hms dmy ymd_hms ymd days
#' minute hour second %m+% %m-% floor_date ceiling_date years
#' @importFrom magrittr %>% set_colnames set_names
#' @importFrom openxlsx addWorksheet copyWorkbook loadWorkbook read.xlsx removeWorksheet
#' getSheetNames renameWorksheet saveWorkbook sheets write.xlsx
#' @importFrom patchwork guide_area plot_layout plot_annotation wrap_plots
#' @importFrom pROC roc ci
#' @importFrom rlang as_label .data
#' @importFrom rpart rpart rpart.control
#' @importFrom rpart.plot rpart.rules rpart.plot
#' @importFrom rvest html_node html_nodes html_attrs html_attr html_table html_text
#' @importFrom stats cor quantile complete.cases na.omit sd median dist end lm predict
#' reorder start kmeans var xtabs as.formula prcomp p.adjust pt model.matrix qt cor.test
#' @importFrom stringr fixed str_count str_length str_pad str_replace_all str_split
#' str_to_title word
#' @importFrom tidyr gather spread
#' @importFrom utils head tail packageVersion URLencode capture.output data download.file
#' globalVariables installed.packages write.table install.packages remove.packages object.size
#' type.convert flush.console read.table modifyList write.csv combn browseURL type.convert
#' getParseData find
#' @importFrom yaml read_yaml
"_PACKAGE"
####################################################################
#' Install/Update Additional Recommended Libraries
#'
#' All needed libraries to use (most) lares are already a dependency.
#' There are some functions that many people won't even know exist
#' that will require other additional libraries. Also, this may be
#' used as a Docker way of installing useful libraries on a new instance.
#'
#' @param progress Boolean. Show status bar?
#' @export
install_recommended <- function(progress = TRUE) {
  ## Install every package named in the `recommended` list, one at a time,
  ## optionally reporting progress through the package's statusbar() helper.
  pkgs <- names(recommended)
  for (idx in seq_along(pkgs)) {
    invisible(install.packages(pkgs[idx], quiet = TRUE, verbose = FALSE))
    if (progress) {
      statusbar(idx, length(pkgs), pkgs[idx], msg = "")
    }
  }
}
#' Pipe operator
#' @name lares-exports
NULL
#' @name %>%
#' @export
#' @rdname lares-exports
NULL
# Recommended additional libraries to fully take advantage of lares library
recommended <- list(
beepr = "beep",
circlize = c("chordDiagram", "uh"),
DALEX = c("explain.default", "model_performance", "model_profile", "predict_parts"),
data.table = "fread",
DBI = c("dbDriver", "dbConnect", "dbSendQuery", "fetch", "dbDisconnect"),
devtools = c("install", "install_github", "with_proxy"),
exifr = "read_exif",
factoextra = "fviz_nbclust",
forecast = c("Arima", "auto.arima", "forecast"),
gdata = "read.xls",
ggbeeswarm = "geom_quasirandom",
# ggforce = "geom_mark_ellipse",
# ggrepel = "geom_label_repel",
googleAnalyticsR = "google_analytics",
googleAuthR = "gar_auth",
googledrive = c("drive_auth", "drive_find", "local_drive_quiet"),
googlesheets4 = c(
"sheets_auth", "read_sheet", "gs4_create", "range_write",
"gs4_auth_configure", "gs4_auth", "sheet_append"
),
knitr = c("kable", "knit"),
methods = "as",
mice = c("mice", "complete"),
plotly = c("ggplotly", "plot_ly", "add_markers", "add_markers"),
prophet = c(
"prophet", "fit.prophet", "prophet_plot_components",
"add_country_holidays", "make_future_dataframe"
),
quantmod = c("getDividends", "getSymbols", "getQuote"),
rdrop2 = c("drop_auth", "drop_dir", "drop_download", "drop_search", "drop_upload"),
rgdal = c("readOGR", "project"),
RPostgreSQL = "PostgreSQL",
Rtsne = "Rtsne",
rtweet = c("create_token", "search_tweets"),
rmarkdown = "render",
skimr = "skim",
syuzhet = "get_sentiment_dictionary",
# sp = c("CRS", "over", "coordinates", "proj4string", "proj4string<-", "coordinates<-", "spTransform"),
threed = c("mesh3dobj", "transform_by", "invert_matrix", "perspective_projection", "look_at_matrix"),
tidytext = "unnest_tokens",
tm = c(
"content_transformer", "Corpus", "removeNumbers", "removePunctuation",
"removeWords", "readPlain", "stopwords", "stripWhitespace", "TermDocumentMatrix",
"tm_map", "VectorSource", "VCorpus"
),
udpipe = c("keywords_rake", "udpipe_annotate", "udpipe_download_model", "udpipe_load_model"),
wordcloud = c("wordcloud", "textplot")
)
# For read.file function... deprecated
# c("read.dta13", "read.spss")
# Register non-standard-evaluation names (plus ".") as known globals so that
# R CMD check does not flag them as undefined; globalVariables() exists only
# from R 2.15.1 onward, hence the version guard.
if (getRversion() >= "2.15.1") {
  globalVariables(c(as.vector(unlist(recommended)), "."))
}
| /R/lares.R | no_license | romainfrancois/lares | R | false | false | 5,435 | r | #' Analytics, Data Mining & Machine Learning Sidekick
#'
#' R library for better/faster analytics, visualization, data mining, and machine learning tasks.
#'
#' @md
#' @name lares
#' @docType package
#' @author Bernardo Lares (laresbernardo@@gmail.com)
#' @import dplyr
#' @import ggplot2
#' @importFrom graphics box hist plot points rasterImage rect grid legend mtext
#' @importFrom grDevices graphics.off dev.off png dev.size
#' @importFrom h2o as.h2o h2o.automl h2o.accuracy h2o.getModel h2o.performance h2o.init
#' h2o.removeAll h2o.download_mojo h2o.download_pojo h2o.loadModel h2o.no_progress h2o.predict
#' h2o.predict_json h2o.saveModel h2o.varimp h2o.getVersion h2o.glm
#' predict_contributions.H2OModel h2o.import_mojo h2o.no_progress h2o.show_progress
#' @importFrom httr GET POST oauth_endpoint oauth_app oauth1.0_token authenticate
#' stop_for_status upload_file add_headers content http_error set_config config
#' @importFrom jsonlite fromJSON toJSON flatten
#' @importFrom lubridate date day week weeks month year wday dmy_hms dmy ymd_hms ymd days
#' minute hour second %m+% %m-% floor_date ceiling_date years
#' @importFrom magrittr %>% set_colnames set_names
#' @importFrom openxlsx addWorksheet copyWorkbook loadWorkbook read.xlsx removeWorksheet
#' getSheetNames renameWorksheet saveWorkbook sheets write.xlsx
#' @importFrom patchwork guide_area plot_layout plot_annotation wrap_plots
#' @importFrom pROC roc ci
#' @importFrom rlang as_label .data
#' @importFrom rpart rpart rpart.control
#' @importFrom rpart.plot rpart.rules rpart.plot
#' @importFrom rvest html_node html_nodes html_attrs html_attr html_table html_text
#' @importFrom stats cor quantile complete.cases na.omit sd median dist end lm predict
#' reorder start kmeans var xtabs as.formula prcomp p.adjust pt model.matrix qt cor.test
#' @importFrom stringr fixed str_count str_length str_pad str_replace_all str_split
#' str_to_title word
#' @importFrom tidyr gather spread
#' @importFrom utils head tail packageVersion URLencode capture.output data download.file
#' globalVariables installed.packages write.table install.packages remove.packages object.size
#' type.convert flush.console read.table modifyList write.csv combn browseURL type.convert
#' getParseData find
#' @importFrom yaml read_yaml
"_PACKAGE"
####################################################################
#' Install/Update Additional Recommended Libraries
#'
#' All needed libraries to use (most) lares are already a dependency.
#' There are some functions that many people won't event know exist
#' that will require other additional libraries. Also, this may be
#' used as a Docker way of installing useful libraries on an new instance.
#'
#' @param progress Boolean. Show status bar?
#' @export
install_recommended <- function(progress = TRUE) {
for (lib in names(recommended)) {
invisible(install.packages(lib, quiet = TRUE, verbose = FALSE))
if (progress) {
statusbar(which(lib == names(recommended)), length(recommended), lib, msg = "")
}
}
}
#' Pipe operator
#' @name lares-exports
NULL
#' @name %>%
#' @export
#' @rdname lares-exports
NULL
# Recommended additional libraries to fully take advantage of lares library
recommended <- list(
beepr = "beep",
circlize = c("chordDiagram", "uh"),
DALEX = c("explain.default", "model_performance", "model_profile", "predict_parts"),
data.table = "fread",
DBI = c("dbDriver", "dbConnect", "dbSendQuery", "fetch", "dbDisconnect"),
devtools = c("install", "install_github", "with_proxy"),
exifr = "read_exif",
factoextra = "fviz_nbclust",
forecast = c("Arima", "auto.arima", "forecast"),
gdata = "read.xls",
ggbeeswarm = "geom_quasirandom",
# ggforce = "geom_mark_ellipse",
# ggrepel = "geom_label_repel",
googleAnalyticsR = "google_analytics",
googleAuthR = "gar_auth",
googledrive = c("drive_auth", "drive_find", "local_drive_quiet"),
googlesheets4 = c(
"sheets_auth", "read_sheet", "gs4_create", "range_write",
"gs4_auth_configure", "gs4_auth", "sheet_append"
),
knitr = c("kable", "knit"),
methods = "as",
mice = c("mice", "complete"),
plotly = c("ggplotly", "plot_ly", "add_markers", "add_markers"),
prophet = c(
"prophet", "fit.prophet", "prophet_plot_components",
"add_country_holidays", "make_future_dataframe"
),
quantmod = c("getDividends", "getSymbols", "getQuote"),
rdrop2 = c("drop_auth", "drop_dir", "drop_download", "drop_search", "drop_upload"),
rgdal = c("readOGR", "project"),
RPostgreSQL = "PostgreSQL",
Rtsne = "Rtsne",
rtweet = c("create_token", "search_tweets"),
rmarkdown = "render",
skimr = "skim",
syuzhet = "get_sentiment_dictionary",
# sp = c("CRS", "over", "coordinates", "proj4string", "proj4string<-", "coordinates<-", "spTransform"),
threed = c("mesh3dobj", "transform_by", "invert_matrix", "perspective_projection", "look_at_matrix"),
tidytext = "unnest_tokens",
tm = c(
"content_transformer", "Corpus", "removeNumbers", "removePunctuation",
"removeWords", "readPlain", "stopwords", "stripWhitespace", "TermDocumentMatrix",
"tm_map", "VectorSource", "VCorpus"
),
udpipe = c("keywords_rake", "udpipe_annotate", "udpipe_download_model", "udpipe_load_model"),
wordcloud = c("wordcloud", "textplot")
)
# For read.file function... deprecated
# c("read.dta13", "read.spss")
if (getRversion() >= "2.15.1") {
globalVariables(c(as.vector(unlist(recommended)), "."))
}
|
# Opening the data, subsetting it and converting variables into the right format
# Assuming that the data.table package has been installed
library(data.table)

# Read the full dataset; "?" and empty strings mark missing values.
power = fread("household_power_consumption.txt", header = TRUE, sep = ";",
              na.strings = c("NA", "?", ""), stringsAsFactors = FALSE)
power$Date = as.Date(power$Date, "%d/%m/%Y")

# Keep only the two days of interest.
power1 = subset(power, (Date == "2007-02-01") | (Date == "2007-02-02"))

# Convert every measurement column to numeric in one pass instead of
# repeating the same as.numeric() assignment seven times.
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Global_intensity", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3")
for (cl in num_cols) power1[[cl]] <- as.numeric(power1[[cl]])

# Creating the plot
hist(power1$Global_active_power,
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")

# Saving it to file; 480x480 is the png() default size, so it is not
# specified explicitly.
dev.copy(png, file = "plot1.png")
dev.off()
| /plot1.R | no_license | KnightInWhiteSatin/ExData_Plotting1 | R | false | false | 1,115 | r | # Opening the data, subsetting it and converting variables into the right format
# Assuming that the data.table package has been installed
library(data.table)
power = fread("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = c("NA","?",""), stringsAsFactors = FALSE)
power$Date = as.Date(power$Date, "%d/%m/%Y")
power1 = subset(power, (Date == "2007-02-01") | (Date == "2007-02-02"))
power1$Global_active_power = as.numeric(power1$Global_active_power)
power1$Global_reactive_power = as.numeric(power1$Global_reactive_power)
power1$Voltage = as.numeric(power1$Voltage)
power1$Global_intensity = as.numeric(power1$Global_intensity)
power1$Sub_metering_1 = as.numeric(power1$Sub_metering_1)
power1$Sub_metering_2 = as.numeric(power1$Sub_metering_2)
power1$Sub_metering_3 = as.numeric(power1$Sub_metering_3)
# Creating the plot
hist(power1$Global_active_power,
col = "red",
xlab = "Global Active Power (kilowatts)",
main = "Global Active Power")
# Saving it in file
# The 480x480 size is the default, so I didn't specify it in the function
dev.copy(png, file = "plot1.png")
dev.off()
|
library(PerMallows)
### Name: perm2cycles
### Title: Decompose a permutation in a set of cycles
### Aliases: perm2cycles
### ** Examples
perm2cycles(c(1,5,2,3,4))
| /data/genthat_extracted_code/PerMallows/examples/perm2cycles.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 170 | r | library(PerMallows)
### Name: perm2cycles
### Title: Decompose a permutation in a set of cycles
### Aliases: perm2cycles
### ** Examples
perm2cycles(c(1,5,2,3,4))
|
# TODO: Add comment
#
# Author: Miguel Alvarez
###############################################################################
# Development scratch script: install the GitHub version of gisrepos and
# exercise the character-conversion helpers against the .gisrepos dataset.
remotes::install_github("kamapu/gisrepos")
library(gisrepos)
# gr2list() is unexported (accessed via :::) -- dev-only usage.
gr <- gisrepos:::gr2list(.gisrepos)
Test <- gr2char(gr)
Test2 <- gr2char(.gisrepos)
| /data-raw/z1_tests.R | no_license | kamapu/gisrepos | R | false | false | 277 | r | # TODO: Add comment
#
# Author: Miguel Alvarez
###############################################################################
remotes::install_github("kamapu/gisrepos")
library(gisrepos)
gr <- gisrepos:::gr2list(.gisrepos)
Test <- gr2char(gr)
Test2 <- gr2char(.gisrepos)
|
\alias{GtkStatusbar}
\alias{gtkStatusbar}
\name{GtkStatusbar}
\title{GtkStatusbar}
\description{Report messages of minor importance to the user}
\section{Methods and Functions}{
\code{\link{gtkStatusbarNew}(show = TRUE)}\cr
\code{\link{gtkStatusbarGetContextId}(object, context.description)}\cr
\code{\link{gtkStatusbarPush}(object, context.id, text)}\cr
\code{\link{gtkStatusbarPop}(object, context.id)}\cr
\code{\link{gtkStatusbarRemove}(object, context.id, message.id)}\cr
\code{\link{gtkStatusbarSetHasResizeGrip}(object, setting)}\cr
\code{\link{gtkStatusbarGetHasResizeGrip}(object)}\cr
\code{\link{gtkStatusbarGetMessageArea}(object)}\cr
\code{gtkStatusbar(show = TRUE)}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkContainer
+----GtkBox
+----GtkHBox
+----GtkStatusbar}}
\section{Interfaces}{GtkStatusbar implements
AtkImplementorIface, \code{\link{GtkBuildable}} and \code{\link{GtkOrientable}}.}
\section{Detailed Description}{A \code{\link{GtkStatusbar}} is usually placed along the bottom of an application's main
\code{\link{GtkWindow}}. It may provide a regular commentary of the application's status
(as is usually the case in a web browser, for example), or may be used to
simply output a message when the status changes, (when an upload is complete
in an FTP client, for example).
It may also have a resize grip (a triangular area in the lower right corner)
which can be clicked on to resize the window containing the statusbar.
Status bars in GTK+ maintain a stack of messages. The message at
the top of the each bar's stack is the one that will currently be displayed.
Any messages added to a statusbar's stack must specify a \emph{context
id} that is used to uniquely identify the source of a message.
This context id can be generated by \code{\link{gtkStatusbarGetContextId}}, given a
message and the statusbar that it will be added to. Note that messages are
stored in a stack, and when choosing which message to display, the stack
structure is adhered to, regardless of the context identifier of a message.
One could say that a statusbar maintains one stack of messages for display
purposes, but allows multiple message producers to maintain sub-stacks of
the messages they produced (via context ids).
Status bars are created using \code{\link{gtkStatusbarNew}}.
Messages are added to the bar's stack with \code{\link{gtkStatusbarPush}}.
The message at the top of the stack can be removed using \code{\link{gtkStatusbarPop}}.
A message can be removed from anywhere in the stack if its message_id was
recorded at the time it was added. This is done using \code{\link{gtkStatusbarRemove}}.}
\section{Structures}{\describe{\item{\verb{GtkStatusbar}}{
Contains private data that should be modified with the functions described
below.
}}}
\section{Convenient Construction}{\code{gtkStatusbar} is the equivalent of \code{\link{gtkStatusbarNew}}.}
\section{Signals}{\describe{
\item{\code{text-popped(statusbar, context.id, text, user.data)}}{
Is emitted whenever a new message is popped off a statusbar's stack.
\describe{
\item{\code{statusbar}}{the object which received the signal.}
\item{\code{context.id}}{the context id of the relevant message/statusbar.}
\item{\code{text}}{the message that was just popped.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{text-pushed(statusbar, context.id, text, user.data)}}{
Is emitted whenever a new message gets pushed onto a statusbar's stack.
\describe{
\item{\code{statusbar}}{the object which received the signal.}
\item{\code{context.id}}{the context id of the relevant message/statusbar.}
\item{\code{text}}{the message that was pushed.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
}}
\section{Properties}{\describe{\item{\verb{has-resize-grip} [logical : Read / Write]}{
Whether the statusbar has a grip for resizing the toplevel window.
Default value: TRUE. Since 2.4.
}}}
\section{Style Properties}{\describe{\item{\verb{shadow-type} [\code{\link{GtkShadowType}} : Read]}{
Style of bevel around the statusbar text. Default value: GTK_SHADOW_IN
}}}
\references{\url{https://developer-old.gnome.org/gtk2/stable/GtkStatusbar.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/GtkStatusbar.Rd | no_license | lawremi/RGtk2 | R | false | false | 4,496 | rd | \alias{GtkStatusbar}
\alias{gtkStatusbar}
\name{GtkStatusbar}
\title{GtkStatusbar}
\description{Report messages of minor importance to the user}
\section{Methods and Functions}{
\code{\link{gtkStatusbarNew}(show = TRUE)}\cr
\code{\link{gtkStatusbarGetContextId}(object, context.description)}\cr
\code{\link{gtkStatusbarPush}(object, context.id, text)}\cr
\code{\link{gtkStatusbarPop}(object, context.id)}\cr
\code{\link{gtkStatusbarRemove}(object, context.id, message.id)}\cr
\code{\link{gtkStatusbarSetHasResizeGrip}(object, setting)}\cr
\code{\link{gtkStatusbarGetHasResizeGrip}(object)}\cr
\code{\link{gtkStatusbarGetMessageArea}(object)}\cr
\code{gtkStatusbar(show = TRUE)}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkContainer
+----GtkBox
+----GtkHBox
+----GtkStatusbar}}
\section{Interfaces}{GtkStatusbar implements
AtkImplementorIface, \code{\link{GtkBuildable}} and \code{\link{GtkOrientable}}.}
\section{Detailed Description}{A \code{\link{GtkStatusbar}} is usually placed along the bottom of an application's main
\code{\link{GtkWindow}}. It may provide a regular commentary of the application's status
(as is usually the case in a web browser, for example), or may be used to
simply output a message when the status changes, (when an upload is complete
in an FTP client, for example).
It may also have a resize grip (a triangular area in the lower right corner)
which can be clicked on to resize the window containing the statusbar.
Status bars in GTK+ maintain a stack of messages. The message at
the top of each bar's stack is the one that is currently displayed.
Any messages added to a statusbar's stack must specify a \emph{context
id} that is used to uniquely identify the source of a message.
This context id can be generated by \code{\link{gtkStatusbarGetContextId}}, given a
message and the statusbar that it will be added to. Note that messages are
stored in a stack, and when choosing which message to display, the stack
structure is adhered to, regardless of the context identifier of a message.
One could say that a statusbar maintains one stack of messages for display
purposes, but allows multiple message producers to maintain sub-stacks of
the messages they produced (via context ids).
Status bars are created using \code{\link{gtkStatusbarNew}}.
Messages are added to the bar's stack with \code{\link{gtkStatusbarPush}}.
The message at the top of the stack can be removed using \code{\link{gtkStatusbarPop}}.
A message can be removed from anywhere in the stack if its message_id was
recorded at the time it was added. This is done using \code{\link{gtkStatusbarRemove}}.}
\section{Structures}{\describe{\item{\verb{GtkStatusbar}}{
Contains private data that should be modified with the functions described
below.
}}}
\section{Convenient Construction}{\code{gtkStatusbar} is the equivalent of \code{\link{gtkStatusbarNew}}.}
\section{Signals}{\describe{
\item{\code{text-popped(statusbar, context.id, text, user.data)}}{
Is emitted whenever a new message is popped off a statusbar's stack.
\describe{
\item{\code{statusbar}}{the object which received the signal.}
\item{\code{context.id}}{the context id of the relevant message/statusbar.}
\item{\code{text}}{the message that was just popped.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
\item{\code{text-pushed(statusbar, context.id, text, user.data)}}{
Is emitted whenever a new message gets pushed onto a statusbar's stack.
\describe{
\item{\code{statusbar}}{the object which received the signal.}
\item{\code{context.id}}{the context id of the relevant message/statusbar.}
\item{\code{text}}{the message that was pushed.}
\item{\code{user.data}}{user data set when the signal handler was connected.}
}
}
}}
\section{Properties}{\describe{\item{\verb{has-resize-grip} [logical : Read / Write]}{
Whether the statusbar has a grip for resizing the toplevel window.
Default value: TRUE. Since 2.4.
}}}
\section{Style Properties}{\describe{\item{\verb{shadow-type} [\code{\link{GtkShadowType}} : Read]}{
Style of bevel around the statusbar text. Default value: GTK_SHADOW_IN
}}}
\references{\url{https://developer-old.gnome.org/gtk2/stable/GtkStatusbar.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
library("testthat")
library("lme4")
## Depth of testing: taken from the LME4_TEST_LEVEL environment variable;
## an unset or empty variable falls back to the default depth of 1.
s <- Sys.getenv("LME4_TEST_LEVEL")
testLevel <- if (nzchar(s)) as.numeric(s) else 1
## Custom testthat expectation: succeeds when evaluating `expr` raises an
## error OR at least one warning.
##
## Arguments:
##   regexp - optional pattern the error message / warning text(s) must match.
##   all    - when matching warnings, must *all* of them match `regexp`
##            (TRUE) or is one match enough (FALSE, the default)?
##   ...    - passed on to testthat's matches().
## Returns a one-argument expectation function, as testthat requires.
gives_error_or_warning <- function (regexp = NULL, all = FALSE, ...)
{
    function(expr) {
        ## evaluate_promise() captures warnings/messages but lets errors
        ## escape; try() converts an escaping error into a "try-error".
        res <- try(evaluate_promise(expr),silent=TRUE)
        no_error <- !inherits(res, "try-error")
        if (no_error) {
            warnings <- res$warnings
            if (!is.null(regexp) && length(warnings) > 0) {
                ## BUG FIX: the `all` argument used to be ignored here
                ## (it was hard-coded to all = FALSE); honour the caller's
                ## choice instead.
                return(matches(regexp, all = all, ...)(warnings))
            } else {
                return(expectation(length(warnings) > 0, "no warnings or errors given",
                                   paste0(length(warnings), " warnings created")))
            }
        }
        ## An error was thrown: check its message when a pattern was given;
        ## otherwise the error alone satisfies the expectation.
        if (!is.null(regexp)) {
            return(matches(regexp, ...)(res))
        }
        else {
            expectation(TRUE, "no error thrown", "threw an error")
        }
    }
}
## expect_that(stop("foo"),gives_error_or_warning("foo"))
## expect_that(warning("foo"),gives_error_or_warning("foo"))
## expect_that(TRUE,gives_error_or_warning("foo"))
## expect_that(stop("bar"),gives_error_or_warning("foo"))
## expect_that(warning("bar"),gives_error_or_warning("foo"))
## Regression tests for glmer(): deprecated-argument warnings, the classic
## cbpp binomial fit (coefficients, extractors, likelihood), control/option
## plumbing, complete-separation behaviour, saved-data regressions, and
## convergence-warning handling.  NOTE: numeric reference values below are
## platform-tuned with explicit tolerances -- do not "clean them up".
context("fitting glmer models")
test_that("glmer", {
    ## ---- warnings for deprecated / inert arguments ----------------------
    set.seed(101)
    d <- data.frame(z=rbinom(200,size=1,prob=0.5),
                    f=factor(sample(1:10,200,replace=TRUE)))
    expect_warning(glmer(z~ 1|f, d, family=binomial, method="abc"),"Use the nAGQ argument")
    expect_warning(glmer(z~ 1|f, d, family=binomial, method="Laplace"),"Use the nAGQ argument")
    expect_warning(glmer(z~ 1|f, d, sparseX=TRUE),"has no effect at present")
    ## ---- basic cbpp binomial fit: classes, coefficients, likelihood -----
    expect_that(gm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
                             data = cbpp, family = binomial), is_a("glmerMod"))
    expect_that(gm1@resp, is_a("glmResp"))
    expect_that(gm1@pp, is_a("merPredD"))
    expect_equal(ge1 <- unname(fixef(gm1)), c(-1.39854982537216, -0.992335519118859,
                                              -1.12867532780426, -1.58030423764517),
                 tolerance=5e-4)
    expect_equal(c(VarCorr(gm1)[[1]]), 0.41245527438386, tolerance=6e-4)
    ### expect_that(family(gm1), equals(binomial()))
    ### ?? binomial() has an 'initialize' component ... and the order is different
    expect_equal(deviance(gm1), 73.47428, tolerance=1e-5)
    ## was -2L = 184.05267459802
    expect_equal(sigma(gm1), 1)
    expect_equal(extractAIC(gm1), c(5, 194.052674598026), tolerance=1e-5)
    expect_equal(theta <- unname(getME(gm1, "theta")), 0.642226809144453, tolerance=6e-4)
    ## ---- extractor methods (getME) --------------------------------------
    expect_that(X <- getME(gm1, "X"), is_equivalent_to(
        model.matrix(model.frame(~ period, data=cbpp), cbpp)))
    expect_that(Zt <- getME(gm1, "Zt"), is_a("dgCMatrix"))
    expect_equal(dim(Zt), c(15L, 56L))
    expect_equal(Zt@x, rep.int(1, 56L))
    expect_that(Lambdat <- getME(gm1, "Lambdat"), is_a("dgCMatrix"))
    expect_equivalent(as(Lambdat, "matrix"), diag(theta, 15L, 15L))
    ## ---- alternative links and degenerate inputs ------------------------
    expect_is(gm1_probit <- update(gm1,family=binomial(link="probit")),"merMod")
    expect_equal(family(gm1_probit)$link,"probit")
    ## FIXME: test user-specified/custom family?
    expect_error(glFormula(cbind(incidence, size - incidence) ~ period + (1 | herd),
                           data = subset(cbpp, herd==levels(herd)[1]), family = binomial),
                 "must have > 1")
    expect_warning(glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
                         data = subset(cbpp, herd %in% levels(herd)[1:4]),
                         family = binomial,
                         control=glmerControl(check.nlev.gtreq.5="warning")),
                   "< 5 sampled levels")
    expect_warning(fm1. <- glmer(Reaction ~ Days + (Days|Subject), sleepstudy),
                   regexp="calling .* with family=gaussian .* as a shortcut")
    ## ---- option handling / argument plumbing ----------------------------
    ## warn=2 promotes warnings to errors so any unexpected warning in the
    ## expect_is() calls below fails the test; restored with warn=0 later.
    options(warn=2)
    options(glmerControl=list(junk=1,check.conv.grad="ignore"))
    expect_warning(glmer(z~ 1|f, d, family=binomial),
                   "some options")
    options(glmerControl=NULL)
    cbppX <- transform(cbpp,prop=incidence/size)
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, start=NULL),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, verbose=0L),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, subset=TRUE),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, na.action="na.exclude"),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, offset=rep(0,nrow(cbppX))),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, contrasts=NULL),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, devFunOnly=FALSE),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size,
                    control=glmerControl(optimizer="Nelder_Mead")),
              "glmerMod")
    expect_is(glmer(prop ~ period + (1 | herd),
                    data = cbppX, family = binomial, weights=size, control=glmerControl()),
              "glmerMod")
    options(warn=0)
    expect_warning(glmer(prop ~ period + (1 | herd),
                         data = cbppX, family = binomial, weights=size, junkArg=TRUE),
                   "extra argument.*disregarded")
    if(FALSE) { ## Hadley broke this
        expect_warning(glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
                             data = cbpp, family = binomial,
                             control=list()),
                       "instead of passing a list of class")
        expect_warning(glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
                             data = cbpp, family = binomial,
                             control=lmerControl()),
                       "instead of passing a list of class")
    }
    ##
    ## ---- saved-data regression: radinger (binomial) ---------------------
    load(system.file("testdata","radinger_dat.RData",package="lme4"))
    mod <- glmer(presabs~predictor+(1|species),family=binomial,
                 radinger_dat)
    expect_is(mod,"merMod")
    ## TODO: is this reliable across platforms or do we have to loosen?
    expect_equal(unname(fixef(mod)),c(0.5425528,6.4289962))
    set.seed(101)
    ## complete separation case
    d <- data.frame(y=rbinom(1000,size=1,p=0.5),
                    x=runif(1000),
                    f=factor(rep(1:20,each=50)),
                    x2=rep(0:1,c(999,1)))
    mod2 <- glmer(y~x+x2+(1|f),data=d,family=binomial,
                  control=glmerControl(check.conv.hess="ignore",
                                       check.conv.grad="ignore"))
    expect_equal(unname(fixef(mod2))[1:2],
                 c(-0.10036244,0.03548523), tolerance=1e-4)
    expect_true(unname(fixef(mod2)[3] < -10))
    mod3 <- update(mod2, family=binomial(link="probit"))
    # singular Hessian warning
    expect_equal(unname(fixef(mod3))[1:2], c(-0.062889, 0.022241), tolerance=1e-4)
    expect_true(fixef(mod3)[3] < -4)
    mod4 <- update(mod2, family=binomial(link="cauchit"),
                   control=glmerControl(check.conv.hess="ignore",
                                        check.conv.grad="ignore"))#--> singular Hessian warning
    ## on-the-fly creation of index variables
    if (FALSE) {
        ## FIXME: fails in testthat context -- 'd' is not found
        ## in the parent environment of glmer() -- but works fine
        ## otherwise ...
        set.seed(101)
        d <- data.frame(y1=rpois(100,1), x=rnorm(100), ID=1:100)
        fit1 <- glmer(y1 ~ x+(1|ID),data=d,family=poisson)
        fit2 <- update(fit1, .~ x+(1|rownames(d)))
        expect_equal(unname(unlist(VarCorr(fit1))),
                     unname(unlist(VarCorr(fit2))))
    }
    ##
    ## ---- slow optimizer-comparison test, gated on LME4_TEST_LEVEL -------
    if(testLevel > 1) {
        load(system.file("testdata","mastitis.rda",package="lme4"))
        t1 <- system.time(g1 <-
                          glmer(NCM ~ birth + calvingYear + (1|sire) + (1|herd),
                                mastitis, poisson,
                                ## current (2014-04-24) default:
                                control=glmerControl(optimizer=c("bobyqa","Nelder_Mead"))))
        t2 <- system.time(g2 <- update(g1,
                                       control=glmerControl(optimizer="bobyqa")))
        ## 20 (then 13.0) seconds N-M vs 8 (then 4.8) seconds bobyqa ...
        ## problem is fairly ill-conditioned so parameters
        ## are relatively far apart even though likelihoods are OK
        expect_equal(logLik(g1),logLik(g2),tolerance=1e-7)
    }
    ## test bootstrap/refit with nAGQ>1
    gm1AGQ <- update(gm1,nAGQ=2)
    expect_equal(attr(bootMer(gm1AGQ,fixef),"bootFail"),0)
    ## do.call(new,...) bug
    ## (shadowing base::new with a string must not break the refit)
    new <- "foo"
    expect_that(gm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
                             data = cbpp, family = binomial), is_a("glmerMod"))
    rm("new")
    ## test issue #47, from Wolfgang Viechtbauer
    ## create some data
    n <- 100
    ai <- rep(0:1, each = n/2)
    bi <- 1-ai
    ci <- c(rep(0,42), rep(1,8), rep(0,18), rep(1,32))
    di <- 1-ci
    event <- c(rbind(ai,ci))
    group <- rep(c(1,0), times=n)
    id <- rep(1:n, each=2)
    gm3 <- glmer(event ~ group + (1 | id), family=binomial, nAGQ=21)
    sd3 <- sqrt(diag(vcov(gm3)))
    expect_equal(sd3, c(0.4254254, 0.424922), tolerance=1e-5)
    expect_warning(vcov(gm3,use.hessian=FALSE), "finite-difference Hessian")
    expect_equal(suppressWarnings(sqrt(diag(vcov(gm3,use.hessian=FALSE)))),
                 c(0.3840921, 0.3768747), tolerance=1e-7)
    expect_equal(sd3, unname(coef(summary(gm3))[,"Std. Error"]))
    ## test non-pos-def finite-difference Hessian ...
    if(getRversion() > "3.0.0") {
        ## saved fits are not safe with old R versions
        L <- load(system.file("testdata","polytomous_vcov_ex.RData",
                              package="lme4", mustWork=TRUE))
        expect_warning(vcov(polytomous_vcov_ex),"falling back to var-cov")
    }
    ## damage Hessian to make it singular
    ## (example thanks to J. Dushoff)
    gm1H <- gm1
    gm1H@optinfo$derivs$Hessian[5,] <- 0
    expect_warning(vcov(gm1H),"falling back to var-cov")
    ## test convergence warnings
    L <- load(system.file("testdata","gopherdat2.RData",
                          package="lme4", mustWork=TRUE))
    g0 <- glmer(shells~prev + (1|Site)+offset(log(Area)),
                family=poisson, data=Gdat)
    ## fit year as factor: OK
    gc <- glmerControl(check.conv.grad="stop")
    expect_is(update(g0,.~.+factor(year), control=gc), "glmerMod")
    ## error/warning with year as numeric:
    ## don't have full knowledge of which platforms lead to which
    ## results, and can't detect whether we're running on valgrind,
    ## which changes the result on 32-bit linux ...
    ## SEGFAULT on MacOS? why?
    if (FALSE) {
        expect_that(update(g0,.~.+year),
                    gives_error_or_warning("(failed to converge|pwrssUpdate did not converge)"))
    }
    ## ("(failed to converge|pwrssUpdate did not converge in)"))
    ## if (sessionInfo()$platform=="i686-pc-linux-gnu (32-bit)") {
    ##     expect_warning(update(g0, .~. +year), "failed to converge")
    ## } else {
    ##     ## MacOS x86_64-apple-darwin10.8.0 (64-bit)
    ##     ## MM's platform
    ##     ## "pwrssUpdate did not converge in (maxit) iterations"
    ##     expect_error(update(g0, .~. +year), "pwrssUpdate did not converge in")
    ## }
    ## OK if we scale & center it
    expect_is(update(g0,.~. + scale(year), control=gc), "glmerMod")
    ## not OK if we scale and don't center
    expect_warning(update(g0,.~. + scale(year,center=FALSE)),
                   "failed to converge with max|grad|")
    ## OK if center and don't scale
    expect_is(update(g0,.~. + scale(year,center=TRUE,scale=FALSE),
                     control=gc),
              "glmerMod")
    ## try higher-order AGQ
    expect_is(update(gm1,nAGQ=90),"glmerMod")
    expect_error(update(gm1,nAGQ=101),"ord < 101L")
    ## non-numeric response variables
    ss <- transform(sleepstudy,Reaction=as.character(Reaction))
    expect_error(glmer(Reaction~(1|Days),family="poisson",data=ss),
                 "response must be numeric")
    expect_error(glmer(Reaction~(1|Days),family="binomial",data=ss),
                 "response must be numeric or factor")
    ss2 <- transform(ss,rr=rep(c(TRUE,FALSE),length.out=nrow(ss)))
    ## should work OK with logical too
    expect_is(glmer(rr~(1|Days),family="binomial",data=ss2),"merMod")
    ## starting values with log(.) link -- thanks to Eric Weese @ Yale:
    grp <- rep(letters[1:5], 20); set.seed(1); x <- rnorm(100)
    expect_error(glmer(x ~ 1 + (1|grp), family=gaussian(link="log")),
                 "valid starting values")
})
| /inst/tests/test-glmer.R | no_license | uthsavi/lme4 | R | false | false | 13,563 | r | library("testthat")
library("lme4")
testLevel <- if (nzchar(s <- Sys.getenv("LME4_TEST_LEVEL")))
as.numeric(s) else 1
gives_error_or_warning <- function (regexp = NULL, all = FALSE, ...)
{
function(expr) {
res <- try(evaluate_promise(expr),silent=TRUE)
no_error <- !inherits(res, "try-error")
if (no_error) {
warnings <- res$warnings
if (!is.null(regexp) && length(warnings) > 0) {
return(matches(regexp, all = FALSE, ...)(warnings))
} else {
return(expectation(length(warnings) > 0, "no warnings or errors given",
paste0(length(warnings), " warnings created")))
}
}
if (!is.null(regexp)) {
return(matches(regexp, ...)(res))
}
else {
expectation(TRUE, "no error thrown", "threw an error")
}
}
}
## expect_that(stop("foo"),gives_error_or_warning("foo"))
## expect_that(warning("foo"),gives_error_or_warning("foo"))
## expect_that(TRUE,gives_error_or_warning("foo"))
## expect_that(stop("bar"),gives_error_or_warning("foo"))
## expect_that(warning("bar"),gives_error_or_warning("foo"))
context("fitting glmer models")
test_that("glmer", {
set.seed(101)
d <- data.frame(z=rbinom(200,size=1,prob=0.5),
f=factor(sample(1:10,200,replace=TRUE)))
expect_warning(glmer(z~ 1|f, d, family=binomial, method="abc"),"Use the nAGQ argument")
expect_warning(glmer(z~ 1|f, d, family=binomial, method="Laplace"),"Use the nAGQ argument")
expect_warning(glmer(z~ 1|f, d, sparseX=TRUE),"has no effect at present")
expect_that(gm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial), is_a("glmerMod"))
expect_that(gm1@resp, is_a("glmResp"))
expect_that(gm1@pp, is_a("merPredD"))
expect_equal(ge1 <- unname(fixef(gm1)), c(-1.39854982537216, -0.992335519118859,
-1.12867532780426, -1.58030423764517),
tolerance=5e-4)
expect_equal(c(VarCorr(gm1)[[1]]), 0.41245527438386, tolerance=6e-4)
### expect_that(family(gm1), equals(binomial()))
### ?? binomial() has an 'initialize' component ... and the order is different
expect_equal(deviance(gm1), 73.47428, tolerance=1e-5)
## was -2L = 184.05267459802
expect_equal(sigma(gm1), 1)
expect_equal(extractAIC(gm1), c(5, 194.052674598026), tolerance=1e-5)
expect_equal(theta <- unname(getME(gm1, "theta")), 0.642226809144453, tolerance=6e-4)
expect_that(X <- getME(gm1, "X"), is_equivalent_to(
model.matrix(model.frame(~ period, data=cbpp), cbpp)))
expect_that(Zt <- getME(gm1, "Zt"), is_a("dgCMatrix"))
expect_equal(dim(Zt), c(15L, 56L))
expect_equal(Zt@x, rep.int(1, 56L))
expect_that(Lambdat <- getME(gm1, "Lambdat"), is_a("dgCMatrix"))
expect_equivalent(as(Lambdat, "matrix"), diag(theta, 15L, 15L))
expect_is(gm1_probit <- update(gm1,family=binomial(link="probit")),"merMod")
expect_equal(family(gm1_probit)$link,"probit")
## FIXME: test user-specified/custom family?
expect_error(glFormula(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = subset(cbpp, herd==levels(herd)[1]), family = binomial),
"must have > 1")
expect_warning(glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = subset(cbpp, herd %in% levels(herd)[1:4]),
family = binomial,
control=glmerControl(check.nlev.gtreq.5="warning")),
"< 5 sampled levels")
expect_warning(fm1. <- glmer(Reaction ~ Days + (Days|Subject), sleepstudy),
regexp="calling .* with family=gaussian .* as a shortcut")
options(warn=2)
options(glmerControl=list(junk=1,check.conv.grad="ignore"))
expect_warning(glmer(z~ 1|f, d, family=binomial),
"some options")
options(glmerControl=NULL)
cbppX <- transform(cbpp,prop=incidence/size)
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, start=NULL),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, verbose=0L),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, subset=TRUE),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, na.action="na.exclude"),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, offset=rep(0,nrow(cbppX))),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, contrasts=NULL),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, devFunOnly=FALSE),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size,
control=glmerControl(optimizer="Nelder_Mead")),
"glmerMod")
expect_is(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, control=glmerControl()),
"glmerMod")
options(warn=0)
expect_warning(glmer(prop ~ period + (1 | herd),
data = cbppX, family = binomial, weights=size, junkArg=TRUE),
"extra argument.*disregarded")
if(FALSE) { ## Hadley broke this
expect_warning(glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial,
control=list()),
"instead of passing a list of class")
expect_warning(glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial,
control=lmerControl()),
"instead of passing a list of class")
}
##
load(system.file("testdata","radinger_dat.RData",package="lme4"))
mod <- glmer(presabs~predictor+(1|species),family=binomial,
radinger_dat)
expect_is(mod,"merMod")
## TODO: is this reliable across platforms or do we have to loosen?
expect_equal(unname(fixef(mod)),c(0.5425528,6.4289962))
set.seed(101)
## complete separation case
d <- data.frame(y=rbinom(1000,size=1,p=0.5),
x=runif(1000),
f=factor(rep(1:20,each=50)),
x2=rep(0:1,c(999,1)))
mod2 <- glmer(y~x+x2+(1|f),data=d,family=binomial,
control=glmerControl(check.conv.hess="ignore",
check.conv.grad="ignore"))
expect_equal(unname(fixef(mod2))[1:2],
c(-0.10036244,0.03548523), tolerance=1e-4)
expect_true(unname(fixef(mod2)[3] < -10))
mod3 <- update(mod2, family=binomial(link="probit"))
# singular Hessian warning
expect_equal(unname(fixef(mod3))[1:2], c(-0.062889, 0.022241), tolerance=1e-4)
expect_true(fixef(mod3)[3] < -4)
mod4 <- update(mod2, family=binomial(link="cauchit"),
control=glmerControl(check.conv.hess="ignore",
check.conv.grad="ignore"))#--> singular Hessian warning
## on-the-fly creation of index variables
if (FALSE) {
## FIXME: fails in testthat context -- 'd' is not found
## in the parent environment of glmer() -- but works fine
## otherwise ...
set.seed(101)
d <- data.frame(y1=rpois(100,1), x=rnorm(100), ID=1:100)
fit1 <- glmer(y1 ~ x+(1|ID),data=d,family=poisson)
fit2 <- update(fit1, .~ x+(1|rownames(d)))
expect_equal(unname(unlist(VarCorr(fit1))),
unname(unlist(VarCorr(fit2))))
}
##
if(testLevel > 1) {
load(system.file("testdata","mastitis.rda",package="lme4"))
t1 <- system.time(g1 <-
glmer(NCM ~ birth + calvingYear + (1|sire) + (1|herd),
mastitis, poisson,
## current (2014-04-24) default:
control=glmerControl(optimizer=c("bobyqa","Nelder_Mead"))))
t2 <- system.time(g2 <- update(g1,
control=glmerControl(optimizer="bobyqa")))
## 20 (then 13.0) seconds N-M vs 8 (then 4.8) seconds bobyqa ...
## problem is fairly ill-conditioned so parameters
## are relatively far apart even though likelihoods are OK
expect_equal(logLik(g1),logLik(g2),tolerance=1e-7)
}
## test bootstrap/refit with nAGQ>1
gm1AGQ <- update(gm1,nAGQ=2)
expect_equal(attr(bootMer(gm1AGQ,fixef),"bootFail"),0)
## do.call(new,...) bug
new <- "foo"
expect_that(gm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp, family = binomial), is_a("glmerMod"))
rm("new")
## test issue #47, from Wolfgang Viechtbauer
## create some data
n <- 100
ai <- rep(0:1, each = n/2)
bi <- 1-ai
ci <- c(rep(0,42), rep(1,8), rep(0,18), rep(1,32))
di <- 1-ci
event <- c(rbind(ai,ci))
group <- rep(c(1,0), times=n)
id <- rep(1:n, each=2)
gm3 <- glmer(event ~ group + (1 | id), family=binomial, nAGQ=21)
sd3 <- sqrt(diag(vcov(gm3)))
expect_equal(sd3, c(0.4254254, 0.424922), tolerance=1e-5)
expect_warning(vcov(gm3,use.hessian=FALSE), "finite-difference Hessian")
expect_equal(suppressWarnings(sqrt(diag(vcov(gm3,use.hessian=FALSE)))),
c(0.3840921, 0.3768747), tolerance=1e-7)
expect_equal(sd3, unname(coef(summary(gm3))[,"Std. Error"]))
## test non-pos-def finite-difference Hessian ...
if(getRversion() > "3.0.0") {
## saved fits are not safe with old R versions
L <- load(system.file("testdata","polytomous_vcov_ex.RData",
package="lme4", mustWork=TRUE))
expect_warning(vcov(polytomous_vcov_ex),"falling back to var-cov")
}
## damage Hessian to make it singular
## (example thanks to J. Dushoff)
gm1H <- gm1
gm1H@optinfo$derivs$Hessian[5,] <- 0
expect_warning(vcov(gm1H),"falling back to var-cov")
## test convergence warnings
L <- load(system.file("testdata","gopherdat2.RData",
package="lme4", mustWork=TRUE))
g0 <- glmer(shells~prev + (1|Site)+offset(log(Area)),
family=poisson, data=Gdat)
## fit year as factor: OK
gc <- glmerControl(check.conv.grad="stop")
expect_is(update(g0,.~.+factor(year), control=gc), "glmerMod")
## error/warning with year as numeric:
## don't have full knowledge of which platforms lead to which
## results, and can't detect whether we're running on valgrind,
## which changes the result on 32-bit linux ...
## SEGFAULT on MacOS? why?
if (FALSE) {
expect_that(update(g0,.~.+year),
gives_error_or_warning("(failed to converge|pwrssUpdate did not converge)"))
}
## ("(failed to converge|pwrssUpdate did not converge in)"))
## if (sessionInfo()$platform=="i686-pc-linux-gnu (32-bit)") {
## expect_warning(update(g0, .~. +year), "failed to converge")
## } else {
## ## MacOS x86_64-apple-darwin10.8.0 (64-bit)
## ## MM's platform
## ## "pwrssUpdate did not converge in (maxit) iterations"
## expect_error(update(g0, .~. +year), "pwrssUpdate did not converge in")
## }
## OK if we scale & center it
expect_is(update(g0,.~. + scale(year), control=gc), "glmerMod")
## not OK if we scale and don't center
expect_warning(update(g0,.~. + scale(year,center=FALSE)),
"failed to converge with max|grad|")
## OK if center and don't scale
expect_is(update(g0,.~. + scale(year,center=TRUE,scale=FALSE),
control=gc),
"glmerMod")
## try higher-order AGQ
expect_is(update(gm1,nAGQ=90),"glmerMod")
expect_error(update(gm1,nAGQ=101),"ord < 101L")
## non-numeric response variables
ss <- transform(sleepstudy,Reaction=as.character(Reaction))
expect_error(glmer(Reaction~(1|Days),family="poisson",data=ss),
"response must be numeric")
expect_error(glmer(Reaction~(1|Days),family="binomial",data=ss),
"response must be numeric or factor")
ss2 <- transform(ss,rr=rep(c(TRUE,FALSE),length.out=nrow(ss)))
## should work OK with logical too
expect_is(glmer(rr~(1|Days),family="binomial",data=ss2),"merMod")
## starting values with log(.) link -- thanks to Eric Weese @ Yale:
grp <- rep(letters[1:5], 20); set.seed(1); x <- rnorm(100)
expect_error(glmer(x ~ 1 + (1|grp), family=gaussian(link="log")),
"valid starting values")
})
|
library(shiny)
library(leaflet)
#devtools::install_github('rstudio/httpuv@wch-print-req')
#library(rsconnect)
# Define UI: title, CSV-upload sidebar, and the leaflet map panel ----
ui <- fluidPage(
  # App title ----
  titlePanel("Quick map"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: single CSV file upload, consumed by the server's pointsInput ----
      helpText("the CSV must have 'LONGDEC' and 'LATDEC' fields, and will also accept fields for COLLECTOR, NUMBER, and YEAR."),
      fileInput("file1", NULL, multiple = FALSE, accept = (".csv"))
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: leaflet map of the uploaded occurrence points ----
      #plotOutput(outputId = "distPlot")   # leftover from the shiny histogram template
      leaflet::leafletOutput("mymap", width = "100%", height = 600)
    )
  )
)
# Server: read the uploaded CSV and draw its records on a leaflet map ----
server <- function(input, output) {
  ## Re-read the CSV whenever a new file is uploaded.
  pointsInput <- eventReactive(input$file1, {
    read.csv(input$file1$datapath)
  })
  ## Render one red circle marker per record over satellite imagery;
  ## the popup shows the collection metadata columns.
  output$mymap <- renderLeaflet({
    pts <- pointsInput()
    leaflet(data = pts) %>%
      addCircleMarkers(lng = ~LONGDEC,
                       lat = ~LATDEC,
                       radius = 4,
                       color = "red",
                       popup = ~paste("Collector:", COLLECTOR, "<br>",
                                      "Number:", NUMBER, "<br>",
                                      "Collection year:", YEAR)) %>%
      addProviderTiles(providers$Esri.WorldImagery,
                       options = providerTileOptions(noWrap = TRUE))
  })
}
# Run the application (blocks until the app is stopped)
shinyApp(ui = ui, server = server)
| /app.R | no_license | stevenpbachman/quickmap | R | false | false | 1,898 | r | library(shiny)
library(leaflet)
#devtools::install_github('rstudio/httpuv@wch-print-req')
#library(rsconnect)
# Define UI for app that draws a histogram ----
ui <- fluidPage(
# App title ----
titlePanel("Quick map"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Slider for the number of bins ----
helpText("the CSV must have 'LONGDEC' and 'LATDEC' fields, and will also accept fields for COLLECTOR, NUMBER, and YEAR."),
fileInput("file1", NULL, multiple = FALSE, accept = (".csv"))
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Histogram ----
#plotOutput(outputId = "distPlot")
leaflet::leafletOutput("mymap", width = "100%", height = 600)
)
)
)
# Define server logic required to draw a histogram ----
server <- function(input, output) {
# prepare the points
pointsInput <- eventReactive(input$file1, {
df <- read.csv(input$file1$datapath) #encoding = 'UTF-8')
})
# output map
output$mymap <- renderLeaflet({
#df <- mapInput()
#sptdwg = tdwg.dist = check.tdwg(input$powo)
#sptdwg = merge(TDWG_polys, tdwg.dist)
data = pointsInput()
leaflet(data = data) %>%
addCircleMarkers(lng = ~LONGDEC,
lat = ~LATDEC,
radius = 4,
color = "red",
popup = ~paste("Collector:", COLLECTOR, "<br>",
"Number:",NUMBER, "<br>",
"Collection year:", YEAR)) %>%
addProviderTiles(providers$Esri.WorldImagery,
options = providerTileOptions(noWrap = TRUE))
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
## Package attach hook: runs when the package is attached via library().
## Emits the CC-BY license / citation notice for the SCAR gazetteer data
## with packageStartupMessage() so users can suppress it if desired.
.onAttach <- function(libname, pkgname) {
    packageStartupMessage("The SCAR Composite Gazetteer of Antarctica is made available under a CC-BY license. If you use it, please cite it:\nComposite Gazetteer of Antarctica, Scientific Committee on Antarctic Research. GCMD Metadata (http://gcmd.nasa.gov/records/SCAR_Gazetteer.html)")
}
| /R/zzz.R | permissive | lbusett/antanym | R | false | false | 333 | r | .onAttach <- function(libname, pkgname) {
packageStartupMessage("The SCAR Composite Gazetteer of Antarctica is made available under a CC-BY license. If you use it, please cite it:\nComposite Gazetteer of Antarctica, Scientific Committee on Antarctic Research. GCMD Metadata (http://gcmd.nasa.gov/records/SCAR_Gazetteer.html)")
}
|
#' Characteristics of ID
#'
#' @description A compartmental model with several different compartments: Susceptibles (S), Infected and Pre-symptomatic (P), Infected and Asymptomatic (A), Infected and Symptomatic (I), Recovered and Immune (R) and Dead (D)
#'
#' @details The model tracks the dynamics of susceptible, presymptomatic, asymptomatic, symptomatic, recovered, and dead individuals. Susceptible (S) individuals can become infected by presymptomatic (P), asymptomatic (A), or infected (I) hosts. All infected individuals enter the presymptomatic stage first, from which they can become symptomatic or asymptomatic. Asymptomatic hosts recover within some specified duration of time, while infected hosts either recover or die, thus entering either R or D. Recovered individuals are immune to reinfection. This model is part of the DSAIDE R package, more information can be found there.
#'
#' This code was generated by the modelbuilder R package.
#' The model is implemented as a set of ordinary differential equations using the deSolve package.
#' The following R packages need to be loaded for the function to work: deSolve.
#'
#' @param S : starting value for Susceptible : numeric
#' @param P : starting value for Presymptomatic : numeric
#' @param A : starting value for Asymptomatic : numeric
#' @param I : starting value for Symptomatic : numeric
#' @param R : starting value for Recovered : numeric
#' @param D : starting value for Dead : numeric
#' @param bP : rate of transmission from P to S : numeric
#' @param bA : rate of transmission from A to S : numeric
#' @param bI : rate of transmission from I to S : numeric
#' @param gP : rate at which a person leaves the P compartment : numeric
#' @param gA : rate at which a person leaves the A compartment : numeric
#' @param gI : rate at which a person leaves the I compartment : numeric
#' @param f : fraction of asymptomatic infections : numeric
#' @param d : fraction of symptomatic hosts that die : numeric
#' @param tstart : Start time of simulation : numeric
#' @param tfinal : Final time of simulation : numeric
#' @param dt : Time step : numeric
#' @return The function returns the output as a list.
#' The time-series from the simulation is returned as a dataframe saved as list element \code{ts}.
#' The \code{ts} dataframe has one column per compartment/variable. The first column is time.
#' @examples
#' # To run the simulation with default parameters:
#' result <- simulate_Characteristics_of_ID_ode()
#' # To choose values other than the standard one, specify them like this:
#' result <- simulate_Characteristics_of_ID_ode(S = 2000,P = 2,A = 0,I = 0,R = 0,D = 0)
#' # You can display or further process the result, like this:
#' plot(result$ts[,'time'],result$ts[,'S'],xlab='Time',ylab='Numbers',type='l')
#' print(paste('Max number of S: ',max(result$ts[,'S'])))
#' @section Warning: This function does not perform any error checking. So if you try to do something nonsensical (e.g. have negative values for parameters), the code will likely abort with an error message.
#' @section Model Author: Andreas Handel, Alexis Vittengl
#' @section Model creation date: 2020-09-29
#' @section Code Author: generated by the \code{modelbuilder} R package
#' @section Code creation date: 2021-02-16
#' @export
simulate_Characteristics_of_ID_ode <- function(S = 1000, P = 1, A = 0, I = 0, R = 0, D = 0, bP = 0, bA = 0, bI = 0.001, gP = 0.1, gA = 0.1, gI = 0.1, f = 0, d = 0, tstart = 0, tfinal = 200, dt = 0.1)
{
  ##############################
  #Block of ODE equations for deSolve
  ##############################
  ## Derivative function in the form deSolve::ode() expects: given time t,
  ## the state vector y, and the parameter vector parms, it returns a list
  ## whose first element holds the derivatives in the same order as y.
  Characteristics_of_ID_ode_fct <- function(t, y, parms)
  {
    with( as.list(c(y,parms)), { #lets us access variables and parameters stored in y and parms by name
      #StartODES
      #Susceptible : Infection by presymptomatic : Infection by asymptomatic : Infection by symptomatic :
      dS_mb = -bP*S*P -bA*S*A -bI*S*I
      #Presymptomatic : Infection by presymptomatic : Infection by asymptomatic : Infection by symptomatic : Progression to asymptomatic stage : Progression to symptomatic stage :
      dP_mb = +bP*S*P +bA*S*A +bI*S*I -f*gP*P -(1-f)*gP*P
      #Asymptomatic : Progression to asymptomatic stage : Recovery of asymptomatic :
      dA_mb = +f*gP*P -gA*A
      #Symptomatic : Progression to symptomatic stage : Progression to death : Progression to recovery :
      dI_mb = +(1-f)*gP*P -d*gI*I -(1-d)*gI*I
      #Recovered : Recovery of asymptomatic : Recovery of symptomatic :
      dR_mb = +gA*A +(1-d)*gI*I
      #Dead : Death of Symptomatic :
      dD_mb = +d*gI*I
      #EndODES
      ## Return the derivatives in the same order as the state vector y.
      list(c(dS_mb,dP_mb,dA_mb,dI_mb,dR_mb,dD_mb))
    } ) } #close with statement, end ODE code block
  ##############################
  #Main function code block
  ##############################
  #Creating named vectors (names must match the symbols used in the ODE block above)
  varvec_mb = c(S = S, P = P, A = A, I = I, R = R, D = D)
  parvec_mb = c(bP = bP, bA = bA, bI = bI, gP = gP, gA = gA, gI = gI, f = f, d = d)
  timevec_mb = seq(tstart, tfinal,by = dt)
  #Running the model (tight rtol/atol for a smooth, accurate trajectory)
  simout = deSolve::ode(y = varvec_mb, parms = parvec_mb, times = timevec_mb, func = Characteristics_of_ID_ode_fct, rtol = 1e-12, atol = 1e-12)
  #Setting up empty list and returning result as data frame called ts
  result <- list()
  result$ts <- as.data.frame(simout)
  return(result)
}
| /inst/simulatorfunctions/simulate_Characteristics_of_ID_ode.R | no_license | daileyco/DSAIDE | R | false | false | 5,281 | r | #' Characteristics of ID
#'
#' @description A compartmental model with several different compartments: Susceptibles (S), Infected and Pre-symptomatic (P), Infected and Asymptomatic (A), Infected and Symptomatic (I), Recovered and Immune (R) and Dead (D)
#'
#' @details The model tracks the dynamics of susceptible, presymptomatic, asymptomatic, symptomatic, recovered, and dead individuals. Susceptible (S) individuals can become infected by presymptomatic (P), asymptomatic (A), or infected (I) hosts. All infected individuals enter the presymptomatic stage first, from which they can become symptomatic or asymptomatic. Asymptomatic hosts recover within some specified duration of time, while infected hosts either recover or die, thus entering either R or D. Recovered individuals are immune to reinfection. This model is part of the DSAIDE R package, more information can be found there.
#'
#' This code was generated by the modelbuilder R package.
#' The model is implemented as a set of ordinary differential equations using the deSolve package.
#' The following R packages need to be loaded for the function to work: deSolve.
#'
#' @param S : starting value for Susceptible : numeric
#' @param P : starting value for Presymptomatic : numeric
#' @param A : starting value for Asymptomatic : numeric
#' @param I : starting value for Symptomatic : numeric
#' @param R : starting value for Recovered : numeric
#' @param D : starting value for Dead : numeric
#' @param bP : rate of transmission from P to S : numeric
#' @param bA : rate of transmission from A to S : numeric
#' @param bI : rate of transmission from I to S : numeric
#' @param gP : rate at which a person leaves the P compartment : numeric
#' @param gA : rate at which a person leaves the A compartment : numeric
#' @param gI : rate at which a person leaves the I compartment : numeric
#' @param f : fraction of asymptomatic infections : numeric
#' @param d : fraction of symptomatic hosts that die : numeric
#' @param tstart : Start time of simulation : numeric
#' @param tfinal : Final time of simulation : numeric
#' @param dt : Time step : numeric
#' @return The function returns the output as a list.
#' The time-series from the simulation is returned as a dataframe saved as list element \code{ts}.
#' The \code{ts} dataframe has one column per compartment/variable. The first column is time.
#' @examples
#' # To run the simulation with default parameters:
#' result <- simulate_Characteristics_of_ID_ode()
#' # To choose values other than the standard one, specify them like this:
#' result <- simulate_Characteristics_of_ID_ode(S = 2000,P = 2,A = 0,I = 0,R = 0,D = 0)
#' # You can display or further process the result, like this:
#' plot(result$ts[,'time'],result$ts[,'S'],xlab='Time',ylab='Numbers',type='l')
#' print(paste('Max number of S: ',max(result$ts[,'S'])))
#' @section Warning: This function does not perform any error checking. So if you try to do something nonsensical (e.g. have negative values for parameters), the code will likely abort with an error message.
#' @section Model Author: Andreas Handel, Alexis Vittengl
#' @section Model creation date: 2020-09-29
#' @section Code Author: generated by the \code{modelbuilder} R package
#' @section Code creation date: 2021-02-16
#' @export
simulate_Characteristics_of_ID_ode <- function(S = 1000, P = 1, A = 0, I = 0, R = 0, D = 0, bP = 0, bA = 0, bI = 0.001, gP = 0.1, gA = 0.1, gI = 0.1, f = 0, d = 0, tstart = 0, tfinal = 200, dt = 0.1)
{
  # Right-hand side of the ODE system in the form deSolve::ode() expects:
  # (time, state vector, parameter vector) -> list with one derivative vector.
  Characteristics_of_ID_ode_fct <- function(t, y, parms)
  {
    with(as.list(c(y, parms)), {
      #StartODES
      # Total force of new infections acting on susceptibles
      # (P, A and I hosts all transmit, each with its own rate)
      infections <- bP * S * P + bA * S * A + bI * S * I
      # Susceptible: lose newly infected hosts
      dS_mb <- -infections
      # Presymptomatic: gain new infections; fraction f progresses to A,
      # the remainder (1 - f) progresses to I, both at rate gP
      dP_mb <- infections - f * gP * P - (1 - f) * gP * P
      # Asymptomatic: inflow from P, recovery at rate gA
      dA_mb <- f * gP * P - gA * A
      # Symptomatic: inflow from P; fraction d dies, the rest recovers (rate gI)
      dI_mb <- (1 - f) * gP * P - d * gI * I - (1 - d) * gI * I
      # Recovered: from asymptomatic and surviving symptomatic hosts
      dR_mb <- gA * A + (1 - d) * gI * I
      # Dead: symptomatic hosts that do not survive
      dD_mb <- d * gI * I
      #EndODES
      # Order must match the state vector y
      list(c(dS_mb, dP_mb, dA_mb, dI_mb, dR_mb, dD_mb))
    })
  }
  # Named initial-state and parameter vectors plus the output time grid
  y0_mb <- c(S = S, P = P, A = A, I = I, R = R, D = D)
  pars_mb <- c(bP = bP, bA = bA, bI = bI, gP = gP, gA = gA, gI = gI, f = f, d = d)
  times_mb <- seq(tstart, tfinal, by = dt)
  # Integrate with tight tolerances and return the trajectory as a data
  # frame in list element `ts`.
  simout <- deSolve::ode(y = y0_mb, parms = pars_mb, times = times_mb, func = Characteristics_of_ID_ode_fct, rtol = 1e-12, atol = 1e-12)
  list(ts = as.data.frame(simout))
}
|
# Test runner entry point: executes every testthat test for the "unsound" package.
library(testthat)
test_check("unsound")
| /tests/test-all.R | no_license | hrbrmstr/unsound | R | false | false | 40 | r | library(testthat)
test_check("unsound")
|
#----------------------------------------------------------------------
# Purpose: Split Airlines dataset into train and validation sets.
#          Build model and predict on a test Set.
#          Print Confusion matrix and performance measures for test set
#----------------------------------------------------------------------
# Move to this script's directory so the relative data paths below resolve
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
# setwd("/Users/tomk/0xdata/ws/h2o/R/tests/testdir_demos")
source('../findNSourceUtils.R')
options(echo=TRUE)
heading("BEGIN TEST")
conn <- new("H2OClient", ip=myIP, port=myPort)
# Upload the training data file to h2o
filePath <- "../../../smalldata/airlines/AirlinesTrain.csv.zip"
air = h2o.uploadFile(conn, filePath, "air")
# Construct validation and train sets by sampling (20/80): h2o.runif
# creates a uniform random column as tall as airlines (nrow(air)),
# useful when the number of rows is too large for R to handle.
s = h2o.runif(air)
air.train = air[s <= 0.8,]
air.valid = air[s > 0.8,]
myX = c("Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth", "fDayOfWeek" )
myY="IsDepDelayed"
# DRF2 (TRUE/FALSE spelled out; T/F are reassignable shortcuts)
air.rf = h2o.randomForest(x = myX, y = myY, data = air.train, seed = 12, validation=air.valid, importance = TRUE,
                          ntree = 10, depth = 20, balance.classes=FALSE)
print(air.rf@model)
air.rf.balance = h2o.randomForest(x = myX, y = myY, data = air.train, seed = 12, validation=air.valid,
                                  ntree = 10, depth = 20, balance.classes=TRUE)
print(air.rf.balance@model)
# SpeeDRF without class balancing
air.speedrf = h2o.SpeeDRF(x = myX, y = myY, data = air.train, seed = 12, validation = air.valid,
                          ntree = 10, depth = 20)
print(air.speedrf@model)
# SpeeDRF with class balancing
air.speedrf.balance = h2o.SpeeDRF(x = myX, y = myY, data = air.train, seed = 12, validation = air.valid,
                                  ntree = 10, depth = 20, balance.classes=TRUE)
print(air.speedrf.balance@model)
# Upload the test file to h2o
testFilePath <-"../../../smalldata/airlines/AirlinesTest.csv.zip"
air.test=h2o.uploadFile(conn,testFilePath,key="air.test")
# Score a fitted model on a test frame: print the confusion matrix and
# performance measures, and plot the ROC curve.
# `test_frame` defaults to the global test set so the existing
# one-argument calls below keep working unchanged.
func = function(model_object, test_frame = air.test) {
  # Predict on the test frame
  pred = h2o.predict(model_object, test_frame)
  head(pred)
  # Build confusion matrix for the test set
  CM = h2o.confusionMatrix(pred$predict, test_frame$IsDepDelayed)
  print(CM)
  # Compute performance measures and plot ROC for the test set
  perf = h2o.performance(pred$YES, test_frame$IsDepDelayed)
  print(perf)
  perf@model$precision
  perf@model$accuracy
  perf@model$auc
  plot(perf, type="roc")
}
cat("\n\nWITHOUT CLASS BALANCING\n")
func(air.rf)
cat("\n\nWITH CLASS BALANCING\n")
func(air.rf.balance)
cat("\n\nSPEEDRF WITHOUT CLASS WT\n")
func(air.speedrf)
cat("\n\nSPEEDRF WITH CLASS WT\n")
func(air.speedrf.balance)
PASS_BANNER()
| /R/tests/testdir_demos/runit_demo_rf_balance_classes.R | permissive | woobe/h2o | R | false | false | 2,789 | r | #----------------------------------------------------------------------
# Purpose: Split Airlines dataset into train and validation sets.
# Build model and predict on a test Set.
# Print Confusion matrix and performance measures for test set
#----------------------------------------------------------------------
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
# setwd("/Users/tomk/0xdata/ws/h2o/R/tests/testdir_demos")
source('../findNSourceUtils.R')
options(echo=TRUE)
heading("BEGIN TEST")
conn <- new("H2OClient", ip=myIP, port=myPort)
#uploading data file to h2o
filePath <- "../../../smalldata/airlines/AirlinesTrain.csv.zip"
air = h2o.uploadFile(conn, filePath, "air")
#Constructing validation and train sets by sampling (20/80)
#creating a column as tall as airlines(nrow(air))
s = h2o.runif(air) # Useful when number of rows too large for R to handle
air.train = air[s <= 0.8,]
air.valid = air[s > 0.8,]
myX = c("Origin", "Dest", "Distance", "UniqueCarrier", "fMonth", "fDayofMonth", "fDayOfWeek" )
myY="IsDepDelayed"
# DRF2
air.rf = h2o.randomForest(x = myX, y = myY, data = air.train, seed = 12, validation=air.valid, importance = T,
ntree = 10, depth = 20, balance.classes=F)
print(air.rf@model)
air.rf.balance = h2o.randomForest(x = myX, y = myY, data = air.train, seed = 12, validation=air.valid,
ntree = 10, depth = 20, balance.classes=T)
print(air.rf.balance@model)
# SpeeDRF
air.speedrf = h2o.SpeeDRF(x = myX, y = myY, data = air.train, seed = 12, validation = air.valid,
ntree = 10, depth = 20)
print(air.speedrf@model)
# SpeeDRF
air.speedrf.balance = h2o.SpeeDRF(x = myX, y = myY, data = air.train, seed = 12, validation = air.valid,
ntree = 10, depth = 20, balance.classes=T)
print(air.speedrf.balance@model)
#uploading test file to h2o
testFilePath <-"../../../smalldata/airlines/AirlinesTest.csv.zip"
air.test=h2o.uploadFile(conn,testFilePath,key="air.test")
func = function(model_object) {
#predicting on test file
pred = h2o.predict(model_object,air.test)
head(pred)
#Building confusion matrix for test set
CM=h2o.confusionMatrix(pred$predict,air.test$IsDepDelayed)
print(CM)
#Plot ROC for test set
perf = h2o.performance(pred$YES,air.test$IsDepDelayed )
print(perf)
perf@model$precision
perf@model$accuracy
perf@model$auc
plot(perf,type="roc")
}
cat("\n\nWITHOUT CLASS BALANCING\n")
func(air.rf)
cat("\n\nWITH CLASS BALANCING\n")
func(air.rf.balance)
cat("\n\nSPEEDRF WITHOUT CLASS WT\n")
func(air.speedrf)
cat("\n\nSPEEDRF WITH CLASS WT\n")
func(air.speedrf.balance)
PASS_BANNER()
|
#' TopDomData - Data for the TopDom Package
#'
#' The \pkg{TopDomData} package is an \R package that provides example data
#' for the \pkg{TopDom} package. This data package is also used for
#' validating the implementation of the TopDom package via TopDom's package
#' tests.
#'
#' @section Origin:
#' The data herein contain a tiny subset of the HiC and TopDom data used
#' in the TopDom study (Shin et al., 2016).
#' More precisely, it contains:
#'
#' 1. A TopDom file \file{mESC_5w_chr10.nij.HindIII.comb.40kb.domain}, which
#' is part of the \file{mESC_5w_domain.zip} file
#' (54,676 bytes; md5 1ebfb9a556c1954b8cd35520a2499abd) from the
#' Supplementary Materials of Shin et al. (2016).
#' These data were downloaded from the
#' TopDom website (http://zhoulab.usc.edu/TopDom/ - now defunct).
#'
#' 2. A normalized HiC-count matrix file \file{nij.chr10.gz}, where the
#' non-compressed version is part of the \file{mESC.norm.tar.gz} file
#' (1,305,763,679 bytes; md5 2e79d0f57463b5b7c4bf86b187086d3c) originally
#' downloaded from the
#' [UCSD Ren Lab](http://chromosome.sdsc.edu/mouse/hi-c/download.html).
#' It is a tab-delimited file containing a 3250-by-3250 numeric matrix of
#' non-negative decimal values. The underlying HiC sequence data is
#' available from
#' [GSE35156](http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE35156)
#' on GEO and was published as part of Dixon, et al. (2012).
#'
#' @section License:
#' The license for these data is unknown.
#'
#' @references
#' 1. Dixon JR, Selvaraj S, Yue F, Kim A, et al. Topological domains in
#' mammalian genomes identified by analysis of chromatin interactions.
#' Nature 2012 Apr 11; 485(7398):376-80,
#' doi: 10.1038/nature11082,
#' PMCID: [PMC3356448](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3356448/),
#' PMID: 22495300.
#'
#' 2. Shin, et al., TopDom: an efficient and deterministic method for
#' identifying topological domains in genomes,
#' Nucleic Acids Res. 2016 Apr 20; 44(7): e70., 2016.
#' doi: 10.1093/nar/gkv1505,
#' PMCID: [PMC4838359](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4838359/),
#' PMID: 26704975.
#'
#' @name TopDomData-package
#' @aliases TopDomData
#' @docType package
NULL
| /R/package.R | no_license | HenrikBengtsson/TopDomData | R | false | false | 2,213 | r | #' TopDomData - Data for the TopDom Package
#'
#' The \pkg{TopDomData} package is an \R package that provides examples data
#' for the \pkg{TopDom} package. This data package is also used for
#' validation of implementation of the TopDom package via TopDom's package
#' tests.
#'
#' @section Origin:
#' The data herein contain a tiny subset of the HiC and TopDom data used
#' in the TopDom study (Shin et al., 2016).
#' More precisely, it contains:
#'
#' 1. A TopDom file \file{mESC_5w_chr10.nij.HindIII.comb.40kb.domain}, which
#' is part of the \file{mESC_5w_domain.zip} file
#' (54,676 bytes; md5 1ebfb9a556c1954b8cd35520a2499abd) from the
#' Supplementary Materials of Shin et al. (2016).
#' These data were downloaded from the
#' TopDom website (http://zhoulab.usc.edu/TopDom/ - now defunct).
#'
#' 2. A normalized HiC-count matrix file \file{nij.chr10.gz}, where the
#' non-compressed version is part of the \file{mESC.norm.tar.gz} file
#' (1,305,763,679 bytes; md5 2e79d0f57463b5b7c4bf86b187086d3c) originally
#' downloaded from the
#' [UCSD Ren Lab](http://chromosome.sdsc.edu/mouse/hi-c/download.html).
#' It is a tab-delimited file containing a 3250-by-3250 numeric matrix
#' non-negative decimal values. The underlying HiC sequence data is
#' available from
#' [GSE35156](http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE35156)
#' on GEO and was published part of Dixon, et al. (2012).
#'
#' @section License:
#' The license for these data is unknown.
#'
#' @references
#' 1. Dixon JR, Selvaraj S, Yue F, Kim A, et al. Topological domains in
#' mammalian genomes identified by analysis of chromatin interactions.
#' Nature 2012 Apr 11; 485(7398):376-80,
#' doi: 10.1038/nature11082,
#' PMCID: [PMC3356448](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3356448/),
#' PMID: 22495300.
#'
#' 2. Shin, et al., TopDom: an efficient and deterministic method for
#' identifying topological domains in genomes,
#' Nucleic Acids Res. 2016 Apr 20; 44(7): e70., 2016.
#' doi: 10.1093/nar/gkv1505,
#' PMCID: [PMC4838359](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4838359/),
#' PMID: 26704975.
#'
#' @name TopDomData-package
#' @aliases TopDomData
#' @docType package
NULL
|
############################################################
#' This script will be used for summarizing reported each
#' joint counts for Psorcast users
#'
#' Note: it will exclude finger joints and only use
#' main joint location (ankle, hip, elbow, wrist etc.)
#'
#' @author: aryton.tediarjo@sagebase.org
#' @maintainer: aryton.tediarjo@sagebase.org
############################################################
# NOTE(review): rm(list = ls()) / gc() wipes the user's workspace when this
# script is sourced interactively; generally discouraged in scripts.
rm(list=ls())
gc()
# import libraries
library(synapser)
library(data.table)
library(tidyverse)
library(githubr)
source("utils/feature_extraction_utils.R")
source('utils/processing_log_utils.R')
synLogin()
############################
# Global Vars
############################
# Synapse IDs: output folder, error-log folder and reference tables
PARENT_SYN_ID <- "syn22336715"
ERROR_LOG_SYN_ID <- "syn25832341"
VISIT_REF <- "syn25825626"
PPACMAN_TBL_ID <- "syn22337133"
VISIT_REF_ID <- "syn25825626"
# Column of each Synapse table that holds the raw summary.json file handle
FILE_COLUMNS <- "summary.json"
# Major joint locations kept for analysis (finger joints are excluded)
JOINT_LOCATION <- c("knee", "hip", "ankle",
"wrist", "elbow", "shoulder")
OUTPUT_FILE <- "joint_counts_comparison.tsv"
# One entry per joint-report activity: feature-column prefix, source
# Synapse table and output filename
JOINT_TBL_REF <- list(
dig_jc = list(
prefix = "dig_jc",
syn_id = "syn22281786",
output_filename = "dig_jc_features.tsv"),
gs_jc = list(
prefix = "gs_jc",
syn_id = "syn22281781",
output_filename = "gs_jc_features.tsv"
),
gs_swell = list(
prefix = "gs_swell",
syn_id = "syn22281780",
output_filename = "gs_swell_features.tsv"
)
)
############################
# Git Reference
############################
# Permalink of this script, recorded as provenance on Synapse uploads
SCRIPT_PATH <- file.path('feature_extraction', "jointSummaries_features.R")
GIT_TOKEN_PATH <- config::get("git")$token_path
GIT_REPO <- config::get("git")$repo
githubr::setGithubToken(readLines(GIT_TOKEN_PATH))
GIT_URL <- getPermlink(
repository = getRepo(
repository = GIT_REPO,
ref="branch",
refName='main'),
repositoryPath = SCRIPT_PATH)
#' Count the number of selected (reported) joints per record.
#'
#' Rows flagged with a non-NA `error` are dropped before counting.
#'
#' @param data dataframe of flattened summary.json entries, with columns
#'   `recordId`, `isSelected` and `error`
#' @return a tibble with one row per `recordId` and its selected-joint
#'   `counts`, sorted in decreasing order of counts
calculate_reported_counts <- function(data){
  data %>%
    dplyr::filter(is.na(error)) %>%
    dplyr::group_by(recordId) %>%
    # spell out TRUE rather than the reassignable shortcut T
    dplyr::summarise(counts = sum(isSelected, na.rm = TRUE)) %>%
    dplyr::arrange(desc(counts))
}
#' Collapse the selected joint identifiers of each record into a single
#' comma-separated string.
#'
#' @param data dataframe of flattened summary.json entries
#' @return a tibble with columns `recordId` and `joint_list`
create_joint_string_list <- function(data){
  selected_rows <- data %>%
    dplyr::filter(is.na(error) & isSelected == TRUE)
  selected_rows %>%
    dplyr::group_by(recordId) %>%
    dplyr::summarise(joint_list = paste0(identifier, collapse = ","))
}
#' Derive a left/right/both pain-status column for each major joint group.
#'
#' For every joint group found in `identifier_group` (e.g. "knee"), the
#' left_<joint>/right_<joint> indicator rows are pivoted wide and combined
#' into one status_<joint> column with values "both", "right", "left" or NA.
#'
#' @param data dataframe of flattened summary.json entries
parse_joint_pain_symmetry <- function(data){
# joint groups that actually occur in the data
detected_joints <- data %>%
drop_na(identifier_group) %>%
.$identifier_group %>%
unique()
purrr::map(detected_joints , function(joint_identifier){
# build the output and the two side-indicator column names for this joint
output_cols <- glue::glue("status_{joint_identifier}")
left_side_cols <- glue::glue("left_{joint_identifier}")
right_side_cols <- glue::glue("right_{joint_identifier}")
data %>%
# keep rows with NA identifiers so every record survives the pivot
dplyr::filter(is.na(identifier) |
str_detect(identifier, joint_identifier)) %>%
pivot_wider(names_from = identifier,
values_from = isSelected) %>%
# combine the two side indicators into a single symmetry status
dplyr::mutate(!!sym(output_cols) :=
case_when((!!sym(right_side_cols) == TRUE &
!!sym(left_side_cols) == TRUE) ~ "both",
(!!sym(right_side_cols) == TRUE &
!!sym(left_side_cols) == FALSE) ~ "right",
(!!sym(right_side_cols) == FALSE &
!!sym(left_side_cols) == TRUE) ~ "left",
TRUE ~ NA_character_)) %>%
dplyr::select(recordId, !!sym(output_cols))}) %>%
# one data frame per joint group -> single wide frame keyed by recordId
purrr::reduce(dplyr::full_join, by = c("recordId"))
}
#' Fetch and flatten a joint-report table from Synapse.
#'
#' Downloads the table identified by `syn_id`, flattens its summary.json
#' column, keeps the last row per
#' (recordId, createdOn, participantId, identifier) and tags each row with
#' its major joint group extracted from the identifier (see JOINT_LOCATION).
#'
#' @param syn_id synapse id of the joint report table
get_joint_report <- function(syn_id){
get_table(syn_id, file_columns = FILE_COLUMNS) %>%
flatten_joint_summary() %>%
dplyr::group_by(recordId,
createdOn,
participantId,
identifier) %>%
# de-duplicate: keep only the last entry within each key
dplyr::summarise_all(last) %>%
dplyr::mutate(
identifier_group =
str_extract(identifier,
str_c(JOINT_LOCATION,
collapse = "|")))
}
# Pipeline driver (side effects only: uploads feature and error-log files
# to Synapse for each activity in JOINT_TBL_REF).
# NOTE(review): the map() return value bound to `joint_summaries` is unused;
# purrr::walk() would express the side-effect-only intent more directly.
main <- function(){
#' - Go through each synapse id
#' - For each table flatten all the summary.json files
#' - Calculate metrics:
#' a. reported joint counts (group-by of record and identifier)
#' b. parse into string for all major joints
#' c. parse symmetrical pain status
#' get visit reference and curated ppacman table
visit_ref <- synGet(VISIT_REF_ID)$path %>% fread()
ppacman <- synGet(PPACMAN_TBL_ID)$path %>% fread()
joint_summaries <- purrr::map(names(JOINT_TBL_REF), function(activity){
#' retrieve data
prefix <- JOINT_TBL_REF[[activity]]$prefix
output_filename <- JOINT_TBL_REF[[activity]]$output_filename
tbl_id <- JOINT_TBL_REF[[activity]]$syn_id
joint_report <- get_joint_report(tbl_id) %>% dplyr::ungroup()
#' get each metrics (joined on recordId; each helper returns one row per record)
metrics <-
list(
joint_count = calculate_reported_counts(joint_report),
joint_str_list = create_joint_string_list(joint_report),
joint_pain_status = parse_joint_pain_symmetry(joint_report)) %>%
purrr::reduce(dplyr::full_join,
by = c("recordId")) %>%
dplyr::mutate(counts = ifelse(is.na(counts), 0, counts)) %>%
dplyr::rename_with(~paste0(prefix, "_", .),
-c("recordId"))
#' clean data: drop records with no usable summary, join clinical visit
#' info, then keep the latest record per (participantId, visit_num)
counts_columns <- paste0(prefix, "_", "counts")
joint_data <- joint_report %>%
distinct(recordId, participantId, createdOn) %>%
dplyr::left_join(metrics, by = c("recordId")) %>%
dplyr::arrange(desc(createdOn)) %>%
dplyr::mutate(error = ifelse(is.na(!!sym(counts_columns)),
"error: empty list in summary.json",
NA_character_)) %>%
dplyr::filter(is.na(error)) %>%
join_with_ppacman(
visit_ref_tbl = visit_ref,
ppacman_tbl = ppacman) %>%
dplyr::select(recordId,
participantId,
createdOn,
visit_num,
starts_with(activity)) %>%
dplyr::group_by(participantId, visit_num) %>%
dplyr::summarise_all(last) %>%
dplyr::mutate(createdOn = as.character(createdOn))
#' get error logging for records removed above
error_log <- joint_report %>%
dplyr::filter(!recordId %in% unique(joint_data$recordId)) %>%
dplyr::group_by(recordId) %>%
dplyr::summarise_all(last) %>%
dplyr::select(recordId, error) %>%
dplyr::mutate(error = ifelse(
is.na(error),
"removed from ppacman joining",
error))
#' save joint features to synapse (with git permalink as provenance)
save_to_synapse(data = joint_data,
output_filename = output_filename,
parent = PARENT_SYN_ID,
name = "get joint summaries",
executed = GIT_URL,
used = tbl_id)
#' save error log to synapse
save_to_synapse(data = error_log,
output_filename = glue::glue("error_log_", output_filename),
parent = ERROR_LOG_SYN_ID,
name = "get error log for joint summaries",
executed = GIT_URL,
used = tbl_id)
})
}
log_process(main(), SCRIPT_PATH)
| /feature_extraction/jointSummaries_features.R | permissive | itismeghasyam/psorcast-validation-analysis | R | false | false | 8,301 | r | ############################################################
#' This script will be used for summarizing reported each
#' joint counts for Psorcast users
#'
#' Note: it will exclude finger joints and only use
#' main joint location (ankle, hip, elbow, wrist etc.)
#'
#' @author: aryton.tediarjo@sagebase.org
#' @maintainer: aryton.tediarjo@sagebase.org
############################################################
rm(list=ls())
gc()
# import libraries
library(synapser)
library(data.table)
library(tidyverse)
library(githubr)
source("utils/feature_extraction_utils.R")
source('utils/processing_log_utils.R')
synLogin()
############################
# Global Vars
############################
PARENT_SYN_ID <- "syn22336715"
ERROR_LOG_SYN_ID <- "syn25832341"
VISIT_REF <- "syn25825626"
PPACMAN_TBL_ID <- "syn22337133"
VISIT_REF_ID <- "syn25825626"
FILE_COLUMNS <- "summary.json"
JOINT_LOCATION <- c("knee", "hip", "ankle",
"wrist", "elbow", "shoulder")
OUTPUT_FILE <- "joint_counts_comparison.tsv"
JOINT_TBL_REF <- list(
dig_jc = list(
prefix = "dig_jc",
syn_id = "syn22281786",
output_filename = "dig_jc_features.tsv"),
gs_jc = list(
prefix = "gs_jc",
syn_id = "syn22281781",
output_filename = "gs_jc_features.tsv"
),
gs_swell = list(
prefix = "gs_swell",
syn_id = "syn22281780",
output_filename = "gs_swell_features.tsv"
)
)
############################
# Git Reference
############################
SCRIPT_PATH <- file.path('feature_extraction', "jointSummaries_features.R")
GIT_TOKEN_PATH <- config::get("git")$token_path
GIT_REPO <- config::get("git")$repo
githubr::setGithubToken(readLines(GIT_TOKEN_PATH))
GIT_URL <- getPermlink(
repository = getRepo(
repository = GIT_REPO,
ref="branch",
refName='main'),
repositoryPath = SCRIPT_PATH)
#' Count the number of selected (reported) joints per record.
#'
#' Rows flagged with a non-NA `error` are dropped before counting.
#'
#' @param data dataframe of flattened summary.json entries, with columns
#'   `recordId`, `isSelected` and `error`
#' @return a tibble with one row per `recordId` and its selected-joint
#'   `counts`, sorted in decreasing order of counts
calculate_reported_counts <- function(data){
  data %>%
    dplyr::filter(is.na(error)) %>%
    dplyr::group_by(recordId) %>%
    # spell out TRUE rather than the reassignable shortcut T
    dplyr::summarise(counts = sum(isSelected, na.rm = TRUE)) %>%
    dplyr::arrange(desc(counts))
}
#' Collapse the selected joint identifiers of each record into a single
#' comma-separated string.
#'
#' @param data dataframe of flattened summary.json entries
#' @return a tibble with columns `recordId` and `joint_list`
create_joint_string_list <- function(data){
  selected_rows <- data %>%
    dplyr::filter(is.na(error) & isSelected == TRUE)
  selected_rows %>%
    dplyr::group_by(recordId) %>%
    dplyr::summarise(joint_list = paste0(identifier, collapse = ","))
}
#' function to parse pain status symmetry
#' @data dataframe of flattened summary.json
parse_joint_pain_symmetry <- function(data){
detected_joints <- data %>%
drop_na(identifier_group) %>%
.$identifier_group %>%
unique()
purrr::map(detected_joints , function(joint_identifier){
output_cols <- glue::glue("status_{joint_identifier}")
left_side_cols <- glue::glue("left_{joint_identifier}")
right_side_cols <- glue::glue("right_{joint_identifier}")
data %>%
dplyr::filter(is.na(identifier) |
str_detect(identifier, joint_identifier)) %>%
pivot_wider(names_from = identifier,
values_from = isSelected) %>%
dplyr::mutate(!!sym(output_cols) :=
case_when((!!sym(right_side_cols) == TRUE &
!!sym(left_side_cols) == TRUE) ~ "both",
(!!sym(right_side_cols) == TRUE &
!!sym(left_side_cols) == FALSE) ~ "right",
(!!sym(right_side_cols) == FALSE &
!!sym(left_side_cols) == TRUE) ~ "left",
TRUE ~ NA_character_)) %>%
dplyr::select(recordId, !!sym(output_cols))}) %>%
purrr::reduce(dplyr::full_join, by = c("recordId"))
}
#' function to get joint report based on synapseID
#' @syn_id: synapse id of the joint report tables
get_joint_report <- function(syn_id){
get_table(syn_id, file_columns = FILE_COLUMNS) %>%
flatten_joint_summary() %>%
dplyr::group_by(recordId,
createdOn,
participantId,
identifier) %>%
dplyr::summarise_all(last) %>%
dplyr::mutate(
identifier_group =
str_extract(identifier,
str_c(JOINT_LOCATION,
collapse = "|")))
}
main <- function(){
#' - Go through each synapse id
#' - For each table flatten all the summary.json files
#' - Calculate metrics:
#' a. reported joint counts (group-by of record and identifier)
#' b. parse into string for all major joints
#' c. parse symmetrical pain status
#' get visit reference and curated ppacman table
visit_ref <- synGet(VISIT_REF_ID)$path %>% fread()
ppacman <- synGet(PPACMAN_TBL_ID)$path %>% fread()
joint_summaries <- purrr::map(names(JOINT_TBL_REF), function(activity){
#' retrieve data
prefix <- JOINT_TBL_REF[[activity]]$prefix
output_filename <- JOINT_TBL_REF[[activity]]$output_filename
tbl_id <- JOINT_TBL_REF[[activity]]$syn_id
joint_report <- get_joint_report(tbl_id) %>% dplyr::ungroup()
#' get each metrics
metrics <-
list(
joint_count = calculate_reported_counts(joint_report),
joint_str_list = create_joint_string_list(joint_report),
joint_pain_status = parse_joint_pain_symmetry(joint_report)) %>%
purrr::reduce(dplyr::full_join,
by = c("recordId")) %>%
dplyr::mutate(counts = ifelse(is.na(counts), 0, counts)) %>%
dplyr::rename_with(~paste0(prefix, "_", .),
-c("recordId"))
#' clean data
counts_columns <- paste0(prefix, "_", "counts")
joint_data <- joint_report %>%
distinct(recordId, participantId, createdOn) %>%
dplyr::left_join(metrics, by = c("recordId")) %>%
dplyr::arrange(desc(createdOn)) %>%
dplyr::mutate(error = ifelse(is.na(!!sym(counts_columns)),
"error: empty list in summary.json",
NA_character_)) %>%
dplyr::filter(is.na(error)) %>%
join_with_ppacman(
visit_ref_tbl = visit_ref,
ppacman_tbl = ppacman) %>%
dplyr::select(recordId,
participantId,
createdOn,
visit_num,
starts_with(activity)) %>%
dplyr::group_by(participantId, visit_num) %>%
dplyr::summarise_all(last) %>%
dplyr::mutate(createdOn = as.character(createdOn))
#' get error logging for removed records
error_log <- joint_report %>%
dplyr::filter(!recordId %in% unique(joint_data$recordId)) %>%
dplyr::group_by(recordId) %>%
dplyr::summarise_all(last) %>%
dplyr::select(recordId, error) %>%
dplyr::mutate(error = ifelse(
is.na(error),
"removed from ppacman joining",
error))
#' save joint features to synapse
save_to_synapse(data = joint_data,
output_filename = output_filename,
parent = PARENT_SYN_ID,
name = "get joint summaries",
executed = GIT_URL,
used = tbl_id)
#' save error log to synapse
save_to_synapse(data = error_log,
output_filename = glue::glue("error_log_", output_filename),
parent = ERROR_LOG_SYN_ID,
name = "get error log for joint summaries",
executed = GIT_URL,
used = tbl_id)
})
}
log_process(main(), SCRIPT_PATH)
|
# Exploratory analysis of the mlbench Soybean data: frequency distribution
# and a barplot for every categorical predictor (Class column excluded).
library(mlbench)
data(Soybean)
str(Soybean)
#PREDICTORS--------------------
#Frequency Distribution (all columns except the Class column in position 1)
freq_dist <- apply(Soybean[,-1], 2, function(x) table(x))
freq_dist
#Visual Freq Distribution, one barplot per predictor.
# BUG FIX: the original used Soybean$i, which looks up a column literally
# named "i" (NULL, so the plots were empty) instead of the loop variable;
# [[i]] indexes by the value of i.
for (i in names(Soybean[,-1]))
{
barplot(table(Soybean[[i]]), main = i, ylim = c(0,700), axes = FALSE)
axis(side = 2, at = seq(0, 700, 75))
}
#Missing Data--------------
percent_miss <- function(x) {(sum(is.na(x))/length(x))*100}
# Share of missing values per row and per column
rowmiss <- apply(Soybean, 1, percent_miss)
colmiss <- apply(Soybean, 2, percent_miss)
# Observations that contain at least one NA
missing <- subset(Soybean, rowmiss > 0)
#explore observations with missing values
Soybean$missing <- ifelse(rownames(Soybean) %in% rownames(missing), "NAs", "No NAs")
table(Soybean$missing)
missing_data <- Soybean[Soybean$missing == 'NAs',]
# Per-column missingness within the NA-containing rows only
# (NOTE: this overwrites the earlier `colmiss`)
colmiss <- apply(missing_data, 2, percent_miss)
colmiss <- data.frame(variables = names(colmiss), missing_percentage = colmiss, row.names = NULL)
# Columns that are >80% missing among the incomplete observations
colmiss[colmiss$missing_percentage > 80,]
| /Predictive_Modeling/3_DataPreprocessing/3_ example_PredictorExploratoryAnalysisNums.R | no_license | LataniaReece/R_Scripts | R | false | false | 950 | r | library(mlbench)
data(Soybean)
str(Soybean)
#PREDICTORS--------------------
#Frequency Distribution
freq_dist <- apply(Soybean[,-1], 2, function(x) table(x))
freq_dist
#Visual Freq Distribution
for (i in names(Soybean[,-1]))
{
barplot(table(Soybean$i), main = i, ylim = c(0,700), axes = FALSE)
axis(side = 2, at = seq(0, 700, 75))
}
#Missing Data--------------
percent_miss <- function(x) {(sum(is.na(x))/length(x))*100}
rowmiss <- apply(Soybean, 1, percent_miss)
colmiss <- apply(Soybean, 2, percent_miss)
missing <- subset(Soybean, rowmiss > 0)
#explore observations with missing values
Soybean$missing <- ifelse(rownames(Soybean) %in% rownames(missing), "NAs", "No NAs")
table(Soybean$missing)
missing_data <- Soybean[Soybean$missing == 'NAs',]
colmiss <- apply(missing_data, 2, percent_miss)
colmiss <- data.frame(variables = names(colmiss), missing_percentage = colmiss, row.names = NULL)
colmiss[colmiss$missing_percentage > 80,]
|
\name{SymmetryTest}
\alias{symmetry_test}
\alias{symmetry_test.formula}
\alias{symmetry_test.table}
\alias{symmetry_test.SymmetryProblem}
\title{General Symmetry Test}
\description{
  Testing the symmetry of a set of repeated measurements variables measured on
arbitrary scales in a complete block design.
}
% NOTE: the markup in the following section is necessary for correct rendering
\usage{
\method{symmetry_test}{formula}(formula, data, subset = NULL, weights = NULL, \dots)
\method{symmetry_test}{table}(object, \dots)
\method{symmetry_test}{SymmetryProblem}(object, teststat = c("maximum", "quadratic", "scalar"),
distribution = c("asymptotic", "approximate",
"exact", "none"),
alternative = c("two.sided", "less", "greater"),
xtrafo = trafo, ytrafo = trafo, scores = NULL,
check = NULL, paired = FALSE, \dots)
}
\arguments{
\item{formula}{
a formula of the form \code{y1 + ... + yq ~ x | block} where \code{y1},
\dots, \code{yq} are measured on arbitrary scales (nominal, ordinal or
continuous with or without censoring), \code{x} is a factor and \code{block}
is an optional factor (which is generated automatically if omitted).
}
\item{data}{
an optional data frame containing the variables in the model formula.
}
\item{subset}{
an optional vector specifying a subset of observations to be used. Defaults
to \code{NULL}.
}
\item{weights}{
an optional formula of the form \code{~ w} defining integer valued case
weights for each observation. Defaults to \code{NULL}, implying equal
weight for all observations.
}
\item{object}{
an object inheriting from classes \code{"table"} (with identical
\code{dimnames} components) or \code{"\linkS4class{SymmetryProblem}"}.
}
\item{teststat}{
a character, the type of test statistic to be applied: either a maximum
statistic (\code{"maximum"}, default), a quadratic form (\code{"quadratic"})
or a standardized scalar test statistic (\code{"scalar"}).
}
\item{distribution}{
a character, the conditional null distribution of the test statistic can be
approximated by its asymptotic distribution (\code{"asymptotic"}, default)
or via Monte Carlo resampling (\code{"approximate"}). Alternatively, the
functions \code{\link{asymptotic}} or \code{\link{approximate}} can be used.
For univariate two-sample problems, \code{"exact"} or use of the function
\code{\link{exact}} computes the exact distribution. Computation of the
null distribution can be suppressed by specifying \code{"none"}. It is also
possible to specify a function with one argument (an object inheriting from
\code{"\linkS4class{IndependenceTestStatistic}"}) that returns an object of
class \code{"\linkS4class{NullDistribution}"}.
}
\item{alternative}{
a character, the alternative hypothesis: either \code{"two.sided"}
(default), \code{"greater"} or \code{"less"}.
}
\item{xtrafo}{
a function of transformations to be applied to the factor \code{x} supplied
in \code{formula}; see \sQuote{Details}. Defaults to \code{\link{trafo}}.
}
\item{ytrafo}{
a function of transformations to be applied to the variables \code{y1},
\dots, \code{yq} supplied in \code{formula}; see \sQuote{Details}. Defaults
to \code{\link{trafo}}.
}
\item{scores}{
a named list of scores to be attached to ordered factors; see
\sQuote{Details}. Defaults to \code{NULL}, implying equally spaced scores.
}
\item{check}{
a function to be applied to objects of class
\code{"\linkS4class{IndependenceTest}"} in order to check for specific
properties of the data. Defaults to \code{NULL}.
}
\item{paired}{
a logical, indicating that paired data have been transformed in such a way
that the (unstandardized) linear statistic is the sum of the absolute values
of the positive differences between the paired observations. Defaults to
\code{FALSE}.
}
\item{\dots}{
further arguments to be passed to or from other methods (currently ignored).
}
}
\details{
\code{symmetry_test} provides a general symmetry test for a set of variables
measured on arbitrary scales. This function is based on the general framework
for conditional inference procedures proposed by Strasser and Weber (1999).
The salient parts of the Strasser-Weber framework are elucidated by Hothorn
\emph{et al.} (2006) and a thorough description of the software implementation
is given by Hothorn \emph{et al.} (2008).
The null hypothesis of symmetry is tested. The response variables and the
measurement conditions are given by \code{y1}, \dots, \code{yq} and \code{x},
respectively, and \code{block} is a factor where each level corresponds to
exactly one subject with repeated measurements.
A vector of case weights, e.g., observation counts, can be supplied through
the \code{weights} argument and the type of test statistic is specified by
the \code{teststat} argument. Influence and regression functions, i.e.,
transformations of \code{y1}, \dots, \code{yq} and \code{x}, are specified by
the \code{ytrafo} and \code{xtrafo} arguments respectively; see
\code{\link{trafo}} for the collection of transformation functions currently
available. This allows for implementation of both novel and familiar test
statistics, e.g., the McNemar test, the Cochran \eqn{Q} test, the Wilcoxon
signed-rank test and the Friedman test. Furthermore, multivariate extensions
such as the multivariate Friedman test (Gerig, 1969; Puri and Sen, 1971) can
be implemented without much effort (see \sQuote{Examples}).
If, say, \code{y1} and/or \code{x} are ordered factors, the default scores,
\code{1:nlevels(y1)} and \code{1:nlevels(x)} respectively, can be altered
using the \code{scores} argument; this argument can also be used to coerce
nominal factors to class \code{"ordered"}. For example, when \code{y1} is an
ordered factor with four levels and \code{x} is a nominal factor with three
levels, \code{scores = list(y1 = c(1, 3:5), x = c(1:2, 4))} supplies the
scores to be used. For ordered alternatives the scores must be monotonic, but
non-monotonic scores are also allowed for testing against, e.g., umbrella
alternatives. The length of the score vector must be equal to the number of
factor levels.
The conditional null distribution of the test statistic is used to obtain
\eqn{p}-values and an asymptotic approximation of the exact distribution is
used by default (\code{distribution = "asymptotic"}). Alternatively, the
distribution can be approximated via Monte Carlo resampling or computed
exactly for univariate two-sample problems by setting \code{distribution} to
\code{"approximate"} or \code{"exact"} respectively. See
\code{\link{asymptotic}}, \code{\link{approximate}} and \code{\link{exact}}
for details.
}
\value{
An object inheriting from class \code{"\linkS4class{IndependenceTest}"}.
}
\note{
Starting with \pkg{coin} version 1.1-0, maximum statistics and quadratic forms
can no longer be specified using \code{teststat = "maxtype"} and
\code{teststat = "quadtype"} respectively (as was used in versions prior to
0.4-5).
}
\references{
Gerig, T. (1969). A multivariate extension of Friedman's
\eqn{\chi^2_r}{chi^2_r} test. \emph{Journal of the American Statistical
Association} \bold{64}(328), 1595--1608.
Hothorn, T., Hornik, K., van de Wiel, M. A. and Zeileis, A. (2006). A Lego
system for conditional inference. \emph{The American Statistician}
\bold{60}(3), 257--263.
Hothorn, T., Hornik, K., van de Wiel, M. A. and Zeileis, A. (2008).
Implementing a class of permutation tests: the coin package. \emph{Journal of
Statistical Software} \bold{28}(8), 1--23.
\url{http://www.jstatsoft.org/v28/i08/}
Puri, M. L. and Sen, P. K. (1971). \emph{Nonparametric Methods in
Multivariate Analysis}. New York: John Wiley & Sons.
Strasser, H. and Weber, C. (1999). On the asymptotic theory of permutation
statistics. \emph{Mathematical Methods of Statistics} \bold{8}(2), 220--250.
}
\examples{
## One-sided exact Fisher-Pitman test for paired observations
y1 <- c(1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30)
y2 <- c(0.878, 0.647, 0.598, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29)
dta <- data.frame(
y = c(y1, y2),
x = gl(2, length(y1)),
block = factor(rep(seq_along(y1), 2))
)
symmetry_test(y ~ x | block, data = dta,
distribution = "exact", alternative = "greater")
## Alternatively: transform data and set 'paired = TRUE'
delta <- y1 - y2
y <- as.vector(rbind(abs(delta) * (delta >= 0), abs(delta) * (delta < 0)))
x <- factor(rep(0:1, length(delta)), labels = c("pos", "neg"))
block <- gl(length(delta), 2)
symmetry_test(y ~ x | block,
distribution = "exact", alternative = "greater",
paired = TRUE)
### Example data
### Gerig (1969, p. 1597)
gerig <- data.frame(
y1 = c( 0.547, 1.811, 2.561,
1.706, 2.509, 1.414,
-0.288, 2.524, 3.310,
1.417, 0.703, 0.961,
0.878, 0.094, 1.682,
-0.680, 2.077, 3.181,
0.056, 0.542, 2.983,
0.711, 0.269, 1.662,
-1.335, 1.545, 2.920,
1.635, 0.200, 2.065),
y2 = c(-0.575, 1.840, 2.399,
1.252, 1.574, 3.059,
-0.310, 1.553, 0.560,
0.932, 1.390, 3.083,
0.819, 0.045, 3.348,
0.497, 1.747, 1.355,
-0.285, 0.760, 2.332,
0.089, 1.076, 0.960,
-0.349, 1.471, 4.121,
0.845, 1.480, 3.391),
x = factor(rep(1:3, 10)),
b = factor(rep(1:10, each = 3))
)
### Asymptotic multivariate Friedman test
### Gerig (1969, p. 1599)
symmetry_test(y1 + y2 ~ x | b, data = gerig, teststat = "quadratic",
ytrafo = function(data)
trafo(data, numeric_trafo = rank_trafo,
block = gerig$b)) # L_n = 17.238
### Asymptotic multivariate Page test
(st <- symmetry_test(y1 + y2 ~ x | b, data = gerig,
ytrafo = function(data)
trafo(data, numeric_trafo = rank_trafo,
block = gerig$b),
scores = list(x = 1:3)))
pvalue(st, method = "step-down")
}
\keyword{htest}
| /coin/man/SymmetryTest.Rd | no_license | ggazzola/DBB-CPI-ForMSPs | R | false | false | 10,417 | rd | \name{SymmetryTest}
\alias{symmetry_test}
\alias{symmetry_test.formula}
\alias{symmetry_test.table}
\alias{symmetry_test.SymmetryProblem}
\title{General Symmetry Test}
\description{
  Testing the symmetry of a set of repeated measurements variables measured on
arbitrary scales in a complete block design.
}
% NOTE: the markup in the following section is necessary for correct rendering
\usage{
\method{symmetry_test}{formula}(formula, data, subset = NULL, weights = NULL, \dots)
\method{symmetry_test}{table}(object, \dots)
\method{symmetry_test}{SymmetryProblem}(object, teststat = c("maximum", "quadratic", "scalar"),
distribution = c("asymptotic", "approximate",
"exact", "none"),
alternative = c("two.sided", "less", "greater"),
xtrafo = trafo, ytrafo = trafo, scores = NULL,
check = NULL, paired = FALSE, \dots)
}
\arguments{
\item{formula}{
a formula of the form \code{y1 + ... + yq ~ x | block} where \code{y1},
\dots, \code{yq} are measured on arbitrary scales (nominal, ordinal or
continuous with or without censoring), \code{x} is a factor and \code{block}
is an optional factor (which is generated automatically if omitted).
}
\item{data}{
an optional data frame containing the variables in the model formula.
}
\item{subset}{
an optional vector specifying a subset of observations to be used. Defaults
to \code{NULL}.
}
\item{weights}{
an optional formula of the form \code{~ w} defining integer valued case
weights for each observation. Defaults to \code{NULL}, implying equal
weight for all observations.
}
\item{object}{
an object inheriting from classes \code{"table"} (with identical
\code{dimnames} components) or \code{"\linkS4class{SymmetryProblem}"}.
}
\item{teststat}{
a character, the type of test statistic to be applied: either a maximum
statistic (\code{"maximum"}, default), a quadratic form (\code{"quadratic"})
or a standardized scalar test statistic (\code{"scalar"}).
}
\item{distribution}{
a character, the conditional null distribution of the test statistic can be
approximated by its asymptotic distribution (\code{"asymptotic"}, default)
or via Monte Carlo resampling (\code{"approximate"}). Alternatively, the
functions \code{\link{asymptotic}} or \code{\link{approximate}} can be used.
For univariate two-sample problems, \code{"exact"} or use of the function
\code{\link{exact}} computes the exact distribution. Computation of the
null distribution can be suppressed by specifying \code{"none"}. It is also
possible to specify a function with one argument (an object inheriting from
\code{"\linkS4class{IndependenceTestStatistic}"}) that returns an object of
class \code{"\linkS4class{NullDistribution}"}.
}
\item{alternative}{
a character, the alternative hypothesis: either \code{"two.sided"}
(default), \code{"greater"} or \code{"less"}.
}
\item{xtrafo}{
a function of transformations to be applied to the factor \code{x} supplied
in \code{formula}; see \sQuote{Details}. Defaults to \code{\link{trafo}}.
}
\item{ytrafo}{
a function of transformations to be applied to the variables \code{y1},
\dots, \code{yq} supplied in \code{formula}; see \sQuote{Details}. Defaults
to \code{\link{trafo}}.
}
\item{scores}{
a named list of scores to be attached to ordered factors; see
\sQuote{Details}. Defaults to \code{NULL}, implying equally spaced scores.
}
\item{check}{
a function to be applied to objects of class
\code{"\linkS4class{IndependenceTest}"} in order to check for specific
properties of the data. Defaults to \code{NULL}.
}
\item{paired}{
a logical, indicating that paired data have been transformed in such a way
that the (unstandardized) linear statistic is the sum of the absolute values
of the positive differences between the paired observations. Defaults to
\code{FALSE}.
}
\item{\dots}{
further arguments to be passed to or from other methods (currently ignored).
}
}
\details{
\code{symmetry_test} provides a general symmetry test for a set of variables
measured on arbitrary scales. This function is based on the general framework
for conditional inference procedures proposed by Strasser and Weber (1999).
The salient parts of the Strasser-Weber framework are elucidated by Hothorn
\emph{et al.} (2006) and a thorough description of the software implementation
is given by Hothorn \emph{et al.} (2008).
The null hypothesis of symmetry is tested. The response variables and the
measurement conditions are given by \code{y1}, \dots, \code{yq} and \code{x},
respectively, and \code{block} is a factor where each level corresponds to
exactly one subject with repeated measurements.
A vector of case weights, e.g., observation counts, can be supplied through
the \code{weights} argument and the type of test statistic is specified by
the \code{teststat} argument. Influence and regression functions, i.e.,
transformations of \code{y1}, \dots, \code{yq} and \code{x}, are specified by
the \code{ytrafo} and \code{xtrafo} arguments respectively; see
\code{\link{trafo}} for the collection of transformation functions currently
available. This allows for implementation of both novel and familiar test
statistics, e.g., the McNemar test, the Cochran \eqn{Q} test, the Wilcoxon
signed-rank test and the Friedman test. Furthermore, multivariate extensions
such as the multivariate Friedman test (Gerig, 1969; Puri and Sen, 1971) can
be implemented without much effort (see \sQuote{Examples}).
If, say, \code{y1} and/or \code{x} are ordered factors, the default scores,
\code{1:nlevels(y1)} and \code{1:nlevels(x)} respectively, can be altered
using the \code{scores} argument; this argument can also be used to coerce
nominal factors to class \code{"ordered"}. For example, when \code{y1} is an
ordered factor with four levels and \code{x} is a nominal factor with three
levels, \code{scores = list(y1 = c(1, 3:5), x = c(1:2, 4))} supplies the
scores to be used. For ordered alternatives the scores must be monotonic, but
non-montonic scores are also allowed for testing against, e.g., umbrella
alternatives. The length of the score vector must be equal to the number of
factor levels.
The conditional null distribution of the test statistic is used to obtain
\eqn{p}-values and an asymptotic approximation of the exact distribution is
used by default (\code{distribution = "asymptotic"}). Alternatively, the
distribution can be approximated via Monte Carlo resampling or computed
exactly for univariate two-sample problems by setting \code{distribution} to
\code{"approximate"} or \code{"exact"} respectively. See
\code{\link{asymptotic}}, \code{\link{approximate}} and \code{\link{exact}}
for details.
}
\value{
An object inheriting from class \code{"\linkS4class{IndependenceTest}"}.
}
\note{
Starting with \pkg{coin} version 1.1-0, maximum statistics and quadratic forms
can no longer be specified using \code{teststat = "maxtype"} and
\code{teststat = "quadtype"} respectively (as was used in versions prior to
0.4-5).
}
\references{
Gerig, T. (1969). A multivariate extension of Friedman's
\eqn{\chi^2_r}{chi^2_r} test. \emph{Journal of the American Statistical
Association} \bold{64}(328), 1595--1608.
Hothorn, T., Hornik, K., van de Wiel, M. A. and Zeileis, A. (2006). A Lego
system for conditional inference. \emph{The American Statistician}
\bold{60}(3), 257--263.
Hothorn, T., Hornik, K., van de Wiel, M. A. and Zeileis, A. (2008).
Implementing a class of permutation tests: the coin package. \emph{Journal of
Statistical Software} \bold{28}(8), 1--23.
\url{http://www.jstatsoft.org/v28/i08/}
Puri, M. L. and Sen, P. K. (1971). \emph{Nonparametric Methods in
Multivariate Analysis}. New York: John Wiley & Sons.
Strasser, H. and Weber, C. (1999). On the asymptotic theory of permutation
statistics. \emph{Mathematical Methods of Statistics} \bold{8}(2), 220--250.
}
\examples{
## One-sided exact Fisher-Pitman test for paired observations
y1 <- c(1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30)
y2 <- c(0.878, 0.647, 0.598, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29)
dta <- data.frame(
y = c(y1, y2),
x = gl(2, length(y1)),
block = factor(rep(seq_along(y1), 2))
)
symmetry_test(y ~ x | block, data = dta,
distribution = "exact", alternative = "greater")
## Alternatively: transform data and set 'paired = TRUE'
delta <- y1 - y2
y <- as.vector(rbind(abs(delta) * (delta >= 0), abs(delta) * (delta < 0)))
x <- factor(rep(0:1, length(delta)), labels = c("pos", "neg"))
block <- gl(length(delta), 2)
symmetry_test(y ~ x | block,
distribution = "exact", alternative = "greater",
paired = TRUE)
### Example data
### Gerig (1969, p. 1597)
gerig <- data.frame(
y1 = c( 0.547, 1.811, 2.561,
1.706, 2.509, 1.414,
-0.288, 2.524, 3.310,
1.417, 0.703, 0.961,
0.878, 0.094, 1.682,
-0.680, 2.077, 3.181,
0.056, 0.542, 2.983,
0.711, 0.269, 1.662,
-1.335, 1.545, 2.920,
1.635, 0.200, 2.065),
y2 = c(-0.575, 1.840, 2.399,
1.252, 1.574, 3.059,
-0.310, 1.553, 0.560,
0.932, 1.390, 3.083,
0.819, 0.045, 3.348,
0.497, 1.747, 1.355,
-0.285, 0.760, 2.332,
0.089, 1.076, 0.960,
-0.349, 1.471, 4.121,
0.845, 1.480, 3.391),
x = factor(rep(1:3, 10)),
b = factor(rep(1:10, each = 3))
)
### Asymptotic multivariate Friedman test
### Gerig (1969, p. 1599)
symmetry_test(y1 + y2 ~ x | b, data = gerig, teststat = "quadratic",
ytrafo = function(data)
trafo(data, numeric_trafo = rank_trafo,
block = gerig$b)) # L_n = 17.238
### Asymptotic multivariate Page test
(st <- symmetry_test(y1 + y2 ~ x | b, data = gerig,
ytrafo = function(data)
trafo(data, numeric_trafo = rank_trafo,
block = gerig$b),
scores = list(x = 1:3)))
pvalue(st, method = "step-down")
}
\keyword{htest}
|
# Statistical study of the shots attempted during the 2014-2015 season

# Read the data: semicolon-separated file with a header row.
# (The original used header = FALSE and dec = ";", yet referred to columns
# by name; a semicolon cannot be the decimal separator of a ";"-separated file.)
df <- read.csv(file = "nba.csv", sep = ";",
               header = TRUE, dec = ".")

# Basic structure of the data set
nrow(df)
ncol(df)
colnames(df)
str(df)

# Period and SHOOTER are categorical; PTS_TYPE is kept numeric so it can be
# used in the point arithmetic below (value of the attempt: 2 or 3).
df$Period <- as.factor(df$Period)
df$SHOOTER <- as.factor(df$SHOOTER)
length(levels(df$Period))
length(unique(df$PTS_TYPE))
nlevels(df$SHOOTER)

summary(df)
sd(df$SHOT_DIST)
sd(df$SHOT_CLOCK)

# How many shots were made / missed
table(df[, "SHOT_RESULT"])

# Quartiles (probs must be probabilities in [0, 1], not a count)
quantile(df$SHOT_CLOCK, probs = seq(0, 1, by = 0.25), na.rm = TRUE)
# Deciles
quantile(df$CLOSE_DIST, probs = seq(0, 1, by = 0.1), na.rm = TRUE)

# Number of distinct games
liste_game <- unique(df$GAME_ID)
length(liste_game)

# Number of distinct players
nlevels(df$SHOOTER)

# Convert SHOT_DIST (feet) to metres so Europeans understand our numbers
df$SHOT_DIST_METRE <- df$SHOT_DIST * 0.30

# Points scored by the attempt (0, 2 or 3)
df$PTS_MARQUES <- ifelse(df$SHOT_RESULT == "made", yes = df$PTS_TYPE, no = 0)

# Drop the GAME_RESULT variable, which is not useful here
df$GAME_RESULT <- NULL

# Copy of the data without the first column (GAME_ID)
df2 <- df[, -1]

# The 100 longest shots, made or missed (sort rows, not columns)
rang <- order(df$SHOT_DIST, decreasing = TRUE)
df3 <- df[rang, ]
df3 <- df3[1:100, ]

# The 100 longest made shots
df_made <- df[df$SHOT_RESULT == "made", ]
df4 <- df_made[order(df_made$SHOT_DIST, decreasing = TRUE), ][1:100, ]

# How many 3-pointers did Kobe Bryant make?
df_kobe <- df[df$SHOT_RESULT == "made" &
                df$PTS_TYPE == 3 &
                df$SHOOTER == "Kobe Bryant", ]
nrow(df_kobe)

# Top 5 scorers of the season
df_total <- aggregate(PTS_MARQUES ~ SHOOTER, data = df, FUN = sum)
df_total_tri <- df_total[order(df_total$PTS_MARQUES, decreasing = TRUE), ]
df_top5 <- df_total_tri[1:5, ]

# A plot adapted to the type of each variable:
# boxplot for numeric columns, barplot of counts for factors.
build_graph <- function(une_colonne, nom_colonne) {
  if (is.numeric(une_colonne)) {
    boxplot(une_colonne, main = nom_colonne)
  } else if (is.factor(une_colonne)) {
    tri <- table(une_colonne)
    barplot(tri, main = nom_colonne)
  }
}

# Run the function on every column of the data frame
for (colonne in colnames(df)) {
  build_graph(une_colonne = df[[colonne]], nom_colonne = colonne)
}
| /dataset/nba_stats.R | no_license | matisdpz/programmation-r | R | false | false | 2,395 | r |
# Statistical study of the shots attempted during the 2014-2015 season

# Read the data: semicolon-separated file with a header row.
# (The original used header = FALSE and dec = ";", yet referred to columns
# by name; a semicolon cannot be the decimal separator of a ";"-separated file.)
df <- read.csv(file = "nba.csv", sep = ";",
               header = TRUE, dec = ".")

# Basic structure of the data set
nrow(df)
ncol(df)
colnames(df)
str(df)

# Period and SHOOTER are categorical; PTS_TYPE is kept numeric so it can be
# used in the point arithmetic below (value of the attempt: 2 or 3).
df$Period <- as.factor(df$Period)
df$SHOOTER <- as.factor(df$SHOOTER)
length(levels(df$Period))
length(unique(df$PTS_TYPE))
nlevels(df$SHOOTER)

summary(df)
sd(df$SHOT_DIST)
sd(df$SHOT_CLOCK)

# How many shots were made / missed
table(df[, "SHOT_RESULT"])

# Quartiles (probs must be probabilities in [0, 1], not a count)
quantile(df$SHOT_CLOCK, probs = seq(0, 1, by = 0.25), na.rm = TRUE)
# Deciles
quantile(df$CLOSE_DIST, probs = seq(0, 1, by = 0.1), na.rm = TRUE)

# Number of distinct games
liste_game <- unique(df$GAME_ID)
length(liste_game)

# Number of distinct players
nlevels(df$SHOOTER)

# Convert SHOT_DIST (feet) to metres so Europeans understand our numbers
df$SHOT_DIST_METRE <- df$SHOT_DIST * 0.30

# Points scored by the attempt (0, 2 or 3)
df$PTS_MARQUES <- ifelse(df$SHOT_RESULT == "made", yes = df$PTS_TYPE, no = 0)

# Drop the GAME_RESULT variable, which is not useful here
df$GAME_RESULT <- NULL

# Copy of the data without the first column (GAME_ID)
df2 <- df[, -1]

# The 100 longest shots, made or missed (sort rows, not columns)
rang <- order(df$SHOT_DIST, decreasing = TRUE)
df3 <- df[rang, ]
df3 <- df3[1:100, ]

# The 100 longest made shots
df_made <- df[df$SHOT_RESULT == "made", ]
df4 <- df_made[order(df_made$SHOT_DIST, decreasing = TRUE), ][1:100, ]

# How many 3-pointers did Kobe Bryant make?
df_kobe <- df[df$SHOT_RESULT == "made" &
                df$PTS_TYPE == 3 &
                df$SHOOTER == "Kobe Bryant", ]
nrow(df_kobe)

# Top 5 scorers of the season
df_total <- aggregate(PTS_MARQUES ~ SHOOTER, data = df, FUN = sum)
df_total_tri <- df_total[order(df_total$PTS_MARQUES, decreasing = TRUE), ]
df_top5 <- df_total_tri[1:5, ]

# A plot adapted to the type of each variable:
# boxplot for numeric columns, barplot of counts for factors.
build_graph <- function(une_colonne, nom_colonne) {
  if (is.numeric(une_colonne)) {
    boxplot(une_colonne, main = nom_colonne)
  } else if (is.factor(une_colonne)) {
    tri <- table(une_colonne)
    barplot(tri, main = nom_colonne)
  }
}

# Run the function on every column of the data frame
for (colonne in colnames(df)) {
  build_graph(une_colonne = df[[colonne]], nom_colonne = colonne)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{RTopicModel}
\alias{RTopicModel}
\title{Access MALLET's glue model object}
\usage{
RTopicModel(m)
}
\arguments{
\item{m}{a \code{mallet_model} object}
}
\value{
a reference to the RTopicModel object (or NULL if unavailable)
}
\description{
This function returns a reference to the top-level Java object representing
an LDA model.
}
\details{
For its R interface, MALLET uses a class RTopicModel. This has some
convenience methods for accessing and manipulating a topic model from
R using rJava. It is also used by the functions in the \pkg{mallet}
package.
In earlier versions of MALLET, this object had a data member of class
ParallelTopicModel. In the latest MALLET, RTopicModel inherits from
ParallelTopicModel.
}
| /man/RTopicModel.Rd | permissive | regan008/dfrtopics | R | false | true | 820 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{RTopicModel}
\alias{RTopicModel}
\title{Access MALLET's glue model object}
\usage{
RTopicModel(m)
}
\arguments{
\item{m}{a \code{mallet_model} object}
}
\value{
a reference to the RTopicModel object (or NULL if unavailable)
}
\description{
This function returns a reference to the top-level Java object representing
an LDA model.
}
\details{
For its R interface, MALLET uses a class RTopicModel. This has some
convenience methods for accessing and manipulating a topic model from
R using rJava. It is also used by the functions in the \pkg{mallet}
package.
In earlier versions of MALLET, this object had a data member of class
ParallelTopicModel. In the latest MALLET, RTopicModel inherits from
ParallelTopicModel.
}
|
library(glmnet)

# Read the training set: comma-separated file with a header row.
# read.csv replaces the partially-matched `head = T` argument of read.table.
mydata <- read.csv("./TrainingSet/RF/NSCLC.csv", header = TRUE)

# Column 1 is the response; predictors start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# 10-fold cross-validated elastic net (alpha = 0.25) with MAE loss.
# Named `fit` so it does not mask stats::glm.
set.seed(123)
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.25,
                 family = "gaussian", standardize = TRUE)

# Append the fitted model summary to the log file, then restore output.
sink("./Model/EN/Classifier/NSCLC/NSCLC_039.txt", append = TRUE)
print(fit$glmnet.fit)
sink()
| /Model/EN/Classifier/NSCLC/NSCLC_039.R | no_license | leon1003/QSMART | R | false | false | 347 | r | library(glmnet)
# Read the training set: comma-separated file with a header row.
# read.csv replaces the partially-matched `head = T` argument of read.table.
mydata <- read.csv("./TrainingSet/RF/NSCLC.csv", header = TRUE)

# Column 1 is the response; predictors start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# 10-fold cross-validated elastic net (alpha = 0.25) with MAE loss.
# Named `fit` so it does not mask stats::glm.
set.seed(123)
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.25,
                 family = "gaussian", standardize = TRUE)

# Append the fitted model summary to the log file, then restore output.
sink("./Model/EN/Classifier/NSCLC/NSCLC_039.txt", append = TRUE)
print(fit$glmnet.fit)
sink()
|
# Create internal data for the cat_trauma function.
# Read the lookup tables from the lookup-tables folder and add them to the
# package sysdata object.
library(dplyr)

list.files("./lookup_tables")

# Column classes shared by all severity lookup tables
sev_cc <- c("character", "integer", "character")

# ICD-10 mapping to ISS using Dave's first empirical method
i10_map_emp <- read.csv("./lookup_tables/i10_map_emp.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# ICD-10 mapping to ISS using Dave's ROC-max method
i10_map_roc <- read.csv("./lookup_tables/i10_map_roc.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# ICD-10 mappings using the GEM - two deduplication methods
i10_map_max <- read.csv("./lookup_tables/i10_map_max.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
i10_map_min <- read.csv("./lookup_tables/i10_map_min.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# Original ICD-9 mapping with a few changes
ntab_s1 <- read.csv("./lookup_tables/ntab_s1.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# i10_ecode (mechanism code) table created by Adam
i10_ecode <- read.csv("./lookup_tables/i10_ecode.csv", stringsAsFactors = FALSE, colClasses = "character")
# Original ecode mapping changed to text instead of numeric codes
etab_s1 <- read.csv("./lookup_tables/etab_s1.csv", stringsAsFactors = FALSE, colClasses = "character")

# Check frequencies of issbr across all severity tables
library(purrr)
library(dplyr)
l <- lst(i10_map_emp, i10_map_roc, i10_map_max, i10_map_min, ntab_s1)
n <- names(l)
map2(l, n, ~ count(.x, issbr, name = .y)) %>%
  reduce(full_join, by = "issbr")

# Check column classes. These must ultimately be the same so the tables can
# be combined with rbind().
rbind(
  sapply(i10_ecode, class),
  sapply(etab_s1, class)
)
rbind(
  sapply(ntab_s1, class),
  sapply(i10_map_max, class),
  sapply(i10_map_min, class),
  sapply(i10_map_emp, class),
  sapply(i10_map_roc, class)
)
# Column classes look good

# Test binding of the datasets (no errors expected)
head(rbind(ntab_s1, i10_map_emp))
head(rbind(ntab_s1, i10_map_roc))
head(rbind(ntab_s1, i10_map_min))
head(rbind(ntab_s1, i10_map_max))
head(rbind(etab_s1, i10_ecode))

# Create the internal package data
usethis::use_data(
  i10_map_min,
  i10_map_max,
  i10_map_emp,
  i10_map_roc,
  ntab_s1,
  etab_s1,
  i10_ecode,
  internal = TRUE, overwrite = TRUE)

# NOTE: the empirical mapping requires that trailing letters on ICD-10 codes
# are stripped off. Specifically, all characters after the first letter
# following the decimal should be dropped.
head(i10_map_emp)

# Add the prelim directory to .Rbuildignore
usethis::use_build_ignore("prelim")
| /prelim/create sysdata.R | no_license | Liquidten/icdpicr | R | false | false | 2,589 | r | # create internal data for cat_trauma function
# Read the lookup tables from the lookup-tables folder and add them to the
# package sysdata object.
library(dplyr)

list.files("./lookup_tables")

# Column classes shared by all severity lookup tables
sev_cc <- c("character", "integer", "character")

# ICD-10 mapping to ISS using Dave's first empirical method
i10_map_emp <- read.csv("./lookup_tables/i10_map_emp.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# ICD-10 mapping to ISS using Dave's ROC-max method
i10_map_roc <- read.csv("./lookup_tables/i10_map_roc.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# ICD-10 mappings using the GEM - two deduplication methods
i10_map_max <- read.csv("./lookup_tables/i10_map_max.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
i10_map_min <- read.csv("./lookup_tables/i10_map_min.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# Original ICD-9 mapping with a few changes
ntab_s1 <- read.csv("./lookup_tables/ntab_s1.csv", stringsAsFactors = FALSE, colClasses = sev_cc)
# i10_ecode (mechanism code) table created by Adam
i10_ecode <- read.csv("./lookup_tables/i10_ecode.csv", stringsAsFactors = FALSE, colClasses = "character")
# Original ecode mapping changed to text instead of numeric codes
etab_s1 <- read.csv("./lookup_tables/etab_s1.csv", stringsAsFactors = FALSE, colClasses = "character")

# Check frequencies of issbr across all severity tables
library(purrr)
library(dplyr)
l <- lst(i10_map_emp, i10_map_roc, i10_map_max, i10_map_min, ntab_s1)
n <- names(l)
map2(l, n, ~ count(.x, issbr, name = .y)) %>%
  reduce(full_join, by = "issbr")

# Check column classes. These must ultimately be the same so the tables can
# be combined with rbind().
rbind(
  sapply(i10_ecode, class),
  sapply(etab_s1, class)
)
rbind(
  sapply(ntab_s1, class),
  sapply(i10_map_max, class),
  sapply(i10_map_min, class),
  sapply(i10_map_emp, class),
  sapply(i10_map_roc, class)
)
# Column classes look good

# Test binding of the datasets (no errors expected)
head(rbind(ntab_s1, i10_map_emp))
head(rbind(ntab_s1, i10_map_roc))
head(rbind(ntab_s1, i10_map_min))
head(rbind(ntab_s1, i10_map_max))
head(rbind(etab_s1, i10_ecode))

# Create the internal package data
usethis::use_data(
  i10_map_min,
  i10_map_max,
  i10_map_emp,
  i10_map_roc,
  ntab_s1,
  etab_s1,
  i10_ecode,
  internal = TRUE, overwrite = TRUE)

# NOTE: the empirical mapping requires that trailing letters on ICD-10 codes
# are stripped off. Specifically, all characters after the first letter
# following the decimal should be dropped.
head(i10_map_emp)

# Add the prelim directory to .Rbuildignore
usethis::use_build_ignore("prelim")
|
# Attach dependencies with library() so a missing package fails loudly;
# require() only returns FALSE and would let the script continue broken.
library(tidyverse)
library(GenomicRanges)
library(biomaRt)
library(devtools)
load_all()
# GENES -------------------------------------------------------------------
# all genes in the form of a granges object
load("r_data/column_annotation/gene_list_all.RData")
# ROI ---------------------------------------------------------------------
# the regulatory regions (2kb window around tsss)
load("r_data/column_annotation/roi.RData")
# PREPARE BLUEPRINT DATA --------------------------------------------------
# write the csv file for input
blueprint_parsed = prep_blueprint_chip(blueprint_data="data/blueprint_files.tsv", root="/GWD/bioinfo/projects/RD-Epigenetics-NetworkData/otar_020/BLUEPRINT/", out_file="data/blueprint_parsed.csv")
marks = c("H3K27ac","H3K4me3","H3K27me3")
blueprint_input = "data/blueprint_parsed.csv"
require(BiocParallel)
# blueprint_chip = bplapply(seq(along=marks), function(x) make_auc_matrix(blueprint_input, roi, marks[x], "tmp/", quantile_norm=FALSE), BPPARAM=MulticoreParam(workers=3))
blueprint_chip = vector("list", length=length(marks))
for(i in 1:length(blueprint_chip)) {
blueprint_chip[[i]] = make_auc_matrix(blueprint_input, roi, marks[i], "tmp/", quantile_norm=TRUE)
}
blueprint_rna = prep_blueprint_rna(quantile_norm=TRUE)
# PLOT --------------------------------------------------------------------
plot_pca(blueprint_chip[[1]]$res, blueprint_chip[[1]]$annot$`Cell type`, "Blueprint", out_file="out_1.png")
plot_pca(blueprint_chip[[2]]$res, blueprint_chip[[2]]$annot$`Cell type`, "Blueprint", out_file="out_2.png")
plot_pca(blueprint_chip[[3]]$res, blueprint_chip[[3]]$annot$`Cell type`, "Blueprint", out_file="out_3.png")
# COMBINE CHIP/RNA --------------------------------------------------------
# filter genes
blueprint_rna$res = blueprint_rna$res[,match(roi$gene, colnames(blueprint_rna$res))]
# what donor / cell type combinations are in the rna data?
rna_labels = paste(blueprint_rna$annot$Comment.donor.ID., blueprint_rna$annot$Characteristics.cell.type., sep="_")
donor_cell = unique(rna_labels)
chip_matches = lapply(blueprint_chip, function(x) match(donor_cell, x$annot$Label))
rna_matches = match(donor_cell, rna_labels)
# filter by above indices
blueprint_chip_cut = vector("list", length(blueprint_chip))
for(i in 1:length(blueprint_chip)) {
blueprint_chip_cut[[i]] = list(res=blueprint_chip[[i]]$res[chip_matches[[i]],], annot=blueprint_chip[[i]]$annot[chip_matches[[i]],])
}
blueprint_rna_cut = lapply(blueprint_rna, function(x) x[rna_matches,])
# filter missing data
final_ix = unique(unlist(lapply(blueprint_chip_cut, function(x) which(apply(x[[1]], 1, function(x) !all(is.na(x)))))))
blueprint_chip_cut_final = vector("list", length(blueprint_chip))
for(i in 1:length(blueprint_chip_cut)) {
blueprint_chip_cut_final[[i]] = list(res=blueprint_chip_cut[[i]]$res[final_ix,], annot=blueprint_chip_cut[[i]]$annot[final_ix,])
}
blueprint_rna_cut_final = lapply(blueprint_rna, function(x) x[final_ix,])
dim_1 = dim(blueprint_rna_cut_final$res)[1]
dim_2 = dim(blueprint_rna_cut_final$res)[2]
all_data = data.frame(
rna_response = log(as.numeric(t(blueprint_rna_cut_final$res))),
chip_k27ac = log(as.numeric(t(blueprint_chip_cut_final[[1]]$res))),
chip_k4me3 = log(as.numeric(t(blueprint_chip_cut_final[[2]]$res))),
chip_k27me3 = log(as.numeric(t(blueprint_chip_cut_final[[3]]$res))),
group = factor(rep(blueprint_chip_cut_final[[1]]$annot$Group, each=dim_2)),
donor = factor(rep(blueprint_chip_cut_final[[1]]$annot$Donor, each=dim_2)),
cell_type = factor(rep(blueprint_chip_cut_final[[1]]$annot$`Cell type`, each=dim_2)),
gene = factor(rep(roi$gene, dim_1))
)
all_data[all_data==-Inf] = 0 # reconvert to 0
dat = group_by(all_data, donor, cell_type, group) %>% nest() # group by donor / cell type
dat_2 = dat$data[[33]] # pick first donor / cell type
png(filename="out.png", height=800, width=1200)
ggpairs(dplyr::select(dat_2, rna_response, chip_k27ac, chip_k4me3, chip_k27me3)) + theme_thesis()
dev.off()
| /vignettes/blueprint.R | no_license | aidanmacnamara/epiChoose | R | false | false | 4,107 | r |
# library() errors immediately if a dependency is missing (require() only warns)
library(tidyverse)
library(GenomicRanges)
library(biomaRt)
library(devtools)
load_all()
# GENES -------------------------------------------------------------------
# all genes in the form of a granges object
load("r_data/column_annotation/gene_list_all.RData")
# ROI ---------------------------------------------------------------------
# the regulatory regions (2kb window around tsss)
load("r_data/column_annotation/roi.RData")
# PREPARE BLUEPRINT DATA --------------------------------------------------
# write the csv file for input
blueprint_parsed <- prep_blueprint_chip(blueprint_data = "data/blueprint_files.tsv", root = "/GWD/bioinfo/projects/RD-Epigenetics-NetworkData/otar_020/BLUEPRINT/", out_file = "data/blueprint_parsed.csv")
marks <- c("H3K27ac", "H3K4me3", "H3K27me3")
blueprint_input <- "data/blueprint_parsed.csv"
library(BiocParallel)
# blueprint_chip = bplapply(seq(along=marks), function(x) make_auc_matrix(blueprint_input, roi, marks[x], "tmp/", quantile_norm=FALSE), BPPARAM=MulticoreParam(workers=3))
# One AUC matrix per histone mark, computed serially (parallel version above).
# seq_along() is used instead of 1:length() so an empty list yields no iterations.
blueprint_chip <- vector("list", length = length(marks))
for (i in seq_along(blueprint_chip)) {
  blueprint_chip[[i]] <- make_auc_matrix(blueprint_input, roi, marks[i], "tmp/", quantile_norm = TRUE)
}
blueprint_rna <- prep_blueprint_rna(quantile_norm = TRUE)
# PLOT --------------------------------------------------------------------
plot_pca(blueprint_chip[[1]]$res, blueprint_chip[[1]]$annot$`Cell type`, "Blueprint", out_file = "out_1.png")
plot_pca(blueprint_chip[[2]]$res, blueprint_chip[[2]]$annot$`Cell type`, "Blueprint", out_file = "out_2.png")
plot_pca(blueprint_chip[[3]]$res, blueprint_chip[[3]]$annot$`Cell type`, "Blueprint", out_file = "out_3.png")
# COMBINE CHIP/RNA --------------------------------------------------------
# filter genes: keep only the RNA columns matching the regions of interest
blueprint_rna$res <- blueprint_rna$res[, match(roi$gene, colnames(blueprint_rna$res))]
# what donor / cell type combinations are in the rna data?
rna_labels <- paste(blueprint_rna$annot$Comment.donor.ID., blueprint_rna$annot$Characteristics.cell.type., sep = "_")
donor_cell <- unique(rna_labels)
chip_matches <- lapply(blueprint_chip, function(x) match(donor_cell, x$annot$Label))
rna_matches <- match(donor_cell, rna_labels)
# filter by above indices
blueprint_chip_cut <- vector("list", length(blueprint_chip))
for (i in seq_along(blueprint_chip)) {
  blueprint_chip_cut[[i]] <- list(res = blueprint_chip[[i]]$res[chip_matches[[i]], ], annot = blueprint_chip[[i]]$annot[chip_matches[[i]], ])
}
blueprint_rna_cut <- lapply(blueprint_rna, function(x) x[rna_matches, ])
# filter missing data: keep rows with at least one non-NA value in any mark
final_ix <- unique(unlist(lapply(blueprint_chip_cut, function(x) which(apply(x[[1]], 1, function(x) !all(is.na(x)))))))
blueprint_chip_cut_final <- vector("list", length(blueprint_chip))
for (i in seq_along(blueprint_chip_cut)) {
  blueprint_chip_cut_final[[i]] <- list(res = blueprint_chip_cut[[i]]$res[final_ix, ], annot = blueprint_chip_cut[[i]]$annot[final_ix, ])
}
# NOTE(review): this subsets the *unfiltered* blueprint_rna; blueprint_rna_cut
# was probably intended so rows align with blueprint_chip_cut_final -- confirm.
blueprint_rna_cut_final <- lapply(blueprint_rna, function(x) x[final_ix, ])
dim_1 <- dim(blueprint_rna_cut_final$res)[1]
dim_2 <- dim(blueprint_rna_cut_final$res)[2]
# Long-format table: one row per (sample, gene), log-transformed signal
all_data <- data.frame(
  rna_response = log(as.numeric(t(blueprint_rna_cut_final$res))),
  chip_k27ac = log(as.numeric(t(blueprint_chip_cut_final[[1]]$res))),
  chip_k4me3 = log(as.numeric(t(blueprint_chip_cut_final[[2]]$res))),
  chip_k27me3 = log(as.numeric(t(blueprint_chip_cut_final[[3]]$res))),
  group = factor(rep(blueprint_chip_cut_final[[1]]$annot$Group, each = dim_2)),
  donor = factor(rep(blueprint_chip_cut_final[[1]]$annot$Donor, each = dim_2)),
  cell_type = factor(rep(blueprint_chip_cut_final[[1]]$annot$`Cell type`, each = dim_2)),
  gene = factor(rep(roi$gene, dim_1))
)
all_data[all_data == -Inf] <- 0 # log(0) gives -Inf; reconvert to 0
dat <- group_by(all_data, donor, cell_type, group) %>% nest() # group by donor / cell type
dat_2 <- dat$data[[33]] # pick one donor / cell type combination (index 33)
png(filename = "out.png", height = 800, width = 1200)
ggpairs(dplyr::select(dat_2, rna_response, chip_k27ac, chip_k4me3, chip_k27me3)) + theme_thesis()
dev.off()
|
# Matrix for Free Throws
# Bind the given vectors to form the matrix
FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
# Remove vectors - we don't need them anymore
rm(KobeBryant_FT, JoeJohnson_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, LeBronJames_FT, ChrisPaul_FT, DerrickRose_FT, DwayneWade_FT, KevinDurant_FT)
# Rename the columns
colnames(FreeThrows) <- Seasons
# Rename the rows
rownames(FreeThrows) <- Players
# Check the matrix
FreeThrows
# Matrix for Free Throw Attempts
# Bind the given vectors to form the matrix
FreeThrowAttempts <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
# Remove vectors - we don't need them anymore
rm(KobeBryant_FTA, JoeJohnson_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, LeBronJames_FTA, ChrisPaul_FTA, DerrickRose_FTA, DwayneWade_FTA, KevinDurant_FTA)
# Rename the columns
colnames(FreeThrowAttempts) <- Seasons
# Rename the rows
rownames(FreeThrowAttempts) <- Players
# Check the matrix
FreeThrowAttempts
# Re-create the plotting function
# z: a players x seasons matrix; who: row indices of the players to plot
myplot_HW <- function(z, who = 1:10) {
  # drop = FALSE keeps a single-row selection as a matrix so t() behaves;
  # TRUE/FALSE spelled out because T/F are reassignable variables
  matplot(t(z[who, , drop = FALSE]), type = "b", pch = 15:18, col = c(1:4, 6), main = "Basketball Players Analysis")
  legend("bottomleft", inset = 0.01, legend = Players[who], col = c(1:4, 6), pch = 15:18, horiz = FALSE)
}
# Visualize the new matrices
View(FreeThrows)
View(FreeThrowAttempts)
# Part 1 - Free Throw Attempts Per Game
# (You will need the Games matrix)
myplot_HW(FreeThrowAttempts/Games)
# Notice how Chris Paul gets few attempts per game
# Part 2 - Free Throw Accuracy
myplot_HW(FreeThrows/FreeThrowAttempts)
# And yet Chris Paul's accuracy is one of the highest
# Chances are his team would get more points if he had more FTA's
# Also notice that Dwight Howard's FT Accuracy is extremely poor
# compared to other players. If you recall, Dwight Howard's
# Field Goal Accuracy was exceptional:
myplot_HW(FieldGoals/FieldGoalAttempts)
# How could this be? Why is there such a drastic difference?
# We will see just now...
# Part 3 - Player Style Patterns Excluding Free Throws
myplot_HW((Points-FreeThrows)/FieldGoals)
# Because we have excluded free throws, this plot now shows us
# the true representation of player style change. We can verify
# that this is the case because all the marks without exception
# on this plot are between 2 and 3. That is because Field Goals
# can only be for either 2 points or 3 points.
# Insights:
# 1. You can see how players' preference for 2 or 3 point shots
#    changes throughout their career. We can see that almost all
#    players in this dataset experiment with their style throughout
#    their careers. Perhaps, the most drastic change in style has
#    been experienced by Joe Johnson.
# 2. There is one exception. You can see that one player has not
#    changed his style at all - almost always scoring only 2-pointers.
#    Who is this mystery player? It's Dwight Howard!
#    Now that explains a lot. The reason that Dwight Howard's
#    Field Goal accuracy is so good is because he almost always
#    scores 2-pointers only. That means he can be close to the basket
#    or even in contact with it. Free throws, on the other hand require
#    the player to stand 15ft (4.57m) away from the hoop. That's
#    probably why Dwight Howard's Free Throw Accuracy is poor.
| /Section4-Homework.R | no_license | ShivKumarBS/R_Udemy | R | false | false | 3,547 | r | #Matrix for Free Throws
# Bind the given vectors to form the matrix
FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
# Remove vectors - we don't need them anymore
rm(KobeBryant_FT, JoeJohnson_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, LeBronJames_FT, ChrisPaul_FT, DerrickRose_FT, DwayneWade_FT, KevinDurant_FT)
# Rename the columns
colnames(FreeThrows) <- Seasons
# Rename the rows
rownames(FreeThrows) <- Players
# Check the matrix
FreeThrows
# Matrix for Free Throw Attempts
# Bind the given vectors to form the matrix
FreeThrowAttempts <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
# Remove vectors - we don't need them anymore
rm(KobeBryant_FTA, JoeJohnson_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, LeBronJames_FTA, ChrisPaul_FTA, DerrickRose_FTA, DwayneWade_FTA, KevinDurant_FTA)
# Rename the columns
colnames(FreeThrowAttempts) <- Seasons
# Rename the rows
rownames(FreeThrowAttempts) <- Players
# Check the matrix
FreeThrowAttempts
# Re-create the plotting function
# z: a players x seasons matrix; who: row indices of the players to plot
myplot_HW <- function(z, who = 1:10) {
  # drop = FALSE keeps a single-row selection as a matrix so t() behaves;
  # TRUE/FALSE spelled out because T/F are reassignable variables
  matplot(t(z[who, , drop = FALSE]), type = "b", pch = 15:18, col = c(1:4, 6), main = "Basketball Players Analysis")
  legend("bottomleft", inset = 0.01, legend = Players[who], col = c(1:4, 6), pch = 15:18, horiz = FALSE)
}
# Visualize the new matrices
View(FreeThrows)
View(FreeThrowAttempts)
# Part 1 - Free Throw Attempts Per Game
# (You will need the Games matrix)
myplot_HW(FreeThrowAttempts/Games)
# Notice how Chris Paul gets few attempts per game
# Part 2 - Free Throw Accuracy
myplot_HW(FreeThrows/FreeThrowAttempts)
# And yet Chris Paul's accuracy is one of the highest
# Chances are his team would get more points if he had more FTA's
# Also notice that Dwight Howard's FT Accuracy is extremely poor
# compared to other players. If you recall, Dwight Howard's
# Field Goal Accuracy was exceptional:
myplot_HW(FieldGoals/FieldGoalAttempts)
# How could this be? Why is there such a drastic difference?
# We will see just now...
# Part 3 - Player Style Patterns Excluding Free Throws
myplot_HW((Points-FreeThrows)/FieldGoals)
# Because we have excluded free throws, this plot now shows us
# the true representation of player style change. We can verify
# that this is the case because all the marks without exception
# on this plot are between 2 and 3. That is because Field Goals
# can only be for either 2 points or 3 points.
# Insights:
# 1. You can see how players' preference for 2 or 3 point shots
#    changes throughout their career. We can see that almost all
#    players in this dataset experiment with their style throughout
#    their careers. Perhaps, the most drastic change in style has
#    been experienced by Joe Johnson.
# 2. There is one exception. You can see that one player has not
#    changed his style at all - almost always scoring only 2-pointers.
#    Who is this mystery player? It's Dwight Howard!
#    Now that explains a lot. The reason that Dwight Howard's
#    Field Goal accuracy is so good is because he almost always
#    scores 2-pointers only. That means he can be close to the basket
#    or even in contact with it. Free throws, on the other hand require
#    the player to stand 15ft (4.57m) away from the hoop. That's
#    probably why Dwight Howard's Free Throw Accuracy is poor.
# Scratch array examples; note the first y below is dead code -- it is
# immediately overwritten by rnorm(30) two lines later.
x<-array(1:24,c(3,4,2))
y<-array(1:36,c(3,4,3))
y<-rnorm(30)
z<-list(x,y)
#Plotting starts from here
# Demo 1: anatomy of a base-graphics figure (plot region vs. margins).
par(mar=c(5,5,5,5)+0.1)
plot(c(1,9),c(0,50),type='n',xlab="",ylab="")
text(6,40,"plot region")
points(6,20)
text(6,20,"(6,20)",adj=c(0.5,2))
# Label each of the four margins, then the individual margin lines 0-4
# on every side, to show where mtext() places text.
mtext(paste("margin",1:4),side=1:4,line=3)
mtext(paste("line",0:4),side=1,line=0:4,at=3,cex=0.6)
mtext(paste("line",0:4),side=2,line=0:4,at=15,cex=0.6)
mtext(paste("line",0:4),side=3,line=0:4,at=3,cex=0.6)
mtext(paste("line",0:4),side=4,line=0:4,at=15,cex=0.6)
lines(0:50)
#next series
# Demo 2: scatter plot with mean reference lines and annotated offsets.
x<-1:10
y<-11:20
plot(x,y)
abline(v=mean(x),h=mean(y))
lines(c(2,2,5.5),c(12,12,12),lwd=4,lty="solid",col="red")
lines(c(2,2,2),c(12,12,15.5),lwd=4,lty="solid",col="red")
text(4,11.5,"x-mean(x)",cex=0.8)
text(1.32,13.5,"y-mean(y)",cex=0.8)
#next series
# Demo 3: free-hand drawing on a unit square -- diagonals, border, grid
# lines, four star markers, and concentric circles at the centre.
par(mar=c(3,6,2,2),xaxs="i",yaxs="i",xpd=FALSE,las=1)
plot(c(0,1),c(0,1),type="n",ann=FALSE,axes=FALSE)
box("figure")
lines(c(0,0.5,1),c(1,.5,0),lwd=4,lty="solid",col="red")
lines(c(0,0.5,1),c(0,.5,1),lwd=4,lty="solid",col="red")
lines(c(0,0.5,1),c(1,1,1),lwd=4,lty="solid",col="black")
lines(c(0,0,0),c(1,0.5,0),lwd=4,lty="solid",col="black")
lines(c(0,0.5,1),c(0,0,0),lwd=4,lty="solid",col="black")
lines(c(1,1,1),c(1,0.5,0),lwd=4,lty="solid",col="black")
lines(c(0.5,0.5,0.5),c(1,0.5,0),lwd=4,lty="solid",col="black")
lines(c(0,0.5,1),c(0.5,0.5,0.5),lwd=4,lty="solid",col="black")
points(.25,.25,pch=8,cex=10,lwd=4,col="blue")
points(.25,.75,pch=8,cex=10,lwd=4,col="blue")
points(.75,.75,pch=8,cex=10,lwd=4,col="blue")
points(.75,.25,pch=8,cex=10,lwd=4,col="blue")
# Concentric circles of varying size/weight at the centre point.
points(.5,.5,pch=1,cex=1,lwd=10,col="red")
points(.5,.5,pch=1,cex=10,lwd=4,col="yellow")
points(.5,.5,pch=1,cex=5,lwd=10,col="violet")
points(.5,.5,pch=1,cex=8,lwd=10,col="green")
| /Plotting.R | no_license | ironhidee/RCode | R | false | false | 1,732 | r | x<-array(1:24,c(3,4,2))
# Scratch array example; note this y is dead code -- it is immediately
# overwritten by rnorm(30) on the next line.
y<-array(1:36,c(3,4,3))
y<-rnorm(30)
z<-list(x,y)
#Plotting starts from here
# Demo 1: anatomy of a base-graphics figure (plot region vs. margins).
par(mar=c(5,5,5,5)+0.1)
plot(c(1,9),c(0,50),type='n',xlab="",ylab="")
text(6,40,"plot region")
points(6,20)
text(6,20,"(6,20)",adj=c(0.5,2))
# Label each of the four margins, then the individual margin lines 0-4
# on every side, to show where mtext() places text.
mtext(paste("margin",1:4),side=1:4,line=3)
mtext(paste("line",0:4),side=1,line=0:4,at=3,cex=0.6)
mtext(paste("line",0:4),side=2,line=0:4,at=15,cex=0.6)
mtext(paste("line",0:4),side=3,line=0:4,at=3,cex=0.6)
mtext(paste("line",0:4),side=4,line=0:4,at=15,cex=0.6)
lines(0:50)
#next series
# Demo 2: scatter plot with mean reference lines and annotated offsets.
x<-1:10
y<-11:20
plot(x,y)
abline(v=mean(x),h=mean(y))
lines(c(2,2,5.5),c(12,12,12),lwd=4,lty="solid",col="red")
lines(c(2,2,2),c(12,12,15.5),lwd=4,lty="solid",col="red")
text(4,11.5,"x-mean(x)",cex=0.8)
text(1.32,13.5,"y-mean(y)",cex=0.8)
#next series
# Demo 3: free-hand drawing on a unit square -- diagonals, border, grid
# lines, four star markers, and concentric circles at the centre.
par(mar=c(3,6,2,2),xaxs="i",yaxs="i",xpd=FALSE,las=1)
plot(c(0,1),c(0,1),type="n",ann=FALSE,axes=FALSE)
box("figure")
lines(c(0,0.5,1),c(1,.5,0),lwd=4,lty="solid",col="red")
lines(c(0,0.5,1),c(0,.5,1),lwd=4,lty="solid",col="red")
lines(c(0,0.5,1),c(1,1,1),lwd=4,lty="solid",col="black")
lines(c(0,0,0),c(1,0.5,0),lwd=4,lty="solid",col="black")
lines(c(0,0.5,1),c(0,0,0),lwd=4,lty="solid",col="black")
lines(c(1,1,1),c(1,0.5,0),lwd=4,lty="solid",col="black")
lines(c(0.5,0.5,0.5),c(1,0.5,0),lwd=4,lty="solid",col="black")
lines(c(0,0.5,1),c(0.5,0.5,0.5),lwd=4,lty="solid",col="black")
points(.25,.25,pch=8,cex=10,lwd=4,col="blue")
points(.25,.75,pch=8,cex=10,lwd=4,col="blue")
points(.75,.75,pch=8,cex=10,lwd=4,col="blue")
points(.75,.25,pch=8,cex=10,lwd=4,col="blue")
# Concentric circles of varying size/weight at the centre point.
points(.5,.5,pch=1,cex=1,lwd=10,col="red")
points(.5,.5,pch=1,cex=10,lwd=4,col="yellow")
points(.5,.5,pch=1,cex=5,lwd=10,col="violet")
points(.5,.5,pch=1,cex=8,lwd=10,col="green")
#' @include internal.R
NULL
#' Project cost effectiveness
#'
#' Calculate the individual cost-effectiveness of each conservation project
#' in a project prioritization \code{\link{problem}}
#' (Joseph, Maloney & Possingham 2009).
#'
#' @param x project prioritization \code{\link{problem}}.
#'
#' @details Note that project cost-effectiveness cannot be calculated for
#'   problems with minimum set objectives because the objective function
#'   for these problems is to minimize cost and not maximize some measure
#'   of biodiversity persistence.
#'
#' @return A \code{\link[tibble]{tibble}} table containing the following
#'   columns:
#'
#'   \describe{
#'
#'   \item{\code{"project"}}{\code{character} name of each project}
#'
#'   \item{\code{"cost"}}{\code{numeric} cost of each project.}
#'
#'   \item{\code{"benefit"}}{\code{numeric} benefit for each project. For a
#'     given project, this is calculated as the difference between (i) the
#'     objective value for a solution containing all of the management actions
#'     associated with the project and all zero cost actions, and (ii) the
#'     objective value for a solution containing the baseline project.}
#'
#'   \item{\code{"ce"}}{\code{numeric} cost-effectiveness of each project.
#'     For a given project, this is calculated as the difference between the
#'     the benefit for the project and the benefit for the baseline project,
#'     divided by the cost of the project. Note that the baseline
#'     project will have a \code{NaN} value because it has a zero cost.}
#'
#'   \item{\code{"rank"}}{\code{numeric} rank for each project according to
#'     is cost-effectiveness value. The project with a rank of one is the
#'     most cost-effective project. Ties are accommodated using averages.}
#'
#'   }
#'
#' @references
#' Joseph LN, Maloney RF & Possingham HP (2009) Optimal allocation of
#' resources among threatened species: A project prioritization protocol.
#' \emph{Conservation Biology}, \strong{23}, 328--338.
#'
#' @seealso \code{\link{solution_statistics}}, \code{\link{replacement_costs}}.
#'
#' @examples
#' # load data
#' data(sim_projects, sim_features, sim_actions)
#'
#' # print project data
#' print(sim_projects)
#'
#' # print feature data
#' print(sim_features)
#'
#' # print action data
#' print(sim_actions)
#'
#' # build problem
#' p <- problem(sim_projects, sim_actions, sim_features,
#'              "name", "success", "name", "cost", "name") %>%
#'      add_max_richness_objective(budget = 400) %>%
#'      add_feature_weights("weight") %>%
#'      add_binary_decisions()
#'
#' # print problem
#' print(p)
#'
#' # calculate cost-effectiveness of each project
#' pce <- project_cost_effectiveness(p)
#'
#' # print project costs, benefits, and cost-effectiveness values
#' print(pce)
#'
#' # plot histogram of cost-effectiveness values
#' hist(pce$ce, xlab = "Cost effectiveness", main = "")
#' @export
project_cost_effectiveness <- function(x) {
  # assert arguments are valid
  assertthat::assert_that(inherits(x, "ProjectProblem"))
  assertthat::assert_that(!is.Waiver(x$objective),
    msg = "argument to x does not have an objective specified.")
  assertthat::assert_that(!inherits(x$objective, "MinimumSetObjective"),
    msg = paste0("project cost effectiveness values cannot be (meaningfully) ",
                 "computed for minimum set problems."))
  # generate baseline-project solution: a single candidate solution that funds
  # only the zero-cost actions (the "do nothing" scenario)
  bpm <- matrix(x$action_costs() == 0, nrow = 1,
                dimnames = list(NULL, x$action_names()))
  bp_obj <- x$objective$evaluate(x, tibble::as_tibble(bpm))
  # generate solutions for other projects: one row per project, funding that
  # project's actions (elementwise | with the baseline keeps zero-cost actions)
  bpm <- bpm[rep(1, x$number_of_projects()), , drop = FALSE]
  pp <- methods::as(x$pa_matrix(), "lgCMatrix") |
        methods::as(bpm, "lgCMatrix")
  pp <- tibble::as_tibble(round(as.matrix(pp)))
  # evaluate solutions
  pp_obj <- x$objective$evaluate(x, pp)
  pp_costs <- x$project_costs()
  # cost-effectiveness: objective gain over baseline per unit cost
  # (NaN for the zero-cost baseline project itself)
  pp_ce <- (pp_obj - bp_obj) / pp_costs
  # return result (rank 1 = most cost-effective; ties receive averaged ranks)
  tibble::tibble(project = x$project_names(),
                 cost = unname(pp_costs),
                 obj = pp_obj,
                 benefit = pp_obj - bp_obj,
                 ce = unname(pp_ce),
                 rank = unname(rank(-pp_ce)))
}
| /fuzzedpackages/oppr/R/project_cost_effectiveness.R | no_license | akhikolla/testpackages | R | false | false | 4,211 | r | #' @include internal.R
NULL
#' Project cost effectiveness
#'
#' Calculate the individual cost-effectiveness of each conservation project
#' in a project prioritization \code{\link{problem}}
#' (Joseph, Maloney & Possingham 2009).
#'
#' @param x project prioritization \code{\link{problem}}.
#'
#' @details Note that project cost-effectiveness cannot be calculated for
#' problems with minimum set objectives because the objective function
#' for these problems is to minimize cost and not maximize some measure
#' of biodiversity persistence.
#'
#' @return A \code{\link[tibble]{tibble}} table containing the following
#' columns:
#'
#' \describe{
#'
#' \item{\code{"project"}}{\code{character} name of each project}
#'
#' \item{\code{"cost"}}{\code{numeric} cost of each project.}
#'
#' \item{\code{"benefit"}}{\code{numeric} benefit for each project. For a
#' given project, this is calculated as the difference between (i) the
#' objective value for a solution containing all of the management actions
#' associated with the project and all zero cost actions, and (ii) the
#' objective value for a solution containing the baseline project.}
#'
#' \item{\code{"ce"}}{\code{numeric} cost-effectiveness of each project.
#' For a given project, this is calculated as the difference between the
#' the benefit for the project and the benefit for the baseline project,
#' divided by the cost of the project. Note that the baseline
#' project will have a \code{NaN} value because it has a zero cost.}
#'
#' \item{\code{"rank"}}{\code{numeric} rank for each project according to
#' is cost-effectiveness value. The project with a rank of one is the
#' most cost-effective project. Ties are accommodated using averages.}
#'
#' }
#'
#' @references
#' Joseph LN, Maloney RF & Possingham HP (2009) Optimal allocation of
#' resources among threatened species: A project prioritization protocol.
#' \emph{Conservation Biology}, \strong{23}, 328--338.
#'
#' @seealso \code{\link{solution_statistics}}, \code{\link{replacement_costs}}.
#'
#' @examples
#' # load data
#' data(sim_projects, sim_features, sim_actions)
#'
#' # print project data
#' print(sim_projects)
#'
#' # print action data
#' print(sim_features)
#'
#' # print feature data
#' print(sim_actions)
#'
#' # build problem
#' p <- problem(sim_projects, sim_actions, sim_features,
#' "name", "success", "name", "cost", "name") %>%
#' add_max_richness_objective(budget = 400) %>%
#' add_feature_weights("weight") %>%
#' add_binary_decisions()
#'
#' # print problem
#' print(p)
#'
#' # calculate cost-effectiveness of each project
#' pce <- project_cost_effectiveness(p)
#'
#' # print project costs, benefits, and cost-effectiveness values
#' print(pce)
#'
#' # plot histogram of cost-effectiveness values
#' hist(pce$ce, xlab = "Cost effectiveness", main = "")
#' @export
project_cost_effectiveness <- function(x) {
# assert arguments are valid
assertthat::assert_that(inherits(x, "ProjectProblem"))
assertthat::assert_that(!is.Waiver(x$objective),
msg = "argument to x does not have an objective specified.")
assertthat::assert_that(!inherits(x$objective, "MinimumSetObjective"),
msg = paste0("project cost effectiveness values cannot be (meaningfully) ",
"computed for minimum set problems."))
# generate baseline- project solution
bpm <- matrix(x$action_costs() == 0, nrow = 1,
dimnames = list(NULL, x$action_names()))
bp_obj <- x$objective$evaluate(x, tibble::as_tibble(bpm))
# generate solutions for other projects
bpm <- bpm[rep(1, x$number_of_projects()), , drop = FALSE]
pp <- methods::as(x$pa_matrix(), "lgCMatrix") |
methods::as(bpm, "lgCMatrix")
pp <- tibble::as_tibble(round(as.matrix(pp)))
# evaluate solutions
pp_obj <- x$objective$evaluate(x, pp)
pp_costs <- x$project_costs()
pp_ce <- (pp_obj - bp_obj) / pp_costs
# return result
tibble::tibble(project = x$project_names(),
cost = unname(pp_costs),
obj = pp_obj,
benefit = pp_obj - bp_obj,
ce = unname(pp_ce),
rank = unname(rank(-pp_ce)))
}
|
## plot4.r
## use sql function to only load for desired dates
##library(sqldf)
# NOTE(review): read.csv.sql() below comes from the sqldf package, but the
# library(sqldf) call above is commented out -- this fails unless sqldf is
# already attached in the session; confirm and uncomment if needed.
proj1 <- read.csv.sql("household_power_consumption.txt",
"select * from file where Date in ('1/2/2007', '2/2/2007')",
sep=";")
## convert chr to date and time, pasting, using R functions
##
# Combine the Date and Time columns into a single datetime string column.
date_time <- paste(proj1$Date, proj1$Time)
proj2 <- cbind(date_time, proj1)
# cbind() coerced strings to factors; convert factor columns back to character.
i <- sapply(proj2, is.factor)
proj2[i] <- lapply(proj2[i], as.character)
# Parse the combined strings into date-time objects (POSIXlt).
proj2$date_time <- strptime(proj2$date_time, format = "%d/%m/%Y %H:%M:%S")
## 4 on a page
old.par <- par(mfrow=c(2, 2))
# plot 1
with(proj2,plot(date_time,Global_active_power,type="l", ylab = "Global Active Power (kilowatts)", xlab =""))
# plot 2
plot(proj2$date_time, proj2$Voltage, type="l", ylab="Voltage", xlab="datetime")
# plot 3
# Common y-range so all three sub-metering series fit on one panel.
yrange<-range(c(proj2$Sub_metering_1,proj2$Sub_metering_2,proj2$Sub_metering_3))
with(proj2, plot(date_time,Sub_metering_1, type = "l", xlab="", ylab="Energy Sub Metering", col="blue",ylim=yrange))
lines(proj2$date_time, proj2$Sub_metering_2, col="red")
lines(proj2$date_time, proj2$Sub_metering_3, col="green")
# NOTE(review): locator(1) waits for an interactive mouse click to position
# the legend -- this blocks when the script is run non-interactively.
legend(locator(1),pch = "-", col = c("blue", "red", "green"), legend = c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "), cex=0.3, bty="n")
# plot 4
plot(proj2$date_time, proj2$Global_reactive_power, type="l", ylab="Global_reactive_power", xlab="datetime")
## make a PNG
# Copy the current screen device to a PNG file (device closed by dev.off()).
dev.copy(png, file="plot4.png")
dev.off() | /plot4.R | no_license | e373b894/ExpDataAnalysis | R | false | false | 1,399 | r | ## plot4.r
## use sql function to only load for desired dates
##library(sqldf)
# NOTE(review): read.csv.sql() below comes from the sqldf package, but the
# library(sqldf) call above is commented out -- this fails unless sqldf is
# already attached in the session; confirm and uncomment if needed.
proj1 <- read.csv.sql("household_power_consumption.txt",
"select * from file where Date in ('1/2/2007', '2/2/2007')",
sep=";")
## convert chr to date and time, pasting, using R functions
##
# Combine the Date and Time columns into a single datetime string column.
date_time <- paste(proj1$Date, proj1$Time)
proj2 <- cbind(date_time, proj1)
# cbind() coerced strings to factors; convert factor columns back to character.
i <- sapply(proj2, is.factor)
proj2[i] <- lapply(proj2[i], as.character)
# Parse the combined strings into date-time objects (POSIXlt).
proj2$date_time <- strptime(proj2$date_time, format = "%d/%m/%Y %H:%M:%S")
## 4 on a page
old.par <- par(mfrow=c(2, 2))
# plot 1
with(proj2,plot(date_time,Global_active_power,type="l", ylab = "Global Active Power (kilowatts)", xlab =""))
# plot 2
plot(proj2$date_time, proj2$Voltage, type="l", ylab="Voltage", xlab="datetime")
# plot 3
# Common y-range so all three sub-metering series fit on one panel.
yrange<-range(c(proj2$Sub_metering_1,proj2$Sub_metering_2,proj2$Sub_metering_3))
with(proj2, plot(date_time,Sub_metering_1, type = "l", xlab="", ylab="Energy Sub Metering", col="blue",ylim=yrange))
lines(proj2$date_time, proj2$Sub_metering_2, col="red")
lines(proj2$date_time, proj2$Sub_metering_3, col="green")
# NOTE(review): locator(1) waits for an interactive mouse click to position
# the legend -- this blocks when the script is run non-interactively.
legend(locator(1),pch = "-", col = c("blue", "red", "green"), legend = c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "), cex=0.3, bty="n")
# plot 4
plot(proj2$date_time, proj2$Global_reactive_power, type="l", ylab="Global_reactive_power", xlab="datetime")
## make a PNG
# Copy the current screen device to a PNG file (device closed by dev.off()).
dev.copy(png, file="plot4.png")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rrs.R
\name{rrs}
\alias{rrs}
\title{A Robust Regression Simulation}
\usage{
rrs(formula, fixed = NA, fe = NA, data, k = 4)
}
\arguments{
\item{formula}{An R formula}
\item{fixed}{A vector of strings for variables that do not need to vary (default = NA)}
\item{fe}{A vector of strings for variables that are considered fixed effects (default = NA)}
\item{data}{The dataframe to be used}
\item{k}{Number of variables in each regression that vary (default = 4)}
}
\value{
A list of two dataframes, one with the coefficients and one with t-values of the variables
}
\description{
A Robust Regression Simulation
}
\examples{
result <- rrs(mpg ~., data = mtcars, k = 4)
}
| /man/rrs.Rd | no_license | Thdegraaff/rrsim | R | false | true | 748 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rrs.R
\name{rrs}
\alias{rrs}
\title{A Robust Regression Simulation}
\usage{
rrs(formula, fixed = NA, fe = NA, data, k = 4)
}
\arguments{
\item{formula}{An R formula}
\item{fixed}{A vector of strings for variables that do not need to vary (default = NA)}
\item{fe}{A vector of strings for variables that are considered fixed effects (default = NA)}
\item{data}{The dataframe to be used}
\item{k}{Number of variables in each regression that vary (default = 4)}
}
\value{
A list of two dataframes, one with the coefficients and one with t-values of the variables
}
\description{
A Robust Regression Simulation
}
\examples{
result <- rrs(mpg ~., data = mtcars, k = 4)
}
|
#'Graphical Test of Proportional Hazards with ggplot2
#'@description Displays a graph of the scaled Schoenfeld residuals, along with a
#' smooth curve using \pkg{ggplot2}. Wrapper around \link{plot.cox.zph}.
#'@param fit an object of class \link{cox.zph}.
#'@param resid a logical value, if TRUE the residuals are included on the plot,
#' as well as the smooth fit.
#'@param se a logical value, if TRUE, confidence bands at two standard errors
#' will be added.
#'@param df the degrees of freedom for the fitted natural spline, df=2 leads to
#' a linear fit.
#'@param nsmo number of points used to plot the fitted spline.
#'@param var the set of variables for which plots are desired. By default, plots
#' are produced in turn for each variable of a model.
#'@param point.col,point.size,point.shape,point.alpha color, size, shape and visibility to be used for points.
#'@param caption the caption of the final \link{grob} (\code{bottom} in \link{arrangeGrob})
#'@param ggtheme function, ggplot2 theme name. Default value is \link{theme_classic2}.
#' Allowed values include ggplot2 official themes: see \code{\link[ggplot2]{theme}}.
#'@param ... further arguments passed to either the print() function or to the \code{\link[ggpubr]{ggpar}} function for customizing the plot (see Details section).
#'@details \strong{Customizing the plots}: The plot can be easily
#' customized using additional arguments to be passed to the function ggpar().
#' Read ?ggpubr::ggpar. These arguments include
#' \emph{font.main,font.submain,font.caption,font.x,font.y,font.tickslab,font.legend}:
#' a vector of length 3 indicating respectively the size (e.g.: 14), the style
#' (e.g.: "plain", "bold", "italic", "bold.italic") and the color (e.g.: "red")
#' of main title, subtitle, caption, xlab and ylab and axis tick labels,
#' respectively. For example \emph{font.x = c(14, "bold", "red")}. Use font.x
#' = 14, to change only font size; or use font.x = "bold", to change only font
#' face.
#'@return Returns an object of class \code{ggcoxzph} which is a list of ggplots.
#'
#'@author Marcin Kosinski, \email{m.p.kosinski@@gmail.com}
#'
#'@examples
#'
#' library(survival)
#' fit <- coxph(Surv(futime, fustat) ~ age + ecog.ps + rx, data=ovarian)
#' cox.zph.fit <- cox.zph(fit)
#' # plot all variables
#' ggcoxzph(cox.zph.fit)
#' # plot all variables in specified order
#' ggcoxzph(cox.zph.fit, var = c("ecog.ps", "rx", "age"), font.main = 12)
#' # plot specified variables in specified order
#' ggcoxzph(cox.zph.fit, var = c("ecog.ps", "rx"), font.main = 12, caption = "Caption goes here")
#'
#'@describeIn ggcoxzph Graphical Test of Proportional Hazards using ggplot2.
#'@export
ggcoxzph <- function (fit, resid = TRUE, se = TRUE, df = 4, nsmo = 40, var,
                      point.col = "red", point.size = 1, point.shape = 19, point.alpha = 1,
                      caption = NULL,
                      ggtheme = theme_survminer(), ...){
  # Graphical test of proportional hazards: one ggplot of scaled Schoenfeld
  # residuals (plus a natural-spline smooth) per covariate of a cox.zph fit.
  # Returns a named list of ggplots classed "ggcoxzph"/"ggsurv", with the
  # global test p-value and the caption attached as attributes.
  x <- fit
  if(!methods::is(x, "cox.zph"))
    stop("Can't handle an object of class ", class(x))
  xx <- x$x
  yy <- x$y
  d <- nrow(yy)
  df <- max(df)
  nvar <- ncol(yy)
  # Natural-spline basis over transformed time: the first 'nsmo' rows (pmat)
  # form the smooth-prediction grid, the remaining rows (xmat) the observed
  # event times.
  pred.x <- seq(from = min(xx), to = max(xx), length = nsmo)
  temp <- c(pred.x, xx)
  lmat <- splines::ns(temp, df = df, intercept = TRUE)
  pmat <- lmat[1:nsmo, ]
  xmat <- lmat[-(1:nsmo), ]
  qmat <- qr(xmat)
  if (qmat$rank < df)
    stop("Spline fit is singular, try a smaller degrees of freedom")
  if (se) {
    # Pointwise variance factor of the smooth, used for the +/- 2 SE band.
    bk <- backsolve(qmat$qr[1:df, 1:df], diag(df))
    xtx <- bk %*% t(bk)
    seval <- d * ((pmat %*% xtx) * pmat) %*% rep(1, df)
  }
  ylab <- paste("Beta(t) for", dimnames(yy)[[2]])
  # Resolve requested variables (positions or names) to column indices.
  if (missing(var))
    var <- 1:nvar
  else {
    if (is.character(var))
      var <- match(var, dimnames(yy)[[2]])
    if (any(is.na(var)) || max(var) > nvar || min(var) < 1)
      stop("Invalid variable requested")
  }
  # Map the transformed time scale back to time units for the x axis.
  if (x$transform == "log") {
    xx <- exp(xx)
    pred.x <- exp(pred.x)
  }
  else if (x$transform != "identity") {
    # Arbitrary transform: build 8 axis breaks by interpolating between the
    # transformed values and the original event times.
    xtime <- as.numeric(dimnames(yy)[[1]])
    indx <- !duplicated(xx)
    apr1 <- approx(xx[indx], xtime[indx], seq(min(xx), max(xx),
                                              length = 17)[2 * (1:8)])
    temp <- signif(apr1$y, 2)
    apr2 <- approx(xtime[indx], xx[indx], temp)
    xaxisval <- apr2$y
    xaxislab <- rep("", 8)
    for (i in 1:8) xaxislab[i] <- format(temp[i])
  }
  plots <- lapply(var, function(i) {
    # Per-variable test p-value shown in the panel title.
    pval <- round(x$table[i, 3], 4)
    gplot <- ggplot() +
      labs(title = paste0('Schoenfeld Individual Test p: ', pval)) +
      ggtheme
    y <- yy[, i]
    yhat <- as.vector(pmat %*% qr.coef(qmat, y))
    if (resid)
      yr <- range(yhat, y)
    else yr <- range(yhat)
    if (se) {
      temp <- as.vector(2 * sqrt(x$var[i, i] * seval))
      yup <- yhat + temp
      ylow <- yhat - temp
      yr <- range(yr, yup, ylow)
    }
    # Smooth line; axis handling depends on the time transform used.
    if (x$transform == "identity") {
      gplot <- gplot + geom_line(aes(x = pred.x, y = yhat)) +
        xlab("Time") +
        ylab(ylab[i]) +
        ylim(yr)
    } else if (x$transform == "log") {
      gplot <- gplot + geom_line(aes(x = log(pred.x), y = yhat)) +
        xlab("Time") +
        ylab(ylab[i]) +
        ylim(yr)
    } else {
      gplot <- gplot + geom_line(aes(x = pred.x, y = yhat)) +
        xlab("Time") +
        ylab(ylab[i]) +
        scale_x_continuous(breaks = xaxisval,
                           labels = xaxislab) +
        ylim(yr)
    }
    if (resid)
      gplot <- gplot + geom_point(aes(x = xx, y = y),
                                  col = point.col, shape = point.shape,
                                  size = point.size, alpha = point.alpha)
    if (se) {
      gplot <- gplot + geom_line(aes(x = pred.x, y = yup), lty = "dashed") +
        geom_line(aes(x = pred.x, y = ylow), lty = "dashed")
    }
    ggpubr::ggpar(gplot, ...)
  })
  names(plots) <- var
  class(plots) <- c("ggcoxzph", "ggsurv", "list")
  if("GLOBAL" %in% rownames(x$table)) # case of multivariate Cox
    global_p <- x$table["GLOBAL", 3]
  else global_p <- NULL # Univariate Cox
  attr(plots, "global_pval") <- global_p
  attr(plots, "caption") <- caption
  plots
}
#' @param x an object of class ggcoxzph
#' @param newpage open a new page. See \code{\link{grid.arrange}}.
#' @method print ggcoxzph
#' @rdname ggcoxzph
#' @export
print.ggcoxzph <- function(x, ..., newpage = TRUE){
  # Arrange the per-variable ggplots stored in a 'ggcoxzph' object on one
  # page, aligning panel widths, with the global Schoenfeld test p-value on
  # top and the optional caption at the bottom.
  if(!inherits(x, "ggcoxzph"))
    stop("An object of class ggcoxzph is required.")
  plots <- x
  pval <- attr(x, "global_pval")
  grobs <- widths <- list()
  # Convert each plot to a grob and record its axis/panel widths so all
  # panels can be forced to a common width below.
  for (i in seq_along(plots)) {
    grobs[[i]] <- ggplotGrob(plots[[i]])
    widths[[i]] <- grobs[[i]]$widths[2:5]
  }
  maxwidth <- do.call(grid::unit.pmax, widths)
  for (i in seq_along(grobs)) {
    grobs[[i]]$widths[2:5] <- as.list(maxwidth)
  }
  if(!is.null(pval)) main <- paste0("Global Schoenfeld Test p: ", signif(pval, 4), "\n")
  else main <- NULL
  caption <- attr(plots, "caption")
  do.call(gridExtra::grid.arrange, c(grobs, top = main, bottom = caption, newpage = newpage))
}
| /R/ggcoxzph.R | no_license | woodhaha/survminer | R | false | false | 6,952 | r | #'Graphical Test of Proportional Hazards with ggplot2
#'@description Displays a graph of the scaled Schoenfeld residuals, along with a
#' smooth curve using \pkg{ggplot2}. Wrapper around \link{plot.cox.zph}.
#'@param fit an object of class \link{cox.zph}.
#'@param resid a logical value, if TRUE the residuals are included on the plot,
#' as well as the smooth fit.
#'@param se a logical value, if TRUE, confidence bands at two standard errors
#' will be added.
#'@param df the degrees of freedom for the fitted natural spline, df=2 leads to
#' a linear fit.
#'@param nsmo number of points used to plot the fitted spline.
#'@param var the set of variables for which plots are desired. By default, plots
#' are produced in turn for each variable of a model.
#'@param point.col,point.size,point.shape,point.alpha color, size, shape and visibility to be used for points.
#'@param caption the caption of the final \link{grob} (\code{bottom} in \link{arrangeGrob})
#'@param ggtheme function, ggplot2 theme name. Default value is \link{theme_classic2}.
#' Allowed values include ggplot2 official themes: see \code{\link[ggplot2]{theme}}.
#'@param ... further arguments passed to either the print() function or to the \code{\link[ggpubr]{ggpar}} function for customizing the plot (see Details section).
#'@details \strong{Customizing the plots}: The plot can be easily
#' customized using additional arguments to be passed to the function ggpar().
#' Read ?ggpubr::ggpar. These arguments include
#' \emph{font.main,font.submain,font.caption,font.x,font.y,font.tickslab,font.legend}:
#' a vector of length 3 indicating respectively the size (e.g.: 14), the style
#' (e.g.: "plain", "bold", "italic", "bold.italic") and the color (e.g.: "red")
#' of main title, subtitle, caption, xlab and ylab and axis tick labels,
#' respectively. For example \emph{font.x = c(14, "bold", "red")}. Use font.x
#' = 14, to change only font size; or use font.x = "bold", to change only font
#' face.
#'@return Returns an object of class \code{ggcoxzph} which is a list of ggplots.
#'
#'@author Marcin Kosinski , \email{m.p.kosinski@@gmail.com}
#'
#'@examples
#'
#' library(survival)
#' fit <- coxph(Surv(futime, fustat) ~ age + ecog.ps + rx, data=ovarian)
#' cox.zph.fit <- cox.zph(fit)
#' # plot all variables
#' ggcoxzph(cox.zph.fit)
#' # plot all variables in specified order
#' ggcoxzph(cox.zph.fit, var = c("ecog.ps", "rx", "age"), font.main = 12)
#' # plot specified variables in specified order
#' ggcoxzph(cox.zph.fit, var = c("ecog.ps", "rx"), font.main = 12, caption = "Caption goes here")
#'
#'@describeIn ggcoxzph Graphical Test of Proportional Hazards using ggplot2.
#'@export
ggcoxzph <- function (fit, resid = TRUE, se = TRUE, df = 4, nsmo = 40, var,
                      point.col = "red", point.size = 1, point.shape = 19, point.alpha = 1,
                      caption = NULL,
                      ggtheme = theme_survminer(), ...){
  # Graphical test of proportional hazards: one ggplot of scaled Schoenfeld
  # residuals (plus a natural-spline smooth) per covariate of a cox.zph fit.
  # Returns a named list of ggplots classed "ggcoxzph".
  x <- fit
  if(!methods::is(x, "cox.zph"))
    stop("Can't handle an object of class ", class(x))
  xx <- x$x
  yy <- x$y
  d <- nrow(yy)
  df <- max(df)
  nvar <- ncol(yy)
  # Natural-spline basis over transformed time: first 'nsmo' rows are the
  # prediction grid (pmat), the rest the observed times (xmat).
  pred.x <- seq(from = min(xx), to = max(xx), length = nsmo)
  temp <- c(pred.x, xx)
  lmat <- splines::ns(temp, df = df, intercept = TRUE)
  pmat <- lmat[1:nsmo, ]
  xmat <- lmat[-(1:nsmo), ]
  qmat <- qr(xmat)
  if (qmat$rank < df)
    stop("Spline fit is singular, try a smaller degrees of freedom")
  if (se) {
    # Pointwise variance factor of the smooth, used for the +/- 2 SE band.
    bk <- backsolve(qmat$qr[1:df, 1:df], diag(df))
    xtx <- bk %*% t(bk)
    seval <- d * ((pmat %*% xtx) * pmat) %*% rep(1, df)
  }
  ylab <- paste("Beta(t) for", dimnames(yy)[[2]])
  if (missing(var))
    var <- 1:nvar
  else {
    if (is.character(var))
      var <- match(var, dimnames(yy)[[2]])
    if (any(is.na(var)) || max(var) > nvar || min(var) <
        1)
      stop("Invalid variable requested")
  }
  # Map the transformed time scale back to time units for the x axis.
  if (x$transform == "log") {
    xx <- exp(xx)
    pred.x <- exp(pred.x)
  }
  else if (x$transform != "identity") {
    xtime <- as.numeric(dimnames(yy)[[1]])
    indx <- !duplicated(xx)
    apr1 <- approx(xx[indx], xtime[indx], seq(min(xx), max(xx),
                                              length = 17)[2 * (1:8)])
    temp <- signif(apr1$y, 2)
    apr2 <- approx(xtime[indx], xx[indx], temp)
    xaxisval <- apr2$y
    xaxislab <- rep("", 8)
    for (i in 1:8) xaxislab[i] <- format(temp[i])
  }
  # NOTE(review): this 'plots' is dead — it is immediately overwritten by the
  # lapply() result assigned via '->' at the end of the pipeline below.
  plots <- list()
  lapply(var, function(i) {
    # Per-variable test p-value, shown in the panel title.
    invisible(round(x$table[i, 3],4) -> pval)
    ggplot() + labs(title = paste0('Schoenfeld Individual Test p: ', pval)) + ggtheme -> gplot
    y <- yy[, i]
    yhat <- as.vector(pmat %*% qr.coef(qmat, y))
    if (resid)
      yr <- range(yhat, y)
    else yr <- range(yhat)
    if (se) {
      temp <- as.vector(2 * sqrt(x$var[i, i] * seval))
      yup <- yhat + temp
      ylow <- yhat - temp
      yr <- range(yr, yup, ylow)
    }
    # Smooth line; axis handling depends on the time transform used.
    if (x$transform == "identity") {
      gplot + geom_line(aes(x=pred.x, y=yhat)) +
        xlab("Time") +
        ylab(ylab[i]) +
        ylim(yr) -> gplot
    } else if (x$transform == "log") {
      gplot + geom_line(aes(x=log(pred.x), y=yhat)) +
        xlab("Time") +
        ylab(ylab[i]) +
        ylim(yr) -> gplot
    } else {
      gplot + geom_line(aes(x=pred.x, y=yhat)) +
        xlab("Time") +
        ylab(ylab[i]) +
        scale_x_continuous(breaks = xaxisval,
                           labels = xaxislab) +
        ylim(yr)-> gplot
    }
    if (resid)
      gplot <- gplot + geom_point(aes(x = xx, y =y),
                                  col = point.col, shape = point.shape, size = point.size, alpha = point.alpha)
    if (se) {
      gplot <- gplot + geom_line(aes(x=pred.x, y=yup), lty = "dashed") +
        geom_line(aes( x = pred.x, y = ylow), lty = "dashed")
    }
    ggpubr::ggpar(gplot, ...)
  }) -> plots
  names(plots) <- var
  class(plots) <- c("ggcoxzph", "ggsurv", "list")
  if("GLOBAL" %in% rownames(x$table)) # case of multivariate Cox
    global_p <- x$table["GLOBAL", 3]
  else global_p <- NULL # Univariate Cox
  attr(plots, "global_pval") <- global_p
  attr(plots, "caption") <- caption
  plots
}
#' @param x an object of class ggcoxzph
#' @param newpage open a new page. See \code{\link{grid.arrange}}.
#' @method print ggcoxzph
#' @rdname ggcoxzph
#' @export
print.ggcoxzph <- function(x, ..., newpage = TRUE){
  # Arrange the per-variable plots of a 'ggcoxzph' object on one page with
  # aligned panel widths; the global test p-value goes on top, the optional
  # caption at the bottom.
  if(!inherits(x, "ggcoxzph"))
    stop("An object of class ggcoxzph is required.")
  plots <- x
  pval <- attr(x, "global_pval")
  grobs <- widths <- list()
  # Convert each plot to a grob and record its axis/panel widths.
  for (i in 1:length(plots)) {
    grobs[[i]] <- ggplotGrob(plots[[i]])
    widths[[i]] <- grobs[[i]]$widths[2:5]
  }
  # Force all panels to the maximum observed width so axes line up.
  maxwidth <- do.call(grid::unit.pmax, widths)
  for (i in 1:length(grobs)) {
    grobs[[i]]$widths[2:5] <- as.list(maxwidth)
  }
  if(!is.null(pval)) main <- paste0("Global Schoenfeld Test p: ", signif(pval, 4), "\n")
  else main <- NULL
  caption <- attr(plots, "caption")
  do.call(gridExtra::grid.arrange, c(grobs, top = main, bottom = caption, newpage = newpage))
}
|
#### Here is the R script you will use: (remember that # indicates a comment) ####
#Lab12: Multiple Logistic Regression
# Fits a logistic model of low birth weight (LOW) on mother's age (AGE),
# weight (LWT) and smoking status (SMOKE), removes influential observations,
# then plots predicted probabilities with 95% confidence bands.
getwd()
library(SDSRegressionR)
#Bring in data
birth <- read.csv("data/LowBirth.csv", stringsAsFactors = FALSE)
names(birth)
#Intital Model
b_mod <- glm(LOW ~ AGE + LWT + SMOKE, data=birth, family="binomial")
summary(b_mod)
# Collinearity (VIF) and influence diagnostics (Cook's distance / outliers).
library(car)
vif(b_mod)
cooksPlot(b_mod, print.obs = TRUE, sort.obs = TRUE)
threeOuts(b_mod)
#Get good data...
# Drop the four observations flagged by the diagnostics above.
g_birth <- birth[!row.names(birth) %in% c(21, 140, 11, 159),]
#Re-run
g_birth$SMOKE <- factor(g_birth$SMOKE)
b_mod2 <- glm(LOW ~ AGE + LWT + SMOKE, data=g_birth, family="binomial")
summary(b_mod2)
#Odds-ratios
exp(b_mod2$coef)
exp(confint.default(b_mod2))
#Stats
library(rms)
b_mod2.2 <- lrm(LOW ~ AGE + LWT + SMOKE, g_birth)
b_mod2.2
#Examine the variables of interest graphically...
#Look at ranges...
summary(g_birth)
#Predict
# NOTE(review): SMOKE was converted to a factor above, so
# mean(g_birth$SMOKE, na.rm=TRUE) returns NA (with a warning) in modern R,
# and b_mod2 expects a factor SMOKE in newdata — confirm this grid works.
new <- data.frame(AGE = mean(g_birth$AGE, na.rm=TRUE),
                  LWT = seq(80, 250, 10),
                  SMOKE = mean(g_birth$SMOKE, na.rm=TRUE))
# Link-scale predictions, back-transformed with plogis() into probabilities
# and a 95% Wald confidence band.
pred <- data.frame(new, pred = predict(b_mod2, newdata = new, type="link", se.fit=TRUE))
pred <- data.frame(pred, p.fit = plogis(pred$pred.fit),
                   LL = plogis(pred$pred.fit - (1.96*pred$pred.se.fit)),
                   UL = plogis(pred$pred.fit + (1.96*pred$pred.se.fit)))
#Graph
g <- simpleScatter(g_birth, LWT, LOW, title="Low birth weight", xlab="Mother's weight", ylab="Low birth weight probability")
g +
  geom_line(data=pred, aes(x=LWT, y=p.fit), color="red") +
  geom_line(data=pred, aes(x=LWT, y=LL), linetype="dashed") +
  geom_line(data=pred, aes(x=LWT, y=UL), linetype="dashed")
#Out of curiosity...
# NOTE(review): SMOKE is supplied here as numeric 0/1 although the model was
# fit with factor(SMOKE); predict() may reject or mislabel it — verify.
new <- data.frame(AGE = mean(g_birth$AGE, na.rm=TRUE),
                  LWT = mean(g_birth$LWT, na.rm=TRUE),
                  SMOKE = c(0,1))
pred <- data.frame(new, pred = predict(b_mod2, newdata = new, type="link", se.fit=TRUE))
pred <- data.frame(pred, p.fit = plogis(pred$pred.fit),
                   LL = plogis(pred$pred.fit - (1.96*pred$pred.se.fit)),
                   UL = plogis(pred$pred.fit + (1.96*pred$pred.se.fit)))
pred
# Point-and-error-bar plot of predicted probability by smoking status.
ggplot(pred, aes(y=p.fit, x=factor(SMOKE))) +
  geom_point(size=2) +
  geom_errorbar(aes(ymin=LL, ymax=UL), width=.1) +
  ylim(0,1) +
  labs(title="Smoking and low birth \n 95% CI") +
  geom_hline(yintercept = 0.5, color="red") +
theme_bw() | /Lab 12/Lab 12 prac R syntax.R | no_license | mehranaman/Applied-Regression | R | false | false | 2,371 | r | #### Here is the R script you will use: (remember that # indicates a comment) ####
#Lab12: Multiple Logistic Regression
# (Duplicate copy of the Lab 12 logistic-regression script: model low birth
# weight on AGE, LWT and SMOKE, check diagnostics, plot predictions.)
getwd()
library(SDSRegressionR)
#Bring in data
birth <- read.csv("data/LowBirth.csv", stringsAsFactors = FALSE)
names(birth)
#Intital Model
b_mod <- glm(LOW ~ AGE + LWT + SMOKE, data=birth, family="binomial")
summary(b_mod)
library(car)
vif(b_mod)
cooksPlot(b_mod, print.obs = TRUE, sort.obs = TRUE)
threeOuts(b_mod)
#Get good data...
g_birth <- birth[!row.names(birth) %in% c(21, 140, 11, 159),]
#Re-run
g_birth$SMOKE <- factor(g_birth$SMOKE)
b_mod2 <- glm(LOW ~ AGE + LWT + SMOKE, data=g_birth, family="binomial")
summary(b_mod2)
#Odds-ratios
exp(b_mod2$coef)
exp(confint.default(b_mod2))
#Stats
library(rms)
b_mod2.2 <- lrm(LOW ~ AGE + LWT + SMOKE, g_birth)
b_mod2.2
#Examine the variables of interest graphically...
#Look at ranges...
summary(g_birth)
#Predict
# NOTE(review): mean() of the factor SMOKE yields NA with a warning in
# modern R — see the note on the first copy of this script.
new <- data.frame(AGE = mean(g_birth$AGE, na.rm=TRUE),
                  LWT = seq(80, 250, 10),
                  SMOKE = mean(g_birth$SMOKE, na.rm=TRUE))
pred <- data.frame(new, pred = predict(b_mod2, newdata = new, type="link", se.fit=TRUE))
pred <- data.frame(pred, p.fit = plogis(pred$pred.fit),
                   LL = plogis(pred$pred.fit - (1.96*pred$pred.se.fit)),
                   UL = plogis(pred$pred.fit + (1.96*pred$pred.se.fit)))
#Graph
g <- simpleScatter(g_birth, LWT, LOW, title="Low birth weight", xlab="Mother's weight", ylab="Low birth weight probability")
g +
  geom_line(data=pred, aes(x=LWT, y=p.fit), color="red") +
  geom_line(data=pred, aes(x=LWT, y=LL), linetype="dashed") +
  geom_line(data=pred, aes(x=LWT, y=UL), linetype="dashed")
#Out of curiosity...
new <- data.frame(AGE = mean(g_birth$AGE, na.rm=TRUE),
                  LWT = mean(g_birth$LWT, na.rm=TRUE),
                  SMOKE = c(0,1))
pred <- data.frame(new, pred = predict(b_mod2, newdata = new, type="link", se.fit=TRUE))
pred <- data.frame(pred, p.fit = plogis(pred$pred.fit),
                   LL = plogis(pred$pred.fit - (1.96*pred$pred.se.fit)),
                   UL = plogis(pred$pred.fit + (1.96*pred$pred.se.fit)))
pred
ggplot(pred, aes(y=p.fit, x=factor(SMOKE))) +
  geom_point(size=2) +
  geom_errorbar(aes(ymin=LL, ymax=UL), width=.1) +
  ylim(0,1) +
  labs(title="Smoking and low birth \n 95% CI") +
  geom_hline(yintercept = 0.5, color="red") +
theme_bw() |
\name{nlregVL-package}
\alias{nlregVL-package}
\alias{nlregVL}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab nlregVL\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-02-02\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /man/nlregVL-package.Rd | no_license | VinLaflamme/nlregVL | R | false | false | 958 | rd | \name{nlregVL-package}
\alias{nlregVL-package}
\alias{nlregVL}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab nlregVL\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-02-02\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
# Exploratory preparation of the car-crash data: load, recode factors, count
# missing values per column/row for 2006 and 2015, impute the missing
# Atmosferic_factors values with a decision tree, then clean the data.
dat <- read.csv(file = "Car_crash_data_project.csv", sep = ",", header = T)
names(dat)
# [1] "Month"               "Year"                "Week_day"            "Type_day"            "Casualties"
# [6] "Num_vehicles"        "Lanes"               "Road_width"          "Lane_width"          "Hard_shoulder"
# [11] "Paved_hard_shoulder" "Security_barriers"   "Aditional_panels"    "Edge_landmarks"      "Reflectors"
# [16] "Surface"             "Luminosity"          "Atmosferic_factors"  "Limited_visibility"  "Danger_signs"
# [21] "Crash_type"          "Traffic"             "Conc_distraction"    "Conc_alcohol_durgs"  "Conc_speed"
# [26] "Conc_tyred_sleepy_ill" "Conc_weather"
# Treat Year and Week_day as categorical.
dat[,2] <- as.factor(dat[,2])
dat[,3] <- as.factor(dat[,3])
#dat[,5] <- as.factor(dat[,5])
#dat[,6] <- as.factor(dat[,6])
#dat[,7] <- as.factor(dat[,7])
# Per-year subsets, dropping the now-constant Year column (col 2).
dat2015 <- dat[which(dat["Year"] == 2015 ),][,-2]
dat2006 <- dat[which(dat["Year"] == 2006 ),][,-2]
######################################################################################
# NA counts per column for 2015 (row 1 = count, row 2 = column index).
# NOTE(review): growing a vector with c() inside a loop is O(n^2);
# colSums(is.na(dat2015)) would compute the same counts directly.
nNaNCols2015 <- c()
for (i in 1:length(dat2015[1,])){
  nNaNCols2015 <- c(nNaNCols2015, sum(is.na(dat2015[,i])), i)
}
nNaNCols2015 <- matrix(nNaNCols2015, nrow = 2)
nNaNCols2015[,order(nNaNCols2015[1,], decreasing = T)]
# remove column 22 due to excessive NA density
# NA counts per row for 2015.
# NOTE(review): the loop bound uses nrow(dat) (both years) while indexing
# dat2015; indices past nrow(dat2015) produce NA rows — likely a bug.
nNaNRows2015 <- c()
for (i in 1:length(dat[,1])){
  nNaNRows2015 <- c(nNaNRows2015, sum(is.na(dat2015[i,])), i)
}
nNaNRows2015 <- matrix(nNaNRows2015, nrow = 2)
nNaNRows2015[,order(nNaNRows2015[1,], decreasing = T)[1:20]]
######################################################################################
# Same NA summaries for 2006.
nNaNCols2006 <- c()
for (i in 1:length(dat2006[1,])){
  nNaNCols2006 <- c(nNaNCols2006, sum(is.na(dat2006[,i])), i)
}
nNaNCols2006 <- matrix(nNaNCols2006, nrow = 2)
nNaNCols2006[,order(nNaNCols2006[1,], decreasing = T)]
nNaNRows2006 <- c()
for (i in 1:length(dat2006[,1])){
  nNaNRows2006 <- c(nNaNRows2006, sum(is.na(dat2006[i,])), i)
}
nNaNRows2006 <- matrix(nNaNRows2006, nrow = 2)
nNaNRows2006[,order(nNaNRows2006[1,], decreasing = T)[1:20]]
# Columns without NAs: 1, 2, 3, 4, 5, 6, 7, 16, 17, 19, 21, 23, 24, 25, 26, 27
# First column to analyze: 18, it has 13 NAs
library(rpart)
# decide values with a decission tree
# 2006 - 17
# Split 2006 rows by missingness of column 17 (Atmosferic_factors after the
# Year column was dropped), fit a classification tree on the complete rows,
# and predict the class of the incomplete ones.
dat.test <- dat2006[which(is.na(dat2006[,17])),-17]
dat.train <- dat2006[which(!(is.na(dat2006[,17]))),]
names(dat.train)
d.tr <- rpart(Atmosferic_factors~., data = dat.train)
plot(d.tr, branch=0, margin=0.25, uniform=TRUE)
text(d.tr, use.n=TRUE, splits=TRUE, pretty=6)
predict(d.tr, dat.test, "prob")
predictions <- predict(d.tr, dat.test, "vector")
predictions
# 2015 - remove column 21
which(is.na(dat2015[,7]))
# Cleaning the data
# Drop high-NA column 22, drop rows missing Road_width (col 8), and fill the
# remaining Atmosferic_factors NAs with the tree predictions (class indices
# mapped back to factor levels).
cdat <- dat[,-22]
cdat <- cdat[-(which(is.na(cdat[,8]))),]
atmNAs <- which(is.na(cdat[,18]))
cdat$Atmosferic_factors[atmNAs] <- levels(cdat[,18])[predictions]
# Integer interpretation
# Bucket the large counts and convert the count columns to factors.
cdat[which(cdat[,5]>5), 5] = "more_than_5"
cdat[,5] <- as.factor(cdat[,5])
cdat[which(cdat[,6]>2), 6] = "more_than_2"
cdat[,6] <- as.factor(cdat[,6])
cdat[,7] <- as.factor(cdat[,7])
cdat2015 <- cdat[which(cdat["Year"] == 2015 ),][,-2]
cdat2006 <- cdat[which(cdat["Year"] == 2006 ),][,-2]
# Sanity check of the resulting column classes.
for (i in 1: length(cdat[1,])){
  print(class(cdat[,i]))
}
chisqmat <- function(mat) {
  # Pairwise chi-squared tests of independence between all columns of 'mat'.
  #
  # mat: a data frame (or matrix-like object) of categorical variables.
  # Returns an ncol(mat) x ncol(mat) numeric matrix whose [i, j] entry is the
  # simulated (Monte Carlo) p-value of chisq.test(mat[, i], mat[, j]).
  # Note: simulate.p.value = TRUE makes the result depend on the RNG state.
  n <- ncol(mat)
  ret <- matrix(nrow = n, ncol = n)
  for (i in seq_len(n)) {
    for (j in seq_len(n)) {
      ret[i, j] <- chisq.test(mat[, i], mat[, j], simulate.p.value = TRUE)$p.value
    }
  }
  ret
}
# Pairwise chi-squared p-value matrix on the cleaned data; the row sums below
# summarise the overall (in)dependence of columns 16, 18 and 26 — presumably
# Surface, Atmosferic_factors and the last concurrence column after dropping
# column 22; verify against the shifted column order.
mm <- chisqmat(cdat)
sum(mm[16,])
sum(mm[18,])
sum(mm[26,])
| /BN-project.R | no_license | BN-project/BN-project | R | false | false | 3,642 | r | dat <- read.csv(file = "Car_crash_data_project.csv", sep = ",", header = T)
# (Duplicate copy of the car-crash preparation script; see the annotated
# first copy above for detailed notes, including the suspect loop bound that
# uses nrow(dat) while indexing dat2015.)
names(dat)
# [1] "Month"               "Year"                "Week_day"            "Type_day"            "Casualties"
# [6] "Num_vehicles"        "Lanes"               "Road_width"          "Lane_width"          "Hard_shoulder"
# [11] "Paved_hard_shoulder" "Security_barriers"   "Aditional_panels"    "Edge_landmarks"      "Reflectors"
# [16] "Surface"             "Luminosity"          "Atmosferic_factors"  "Limited_visibility"  "Danger_signs"
# [21] "Crash_type"          "Traffic"             "Conc_distraction"    "Conc_alcohol_durgs"  "Conc_speed"
# [26] "Conc_tyred_sleepy_ill" "Conc_weather"
dat[,2] <- as.factor(dat[,2])
dat[,3] <- as.factor(dat[,3])
#dat[,5] <- as.factor(dat[,5])
#dat[,6] <- as.factor(dat[,6])
#dat[,7] <- as.factor(dat[,7])
dat2015 <- dat[which(dat["Year"] == 2015 ),][,-2]
dat2006 <- dat[which(dat["Year"] == 2006 ),][,-2]
######################################################################################
nNaNCols2015 <- c()
for (i in 1:length(dat2015[1,])){
  nNaNCols2015 <- c(nNaNCols2015, sum(is.na(dat2015[,i])), i)
}
nNaNCols2015 <- matrix(nNaNCols2015, nrow = 2)
nNaNCols2015[,order(nNaNCols2015[1,], decreasing = T)]
# remove column 22 due to excessive NA density
nNaNRows2015 <- c()
for (i in 1:length(dat[,1])){
  nNaNRows2015 <- c(nNaNRows2015, sum(is.na(dat2015[i,])), i)
}
nNaNRows2015 <- matrix(nNaNRows2015, nrow = 2)
nNaNRows2015[,order(nNaNRows2015[1,], decreasing = T)[1:20]]
######################################################################################
nNaNCols2006 <- c()
for (i in 1:length(dat2006[1,])){
  nNaNCols2006 <- c(nNaNCols2006, sum(is.na(dat2006[,i])), i)
}
nNaNCols2006 <- matrix(nNaNCols2006, nrow = 2)
nNaNCols2006[,order(nNaNCols2006[1,], decreasing = T)]
nNaNRows2006 <- c()
for (i in 1:length(dat2006[,1])){
  nNaNRows2006 <- c(nNaNRows2006, sum(is.na(dat2006[i,])), i)
}
nNaNRows2006 <- matrix(nNaNRows2006, nrow = 2)
nNaNRows2006[,order(nNaNRows2006[1,], decreasing = T)[1:20]]
# Columns without NAs: 1, 2, 3, 4, 5, 6, 7, 16, 17, 19, 21, 23, 24, 25, 26, 27
# First column to analyze: 18, it has 13 NAs
library(rpart)
# decide values with a decission tree
# 2006 - 17
dat.test <- dat2006[which(is.na(dat2006[,17])),-17]
dat.train <- dat2006[which(!(is.na(dat2006[,17]))),]
names(dat.train)
d.tr <- rpart(Atmosferic_factors~., data = dat.train)
plot(d.tr, branch=0, margin=0.25, uniform=TRUE)
text(d.tr, use.n=TRUE, splits=TRUE, pretty=6)
predict(d.tr, dat.test, "prob")
predictions <- predict(d.tr, dat.test, "vector")
predictions
# 2015 - remove column 21
which(is.na(dat2015[,7]))
# Cleaning the data
cdat <- dat[,-22]
cdat <- cdat[-(which(is.na(cdat[,8]))),]
atmNAs <- which(is.na(cdat[,18]))
cdat$Atmosferic_factors[atmNAs] <- levels(cdat[,18])[predictions]
# Integer interpretation
cdat[which(cdat[,5]>5), 5] = "more_than_5"
cdat[,5] <- as.factor(cdat[,5])
cdat[which(cdat[,6]>2), 6] = "more_than_2"
cdat[,6] <- as.factor(cdat[,6])
cdat[,7] <- as.factor(cdat[,7])
cdat2015 <- cdat[which(cdat["Year"] == 2015 ),][,-2]
cdat2006 <- cdat[which(cdat["Year"] == 2006 ),][,-2]
for (i in 1: length(cdat[1,])){
  print(class(cdat[,i]))
}
# Pairwise chi-squared p-value matrix (duplicate helper definition).
chisqmat <- function(mat) {
  ncol = length(mat[1,])
  ret <- matrix(nrow = ncol, ncol = ncol)
  for (i in 1:ncol){
    for (j in 1:ncol) {
      ret[i, j] <- chisq.test(mat[,i], mat[,j], simulate.p.value = T)$p.value
    }
  }
  return(ret)
}
mm <- chisqmat(cdat)
sum(mm[16,])
sum(mm[18,])
sum(mm[26,])
|
#' #' PHDRISk FUNCTIONS
#'
#' DUAL PLOT FUNCTION
#'
#'
#' @param x1 horizontal coordinates of the first series (e.g. dates)
#' @param y1 vertical coordinates of the first series
#' @param x2 horizontal coordinates of the second series (defaults to \code{x1})
#' @param y2 vertical coordinates of the second series
#' @param col colours for the two series; default \code{c("#C54E6D", "#009380")}
#' @param lwd line widths, one per series (see \code{?par})
#' @param mar plot margins (see \code{?par})
#' @param ylab1 label for the first (left-axis) series
#' @param ylab2 label for the second (right-axis) series
#' @param nxbreaks number of breaks on the horizontal axis
#' @param yleg1 legend label for the first series
#' @param yleg2 legend label for the second series
#' @param ylim1 two numbers: limits for the left axis
#' @param ylim2 two numbers: limits for the right axis
#' @param main main title, as in \code{plot()}
#' @param legx legend x position, passed to \code{legend()}
#' @param legy legend y position, passed to \code{legend()}
#' @param colgrid colour of background gridlines (\code{NULL} for none)
#' @param ylim.ref indices of the reference observations used when indexing
#'   both series for the default axis scaling
#' @param xlab x-axis label (with \code{main}, as in \code{plot()})
#' @param silent if TRUE, suppress informational messages about axis scaling
#' @param bty legend box type (see \code{?legend})
#'
#' @import stats
#' @importFrom graphics abline axis grid legend mtext par
#'
#' @export
dualplot <- function(x1, y1, y2, x2 = x1,
                     col = c("#C54E6D", "#009380"),
                     lwd = c(1, 1), colgrid = NULL,
                     mar = c(3, 6, 3, 6) + 0.1,
                     ylab1 = paste(substitute(y1), collapse = ""),
                     ylab2 = paste(substitute(y2), collapse = ""),
                     nxbreaks = 5,
                     yleg1 = paste(gsub("\n$", "", ylab1), "(left axis)"),
                     yleg2 = paste(ylab2, "(right axis)"),
                     ylim1 = NULL, ylim2 = NULL, ylim.ref = NULL,
                     xlab = "", main = NULL, legx = "topleft", legy = NULL,
                     silent = FALSE, bty = "n", ...){
  # strip excess attributes (eg xts etc) from the two vertical axis variables
  ylab1 <- as.character(ylab1)
  ylab2 <- as.character(ylab2)
  y1 <- as.numeric(y1)
  y2 <- as.numeric(y2)
  # if ylim.ref is NULL, calculate a good default
  if(is.null(ylim.ref)){
    if (length(y1) == length(y2)){
      ylim.ref <- c(1, 1)
    } else {
      # Series of different lengths: anchor the index base at the later start.
      if (min(x1) > min(x2)){
        ylim.ref <- c(1, which(abs(x2 - min(x1)) == min(abs(x2 - min(x1)))))
      } else {
        ylim.ref <- c(which(abs(x1 - min(x2)) == min(abs(x1 - min(x2)))), 1)
      }
    }
  }
  # NOTE(review): par() is restored at the end of the function body but not
  # via on.exit(), so an error mid-plot leaves the margins changed.
  oldpar <- par(mar = mar)
  xbreaks <- round(seq(from = min(c(x1, x2)), to = max(c(x1, x2)), length.out = nxbreaks))
  # unless ylim1 or ylim2 were set, we set them to levels that make it equivalent
  # to a graphic drawn of indexed series (if all data positive), or to the mean
  # of each series +/- three standard deviations if some data are negative
  # NOTE(review): scalar condition written with elementwise '&'; '&&' would
  # be the idiomatic (short-circuiting) form here.
  if(is.null(ylim1) & is.null(ylim2)){
    if(min(c(y1, y2), na.rm = TRUE) < 0){
      message("With negative values ylim1 or ylim2 need to be chosen by a method other than treating both series visually as though they are indexed. Defaulting to mean value +/- 3 times the standard deviations.")
      ylim1 <- c(-3, 3) * sd(y1, na.rm = TRUE) + mean(y1, na.rm = TRUE)
      ylim2 <- c(-3, 3) * sd(y2, na.rm = TRUE) + mean(y2, na.rm = TRUE)
    }
    if(ylim.ref[1] > length(y1)){
      stop("ylim.ref[1] must be a number shorter than the length of the first series.")
    }
    if(ylim.ref[2] > length(y2)){
      stop("ylim.ref[2] must be a number shorter than the length of the second series.")
    }
    if(!silent) message("The two series will be presented visually as though they had been converted to indexes.")
    # convert the variables to indexes (base value of 1 at the time specified by ylim.ref)
    ind1 <- as.numeric(y1) / y1[ylim.ref[1]]
    ind2 <- as.numeric(y2) / y2[ylim.ref[2]]
    # calculate y axis limits on the "index to 1" scale
    indlimits <- range(c(ind1, ind2), na.rm = TRUE)
    # convert that back to the original y axis scales
    ylim1 = indlimits * y1[ylim.ref[1]]
    ylim2 = indlimits * y2[ylim.ref[2]]
  } else {
    if(!silent) warning("You've chosen to set at least one of the vertical axes limits manually. Up to you, but it is often better to leave it to the defaults.")
  }
  # draw first series - with no axes.
  plot(x1, y1, type = "l", axes = FALSE, lwd = lwd[1],
       xlab = xlab, ylab = "", col = col[1], main = main,
       xlim = range(xbreaks), ylim = ylim1)
  # add in the gridlines if wanted:
  if(!is.null(colgrid)){
    grid(lty = 1, nx = NA, ny = NULL, col = colgrid)
    abline(v = xbreaks, col = colgrid)
  }
  # add in the left hand vertical axis and its label
  axis(2, col = col[1], col.axis= col[1], las=1 ) ## las=1 makes horizontal labels
  mtext(paste0("\n", ylab1, "\n"), side = 2, col = col[1], line = 1.5)
  # Allow a second plot on the same graph
  par(new=TRUE)
  # Plot the second series:
  plot(x2, y2, xlab="", ylab="", axes = FALSE, type = "l", lwd = lwd[2],
       col = col[2], xlim = range(xbreaks), ylim = ylim2)
  ## add second vertical axis (on right) and its label
  mtext(paste0("\n", ylab2, "\n"), side = 4, col = col[2], line = 4.5)
  axis(4, col = col[2], col.axis = col[2], las=1)
  # Draw the horizontal time axis
  axis(1, at = xbreaks, labels = xbreaks)
  # Add Legend
  legend(x = legx, y = legy, legend=c(yleg1, yleg2),
         text.col = col, lty = c(1, 1), lwd = lwd, col = col,
         bty = bty, ...)
  # restore the caller's graphics parameters
  par(oldpar)
} | /R/dual_plot.R | no_license | phdrisk/Rphdrisk | R | false | false | 5,306 | r | #' #' FUNCOES PHDRISk
#'
#' DUAL PLOT FUNCTION
#'
#'
#' @param x1 coordenadas da primeira linha ( data )
#' @param y1 coordenadas da primeira linha ( data )
#' @param x2 coordenadas da segunda linha ( dados )
#' @param y2 coordenadas da segunda linha ( dados )
#' @param col cor padrao c("#C54E6D", "#009380")
#' @param lwd parametro do grafico (veja ?par)
#' @param mar parametro do grafido (veja ?par)
#' @param ylab1 rotulo entrada 1
#' @param ylab2 rotulo entrada2
#' @param nxbreaks numero de quebras no eixo horizontal
#' @param yleg1 rotulo do eixo
#' @param yleg2 rotulo do eixo
#' @param ylim1 tho numbers
#' @param ylim2 tho numbers
#' @param main tho numbers
#' @param legx tho numbers
#' @param legy tho numbers
#' @param colgrid xxxxxx
#' @param ylim.ref yyyyyy
#' @param xlab e main são para rotulo x e titulo principal como em plot ()
#' @param silent silent
#' @param bty bty
#'
#' @import stats
#' @importFrom graphics abline axis grid legend mtext par
#'
#' @export
# (Duplicate copy of dualplot(): draws two series as lines sharing one plot
# region, with independent left and right y axes scaled as though both
# series were indexed; see the annotated first copy above.)
dualplot <- function(x1, y1, y2, x2 = x1,
                     col = c("#C54E6D", "#009380"),
                     lwd = c(1, 1), colgrid = NULL,
                     mar = c(3, 6, 3, 6) + 0.1,
                     ylab1 = paste(substitute(y1), collapse = ""),
                     ylab2 = paste(substitute(y2), collapse = ""),
                     nxbreaks = 5,
                     yleg1 = paste(gsub("\n$", "", ylab1), "(left axis)"),
                     yleg2 = paste(ylab2, "(right axis)"),
                     ylim1 = NULL, ylim2 = NULL, ylim.ref = NULL,
                     xlab = "", main = NULL, legx = "topleft", legy = NULL,
                     silent = FALSE, bty = "n", ...){
  # strip excess attributes (eg xts etc) from the two vertical axis variables
  ylab1 <- as.character(ylab1)
  ylab2 <- as.character(ylab2)
  y1 <- as.numeric(y1)
  y2 <- as.numeric(y2)
  # if ylim.ref is NULL, calculate a good default
  if(is.null(ylim.ref)){
    if (length(y1) == length(y2)){
      ylim.ref <- c(1, 1)
    } else {
      if (min(x1) > min(x2)){
        ylim.ref <- c(1, which(abs(x2 - min(x1)) == min(abs(x2 - min(x1)))))
      } else {
        ylim.ref <- c(which(abs(x1 - min(x2)) == min(abs(x1 - min(x2)))), 1)
      }
    }
  }
  oldpar <- par(mar = mar)
  xbreaks <- round(seq(from = min(c(x1, x2)), to = max(c(x1, x2)), length.out = nxbreaks))
  # unless ylim1 or ylim2 were set, we set them to levels that make it equivalent
  # to a graphic drawn of indexed series (if all data positive), or to the mean
  # of each series +/- three standard deviations if some data are negative
  if(is.null(ylim1) & is.null(ylim2)){
    if(min(c(y1, y2), na.rm = TRUE) < 0){
      message("With negative values ylim1 or ylim2 need to be chosen by a method other than treating both series visually as though they are indexed. Defaulting to mean value +/- 3 times the standard deviations.")
      ylim1 <- c(-3, 3) * sd(y1, na.rm = TRUE) + mean(y1, na.rm = TRUE)
      ylim2 <- c(-3, 3) * sd(y2, na.rm = TRUE) + mean(y2, na.rm = TRUE)
    }
    if(ylim.ref[1] > length(y1)){
      stop("ylim.ref[1] must be a number shorter than the length of the first series.")
    }
    if(ylim.ref[2] > length(y2)){
      stop("ylim.ref[2] must be a number shorter than the length of the second series.")
    }
    if(!silent) message("The two series will be presented visually as though they had been converted to indexes.")
    # convert the variables to indexes (base value of 1 at the time specified by ylim.ref)
    ind1 <- as.numeric(y1) / y1[ylim.ref[1]]
    ind2 <- as.numeric(y2) / y2[ylim.ref[2]]
    # calculate y axis limits on the "index to 1" scale
    indlimits <- range(c(ind1, ind2), na.rm = TRUE)
    # convert that back to the original y axis scales
    ylim1 = indlimits * y1[ylim.ref[1]]
    ylim2 = indlimits * y2[ylim.ref[2]]
  } else {
    if(!silent) warning("You've chosen to set at least one of the vertical axes limits manually. Up to you, but it is often better to leave it to the defaults.")
  }
  # draw first series - with no axes.
  plot(x1, y1, type = "l", axes = FALSE, lwd = lwd[1],
       xlab = xlab, ylab = "", col = col[1], main = main,
       xlim = range(xbreaks), ylim = ylim1)
  # add in the gridlines if wanted:
  if(!is.null(colgrid)){
    grid(lty = 1, nx = NA, ny = NULL, col = colgrid)
    abline(v = xbreaks, col = colgrid)
  }
  # add in the left hand vertical axis and its label
  axis(2, col = col[1], col.axis= col[1], las=1 ) ## las=1 makes horizontal labels
  mtext(paste0("\n", ylab1, "\n"), side = 2, col = col[1], line = 1.5)
  # Allow a second plot on the same graph
  par(new=TRUE)
  # Plot the second series:
  plot(x2, y2, xlab="", ylab="", axes = FALSE, type = "l", lwd = lwd[2],
       col = col[2], xlim = range(xbreaks), ylim = ylim2)
  ## add second vertical axis (on right) and its label
  mtext(paste0("\n", ylab2, "\n"), side = 4, col = col[2], line = 4.5)
  axis(4, col = col[2], col.axis = col[2], las=1)
  # Draw the horizontal time axis
  axis(1, at = xbreaks, labels = xbreaks)
  # Add Legend
  legend(x = legx, y = legy, legend=c(yleg1, yleg2),
         text.col = col, lty = c(1, 1), lwd = lwd, col = col,
         bty = bty, ...)
  par(oldpar)
} |
# caret custom-model specification for the 'naivebayes' package's Naive
# Bayes classifier.  Each element is a hook that caret calls during
# train()/predict(); see caret's "using your own model" documentation.
modelInfo <- list(label = "Naive Bayes",
                  library = "naivebayes",
                  # no internal loop over sub-models
                  loop = NULL,
                  type = c('Classification'),
                  # tuning parameters exposed to train()
                  parameters = data.frame(parameter = c('laplace', 'usekernel', "adjust"),
                                          class = c('numeric', 'logical', "numeric"),
                                          label = c('Laplace Correction', 'Distribution Type', "Bandwidth Adjustment")),
                  # default tuning grid: kernel density on/off, no Laplace correction
                  grid = function(x, y, len = NULL, search = "grid")
                    expand.grid(usekernel = c(TRUE, FALSE), laplace = 0, adjust = 1),
                  # 'adjust' (kernel bandwidth) is only passed when usekernel is TRUE
                  fit = function(x, y, wts, param, lev, last, classProbs, ...) {
                    if(param$usekernel) {
                      out <- naivebayes::naive_bayes(x, y, usekernel = TRUE, laplace = param$laplace, adjust = param$adjust, ...)
                    } else out <- naivebayes::naive_bayes(x, y, usekernel = FALSE, laplace = param$laplace, ...)
                    out
                  },
                  # class predictions; naive_bayes expects a data frame
                  predict = function(modelFit, newdata, submodels = NULL) {
                    if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
                    predict(modelFit , newdata)
                  },
                  # per-class probabilities returned as a data frame
                  prob = function(modelFit, newdata, submodels = NULL) {
                    if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
                    as.data.frame(predict(modelFit, newdata, type = "prob"), stringsAsFactors = TRUE)
                  },
                  predictors = function(x, ...) if(hasTerms(x)) predictors(x$terms) else names(x$tables),
                  tags = c("Bayesian Model"),
                  levels = function(x) x$levels,
                  # sort tuning results by the first parameter column
                  sort = function(x) x[order(x[,1]),])
| /models/files/naive_bayes.R | no_license | topepo/caret | R | false | false | 1,840 | r | modelInfo <- list(label = "Naive Bayes",
library = "naivebayes",
loop = NULL,
type = c('Classification'),
parameters = data.frame(parameter = c('laplace', 'usekernel', "adjust"),
class = c('numeric', 'logical', "numeric"),
label = c('Laplace Correction', 'Distribution Type', "Bandwidth Adjustment")),
grid = function(x, y, len = NULL, search = "grid")
expand.grid(usekernel = c(TRUE, FALSE), laplace = 0, adjust = 1),
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
if(param$usekernel) {
out <- naivebayes::naive_bayes(x, y, usekernel = TRUE, laplace = param$laplace, adjust = param$adjust, ...)
} else out <- naivebayes::naive_bayes(x, y, usekernel = FALSE, laplace = param$laplace, ...)
out
},
predict = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
predict(modelFit , newdata)
},
prob = function(modelFit, newdata, submodels = NULL) {
if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata, stringsAsFactors = TRUE)
as.data.frame(predict(modelFit, newdata, type = "prob"), stringsAsFactors = TRUE)
},
predictors = function(x, ...) if(hasTerms(x)) predictors(x$terms) else names(x$tables),
tags = c("Bayesian Model"),
levels = function(x) x$levels,
sort = function(x) x[order(x[,1]),])
|
# ARIMA case study --------------------------------------------------------
# Fit seasonal ARIMA models to real time series. auto.arima() (forecast
# package) selects the optimal SARIMA(p,d,q)(P,D,Q) order from the data;
# forecast() then predicts h periods ahead.
# A model with p autoregressive and q moving-average terms is said to be of
# order (p, q). Differencing (the d term) is the most common way to make a
# series stationary in the mean -- a special kind of filtering that removes
# trend; for seasonal data, first-order differencing is usually sufficient.

# R packages to be used
library(forecast)
library(TSA)

# Example data sets shipped with the TSA package.
# (The original loaded 'boardings' twice; the redundant call was removed.)
data("co2")
data("boardings")

# Fit and inspect a model for the CO2 series, then forecast 15 steps ahead.
fit1 <- auto.arima(co2)
fit1
plot(fc1 <- forecast(fit1, h = 15))

# Fit a model to the log gasoline price series and forecast 15 steps ahead.
fit2 <- auto.arima(boardings[, "log.price"])
plot(fc2 <- forecast(fit2, h = 15))
| /C5A-TS/ARIMA_case1.R | no_license | DUanalytics/BARtaxila | R | false | false | 1,037 | r | #ARima
# Fit SARIMA models to real time series: auto.arima() (forecast package)
# selects the optimal SARIMA(p,d,q)(P,D,Q) order from the data, and
# forecast() predicts h periods ahead.
# A model with p autoregressive terms and q moving-average terms is said to
# be of order (p, q).
# Differencing (the d term) is the most common way to make a series
# stationary in the mean -- a special kind of filtering that removes trend;
# for seasonal data, first-order differencing is usually sufficient.
# R packages to be used
library(forecast)
library(TSA)
# Example data sets shipped with the TSA package
data("co2")
data("boardings")
# Fit and inspect a model for the CO2 series, then forecast 15 steps ahead.
fit1 <- auto.arima(co2)
fit1
plot(fc1 <- forecast(fit1, h = 15))
data("boardings")  # NOTE(review): redundant -- 'boardings' was already loaded above
fit2 <- auto.arima(boardings[,"log.price"])
# Forecast 15 steps ahead for the log gasoline price series.
plot(fc2 <- forecast(fit2, h = 15))
|
#' Extended Supervised Locality Preserving Projection
#'
#' Extended LPP and Supervised LPP are two variants of the celebrated Locality Preserving Projection (LPP) algorithm for dimension
#' reduction. Their combination, Extended Supervised LPP, merges the two algorithmic novelties so that
#' it reflects discriminant information with a realistic distance measure via a Z-score function.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations.
#' @param label a length-\eqn{n} vector of data class labels.
#' @param ndim an integer-valued target dimension.
#' @param numk the number of neighboring points for k-nn graph construction.
#' @param preprocess an additional option for preprocessing the data.
#' Default is "center". See also \code{\link{aux.preprocess}} for more details.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} matrix whose columns are basis for projection.}
#' }
#'
#'
#' @examples
#' \donttest{
#' ## generate data of 2 types with clear difference
#' diff = 15
#' dt1 = aux.gensamples(n=123)-diff;
#' dt2 = aux.gensamples(n=123)+diff;
#'
#' ## merge the data and create a label correspondingly
#' Y = rbind(dt1,dt2)
#' label = c(rep(1,123), rep(2,123))
#'
#' ## compare LPP, SLPP and ESLPP
#' outLPP <- do.lpp(Y)
#' outSLPP <- do.slpp(Y, label)
#' outESLPP <- do.eslpp(Y, label)
#'
#' ## visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(outLPP$Y, main="LPP")
#' plot(outSLPP$Y, main="SLPP")
#' plot(outESLPP$Y, main="ESLPP")
#' par(opar)
#' }
#'
#' @references
#' \insertRef{zheng_gabor_2007}{Rdimtools}
#'
#' \insertRef{shikkenawis_improving_2012}{Rdimtools}
#'
#' @seealso \code{\link{do.lpp}}, \code{\link{do.slpp}}, \code{\link{do.extlpp}}
#' @author Kisung You
#' @rdname linear_ESLPP
#' @export
do.eslpp <- function(X, label, ndim=2, numk=max(ceiling(nrow(X)/10),2),
                     preprocess=c("center","scale","cscale","decorrelate","whiten")){
  #------------------------------------------------------------------------
  ## PREPROCESSING
  # 1. data matrix
  aux.typecheck(X)
  n <- nrow(X)
  p <- ncol(X)
  # 2. label : check and return a de-factored vector
  #    A class with a single member cannot contribute a within-class
  #    neighborhood, so degenerate classes are rejected.
  label  <- check_label(label, n)
  ulabel <- unique(label)
  for (i in seq_along(ulabel)){
    if (sum(label==ulabel[i])==1){
      stop("* do.eslpp : no degenerate class of size 1 is allowed.")
    }
  }
  if (any(is.na(label))||(any(is.infinite(label)))){stop("* Supervised Learning : any element of 'label' as NA or Inf will simply be considered as a class, not missing entries.") }
  # 3. ndim : target dimension must be a positive integer below p
  ndim <- as.integer(ndim)
  if (!check_ndim(ndim,p)){stop("* do.eslpp : 'ndim' is a positive integer in [1,#(covariates)).")}
  # 4. numk : number of clusters used for the graph construction
  numk <- as.integer(numk)
  if (!check_NumMM(numk,1,n/2,compact=FALSE)){stop("* do.eslpp : 'numk' should be an integer in [2,nrow(X)/2).")}
  # 5. preprocess
  if (missing(preprocess)){ algpreprocess <- "center" }
  else { algpreprocess <- match.arg(preprocess) }

  #------------------------------------------------------------------------
  ## MAIN COMPUTATION
  # 1. preprocessing : center/scale the data as requested
  tmplist <- aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
  trfinfo <- tmplist$info
  pX      <- tmplist$pX

  # 2. k-means clustering; clustidx[[i]] holds the row indices of cluster i
  #    (list preallocated instead of grown inside the loop)
  kclust     <- stats::kmeans(pX, numk)
  clustlabel <- kclust$cluster
  clustidx   <- vector("list", numk)
  for (i in 1:numk){
    clustidx[[i]] <- which(clustlabel==unique(clustlabel)[i])
  }

  # 3. pairwise distances; vecb[i] is the diameter of cluster i, and veca is
  #    a common small lower bound derived from the tightest cluster
  PD   <- as.matrix(dist(pX))
  vecb <- rep(0,numk)
  for (i in 1:numk){
    tgtidx  <- clustidx[[i]]
    vecb[i] <- max(PD[tgtidx,tgtidx])
  }
  veca <- rep(min(vecb)/20,numk)

  # 4. compute the extended-LPP similarity within each cluster ...
  Stmp <- array(0,c(n,n))
  for (i in 1:numk){
    tgtidx <- clustidx[[i]]
    Stmp[tgtidx,tgtidx] <- method_trfextlpp(PD[tgtidx,tgtidx],veca[i],vecb[i])
  }
  diag(Stmp) <- 0.0
  # ... then keep only entries between points sharing a class label:
  # this masking is the "supervised" part of Extended Supervised LPP
  S <- array(0,c(n,n))
  for (i in seq_along(ulabel)){
    tgtidx <- which(label==ulabel[i])
    S[tgtidx,tgtidx] <- Stmp[tgtidx,tgtidx]
  }

  # 5. graph laplacian and generalized eigenvalue problem
  D <- diag(rowSums(S))
  L <- D-S
  LHS <- t(pX)%*%L%*%pX
  RHS <- t(pX)%*%D%*%pX

  # 6. projection matrix from the smallest generalized eigenvectors
  projection <- aux.geigen(LHS, RHS, ndim, maximal=FALSE)

  #------------------------------------------------------------------------
  ## RETURN
  result <- list()
  result$Y          <- pX%*%projection
  result$trfinfo    <- trfinfo
  result$projection <- projection
  return(result)
}
| /R/linear_ESLPP.R | no_license | dungcv/Rdimtools | R | false | false | 4,808 | r | #' Extended Supervised Locality Preserving Projection
#'
#' Extended LPP and Supervised LPP are two variants of the celebrated Locality Preserving Projection (LPP) algorithm for dimension
#' reduction. Their combination, Extended Supervised LPP, merges the two algorithmic novelties so that
#' it reflects discriminant information with a realistic distance measure via a Z-score function.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations.
#' @param label a length-\eqn{n} vector of data class labels.
#' @param ndim an integer-valued target dimension.
#' @param numk the number of neighboring points for k-nn graph construction.
#' @param preprocess an additional option for preprocessing the data.
#' Default is "center". See also \code{\link{aux.preprocess}} for more details.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#'
#' @examples
#' \donttest{
#' ## generate data of 2 types with clear difference
#' diff = 15
#' dt1 = aux.gensamples(n=123)-diff;
#' dt2 = aux.gensamples(n=123)+diff;
#'
#' ## merge the data and create a label correspondingly
#' Y = rbind(dt1,dt2)
#' label = c(rep(1,123), rep(2,123))
#'
#' ## compare LPP, SLPP and ESLPP
#' outLPP <- do.lpp(Y)
#' outSLPP <- do.slpp(Y, label)
#' outESLPP <- do.eslpp(Y, label)
#'
#' ## visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(outLPP$Y, main="LPP")
#' plot(outSLPP$Y, main="SLPP")
#' plot(outESLPP$Y, main="ESLPP")
#' par(opar)
#' }
#'
#' @references
#' \insertRef{zheng_gabor_2007}{Rdimtools}
#'
#' \insertRef{shikkenawis_improving_2012}{Rdimtools}
#'
#' @seealso \code{\link{do.lpp}}, \code{\link{do.slpp}}, \code{\link{do.extlpp}}
#' @author Kisung You
#' @rdname linear_ESLPP
#' @export
do.eslpp <- function(X, label, ndim=2, numk=max(ceiling(nrow(X)/10),2),
preprocess=c("center","scale","cscale","decorrelate","whiten")){
#------------------------------------------------------------------------
## PREPROCESSING
# 1. data matrix
aux.typecheck(X)
n = nrow(X)
p = ncol(X)
# 2. label : check and return a de-factored vector
# For this example, there should be no degenerate class of size 1.
label = check_label(label, n)
ulabel = unique(label)
for (i in 1:length(ulabel)){
if (sum(label==ulabel[i])==1){
stop("* do.eslpp : no degerate class of size 1 is allowed.")
}
}
if (any(is.na(label))||(any(is.infinite(label)))){stop("* Supervised Learning : any element of 'label' as NA or Inf will simply be considered as a class, not missing entries.") }
# 3. ndim
ndim = as.integer(ndim)
if (!check_ndim(ndim,p)){stop("* do.eslpp : 'ndim' is a positive integer in [1,#(covariates)).")}
# 4. numk
numk = as.integer(numk)
if (!check_NumMM(numk,1,n/2,compact=FALSE)){stop("* do.eslpp : 'numk' should be an integer in [2,nrow(X)/2).")}
# 5. preprocess
if (missing(preprocess)){ algpreprocess = "center" }
else { algpreprocess = match.arg(preprocess) }
#------------------------------------------------------------------------
## MAIN COMPUTATION
# 1. preprocessing
tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
trfinfo = tmplist$info
pX = tmplist$pX
# 2. K-Means Clustering
kclust = stats::kmeans(pX, numk)
clustlabel = kclust$cluster
clustidx = list() # for each label, find the corresponding # length-'numk' list
for (i in 1:numk){
clustidx[[i]] = which(clustlabel==unique(clustlabel)[i])
}
# 3. pairwise distance
PD = as.matrix(dist(pX))
vecb = rep(0,numk)
for (i in 1:numk){
tgtidx = clustidx[[i]]
vecb[i] = max(PD[tgtidx,tgtidx])
}
veca = rep(min(vecb)/20,numk)
# 4. compute S
Stmp = array(0,c(n,n))
for (i in 1:numk){
tgtidx = clustidx[[i]]
Stmp[tgtidx,tgtidx] = method_trfextlpp(PD[tgtidx,tgtidx],veca[i],vecb[i])
}
diag(Stmp) = 0.0
############# EXTENDED "SUPERVISED" SENSE
S = array(0,c(n,n))
for (i in 1:length(ulabel)){
tgtidx = which(label==ulabel[i])
S[tgtidx,tgtidx] = Stmp[tgtidx,tgtidx]
}
# 5. graph laplaciana and generalized eigenvalue problem
D = diag(rowSums(S))
L = D-S
LHS = t(pX)%*%L%*%pX
RHS = t(pX)%*%D%*%pX
# 6. compute Projection Matrix : Lowest Ones
projection = aux.geigen(LHS, RHS, ndim, maximal=FALSE)
#------------------------------------------------------------------------
## RETURN
result = list()
result$Y = pX%*%projection
result$trfinfo = trfinfo
result$projection = projection
return(result)
}
|
# Distributed rbind/cbind regression test (pbdTEST + pbdDMAT).
# Compares base-R rbind()/cbind() on ordinary matrices against the same
# operations on distributed (ddmatrix) copies under MPI.
library(pbdTEST)
settings(mpi=TRUE)
.BLDIM <- 2
# Same seed on every rank (diff=FALSE) so all processes hold identical data.
comm.set.seed(seed=1234, diff=FALSE)
### --------------------------------------
module("r/c-bind")
# Local test matrices: x ~ N(0,1), y ~ N(100,10^2), both n x p.
n <- 1e2
p <- 25
x <- matrix(rnorm(n*p), n, p)
y <- matrix(rnorm(n*p, mean=100, sd=10), n, p)
# Distributed copies of the same matrices.
dx <- as.ddmatrix(x)
dy <- as.ddmatrix(y)
# Each test() passes when 'a' (serial result) matches 'b' (distributed
# result gathered back with as.matrix()).
test("rbind()", {
a <- rbind(x, y)
b <- as.matrix(rbind(dx, dy))
})
test("cbind()", {
a <- cbind(x, y)
b <- as.matrix(cbind(dx, dy))
})
# Report results and shut down MPI.
collect()
finalize()
| /pbdDMAT/inst/tests/3_rbind_cbind.R | no_license | ingted/R-Examples | R | false | false | 461 | r | library(pbdTEST)
settings(mpi=TRUE)
.BLDIM <- 2
comm.set.seed(seed=1234, diff=FALSE)
### --------------------------------------
module("r/c-bind")
n <- 1e2
p <- 25
x <- matrix(rnorm(n*p), n, p)
y <- matrix(rnorm(n*p, mean=100, sd=10), n, p)
dx <- as.ddmatrix(x)
dy <- as.ddmatrix(y)
test("rbind()", {
a <- rbind(x, y)
b <- as.matrix(rbind(dx, dy))
})
test("cbind()", {
a <- cbind(x, y)
b <- as.matrix(cbind(dx, dy))
})
collect()
finalize()
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 169
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 12
c
c Performing E1-Autarky iteration.
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/falsequ_query26_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 133
c no.of clauses 169
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 0
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/falsequ_query26_1344.qdimacs 133 169 E1 [3 5 7 8 10 11 12 13 16 18 20 21 22 23 24 25 26 28 29 30 31 32 34 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 1 6 14 19 61 62 63 64 65 66 67 68] 0 0 0 0 SAT
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/falsequ_query26_1344/falsequ_query26_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 1,130 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 169
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 12
c
c Performing E1-Autarky iteration.
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/falsequ_query26_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 133
c no.of clauses 169
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 0
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/falsequ_query26_1344.qdimacs 133 169 E1 [3 5 7 8 10 11 12 13 16 18 20 21 22 23 24 25 26 28 29 30 31 32 34 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 1 6 14 19 61 62 63 64 65 66 67 68] 0 0 0 0 SAT
|
# Extracted example for EasyMx::emxMeans (from the emxMeans.Rd help page).
library(EasyMx)
### Name: emxMeans
### Title: Create a set of means
### Aliases: emxMeans
### ** Examples
# Create a set of means for the manifest variables x1..x6
# (NOTE(review): the original comment said "covariance matrix", but
# emxMeans builds a means structure).
require(EasyMx)
manVars <- paste0('x', 1:6)
emxMeans(manVars, type='saturated')
| /data/genthat_extracted_code/EasyMx/examples/emxMeans.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 228 | r | library(EasyMx)
### Name: emxMeans
### Title: Create a set of means
### Aliases: emxMeans
### ** Examples
# Create a covariance matrix
require(EasyMx)
manVars <- paste0('x', 1:6)
emxMeans(manVars, type='saturated')
|
#datafiles to test out the function
#popdat <- "~/Documents/My_Documents/Taylor_Lab/Data/My Data/Distruct/ClumppOutput_Pop_K2_globalLatifolia.txt"
#gpsfile <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyGPS_popfileData.csv")
#make the map within this function
source('~/Documents/My_Documents/ProgrammingSoftware/R/Pie/geostRuct/PlotMap.R', chdir = TRUE)
#plot the pies within this function
source('~/Documents/My_Documents/ProgrammingSoftware/R/Pie/geostRuct/PlotPie.R', chdir = TRUE)
#colors
source('~/Documents/My_Documents/ProgrammingSoftware/R/Pie/geostRuct/Colors.R', chdir = TRUE)
clumpp.pie <- function(popfile, gpsfile, pie.map=TRUE){
vals <- read.table(popfile)
firstcol <- 2 # first column is always the IDs, 2nd up until penultimate is data
lastcol <- length(vals)-1
vals[,1] <- unlist(strsplit(as.character(vals$V1), ":")) #need to deconcatenate the colons from the pop IDs
vals[,1] <- as.integer(vals[,1])
#rename data columns for if being plotted below
ncols <- length(names(vals)[firstcol:lastcol])
letter <- rep("s", ncols)
number <- seq(1:ncols)
cnames <- paste0(letter, number)
names(vals)[firstcol:lastcol] <- cnames
vals$tot <- rowSums(vals[,c(firstcol:lastcol)])
names(gpsfile)[which(names(gpsfile)=="pop")] <- "V1" #rename "pop" column as V1 so that it matches CLUMPP column heading, all other column names will be retained. #user must have a lat and lon column specified, other columns are fine
dat <- merge(gpsfile, vals, by="V1")
#if user wants a default map with pie charts spit out
if(pie.map==TRUE){
plot.map(dat)
plot.pie(dat)
}
return(dat) #if user just wants the merged dataset back so that they can tweak the map and plotted points manually, the data is output (in both cases the merged data is output)
}
| /PlotClumppPie.R | no_license | kjgilbert/geostRuct | R | false | false | 1,794 | r |
#datafiles to test out the function
#popdat <- "~/Documents/My_Documents/Taylor_Lab/Data/My Data/Distruct/ClumppOutput_Pop_K2_globalLatifolia.txt"
#gpsfile <- read.csv("~/Documents/My_Documents/ProgrammingSoftware/R/Pie/DummyGPS_popfileData.csv")
#make the map within this function
source('~/Documents/My_Documents/ProgrammingSoftware/R/Pie/geostRuct/PlotMap.R', chdir = TRUE)
#plot the pies within this function
source('~/Documents/My_Documents/ProgrammingSoftware/R/Pie/geostRuct/PlotPie.R', chdir = TRUE)
#colors
source('~/Documents/My_Documents/ProgrammingSoftware/R/Pie/geostRuct/Colors.R', chdir = TRUE)
clumpp.pie <- function(popfile, gpsfile, pie.map=TRUE){
vals <- read.table(popfile)
firstcol <- 2 # first column is always the IDs, 2nd up until penultimate is data
lastcol <- length(vals)-1
vals[,1] <- unlist(strsplit(as.character(vals$V1), ":")) #need to deconcatenate the colons from the pop IDs
vals[,1] <- as.integer(vals[,1])
#rename data columns for if being plotted below
ncols <- length(names(vals)[firstcol:lastcol])
letter <- rep("s", ncols)
number <- seq(1:ncols)
cnames <- paste0(letter, number)
names(vals)[firstcol:lastcol] <- cnames
vals$tot <- rowSums(vals[,c(firstcol:lastcol)])
names(gpsfile)[which(names(gpsfile)=="pop")] <- "V1" #rename "pop" column as V1 so that it matches CLUMPP column heading, all other column names will be retained. #user must have a lat and lon column specified, other columns are fine
dat <- merge(gpsfile, vals, by="V1")
#if user wants a default map with pie charts spit out
if(pie.map==TRUE){
plot.map(dat)
plot.pie(dat)
}
return(dat) #if user just wants the merged dataset back so that they can tweak the map and plotted points manually, the data is output (in both cases the merged data is output)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/salve_client.R
\docType{data}
\name{SlaveClient}
\alias{SlaveClient}
\title{SlaveClient}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
SlaveClient
}
\description{
SlaveClient
}
\keyword{datasets}
| /man/SlaveClient.Rd | no_license | fnaji/rtercen | R | false | true | 303 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/salve_client.R
\docType{data}
\name{SlaveClient}
\alias{SlaveClient}
\title{SlaveClient}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
SlaveClient
}
\description{
SlaveClient
}
\keyword{datasets}
|
# Extracted example for bayesianETAS::simulateETAS (from its .Rd help page).
library(bayesianETAS)
### Name: simulateETAS
### Title: Simulates synthetic data from the ETAS model
### Aliases: simulateETAS
### ** Examples
## Not run:
##D beta <- 2.4; M0 <- 3
##D simulateETAS(0.2, 0.2, 1.5, 0.5, 2, beta, M0, T=500, displayOutput=FALSE)
## End(Not run)
| /data/genthat_extracted_code/bayesianETAS/examples/simulateETAS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 282 | r | library(bayesianETAS)
### Name: simulateETAS
### Title: Simulates synthetic data from the ETAS model
### Aliases: simulateETAS
### ** Examples
## Not run:
##D beta <- 2.4; M0 <- 3
##D simulateETAS(0.2, 0.2, 1.5, 0.5, 2, beta, M0, T=500, displayOutput=FALSE)
## End(Not run)
|
## ASSIGNMENT #1 - PART 1
## pollutantmean(): mean of one pollutant column ('sulfate' or 'nitrate')
## across a set of monitor CSV files.
##   directory : folder containing the monitor CSV files (read in sorted order)
##   pollutant : name of the column to average
##   id        : positions (within the sorted file listing) of files to use
## Returns the mean over all selected files, ignoring missing values.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## list the files in the directory once
  filenames <- list.files(directory, full.names = TRUE)
  ## read every requested file and stack them in one step; this avoids the
  ## quadratic cost of growing a data frame with rbind() inside a loop
  mydata <- do.call(rbind, lapply(filenames[id], read.csv))
  ## mean of the requested pollutant, with NAs removed
  mean(mydata[, pollutant], na.rm = TRUE)
}
## PART 2 - Number of complete observations (rows without any NA) per file.
##   directory : folder containing the monitor CSV files (read in sorted order)
##   id        : positions (within the sorted file listing) of files to use
## Returns a data frame with one row per requested file: columns 'id' and
## 'nobs' (the count of complete cases in that file).
complete <- function(directory, id = 1:332) {
  ## list the files in the directory once
  filenames <- list.files(directory, full.names = TRUE)
  ## count complete cases per requested file; vapply yields a plain integer
  ## vector instead of growing a data frame row by row inside a loop
  nobs <- vapply(id,
                 function(i) sum(complete.cases(read.csv(filenames[i]))),
                 integer(1))
  data.frame(id = id, nobs = nobs)
}
## Part 3 - Correlation between sulfate and nitrate for every monitor file
## whose number of complete observations exceeds 'threshold'.
##   directory : folder containing the monitor CSV files
##   threshold : minimum number of complete cases a file must have before
##               its sulfate/nitrate correlation is computed
## Returns head() of the numeric vector of correlations, one per qualifying
## file. NOTE(review): head() discards all but the first six values; it is
## kept for backward compatibility, but looks like leftover debugging.
corr <- function(directory, threshold = 0) {
  correls <- numeric(0)
  ## list of files to read
  filenames <- list.files(directory, full.names = TRUE)
  ## iterate over however many files are actually present; the original
  ## hard-coded 1:332, which errors for directories of any other size
  for (i in seq_along(filenames)) {
    polli <- read.csv(filenames[i])
    polli_total <- sum(complete.cases(polli))
    ## only compute the correlation when the file passes the threshold
    if (polli_total > threshold) {
      correls <- c(correls, cor(polli$sulfate, polli$nitrate,
                                use = "complete.obs"))
    }
  }
  ## return just the head of the correls vector (see NOTE above)
  return(head(correls))
}
| /specdata/Assign1Final.R | no_license | vernatoy/Datasciencecoursera | R | false | false | 1,853 | r | ## ASSIGNMENT #1 - PART 1
pollutantmean <- function(directory, pollutant, id = 1:332) {
path = directory
##create a data frame for files
mydata <- data.frame()
##read the list of files from wd
filenames<- list.files(directory, full.names = TRUE)
##read the files and put them together into one dataset, 'mydata'
for (i in id) {
mydata <- rbind(mydata, read.csv(filenames[i]))
}
##Find the mean of the pollutant
mean(mydata[, pollutant], na.rm = TRUE)
}
##PART 2 - Number of Complete observations for each file
complete<- function(directory, id = 1:332) {
path = directory
findat<- data.frame()
##Get list files to read into a dataframe
filenames<-(list.files(directory,full.names = TRUE))
##Find the number of complete cases in each file
for (i in id) {
mydat<- read.csv(filenames[i])
nobs<-sum(complete.cases(mydat))
tempdata<-data.frame(i,nobs)
findat<-rbind(findat,tempdata)
}
colnames(findat)<- c("id", "nobs")
findat
}
## Part 3 -
corr <- function(directory, threshold = 0){
path = directory
correls<-numeric(0)
##Get list of files to read
filenames<- list.files(directory, full.names = TRUE)
##read files and find number of comlete cases in each file
for(i in 1:332){
polli<- read.csv(filenames[i])
polli_total<-sum(complete.cases(polli))
##Compare files to threshold to determine if we will compute the correlations on this file
##If yeas we'll do the computation and add the result to a vector 'correls'
if(polli_total > threshold){
correls<- c(correls, cor(polli$sulfate, polli$nitrate, use = "complete.obs"))
}
}
##This will return just the head of the correls file
return(head(correls))
}
|
#' Surname probability merging function.
#'
#' \code{merge_surnames} merges surnames in user-input dataset with corresponding
#' race/ethnicity probabilities from U.S. Census Surname List and Spanish Surname List.
#'
#' This function allows users to match surnames in their dataset with the U.S.
#' Census Surname List (from 2000 or 2010) and Spanish Surname List to obtain
#' Pr(Race | Surname) for each of the five major racial groups.
#'
#' By default, the function matches surnames to the Census list as follows:
#' 1) Search raw surnames in Census surname list;
#' 2) Remove any punctuation and search again;
#' 3) Remove any spaces and search again;
#' 4) Remove suffixes (e.g., Jr) and search again;
#' 5) Split double-barreled surnames into two parts and search first part of name;
#' 6) Split double-barreled surnames into two parts and search second part of name;
#' 7) For any remaining names, impute probabilities using distribution
#' for all names not appearing on Census list.
#'
#' Each step only applies to surnames not matched in a previous step.
#' Steps 2 through 7 are not applied if \code{clean.surname} is FALSE.
#'
#' Note: Any name appearing only on the Spanish Surname List is assigned a
#' probability of 1 for Hispanics/Latinos and 0 for all other racial groups.
#'
#' @param voter.file An object of class \code{data.frame}. Must contain a field
#' named 'surname' containing list of surnames to be merged with Census lists.
#' @param surname.year An object of class \code{numeric} indicating which year
#' Census Surname List is from. Accepted values are \code{2010} and \code{2000}.
#' Default is \code{2010}.
#' @param clean.surname A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' any surnames in \code{\var{voter.file}} that cannot initially be matched
#' to surname lists will be cleaned, according to U.S. Census specifications,
#' in order to increase the chance of finding a match. Default is \code{TRUE}.
#' @param impute.missing A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' race/ethnicity probabilities will be imputed for unmatched names using
#' race/ethnicity distribution for all other names (i.e., not on Census List).
#' Default is \code{TRUE}.
#' @return Output will be an object of class \code{data.frame}. It will
#' consist of the original user-input data with additional columns that
#' specify the part of the name matched with Census data (\code{\var{surname.match}}),
#' and the probabilities Pr(Race | Surname) for each racial group
#' (\code{\var{p_whi}} for White, \code{\var{p_bla}} for Black,
#' \code{\var{p_his}} for Hispanic/Latino,
#' \code{\var{p_asi}} for Asian and Pacific Islander, and
#' \code{\var{p_oth}} for Other/Mixed).
#'
#' @import devtools
#'
#' @examples
#' data(voters)
#' merge_surnames(voters)
#'
#' @export
merge_surnames <- function(voter.file, surname.year = 2010, clean.surname = T, impute.missing = T) {
if ("surname" %in% names(voter.file) == F) {
stop('Data does not contain surname field.')
}
## Census Surname List
if (surname.year == 2000) {
surnames2000$surname <- as.character(surnames2000$surname)
surnames <- surnames2000
} else {
surnames$surname <- as.character(surnames$surname)
}
p_eth <- c("p_whi", "p_bla", "p_his", "p_asi", "p_oth")
## Convert Surnames in Voter File to Upper Case
df <- voter.file
df$caseid <- 1:nrow(df)
df$surname.match <- df$surname.upper <- toupper(as.character(df$surname))
## Merge Surnames with Census List (No Cleaning Yet)
df <- merge(df[names(df) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df[df$surname.upper %in% surnames$surname == F, ]) == 0) {
return(df[order(df$caseid), c(names(voter.file), "surname.match", p_eth)])
}
df[df$surname.upper %in% surnames$surname == F, ]$surname.match <- ""
df1 <- df[df$surname.upper %in% surnames$surname, ] #Matched surnames
df2 <- df[df$surname.upper %in% surnames$surname == F, ] #Unmatched surnames
## Clean Surnames (if Specified by User)
if (clean.surname) {
## Remove All Punctuation and Try Merge Again
df2$surname.match <- gsub("[^[:alnum:] ]", "", df2$surname.upper)
df2 <- merge(df2[names(df2) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Remove All Spaces and Try Merge Again
df2$surname.match <- gsub(" ", "", df2$surname.match)
df2 <- merge(df2[names(df2) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Remove Jr/Sr/III Suffixes
suffix <- c("JUNIOR", "SENIOR", "THIRD", "III", "JR", " II", " J R", " S R", " IV")
for (i in 1:length(suffix)) {
df2$surname.match <- ifelse(substr(df2$surname.match, nchar(df2$surname.match) - (nchar(suffix)[i] - 1), nchar(df2$surname.match)) == suffix[i],
substr(df2$surname.match, 1, nchar(df2$surname.match) - nchar(suffix)[i]),
df2$surname.match)
}
df2$surname.match <- ifelse(nchar(df2$surname.match) >= 7,
ifelse(substr(df2$surname.match, nchar(df2$surname.match) - 1, nchar(df2$surname.match)) == "SR",
substr(df2$surname.match, 1, nchar(df2$surname.match) - 2),
df2$surname.match),
df2$surname.match) #Remove "SR" only if name has at least 7 characters
df2 <- merge(df2[names(df2) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Names with Hyphens or Spaces, e.g. Double-Barreled Names
df2$surname2 <- df2$surname1 <- NA
df2$surname1[grep("-", df2$surname.upper)] <- sapply(strsplit(grep("-", df2$surname.upper, value = T), "-"), "[", 1)
df2$surname2[grep("-", df2$surname.upper)] <- sapply(strsplit(grep("-", df2$surname.upper, value = T), "-"), "[", 2)
df2$surname1[grep(" ", df2$surname.upper)] <- sapply(strsplit(grep(" ", df2$surname.upper, value = T), " "), "[", 1)
df2$surname2[grep(" ", df2$surname.upper)] <- sapply(strsplit(grep(" ", df2$surname.upper, value = T), " "), "[", 2)
## Use first half of name to merge in priors
df2$surname.match <- as.character(df2$surname1)
df2 <- merge(df2[names(df2) %in% c(p_eth) == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)[names(df2)]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, names(df2) %in% names(df1)])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
## Use second half of name to merge in priors for rest
df2$surname.match <- as.character(df2$surname2)
df2 <- merge(df2[names(df2) %in% c(p_eth, "surname1", "surname2") == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)[names(df2) %in% c("surname1", "surname2") == F]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, names(df2) %in% names(df1)])
df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
}
}
## Impute priors for names not on Census lists
if (impute.missing) {
if (nrow(df2) > 0) {
df2$surname.match <- ""
df2$p_whi <- .6665; df2$p_bla <- .0853; df2$p_his <- .1367; df2$p_asi <- .0797; df2$p_oth <- .0318
warning(paste("Probabilities were imputed for", nrow(df2), ifelse(nrow(df2) == 1, "surname", "surnames"), "that could not be matched to Census list."))
}
} else warning(paste(nrow(df2), ifelse(nrow(df2) == 1, "surname was", "surnames were"), "not matched."))
df <- rbind(df1, df2)
return(df[order(df$caseid), c(names(voter.file), "surname.match", p_eth)])
}
| /R/merge_surnames.R | no_license | avesh-krishna/wru | R | false | false | 9,100 | r | #' Surname probability merging function.
#'
#' \code{merge_surnames} merges surnames in user-input dataset with corresponding
#' race/ethnicity probabilities from U.S. Census Surname List and Spanish Surname List.
#'
#' This function allows users to match surnames in their dataset with the U.S.
#' Census Surname List (from 2000 or 2010) and Spanish Surname List to obtain
#' Pr(Race | Surname) for each of the five major racial groups.
#'
#' By default, the function matches surnames to the Census list as follows:
#' 1) Search raw surnames in Census surname list;
#' 2) Remove any punctuation and search again;
#' 3) Remove any spaces and search again;
#' 4) Remove suffixes (e.g., Jr) and search again;
#' 5) Split double-barreled surnames into two parts and search first part of name;
#' 6) Split double-barreled surnames into two parts and search second part of name;
#' 7) For any remaining names, impute probabilities using distribution
#' for all names not appearing on Census list.
#'
#' Each step only applies to surnames not matched in a previous step.
#' Steps 2 through 7 are not applied if \code{clean.surname} is FALSE.
#'
#' Note: Any name appearing only on the Spanish Surname List is assigned a
#' probability of 1 for Hispanics/Latinos and 0 for all other racial groups.
#'
#' @param voter.file An object of class \code{data.frame}. Must contain a field
#' named 'surname' containing list of surnames to be merged with Census lists.
#' @param surname.year An object of class \code{numeric} indicating which year
#' Census Surname List is from. Accepted values are \code{2010} and \code{2000}.
#' Default is \code{2010}.
#' @param clean.surname A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' any surnames in \code{\var{voter.file}} that cannot initially be matched
#' to surname lists will be cleaned, according to U.S. Census specifications,
#' in order to increase the chance of finding a match. Default is \code{TRUE}.
#' @param impute.missing A \code{TRUE}/\code{FALSE} object. If \code{TRUE},
#' race/ethnicity probabilities will be imputed for unmatched names using
#' race/ethnicity distribution for all other names (i.e., not on Census List).
#' Default is \code{TRUE}.
#' @return Output will be an object of class \code{data.frame}. It will
#' consist of the original user-input data with additional columns that
#' specify the part of the name matched with Census data (\code{\var{surname.match}}),
#' and the probabilities Pr(Race | Surname) for each racial group
#' (\code{\var{p_whi}} for White, \code{\var{p_bla}} for Black,
#' \code{\var{p_his}} for Hispanic/Latino,
#' \code{\var{p_asi}} for Asian and Pacific Islander, and
#' \code{\var{p_oth}} for Other/Mixed).
#'
#' @import devtools
#'
#' @examples
#' data(voters)
#' merge_surnames(voters)
#'
#' @export
merge_surnames <- function(voter.file, surname.year = 2010, clean.surname = T, impute.missing = T) {
  ## Merge user-supplied surnames with the Census surname list to obtain
  ## Pr(race | surname) columns. Rows are returned in the original order with
  ## the matched name in `surname.match` plus the five probability columns.
  ## NOTE(review): relies on package datasets `surnames` / `surnames2000`
  ## being visible in the evaluation environment -- confirm they are lazy-loaded.
  if ("surname" %in% names(voter.file) == F) {
    stop('Data does not contain surname field.')
  }
  ## Census Surname List (2000 or 2010 vintage)
  if (surname.year == 2000) {
    surnames2000$surname <- as.character(surnames2000$surname)
    surnames <- surnames2000
  } else {
    surnames$surname <- as.character(surnames$surname)
  }
  # Names of the five Pr(race | surname) probability columns
  p_eth <- c("p_whi", "p_bla", "p_his", "p_asi", "p_oth")
  ## Convert Surnames in Voter File to Upper Case
  df <- voter.file
  df$caseid <- 1:nrow(df)  # remember original row order; restored before returning
  df$surname.match <- df$surname.upper <- toupper(as.character(df$surname))
  ## Merge Surnames with Census List (No Cleaning Yet)
  df <- merge(df[names(df) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
  # If every raw surname matched, return immediately
  if (nrow(df[df$surname.upper %in% surnames$surname == F, ]) == 0) {
    return(df[order(df$caseid), c(names(voter.file), "surname.match", p_eth)])
  }
  df[df$surname.upper %in% surnames$surname == F, ]$surname.match <- ""
  df1 <- df[df$surname.upper %in% surnames$surname, ] #Matched surnames
  df2 <- df[df$surname.upper %in% surnames$surname == F, ] #Unmatched surnames
  ## Clean Surnames (if Specified by User); each step below applies only to
  ## rows still unmatched (df2), and newly matched rows migrate into df1
  if (clean.surname) {
    ## Remove All Punctuation and Try Merge Again
    df2$surname.match <- gsub("[^[:alnum:] ]", "", df2$surname.upper)
    df2 <- merge(df2[names(df2) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
    if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
      df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
      df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
      if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
    }
    ## Remove All Spaces and Try Merge Again
    df2$surname.match <- gsub(" ", "", df2$surname.match)
    df2 <- merge(df2[names(df2) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
    if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
      df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
      df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
      if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
    }
    ## Remove Jr/Sr/III Suffixes (strip a suffix only when the name ends with it)
    suffix <- c("JUNIOR", "SENIOR", "THIRD", "III", "JR", " II", " J R", " S R", " IV")
    for (i in 1:length(suffix)) {
      df2$surname.match <- ifelse(substr(df2$surname.match, nchar(df2$surname.match) - (nchar(suffix)[i] - 1), nchar(df2$surname.match)) == suffix[i],
                                  substr(df2$surname.match, 1, nchar(df2$surname.match) - nchar(suffix)[i]),
                                  df2$surname.match)
    }
    df2$surname.match <- ifelse(nchar(df2$surname.match) >= 7,
                                ifelse(substr(df2$surname.match, nchar(df2$surname.match) - 1, nchar(df2$surname.match)) == "SR",
                                       substr(df2$surname.match, 1, nchar(df2$surname.match) - 2),
                                       df2$surname.match),
                                df2$surname.match) #Remove "SR" only if name has at least 7 characters
    df2 <- merge(df2[names(df2) %in% p_eth == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)
    if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
      df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, ])
      df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
      if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
    }
    ## Names with Hyphens or Spaces, e.g. Double-Barreled Names:
    ## split on the first "-" or " " into surname1 / surname2
    df2$surname2 <- df2$surname1 <- NA
    df2$surname1[grep("-", df2$surname.upper)] <- sapply(strsplit(grep("-", df2$surname.upper, value = T), "-"), "[", 1)
    df2$surname2[grep("-", df2$surname.upper)] <- sapply(strsplit(grep("-", df2$surname.upper, value = T), "-"), "[", 2)
    df2$surname1[grep(" ", df2$surname.upper)] <- sapply(strsplit(grep(" ", df2$surname.upper, value = T), " "), "[", 1)
    df2$surname2[grep(" ", df2$surname.upper)] <- sapply(strsplit(grep(" ", df2$surname.upper, value = T), " "), "[", 2)
    ## Use first half of name to merge in priors
    df2$surname.match <- as.character(df2$surname1)
    df2 <- merge(df2[names(df2) %in% c(p_eth) == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)[names(df2)]
    if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
      df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, names(df2) %in% names(df1)])
      df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
      if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
    }
    ## Use second half of name to merge in priors for rest
    df2$surname.match <- as.character(df2$surname2)
    df2 <- merge(df2[names(df2) %in% c(p_eth, "surname1", "surname2") == F], surnames[c("surname", p_eth)], by.x = "surname.match", by.y = "surname", all.x = TRUE)[names(df2) %in% c("surname1", "surname2") == F]
    if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {
      df1 <- rbind(df1, df2[df2$surname.match %in% surnames$surname, names(df2) %in% names(df1)])
      df2 <- df2[df2$surname.match %in% surnames$surname == F, ]
      if (nrow(df2[df2$surname.match %in% surnames$surname, ]) > 0) {df2$surname.match <- ""}
    }
  }
  ## Impute priors for names not on Census lists
  if (impute.missing) {
    if (nrow(df2) > 0) {
      df2$surname.match <- ""
      # Hard-coded fallback probabilities used for all unmatched surnames
      df2$p_whi <- .6665; df2$p_bla <- .0853; df2$p_his <- .1367; df2$p_asi <- .0797; df2$p_oth <- .0318
      warning(paste("Probabilities were imputed for", nrow(df2), ifelse(nrow(df2) == 1, "surname", "surnames"), "that could not be matched to Census list."))
    }
  } else warning(paste(nrow(df2), ifelse(nrow(df2) == 1, "surname was", "surnames were"), "not matched."))
  df <- rbind(df1, df2)
  # Restore the original row order and column layout
  return(df[order(df$caseid), c(names(voter.file), "surname.match", p_eth)])
}
|
#' @export
#' @title Convert CMAQ NetCDF output to modernized NetCDF
#'
#' @param filePath Absolute path of file to be converted.
#' @param clean Logical specifying whether to remove the original netcdf file.
#'
#' @description Converts CMAQ model output from its original format to a more
#' modernized NetCDF format with dimension axes for longitude, latitude,
#' elevation and time. With default settings, output files renamed to
#' ~base~_v2.nc.
#'
#' @note Users will typically call \code{cmaq_load()} which in turn calls
#' this function.
#'
#' @return Absolute path of the converted NetCDF file.
#'
#' @examples
#' \dontrun{
#' library(AirFireModeling)
#' setModelDataDir('~/Data/BlueSky')
#'
#' filePath <- bluesky_download(model = "PNW-4km", modelRun = 2019100900)
#' cmaq_toCommonFormat(filePath)
#' bluesky_downloaded()
#' }
cmaq_toCommonFormat <- function(
  filePath = NULL,
  clean = TRUE
) {

  # Convert a raw CMAQ NetCDF file into the package's "v2" format with proper
  # lon/lat/elevation/time dimension axes. Returns the path of the new file.

  # ----- Validate parameters --------------------------------------------------

  MazamaCoreUtils::stopIfNull(filePath)

  # Coerce any non-logical 'clean' value to the safe default
  if ( !is.logical(clean) )
    clean <- TRUE

  # Nothing to do if this is already a converted (v2) file
  if ( grepl(x = filePath, pattern = '.+_v2.nc$') )
    return(filePath)

  # ----- Open NetCDF file -----------------------------------------------------

  # Create old and new file paths
  rawFilePath <- filePath
  v2FilePath <- stringr::str_replace(rawFilePath, "\\.nc$", "_v2.nc")

  # Open nc file
  raw_nc <- ncdf4::nc_open(rawFilePath)

  # ----- Create latitude and longitude axes -----------------------------------

  # Current (index) values
  row <- raw_nc$dim$LAT$vals
  col <- raw_nc$dim$LON$vals
  lay <- raw_nc$dim$LAY$vals # LAYERS (height)

  # Useful information is found in the global attributes
  global_attributes <- ncdf4::ncatt_get(raw_nc, varid = 0) # varid=0 means 'global'

  # NOTE: global_attributes is a list; access elements with 'listName[["name"]]'
  XORIG <- global_attributes[["XORIG"]] # x origin
  YORIG <- global_attributes[["YORIG"]] # y origin
  XCENT <- global_attributes[["XCENT"]] # x center
  YCENT <- global_attributes[["YCENT"]] # y center
  ZLVLS <- global_attributes[["VGLVLS"]]

  # Now we have enough information about the domain to figure out W, E, S, N
  w <- XORIG
  e <- XORIG + 2 * abs(XCENT - XORIG)
  s <- YORIG
  n <- YORIG + 2 * (YCENT - YORIG)

  # Knowing the grid dimensions and the true edges, we can define legitimate
  # lat/lon dimensions
  lat <- seq(s, n, length.out = length(row))
  lon <- seq(w, e, length.out = length(col))
  lvl <- ZLVLS[1:length(lay)]

  # ----- Create time axis -----------------------------------------------------

  # Temporal information is stored in the 'TFLAG' variable
  tflag <- ncdf4::ncvar_get(raw_nc, "TFLAG")

  # NOTE: 'tflag' is a matrix with one row containing the year and Julian day
  # NOTE: and another containing the time in HHMMSS format. sprintf() pads the
  # NOTE: time with leading zeros to six characters before pasting.
  time_str <- paste0(tflag[1,], sprintf(fmt = "%06d", tflag[2,]))

  # Create POSIXct time
  time <- tryCatch(
    expr = {
      MazamaCoreUtils::parseDatetime(time_str,
                                     timezone = "UTC",
                                     isJulian = TRUE)
    },
    warning = function(w) {
      warning(w)
      message('Error Parsing NetCDF data: Corrupt Download.')
      message('If problem persists, try deleting the NetCDF and downloading again.')
      NULL  # explicit sentinel checked below
    }
  )

  # Fail early with a clear message rather than writing a broken v2 file
  if ( is.null(time) ) {
    ncdf4::nc_close(raw_nc)
    stop('Unable to parse times from NetCDF file: Corrupt Download.', call. = FALSE)
  }

  # ----- Create new ncdf4 object ----------------------------------------------

  # NOTE: The degenerate 'LAY' dimension disappears so that 'pm25' is now 2- or
  # NOTE: 3-D, not 3- or 4-D.

  # Get PM25 values
  pm25 <- ncdf4::ncvar_get(raw_nc, "PM25")

  # BUG FIX: close the source file once all data has been read so the handle
  # is released before the file is (optionally) deleted below
  ncdf4::nc_close(raw_nc)

  # Convert time to numeric value for storing purposes
  numericTime <- as.numeric(time)

  # Define dimensions
  latDim <- ncdf4::ncdim_def("lat", "Degrees North", lat)
  lonDim <- ncdf4::ncdim_def("lon", "Degrees East", lon)
  lvlDim <- ncdf4::ncdim_def("elevation", "Meters from sea level", lvl)
  timeDim <- ncdf4::ncdim_def("time", "seconds from 1970-1-1", numericTime)

  # Define variables
  pm25Var <- ncdf4::ncvar_def(
    name = "PM25",
    units = "ug/m^3",
    dim = list(lonDim, latDim, lvlDim, timeDim),
    missval = -1e30
  )

  # Create a new netcdf file
  nc <- ncdf4::nc_create(v2FilePath, pm25Var)

  # Put data into the newly defined variable
  ncdf4::ncvar_put(nc, pm25Var, pm25)

  # Close the file
  ncdf4::nc_close(nc)

  # Optionally remove the original file
  if (clean) {
    unlink(rawFilePath)
  }

  # ----- Return ---------------------------------------------------------------

  return(v2FilePath)

}
| /local_jon/cmaq_toCommonFormat.R | no_license | MazamaScience/AirFireModeling | R | false | false | 5,184 | r | #' @export
#' @title Convert CMAQ NetCDF output to modernized NetCDF
#'
#' @param filePath Absolute path of file to be converted.
#' @param clean Logical specifying whether to remove the original netcdf file.
#'
#' @description Converts CMAQ model output from its original format to a more
#' modernized NetCDF format with dimension axes for longitude, latitude,
#' elevation and time. With default settings, output files renamed to
#' ~base~_v2.nc.
#'
#' @note Users will typically call \code{cmaq_load()} which in turn calls
#' this function.
#'
#' @return Absolute path of the converted NetCDF file.
#'
#' @examples
#' \dontrun{
#' library(AirFireModeling)
#' setModelDataDir('~/Data/BlueSky')
#'
#' filePath <- bluesky_download(model = "PNW-4km", modelRun = 2019100900)
#' cmaq_toCommonFormat(filePath)
#' bluesky_downloaded()
#' }
cmaq_toCommonFormat <- function(
  filePath = NULL,
  clean = TRUE
) {
  # Convert a raw CMAQ NetCDF file into the package's "v2" format with proper
  # lon/lat/elevation/time dimension axes; returns the new file's path.
  # ----- Validate parameters --------------------------------------------------
  MazamaCoreUtils::stopIfNull(filePath)
  # Non-logical 'clean' values are coerced to the default TRUE
  if ( !is.logical(clean) )
    clean <- TRUE
  # Check for v2 filePath: already-converted files are returned as is
  if ( grepl(x = filePath, pattern = '.+_v2.nc$') )
    return(filePath)
  # ----- Open NetCDF file -----------------------------------------------------
  # Create old and new file paths
  rawFilePath <- filePath
  v2FilePath <- stringr::str_replace(rawFilePath, "\\.nc$", "_v2.nc")
  # Open nc file
  # NOTE(review): raw_nc is never closed with nc_close() before the unlink()
  # NOTE(review): below -- consider closing it once all data has been read.
  raw_nc <- ncdf4::nc_open(rawFilePath)
  # ----- Create latitude and longitude axes -----------------------------------
  # Current (index) values
  row <- raw_nc$dim$LAT$vals
  col <- raw_nc$dim$LON$vals
  lay <- raw_nc$dim$LAY$vals # LAYERS (height)
  # Useful information is found in the global attributes
  global_attributes <- ncdf4::ncatt_get(raw_nc, varid = 0) # varid=0 means 'global'
  # NOTE: Use names(global_attributes) to see the names of the elements
  # NOTE: contained in this list
  # NOTE: global_attributes is of class 'list'
  # NOTE: Access list elements with either 'listName[[objectName]]' or
  # NOTE: 'listName$objectName' notation
  XORIG <- global_attributes[["XORIG"]] # x origin
  YORIG <- global_attributes[["YORIG"]] # y origin
  XCENT <- global_attributes[["XCENT"]] # x center
  YCENT <- global_attributes[["YCENT"]] # y center
  ZLVLS <- global_attributes[["VGLVLS"]]
  # Now we have enough information about the domain to figure out W, E, S, N
  w <- XORIG
  e <- XORIG + 2 * abs(XCENT - XORIG)
  s <- YORIG
  n <- YORIG + 2 * (YCENT - YORIG)
  # Knowing the grid dimensions and the true edges, we can define legitimate
  # lat/lon dimensions
  lat <- seq(s, n, length.out = length(row))
  lon <- seq(w, e, length.out = length(col))
  lvl <- ZLVLS[1:length(lay)]
  # NOTE: We could have just grabbed lat and lon from raw_nc$dim$LAT$vals, ...
  # NOTE: at the top of this section. But everything agrees and we keep it this
  # NOTE: way so that it more closely matches the code in bluesky_toCommonFormat().
  # ----- Create time axis -----------------------------------------------------
  # Temporal information is stored in the 'TFLAG' variable
  tflag <- ncdf4::ncvar_get(raw_nc, "TFLAG")
  # NOTE: 'tflag' is a matrix object with two rows, one containing the year and
  # NOTE: Julian day, the other containing time in HHMMSS format. We will paste
  # NOTE: matrix elements together with 'paste()'. The 'sprintf()' function is
  # NOTE: useful for C-style string formatting. Here we use it to add leading
  # NOTE: 0s to create a string that is six characters long.
  time_str <- paste0(tflag[1,], sprintf(fmt = "%06d", tflag[2,]))
  # Create POSIXct time
  # NOTE(review): only warnings are caught here; on a warning the handler's
  # NOTE(review): last message() call returns invisible NULL, so 'time' becomes
  # NOTE(review): NULL and the conversion fails further down -- confirm intended.
  time <- tryCatch(
    expr = {
      MazamaCoreUtils::parseDatetime(time_str,
                                     timezone = "UTC",
                                     isJulian = TRUE)
    },
    warning = function(e) {
      warning(e)
      message('Error Parsing NetCDF data: Corrupt Download.')
      message('If problem persists, try deleting the NetCDF and downloading again.')
    }
  )
  # ----- Create new ncdf4 object ----------------------------------------------
  # NOTE: The degenerate 'LAY' dimension disappears so that 'pm25' is now 2- or
  # NOTE: 3-D, not 3- or 4-D.
  # Get PM25 values
  pm25 <- ncdf4::ncvar_get(raw_nc, "PM25")
  # Convert time to numeric value for storing purposes
  numericTime <- as.numeric(time)
  # Define dimensions
  latDim <- ncdf4::ncdim_def("lat", "Degrees North", lat)
  lonDim <- ncdf4::ncdim_def("lon", "Degrees East", lon)
  lvlDim <- ncdf4::ncdim_def("elevation", "Meters from sea level", lvl)
  timeDim <- ncdf4::ncdim_def("time", "seconds from 1970-1-1", numericTime)
  # Define variables
  pm25Var <- ncdf4::ncvar_def(
    name = "PM25",
    units = "ug/m^3",
    dim = list(lonDim, latDim, lvlDim, timeDim),
    missval = -1e30
  )
  # Create a new netcdf file
  nc <- ncdf4::nc_create(v2FilePath, pm25Var)
  # Put data into the newly defined variable
  ncdf4::ncvar_put(nc, pm25Var, pm25)
  # Close the file
  ncdf4::nc_close(nc)
  # Optionally remove the original file
  if (clean) {
    unlink(rawFilePath)
  }
  # ----- Return ---------------------------------------------------------------
  return(v2FilePath)
}
|
#' Evaluate, compare, benchmark operations of a set of srcs.
#'
#' These functions support the comparison of results and timings across
#' multiple sources.
#'
#' @param tbls A list of \code{\link{tbl}}s.
#' @param op A function with a single argument, called often with each
#' element of \code{tbls}.
#' @param ref For checking, a data frame to test results against. If not
#' supplied, defaults to the results from the first \code{src}.
#' @param compare A function used to compare the results. Defaults to
#' \code{equal_data_frame} which ignores the order of rows and columns.
#' @param times For benchmarking, the number of times each operation is
#' repeated.
#' @param \dots
#' For \code{compare_tbls}: additional parameters passed on the
#' \code{compare} function
#'
#' For \code{bench_tbls}: additional benchmarks to run.
#' @return
#' \code{eval_tbls}: a list of data frames.
#'
#' \code{compare_tbls}: an invisible \code{TRUE} on success, otherwise
#' an error is thrown.
#'
#' \code{bench_tbls}: an object of class
#' \code{\link[microbenchmark]{microbenchmark}}
#' @seealso \code{\link{src_local}} for working with local data
#' @examples
#' if (require("Lahman") && require("microbenchmark")) {
#' lahman_local <- lahman_srcs("df", "dt", "cpp")
#' teams <- lapply(lahman_local, function(x) x %.% tbl("Teams"))
#'
#' compare_tbls(teams, function(x) x %.% filter(yearID == 2010))
#' bench_tbls(teams, function(x) x %.% filter(yearID == 2010))
#'
#' # You can also supply arbitrary additional arguments to bench_tbls
#' # if there are other operations you'd like to compare.
#' bench_tbls(teams, function(x) x %.% filter(yearID == 2010),
#' base = subset(Teams, yearID == 2010))
#'
#' # A more complicated example using multiple tables
#' setup <- function(src) {
#' list(
#' src %.% tbl("Batting") %.% filter(stint == 1) %.% select(playerID:H),
#' src %.% tbl("Master") %.% select(playerID, birthYear)
#' )
#' }
#' two_tables <- lapply(lahman_local, setup)
#'
#' op <- function(tbls) {
#' semi_join(tbls[[1]], tbls[[2]], by = "playerID")
#' }
#' # compare_tbls(two_tables, op)
#' bench_tbls(two_tables, op, times = 2)
#'
#' }
#' @name bench_compare
NULL
#' @export
#' @rdname bench_compare
bench_tbls <- function(tbls, op, ..., times = 10) {
  # Benchmark `op` applied to each element of `tbls` with microbenchmark.
  #
  # requireNamespace() loads the package without attaching it (no search-path
  # side effect, unlike require()); the constructed call below is
  # namespace-qualified so it still resolves without attachment.
  if (!requireNamespace("microbenchmark", quietly = TRUE)) {
    stop("Please install the microbenchmark package", call. = FALSE)
  }

  # Generate one call per tbl so microbenchmark labels each timing separately
  calls <- lapply(seq_along(tbls), function(i) {
    substitute(op(tbls[[i]]), list(i = i))
  })
  names(calls) <- names(tbls)

  # Build and evaluate:
  #   microbenchmark::microbenchmark(<calls>, <dots>, times = times)
  mb <- as.call(c(quote(microbenchmark::microbenchmark), calls, dots(...),
    list(times = times)))
  eval(mb)
}
#' @export
#' @rdname bench_compare
compare_tbls <- function(tbls, op, ref = NULL, compare = equal_data_frame, ...) {
  # Evaluate `op` on every tbl and assert that all results agree.
  #
  # The reference is either `ref` (if supplied) or the result from the first
  # tbl. Each remaining result is checked with `compare()`, whose optional
  # "comment" attribute is surfaced in the failure message.
  if (length(tbls) < 2 && is.null(ref)) {
    stop("Need at least two srcs to compare", call. = FALSE)
  }
  # requireNamespace() loads testthat without attaching it (no search-path
  # side effect, unlike require()); the expectation below is qualified.
  if (!requireNamespace("testthat", quietly = TRUE)) {
    stop("Please install the testthat package", call. = FALSE)
  }

  results <- eval_tbls(tbls, op)

  # Choose the reference result and the set to compare against it
  if (is.null(ref)) {
    ref <- results[[1]]
    ref_name <- names(results)[1]
    rest <- results[-1]
  } else {
    rest <- results
    ref_name <- "supplied comparison"
  }

  for (i in seq_along(rest)) {
    ok <- compare(ref, rest[[i]], ...)
    msg <- paste0(names(rest)[[i]], " not equal to ", ref_name, "\n",
      attr(ok, "comment"))
    testthat::expect_true(ok, info = msg)
  }

  invisible(TRUE)
}
#' @export
#' @rdname bench_compare
eval_tbls <- function(tbls, op) {
  # Apply `op` to every tbl and coerce each result to a plain data frame.
  run_one <- function(tbl) {
    as.data.frame(op(tbl))
  }
  lapply(tbls, run_one)
}
| /R/bench-compare.r | no_license | kevinushey/dplyr | R | false | false | 3,660 | r | #' Evaluate, compare, benchmark operations of a set of srcs.
#'
#' These functions support the comparison of results and timings across
#' multiple sources.
#'
#' @param tbls A list of \code{\link{tbl}}s.
#' @param op A function with a single argument, called often with each
#' element of \code{tbls}.
#' @param ref For checking, a data frame to test results against. If not
#' supplied, defaults to the results from the first \code{src}.
#' @param compare A function used to compare the results. Defaults to
#' \code{equal_data_frame} which ignores the order of rows and columns.
#' @param times For benchmarking, the number of times each operation is
#' repeated.
#' @param \dots
#' For \code{compare_tbls}: additional parameters passed on the
#' \code{compare} function
#'
#' For \code{bench_tbls}: additional benchmarks to run.
#' @return
#' \code{eval_tbls}: a list of data frames.
#'
#' \code{compare_tbls}: an invisible \code{TRUE} on success, otherwise
#' an error is thrown.
#'
#' \code{bench_tbls}: an object of class
#' \code{\link[microbenchmark]{microbenchmark}}
#' @seealso \code{\link{src_local}} for working with local data
#' @examples
#' if (require("Lahman") && require("microbenchmark")) {
#' lahman_local <- lahman_srcs("df", "dt", "cpp")
#' teams <- lapply(lahman_local, function(x) x %.% tbl("Teams"))
#'
#' compare_tbls(teams, function(x) x %.% filter(yearID == 2010))
#' bench_tbls(teams, function(x) x %.% filter(yearID == 2010))
#'
#' # You can also supply arbitrary additional arguments to bench_tbls
#' # if there are other operations you'd like to compare.
#' bench_tbls(teams, function(x) x %.% filter(yearID == 2010),
#' base = subset(Teams, yearID == 2010))
#'
#' # A more complicated example using multiple tables
#' setup <- function(src) {
#' list(
#' src %.% tbl("Batting") %.% filter(stint == 1) %.% select(playerID:H),
#' src %.% tbl("Master") %.% select(playerID, birthYear)
#' )
#' }
#' two_tables <- lapply(lahman_local, setup)
#'
#' op <- function(tbls) {
#' semi_join(tbls[[1]], tbls[[2]], by = "playerID")
#' }
#' # compare_tbls(two_tables, op)
#' bench_tbls(two_tables, op, times = 2)
#'
#' }
#' @name bench_compare
NULL
#' @export
#' @rdname bench_compare
bench_tbls <- function(tbls, op, ..., times = 10) {
  # Time `op` applied to each element of `tbls` via microbenchmark.
  # NOTE(review): require() attaches microbenchmark to the search path as a
  # NOTE(review): side effect; the unqualified `microbenchmark` symbol in the
  # NOTE(review): constructed call below depends on that attachment.
  if (!require("microbenchmark")) {
    stop("Please install the microbenchmark package", call. = FALSE)
  }
  # Generate call to microbenchmark function that evaluates op for each tbl
  calls <- lapply(seq_along(tbls), function(i) {
    substitute(op(tbls[[i]]), list(i = i))
  })
  names(calls) <- names(tbls)  # labels shown in the benchmark output
  # `dots(...)` forwards any extra benchmark expressions supplied by the caller
  mb <- as.call(c(quote(microbenchmark), calls, dots(...),
    list(times = times)))
  eval(mb)
}
#' @export
#' @rdname bench_compare
compare_tbls <- function(tbls, op, ref = NULL, compare = equal_data_frame, ...) {
  # Evaluate `op` on every tbl and assert that all results agree with a
  # reference (the first result unless `ref` is supplied explicitly).
  if (length(tbls) < 2 && is.null(ref)) {
    stop("Need at least two srcs to compare", call. = FALSE)
  }
  # NOTE(review): require() attaches testthat as a side effect; the
  # NOTE(review): unqualified expect_true() below relies on that attachment.
  if (!require("testthat")) {
    stop("Please install the testthat package", call. = FALSE)
  }
  results <- eval_tbls(tbls, op)
  # Choose the reference result and the set to compare against it
  if (is.null(ref)) {
    ref <- results[[1]]
    ref_name <- names(results)[1]
    rest <- results[-1]
  } else {
    rest <- results
    ref_name <- "supplied comparison"
  }
  for(i in seq_along(rest)) {
    ok <- compare(ref, rest[[i]], ...)
    # if (!ok) browser()
    # `compare` may attach an explanatory "comment" attribute on failure
    msg <- paste0(names(rest)[[i]], " not equal to ", ref_name, "\n",
      attr(ok, "comment"))
    expect_true(ok, info = msg)
  }
  invisible(TRUE)
}
#' @export
#' @rdname bench_compare
eval_tbls <- function(tbls, op) {
  # Evaluate `op` on each tbl, materialising every result as a data frame.
  out <- vector("list", length(tbls))
  names(out) <- names(tbls)
  for (i in seq_along(tbls)) {
    out[[i]] <- as.data.frame(op(tbls[[i]]))
  }
  out
}
|
addPrediction <- function(modelSpecs) {
  # S3 generic: computes predicted values (BLUP) for a fitted model;
  # dispatches on the class of `modelSpecs`.
  UseMethod("addPrediction")
}
addPrediction.default <- function(modelSpecs) {
  # Default method: BLUP = fixed-effect part (X %*% beta) plus the fitted
  # random-effect part (fitre$x). The result is stored in `prediction` and
  # the augmented modelSpecs list is returned.
  fixedPart <- modelSpecs$X %*% modelSpecs$beta
  randomPart <- modelSpecs$fitre$x
  modelSpecs$prediction <- fixedPart + randomPart
  modelSpecs
}
| /R/addPrediction.R | no_license | wahani/SAE | R | false | false | 326 | r | addPrediction <- function(modelSpecs) {
# Generic function: computes predicted values (BLUP)
UseMethod("addPrediction")
}
addPrediction.default <- function(modelSpecs) {
  # Default method: BLUP prediction = fixed part (X %*% beta) plus the fitted
  # random-effect component (fitre$x); stored in $prediction on the returned list.
  modelSpecs$prediction <- modelSpecs$X %*% modelSpecs$beta + modelSpecs$fitre$x
  modelSpecs
}
|
.get_course_path <- function(){
  # Locate the swirl courses directory. Newer swirl versions expose
  # swirl_courses_dir(); fall back to the 'Courses' folder inside the
  # installed swirl package when that internal function is unavailable.
  tryCatch(swirl:::swirl_courses_dir(),
           error = function(c) {file.path(find.package("swirl"),"Courses")}
  )
}
# Path to data: the plant data file shipped with this course
.datapath <- file.path(.get_course_path(),
                       'R_101 - R_Fundamentals', 'Looking_at_Data',
                       'plant-data.txt')

# Read in data; strip.white trims padding and "" is treated as missing
plants <- read.csv(.datapath, strip.white=TRUE, na.strings="")

# Remove annoying columns not needed for the lesson
.cols2rm <- c('Accepted.Symbol', 'Synonym.Symbol')
plants <- plants[, !(names(plants) %in% .cols2rm)]

# Make names pretty: assign readable column names used throughout the lesson
names(plants) <- c('Scientific_Name', 'Duration', 'Active_Growth_Period',
                   'Foliage_Color', 'pH_Min', 'pH_Max',
                   'Precip_Min', 'Precip_Max',
                   'Shade_Tolerance', 'Temp_Min_F')
| /R_101 - R_Fundamentals/Looking_at_Data/initLesson.R | no_license | ImprovementPathSystems/IPS_swirl_beta | R | false | false | 777 | r | .get_course_path <- function(){
tryCatch(swirl:::swirl_courses_dir(),
error = function(c) {file.path(find.package("swirl"),"Courses")}
)
}
# Path to data: the plant data file shipped with this course
.datapath <- file.path(.get_course_path(),
                       'R_101 - R_Fundamentals', 'Looking_at_Data',
                       'plant-data.txt')

# Read in data; strip.white trims padding and "" is treated as missing
plants <- read.csv(.datapath, strip.white=TRUE, na.strings="")

# Remove annoying columns not needed for the lesson
.cols2rm <- c('Accepted.Symbol', 'Synonym.Symbol')
plants <- plants[, !(names(plants) %in% .cols2rm)]

# Make names pretty: assign readable column names used throughout the lesson
names(plants) <- c('Scientific_Name', 'Duration', 'Active_Growth_Period',
                   'Foliage_Color', 'pH_Min', 'pH_Max',
                   'Precip_Min', 'Precip_Max',
                   'Shade_Tolerance', 'Temp_Min_F')
|
library(ggplot2)

# Load the saved Stan results; `fit` and `d` used below are assumed to be
# restored by this load() call -- NOTE(review): confirm the .RData contents.
load('output/result-model7-1.RData')
ms <- rstan::extract(fit)

# Posterior 10/25/50/75/90% quantiles of y_pred, one column per observation
qua <- apply(ms$y_pred, 2, quantile, probs=c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X=d$Y, t(qua), check.names=FALSE)

# Observed vs. predicted (posterior median) with 10-90% ranges and a y=x line
p <- ggplot(data=d_est, aes(x=X, y=`50%`)) +
  theme_bw(base_size=18) +
  coord_fixed(ratio=1, xlim=c(-50, 1900), ylim=c(-50, 1900)) +
  geom_pointrange(aes(ymin=`10%`, ymax=`90%`), color='grey5', fill='grey95', shape=21) +
  geom_abline(aes(slope=1, intercept=0), color='black', alpha=3/5, linetype='dashed') +
  labs(x='Observed', y='Predicted')
ggsave(p, file='output/fig7-3-left.png', dpi=300, w=4.2, h=4)
| /chap07/fig7-3-left.R | no_license | MatsuuraKentaro/RStanBook | R | false | false | 621 | r | library(ggplot2)
# Load the saved Stan results; `fit` and `d` used below are assumed to be
# restored by this load() call -- NOTE(review): confirm the .RData contents.
load('output/result-model7-1.RData')
ms <- rstan::extract(fit)
# Posterior 10/25/50/75/90% quantiles of y_pred, one column per observation
qua <- apply(ms$y_pred, 2, quantile, probs=c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X=d$Y, t(qua), check.names=FALSE)
# Observed vs. predicted (posterior median) with 10-90% ranges and a y=x line
p <- ggplot(data=d_est, aes(x=X, y=`50%`)) +
  theme_bw(base_size=18) +
  coord_fixed(ratio=1, xlim=c(-50, 1900), ylim=c(-50, 1900)) +
  geom_pointrange(aes(ymin=`10%`, ymax=`90%`), color='grey5', fill='grey95', shape=21) +
  geom_abline(aes(slope=1, intercept=0), color='black', alpha=3/5, linetype='dashed') +
  labs(x='Observed', y='Predicted')
ggsave(p, file='output/fig7-3-left.png', dpi=300, w=4.2, h=4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BuildFeatureMatrix.R
\name{BuildFeatureMatrix}
\alias{BuildFeatureMatrix}
\title{Build a feature matrix from data processed with speaq 2.0}
\usage{
BuildFeatureMatrix(Y.data, var = "peakValue", impute = "zero",
delete.below.threshold = FALSE, baselineThresh = 500, snrThres = 3,
thresholds.pass = "any-to-pass")
}
\arguments{
\item{Y.data}{The dataset after (at least) peak detection and alignment with speaq 2.0.}
\item{var}{The variable to be used in the Featurematrix. This can be any of 'peakIndex', 'peakPPM', 'peakValue' (default), 'peakSNR', 'peakScale', or 'Sample'.}
\item{impute}{What to impute when a certain peak is missing for a certain sample and feature combo. Options are 'zero' (or 'zeros'), any other statement will produce NA's.}
\item{delete.below.threshold}{Whether to ignore peaks for which the 'var' variable has a value below 'baselineThresh' (default = FALSE).}
\item{baselineThresh}{The threshold for the 'var' variable peaks have to surpass to be included in the feature matrix.}
\item{snrThres}{The threshold for the signal-to-noise ratio of a peak.}
\item{thresholds.pass}{This variable lets users decide whether a peak has to pass all the thresholds (both snrThres and baselineThresh), or just one. (If the peak does not need to surpass any thresholds set 'delete.below.threshold' to FALSE).}
}
\value{
a matrix, data.matrix, with samples for rows and features for columns. The values in the matrix are those of the 'var' variable.
}
\description{
This function converts the aligned peak data (so at least peak detection and alignment/grouping have to be completed)
to a matrix with features (aligned peaks) in the columns and the value of that peak for every sample in the rows.
}
\examples{
\dontrun{
# 'DetectedPeaks_aligned' is the peak data after wavelet based peak detection and alignment
Featurematrix <- BuildFeatureMatrix(Y.data = DetectedPeaks_aligned, var = 'peakValue')
}
}
\author{
Charlie Beirnaert, \email{charlie.beirnaert@uantwerpen.be}
}
| /man/BuildFeatureMatrix.Rd | no_license | cran/speaq2 | R | false | true | 2,078 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BuildFeatureMatrix.R
\name{BuildFeatureMatrix}
\alias{BuildFeatureMatrix}
\title{Build a Feature matrix from the with speaq 2.0 processed data}
\usage{
BuildFeatureMatrix(Y.data, var = "peakValue", impute = "zero",
delete.below.threshold = FALSE, baselineThresh = 500, snrThres = 3,
thresholds.pass = "any-to-pass")
}
\arguments{
\item{Y.data}{The dataset after (at least) peak detection and alignment with speaq 2.0.}
\item{var}{The variable to be used in the Featurematrix. This can be any of 'peakIndex', 'peakPPM', 'peakValue' (default), 'peakSNR', 'peakScale', or 'Sample'.}
\item{impute}{What to impute when a certain peak is missing for a certain sample and feature combo. Options are 'zero' (or 'zeros'), any other statement will produce NA's.}
\item{delete.below.threshold}{Whether to ignore peaks for which the 'var' variable has a value below 'baselineThresh' (default = FALSE).}
\item{baselineThresh}{The threshold for the 'var' variable peaks have to surpass to be included in the feature matrix.}
\item{snrThres}{The threshold for the signal-to-noise ratio of a peak.}
\item{thresholds.pass}{This variable lets users deside whether a peak has to pass all the thresholds (both snrThres and baselineThresh), or just one. (If the peak does not need to surpass any thresholds set 'delete.below.threshold' to FALSE).}
}
\value{
a matrix, data.matrix, with samples for rows and features for columns. The values in the matrix are thoes of the 'var' variable.
}
\description{
This function converts the aligned peak data (so at least pake detection and alignment/grouping has to be completed)
to a matrix with features (aligned peaks) in the columns and the value of that peak for every sample in the rows.
}
\examples{
\dontrun{
# 'DetectedPeaks_aligned' is the peak data after wavelet based peak detection and alignment
Featurematrix <- BuildFeatureMatrix(Y.data = DetectedPeaks_aligned, var = 'peakValue')
}
}
\author{
Charlie Beirnaert, \email{charlie.beirnaert@uantwerpen.be}
}
|
#' @title Parse coefficients from SSF Model
#'
#' @description Extract the fitted coefficients from an SSF model object,
#'   tagged with species/year/month metadata, in a long format for plotting.
#' @param x SSF model object: a two-element list holding the fitted model
#'   (with a \code{coefficients} vector) and a metadata list with
#'   \code{Spp}, \code{Year} and \code{Month}.
#' @param type character, either RSF or Dist for model type
#' @return Returns a data.frame with columns Value, Var, Model, Spp, Year
#'   and Month (one row per coefficient).
#' @keywords ssf, direct competition, coefficients
#' @export
#' @examples
#' \donttest{coef<-as.data.frame(data.table::rbindlist(lapply(mods,parse_coefs)))}
parse_coefs <- function(x, type) {
  meta <- x[[2]]
  model <- x[[1]]

  # First 20 coefficients in long format: one row per term, with the
  # coefficient names carried into the Var column.
  out <- as.data.frame(model$coefficients[1:20])
  out$Var <- row.names(out)
  colnames(out) <- c("Value", "Var")

  # RSF models take their label from the 12th term's name with the "RF_"
  # prefix stripped; distance models get a fixed label.
  out$Model <- if (type == "RSF") gsub("RF_", "", out$Var[12]) else "ClosestDist"

  out$Spp <- meta$Spp
  out$Year <- meta$Year
  out$Month <- meta$Month
  out
}
#
#' @description Extract covars for plotting from an SSF Model
#' @param x SSF model object
#' @param type character, either RSF or Dist for model type
#' @return Returns Coefficients from SSF model object
#' @keywords ssf, direct competition, coefficients
#' @export
#' @examples
#' \donttest{coef<-as.data.frame(data.table::rbindlist(lapply(mods,parse_coefs)))}
parse_coefs<-function(x, type){
info<-x[[2]]
x<-x[[1]]
coefs<-as.data.frame(x$coefficients[1:20])
coefs$Var<-row.names(coefs)
names(coefs)<-c('Value','Var')
if(type=='RSF'){
coefs$Model<-gsub('RF_','',coefs$Var[12])
}else{
coefs$Model<-'ClosestDist'
}
coefs$Spp<-info$Spp
coefs$Year<-info$Year
coefs$Month<-info$Month
return(coefs)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy-dist-mat.R
\name{print.tidy_dist_mat}
\alias{print.tidy_dist_mat}
\title{print tidy_dist_mat objects}
\usage{
\method{print}{tidy_dist_mat}(x, ..., n = NULL)
}
\arguments{
\item{x}{tidy_dist_mat object}
\item{...}{(like \code{digits = 6}) number of significant digits to display
(uses \code{signif})}
\item{n}{maximum number of rows or columns of x to print}
}
\description{
print tidy_dist_mat objects
}
\examples{
inner_data <- data.frame(x = rnorm(3), y = rnorm(3))
my_dist_mat <- as.matrix(dist(inner_data))
rownames_df <- data.frame(id = 1:3)
colnames_df <- data.frame(id = c(1,2,1), id2 = c("f", "f", "s"))
my_tidy_dm <- tidy_dist_mat(my_dist_mat, rownames_df, colnames_df)
print(my_tidy_dm)
}
| /man/print.tidy_dist_mat.Rd | permissive | skgallagher/EpiCompare | R | false | true | 786 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy-dist-mat.R
\name{print.tidy_dist_mat}
\alias{print.tidy_dist_mat}
\title{print tidy_dist_mat objects}
\usage{
\method{print}{tidy_dist_mat}(x, ..., n = NULL)
}
\arguments{
\item{x}{tidy_dist_mat object}
\item{...}{(like \code{digits = 6}) number of significant digits to display
(uses \code{signif})}
\item{n}{maximum number of rows or columns of x to print}
}
\description{
print tidy_dist_mat objects
}
\examples{
inner_data <- data.frame(x = rnorm(3), y = rnorm(3))
my_dist_mat <- as.matrix(dist(inner_data))
rownames_df <- data.frame(id = 1:3)
colnames_df <- data.frame(id = c(1,2,1), id2 = c("f", "f", "s"))
my_tidy_dm <- tidy_dist_mat(my_dist_mat, rownames_df, colnames_df)
print(my_tidy_dm)
}
|
#' input_panel UI Function
#'
#' @description A shiny Module. Builds the input panel: an introductory
#'   sentence, the minimum-wage numeric input (with a live valid-range
#'   label rendered by the server), the year selector and the run button.
#'
#' @param id Module id, namespaced via \code{NS()}.
#' @param i18n Translator object providing \code{$t()} for UI strings.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_input_panel_ui <- function(id, i18n) {
  ns <- NS(id)

  # Label for the wage input: translated prompt plus the server-rendered
  # "[min,max]" range for the selected year (output$range).
  wage_label <- tagList(
    i18n$t("Sisesta miinimumpalk (bruto)"),
    textOutput(ns("range"), inline = TRUE)
  )

  tagList(
    i18n$t(
      "Rakendus ennustab miinimumpalga muutuse esmast mõju sotsiaalsetele näitajatele, eeldusel, et muud näitajad jäävad samaks."
    ),
    br(),
    numericInput(ns("min_wage"), wage_label, 600),
    selectInput(ns("year"), i18n$t("Rakendumise aasta"), c(2018, 2019, 2020)),
    actionButton(ns("run"), i18n$t("Arvuta"))
  )
}
#' input_panel Server Functions
#'
#' Validates the minimum-wage input against the limits for the selected
#' year and returns the validated inputs as a reactive list that refreshes
#' only when the run button is clicked.
#'
#' @noRd
mod_input_panel_server <- function(id, i18n, input_limits) {
  moduleServer(id, function(input, output, session) {
    ns <- session$ns
    # Row of input_limits matching the selected year — assumes columns
    # year/min/max (TODO confirm against the caller's data).
    limits <- reactive(input_limits[input_limits$year == input$year, ])
    # Render the allowed range "[min,max]" shown next to the input label.
    output$range <- renderText(sprintf("[%d,%d]", limits()$min, limits()$max))
    iv <- shinyvalidate::InputValidator$new()
    iv$add_rule("min_wage", shinyvalidate::sv_required())
    # Reject wages outside the year-specific [min, max] range; returning a
    # translated message marks the input invalid, returning NULL passes it.
    iv$add_rule(
      "min_wage",
      function(min_wage) {
        if (min_wage < limits()$min || min_wage > limits()$max) {
          i18n()$t("Sisend väljaspool vahemikku")
        }
      }
    )
    iv$enable()
    # Keep the run button disabled while the form is invalid.
    observe({
      if (!iv$is_valid()) {
        shinyjs::disable("run")
      } else {
        shinyjs::enable("run")
      }
    })
    # Snapshot of year/min_wage, taken only on a click of "run" and only
    # when validation passes (req() halts otherwise).
    app_inputs <- reactive({
      req(iv$is_valid())
      list("year" = input$year, "min_wage" = input$min_wage)
    }) %>% bindEvent(input$run, ignoreInit = TRUE)
    return(app_inputs)
  })
}
| /R/mod_input_panel.R | permissive | oluwandabira/euromod-web-interface | R | false | false | 1,738 | r | #' input_panel UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_input_panel_ui <- function(id, i18n) {
ns <- NS(id)
tagList(
i18n$t(
"Rakendus ennustab miinimumpalga muutuse esmast mõju sotsiaalsetele näitajatele, eeldusel, et muud näitajad jäävad samaks."
),
br(),
numericInput(
ns("min_wage"),
tagList(
i18n$t("Sisesta miinimumpalk (bruto)"),
textOutput(ns("range"), inline = TRUE)
),
600
),
selectInput(
ns("year"),
i18n$t("Rakendumise aasta"),
c(2018, 2019, 2020)
),
actionButton(ns("run"), i18n$t("Arvuta"))
)
}
#' input_panel Server Functions
#'
#' @noRd
mod_input_panel_server <- function(id, i18n, input_limits) {
moduleServer(id, function(input, output, session) {
ns <- session$ns
limits <- reactive(input_limits[input_limits$year == input$year, ])
output$range <- renderText(sprintf("[%d,%d]", limits()$min, limits()$max))
iv <- shinyvalidate::InputValidator$new()
iv$add_rule("min_wage", shinyvalidate::sv_required())
iv$add_rule(
"min_wage",
function(min_wage) {
if (min_wage < limits()$min || min_wage > limits()$max) {
i18n()$t("Sisend väljaspool vahemikku")
}
}
)
iv$enable()
observe({
if (!iv$is_valid()) {
shinyjs::disable("run")
} else {
shinyjs::enable("run")
}
})
app_inputs <- reactive({
req(iv$is_valid())
list("year" = input$year, "min_wage" = input$min_wage)
}) %>% bindEvent(input$run, ignoreInit = TRUE)
return(app_inputs)
})
}
|
#' Beryl Zhuang
#' April 3, 2015
#' Disclaimer:
#' The following script is adapted from
#' https://github.com/sjackman/stat540-project/blob/master/topGO.R
#' Builds Island2GO, a list mapping each CpG island to its GO identifiers;
#' it is later used to build the topGOdata object for the functional
#' enrichment analysis.
library(IlluminaHumanMethylation450k.db)
library(topGO)
#### ALL ISLANDS
# GO annotations for the 450K array: a list keyed by GO term, each holding
# the probe IDs annotated with that term.
GO <- as.list(IlluminaHumanMethylation450kGO2PROBE)
head(GO) #GO<-GO2probe, want probe to GO then Island to GO
# Invert the mapping so it reads probe ID -> GO terms.
probe2GO<-inverseList(GO)
# Probe-to-island lookup table; column 1 holds the probe ID used to index
# probe2GO, and 'cpgiview.ucscname' holds the island name.
Island<-as.data.frame(IlluminaHumanMethylation450kCPGINAME)
# Look up the GO terms for the probe in row x of Island.
islGO<-function(x) probe2GO[[Island[x,1]]]
isl<-as.list(1:nrow(Island))
# Island GO data (derived from the probe GO data): one entry per island,
# named by the island's UCSC name.
Island2GO<-lapply(isl,islGO)
names(Island2GO)<-Island$cpgiview.ucscname
save(Island2GO, file="../data/FEA_island2go.Rdata")
| /rscripts/FEA_build_island2GO.R | no_license | esqyap/yy_team01_colorectal-cancer_STAT540_2015_mirror | R | false | false | 959 | r | #' Beryl Zhuang
#' April 3, 2015
#' Disclaimer:
#' The following script is taken from
#' https://github.com/sjackman/stat540-project/blob/master/topGO.R
#' island2GO contains GO identifiers, which is used to build the topGOdata object
#' for the functional enrichment analysis
library(IlluminaHumanMethylation450k.db)
library(topGO)
#### ALL ISLANDS
#GO annotations for all Islands on the 450K
GO <- as.list(IlluminaHumanMethylation450kGO2PROBE)
head(GO) #GO<-GO2probe, want probe to GO then Island to GO
probe2GO<-inverseList(GO)
#Summaize GO groups associated with each island (object for topGO function)
Island<-as.data.frame(IlluminaHumanMethylation450kCPGINAME)
#lookup all the GO of each Island and store as list
islGO<-function(x) probe2GO[[Island[x,1]]]
isl<-as.list(1:nrow(Island))
#Island GO data (from probe GO data)
Island2GO<-lapply(isl,islGO)
names(Island2GO)<-Island$cpgiview.ucscname
save(Island2GO, file="../data/FEA_island2go.Rdata")
|
#' Safety Results Over Time plot
#'
#' @param data labs data structured as one record per person per visit per measurement. See details for column requirements.
#' @param settings named list of settings with the parameters specified below.
#'
#' @details The settings object provides details the columns in the data set.
#'
#' \itemize{
#' \item{"value_col"}{Value column}
#' \item{"measure_col"}{Measure column}
#' \item{"measure_values"}{Measure values}
#' \item{"visit_col"}{Study Visit}
#' \item{"visitn_col"}{Study Number}
#' \item{"group_col"}{Grouping column}
#' \item{"violins"}{Show Violin plots?}
#' \item{"boxplots"}{Show Box Plots?}
#' \item{"axis"}{set to "log" to use a log transformed axis, linear otherwise}
#' \item{"drop_visit_string"}{Drop visits that contain this string. e.g. "unscheduled"}
#' }
#'
#' @examples
#' library(dplyr)
#' lb <- safetyData::sdtm_lb
#' sub_ids <- unique(lb$USUBJID)[1:100]
#' lb<-lb %>% filter(USUBJID %in% sub_ids)
#' settings <- list(
#'     value_col = "LBORRES",
#'     measure_col = "LBTEST",
#'     measure_values = c("Chloride"),
#'     visit_col = "VISIT",
#'     visitn_col = "VISITNUM",
#'     axis = "log"
#' )
#' safety_results_over_time(lb, settings)
#'
#' # remove unscheduled visits, add violin plot and 2nd panel
#' settings$drop_visit_string <- "unscheduled"
#' settings$violins <- TRUE
#' settings$measure_values <- c("Albumin")
#' safety_results_over_time(lb, settings)
#'
#' # add grouping by treatment
#' dm_sub <- safetyData::sdtm_dm %>% select(USUBJID, ARM)
#' dm_lb <- dm_sub %>% left_join(lb)
#' settings$group_col <- "ARM"
#' safety_results_over_time(dm_lb, settings)
#'
#' @return returns a chart object
#'
#' @import dplyr
#' @importFrom forcats fct_drop fct_reorder
#' @import ggplot2
#' @importFrom RColorBrewer brewer.pal
#' @importFrom rlang .data sym
#' @importFrom stringr str_detect
#' @importFrom utils hasName
#'
#' @export
safety_results_over_time <- function(data, settings) {
    #########################################
    # Set default values
    #########################################
    if (!utils::hasName(settings, "axis")) settings$axis <- "linear"
    if (!utils::hasName(settings, "violins")) settings$violins <- FALSE
    if (!utils::hasName(settings, "boxplots")) settings$boxplots <- TRUE
    if (!utils::hasName(settings, "group_col")) settings$group_col <- "Overall"
    if (!utils::hasName(settings, "drop_visit_string")) settings$drop_visit_string <- ""

    #########################################
    # Chart appearance settings
    #########################################
    if (settings$group_col == "Overall") {
        # No grouping requested: plot everything as a single gray group.
        data$Overall <- "Overall"
        colors <- "gray80"
    } else {
        colors <- c(
            RColorBrewer::brewer.pal(9, "Set1")[c(2, 3, 1, 4:9)],
            RColorBrewer::brewer.pal(8, "Set2")
        )
    }

    # Use semi-transparent layers only when violins and boxplots overlap.
    alpha <- ifelse(settings[["violins"]] & settings[["boxplots"]], 0.7, 1)
    pd <- ggplot2::position_dodge(width = 0.75, preserve = "total")

    #########################################
    # Prep data
    #########################################
    # Convert settings to symbols ready for standard evaluation
    visit_sym <- rlang::sym(settings[["visit_col"]])
    visitn_sym <- rlang::sym(settings[["visitn_col"]])
    value_sym <- rlang::sym(settings[["value_col"]])
    measure_sym <- rlang::sym(settings[["measure_col"]])

    # Filter to selected measures if specified
    if (utils::hasName(settings, "measure_values")) {
        dd <- data %>% dplyr::filter(!!measure_sym %in% settings$measure_values)
    } else {
        dd <- data
    }

    # Drop unscheduled visits if specified (case-insensitive substring match)
    if (nchar(settings[["drop_visit_string"]]) > 0) {
        dd <- dplyr::filter(
            dd,
            !stringr::str_detect(
                tolower(!!visit_sym),
                tolower(settings[["drop_visit_string"]])
            )
        )
    }

    dd <- dd %>%
        dplyr::mutate(!!value_sym := as.numeric(!!value_sym)) %>% # coerce result to numeric
        dplyr::filter(!is.na(!!value_sym)) %>% # drop visits without data
        dplyr::mutate(!!visit_sym := forcats::fct_drop(as.factor(!!visit_sym))) %>% # remove unused factor levels
        dplyr::mutate(!!visit_sym := forcats::fct_reorder(!!visit_sym, !!visitn_sym)) # reorder visits by visit number

    #########################################
    # Create figure
    #########################################
    # initiate plot - overall or by group. `.data[[...]]` replaces the
    # deprecated ggplot2::aes_()/as.name() pattern; explicit labs() keeps
    # the original column-name axis/legend labels.
    p <- ggplot2::ggplot(
        data = dd,
        ggplot2::aes(
            x = .data[[settings$visit_col]],
            y = .data[[settings$value_col]],
            color = .data[[settings$group_col]]
        )
    ) +
        ggplot2::scale_color_manual(values = colors) +
        ggplot2::facet_grid(
            rows = ggplot2::vars(.data[[settings$measure_col]]),
            scales = "free_y"
        ) +
        ggplot2::labs(
            x = settings$visit_col,
            y = settings$value_col,
            color = settings$group_col
        )

    if (settings[["violins"]]) {
        p <- p +
            ggplot2::geom_violin(
                alpha = alpha,
                position = pd
            )
    }

    if (settings[["boxplots"]]) {
        p <- p +
            ggplot2::geom_boxplot(
                alpha = alpha,
                position = ggplot2::position_dodge2(width = 0.75, preserve = "single"),
                fatten = 1,
                outlier.alpha = 0.8,
                outlier.shape = 21
            )
    }

    if (settings[["axis"]] == "log") {
        p <- p +
            ggplot2::scale_y_log10() +
            ggplot2::annotation_logticks(sides = "l")
        # Summary point on the log axis: mean on the original scale,
        # mapped back into log10 coordinates.
        summary_fun <- function(x) {
            log10(mean(10^x))
        }
    } else {
        summary_fun <- mean
    }

    p <- p +
        ggplot2::coord_cartesian(clip = "off") +
        ggplot2::stat_summary(
            fun = summary_fun, # `fun` replaces the deprecated `fun.y`
            geom = "point",
            position = pd
        )

    p <- p +
        ggplot2::theme_bw() +
        ggplot2::theme(
            axis.text.x = ggplot2::element_text(
                angle = 45,
                hjust = 1
            ),
            legend.position = "bottom"
        )

    return(p)
}
| /R/safety_results_over_time.R | permissive | SafetyGraphics/safetyCharts | R | false | false | 6,097 | r | #' Safety Results Over Time plot
#'
#' @param data labs data structured as one record per person per visit per measurement. See details for column requirements.
#' @param settings named list of settings with the parameters specified below.
#'
#' @details The settings object provides details the columns in the data set.
#'
#' \itemize{
#' \item{"value_col"}{Value column}
#' \item{"measure_col"}{Measure column}
#' \item{"measure_values"}{Measure values}
#' \item{"visit_col"}{Study Visit}
#' \item{"visitn_col"}{Study Number}
#' \item{"group_col"}{Grouping column}
#' \item{"violins"}{Show Violin plots?}
#' \item{"boxplots"}{Show Box Plots?}
#' \item{"axis"}{set to "log" to use a log transformed axis, linear otherwise}
#' \item{"drop_visit_string"}{Drop visits that contain this string. e.g. "unscheduled"}
#' }
#'
#' @examples
#' library(dplyr)
#' lb <- safetyData::sdtm_lb
#' sub_ids <- unique(lb$USUBJID)[1:100]
#' lb<-lb %>% filter(USUBJID %in% sub_ids)
#' settings <- list(
#' value_col = "LBORRES",
#' measure_col = "LBTEST",
#' measure_values = c("Chloride"),
#' visit_col = "VISIT",
#' visitn_col = "VISITNUM",
#' axis = "log"
#' )
#' safety_results_over_time(lb, settings)
#'
#' # remove unscheduled visits, add violin plot and 2nd panel
#' settings$drop_visit_string <- "unscheduled"
#' settings$violins <- TRUE
#' settings$measure_values <- c("Albumin")
#' safety_results_over_time(lb, settings)
#'
#' # add grouping by treatment
#' dm_sub <- safetyData::sdtm_dm %>% select(USUBJID, ARM)
#' dm_lb <- dm_sub %>% left_join(lb)
#' settings$group_col <- "ARM"
#' safety_results_over_time(dm_lb, settings)
#'
#' @return returns a chart object
#'
#' @import dplyr
#' @importFrom forcats fct_drop fct_reorder
#' @import ggplot2
#' @importFrom RColorBrewer brewer.pal
#' @importFrom rlang sym
#' @importFrom stringr str_detect
#' @importFrom utils hasName
#'
#' @export
safety_results_over_time <- function(data, settings) {
#########################################
# Set default values
#########################################
if (!utils::hasName(settings, "axis")) settings$axis <- "linear"
if (!utils::hasName(settings, "violins")) settings$violins <- FALSE
if (!utils::hasName(settings, "boxplots")) settings$boxplots <- TRUE
if (!utils::hasName(settings, "group_col")) settings$group_col <- "Overall"
if (!utils::hasName(settings, "drop_visit_string")) settings$drop_visit_string <- ""
#########################################
# Chart appearance settings
#########################################
if (settings$group_col == "Overall") {
data$Overall <- "Overall"
colors <- "gray80"
} else {
colors <- c(
RColorBrewer::brewer.pal(9, "Set1")[c(2, 3, 1, 4:9)],
RColorBrewer::brewer.pal(8, "Set2")
)
}
alpha <- ifelse(settings[["violins"]] & settings[["boxplots"]], 0.7, 1)
pd <- ggplot2::position_dodge(width = 0.75, preserve = "total")
#########################################
# Prep data
#########################################
# Convert settings to symbols ready for standard evaluation
visit_sym <- rlang::sym(settings[["visit_col"]])
visitn_sym <- rlang::sym(settings[["visitn_col"]])
value_sym <- rlang::sym(settings[["value_col"]])
measure_sym <- rlang::sym(settings[["measure_col"]])
# Filter to selected measures if specified
if (utils::hasName(settings, "measure_values")) {
dd <- data %>% dplyr::filter(!!measure_sym %in% settings$measure_values)
} else {
dd <- data
}
# Drop unscheduled visits if specified
if (nchar(settings[["drop_visit_string"]]) > 0) {
dd <- dplyr::filter(
dd,
!stringr::str_detect(
tolower(!!visit_sym),
tolower(settings[["drop_visit_string"]])
)
)
}
dd <- dd %>%
dplyr::mutate(!!value_sym := as.numeric(!!value_sym)) %>% # coerce result to numeric
dplyr::filter(!is.na(!!value_sym)) %>% # drop visits without data
dplyr::mutate(!!visit_sym := forcats::fct_drop(as.factor(!!visit_sym))) %>% # remove unused factor levels
dplyr::mutate(!!visit_sym := forcats::fct_reorder(!!visit_sym, !!visitn_sym)) # reorder visits by visit number
#########################################
# Create figure
#########################################
# initiate plot - overall or by group
params <- ggplot2::aes_(
x = as.name(settings$visit_col),
y = as.name(settings$value_col),
color = as.name(settings$group_col)
)
p <- ggplot2::ggplot(data = dd, params) +
ggplot2::scale_color_manual(values = colors) +
ggplot2::facet_grid(
rows = as.name(settings$measure_col),
scales = "free_y"
)
if (settings[["violins"]]) {
p <- p +
ggplot2::geom_violin(
alpha = alpha,
position = pd
)
}
if (settings[["boxplots"]]) {
p <- p +
ggplot2::geom_boxplot(
alpha = alpha,
position = ggplot2::position_dodge2(width = 0.75, preserve = "single"),
fatten = 1,
outlier.alpha = 0.8,
outlier.shape = 21
)
}
if (settings[["axis"]] == "log") {
p <- p +
ggplot2::scale_y_log10() +
ggplot2::annotation_logticks(sides = "l")
summary_fun <- function(x) {
log10(mean(10**x))
}
} else {
summary_fun <- mean
}
p <- p +
ggplot2::coord_cartesian(clip = "off") +
ggplot2::stat_summary(
fun.y = summary_fun,
geom = "point",
position = pd
)
p <- p +
ggplot2::theme_bw() +
ggplot2::theme(
axis.text.x = ggplot2::element_text(
angle = 45,
hjust = 1
),
legend.position = "bottom"
)
return(p)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscaling_operations.R
\name{autoscaling_put_lifecycle_hook}
\alias{autoscaling_put_lifecycle_hook}
\title{Creates or updates a lifecycle hook for the specified Auto Scaling group}
\usage{
autoscaling_put_lifecycle_hook(LifecycleHookName, AutoScalingGroupName,
LifecycleTransition, RoleARN, NotificationTargetARN,
NotificationMetadata, HeartbeatTimeout, DefaultResult)
}
\arguments{
\item{LifecycleHookName}{[required] The name of the lifecycle hook.}
\item{AutoScalingGroupName}{[required] The name of the Auto Scaling group.}
\item{LifecycleTransition}{The instance state to which you want to attach the lifecycle hook. The
valid values are:
\itemize{
\item autoscaling:EC2\\_INSTANCE\\_LAUNCHING
\item autoscaling:EC2\\_INSTANCE\\_TERMINATING
}
Conditional: This parameter is required for new lifecycle hooks, but
optional when updating existing hooks.}
\item{RoleARN}{The ARN of the IAM role that allows the Auto Scaling group to publish to
the specified notification target, for example, an Amazon SNS topic or
an Amazon SQS queue.
Conditional: This parameter is required for new lifecycle hooks, but
optional when updating existing hooks.}
\item{NotificationTargetARN}{The ARN of the notification target that Amazon EC2 Auto Scaling uses to
notify you when an instance is in the transition state for the lifecycle
hook. This target can be either an SQS queue or an SNS topic.
If you specify an empty string, this overrides the current ARN.
This operation uses the JSON format when sending notifications to an
Amazon SQS queue, and an email key-value pair format when sending
notifications to an Amazon SNS topic.
When you specify a notification target, Amazon EC2 Auto Scaling sends it
a test message. Test messages contain the following additional key-value
pair: \code{"Event": "autoscaling:TEST_NOTIFICATION"}.}
\item{NotificationMetadata}{Additional information that you want to include any time Amazon EC2 Auto
Scaling sends a message to the notification target.}
\item{HeartbeatTimeout}{The maximum time, in seconds, that can elapse before the lifecycle hook
times out. The range is from \code{30} to \code{7200} seconds. The default value
is \code{3600} seconds (1 hour).
If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the
action that you specified in the \code{DefaultResult} parameter. You can
prevent the lifecycle hook from timing out by calling
RecordLifecycleActionHeartbeat.}
\item{DefaultResult}{Defines the action the Auto Scaling group should take when the lifecycle
hook timeout elapses or if an unexpected failure occurs. This parameter
can be either \code{CONTINUE} or \code{ABANDON}. The default value is \code{ABANDON}.}
}
\description{
Creates or updates a lifecycle hook for the specified Auto Scaling
group.
}
\details{
A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on
an instance when the instance launches (before it is put into service)
or as the instance terminates (before it is fully terminated).
This step is a part of the procedure for adding a lifecycle hook to an
Auto Scaling group:
\enumerate{
\item (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2
Auto Scaling launches or terminates instances.
\item (Optional) Create a notification target and an IAM role. The target
can be either an Amazon SQS queue or an Amazon SNS topic. The role
allows Amazon EC2 Auto Scaling to publish lifecycle notifications to
the target.
\item \strong{Create the lifecycle hook. Specify whether the hook is used when
the instances launch or terminate.}
\item If you need more time, record the lifecycle action heartbeat to keep
the instance in a pending state using
RecordLifecycleActionHeartbeat.
\item If you finish before the timeout period ends, complete the lifecycle
action using CompleteLifecycleAction.
}
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html}{Amazon EC2 Auto Scaling Lifecycle Hooks}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
If you exceed your maximum limit of lifecycle hooks, which by default is
50 per Auto Scaling group, the call fails.
You can view the lifecycle hooks for an Auto Scaling group using
DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you
can delete it using DeleteLifecycleHook.
}
\section{Request syntax}{
\preformatted{svc$put_lifecycle_hook(
LifecycleHookName = "string",
AutoScalingGroupName = "string",
LifecycleTransition = "string",
RoleARN = "string",
NotificationTargetARN = "string",
NotificationMetadata = "string",
HeartbeatTimeout = 123,
DefaultResult = "string"
)
}
}
\examples{
\dontrun{
# This example creates a lifecycle hook.
svc$put_lifecycle_hook(
AutoScalingGroupName = "my-auto-scaling-group",
LifecycleHookName = "my-lifecycle-hook",
LifecycleTransition = "autoscaling:EC2_INSTANCE_LAUNCHING",
NotificationTargetARN = "arn:aws:sns:us-west-2:123456789012:my-sns-topic --role-arn",
RoleARN = "arn:aws:iam::123456789012:role/my-auto-scaling-role"
)
}
}
\keyword{internal}
| /paws/man/autoscaling_put_lifecycle_hook.Rd | permissive | johnnytommy/paws | R | false | true | 5,188 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscaling_operations.R
\name{autoscaling_put_lifecycle_hook}
\alias{autoscaling_put_lifecycle_hook}
\title{Creates or updates a lifecycle hook for the specified Auto Scaling group}
\usage{
autoscaling_put_lifecycle_hook(LifecycleHookName, AutoScalingGroupName,
LifecycleTransition, RoleARN, NotificationTargetARN,
NotificationMetadata, HeartbeatTimeout, DefaultResult)
}
\arguments{
\item{LifecycleHookName}{[required] The name of the lifecycle hook.}
\item{AutoScalingGroupName}{[required] The name of the Auto Scaling group.}
\item{LifecycleTransition}{The instance state to which you want to attach the lifecycle hook. The
valid values are:
\itemize{
\item autoscaling:EC2\\_INSTANCE\\_LAUNCHING
\item autoscaling:EC2\\_INSTANCE\\_TERMINATING
}
Conditional: This parameter is required for new lifecycle hooks, but
optional when updating existing hooks.}
\item{RoleARN}{The ARN of the IAM role that allows the Auto Scaling group to publish to
the specified notification target, for example, an Amazon SNS topic or
an Amazon SQS queue.
Conditional: This parameter is required for new lifecycle hooks, but
optional when updating existing hooks.}
\item{NotificationTargetARN}{The ARN of the notification target that Amazon EC2 Auto Scaling uses to
notify you when an instance is in the transition state for the lifecycle
hook. This target can be either an SQS queue or an SNS topic.
If you specify an empty string, this overrides the current ARN.
This operation uses the JSON format when sending notifications to an
Amazon SQS queue, and an email key-value pair format when sending
notifications to an Amazon SNS topic.
When you specify a notification target, Amazon EC2 Auto Scaling sends it
a test message. Test messages contain the following additional key-value
pair: \code{"Event": "autoscaling:TEST_NOTIFICATION"}.}
\item{NotificationMetadata}{Additional information that you want to include any time Amazon EC2 Auto
Scaling sends a message to the notification target.}
\item{HeartbeatTimeout}{The maximum time, in seconds, that can elapse before the lifecycle hook
times out. The range is from \code{30} to \code{7200} seconds. The default value
is \code{3600} seconds (1 hour).
If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the
action that you specified in the \code{DefaultResult} parameter. You can
prevent the lifecycle hook from timing out by calling
RecordLifecycleActionHeartbeat.}
\item{DefaultResult}{Defines the action the Auto Scaling group should take when the lifecycle
hook timeout elapses or if an unexpected failure occurs. This parameter
can be either \code{CONTINUE} or \code{ABANDON}. The default value is \code{ABANDON}.}
}
\description{
Creates or updates a lifecycle hook for the specified Auto Scaling
group.
}
\details{
A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on
an instance when the instance launches (before it is put into service)
or as the instance terminates (before it is fully terminated).
This step is a part of the procedure for adding a lifecycle hook to an
Auto Scaling group:
\enumerate{
\item (Optional) Create a Lambda function and a rule that allows
CloudWatch Events to invoke your Lambda function when Amazon EC2
Auto Scaling launches or terminates instances.
\item (Optional) Create a notification target and an IAM role. The target
can be either an Amazon SQS queue or an Amazon SNS topic. The role
allows Amazon EC2 Auto Scaling to publish lifecycle notifications to
the target.
\item \strong{Create the lifecycle hook. Specify whether the hook is used when
the instances launch or terminate.}
\item If you need more time, record the lifecycle action heartbeat to keep
the instance in a pending state using
RecordLifecycleActionHeartbeat.
\item If you finish before the timeout period ends, complete the lifecycle
action using CompleteLifecycleAction.
}
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html}{Amazon EC2 Auto Scaling Lifecycle Hooks}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
If you exceed your maximum limit of lifecycle hooks, which by default is
50 per Auto Scaling group, the call fails.
You can view the lifecycle hooks for an Auto Scaling group using
DescribeLifecycleHooks. If you are no longer using a lifecycle hook, you
can delete it using DeleteLifecycleHook.
}
\section{Request syntax}{
\preformatted{svc$put_lifecycle_hook(
LifecycleHookName = "string",
AutoScalingGroupName = "string",
LifecycleTransition = "string",
RoleARN = "string",
NotificationTargetARN = "string",
NotificationMetadata = "string",
HeartbeatTimeout = 123,
DefaultResult = "string"
)
}
}
\examples{
\dontrun{
# This example creates a lifecycle hook.
svc$put_lifecycle_hook(
AutoScalingGroupName = "my-auto-scaling-group",
LifecycleHookName = "my-lifecycle-hook",
LifecycleTransition = "autoscaling:EC2_INSTANCE_LAUNCHING",
NotificationTargetARN = "arn:aws:sns:us-west-2:123456789012:my-sns-topic --role-arn",
RoleARN = "arn:aws:iam::123456789012:role/my-auto-scaling-role"
)
}
}
\keyword{internal}
|
library(glmnet)

# Load the lymphoid training set: column 1 is the response, columns 4+ are
# the predictor features.
train <- read.table(
  "../../../../TrainingSet/FullSet/Classifier/lymphoid.csv",
  header = TRUE, sep = ","
)
features <- as.matrix(train[, 4:ncol(train)])
response <- as.matrix(train[, 1])

# 10-fold cross-validated elastic net (alpha = 0.25), MAE loss, no
# standardization; seed fixed for reproducible fold assignment.
set.seed(123)
fit <- cv.glmnet(
  features, response,
  nfolds = 10, type.measure = "mae", alpha = 0.25,
  family = "gaussian", standardize = FALSE
)

# Append the fitted glmnet path summary to the results file.
sink("./lymphoid_040.txt", append = TRUE)
print(fit$glmnet.fit)
sink()
| /Model/EN/Classifier/lymphoid/lymphoid_040.R | no_license | esbgkannan/QSMART | R | false | false | 354 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/lymphoid.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.25,family="gaussian",standardize=FALSE)
sink('./lymphoid_040.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#==============================================================================
# source ('DevPool.R');
#rm (list = ls());
#==============================================================================
# History:
# BTL - 2010.12.10ish
# Created for the Sydney Cumberland Plains Woodland project to replace the
# existing loss model / offset model call to choose a fixed number of PUs to
# develop each time step. The new version is aimed at averaging a certain
# number of hectares developed per time step instead of a certain number of
# planning units. At the moment, it has lots of things specific to Sydney
# in it, but these may factor out later if we can convert it to an OOP
# representation instead.
# BTL - 2010.12.12
# Have attempted to convert this to use R's OOP system.
#==============================================================================
# Things that still need work
# 1) Are these hmv or cpw loss rates? Need to also make sure they're named
# correctly in the yaml file:
# PAR.initial.inside.gc.cpw.loss.rate <- 39.6 # hectares per yr
# PAR.initial.outside.gc.cpw.loss.rate <- 48
# +/- 10 per yr, so, could runif or truncated normal
# in [38 to 58] to get cur rate
# 2) offsetting constraints
# #90% offset inside gc until 797 ha reached and then all go outside
# 4) may need to update the loss rate(s) on each time step, particularly for outside gc,
# e.g., if you want the loss rate to increase over time
# 4a) # The label "cur." is used here because we may want to have the target value change
# over the run of the model, e.g., to accommodate increasing development rates.
# 4b) #*** Need to add an runif() call to each time step?
#initial.outside.gc.cpw.loss.rate <- 48 # +/- 10 per yr, so, could runif or truncated normal
# in [38 to 58] to get cur rate
#cur.outside.gc.target.loss.rate.for.ts <- initial.outside.gc.cpw.loss.rate * step.interval
# 4c) # In the initialization routines for the target rate, I'm using hmv.
# Should I be using cpw instead?
# See notes around the initialization routines:
# initialize.inside.gc.dev.pool.target.loss.rate() and analogous for outside.
# 4d) # The initialize.inside.gc.dev.pool...() and outside...() need to be converted
# into initialize methods for the corresponding classes, but R doesn't like the
# way I did it. For the moment, I've just moved the logic into some standalone
# routines outside the classes.
# 5) assign.PU.to.cur.ts (cur.dev.pool, PU.to.develop)
# WHAT HAPPENS WITH ALL THESE RUNNING TOTALS IF OFFSETTING FAILS?
# DO WE NEED TO HAVE THESE ONLY AS SCRATCH VALUES UNTIL OFFSET SUCCEEDS
# (WHICH IS ALSO MAKING THE ASSUMPTION THAT OFFSETTING IS EVEN BEING DONE).
# 6) May need to have a test about the SECURED status of the parcel when building
# the eligibility query since there are interactions between protection
# and tenure security.
# For example, TENURE = "Secured" (a form of catastrophic loss)
# 7) Clean up final overflow calculation
# 8) Make it so that inside and outside are chosen probabilistically instead of
# doing all inside and then all outside.
# 9) Probably need to make some kind of distinction between not allowing any more development
# this time step and not allowing any more at all (e.g., if all parcels
# have been developed). However, runs that have to do with protection
# expiring may allow things to change and what formerly could not happen
# will suddenly become possible. So, maybe this is not such a good idea.
# Have to think about it.
# 10) First creation of the dev pools in loss.model.R initializes the running
# totals to 0 by assuming that their prototype values are 0, but
# that's bad to do for several reasons. Need to change that to
# explicitly set them to zero. One problem is that in the class
# prototype here, the values are set to CONST.UNINITIALIZED.NON.NEG.NUM,
# which is in fact, already non-negative number so it can't be
# distinguished as uninitialized. Need to clean this up and go back
# to using -77 or something as soon as you get the code in loss.model.R
# corrected to set the 0's explicitly instead of implicitly.
# 11) Need to write up intro to using OOP in R.
# One important thing to add is what I discovered this morning about
# the cryptic error message you get when you use the same slot name in
# multiple classes and redefine the generic accessor functions for it
# in each class rather than just once. It reinforces the need to add
# the check for existance code for generics that is shown in some of
# the tutorial examples. It should be done automatically!
#==============================================================================
library (methods);
#==============================================================================
#------------------------------------------------------
# Global initializations before starting time steps.
#------------------------------------------------------
#==============================================================================
source ('constants.R')
# Sentinel return values used by the development-pool selection routines.
CONST.NO.PU.LEFT.TO.DEVELOP <- -88     # no undeveloped planning units remain at all
CONST.NO.ELIGIBLE.PU.TO.DEVELOP <- -33 # PUs remain, but none pass the eligibility query
CONST.NO.OVERFLOW.PU.TO.DEV <- 0       # no overflow PU carried over from the previous time step
#CONST.UNINITIALIZED.NON.NEG.NUM <- -77
# NOTE(review): 0.0 is itself a legal total, so this sentinel can no longer
# be distinguished from a real value; see item 10 in the file header about
# restoring a negative sentinel once loss.model.R initializes totals explicitly.
CONST.UNINITIALIZED.NON.NEG.NUM <- 0.0
#==============================================================================
#==============================================================================
#==============================================================================
# Fetch the AREA_OF_C1_CPW value for one planning unit from the dynamic
# PU info table.  (hmv presumably = high management viability - confirm.)
get.hmv.of <- function (PU.to.develop)
{
  sql.statement <- paste ("select AREA_OF_C1_CPW from",
                          dynamicPUinfoTableName,
                          "where ID =", PU.to.develop)
  sql.get.data (PUinformationDBname, sql.statement)
}
# Fetch the AREA_OF_C2_CPW value for one planning unit from the dynamic
# PU info table.  (mmv presumably = medium management viability - confirm.)
get.mmv.of <- function (PU.to.develop)
{
  sql.statement <- paste ("select AREA_OF_C2_CPW from",
                          dynamicPUinfoTableName,
                          "where ID =", PU.to.develop)
  sql.get.data (PUinformationDBname, sql.statement)
}
# Fetch the AREA_OF_C3_CPW value for one planning unit from the dynamic
# PU info table.  (lmv presumably = low management viability - confirm.)
get.lmv.of <- function (PU.to.develop)
{
  sql.statement <- paste ("select AREA_OF_C3_CPW from",
                          dynamicPUinfoTableName,
                          "where ID =", PU.to.develop)
  sql.get.data (PUinformationDBname, sql.statement)
}
# Total CPW area (AREA_OF_CPW column) of one planning unit, read from the
# dynamic PU info table.  Cleaned up: removed the commented-out debugging
# cruft and the redundant intermediate/return.
get.cpw.of <- function (PU.to.develop)
{
  query <- paste ('select AREA_OF_CPW from', dynamicPUinfoTableName,
                  "where ID =", PU.to.develop)
  sql.get.data (PUinformationDBname, query)
}
# Total land area (AREA column) of one planning unit, read from the
# dynamic PU info table.  Cleaned up: removed the commented-out debugging
# cruft and the redundant intermediate/return.
get.area.of <- function (PU.to.develop)
{
  query <- paste ('select AREA from', dynamicPUinfoTableName,
                  "where ID =", PU.to.develop)
  sql.get.data (PUinformationDBname, query)
}
#==============================================================================
#==============================================================================
#==============================================================================
# S4 class holding the state of one development pool (the set of parcels a
# time step draws developments from): the DB column-name suffix for this
# pool, whether more development is allowed this step, and running totals
# of developed CPW area broken down by management-viability class.
setClass ("DevPool",
representation (DP.db.field.label = "character",  # column suffix: "INSIDE_GC" or "OUTSIDE_GC"
name = "character",                                # human-readable pool name
more.dev.allowed.in.cur.ts = "logical",            # FALSE once this step's loss target is met
cur.tot.cpw.for.ts = "numeric",                    # CPW developed in the CURRENT step only (overflow calc)
cur.cpw.tot.developed = "numeric",                 # run-long total CPW developed
cur.hmv.tot.developed = "numeric",                 # run-long totals per viability class;
cur.mmv.tot.developed = "numeric",                 # hmv/mmv/lmv map to the C1/C2/C3 CPW columns
cur.lmv.tot.developed = "numeric",                 # (presumably high/med/low viability - confirm)
offset.multiplier = "numeric",                     # presumably offset ha required per ha developed - confirm
#------------------------------------------------
# The label "cur." is used here because we may
# want to have the target value change over
# the run of the model, e.g., to accommodate
# increasing development rates.
#------------------------------------------------
cur.target.loss.rate.for.ts = "numeric"            # hectares of allowed loss this time step
),
# Prototype values; note CONST.UNINITIALIZED.NON.NEG.NUM is currently 0.0,
# so "uninitialized" totals are indistinguishable from real zeros (see
# item 10 in the file header).
prototype (DP.db.field.label = "",
name = "",
more.dev.allowed.in.cur.ts = TRUE,
cur.tot.cpw.for.ts = 0.0,
cur.cpw.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.hmv.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.mmv.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.lmv.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
offset.multiplier = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.target.loss.rate.for.ts = CONST.UNINITIALIZED.NON.NEG.NUM
)
);
###setValidity ("DevPool",
### function (object) {
### if (FALSE)
### {
### cat ("\n\nDebugging: at start of DevPool::validObject()\n",
### " object@cur.cpw.tot.developed = ", object@cur.cpw.tot.developed, "\n",
### " object@cur.hmv.tot.developed = ", object@cur.hmv.tot.developed, "\n",
### " object@cur.mmv.tot.developed = ", object@cur.mmv.tot.developed, "\n",
### " object@cur.lmv.tot.developed = ", object@cur.lmv.tot.developed, "\n",
### " object@cur.target.loss.rate.for.ts = ", object@cur.target.loss.rate.for.ts, "\n"
### );
### }
###
### if (object@cur.cpw.tot.developed < 0)
### "cur.cpw.tot.developed must be >= 0"
### else if (object@cur.hmv.tot.developed < 0)
### "cur.hmv.tot.developed must be >= 0"
### else if (object@cur.mmv.tot.developed < 0)
### "cur.mmv.tot.developed must be >= 0"
### else if (object@cur.lmv.tot.developed < 0)
### "cur.lmv.tot.developed must be >= 0"
### else if (object@cur.tot.cpw.for.ts < 0)
### "cur.tot.cpw.for.ts must be >= 0"
### else if (object@cur.target.loss.rate.for.ts < 0)
### "cur.target.loss.rate.for.ts must be >= 0"
### else
### TRUE
### }
### );
#==============================================================================
#-------------------------------------------------------------
# Create the specializations of the DevPool class to handle
# things that are very specific to inside and outside the
# growth center in Sydney.
#
# Nearly everything can be handled through data values, but
# a couple of things currently require the use of special
# methods.
#-------------------------------------------------------------
# Pool of parcels inside the Sydney growth centre; the field label selects
# the *_INSIDE_GC columns in the working tables.
setClass ("DevPool.inside.gc",
prototype = prototype (DP.db.field.label = "INSIDE_GC"),
contains = "DevPool"
);
#----------
# Pool of parcels outside the growth centre; the field label selects
# the *_OUTSIDE_GC columns in the working tables.
setClass ("DevPool.outside.gc",
prototype = prototype (DP.db.field.label = "OUTSIDE_GC"),
contains = "DevPool"
);
#==============================================================================
#==============================================================================
#==============================================================================
# Create generic and specific get and set routines for
# all instance variables.
#==============================================================================
# Generic get / set<- accessor pairs for every DevPool slot.
# NOTE(review): each setter is implemented as initialize(x, slot = value),
# which returns a MODIFIED COPY (S4 value semantics).  The replacement-
# function syntax  slot(pool) <- value  rebinds `pool` in the caller; a
# plain call to the "<-" generic would silently discard the update.
#----- DP.db.field.label -----#
# Get
setGeneric ("DP.db.field.label", signature = "x",
function (x) standardGeneric ("DP.db.field.label"))
setMethod ("DP.db.field.label", "DevPool",
function (x) x@DP.db.field.label);
# Set
setGeneric ("DP.db.field.label<-", signature = "x",
function (x, value) standardGeneric ("DP.db.field.label<-"))
setMethod ("DP.db.field.label<-", "DevPool",
function (x, value) initialize (x, DP.db.field.label = value))
#----- name -----#
# Get
setGeneric ("name", signature = "x",
function (x) standardGeneric ("name"))
setMethod ("name", "DevPool",
function (x) x@name);
# Set
setGeneric ("name<-", signature = "x",
function (x, value) standardGeneric ("name<-"))
setMethod ("name<-", "DevPool",
function (x, value) initialize (x, name = value))
#----- more.dev.allowed.in.cur.ts -----#
# Get
setGeneric ("more.dev.allowed.in.cur.ts", signature = "x",
function (x) standardGeneric ("more.dev.allowed.in.cur.ts"))
setMethod ("more.dev.allowed.in.cur.ts", "DevPool",
function (x) x@more.dev.allowed.in.cur.ts);
# Set
setGeneric ("more.dev.allowed.in.cur.ts<-", signature = "x",
function (x, value) standardGeneric ("more.dev.allowed.in.cur.ts<-"))
setMethod ("more.dev.allowed.in.cur.ts<-", "DevPool",
function (x, value) initialize (x, more.dev.allowed.in.cur.ts = value))
#----- cur.tot.cpw.for.ts -----#
# Get
setGeneric ("cur.tot.cpw.for.ts", signature = "x",
function (x) standardGeneric ("cur.tot.cpw.for.ts"))
setMethod ("cur.tot.cpw.for.ts", "DevPool",
function (x) x@cur.tot.cpw.for.ts);
# Set
setGeneric ("cur.tot.cpw.for.ts<-", signature = "x",
function (x, value) standardGeneric ("cur.tot.cpw.for.ts<-"))
setMethod ("cur.tot.cpw.for.ts<-", "DevPool",
function (x, value) initialize (x, cur.tot.cpw.for.ts = value))
#----- cur.cpw.tot.developed -----#
# Get
setGeneric ("cur.cpw.tot.developed", signature = "x",
function (x) standardGeneric ("cur.cpw.tot.developed"))
setMethod ("cur.cpw.tot.developed", "DevPool",
function (x) x@cur.cpw.tot.developed);
# Set
setGeneric ("cur.cpw.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.cpw.tot.developed<-"))
setMethod ("cur.cpw.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.cpw.tot.developed = value))
#----- cur.hmv.tot.developed -----#
# Get
setGeneric ("cur.hmv.tot.developed", signature = "x",
function (x) standardGeneric ("cur.hmv.tot.developed"))
setMethod ("cur.hmv.tot.developed", "DevPool",
function (x) x@cur.hmv.tot.developed);
# Set
setGeneric ("cur.hmv.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.hmv.tot.developed<-"))
setMethod ("cur.hmv.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.hmv.tot.developed = value))
#----- cur.mmv.tot.developed -----#
# Get
setGeneric ("cur.mmv.tot.developed", signature = "x",
function (x) standardGeneric ("cur.mmv.tot.developed"))
setMethod ("cur.mmv.tot.developed", "DevPool",
function (x) x@cur.mmv.tot.developed);
# Set
setGeneric ("cur.mmv.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.mmv.tot.developed<-"))
setMethod ("cur.mmv.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.mmv.tot.developed = value))
#----- cur.lmv.tot.developed -----#
# Get
setGeneric ("cur.lmv.tot.developed", signature = "x",
function (x) standardGeneric ("cur.lmv.tot.developed"))
setMethod ("cur.lmv.tot.developed", "DevPool",
function (x) x@cur.lmv.tot.developed);
# Set
setGeneric ("cur.lmv.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.lmv.tot.developed<-"))
setMethod ("cur.lmv.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.lmv.tot.developed = value))
#----- offset.multiplier -----#
# Get
setGeneric ("offset.multiplier", signature = "x",
function (x) standardGeneric ("offset.multiplier"))
setMethod ("offset.multiplier", "DevPool",
function (x) x@offset.multiplier);
# Set
setGeneric ("offset.multiplier<-", signature = "x",
function (x, value) standardGeneric ("offset.multiplier<-"))
setMethod ("offset.multiplier<-", "DevPool",
function (x, value) initialize (x, offset.multiplier = value))
#----- cur.target.loss.rate.for.ts -----#
# Get
setGeneric ("cur.target.loss.rate.for.ts", signature = "x",
function (x) standardGeneric ("cur.target.loss.rate.for.ts"))
setMethod ("cur.target.loss.rate.for.ts", "DevPool",
function (x) x@cur.target.loss.rate.for.ts);
# Set
setGeneric ("cur.target.loss.rate.for.ts<-", signature = "x",
function (x, value) standardGeneric ("cur.target.loss.rate.for.ts<-"))
setMethod ("cur.target.loss.rate.for.ts<-", "DevPool",
function (x, value) initialize (x, cur.target.loss.rate.for.ts = value))
#==============================================================================
# Initializers for the classes.
#==============================================================================
### FOR SOME REASON, R IS UNHAPPY WITH HOW I'VE DEFINED THESE INITIALIZERS,
### BUT I CAN'T FIGURE OUT WHAT THE ERROR MESSAGE MEANS:
###
### Error in conformMethod(signature, mnames, fnames, f, fdef, definition) :
### in method for ‘initialize’ with signature ‘.Object="DevPool.inside.gc"’:
### formal arguments (.Object = "DevPool.inside.gc") omitted in the method
### definition cannot be in the signature
###
### SO, FOR THE MOMENT, I'M JUST GOING TO DO THE INITIALIZATION BY HAND
### WHEN THE OBJECTS ARE CREATED AND COME BACK TO THIS LATER...
###setMethod (f = "initialize",
### signature = "DevPool.inside.gc",
### definition =
### function (object, initial.hmv.loss.rate, step.interval)
### {
### cat("~~~ DevPool.inside.gc: initializer ~~~ \n")
# ARE THESE SUPPOSED TO BE USING THE HMV LOSS RATE?
# AREN'T THEY SUPPOSED TO BE USING THE TOTAL CPW LOSS RATE INSTEAD?
# PAR.initial.inside.gc.cpw.loss.rate <- 39.6 # hectares per yr
### object@cur.target.loss.rate.for.ts <- initial.cpw.loss.rate * step.interval
### return (object)
### }
### )
#----------
###setMethod (f = "initialize",
### signature = "DevPool.outside.gc",
### definition =
### function (object, initial.hmv.loss.rate, step.interval)
### {
### cat("~~~ DevPool.outside.gc: initializer ~~~ \n")
# ARE THESE SUPPOSED TO BE USING THE HMV LOSS RATE?
# AREN'T THEY SUPPOSED TO BE USING THE TOTAL CPW LOSS RATE INSTEAD?
# PAR.initial.outside.gc.cpw.loss.rate <- 48
# +/- 10 per yr, so, could runif or truncated normal
# in [38 to 58] to get cur rate
### object@cur.target.loss.rate.for.ts <- initial.cpw.loss.rate * step.interval
### return (object)
### }
### )
#==============================================================================
#----- initialize.dev.pool.running.totals.at.start.of.ts -----#
setGeneric ("initialize.dev.pool.running.totals.at.start.of.ts", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("initialize.dev.pool.running.totals.at.start.of.ts"))
#--------------------
# Need to reload the running totals from the database.
# This really should be done in the initialize routine for the classes,
# but I haven't got that working correctly yet.
#
# Reads the four *_TOT_DEV_<label> columns of the offsetting working-vars
# table into this pool's running-total slots at the start of a time step.
# NOTE(review): the assign()/parent.frame() call at the end simulates
# pass-by-reference by rebinding the CALLER's variable (name recovered via
# deparse(substitute(...))) to the modified copy; this only works when the
# method is called with a plain variable name, not an expression.
setMethod ("initialize.dev.pool.running.totals.at.start.of.ts", "DevPool",
function (cur.dev.pool)
{
nameObject <- deparse (substitute (cur.dev.pool))
# Figure out whether the field names use "INSIDE_GC" or "OUTSIDE_GC".
cur.DP.db.field.label <- DP.db.field.label (cur.dev.pool)
if(DEBUG.OFFSETTING) cat (" cur.DP.db.field.label = <", cur.DP.db.field.label, ">\n")
# CPW running total
query <- paste ('select CPW_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.cpw.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
# HMV running total
query <- paste ('select HMV_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.hmv.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
# MMV running total
query <- paste ('select MMV_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.mmv.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
# LMV running total
query <- paste ('select LMV_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.lmv.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
#### dummy setting to see if it registers anywhere...
#### cur.dev.pool@cur.lmv.tot.developed <- 52
if(DEBUG.OFFSETTING) cat ("In initialize.dev.pool.running.totals.at.start.of.ts: \n");
if(DEBUG.OFFSETTING) cat (" current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat (" cur.dev.pool = \n");
if(DEBUG.OFFSETTING) print (cur.dev.pool)
# Rebind the caller's variable to the updated copy (see NOTE above).
assign (nameObject, cur.dev.pool, envir=parent.frame())
}
)
#==============================================================================
#----- save.cur.dev.pool.running.totals -----#
setGeneric ("save.cur.dev.pool.running.totals", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("save.cur.dev.pool.running.totals"))
#--------------------
# Persist this pool's four run-long developed-area totals (CPW/HMV/MMV/LMV)
# back into the offsetting working-variables table so they survive across
# time steps.  Opens and closes its own database connection.
setMethod ("save.cur.dev.pool.running.totals", "DevPool",
function (cur.dev.pool)
{
# Set these to 0 at the very start of the model.
# DON'T reset them to 0 every time you start a time step.
# DO retrieve their values from the database at the start of
# each time step.
# Need to initialize all of these in the workingvars database at the start of the model and
# then reload their values into these variables at the start of each time step
# by reading their values out of the database.
connect.to.database( PUinformationDBname );
#-----
if(DEBUG.OFFSETTING) cat ("\n\nIn save.cur.dev.pool.running.totals():")
# Figure out whether the field names use "INSIDE_GC" or "OUTSIDE_GC".
cur.DP.db.field.label <- DP.db.field.label (cur.dev.pool)
if(DEBUG.OFFSETTING) cat ("\n cur.DP.db.field.label = ", cur.DP.db.field.label)
# CPW running total
# NOTE(review): slot values are pasted straight into the SQL text (no bound
# parameters); acceptable only while the totals are numeric slot values.
query <- paste ('update ', offsettingWorkingVarsTableName,
' set CPW_TOT_DEV_', cur.DP.db.field.label, ' = ',
cur.cpw.tot.developed (cur.dev.pool),
sep = '' )
if(DEBUG.OFFSETTING) cat ("\n CPW running total query = ", query)
sql.send.operation (query)
# HMV running total
query <- paste ('update ', offsettingWorkingVarsTableName,
' set HMV_TOT_DEV_', cur.DP.db.field.label, ' = ',
cur.hmv.tot.developed (cur.dev.pool),
sep = '' )
if(DEBUG.OFFSETTING) cat ("\n HMV running total query = ", query)
sql.send.operation (query)
# MMV running total
query <- paste ('update ', offsettingWorkingVarsTableName,
' set MMV_TOT_DEV_', cur.DP.db.field.label, ' = ',
cur.mmv.tot.developed (cur.dev.pool),
sep = '' )
if(DEBUG.OFFSETTING) cat ("\n MMV running total query = ", query)
sql.send.operation (query)
# LMV running total
query <- paste ('update ', offsettingWorkingVarsTableName,
' set LMV_TOT_DEV_', cur.DP.db.field.label, ' = ',
cur.lmv.tot.developed (cur.dev.pool),
sep = '' )
if(DEBUG.OFFSETTING) cat ("\n LMV running total query = ", query)
sql.send.operation (query)
#-----
close.database.connection();
if(DEBUG.OFFSETTING) cat ("\n\nAt end of DevPool::save.cur.dev.pool.running.totals()\n")
if(DEBUG.OFFSETTING) cat (" Are these totals saved correctly in the db?\n",
" They're 0 when reloaded at the start of the next time step (except for tot cpw).\n")
if(DEBUG.OFFSETTING) print (cur.dev.pool)
}
)
#==============================================================================
#***
#------------------------------------------------------------------------------
# These things should be in the initialize() routine for each class, but
# R is giving me an error message that I can't figure out, so I'll do it
# by hand here.
#------------------------------------------------------------------------------
#***
#------------------------------------------------------------------------------
# ARE THESE SUPPOSED TO BE USING THE HMV LOSS RATE?
# AREN'T THEY SUPPOSED TO BE USING THE TOTAL CPW LOSS RATE INSTEAD?
#------------------------------------------------------------------------------
#----- initialize.inside.gc.dev.pool.target.loss.rate -----#
setGeneric ("initialize.dev.pool.target.loss.rate", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("initialize.dev.pool.target.loss.rate"))
#--------------------
# inside gc #
# Sets this pool's per-time-step loss target to the annual inside-GC CPW
# loss-rate parameter times the step length (both read from globals).
# NOTE(review): rebinds the caller's variable via assign()/parent.frame(),
# so the method must be called with a plain variable name.
setMethod ("initialize.dev.pool.target.loss.rate", "DevPool.inside.gc",
function (cur.dev.pool)
{
nameObject <- deparse (substitute (cur.dev.pool))
#step.interval <- 5
cur.target.loss.rate.for.ts (cur.dev.pool) <-
PAR.initial.inside.gc.cpw.loss.rate * step.interval
if(DEBUG.OFFSETTING) cat ("\n\nIn initialize.dev.pool.target.loss.rate: INSIDE gc.\n")
if(DEBUG.OFFSETTING) cat (" current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat (" PAR.initial.inside.gc.cpw.loss.rate = ", PAR.initial.inside.gc.cpw.loss.rate, "\n")
if(DEBUG.OFFSETTING) cat (" step.interval = ", step.interval, "\n")
if(DEBUG.OFFSETTING) cat (" cur.target.loss.rate.for.ts (cur.dev.pool) = ", cur.target.loss.rate.for.ts (cur.dev.pool), "\n")
assign (nameObject, cur.dev.pool, envir=parent.frame())
}
)
#---------------------------------------------
# outside gc #
# Same as the inside-gc method, but driven by the outside-GC loss-rate
# parameter.  See item 4 in the file header: the rate may eventually be
# drawn per step (e.g. runif in [38, 58]) rather than fixed.
setMethod ("initialize.dev.pool.target.loss.rate", "DevPool.outside.gc",
function (cur.dev.pool)
{
nameObject <- deparse (substitute (cur.dev.pool))
#step.interval <- 5
cur.target.loss.rate.for.ts (cur.dev.pool) <-
PAR.initial.outside.gc.cpw.loss.rate * step.interval
if(DEBUG.OFFSETTING) cat ("\n\nIn initialize.dev.pool.target.loss.rate: OUTSIDE gc.\n")
if(DEBUG.OFFSETTING) cat (" current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat (" PAR.initial.outside.gc.cpw.loss.rate = ", PAR.initial.outside.gc.cpw.loss.rate, "\n")
if(DEBUG.OFFSETTING) cat (" step.interval = ", step.interval, "\n")
if(DEBUG.OFFSETTING) cat (" cur.target.loss.rate.for.ts (cur.dev.pool) = ", cur.target.loss.rate.for.ts (cur.dev.pool), "\n")
assign (nameObject, cur.dev.pool, envir=parent.frame())
}
)
#==============================================================================
#----- choose.offset.pool -----#
setGeneric ("choose.offset.pool", signature = ".Object",
function (.Object) standardGeneric ("choose.offset.pool"))
#--------------------
#---------------------------------------------------------------------
# This function is where you would designate an offset to be leaked
# outside the study area, but I haven't done that yet because
# we haven't discussed how that would work yet.
#---------------------------------------------------------------------
#--------------------
# Development OUTSIDE the growth centre is always offset outside the GC
# as well; no probabilistic choice is made for this pool.
setMethod ("choose.offset.pool", "DevPool.outside.gc",
function (.Object)
{
if(DEBUG.OFFSETTING) cat ('\nOffset should go OUTSIDE GC')
return ( CONST.dev.OUT.offset.OUT)
}
)
#--------------------
#--------------------
# For a development INSIDE the growth centre, draw whether its offset is
# placed inside or outside the GC: it stays inside with probability
# PAR.prob.that.inside.gc.is.offset.inside.gc.
setMethod ("choose.offset.pool", "DevPool.inside.gc",
  function (.Object)
  {
    stays.inside <- runif(1) < PAR.prob.that.inside.gc.is.offset.inside.gc
    if (stays.inside)
    {
      if (DEBUG.OFFSETTING) cat ('\nOffset should go INSIDE GC')
      CONST.dev.IN.offset.IN
    } else
    {
      if (DEBUG.OFFSETTING) cat ('\nOffset should go OUTSIDE GC')
      CONST.dev.IN.offset.OUT
    }
  }
)
#==============================================================================
#-----------------------------------------------------------------------
# Utility functions, particularly related to dealing with overflow of
# development from one time step to the next.
#
# At the moment, these are just dummy calls that need to be replaced
# with database interactions whose tables have not been set up yet.
#-----------------------------------------------------------------------
#==============================================================================
#----- assign.PU.to.cur.ts -----#
setGeneric ("assign.PU.to.cur.ts", signature = "cur.dev.pool",
            function (cur.dev.pool, PU.to.develop) standardGeneric ("assign.PU.to.cur.ts"))
#--------------------
# Record the development of planning unit PU.to.develop against this pool:
# adds the PU's CPW/HMV/MMV/LMV areas to the run-long totals, and its CPW
# area to the current-time-step total used for overflow calculations.
# NOTE(review): rebinds the caller's variable via assign()/parent.frame(),
# so the method must be called with a plain variable name.
setMethod ("assign.PU.to.cur.ts", "DevPool",
  function (cur.dev.pool, PU.to.develop)
  {
    nameObject <- deparse (substitute (cur.dev.pool))

    # WHAT HAPPENS WITH ALL THESE RUNNING TOTALS IF OFFSETTING FAILS?
    # DO WE NEED TO HAVE THESE ONLY AS SCRATCH VALUES UNTIL OFFSET SUCCEEDS
    # (WHICH IS ALSO MAKING THE ASSUMPTION THAT OFFSETTING IS EVEN BEING DONE).
    # May need to store the incremental values added in the working table as well
    # (e.g., the result of the get.cpw.of (PU.to.develop), get.hmv.of... calls here)
    # so that you can undo the changes made here if offsetting fails.

    # Fetch the CPW area exactly once; the original code issued the same
    # database query twice (once for the run-long total, once for the
    # per-step total).
    cpw.of.PU <- get.cpw.of (PU.to.develop)

    cur.cpw.tot.developed (cur.dev.pool) <- cur.cpw.tot.developed (cur.dev.pool) + cpw.of.PU
    cur.hmv.tot.developed (cur.dev.pool) <- cur.hmv.tot.developed (cur.dev.pool) + get.hmv.of (PU.to.develop)
    cur.mmv.tot.developed (cur.dev.pool) <- cur.mmv.tot.developed (cur.dev.pool) + get.mmv.of (PU.to.develop)
    cur.lmv.tot.developed (cur.dev.pool) <- cur.lmv.tot.developed (cur.dev.pool) + get.lmv.of (PU.to.develop)

    # The values above are for overall running totals over the whole run.
    # This one is just for what's been developed in the current time step.
    # It's used for doing overflow calculations.
    cur.tot.cpw.for.ts (cur.dev.pool) <- cur.tot.cpw.for.ts (cur.dev.pool) + cpw.of.PU

    if (DEBUG.OFFSETTING) cat ("\n\nIn assign.PU.to.cur.ts: <DEVELOPING PU ID ", PU.to.develop, ">\n");
    assign (nameObject, cur.dev.pool, envir = parent.frame())
  }
) # end - setMethod assign.PU.to.cur.ts
#==============================================================================
#----- select.PUs.currently.eligible.for.dev -----#
setGeneric ("select.PUs.currently.eligible.for.dev", signature = "cur.dev.pool",
            function (cur.dev.pool) standardGeneric ("select.PUs.currently.eligible.for.dev"))
#--------------------
# Return the IDs of all planning units currently eligible for development
# in this pool, as selected by the pool-specific eligibility query.
setMethod ("select.PUs.currently.eligible.for.dev", "DevPool",
  function (cur.dev.pool)
  {
    query <- build.eligibility.query (cur.dev.pool)
    eligible.PUs <- sql.get.data (PUinformationDBname, query)
    if (DEBUG.OFFSETTING) cat ("\n\nIn select.PUs.currently.eligible.for.dev:\n")
    if (DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
    if (DEBUG.OFFSETTING) cat ("    num of eligible.PUs = ", length (eligible.PUs), "\n")
    # head() rather than [1:5]: indexing past the end padded the debug
    # output with NAs whenever fewer than 5 PUs were eligible.
    if (DEBUG.OFFSETTING) cat ("    eligible.PUs = ", head (eligible.PUs, 5), "...\n")
    if (DEBUG.OFFSETTING) cat ("    Dev Query = ", query, "\n" )
    return (eligible.PUs)
  }
)
#==============================================================================
#----- build.eligibility.query -----#
setGeneric ("build.eligibility.query", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("build.eligibility.query"))
#--------------------
# NEED TO TEST FOR 2 DIFFERENT KINDS OF THINGS HERE?
# I.E., ONE IS ABOUT OVERFLOW TO THE NEXT STEP AND THE OTHER IS ABOUT EXCEEDING THE TOTAL
# ALLOWED AMOUNT OF DEVELOPMENT FOR EACH CPW CLASS (HMV, MMV, LMV).
# ONCE A CHOSEN PARCEL WOULD EXCEED ONE OR MORE OF THE LIMITS, THEN IT NEEDS TO BE REMOVED
# FROM THE DEVELOPMENT POOL SINCE IT WILL NEVER BE UNDER THE LIMIT AFTER THAT.
# *****
# NOTE: THAT ASSUMES THAT THE LIMITS WILL NOT BE RESET LATER IN THE MODEL RUN AND
# THAT THE LIMIT IS ON AMOUNT DEVELOPED, NOT ON TOTAL AVAILABLE IN THE LANDSCAPE.
# IF MANAGEMENT ALLOWED FOR INCREASE IN CONDITION, THE TOTAL AMOUNT COULD INCREASE
# (OR DECREASE) OVER TIME. THIS SUGGESTS A POLICY QUESTION ABOUT WHETHER THE
# DEVELOPMENT SHOULD BE GOVERNED BY MECHANISM OR BY OUTCOME.
# SHOULD MODEL THESE TWO CHOICES TO HIGHLIGHT THIS.
# If mechanism is the driver, then you do not have to make the check again once you
# have exceeded the limit. If outcome is the driver, then you have to keep checking.
# One other thing though - outcome could be phrased as trying to stay around the
# target with falling back to the target from a higher point allowed (i.e., if your
# proposed development does not drop the total below the target level, then go ahead,
# even though it does cause loss)
# or it could be phrased as never allowing any gain to be lost.
# *****
#--------------------
# INSIDE-GC eligibility: undeveloped, unprotected, unreserved parcels in
# the growth centre whose per-class CPW areas still fit under the caps.
# (GC_CERT = 1 presumably means certified for development - confirm.)
setMethod ("build.eligibility.query", "DevPool.inside.gc",
function (cur.dev.pool)
{
# Need to compute the amount of space left under each cpw cap
hmv.space.left.under.limit.inside.gc <-
PAR.hmv.limit.inside.gc - cur.hmv.tot.developed (cur.dev.pool)
mmv.space.left.under.limit.inside.gc <-
PAR.mmv.limit.inside.gc - cur.mmv.tot.developed (cur.dev.pool)
lmv.space.left.under.limit.inside.gc <-
PAR.lmv.limit.inside.gc - cur.lmv.tot.developed (cur.dev.pool)
# Numeric headroom values are pasted directly into the SQL text.
query <- paste ('select ID from ', dynamicPUinfoTableName,
'where DEVELOPED = 0',
'and GROWTH_CENTRE = 1',
'and TENURE = "Unprotected"',
'and RESERVED = 0',
'and GC_CERT = 1',
'and AREA_OF_C1_CPW <=', hmv.space.left.under.limit.inside.gc,
'and AREA_OF_C2_CPW <=', mmv.space.left.under.limit.inside.gc,
'and AREA_OF_C3_CPW <=', lmv.space.left.under.limit.inside.gc
)
return (query)
}
)
#--------------------
# OUTSIDE-GC eligibility: the where-clause is read from the configurable
# parameter PAR.dev.outside.GC.criteria (set in the yaml config file).
setMethod ("build.eligibility.query", "DevPool.outside.gc",
function (cur.dev.pool)
{
# for OUTSIDE gc, there are no tests other than staying around the
# outside gc loss rate.
# Moving part of this to the yaml file - Ascelin Gordon 2011.01.19
## query <- paste ('select ID from ', dynamicPUinfoTableName,
## 'where DEVELOPED = 0',
## 'and GROWTH_CENTRE = 0',
## 'and TENURE = "Unprotected"',
## 'and RESERVED = 0'
## )
query <- paste ('select ID from', dynamicPUinfoTableName, 'where', PAR.dev.outside.GC.criteria )
return (query)
}
)
#==============================================================================
#----- make.sure.overflow.dev.PU.is.still.legal -----#
setGeneric ("make.sure.overflow.dev.PU.is.still.legal", signature = "cur.dev.pool",
            function (cur.dev.pool, overflow.PU) standardGeneric ("make.sure.overflow.dev.PU.is.still.legal"))
#--------------------
# Check whether an overflow PU carried over from the previous time step is
# still eligible for development under the pool's CURRENT constraints.
# Returns TRUE if overflow.PU appears in the current eligibility list.
setMethod ("make.sure.overflow.dev.PU.is.still.legal", "DevPool",
  function (cur.dev.pool, overflow.PU)
  {
    eligible.PUs <- select.PUs.currently.eligible.for.dev (cur.dev.pool)
    if (DEBUG.OFFSETTING) cat ("\n\nIn make.sure.overflow.dev.PU.is.still.legal:\n")
    if (DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
    if (DEBUG.OFFSETTING) cat ("    overflow.PU = ", overflow.PU, "\n")
    if (DEBUG.OFFSETTING) cat ("    num of eligible.PUs = ", length (eligible.PUs), "\n")
    if (DEBUG.OFFSETTING) cat ("    overflow.PU %in% eligible.PUs = ", overflow.PU %in% eligible.PUs, "\n")
    # head() avoids the NA padding that [1:5] produced when fewer than
    # 5 PUs were eligible.
    if (DEBUG.OFFSETTING) cat ("    eligible.PUs = ", head (eligible.PUs, 5), "...\n")
    # %in% (match-based) never yields NA, unlike any(eligible.PUs == overflow.PU),
    # which propagates NA if the eligibility list contains NA entries.
    return (overflow.PU %in% eligible.PUs)  # TRUE if overflow.PU is in the list
  }
)
#==============================================================================
#----- get.overflow.PU.from.prev.ts.to.develop -----#
# Fetch the ID of the PU (if any) that overflowed from the previous time
# step and is still waiting to be developed by this pool.  Reads the
# pool-specific <label>_DEV_OVERFLOW_PU_ID column of the offsetting
# working-variables table; CONST.NO.OVERFLOW.PU.TO.DEV means "none".
setGeneric ("get.overflow.PU.from.prev.ts.to.develop", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("get.overflow.PU.from.prev.ts.to.develop"))
#--------------------
setMethod ("get.overflow.PU.from.prev.ts.to.develop", "DevPool",
function (cur.dev.pool)
{
        # paste0() replaces the old paste (..., sep = '') idiom;
        # output string is identical.
    query <- paste0 ('select ',
                     DP.db.field.label (cur.dev.pool), '_DEV_OVERFLOW_PU_ID from ',
                     offsettingWorkingVarsTableName)
    return (sql.get.data (PUinformationDBname, query))  # overflow PU to develop
}
)
#==============================================================================
#----- prev.ts.left.overflow.PU.to.develop -----#
# TRUE when the previous time step pushed a PU forward for this pool to
# develop now, i.e., the stored overflow ID is not the "none" sentinel.
setGeneric ("prev.ts.left.overflow.PU.to.develop", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("prev.ts.left.overflow.PU.to.develop"))
#--------------------
setMethod ("prev.ts.left.overflow.PU.to.develop", "DevPool",
function (cur.dev.pool)
{
    if(DEBUG.OFFSETTING) cat ("\n\nIn prev.ts.left.overflow.PU.to.develop:\n")
    if(DEBUG.OFFSETTING) cat (" current.time.step = ", current.time.step, "\n")
    overflow.PU.from.prev <- get.overflow.PU.from.prev.ts.to.develop (cur.dev.pool)
    if(DEBUG.OFFSETTING) cat (" overflow.PU.from.prev = ", overflow.PU.from.prev)
    return (overflow.PU.from.prev != CONST.NO.OVERFLOW.PU.TO.DEV)
}
)
#==============================================================================
#----- compute.overflow.fraction.for.PU.to.develop -----#
# Compute what fraction of a candidate PU's CPW would exceed the pool's
# target loss rate for this time step if the PU were developed.
# Returns 0 when the PU carries no CPW; may return a negative value when
# the PU fits entirely within the remaining allotment (callers only test
# for > 0) and a value > 1 is possible when the running total has already
# passed the target.  Stops with an error on a negative CPW reading.
setGeneric ("compute.overflow.fraction.for.PU.to.develop", signature = "cur.dev.pool",
function (cur.dev.pool, PU.to.develop) standardGeneric ("compute.overflow.fraction.for.PU.to.develop"))
#--------------------
setMethod ("compute.overflow.fraction.for.PU.to.develop", "DevPool",
function (cur.dev.pool, PU.to.develop)
{
    # Default: no overflow (also the final answer when cur.PU.cpw == 0).
overflow.fraction <- 0.0
cur.PU.cpw <- get.cpw.of (PU.to.develop)
if (cur.PU.cpw < 0)
{
    # A negative area can only come from corrupt data - fail loudly.
errMsg <- paste ("\n\nERROR in compute.overflow.fraction.for.PU.to.develop():",
"\n cur.PU.cpw = ", cur.PU.cpw, " -- Must be >= 0.\n\n", sep='')
stop (errMsg)
} else if (cur.PU.cpw > 0)
{
#----------------------------------------------------------------------
# Compute what the running cpw total will be if this PU is developed
# and what fraction of the PU's area will be overflowing the current
# target rate for development in this time step.
#----------------------------------------------------------------------
    # NOTE(review): the "inside.gc" in this local name is historical -
    # this method runs for any DevPool, not just the inside-gc pool.
next.inside.gc.tot.cpw.for.ts <- cur.tot.cpw.for.ts (cur.dev.pool) + cur.PU.cpw
overflow.fraction <-
(next.inside.gc.tot.cpw.for.ts - cur.target.loss.rate.for.ts (cur.dev.pool)) /
cur.PU.cpw
if(DEBUG.OFFSETTING) cat ("\n\nIn compute.overflow.fraction.for.PU.to.develop\n")
if(DEBUG.OFFSETTING) cat (" current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat (" PU.to.develop = ", PU.to.develop, "\n")
if(DEBUG.OFFSETTING) cat (" --- cur.cpw.tot.developed (cur.dev.pool) = ",
cur.cpw.tot.developed (cur.dev.pool), "\n")
if(DEBUG.OFFSETTING) cat (" --- cur.tot.cpw.for.ts (cur.dev.pool) = ",
cur.tot.cpw.for.ts (cur.dev.pool), "\n")
if(DEBUG.OFFSETTING) cat (" --- cur.PU.cpw = ", cur.PU.cpw, "\n")
if(DEBUG.OFFSETTING) cat (" --- next.inside.gc.tot.cpw.for.ts = ",
next.inside.gc.tot.cpw.for.ts, "\n")
if(DEBUG.OFFSETTING) cat (" --- cur.target.loss.rate.for.ts (cur.dev.pool) = ",
cur.target.loss.rate.for.ts (cur.dev.pool), "\n\n")
if(DEBUG.OFFSETTING) cat (" overflow.fraction = ", overflow.fraction, "\n")
}
return (overflow.fraction)
}
)
#==============================================================================
#----- set.dev.overflow.PU.from.prev.ts -----#
# Persist 'value' as this pool's overflow-PU ID in the offsetting
# working-variables table so the NEXT time step can pick it up.
# Pass CONST.NO.OVERFLOW.PU.TO.DEV to clear the record.
setGeneric ("set.dev.overflow.PU.from.prev.ts", signature = "cur.dev.pool",
function (cur.dev.pool, value) standardGeneric ("set.dev.overflow.PU.from.prev.ts"))
#--------------------
setMethod ("set.dev.overflow.PU.from.prev.ts", "DevPool",
function (cur.dev.pool, value)
{
        # paste0() replaces the old paste (..., sep = '') idiom.
    query <- paste0 ('update ', offsettingWorkingVarsTableName, ' set ',
                     DP.db.field.label (cur.dev.pool), '_DEV_OVERFLOW_PU_ID = ',
                     value)
    connect.to.database (PUinformationDBname)
        # Robustness fix: the connection used to leak if
        # sql.send.operation() raised an error; on.exit() guarantees it
        # is closed on both normal and error exits.
    on.exit (close.database.connection (), add = TRUE)
    sql.send.operation (query)
}
)
#==============================================================================
#----- clear.record.of.dev.overflow.from.prev.ts -----#
# Reset the pool's stored overflow-PU ID to the "none" sentinel, i.e.,
# mark that no PU is being carried forward from a previous time step.
setGeneric ("clear.record.of.dev.overflow.from.prev.ts", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("clear.record.of.dev.overflow.from.prev.ts"))
#--------------------
setMethod ("clear.record.of.dev.overflow.from.prev.ts", "DevPool",
function (cur.dev.pool)
{
# Thin wrapper: writing the sentinel clears the db-backed record.
set.dev.overflow.PU.from.prev.ts (cur.dev.pool, CONST.NO.OVERFLOW.PU.TO.DEV)
}
)
#==============================================================================
#----- push.PU.to.next.ts -----#
# Defer development of PU.to.develop to the NEXT time step by recording
# it as this pool's overflow PU (read back later by
# get.overflow.PU.from.prev.ts.to.develop()).
setGeneric ("push.PU.to.next.ts", signature = "cur.dev.pool",
function (cur.dev.pool, PU.to.develop) standardGeneric ("push.PU.to.next.ts"))
#--------------------
setMethod ("push.PU.to.next.ts", "DevPool",
function (cur.dev.pool, PU.to.develop)
{
# Thin wrapper: the overflow record doubles as the "push" mechanism.
set.dev.overflow.PU.from.prev.ts (cur.dev.pool, PU.to.develop)
}
)
#==============================================================================
#----- choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities -----#
setGeneric ("choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities", signature = "cur.dev.pool",
function (cur.dev.pool, PUs.currently.eligible.for.dev) standardGeneric ("choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities"))
#--------------------
setMethod ("choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities", "DevPool",
function (cur.dev.pool, PUs.currently.eligible.for.dev)
{
    # Default policy: pick one PU uniformly at random from the legal set.
    # cur.dev.pool is deliberately part of the signature even though it is
    # unused here, so that a subclass can override this with a fancier,
    # pool-specific choice (e.g., a distribution weighting some parcel
    # sizes or types more heavily).
    return (sample.rdv (PUs.currently.eligible.for.dev, 1))
}
)
#==============================================================================
#==============================================================================
#----- choose.PU.to.develop -----#
# The ".OOP" suffix dates from the conversion of this code to S4 classes;
# the pre-OOP generic of the same name is kept here, commented out, for
# reference.
#setGeneric ("choose.PU.to.develop", signature = "cur.dev.pool",
# function (cur.dev.pool) standardGeneric ("choose.PU.to.develop"))
setGeneric ("choose.PU.to.develop.OOP", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("choose.PU.to.develop.OOP"))
#--------------------
#setMethod ("choose.PU.to.develop", "DevPool",
# Core per-time-step driver: pick (at most) one PU for this pool to
# develop.  Handles three cases: (a) an overflow PU pushed forward from
# the previous time step, (b) a fresh random pick from the currently
# eligible set, (c) nothing eligible.  Side effects: updates the pool's
# running totals, may push a PU to the next time step, and may turn off
# more.dev.allowed.in.cur.ts for this pool.  Returns the developed PU's
# ID, or CONST.NO.ELIGIBLE.PU.TO.DEVELOP when nothing was developed.
setMethod ("choose.PU.to.develop.OOP", "DevPool",
function (cur.dev.pool)
{
# Capture the CALLER's variable name so the modified pool object can be
# assigned back into the caller's frame at the end (see assign() below) -
# a workaround for R's copy-on-modify call-by-value semantics.
nameObject <- deparse (substitute (cur.dev.pool))
PU.to.develop <- CONST.NO.ELIGIBLE.PU.TO.DEVELOP
if(DEBUG.OFFSETTING) cat ("\n\n===================================================================================\n\n",
"At start of choose.PU.to.develop.OOP at ", "\n",
" >>>>> current.time.step = ", current.time.step, "\n",
" >>>>> cur.dev.pool@DP.db.field.label = ", cur.dev.pool@DP.db.field.label, "\n",
" >>>>> cur.tot.cpw.for.ts (cur.dev.pool) = ", cur.tot.cpw.for.ts (cur.dev.pool), "\n",
" >>>>> cur.target.loss.rate.for.ts (cur.dev.pool) = ", cur.target.loss.rate.for.ts (cur.dev.pool), "\n\n",
" cur.dev.pool@cur.cpw.tot.developed = ", cur.dev.pool@cur.cpw.tot.developed, "\n",
" cur.dev.pool@cur.hmv.tot.developed = ", cur.dev.pool@cur.hmv.tot.developed, "\n",
" cur.dev.pool@cur.mmv.tot.developed = ", cur.dev.pool@cur.mmv.tot.developed, "\n",
" cur.dev.pool@cur.lmv.tot.developed = ", cur.dev.pool@cur.lmv.tot.developed, "\n",
" cur.dev.pool@cur.target.loss.rate.for.ts = ", cur.dev.pool@cur.target.loss.rate.for.ts
);
# NOTE: the replacement-function form rebinds cur.dev.pool locally to a
# modified copy; the caller only sees it via the assign() at the end.
more.dev.allowed.in.cur.ts (cur.dev.pool) <- TRUE
select.new.PU.to.dev <- TRUE
dev.PU.in.this.ts <- TRUE
#--------------------------------------------------
# Check for overflow PU from previous time step.
# If there is one, then just return that PU.
# Otherwise, have to look for one.
#--------------------------------------------------
if(DEBUG.OFFSETTING) cat ("\n\nAbout to test prev.ts.left.overflow.PU.to.develop at ts = ",
current.time.step, "\n")
if (prev.ts.left.overflow.PU.to.develop (cur.dev.pool))
{
#----------------------------------------------------------------------
# Parcel overflowed from previous time step.
# Get its ID and turn off the overflow marker since you're using up
# the overflow now.
#----------------------------------------------------------------------
if(DEBUG.OFFSETTING) cat ("\n\nAbout to test prev.ts.left.overflow.PU.to.develop.\n")
PU.to.develop <- get.overflow.PU.from.prev.ts.to.develop (cur.dev.pool)
if(DEBUG.OFFSETTING) cat ("\n\nAbout to clear.record.of.dev.overflow.from.prev.ts.\n")
clear.record.of.dev.overflow.from.prev.ts (cur.dev.pool)
PU.to.dev.is.legal <-
make.sure.overflow.dev.PU.is.still.legal (cur.dev.pool, PU.to.develop)
if(DEBUG.OFFSETTING) cat ("\n\nAfter make.sure.overflow.dev.PU.is.still.legal (cur.dev.pool, ",
PU.to.develop, ").\n")
if(DEBUG.OFFSETTING) cat (" PU.to.dev.is.legal = ", PU.to.dev.is.legal, "\n")
if (PU.to.dev.is.legal)
{
#----------------------------------------------------------------------------
# The previous step pushed a development to this step and it's still legal
# so do it.
#----------------------------------------------------------------------------
# assign.PU.to.cur.ts (cur.dev.pool, PU.to.develop)
select.new.PU.to.dev <- FALSE
dev.PU.in.this.ts <- TRUE # already true, but just want to point it out here
} else
{
#--------------------------------------------------------------------------
# Overflow PU is no longer legal.
# This shouldn't happen, but it does.
# Not sure if it's a bug or what...
###### There may be a problem here with overflow PU not getting marked
###### as ineligible for development, e.g., after inside.gc claims it
###### for overflow, but doesn't mark it as developed and then outside.gc
###### or offsetting come along and use it before inside.gc gets another
###### chance at it?
######
###### If you mark it as DEVELOPED though, then it won't be seen as
###### eligible on the next round, so it probably needs a bit more
###### special attention than it's getting right now...
#--------------------------------------------------------------------------
more.dev.allowed.in.cur.ts (cur.dev.pool) <- FALSE
if(DEBUG.OFFSETTING) cat ("\n\nWARNING, POSSIBLE BUG:\n",
" In choose.PU.to.develop: <<FAILED OVERFLOW PU ", PU.to.develop, ">> ",
" for ", DP.db.field.label (cur.dev.pool), "\n",
" Overflow from previous time step is no longer legal.\n", sep='');
#----------------------------------------------------------------------------
# UNTIL WE HAVE THIS STRAIGHTENED OUT, JUST GET A DIFFERENT PU TO DEVELOP.
#----------------------------------------------------------------------------
select.new.PU.to.dev <- TRUE # already true, but just want to point it out here
# stop ()
} # end else - PU to develop is not legal
} # end if - previous time step left overflow PU to develop
#-------------------------------------------------------------------------------------
# Have now determined that either:
# a) there was no overflow from previous step and we need to pick a PU to develop
# or
# b) there was an overflow and it's still legal so it has now been developed
# and there's nothing left to do in this call except check that developing
# that PU has not overflowed the current time step as well. We'll do that
# check at the very end of the routine and the only consequence if it does
# overflow is to say that we shouldn't develop any more PUs this time step.
# or
# c) there was an overflow but for some reason, it is no longer legal.
# While this is probably a bug that we need to fix, for the moment we want
# to just ignore the problem and just pick a new PU to develop so that we
# can get some runs done. If it IS an error, then it only happens very
# occasionally and doesn't screw anything up other than the probability
# of the offending patch getting picked.
#-------------------------------------------------------------------------------------
if (select.new.PU.to.dev)
{
PUs.currently.eligible.for.dev <- select.PUs.currently.eligible.for.dev (cur.dev.pool)
if (length (PUs.currently.eligible.for.dev) < 1) # is.null()? is.NA()??
{
# Nothing eligible at all: end development for this pool/time step.
more.dev.allowed.in.cur.ts (cur.dev.pool) <- FALSE
dev.PU.in.this.ts <- FALSE
# cat ("\n\nERROR: In choose.PU.to.develop, ",
# "\n no legal PUs to develop.\n\n");
# stop ()
} else # end if - no PUs eligible to develop
{
#---------------------------------------------------------------------------------
# There ARE PUs eligible to develop.
# Choose one and see whether it overflows the target amount for this time step.
#---------------------------------------------------------------------------------
PU.to.develop <-
choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities (
cur.dev.pool, PUs.currently.eligible.for.dev)
#----------------------------------------------------
# See if it fits in current time step's allotment.
#----------------------------------------------------
overflow.fraction <- compute.overflow.fraction.for.PU.to.develop (cur.dev.pool, PU.to.develop)
if(DEBUG.OFFSETTING) cat ("\n\noverflow.fraction = ", overflow.fraction, "\n")
if (overflow.fraction > 0)
{
#----------------------------------------------------------------------
# Parcel does not fit inside current time step.
# Flip a biased coin to see whether to include it anyway.
#
# Bias the flip in inverse proportion to the amount of overflow,
# i.e., the more overflow, the less chance of including in
# current time step.
#
# Also mark the fact that you have come to the end of the time step.
#----------------------------------------------------------------------
if (runif (1) < overflow.fraction)
{
#---------------------------------------------------
# Lost the toss. Move this PU to next time step.
#---------------------------------------------------
if(DEBUG.OFFSETTING) cat ("\n\nIn choose.PU.to.develop: lost the toss, <<PUSHING PU ID ",
PU.to.develop,
">> to next time step for ",
DP.db.field.label (cur.dev.pool), "\n", sep='');
push.PU.to.next.ts (cur.dev.pool, PU.to.develop)
dev.PU.in.this.ts <- FALSE
} else # end if - lost the runif() toss
{
if(DEBUG.OFFSETTING) cat ("\n\nIn choose.PU.to.develop: won the toss for ", PU.to.develop,
" in ", DP.db.field.label (cur.dev.pool), "\n", sep='');
} # end else - won the toss
} # end if - PU to develop overflows current time step
} # end else - there were PUs eligible to develop
} # end if - no overflow from previous time step
if (dev.PU.in.this.ts)
{
assign.PU.to.cur.ts (cur.dev.pool, PU.to.develop)
}
#---------------------------------------------------------------------------
# No matter what action was taken in this routine, there is a possibility
# that it has led to an overflow that has been accepted.
# Check for that now and if it has occurred, then you should not allow
# any more development in this time step.
#---------------------------------------------------------------------------
final.overflow <- cur.dev.pool@cur.tot.cpw.for.ts - cur.dev.pool@cur.target.loss.rate.for.ts
if(DEBUG.OFFSETTING) cat ("\n\nAt end of choose.PU.to.develop, final overflow = ",
final.overflow, "\n")
if (final.overflow > 0)
{
if(DEBUG.OFFSETTING) cat ("\n\nIn choose.PU.to.develop: ", DP.db.field.label (cur.dev.pool),
"\n Positive overflow, so stopping dev for this time step.\n", sep='');
more.dev.allowed.in.cur.ts (cur.dev.pool) <- FALSE
}
#-------------------------------------------------------------------------
# If you're ending the time step, save the running totals to use on the
# next time step.
#-------------------------------------------------------------------------
if (! more.dev.allowed.in.cur.ts (cur.dev.pool))
{
save.cur.dev.pool.running.totals (cur.dev.pool)
}
# Write the (locally modified copy of the) pool object back into the
# caller's variable - this is how the running-total updates become
# visible outside this method.  Fragile: requires the caller to pass a
# simple variable name, not an expression.
assign (nameObject, cur.dev.pool, envir=parent.frame())
return (PU.to.develop)
} # end function - choose.PU.to.develop
)
#==============================================================================
#---------------------------------------------
# Functions that are not part of the class.
#---------------------------------------------
#==============================================================================
# Illustrates the expected start-of-model setup for a dev pool.
# Not called by the model itself - documentation-by-example.
example.model.code <- function ()
{
# At start of model, need to initialize running totals, etc.
inside.gc.dev.pool <- new ("DevPool.inside.gc")
# Initialize dev pool running totals in db to 0.
# Can use the save...() routine because the dev pool initial values
# are 0 when they are created.
# Bug fix: the assumption check previously referenced an undefined name
# (cur.dev.pool); it must test the pool object that was just created.
stopifnot (cur.cpw.tot.developed (inside.gc.dev.pool) == 0.0) # Check that assumption.
save.cur.dev.pool.running.totals (inside.gc.dev.pool)
}
#==============================================================================
# Reset the persisted running totals for both development pools before
# the first time step of a model run.
initialize.dev.pools.at.start.of.model <- function ()
{
    # Same two calls as before, expressed as a loop over the pool labels
    # (inside gc first, then outside gc).
    for (pool.label in c ("INSIDE_GC", "OUTSIDE_GC"))
        initialize.dev.pool.running.totals.at.start.of.model (pool.label)
}
#==============================================================================
#==============================================================================
#--------------------------------------------------------------------------
# Dummy global control code to emulate running a full set of time steps.
#--------------------------------------------------------------------------
#==============================================================================
#----- execute.cpw.test.ts -----#
# Strictly for testing inside a dummy loop over model time steps.
# Can remove this routine if desired.
# It DOES show how things are expected to be set up and called.
# NOTE: the two pool objects created here are currently unused beyond
# construction - the development loop below is still commented out.
execute.cpw.test.ts <- function ()
{
inside.gc.dev.pool <- new ("DevPool.inside.gc")
outside.gc.dev.pool <- new ("DevPool.outside.gc")
#-----------------------------------------------
initialize.dev.pools.at.start.of.model ()
#-----------------------------------------------
### while (more.dev.allowed.in.cur.ts (inside.gc.dev.pool))
### {
### cat ("\n\n====================================================",
### "\n\nAt ts = ", cur.ts, ", before choose.PU...()",
### PU.to.develop);
###### PU.to.develop <- choose.PU.to.develop (cur.dev.pool)
### }
}
#==============================================================================
##current.time.step <- 15
##step.interval <- 5
##PAR.initial.inside.gc.cpw.loss.rate <- 10
##x <- new ("DevPool.inside.gc")
##cat ("\n\nx after new = \n")
##print (x)
##initialize.dev.pool.target.loss.rate (x)
##cat ("\n\nx after initialize.dev.pool.target.loss.rate = \n")
##print (x)
| /R/DevPool.R | no_license | langfob/rdv-framework-frozen-google-code-export-do-not-change | R | false | false | 61,032 | r | #==============================================================================
# source ('DevPool.R');
#rm (list = ls());
#==============================================================================
# History:
# BTL - 2010.12.10ish
# Created for the Sydney Cumberland Plains Woodland project to replace the
# existing loss model / offset model call to choose a fixed number of PUs to
# develop each time step. The new version is aimed at averaging a certain
# number of hectares developed per time step instead of a certain number of
# planning units. At the moment, it has lots of things specific to Sydney
# in it, but these may factor out later if we can convert it to an OOP
# representation instead.
# BTL - 2010.12.12
# Have attempted to convert this to use R's OOP system.
#==============================================================================
# Things that still need work
# 1) Are these hmv or cpw loss rates? Need to also make sure they're named
# correctly in the yaml file:
# PAR.initial.inside.gc.cpw.loss.rate <- 39.6 # hectares per yr
# PAR.initial.outside.gc.cpw.loss.rate <- 48
# +/- 10 per yr, so, could runif or truncated normal
# in [38 to 58] to get cur rate
# 2) offsetting constraints
# #90% offset inside gc until 797 ha reached and then all go outside
# 4) may need to update the loss rate(s) on each time step, particularly for outside gc,
# e.g., if you want the loss rate to increase over time
# 4a) # The label "cur." is used here because we may want to have the target value change
# over the run of the model, e.g., to accommodate increasing development rates.
# 4b) #*** Need to add an runif() call to each time step?
#initial.outside.gc.cpw.loss.rate <- 48 # +/- 10 per yr, so, could runif or truncated normal
# in [38 to 58] to get cur rate
#cur.outside.gc.target.loss.rate.for.ts <- initial.outside.gc.cpw.loss.rate * step.interval
# 4c) # In the initialization routines for the target rate, I'm using hmv.
# Should I be using cpw instead?
# See notes around the initialization routines:
# initialize.inside.gc.dev.pool.target.loss.rate() and analogous for outside.
# 4d) # The initialize.inside.gc.dev.pool...() and outside...() need to be converted
# into initialize methods for the corresponding classes, but R doesn't like the
# way I did it. For the moment, I've just moved the logic into some stanalone
# routines outside the classes.
# 5) assign.PU.to.cur.ts (cur.dev.pool, PU.to.develop)
# WHAT HAPPENS WITH ALL THESE RUNNING TOTALS IF OFFSETTING FAILS?
# DO WE NEED TO HAVE THESE ONLY AS SCRATCH VALUES UNTIL OFFSET SUCCEEDS
# (WHICH IS ALSO MAKING THE ASSUMPTION THAT OFFSETTING IS EVEN BEING DONE).
# 6) May need to have a test about the SECURED status of the parcel when building
# the eligibility query since there are interactions between protection
# and tenure security.
# For example, TENURE = "Secured" (a form of catastrophic loss)
# 7) Clean up final overflow calculation
# 8) Make it so that inside and outside are chosen probabilistically instead of
# doing all inside and then all outside.
# 9) Probably need to make some kind of distinction between not allowing any more development
# this time step and not allowing any more at all (e.g., if all parcels
# have been developed). However, runs that have to do with protection
# expiring may allow things to change and what formerly could not happen
# will suddenly become possible. So, maybe this is not such a good idea.
# Have to think about it.
# 10) First creation of the dev pools in loss.model.R initializes the running
# totals to 0 by assuming that their prototype values are 0, but
# that's bad to do for several reasons. Need to change that to
# explicitly set them to zero. One problem is that in the class
# prototype here, the values are set to CONST.UNINITIALIZED.NON.NEG.NUM,
# which is in fact, already non-negative number so it can't be
# distinguished as unitialized. Need to clean this up and go back
# to using -77 or something as soon as you get the code in loss.model.R
# corrected to set the 0's explicitly instead of implicitly.
# 11) Need to write up intro to using OOP in R.
# One important thing to add is what I discovered this morning about
# the cryptic error message you get when you use the same slot name in
# multiple classes and redefine the generic accessor functions for it
# in each class rather than just once. It reinforces the need to add
# the check for existance code for generics that is shown in some of
# the tutorial examples. It should be done automatically!
#==============================================================================
library (methods);
#==============================================================================
#------------------------------------------------------
# Global initializations before starting time steps.
#------------------------------------------------------
#==============================================================================
source ('constants.R')
# Sentinel values used throughout the dev-pool code.
# Distinct negative codes so a stray sentinel is recognizable in output.
CONST.NO.PU.LEFT.TO.DEVELOP <- -88
CONST.NO.ELIGIBLE.PU.TO.DEVELOP <- -33
# "No overflow recorded" marker stored in the *_DEV_OVERFLOW_PU_ID db column.
CONST.NO.OVERFLOW.PU.TO.DEV <- 0
#CONST.UNINITIALIZED.NON.NEG.NUM <- -77
# TODO (see history note 10 above): 0.0 is indistinguishable from a real
# initialized zero; restore -77 once loss.model.R sets the 0's explicitly.
CONST.UNINITIALIZED.NON.NEG.NUM <- 0.0
#==============================================================================
#==============================================================================
#==============================================================================
# Look up the class-1 (high management value) CPW area recorded for one PU.
get.hmv.of <- function (PU.to.develop)
{
    hmv.query <- paste ('select AREA_OF_C1_CPW from', dynamicPUinfoTableName,
                        'where ID =', PU.to.develop)
    return (sql.get.data (PUinformationDBname, hmv.query))
}
# Look up the class-2 (medium management value) CPW area recorded for one PU.
get.mmv.of <- function (PU.to.develop)
{
    mmv.query <- paste ('select AREA_OF_C2_CPW from', dynamicPUinfoTableName,
                        'where ID =', PU.to.develop)
    return (sql.get.data (PUinformationDBname, mmv.query))
}
# Look up the class-3 (low management value) CPW area recorded for one PU.
get.lmv.of <- function (PU.to.develop)
{
    lmv.query <- paste ('select AREA_OF_C3_CPW from', dynamicPUinfoTableName,
                        'where ID =', PU.to.develop)
    return (sql.get.data (PUinformationDBname, lmv.query))
}
# Look up the total CPW area recorded for one PU.
# Cleanup: removed the commented-out debug tracing and the intermediate
# retval dance so this matches the style of get.hmv.of()/get.mmv.of().
get.cpw.of <- function (PU.to.develop)
{
    query <- paste ('select AREA_OF_CPW from', dynamicPUinfoTableName,
                    "where ID =", PU.to.develop)
    return (sql.get.data (PUinformationDBname, query))
}
# Look up the total area recorded for one PU.
# Cleanup: removed the commented-out debug tracing and the intermediate
# retval dance so this matches the style of get.hmv.of()/get.mmv.of().
get.area.of <- function (PU.to.develop)
{
    query <- paste ('select AREA from', dynamicPUinfoTableName,
                    "where ID =", PU.to.develop)
    return (sql.get.data (PUinformationDBname, query))
}
#==============================================================================
#==============================================================================
#==============================================================================
# Base class for a pool of planning units (PUs) available for development.
# Slots hold this pool's db column-name prefix, per-time-step running
# totals, and its per-time-step target loss rate.  Subclasses below
# specialize for inside/outside the growth centre.
setClass ("DevPool",
representation (DP.db.field.label = "character",
                    # prefix used to build this pool's db column names
name = "character",
more.dev.allowed.in.cur.ts = "logical",
                    # FALSE once the pool has hit its allotment this step
cur.tot.cpw.for.ts = "numeric",
                    # running CPW developed within the current time step
cur.cpw.tot.developed = "numeric",
                    # cumulative totals across the run, overall and by
                    # high/medium/low management-value class
cur.hmv.tot.developed = "numeric",
cur.mmv.tot.developed = "numeric",
cur.lmv.tot.developed = "numeric",
offset.multiplier = "numeric",
#------------------------------------------------
# The label "cur." is used here because we may
# want to have the target value change over
# the run of the model, e.g., to accommodate
# increasing development rates.
#------------------------------------------------
cur.target.loss.rate.for.ts = "numeric"
),
prototype (DP.db.field.label = "",
name = "",
more.dev.allowed.in.cur.ts = TRUE,
cur.tot.cpw.for.ts = 0.0,
            # NOTE: CONST.UNINITIALIZED.NON.NEG.NUM is currently 0.0, so
            # "uninitialized" is indistinguishable from a real zero (see
            # history note 10 near the top of the file).
cur.cpw.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.hmv.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.mmv.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.lmv.tot.developed = CONST.UNINITIALIZED.NON.NEG.NUM,
offset.multiplier = CONST.UNINITIALIZED.NON.NEG.NUM,
cur.target.loss.rate.for.ts = CONST.UNINITIALIZED.NON.NEG.NUM
)
);
###setValidity ("DevPool",
### function (object) {
### if (FALSE)
### {
### cat ("\n\nDebugging: at start of DevPool::validObject()\n",
### " object@cur.cpw.tot.developed = ", object@cur.cpw.tot.developed, "\n",
### " object@cur.hmv.tot.developed = ", object@cur.hmv.tot.developed, "\n",
### " object@cur.mmv.tot.developed = ", object@cur.mmv.tot.developed, "\n",
### " object@cur.lmv.tot.developed = ", object@cur.lmv.tot.developed, "\n",
### " object@cur.target.loss.rate.for.ts = ", object@cur.target.loss.rate.for.ts, "\n"
### );
### }
###
### if (object@cur.cpw.tot.developed < 0)
### "cur.cpw.tot.developed must be >= 0"
### else if (object@cur.hmv.tot.developed < 0)
### "cur.hmv.tot.developed must be >= 0"
### else if (object@cur.mmv.tot.developed < 0)
### "cur.mmv.tot.developed must be >= 0"
### else if (object@cur.lmv.tot.developed < 0)
### "cur.lmv.tot.developed must be >= 0"
### else if (object@cur.tot.cpw.for.ts < 0)
### "cur.tot.cpw.for.ts must be >= 0"
### else if (object@cur.target.loss.rate.for.ts < 0)
### "cur.target.loss.rate.for.ts must be >= 0"
### else
### TRUE
### }
### );
#==============================================================================
#-------------------------------------------------------------
# Create the specializations of the DevPool class to handle
# things that are very specific to inside and outside the
# growth center in Sydney.
#
# Nearly everything can be handled through data values, but
# a couple of things currently require the use of special
# methods.
#-------------------------------------------------------------
# Each subclass only overrides the db field-name prefix; behavior
# differences live in the build.eligibility.query() methods above.
setClass ("DevPool.inside.gc",
prototype = prototype (DP.db.field.label = "INSIDE_GC"),
contains = "DevPool"
);
#----------
setClass ("DevPool.outside.gc",
prototype = prototype (DP.db.field.label = "OUTSIDE_GC"),
contains = "DevPool"
);
#==============================================================================
#==============================================================================
#==============================================================================
# Create generic and specific get and set routines for
# all instance variables.
#==============================================================================
#----- DP.db.field.label -----#
# Accessor pair for the DP.db.field.label slot: the prefix used to build
# this pool's column names in the offsetting working-variables table
# (e.g. "INSIDE_GC" + "_DEV_OVERFLOW_PU_ID").
# NOTE: the setter returns a MODIFIED COPY via initialize(); use the
# replacement form, e.g.  DP.db.field.label (p) <- "X", to rebind.
# Get
setGeneric ("DP.db.field.label", signature = "x",
function (x) standardGeneric ("DP.db.field.label"))
setMethod ("DP.db.field.label", "DevPool",
function (x) x@DP.db.field.label);
# Set
setGeneric ("DP.db.field.label<-", signature = "x",
function (x, value) standardGeneric ("DP.db.field.label<-"))
setMethod ("DP.db.field.label<-", "DevPool",
function (x, value) initialize (x, DP.db.field.label = value))
#----- name -----#
# Accessor pair for the name slot (presumably a human-readable pool
# label; not referenced elsewhere in this chunk - TODO confirm usage).
# Setter returns a modified copy via initialize().
# Get
setGeneric ("name", signature = "x",
function (x) standardGeneric ("name"))
setMethod ("name", "DevPool",
function (x) x@name);
# Set
setGeneric ("name<-", signature = "x",
function (x, value) standardGeneric ("name<-"))
setMethod ("name<-", "DevPool",
function (x, value) initialize (x, name = value))
#----- more.dev.allowed.in.cur.ts -----#
# Accessor pair for the flag that gates further development by this pool
# in the current time step (cleared by choose.PU.to.develop.OOP() when
# the allotment is exhausted or nothing is eligible).
# Setter returns a modified copy via initialize().
# Get
setGeneric ("more.dev.allowed.in.cur.ts", signature = "x",
function (x) standardGeneric ("more.dev.allowed.in.cur.ts"))
setMethod ("more.dev.allowed.in.cur.ts", "DevPool",
function (x) x@more.dev.allowed.in.cur.ts);
# Set
setGeneric ("more.dev.allowed.in.cur.ts<-", signature = "x",
function (x, value) standardGeneric ("more.dev.allowed.in.cur.ts<-"))
setMethod ("more.dev.allowed.in.cur.ts<-", "DevPool",
function (x, value) initialize (x, more.dev.allowed.in.cur.ts = value))
#----- cur.tot.cpw.for.ts -----#
# Accessor pair for the running CPW total developed within the CURRENT
# time step (compared against cur.target.loss.rate.for.ts to detect
# overflow).  Setter returns a modified copy via initialize().
# Get
setGeneric ("cur.tot.cpw.for.ts", signature = "x",
function (x) standardGeneric ("cur.tot.cpw.for.ts"))
setMethod ("cur.tot.cpw.for.ts", "DevPool",
function (x) x@cur.tot.cpw.for.ts);
# Set
setGeneric ("cur.tot.cpw.for.ts<-", signature = "x",
function (x, value) standardGeneric ("cur.tot.cpw.for.ts<-"))
setMethod ("cur.tot.cpw.for.ts<-", "DevPool",
function (x, value) initialize (x, cur.tot.cpw.for.ts = value))
#----- cur.cpw.tot.developed -----#
# Accessor pair for the cumulative CPW total this pool has developed over
# the whole run.  Setter returns a modified copy via initialize().
# Get
setGeneric ("cur.cpw.tot.developed", signature = "x",
function (x) standardGeneric ("cur.cpw.tot.developed"))
setMethod ("cur.cpw.tot.developed", "DevPool",
function (x) x@cur.cpw.tot.developed);
# Set
setGeneric ("cur.cpw.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.cpw.tot.developed<-"))
setMethod ("cur.cpw.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.cpw.tot.developed = value))
#----- cur.hmv.tot.developed -----#
# Get
setGeneric ("cur.hmv.tot.developed", signature = "x",
function (x) standardGeneric ("cur.hmv.tot.developed"))
setMethod ("cur.hmv.tot.developed", "DevPool",
function (x) x@cur.hmv.tot.developed);
# Set
setGeneric ("cur.hmv.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.hmv.tot.developed<-"))
setMethod ("cur.hmv.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.hmv.tot.developed = value))
#----- cur.mmv.tot.developed -----#
# Get
setGeneric ("cur.mmv.tot.developed", signature = "x",
function (x) standardGeneric ("cur.mmv.tot.developed"))
setMethod ("cur.mmv.tot.developed", "DevPool",
function (x) x@cur.mmv.tot.developed);
# Set
setGeneric ("cur.mmv.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.mmv.tot.developed<-"))
setMethod ("cur.mmv.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.mmv.tot.developed = value))
#----- cur.lmv.tot.developed -----#
# Get
setGeneric ("cur.lmv.tot.developed", signature = "x",
function (x) standardGeneric ("cur.lmv.tot.developed"))
setMethod ("cur.lmv.tot.developed", "DevPool",
function (x) x@cur.lmv.tot.developed);
# Set
setGeneric ("cur.lmv.tot.developed<-", signature = "x",
function (x, value) standardGeneric ("cur.lmv.tot.developed<-"))
setMethod ("cur.lmv.tot.developed<-", "DevPool",
function (x, value) initialize (x, cur.lmv.tot.developed = value))
#----- offset.multiplier -----#
# Get
setGeneric ("offset.multiplier", signature = "x",
function (x) standardGeneric ("offset.multiplier"))
setMethod ("offset.multiplier", "DevPool",
function (x) x@offset.multiplier);
# Set
setGeneric ("offset.multiplier<-", signature = "x",
function (x, value) standardGeneric ("offset.multiplier<-"))
setMethod ("offset.multiplier<-", "DevPool",
function (x, value) initialize (x, offset.multiplier = value))
#----- cur.target.loss.rate.for.ts -----#
# Get
setGeneric ("cur.target.loss.rate.for.ts", signature = "x",
function (x) standardGeneric ("cur.target.loss.rate.for.ts"))
setMethod ("cur.target.loss.rate.for.ts", "DevPool",
function (x) x@cur.target.loss.rate.for.ts);
# Set
setGeneric ("cur.target.loss.rate.for.ts<-", signature = "x",
function (x, value) standardGeneric ("cur.target.loss.rate.for.ts<-"))
setMethod ("cur.target.loss.rate.for.ts<-", "DevPool",
function (x, value) initialize (x, cur.target.loss.rate.for.ts = value))
#==============================================================================
# Initializers for the classes.
#==============================================================================
### FOR SOME REASON, R IS UNHAPPY WITH HOW I'VE DEFINED THESE INITIALIZERS,
### BUT I CAN'T FIGURE OUT WHAT THE ERROR MESSAGE MEANS:
###
### Error in conformMethod(signature, mnames, fnames, f, fdef, definition) :
### in method for ‘initialize’ with signature ‘.Object="DevPool.inside.gc"’:
### formal arguments (.Object = "DevPool.inside.gc") omitted in the method
### definition cannot be in the signature
###
### SO, FOR THE MOMENT, I'M JUST GOING TO DO THE INITIALIZATION BY HAND
### WHEN THE OBJECTS ARE CREATED AND COME BACK TO THIS LATER...
###setMethod (f = "initialize",
### signature = "DevPool.inside.gc",
### definition =
### function (object, initial.hmv.loss.rate, step.interval)
### {
### cat("~~~ DevPool.inside.gc: initializer ~~~ \n")
# ARE THESE SUPPOSED TO BE USING THE HMV LOSS RATE?
# AREN'T THEY SUPPOSED TO BE USING THE TOTAL CPW LOSS RATE INSTEAD?
# PAR.initial.inside.gc.cpw.loss.rate <- 39.6 # hectares per yr
### object@cur.target.loss.rate.for.ts <- initial.cpw.loss.rate * step.interval
### return (object)
### }
### )
#----------
###setMethod (f = "initialize",
### signature = "DevPool.outside.gc",
### definition =
### function (object, initial.hmv.loss.rate, step.interval)
### {
### cat("~~~ DevPool.outside.gc: initializer ~~~ \n")
# ARE THESE SUPPOSED TO BE USING THE HMV LOSS RATE?
# AREN'T THEY SUPPOSED TO BE USING THE TOTAL CPW LOSS RATE INSTEAD?
# PAR.initial.outside.gc.cpw.loss.rate <- 48
# +/- 10 per yr, so, could runif or truncated normal
# in [38 to 58] to get cur rate
### object@cur.target.loss.rate.for.ts <- initial.cpw.loss.rate * step.interval
### return (object)
### }
### )
#==============================================================================
#----- initialize.dev.pool.running.totals.at.start.of.ts -----#
setGeneric ("initialize.dev.pool.running.totals.at.start.of.ts", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("initialize.dev.pool.running.totals.at.start.of.ts"))
#--------------------
# Need to reload the running totals from the database.
# This really should be done in the initialize routine for the classes,
# but I haven't got that working correctly yet.
#
# Reloads this pool's four whole-run development totals (CPW, HMV, MMV, LMV)
# from the offsetting working-vars table at the start of a time step, then
# writes the updated pool object back into the CALLER's variable.
#
# NOTE(review): the deparse(substitute())/assign(..., parent.frame()) pair
# emulates pass-by-reference.  Inside an S4 method, dispatch wraps the call,
# so substitute() capturing the caller's variable name is fragile -- confirm
# it really rebinds the intended variable at every call site.
setMethod ("initialize.dev.pool.running.totals.at.start.of.ts", "DevPool",
function (cur.dev.pool)
{
# Name of the caller's variable, so the modified pool can be assigned back.
nameObject <- deparse (substitute (cur.dev.pool))
# Figure out whether the field names use "INSIDE_GC" or "OUTSIDE_GC".
cur.DP.db.field.label <- DP.db.field.label (cur.dev.pool)
if(DEBUG.OFFSETTING) cat ("    cur.DP.db.field.label = <", cur.DP.db.field.label, ">\n")
# Each total is read from a column named <CLASS>_TOT_DEV_<pool label>.
# NOTE(review): assumes sql.get.data() returns a single scalar here -- verify.
# CPW running total
query <- paste ('select CPW_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.cpw.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
# HMV running total
query <- paste ('select HMV_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.hmv.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
# MMV running total
query <- paste ('select MMV_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.mmv.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
# LMV running total
query <- paste ('select LMV_TOT_DEV_', cur.DP.db.field.label, ' from ', offsettingWorkingVarsTableName,
sep='')
cur.lmv.tot.developed (cur.dev.pool) <- sql.get.data (PUinformationDBname, query)
#### dummy setting to see if it registers anywhere...
#### cur.dev.pool@cur.lmv.tot.developed <- 52
if(DEBUG.OFFSETTING) cat ("In initialize.dev.pool.running.totals.at.start.of.ts: \n");
if(DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat ("    cur.dev.pool = \n");
if(DEBUG.OFFSETTING) print (cur.dev.pool)
# Push the updated (value-semantics) pool object back into the caller's frame.
assign (nameObject, cur.dev.pool, envir=parent.frame())
}
)
#==============================================================================
#----- save.cur.dev.pool.running.totals -----#
setGeneric ("save.cur.dev.pool.running.totals", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("save.cur.dev.pool.running.totals"))
#--------------------
# Persists this pool's four whole-run development totals (CPW, HMV, MMV, LMV)
# into the offsetting working-vars table so they can be reloaded by
# initialize.dev.pool.running.totals.at.start.of.ts() at the start of the
# next time step.  Read-only on cur.dev.pool; all effects go to the database.
setMethod ("save.cur.dev.pool.running.totals", "DevPool",
function (cur.dev.pool)
{
# Set these to 0 at the very start of the model.
# DON'T reset them to 0 every time you start a time step.
# DO retrieve their values from the database at the start of
# each time step.
# Need to initialize all of these in the workingvars database at the start of the model and
# then reload their values into these variables at the start of each time step
# by reading their values out of the database.
connect.to.database( PUinformationDBname );
#-----
if(DEBUG.OFFSETTING) cat ("\n\nIn save.cur.dev.pool.running.totals():")
# Figure out whether the field names use "INSIDE_GC" or "OUTSIDE_GC".
cur.DP.db.field.label <- DP.db.field.label (cur.dev.pool)
if(DEBUG.OFFSETTING) cat ("\n    cur.DP.db.field.label = ", cur.DP.db.field.label)
# The four totals share one update-query pattern, so build and send each
# query from a named vector instead of four copy-pasted blocks.
# Order of the updates is preserved: CPW, HMV, MMV, LMV.
running.totals <- c (CPW = cur.cpw.tot.developed (cur.dev.pool),
                     HMV = cur.hmv.tot.developed (cur.dev.pool),
                     MMV = cur.mmv.tot.developed (cur.dev.pool),
                     LMV = cur.lmv.tot.developed (cur.dev.pool))
for (prefix in names (running.totals))
{
# Column names follow the pattern <CLASS>_TOT_DEV_<pool label>.
query <- paste ('update ', offsettingWorkingVarsTableName,
' set ', prefix, '_TOT_DEV_', cur.DP.db.field.label, ' = ',
running.totals [[prefix]],
sep = '' )
if(DEBUG.OFFSETTING) cat (paste0 ("\n    ", prefix, " running total query = "), query)
sql.send.operation (query)
}
#-----
close.database.connection();
if(DEBUG.OFFSETTING) cat ("\n\nAt end of DevPool::save.cur.dev.pool.running.totals()\n")
if(DEBUG.OFFSETTING) cat ("    Are these totals saved correctly in the db?\n",
"    They're 0 when reloaded at the start of the next time step (except for tot cpw).\n")
if(DEBUG.OFFSETTING) print (cur.dev.pool)
}
)
#==============================================================================
#***
#------------------------------------------------------------------------------
# These things should be in the initialize() routine for each class, but
# R is giving me an error message that I can't figure out, so I'll do it
# by hand here.
#------------------------------------------------------------------------------
#***
#------------------------------------------------------------------------------
# ARE THESE SUPPOSED TO BE USING THE HMV LOSS RATE?
# AREN'T THEY SUPPOSED TO BE USING THE TOTAL CPW LOSS RATE INSTEAD?
#------------------------------------------------------------------------------
#----- initialize.inside.gc.dev.pool.target.loss.rate -----#
setGeneric ("initialize.dev.pool.target.loss.rate", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("initialize.dev.pool.target.loss.rate"))
#--------------------
# Sets the pool's target CPW loss for one time step as
# (per-year loss-rate parameter) * (years per time step), then writes the
# updated pool object back into the CALLER's variable.
# NOTE(review): PAR.initial.*.cpw.loss.rate and step.interval are read from
# globals, not passed in -- they must be defined before this is called.
# NOTE(review): the deparse(substitute())/assign(..., parent.frame()) idiom
# is a pass-by-reference emulation that is fragile inside S4 dispatch --
# confirm it rebinds the intended caller variable.
# inside gc #
setMethod ("initialize.dev.pool.target.loss.rate", "DevPool.inside.gc",
function (cur.dev.pool)
{
# Name of the caller's variable, so the modified pool can be assigned back.
nameObject <- deparse (substitute (cur.dev.pool))
#step.interval <- 5
cur.target.loss.rate.for.ts (cur.dev.pool) <-
PAR.initial.inside.gc.cpw.loss.rate * step.interval
if(DEBUG.OFFSETTING) cat ("\n\nIn initialize.dev.pool.target.loss.rate: INSIDE gc.\n")
if(DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat ("    PAR.initial.inside.gc.cpw.loss.rate = ", PAR.initial.inside.gc.cpw.loss.rate, "\n")
if(DEBUG.OFFSETTING) cat ("    step.interval = ", step.interval, "\n")
if(DEBUG.OFFSETTING) cat ("    cur.target.loss.rate.for.ts (cur.dev.pool) = ", cur.target.loss.rate.for.ts (cur.dev.pool), "\n")
assign (nameObject, cur.dev.pool, envir=parent.frame())
}
)
#---------------------------------------------
# outside gc #
# Same as the inside-gc method but driven by the outside-gc rate parameter.
setMethod ("initialize.dev.pool.target.loss.rate", "DevPool.outside.gc",
function (cur.dev.pool)
{
nameObject <- deparse (substitute (cur.dev.pool))
#step.interval <- 5
cur.target.loss.rate.for.ts (cur.dev.pool) <-
PAR.initial.outside.gc.cpw.loss.rate * step.interval
if(DEBUG.OFFSETTING) cat ("\n\nIn initialize.dev.pool.target.loss.rate: OUTSIDE gc.\n")
if(DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat ("    PAR.initial.outside.gc.cpw.loss.rate = ", PAR.initial.outside.gc.cpw.loss.rate, "\n")
if(DEBUG.OFFSETTING) cat ("    step.interval = ", step.interval, "\n")
if(DEBUG.OFFSETTING) cat ("    cur.target.loss.rate.for.ts (cur.dev.pool) = ", cur.target.loss.rate.for.ts (cur.dev.pool), "\n")
assign (nameObject, cur.dev.pool, envir=parent.frame())
}
)
#==============================================================================
#----- choose.offset.pool -----#
setGeneric ("choose.offset.pool", signature = ".Object",
function (.Object) standardGeneric ("choose.offset.pool"))
#--------------------
#---------------------------------------------------------------------
# This function is where you would designate an offset to be leaked
# outside the study area, but I haven't done that yet because
# we haven't discussed how that would work yet.
#---------------------------------------------------------------------
#--------------------
# Development OUTSIDE the growth centre is always offset outside it.
setMethod ("choose.offset.pool", "DevPool.outside.gc",
function (.Object)
{
if(DEBUG.OFFSETTING) cat ('\nOffset should go OUTSIDE GC')
CONST.dev.OUT.offset.OUT
}
)
#--------------------
# Development INSIDE the growth centre is offset inside it with probability
# PAR.prob.that.inside.gc.is.offset.inside.gc, otherwise outside it.
setMethod ("choose.offset.pool", "DevPool.inside.gc",
function (.Object)
{
if (runif(1) < PAR.prob.that.inside.gc.is.offset.inside.gc)
{
if(DEBUG.OFFSETTING) cat ('\nOffset should go INSIDE GC')
return (CONST.dev.IN.offset.IN)
}
if(DEBUG.OFFSETTING) cat ('\nOffset should go OUTSIDE GC')
CONST.dev.IN.offset.OUT
}
)
#==============================================================================
#-----------------------------------------------------------------------
# Utility functions, particularly related to dealing with overflow of
# development from one time step to the next.
#
# At the moment, these are just dummy calls that need to be replaced
# with database interactions whose tables have not been set up yet.
#-----------------------------------------------------------------------
#==============================================================================
#----- assign.PU.to.cur.ts -----#
setGeneric ("assign.PU.to.cur.ts", signature = "cur.dev.pool",
function (cur.dev.pool, PU.to.develop) standardGeneric ("assign.PU.to.cur.ts"))
#--------------------
# Marks PU.to.develop as developed in this time step by adding its CPW/HMV/
# MMV/LMV areas to the pool's whole-run totals and its CPW area to the
# per-time-step total, then writes the updated pool back into the CALLER's
# variable (deparse/assign pass-by-reference emulation -- see note below).
setMethod ("assign.PU.to.cur.ts", "DevPool",
function (cur.dev.pool, PU.to.develop)
{
# Name of the caller's variable, so the modified pool can be assigned back.
# NOTE(review): substitute() inside an S4 method is fragile under dispatch
# -- confirm this captures the caller's variable name at every call site.
nameObject <- deparse (substitute (cur.dev.pool))
# WHAT HAPPENS WITH ALL THESE RUNNING TOTALS IF OFFSETTING FAILS?
# DO WE NEED TO HAVE THESE ONLY AS SCRATCH VALUES UNTIL OFFSET SUCCEEDS
# (WHICH IS ALSO MAKING THE ASSUMPTION THAT OFFSETTING IS EVEN BEING DONE).
# May need to store the incremental values added in the working table as well
# (e.g., the result of the get.cpw.of (PU.to.develop), get.hmv.of... calls here)
# so that you can undo the changes made here if offsetting fails.
cur.cpw.tot.developed (cur.dev.pool) <- cur.cpw.tot.developed (cur.dev.pool) + get.cpw.of (PU.to.develop)
cur.hmv.tot.developed (cur.dev.pool) <- cur.hmv.tot.developed (cur.dev.pool) + get.hmv.of (PU.to.develop)
cur.mmv.tot.developed (cur.dev.pool) <- cur.mmv.tot.developed (cur.dev.pool) + get.mmv.of (PU.to.develop)
cur.lmv.tot.developed (cur.dev.pool) <- cur.lmv.tot.developed (cur.dev.pool) + get.lmv.of (PU.to.develop)
# The values above are for overall running totals over the whole run.
# This one is just for what's been developed in the current time step.
# It's used for doing overflow calculations.
# NOTE(review): get.cpw.of() is called a second time here -- presumably a
# cheap lookup, but could be cached from the call above; verify.
cur.tot.cpw.for.ts (cur.dev.pool) <- cur.tot.cpw.for.ts (cur.dev.pool) + get.cpw.of (PU.to.develop)
if(DEBUG.OFFSETTING) cat ("\n\nIn assign.PU.to.cur.ts: <DEVELOPING PU ID ", PU.to.develop, ">\n");
# Push the updated (value-semantics) pool object back into the caller's frame.
assign (nameObject, cur.dev.pool, envir=parent.frame())
}
) # end - setMethod assign.PU.to.cur.ts
#==============================================================================
#----- select.PUs.currently.eligible.for.dev -----#
setGeneric ("select.PUs.currently.eligible.for.dev", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("select.PUs.currently.eligible.for.dev"))
#--------------------
# Returns the IDs of the planning units currently allowed to be developed in
# this pool, as selected by the pool-specific eligibility query
# (see build.eligibility.query()).
setMethod ("select.PUs.currently.eligible.for.dev", "DevPool",
function (cur.dev.pool)
{
query <- build.eligibility.query (cur.dev.pool)
eligible.PUs <- (sql.get.data (PUinformationDBname, query))
if(DEBUG.OFFSETTING) cat ("\n\nIn select.PUs.currently.eligible.for.dev:\n")
if(DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat ("    num of eligible.PUs = ", length (eligible.PUs), "\n")
# BUG FIX: "eligible.PUs [1:5]" pads the debug output with NAs whenever
# fewer than 5 PUs are eligible; head() truncates safely instead.
if(DEBUG.OFFSETTING) cat ("    eligible.PUs = ", head (eligible.PUs, 5), "...\n")
if(DEBUG.OFFSETTING) cat ("    Dev Query = ", query, "\n" )
return (eligible.PUs)
}
)
#==============================================================================
#----- build.eligibility.query -----#
setGeneric ("build.eligibility.query", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("build.eligibility.query"))
#--------------------
# NEED TO TEST FOR 2 DIFFERENT KINDS OF THINGS HERE?
# I.E., ONE IS ABOUT OVERFLOW TO THE NEXT STEP AND THE OTHER IS ABOUT EXCEEDING THE TOTAL
# ALLOWED AMOUNT OF DEVELOPMENT FOR EACH CPW CLASS (HMV, MMV, LMV).
# ONCE A CHOSEN PARCEL WOULD EXCEED ONE OR MORE OF THE LIMITS, THEN IT NEEDS TO BE REMOVED
# FROM THE DEVELOPMENT POOL SINCE IT WILL NEVER BE UNDER THE LIMIT AFTER THAT.
# *****
# NOTE: THAT ASSUMES THAT THE LIMITS WILL NOT BE RESET LATER IN THE MODEL RUN AND
# THAT THE LIMIT IS ON AMOUNT DEVELOPED, NOT ON TOTAL AVAILABLE IN THE LANDSCAPE.
# IF MANAGEMENT ALLOWED FOR INCREASE IN CONDITION, THE TOTAL AMOUNT COULD INCREASE
# (OR DECREASE) OVER TIME. THIS SUGGESTS A POLICY QUESTION ABOUT WHETHER THE
# DEVELOPMENT SHOULD BE GOVERNED BY MECHANISM OR BY OUTCOME.
# SHOULD MODEL THESE TWO CHOICES TO HIGHLIGHT THIS.
# If mechanism is the driver, then you do not have to make the check again once you
# have exceeded the limit. If outcome is the driver, then you have to keep checking.
# One other thing though - outcome could be phrased as trying to stay around the
# target with falling back to the target from a higher point allowed (i.e., if your
# proposed development does not drop the total below the target level, then go ahead,
# even though it does cause loss)
# or it could be phrased as never allowing any gain to be lost.
# *****
#--------------------
# Builds the SQL that selects eligible PU IDs for the INSIDE-gc pool: the PU
# must be undeveloped, inside the growth centre, unprotected, unreserved,
# certified, and its CPW area in each condition class must fit within the
# remaining head-room under that class's development cap.
setMethod ("build.eligibility.query", "DevPool.inside.gc",
function (cur.dev.pool)
{
# Need to compute the amount of space left under each cpw cap
hmv.space.left.under.limit.inside.gc <-
PAR.hmv.limit.inside.gc - cur.hmv.tot.developed (cur.dev.pool)
mmv.space.left.under.limit.inside.gc <-
PAR.mmv.limit.inside.gc - cur.mmv.tot.developed (cur.dev.pool)
lmv.space.left.under.limit.inside.gc <-
PAR.lmv.limit.inside.gc - cur.lmv.tot.developed (cur.dev.pool)
# NOTE(review): paste()'s default sep=" " leaves a doubled space after
# "from " -- harmless to SQL, but worth tidying.
# NOTE(review): TENURE = "Unprotected" uses double quotes; standard SQL
# treats double quotes as identifier quoting, so this relies on the specific
# DB engine accepting them as string literals -- confirm against the driver.
query <- paste ('select ID from ', dynamicPUinfoTableName,
'where DEVELOPED = 0',
'and GROWTH_CENTRE = 1',
'and TENURE = "Unprotected"',
'and RESERVED = 0',
'and GC_CERT = 1',
'and AREA_OF_C1_CPW <=', hmv.space.left.under.limit.inside.gc,
'and AREA_OF_C2_CPW <=', mmv.space.left.under.limit.inside.gc,
'and AREA_OF_C3_CPW <=', lmv.space.left.under.limit.inside.gc
)
return (query)
}
)
#--------------------
# OUTSIDE-gc eligibility: there are no per-class cap tests here, just the
# criteria string supplied by the configuration (the hard-coded clauses --
# DEVELOPED = 0, GROWTH_CENTRE = 0, TENURE = "Unprotected", RESERVED = 0 --
# were moved to the yaml file by Ascelin Gordon, 2011.01.19), applied while
# staying around the outside-gc loss rate.
setMethod ("build.eligibility.query", "DevPool.outside.gc",
function (cur.dev.pool)
{
paste ('select ID from', dynamicPUinfoTableName, 'where', PAR.dev.outside.GC.criteria )
}
)
#==============================================================================
#----- make.sure.overflow.dev.PU.is.still.legal -----#
setGeneric ("make.sure.overflow.dev.PU.is.still.legal", signature = "cur.dev.pool",
function (cur.dev.pool, overflow.PU) standardGeneric ("make.sure.overflow.dev.PU.is.still.legal"))
#--------------------
# Re-checks an overflow PU carried over from the previous time step against
# the pool's CURRENT eligibility set (eligibility can change between steps).
# Returns TRUE if overflow.PU may still be developed, FALSE otherwise.
setMethod ("make.sure.overflow.dev.PU.is.still.legal", "DevPool",
function (cur.dev.pool, overflow.PU)
{
eligible.PUs <- select.PUs.currently.eligible.for.dev (cur.dev.pool)
if(DEBUG.OFFSETTING) cat ("\n\nIn make.sure.overflow.dev.PU.is.still.legal:\n")
if(DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat ("    overflow.PU = ", overflow.PU, "\n")
if(DEBUG.OFFSETTING) cat ("    num of eligible.PUs = ", length (eligible.PUs), "\n")
# BUG FIX: %in% instead of any(eligible.PUs == overflow.PU), which returns
# NA (not FALSE) if eligible.PUs contains any NA; %in% is always TRUE/FALSE.
if(DEBUG.OFFSETTING) cat ("    overflow.PU %in% eligible.PUs = ", overflow.PU %in% eligible.PUs, "\n")
# BUG FIX: head() instead of [1:5], which pads with NAs when < 5 PUs.
if(DEBUG.OFFSETTING) cat ("    eligible.PUs = ", head (eligible.PUs, 5), "...\n")
return (overflow.PU %in% eligible.PUs) # Returns TRUE if overflow.PU is in the list
}
)
#==============================================================================
#----- get.overflow.PU.from.prev.ts.to.develop -----#
setGeneric ("get.overflow.PU.from.prev.ts.to.develop", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("get.overflow.PU.from.prev.ts.to.develop"))
#--------------------
# Reads the ID of the overflow PU recorded for this pool from the
# <pool label>_DEV_OVERFLOW_PU_ID column of the offsetting working-vars table.
setMethod ("get.overflow.PU.from.prev.ts.to.develop", "DevPool",
function (cur.dev.pool)
{
overflow.field <- paste (DP.db.field.label (cur.dev.pool), '_DEV_OVERFLOW_PU_ID', sep = '')
query <- paste ('select ', overflow.field, ' from ', offsettingWorkingVarsTableName, sep = '')
sql.get.data (PUinformationDBname, query) # overflow PU to develop
}
)
#==============================================================================
#----- prev.ts.left.overflow.PU.to.develop -----#
setGeneric ("prev.ts.left.overflow.PU.to.develop", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("prev.ts.left.overflow.PU.to.develop"))
#--------------------
# TRUE when the previous time step left an overflow PU queued for development
# (i.e. the recorded overflow ID differs from the "no overflow" sentinel).
setMethod ("prev.ts.left.overflow.PU.to.develop", "DevPool",
function (cur.dev.pool)
{
if(DEBUG.OFFSETTING) cat ("\n\nIn prev.ts.left.overflow.PU.to.develop:\n")
if(DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
overflow.PU.from.prev <- get.overflow.PU.from.prev.ts.to.develop (cur.dev.pool)
if(DEBUG.OFFSETTING) cat ("    overflow.PU.from.prev = ", overflow.PU.from.prev)
overflow.PU.from.prev != CONST.NO.OVERFLOW.PU.TO.DEV
}
)
#==============================================================================
#----- compute.overflow.fraction.for.PU.to.develop -----#
setGeneric ("compute.overflow.fraction.for.PU.to.develop", signature = "cur.dev.pool",
function (cur.dev.pool, PU.to.develop) standardGeneric ("compute.overflow.fraction.for.PU.to.develop"))
#--------------------
# Computes what fraction of PU.to.develop's CPW area would push this time
# step's running CPW total past the pool's target loss rate if the PU were
# developed now.  Returns 0.0 for a zero-CPW PU; stops with an error for a
# negative CPW (data corruption guard).
# NOTE(review): when development would stay UNDER the target, the returned
# fraction is negative -- presumably callers treat <= 0 as "no overflow";
# confirm at the call sites.
setMethod ("compute.overflow.fraction.for.PU.to.develop", "DevPool",
function (cur.dev.pool, PU.to.develop)
{
overflow.fraction <- 0.0
cur.PU.cpw <- get.cpw.of (PU.to.develop)
if (cur.PU.cpw < 0)
{
errMsg <- paste ("\n\nERROR in compute.overflow.fraction.for.PU.to.develop():",
"\n    cur.PU.cpw = ", cur.PU.cpw, " -- Must be >= 0.\n\n", sep='')
stop (errMsg)
} else if (cur.PU.cpw > 0)
{
#----------------------------------------------------------------------
# Compute what the running cpw total will be if this PU is developed
# and what fraction of the PU's area will be overflowing the current
# target rate for development in this time step.
#----------------------------------------------------------------------
next.inside.gc.tot.cpw.for.ts <- cur.tot.cpw.for.ts (cur.dev.pool) + cur.PU.cpw
overflow.fraction <-
(next.inside.gc.tot.cpw.for.ts - cur.target.loss.rate.for.ts (cur.dev.pool)) /
cur.PU.cpw
if(DEBUG.OFFSETTING) cat ("\n\nIn compute.overflow.fraction.for.PU.to.develop\n")
if(DEBUG.OFFSETTING) cat ("    current.time.step = ", current.time.step, "\n");
if(DEBUG.OFFSETTING) cat ("    PU.to.develop = ", PU.to.develop, "\n")
if(DEBUG.OFFSETTING) cat ("    --- cur.cpw.tot.developed (cur.dev.pool) = ",
cur.cpw.tot.developed (cur.dev.pool), "\n")
if(DEBUG.OFFSETTING) cat ("    --- cur.tot.cpw.for.ts (cur.dev.pool) = ",
cur.tot.cpw.for.ts (cur.dev.pool), "\n")
if(DEBUG.OFFSETTING) cat ("    --- cur.PU.cpw = ", cur.PU.cpw, "\n")
if(DEBUG.OFFSETTING) cat ("    --- next.inside.gc.tot.cpw.for.ts = ",
next.inside.gc.tot.cpw.for.ts, "\n")
if(DEBUG.OFFSETTING) cat ("    --- cur.target.loss.rate.for.ts (cur.dev.pool) = ",
cur.target.loss.rate.for.ts (cur.dev.pool), "\n\n")
if(DEBUG.OFFSETTING) cat ("    overflow.fraction = ", overflow.fraction, "\n")
}
return (overflow.fraction)
}
)
#==============================================================================
#----- set.dev.overflow.PU.from.prev.ts -----#
setGeneric ("set.dev.overflow.PU.from.prev.ts", signature = "cur.dev.pool",
function (cur.dev.pool, value) standardGeneric ("set.dev.overflow.PU.from.prev.ts"))
#--------------------
# Records `value` as this pool's overflow PU ID in the
# <pool label>_DEV_OVERFLOW_PU_ID column of the offsetting working-vars table.
setMethod ("set.dev.overflow.PU.from.prev.ts", "DevPool",
function (cur.dev.pool, value)
{
update.query <- paste ('update ', offsettingWorkingVarsTableName, ' set ',
DP.db.field.label (cur.dev.pool), '_DEV_OVERFLOW_PU_ID = ',
value,
sep = '')
connect.to.database( PUinformationDBname )
sql.send.operation (update.query)
close.database.connection()
}
)
#==============================================================================
#----- clear.record.of.dev.overflow.from.prev.ts -----#
setGeneric ("clear.record.of.dev.overflow.from.prev.ts", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("clear.record.of.dev.overflow.from.prev.ts"))
#--------------------
# Marks the pool as having no pending overflow PU by storing the sentinel ID.
setMethod ("clear.record.of.dev.overflow.from.prev.ts", "DevPool",
function (cur.dev.pool) set.dev.overflow.PU.from.prev.ts (cur.dev.pool, CONST.NO.OVERFLOW.PU.TO.DEV)
)
#==============================================================================
#----- push.PU.to.next.ts -----#
setGeneric ("push.PU.to.next.ts", signature = "cur.dev.pool",
function (cur.dev.pool, PU.to.develop) standardGeneric ("push.PU.to.next.ts"))
#--------------------
# Defers development of PU.to.develop to the next time step by recording it
# as this pool's overflow PU.
setMethod ("push.PU.to.next.ts", "DevPool",
function (cur.dev.pool, PU.to.develop) set.dev.overflow.PU.from.prev.ts (cur.dev.pool, PU.to.develop)
)
#==============================================================================
#----- choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities -----#
setGeneric ("choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities", signature = "cur.dev.pool",
function (cur.dev.pool, PUs.currently.eligible.for.dev) standardGeneric ("choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities"))
#--------------------
# Default policy: pick one PU uniformly at random from the pre-filtered
# eligible set.  The cur.dev.pool argument is deliberately part of the
# signature even though it is unused here, so that pool-specific overrides
# can weight the choice (e.g. by PU size or type).
setMethod ("choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities", "DevPool",
function (cur.dev.pool, PUs.currently.eligible.for.dev)
{
sample.rdv (PUs.currently.eligible.for.dev, 1)
}
)
#==============================================================================
#==============================================================================
#----- choose.PU.to.develop -----#
#setGeneric ("choose.PU.to.develop", signature = "cur.dev.pool",
# function (cur.dev.pool) standardGeneric ("choose.PU.to.develop"))
# NOTE(review): renamed with the ".OOP" suffix -- presumably to coexist with
# a non-S4 choose.PU.to.develop elsewhere in the file; confirm, then remove
# the dead commented-out generic above.
setGeneric ("choose.PU.to.develop.OOP", signature = "cur.dev.pool",
function (cur.dev.pool) standardGeneric ("choose.PU.to.develop.OOP"))
#--------------------
#setMethod ("choose.PU.to.develop", "DevPool",
setMethod ("choose.PU.to.develop.OOP", "DevPool",
function (cur.dev.pool)
{
nameObject <- deparse (substitute (cur.dev.pool))
PU.to.develop <- CONST.NO.ELIGIBLE.PU.TO.DEVELOP
if(DEBUG.OFFSETTING) cat ("\n\n===================================================================================\n\n",
"At start of choose.PU.to.develop.OOP at ", "\n",
" >>>>> current.time.step = ", current.time.step, "\n",
" >>>>> cur.dev.pool@DP.db.field.label = ", cur.dev.pool@DP.db.field.label, "\n",
" >>>>> cur.tot.cpw.for.ts (cur.dev.pool) = ", cur.tot.cpw.for.ts (cur.dev.pool), "\n",
" >>>>> cur.target.loss.rate.for.ts (cur.dev.pool) = ", cur.target.loss.rate.for.ts (cur.dev.pool), "\n\n",
" cur.dev.pool@cur.cpw.tot.developed = ", cur.dev.pool@cur.cpw.tot.developed, "\n",
" cur.dev.pool@cur.hmv.tot.developed = ", cur.dev.pool@cur.hmv.tot.developed, "\n",
" cur.dev.pool@cur.mmv.tot.developed = ", cur.dev.pool@cur.mmv.tot.developed, "\n",
" cur.dev.pool@cur.lmv.tot.developed = ", cur.dev.pool@cur.lmv.tot.developed, "\n",
" cur.dev.pool@cur.target.loss.rate.for.ts = ", cur.dev.pool@cur.target.loss.rate.for.ts
);
more.dev.allowed.in.cur.ts (cur.dev.pool) <- TRUE
select.new.PU.to.dev <- TRUE
dev.PU.in.this.ts <- TRUE
#--------------------------------------------------
# Check for overflow PU from previous time step.
# If there is one, then just return that PU.
# Otherwise, have to look for one.
#--------------------------------------------------
if(DEBUG.OFFSETTING) cat ("\n\nAbout to test prev.ts.left.overflow.PU.to.develop at ts = ",
current.time.step, "\n")
if (prev.ts.left.overflow.PU.to.develop (cur.dev.pool))
{
#----------------------------------------------------------------------
# Parcel overflowed from previous time step.
# Get its ID and turn off the overflow marker since you're using up
# the overflow now.
#----------------------------------------------------------------------
if(DEBUG.OFFSETTING) cat ("\n\nAbout to test prev.ts.left.overflow.PU.to.develop.\n")
PU.to.develop <- get.overflow.PU.from.prev.ts.to.develop (cur.dev.pool)
if(DEBUG.OFFSETTING) cat ("\n\nAbout to clear.record.of.dev.overflow.from.prev.ts.\n")
clear.record.of.dev.overflow.from.prev.ts (cur.dev.pool)
PU.to.dev.is.legal <-
make.sure.overflow.dev.PU.is.still.legal (cur.dev.pool, PU.to.develop)
if(DEBUG.OFFSETTING) cat ("\n\nAfter make.sure.overflow.dev.PU.is.still.legal (cur.dev.pool, ",
PU.to.develop, ").\n")
if(DEBUG.OFFSETTING) cat (" PU.to.dev.is.legal = ", PU.to.dev.is.legal, "\n")
if (PU.to.dev.is.legal)
{
#----------------------------------------------------------------------------
# The previous step pushed a development to this step and it's still legal
# so do it.
#----------------------------------------------------------------------------
# assign.PU.to.cur.ts (cur.dev.pool, PU.to.develop)
select.new.PU.to.dev <- FALSE
dev.PU.in.this.ts <- TRUE # already true, but just want to point it out here
} else
{
#--------------------------------------------------------------------------
# Overflow PU is no longer legal.
# This shouldn't happen, but it does.
# Not sure if it's a bug or what...
###### There may be a problem here with overflow PU not getting marked
###### as ineligible for development, e.g., after inside.gc claims it
###### for overflow, but doesn't mark it as developed and then outside.gc
###### or offsetting come along and use it before inside.gc gets another
###### chance at it?
######
###### If you mark it as DEVELOPED though, then it won't be seen as
###### eligible on the next round, so it probably needs a bit more
###### special attention than it's getting right now...
#--------------------------------------------------------------------------
more.dev.allowed.in.cur.ts (cur.dev.pool) <- FALSE
if(DEBUG.OFFSETTING) cat ("\n\nWARNING, POSSIBLE BUG:\n",
" In choose.PU.to.develop: <<FAILED OVERFLOW PU ", PU.to.develop, ">> ",
" for ", DP.db.field.label (cur.dev.pool), "\n",
" Overflow from previous time step is no longer legal.\n", sep='');
#----------------------------------------------------------------------------
# UNTIL WE HAVE THIS STRAIGHTENED OUT, JUST GET A DIFFERENT PU TO DEVELOP.
#----------------------------------------------------------------------------
select.new.PU.to.dev <- TRUE # already true, but just want to point it out here
# stop ()
} # end else - PU to develop is not legal
} # end if - previous time step left overflow PU to develop
#-------------------------------------------------------------------------------------
# Have now determined that either:
# a) there was no overflow from previous step and we need to pick a PU to develop
# or
# b) there was an overflow and it's still legal so it has now been developed
# and there's nothing left to do in this call except check that developing
# that PU has not overflowed the current time step as well. We'll do that
# check at the very end of the routine and the only consequence if it does
# overflow is to say that we shouldn't develop any more PUs this time step.
# or
# c) there was an overflow but for some reason, it is no longer legal.
# While this is probably a bug that we need to fix, for the moment we want
# to just ignore the problem and just pick a new PU to develop so that we
# can get some runs done. If it IS an error, then it only happens very
# occasionally and doesn't screw anything up other than the probability
# of the offending patch getting picked.
#-------------------------------------------------------------------------------------
if (select.new.PU.to.dev)
{
PUs.currently.eligible.for.dev <- select.PUs.currently.eligible.for.dev (cur.dev.pool)
if (length (PUs.currently.eligible.for.dev) < 1) # is.null()? is.NA()??
{
more.dev.allowed.in.cur.ts (cur.dev.pool) <- FALSE
dev.PU.in.this.ts <- FALSE
# cat ("\n\nERROR: In choose.PU.to.develop, ",
# "\n no legal PUs to develop.\n\n");
# stop ()
} else # end if - no PUs eligible to develop
{
#---------------------------------------------------------------------------------
# There ARE PUs eligible to develop.
# Choose one and see whether it overflows the target amount for this time step.
#---------------------------------------------------------------------------------
PU.to.develop <-
choose.dev.PU.from.set.that.has.been.restricted.to.only.legal.possibilities (
cur.dev.pool, PUs.currently.eligible.for.dev)
#----------------------------------------------------
# See if it fits in current time step's allotment.
#----------------------------------------------------
overflow.fraction <- compute.overflow.fraction.for.PU.to.develop (cur.dev.pool, PU.to.develop)
if(DEBUG.OFFSETTING) cat ("\n\noverflow.fraction = ", overflow.fraction, "\n")
if (overflow.fraction > 0)
{
#----------------------------------------------------------------------
# Parcel does not fit inside current time step.
# Flip a biased coin to see whether to include it anyway.
#
# Bias the flip in inverse proportion to the amount of overflow,
# i.e., the more overflow, the less chance of including in
# current time step.
#
# Also mark the fact that you have come to the end of the time step.
#----------------------------------------------------------------------
if (runif (1) < overflow.fraction)
{
#---------------------------------------------------
# Lost the toss. Move this PU to next time step.
#---------------------------------------------------
if(DEBUG.OFFSETTING) cat ("\n\nIn choose.PU.to.develop: lost the toss, <<PUSHING PU ID ",
PU.to.develop,
">> to next time step for ",
DP.db.field.label (cur.dev.pool), "\n", sep='');
push.PU.to.next.ts (cur.dev.pool, PU.to.develop)
dev.PU.in.this.ts <- FALSE
} else # end if - lost the runif() toss
{
if(DEBUG.OFFSETTING) cat ("\n\nIn choose.PU.to.develop: won the toss for ", PU.to.develop,
" in ", DP.db.field.label (cur.dev.pool), "\n", sep='');
} # end else - won the toss
} # end if - PU to develop overflows current time step
} # end else - there were PUs eligible to develop
} # end if - no overflow from previous time step
if (dev.PU.in.this.ts)
{
assign.PU.to.cur.ts (cur.dev.pool, PU.to.develop)
}
#---------------------------------------------------------------------------
# No matter what action was taken in this routine, there is a possibility
# that it has led to an overflow that has been accepted.
# Check for that now and if it has occurred, then you should not allow
# any more development in this time step.
#---------------------------------------------------------------------------
final.overflow <- cur.dev.pool@cur.tot.cpw.for.ts - cur.dev.pool@cur.target.loss.rate.for.ts
if(DEBUG.OFFSETTING) cat ("\n\nAt end of choose.PU.to.develop, final overflow = ",
final.overflow, "\n")
if (final.overflow > 0)
{
if(DEBUG.OFFSETTING) cat ("\n\nIn choose.PU.to.develop: ", DP.db.field.label (cur.dev.pool),
"\n Positive overflow, so stopping dev for this time step.\n", sep='');
more.dev.allowed.in.cur.ts (cur.dev.pool) <- FALSE
}
#-------------------------------------------------------------------------
# If you're ending the time step, save the running totals to use on the
# next time step.
#-------------------------------------------------------------------------
if (! more.dev.allowed.in.cur.ts (cur.dev.pool))
{
save.cur.dev.pool.running.totals (cur.dev.pool)
}
assign (nameObject, cur.dev.pool, envir=parent.frame())
return (PU.to.develop)
} # end function - choose.PU.to.develop
)
#==============================================================================
#---------------------------------------------
# Functions that are not part of the class.
#---------------------------------------------
#==============================================================================
#------------------------------------------------------------------------------
# example.model.code: illustrative set-up code (not called by the model
# proper) showing how a dev pool is created and its running totals
# initialized at the start of a model run.
#------------------------------------------------------------------------------
example.model.code <- function ()
{
    # At start of model, need to initialize running totals, etc.
  inside.gc.dev.pool <- new ("DevPool.inside.gc")
    # Initialize dev pool running totals in db to 0.
    # Can use the save...() routine because the dev pool initial values
    # are 0 when they are created.
    # BUG FIX: the assertion previously referenced "cur.dev.pool", a name
    # that does not exist in this function; it must check the pool that was
    # just created above.
  stopifnot (cur.cpw.tot.developed (inside.gc.dev.pool) == 0.0) # Check that assumption.
  save.cur.dev.pool.running.totals (inside.gc.dev.pool)
}
#==============================================================================
# Reset the stored running totals for both development pools ("INSIDE_GC"
# and "OUTSIDE_GC") at the start of a model run.  Called once before the
# time-step loop; used for its side effects only -- presumably the per-pool
# initializer writes to the database (TODO confirm against its definition).
initialize.dev.pools.at.start.of.model <- function ()
{
initialize.dev.pool.running.totals.at.start.of.model ("INSIDE_GC")
initialize.dev.pool.running.totals.at.start.of.model ("OUTSIDE_GC")
}
#==============================================================================
#==============================================================================
#--------------------------------------------------------------------------
# Dummy global control code to emulate running a full set of time steps.
#--------------------------------------------------------------------------
#==============================================================================
#----- execute.cpw.test.ts -----#
# Strictly for testing inside a dummy loop over model time steps.
# Can remove this routine if desired.
# It DOES show how things are expected to be set up and called.
# Creates one dev pool per region and initializes the per-pool running
# totals; the commented-out skeleton below sketches the intended per-step
# development loop (develop PUs until the pool says no more are allowed).
execute.cpw.test.ts <- function ()
{
inside.gc.dev.pool <- new ("DevPool.inside.gc")
outside.gc.dev.pool <- new ("DevPool.outside.gc")
#-----------------------------------------------
initialize.dev.pools.at.start.of.model ()
#-----------------------------------------------
### while (more.dev.allowed.in.cur.ts (inside.gc.dev.pool))
### {
### cat ("\n\n====================================================",
### "\n\nAt ts = ", cur.ts, ", before choose.PU...()",
### PU.to.develop);
###### PU.to.develop <- choose.PU.to.develop (cur.dev.pool)
### }
}
#==============================================================================
##current.time.step <- 15
##step.interval <- 5
##PAR.initial.inside.gc.cpw.loss.rate <- 10
##x <- new ("DevPool.inside.gc")
##cat ("\n\nx after new = \n")
##print (x)
##initialize.dev.pool.target.loss.rate (x)
##cat ("\n\nx after initialize.dev.pool.target.loss.rate = \n")
##print (x)
|
## run_analysis performs the following:
##  - Downloads and extracts the raw data to "./data/"
##  - Builds a clean data set combining the "test" and "train" measurements,
##    with descriptive column names
##  - Writes the combined data to complete_clean_data_set.txt
##  - Writes a tidy summary (mean of each variable grouped by activity and
##    subject) to tidy_data_set.txt and returns it
run_analysis <- function() {
  source("download_data.R")
  source("get_feature_names.R")
  source("get_activity_labels.R")
  source("get_subject_ids.R")
  source("get_activities.R")
  source("get_measurements.R")
  source("get_summary.R")
  # Download file and extract data
  print("Downloading data")
  download_data()
  data_sets <- c("test", "train")
  merged_dataframes <- list("test" = NULL, "train" = NULL)
  # Feature names are shared by both data sets
  features_list <- get_feature_names()
  # Activity labels map activity codes to descriptive names
  activity_labels <- get_activity_labels()
  # Row offset so "train" rows continue the numbering started by "test"
  start_index <- 1
  for (data_set in data_sets) {
    print(sprintf("Parsing data set %s", data_set))
    # Get subject ids
    subject_ids <- get_subject_ids(data_set, start_index)
    # Get activities
    activities <- get_activities(data_set, start_index, activity_labels)
    # Get measurements
    measurements <- get_measurements(data_set, features_list$features,
                                     features_list$relevant_features,
                                     features_list$columns_to_read, start_index)
    # Save last index for next iteration
    start_index <- nrow(subject_ids) + 1
    # Merge the three pieces into a single data frame
    print(sprintf("Merging data set %s", data_set))
    merged <- Reduce(function(x, y) merge(x, y, all = TRUE),
                     list(subject_ids, activities, measurements))
    merged_dataframes[[data_set]] <- merged
  }
  print("Merging complete data")
  complete_data <- rbind(merged_dataframes$test, merged_dataframes$train)
  output_file <- "complete_clean_data_set.txt"
  print(sprintf("Writing to file %s", output_file))
  # FIX: spell out row.names in full; the previous "row.name" only worked via
  # R's partial argument matching.
  write.table(complete_data, output_file, row.names = FALSE)
  print("Getting summary")
  summarised <- get_summary(complete_data)
  output_file <- "tidy_data_set.txt"
  print(sprintf("Writing to file %s", output_file))
  write.table(summarised, output_file, row.names = FALSE)
  return(summarised)
}
| /run_analysis.R | no_license | gersf11/DataCleaning | R | false | false | 2,306 | r | run_analysis<- function(){
source("download_data.R")
source("get_feature_names.R")
source("get_activity_labels.R")
source("get_subject_ids.R")
source("get_activities.R")
source("get_measurements.R")
source("get_summary.R")
## run_analysis function performs the following:
# Downloads and extract data to "./data/"
# Creates a clean data set with measurements from "test" and "train" sets
# The data set has descriptive column names
# The file tidy_data_set.txt is created
# Output of the function is a summary with mean of each variable grouped by activity and subject
# Download file and extract data
print("Dowloading data")
download_data()
data_sets <- c("test", "train")
merged_dataframes <- list("test" = NULL, "train" = NULL)
# Get feature names
features_list <- get_feature_names()
# Get activity lables
activity_labels <- get_activity_labels()
start_index <- 1
for(data_set in data_sets){
print(sprintf("Parsing data set %s", data_set))
# Get subject ids
subject_ids <- get_subject_ids(data_set, start_index)
# Get activities
activities <- get_activities(data_set, start_index, activity_labels)
# Get measurements
measurements <- get_measurements(data_set, features_list$features, features_list$relevant_features, features_list$columns_to_read, start_index)
# Save last index for next iteration
start_index <- nrow(subject_ids)+1
# Merge into single data frame
print(sprintf("Merging data set %s", data_set))
merged <- Reduce(function(x, y) merge(x, y, all=TRUE), list(subject_ids, activities, measurements))
merged_dataframes[[data_set]] <- merged
}
print("Merging complete data")
complete_data <-rbind(merged_dataframes$test, merged_dataframes$train)
output_file <- "complete_clean_data_set.txt"
print(sprintf("Writing to file %s", output_file))
write.table(complete_data, output_file, row.name=FALSE)
print("Getting summary")
summarised <- get_summary(complete_data)
output_file <- "tidy_data_set.txt"
print(sprintf("Writing to file %s", output_file))
write.table(summarised, output_file, row.name=FALSE)
return(summarised)
}
|
library(AGDEX)
library("RColorBrewer")
library(ggplot2)
# Directory holding the per-comparison AGDEX result files.
res.dir <- '/home/gabriel/Dropbox/research/qmul/results/mb_agdex/by_gene_ncott100'
# Human (species A) medulloblastoma group codes, with display labels as names.
grp.a <- c("hu.mb", "C", "D", "SHH")
names(grp.a) <- c("All MB", "Group C", "Group D", "SHH")
# Mouse (species B) group codes, with display labels as names.
grp.b <- c("mo.mb", "sb.chd7", "sb.nochd7")
names(grp.b) <- c("All MB", "SB CHD7 insertion", "SB no CHD7 insertion")
# Control group codes used in every comparison.
ctrl.a <- "hu.control"
ctrl.b <- "mo.control"
# Every (human group, mouse group) pair; grid.labels carries display labels.
grid <- expand.grid(grp.a, grp.b)
grid.labels <- expand.grid(names(grp.a), names(grp.b))
# Build the AGDEX result file name for one (human group, mouse group) pair.
#
# a, b:     group label for species A (human) and species B (mouse).
# act, bct: control labels; default to the script-level ctrl.a / ctrl.b so
#           existing two-argument calls keep working, but they can now be
#           supplied explicitly.
# Returns "agdex_<a>-<act>_<b>-<bct>.res.csv".
generateFilename <- function(a, b, act = ctrl.a, bct = ctrl.b) {
  # sprintf replaces the previous chain of fixed-string sub() calls, which
  # also used the discouraged T abbreviation for TRUE.
  sprintf("agdex_%s-%s_%s-%s.res.csv", a, act, b, bct)
}
# One result file name per comparison; grid rows are
# (Var1 = human group code, Var2 = mouse group code).
filenames <- apply(grid, 1, function (x) generateFilename(x[1], x[2]))
# Read one AGDEX result file from res.dir and keep only the genome-wide
# result component of the parsed object.
loadResults <- function(fn) {
  result.path <- file.path(res.dir, fn)
  agdex.result <- read.agdex.result(result.path)
  agdex.result$gwide.agdex.res
}
# Genome-wide AGDEX results, one list element per entry of `filenames`.
res <- lapply(filenames, loadResults)
# Apply `func` to each comparison's result and arrange the scalar values in a
# (mouse group) x (human group) matrix.
#
# func:      function mapping one result object to a single numeric value.
# results:   list of result objects, in the row order of `grid` (i.e. human
#            group varying fastest).  Defaults to the script-level `res`.
# groups.a:  named vector of human group codes (columns).  Default: grp.a.
# groups.b:  named vector of mouse group codes (rows).  Default: grp.b.
# The defaults keep the original one-argument calls working while making the
# previously hidden global dependencies explicit and testable.
extractMetric <- function(func, results = res, groups.a = grp.a, groups.b = grp.b) {
  m <- as.numeric(lapply(results, func))
  # byrow = TRUE because `results` iterates human groups fastest
  # (fixed the discouraged T abbreviation for TRUE).
  m <- matrix(m, nrow = length(groups.b), ncol = length(groups.a), byrow = TRUE)
  rownames(m) <- names(groups.b)
  colnames(m) <- names(groups.a)
  return(m)
}
# Extract the cosine and DOP statistics plus their permutation p-values.
# A.pval permutes the human (species A) labels, B.pval the mouse (species B)
# labels; "worst" takes the larger (more conservative) of the two p-values
# for each comparison.
cos.vals <- extractMetric(function(x) x$stat.value[x$stat.name == 'cos'])
cos.pvals.a <- extractMetric(function(x) x$A.pval[x$stat.name == 'cos'])
cos.pvals.b <- extractMetric(function(x) x$B.pval[x$stat.name == 'cos'])
cos.pvals.worst <- extractMetric(function(x) max(x[x$stat.name == 'cos', c("A.pval", "B.pval")]))
dop.vals <- extractMetric(function(x) x$stat.value[x$stat.name == 'dop'])
dop.pvals.a <- extractMetric(function(x) x$A.pval[x$stat.name == 'dop'])
dop.pvals.b <- extractMetric(function(x) x$B.pval[x$stat.name == 'dop'])
dop.pvals.worst <- extractMetric(function(x) max(x[x$stat.name == 'dop', c("A.pval", "B.pval")]))
# Plot a labelled heatmap of an AGDEX statistic matrix (mouse groups x human
# groups) and either display it or save it to disk.
#
# data:            numeric matrix with dimnames (as built by extractMetric).
# xlab, ylab:      axis titles; also used as the melted column names.
# clim:            optional c(min, max) colour limits for the fill scale.
# cdirection:      palette direction for scale_fill_distiller (callers pass
#                  -1 for p-values so small values are dark).
# title, subtitle: optional plot titles.
# save.filestem:   if non-NULL, save "<save.filestem>.pdf" and ".png"
#                  instead of printing the plot.
plotLabelledHeatmap <- function(
  data,
  xlab="Mouse (relative to healthy)",
  ylab="Human (relative to healthy)",
  clim=NULL,
  cdirection = 1,
  title=NULL,
  subtitle=NULL,
  save.filestem=NULL) {
  # NOTE(review): melt() comes from reshape2/reshape, which is not
  # library()'d at the top of this script -- presumably pulled in by a
  # dependency; confirm.
  dat <- melt(data)
  colnames(dat) <- c(xlab, ylab, "value")
  g <- ggplot(dat, aes_q(x = as.name(xlab), y = as.name(ylab))) +
    geom_tile(aes(fill = value)) +
    # Refer to the melted column directly inside aes() instead of reaching
    # back into dat$value, which bypasses ggplot's data masking.
    geom_text(aes(label = round(value, 3))) +
    scale_fill_distiller(limits = clim, palette = "Reds", direction = cdirection)
  if (!is.null(title)) {
    g <- g + ggtitle(title, subtitle = subtitle) +
      theme(plot.title = element_text(lineheight = .8, hjust = 0.5))
  }
  if (!is.null(save.filestem)) {
    # BUG FIX: this branch previously saved to the *global* out.filestem
    # rather than the save.filestem argument; it only worked because every
    # caller happened to assign out.filestem with the same value first.
    ggsave(paste0(save.filestem, '.pdf'), plot = g, width = 8, height = 6, units = "in", dpi = 200)
    ggsave(paste0(save.filestem, '.png'), plot = g, width = 8, height = 6, units = "in", dpi = 200)
  } else {
    print(g)
  }
}
# COS
# Heat maps of the cos statistic and its permutation p-values.  Each call
# assigns the global out.filestem and also passes the same value as
# save.filestem, so the plot routine behaves identically whichever of the
# two it reads.
out.filestem = file.path(res.dir, "cos_values")
plotLabelledHeatmap(cos.vals, title = "AGDEX cos value", save.filestem = out.filestem)
out.filestem = file.path(res.dir, "cos_pvalues_human")
plotLabelledHeatmap(cos.pvals.a, title = "AGDEX cos pvalues", subtitle = "Permuting human group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "cos_pvalues_mouse")
plotLabelledHeatmap(cos.pvals.b, title = "AGDEX cos pvalues", subtitle = "Permuting mouse group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "cos_pvalues_worst")
plotLabelledHeatmap(cos.pvals.worst, title = "AGDEX cos pvalues", subtitle = "Worst result from permuting group labels on both species", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
# DOP
# Same four heat maps for the dop statistic.
out.filestem = file.path(res.dir, "dop_values")
plotLabelledHeatmap(dop.vals, title = "AGDEX dop value", save.filestem = out.filestem)
out.filestem = file.path(res.dir, "dop_pvalues_human")
plotLabelledHeatmap(dop.pvals.a, title = "AGDEX dop pvalues", subtitle = "Permuting human group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "dop_pvalues_mouse")
plotLabelledHeatmap(dop.pvals.b, title = "AGDEX dop pvalues", subtitle = "Permuting mouse group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "dop_pvalues_worst")
plotLabelledHeatmap(dop.pvals.worst, title = "AGDEX dop pvalues", subtitle = "Worst result from permuting group labels on both species", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
| /R/process_agdex_results.R | no_license | gaberosser/qmul-bioinf | R | false | false | 4,400 | r | library(AGDEX)
library("RColorBrewer")
library(ggplot2)
res.dir <- '/home/gabriel/Dropbox/research/qmul/results/mb_agdex/by_gene_ncott100'
grp.a <- c("hu.mb", "C", "D", "SHH")
names(grp.a) <- c("All MB", "Group C", "Group D", "SHH")
grp.b <- c("mo.mb", "sb.chd7", "sb.nochd7")
names(grp.b) <- c("All MB", "SB CHD7 insertion", "SB no CHD7 insertion")
ctrl.a <- "hu.control"
ctrl.b <- "mo.control"
grid <- expand.grid(grp.a, grp.b)
grid.labels <- expand.grid(names(grp.a), names(grp.b))
generateFilename <- function(a, b) {
s <- "agdex_{a}-{act}_{b}-{bct}.res.csv"
s <- sub('{a}', a, s, fixed = T)
s <- sub('{act}', ctrl.a, s, fixed = T)
s <- sub('{b}', b, s, fixed = T)
s <- sub('{bct}', ctrl.b, s, fixed = T)
return(s)
}
filenames <- apply(grid, 1, function (x) generateFilename(x[1], x[2]))
loadResults <- function(fn) {
ff <- file.path(res.dir, fn)
res <- read.agdex.result(ff)
return(res$gwide.agdex.res)
}
res <- lapply(filenames, loadResults)
extractMetric <- function(func) {
m <- as.numeric(lapply(res, func))
m <- matrix(m, nrow=length(grp.b), ncol=length(grp.a), byrow=T)
rownames(m) <- names(grp.b)
colnames(m) <- names(grp.a)
return(m)
}
cos.vals <- extractMetric(function(x) x$stat.value[x$stat.name == 'cos'])
cos.pvals.a <- extractMetric(function(x) x$A.pval[x$stat.name == 'cos'])
cos.pvals.b <- extractMetric(function(x) x$B.pval[x$stat.name == 'cos'])
cos.pvals.worst <- extractMetric(function(x) max(x[x$stat.name == 'cos', c("A.pval", "B.pval")]))
dop.vals <- extractMetric(function(x) x$stat.value[x$stat.name == 'dop'])
dop.pvals.a <- extractMetric(function(x) x$A.pval[x$stat.name == 'dop'])
dop.pvals.b <- extractMetric(function(x) x$B.pval[x$stat.name == 'dop'])
dop.pvals.worst <- extractMetric(function(x) max(x[x$stat.name == 'dop', c("A.pval", "B.pval")]))
plotLabelledHeatmap <- function(
data,
xlab="Mouse (relative to healthy)",
ylab="Human (relative to healthy)",
clim=NULL,
cdirection = 1,
title=NULL,
subtitle=NULL,
save.filestem=NULL) {
dat <- melt(data)
colnames(dat) <- c(xlab, ylab, "value")
g = ggplot(dat, aes_q(x = as.name(xlab), y = as.name(ylab))) +
geom_tile(aes(fill = value)) +
geom_text(aes(label = round(dat$value, 3))) +
scale_fill_distiller(limits=clim, palette = "Reds", direction = cdirection)
if (!is.null(title)) {
g = g + ggtitle(title, subtitle = subtitle) + theme(plot.title = element_text(lineheight=.8, hjust=0.5))
}
if (!is.null(save.filestem)) {
ggsave(paste0(out.filestem, '.pdf'), plot=g, width=8, height=6, units="in", dpi=200)
ggsave(paste0(out.filestem, '.png'), plot=g, width=8, height=6, units="in", dpi=200)
} else {
print(g)
}
}
# COS
out.filestem = file.path(res.dir, "cos_values")
plotLabelledHeatmap(cos.vals, title = "AGDEX cos value", save.filestem = out.filestem)
out.filestem = file.path(res.dir, "cos_pvalues_human")
plotLabelledHeatmap(cos.pvals.a, title = "AGDEX cos pvalues", subtitle = "Permuting human group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "cos_pvalues_mouse")
plotLabelledHeatmap(cos.pvals.b, title = "AGDEX cos pvalues", subtitle = "Permuting mouse group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "cos_pvalues_worst")
plotLabelledHeatmap(cos.pvals.worst, title = "AGDEX cos pvalues", subtitle = "Worst result from permuting group labels on both species", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
# DOP
out.filestem = file.path(res.dir, "dop_values")
plotLabelledHeatmap(dop.vals, title = "AGDEX dop value", save.filestem = out.filestem)
out.filestem = file.path(res.dir, "dop_pvalues_human")
plotLabelledHeatmap(dop.pvals.a, title = "AGDEX dop pvalues", subtitle = "Permuting human group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "dop_pvalues_mouse")
plotLabelledHeatmap(dop.pvals.b, title = "AGDEX dop pvalues", subtitle = "Permuting mouse group labels", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
out.filestem = file.path(res.dir, "dop_pvalues_worst")
plotLabelledHeatmap(dop.pvals.worst, title = "AGDEX dop pvalues", subtitle = "Worst result from permuting group labels on both species", cdirection = -1, clim=c(0, 1), save.filestem = out.filestem)
|
# dataframe 데이터를 gis 데이터 형태로 변환 해주는 library
library(GISTools)
library(raster)
library(maptools)
# Load store-location data for each brand (Olive Young, LOHB's, lalavla).
olive <- read.csv('./store/서울시 올리브영 매장 위치.csv', header=T)
lobs <- read.csv('./store/서울시 롭스 매장 위치.csv', header=T)
lalavla <- read.csv('./store/서울시 랄라블라 매장 위치.csv', header=T)
# Load the Seoul administrative-dong boundary shapefile (stored in EPSG:5179).
seoul_shp <- shapefile('서울시(행정동)경계_EPSG_5179.shp')
# Reproject the shapefile to EPSG:4326 (WGS84 lon/lat) to match the store
# coordinates below.
seoul_shp <- spTransform(seoul_shp, CRS("+init=epsg:4326"))
# Inspect the shapefile's CRS (interactive check).
proj4string(seoul_shp)
# Convert each brand's coordinate table to a SpatialPointsDataFrame and
# count stores per administrative dong.
# Olive Young
olive_coord <- olive
coordinates(olive_coord) <- ~Longitude+Latitude
# BUG FIX: this block previously declared "+init=epsg:5179" (a projected
# Korean CRS) even though the data are lon/lat like every other brand's
# block; use EPSG:4326 to match the school / lalavla / lobs conversions and
# the reprojected seoul_shp above.
proj4string(olive_coord) <- CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Count points in polygons
cnt <- poly.counts(olive_coord, seoul_shp)
cnt2 <- as.data.frame(cnt)
str(cnt2)
cnt2$dong <- seoul_shp$ADM_NM
# Schools: convert coordinates and count per dong.
# NOTE(review): `school` is not defined anywhere in this script -- presumably
# loaded in an earlier session; confirm before running the file top-to-bottom.
school_coord <- school
coordinates(school_coord) = ~Longitude+Latitude
proj4string(school_coord) <- CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
cnt3 <- poly.counts(school_coord, seoul_shp)
cnt4 <- as.data.frame(cnt3)
cnt4$dong <- seoul_shp$ADM_NM
# Merge in the Olive Young counts computed above.
cnt4$olive <- cnt2$cnt
# lalavla: convert coordinates and count per dong.
lala_coord <- lalavla
coordinates(lala_coord) = ~Longitude+Latitude
proj4string(lala_coord) <- CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Count polygon in points
cnt <- poly.counts(lala_coord, seoul_shp)
cnt2 <- as.data.frame(cnt)
cnt4$lalavla <- cnt2$cnt
# LOHB's: convert coordinates and count per dong.
lobs_coord <- lobs
coordinates(lobs_coord) = ~Longitude+Latitude
proj4string(lobs_coord) <- CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Count polygon in points
cnt <- poly.counts(lobs_coord, seoul_shp)
cnt2 <- as.data.frame(cnt)
cnt4$lobs <- cnt2$cnt
# Save intermediate results (note: the first column of cnt4 keeps the name
# "cnt3" from as.data.frame(cnt3) above).
write.csv(cnt4, 'analysis.csv')
library(dplyr)
# file load
df <- read.csv('analysis.csv', header=T)
# NOTE(review): analysis.csv as written above carries the school counts in a
# column named "cnt3", not "school" -- confirm this select() runs as intended.
df <- select(df, dong, olive, lalavla, lobs, school)
# Load the rent data (average rent per administrative dong).
rent <- read.csv('rent.csv', header=T)
head(rent)
# Attach the average rent for matching administrative dongs.
# NOTE(review): this nested loop is O(nrow(df) * nrow(rent)); if a dong
# appears more than once in `rent`, the LAST match wins.  Assigning to
# df[i,6] creates a new 6th column (auto-named, NA elsewhere), which is
# renamed to "rent" just below.
for (i in 1:NROW(df)) {
for (j in 1:NROW(rent)) {
if(df[i,1] == rent[j,2]){
df[i,6] <- rent[j,3]
}
}
}
# Rename the new column to "rent".
cols <- colnames(df)
cols[6] <- 'rent'
names(df) <- cols
# Load the subway-station data.
subway <- read.csv('subway.csv', header = T)
subway
head(subway)
# Load the administrative-dong code table.
dong_code <- read.csv('dong_code2.csv', header=T)
dong_code
str(dong_code)
names(dong_code) <- c('dong_codes', 'sido','na','sigungu','dong','days','na2')
# Keep only the code and dong-name columns.
codes <- dong_code[,1]
dong2 <- dong_code[,5]
dong_codes <- data.frame(codes, dong2)
dong_codes
head(dong_codes)
# Drop the trailing two digits of the code (divide by 100), then relabel the
# columns with the Korean names expected downstream.
dong_codes$codes <- dong_codes$codes/100
names(dong_codes) <- c('행정동코드', '행정동명')
# Save intermediate results.
# NOTE(review): the write.csv() that follows references `dongpop2`, which is
# never defined in this script -- confirm where it is supposed to come from.
write.csv(dongpop2, 'floatpop.csv') | /[분석] Health&Bueaty Store 독점 대응 경쟁사 입지 선정/Scripts/데이터 전처리.R | no_license | dustinkim86/MyProjects | R | false | false | 3,161 | r | # dataframe 데이터를 gis 데이터 형태로 변환 해주는 library
library(GISTools)
library(raster)
library(maptools)
# 각 브랜드별 매장 위치 데이터 load
olive <- read.csv('./store/서울시 올리브영 매장 위치.csv', header=T)
lobs <- read.csv('./store/서울시 롭스 매장 위치.csv', header=T)
lalavla <- read.csv('./store/서울시 랄라블라 매장 위치.csv', header=T)
# shpfile load
seoul_shp <- shapefile('서울시(행정동)경계_EPSG_5179.shp')
# shpfile epsg 변경
seoul_shp <- spTransform(seoul_shp, CRS("+init=epsg:4326"))
# 기존 shpfile의 형태 check
proj4string(seoul_shp)
# 좌표데이터 -> gis 형태로 변환
# 올리브영
olive_coord <- olive
coordinates(olive_coord) = ~Longitude+Latitude
proj4string(olive_coord) <- CRS("+init=epsg:5179 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Count polygon in points
cnt <- poly.counts(olive_coord, seoul_shp)
cnt2 <- as.data.frame(cnt)
str(cnt2)
cnt2$dong <- seoul_shp$ADM_NM
# 학교
school_coord <- school
coordinates(school_coord) = ~Longitude+Latitude
proj4string(school_coord) <- CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
cnt3 <- poly.counts(school_coord, seoul_shp)
cnt4 <- as.data.frame(cnt3)
cnt4$dong <- seoul_shp$ADM_NM
# 올리브영 count 통합
cnt4$olive <- cnt2$cnt
# 랄라블라
lala_coord <- lalavla
coordinates(lala_coord) = ~Longitude+Latitude
proj4string(lala_coord) <- CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Count polygon in points
cnt <- poly.counts(lala_coord, seoul_shp)
cnt2 <- as.data.frame(cnt)
cnt4$lalavla <- cnt2$cnt
# 롭스
lobs_coord <- lobs
coordinates(lobs_coord) = ~Longitude+Latitude
proj4string(lobs_coord) <- CRS("+init=epsg:4326 +proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
# Count polygon in points
cnt <- poly.counts(lobs_coord, seoul_shp)
cnt2 <- as.data.frame(cnt)
cnt4$lobs <- cnt2$cnt
# 파일 중간저장
write.csv(cnt4, 'analysis.csv')
library(dplyr)
# file load
df <- read.csv('analysis.csv', header=T)
df <- select(df, dong, olive, lalavla, lobs, school)
# 임대료 데이터 load
rent <- read.csv('rent.csv', header=T)
head(rent)
# 같은 행정동의 평균임대료 데이터 insert
for (i in 1:NROW(df)) {
for (j in 1:NROW(rent)) {
if(df[i,1] == rent[j,2]){
df[i,6] <- rent[j,3]
}
}
}
# column 이름 변경
cols <- colnames(df)
cols[6] <- 'rent'
names(df) <- cols
# 지하철역 데이터 load
subway <- read.csv('subway.csv', header = T)
subway
head(subway)
# 행정동코드 데이터 load
dong_code <- read.csv('dong_code2.csv', header=T)
dong_code
str(dong_code)
names(dong_code) <- c('dong_codes', 'sido','na','sigungu','dong','days','na2')
codes <- dong_code[,1]
dong2 <- dong_code[,5]
dong_codes <- data.frame(codes, dong2)
dong_codes
head(dong_codes)
dong_codes$codes <- dong_codes$codes/100
names(dong_codes) <- c('행정동코드', '행정동명')
# 데이터 중간 저장
write.csv(dongpop2, 'floatpop.csv') |
\name{APML0}
\alias{APML0}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Fit a Model with Various Regularization Forms
}
\description{
Fit linear, logistic and Cox models regularized with L0, lasso (L1), elastic-net (L1 and L2), or net (L1 and Laplacian) penalty, and their adaptive forms, such as adaptive lasso / elastic-net and net adjusting for signs of linked coefficients.
It solves L0 penalty problem by simultaneously selecting regularization parameters and performing hard-thresholding (or selecting number of non-zeros). This augmented and penalized minimization method provides an approximation solution to the L0 penalty problem and runs as fast as L1 regularization.
The function uses a one-step coordinate descent algorithm and runs extremely fast by taking into account the sparsity structure of the coefficients. It can handle very high-dimensional data.
}
\usage{
APML0(x, y, family=c("gaussian", "binomial", "cox"), penalty=c("Lasso","Enet", "Net"),
Omega=NULL, alpha=1.0, lambda=NULL, nlambda=50, rlambda=NULL, wbeta=rep(1,ncol(x)),
sgn=rep(1,ncol(x)), nfolds=1, foldid=NULL, ill=TRUE, iL0=TRUE, icutB=FALSE, ncutB=10,
ifast=TRUE, isd=FALSE, iysd=FALSE, ifastr=TRUE, keep.beta=FALSE,
thresh=1e-6, maxit=1e+5, threshC=1e-5, maxitC=1e+2, threshP=1e-5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{input matrix. Each row is an observation vector.
}
\item{y}{response variable. For \code{family = "gaussian"},
\code{y} is a continuous vector. For \code{family = "binomial"},
\code{y} is a binary vector with 0 and 1. For \code{family = "cox"}, \code{y} is a two-column matrix with columns named `time' and `status'. `status' is a binary variable, with `1' indicating event, and `0' indicating right censored.
}
\item{family}{type of outcome. Can be "gaussian", "binomial" or "cox".
}
\item{penalty}{penalty type. Can choose \code{"Net"}, \code{"Enet"} (elastic net) and \code{"Lasso"}. For \code{"Net"}, need to specify \code{Omega}; otherwise, \code{"Enet"} is performed. For \code{penalty = "Net"}, the penalty is defined as \deqn{\lambda*{\alpha*||\beta||_1+(1-\alpha)/2*(\beta^{T}L\beta)},}
where \eqn{L} is a Laplacian matrix calculated from \code{Omega}.
}
\item{Omega}{adjacency matrix with zero diagonal and non-negative off-diagonal, used for \code{penalty = "Net"} to calculate Laplacian matrix.
}
\item{alpha}{ratio between L1 and Laplacian for \code{"Net"}, or between L1 and L2 for \code{"Enet"}. Default is \code{alpha = 1.0}, i.e. lasso.
}
\item{lambda}{a user supplied decreasing sequence. If \code{lambda = NULL}, a sequence of \code{lambda} is generated based on \code{nlambda} and \code{rlambda}. Supplying a value of \code{lambda} overrides this.
}
\item{nlambda}{number of \code{lambda} values. Default is 50.
}
\item{rlambda}{fraction of \code{lambda.max} to determine the smallest value for \code{lambda}. The default is \code{rlambda = 0.0001} when the number of observations is larger than or equal to the number of variables; otherwise, \code{rlambda = 0.01}.
}
\item{wbeta}{penalty weights used with L1 penalty (adaptive L1), given by \eqn{\sum_{j=1}^qw_j|\beta_j|.} The \code{wbeta} is a vector of non-negative values and works as adaptive L1. No penalty is imposed for those coefficients with zero values in \code{wbeta}. Default is 1 for all coefficients. The same weights are also applied to L0.
}
\item{sgn}{sign adjustment used with Laplacian penalty (adaptive Laplacian). The \code{sgn} is a vector of 1 or -1. The \code{sgn} could be based on an initial estimate of \eqn{\beta}, and 1 is used for \eqn{\beta>0} and -1 is for \eqn{\beta<0}. Default is 1 for all coefficients.
}
\item{nfolds}{number of folds. With \code{nfolds = 1} and \code{foldid = NULL} by default, cross-validation is not performed. For cross-validation, smallest value allowable is \code{nfolds = 3}. Specifying \code{foldid} overrides \code{nfolds}.
}
\item{foldid}{an optional vector of values between 1 and \code{nfolds} specifying which fold each observation is in.
}
\item{ill}{logical flag for using likelihood-based as the cross-validation criteria. Default is \code{ill = TRUE}. For \code{family = "gaussian"}, set \code{ill = FALSE} to use the prediction mean squared error as the criterion.
}
\item{iL0}{logical flag for simultaneously performing L0-norm via performing hard-thresholding or selecting number of non-zeros. Default is \code{iL0 = TRUE}.
}
\item{icutB}{logical flag for performing hard-thresholding by selecting the number of non-zero coefficients with the default of \code{icutB = FALSE}. Alternative way is to apply thresholding on the coefficients by setting \code{icutB = TRUE}.
}
\item{ncutB}{the number of thresholds used for \code{icutB = TRUE}. Default is \code{ncutB=10}. Increasing \code{ncutB} may improve the variable selection performance but will increase the computation time.
}
\item{ifast}{logical flag for searching for the best cutoff or the number of non-zeros. Default is \code{ifast=TRUE} for local searching. Setting \code{ifast=FALSE} will search from the smallest cutoff (or number of non-zeros) to the largest but will increase the computation time.
}
\item{isd}{logical flag for outputting standardized coefficients. \code{x} is always standardized prior to fitting the model. Default is \code{isd = FALSE}, returning \eqn{\beta} on the original scale.
}
\item{iysd}{logical flag for standardizing \code{y} prior to computation, for \code{family = "gaussian"}. The returned coefficients are always based on the original \code{y} (unstandardized). Default is \code{iysd = FALSE}.
}
\item{ifastr}{logical flag for efficient calculation of risk set updates for \code{family = "cox"}. Default is \code{ifastr = TRUE}. Setting \code{ifastr = FALSE} may improve the accuracy of calculating the risk set.
}
\item{keep.beta}{logical flag for returning estimates for all \code{lambda} values. For \code{keep.beta = FALSE}, only return the estimate with the minimum cross-validation value.
}
\item{thresh}{convergence threshold for coordinate descent. Default value is \code{1E-6}.
}
\item{maxit}{Maximum number of iterations for coordinate descent. Default is \code{10^5}.
}
\item{threshC}{convergence threshold for hard-thresholding for \code{family = "binomial"}. Default value is \code{1E-5}.
}
\item{maxitC}{Maximum number of iterations for hard-thresholding for \code{family = "binomial"}. Default is \code{10^2}.
}
\item{threshP}{Cutoff when calculating the probability in \code{family = "binomial"}. The probability is bounded within \code{threshP} and \code{1-threshP}. Default value is \code{1E-5}.
}
}
\details{
One-step coordinate descent algorithm is applied for each \code{lambda}. Cross-validation is used for tuning parameters. For \code{iL0 = TRUE}, we further perform hard-thresholding (for \code{icutB=TRUE}) to the coefficients or select the number of non-zero coefficients (for \code{icutB=FALSE}), which is obtained from regularized model at each \code{lambda}. This is motivated by formulating L0 variable selection in an augmented form, which shows significant improvement over the commonly used regularized methods without this technique. Details could be found in our publication.
\code{x} is always standardized prior to fitting the model and the estimate is returned on the original scale for \code{isd=FALSE}.
Each element of \code{wbeta} corresponds to a variable in \code{x}. Setting a zero value in \code{wbeta} will not impose any penalty on that variable.
For \code{family = "cox"}, \code{ifastr = TRUE} adopts an efficient way to update risk set and sometimes the algorithm ends before all \code{nlambda} values of \code{lambda} have been evaluated. To evaluate small values of \code{lambda}, use \code{ifastr = FALSE}. The two methods only affect the efficiency of algorithm, not the estimates.
\code{ifast = TRUE} seems to perform well.
}
\value{
An object with S3 class \code{"APML0"}.
\item{a}{the intercept for \code{family = "gaussian"}.}
\item{Beta}{a sparse Matrix of coefficients, stored in class "dgCMatrix". For \code{family = "binomial"}, the first coefficient is the intercept.}
\item{Beta0}{coefficients after additionally performing L0-norm for \code{iL0 = TRUE}. For \code{family = "binomial"}, the first coefficient is the intercept.}
\item{fit}{a data.frame containing \code{lambda} and the number of non-zero coefficients \code{nzero}. With cross-validation, additional results are reported, such as average cross-validation partial likelihood \code{cvm} and its standard error \code{cvse}, and \code{index} with `*' indicating the minimum \code{cvm}. For \code{family = "gaussian"}, \code{rsq} is also reported.}
\item{fit0}{a data.frame containing \code{lambda}, \code{cvm} and \code{nzero} based on \code{iL0 = TRUE}. \code{cvm} in \code{fit0} may be different from \code{cvm} in \code{fit}, because the constraint on the number of non-zeros is imposed in the cross-validation. The maximum number of non-zeros is based on the full dataset not the one used in the cross-validation.}
\item{lambda.min}{value of \code{lambda} that gives minimum \code{cvm}.}
\item{lambda.opt}{value of \code{lambda} based on \code{iL0 = TRUE}.}
\item{penalty}{penalty type.}
\item{adaptive}{logical flags for adaptive version (see above).}
\item{flag}{convergence flag (for internal debugging). \code{flag = 0} means converged.}
}
\references{Li, X., Xie, S., Zeng, D., Wang, Y. (2018).
\emph{Efficient l0-norm feature selection based on augmented and penalized minimization. Statistics in medicine, 37(3), 473-486.}\cr
\url{https://onlinelibrary.wiley.com/doi/full/10.1002/sim.7526}\cr
Boyd, S., Parikh, N., Chu, E., Peleato, B., Eckstein, J. (2011).
\emph{Distributed optimization and statistical learning via the alternating direction method of multipliers. Foundations and Trends in Machine Learning, 3(1), 1-122.}\cr
\url{http://dl.acm.org/citation.cfm?id=2185816}\cr
Friedman, J., Hastie, T., Tibshirani, R. (2010).
\emph{Regularization paths for generalized linear models via coordinate descent, Journal of Statistical Software, Vol. 33(1), 1.}\cr
\url{http://www.jstatsoft.org/v33/i01/}\cr
}
\author{
Xiang Li, Shanghong Xie, Donglin Zeng and Yuanjia Wang\cr
Maintainer: Xiang Li <spiritcoke@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\section{Warning}{
It may terminate and return \code{NULL}.
}
\seealso{
\code{\link{APML0}}, \code{\link{print.APML0}}
}
\examples{
### Linear model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
y=rnorm(N,xb)
fiti=APML0(x,y,penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
### Logistic model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
y=rbinom(n=N, size=1, prob=1.0/(1.0+exp(-xb)))
fiti=APML0(x,y,family="binomial",penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,family="binomial",penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
### Cox model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
ty=rexp(N, exp(xb))
td=rexp(N, 0.05)
tcens=ifelse(td<ty,1,0) # censoring indicator
y=cbind(time=ty,status=1-tcens)
fiti=APML0(x,y,family="cox",penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,family="cox",penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{L0}
\keyword{Hard-thresholding}
\keyword{Number of non-zero}
\keyword{Regularization}
| /fuzzedpackages/APML0/man/APML0.Rd | no_license | akhikolla/testpackages | R | false | false | 11,941 | rd | \name{APML0}
\alias{APML0}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Fit a Model with Various Regularization Forms
}
\description{
Fit linear, logistic and Cox models regularized with L0, lasso (L1), elastic-net (L1 and L2), or net (L1 and Laplacian) penalty, and their adaptive forms, such as adaptive lasso / elastic-net and net adjusting for signs of linked coefficients.
It solves L0 penalty problem by simultaneously selecting regularization parameters and performing hard-thresholding (or selecting number of non-zeros). This augmented and penalized minimization method provides an approximation solution to the L0 penalty problem and runs as fast as L1 regularization.
The function uses one-step coordinate descent algorithm and runs extremely fast by taking into account the sparsity structure of coefficients. It could deal with very high dimensional data.
}
\usage{
APML0(x, y, family=c("gaussian", "binomial", "cox"), penalty=c("Lasso","Enet", "Net"),
Omega=NULL, alpha=1.0, lambda=NULL, nlambda=50, rlambda=NULL, wbeta=rep(1,ncol(x)),
sgn=rep(1,ncol(x)), nfolds=1, foldid=NULL, ill=TRUE, iL0=TRUE, icutB=FALSE, ncutB=10,
ifast=TRUE, isd=FALSE, iysd=FALSE, ifastr=TRUE, keep.beta=FALSE,
thresh=1e-6, maxit=1e+5, threshC=1e-5, maxitC=1e+2, threshP=1e-5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{input matrix. Each row is an observation vector.
}
\item{y}{response variable. For \code{family = "gaussian"},
\code{y} is a continuous vector. For \code{family = "binomial"},
\code{y} is a binary vector with 0 and 1. For \code{family = "cox"}, \code{y} is a two-column matrix with columns named `time' and `status'. `status' is a binary variable, with `1' indicating event, and `0' indicating right censored.
}
\item{family}{type of outcome. Can be "gaussian", "binomial" or "cox".
}
\item{penalty}{penalty type. Can choose \code{"Net"}, \code{"Enet"} (elastic net) and \code{"Lasso"}. For \code{"Net"}, need to specify \code{Omega}; otherwise, \code{"Enet"} is performed. For \code{penalty = "Net"}, the penalty is defined as \deqn{\lambda*{\alpha*||\beta||_1+(1-\alpha)/2*(\beta^{T}L\beta)},}
where \eqn{L} is a Laplacian matrix calculated from \code{Omega}.
}
\item{Omega}{adjacency matrix with zero diagonal and non-negative off-diagonal, used for \code{penalty = "Net"} to calculate Laplacian matrix.
}
\item{alpha}{ratio between L1 and Laplacian for \code{"Net"}, or between L1 and L2 for \code{"Enet"}. Default is \code{alpha = 1.0}, i.e. lasso.
}
\item{lambda}{a user supplied decreasing sequence. If \code{lambda = NULL}, a sequence of \code{lambda} is generated based on \code{nlambda} and \code{rlambda}. Supplying a value of \code{lambda} overrides this.
}
\item{nlambda}{number of \code{lambda} values. Default is 50.
}
\item{rlambda}{fraction of \code{lambda.max} to determine the smallest value for \code{lambda}. The default is \code{rlambda = 0.0001} when the number of observations is larger than or equal to the number of variables; otherwise, \code{rlambda = 0.01}.
}
\item{wbeta}{penalty weights used with L1 penalty (adaptive L1), given by \eqn{\sum_{j=1}^qw_j|\beta_j|.} The \code{wbeta} is a vector of non-negative values and works as adaptive L1. No penalty is imposed for those coefficients with zero values in \code{wbeta}. Default is 1 for all coefficients. The same weights are also applied to L0.
}
\item{sgn}{sign adjustment used with Laplacian penalty (adaptive Laplacian). The \code{sgn} is a vector of 1 or -1. The \code{sgn} could be based on an initial estimate of \eqn{\beta}, and 1 is used for \eqn{\beta>0} and -1 is for \eqn{\beta<0}. Default is 1 for all coefficients.
}
\item{nfolds}{number of folds. With \code{nfolds = 1} and \code{foldid = NULL} by default, cross-validation is not performed. For cross-validation, smallest value allowable is \code{nfolds = 3}. Specifying \code{foldid} overrides \code{nfolds}.
}
\item{foldid}{an optional vector of values between 1 and \code{nfolds} specifying which fold each observation is in.
}
\item{ill}{logical flag for using likelihood-based as the cross-validation criteria. Default is \code{ill = TRUE}. For \code{family = "gaussian"}, set \code{ill = FALSE} to use the prediction mean squared error as the criterion.
}
\item{iL0}{logical flag for simultaneously performing L0-norm via performing hard-thresholding or selecting number of non-zeros. Default is \code{iL0 = TRUE}.
}
\item{icutB}{logical flag for performing hard-thresholding by selecting the number of non-zero coefficients with the default of \code{icutB = FALSE}. Alternative way is to apply thresholding on the coefficients by setting \code{icutB = TRUE}.
}
\item{ncutB}{the number of thresholds used for \code{icutB = TRUE}. Default is \code{ncutB=10}. Increasing \code{ncutB} may improve the variable selection performance but will increase the computation time.
}
\item{ifast}{logical flag for searching for the best cutoff or the number of non-zeros. Default is \code{ifast=TRUE} for local searching. Setting \code{ifast=FALSE} will search from the smallest cutoff (or number of non-zeros) to the largest but will increase the computation time.
}
\item{isd}{logical flag for outputting standardized coefficients. \code{x} is always standardized prior to fitting the model. Default is \code{isd = FALSE}, returning \eqn{\beta} on the original scale.
}
\item{iysd}{logical flag for standardizing \code{y} prior to computation, for \code{family = "gaussian"}. The returned coefficients are always based on the original \code{y} (unstandardized). Default is \code{iysd = FALSE}.
}
\item{ifastr}{logical flag for efficient calculation of risk set updates for \code{family = "cox"}. Default is \code{ifastr = TRUE}. Setting \code{ifastr = FALSE} may improve the accuracy of calculating the risk set.
}
\item{keep.beta}{logical flag for returning estimates for all \code{lambda} values. For \code{keep.beta = FALSE}, only return the estimate with the minimum cross-validation value.
}
\item{thresh}{convergence threshold for coordinate descent. Default value is \code{1E-6}.
}
\item{maxit}{Maximum number of iterations for coordinate descent. Default is \code{10^5}.
}
\item{threshC}{convergence threshold for hard-thresholding for \code{family = "binomial"}. Default value is \code{1E-5}.
}
\item{maxitC}{Maximum number of iterations for hard-thresholding for \code{family = "binomial"}. Default is \code{10^2}.
}
\item{threshP}{Cutoff when calculating the probability in \code{family = "binomial"}. The probability is bounded within \code{threshP} and \code{1-threshP}. Default value is \code{1E-5}.
}
}
\details{
One-step coordinate descent algorithm is applied for each \code{lambda}. Cross-validation is used for tuning parameters. For \code{iL0 = TRUE}, we further perform hard-thresholding (for \code{icutB=TRUE}) to the coefficients or select the number of non-zero coefficients (for \code{icutB=FALSE}), which is obtained from regularized model at each \code{lambda}. This is motivated by formulating L0 variable selection in an augmented form, which shows significant improvement over the commonly used regularized methods without this technique. Details could be found in our publication.
\code{x} is always standardized prior to fitting the model and the estimate is returned on the original scale for \code{isd=FALSE}.
Each element of \code{wbeta} corresponds to a variable in \code{x}. Setting a zero value in \code{wbeta} will not impose any penalty on that variable.
For \code{family = "cox"}, \code{ifastr = TRUE} adopts an efficient way to update risk set and sometimes the algorithm ends before all \code{nlambda} values of \code{lambda} have been evaluated. To evaluate small values of \code{lambda}, use \code{ifastr = FALSE}. The two methods only affect the efficiency of algorithm, not the estimates.
\code{ifast = TRUE} seems to perform well.
}
\value{
An object with S3 class \code{"APML0"}.
\item{a}{the intercept for \code{family = "gaussian"}.}
\item{Beta}{a sparse Matrix of coefficients, stored in class "dgCMatrix". For \code{family = "binomial"}, the first coefficient is the intercept.}
\item{Beta0}{coefficients after additionally performing L0-norm for \code{iL0 = TRUE}. For \code{family = "binomial"}, the first coefficient is the intercept.}
\item{fit}{a data.frame containing \code{lambda} and the number of non-zero coefficients \code{nzero}. With cross-validation, additional results are reported, such as average cross-validation partial likelihood \code{cvm} and its standard error \code{cvse}, and \code{index} with `*' indicating the minimum \code{cvm}. For \code{family = "gaussian"}, \code{rsq} is also reported.}
\item{fit0}{a data.frame containing \code{lambda}, \code{cvm} and \code{nzero} based on \code{iL0 = TRUE}. \code{cvm} in \code{fit0} may be different from \code{cvm} in \code{fit}, because the constraint on the number of non-zeros is imposed in the cross-validation. The maximum number of non-zeros is based on the full dataset not the one used in the cross-validation.}
\item{lambda.min}{value of \code{lambda} that gives minimum \code{cvm}.}
\item{lambda.opt}{value of \code{lambda} based on \code{iL0 = TRUE}.}
\item{penalty}{penalty type.}
\item{adaptive}{logical flags for adaptive version (see above).}
\item{flag}{convergence flag (for internal debugging). \code{flag = 0} means converged.}
}
\references{Li, X., Xie, S., Zeng, D., Wang, Y. (2018).
\emph{Efficient l0-norm feature selection based on augmented and penalized minimization. Statistics in medicine, 37(3), 473-486.}\cr
\url{https://onlinelibrary.wiley.com/doi/full/10.1002/sim.7526}\cr
Boyd, S., Parikh, N., Chu, E., Peleato, B., Eckstein, J. (2011).
\emph{Distributed optimization and statistical learning via the alternating direction method of multipliers. Foundations and Trends in Machine Learning, 3(1), 1-122.}\cr
\url{http://dl.acm.org/citation.cfm?id=2185816}\cr
Friedman, J., Hastie, T., Tibshirani, R. (2010).
\emph{Regularization paths for generalized linear models via coordinate descent, Journal of Statistical Software, Vol. 33(1), 1.}\cr
\url{http://www.jstatsoft.org/v33/i01/}\cr
}
\author{
Xiang Li, Shanghong Xie, Donglin Zeng and Yuanjia Wang\cr
Maintainer: Xiang Li <spiritcoke@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\section{Warning}{
It may terminate and return \code{NULL}.
}
\seealso{
\code{\link{APML0}}, \code{\link{print.APML0}}
}
\examples{
### Linear model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
y=rnorm(N,xb)
fiti=APML0(x,y,penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
### Logistic model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
y=rbinom(n=N, size=1, prob=1.0/(1.0+exp(-xb)))
fiti=APML0(x,y,family="binomial",penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,family="binomial",penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
### Cox model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
ty=rexp(N, exp(xb))
td=rexp(N, 0.05)
tcens=ifelse(td<ty,1,0) # censoring indicator
y=cbind(time=ty,status=1-tcens)
fiti=APML0(x,y,family="cox",penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,family="cox",penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{L0}
\keyword{Hard-thresholding}
\keyword{Number of non-zero}
\keyword{Regularization}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rplotfriend.R
\docType{package}
\name{rplotfriend}
\alias{rplotfriend}
\alias{rplotfriend-package}
\title{rplotfriend}
\description{
Package with some plotting functions to help make aesthetically pleasing plots.
}
\details{
Plotting functions use gggplot, gtable, and grid packages. Includes functions for making heatmaps, aligning multiple distinct plots, formatting numbers, and manipulating legends.
}
\author{
Alicia Schep
}
\seealso{
\code{\link{align_plots_vert}}, \code{\link{align_plots_hor}}, \code{\link{plot_custom_grid}},
\code{\link{ggheatmap}}, \code{\link{pub_theme}}, \code{\link{pretty_scientific}}, \code{\link{get_legend}}, \code{\link{add_legend}}
}
| /man/rplotfriend.Rd | permissive | jeffmgranja/rplotfriend | R | false | true | 753 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rplotfriend.R
\docType{package}
\name{rplotfriend}
\alias{rplotfriend}
\alias{rplotfriend-package}
\title{rplotfriend}
\description{
Package with some plotting functions to help make aesthetically pleasing plots.
}
\details{
Plotting functions use gggplot, gtable, and grid packages. Includes functions for making heatmaps, aligning multiple distinct plots, formatting numbers, and manipulating legends.
}
\author{
Alicia Schep
}
\seealso{
\code{\link{align_plots_vert}}, \code{\link{align_plots_hor}}, \code{\link{plot_custom_grid}},
\code{\link{ggheatmap}}, \code{\link{pub_theme}}, \code{\link{pretty_scientific}}, \code{\link{get_legend}}, \code{\link{add_legend}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IndicesPrecios.R
\name{inflacion_tot}
\alias{inflacion_tot}
\title{Obtener terminos de intercambio}
\usage{
inflacion_tot(token)
}
\arguments{
\item{token}{token personal emitido por el INEGI para acceder al API.}
}
\value{
Data.frame
}
\description{
Obtiene la razón de términos de intercambio para México (ToT).
La razón se define como el índice de precios de exportaciones entre el índice de precios de importaciones.
Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
}
\examples{
\dontrun{
token<-"webservice_token"
TerminosIntercambio<-inflacion_tot(token)
}
}
\author{
Eduardo Flores
}
| /man/Inflacion_ToT.Rd | no_license | neraunzaran/inegiR | R | false | true | 752 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IndicesPrecios.R
\name{inflacion_tot}
\alias{inflacion_tot}
\title{Obtener terminos de intercambio}
\usage{
inflacion_tot(token)
}
\arguments{
\item{token}{token personal emitido por el INEGI para acceder al API.}
}
\value{
Data.frame
}
\description{
Obtiene la razón de términos de intercambio para México (ToT).
La razón se define como el índice de precios de exportaciones entre el índice de precios de importaciones.
Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
}
\examples{
\dontrun{
token<-"webservice_token"
TerminosIntercambio<-inflacion_tot(token)
}
}
\author{
Eduardo Flores
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/norm_and_annot_by_platform.R
\name{norm_and_annot_by_platform}
\alias{norm_and_annot_by_platform}
\title{Normalization and annotation of microarray data}
\usage{
norm_and_annot_by_platform(obiekt, platform)
}
\arguments{
\item{obiekt}{An object of class marrayRaw, EListRaw, AffyBatch, ExonFeatureSet or list of these objects with raw expression data.}
\item{platform}{A character indicating experiment's platform, could be 'Affymetrix' or 'Agilent'.}
}
\value{
An expression matrix with normalized and annotated expression values.
}
\description{
\code{norm_and_annot_by_platform} function normalizes and annotates data from a single microarray experiment.
}
\details{
Normalization
For Affymetrix platform rma background correction and normalization is performed. For Agilent platform background correction
is performed by subtracting background values. Used normalization method for this platform is vsn.
Annotation
Annotation for Affybatch object is made using custom cdf file automatically downloaded from brainarray and for ExonFeatureSet
using huex10sttranscriptcluster.db package. Annotation for Agilent platform is made with org.Hs.eg.db package.
The first argument could be a list due to make it possible to process data from an experiment where there are few array types within
one platform (Affymetrix or Agilent). To process data from many experiments at once you should consider using
\code{multi_norm_and_annot} function.
}
\examples{
\dontrun{
### download, load, normalize and annotate data from ArrayExpress
data = downloadAE("E-GEOD-21066", getwd())
loaded_experiment = load_data(data, "Affymetrix")
processed_data = norm_and_annot_by_platform(loaded_experiment[[1]], "Affymetrix")
}
}
\seealso{
\code{\link{multi_norm_and_annot}}
}
| /man/norm_and_annot_by_platform.Rd | no_license | EwaMarek/FindReference | R | false | true | 1,836 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/norm_and_annot_by_platform.R
\name{norm_and_annot_by_platform}
\alias{norm_and_annot_by_platform}
\title{Normalization and annotation of microarray data}
\usage{
norm_and_annot_by_platform(obiekt, platform)
}
\arguments{
\item{obiekt}{An object of class marrayRaw, EListRaw, AffyBatch, ExonFeatureSet or list of these objects with raw expression data.}
\item{platform}{A character indicating experiment's platform, could be 'Affymetrix' or 'Agilent'.}
}
\value{
An expression matrix with normalized and annotated expression values.
}
\description{
\code{norm_and_annot_by_platform} function normalizes and annotates data from a single microarray experiment.
}
\details{
Normalization
For Affymetrix platform rma background correction and normalization is performed. For Agilent platform background correction
is performed by subtracting background values. Used normalization method for this platform is vsn.
Annotation
Annotation for Affybatch object is made using custom cdf file automatically downloaded from brainarray and for ExonFeatureSet
using huex10sttranscriptcluster.db package. Annotation for Agilent platform is made with org.Hs.eg.db package.
The first argument could be a list due to make it possible to process data from an experiment where there are few array types within
one platform (Affymetrix or Agilent). To process data from many experiments at once you should consider using
\code{multi_norm_and_annot} function.
}
\examples{
\dontrun{
### download, load, normalize and annotate data from ArrayExpress
data = downloadAE("E-GEOD-21066", getwd())
loaded_experiment = load_data(data, "Affymetrix")
processed_data = norm_and_annot_by_platform(loaded_experiment[[1]], "Affymetrix")
}
}
\seealso{
\code{\link{multi_norm_and_annot}}
}
|
# Package-private mutable state. Stored in an environment (rather than a list)
# so that closures elsewhere in the package can modify it in place.
.globals <- new.env(parent = emptyenv())
.globals$dynamicVars <- list()

# This is a global hook for intercepting meta-mode reads of metaReactive/2.
# The first argument is the (delayed eval) code result, and rexpr is the
# metaReactive/2 object itself. If evaluation of x is not triggered by the
# hook function, then the metaReactive/2 code will not execute/be expanded.
#
# The return value should be a code object.
#
# The default hook is the identity: referencing `x` forces the delayed
# evaluation and returns the code result unchanged.
.globals$rexprMetaReadFilter <- function(x, rexpr) {
  x
}
#' Create a meta-reactive expression
#'
#' Create a [reactive()] that, when invoked with meta-mode activated
#' (i.e. called within [withMetaMode()] or [expandChain()]), returns a
#' code expression (instead of evaluating that expression and returning the value).
#'
#' @details If you wish to capture specific code inside of `expr` (e.g. ignore code
#'   that has no meaning outside shiny, like [req()]), use `metaReactive2()` in combination
#'   with `metaExpr()`. When using `metaReactive2()`, `expr` must return a `metaExpr()`.
#'
#'   If `varname` is unspecified, [srcref]s are used in attempt to infer the name
#'   bound to the meta-reactive object. In order for this inference to work, the
#'   `keep.source` [option] must be `TRUE` and `expr` must begin with `\{`.
#'
#' @param varname An R variable name that this object prefers to be named when
#'   its code is extracted into an R script. (See also: [expandChain()])
#'
#' @param inline If `TRUE`, during code expansion, do not declare a variable for
#'   this object; instead, inline the code into every call site. Use this to avoid
#'   introducing variables for very simple expressions. (See also: [expandChain()])
#'
#' @inheritParams shiny::reactive
#' @inheritParams metaExpr
#' @export
#' @seealso [metaExpr()]
#' @examples
#'
#' library(shiny)
#' options(shiny.suppressMissingContextError = TRUE)
#'
#' input <- list(x = 1)
#'
#' y <- metaReactive({
#'   req(input$x)
#'   a <- !!input$x + 1
#'   b <- a + 1
#'   c + 1
#' })
#'
#' withMetaMode(y())
#' expandChain(y())
#'
#' y <- metaReactive2({
#'   req(input$x)
#'
#'   metaExpr({
#'     a <- !!input$x + 1
#'     b <- a + 1
#'     c + 1
#'   }, bindToReturn = TRUE)
#' })
#'
#' expandChain(y())
#'
metaReactive <- function(expr, env = parent.frame(), quoted = FALSE,
  varname = NULL, domain = shiny::getDefaultReactiveDomain(), inline = FALSE,
  localize = "auto", bindToReturn = FALSE) {

  # Standard NSE capture: if the caller passed raw code, quote it now.
  # substitute() must run here, on the original promise, before expr is used.
  if (!quoted) {
    expr <- substitute(expr)
    quoted <- TRUE
  }

  # Infer the preferred variable name from srcrefs unless explicitly supplied.
  varname <- exprToVarname(expr, varname, inline, "metaReactive")

  # Need to wrap expr with shinymeta:::metaExpr, but can't use rlang/!! to do
  # so, because we want to keep any `!!` contained in expr intact (i.e. too
  # early to perform expansion of expr here).
  #
  # Even though expr itself is quoted, wrapExpr will effectively unquote it by
  # interpolating it into the `metaExpr()` call, thus quoted = FALSE.
  expr <- wrapExpr(shinymeta::metaExpr, expr, env, quoted = FALSE, localize = localize, bindToReturn = bindToReturn)

  # Delegate to the shared constructor used by both metaReactive and metaReactive2.
  metaReactiveImpl(expr = expr, env = env, varname = varname, domain = domain, inline = inline)
}
#' @export
#' @rdname metaReactive
metaReactive2 <- function(expr, env = parent.frame(), quoted = FALSE,
  varname = NULL, domain = shiny::getDefaultReactiveDomain(), inline = FALSE) {

  # Standard NSE capture: if the caller passed raw code, quote it now.
  if (!quoted) {
    expr <- substitute(expr)
    quoted <- TRUE
  }

  # Infer the preferred variable name from srcrefs unless explicitly supplied.
  varname <- exprToVarname(expr, varname, inline, "metaReactive2")

  # Unlike metaReactive(), expr is NOT wrapped in metaExpr() here; callers are
  # expected to return a metaExpr() from inside expr (see ?metaReactive).
  metaReactiveImpl(expr = expr, env = env, varname = varname, domain = domain, inline = inline)
}
# Infer (or validate) the variable name an object prefers when its code is
# extracted into a script.
#
# @param expr Quoted expression the object was created with; its srcref
#   attribute (if any) is used for name inference.
# @param varname Explicit name supplied by the user, or NULL to infer.
# @param inline If TRUE, code expansion never declares a variable, so the
#   name is irrelevant and "anonymous" is returned.
# @param objectType Label used in warning/error messages (e.g. "metaReactive").
# @return A single string (or the callee's defaultLabel when no srcref exists).
exprToVarname <- function(expr, varname = NULL, inline, objectType = "metaReactive") {
  if (is.null(varname)) {
    if (inline) return("anonymous")
    srcref <- attr(expr, "srcref", exact = TRUE)
    if (is.null(srcref)) {
      if (identical(getOption("keep.source"), FALSE)) {
        warning(
          "Unable to infer variable name for ", objectType, " when the option ",
          "keep.source is FALSE. Either set `options(keep.source = TRUE)` ",
          "or specify `varname` in ", objectType,
          call. = FALSE
        )
      } else if (!rlang::is_call(expr, "{")) {
        warning(
          "Unable to infer variable name for ", objectType, " when `expr` does not ",
          "begin with `{`. Either start `expr` with `{` or specify `varname` in ",
          objectType,
          call. = FALSE
        )
      } else {
        warning(
          "Unable to infer variable name for ", objectType, " because no srcref ",
          "is available. Please report an issue to https://github.com/rstudio/shinymeta/issues/new",
          call. = FALSE
        )
      }
    }
    # BUG FIX: previously this unconditionally evaluated `srcref[[1]]`, which
    # errors ("subscript out of bounds") whenever srcref is NULL -- i.e.
    # exactly in the warning branches above, turning the warning into a crash.
    # Pass NULL through instead so mrexprSrcrefToLabel() can fall back to its
    # defaultLabel (assumes mrexprSrcrefToLabel handles a NULL srcref -- TODO
    # confirm against its definition).
    varname <- mrexprSrcrefToLabel(
      if (is.null(srcref)) NULL else srcref[[1]],
      defaultLabel = NULL
    )
  } else {
    # An explicit varname must be a single non-empty, non-NA string.
    if (!is.character(varname) || length(varname) != 1 || is.na(varname) || nchar(varname) == 0) {
      stop("varname must be a non-empty string", call. = FALSE)
    }
  }
  varname
}
# Shared constructor for metaReactive() and metaReactive2(): builds the dual
# normal/meta object returned to the caller.
#
# @param expr Quoted expression (for metaReactive, already wrapped in metaExpr()).
# @param env Environment in which expr is evaluated.
# @param varname Preferred variable name for extracted code.
# @param domain Shiny reactive domain (session) to bind to.
# @param inline Whether code expansion inlines the code instead of declaring
#   a variable.
metaReactiveImpl <- function(expr, env, varname, domain, inline) {
  # Force all arguments up front so the closures below capture fixed values
  # rather than unevaluated promises.
  force(expr)
  force(env)
  force(varname)
  force(domain)
  force(inline)

  # Normal mode: an ordinary shiny reactive that evaluates expr for its value.
  r_normal <- shiny::reactive(expr, env = env, quoted = TRUE, label = varname, domain = domain)

  # Meta mode: evaluate expr under the same reactive domain; per the hook
  # contract at the top of this file, this yields a (delayed eval) code result.
  r_meta <- function() {
    shiny::withReactiveDomain(domain, {
      rlang::eval_tidy(expr, NULL, env)
    })
  }

  # NOTE: `self` is referenced inside the function body below before `self`
  # is assigned; this works because R resolves the name lazily, at call time,
  # by which point structure() has returned and `self` is bound.
  self <- structure(
    function() {
      metaDispatch(
        normal = {
          r_normal()
        },
        meta = {
          # Route meta-mode reads through the global intercept hook.
          .globals$rexprMetaReadFilter(r_meta(), self)
        }
      )
    },
    class = c("shinymeta_reactive", "shinymeta_object", "function"),
    shinymetaVarname = varname,
    # Unique ID generated via shiny's unexported createUniqueId helper.
    shinymetaUID = getFromNamespace("createUniqueId", "shiny")(8),
    shinymetaDomain = domain,
    shinymetaInline = inline
  )
  self
}
#' @export
print.shinymeta_reactive <- function(x, ...) {
  # Show the variable name this metaReactive prefers in generated code.
  varname <- attr(x, "shinymetaVarname")
  cat(paste0("metaReactive:", varname, "\n"))
}
# Package-global meta-mode flag, held in a closure. Legal values:
#   FALSE   - metaExpr() evaluates its expr and returns the value
#   TRUE    - metaExpr() returns its expr as quoted code
#   "mixed" - like TRUE, except metaReactives reached *outside* a metaExpr()
#             (e.g. directly inside metaReactive2) are evaluated normally;
#             see metaDispatch for details.
#
# Call metaMode() with no argument to read the current value, or with one
# argument to set it (invalid values raise an error).
metaMode <- local({
  current <- FALSE
  function(x) {
    if (missing(x)) {
      return(current)
    }
    valid <- isTRUE(x) || isFALSE(x) || identical(x, "mixed")
    if (!valid) {
      stop("Invalid metaMode() value: legal values are TRUE, FALSE, and \"mixed\"")
    }
    current <<- x
  }
})
# Dispatch on the current metaMode() value, forcing exactly one of the three
# lazily-supplied branch arguments. This is the single place to update if the
# set of supported metaMode values ever changes.
switchMetaMode <- function(normal, meta, mixed) {
  # All three branches must be supplied, even though only one is evaluated.
  if (missing(normal) || missing(meta) || missing(mixed)) {
    stop("switchMetaMode call was missing required argument(s)")
  }
  mode <- metaMode()
  if (identical(mode, "mixed")) {
    mixed
  } else if (isTRUE(mode)) {
    meta
  } else if (isFALSE(mode)) {
    normal
  } else {
    # Defensive: metaMode() validates on write, so this should be unreachable.
    stop("Illegal metaMode detected: ", format(mode))
  }
}
# metaDispatch implements the innermost if/switch for meta-reactive objects:
# metaReactive/metaReactive2, metaObserve/metaObserve2, metaRender/metaRender2.
#
# We basically want to detect nested calls to `metaDispatch` without an
# intervening `withMetaMode(TRUE)` or `metaExpr`, and treat those cases as
# metaMode(FALSE).
#
# mr1 <- metaReactive({
# 1 + 1
# })
#
# mr2 <- metaReactive2({
# mr1() # returns 2
# !!mr1() # `!!`` is treated as double-boolean (NOT unquote), so: TRUE
# metaExpr(
# !!mr1() # returns quote(1 + 1)
# )
# })
#
# withMetaMode(mr2())
#
# `normal` and `meta` are promises; exactly one of them is forced, under the
# mode chosen below.
metaDispatch <- function(normal, meta) {
  switchMetaMode(
    normal = {
      # Plain reactive evaluation.
      force(normal)
    },
    meta = {
      # Generate code, but drop to "mixed" so nested meta objects reached
      # outside a metaExpr() evaluate normally (see comment above).
      withMetaMode(meta, "mixed")
    },
    mixed = {
      # Nested dispatch without an intervening metaExpr(): evaluate the
      # normal branch with meta mode fully off.
      withMetaMode(normal, FALSE)
    }
  )
}
# Key under which meta-mode results can be cached: an expansion depends on
# both the current dynamic-variable scope and the current meta mode.
metaCacheKey <- function() {
  list(.globals$dynamicVars, metaMode())
}
#' Evaluate an expression with meta mode activated
#'
#' @param expr an expression.
#' @param mode whether or not to evaluate expression in meta mode.
#'
#' @return The value of `expr`, evaluated under the requested mode; in meta
#'   mode the result is additionally tagged with class "shinyMetaExpr".
#'
#' @seealso [expandChain()]
#' @export
withMetaMode <- function(expr, mode = TRUE) {
  # Temporarily switch the global meta mode, restoring the previous value on
  # exit (only when it actually changes, to avoid a redundant on.exit).
  origVal <- metaMode()
  if (!identical(origVal, mode)) {
    metaMode(mode)
    on.exit(metaMode(origVal), add = TRUE)
  }
  # `expr` is a promise, so it is forced *after* the mode switch above --
  # either by prefix_class() below (meta mode) or by force() (other modes).
  if (switchMetaMode(normal = FALSE, meta = TRUE, mixed = FALSE)) {
    expr <- prefix_class(expr, "shinyMetaExpr")
  }
  force(expr)
}
#' Mark an expression as a meta-expression
#'
#' In normal mode the expression is simply evaluated; in meta mode it is
#' returned as quoted code (after `!!` expansion and formatting). See
#' [metaReactive2()] and friends for where this is typically used.
#'
#' @param expr An expression (quoted or unquoted).
#' @param env An environment.
#' @param quoted Is the expression quoted? This is useful when you want to use an expression
#' that is stored in a variable; to do so, it must be quoted with [`quote()`].
#' @param localize Whether or not to wrap the returned expression in [`local()`].
#' The default, `"auto"`, only wraps expressions with a top-level [`return()`]
#' statement (i.e., return statements in anonymized functions are ignored).
#' @param bindToReturn For non-`localize`d expressions, should an assignment
#' of a meta expression be applied to the _last child_ of the top-level `\{` call?
#'
#' @seealso [metaReactive2()], [metaObserve2()], [metaRender2()]
#' @export
metaExpr <- function(expr, env = parent.frame(), quoted = FALSE, localize = "auto", bindToReturn = FALSE) {
  # Capture `expr` unevaluated unless the caller already quoted it, then
  # delegate to the internal workhorse.
  if (!quoted) {
    expr <- substitute(expr)
    quoted <- TRUE
  }
  metaExpr_(expr, env = env, quoted = quoted, localize = localize, bindToReturn = bindToReturn)
}
# Internal workhorse behind metaExpr().
#
# In normal mode: expands `!!` in `expr` and evaluates it, returning the
# value. In meta (or mixed) mode: returns `expr` as quoted code, after
# running it through the code-formatting pipeline below.
#
# @param topLevelDynVars If TRUE, the current dynamic variables
#   (.globals$dynamicVars) participate in `!!` expansion; expandCode() sets
#   this to FALSE so patch calls only affect nested expansions.
metaExpr_ <- function(expr, env = parent.frame(), quoted = FALSE, localize = "auto", bindToReturn = FALSE,
  topLevelDynVars = TRUE) {
  if (!quoted) {
    expr <- substitute(expr)
    quoted <- TRUE
  }
  # Normal mode: expand !! then evaluate, returning the resulting value.
  if (switchMetaMode(normal = TRUE, meta = FALSE, mixed = FALSE)) {
    expr <- expandExpr(expr, list(), env)
    return(rlang::eval_tidy(expr, env = env))
  }
  # metaExpr() moves us from mixed to meta state
  withMetaMode(mode = TRUE, {
    # Code-formatting pipeline: each step transforms the quoted expression.
    expr <- comment_flags(expr)
    expr <- expandExpr(expr, if (topLevelDynVars) .globals$dynamicVars, env)
    expr <- strip_outer_brace(expr)
    # Note that bindToReturn won't make sense for a localized call,
    # so determine we need local scope first, then add a special class
    # (we don't yet have the name for binding the return value)
    expr <- add_local_scope(expr, localize)
    # Apply bindToReturn rules, if relevant
    expr <- bind_to_return(expr)
    # TODO: let user opt-out of comment elevation
    # (I _think_ this is always safe)?
    expr <- elevate_comments(expr)
    # flag the call so that we know to bind next time we see this call
    # inside an assign call, we should modify it
    if (bindToReturn && rlang::is_call(expr, "{")) {
      expr <- prefix_class(expr, "bindToReturn")
    }
    expr <- prefix_class(expr, "shinyMetaExpr")
    expr
  })
}
# Evaluate `expr` with additional named "dynamic variables" in scope.
#
# The named values (given via ... or `.list`) are merged into
# .globals$dynamicVars -- new names shadow existing ones -- for the duration
# of the evaluation of `expr`; the previous set is restored afterwards.
withDynamicScope <- function(expr, ..., .list = list(...)) {
  if (length(.list) > 0) {
    if (is.null(names(.list)) || !all(nzchar(names(.list)))) {
      stop("withDynamicScope invoked with unnamed vars; all vars must be named")
    }
    oldVars <- .globals$dynamicVars
    # Drop any old vars that the new ones shadow, then append the new ones.
    .globals$dynamicVars <- c(oldVars[setdiff(names(oldVars), names(.list))], .list)
    on.exit(.globals$dynamicVars <- oldVars)
  }
  # `expr` is a promise: it is only forced here, while the extended dynamic
  # scope is in effect.
  expr
  # TODO use promise domain
}
#' Expand meta primitives into user code
#'
#' This function provides the main entry point for generating user code
#' via meta-components (e.g., [metaReactive()], [metaObserve()], [metaRender()], etc).
#' It's similar to [withMetaMode()], but instead, quotes the `expr`, which allows you
#' to generate code from multiple meta-components via quasiquotation (e.g. [rlang::!!]).
#' When producing code from multiple meta-components, you may find that code produced from one
#' meta-component overlaps (i.e., repeats redundant computation) with another meta-component.
#' In that case, it's desirable to assign the return value of a meta-component to a variable, and
#' use that variable (i.e., symbol) in downstream code generated from other meta-components. This
#' can be done via the `patchCalls` argument which can replace the return value of
#' a meta-component with a relevant variable name.
#'
#' @inheritParams metaExpr
#' @param patchCalls a named list of quoted symbols. The names of the list
#' should match name(s) bound to relevant meta-component(s) found in `expr`
#' (e.g. `petal_width` in the example below). The quoted symbol(s) should
#' match variable name(s) representing the return value of the meta-component(s).
#'
#' @seealso [withMetaMode()]
#' @noRd
expandCode <- function(expr, env = parent.frame(), quoted = FALSE, patchCalls = list()) {
  if (!quoted) {
    expr <- substitute(expr)
    quoted <- TRUE
  }
  withMetaMode(
    withDynamicScope(
      # topLevelDynVars = FALSE: patch calls only apply to nested expansions,
      # not to the top-level expression itself.
      metaExpr_(expr, env = env, quoted = quoted, localize = FALSE,
        bindToReturn = FALSE, topLevelDynVars = FALSE),
      # Each patch call becomes a constant function yielding its replacement
      # symbol, installed as a dynamic variable for the duration.
      .list = lapply(patchCalls, constf)
    )
  )
}
# Is `expr` a read of a Shiny output slot, i.e. `output$foo` or
# `output[["foo"]]` with a literal string index?
is_output_read <- function(expr) {
  reads_output <- function(e) rlang::is_symbol(e[[2]], "output")
  if (rlang::is_call(expr, name = "$", n = 2)) {
    return(reads_output(expr) && rlang::is_symbol(expr[[3]]))
  }
  if (rlang::is_call(expr, name = "[[", n = 2)) {
    return(reads_output(expr) && is.character(expr[[3]]))
  }
  FALSE
}
# Build an `lhs <- rhs` expression, or return `rhs` unchanged when lhs is "".
#
# `lhs` may be "", a variable name given as a string (converted with
# as.name()), or a language object such as quote(foo) or quote(foo$bar).
# `rhs` may be any value or language object.
#
# The result is usually a language object, but may be a plain value:
# make_assign_expr("", 10) simply returns 10.
make_assign_expr <- function(lhs = "", rhs) {
  stopifnot(is.character(lhs) || is.language(lhs))
  target <- lhs
  if (is.character(target)) {
    if (target == "") {
      # Empty name: nothing to assign to, hand back rhs as-is.
      return(rhs)
    }
    target <- as.name(target)
  }
  call("<-", target, rhs)
}
#' @param ... A collection of meta-reactives.
#' @param .env An environment.
#' @param .pkgs A character vector of packages to load before the expanded code.
#' @return A quoted `{` block assigning each object's expanded code to its
#'   name (comments pass through verbatim).
#' @noRd
expandObjects <- function(..., .env = parent.frame(), .pkgs) {
  # Capture the ... arguments unevaluated; names give explicit bindings.
  exprs <- rlang::exprs(...)
  # Accumulates name -> symbol substitutions, filled in by the mapply
  # callback below via <<-.
  patchCalls <- list()
  objs <- mapply(names(exprs), exprs, FUN = function(nm, x) {
    # Comment strings pass through untouched (but may not be named).
    if (is_comment(x)) {
      if (nzchar(nm)) {
        stop("expandObjects called with a named comment; only unnamed comments are supported")
      }
      attr(x, "shinymeta_comment") <- TRUE
      return(x)
    }
    # Do a sensible thing if someone has done `expandObjects(mr())` instead of `expandObjects(mr)`
    if (rlang::is_call(x) && length(x) == 1 && (is.symbol(x[[1]]) || is_output_read(x[[1]]))) {
      x <- x[[1]]
    }
    if (is.symbol(x)) {
      # Get the value pointed to by `x`. We'll need this to decide what rules we
      # apply to its expansion. Throws error if not found.
      val <- get(as.character(x), pos = .env, inherits = TRUE)
      # Observers and reactive expressions get different rules.
      is_observe <- inherits(val, "shinymeta_observer")
      is_reactive_expr <- inherits(val, "shinymeta_reactive")
      # Only metaObserve and metaReactive objects are supported
      if (!is_observe && !is_reactive_expr) {
        stop("expandObjects called with ", as.character(x), ", which has unrecognized object type ", deparse(class(val)))
      }
      # If metaReactive objects are passed without an explicit name, use the
      # name of the object itself as the name--this is the common case.
      if (is_reactive_expr && nm == "") {
        nm <- as.character(x)
      }
      # Reactive expressions always go into patchCalls; observers never do, even
      # if they're passed to us as named arguments, because there's no way they
      # can be validly referred to from other meta-reactive objects.
      if (is_reactive_expr) {
        patchCalls[[as.character(x)]] <<- as.name(nm)
      }
      # Emit `nm <- !!obj()`: the !! is expanded later by expandCode().
      rhs <- wrapExpr(`!!`, as.call(list(x)))
      return(make_assign_expr(nm, rhs))
    }
    if (is_output_read(x)) {
      # `output$foo` style reads: evaluate in meta mode just to verify the
      # output exists before emitting the assignment.
      output_obj <- withMetaMode(eval(x, envir = .env))
      if (is.null(output_obj)) {
        stop("Could not find ", format(x))
      }
      rhs <- wrapExpr(`!!`, as.call(list(x)))
      return(make_assign_expr(nm, rhs))
    }
    stop("expandObjects requires all arguments to be comment-strings and/or variable names of meta-reactive objects")
  })
  # Optionally prepend library() calls for the requested packages.
  if (!missing(.pkgs)) {
    libs <- lapply(.pkgs, function(x) call("library", x))
    objs <- c(libs, objs)
  }
  # Assemble everything into one `{ ... }` call and expand it.
  expr <- do.call(call, c(list("{"), objs), quote = TRUE)
  expandCode(!!expandExpr(expr, NULL, .env), patchCalls = patchCalls)
}
#' @rdname expandChain
#' @name expandChain
#' @export
newExpansionContext <- function() {
  self <- structure(
    list(
      # uid -> varname for metaReactives already rendered into code.
      uidToVarname = fastmap::fastmap(missing_default = NULL),
      # varname -> TRUE for names already claimed (prevents reuse).
      seenVarname = fastmap::fastmap(missing_default = FALSE),
      # uid -> zero-arg callback producing replacement code (see
      # substituteMetaReactive below).
      uidToSubstitute = fastmap::fastmap(missing_default = NULL),
      # Function to make a (hopefully but not guaranteed to be new) varname
      makeVarname = local({
        nextVarId <- 0L
        function() {
          nextVarId <<- nextVarId + 1L
          paste0("var_", nextVarId)
        }
      }),
      # Register a callback whose returned (quoted) code replaces `mrobj`'s
      # implementation during expansion. Must be called before the object
      # has been rendered into code; returns the context invisibly.
      substituteMetaReactive = function(mrobj, callback) {
        if (!inherits(mrobj, "shinymeta_reactive")) {
          stop(call. = FALSE, "Attempted to substitute an object that wasn't a metaReactive")
        }
        if (!is.function(callback) || length(formals(callback)) != 0) {
          stop(call. = FALSE, "Substitution callback should be a function that takes 0 args")
        }
        uid <- attr(mrobj, "shinymetaUID", exact = TRUE)
        if (!is.null(self$uidToVarname$get(uid))) {
          stop(call. = FALSE, "Attempt to substitute a metaReactive object that's already been rendered into code")
        }
        self$uidToSubstitute$set(uid, callback)
        invisible(self)
      }
    ),
    class = "shinymetaExpansionContext"
  )
  self
}
#' @export
print.shinymetaExpansionContext <- function(x, ...) {
  # Print one line per rendered metaReactive: "varname [id: uid]".
  map <- x$uidToVarname
  cat(sprintf("%s [id: %s]", map$mget(map$keys()), map$keys()), sep = "\n")
}
#' Expand code objects
#'
#' Use `expandChain` to write code out of one or more metaReactive objects.
#' Each meta-reactive object (expression, observer, or renderer) will cause not
#' only its own code to be written, but that of its dependencies as well.
#'
#' @param ... All arguments must be unnamed, and must be one of: 1) calls to
#' meta-reactive objects, 2) comment string (e.g. `"# A comment"`), 3)
#' language object (e.g. `quote(print(1 + 1))`), or 4) `NULL` (which will be
#' ignored). Calls to meta-reactive objects can optionally be [invisible()],
#' see Details.
#' @param .expansionContext Accept the default value if calling `expandChain` a
#' single time to generate a corpus of code; or create an expansion context
#' object using `newExpansionContext()` and pass it to multiple related calls
#' of `expandChain`. See Details.
#'
#' @return The return value of `expandChain` is a code object that's suitable for
#' printing or passing to [displayCodeModal()], [buildScriptBundle()], or
#' [buildRmdBundle()].
#'
#' The return value of `newExpansionContext` is an object that should be
#' passed to multiple `expandChain()` calls.
#'
#' @references <https://rstudio.github.io/shinymeta/articles/code-generation.html>
#'
#' @details
#'
#' There are two ways to extract code from meta objects (i.e. [metaReactive()],
#' [metaObserve()], and [metaRender()]): `withMetaMode()` and `expandChain()`.
#' The simplest is `withMetaMode(obj())`, which crawls the tree of meta-reactive
#' dependencies and expands each `!!` in place.
#'
#' For example, consider these meta objects:
#'
#' ```
#' nums <- metaReactive({ runif(100) })
#' obs <- metaObserve({
#' summary(!!nums())
#' plot(!!nums())
#' })
#' ```
#'
#' When code is extracted using `withMetaMode`:
#' ```
#' withMetaMode(obs())
#' ```
#'
#' The result looks like this:
#'
#' ```
#' summary(runif(100))
#' plot(runif(100))
#' ```
#'
#' Notice how `runif(100)` is inlined wherever `!!nums()`
#' appears, which is not desirable if we wish to reuse the same
#' values for `summary()` and `plot()`.
#'
#' The `expandChain` function helps us workaround this issue
#' by assigning return values of `metaReactive()` expressions to
#' a name, then replaces relevant expansion (e.g., `!!nums()`)
#' with the appropriate name (e.g. `nums`).
#'
#' ```
#' expandChain(obs())
#' ```
#'
#' The result looks like this:
#'
#' ```
#' nums <- runif(100)
#' summary(nums)
#' plot(nums)
#' ```
#'
#' You can pass multiple meta objects and/or comments to `expandChain`.
#'
#' ```
#' expandChain(
#' "# Generate values",
#' nums(),
#' "# Summarize and plot",
#' obs()
#' )
#' ```
#'
#' Output:
#'
#' ```
#' # Generate values
#' nums <- runif(100)
#' nums
#' # Summarize and plot
#' summary(nums)
#' plot(nums)
#' ```
#'
#' You can suppress the printing of the `nums` vector in the previous example by
#' wrapping the `nums()` argument to `expandChain()` with `invisible(nums())`.
#'
#' @section Preserving dependencies between `expandChain()` calls:
#'
#' Sometimes we may have related meta objects that we want to generate code for,
#' but we want the code for some objects in one code chunk, and the code for
#' other objects in another code chunk; for example, you might be constructing
#' an R Markdown report that has a specific place for each code chunk.
#'
#' Within a single `expandChain()` call, all `metaReactive` objects are
#' guaranteed to only be declared once, even if they're depended on by multiple
#' meta objects; but since we're making two `expandChain()` calls, we will end
#' up with duplicated code. To remove this duplication, we need the second
#' `expandChain` call to know what code was emitted in the first `expandChain`
#' call.
#'
#' We can achieve this by creating an "expansion context" and sharing it between
#' the two calls.
#'
#' ```
#' exp_ctx <- newExpansionContext()
#' chunk1 <- expandChain(.expansionContext = exp_ctx,
#' invisible(nums())
#' )
#' chunk2 <- expandChain(.expansionContext = exp_ctx,
#' obs()
#' )
#' ```
#'
#' After this code is run, `chunk1` contains only the definition of `nums` and
#' `chunk2` contains only the code for `obs`.
#'
#' @section Substituting `metaReactive` objects:
#'
#' Sometimes, when generating code, we want to completely replace the
#' implementation of a `metaReactive`. For example, our Shiny app might contain
#' this logic, using [shiny::fileInput()]:
#'
#' ```
#' data <- metaReactive2({
#' req(input$file_upload)
#' metaExpr(read.csv(!!input$file_upload$datapath))
#' })
#' obs <- metaObserve({
#' summary(!!data())
#' })
#' ```
#'
#' Shiny's file input works by saving uploading files to a temp directory. The
#' file referred to by `input$file_upload$datapath` won't be available when
#' another user tries to run the generated code.
#'
#' You can use the expansion context object to swap out the implementation of
#' `data`, or any other `metaReactive`:
#'
#' ```
#' ec <- newExpansionContext()
#' ec$substituteMetaReactive(data, function() {
#' metaExpr(read.csv("data.csv"))
#' })
#'
#' expandChain(.expansionContext = ec, obs())
#' ```
#'
#' Result:
#'
#' ```
#' data <- read.csv("data.csv")
#' summary(data)
#' ```
#'
#' Just make sure this code ends up in a script or Rmd bundle that includes the
#' uploaded file as `data.csv`, and the user will be able to reproduce your
#' analysis.
#'
#' The `substituteMetaReactive` method takes two arguments: the `metaReactive`
#' object to substitute, and a function that takes zero arguments and returns a
#' quoted expression (for the nicest looking results, use `metaExpr` to create
#' the expression). This function will be invoked the first time the
#' `metaReactive` object is encountered (or if the `metaReactive` is defined
#' with `inline = TRUE`, then every time it is encountered).
#'
#' @examples
#' input <- list(dataset = "cars")
#'
#' # varname is only required if srcref aren't supported
#' # (R CMD check disables them for some reason?)
#' mr <- metaReactive({
#' get(!!input$dataset, "package:datasets")
#' })
#'
#' top <- metaReactive({
#' head(!!mr())
#' })
#'
#' bottom <- metaReactive({
#' tail(!!mr())
#' })
#'
#' obs <- metaObserve({
#' message("Top:")
#' summary(!!top())
#' message("Bottom:")
#' summary(!!bottom())
#' })
#'
#' # Simple case
#' expandChain(obs())
#'
#' # Explicitly print top
#' expandChain(top(), obs())
#'
#' # Separate into two code chunks
#' exp_ctx <- newExpansionContext()
#' expandChain(.expansionContext = exp_ctx,
#' invisible(top()),
#' invisible(bottom()))
#' expandChain(.expansionContext = exp_ctx,
#' obs())
#'
#' @export
expandChain <- function(..., .expansionContext = newExpansionContext()) {
  # As we come across previously unseen objects (i.e. the UID has not been
  # encountered before) we have to make some decisions about what variable name
  # (i.e. varname) to use to represent that object. This varname is either
  # auto-detected based on the metaReactive's variable name, or provided
  # explicitly by the user when the metaReactive is created. (If the object
  # belongs to a module, then we use the module ID to prefix the varname.)
  #
  # But, the desired variable name might already have been used by a different
  # metaReactive (i.e. two objects have the same label). In this case, we can
  # also use a var_1, var_2, etc. (and this is what the code currently does)
  # but it'd be even better to try to disambiguate by using the desired name
  # plus _1, _2, etc. (keep going til you find one that hasn't been used yet).
  #
  # IDEA:
  # A different strategy we could use is to generate a gensym as the label at
  # first, keeping track of the metadata for every gensym (label, module id).
  # Then after the code generation is done, we can go back and see what the
  # best overall set of variable names is. For example, if the same variable
  # name "df" is used within module IDs "one" and "two", we can use "one_df"
  # and "two_df"; but if only module ID "one" is used, we can just leave it
  # as "df". (As opposed to the current strategy, where if "one" and "two"
  # are both used, we end up with "df" and "df_two".)
  # Keep track of what label we have used for each UID we have previously
  # encountered. If a UID isn't found in this map, then we haven't yet
  # encountered it.
  uidToVarname <- .expansionContext$uidToVarname
  # Keep track of what labels we have used, so we can be sure we don't
  # reuse them.
  seenVarname <- .expansionContext$seenVarname
  # As we encounter metaReactives that we depend on (directly or indirectly),
  # we'll append their code to this list (including assigning them to a label).
  dependencyCode <- list()
  # Override the rexprMetaReadFilter while we generate code. This is a filter
  # function that metaReactive/metaReactive2 will call when someone asks them
  # for their meta value. The `x` is the (lazily evaluated) logic for actually
  # generating their code (or retrieving it from cache).
  oldFilter <- .globals$rexprMetaReadFilter
  .globals$rexprMetaReadFilter <- function(x, rexpr) {
    # Read this object's UID.
    uid <- attr(rexpr, "shinymetaUID", exact = TRUE)
    domain <- attr(rexpr, "shinymetaDomain", exact = TRUE)
    inline <- attr(rexpr, "shinymetaInline", exact = TRUE)
    # Produce this object's code: either from a registered substitution
    # callback, or by forcing `x` (the object's own code-generation logic).
    exec <- function() {
      subfunc <- .expansionContext$uidToSubstitute$get(uid)
      # The value of this assignment is also exec()'s return value.
      result <- if (!is.null(subfunc)) {
        withMetaMode(subfunc())
      } else {
        x
      }
    }
    if (isTRUE(inline)) {
      # The metaReactive doesn't want to have its own variable
      return(exec())
    }
    # Check if we've seen this UID before, and if so, just return the same
    # varname as we used last time.
    varname <- uidToVarname$get(uid)
    if (!is.null(varname)) {
      return(as.symbol(varname))
    }
    # OK, we haven't seen this UID before. We need to figure out what variable
    # name to use.
    # Our first choice would be whatever varname the object itself has (the true
    # var name of this metaReactive, or a name the user explicitly provided).
    varname <- attr(rexpr, "shinymetaVarname", exact = TRUE)
    # If there wasn't either a varname or explicitly provided name, just make
    # a totally generic one up.
    if (is.null(varname) || varname == "" || length(varname) != 1) {
      varname <- .expansionContext$makeVarname()
    } else {
      # Prefix with the module namespace (dashes aren't valid in R names).
      if (!is.null(domain)) {
        varname <- gsub("-", "_", domain$ns(varname))
      }
    }
    # Make sure we don't use a variable name that has already been used.
    while (seenVarname$get(varname)) {
      varname <- .expansionContext$makeVarname()
    }
    # Remember this UID/varname combination for the future.
    uidToVarname$set(uid, varname)
    # Make sure this varname doesn't get used again.
    seenVarname$set(varname, TRUE)
    # Since this is the first time we're seeing this object, now we need to
    # generate its code and store it in our running list of dependencies.
    expr <- rlang::expr(`<-`(!!as.symbol(varname), !!exec()))
    dependencyCode <<- c(dependencyCode, list(expr))
    # This is what we're returning to the caller; whomever wanted the code for
    # this metaReactive is going to get this variable name instead.
    as.symbol(varname)
  }
  on.exit(.globals$rexprMetaReadFilter <- oldFilter, add = TRUE, after = FALSE)
  withMetaMode({
    # Trigger evaluation of the ..., which will also cause dependencyCode to be
    # populated. The value of list(...) should all be code expressions, unless
    # the user passed in something wrong.
    dot_args <- eval(substitute(alist(...)))
    if (!is.null(names(dot_args))) {
      stop(call. = FALSE, "Named ... arguments to expandChain are not supported")
    }
    res <- lapply(seq_along(dot_args), function(i) {
      # Grab the nth element. We do it with this gross `..n` business because
      # we want to make sure we trigger evaluation of the arguments one at a
      # time. We can't use rlang's dots-related functions, because it eagerly
      # expands the `!!` in arguments, which we want to leave alone.
      #
      # Use `withVisible` because invisible() arguments should have their
      # deps inserted, but not their actual code. Note that metaReactives
      # consider *themselves* their own dependencies, so for metaReactive
      # this means the code that assigns it is created (`mr <- ...`),
      # but the additional line for printing it (`mr`) will be suppressed.
      x_vis <- withVisible(eval(as.symbol(paste0("..", i)), envir = environment()))
      x <- x_vis$value
      val <- if (is_comment(x)) {
        # Comments become an (empty-block-terminated) meta expression.
        do.call(metaExpr, list(rlang::expr({!!x; {}})))
      } else if (is.language(x)) {
        x
      } else if (is.null(x)) {
        x
      } else {
        stop(call. = FALSE, "expandChain() understands language objects, comment-strings, and NULL; but not ", class(x)[1], " objects")
      }
      # Claim (and reset) the dependencies accumulated while forcing this arg.
      myDependencyCode <- dependencyCode
      dependencyCode <<- list()
      if (x_vis$visible) {
        c(myDependencyCode, list(val))
      } else {
        myDependencyCode
      }
    })
    res <- unlist(res, recursive = FALSE)
    res <- res[!vapply(res, is.null, logical(1))]
    # Expand into a block of code
    metaExpr({!!!res})
  })
}
# Prepend class `y` to the (old-style) class vector of language object `x`,
# deduplicating if `y` is already present. Symbols and non-language values
# are returned untouched: attributes can't be set on symbols, and we never
# need to flag or compute on them anyway.
prefix_class <- function(x, y) {
  if (!is.language(x) || is.symbol(x)) {
    return(x)
  }
  oldClass(x) <- unique(c(y, oldClass(x)))
  x
}
# Drop class `y` from the (old-style) class vector of language object `x`.
# Symbols and non-language values pass through unchanged, mirroring
# prefix_class().
remove_class <- function(x, y) {
  if (!is.language(x) || is.symbol(x)) {
    return(x)
  }
  oldClass(x) <- setdiff(oldClass(x), y)
  x
}
# Quote the caller's argument via enquote().
#
# NOTE(review): enquote() accepts a single call object, so forwarding `...`
# only works when exactly one argument is supplied, despite the plural name.
# Confirm whether this was meant to return a list of quoted arguments
# (e.g. via eval(substitute(alist(...)))) instead.
quotedList <- function(...) {
  enquote(...)
}
| /R/metareactive.R | no_license | NlIceD/shinymeta | R | false | false | 32,088 | r | .globals <- new.env(parent = emptyenv())
# Named list of "dynamic variables" currently in scope during code expansion
# (see withDynamicScope()); starts empty.
.globals$dynamicVars <- list()
# This is a global hook for intercepting meta-mode reads of metaReactive/2.
# The first argument is the (delayed eval) code result, and rexpr is the
# metaReactive/2 object itself. If evaluation of x is not triggered by the
# hook function, then the metaReactive/2 code will not execute/be expanded.
#
# The return value should be a code object.
.globals$rexprMetaReadFilter <- function(x, rexpr) {
  x
}
#' Create a meta-reactive expression
#'
#' Create a [reactive()] that, when invoked with meta-mode activated
#' (i.e. called within [withMetaMode()] or [expandChain()]), returns a
#' code expression (instead of evaluating that expression and returning the value).
#'
#' @details If you wish to capture specific code inside of `expr` (e.g. ignore code
#' that has no meaning outside shiny, like [req()]), use `metaReactive2()` in combination
#' with `metaExpr()`. When using `metaReactive2()`, `expr` must return a `metaExpr()`.
#'
#' If `varname` is unspecified, [srcref]s are used in attempt to infer the name
#' bound to the meta-reactive object. In order for this inference to work, the
#' `keep.source` [option] must be `TRUE` and `expr` must begin with `\{`.
#'
#' @param varname An R variable name that this object prefers to be named when
#' its code is extracted into an R script. (See also: [expandChain()])
#'
#' @param inline If `TRUE`, during code expansion, do not declare a variable for
#' this object; instead, inline the code into every call site. Use this to avoid
#' introducing variables for very simple expressions. (See also: [expandChain()])
#'
#' @inheritParams shiny::reactive
#' @inheritParams metaExpr
#' @export
#' @seealso [metaExpr()]
#' @examples
#'
#' library(shiny)
#' options(shiny.suppressMissingContextError = TRUE)
#'
#' input <- list(x = 1)
#'
#' y <- metaReactive({
#' req(input$x)
#' a <- !!input$x + 1
#' b <- a + 1
#' c + 1
#' })
#'
#' withMetaMode(y())
#' expandChain(y())
#'
#' y <- metaReactive2({
#' req(input$x)
#'
#' metaExpr({
#' a <- !!input$x + 1
#' b <- a + 1
#' c + 1
#' }, bindToReturn = TRUE)
#' })
#'
#' expandChain(y())
#'
metaReactive <- function(expr, env = parent.frame(), quoted = FALSE,
  varname = NULL, domain = shiny::getDefaultReactiveDomain(), inline = FALSE,
  localize = "auto", bindToReturn = FALSE) {
  # Capture `expr` unevaluated unless the caller already quoted it.
  if (!quoted) {
    expr <- substitute(expr)
    quoted <- TRUE
  }
  # Resolve the variable name used during code generation (inferred from the
  # srcref of `expr` when not given explicitly).
  varname <- exprToVarname(expr, varname, inline, "metaReactive")
  # Need to wrap expr with shinymeta:::metaExpr, but can't use rlang/!! to do
  # so, because we want to keep any `!!` contained in expr intact (i.e. too
  # early to perform expansion of expr here).
  #
  # Even though expr itself is quoted, wrapExpr will effectively unquote it by
  # interpolating it into the `metaExpr()` call, thus quoted = FALSE.
  expr <- wrapExpr(shinymeta::metaExpr, expr, env, quoted = FALSE, localize = localize, bindToReturn = bindToReturn)
  metaReactiveImpl(expr = expr, env = env, varname = varname, domain = domain, inline = inline)
}
#' @export
#' @rdname metaReactive
metaReactive2 <- function(expr, env = parent.frame(), quoted = FALSE,
  varname = NULL, domain = shiny::getDefaultReactiveDomain(), inline = FALSE) {
  # Capture `expr` unevaluated unless the caller has already quoted it.
  if (!quoted) {
    expr <- substitute(expr)
    quoted <- TRUE
  }
  # Resolve the variable name used during code generation; "metaReactive2"
  # labels any warnings produced while inferring it.
  varname <- exprToVarname(expr, varname, inline, "metaReactive2")
  # Unlike metaReactive(), `expr` is NOT wrapped in metaExpr() here; the
  # caller is expected to call metaExpr() themselves inside `expr`.
  metaReactiveImpl(expr = expr, env = env, varname = varname, domain = domain, inline = inline)
}
# Resolve the variable name that represents a meta object in generated code.
#
# When `varname` is NULL, the name is inferred from the srcref of `expr`
# (requires options(keep.source = TRUE) and an `expr` that begins with `{`);
# otherwise the user-supplied name is validated.
#
# @param expr A quoted expression, possibly carrying a "srcref" attribute.
# @param varname A user-supplied name, or NULL to infer one.
# @param inline If TRUE, no variable will be declared during expansion, so
#   the placeholder "anonymous" is returned immediately.
# @param objectType Label used in warning/error messages.
# @return A single character string: the resolved variable name.
exprToVarname <- function(expr, varname = NULL, inline, objectType = "metaReactive") {
  if (is.null(varname)) {
    if (inline) return("anonymous")
    srcref <- attr(expr, "srcref", exact = TRUE)
    if (is.null(srcref)) {
      # Explain *why* inference failed, choosing the most specific diagnosis.
      if (identical(getOption("keep.source"), FALSE)) {
        warning(
          "Unable to infer variable name for ", objectType, " when the option ",
          "keep.source is FALSE. Either set `options(keep.source = TRUE)` ",
          "or specify `varname` in ", objectType,
          call. = FALSE
        )
      } else if (!rlang::is_call(expr, "{")) {
        warning(
          "Unable to infer variable name for ", objectType, " when `expr` does not ",
          "begin with `{`. Either start `expr` with `{` or specify `varname` in ",
          objectType,
          call. = FALSE
        )
      } else {
        warning(
          "Unable to infer variable name for ", objectType, " because no srcref ",
          "is available. Please report an issue to https://github.com/rstudio/shinymeta/issues/new",
          call. = FALSE
        )
      }
    }
    # NOTE(review): when srcref is NULL we still reach this call; forcing
    # `srcref[[1]]` on a NULL normally raises "subscript out of bounds"
    # right after the warning above -- confirm mrexprSrcrefToLabel
    # tolerates that (it is defined elsewhere and not visible here).
    varname <- mrexprSrcrefToLabel(srcref[[1]], defaultLabel = NULL)
  } else {
    # A user-supplied varname must be a single, non-NA, non-empty string.
    if (!is.character(varname) || length(varname) != 1 || is.na(varname) || nchar(varname) == 0) {
      stop("varname must be a non-empty string", call. = FALSE)
    }
  }
  varname
}
# Shared implementation behind metaReactive() and metaReactive2().
#
# Returns a function (classed "shinymeta_reactive") that behaves like an
# ordinary shiny::reactive() in normal mode, and yields a code object in meta
# mode, routed through the global rexprMetaReadFilter hook (which
# expandChain() temporarily overrides).
#
# @param expr A quoted expression. metaReactive() pre-wraps it in metaExpr();
#   metaReactive2() passes it through untouched.
# @param env Environment in which `expr` is evaluated.
# @param varname Preferred variable name for extracted code (may be NULL).
# @param domain Shiny reactive domain (session object), or NULL.
# @param inline If TRUE, expansion inlines this object's code at each call
#   site instead of declaring a variable.
metaReactiveImpl <- function(expr, env, varname, domain, inline) {
  # Force every argument now so the closures below capture stable values.
  force(expr)
  force(env)
  force(varname)
  force(domain)
  force(inline)
  # Normal-mode path: a plain reactive over the quoted expression.
  r_normal <- shiny::reactive(expr, env = env, quoted = TRUE, label = varname, domain = domain)
  # Meta-mode path: evaluate `expr` under the same reactive domain; in meta
  # mode the embedded metaExpr() returns quoted code rather than a value.
  r_meta <- function() {
    shiny::withReactiveDomain(domain, {
      rlang::eval_tidy(expr, NULL, env)
    })
  }
  self <- structure(
    function() {
      metaDispatch(
        normal = {
          r_normal()
        },
        meta = {
          # The hook may replace the code with a variable reference
          # (see expandChain()).
          .globals$rexprMetaReadFilter(r_meta(), self)
        }
      )
    },
    class = c("shinymeta_reactive", "shinymeta_object", "function"),
    shinymetaVarname = varname,
    # Unique ID lets code generators recognize repeat encounters of this object.
    shinymetaUID = getFromNamespace("createUniqueId", "shiny")(8),
    shinymetaDomain = domain,
    shinymetaInline = inline
  )
  self
}
#' @export
print.shinymeta_reactive <- function(x, ...) {
  # Show the variable name this metaReactive will be bound to in
  # generated code.
  label <- attr(x, "shinymetaVarname")
  cat(paste0("metaReactive:", label, "\n"))
}
# Module-wide flag controlling how meta objects evaluate. Three states:
#   FALSE   - metaExpr() returns its EVALUATED expression (normal mode)
#   TRUE    - metaExpr() returns its QUOTED expression (meta mode)
#   "mixed" - like TRUE, but reads of nested meta objects that happen inside
#             a metaReactive2 (etc.) yet outside of metaExpr() fall back to
#             normal evaluation, i.e. metaMode(FALSE).
#
# See metaDispatch for more details on mixed mode.
#
# Called with no argument, returns the current state; called with one
# argument, validates and stores it (returned invisibly).
metaMode <- local({
  current <- FALSE
  function(x) {
    if (missing(x)) {
      return(current)
    }
    valid <- isTRUE(x) || isFALSE(x) || identical(x, "mixed")
    if (!valid) {
      stop("Invalid metaMode() value: legal values are TRUE, FALSE, and \"mixed\"")
    }
    current <<- x
  }
})
# More-specific replacement for switch() on the value of metaMode().
#
# Forces (and returns) exactly one of the three lazily-supplied branches;
# the unchosen branches are never evaluated. This is the single place to
# update if the set of supported metaMode values ever changes.
switchMetaMode <- function(normal, meta, mixed) {
  if (missing(normal) || missing(meta) || missing(mixed)) {
    stop("switchMetaMode call was missing required argument(s)")
  }
  mode <- metaMode()
  if (identical(mode, "mixed")) {
    mixed
  } else if (isTRUE(mode)) {
    meta
  } else if (isFALSE(mode)) {
    normal
  } else {
    stop("Illegal metaMode detected: ", format(mode))
  }
}
# metaDispatch implements the innermost if/switch for meta-reactive objects:
# metaReactive/metaReactive2, metaObserve/metaObserve2, metaRender/metaRender2.
#
# We basically want to detect nested calls to `metaDispatch` without an
# intervening `withMetaMode(TRUE)` or `metaExpr`, and treat those cases as
# metaMode(FALSE).
#
# mr1 <- metaReactive({
# 1 + 1
# })
#
# mr2 <- metaReactive2({
# mr1() # returns 2
# !!mr1() # `!!`` is treated as double-boolean (NOT unquote), so: TRUE
# metaExpr(
# !!mr1() # returns quote(1 + 1)
# )
# })
#
# withMetaMode(mr2())
#
# Note: `normal` and `meta` arrive as unevaluated promises; only the branch
# chosen by switchMetaMode() is ever forced.
metaDispatch <- function(normal, meta) {
switchMetaMode(
normal = {
# Plain (non-meta) evaluation: just run the normal implementation.
force(normal)
},
meta = {
# Full meta mode: run the meta branch, but drop to "mixed" so nested
# meta objects evaluate normally unless wrapped in metaExpr().
withMetaMode(meta, "mixed")
},
mixed = {
# Inside a meta object but outside metaExpr(): nested objects run their
# normal implementation with meta mode fully off.
withMetaMode(normal, FALSE)
}
)
}
# Key used to memoize generated meta code: a cached result is only reusable
# for the same set of dynamically scoped variables and the same meta mode.
metaCacheKey <- function() {
list(.globals$dynamicVars, metaMode())
}
#' Evaluate an expression with meta mode activated
#'
#' Temporarily switches [metaMode()] to `mode`, forces `expr` while that mode
#' is in effect, and restores the previous mode on exit. In full meta mode the
#' forced result is additionally tagged with the "shinyMetaExpr" class.
#'
#' @param expr an expression.
#' @param mode whether or not to evaluate expression in meta mode.
#'
#' @seealso [expandChain()]
#' @export
withMetaMode <- function(expr, mode = TRUE) {
  prevMode <- metaMode()
  if (!identical(prevMode, mode)) {
    metaMode(mode)
    on.exit(metaMode(prevMode), add = TRUE)
  }
  # The mode check happens before `expr` is forced, matching the original
  # evaluation order; the promise is then forced while `mode` is in effect.
  if (switchMetaMode(normal = FALSE, meta = TRUE, mixed = FALSE)) {
    expr <- prefix_class(expr, "shinyMetaExpr")
  }
  force(expr)
}
#' Mark an expression as a meta-expression
#'
#' @param expr An expression (quoted or unquoted).
#' @param env An environment.
#' @param quoted Is the expression quoted? This is useful when you want to use an expression
#'   that is stored in a variable; to do so, it must be quoted with [`quote()`].
#' @param localize Whether or not to wrap the returned expression in [`local()`].
#'   The default, `"auto"`, only wraps expressions with a top-level [`return()`]
#'   statement (i.e., return statements in anonymized functions are ignored).
#' @param bindToReturn For non-`localize`d expressions, should an assignment
#'   of a meta expression be applied to the _last child_ of the top-level `\{` call?
#'
#' @seealso [metaReactive2()], [metaObserve2()], [metaRender2()]
#' @export
metaExpr <- function(expr, env = parent.frame(), quoted = FALSE, localize = "auto", bindToReturn = FALSE) {
  # Capture the caller's expression unless it arrived pre-quoted, then hand
  # everything off to the internal worker.
  if (!isTRUE(quoted)) {
    expr <- substitute(expr)
    quoted <- TRUE
  }
  metaExpr_(expr, env = env, quoted = quoted, localize = localize, bindToReturn = bindToReturn)
}
# Internal worker behind metaExpr(). In normal mode, expands `!!` unquoting
# and evaluates `expr`, returning its value; in meta/mixed mode, returns a
# cleaned-up quoted expression tagged with class "shinyMetaExpr".
#
# topLevelDynVars: whether the currently registered dynamic variables
# (.globals$dynamicVars) participate in expansion; expandCode() disables
# this because it installs its own dynamic scope.
metaExpr_ <- function(expr, env = parent.frame(), quoted = FALSE, localize = "auto", bindToReturn = FALSE,
topLevelDynVars = TRUE) {
if (!quoted) {
expr <- substitute(expr)
quoted <- TRUE
}
# Normal mode: expand, then evaluate and return the resulting value.
if (switchMetaMode(normal = TRUE, meta = FALSE, mixed = FALSE)) {
expr <- expandExpr(expr, list(), env)
return(rlang::eval_tidy(expr, env = env))
}
# metaExpr() moves us from mixed to meta state
withMetaMode(mode = TRUE, {
# comment_flags() presumably marks comment strings so they survive the
# transformations below (see elevate_comments) -- confirm against helper.
expr <- comment_flags(expr)
expr <- expandExpr(expr, if (topLevelDynVars) .globals$dynamicVars, env)
expr <- strip_outer_brace(expr)
# Note that bindToReturn won't make sense for a localized call,
# so determine we need local scope first, then add a special class
# (we don't yet have the name for binding the return value)
expr <- add_local_scope(expr, localize)
# Apply bindToReturn rules, if relevant
expr <- bind_to_return(expr)
# TODO: let user opt-out of comment elevation
# (I _think_ this is always safe)?
expr <- elevate_comments(expr)
# flag the call so that we know to bind next time we see this call
# inside an assign call, we should modify it
if (bindToReturn && rlang::is_call(expr, "{")) {
expr <- prefix_class(expr, "bindToReturn")
}
expr <- prefix_class(expr, "shinyMetaExpr")
expr
})
}
# Temporarily extend .globals$dynamicVars with the named values in `.list`
# while `expr` is evaluated. `expr` arrives as an unevaluated promise and is
# only forced when this function returns it -- i.e. while the extended scope
# is in effect; on.exit() restores the old vars after the return value has
# been computed.
withDynamicScope <- function(expr, ..., .list = list(...)) {
if (length(.list) > 0) {
if (is.null(names(.list)) || !all(nzchar(names(.list)))) {
stop("withDynamicScope invoked with unnamed vars; all vars must be named")
}
oldVars <- .globals$dynamicVars
# New names override existing entries; all other entries are preserved.
.globals$dynamicVars <- c(oldVars[setdiff(names(oldVars), names(.list))], .list)
on.exit(.globals$dynamicVars <- oldVars)
}
expr
# TODO use promise domain
}
#' Expand meta primitives into user code
#'
#' This function provides the main entry point for generating user code
#' via meta-components (e.g., [metaReactive()], [metaObserve()], [metaRender()], etc).
#' It's similar to [withMetaMode()], but instead, quotes the `expr`, which allows you
#' to generate code from multiple meta-components via quasiquotation (e.g. [rlang::!!]).
#' When producing code from multiple meta-components, you may find that code produced from one
#' meta-component overlaps (i.e., repeats redundant computation) with another meta-component.
#' In that case, it's desirable to assign the return value of a meta-component to a variable, and
#' use that variable (i.e., symbol) in downstream code generated from other meta-components. This
#' can be done via the `patchCalls` argument which can replace the return value of
#' a meta-component with a relevant variable name.
#'
#' @inheritParams metaExpr
#' @param patchCalls a named list of quoted symbols. The names of the list
#' should match name(s) bound to relevant meta-component(s) found in `expr`
#' (e.g. `petal_width` in the example below). The quoted symbol(s) should
#' match variable name(s) representing the return value of the meta-component(s).
#'
#' @seealso [withMetaMode()]
#' @noRd
# See the roxygen block above for the full contract. Briefly: quote `expr`
# (unless pre-quoted) and expand it in full meta mode, with each name in
# `patchCalls` dynamically bound so that calls to that meta-component expand
# to the replacement symbol instead of inlining its code.
expandCode <- function(expr, env = parent.frame(), quoted = FALSE, patchCalls = list()) {
if (!quoted) {
expr <- substitute(expr)
quoted <- TRUE
}
# constf() presumably wraps each patch symbol in a constant function so the
# dynamic scope can serve it in place of the component's generated code.
withMetaMode(
withDynamicScope(
metaExpr_(expr, env = env, quoted = quoted, localize = FALSE,
bindToReturn = FALSE, topLevelDynVars = FALSE),
.list = lapply(patchCalls, constf)
)
)
}
# Does `expr` read from the shiny `output` object -- either `output$name`
# or `output[["name"]]`?
is_output_read <- function(expr) {
  if (rlang::is_call(expr, name = "$", n = 2)) {
    return(rlang::is_symbol(expr[[2]], "output") && rlang::is_symbol(expr[[3]]))
  }
  if (rlang::is_call(expr, name = "[[", n = 2)) {
    return(rlang::is_symbol(expr[[2]], "output") && is.character(expr[[3]]))
  }
  FALSE
}
# Build an `lhs <- rhs` expression, or return `rhs` untouched when lhs is "".
#
# lhs: either "" (no binding), another string (converted with as.name()),
#      or a language object such as quote(foo) or quote(foo$bar).
# rhs: any value -- a simple value or a language object.
#
# The result is usually a language object, but not necessarily:
# make_assign_expr("", 10) simply yields 10.
make_assign_expr <- function(lhs = "", rhs) {
  stopifnot(is.character(lhs) || is.language(lhs))
  if (is.character(lhs)) {
    if (identical(lhs, "")) {
      # Nothing to bind to: hand back the right-hand side as-is.
      return(rhs)
    }
    lhs <- as.name(lhs)
  }
  call("<-", lhs, rhs)
}
#' @param ... A collection of meta-reactives.
#' @param .env An environment.
#' @param .pkgs A character vector of packages to load before the expanded code.
#' @noRd
# Builds a `{`-block declaring each given meta object (assigning
# metaReactives to variables, preceded by optional library() calls), then
# expands it with expandCode(); `patchCalls` (captured via `<<-` from the
# mapply callback) makes later code refer to earlier assignments by name.
expandObjects <- function(..., .env = parent.frame(), .pkgs) {
exprs <- rlang::exprs(...)
patchCalls <- list()
objs <- mapply(names(exprs), exprs, FUN = function(nm, x) {
# Comment strings pass through, flagged so downstream code can spot them.
if (is_comment(x)) {
if (nzchar(nm)) {
stop("expandObjects called with a named comment; only unnamed comments are supported")
}
attr(x, "shinymeta_comment") <- TRUE
return(x)
}
# Do a sensible thing if someone has done `expandObjects(mr())` instead of `expandObjects(mr)`
if (rlang::is_call(x) && length(x) == 1 && (is.symbol(x[[1]]) || is_output_read(x[[1]]))) {
x <- x[[1]]
}
if (is.symbol(x)) {
# Get the value pointed to by `x`. We'll need this to decide what rules we
# apply to its expansion. Throws error if not found.
val <- get(as.character(x), pos = .env, inherits = TRUE)
# Observers and reactive expressions get different rules.
is_observe <- inherits(val, "shinymeta_observer")
is_reactive_expr <- inherits(val, "shinymeta_reactive")
# Only metaObserve and metaReactive objects are supported
if (!is_observe && !is_reactive_expr) {
stop("expandObjects called with ", as.character(x), ", which has unrecognized object type ", deparse(class(val)))
}
# If metaReactive objects are passed without an explicit name, use the
# name of the object itself as the name--this is the common case.
if (is_reactive_expr && nm == "") {
nm <- as.character(x)
}
# Reactive expressions always go into patchCalls; observers never do, even
# if they're passed to us as named arguments, because there's no way they
# can be validly referred to from other meta-reactive objects.
if (is_reactive_expr) {
patchCalls[[as.character(x)]] <<- as.name(nm)
}
# Emit `nm <- !!obj()` so the call expands during code generation.
rhs <- wrapExpr(`!!`, as.call(list(x)))
return(make_assign_expr(nm, rhs))
}
if (is_output_read(x)) {
# `output$foo` reads: evaluate in meta mode to confirm the output exists.
output_obj <- withMetaMode(eval(x, envir = .env))
if (is.null(output_obj)) {
stop("Could not find ", format(x))
}
rhs <- wrapExpr(`!!`, as.call(list(x)))
return(make_assign_expr(nm, rhs))
}
stop("expandObjects requires all arguments to be comment-strings and/or variable names of meta-reactive objects")
})
if (!missing(.pkgs)) {
libs <- lapply(.pkgs, function(x) call("library", x))
objs <- c(libs, objs)
}
# Assemble everything into a single `{ ... }` call and expand it.
expr <- do.call(call, c(list("{"), objs), quote = TRUE)
expandCode(!!expandExpr(expr, NULL, .env), patchCalls = patchCalls)
}
#' @rdname expandChain
#' @name expandChain
#' @export
newExpansionContext <- function() {
self <- structure(
list(
# uid -> varname for metaReactives already rendered into code.
uidToVarname = fastmap::fastmap(missing_default = NULL),
# varname -> TRUE for every variable name already handed out.
seenVarname = fastmap::fastmap(missing_default = FALSE),
# uid -> zero-arg callback replacing that object's generated code.
uidToSubstitute = fastmap::fastmap(missing_default = NULL),
# Function to make a (hopefully but not guaranteed to be new) varname
makeVarname = local({
nextVarId <- 0L
function() {
nextVarId <<- nextVarId + 1L
paste0("var_", nextVarId)
}
}),
# Register `callback` as the code generator for `mrobj`. Must be called
# before the object is first rendered into code. Note: `self` is only
# resolved lazily when this closure runs, so the self-reference is safe.
substituteMetaReactive = function(mrobj, callback) {
if (!inherits(mrobj, "shinymeta_reactive")) {
stop(call. = FALSE, "Attempted to substitute an object that wasn't a metaReactive")
}
if (!is.function(callback) || length(formals(callback)) != 0) {
stop(call. = FALSE, "Substitution callback should be a function that takes 0 args")
}
uid <- attr(mrobj, "shinymetaUID", exact = TRUE)
if (!is.null(self$uidToVarname$get(uid))) {
stop(call. = FALSE, "Attempt to substitute a metaReactive object that's already been rendered into code")
}
self$uidToSubstitute$set(uid, callback)
invisible(self)
}
),
class = "shinymetaExpansionContext"
)
self
}
#' @export
print.shinymetaExpansionContext <- function(x, ...) {
  # One line per already-rendered metaReactive: "varname [id: uid]".
  map <- x$uidToVarname
  uids <- map$keys()
  cat(sprintf("%s [id: %s]", map$mget(uids), uids), sep = "\n")
}
#' Expand code objects
#'
#' Use `expandChain` to write code out of one or more metaReactive objects.
#' Each meta-reactive object (expression, observer, or renderer) will cause not
#' only its own code to be written, but that of its dependencies as well.
#'
#' @param ... All arguments must be unnamed, and must be one of: 1) calls to
#' meta-reactive objects, 2) comment string (e.g. `"# A comment"`), 3)
#' language object (e.g. `quote(print(1 + 1))`), or 4) `NULL` (which will be
#' ignored). Calls to meta-reactive objects can optionally be [invisible()],
#' see Details.
#' @param .expansionContext Accept the default value if calling `expandChain` a
#' single time to generate a corpus of code; or create an expansion context
#' object using `newExpansionContext()` and pass it to multiple related calls
#' of `expandChain`. See Details.
#'
#' @return The return value of `expandChain` is a code object that's suitable for
#' printing or passing to [displayCodeModal()], [buildScriptBundle()], or
#' [buildRmdBundle()].
#'
#' The return value of `newExpansionContext` is an object that should be
#' passed to multiple `expandChain()` calls.
#'
#' @references <https://rstudio.github.io/shinymeta/articles/code-generation.html>
#'
#' @details
#'
#' There are two ways to extract code from meta objects (i.e. [metaReactive()],
#' [metaObserve()], and [metaRender()]): `withMetaMode()` and `expandChain()`.
#' The simplest is `withMetaMode(obj())`, which crawls the tree of meta-reactive
#' dependencies and expands each `!!` in place.
#'
#' For example, consider these meta objects:
#'
#' ```
#' nums <- metaReactive({ runif(100) })
#' obs <- metaObserve({
#' summary(!!nums())
#' hist(!!nums())
#' })
#' ```
#'
#' When code is extracted using `withMetaMode`:
#' ```
#' withMetaMode(obs())
#' ```
#'
#' The result looks like this:
#'
#' ```
#' summary(runif(100))
#' hist(runif(100))
#' ```
#'
#' Notice how `runif(100)` is inlined wherever `!!nums()`
#' appears, which is not desirable if we wish to reuse the same
#' values for `summary()` and `hist()`.
#'
#' The `expandChain` function helps us workaround this issue
#' by assigning return values of `metaReactive()` expressions to
#' a name, then replaces relevant expansion (e.g., `!!nums()`)
#' with the appropriate name (e.g. `nums`).
#'
#' ```
#' expandChain(obs())
#' ```
#'
#' The result looks like this:
#'
#' ```
#' nums <- runif(100)
#' summary(nums)
#' hist(nums)
#' ```
#'
#' You can pass multiple meta objects and/or comments to `expandChain`.
#'
#' ```
#' expandChain(
#' "# Generate values",
#' nums(),
#' "# Summarize and plot",
#' obs()
#' )
#' ```
#'
#' Output:
#'
#' ```
#' # Generate values
#' nums <- runif(100)
#' nums
#' # Summarize and plot
#' summary(nums)
#' hist(nums)
#' ```
#'
#' You can suppress the printing of the `nums` vector in the previous example by
#' wrapping the `nums()` argument to `expandChain()` with `invisible(nums())`.
#'
#' @section Preserving dependencies between `expandChain()` calls:
#'
#' Sometimes we may have related meta objects that we want to generate code for,
#' but we want the code for some objects in one code chunk, and the code for
#' other objects in another code chunk; for example, you might be constructing
#' an R Markdown report that has a specific place for each code chunk.
#'
#' Within a single `expandChain()` call, all `metaReactive` objects are
#' guaranteed to only be declared once, even if they're depended on by multiple
#' meta objects; but since we're making two `expandChain()` calls, we will end
#' up with duplicated code. To remove this duplication, we need the second
#' `expandChain` call to know what code was emitted in the first `expandChain`
#' call.
#'
#' We can achieve this by creating an "expansion context" and sharing it between
#' the two calls.
#'
#' ```
#' exp_ctx <- newExpansionContext()
#' chunk1 <- expandChain(.expansionContext = exp_ctx,
#' invisible(nums())
#' )
#' chunk2 <- expandChain(.expansionContext = exp_ctx,
#' obs()
#' )
#' ```
#'
#' After this code is run, `chunk1` contains only the definition of `nums` and
#' `chunk2` contains only the code for `obs`.
#'
#' @section Substituting `metaReactive` objects:
#'
#' Sometimes, when generating code, we want to completely replace the
#' implementation of a `metaReactive`. For example, our Shiny app might contain
#' this logic, using [shiny::fileInput()]:
#'
#' ```
#' data <- metaReactive2({
#' req(input$file_upload)
#' metaExpr(read.csv(!!input$file_upload$datapath))
#' })
#' obs <- metaObserve({
#' summary(!!data())
#' })
#' ```
#'
#' Shiny's file input works by saving uploading files to a temp directory. The
#' file referred to by `input$file_upload$datapath` won't be available when
#' another user tries to run the generated code.
#'
#' You can use the expansion context object to swap out the implementation of
#' `data`, or any other `metaReactive`:
#'
#' ```
#' ec <- newExpansionContext()
#' ec$substituteMetaReactive(data, function() {
#' metaExpr(read.csv("data.csv"))
#' })
#'
#' expandChain(.expansionContext = ec, obs())
#' ```
#'
#' Result:
#'
#' ```
#' data <- read.csv("data.csv")
#' summary(data)
#' ```
#'
#' Just make sure this code ends up in a script or Rmd bundle that includes the
#' uploaded file as `data.csv`, and the user will be able to reproduce your
#' analysis.
#'
#' The `substituteMetaReactive` method takes two arguments: the `metaReactive`
#' object to substitute, and a function that takes zero arguments and returns a
#' quoted expression (for the nicest looking results, use `metaExpr` to create
#' the expression). This function will be invoked the first time the
#' `metaReactive` object is encountered (or if the `metaReactive` is defined
#' with `inline = TRUE`, then every time it is encountered).
#'
#' @examples
#' input <- list(dataset = "cars")
#'
#' # varname is only required if srcref aren't supported
#' # (R CMD check disables them for some reason?)
#' mr <- metaReactive({
#' get(!!input$dataset, "package:datasets")
#' })
#'
#' top <- metaReactive({
#' head(!!mr())
#' })
#'
#' bottom <- metaReactive({
#' tail(!!mr())
#' })
#'
#' obs <- metaObserve({
#' message("Top:")
#' summary(!!top())
#' message("Bottom:")
#' summary(!!bottom())
#' })
#'
#' # Simple case
#' expandChain(obs())
#'
#' # Explicitly print top
#' expandChain(top(), obs())
#'
#' # Separate into two code chunks
#' exp_ctx <- newExpansionContext()
#' expandChain(.expansionContext = exp_ctx,
#' invisible(top()),
#' invisible(bottom()))
#' expandChain(.expansionContext = exp_ctx,
#' obs())
#'
#' @export
expandChain <- function(..., .expansionContext = newExpansionContext()) {
# As we come across previously unseen objects (i.e. the UID has not been
# encountered before) we have to make some decisions about what variable name
# (i.e. varname) to use to represent that object. This varname is either
# auto-detected based on the metaReactive's variable name, or provided
# explicitly by the user when the metaReactive is created. (If the object
# belongs to a module, then we use the module ID to prefix the varname.)
#
# But, the desired variable name might already have been used by a different
# metaReactive (i.e. two objects have the same label). In this case, we can
# also use a var_1, var_2, etc. (and this is what the code currently does)
# but it'd be even better to try to disambiguate by using the desired name
# plus _1, _2, etc. (keep going til you find one that hasn't been used yet).
#
# IDEA:
# A different strategy we could use is to generate a gensym as the label at
# first, keeping track of the metadata for every gensym (label, module id).
# Then after the code generation is done, we can go back and see what the
# best overall set of variable names is. For example, if the same variable
# name "df" is used within module IDs "one" and "two", we can use "one_df"
# and "two_df"; but if only module ID "one" is used, we can just leave it
# as "df". (As opposed to the current strategy, where if "one" and "two"
# are both used, we end up with "df" and "df_two".)
# Keep track of what label we have used for each UID we have previously
# encountered. If a UID isn't found in this map, then we haven't yet
# encountered it.
uidToVarname <- .expansionContext$uidToVarname
# Keep track of what labels we have used, so we can be sure we don't
# reuse them.
seenVarname <- .expansionContext$seenVarname
# As we encounter metaReactives that we depend on (directly or indirectly),
# we'll append their code to this list (including assigning them to a label).
dependencyCode <- list()
# Override the rexprMetaReadFilter while we generate code. This is a filter
# function that metaReactive/metaReactive2 will call when someone asks them
# for their meta value. The `x` is the (lazily evaluated) logic for actually
# generating their code (or retrieving it from cache).
oldFilter <- .globals$rexprMetaReadFilter
.globals$rexprMetaReadFilter <- function(x, rexpr) {
# Read this object's UID.
uid <- attr(rexpr, "shinymetaUID", exact = TRUE)
domain <- attr(rexpr, "shinymetaDomain", exact = TRUE)
inline <- attr(rexpr, "shinymetaInline", exact = TRUE)
# Produce this object's code: via a user-registered substitution callback
# if one exists, otherwise by forcing `x` (the object's own code).
exec <- function() {
subfunc <- .expansionContext$uidToSubstitute$get(uid)
# The assignment below is the last expression, so exec() returns `result`.
result <- if (!is.null(subfunc)) {
withMetaMode(subfunc())
} else {
x
}
}
if (isTRUE(inline)) {
# The metaReactive doesn't want to have its own variable
return(exec())
}
# Check if we've seen this UID before, and if so, just return the same
# varname as we used last time.
varname <- uidToVarname$get(uid)
if (!is.null(varname)) {
return(as.symbol(varname))
}
# OK, we haven't seen this UID before. We need to figure out what variable
# name to use.
# Our first choice would be whatever varname the object itself has (the true
# var name of this metaReactive, or a name the user explicitly provided).
varname <- attr(rexpr, "shinymetaVarname", exact = TRUE)
# If there wasn't either a varname or explicitly provided name, just make
# a totally generic one up.
if (is.null(varname) || varname == "" || length(varname) != 1) {
varname <- .expansionContext$makeVarname()
} else {
if (!is.null(domain)) {
# Prefix with the module namespace; dashes aren't legal in R names.
varname <- gsub("-", "_", domain$ns(varname))
}
}
# Make sure we don't use a variable name that has already been used.
while (seenVarname$get(varname)) {
varname <- .expansionContext$makeVarname()
}
# Remember this UID/varname combination for the future.
uidToVarname$set(uid, varname)
# Make sure this varname doesn't get used again.
seenVarname$set(varname, TRUE)
# Since this is the first time we're seeing this object, now we need to
# generate its code and store it in our running list of dependencies.
expr <- rlang::expr(`<-`(!!as.symbol(varname), !!exec()))
dependencyCode <<- c(dependencyCode, list(expr))
# This is what we're returning to the caller; whomever wanted the code for
# this metaReactive is going to get this variable name instead.
as.symbol(varname)
}
on.exit(.globals$rexprMetaReadFilter <- oldFilter, add = TRUE, after = FALSE)
withMetaMode({
# Trigger evaluation of the ..., which will also cause dependencyCode to be
# populated. The value of list(...) should all be code expressions, unless
# the user passed in something wrong.
dot_args <- eval(substitute(alist(...)))
if (!is.null(names(dot_args))) {
stop(call. = FALSE, "Named ... arguments to expandChain are not supported")
}
res <- lapply(seq_along(dot_args), function(i) {
# Grab the nth element. We do it with this gross `..n` business because
# we want to make sure we trigger evaluation of the arguments one at a
# time. We can't use rlang's dots-related functions, because it eagerly
# expands the `!!` in arguments, which we want to leave alone.
#
# Use `withVisible` because invisible() arguments should have their
# deps inserted, but not their actual code. Note that metaReactives
# consider *themselves* their own dependencies, so for metaReactive
# this means the code that assigns it is created (`mr <- ...`),
# but the additional line for printing it (`mr`) will be suppressed.
x_vis <- withVisible(eval(as.symbol(paste0("..", i)), envir = environment()))
x <- x_vis$value
val <- if (is_comment(x)) {
# Comment strings become a `{comment; {}}` block so they survive.
do.call(metaExpr, list(rlang::expr({!!x; {}})))
} else if (is.language(x)) {
x
} else if (is.null(x)) {
x
} else {
stop(call. = FALSE, "expandChain() understands language objects, comment-strings, and NULL; but not ", class(x)[1], " objects")
}
# Collect (and reset) the deps accumulated while evaluating this
# argument, so each argument's deps precede its own code.
myDependencyCode <- dependencyCode
dependencyCode <<- list()
if (x_vis$visible) {
c(myDependencyCode, list(val))
} else {
myDependencyCode
}
})
res <- unlist(res, recursive = FALSE)
res <- res[!vapply(res, is.null, logical(1))]
# Expand into a block of code
metaExpr({!!!res})
})
}
# Prepend class `y` to a language object's class vector (deduplicated).
prefix_class <- function(x, y) {
  # Can't set attributes on a symbol, and non-language values don't need
  # flagging, so those pass through untouched.
  if (is.symbol(x) || !is.language(x)) {
    return(x)
  }
  oldClass(x) <- union(y, oldClass(x))
  x
}
# Drop class `y` from a language object's class vector.
remove_class <- function(x, y) {
  # Symbols and non-language values carry no classes we manage; pass
  # them through untouched.
  can_modify <- is.language(x) && !is.symbol(x)
  if (can_modify) {
    oldClass(x) <- setdiff(oldClass(x), y)
  }
  x
}
quotedList <- function(...) {
# NOTE(review): base enquote() accepts exactly one argument, so calling
# quotedList() with zero or more than one argument will error. Despite the
# name, a single argument yields quote(quote(value)) -- not a list.
# Confirm whether this is intended or dead/vestigial code.
enquote(...)
}
|
# Evaluate the speciation rate of the given diversification model at time tm.
# The model-specific rate function (e.g. lambda.rpd1) is looked up by name.
speciation_rate <- function(tm, tree, pars, model, soc) {
  rate_fn <- get(paste0("lambda.", model))
  rate_fn(tm, tree, pars, soc = soc)
}
# Total speciation rate at each time in x: the per-lineage rate of the given
# model multiplied by the number of lineages alive at that time.
sum_speciation_rate <- function(x, tree, pars, model, soc) {
  lineage_counts <- sapply(x, n_from_time, tree = tree, soc = soc)
  rate_fn <- get(paste0("lambda.", model))
  lineage_counts * rate_fn(x, tree, pars, soc = soc)
}
# Speciation rates
# Per-lineage speciation rate for the rpd5 model at time(s) tm:
#   pars[2] + pars[3] * N + pars[4] * (PD / N), clamped at zero.
lambda.rpd5 <- function(tm, tree, pars, soc) {
  # Phylogenetic diversity and lineage count at each time in tm.
  pd <- sapply(tm, phylodiversity, tree = tree, soc = soc)
  N <- sapply(tm, n_from_time, tree = tree, soc = soc)
  # pmax() clamps element-wise, keeping the result the same length as tm;
  # the previous max() collapsed a vector of rates to a single scalar,
  # which silently broke vectorized callers such as sum_speciation_rate.
  pmax(0, pars[2] + pars[3] * N + pars[4] * (pd / N))
}
# Per-lineage speciation rate for the rpd1 model at time(s) tm:
#   pars[2] + pars[3] * N, clamped at zero.
lambda.rpd1 <- function(tm, tree, pars, soc) {
  N <- sapply(tm, n_from_time, tree = tree, soc = soc)
  # pmax() clamps element-wise so vector tm yields a vector of rates;
  # the previous max() collapsed them to one scalar.
  pmax(0, pars[2] + pars[3] * N)
}
# Per-lineage speciation rate for the rpd5c model at time(s) tm. Like rpd5,
# but the crown age contribution (tm) is subtracted from the diversity term.
lambda.rpd5c <- function(tm, tree, pars, soc) {
  pd <- sapply(tm, phylodiversity, tree = tree, soc = soc) - tm
  N <- sapply(tm, n_from_time, tree = tree, soc = soc)
  # pmax() clamps element-wise, keeping one rate per element of tm;
  # the previous max() collapsed a vector of rates to a single scalar.
  pmax(0, pars[2] + pars[3] * N + pars[4] * pd / N)
}
#############################
| /R/speciation_rates.R | no_license | thijsjanzen/emphasis | R | false | false | 1,049 | r | speciation_rate <- function(tm,tree,pars,model,soc){
speciation_r = get(paste0("lambda.", model))
lambda = speciation_r(tm,tree,pars,soc=soc)
return(lambda)
}
# Total speciation rate at each time in x: the per-lineage rate of the given
# model multiplied by the number of lineages alive at that time.
sum_speciation_rate <- function(x, tree, pars, model, soc) {
  lineage_counts <- sapply(x, n_from_time, tree = tree, soc = soc)
  rate_fn <- get(paste0("lambda.", model))
  lineage_counts * rate_fn(x, tree, pars, soc = soc)
}
# Speciation rates
# Per-lineage speciation rate for the rpd5 model at time(s) tm:
#   pars[2] + pars[3] * N + pars[4] * (PD / N), clamped at zero.
lambda.rpd5 <- function(tm, tree, pars, soc) {
  # Phylogenetic diversity and lineage count at each time in tm.
  pd <- sapply(tm, phylodiversity, tree = tree, soc = soc)
  N <- sapply(tm, n_from_time, tree = tree, soc = soc)
  # pmax() clamps element-wise, keeping the result the same length as tm;
  # the previous max() collapsed a vector of rates to a single scalar,
  # which silently broke vectorized callers such as sum_speciation_rate.
  pmax(0, pars[2] + pars[3] * N + pars[4] * (pd / N))
}
# Per-lineage speciation rate for the rpd1 model at time(s) tm:
#   pars[2] + pars[3] * N, clamped at zero.
lambda.rpd1 <- function(tm, tree, pars, soc) {
  N <- sapply(tm, n_from_time, tree = tree, soc = soc)
  # pmax() clamps element-wise so vector tm yields a vector of rates;
  # the previous max() collapsed them to one scalar.
  pmax(0, pars[2] + pars[3] * N)
}
# Per-lineage speciation rate for the rpd5c model at time(s) tm. Like rpd5,
# but the crown age contribution (tm) is subtracted from the diversity term.
lambda.rpd5c <- function(tm, tree, pars, soc) {
  pd <- sapply(tm, phylodiversity, tree = tree, soc = soc) - tm
  N <- sapply(tm, n_from_time, tree = tree, soc = soc)
  # pmax() clamps element-wise, keeping one rate per element of tm;
  # the previous max() collapsed a vector of rates to a single scalar.
  pmax(0, pars[2] + pars[3] * N + pars[4] * pd / N)
}
#############################
|
#' Wrapper of \code{knitr::include_graphics} to Deal with URLs and Invalid File Types
#'
#' Deals with URL paths and invalid file types passed to \code{path}
#' of \code{\link[knitr]{include_graphics}}. When the output format
#' of the R Markdown is \code{PDF}, and an URL is passed to
#' \code{path}, the figure is automatically downloaded from the URL
#' and included using the local relative path.
#' If a figure has an invalid file extension for PDF output
#' (e.g. \code{.gif}, \code{.svg}), the function passed to
#' \code{handler} is used to override the default behavior:
#' inserting figures with \code{knitr::include_graphics}.
#'
#' @details Read more about using the function at
#' \url{http://bit.ly/include_graphics2}.
#'
#' @param path String. Path to a figure to be included. Can be either
#' an URL or a local path.
#' @param alt_path String. An alternative figure path for \code{path}
#' with invalid extensions. In the case of PDF ('LaTeX') output,
#' invalid extensions are \code{.gif}, \code{.svg}.
#' @param handler Function. A function with a single argument \code{path}.
#' Used to insert alternative contents, such as a piece of text,
#' when the figure cannot be inserted.
#' @param ... Other arguments to pass to
#' \code{\link[knitr]{include_graphics}}.
#'
#' @examples
#' png_url <- 'https://commonmark.org/images/markdown-mark.png'
#' gif_url <- 'https://media.giphy.com/media/k3dcUPvxuNpK/giphy.gif'
#'
#' \dontrun{
#' include_graphics2(gif_url, alt_path = png_url)
#' }
#'
#' @export
include_graphics2 <- function(path, alt_path = NULL, handler = function(path) knitr::asis_output(paste('View', tools::file_ext(path), 'at', path)), ...) {
  # Only LaTeX output needs special handling (URL download, invalid
  # extensions); every other format delegates straight to knitr.
  if (!knitr::is_latex_output()) {
    return(knitr::include_graphics(path, ...))
  }
  include_graphics_latex(path, alt_path, handler, ...)
}
#' Wrapper of \code{knitr::include_graphics} for PDF Output
#'
#' Deals with URL and GIFs. If an url is passed to
#' \code{path} of \code{\link[knitr]{include_graphics}},
#' the figure is automatically downloaded and included
#' using local relative path. If a figure with \code{.gif}
#' extension is included, a piece of text, rather than the
#' figure, is inserted.
#'
#' @importFrom tools file_ext file_path_sans_ext
#' @importFrom utils download.file
#' @importFrom knitr current_input
#' @keywords internal
include_graphics_latex <- function(path, alt_path = NULL, handler = function(path) knitr::asis_output(paste('View', tools::file_ext(path), 'at', path)), ...) {
  # use_alt_path() is tri-state:
  #   TRUE  -> invalid extension, usable alternative: switch to alt_path.
  #   FALSE -> invalid extension, no alternative: emit the handler's text.
  #   NULL  -> extension is fine: keep the original path.
  # The previous code routed this through ifelse() -- a vectorized data
  # function -- with a return() hidden inside one argument, and only worked
  # because ifelse(NULL, ...) happens to evaluate neither branch. Explicit
  # branching is equivalent and far less fragile.
  use_alt <- use_alt_path(path, alt_path)
  if (isTRUE(use_alt)) {
    path <- alt_path
  } else if (isFALSE(use_alt)) {
    return(handler(path))
  }
  # Remote figures: download into a chunk-labeled file beside the input
  # document so LaTeX can embed a local copy. (Checked after any alt_path
  # substitution so a local alternative is never "downloaded".)
  if (grepl('^https?://', path)) {
    dir_path <- paste0('downloadFigs4latex_',
                       file_path_sans_ext(current_input()))
    if (!dir.exists(dir_path)) dir.create(dir_path)
    file_path <- paste0(dir_path, '/',
                        knitr::opts_current$get()$label, '.',
                        file_ext(path))
    download.file(path, destfile = file_path)
    path <- file_path
  }
  # Insert Figure
  return(knitr::include_graphics(path, ...))
}
# Decide how include_graphics_latex() should treat `path`. The return value
# is tri-state, and callers depend on all three:
#   FALSE -> invalid extension, no alternative: fall back to the text handler.
#   TRUE  -> invalid extension, validated alternative: switch to alt_path.
#   NULL  -> valid extension: neither `if` fires and the function implicitly
#            returns invisible NULL (the value of the failed `if`).
# NOTE(review): the implicit NULL for valid paths is load-bearing -- the
# caller treats it as "keep the original path" -- so do not "fix" this to
# return(FALSE) without also changing the call sites.
use_alt_path <- function(path, alt_path) {
# Invalid img ext & no alt provided: Don't include in File
if (inval_latex_img(path) && is.null(alt_path)) return(FALSE)
# Invalid img ext with alt provided: insert alt-figure
if (inval_latex_img(path) && !is.null(alt_path)) {
# An unusable alternative is a programming error: fail hard.
stopifnot(!inval_latex_img(alt_path))
return(TRUE)
}
}
# Is the file extension of `path` one that LaTeX output cannot embed
# directly? Vectorized over path.
inval_latex_img <- function(path) {
  tools::file_ext(path) %in% c('svg', 'SVG', 'GIF', 'gif')
}
| /inst/slides/bin/include_graphics2.R | no_license | sahirbhatnagar/EPIB607 | R | false | false | 3,755 | r | #' Wrapper of \code{knitr::include_graphics} to Deal with URLs and Invalid File Types
#'
#' Deals with URL paths and invalid file types passed to \code{path}
#' of \code{\link[knitr]{include_graphics}}. When the output format
#' of the R Markdown is \code{PDF}, and an URL is passed to
#' \code{path}, the figure is automatically downloaded from the URL
#' and included using the local relative path.
#' If a figure has an invalid file extension for PDF output
#' (e.g. \code{.gif}, \code{.svg}), the function passed to
#' \code{handler} is used to override the default behavior:
#' inserting figures with \code{knitr::include_graphics}.
#'
#' @details Read more about using the function at
#' \url{http://bit.ly/include_graphics2}.
#'
#' @param path String. Path to a figure to be included. Can be either
#' an URL or a local path.
#' @param alt_path String. An alternative figure path for \code{path}
#' with invalid extensions. In the case of PDF ('LaTeX') output,
#' invalid extensions are \code{.gif}, \code{.svg}.
#' @param handler Function. A function with a single argument \code{path}.
#' Used to insert alternative contents, such as a piece of text,
#' when the figure cannot be inserted.
#' @param ... Other arguments to pass to
#' \code{\link[knitr]{include_graphics}}.
#'
#' @examples
#' png_url <- 'https://commonmark.org/images/markdown-mark.png'
#' gif_url <- 'https://media.giphy.com/media/k3dcUPvxuNpK/giphy.gif'
#'
#' \dontrun{
#' include_graphics2(gif_url, alt_path = png_url)
#' }
#'
#' @export
include_graphics2 <- function(path, alt_path = NULL, handler = function(path) knitr::asis_output(paste('View', tools::file_ext(path), 'at', path)), ...) {
  # Non-LaTeX output needs no special handling: hand straight to knitr.
  if (!knitr::is_latex_output()) {
    return(knitr::include_graphics(path, ...))
  }
  # PDF ('LaTeX') output: delegate to the helper that downloads URLs and
  # deals with extensions LaTeX cannot embed.
  include_graphics_latex(path, alt_path, handler, ...)
}
#' Wrapper of \code{knitr::include_graphics} for PDF Output
#'
#' Deals with URL and GIFs. If an url is passed to
#' \code{path} of \code{\link[knitr]{include_graphics}},
#' the figure is automatically downloaded and included
#' using local relative path. If a figure with \code{.gif}
#' extension is included, a piece of text, rather than the
#' figure, is inserted.
#'
#' @importFrom tools file_ext file_path_sans_ext
#' @importFrom utils download.file
#' @importFrom knitr current_input
#' @keywords internal
include_graphics_latex <- function(path, alt_path = NULL, handler = function(path) knitr::asis_output(paste('View', tools::file_ext(path), 'at', path)), ...) {
  # Resolve which figure to use.  use_alt_path() is three-valued:
  #   TRUE  -> `path` has a LaTeX-invalid extension and `alt_path` exists:
  #            switch to `alt_path`.
  #   FALSE -> invalid extension and no alternative: emit handler() output
  #            instead of a figure.
  #   NULL  -> the extension is fine: keep `path` unchanged.
  # The original funnelled this through ifelse(), which only worked because
  # ifelse() evaluates neither branch for a NULL test; the explicit chain
  # below makes the contract visible.  Resolving the path *before* the URL
  # test also fixes two latent bugs: a local alt_path for a remote figure
  # was previously passed to download.file(), and a remote alt_path for a
  # local figure was never downloaded at all.
  use_alt <- use_alt_path(path, alt_path)
  if (isFALSE(use_alt)) {
    return(handler(path))
  }
  if (isTRUE(use_alt)) {
    path <- alt_path
  }
  # URL: download next to the input document so the relative path survives
  # LaTeX compilation.
  if (grepl('^https?://', path)) {
    dir_path <- paste0('downloadFigs4latex_',
                       file_path_sans_ext(current_input()))
    if (!dir.exists(dir_path)) dir.create(dir_path)
    # Name the local copy after the current knitr chunk label.
    file_path <- paste0(dir_path, '/',
                        knitr::opts_current$get()$label, '.',
                        file_ext(path))
    download.file(path, destfile = file_path)
    path <- file_path
  }
  # Insert Figure
  return(knitr::include_graphics(path, ...))
}
# Decide whether the alternative figure path should replace `path`.
#
# Three-valued return -- the caller relies on all three states:
#   FALSE -> `path` has a LaTeX-invalid extension and no alternative was
#            supplied; the figure cannot be inserted.
#   TRUE  -> invalid extension, alternative supplied; use `alt_path`.
#   NULL  -> implicit fall-through when the extension is fine.  In the
#            caller, `ifelse(NULL, ...)` then evaluates neither branch,
#            so `path` is kept unchanged.  Do not "fix" this to return
#            FALSE without also changing the caller.
use_alt_path <- function(path, alt_path) {
# Invalid img ext & no alt provided: Don't include in File
if (inval_latex_img(path) && is.null(alt_path)) return(FALSE)
# Invalid img ext with alt provided: insert alt-figure
if (inval_latex_img(path) && !is.null(alt_path)) {
# The alternative must itself be embeddable by LaTeX.
stopifnot(!inval_latex_img(alt_path))
return(TRUE)
}
}
# Is `path` a figure type that LaTeX/pdflatex cannot embed directly?
#
# @param path Character vector of figure paths.
# @return Logical vector, TRUE where the extension is svg or gif.
#
# Fix: compare the extension case-insensitively.  The original list only
# covered all-lower and all-upper spellings ('svg','SVG','GIF','gif'),
# so mixed-case names such as "fig.Svg" or "anim.Gif" slipped through.
inval_latex_img <- function(path) {
  invalid_ext <- c('svg', 'gif')
  return(tolower(tools::file_ext(path)) %in% invalid_ext)
}
|
## ================== 0. Packages ------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the calling environment.  Tolerable in a
# standalone Shiny global.R, but avoid sourcing this file from a live session.
rm(list = ls())
# options(scipen = 999) 1018426823
library(shiny);library(ggplot2);library(dplyr)
library(maptools);library(shinydashboard);library(shinythemes)
library(DT);library(leaflet);library(shinyjs)
library(data.table); library(readxl); library(leaflet.extras)
library(geosphere); library(rgdal)
library(Hmisc); library(tidyr); library(tictoc)
library(shinycustomloader)
# Pir1, salario, sectorCiiu, estrato, estadoscivil, numero_beneficarios_cuota_monetaria, segmento_grupo_familair
# ================== 1. Data -----------------------------------------------
# Affiliated companies: keep only active ("al día") companies with coordinates.
empresa <- readRDS("Data/geo_empresas_may.rds") %>%
mutate(id_empresa = as.character(id_empresa)) %>%
filter(estado_empresa == "al día",
!is.na(cx_empresa))
str(empresa)
# Affiliated persons, categorical columns normalized to character.
persona <- readRDS("Data/geo_personas_may.rds") %>%
mutate(id_empresa = as.character(id_empresa),
Categoria = as.character(Categoria),
Segmento_poblacional = as.character(Segmento_poblacional))
str(persona)
# Fixed label vectors used to populate the UI selectors.
name_piramide1 <- c("1 Emp Grandes","2 Emp Medio","3 Empresas Pymes","4 Micro")
name_piramide2 <- c("Total","1.1 Platinum","1.2 Premium","2.1 Gold","2.2 Silver","3.1 VIP","3.2 VIP Estándar","4.1 Estándar")
name_segmento <- c("Alto","Joven","Medio","Básico")
name_segmento2 <- c("Total","Alto","Joven","Medio","Básico")
name_edad <- c("Total",names(table(persona$Edad_agru)))
name_localidad <- c(unique(empresa$LOCALIDAD))
# # Polygon map layers
cundi <- readRDS("Data/poligonos-localidades/Cundinamarca.rds")
localidad <- readOGR("Data/poligonos-localidades/poligonos-localidades.shp")
# departamento <- shapefile("Data/poligonos_dpto/depto.shp")
# Colsubsidio infrastructure: one data frame per service layer.  These are
# consumed below by the leafIcons* marker definitions, whose icon-vector
# lengths must match the row counts of these frames.
infra <- read_excel("Data/INFRAESTRUCTURA_PROPIA_COLSUBSIDIO.xlsx") %>% data.frame()
str(infra)
AGENCIA <- infra %>% filter(UES == "AGENCIA DE EMPLEO")
CSERVICIOS <- infra %>% filter(UES=="CENTROS DE SERVICIO")
EDUCACION <- infra %>% filter(UES=="EDUCACION")
MERCADEO_SOCIAL <- infra %>% filter(UES=="MERCADEO SOCIAL")
SUPERMERCADOS <- infra %>% filter(TIPO=="SUPERMERCADO")
MEDICAMENTOS <- infra %>% filter(TIPO=="DROGUERIA")
RYT <- infra %>% filter(UES=="RECREACION Y TURISMO")
SALUD <- infra %>% filter(UES=="SALUD")
VIVIENDA <- infra %>% filter(UES=="VIVIENDA")
# Colsubsidio infrastructure marker icons
# Marker icon sets for each Colsubsidio service layer.
#
# Bug fixed: every icon set after the first two was built from
# MERCADEO_SOCIAL instead of the data frame of its own layer, so the icon
# vectors had the wrong length for the EDUCACION, SUPERMERCADOS,
# MEDICAMENTOS, RYT, SALUD and VIVIENDA markers.  Each icons() call now
# tests the data frame it decorates.  (Both ifelse() branches point at the
# same PNG for those layers, so only the vector length was affected.)
# Also fixed: the SUPERMERCADOS condition compared against "SUPERMERCADOS"
# while the filter above uses TIPO == "SUPERMERCADO".
leafIconsAG <- icons(
  iconUrl = ifelse(AGENCIA$UES == "AGENCIA DE EMPLEO",
                   "Data/icons/ICONOS_ACT/LogColsubsidio.png",
                   "Data/icons/ICONOS_COLSUBSIDIO/LogColsubsidio.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsCS <- icons(
  iconUrl = ifelse(CSERVICIOS$UES == "CENTROS DE SERVICIO",
                   "Data/icons/ICONOS_ACT/Colsubsidio.png",
                   "Data/icons/ICONOS_COLSUBSIDIO/Colsubsidio2.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsED <- icons(
  iconUrl = ifelse(EDUCACION$UES == "EDUCACION",
                   "Data/icons/ICONOS_ACT/Educacion.png",
                   "Data/icons/ICONOS_ACT/Educacion.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsSP <- icons(
  iconUrl = ifelse(SUPERMERCADOS$TIPO == "SUPERMERCADO",
                   "Data/icons/ICONOS_ACT/Supermercados.png",
                   "Data/icons/ICONOS_ACT/Supermercados.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsDR <- icons(
  iconUrl = ifelse(MEDICAMENTOS$TIPO == "DROGUERIA",
                   "Data/icons/ICONOS_ACT/Farmacias.png",
                   "Data/icons/ICONOS_ACT/Farmacias.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsRYT <- icons(
  iconUrl = ifelse(RYT$UES == "RECREACION Y TURISMO",
                   "Data/icons/ICONOS_ACT/Recreacion.png",
                   "Data/icons/ICONOS_ACT/Recreacion.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsSL <- icons(
  iconUrl = ifelse(SALUD$UES == "SALUD",
                   "Data/icons/ICONOS_ACT/Salud.png",
                   "Data/icons/ICONOS_ACT/Salud.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsVV <- icons(
  iconUrl = ifelse(VIVIENDA$UES == "VIVIENDA",
                   "Data/icons/ICONOS_ACT/Vivienda.png",
                   "Data/icons/ICONOS_ACT/Vivienda.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
| /global.R | no_license | edgaruio/App_Afiliados | R | false | false | 4,483 | r | ## ================== 0. Paquetes ------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the calling environment.  Tolerable in a
# standalone Shiny global.R, but avoid sourcing this file from a live session.
rm(list = ls())
# options(scipen = 999) 1018426823
library(shiny);library(ggplot2);library(dplyr)
library(maptools);library(shinydashboard);library(shinythemes)
library(DT);library(leaflet);library(shinyjs)
library(data.table); library(readxl); library(leaflet.extras)
library(geosphere); library(rgdal)
library(Hmisc); library(tidyr); library(tictoc)
library(shinycustomloader)
# Pir1, salario, sectorCiiu, estrato, estadoscivil, numero_beneficarios_cuota_monetaria, segmento_grupo_familair
# ================== 1. Data -----------------------------------------------
# Affiliated companies: keep only active ("al día") companies with coordinates.
empresa <- readRDS("Data/geo_empresas_may.rds") %>%
mutate(id_empresa = as.character(id_empresa)) %>%
filter(estado_empresa == "al día",
!is.na(cx_empresa))
str(empresa)
# Affiliated persons, categorical columns normalized to character.
persona <- readRDS("Data/geo_personas_may.rds") %>%
mutate(id_empresa = as.character(id_empresa),
Categoria = as.character(Categoria),
Segmento_poblacional = as.character(Segmento_poblacional))
str(persona)
# Fixed label vectors used to populate the UI selectors.
name_piramide1 <- c("1 Emp Grandes","2 Emp Medio","3 Empresas Pymes","4 Micro")
name_piramide2 <- c("Total","1.1 Platinum","1.2 Premium","2.1 Gold","2.2 Silver","3.1 VIP","3.2 VIP Estándar","4.1 Estándar")
name_segmento <- c("Alto","Joven","Medio","Básico")
name_segmento2 <- c("Total","Alto","Joven","Medio","Básico")
name_edad <- c("Total",names(table(persona$Edad_agru)))
name_localidad <- c(unique(empresa$LOCALIDAD))
# # Polygon map layers
cundi <- readRDS("Data/poligonos-localidades/Cundinamarca.rds")
localidad <- readOGR("Data/poligonos-localidades/poligonos-localidades.shp")
# departamento <- shapefile("Data/poligonos_dpto/depto.shp")
# Colsubsidio infrastructure: one data frame per service layer.  These are
# consumed below by the leafIcons* marker definitions, whose icon-vector
# lengths must match the row counts of these frames.
infra <- read_excel("Data/INFRAESTRUCTURA_PROPIA_COLSUBSIDIO.xlsx") %>% data.frame()
str(infra)
AGENCIA <- infra %>% filter(UES == "AGENCIA DE EMPLEO")
CSERVICIOS <- infra %>% filter(UES=="CENTROS DE SERVICIO")
EDUCACION <- infra %>% filter(UES=="EDUCACION")
MERCADEO_SOCIAL <- infra %>% filter(UES=="MERCADEO SOCIAL")
SUPERMERCADOS <- infra %>% filter(TIPO=="SUPERMERCADO")
MEDICAMENTOS <- infra %>% filter(TIPO=="DROGUERIA")
RYT <- infra %>% filter(UES=="RECREACION Y TURISMO")
SALUD <- infra %>% filter(UES=="SALUD")
VIVIENDA <- infra %>% filter(UES=="VIVIENDA")
# Colsubsidio infrastructure marker icons
# Marker icon sets for each Colsubsidio service layer.
#
# Bug fixed: every icon set after the first two was built from
# MERCADEO_SOCIAL instead of the data frame of its own layer, so the icon
# vectors had the wrong length for the EDUCACION, SUPERMERCADOS,
# MEDICAMENTOS, RYT, SALUD and VIVIENDA markers.  Each icons() call now
# tests the data frame it decorates.  (Both ifelse() branches point at the
# same PNG for those layers, so only the vector length was affected.)
# Also fixed: the SUPERMERCADOS condition compared against "SUPERMERCADOS"
# while the filter above uses TIPO == "SUPERMERCADO".
leafIconsAG <- icons(
  iconUrl = ifelse(AGENCIA$UES == "AGENCIA DE EMPLEO",
                   "Data/icons/ICONOS_ACT/LogColsubsidio.png",
                   "Data/icons/ICONOS_COLSUBSIDIO/LogColsubsidio.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsCS <- icons(
  iconUrl = ifelse(CSERVICIOS$UES == "CENTROS DE SERVICIO",
                   "Data/icons/ICONOS_ACT/Colsubsidio.png",
                   "Data/icons/ICONOS_COLSUBSIDIO/Colsubsidio2.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsED <- icons(
  iconUrl = ifelse(EDUCACION$UES == "EDUCACION",
                   "Data/icons/ICONOS_ACT/Educacion.png",
                   "Data/icons/ICONOS_ACT/Educacion.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsSP <- icons(
  iconUrl = ifelse(SUPERMERCADOS$TIPO == "SUPERMERCADO",
                   "Data/icons/ICONOS_ACT/Supermercados.png",
                   "Data/icons/ICONOS_ACT/Supermercados.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsDR <- icons(
  iconUrl = ifelse(MEDICAMENTOS$TIPO == "DROGUERIA",
                   "Data/icons/ICONOS_ACT/Farmacias.png",
                   "Data/icons/ICONOS_ACT/Farmacias.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsRYT <- icons(
  iconUrl = ifelse(RYT$UES == "RECREACION Y TURISMO",
                   "Data/icons/ICONOS_ACT/Recreacion.png",
                   "Data/icons/ICONOS_ACT/Recreacion.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsSL <- icons(
  iconUrl = ifelse(SALUD$UES == "SALUD",
                   "Data/icons/ICONOS_ACT/Salud.png",
                   "Data/icons/ICONOS_ACT/Salud.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
leafIconsVV <- icons(
  iconUrl = ifelse(VIVIENDA$UES == "VIVIENDA",
                   "Data/icons/ICONOS_ACT/Vivienda.png",
                   "Data/icons/ICONOS_ACT/Vivienda.png"),
  iconWidth = 28, iconHeight = 45,
  iconAnchorX = 16, iconAnchorY = 40)
|
# Count weighted Guttman errors for each respondent.
#
# `data` is a respondent-by-item score matrix/data frame; it is first
# validated/coerced by checkData() (defined elsewhere in this package).
# Returns a numeric vector with one weighted error count per respondent.
#
# Changes vs. the original: the unused `minx` local was dropped, and the
# positional `T` arguments to matrix() are now the explicit `byrow = TRUE`
# (T is an ordinary variable and can be reassigned).
# (The closing brace of this function lives on the following line.)
guttmanErrors <- function(data){
data <- checkData(data)
maxx <- max(data)
N <- nrow(data)
J <- ncol(data)
# Dichotomize the polytomous scores: one 0/1 item-step indicator (ISRF)
# per score level 1..maxx.
Y <- matrix(t(data),1,N*J)
Z <- matrix(rep(Y,maxx),maxx,N*J,byrow = TRUE)
Z <- ifelse(Z < row(Z),0,1)
Z <- matrix(as.vector(Z),N,maxx*J,byrow = TRUE)
# COMPUTE WEIGHTS
if (maxx == 1) tmp.1 <- matrix(apply(data,2,tabulate, maxx), nrow=1) else tmp.1 <- apply(data,2,tabulate, maxx)
tmp.2 <- apply(tmp.1,2,function(x) rev(cumsum(rev(x))))+runif(J*maxx,0,1e-3)
# runif is added to avoid equal ranks
tmp.3 <- matrix(rank(-tmp.2),1,maxx*J)
# tmp.3 is a vector with the order of the ISRFs
# Sort ISRF columns by popularity; each respondent's weighted count sums,
# per positive step, the number of zeros preceding it in this ordering.
Z <- Z[,order(tmp.3)]
w <- apply(Z,1,function(x){sum(x*cumsum(abs(x-1)))})
return(w)
} | /GetR/R/guttmanErrors.R | no_license | ingted/R-Examples | R | false | false | 738 | r | guttmanErrors <- function(data){
# (Function header `guttmanErrors <- function(data){` precedes this line.)
# Validate/coerce the respondent-by-item score matrix; checkData() is
# defined elsewhere in this package.
data <- checkData(data)
maxx <- max(data)
# NOTE(review): `minx` is computed but never used afterwards.
minx <- min(data)
N <- nrow(data)
J <- ncol(data)
# Dichotomize the polytomous scores: one 0/1 item-step indicator (ISRF)
# per score level 1..maxx.  The trailing positional `T` is byrow = TRUE.
Y <- matrix(t(data),1,N*J)
Z <- matrix(rep(Y,maxx),maxx,N*J,T)
Z <- ifelse(Z < row(Z),0,1)
Z <- matrix(as.vector(Z),N,maxx*J,T)
# COMPUTE WEIGHTS
if (maxx == 1) tmp.1 <- matrix(apply(data,2,tabulate, maxx), nrow=1) else tmp.1 <- apply(data,2,tabulate, maxx)
tmp.2 <- apply(tmp.1,2,function(x) rev(cumsum(rev(x))))+runif(J*maxx,0,1e-3)
# runif is added to avoid equal ranks
tmp.3 <- matrix(rank(-tmp.2),1,maxx*J)
# tmp.3 is a vector with the order of the ISRFs
# Sort ISRF columns by popularity; each respondent's weighted Guttman
# error count sums, per positive step, the number of zeros preceding it.
Z <- Z[,order(tmp.3)]
w <- apply(Z,1,function(x){sum(x*cumsum(abs(x-1)))})
return(w)
}
#### load relevant packages ####
# library("sgejobs")
# sgejobs::job_single(
# "raj",
# create_shell = TRUE,
# queue = "bluejay",
# memory = "20G",
# command = "Rscript 02_raj.R"
# )
library("readxl")
library("spatialLIBD")
library("dplyr")
library("sessioninfo")
library("here")
library("scran")
library("purrr")
# Table S2: split by "Trait". There's 3 of them.
# Statistics available: FDR
# Table S3:
# Direction available: None
# Statistics available: Both FDR (P-value_Benjamini-Hochberg) and Bonferroni (P-value_BonferroniAdjusted) adjusted.
# Could subset each to < 0.05 (so 2 sets total).
### load get_ensemble function
source(here("code/14_external_gene_sets/get_ensembl_function.R"))
#### load modeling results ####
load(here(
"processed-data", "11_grey_matter_only", "wholegenome",
"Visium_SPG_AD_modeling_results.Rdata"
))
#### read in external gene sets ####
table_s2 <- read_excel(here("raw-data", "GeneSets", "1_Bulk_RNA-seq", "Raj et al", "Table S2.xlsx"))
head(table_s2)
# intronic_cluster_id cluster chr start end gene_id Beta SE `Z-score` `P-value` FDR Trait
# <chr> <chr> <dbl> <dbl> <dbl> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
# 1 10_3146136_3147307_clu_8247 clu_8247 10 3146136 3147307 PFKP -0.0793 0.0160 -4.97 0.000000967 0.0158 NEURITIC PLAQUES
# 2 10_3146980_3147307_clu_8247 clu_8247 10 3146980 3147307 PFKP 0.119 0.0231 5.14 0.000000416 0.0134 NEURITIC PLAQUES
# 3 10_3147351_3147585_clu_8247 clu_8247 10 3147351 3147585 PFKP 0.0898 0.0175 5.13 0.000000430 0.0134 NEURITIC PLAQUES
unique(table_s2$Trait)
# "NEURITIC PLAQUES", "AMYLOID", "Tangles"
# Keep only intronic clusters with FDR < 0.1, then split by trait.
table_s2 <- table_s2 |> dplyr::filter(FDR < 0.1)
# NOTE(review): table_s2_np is built but no gene list is derived from it
# below (only AMYLOID and Tangles feed raj_geneList) -- confirm intended.
table_s2_np <- table_s2 |> dplyr::filter(Trait == "NEURITIC PLAQUES")
table_s2_am <- table_s2 |> dplyr::filter(Trait == "AMYLOID")
genes_s2_am <- get_ensembl(table_s2_am, gene_id, "gene_id")
# NOTE(review): unique() has no na.rm argument; the extra argument is
# silently ignored, so NAs are NOT removed here.
genes_s2_am <- unique(genes_s2_am, na.rm = TRUE)
table_s2_ta <- table_s2 |> dplyr::filter(Trait == "Tangles")
genes_s2_ta <- get_ensembl(table_s2_ta, gene_id, "gene_id")
genes_s2_ta <- unique(genes_s2_ta, na.rm = TRUE)
# gene_id
# 1 AC015936
# 2 AC018730
# 5 AC068057
# 8 AC174470
# 9 ATP5J
# 10 C19orf26
# 11 FAM73B
# 12 FLJ27365
# 15 KIAA1211L
# 16 MRP63
# 17 PIDD
# 18 RP11-123K3
# 19 RP11-463D19
# 20 RP11-637O19
# 21 RP11-85M11
# 23 RP6-109B7
# 24 SARS
# 25 SUZ12P
nrow(table_s2)
# [1] 234
# > nrow(table_s2_np)
# [1] 2
# > nrow(table_s2_am)
# [1] 65
# > nrow(table_s2_ta)
# [1] 167
# load table 3
table_s3 <- read_excel(here("raw-data", "GeneSets", "1_Bulk_RNA-seq", "Raj et al", "Table S3.xlsx"))
# intronic_cluster gene_id `P-value` `P-value_Benjamini-Hochberg` `P-value_BonferroniAdjusted`
# <chr> <chr> <dbl> <dbl> <dbl>
# 1 chr10:clu_8247 PFKP 1.79e-28 4.95e-24 4.97e-24
# 2 chr14:clu_18324 NDRG2 2.03e-23 2.81e-19 5.62e-19
# 3 chr19:clu_21882 CTD-2527I21.4 2.74e-20 2.53e-16 7.59e-16
# 4 chr7:clu_28844 BCL7B 6.95e-19 4.81e-15 1.92e-14
table_s3 <- table_s3 |> dplyr::filter(`P-value_BonferroniAdjusted` < 0.1)
# > nrow(table_s3)
# [1] 99
table_s3 <- get_ensembl(table_s3, gene_id, "gene_id")
# > nrow(table_s3 |> dplyr::filter(is.na(gene_ensembl_id)))
# [1] 4
# Named gene sets passed to spatialLIBD::gene_set_enrichment() below.
raj_geneList <- list(
raj_table_2_am = genes_s2_am,
raj_table_2_ta = genes_s2_ta,
raj_table_3 = table_s3
)
#### calculate enrichment #####
# Enrichment (and, with reverse = TRUE, depletion) of the Raj gene sets
# within the spatial-domain modeling results at FDR < 0.1.
raj_enrichment <- gene_set_enrichment(
  raj_geneList,
  fdr_cut = 0.1,
  modeling_results = modeling_results,
  model_type = "enrichment"
)
raj_depleted <- gene_set_enrichment(
  raj_geneList,
  fdr_cut = 0.1,
  modeling_results = modeling_results,
  model_type = "enrichment", reverse = TRUE
)
##### Enrichment plotting #####
# dir.create(here("plots", "14_external_gene_sets"))
output_dir <- here("plots", "14_external_gene_sets")
# Render one enrichment heatmap to a PDF file.  The two plots below were
# identical except for their input and filename, so the duplicated
# pdf()/plot/dev.off() sequence is factored into this helper; on.exit()
# guarantees the graphics device is closed even if plotting errors out.
plot_enrichment_pdf <- function(enrichment, file) {
  pdf(file, width = 11)
  on.exit(dev.off(), add = TRUE)
  gene_set_enrichment_plot(
    enrichment,
    xlabs = unique(enrichment$ID),
    PThresh = 12,
    ORcut = 1.30103, # = -log10(0.05)
    enrichOnly = FALSE,
    layerHeights = c(0, seq_len(length(unique(enrichment$test)))) * 15,
    mypal = c("white", (grDevices::colorRampPalette(RColorBrewer::brewer.pal(
      9,
      "YlOrRd"
    )))(50)),
    cex = 1.2
  )
}
plot_enrichment_pdf(raj_enrichment, paste0(output_dir, "/02_raj_enriched.pdf"))
plot_enrichment_pdf(raj_depleted, paste0(output_dir, "/02_raj_depleted.pdf"))
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
session_info()
| /code/14_external_gene_sets/02_raj.R | no_license | LieberInstitute/Visium_SPG_AD | R | false | false | 5,228 | r | #### load relevant packages ####
# library("sgejobs")
# sgejobs::job_single(
# "raj",
# create_shell = TRUE,
# queue = "bluejay",
# memory = "20G",
# command = "Rscript 02_raj.R"
# )
library("readxl")
library("spatialLIBD")
library("dplyr")
library("sessioninfo")
library("here")
library("scran")
library("purrr")
# Table S2: split by "Trait". There's 3 of them.
# Statistics available: FDR
# Table S3:
# Direction available: None
# Statistics available: Both FDR (P-value_Benjamini-Hochberg) and Bonferroni (P-value_BonferroniAdjusted) adjusted.
# Could subset each to < 0.05 (so 2 sets total).
### load get_ensemble function
source(here("code/14_external_gene_sets/get_ensembl_function.R"))
#### load modeling results ####
load(here(
"processed-data", "11_grey_matter_only", "wholegenome",
"Visium_SPG_AD_modeling_results.Rdata"
))
#### read in external gene sets ####
table_s2 <- read_excel(here("raw-data", "GeneSets", "1_Bulk_RNA-seq", "Raj et al", "Table S2.xlsx"))
head(table_s2)
# intronic_cluster_id cluster chr start end gene_id Beta SE `Z-score` `P-value` FDR Trait
# <chr> <chr> <dbl> <dbl> <dbl> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <chr>
# 1 10_3146136_3147307_clu_8247 clu_8247 10 3146136 3147307 PFKP -0.0793 0.0160 -4.97 0.000000967 0.0158 NEURITIC PLAQUES
# 2 10_3146980_3147307_clu_8247 clu_8247 10 3146980 3147307 PFKP 0.119 0.0231 5.14 0.000000416 0.0134 NEURITIC PLAQUES
# 3 10_3147351_3147585_clu_8247 clu_8247 10 3147351 3147585 PFKP 0.0898 0.0175 5.13 0.000000430 0.0134 NEURITIC PLAQUES
unique(table_s2$Trait)
# "NEURITIC PLAQUES", "AMYLOID", "Tangles"
# Keep only intronic clusters with FDR < 0.1, then split by trait.
table_s2 <- table_s2 |> dplyr::filter(FDR < 0.1)
# NOTE(review): table_s2_np is built but no gene list is derived from it
# below (only AMYLOID and Tangles feed raj_geneList) -- confirm intended.
table_s2_np <- table_s2 |> dplyr::filter(Trait == "NEURITIC PLAQUES")
table_s2_am <- table_s2 |> dplyr::filter(Trait == "AMYLOID")
genes_s2_am <- get_ensembl(table_s2_am, gene_id, "gene_id")
# NOTE(review): unique() has no na.rm argument; the extra argument is
# silently ignored, so NAs are NOT removed here.
genes_s2_am <- unique(genes_s2_am, na.rm = TRUE)
table_s2_ta <- table_s2 |> dplyr::filter(Trait == "Tangles")
genes_s2_ta <- get_ensembl(table_s2_ta, gene_id, "gene_id")
genes_s2_ta <- unique(genes_s2_ta, na.rm = TRUE)
# gene_id
# 1 AC015936
# 2 AC018730
# 5 AC068057
# 8 AC174470
# 9 ATP5J
# 10 C19orf26
# 11 FAM73B
# 12 FLJ27365
# 15 KIAA1211L
# 16 MRP63
# 17 PIDD
# 18 RP11-123K3
# 19 RP11-463D19
# 20 RP11-637O19
# 21 RP11-85M11
# 23 RP6-109B7
# 24 SARS
# 25 SUZ12P
nrow(table_s2)
# [1] 234
# > nrow(table_s2_np)
# [1] 2
# > nrow(table_s2_am)
# [1] 65
# > nrow(table_s2_ta)
# [1] 167
# load table 3
table_s3 <- read_excel(here("raw-data", "GeneSets", "1_Bulk_RNA-seq", "Raj et al", "Table S3.xlsx"))
# intronic_cluster gene_id `P-value` `P-value_Benjamini-Hochberg` `P-value_BonferroniAdjusted`
# <chr> <chr> <dbl> <dbl> <dbl>
# 1 chr10:clu_8247 PFKP 1.79e-28 4.95e-24 4.97e-24
# 2 chr14:clu_18324 NDRG2 2.03e-23 2.81e-19 5.62e-19
# 3 chr19:clu_21882 CTD-2527I21.4 2.74e-20 2.53e-16 7.59e-16
# 4 chr7:clu_28844 BCL7B 6.95e-19 4.81e-15 1.92e-14
table_s3 <- table_s3 |> dplyr::filter(`P-value_BonferroniAdjusted` < 0.1)
# > nrow(table_s3)
# [1] 99
table_s3 <- get_ensembl(table_s3, gene_id, "gene_id")
# > nrow(table_s3 |> dplyr::filter(is.na(gene_ensembl_id)))
# [1] 4
# Named gene sets passed to spatialLIBD::gene_set_enrichment() below.
raj_geneList <- list(
raj_table_2_am = genes_s2_am,
raj_table_2_ta = genes_s2_ta,
raj_table_3 = table_s3
)
#### calculate enrichment #####
# Enrichment (and, with reverse = TRUE, depletion) of the Raj gene sets
# within the spatial-domain modeling results at FDR < 0.1.
raj_enrichment <- gene_set_enrichment(
  raj_geneList,
  fdr_cut = 0.1,
  modeling_results = modeling_results,
  model_type = "enrichment"
)
raj_depleted <- gene_set_enrichment(
  raj_geneList,
  fdr_cut = 0.1,
  modeling_results = modeling_results,
  model_type = "enrichment", reverse = TRUE
)
##### Enrichment plotting #####
# dir.create(here("plots", "14_external_gene_sets"))
output_dir <- here("plots", "14_external_gene_sets")
# Render one enrichment heatmap to a PDF file.  The two plots below were
# identical except for their input and filename, so the duplicated
# pdf()/plot/dev.off() sequence is factored into this helper; on.exit()
# guarantees the graphics device is closed even if plotting errors out.
plot_enrichment_pdf <- function(enrichment, file) {
  pdf(file, width = 11)
  on.exit(dev.off(), add = TRUE)
  gene_set_enrichment_plot(
    enrichment,
    xlabs = unique(enrichment$ID),
    PThresh = 12,
    ORcut = 1.30103, # = -log10(0.05)
    enrichOnly = FALSE,
    layerHeights = c(0, seq_len(length(unique(enrichment$test)))) * 15,
    mypal = c("white", (grDevices::colorRampPalette(RColorBrewer::brewer.pal(
      9,
      "YlOrRd"
    )))(50)),
    cex = 1.2
  )
}
plot_enrichment_pdf(raj_enrichment, paste0(output_dir, "/02_raj_enriched.pdf"))
plot_enrichment_pdf(raj_depleted, paste0(output_dir, "/02_raj_depleted.pdf"))
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
session_info()
|
#############################
# < Yi Qu >
# STAT W4240
# Homework <HW 02> , Problem <1>
# < Homework Due Dat : Sep 30 >
#############################
#################
# Setup
#################
# NOTE(review): an absolute setwd() makes the script machine-specific;
# prefer running from the project directory or using relative paths.
setwd("C:/Users/yi/Desktop/W4240/hw02")
#################
# Problem 1a
#################
#----- START YOUR CODE BLOCK HERE -----#
# Load the data and compute row and column means.
tb1<-read.csv("hw02_q1_p1_fall14.csv",header=T)
# NOTE(review): fix() opens an interactive editor and blocks
# non-interactive runs (e.g. Rscript).
fix(tb1)
dim(tb1)
rowmeans<-apply(tb1,1,mean)
columnmeans<-apply(tb1,2,mean)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1b
#################
#----- START YOUR CODE BLOCK HERE -----#
# Center the columns (subtract column means) and form the covariance matrix.
center.tb1<-scale(tb1,scale=FALSE)
center.tb1[1:5,]
#equal to tb1-columnmeans
#center.tb1 <- apply(tb1,2,function(y)y-mean(y))
cov.matrix<-cov(center.tb1)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1c
#################
#----- START YOUR CODE BLOCK HERE -----#
# Eigendecomposition of the covariance matrix (principal directions).
eigen(cov.matrix)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1d
#################
#----- START YOUR CODE BLOCK HERE -----#
# PCA via princomp: loadings = eigenvectors, scores = projected data.
pc = princomp(tb1,scores=T)
names(pc)
pc$loadings[1:5,]
pc$scores[1:5,]
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1e
#################
#----- START YOUR CODE BLOCK HERE -----#
# Scree-style plot: proportion of variance explained by each component.
names(pc)
x<-c('Comp1','Comp2','Comp3', 'Comp4', 'Comp5')
plot(factor(x),pc$sdev^2/sum(pc$sdev^2),
ylab="Proportion of Variance")
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1f
#################
#----- START YOUR CODE BLOCK HERE -----#
# Load the second data set and run PCA on it.
tb2 <- read.csv("hw02_q1_p2_fall14.csv", header = TRUE)
pc2 <- princomp(tb2, scores = TRUE)
tb2.scores <- pc2$scores
tb2.scores
tb2.loadings <- pc2$loadings
# NOTE(review): fix() opens an interactive editor; it blocks Rscript runs.
fix(tb2.loadings)
tb2.columnmeans <- apply(tb2, 2, mean)
center.tb2 <- scale(tb2, scale = FALSE)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1g
#################
#----- START YOUR CODE BLOCK HERE -----#
# Rank-2 reconstruction of the centered data: scores of the first two
# components times the transpose of their loadings.
two.center.tb2 <- tb2.scores[, 1:2] %*% t(pc2$loadings[, 1:2])
two.center.tb2[1:5, ]
center.tb2
error <- center.tb2 - two.center.tb2
# Bug fixed: `Euclidean` was indexed before it was ever created, which
# raises "object 'Euclidean' not found".  Pre-allocate the result vector
# and brace the loop body.
Euclidean <- numeric(5)
for (i in 1:5) {
  Euclidean[i] <- sqrt(sum((error[i, ])^2))
}
Euclidean
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1h
#################
#----- START YOUR CODE BLOCK HERE -----#
# The residuals lie in the orthogonal complement of the rank-2
# reconstruction, so every entry of this product is ~0 (up to floating
# point noise).
error %*% t(two.center.tb2)
# orthogonality
#----- END YOUR CODE BLOCK HERE -----#
| /hw02/hw02_q1.R | no_license | quyiwode/Data-Mining-in-R | R | false | false | 2,325 | r | #############################
# < Yi Qu >
# STAT W4240
# Homework <HW 02> , Problem <1>
# < Homework Due Dat : Sep 30 >
#############################
#################
# Setup
#################
# NOTE(review): an absolute setwd() makes the script machine-specific;
# prefer running from the project directory or using relative paths.
setwd("C:/Users/yi/Desktop/W4240/hw02")
#################
# Problem 1a
#################
#----- START YOUR CODE BLOCK HERE -----#
# Load the data and compute row and column means.
tb1<-read.csv("hw02_q1_p1_fall14.csv",header=T)
# NOTE(review): fix() opens an interactive editor and blocks
# non-interactive runs (e.g. Rscript).
fix(tb1)
dim(tb1)
rowmeans<-apply(tb1,1,mean)
columnmeans<-apply(tb1,2,mean)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1b
#################
#----- START YOUR CODE BLOCK HERE -----#
# Center the columns (subtract column means) and form the covariance matrix.
center.tb1<-scale(tb1,scale=FALSE)
center.tb1[1:5,]
#equal to tb1-columnmeans
#center.tb1 <- apply(tb1,2,function(y)y-mean(y))
cov.matrix<-cov(center.tb1)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1c
#################
#----- START YOUR CODE BLOCK HERE -----#
# Eigendecomposition of the covariance matrix (principal directions).
eigen(cov.matrix)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1d
#################
#----- START YOUR CODE BLOCK HERE -----#
# PCA via princomp: loadings = eigenvectors, scores = projected data.
pc = princomp(tb1,scores=T)
names(pc)
pc$loadings[1:5,]
pc$scores[1:5,]
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1e
#################
#----- START YOUR CODE BLOCK HERE -----#
# Scree-style plot: proportion of variance explained by each component.
names(pc)
x<-c('Comp1','Comp2','Comp3', 'Comp4', 'Comp5')
plot(factor(x),pc$sdev^2/sum(pc$sdev^2),
ylab="Proportion of Variance")
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1f
#################
#----- START YOUR CODE BLOCK HERE -----#
# Load the second data set and run PCA on it.
tb2 <- read.csv("hw02_q1_p2_fall14.csv", header = TRUE)
pc2 <- princomp(tb2, scores = TRUE)
tb2.scores <- pc2$scores
tb2.scores
tb2.loadings <- pc2$loadings
# NOTE(review): fix() opens an interactive editor; it blocks Rscript runs.
fix(tb2.loadings)
tb2.columnmeans <- apply(tb2, 2, mean)
center.tb2 <- scale(tb2, scale = FALSE)
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1g
#################
#----- START YOUR CODE BLOCK HERE -----#
# Rank-2 reconstruction of the centered data: scores of the first two
# components times the transpose of their loadings.
two.center.tb2 <- tb2.scores[, 1:2] %*% t(pc2$loadings[, 1:2])
two.center.tb2[1:5, ]
center.tb2
error <- center.tb2 - two.center.tb2
# Bug fixed: `Euclidean` was indexed before it was ever created, which
# raises "object 'Euclidean' not found".  Pre-allocate the result vector
# and brace the loop body.
Euclidean <- numeric(5)
for (i in 1:5) {
  Euclidean[i] <- sqrt(sum((error[i, ])^2))
}
Euclidean
#----- END YOUR CODE BLOCK HERE -----#
#################
# Problem 1h
#################
#----- START YOUR CODE BLOCK HERE -----#
# The residuals lie in the orthogonal complement of the rank-2
# reconstruction, so every entry of this product is ~0 (up to floating
# point noise).
error %*% t(two.center.tb2)
# orthogonality
#----- END YOUR CODE BLOCK HERE -----#
|
# Simulation driver: two-sample non-inferiority test for a difference in
# proportions (margin m2) under MNAR missingness, analyzed via multiple
# imputation.  bin2mi supplies dt_p2 / p2_mle / p2_full_ci / mi / mi_comb
# and the Wilson-Newcombe helpers used below.
# NOTE(review): prefer TRUE/FALSE over the reassignable T/F shorthands.
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(tidyr, warn.conflicts = F, quietly = T)
library(bin2mi, warn.conflicts = F, quietly = T)
# Scenario parameters for set_n = 4.
pc <- 0.9
# NOTE(review): `<<-` at top level behaves like `<-` here; kept as-is.
m2 <<- 0.1
pt <- pc - m2
n_obs <- 100
do_rate <- 0.1
num_n_mi <- 2
num_m_mi <- 100
set_n <- 4
mp_y1 <- 0.107
mu_k <- 1.078
sd_k <- 0.12
# Run 10100 simulation replicates in parallel.  Each replicate: generate
# fully-observed data, impose MNAR dropout, multiply impute, and compute
# confidence intervals for pc - pt by several combining rules.
x1 <- parallel::mclapply(X = 1:10100,
mc.cores = 20,
FUN= function(x)
{
# Replicate-specific seed, offset by the scenario number.
set.seed(13000*set_n + x)
###################################################
# fully obnserved data generation and analysis #
###################################################
dtfull <- dt_p2(n = n_obs, pc = pc, pt = pt)
full_mle <- p2_mle(dtfull, m2 = m2)
full_ci <-
bind_rows(p2_full_ci(full_mle, "wald", 0.05),
p2_full_ci(full_mle, "fm", 0.05),
p2_full_ci(full_mle, "wn", 0.05))%>%
dplyr::mutate(sim_id = x,
set_n = set_n)%>%
dplyr::rename(n_obs = c_n_obs)
########################################
# missing data generation and analysis #
########################################
#impose MNAR
# Per arm: dropout probability depends on the (unobserved) outcome y,
# with mp_y0 chosen so the overall dropout rate hits do_rate.
dtmiss <-
dtfull%>%
tidyr::nest(-trt)%>%
dplyr::mutate(mp_y1_val = mp_y1,
mp_y0_val = ifelse(trt == 'c',
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pc),
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pt)))%>%
dplyr::mutate(data_m = purrr::pmap(list(data, mp_y1_val, mp_y0_val),
.f = function(x, mp_y1_val, mp_y0_val){
x%>%
dplyr::filter(y == 1)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y1_val))%>%
dplyr::bind_rows(
x%>%
dplyr::filter(y == 0)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y0_val)))%>%
dplyr::mutate(
y.m = ifelse(r==1 ,NA_integer_ ,y)
)
}))%>%
dplyr::select(-c(data, mp_y1_val, mp_y0_val))%>%
tidyr::unnest()
#check do rate
do_check <- dtmiss%>%
dplyr::group_by(trt)%>%
dplyr::summarise(do = mean(r))%>%
dplyr::mutate(set_n = set_n)
del_seq <- 0
#if do rate is zero for one of the groups, delete observations again
# NOTE(review): the redraw seeds 1234*del_seq do not involve x, so every
# replicate that needs a k-th redraw reuses the same seed -- confirm this
# is acceptable for the study.  The redraw body duplicates the dtmiss
# pipeline above verbatim.
while(sum(dtmiss$r[dtmiss$trt=='c']) == 0 || sum(dtmiss$r[dtmiss$trt=='t']) == 0){
del_seq <- del_seq + 1
set.seed(1234*del_seq)
dtmiss <-
dtfull%>%
tidyr::nest(-trt)%>%
dplyr::mutate(mp_y1_val = mp_y1,
mp_y0_val = ifelse(trt == 'c',
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pc),
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pt)))%>%
dplyr::mutate(data_m = purrr::pmap(list(data, mp_y1_val, mp_y0_val),
.f = function(x, mp_y1_val, mp_y0_val){
x%>%
dplyr::filter(y == 1)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y1_val))%>%
dplyr::bind_rows(
x%>%
dplyr::filter(y == 0)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y0_val)))%>%
dplyr::mutate(
y.m = ifelse(r==1 ,NA_integer_ ,y)
)
}))%>%
dplyr::select(-c(data, mp_y1_val, mp_y0_val))%>%
tidyr::unnest()
}
# MI is attempted only when, in both arms, the observed outcome mean is
# strictly inside (0, 1); a degenerate arm would break the imputation model.
check_ymean <- dtmiss%>%
filter(r==0)%>%
group_by(trt)%>%
summarise(ymean = mean(y.m))%>%
dplyr::mutate(check=ymean>0&ymean<1)%>%
dplyr::pull(check)
if (check_ymean%>%all()){
#MI
dt_mi <-
dtmiss%>%
split(.$trt)%>%
purrr::map_df(mi, n_mi = num_n_mi, m_mi = num_m_mi, mu_k = mu_k, sd_k = sd_k, .id='trt')
#calculate estimate for difference in prorpotions and its variance terms
dt_mi_est <- p2d_mi(dt_mi, m2 = m2)
#combine MI results and calculate CIs using different methods
wald_mi <-
dt_mi_est%>%
mi_comb(level=2, phat = 'phat_d', var_phat = 'var_d')%>%
dplyr::mutate(method = "wald",
lower_bound = qbar - qt(0.975, v)*sqrt(t),
upper_bound = qbar + qt(0.975, v)*sqrt(t))
fm_mi <-
dt_mi_est%>%
mi_comb(level=2, phat = 'phat_d', var_phat = 'var_dr')%>%
dplyr::mutate(method = "fm",
lower_bound = qbar - qt(0.975, v)*sqrt(t),
upper_bound = qbar + qt(0.975, v)*sqrt(t))
wn_plug_pc <-
dt_mi_est%>%
mi_comb(level=2, phat = 'c_phat', var_phat = 'c_phat_var')%>%
dplyr::mutate(method = "wn-plug for pc",
lower_bound = pmap_dbl(list(phat = qbar), lb_wn, z = qnorm(0.975), n_obs = n_obs),
upper_bound = pmap_dbl(list(phat = qbar), ub_wn, z = qnorm(0.975), n_obs = n_obs))
wn_plug_pt <-
dt_mi_est%>%
mi_comb(level=2, phat = 't_phat', var_phat = 't_phat_var')%>%
dplyr::mutate(method = "wn-plug for pt",
lower_bound = pmap_dbl(list(phat = qbar), lb_wn, z = qnorm(0.975), n_obs = n_obs),
upper_bound = pmap_dbl(list(phat = qbar), ub_wn, z = qnorm(0.975), n_obs = n_obs))
# Newcombe-style interval for the difference, built from the per-arm
# plug-in Wilson bounds.
wn_plug <-
tibble(method = "wn-plug",
pc = wn_plug_pc$qbar, lb_pc = wn_plug_pc$lower_bound, ub_pc = wn_plug_pc$upper_bound,
pt = wn_plug_pt$qbar, lb_pt = wn_plug_pt$lower_bound, ub_pt = wn_plug_pt$upper_bound,
qbar = wn_plug_pc$qbar - wn_plug_pt$qbar)%>%
dplyr::mutate(lower_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), lb_wn_p2),
upper_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), ub_wn_p2))%>%
dplyr::select(method, qbar, lower_bound, upper_bound)
wn_mi_pc <-
dt_mi_est%>%
mi_comb(level=2, phat = 'c_phat', var_phat = 'c_phat_var')%>%
dplyr::mutate(method = "wn-mi for pc",
lower_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), lb_wn_ign, n_obs = n_obs),
upper_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), ub_wn_ign, n_obs = n_obs))
wn_mi_pt <-
dt_mi_est%>%
mi_comb(level=2, phat = 't_phat', var_phat = 't_phat_var')%>%
dplyr::mutate(method = "wn-mi for pt",
lower_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), lb_wn_ign, n_obs = n_obs),
upper_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), ub_wn_ign, n_obs = n_obs))
# Same Newcombe construction, but from the MI-adjusted per-arm bounds.
wn_mi <-
tibble(method = "wn-mi",
pc = wn_mi_pc$qbar, lb_pc = wn_mi_pc$lower_bound, ub_pc = wn_mi_pc$upper_bound,
pt = wn_mi_pt$qbar, lb_pt = wn_mi_pt$lower_bound, ub_pt = wn_mi_pt$upper_bound,
qbar = wn_mi_pc$qbar - wn_mi_pt$qbar)%>%
dplyr::mutate(lower_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), lb_wn_p2),
upper_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), ub_wn_p2))%>%
dplyr::select(method, qbar, lower_bound, upper_bound)
mi_all <-
bind_rows(wald_mi, fm_mi, wn_plug, wn_mi,
wn_plug_pc, wn_plug_pt, wn_mi_pc, wn_mi_pt)%>%
dplyr::mutate(sim_id = x,
set_n = set_n,
mu_k = mu_k,
sd_k = sd_k)
out <- list(full_ci, do_check, mi_all)%>%
purrr::set_names(c("full_ci", "do_check", "mi_all"))
}
else{
out <- list('simulation was not completed due to phat = 1 in one of the arms')%>%
purrr::set_names('err')
}
return(out)
})
# Persist all replicates for this scenario.
saveRDS(x1, sprintf("results/p2_mnar_set_n%s.rds",
set_n))
| /pgms_slurm/p2_mnar/p2_mnar_set_n4.R | no_license | yuliasidi/wilson_newcombe | R | false | false | 8,010 | r | library(dplyr, warn.conflicts = F, quietly = T)
# Simulation driver: two-sample non-inferiority test for a difference in
# proportions (margin m2) under MNAR missingness, analyzed via multiple
# imputation.  bin2mi supplies dt_p2 / p2_mle / mi / mi_comb and the
# Wilson-Newcombe helpers used below.
# NOTE(review): prefer TRUE/FALSE over the reassignable T/F shorthands.
library(purrr, warn.conflicts = F, quietly = T)
library(tidyr, warn.conflicts = F, quietly = T)
library(bin2mi, warn.conflicts = F, quietly = T)
# Scenario parameters for set_n = 4.
pc <- 0.9
# NOTE(review): `<<-` at top level behaves like `<-` here; kept as-is.
m2 <<- 0.1
pt <- pc - m2
n_obs <- 100
do_rate <- 0.1
num_n_mi <- 2
num_m_mi <- 100
set_n <- 4
mp_y1 <- 0.107
mu_k <- 1.078
sd_k <- 0.12
# Run 10100 independent simulation replicates on 20 forked workers.
# Each replicate: simulate complete two-arm binary data, impose MNAR
# missingness, multiply impute, and compute non-inferiority CIs by several
# methods.  Relies on bin2mi helpers (dt_p2, p2_mle, p2_full_ci, mp_y0, mi,
# p2d_mi, mi_comb, lb_wn*/ub_wn*) and the scenario constants defined above.
x1 <- parallel::mclapply(X = 1:10100,
mc.cores = 20,
FUN= function(x)
{
# Reproducible per-replicate seed, offset by scenario id
set.seed(13000*set_n + x)
###################################################
# fully observed data generation and analysis #
###################################################
dtfull <- dt_p2(n = n_obs, pc = pc, pt = pt)
full_mle <- p2_mle(dtfull, m2 = m2)
# Reference CIs on the complete (pre-missingness) data: Wald, FM, Wilson-Newcombe
full_ci <-
bind_rows(p2_full_ci(full_mle, "wald", 0.05),
p2_full_ci(full_mle, "fm", 0.05),
p2_full_ci(full_mle, "wn", 0.05))%>%
dplyr::mutate(sim_id = x,
set_n = set_n)%>%
dplyr::rename(n_obs = c_n_obs)
########################################
# missing data generation and analysis #
########################################
#impose MNAR: the missingness probability depends on the (partly unobserved)
#outcome y, with mp_y0 chosen so the overall drop-out rate hits do_rate
dtmiss <-
dtfull%>%
tidyr::nest(-trt)%>%
dplyr::mutate(mp_y1_val = mp_y1,
mp_y0_val = ifelse(trt == 'c',
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pc),
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pt)))%>%
dplyr::mutate(data_m = purrr::pmap(list(data, mp_y1_val, mp_y0_val),
.f = function(x, mp_y1_val, mp_y0_val){
x%>%
dplyr::filter(y == 1)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y1_val))%>%
dplyr::bind_rows(
x%>%
dplyr::filter(y == 0)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y0_val)))%>%
dplyr::mutate(
y.m = ifelse(r==1 ,NA_integer_ ,y)
)
}))%>%
dplyr::select(-c(data, mp_y1_val, mp_y0_val))%>%
tidyr::unnest()
#check do rate
do_check <- dtmiss%>%
dplyr::group_by(trt)%>%
dplyr::summarise(do = mean(r))%>%
dplyr::mutate(set_n = set_n)
del_seq <- 0
#if do rate is zero for one of the groups, delete observations again
#NOTE(review): the reseed below uses 1234*del_seq, which is NOT
#replicate-specific, so every replicate that enters this branch redraws
#from the same seed sequence -- confirm this is intended
while(sum(dtmiss$r[dtmiss$trt=='c']) == 0 || sum(dtmiss$r[dtmiss$trt=='t']) == 0){
del_seq <- del_seq + 1
set.seed(1234*del_seq)
dtmiss <-
dtfull%>%
tidyr::nest(-trt)%>%
dplyr::mutate(mp_y1_val = mp_y1,
mp_y0_val = ifelse(trt == 'c',
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pc),
mp_y0(do_tar = do_rate, mp_y1 = mp_y1, p_y1 = pt)))%>%
dplyr::mutate(data_m = purrr::pmap(list(data, mp_y1_val, mp_y0_val),
.f = function(x, mp_y1_val, mp_y0_val){
x%>%
dplyr::filter(y == 1)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y1_val))%>%
dplyr::bind_rows(
x%>%
dplyr::filter(y == 0)%>%
dplyr::mutate(r = rbinom(n(), 1, mp_y0_val)))%>%
dplyr::mutate(
y.m = ifelse(r==1 ,NA_integer_ ,y)
)
}))%>%
dplyr::select(-c(data, mp_y1_val, mp_y0_val))%>%
tidyr::unnest()
}
#proceed only when the observed response proportion is strictly inside (0, 1)
#in both arms; otherwise the variance estimates degenerate
check_ymean <- dtmiss%>%
filter(r==0)%>%
group_by(trt)%>%
summarise(ymean = mean(y.m))%>%
dplyr::mutate(check=ymean>0&ymean<1)%>%
dplyr::pull(check)
if (check_ymean%>%all()){
#MI: impute each arm separately under the k-sensitivity model
dt_mi <-
dtmiss%>%
split(.$trt)%>%
purrr::map_df(mi, n_mi = num_n_mi, m_mi = num_m_mi, mu_k = mu_k, sd_k = sd_k, .id='trt')
#calculate estimate for difference in proportions and its variance terms
dt_mi_est <- p2d_mi(dt_mi, m2 = m2)
#combine MI results and calculate CIs using different methods
wald_mi <-
dt_mi_est%>%
mi_comb(level=2, phat = 'phat_d', var_phat = 'var_d')%>%
dplyr::mutate(method = "wald",
lower_bound = qbar - qt(0.975, v)*sqrt(t),
upper_bound = qbar + qt(0.975, v)*sqrt(t))
fm_mi <-
dt_mi_est%>%
mi_comb(level=2, phat = 'phat_d', var_phat = 'var_dr')%>%
dplyr::mutate(method = "fm",
lower_bound = qbar - qt(0.975, v)*sqrt(t),
upper_bound = qbar + qt(0.975, v)*sqrt(t))
#Wilson-Newcombe: per-arm bounds with plug-in normal quantile
wn_plug_pc <-
dt_mi_est%>%
mi_comb(level=2, phat = 'c_phat', var_phat = 'c_phat_var')%>%
dplyr::mutate(method = "wn-plug for pc",
lower_bound = pmap_dbl(list(phat = qbar), lb_wn, z = qnorm(0.975), n_obs = n_obs),
upper_bound = pmap_dbl(list(phat = qbar), ub_wn, z = qnorm(0.975), n_obs = n_obs))
wn_plug_pt <-
dt_mi_est%>%
mi_comb(level=2, phat = 't_phat', var_phat = 't_phat_var')%>%
dplyr::mutate(method = "wn-plug for pt",
lower_bound = pmap_dbl(list(phat = qbar), lb_wn, z = qnorm(0.975), n_obs = n_obs),
upper_bound = pmap_dbl(list(phat = qbar), ub_wn, z = qnorm(0.975), n_obs = n_obs))
#combine the per-arm Wilson bounds into a difference CI
wn_plug <-
tibble(method = "wn-plug",
pc = wn_plug_pc$qbar, lb_pc = wn_plug_pc$lower_bound, ub_pc = wn_plug_pc$upper_bound,
pt = wn_plug_pt$qbar, lb_pt = wn_plug_pt$lower_bound, ub_pt = wn_plug_pt$upper_bound,
qbar = wn_plug_pc$qbar - wn_plug_pt$qbar)%>%
dplyr::mutate(lower_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), lb_wn_p2),
upper_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), ub_wn_p2))%>%
dplyr::select(method, qbar, lower_bound, upper_bound)
#Wilson-Newcombe: per-arm bounds with MI-adjusted t quantile
wn_mi_pc <-
dt_mi_est%>%
mi_comb(level=2, phat = 'c_phat', var_phat = 'c_phat_var')%>%
dplyr::mutate(method = "wn-mi for pc",
lower_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), lb_wn_ign, n_obs = n_obs),
upper_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), ub_wn_ign, n_obs = n_obs))
wn_mi_pt <-
dt_mi_est%>%
mi_comb(level=2, phat = 't_phat', var_phat = 't_phat_var')%>%
dplyr::mutate(method = "wn-mi for pt",
lower_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), lb_wn_ign, n_obs = n_obs),
upper_bound = pmap_dbl(list(z = qt(0.975, df = v), qhat = qbar, rn = rn), ub_wn_ign, n_obs = n_obs))
wn_mi <-
tibble(method = "wn-mi",
pc = wn_mi_pc$qbar, lb_pc = wn_mi_pc$lower_bound, ub_pc = wn_mi_pc$upper_bound,
pt = wn_mi_pt$qbar, lb_pt = wn_mi_pt$lower_bound, ub_pt = wn_mi_pt$upper_bound,
qbar = wn_mi_pc$qbar - wn_mi_pt$qbar)%>%
dplyr::mutate(lower_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), lb_wn_p2),
upper_bound = pmap_dbl(list(pc, lb_pc, ub_pc, pt, lb_pt, ub_pt), ub_wn_p2))%>%
dplyr::select(method, qbar, lower_bound, upper_bound)
#stack all MI-based intervals and tag with replicate/scenario metadata
mi_all <-
bind_rows(wald_mi, fm_mi, wn_plug, wn_mi,
wn_plug_pc, wn_plug_pt, wn_mi_pc, wn_mi_pt)%>%
dplyr::mutate(sim_id = x,
set_n = set_n,
mu_k = mu_k,
sd_k = sd_k)
out <- list(full_ci, do_check, mi_all)%>%
purrr::set_names(c("full_ci", "do_check", "mi_all"))
}
else{
out <- list('simulation was not completed due to phat = 1 in one of the arms')%>%
purrr::set_names('err')
}
return(out)
})
# Persist all replicates for this scenario
saveRDS(x1, sprintf("results/p2_mnar_set_n%s.rds",
set_n))
|
# Trains a regression model for the Galaxy Prediction script
library(randomForest)
library(gbm)
library(caret)
library(doMC)
# Register 6 parallel workers for caret::train via the doMC backend
registerDoMC(cores=6)
build_models <- function(train) {
# Builds a variety of models
#
# Args:
#   train: dataset to train models on
#
# Returns:
#   list of 5 fitted models: plain glm, default randomForest, gbm
#   (gaussian), and caret cross-validated rf and gbm, in that order.
#
# NOTE(review): assumes `train` has an `id` column and a numeric
# `Prob_Smooth` response -- confirm against the calling script.
# Linear-model baseline using every predictor except id
glm_1 <- glm(Prob_Smooth ~ .-id, data=train)
# Fixed seeds make each stochastic learner reproducible
set.seed(276)
rf_1 <- randomForest(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth)
set.seed(829345)
gbm_1 <- gbm.fit(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth,
distribution = "gaussian")
# caret versions additionally tune hyper-parameters via cross-validation
train_control <- trainControl(method="cv")
set.seed(276)
rf_2 <- train(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth,
method="rf", trControl=train_control)
set.seed(829345)
gbm_2 <- train(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth,
method="gbm", trControl=train_control)
models <- list(glm_1, rf_1, gbm_1, rf_2, gbm_2)
return (models)
}
get_best_model <- function(models, validate) {
# Returns the best model in terms of RMSE along with the RMSE of each model
#
# Args:
#   models: list of fitted models to evaluate
#   validate: validation data to test the models against (must contain
#             the Prob_Smooth response column)
#
# Returns:
#   list(best_model, numeric vector of RMSEs in the order of `models`)
# vapply (rather than sapply) guarantees a numeric vector even for
# degenerate input, so which.min below is always well-defined
error_rates <- vapply(models, function(model) {
pred <- predict(model, validate)
rmse <- postResample(pred, validate$Prob_Smooth)[[1]]
return (rmse)
}, numeric(1))
return (list(models[[which.min(error_rates)]], error_rates))
}
| /Galaxy Prediction/train_gp.R | no_license | klinvill/CSCI-183-Data-Science | R | false | false | 1,507 | r | # Trains a regression model for the Galaxy Prediction script
library(randomForest)
library(gbm)
library(caret)
library(doMC)
# Register 6 parallel workers for caret::train via the doMC backend
registerDoMC(cores=6)
build_models <- function(train) {
# Builds a variety of models
#
# Args:
#   train: dataset to train models on
#
# Returns:
#   list of 5 fitted models: plain glm, default randomForest, gbm
#   (gaussian), and caret cross-validated rf and gbm, in that order.
#
# NOTE(review): assumes `train` has an `id` column and a numeric
# `Prob_Smooth` response -- confirm against the calling script.
# Linear-model baseline using every predictor except id
glm_1 <- glm(Prob_Smooth ~ .-id, data=train)
# Fixed seeds make each stochastic learner reproducible
set.seed(276)
rf_1 <- randomForest(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth)
set.seed(829345)
gbm_1 <- gbm.fit(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth,
distribution = "gaussian")
# caret versions additionally tune hyper-parameters via cross-validation
train_control <- trainControl(method="cv")
set.seed(276)
rf_2 <- train(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth,
method="rf", trControl=train_control)
set.seed(829345)
gbm_2 <- train(x=subset(train, select=-c(id, Prob_Smooth)), y=train$Prob_Smooth,
method="gbm", trControl=train_control)
models <- list(glm_1, rf_1, gbm_1, rf_2, gbm_2)
return (models)
}
get_best_model <- function(models, validate) {
# Returns the best model in terms of RMSE along with the RMSE of each model
#
# Args:
#   models: list of fitted models to evaluate
#   validate: validation data to test the models against (must contain
#             the Prob_Smooth response column)
#
# Returns:
#   list(best_model, numeric vector of RMSEs in the order of `models`)
# vapply (rather than sapply) guarantees a numeric vector even for
# degenerate input, so which.min below is always well-defined
error_rates <- vapply(models, function(model) {
pred <- predict(model, validate)
rmse <- postResample(pred, validate$Prob_Smooth)[[1]]
return (rmse)
}, numeric(1))
return (list(models[[which.min(error_rates)]], error_rates))
}
|
#' @name phenolong
#' @title Long Format Version of The unpak Phenotypes
#' @docType data
#' @description This data frame contains the following columns: "plantID" "accession" "meta.experiment" "facility" "Gene_idGene" "treatment" "value" "experiment" "phenotype". The value column contains the actual observations the rest of the columns are used in classifying the observation
#' @format one row per observation
NULL
| /R/phenolong.R | no_license | stranda/unpakathonJan2016 | R | false | false | 422 | r | #' @name phenolong
#' @title Long Format Version of The unpak Phenotypes
#' @docType data
#' @description This data frame contains the following columns: "plantID" "accession" "meta.experiment" "facility" "Gene_idGene" "treatment" "value" "experiment" "phenotype". The value column contains the actual observations the rest of the columns are used in classifying the observation
#' @format one row per observation
NULL
|
# Two models
# - one where preference is ignored
# - one with full preference interaction weighted by preference distribution
# Generate the data and the run two trials:
# - one which ignores preference
# - one which treats preference as subgroups
#' Generate potential outcomes if a participant were to receive each treatment
#' @param n The number of participants
#' @param m A matrix giving the mean response on each treatment (cols) by preference (rows)
#' @param s The variability of response assumed constant across preferences/treatments
#' @param p Distribution of preferences in the population
#' @return A list with `id`, `pref` (preference category per participant),
#'   `t0`/`t1` (enrolment and outcome times, outcomes lag by 12 weeks),
#'   `y` (n x K matrix of potential outcomes), and `p`.
#' @export
generate_preference_data <- function(
  n = 320,
  m = matrix(c(-3,-1,-1,-1,-2,2,0,0,-2,0,2,0,-1,1,1,3), 4, 4),
  s = 10,
  p = c(0.2, 0.7, 0.05, 0.05)
) {
  # Draw each participant's preference category from the population distribution
  pref <- sample.int(length(p), n, replace = TRUE, prob = p)
  # Potential outcome under every treatment; mean depends on preference row of m
  y <- matrix(rnorm(ncol(m)*n, m[pref, ], s), n, ncol(m))
  # Staggered enrolment over 156 weeks
  time <- seq(0, 156, length.out = n)
  list(
    id = seq_len(n),  # seq_len() is safe for n = 0, unlike 1:n
    pref = pref,
    t0 = time,
    t1 = time + 12,
    y = y,
    p = p
  )
}
#' Probability each column is maximum
#'
#' For each Monte Carlo draw (row), find the column holding the largest value
#' and return the proportion of draws in which each column was the maximum.
#'
#' @param M A matrix of Monte Carlo draws
prob_supr <- function(M) {
  # max.col resolves ties at random (its default), matching prior behaviour
  winner <- max.col(M)
  tabulate(winner, nbins = ncol(M)) / nrow(M)
}
#' Estimate linear regression model
#'
#' Fits a Bayesian linear model by variational approximation with a
#' diagonal Gaussian prior on the coefficients.
#'
#' @param X Design matrix
#' @param y Response vector
#' @param m0 Prior mean
#' @param s0 Prior SD
#' @param a0 Prior shape
#' @param b0 Prior shape
#' @param ... Other arguments to `vb_lm`
estimate_model <- function(
  X,
  y,
  m0 = rep(0, ncol(X)),
  s0 = rep(10, ncol(X)),
  a0 = 1e-2,
  b0 = 1e-2,
  ...
) {
  # Prior covariance is diagonal with variances s0^2
  varapproxr::vb_lm(X, y, m0, diag(s0^2), a0, b0, ...)
}
#' Update linear regression model
#'
#' Sequentially updates an existing variational fit with new observations.
#'
#' @param fit A previous `vb_lm` fit
#' @param X New design matrix data
#' @param y New response vector data
#' @param ... Other arguments to `update_vb_lm`
update_model <- function(
  fit,
  X,
  y,
  ...
) {
  varapproxr::update_vb_lm(fit, X, y, ...)
}
#' Simulate trial without any knowledge of preference
#'
#' Runs a group-sequential trial: participants are randomised among K
#' treatments, a Bayesian linear model is refit at each interim, inferior
#' arms are dropped, and the trial stops once an arm is declared superior.
#'
#' @param dat Data generated from `generate_preference_data`
#' @param n_seq Sequence of interim analyses
#' @param alloc Initial allocation probabilities
#' @param sup_eps Posterior probability threshold for declaring superiority
#' @param eff_eps Posterior probability threshold for declaring effectiveness vs arm 1
#' @param brar Use BRAR?
#' @param ... Other arguments to `vb_lm` functions
#' @return A list of per-interim matrices (enrolment/observed counts,
#'   posterior means/variances, superiority and effectiveness indicators),
#'   truncated at the interim where the trial stopped.
#' @export
simulate_standard_trial <- function(
dat = generate_preference_data(),
n_seq = c(100, 150, 200, 250, nrow(dat$y)),
alloc = rep(1/ncol(dat$y), ncol(dat$y)),
sup_eps = 0.95,
eff_eps = 0.95,
brar = FALSE,
...
) {
# Interim setup
# n_enr here = number ENROLLED by the time the n_seq[a]-th outcome is
# observed (outcomes lag enrolment); the name is re-used for a matrix below.
N <- length(n_seq)
n_enr <- sapply(1:N, function(a) length(dat$t0[dat$t0 <= dat$t1[n_seq[a]]]))
n_new <- diff(c(0, n_enr))
idobs <- cbind(c(1, n_seq[-length(n_seq)] + 1), n_seq)
idenr <- cbind(c(1, n_enr[-length(n_enr)] + 1), n_enr)
yobs <- NULL
Xobs <- NULL
trt <- NULL
trtobs<- NULL
prf <- dat$pref
# Model design matrix
# C maps (intercept, K-1 orthonormal contrasts) to cell means; the contrast
# basis comes from the eigenvectors of the centering matrix I - 1/K.
# NOTE(review): diag(1, 4) hard-codes K = 4 -- confirm, or use diag(1, K).
K <- ncol(dat$y)
C <- as.matrix(Matrix::bdiag(1, eigen(diag(1, K) - 1/K)$vector[, 1:(K-1)]))
Xd <- cbind(1, diag(1, 4))
X <- Xd %*% C
invC <- MASS::ginv(C)
Z <- cbind(-1, diag(1, K - 1))
# Output storage (one row per interim analysis)
trtlabs <- paste0("trt", 1:K)
parlabs <- c("intercept", trtlabs)
n_enr <- matrix(0, N, ncol(X), dimnames = list(analysis = 1:N, treatment = trtlabs))
n_obs <- n_enr
y_obs <- n_enr
trt_mean <- n_enr
trt_var <- n_enr
eff_mean <- matrix(0, N, ncol(X) - 1, dimnames = list(analysis = 1:N, treatment = trtlabs[-1]))
eff_var <- eff_mean
p_supr <- trt_mean
i_supr <- trt_mean
i_infr <- trt_mean
p_eff <- eff_mean
i_eff <- eff_mean
i_inf <- eff_mean
i_acti <- matrix(1, N+1, ncol(X), dimnames = list(analysis = 0:N, treatment = trtlabs))
b_mean <- matrix(0, N, ncol(Xd), dimnames = list(analysis = 1:N, parameter = parlabs))
b_var <- b_mean
p_pair <- matrix(NA, N, ncol(X), dimnames = list(analysis = 1:N, treatment = trtlabs))
final <- matrix(0, N, 1, dimnames = list(analysis = 1:N, final = 'final'))
stopped <- FALSE
for(i in 1:N) {
# Update the data
# After stopping, no new enrolment: only follow up outcomes of those
# already enrolled at the previous interim.
if(stopped) {
trtnew <- trt[idobs[i,1]:idenr[i-1,2]]
trtobs <- c(trtobs, trtnew)
yobsnew <- dat$y[cbind(idobs[i,1]:idenr[i-1,2], trtnew)]
yobs <- c(yobs, yobsnew)
Xobsnew <- X[trtnew, ]
Xobs <- rbind(Xobs, Xobsnew)
} else {
trtenr <- sample.int(K, n_new[i], TRUE, prob = alloc)
trt <- c(trt, trtenr)
trtnew <- trt[idobs[i,1]:idobs[i,2]]
trtobs <- c(trtobs, trtnew)
yobsnew <- dat$y[cbind(idobs[i,1]:idobs[i,2], trtnew)]
yobs <- c(yobs, yobsnew)
Xobsnew <- X[trtnew, ]
Xobs <- rbind(Xobs, Xobsnew)
}
final[i] <- stopped | i == N
n_enr[i, ] <- as.numeric(table(factor(trt, 1:K)))
n_obs[i, ] <- as.numeric(table(factor(trtobs, 1:K)))
y_obs[i, ] <- aggregate(yobs, by = list(factor(trtobs, 1:K)), mean, drop = F)$x
# Standardise the response before fitting for numerical stability
mean_y <- mean(yobs)
sd_y <- sd(yobs)
y_std <- (yobs - mean_y) / sd_y
# Update the model
# fit <- estimate_model(Xobs, yobs, ...)
fit <- estimate_model(Xobs, y_std, ...)
# Back transform parameters to original scale
mu <- drop(fit$mu) * sd_y
mu[1] <- mu[1] + mean_y
Sigma <- fit$Sigma * sd_y^2
# Update the inferences
trt_mean[i, ] <- drop(X %*% mu)
trt_var[i, ] <- diag(X %*% Sigma %*% t(X))
eff_mean[i, ] <- drop(Z %*% trt_mean[i, ])
eff_var[i, ] <- diag((Z %*% X) %*% Sigma %*% t(Z %*% X))
b_mean[i, ] <- drop(C %*% mu)
b_var[i, ] <- diag(C %*% Sigma %*% t(C))
# Monte Carlo draws from the posterior used for superiority probabilities
draws <- mvnfast::rmvn(2e4, mu, Sigma)
means <- draws %*% t(X)
# Conclusions carry forward
# i.e. once decided effective, also effective at all future analyses
if(i == 1) {
p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
i_eff[i, ] <- p_eff[i, ] > eff_eps
i_inf[i, ] <- p_eff[i, ] < 1 - eff_eps
p_supr[i, ] <- prob_supr(means)
i_supr[i, ] <- p_supr[i, ] > sup_eps
i_infr[i, ] <- p_supr[i, ] < (1 - sup_eps) / (K - 1)
if(any(i_supr[i, ] == 1)) i_infr[i, i_supr[i, ] != 1] <- 1
i_acti[i+1, ] <- 1 - i_infr[i, ]
} else {
p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
i_eff[i, ] <- (p_eff[i, ] > eff_eps) | (i_eff[i-1,] == 1)
i_inf[i, ] <- (p_eff[i, ] < 1 - eff_eps) | (i_inf[i-1, ] == 1)
# Superiority is re-assessed only among arms still active
p_supr[i, i_acti[i, ] == 1] <- prob_supr(means[, i_acti[i, ] == 1, drop = F])
i_supr[i, ] <- (p_supr[i, ] > sup_eps) | (i_supr[i-1, ] == 1)
i_infr[i, ] <- p_supr[i, ] < min(1, (1 - sup_eps) / (sum(i_acti[i, ]) - 1))
if(any(i_supr[i, ] == 1)) i_infr[i, i_supr[i, ] != 1] <- 1
i_acti[i+1, ] <- 1 - i_infr[i, ]
}
# Pairwise comparisons with current best
best <- which.max(p_supr[i, ])
P <- diag(1, K)
P[, best] <- P[, best] - 1
pair_mean <- (P %*% X %*% mu)
pair_var <- diag((P %*% X) %*% Sigma %*% t(P %*% X))
p_pair[i, -best] <- pnorm(0, pair_mean, sqrt(pair_var))[-best]
# Update allocations
# NOTE(review): unlike the preference version, n_enr[i, ] has no +1 guard,
# so an arm with zero enrolments would yield Inf/NaN ratios -- confirm
# this cannot occur under the chosen n_seq/alloc.
if(brar) {
ratio <- sqrt(p_supr[i, ] * i_acti[i+1, ] / n_enr[i, ])
alloc <- ratio / sum(ratio)
} else {
alloc <- (alloc * i_acti[i+1, ]) / sum(alloc * i_acti[i+1, ])
}
if(stopped) break
if(any(i_supr[i, ] == 1)) stopped <- TRUE
}
idx <- 1:i
# Results
list(
final = final[idx, , drop = F],
n_enr = n_enr[idx, , drop = F],
n_obs = n_obs[idx, , drop = F],
y_obs = y_obs[idx, , drop = F],
trt_mean = trt_mean[idx, , drop = F],
trt_var = trt_var[idx, , drop = F],
eff_mean = eff_mean[idx, , drop = F],
eff_var = eff_var[idx, , drop = F],
p_eff = p_eff[idx, , drop = F],
i_eff = i_eff[idx, , drop = F],
i_inf = i_inf[idx, , drop = F],
p_supr = p_supr[idx, , drop = F],
i_supr = i_supr[idx, , drop = F],
i_infr = i_infr[idx, , drop = F],
i_acti = i_acti[idx + 1, , drop = F],
b_mean = b_mean[idx, , drop = F],
b_var = b_var[idx, , drop = F],
p_pair = p_pair[idx, , drop = F]
)
}
#' Simulate trial accounting for preference
#'
#' As `simulate_standard_trial`, but the model includes preference main
#' effects and preference-by-treatment interactions (weighted by the
#' empirical preference distribution), and superiority/BRAR decisions are
#' made within each preference subgroup.
#'
#' @param dat Data generated from `generate_preference_data`
#' @param n_seq Sequence of interim analyses
#' @param alloc Initial allocation probabilities (matrix)
#' @param sup_eps Posterior probability threshold for declaring superiority
#' @param eff_eps Posterior probability threshold for declaring effectiveness vs arm 1
#' @param brar Use BRAR?
#' @param min_decision The minimum sample size in a subgroup to make decision/switch on BRAR
#' @param ... Other arguments to `vb_lm` functions
#' @return A list of per-interim matrices summarising enrolment, posterior
#'   estimates and decisions by preference, treatment, and combination.
#' @export
simulate_preference_trial <- function(
dat = generate_preference_data(),
n_seq = c(100, 150, 200, 250, nrow(dat$y)),
alloc = matrix(1/ncol(dat$y), length(dat$p), ncol(dat$y)),
sup_eps = 0.95,
eff_eps = 0.95,
brar = FALSE,
min_decision = 20,
...
) {
Q <- length(dat$p)
# Interim setup
# NOTE(review): uses strict `<` here, whereas simulate_standard_trial
# uses `<=` for the same calculation -- confirm which is intended.
N <- length(n_seq)
n_enr <- sapply(1:N, function(a) length(dat$t0[dat$t0 < dat$t1[n_seq[a]]]))
n_new <- diff(c(0, n_enr))
idobs <- cbind(c(1, n_seq[-length(n_seq)] + 1), n_seq)
idenr <- cbind(c(1, n_enr[-length(n_enr)] + 1), n_enr)
yobs <- NULL
Xobs <- NULL
trt <- NULL
trtobs<- NULL
prf <- NULL
prfobs<- NULL
com <- NULL
comobs<- NULL
# Model design matrix (preference design is data dependent)
K <- ncol(dat$y)
Xtrt <- kronecker(diag(1, K), rep(1, Q))
Xprf <- kronecker(rep(1, K), diag(1, Q))
Xcom <- matrix(apply(Xtrt, 2, function(x) x * Xprf), Q*K)
Xd <- cbind(1, Xtrt, Xprf, Xcom)
Ctrt <- eigen(diag(1, K) - 1/K)$vector[, 1:(K-1)]
Z <- cbind(-1, diag(1, K - 1))
Zcom <- cbind(kronecker(rep(1,K-1),diag(-1, Q)), kronecker(diag(1, Q),diag(1, K-1)))
# Maps (preference, treatment) to a combination cell index in 1:(Q*K).
# NOTE(review): the stride is K, but comlabs below vary preference fastest
# with Q levels -- this only agrees when Q == K (true for the defaults);
# confirm before running with Q != K.
map_com <- function(pref, treat) (treat - 1)*K + pref
# Output storage (one row per interim analysis)
trtlabs <- paste0("trt", 1:K)
prflabs <- paste0("prf", 1:Q)
comlabs <- paste(rep(trtlabs, each = Q), rep(prflabs, times = K), sep = "_")
comefflabs <- paste(rep(trtlabs[-1], each = Q), rep(prflabs, times = K-1), sep = "_")
parlabs <- c("intercept", trtlabs, prflabs, comlabs)
n_enr_prf <- matrix(0, N, Q, dimnames = list(analysis = 1:N, preference = prflabs))
n_obs_prf <- n_enr_prf
y_obs_prf <- n_enr_prf
prf_mean <- n_enr_prf
prf_var <- n_enr_prf
n_enr_trt <- matrix(0, N, K, dimnames = list(analysis = 1:N, treatment = trtlabs))
n_obs_trt <- n_enr_trt
y_obs_trt <- n_enr_trt
trt_mean <- n_enr_trt
trt_var <- n_enr_trt
n_enr_com <- matrix(0, N, Q*K, dimnames = list(analysis = 1:N, treatment = comlabs))
n_obs_com <- n_enr_com
y_obs_com <- n_enr_com
com_mean <- matrix(0, N, Q*K, dimnames = list(analysis = 1:N, combination = comlabs))
com_var <- com_mean
eff_mean <- matrix(0, N, K - 1, dimnames = list(analysis = 1:N, treatment = trtlabs[-1]))
eff_var <- eff_mean
p_eff <- eff_mean
i_eff <- eff_mean
i_inf <- eff_mean
p_supr_trt <- trt_mean
i_supr_trt <- trt_mean
i_infr_trt <- trt_mean
i_acti_trt <- matrix(1, N+1, K, dimnames = list(analysis = 0:N, treatment = trtlabs))
p_supr_com <- com_mean
i_supr_com <- com_mean
i_infr_com <- com_mean
p_eff_com <- matrix(0, N, K*Q - Q, dimnames = list(analysis = 1:N, combination = comefflabs))
i_eff_com <- p_eff_com
i_inf_com <- p_eff_com
i_acti_com <- matrix(1, N+1, Q*K, dimnames = list(analysis = 0:N, combination = comlabs))
b_mean <- matrix(0, N, Q+K+Q*K+1, dimnames = list(analysis = 1:N, parameter = parlabs))
b_var <- b_mean
final <- matrix(0, N, 1, dimnames = list(analysis = 1:N, final = 'final'))
stopped <- FALSE
for(i in 1:N) {
# Update the data
# After stopping: follow-up only, no new enrolments.
if(stopped) {
prfnew <- prf[idobs[i,1]:idenr[i-1,2]]
prfobs <- c(prfobs, prfnew)
trtnew <- trt[idobs[i,1]:idenr[i-1,2]]
trtobs <- c(trtobs, trtnew)
comnew <- map_com(prfnew, trtnew)
comobs <- c(comobs, comnew)
yobsnew <- dat$y[cbind(idobs[i,1]:idenr[i-1,2], trtnew)]
yobs <- c(yobs, yobsnew)
# Determine preference distribution estimates
n_obs_prf[i, ] <- as.numeric(table(factor(prfobs, 1:Q)))
p_prf <- n_obs_prf[i, ] / sum(n_obs_prf[i, ])
# Constrain by preference distribution
Cprf <- eigen(diag(1, Q) - matrix(kronecker(p_prf, rep(1, K)), Q, K))[[2]][, 1:(Q-1)]
Ccom <- kronecker(Ctrt, Cprf)
C <- as.matrix(Matrix::bdiag(1, Ctrt, Cprf, Ccom))
X <- Xd %*% C
Xobs <- X[comobs, ]
} else {
prfenr <- dat$pref[idenr[i,1]:idenr[i,2]]
prf <- c(prf, prfenr)
prfnew <- prf[idobs[i,1]:idobs[i,2]]
prfobs <- c(prfobs, prfnew)
# Allocation probabilities depend on each participant's preference row
trtenr <- sapply(1:n_new[i], function(a) sample.int(K, 1, prob = alloc[prfenr[a], ]))
trt <- c(trt, trtenr)
trtnew <- trt[idobs[i,1]:idobs[i,2]]
trtobs <- c(trtobs, trtnew)
com <- c(com, map_com(prfenr, trtenr))
comnew <- map_com(prfnew, trtnew)
comobs <- c(comobs, comnew)
yobsnew <- dat$y[cbind(idobs[i,1]:idobs[i,2], trtnew)]
yobs <- c(yobs, yobsnew)
# Determine preference distribution estimates
n_obs_prf[i, ] <- as.numeric(table(factor(prfobs, 1:Q)))
p_prf <- n_obs_prf[i, ] / sum(n_obs_prf[i, ])
# Constrain by preference distribution
Cprf <- eigen(diag(1, Q) - matrix(kronecker(p_prf, rep(1, K)), Q, K))[[2]][, 1:(Q-1)]
Ccom <- kronecker(Ctrt, Cprf)
C <- as.matrix(Matrix::bdiag(1, Ctrt, Cprf, Ccom))
X <- Xd %*% C
Xobs <- X[comobs, ]
}
final[i] <- stopped | i == N
# Aggregate summaries
n_enr_prf[i, ] <- as.numeric(table(factor(prf, 1:Q)))
y_obs_prf[i, ] <- aggregate(yobs, by = list(factor(prfobs, 1:Q)), mean, drop = F)$x
n_enr_trt[i, ] <- as.numeric(table(factor(trt, 1:K)))
n_obs_trt[i, ] <- as.numeric(table(factor(trtobs, 1:K)))
y_obs_trt[i, ] <- aggregate(yobs, by = list(factor(trtobs, 1:K)), mean, drop = F)$x
n_enr_com[i, ] <- as.numeric(table(factor(com, 1:(Q*K))))
n_obs_com[i, ] <- as.numeric(table(factor(comobs, 1:(Q*K))))
y_obs_com[i, ] <- aggregate(yobs, by = list(factor(comobs, 1:(Q*K))), mean, drop = F)$x
# Standardise the response before fitting for numerical stability
mean_y <- mean(yobs)
sd_y <- sd(yobs)
y_std <- (yobs - mean_y) / sd_y
# Update the model
fit <- estimate_model(Xobs, y_std, ...)
if(!fit$converged) stop("Failed to converge")
# Back transform parameters to original scale
mu <- drop(fit$mu) * sd_y
mu[1] <- mu[1] + mean_y
Sigma <- fit$Sigma * sd_y^2
# Update the inferences
b_mean[i, ] <- drop(C %*% mu)
b_var[i, ] <- diag(C %*% Sigma %*% t(C))
com_mean[i, ] <- drop(X %*% mu)
com_var[i, ] <- diag(X %*% Sigma %*% t(X))
trt_mean[i, ] <- drop(cbind(1, Ctrt) %*% mu[1:K])
trt_var[i, ] <- diag(cbind(1, Ctrt) %*% Sigma[1:K, 1:K] %*% t(cbind(1, Ctrt)))
prf_mean[i, ] <- drop(cbind(1, Cprf) %*% mu[c(1, (K+1):(K+Q-1))])
prf_var[i, ] <- diag(
cbind(1, Cprf) %*%
fit$Sigma[c(1,(K+1):(K+Q-1)), c(1,(K+1):(K+Q-1))] %*%
t(cbind(1, Cprf)))
eff_mean[i, ] <- drop(Z %*% cbind(1, Ctrt) %*% mu[1:K])
eff_var[i, ] <- diag(Z %*% cbind(1, Ctrt) %*% Sigma[1:K, 1:K] %*% t(Z %*% cbind(1, Ctrt)))
effcom_mean <- Zcom %*% X %*% mu
effcom_var <- diag((Zcom %*% X) %*% Sigma %*% t(Zcom %*% X))
# Monte Carlo draws from the posterior for superiority probabilities
draws <- mvnfast::rmvn(2e4, mu, Sigma)
betas <- draws %*% t(C)
means <- draws %*% t(X)
# Conclusions carry forward
if(i == 1) {
p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
i_eff[i, ] <- p_eff[i, ] > eff_eps
i_inf[i, ] <- p_eff[i, ] < 1 - eff_eps
# Superiority/effectiveness assessed within each preference subgroup
for(a in 1:Q) {
id <- map_com(a, 1:K)
ideff <- map_com(a, 2:K) - Q
p_supr_com[i, id] <- prob_supr(means[, id])
p_eff_com[i, ideff] <- 1 - pnorm(0, effcom_mean[ideff], sqrt(effcom_var[ideff]))
if(n_obs_prf[i, a] >= min_decision) {
i_supr_com[i, id] <- p_supr_com[i, id] > sup_eps
i_infr_com[i, id] <- p_supr_com[i, id] < (1 - sup_eps) / (K - 1)
if(any(i_supr_com[i, id])) i_infr_com[i, id][i_supr_com[i, id] != 1] <- 1
i_acti_com[i+1, id] <- 1 - i_infr_com[i, id]
}
i_eff_com[i, ideff] <- p_eff_com[i, ideff] > eff_eps
i_inf_com[i, ideff] <- p_eff_com[i, ideff] < 1 - eff_eps
}
# NOTE(review): betas[, 2:5] hard-codes K = 4 treatment columns --
# confirm, or use 2:(K+1).
p_supr_trt[i, ] <- prob_supr(betas[, 1] + betas[, 2:5])
i_supr_trt[i, ] <- p_supr_trt[i, ] > sup_eps
i_infr_trt[i, ] <- p_supr_trt[i, ] < (1 - sup_eps) / (K - 1)
} else {
p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
i_eff[i, ] <- (p_eff[i, ] > eff_eps) | (i_eff[i-1, ] == 1)
i_inf[i, ] <- (p_eff[i, ] < 1 - eff_eps) | (i_inf[i-1,] == 1)
for(a in 1:Q) {
id <- map_com(a, 1:K)
ideff <- map_com(a, 2:K) - Q
act_id <- i_acti_com[i, id] == 1
p_supr_com[i, id][act_id] <-
prob_supr(means[, id][, act_id, drop = F])
p_eff_com[i, ideff] <- 1 - pnorm(0, effcom_mean[ideff], sqrt(effcom_var[ideff]))
if(n_obs_prf[i, a] >= min_decision) {
i_supr_com[i, id] <- p_supr_com[i, id] >= sup_eps | (i_supr_com[i-1, id] == 1)
i_infr_com[i, id] <- ((p_supr_com[i, id] < (1 - sup_eps) / (sum(i_acti_com[i, id]) - 1)) |
(i_infr_com[i-1, id] == 1)) & (i_supr_com[i-1, id] != 1)
if(any(i_supr_com[i, id])) {
i_infr_com[i, id][i_supr_com[i, id] != 1] <- 1
# i_infr_com[i, id][i_supr_com[i, id] == 1] <- 0
}
i_acti_com[i+1, id] <- 1 - i_infr_com[i, id]
}
i_eff_com[i, ideff] <- p_eff_com[i, ideff] > eff_eps
i_inf_com[i, ideff] <- p_eff_com[i, ideff] < 1 - eff_eps
}
p_supr_trt[i, ] <- prob_supr(betas[, 1] + betas[, 2:5])
i_supr_trt[i, ] <- p_supr_trt[i, ] > sup_eps
i_infr_trt[i, ] <- p_supr_trt[i, ] < (1 - sup_eps) / (K - 1)
}
# Update allocations (per preference subgroup)
if(brar) {
for(a in 1:Q) {
# Only BRAR if exceed minimum sample size
if(n_obs_prf[i, a] >= min_decision) {
id <- map_com(a, 1:K)
# +1 guards against division by zero for unenrolled cells
ratio <- sqrt(p_supr_com[i, id] * i_acti_com[i+1, id] / (n_enr_com[i, id] + 1))
alloc[a, ] <- ratio / sum(ratio)
}
}
} else {
for(a in 1:Q) {
id <- map_com(a, 1:K)
alloc[a, ] <- alloc[a, ] * i_acti_com[i+1, id] / sum(alloc[a, ] * i_acti_com[i+1, id])
}
}
# Debug escape hatch: surface the offending state if allocations go NaN
if(any(is.na(alloc))) return(list(alloc, i_supr_com, i_infr_com, i_acti_com))
if(stopped) break
# Stop once every preference subgroup has a declared-superior arm
if(all(sapply(1:Q, function(a) any(i_supr_com[i, map_com(a, 1:K)] == 1)))) stopped <- TRUE
}
idx <- 1:i
# Results
list(
final = final[idx, , drop = F],
n_enr_prf = n_enr_prf[idx, , drop = F],
n_obs_prf = n_obs_prf[idx, , drop = F],
y_obs_prf = y_obs_prf[idx, , drop = F],
n_enr_trt = n_enr_trt[idx, , drop = F],
n_obs_trt = n_obs_trt[idx, , drop = F],
y_obs_trt = y_obs_trt[idx, , drop = F],
n_enr_com = n_enr_com[idx, , drop = F],
n_obs_com = n_obs_com[idx, , drop = F],
y_obs_com = y_obs_com[idx, , drop = F],
prf_mean = prf_mean[idx, , drop = F],
prf_var = prf_var[idx, , drop = F],
trt_mean = trt_mean[idx, , drop = F],
trt_var = trt_var[idx, , drop = F],
eff_mean = eff_mean[idx, , drop = F],
eff_var = eff_var[idx, , drop = F],
com_mean = com_mean[idx, , drop = F],
com_var = com_var[idx, , drop = F],
p_eff = p_eff[idx, , drop = F],
i_eff = i_eff[idx, , drop = F],
i_inf = i_inf[idx, , drop = F],
p_supr_com = p_supr_com[idx, , drop = F],
i_supr_com = i_supr_com[idx, , drop = F],
i_infr_com = i_infr_com[idx, , drop = F],
i_acti_com = i_acti_com[idx + 1, , drop = F],
p_eff_com = p_eff_com[idx, , drop = F],
i_eff_com = i_eff_com[idx, , drop = F],
i_inf_com = i_inf_com[idx, , drop = F],
p_supr_trt = p_supr_trt[idx, , drop = F],
i_supr_trt = i_supr_trt[idx, , drop = F],
i_infr_trt = i_infr_trt[idx, , drop = F],
b_mean = b_mean[idx, , drop = F],
b_var = b_var[idx, , drop = F]
)
}
| /R/simulate_trial_model2.R | no_license | jatotterdell/mfittrial | R | false | false | 19,787 | r | # Two models
# - one where preference is ignored
# - one with full preference interaction weighted by preference distribution
# Generate the data and the run two trials:
# - one which ignores preference
# - one which treats preference as subgroups
#' Generate potential outcomes if a participant were to receive each treatment
#' @param n The number of participants
#' @param m A matrix giving the mean response on each treatment (cols) by preference (rows)
#' @param s The variability of response assumed constant across preferences/treatments
#' @param p Distribution of preferences in the population
#' @return A list with `id`, `pref` (preference category per participant),
#'   `t0`/`t1` (enrolment and outcome times, outcomes lag by 12 weeks),
#'   `y` (n x K matrix of potential outcomes), and `p`.
#' @export
generate_preference_data <- function(
  n = 320,
  m = matrix(c(-3,-1,-1,-1,-2,2,0,0,-2,0,2,0,-1,1,1,3), 4, 4),
  s = 10,
  p = c(0.2, 0.7, 0.05, 0.05)
) {
  # Draw each participant's preference category from the population distribution
  pref <- sample.int(length(p), n, replace = TRUE, prob = p)
  # Potential outcome under every treatment; mean depends on preference row of m
  y <- matrix(rnorm(ncol(m)*n, m[pref, ], s), n, ncol(m))
  # Staggered enrolment over 156 weeks
  time <- seq(0, 156, length.out = n)
  list(
    id = seq_len(n),  # seq_len() is safe for n = 0, unlike 1:n
    pref = pref,
    t0 = time,
    t1 = time + 12,
    y = y,
    p = p
  )
}
#' Probability each column is maximum
#'
#' For each Monte Carlo draw (row), find the column holding the largest value
#' and return the proportion of draws in which each column was the maximum.
#'
#' @param M A matrix of Monte Carlo draws
prob_supr <- function(M) {
  # max.col resolves ties at random (its default), matching prior behaviour
  winner <- max.col(M)
  tabulate(winner, nbins = ncol(M)) / nrow(M)
}
#' Estimate linear regression model
#'
#' Fits a Bayesian linear model by variational approximation with a
#' diagonal Gaussian prior on the coefficients.
#'
#' @param X Design matrix
#' @param y Response vector
#' @param m0 Prior mean
#' @param s0 Prior SD
#' @param a0 Prior shape
#' @param b0 Prior shape
#' @param ... Other arguments to `vb_lm`
estimate_model <- function(
  X,
  y,
  m0 = rep(0, ncol(X)),
  s0 = rep(10, ncol(X)),
  a0 = 1e-2,
  b0 = 1e-2,
  ...
) {
  # Prior covariance is diagonal with variances s0^2
  varapproxr::vb_lm(X, y, m0, diag(s0^2), a0, b0, ...)
}
#' Update linear regression model
#'
#' Sequentially updates an existing variational fit with new observations.
#'
#' @param fit A previous `vb_lm` fit
#' @param X New design matrix data
#' @param y New response vector data
#' @param ... Other arguments to `update_vb_lm`
update_model <- function(
  fit,
  X,
  y,
  ...
) {
  varapproxr::update_vb_lm(fit, X, y, ...)
}
#' Simulate trial without any knowledge of preference
#'
#' Runs a group-sequential trial: participants are randomised among K
#' treatments, a Bayesian linear model is refit at each interim, inferior
#' arms are dropped, and the trial stops once an arm is declared superior.
#'
#' @param dat Data generated from `generate_preference_data`
#' @param n_seq Sequence of interim analyses
#' @param alloc Initial allocation probabilities
#' @param sup_eps Posterior probability threshold for declaring superiority
#' @param eff_eps Posterior probability threshold for declaring effectiveness vs arm 1
#' @param brar Use BRAR?
#' @param ... Other arguments to `vb_lm` functions
#' @return A list of per-interim matrices (enrolment/observed counts,
#'   posterior means/variances, superiority and effectiveness indicators),
#'   truncated at the interim where the trial stopped.
#' @export
simulate_standard_trial <- function(
dat = generate_preference_data(),
n_seq = c(100, 150, 200, 250, nrow(dat$y)),
alloc = rep(1/ncol(dat$y), ncol(dat$y)),
sup_eps = 0.95,
eff_eps = 0.95,
brar = FALSE,
...
) {
# Interim setup
# n_enr here = number ENROLLED by the time the n_seq[a]-th outcome is
# observed (outcomes lag enrolment); the name is re-used for a matrix below.
N <- length(n_seq)
n_enr <- sapply(1:N, function(a) length(dat$t0[dat$t0 <= dat$t1[n_seq[a]]]))
n_new <- diff(c(0, n_enr))
idobs <- cbind(c(1, n_seq[-length(n_seq)] + 1), n_seq)
idenr <- cbind(c(1, n_enr[-length(n_enr)] + 1), n_enr)
yobs <- NULL
Xobs <- NULL
trt <- NULL
trtobs<- NULL
prf <- dat$pref
# Model design matrix
# C maps (intercept, K-1 orthonormal contrasts) to cell means; the contrast
# basis comes from the eigenvectors of the centering matrix I - 1/K.
# NOTE(review): diag(1, 4) hard-codes K = 4 -- confirm, or use diag(1, K).
K <- ncol(dat$y)
C <- as.matrix(Matrix::bdiag(1, eigen(diag(1, K) - 1/K)$vector[, 1:(K-1)]))
Xd <- cbind(1, diag(1, 4))
X <- Xd %*% C
invC <- MASS::ginv(C)
Z <- cbind(-1, diag(1, K - 1))
# Output storage (one row per interim analysis)
trtlabs <- paste0("trt", 1:K)
parlabs <- c("intercept", trtlabs)
n_enr <- matrix(0, N, ncol(X), dimnames = list(analysis = 1:N, treatment = trtlabs))
n_obs <- n_enr
y_obs <- n_enr
trt_mean <- n_enr
trt_var <- n_enr
eff_mean <- matrix(0, N, ncol(X) - 1, dimnames = list(analysis = 1:N, treatment = trtlabs[-1]))
eff_var <- eff_mean
p_supr <- trt_mean
i_supr <- trt_mean
i_infr <- trt_mean
p_eff <- eff_mean
i_eff <- eff_mean
i_inf <- eff_mean
i_acti <- matrix(1, N+1, ncol(X), dimnames = list(analysis = 0:N, treatment = trtlabs))
b_mean <- matrix(0, N, ncol(Xd), dimnames = list(analysis = 1:N, parameter = parlabs))
b_var <- b_mean
p_pair <- matrix(NA, N, ncol(X), dimnames = list(analysis = 1:N, treatment = trtlabs))
final <- matrix(0, N, 1, dimnames = list(analysis = 1:N, final = 'final'))
stopped <- FALSE
for(i in 1:N) {
# Update the data
# After stopping, no new enrolment: only follow up outcomes of those
# already enrolled at the previous interim.
if(stopped) {
trtnew <- trt[idobs[i,1]:idenr[i-1,2]]
trtobs <- c(trtobs, trtnew)
yobsnew <- dat$y[cbind(idobs[i,1]:idenr[i-1,2], trtnew)]
yobs <- c(yobs, yobsnew)
Xobsnew <- X[trtnew, ]
Xobs <- rbind(Xobs, Xobsnew)
} else {
trtenr <- sample.int(K, n_new[i], TRUE, prob = alloc)
trt <- c(trt, trtenr)
trtnew <- trt[idobs[i,1]:idobs[i,2]]
trtobs <- c(trtobs, trtnew)
yobsnew <- dat$y[cbind(idobs[i,1]:idobs[i,2], trtnew)]
yobs <- c(yobs, yobsnew)
Xobsnew <- X[trtnew, ]
Xobs <- rbind(Xobs, Xobsnew)
}
final[i] <- stopped | i == N
n_enr[i, ] <- as.numeric(table(factor(trt, 1:K)))
n_obs[i, ] <- as.numeric(table(factor(trtobs, 1:K)))
y_obs[i, ] <- aggregate(yobs, by = list(factor(trtobs, 1:K)), mean, drop = F)$x
# Standardise the response before fitting for numerical stability
mean_y <- mean(yobs)
sd_y <- sd(yobs)
y_std <- (yobs - mean_y) / sd_y
# Update the model
# fit <- estimate_model(Xobs, yobs, ...)
fit <- estimate_model(Xobs, y_std, ...)
# Back transform parameters to original scale
mu <- drop(fit$mu) * sd_y
mu[1] <- mu[1] + mean_y
Sigma <- fit$Sigma * sd_y^2
# Update the inferences
trt_mean[i, ] <- drop(X %*% mu)
trt_var[i, ] <- diag(X %*% Sigma %*% t(X))
eff_mean[i, ] <- drop(Z %*% trt_mean[i, ])
eff_var[i, ] <- diag((Z %*% X) %*% Sigma %*% t(Z %*% X))
b_mean[i, ] <- drop(C %*% mu)
b_var[i, ] <- diag(C %*% Sigma %*% t(C))
# Monte Carlo draws from the posterior used for superiority probabilities
draws <- mvnfast::rmvn(2e4, mu, Sigma)
means <- draws %*% t(X)
# Conclusions carry forward
# i.e. once decided effective, also effective at all future analyses
if(i == 1) {
p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
i_eff[i, ] <- p_eff[i, ] > eff_eps
i_inf[i, ] <- p_eff[i, ] < 1 - eff_eps
p_supr[i, ] <- prob_supr(means)
i_supr[i, ] <- p_supr[i, ] > sup_eps
i_infr[i, ] <- p_supr[i, ] < (1 - sup_eps) / (K - 1)
if(any(i_supr[i, ] == 1)) i_infr[i, i_supr[i, ] != 1] <- 1
i_acti[i+1, ] <- 1 - i_infr[i, ]
} else {
p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
i_eff[i, ] <- (p_eff[i, ] > eff_eps) | (i_eff[i-1,] == 1)
i_inf[i, ] <- (p_eff[i, ] < 1 - eff_eps) | (i_inf[i-1, ] == 1)
# Superiority is re-assessed only among arms still active
p_supr[i, i_acti[i, ] == 1] <- prob_supr(means[, i_acti[i, ] == 1, drop = F])
i_supr[i, ] <- (p_supr[i, ] > sup_eps) | (i_supr[i-1, ] == 1)
i_infr[i, ] <- p_supr[i, ] < min(1, (1 - sup_eps) / (sum(i_acti[i, ]) - 1))
if(any(i_supr[i, ] == 1)) i_infr[i, i_supr[i, ] != 1] <- 1
i_acti[i+1, ] <- 1 - i_infr[i, ]
}
# Pairwise comparisons with current best
best <- which.max(p_supr[i, ])
P <- diag(1, K)
P[, best] <- P[, best] - 1
pair_mean <- (P %*% X %*% mu)
pair_var <- diag((P %*% X) %*% Sigma %*% t(P %*% X))
p_pair[i, -best] <- pnorm(0, pair_mean, sqrt(pair_var))[-best]
# Update allocations
# NOTE(review): unlike the preference version, n_enr[i, ] has no +1 guard,
# so an arm with zero enrolments would yield Inf/NaN ratios -- confirm
# this cannot occur under the chosen n_seq/alloc.
if(brar) {
ratio <- sqrt(p_supr[i, ] * i_acti[i+1, ] / n_enr[i, ])
alloc <- ratio / sum(ratio)
} else {
alloc <- (alloc * i_acti[i+1, ]) / sum(alloc * i_acti[i+1, ])
}
if(stopped) break
if(any(i_supr[i, ] == 1)) stopped <- TRUE
}
idx <- 1:i
# Results
list(
final = final[idx, , drop = F],
n_enr = n_enr[idx, , drop = F],
n_obs = n_obs[idx, , drop = F],
y_obs = y_obs[idx, , drop = F],
trt_mean = trt_mean[idx, , drop = F],
trt_var = trt_var[idx, , drop = F],
eff_mean = eff_mean[idx, , drop = F],
eff_var = eff_var[idx, , drop = F],
p_eff = p_eff[idx, , drop = F],
i_eff = i_eff[idx, , drop = F],
i_inf = i_inf[idx, , drop = F],
p_supr = p_supr[idx, , drop = F],
i_supr = i_supr[idx, , drop = F],
i_infr = i_infr[idx, , drop = F],
i_acti = i_acti[idx + 1, , drop = F],
b_mean = b_mean[idx, , drop = F],
b_var = b_var[idx, , drop = F],
p_pair = p_pair[idx, , drop = F]
)
}
#' Simulate trial accounting for preference
#'
#' @param dat Data generated from `generate_preference_data`
#' @param n_seq Sequence of interim analyses
#' @param alloc Initial allocation probabilities (matrix)
#' @param sup_eps Posterior probability threshold for declaring a treatment superior
#' @param eff_eps Posterior probability threshold for declaring a treatment effective
#' @param brar Use BRAR (Bayesian response-adaptive randomisation)?
#' @param min_decision The minimum sample size in a subgroup to make decision/switch on BRAR
#' @param ... Other arguments to `vb_lm` functions
#' @export
simulate_preference_trial <- function(
  dat = generate_preference_data(),
  n_seq = c(100, 150, 200, 250, nrow(dat$y)),
  alloc = matrix(1/ncol(dat$y), length(dat$p), ncol(dat$y)),
  sup_eps = 0.95,
  eff_eps = 0.95,
  brar = FALSE,
  min_decision = 20,
  ...
) {
  # Group-sequential simulation of a trial with K treatments crossed with Q
  # preference subgroups. At each interim analysis the observed outcomes are
  # standardised, the working linear model is refitted with estimate_model(),
  # and posterior draws drive effectiveness/superiority decisions within each
  # preference subgroup; allocation probabilities are then updated (fixed
  # renormalisation or BRAR). Returns a list of per-analysis summary matrices
  # truncated at the last analysis actually performed.
  #
  # Fixes relative to the original implementation:
  #  * treatment-effect columns of the posterior draws are indexed 2:(K+1)
  #    instead of the hard-coded 2:5 (which assumed K == 4);
  #  * map_com() uses (treat - 1)*Q + pref, matching the treatment-major cell
  #    ordering of comlabs/Xd (the original used K, correct only when K == Q);
  #  * prf_var is computed from the back-transformed Sigma (the original used
  #    fit$Sigma, i.e. the standardised scale, unlike trt_var/com_var);
  #  * eigen()$vectors is spelled out (the original relied on $vector
  #    partial matching).
  Q <- length(dat$p)   # number of preference subgroups
  # Interim setup
  N <- length(n_seq)   # number of scheduled analyses
  # Number enrolled by the time the n_seq[a]-th outcome has been observed
  n_enr <- sapply(1:N, function(a) length(dat$t0[dat$t0 < dat$t1[n_seq[a]]]))
  n_new <- diff(c(0, n_enr))
  # First/last subject indices newly observed / newly enrolled per analysis
  idobs <- cbind(c(1, n_seq[-length(n_seq)] + 1), n_seq)
  idenr <- cbind(c(1, n_enr[-length(n_enr)] + 1), n_enr)
  # Accumulators, grown analysis by analysis
  yobs <- NULL
  Xobs <- NULL
  trt <- NULL
  trtobs <- NULL
  prf <- NULL
  prfobs <- NULL
  com <- NULL
  comobs <- NULL
  # Model design matrix (preference design is data dependent)
  K <- ncol(dat$y)     # number of treatments
  Xtrt <- kronecker(diag(1, K), rep(1, Q))
  Xprf <- kronecker(rep(1, K), diag(1, Q))
  Xcom <- matrix(apply(Xtrt, 2, function(x) x * Xprf), Q*K)
  Xd <- cbind(1, Xtrt, Xprf, Xcom)
  # Sum-to-zero contrast basis for the treatment main effects
  Ctrt <- eigen(diag(1, K) - 1/K)$vectors[, 1:(K-1)]
  Z <- cbind(-1, diag(1, K - 1))   # each treatment vs treatment 1
  Zcom <- cbind(kronecker(rep(1, K-1), diag(-1, Q)), kronecker(diag(1, Q), diag(1, K-1)))
  # Cell index of combination (pref, treat) in the treatment-major ordering
  # used by comlabs and the rows of Xd
  map_com <- function(pref, treat) (treat - 1)*Q + pref
  # Output storage
  trtlabs <- paste0("trt", 1:K)
  prflabs <- paste0("prf", 1:Q)
  comlabs <- paste(rep(trtlabs, each = Q), rep(prflabs, times = K), sep = "_")
  comefflabs <- paste(rep(trtlabs[-1], each = Q), rep(prflabs, times = K-1), sep = "_")
  parlabs <- c("intercept", trtlabs, prflabs, comlabs)
  n_enr_prf <- matrix(0, N, Q, dimnames = list(analysis = 1:N, preference = prflabs))
  n_obs_prf <- n_enr_prf
  y_obs_prf <- n_enr_prf
  prf_mean <- n_enr_prf
  prf_var <- n_enr_prf
  n_enr_trt <- matrix(0, N, K, dimnames = list(analysis = 1:N, treatment = trtlabs))
  n_obs_trt <- n_enr_trt
  y_obs_trt <- n_enr_trt
  trt_mean <- n_enr_trt
  trt_var <- n_enr_trt
  n_enr_com <- matrix(0, N, Q*K, dimnames = list(analysis = 1:N, treatment = comlabs))
  n_obs_com <- n_enr_com
  y_obs_com <- n_enr_com
  com_mean <- matrix(0, N, Q*K, dimnames = list(analysis = 1:N, combination = comlabs))
  com_var <- com_mean
  eff_mean <- matrix(0, N, K - 1, dimnames = list(analysis = 1:N, treatment = trtlabs[-1]))
  eff_var <- eff_mean
  p_eff <- eff_mean
  i_eff <- eff_mean
  i_inf <- eff_mean
  p_supr_trt <- trt_mean
  i_supr_trt <- trt_mean
  i_infr_trt <- trt_mean
  i_acti_trt <- matrix(1, N+1, K, dimnames = list(analysis = 0:N, treatment = trtlabs))
  p_supr_com <- com_mean
  i_supr_com <- com_mean
  i_infr_com <- com_mean
  p_eff_com <- matrix(0, N, K*Q - Q, dimnames = list(analysis = 1:N, combination = comefflabs))
  i_eff_com <- p_eff_com
  i_inf_com <- p_eff_com
  i_acti_com <- matrix(1, N+1, Q*K, dimnames = list(analysis = 0:N, combination = comlabs))
  b_mean <- matrix(0, N, Q+K+Q*K+1, dimnames = list(analysis = 1:N, parameter = parlabs))
  b_var <- b_mean
  final <- matrix(0, N, 1, dimnames = list(analysis = 1:N, final = 'final'))
  stopped <- FALSE
  for(i in 1:N) {
    # Update the data
    if(stopped) {
      # Trial stopped at the previous analysis: follow up the subjects who
      # were already enrolled (up to idenr[i-1, 2]) but not yet observed.
      prfnew <- prf[idobs[i,1]:idenr[i-1,2]]
      prfobs <- c(prfobs, prfnew)
      trtnew <- trt[idobs[i,1]:idenr[i-1,2]]
      trtobs <- c(trtobs, trtnew)
      comnew <- map_com(prfnew, trtnew)
      comobs <- c(comobs, comnew)
      yobsnew <- dat$y[cbind(idobs[i,1]:idenr[i-1,2], trtnew)]
      yobs <- c(yobs, yobsnew)
      # Determine preference distribution estimates
      n_obs_prf[i, ] <- as.numeric(table(factor(prfobs, 1:Q)))
      p_prf <- n_obs_prf[i, ] / sum(n_obs_prf[i, ])
      # Constrain by preference distribution
      # NOTE(review): diag(1, Q) is Q x Q while the kronecker term is
      # reshaped to Q x K, so this subtraction only conforms when K == Q --
      # confirm the intended centering matrix.
      Cprf <- eigen(diag(1, Q) - matrix(kronecker(p_prf, rep(1, K)), Q, K))[[2]][, 1:(Q-1)]
      Ccom <- kronecker(Ctrt, Cprf)
      C <- as.matrix(Matrix::bdiag(1, Ctrt, Cprf, Ccom))
      X <- Xd %*% C
      Xobs <- X[comobs, ]
    } else {
      # Enrol n_new[i] subjects, randomising within preference subgroup
      prfenr <- dat$pref[idenr[i,1]:idenr[i,2]]
      prf <- c(prf, prfenr)
      prfnew <- prf[idobs[i,1]:idobs[i,2]]
      prfobs <- c(prfobs, prfnew)
      trtenr <- sapply(1:n_new[i], function(a) sample.int(K, 1, prob = alloc[prfenr[a], ]))
      trt <- c(trt, trtenr)
      trtnew <- trt[idobs[i,1]:idobs[i,2]]
      trtobs <- c(trtobs, trtnew)
      com <- c(com, map_com(prfenr, trtenr))
      comnew <- map_com(prfnew, trtnew)
      comobs <- c(comobs, comnew)
      yobsnew <- dat$y[cbind(idobs[i,1]:idobs[i,2], trtnew)]
      yobs <- c(yobs, yobsnew)
      # Determine preference distribution estimates
      n_obs_prf[i, ] <- as.numeric(table(factor(prfobs, 1:Q)))
      p_prf <- n_obs_prf[i, ] / sum(n_obs_prf[i, ])
      # Constrain by preference distribution (see NOTE above)
      Cprf <- eigen(diag(1, Q) - matrix(kronecker(p_prf, rep(1, K)), Q, K))[[2]][, 1:(Q-1)]
      Ccom <- kronecker(Ctrt, Cprf)
      C <- as.matrix(Matrix::bdiag(1, Ctrt, Cprf, Ccom))
      X <- Xd %*% C
      Xobs <- X[comobs, ]
    }
    final[i] <- stopped | i == N
    # Aggregate summaries
    n_enr_prf[i, ] <- as.numeric(table(factor(prf, 1:Q)))
    y_obs_prf[i, ] <- aggregate(yobs, by = list(factor(prfobs, 1:Q)), mean, drop = F)$x
    n_enr_trt[i, ] <- as.numeric(table(factor(trt, 1:K)))
    n_obs_trt[i, ] <- as.numeric(table(factor(trtobs, 1:K)))
    y_obs_trt[i, ] <- aggregate(yobs, by = list(factor(trtobs, 1:K)), mean, drop = F)$x
    n_enr_com[i, ] <- as.numeric(table(factor(com, 1:(Q*K))))
    n_obs_com[i, ] <- as.numeric(table(factor(comobs, 1:(Q*K))))
    y_obs_com[i, ] <- aggregate(yobs, by = list(factor(comobs, 1:(Q*K))), mean, drop = F)$x
    # Standardise outcomes before fitting; parameters are back-transformed
    mean_y <- mean(yobs)
    sd_y <- sd(yobs)
    y_std <- (yobs - mean_y) / sd_y
    # Update the model
    fit <- estimate_model(Xobs, y_std, ...)
    if(!fit$converged) stop("Failed to converge")
    # Back transform parameters to original scale
    mu <- drop(fit$mu) * sd_y
    mu[1] <- mu[1] + mean_y
    Sigma <- fit$Sigma * sd_y^2
    # Update the inferences
    b_mean[i, ] <- drop(C %*% mu)
    b_var[i, ] <- diag(C %*% Sigma %*% t(C))
    com_mean[i, ] <- drop(X %*% mu)
    com_var[i, ] <- diag(X %*% Sigma %*% t(X))
    trt_mean[i, ] <- drop(cbind(1, Ctrt) %*% mu[1:K])
    trt_var[i, ] <- diag(cbind(1, Ctrt) %*% Sigma[1:K, 1:K] %*% t(cbind(1, Ctrt)))
    prf_mean[i, ] <- drop(cbind(1, Cprf) %*% mu[c(1, (K+1):(K+Q-1))])
    # Use the back-transformed Sigma here (the original used fit$Sigma,
    # leaving prf_var on the standardised scale)
    prf_var[i, ] <- diag(
      cbind(1, Cprf) %*%
      Sigma[c(1,(K+1):(K+Q-1)), c(1,(K+1):(K+Q-1))] %*%
      t(cbind(1, Cprf)))
    eff_mean[i, ] <- drop(Z %*% cbind(1, Ctrt) %*% mu[1:K])
    eff_var[i, ] <- diag(Z %*% cbind(1, Ctrt) %*% Sigma[1:K, 1:K] %*% t(Z %*% cbind(1, Ctrt)))
    effcom_mean <- Zcom %*% X %*% mu
    effcom_var <- diag((Zcom %*% X) %*% Sigma %*% t(Zcom %*% X))
    # Posterior draws: betas = unconstrained parameters, means = cell means
    draws <- mvnfast::rmvn(2e4, mu, Sigma)
    betas <- draws %*% t(C)
    means <- draws %*% t(X)
    # Conclusions carry forward
    if(i == 1) {
      p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
      i_eff[i, ] <- p_eff[i, ] > eff_eps
      i_inf[i, ] <- p_eff[i, ] < 1 - eff_eps
      for(a in 1:Q) {
        id <- map_com(a, 1:K)          # all cells in preference subgroup a
        ideff <- map_com(a, 2:K) - Q   # matching columns of p_eff_com
        p_supr_com[i, id] <- prob_supr(means[, id])
        p_eff_com[i, ideff] <- 1 - pnorm(0, effcom_mean[ideff], sqrt(effcom_var[ideff]))
        # Superiority/inferiority decisions require a minimum subgroup size
        if(n_obs_prf[i, a] >= min_decision) {
          i_supr_com[i, id] <- p_supr_com[i, id] > sup_eps
          i_infr_com[i, id] <- p_supr_com[i, id] < (1 - sup_eps) / (K - 1)
          if(any(i_supr_com[i, id])) i_infr_com[i, id][i_supr_com[i, id] != 1] <- 1
          i_acti_com[i+1, id] <- 1 - i_infr_com[i, id]
        }
        i_eff_com[i, ideff] <- p_eff_com[i, ideff] > eff_eps
        i_inf_com[i, ideff] <- p_eff_com[i, ideff] < 1 - eff_eps
      }
      # Treatment-level superiority on intercept + treatment main effects;
      # columns 2:(K+1) of betas hold the K treatment effects (the original
      # hard-coded 2:5, assuming K == 4)
      p_supr_trt[i, ] <- prob_supr(betas[, 1] + betas[, 2:(K+1)])
      i_supr_trt[i, ] <- p_supr_trt[i, ] > sup_eps
      i_infr_trt[i, ] <- p_supr_trt[i, ] < (1 - sup_eps) / (K - 1)
    } else {
      p_eff[i, ] <- 1 - pnorm(0, eff_mean[i, ], sqrt(eff_var[i, ]))
      i_eff[i, ] <- (p_eff[i, ] > eff_eps) | (i_eff[i-1, ] == 1)
      i_inf[i, ] <- (p_eff[i, ] < 1 - eff_eps) | (i_inf[i-1,] == 1)
      for(a in 1:Q) {
        id <- map_com(a, 1:K)
        ideff <- map_com(a, 2:K) - Q
        act_id <- i_acti_com[i, id] == 1
        # Only compare the cells still active in this subgroup
        p_supr_com[i, id][act_id] <-
          prob_supr(means[, id][, act_id, drop = F])
        p_eff_com[i, ideff] <- 1 - pnorm(0, effcom_mean[ideff], sqrt(effcom_var[ideff]))
        if(n_obs_prf[i, a] >= min_decision) {
          i_supr_com[i, id] <- p_supr_com[i, id] >= sup_eps | (i_supr_com[i-1, id] == 1)
          i_infr_com[i, id] <- ((p_supr_com[i, id] < (1 - sup_eps) / (sum(i_acti_com[i, id]) - 1)) |
            (i_infr_com[i-1, id] == 1)) & (i_supr_com[i-1, id] != 1)
          if(any(i_supr_com[i, id])) {
            i_infr_com[i, id][i_supr_com[i, id] != 1] <- 1
          }
          i_acti_com[i+1, id] <- 1 - i_infr_com[i, id]
        }
        i_eff_com[i, ideff] <- p_eff_com[i, ideff] > eff_eps
        i_inf_com[i, ideff] <- p_eff_com[i, ideff] < 1 - eff_eps
      }
      p_supr_trt[i, ] <- prob_supr(betas[, 1] + betas[, 2:(K+1)])
      i_supr_trt[i, ] <- p_supr_trt[i, ] > sup_eps
      i_infr_trt[i, ] <- p_supr_trt[i, ] < (1 - sup_eps) / (K - 1)
    }
    # Update allocations
    if(brar) {
      for(a in 1:Q) {
        # Only BRAR if exceed minimum sample size
        if(n_obs_prf[i, a] >= min_decision) {
          id <- map_com(a, 1:K)
          # +1 guards against division by zero for empty cells
          ratio <- sqrt(p_supr_com[i, id] * i_acti_com[i+1, id] / (n_enr_com[i, id] + 1))
          alloc[a, ] <- ratio / sum(ratio)
        }
      }
    } else {
      for(a in 1:Q) {
        id <- map_com(a, 1:K)
        # Zero out dropped cells and renormalise within the subgroup
        alloc[a, ] <- alloc[a, ] * i_acti_com[i+1, id] / sum(alloc[a, ] * i_acti_com[i+1, id])
      }
    }
    # Debug escape hatch: return internal state if allocations degenerate
    if(any(is.na(alloc))) return(list(alloc, i_supr_com, i_infr_com, i_acti_com))
    if(stopped) break
    # Stop once a superior treatment is identified in every subgroup
    if(all(sapply(1:Q, function(a) any(i_supr_com[i, map_com(a, 1:K)] == 1)))) stopped <- TRUE
  }
  idx <- 1:i
  # Results (rows truncated at the last analysis performed)
  list(
    final = final[idx, , drop = F],
    n_enr_prf = n_enr_prf[idx, , drop = F],
    n_obs_prf = n_obs_prf[idx, , drop = F],
    y_obs_prf = y_obs_prf[idx, , drop = F],
    n_enr_trt = n_enr_trt[idx, , drop = F],
    n_obs_trt = n_obs_trt[idx, , drop = F],
    y_obs_trt = y_obs_trt[idx, , drop = F],
    n_enr_com = n_enr_com[idx, , drop = F],
    n_obs_com = n_obs_com[idx, , drop = F],
    y_obs_com = y_obs_com[idx, , drop = F],
    prf_mean = prf_mean[idx, , drop = F],
    prf_var = prf_var[idx, , drop = F],
    trt_mean = trt_mean[idx, , drop = F],
    trt_var = trt_var[idx, , drop = F],
    eff_mean = eff_mean[idx, , drop = F],
    eff_var = eff_var[idx, , drop = F],
    com_mean = com_mean[idx, , drop = F],
    com_var = com_var[idx, , drop = F],
    p_eff = p_eff[idx, , drop = F],
    i_eff = i_eff[idx, , drop = F],
    i_inf = i_inf[idx, , drop = F],
    p_supr_com = p_supr_com[idx, , drop = F],
    i_supr_com = i_supr_com[idx, , drop = F],
    i_infr_com = i_infr_com[idx, , drop = F],
    i_acti_com = i_acti_com[idx + 1, , drop = F],
    p_eff_com = p_eff_com[idx, , drop = F],
    i_eff_com = i_eff_com[idx, , drop = F],
    i_inf_com = i_inf_com[idx, , drop = F],
    p_supr_trt = p_supr_trt[idx, , drop = F],
    i_supr_trt = i_supr_trt[idx, , drop = F],
    i_infr_trt = i_infr_trt[idx, , drop = F],
    b_mean = b_mean[idx, , drop = F],
    b_var = b_var[idx, , drop = F]
  )
}
|
#' Title check if 2 column (numeric vector) have the same variance
#'
#' @param data dataframe
#' @param colName1 Name of the column
#' @param colName2 Name of the column
#'
#' @return fTestVariance ftest console output as variable
#' @export
#'
#' @examples
#' dataTest <- data.frame(x=c(rnorm(30, mean = 0, sd = 2)), y=c(rnorm(30, mean = 1, sd = 1)))
#' f_test_vector(dataTest,"x","y")
f_test_vector <- function(data, colName1, colName2) {
  # Compare the variances of two numeric columns of `data` with an F test.
  #
  # Args:
  #   data: a data frame containing both columns.
  #   colName1, colName2: column names (character scalars).
  #
  # Returns: the "htest" object produced by stats::var.test().
  #
  # The original computed typeof(unlist(data[colName1])) and discarded the
  # result; that dead statement has been removed.
  var.test(unlist(data[colName1]), unlist(data[colName2]))
}
| /R/f_test_vector.R | no_license | pkchouhan14/customerChurnUU | R | false | false | 574 | r | #' Title check if 2 column (numeric vector) have the same variance
#'
#' @param data dataframe
#' @param colName1 Name of the column
#' @param colName2 Name of the column
#'
#' @return fTestVariance ftest console output as variable
#' @export
#'
#' @examples
#' dataTest <- data.frame(x=c(rnorm(30, mean = 0, sd = 2)), y=c(rnorm(30, mean = 1, sd = 1)))
#' f_test_vector(dataTest,"x","y")
f_test_vector <- function(data, colName1, colName2) {
  # F test for equality of variances between two numeric columns of `data`,
  # identified by name. Returns the "htest" object from stats::var.test().
  # (Dead statement typeof(unlist(data[colName1])) removed: its value was
  # computed and discarded.)
  var.test(unlist(data[colName1]), unlist(data[colName2]))
}
|
# Run the exploitation-model plotting routine (explPlots is defined elsewhere
# in the project). Arguments are presumably pre-harvest time, harvest
# duration, harvest fraction, number of solver steps, and legend size --
# TODO confirm against the explPlots definition.
explPlots(preTime=100, harvTime=10, fHarv=0.5, steps=1000, legendSize=0.5)
| /fish_harvest.R | no_license | Bio3SS/Exploitation_models | R | false | false | 75 | r | explPlots(preTime=100, harvTime=10, fHarv=0.5, steps=1000, legendSize=0.5)
|
## Script with common theme
## Project: Non-REM sleep in major depressive disorder
## Author: Leonore Bovy
# NOTE(review): warn = -1 silences *all* warnings for the whole session;
# consider suppressWarnings() around specific calls instead.
options(warn = -1)
library(ggplot2)
# ggplot2 theme for raincloud plots: classic base with enlarged black axis
# text, legend on the left, bold title, no panel grid/border, and solid
# black axis lines.
raincloud_theme <- theme_classic() + theme(
text = element_text(size = 10),
axis.title.x = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 20, color="#000000"),
axis.text.y = element_text(size = 20, color="#000000"),
legend.title = element_text(size = 16),
legend.text = element_text(size = 16),
legend.position = "left",
plot.title = element_text(lineheight = .8, face = "bold", size = 16),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
axis.line.x = element_line(colour = "black", size = 1, linetype = "solid"),
axis.line.y = element_line(colour = "black", size = 1, linetype = "solid"))
# General-purpose theme: classic base, centred title, black axis text and
# solid axis lines (width 1).
my_theme <- theme_classic() +
theme(axis.title.x = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.text = element_text(size = 16, color="#000000"),
plot.title = element_text(size = 18, hjust = 0.5),
axis.line.x = element_line(colour = "black", size = 1, linetype = "solid"),
axis.line.y = element_line(colour = "black", size = 1, linetype = "solid"))
# Variant of my_theme for frequency plots; identical except for thicker
# axis lines (size 1.2 instead of 1).
my_theme_freq <- theme_classic() +
theme(axis.title.x = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.text = element_text(size = 16, color="#000000"),
plot.title = element_text(size = 18, hjust = 0.5),
axis.line.x = element_line(colour = "black", size = 1.2, linetype = "solid"),
axis.line.y = element_line(colour = "black", size = 1.2, linetype = "solid"))
# Scatter plot of y vs x coloured by `grouping`, with one least-squares
# regression line per group (no confidence band), in serif typography.
# NOTE(review): x, y and grouping are passed as evaluated vectors, not as
# column names, so callers must pass e.g. df$col -- confirm intended usage.
pretty_regressionplot <- function(dataframe, x, y, grouping) {
ggplot(dataframe, aes(x = x, y = y, colour = grouping)) +
geom_point(data = dataframe, aes(y = y, group = grouping), size = 4) +
geom_smooth(method = "lm", se = FALSE, fullrange=TRUE, size = 1.2) +
theme_classic() +
theme(plot.title = element_text(hjust = 0.5)) +
theme(axis.text.x = element_text(size=20, color="#000000", family="serif" ),
axis.text.y = element_text(size=20, color="#000000", family="serif"),
plot.title = element_text(size = 15,family="serif"),
axis.title.x = element_text(size=17,family="serif", margin = margin(b=10)),
axis.title.y = element_text(size=17,family="serif"),
axis.line.x = element_line(colour = "black", size = 1),
axis.line.y = element_line(colour = "black", size = 1))}
# Colour palettes per dataset: A = red, B = blue, C = green; grey "#646464"
# is the shared control/reference colour. "_pat" palettes are patient-only,
# "_all" include controls plus both patient shades.
color_dataset_a <- c("#646464", "#b20000")
color_dataset_b <- c("#646464", "#66c4ff")
color_dataset_b_pat <- c("#66c4ff", "#009dff")
color_dataset_b_all <- c("#646464", "#66c4ff", "#009dff")
color_dataset_c <- c("#646464", "#7fcf7f")
color_dataset_c_pat <- c("#7fcf7f", "#00a000")
color_dataset_c_all <- c("#646464", "#7fcf7f", "#00a000")
color_dataset_all <- c("#646464", "#b20000", "#646464", "#66c4ff", "#009dff", "#646464", "#7fcf7f", "#00a000")
color_dataset_three <- c("#646464", "#b20000", "#009dff", "#646464", "#7fcf7f", "#00a000")
color_dataset_hamd <- c("#b20000", "#66c4ff", "#7fcf7f")
color_dataset_beh <- c("#646464", "#b20000", "#646464", "#66c4ff", "#009dff")  # A and B
color_dataset_comb <- c("#646464", "#871F78")  # purple
| /funcs/theme.R | no_license | leonorebovy/Non-REM-sleep-in-major-depressive-disorder | R | false | false | 3,527 | r | ## Script with common theme
# NOTE(review): the lines below are a verbatim duplicate of the theme script
# above (dataset-concatenation artefact); sourcing this file re-defines the
# same objects with identical values.
## Project: Non-REM sleep in major depressive disorder
## Author: Leonore Bovy
options(warn = -1)
library(ggplot2)
raincloud_theme <- theme_classic() + theme(
text = element_text(size = 10),
axis.title.x = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.text.x = element_text(size = 20, color="#000000"),
axis.text.y = element_text(size = 20, color="#000000"),
legend.title = element_text(size = 16),
legend.text = element_text(size = 16),
legend.position = "left",
plot.title = element_text(lineheight = .8, face = "bold", size = 16),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
axis.line.x = element_line(colour = "black", size = 1, linetype = "solid"),
axis.line.y = element_line(colour = "black", size = 1, linetype = "solid"))
my_theme <- theme_classic() +
theme(axis.title.x = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.text = element_text(size = 16, color="#000000"),
plot.title = element_text(size = 18, hjust = 0.5),
axis.line.x = element_line(colour = "black", size = 1, linetype = "solid"),
axis.line.y = element_line(colour = "black", size = 1, linetype = "solid"))
my_theme_freq <- theme_classic() +
theme(axis.title.x = element_text(size = 16),
axis.title.y = element_text(size = 16),
axis.text = element_text(size = 16, color="#000000"),
plot.title = element_text(size = 18, hjust = 0.5),
axis.line.x = element_line(colour = "black", size = 1.2, linetype = "solid"),
axis.line.y = element_line(colour = "black", size = 1.2, linetype = "solid"))
pretty_regressionplot <- function(dataframe, x, y, grouping) {
ggplot(dataframe, aes(x = x, y = y, colour = grouping)) +
geom_point(data = dataframe, aes(y = y, group = grouping), size = 4) +
geom_smooth(method = "lm", se = FALSE, fullrange=TRUE, size = 1.2) +
theme_classic() +
theme(plot.title = element_text(hjust = 0.5)) +
theme(axis.text.x = element_text(size=20, color="#000000", family="serif" ),
axis.text.y = element_text(size=20, color="#000000", family="serif"),
plot.title = element_text(size = 15,family="serif"),
axis.title.x = element_text(size=17,family="serif", margin = margin(b=10)),
axis.title.y = element_text(size=17,family="serif"),
axis.line.x = element_line(colour = "black", size = 1),
axis.line.y = element_line(colour = "black", size = 1))}
color_dataset_a = (c("#646464", "#b20000"))
color_dataset_b = (c("#646464", "#66c4ff"))
color_dataset_b_pat = (c("#66c4ff", "#009dff"))
color_dataset_b_all = (c("#646464", "#66c4ff", "#009dff"))
color_dataset_c = (c("#646464", "#7fcf7f"))
color_dataset_c_pat = (c("#7fcf7f", "#00a000"))
color_dataset_c_all = (c("#646464", "#7fcf7f", "#00a000"))
color_dataset_all = (c("#646464", "#b20000", "#646464", "#66c4ff", "#009dff", "#646464", "#7fcf7f", "#00a000"))
color_dataset_three = (c("#646464", "#b20000", "#009dff", "#646464", "#7fcf7f", "#00a000"))
color_dataset_hamd = (c("#b20000", "#66c4ff", "#7fcf7f"))
color_dataset_beh = (c("#646464", "#b20000","#646464", "#66c4ff", "#009dff")) # A and B
color_dataset_comb = (c("#646464", "#871F78")) #purple
|
library(class)    # knn()
library(gmodels)  # CrossTable() -- loaded but not used in the visible code
# NOTE(review): setwd() with a hard-coded absolute Windows path makes this
# script non-portable; prefer relative paths.
setwd("C:\\Users\\s174300\\Documents\\00 DTU\\5. semester\\02445 Project in statistics\\02445-Projekt")
# Loads the `armdata` list used below into the global environment.
load("armdata.RData")
# Min-max normalisation (feature scaling) of the x/y/z coordinates to [0, 1].
# NOTE: yields NaN for a constant input vector (max == min).
nor <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
#DATA
# Build one row per (experiment 11, person i, repetition j): the class label
# followed by the x, y and z coordinate series with NAs dropped. For person
# 9, repetition 1 each coordinate is padded by repeating its 3rd sample
# twice -- presumably to compensate for two dropped NA samples so all rows
# have equal length (TODO confirm against armdata).
# NOTE(review): `ls` and `data` shadow base::ls() and utils::data(); rows are
# grown with rbind() in a loop, which is O(n^2) but harmless at this size.
data <- c()
for(i in 1:10){
for(j in 1:10){
ls <- c()
ls <- append(ls, i) #class
if(i == 9){
if(j == 1){
ls <- append(ls, armdata[[11]][[i]][[j]][,1][3])
ls <- append(ls, armdata[[11]][[i]][[j]][,1][3])
}
}
ls <- append(ls, armdata[[11]][[i]][[j]][,1][which(!is.na(armdata[[11]][[i]][[j]][,1]))])
if(i == 9){
if(j == 1){
ls <- append(ls, armdata[[11]][[i]][[j]][,2][3])
ls <- append(ls, armdata[[11]][[i]][[j]][,2][3])
}
}
ls <- append(ls, armdata[[11]][[i]][[j]][,2][which(!is.na(armdata[[11]][[i]][[j]][,2]))])
if(i == 9){
if(j == 1){
ls <- append(ls, armdata[[11]][[i]][[j]][,3][3])
ls <- append(ls, armdata[[11]][[i]][[j]][,3][3])
}
}
ls <- append(ls, armdata[[11]][[i]][[j]][,3][which(!is.na(armdata[[11]][[i]][[j]][,3]))])
data <- rbind(data, ls)
}
}
# Classification accuracy (%) from a confusion matrix/table: trace divided
# by the grand total. (sum(rowSums(x)) in the original equals sum(x).)
accuracy <- function(x) {
  100 * sum(diag(x)) / sum(x)
}
#CROSSVALIDATION
# 5-fold CV over the 100 rows: fold k is rows seq(k, 100, 5). Column 1 is
# the class label; columns 2:301 are the normalised coordinates used as
# features for 10-nearest-neighbour classification.
# NOTE(review): the feature columns are re-normalised inside the fold loop;
# min-max scaling is idempotent, so repeats after the first are no-ops.
for(k in 1:5){
for(i in 2:301){
data[,i] <- nor(data[,i])
}
test <- data[seq(k,100,5),seq(2,301)]
train <- data[setdiff(seq(1,100), seq(k,100,5)),seq(2,301)]
test_cat <- data[seq(k,100,5), 1]
train_cat <- data[setdiff(seq(1,100), seq(k,100,5)), 1]
pr <- knn(train, test, cl=train_cat, k=10)
tab <- table(pr, test_cat)
print(k)
print(tab)
print(accuracy(tab))
}
| /knn.r | no_license | Ersboll/02445-Projekt | R | false | false | 1,645 | r | library(class)
# NOTE(review): the lines below are a verbatim duplicate of the kNN script
# above (dataset-concatenation artefact, minus its first library(class)
# line, which is fused into the preceding metadata row).
library(gmodels)
setwd("C:\\Users\\s174300\\Documents\\00 DTU\\5. semester\\02445 Project in statistics\\02445-Projekt")
load("armdata.RData")
#NORMALIZATION (feature scaling) OF X Y and Z COORDINATES
nor <- function(x) { (x-min(x))/(max(x)-min(x)) }
#DATA
data <- c()
for(i in 1:10){
for(j in 1:10){
ls <- c()
ls <- append(ls, i) #class
if(i == 9){
if(j == 1){
ls <- append(ls, armdata[[11]][[i]][[j]][,1][3])
ls <- append(ls, armdata[[11]][[i]][[j]][,1][3])
}
}
ls <- append(ls, armdata[[11]][[i]][[j]][,1][which(!is.na(armdata[[11]][[i]][[j]][,1]))])
if(i == 9){
if(j == 1){
ls <- append(ls, armdata[[11]][[i]][[j]][,2][3])
ls <- append(ls, armdata[[11]][[i]][[j]][,2][3])
}
}
ls <- append(ls, armdata[[11]][[i]][[j]][,2][which(!is.na(armdata[[11]][[i]][[j]][,2]))])
if(i == 9){
if(j == 1){
ls <- append(ls, armdata[[11]][[i]][[j]][,3][3])
ls <- append(ls, armdata[[11]][[i]][[j]][,3][3])
}
}
ls <- append(ls, armdata[[11]][[i]][[j]][,3][which(!is.na(armdata[[11]][[i]][[j]][,3]))])
data <- rbind(data, ls)
}
}
accuracy <- function(x){sum(diag(x)/(sum(rowSums(x)))) * 100}
#CROSSVALIDATION
for(k in 1:5){
for(i in 2:301){
data[,i] <- nor(data[,i])
}
test <- data[seq(k,100,5),seq(2,301)]
train <- data[setdiff(seq(1,100), seq(k,100,5)),seq(2,301)]
test_cat <- data[seq(k,100,5), 1]
train_cat <- data[setdiff(seq(1,100), seq(k,100,5)), 1]
pr <- knn(train, test, cl=train_cat, k=10)
tab <- table(pr, test_cat)
print(k)
print(tab)
print(accuracy(tab))
}
|
## S4 classes for kaps package
## Soo-Heang Eo, 2013-08-28
# Register S3 classes from other packages so they can be used as S4 slot
# types below. NOTE(review): "kapsOptions" is also defined as a formal S4
# class further down -- registering it here as an old class is unusual;
# confirm it is intentional.
setOldClass("Formula")
setOldClass("kapsOptions")
setOldClass("Surv")
##########################################
### A class for adaptive partitioning
# Container for the tuning options of the kaps adaptive-partitioning
# algorithm; an object of this class is stored in the Options slot of the
# "kaps" objects defined below in this file.
# `slots =` replaces the long-deprecated `representation =` form; the
# resulting class definition is equivalent.
setClass(Class = "kapsOptions",
  slots = c(
    V = "numeric",
    pre.pt = "list",
    scope = "list",
    lower.limit = "numeric",
    upper.limit = "numeric",
    N.perm = "numeric",
    N.boot = "numeric",
    alpha = "numeric",
    rho = "numeric",
    fold = "logical",
    ncl = "integer",
    splits = "character",
    boot.sel = "character",
    shortcut = "logical",
    p.adjust.methods = "character"
  )
)
# Result object of a kaps fit. Slot types "Formula" and "kapsOptions" are
# registered/defined elsewhere in this file; slot semantics beyond their
# declared types are not visible here.
setClass(Class = "kaps",
representation = representation(
call = "language",
formula = "Formula",
data = "data.frame",
groupID = "vector",
index = "integer",
X = "numeric",
Z = "numeric",
pvalue = "numeric",
WH = "numeric",
t = "numeric",
pair = "numeric",
split.var = "character",
split.pt = "numeric",
mindat = "numeric",
elbow = "matrix",
over.stat.sample = "matrix",
pair.stat.sample = "matrix",
groups = "vector",
results = "list",
Options = "kapsOptions"
)
)
#####################################
### A class for data by recursive binary splits
# Holds the survival response (Surv, registered via setOldClass above),
# covariate data frames X and Z, plus residuals and their signs.
setClass(Class = "dataset",
representation = representation(
Y = "Surv",
X = "data.frame",
Z = "data.frame",
resid = "vector",
resid.sign = "vector"
)
)
# END by Soo-Heang Eo | /R/Classes.R | no_license | hanansh/kaps | R | false | false | 1,552 | r | ## S4 classes for kaps package
# NOTE(review): the lines below are a verbatim duplicate of the kaps class
# definitions above (dataset-concatenation artefact, minus the first header
# comment, which is fused into the preceding metadata row).
## Soo-Heang Eo, 2013-08-28
setOldClass("Formula")
setOldClass("kapsOptions")
setOldClass("Surv")
##########################################
### A class for adaptive partitioning
setClass(Class = "kapsOptions",
representation = representation(
V = "numeric",
pre.pt = "list",
scope = "list",
lower.limit = "numeric",
upper.limit = "numeric",
N.perm = "numeric",
N.boot = "numeric",
alpha = "numeric",
rho = "numeric",
fold = "logical",
ncl = "integer",
splits = "character",
boot.sel = "character",
shortcut = "logical",
p.adjust.methods = "character"
)
)
setClass(Class = "kaps",
representation = representation(
call = "language",
formula = "Formula",
data = "data.frame",
groupID = "vector",
index = "integer",
X = "numeric",
Z = "numeric",
pvalue = "numeric",
WH = "numeric",
t = "numeric",
pair = "numeric",
split.var = "character",
split.pt = "numeric",
mindat = "numeric",
elbow = "matrix",
over.stat.sample = "matrix",
pair.stat.sample = "matrix",
groups = "vector",
results = "list",
Options = "kapsOptions"
)
)
#####################################
### A class for data by recursive binary splits
setClass(Class = "dataset",
representation = representation(
Y = "Surv",
X = "data.frame",
Z = "data.frame",
resid = "vector",
resid.sign = "vector"
)
)
# END by Soo-Heang Eo |
# Fetch the name and location of a Twitter account's followers and write
# them to CSV via the (retired) twitteR API wrapper.
library(twitteR)
library(ROAuth)
library(bitops)
library(RCurl)
# WARNING: credentials are intentionally blank -- fill in your own app keys,
# and never commit real secrets to version control (prefer environment
# variables).
setup_twitter_oauth(consumer_key = "",
consumer_secret = "",
access_token = "",
access_secret = "")
#change it by yourself
# Screen name of the target account (blank placeholder here).
user=getUser("")
# Hydrate up to 200000 follower IDs into user objects; this call is heavily
# rate-limited by the Twitter API -- TODO confirm quota handling.
user_follower_IDs=lookupUsers(user$getFollowerIDs(200000))
length(user_follower_IDs)
# One column per follower: row 1 = display name, row 2 = location.
result<- sapply(user_follower_IDs, function(x) c(x$name, x$location))
# NOTE(review): hard-coded Windows drive path makes this non-portable.
write.csv(t(result),"G:\\Data.csv",row.names = FALSE)
# (Commented-out legacy code: per-user follower counts with 60 s sleeps to
# respect the API rate limit.)
# df <- do.call("rbind", lapply(s, as.data.frame))
# # extract the usernames
# users <- sapply(users, as.character)
# # make a data frame for the loop to work with
# users.df <- data.frame(users = users,
# followers = "", stringsAsFactors = FALSE)
# # loop to populate users$followers with follower
# # count obtained from Twitter API
# for (i in 1:nrow(users.df))
# {
# # tell the loop to skip a user if their account is protected
# # or some other error occurs
# result <- try(getUser(users.df$users[i])$followersCount, silent = TRUE);
# if(class(result) == "try-error") next;
# # get the number of followers for each user
# users.df$followers[i] <- getUser(users.df$users[i])$followersCount
# # tell the loop to pause for 60 s between iterations to
# # avoid exceeding the Twitter API request limit
# print('Sleeping for 60 seconds...')
# Sys.sleep(60);
# }
| /R/getfollowersand locations.R | no_license | Trouble404/NBA-with-Machine-Learning | R | false | false | 1,383 | r | library(twitteR)
# NOTE(review): the lines below are a verbatim duplicate of the Twitter
# follower script above (dataset-concatenation artefact, minus its first
# library(twitteR) line, which is fused into the preceding metadata row).
library(ROAuth)
library(bitops)
library(RCurl)
setup_twitter_oauth(consumer_key = "",
consumer_secret = "",
access_token = "",
access_secret = "")
#change it by yourself
user=getUser("")
user_follower_IDs=lookupUsers(user$getFollowerIDs(200000))
length(user_follower_IDs)
result<- sapply(user_follower_IDs, function(x) c(x$name, x$location))
write.csv(t(result),"G:\\Data.csv",row.names = FALSE)
# df <- do.call("rbind", lapply(s, as.data.frame))
# # extract the usernames
# users <- sapply(users, as.character)
# # make a data frame for the loop to work with
# users.df <- data.frame(users = users,
# followers = "", stringsAsFactors = FALSE)
# # loop to populate users$followers with follower
# # count obtained from Twitter API
# for (i in 1:nrow(users.df))
# {
# # tell the loop to skip a user if their account is protected
# # or some other error occurs
# result <- try(getUser(users.df$users[i])$followersCount, silent = TRUE);
# if(class(result) == "try-error") next;
# # get the number of followers for each user
# users.df$followers[i] <- getUser(users.df$users[i])$followersCount
# # tell the loop to pause for 60 s between iterations to
# # avoid exceeding the Twitter API request limit
# print('Sleeping for 60 seconds...')
# Sys.sleep(60);
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_delete.R
\name{tar_delete}
\alias{tar_delete}
\title{Delete target return values.}
\usage{
tar_delete(names)
}
\arguments{
\item{names}{Names of the targets to remove from \verb{_targets/objects/}.
You can supply symbols, a character vector,
or \code{tidyselect} helpers like \code{\link[=starts_with]{starts_with()}}.}
}
\description{
Delete the return values of targets in \verb{_targets/objects/},
but keep the records in \verb{_targets/meta/meta}.
Dynamic files outside the data store are unaffected.
The \verb{_targets/} data store must be in the current working directory.
}
\details{
For patterns recorded in the metadata, all the branches
will be deleted. For patterns no longer in the metadata,
branches are left alone.
}
\examples{
if (identical(Sys.getenv("TARGETS_LONG_EXAMPLES"), "true")) {
tar_dir({
tar_script(
tar_pipeline(
tar_target(y1, 1 + 1),
tar_target(y2, 1 + 1),
tar_target(z, y1 + y2)
)
)
tar_make()
tar_delete(starts_with("y")) # Only deletes y1 and y2.
tar_make() # y1 and y2 rebuild but return same values, so z is up to date.
})
}
}
| /man/tar_delete.Rd | permissive | liutiming/targets | R | false | true | 1,159 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_delete.R
\name{tar_delete}
\alias{tar_delete}
\title{Delete target return values.}
\usage{
tar_delete(names)
}
\arguments{
\item{names}{Names of the targets to remove from \verb{_targets/objects/}.
You can supply symbols, a character vector,
or \code{tidyselect} helpers like \code{\link[=starts_with]{starts_with()}}.}
}
\description{
Delete the return values of targets in \verb{_targets/objects/},
but keep the records in \verb{_targets/meta/meta}.
Dynamic files outside the data store are unaffected.
The \verb{_targets/} data store must be in the current working directory.
}
\details{
For patterns recorded in the metadata, all the branches
will be deleted. For patterns no longer in the metadata,
branches are left alone.
}
\examples{
if (identical(Sys.getenv("TARGETS_LONG_EXAMPLES"), "true")) {
tar_dir({
tar_script(
tar_pipeline(
tar_target(y1, 1 + 1),
tar_target(y2, 1 + 1),
tar_target(z, y1 + y2)
)
)
tar_make()
tar_delete(starts_with("y")) # Only deletes y1 and y2.
tar_make() # y1 and y2 rebuild but return same values, so z is up to date.
})
}
}
|
# Plot project churn per interval from a local CSV export.
# NOTE(review): "head=TRUE" relies on partial matching of read.csv's
# "header" argument; it works but spelling it out would be clearer.
projectchurn <- read.csv(file="D:/Development/httpd-data/project-churn.csv",head=TRUE,sep=",")
x <- projectchurn$interval  # time interval (x axis)
y <- projectchurn$churn     # churn measure (y axis)
# type "o" overplots points connected by lines.
plot(x,y, type="o")
| /analysis/projectChurn.R | no_license | andymeneely/httpd-history | R | false | false | 168 | r | projectchurn <- read.csv(file="D:/Development/httpd-data/project-churn.csv",head=TRUE,sep=",")
x <- projectchurn$interval
y <- projectchurn$churn
plot(x,y, type="o")
|
library(ggplot2)
library(dplyr)
library(lubridate)
# Plot energy sub-metering for 2007-02-01/02 with both base graphics and ggplot2.
#
# Reads household_power_consumption.txt from the working directory, restricts
# it to the two-day window, writes plot3.png (base) and plot3_gg.png (ggplot2),
# and returns the ggplot object.
#
# Fixes relative to the original:
#  * read.csv must receive `na.strings` (not `na.string`): the argument is
#    forwarded through `...` to read.table, where partial matching does not
#    apply, so "?" entries were never treated as NA and broke the numeric
#    colClasses.
#  * the long-format table is built with base R; the original called
#    reshape::melt() but the reshape package is never loaded in this file.
#  * ggsave() is given the plot explicitly; inside a function a bare `gfinal`
#    does not print, so last_plot() was not reliably set.
plot3 <- function(){
    # load the data
    power_cons <- read.csv("household_power_consumption.txt", sep=";",
                 colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),
                 na.strings="?")
    timeperiod <- interval(ymd_hms("2007-02-01 00:00:00"), ymd_hms("2007-02-02 24:00:00"))
    power_cons_interval <- power_cons %>% mutate(Datetime=dmy_hms(paste(Date, Time))) %>% filter(Datetime %within% timeperiod)
    # Long format for ggplot2: one row per (Datetime, sub-meter) pair.
    meters <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
    data_submeter <- data.frame(
        Datetime = rep(power_cons_interval$Datetime, times = length(meters)),
        variable = factor(rep(meters, each = nrow(power_cons_interval)), levels = meters),
        value    = unlist(power_cons_interval[meters], use.names = FALSE)
    )
    # use base plot
    png("plot3.png", width = 480, height = 480)
    with(power_cons_interval, plot(Datetime, Sub_metering_1, xlab="", ylab="Energy sub metering", type="l"))
    with(power_cons_interval, lines(Datetime, Sub_metering_2, col="red"))
    with(power_cons_interval, lines(Datetime, Sub_metering_3, col="blue"))
    legend("topright", lty = c(1,1,1), col=c("black","red","blue"), legend=meters)
    dev.off()
    # use ggplot2
    g <- ggplot(data=data_submeter, aes(Datetime, value, color=variable))
    gfinal <- g + geom_line() + labs(x=NULL, y ="Energy sub metering", color=NULL) + theme(legend.position=c(0.9, 0.9))
    ggsave("plot3_gg.png", plot = gfinal, width=4.80, height=4.80, dpi=100)
    return(gfinal)
}
| /plot3.R | no_license | jschelbert/ExData_Plotting1 | R | false | false | 1,381 | r | library(ggplot2)
library(dplyr)
library(lubridate)
plot3 <- function(){
# load the data
power_cons <- read.csv("household_power_consumption.txt", sep=";",
colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"),
na.string="?")
timeperiod <- interval(ymd_hms("2007-02-01 00:00:00"), ymd_hms("2007-02-02 24:00:00"))
power_cons_interval <- power_cons %>% mutate(Datetime=dmy_hms(paste(Date, Time))) %>%filter(Datetime %within% timeperiod)
data_submeter <- melt(power_cons_interval[,7:10], "Datetime")
# use base plot
png("plot3.png", width = 480, height = 480)
with(power_cons_interval, plot(Datetime, Sub_metering_1, xlab="", ylab="Energy sub metering", type="l"))
with(power_cons_interval, lines(Datetime, Sub_metering_2, col="red"))
with(power_cons_interval, lines(Datetime, Sub_metering_3, col="blue"))
legend("topright", lty = c(1,1,1), col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
# use ggplot2
g <- ggplot(data=data_submeter, aes(Datetime, value, color=variable))
gfinal <- g + geom_line() + labs(x=NULL, y ="Energy sub metering", color=NULL) + theme(legend.position=c(0.9, 0.9))
gfinal
ggsave("plot3_gg.png", width=4.80, height=4.80, dpi=100)
return(gfinal)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BbcSE_manip_methods.R
\docType{methods}
\name{cbind,BbcSE-method}
\alias{cbind,BbcSE-method}
\title{Column combining method for BbcSE}
\usage{
\S4method{cbind}{BbcSE}(..., deparse.level = 1)
}
\arguments{
\item{...}{BbcSE objects}
\item{deparse.level}{See ?base::cbind for a description of this argument.}
}
\description{
Column combining method for BbcSE
}
| /man/cbind-BbcSE-method.Rd | no_license | deanpettinga/bbcRNA | R | false | true | 437 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BbcSE_manip_methods.R
\docType{methods}
\name{cbind,BbcSE-method}
\alias{cbind,BbcSE-method}
\title{Column combining method for BbcSE}
\usage{
\S4method{cbind}{BbcSE}(..., deparse.level = 1)
}
\arguments{
\item{...}{BbcSE objects}
\item{deparse.level}{See ?base::cbind for a description of this argument.}
}
\description{
Column combining method for BbcSE
}
|
library(secr)
# Load prepared objects: capture histories (cptr_hst) and habitat mask
# (maskClosed) built elsewhere in the pipeline.
load("inputs.RData")
# is magnitude of detection affected by leopards
# Closed-population SECR fit: baseline detection g0 modelled as a function of
# leopard RAI; detectfn=1 selects a non-default detection curve (see
# ?secr::detectfn -- confirm the intended shape); CL=FALSE uses the full
# likelihood so density is estimated directly.
mClosedg04 <- secr.fit(cptr_hst, model=g0~LeopardRAI, mask=maskClosed, detectfn=1, CL=FALSE)
saveRDS(mClosedg04, file = "mClosedg04.rds")
load("inputs.RData")
# is magnitude of detection affected by leopards
mClosedg04 <- secr.fit(cptr_hst, model=g0~LeopardRAI, mask=maskClosed, detectfn=1, CL=FALSE)
saveRDS(mClosedg04, file = "mClosedg04.rds") |
## Figure code for basal area and climatic range
# by Alice Linder
## written Feb. 6 2017
# DBH Community composition script to compare focal individuals with surrounding DBH to find competitiveness index
## July 28, 2016
## TO DO: scale center of basal areas to account for trend of decreasing DBHs as you increase latitude
### command for this: scale(x, center = TRUE, scale = FALSE)
rm(list = ls())
setwd("~/Library/Mobile Documents/com~apple~CloudDocs/GitHub/senior-moment/data")
# setwd("~/Documents/git/senior-moment/data") # For Dan
# set libraries
library(vegan) # install.packages("vegan")
library(lme4)# install.packages("lme4")
library(scales)# install.packages("scales")
library(ggplot2) # install.packages("ggplot2")
library(plyr) # install.packages("plyr")
library(reshape) # install.packages("reshape")
library(sjPlot) # install.packages("sjPlot")
# detach("package:dplyr", unload=TRUE)
# load all data from source code
source("Fig2-source.R")
clim <- read.csv("climatic_data.csv")
clim <- subset(clim, select = c("Individual", "distance.to.climatic.centroid"))
# plot intraspecific competition
# ignore extra large DBH for FAGGUS value
# NOTE: the original used focal.centroid[-which(cond), ] which silently drops
# ALL rows when no row matches (which() returns integer(0), and indexing by
# -integer(0) selects zero rows). subset() with a negated condition is safe
# in that case. Caveat: subset() also drops rows whose condition is NA,
# whereas -which() kept them -- acceptable here since sp/sum.BA are expected
# to be complete.
focal.centroid <- subset(focal.centroid, !(sp == "FAGGRA" & sum.BA > 20000))
# ignore QUEALB for graphing purposes
focal.centroid <- subset(focal.centroid, sp != "QUEALB")
# Attach each individual's distance to the climatic centroid.
clim.focal <- merge(focal.centroid, clim, by = "Individual")
save(clim.focal, file="Clim.Focal.RData")
# ggplot(clim.focal,
# aes(distance.to.climatic.centroid, relative.BA, color = sp)) +
# geom_point() +
# geom_smooth(method="lm", se=F) +
# facet_wrap(~sp, ncol = 3, scales = "free") +
# xlab("Distance from Climatic Centroid") +
# ylab("Relative Basal Area")
# Analysis. Single linear models, very simple analysis here.
# ?lme
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "ACEPEN",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "BETPAP",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "CORALT",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "FAGGRA",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "HAMVIR",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "SORAME",]))
# Mixed effect model to use all species in single analysis
lme1 <- lmer(relative.BA ~ distance.to.climatic.centroid + (distance.to.climatic.centroid | sp), data = clim.focal)
fixef(lme1)
ranef(lme1)
summary(lme1)
ranef <- ranef(lme1)
sjt.lmer(lme1)
clim <- read.csv("climatic_data.csv")
clim <- subset(clim, select = c("Individual", "distance.to.climatic.centroid"))
# plot intraspecific competition
# ignore extra large DBH for FAGGUS value
focal.centroid <- focal.centroid[-which(focal.centroid$sp == "FAGGRA" & focal.centroid$sum.BA > 20000),]
# ignore QUEALB for graphing purposes
focal.centroid <- focal.centroid[-which(focal.centroid$sp == "QUEALB"),]
clim.focal <- merge(focal.centroid, clim, by = "Individual")
#ggplot(clim.focal,
#aes(distance.to.climatic.centroid, relative.BA, color = sp)) +
#geom_point() +
#geom_smooth(method="lm", se=F) +
#facet_wrap(~sp, ncol = 3, scales = "free") +
#xlab("Distance from Climatic Centroid") +
#ylab("Relative Basal Area")
myspecieslist <- unique(clim.focal$sp)
mycolors <- rep(c("#1B9E77", "#D95F02", "#7570B3", "#E7298A", "#66A61E", "#E6AB02"), 10) # need 6 really!
plot(clim.focal$distance.to.climatic.centroid, clim.focal$relative.BA, type="n", main="Competitiveness Index across Climatic Envelope", xlab="Distance to Climatic Centroid", ylab="Relative Basal Area")
for (i in c(1:length(myspecieslist))){
subby <- subset(clim.focal, sp==myspecieslist[i])
points(subby$distance.to.climatic.centroid, subby$relative.BA, col=mycolors[i], pch=16)
# pch is symbol shape
}
#Overall trend
abline(0.09936796, -0.01446959 , col="black", lwd=3) # overall mean
#ACEPEN
abline(-0.08582783, 0.019607063, col="#1B9E77", lwd=2)
#BETPAP
abline(0.40367686, -0.092218542, col="#D95F02", lwd=2)
#CORALT
abline(-0.11111040, 0.025382776, col="#7570B3", lwd=2)
#FAGGRA
abline(-0.06564860, 0.014997190, col="#E7298A", lwd=2)
#HAMVIR
abline(-0.04179071, 0.009546939, col="#66A61E", lwd=2)
#SORAME
abline(-0.09929931, 0.022684574, col="#E6AB02", lwd=2)
?legend
legend('topright', legend=c("A. pensylvanicum", "B. papyrifera", "C. alternifolia", "F. grandifola", "H. virginiana", "S. americana"),
lty=1, col=mycolors, bty='n', cex=.75)
load("BA-CHVols.RData")
| /analyses/input/Fig3-BA_vs_Clim.r | no_license | alicelinder/senior-moment | R | false | false | 4,734 | r | ## Figure code for basal area and climatic range
# by Alice Linder
## written Feb. 6 2017
# DBH Community composition script to compare focal individuals with surrounding DBH to find competitiveness index
## July 28, 2016
## TO DO: scale center of basal areas to account for trend of decreasing DBHs as you increase latitude
### command for this: scale(x, center = TRUE, scale = FALSE)
rm(list = ls())
setwd("~/Library/Mobile Documents/com~apple~CloudDocs/GitHub/senior-moment/data")
# setwd("~/Documents/git/senior-moment/data") # For Dan
# set libraries
library(vegan) # install.packages("vegan")
library(lme4)# install.packages("lme4")
library(scales)# install.packages("scales")
library(ggplot2) # install.packages("ggplot2")
library(plyr) # install.packages("plyr")
library(reshape) # install.packages("reshape")
library(sjPlot) # install.packages("sjPlot")
# detach("package:dplyr", unload=TRUE)
# load all data from source code
source("Fig2-source.R")
clim <- read.csv("climatic_data.csv")
clim <- subset(clim, select = c("Individual", "distance.to.climatic.centroid"))
# plot intraspecific competition
# ignore extra large DBH for FAGGUS value
focal.centroid <- focal.centroid[-which(focal.centroid$sp == "FAGGRA" & focal.centroid$sum.BA > 20000),]
# ignore QUEALB for graphing purposes
focal.centroid <- focal.centroid[-which(focal.centroid$sp == "QUEALB"),]
clim.focal <- merge(focal.centroid, clim, by = "Individual")
save(clim.focal, file="Clim.Focal.RData")
# ggplot(clim.focal,
# aes(distance.to.climatic.centroid, relative.BA, color = sp)) +
# geom_point() +
# geom_smooth(method="lm", se=F) +
# facet_wrap(~sp, ncol = 3, scales = "free") +
# xlab("Distance from Climatic Centroid") +
# ylab("Relative Basal Area")
# Analysis. Single linear models, very simple analysis here.
# ?lme
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "ACEPEN",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "BETPAP",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "CORALT",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "FAGGRA",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "HAMVIR",]))
# summary(lm1 <- lm(relative.BA ~ distance.to.climatic.centroid, data = clim.focal[clim.focal$sp == "SORAME",]))
# Mixed effect model to use all species in single analysis
lme1 <- lmer(relative.BA ~ distance.to.climatic.centroid + (distance.to.climatic.centroid | sp), data = clim.focal)
fixef(lme1)
ranef(lme1)
summary(lme1)
ranef <- ranef(lme1)
sjt.lmer(lme1)
clim <- read.csv("climatic_data.csv")
clim <- subset(clim, select = c("Individual", "distance.to.climatic.centroid"))
# plot intraspecific competition
# ignore extra large DBH for FAGGUS value
# NOTE: the original used focal.centroid[-which(cond), ] which silently drops
# ALL rows when no row matches (which() returns integer(0), and indexing by
# -integer(0) selects zero rows). subset() with a negated condition is safe
# in that case. Caveat: subset() also drops rows whose condition is NA,
# whereas -which() kept them -- acceptable here since sp/sum.BA are expected
# to be complete.
focal.centroid <- subset(focal.centroid, !(sp == "FAGGRA" & sum.BA > 20000))
# ignore QUEALB for graphing purposes
focal.centroid <- subset(focal.centroid, sp != "QUEALB")
# Attach each individual's distance to the climatic centroid.
clim.focal <- merge(focal.centroid, clim, by = "Individual")
#ggplot(clim.focal,
#aes(distance.to.climatic.centroid, relative.BA, color = sp)) +
#geom_point() +
#geom_smooth(method="lm", se=F) +
#facet_wrap(~sp, ncol = 3, scales = "free") +
#xlab("Distance from Climatic Centroid") +
#ylab("Relative Basal Area")
myspecieslist <- unique(clim.focal$sp)
mycolors <- rep(c("#1B9E77", "#D95F02", "#7570B3", "#E7298A", "#66A61E", "#E6AB02"), 10) # need 6 really!
plot(clim.focal$distance.to.climatic.centroid, clim.focal$relative.BA, type="n", main="Competitiveness Index across Climatic Envelope", xlab="Distance to Climatic Centroid", ylab="Relative Basal Area")
for (i in c(1:length(myspecieslist))){
subby <- subset(clim.focal, sp==myspecieslist[i])
points(subby$distance.to.climatic.centroid, subby$relative.BA, col=mycolors[i], pch=16)
# pch is symbol shape
}
#Overall trend
abline(0.09936796, -0.01446959 , col="black", lwd=3) # overall mean
#ACEPEN
abline(-0.08582783, 0.019607063, col="#1B9E77", lwd=2)
#BETPAP
abline(0.40367686, -0.092218542, col="#D95F02", lwd=2)
#CORALT
abline(-0.11111040, 0.025382776, col="#7570B3", lwd=2)
#FAGGRA
abline(-0.06564860, 0.014997190, col="#E7298A", lwd=2)
#HAMVIR
abline(-0.04179071, 0.009546939, col="#66A61E", lwd=2)
#SORAME
abline(-0.09929931, 0.022684574, col="#E6AB02", lwd=2)
?legend
legend('topright', legend=c("A. pensylvanicum", "B. papyrifera", "C. alternifolia", "F. grandifola", "H. virginiana", "S. americana"),
lty=1, col=mycolors, bty='n', cex=.75)
load("BA-CHVols.RData")
|
require(Kendall)
require(hydroTSM)
require(lubridate)
require(tibble)
require(dplyr)
require(hydroGOF)
source('juli_ET.R')
source('juli_ET_v2.R')
source('stage1.R')
source('euler.R')
source('SCS_curve_v2.R')
source('CN_calendar.R')
source('Kc_calendar.R')
source('Percolation.R')
source('Percolation_Rice.R')
########################## Long term rainfall
input_file = "Tirumungulam_rainfall_modified_v2.csv"
input = read.csv(input_file,header=TRUE)
# Column 1: date (YYYY-MM-DD); column 2: rainfall amount.
in_date = as.Date(input[,1],"%Y-%m-%d")
# Daily rainfall as a zoo time series indexed by date.
x = zoo(input[,2],in_date)
t = window(x,start=as.Date(input[1,1]))
########################## 2013 field collected rainfall
input_file = "Hourly_rain.csv"
input2 = read.csv(input_file,header=TRUE)
# Hourly timestamps for the field record; 5324 hours from 2013-09-26 must
# match the number of rows in Hourly_rain.csv -- TODO confirm against file.
date_seq = seq(as.POSIXct("2013-09-26 00:00"),by="hour",length.out = 5324)
# Column 4 holds the hourly rainfall values.
t2 = zoo(input2[4],date_seq)
# Aggregate hourly -> daily totals; *1000 presumably converts m to mm --
# verify units of the raw file.
t_daily = subdaily2daily(t2,FUN=sum)*1000
# Restrict to the first 163 days of the record.
t_daily = t_daily[1:163]
#############################
##Box 1: Runoff generation
#############################
LU_details = tribble(
~LU1, ~LU2, ~LU3, ~Per_Area,
'Fallow','Fallow','Fallow',30,
'Rice', 'Cotton', 'Fallow', 60,
'KH_Millet', 'Fallow','Fallow',0,
'Juliflora','Fallow','Fallow',10)
LU_num = 3
#For now assuming constant crop depletion factor
LU_Parameters = tribble(
~LU,~plant_month,~LI,~LD,~LM,~LL,~KCI,~KCM,~KCL,~RD,~CN,
'Rice',10,20,30,30,25,1,1.20,0.9,300,9999,
'KH_Millet',10,20,35,40,30,0.3,1.2,0.5,300,70,
'Fallow',0,0,0,0,0,0,0,0,100,58,
'Juliflora',1,365,0,0,0,1.2,0,0,1000,80,
'Cotton',2,30,50,60,55,0.35,1.15,0.6,300,66)
#Rice CN is 9999 (basically implying that the SCS method is invalid for it)
CN_cal = CN_calendar(LU_details,LU_num,LU_Parameters)
Kc_cal = Kc_calendar(LU_details,LU_num,LU_Parameters)
AMC = 0
#This basically converts the orignal curve number formula to mm
#S = (25400/CN)-254
#Input the PET rate from Madurai for every month
PET = c(4.1,4.4,5.7,5.2,5.3,4.7,4.4,4.6,4.6,3.9,3.5,3.7)
#############################
##Box 2: Soil Moisture box
#############################
#Used to calculate the readily available moisture in the soil
rho1 = 0.3
#The max percolation rate calculated using Saturated hydraulic conductivity
max_percolation = (5*10^-5)*(24*60*60) #mm/day; from Gowing paper
Rice_max_percolation = 7 #mm/day; from Gowing paper
#############################
##Box 3: Groundwater box
#############################
AQ1_max = 600
AQ1_ini = 500
#############################
##Soil Parameters
#############################
RD = 500
Soil_WP = 0.1725
Soil_FC = 0.2825
Soil_sat = 0.415
max_percolation = 4.32 #mm/day; from Gowing paper
TAW1 = Soil_FC - Soil_WP
RAW1 = rho1*TAW1
pad_BH = 70
soil_paddy1 = vector()
soil_paddy1[1] = 0
#############################
##Variables
#############################
Q1f = vector()
ET1 = vector()
ET2 = vector()
Qw = vector()
Qr = vector()
Qx = vector()
S1 = vector()
S2 = vector()
S3 = vector()
Qf = vector()
Qu = vector()
Sc1 = vector()
Sc2 = vector()
runoff = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
DP1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
ET1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
SM1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
IF1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
AQ1 = vector()
runoff_vol = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
mod_loss = 0
#Tank variables
inflow_f1 = vector()
inflow_s1 = vector()
t1_inflow = vector()
t1_precip = vector()
t1_area = vector()
t1_area0 = 0
t1_vol = vector()
t1_vol0 = 0
t1_stage = vector()
t1_area = vector()
t1_spill = vector()
t1_sluice = vector()
t1_GW = vector()
t1_ET = vector()
t1_all = data.frame(matrix(ncol = 8, nrow = 0))
t1_const = as.data.frame(cbind(5e6,3.595,30,276405))
colnames(t1_const) = c("max_catch","weir_height","spill_len","max_volume")#Units c(m2,meter,meter,m3)
#Com Constants
HRU_Areas1 = t(LU_details$Per_Area)*t1_const$max_catch/100
#############################
#Initialize
#############################
samay = length(t_daily)
i = 146
for(i in 1:samay) {
cur_date = t_daily[i]
doy = yday(cur_date)
cur_month = month(index(cur_date))#months[i]
month_name = month.abb[cur_month]
cur_P = coredata(t_daily[i]) #100#test[i]
#Runoff Generation
#Estimate the antecendant conditions of the catchment
if(i > 5){
#rain_5 = sum(t_daily[(i-5):(i-1)]) * 1000
rain_5 = 500
} else {rain_5 = 30}
j = 3
for (j in 1:ncol(CN_cal)){
cur_CN = CN_cal[doy,j]
cur_pars = filter(LU_Parameters,CN==cur_CN)
cur_LU = cur_pars$LU
cur_kc = Kc_cal[doy,j]
if(cur_LU != 'Rice'){
runoff[i,j] = SCS_curve(cur_CN,cur_P,rain_5)
runoff_vol[i,j] = (LU_details$Per_Area[j] * t1_const$max_catch) * runoff[i,j] * (1/1000)
if (i > 1) {
#Keep an eye on if I should be calculating Percolation and ET based on the modified SM vs exact SM from previous timestep
temp_SM = SM1[i-1,j] + cur_P - runoff[i,j]
#Remove the interflow volume right away (if applicable)
if (temp_SM>cur_pars$RD*Soil_sat){
#Purely to ensure that the change in LU doesn't lead to interflow when there is no rainfall due to changing root depth
if (cur_P == 0 & (CN_cal[doy,j] != CN_cal[doy-1,j])) {
mod_loss_temp = (temp_SM - cur_pars$RD*Soil_sat)
mod_loss = mod_loss + (temp_SM - cur_pars$RD*Soil_sat)
IF1[i,j] = 0
temp_SM = temp_SM - IF1[i,j] - mod_loss_temp
} else {
IF1[i,j] = temp_SM - cur_pars$RD*Soil_sat
temp_SM = temp_SM - IF1[i,j]
}
} else {
IF1[i,j] = 0
}
#Basically call the function Percolation that uses one of the methods described in Raven
DP1[i,j] = Percolation(temp_SM,max_percolation,cur_pars)
#Estimate the evapotranspiration by first estimating the Water stress factor Ks
TAW1 = (cur_pars$RD * Soil_FC)-(cur_pars$RD * Soil_WP)
RAW1 = rho1 * TAW1
#Adjusted soil moisture is basically the total SM - WP
AdSM = temp_SM - (cur_pars$RD * Soil_WP)
if (AdSM < (TAW1-RAW1)){
#Keep an eye on whether to use SM1[i-1,j] or temp_SM
Ks = AdSM / (TAW1-RAW1)
ET1[i,j] = Ks * cur_kc * PET[cur_month]
if (ET1[i,j] < 0) {ET1[i,j] = 0}
} else {
ET1[i,j] = 1*cur_kc*PET[cur_month]
}
#The way it is setup right now the SM will never end the day at saturation point
SM1[i,j] = temp_SM - DP1[i,j] - ET1[i,j]
} else {
SM1[i,j] = cur_P - runoff[i,j]
if (SM1[i,j]>cur_pars$RD*Soil_sat){
DP1[i,j] = max_percolation
IF1[i,j] = SM1[i,j] - cur_pars$RD * Soil_sat - max_percolation
ET1[i,j] = 1 * cur_kc * PET[cur_month]
SM1[i,j] = SM1[i,j] - DP1[i,j] - IF1[i,j] - ET1[i,j]
} else if (SM1[i,j] < cur_pars$RD * Soil_sat){
DP1[i,j] = Percolation(SM1[i,j],max_percolation,cur_pars)
IF1[i,j] = 0
#Need to add a value to scale the AET based on the soil moisture level
#Ignore the values generated at i=1
ET1[i,j] = 1 * cur_kc * PET[cur_month]
SM1[i,j] = SM1[i,j] - DP1[i,j] - IF1[i,j] - ET1[i,j]
if (SM1[i,j] <= 0){SM1[i,j] = 0}
} else {
(print('Error due to Deep percolation calculation'))
}
}
} else if (cur_LU=='Rice'){
temp_SM = SM1[i-1,j] + cur_P
Rice_SM_sat = 0.415*cur_pars$RD
Rice_run_thresh = Rice_SM_sat + pad_BH
#Calculate the runoff from the rice fields here. Runoff only starts when the total SM value on the rice fields is greater than
#the bund height + soil saturation water content
if (temp_SM > Rice_run_thresh){
runoff[i,j] = temp_SM - Rice_run_thresh
runoff_vol[i,j] = (LU_details$Per_Area[j] * t1_const$max_catch) * runoff[i,j] * (1/1000)
temp_SM = temp_SM - runoff[i,j]
} else if (temp_SM < Rice_run_thresh){
runoff[i,j] = 0
runoff_vol[i,j] = (LU_details$Per_Area[j] * t1_const$max_catch) * runoff[i,j] * (1/1000)
} else {print('Error due to Rice runoff calculation')}
#Start calculating deep percolation from the rice fields here using the method in Gowing paper
DP1[i,j] = Percolation_Rice(temp_SM,Rice_max_percolation,cur_pars)
#Estimate the evapotranspiration by first estimating the Water stress factor Ks
TAW1 = (cur_pars$RD * Soil_FC)-(cur_pars$RD * Soil_WP)
RAW1 = rho1 * TAW1
#Adjusted soil moisture is basically the total SM - WP
if (temp_SM > Rice_SM_sat){
AdSM = Rice_SM_sat -(cur_pars$RD * Soil_WP)
} else if (temp_SM < Rice_SM_sat) {
AdSM = temp_SM -(cur_pars$RD * Soil_WP)
} else {print('Error due to Soil Moisture--ET calculation')}
if (AdSM < (TAW1-RAW1)){
#Keep an eye on whether to use SM1[i-1,j] or temp_SM
Ks = AdSM / (TAW1-RAW1)
ET1[i,j] = Ks * cur_kc * PET[cur_month]
if (ET1[i,j] < 0) {ET1[i,j] = 0}
} else {
ET1[i,j] = 1*cur_kc*PET[cur_month]
}
#Interflow will always be zero
IF1[i,j] = 0
#The way it is setup right now the SM will never end the day at saturation point
SM1[i,j] = temp_SM - DP1[i,j] - ET1[i,j]
}
}
#Estimate the deep percolation from all the HRU's
if (i > 1) {AQ1[i] = AQ1[i-1] + sum(DP1[i,])
} else {AQ1[1] = AQ1_ini + sum(DP1[i,])}
#Tank1 Start
#Get the tank area from the previous timestep
if (i == 1) {
t1_area[1] = 0
t1_vol[1] = 0
t1_temp_area = 0
t1_temp_vol = 0
} else {
t1_temp_area = t1_area[i-1]
t1_temp_vol = t1_vol[i-1]
}
#Estimate the area for each of the HRU's (special HRU is Fallow which converts to tank)
HRU_Areas1_temp = HRU_Areas1
HRU_Areas1_temp[1] = HRU_Areas1_temp[1] - t1_temp_area
t1_inflow_temp = (IF1[i,] + runoff[i,]) * HRU_Areas1_temp * (1/1000)
t1_inflow[i] = sum(t1_inflow_temp)
#Estimate the water added to the tank by direct precipitation
t1_precip[i] = cur_P * t1_temp_area *(1/1000)
#Update the temp_tank volume to include the inputs calculated above
t1_temp_vol = t1_temp_vol + t1_precip[i] + t1_inflow[i]
#Update the t1_temp area and then subsequently the fallow area
#Stage-Volume relationship from Mike data
t1_temp_stage = (t1_temp_vol/22914)^(1/1.9461)
#Stage Area
t1_temp_area=42942*(t1_temp_stage)^1.0993
#HRU_Areas update
HRU_Areas1_temp[1] = HRU_Areas1[1] - t1_temp_area
#ET from tank
t1_ET[i] = t1_temp_area * PET[cur_month] * (1/1000) #m3/day
####Compare the tank capacity and current volume of water in tank.
vol_diff1 = t1_temp_vol - t1_const$max_volume
if (vol_diff1 >= 0){
t1_spill[i] = vol_diff1
} else{ t1_spill[i] = 0 }
####Sluice Outflow
Qo1a = (( t1_temp_stage - 0.785 ) * 5.1903 ) * 86.4 #86.4 Converst L/s to m3/d
Qo1b = ((( t1_temp_stage - 1.185 ) * 9.6768 ) + (( t1_temp_stage - 1.185 ) * 4.9196 )) * 86.4 #86.4 Converst L/s to m3/d
if ( Qo1a < 0 ){ Qo1a = 0 }
if ( Qo1b < 0 ) { Qo1b = 0 }
if ( t1_temp_stage < 0.785 ){
t1_sluice[i] = 0 } else if (0.785 < t1_temp_stage & t1_temp_stage < 1.185) {
t1_sluice[i] = Qo1a} else if( t1_temp_stage > 1.185 ){
t1_sluice[i] = ( Qo1a + Qo1b )}
###Spillage from upstream tank
t1_spill_add = 0
####GW exchange- Mike paper
GW_loss1 = 8.6 * t1_temp_stage - 6.5 #mm/day
if ( GW_loss1 < 0) { GW_loss1 = 0}
t1_GW[i] = ( GW_loss1/1000 ) * t1_temp_area #m3/day
#Total Storage change
t1_vol[i] = t1_temp_vol - ( t1_ET[i] + t1_sluice[i] + t1_spill[i] + t1_GW[i] )
#Stage-Volume relationship from Mike data
t1_stage[i] = (t1_vol[i]/22914) ^ (1/1.9461)
#Stage Area
t1_area[i] = 42942 * (t1_stage[i]) ^ 1.0993
#cur_all=cbind(t1_stage[j],t1_area[j],t1_vol[j],inflow1[j],coredata(inflow_vol),t1_ET[j],t1_sluice[j],t1_spill[j],t1_GW[j])
#t1_all=rbind(t1_all,cur_all)
i=i+1
}
# Load the observed tank-1 stage record BEFORE its first use.
# (In the original this read.csv came after the first lines()/NSE calls, so a
# fresh session failed with "object 't1_field_stage' not found".)
t1_field_stage = read.csv('tank1_fieldstage.csv',stringsAsFactors = FALSE)
# Modelled vs observed stage over the calibration window (days 12-110).
plot(t1_stage[12:110],type = 'l',ylim=c(0,4))
lines(t1_field_stage$T1_stage.m.[12:110],col = 'purple')
# Nash-Sutcliffe efficiency of modelled stage against observations.
NSE.default(t1_stage[12:110],t1_field_stage$T1_stage.m.[12:110])
# Aquifer storage trajectory over the whole run.
plot(AQ1,type = 'l')
#Estimating the Soil Moisture Balance for each HRU
inflow1 = IF1 + runoff             # interflow + surface runoff per HRU (mm)
inflow2 = apply(inflow1, 1, mean)  # daily mean across HRUs
plot(inflow2, type = 'l')
# Full-period modelled vs observed stage.
plot(t1_stage,type = 'l')
lines(t1_field_stage$T1_stage.m.,col = 'purple')
# Column totals of the raw hourly rainfall input (sanity check).
apply(input2,2,sum)
| /Razor_v6.3.1.R | no_license | thora-9/Razor2 | R | false | false | 12,589 | r | require(Kendall)
require(hydroTSM)
require(lubridate)
require(tibble)
require(dplyr)
require(hydroGOF)
source('juli_ET.R')
source('juli_ET_v2.R')
source('stage1.R')
source('euler.R')
source('SCS_curve_v2.R')
source('CN_calendar.R')
source('Kc_calendar.R')
source('Percolation.R')
source('Percolation_Rice.R')
########################## Long term rainfall
input_file = "Tirumungulam_rainfall_modified_v2.csv"
input = read.csv(input_file,header=TRUE)
in_date = as.Date(input[,1],"%Y-%m-%d")
x = zoo(input[,2],in_date)
t = window(x,start=as.Date(input[1,1]))
########################## 2013 field collected rainfall
input_file = "Hourly_rain.csv"
input2 = read.csv(input_file,header=TRUE)
date_seq = seq(as.POSIXct("2013-09-26 00:00"),by="hour",length.out = 5324)
t2 = zoo(input2[4],date_seq)
t_daily = subdaily2daily(t2,FUN=sum)*1000
t_daily = t_daily[1:163]
#t_daily[1:length(t_daily)]=10
#############################
##Box 1: Runoff generation
#############################
LU_details = tribble(
~LU1, ~LU2, ~LU3, ~Per_Area,
'Fallow','Fallow','Fallow',30,
'Rice', 'Cotton', 'Fallow', 60,
'KH_Millet', 'Fallow','Fallow',0,
'Juliflora','Fallow','Fallow',10)
LU_num = 3
#For now assuming constant crop depletion factor
LU_Parameters = tribble(
~LU,~plant_month,~LI,~LD,~LM,~LL,~KCI,~KCM,~KCL,~RD,~CN,
'Rice',10,20,30,30,25,1,1.20,0.9,300,9999,
'KH_Millet',10,20,35,40,30,0.3,1.2,0.5,300,70,
'Fallow',0,0,0,0,0,0,0,0,100,58,
'Juliflora',1,365,0,0,0,1.2,0,0,1000,80,
'Cotton',2,30,50,60,55,0.35,1.15,0.6,300,66)
#Rice CN is 9999 (basically implying that the SCS method is invalid for it)
#Build day-of-year lookup tables: curve number and crop coefficient (Kc)
#per land use (one row per day of year, one column per LU)
CN_cal = CN_calendar(LU_details,LU_num,LU_Parameters)
Kc_cal = Kc_calendar(LU_details,LU_num,LU_Parameters)
#Antecedent moisture condition flag
#NOTE(review): AMC does not appear to be read anywhere below -- confirm
AMC = 0
#This basically converts the original curve number formula to mm
#S = (25400/CN)-254
#Input the PET rate from Madurai for every month (indexed Jan..Dec)
PET = c(4.1,4.4,5.7,5.2,5.3,4.7,4.4,4.6,4.6,3.9,3.5,3.7)
#############################
##Box 2: Soil Moisture box
#############################
#Used to calculate the readily available moisture in the soil
#(depletion fraction: share of TAW usable before water stress sets in)
rho1 = 0.3
#The max percolation rate calculated using Saturated hydraulic conductivity
#NOTE(review): this value is overridden by max_percolation = 4.32 further
#down in the Soil Parameters section; only the later value takes effect
max_percolation = (5*10^-5)*(24*60*60) #mm/day; from Gowing paper
Rice_max_percolation = 7 #mm/day; from Gowing paper
#############################
##Box 3: Groundwater box
#############################
#Aquifer storage capacity and initial storage
AQ1_max = 600
AQ1_ini = 500
#############################
##Soil Parameters
#############################
#Root depth and volumetric water contents at wilting point, field capacity
#and saturation
RD = 500
Soil_WP = 0.1725
Soil_FC = 0.2825
Soil_sat = 0.415
#Effective percolation rate (silently overrides the value defined above)
max_percolation = 4.32 #mm/day; from Gowing paper
#Total and readily available water; recomputed per-LU inside the main loop
TAW1 = Soil_FC - Soil_WP
RAW1 = rho1*TAW1
#Paddy bund height: ponding allowed on rice fields up to this depth (mm)
pad_BH = 70
soil_paddy1 = vector()
soil_paddy1[1] = 0
#############################
##Variables
#############################
#Preallocated holders; NOTE(review): several of these (Q1f, ET2, Qw, Qr, Qx,
#S1-S3, Qf, Qu, Sc1, Sc2) are not referenced below -- confirm before removal
Q1f = vector()
ET1 = vector()
ET2 = vector()
Qw = vector()
Qr = vector()
Qx = vector()
S1 = vector()
S2 = vector()
S3 = vector()
Qf = vector()
Qu = vector()
Sc1 = vector()
Sc2 = vector()
#Per-day x per-HRU matrices: runoff depth, deep percolation, actual ET,
#soil moisture and interflow (mm), plus runoff volume (m3)
runoff = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
DP1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
ET1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
SM1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
IF1 = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
AQ1 = vector()
runoff_vol = matrix(nrow = length(t_daily),ncol = nrow(LU_details))
mod_loss = 0
#Tank variables (daily time series for tank 1)
inflow_f1 = vector()
inflow_s1 = vector()
t1_inflow = vector()
t1_precip = vector()
t1_area = vector()
t1_area0 = 0
t1_vol = vector()
t1_vol0 = 0
t1_stage = vector()
t1_area = vector()
t1_spill = vector()
t1_sluice = vector()
t1_GW = vector()
t1_ET = vector()
t1_all = data.frame(matrix(ncol = 8, nrow = 0))
t1_const = as.data.frame(cbind(5e6,3.595,30,276405))
colnames(t1_const) = c("max_catch","weir_height","spill_len","max_volume")#Units c(m2,meter,meter,m3)
#Com Constants
#Catchment area of each HRU (m2) = percent area x total catchment area / 100
HRU_Areas1 = t(LU_details$Per_Area)*t1_const$max_catch/100
#############################
#Initialize
#############################
samay = length(t_daily)
#NOTE(review): debug leftover -- i is immediately overwritten by the loop below
i = 146
#Main daily water-balance loop: for each day, compute per-HRU runoff,
#interflow, percolation and ET, then route runoff/interflow into tank 1
#and update tank stage/area/volume and groundwater storage.
for(i in 1:samay) {
cur_date = t_daily[i]
doy = yday(cur_date)
cur_month = month(index(cur_date))#months[i]
month_name = month.abb[cur_month]
cur_P = coredata(t_daily[i]) #100#test[i]
#Runoff Generation
#Estimate the antecedent conditions of the catchment
if(i > 5){
#rain_5 = sum(t_daily[(i-5):(i-1)]) * 1000
#NOTE(review): the 5-day antecedent rainfall computation above is commented
#out and replaced by a hardcoded 500 -- looks like a debugging override
rain_5 = 500
} else {rain_5 = 30}
#NOTE(review): debug leftover -- j is immediately overwritten by the loop
j = 3
#Loop over HRUs (one column of CN_cal per land use)
for (j in 1:ncol(CN_cal)){
cur_CN = CN_cal[doy,j]
#Look up LU parameters by matching on curve number (CN must be unique per LU)
cur_pars = filter(LU_Parameters,CN==cur_CN)
cur_LU = cur_pars$LU
cur_kc = Kc_cal[doy,j]
if(cur_LU != 'Rice'){
#Non-rice HRUs: SCS curve-number runoff
runoff[i,j] = SCS_curve(cur_CN,cur_P,rain_5)
runoff_vol[i,j] = (LU_details$Per_Area[j] * t1_const$max_catch) * runoff[i,j] * (1/1000)
if (i > 1) {
#Keep an eye on if I should be calculating Percolation and ET based on the modified SM vs exact SM from previous timestep
temp_SM = SM1[i-1,j] + cur_P - runoff[i,j]
#Remove the interflow volume right away (if applicable)
if (temp_SM>cur_pars$RD*Soil_sat){
#Purely to ensure that the change in LU doesn't lead to interflow when there is no rainfall due to changing root depth
if (cur_P == 0 & (CN_cal[doy,j] != CN_cal[doy-1,j])) {
#Excess caused only by the LU transition: book it as a model loss
#rather than interflow
mod_loss_temp = (temp_SM - cur_pars$RD*Soil_sat)
mod_loss = mod_loss + (temp_SM - cur_pars$RD*Soil_sat)
IF1[i,j] = 0
temp_SM = temp_SM - IF1[i,j] - mod_loss_temp
} else {
IF1[i,j] = temp_SM - cur_pars$RD*Soil_sat
temp_SM = temp_SM - IF1[i,j]
}
} else {
IF1[i,j] = 0
}
#Basically call the function Percolation that uses one of the methods described in Raven
DP1[i,j] = Percolation(temp_SM,max_percolation,cur_pars)
#Estimate the evapotranspiration by first estimating the Water stress factor Ks
TAW1 = (cur_pars$RD * Soil_FC)-(cur_pars$RD * Soil_WP)
RAW1 = rho1 * TAW1
#Adjusted soil moisture is basically the total SM - WP
AdSM = temp_SM - (cur_pars$RD * Soil_WP)
if (AdSM < (TAW1-RAW1)){
#Keep an eye on whether to use SM1[i-1,j] or temp_SM
Ks = AdSM / (TAW1-RAW1)
ET1[i,j] = Ks * cur_kc * PET[cur_month]
if (ET1[i,j] < 0) {ET1[i,j] = 0}
} else {
#No water stress: ET at potential rate scaled by crop coefficient
ET1[i,j] = 1*cur_kc*PET[cur_month]
}
#The way it is setup right now the SM will never end the day at saturation point
SM1[i,j] = temp_SM - DP1[i,j] - ET1[i,j]
} else {
#First timestep: no previous soil moisture to carry over
SM1[i,j] = cur_P - runoff[i,j]
if (SM1[i,j]>cur_pars$RD*Soil_sat){
DP1[i,j] = max_percolation
IF1[i,j] = SM1[i,j] - cur_pars$RD * Soil_sat - max_percolation
ET1[i,j] = 1 * cur_kc * PET[cur_month]
SM1[i,j] = SM1[i,j] - DP1[i,j] - IF1[i,j] - ET1[i,j]
} else if (SM1[i,j] < cur_pars$RD * Soil_sat){
DP1[i,j] = Percolation(SM1[i,j],max_percolation,cur_pars)
IF1[i,j] = 0
#Need to add a value to scale the AET based on the soil moisture level
#Ignore the values generated at i=1
ET1[i,j] = 1 * cur_kc * PET[cur_month]
SM1[i,j] = SM1[i,j] - DP1[i,j] - IF1[i,j] - ET1[i,j]
if (SM1[i,j] <= 0){SM1[i,j] = 0}
} else {
#NOTE(review): exact equality with saturation falls through to here
(print('Error due to Deep percolation calculation'))
}
}
} else if (cur_LU=='Rice'){
#NOTE(review): at i==1 this reads SM1[0,j] (empty) -- Rice HRUs produce NA
#on the first timestep; confirm intended
temp_SM = SM1[i-1,j] + cur_P
Rice_SM_sat = 0.415*cur_pars$RD
Rice_run_thresh = Rice_SM_sat + pad_BH
#Calculate the runoff from the rice fields here. Runoff only starts when the total SM value on the rice fields is greater than
#the bund height + soil saturation water content
if (temp_SM > Rice_run_thresh){
runoff[i,j] = temp_SM - Rice_run_thresh
runoff_vol[i,j] = (LU_details$Per_Area[j] * t1_const$max_catch) * runoff[i,j] * (1/1000)
temp_SM = temp_SM - runoff[i,j]
} else if (temp_SM < Rice_run_thresh){
runoff[i,j] = 0
runoff_vol[i,j] = (LU_details$Per_Area[j] * t1_const$max_catch) * runoff[i,j] * (1/1000)
} else {print('Error due to Rice runoff calculation')}
#Start calculating deep percolation from the rice fields here using the method in Gowing paper
DP1[i,j] = Percolation_Rice(temp_SM,Rice_max_percolation,cur_pars)
#Estimate the evapotranspiration by first estimating the Water stress factor Ks
TAW1 = (cur_pars$RD * Soil_FC)-(cur_pars$RD * Soil_WP)
RAW1 = rho1 * TAW1
#Adjusted soil moisture is basically the total SM - WP
#(ponded water above saturation does not increase the ET-available store)
if (temp_SM > Rice_SM_sat){
AdSM = Rice_SM_sat -(cur_pars$RD * Soil_WP)
} else if (temp_SM < Rice_SM_sat) {
AdSM = temp_SM -(cur_pars$RD * Soil_WP)
} else {print('Error due to Soil Moisture--ET calculation')}
if (AdSM < (TAW1-RAW1)){
#Keep an eye on whether to use SM1[i-1,j] or temp_SM
Ks = AdSM / (TAW1-RAW1)
ET1[i,j] = Ks * cur_kc * PET[cur_month]
if (ET1[i,j] < 0) {ET1[i,j] = 0}
} else {
ET1[i,j] = 1*cur_kc*PET[cur_month]
}
#Interflow will always be zero
IF1[i,j] = 0
#The way it is setup right now the SM will never end the day at saturation point
SM1[i,j] = temp_SM - DP1[i,j] - ET1[i,j]
}
}
#Estimate the deep percolation from all the HRU's
#Groundwater store: carry over yesterday's storage plus today's recharge
if (i > 1) {AQ1[i] = AQ1[i-1] + sum(DP1[i,])
} else {AQ1[1] = AQ1_ini + sum(DP1[i,])}
#Tank1 Start
#Get the tank area from the previous timestep
if (i == 1) {
t1_area[1] = 0
t1_vol[1] = 0
t1_temp_area = 0
t1_temp_vol = 0
} else {
t1_temp_area = t1_area[i-1]
t1_temp_vol = t1_vol[i-1]
}
#Estimate the area for each of the HRU's (special HRU is Fallow which converts to tank)
#NOTE(review): the submerged tank area is subtracted from HRU 1 -- assumes
#column 1 of the HRU matrices is the Fallow/tank-bed class; confirm ordering
HRU_Areas1_temp = HRU_Areas1
HRU_Areas1_temp[1] = HRU_Areas1_temp[1] - t1_temp_area
t1_inflow_temp = (IF1[i,] + runoff[i,]) * HRU_Areas1_temp * (1/1000)
t1_inflow[i] = sum(t1_inflow_temp)
#Estimate the water added to the tank by direct precipitation
t1_precip[i] = cur_P * t1_temp_area *(1/1000)
#Update the temp_tank volume to include the inputs calculated above
t1_temp_vol = t1_temp_vol + t1_precip[i] + t1_inflow[i]
#Update the t1_temp area and then subsequently the fallow area
#Stage-Volume relationship from Mike data
t1_temp_stage = (t1_temp_vol/22914)^(1/1.9461)
#Stage Area
t1_temp_area=42942*(t1_temp_stage)^1.0993
#HRU_Areas update
HRU_Areas1_temp[1] = HRU_Areas1[1] - t1_temp_area
#ET from tank
t1_ET[i] = t1_temp_area * PET[cur_month] * (1/1000) #m3/day
####Compare the tank capacity and current volume of water in tank.
vol_diff1 = t1_temp_vol - t1_const$max_volume
if (vol_diff1 >= 0){
t1_spill[i] = vol_diff1
} else{ t1_spill[i] = 0 }
####Sluice Outflow
#Stage-discharge relations for the two sluice levels (0.785 m and 1.185 m)
Qo1a = (( t1_temp_stage - 0.785 ) * 5.1903 ) * 86.4 #86.4 Converts L/s to m3/d
Qo1b = ((( t1_temp_stage - 1.185 ) * 9.6768 ) + (( t1_temp_stage - 1.185 ) * 4.9196 )) * 86.4 #86.4 Converts L/s to m3/d
if ( Qo1a < 0 ){ Qo1a = 0 }
if ( Qo1b < 0 ) { Qo1b = 0 }
if ( t1_temp_stage < 0.785 ){
t1_sluice[i] = 0 } else if (0.785 < t1_temp_stage & t1_temp_stage < 1.185) {
t1_sluice[i] = Qo1a} else if( t1_temp_stage > 1.185 ){
t1_sluice[i] = ( Qo1a + Qo1b )}
###Spillage from upstream tank
t1_spill_add = 0
####GW exchange- Mike paper
GW_loss1 = 8.6 * t1_temp_stage - 6.5 #mm/day
if ( GW_loss1 < 0) { GW_loss1 = 0}
t1_GW[i] = ( GW_loss1/1000 ) * t1_temp_area #m3/day
#Total Storage change
t1_vol[i] = t1_temp_vol - ( t1_ET[i] + t1_sluice[i] + t1_spill[i] + t1_GW[i] )
#Stage-Volume relationship from Mike data
t1_stage[i] = (t1_vol[i]/22914) ^ (1/1.9461)
#Stage Area
t1_area[i] = 42942 * (t1_stage[i]) ^ 1.0993
#cur_all=cbind(t1_stage[j],t1_area[j],t1_vol[j],inflow1[j],coredata(inflow_vol),t1_ET[j],t1_sluice[j],t1_spill[j],t1_GW[j])
#t1_all=rbind(t1_all,cur_all)
#NOTE(review): no-op -- i is reassigned by the for loop on the next iteration
i=i+1
}
# --- Post-run diagnostics: compare simulated tank stage against field data ---
# BUGFIX: the field-stage CSV must be loaded BEFORE it is plotted; the
# original script referenced t1_field_stage at this point but only called
# read.csv several lines later.
t1_field_stage = read.csv('tank1_fieldstage.csv',stringsAsFactors = FALSE)
# Simulated vs observed stage over days 12-110, plus Nash-Sutcliffe efficiency
plot(t1_stage[12:110],type = 'l',ylim=c(0,4))
lines(t1_field_stage$T1_stage.m.[12:110],col = 'purple')
NSE.default(t1_stage[12:110],t1_field_stage$T1_stage.m.[12:110])
# Groundwater storage trajectory
plot(AQ1,type = 'l')
# Estimating the Soil Moisture Balance for each HRU:
# total inflow (interflow + runoff) per HRU, then the daily mean across HRUs
inflow1 = IF1 + runoff
inflow2 = apply(inflow1, 1, mean)
plot(inflow2, type = 'l')
# Full-period stage comparison
plot(t1_stage,type = 'l')
lines(t1_field_stage$T1_stage.m.,col = 'purple')
# Per-HRU inflow totals.
# BUGFIX: the original summed 'input2', which is undefined anywhere in the
# script; 'inflow1' (the per-HRU inflow matrix built above) is evidently the
# intended object -- confirm against the author's notebook.
apply(inflow1,2,sum)
|
\name{get.cid}
\alias{get.cid}
\title{
Get PubChem Compound Information
}
\description{
The PubChem compound collection stores a variety of information for
each molecule. These include canonical SMILES, molecular properties,
substance associations, synonyms etc.
This function will extract a subset of the molecular property
information for a single CID.
}
\usage{
get.cid(cid, quiet=TRUE)
}
\arguments{
\item{cid}{A single numeric CID}
\item{quiet}{If \code{FALSE}, output is verbose}
}
\value{
A \code{data.frame} with at least 23 columns including the CID, IUPAC name, InChI and InChI key, canonical SMILES and a variety of molecular descriptors. In addition, a few physical properties are also included. The text from the \code{Summary Information} section of the compound page is included as an attribute of the \code{data.frame} with the name \code{Summary.Information}.
}
\details{The method currently queries PubChem via the PUG REST interface. Since the method
processes a single CID at a time, the user can parallelize processing. However, this is usually
not recommended, at least in an unrestricted manner.
In addition, since the \code{data.frame} for each CID may have a different set of physical properties, it is recommended to either extract the common set of columns or else use something like \code{bind_rows} from the \code{dplyr} package to get a uniform \code{data.frame} if processing multiple CIDs
}
\examples{
\dontrun{
cids <- c(5282108, 5282148, 91754124)
dat <- lapply(cids, get.cid)
dat <- dplyr::bind_rows(dat)
str(dat)
}
}
\seealso{
\code{\link{get.assay}},
\code{\link{get.sid}},
\code{\link{get.sid.list}}
}
\keyword{programming}
\author{Rajarshi Guha \email{rajarshi.guha@gmail.com}}
\concept{PubChem}
\concept{pubchem}
\concept{compound}
| /man/getcid.Rd | permissive | cbroeckl/rpubchem | R | false | false | 1,808 | rd | \name{get.cid}
\alias{get.cid}
\title{
Get PubChem Compound Information
}
\description{
The PubChem compound collection stores a variety of information for
each molecule. These include canonical SMILES, molecular properties,
substance associations, synonyms etc.
This function will extract a subset of the molecular property
information for a single CID.
}
\usage{
get.cid(cid, quiet=TRUE)
}
\arguments{
\item{cid}{A single numeric CID}
\item{quiet}{If \code{FALSE}, output is verbose}
}
\value{
A \code{data.frame} with at least 23 columns including the CID, IUPAC name, InChI and InChI key, canonical SMILES and a variety of molecular descriptors. In addition, a few physical properties are also included. The text from the \code{Summary Information} section of the compound page is included as an attribute of the \code{data.frame} with the name \code{Summary.Information}.
}
\details{The method currently queries PubChem via the PUG REST interface. Since the method
processes a single CID at a time, the user can parallelize processing. However, this is usually
not recommended, at least in an unrestricted manner.
In addition, since the \code{data.frame} for each CID may have a different set of physical properties, it is recommended to either extract the common set of columns or else use something like \code{bind_rows} from the \code{dplyr} package to get a uniform \code{data.frame} if processing multiple CIDs
}
\examples{
\dontrun{
cids <- c(5282108, 5282148, 91754124)
dat <- lapply(cids, get.cid)
dat <- dplyr::bind_rows(dat)
str(dat)
}
}
\seealso{
\code{\link{get.assay}},
\code{\link{get.sid}},
\code{\link{get.sid.list}}
}
\keyword{programming}
\author{Rajarshi Guha \email{rajarshi.guha@gmail.com}}
\concept{PubChem}
\concept{pubchem}
\concept{compound}
|
#Collection of functions for working with WGS data
#Pavitra Roychoudhury
#Aug 2017
#Return the number of mapped reads in a bam file (one count per reference
#sequence, as reported by idxstatsBam), or NA if the file is missing or its
#header cannot be read.
n_mapped_reads<-function(bamfname){
require(Rsamtools)
#BUGFIX: only index after confirming the file exists and is readable; the
#original called indexBam() unconditionally first, which errors out on a
#missing/corrupt bam instead of returning NA as the else-branch intends.
if(file.exists(bamfname)&&!inherits(try(scanBamHeader(bamfname),silent=T),'try-error')){
baifname<-indexBam(bamfname)
n<-idxstatsBam(bamfname)$mapped
#Clean up the index we created, matching the other helpers in this file
file.remove(baifname)
return(n)
}else{
return(NA)
}
}
#Make a new reference from scaffolds.
#Takes a bam of assembly scaffolds aligned to a reference plus the reference
#fasta, builds a scaffold-based consensus (reference bases fill uncovered
#positions), splices scaffold insertions back in, and writes the result to
#./ref_for_remapping/<bam>_consensus.fasta. Returns NA (with a message) if
#the bam cannot be opened; otherwise returns the result of file.remove()
#on the temporary index (callers ignore the return value).
make_ref_from_assembly<-function(bamfname,reffname){
require(Rsamtools);
require(GenomicAlignments);
require(parallel)
ncores<-detectCores();
#Read reference sequence
ref_seq<-readDNAStringSet(reffname);
if(!is.na(bamfname)&class(try(scanBamHeader(bamfname),silent=T))!='try-error'){
#Index bam if required
if(!file.exists(paste(bamfname,'.bai',sep=''))){
baifname<-indexBam(bamfname);
}else{
baifname<-paste(bamfname,'.bai',sep='');
}
#Import bam file (mapped scaffolds only, with the fields needed below)
params<-ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE),
what=c('qname','rname','strand','pos','qwidth','mapq','cigar','seq'));
gal<-readGAlignments(bamfname,index=baifname,param=params);
#Remove any contigs with width <200 bp
#gal<-gal[width(gal)>200];
#First lay contigs on reference space--this removes insertions and produces a seq of the same length as ref
qseq_on_ref<-sequenceLayer(mcols(gal)$seq,cigar(gal),from="query",to="reference");
#Pad/shift scaffolds so they all occupy the same coordinate space
qseq_on_ref_aligned<-stackStrings(qseq_on_ref,1,max(mcols(gal)$pos+qwidth(gal)-1,width(ref_seq)),
shift=mcols(gal)$pos-1,Lpadding.letter='N',Rpadding.letter='N');
#Make a consensus matrix and get a consensus sequence from the aligned scaffolds
#(N rows are zeroed so padding does not out-vote real bases; columns with no
#coverage are then marked as pure N)
cm<-consensusMatrix(qseq_on_ref_aligned,as.prob=T,shift=0)[c('A','C','G','T','N','-'),];
# cm[c('N','-'),]<-0;
cm['N',]<-0;
cm<-apply(cm,2,function(x)if(all(x==0))return(x) else return(x/sum(x)));
cm['N',colSums(cm)==0]<-1;
#consensusString emits '?'/'+' for unresolved columns; map both to N
con_seq<-DNAStringSet(gsub('\\?','N',consensusString(cm,threshold=0.25)));
con_seq<-DNAStringSet(gsub('\\+','N',con_seq));
#Now fill in the Ns with the reference
temp<-as.matrix(con_seq);
temp[temp=='N']<-as.matrix(ref_seq)[temp=='N'];
con_seq<-DNAStringSet(paste0(temp,collapse=''));
names(con_seq)<-sub('.bam','_consensus',basename(bamfname));
#Look for insertions in bam cigar string (reference and query coordinates
#of every 'I' operation, per scaffold)
cigs_ref<-cigarRangesAlongReferenceSpace(cigar(gal),with.ops=F,ops='I',
reduce.ranges=T,drop.empty.ranges=F,
pos=mcols(gal)$pos);
cigs_query<-cigarRangesAlongQuerySpace(cigar(gal),ops='I',with.ops=F,
reduce.ranges=T,drop.empty.ranges=F);
#Extract the inserted sequence for each insertion event
all_ins<-mclapply(c(1:length(cigs_query)),function(i)
extractAt(mcols(gal)$seq[i],cigs_query[[i]])[[1]]);
#Merge all insertions into one table ordered by reference position
all_ins_merged<-do.call('rbind',mclapply(c(1:length(cigs_ref)),function(i)
return(data.frame(
start_ref=start(cigs_ref[[i]]),end_ref=end(cigs_ref[[i]]),
start_query=start(cigs_query[[i]]),end_query=end(cigs_query[[i]]),
ins_seq=all_ins[[i]],width_ins=width(all_ins[[i]]))),
mc.cores=ncores));
all_ins_merged<-all_ins_merged[order(all_ins_merged$end_ref),];
# write.csv(all_ins_merged,'./testing/all_ins.csv',row.names=F);
#TO DO: Check for overlaps--should be minimal since scaffolds don't usually overlap that much
if(any(table(all_ins_merged$start_ref)>1)){
print('Overlapping insertions')
#not the best way, but just pick the first for now
all_ins_merged<-all_ins_merged[!duplicated(all_ins_merged[,c('start_ref','end_ref')]),];
}
#Now the beauty part of inserting the strings back in
#Split ref seq by the insert positions, appending each insertion to the
#consensus segment that precedes it
if(nrow(all_ins_merged)!=0){
new_strs<-DNAStringSet(rep('',nrow(all_ins_merged)+1))
for(i in 1:nrow(all_ins_merged)){
if(i==1){
new_strs[i]<-paste0(extractAt(con_seq,IRanges(start=1,end=all_ins_merged$end_ref[i]))[[1]],
all_ins_merged$ins_seq[i]);
}else{
new_strs[i]<-paste0(extractAt(con_seq,IRanges(start=all_ins_merged$start_ref[i-1],
end=all_ins_merged$end_ref[i]))[[1]],
all_ins_merged$ins_seq[i]);
}
}
#Last bit (tail of the consensus after the final insertion)
new_strs[i+1]<-paste0(extractAt(con_seq,IRanges(start=all_ins_merged$start_ref[i],
end=width(con_seq)))[[1]])
temp_str<-paste0(as.character(new_strs),collapse='');
#Remove gaps to get final sequence
con_seq_final<-DNAStringSet(gsub('-','',temp_str));
#No insertions
}else{
con_seq_final<-con_seq;
}
names(con_seq_final)<-sub('.bam','_consensus',basename(bamfname));
if(!dir.exists('./ref_for_remapping')) dir.create('./ref_for_remapping');
writeXStringSet(con_seq_final,
paste0('./ref_for_remapping/',names(con_seq_final),'.fasta'));
#Delete bai file
file.remove(baifname);
}else{
print('Bam file could not be opened.')
return(NA)
}
}
#Takes in a bam file, produces a consensus sequence (DNAStringSet, named
#<bam>_consensus) from the aligned reads; positions with no read coverage
#become 'N', mixed positions become IUPAC ambiguity codes, and gaps plus
#leading/trailing Ns are stripped. Returns NA if the bam is missing or its
#header cannot be read.
generate_consensus<-function(bamfname){
require(Rsamtools)
require(GenomicAlignments)
require(Biostrings)
require(parallel)
ncores<-detectCores()
#Short-circuit so try() is not run on an NA filename
if(!is.na(bamfname)&&!inherits(try(scanBamHeader(bamfname),silent=T),'try-error')){
#Index bam if required
if(!file.exists(paste(bamfname,'.bai',sep=''))){
baifname<-indexBam(bamfname);
}else{
baifname<-paste(bamfname,'.bai',sep='');
}
#Import bam file (mapped reads only, with the fields needed below)
params<-ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE),
what=c('qname','rname','strand','pos','qwidth','mapq','cigar','seq'));
gal<-readGAlignments(bamfname,index=baifname,param=params);
#First lay reads on reference space--this doesn't include insertions
qseq_on_ref<-sequenceLayer(mcols(gal)$seq,cigar(gal),from="query",to="reference");
#Raw base counts per reference position
cm<-consensusMatrix(qseq_on_ref,as.prob=F,shift=start(gal)-1,width=seqlengths(gal))[c('A','C','G','T','N','-'),];
#BUGFIX: flag zero-coverage columns with colSums(cm)==0. The original
#tested colSums(cm)<0, which is never TRUE for non-negative counts, so
#uncovered positions became NaN after normalisation instead of 'N'.
#(Substitute a higher minimum-count threshold here if one is desired.)
poor_cov<-which(colSums(cm)==0);
#Normalise counts to per-column base frequencies, then force uncovered
#columns to pure N
cm<-apply(cm,2,function(x)x/sum(x));
cm[,poor_cov]<-0;
cm['N',poor_cov]<-1;
#Call the consensus; '?' marks positions with no base above the threshold
tmp_str<-strsplit(consensusString(cm,ambiguityMap='?',threshold=0.5),'')[[1]];
ambig_sites<-which(tmp_str=='?');
#Replace ambiguous calls with the IUPAC code for the observed base mix,
#falling back to 'N' when the mix has no single-letter code
ambig_bases<-unlist(lapply(ambig_sites,function(i){mixedbase<-paste(names(cm[,i])[cm[,i]>0],collapse='');
if(mixedbase%in%IUPAC_CODE_MAP) return(names(IUPAC_CODE_MAP)[IUPAC_CODE_MAP==mixedbase])
else return('N')}));
tmp_str[ambig_sites]<-ambig_bases
con_seq<-DNAStringSet(paste0(tmp_str,collapse=''));
names(con_seq)<-sub('.bam','_consensus',basename(bamfname));
rm(tmp_str);
#Remove gaps and leading and trailing Ns to get final sequence
con_seq_trimmed<-DNAStringSet(gsub("N*N$",'',gsub("^N*",'',as.character(con_seq))));
con_seq_final<-DNAStringSet(gsub('-','',as.character(con_seq_trimmed)));
names(con_seq_final)<-sub('.bam','_consensus',basename(bamfname));
#Delete bai file
file.remove(baifname);
return(con_seq_final);
}else{
return(NA)
}
}
#Generate consensus sequences and mapping stats for one HSV sample against
#three references (HSV-1, HSV-2 SD90e, HSV-2 HG52). Writes one fasta per
#reference to ./consensus_seqs_all/ and a per-sample mapping-stats csv to
#./stats/; returns TRUE.
#NOTE(review): assumes grep finds exactly one bam per reference in each
#folder -- zero or multiple matches will misalign the data.frame columns.
clean_consensus_hsv<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Append '_' so e.g. sample '12' does not also match '123'
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref=c('hsv1_ref','hsv2_sd90e','hsv2_ref_hg52'),
bamfname_merged=c(grep(sampname,list.files(merged_bam_folder,'_hsv1_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(merged_bam_folder,'_hsv2_sd90e.*bam$',full.names=T),value=T),
grep(sampname,list.files(merged_bam_folder,'_hsv2_ref_hg52.*bam$',full.names=T),value=T)),
bamfname_mapped=c(grep(sampname,list.files(mapped_reads_folder,'_hsv1_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(mapped_reads_folder,'_hsv2_sd90e.*bam$',full.names=T),value=T),
grep(sampname,list.files(mapped_reads_folder,'_hsv2_ref_hg52.*bam$',full.names=T),value=T)),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
#Write each consensus to its own fasta (lapply used only for side effects)
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' counted alongside 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Generate consensus sequences and mapping stats for one HHV-6 sample against
#the HHV-6A (U1102) and HHV-6B (Z29) references. Writes fastas to
#./consensus_seqs_all/ and a per-sample mapping-stats csv to ./stats/;
#returns TRUE.
clean_consensus_hhv6<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#grep pattern '\\/<sampname>_' anchors the sample name to the start of the
#filename (just after the directory separator) to avoid partial matches
mapping_stats<-data.frame(
ref=c('hhv6A_ref_U1102','hhv6B_ref_z29'),
bamfname_merged=c(grep(paste0('\\/',sampname,'_'),list.files(merged_bam_folder,"_hhv6A_ref_U1102.*bam$",full.names=T),value=T),
grep(paste0('\\/',sampname,'_'),list.files(merged_bam_folder,'_hhv6B_ref_z29.*bam$',full.names=T),value=T)),
bamfname_mapped=c(grep(paste0('\\/',sampname,'_'),list.files(mapped_reads_folder,'_hhv6A_ref_U1102.*bam$',full.names=T),value=T),
grep(paste0('\\/',sampname,'_'),list.files(mapped_reads_folder,'_hhv6B_ref_z29.*bam$',full.names=T),value=T)),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
#Write each consensus to its own fasta (lapply used only for side effects)
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' counted alongside 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Generate a consensus sequence and mapping stats for one HIV sample against
#the HXB2 reference. Writes the consensus fasta to ./consensus_seqs_all/
#and a per-sample mapping-stats csv to ./stats/; returns TRUE.
clean_consensus_hiv<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Locate this sample's merged (reads+assembly) and mapped-to-reference bams
merged_bam<-grep(sampname,list.files(merged_bam_folder,"_hiv_hxb2_ref.*bam$",full.names=T),value=T);
mapped_bam<-grep(sampname,list.files(mapped_reads_folder,'_hiv_hxb2_ref.*bam$',full.names=T),value=T);
mapping_stats<-data.frame(
ref=c('hiv_hxb2_ref'),
bamfname_merged=merged_bam,
bamfname_mapped=mapped_bam,
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Call a consensus from each merged bam and write it out as fasta
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
for(cs in con_seqs){
writeXStringSet(cs,file=paste('./consensus_seqs_all/',names(cs),'.fasta',sep=''),format='fasta');
}
#Fill in mapped-read counts and N-content statistics ('+' counts as N)
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(s)sum(letterFrequency(s,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Generate a consensus sequence and mapping stats for one HHV-8 sample.
#Writes the consensus fasta to ./consensus_seqs_all/ and a per-sample
#mapping-stats csv to ./stats/; returns TRUE.
clean_consensus_hhv8<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Append '_' so e.g. sample '12' does not also match '123'
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref='hhv8_ref',
bamfname_merged=grep(sampname,list.files(merged_bam_folder,'_hhv8.*bam$',full.names=T),value=T),
bamfname_mapped=grep(sampname,list.files(mapped_reads_folder,'_hhv8.*bam$',full.names=T),value=T),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
#Write each consensus to its own fasta (lapply used only for side effects)
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' counted alongside 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Generate consensus sequences and mapping stats for one RSV sample against
#the RSV-A and RSV-B references. Writes fastas to ./consensus_seqs_all/
#and a per-sample mapping-stats csv to ./stats/; returns TRUE.
clean_consensus_rsv<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Append '_' so e.g. sample '12' does not also match '123'
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref=c('rsvA_ref','rsvB_ref'),
bamfname_merged=c(grep(sampname,list.files(merged_bam_folder,'_rsvA_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(merged_bam_folder,'_rsvB_ref.*bam$',full.names=T),value=T)),
bamfname_mapped=c(grep(sampname,list.files(mapped_reads_folder,'_rsvA_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(mapped_reads_folder,'_rsvB_ref.*bam$',full.names=T),value=T)),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
#Write each consensus to its own fasta (lapply used only for side effects)
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' counted alongside 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Measles (added Aug 2019)
#Generate a consensus sequence and mapping stats for one measles sample.
#Writes the consensus fasta to ./consensus_seqs_all/ and a per-sample
#mapping-stats csv to ./stats/; returns TRUE.
clean_consensus_measles<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Append '_' so e.g. sample '12' does not also match '123'
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref='measles_ref',
bamfname_merged=grep(sampname,list.files(merged_bam_folder,'_measles_ref.*bam$',full.names=T),value=T),
bamfname_mapped=grep(sampname,list.files(mapped_reads_folder,'_measles_ref.*bam$',full.names=T),value=T),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
#Write each consensus to its own fasta (lapply used only for side effects)
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' counted alongside 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Treponema (added Dec 2019)
#Generate a consensus sequence and mapping stats for one Treponema sample
#against a caller-supplied reference label. Unlike the virus-specific
#variants above, the reference name is a parameter and the bam search
#matches any bam in each folder containing sampname. Writes the consensus
#fasta to ./consensus_seqs_all/ and a mapping-stats csv to ./stats/;
#returns TRUE.
clean_consensus_tp<-function(sampname,merged_bam_folder,mapped_reads_folder,ref){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#NOTE(review): '*.bam$' is a glob-style pattern used where a regex is
#expected; R's regex engine tolerates the leading '*' so it behaves like
#'\\.bam$' in practice, but '\\.bam$' would be the correct spelling
mapping_stats<-data.frame(ref=ref,
bamfname_merged=grep(sampname,list.files(merged_bam_folder,'*.bam$',full.names=T),value=T),
bamfname_mapped=grep(sampname,list.files(mapped_reads_folder,'*.bam$',full.names=T),value=T),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
#Write each consensus to its own fasta (lapply used only for side effects)
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' counted alongside 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#hCoV (added Mar 2020)
#Generate a consensus sequence and mapping stats for one hCoV sample.
#Unlike the other clean_consensus_* variants, the two bam paths are passed
#in directly (no folder search) and the single consensus is written to
#./consensus_seqs/<sampname>.fasta. A mapping-stats csv goes to ./stats/;
#returns TRUE.
clean_consensus_hcov<-function(sampname,remapped_bamfname,mappedtoref_bamfname,ref){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
mapping_stats<-data.frame(ref=ref,
remapped_bam=remapped_bamfname,
mappedtoref_bam=mappedtoref_bamfname,
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seq<-generate_consensus(mapping_stats$remapped_bam);
if(!dir.exists('./consensus_seqs')) dir.create('./consensus_seqs');
writeXStringSet(con_seq,file=paste('./consensus_seqs/',sampname,'.fasta',sep=''),format='fasta');
#Compute #mapped reads and %Ns ('+' counted alongside 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$mappedtoref_bam,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$remapped_bam,n_mapped_reads));
mapping_stats$num_Ns<-sum(letterFrequency(con_seq,c('N','+')));
mapping_stats$width<-width(con_seq);
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Find the read coverage at each position along the reference (an RleList,
#one Rle per reference sequence). Returns NA if the bam is missing or its
#header cannot be read.
cov_by_pos<-function(bamfname){
require(Rsamtools);
require(GenomicAlignments);
if(file.exists(bamfname)&class(try(scanBamHeader(bamfname),silent=T))!='try-error'){
#Drop any stale index, then build a fresh one for this bam
stale_index<-paste(bamfname,'.bai',sep='');
if(file.exists(stale_index))
file.remove(stale_index);
idx<-indexBam(bamfname);
#Read mapped alignments only, keeping the standard field set
flags<-scanBamFlag(isUnmappedQuery=FALSE);
fields<-c('qname','rname','strand','pos','qwidth','mapq','cigar','seq');
aln<-readGAlignments(bamfname,index=idx,param=ScanBamParam(flag=flags,what=fields));
pos_cov<-coverage(aln);
#Remove the temporary index before returning
file.remove(idx);
return(pos_cov)
}else{
return(NA)
}
}
#Compute coverage summary statistics for a bam file.
#Returns a one-row data.frame with columns mapped (number of mapped reads),
#avg_cov, sd_cov, min_cov and max_cov; all columns are NA when the input is
#length-0 (e.g. character(0) from a failed grep) or the bam is missing or
#unreadable.
get_coverage<-function(bamfname){
#Consolidated failure path (the original duplicated the five NA
#assignments in two branches); inherits() replaces the fragile
#class(...)!='try-error' comparison, and || short-circuits so
#scanBamHeader is never called on a length-0 or nonexistent path
if(length(bamfname)==0||!file.exists(bamfname)||inherits(try(scanBamHeader(bamfname),silent=T),'try-error')){
return(data.frame(mapped=NA,avg_cov=NA,sd_cov=NA,min_cov=NA,max_cov=NA))
}
require(Rsamtools);
require(GenomicAlignments);
#Remove any stale index, then build a fresh one
if(file.exists(paste(bamfname,'.bai',sep='')))
file.remove(paste(bamfname,'.bai',sep=''));
baifname<-indexBam(bamfname);
#Import mapped alignments with the standard field set
params<-ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE),
what=c('qname','rname','strand','pos','qwidth','mapq','cigar','seq'));
gal<-readGAlignments(bamfname,index=baifname,param=params);
cov<-coverage(gal);
mapped<-length(gal);  #number of mapped reads
avg_cov<-mean(cov);
sd_cov<-sd(cov);
min_cov<-min(cov);
max_cov<-max(cov);
#Clean up the temporary index
file.remove(baifname);
return(data.frame(mapped,avg_cov,sd_cov,min_cov,max_cov))
}
#Extracts number of reads and read widths from html report generated by fastqc
fastqc_readstats<-function(fname){
require(rvest)
if(file.exists(fname)){
tmp_fastqc<-read_html(fname);
tmp_table<-html_table(tmp_fastqc)[[1]];
fastq_reads<-as.numeric(tmp_table[tmp_table$Measure=='Total Sequences','Value']);
fastq_width<-tmp_table[tmp_table$Measure=='Sequence length','Value']; #returns single number for raw reads and range for trimmed
gc<-as.numeric(tmp_table[tmp_table$Measure=='%GC','Value']);
}else{
fastq_reads<-NA;
fastq_width<-NA;
gc<-NA;
}
return(data.frame(fastq_reads,fastq_width,gc,stringsAsFactors=F));
}
#Compute stats on a consensus seq (or really any fasta file)
conseq_stats<-function(fname){
require(Biostrings)
if(length(fname)==0){
width<-NA; Ns<-NA; percNs<-NA;
}else if(file.exists(fname)){
conseq<-readDNAStringSet(fname,format='fasta');
width<-width(conseq);
Ns<-sum(letterFrequency(conseq,c('N','+')));
percNs<-100*Ns/width;
}else{
width<-NA; Ns<-NA; percNs<-NA;
}
return(data.frame(width,Ns,percNs));
}
#VCF to data frame for a vcf generated by Lofreq
vcf_to_df<-function(vcf_fname,sampid){
require(VariantAnnotation);
vcf<-readVcf(vcf_fname);
results<-data.frame(samp_id=sampid,pos=start(rowRanges(vcf)),af=info(vcf)$AF,dp=info(vcf)$DP,ref=ref(vcf),
alt=unlist(alt(vcf)),stringsAsFactors=F);
results$snpid<-paste(results$ref,'_',results$pos,'_',results$alt,sep='');
results$major_af<-unlist(lapply(results$af,function(x)max(x,1-x)));
results$minor_af<-unlist(lapply(results$af,function(x)min(x,1-x)));
return(results)
}
#Extract VRC samp year and ID from the fastq file name
#Extract the sample-collection year from a VRC fastq file name.
#Accepts "<1-2 digits>_<year>-..." (year = 2nd underscore token) or
#"<year>-..." (the whole first dash-delimited token); NA otherwise.
get_year <- function(in_string) {
  yr <- strsplit(in_string, "-")[[1]][1]
  #Character class fixed from [0,1] to [01]: the stray comma also matched a
  #literal ',' so malformed tokens like "20,5" were accepted as years.
  if (grepl("^[0-9]{1,2}_(19[0-9][0-9]|20[01][0-9])", yr)) {
    return(strsplit(yr, "_")[[1]][2])
  } else if (!grepl("19[0-9][0-9]|20[01][0-9]", yr)) {
    #No plausible year anywhere in the token.
    #TODO: patterns cover 1900-2019 only; extend for 2020+ samples.
    return(NA)
  } else {
    return(yr)
  }
}
get_sampid<-function(in_string){
if(!is.na(get_year(in_string))){
return(strsplit(strsplit(in_string,'-')[[1]][2],'_')[[1]][1]);
}else{
return(NA);
}
}
| /dockerfiles/R/wgs_functions.R | no_license | moreiradaniel/pipecov | R | false | false | 25,021 | r | #Collection of functions for working with WGS data
#Pavitra Roychoudhury
#Aug 2017
#Return the number of mapped reads in a bam file
#Return the number of mapped reads in a bam file.
#Returns NA when the file is missing or its header cannot be read.
n_mapped_reads <- function(bamfname) {
  require(Rsamtools)
  #Validate first: the original called indexBam() before the existence check,
  #so a missing/corrupt bam errored instead of returning NA.
  if (file.exists(bamfname) &&
      !inherits(try(scanBamHeader(bamfname), silent = TRUE), "try-error")) {
    baifname <- indexBam(bamfname)              #idxstatsBam needs an index
    on.exit(file.remove(baifname), add = TRUE)  #don't leave stray .bai files
    return(idxstatsBam(bamfname)$mapped)
  } else {
    return(NA)
  }
}
#Make a new reference from scaffolds
#Make a new reference from assembly scaffolds.
#Lays the scaffolds in 'bamfname' on the reference 'reffname', takes their
#consensus, fills uncovered positions from the reference, re-applies scaffold
#insertions, and writes the result to ./ref_for_remapping/<name>.fasta.
#NOTE(review): on success the function only writes the fasta (the last
#expression is file.remove()); on failure it returns NA -- callers should not
#rely on the success return value.
make_ref_from_assembly<-function(bamfname,reffname){
require(Rsamtools);
require(GenomicAlignments);
require(parallel)
ncores<-detectCores();
#Read reference sequence
ref_seq<-readDNAStringSet(reffname);
#Proceed only if the bam exists and its header is readable
if(!is.na(bamfname)&class(try(scanBamHeader(bamfname),silent=T))!='try-error'){
#Index bam if required (reuses an existing .bai)
if(!file.exists(paste(bamfname,'.bai',sep=''))){
baifname<-indexBam(bamfname);
}else{
baifname<-paste(bamfname,'.bai',sep='');
}
#Import bam file (mapped records only)
params<-ScanBamParam(flag=scanBamFlag(isUnmappedQuery=FALSE),
what=c('qname','rname','strand','pos','qwidth','mapq','cigar','seq'));
gal<-readGAlignments(bamfname,index=baifname,param=params);
#Remove any contigs with width <200 bp
#gal<-gal[width(gal)>200];
#First lay contigs on reference space--this removes insertions and produces a seq of the same length as ref
qseq_on_ref<-sequenceLayer(mcols(gal)$seq,cigar(gal),from="query",to="reference");
#Pad all scaffolds with N to a common reference-length frame
qseq_on_ref_aligned<-stackStrings(qseq_on_ref,1,max(mcols(gal)$pos+qwidth(gal)-1,width(ref_seq)),
shift=mcols(gal)$pos-1,Lpadding.letter='N',Rpadding.letter='N');
#Make a consensus matrix and get a consensus sequence from the aligned scaffolds
cm<-consensusMatrix(qseq_on_ref_aligned,as.prob=T,shift=0)[c('A','C','G','T','N','-'),];
# cm[c('N','-'),]<-0;
#Padding Ns are ignored; columns covered only by padding become all-zero...
cm['N',]<-0;
cm<-apply(cm,2,function(x)if(all(x==0))return(x) else return(x/sum(x)));
#...and are then explicitly called N here
cm['N',colSums(cm)==0]<-1;
#'?' (no base above the 0.25 threshold) and '+' are both masked to N
con_seq<-DNAStringSet(gsub('\\?','N',consensusString(cm,threshold=0.25)));
con_seq<-DNAStringSet(gsub('\\+','N',con_seq));
#Now fill in the Ns with the reference
temp<-as.matrix(con_seq);
temp[temp=='N']<-as.matrix(ref_seq)[temp=='N'];
con_seq<-DNAStringSet(paste0(temp,collapse=''));
#NOTE(review): the '.' in sub('.bam',...) is an unescaped regex dot, so any
#character followed by 'bam' is replaced -- confirm '\\.bam$' was intended.
names(con_seq)<-sub('.bam','_consensus',basename(bamfname));
#Look for insertions in bam cigar string ('I' ops only)
cigs_ref<-cigarRangesAlongReferenceSpace(cigar(gal),with.ops=F,ops='I',
reduce.ranges=T,drop.empty.ranges=F,
pos=mcols(gal)$pos);
cigs_query<-cigarRangesAlongQuerySpace(cigar(gal),ops='I',with.ops=F,
reduce.ranges=T,drop.empty.ranges=F);
#Pull the inserted bases out of each scaffold's query sequence
all_ins<-mclapply(c(1:length(cigs_query)),function(i)
extractAt(mcols(gal)$seq[i],cigs_query[[i]])[[1]]);
#Merge all insertions into one table, ordered by reference position
all_ins_merged<-do.call('rbind',mclapply(c(1:length(cigs_ref)),function(i)
return(data.frame(
start_ref=start(cigs_ref[[i]]),end_ref=end(cigs_ref[[i]]),
start_query=start(cigs_query[[i]]),end_query=end(cigs_query[[i]]),
ins_seq=all_ins[[i]],width_ins=width(all_ins[[i]]))),
mc.cores=ncores));
all_ins_merged<-all_ins_merged[order(all_ins_merged$end_ref),];
# write.csv(all_ins_merged,'./testing/all_ins.csv',row.names=F);
#TO DO: Check for overlaps--should be minimal since scaffolds don't usually overlap that much
if(any(table(all_ins_merged$start_ref)>1)){
print('Overlapping insertions')
#not the best way, but just pick the first for now
all_ins_merged<-all_ins_merged[!duplicated(all_ins_merged[,c('start_ref','end_ref')]),];
}
#Now the beauty part of inserting the strings back in:
#split the consensus at each insert position and re-join with the insert
if(nrow(all_ins_merged)!=0){
new_strs<-DNAStringSet(rep('',nrow(all_ins_merged)+1))
for(i in 1:nrow(all_ins_merged)){
if(i==1){
new_strs[i]<-paste0(extractAt(con_seq,IRanges(start=1,end=all_ins_merged$end_ref[i]))[[1]],
all_ins_merged$ins_seq[i]);
}else{
new_strs[i]<-paste0(extractAt(con_seq,IRanges(start=all_ins_merged$start_ref[i-1],
end=all_ins_merged$end_ref[i]))[[1]],
all_ins_merged$ins_seq[i]);
}
}
#Last bit: tail of the consensus after the final insertion
new_strs[i+1]<-paste0(extractAt(con_seq,IRanges(start=all_ins_merged$start_ref[i],
end=width(con_seq)))[[1]])
temp_str<-paste0(as.character(new_strs),collapse='');
#Remove gaps to get final sequence
con_seq_final<-DNAStringSet(gsub('-','',temp_str));
#No insertions
}else{
con_seq_final<-con_seq;
}
names(con_seq_final)<-sub('.bam','_consensus',basename(bamfname));
if(!dir.exists('./ref_for_remapping')) dir.create('./ref_for_remapping');
writeXStringSet(con_seq_final,
paste0('./ref_for_remapping/',names(con_seq_final),'.fasta'));
#Delete bai file
file.remove(baifname);
}else{
print('Bam file could not be opened.')
return(NA)
}
}
#Takes in a bam file, produces consensus sequence
#Takes in a bam file, produces a consensus sequence (DNAStringSet).
#min_cov: columns covered by fewer than this many reads are called 'N'
#(default 1, i.e. only zero-coverage columns are masked -- backward
#compatible with the original intent).
#Returns the trimmed, gap-free consensus, or NA when the bam is
#missing/unreadable.
generate_consensus <- function(bamfname, min_cov = 1) {
  require(Rsamtools)
  require(GenomicAlignments)
  require(Biostrings)
  require(parallel)
  ncores <- detectCores()
  if (!is.na(bamfname) &&
      !inherits(try(scanBamHeader(bamfname), silent = TRUE), "try-error")) {
    #Index bam if required (reuses an existing .bai)
    if (!file.exists(paste(bamfname, '.bai', sep = ''))) {
      baifname <- indexBam(bamfname)
    } else {
      baifname <- paste(bamfname, '.bai', sep = '')
    }
    #Import bam file (mapped reads only)
    params <- ScanBamParam(flag = scanBamFlag(isUnmappedQuery = FALSE),
      what = c('qname', 'rname', 'strand', 'pos', 'qwidth', 'mapq', 'cigar', 'seq'))
    gal <- readGAlignments(bamfname, index = baifname, param = params)
    #Lay reads on reference space--this drops insertions relative to the reference
    qseq_on_ref <- sequenceLayer(mcols(gal)$seq, cigar(gal), from = "query", to = "reference")
    #Raw base counts per reference column
    cm <- consensusMatrix(qseq_on_ref, as.prob = F, shift = start(gal) - 1,
      width = seqlengths(gal))[c('A', 'C', 'G', 'T', 'N', '-'), ]
    #BUGFIX: the coverage filter compared counts '< 0', which is never TRUE
    #for non-negative counts, so zero-coverage columns were normalized to
    #0/0 = NaN instead of being masked as 'N'. Now columns below min_cov are
    #zeroed and called 'N'.
    poor_cov <- which(colSums(cm) < min_cov)
    cm <- apply(cm, 2, function(x) x / sum(x))
    cm[, poor_cov] <- 0
    cm['N', poor_cov] <- 1
    #Call consensus; sites with no base above 50% get an IUPAC ambiguity code
    tmp_str <- strsplit(consensusString(cm, ambiguityMap = '?', threshold = 0.5), '')[[1]]
    ambig_sites <- which(tmp_str == '?')
    ambig_bases <- unlist(lapply(ambig_sites, function(i) {
      mixedbase <- paste(names(cm[, i])[cm[, i] > 0], collapse = '')
      if (mixedbase %in% IUPAC_CODE_MAP) return(names(IUPAC_CODE_MAP)[IUPAC_CODE_MAP == mixedbase])
      else return('N')
    }))
    tmp_str[ambig_sites] <- ambig_bases
    con_seq <- DNAStringSet(paste0(tmp_str, collapse = ''))
    names(con_seq) <- sub('.bam', '_consensus', basename(bamfname))
    rm(tmp_str)
    #Remove gaps and leading/trailing Ns to get the final sequence
    con_seq_trimmed <- DNAStringSet(gsub("N*N$", '', gsub("^N*", '', as.character(con_seq))))
    con_seq_final <- DNAStringSet(gsub('-', '', as.character(con_seq_trimmed)))
    names(con_seq_final) <- sub('.bam', '_consensus', basename(bamfname))
    #Delete bai file
    file.remove(baifname)
    return(con_seq_final)
  } else {
    return(NA)
  }
}
#Generate HSV consensus sequences + mapping stats for one sample.
#Finds the sample's bams (assembly-remapped and mapped-to-ref) for each of
#three HSV references, writes one consensus fasta per reference to
#./consensus_seqs_all/ and a stats CSV to ./stats/, and returns TRUE.
clean_consensus_hsv<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Trailing '_' narrows the grep below; NOTE(review): grep() still substring-
#matches, so 'samp1_' would also match 'samp11_' -- confirm ids are unambiguous.
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref=c('hsv1_ref','hsv2_sd90e','hsv2_ref_hg52'),
bamfname_merged=c(grep(sampname,list.files(merged_bam_folder,'_hsv1_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(merged_bam_folder,'_hsv2_sd90e.*bam$',full.names=T),value=T),
grep(sampname,list.files(merged_bam_folder,'_hsv2_ref_hg52.*bam$',full.names=T),value=T)),
bamfname_mapped=c(grep(sampname,list.files(mapped_reads_folder,'_hsv1_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(mapped_reads_folder,'_hsv2_sd90e.*bam$',full.names=T),value=T),
grep(sampname,list.files(mapped_reads_folder,'_hsv2_ref_hg52.*bam$',full.names=T),value=T)),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus (one per reference)
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' is counted together with 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
#NOTE(review): sampname already ends in '_', so the file is named
#'<samp>__mappingstats.csv' (double underscore) -- confirm intended.
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Generate HHV-6A/B consensus sequences + mapping stats for one sample.
#Same contract as clean_consensus_hsv: writes fastas to ./consensus_seqs_all/
#and a stats CSV to ./stats/, returns TRUE. Unlike the HSV variant, the grep
#pattern '\\/<samp>_' is anchored to the path separator, avoiding partial-id
#matches, and no extra '_' is appended to the stats filename.
clean_consensus_hhv6<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
mapping_stats<-data.frame(
ref=c('hhv6A_ref_U1102','hhv6B_ref_z29'),
bamfname_merged=c(grep(paste0('\\/',sampname,'_'),list.files(merged_bam_folder,"_hhv6A_ref_U1102.*bam$",full.names=T),value=T),
grep(paste0('\\/',sampname,'_'),list.files(merged_bam_folder,'_hhv6B_ref_z29.*bam$',full.names=T),value=T)),
bamfname_mapped=c(grep(paste0('\\/',sampname,'_'),list.files(mapped_reads_folder,'_hhv6A_ref_U1102.*bam$',full.names=T),value=T),
grep(paste0('\\/',sampname,'_'),list.files(mapped_reads_folder,'_hhv6B_ref_z29.*bam$',full.names=T),value=T)),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus (one per reference)
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' is counted together with 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Build the HIV (HXB2) consensus for one sample and record mapping statistics.
#Same contract as the other clean_consensus_* helpers: writes the consensus
#fasta under ./consensus_seqs_all/, a per-sample stats CSV under ./stats/,
#and returns TRUE.
clean_consensus_hiv <- function(sampname, merged_bam_folder, mapped_reads_folder) {
  require(Rsamtools)
  require(GenomicAlignments)
  require(Biostrings)
  #Locate this sample's HXB2 bam in a given folder
  find_bam <- function(folder) {
    grep(sampname, list.files(folder, "_hiv_hxb2_ref.*bam$", full.names = T), value = T)
  }
  mapping_stats <- data.frame(
    ref = c('hiv_hxb2_ref'),
    bamfname_merged = find_bam(merged_bam_folder),
    bamfname_mapped = find_bam(mapped_reads_folder),
    mapped_reads_ref = 0, mapped_reads_assemblyref = 0,
    perc_Ns = 0, num_Ns = 0, width = 0,
    stringsAsFactors = F)
  #Consensus from reads remapped to the assembly-adjusted reference
  con_seqs <- lapply(mapping_stats$bamfname_merged, generate_consensus)
  if (!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all')
  for (cs in con_seqs) {
    writeXStringSet(cs, file = paste('./consensus_seqs_all/', names(cs), '.fasta', sep = ''),
      format = 'fasta')
  }
  #Mapped-read counts and N content ('+' counted together with 'N')
  mapping_stats$mapped_reads_ref <- unlist(lapply(mapping_stats$bamfname_mapped, n_mapped_reads))
  mapping_stats$mapped_reads_assemblyref <- unlist(lapply(mapping_stats$bamfname_merged, n_mapped_reads))
  mapping_stats$num_Ns <- unlist(lapply(con_seqs, function(x) sum(letterFrequency(x, c('N', '+')))))
  mapping_stats$width <- unlist(lapply(con_seqs, width))
  mapping_stats$perc_Ns <- 100 * mapping_stats$num_Ns / mapping_stats$width
  if (!dir.exists('./stats/')) dir.create('./stats/')
  write.csv(mapping_stats, file = paste('./stats/', sampname, '_mappingstats.csv', sep = ''),
    row.names = F)
  return(TRUE)
}
#Generate the HHV-8 consensus sequence + mapping stats for one sample.
#Writes the consensus fasta to ./consensus_seqs_all/ and a stats CSV to
#./stats/, and returns TRUE.
clean_consensus_hhv8<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Trailing '_' narrows the substring grep below
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref='hhv8_ref',
bamfname_merged=grep(sampname,list.files(merged_bam_folder,'_hhv8.*bam$',full.names=T),value=T),
bamfname_mapped=grep(sampname,list.files(mapped_reads_folder,'_hhv8.*bam$',full.names=T),value=T),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' is counted together with 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
#NOTE(review): sampname already ends in '_', so the stats file gets a double
#underscore in its name -- confirm intended.
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Generate RSV-A/B consensus sequences + mapping stats for one sample.
#Writes one consensus fasta per reference to ./consensus_seqs_all/ and a
#stats CSV to ./stats/, and returns TRUE.
clean_consensus_rsv<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Trailing '_' narrows the substring grep below
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref=c('rsvA_ref','rsvB_ref'),
bamfname_merged=c(grep(sampname,list.files(merged_bam_folder,'_rsvA_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(merged_bam_folder,'_rsvB_ref.*bam$',full.names=T),value=T)),
bamfname_mapped=c(grep(sampname,list.files(mapped_reads_folder,'_rsvA_ref.*bam$',full.names=T),value=T),
grep(sampname,list.files(mapped_reads_folder,'_rsvB_ref.*bam$',full.names=T),value=T)),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus (one per reference)
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' is counted together with 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
#NOTE(review): sampname already ends in '_', so the stats file gets a double
#underscore in its name -- confirm intended.
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Measles (added Aug 2019)
#Generate the measles consensus sequence + mapping stats for one sample.
#Writes the consensus fasta to ./consensus_seqs_all/ and a stats CSV to
#./stats/, and returns TRUE.
clean_consensus_measles<-function(sampname,merged_bam_folder,mapped_reads_folder){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#Trailing '_' narrows the substring grep below
sampname<-paste0(sampname,'_');
mapping_stats<-data.frame(ref='measles_ref',
bamfname_merged=grep(sampname,list.files(merged_bam_folder,'_measles_ref.*bam$',full.names=T),value=T),
bamfname_mapped=grep(sampname,list.files(mapped_reads_folder,'_measles_ref.*bam$',full.names=T),value=T),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' is counted together with 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
#NOTE(review): sampname already ends in '_', so the stats file gets a double
#underscore in its name -- confirm intended.
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Treponema (added Dec 2019)
#Generate the Treponema consensus sequence + mapping stats for one sample.
#'ref' is only recorded in the stats table; bams are located by sample name.
#Writes the consensus fasta to ./consensus_seqs_all/ and a stats CSV to
#./stats/, and returns TRUE.
clean_consensus_tp<-function(sampname,merged_bam_folder,mapped_reads_folder,ref){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
#NOTE(review): list.files(pattern=) expects a regex; '*.bam$' is glob-style
#(the leading '*' has nothing to repeat) -- likely intended '\\.bam$'; confirm.
mapping_stats<-data.frame(ref=ref,
bamfname_merged=grep(sampname,list.files(merged_bam_folder,'*.bam$',full.names=T),value=T),
bamfname_mapped=grep(sampname,list.files(mapped_reads_folder,'*.bam$',full.names=T),value=T),
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seqs<-lapply(mapping_stats$bamfname_merged,generate_consensus);
if(!dir.exists('./consensus_seqs_all')) dir.create('./consensus_seqs_all');
dummyvar<-lapply(con_seqs,function(x)
writeXStringSet(x,file=paste('./consensus_seqs_all/',names(x),'.fasta',sep=''),format='fasta'));
rm(dummyvar)
#Compute #mapped reads and %Ns ('+' is counted together with 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$bamfname_mapped,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$bamfname_merged,n_mapped_reads));
mapping_stats$num_Ns<-unlist(lapply(con_seqs,function(x)sum(letterFrequency(x,c('N','+')))));
mapping_stats$width<-unlist(lapply(con_seqs,width));
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#hCoV (added Mar 2020)
#hCoV (added Mar 2020)
#Generate the hCoV consensus sequence + mapping stats for one sample.
#Unlike the other clean_consensus_* helpers, the bam paths are passed in
#directly rather than located by grep. Writes the consensus to
#./consensus_seqs/<sampname>.fasta and a stats CSV to ./stats/; returns TRUE.
clean_consensus_hcov<-function(sampname,remapped_bamfname,mappedtoref_bamfname,ref){
require(Rsamtools);
require(GenomicAlignments);
require(Biostrings);
mapping_stats<-data.frame(ref=ref,
remapped_bam=remapped_bamfname,
mappedtoref_bam=mappedtoref_bamfname,
mapped_reads_ref=0,mapped_reads_assemblyref=0,perc_Ns=0,num_Ns=0,width=0,
stringsAsFactors=F);
#Import mapped reads + assembly and generate consensus
con_seq<-generate_consensus(mapping_stats$remapped_bam);
if(!dir.exists('./consensus_seqs')) dir.create('./consensus_seqs');
writeXStringSet(con_seq,file=paste('./consensus_seqs/',sampname,'.fasta',sep=''),format='fasta');
#Compute #mapped reads and %Ns ('+' is counted together with 'N')
mapping_stats$mapped_reads_ref<-unlist(lapply(mapping_stats$mappedtoref_bam,n_mapped_reads));
mapping_stats$mapped_reads_assemblyref<-unlist(lapply(mapping_stats$remapped_bam,n_mapped_reads));
mapping_stats$num_Ns<-sum(letterFrequency(con_seq,c('N','+')));
mapping_stats$width<-width(con_seq);
mapping_stats$perc_Ns<-100*mapping_stats$num_Ns/mapping_stats$width;
if(!dir.exists('./stats/')) dir.create('./stats/');
write.csv(mapping_stats,file=paste('./stats/',sampname,'_mappingstats.csv',sep=''),row.names=F);
return(TRUE)
}
#Find coverage at each position in the alignment
#Find coverage at each position in the alignment.
#Returns the per-position coverage (result of GenomicAlignments::coverage),
#or NA when the bam is missing/unreadable.
cov_by_pos <- function(bamfname) {
  require(Rsamtools)
  require(GenomicAlignments)
  #Scalar '&&' + inherits() instead of elementwise '&' with a string
  #comparison against class()
  if (file.exists(bamfname) &&
      !inherits(try(scanBamHeader(bamfname), silent = TRUE), "try-error")) {
    #Rebuild the index from scratch so a stale .bai can't be used
    if (file.exists(paste(bamfname, '.bai', sep = '')))
      file.remove(paste(bamfname, '.bai', sep = ''))
    baifname <- indexBam(bamfname)
    #BUGFIX: remove the index even if the import below errors (it was
    #previously leaked on failure)
    on.exit(file.remove(baifname), add = TRUE)
    params <- ScanBamParam(flag = scanBamFlag(isUnmappedQuery = FALSE),
      what = c('qname', 'rname', 'strand', 'pos', 'qwidth', 'mapq', 'cigar', 'seq'))
    gal <- readGAlignments(bamfname, index = baifname, param = params)
    return(coverage(gal))
  } else {
    return(NA)
  }
}
#Compute coverage stats
#Compute coverage stats for a bam file.
#Returns a one-row data.frame (mapped, avg_cov, sd_cov, min_cov, max_cov);
#all NA when the input is empty, missing, or unreadable.
get_coverage <- function(bamfname) {
  #BUGFIX: load the packages before scanBamHeader() is used in the validity
  #check. Previously the check ran before require() executed inside the
  #branch, so with Rsamtools not yet attached the try() always failed and
  #every bam -- valid or not -- was silently reported as NA.
  require(Rsamtools)
  require(GenomicAlignments)
  mapped <- NA; avg_cov <- NA
  sd_cov <- NA; min_cov <- NA; max_cov <- NA
  if (length(bamfname) > 0 &&
      file.exists(bamfname) &&
      !inherits(try(scanBamHeader(bamfname), silent = TRUE), "try-error")) {
    #Import alignment, rebuilding the index from scratch
    if (file.exists(paste(bamfname, '.bai', sep = '')))
      file.remove(paste(bamfname, '.bai', sep = ''))  #remove any old index files
    baifname <- indexBam(bamfname)
    on.exit(file.remove(baifname), add = TRUE)  #clean up even on error
    params <- ScanBamParam(flag = scanBamFlag(isUnmappedQuery = FALSE),
      what = c('qname', 'rname', 'strand', 'pos', 'qwidth', 'mapq', 'cigar', 'seq'))
    gal <- readGAlignments(bamfname, index = baifname, param = params)
    cov <- coverage(gal)
    mapped <- length(gal)
    avg_cov <- mean(cov)
    sd_cov <- sd(cov)
    min_cov <- min(cov)
    max_cov <- max(cov)
  }
  return(data.frame(mapped, avg_cov, sd_cov, min_cov, max_cov))
}
#Extracts number of reads and read widths from html report generated by fastqc
#Pull the read count, read length and %GC out of a fastqc html report.
#Returns a one-row data.frame; all columns NA when the report is absent.
#fastq_width is a single number for raw reads and a range for trimmed reads.
fastqc_readstats <- function(fname) {
  require(rvest)
  fastq_reads <- NA
  fastq_width <- NA
  gc <- NA
  if (file.exists(fname)) {
    stats_tbl <- html_table(read_html(fname))[[1]]
    #Look up one 'Measure' row in the fastqc summary table
    lookup <- function(measure) stats_tbl[stats_tbl$Measure == measure, "Value"]
    fastq_reads <- as.numeric(lookup("Total Sequences"))
    fastq_width <- lookup("Sequence length")
    gc <- as.numeric(lookup("%GC"))
  }
  data.frame(fastq_reads, fastq_width, gc, stringsAsFactors = F)
}
#Compute stats on a consensus seq (or really any fasta file)
#Compute stats on a consensus seq (or really any fasta file):
#sequence width, N/+ count, and percent Ns. Returns a one-row data.frame of
#NAs when the input is empty or the file does not exist.
conseq_stats <- function(fname) {
  require(Biostrings)
  if (length(fname) == 0 || !file.exists(fname)) {
    return(data.frame(width = NA, Ns = NA, percNs = NA))
  }
  conseq <- readDNAStringSet(fname, format = 'fasta')
  seq_width <- width(conseq)
  n_count <- sum(letterFrequency(conseq, c('N', '+')))
  data.frame(width = seq_width, Ns = n_count, percNs = 100 * n_count / seq_width)
}
#VCF to data frame for a vcf generated by Lofreq
#VCF to data frame for a vcf generated by Lofreq.
#Returns one row per variant with sample id, position, allele frequency (AF),
#depth (DP), ref/alt bases, a snpid join key, and major/minor AF.
vcf_to_df<-function(vcf_fname,sampid){
require(VariantAnnotation);
vcf<-readVcf(vcf_fname);
results<-data.frame(samp_id=sampid,pos=start(rowRanges(vcf)),af=info(vcf)$AF,dp=info(vcf)$DP,ref=ref(vcf),
alt=unlist(alt(vcf)),stringsAsFactors=F);
#Stable key '<ref>_<pos>_<alt>' for joining variants across samples
results$snpid<-paste(results$ref,'_',results$pos,'_',results$alt,sep='');
#Major/minor AF computed as max/min of AF and 1-AF (two-allele assumption)
results$major_af<-unlist(lapply(results$af,function(x)max(x,1-x)));
results$minor_af<-unlist(lapply(results$af,function(x)min(x,1-x)));
return(results)
}
#Extract VRC samp year and ID from the fastq file name
#Extract the sample-collection year from a VRC fastq file name.
#Accepts "<1-2 digits>_<year>-..." (year = 2nd underscore token) or
#"<year>-..." (the whole first dash-delimited token); NA otherwise.
get_year <- function(in_string) {
  yr <- strsplit(in_string, "-")[[1]][1]
  #Character class fixed from [0,1] to [01]: the stray comma also matched a
  #literal ',' so malformed tokens like "20,5" were accepted as years.
  if (grepl("^[0-9]{1,2}_(19[0-9][0-9]|20[01][0-9])", yr)) {
    return(strsplit(yr, "_")[[1]][2])
  } else if (!grepl("19[0-9][0-9]|20[01][0-9]", yr)) {
    #No plausible year anywhere in the token.
    #TODO: patterns cover 1900-2019 only; extend for 2020+ samples.
    return(NA)
  } else {
    return(yr)
  }
}
#Extract the VRC sample id (the underscore-token right after the first '-')
#from a fastq file name; NA when no valid year can be parsed from the name.
get_sampid <- function(in_string) {
  if (is.na(get_year(in_string))) {
    return(NA)
  }
  after_dash <- strsplit(in_string, '-')[[1]][2]
  return(strsplit(after_dash, '_')[[1]][1])
}
|
#' Annual Maximum Rainfall
#'
#' Example of observed annual maximum rainfall statistic calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010 (122 years)
#'
#' @source onk.silo.daily.csv
#' @format Numeric vector of length 122
#' @examples
#' obs_ann_rain_max
"obs_ann_rain_max"
#' Annual Total Rainfall
#'
#' Example of observed annual total rainfall statistic calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010 (122 years)
#'
#' @source onk.silo.daily.csv
#' @format Numeric vector of length 122
#' @examples
#' obs_ann_rain_tot
"obs_ann_rain_tot"
#' Correlation of Wet Day Rainfall
#'
#' Example of observed wet day rainfall correlations by month calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_cor_wetdays_daily
"obs_cor_wetdays_daily"
#' Variance of Wet Day Rainfall
#'
#' Example of observed variance of wet day rainfall by month calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_var_wetdays_daily
"obs_var_wetdays_daily"
#' Mean Monthly Rainfall
#'
#' Example of observed monthly mean rainfall calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_mon_rain_mean
"obs_mon_rain_mean"
#' Standard Deviation of Monthly Rainfall
#'
#' Example of observed monthly rainfall standard deviations calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_mon_rain_sd
"obs_mon_rain_sd"
#' Proportion of Dry Days
#'
#' Example of observed proportion of dry (zero-rainfall) days calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_pdry
"obs_pdry"
| /R/obs_data.R | no_license | anjanadevanand/toyWGN | R | false | false | 2,477 | r | #' Annual Maximum Rainfall
#'
#' Example of observed annual maximum rainfall statistic calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010 (122 years)
#'
#' @source onk.silo.daily.csv
#' @format Numeric vector of length 122
#' @examples
#' obs_ann_rain_max
"obs_ann_rain_max"
#' Annual Total Rainfall
#'
#' Example of observed annual total rainfall statistic calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010 (122 years)
#'
#' @source onk.silo.daily.csv
#' @format Numeric vector of length 122
#' @examples
#' obs_ann_rain_tot
"obs_ann_rain_tot"
#' Correlation of Wet Day Rainfall
#'
#' Example of observed wet day rainfall correlations by month calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_cor_wetdays_daily
"obs_cor_wetdays_daily"
#' Variance of Wet Day Rainfall
#'
#' Example of observed variance of wet day rainfall by month calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_var_wetdays_daily
"obs_var_wetdays_daily"
#' Mean Monthly Rainfall
#'
#' Example of observed monthly mean rainfall calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_mon_rain_mean
"obs_mon_rain_mean"
#' Standard Deviation of Monthly Rainfall
#'
#' Example of observed monthly rainfall standard deviations calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_mon_rain_sd
"obs_mon_rain_sd"
#' Proportion of Dry Days
#'
#' Example of observed proportion of dry (zero-rainfall) days calculated from data
#' at station no. 23700 from 01 Jan 1889 to 31 Dec 2010
#'
#' @source onk.silo.daily.csv
#' @format Named numeric vector of length 12. The names of the vector are the months
#' from Jan to Dec.
#' @examples
#' obs_pdry
"obs_pdry"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readCurationFile.R
\name{readCurationFile}
\alias{readCurationFile}
\title{Read curation file}
\usage{
readCurationFile(file.rds, file.curation = gsub(".rds$", ".csv", file.rds),
remove.failed = FALSE, report.best.only = FALSE, min.ploidy = NULL,
max.ploidy = NULL)
}
\arguments{
\item{file.rds}{Output of the \code{\link{runAbsoluteCN}} function,
serialized with \code{saveRDS}.}
\item{file.curation}{Filename of a curation file that points to the correct
tumor purity and ploidy solution.}
\item{remove.failed}{Do not return solutions that failed.}
\item{report.best.only}{Only return correct/best solution (useful on low
memory machines when lots of samples are loaded).}
\item{min.ploidy}{Minimum ploidy to be considered. If \code{NULL}, all. Can
be used to automatically ignore unlikely solutions.}
\item{max.ploidy}{Maximum ploidy to be considered. If \code{NULL}, all. Can
be used to automatically ignore unlikely solutions.}
}
\value{
The return value of the corresponding \code{\link{runAbsoluteCN}}
call, but with the results array manipulated according the curation CSV file
and arguments of this function.
}
\description{
Function that can be used to read the curated output of the
\code{\link{runAbsoluteCN}} function.
}
\examples{
data(purecn.example.output)
file.rds <- "Sample1_PureCN.rds"
createCurationFile(file.rds)
# User can change the maximum likelihood solution manually in the generated
# CSV file. The correct solution is then loaded with readCurationFile.
purecn.curated.example.output <- readCurationFile(file.rds)
}
\seealso{
\code{\link{runAbsoluteCN} \link{createCurationFile}}
}
\author{
Markus Riester
}
| /man/readCurationFile.Rd | permissive | chapmanb/PureCN | R | false | true | 1,727 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readCurationFile.R
\name{readCurationFile}
\alias{readCurationFile}
\title{Read curation file}
\usage{
readCurationFile(file.rds, file.curation = gsub(".rds$", ".csv", file.rds),
remove.failed = FALSE, report.best.only = FALSE, min.ploidy = NULL,
max.ploidy = NULL)
}
\arguments{
\item{file.rds}{Output of the \code{\link{runAbsoluteCN}} function,
serialized with \code{saveRDS}.}
\item{file.curation}{Filename of a curation file that points to the correct
tumor purity and ploidy solution.}
\item{remove.failed}{Do not return solutions that failed.}
\item{report.best.only}{Only return correct/best solution (useful on low
memory machines when lots of samples are loaded).}
\item{min.ploidy}{Minimum ploidy to be considered. If \code{NULL}, all. Can
be used to automatically ignore unlikely solutions.}
\item{max.ploidy}{Maximum ploidy to be considered. If \code{NULL}, all. Can
be used to automatically ignore unlikely solutions.}
}
\value{
The return value of the corresponding \code{\link{runAbsoluteCN}}
call, but with the results array manipulated according the curation CSV file
and arguments of this function.
}
\description{
Function that can be used to read the curated output of the
\code{\link{runAbsoluteCN}} function.
}
\examples{
data(purecn.example.output)
file.rds <- "Sample1_PureCN.rds"
createCurationFile(file.rds)
# User can change the maximum likelihood solution manually in the generated
# CSV file. The correct solution is then loaded with readCurationFile.
purecn.curated.example.output <-readCurationFile(file.rds)
}
\seealso{
\code{\link{runAbsoluteCN} \link{createCurationFile}}
}
\author{
Markus Riester
}
|
#' @export
#Fit an ARIMA model with Fourier seasonal regressors (annual period 365.25)
#to a daily series and forecast over the test horizon.
#train_label/test_label: numeric series; Date: dates aligned with train_label;
#plt: if TRUE, overlay the forecast on the observed series.
#Returns list(pred.train = in-sample fitted values, pred.test = forecasts).
arima_rte = function(train_label, test_label, Date, plt = FALSE){
train_ts = xts::xts(train_label, order.by = Date)
#K = 15 Fourier harmonics of the yearly cycle, used as external regressors
train_msts = forecast::msts(train_ts, seasonal.periods = 365.25)
tmp = forecast::fourier(train_msts, K = 15, h = length(test_label))
arimamodel = forecast::auto.arima(train_ts, seasonal =FALSE, xreg = forecast::fourier(train_msts, K = 15))
#NOTE(review): local 'forecast' shadows the forecast package name (calls
#above/below still work because they are namespaced with forecast::)
forecast = data.frame(forecast::forecast(arimamodel, xreg = tmp))
if (plt){
#NOTE(review): this branch references 'train', 'test' and 'total.time',
#which are not arguments of this function -- it errors unless those objects
#exist in the calling environment; confirm intended.
plot(train$Load, type='l', xlim=c(0,length(total.time)),
main="Prédiction de ARIMA", xlab="Temps", ylab="Load")
lines(test$time, forecast$Point.Forecast, col='green', lwd=1)
}
return(list("pred.train" = arimamodel$fitted, "pred.test" = forecast$Point.Forecast))
}
| /R/arima.r | no_license | nbereux/SIM202 | R | false | false | 770 | r | #' @export
arima_rte = function(train_label, test_label, Date, plt = FALSE){
train_ts = xts::xts(train_label, order.by = Date)
train_msts = forecast::msts(train_ts, seasonal.periods = 365.25)
tmp = forecast::fourier(train_msts, K = 15, h = length(test_label))
arimamodel = forecast::auto.arima(train_ts, seasonal =FALSE, xreg = forecast::fourier(train_msts, K = 15))
forecast = data.frame(forecast::forecast(arimamodel, xreg = tmp))
if (plt){
plot(train$Load, type='l', xlim=c(0,length(total.time)),
main="Prédiction de ARIMA", xlab="Temps", ylab="Load")
lines(test$time, forecast$Point.Forecast, col='green', lwd=1)
}
return(list("pred.train" = arimamodel$fitted, "pred.test" = forecast$Point.Forecast))
}
|
library(tidyverse)
library(rebus)
library(httr)
library(rvest)
print("It might take some minutes to scrape...")
#The following functions are sort of helper functions in order to scrape the data:
# Normalize a scraped string: strip newlines, spaces, and the symbols $ % + ,
# Returns the cleaned string. (The original ended on an assignment, whose
# value is returned invisibly; return the value explicitly instead.)
clean_str = function(strg) {
  strg = str_remove_all(strg, "\n")
  strg = str_remove_all(strg, " ")
  strg = str_remove_all(strg, regex("[$%+,]"))
  strg
}
# Extract the school type from the first info entry: keep only the text
# before the first comma, then strip whitespace/symbols via clean_str().
get_school_type = function(info) {
  type_part = str_split(info[1], ",", n = 2)[[1]][1]
  return(clean_str(type_part))
}
# Extract the founding year: clean the second info entry and drop any
# lowercase letters surrounding the digits.
get_year_founded = function(info) {
  cleaned = clean_str(info[2])
  return(str_remove_all(cleaned, regex("[a-z]")))
}
# returns religious affiliation
# (info[3] with newlines and the literal label text stripped)
get_religion = function(info) {
religion = str_remove_all(info[3], "\n")
religion = str_remove_all(religion, "religious affiliation")
# NOTE(review): this removes the literal substring "/s"; it looks like the
# regex "\\s" (whitespace) may have been intended -- confirm.
religion = str_remove_all(religion, "/s")
return(religion)
}
# returns endowment
# info[6] holds the endowment figure plus a unit word; the value is
# normalized to millions of dollars.
get_endowment = function(info) {
# drop lowercase unit/label text and any 4-digit year such as "2019"
endowment = str_remove_all(info[6], regex("[a-z]"))
endowment = str_remove_all(endowment, regex("20[0-9]{2}"))
endowment = clean_str(endowment)
# choose unit of million or billion
if (str_detect(info[6], pattern = "million")) {
# already in millions: returned as a character string
return(endowment)
} else {
# assumed billions: converted to millions (returned as numeric)
endowment = as.numeric(endowment) * 1000
return(endowment)
}
}
# Median starting salary for new graduates, or NA when the first entry of
# info2 contains no digits.
get_median_starting_salary = function(info2) {
  if (!str_detect(info2[1], regex("[0-9]"))) {
    return(NA)
  }
  salary = str_remove_all(clean_str(info2[1]), regex("[,*]"))
  return(salary)
}
# returns acceptance rate
# Scan entries 2..9 of info2 for the first one containing a percent sign
# and return it cleaned; NA when none is found.
get_acc_rate = function(info2) {
  lon = 2:9
  accept = NA
  for (i in lon) {
    if (str_detect(info2[i], "%")) {
      # (The original also assigned info2[2] here, immediately overwritten
      # by the next line -- dead code, removed.)
      accept = clean_str(info2[i])
      break
    }
  }
  return(accept)
}
# get student faculty ratio
# First entry among info2[9..13] containing ":" (e.g. "7:1"). The try()
# keeps the NA default when info2 is shorter than 13 entries (str_detect
# on a missing value makes the if() error out, which try() swallows).
get_stu_fac_ratio = function(info2) {
lon = 9:13
ratio = NA
try({
for (i in lon) {
if (str_detect(info2[i], ":")) {
ratio = clean_str(info2[i])
break
}
}
}, silent = TRUE)
return(ratio)
}
# get 4 year graduation rate
# Scan entries 10..14 for the first value containing "%". The try() keeps
# the NA default when the vector is shorter than 14 entries.
get_grad_rate = function(info) {
  lon = 10:14
  grad = NA
  try({
    for (i in lon) {
      # Bug fix: the body referenced the global `info2` instead of the
      # `info` parameter, silently depending on the caller's environment.
      if (str_detect(info[i], "%")) {
        grad = clean_str(info[i])
        break
      }
    }
  }, silent = TRUE)
  return(grad)
}
# gets score
# details[2] holds the overall score; letters are stripped and the text
# before the first "/" is returned. NA when "Overall" is absent.
get_score = function(details) {
score = NA
if (str_detect(details[2], pattern = "Overall")) {
score = clean_str(details[2])
score = str_remove_all(score, regex("[a-zA-Z]"))
score = str_split(score, "/", n = 2)
score = score[[1]][1]
}
return(score)
}
# gets location
# The location is the first of the first four detail entries that
# contains a comma (e.g. "City, State"); NA when none matches.
get_location = function(details) {
  location = NA
  for (idx in 1:4) {
    if (str_detect(details[idx], ",")) {
      location = details[idx]
      break
    }
  }
  return(location)
}
# gets tuition
# Tuition is the entry immediately after the "Quick" marker within
# details[4..9]; a trailing parenthesised note is split off. Note there is
# no break, so a later "Quick" marker would overwrite the value.
get_tuition = function(details) {
lon = 4:9
tuition = NA
for (i in lon) {
if (str_detect(details[i], "Quick")) {
ind = i + 1
tuition = clean_str(details[ind])
tuition = str_split(tuition, "\\(", n = 2)
tuition = tuition[[1]][1]
}
}
return(tuition)
}
# get room & board
# Room & board is the second entry after the "Quick" marker within
# details[4..9]; a trailing parenthesised note is split off. As in
# get_tuition, there is no break after a match.
get_room_board = function(details) {
lon = 4:9
rb = NA
for (i in lon) {
if (str_detect(details[i], "Quick")) {
ind = i + 2
rb = clean_str(details[ind])
rb = str_split(rb, "\\(", n = 2)
rb = rb[[1]][1]
}
}
return(rb)
}
# get enrollment
# Enrollment is the third entry after the "Quick" marker in details[4..9];
# a trailing parenthesised note is split off.
get_enrollment = function(details) {
  lon = 4:9
  enroll = NA
  for (i in lon) {
    if (str_detect(details[i], "Quick")) {
      ind = i + 3
      enroll = clean_str(details[ind])
    }
  }
  # Bug fix: when no "Quick" marker is found, enroll stays NA and
  # str_detect(NA, ...) yields NA, making the if() below error out.
  if (!is.na(enroll) && str_detect(enroll, "\\(")) {
    enroll = str_split(enroll, "\\(", n = 2)
    enroll = enroll[[1]][1]
  }
  return(enroll)
}
# Scrape university names and profile links from the paginated US News
# ranking table, filling the preallocated vectors page by page until a
# request fails (which signals we ran past the last page).
universities = rep(NA, 312)
links_u = rep(NA, 312)
count = 0
try(while (TRUE) {
  # change url
  count = count + 1
  url = str_c("https://www.usnews.com/best-colleges/rankings/national-universities?_mode=table&_page=", as.character(count))
  # Bug fix: the original used `error = function() break`, which fails twice
  # over -- the handler must accept the condition object, and `break` is not
  # legal inside a function body. Use a NULL sentinel and break in the loop.
  webpage <- tryCatch(read_html(url), error = function(e) NULL)
  if (is.null(webpage)) break
  # university names
  uni_names = html_text(html_nodes(webpage, "td.full-width > div > a"))
  universities[(sum(!is.na(universities)) + 1):(sum(!is.na(universities)) + length(uni_names))] = uni_names
  # links
  semi_links = html_attr(html_nodes(webpage, "div.text-strong.text-large.block-tighter > a"), "href")
  links_u[(sum(!is.na(links_u)) + 1):(sum(!is.na(links_u)) + length(semi_links))] = str_c("https://www.usnews.com", semi_links)
}, silent = TRUE)
# Preallocate one result vector per scraped attribute, parallel to
# `universities`; entries stay NA for pages that fail to parse.
year_founded = rep(NA, length(universities))
religion = rep(NA, length(universities))
endowment = rep(NA, length(universities))
school_type = rep(NA, length(universities))
median_start_sal = rep(NA, length(universities))
acc_rate = rep(NA, length(universities))
stu_fac_ratio = rep(NA, length(universities))
grad_rate = rep(NA, length(universities))
score = rep(NA, length(universities))
location = rep(NA, length(universities))
tuition = rep(NA, length(universities))
room_board = rep(NA, length(universities))
enrollment = rep(NA, length(universities))
# Visit each university page and extract each attribute via the helper
# functions above. The try() wrapper aborts scraping on the first bad page
# without killing the script (already-filled entries keep their values,
# the rest stay NA).
try(
for (i in 1:length(universities)) {
link = read_html(links_u[i])
# Three node sets feed the helpers: quick facts, page details, stats.
info = html_text(html_nodes(link, ".flex-small"))
details = html_text(html_nodes(link, ".full-width , strong"))
info2 = html_text(html_nodes(link, ".medium-end"))
year_founded[i] = get_year_founded(info)
religion[i] = get_religion(info)
endowment[i] = get_endowment(info)
school_type[i] = get_school_type(info)
median_start_sal[i] = get_median_starting_salary(info2)
acc_rate[i] = get_acc_rate(info2)
stu_fac_ratio[i] = get_stu_fac_ratio(info2)
grad_rate[i] = get_grad_rate(info2)
score[i] = get_score(details)
location[i] = get_location(details)
tuition[i] = get_tuition(details)
room_board[i] = get_room_board(details)
enrollment[i] = get_enrollment(details)
},
silent = TRUE)
# Assemble the scraped vectors into a data frame with typed columns
# (character vectors are coerced before integer conversion so factor
# levels are not misread as codes).
df = data.frame(as.character(universities),
                as.integer(as.character(year_founded)),
                religion, as.integer(as.character(endowment)),
                school_type,
                as.integer(as.character(median_start_sal)),
                as.integer(as.character(acc_rate)),
                as.character(stu_fac_ratio),
                as.integer(as.character(grad_rate)),
                as.integer(as.character(score)),
                as.character(location),
                as.integer(as.character(tuition)),
                as.integer(as.character(room_board)),
                as.integer(as.character(enrollment)))
colnames(df) = c("University",
                 "Year_Founded",
                 "Religion",
                 "Endowment",
                 "School_Type",
                 "Median_Start_Sal",
                 "Acc_Rate",
                 "Stu_Fac_Ratio",
                 "Graduation_Rate",
                 "Score",
                 "Location",
                 "Tuition",
                 "Room_Board",
                 "Enrollment")
# Bug fix: return() is only valid inside a function; calling it at the top
# level of a script is an error. Evaluating `df` makes it the script's
# final (auto-printed) value instead.
df
| /kitty_test.R | no_license | PHP-2560/r-package-group-1-4-0 | R | false | false | 7,119 | r |
library(tidyverse)
library(rebus)
library(httr)
library(rvest)
print("It might take some minutes to scrape...")
#The following functions are sort of helper functions in order to scrape the data:
# Normalize a scraped string: strip newlines, spaces, and the symbols $ % + ,
# Returns the cleaned string. (The original ended on an assignment, whose
# value is returned invisibly; return the value explicitly instead.)
clean_str = function(strg) {
  strg = str_remove_all(strg, "\n")
  strg = str_remove_all(strg, " ")
  strg = str_remove_all(strg, regex("[$%+,]"))
  strg
}
# returns school type
# (text of info[1] before the first comma, cleaned via clean_str)
get_school_type = function(info) {
school_type = str_split(info[1], ",", n = 2)
school_type = school_type[[1]][1]
school_type = clean_str(school_type)
return(school_type)
}
# returns year founded
# (info[2] cleaned, with any surrounding lowercase letters stripped)
get_year_founded = function(info) {
year = clean_str(info[2])
year = str_remove_all(year, regex("[a-z]"))
return(year)
}
# returns religious affiliation
# (info[3] with newlines and the literal label text stripped)
get_religion = function(info) {
religion = str_remove_all(info[3], "\n")
religion = str_remove_all(religion, "religious affiliation")
# NOTE(review): removes the literal substring "/s"; possibly the regex
# "\\s" (whitespace) was intended -- confirm.
religion = str_remove_all(religion, "/s")
return(religion)
}
# returns endowment
# info[6] holds the endowment figure plus a unit word; the value is
# normalized to millions of dollars.
get_endowment = function(info) {
# drop lowercase unit/label text and any 4-digit year such as "2019"
endowment = str_remove_all(info[6], regex("[a-z]"))
endowment = str_remove_all(endowment, regex("20[0-9]{2}"))
endowment = clean_str(endowment)
# choose unit of million or billion
if (str_detect(info[6], pattern = "million")) {
# already in millions: returned as a character string
return(endowment)
} else {
# assumed billions: converted to millions (returned as numeric)
endowment = as.numeric(endowment) * 1000
return(endowment)
}
}
# returns median starting salary for new graduates
# (NA when info2[1] contains no digits)
get_median_starting_salary = function(info2) {
if (str_detect(info2[1], regex("[0-9]"))) {
salary = clean_str(info2[1])
salary = str_remove_all(salary, regex("[,*]"))
return(salary)
} else {
return(NA)
}
}
# returns acceptance rate
# Scan entries 2..9 of info2 for the first one containing a percent sign
# and return it cleaned; NA when none is found.
get_acc_rate = function(info2) {
  lon = 2:9
  accept = NA
  for (i in lon) {
    if (str_detect(info2[i], "%")) {
      # (The original also assigned info2[2] here, immediately overwritten
      # by the next line -- dead code, removed.)
      accept = clean_str(info2[i])
      break
    }
  }
  return(accept)
}
# get student faculty ratio
# First entry among info2[9..13] containing ":" (e.g. "7:1"). The try()
# keeps the NA default when info2 is shorter than 13 entries (str_detect
# on a missing value makes the if() error out, which try() swallows).
get_stu_fac_ratio = function(info2) {
lon = 9:13
ratio = NA
try({
for (i in lon) {
if (str_detect(info2[i], ":")) {
ratio = clean_str(info2[i])
break
}
}
}, silent = TRUE)
return(ratio)
}
}
# get 4 year graduation rate
# Scan entries 10..14 for the first value containing "%". The try() keeps
# the NA default when the vector is shorter than 14 entries.
get_grad_rate = function(info) {
  lon = 10:14
  grad = NA
  try({
    for (i in lon) {
      # Bug fix: the body referenced the global `info2` instead of the
      # `info` parameter, silently depending on the caller's environment.
      if (str_detect(info[i], "%")) {
        grad = clean_str(info[i])
        break
      }
    }
  }, silent = TRUE)
  return(grad)
}
# gets score
# details[2] holds the overall score; letters are stripped and the text
# before the first "/" is returned. NA when "Overall" is absent.
get_score = function(details) {
score = NA
if (str_detect(details[2], pattern = "Overall")) {
score = clean_str(details[2])
score = str_remove_all(score, regex("[a-zA-Z]"))
score = str_split(score, "/", n = 2)
score = score[[1]][1]
}
return(score)
}
# gets location
# First of details[1..4] containing a comma (e.g. "City, State"); NA when
# none matches.
get_location = function(details) {
lon = 1:4
location = NA
for (i in lon) {
if (str_detect(details[i], ",")) {
location = details[i]
break
}
}
return(location)
}
# gets tuition
# Entry immediately after the "Quick" marker in details[4..9]; a trailing
# parenthesised note is split off. No break, so a later marker overwrites.
get_tuition = function(details) {
lon = 4:9
tuition = NA
for (i in lon) {
if (str_detect(details[i], "Quick")) {
ind = i + 1
tuition = clean_str(details[ind])
tuition = str_split(tuition, "\\(", n = 2)
tuition = tuition[[1]][1]
}
}
return(tuition)
}
# get room & board
# Second entry after the "Quick" marker in details[4..9]; a trailing
# parenthesised note is split off.
get_room_board = function(details) {
lon = 4:9
rb = NA
for (i in lon) {
if (str_detect(details[i], "Quick")) {
ind = i + 2
rb = clean_str(details[ind])
rb = str_split(rb, "\\(", n = 2)
rb = rb[[1]][1]
}
}
return(rb)
}
# get enrollment
# Enrollment is the third entry after the "Quick" marker in details[4..9];
# a trailing parenthesised note is split off.
get_enrollment = function(details) {
  lon = 4:9
  enroll = NA
  for (i in lon) {
    if (str_detect(details[i], "Quick")) {
      ind = i + 3
      enroll = clean_str(details[ind])
    }
  }
  # Bug fix: when no "Quick" marker is found, enroll stays NA and
  # str_detect(NA, ...) yields NA, making the if() below error out.
  if (!is.na(enroll) && str_detect(enroll, "\\(")) {
    enroll = str_split(enroll, "\\(", n = 2)
    enroll = enroll[[1]][1]
  }
  return(enroll)
}
# Scrape university names and profile links from the paginated US News
# ranking table, filling the preallocated vectors page by page until a
# request fails (which signals we ran past the last page).
universities = rep(NA, 312)
links_u = rep(NA, 312)
count = 0
try(while (TRUE) {
  # change url
  count = count + 1
  url = str_c("https://www.usnews.com/best-colleges/rankings/national-universities?_mode=table&_page=", as.character(count))
  # Bug fix: the original used `error = function() break`, which fails twice
  # over -- the handler must accept the condition object, and `break` is not
  # legal inside a function body. Use a NULL sentinel and break in the loop.
  webpage <- tryCatch(read_html(url), error = function(e) NULL)
  if (is.null(webpage)) break
  # university names
  uni_names = html_text(html_nodes(webpage, "td.full-width > div > a"))
  universities[(sum(!is.na(universities)) + 1):(sum(!is.na(universities)) + length(uni_names))] = uni_names
  # links
  semi_links = html_attr(html_nodes(webpage, "div.text-strong.text-large.block-tighter > a"), "href")
  links_u[(sum(!is.na(links_u)) + 1):(sum(!is.na(links_u)) + length(semi_links))] = str_c("https://www.usnews.com", semi_links)
}, silent = TRUE)
# Preallocate one result vector per scraped attribute, parallel to
# `universities`; entries stay NA for pages that fail to parse.
year_founded = rep(NA, length(universities))
religion = rep(NA, length(universities))
endowment = rep(NA, length(universities))
school_type = rep(NA, length(universities))
median_start_sal = rep(NA, length(universities))
acc_rate = rep(NA, length(universities))
stu_fac_ratio = rep(NA, length(universities))
grad_rate = rep(NA, length(universities))
score = rep(NA, length(universities))
location = rep(NA, length(universities))
tuition = rep(NA, length(universities))
room_board = rep(NA, length(universities))
enrollment = rep(NA, length(universities))
# Visit each university page and extract each attribute via the helper
# functions above. The try() wrapper aborts scraping on the first bad page
# without killing the script (already-filled entries keep their values,
# the rest stay NA).
try(
for (i in 1:length(universities)) {
link = read_html(links_u[i])
# Three node sets feed the helpers: quick facts, page details, stats.
info = html_text(html_nodes(link, ".flex-small"))
details = html_text(html_nodes(link, ".full-width , strong"))
info2 = html_text(html_nodes(link, ".medium-end"))
year_founded[i] = get_year_founded(info)
religion[i] = get_religion(info)
endowment[i] = get_endowment(info)
school_type[i] = get_school_type(info)
median_start_sal[i] = get_median_starting_salary(info2)
acc_rate[i] = get_acc_rate(info2)
stu_fac_ratio[i] = get_stu_fac_ratio(info2)
grad_rate[i] = get_grad_rate(info2)
score[i] = get_score(details)
location[i] = get_location(details)
tuition[i] = get_tuition(details)
room_board[i] = get_room_board(details)
enrollment[i] = get_enrollment(details)
},
silent = TRUE)
# Assemble the scraped vectors into a data frame with typed columns
# (character vectors are coerced before integer conversion so factor
# levels are not misread as codes).
df = data.frame(as.character(universities),
                as.integer(as.character(year_founded)),
                religion, as.integer(as.character(endowment)),
                school_type,
                as.integer(as.character(median_start_sal)),
                as.integer(as.character(acc_rate)),
                as.character(stu_fac_ratio),
                as.integer(as.character(grad_rate)),
                as.integer(as.character(score)),
                as.character(location),
                as.integer(as.character(tuition)),
                as.integer(as.character(room_board)),
                as.integer(as.character(enrollment)))
colnames(df) = c("University",
                 "Year_Founded",
                 "Religion",
                 "Endowment",
                 "School_Type",
                 "Median_Start_Sal",
                 "Acc_Rate",
                 "Stu_Fac_Ratio",
                 "Graduation_Rate",
                 "Score",
                 "Location",
                 "Tuition",
                 "Room_Board",
                 "Enrollment")
# Bug fix: return() is only valid inside a function; calling it at the top
# level of a script is an error. Evaluating `df` makes it the script's
# final (auto-printed) value instead.
df
|
llgpcp <- function( coefficients, targets, achievements, variable.classes, maxiter=1000, verbose=FALSE )
{
###
### This function minimizes \eqn{ a'=[g_1(n,p), g_2(n,p), ..., g_K(n,p)] } subjective
### to C x + n - p = b, x >= 0, n >= 0 and p >= 0 with complementary pivoting
###
### Parameters
### coefficients = a matrix with the coefficients of the linear objective functions
### targets = a vector of target values for the objective function
### achievements = a data frame with the weights of the deviation variables for each
### objective along with the corresponding priority level
### variable.classes = a data frame that defines the complementary classes of
### the decision variables
### maxiter = maximum number of iterations
### NOTE(review): the `zero` tolerance described below is documented but does
### not appear in the function signature -- confirm whether it was removed.
### zero = number smaller than this value (in absolute terms) are set to zero
### verbose = an optional logic variable to indicate whether interm results are to be printed
###
### Returns a list of class "llgpcp" with components `tab` (final tableau),
### `out` (solution formatted by llgpout) and `converged` (logical).
###
### validate the argument objects
### NOTE(review): no validation code follows this comment; arguments are
### passed straight to llgpcptab(), which presumably performs the checks.
###
###
### create the tableau
###
tab <- llgpcptab( coefficients, targets, achievements, variable.classes )
###
### reset the print and iteration counters
###
prnt <- 0
tab$iter <- 0
###
### check tableau for negative RHS target values and repair if necessary
### NOTE(review): the return value of check.tb() is discarded; since R has
### copy semantics a repaired tableau would be lost -- confirm whether this
### should read `tab <- check.tb( tab )`.
###
check.tb( tab )
###
### loop over priority levels
###
for ( k in 1:tab$levels ) {
###
### update the level in the tableau
###
tab$level <- k
###
### calculate the index rows for levels 1 to k
###
tab <- calc.ti.k( tab, k )
###
### calculate the achievements for levels 1 to k
###
tab <- calc.ta.k( tab, k )
###
### infinite loop while there a possibility of converging to a solution
### (sp is the entering-variable column chosen by ev.llgpcp; 0 means the
### current level is optimal and the loop ends)
###
sp <- ev.llgpcp( tab, k )
while ( sp != 0 ) {
tab$iter <- tab$iter + 1
if ( tab$iter >= maxiter ) {
### iteration budget exhausted: report and return a non-converged result
prnt <- prnt + 1
cat( paste( "Algorithm did not finish", tab$iter, "iterations at level", k ) )
cat( "\n" )
print( tab )
out <- llgpout( tab, coefficients, targets )
result <- list( tab=tab, out=out, converged=FALSE )
class( result ) <- "llgpcp"
return( result )
}
###
### get the index of the departing variable
###
ip <- dv.llgp( tab, sp )
if ( ip == 0 ) {
### no valid departing row: report and return a non-converged result
cat( paste( "Failed pivot computation at level", k ) )
cat( "\n" )
prnt <- prnt + 1
print( tab )
out <- llgpout( tab, coefficients, targets )
result <- list( tab=tab, out=out, converged=FALSE )
class( result ) <- "llgpcp"
return( result )
}
###
### swap the entering and departing variables
###
tab <- piv.llgp( tab, sp, ip, verbose )
###
### update the index rows and achievement functions
###
tab <- calc.ti.k( tab, k )
tab <- calc.ta.k( tab, k )
sp <- ev.llgpcp( tab, k )
if ( verbose ) print( tab )
}
}
out <- llgpout( tab, coefficients, targets )
result <- list( tab=tab, out=out, converged=TRUE )
class( result ) <- "llgpcp"
return( result )
}
| /goalprog/R/llgpcp.R | no_license | ingted/R-Examples | R | false | false | 3,357 | r | llgpcp <- function( coefficients, targets, achievements, variable.classes, maxiter=1000, verbose=FALSE )
{
###
### This function minimizes \eqn{ a'=[g_1(n,p), g_2(n,p), ..., g_K(n,p)] } subjective
### to C x + n - p = b, x >= 0, n >= 0 and p >= 0 with complementary pivoting
###
### Parameters
### coefficients = a matrix with the coefficients of the linear objective functions
### targets = a vector of target values for the objective function
### achievements = a data frame with the weights of the deviation variables for each
### objective along with the corresponding priority level
### variable.classes = a data frame that defines the complementary classes of
### the decision variables
### maxiter = maximum number of iterations
### zero = number smaller than this value (in absolute terms) are set to zero
### verbose = an optional logic variable to indicate whether interm results are to be printed
###
### validate the argument objects
###
###
### create the tableau
###
tab <- llgpcptab( coefficients, targets, achievements, variable.classes )
###
### reset the print and iteration countersls()
###
prnt <- 0
tab$iter <- 0
###
### check tableau for negative RHS target values and repair if necessary
###
check.tb( tab )
###
### loop over priority levels
###
for ( k in 1:tab$levels ) {
###
### update the level in the tableau
###
tab$level <- k
###
### calculate the index rows for levels 1 to k
###
tab <- calc.ti.k( tab, k )
###
### calculate the achievements for levels 1 to k
###
tab <- calc.ta.k( tab, k )
###
### infinite loop while there a possibility of converging to a solution
###
sp <- ev.llgpcp( tab, k )
while ( sp != 0 ) {
tab$iter <- tab$iter + 1
if ( tab$iter >= maxiter ) {
prnt <- prnt + 1
cat( paste( "Algorithm did not finish", tab$iter, "iterations at level", k ) )
cat( "\n" )
print( tab )
out <- llgpout( tab, coefficients, targets )
result <- list( tab=tab, out=out, converged=FALSE )
class( result ) <- "llgpcp"
return( result )
}
###
### get the index of the departing variable
###
ip <- dv.llgp( tab, sp )
if ( ip == 0 ) {
cat( paste( "Failed pivot computation at level", k ) )
cat( "\n" )
prnt <- prnt + 1
print( tab )
out <- llgpout( tab, coefficients, targets )
result <- list( tab=tab, out=out, converged=FALSE )
class( result ) <- "llgpcp"
return( result )
}
###
### swap the entering and departing variables
###
tab <- piv.llgp( tab, sp, ip, verbose )
###
### update the index rows and achievement functions
###
tab <- calc.ti.k( tab, k )
tab <- calc.ta.k( tab, k )
sp <- ev.llgpcp( tab, k )
if ( verbose ) print( tab )
}
}
out <- llgpout( tab, coefficients, targets )
result <- list( tab=tab, out=out, converged=TRUE )
class( result ) <- "llgpcp"
return( result )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init.R
\name{init}
\alias{init}
\title{Title}
\usage{
init()
}
\value{
None
}
\description{
Title
}
| /eyewit/man/init.Rd | permissive | ccp-eva/eyewit | R | false | true | 178 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init.R
\name{init}
\alias{init}
\title{Title}
\usage{
init()
}
\value{
None
}
\description{
Title
}
|
# Retrieve ReviewPolicy results and/or actions for a HIT from the MTurk
# Requester API (the 'GetReviewResultsForHIT' operation). On a valid
# request it returns the structure built by ReviewResultsToDataFrame();
# on an invalid request it invisibly returns the raw request object.
# `reviewresults` is an alias bound to the same function by the chained
# assignment below.
GetReviewResultsForHIT <-
reviewresults <-
function (hit, assignment = NULL, policy.level = NULL, retrieve.results = TRUE,
retrieve.actions = TRUE, keypair = credentials(), print = TRUE,
browser = FALSE, log.requests = TRUE, sandbox = FALSE)
{
# keypair holds the access key id and secret key, in that order.
keyid <- keypair[1]
secret <- keypair[2]
operation = "GetReviewResultsForHIT"
# Build the query-string parameters for the REST request.
GETparameters <- paste("&HITId=", hit, sep = "")
if (!is.null(policy.level)) {
if (!policy.level %in% c("HIT", "Assignments"))
stop("PolicyLevel must be 'HIT' | 'Assignments'")
GETparameters <- paste(GETparameters, "&PolicyLevel=",
policy.level, sep = "")
}
if (!is.null(assignment))
GETparameters <- paste(GETparameters, "&AssignmentId=",
assignment, sep = "")
if (!is.null(retrieve.actions)) {
if (!retrieve.actions %in% c(TRUE, FALSE))
stop("RetrieveActions must be TRUE or FALSE")
else if (retrieve.actions == TRUE)
GETparameters <- paste(GETparameters, "&RetrieveActions=T",
sep = "")
else if (retrieve.actions == FALSE)
GETparameters <- paste(GETparameters, "&RetrieveActions=F",
sep = "")
}
if (!is.null(retrieve.results)) {
if (!retrieve.results %in% c(TRUE, FALSE))
stop("RetrieveResults must be TRUE or FALSE")
else if (retrieve.results == TRUE)
GETparameters <- paste(GETparameters, "&RetrieveResults=T",
sep = "")
else if (retrieve.results == FALSE)
GETparameters <- paste(GETparameters, "&RetrieveResults=F",
sep = "")
}
# Sign the request (timestamped signature for the named operation).
auth = authenticate(operation, secret)
if (browser == TRUE) {
# Browser mode: open the request URL rather than fetching the XML here.
request <- request(keyid, auth$operation, auth$signature,
auth$timestamp, GETparameters, browser = browser,
sandbox = sandbox)
}
else {
request <- request(keyid, auth$operation, auth$signature,
auth$timestamp, GETparameters, log.requests = log.requests,
sandbox = sandbox)
if (request$valid == TRUE) {
ReviewResults <- ReviewResultsToDataFrame(xml = request$xml)
if (print == TRUE) {
cat("ReviewResults Retrieved: ")
if (is.null(ReviewResults))
cat("0\n")
else {
# NOTE(review): all four branches below count
# ReviewResults$AssignmentReviewResults -- the action and HIT-level
# branches presumably should reference their own components. Also the
# names tested are singular ("...Result") while the element accessed
# is plural ("...Results"); confirm against ReviewResultsToDataFrame()
# before relying on these counts.
if ("AssignmentReviewResult" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" Assignment ReviewResults Retrieved\n",
sep = "")
if ("AssignmentReviewAction" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" Assignment ReviewActions Retrieved\n",
sep = "")
if ("HITReviewResult" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" HIT ReviewResults Retrieved\n", sep = "")
if ("HITReviewAction" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" HIT ReviewActions Retrieved\n", sep = "")
return(ReviewResults)
}
}
else invisible(ReviewResults)
}
else if (request$valid == FALSE) {
if (print == TRUE)
cat("Invalid Request\n")
invisible(request)
}
}
}
| /R/GetReviewResultsForHIT.R | no_license | SolomonMg/MTurkR-1 | R | false | false | 3,564 | r | GetReviewResultsForHIT <-
reviewresults <-
function (hit, assignment = NULL, policy.level = NULL, retrieve.results = TRUE,
retrieve.actions = TRUE, keypair = credentials(), print = TRUE,
browser = FALSE, log.requests = TRUE, sandbox = FALSE)
{
keyid <- keypair[1]
secret <- keypair[2]
operation = "GetReviewResultsForHIT"
GETparameters <- paste("&HITId=", hit, sep = "")
if (!is.null(policy.level)) {
if (!policy.level %in% c("HIT", "Assignments"))
stop("PolicyLevel must be 'HIT' | 'Assignments'")
GETparameters <- paste(GETparameters, "&PolicyLevel=",
policy.level, sep = "")
}
if (!is.null(assignment))
GETparameters <- paste(GETparameters, "&AssignmentId=",
assignment, sep = "")
if (!is.null(retrieve.actions)) {
if (!retrieve.actions %in% c(TRUE, FALSE))
stop("RetrieveActions must be TRUE or FALSE")
else if (retrieve.actions == TRUE)
GETparameters <- paste(GETparameters, "&RetrieveActions=T",
sep = "")
else if (retrieve.actions == FALSE)
GETparameters <- paste(GETparameters, "&RetrieveActions=F",
sep = "")
}
if (!is.null(retrieve.results)) {
if (!retrieve.results %in% c(TRUE, FALSE))
stop("RetrieveResults must be TRUE or FALSE")
else if (retrieve.results == TRUE)
GETparameters <- paste(GETparameters, "&RetrieveResults=T",
sep = "")
else if (retrieve.results == FALSE)
GETparameters <- paste(GETparameters, "&RetrieveResults=F",
sep = "")
}
auth = authenticate(operation, secret)
if (browser == TRUE) {
request <- request(keyid, auth$operation, auth$signature,
auth$timestamp, GETparameters, browser = browser,
sandbox = sandbox)
}
else {
request <- request(keyid, auth$operation, auth$signature,
auth$timestamp, GETparameters, log.requests = log.requests,
sandbox = sandbox)
if (request$valid == TRUE) {
ReviewResults <- ReviewResultsToDataFrame(xml = request$xml)
if (print == TRUE) {
cat("ReviewResults Retrieved: ")
if (is.null(ReviewResults))
cat("0\n")
else {
if ("AssignmentReviewResult" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" Assignment ReviewResults Retrieved\n",
sep = "")
if ("AssignmentReviewAction" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" Assignment ReviewActions Retrieved\n",
sep = "")
if ("HITReviewResult" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" HIT ReviewResults Retrieved\n", sep = "")
if ("HITReviewAction" %in% names(ReviewResults))
cat(length(ReviewResults$AssignmentReviewResults),
" HIT ReviewActions Retrieved\n", sep = "")
return(ReviewResults)
}
}
else invisible(ReviewResults)
}
else if (request$valid == FALSE) {
if (print == TRUE)
cat("Invalid Request\n")
invisible(request)
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additional_priors.R
\name{hexp}
\alias{hexp}
\title{A hierarchical model for seeded infections}
\usage{
hexp(prior_aux = rstanarm::exponential(0.03))
}
\arguments{
\item{prior_aux}{Specifies the prior distribution on the auxiliary parameter.
This can be a call to \code{\link[rstanarm]{normal}}, \code{\link[rstanarm]{student_t}}
or \code{\link[rstanarm]{exponential}}.}
}
\value{
A named list to be parsed internally by \code{\link[epidemia]{epim}}.
}
\description{
This distribution assigns seeded infections in each population an
exponential prior. The \code{aux} parameter refers to the mean of this
distribution. This mean parameter is common to seeded infections in each
group, and is given a prior distribution. This approach of assigning
priors to hyperparameters is referred to as hierarchical modeling. A call
to this function can be passed as the \code{prior_seeds} argument in
\code{\link[epidemia]{epiinf}}.
}
\references{
\insertAllCited{}
}
| /man/hexp.Rd | no_license | dimbage/epidemia | R | false | true | 1,034 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additional_priors.R
\name{hexp}
\alias{hexp}
\title{A hierarchical model for seeded infections}
\usage{
hexp(prior_aux = rstanarm::exponential(0.03))
}
\arguments{
\item{prior_aux}{Specifies the prior distribution on the auxiliary parameter.
This can be a call to \code{\link[rstanarm]{normal}}, \code{\link[rstanarm]{student_t}}
or \code{\link[rstanarm]{exponential}}.}
}
\value{
A named list to be parsed internally by \code{\link[epidemia]{epim}}.
}
\description{
This distribution assigns seeded infections in each population an
exponential prior. The \code{aux} parameter refers to the mean of this
distribution. This mean parameter is common to seeded infections in each
group, and is given a prior distribution. This approach of assigning
priors to hyperparameters is referred to as hierarchical modeling. A call
to this function can be passed as the \code{prior_seeds} argument in
\code{\link[epidemia]{epiinf}}.
}
\references{
\insertAllCited{}
}
|
#' Videiras: parcelas subdivididas em DBC
#'
#' @description Experimento sobre videiras (nao publicado) que
#' estudou a influencia de diferentes adubos e datas de
#' colheita no pH das uvas.
#' @docType data
#' @keywords datasets
#' @name ex
#' @usage data(ex)
#' @format Um data frame com 24 observacoes das seguintes 4
#' variaveis:
#' \describe{
#' \item{\code{trat}}{Fator com os niveis \code{A} e \code{B}}
#' \item{\code{dose}}{Vetor numerico}
#' \item{\code{rep}}{Vetor numerico}
#' \item{\code{resp}}{Vetor numerico}
#' }
#' @author Eric Batista Ferreira,
#' \email{eric.ferreira@@unifal-mg.edu.br}
NULL
| /R/ex.r | no_license | denisnog/ExpDes.pt | R | false | false | 626 | r | #' Videiras: parcelas subdivididas em DBC
#'
#' @description Experimento sobre videiras (nao publicado) que
#' estudou a influencia de diferentes adubos e datas de
#' colheita no pH das uvas.
#' @docType data
#' @keywords datasets
#' @name ex
#' @usage data(ex)
#' @format Um data frame com 24 observacoes das seguintes 4
#' variaveis:
#' \describe{
#' \item{\code{trat}}{Fator com os niveis \code{A} e \code{B}}
#' \item{\code{dose}}{Vetor numerico}
#' \item{\code{rep}}{Vetor numerico}
#' \item{\code{resp}}{Vetor numerico}
#' }
#' @author Eric Batista Ferreira,
#' \email{eric.ferreira@@unifal-mg.edu.br}
NULL
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.