content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
shinyServer(function(input, output, session) {

  ## Assemble a one-row project-definition data frame from the values
  ## previously stored with setComputationInfo().
  createProjectDefinition <- function() {
    data.frame(id = getComputationInfo("id"),
               compType = getComputationInfo("compType"),
               projectName = getComputationInfo("projectName"),
               projectDesc = getComputationInfo("projectDesc"),
               formula = getComputationInfo("formula"),
               stringsAsFactors = FALSE)
  }

  ## Stop the app once the exit button has been clicked at least once.
  observe({
    if (input$exitApp > 0) stopApp(TRUE)
  })

  ## -- End: functions to make tabs active sequentially --
  ## Variables to detect various states the app can be in, depending
  ## on what actions the user has taken.
  ## output$datasetSpecified, output$datasetChecked, output$formulaEntered,
  ## output$formulaChecked, output$definitionSaved

  ## Triggered when the user chooses a source and clicks "Load Data".
  ## Reads the data (CSV file, Redcap API, or Postgres), stores a
  ## successful result via setComputationInfo("data", ...) and prints a
  ## str() summary; otherwise prints the captured condition message.
  output$dataFileContentSummary <- renderPrint({
    if (input$loadData == 0) return("")
    ## Create data frame from source; isolate() so this re-runs only on
    ## the button click, not on every input change.
    isolate({
      if (input$input_type == 'CSV File') {
        inputFile <- input$dataFile
        shiny::validate(
          need(inputFile != "", "Please select a data set")
        )
        ## Parse comma-separated missing value strings
        missingValueIndicators <- stringr::str_trim(scan(textConnection(input$missingIndicators),
                                                         what = character(0), sep = ",", quiet = TRUE))
        ## Return data frame or error as the case may be
        dataResult <- tryCatch(
          { read.csv(file = inputFile$datapath, na.strings = missingValueIndicators) },
          warning = function(x) x,
          error = function(x) x
        )
      } else if (input$input_type == 'Redcap API') {
        shiny::validate(
          need(requireNamespace("redcapAPI", quietly = TRUE), "Please install the redcapAPI package"),
          need(input$redcapURL != "", "Please enter your Redcap URL"),
          need(input$redcapToken != "", "Please enter your Redcap Token")
        )
        dataResult <- tryCatch(
          { redcapAPI::exportRecords(redcapAPI::redcapConnection(url = input$redcapURL, token = input$redcapToken)) },
          warning = function(x) x,
          error = function(x) x
        )
      } else if (input$input_type == 'Postgres') {
        shiny::validate(
          need(requireNamespace("RPostgreSQL", quietly = TRUE), "Please install the RPostgreSQL package"),
          need(requireNamespace("dplyr", quietly = TRUE), "Please install the dplyr package"),
          need(input$dbName != "", "Please enter your Postgres database name"),
          need(input$dbHost != "", "Please enter your Postgres host"),
          need(!is.na(as.integer(input$dbPort)), "Please enter your Postgres port number"),
          need(input$dbUser != "", "Please enter your Postgres database user name"),
          need(input$dbPassword != "", "Please enter your Postgres database user password"),
          need(input$dbTable != "", "Please enter your Postgres database table name")
        )
        dataResult <- tryCatch(
          {
            db <- dplyr::src_postgres(dbname = input$dbName, host = input$dbHost, port = input$dbPort,
                                      user = input$dbUser, password = input$dbPassword)
            ## BUG FIX: a "SELECT * from <tbl>" string used to be handed to
            ## dplyr::tbl(), which treats a plain string as a table *name*,
            ## so the lookup always failed. Passing the bare table name lets
            ## dplyr quote it as an identifier (which also closes the
            ## SQL-injection vector the old comment warned about), and
            ## as.data.frame() collects the lazy tbl so the
            ## is.data.frame() success check below can succeed.
            as.data.frame(dplyr::tbl(db, input$dbTable))
          },
          warning = function(x) x,
          error = function(x) x
        )
      } else {
        shiny::validate(
          need(FALSE, "Report bug to Package owner: unexpected Data source!")
        )
        dataResult <- NULL
      }
      if (is.data.frame(dataResult)) {
        setComputationInfo("data", dataResult) ## Store data object
        updateTabsetPanel(session, inputId = "navigationList", selected = "Sanity Check")
        str(dataResult)
      } else {
        cat('Error!', dataResult$message)
      }
    })
  })

  ## Status string shown once data has been loaded; scalar condition, so
  ## use if/else rather than vectorized ifelse().
  output$dataLoaded <- reactive({
    if (input$loadData == 0) return()
    if (is.data.frame(getComputationInfo("data"))) {
      "Data Loaded; Proceed to Sanity Check"
    } else {
      ""
    }
  })

  ## Triggered when the user clicks the "Check Sanity" button: tries to
  ## construct a CoxWorker from the project definition and loaded data,
  ## reporting success or the captured condition message.
  output$sanityCheckResult <- reactive({
    if (input$checkSanity == 0) return()
    formula <- getComputationInfo("formula")  # NOTE(review): assigned but unused; kept for compatibility
    isolate({
      result <- tryCatch(
        {
          CoxWorker$new(defn = createProjectDefinition(), data = getComputationInfo("data"))
        },
        warning = function(x) x,
        error = function(x) x)
      ## inherits() is the idiomatic class test (robust to extra classes)
      if (inherits(result, "CoxWorker")) { ## Success
        "Success: data matches formula. Send to Opencpu Server."
      } else {
        paste("Error!", result$message)
      }
    })
  })

  ## Triggered when the user clicks the "populate server" button: uploads
  ## the computation definition and data to the given OpenCPU site.
  output$populateResult <- renderPrint({
    if (input$populateServer == 0) return()
    isolate({
      shiny::validate(need(input$siteName != "", "Please enter a site name"))
      shiny::validate(need(input$ocpuURL != "", "Please enter an opencpu URL"))
      site <- list(name = input$siteName, url = input$ocpuURL)
      defn <- makeDefinition(getComputationInfo("compType"))
      data <- getComputationInfo("data")
      result <- tryCatch(uploadNewComputation(site = site, defn = defn, data = data),
                         error = function(x) x,
                         warning = function(x) x)
      if (inherits(result, "error")) {
        "Error: Uploading the definition to server"
      } else {
        ## result is a scalar logical: if/else is clearer than ifelse()
        if (result) {
          "Success: definition uploaded to server"
        } else {
          "Error while uploading definition to server"
        }
      }
    })
  })
})
| /inst/webApps/setupWorkerApp/setupCoxWorker/server.R | no_license | emcramer/distcomp | R | false | false | 6,505 | r | shinyServer(function(input, output, session) {
## Assemble a one-row project-definition data frame from the values
## previously stored with setComputationInfo(); plain character columns,
## no factor conversion.
createProjectDefinition <- function() {
  fields <- c("id", "compType", "projectName", "projectDesc", "formula")
  values <- lapply(fields, getComputationInfo)
  names(values) <- fields
  do.call(data.frame, c(values, list(stringsAsFactors = FALSE)))
}
## Terminate the Shiny app once the user has clicked the exit button
## (actionButton values start at 0 and increment on each click).
observe({
if (input$exitApp > 0) stopApp(TRUE)
})
## -- End: functions to make tabs active sequentially --
## Variables to detect various states the app can be in, depending
## on what actions the user has taken.
## output$datasetSpecified
## output$datasetChecked
## output$formulaEntered
## output$formulaChecked
## output$definitionSaved
## When the user chooses a file and clicks on the "Load Data" button
## this function is triggered
## Triggered when the user clicks the "Load Data" button: reads the data
## from the selected source (CSV file, Redcap API, or Postgres), stores a
## successful result via setComputationInfo("data", ...) and shows str();
## otherwise prints the captured condition message.
output$dataFileContentSummary <- renderPrint({
  if (input$loadData == 0) return("")
  ## isolate() so the read re-runs only when the button is clicked
  isolate({
    if (input$input_type == 'CSV File') {
      inputFile <- input$dataFile
      shiny::validate(
        need(inputFile != "", "Please select a data set")
      )
      ## Parse comma-separated missing value indicator strings
      missingValueIndicators <- stringr::str_trim(scan(textConnection(input$missingIndicators),
                                                       what = character(0), sep = ",", quiet = TRUE))
      ## Return data frame or error as the case may be
      dataResult <- tryCatch(
        { read.csv(file = inputFile$datapath, na.strings = missingValueIndicators) },
        warning = function(x) x,
        error = function(x) x
      )
    } else if (input$input_type == 'Redcap API') {
      shiny::validate(
        need(requireNamespace("redcapAPI", quietly = TRUE), "Please install the redcapAPI package"),
        need(input$redcapURL != "", "Please enter your Redcap URL"),
        need(input$redcapToken != "", "Please enter your Redcap Token")
      )
      dataResult <- tryCatch(
        { redcapAPI::exportRecords(redcapAPI::redcapConnection(url = input$redcapURL, token = input$redcapToken)) },
        warning = function(x) x,
        error = function(x) x
      )
    } else if (input$input_type == 'Postgres') {
      shiny::validate(
        need(requireNamespace("RPostgreSQL", quietly = TRUE), "Please install the RPostgreSQL package"),
        need(requireNamespace("dplyr", quietly = TRUE), "Please install the dplyr package"),
        need(input$dbName != "", "Please enter your Postgres database name"),
        need(input$dbHost != "", "Please enter your Postgres host"),
        need(!is.na(as.integer(input$dbPort)), "Please enter your Postgres port number"),
        need(input$dbUser != "", "Please enter your Postgres database user name"),
        need(input$dbPassword != "", "Please enter your Postgres database user password"),
        need(input$dbTable != "", "Please enter your Postgres database table name")
      )
      dataResult <- tryCatch(
        {
          db <- dplyr::src_postgres(dbname = input$dbName, host = input$dbHost, port = input$dbPort,
                                    user = input$dbUser, password = input$dbPassword)
          ## BUG FIX: previously a "SELECT * from <tbl>" string was handed
          ## to dplyr::tbl(), which interprets a plain string as a table
          ## *name*, so the lookup always failed. Passing the bare table
          ## name lets dplyr quote it as an identifier (also closing the
          ## SQL-injection vector), and as.data.frame() collects the lazy
          ## tbl so the is.data.frame() success test below can succeed.
          as.data.frame(dplyr::tbl(db, input$dbTable))
        },
        warning = function(x) x,
        error = function(x) x
      )
    } else {
      shiny::validate(
        need(FALSE, "Report bug to Package owner: unexpected Data source!")
      )
      dataResult <- NULL
    }
    if (is.data.frame(dataResult)) {
      setComputationInfo("data", dataResult) ## Store data object
      updateTabsetPanel(session, inputId = "navigationList", selected = "Sanity Check")
      str(dataResult)
    } else {
      cat('Error!', dataResult$message)
    }
  })
})
## Status string shown once data has been loaded successfully. The
## condition is scalar, so use if/else rather than vectorized ifelse().
output$dataLoaded <- reactive({
  if (input$loadData == 0) return()
  if (is.data.frame(getComputationInfo("data"))) {
    "Data Loaded; Proceed to Sanity Check"
  } else {
    ""
  }
})
## When the user clicks on the "Check Sanity" button
## this function is triggered
## Triggered when the user clicks the "Check Sanity" button: tries to
## construct a CoxWorker from the project definition and loaded data,
## reporting success or the captured condition message.
output$sanityCheckResult <- reactive({
  if (input$checkSanity == 0) return()
  formula <- getComputationInfo("formula")  # NOTE(review): assigned but unused; kept for compatibility
  isolate({
    result <- tryCatch(
      {
        CoxWorker$new(defn = createProjectDefinition(), data = getComputationInfo("data"))
      },
      warning = function(x) x,
      error = function(x) x)
    ## inherits() is the idiomatic class test (robust to extra classes)
    if (inherits(result, "CoxWorker")) { ## Success
      "Success: data matches formula. Send to Opencpu Server."
    } else {
      paste("Error!", result$message)
    }
  })
})
## Triggered when the user clicks the "populate server" button: uploads
## the computation definition and data to the given OpenCPU site.
output$populateResult <- renderPrint({
  if (input$populateServer == 0) return()
  isolate({
    shiny::validate(need(input$siteName != "", "Please enter a site name"))
    shiny::validate(need(input$ocpuURL != "", "Please enter an opencpu URL"))
    site <- list(name = input$siteName, url = input$ocpuURL)
    defn <- makeDefinition(getComputationInfo("compType"))
    data <- getComputationInfo("data")
    result <- tryCatch(uploadNewComputation(site = site, defn = defn, data = data),
                       error = function(x) x,
                       warning = function(x) x)
    if (inherits(result, "error")) {
      "Error: Uploading the definition to server"
    } else {
      ## result is a scalar logical: if/else is clearer than ifelse()
      if (result) {
        "Success: definition uploaded to server"
      } else {
        "Error while uploading definition to server"
      }
    }
  })
})
})
|
## caret custom-model specification for Principal Component Regression
## (pcr() from the pls package). Each element is a hook that caret's
## train() calls at the corresponding stage of tuning/fitting.
modelInfo <- list(label = "Principal Component Analysis",
library = "pls",
type = "Regression",
## the single tuning parameter: number of retained components
parameters = data.frame(parameter = 'ncomp',
class = "numeric",
label = '#Components'),
## candidate grid: 1 .. min(ncol(x) - 1, len) components
grid = function(x, y, len = NULL)
data.frame(ncomp = seq(1, min(ncol(x) - 1, len), by = 1)),
## fit only the largest ncomp; smaller ncomp values are recovered from
## that single fit as "submodels" (a pls fit nests all smaller ncomp)
loop = function(grid) {
grid <- grid[order(grid$ncomp, decreasing = TRUE),, drop = FALSE]
loop <- grid[1,,drop = FALSE]
submodels <- list(grid[-1,,drop = FALSE])
list(loop = loop, submodels = submodels)
},
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
## pcr() wants a formula interface, so bind y onto the predictors
dat <- if(is.data.frame(x)) x else as.data.frame(x)
dat$.outcome <- y
pcr(.outcome ~ ., data = dat, ncomp = param$ncomp, ...)
},
## predictions for the main fit plus one column per submodel ncomp;
## the 3rd dimension of a pls prediction array indexes ncomp
predict = function(modelFit, newdata, submodels = NULL) {
out <- as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
if(!is.null(submodels))
{
tmp <- apply(predict(modelFit, newdata, ncomp = submodels$ncomp), 3, function(x) list(x))
tmp <- as.data.frame(tmp)
out <- c(list(out), as.list(tmp))
}
out
},
predictors = function(x, ...) rownames(x$projection),
tags = c("Linear Regression", "Feature Extraction"),
prob = NULL, # regression-only model: no class probabilities
sort = function(x) x[order(x[,1]),]) # simplest (fewest components) first
| /models/files/pcr.R | no_license | bleutner/caret | R | false | false | 1,864 | r | modelInfo <- list(label = "Principal Component Analysis",
library = "pls",
type = "Regression",
parameters = data.frame(parameter = 'ncomp',
class = "numeric",
label = '#Components'),
grid = function(x, y, len = NULL)
data.frame(ncomp = seq(1, min(ncol(x) - 1, len), by = 1)),
loop = function(grid) {
grid <- grid[order(grid$ncomp, decreasing = TRUE),, drop = FALSE]
loop <- grid[1,,drop = FALSE]
submodels <- list(grid[-1,,drop = FALSE])
list(loop = loop, submodels = submodels)
},
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
dat <- if(is.data.frame(x)) x else as.data.frame(x)
dat$.outcome <- y
pcr(.outcome ~ ., data = dat, ncomp = param$ncomp, ...)
},
predict = function(modelFit, newdata, submodels = NULL) {
out <- as.vector(pls:::predict.mvr(modelFit, newdata, ncomp = max(modelFit$ncomp)))
if(!is.null(submodels))
{
tmp <- apply(predict(modelFit, newdata, ncomp = submodels$ncomp), 3, function(x) list(x))
tmp <- as.data.frame(tmp)
out <- c(list(out), as.list(tmp))
}
out
},
predictors = function(x, ...) rownames(x$projection),
tags = c("Linear Regression", "Feature Extraction"),
prob = NULL,
sort = function(x) x[order(x[,1]),])
|
#' @title The freadss() function reads in a csv and perhaps subsets the rows (optionally sampled)
#'
#' @seealso \code{\link[data.table]{fread}}
#' @description The input file is read in-memory via \code{\link[data.table]{fread}}.
#' If rows are subset, there is a slow down. Hence, subsetting rows costs a one-time slow down
#' but affords ability to bean count the read-in memory footprint
#'
#' @details NOTE: if both ss and ind_choose are NULL, no subsetting is done. Entire csv is read in.
#'
#' @param input character for file path of csv passed to \code{\link[data.table]{fread}}
#' @param ss integer for desired row sample size. Default of ss is NULL, meaning no subsampling.
#' @param replace a logical picking with/without-replacement sampling
#' @param ind_choose optional integer vector of specific rows to read in (instead of sampling)
#'
#' @return a 'data.frame' with optionally subsetted rows (perhaps from sampling)
#' @export
#'
#' @examples
#'
#' set.seed(1); m = matrix(rnorm(10*100),ncol=100,nrow=100)
#'
#' csv = data.frame(m)
#' names(csv) = paste0('x',seq_along(csv))
#' names(csv)
#'
#' tf = tempfile()
#' write.csv(csv,tf,row.names=FALSE)
#' dir_test=tf
#'
#' # if ss=NULL and ind_choose=NULL
#' # no sub sampling, basically fread() but no flexible optional args.
#' # just demo, might as well use fread() directly
#'
#' identical(freadss(input=dir_test),fread(dir_test))
#'
#' # user wants to sample 5 random rows
#' freadss(input=dir_test,ss=5)
#'
#' # user picks 5 specific rows
#' ind_pick = c(1,7,23,69,100)
#'
#' df_subset_before = freadss(input=dir_test,ind_choose = ind_pick)
#' df_subset_after = freadss(input=dir_test)[ind_pick,]
#' identical(df_subset_before,df_subset_after)
#'
# disabled optional ... args passed to fread()
# eg drop/keep bugs out since issue of original header name lost during do.call(fread,list_ss)
## Read a csv via data.table::fread(), optionally restricted to a row
## subset that is either randomly sampled (`ss` rows) or user-specified
## (`ind_choose`). Exactly one of `ss`/`ind_choose` may be non-NULL; if
## both are NULL the whole file is read.
freadss <- function(input, ss = NULL, replace = TRUE, ind_choose = NULL) {
  if ((!is.null(ss)) && (!is.null(ind_choose))) {
    stop('you can not have BOTH non-null ss and ind_choose')
  }
  library(data.table)  # was require(); library() fails loudly when absent

  ## No subsetting requested: plain fread() of the whole file.
  if (is.null(ss) && is.null(ind_choose)) {
    return(fread(input))
  }

  ## The row count must be known before sampling; `wc -l` counts the
  ## header line, hence the -1. NOTE(review): needs a unix-like shell.
  num_rows <- data.table::fread(paste0('wc -l ', input))[[1]] - 1
  name_header_orig <- names(fread(input, nrows = 0))

  ## Row index random sampled
  if (is.null(ind_choose) && (!is.null(ss))) {
    if (ss <= 0) stop('ss must be NULL or greater than 0')
    if (num_rows < ss) {
      ## BUG FIX: this branch referenced nrow(dat_raw), an object that is
      ## never defined, and would have errored; num_rows is the intended
      ## count of data rows.
      ind_samp <- sample(x = (1:num_rows), size = ss, replace = TRUE)
      warning('nrow() less than ss, so will force replace=TRUE')
    } else {
      ind_samp <- sample(x = (1:num_rows), size = ss, replace = replace)
    }
    ind_spec <- as.integer(ind_samp)
  }

  ## Row index user specified
  if (!is.null(ind_choose) && (is.null(ss))) {
    ind_spec <- as.integer(ind_choose)
  }

  ## Build a logical mask over the data rows, then run-length encode it
  ## so each run of consecutive selected rows becomes a single
  ## (skip, nrows) fread() call.
  ## NOTE(review): duplicate indices from with-replacement sampling
  ## collapse in this mask, so fewer than `ss` rows can be returned.
  v <- rep(FALSE, num_rows)
  v[ind_spec] <- TRUE
  runs <- rle(v)  # renamed from `seq`, which shadowed base::seq
  idx <- c(0, cumsum(runs$lengths))[which(runs$values)] + 1
  df_indx <- data.frame(start = idx, length = runs$lengths[which(runs$values)])

  ## skip = start works because the header occupies the first file line,
  ## so data row s begins after skipping s lines.
  result <- do.call(rbind, lapply(seq_len(nrow(df_indx)), function(i) {
    fread(input, nrows = df_indx$length[i], skip = df_indx$start[i])
  }))
  names(result) <- name_header_orig
  return(result)
}
########################################
#
#
# result = do.call(rbind,
# apply(X=df_indx,MARGIN=1,
# FUN=function(xx){
#
# # str(df_indx)
# # xx = df_indx[1,]
#
#
# # internal do.call(fread,X[[i]])
# # will chop off first global row (true header)
# # and following rows will be auto renamed to use V1 etc
# # behavior of fread()
#
# args_cust = list(input,
# # header=FALSE,
# nrows=unlist(xx[2]),
# skip=unlist(xx[1])
# )
#
# # str(args_cust)
#
# # append 'dots' from topmost scope
# # args_all = append(args_cust,dots)
#
# args_all = args_cust
#
#
# return(do.call(fread,args_all))
#
# }))
#
#
# reading one line at a time is significantly slower
#
#
# freadss = function(input,ss=10,replace=TRUE,ind_choose=NULL,...){
#
# require(data.table)
#
# # other args passed to fread()
# dots = list(...)
#
# # must know nrow beforehand
# # input = '~/projects/datzen/tests/proto/test.csv'
#
# num_rows = data.table::fread(paste0('wc -l ',input))[[1]] - 1
#
# if(is.null(ind_choose)){
# # use ind_samp
# if(num_rows < ss){
# ind_samp = sample(x=(1:nrow(dat_raw)),size=ss,replace=TRUE)
# warning('nrow() less than ss, so will force replace=TRUE')
#
# } else {
# ind_samp = sample(x=(1:num_rows),size=ss,replace=replace)
# }
# ind_spec = as.integer(ind_samp)
# } else {
# # ind_choose = c(1:50,53, 65,77,90,100:200,350:500, 5000:6000)
# ind_spec = as.integer(ind_choose)
# }
#
#
# # 1 line at a time version
#
# result = do.call(rbind,
# lapply(X=ind_spec,
# FUN=function(xx){
#
# # str(df_indx)
# # xx = df_indx[1,]
#
# # read 1 entry at a time without rle()
# # nrows=ind_spec
# # skip=1
#
# args_cust = list(input=input,
# nrows=1,
# skip=unlist(xx))
#
# # str(args_cust)
# # append 'dots' from topmost scope
#
# args_all = append(args_cust,dots)
# # do.call(fread,args_all)
#
# return(do.call(fread,args_all))
#
# }))
#
# return(result)
# }
#
# I don't want to be that random! Can you just specifically give me rows 69,23, and 7 ?
#
# ```{r message=FALSE}
# freadss(input=tf,ind_choose=c(69,23,7)) %>% str
# ```
| /R/freadss.R | no_license | GapData/datzen | R | false | false | 6,707 | r | #' @title The freadss() function reads in a csv and perhaps subsets the rows (optionally sampled)
#'
#' @seealso \code{\link[data.table]{fread}}
#' @description The input file is read in-memory via \code{\link[data.table]{fread}}.
#' If rows are subset, there is a slow down. Hence, subsetting rows costs a one-time slow down
#' but affords ability to bean count the read-in memory footprint
#'
#' @details NOTE: if both ss and ind_choose are NULL, no subsetting is done. Entire csv is read in.
#'
#' @param input character for file path of csv passed to \code{\link[data.table]{fread}}
#' @param ss integer for desired row sample size. Default of ss is NULL, meaning no subsampling.
#' @param replace a logical picking with/without-replacement sampling
#' @param ind_choose optional integer vector of specific rows to read in (instead of sampling)
#'
#' @return a 'data.frame' with optionally subsetted rows (perhaps from sampling)
#' @export
#'
#' @examples
#'
#' set.seed(1); m = matrix(rnorm(10*100),ncol=100,nrow=100)
#'
#' csv = data.frame(m)
#' names(csv) = paste0('x',seq_along(csv))
#' names(csv)
#'
#' tf = tempfile()
#' write.csv(csv,tf,row.names=FALSE)
#' dir_test=tf
#'
#' # if ss=NULL and ind_choose=NULL
#' # no sub sampling, basically fread() but no flexible optional args.
#' # just demo, might as well use fread() directly
#'
#' identical(freadss(input=dir_test),fread(dir_test))
#'
#' # user wants to sample 5 random rows
#' freadss(input=dir_test,ss=5)
#'
#' # user picks 5 specific rows
#' ind_pick = c(1,7,23,69,100)
#'
#' df_subset_before = freadss(input=dir_test,ind_choose = ind_pick)
#' df_subset_after = freadss(input=dir_test)[ind_pick,]
#' identical(df_subset_before,df_subset_after)
#'
# disabled optional ... args passed to fread()
# eg drop/keep bugs out since issue of original header name lost during do.call(fread,list_ss)
## Read a csv via data.table::fread(), optionally restricted to a row
## subset that is either randomly sampled (`ss` rows) or user-specified
## (`ind_choose`). Exactly one of `ss`/`ind_choose` may be non-NULL; if
## both are NULL the whole file is read.
freadss <- function(input, ss = NULL, replace = TRUE, ind_choose = NULL) {
  if ((!is.null(ss)) && (!is.null(ind_choose))) {
    stop('you can not have BOTH non-null ss and ind_choose')
  }
  library(data.table)  # was require(); library() fails loudly when absent

  ## No subsetting requested: plain fread() of the whole file.
  if (is.null(ss) && is.null(ind_choose)) {
    return(fread(input))
  }

  ## The row count must be known before sampling; `wc -l` counts the
  ## header line, hence the -1. NOTE(review): needs a unix-like shell.
  num_rows <- data.table::fread(paste0('wc -l ', input))[[1]] - 1
  name_header_orig <- names(fread(input, nrows = 0))

  ## Row index random sampled
  if (is.null(ind_choose) && (!is.null(ss))) {
    if (ss <= 0) stop('ss must be NULL or greater than 0')
    if (num_rows < ss) {
      ## BUG FIX: this branch referenced nrow(dat_raw), an object that is
      ## never defined, and would have errored; num_rows is the intended
      ## count of data rows.
      ind_samp <- sample(x = (1:num_rows), size = ss, replace = TRUE)
      warning('nrow() less than ss, so will force replace=TRUE')
    } else {
      ind_samp <- sample(x = (1:num_rows), size = ss, replace = replace)
    }
    ind_spec <- as.integer(ind_samp)
  }

  ## Row index user specified
  if (!is.null(ind_choose) && (is.null(ss))) {
    ind_spec <- as.integer(ind_choose)
  }

  ## Build a logical mask over the data rows, then run-length encode it
  ## so each run of consecutive selected rows becomes a single
  ## (skip, nrows) fread() call.
  ## NOTE(review): duplicate indices from with-replacement sampling
  ## collapse in this mask, so fewer than `ss` rows can be returned.
  v <- rep(FALSE, num_rows)
  v[ind_spec] <- TRUE
  runs <- rle(v)  # renamed from `seq`, which shadowed base::seq
  idx <- c(0, cumsum(runs$lengths))[which(runs$values)] + 1
  df_indx <- data.frame(start = idx, length = runs$lengths[which(runs$values)])

  ## skip = start works because the header occupies the first file line,
  ## so data row s begins after skipping s lines.
  result <- do.call(rbind, lapply(seq_len(nrow(df_indx)), function(i) {
    fread(input, nrows = df_indx$length[i], skip = df_indx$start[i])
  }))
  names(result) <- name_header_orig
  return(result)
}
########################################
#
#
# result = do.call(rbind,
# apply(X=df_indx,MARGIN=1,
# FUN=function(xx){
#
# # str(df_indx)
# # xx = df_indx[1,]
#
#
# # internal do.call(fread,X[[i]])
# # will chop off first global row (true header)
# # and following rows will be auto renamed to use V1 etc
# # behavior of fread()
#
# args_cust = list(input,
# # header=FALSE,
# nrows=unlist(xx[2]),
# skip=unlist(xx[1])
# )
#
# # str(args_cust)
#
# # append 'dots' from topmost scope
# # args_all = append(args_cust,dots)
#
# args_all = args_cust
#
#
# return(do.call(fread,args_all))
#
# }))
#
#
# reading one line at a time is significantly slower
#
#
# freadss = function(input,ss=10,replace=TRUE,ind_choose=NULL,...){
#
# require(data.table)
#
# # other args passed to fread()
# dots = list(...)
#
# # must know nrow beforehand
# # input = '~/projects/datzen/tests/proto/test.csv'
#
# num_rows = data.table::fread(paste0('wc -l ',input))[[1]] - 1
#
# if(is.null(ind_choose)){
# # use ind_samp
# if(num_rows < ss){
# ind_samp = sample(x=(1:nrow(dat_raw)),size=ss,replace=TRUE)
# warning('nrow() less than ss, so will force replace=TRUE')
#
# } else {
# ind_samp = sample(x=(1:num_rows),size=ss,replace=replace)
# }
# ind_spec = as.integer(ind_samp)
# } else {
# # ind_choose = c(1:50,53, 65,77,90,100:200,350:500, 5000:6000)
# ind_spec = as.integer(ind_choose)
# }
#
#
# # 1 line at a time version
#
# result = do.call(rbind,
# lapply(X=ind_spec,
# FUN=function(xx){
#
# # str(df_indx)
# # xx = df_indx[1,]
#
# # read 1 entry at a time without rle()
# # nrows=ind_spec
# # skip=1
#
# args_cust = list(input=input,
# nrows=1,
# skip=unlist(xx))
#
# # str(args_cust)
# # append 'dots' from topmost scope
#
# args_all = append(args_cust,dots)
# # do.call(fread,args_all)
#
# return(do.call(fread,args_all))
#
# }))
#
# return(result)
# }
#
# I don't want to be that random! Can you just specifically give me rows 69,23, and 7 ?
#
# ```{r message=FALSE}
# freadss(input=tf,ind_choose=c(69,23,7)) %>% str
# ```
|
################
# File: Lab8.R
# Author: Taeyong Park
# Summary: Regression Analysis for Time Series Data
#################
# install.packages("tseries")
# install.packages("lmtest")
# install.packages("Hmisc")
# tseries supplies adf.test(), lmtest supplies bgtest(), Hmisc supplies Lag()
library(tseries); library(lmtest); library(Hmisc)
# NOTE(review): assumes top1.csv is in the working directory
top=read.csv("top1.csv")
colnames(top)
# Test for stationarity
# Augmented Dickey-Fuller tests with 1 lag; H0: the series has a unit
# root (is nonstationary)
adf.test(top$top1_cg, k=1)
adf.test(top$topmarg, k=1)
adf.test(top$capgtax, k=1)
adf.test(top$tbill_3, k=1)
# If every variable is stationary,
# you can run a linear regression using the OLS (ADL).
# model = lm(top1_cg~topmarg+capgtax+tbill_3,data=top)
# The problem is that it's rare that every variable in the model is stationary.
# If you find evidence of nonstationarity, consider the ecm.
# First-difference
# An NA is prepended so the differenced series aligns with the original rows
top$d.top1_cg = c(NA, diff(top$top1_cg, differences = 1))
top$d.topmarg = c(NA, diff(top$topmarg, differences = 1))
top$d.capgtax = c(NA, diff(top$capgtax, differences = 1))
top$d.tbill_3 = c(NA, diff(top$tbill_3, differences = 1))
# Create lagged variables (Hmisc::Lag shifts each series by one period)
top$l.top1_cg = Lag(top$top1_cg)
top$l.topmarg = Lag(top$topmarg)
top$l.capgtax = Lag(top$capgtax)
top$l.tbill_3 = Lag(top$tbill_3)
# Cointegration test
# Engle-Granger two-step: regress in levels, then ADF-test the residuals;
# stationary residuals indicate cointegration
step1 = lm(top1_cg~ topmarg + capgtax + tbill_3, data=top)
res.step1 = step1$residuals
step2 = adf.test(res.step1, k=1)
# ECM
# Error-correction model: first differences plus lagged levels; the
# coefficient on l.top1_cg captures the speed of adjustment
model.ecm = lm(d.top1_cg ~ l.top1_cg + d.topmarg + l.topmarg + d.capgtax + l.capgtax + d.tbill_3 + l.tbill_3, data=top)
summary(model.ecm)
# Test for autocorrelated residuals
# Breusch-Godfrey test; H0: no serial correlation in the ECM residuals
bgtest(model.ecm)
| /70208_Lab8.R | no_license | typark99/TeachingLab_RegressionAnalysis | R | false | false | 1,487 | r | ################
# File: Lab8.R
# Author: Taeyong Park
# Summary: Regression Analysis for Time Series Data
#################
# install.packages("tseries")
# install.packages("lmtest")
# install.packages("Hmisc")
# tseries supplies adf.test(), lmtest supplies bgtest(), Hmisc supplies Lag()
library(tseries); library(lmtest); library(Hmisc)
# NOTE(review): assumes top1.csv is in the working directory
top=read.csv("top1.csv")
colnames(top)
# Test for stationarity
# Augmented Dickey-Fuller tests with 1 lag; H0: the series has a unit
# root (is nonstationary)
adf.test(top$top1_cg, k=1)
adf.test(top$topmarg, k=1)
adf.test(top$capgtax, k=1)
adf.test(top$tbill_3, k=1)
# If every variable is stationary,
# you can run a linear regression using the OLS (ADL).
# model = lm(top1_cg~topmarg+capgtax+tbill_3,data=top)
# The problem is that it's rare that every variable in the model is stationary.
# If you find evidence of nonstationarity, consider the ecm.
# First-difference
# An NA is prepended so the differenced series aligns with the original rows
top$d.top1_cg = c(NA, diff(top$top1_cg, differences = 1))
top$d.topmarg = c(NA, diff(top$topmarg, differences = 1))
top$d.capgtax = c(NA, diff(top$capgtax, differences = 1))
top$d.tbill_3 = c(NA, diff(top$tbill_3, differences = 1))
# Create lagged variables (Hmisc::Lag shifts each series by one period)
top$l.top1_cg = Lag(top$top1_cg)
top$l.topmarg = Lag(top$topmarg)
top$l.capgtax = Lag(top$capgtax)
top$l.tbill_3 = Lag(top$tbill_3)
# Cointegration test
# Engle-Granger two-step: regress in levels, then ADF-test the residuals;
# stationary residuals indicate cointegration
step1 = lm(top1_cg~ topmarg + capgtax + tbill_3, data=top)
res.step1 = step1$residuals
step2 = adf.test(res.step1, k=1)
# ECM
# Error-correction model: first differences plus lagged levels; the
# coefficient on l.top1_cg captures the speed of adjustment
model.ecm = lm(d.top1_cg ~ l.top1_cg + d.topmarg + l.topmarg + d.capgtax + l.capgtax + d.tbill_3 + l.tbill_3, data=top)
summary(model.ecm)
# Test for autocorrelated residuals
# Breusch-Godfrey test; H0: no serial correlation in the ECM residuals
bgtest(model.ecm)
|
rm(list=ls(all =TRUE))
# for over an explicit integer range
for(x in 1:4){
message("x=",x)
}
# ranges may include negative values
for(x in -2:4){
message("x=",x)
}
# seq() allows non-integer, decreasing steps
for(x in seq(5,-3,-0.5)){
message("x=",x)
}
#
#--------------------------------
rm(list=ls(all =TRUE))
# allocate (initialize) z before filling it inside the loop
z<- NULL
for (y in -5:10){
z[y]<-y^2
message("z=",z)
}
# NOTE(review): negative/zero values of y are not valid indices here, so
# the first few iterations store nothing (teaching example, left as-is)
# comparison: can the loop start from a different number?
z<- NULL
print(y)
y<-1:10
for (i in 1:length(y)){
z[i]<-y[i]^2
message("z=",z[i])
}
#############
#while loop
y<-0
while(y<=10){
y<-y+1
message(y)
}
# another example: repeatedly halve y until the change drops below `limit`
limit<-0.0001
y<-100
n<-1
dy<-100
y2<-NULL
while(dy>limit){
y2<-y/2
dy<-y-y2
y<-y2
# (add a counter)
n<-n+1
print(dy)
}
# `for` has an explicit numeric range, but `while` does not need one:
# `while` keeps looping as long as the condition in parentheses holds
z<- c(1,2,4,3,5,6,7,8,9,10)
x<-c(1,3,5,9,7,11,13,15,17,19)
x%in%z
z!=x
#repeat-break-next
x<-0
#switch
switch(1,1+2,2+3,3+4,4+5)
switch(4,1+2,2+3,3+4,4+5)
x<-9
# NOTE(review): a numeric switch() index larger than the number of
# alternatives returns NULL, so y is NULL here
y<-switch(x,x+2,x+3,x+4,x+5)
#switch
score<-56
level<-floor(score/10)
#-----------------------------------
#example
x<-seq(-2,2,0.5)
# NOTE(review): x is a vector, so this scalar if() only looks at x[1]
# (and errors on R >= 4.2); shown for contrast with the loop below
if(x>0){
y<- x^2+3
}else{
y<- -x^2-3
}
#-----------------------------------
#example kai
# elementwise version: loop with a scalar if/else per element
x<-seq(-2,2,0.5)
for(i in c(1:length(x))){
if(x[i]>0){
y[i]<- x[i]^2+3
}else{
y[i]<- -x[i]^2-3
}
}
x
y
#-------------------------------
# ifelse() used for its side effect inside a loop (unusual, for teaching)
x<-seq(-2,2,0.5)
y<- NULL
for(i in c(1:length(x))){
ifelse(x[i]>0, y[i]<-x[i]^2+3,y[i]<- -x[i]^2-3)
}
y
#-------------------------
# the which() approach: fully vectorized, index-based assignment
x<-seq(-2,2,0.5)
y<-array(0,length(x))
y[which(x>0)]<- x[which(x>0)]^2+3
y[which(x<=0)]<- -x[which(x<=0)]^2-3
y | /week11/Skagerrakschlacht+8.R | no_license | B05208038/NTU_freshman_Introduction_to_Computer_and_Programming_Geography_department | R | false | false | 1,562 | r | rm(list=ls(all =TRUE))
# for over an explicit integer range
for(x in 1:4){
message("x=",x)
}
# ranges may include negative values
for(x in -2:4){
message("x=",x)
}
# seq() allows non-integer, decreasing steps
for(x in seq(5,-3,-0.5)){
message("x=",x)
}
#
#--------------------------------
rm(list=ls(all =TRUE))
# allocate (initialize) z before filling it inside the loop
z<- NULL
for (y in -5:10){
z[y]<-y^2
message("z=",z)
}
# NOTE(review): negative/zero values of y are not valid indices here, so
# the first few iterations store nothing (teaching example, left as-is)
# comparison: can the loop start from a different number?
z<- NULL
print(y)
y<-1:10
for (i in 1:length(y)){
z[i]<-y[i]^2
message("z=",z[i])
}
#############
#while loop
y<-0
while(y<=10){
y<-y+1
message(y)
}
# another example: repeatedly halve y until the change drops below `limit`
limit<-0.0001
y<-100
n<-1
dy<-100
y2<-NULL
while(dy>limit){
y2<-y/2
dy<-y-y2
y<-y2
# (add a counter)
n<-n+1
print(dy)
}
# `for` has an explicit numeric range, but `while` does not need one:
# `while` keeps looping as long as the condition in parentheses holds
z<- c(1,2,4,3,5,6,7,8,9,10)
x<-c(1,3,5,9,7,11,13,15,17,19)
x%in%z
z!=x
#repeat-break-next
x<-0
#switch
switch(1,1+2,2+3,3+4,4+5)
switch(4,1+2,2+3,3+4,4+5)
x<-9
# NOTE(review): a numeric switch() index larger than the number of
# alternatives returns NULL, so y is NULL here
y<-switch(x,x+2,x+3,x+4,x+5)
#switch
score<-56
level<-floor(score/10)
#-----------------------------------
#example
x<-seq(-2,2,0.5)
# NOTE(review): x is a vector, so this scalar if() only looks at x[1]
# (and errors on R >= 4.2); shown for contrast with the loop below
if(x>0){
y<- x^2+3
}else{
y<- -x^2-3
}
#-----------------------------------
#example kai
# elementwise version: loop with a scalar if/else per element
x<-seq(-2,2,0.5)
for(i in c(1:length(x))){
if(x[i]>0){
y[i]<- x[i]^2+3
}else{
y[i]<- -x[i]^2-3
}
}
x
y
#-------------------------------
# ifelse() used for its side effect inside a loop (unusual, for teaching)
x<-seq(-2,2,0.5)
y<- NULL
for(i in c(1:length(x))){
ifelse(x[i]>0, y[i]<-x[i]^2+3,y[i]<- -x[i]^2-3)
}
y
#-------------------------
# the which() approach: fully vectorized, index-based assignment
x<-seq(-2,2,0.5)
y<-array(0,length(x))
y[which(x>0)]<- x[which(x>0)]^2+3
y[which(x<=0)]<- -x[which(x<=0)]^2-3
y
#' @title Creates a registry which can be used for running several Llama models on a cluster.
#'
#' @description
#' It is likely that you need to install some additional R packages for this from CRAN or extra
#' Weka learner. The latter can be done via e.g. \code{WPM("install-package", "XMeans")}.
#'
#' Feature costs are added for real prognostic models but not for baseline models.
#'
#' @param asscenarios [(list of) \code{\link{ASScenario}}]\cr
#' Algorithm selection scenarios.
#' @param feature.steps.list [\code{list} of \code{character}]\cr
#' Named list of feature steps we want to use.
#' Must be named with scenario ids.
#' Default is to take the default feature steps from the scenario.
#' @param baselines [\code{character}]\cr
#' Vector of characters, defining the baseline models.
#' Default is c("vbs", "singleBest", "singleBestByPar", "singleBestBySuccesses").
#' @param learners [list of \code{\link[mlr]{Learner}}]\cr
#' mlr learners to use for modeling.
#' Default is none.
#' @param par.sets [list of \code{\link[ParamHelpers]{ParamSet}}]\cr
#' Param sets for learners to tune via random search.
#' Pass an empty param set, if you want no tuning.
#'   Must be of the same length as \code{learners} and in the same order.
#' Default is none.
#' @param rs.iters [\code{integer(1)}]\cr
#' Number of iterations for random search hyperparameter tuning.
#' Default is 100.
#' @param n.inner.folds [\code{integer(1)}]\cr
#' Number of cross-validation folds for inner CV in hyperparameter tuning.
#' Default is 2L.
#' @return batchtools registry.
#' @export
runLlamaModels = function(asscenarios, feature.steps.list = NULL, baselines = NULL,
learners = list(), par.sets = list(), rs.iters = 100L, n.inner.folds = 2L) {
# accept a single scenario or a list of them; normalize to a named list
asscenarios = ensureVector(asscenarios, 1L, cl = "ASScenario")
assertList(asscenarios, types = "ASScenario")
scenario.ids = extractSubList(asscenarios, c("desc", "scenario_id"), use.names = FALSE)
names(asscenarios) = scenario.ids
# feature steps: default to each scenario's own default steps; otherwise
# validate that the user supplied exactly one character vector per scenario
if (is.null(feature.steps.list)) {
feature.steps.list = extractSubList(asscenarios, c("desc", "default_steps"),
simplify = FALSE, use.names = TRUE)
} else {
feature.steps.list = ensureVector(feature.steps.list, 1L, cl = "character")
assertList(feature.steps.list, types = "character", names = "unique")
assertSetEqual(names(feature.steps.list), scenario.ids, ordered = FALSE)
}
# sort in correct order
feature.steps.list = feature.steps.list[scenario.ids]
# models and defaults
baselines.all = c("vbs", "singleBest", "singleBestByPar", "singleBestBySuccesses")
# check model args
if (is.null(baselines)) {
baselines = baselines.all
} else {
assertCharacter(baselines, any.missing = FALSE)
assertSubset(baselines, baselines.all)
}
assertList(learners, types = "Learner")
learner.ids = extractSubList(learners, "id")
assertList(par.sets, types = "ParamSet", len = length(learners))
rs.iters = asInt(rs.iters, lower = 1L)
n.inner.folds = asInt(n.inner.folds, lower = 2L)
packs = c("RWeka", "llama", "methods", "ParamHelpers", "mlr", "batchtools")
requirePackages(packs, why = "runLlamaModels")
# convert scenarios to llama data objects (plain + CV-fold versions)
llama.scenarios = mapply(convertToLlama, asscenario = asscenarios,
feature.steps = feature.steps.list, SIMPLIFY = FALSE)
llama.cvs = lapply(asscenarios, convertToLlamaCVFolds)
# FIXME:
# NOTE(review): the registry directory is removed unconditionally, so
# re-running silently discards any previous results -- confirm intended.
unlink("run_llama_models-files", recursive = TRUE)
reg = makeExperimentRegistry("run_llama_models", packages = packs)
# export helper functions so they are available on the cluster workers
batchExport(reg = reg, export = list(fixFeckingPresolve = fixFeckingPresolve,
doNestedCVWithTuning = doNestedCVWithTuning, tuneLlamaModel = tuneLlamaModel))
# one batchtools "problem" per scenario; the data slot bundles everything a
# job needs, including makeRes, which summarizes llama predictions into
# mean successes, PAR10 score and misclassification penalty
for (i in seq_along(asscenarios)) {
asscenario = asscenarios[[i]]
desc = asscenario$desc
cutoff = desc$algorithm_cutoff_time
# use the scenario cutoff as timeout only for runtime scenarios
timeout = if (desc$performance_type[[1L]] == "runtime" && !is.na(cutoff)) {
cutoff
} else {
NULL
}
addProblem(reg = reg, name = desc$scenario_id,
data = list(
asscenario = asscenario,
feature.steps = feature.steps.list[[desc$scenario_id]],
timeout = timeout,
llama.scenario = llama.scenarios[[i]],
llama.cv = llama.cvs[[i]],
n.algos = length(getAlgorithmNames(asscenario)),
rs.iters = rs.iters,
n.inner.folds = n.inner.folds,
# feature costs are only added for real models (addCosts = TRUE),
# not for baselines
makeRes = function(data, p, timeout, addCosts) {
if (addCosts) {
data = fixFeckingPresolve(asscenario, data)
}
list(
predictions = p$predictions,
succ = mean(successes(data, p, timeout = timeout, addCosts = addCosts)),
par10 = mean(parscores(data, p, timeout = timeout, addCosts = addCosts)),
mcp = mean(misclassificationPenalties(data, p))
)
}
), fun = NULL
)
}
# add baselines to reg
if (length(baselines) > 0L) {
addAlgorithm(reg = reg, name = "baseline", fun = function(data, job, instance, type, ...) {
# look up the baseline function (e.g. llama::vbs) by name
llama.fun = get(type, envir = asNamespace("llama"))
p = llama.fun(data = data$llama.scenario)
p = list(predictions = p)
# this is how LLAMA checks what type of argument is given to the evaluation function
attr(p, "hasPredictions") = TRUE
data$makeRes(data$llama.scenario, p, data$timeout, FALSE)
})
des = list()
des$baseline = data.table::data.table(type = baselines)
addExperiments(reg = reg, algo.designs = des)
}
# add real selectors
addLearnerAlgoAndExps = function(lrn, par.set) {
# BE does not like the dots in mlr ids
id = str_replace_all(lrn$id, "\\.", "_")
addAlgorithm(reg = reg, name = id, fun = function(data, job, instance, ...) {
# map the mlr learner type to the matching llama model function
llama.fun = switch(lrn$type,
classif = llama::classify,
regr = llama::regression,
cluster = llama::cluster
)
if (lrn$type == "cluster") {
pre = llama::normalize
} else {
# identity preprocessing for supervised learners
pre = function(x, y = NULL) {
list(features = x)
}
}
# tune via nested CV only when a non-empty param set was supplied
p = if (ParamHelpers::isEmpty(par.set))
llama.fun(lrn, data = data$llama.cv, pre = pre)
else
doNestedCVWithTuning(data$asscenario, data$llama.cv, pre, data$timeout,
lrn, par.set, llama.fun, data$rs.iters, data$n.inner.folds)
data$makeRes(data$llama.cv, p, data$timeout, TRUE)
})
# empty design for algorithm
algo.designs = vector(mode = "list", length = 1L)
algo.designs[[1]] = data.table::data.table(type = NULL)
names(algo.designs) = id
addExperiments(reg = reg, algo.designs = algo.designs)
}
if (length(learners) > 0L)
mapply(addLearnerAlgoAndExps, learners, par.sets)
return(reg)
}
# Nested cross-validation with inner random-search tuning.
#
# For every outer fold, the outer training set is re-split into
# `n.inner.folds` inner CV folds; `tuneLlamaModel` selects the best
# hyperparameters on those, the learner is refit on the full outer
# training set, and the held-out outer test set is predicted.
#
# @param asscenario [ASScenario] scenario (used for cost fixing in tuning).
# @param ldf llama data object with `train` / `test` fold index lists.
# @param pre preprocessing function passed through to the llama model fun.
# @param timeout cutoff time for PAR10 scoring, or NULL.
# @param learner [mlr Learner] learner to tune and fit.
# @param par.set [ParamSet] search space for random search.
# @param llama.fun llama model function (classify / regression / cluster).
# @param rs.iters number of random-search evaluations.
# @param n.inner.folds number of inner CV folds.
# @return llama prediction object covering all outer test folds; the
#   per-fold predictions are row-bound together.
doNestedCVWithTuning = function(asscenario, ldf, pre, timeout, learner, par.set, llama.fun,
rs.iters, n.inner.folds) {
n.outer.folds = length(ldf$test)
outer.preds = vector("list", n.outer.folds)
# seq_len() instead of 1:n so an empty fold list yields zero iterations
for (i in seq_len(n.outer.folds)) {
# inner data: restrict to the outer training instances, drop fold info
ldf2 = ldf
ldf2$data = ldf$data[ldf$train[[i]], ]
ldf2$train = NULL
ldf2$test = NULL
ldf3 = cvFolds(ldf2, nfolds = n.inner.folds, stratify = FALSE)
parvals = tuneLlamaModel(asscenario, ldf3, pre, timeout, learner, par.set, llama.fun, rs.iters)
# now fit only on outer training set with best params and predict outer test set
learner2 = setHyperPars(learner, par.vals = parvals)
outer.split.ldf = ldf
outer.split.ldf$train = list(ldf$train[[i]])
outer.split.ldf$test = list(ldf$test[[i]])
outer.preds[[i]] = llama.fun(learner2, data = outer.split.ldf, pre = pre)
}
# use the first fold's object as a template, then merge all fold predictions
retval = outer.preds[[1]]
retval$predictions = do.call(rbind, lapply(outer.preds, function(x) x$predictions))
return(retval)
}
# Random-search hyperparameter tuning for one llama model.
#
# Evaluates `rs.iters` random configurations from `par.set` in parallel;
# each configuration is scored by its mean PAR10 over the supplied inner
# CV splits. Failed evaluations (learner crashes etc.) score NA.
#
# @param asscenario [ASScenario] scenario, used to fix presolver costs.
# @param cv.splits llama CV-fold data object for the inner resampling.
# @param pre preprocessing function passed to the llama model function.
# @param timeout cutoff time for PAR10 scoring, or NULL.
# @param learner [mlr Learner] learner whose hyperparameters are tuned.
# @param par.set [ParamSet] search space (sampled with trafo applied).
# @param llama.fun llama model function (classify / regression / cluster).
# @param rs.iters number of random-search evaluations.
# @return named list of the best-found hyperparameter values.
tuneLlamaModel = function(asscenario, cv.splits, pre, timeout, learner, par.set, llama.fun, rs.iters) {
des = ParamHelpers::generateRandomDesign(rs.iters, par.set, trafo = TRUE)
des.list = ParamHelpers::dfRowsToList(des, par.set)
requirePackages("parallelMap", why = "tuneLlamaModel")
parallelStartMulticore()
ys = parallelMap(function(x) {
# wrap the whole evaluation so a crashing configuration scores NA
# instead of aborting the search; avoid return() inside try(), which
# would exit this function directly and bypass the error check
par10 = try({
lrn = setHyperPars(learner, par.vals = x)
p = llama.fun(lrn, data = cv.splits, pre = pre)
ldf = fixFeckingPresolve(asscenario, cv.splits)
res = mean(parscores(ldf, p, timeout = timeout))
messagef("[Tune]: %s : par10 = %g", ParamHelpers::paramValueToString(par.set, x), res)
res
})
if (inherits(par10, "try-error")) {
par10 = NA
}
return(par10)
}, des.list, simplify = TRUE)
parallelStop()
# report how many evaluations failed (was a syntactically broken comment)
messagef("[Tune]: Tuning evals failed: %i", sum(is.na(ys)))
best.i = getMinIndex(ys)
best.parvals = des.list[[best.i]]
messagef("[Best]: %s : par10 = %g", ParamHelpers::paramValueToString(par.set, best.parvals), ys[best.i])
return(best.parvals)
}
| /aslib/R/runLlamaModels.R | no_license | coseal/aslib-r | R | false | false | 8,814 | r | #' @title Creates a registry which can be used for running several Llama models on a cluster.
#'
#' @description
#' It is likely that you need to install some additional R packages for this from CRAN or extra
#' Weka learner. The latter can be done via e.g. \code{WPM("install-package", "XMeans")}.
#'
#' Feature costs are added for real prognostic models but not for baseline models.
#'
#' @param asscenarios [(list of) \code{\link{ASScenario}}]\cr
#' Algorithm selection scenarios.
#' @param feature.steps.list [\code{list} of \code{character}]\cr
#' Named list of feature steps we want to use.
#' Must be named with scenario ids.
#' Default is to take the default feature steps from the scenario.
#' @param baselines [\code{character}]\cr
#' Vector of characters, defining the baseline models.
#' Default is c("vbs", "singleBest", "singleBestByPar", "singleBestBySuccesses").
#' @param learners [list of \code{\link[mlr]{Learner}}]\cr
#' mlr learners to use for modeling.
#' Default is none.
#' @param par.sets [list of \code{\link[ParamHelpers]{ParamSet}}]\cr
#' Param sets for learners to tune via random search.
#' Pass an empty param set, if you want no tuning.
#' Must be in of same length as \code{learners} and in the same order.
#' Default is none.
#' @param rs.iters [\code{integer(1)}]\cr
#' Number of iterations for random search hyperparameter tuning.
#' Default is 100.
#' @param n.inner.folds [\code{integer(1)}]\cr
#' Number of cross-validation folds for inner CV in hyperparameter tuning.
#' Default is 2L.
#' @return batchtools registry.
#' @export
runLlamaModels = function(asscenarios, feature.steps.list = NULL, baselines = NULL,
learners = list(), par.sets = list(), rs.iters = 100L, n.inner.folds = 2L) {
asscenarios = ensureVector(asscenarios, 1L, cl = "ASScenario")
assertList(asscenarios, types = "ASScenario")
scenario.ids = extractSubList(asscenarios, c("desc", "scenario_id"), use.names = FALSE)
names(asscenarios) = scenario.ids
if (is.null(feature.steps.list)) {
feature.steps.list = extractSubList(asscenarios, c("desc", "default_steps"),
simplify = FALSE, use.names = TRUE)
} else {
feature.steps.list = ensureVector(feature.steps.list, 1L, cl = "character")
assertList(feature.steps.list, types = "character", names = "unique")
assertSetEqual(names(feature.steps.list), scenario.ids, ordered = FALSE)
}
# sort in correct order
feature.steps.list = feature.steps.list[scenario.ids]
# models and defaults
baselines.all = c("vbs", "singleBest", "singleBestByPar", "singleBestBySuccesses")
# check model args
if (is.null(baselines)) {
baselines = baselines.all
} else {
assertCharacter(baselines, any.missing = FALSE)
assertSubset(baselines, baselines.all)
}
assertList(learners, types = "Learner")
learner.ids = extractSubList(learners, "id")
assertList(par.sets, types = "ParamSet", len = length(learners))
rs.iters = asInt(rs.iters, lower = 1L)
n.inner.folds = asInt(n.inner.folds, lower = 2L)
packs = c("RWeka", "llama", "methods", "ParamHelpers", "mlr", "batchtools")
requirePackages(packs, why = "runLlamaModels")
llama.scenarios = mapply(convertToLlama, asscenario = asscenarios,
feature.steps = feature.steps.list, SIMPLIFY = FALSE)
llama.cvs = lapply(asscenarios, convertToLlamaCVFolds)
# FIXME:
unlink("run_llama_models-files", recursive = TRUE)
reg = makeExperimentRegistry("run_llama_models", packages = packs)
batchExport(reg = reg, export = list(fixFeckingPresolve = fixFeckingPresolve,
doNestedCVWithTuning = doNestedCVWithTuning, tuneLlamaModel = tuneLlamaModel))
for (i in seq_along(asscenarios)) {
asscenario = asscenarios[[i]]
desc = asscenario$desc
cutoff = desc$algorithm_cutoff_time
timeout = if (desc$performance_type[[1L]] == "runtime" && !is.na(cutoff)) {
cutoff
} else {
NULL
}
addProblem(reg = reg, name = desc$scenario_id,
data = list(
asscenario = asscenario,
feature.steps = feature.steps.list[[desc$scenario_id]],
timeout = timeout,
llama.scenario = llama.scenarios[[i]],
llama.cv = llama.cvs[[i]],
n.algos = length(getAlgorithmNames(asscenario)),
rs.iters = rs.iters,
n.inner.folds = n.inner.folds,
makeRes = function(data, p, timeout, addCosts) {
if (addCosts) {
data = fixFeckingPresolve(asscenario, data)
}
list(
predictions = p$predictions,
succ = mean(successes(data, p, timeout = timeout, addCosts = addCosts)),
par10 = mean(parscores(data, p, timeout = timeout, addCosts = addCosts)),
mcp = mean(misclassificationPenalties(data, p))
)
}
), fun = NULL
)
}
# add baselines to reg
if (length(baselines) > 0L) {
addAlgorithm(reg = reg, name = "baseline", fun = function(data, job, instance, type, ...) {
llama.fun = get(type, envir = asNamespace("llama"))
p = llama.fun(data = data$llama.scenario)
p = list(predictions = p)
# this is how LLAMA checks what type of argument is given to the evaluation function
attr(p, "hasPredictions") = TRUE
data$makeRes(data$llama.scenario, p, data$timeout, FALSE)
})
des = list()
des$baseline = data.table::data.table(type = baselines)
addExperiments(reg = reg, algo.designs = des)
}
# add real selectors
addLearnerAlgoAndExps = function(lrn, par.set) {
# BE does not like the dots in mlr ids
id = str_replace_all(lrn$id, "\\.", "_")
addAlgorithm(reg = reg, name = id, fun = function(data, job, instance, ...) {
llama.fun = switch(lrn$type,
classif = llama::classify,
regr = llama::regression,
cluster = llama::cluster
)
if (lrn$type == "cluster") {
pre = llama::normalize
} else {
pre = function(x, y = NULL) {
list(features = x)
}
}
p = if (ParamHelpers::isEmpty(par.set))
llama.fun(lrn, data = data$llama.cv, pre = pre)
else
doNestedCVWithTuning(data$asscenario, data$llama.cv, pre, data$timeout,
lrn, par.set, llama.fun, data$rs.iters, data$n.inner.folds)
data$makeRes(data$llama.cv, p, data$timeout, TRUE)
})
# empty design for algorithm
algo.designs = vector(mode = "list", length = 1L)
algo.designs[[1]] = data.table::data.table(type = NULL)
names(algo.designs) = id
addExperiments(reg = reg, algo.designs = algo.designs)
}
if (length(learners) > 0L)
mapply(addLearnerAlgoAndExps, learners, par.sets)
return(reg)
}
doNestedCVWithTuning = function(asscenario, ldf, pre, timeout, learner, par.set, llama.fun,
rs.iters, n.inner.folds) {
n.outer.folds = length(ldf$test)
outer.preds = vector("list", n.outer.folds)
for (i in 1:n.outer.folds) {
ldf2 = ldf
ldf2$data = ldf$data[ldf$train[[i]],]
ldf2$train = NULL
ldf2$test = NULL
ldf3 = cvFolds(ldf2, nfolds = n.inner.folds, stratify = FALSE)
parvals = tuneLlamaModel(asscenario, ldf3, pre, timeout, learner, par.set, llama.fun, rs.iters)
# now fit only on outer trainining set with best params and predict outer test set
learner2 = setHyperPars(learner, par.vals = parvals)
outer.split.ldf = ldf
outer.split.ldf$train = list(ldf$train[[i]])
outer.split.ldf$test = list(ldf$test[[i]])
outer.preds[[i]] = llama.fun(learner2, data = outer.split.ldf, pre = pre)
}
retval = outer.preds[[1]]
retval$predictions = do.call(rbind, lapply(outer.preds, function(x) { x$predictions }))
return(retval)
}
tuneLlamaModel = function(asscenario, cv.splits, pre, timeout, learner, par.set, llama.fun, rs.iters) {
des = ParamHelpers::generateRandomDesign(rs.iters, par.set, trafo = TRUE)
des.list = ParamHelpers::dfRowsToList(des, par.set)
requirePackages(c("parallelMap"), why = "tuneLlamaModel")
parallelStartMulticore()
ys = parallelMap(function(x) {
par10 = try({
learner = setHyperPars(learner, par.vals = x)
p = llama.fun(learner, data = cv.splits, pre = pre)
ldf = fixFeckingPresolve(asscenario, cv.splits)
par10 = mean(parscores(ldf, p, timeout = timeout))
messagef("[Tune]: %s : par10 = %g", ParamHelpers::paramValueToString(par.set, x), par10)
return(par10)
})
if(inherits(par10, "try-error")) {
par10 = NA
}
return(par10)
}, des.list, simplify = TRUE)
parallelStop()
# messagef"[Tune]: Tuning evals failed: %i", sum(is.na(ys))]
best.i = getMinIndex(ys)
best.parvals = des.list[[best.i]]
messagef("[Best]: %s : par10 = %g", ParamHelpers::paramValueToString(par.set, best.parvals), ys[best.i])
return(best.parvals)
}
|
# Package load hook: print a startup banner with the current year and the
# installed package version.
#
# The original extracted the year by regex-matching the output of date(),
# whose format is locale/system dependent; format(Sys.Date(), "%Y") is the
# robust, direct way to obtain the four-digit year.
.onAttach <- function(...){
## Retrieve Year Information
this.year <- format(Sys.Date(), "%Y")
# Retrieve Current Version
this.version <- packageVersion("NetworkDistance")
## Print on Screen
packageStartupMessage("**--------------------------------------------------------**")
packageStartupMessage("** NetworkDistance - Distance Measures for Networks")
packageStartupMessage("**")
packageStartupMessage("** Version    : ",this.version,"      (",this.year,")",sep="")
packageStartupMessage("** Maintainer : Kisung You (kisungyou@outlook.com)")
packageStartupMessage("**")
packageStartupMessage("** Please share any bugs or suggestions to the maintainer.")
packageStartupMessage("**--------------------------------------------------------**")
}
# Namespace unload hook: detach the package's compiled shared library so the
# DLL/.so does not stay loaded after the package is unloaded.
.onUnload <- function(libpath) {
library.dynam.unload("NetworkDistance", libpath)
}
| /R/zzz.R | no_license | cran/NetworkDistance | R | false | false | 940 | r | .onAttach <- function(...){
## Retrieve Year Information
date <- date()
x <- regexpr("[0-9]{4}", date)
this.year <- substr(date, x[1], x[1] + attr(x, "match.length") - 1)
# Retrieve Current Version
this.version = packageVersion("NetworkDistance")
## Print on Screen
packageStartupMessage("**--------------------------------------------------------**")
packageStartupMessage("** NetworkDistance - Distance Measures for Networks")
packageStartupMessage("**")
packageStartupMessage("** Version : ",this.version," (",this.year,")",sep="")
packageStartupMessage("** Maintainer : Kisung You (kisungyou@outlook.com)")
packageStartupMessage("**")
packageStartupMessage("** Please share any bugs or suggestions to the maintainer.")
packageStartupMessage("**--------------------------------------------------------**")
}
.onUnload <- function(libpath) {
library.dynam.unload("NetworkDistance", libpath)
}
|
#'@title Metabolism calculated from the maximum likelihood estimates of the parameters in a standard linear regression model
#'@description Process-error-only model with parameters fitted via maximum likelihood estimation (MLE). This function runs the maximum likelihood metabolism model on the supplied gas concentration and other supporting data.
#'@param do.obs Vector of dissolved oxygen concentration observations, \eqn{mg O[2] L^{-1}}{mg O2 / L}
#'@param do.sat Vector of dissolved oxygen saturation values based on water temperature. Calculate using \link{o2.at.sat}
#'@param k.gas Vector of kGAS values calculated from any of the gas flux models
#'(e.g., \link{k.cole}) and converted to kGAS using \link{k600.2.kGAS}
#'@param z.mix Vector of mixed-layer depths in meters. To calculate, see \link{ts.meta.depths}
#'@param irr Vector of photosynthetically active radiation in \eqn{\mu mol\ m^{-2} s^{-1}}{micro mols / m^2 / s}
#'@param wtr Vector of water temperatures in \eqn{^{\circ}C}{degrees C}. Used in scaling respiration with temperature
#'@param error.type Option specifying if model should assume pure Process Error 'PE' or Observation Error 'OE'. Defaults to observation error 'OE'.
#'@param ... additional arguments; currently "datetime" is the only recognized argument passed through \code{...}
#'@return
#'A data.frame with columns corresponding to components of metabolism
#'\describe{
#'\item{GPP}{numeric estimate of Gross Primary Production, \eqn{mg O_2 L^{-1} d^{-1}}{mg O2 / L / d}}
#'\item{R}{numeric estimate of Respiration, \eqn{mg O_2 L^{-1} d^{-1}}{mg O2 / L / d}}
#'\item{NEP}{numeric estimate of Net Ecosystem production, \eqn{mg O_2 L^{-1} d^{-1}}{mg O2 / L / d}}
#'}
#' The maximum likelihood estimates of model parameters can be accessed via \code{attributes(metab.mle(...))[["params"]]}
#'
#'@details
#'The model has the three parameters, \eqn{c_1, c_2, \epsilon}{c1, c2, epsilon}, and has the form
#'
#'\deqn{v=k.gas/z.mix}{v=k.gas/z.mix}
#'
#'\deqn{a_t = c_1*irr_{t-1} + c_2*log_e(wtr_{t-1}) + v_{t-1}*do.sat_{t-1}}{a[t] = c1*irr[t-1] + c2*log(wtr[t-1]) + v[t-1]*do.sat[t-1]}
#'
#'\deqn{\beta = e^{-v}}{beta = exp(-v)}
#'
#'\deqn{do.obs_t = a_t/v_{t-1} + -e^{-v_{t-1}}*a_t/v_{t-1} + \beta_{t-1}*do.obs_{t-1} + \epsilon_t}{do.obs[t] = a[t]/v[t-1] + -exp(-v[t-1])*a[t]/v[t-1] + beta[t-1]*do.obs[t-1] + epsilon[t]}
#'
#'
#' The above model is used during model fitting, but if gas flux is not integrated between time steps, those equations simplify to the following:
#'
#' \deqn{F_{t-1} = k.gas_{t-1}*(do.sat_{t-1} - do.obs_{t-1})/z.mix_{t-1}}{F[t-1] = k.gas[t-1]*(do.sat[t-1] - do.obs[t-1])/z.mix[t-1]}
#'
#'\deqn{do.obs_t=do.obs_{t-1}+c_1*irr_{t-1}+c_2*log_e(wtr_{t-1}) + F_{t-1} + \epsilon_t}{do.obs[t] = do.obs[t-1] + c1*irr[t-1] + c2*log(wtr[t-1]) + F[t-1] + epsilon[t]}
#'
#'
#'The parameters are fit using maximum likelihood, and the optimization (minimization of the negative log likelihood function) is performed by \code{optim} using default settings.
#'
#'GPP is then calculated as \code{mean(c1*irr, na.rm=TRUE)*freq}, where \code{freq} is the number of observations per day, as estimated from the typical size between time steps. Thus, generally \code{freq==length(do.obs)}.
#'
#'Similarly, R is calculated as \code{mean(c2*log(wtr), na.rm=TRUE)*freq}.
#'
#'NEP is the sum of GPP and R.
#'
#'@note Currently, missing values in any arguments will result in an error, so freq must always equal nobs.
#'@author Luke A Winslow, Ryan Batt, GLEON Fellows
#'@references
#'Hanson, PC, SR Carpenter, N Kimura, C Wu, SP Cornelius, TK Kratz. 2008
#'\emph{Evaluation of metabolism models for free-water dissolved oxygen in lakes}.
#'Limnology and Oceanography: Methods 6: 454:465.
#'
#'Solomon CT, DA Bruesewitz, DC Richardson, KC Rose, MC Van de Bogert, PC Hanson, TK Kratz, B Larget,
#'R Adrian, B Leroux Babin, CY Chiu, DP Hamilton, EE Gaiser, S Hendricks, V Istvanovics, A Laas, DM O'Donnell,
#'ML Pace, E Ryder, PA Staehr, T Torgersen, MJ Vanni, KC Weathers, G Zhuw. 2013.
#'\emph{Ecosystem Respiration: Drivers of Daily Variability and Background Respiration in Lakes around the Globe}.
#'Limnology and Oceanography 58 (3): 849:866. doi:10.4319/lo.2013.58.3.0849.
#'
#'@importFrom stats dnorm optim
#'
#'@seealso
#'\link{metab}, \link{metab.bookkeep}, \link{metab.ols}, \link{metab.kalman}, \link{metab.bayesian}
#'@examples
#'library(rLakeAnalyzer)
#'doobs = load.ts(system.file('extdata',
#' 'sparkling.doobs', package="LakeMetabolizer"))
#'wtr = load.ts(system.file('extdata',
#' 'sparkling.wtr', package="LakeMetabolizer"))
#'wnd = load.ts(system.file('extdata',
#' 'sparkling.wnd', package="LakeMetabolizer"))
#'irr = load.ts(system.file('extdata',
#' 'sparkling.par', package="LakeMetabolizer"))
#'
#'#Subset a day
#'mod.date = as.POSIXct('2009-07-08', 'GMT')
#'doobs = doobs[trunc(doobs$datetime, 'day') == mod.date, ]
#'wtr = wtr[trunc(wtr$datetime, 'day') == mod.date, ]
#'wnd = wnd[trunc(wnd$datetime, 'day') == mod.date, ]
#'irr = irr[trunc(irr$datetime, 'day') == mod.date, ]
#'z.mix = ts.thermo.depth(wtr)
#'
#'k600 = k.cole.base(wnd[,2])
#'k.gas = k600.2.kGAS.base(k600, wtr[,3], 'O2')
#'do.sat = o2.at.sat.base(wtr[,3], altitude=300)
#'
#'metab.mle(doobs[,2], do.sat, k.gas, z.mix[,2], irr[,2], wtr[,3])
#'@export
metab.mle <- function(do.obs, do.sat, k.gas, z.mix, irr, wtr, error.type="OE", ...){
# check that all required inputs are supplied (errors on missing/NA)
complete.inputs(do.obs=do.obs, do.sat=do.sat, k.gas=k.gas,
z.mix=z.mix, irr=irr, wtr=wtr, error=TRUE)
# BUGFIX: keep the matched value. Previously the result of match.arg() was
# discarded, so a valid partial match (e.g. error.type="O") passed this
# validation but then failed the exact string comparisons below and hit
# the stop() branch.
error.type <- match.arg(error.type, choices=c('OE', 'PE'))
nobs <- length(do.obs)
mm.args <- list(...)
if(any(z.mix <= 0)){
stop("z.mix must be greater than zero.")
}
if(any(wtr <= 0)){
stop("all wtr must be positive.")
}
if("datetime"%in%names(mm.args)){ # check to see if datetime is in the ... args
datetime <- mm.args$datetime # extract datetime
freq <- calc.freq(datetime) # calculate sampling frequency from datetime
if(nobs!=freq){ # nobs and freq should agree, if they don't issue a warning
bad.date <- format.Date(datetime[1], format="%Y-%m-%d")
# note: warning() has no sep argument; the former sep="" was silently
# concatenated as an empty string and has been dropped
warning("number of observations on ", bad.date, " (", nobs, ") ", "does not equal estimated sampling frequency", " (", freq, ")")
}
}else{ # if datetime is *not* in the ... args
warning("datetime not found, inferring sampling frequency from # of observations") # issue a warning (note checks in addNAs)
# NOTE: because of the checks in addNA's, it is unlikely a user would receive this warning via metab()
# warning will only be seen through direct use of metab.bookkeep when datettime is not supplied
freq <- nobs
}
# all driver series must be numeric vectors of identical length
chk.list <- list(do.obs, irr, do.sat, z.mix, k.gas, wtr)
if(!all(sapply(chk.list, is.numeric)) || !all(sapply(chk.list, is.vector))){
stop('All metab.mle inputs must be numeric vectors.')
}
if(!all(nobs==sapply(chk.list, length))){
stop('All input data to metab.mle must be the same length')
}
# starting guesses: small GPP/R coefficients plus a rough log-variance
Q0 <- ((diff(range(do.obs,na.rm=TRUE)) - mean(do.obs,na.rm=TRUE))^2 / length(do.obs))
guesses <- c(1E-4, 1E-4, log(Q0))
#We have a different number of fitted parameters depending on error type of the model
if(error.type=='OE'){
# observation error: the initial DO value is a fourth free parameter
guesses <- c(guesses, do.obs[1])
fit <- optim(guesses, fn=mleNllOE, do.obs=do.obs, do.sat=do.sat, k.gas=(k.gas/freq), z.mix=z.mix, irr=irr, wtr=wtr)
pars0 <- fit$par
pars <- c("gppCoeff"=pars0[1], "rCoeff"=pars0[2], "Q"=exp(pars0[3]), "nll"=fit$value, "doInit"=pars0[4])
}else if(error.type=='PE'){
guesses <- c(guesses)
fit <- optim(guesses, fn=mleNllPE, do.obs=do.obs, do.sat=do.sat, k.gas=(k.gas/freq), z.mix=z.mix, irr=irr, wtr=wtr)
pars0 <- fit$par
pars <- c("gppCoeff"=pars0[1], "rCoeff"=pars0[2], "Q"=exp(pars0[3]), "nll"=fit$value)
}else{
# defensive only: unreachable after match.arg() above
stop("error.type must be either 'OE' or 'PE', Observation Error or Process Error respectively.")
}
# ====================================
# = Use fits to calculate metabolism =
# ====================================
GPP <- mean(pars[1]*irr, na.rm=TRUE) * freq
R <- mean(pars[2]*log(wtr), na.rm=TRUE) * freq
return(list("params"=pars, "metab"=c("GPP"=GPP,"R"=R,"NEP"=GPP+R)))
}
# ============================================
# = The R loop for Observation Error mle NLL =
# ============================================
# Thin wrapper around the compiled C routine "mleLoopCoe": advances the
# latent DO series alpha[2..n] from the model dynamics. alpha[1] must
# already hold the (freely varying) initial DO value; the other arguments
# are the model coefficients and driver series, all of length nobs.
# Returns the filled-in alpha vector.
mleLoopOE <- function(alpha, doobs, c1, c2, beta, irr, wtr, kz, dosat){
nobs <- length(doobs)
a.loop <- .C("mleLoopCoe", alpha=as.double(alpha), as.double(doobs), as.double(c1), as.double(c2), as.double(beta), as.double(irr), as.double(wtr), as.double(kz), as.double(dosat), as.integer(nobs), PACKAGE="LakeMetabolizer")
return(a.loop[["alpha"]])
}
# ============================================
# = The R loop for Process Error mle NLL =
# ============================================
# Thin wrapper around the compiled C routine "mleLoopCpe": advances the
# latent DO series alpha[2..n] under the process-error model. alpha[1]
# must be seeded by the caller (with the first observation); all other
# arguments are coefficients and driver series of length nobs.
# Returns the filled-in alpha vector.
mleLoopPE <- function(alpha, doobs, c1, c2, beta, irr, wtr, kz, dosat){
nobs <- length(doobs)
a.loop <- .C("mleLoopCpe", alpha=as.double(alpha), as.double(doobs), as.double(c1), as.double(c2), as.double(beta), as.double(irr), as.double(wtr), as.double(kz), as.double(dosat), as.integer(nobs), PACKAGE="LakeMetabolizer")
return(a.loop[["alpha"]])
}
# ====================
# = mle NLL function =
# ====================
# Negative log-likelihood of the Process Error model, minimized by optim().
# Params: c1 (PAR coefficient), c2 (log-temperature coefficient) and
# Params[3] = log(Q), the process-error variance fit on the log scale so
# the optimizer can search unconstrained.
# The latent series alpha is seeded with the first observation, advanced by
# the compiled loop (mleLoopPE), then scored against do.obs under a
# Gaussian likelihood; NA observations are skipped.
mleNllPE <- function(Params, do.obs, do.sat, k.gas, z.mix, irr, wtr){
c1 <- Params[1] #PAR coeff
c2 <- Params[2] #log(Temp) coeff
Q <- exp(Params[3]) # Variance of the process error
# See KalmanDO_smooth.R comments for explanation of beta
kz <- k.gas/z.mix # K and Zmix are both vector of length nobs
beta <- exp(-kz) # This beta is for using the differential equation form
# Set first true value equal to first observation
alpha <- rep(0, length(do.obs))
alpha[1] <- do.obs[1]#Let's give this model some starting values
#R version of C loop
#for(i in 2:length(do.obs)){
#	a1 <- c1*irr[i-1] + c2*log(wtr[i-1]) + kz[i-1]*do.sat[i-1]
#	alpha[i] <- a1/kz[i-1] + -exp(-kz[i-1])*a1/kz[i-1] + beta[i-1]*alpha[i-1] # NOTE: beta==exp(-kz); kz=K/Zmix
#}
alpha <- mleLoopPE(alpha=alpha, doobs=do.obs, c1=c1, c2=c2, beta=beta, irr=irr, wtr=wtr, kz=kz, dosat=do.sat)
return(-sum(dnorm(do.obs, alpha, sd=sqrt(Q), log=TRUE), na.rm=TRUE))
}#End function
# ====================
# = mle NLL function =
# ====================
# Negative log-likelihood for the Observation Error (OE) model.
# Params: c(PAR coefficient, log-temperature coefficient, log error variance,
# initial DO value).  `error.type` is accepted for interface compatibility but
# is never referenced inside this function.
mleNllOE <- function(Params, do.obs, do.sat, k.gas, z.mix, irr, wtr, error.type){
	gpp.coef <- Params[1]      # PAR (light) coefficient
	r.coef <- Params[2]        # log(temperature) coefficient
	obs.var <- exp(Params[3])  # error variance (fitted on the log scale)

	# kz combines gas exchange and mixing depth; beta == exp(-kz) comes from
	# the differential-equation form of the model (see KalmanDO_smooth.R).
	kz <- k.gas/z.mix
	beta <- exp(-kz)

	# Unlike the PE model, the initial latent DO value is a free parameter.
	alpha <- numeric(length(do.obs))
	alpha[1] <- Params[4]
	alpha <- mleLoopOE(alpha=alpha, doobs=do.obs, c1=gpp.coef, c2=r.coef, beta=beta, irr=irr, wtr=wtr, kz=kz, dosat=do.sat)

	# Negative log-likelihood of the observations about the latent states.
	-sum(dnorm(do.obs, alpha, sd=sqrt(obs.var), log=TRUE), na.rm=TRUE)
}
| /LakeMetabolizer/R/metab.mle.R | no_license | ingted/R-Examples | R | false | false | 11,288 | r | #'@title Metabolism calculated from the maximum likelihood estimates of the parameters in a standard linear regression model
#'@description Process-error-only model with parameters fitted via maximum likelihood estimation (MLE). This function runs the maximum likelihood metabolism model on the supplied gas concentration and other supporting data.
#'@param do.obs Vector of dissolved oxygen concentration observations, \eqn{mg O[2] L^{-1}}{mg O2 / L}
#'@param do.sat Vector of dissolved oxygen saturation values based on water temperature. Calculate using \link{o2.at.sat}
#'@param k.gas Vector of kGAS values calculated from any of the gas flux models
#'(e.g., \link{k.cole}) and converted to kGAS using \link{k600.2.kGAS}
#'@param z.mix Vector of mixed-layer depths in meters. To calculate, see \link{ts.meta.depths}
#'@param irr Vector of photosynthetically active radiation in \eqn{\mu mol\ m^{-2} s^{-1}}{micro mols / m^2 / s}
#'@param wtr Vector of water temperatures in \eqn{^{\circ}C}{degrees C}. Used in scaling respiration with temperature
#'@param error.type Option specifying if model should assume pure Process Error 'PE' or Observation Error 'OE'. Defaults to observation error 'OE'.
#'@param ... additional arguments; currently "datetime" is the only recognized argument passed through \code{...}
#'@return
#'A data.frame with columns corresponding to components of metabolism
#'\describe{
#'\item{GPP}{numeric estimate of Gross Primary Production, \eqn{mg O_2 L^{-1} d^{-1}}{mg O2 / L / d}}
#'\item{R}{numeric estimate of Respiration, \eqn{mg O_2 L^{-1} d^{-1}}{mg O2 / L / d}}
#'\item{NEP}{numeric estimate of Net Ecosystem production, \eqn{mg O_2 L^{-1} d^{-1}}{mg O2 / L / d}}
#'}
#' The maximum likelihood estimates of model parameters can be accessed via \code{attributes(metab.mle(...))[["params"]]}
#'
#'@details
#'The model has the three parameters, \eqn{c_1, c_2, \epsilon}{c1, c2, epsilon}, and has the form
#'
#'\deqn{v=k.gas/z.mix}{v=k.gas/z.mix}
#'
#'\deqn{a_t = c_1*irr_{t-1} + c_2*log_e(wtr_{t-1}) + v_{t-1}*do.sat_{t-1}}{a[t] = c1*irr[t-1] + c2*log(wtr[t-1]) + v[t-1]*do.sat[t-1]}
#'
#'\deqn{\beta = e^{-v}}{beta = exp(-v)}
#'
#'\deqn{do.obs_t = a_t/v_{t-1} + -e^{-v_{t-1}}*a_t/v_{t-1} + \beta_{t-1}*do.obs_{t-1} + \epsilon_t}{do.obs[t] = a[t]/v[t-1] + -exp(-v[t-1])*a[t]/v[t-1] + beta[t-1]*do.obs[t-1] + epsilon[t]}
#'
#'
#' The above model is used during model fitting, but if gas flux is not integrated between time steps, those equations simplify to the following:
#'
#' \deqn{F_{t-1} = k.gas_{t-1}*(do.sat_{t-1} - do.obs_{t-1})/z.mix_{t-1}}{F[t-1] = k.gas[t-1]*(do.sat[t-1] - do.obs[t-1])/z.mix[t-1]}
#'
#'\deqn{do.obs_t=do.obs_{t-1}+c_1*irr_{t-1}+c_2*log_e(wtr_{t-1}) + F_{t-1} + \epsilon_t}{do.obs[t] = do.obs[t-1] + c1*irr[t-1] + c2*log(wtr[t-1]) + F[t-1] + epsilon[t]}
#'
#'
#'The parameters are fit using maximum likelihood, and the optimization (minimization of the negative log likelihood function) is performed by \code{optim} using default settings.
#'
#'GPP is then calculated as \code{mean(c1*irr, na.rm=TRUE)*freq}, where \code{freq} is the number of observations per day, as estimated from the typical size between time steps. Thus, generally \code{freq==length(do.obs)}.
#'
#'Similarly, R is calculated as \code{mean(c2*log(wtr), na.rm=TRUE)*freq}.
#'
#'NEP is the sum of GPP and R.
#'
#'@note Currently, missing values in any arguments will result in an error, so freq must always equal nobs.
#'@author Luke A Winslow, Ryan Batt, GLEON Fellows
#'@references
#'Hanson, PC, SR Carpenter, N Kimura, C Wu, SP Cornelius, TK Kratz. 2008
#'\emph{Evaluation of metabolism models for free-water dissolved oxygen in lakes}.
#'Limnology and Oceanography: Methods 6: 454:465.
#'
#'Solomon CT, DA Bruesewitz, DC Richardson, KC Rose, MC Van de Bogert, PC Hanson, TK Kratz, B Larget,
#'R Adrian, B Leroux Babin, CY Chiu, DP Hamilton, EE Gaiser, S Hendricks, V Istvanovics, A Laas, DM O'Donnell,
#'ML Pace, E Ryder, PA Staehr, T Torgersen, MJ Vanni, KC Weathers, G Zhuw. 2013.
#'\emph{Ecosystem Respiration: Drivers of Daily Variability and Background Respiration in Lakes around the Globe}.
#'Limnology and Oceanography 58 (3): 849:866. doi:10.4319/lo.2013.58.3.0849.
#'
#'@importFrom stats dnorm optim
#'
#'@seealso
#'\link{metab}, \link{metab.bookkeep}, \link{metab.ols}, \link{metab.kalman}, \link{metab.bayesian}
#'@examples
#'library(rLakeAnalyzer)
#'doobs = load.ts(system.file('extdata',
#' 'sparkling.doobs', package="LakeMetabolizer"))
#'wtr = load.ts(system.file('extdata',
#' 'sparkling.wtr', package="LakeMetabolizer"))
#'wnd = load.ts(system.file('extdata',
#' 'sparkling.wnd', package="LakeMetabolizer"))
#'irr = load.ts(system.file('extdata',
#' 'sparkling.par', package="LakeMetabolizer"))
#'
#'#Subset a day
#'mod.date = as.POSIXct('2009-07-08', 'GMT')
#'doobs = doobs[trunc(doobs$datetime, 'day') == mod.date, ]
#'wtr = wtr[trunc(wtr$datetime, 'day') == mod.date, ]
#'wnd = wnd[trunc(wnd$datetime, 'day') == mod.date, ]
#'irr = irr[trunc(irr$datetime, 'day') == mod.date, ]
#'z.mix = ts.thermo.depth(wtr)
#'
#'k600 = k.cole.base(wnd[,2])
#'k.gas = k600.2.kGAS.base(k600, wtr[,3], 'O2')
#'do.sat = o2.at.sat.base(wtr[,3], altitude=300)
#'
#'metab.mle(doobs[,2], do.sat, k.gas, z.mix[,2], irr[,2], wtr[,3])
#'@export
metab.mle <- function(do.obs, do.sat, k.gas, z.mix, irr, wtr, error.type="OE", ...){
	# Validate that all required inputs are supplied (errors on NULL/missing).
	complete.inputs(do.obs=do.obs, do.sat=do.sat, k.gas=k.gas,
					z.mix=z.mix, irr=irr, wtr=wtr, error=TRUE)

	# BUGFIX: match.arg() returns the validated (possibly expanded) choice; the
	# original discarded that return value, so a partial match such as "O"
	# passed validation here yet still hit the invalid-choice branch below.
	error.type <- match.arg(error.type, choices=c('OE', 'PE'))

	nobs <- length(do.obs)
	mm.args <- list(...)

	if(any(z.mix <= 0)){
		stop("z.mix must be greater than zero.")
	}
	if(any(wtr <= 0)){ # log(wtr) drives respiration, so wtr must be positive
		stop("all wtr must be positive.")
	}

	if("datetime" %in% names(mm.args)){ # check to see if datetime is in the ... args
		datetime <- mm.args$datetime # extract datetime
		freq <- calc.freq(datetime) # estimated observations per day
		if(nobs != freq){ # nobs and freq should agree, if they don't issue a warning
			bad.date <- format.Date(datetime[1], format="%Y-%m-%d")
			# NOTE: warning() has no 'sep' argument; the original's sep="" was
			# silently pasted into the message text.  Pieces are concatenated
			# directly, so it is simply dropped here.
			warning("number of observations on ", bad.date, " (", nobs, ") ",
					"does not equal estimated sampling frequency", " (", freq, ")")
		}
	}else{ # if datetime is *not* in the ... args
		warning("datetime not found, inferring sampling frequency from # of observations")
		# NOTE: because of the checks in addNA's, it is unlikely a user would receive this warning via metab()
		# warning will only be seen through direct use of metab.mle when datetime is not supplied
		freq <- nobs
	}

	# All inputs must be numeric vectors of identical length.
	chk.list <- list(do.obs, irr, do.sat, z.mix, k.gas, wtr)
	if(!all(sapply(chk.list, is.numeric)) || !all(sapply(chk.list, is.vector))){
		stop('All metab.mle inputs must be numeric vectors.')
	}
	if(!all(nobs == sapply(chk.list, length))){
		stop('All input data to metab.mle must be the same length')
	}

	# Starting guesses: small GPP/R coefficients plus a rough variance estimate.
	Q0 <- ((diff(range(do.obs, na.rm=TRUE)) - mean(do.obs, na.rm=TRUE))^2 / length(do.obs))
	guesses <- c(1E-4, 1E-4, log(Q0))

	# The OE model fits one extra parameter: the initial DO value.
	if(error.type == 'OE'){
		guesses <- c(guesses, do.obs[1])
		fit <- optim(guesses, fn=mleNllOE, do.obs=do.obs, do.sat=do.sat, k.gas=(k.gas/freq), z.mix=z.mix, irr=irr, wtr=wtr)
		pars0 <- fit$par
		pars <- c("gppCoeff"=pars0[1], "rCoeff"=pars0[2], "Q"=exp(pars0[3]), "nll"=fit$value, "doInit"=pars0[4])
	}else{ # error.type == 'PE'; any other value is rejected by match.arg above
		fit <- optim(guesses, fn=mleNllPE, do.obs=do.obs, do.sat=do.sat, k.gas=(k.gas/freq), z.mix=z.mix, irr=irr, wtr=wtr)
		pars0 <- fit$par
		pars <- c("gppCoeff"=pars0[1], "rCoeff"=pars0[2], "Q"=exp(pars0[3]), "nll"=fit$value)
	}

	# ====================================
	# = Use fits to calculate metabolism =
	# ====================================
	GPP <- mean(pars[1]*irr, na.rm=TRUE) * freq
	R <- mean(pars[2]*log(wtr), na.rm=TRUE) * freq

	return(list("params"=pars, "metab"=c("GPP"=GPP, "R"=R, "NEP"=GPP+R)))
}
# ============================================
# = The R loop for Observation Error mle NLL =
# ============================================
# Wrapper for the compiled routine "mleLoopCoe": fills in the latent DO state
# vector `alpha` one time step at a time for the Observation Error model.
# Returns the updated alpha vector extracted from the .C() result.
mleLoopOE <- function(alpha, doobs, c1, c2, beta, irr, wtr, kz, dosat){
nobs <- length(doobs)
a.loop <- .C("mleLoopCoe", alpha=as.double(alpha), as.double(doobs), as.double(c1), as.double(c2), as.double(beta), as.double(irr), as.double(wtr), as.double(kz), as.double(dosat), as.integer(nobs), PACKAGE="LakeMetabolizer")
return(a.loop[["alpha"]])
}
# ============================================
# = The R loop for Process Error mle NLL =
# ============================================
# Wrapper for the compiled routine "mleLoopCpe": fills in the latent DO state
# vector `alpha` one time step at a time for the Process Error model.
# Returns the updated alpha vector extracted from the .C() result.
mleLoopPE <- function(alpha, doobs, c1, c2, beta, irr, wtr, kz, dosat){
nobs <- length(doobs)
a.loop <- .C("mleLoopCpe", alpha=as.double(alpha), as.double(doobs), as.double(c1), as.double(c2), as.double(beta), as.double(irr), as.double(wtr), as.double(kz), as.double(dosat), as.integer(nobs), PACKAGE="LakeMetabolizer")
return(a.loop[["alpha"]])
}
# ====================
# = mle NLL function =
# ====================
# Negative log-likelihood for the Process Error model.
# Params: c(PAR coefficient, log-temperature coefficient, log process variance).
mleNllPE <- function(Params, do.obs, do.sat, k.gas, z.mix, irr, wtr){
c1 <- Params[1] #PAR coeff
c2 <- Params[2] #log(Temp) coeff
Q <- exp(Params[3]) # Variance of the process error
# See KalmanDO_smooth.R comments for explanation of beta
kz <- k.gas/z.mix # K and Zmix are both vector of length nobs
beta <- exp(-kz) # This beta is for using the differential equation form
# Set first true value equal to first observation
alpha <- rep(0, length(do.obs))
alpha[1] <- do.obs[1]#Let's give this model some starting values
#R version of C loop
#for(i in 2:length(do.obs)){
# a1 <- c1*irr[i-1] + c2*log(wtr[i-1]) + kz[i-1]*do.sat[i-1]
# alpha[i] <- a1/kz[i-1] + -exp(-kz[i-1])*a1/kz[i-1] + beta[i-1]*alpha[i-1] # NOTE: beta==exp(-kz); kz=K/Zmix
#}
# Delegate the sequential state update to the compiled loop.
alpha <- mleLoopPE(alpha=alpha, doobs=do.obs, c1=c1, c2=c2, beta=beta, irr=irr, wtr=wtr, kz=kz, dosat=do.sat)
# Gaussian NLL of observations about the latent DO trajectory.
return(-sum(dnorm(do.obs, alpha, sd=sqrt(Q), log=TRUE), na.rm=TRUE))
}#End function
# ====================
# = mle NLL function =
# ====================
# Negative log-likelihood for the Observation Error model.
# Params: c(PAR coefficient, log-temperature coefficient, log error variance,
# initial DO value).  NOTE: `error.type` is accepted but never used in this body.
mleNllOE <- function(Params, do.obs, do.sat, k.gas, z.mix, irr, wtr, error.type){
c1 <- Params[1] #PAR coeff
c2 <- Params[2] #log(Temp) coeff
Q <- exp(Params[3]) # Variance of the process error
# See KalmanDO_smooth.R comments for explanation of beta
kz <- k.gas/z.mix # K and Zmix are both vector of length nobs
beta <- exp(-kz) # This beta is for using the differential equation form
# Set first true value equal to first observation
alpha <- rep(0, length(do.obs))
alpha[1] <- Params[4] #Free varying initial DO value
# Delegate the sequential state update to the compiled loop.
alpha <- mleLoopOE(alpha=alpha, doobs=do.obs, c1=c1, c2=c2, beta=beta, irr=irr, wtr=wtr, kz=kz, dosat=do.sat)
# Gaussian NLL of observations about the latent DO trajectory.
return(-sum(dnorm(do.obs, alpha, sd=sqrt(Q), log=TRUE), na.rm=TRUE))
}#End function
|
ea9e268689cc24c61b5f240dfba899e6 incrementer-enc06-uniform-depth-0.qdimacs 954 2405 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/incrementer-encoder/incrementer-enc06-uniform-depth-0/incrementer-enc06-uniform-depth-0.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 83 | r | ea9e268689cc24c61b5f240dfba899e6 incrementer-enc06-uniform-depth-0.qdimacs 954 2405 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dem2stl.R
\name{latlong2ellipsoid}
\alias{latlong2ellipsoid}
\title{Map latitude/longitude coordinates onto an ellipsoid. Vectorised.}
\usage{
latlong2ellipsoid(lat, long, a = 1, b = 1, c = 1)
}
\arguments{
\item{lat}{latitude coordinates (scalar or vector, in radians)}
\item{long}{longitude coordinates (same length as \code{lat}, in radians)}
\item{a}{semi-principal axis length}
\item{b}{semi-principal axis length}
\item{c}{semi-principal axis length}
}
\value{
a \code{length(lat)x3} matrix giving the lat/long coordinates points
on an ellipsoid with the given axes lengths in 3d space
}
\description{
Map latitude/longitude coordinates onto an ellipsoid. Vectorised.
}
| /man/latlong2ellipsoid.Rd | permissive | kevinstadler/dem2stl | R | false | true | 760 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dem2stl.R
\name{latlong2ellipsoid}
\alias{latlong2ellipsoid}
\title{Map latitude/longitude coordinates onto an ellipsoid. Vectorised.}
\usage{
latlong2ellipsoid(lat, long, a = 1, b = 1, c = 1)
}
\arguments{
\item{lat}{latitude coordinates (scalar or vector, in radians)}
\item{long}{longitude coordinates (same length as \code{lat}, in radians)}
\item{a}{semi-principal axis length}
\item{b}{semi-principal axis length}
\item{c}{semi-principal axis length}
}
\value{
a \code{length(lat)x3} matrix giving the lat/long coordinates points
on an ellipsoid with the given axes lengths in 3d space
}
\description{
Map latitude/longitude coordinates onto an ellipsoid. Vectorised.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{draw.funcEnrich.bar}
\alias{draw.funcEnrich.bar}
\title{Bar Plot for Gene Set Enrichment Analysis Result}
\usage{
draw.funcEnrich.bar(funcEnrich_res = NULL, top_number = 30,
Pv_col = "Ori_P", item_col = "Intersected_items", Pv_thre = 0.1,
display_genes = FALSE, name_col = "#Name", gs_cex = 0.5,
gene_cex = 0.5, main = "", bar_col = brewer.pal(8, "RdBu")[7],
eg_num = 5, pdf_file = NULL)
}
\arguments{
\item{funcEnrich_res}{data.frame, containing the result of functional enrichment analysis.
It is highly suggested to use \code{funcEnrich.Fisher} to create this data frame.
If users decided to prepare the data.frame on their own, please make sure the column names match the following parameters.}
\item{top_number}{numeric, the number of top enriched gene sets to be displayed. Default is 30.}
\item{Pv_col}{character, the name of the column in \code{funcEnrich_res} which contains P-value. Default is "Ori_P".}
\item{item_col}{character, the name of the column in \code{funcEnrich_res} which contains intersected genes collapsed with ";".
Default is "Intersected_items".}
\item{Pv_thre}{numeric, threshold of P-values. Genes or drivers with P-values lower than the threshold will be kept. Default is 0.1.}
\item{display_genes}{logical, if TRUE, the intersected genes will be displayed. Default is FALSE.}
\item{name_col}{character, the name of the column in \code{funcEnrich_res} which contains gene set name. Default is "#Name".}
\item{gs_cex}{numeric, giving the amount by which the text of gene sets names should be magnified relative to the default. Default is 0.5.}
\item{gene_cex}{numeric, giving the amount by which the text of gene symbols should be magnified relative to the default. Default is 0.5.}
\item{main}{character, an overall title for the plot.}
\item{bar_col}{character, the color code used to plot the bars. Default is brewer.pal(8,'RdBu')[7].}
\item{eg_num}{numeric, the number of intersected gene symbols to display on the right side of the bar. Default is 5.}
\item{pdf_file}{character, the file path to save as PDF file. If NULL, no PDF file will be saved. Default is NULL.}
}
\value{
Return a logical value. If TRUE, the plot has been created successfully.
}
\description{
\code{draw.funcEnrich.bar} draws a horizontal bar plot to visualize the gene set enrichment analysis.
Users can choose to display P-values and the top intersected genes from each gene set.
}
\examples{
analysis.par <- list()
analysis.par$out.dir.DATA <- system.file('demo1','driver/DATA/',package = "NetBID2")
NetBID.loadRData(analysis.par=analysis.par,step='ms-tab')
ms_tab <- analysis.par$final_ms_tab
sig_driver <- draw.volcanoPlot(dat=ms_tab,label_col='gene_label',
logFC_col='logFC.G4.Vs.others_DA',
Pv_col='P.Value.G4.Vs.others_DA',
logFC_thre=0.4,
Pv_thre=1e-7,
main='Volcano Plot for G4.Vs.others_DA',
show_label=FALSE,
label_type = 'origin',
label_cex = 0.5)
gs.preload(use_spe='Homo sapiens',update=FALSE)
res1 <- funcEnrich.Fisher(input_list=ms_tab[rownames(sig_driver),'geneSymbol'],
bg_list=ms_tab[,'geneSymbol'],
use_gs=c('H','C5'),Pv_thre=0.1,
Pv_adj = 'none')
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=5,
main='Function Enrichment for Top drivers',
gs_cex=0.4,gene_cex=0.5)
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=3,
main='Function Enrichment for Top drivers',
display_genes = TRUE,eg_num=3,
gs_cex=0.3,gene_cex=0.3)
\dontrun{
analysis.par <- list()
analysis.par$out.dir.DATA <- system.file('demo1','driver/DATA/',package = "NetBID2")
NetBID.loadRData(analysis.par=analysis.par,step='ms-tab')
ms_tab <- analysis.par$final_ms_tab
sig_driver <- draw.volcanoPlot(dat=ms_tab,label_col='gene_label',
logFC_col='logFC.G4.Vs.others_DA',
Pv_col='P.Value.G4.Vs.others_DA',
logFC_thre=0.4,
Pv_thre=1e-7,
main='Volcano Plot for G4.Vs.others_DA',
show_label=FALSE,
label_type = 'origin',
label_cex = 0.5)
gs.preload(use_spe='Homo sapiens',update=FALSE)
res1 <- funcEnrich.Fisher(input_list=ms_tab[rownames(sig_driver),'geneSymbol'],
bg_list=ms_tab[,'geneSymbol'],
use_gs=c('H','C5'),Pv_thre=0.1,Pv_adj = 'none')
analysis.par$out.dir.PLOT <- getwd() ## directory for saving the pdf files
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=30,
main='Function Enrichment for Top drivers',
pdf_file=sprintf('\%s/funcEnrich_bar_nogene.pdf',
analysis.par$out.dir.PLOT))
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=30,
main='Function Enrichment for Top drivers',
display_genes = TRUE,gs_cex=0.6,
pdf_file=sprintf('\%s/funcEnrich_bar_withgene.pdf',
analysis.par$out.dir.PLOT))
}
}
| /man/draw.funcEnrich.bar.Rd | permissive | WenboSheng/NetBID | R | false | true | 5,492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{draw.funcEnrich.bar}
\alias{draw.funcEnrich.bar}
\title{Bar Plot for Gene Set Enrichment Analysis Result}
\usage{
draw.funcEnrich.bar(funcEnrich_res = NULL, top_number = 30,
Pv_col = "Ori_P", item_col = "Intersected_items", Pv_thre = 0.1,
display_genes = FALSE, name_col = "#Name", gs_cex = 0.5,
gene_cex = 0.5, main = "", bar_col = brewer.pal(8, "RdBu")[7],
eg_num = 5, pdf_file = NULL)
}
\arguments{
\item{funcEnrich_res}{data.frame, containing the result of functional enrichment analysis.
It is highly suggested to use \code{funcEnrich.Fisher} to create this data frame.
If users decided to prepare the data.frame on their own, please make sure the column names match the following parameters.}
\item{top_number}{numeric, the number of top enriched gene sets to be displayed. Default is 30.}
\item{Pv_col}{character, the name of the column in \code{funcEnrich_res} which contains P-value. Default is "Ori_P".}
\item{item_col}{character, the name of the column in \code{funcEnrich_res} which contains intersected genes collapsed with ";".
Default is "Intersected_items".}
\item{Pv_thre}{numeric, threshold of P-values. Genes or drivers with P-values lower than the threshold will be kept. Default is 0.1.}
\item{display_genes}{logical, if TRUE, the intersected genes will be displayed. Default is FALSE.}
\item{name_col}{character, the name of the column in \code{funcEnrich_res} which contains gene set name. Default is "#Name".}
\item{gs_cex}{numeric, giving the amount by which the text of gene sets names should be magnified relative to the default. Default is 0.5.}
\item{gene_cex}{numeric, giving the amount by which the text of gene symbols should be magnified relative to the default. Default is 0.5.}
\item{main}{character, an overall title for the plot.}
\item{bar_col}{character, the color code used to plot the bars. Default is brewer.pal(8,'RdBu')[7].}
\item{eg_num}{numeric, the number of intersected gene symbols to display on the right side of the bar. Default is 5.}
\item{pdf_file}{character, the file path to save as PDF file. If NULL, no PDF file will be saved. Default is NULL.}
}
\value{
Return a logical value. If TRUE, the plot has been created successfully.
}
\description{
\code{draw.funcEnrich.bar} draws a horizontal bar plot to visualize the gene set enrichment analysis.
Users can choose to display P-values and the top intersected genes from each gene set.
}
\examples{
analysis.par <- list()
analysis.par$out.dir.DATA <- system.file('demo1','driver/DATA/',package = "NetBID2")
NetBID.loadRData(analysis.par=analysis.par,step='ms-tab')
ms_tab <- analysis.par$final_ms_tab
sig_driver <- draw.volcanoPlot(dat=ms_tab,label_col='gene_label',
logFC_col='logFC.G4.Vs.others_DA',
Pv_col='P.Value.G4.Vs.others_DA',
logFC_thre=0.4,
Pv_thre=1e-7,
main='Volcano Plot for G4.Vs.others_DA',
show_label=FALSE,
label_type = 'origin',
label_cex = 0.5)
gs.preload(use_spe='Homo sapiens',update=FALSE)
res1 <- funcEnrich.Fisher(input_list=ms_tab[rownames(sig_driver),'geneSymbol'],
bg_list=ms_tab[,'geneSymbol'],
use_gs=c('H','C5'),Pv_thre=0.1,
Pv_adj = 'none')
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=5,
main='Function Enrichment for Top drivers',
gs_cex=0.4,gene_cex=0.5)
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=3,
main='Function Enrichment for Top drivers',
display_genes = TRUE,eg_num=3,
gs_cex=0.3,gene_cex=0.3)
\dontrun{
analysis.par <- list()
analysis.par$out.dir.DATA <- system.file('demo1','driver/DATA/',package = "NetBID2")
NetBID.loadRData(analysis.par=analysis.par,step='ms-tab')
ms_tab <- analysis.par$final_ms_tab
sig_driver <- draw.volcanoPlot(dat=ms_tab,label_col='gene_label',
logFC_col='logFC.G4.Vs.others_DA',
Pv_col='P.Value.G4.Vs.others_DA',
logFC_thre=0.4,
Pv_thre=1e-7,
main='Volcano Plot for G4.Vs.others_DA',
show_label=FALSE,
label_type = 'origin',
label_cex = 0.5)
gs.preload(use_spe='Homo sapiens',update=FALSE)
res1 <- funcEnrich.Fisher(input_list=ms_tab[rownames(sig_driver),'geneSymbol'],
bg_list=ms_tab[,'geneSymbol'],
use_gs=c('H','C5'),Pv_thre=0.1,Pv_adj = 'none')
analysis.par$out.dir.PLOT <- getwd() ## directory for saving the pdf files
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=30,
main='Function Enrichment for Top drivers',
pdf_file=sprintf('\%s/funcEnrich_bar_nogene.pdf',
analysis.par$out.dir.PLOT))
draw.funcEnrich.bar(funcEnrich_res=res1,top_number=30,
main='Function Enrichment for Top drivers',
display_genes = TRUE,gs_cex=0.6,
pdf_file=sprintf('\%s/funcEnrich_bar_withgene.pdf',
analysis.par$out.dir.PLOT))
}
}
|
# First-touch / last-touch / linear heuristic attribution models.
#
# Data: data.frame of customer journeys.
# var_path: column holding the paths (channels separated by `sep`).
# var_conv: column holding total conversions per path.
# var_value: optional column with total conversion value; "0" is the sentinel
#            passed to the compiled backend when no value column is supplied.
# sep: separator between channels; must be a length-1 character vector.
heuristic_models=function(Data, var_path, var_conv, var_value=NULL, sep=">"){
 # ROBUSTNESS: the original only rejected length > 1, so character(0) slipped
 # through to the compiled code; require exactly one separator.
 if(length(sep)!=1){stop("Separator must have length 1")}
 if(is.null(var_value)){var_value="0"}
 res=.Call("heuristic_models_cpp", Data, var_path, var_conv, var_value, sep)
 return(as.data.frame(res))
}
# Markov-chain attribution model of the given order.
# NULL arguments are replaced with the sentinel values passed to the compiled
# backend: "0" for unset column names, 0 for unset nsim/max_step.
markov_model=function(Data, var_path, var_conv, var_value=NULL, var_null=NULL, order=1, nsim=NULL, max_step=NULL, out_more=FALSE, sep=">", seed=NULL){
if(length(sep)>1){stop("Separator must have length 1")}
# "0" marks "column not supplied" for the backend
if(is.null(var_value)){var_value="0"}
if(is.null(var_null)){var_null="0"}
# 0 marks "not set" for nsim and max_step
if(is.null(nsim)){nsim=0}
if(is.null(max_step)){max_step=0}
if(!is.null(seed)){set.seed(seed)}
res=.Call("markov_model_cpp", Data, var_path, var_conv, var_value, var_null, order, nsim, max_step, out_more, sep)
if(out_more==FALSE){
# default: return only the attribution table
return(as.data.frame(res))
}else{
# out_more=TRUE: also return the transition matrix and removal effects
return(list(result=as.data.frame(res$result),transition_matrix=as.data.frame(res$transition_matrix),removal_effects=as.data.frame(res$removal_effects)))
}
}
# Choose the Markov-model order by penalized AUC.
# Evaluates orders 1..max_order via the compiled backend, prints the suggested
# order, optionally plots penalized AUC vs. order, and returns the backend
# result augmented with a `suggested_order` element.
choose_order=function(Data, var_path, var_conv, var_null, max_order=10, sep=">", ncore=Inf, roc_npt=100, plot=TRUE){
 if(length(sep)!=1){stop("Separator must have length 1")}
 # Clamp the number of cores to [1, max_order].
 if(ncore==Inf){
  ncore=max_order
 }else if(ncore<1){
  ncore=1
 }
 res=.Call("choose_order_cpp", Data, var_path, var_conv, var_null, max_order, sep, ncore, roc_npt)
 # Drop orders reported as 0 by the backend.
 # NOTE(review): this subsetting assumes the order values are the integers
 # 1..max_order, so surviving values can double as subscripts -- confirm
 # against the backend if that contract ever changes.
 ck=res$auc$order[res$auc$order!=0]
 res$auc$order=res$auc$order[ck]
 res$auc$auc=res$auc$auc[ck]
 res$auc$pauc=res$auc$pauc[ck]
 # BUGFIX: a tie in penalized AUC used to yield a length > 1 best_order, which
 # breaks the scalar if() below (an error in R >= 4.2); take the smallest
 # order achieving the maximum.
 best_order=min(res$auc$order[res$auc$pauc==max(res$auc$pauc)])
 if(best_order==max_order){
  print(paste0("Suggested order not found. Try increasing max_order."))
 }else{
  print(paste0("Suggested order: ", best_order))
 }
 # IDIOM: test the logical flag directly instead of relying on implicit
 # logical -> character coercion ("TRUE" is still accepted for compatibility).
 if(isTRUE(plot) || identical(plot, "TRUE")){
  plot(res$auc$order,res$auc$pauc,type="l",xlab="order",ylab="penalized auc",main="PENALIZED AUC")
 }
 res[['suggested_order']]=best_order
 return(res)
}
# Multi-core Markov attribution model.
# nsim_start, nfold, conv_par and rate_step_sim are passed straight through to
# the compiled backend; NOTE(review): the convergence / resampling logic lives
# entirely in the C++ code, these values are not interpreted in R.
markov_model_mp=function(Data, var_path, var_conv, var_value=NULL, var_null=NULL, order=1, nsim_start=1e5, max_step=NULL, out_more=FALSE, sep=">", ncore=Inf, nfold=10, seed=0, conv_par=0.05, rate_step_sim=1.5, verbose=TRUE){
if(length(sep)>1){stop("Separator must have length 1")}
# "0" marks "column not supplied" for the backend; 0 marks unset max_step
if(is.null(var_value)){var_value="0"}
if(is.null(var_null)){var_null="0"}
if(is.null(max_step)){max_step=0}
if(!is.null(seed)){set.seed(seed)}
res=.Call("markov_model_mp_cpp", Data, var_path, var_conv, var_value, var_null, order, nsim_start, max_step, out_more, sep, ncore, nfold, seed, conv_par, rate_step_sim,verbose)
if(out_more==FALSE){
# default: return only the attribution table
return(as.data.frame(res))
}else{
# out_more=TRUE: also return the transition matrix and removal effects
return(list(result=as.data.frame(res$result),transition_matrix=as.data.frame(res$transition_matrix),removal_effects=as.data.frame(res$removal_effects)))
}
}
# Estimate the channel transition matrix for a Markov model of the given order.
# Returns a list with a channel lookup table (id -> channel_name) and the
# transition matrix, both as data frames.
transition_matrix=function(Data, var_path, var_conv, var_null, order=1, sep=">", flg_equal=TRUE){
 # ROBUSTNESS: require exactly one separator (original allowed character(0)).
 if(length(sep)!=1){stop("Separator must have length 1")}
 res=.Call("transition_matrix_cpp", Data, var_path, var_conv, var_null, order, sep, flg_equal)
 # IDIOM: seq_along() instead of 1:length(), which misbehaves on empty input.
 return(list(channels=data.frame(id=seq_along(res$channels),channel_name=res$channels),transition_matrix=as.data.frame(res$transition_matrix)))
}
# Automatic Markov attribution model: chooses the model order via
# choose_order() and then fits markov_model_mp() with that order.
auto_markov_model=function(Data, var_path, var_conv, var_null, var_value=NULL, max_order=10, roc_npt=100, plot=FALSE, nsim_start=1e5, max_step=NULL, out_more=FALSE, sep=">", ncore=Inf, nfold=10, seed=0, conv_par=0.05, rate_step_sim=1.5, verbose=TRUE){
 if(length(sep)!=1){stop("Separator must have length 1")}
 # "0" marks "column not supplied" for the backend; 0 marks unset max_step
 if(is.null(var_value)){var_value="0"}
 if(is.null(var_null)){var_null="0"}
 if(is.null(max_step)){max_step=0}
 if(!is.null(seed)){set.seed(seed)}
 # BUGFIX: the original assigned choose_order()'s result to `order` and then
 # read `res[['suggested_order']]` from an undefined variable `res`, so the
 # function failed on every call.  Keep the full result in its own variable
 # and extract the suggested order from it.
 order_res=choose_order(Data, var_path, var_conv, var_null, max_order=max_order, sep=sep, ncore=ncore, roc_npt=roc_npt, plot=plot)
 order=order_res[['suggested_order']]
 res=markov_model_mp(Data, var_path, var_conv, var_value=var_value, var_null=var_null, order=order, nsim_start=nsim_start, max_step=max_step, out_more=out_more, sep=sep, ncore=ncore, nfold=nfold, seed=seed, conv_par=conv_par, rate_step_sim=rate_step_sim, verbose=verbose)
 if(out_more==FALSE){
  return(as.data.frame(res))
 }else{
  return(list(result=as.data.frame(res$result),transition_matrix=as.data.frame(res$transition_matrix),removal_effects=as.data.frame(res$removal_effects)))
 }
}
| /R/ChannelAttribution.R | no_license | jburkhardt/ChannelAttribution | R | false | false | 4,298 | r |
# First-touch / last-touch / linear heuristic attribution models.
# Thin R wrapper around the compiled routine "heuristic_models_cpp".
# var_value: optional conversion-value column; the sentinel "0" tells the
# backend that no value column was supplied.
heuristic_models=function(Data, var_path, var_conv, var_value=NULL, sep=">"){
if(length(sep)>1){stop("Separator must have length 1")}
if(is.null(var_value)){var_value="0"}
res=.Call("heuristic_models_cpp", Data, var_path, var_conv, var_value, sep)
return(as.data.frame(res))
}
# Markov-chain attribution model of the given order.
# NULL arguments are replaced with the sentinel values passed to the compiled
# backend: "0" for unset column names, 0 for unset nsim/max_step.
markov_model=function(Data, var_path, var_conv, var_value=NULL, var_null=NULL, order=1, nsim=NULL, max_step=NULL, out_more=FALSE, sep=">", seed=NULL){
if(length(sep)>1){stop("Separator must have length 1")}
# "0" marks "column not supplied" for the backend
if(is.null(var_value)){var_value="0"}
if(is.null(var_null)){var_null="0"}
# 0 marks "not set" for nsim and max_step
if(is.null(nsim)){nsim=0}
if(is.null(max_step)){max_step=0}
if(!is.null(seed)){set.seed(seed)}
res=.Call("markov_model_cpp", Data, var_path, var_conv, var_value, var_null, order, nsim, max_step, out_more, sep)
if(out_more==FALSE){
# default: return only the attribution table
return(as.data.frame(res))
}else{
# out_more=TRUE: also return the transition matrix and removal effects
return(list(result=as.data.frame(res$result),transition_matrix=as.data.frame(res$transition_matrix),removal_effects=as.data.frame(res$removal_effects)))
}
}
# Choose the Markov-model order by penalized AUC.
# Evaluates orders up to max_order via the compiled backend, prints the
# suggested order, optionally plots penalized AUC vs. order, and returns the
# backend result augmented with a `suggested_order` element.
choose_order=function(Data, var_path, var_conv, var_null, max_order=10, sep=">", ncore=Inf, roc_npt=100, plot=TRUE){
if(length(sep)>1){stop("Separator must have length 1")}
# Clamp the number of cores to [1, max_order].
if(ncore==Inf){
ncore=max_order
}else if(ncore<1){
ncore=1
}
res=.Call("choose_order_cpp", Data, var_path, var_conv, var_null, max_order, sep, ncore, roc_npt)
# Drop orders reported as 0 by the backend.
# NOTE(review): the order *values* are reused as subscripts here -- this only
# works if orders are the integers 1..max_order; confirm against the backend.
ck=res$auc$order[res$auc$order!=0]
res$auc$order=res$auc$order[ck]
res$auc$auc=res$auc$auc[ck]
res$auc$pauc=res$auc$pauc[ck]
# NOTE(review): a tie in penalized AUC makes best_order length > 1, which
# breaks the scalar if() below (an error in R >= 4.2).
best_order=res$auc$order[res$auc$pauc==max(res$auc$pauc)]
if(best_order==max_order){
print(paste0("Suggested order not found. Try increasing max_order."))
}else{
print(paste0("Suggested order: ", res$auc$order[res$auc$pauc==max(res$auc$pauc)]))
}
# NOTE(review): relies on implicit logical -> character coercion (TRUE=="TRUE").
if(plot=="TRUE"){
plot(res$auc$order,res$auc$pauc,type="l",xlab="order",ylab="penalized auc",main="PENALIZED AUC")
}
res[['suggested_order']]=best_order
return(res)
}
# Multi-core Markov attribution model.
# nsim_start, nfold, conv_par and rate_step_sim are passed straight through to
# the compiled backend; NOTE(review): the convergence / resampling logic lives
# entirely in the C++ code, these values are not interpreted in R.
markov_model_mp=function(Data, var_path, var_conv, var_value=NULL, var_null=NULL, order=1, nsim_start=1e5, max_step=NULL, out_more=FALSE, sep=">", ncore=Inf, nfold=10, seed=0, conv_par=0.05, rate_step_sim=1.5, verbose=TRUE){
if(length(sep)>1){stop("Separator must have length 1")}
# "0" marks "column not supplied" for the backend; 0 marks unset max_step
if(is.null(var_value)){var_value="0"}
if(is.null(var_null)){var_null="0"}
if(is.null(max_step)){max_step=0}
if(!is.null(seed)){set.seed(seed)}
res=.Call("markov_model_mp_cpp", Data, var_path, var_conv, var_value, var_null, order, nsim_start, max_step, out_more, sep, ncore, nfold, seed, conv_par, rate_step_sim,verbose)
if(out_more==FALSE){
# default: return only the attribution table
return(as.data.frame(res))
}else{
# out_more=TRUE: also return the transition matrix and removal effects
return(list(result=as.data.frame(res$result),transition_matrix=as.data.frame(res$transition_matrix),removal_effects=as.data.frame(res$removal_effects)))
}
}
# Estimate the channel transition matrix for a Markov model of the given order.
# Returns a list with a channel lookup table (id -> channel_name) and the
# transition matrix, both as data frames.
transition_matrix=function(Data, var_path, var_conv, var_null, order=1, sep=">", flg_equal=TRUE){
if(length(sep)>1){stop("Separator must have length 1")}
res=.Call("transition_matrix_cpp", Data, var_path, var_conv, var_null, order, sep, flg_equal)
return(list(channels=data.frame(id=1:length(res$channels),channel_name=res$channels),transition_matrix=as.data.frame(res$transition_matrix)))
}
# Automatic Markov attribution model: chooses the model order via
# choose_order() and then fits markov_model_mp() with that order.
auto_markov_model=function(Data, var_path, var_conv, var_null, var_value=NULL, max_order=10, roc_npt=100, plot=FALSE, nsim_start=1e5, max_step=NULL, out_more=FALSE, sep=">", ncore=Inf, nfold=10, seed=0, conv_par=0.05, rate_step_sim=1.5, verbose=TRUE){
 if(length(sep)!=1){stop("Separator must have length 1")}
 # "0" marks "column not supplied" for the backend; 0 marks unset max_step
 if(is.null(var_value)){var_value="0"}
 if(is.null(var_null)){var_null="0"}
 if(is.null(max_step)){max_step=0}
 if(!is.null(seed)){set.seed(seed)}
 # BUGFIX: the original assigned choose_order()'s result to `order` and then
 # read `res[['suggested_order']]` from an undefined variable `res`, so the
 # function failed on every call.  Keep the full result in its own variable
 # and extract the suggested order from it.
 order_res=choose_order(Data, var_path, var_conv, var_null, max_order=max_order, sep=sep, ncore=ncore, roc_npt=roc_npt, plot=plot)
 order=order_res[['suggested_order']]
 res=markov_model_mp(Data, var_path, var_conv, var_value=var_value, var_null=var_null, order=order, nsim_start=nsim_start, max_step=max_step, out_more=out_more, sep=sep, ncore=ncore, nfold=nfold, seed=seed, conv_par=conv_par, rate_step_sim=rate_step_sim, verbose=verbose)
 if(out_more==FALSE){
  return(as.data.frame(res))
 }else{
  return(list(result=as.data.frame(res$result),transition_matrix=as.data.frame(res$transition_matrix),removal_effects=as.data.frame(res$removal_effects)))
 }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rtoolkit.R
\name{ENgetflowunits}
\alias{ENgetflowunits}
\title{Retrieve a code number indicating the units used to express all flow rates.}
\usage{
ENgetflowunits()
}
\value{
An integer, the code number indicating the flow units.
}
\description{
\code{ENgetflowunits} retrieves a code number indicating the units used to express all flow rates.
}
\note{
Flow units codes are as follows:
\tabular{lll}{
0 \tab = \code{EN_CFS} \tab cubic feet per second\cr
1 \tab = \code{EN_GPM} \tab gallons per minute\cr
2 \tab = \code{EN_MGD} \tab million gallons per day\cr
3 \tab = \code{EN_IMGD} \tab Imperial mgd\cr
4 \tab = \code{EN_AFD} \tab acre-feet per day\cr
5 \tab = \code{EN_LPS} \tab liters per second\cr
6 \tab = \code{EN_LPM} \tab liters per minute\cr
7 \tab = \code{EN_MLD} \tab million liters per day\cr
8 \tab = \code{EN_CMH} \tab cubic meters per hour\cr
9 \tab = \code{EN_CMD} \tab cubic meters per day
}
Flow units are specified in the \code{[OPTIONS]} section of the EPANET Input file.
Flow units in liters or cubic meters implies that metric units are used for all other quantities in
addition to flow. Otherwise US units are employed. (See Units of Measurement).
}
\examples{
# path to Net1.inp example file included with this package
inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp")
ENopen( inp, "Net1.rpt")
ENgetflowunits()
ENclose()
}
| /man/ENgetflowunits.Rd | no_license | cran/epanet2toolkit | R | false | true | 1,485 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rtoolkit.R
\name{ENgetflowunits}
\alias{ENgetflowunits}
\title{Retrieve a code number indicating the units used to express all flow rates.}
\usage{
ENgetflowunits()
}
\value{
An integer, the code number indicating the flow units.
}
\description{
\code{ENgetflowunits} retrieves a code number indicating the units used to express all flow rates.
}
\note{
Flow units codes are as follows:
\tabular{lll}{
0 \tab = \code{EN_CFS} \tab cubic feet per second\cr
1 \tab = \code{EN_GPM} \tab gallons per minute\cr
2 \tab = \code{EN_MGD} \tab million gallons per day\cr
3 \tab = \code{EN_IMGD} \tab Imperial mgd\cr
4 \tab = \code{EN_AFD} \tab acre-feet per day\cr
5 \tab = \code{EN_LPS} \tab liters per second\cr
6 \tab = \code{EN_LPM} \tab liters per minute\cr
7 \tab = \code{EN_MLD} \tab million liters per day\cr
8 \tab = \code{EN_CMH} \tab cubic meters per hour\cr
9 \tab = \code{EN_CMD} \tab cubic meters per day
}
Flow units are specified in the \code{[OPTIONS]} section of the EPANET Input file.
Flow units in liters or cubic meters implies that metric units are used for all other quantities in
addition to flow. Otherwise US units are employed. (See Units of Measurement).
}
\examples{
# path to Net1.inp example file included with this package
inp <- file.path( find.package("epanet2toolkit"), "extdata","Net1.inp")
ENopen( inp, "Net1.rpt")
ENgetflowunits()
ENclose()
}
|
#Seurat 3.0
# Load the 10x Genomics count matrices for the aged and young samples,
# merge them into one Seurat object, then run basic QC and normalisation.
library(dplyr)
library(Seurat)
pbmc.data.1 <- Read10X(data.dir = "./age/filtered_feature_bc_matrix")
pbmc.data.2 <- Read10X(data.dir = "./young/filtered_feature_bc_matrix")
# Prefix cell barcodes with the batch so cells stay distinguishable after merging.
colnames(pbmc.data.1)=paste0('Aged_',colnames(pbmc.data.1))
colnames(pbmc.data.2)=paste0('Young_',colnames(pbmc.data.2))
source('scRef.R')
# .simple_combine() (defined in scRef.R) merges the two matrices on shared genes.
DATA=.simple_combine(pbmc.data.1,pbmc.data.2)$combine
pbmc <- CreateSeuratObject(counts = DATA, project = "AGE", min.cells = 3, min.features = 200)
# Mitochondrial read fraction per cell; mouse mitochondrial genes start with "mt-".
pbmc[["percent.mt"]] <- PercentageFeatureSet(pbmc, pattern = "^mt-")
pdf('QC1.pdf',width=15,height=5)
VlnPlot(pbmc, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
dev.off()
# Drop low-quality cells and likely doublets by detected-gene count.
pbmc <- subset(pbmc, subset = nFeature_RNA > 200 & nFeature_RNA < 3000)
dim(pbmc)
#[1] 17970 9179
pbmc@meta.data$batch=pbmc@meta.data$orig.ident
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
pbmc <- FindVariableFeatures(pbmc, selection.method = "vst", nfeatures = 5000)
#all.genes <- rownames(pbmc)
#pbmc <- ScaleData(pbmc, features = VariableFeatures(object = pbmc), vars.to.regress = c("percent.mt","nCount_RNA","batch"))
#PCNUM=310
#pbmc <- RunPCA(pbmc, features = VariableFeatures(object = pbmc),npcs=PCNUM)
# Checkpoint the filtered + normalised object.
saveRDS(pbmc, 'RAW.RDS')
#################
# Batch-effect handling with BEER, followed by UMAP embeddings.
source('BEER_Seurat3.R')
EXP=as.matrix(pbmc@assays$RNA@counts)
# Split the raw counts into the two batches expected by BEER.
D1=EXP[,which(colnames(EXP) %in% rownames(pbmc@meta.data[which(pbmc@meta.data$batch=='Aged'),]) ) ]
D2=EXP[,which(colnames(EXP) %in% rownames(pbmc@meta.data[which(pbmc@meta.data$batch=='Young'),]) ) ]
mybeer=BEER(D1, D2, CNUM=20, PCNUM=200, GN=5000, CPU=4, MTTAG="^mt-", REGBATCH=FALSE)
saveRDS(mybeer, file='mybeer.RDS')
# Per-PC batch correlation; higher values indicate batch-robust PCs.
plot(mybeer$cor, xlab='PCs', ylab="COR", pch=16)
npbmc <- mybeer$seurat
#
# UMAP using only batch-robust PCs (cor > 0.7) ...
PCUSE <- which(mybeer$cor>0.7)#which(mybeer$cor>quantile(mybeer$cor,0.3) )
npbmc <- RunUMAP(object = npbmc, reduction.use='pca',dims = PCUSE, check_duplicates=FALSE)
# ... and using all 200 PCs; note this second embedding is the one kept below.
PCUSE <- c(1:200)
allpbmc <- RunUMAP(object = npbmc, reduction.use='pca',dims = PCUSE, check_duplicates=FALSE)
#npbmc=readRDS('pbmc.RDS')
#npbmc@meta.data$group=as.character(npbmc@active.ident)
#npbmc@meta.data$group[which(npbmc@meta.data$group=='SmallIntestine')]='CDC42KO'
pdf('BATCH.pdf',width=7,height=6)
DimPlot(allpbmc, reduction = "umap")
#DimPlot(npbmc, reduction = "umap")
#DimPlot(npbmc, reduction = "umap",group.by='map')
dev.off()
# NOTE(review): the all-PC embedding overwrites the batch-robust one here — confirm intended.
npbmc=allpbmc
npbmc@meta.data$batch=npbmc@meta.data$orig.ident
saveRDS(npbmc,file='pbmc.RDS')
##############################
#source('scdemix.R')
#get_file(npbmc,TMP='./')
#agg_local(USED_CUTOFF=1, NUMBER=150, TMP='./')
#agg_cloud(npbmc,TMP="./")
#com_local(USED_CUTOFF=1,TMP='./')
#pbmc=readRDS('demix.RDS')
#pdf("GROUP.pdf",width=8,height=3)
#FeaturePlot(pbmc, cols=c("lightgrey",'red'), features = c('CDC42HET','SmallIntestine'))
#dev.off()
# Cell-type annotation with scRef against two references:
# a coarse MCA intestine reference and a finer GSE92332 epithelial reference.
exp_ref1=read.table('MCA_INTEST.txt',header=T,row.names=1,sep='\t')
exp_ref2=read.table('GSE92332_intestin_mouse_ref.txt',header=T,row.names=1,sep='\t')
exp_sc=as.matrix(npbmc@assays$RNA@counts)
source('scRef.R')
OUT1=SCREF(exp_sc, exp_ref1, CPU=4, min_cell=10,print_step=10)
#OUT1=OUT
npbmc@meta.data$mca=OUT1$tag2[,2]
pdf("MCA.pdf",width=10,height=5)
DimPlot(npbmc, group.by='mca',label=T)
dev.off()
pbmc=npbmc
# Collapse the fine-grained MCA labels into broad manually-curated cell types.
pbmc@meta.data$type=pbmc@meta.data$mca
#pbmc@meta.data$type[which(pbmc@reductions$umap@cell.embeddings[,1] > -2.8)]='Epithelium'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Columnar.epithelium_5',
"Epithelial_4","Epithelial.cell_17","Epithelium_9","Epithelium.of.small.intestinal.villi_13" ,
"Epithelium.of.small.intestinal.villi_24","Epithelium.of.small.intestinal.villi_25",
"Epithelium.of.small.intestinal.villi_3","Stromal.cell_11","Paneth.cell_21" ,'Mast.cell_26' ,"Macrophage_19" ,
'Paneth.cell_21','S.cell_8' ,'S.cell_16' ))]='Epithelium'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Macrophage_6'))]='Macrophage'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('T.cell_10','T.cell_6',
'T.cell_12','T.cell_27','T.cell_7'))]='T.cell'
#pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('S.cell_16'))]='S.cell'
#pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Paneth.cell_21','S.cell_8'))]='Paneth.cell'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Dendrtic.cell_22'))]='Dendrtic.cell'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('B.cell_2'))]='B.cell'
pdf("MCA_EPI.pdf",width=10,height=5)
DimPlot(pbmc, group.by='type',label=T)
dev.off()
# Re-annotate only the epithelial cells with the finer GSE92332 reference.
exp_sc=as.matrix(npbmc@assays$RNA@counts)
exp_sc=exp_sc[,which(pbmc@meta.data$type=='Epithelium')]
OUT2=SCREF(exp_sc, exp_ref2, CPU=4, min_cell=10,print_step=10)
pbmc@meta.data$all=pbmc@meta.data$type
pbmc@meta.data$all[which(pbmc@meta.data$type=='Epithelium')]=OUT2$tag2[,2]
pdf("ALL.pdf",width=10,height=7)
DimPlot(pbmc, group.by='all',label=T)
dev.off()
#Aged=which(pbmc@active.ident=='CDC42HET')
#CDC42HET=rep(CDC42HET,pbmc@meta.data$CDC42HET[CDC42HET])
#CDC42KO=which(pbmc@active.ident=='CDC42KO')
#CDC42KO=rep(CDC42KO,pbmc@meta.data$CDC42KO[CDC42KO])
#write.table(sort(table(pbmc@meta.data$all[CDC42HET])),file='CDC42HET.txt',sep='\t',quote=F,col.names=F,row.names=F)
#write.table(sort(table(pbmc@meta.data$all[CDC42KO])),file='CDC42KO.txt',sep='\t',quote=F,col.names=F,row.names=F)
# Cell counts per final cell type and batch.
TAB=table(pbmc@meta.data$all,pbmc@meta.data$batch)
write.table(TAB,file='STAT.txt',sep='\t',quote=F,col.names=T,row.names=T)
saveRDS(pbmc, file='ALL.RDS')
pbmc@meta.data$batchall=paste0(pbmc@meta.data$batch,'_',pbmc@meta.data$all)
#######################
#For the Stem cell, please check "Lgr5, Ascl2, Slc12a2, Axin2, Olfm4, Axin2".
#For the TA (progenitor) cells, please check "Mki67, Cdk4, Mcm5, Mcm6, Pcna".
#For quiescent stem cell, please check "Bmi1, Hoxp, Lrig1, mTert".
# Violin plots of raw counts per batch+celltype for selected marker panels.
pdf("EXP1.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Wnt1','Wnt2','Wnt3','Wnt3a','Wnt8a','Wnt8b','Wnt10a','Wnt10b'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
pdf("EXP2.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Ascl2','Axin2','Olfm4','Ctnnb1'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
pdf("EXP3.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Wnt4','Wnt5a','Wnt5b','Wnt6','Wnt7a','Wnt7b','Wnt11'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
pdf("EXP4.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Ephb2', 'Myc', 'Cd44'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
###########
library(Seurat)
# Gene set of interest: Wnt ligands plus stem-cell / Wnt-target markers.
# The original line listed bare, unquoted gene symbols separated by commas,
# which is a syntax error in R; they must be quoted and collected with c().
GS1 <- c(
  "Wnt1", "Wnt2", "Wnt3", "Wnt3a", "Wnt8a", "Wnt8b", "Wnt10a", "Wnt10b",
  "Ascl2", "Axin2", "Olfm4", "Ctnnb1", "Ephb2", "CD44", "Myc"
)
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
library(Seurat)
# Overlap analysis of Lgr5 / Olfm4 / Ascl2 positive cells by age batch.
# Reload the annotated object and rebuild the combined batch+celltype label.
pbmc <- readRDS('ALL.RDS')
#pbmc@meta.data$batchall
pbmc@meta.data$batchall <- paste0(pbmc@meta.data$batch, '_', pbmc@meta.data$all)
DimPlot(pbmc, group.by = 'batchall')

# Helper: per-cell 0/1 indicator of raw count > 0 for one gene.
# Replaces three identical copy-pasted blocks in the original script.
gene_positive <- function(object, gene) {
  counts <- object@assays$RNA@counts[which(rownames(object) == gene), ]
  pos <- rep(0, length(counts))
  pos[which(counts > 0)] <- 1
  pos
}

LGR5 <- gene_positive(pbmc, 'Lgr5')
table(LGR5, pbmc@meta.data$batch)
OLFM4 <- gene_positive(pbmc, 'Olfm4')
table(OLFM4, pbmc@meta.data$batch)
ASCL2 <- gene_positive(pbmc, 'Ascl2')
table(ASCL2, pbmc@meta.data$batch)

BATCH <- pbmc@meta.data$batch
table(BATCH)
#Aged Young
#8184 7918

# Barcodes of marker-positive cells within each batch.
LGR5_YOUNG <- colnames(pbmc)[which(LGR5 == 1 & BATCH == 'Young')]
LGR5_AGED <- colnames(pbmc)[which(LGR5 == 1 & BATCH == 'Aged')]
OLFM4_YOUNG <- colnames(pbmc)[which(OLFM4 == 1 & BATCH == 'Young')]
OLFM4_AGED <- colnames(pbmc)[which(OLFM4 == 1 & BATCH == 'Aged')]
ASCL2_YOUNG <- colnames(pbmc)[which(ASCL2 == 1 & BATCH == 'Young')]
ASCL2_AGED <- colnames(pbmc)[which(ASCL2 == 1 & BATCH == 'Aged')]

# Three-way Venn diagrams of marker-positive cells, one per batch.
library(VennDiagram)
venn.diagram(x=list(LGR5=LGR5_YOUNG, OLFM4=OLFM4_YOUNG, ASCL2= ASCL2_YOUNG), "./YOUNG_VENN.png",
height = 450, width = 450, resolution =300, imagetype="png", col="white",
fill=c(colors()[616], colors()[38], colors()[200]), alpha=c(0.6, 0.6, 0.6),lwd=0.5, cex=0.5,cat.cex=0.5)
venn.diagram(x=list(LGR5=LGR5_AGED, OLFM4=OLFM4_AGED, ASCL2= ASCL2_AGED), "./AGED_VENN.png",
height = 450, width = 450, resolution =300, imagetype="png", col="white",
fill=c(colors()[616], colors()[38], colors()[200]), alpha=c(0.6, 0.6, 0.6),lwd=0.5, cex=0.5,cat.cex=0.5)

# Marker expression on the UMAP, overall and per batch.
pdf('Lgr5Olfm4Ascl2.pdf',width=7,height=7)
FeaturePlot(pbmc, features = c("Lgr5","Olfm4","Ascl2"))
dev.off()
FeaturePlot(pbmc, features = c("Lgr5","Olfm4","Ascl2"),cells=colnames(pbmc)[which(BATCH=='Young')])
FeaturePlot(pbmc, features = c("Lgr5","Olfm4","Ascl2"),cells=colnames(pbmc)[which(BATCH=='Aged')])
| /scRNAseq/AGE_20190530/go.R | no_license | jumphone/Bioinformatics | R | false | false | 9,427 | r | #Seurat 3.0
# NOTE(review): this section is a byte-for-byte duplicate of the pipeline
# above (an artifact of the dataset dump this file was extracted from).
library(dplyr)
library(Seurat)
# Load both 10x samples, tag barcodes by batch, merge, then QC + normalise.
pbmc.data.1 <- Read10X(data.dir = "./age/filtered_feature_bc_matrix")
pbmc.data.2 <- Read10X(data.dir = "./young/filtered_feature_bc_matrix")
colnames(pbmc.data.1)=paste0('Aged_',colnames(pbmc.data.1))
colnames(pbmc.data.2)=paste0('Young_',colnames(pbmc.data.2))
source('scRef.R')
# .simple_combine() (from scRef.R) merges the two matrices on shared genes.
DATA=.simple_combine(pbmc.data.1,pbmc.data.2)$combine
pbmc <- CreateSeuratObject(counts = DATA, project = "AGE", min.cells = 3, min.features = 200)
pbmc[["percent.mt"]] <- PercentageFeatureSet(pbmc, pattern = "^mt-")
pdf('QC1.pdf',width=15,height=5)
VlnPlot(pbmc, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
dev.off()
pbmc <- subset(pbmc, subset = nFeature_RNA > 200 & nFeature_RNA < 3000)
dim(pbmc)
#[1] 17970 9179
pbmc@meta.data$batch=pbmc@meta.data$orig.ident
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
pbmc <- FindVariableFeatures(pbmc, selection.method = "vst", nfeatures = 5000)
#all.genes <- rownames(pbmc)
#pbmc <- ScaleData(pbmc, features = VariableFeatures(object = pbmc), vars.to.regress = c("percent.mt","nCount_RNA","batch"))
#PCNUM=310
#pbmc <- RunPCA(pbmc, features = VariableFeatures(object = pbmc),npcs=PCNUM)
saveRDS(pbmc, 'RAW.RDS')
#################
# BEER batch correction, then UMAP embeddings.
source('BEER_Seurat3.R')
EXP=as.matrix(pbmc@assays$RNA@counts)
D1=EXP[,which(colnames(EXP) %in% rownames(pbmc@meta.data[which(pbmc@meta.data$batch=='Aged'),]) ) ]
D2=EXP[,which(colnames(EXP) %in% rownames(pbmc@meta.data[which(pbmc@meta.data$batch=='Young'),]) ) ]
mybeer=BEER(D1, D2, CNUM=20, PCNUM=200, GN=5000, CPU=4, MTTAG="^mt-", REGBATCH=FALSE)
saveRDS(mybeer, file='mybeer.RDS')
plot(mybeer$cor, xlab='PCs', ylab="COR", pch=16)
npbmc <- mybeer$seurat
#
PCUSE <- which(mybeer$cor>0.7)#which(mybeer$cor>quantile(mybeer$cor,0.3) )
npbmc <- RunUMAP(object = npbmc, reduction.use='pca',dims = PCUSE, check_duplicates=FALSE)
PCUSE <- c(1:200)
allpbmc <- RunUMAP(object = npbmc, reduction.use='pca',dims = PCUSE, check_duplicates=FALSE)
#npbmc=readRDS('pbmc.RDS')
#npbmc@meta.data$group=as.character(npbmc@active.ident)
#npbmc@meta.data$group[which(npbmc@meta.data$group=='SmallIntestine')]='CDC42KO'
pdf('BATCH.pdf',width=7,height=6)
DimPlot(allpbmc, reduction = "umap")
#DimPlot(npbmc, reduction = "umap")
#DimPlot(npbmc, reduction = "umap",group.by='map')
dev.off()
# NOTE(review): the all-PC embedding overwrites the batch-robust one — confirm intended.
npbmc=allpbmc
npbmc@meta.data$batch=npbmc@meta.data$orig.ident
saveRDS(npbmc,file='pbmc.RDS')
##############################
#source('scdemix.R')
#get_file(npbmc,TMP='./')
#agg_local(USED_CUTOFF=1, NUMBER=150, TMP='./')
#agg_cloud(npbmc,TMP="./")
#com_local(USED_CUTOFF=1,TMP='./')
#pbmc=readRDS('demix.RDS')
#pdf("GROUP.pdf",width=8,height=3)
#FeaturePlot(pbmc, cols=c("lightgrey",'red'), features = c('CDC42HET','SmallIntestine'))
#dev.off()
# scRef annotation against MCA (coarse) and GSE92332 (fine) references.
exp_ref1=read.table('MCA_INTEST.txt',header=T,row.names=1,sep='\t')
exp_ref2=read.table('GSE92332_intestin_mouse_ref.txt',header=T,row.names=1,sep='\t')
exp_sc=as.matrix(npbmc@assays$RNA@counts)
source('scRef.R')
OUT1=SCREF(exp_sc, exp_ref1, CPU=4, min_cell=10,print_step=10)
#OUT1=OUT
npbmc@meta.data$mca=OUT1$tag2[,2]
pdf("MCA.pdf",width=10,height=5)
DimPlot(npbmc, group.by='mca',label=T)
dev.off()
pbmc=npbmc
# Collapse fine MCA labels into broad curated cell types.
pbmc@meta.data$type=pbmc@meta.data$mca
#pbmc@meta.data$type[which(pbmc@reductions$umap@cell.embeddings[,1] > -2.8)]='Epithelium'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Columnar.epithelium_5',
"Epithelial_4","Epithelial.cell_17","Epithelium_9","Epithelium.of.small.intestinal.villi_13" ,
"Epithelium.of.small.intestinal.villi_24","Epithelium.of.small.intestinal.villi_25",
"Epithelium.of.small.intestinal.villi_3","Stromal.cell_11","Paneth.cell_21" ,'Mast.cell_26' ,"Macrophage_19" ,
'Paneth.cell_21','S.cell_8' ,'S.cell_16' ))]='Epithelium'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Macrophage_6'))]='Macrophage'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('T.cell_10','T.cell_6',
'T.cell_12','T.cell_27','T.cell_7'))]='T.cell'
#pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('S.cell_16'))]='S.cell'
#pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Paneth.cell_21','S.cell_8'))]='Paneth.cell'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('Dendrtic.cell_22'))]='Dendrtic.cell'
pbmc@meta.data$type[which(pbmc@meta.data$mca %in% c('B.cell_2'))]='B.cell'
pdf("MCA_EPI.pdf",width=10,height=5)
DimPlot(pbmc, group.by='type',label=T)
dev.off()
# Re-annotate only epithelial cells with the finer reference.
exp_sc=as.matrix(npbmc@assays$RNA@counts)
exp_sc=exp_sc[,which(pbmc@meta.data$type=='Epithelium')]
OUT2=SCREF(exp_sc, exp_ref2, CPU=4, min_cell=10,print_step=10)
pbmc@meta.data$all=pbmc@meta.data$type
pbmc@meta.data$all[which(pbmc@meta.data$type=='Epithelium')]=OUT2$tag2[,2]
pdf("ALL.pdf",width=10,height=7)
DimPlot(pbmc, group.by='all',label=T)
dev.off()
#Aged=which(pbmc@active.ident=='CDC42HET')
#CDC42HET=rep(CDC42HET,pbmc@meta.data$CDC42HET[CDC42HET])
#CDC42KO=which(pbmc@active.ident=='CDC42KO')
#CDC42KO=rep(CDC42KO,pbmc@meta.data$CDC42KO[CDC42KO])
#write.table(sort(table(pbmc@meta.data$all[CDC42HET])),file='CDC42HET.txt',sep='\t',quote=F,col.names=F,row.names=F)
#write.table(sort(table(pbmc@meta.data$all[CDC42KO])),file='CDC42KO.txt',sep='\t',quote=F,col.names=F,row.names=F)
# Cell counts per final cell type and batch.
TAB=table(pbmc@meta.data$all,pbmc@meta.data$batch)
write.table(TAB,file='STAT.txt',sep='\t',quote=F,col.names=T,row.names=T)
saveRDS(pbmc, file='ALL.RDS')
pbmc@meta.data$batchall=paste0(pbmc@meta.data$batch,'_',pbmc@meta.data$all)
#######################
#For the Stem cell, please check "Lgr5, Ascl2, Slc12a2, Axin2, Olfm4, Axin2".
#For the TA (progenitor) cells, please check "Mki67, Cdk4, Mcm5, Mcm6, Pcna".
#For quiescent stem cell, please check "Bmi1, Hoxp, Lrig1, mTert".
# Violin plots of raw counts per batch+celltype for selected marker panels.
pdf("EXP1.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Wnt1','Wnt2','Wnt3','Wnt3a','Wnt8a','Wnt8b','Wnt10a','Wnt10b'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
pdf("EXP2.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Ascl2','Axin2','Olfm4','Ctnnb1'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
pdf("EXP3.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Wnt4','Wnt5a','Wnt5b','Wnt6','Wnt7a','Wnt7b','Wnt11'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
pdf("EXP4.pdf",width=12,height=40)
VlnPlot(pbmc, ncol=1,
features = c('Ephb2', 'Myc', 'Cd44'),
group.by='batchall',slot = "counts", log =F) #TRUE)
dev.off()
###########
library(Seurat)
# Gene set of interest: Wnt ligands plus stem-cell / Wnt-target markers.
# The original line listed bare, unquoted gene symbols separated by commas,
# which is a syntax error in R; they must be quoted and collected with c().
GS1 <- c(
  "Wnt1", "Wnt2", "Wnt3", "Wnt3a", "Wnt8a", "Wnt8b", "Wnt10a", "Wnt10b",
  "Ascl2", "Axin2", "Olfm4", "Ctnnb1", "Ephb2", "CD44", "Myc"
)
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
####################################################################
library(Seurat)
# Overlap analysis of Lgr5 / Olfm4 / Ascl2 positive cells by age batch.
# Reload the annotated object and rebuild the combined batch+celltype label.
pbmc <- readRDS('ALL.RDS')
#pbmc@meta.data$batchall
pbmc@meta.data$batchall <- paste0(pbmc@meta.data$batch, '_', pbmc@meta.data$all)
DimPlot(pbmc, group.by = 'batchall')

# Helper: per-cell 0/1 indicator of raw count > 0 for one gene.
# Replaces three identical copy-pasted blocks in the original script.
gene_positive <- function(object, gene) {
  counts <- object@assays$RNA@counts[which(rownames(object) == gene), ]
  pos <- rep(0, length(counts))
  pos[which(counts > 0)] <- 1
  pos
}

LGR5 <- gene_positive(pbmc, 'Lgr5')
table(LGR5, pbmc@meta.data$batch)
OLFM4 <- gene_positive(pbmc, 'Olfm4')
table(OLFM4, pbmc@meta.data$batch)
ASCL2 <- gene_positive(pbmc, 'Ascl2')
table(ASCL2, pbmc@meta.data$batch)

BATCH <- pbmc@meta.data$batch
table(BATCH)
#Aged Young
#8184 7918

# Barcodes of marker-positive cells within each batch.
LGR5_YOUNG <- colnames(pbmc)[which(LGR5 == 1 & BATCH == 'Young')]
LGR5_AGED <- colnames(pbmc)[which(LGR5 == 1 & BATCH == 'Aged')]
OLFM4_YOUNG <- colnames(pbmc)[which(OLFM4 == 1 & BATCH == 'Young')]
OLFM4_AGED <- colnames(pbmc)[which(OLFM4 == 1 & BATCH == 'Aged')]
ASCL2_YOUNG <- colnames(pbmc)[which(ASCL2 == 1 & BATCH == 'Young')]
ASCL2_AGED <- colnames(pbmc)[which(ASCL2 == 1 & BATCH == 'Aged')]

# Three-way Venn diagrams of marker-positive cells, one per batch.
library(VennDiagram)
venn.diagram(x=list(LGR5=LGR5_YOUNG, OLFM4=OLFM4_YOUNG, ASCL2= ASCL2_YOUNG), "./YOUNG_VENN.png",
height = 450, width = 450, resolution =300, imagetype="png", col="white",
fill=c(colors()[616], colors()[38], colors()[200]), alpha=c(0.6, 0.6, 0.6),lwd=0.5, cex=0.5,cat.cex=0.5)
venn.diagram(x=list(LGR5=LGR5_AGED, OLFM4=OLFM4_AGED, ASCL2= ASCL2_AGED), "./AGED_VENN.png",
height = 450, width = 450, resolution =300, imagetype="png", col="white",
fill=c(colors()[616], colors()[38], colors()[200]), alpha=c(0.6, 0.6, 0.6),lwd=0.5, cex=0.5,cat.cex=0.5)

# Marker expression on the UMAP, overall and per batch.
pdf('Lgr5Olfm4Ascl2.pdf',width=7,height=7)
FeaturePlot(pbmc, features = c("Lgr5","Olfm4","Ascl2"))
dev.off()
FeaturePlot(pbmc, features = c("Lgr5","Olfm4","Ascl2"),cells=colnames(pbmc)[which(BATCH=='Young')])
FeaturePlot(pbmc, features = c("Lgr5","Olfm4","Ascl2"),cells=colnames(pbmc)[which(BATCH=='Aged')])
|
##' Calculate clusters for back trajectories
##'
##' This function carries out cluster analysis of HYSPLIT back
##' trajectories. The function is specifically designed to work with
##' the trajectories imported using the \code{openair}
##' \code{importTraj} function, which provides pre-calculated back
##' trajectories at specific receptor locations.
##'
##' Two main methods are available to cluster the back trajectories
##' using two different calculations of the distance matrix. The
##' default is to use the standard Euclidian distance between each
##' pair of trajectories. Also available is an angle-based distance
##' matrix based on Sirois and Bottenheim (1995). The latter method is
##' useful when the interest is the direction of the trajectories in
##' clustering.
##'
##' The distance matrix calculations are made in C++ for speed. For
##' data sets of up to 1 year both methods should be relatively fast,
##' although the \code{method = "Angle"} does tend to take much longer
##' to calculate. Further details of these methods are given in the
##' openair manual.
##'
##' @param traj An openair trajectory data frame resulting from the
##' use of \code{importTraj}.
##' @param method Method used to calculate the distance matrix for the
##' back trajectories. There are two methods available:
##' \dQuote{Euclid} and \dQuote{Angle}.
##' @param n.cluster Number of clusters to calculate.
##' @param plot Should a plot be produced?
##' @param type \code{type} determines how the data are split i.e.
##' conditioned, and then plotted. The default will produce a
##' single plot using the entire data. Type can be one of the
##' built-in types as detailed in \code{cutData} e.g.
##' \dQuote{season}, \dQuote{year}, \dQuote{weekday} and so on. For
##' example, \code{type = "season"} will produce four plots --- one
##' for each season. Note that the cluster calculations are
##' separately made of each level of "type".
##' @param cols Colours to be used for plotting. Options include
##' \dQuote{default}, \dQuote{increment}, \dQuote{heat},
##' \dQuote{jet} and \code{RColorBrewer} colours --- see the
##' \code{openair} \code{openColours} function for more details. For
##' user defined the user can supply a list of colour names
##' recognised by R (type \code{colours()} to see the full list). An
##' example would be \code{cols = c("yellow", "green", "blue")}
##' @param split.after For \code{type} other than \dQuote{default}
##' e.g. \dQuote{season}, the trajectories can either be calculated
##' for each level of \code{type} independently or extracted after
##' the cluster calculations have been applied to the whole data
##' set.
##' @param map.fill Should the base map be a filled polygon? Default
##' is to fill countries.
##' @param map.cols If \code{map.fill = TRUE} \code{map.cols} controls
##' the fill colour. Examples include \code{map.fill = "grey40"} and
##' \code{map.fill = openColours("default", 10)}. The latter colours
##' the countries and can help differentiate them.
##' @param map.alpha The transparency level of the filled map which
##' takes values from 0 (full transparency) to 1 (full opacity).
##' Setting it below 1 can help view trajectories, trajectory
##' surfaces etc. \emph{and} a filled base map.
##' @param projection The map projection to be used. Different map
##' projections are possible through the \code{mapproj} package.
##' See \code{?mapproject} for extensive details and information on
##' setting other parameters and orientation (see below).
##' @param parameters From the \code{mapproj} package. Optional
##' numeric vector of parameters for use with the projection
##' argument. This argument is optional only in the sense that
##' certain projections do not require additional parameters. If a
##' projection does require additional parameters, these must be
##' given in the parameters argument.
##' @param orientation From the \code{mapproj} package. An optional
##' vector c(latitude,longitude,rotation) which describes where the
##' "North Pole" should be when computing the projection. Normally
##' this is c(90,0), which is appropriate for cylindrical and conic
##' projections. For a planar projection, you should set it to the
##' desired point of tangency. The third value is a clockwise
##' rotation (in degrees), which defaults to the midrange of the
##' longitude coordinates in the map.
##' @param by.type The percentage of the total number of trajectories
##' is given for all data by default. Setting \code{by.type = TRUE}
##' will make each panel add up to 100.
##' @param origin If \code{TRUE} a filled circle dot is shown to mark the
##' receptor point.
##' @param ... Other graphical parameters passed onto
##' \code{lattice:levelplot} and \code{cutData}. Similarly, common
##' axis and title labelling options (such as \code{xlab},
##' \code{ylab}, \code{main}) are passed to \code{levelplot} via
##' \code{quickText} to handle routine formatting.
##' @export
##' @useDynLib openair, .registration = TRUE
##' @import cluster
##' @return Returns a list with two data components. The first
##' (\code{data}) contains the original data with the cluster
##' identified. The second (\code{results}) contains the data used
##' to plot the clustered trajectories.
##' @seealso \code{\link{importTraj}}, \code{\link{trajPlot}},
##' \code{\link{trajLevel}}
##' @author David Carslaw
##' @references
##'
##' Sirois, A. and Bottenheim, J.W., 1995. Use of backward
##' trajectories to interpret the 5-year record of PAN and O3 ambient
##' air concentrations at Kejimkujik National Park, Nova
##' Scotia. Journal of Geophysical Research, 100: 2867-2881.
##' @keywords methods
##' @examples
##' \dontrun{
##' ## import trajectories
##' traj <- importTraj(site = "london", year = 2009)
##' ## calculate clusters
##' clust <- trajCluster(traj, n.cluster = 5)
##' head(clust$data) ## note new variable 'cluster'
##' ## use different distance matrix calculation, and calculate by season
##' traj <- trajCluster(traj, method = "Angle", type = "season", n.cluster = 4)
##' }
trajCluster <- function(traj, method = "Euclid", n.cluster = 5,
                        plot = TRUE, type = "default",
                        cols = "Set1", split.after = FALSE, map.fill = TRUE,
                        map.cols = "grey40", map.alpha = 0.4,
                        projection = "lambert",
                        parameters = c(51, 51), orientation = c(90, 0, 0),
                        by.type = FALSE, origin = TRUE, ...) {
  ## silence R CMD check notes about non-standard-evaluation variables
  freq <- hour.inc <- default <- NULL

  ## map the user-facing method name onto the registered C routine name
  if (tolower(method) == "euclid") {
    method <- "distEuclid"
  } else {
    method <- "distAngle"
  }

  ## remove any missing lat/lon
  traj <- filter(traj, !is.na(lat), !is.na(lon))

  ## check to see if all back trajectories are the same length;
  ## clustering needs a rectangular lat/lon matrix, so if lengths differ
  ## keep only trajectories of the most common length
  traj <- group_by(traj, date) %>%
    mutate(traj_len = length(date))

  if (length(unique(traj$traj_len)) > 1) {
    warning("Trajectory lengths differ, using most common length.")
    ux <- unique(traj$traj_len)
    nmax <- ux[which.max(tabulate(match(traj$traj_len, ux)))]
    traj <- ungroup(traj) %>%
      filter(traj_len == nmax)
  }

  Args <- list(...)

  ## set graphics
  current.strip <- trellis.par.get("strip.background")
  current.font <- trellis.par.get("fontsize")

  ## reset font settings on exit (strip.background is read but not restored here)
  on.exit(trellis.par.set(
    fontsize = current.font
  ))

  ## label controls: line plot with thick lines unless overridden by the user
  Args$plot.type <- if ("plot.type" %in% names(Args)) {
    Args$plot.type
  } else {
    Args$plot.type <- "l"
  }

  Args$lwd <- if ("lwd" %in% names(Args)) {
    Args$lwd
  } else {
    Args$lwd <- 4
  }

  if ("fontsize" %in% names(Args)) {
    trellis.par.set(fontsize = list(text = Args$fontsize))
  }

  ## cluster one subset of trajectories with PAM and attach a 'cluster'
  ## column ("C1", "C2", ...) to each trajectory point
  calcTraj <- function(traj) {
    ## make sure ordered correctly
    traj <- traj[order(traj$date, traj$hour.inc), ]

    ## length of back trajectories
    traj <- group_by(traj, date) %>%
      mutate(len = length(date))

    ## find length of back trajectories
    ## 96-hour back trajectories with origin: length should be 97
    n <- max(abs(traj$hour.inc)) + 1

    traj <- subset(traj, len == n)
    len <- nrow(traj) / n

    ## lat/lon input matrices: one column per trajectory, one row per hour
    x <- matrix(traj$lon, nrow = n)
    y <- matrix(traj$lat, nrow = n)

    res <- matrix(0, nrow = len, ncol = len)

    ## pairwise trajectory distances computed in C for speed
    if (method == "distEuclid") {
      res <- .Call("distEuclid", x, y, res)
    }

    if (method == "distAngle") {
      res <- .Call("distAngle", x, y, res)
    }

    res[is.na(res)] <- 0 ## possible for some to be NA if trajectory does not move between two hours?

    dist.res <- as.dist(res)
    clusters <- pam(dist.res, n.cluster)
    cluster <- rep(clusters$clustering, each = n)
    traj$cluster <- as.character(paste("C", cluster, sep = ""))
    traj
  }

  ## this bit decides whether to separately calculate trajectories for each level of type
  if (split.after) {
    traj <- group_by(traj, default) %>%
      do(calcTraj(.))
    traj <- cutData(traj, type)
  } else {
    traj <- cutData(traj, type)
    traj <- traj %>%
      group_by(across(all_of(type))) %>%
      do(calcTraj(.))
  }

  ## trajectory origin (receptor), projected into map coordinates
  origin_xy <- head(subset(traj, hour.inc == 0), 1) ## origin
  tmp <- mapproject(
    x = origin_xy[["lon"]][1],
    y = origin_xy[["lat"]][1],
    projection = projection,
    parameters = parameters,
    orientation = orientation
  )
  receptor <- c(tmp$x, tmp$y)

  ## defaults for the returned components; previously these were only
  ## assigned inside the plotting branch, so plot = FALSE failed with
  ## "object 'plt' not found" when building the return list
  plt <- NULL
  resRtn <- NULL

  if (plot) {
    ## calculate the mean trajectories by cluster
    vars <- c("lat", "lon", "date", "cluster", "hour.inc", type)
    vars2 <- c("cluster", "hour.inc", type)

    agg <- select(traj, all_of(vars)) %>%
      group_by(across(all_of(vars2))) %>%
      summarise(across(everything(), mean))

    ## the data frame we want to return before it is transformed
    resRtn <- agg

    ## proportion of total trajectories in each cluster
    vars <- c(type, "cluster")

    clusters <- traj %>%
      group_by(across(all_of(vars))) %>%
      tally() %>%
      mutate(freq = round(100 * n / sum(n), 1))

    ## make each panel add up to 100
    if (by.type) {
      clusters <- clusters %>%
        group_by(across(all_of(type))) %>%
        mutate(freq = 100 * freq / sum(freq))

      clusters$freq <- round(clusters$freq, 1)
    }

    ## make sure date is in correct format (mean() above degrades the class)
    class(agg$date) <- class(traj$date)
    attr(agg$date, "tzone") <- "GMT"

    ## xlim and ylim set by user
    if (!"xlim" %in% names(Args)) {
      Args$xlim <- range(agg$lon)
    }

    if (!"ylim" %in% names(Args)) {
      Args$ylim <- range(agg$lat)
    }

    ## extent of data (or limits set by user) in degrees
    trajLims <- c(Args$xlim, Args$ylim)

    ## need *outline* of boundary for map limits
    Args <- setTrajLims(traj, Args, projection, parameters, orientation)

    ## transform data for map projection
    tmp <- mapproject(
      x = agg[["lon"]],
      y = agg[["lat"]],
      projection = projection,
      parameters = parameters,
      orientation = orientation
    )
    agg[["lon"]] <- tmp$x
    agg[["lat"]] <- tmp$y

    plot.args <- list(
      agg,
      x = "lon", y = "lat", group = "cluster",
      col = cols, type = type, map = TRUE, map.fill = map.fill,
      map.cols = map.cols, map.alpha = map.alpha,
      projection = projection, parameters = parameters,
      orientation = orientation, traj = TRUE, trajLims = trajLims,
      clusters = clusters, receptor = receptor,
      origin = origin
    )

    ## user-supplied ... options override the defaults
    plot.args <- listUpdate(plot.args, Args)

    ## plot
    plt <- do.call(scatterPlot, plot.args)
  }

  output <- list(plot = plt, data = traj, results = resRtn, call = match.call())
  class(output) <- "openair"

  invisible(output)
}
| /R/trajCluster.R | no_license | hrngultekin/openair | R | false | false | 11,776 | r | ##' Calculate clusters for back trajectories
##'
##' This function carries out cluster analysis of HYSPLIT back
##' trajectories. The function is specifically designed to work with
##' the trajectories imported using the \code{openair}
##' \code{importTraj} function, which provides pre-calculated back
##' trajectories at specific receptor locations.
##'
##' Two main methods are available to cluster the back trajectories
##' using two different calculations of the distance matrix. The
##' default is to use the standard Euclidian distance between each
##' pair of trajectories. Also available is an angle-based distance
##' matrix based on Sirois and Bottenheim (1995). The latter method is
##' useful when the interest is the direction of the trajectories in
##' clustering.
##'
##' The distance matrix calculations are made in C++ for speed. For
##' data sets of up to 1 year both methods should be relatively fast,
##' although the \code{method = "Angle"} does tend to take much longer
##' to calculate. Further details of these methods are given in the
##' openair manual.
##'
##' @param traj An openair trajectory data frame resulting from the
##' use of \code{importTraj}.
##' @param method Method used to calculate the distance matrix for the
##' back trajectories. There are two methods available:
##' \dQuote{Euclid} and \dQuote{Angle}.
##' @param n.cluster Number of clusters to calculate.
##' @param plot Should a plot be produced?
##' @param type \code{type} determines how the data are split i.e.
##' conditioned, and then plotted. The default will produce a
##' single plot using the entire data. Type can be one of the
##' built-in types as detailed in \code{cutData} e.g.
##' \dQuote{season}, \dQuote{year}, \dQuote{weekday} and so on. For
##' example, \code{type = "season"} will produce four plots --- one
##' for each season. Note that the cluster calculations are
##' separately made of each level of "type".
##' @param cols Colours to be used for plotting. Options include
##' \dQuote{default}, \dQuote{increment}, \dQuote{heat},
##' \dQuote{jet} and \code{RColorBrewer} colours --- see the
##' \code{openair} \code{openColours} function for more details. For
##' user defined the user can supply a list of colour names
##' recognised by R (type \code{colours()} to see the full list). An
##' example would be \code{cols = c("yellow", "green", "blue")}
##' @param split.after For \code{type} other than \dQuote{default}
##' e.g. \dQuote{season}, the trajectories can either be calculated
##' for each level of \code{type} independently or extracted after
##' the cluster calculations have been applied to the whole data
##' set.
##' @param map.fill Should the base map be a filled polygon? Default
##' is to fill countries.
##' @param map.cols If \code{map.fill = TRUE} \code{map.cols} controls
##'   the fill colour. Examples include \code{map.cols = "grey40"} and
##'   \code{map.cols = openColours("default", 10)}. The latter colours
##' the countries and can help differentiate them.
##' @param map.alpha The transparency level of the filled map which
##' takes values from 0 (full transparency) to 1 (full opacity).
##' Setting it below 1 can help view trajectories, trajectory
##' surfaces etc. \emph{and} a filled base map.
##' @param projection The map projection to be used. Different map
##' projections are possible through the \code{mapproj} package.
##' See\code{?mapproject} for extensive details and information on
##' setting other parameters and orientation (see below).
##' @param parameters From the \code{mapproj} package. Optional
##' numeric vector of parameters for use with the projection
##' argument. This argument is optional only in the sense that
##' certain projections do not require additional parameters. If a
##' projection does require additional parameters, these must be
##' given in the parameters argument.
##' @param orientation From the \code{mapproj} package. An optional
##' vector c(latitude,longitude,rotation) which describes where the
##' "North Pole" should be when computing the projection. Normally
##' this is c(90,0), which is appropriate for cylindrical and conic
##' projections. For a planar projection, you should set it to the
##' desired point of tangency. The third value is a clockwise
##' rotation (in degrees), which defaults to the midrange of the
##' longitude coordinates in the map.
##' @param by.type The percentage of the total number of trajectories
##' is given for all data by default. Setting \code{by.type = TRUE}
##' will make each panel add up to 100.
##' @param origin If \code{TRUE} a filled circle dot is shown to mark the
##' receptor point.
##' @param ... Other graphical parameters passed onto
##'   \code{lattice::levelplot} and \code{cutData}. Similarly, common
##' axis and title labelling options (such as \code{xlab},
##' \code{ylab}, \code{main}) are passed to \code{levelplot} via
##' \code{quickText} to handle routine formatting.
##' @export
##' @useDynLib openair, .registration = TRUE
##' @import cluster
##' @return Returns a list with two data components. The first
##'   (\code{data}) contains the original data with the cluster
##' identified. The second (\code{results}) contains the data used
##' to plot the clustered trajectories.
##' @seealso \code{\link{importTraj}}, \code{\link{trajPlot}},
##' \code{\link{trajLevel}}
##' @author David Carslaw
##' @references
##'
##' Sirois, A. and Bottenheim, J.W., 1995. Use of backward
##' trajectories to interpret the 5-year record of PAN and O3 ambient
##' air concentrations at Kejimkujik National Park, Nova
##' Scotia. Journal of Geophysical Research, 100: 2867-2881.
##' @keywords methods
##' @examples
##' \dontrun{
##' ## import trajectories
##' traj <- importTraj(site = "london", year = 2009)
##' ## calculate clusters
##' clust <- trajCluster(traj, n.cluster = 5)
##' head(clust$data) ## note new variable 'cluster'
##' ## use different distance matrix calculation, and calculate by season
##' traj <- trajCluster(traj, method = "Angle", type = "season", n.cluster = 4)
##' }
trajCluster <- function(traj, method = "Euclid", n.cluster = 5,
                        plot = TRUE, type = "default",
                        cols = "Set1", split.after = FALSE, map.fill = TRUE,
                        map.cols = "grey40", map.alpha = 0.4,
                        projection = "lambert",
                        parameters = c(51, 51), orientation = c(90, 0, 0),
                        by.type = FALSE, origin = TRUE, ...) {
  ## silence R CMD check notes about non-standard-evaluation variables
  freq <- hour.inc <- default <- NULL

  ## map the user-facing method name onto the registered C routine name
  if (tolower(method) == "euclid") {
    method <- "distEuclid"
  } else {
    method <- "distAngle"
  }

  ## remove any missing lat/lon
  traj <- filter(traj, !is.na(lat), !is.na(lon))

  ## check to see if all back trajectories are the same length; the distance
  ## matrix needs equal-length trajectories, so if lengths differ keep only
  ## trajectories of the most common (modal) length
  traj <- group_by(traj, date) %>%
    mutate(traj_len = length(date))

  if (length(unique(traj$traj_len)) > 1) {
    warning("Trajectory lengths differ, using most common length.")
    ux <- unique(traj$traj_len)
    nmax <- ux[which.max(tabulate(match(traj$traj_len, ux)))]
    traj <- ungroup(traj) %>%
      filter(traj_len == nmax)
  }

  Args <- list(...)

  ## set graphics; restore the user's lattice font settings on exit
  current.font <- trellis.par.get("fontsize")
  on.exit(trellis.par.set(
    fontsize = current.font
  ))

  ## label controls: default to thick line plots unless the user overrides
  Args$plot.type <- if ("plot.type" %in% names(Args)) Args$plot.type else "l"
  Args$lwd <- if ("lwd" %in% names(Args)) Args$lwd else 4

  if ("fontsize" %in% names(Args)) {
    trellis.par.set(fontsize = list(text = Args$fontsize))
  }

  ## cluster one set of back trajectories with pam() and attach a cluster
  ## label ("C1", "C2", ...) to every row of the trajectory data
  calcTraj <- function(traj) {
    ## make sure ordered correctly
    traj <- traj[order(traj$date, traj$hour.inc), ]

    ## length of back trajectories
    traj <- group_by(traj, date) %>%
      mutate(len = length(date))

    ## find length of back trajectories
    ## 96-hour back trajectories with origin: length should be 97
    n <- max(abs(traj$hour.inc)) + 1

    traj <- subset(traj, len == n)
    len <- nrow(traj) / n

    ## lat/lon input matrices: one column per trajectory
    x <- matrix(traj$lon, nrow = n)
    y <- matrix(traj$lat, nrow = n)

    res <- matrix(0, nrow = len, ncol = len)

    ## pairwise distance matrix, computed in C for speed
    if (method == "distEuclid") {
      res <- .Call("distEuclid", x, y, res)
    }
    if (method == "distAngle") {
      res <- .Call("distAngle", x, y, res)
    }

    res[is.na(res)] <- 0 ## possible for some to be NA if trajectory does not move between two hours?

    dist.res <- as.dist(res)
    clusters <- pam(dist.res, n.cluster)
    cluster <- rep(clusters$clustering, each = n)
    traj$cluster <- as.character(paste("C", cluster, sep = ""))
    traj
  }

  ## this bit decides whether to separately calculate trajectories for each level of type
  if (split.after) {
    traj <- group_by(traj, default) %>%
      do(calcTraj(.))
    traj <- cutData(traj, type)
  } else {
    traj <- cutData(traj, type)
    traj <- traj %>%
      group_by(across(all_of(type))) %>%
      do(calcTraj(.))
  }

  ## trajectory origin (receptor), projected into plotting coordinates
  origin_xy <- head(subset(traj, hour.inc == 0), 1) ## origin
  tmp <- mapproject(
    x = origin_xy[["lon"]][1],
    y = origin_xy[["lat"]][1],
    projection = projection,
    parameters = parameters,
    orientation = orientation
  )
  receptor <- c(tmp$x, tmp$y)

  ## mean trajectory by cluster (and by level of type). Calculated OUTSIDE
  ## the plotting branch so the results are returned even when plot = FALSE.
  ## BUG FIX: previously `resRtn` and `plt` were only created inside
  ## `if (plot)`, so trajCluster(..., plot = FALSE) failed with
  ## "object 'plt' not found" when building the return list.
  vars <- c("lat", "lon", "date", "cluster", "hour.inc", type)
  vars2 <- c("cluster", "hour.inc", type)

  agg <- select(traj, all_of(vars)) %>%
    group_by(across(all_of(vars2))) %>%
    summarise(across(everything(), mean))

  ## the data frame we want to return before it is transformed for plotting
  resRtn <- agg
  plt <- NULL

  if (plot) {
    ## proportion of total trajectories falling in each cluster
    vars <- c(type, "cluster")
    clusters <- traj %>%
      group_by(across(all_of(vars))) %>%
      tally() %>%
      mutate(freq = round(100 * n / sum(n), 1))

    ## make each panel add up to 100
    if (by.type) {
      clusters <- clusters %>%
        group_by(across(all_of(type))) %>%
        mutate(freq = 100 * freq / sum(freq))
      clusters$freq <- round(clusters$freq, 1)
    }

    ## make sure date is in correct format (mean() strips the class)
    class(agg$date) <- class(traj$date)
    attr(agg$date, "tzone") <- "GMT"

    ## xlim and ylim set by user
    if (!"xlim" %in% names(Args)) {
      Args$xlim <- range(agg$lon)
    }
    if (!"ylim" %in% names(Args)) {
      Args$ylim <- range(agg$lat)
    }

    ## extent of data (or limits set by user) in degrees
    trajLims <- c(Args$xlim, Args$ylim)

    ## need *outline* of boundary for map limits
    Args <- setTrajLims(traj, Args, projection, parameters, orientation)

    ## transform data for map projection
    tmp <- mapproject(
      x = agg[["lon"]],
      y = agg[["lat"]],
      projection = projection,
      parameters = parameters,
      orientation = orientation
    )
    agg[["lon"]] <- tmp$x
    agg[["lat"]] <- tmp$y

    plot.args <- list(
      agg,
      x = "lon", y = "lat", group = "cluster",
      col = cols, type = type, map = TRUE, map.fill = map.fill,
      map.cols = map.cols, map.alpha = map.alpha,
      projection = projection, parameters = parameters,
      orientation = orientation, traj = TRUE, trajLims = trajLims,
      clusters = clusters, receptor = receptor,
      origin = origin
    )

    ## user-supplied arguments override the defaults assembled above
    plot.args <- listUpdate(plot.args, Args)

    ## plot
    plt <- do.call(scatterPlot, plot.args)
  }

  output <- list(plot = plt, data = traj, results = resRtn, call = match.call())
  class(output) <- "openair"
  invisible(output)
}
|
##
## makeCacheMatrix: This function creates a special "matrix" object that can
##cache its inverse.
##
##Computing the inverse of a square matrix can be done with the solve function
##in R. For example, if X is a square invertible matrix, then solve(X) returns its
##inverse.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of accessor closures (set/get/setinverse/getinverse) that
## share one environment holding the matrix `xx` and its cached inverse.
makeCacheMatrix <- function(xx = matrix()) {
  inverse_matrix <- NULL  # cached inverse; NULL until computed
  set <- function(yy) {
    xx <<- yy
    inverse_matrix <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() xx
  setinverse <- function(inverse) inverse_matrix <<- inverse
  getinverse <- function() inverse_matrix
  ## BUG FIX: `get` was previously an unnamed list element
  ## (list(set=set, get, ...)), so x$get was NULL and callers such as
  ## cacheSolve() failed at x$get(). Name every element explicitly.
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
##2. cacheSolve: This function computes the inverse of the special "matrix"
##returned by makeCacheMatrix above. If the inverse has already been
##calculated (and the matrix has not changed), then the cachesolve should
##retrieve the inverse from the cache.
##
## assumption: The matrix supplied is always invertible
##
##
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse is already cached (and the matrix unchanged) the cached
## value is returned without recomputation; otherwise the inverse is
## computed with solve(), stored back in the cache, and returned.
##
## xx  - accessor list produced by makeCacheMatrix()
## ... - further arguments forwarded to solve()
## Assumption: the matrix supplied is always invertible.
cacheSolve <- function(xx, ...) {
  inverse_matrix <- xx$getinverse()
  if (!is.null(inverse_matrix)) {
    # cache hit: skip the (potentially expensive) solve()
    return(inverse_matrix)
  }
  data <- xx$get()
  # BUG FIX: `...` was accepted by the signature but silently dropped;
  # forward it to solve() as intended.
  inverse_matrix <- solve(data, ...)
  xx$setinverse(inverse_matrix)
  inverse_matrix
}
| /cachematrix.R | no_license | sjafr/ProgrammingAssignment2 | R | false | false | 1,357 | r | ##
## makeCacheMatrix: This function creates a special "matrix" object that can
##cache its inverse.
##
##Computing the inverse of a square matrix can be done with the solve function
##in R. For example, if X is a square invertible matrix, then solve(X) returns its
##inverse.
## Create a special "matrix" object: a list of closures sharing one
## environment that holds the matrix `xx` and a cached inverse.
## NOTE(review): `get` is put into the returned list WITHOUT a name
## (`list(set=set, get, ...)`), so `x$get` is NULL and cacheSolve()'s
## `x$get()` fails with "attempt to apply non-function" -- should be
## `get = get`.
makeCacheMatrix <- function(xx = matrix()) {
inverse_matrix <- NULL
set <- function(yy) {
xx <<- yy
inverse_matrix <<- NULL
}
get <- function() xx
setinverse <- function(inverse) inverse_matrix <<- inverse
getinverse <- function() inverse_matrix
list(set=set, get, setinverse=setinverse, getinverse=getinverse)
}
##2. cacheSolve: This function computes the inverse of the special "matrix"
##returned by makeCacheMatrix above. If the inverse has already been
##calculated (and the matrix has not changed), then the cachesolve should
##retrieve the inverse from the cache.
##
## assumption: The matrix supplied is always invertible
##
##
## Compute (or retrieve from cache) the inverse of the special "matrix"
## produced by makeCacheMatrix(). Assumes the matrix is invertible.
## NOTE(review): `...` is accepted but never forwarded to solve().
cacheSolve <- function(xx, ...) {
## return the cached inverse if it has already been computed
inverse_matrix <- xx$getinverse()
if(!is.null(inverse_matrix)) {
return(inverse_matrix)
}
data <- xx$get()
inverse_matrix <- solve(data)
xx$setinverse(inverse_matrix)
inverse_matrix
}
|
# Importing libraries
# For file structures
library(here)
# For tidying data
library(tidyverse)
# Importing Data
# Assuming that banned_items.csv has been downloaded from allData.zip and kept in a folder called `raw_data` within the current working directory.
# Download banned_items.csv from https://github.com/the-pudding/data/tree/master/dress_codes/banned_items.csv
# NOTE(review): here::here() resolves relative to the project root, so the
# script fails with a clear file-not-found error if raw_data/ is absent.
bannedItems <- read.csv(here::here("raw_data", "banned_items.csv"), stringsAsFactors = FALSE, header = TRUE)
# Calculate how many schools are in the dataset
# (result is a 1x1 data frame; the scalar is accessed later as nSchools$n)
nSchools <- bannedItems %>%
# remove any duplicates by looking for distinct state/school name combinations
distinct(state, schoolName) %>%
# count the remaining items
count(.)
# Check which schools ban body parts
body <- bannedItems %>%
# keep only banned body parts
filter(type == "body") %>%
# keep only distinct school names
distinct(schoolName)
# Percentage of schools that ban specific body parts
bodyPercent <- bannedItems %>%
# Keep only the schools that ban body parts
filter(schoolName %in% body$schoolName) %>%
# Keep only the banned body parts
filter(type == "body") %>%
# Count how many times each body part was banned
count(item, sort = TRUE) %>%
# Calculate the percentage (compared to total number of dress codes analyzed)
mutate(per = n/nSchools$n * 100) %>%
# Round the percentage to a whole number
mutate(per = round(per, 0)) %>%
# Keep only the percentages greater than or equal to 5%
filter(per >= 5) | /dress-codes/body_percentages.R | permissive | the-pudding/data | R | false | false | 1,503 | r | # Importing libraries
# For file structures
library(here)
# For tidying data
library(tidyverse)
# Importing Data
# Assuming that banned_items.csv has been downloaded from allData.zip and kept in a folder called `raw_data` within the current working directory.
# Download banned_items.csv from https://github.com/the-pudding/data/tree/master/dress_codes/banned_items.csv
# NOTE(review): this is a byte-for-byte repeat of the script immediately
# above; keeping both copies in one file re-runs every step twice.
bannedItems <- read.csv(here::here("raw_data", "banned_items.csv"), stringsAsFactors = FALSE, header = TRUE)
# Calculate how many schools are in the dataset
nSchools <- bannedItems %>%
# remove any duplicates by looking for distinct state/school name combinations
distinct(state, schoolName) %>%
# count the remaining items
count(.)
# Check which schools ban body parts
body <- bannedItems %>%
# keep only banned body parts
filter(type == "body") %>%
# keep only distinct school names
distinct(schoolName)
# Percentage of schools that ban specific body parts
bodyPercent <- bannedItems %>%
# Keep only the schools that ban body parts
filter(schoolName %in% body$schoolName) %>%
# Keep only the banned body parts
filter(type == "body") %>%
# Count how many times each body part was banned
count(item, sort = TRUE) %>%
# Calculate the percentage (compared to total number of dress codes analyzed)
mutate(per = n/nSchools$n * 100) %>%
# Round the percentage to a whole number
mutate(per = round(per, 0)) %>%
# Keep only the percentages greater than or equal to 5%
filter(per >= 5) |
# Load shared helpers; global.R is expected to define final() and sources()
# used below -- TODO confirm, they are not visible in this file.
source("global.R")
# Shiny server: fetch tweets matching a search term/date range (on button
# press) and summarise their sentiment.
shinyServer(function(input, output, session){
# echo the raw search text back to the UI
output$textual <- renderPrint({input$text})
# re-run the (slow) tweet fetch only when input$go changes; isolate()
# prevents edits to the other inputs alone from triggering a refetch
selectedData <- reactive({
input$go
isolate(final(input$text,input$tweets,as.character(input$dateRange[1]),
as.character(input$dateRange[2])))
})
# column 21 appears to hold the positive-minus-negative sentiment score
# per tweet -- NOTE(review): confirm against final()'s return in global.R
output$plot <- renderPlot({
plot(selectedData()[,21],type="l",col="dark green",xlab="# of Tweets",
ylab="Difference between total positive & negative")
})
# column 10 presumably holds the tweet source; sources() tabulates it --
# NOTE(review): confirm against global.R
output$table <- renderDataTable({
sources(selectedData()[,10])
})
})
| /server.R | no_license | luisecastro/nlp_twitter_sentiment_analysis | R | false | false | 626 | r | source("global.R")
# Duplicate copy of the server definition above; identical behaviour.
# Relies on global.R (sourced on the previous line) for final()/sources()
# -- TODO confirm.
shinyServer(function(input, output, session){
# echo the raw search text back to the UI
output$textual <- renderPrint({input$text})
# fetch tweets only when the "go" button is pressed (isolate() blocks
# reactivity on the other inputs)
selectedData <- reactive({
input$go
isolate(final(input$text,input$tweets,as.character(input$dateRange[1]),
as.character(input$dateRange[2])))
})
output$plot <- renderPlot({
plot(selectedData()[,21],type="l",col="dark green",xlab="# of Tweets",
ylab="Difference between total positive & negative")
})
output$table <- renderDataTable({
sources(selectedData()[,10])
})
})
|
library(quantreg.nonpar)
### Name: npqr
### Title: Nonparametric Series Quantile Regression
### Aliases: npqr
### Keywords: nonparametric regression htest
### ** Examples
data(india)
## Subset the data for speed
india.subset<-india[1:1000,]
formula=cheight~mbmi+breastfeeding+mage+medu+edupartner
## NOTE(review): the basis breaks and n below use the FULL india data set
## even though india.subset was created "for speed" -- confirm intended.
basis.bsp <- create.bspline.basis(breaks=quantile(india$cage,c(0:10)/10))
n=length(india$cage)
## B bootstrap/simulation draws; 500 can be slow on modest hardware
B=500
alpha=.95
taus=c(1:24)/25
print.taus=c(1:4)/5
## Inference on average growth rate
piv.bsp <- npqr(formula=formula, data=india.subset, basis=basis.bsp,
var="cage", taus=taus, print.taus=print.taus, B=B, nderivs=1,
average=1, alpha=alpha, process="pivotal", rearrange=FALSE,
uniform=TRUE, se="unconditional", printOutput=TRUE, method="fn")
## plot the pointwise estimate with its confidence band
yrange<-range(piv.bsp$CI)
xrange<-c(0,1)
plot(xrange,yrange,type="n",xlab="",ylab="Average Growth (cm/month)")
lines(piv.bsp$taus,piv.bsp$point.est)
lines(piv.bsp$taus,piv.bsp$CI[1,,1],col="blue")
lines(piv.bsp$taus,piv.bsp$CI[1,,2],col="blue")
title("Average Growth Rate")
## Estimation on average growth acceleration with no inference
piv.bsp.secondderiv <- npqr(formula=formula, data=india.subset,
basis=basis.bsp, var="cage", taus=taus, print.taus=print.taus,
B=B, nderivs=2, average=0, alpha=alpha, process="none",
se="conditional", rearrange=FALSE, printOutput=FALSE, method="fn")
## perspective surface of the second-derivative estimates
xsurf<-as.vector(piv.bsp.secondderiv$taus)
ysurf<-as.vector(piv.bsp.secondderiv$var.unique)
zsurf<-t(piv.bsp.secondderiv$point.est)
persp(xsurf, ysurf, zsurf, xlab="Quantile", ylab="Age (months)",
zlab="Growth Acceleration", ticktype="detailed", phi=30,
theta=120, d=5, col="green", shade=0.75, main="Growth Acceleration")
| /data/genthat_extracted_code/quantreg.nonpar/examples/npqr.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,684 | r | library(quantreg.nonpar)
### Name: npqr
### Title: Nonparametric Series Quantile Regression
### Aliases: npqr
### Keywords: nonparametric regression htest
### ** Examples
## NOTE(review): byte-for-byte repeat of the example above.
data(india)
## Subset the data for speed
india.subset<-india[1:1000,]
formula=cheight~mbmi+breastfeeding+mage+medu+edupartner
## NOTE(review): basis breaks and n use the full india data, not the subset
basis.bsp <- create.bspline.basis(breaks=quantile(india$cage,c(0:10)/10))
n=length(india$cage)
B=500
alpha=.95
taus=c(1:24)/25
print.taus=c(1:4)/5
## Inference on average growth rate
piv.bsp <- npqr(formula=formula, data=india.subset, basis=basis.bsp,
var="cage", taus=taus, print.taus=print.taus, B=B, nderivs=1,
average=1, alpha=alpha, process="pivotal", rearrange=FALSE,
uniform=TRUE, se="unconditional", printOutput=TRUE, method="fn")
yrange<-range(piv.bsp$CI)
xrange<-c(0,1)
plot(xrange,yrange,type="n",xlab="",ylab="Average Growth (cm/month)")
lines(piv.bsp$taus,piv.bsp$point.est)
lines(piv.bsp$taus,piv.bsp$CI[1,,1],col="blue")
lines(piv.bsp$taus,piv.bsp$CI[1,,2],col="blue")
title("Average Growth Rate")
## Estimation on average growth acceleration with no inference
piv.bsp.secondderiv <- npqr(formula=formula, data=india.subset,
basis=basis.bsp, var="cage", taus=taus, print.taus=print.taus,
B=B, nderivs=2, average=0, alpha=alpha, process="none",
se="conditional", rearrange=FALSE, printOutput=FALSE, method="fn")
xsurf<-as.vector(piv.bsp.secondderiv$taus)
ysurf<-as.vector(piv.bsp.secondderiv$var.unique)
zsurf<-t(piv.bsp.secondderiv$point.est)
persp(xsurf, ysurf, zsurf, xlab="Quantile", ylab="Age (months)",
zlab="Growth Acceleration", ticktype="detailed", phi=30,
theta=120, d=5, col="green", shade=0.75, main="Growth Acceleration")
|
\name{stiff}
\alias{stiff}
\docType{data}
\title{
The Board Stiffness Dataset
}
\description{
Four measures of stiffness of 30 boards are available. The first
measure of stiffness is obtained by sending a shock wave down the
board, the second measure is obtained by vibrating the board, and the
remaining two are obtained from static tests.
}
\usage{data(stiff)}
\format{
A data frame with 30 observations on the following 4 variables.
\describe{
\item{\code{x1}}{first measure of stiffness is obtained by sending a shock
wave down the board}
\item{\code{x2}}{second measure is obtained by vibrating the board}
\item{\code{x3}}{third measure is obtained by a static test}
\item{\code{x4}}{fourth measure is obtained by a static test}
}
}
\references{
Johnson, R.A., and Wichern, D.W. (1982-2007). Applied Multivariate
Statistical Analysis, 6e. Pearson Education.
Tattar, et al. (2016). A Course in Statistics with R. J. Wiley.
}
\examples{
data(stiff)
summary(stiff)
}
\keyword{datasets}
| /man/stiff.Rd | no_license | cran/SMLoutliers | R | false | false | 1,042 | rd | \name{stiff}
\alias{stiff}
\docType{data}
\title{
The Board Stiffness Dataset
}
\description{
Four measures of stiffness of 30 boards are available. The first
measure of stiffness is obtained by sending a shock wave down the
board, the second measure is obtained by vibrating the board, and the
remaining two are obtained from static tests.
}
\usage{data(stiff)}
\format{
A data frame with 30 observations on the following 4 variables.
\describe{
\item{\code{x1}}{first measure of stiffness is obtained by sending a shock
wave down the board}
\item{\code{x2}}{second measure is obtained by vibrating the board}
\item{\code{x3}}{third measure is obtained by a static test}
\item{\code{x4}}{fourth measure is obtained by a static test}
}
}
\references{
Johnson, R.A., and Wichern, D.W. (1982-2007). Applied Multivariate
Statistical Analysis, 6e. Pearson Education.
Tattar, et al. (2016). A Course in Statistics with R. J. Wiley.
}
\examples{
data(stiff)
summary(stiff)
}
\keyword{datasets}
|
## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of accessors (set/get/setInverse/getInverse) closing over
## the matrix `x` and its cached inverse `inv`.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until computed
  set <- function(y) {
    x <<- y
    inv <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  ## BUG FIX: renamed from `setinverse` so the name matches getInverse and
  ## the x$setInverse() call in the companion cacheSolve(), which previously
  ## failed ($ on lists is case-sensitive, so x$setInverse was NULL).
  setInverse <- function(solveMatrix) inv <<- solveMatrix
  getInverse <- function() inv
  ## BUG FIX: `get` was an unnamed list element, so x$get was NULL for
  ## callers; name every element explicitly.
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
## Compute the inverse of the special "matrix" returned by makeCacheMatrix().
## If the inverse has already been calculated (and the matrix has not
## changed) the cached value is retrieved instead of being recomputed.
##
## x   - accessor list produced by makeCacheMatrix()
## ... - further arguments forwarded to solve()
## Assumption: the matrix supplied is always invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  # BUG FIX: `...` was accepted by the signature but never forwarded
  inv <- solve(data, ...)
  x$setInverse(inv)
  inv
}
| /cachematrix.R | no_license | Daisy-bbfish/ProgrammingAssignment2 | R | false | false | 792 | r | ## A pair of functions that cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" object that caches its inverse.
## NOTE(review): two bugs in this copy -- (1) `get` is an unnamed list
## element, so x$get is NULL; (2) the setter is named `setinverse` while
## the companion cacheSolve() calls x$setInverse() ($ is case-sensitive),
## so caching the result fails.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(solveMatrix) inv <<- solveMatrix
getInverse <- function() inv
list(set = set, get, setinverse = setinverse, getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
## Compute (or retrieve from cache) the inverse of the special "matrix".
## NOTE(review): calls x$setInverse(), which the companion makeCacheMatrix()
## in this copy does not export (it exports `setinverse`), so the cache is
## never populated; `...` is also accepted but never forwarded to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setInverse(inv)
inv
}
|
# data_prep.R for Tourism & Recreation - master data_prep.R file
# Jul2015: Casey O'Hara - combining multiple scripts and data sets into one
# master script.
# Supplemental scripts are located in TourismRecreation/R.
# Outputs:
# * tr_unemployment.csv
# * rgn_id, year, percent
# * Percent unemployment (0-100%)
# * tr_sustainability.csv
# * rgn_id, score (no year value - only current year)
# * TTCI score, not normalized (1-7)
# * tr_jobs_tourism.csv
# * rgn_id, year, jobs_ct (individuals)
# * Number of jobs, direct employment in tourism
# * tr_jobs_pct_tourism.csv
# * rgn_id, year, jobs_pct
# * Percent of direct tourism jobs
# * tr_jobs_total.csv
# * rgn_id, year, count (individuals)
# * Total jobs
##############################################################################=
### setup -----
##############################################################################=
library(ohicore) # devtools::install_github('ohi-science/ohicore') # may require uninstall and reinstall
# NOTE(review): setwd() in a shared script is fragile -- it assumes every
# user's checkout lives at this exact path; prefer project-relative paths.
setwd('~/github/ohiprep')
# common.R presumably defines dir_neptune_data used below -- TODO confirm
source('src/R/common.R')
library(readr)
goal <- 'globalprep/TourismRecreation'
scenario <- 'v2015'
dir_anx <- file.path(dir_neptune_data, 'git-annex', goal)
dir_git <- file.path('~/github/ohiprep', goal)
dir_data <- file.path(dir_git, scenario, 'data')
dir_int <- file.path(dir_git, scenario, 'intermediate')
# dir_git points to TourismRecreation; dir_data and dir_int point to v201X/data and v201x/intermediate
# within the TourismRecreation directory.
# tr_fxns.R supplies tr_prep_data(), tr_prep_layers(), tr_assemble_layers(),
# gapfill_flags(), gdp_gapfill(), s_gapfill_r2_r1() used below.
source(file.path(dir_git, 'R/tr_fxns.R'))
##############################################################################=
### Process data and layers ----
##############################################################################=
# raw input files (World Bank, WEF, WTTC), keyed by short layer names
tr_data_files <- c(unem = file.path(dir_int, 'wb_rgn_uem.csv'),
jobs_tot = file.path(dir_int, 'wb_rgn_tlf.csv'),
sust = file.path(dir_int, 'wef_ttci_2015.csv'),
jobs_tour = file.path(dir_int, 'wttc_empd_rgn.csv'))
tr_prep_data(tr_data_files, reload = TRUE)
### Process each data set and saves tidied data in v201X/intermediate directory.
tr_layers <- c(unem = file.path(dir_int, 'tr_pregap_unemployment.csv'),
jobs_tot = file.path(dir_int, 'tr_pregap_jobs_total.csv'),
sust = file.path(dir_int, 'tr_pregap_sustainability.csv'),
jobs_tour = file.path(dir_int, 'tr_pregap_jobs_tourism.csv'),
jobs_pct_tour = file.path(dir_int, 'tr_pregap_jobs_pct_tourism.csv'))
tr_prep_layers(tr_layers, tr_data_files, reload = TRUE)
### Separate out just the model variables and save these to v201X/data directory,
### ready for use in the toolbox.
##############################################################################=
### Assembling the data from layers -----
##############################################################################=
# NOTE(review): most recent data year is hard-coded; update per scenario
year_max <- 2013
tr_data_raw <- tr_assemble_layers(tr_layers)
### Attach georegions and per-capita GDP info for various gapfilling
# NOTE(review): relative paths assume ohi-global is checked out as a sibling
# of the current working directory -- TODO confirm
georegions <- read.csv('../ohi-global/eez2013/layers/rgn_georegions.csv', na.strings='')
georegion_labels <- read.csv('../ohi-global/eez2013/layers/rgn_georegion_labels.csv')
tr_data_raw <- tr_data_raw %>%
left_join(georegion_labels %>%
spread(level, label) %>%
select(-r0),
by = 'rgn_id') %>%
filter(rgn_id != 255) # ditch disputed regions...
gdppcppp <- read.csv(file.path(dir_int, 'wb_rgn_gdppcppp.csv')) %>%
select(rgn_id, year, pcgdp = intl_dollar)
tr_data_raw <- tr_data_raw %>%
left_join(gdppcppp, by = c('rgn_id', 'year'))
### Add gapfill flag variable
tr_data_raw <- tr_data_raw %>% gapfill_flags()
write_csv(tr_data_raw, file.path(dir_int, 'tr_data_raw.csv'))
##############################################################################=
### Gapfilling ----
##############################################################################=
### Gapfill S using r1 and/or r2 regional data and PPP-adjusted per-capita GDP
# re-read from disk so the gapfilling stage can be re-run independently
tr_data_raw <- read.csv(file.path(dir_int, 'tr_data_raw.csv'), stringsAsFactors = FALSE)
tr_data <- tr_data_raw %>% gdp_gapfill()
### gap fill any missing GDP values, so the TTCI Score gapfill can use gdp as a proxy
tr_data <- s_gapfill_r2_r1(tr_data)
# Apply only the 2013 S_score to all years - so it's consistent, as we only have
# actual scores from the current year. NOTE: doesn't change gapfill flag for past years...
tr_data <- tr_data %>%
select(-S_score) %>%
left_join(tr_data %>%
filter(year == year_max) %>%
select(rgn_id, S_score),
by = 'rgn_id')
### Gapfill Ep using regional averages
# the three str_replace() calls rewrite the per-field gapfill flag string
# when the r2-average replacement is used for Ep
tr_data <- tr_data %>%
group_by(r2, year) %>%
mutate(E_mdl2 = mean(Ep, na.rm = TRUE),
gaps = ifelse(is.na(Ep) & !is.na(E_mdl2), str_replace(gaps, 'E', 'r'), gaps),
gaps = ifelse(is.na(Ep) & !is.na(E_mdl2), str_replace(gaps, 'U', '*'), gaps),
gaps = ifelse(is.na(Ep) & !is.na(E_mdl2), str_replace(gaps, 'L', '*'), gaps),
Ep = ifelse(is.na(Ep), E_mdl2, Ep)) %>%
select(-E_mdl2) %>%
ungroup()
# summary(lm(E_mdl2 ~ Ep, data = tr_data)) # R^2 = .3124 before replacement
#
# library(ggplot2)
# ggplot(data1 %>% filter(year == year_max),
# aes(x = Ep, y = E_mdl2, color = r1)) +
# geom_point() +
# geom_abline(slope = 1, intercept = 0, color = 'red') +
# labs(x = '% tourism employment, reported',
# y = '% tourism employment, rgn avg',
# title = 'Comparison of Tourism/Total jobs')
### write full gapfilled data set to the intermediate directory
write_csv(tr_data, file.path(dir_int, 'tr_data_processed.csv'))
### write layers, post-gapfill, to data directory-----
write_csv(tr_data %>% select(rgn_id, year, U), file.path(dir_data, 'tr_unemployment.csv'))
write_csv(tr_data %>% select(rgn_id, year, Ed), file.path(dir_data, 'tr_jobs_tourism.csv'))
write_csv(tr_data %>% select(rgn_id, year, Ep), file.path(dir_data, 'tr_jobs_pct_tourism.csv'))
write_csv(tr_data %>% select(rgn_id, year, L), file.path(dir_data, 'tr_jobs_total.csv'))
write_csv(tr_data %>% filter(year == year_max) %>% select(rgn_id, S_score), file.path(dir_data, 'tr_sustainability.csv'))
### NOTE: only writing most recent year of sustainability index - use same value across all years.
write_csv(tr_data %>% select(rgn_id, year, gaps), file.path(dir_data, 'tr_gapfill.csv'))
##############################################################################=
### Run model (transfer to functions.R) -----
##############################################################################=
### Load data layers, reassemble, and process them through the model calculations.
# rgn_names <- read_csv('~/github/ohi-global/eez2013/layers/rgn_global.csv', stringsAsFactors = FALSE) %>%
# rename(rgn_name = label)
#
# tr_data1 <- read_csv(file.path(dir_data, 'tr_jobs_pct_tourism.csv')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_jobs_tourism.csv')), by = c('rgn_id', 'year')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_unemployment.csv')), by = c('rgn_id', 'year')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_jobs_total.csv')), by = c('rgn_id', 'year')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_sustainability.csv')), by = c('rgn_id')) %>%
# ### NOTE: just using most recent sustainability year value across all years, since TTCI data
# ### is only available for most recent year
# full_join(read_csv(file.path(dir_data, 'tr_gapfill.csv')), by = c('rgn_id', 'year')) %>%
# full_join(rgn_names, by = 'rgn_id') %>%
# filter(year <= year_max)
#
# tr_model <- tr_calc_model(tr_data1) %>%
# filter(year <= year_max & year > year_max - 5)
# # five data years, four intervals
#
#
#
# ### Write the un-normalized model calculations.
# write_csv(tr_model, file.path(dir_int, 'tr_model.csv'))
#
# # regions with Travel Warnings at http://travel.state.gov/content/passports/english/alertswarnings.html
# rgn_travel_warnings <- read.csv(file.path(dir_data, 'tr_travelwarnings_2015.csv'), stringsAsFactors = F) %>%
# select(rgn_name) %>%
# left_join(rgn_names, by = 'rgn_name') %>%
# filter(!is.na(rgn_id))
# # TODO: check if regions with travel warnings are gapfilled (manually checked for 2013)
# tr_model <- tr_model %>%
# filter(!rgn_id %in% rgn_travel_warnings$rgn_id) %>%
# bind_rows(tr_model %>%
# filter(rgn_id %in% rgn_travel_warnings$rgn_id) %>%
# mutate(
# Xtr = 0.1 * Xtr))
#
# ### Calculate status based on quantile reference
# pct_ref <- 90 # the threshold for quantile where status score = 1.0
#
# tr_scores <- tr_model %>%
# select(rgn_id, S_score, year, rgn_name, Xtr, gaps) %>%
# left_join(tr_model %>%
# group_by(year) %>%
# summarize(Xtr_q = quantile(Xtr, probs = pct_ref/100, na.rm = TRUE)),
# by = 'year') %>%
# mutate(
# Xtr_rq = ifelse(Xtr / Xtr_q > 1, 1, Xtr / Xtr_q)) # rescale to qth percentile, cap at 1
#
# write_csv(tr_scores, file.path(dir_int, 'tr_scores.csv'))
#
| /globalprep/TourismRecreation/data_prep.R | no_license | yan-cri/ohiprep | R | false | false | 9,168 | r | # data_prep.R for Tourism & Recreation - master data_prep.R file
# Jul2015: Casey O'Hara - combining multiple scripts and data sets into one
# master script.
# Supplemental scripts are located in TourismRecreation/R.
# Outputs:
# * tr_unemployment.csv
# * rgn_id, year, percent
# * Percent unemployment (0-100%)
# * tr_sustainability.csv
# * rgn_id, score (no year value - only current year)
# * TTCI score, not normalized (1-7)
# * tr_jobs_tourism.csv
# * rgn_id, year, jobs_ct (individuals)
# * Number of jobs, direct employment in tourism
# * tr_jobs_pct_tourism.csv
# * rgn_id, year, jobs_pct
# * Percent of direct tourism jobs
# * tr_jobs_total.csv
# * rgn_id, year, count (individuals)
# * Total jobs
##############################################################################=
### setup -----
##############################################################################=
library(ohicore) # devtools::install_github('ohi-science/ohicore') # may require uninstall and reinstall
setwd('~/github/ohiprep')
source('src/R/common.R')
library(readr)
goal <- 'globalprep/TourismRecreation'
scenario <- 'v2015'
dir_anx <- file.path(dir_neptune_data, 'git-annex', goal)
dir_git <- file.path('~/github/ohiprep', goal)
dir_data <- file.path(dir_git, scenario, 'data')
dir_int <- file.path(dir_git, scenario, 'intermediate')
# dir_git points to TourismRecreation; dir_data and dir_int point to v201X/data and v201x/intermediate
# within the TourismRecreation directory.
source(file.path(dir_git, 'R/tr_fxns.R'))
##############################################################################=
### Process data and layers ----
##############################################################################=
tr_data_files <- c(unem = file.path(dir_int, 'wb_rgn_uem.csv'),
jobs_tot = file.path(dir_int, 'wb_rgn_tlf.csv'),
sust = file.path(dir_int, 'wef_ttci_2015.csv'),
jobs_tour = file.path(dir_int, 'wttc_empd_rgn.csv'))
tr_prep_data(tr_data_files, reload = TRUE)
### Process each data set and saves tidied data in v201X/intermediate directory.
tr_layers <- c(unem = file.path(dir_int, 'tr_pregap_unemployment.csv'),
jobs_tot = file.path(dir_int, 'tr_pregap_jobs_total.csv'),
sust = file.path(dir_int, 'tr_pregap_sustainability.csv'),
jobs_tour = file.path(dir_int, 'tr_pregap_jobs_tourism.csv'),
jobs_pct_tour = file.path(dir_int, 'tr_pregap_jobs_pct_tourism.csv'))
tr_prep_layers(tr_layers, tr_data_files, reload = TRUE)
### Separate out just the model variables and save these to v201X/data directory,
### ready for use in the toolbox.
##############################################################################=
### Assembling the data from layers -----
##############################################################################=
year_max <- 2013
tr_data_raw <- tr_assemble_layers(tr_layers)
### Attach georegions and per-capita GDP info for various gapfilling
georegions <- read.csv('../ohi-global/eez2013/layers/rgn_georegions.csv', na.strings='')
georegion_labels <- read.csv('../ohi-global/eez2013/layers/rgn_georegion_labels.csv')
tr_data_raw <- tr_data_raw %>%
left_join(georegion_labels %>%
spread(level, label) %>%
select(-r0),
by = 'rgn_id') %>%
filter(rgn_id != 255) # ditch disputed regions...
gdppcppp <- read.csv(file.path(dir_int, 'wb_rgn_gdppcppp.csv')) %>%
select(rgn_id, year, pcgdp = intl_dollar)
tr_data_raw <- tr_data_raw %>%
left_join(gdppcppp, by = c('rgn_id', 'year'))
### Add gapfill flag variable
tr_data_raw <- tr_data_raw %>% gapfill_flags()
write_csv(tr_data_raw, file.path(dir_int, 'tr_data_raw.csv'))
##############################################################################=
### Gapfilling ----
##############################################################################=
### Gapfill S using r1 and/or r2 regional data and PPP-adjusted per-capita GDP
tr_data_raw <- read.csv(file.path(dir_int, 'tr_data_raw.csv'), stringsAsFactors = FALSE)
tr_data <- tr_data_raw %>% gdp_gapfill()
### gap fill any missing GDP values, so the TTCI Score gapfill can use gdp as a proxy
tr_data <- s_gapfill_r2_r1(tr_data)
# Apply only the 2013 S_score to all years - so it's consistent, as we only have
# actual scores from the current year. NOTE: doesn't change gapfill flag for past years...
tr_data <- tr_data %>%
select(-S_score) %>%
left_join(tr_data %>%
filter(year == year_max) %>%
select(rgn_id, S_score),
by = 'rgn_id')
### Gapfill Ep using regional averages
tr_data <- tr_data %>%
group_by(r2, year) %>%
mutate(E_mdl2 = mean(Ep, na.rm = TRUE),
gaps = ifelse(is.na(Ep) & !is.na(E_mdl2), str_replace(gaps, 'E', 'r'), gaps),
gaps = ifelse(is.na(Ep) & !is.na(E_mdl2), str_replace(gaps, 'U', '*'), gaps),
gaps = ifelse(is.na(Ep) & !is.na(E_mdl2), str_replace(gaps, 'L', '*'), gaps),
Ep = ifelse(is.na(Ep), E_mdl2, Ep)) %>%
select(-E_mdl2) %>%
ungroup()
# summary(lm(E_mdl2 ~ Ep, data = tr_data)) # R^2 = .3124 before replacement
#
# library(ggplot2)
# ggplot(data1 %>% filter(year == year_max),
# aes(x = Ep, y = E_mdl2, color = r1)) +
# geom_point() +
# geom_abline(slope = 1, intercept = 0, color = 'red') +
# labs(x = '% tourism employment, reported',
# y = '% tourism employment, rgn avg',
# title = 'Comparison of Tourism/Total jobs')
### write full gapfilled data set to the intermediate directory
write_csv(tr_data, file.path(dir_int, 'tr_data_processed.csv'))
### write layers, post-gapfill, to data directory-----
write_csv(tr_data %>% select(rgn_id, year, U), file.path(dir_data, 'tr_unemployment.csv'))
write_csv(tr_data %>% select(rgn_id, year, Ed), file.path(dir_data, 'tr_jobs_tourism.csv'))
write_csv(tr_data %>% select(rgn_id, year, Ep), file.path(dir_data, 'tr_jobs_pct_tourism.csv'))
write_csv(tr_data %>% select(rgn_id, year, L), file.path(dir_data, 'tr_jobs_total.csv'))
write_csv(tr_data %>% filter(year == year_max) %>% select(rgn_id, S_score), file.path(dir_data, 'tr_sustainability.csv'))
### NOTE: only writing most recent year of sustainability index - use same value across all years.
write_csv(tr_data %>% select(rgn_id, year, gaps), file.path(dir_data, 'tr_gapfill.csv'))
##############################################################################=
### Run model (transfer to functions.R) -----
##############################################################################=
### Load data layers, reassemble, and process them through the model calculations.
# rgn_names <- read_csv('~/github/ohi-global/eez2013/layers/rgn_global.csv', stringsAsFactors = FALSE) %>%
# rename(rgn_name = label)
#
# tr_data1 <- read_csv(file.path(dir_data, 'tr_jobs_pct_tourism.csv')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_jobs_tourism.csv')), by = c('rgn_id', 'year')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_unemployment.csv')), by = c('rgn_id', 'year')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_jobs_total.csv')), by = c('rgn_id', 'year')) %>%
# full_join(read_csv(file.path(dir_data, 'tr_sustainability.csv')), by = c('rgn_id')) %>%
# ### NOTE: just using most recent sustainability year value across all years, since TTCI data
# ### is only available for most recent year
# full_join(read_csv(file.path(dir_data, 'tr_gapfill.csv')), by = c('rgn_id', 'year')) %>%
# full_join(rgn_names, by = 'rgn_id') %>%
# filter(year <= year_max)
#
# tr_model <- tr_calc_model(tr_data1) %>%
# filter(year <= year_max & year > year_max - 5)
# # five data years, four intervals
#
#
#
# ### Write the un-normalized model calculations.
# write_csv(tr_model, file.path(dir_int, 'tr_model.csv'))
#
# # regions with Travel Warnings at http://travel.state.gov/content/passports/english/alertswarnings.html
# rgn_travel_warnings <- read.csv(file.path(dir_data, 'tr_travelwarnings_2015.csv'), stringsAsFactors = F) %>%
# select(rgn_name) %>%
# left_join(rgn_names, by = 'rgn_name') %>%
# filter(!is.na(rgn_id))
# # TODO: check if regions with travel warnings are gapfilled (manually checked for 2013)
# tr_model <- tr_model %>%
# filter(!rgn_id %in% rgn_travel_warnings$rgn_id) %>%
# bind_rows(tr_model %>%
# filter(rgn_id %in% rgn_travel_warnings$rgn_id) %>%
# mutate(
# Xtr = 0.1 * Xtr))
#
# ### Calculate status based on quantile reference
# pct_ref <- 90 # the threshold for quantile where status score = 1.0
#
# tr_scores <- tr_model %>%
# select(rgn_id, S_score, year, rgn_name, Xtr, gaps) %>%
# left_join(tr_model %>%
# group_by(year) %>%
# summarize(Xtr_q = quantile(Xtr, probs = pct_ref/100, na.rm = TRUE)),
# by = 'year') %>%
# mutate(
# Xtr_rq = ifelse(Xtr / Xtr_q > 1, 1, Xtr / Xtr_q)) # rescale to qth percentile, cap at 1
#
# write_csv(tr_scores, file.path(dir_int, 'tr_scores.csv'))
#
|
testlist <- list(x = c(NaN, 2.12196353786585e-314, 2.40382335695285e-315, 3.31388849947084e-310, 1.32963809622704e-105, 7.41841938664413e-68, -2.66305241450813e+305, 2.11370687089355e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) | /netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612798753-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 349 | r | testlist <- list(x = c(NaN, 2.12196353786585e-314, 2.40382335695285e-315, 3.31388849947084e-310, 1.32963809622704e-105, 7.41841938664413e-68, -2.66305241450813e+305, 2.11370687089355e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
str(result) |
\name{panel_raster}
\alias{panel_raster}
\alias{.panel_raster}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Add raster to the image panel
}
\description{
If specified image has 3 or 4 bands, then color composite is plotted on image panel, else the image is plotted regarding to its color table.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
panel_raster(...)
%%~ ## non-public
%%~ .panel_raster(obj, useRaster = NA, interpolate = FALSE, alpha = NA , verbose = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{\dots}{Set of argumetns, which are recognized via their names (using \link[base:regex]{regular expressions}) and classes.
\cr 1. Passed to \code{\link[ursa:colorize]{colorize}}.
\cr 2. Interpeted in this function:
%%~ Passed to non-public \code{.panel_raster}.
\describe{
\item{\code{"(^$|obj)"} \var{as} \code{obj}}{Object of class \code{ursaRaster}. Raster band for plotting. Multiple bands are allowed if then can be interpreted as RGB or RGBA.}
\item{\code{"useRaster"} \var{as} \code{useRaster}}{Logical. If \code{TRUE} then a bitmap raster is used to plot the image instead of polygons. See argument \code{useRaster} in function \code{\link[graphics]{image}}. Default depends on PNG device (\code{getOption("ursaPngDevice")}, which is set up in \code{\link[ursa]{compose_open}}); it is \code{TRUE} for \code{"cairo"} device, and \code{FALSE} for \code{"windows"} device.}
\item{\code{"interp(olate)*"} \var{as} \code{interpolate}}{Logical. Passed as argument \code{interpolate} to function \code{\link[graphics:rasterImage]{rasterImage}}.}
\item{\code{"(alpha|transp(aren(cy)*)*)"} \var{as} \code{alpha}}{Numeric or character. Level of transparency. If numeric, the either \verb{0 <= alpha <= 1} or \code{0 <= alpha <= 255}. If character, then one byte of hexadecimal value \verb{"00" <= alpha <= "FF"}. If \code{NA}, then transparency is used from colortable, else transparency of colortable is overwritten by \code{alpha}. Default is \code{NA}.}
\item{\code{"verb(ose)*"} \var{as} \code{verbose}}{Logical. Value \code{TRUE} may provide some additional information on console. Default is \code{FALSE}.}
}
%%~ \tabular{lll}{
%%~ \emph{Pattern} (\code{panel_raster}) \tab \emph{Argument} (\code{.panel_raster}) \tab \emph{Description}
%%~ \cr \code{(^$|obj)} \tab \code{obj} \tab \emph{See below}.
%%~ \cr \code{useRaster} \tab \code{useRaster} \tab \emph{See below}.
%%~ \cr \code{interp(olate)*} \tab \code{interpolate} \tab \emph{See below}.
%%~ \cr \code{(alpha|transp(aren(cy)*)*)} \tab \code{alpha} \tab \emph{See below}.
%%~ \cr \code{verb(ose)*} \tab \code{verbose} \tab \emph{See below}.
%%~ \cr \code{\dots} \tab \tab Passed to \code{\link[ursa:colorize]{colorize}}.
%%~ }
}
%%~ \item{obj}{Object of class \code{ursaRaster}. Raster band for plotting. Multiple bands are allowed if then can be interpreted as RGB or RGBA.}
%%~ \item{useRaster}{Logical. If \code{TRUE} then a bitmap raster is used to plot the image instead of polygons. See argument \code{useRaster} in function \code{\link[graphics]{image}}. Default depends on PNG device (\code{getOption("ursaPngDevice")}, which is set up in \code{\link[ursa]{compose_open}}); it is \code{TRUE} for \code{"cairo"} device, and \code{FALSE} for \code{"windows"} device.}
%%~ \item{interpolate}{Logical. Passed as argument \code{interpolate} to function \code{\link[graphics:rasterImage]{rasterImage}}.}
%%~ \item{alpha}{Numeric or character. Level of transparency. If numeric, the either \verb{0 <= alpha <= 1} or \code{0 <= alpha <= 255}. If character, then one byte of hexadecimal value \verb{"00" <= alpha <= "FF"}. If \code{NA}, then transparency is used from colortable, else transparency of colortable is overwritten by \code{alpha}. Default is \code{NA}.}
%%~ \item{verbose}{Logical. Value \code{TRUE} may provide some additional information on console. Default is \code{FALSE}.}
}
\details{
If \code{obj} is list of raster images, then \code{panel_raster} is applied to each item of list, and colortable of last item is returned.
If \code{obj} has 3 or 4 bands then \code{obj} is intepreted as RGB(A) image.
Function attempts to speed up plotting by reduce image matrix for big rasters.
}
\value{
If argument \code{obj} has strictly one band, then function returns \link[ursa:classColorTable]{color table} - object of class \code{ursaColorTable}, which can be used as an input argument for the colorbar legend (function \code{\link[ursa:legend_colorbar]{legend_colorbar}}). Otherwise function returns \code{NULL} value.
}
%%~ \references{
%%~ %% ~put references to the literature/web site here ~
%%~ }
\author{
Nikita Platonov \email{platonov@sevin.ru}
}
%%~ \note{
%%~ %% ~~further notes~~
%%~ }
%% ~Make other sections like Warning with \section{Warning }{....} ~
%%~ \seealso{
%%~ %% ~~objects to See Also as \code{\link{help}}, ~~~
%%~ }
\examples{
session_grid(NULL)
# example no.1 -- direct use
session_grid(regrid(mul=1/32))
dima <- with(session_grid(),c(columns,rows,3))
a <- ursa_new(value=array(runif(prod(dima),min=127,max=255),dim=dima))
p <- colorize(a,pal=c("black","white"),ramp=TRUE,value=0:256)
compose_open(layout=c(2,3),skip=4,legend=list(list("top","full"),list("bottom",2:3)))
for (i in seq(6)) {
panel_new()
if (i<4)
panel_raster(p[i])
else
panel_raster(a,interpolate=i==5)
panel_decor(col="black")
panel_annotation(c("red","green","blue"
,"interpolate=FALSE","interpolate=TRUE"))
}
legend_colorbar(p,label=seq(0,256,by=16),units="channels")
legend_mtext("color composite")
compose_close()
# example no.2 -- indirect use
display(pixelsize(NULL),raster.verb=TRUE)
# example no.3 -- color table for legend
session_grid(NULL)
compose_open()
panel_new()
ct <- panel_raster(pixelsize(),palname="Greens")
panel_decor()
compose_legend(ct)
compose_close()
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{aplot}
| /man/panel_raster.Rd | no_license | yangxhcaf/ursa | R | false | false | 6,182 | rd | \name{panel_raster}
\alias{panel_raster}
\alias{.panel_raster}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Add raster to the image panel
}
\description{
If specified image has 3 or 4 bands, then color composite is plotted on image panel, else the image is plotted regarding to its color table.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
panel_raster(...)
%%~ ## non-public
%%~ .panel_raster(obj, useRaster = NA, interpolate = FALSE, alpha = NA , verbose = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{\dots}{Set of argumetns, which are recognized via their names (using \link[base:regex]{regular expressions}) and classes.
\cr 1. Passed to \code{\link[ursa:colorize]{colorize}}.
\cr 2. Interpeted in this function:
%%~ Passed to non-public \code{.panel_raster}.
\describe{
\item{\code{"(^$|obj)"} \var{as} \code{obj}}{Object of class \code{ursaRaster}. Raster band for plotting. Multiple bands are allowed if then can be interpreted as RGB or RGBA.}
\item{\code{"useRaster"} \var{as} \code{useRaster}}{Logical. If \code{TRUE} then a bitmap raster is used to plot the image instead of polygons. See argument \code{useRaster} in function \code{\link[graphics]{image}}. Default depends on PNG device (\code{getOption("ursaPngDevice")}, which is set up in \code{\link[ursa]{compose_open}}); it is \code{TRUE} for \code{"cairo"} device, and \code{FALSE} for \code{"windows"} device.}
\item{\code{"interp(olate)*"} \var{as} \code{interpolate}}{Logical. Passed as argument \code{interpolate} to function \code{\link[graphics:rasterImage]{rasterImage}}.}
\item{\code{"(alpha|transp(aren(cy)*)*)"} \var{as} \code{alpha}}{Numeric or character. Level of transparency. If numeric, the either \verb{0 <= alpha <= 1} or \code{0 <= alpha <= 255}. If character, then one byte of hexadecimal value \verb{"00" <= alpha <= "FF"}. If \code{NA}, then transparency is used from colortable, else transparency of colortable is overwritten by \code{alpha}. Default is \code{NA}.}
\item{\code{"verb(ose)*"} \var{as} \code{verbose}}{Logical. Value \code{TRUE} may provide some additional information on console. Default is \code{FALSE}.}
}
%%~ \tabular{lll}{
%%~ \emph{Pattern} (\code{panel_raster}) \tab \emph{Argument} (\code{.panel_raster}) \tab \emph{Description}
%%~ \cr \code{(^$|obj)} \tab \code{obj} \tab \emph{See below}.
%%~ \cr \code{useRaster} \tab \code{useRaster} \tab \emph{See below}.
%%~ \cr \code{interp(olate)*} \tab \code{interpolate} \tab \emph{See below}.
%%~ \cr \code{(alpha|transp(aren(cy)*)*)} \tab \code{alpha} \tab \emph{See below}.
%%~ \cr \code{verb(ose)*} \tab \code{verbose} \tab \emph{See below}.
%%~ \cr \code{\dots} \tab \tab Passed to \code{\link[ursa:colorize]{colorize}}.
%%~ }
}
%%~ \item{obj}{Object of class \code{ursaRaster}. Raster band for plotting. Multiple bands are allowed if then can be interpreted as RGB or RGBA.}
%%~ \item{useRaster}{Logical. If \code{TRUE} then a bitmap raster is used to plot the image instead of polygons. See argument \code{useRaster} in function \code{\link[graphics]{image}}. Default depends on PNG device (\code{getOption("ursaPngDevice")}, which is set up in \code{\link[ursa]{compose_open}}); it is \code{TRUE} for \code{"cairo"} device, and \code{FALSE} for \code{"windows"} device.}
%%~ \item{interpolate}{Logical. Passed as argument \code{interpolate} to function \code{\link[graphics:rasterImage]{rasterImage}}.}
%%~ \item{alpha}{Numeric or character. Level of transparency. If numeric, the either \verb{0 <= alpha <= 1} or \code{0 <= alpha <= 255}. If character, then one byte of hexadecimal value \verb{"00" <= alpha <= "FF"}. If \code{NA}, then transparency is used from colortable, else transparency of colortable is overwritten by \code{alpha}. Default is \code{NA}.}
%%~ \item{verbose}{Logical. Value \code{TRUE} may provide some additional information on console. Default is \code{FALSE}.}
}
\details{
If \code{obj} is list of raster images, then \code{panel_raster} is applied to each item of list, and colortable of last item is returned.
If \code{obj} has 3 or 4 bands then \code{obj} is intepreted as RGB(A) image.
Function attempts to speed up plotting by reduce image matrix for big rasters.
}
\value{
If argument \code{obj} has strictly one band, then function returns \link[ursa:classColorTable]{color table} - object of class \code{ursaColorTable}, which can be used as an input argument for the colorbar legend (function \code{\link[ursa:legend_colorbar]{legend_colorbar}}). Otherwise function returns \code{NULL} value.
}
%%~ \references{
%%~ %% ~put references to the literature/web site here ~
%%~ }
\author{
Nikita Platonov \email{platonov@sevin.ru}
}
%%~ \note{
%%~ %% ~~further notes~~
%%~ }
%% ~Make other sections like Warning with \section{Warning }{....} ~
%%~ \seealso{
%%~ %% ~~objects to See Also as \code{\link{help}}, ~~~
%%~ }
\examples{
session_grid(NULL)
# example no.1 -- direct use
session_grid(regrid(mul=1/32))
dima <- with(session_grid(),c(columns,rows,3))
a <- ursa_new(value=array(runif(prod(dima),min=127,max=255),dim=dima))
p <- colorize(a,pal=c("black","white"),ramp=TRUE,value=0:256)
compose_open(layout=c(2,3),skip=4,legend=list(list("top","full"),list("bottom",2:3)))
for (i in seq(6)) {
panel_new()
if (i<4)
panel_raster(p[i])
else
panel_raster(a,interpolate=i==5)
panel_decor(col="black")
panel_annotation(c("red","green","blue"
,"interpolate=FALSE","interpolate=TRUE"))
}
legend_colorbar(p,label=seq(0,256,by=16),units="channels")
legend_mtext("color composite")
compose_close()
# example no.2 -- indirect use
display(pixelsize(NULL),raster.verb=TRUE)
# example no.3 -- color table for legend
session_grid(NULL)
compose_open()
panel_new()
ct <- panel_raster(pixelsize(),palname="Greens")
panel_decor()
compose_legend(ct)
compose_close()
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{aplot}
|
# Descriptive statistics
library(combinat)
library(gtools)
# permuation and combinations ----
factorial(5)
# e.g. 7C3
choose(7,3)
combn(7,3)
# permutations
permutations(3,2)
nrow(permutations(3,2))
# descriptive statistics ----
x <- c(-2,-1,0,1,2)
px <- c(1/8,2/8,2/8,2/8,1/8)
meanp <- sum(x*px)
varp <- sum((x-meanp)^2*px)
meanp
varp
x <- c(2,14,2,6,43,2)
mean(x)
sd(x)
summary(x)
range(x)
median(x)
mode(x)
quantile(x)
# distribution ----
# normal - find probability
pnorm(60, mean=48, sd=8, lower.tail=TRUE)
# binom - find probability
pbinom(c(5,4), size = 14, prob = 0.13, lower.tail=TRUE)
# binom - density curve
dbinom(0:10, size = 10, prob = 0.5)
# binom - find smallest number of x s.t. P(X <= x) > 0.95
qbinom(0.95, size = 10, 0.5)
# poiss - find
# rate is always per time
ppois(5, 2)
# CLT ---------
# for sample mean
sample_mean <- 44
n <- 64
pop_sd <- 16
sample_sd <- pop_sd/sqrt(n)
# P(X < 85)
pnorm(85, sample_mean, sample_sd)
# for proportion
p <- 0.6
n <- 150
sample_mean <- p #proportion
sample_sd <- sqrt((p*(1-p))/n)
pnorm(0.52, sample_mean, sample_sd)
| /Probability.R | no_license | yenngee/AppliedStatisticsInR | R | false | false | 1,099 | r | # Descriptive statistics
library(combinat)
library(gtools)
# permuation and combinations ----
factorial(5)
# e.g. 7C3
choose(7,3)
combn(7,3)
# permutations
permutations(3,2)
nrow(permutations(3,2))
# descriptive statistics ----
x <- c(-2,-1,0,1,2)
px <- c(1/8,2/8,2/8,2/8,1/8)
meanp <- sum(x*px)
varp <- sum((x-meanp)^2*px)
meanp
varp
x <- c(2,14,2,6,43,2)
mean(x)
sd(x)
summary(x)
range(x)
median(x)
mode(x)
quantile(x)
# distribution ----
# normal - find probability
pnorm(60, mean=48, sd=8, lower.tail=TRUE)
# binom - find probability
pbinom(c(5,4), size = 14, prob = 0.13, lower.tail=TRUE)
# binom - density curve
dbinom(0:10, size = 10, prob = 0.5)
# binom - find smallest number of x s.t. P(X <= x) > 0.95
qbinom(0.95, size = 10, 0.5)
# poiss - find
# rate is always per time
ppois(5, 2)
# CLT ---------
# for sample mean
sample_mean <- 44
n <- 64
pop_sd <- 16
sample_sd <- pop_sd/sqrt(n)
# P(X < 85)
pnorm(85, sample_mean, sample_sd)
# for proportion
p <- 0.6
n <- 150
sample_mean <- p #proportion
sample_sd <- sqrt((p*(1-p))/n)
pnorm(0.52, sample_mean, sample_sd)
|
### Projekt ###
yearLsoa <- read.table("year_lsoa_grocery.csv", header = TRUE, sep =",")
yearLsoa
lsoaArea <- yearLsoa$area_sq_km
lsoaPopulation <- yearLsoa$population
yearMsoa <- read.table("year_msoa_grocery.csv", header = TRUE, sep =",")
yearMsoa
msoaArea <- yearMsoa$area_sq_km
msoaPopulation <- yearMsoa$population
yearOsward <- read.table("year_osward_grocery.csv", header = TRUE, sep =",")
yearOsward
oswardArea <- yearOsward$area_sq_km
oswardPopulation <- yearOsward$population
yearBorough <- read.table("year_borough_grocery.csv", header = TRUE, sep = ",")
yearBorough
boroughArea <- yearBorough$area_sq_km
boroughPopulation <- yearBorough$population
areas <- list(lsoa = lsoaArea, msoa = msoaArea, ward = oswardArea, borough = boroughArea)
population <- list(lsoa = lsoaPopulation, msoa = msoaPopulation, ward = oswardPopulation, borough = boroughPopulation)
lapply(areas, sum) ## alle gleich bis auf lsoaArea
lapply(population, sum) ## wieder alle gleich bis auf lsoaPopulation
# relative Abstände
relDif <- function(var){
x <- numeric()
for (i in seq_along(var)){
x <- append(x, round((max(var)-var[i])/max(var)*100,2))
}
return(x)
}
relD <- lapply(areas, relDif)
str(relD)
relD <- lapply(population, relDif)
str(relD)
### Dataframe ###
noAreas <- lapply(areas, length)
tab1Dataframe <- data.frame(
Area = c("LSOA","MSOA","Ward","Borough"),
NumberOfAreas = unlist(noAreas),
AvgSurfaces = round(unlist(Map("/", lapply(areas, sum), noAreas)),2),
AvgPopulation = round(unlist(Map("/", lapply(population, sum), noAreas)),0),
MedianPopulation = unlist(lapply(population, median)),
MedianArea = unlist(lapply(areas, median)),
stringsAsFactors = FALSE
)
rownames(tab1Dataframe) <- NULL
tab1Dataframe
## Visualisieren
par(mfrow=c(1,2))
hist(lsoaPopulation, main = "Lsoa Bevoelkerung", xlab = "Bevoelkerung", ylab = "Dichte",breaks= seq(min(lsoaPopulation),max(lsoaPopulation)+100, 100), col =3, freq = FALSE, xlim = c(min(lsoaPopulation),quantile(lsoaPopulation,probs = 0.99)))
hist(lsoaArea, main = "Lsoa Flaeche",xlab = "Flaeche", ylab = "Dichte", breaks= seq(min(lsoaArea),max(lsoaArea)+0.5, 0.1), col =4, freq = FALSE, xlim = c(min(lsoaArea),quantile(lsoaArea,probs = 0.99)))
#2
install.packages("sf")
library(sf)
boundariesLondon <- st_read("./statistical-gis-boundaries-london/statistical-gis-boundaries-london/ESRI/LSOA_2011_London_gen_MHW.shp")
class(boundariesLondon)
dim(boundariesLondon)
colnames(boundariesLondon)
mergeboundaries <- merge(boundariesLondon, yearLsoa, by.x = "LSOA11CD", by.y = "area_id")
class(mergeboundaries)
dim(mergeboundaries) # zwei Reihen weniger, da diese in "yearLsoa" nicht vorhanden sind (weiß nicht, wie es gehört)
colnames(mergeboundaries)
#install.packages("colorspace")
library("colorspace")
s
mergeboundaries$perresident <- mergeboundaries$num_transactions/mergeboundaries$population
par(mfrow=c(1,2))
plot(mergeboundaries["num_transactions"], logz = TRUE, main = "Number of Transactions", pal= function(...) colorspace::sequential_hcl(...,palette = "Blue-Yellow", rev = TRUE))
plot(mergeboundaries["perresident"], logz = TRUE, main = "Transactions per resident", pal= function(...) colorspace::sequential_hcl(...,palette = "Blue-Yellow",rev = TRUE))
#3
sequ <- seq(0, 1, by = 0.01)
dat <- list(lsoa = yearLsoa, msoa = yearMsoa, ward = yearOsward)
repr <- function(var){
x <- numeric()
for(i in sequ){
x <- append(x, nrow(var[var$representativeness_norm > i,])/nrow(var))
}
return(list(seq = sequ, perc = x))
}
normRepr <- lapply(dat, repr)
par(mfrow=c(1,1))
plot(as.data.frame(normRepr$lsoa), main = "Representativitaet", xlab = "Threshold")
lines(as.data.frame(normRepr$msoa), lty = 1)
lines(as.data.frame(normRepr$ward), lty = 2)
legend("topright", c("lsoa", "msoa", "ward"), lty = c(NA,1,2), pch = c(1,NA,NA))
#4
energyTot <- yearMsoa$energy_tot
energyFatTot <- yearMsoa$energy_fat / energyTot
energyFibreTot <- yearMsoa$energy_fibre / energyTot
energySatTot <- yearMsoa$energy_saturate / energyTot
energyCarbTot <- yearMsoa$energy_carb / energyTot
energySugarTot <- yearMsoa$energy_sugar / energyTot
energyProteinTot <- yearMsoa$energy_protein / energyTot
par(mfrow=c(2,3))
h <- hist(energyCarbTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Kohlenhydraten",xlim = c(0,0.6), ylim = c(0,0.6), col = "red", freq = F)
h <- hist(energyFatTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Fetten",xlim = c(0,0.6), ylim = c(0,0.6), col = "purple", freq = F)
h <- hist(energySatTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Saturated",xlim = c(0,0.6), ylim = c(0,0.6), col = "pink", freq = F)
h <- hist(energySugarTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Zucker",xlim = c(0,0.6), ylim = c(0,0.6), col = "orange", freq = F)
h <- hist(energyProteinTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Eiweiß",xlim = c(0,0.6), ylim = c(0,0.6), col = "green", freq = F)
h <- hist(energyFibreTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Ballaststoffe",xlim = c(0,0.6), ylim = c(0,0.6), col = "lightgreen", freq = F)
#hist(energyFatTot,main = NULL, xlab = "Energie von Fetten", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "purple", ylim = c(0,300), plot = F)
#hist(energySatTot, main = NULL, xlab = "Energie von Saturated", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "pink", ylim = c(0,300))
#hist(energySugarTot, main = NULL, xlab = "Energie von Zucker", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "orange", ylim = c(0,300))
#hist(energyProteinTot, main = NULL, xlab = "Energie von Eiweiß", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "green", ylim = c(0,300))
#hist(energyFibreTot,main = NULL, xlab = "Energie von Ballaststoffe", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "lightgreen", ylim = c(0,300))
#5
diabetes <- read.table("diabetes_estimates_osward_2016.csv", header = TRUE, sep = ",")
head(diabetes)
# Einlesen der Shapefiles
londonWardX <- st_read("./statistical-gis-boundaries-london/statistical-gis-boundaries-london/ESRI/London_Ward_CityMerged.shp")
londonWardY <- st_read("./London-wards-2014/London-wards-2014 (1)/London-wards-2014_ESRI/London_Ward_CityMerged.shp")
compare_data <- function(x, y, id.x, id.y = id.x, var.x = character(0), var.y = var.x, FUN = sum, ...) {
# Zwei Listen müssen übergeben werden, ansonsten nicht genügend Daten
if(!is.list(x) | !is.list(y))
stop("Bitte überprüfen Sie Ihre Eingabe für x und y!")
# ID muss als String übergeben werden (zumindest x, y nicht unbedingt. Wenn y übergeben wird, muss es auch ein String sein)
if(!is.character(id.x) | ((id.x != id.y) & !is.character(id.y)))
stop("es muss eine korrekte ID angegeben werden!")
# Variable muss als String übergeben werden (zumindest x, y nicht unbedingt. Wenn y übergeben wird, muss es auch ein String sein)
if(!is.character(var.x) | ((var.x != var.y) & !is.character(var.y)))
stop("es muss eine korrekte Variable angegeben werden!")
#spezielle Behandlung bei merge von 2 sf Objekten
if(class(x)[1] == "sf" & class(y)[1] == "sf"){
# verknüpft werden die Objekte, indem sie zuerst in einen Dataframe umgewandelt werden und mit der jeweiligen ID verbunden werden. Dabei wird
# sozusagen die Geometry-Variable deaktiviert
mergedXandY <- merge(x %>% as.data.frame(), y %>% as.data.frame(), by.x = id.x, by.y = id.y)
# Danach wird die Geometry-Variable wieder "aktiviert" und wieder in ein Objekt der Klasse sf umgewandelt
mergedXandY %>% st_sf(sf_column_name = 'geometry.x')
mergedX <- merge(x %>% as.data.frame(), y %>% as.data.frame(), by.x = id.x, by.y = id.y, all.x = TRUE)
mergedX %>% st_sf(sf_column_name = 'geometry.x')
mergedY <- merge(x %>% as.data.frame(), y %>% as.data.frame(), by.x = id.x, by.y = id.y, all.y = TRUE)
mergedY %>% st_sf(sf_column_name = 'geometry.y')
# Funktion wird auf die einzelnen Gruppierungen angewendet
sumXandY <- sumXandY1 <- (FUN(get(paste(var.x, ".x",sep = ""), mergedXandY), ...))
sumXandY2 <- (FUN(get(paste(var.y, ".x",sep = ""), mergedXandY), ...))
sumX <- (FUN(get(paste(var.x,".x",sep = ""), mergedX), ...))
sumY <- (FUN(get(paste(var.y,".y",sep = ""), mergedY), ...))
}
else{
# Verknüpfung der Objekte nach den jeweiligen IDs
mergedXandY <- merge(x, y, by.x = id.x, by.y = id.y)
mergedX <- merge(x, y, by.x = id.x, by.y = id.y, all.x = TRUE)
mergedY <- merge(x, y, by.x = id.x, by.y = id.y, all.y = TRUE)
sumXandY <- sumXandY1 <- (FUN(get(var.x, mergedXandY), ...))
sumXandY2 <- (FUN(get(var.y, mergedXandY), ...))
sumX <- (FUN(get(var.x, mergedX), ...))
sumY <- (FUN(get(var.y, mergedY),...))
}
# wenn die Ergebnisse der angewendeten Funktion aus dem ersten für die erste und zweite Variable nicht übereinstimmen, wird für das Ergebnis der gemeinsamen Daten
# von X und Y NA als Ergebnis verwendet
if(sumXandY1 != sumXandY2)
sumXandY <- NA
df <- data.frame(mergeType = c("X und Y", "nur X", "nur Y"),
anzahl = c(nrow(mergedXandY),nrow(mergedX)-nrow(mergedXandY), nrow(mergedY)-nrow(mergedXandY)),
ergebnis = c(round(sumXandY,0), if(var.x %in% colnames(x)) round(sumX-sumXandY1,0) else NA,
if(var.y %in% colnames(y)) round(sumY-sumXandY2,0) else NA))
return(df)
}
## Task 5: compare area/variable totals between the different data sets.
compare_data(londonWardX, londonWardY, "GSS_CODE", "GSS_CODE", "HECTARES", na.rm = TRUE)
compare_data(diabetes, londonWardY, "area_id", "GSS_CODE", "HECTARES", na.rm= TRUE)
compare_data(diabetes, yearOsward, "area_id", "area_id", "area_sq_km", na.rm = TRUE)
compare_data(diabetes, yearOsward, "area_id", "area_id", "gp_patients", "population", na.rm = TRUE)
## Sanity totals for the two population-type variables compared above.
sum(diabetes$gp_patients)
sum(yearOsward$population)
#6
merged <- merge(yearOsward, diabetes, by= "area_id")
# association: Spearman rank correlation (robust to non-normality)
cor.test(merged$estimated_diabetes_prevalence, merged$energy_tot, method = "spearman")
par(mfrow=c(1,1))
plot(merged$estimated_diabetes_prevalence, merged$energy_tot, main = "Streudiagramm", xlab = "geschätzte Diabetes-Prävalenz", ylab = "Energie der Nährstoffe")
| /ProjectAufgabe1.R | no_license | laurenzhinterholzer22/ProjectForProgrammierenMitR | R | false | false | 10,576 | r | ### Projekt ###
## Load the Tesco grocery data at four spatial aggregation levels
## (LSOA, MSOA, ward, borough) and extract area / population columns.
yearLsoa <- read.table("year_lsoa_grocery.csv", header = TRUE, sep =",")
yearLsoa
lsoaArea <- yearLsoa$area_sq_km
lsoaPopulation <- yearLsoa$population
yearMsoa <- read.table("year_msoa_grocery.csv", header = TRUE, sep =",")
yearMsoa
msoaArea <- yearMsoa$area_sq_km
msoaPopulation <- yearMsoa$population
yearOsward <- read.table("year_osward_grocery.csv", header = TRUE, sep =",")
yearOsward
oswardArea <- yearOsward$area_sq_km
oswardPopulation <- yearOsward$population
yearBorough <- read.table("year_borough_grocery.csv", header = TRUE, sep = ",")
yearBorough
boroughArea <- yearBorough$area_sq_km
boroughPopulation <- yearBorough$population
## Bundle the per-level vectors so summaries can be computed with lapply.
areas <- list(lsoa = lsoaArea, msoa = msoaArea, ward = oswardArea, borough = boroughArea)
population <- list(lsoa = lsoaPopulation, msoa = msoaPopulation, ward = oswardPopulation, borough = boroughPopulation)
lapply(areas, sum) ## all equal except lsoaArea
lapply(population, sum) ## again all equal except lsoaPopulation
# relative differences
# Relative difference (in percent) of every element from the vector maximum.
#
# Args:
#   var: numeric vector (e.g. surface areas or population counts).
# Returns:
#   Numeric vector of the same length: round((max(var) - var) / max(var) * 100, 2).
#   An empty input yields numeric(0), matching the original loop behaviour.
relDif <- function(var) {
  # Guard so that max() is never called on an empty vector (which would warn).
  if (length(var) == 0) {
    return(numeric(0))
  }
  # Vectorized replacement for the former element-by-element append() loop.
  round((max(var) - var) / max(var) * 100, 2)
}
## Relative deviations from the largest area / population per level.
relD <- lapply(areas, relDif)
str(relD)
relD <- lapply(population, relDif)
str(relD)
### Dataframe ###
## Summary table: number of areas plus mean/median surface and population
## for each aggregation level.
noAreas <- lapply(areas, length)
tab1Dataframe <- data.frame(
  Area = c("LSOA","MSOA","Ward","Borough"),
  NumberOfAreas = unlist(noAreas),
  AvgSurfaces = round(unlist(Map("/", lapply(areas, sum), noAreas)),2),
  AvgPopulation = round(unlist(Map("/", lapply(population, sum), noAreas)),0),
  MedianPopulation = unlist(lapply(population, median)),
  MedianArea = unlist(lapply(areas, median)),
  stringsAsFactors = FALSE
)
rownames(tab1Dataframe) <- NULL
tab1Dataframe
## Visualize: LSOA population and area distributions; the x-axis is cut at
## the 99% quantile so extreme outliers do not dominate the plot.
par(mfrow=c(1,2))
hist(lsoaPopulation, main = "Lsoa Bevoelkerung", xlab = "Bevoelkerung", ylab = "Dichte",breaks= seq(min(lsoaPopulation),max(lsoaPopulation)+100, 100), col =3, freq = FALSE, xlim = c(min(lsoaPopulation),quantile(lsoaPopulation,probs = 0.99)))
hist(lsoaArea, main = "Lsoa Flaeche",xlab = "Flaeche", ylab = "Dichte", breaks= seq(min(lsoaArea),max(lsoaArea)+0.5, 0.1), col =4, freq = FALSE, xlim = c(min(lsoaArea),quantile(lsoaArea,probs = 0.99)))
#2
## Install sf only when it is missing instead of unconditionally on every run.
if (!requireNamespace("sf", quietly = TRUE)) install.packages("sf")
library(sf)
boundariesLondon <- st_read("./statistical-gis-boundaries-london/statistical-gis-boundaries-london/ESRI/LSOA_2011_London_gen_MHW.shp")
class(boundariesLondon)
dim(boundariesLondon)
colnames(boundariesLondon)
## Join the boundary polygons with the grocery data via the LSOA code.
mergeboundaries <- merge(boundariesLondon, yearLsoa, by.x = "LSOA11CD", by.y = "area_id")
class(mergeboundaries)
dim(mergeboundaries) # two rows fewer, since these are missing from "yearLsoa"
colnames(mergeboundaries)
#install.packages("colorspace")
library("colorspace")
## (A stray lone `s` token that errored when sourcing this script was removed here.)
mergeboundaries$perresident <- mergeboundaries$num_transactions/mergeboundaries$population
## Choropleth maps on a log colour scale.
par(mfrow=c(1,2))
plot(mergeboundaries["num_transactions"], logz = TRUE, main = "Number of Transactions", pal= function(...) colorspace::sequential_hcl(...,palette = "Blue-Yellow", rev = TRUE))
plot(mergeboundaries["perresident"], logz = TRUE, main = "Transactions per resident", pal= function(...) colorspace::sequential_hcl(...,palette = "Blue-Yellow",rev = TRUE))
#3
## Threshold grid and data sets for the representativeness analysis below.
sequ <- seq(0, 1, by = 0.01)
dat <- list(lsoa = yearLsoa, msoa = yearMsoa, ward = yearOsward)
# Fraction of areas whose normalized representativeness exceeds each threshold.
#
# Args:
#   var: data frame with a 'representativeness_norm' column.
#   thresholds: numeric vector of cut-offs.  Defaults to seq(0, 1, by = 0.01),
#     the grid this function previously read from the global variable 'sequ'
#     (parameterized here so the function no longer depends on a global;
#     existing calls repr(var) behave identically).
# Returns:
#   list(seq = thresholds, perc = share of rows above each threshold).
repr <- function(var, thresholds = seq(0, 1, by = 0.01)) {
  perc <- vapply(
    thresholds,
    function(th) mean(var$representativeness_norm > th),
    numeric(1)
  )
  list(seq = thresholds, perc = perc)
}
## Apply repr() per level and plot the share of areas above each threshold.
normRepr <- lapply(dat, repr)
par(mfrow=c(1,1))
plot(as.data.frame(normRepr$lsoa), main = "Representativitaet", xlab = "Threshold")
lines(as.data.frame(normRepr$msoa), lty = 1)
lines(as.data.frame(normRepr$ward), lty = 2)
legend("topright", c("lsoa", "msoa", "ward"), lty = c(NA,1,2), pch = c(1,NA,NA))
#4
## Energy share of each nutrient relative to total energy (MSOA level).
energyTot <- yearMsoa$energy_tot
energyFatTot <- yearMsoa$energy_fat / energyTot
energyFibreTot <- yearMsoa$energy_fibre / energyTot
energySatTot <- yearMsoa$energy_saturate / energyTot
energyCarbTot <- yearMsoa$energy_carb / energyTot
energySugarTot <- yearMsoa$energy_sugar / energyTot
energyProteinTot <- yearMsoa$energy_protein / energyTot
## Relative-frequency histograms (counts normalized to sum to 1) on a
## common 0-0.6 axis so the six nutrients are directly comparable.
par(mfrow=c(2,3))
h <- hist(energyCarbTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Kohlenhydraten",xlim = c(0,0.6), ylim = c(0,0.6), col = "red", freq = F)
h <- hist(energyFatTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Fetten",xlim = c(0,0.6), ylim = c(0,0.6), col = "purple", freq = F)
h <- hist(energySatTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Saturated",xlim = c(0,0.6), ylim = c(0,0.6), col = "pink", freq = F)
h <- hist(energySugarTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Zucker",xlim = c(0,0.6), ylim = c(0,0.6), col = "orange", freq = F)
h <- hist(energyProteinTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Eiweiß",xlim = c(0,0.6), ylim = c(0,0.6), col = "green", freq = F)
h <- hist(energyFibreTot, breaks=seq(0,0.6,length=75), plot = F)
h$density <- h$counts/sum(h$counts)
plot(h, main = NULL, xlab = "Energie von Ballaststoffe",xlim = c(0,0.6), ylim = c(0,0.6), col = "lightgreen", freq = F)
#hist(energyFatTot,main = NULL, xlab = "Energie von Fetten", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "purple", ylim = c(0,300), plot = F)
#hist(energySatTot, main = NULL, xlab = "Energie von Saturated", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "pink", ylim = c(0,300))
#hist(energySugarTot, main = NULL, xlab = "Energie von Zucker", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "orange", ylim = c(0,300))
#hist(energyProteinTot, main = NULL, xlab = "Energie von Eiweiß", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "green", ylim = c(0,300))
#hist(energyFibreTot,main = NULL, xlab = "Energie von Ballaststoffe", ylab = "Haeufigkeit",breaks=seq(0,0.6,length=100), col = "lightgreen", ylim = c(0,300))
#5
diabetes <- read.table("diabetes_estimates_osward_2016.csv", header = TRUE, sep = ",")
head(diabetes)
# Read in the shapefiles
londonWardX <- st_read("./statistical-gis-boundaries-london/statistical-gis-boundaries-london/ESRI/London_Ward_CityMerged.shp")
londonWardY <- st_read("./London-wards-2014/London-wards-2014 (1)/London-wards-2014_ESRI/London_Ward_CityMerged.shp")
# Compare two data sets (plain data frames or sf objects) that share an ID:
# merges them three ways (inner / left / right) and summarizes a variable
# with FUN over the jointly matched rows, the X-only rows and the Y-only rows.
#
# Args:
#   x, y       : list-like data sets (data frames or sf objects) to compare.
#   id.x, id.y : name (string) of the ID column in x / y; id.y defaults to id.x.
#   var.x, var.y: name (string) of the variable to summarize; var.y defaults
#                to var.x.
#   FUN        : summary function applied to the variable (default sum).
#   ...        : further arguments passed to FUN (e.g. na.rm = TRUE).
# Returns:
#   data.frame with a row per merge type ("X und Y", "nur X", "nur Y"),
#   the row counts ('anzahl') and the rounded summary results ('ergebnis').
compare_data <- function(x, y, id.x, id.y = id.x, var.x = character(0), var.y = var.x, FUN = sum, ...) {
# Two list-like objects must be supplied (data frames are lists),
# otherwise there is not enough data to compare.
if(!is.list(x) | !is.list(y))
stop("Bitte überprüfen Sie Ihre Eingabe für x und y!")
# The ID must be given as a string (at least id.x; when id.y differs from
# id.x it must be a string as well).
if(!is.character(id.x) | ((id.x != id.y) & !is.character(id.y)))
stop("es muss eine korrekte ID angegeben werden!")
# The variable must be given as a string (same logic as for the IDs).
if(!is.character(var.x) | ((var.x != var.y) & !is.character(var.y)))
stop("es muss eine korrekte Variable angegeben werden!")
# Special handling when merging two sf objects.
# NOTE(review): inherits(x, "sf") would be the robust class test here.
if(class(x)[1] == "sf" & class(y)[1] == "sf"){
# The objects are joined by first converting them to data frames and
# merging on the respective IDs, which effectively deactivates the
# geometry column.
mergedXandY <- merge(x %>% as.data.frame(), y %>% as.data.frame(), by.x = id.x, by.y = id.y)
# Afterwards the geometry column is meant to be "reactivated", turning the
# result back into an sf object.
# NOTE(review): the st_sf() results below are never assigned, so the merged
# objects actually remain plain data frames -- confirm whether the
# conversion back to sf is really intended.
mergedXandY %>% st_sf(sf_column_name = 'geometry.x')
mergedX <- merge(x %>% as.data.frame(), y %>% as.data.frame(), by.x = id.x, by.y = id.y, all.x = TRUE)
mergedX %>% st_sf(sf_column_name = 'geometry.x')
mergedY <- merge(x %>% as.data.frame(), y %>% as.data.frame(), by.x = id.x, by.y = id.y, all.y = TRUE)
mergedY %>% st_sf(sf_column_name = 'geometry.y')
# The summary function is applied to each of the merged groups.
# NOTE(review): sumXandY2 reads var.y with the ".x" suffix -- for colliding
# column names merge() suffixes y's column with ".y"; verify this is intended.
sumXandY <- sumXandY1 <- (FUN(get(paste(var.x, ".x",sep = ""), mergedXandY), ...))
sumXandY2 <- (FUN(get(paste(var.y, ".x",sep = ""), mergedXandY), ...))
sumX <- (FUN(get(paste(var.x,".x",sep = ""), mergedX), ...))
sumY <- (FUN(get(paste(var.y,".y",sep = ""), mergedY), ...))
}
else{
# Join the objects on the respective IDs (plain data frames).
mergedXandY <- merge(x, y, by.x = id.x, by.y = id.y)
mergedX <- merge(x, y, by.x = id.x, by.y = id.y, all.x = TRUE)
mergedY <- merge(x, y, by.x = id.x, by.y = id.y, all.y = TRUE)
sumXandY <- sumXandY1 <- (FUN(get(var.x, mergedXandY), ...))
sumXandY2 <- (FUN(get(var.y, mergedXandY), ...))
sumX <- (FUN(get(var.x, mergedX), ...))
sumY <- (FUN(get(var.y, mergedY),...))
}
# If the results of FUN for the first and second variable on the joint data
# do not agree, NA is reported as the result for the X-and-Y group.
if(sumXandY1 != sumXandY2)
sumXandY <- NA
df <- data.frame(mergeType = c("X und Y", "nur X", "nur Y"),
                 anzahl = c(nrow(mergedXandY),nrow(mergedX)-nrow(mergedXandY), nrow(mergedY)-nrow(mergedXandY)),
                 ergebnis = c(round(sumXandY,0), if(var.x %in% colnames(x)) round(sumX-sumXandY1,0) else NA,
                              if(var.y %in% colnames(y)) round(sumY-sumXandY2,0) else NA))
return(df)
}
## Task 5: compare area/variable totals between the different data sets.
compare_data(londonWardX, londonWardY, "GSS_CODE", "GSS_CODE", "HECTARES", na.rm = TRUE)
compare_data(diabetes, londonWardY, "area_id", "GSS_CODE", "HECTARES", na.rm= TRUE)
compare_data(diabetes, yearOsward, "area_id", "area_id", "area_sq_km", na.rm = TRUE)
compare_data(diabetes, yearOsward, "area_id", "area_id", "gp_patients", "population", na.rm = TRUE)
## Sanity totals for the two population-type variables compared above.
sum(diabetes$gp_patients)
sum(yearOsward$population)
#6
merged <- merge(yearOsward, diabetes, by= "area_id")
# association: Spearman rank correlation (robust to non-normality)
cor.test(merged$estimated_diabetes_prevalence, merged$energy_tot, method = "spearman")
par(mfrow=c(1,1))
plot(merged$estimated_diabetes_prevalence, merged$energy_tot, main = "Streudiagramm", xlab = "geschätzte Diabetes-Prävalenz", ylab = "Energie der Nährstoffe")
|
#' Null multivariate linear regression model fitting
#'
#' All outcomes are regressed on the same set of covariates.
#' @param Ys matrix of outcomes (samples in rows and trait in columns)
#' @param Xs matrix of common covariates for all traits. Default to NULL for no covariates.
#' @return
#' \describe{
#' \item{res}{ residual matrix (sample in rows) }
#' \item{Ux}{ eigen vectors of projection matrix }
#' \item{n,m,p}{ sample size and outcome/covariate dimensions }
#' }
#' @export
## null model fitting based on SVD
MLM.null <- function(Ys, Xs = NULL) {
    ## Null-model fit: project the outcome matrix off the covariate space.
    n <- nrow(Ys)
    m <- ncol(Ys)
    if (is.null(Xs)) {
        ## Intercept-only model: the projection basis is the constant vector.
        p <- 0
        Ux <- matrix(1 / sqrt(n), n, 1)
    } else {
        p <- ncol(Xs)
        ## Orthonormal basis of [1, Xs] from its left singular vectors.
        Ux <- svd(cbind(1, Xs), nv = 0)$u
    }
    ## Residuals of each outcome after projection onto span(Ux).
    res <- Ys - Ux %*% crossprod(Ux, Ys)
    list(res = res, Ux = Ux, n = n, m = m, p = p)
}
#' Multiple quantitative trait association test with common covariates
#'
#' Extremely efficient computation of genome-wide association test of multiple quantitative traits.
#' @param obj fitted null model from MLM.null
#' @param G genotype vector
#' @return
#' \describe{
#' \item{p.value}{ three association p-values: an ombinus m-DF Wald test; two 1-DF Wald tests assuming common effect or common scaled effect (see ref) }
#' \item{coef}{ estimated variant regression coefficients for all traits}
#' }
#'
#' @export
#' @references
#' Wu,B. and Pankow,J.S. (2018) Fast and accurate genome-wide association test of multiple quantitative traits. \emph{Computational and mathematical methods in medicine}, in press.
MQTAc <- function(obj,G){
    ## Multi-trait Wald tests of genotype G given the null model fit `obj`
    ## (output of MLM.null); see the roxygen block above for the contract.
    n = obj$n; m=obj$m; p=obj$p
    pval = rep(NA,3)
    ## Residualize G against the covariate space spanned by obj$Ux.
    Gh = as.vector( obj$Ux%*%colSums(obj$Ux*G) )
    Ge = G-Gh; G2 = sum(Ge^2)
    ## A (near-)constant residual genotype carries no information: bail out.
    if(G2<1e-3) return(list(p.value=pval,coef=NA))
    ## beta coef
    U0 = colSums(obj$res*Ge)       # score vector: trait residuals x Ge
    rcf = U0/G2                    # per-trait regression coefficients of G
    res = obj$res - outer(Ge,rcf)  # residuals after adding G to the model
    R0 = t(res)%*%res/(n-p-2); R0i = solve(R0); sigs = sqrt(diag(R0))
    ## omnibus test (m-DF Wald, converted to an F statistic)
    RU0 = R0i%*%U0
    Z = sum(U0*RU0)/G2
    Zf = Z*(n-p-m-1)/m/(n-p-2)
    pval[1] = pf(Zf,m,n-p-m-1,lower=FALSE)
    ## 1-DF common scaled effect
    Z2 = sum(RU0*sigs)^2/G2/sum(t(R0i*sigs)*sigs)
    Zf2 = Z2*(n-p-m-1)/(n-p-2)
    pval[2] = pf(Zf2,1,n-p-m-1,lower=FALSE)
    ## 1-DF common beta
    Z1 = sum(RU0)^2/G2/sum(R0i)
    Zf1 = Z1*(n-p-m-1)/(n-p-2)
    pval[3] = pf(Zf1,1,n-p-m-1,lower=FALSE)
    ## return(list(p.value=pval,coef=rcf,Sigma=R0))
    return(list(p.value=pval,coef=rcf))
}
#' Minimum p-value based multiple quantitative trait association test
#'
#' We efficiently compute minimum test p-values (minP) across multiple quantitative traits;
#' and report two test p-values: 1) Bonferroni corrected minimum p-values (Pbonf) across traits.
#' 2) Analytical significance p-value (Pmin) of minP using asymptotic multivariate normal integration.
#' See refs in the PGmvn() function.
#'
#' Remarks: (1) Be cautious to interpret extreme Pmin: it is very hard to accurately compute extreme p-values.
#' (2) Generally Pmin is close to Pbonf at extreme values under moderate trait correlations.
#' (3) Under extreme trait correlations, Pmin can offer advantages compared to Pbonf.
#' (4) Note that theoretically we have minP \eqn{\le} Pmin \eqn{\le} Pbonf.
#' @param obj fitted null model from MLM.null
#' @param G genotype vector
#' @return
#' \describe{
#' \item{p.value}{ individual trait association test p-values }
#' \item{Pbonf}{ Bonferroni corrected minimum p-value }
#' \item{Pmin}{ Significance p-value of minimum p-value computed analytically based on multivariate normal dist }
#' }
#'
#' @export
#' @references
#' Conneely,K.N. and Boehnke,M. (2007) So many correlated tests, so little time! Rapid adjustment of P values for multiple correlated tests. Am. J. Hum. Genet. 81, 1158–1168.
#'
#' Conneely,K.N. and Boehnke,M. (2010) Meta-analysis of genetic association studies and adjustment for multiple testing of correlated SNPs and traits. Genetic Epidemiology. 34:739-746.
#'
#' Wu,B. (2017) MTAR: an R package for versatile genome-wide association test of multiple traits. tech report.
MTA.minp <- function(obj,G){
    ## Minimum p-value based multi-trait association test of genotype G given
    ## the null model fit `obj` from MLM.null(); see the roxygen block above.
    ## Returns per-trait p-values plus Bonferroni (pbonf) and analytical
    ## multivariate-normal (pmin) corrections of the minimum p-value.
    n = obj$n; m=obj$m; p=obj$p
    ## Residualize G against the covariate space spanned by obj$Ux.
    Gh = as.vector( obj$Ux%*%colSums(obj$Ux*G) )
    Ge = G-Gh; G2 = sum(Ge^2)
    ## A (near-)constant residual genotype carries no information: bail out.
    if(G2<1e-3) return(list(p.value=rep(NA,m),coef=NA))
    ## beta coef: per-trait regression coefficients of G
    U0 = colSums(obj$res*Ge)
    rcf = U0/G2
    res = obj$res - outer(Ge,rcf)
    S2 = colSums(res^2)/(n-p-2)
    sigs = sqrt(S2)
    ## Per-trait t statistics and two-sided p-values.
    Z = rcf/sigs*sqrt(G2)
    pval = pt(-abs(Z),df=n-p-2)*2
    ## uni-test: minimum p-value across traits and its corrections.
    R0 = t(res)%*%res/(n-p-2)
    R0s = t(R0/sigs)/sigs
    p0 = min(pval)
    pbonf = min(p0*m, 1)
    Z0 = -qnorm(p0/2)
    ## Analytical significance of minP via multivariate normal integration.
    psvd = try( {PGmvn(lower=rep(-Z0,m),upper=rep(Z0,m),sigma=R0s,Nsample=1e5,Ncov=m)} )
    ## Bug fix: test the try() result with inherits() -- class(psvd) can have
    ## length > 1, which errors inside if() on modern R.
    if(inherits(psvd, 'try-error')){
        ## Fallback: mvtnorm::pmvnorm with the GenzBretz algorithm.
        it = 0; maxpts=1e5
        abseps = min(max(pbonf*0.05, p0), 1e-5)
        pmvn = 1-pmvnorm(lower=rep(-Z0,m),upper=rep(Z0,m), sigma=R0s, algorithm=GenzBretz(maxpts=maxpts,abseps=abseps))
        ## Retry with a doubled maxpts budget until pmvnorm converges
        ## (scalar conditions, hence && rather than elementwise &).
        while( (attr(pmvn,'msg')!='Normal Completion') && (it<10) ){
            it = it+1; maxpts = maxpts*2
            pmvn = 1-pmvnorm(lower=rep(-Z0,m),upper=rep(Z0,m), sigma=R0s, algorithm=GenzBretz(maxpts=maxpts,abseps=abseps))
        }
        ## Clamp into [p0, pbonf]: theoretically minP <= Pmin <= Pbonf.
        pmin = min(max(pmvn, p0), pbonf)
    } else{
        pmin = min(max(psvd, p0), pbonf)
    }
    return(list(p.value=pval, pmin=pmin,pbonf=pbonf))
    ## Genz() is not accurate!!! (quasi-MC) almost not working for extreme p-values!!!
    ## Miwa() can produce negative values!!! and computationally intensive for large m!!!
    ## PGmvn() can often lead to much more accurate results.
}
#' Compute the tail probability of the m-dim multivariate normal distribution
#'
#' Internal function. Not to be called directly.
#' @param lower the vector of lower limits of length m
#' @param upper the vector of upper limits of length m
#' @param mean the mean vector of length m
#' @param sigma the covariance matrix of dimension m
#' @param Nsample the number of Monte Carlo samples
#' @param Ncov the number of control variates to be used (<=m).
#' @return multivariate normal distribution probability of outside the specified box region.
##' @export
#' @references
#' Phinikettos,I. and Gandy,A. (2011) Fast computation of high-dimensional multivariate normal probabilities. Computational Statistics & Data Analysis. 55, 1521–1529.
#'
#' Genz, A., Bretz, F., Miwa, T., Mi, X., Leisch, F., Scheipl, F., Bornkamp, B., Maechler, M., Hothorn, T. (2015) mvtnorm: Multivariate Normal and t Distributions. R package version 1.0-3. \url{https://cran.r-project.org/web/packages/mvtnorm/index.html}
PGmvn <- function(lower = -Inf, upper = Inf, mean = NULL, sigma, Nsample = 1e4, Ncov = 1) {
    ## Tail probability of an m-variate normal outside the box [lower, upper];
    ## see the roxygen block above for the full contract.
    m <- nrow(sigma)
    ## The mean defaults to the origin.
    mu <- if (is.null(mean)) rep(0, m) else mean
    ## Recycle scalar limits to the full dimension.
    if (length(lower) <= 1) {
        lower <- rep(lower, m)
    }
    if (length(upper) <= 1) {
        upper <- rep(upper, m)
    }
    ## Centre the box and complement the inside-box probability from psvdnorm().
    1 - psvdnorm(lower - mu, upper - mu, Sigma = sigma, Nsample = Nsample, Ncov = Ncov)
}
| /R/mlm.R | no_license | baolinwu/MTAR | R | false | false | 7,397 | r | #' Null multivariate linear regression model fitting
#'
#' All outcomes are regressed on the same set of covariates.
#' @param Ys matrix of outcomes (samples in rows and trait in columns)
#' @param Xs matrix of common covariates for all traits. Default to NULL for no covariates.
#' @return
#' \describe{
#' \item{res}{ residual matrix (sample in rows) }
#' \item{Ux}{ eigen vectors of projection matrix }
#' \item{n,m,p}{ sample size and outcome/covariate dimensions }
#' }
#' @export
## null model fitting based on SVD
MLM.null <- function(Ys, Xs = NULL) {
    ## Null-model fit: project the outcome matrix off the covariate space.
    n <- nrow(Ys)
    m <- ncol(Ys)
    if (is.null(Xs)) {
        ## Intercept-only model: the projection basis is the constant vector.
        p <- 0
        Ux <- matrix(1 / sqrt(n), n, 1)
    } else {
        p <- ncol(Xs)
        ## Orthonormal basis of [1, Xs] from its left singular vectors.
        Ux <- svd(cbind(1, Xs), nv = 0)$u
    }
    ## Residuals of each outcome after projection onto span(Ux).
    res <- Ys - Ux %*% crossprod(Ux, Ys)
    list(res = res, Ux = Ux, n = n, m = m, p = p)
}
#' Multiple quantitative trait association test with common covariates
#'
#' Extremely efficient computation of genome-wide association test of multiple quantitative traits.
#' @param obj fitted null model from MLM.null
#' @param G genotype vector
#' @return
#' \describe{
#' \item{p.value}{ three association p-values: an ombinus m-DF Wald test; two 1-DF Wald tests assuming common effect or common scaled effect (see ref) }
#' \item{coef}{ estimated variant regression coefficients for all traits}
#' }
#'
#' @export
#' @references
#' Wu,B. and Pankow,J.S. (2018) Fast and accurate genome-wide association test of multiple quantitative traits. \emph{Computational and mathematical methods in medicine}, in press.
MQTAc <- function(obj,G){
    ## Multi-trait Wald tests of genotype G given the null model fit `obj`
    ## (output of MLM.null); see the roxygen block above for the contract.
    n = obj$n; m=obj$m; p=obj$p
    pval = rep(NA,3)
    ## Residualize G against the covariate space spanned by obj$Ux.
    Gh = as.vector( obj$Ux%*%colSums(obj$Ux*G) )
    Ge = G-Gh; G2 = sum(Ge^2)
    ## A (near-)constant residual genotype carries no information: bail out.
    if(G2<1e-3) return(list(p.value=pval,coef=NA))
    ## beta coef
    U0 = colSums(obj$res*Ge)       # score vector: trait residuals x Ge
    rcf = U0/G2                    # per-trait regression coefficients of G
    res = obj$res - outer(Ge,rcf)  # residuals after adding G to the model
    R0 = t(res)%*%res/(n-p-2); R0i = solve(R0); sigs = sqrt(diag(R0))
    ## omnibus test (m-DF Wald, converted to an F statistic)
    RU0 = R0i%*%U0
    Z = sum(U0*RU0)/G2
    Zf = Z*(n-p-m-1)/m/(n-p-2)
    pval[1] = pf(Zf,m,n-p-m-1,lower=FALSE)
    ## 1-DF common scaled effect
    Z2 = sum(RU0*sigs)^2/G2/sum(t(R0i*sigs)*sigs)
    Zf2 = Z2*(n-p-m-1)/(n-p-2)
    pval[2] = pf(Zf2,1,n-p-m-1,lower=FALSE)
    ## 1-DF common beta
    Z1 = sum(RU0)^2/G2/sum(R0i)
    Zf1 = Z1*(n-p-m-1)/(n-p-2)
    pval[3] = pf(Zf1,1,n-p-m-1,lower=FALSE)
    ## return(list(p.value=pval,coef=rcf,Sigma=R0))
    return(list(p.value=pval,coef=rcf))
}
#' Minimum p-value based multiple quantitative trait association test
#'
#' We efficiently compute minimum test p-values (minP) across multiple quantitative traits;
#' and report two test p-values: 1) Bonferroni corrected minimum p-values (Pbonf) across traits.
#' 2) Analytical significance p-value (Pmin) of minP using asymptotic multivariate normal integration.
#' See refs in the PGmvn() function.
#'
#' Remarks: (1) Be cautious to interpret extreme Pmin: it is very hard to accurately compute extreme p-values.
#' (2) Generally Pmin is close to Pbonf at extreme values under moderate trait correlations.
#' (3) Under extreme trait correlations, Pmin can offer advantages compared to Pbonf.
#' (4) Note that theoretically we have minP \eqn{\le} Pmin \eqn{\le} Pbonf.
#' @param obj fitted null model from MLM.null
#' @param G genotype vector
#' @return
#' \describe{
#' \item{p.value}{ individual trait association test p-values }
#' \item{Pbonf}{ Bonferroni corrected minimum p-value }
#' \item{Pmin}{ Significance p-value of minimum p-value computed analytically based on multivariate normal dist }
#' }
#'
#' @export
#' @references
#' Conneely,K.N. and Boehnke,M. (2007) So many correlated tests, so little time! Rapid adjustment of P values for multiple correlated tests. Am. J. Hum. Genet. 81, 1158–1168.
#'
#' Conneely,K.N. and Boehnke,M. (2010) Meta-analysis of genetic association studies and adjustment for multiple testing of correlated SNPs and traits. Genetic Epidemiology. 34:739-746.
#'
#' Wu,B. (2017) MTAR: an R package for versatile genome-wide association test of multiple traits. tech report.
MTA.minp <- function(obj,G){
    ## Minimum p-value based multi-trait association test of genotype G given
    ## the null model fit `obj` from MLM.null(); see the roxygen block above.
    ## Returns per-trait p-values plus Bonferroni (pbonf) and analytical
    ## multivariate-normal (pmin) corrections of the minimum p-value.
    n = obj$n; m=obj$m; p=obj$p
    ## Residualize G against the covariate space spanned by obj$Ux.
    Gh = as.vector( obj$Ux%*%colSums(obj$Ux*G) )
    Ge = G-Gh; G2 = sum(Ge^2)
    ## A (near-)constant residual genotype carries no information: bail out.
    if(G2<1e-3) return(list(p.value=rep(NA,m),coef=NA))
    ## beta coef: per-trait regression coefficients of G
    U0 = colSums(obj$res*Ge)
    rcf = U0/G2
    res = obj$res - outer(Ge,rcf)
    S2 = colSums(res^2)/(n-p-2)
    sigs = sqrt(S2)
    ## Per-trait t statistics and two-sided p-values.
    Z = rcf/sigs*sqrt(G2)
    pval = pt(-abs(Z),df=n-p-2)*2
    ## uni-test: minimum p-value across traits and its corrections.
    R0 = t(res)%*%res/(n-p-2)
    R0s = t(R0/sigs)/sigs
    p0 = min(pval)
    pbonf = min(p0*m, 1)
    Z0 = -qnorm(p0/2)
    ## Analytical significance of minP via multivariate normal integration.
    psvd = try( {PGmvn(lower=rep(-Z0,m),upper=rep(Z0,m),sigma=R0s,Nsample=1e5,Ncov=m)} )
    ## Bug fix: test the try() result with inherits() -- class(psvd) can have
    ## length > 1, which errors inside if() on modern R.
    if(inherits(psvd, 'try-error')){
        ## Fallback: mvtnorm::pmvnorm with the GenzBretz algorithm.
        it = 0; maxpts=1e5
        abseps = min(max(pbonf*0.05, p0), 1e-5)
        pmvn = 1-pmvnorm(lower=rep(-Z0,m),upper=rep(Z0,m), sigma=R0s, algorithm=GenzBretz(maxpts=maxpts,abseps=abseps))
        ## Retry with a doubled maxpts budget until pmvnorm converges
        ## (scalar conditions, hence && rather than elementwise &).
        while( (attr(pmvn,'msg')!='Normal Completion') && (it<10) ){
            it = it+1; maxpts = maxpts*2
            pmvn = 1-pmvnorm(lower=rep(-Z0,m),upper=rep(Z0,m), sigma=R0s, algorithm=GenzBretz(maxpts=maxpts,abseps=abseps))
        }
        ## Clamp into [p0, pbonf]: theoretically minP <= Pmin <= Pbonf.
        pmin = min(max(pmvn, p0), pbonf)
    } else{
        pmin = min(max(psvd, p0), pbonf)
    }
    return(list(p.value=pval, pmin=pmin,pbonf=pbonf))
    ## Genz() is not accurate!!! (quasi-MC) almost not working for extreme p-values!!!
    ## Miwa() can produce negative values!!! and computationally intensive for large m!!!
    ## PGmvn() can often lead to much more accurate results.
}
#' Compute the tail probability of the m-dim multivariate normal distribution
#'
#' Internal function. Not to be called directly.
#' @param lower the vector of lower limits of length m
#' @param upper the vector of upper limits of length m
#' @param mean the mean vector of length m
#' @param sigma the covariance matrix of dimension m
#' @param Nsample the number of Monte Carlo samples
#' @param Ncov the number of control variates to be used (<=m).
#' @return multivariate normal distribution probability of outside the specified box region.
##' @export
#' @references
#' Phinikettos,I. and Gandy,A. (2011) Fast computation of high-dimensional multivariate normal probabilities. Computational Statistics & Data Analysis. 55, 1521–1529.
#'
#' Genz, A., Bretz, F., Miwa, T., Mi, X., Leisch, F., Scheipl, F., Bornkamp, B., Maechler, M., Hothorn, T. (2015) mvtnorm: Multivariate Normal and t Distributions. R package version 1.0-3. \url{https://cran.r-project.org/web/packages/mvtnorm/index.html}
PGmvn <- function(lower = -Inf, upper = Inf, mean = NULL, sigma, Nsample = 1e4, Ncov = 1) {
    ## Tail probability of an m-variate normal outside the box [lower, upper];
    ## see the roxygen block above for the full contract.
    m <- nrow(sigma)
    ## The mean defaults to the origin.
    mu <- if (is.null(mean)) rep(0, m) else mean
    ## Recycle scalar limits to the full dimension.
    if (length(lower) <= 1) {
        lower <- rep(lower, m)
    }
    if (length(upper) <= 1) {
        upper <- rep(upper, m)
    }
    ## Centre the box and complement the inside-box probability from psvdnorm().
    1 - psvdnorm(lower - mu, upper - mu, Sigma = sigma, Nsample = Nsample, Ncov = Ncov)
}
|
#' @title Confidence Interval of ratios
#' @description Confidence interval of a variance ratio based on the F distribution
#'   (the code uses \code{qf}; the previous description incorrectly said ChiSquare)
#' @param varratio a variance ratio
#' @param df.1 degrees of freedom of the numerator (passed to \code{qf} as \code{df1})
#' @param df.2 degrees of freedom of the denominator (passed to \code{qf} as \code{df2})
#' @param pval desired significance level, defaults to 0.1
#' @return numeric vector of length 2: lower and upper confidence bounds
#' @author Thomas Laepple
ConfRatio <- function(varratio, df.1, df.2, pval = 0.1)
{
    ## Scale the ratio by the pval/2 and 1 - pval/2 quantiles of F(df.1, df.2).
    varratio * qf(c(pval / 2, 1 - pval / 2), df1 = df.1, df2 = df.2)
}
| /R/ConfRatio.R | permissive | EarthSystemDiagnostics/paleospec | R | false | false | 501 | r | #' @title Confidence Interval of ratios
#' @description Confidence interval of a variance ratio based on the F distribution
#'   (the code uses \code{qf}; the previous description incorrectly said ChiSquare)
#' @param varratio a variance ratio
#' @param df.1 degrees of freedom of the numerator (passed to \code{qf} as \code{df1})
#' @param df.2 degrees of freedom of the denominator (passed to \code{qf} as \code{df2})
#' @param pval desired significance level, defaults to 0.1
#' @return numeric vector of length 2: lower and upper confidence bounds
#' @author Thomas Laepple
ConfRatio <- function(varratio, df.1, df.2, pval = 0.1)
{
    ## Scale the ratio by the pval/2 and 1 - pval/2 quantiles of F(df.1, df.2).
    varratio * qf(c(pval / 2, 1 - pval / 2), df1 = df.1, df2 = df.2)
}
|
# Capek attempt: problems with diacritics, lemmatization, etc.
# lemmatizator: http://lindat.mff.cuni.cz/services/morphodita/
library(tidyverse)
## Read the pre-lemmatized text of Capek's R.U.R., one line per element.
capek <- read_lines("capek_rur_lemma.txt")
library(tidytext)
# text_df <- as.tibble(capek)
## NOTE(review): the line count 4483 is hard-coded; seq_along(capek) would be
## safer -- confirm the input file really has 4483 lines.
text_df <- tibble(line = 1:4483, text = capek)
## Tokenize: one row per word.
tidy_capek <- text_df %>%
  unnest_tokens(word, text)
## Word frequency table, most frequent first.
tidy_capek %>%
  count(word, sort = TRUE)
library(ggplot2)
## Horizontal bar chart of words occurring more than 50 times.
tidy_capek %>%
  count(word, sort = TRUE) %>%
  filter(n > 50) %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n)) +
  geom_col() +
  xlab(NULL) +
  coord_flip()
| /homework_2.R | no_license | MojmirDocekal/GitHubMDRepoPrednaskaProDoktorandy2020 | R | false | false | 580 | r | # Capek attempt: problems with diacritics, lemmatization, etc.
# lemmatizator: http://lindat.mff.cuni.cz/services/morphodita/
library(tidyverse)
## Read the pre-lemmatized text of Capek's R.U.R., one line per element.
capek <- read_lines("capek_rur_lemma.txt")
library(tidytext)
# text_df <- as.tibble(capek)
## NOTE(review): the line count 4483 is hard-coded; seq_along(capek) would be
## safer -- confirm the input file really has 4483 lines.
text_df <- tibble(line = 1:4483, text = capek)
## Tokenize: one row per word.
tidy_capek <- text_df %>%
  unnest_tokens(word, text)
## Word frequency table, most frequent first.
tidy_capek %>%
  count(word, sort = TRUE)
library(ggplot2)
## Horizontal bar chart of words occurring more than 50 times.
tidy_capek %>%
  count(word, sort = TRUE) %>%
  filter(n > 50) %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n)) +
  geom_col() +
  xlab(NULL) +
  coord_flip()
|
##########
#Load Database: UCI household power consumption; '?' marks missing values
bd <- read.csv("~/Rall/household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE)
#R Libraries
library(plyr)
library(lubridate)
# Transform class: parse day-month-year strings into Date objects
bd <- mutate(bd, Date = as.Date(dmy(Date)))
#subset 2007-02-01 to 2007-02-02
subbd <- bd[(bd$Date >= as.Date("2007/02/01") & bd$Date <= as.Date("2007/02/02")),]
## Combine Date and Time into a single POSIXct timestamp for the x-axis.
subbd <- mutate(subbd, DateTime = ymd_hms(paste(subbd$Date,subbd$Time)))
#Second PLOT: global active power over the two days as a line chart
plot (subbd$DateTime, subbd$Global_active_power, type = "l", ylab="Global Active Power (Kilowatts)")
#END | /plot2.R | no_license | alfonsogd/RPlots | R | false | false | 548 | r | ##########
#Load Database: UCI household power consumption; '?' marks missing values
bd <- read.csv("~/Rall/household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE)
#R Libraries
library(plyr)
library(lubridate)
# Transform class: parse day-month-year strings into Date objects
bd <- mutate(bd, Date = as.Date(dmy(Date)))
#subset 2007-02-01 to 2007-02-02
subbd <- bd[(bd$Date >= as.Date("2007/02/01") & bd$Date <= as.Date("2007/02/02")),]
## Combine Date and Time into a single POSIXct timestamp for the x-axis.
subbd <- mutate(subbd, DateTime = ymd_hms(paste(subbd$Date,subbd$Time)))
#Second PLOT: global active power over the two days as a line chart
plot (subbd$DateTime, subbd$Global_active_power, type = "l", ylab="Global Active Power (Kilowatts)")
#END
#END |
#
# Copyright 2007-2018 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Program: BivariateCorrelation.R
# Author: Hermine Maes
# Date: 2009.08.01
#
# ModelType: Saturated
# DataType: Continuous
# Field: None
#
# Purpose:
# Optimization Example in OpenMx: Testing significance of correlation
#
# RevisionHistory:
# Hermine Maes -- 2009.10.08 updated & reformatted
# Ross Gore -- 2011.06.15 added Model, Data & Field metadata
# Mike Hunter -- 2013.09.16 nudged starting values of second model varainces away from zero
# Hermine Maes -- 2014.11.02 piecewise specification
# -----------------------------------------------------------------------------
require(OpenMx)
# NOTE(review): MASS appears unused below — the simulation calls
# mvtnorm::rmvnorm, and MASS's own sampler would be mvrnorm. Left as-is
# since this is a vendored OpenMx demo.
require(MASS)
# Load Library
# -----------------------------------------------------------------------------
set.seed(200)
rs=.5
# Draw 1000 bivariate-normal observations with unit variances and
# correlation rs = .5.
xy <- mvtnorm::rmvnorm (1000, c(0,0), matrix(c(1,rs,rs,1),2,2))
testData <- xy
testData <- testData[, order(apply(testData, 2, var))[2:1]] #put the data columns in order from largest to smallest variance
# Note: Users do NOT have to re-order their data columns. This is only to make data generation the same on different operating systems: to fix an inconsistency with the mvtnorm::rmvnorm function. (Correction: rmvnorm lives in the mvtnorm package; the MASS analogue is mvrnorm.)
selVars <- c('X','Y')
dimnames(testData) <- list(NULL, selVars)
summary(testData)
cov(testData)
# Simulate Data: two standardized variables X & Y with correlation of .5
# -----------------------------------------------------------------------------
# Saturated model: free means plus a Cholesky parameterization of the
# covariance (expCov = Chol %*% t(Chol) keeps it positive definite).
bivCorModel <- mxModel("bivCor",
mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE, values=c(0,0), name="expMean" ),
mxMatrix( type="Lower", nrow=2, ncol=2, free=TRUE, values=.5, name="Chol" ),
mxAlgebra( expression=Chol %*% t(Chol), name="expCov"),
mxData( observed=testData, type="raw" ),
mxExpectationNormal( covariance="expCov", means="expMean", dimnames=selVars),
mxFitFunctionML()
)
# Fit Saturated Model with Raw Data and Matrix-style Input
# -----------------------------------------------------------------------------
bivCorFit <- mxRun(bivCorModel)
EM <- mxEval(expMean, bivCorFit)
EC <- mxEval(expCov, bivCorFit)
# LL is the optimized fit value (−2·log-likelihood for the ML fit function,
# consistent with the ~5407 reference value checked below).
LL <- mxEval(fitfunction, bivCorFit)
# Run Model and Generate Output
# -----------------------------------------------------------------------------
# Submodel: replace the Lower Cholesky by a Diag matrix, i.e. constrain the
# off-diagonal covariance (the correlation) to zero.
bivCorModelSub <-mxModel(bivCorModel,
mxMatrix( type="Diag", nrow=2, ncol=2, free=TRUE,
values=.2, # Note: to test optimizer for robustness to bad starting values, change to 0.
name="Chol" )
)
# Specify SubModel testing Covariance=Zero
# -----------------------------------------------------------------------------
bivCorFitSub <- mxRun(bivCorModelSub)
EMs <- mxEval(expMean, bivCorFitSub)
ECs <- mxEval(expCov, bivCorFitSub)
LLs <- mxEval(fitfunction, bivCorFitSub)
# Likelihood-ratio statistic: difference of the two fit values.
Chi= LLs-LL;
LRT= rbind(LL,LLs,Chi); LRT
# Run Model and Generate Output
# -----------------------------------------------------------------------------
omxCheckCloseEnough(LL, 5407.036, .001)
omxCheckCloseEnough(c(EC), c(1.0656, 0.4752, 0.4752, 0.9292), .001)
omxCheckCloseEnough(c(EM), c(0.058, 0.006), .001)
# Compare OpenMx Results to Mx Results
# LL: likelihood; EC: expected covariance, EM: expected means
# -----------------------------------------------------------------------------
| /SilveR/R-3.5.1/library/OpenMx/demo/BivariateCorrelation.R | permissive | kevinmiles/SilveR | R | false | false | 3,915 | r | #
# Copyright 2007-2018 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Program: BivariateCorrelation.R
# Author: Hermine Maes
# Date: 2009.08.01
#
# ModelType: Saturated
# DataType: Continuous
# Field: None
#
# Purpose:
# Optimization Example in OpenMx: Testing significance of correlation
#
# RevisionHistory:
# Hermine Maes -- 2009.10.08 updated & reformatted
# Ross Gore -- 2011.06.15 added Model, Data & Field metadata
# Mike Hunter -- 2013.09.16 nudged starting values of second model varainces away from zero
# Hermine Maes -- 2014.11.02 piecewise specification
# -----------------------------------------------------------------------------
require(OpenMx)
require(MASS)
# Load Library
# -----------------------------------------------------------------------------
set.seed(200)
rs=.5
xy <- mvtnorm::rmvnorm (1000, c(0,0), matrix(c(1,rs,rs,1),2,2))
testData <- xy
testData <- testData[, order(apply(testData, 2, var))[2:1]] #put the data columns in order from largest to smallest variance
# Note: Users do NOT have to re-order their data columns. This is only to make data generation the same on different operating systems: to fix an inconsistency with the mvtnorm::rmvnorm function in the MASS package.
selVars <- c('X','Y')
dimnames(testData) <- list(NULL, selVars)
summary(testData)
cov(testData)
# Simulate Data: two standardized variables X & Y with correlation of .5
# -----------------------------------------------------------------------------
bivCorModel <- mxModel("bivCor",
mxMatrix( type="Full", nrow=1, ncol=2, free=TRUE, values=c(0,0), name="expMean" ),
mxMatrix( type="Lower", nrow=2, ncol=2, free=TRUE, values=.5, name="Chol" ),
mxAlgebra( expression=Chol %*% t(Chol), name="expCov"),
mxData( observed=testData, type="raw" ),
mxExpectationNormal( covariance="expCov", means="expMean", dimnames=selVars),
mxFitFunctionML()
)
# Fit Saturated Model with Raw Data and Matrix-style Input
# -----------------------------------------------------------------------------
bivCorFit <- mxRun(bivCorModel)
EM <- mxEval(expMean, bivCorFit)
EC <- mxEval(expCov, bivCorFit)
LL <- mxEval(fitfunction, bivCorFit)
# Run Model and Generate Output
# -----------------------------------------------------------------------------
bivCorModelSub <-mxModel(bivCorModel,
mxMatrix( type="Diag", nrow=2, ncol=2, free=TRUE,
values=.2, # Note: to test optimizer for robustness to bad starting values, change to 0.
name="Chol" )
)
# Specify SubModel testing Covariance=Zero
# -----------------------------------------------------------------------------
bivCorFitSub <- mxRun(bivCorModelSub)
EMs <- mxEval(expMean, bivCorFitSub)
ECs <- mxEval(expCov, bivCorFitSub)
LLs <- mxEval(fitfunction, bivCorFitSub)
Chi= LLs-LL;
LRT= rbind(LL,LLs,Chi); LRT
# Run Model and Generate Output
# -----------------------------------------------------------------------------
omxCheckCloseEnough(LL, 5407.036, .001)
omxCheckCloseEnough(c(EC), c(1.0656, 0.4752, 0.4752, 0.9292), .001)
omxCheckCloseEnough(c(EM), c(0.058, 0.006), .001)
# Compare OpenMx Results to Mx Results
# LL: likelihood; EC: expected covariance, EM: expected means
# -----------------------------------------------------------------------------
|
# Open a connection to the given SQLite database file.
# Returns a list carrying the driver object ("m") and the live connection
# ("con") so the caller can disconnect / unload the driver later.
openSQLiteDB = function(database) {
  driver <- dbDriver("SQLite", max.con = 25)
  connection <- dbConnect(driver, dbname = database)
  list(m = driver, con = connection)
}
# Bulk-insert a data frame into an SQLite table inside a single transaction.
#
# con:       an open RSQLite connection
# tablename: target table (must already exist with ncol(df) columns);
#            NOTE: it is pasted into the SQL verbatim, so it must come from
#            trusted code, never from user input.
# df:        data frame whose columns match the table's column order.
#
# Builds "insert into <table> values (?,?,...,?)" with one placeholder per
# column and binds df row-wise via the (legacy RSQLite) prepared-query API.
addDataFrameToSQLiteDB = function(con, tablename, df) {
  values.questionmarks = paste(paste(rep('?,', ncol(df)-1), collapse=''), '?', sep='')
  sql = paste('insert into ', tablename, ' values (', values.questionmarks, ')', sep='')
  dbBeginTransaction(con)
  # FIX: the original left the transaction open if the insert failed.
  # Roll back on error and re-signal it so the connection stays usable.
  tryCatch({
    dbGetPreparedQuery(con, sql, bind.data = df)
    dbCommit(con)
  }, error = function(e) {
    dbRollback(con)
    stop(e)
  })
}
# Run a SQL query on the connection and return the fetched rows.
# NOTE(review): the result set "rs" is never dbClearResult()-ed, so every
# call leaks an open result set on the connection — worth fixing.
SQLiteQuery = function(con, sql) {
rs = dbSendQuery(con, sql)
fetch(rs)
} | /RFinance/R/sqlite.R | no_license | rootfs-analytics/RFinance | R | false | false | 567 | r | openSQLiteDB = function(database) {
m = dbDriver("SQLite", max.con = 25)
con = dbConnect(m, dbname=database)
return( list(m=m, con=con) )
}
# Bulk-insert a data frame into an SQLite table inside a single transaction.
#
# con:       an open RSQLite connection
# tablename: target table (must already exist with ncol(df) columns);
#            NOTE: it is pasted into the SQL verbatim, so it must come from
#            trusted code, never from user input.
# df:        data frame whose columns match the table's column order.
#
# Builds "insert into <table> values (?,?,...,?)" with one placeholder per
# column and binds df row-wise via the (legacy RSQLite) prepared-query API.
addDataFrameToSQLiteDB = function(con, tablename, df) {
  values.questionmarks = paste(paste(rep('?,', ncol(df)-1), collapse=''), '?', sep='')
  sql = paste('insert into ', tablename, ' values (', values.questionmarks, ')', sep='')
  dbBeginTransaction(con)
  # FIX: the original left the transaction open if the insert failed.
  # Roll back on error and re-signal it so the connection stays usable.
  tryCatch({
    dbGetPreparedQuery(con, sql, bind.data = df)
    dbCommit(con)
  }, error = function(e) {
    dbRollback(con)
    stop(e)
  })
}
# Run a SQL query on the connection and return the fetched rows as a
# data frame.
# FIX: release the result set when done — the original never called
# dbClearResult(), leaking one open result set per call. on.exit() makes
# the cleanup run even if fetch() errors.
SQLiteQuery = function(con, sql) {
  rs = dbSendQuery(con, sql)
  on.exit(dbClearResult(rs), add = TRUE)
  fetch(rs)
}
# Start from 1990.csv file
# Use the shell to extract the ArrDelay column (field 15) and, via
# sort | uniq -c, build a frequency table of the unique delay values,
# saved as 1990.txt. Read that table back into R and compute the
# weighted mean, sd and median of the delays from the (count, value) pairs.
ptm <- proc.time()
# NOTE(review): hard-coded absolute path + setwd; consider parameterizing.
setwd ("/Users/Qian/Documents/STA_250Duncan/Data")
system ("cut -f 15 -d \",\" \"1990.csv\" | sort -n | uniq -c > \"1990.txt\"
", intern = TRUE )
# Column 1 = count (freq), column 2 = delay value (time).
dataset = read.table ("1990.txt")
# Coerce the delay column to integer; non-numeric rows (e.g. the "ArrDelay"
# header line or literal "NA" strings) become NA here.
dataset[,2] = as.integer(as.character(dataset[,2]))
# BUG FIX: the original used dataset[-which(is.na(dataset[,2])),], which
# drops EVERY row when there are no NAs (which() returns integer(0), and
# x[-integer(0), ] selects zero rows). Logical subsetting handles both cases.
dataset = dataset[!is.na(dataset[,2]), ]
colnames(dataset) = c("freq","time")
# Collapse duplicate delay values: total count per unique delay.
byTime = split(dataset$freq, dataset$time)
sumTime = sort(vapply(byTime, sum, numeric(1)), decreasing = TRUE)
# DelayTable: column 1 = total count, column 2 = delay value.
DelayTable = cbind(as.numeric(sumTime), as.numeric(names(sumTime)))
# Frequency-weighted mean and (population) standard deviation.
mean_delay = sum(DelayTable[,1] * DelayTable[,2]) / sum(DelayTable[,1])
sd_delay = sqrt(sum(DelayTable[,1] * (DelayTable[,2] - mean_delay)^2) / sum(DelayTable[,1]))
# Median: first delay (in increasing order) whose cumulative count reaches
# half the total. Replaces the original O(n^2) break-loop with one cumsum.
tt = DelayTable[order(DelayTable[,2]), ]
cum_freq = cumsum(tt[,1])
median_delay = tt[which(cum_freq >= sum(tt[,1]) / 2)[1], 2]
time = proc.time() - ptm; time
result2_1990 =
  list( time = time, mean = mean_delay, sd = sd_delay, median = median_delay,
        system = Sys.info(), session = sessionInfo() )
# Use the shell to extract the ArrDelay column and
# by the shell's sort | uniq -c build up a frequency table of counts
# of unique delay values, save that table as txt file.
# Use read.table to obtain that txt file in R and compute the mean, sd and
# median from the frequency table.
ptm <- proc.time()
setwd ("/Users/Qian/Documents/STA_250Duncan/Data")
system ("cut -f 15 -d \",\" \"1990.csv\" | sort -n | uniq -c > \"1990.txt\"
", intern = TRUE )
dataset = read.table ("1990.txt")
dataset[,2] = as.integer(as.character(dataset[,2]))
dataset = dataset[-which(is.na(dataset[,2])),]
colnames(dataset) = c("freq","time")
byTime =
split ( dataset $ freq, dataset $ time )
sumTime =
sort ( sapply ( byTime , sum ) , decreasing = T )
DelayTable =
cbind(as.numeric(sumTime),as.numeric(names(sumTime)))
mean_delay =
sum(DelayTable[,1]*DelayTable[,2])/sum(DelayTable[,1])
sd_delay =
sqrt(sum(DelayTable[,1]*(DelayTable[,2]-mean_delay)^2)/sum(DelayTable[,1]))
tt = DelayTable[order(DelayTable[,2]),]
for (i in 1: nrow(tt)){
if ( sum (tt[1:i,1]) >= sum(DelayTable[,1])/2)
break
}
median_delay = tt[i,2]
time = proc.time() - ptm;time
result2_1990 =
list( time = time, mean = mean_delay,sd = sd_delay,median = median_delay,
system = Sys.info(), session = sessionInfo() )
|
# Example driver for a federated GLM: each "node" reads the shared
# master.Rds state plus its local CSV, writes its partial result to
# userN.Rds, and the "master" combines them — iterating until the
# IRLS-style loop reports convergence. All node/master communication is
# simulated through files in the working directory.
# NOTE(review): remove(list=ls()) and setwd() in a script are anti-patterns;
# fine for a demo, but avoid in reusable code.
remove(list=ls())
#place your own directory
source("C:/Users/mce1908.52713/Desktop/FEDERATED_GLM/GLM_FL.R")
#set your own directory to save files
setwd("C:/Users/mce1908.52713/Desktop/FEDERATED_GLM/Example_Files/")
### create the master file. This has to be done once for all
### can be done by the server and distributed to all users
master_init(formula = num_awards ~ prog + math,family = 'poisson',maxit=25,tol= 1e-08)
repeat{
# --- Beta step: each node computes its contribution to the coefficients ---
###this happen in node 1
master=readRDS("master.Rds")
data_user1=read.csv('data_user1.csv')
node_beta(Data = data_user1,master = master,userID = 'user1')
rm(data_user1,master)
###this happen in node 2
master=readRDS("master.Rds")
data_user2=read.csv('data_user2.csv')
node_beta(Data = data_user2,master = master,userID = 'user2')
rm(data_user2,master)
###this happen in node 3
master=readRDS("master.Rds")
data_user3=read.csv('data_user3.csv')
node_beta(Data = data_user3,master = master,userID = 'user3')
rm(data_user3,master)
###############################
###this happen in the master ##
###############################
# Combine the three nodes' partial results and update master.Rds.
user_1=readRDS("user1.Rds")
user_2=readRDS("user2.Rds")
user_3=readRDS("user3.Rds")
master=readRDS("master.Rds")
master_beta(user_1,user_2,user_3,master = master)
rm(user_1,user_2,user_3,master)
###############################
###############################
# --- Deviance step: each node evaluates deviance under the new betas ---
###this happen in node 1
master=readRDS("master.Rds")
data_user1=read.csv('data_user1.csv')
node_deviance(Data = data_user1,master = master,userID = 'user1')
rm(master,data_user1)
###this happen in node 2
master=readRDS("master.Rds")
data_user2=read.csv('data_user2.csv')
node_deviance(Data = data_user2,master = master,userID = 'user2')
rm(master,data_user2)
###this happen in node 3
master=readRDS("master.Rds")
data_user3=read.csv('data_user3.csv')
node_deviance(Data = data_user3,master = master,userID = 'user3')
rm(master,data_user3)
###############################
###this happen in the master ##
###############################
user_1=readRDS("user1.Rds")
user_2=readRDS("user2.Rds")
user_3=readRDS("user3.Rds")
master=readRDS("master.Rds")
master_deviance(user_1,user_2,user_3,master=master)
rm(user_1,user_2,user_3,master)
###############################
###############################
#convergence check
# On break, `master` is deliberately kept in the workspace (rm is skipped)
# so it can be inspected below.
master=readRDS("master.Rds")
if(master$converged) break
rm(master)
}
master
| /NEW_Example_GLM_FL.R | no_license | IKNL/vtg_GLM | R | false | false | 2,448 | r | remove(list=ls())
#place your own directory
source("C:/Users/mce1908.52713/Desktop/FEDERATED_GLM/GLM_FL.R")
#set your own directory to sve files
setwd("C:/Users/mce1908.52713/Desktop/FEDERATED_GLM/Example_Files/")
### create the master file. This has to be done once for all
### can be done by the server and distributed to all users
master_init(formula = num_awards ~ prog + math,family = 'poisson',maxit=25,tol= 1e-08)
repeat{
###this happen in node 1
master=readRDS("master.Rds")
data_user1=read.csv('data_user1.csv')
node_beta(Data = data_user1,master = master,userID = 'user1')
rm(data_user1,master)
###this happen in node 2
master=readRDS("master.Rds")
data_user2=read.csv('data_user2.csv')
node_beta(Data = data_user2,master = master,userID = 'user2')
rm(data_user2,master)
###this happen in node 3
master=readRDS("master.Rds")
data_user3=read.csv('data_user3.csv')
node_beta(Data = data_user3,master = master,userID = 'user3')
rm(data_user3,master)
###############################
###this happen in the master ##
###############################
user_1=readRDS("user1.Rds")
user_2=readRDS("user2.Rds")
user_3=readRDS("user3.Rds")
master=readRDS("master.Rds")
master_beta(user_1,user_2,user_3,master = master)
rm(user_1,user_2,user_3,master)
###############################
###############################
###this happen in node 1
master=readRDS("master.Rds")
data_user1=read.csv('data_user1.csv')
node_deviance(Data = data_user1,master = master,userID = 'user1')
rm(master,data_user1)
###this happen in node 2
master=readRDS("master.Rds")
data_user2=read.csv('data_user2.csv')
node_deviance(Data = data_user2,master = master,userID = 'user2')
rm(master,data_user2)
###this happen in node 3
master=readRDS("master.Rds")
data_user3=read.csv('data_user3.csv')
node_deviance(Data = data_user3,master = master,userID = 'user3')
rm(master,data_user3)
###############################
###this happen in the master ##
###############################
user_1=readRDS("user1.Rds")
user_2=readRDS("user2.Rds")
user_3=readRDS("user3.Rds")
master=readRDS("master.Rds")
master_deviance(user_1,user_2,user_3,master=master)
rm(user_1,user_2,user_3,master)
###############################
###############################
#convergence check
master=readRDS("master.Rds")
if(master$converged) break
rm(master)
}
master
|
# Plot 3 of the ExData course project: overlay the three sub-metering
# series for 2007-02-01/02 and save the screen plot to plot3.png.
raw_data1 <- read.csv2("./household_power_consumption.txt") # Load data set which is stored in the working directory
raw_data2 <- subset(raw_data1,raw_data1$Date=="1/2/2007" | raw_data1$Date=="2/2/2007") # "We will only be using data from the dates 2007-02-01 and 2007-02-02"
raw_data2[raw_data2=="?"]<- NA # Replaces the missing value character "?" by NA
# paste() first renders the column as its label strings (the columns come in
# as character/factor from read.csv2), then as.numeric parses them.
sm1 <-as.numeric(paste(raw_data2$Sub_metering_1)) # By default the data type of "Global_active_power is "character". This command converts the variable to "numeric"
sm2 <-as.numeric(paste(raw_data2$Sub_metering_2))
sm3 <-as.numeric(paste(raw_data2$Sub_metering_3))
par(mfrow=c(1,1),mar=c(5.1,4.1,4.1,2.1)) # sets parameters to default
# x axis is just the observation index; xaxt="n" suppresses its labels.
plot(sm1,ylab="Energy sub metering",xlab="",xaxt="n", type="l",col="black") #creates plot with first line
lines(sm2,col="red") # adds second line
lines(sm3,col="blue") # adds third line
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col=c("black", "red","blue"), lty=1:1, cex=.8) # adds legend
# dev.copy re-renders the active screen device into a 480x480 PNG.
dev.copy(png,'plot3.png',width=480,height=480) # saves created plot
dev.off()
raw_data2 <- subset(raw_data1,raw_data1$Date=="1/2/2007" | raw_data1$Date=="2/2/2007") # "We will only be using data from the dates 2007-02-01 and 2007-02-02"
raw_data2[raw_data2=="?"]<- NA # Replaces the missing value character "?" by NA
sm1 <-as.numeric(paste(raw_data2$Sub_metering_1)) # By default the data type of "Global_active_power is "character". This command converts the variable to "numeric"
sm2 <-as.numeric(paste(raw_data2$Sub_metering_2))
sm3 <-as.numeric(paste(raw_data2$Sub_metering_3))
par(mfrow=c(1,1),mar=c(5.1,4.1,4.1,2.1)) # sets parameters to default
plot(sm1,ylab="Energy sub metering",xlab="",xaxt="n", type="l",col="black") #creates plot with first line
lines(sm2,col="red") # adds second line
lines(sm3,col="blue") # adds third line
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"), col=c("black", "red","blue"), lty=1:1, cex=.8) # adds legend
dev.copy(png,'plot3.png',width=480,height=480) # saves created plot
dev.off() |
###
### $Id: vander.R 51 2014-02-05 21:22:28Z plroebuck $
###
##-----------------------------------------------------------------------------
# Run one vander() test case: call matlab::vander with the arguments given
# in `input` (a named list, e.g. list(v = ...)) and report whether the
# result is identical() to `expected`.
test.vander <- function(input, expected) {
  vander_fn <- getFromNamespace("vander", "matlab")
  actual <- do.call(vander_fn, input)
  identical(actual, expected)
}
# Expected Vandermonde matrices: row i is v[i]^(n-1), ..., v[i]^1, v[i]^0.
vander.expected.empty <- matrix(as.numeric(NA), nrow=0, ncol=0)
vander.expected.scalar <- matrix(1.0, nrow=1, ncol=1)
vander.expected.seq3 <- matrix(c( 1, 1, 1, 1, 1,
5.0625, 3.375, 2.25, 1.5, 1,
16, 8, 4, 2, 1,
39.0625, 15.625, 6.25, 2.5, 1,
81, 27, 9, 3, 1),
nrow=5, ncol=5, byrow=TRUE)
vander.expected.complex <- matrix(c(-1, 0.0 + 1i, 1.0,
-4, 0.0 + 2i, 1.0,
-9, 0.0 + 3i, 1.0),
nrow=3, ncol=3, byrow=TRUE)
# NOTE(review): the TRUE/FALSE results below are only printed, never
# asserted — a failing case would not stop the script.
test.vander(list(v=numeric()), vander.expected.empty)
test.vander(list(v=1), vander.expected.scalar)
test.vander(list(v=seq(from=1, to=3, by=0.5)), vander.expected.seq3)
test.vander(list(v=c(1,2,3)*1i), vander.expected.complex)
| /data/genthat_extracted_code/matlab/tests/vander.R | no_license | surayaaramli/typeRrh | R | false | false | 1,245 | r | ###
### $Id: vander.R 51 2014-02-05 21:22:28Z plroebuck $
###
##-----------------------------------------------------------------------------
test.vander <- function(input, expected) {
output <- do.call(getFromNamespace("vander", "matlab"), input)
identical(output, expected)
}
vander.expected.empty <- matrix(as.numeric(NA), nrow=0, ncol=0)
vander.expected.scalar <- matrix(1.0, nrow=1, ncol=1)
vander.expected.seq3 <- matrix(c( 1, 1, 1, 1, 1,
5.0625, 3.375, 2.25, 1.5, 1,
16, 8, 4, 2, 1,
39.0625, 15.625, 6.25, 2.5, 1,
81, 27, 9, 3, 1),
nrow=5, ncol=5, byrow=TRUE)
vander.expected.complex <- matrix(c(-1, 0.0 + 1i, 1.0,
-4, 0.0 + 2i, 1.0,
-9, 0.0 + 3i, 1.0),
nrow=3, ncol=3, byrow=TRUE)
test.vander(list(v=numeric()), vander.expected.empty)
test.vander(list(v=1), vander.expected.scalar)
test.vander(list(v=seq(from=1, to=3, by=0.5)), vander.expected.seq3)
test.vander(list(v=c(1,2,3)*1i), vander.expected.complex)
|
# do some data exploration on the raw dataset ----
# Builds and saves (as .RDS ggplot objects) a set of exploratory figures
# relating an "any autoimmune condition" flag to age, parental status,
# family size and income, from a data.table `masterRaw`.
masterRaw <- readRDS("data/masterRaw.RDS")
# load libraries
library(magrittr)
library(data.table)
library(ggplot2)
# theme
source("project_analysis/figures/projectTheme.R")
# create autoimmune disease variable ----
# Child flag: AUTOIMM = 1 if ANY of these condition columns equals 1.
# Reduce(`|`, lapply(.SD, `==`, 1L)) ORs the per-column "== 1" masks;
# the 4th if_else argument maps NA comparisons to 0L (treated as "no").
cols <- c("CASSTILL", "HAYF1", "HAYF2", "RALLG1", "RALLG2", "DALLG1", "DALLG2", "SALLG1", "SALLG2")
masterRaw[, AUTOIMM := dplyr::if_else(masterRaw[, Reduce(`|`, lapply(.SD, `==`, 1L)), .SDcols = cols], 1L, 0L, 0L)]
masterRaw[, AUTOIMM := factor(AUTOIMM, levels = c(0,1), labels=c("No autoimmune", "Has autoimmune"))]
rm(cols)
# Same construction for the parent flag P_AUTOIMM.
cols <- c("P_ASSMEV", "P_DIBTYPE", "P_AHAYFYR")
masterRaw[, P_AUTOIMM := dplyr::if_else(masterRaw[, Reduce(`|`, lapply(.SD, `==`, 1L)), .SDcols = cols], 1L, 0L, 0L)]
masterRaw[, P_AUTOIMM := factor(P_AUTOIMM, levels = c(0,1), labels=c("No autoimmune", "Has autoimmune"))]
rm(cols)
# IMPUTATION FIGS ----
# Birthweight density; the < 10000 filter drops sentinel/implausible values.
fig <- ggplot(data=masterRaw[BWTGRM_P < 10000]) +
geom_density(aes(x=BWTGRM_P), fill="grey45", color="grey45", alpha=0.5) +
labs(x="Birthweight (in grams)") +
theme_minimal()
saveRDS(fig, "project_analysis/figures/birthweight_freq.RDS")
rm(fig)
# autoimmune by age ----
# Share of children with an autoimmune condition within each age.
# NOTE(review): na.rm=T uses the abbreviable literal; prefer TRUE.
data <- masterRaw[, .N, by=.(AGE_P, AUTOIMM)] %>%
.[, PROP := N/sum(N, na.rm=T), by=.(AGE_P)] %>%
.[AUTOIMM=="Has autoimmune", .(AGE_P, PROP)] %>%
.[order(AGE_P)]
fig <- ggplot(data=data) +
geom_bar(aes(x=AGE_P,y=PROP), stat = "identity") +
geom_text(aes(x=AGE_P,y=PROP,
label=scales::percent(PROP)), nudge_y = 0.01, size=3) +
scale_y_continuous(labels = scales::percent) +
scale_x_continuous(breaks = data[, AGE_P]) +
labs(x="Age",y=NULL) +
theme_void() +
theme(axis.text.x = element_text(size = 10, margin = margin(-15,0,0,0)),
axis.title.x = element_text(margin=margin(5,0,10,0)),
axis.title = element_text(size=11))
saveRDS(fig, "project_analysis/figures/age_autoimmune.RDS")
rm(data,fig)
# are kids from parents with autoimmune disease more likley to have this condition? ----
# Child autoimmune share, split by the parent flag.
data <- masterRaw[, .N, by = c("P_AUTOIMM", "AUTOIMM")] %>%
.[, PROP := N/sum(N, na.rm=T), by="P_AUTOIMM"] %>%
.[AUTOIMM == "Has autoimmune", ]
fig <- ggplot(data=data) +
geom_bar(aes(x=P_AUTOIMM,y=PROP), stat = "identity") +
geom_text(aes(x=P_AUTOIMM,y=PROP, label=scales::percent(PROP)), nudge_y = -0.015, size=7, color="white") +
scale_y_continuous(labels = scales::percent) +
labs(x=NULL,y=NULL) +
scale_x_discrete(labels=c("No autoimmune disease", "Has autoimmune disease")) +
projectTheme() +
theme(axis.text = element_text(size = 15, margin = margin(-15,0,5,0)),
axis.title = element_text(size=11),
axis.text.x = element_text(size = 15, margin = margin(-15,0,5,0)),
axis.text.y = element_blank())
saveRDS(fig, "project_analysis/figures/parent_autoimmune.RDS")
rm(data, fig)
# FAMILY SIZE AND AUTOIMMUNE DISEASE ----
# Number of children, top-coded at 4 ("4+"), vs. autoimmune share.
data <- masterRaw[, .(KIDS = ifelse(FM_KIDS > 3, 4, FM_KIDS), AUTOIMM)] %>%
.[, KIDS := factor(KIDS, levels = c(1:4),labels = as.character(c(c(1:3), "4+")), ordered = T)] %>%
.[, .N, by = c("KIDS", "AUTOIMM")] %>%
.[, PROP := N/sum(N, na.rm=T), by=c("KIDS")] %>%
.[AUTOIMM == "Has autoimmune"] %>%
.[order(KIDS)]
# The doubled white/black geom layers give the points/labels a halo effect.
fig <- ggplot(data=data, aes(x=as.numeric(KIDS), y=PROP)) +
geom_line(size=1.25, alpha=0.5) +
geom_point(size=5, color="white",
fill="white") +
geom_point(size=3, alpha=0.5,
color="black") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.004, color="white", fontface="bold") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.004) +
labs(x="Number of children in the family",
y="Child has an autoimmune disease (%)") +
scale_x_continuous(labels=as.character(c(1:3, "+4"))) +
scale_y_continuous(labels=function(x) scales::percent(x, accuracy = 1)) +
theme_minimal() +
theme(text = element_text(family = "sans"),
axis.text.x = element_text(margin=margin(0,0,5,0)),
axis.text.y = element_text(margin=margin(0,0,0,5)),
axis.title.x = element_text(margin=margin(0,0,5,0)),
axis.title.y = element_text(margin=margin(0,0,0,5)))
saveRDS(fig, "project_analysis/figures/kids_autoimmune.RDS")
# `data` is intentionally left in place here; the final rm() below drops it.
rm(fig)
# income and autoimmune disease ----
# Income groups 1-4 (labels below) vs. autoimmune share.
data <- masterRaw[INCGRP5 <= 4, ] %>%
.[, .N, by=c("INCGRP5", "AUTOIMM")] %>%
.[, PROP := N/sum(N, na.rm=T), by="INCGRP5"] %>%
.[AUTOIMM == "Has autoimmune"]
fig <- ggplot(data=data, aes(x=INCGRP5, y=PROP)) +
geom_line(size=1.25, alpha=0.5) +
geom_point(size=5, color="white",
fill="white") +
geom_point(size=3, alpha=0.5,
color="black") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.002, color="white", fontface="bold") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.002) +
labs(x="Family income",
y="Child has an autoimmune disease (%)") +
scale_x_continuous(labels=c("$0 - $34,999",
"$35,000 - $74,999",
"$75,000 - $99,999",
"$100,000+")) +
scale_y_continuous(labels=function(x) scales::percent(x, accuracy = 1)) +
theme_minimal() +
theme(text = element_text(family = "sans"),
axis.text.x = element_text(margin=margin(0,0,5,0)),
axis.text.y = element_text(margin=margin(0,0,0,5)),
axis.title.x = element_text(margin=margin(0,0,5,0)),
axis.title.y = element_text(margin=margin(0,0,0,5)))
saveRDS(fig, "project_analysis/figures/income_autoimmune.RDS")
rm(fig, data)
masterRaw <- readRDS("data/masterRaw.RDS")
# load libraries
library(magrittr)
library(data.table)
library(ggplot2)
# theme
source("project_analysis/figures/projectTheme.R")
# create autoimmune disease variable ----
cols <- c("CASSTILL", "HAYF1", "HAYF2", "RALLG1", "RALLG2", "DALLG1", "DALLG2", "SALLG1", "SALLG2")
masterRaw[, AUTOIMM := dplyr::if_else(masterRaw[, Reduce(`|`, lapply(.SD, `==`, 1L)), .SDcols = cols], 1L, 0L, 0L)]
masterRaw[, AUTOIMM := factor(AUTOIMM, levels = c(0,1), labels=c("No autoimmune", "Has autoimmune"))]
rm(cols)
cols <- c("P_ASSMEV", "P_DIBTYPE", "P_AHAYFYR")
masterRaw[, P_AUTOIMM := dplyr::if_else(masterRaw[, Reduce(`|`, lapply(.SD, `==`, 1L)), .SDcols = cols], 1L, 0L, 0L)]
masterRaw[, P_AUTOIMM := factor(P_AUTOIMM, levels = c(0,1), labels=c("No autoimmune", "Has autoimmune"))]
rm(cols)
# IMPUTATION FIGS ----
fig <- ggplot(data=masterRaw[BWTGRM_P < 10000]) +
geom_density(aes(x=BWTGRM_P), fill="grey45", color="grey45", alpha=0.5) +
labs(x="Birthweight (in grams)") +
theme_minimal()
saveRDS(fig, "project_analysis/figures/birthweight_freq.RDS")
rm(fig)
# autoimmune by age ----
data <- masterRaw[, .N, by=.(AGE_P, AUTOIMM)] %>%
.[, PROP := N/sum(N, na.rm=T), by=.(AGE_P)] %>%
.[AUTOIMM=="Has autoimmune", .(AGE_P, PROP)] %>%
.[order(AGE_P)]
fig <- ggplot(data=data) +
geom_bar(aes(x=AGE_P,y=PROP), stat = "identity") +
geom_text(aes(x=AGE_P,y=PROP,
label=scales::percent(PROP)), nudge_y = 0.01, size=3) +
scale_y_continuous(labels = scales::percent) +
scale_x_continuous(breaks = data[, AGE_P]) +
labs(x="Age",y=NULL) +
theme_void() +
theme(axis.text.x = element_text(size = 10, margin = margin(-15,0,0,0)),
axis.title.x = element_text(margin=margin(5,0,10,0)),
axis.title = element_text(size=11))
saveRDS(fig, "project_analysis/figures/age_autoimmune.RDS")
rm(data,fig)
# are kids from parents with autoimmune disease more likley to have this condition? ----
data <- masterRaw[, .N, by = c("P_AUTOIMM", "AUTOIMM")] %>%
.[, PROP := N/sum(N, na.rm=T), by="P_AUTOIMM"] %>%
.[AUTOIMM == "Has autoimmune", ]
fig <- ggplot(data=data) +
geom_bar(aes(x=P_AUTOIMM,y=PROP), stat = "identity") +
geom_text(aes(x=P_AUTOIMM,y=PROP, label=scales::percent(PROP)), nudge_y = -0.015, size=7, color="white") +
scale_y_continuous(labels = scales::percent) +
labs(x=NULL,y=NULL) +
scale_x_discrete(labels=c("No autoimmune disease", "Has autoimmune disease")) +
projectTheme() +
theme(axis.text = element_text(size = 15, margin = margin(-15,0,5,0)),
axis.title = element_text(size=11),
axis.text.x = element_text(size = 15, margin = margin(-15,0,5,0)),
axis.text.y = element_blank())
saveRDS(fig, "project_analysis/figures/parent_autoimmune.RDS")
rm(data, fig)
# FAMILY SIZE AND AUTOIMMUNE DISEASE ----
data <- masterRaw[, .(KIDS = ifelse(FM_KIDS > 3, 4, FM_KIDS), AUTOIMM)] %>%
.[, KIDS := factor(KIDS, levels = c(1:4),labels = as.character(c(c(1:3), "4+")), ordered = T)] %>%
.[, .N, by = c("KIDS", "AUTOIMM")] %>%
.[, PROP := N/sum(N, na.rm=T), by=c("KIDS")] %>%
.[AUTOIMM == "Has autoimmune"] %>%
.[order(KIDS)]
fig <- ggplot(data=data, aes(x=as.numeric(KIDS), y=PROP)) +
geom_line(size=1.25, alpha=0.5) +
geom_point(size=5, color="white",
fill="white") +
geom_point(size=3, alpha=0.5,
color="black") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.004, color="white", fontface="bold") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.004) +
labs(x="Number of children in the family",
y="Child has an autoimmune disease (%)") +
scale_x_continuous(labels=as.character(c(1:3, "+4"))) +
scale_y_continuous(labels=function(x) scales::percent(x, accuracy = 1)) +
theme_minimal() +
theme(text = element_text(family = "sans"),
axis.text.x = element_text(margin=margin(0,0,5,0)),
axis.text.y = element_text(margin=margin(0,0,0,5)),
axis.title.x = element_text(margin=margin(0,0,5,0)),
axis.title.y = element_text(margin=margin(0,0,0,5)))
saveRDS(fig, "project_analysis/figures/kids_autoimmune.RDS")
rm(fig)
# income and autoimmune disease ----
data <- masterRaw[INCGRP5 <= 4, ] %>%
.[, .N, by=c("INCGRP5", "AUTOIMM")] %>%
.[, PROP := N/sum(N, na.rm=T), by="INCGRP5"] %>%
.[AUTOIMM == "Has autoimmune"]
fig <- ggplot(data=data, aes(x=INCGRP5, y=PROP)) +
geom_line(size=1.25, alpha=0.5) +
geom_point(size=5, color="white",
fill="white") +
geom_point(size=3, alpha=0.5,
color="black") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.002, color="white", fontface="bold") +
geom_text(aes(label=scales::percent(PROP, accuracy=.1)), nudge_y = 0.002) +
labs(x="Family income",
y="Child has an autoimmune disease (%)") +
scale_x_continuous(labels=c("$0 - $34,999",
"$35,000 - $74,999",
"$75,000 - $99,999",
"$100,000+")) +
scale_y_continuous(labels=function(x) scales::percent(x, accuracy = 1)) +
theme_minimal() +
theme(text = element_text(family = "sans"),
axis.text.x = element_text(margin=margin(0,0,5,0)),
axis.text.y = element_text(margin=margin(0,0,0,5)),
axis.title.x = element_text(margin=margin(0,0,5,0)),
axis.title.y = element_text(margin=margin(0,0,0,5)))
saveRDS(fig, "project_analysis/figures/income_autoimmune.RDS")
rm(fig, data) |
# PCA-many-heatmaps.R
# Correlates principal components of an expression matrix with sample metadata,
# then regresses metadata covariates out of the PC scores, writing diagnostic
# heatmaps and plots to a PDF.
# allows for use with Rscript bash functionality:
#   Rscript PCA-many-heatmaps.R <out.pdf> <counts.txt> <metadata.txt> <categorical.txt> <continuous.txt>
args <- commandArgs(TRUE)
# options(BioC_mirror="http://master.bioconductor.org")
# source("http://master.bioconductor.org/biocLite.R")
# prcomp is used instead of princomp here to perform PCA since:
# 1) princomp is limited to experiments where observations >> variables
# 2) prcomp can be used to easily visualize clusters using PCs and metadata
# ggfortify and ggplot are required for plotting and clustering PCs to metadata
#library(ggfortify)
library(ggplot2)
library(data.table)
library(gplots)   # heatmap.2()
# output_file is the name of the PDF the user would like to create and write to;
# pdf() opens the graphics device that every later plot is drawn into.
# BUG FIX: a hard-coded "temp args array to test script" used to overwrite the
# command-line arguments here, which silently broke the Rscript usage this
# script advertises at the top. It is kept below, commented out, strictly for
# interactive debugging.
# args <- c("2018-Apr-9_PCA_metadata_corrected.pdf", "est_cts_tmm.txt",
#           "metadata.txt", "category_list.txt", "continuous.txt")
output_file <- args[1]
pdf(file = output_file)
# counts_file is the FULL path to the counts matrix -- MUST HAVE HEADER AND ROW NAMES!
# (note: despite the old "CSV" comment, files are read tab-delimited, see sep="\t")
#   - counts must be rounded to the nearest integer
#   - row names should be gene/transcript names
#   - headers should be BIDs or sample ID names
# meta_file is the FULL path to the metadata matrix -- MUST HAVE HEADER AND ROW NAMES!
#   - row names should be BIDs or sample ID names
#   - headers should be the metadata measurement names (sex, diagnosis, chrM contamination, ...)
# To keep it straight, it makes sense to have an n x m counts file and an m x u
# metadata file, because the matrices could then be multiplied: their inner
# dimensions agree (n x m matrix [dot product] m x u matrix = n x u matrix).
counts_file = args[2]
meta_file = args[3]
# give two variable list files pertaining to which are categorical and which are continuous
cat_list <- scan(args[4], what="", sep="\n")
cont_list <- scan(args[5], what="", sep="\n")
# set stopping point for how many leading PCs are summarised in the heatmap (see model_pcs)
sub_stop = 5
# read in gene expression table and metadata; row.names is the column number holding
# the row names. check.names=FALSE stops R from mangling numeric sample names.
total_raw_counts <- read.table(counts_file, header=TRUE, row.names=1, check.names=FALSE, sep="\t")
metadata <- read.table(meta_file, header=TRUE, row.names=1, check.names=FALSE, sep="\t")
# convert each table into a data frame
total_raw_read_counts_dataframe <- as.data.frame(total_raw_counts)
metadata_dataframe <- as.data.frame(metadata)
# pre-filter step: remove any ROWS (genes) whose counts are zero across ALL samples;
# individual samples may still have zero counts for a retained gene
total_raw_read_counts_dataframe <- total_raw_read_counts_dataframe[rowSums(total_raw_read_counts_dataframe)!=0, ]
# add a pseudo-count of 1 to every cell, because log(0) is undefined
total_raw_read_counts_dataframe <- total_raw_read_counts_dataframe + 1
# take log2 of the counts; this helps normalise the data
transformed_dataframe <- log(total_raw_read_counts_dataframe, 2)
# remove BIDs that have missing or strange data
# transformed_dataframe <- within(transformed_dataframe, rm('nothing'))
# metadata_dataframe <- metadata_dataframe[!rownames(metadata_dataframe) %in% c('nothing'), ]
# The block below can be uncommented to run PCA on a SUBSET of the data.
# In metadata_to_be_removed, the which() statement selects the column and value
# identifying the samples to drop; e.g. drop every sample whose 'Diagnosis' is 'SCZ'.
# metadata_to_be_removed <- metadata_dataframe[which(metadata_dataframe$Diagnosis=='SCZ'), ]
# sample_removal <- levels(droplevels(metadata_to_be_removed$BID))
# transformed_dataframe <-transformed_dataframe[,-which(names(transformed_dataframe) %in% sample_removal)]
# metadata_dataframe <- metadata_dataframe[!rownames(metadata_dataframe) %in% sample_removal, ]
# print("Samples to be removed from counts matrix and metadata")
# print(sample_removal)
# remove rows that sum to zero after the transform
transformed_dataframe <- transformed_dataframe[rowSums(transformed_dataframe)!=0, ]
# Sort the data frames: the counts matrix by column names and the metadata by row
# names, so sample names line up between the two. The all() check below prints
# TRUE when they match; FALSE means the matrices still need sorting.
counts_sorted <- transformed_dataframe[,order(colnames(transformed_dataframe))]
# removing batch effects may leave some genes with zero variance; drop them
to_rm = apply(counts_sorted, 1, function(x) length(unique(x)) > 1)
counts_sorted = counts_sorted[to_rm,]
metadata_sorted <- metadata_dataframe[order(rownames(metadata_dataframe)),]
all(rownames(metadata_sorted)==colnames(counts_sorted))
# More relevant when samples were removed above: inspect and collapse the factor
# levels remaining in the metadata.
print("getting levels")
sapply(metadata_dataframe,levels)
# Principal components via prcomp on the TRANSPOSED counts matrix (samples as rows).
# scale.=TRUE bases the PCs on the correlation matrix rather than the covariance
# matrix (better when variable scales differ); center=TRUE centres each variable.
pca_matrix <- prcomp(t(counts_sorted), center=TRUE, scale. = TRUE)
# Scree plot: PC index vs variance explained. Aim to keep the PCs before the
# "elbow", unless a PC correlates strongly with a metadata variable -- in that
# case regress out the variable rather than dropping the PC.
plot(pca_matrix, type ="l")
#********************************************Model functions***************************************
# helper function to iterate through desired fields to regress in two modes - categorical and continuous
# Fit PC-score ~ categorical covariate and record fit statistics.
#
# Args:
#   factors_affecting_pcs: nested list accumulator, keyed by covariate then PC.
#   category: name of a categorical column in metadata_sorted.
#   metadata_sorted: data frame of sample metadata, rows aligned with PC scores.
#   pca_matrix_build: prcomp() result; scores are read from $x.
#   pc: integer index of the principal component to model.
# Returns: the accumulator with adjusted R-squared and -log10 ANOVA p-value
#   stored under factors_affecting_pcs[[category]][[as.character(pc)]].
build_categorical_model <- function(factors_affecting_pcs, category, metadata_sorted, pca_matrix_build, pc){
  # FIX: the predictor used to be wrapped in na.omit(), which shortens the
  # predictor relative to the response and makes lm() fail with
  # "variable lengths differ" whenever the covariate has missing values.
  # lm()'s default na.action (na.omit) already drops incomplete pairs correctly.
  linear_model <- lm(pca_matrix_build$x[,pc] ~ as.factor(metadata_sorted[,category]))
  factors_affecting_pcs[[category]][[as.character(pc)]] <- list()
  factors_affecting_pcs[[category]][[as.character(pc)]][['adj.r.squared']] <- summary(linear_model)$adj.r.squared
  factors_affecting_pcs[[category]][[as.character(pc)]][['-log10Pval']] <- -log10(anova(linear_model)$Pr[1])
  return(factors_affecting_pcs)
}
# Fit PC-score ~ continuous covariate and record fit statistics.
#
# Args mirror build_categorical_model(), except continuous_variable names a
# continuous metadata column, so no as.factor() conversion is applied.
# Returns: the accumulator with adjusted R-squared and -log10 ANOVA p-value
#   stored under factors_affecting_pcs[[continuous_variable]][[as.character(pc)]].
build_continuous_model <- function(factors_affecting_pcs, continuous_variable, metadata_sorted, pca_matrix_build, pc){
  # FIX: dropped na.omit() on the predictor; it broke response/predictor pairing
  # and errored on NA-containing metadata. lm()'s default na.action handles NAs.
  linear_model <- lm(pca_matrix_build$x[,pc] ~ metadata_sorted[,continuous_variable])
  factors_affecting_pcs[[continuous_variable]][[as.character(pc)]] <- list()
  factors_affecting_pcs[[continuous_variable]][[as.character(pc)]][['adj.r.squared']] <- summary(linear_model)$adj.r.squared
  factors_affecting_pcs[[continuous_variable]][[as.character(pc)]][['-log10Pval']] <- -log10(anova(linear_model)$Pr[1])
  return(factors_affecting_pcs)
}
# ************************End model functions*****************************************************
# ***************************Function to correlate PCs with metadata******************************
# Correlate every PC with every metadata covariate and draw a summary heatmap.
#
# For each PC, a linear model is fitted against each categorical covariate in
# `categories` and each continuous covariate in `continuous`; the adjusted
# R-squared and -log10 ANOVA p-value are collected, and the first `n_pcs` PCs
# are rendered with gplots::heatmap.2 (cell colour = adjusted R-squared, cell
# text = rounded -log10 p-value). Called for its plotting side effect.
#
# The new parameters default to the script-level globals, so existing
# single-argument calls behave exactly as before:
#   pca_matrix_check: prcomp() result.
#   metadata: data frame of sample metadata, rows aligned with the PC scores.
#   categories: names of categorical metadata columns.
#   continuous: names of continuous metadata columns.
#   n_pcs: number of leading PCs shown in the heatmap (must not exceed the
#     number of PCs in pca_matrix_check).
model_pcs <- function(pca_matrix_check,
                      metadata = metadata_sorted,
                      categories = cat_list,
                      continuous = cont_list,
                      n_pcs = sub_stop){
  factors_affecting_pcs <- list()
  # one linear model per (PC, covariate) pair; as.factor() is applied to
  # categorical covariates inside the helper, continuous ones are used as-is
  for (pc in seq_len(ncol(pca_matrix_check$rotation))){
    for (category in categories){
      factors_affecting_pcs <- build_categorical_model(factors_affecting_pcs, category, metadata, pca_matrix_check, pc)
    }
    for (continuous_variable in continuous){
      factors_affecting_pcs <- build_continuous_model(factors_affecting_pcs, continuous_variable, metadata, pca_matrix_check, pc)
    }
  }
  # collect the statistics of the first n_pcs PCs into plain data frames
  pvalues <- data.frame()
  adjRsq <- data.frame()
  for (all_factors in seq_along(factors_affecting_pcs)){
    for (pc in seq_len(n_pcs)){
      pvalues[all_factors, pc] <- factors_affecting_pcs[[all_factors]][[pc]][['-log10Pval']]
      adjRsq[all_factors, pc] <- factors_affecting_pcs[[all_factors]][[pc]][['adj.r.squared']]
    }
  }
  # label rows by covariate name and columns PC1..PC<n_pcs>
  rownames(pvalues) <- names(factors_affecting_pcs)
  colnames(pvalues) <- paste0("PC", seq_len(n_pcs))
  rownames(adjRsq) <- names(factors_affecting_pcs)
  colnames(adjRsq) <- paste0("PC", seq_len(n_pcs))
  # round the -log10 p-values to three decimal places for display
  is.num <- sapply(pvalues, is.numeric)
  pvalues[is.num] <- lapply(pvalues[is.num], round, 3)
  # heatmap: cell note is the -log10 p-value, colour is the adjusted R-squared
  heatmap.2(as.matrix(adjRsq), cellnote=pvalues, notecol = "black", notecex = 0.5, cexRow = 0.3, dendrogram = "none", col=colorRampPalette(c("white", "yellow", "red"))(10))
  print("heatmap completed")
}
#*********************************************End of function*******************************************
# ********************************************Regression functions**************************************
# Regress a categorical covariate out of every PC score column.
#
# Each column of pca_matrix_test$x is replaced by the residuals of
# PC ~ as.factor(covariate); model_pcs() is then rerun so the corrected
# correlations can be inspected, the heatmap is annotated, and a
# residuals-vs-fitted diagnostic is plotted for the LAST PC's model.
#
# Returns (invisibly) the corrected prcomp object. NOTE(review): the call
# sites in this script currently discard the return value, so each call
# corrects an independent copy of the original matrix (R copy-on-modify);
# assign the result back if cumulative correction is intended.
regress_categorical <- function(pca_matrix_test, metadata_sorted, category){
  for (pc in seq_len(ncol(pca_matrix_test$rotation))){
    # na.exclude keeps residuals NA-padded to the original row count, so the
    # assignment below stays aligned even when the covariate has missing values
    # (the old na.omit() wrapper on the predictor errored in that case).
    linear_model <- lm(pca_matrix_test$x[,pc] ~ as.factor(metadata_sorted[,category]), na.action = na.exclude)
    pca_matrix_test$x[,pc] <- residuals(linear_model)
  }
  # re-examine PC/metadata correlations after the regression
  model_pcs(pca_matrix_test)
  # annotate the heatmap with the variable that was regressed out
  mtext(paste0("vars regressed: ", category), side=3, line=0)
  # diagnostic plot for the model fitted to the last PC only
  plot(linear_model$fitted.values, linear_model$residuals, main = paste('Versus fit', category), xlab = 'Fitted values', ylab = 'Residuals')
  invisible(pca_matrix_test)
}
# Regress a continuous covariate out of every PC score column.
#
# Same flow as regress_categorical(), but the covariate is used without
# as.factor() conversion. Returns (invisibly) the corrected prcomp object;
# NOTE(review): call sites currently discard it (R copy-on-modify), so the
# caller's matrix is not updated unless the result is assigned back.
regress_continuous <- function(pca_matrix_test, metadata_sorted, continuous){
  for (pc in seq_len(ncol(pca_matrix_test$rotation))){
    # na.exclude keeps residuals NA-padded and row-aligned with the PC scores
    # (the old na.omit() wrapper on the predictor errored on missing values).
    linear_model <- lm(pca_matrix_test$x[,pc] ~ metadata_sorted[,continuous], na.action = na.exclude)
    pca_matrix_test$x[,pc] <- residuals(linear_model)
  }
  # re-examine PC/metadata correlations after the regression
  model_pcs(pca_matrix_test)
  # annotate the heatmap with the variable that was regressed out
  mtext(paste0("vars regressed: ", continuous), side=3, line=0)
  # diagnostic plot for the model fitted to the last PC only
  plot(linear_model$fitted.values, linear_model$residuals, main = paste('Versus fit', continuous), xlab = 'Fitted values', ylab = 'Residuals')
  invisible(pca_matrix_test)
}
# ********************************************End regression functions**********************************
# Heatmap of PC/metadata correlations for the uncorrected PCA.
model_pcs(pca_matrix)
#---------------------------regress out variables of interest-------------------------------------------------
# For each covariate, a linear model is fitted per PC and its residuals replace
# the old PC scores before the correlation heatmap is redrawn.
# NOTE(review): the regress_* helpers receive pca_matrix by value and their
# return value is discarded, so each call corrects an independent copy of the
# ORIGINAL matrix -- the regressions are not cumulative. If cumulative
# correction is intended, the helpers would need to return the corrected
# object and these loops assign it back; confirm which behaviour is wanted.
for (category in cat_list){
  regress_categorical(pca_matrix, metadata_sorted, category)
}
for (cont in cont_list){
  regress_continuous(pca_matrix, metadata_sorted, cont)
}
# saves and closes the PDF opened at the top of the script
dev.off()
# call function again on regressed out variables
#model_pcs(pca_matrix)
# writes the variables that were regressed out, NOTE this must be changed manually!!!!!!
#mtext("vars regressed: FlowcellBatch, Sex, UF 5-3 bias, RIN, PMI, AgeDeath, TissueState, BrainBank", side=3, line=0)
# saves and closes newly created PDF
#dev.off()
| /diffExp/PCA-many-heatmaps.R | no_license | WhiteLab/RNAseq | R | false | false | 12,895 | r | # allows for use with Rscript bash functionality
# Usage (via Rscript): <out.pdf> <counts.txt> <metadata.txt> <categorical.txt> <continuous.txt>
args <- commandArgs(TRUE)
# options(BioC_mirror="http://master.bioconductor.org")
# source("http://master.bioconductor.org/biocLite.R")
# prcomp is used instead of princomp here to perform PCA since:
# 1) princomp is limited to experiments where observations >> variables
# 2) prcomp can be used to easily visualize clusters using PCs and metadata
# ggfortify and ggplot are required for plotting and clustering PCs to metadata
#library(ggfortify)
library(ggplot2)
library(data.table)
library(gplots)   # heatmap.2()
# output_file is the name of the PDF the user would like to create and write to;
# pdf() opens the graphics device that all subsequent plots draw into.
# BUG FIX: the "temp args array to test script" below used to overwrite the real
# command-line arguments, breaking Rscript usage; it stays commented out and is
# only uncommented for interactive debugging.
# args <- c("2018-Apr-9_PCA_metadata_corrected.pdf", "est_cts_tmm.txt",
#           "metadata.txt", "category_list.txt", "continuous.txt")
output_file <- args[1]
pdf(file = output_file)
# counts_file is the FULL path to the counts matrix -- MUST HAVE HEADER AND ROW NAMES!
# (note: despite the old "CSV" comment, files are read tab-delimited, see sep="\t")
#   - counts must be rounded to the nearest integer
#   - row names should be gene/transcript names
#   - headers should be BIDs or sample ID names
# meta_file is the FULL path to the metadata matrix -- MUST HAVE HEADER AND ROW NAMES!
#   - row names should be BIDs or sample ID names
#   - headers should be the metadata measurement names (sex, diagnosis, chrM contamination, ...)
# To keep it straight, it makes sense to have an n x m counts file and an m x u
# metadata file, because the matrices could then be multiplied: their inner
# dimensions agree (n x m matrix [dot product] m x u matrix = n x u matrix).
counts_file = args[2]
meta_file = args[3]
# give two variable list files pertaining to which are categorical and which are continuous
cat_list <- scan(args[4], what="", sep="\n")
cont_list <- scan(args[5], what="", sep="\n")
# set stopping point for how many leading PCs are summarised in the heatmap (see model_pcs)
sub_stop = 5
# read in gene expression table and metadata; row.names is the column number holding
# the row names. check.names=FALSE stops R from mangling numeric sample names.
total_raw_counts <- read.table(counts_file, header=TRUE, row.names=1, check.names=FALSE, sep="\t")
metadata <- read.table(meta_file, header=TRUE, row.names=1, check.names=FALSE, sep="\t")
# convert each table into a data frame
total_raw_read_counts_dataframe <- as.data.frame(total_raw_counts)
metadata_dataframe <- as.data.frame(metadata)
# pre-filter step: remove any ROWS (genes) whose counts are zero across ALL samples;
# individual samples may still have zero counts for a retained gene
total_raw_read_counts_dataframe <- total_raw_read_counts_dataframe[rowSums(total_raw_read_counts_dataframe)!=0, ]
# add a pseudo-count of 1 to every cell, because log(0) is undefined
total_raw_read_counts_dataframe <- total_raw_read_counts_dataframe + 1
# take log2 of the counts; this helps normalise the data
transformed_dataframe <- log(total_raw_read_counts_dataframe, 2)
# remove BIDs that have missing or strange data
# transformed_dataframe <- within(transformed_dataframe, rm('nothing'))
# metadata_dataframe <- metadata_dataframe[!rownames(metadata_dataframe) %in% c('nothing'), ]
# The block below can be uncommented to run PCA on a SUBSET of the data.
# In metadata_to_be_removed, the which() statement selects the column and value
# identifying the samples to drop; e.g. drop every sample whose 'Diagnosis' is 'SCZ'.
# metadata_to_be_removed <- metadata_dataframe[which(metadata_dataframe$Diagnosis=='SCZ'), ]
# sample_removal <- levels(droplevels(metadata_to_be_removed$BID))
# transformed_dataframe <-transformed_dataframe[,-which(names(transformed_dataframe) %in% sample_removal)]
# metadata_dataframe <- metadata_dataframe[!rownames(metadata_dataframe) %in% sample_removal, ]
# print("Samples to be removed from counts matrix and metadata")
# print(sample_removal)
# remove rows that sum to zero after the transform
transformed_dataframe <- transformed_dataframe[rowSums(transformed_dataframe)!=0, ]
# Sort the data frames: the counts matrix by column names and the metadata by row
# names, so sample names line up between the two. The all() check below prints
# TRUE when they match; FALSE means the matrices still need sorting.
counts_sorted <- transformed_dataframe[,order(colnames(transformed_dataframe))]
# removing batch effects may leave some genes with zero variance; drop them
to_rm = apply(counts_sorted, 1, function(x) length(unique(x)) > 1)
counts_sorted = counts_sorted[to_rm,]
metadata_sorted <- metadata_dataframe[order(rownames(metadata_dataframe)),]
all(rownames(metadata_sorted)==colnames(counts_sorted))
# More relevant when samples were removed above: inspect and collapse the factor
# levels remaining in the metadata.
print("getting levels")
sapply(metadata_dataframe,levels)
# Principal components via prcomp on the TRANSPOSED counts matrix (samples as rows).
# scale.=TRUE bases the PCs on the correlation matrix rather than the covariance
# matrix (better when variable scales differ); center=TRUE centres each variable.
pca_matrix <- prcomp(t(counts_sorted), center=TRUE, scale. = TRUE)
# Scree plot: PC index vs variance explained. Aim to keep the PCs before the
# "elbow", unless a PC correlates strongly with a metadata variable -- in that
# case regress out the variable rather than dropping the PC.
plot(pca_matrix, type ="l")
#********************************************Model functions***************************************
# helper function to iterate through desired fields to regress in two modes - categorical and continuous
# Record, for one (categorical covariate, PC) pair, the adjusted R-squared and
# -log10 ANOVA p-value of PC score ~ covariate in the nested accumulator list,
# and return the updated list.
build_categorical_model <- function(factors_affecting_pcs, category, metadata_sorted, pca_matrix_build, pc){
  scores <- pca_matrix_build$x[, pc]
  covariate <- na.omit(as.factor(metadata_sorted[, category]))
  fit <- lm(scores ~ covariate)
  slot <- as.character(pc)
  factors_affecting_pcs[[category]][[slot]] <- list(
    adj.r.squared = summary(fit)$adj.r.squared,
    `-log10Pval` = -log10(anova(fit)$Pr[1])
  )
  return(factors_affecting_pcs)
}
# Record, for one (continuous covariate, PC) pair, the adjusted R-squared and
# -log10 ANOVA p-value of PC score ~ covariate in the nested accumulator list,
# and return the updated list. No as.factor() conversion for continuous data.
build_continuous_model <- function(factors_affecting_pcs, continuous_variable, metadata_sorted, pca_matrix_build, pc){
  scores <- pca_matrix_build$x[, pc]
  covariate <- na.omit(metadata_sorted[, continuous_variable])
  fit <- lm(scores ~ covariate)
  slot <- as.character(pc)
  factors_affecting_pcs[[continuous_variable]][[slot]] <- list(
    adj.r.squared = summary(fit)$adj.r.squared,
    `-log10Pval` = -log10(anova(fit)$Pr[1])
  )
  return(factors_affecting_pcs)
}
# ************************End model functions*****************************************************
# ***************************Function to correlate PCs with metadata******************************
# Correlate every PC with every metadata covariate and draw a summary heatmap
# (cell colour = adjusted R-squared, cell text = rounded -log10 p-value).
# Reads the script globals cat_list, cont_list, metadata_sorted and sub_stop;
# called for its plotting side effect.
model_pcs <- function(pca_matrix_check){
  # this block of code will need to be changed depending on the metadata columns available
  factors_affecting_pcs=list()
  # iterate through all PCs and record the -log10(p-value) and adjusted R-squared
  # of every PC against each metadata column listed
  for (pc in seq(1,dim(pca_matrix_check$rotation)[2])){
    # lm() builds a linear model of each PC against each covariate to see whether
    # the PC is strongly correlated with it (if yes, regress the covariate out).
    # Note: as.factor() wraps categorical variables inside the helper; it is
    # omitted for continuous, non-discrete variables.
    for (category in cat_list){
      factors_affecting_pcs = build_categorical_model(factors_affecting_pcs, category, metadata_sorted, pca_matrix_check, pc)
    }
    for (continuous_variable in cont_list){
      factors_affecting_pcs = build_continuous_model(factors_affecting_pcs, continuous_variable, metadata_sorted, pca_matrix_check, pc)
    }
  }
  # data frames holding the -log10 p-values and adjusted R-squared values that
  # feed the heatmap
  pvalues <- data.frame()
  adjRsq <- data.frame()
  # extract statistics for the first sub_stop PCs only (5 here, not 10 as an
  # older version of this comment claimed)
  for (all_factors in seq(1,length(factors_affecting_pcs))){
    for (pc in seq(1,sub_stop)){
      pvalues[all_factors, pc] <- unlist(factors_affecting_pcs[all_factors][[1]][[pc]][[2]])
      adjRsq[all_factors, pc] <- unlist(factors_affecting_pcs[all_factors][[1]][[pc]][[1]])
    }
  }
  # label the p-value rows by covariate name and columns PC1..PC<sub_stop>
  rownames(pvalues) <- names(factors_affecting_pcs)
  colnames(pvalues) <- unlist(lapply(seq(1,sub_stop),function(x) paste(c('PC',x),collapse='')))
  # same labelling for the adjusted R-squared values
  rownames(adjRsq) <- names(factors_affecting_pcs)
  colnames(adjRsq) <- unlist(lapply(seq(1,sub_stop),function(x) paste(c('PC',x),collapse='')))
  # round all -log10(p-values) in the data frame to three decimal places
  is.num <- sapply(pvalues, is.numeric)
  pvalues[is.num] <- lapply(pvalues[is.num], round, 3)
  # heatmap: cell note is the -log10(p-value), colour is the adjusted R-squared
  heatmap.2(as.matrix(adjRsq), cellnote=pvalues, notecol = "black", notecex = 0.5, cexRow = 0.3, dendrogram = "none", col=colorRampPalette(c("white", "yellow", "red"))(10))
  print("heatmap completed")
}
#*********************************************End of function*******************************************
# ********************************************Regression functions**************************************
# Replace each PC score column with the residuals of PC ~ category, then redraw
# the correlation heatmap and a residuals-vs-fitted plot (for the LAST PC's
# model only -- linear_model deliberately leaks out of the loop).
# NOTE(review): na.omit() on the predictor alone makes lm() fail with
# "variable lengths differ" whenever the covariate contains NAs -- confirm the
# metadata is complete, or rely on lm()'s default na.action instead.
# NOTE(review): pca_matrix_test is modified by copy; the caller's object is NOT
# updated, and the return value is discarded at the call sites below.
regress_categorical <- function(pca_matrix_test, metadata_sorted, category){
  for (pc in seq(1,dim(pca_matrix_test$rotation)[2])){
    linear_model <- lm(pca_matrix_test$x[,pc] ~ na.omit(as.factor(metadata_sorted[,category])))
    pca_matrix_test$x[,pc] <- linear_model$residuals
    # plot(linear_model$fitted.values, linear_model$residuals, main = paste('Versus fit', category), xlab = 'Fitted values', ylab = 'Residuals')
  }
  # redraw the PC/metadata heatmap on the corrected scores
  model_pcs(pca_matrix_test)
  # annotate which variable was regressed out
  mtext(paste0("vars regressed: ", category), side=3, line=0)
  # diagnostic plot for the model fitted to the last PC only
  plot(linear_model$fitted.values, linear_model$residuals, main = paste('Versus fit', category), xlab = 'Fitted values', ylab = 'Residuals')
}
# Replace each PC score column with the residuals of PC ~ continuous covariate,
# then redraw the correlation heatmap and a residuals-vs-fitted plot for the
# LAST PC's model.
# NOTE(review): same caveats as regress_categorical -- na.omit() on the
# predictor breaks on NA-containing metadata, and the corrected matrix is a
# local copy whose return value the call sites discard.
regress_continuous <- function(pca_matrix_test, metadata_sorted, continuous){
  for (pc in seq(1,dim(pca_matrix_test$rotation)[2])){
    linear_model <- lm(pca_matrix_test$x[,pc] ~ na.omit(metadata_sorted[,continuous]))
    pca_matrix_test$x[,pc] <- linear_model$residuals
  }
  # redraw the PC/metadata heatmap on the corrected scores
  model_pcs(pca_matrix_test)
  # annotate which variable was regressed out
  mtext(paste0("vars regressed: ", continuous), side=3, line=0)
  # diagnostic plot for the model fitted to the last PC only
  plot(linear_model$fitted.values, linear_model$residuals, main = paste('Versus fit', continuous), xlab = 'Fitted values', ylab = 'Residuals')
}
# ********************************************End regression functions**********************************
# Heatmap of PC/metadata correlations for the uncorrected PCA.
model_pcs(pca_matrix)
#---------------------------regress out variables of interest-------------------------------------------------
# For each covariate, a linear model is fitted per PC and its residuals replace
# the old PC scores before the correlation heatmap is redrawn.
# NOTE(review): the regress_* helpers receive pca_matrix by value and their
# return value is discarded, so each call corrects an independent copy of the
# ORIGINAL matrix -- the regressions are not cumulative. If cumulative
# correction is intended, the helpers would need to return the corrected
# object and these loops assign it back; confirm which behaviour is wanted.
for (category in cat_list){
  regress_categorical(pca_matrix, metadata_sorted, category)
}
for (cont in cont_list){
  regress_continuous(pca_matrix, metadata_sorted, cont)
}
# saves and closes the PDF opened at the top of the script
dev.off()
# call function again on regressed out variables
#model_pcs(pca_matrix)
# writes the variables that were regressed out, NOTE this must be changed manually!!!!!!
#mtext("vars regressed: FlowcellBatch, Sex, UF 5-3 bias, RIN, PMI, AgeDeath, TissueState, BrainBank", side=3, line=0)
# saves and closes newly created PDF
#dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{year_tw}
\alias{year_tw}
\title{western year to tw year}
\usage{
year_tw(y_we)
}
\arguments{
\item{y_we}{An integer}
}
\value{
the tw year
}
\description{
western year to tw year
}
\examples{
year_tw("2018")
year_tw(1912)
}
| /man/year_tw.Rd | no_license | obarisk/twdate | R | false | true | 320 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{year_tw}
\alias{year_tw}
\title{western year to tw year}
\usage{
year_tw(y_we)
}
\arguments{
\item{y_we}{An integer}
}
\value{
the tw year
}
\description{
western year to tw year
}
\examples{
year_tw("2018")
year_tw(1912)
}
|
# library(RPostgreSQL)
# library(pool)
# # Set connection parameters
# pg_drv <- RPostgreSQL::PostgreSQL()
# pg_host <- "vm-srv-finstad.vm.ntnu.no"
# pg_db <- 'nofa'
# pg_user <- rstudioapi::askForPassword("enter username")
# pg_password <- rstudioapi::askForPassword("enter psw")
# pool <- dbPool(
# drv = pg_drv,
# dbname = pg_db,
# host = pg_host,
# user = pg_user,
# password = pg_password,
# idleTimeout = 36000000
# )
# con <- poolCheckout(pool)
####################################################################################
# N?
### Hent ut miljødata for alle innsjøer i Agder (fetch environmental data for all lakes in Agder)
# Fetch environmental attributes for every location in the two Agder counties
# from the nofa.view_location_environment view.
# db_conection: an open database connection (parameter name kept as-is for
# compatibility with existing callers).
# Returns the query result from dbGetQuery (one row per location).
get_location_environment <- function(db_conection) {
  query <- "SELECT * FROM nofa.view_location_environment AS a WHERE county IN ('Vest-Agder', 'Aust-Agder');"
  dbGetQuery(db_conection, query)
}
# Eksempel
#loc_env <- get_location_environment(con)
| /R/get_location_environment.R | no_license | ninsbl/fremmedfisk | R | false | false | 881 | r | # library(RPostgreSQL)
# library(pool)
# # Set connection parameters
# pg_drv <- RPostgreSQL::PostgreSQL()
# pg_host <- "vm-srv-finstad.vm.ntnu.no"
# pg_db <- 'nofa'
# pg_user <- rstudioapi::askForPassword("enter username")
# pg_password <- rstudioapi::askForPassword("enter psw")
# pool <- dbPool(
# drv = pg_drv,
# dbname = pg_db,
# host = pg_host,
# user = pg_user,
# password = pg_password,
# idleTimeout = 36000000
# )
# con <- poolCheckout(pool)
####################################################################################
# N?
### Hent ut miljødata for alle innsjøer i Agder (fetch environmental data for all lakes in Agder)
# Fetch environmental data for all locations in the Agder counties
# (Vest-Agder, Aust-Agder) from the nofa.view_location_environment view.
# db_conection: an open database connection (see the commented-out
# RPostgreSQL/pool setup above).
# Returns: the dbGetQuery result, one row per location.
get_location_environment <- function(db_conection) {
  sql_string <- "SELECT * FROM nofa.view_location_environment AS a WHERE county IN ('Vest-Agder', 'Aust-Agder');"
  res <- dbGetQuery(db_conection, sql_string)
  res
}
# Eksempel
#loc_env <- get_location_environment(con)
|
# -----------------------------------------------------------------------------
# title   : plotEnsTracePlot.R
# purpose : Create a trace plot of ensemble-sampler samples from a model
#           calibration using:
#             - All output types
#             - Model discrepancy, variance
#             - Model discrepancy, mean (centered)
# author  : WD41, LRS/EPFL/PSI
# date    : Nov. 2017
# -----------------------------------------------------------------------------
# Load required libraries -----------------------------------------------------
library(ggplot2)
# NOTE(review): `param_names` and `prior_ranges` (used further below) are not
# defined in this file -- presumably created by a sourced helper; confirm.
source("./r-scripts/multiplot.R")
source("./r-scripts/plot_ensemble.R")
# Global variables ------------------------------------------------------------
# Output filename (figure written at the end of the script)
otpfullname <- "./figures/plotEnsTraceDiscAllCentered.pdf"
# Input filename (RDS with a 3-D sample array: parameter x walker x iteration)
ens_rds_fullname <- "../../../../wd41-thesis.analysis.new/trace-mcmc-fixvar-revise-reduced/results/2000/216-ens-all-1000-2000-fix_bc-fix_scale-unbiased-nobc-rep1.Rds"
# Burnin for this case (always change accordingly)
burnin <- 760
# Graphic variables: output figure width and height (inches)
fig_size <- c(18, 9)
# Create tidy data ------------------------------------------------------------
ens_samples <- readRDS(ens_rds_fullname)
# Rescale model parameters from the unit hypercube to their physical ranges
# (log-uniform parameters use value = range_ratio^u * lower_bound).
ens_samples_rescaled <- ens_samples
# x5 : Grid HT Enhancement [0.5, 2.0] logunif
k <- 1
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x6 : iafbWallHTC [0.5, 2.0] logunif
k <- 2
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x7 : dffbWallHTC [0.5, 2.0] logunif
k <- 3
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x8 : dffbVIHTC [0.25, 4.0] logunif
k <- 4
ens_samples_rescaled[k,,] <- 16^ens_samples_rescaled[k,,] * 0.25
# x9 : iafbIntDrag [0.25, 4.0] logunif
k <- 5
ens_samples_rescaled[k,,] <- 16^ens_samples_rescaled[k,,] * 0.25
# x10: dffbIntDrag [0.25, 4.0] logunif
k <- 6
ens_samples_rescaled[k,,] <- 16^ens_samples_rescaled[k,,] * 0.25
# x11: dffbWallDrag [0.5, 2.0] logunif
k <- 7
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x12: Tminfb [-50, 50] unif
k <- 8
ens_samples_rescaled[k,,] <- 100 * ens_samples_rescaled[k,,] - 50
# Convert the post-burnin samples into a long (tidy) data frame:
# one row per walker x parameter x iteration.
ens_tidy <- data.frame(walker = c(), param = c(), iter = c(), value = c())
n_param <- dim(ens_samples)[1]
n_walker <- dim(ens_samples)[2]
n_iter <- dim(ens_samples)[3]
# NOTE(review): loop bounds are hard-coded (1000 walkers, 8 parameters) even
# though n_walker and n_param are computed above -- confirm they always match.
# Growing ens_tidy with rbind() in a double loop is also O(n^2); consider
# accumulating a list and binding once if this becomes slow.
for (i in 1:1000)
{
    for (j in 1:8)
    {
        ens_tidy <- rbind(ens_tidy, data.frame(walker = rep(i, n_iter-burnin),
                                               param = rep(param_names[j], n_iter-burnin),
                                               iter = seq(1, n_iter-burnin),
                                               value = ens_samples_rescaled[j,i,(burnin+1):n_iter]))
    }
}
saveRDS(ens_tidy, "216-ens-all-1000-2000-fix_bc-fix_scale-unbiased-nobc-rep1-tidy_df.Rds")
# Create dummy for the plot limit ---------------------------------------------
# Invisible points at each parameter's prior bounds so that every facet's
# y-axis spans the full prior range (used via geom_blank below).
ddummy <- data.frame(walker = c(), param = c(), iter = c(), value = c())
for (i in 1:8)
{
    ddummy <- rbind(ddummy, data.frame(walker = rep(1, 2),
                                       param = rep(param_names[i], 2),
                                       iter = seq(1, 2),
                                       value = prior_ranges[[i]]))
}
# Make the plot ---------------------------------------------------------------
# Restrict to a window of late iterations and a subset of walkers.
ens_tidy_sub <- subset(ens_tidy, iter >= 1141)
ens_tidy_sub <- subset(ens_tidy_sub, walker >= 601)
p <- ggplot(data = ens_tidy_sub,
            aes(x = iter, y = value, group = walker))
p <- p + geom_line(alpha = 0.55)
p <- p + geom_blank(data = ddummy, aes(x = iter, y = value))
p <- p + theme_bw()
p <- p + facet_wrap(~param, scales = "free_y", ncol = 4)
# Set axis labels and font size
p <- p + labs(y = "Parameter value",
              x = "Number of Iterations")
p <- p + scale_x_continuous(limits = c(1141, 1240))
p <- p + theme(strip.text.x = element_text(size = 16, face = "bold"))
p <- p + theme(axis.ticks.y = element_line(size = 1),
               axis.ticks.x = element_line(size = 1),
               axis.text.x = element_text(size = 16, angle = 30, hjust = 1),
               axis.text.y = element_text(size = 18))
p <- p + theme(axis.title.y = element_text(vjust = 1.5, size = 24),
               axis.title.x = element_text(vjust = -0.5, size = 24))
p
# Write the plot --------------------------------------------------------------
# NOTE(review): embed_fonts() is from the extrafont package, which is not
# loaded in this script -- confirm it is attached by a sourced helper.
pdf(otpfullname, family = "CM Roman",
    width = fig_size[1], height = fig_size[2])
print(p)
dev.off()
embed_fonts(otpfullname, outfile=otpfullname)
| /figures/chapter5/r-scripts/plotEnsTrace.R | no_license | damar-wicaksono/wd41-thesis | R | false | false | 4,506 | r | #
# title   : plotEnsTracePlot.R
# purpose : Create a trace plot of ensemble-sampler samples from a model
#           calibration using:
#             - All output types
#             - Model discrepancy, variance
#             - Model discrepancy, mean (centered)
# author  : WD41, LRS/EPFL/PSI
# date    : Nov. 2017
#
# Load required libraries -----------------------------------------------------
library(ggplot2)
# NOTE(review): `param_names` and `prior_ranges` (used further below) are not
# defined in this file -- presumably created by a sourced helper; confirm.
source("./r-scripts/multiplot.R")
source("./r-scripts/plot_ensemble.R")
# Global variables ------------------------------------------------------------
# Output filename (figure written at the end of the script)
otpfullname <- "./figures/plotEnsTraceDiscAllCentered.pdf"
# Input filename (RDS with a 3-D sample array: parameter x walker x iteration)
ens_rds_fullname <- "../../../../wd41-thesis.analysis.new/trace-mcmc-fixvar-revise-reduced/results/2000/216-ens-all-1000-2000-fix_bc-fix_scale-unbiased-nobc-rep1.Rds"
# Burnin for this case (always change accordingly)
burnin <- 760
# Graphic variables: output figure width and height (inches)
fig_size <- c(18, 9)
# Create tidy data ------------------------------------------------------------
ens_samples <- readRDS(ens_rds_fullname)
# Rescale model parameters from the unit hypercube to their physical ranges
# (log-uniform parameters use value = range_ratio^u * lower_bound).
ens_samples_rescaled <- ens_samples
# x5 : Grid HT Enhancement [0.5, 2.0] logunif
k <- 1
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x6 : iafbWallHTC [0.5, 2.0] logunif
k <- 2
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x7 : dffbWallHTC [0.5, 2.0] logunif
k <- 3
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x8 : dffbVIHTC [0.25, 4.0] logunif
k <- 4
ens_samples_rescaled[k,,] <- 16^ens_samples_rescaled[k,,] * 0.25
# x9 : iafbIntDrag [0.25, 4.0] logunif
k <- 5
ens_samples_rescaled[k,,] <- 16^ens_samples_rescaled[k,,] * 0.25
# x10: dffbIntDrag [0.25, 4.0] logunif
k <- 6
ens_samples_rescaled[k,,] <- 16^ens_samples_rescaled[k,,] * 0.25
# x11: dffbWallDrag [0.5, 2.0] logunif
k <- 7
ens_samples_rescaled[k,,] <- 4^ens_samples_rescaled[k,,] * 0.5
# x12: Tminfb [-50, 50] unif
k <- 8
ens_samples_rescaled[k,,] <- 100 * ens_samples_rescaled[k,,] - 50
# Convert the post-burnin samples into a long (tidy) data frame:
# one row per walker x parameter x iteration.
ens_tidy <- data.frame(walker = c(), param = c(), iter = c(), value = c())
n_param <- dim(ens_samples)[1]
n_walker <- dim(ens_samples)[2]
n_iter <- dim(ens_samples)[3]
# NOTE(review): loop bounds are hard-coded (1000 walkers, 8 parameters) even
# though n_walker and n_param are computed above -- confirm they always match.
# Growing ens_tidy with rbind() in a double loop is also O(n^2); consider
# accumulating a list and binding once if this becomes slow.
for (i in 1:1000)
{
    for (j in 1:8)
    {
        ens_tidy <- rbind(ens_tidy, data.frame(walker = rep(i, n_iter-burnin),
                                               param = rep(param_names[j], n_iter-burnin),
                                               iter = seq(1, n_iter-burnin),
                                               value = ens_samples_rescaled[j,i,(burnin+1):n_iter]))
    }
}
saveRDS(ens_tidy, "216-ens-all-1000-2000-fix_bc-fix_scale-unbiased-nobc-rep1-tidy_df.Rds")
# Create dummy for the plot limit ---------------------------------------------
# Invisible points at each parameter's prior bounds so that every facet's
# y-axis spans the full prior range (used via geom_blank below).
ddummy <- data.frame(walker = c(), param = c(), iter = c(), value = c())
for (i in 1:8)
{
    ddummy <- rbind(ddummy, data.frame(walker = rep(1, 2),
                                       param = rep(param_names[i], 2),
                                       iter = seq(1, 2),
                                       value = prior_ranges[[i]]))
}
# Make the plot ---------------------------------------------------------------
# Restrict to a window of late iterations and a subset of walkers.
ens_tidy_sub <- subset(ens_tidy, iter >= 1141)
ens_tidy_sub <- subset(ens_tidy_sub, walker >= 601)
p <- ggplot(data = ens_tidy_sub,
            aes(x = iter, y = value, group = walker))
p <- p + geom_line(alpha = 0.55)
p <- p + geom_blank(data = ddummy, aes(x = iter, y = value))
p <- p + theme_bw()
p <- p + facet_wrap(~param, scales = "free_y", ncol = 4)
# Set axis labels and font size
p <- p + labs(y = "Parameter value",
              x = "Number of Iterations")
p <- p + scale_x_continuous(limits = c(1141, 1240))
p <- p + theme(strip.text.x = element_text(size = 16, face = "bold"))
p <- p + theme(axis.ticks.y = element_line(size = 1),
               axis.ticks.x = element_line(size = 1),
               axis.text.x = element_text(size = 16, angle = 30, hjust = 1),
               axis.text.y = element_text(size = 18))
p <- p + theme(axis.title.y = element_text(vjust = 1.5, size = 24),
               axis.title.x = element_text(vjust = -0.5, size = 24))
p
# Write the plot --------------------------------------------------------------
# NOTE(review): embed_fonts() is from the extrafont package, which is not
# loaded in this script -- confirm it is attached by a sourced helper.
pdf(otpfullname, family = "CM Roman",
    width = fig_size[1], height = fig_size[2])
print(p)
dev.off()
embed_fonts(otpfullname, outfile=otpfullname)
|
## This is code that takes the 11 ensemble members of a POAMA run
## Started on a particular day
## And outputs some interested results for further analysis
## I care about ECLs, so main result is a daily timeseries of # of members that
## have an ECL on that day
## Also outputs a figure for each of the months in the file
## That shows the number of cyclones centred in that month
## Option to set a higher intensity threshold
library(ncdf4)
library(raster)
library(maps)
library(abind)
library(sp)
# Build a colour-ramp palette function from a set of anchor colours.
#
# steps           : vector of anchor colours (names or hex strings).
# n.steps.between : for each adjacent pair of anchors, how many interpolated
#                   colours to insert between them (default: none).
# ...             : passed on to colorRampPalette().
# Returns a function(n) generating n colours along the expanded ramp.
color.palette <- function(steps, n.steps.between = NULL, ...) {
  n.anchor <- length(steps)
  if (is.null(n.steps.between)) {
    n.steps.between <- rep(0, n.anchor - 1)
  }
  if (length(n.steps.between) != n.anchor - 1) {
    stop("Must have one less n.steps.between value than steps")
  }
  # Positions of the anchor colours within the expanded colour sequence.
  anchor.pos <- cumsum(rep(1, n.anchor) + c(0, n.steps.between))
  n.total <- anchor.pos[length(anchor.pos)]
  rgb.mat <- matrix(NA, nrow = 3, ncol = n.total)
  rgb.mat[, anchor.pos] <- col2rgb(steps)
  # Linearly interpolate each RGB channel across every gap that requests
  # intermediate colours.
  for (i in which(n.steps.between > 0)) {
    from.col <- rgb.mat[, anchor.pos[i]]
    to.col <- rgb.mat[, anchor.pos[i + 1]]
    for (j in 1:3) {
      filled <- seq(from.col[j], to.col[j],
                    length.out = n.steps.between[i] + 2)[2:(2 + n.steps.between[i] - 1)]
      rgb.mat[j, (anchor.pos[i] + 1):(anchor.pos[i + 1] - 1)] <- filled
    }
  }
  expanded <- rgb(rgb.mat[1, ], rgb.mat[2, ], rgb.mat[3, ], maxColorValue = 255)
  colorRampPalette(expanded, ...)
}
# Diverging palette (blue-white-red) for anomaly maps, and a sequential
# palette (white-blue-dark) for value maps, built with color.palette().
col_anom <- color.palette(c("darkblue","blue","white","red","darkred"),c(10,20,20,10))
col_val <- color.palette(c("white","blue","darkblue","black"),c(20,10,5))
# Draw a colour-bar legend for a set of breaks and colours on the current
# graphics device.
#
# brks       : numeric vector of break values.
# cols       : vector of colours, one per interval between breaks.
# vert       : TRUE for a vertical bar (labels on the right side),
#              FALSE for a horizontal bar (labels underneath).
# subsampleg : label every `subsampleg`-th break.
# nstart     : index of the first break to label.
# Called for its side effect on the active device.
ColorBar <- function(brks, cols, vert = TRUE, subsampleg = 1, nstart = 2)
{
  if (vert) {
    par(mar = c(1, 1, 1, 3), mgp = c(1, 1, 0), las = 1, cex = 1)
    image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, col = cols,
          xlab = '', ylab = '')
    box()
    axis(4, at = seq(nstart - 0.5, length(brks) - 1.5, subsampleg), tick = TRUE,
         labels = brks[seq(nstart, length(brks) - 1, subsampleg)])
  } else {
    par(mar = c(1.5, 1, 1, 1), mgp = c(1.5, 0.3, 0), las = 1, cex = 1)
    image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols,
          xlab = '', ylab = '')
    box()
    # BUG FIX: the original wrote seq(nstart=2, length(brks)-1, subsampleg).
    # seq() has no `nstart` argument, so the named value fell through to `...`
    # and the remaining positional arguments were matched as from/to, yielding
    # wrong (reversed) label indices and a length mismatch with `at`.
    # Mirror the vertical branch instead.
    axis(1, at = seq(nstart - 0.5, length(brks) - 1.5, subsampleg),
         labels = brks[seq(nstart, length(brks) - 1, subsampleg)])
  }
}
# Global 1-degree grid-cell centres; makesmooth() below reads these globals.
lat=seq(-89.5,89.5)
lon=seq(0,359.5)
# Smooth a lon x lat matrix with a circular moving window of radius `winwid`
# degrees.  The matrix is padded with wrapped longitude columns on each side
# so smoothing is continuous across the dateline, then cropped back to the
# original extent.  Relies on the global `lon`/`lat` vectors and on the
# abind and raster packages.  Returns a matrix the same size as `data`.
makesmooth <- function(data, winwid = 5)
{
  dims <- dim(data)
  n.lon <- dims[1]
  n.lat <- dims[2]
  # Pad in longitude (wrap-around) and flip latitude for raster's row order.
  padded <- abind(data[(n.lon - winwid):n.lon, n.lat:1],
                  data[, n.lat:1],
                  data[1:winwid, n.lat:1],
                  along = 1)
  ras <- raster(t(padded),
                xmn = min(lon) - winwid, xmx = max(lon) + winwid,
                ymn = min(lat), ymx = max(lat))
  weights <- focalWeight(ras, winwid, type = "circle")
  smoothed <- focal(ras, weights)
  # Undo the latitude flip and drop the longitude padding.
  t(as.matrix(smoothed)[n.lat:1, (winwid + 1):(n.lon + winwid)])
}
# Plot maps of mean cyclone/anticyclone properties (Laplacian, central
# pressure, radius, movement speed) per season, built from yearly track files
# "tracks_<year>.dat" in `dir`, and write them to "<fout>.pdf": one panel per
# season x variable plus one colour bar per variable.
#
# year1, year2   : first/last year of track files to read.
# mthresh        : NOTE(review) accepted but never used in this function.
# seasons        : one row per season; columns = first and last month
#                  (wraps across December when last < first).
# snames         : season labels, one per row of `seasons`.
# dir            : directory holding the track files.
# reanal         : NOTE(review) accepted but never used in this function.
# latlim, lonlim : map plot limits; lonlim[1] < 0 re-orders longitudes to the
#                  -180..180 convention.
# breaks         : NOTE(review) accepted but shadowed by the local `breaks`
#                  assigned from `vbreaks` in the plotting loop below.
# closed         : if TRUE keep only closed systems (Open == 0 or 10).
# cv             : minimum |Laplacian| per fix (NaN = no filter).
# dur            : minimum event duration in fixes (NA = no filter).
# move           : minimum start-to-end displacement in km (NA = no filter).
# fout           : output PDF basename.
# type           : "high"/"anticyclone" selects anticyclone value breaks;
#                  anything else selects the low (cyclone) breaks.
# Side effects   : writes <fout>.pdf; prints progress and means to console.
plot_freq_panel<-function(year1,year2,mthresh=c(0,100,200,300,400,600),
                          seasons=rbind(c(5,10),c(11,4)),snames=c("MJJASO","NDJFMA"),
                          dir="/short/eg3/asp561/cts.dir/gcyc_out/netcdf/",reanal="ERAI",
                          latlim=c(-90,90),lonlim=c(0,360),breaks=c(0,0.05,seq(0.1,1,0.1),1000),
                          closed=F,cv=NaN,dur=NA,move=NA,fout="output",type="high")
{
years=seq(year1,year2,1)
vars=c("Laplacian","Central Pressure (hPa)","Radius (degrees)","Movement Speed (km/hr)")
vnames=c("CV","MSLP","Radius","Move")
# Value breaks per variable, chosen for highs vs lows.
if(type=="high" | type=="anticyclone") {
vbreaks=list(c(seq(0.1,0.4,0.05),100),
             c(seq(1010,1040,5),9999),
             c(seq(3,10,1),1000),
             c(seq(0,60,10),1000))
} else {
vbreaks=list(c(seq(0.2,0.9,0.1),100),
             c(seq(1020,970,-10),0),
             c(seq(3,7,0.5),1000),
             c(seq(0,60,10),1000))
}
lat=seq(-89.5,89.5)
lon=seq(0,359.5) ### Can always combine into bigger cells later
ss=length(snames)
if(ss<=2) vbreaks[[1]]=c(0,0.05,0.25,0.5,0.75,1,1.5,2,100)
vv=length(vars)
# NOTE(review): `freq` is allocated (and later re-ordered) but never filled
# with counts -- it appears vestigial.
freq=array(0,c(length(lon),length(lat),length(years),ss))
# varfreq[lon, lat, year, season, variable, 1=count / 2=mean value]
varfreq=array(NaN,c(length(lon),length(lat),length(years),ss,vv,2))
varfreq[,,,,,1]=0
for(y in 1:length(years))
{
print(years[y])
fname=paste(dir,"/tracks_",years[y],".dat",sep="")
read.table(fname, sep="",skip=1)->fixes
colnames(fixes)=c("ID","Fix","Date","Time","Open","Lon","Lat","MSLP","CV","Depth","Radius","Up","Vp")
fixes$Year=floor(fixes$Date/10000)
if(length(unique(fixes$Year))>1) fixes=fixes[fixes$Year==unique(fixes$Year)[2],]
fixes$Month=floor(fixes$Date/100)%%100
fixes$Lat2=floor(fixes$Lat)
fixes$Lon2=floor(fixes$Lon)%%360
fixes$CV=abs(fixes$CV)
# Distance moved since the previous fix of the same track (km per time step).
fixes$Move<-NaN
I=which(fixes$Fix>1)
if(I[1]==1) I=I[-1]
for(i in 1:length(I)) fixes$Move[I[i]]=spDistsN1(cbind(fixes$Lon[I[i]],fixes$Lat[I[i]]),cbind(fixes$Lon[I[i]-1],fixes$Lat[I[i]-1]),longlat=T)
# Optional filter: keep only events that travel at least `move` km overall.
if(!is.na(move))
{
x<-rle(fixes$ID)
events<-data.frame(ID=x$values,Length=x$lengths,Date1=rep(0,length(x$values)),Move=rep(0,length(x$values)))
for(i in 1:length(events$ID))
{
I=which(fixes$ID==events[i,1])
events$Move[i]=spDistsN1(cbind(fixes$Lon[min(I)],fixes$Lat[min(I)]),
                         cbind(fixes$Lon[max(I)],fixes$Lat[max(I)]),longlat=T)
}
events=events[events$Move>=move,]
include<-match(fixes[,1],events[,1])
J<-which(is.na(include)==0)
fixes=fixes[J,]
}
# Optional filter: keep only events lasting at least `dur` fixes.
if(!is.na(dur))
{
x<-rle(fixes$ID)
events<-cbind(x$values,x$lengths,matrix(data=0,nrow=length(x$values),ncol=1))
events=events[events[,2]>=dur,]
include<-match(fixes[,1],events[,1])
J<-which(is.na(include)==0)
fixes=fixes[J,]
}
fixes$Move=fixes$Move/6 # Speed per hour
# fixes=fixes[!is.na(fixes$Move),]
if(!is.na(cv)) fixes=fixes[fixes$CV>=cv,]
if(closed) fixes=fixes[(fixes$Open==0 | fixes$Open==10),] # Remove all open systems
# Accumulate per-cell counts and means for every season and variable.
for(s in 1:ss)
{
if(seasons[s,2]>=seasons[s,1]) mlist=seq(seasons[s,1],seasons[s,2]) else mlist=c(seq(seasons[s,1],12),seq(1,seasons[s,2]))
for(v in 1:vv)
{
I=which(fixes$Month%in%mlist & !is.na(fixes[,vnames[v]]))
# NOTE(review): if length(I)==0 the assignment below reuses `tmp` from the
# previous iteration (or errors on the first), giving stale counts -- confirm.
if(length(I)>0) tmp=table(factor(fixes$Lon2[I],levels=0:359),factor(fixes$Lat2[I],levels=-90:89))
varfreq[,,y,s,v,1]=tmp
tmp2=aggregate(fixes[I,vnames[v]],by=list(fixes$Lon2[I],fixes$Lat2[I]),FUN=mean)
jlat=match(tmp2[,2],seq(-90,89))
ilon=match(tmp2[,1],seq(0,359))
for(nn in which(!is.na(jlat))) varfreq[ilon[nn],jlat[nn],y,s,v,2]=as.numeric(tmp2[nn,3])
}
}
}
# Re-order longitudes to -180..180 when the requested map crosses Greenwich.
# NOTE(review): this rebinds only the function-local lon/lat; makesmooth()
# (called below) still reads the globals defined at the top of the script.
if(lonlim[1]<0) {
lon=seq(-179.5,179.5,1)
library(abind)
freq=abind(freq[181:360,,,],freq[1:180,,,],along=1)
varfreq=abind(varfreq[181:360,,,,,],varfreq[1:180,,,,,],along=1)
}
pnum=1
pdf(file=paste0(fout,".pdf"),width=(4*ss)+1.2,height=(2.7*vv))
# Layout: one column per season, plus a narrow final column of colour bars.
tmp=matrix(0,vv,ss+1)
n=1
for(s in 1:ss)
for(i in 1:vv)
{
tmp[i,s]=n
n=n+1
}
tmp[,ss+1]=seq(n,n+vv-1)
layout(tmp,width=c(rep(1,ss),0.3))
par(mar=c(2,2,4,1))
for(s in 1:ss)
for(v in 1:vv)
{
print(mean(varfreq[,,,s,v,2],na.rm=T))
breaks=vbreaks[[v]]
col1=col_val(length(breaks))
col1=col1[-1]
# Count-weighted mean across years; cells averaging < 0.05 fixes are masked.
meanvar=apply(varfreq[,,,s,v,2]*varfreq[,,,s,v,1],c(1,2),sum,na.rm=T)/apply(varfreq[,,,s,v,1],c(1,2),sum,na.rm=T)
meanvar[apply(varfreq[,,,s,v,1],c(1,2),mean,na.rm=T)<0.05]=NaN
meanvar2=makesmooth(apply(varfreq[,,,s,v,2]*varfreq[,,,s,v,1],c(1,2),sum,na.rm=T))/makesmooth(apply(varfreq[,,,s,v,1],c(1,2),sum,na.rm=T))
meanvar2[makesmooth(apply(varfreq[,,,s,v,1],c(1,2),mean,na.rm=T))<0.05]=NaN
tit=paste0(letters[pnum],") ",snames[s],": Mean ",vars[v])
print(mean(meanvar,na.rm=T))
if(breaks[2]>breaks[1]) image(lon,lat,meanvar,breaks=breaks,col=col1,xlab="",ylab="",xlim=lonlim,ylim=latlim,main=tit,cex.main=1.5,cex.axis=1) else
image(lon,lat,meanvar,breaks=rev(breaks),col=rev(col1),xlab="",ylab="",xlim=lonlim,ylim=latlim,main=tit,cex.main=1.5,cex.axis=1)
if(lonlim[1]<0) map('world',add=T) else map('world2',add=T)
contour(lon,lat,meanvar2,levels=breaks,add=T,lwd=2,col="black",drawlabels=F)
pnum=pnum+1
}
# One colour bar per variable in the final layout column.
for(v in 1:vv)
{
breaks=vbreaks[[v]]
col1=col_val(length(breaks))
col1=col1[-1]
ColorBar(breaks,col1,subsampleg=2,vert=T,nstart=1)
}
dev.off()
}
# Season definitions: four standard 3-month seasons and a 2-season split.
slist2=c("MAM","JJA","SON","DJF")
smons2=rbind(c(3,5),c(6,8),c(9,11),c(12,2))
slist=c("MJJASO","NDJFMA")
smons=rbind(c(5,10),c(11,4))
# NOTE(review): `breaks=breaks` passes a global `breaks` that is not defined
# anywhere in this script; evaluating that argument inside plot_freq_panel
# will fail unless `breaks` exists in the calling environment -- confirm.
plot_freq_panel(1980,2016,seasons=smons,snames=slist,breaks=breaks,closed=T,move=500,
                dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_highs_rad10cv0.075/",latlim=c(-50,-10),lonlim=c(100,180),
                reanal="ERAI",fout="paperfig_anticycfreq_ERAI_rad10cv0.075_500kmstats_2seasons_australia")
#plot_freq_panel(1980,2016,seasons=smons,snames=slist,breaks=breaks,closed=T,move=500,
#        dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_lows_rad5cv0.15/",
#        reanal="ERAI",fout="paperfig_cycfreq_ERAI_rad5cv0.15_500kmstats_vseason")
plot_freq_panel(1980,2016,seasons=smons2,snames=slist2,breaks=breaks,closed=T,move=500,
                dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_highs_rad10cv0.075/",latlim=c(-50,-10),lonlim=c(100,180),
                reanal="ERAI",fout="paperfig_anticycfreq_ERAI_rad10cv0.075_500kmstats_4seasons_australia")
#plot_freq_panel(1980,2016,seasons=smons2,snames=slist2,breaks=breaks,closed=T,move=500,
#        dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_lows_rad5cv0.15/",
#        reanal="ERAI",fout="paperfig_cycfreq_ERAI_rad5cv0.15_500kmstats_4seasons")
| /plot_freq_raster_statspanel_vseason_v2.R | no_license | apepler/R-Raijin | R | false | false | 9,101 | r | ## This is code that takes the 11 ensemble members of a POAMA run
## Started on a particular day
## And outputs some interested results for further analysis
## I care about ECLs, so main result is a daily timeseries of # of members that
## have an ECL on that day
## Also outputs a figure for each of the months in the file
## That shows the number of cyclones centred in that month
## Option to set a higher intensity threshold
library(ncdf4)
library(raster)
library(maps)
library(abind)
library(sp)
# Build a colour-ramp palette function from a set of anchor colours.
#
# steps           : vector of anchor colours (names or hex strings).
# n.steps.between : for each adjacent pair of anchors, how many interpolated
#                   colours to insert between them (default: none).
# ...             : passed on to colorRampPalette().
# Returns a function(n) generating n colours along the expanded ramp.
color.palette <- function(steps, n.steps.between = NULL, ...) {
  n.anchor <- length(steps)
  if (is.null(n.steps.between)) {
    n.steps.between <- rep(0, n.anchor - 1)
  }
  if (length(n.steps.between) != n.anchor - 1) {
    stop("Must have one less n.steps.between value than steps")
  }
  # Positions of the anchor colours within the expanded colour sequence.
  anchor.pos <- cumsum(rep(1, n.anchor) + c(0, n.steps.between))
  n.total <- anchor.pos[length(anchor.pos)]
  rgb.mat <- matrix(NA, nrow = 3, ncol = n.total)
  rgb.mat[, anchor.pos] <- col2rgb(steps)
  # Linearly interpolate each RGB channel across every gap that requests
  # intermediate colours.
  for (i in which(n.steps.between > 0)) {
    from.col <- rgb.mat[, anchor.pos[i]]
    to.col <- rgb.mat[, anchor.pos[i + 1]]
    for (j in 1:3) {
      filled <- seq(from.col[j], to.col[j],
                    length.out = n.steps.between[i] + 2)[2:(2 + n.steps.between[i] - 1)]
      rgb.mat[j, (anchor.pos[i] + 1):(anchor.pos[i + 1] - 1)] <- filled
    }
  }
  expanded <- rgb(rgb.mat[1, ], rgb.mat[2, ], rgb.mat[3, ], maxColorValue = 255)
  colorRampPalette(expanded, ...)
}
# Diverging palette (blue-white-red) for anomaly maps, and a sequential
# palette (white-blue-dark) for value maps, built with color.palette().
col_anom <- color.palette(c("darkblue","blue","white","red","darkred"),c(10,20,20,10))
col_val <- color.palette(c("white","blue","darkblue","black"),c(20,10,5))
# Draw a colour-bar legend for a set of breaks and colours on the current
# graphics device.
#
# brks       : numeric vector of break values.
# cols       : vector of colours, one per interval between breaks.
# vert       : TRUE for a vertical bar (labels on the right side),
#              FALSE for a horizontal bar (labels underneath).
# subsampleg : label every `subsampleg`-th break.
# nstart     : index of the first break to label.
# Called for its side effect on the active device.
ColorBar <- function(brks, cols, vert = TRUE, subsampleg = 1, nstart = 2)
{
  if (vert) {
    par(mar = c(1, 1, 1, 3), mgp = c(1, 1, 0), las = 1, cex = 1)
    image(1, c(1:length(cols)), t(c(1:length(cols))), axes = FALSE, col = cols,
          xlab = '', ylab = '')
    box()
    axis(4, at = seq(nstart - 0.5, length(brks) - 1.5, subsampleg), tick = TRUE,
         labels = brks[seq(nstart, length(brks) - 1, subsampleg)])
  } else {
    par(mar = c(1.5, 1, 1, 1), mgp = c(1.5, 0.3, 0), las = 1, cex = 1)
    image(1:length(cols), 1, t(t(1:length(cols))), axes = FALSE, col = cols,
          xlab = '', ylab = '')
    box()
    # BUG FIX: the original wrote seq(nstart=2, length(brks)-1, subsampleg).
    # seq() has no `nstart` argument, so the named value fell through to `...`
    # and the remaining positional arguments were matched as from/to, yielding
    # wrong (reversed) label indices and a length mismatch with `at`.
    # Mirror the vertical branch instead.
    axis(1, at = seq(nstart - 0.5, length(brks) - 1.5, subsampleg),
         labels = brks[seq(nstart, length(brks) - 1, subsampleg)])
  }
}
# Global 1-degree grid-cell centres; makesmooth() below reads these globals.
lat=seq(-89.5,89.5)
lon=seq(0,359.5)
# Smooth a lon x lat matrix with a circular moving window of radius `winwid`
# degrees.  The matrix is padded with wrapped longitude columns on each side
# so smoothing is continuous across the dateline, then cropped back to the
# original extent.  Relies on the global `lon`/`lat` vectors and on the
# abind and raster packages.  Returns a matrix the same size as `data`.
makesmooth <- function(data, winwid = 5)
{
  dims <- dim(data)
  n.lon <- dims[1]
  n.lat <- dims[2]
  # Pad in longitude (wrap-around) and flip latitude for raster's row order.
  padded <- abind(data[(n.lon - winwid):n.lon, n.lat:1],
                  data[, n.lat:1],
                  data[1:winwid, n.lat:1],
                  along = 1)
  ras <- raster(t(padded),
                xmn = min(lon) - winwid, xmx = max(lon) + winwid,
                ymn = min(lat), ymx = max(lat))
  weights <- focalWeight(ras, winwid, type = "circle")
  smoothed <- focal(ras, weights)
  # Undo the latitude flip and drop the longitude padding.
  t(as.matrix(smoothed)[n.lat:1, (winwid + 1):(n.lon + winwid)])
}
# Plot maps of mean cyclone/anticyclone properties (Laplacian, central
# pressure, radius, movement speed) per season, built from yearly track files
# "tracks_<year>.dat" in `dir`, and write them to "<fout>.pdf": one panel per
# season x variable plus one colour bar per variable.
#
# year1, year2   : first/last year of track files to read.
# mthresh        : NOTE(review) accepted but never used in this function.
# seasons        : one row per season; columns = first and last month
#                  (wraps across December when last < first).
# snames         : season labels, one per row of `seasons`.
# dir            : directory holding the track files.
# reanal         : NOTE(review) accepted but never used in this function.
# latlim, lonlim : map plot limits; lonlim[1] < 0 re-orders longitudes to the
#                  -180..180 convention.
# breaks         : NOTE(review) accepted but shadowed by the local `breaks`
#                  assigned from `vbreaks` in the plotting loop below.
# closed         : if TRUE keep only closed systems (Open == 0 or 10).
# cv             : minimum |Laplacian| per fix (NaN = no filter).
# dur            : minimum event duration in fixes (NA = no filter).
# move           : minimum start-to-end displacement in km (NA = no filter).
# fout           : output PDF basename.
# type           : "high"/"anticyclone" selects anticyclone value breaks;
#                  anything else selects the low (cyclone) breaks.
# Side effects   : writes <fout>.pdf; prints progress and means to console.
plot_freq_panel<-function(year1,year2,mthresh=c(0,100,200,300,400,600),
                          seasons=rbind(c(5,10),c(11,4)),snames=c("MJJASO","NDJFMA"),
                          dir="/short/eg3/asp561/cts.dir/gcyc_out/netcdf/",reanal="ERAI",
                          latlim=c(-90,90),lonlim=c(0,360),breaks=c(0,0.05,seq(0.1,1,0.1),1000),
                          closed=F,cv=NaN,dur=NA,move=NA,fout="output",type="high")
{
years=seq(year1,year2,1)
vars=c("Laplacian","Central Pressure (hPa)","Radius (degrees)","Movement Speed (km/hr)")
vnames=c("CV","MSLP","Radius","Move")
# Value breaks per variable, chosen for highs vs lows.
if(type=="high" | type=="anticyclone") {
vbreaks=list(c(seq(0.1,0.4,0.05),100),
             c(seq(1010,1040,5),9999),
             c(seq(3,10,1),1000),
             c(seq(0,60,10),1000))
} else {
vbreaks=list(c(seq(0.2,0.9,0.1),100),
             c(seq(1020,970,-10),0),
             c(seq(3,7,0.5),1000),
             c(seq(0,60,10),1000))
}
lat=seq(-89.5,89.5)
lon=seq(0,359.5) ### Can always combine into bigger cells later
ss=length(snames)
if(ss<=2) vbreaks[[1]]=c(0,0.05,0.25,0.5,0.75,1,1.5,2,100)
vv=length(vars)
# NOTE(review): `freq` is allocated (and later re-ordered) but never filled
# with counts -- it appears vestigial.
freq=array(0,c(length(lon),length(lat),length(years),ss))
# varfreq[lon, lat, year, season, variable, 1=count / 2=mean value]
varfreq=array(NaN,c(length(lon),length(lat),length(years),ss,vv,2))
varfreq[,,,,,1]=0
for(y in 1:length(years))
{
print(years[y])
fname=paste(dir,"/tracks_",years[y],".dat",sep="")
read.table(fname, sep="",skip=1)->fixes
colnames(fixes)=c("ID","Fix","Date","Time","Open","Lon","Lat","MSLP","CV","Depth","Radius","Up","Vp")
fixes$Year=floor(fixes$Date/10000)
if(length(unique(fixes$Year))>1) fixes=fixes[fixes$Year==unique(fixes$Year)[2],]
fixes$Month=floor(fixes$Date/100)%%100
fixes$Lat2=floor(fixes$Lat)
fixes$Lon2=floor(fixes$Lon)%%360
fixes$CV=abs(fixes$CV)
# Distance moved since the previous fix of the same track (km per time step).
fixes$Move<-NaN
I=which(fixes$Fix>1)
if(I[1]==1) I=I[-1]
for(i in 1:length(I)) fixes$Move[I[i]]=spDistsN1(cbind(fixes$Lon[I[i]],fixes$Lat[I[i]]),cbind(fixes$Lon[I[i]-1],fixes$Lat[I[i]-1]),longlat=T)
# Optional filter: keep only events that travel at least `move` km overall.
if(!is.na(move))
{
x<-rle(fixes$ID)
events<-data.frame(ID=x$values,Length=x$lengths,Date1=rep(0,length(x$values)),Move=rep(0,length(x$values)))
for(i in 1:length(events$ID))
{
I=which(fixes$ID==events[i,1])
events$Move[i]=spDistsN1(cbind(fixes$Lon[min(I)],fixes$Lat[min(I)]),
                         cbind(fixes$Lon[max(I)],fixes$Lat[max(I)]),longlat=T)
}
events=events[events$Move>=move,]
include<-match(fixes[,1],events[,1])
J<-which(is.na(include)==0)
fixes=fixes[J,]
}
# Optional filter: keep only events lasting at least `dur` fixes.
if(!is.na(dur))
{
x<-rle(fixes$ID)
events<-cbind(x$values,x$lengths,matrix(data=0,nrow=length(x$values),ncol=1))
events=events[events[,2]>=dur,]
include<-match(fixes[,1],events[,1])
J<-which(is.na(include)==0)
fixes=fixes[J,]
}
fixes$Move=fixes$Move/6 # Speed per hour
# fixes=fixes[!is.na(fixes$Move),]
if(!is.na(cv)) fixes=fixes[fixes$CV>=cv,]
if(closed) fixes=fixes[(fixes$Open==0 | fixes$Open==10),] # Remove all open systems
# Accumulate per-cell counts and means for every season and variable.
for(s in 1:ss)
{
if(seasons[s,2]>=seasons[s,1]) mlist=seq(seasons[s,1],seasons[s,2]) else mlist=c(seq(seasons[s,1],12),seq(1,seasons[s,2]))
for(v in 1:vv)
{
I=which(fixes$Month%in%mlist & !is.na(fixes[,vnames[v]]))
# NOTE(review): if length(I)==0 the assignment below reuses `tmp` from the
# previous iteration (or errors on the first), giving stale counts -- confirm.
if(length(I)>0) tmp=table(factor(fixes$Lon2[I],levels=0:359),factor(fixes$Lat2[I],levels=-90:89))
varfreq[,,y,s,v,1]=tmp
tmp2=aggregate(fixes[I,vnames[v]],by=list(fixes$Lon2[I],fixes$Lat2[I]),FUN=mean)
jlat=match(tmp2[,2],seq(-90,89))
ilon=match(tmp2[,1],seq(0,359))
for(nn in which(!is.na(jlat))) varfreq[ilon[nn],jlat[nn],y,s,v,2]=as.numeric(tmp2[nn,3])
}
}
}
# Re-order longitudes to -180..180 when the requested map crosses Greenwich.
# NOTE(review): this rebinds only the function-local lon/lat; makesmooth()
# (called below) still reads the globals defined at the top of the script.
if(lonlim[1]<0) {
lon=seq(-179.5,179.5,1)
library(abind)
freq=abind(freq[181:360,,,],freq[1:180,,,],along=1)
varfreq=abind(varfreq[181:360,,,,,],varfreq[1:180,,,,,],along=1)
}
pnum=1
pdf(file=paste0(fout,".pdf"),width=(4*ss)+1.2,height=(2.7*vv))
# Layout: one column per season, plus a narrow final column of colour bars.
tmp=matrix(0,vv,ss+1)
n=1
for(s in 1:ss)
for(i in 1:vv)
{
tmp[i,s]=n
n=n+1
}
tmp[,ss+1]=seq(n,n+vv-1)
layout(tmp,width=c(rep(1,ss),0.3))
par(mar=c(2,2,4,1))
for(s in 1:ss)
for(v in 1:vv)
{
print(mean(varfreq[,,,s,v,2],na.rm=T))
breaks=vbreaks[[v]]
col1=col_val(length(breaks))
col1=col1[-1]
# Count-weighted mean across years; cells averaging < 0.05 fixes are masked.
meanvar=apply(varfreq[,,,s,v,2]*varfreq[,,,s,v,1],c(1,2),sum,na.rm=T)/apply(varfreq[,,,s,v,1],c(1,2),sum,na.rm=T)
meanvar[apply(varfreq[,,,s,v,1],c(1,2),mean,na.rm=T)<0.05]=NaN
meanvar2=makesmooth(apply(varfreq[,,,s,v,2]*varfreq[,,,s,v,1],c(1,2),sum,na.rm=T))/makesmooth(apply(varfreq[,,,s,v,1],c(1,2),sum,na.rm=T))
meanvar2[makesmooth(apply(varfreq[,,,s,v,1],c(1,2),mean,na.rm=T))<0.05]=NaN
tit=paste0(letters[pnum],") ",snames[s],": Mean ",vars[v])
print(mean(meanvar,na.rm=T))
if(breaks[2]>breaks[1]) image(lon,lat,meanvar,breaks=breaks,col=col1,xlab="",ylab="",xlim=lonlim,ylim=latlim,main=tit,cex.main=1.5,cex.axis=1) else
image(lon,lat,meanvar,breaks=rev(breaks),col=rev(col1),xlab="",ylab="",xlim=lonlim,ylim=latlim,main=tit,cex.main=1.5,cex.axis=1)
if(lonlim[1]<0) map('world',add=T) else map('world2',add=T)
contour(lon,lat,meanvar2,levels=breaks,add=T,lwd=2,col="black",drawlabels=F)
pnum=pnum+1
}
# One colour bar per variable in the final layout column.
for(v in 1:vv)
{
breaks=vbreaks[[v]]
col1=col_val(length(breaks))
col1=col1[-1]
ColorBar(breaks,col1,subsampleg=2,vert=T,nstart=1)
}
dev.off()
}
# Season definitions: four standard 3-month seasons and a 2-season split.
slist2=c("MAM","JJA","SON","DJF")
smons2=rbind(c(3,5),c(6,8),c(9,11),c(12,2))
slist=c("MJJASO","NDJFMA")
smons=rbind(c(5,10),c(11,4))
# NOTE(review): `breaks=breaks` passes a global `breaks` that is not defined
# anywhere in this script; evaluating that argument inside plot_freq_panel
# will fail unless `breaks` exists in the calling environment -- confirm.
plot_freq_panel(1980,2016,seasons=smons,snames=slist,breaks=breaks,closed=T,move=500,
                dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_highs_rad10cv0.075/",latlim=c(-50,-10),lonlim=c(100,180),
                reanal="ERAI",fout="paperfig_anticycfreq_ERAI_rad10cv0.075_500kmstats_2seasons_australia")
#plot_freq_panel(1980,2016,seasons=smons,snames=slist,breaks=breaks,closed=T,move=500,
#        dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_lows_rad5cv0.15/",
#        reanal="ERAI",fout="paperfig_cycfreq_ERAI_rad5cv0.15_500kmstats_vseason")
plot_freq_panel(1980,2016,seasons=smons2,snames=slist2,breaks=breaks,closed=T,move=500,
                dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_highs_rad10cv0.075/",latlim=c(-50,-10),lonlim=c(100,180),
                reanal="ERAI",fout="paperfig_anticycfreq_ERAI_rad10cv0.075_500kmstats_4seasons_australia")
#plot_freq_panel(1980,2016,seasons=smons2,snames=slist2,breaks=breaks,closed=T,move=500,
#        dir="/short/eg3/asp561/cts.dir/gcyc_out/ERAI/proj100_lows_rad5cv0.15/",
#        reanal="ERAI",fout="paperfig_cycfreq_ERAI_rad5cv0.15_500kmstats_4seasons")
|
#' Statistical analysis (deprecated)
#'
#' Deprecated stub: performs no computation and only emits a message pointing
#' callers to 'create.stats'.  The historical arguments are accepted (and
#' ignored) so that existing call sites still run.
#'
#' @param dat Ignored (historically the input data).
#' @param use.cols Ignored.
#' @param sample.col Ignored.
#' @param grp.col Ignored.
#' @param comparisons Ignored.
#' @param variance.test Ignored; default "kruskal.test".
#' @param pairwise.test Ignored; default "wilcox.text" (NOTE(review): likely a
#'   typo for "wilcox.test" -- harmless here since the value is never used).
#' @param corrections Ignored; default "fdr".
#' @return The invisible NULL returned by message(); called for its side effect.
#'
#' @import data.table
#'
#' @export
do.stats <- function(dat,
                     use.cols,
                     sample.col,
                     grp.col,
                     comparisons, # make a tool to create a factorial comparison design -- for now just specify manually
                     variance.test = "kruskal.test", ## add ANOVA
                     pairwise.test = "wilcox.text", ## Add t-test
                     corrections = "fdr"){
  # NOTE(review): "depreciated" in the message text should read "deprecated".
  message("The 'do.stats' function has been depreciated. Please use 'create.stats' instead")
} | /R/do.stats.R | permissive | ImmuneDynamics/Spectre | R | false | false | 623 | r | #' Statistical analysis
#'
#' Deprecated stub: `do.stats` has been deprecated in favour of
#' `create.stats`.  It performs no computation; the historical arguments are
#' accepted and ignored so that existing call sites still run, and a
#' deprecation message is emitted.
#'
#' @param dat Ignored (historically the input data).
#' @param use.cols Ignored.
#' @param sample.col Ignored.
#' @param grp.col Ignored.
#' @param comparisons Ignored.
#' @param variance.test Ignored; default "kruskal.test".
#' @param pairwise.test Ignored; default "wilcox.text".
#' @param corrections Ignored; default "fdr".
#' @return The invisible NULL returned by message(); called for its message
#'   side effect.
#'
#' @import data.table
#'
#' @export
do.stats <- function(dat,
                     use.cols,
                     sample.col,
                     grp.col,
                     comparisons,
                     variance.test = "kruskal.test",
                     pairwise.test = "wilcox.text",
                     corrections = "fdr"){
  # Bug fix: the deprecation message previously misspelled "deprecated" as
  # "depreciated".
  message("The 'do.stats' function has been deprecated. Please use 'create.stats' instead")
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streamSetApi.r
\name{streamSet$getRecordedAtTimes}
\alias{streamSet$getRecordedAtTimes}
\title{Returns recorded values of attributes for an element, event frame or attribute at the specified times.}
\arguments{
\item{webId}{The ID of an element, event frame or attribute, which is the base element or parent of all the stream attributes.}
\item{time}{The timestamp at which to retrieve a recorded value. Multiple timestamps may be specified with multiple instances of the parameter.}
\item{categoryName}{Specify that included attributes must have this category. The default is no category filter.}
\item{nameFilter}{The name query string used for filtering attributes. The default is no filter.}
\item{retrievalMode}{An optional value that determines the values to return when values don't exist at the exact time specified. The default is 'Auto'.}
\item{searchFullHierarchy}{Specifies if the search should include attributes nested further than the immediate attributes of the searchRoot. The default is 'false'.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{showExcluded}{Specifies whether the search should include attributes with the Excluded property set. The default is 'false'.}
\item{showHidden}{Specifies whether the search should include attributes with the Hidden property set. The default is 'false'.}
\item{sortOrder}{The order that the returned collection is sorted. The default is 'Ascending'.}
\item{templateName}{Specify that included attributes must be members of this template. The default is no template filter.}
\item{timeZone}{The time zone in which the time string will be interpreted. This parameter will be ignored if a time zone is specified in the time string. If no time zone is specified in either places, the PI Web API server time zone will be used.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL brevity and other special cases. Default is the value of the configuration item "WebIDType".}
}
\value{
Recorded values of the streams that meet the specified conditions.
}
\description{
Returns recorded values of attributes for an element, event frame or attribute at the specified times.
}
| /man/streamSet-cash-getRecordedAtTimes.Rd | permissive | frbl/PI-Web-API-Client-R | R | false | true | 2,376 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streamSetApi.r
\name{streamSet$getRecordedAtTimes}
\alias{streamSet$getRecordedAtTimes}
\title{Returns recorded values of attributes for an element, event frame or attribute at the specified times.}
\arguments{
\item{webId}{The ID of an element, event frame or attribute, which is the base element or parent of all the stream attributes.}
\item{time}{The timestamp at which to retrieve a recorded value. Multiple timestamps may be specified with multiple instances of the parameter.}
\item{categoryName}{Specify that included attributes must have this category. The default is no category filter.}
\item{nameFilter}{The name query string used for filtering attributes. The default is no filter.}
\item{retrievalMode}{An optional value that determines the values to return when values don't exist at the exact time specified. The default is 'Auto'.}
\item{searchFullHierarchy}{Specifies if the search should include attributes nested further than the immediate attributes of the searchRoot. The default is 'false'.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{showExcluded}{Specified if the search should include attributes with the Excluded property set. The default is 'false'.}
\item{showHidden}{Specified if the search should include attributes with the Hidden property set. The default is 'false'.}
\item{sortOrder}{The order that the returned collection is sorted. The default is 'Ascending'.}
\item{templateName}{Specify that included attributes must be members of this template. The default is no template filter.}
\item{timeZone}{The time zone in which the time string will be interpreted. This parameter will be ignored if a time zone is specified in the time string. If no time zone is specified in either places, the PI Web API server time zone will be used.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL brevity and other special cases. Default is the value of the configuration item "WebIDType".}
}
\value{
Recorded values of the streams that meet the specified conditions.
}
\description{
Returns recorded values of attributes for an element, event frame or attribute at the specified times.
}
|
# Proof of concept: animate an "agent" point moving along the straight line
# between its random start position and a "food" point on a 10x10 grid,
# using ggplot2 + gganimate.
# Leftover exploratory call: interpolates between two fixed points and
# discards the result (kept as-is; has no effect on the animation below).
x <- c(0, .6509467)
y <- c(0, 1.606)
approx(x,y)
library(ggplot2)
library(gganimate)
theme_set(theme_bw())
library(tidyverse)
# Random start positions for the agent and the food.
# NOTE(review): sample() is unseeded, so every run produces a different scene.
data <- data.frame(role = c("agent", "food"),
                   position_x = c(sample(0:10, 1), sample(0:10, 1)),
                   position_y = c(sample(0:10, 1), sample(0:10, 1)),
                   iteration = c(0, 0))
# Linear interpolation between the two points gives the agent's path
# (50 waypoints by default); rev() reverses the traversal order.
interp <- approx(data$position_x, data$position_y)
data_path_agent <- data.frame(role = "agent",
                              position_x = rev(interp$x),
                              position_y = rev(interp$y),
                              iteration = 1:length(interp$x),
                              image = "https://image.flaticon.com/icons/png/128/2942/2942667.png")
# The food stays at its start position for the whole animation.
# NOTE(review): `1:length(interp$x)-1` parses as (1:n) - 1, i.e. 0..n-1, so
# the food's iterations are offset by one from the agent's -- confirm intent.
data_path_food <- data.frame(role = "food",
                             position_x = data[data$role == "food",]$position_x,
                             position_y = data[data$role == "food",]$position_y,
                             iteration = 1:length(interp$x)-1,
                             image = "https://images.vexels.com/media/users/3/143088/isolated/preview/f565debc52083dacca60da22284e4083-iacute-cone-de-coxa-de-frango-by-vexels.png")
data_path <- rbind(data_path_agent, data_path_food)
# Build the animated scatter plot; transition_time() drives one frame per
# iteration value.  NOTE(review): the `image` column is never mapped to an
# aesthetic, so the icon URLs are currently unused.
p <- data_path %>%
  ggplot(aes(x = position_x, y = position_y, colour = role)) +
  geom_point(show.legend = FALSE, alpha = 0.7) +
  scale_color_viridis_d() +
  scale_size(range = c(2, 12)) +
  ylim(0, 10) + xlim(0, 10) +
  theme_void() +
  transition_time(iteration)
animate(p, renderer = gifski_renderer(loop = F))
| /poc.R | no_license | gabrielteotonio/vehicle-food-agent | R | false | false | 1,555 | r | x <- c(0, .6509467)
# Duplicate dump copy of poc.R; the initial `x <- c(0, .6509467)` statement
# sits on the preceding metadata-fused line. Same fixes applied as in the
# first copy: seq_along() instead of 1:length(), explicit - 1, FALSE not F.
y <- c(0, 1.606)
approx(x,y)  # scratch check of linear interpolation; result is not stored
library(ggplot2)
library(gganimate)
theme_set(theme_bw())
library(tidyverse)
# random integer start positions for agent and food on a 0-10 grid
data <- data.frame(role = c("agent", "food"),
position_x = c(sample(0:10, 1), sample(0:10, 1)),
position_y = c(sample(0:10, 1), sample(0:10, 1)),
iteration = c(0, 0))
# linear interpolation between the two positions (approx() default n = 50 points)
interp <- approx(data$position_x, data$position_y)
data_path_agent <- data.frame(role = "agent",
position_x = rev(interp$x),
position_y = rev(interp$y),
iteration = seq_along(interp$x),
image = "https://image.flaticon.com/icons/png/128/2942/2942667.png")
data_path_food <- data.frame(role = "food",
position_x = data[data$role == "food",]$position_x,
position_y = data[data$role == "food",]$position_y,
iteration = seq_along(interp$x) - 1,
image = "https://images.vexels.com/media/users/3/143088/isolated/preview/f565debc52083dacca60da22284e4083-iacute-cone-de-coxa-de-frango-by-vexels.png")
data_path <- rbind(data_path_agent, data_path_food)
p <- data_path %>%
ggplot(aes(x = position_x, y = position_y, colour = role)) +
geom_point(show.legend = FALSE, alpha = 0.7) +
scale_color_viridis_d() +
scale_size(range = c(2, 12)) +
ylim(0, 10) + xlim(0, 10) +
theme_void() +
transition_time(iteration)
animate(p, renderer = gifski_renderer(loop = FALSE))
|
# =====================================================================================
# "makeDatasets.R" file |
# designate key constants, folder locations, and load packages |
# load data mapping files |
# load population denominator data |
# load death data (cbdDat0) |
# build functions for YLL and rate calcuations |
# contruction initial tract, community & county CBD data files |
# process data and calculate age-adjusted rates |
# final merges and processing of main CBD data files |
# export files for use in CBD app |
#======================================================================================
# -- Designate locations and load packages---------------------------------------------------------
# whichDat selects the death-data source below: "fake" loads the public
# sample file, "real" expects the secure full file (see LOAD DEATH DATA).
whichDat <- "fake"
STATE <- "California"
# local drive/folder layout; edit myDrive for your installation
myDrive <- "E:"
myPlace <- paste0(myDrive,"/0.CBD/myCBD")
upPlace <- paste0(myDrive,"/0.CBD/myUpstream")
library(tidyverse)
library(epitools)   # pois.approx, and the original of ageadjust.direct
library(sqldf)      # cross joins used to build the "full matrix" frames
library(readxl)
library(fs)         # path()
yF <- 100000 # rate constant
pop5 <- 5 # 5 years
pop1 <- 1 # 1 year
yearGrp <- "2013-2017"
#-- LOAD STANDARDS AND DATA MAPPING FILES ---------------------------------------------------------
# add to technical notes the purposes and contents of each data mapping file
# this "as.data.frame" below and elsewhere is really annoying.... but at least icdToGroup function below does not work otherwise;
# because the "tibble" is double precision or for some other reason this messes up;
# and get error "Error: Can't use matrix or array for column indexing"
leMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/le.Map.xlsx"), sheet="LifeExpLink", range = cell_cols("A:B")))  # age -> life expectancy, used for YLL
yearMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/year.Map.xlsx")))       # year -> year-group lookup
geoMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/countycodes.Map.xlsx"))) # county codes/names
cbdLinkCA <- read.csv(paste0(myPlace,"/myInfo/cbdLinkCA.csv"),colClasses = "character") # file linking MSSAs to census
comName <- unique(cbdLinkCA[,c("comID","comName")]) # dataframe linking comID and comName
ageMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/Age Groups and Standard US 2000 pop.xlsx"),sheet = "data"))  # age-group bounds + US 2000 standard population
#-- LOAD AND PROCESS POPULATION DATA --------------------------------------------------------------
# ungrouping important for subsequent data set merging
popTract <- readRDS(path(upPlace,"/upData/popTract2013.RDS")) %>% ungroup()
# split each population file into sex totals (ageG == "Total") vs age-group strata
popTractSexTot <- filter(popTract,ageG == "Total")
popTractSexTotAgeG <- filter(popTract,ageG != "Total")
# roll tracts up to communities (comID)
popCommSexTot <- popTractSexTot %>% group_by(yearG,county,comID,sex) %>% summarise(pop=sum(pop)) %>% ungroup()
popCommSexTotAgeG <- popTractSexTotAgeG %>% group_by(yearG,county,comID,sex,ageG) %>% summarise(pop=sum(pop)) %>% ungroup()
popCounty <- readRDS(path(upPlace,"/upData/popCounty2000to2015.RDS")) %>% ungroup()
popCountySexTot <- filter(popCounty,ageG == "Total")
popCountySexTotAgeG <- filter(popCounty,ageG != "Total")
# standard population with an ageG label; this "lAge - uAge" format must match
# the aLabs labels built below for the death data
popStandard <- ageMap %>% mutate(ageG = paste0(lAge," - ",uAge))
# == LOAD AND PROCESS DEATH DATA =================================================================
# Loads cbdDat0, the individual-level death records, from whichever source
# whichDat (set at top of file) selects.
if (whichDat == "real") {
# CAUTION --- if using REAL DATA INCLUDE these two lines below and edit the first one with your secure location
# load("G:/CCB/0.Secure.Data/myData/cbdDat0FULL.R")
load("H:/0.Secure.Data/myData/cbdDat0FULL.R")
cbdDat0 <- cbdDat0FULL
}
if (whichDat == "fake") {
# Load FAKE Data --- COMMENT OUT these two lines if using REAL DATA
load(paste0(upPlace,"/upData/cbdDat0SAMP.R"))
cbdDat0 <- cbdDat0SAMP
}
# GEOID/COUNTY CORRECTION HERE =============================================
# (1) LA CENSUS TRACT TO RECODE
# 06037930401 should be recoded to 06037137000 in all data files
cbdDat0$GEOID[cbdDat0$GEOID=="06037930401"] <- "06037137000"
# (2) all occurrences of "06037800325" in death data are Ventura, all are LA in pop data
# (3) fix county based on GEOID analysis here:
# water-only tracts (defined here but not used below in this section)
allWater <- c("06017990000","06037990300","06061990000","06083990000","06111990100")
#forEthan <- sample_n(cbdDat0SAMP,100000)
#saveRDS(forEthan, file=paste0(upPlace,"/upData/forEthan.RDS"))
# recode/derive per-record fields: sex labels, community ID, years of life
# lost (from the life-expectancy map), and year group
cbdDat0 <- mutate(cbdDat0,
sex = c("Male","Female")[match(sex,c("M","F"))],
age = as.numeric(age), # redundant...
ICD10 = as.character(ICD10), # redundant...
comID = cbdLinkCA[match(cbdDat0$GEOID,cbdLinkCA[,"GEOID"]),"comID"],
yll = leMap[match(cbdDat0$age,leMap[,"Age"]),"LE"],
yearG = yearMap[match(year,yearMap[,"year"]),"yGroup1"]
)
cbdDat0Save <- cbdDat0
# duplicate every record with sex = "Total" so sex-total strata fall out of
# the same group_by operations as Male/Female
.cbdDat0Sex <- mutate(cbdDat0, sex = "Total")
cbdDat0 <- bind_rows(cbdDat0,.cbdDat0Sex)
# Add Age-Group variable ----------------------------------------
aL <- ageMap$lAge # lower age ranges
aU <- c(-1,ageMap$uAge) # upper age ranges, plus initial value of "-1" for lower limit
aLabs <- paste(aL,"-",aU[-1]) # make label for ranges (e.g. "0 - 4"; matches popStandard$ageG)
aMark <- findInterval(cbdDat0$age,aU,left.open = TRUE) # vector indicating age RANGE value of each INDIVIDUAL age value
cbdDat0$ageG <- aLabs[aMark] # make new "ageG" variable based on two objects above
# Map ICD-10 codes to GBD conditions ----------------------------
gbdMap0 <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/gbd.ICD.Map.xlsx"), sheet="main")) # also have e.g. range="A1:J167"
allLabels <- sort(gbdMap0$LABEL[!is.na(gbdMap0$LABEL)])
# CODE + regular-expression pairs used by icdToGroup below
mapICD <- gbdMap0[!is.na(gbdMap0$CODE),c("CODE","regEx10")]
# Map a vector of ICD-10 codes to GBD condition codes.
#
# Args:
#   myIn: character vector of ICD-10 codes (one per death record).
# Returns:
#   Character vector the same length as myIn, holding the CODE from the
#   global `mapICD` table whose regEx10 pattern matches, or NA otherwise.
#   Later mapICD rows overwrite earlier matches, so row order sets precedence.
# Note: relies on `mapICD` (CODE, regEx10 columns) defined above.
icdToGroup <- function(myIn) {
  Cause <- rep(NA_character_, length(myIn))            # typed NA instead of logical NA
  for (i in seq_len(nrow(mapICD))) {                   # seq_len: safe if mapICD has 0 rows (1:nrow would loop on 1:0)
    Cause[grepl(mapICD[i, "regEx10"], myIn)] <- mapICD[i, "CODE"]
  }
  Cause
}
# assign a GBD condition code to every death record
cbdDat0$icdCODE <- icdToGroup(myIn=cbdDat0$ICD10)
cbdDat0$icdCODE[cbdDat0$ICD10 %in% c("","000","0000")] <- "cZ02" # >3500 records have no ICD10 code -- label them as cZ for now
# diagnostic only: tabulate ICD10 values that still did not map (not stored)
junk <- filter(cbdDat0,is.na(icdCODE))
table(junk$ICD10,useNA = "ifany")
cbdDat0$icdCODE[is.na(cbdDat0$icdCODE)] <- "cZ03" # 370 records that are not mapping to a code right now --TEMP
# number of characters after the leading "c" prefix of each code
temp <- nchar(str_sub(cbdDat0$icdCODE,2,5))
# cause hierarchy columns: lev0 is the all-cause total, lev1/lev2 are
# successively longer prefixes of the code, lev3 only exists for 4-character codes
cbdDat0 <- cbdDat0 %>% mutate(lev0 = "0",
lev1 = str_sub(icdCODE,2,2),
lev2 = str_sub(icdCODE,2,4),
lev3 = ifelse(temp==4,str_sub(icdCODE,2,5),NA)
)
# DATA CLEANING ISSUES (see at bottom of file) ----------------------------------------------------
# DEATH MEASURES FUNCTIONS =========================================================================
# Aggregate the global death file `cbdDat0` into death counts, YLL totals,
# and mean age for one stratification.
#
# Args:
#   group_vars: character vector of grouping column names in cbdDat0; the
#               cause-level column ("lev0".."lev3") it contains is renamed
#               to CAUSE in the output.
#   levLab:     label ("lev0".."lev3") stored in the output Level column.
# Returns:
#   data.frame with one row per group: grouping columns, Ndeaths, YLL,
#   m.YLL (mean YLL per death), mean.age, CAUSE, Level.
calculateYLLmeasures <- function(group_vars,levLab){
  # group_by(!!!syms(...)) replaces the deprecated group_by_(.dots = ...)
  # (syms is re-exported by dplyr)
  dat <- cbdDat0 %>% group_by(!!!syms(group_vars)) %>%
    summarize(Ndeaths = n() ,
              YLL = sum(yll, na.rm = TRUE),    # NEED TO ADD CIs
              m.YLL = mean(yll, na.rm = TRUE), # NEED TO ADD CIs
              mean.age = mean(age,na.rm=TRUE)
    ) %>% ungroup
  # the "lev*" grouping column becomes the generic CAUSE column
  names(dat)[grep("lev", names(dat))] <- "CAUSE"
  dat$Level <- levLab
  # dat <- filter(dat,!is.na(CAUSE)) # "HARD FIX" that should be assessed carefully
  dat %>% data.frame
}
# Add crude rates and approximate Poisson confidence limits to a summary table.
#
# Args:
#   inData: data.frame with Ndeaths, YLL, and pop columns.
#   yearN:  number of years pooled (pop * yearN = person-years at risk).
# Returns:
#   inData with YLLper, YLLrateLCI, YLLrateUCI, cDeathRate, rateLCI, and
#   rateUCI appended (all scaled by yF = 100,000).
# Uses epitools::pois.approx; each CI is now computed once instead of twice.
calculateRates <- function(inData,yearN){
  py <- yearN * inData$pop                                   # person-years
  yllCI   <- pois.approx(inData$YLL,     py, conf.level = 0.95)  # need to confirm that this is correct
  deathCI <- pois.approx(inData$Ndeaths, py, conf.level = 0.95)
  inData$YLLper     <- yF * inData$YLL / py
  inData$YLLrateLCI <- yF * yllCI$lower
  inData$YLLrateUCI <- yF * yllCI$upper
  inData$cDeathRate <- yF * inData$Ndeaths / py
  inData$rateLCI    <- yF * deathCI$lower
  inData$rateUCI    <- yF * deathCI$upper
  inData
}
# == build COUNTY-level file ======================================================================
# one summary per cause level, by county/year/sex
c.t1 <- calculateYLLmeasures(c("county","year","sex","lev0"),"lev0")
c.t2 <- calculateYLLmeasures(c("county","year","sex","lev1"),"lev1")
c.t3 <- calculateYLLmeasures(c("county","year","sex","lev2"),"lev2")
c.t4 <- calculateYLLmeasures(c("county","year","sex","lev3"),"lev3")
datCounty <- bind_rows(c.t1,c.t2,c.t3,c.t4)
# statewide strata: same summaries without the county grouping
s.t1 <- calculateYLLmeasures(c("year","sex","lev0"),"lev0")
s.t2 <- calculateYLLmeasures(c("year","sex","lev1"),"lev1")
s.t3 <- calculateYLLmeasures(c("year","sex","lev2"),"lev2")
s.t4 <- calculateYLLmeasures(c("year","sex","lev3"),"lev3")
datState <- bind_rows(s.t1,s.t2,s.t3,s.t4)
datState$county = STATE
datCounty <- bind_rows(datCounty,datState)
# datCounty$causeName <- gbdMap0[match(datCounty$CAUSE,gbdMap0[,1]),"nameOnly"] # "needed?"
# attach populations and compute crude rates (1-year data)
datCounty <- merge(datCounty,popCountySexTot,by = c("year","county","sex"))
datCounty <- calculateRates(datCounty,1)
# state rates kept separately so each county row can carry the matching
# statewide rate (used for the SMR below)
datState <- datCounty %>% filter(county == STATE) %>%
mutate(stateRate = cDeathRate) %>%
select(year,sex,Level,CAUSE,stateRate)
# for LOCAL installation of application EXCLUDE save line and INCLUDE load line
save(datState, file= paste0(upPlace,"/upData/datState.R"))
#load(file= paste0(upPlace,"/upData/datState.R"))
datCounty <- merge(datCounty,datState,by = c("year","sex","Level","CAUSE"))
# standardized mortality ratio: county crude rate over statewide crude rate
datCounty$SMR <- datCounty$cDeathRate / datCounty$stateRate
# == build COMMUNITY-level file ===================================================================
# community summaries use the pooled year group and only cause levels 0-2
c.t1 <- calculateYLLmeasures(c("comID","yearG","sex","lev0"),"lev0") #removed "county",
c.t2 <- calculateYLLmeasures(c("comID","yearG","sex","lev1"),"lev1")
c.t3 <- calculateYLLmeasures(c("comID","yearG","sex","lev2"),"lev2")
datComm <- bind_rows(c.t1,c.t2,c.t3) %>%
filter(yearG == yearGrp) %>% # 2013-2017 ONLY!!!
arrange(comID,yearG,CAUSE)
datComm <- merge(datComm,popCommSexTot,by = c("yearG","comID","sex"),all=TRUE)
datComm <- calculateRates(datComm,5)  # 5 pooled years of person-time
# add community names POSSIBLE REMOVE
datComm <- merge(datComm, comName, by = "comID",all=TRUE) %>%
arrange(comID,yearG,CAUSE)
# == build TRACT-level file =======================================================================
# tract summaries are coarser still: cause levels 0-1 only
c.t1 <- calculateYLLmeasures(c("GEOID","yearG","sex","lev0"),"lev0")
c.t2 <- calculateYLLmeasures(c("GEOID","yearG","sex","lev1"),"lev1")
datTract <- bind_rows(c.t1,c.t2) %>%
filter(yearG == yearGrp) %>% # 2013-2017 ONLY!!!
arrange(GEOID,yearG,CAUSE)
# NOTE -- includes many with NA GEOID
# MERGE Death and Population files
datTract <- merge(datTract,popTractSexTot,by = c("yearG","GEOID","sex"),all=TRUE)
# Calculate Rates
datTract <- calculateRates(datTract,5) %>%
arrange(GEOID,yearG,CAUSE)
# == AGE ADJUSTED ("AA") RATES =========================================================================================
# https://github.com/cran/epitools/blob/master/R/ageadjust.direct.R
# Direct age-adjustment with gamma confidence intervals, adapted from
# epitools::ageadjust.direct
# (https://github.com/cran/epitools/blob/master/R/ageadjust.direct.R).
#
# Args:
#   count:      events (deaths or YLL) per age stratum.
#   pop:        person-years per age stratum.
#   rate:       optional stratum rates; derived as count/pop when NULL.
#   stdpop:     standard population per age stratum (US 2000 here).
#   conf.level: confidence level for the gamma interval.
# Returns:
#   Named vector: crude.rate, adj.rate, lci, uci, se (per 1 person-year;
#   callers multiply by 100,000).
ageadjust.direct.SAM <- function (count, pop, rate = NULL, stdpop, conf.level = 0.95)
{
  # Derive whichever of count/pop/rate was not supplied. The epitools copy
  # tested `is.null(rate) == TRUE` in the first two branches, which would
  # compute NULL * pop; a non-NULL rate is required to reconstruct count/pop.
  if (missing(count) && !missing(pop) && !is.null(rate)) count <- rate * pop
  if (missing(pop) && !missing(count) && !is.null(rate)) pop <- count/rate
  if (is.null(rate) && !missing(count) && !missing(pop)) rate <- count/pop
  # strata with missing population contribute nothing
  # (order matters: zero the rates before zeroing pop)
  rate[is.na(pop)] <- 0
  pop[is.na(pop)] <- 0
  alpha <- 1 - conf.level
  cruderate <- sum(count,na.rm=TRUE)/sum(pop,na.rm=TRUE)
  stdwt <- stdpop/sum(stdpop,na.rm=TRUE)        # standard-population weights
  dsr <- sum(stdwt * rate,na.rm=TRUE)           # directly standardized rate
  dsr.var <- sum((stdwt^2) * (count/pop^2))
  dsr.se <- sqrt(dsr.var)
  wm <- max(stdwt/pop)                          # largest weight per person, used in upper CI
  # gamma-based confidence interval
  gamma.lci <- qgamma(alpha/2, shape = (dsr^2)/dsr.var, scale = dsr.var/dsr)
  gamma.uci <- qgamma(1 - alpha/2, shape = ((dsr+wm)^2)/(dsr.var+wm^2),
                      scale = (dsr.var+wm^2)/(dsr+wm))
  c(crude.rate = cruderate, adj.rate = dsr, lci = gamma.lci,
    uci = gamma.uci, se = dsr.se)
}
# makes dataframe of all possible combinations of county, year, CAUSE, and ageG
# (so that strata with zero deaths still appear for age-adjustment)
year <- data.frame(year = 2000:2017) # these "vectors" need to be dataframes for the sq merge below to work
yearG <- data.frame(yearG = yearGrp)
CAUSE1 <- data.frame(CAUSE=allLabels)
# shorter code lists for community (< 4 chars) and tract (< 2 chars) levels
CAUSE2 <- data.frame(CAUSE=CAUSE1[nchar(as.character(CAUSE1$CAUSE)) < 4,])
CAUSE3 <- data.frame(CAUSE=CAUSE1[nchar(as.character(CAUSE1$CAUSE)) < 2,])
sex <- data.frame(sex = c("Male","Female","Total"))
ageG <- data.frame(ageG = sort(unique(cbdDat0$ageG)))
county <- data.frame(county = c(geoMap$countyName,"California"))
comID <- data.frame(comID = unique(cbdLinkCA[,"comID"]))
GEOID <- data.frame(GEOID = cbdLinkCA[,"GEOID"])
# other cool approach from Adam:
# fullMatCounty <- Reduce(function(...) merge(..., all = TRUE), list(county, year, CAUSE, sex, ageG))
fullMatCounty <- sqldf(" select * from county cross join year cross join CAUSE1 cross join sex cross join ageG")
fullMatComm <- sqldf(" select * from comID cross join yearG cross join CAUSE2 cross join sex cross join ageG")
fullMatTract <- sqldf(" select * from GEOID cross join yearG cross join CAUSE3 cross join sex cross join ageG")
#######CAUSE CHARACTER##################
# force key columns to character so the full_joins below match the death data
fullMatCounty <- mutate(fullMatCounty, county = as.character(county), CAUSE = as.character(CAUSE), sex = as.character(sex), ageG = as.character(ageG), tester = 0)
fullMatComm <- mutate(fullMatComm, comID = as.character(comID), yearG = as.character(yearG), CAUSE = as.character(CAUSE), sex = as.character(sex), ageG = as.character(ageG), tester = 0)
fullMatTract <- mutate(fullMatTract, GEOID = as.character(GEOID), yearG = as.character(yearG), CAUSE = as.character(CAUSE), sex = as.character(sex), ageG = as.character(ageG), tester = 0)
# County age deaths -------------------------------------------------------------------------------
# deaths and YLL by age group, per cause level; tA5-tA8 repeat without the
# county grouping and are labelled as the statewide strata
tA1 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA2 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA3 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev2) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA4 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev3) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA5 <- cbdDat0 %>% group_by(        year, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
tA6 <- cbdDat0 %>% group_by(        year, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
tA7 <- cbdDat0 %>% group_by(        year, sex, ageG,CAUSE=lev2) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
tA8 <- cbdDat0 %>% group_by(        year, sex, ageG,CAUSE=lev3) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
datAA1 <- bind_rows(tA1,tA2,tA3,tA4,tA5,tA6,tA7,tA8) %>% ungroup() # UNGROUP HERE!!!!
# DATA CLEANING ISSUES as above
datAA1 <- filter(datAA1,!is.na(ageG)) # remove 403 records with missing age (0.065% of deaths) -- impact of this?
# datAA1 <- filter(datAA1,!is.na(CAUSE)) # remove 6955 records with missing CAUSE
datAA1 <- filter(datAA1,!is.na(county)) # remove 758 records with missing county
# datAA1 <- filter(datAA1,!is.na(sex)) # remove
ageCounty <- full_join(fullMatCounty,datAA1 ,by = c("county","year","sex","ageG","CAUSE")) %>% # merge death data and "fullMatCounty"
full_join(popCountySexTotAgeG, by = c("county","year","sex","ageG") ) %>% # merge population
full_join(popStandard[,c("ageG","US2000POP")], by="ageG") # merge standard population
ageCounty$Ndeaths[is.na(ageCounty$Ndeaths)] <- 0 # if NA deaths in strata change to "0"
ageCounty$YLL[is.na(ageCounty$YLL)] <- 0 # if NA deaths in strata change to "0"
# NOTE: each summarize line below calls ageadjust.direct.SAM again on the same
# strata just to pick out one element of its result (inefficient but correct)
countyAA <- ageCounty %>% group_by(county,year,sex,CAUSE) %>%
summarize(oDeaths = sum(Ndeaths,na.rm=TRUE),
aRate = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000,
aLCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[3]*100000,
aUCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[4]*100000,
aSE = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[5]*100000,
YLL.adj.rate = ageadjust.direct.SAM(count=YLL, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000) # CONFIRM
countyAA <- countyAA[!(countyAA$oDeaths==0),c("county","year","sex","CAUSE","aRate","aLCI","aUCI","aSE","YLL.adj.rate")] # remove strata with no deaths and select columns
#tester <- filter(ageCounty,year==2015,county=="Alameda",sex=="Male",CAUSE==0)
#ageadjust.direct.SAM(count=tester$Ndeaths, pop=tester$pop, rate = NULL, stdpop=tester$US2000POP, conf.level = 0.95)*100000
# Community age deaths ----------------------------------------------------------------------------
# same pattern as the county section, but by comID/yearG and cause levels 0-2
tA1 <- cbdDat0 %>% group_by(comID, yearG, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) #cut county,
tA2 <- cbdDat0 %>% group_by(comID, yearG, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA3 <- cbdDat0 %>% group_by(comID, yearG, sex, ageG,CAUSE=lev2) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
datAA1 <- bind_rows(tA1,tA2,tA3) %>% filter(comID != "")
#datAA1 <- na.omit(datAA1)
ageComm <- full_join(fullMatComm,datAA1,by = c("comID","yearG","sex","ageG","CAUSE")) %>%
filter(yearG == yearGrp) %>%
full_join(popCommSexTotAgeG, by = c("comID","yearG","sex","ageG")) %>% # population
full_join(popStandard[,c("ageG","US2000POP")],by="ageG") # standard population
ageComm$Ndeaths[is.na(ageComm$Ndeaths)] <- 0 # if NA deaths in strata change to "0"
ageComm$YLL[is.na(ageComm$YLL)] <- 0 # if NA deaths in strata change to "0"
# pop*pop5 converts the single-year population to 5 years of person-time
commAA <- ageComm %>% group_by(comID,yearG,sex,CAUSE) %>%
summarize(oDeaths = sum(Ndeaths,na.rm=TRUE),
aRate = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000,
aLCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[3]*100000,
aUCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[4]*100000,
aSE = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[5]*100000,
YLL.adj.rate = ageadjust.direct.SAM(count=YLL, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000)
commAA <- commAA[!(commAA$oDeaths==0),c("comID","yearG","sex","CAUSE","oDeaths","aRate","aLCI","aUCI","aSE","YLL.adj.rate")]
# removes rows with aRate = inf HERE there are only ALPINE
commAA <- commAA[!(commAA$aRate > 10000),]
#tester <- filter(ageComm,yearG==yearGrp,comID=="104",sex=="Female",CAUSE==0)
#ageadjust.direct.SAM(count=tester$Ndeaths, pop=tester$pop, rate = NULL, stdpop=tester$US2000POP, conf.level = 0.95)*100000
# Tract age deaths -----------------------------------------------------------------------------------------------------
# same pattern again, by GEOID/yearG and cause levels 0-1 only
tA1 <- cbdDat0 %>% group_by(GEOID, yearG, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA2 <- cbdDat0 %>% group_by(GEOID, yearG, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
datAA1 <- bind_rows(tA1,tA2) %>% filter(GEOID != "")
ageTract <- full_join(fullMatTract,datAA1,by = c("GEOID","yearG","sex","ageG","CAUSE")) %>%
filter(yearG == yearGrp) %>%
full_join(popTractSexTotAgeG,by = c("GEOID","yearG","sex","ageG")) %>% # add population
full_join(popStandard[,c("ageG","US2000POP")],by="ageG") # add standard population
ageTract$Ndeaths[is.na(ageTract$Ndeaths)] <- 0 # if NA deaths in strata change to "0"
ageTract$YLL[is.na(ageTract$YLL)] <- 0 # if NA deaths in strata change to "0"
# no aSE at tract level; pop*pop5 = 5 years of person-time as above
tractAA <- ageTract %>% group_by(GEOID,yearG,sex,CAUSE) %>%
summarize(oDeaths = sum(Ndeaths,na.rm=TRUE),
aRate = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000,
aLCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[3]*100000,
aUCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[4]*100000,
YLL.adj.rate = ageadjust.direct.SAM(count=YLL, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000)
tractAA <- tractAA[!(tractAA$oDeaths==0),c("GEOID","yearG","sex","CAUSE","oDeaths","aRate","aLCI","aUCI","YLL.adj.rate")]
# removes rows with aRate = inf, or infinity becuase some population strata is 0 AND some other odd strata
tractAA <- tractAA[!(tractAA$aRate > 5000),]
tractAA <- tractAA[!(is.na(tractAA$aRate)),]
# -- Merge adjusted rates into main data files and final Clean Up ----------------------------------------------------------
# attach the age-adjusted columns to the crude-rate files; signif(4) shrinks
# the exported files and rounds for display
datTract <- merge(datTract, tractAA ,by = c("GEOID","yearG","sex","CAUSE"),all=TRUE) %>%
mutate_if(is.numeric, signif,digits=4) %>%
filter(!is.na(county)) %>% # REMOVE ALL out-of-state GEOID and missing GEOID
filter(!is.na(CAUSE)) # removes about 130 records with bad/no GEOID and/or wrong County based on GEOID
datComm <- merge(datComm, commAA ,by = c("comID","yearG","sex","CAUSE"),all=TRUE) %>%
mutate_if(is.numeric, signif,digits=4) %>%
filter(!is.na(county)) # as above
datCounty <- merge(datCounty,countyAA ,by = c("county","year","sex","CAUSE"),all=TRUE) %>%
filter(!(is.na(CAUSE))) %>% # removes "Level3" NA (most 'causes' are NA on Level3)
select(-ageG,-stateRate) %>%
mutate_if(is.numeric, signif,digits=4) %>% # much smaller file and easier to read
mutate(county = ifelse(county==STATE, toupper(STATE),county) # e.g. California --> CALIFORNIA
)
# == Final Data Clean Up and Export ==================================================================================
# "SMALL CELL and "RISKY CAUSE" suppression ----------
# xCause0 <- c(14,41,50,139,4,49,192)
# xCause1 <- c(xCause0,10)
# datTract <- filter(datTract, !(CAUSE %in% xCause1))
# datComm <- filter(datComm, !(CAUSE %in% xCause1))
# datCounty <- filter(datCounty,!(CAUSE %in% xCause0))
# dev-only shortcut (normally dead code): flip 1==2 to reload previously
# exported files instead of recomputing
if (1==2){
datTract <- readRDS(path(myPlace,"/myData/",whichDat,"datTract.RDS"))
datComm <- readRDS(path(myPlace,"/myData/",whichDat,"datComm.RDS"))
datCounty <- readRDS(path(myPlace,"/myData/",whichDat,"datCounty.RDS"))
}
# small-cell suppression: zero every measure in strata with fewer than
# criticalNum deaths.  NOTE: mutate() evaluates its arguments sequentially,
# so after the first line rewrites Ndeaths, later `Ndeaths < criticalNum`
# tests see the updated value (0), which is still < criticalNum -- the same
# rows are suppressed either way.
criticalNum <- 11
datTract <- datTract %>% mutate(Ndeaths = ifelse(Ndeaths < criticalNum,0,Ndeaths),
cDeathRate = ifelse(Ndeaths < criticalNum,0,cDeathRate),
YLL = ifelse(Ndeaths < criticalNum,0,YLL),
YLLper = ifelse(Ndeaths < criticalNum,0,YLLper),
rateLCI = ifelse(Ndeaths < criticalNum,0,rateLCI),
rateUCI = ifelse(Ndeaths < criticalNum,0,rateUCI),
mean.age = ifelse(Ndeaths < criticalNum,0,mean.age)
)
datComm <- datComm %>% mutate(Ndeaths = ifelse(Ndeaths < criticalNum,0,Ndeaths),
cDeathRate = ifelse(Ndeaths < criticalNum,0,cDeathRate),
YLL = ifelse(Ndeaths < criticalNum,0,YLL),
YLLper = ifelse(Ndeaths < criticalNum,0,YLLper),
rateLCI = ifelse(Ndeaths < criticalNum,0,rateLCI),
rateUCI = ifelse(Ndeaths < criticalNum,0,rateUCI),
mean.age = ifelse(Ndeaths < criticalNum,0,mean.age)
)
datCounty <- datCounty %>% mutate(Ndeaths = ifelse(Ndeaths < criticalNum,0,Ndeaths),
cDeathRate = ifelse(Ndeaths < criticalNum,0,cDeathRate),
YLL = ifelse(Ndeaths < criticalNum,0,YLL),
YLLper = ifelse(Ndeaths < criticalNum,0,YLLper),
SMR = ifelse(Ndeaths < criticalNum,0,SMR),
rateLCI = ifelse(Ndeaths < criticalNum,0,rateLCI),
rateUCI = ifelse(Ndeaths < criticalNum,0,rateUCI),
mean.age = ifelse(Ndeaths < criticalNum,0,mean.age)
)
# Quick fix to replace with Version Beta 1.1
# eliminates pop 0 and therefore infinity rates
datTract <- filter(datTract,pop>0)
datCounty <- filter(datCounty,!is.na(Ndeaths))
# export the three files consumed by the CBD shiny app
saveRDS(datTract, file= path(myPlace,"/myData/",whichDat,"datTract.RDS"))
saveRDS(datComm, file= path(myPlace,"/myData/",whichDat,"datComm.RDS"))
saveRDS(datCounty, file= path(myPlace,"/myData/",whichDat,"datCounty.RDS"))
# #
# datTract$causeName <- gbdMap0[match(datTract$CAUSE,gbdMap0[,"LABEL"]),"causeList"]
# datComm$causeName <- gbdMap0[match(datComm$CAUSE,gbdMap0[,"LABEL"]),"causeList"]
# datCounty$causeName <- gbdMap0[match(datCounty$CAUSE,gbdMap0[,"LABEL"]),"causeList"]
#
# write.csv(datTract,(paste0(upPlace,"/tempOutput/Tract CCB Work.csv")))
# write.csv(datComm,(paste0(upPlace,"/tempOutput/Community CCB Work.csv")))
# write.csv(datCounty,(paste0(upPlace,"/tempOutput/County CCB Work.csv")))
# END ===================================================================================================================
# DATA CLEANING ISSUES ----------------------------------
# in 2012 Los Angeles Census Tract 9304.01 was merged into tract 1370.00
# "The deletion of Census 2000 Tract 1370.00 is now corrected, and the tract is reinstated
# with its former boundaries. This change incorporates all of former (2010) Census Tract 9304.01
# and part of (2010) Census Tract 8002.04 into the reinstated (2012) tract 1370.00.
# https://www.census.gov/programs-surveys/acs/technical-documentation/table-and-geography-changes/2012/geography-changes.html
# LA CENSUS TRACT TO RECODE
# 06037930401 should be recoded to 06037137000 in all data files
# CENSUS TRACTS
# current from acsWork0 has 8057 tracts
# current cbdLinkCA has 8036 (2010 data)
# current cbddat0 has 8603! bad geocodes?
# something ?? has 8035 ... check...
#temp <- popCensusCom$GEOID
#junk <- cbdDat0[!(cbdDat0$GEOID %in% temp),]
#junk <- junk[junk$GEOID != "",]
#write.csv(junk,(paste0(upPlace,"/tempOutput/junk Tracts.csv")))
# these records have a GEOID but not comID suggesting the GEOID is "bad"
# junk <- filter(cbdDat0,is.na(comID) & GEOID != "" & year > 2004)
# 651 records
# length(unique(junk$GEOID))
# 590 unique GEOID not in California (based on current link file)
# write.csv(table(junk$GEOID,junk$year),(paste0(upPlace,"/tempOutput/junk Tracts.csv")))
# county missing from 3797 records
# junk <- filter(cbdDat0,is.na(county))
# 3797 records
# countyFIPS blank=2145 and 999=1652 (but State="CA; based on "F71" only)
# write.csv(table(junk$year,junk$countyFIPS),(paste0(upPlace,"/tempOutput/missing County FIPS.csv")))
# MAJOR cleaning issue!!!
# junk <- filter(cbdDat0,is.na(gbd36))
# 82775 records where ICD10 does not map to gbd36 -- errors in info file!
# write.csv(table(junk$year,junk$countyFIPS),(paste0(upPlace,"/tempOutput/no ICD10 to gbd36.csv")))
# Potentially useful old code bits:
# could make aL and aU like this, or as below based on an input file:
# aL <- c( 0, 5,15,25,35,45,55,65,75,85)
# aU <- c(-1,4,14,24,34,44,54,64,74,84,999)
# "Manual' calcuation of age-adjustment
# popStandard <- readRDS(paste0(upPlace,"/upData/popStandard.RDS"))
# ageCounty <- merge(ageCounty,popStandard,by = c("ageG"),all=TRUE) # merge with "Standard" population
#calculate number of expected deaths in strata among standard population
#ageCounty$deathsE <- (ageCounty$Ndeaths/ageCounty$pop)*ageCounty$popStandard
# "manual" calculation of age-adjusted rates, AND using ageadjust.direct function from EpiTools package
# NOTE: oDeaths etc != total deaths in other files because of missings removed#
# summarize(oDeaths = sum(Ndeaths), # na.rm=TRUE not needed becuase of cleaning above
# oPop = sum(pop),
# cRate = 100000*oDeaths/oPop,
# eDeaths = sum(deathsE),
# ePop = sum(popStandard),
# aRate = 100000*eDeaths/ePop)
# age-adjustment reference
# https://www.cdc.gov/nchs/data/nvsr/nvsr47/nvs47_03.pdf
| /myUpstream/makeDatasets.R | no_license | aculich/CACommunityBurden | R | false | false | 30,548 | r | # =====================================================================================
# "makeDatasets.R" file |
# designate key constants, folder locations, and load packages |
# load data mapping files |
# load population denominator data |
# load death data (cbdDat0) |
# build functions for YLL and rate calcuations |
# contruction initial tract, community & county CBD data files |
# process data and calculate age-adjusted rates |
# final merges and processing of main CBD data files |
# export files for use in CBD app |
#======================================================================================
# -- Designate locations and load packages---------------------------------------------------------
whichDat <- "fake"
STATE <- "California"
myDrive <- "E:"
myPlace <- paste0(myDrive,"/0.CBD/myCBD")
upPlace <- paste0(myDrive,"/0.CBD/myUpstream")
library(tidyverse)
library(epitools)
library(sqldf)
library(readxl)
library(fs)
yF <- 100000 # rate constant
pop5 <- 5 # 5 years
pop1 <- 1 # 1 year
yearGrp <- "2013-2017"
#-- LOAD STANDARDS AND DATA MAPPING FILES ---------------------------------------------------------
# add to technical notes the purposes and contents of each data mapping file
# this "as.data.frame" below and elsewhere is really annoying.... but at least icdToGroup function below does not work otherwise;
# becuase the "tibble" is double precision or for some other reason this messes up;
# and get error "Error: Can't use matrix or array for column indexing"
leMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/le.Map.xlsx"), sheet="LifeExpLink", range = cell_cols("A:B")))
yearMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/year.Map.xlsx")))
geoMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/countycodes.Map.xlsx")))
cbdLinkCA <- read.csv(paste0(myPlace,"/myInfo/cbdLinkCA.csv"),colClasses = "character") # file linking MSSAs to census
## --- Geography & age lookups -----------------------------------------------
## comName: one row per community, linking comID to its display name
comName <- unique(cbdLinkCA[,c("comID","comName")]) # dataframe linking comID and comName
## ageMap: age-group bounds plus the US 2000 standard population (used for
## age-adjustment further below)
ageMap <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/Age Groups and Standard US 2000 pop.xlsx"),sheet = "data"))
#-- LOAD AND PROCESS POPULATION DATA --------------------------------------------------------------
# ungrouping important for subsequent data set merging
## NOTE(review): path(upPlace,"/upData/...") puts a leading "/" in the second
## component, producing a doubled slash; harmless on most systems -- confirm.
popTract <- readRDS(path(upPlace,"/upData/popTract2013.RDS")) %>% ungroup()
## Split tract population into sex totals (ageG == "Total") vs age-specific strata
popTractSexTot <- filter(popTract,ageG == "Total")
popTractSexTotAgeG <- filter(popTract,ageG != "Total")
## Community population = tract population summed within comID
popCommSexTot <- popTractSexTot %>% group_by(yearG,county,comID,sex) %>% summarise(pop=sum(pop)) %>% ungroup()
popCommSexTotAgeG <- popTractSexTotAgeG %>% group_by(yearG,county,comID,sex,ageG) %>% summarise(pop=sum(pop)) %>% ungroup()
popCounty <- readRDS(path(upPlace,"/upData/popCounty2000to2015.RDS")) %>% ungroup()
popCountySexTot <- filter(popCounty,ageG == "Total")
popCountySexTotAgeG <- filter(popCounty,ageG != "Total")
## Standard population keyed by the same "lAge - uAge" ageG labels built below
popStandard <- ageMap %>% mutate(ageG = paste0(lAge," - ",uAge))
# == LOAD AND PROCESS DEATH DATA =================================================================
## whichDat (set upstream, not visible here) selects the secure real death
## file or the bundled fake sample; both define cbdDat0
if (whichDat == "real") {
# CAUTION --- if using REAL DATA INCLUDE these two lines below and edit the first one with your secure location
# load("G:/CCB/0.Secure.Data/myData/cbdDat0FULL.R")
load("H:/0.Secure.Data/myData/cbdDat0FULL.R")
cbdDat0 <- cbdDat0FULL
}
if (whichDat == "fake") {
# Load FAKE Data --- COMMENT OUT these two lines if using REAL DATA
load(paste0(upPlace,"/upData/cbdDat0SAMP.R"))
cbdDat0 <- cbdDat0SAMP
}
# GEOID/COUNTY CORRECTION HERE =============================================
# (1) LA CENSUS TRACT TO RECODE
# 06037930401 should be recoded to 06037137000 in all data files
cbdDat0$GEOID[cbdDat0$GEOID=="06037930401"] <- "06037137000"
# (2) all occurences of "06037800325" in death data are Ventura, all are LA in pop data
# (3) fix county based on GEOID analysis here:
## allWater: GEOIDs that are entirely water (defined here for later cleaning)
allWater <- c("06017990000","06037990300","06061990000","06083990000","06111990100")
#forEthan <- sample_n(cbdDat0SAMP,100000)
#saveRDS(forEthan, file=paste0(upPlace,"/upData/forEthan.RDS"))
## Recode/derive per-death fields:
##   sex   : "M"/"F" -> "Male"/"Female" (anything else becomes NA via match)
##   comID : community ID looked up from GEOID
##   yll   : years of life lost = remaining life expectancy at age of death
##   yearG : year-group label from yearMap
## NOTE(review): comID and yll index with cbdDat0$... while yearG uses the
## bare column `year`; both work inside mutate() but the style is mixed.
cbdDat0 <- mutate(cbdDat0,
sex = c("Male","Female")[match(sex,c("M","F"))],
age = as.numeric(age), # redundant...
ICD10 = as.character(ICD10), # redundant...
comID = cbdLinkCA[match(cbdDat0$GEOID,cbdLinkCA[,"GEOID"]),"comID"],
yll = leMap[match(cbdDat0$age,leMap[,"Age"]),"LE"],
yearG = yearMap[match(year,yearMap[,"year"]),"yGroup1"]
)
cbdDat0Save <- cbdDat0
## Duplicate every record with sex = "Total" so sex-combined strata fall out
## of the same group_by aggregations as the sex-specific ones
.cbdDat0Sex <- mutate(cbdDat0, sex = "Total")
cbdDat0 <- bind_rows(cbdDat0,.cbdDat0Sex)
# Add Age-Group variable ----------------------------------------
aL <- ageMap$lAge # lower age ranges
aU <- c(-1,ageMap$uAge) # upper age ranges, plus inital value of "-1" for lower limit
aLabs <- paste(aL,"-",aU[-1]) # make label for ranges
aMark <- findInterval(cbdDat0$age,aU,left.open = TRUE) # vector indicating age RANGE value of each INDIVIDUAL age value
cbdDat0$ageG <- aLabs[aMark] # make new "ageG" variable based on two objects above
# Map ICD-10 codes to GBD conditions ----------------------------
gbdMap0 <- as.data.frame(read_excel(paste0(myPlace,"/myInfo/gbd.ICD.Map.xlsx"), sheet="main")) # also have e.g. range="A1:J167"
allLabels <- sort(gbdMap0$LABEL[!is.na(gbdMap0$LABEL)])
## mapICD: regex -> cause-code table consumed by icdToGroup() below
mapICD <- gbdMap0[!is.na(gbdMap0$CODE),c("CODE","regEx10")]
# Map ICD-10 code strings to GBD condition codes by regular expression.
# Rows of the mapping are applied in order, so a later pattern that also
# matches overwrites an earlier assignment -- row order matters.
#
# Args:
#   myIn:    character vector of ICD-10 codes
#   mapping: data frame with columns "CODE" (group label) and "regEx10"
#            (regular expression matched against the codes); defaults to
#            the global mapICD built above (backward compatible)
#
# Returns: character vector of group codes, NA where nothing matched.
icdToGroup <- function(myIn, mapping = mapICD) {
  Cause <- rep(NA_character_, length(myIn))   # typed NA: result is always character
  for (i in seq_len(nrow(mapping))) {
    Cause[grepl(mapping[i, "regEx10"], myIn)] <- mapping[i, "CODE"]
  }
  Cause
}
## Classify each death's ICD-10 code into a GBD cause code
cbdDat0$icdCODE <- icdToGroup(myIn=cbdDat0$ICD10)
cbdDat0$icdCODE[cbdDat0$ICD10 %in% c("","000","0000")] <- "cZ02" # >3500 records have no ICD10 code -- label them as cZ for now
## Inspect the codes that failed to map (console output for interactive review)
junk <- filter(cbdDat0,is.na(icdCODE))
table(junk$ICD10,useNA = "ifany")
cbdDat0$icdCODE[is.na(cbdDat0$icdCODE)] <- "cZ03" # 370 records that are not mapping to a code right now --TEMP
## icdCODE is "c" followed by a 1-4 character code; temp = code length,
## used to decide whether a level-3 (most detailed) cause exists
temp <- nchar(str_sub(cbdDat0$icdCODE,2,5))
## lev0-lev3: progressively more detailed cause groupings (lev0 = all causes)
cbdDat0 <- cbdDat0 %>% mutate(lev0 = "0",
lev1 = str_sub(icdCODE,2,2),
lev2 = str_sub(icdCODE,2,4),
lev3 = ifelse(temp==4,str_sub(icdCODE,2,5),NA)
)
# DATA CLEANING ISSUES (see at bottom of file) ----------------------------------------------------
# DEATH MEASURES FUNCTIONS =========================================================================
# Aggregate death counts, total/mean YLL, and mean age at death for one level
# of the cause hierarchy, stratified by the columns named in group_vars.
#
# Args:
#   group_vars: character vector of grouping column names; the last one is a
#               lev0/lev1/lev2/lev3 cause column, renamed to CAUSE in output
#   levLab:     label ("lev0".."lev3") stored in the Level column
#
# Reads the global cbdDat0 data frame; returns a plain (ungrouped) data.frame.
calculateYLLmeasures <- function(group_vars,levLab){
  ## group_by(across(all_of(...))) replaces the long-deprecated
  ## group_by_(.dots = ...) (requires dplyr >= 1.0)
  dat <- cbdDat0 %>% group_by(across(all_of(group_vars))) %>%
    summarize(Ndeaths = n() ,
              YLL = sum(yll, na.rm = TRUE),     # NEED TO ADD CIs
              m.YLL = mean(yll, na.rm = TRUE),  # NEED TO ADD CIs
              mean.age = mean(age,na.rm=TRUE)
    ) %>% ungroup
  ## whichever lev* column was grouped on becomes the generic CAUSE column
  names(dat)[grep("lev", names(dat))] <- "CAUSE"
  dat$Level <- levLab
  # dat <- filter(dat,!is.na(CAUSE)) # "HARD FIX" that should be assessed carefully
  dat %>% data.frame
}
# Add crude death/YLL rates and Poisson-approximation confidence intervals.
#
# Args:
#   inData: data frame with Ndeaths, YLL and pop columns
#   yearN:  number of years pooled in each stratum (1 = annual county data,
#           5 = 5-year community/tract groups); pop*yearN = person-years
#
# Uses the globals yF (rate multiplier, e.g. 100000) and epitools'
# pois.approx(). Returns inData with six new columns appended in the same
# order as before: YLLper, YLLrateLCI, YLLrateUCI, cDeathRate, rateLCI, rateUCI.
calculateRates <- function(inData,yearN){
  personYears <- yearN * inData$pop
  ## compute each CI once instead of calling pois.approx twice per measure
  yllCI   <- pois.approx(inData$YLL,     personYears, conf.level = 0.95) # need to confirm that this is correct
  deathCI <- pois.approx(inData$Ndeaths, personYears, conf.level = 0.95)
  inData$YLLper     <- yF * inData$YLL / personYears
  inData$YLLrateLCI <- yF * yllCI$lower
  inData$YLLrateUCI <- yF * yllCI$upper
  inData$cDeathRate <- yF * inData$Ndeaths / personYears
  inData$rateLCI    <- yF * deathCI$lower
  inData$rateUCI    <- yF * deathCI$upper
  inData
}
# == build COUNTY-level file ======================================================================
## Deaths by county/year/sex at each cause level, stacked into one frame
c.t1 <- calculateYLLmeasures(c("county","year","sex","lev0"),"lev0")
c.t2 <- calculateYLLmeasures(c("county","year","sex","lev1"),"lev1")
c.t3 <- calculateYLLmeasures(c("county","year","sex","lev2"),"lev2")
c.t4 <- calculateYLLmeasures(c("county","year","sex","lev3"),"lev3")
datCounty <- bind_rows(c.t1,c.t2,c.t3,c.t4)
## Statewide totals (no county grouping), labeled with the STATE constant
s.t1 <- calculateYLLmeasures(c("year","sex","lev0"),"lev0")
s.t2 <- calculateYLLmeasures(c("year","sex","lev1"),"lev1")
s.t3 <- calculateYLLmeasures(c("year","sex","lev2"),"lev2")
s.t4 <- calculateYLLmeasures(c("year","sex","lev3"),"lev3")
datState <- bind_rows(s.t1,s.t2,s.t3,s.t4)
datState$county = STATE
datCounty <- bind_rows(datCounty,datState)
# datCounty$causeName <- gbdMap0[match(datCounty$CAUSE,gbdMap0[,1]),"nameOnly"] # "needed?"
## Attach population, then crude rates (yearN = 1: single-year strata)
datCounty <- merge(datCounty,popCountySexTot,by = c("year","county","sex"))
datCounty <- calculateRates(datCounty,1)
## Statewide rates kept aside so county SMRs can be computed against them
datState <- datCounty %>% filter(county == STATE) %>%
mutate(stateRate = cDeathRate) %>%
select(year,sex,Level,CAUSE,stateRate)
# for LOCAL installation of application EXCLUDE save line and INCLUDE load line
save(datState, file= paste0(upPlace,"/upData/datState.R"))
#load(file= paste0(upPlace,"/upData/datState.R"))
datCounty <- merge(datCounty,datState,by = c("year","sex","Level","CAUSE"))
## SMR: standardized mortality ratio = county crude rate / statewide rate
datCounty$SMR <- datCounty$cDeathRate / datCounty$stateRate
# == build COMMUNITY-level file ===================================================================
## Communities use 5-year groups (yearG) and only lev0-lev2 (small numbers)
c.t1 <- calculateYLLmeasures(c("comID","yearG","sex","lev0"),"lev0") #removed "county",
c.t2 <- calculateYLLmeasures(c("comID","yearG","sex","lev1"),"lev1")
c.t3 <- calculateYLLmeasures(c("comID","yearG","sex","lev2"),"lev2")
datComm <- bind_rows(c.t1,c.t2,c.t3) %>%
filter(yearG == yearGrp) %>% # 2013-2017 ONLY!!!
arrange(comID,yearG,CAUSE)
datComm <- merge(datComm,popCommSexTot,by = c("yearG","comID","sex"),all=TRUE)
## yearN = 5: person-years over the 5-year group
datComm <- calculateRates(datComm,5)
# add community names POSSIBLE REMOVE
datComm <- merge(datComm, comName, by = "comID",all=TRUE) %>%
arrange(comID,yearG,CAUSE)
# == build TRACT-level file =======================================================================
## Tracts: only lev0/lev1 causes (smallest strata)
c.t1 <- calculateYLLmeasures(c("GEOID","yearG","sex","lev0"),"lev0")
c.t2 <- calculateYLLmeasures(c("GEOID","yearG","sex","lev1"),"lev1")
datTract <- bind_rows(c.t1,c.t2) %>%
filter(yearG == yearGrp) %>% # 2013-2017 ONLY!!!
arrange(GEOID,yearG,CAUSE)
# NOTE -- includes many with NA GEOID
# MERGE Death and Population files
datTract <- merge(datTract,popTractSexTot,by = c("yearG","GEOID","sex"),all=TRUE)
# Calculate Rates
datTract <- calculateRates(datTract,5) %>%
arrange(GEOID,yearG,CAUSE)
# == AGE ADJUSTED ("AA") RATES =========================================================================================
# https://github.com/cran/epitools/blob/master/R/ageadjust.direct.R
# Direct age-adjustment of rates to a standard population, with gamma-based
# confidence intervals (adapted from epitools::ageadjust.direct; the upper
# bound uses the Fay-Feuer-style adjustment via wm).
#
# Args:
#   count:      events per age stratum (derived as rate*pop if missing)
#   pop:        population/person-years per stratum (derived as count/rate if missing)
#   rate:       stratum-specific rates; derived as count/pop when NULL
#   stdpop:     standard population per stratum (weights)
#   conf.level: confidence level for the gamma CI (default 0.95)
#
# Returns a named numeric vector: crude.rate, adj.rate, lci, uci, se.
ageadjust.direct.SAM <- function (count, pop, rate = NULL, stdpop, conf.level = 0.95)
{
  ## Derive whichever of count/pop/rate was not supplied
  ## (scalar conditions, so && instead of &; "== TRUE" dropped)
  if (missing(count) && !missing(pop) && is.null(rate))   count <- rate * pop
  if (missing(pop) && !missing(count) && is.null(rate))   pop   <- count/rate
  if (is.null(rate) && !missing(count) && !missing(pop))  rate  <- count/pop
  ## Strata with missing population contribute nothing; rate must be zeroed
  ## BEFORE pop is overwritten. (The original also had rate[is.null(pop)] <- 0
  ## etc.; is.null() on a vector is scalar FALSE, so those lines were no-ops
  ## and have been removed.)
  rate[is.na(pop)] <- 0
  pop[is.na(pop)] <- 0
  alpha <- 1 - conf.level
  cruderate <- sum(count,na.rm=TRUE)/sum(pop,na.rm=TRUE)
  stdwt <- stdpop/sum(stdpop,na.rm=TRUE)         # standard-population weights
  dsr <- sum(stdwt * rate,na.rm=TRUE)            # directly standardized rate
  dsr.var <- sum((stdwt^2) * (count/pop^2))
  dsr.se  <- sqrt(dsr.var)
  wm <- max(stdwt/pop)                           # largest weight/pop ratio (upper-CI correction)
  gamma.lci <- qgamma(alpha/2, shape = (dsr^2)/dsr.var, scale = dsr.var/dsr)
  gamma.uci <- qgamma(1 - alpha/2, shape = ((dsr+wm)^2)/(dsr.var+wm^2),
                      scale = (dsr.var+wm^2)/(dsr+wm))
  c(crude.rate = cruderate, adj.rate = dsr, lci = gamma.lci,
    uci = gamma.uci, se = dsr.se)
}
# makes dataframe of all possible combinations of county, year, CAUSE, and ageG
year <- data.frame(year = 2000:2017) # these "vectors" need to be dataframes for the sq merge below to work
yearG <- data.frame(yearG = yearGrp)
CAUSE1 <- data.frame(CAUSE=allLabels)
## CAUSE2/CAUSE3: shorter (less detailed) cause codes for community/tract level
CAUSE2 <- data.frame(CAUSE=CAUSE1[nchar(as.character(CAUSE1$CAUSE)) < 4,])
CAUSE3 <- data.frame(CAUSE=CAUSE1[nchar(as.character(CAUSE1$CAUSE)) < 2,])
sex <- data.frame(sex = c("Male","Female","Total"))
ageG <- data.frame(ageG = sort(unique(cbdDat0$ageG)))
county <- data.frame(county = c(geoMap$countyName,"California"))
comID <- data.frame(comID = unique(cbdLinkCA[,"comID"]))
GEOID <- data.frame(GEOID = cbdLinkCA[,"GEOID"])
# other cool approach from Adam:
# fullMatCounty <- Reduce(function(...) merge(..., all = TRUE), list(county, year, CAUSE, sex, ageG))
## Cartesian products: every geography x time x cause x sex x age-group
## stratum, so zero-death strata still exist for the age-adjustment sums
fullMatCounty <- sqldf(" select * from county cross join year cross join CAUSE1 cross join sex cross join ageG")
fullMatComm <- sqldf(" select * from comID cross join yearG cross join CAUSE2 cross join sex cross join ageG")
fullMatTract <- sqldf(" select * from GEOID cross join yearG cross join CAUSE3 cross join sex cross join ageG")
#######CAUSE CHARACTER##################
## sqldf may return factors; force character so the later full_join keys match
fullMatCounty <- mutate(fullMatCounty, county = as.character(county), CAUSE = as.character(CAUSE), sex = as.character(sex), ageG = as.character(ageG), tester = 0)
fullMatComm <- mutate(fullMatComm, comID = as.character(comID), yearG = as.character(yearG), CAUSE = as.character(CAUSE), sex = as.character(sex), ageG = as.character(ageG), tester = 0)
fullMatTract <- mutate(fullMatTract, GEOID = as.character(GEOID), yearG = as.character(yearG), CAUSE = as.character(CAUSE), sex = as.character(sex), ageG = as.character(ageG), tester = 0)
# County age deaths -------------------------------------------------------------------------------
## Age-specific death/YLL counts per cause level; tA5-tA8 are statewide
tA1 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA2 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA3 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev2) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA4 <- cbdDat0 %>% group_by(county,year, sex, ageG,CAUSE=lev3) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA5 <- cbdDat0 %>% group_by( year, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
tA6 <- cbdDat0 %>% group_by( year, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
tA7 <- cbdDat0 %>% group_by( year, sex, ageG,CAUSE=lev2) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
tA8 <- cbdDat0 %>% group_by( year, sex, ageG,CAUSE=lev3) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) %>% mutate(county=STATE)
datAA1 <- bind_rows(tA1,tA2,tA3,tA4,tA5,tA6,tA7,tA8) %>% ungroup() # UNGROUP HERE!!!!
# DATA CLEANING ISSUES as above
datAA1 <- filter(datAA1,!is.na(ageG)) # remove 403 records with missing age (0.065% of deaths) -- impact of this?
# datAA1 <- filter(datAA1,!is.na(CAUSE)) # remove 6955 records with missing CAUSE
datAA1 <- filter(datAA1,!is.na(county)) # remove 758 records with missing county
# datAA1 <- filter(datAA1,!is.na(sex)) # remove
ageCounty <- full_join(fullMatCounty,datAA1 ,by = c("county","year","sex","ageG","CAUSE")) %>% # merge death data and "fullMatCounty"
full_join(popCountySexTotAgeG, by = c("county","year","sex","ageG") ) %>% # merge population
full_join(popStandard[,c("ageG","US2000POP")], by="ageG") # merge standard population
ageCounty$Ndeaths[is.na(ageCounty$Ndeaths)] <- 0 # if NA deaths in strata change to "0"
ageCounty$YLL[is.na(ageCounty$YLL)] <- 0 # if NA deaths in strata change to "0"
## Age-adjusted rates per county/year/sex/cause; each ageadjust call repeats
## the same computation to pull out one element of the returned vector
countyAA <- ageCounty %>% group_by(county,year,sex,CAUSE) %>%
summarize(oDeaths = sum(Ndeaths,na.rm=TRUE),
aRate = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000,
aLCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[3]*100000,
aUCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[4]*100000,
aSE = ageadjust.direct.SAM(count=Ndeaths, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[5]*100000,
YLL.adj.rate = ageadjust.direct.SAM(count=YLL, pop=pop, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000) # CONFIRM
countyAA <- countyAA[!(countyAA$oDeaths==0),c("county","year","sex","CAUSE","aRate","aLCI","aUCI","aSE","YLL.adj.rate")] # remove strata with no deaths and select columns
#tester <- filter(ageCounty,year==2015,county=="Alameda",sex=="Male",CAUSE==0)
#ageadjust.direct.SAM(count=tester$Ndeaths, pop=tester$pop, rate = NULL, stdpop=tester$US2000POP, conf.level = 0.95)*100000
# Community age deaths ----------------------------------------------------------------------------
tA1 <- cbdDat0 %>% group_by(comID, yearG, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) ) #cut county,
tA2 <- cbdDat0 %>% group_by(comID, yearG, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA3 <- cbdDat0 %>% group_by(comID, yearG, sex, ageG,CAUSE=lev2) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
datAA1 <- bind_rows(tA1,tA2,tA3) %>% filter(comID != "")
#datAA1 <- na.omit(datAA1)
ageComm <- full_join(fullMatComm,datAA1,by = c("comID","yearG","sex","ageG","CAUSE")) %>%
filter(yearG == yearGrp) %>%
full_join(popCommSexTotAgeG, by = c("comID","yearG","sex","ageG")) %>% # population
full_join(popStandard[,c("ageG","US2000POP")],by="ageG") # standard population
ageComm$Ndeaths[is.na(ageComm$Ndeaths)] <- 0 # if NA deaths in strata change to "0"
ageComm$YLL[is.na(ageComm$YLL)] <- 0 # if NA deaths in strata change to "0"
## NOTE(review): pop*pop5 -- presumably scales population to person-years
## over the 5-year group; pop5 is defined outside this excerpt -- confirm.
commAA <- ageComm %>% group_by(comID,yearG,sex,CAUSE) %>%
summarize(oDeaths = sum(Ndeaths,na.rm=TRUE),
aRate = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000,
aLCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[3]*100000,
aUCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[4]*100000,
aSE = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[5]*100000,
YLL.adj.rate = ageadjust.direct.SAM(count=YLL, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000)
commAA <- commAA[!(commAA$oDeaths==0),c("comID","yearG","sex","CAUSE","oDeaths","aRate","aLCI","aUCI","aSE","YLL.adj.rate")]
# removes rows with aRate = inf HERE there are only ALPINE
commAA <- commAA[!(commAA$aRate > 10000),]
#tester <- filter(ageComm,yearG==yearGrp,comID=="104",sex=="Female",CAUSE==0)
#ageadjust.direct.SAM(count=tester$Ndeaths, pop=tester$pop, rate = NULL, stdpop=tester$US2000POP, conf.level = 0.95)*100000
# Tract age deaths -----------------------------------------------------------------------------------------------------
tA1 <- cbdDat0 %>% group_by(GEOID, yearG, sex, ageG,CAUSE=lev0) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
tA2 <- cbdDat0 %>% group_by(GEOID, yearG, sex, ageG,CAUSE=lev1) %>% summarize(Ndeaths = n(), YLL = sum(yll,na.rm=TRUE) )
datAA1 <- bind_rows(tA1,tA2) %>% filter(GEOID != "")
ageTract <- full_join(fullMatTract,datAA1,by = c("GEOID","yearG","sex","ageG","CAUSE")) %>%
filter(yearG == yearGrp) %>%
full_join(popTractSexTotAgeG,by = c("GEOID","yearG","sex","ageG")) %>% # add population
full_join(popStandard[,c("ageG","US2000POP")],by="ageG") # add standard population
ageTract$Ndeaths[is.na(ageTract$Ndeaths)] <- 0 # if NA deaths in strata change to "0"
ageTract$YLL[is.na(ageTract$YLL)] <- 0 # if NA deaths in strata change to "0"
tractAA <- ageTract %>% group_by(GEOID,yearG,sex,CAUSE) %>%
summarize(oDeaths = sum(Ndeaths,na.rm=TRUE),
aRate = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000,
aLCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[3]*100000,
aUCI = ageadjust.direct.SAM(count=Ndeaths, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[4]*100000,
YLL.adj.rate = ageadjust.direct.SAM(count=YLL, pop=pop*pop5, rate = NULL, stdpop=US2000POP, conf.level = 0.95)[2]*100000)
tractAA <- tractAA[!(tractAA$oDeaths==0),c("GEOID","yearG","sex","CAUSE","oDeaths","aRate","aLCI","aUCI","YLL.adj.rate")]
# removes rows with aRate = inf, or infinity becuase some population strata is 0 AND some other odd strata
tractAA <- tractAA[!(tractAA$aRate > 5000),]
tractAA <- tractAA[!(is.na(tractAA$aRate)),]
# -- Merge adjusted rates into main data files and final Clean Up ----------------------------------------------------------
datTract <- merge(datTract, tractAA ,by = c("GEOID","yearG","sex","CAUSE"),all=TRUE) %>%
mutate_if(is.numeric, signif,digits=4) %>%
filter(!is.na(county)) %>% # REMOVE ALL out-of-state GEOID and missing GEOID
filter(!is.na(CAUSE)) # removes about 130 records with bad/no GEOID and/or wrong County based on GEOID
datComm <- merge(datComm, commAA ,by = c("comID","yearG","sex","CAUSE"),all=TRUE) %>%
mutate_if(is.numeric, signif,digits=4) %>%
filter(!is.na(county)) # as above
datCounty <- merge(datCounty,countyAA ,by = c("county","year","sex","CAUSE"),all=TRUE) %>%
filter(!(is.na(CAUSE))) %>% # removes "Level3" NA (most 'causes' are NA on Level3)
select(-ageG,-stateRate) %>%
mutate_if(is.numeric, signif,digits=4) %>% # much smaller file and easier to read
mutate(county = ifelse(county==STATE, toupper(STATE),county) # e.g. California --> CALIFORNIA
)
# == Final Data Clean Up and Export ==================================================================================
# "SMALL CELL and "RISKY CAUSE" supression ----------
# xCause0 <- c(14,41,50,139,4,49,192)
# xCause1 <- c(xCause0,10)
# datTract <- filter(datTract, !(CAUSE %in% xCause1))
# datComm <- filter(datComm, !(CAUSE %in% xCause1))
# datCounty <- filter(datCounty,!(CAUSE %in% xCause0))
## Dead branch (1==2): kept as a convenience for interactively reloading the
## saved files instead of rebuilding them
if (1==2){
datTract <- readRDS(path(myPlace,"/myData/",whichDat,"datTract.RDS"))
datComm <- readRDS(path(myPlace,"/myData/",whichDat,"datComm.RDS"))
datCounty <- readRDS(path(myPlace,"/myData/",whichDat,"datCounty.RDS"))
}
## Small-cell suppression: zero out all measures where Ndeaths < criticalNum.
## NOTE: inside mutate() the later expressions see the ALREADY-suppressed
## Ndeaths; this still works only because suppressed cells have
## Ndeaths == 0 < criticalNum, so the subsequent conditions stay TRUE.
criticalNum <- 11
datTract <- datTract %>% mutate(Ndeaths = ifelse(Ndeaths < criticalNum,0,Ndeaths),
cDeathRate = ifelse(Ndeaths < criticalNum,0,cDeathRate),
YLL = ifelse(Ndeaths < criticalNum,0,YLL),
YLLper = ifelse(Ndeaths < criticalNum,0,YLLper),
rateLCI = ifelse(Ndeaths < criticalNum,0,rateLCI),
rateUCI = ifelse(Ndeaths < criticalNum,0,rateUCI),
mean.age = ifelse(Ndeaths < criticalNum,0,mean.age)
)
datComm <- datComm %>% mutate(Ndeaths = ifelse(Ndeaths < criticalNum,0,Ndeaths),
cDeathRate = ifelse(Ndeaths < criticalNum,0,cDeathRate),
YLL = ifelse(Ndeaths < criticalNum,0,YLL),
YLLper = ifelse(Ndeaths < criticalNum,0,YLLper),
rateLCI = ifelse(Ndeaths < criticalNum,0,rateLCI),
rateUCI = ifelse(Ndeaths < criticalNum,0,rateUCI),
mean.age = ifelse(Ndeaths < criticalNum,0,mean.age)
)
datCounty <- datCounty %>% mutate(Ndeaths = ifelse(Ndeaths < criticalNum,0,Ndeaths),
cDeathRate = ifelse(Ndeaths < criticalNum,0,cDeathRate),
YLL = ifelse(Ndeaths < criticalNum,0,YLL),
YLLper = ifelse(Ndeaths < criticalNum,0,YLLper),
SMR = ifelse(Ndeaths < criticalNum,0,SMR),
rateLCI = ifelse(Ndeaths < criticalNum,0,rateLCI),
rateUCI = ifelse(Ndeaths < criticalNum,0,rateUCI),
mean.age = ifelse(Ndeaths < criticalNum,0,mean.age)
)
# Quick fix to replace with Version Beta 1.1
# eliminates pop 0 and therefore infinity rates
datTract <- filter(datTract,pop>0)
datCounty <- filter(datCounty,!is.na(Ndeaths))
## Export the three geography-level files consumed by the application
saveRDS(datTract, file= path(myPlace,"/myData/",whichDat,"datTract.RDS"))
saveRDS(datComm, file= path(myPlace,"/myData/",whichDat,"datComm.RDS"))
saveRDS(datCounty, file= path(myPlace,"/myData/",whichDat,"datCounty.RDS"))
# #
# datTract$causeName <- gbdMap0[match(datTract$CAUSE,gbdMap0[,"LABEL"]),"causeList"]
# datComm$causeName <- gbdMap0[match(datComm$CAUSE,gbdMap0[,"LABEL"]),"causeList"]
# datCounty$causeName <- gbdMap0[match(datCounty$CAUSE,gbdMap0[,"LABEL"]),"causeList"]
#
# write.csv(datTract,(paste0(upPlace,"/tempOutput/Tract CCB Work.csv")))
# write.csv(datComm,(paste0(upPlace,"/tempOutput/Community CCB Work.csv")))
# write.csv(datCounty,(paste0(upPlace,"/tempOutput/County CCB Work.csv")))
# END ===================================================================================================================
# DATA CLEANING ISSUES ----------------------------------
# in 2012 Los Angeles Census Tract 9304.01 was merged into tract 1370.00
# "The deletion of Census 2000 Tract 1370.00 is now corrected, and the tract is reinstated
# with its former boundaries. This change incorporates all of former (2010) Census Tract 9304.01
# and part of (2010) Census Tract 8002.04 into the reinstated (2012) tract 1370.00.
# https://www.census.gov/programs-surveys/acs/technical-documentation/table-and-geography-changes/2012/geography-changes.html
# LA CENSUS TRACT TO RECODE
# 06037930401 should be recoded to 06037137000 in all data files
# CENSUS TRACTS
# current from acsWork0 has 8057 tracts
# current cbdLinkCA has 8036 (2010 data)
# current cbddat0 has 8603! bad geocodes?
# something ?? has 8035 ... check...
#temp <- popCensusCom$GEOID
#junk <- cbdDat0[!(cbdDat0$GEOID %in% temp),]
#junk <- junk[junk$GEOID != "",]
#write.csv(junk,(paste0(upPlace,"/tempOutput/junk Tracts.csv")))
# these records have a GEOID but not comID suggesting the GEOID is "bad"
# junk <- filter(cbdDat0,is.na(comID) & GEOID != "" & year > 2004)
# 651 records
# length(unique(junk$GEOID))
# 590 unique GEOID not in California (based on current link file)
# write.csv(table(junk$GEOID,junk$year),(paste0(upPlace,"/tempOutput/junk Tracts.csv")))
# county missing from 3797 records
# junk <- filter(cbdDat0,is.na(county))
# 3797 records
# countyFIPS blank=2145 and 999=1652 (but State="CA; based on "F71" only)
# write.csv(table(junk$year,junk$countyFIPS),(paste0(upPlace,"/tempOutput/missing County FIPS.csv")))
# MAJOR cleaning issue!!!
# junk <- filter(cbdDat0,is.na(gbd36))
# 82775 records where ICD10 does not map to gbd36 -- errors in info file!
# write.csv(table(junk$year,junk$countyFIPS),(paste0(upPlace,"/tempOutput/no ICD10 to gbd36.csv")))
# Potentially useful old code bits:
# could make aL and aU like this, or as below based on an input file:
# aL <- c( 0, 5,15,25,35,45,55,65,75,85)
# aU <- c(-1,4,14,24,34,44,54,64,74,84,999)
# "Manual' calcuation of age-adjustment
# popStandard <- readRDS(paste0(upPlace,"/upData/popStandard.RDS"))
# ageCounty <- merge(ageCounty,popStandard,by = c("ageG"),all=TRUE) # merge with "Standard" population
#calculate number of expected deaths in strata among standard population
#ageCounty$deathsE <- (ageCounty$Ndeaths/ageCounty$pop)*ageCounty$popStandard
# "manual" calculation of age-adjusted rates, AND using ageadjust.direct function from EpiTools package
# NOTE: oDeaths etc != total deaths in other files because of missings removed#
# summarize(oDeaths = sum(Ndeaths), # na.rm=TRUE not needed becuase of cleaning above
# oPop = sum(pop),
# cRate = 100000*oDeaths/oPop,
# eDeaths = sum(deathsE),
# ePop = sum(popStandard),
# aRate = 100000*eDeaths/ePop)
# age-adjustment reference
# https://www.cdc.gov/nchs/data/nvsr/nvsr47/nvs47_03.pdf
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.ASD.rhow.R
\name{plot.ASD.rhow}
\alias{plot.ASD.rhow}
\title{Produit des figures pour la réflectance marine}
\usage{
plot.ASD.rhow(asd, PNG = FALSE, RADIANCES = FALSE)
}
\description{
@param asd est une liste produite par la fonction \code{\link{compute.ASD.rhow}}
@param PNG est une variable booléenne (TRUE ou FALSE) qui permet de produire un fichier png.
Par défaut PNG=FALSE
@param RADIANCES est une variable booléenne (TRUE ou FALSE) qui permet de produire une figure
avec les mesures de luminances et la réflectance de la surface et du ciel. Par défaut RADIANCES=FALSE
}
\details{
@author Simon Bélanger
}
| /man/plot.ASD.rhow.Rd | no_license | belasi01/asd | R | false | true | 702 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.ASD.rhow.R
\name{plot.ASD.rhow}
\alias{plot.ASD.rhow}
\title{Produit des figures pour la réflectance marine}
\usage{
plot.ASD.rhow(asd, PNG = FALSE, RADIANCES = FALSE)
}
\description{
@param asd est une liste produite par la fonction \code{\link{compute.ASD.rhow}}
@param PNG est une variable booléenne (TRUE ou FALSE) qui permet de produire un fichier png.
Par défaut PNG=FALSE
@param RADIANCES est une variable booléenne (TRUE ou FALSE) qui permet de produire une figure
avec les mesures de luminances et la réflectance de la surface et du ciel. Par défaut RADIANCES=FALSE
}
\details{
@author Simon Bélanger
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{TSSEnrichment}
\alias{TSSEnrichment}
\title{Compute TSS enrichment score per cell}
\usage{
TSSEnrichment(
object,
tss.positions,
assay = NULL,
cells = NULL,
verbose = TRUE
)
}
\arguments{
\item{object}{A Seurat object}
\item{tss.positions}{A GRanges object containing the TSS positions}
\item{assay}{Name of assay to use}
\item{cells}{A vector of cells to include. If NULL (default), use all cells
in the object}
\item{verbose}{Display messages}
}
\value{
Returns a \code{\link[Seurat]{Seurat}} object
}
\description{
Compute the transcription start site (TSS) enrichment score for each cell, as defined by ENCODE:
\url{https://www.encodeproject.org/data-standards/terms/}.
}
\details{
The computed score will be added to the object metadata as "TSS.enrichment".
}
\examples{
\donttest{
library(EnsDb.Hsapiens.v75)
gene.ranges <- genes(EnsDb.Hsapiens.v75)
gene.ranges <- gene.ranges[gene.ranges$gene_biotype == 'protein_coding', ]
tss.ranges <- GRanges(
seqnames = seqnames(gene.ranges),
ranges = IRanges(start = start(gene.ranges), width = 2),
strand = strand(gene.ranges)
)
seqlevelsStyle(tss.ranges) <- 'UCSC'
tss.ranges <- keepStandardChromosomes(tss.ranges, pruning.mode = 'coarse')
fpath <- system.file("extdata", "fragments.tsv.gz", package="Signac")
atac_small <- SetFragments(object = atac_small, file = fpath)
TSSEnrichment(object = atac_small, tss.positions = tss.ranges[1:100])
}
}
| /man/TSSEnrichment.Rd | permissive | Austin-s-h/signac | R | false | true | 1,515 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{TSSEnrichment}
\alias{TSSEnrichment}
\title{Compute TSS enrichment score per cell}
\usage{
TSSEnrichment(
object,
tss.positions,
assay = NULL,
cells = NULL,
verbose = TRUE
)
}
\arguments{
\item{object}{A Seurat object}
\item{tss.positions}{A GRanges object containing the TSS positions}
\item{assay}{Name of assay to use}
\item{cells}{A vector of cells to include. If NULL (default), use all cells
in the object}
\item{verbose}{Display messages}
}
\value{
Returns a \code{\link[Seurat]{Seurat}} object
}
\description{
Compute the transcription start site (TSS) enrichment score for each cell, as defined by ENCODE:
\url{https://www.encodeproject.org/data-standards/terms/}.
}
\details{
The computed score will be added to the object metadata as "TSS.enrichment".
}
\examples{
\donttest{
library(EnsDb.Hsapiens.v75)
gene.ranges <- genes(EnsDb.Hsapiens.v75)
gene.ranges <- gene.ranges[gene.ranges$gene_biotype == 'protein_coding', ]
tss.ranges <- GRanges(
seqnames = seqnames(gene.ranges),
ranges = IRanges(start = start(gene.ranges), width = 2),
strand = strand(gene.ranges)
)
seqlevelsStyle(tss.ranges) <- 'UCSC'
tss.ranges <- keepStandardChromosomes(tss.ranges, pruning.mode = 'coarse')
fpath <- system.file("extdata", "fragments.tsv.gz", package="Signac")
atac_small <- SetFragments(object = atac_small, file = fpath)
TSSEnrichment(object = atac_small, tss.positions = tss.ranges[1:100])
}
}
|
## Attach analysis packages. library() errors immediately when a package is
## missing; require() (used previously) only returns FALSE and lets the
## script fail later with obscure "could not find function" errors.
library(tidyverse)
library(broom)
library(broom.mixed)
library(lme4)
###### data exploration
## Percent correct by condition for each TerraNova item
items%>%
group_by(condition, terranova_problem_id)%>%
summarize(correct=mean(correct))%>%
ggplot(aes(condition,correct))+
geom_col(position='dodge')+facet_wrap(~terranova_problem_id)
## Among heavy users in Treatment: problem counts vs item correctness
items%>%
filter(condition=='Treatment',workedProbs>1000)%>%
ggplot(aes(correct==1,probCount))+
geom_violin()+facet_wrap(~terranova_problem_id)+
scale_y_sqrt()
## Missingness of `correct` per item and condition (wide for comparison)
items%>%
group_by(terranova_problem_id,condition)%>%
summarize(nas=mean(is.na(correct)))%>%
pivot_wider( id_cols=terranova_problem_id,names_from=condition,values_from=nas)
popLevel <- NULL # (no-op placeholder removed) ---------------------------------
probLevel%>%
mutate(prob=factor(terranova_problem_id,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
ggplot(aes(prob,workedProbs))+geom_point()+
scale_y_sqrt()
## Per-item treatment effects extracted from the fitted model mod1
effs1 <- getEffs(mod1)
### merge w problem-level skill builder data
effs1 <-
probLevel%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))),
type=ifelse(prob%in%c(33:36,paste0(37,c('A','B','C'))),'open-ended','multiple choice'))%>%
full_join(getEffs(mod1))
### effects per problem
ggplot(ungroup(effs1),aes(as.numeric(prob),effect))+
geom_point()+
geom_line()+
# geom_errorbar(aes(ymin=min1se,ymax=max1se),width=0,size=2)+
geom_errorbar(aes(ymin=min2se,ymax=max2se),width=0,size=1)+
geom_hline(yintercept=0)+
scale_x_continuous(breaks=1:39,labels=levels(effs1$prob))+
labs(x='Item',y='Effect')
ggsave('../assEffects.jpg',width=6.75,height=2)
## --- CCSS skill tagging by two raters (Cristina, Andrew) --------------------
tn <- read_csv('assistmentsMaine/Terranova Data/Terranova Data/WPI TerraNova and Data/Cristina and Andrew Test Taging.csv')
names(tn)[1] <- 'Problem'
names(tn)[2] <- 'cristina'
names(tn)[3] <- 'cristina2'
names(tn)[4] <- 'andrew'
## Tags look like "7.EE": split into grade (before the dot) and skill (after)
tn$grade <- tn$cristina%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[1])
tn$grade[tn$Problem%in%c(23,35)] <- 7 ## in next column over
tn$skill <- tn$cristina%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[2])
tn$gradeA <- tn$andrew%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[1])
tn$gradeA[tn$gradeA=='?'] <- NA
tn$skillA <- tn$andrew%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[2])
## Abbreviation -> full Common Core standard name
skillz <- c(
EE="Expressions & Equations",
F="Functions",
G="Geometry",
MD="Measurement & Data",
NBT="Number & Operations in Base 10",
NF="Number & Operations--Fractions",
NS="The Number System",
OA="Operations & Algebraic Thinking",
RP="Ratios & Proportional Relationships",
SP="Statistics & Probability")
tn$skillz <- skillz[tn$skill]
# Format the list of TerraNova problems tagged with CCSS skill `sk`.
# Problems on which the two raters agree (Cristina's `skill` column and
# Andrew's `skillA` column) are listed plainly; problems tagged by only one
# rater get a "*" suffix.
#
# Args:
#   sk:  skill abbreviation (e.g. "EE", "G")
#   dat: data frame with Problem, skill and skillA columns
#        (defaults to the global tn, preserving the original behavior)
#
# Returns a single comma-separated string ("" when no problem is tagged).
getProbs <- function(sk, dat = tn){
  tagged <- with(dat,
                 sort(na.omit(unique(c(Problem[skill == sk], Problem[skillA == sk])))))
  agreed <- with(dat,
                 sort(na.omit(intersect(Problem[skill == sk], Problem[skillA == sk]))))
  ## base vapply instead of purrr::map_chr -- no package dependency needed here
  labels <- vapply(tagged,
                   function(p) paste0(p, if (p %in% agreed) "" else "*"),
                   character(1))
  paste(labels, collapse = ",")
}
map_dfr(names(skillz),
~with(tn,tibble(
Standard=skillz[.],
Grades=paste(sort(unique(na.omit(c(grade[skill==.],gradeA[skillA==.])))),
collapse='&'),
Items=getProbs(.)
)))%>%
xtable(caption="Common Core State Standards and grade levesl for the 37 TerraNova items, as identified by Andrew Burnett and Cristina Heffernan. Items classified discordantly are marked with a *.",label="tab:assistmentsSkills",align=c('c','p{2.5in}','p{1in}','p{2.5in}'))%>%
print(file='terranovva.tex',include.rownames=FALSE,floating.environment="table*")
# Item metadata transcribed from the Word document: problem id plus CCSS tag.
tn <- read.csv('Terranova Data/Terranova Data/WPI TerraNova and Data/fromWordDoc.csv')
tn <- na.omit(tn)
names(tn)[1] <- 'prob'
tn$prob <- as.character(tn$prob)
# Item 37 has three sub-parts: duplicate its row twice and relabel 37A/37B/37C.
tn <- tn[c(1:nrow(tn),37,37),]
tn$prob[37:39] <- paste0('37',c('A','B','C'))
effs1 <- full_join(effs1,tn)
# Per-item treatment effects for multiple-choice items, faceted by CCSS
# domain (rare domains lumped into "Other" by fct_lump_min).
effs1%>%
filter(type=='multiple choice')%>%
mutate(#CCSS=ifelse(type=='open-ended','Open\nEnded',CCSS),
CCSS=fct_lump_min(CCSS,3))%>%
group_by(CCSS)%>%
mutate(newnum=1:n(),CCSS=gsub(' ','\n',CCSS))%>%ungroup()%>%
ggplot(aes(newnum,effect,label=prob))+
geom_point()+
geom_text(nudge_y=.1,color='black')+
geom_line()+geom_hline(yintercept=0,linetype='dotted')+
facet_wrap(~CCSS,nrow=1,scales='free_x')+
theme(
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank())+
labs(y='Treatment Effect (Logit Scale)')
ggsave('../assEffectsType.jpg',width=6.5,height=2)
### effects per problem by worked/correct probs
# Effect vs. number of worked skill-builder problems (sqrt x-axis).
ggplot(filter(effs1,type=='multiple choice'),aes(workedProbs,effect,label=prob))+#,color=type,fill=type))+
geom_text()+
#geom_errorbar(aes(ymin=min1se,ymax=max1se),width=0,size=2)+
#geom_errorbar(aes(ymin=min2se,ymax=max2se),width=0,size=1)+
#geom_hline(yintercept=0)+
labs(x='Worked Problems',y='Effect')+
scale_x_sqrt(breaks=c(0,500,1000,2000,5000,10000,20000,40000,60000),labels=c(0,500,'1K','2K','5K','10K','20K','40K','60K'))+
geom_smooth(se=TRUE)
ggsave('../assEffectsWorkedProbs.jpg',width=6.5,height=2)
# Same plot against the number of correctly answered problems.
ggplot(filter(effs1,type=='multiple choice'),aes(correctProbs,effect,label=prob))+#color=type,fill=type))+
geom_text()+
#geom_errorbar(aes(ymin=min1se,ymax=max1se),width=0,size=2)+
#geom_errorbar(aes(ymin=min2se,ymax=max2se),width=0,size=1)+
#geom_hline(yintercept=0)+
labs(x='Correct Problems',y='Effect')+
scale_x_sqrt(breaks=c(0,500,1000,2000,5000,10000,20000,40000),labels=c(0,500,'1K','2K','5K','10K','20K','40K'))+
geom_smooth(se=TRUE)
ggsave('../assEffectsCorrectProbs.jpg',width=6.5,height=2)
### does big glmer agree w seprarate models for each problem?
### effect estimates:
# NOTE(review): `normal`, `normalOLS` and `mod1` (per-item regressions and the
# joint glmer fit) are created elsewhere in the project -- this script assumes
# they are already in the workspace.
normal%>%
select(-effect)%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
full_join(effs1)%>%
ggplot(aes(estimate,effect,color=type))+geom_point()+
geom_hline(yintercept=0)+geom_abline(intercept=0,slope=1)+
labs(title='Effect Estimates',x='Separate Regressions',y='Together')
### SEs
normal%>%
select(-effect)%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
full_join(effs1)%>%
ggplot(aes(std.error,se,color=type))+geom_point()+
geom_abline(intercept=0,slope=1)+
labs(title='Standard Errors',x='Separate Regressions',y='Together')+xlim(0,0.4)+ylim(0,0.4)
normalOLS%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
full_join(effs1)%>%
ggplot(aes(estimate,effect,color=type))+geom_point()+
geom_hline(yintercept=0)+geom_vline(xintercept=0)+
geom_smooth(method='lm',se=FALSE)+
labs(title='Effect Estimates',x='Separate OLS Regressions',y='Together')
### anova: test for between-problem effects
### se for open ended
# SE of the open-ended treatment effect: var(b_trt) + var(b_trt:open) + 2*cov,
# taken from the fixed-effect covariance matrix of mod1.
seoe <- sqrt(
vcov(mod1)['conditionTreatment','conditionTreatment']+
vcov(mod1)['conditionTreatment:typeopen-ended','conditionTreatment:typeopen-ended']+
2*vcov(mod1)['conditionTreatment','conditionTreatment:typeopen-ended']
)
# Per-item random treatment effects and their conditional variances.
re <- ranef(mod1,condVar=TRUE)
effs <- re$terranova_problem_id$conditionTreatment
names(effs) <- rownames(re$terranova_problem_id)
V <- apply(attr(re$terranova_problem_id,'postVar'),3,function(x) x[2,2])
names(V) <- names(effs)
V <- V[order(effs)]
effs <- effs[order(effs)]
# Two-sided normal p-values for every pairwise difference of item effects.
ps <- outer(names(effs),names(effs),function(n1,n2) 2*pnorm(-abs(effs[n1]-effs[n2])/sqrt(V[n1]+V[n2])))
stars <- ifelse(ps<0.001,'p<0.001',ifelse(ps<0.01,'p<0.01',ifelse(ps<0.05,'p<0.05',NA)))#ifelse(ps<0.1,'.',''))))
rownames(stars) <- colnames(stars) <- names(effs)
# NOTE(review): only the column *names* are reversed here, not the columns of
# `stars` themselves -- confirm this is what the plot.matrix call expects.
colnames(stars) <- rev(names(effs))
# Plot the 39x39 pairwise-significance matrix `stars` (plot.matrix method).
# `key` toggles a colour key in the right margin; default it when the script
# is run top-to-bottom and `key` was never set.
if (!exists("key")) key <- FALSE
omar <- par()$mar
# NOTE(review): on.exit() only takes effect inside a function; at top level
# the margins must be restored manually with par(mar=omar) afterwards.
on.exit(par(mar=omar))
if(key) par(mar=c(2, 2, 2, 4.1)) else par(mar=c(2,2,2,0)) # adapt margins
plot(stars,asp=TRUE, axis.col=list(at=seq(1,39,2),labels=colnames(stars)[seq(1,39,2)],pos=0,cex.axis=.75,gap.axis=0,mgp=c(1,0,1),lwd.ticks=0),
axis.row=list(at=1:39,labels=colnames(stars),pos=0,cex.axis=0.5,gap.axis=0,mgp=c(1,0.5,1),las=2))#,main=paste(level,'Year',year),
# key=if(key) list(side=4,las=1) else NULL)
# BUG FIX: axis() requires `at` and `labels` of equal length. seq(2,39,2)
# yields 19 positions but the labels were indexed with seq(2,32,2) (16
# values), so the call errored. Index the labels with the same sequence.
axis(1,at=seq(2,39,2),labels=colnames(stars)[seq(2,39,2)],cex.axis=.75,gap.axis=0,tick=FALSE,mgp=c(1,1,1))
| /assistmentsMaine/code/plotsResults.r | no_license | adamSales/itemLevelEffects | R | false | false | 8,236 | r | require(tidyverse)
# Attach modelling/tidying dependencies (tidyverse is attached on the
# preceding line). library() -- unlike require() -- stops with an error when
# a package is missing instead of silently returning FALSE.
library(broom)        # tidy()/glance() for model objects
library(broom.mixed)  # tidiers for lme4 fits
library(lme4)         # glmer/lmer mixed models
###### data exploration
# Mean proportion correct per item, split by condition.
items%>%
group_by(condition, terranova_problem_id)%>%
summarize(correct=mean(correct))%>%
ggplot(aes(condition,correct))+
geom_col(position='dodge')+facet_wrap(~terranova_problem_id)
# Among heavy users (>1000 worked problems) in the treatment arm: the
# distribution of probCount by whether the item was answered correctly.
items%>%
filter(condition=='Treatment',workedProbs>1000)%>%
ggplot(aes(correct==1,probCount))+
geom_violin()+facet_wrap(~terranova_problem_id)+
scale_y_sqrt()
# Share of missing responses per item, one column per condition.
items%>%
group_by(terranova_problem_id,condition)%>%
summarize(nas=mean(is.na(correct)))%>%
pivot_wider( id_cols=terranova_problem_id,names_from=condition,values_from=nas)
# Worked-problem counts per item (items ordered 1-36, then 37A-37C).
probLevel%>%
mutate(prob=factor(terranova_problem_id,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
ggplot(aes(prob,workedProbs))+geom_point()+
scale_y_sqrt()
# Per-item treatment effects from the joint glmer fit.
# NOTE(review): getEffs(), mod1, items and probLevel are defined elsewhere in
# the project -- this script assumes they are already in the workspace.
effs1 <- getEffs(mod1)
### merge w problem-level skill builder data
# Items 33-36 and 37A-C are open-ended; everything else is multiple choice.
effs1 <-
probLevel%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))),
type=ifelse(prob%in%c(33:36,paste0(37,c('A','B','C'))),'open-ended','multiple choice'))%>%
full_join(getEffs(mod1))
### effects per problem
ggplot(ungroup(effs1),aes(as.numeric(prob),effect))+
geom_point()+
geom_line()+
# geom_errorbar(aes(ymin=min1se,ymax=max1se),width=0,size=2)+
geom_errorbar(aes(ymin=min2se,ymax=max2se),width=0,size=1)+
geom_hline(yintercept=0)+
scale_x_continuous(breaks=1:39,labels=levels(effs1$prob))+
labs(x='Item',y='Effect')
ggsave('../assEffects.jpg',width=6.75,height=2)
# CCSS taggings of the 37 TerraNova items by two raters (Cristina, Andrew).
# Tags look like "<grade>.<CCSS domain>" and are split apart below.
tn <- read_csv('assistmentsMaine/Terranova Data/Terranova Data/WPI TerraNova and Data/Cristina and Andrew Test Taging.csv')
names(tn)[1] <- 'Problem'
names(tn)[2] <- 'cristina'
names(tn)[3] <- 'cristina2'
names(tn)[4] <- 'andrew'
tn$grade <- tn$cristina%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[1])
tn$grade[tn$Problem%in%c(23,35)] <- 7 ## in next column over
tn$skill <- tn$cristina%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[2])
tn$gradeA <- tn$andrew%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[1])
tn$gradeA[tn$gradeA=='?'] <- NA
tn$skillA <- tn$andrew%>%strsplit('.',fixed=TRUE)%>%map_chr(~.[2])
# Lookup table: CCSS domain abbreviation -> full standard name.
skillz <- c(
EE="Expressions & Equations",
F="Functions",
G="Geometry",
MD="Measurement & Data",
NBT="Number & Operations in Base 10",
NF="Number & Operations--Fractions",
NS="The Number System",
OA="Operations & Algebraic Thinking",
RP="Ratios & Proportional Relationships",
SP="Statistics & Probability")
tn$skillz <- skillz[tn$skill]
# For CCSS skill `sk`, list the problems that either rater tagged with that
# skill, flagging rater-discordant problems with a trailing "*".
# Reads the global data frame `tn` (columns Problem, skill, skillA).
# Returns a single comma-separated string, e.g. "3,7*,12".
getProbs <- function(sk) {
  # Problems tagged `sk` by either rater (Cristina: skill, Andrew: skillA).
  tagged_either <- sort(na.omit(unique(c(tn$Problem[tn$skill == sk],
                                         tn$Problem[tn$skillA == sk]))))
  # Problems on which both raters agree.
  tagged_both <- sort(na.omit(intersect(tn$Problem[tn$skill == sk],
                                        tn$Problem[tn$skillA == sk])))
  # Star the discordant problems, then join into one string.
  flagged <- vapply(tagged_either,
                    function(p) paste0(p, ifelse(p %in% tagged_both, '', '*')),
                    character(1))
  paste(flagged, collapse = ',')
}
# Build a table of CCSS standards, the grade levels either rater assigned,
# and the item lists (discordant items starred by getProbs()), then write it
# out as a LaTeX table*.
map_dfr(names(skillz),
~with(tn,tibble(
Standard=skillz[.],
Grades=paste(sort(unique(na.omit(c(grade[skill==.],gradeA[skillA==.])))),
collapse='&'),
Items=getProbs(.)
)))%>%
# TYPO FIX in the published caption: "grade levesl" -> "grade levels".
xtable(caption="Common Core State Standards and grade levels for the 37 TerraNova items, as identified by Andrew Burnett and Cristina Heffernan. Items classified discordantly are marked with a *.",label="tab:assistmentsSkills",align=c('c','p{2.5in}','p{1in}','p{2.5in}'))%>%
# NOTE(review): the output filename 'terranovva.tex' looks misspelled, but is
# kept as-is in case downstream LaTeX \input{}s that exact name -- confirm.
print(file='terranovva.tex',include.rownames=FALSE,floating.environment="table*")
# Item metadata transcribed from the Word document: problem id plus CCSS tag.
tn <- read.csv('Terranova Data/Terranova Data/WPI TerraNova and Data/fromWordDoc.csv')
tn <- na.omit(tn)
names(tn)[1] <- 'prob'
tn$prob <- as.character(tn$prob)
# Item 37 has three sub-parts: duplicate its row twice and relabel 37A/37B/37C.
tn <- tn[c(1:nrow(tn),37,37),]
tn$prob[37:39] <- paste0('37',c('A','B','C'))
effs1 <- full_join(effs1,tn)
# Per-item treatment effects for multiple-choice items, faceted by CCSS
# domain (rare domains lumped into "Other" by fct_lump_min).
effs1%>%
filter(type=='multiple choice')%>%
mutate(#CCSS=ifelse(type=='open-ended','Open\nEnded',CCSS),
CCSS=fct_lump_min(CCSS,3))%>%
group_by(CCSS)%>%
mutate(newnum=1:n(),CCSS=gsub(' ','\n',CCSS))%>%ungroup()%>%
ggplot(aes(newnum,effect,label=prob))+
geom_point()+
geom_text(nudge_y=.1,color='black')+
geom_line()+geom_hline(yintercept=0,linetype='dotted')+
facet_wrap(~CCSS,nrow=1,scales='free_x')+
theme(
axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank())+
labs(y='Treatment Effect (Logit Scale)')
ggsave('../assEffectsType.jpg',width=6.5,height=2)
### effects per problem by worked/correct probs
# Effect vs. number of worked skill-builder problems (sqrt x-axis).
ggplot(filter(effs1,type=='multiple choice'),aes(workedProbs,effect,label=prob))+#,color=type,fill=type))+
geom_text()+
#geom_errorbar(aes(ymin=min1se,ymax=max1se),width=0,size=2)+
#geom_errorbar(aes(ymin=min2se,ymax=max2se),width=0,size=1)+
#geom_hline(yintercept=0)+
labs(x='Worked Problems',y='Effect')+
scale_x_sqrt(breaks=c(0,500,1000,2000,5000,10000,20000,40000,60000),labels=c(0,500,'1K','2K','5K','10K','20K','40K','60K'))+
geom_smooth(se=TRUE)
ggsave('../assEffectsWorkedProbs.jpg',width=6.5,height=2)
# Same plot against the number of correctly answered problems.
ggplot(filter(effs1,type=='multiple choice'),aes(correctProbs,effect,label=prob))+#color=type,fill=type))+
geom_text()+
#geom_errorbar(aes(ymin=min1se,ymax=max1se),width=0,size=2)+
#geom_errorbar(aes(ymin=min2se,ymax=max2se),width=0,size=1)+
#geom_hline(yintercept=0)+
labs(x='Correct Problems',y='Effect')+
scale_x_sqrt(breaks=c(0,500,1000,2000,5000,10000,20000,40000),labels=c(0,500,'1K','2K','5K','10K','20K','40K'))+
geom_smooth(se=TRUE)
ggsave('../assEffectsCorrectProbs.jpg',width=6.5,height=2)
### does big glmer agree w seprarate models for each problem?
### effect estimates:
# NOTE(review): `normal`, `normalOLS` and `mod1` (per-item regressions and the
# joint glmer fit) are created elsewhere in the project -- this script assumes
# they are already in the workspace.
normal%>%
select(-effect)%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
full_join(effs1)%>%
ggplot(aes(estimate,effect,color=type))+geom_point()+
geom_hline(yintercept=0)+geom_abline(intercept=0,slope=1)+
labs(title='Effect Estimates',x='Separate Regressions',y='Together')
### SEs
normal%>%
select(-effect)%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
full_join(effs1)%>%
ggplot(aes(std.error,se,color=type))+geom_point()+
geom_abline(intercept=0,slope=1)+
labs(title='Standard Errors',x='Separate Regressions',y='Together')+xlim(0,0.4)+ylim(0,0.4)
normalOLS%>%
rename(prob=terranova_problem_id)%>%
mutate(prob=factor(prob,levels=c(1:36,paste0(37,c('A','B','C')))))%>%
full_join(effs1)%>%
ggplot(aes(estimate,effect,color=type))+geom_point()+
geom_hline(yintercept=0)+geom_vline(xintercept=0)+
geom_smooth(method='lm',se=FALSE)+
labs(title='Effect Estimates',x='Separate OLS Regressions',y='Together')
### anova: test for between-problem effects
### se for open ended
# SE of the open-ended treatment effect: var(b_trt) + var(b_trt:open) + 2*cov,
# taken from the fixed-effect covariance matrix of mod1.
seoe <- sqrt(
vcov(mod1)['conditionTreatment','conditionTreatment']+
vcov(mod1)['conditionTreatment:typeopen-ended','conditionTreatment:typeopen-ended']+
2*vcov(mod1)['conditionTreatment','conditionTreatment:typeopen-ended']
)
# Per-item random treatment effects and their conditional variances.
re <- ranef(mod1,condVar=TRUE)
effs <- re$terranova_problem_id$conditionTreatment
names(effs) <- rownames(re$terranova_problem_id)
V <- apply(attr(re$terranova_problem_id,'postVar'),3,function(x) x[2,2])
names(V) <- names(effs)
V <- V[order(effs)]
effs <- effs[order(effs)]
# Two-sided normal p-values for every pairwise difference of item effects.
ps <- outer(names(effs),names(effs),function(n1,n2) 2*pnorm(-abs(effs[n1]-effs[n2])/sqrt(V[n1]+V[n2])))
stars <- ifelse(ps<0.001,'p<0.001',ifelse(ps<0.01,'p<0.01',ifelse(ps<0.05,'p<0.05',NA)))#ifelse(ps<0.1,'.',''))))
rownames(stars) <- colnames(stars) <- names(effs)
# NOTE(review): only the column *names* are reversed here, not the columns of
# `stars` themselves -- confirm this is what the plot.matrix call expects.
colnames(stars) <- rev(names(effs))
# Plot the 39x39 pairwise-significance matrix `stars` (plot.matrix method).
# `key` toggles a colour key in the right margin; default it when the script
# is run top-to-bottom and `key` was never set.
if (!exists("key")) key <- FALSE
omar <- par()$mar
# NOTE(review): on.exit() only takes effect inside a function; at top level
# the margins must be restored manually with par(mar=omar) afterwards.
on.exit(par(mar=omar))
if(key) par(mar=c(2, 2, 2, 4.1)) else par(mar=c(2,2,2,0)) # adapt margins
plot(stars,asp=TRUE, axis.col=list(at=seq(1,39,2),labels=colnames(stars)[seq(1,39,2)],pos=0,cex.axis=.75,gap.axis=0,mgp=c(1,0,1),lwd.ticks=0),
axis.row=list(at=1:39,labels=colnames(stars),pos=0,cex.axis=0.5,gap.axis=0,mgp=c(1,0.5,1),las=2))#,main=paste(level,'Year',year),
# key=if(key) list(side=4,las=1) else NULL)
# BUG FIX: axis() requires `at` and `labels` of equal length. seq(2,39,2)
# yields 19 positions but the labels were indexed with seq(2,32,2) (16
# values), so the call errored. Index the labels with the same sequence.
axis(1,at=seq(2,39,2),labels=colnames(stars)[seq(2,39,2)],cex.axis=.75,gap.axis=0,tick=FALSE,mgp=c(1,1,1))
|
# Weight and size of workers and males across treatment groups.
# The data are non-normal with many missing values, so non-parametric
# (Kruskal-Wallis / Nemenyi / Wilcoxon) tests are used throughout.
# NOTE(review): this file was saved as a console transcript (commands mixed
# with printed output), which is not parseable R; the output has been folded
# into comments so the script can be source()'d.

# Exploratory boxplots per treatment group.
boxplot(wweight~group, data=worker_male_size)
boxplot(wwidth~group, data=worker_male_size)
boxplot(mweight~group, data=worker_male_size, na.action = na.omit)
boxplot(mwidth~group, data=worker_male_size, na.action = na.omit)

# mweight/mwidth were read as non-numeric (presumably because of the NA
# entries), so coerce them before plotting/testing.
worker_male_size$mweight <- as.numeric(as.character(worker_male_size$mweight))
worker_male_size$mwidth <- as.numeric(as.character(worker_male_size$mwidth))

# kruskal.test(formula) needs a factor grouping variable; without this the
# original session failed with "all group levels must be finite".
worker_male_size$group <- as.factor(worker_male_size$group)

kruskal.test(wweight~group, data=worker_male_size)
# -> chi-squared = 5.9792, df = 6, p = 0.4255: worker weight n.s.
kruskal.test(wwidth~group, data=worker_male_size)
# -> chi-squared = 8.1816, df = 6, p = 0.2251: worker size n.s.
kruskal.test(mweight~group, data=worker_male_size, na.action = na.omit)
# -> chi-squared = 7.3313, df = 6, p = 0.2913: male weight n.s.
kruskal.test(mwidth~group, data=worker_male_size, na.action = na.omit)
# -> chi-squared = 10.639, df = 6, p = 0.1002: male size n.s.
# Treatments show no detectable effect on worker/male weight or size.

# Do treatments affect the number of workers, males, brood cells and queen
# cells produced per colony?
View(offspring)
boxplot(no.wrk~group, data=offspring)
boxplot(no.mls~group, data=offspring)
boxplot(no.brd~group, data=offspring)
boxplot(no.qcells~group, data=offspring)

offspring$group <- as.factor(offspring$group)
# BUG FIX: the data argument was misspelled `offspspring` in the transcript.
kruskal.test(no.wrk~group, data=offspring)
# -> chi-squared = 2.417, df = 6, p = 0.8776: number of workers n.s.
kruskal.test(no.mls~group, data=offspring)
# -> chi-squared = 7.6512, df = 6, p = 0.2648: number of males n.s.
kruskal.test(no.brd~group, data=offspring)
# -> chi-squared = 16.561, df = 6, p = 0.01104: brood differs between groups.

library(PMCMRplus)
# NOTE(review): posthoc.kruskal.nemenyi.test() comes from the older PMCMR
# package; in PMCMRplus the equivalent is kwAllPairsNemenyiTest() -- confirm
# which package is actually installed before running.
posthoc.kruskal.nemenyi.test(no.brd ~ group, data=offspring, dist="Tukey")
# -> only T4 (neonics + pyrethroids + Nosema) vs control reaches p < 0.05
#    (p = 0.025). Ties were present, so p-values are not corrected.

kruskal.test(no.qcells~group, data=offspring)
# -> chi-squared = 15.931, df = 6, p = 0.01413: queen cells differ.
posthoc.kruskal.nemenyi.test(no.qcells ~ group, data=offspring, dist="Tukey")
# -> T4 vs control p = 0.079 (ties present, uncorrected).

# Cross-check with pairwise Wilcoxon tests, Holm-adjusted p-values.
# (Holm was the effective method before as well: "holm" is the first entry of
# p.adjust.methods, which was previously passed whole.)
pairwise.wilcox.test(offspring$no.qcells, offspring$group, p.adjust.method = "holm", paired = FALSE)
# -> T4 vs control p = 0.051: T4 produced fewer queens than the control.
| /fitness parameters.R | no_license | cristinabotias/multiple-stressors-experiment | R | false | false | 5,952 | r | #Análisis de datos sobre peso y tamaño de obreras y machos de las colonias:
# Weight and size of workers and males across treatment groups.
# The data are non-normal with many missing values, so non-parametric
# (Kruskal-Wallis / Nemenyi / Wilcoxon) tests are used throughout.
# NOTE(review): this file was saved as a console transcript (commands mixed
# with printed output), which is not parseable R; the output has been folded
# into comments so the script can be source()'d.

# Exploratory boxplots per treatment group.
boxplot(wweight~group, data=worker_male_size)
boxplot(wwidth~group, data=worker_male_size)
boxplot(mweight~group, data=worker_male_size, na.action = na.omit)
boxplot(mwidth~group, data=worker_male_size, na.action = na.omit)

# mweight/mwidth were read as non-numeric (presumably because of the NA
# entries), so coerce them before plotting/testing.
worker_male_size$mweight <- as.numeric(as.character(worker_male_size$mweight))
worker_male_size$mwidth <- as.numeric(as.character(worker_male_size$mwidth))

# kruskal.test(formula) needs a factor grouping variable; without this the
# original session failed with "all group levels must be finite".
worker_male_size$group <- as.factor(worker_male_size$group)

kruskal.test(wweight~group, data=worker_male_size)
# -> chi-squared = 5.9792, df = 6, p = 0.4255: worker weight n.s.
kruskal.test(wwidth~group, data=worker_male_size)
# -> chi-squared = 8.1816, df = 6, p = 0.2251: worker size n.s.
kruskal.test(mweight~group, data=worker_male_size, na.action = na.omit)
# -> chi-squared = 7.3313, df = 6, p = 0.2913: male weight n.s.
kruskal.test(mwidth~group, data=worker_male_size, na.action = na.omit)
# -> chi-squared = 10.639, df = 6, p = 0.1002: male size n.s.
# Treatments show no detectable effect on worker/male weight or size.

# Do treatments affect the number of workers, males, brood cells and queen
# cells produced per colony?
View(offspring)
boxplot(no.wrk~group, data=offspring)
boxplot(no.mls~group, data=offspring)
boxplot(no.brd~group, data=offspring)
boxplot(no.qcells~group, data=offspring)

offspring$group <- as.factor(offspring$group)
# BUG FIX: the data argument was misspelled `offspspring` in the transcript.
kruskal.test(no.wrk~group, data=offspring)
# -> chi-squared = 2.417, df = 6, p = 0.8776: number of workers n.s.
kruskal.test(no.mls~group, data=offspring)
# -> chi-squared = 7.6512, df = 6, p = 0.2648: number of males n.s.
kruskal.test(no.brd~group, data=offspring)
# -> chi-squared = 16.561, df = 6, p = 0.01104: brood differs between groups.

library(PMCMRplus)
# NOTE(review): posthoc.kruskal.nemenyi.test() comes from the older PMCMR
# package; in PMCMRplus the equivalent is kwAllPairsNemenyiTest() -- confirm
# which package is actually installed before running.
posthoc.kruskal.nemenyi.test(no.brd ~ group, data=offspring, dist="Tukey")
# -> only T4 (neonics + pyrethroids + Nosema) vs control reaches p < 0.05
#    (p = 0.025). Ties were present, so p-values are not corrected.

kruskal.test(no.qcells~group, data=offspring)
# -> chi-squared = 15.931, df = 6, p = 0.01413: queen cells differ.
posthoc.kruskal.nemenyi.test(no.qcells ~ group, data=offspring, dist="Tukey")
# -> T4 vs control p = 0.079 (ties present, uncorrected).

# Cross-check with pairwise Wilcoxon tests, Holm-adjusted p-values.
# (Holm was the effective method before as well: "holm" is the first entry of
# p.adjust.methods, which was previously passed whole.)
pairwise.wilcox.test(offspring$no.qcells, offspring$group, p.adjust.method = "holm", paired = FALSE)
# -> T4 vs control p = 0.051: T4 produced fewer queens than the control.
|
# Outlier Analysis
# Exploratory look at outlier detection: boxplots, Cook's distance,
# regression-based outlier tests, and capping of extreme values.

# Toy vectors with extreme values.
marks <- c(1:80, 100, 1000)
marks2 <- c(1:80, 100)
marks
boxplot(marks2)
summary(marks)
boxplot(marks)

# Quadratic fit on the women data; diagnostics (Cook's distance is panel 4).
fit2 <- lm(weight ~ height + I(height^2), data = women)
par(mfrow = c(2, 2)); plot(fit2)
par(mfrow = c(1, 1)); plot(fit2, which = 4)
?plot

# Refit without the two high-influence rows flagged above.
fit3 <- lm(weight ~ height + I(height^2), data = women[-c(13, 15), ])
fit3
par(mfrow = c(1, 1)); plot(fit3, which = 4)

names(mtcars)
fit <- lm(mpg ~ wt + cyl + hp + am + gear, data = mtcars)
boxplot(mtcars)
mtv1 <- c('hp', 'disp')
boxplot(mtcars[mtv1])
# BUG FIX: mtcars[-c('hp','disp'), ] is invalid -- negative indexing does not
# accept character subscripts (and it also targeted rows, not columns).
# Drop those columns by position instead:
ix <- match(mtv1, names(mtcars))
boxplot(mtcars[, -ix])

library(car)
# BUG FIX: car::outlier.test() no longer exists; the function is outlierTest().
car::outlierTest(fit)

library(outliers)
set.seed(1234)
x <- rnorm(10)
chisq.out.test(x)
chisq.out.test(x, opposite = TRUE)

car::outlierTest(fit)  # BUG FIX: was outlier.test(fit, labels=names(rstud)); `rstud` was never defined
car::outlierTest(lm(prestige ~ income + education, data = Duncan))
car::avPlots(fit, ask = FALSE, id.method = 'identify')
car::influencePlot(fit)  # BUG FIX: influencePlot() requires a fitted model argument

# Identify points interactively ----
set.seed(482)
y <- rnorm(100)
boxplot(y)
identify(rep(1, length(y)), y, labels = seq_along(y))

# Univariate outliers via boxplot.stats ----
url <- "http://rstatistics.net/wp-content/uploads/2015/09/ozone.csv"
# alternate source: https://raw.githubusercontent.com/selva86/datasets/master/ozone.csv
inputData <- read.csv(url) # import data
outlier_values <- boxplot.stats(inputData$pressure_height)$out
outlier_values
boxplot(inputData$pressure_height, main = "Pressure Height", boxwex = 0.1)
mtext(paste("Outliers: ", paste(outlier_values, collapse = ", ")), cex = 0.6)

# Bivariate: outliers conditional on a categorical variable ----
ozone <- read.csv(url)
# For categorical variable
boxplot(ozone_reading ~ Month, data = ozone, main = "Ozone reading across months") # clear pattern is noticeable.
boxplot(ozone_reading ~ Day_of_week, data = ozone, main = "Ozone reading for days of week") # this may not be significant, as day of week variable is a subset of the month var.

# Cap values outside the Tukey fences at the 5th/95th percentiles.
x <- ozone$pressure_height
qnt <- quantile(x, probs = c(.25, .75), na.rm = TRUE)
caps <- quantile(x, probs = c(.05, .95), na.rm = TRUE)
H <- 1.5 * IQR(x, na.rm = TRUE)
x[x < (qnt[1] - H)] <- caps[1]
x[x > (qnt[2] + H)] <- caps[2]
| /DH/outlier1.R | no_license | GopalKrishna-P/analytics | R | false | false | 2,145 | r | # Outlier Analysis
# Outlier Analysis
# Exploratory look at outlier detection: boxplots, Cook's distance,
# regression-based outlier tests, and capping of extreme values.

# Toy vectors with extreme values.
marks <- c(1:80, 100, 1000)
marks2 <- c(1:80, 100)
marks
boxplot(marks2)
summary(marks)
boxplot(marks)

# Quadratic fit on the women data; diagnostics (Cook's distance is panel 4).
fit2 <- lm(weight ~ height + I(height^2), data = women)
par(mfrow = c(2, 2)); plot(fit2)
par(mfrow = c(1, 1)); plot(fit2, which = 4)
?plot

# Refit without the two high-influence rows flagged above.
fit3 <- lm(weight ~ height + I(height^2), data = women[-c(13, 15), ])
fit3
par(mfrow = c(1, 1)); plot(fit3, which = 4)

names(mtcars)
fit <- lm(mpg ~ wt + cyl + hp + am + gear, data = mtcars)
boxplot(mtcars)
mtv1 <- c('hp', 'disp')
boxplot(mtcars[mtv1])
# BUG FIX: mtcars[-c('hp','disp'), ] is invalid -- negative indexing does not
# accept character subscripts (and it also targeted rows, not columns).
# Drop those columns by position instead:
ix <- match(mtv1, names(mtcars))
boxplot(mtcars[, -ix])

library(car)
# BUG FIX: car::outlier.test() no longer exists; the function is outlierTest().
car::outlierTest(fit)

library(outliers)
set.seed(1234)
x <- rnorm(10)
chisq.out.test(x)
chisq.out.test(x, opposite = TRUE)

car::outlierTest(fit)  # BUG FIX: was outlier.test(fit, labels=names(rstud)); `rstud` was never defined
car::outlierTest(lm(prestige ~ income + education, data = Duncan))
car::avPlots(fit, ask = FALSE, id.method = 'identify')
car::influencePlot(fit)  # BUG FIX: influencePlot() requires a fitted model argument

# Identify points interactively ----
set.seed(482)
y <- rnorm(100)
boxplot(y)
identify(rep(1, length(y)), y, labels = seq_along(y))

# Univariate outliers via boxplot.stats ----
url <- "http://rstatistics.net/wp-content/uploads/2015/09/ozone.csv"
# alternate source: https://raw.githubusercontent.com/selva86/datasets/master/ozone.csv
inputData <- read.csv(url) # import data
outlier_values <- boxplot.stats(inputData$pressure_height)$out
outlier_values
boxplot(inputData$pressure_height, main = "Pressure Height", boxwex = 0.1)
mtext(paste("Outliers: ", paste(outlier_values, collapse = ", ")), cex = 0.6)

# Bivariate: outliers conditional on a categorical variable ----
ozone <- read.csv(url)
# For categorical variable
boxplot(ozone_reading ~ Month, data = ozone, main = "Ozone reading across months") # clear pattern is noticeable.
boxplot(ozone_reading ~ Day_of_week, data = ozone, main = "Ozone reading for days of week") # this may not be significant, as day of week variable is a subset of the month var.

# Cap values outside the Tukey fences at the 5th/95th percentiles.
x <- ozone$pressure_height
qnt <- quantile(x, probs = c(.25, .75), na.rm = TRUE)
caps <- quantile(x, probs = c(.05, .95), na.rm = TRUE)
H <- 1.5 * IQR(x, na.rm = TRUE)
x[x < (qnt[1] - H)] <- caps[1]
x[x > (qnt[2] + H)] <- caps[2]
|
## Plot 3
## Objective
## --------------------------------------------------------------
## Of the four source types (point, nonpoint, onroad, nonroad),
## which saw PM2.5 emission decreases in Baltimore City from
## 1999-2008, and which saw increases? Answered with a faceted
## ggplot2 bar chart.

library(ggplot2)
library(dplyr)

## Note: data provided by course instructor, but can be reproduced
## using the source listed in README.md
## Reading the emissions table may take a few seconds.
nei <- readRDS("summarySCC_PM25.rds")
scc <- readRDS("Source_Classification_Code.rds")

## Total PM2.5 per year and source type for Baltimore City (fips 24510).
baltimore_totals <- nei %>%
  filter(fips == "24510") %>%
  group_by(year, type) %>%
  summarise(total = sum(Emissions))

png(filename = "plot3.png")
emissions_plot <- ggplot(baltimore_totals, aes(x = factor(year), y = total)) +
  geom_bar(stat = "identity", fill = "blue", alpha = 0.75) +
  facet_wrap(~type) +
  labs(x = "Year", y = "Tons of PM2.5",
       title = "Total Emissions by Year and Type\nBaltimore City, Maryland") +
  theme_bw()
print(emissions_plot)
dev.off()
| /data_exploration/plot3.R | no_license | brandonat/coursera_data_science | R | false | false | 1,126 | r | ## Plot 3
## Plot 3
## Objective
## --------------------------------------------------------------
## Of the four source types (point, nonpoint, onroad, nonroad),
## which saw PM2.5 emission decreases in Baltimore City from
## 1999-2008, and which saw increases? Answered with a faceted
## ggplot2 bar chart.

library(ggplot2)
library(dplyr)

## Note: data provided by course instructor, but can be reproduced
## using the source listed in README.md
## Reading the emissions table may take a few seconds.
nei <- readRDS("summarySCC_PM25.rds")
scc <- readRDS("Source_Classification_Code.rds")

## Total PM2.5 per year and source type for Baltimore City (fips 24510).
baltimore_totals <- nei %>%
  filter(fips == "24510") %>%
  group_by(year, type) %>%
  summarise(total = sum(Emissions))

png(filename = "plot3.png")
emissions_plot <- ggplot(baltimore_totals, aes(x = factor(year), y = total)) +
  geom_bar(stat = "identity", fill = "blue", alpha = 0.75) +
  facet_wrap(~type) +
  labs(x = "Year", y = "Tons of PM2.5",
       title = "Total Emissions by Year and Type\nBaltimore City, Maryland") +
  theme_bw()
print(emissions_plot)
dev.off()
|
# Install the packages the Shiny app depends on, in a single vectorized call
# (one repository lookup instead of seven). colourpicker is installed on the
# following line.
install.packages(c(
  "shiny",          # web application framework
  "shinydashboard", # dashboard layout
  "shinyWidgets",   # extra input widgets
  "DT",             # interactive tables
  "plotly",         # interactive plots
  "ggplot2",        # static plots
  "googleVis"       # Google Charts bindings
))
install.packages("colourpicker") | /install.R | no_license | thomasdenecker/bioinfo-fr_Shiny | R | false | false | 234 | r | install.packages("shiny")
# Install the remaining dependencies in a single vectorized call (shiny
# itself is installed on the line above).
install.packages(c(
  "shinydashboard", # dashboard layout
  "shinyWidgets",   # extra input widgets
  "DT",             # interactive tables
  "plotly",         # interactive plots
  "ggplot2",        # static plots
  "googleVis",      # Google Charts bindings
  "colourpicker"    # colour-picker input widget
))
# Print with default quoting: console shows [1] "Hello World!"
print( "Hello World!" )
print( "Hello World!", quote=FALSE ) | /SRC/MyRScripts/Hello.R | no_license | djohnson67/RScripts | R | false | false | 63 | r | print( "Hello World!" )
# quote=FALSE suppresses the quotation marks: console shows [1] Hello World!
print( "Hello World!", quote=FALSE )
# Draw a clustered heatmap like stats::heatmap(), extended so that
# RowSideColors / ColSideColors may be *matrices* of colours (several
# annotation bars per side) instead of single vectors.
#
# Arguments mirror stats::heatmap(); the key differences are:
#   RowSideColors: character matrix with nrow == nrow(x), one column per bar.
#   ColSideColors: character matrix with nrow == ncol(x), one column per bar.
# Returns (invisibly) a list with rowInd/colInd (the plotting orders) and,
# when keep.dendro = TRUE, the row/column dendrograms.
"heatmap.plus" <-
function (x, Rowv = NULL, Colv = if (symm) "Rowv" else NULL,
    distfun = dist, hclustfun = hclust, reorderfun = function(d,
        w) reorder(d, w), add.expr, symm = FALSE, revC = identical(Colv,
        "Rowv"), scale = c("row", "column", "none"), na.rm = TRUE,
    margins = c(5, 5), ColSideColors, RowSideColors, cexRow = 0.2 +
        1/log10(nr), cexCol = 0.2 + 1/log10(nc), labRow = NULL,
    labCol = NULL, main = NULL, xlab = NULL, ylab = NULL, keep.dendro = FALSE,
    verbose = getOption("verbose"), ...)
{
    ## --- argument checks -------------------------------------------------
    scale <- if (symm && missing(scale))
        "none"
    else match.arg(scale)
    if (length(di <- dim(x)) != 2 || !is.numeric(x))
        stop("'x' must be a numeric matrix")
    nr <- di[1]
    nc <- di[2]
    if (nr <= 1 || nc <= 1)
        stop("'x' must have at least 2 rows and 2 columns")
    if (!is.numeric(margins) || length(margins) != 2)
        stop("'margins' must be a numeric vector of length 2")
    ## --- dendrograms and row/column orderings ----------------------------
    doRdend <- !identical(Rowv, NA)
    doCdend <- !identical(Colv, NA)
    if (is.null(Rowv))
        Rowv <- rowMeans(x, na.rm = na.rm)
    if (is.null(Colv))
        Colv <- colMeans(x, na.rm = na.rm)
    if (doRdend) {
        if (inherits(Rowv, "dendrogram"))
            ddr <- Rowv
        else {
            hcr <- hclustfun(distfun(x))
            ddr <- as.dendrogram(hcr)
            if (!is.logical(Rowv) || Rowv)
                ddr <- reorderfun(ddr, Rowv)
        }
        if (nr != length(rowInd <- order.dendrogram(ddr)))
            stop("row dendrogram ordering gave index of wrong length")
    }
    else rowInd <- 1:nr
    if (doCdend) {
        if (inherits(Colv, "dendrogram"))
            ddc <- Colv
        else if (identical(Colv, "Rowv")) {
            if (nr != nc)
                stop("Colv = \"Rowv\" but nrow(x) != ncol(x)")
            ddc <- ddr
        }
        else {
            hcc <- hclustfun(distfun(if (symm)
                x
            else t(x)))
            ddc <- as.dendrogram(hcc)
            if (!is.logical(Colv) || Colv)
                ddc <- reorderfun(ddc, Colv)
        }
        if (nc != length(colInd <- order.dendrogram(ddc)))
            stop("column dendrogram ordering gave index of wrong length")
    }
    else colInd <- 1:nc
    x <- x[rowInd, colInd]
    ## --- labels (default to indices / dimnames of the reordered matrix) --
    labRow <- if (is.null(labRow))
        if (is.null(rownames(x)))
            (1:nr)[rowInd]
        else rownames(x)
    else labRow[rowInd]
    labCol <- if (is.null(labCol))
        if (is.null(colnames(x)))
            (1:nc)[colInd]
        else colnames(x)
    else labCol[colInd]
    ## --- optional row/column standardisation -----------------------------
    if (scale == "row") {
        x <- sweep(x, 1, rowMeans(x, na.rm = na.rm))
        sx <- apply(x, 1, sd, na.rm = na.rm)
        x <- sweep(x, 1, sx, "/")
    }
    else if (scale == "column") {
        x <- sweep(x, 2, colMeans(x, na.rm = na.rm))
        sx <- apply(x, 2, sd, na.rm = na.rm)
        x <- sweep(x, 2, sx, "/")
    }
    ## --- layout: heatmap + dendrograms (+ optional annotation bars) ------
    lmat <- rbind(c(NA, 3), 2:1)
    lwid <- c(if (doRdend) 1 else 0.05, 4)
    lhei <- c((if (doCdend) 1 else 0.05) + if (!is.null(main)) 0.2 else 0,
        4)
    if (!missing(ColSideColors)) {
        if (!is.matrix(ColSideColors))
            stop("'ColSideColors' must be a matrix")
        ## BUG FIX: the message used to reference dim()[2] while the check is
        ## on dim()[1] (one annotation row per column of x).
        if (!is.character(ColSideColors) || dim(ColSideColors)[1] != nc)
            stop("'ColSideColors' must be a character matrix with nrow(ColSideColors) == ncol(x)")
        lmat <- rbind(lmat[1, ] + 1, c(NA, 1), lmat[2, ] + 1)
        lhei <- c(lhei[1], 0.2, lhei[2])
    }
    if (!missing(RowSideColors)) {
        if (!is.matrix(RowSideColors))
            stop("'RowSideColors' must be a matrix")
        ## BUG FIX: the message used to demand a "character vector" although
        ## a matrix is required by the check just above.
        if (!is.character(RowSideColors) || dim(RowSideColors)[1] != nr)
            stop("'RowSideColors' must be a character matrix with nrow(RowSideColors) == nrow(x)")
        lmat <- cbind(lmat[, 1] + 1, c(rep(NA, nrow(lmat) - 1),
            1), lmat[, 2] + 1)
        lwid <- c(lwid[1], 0.2, lwid[2])
    }
    lmat[is.na(lmat)] <- 0
    if (verbose) {
        cat("layout: widths = ", lwid, ", heights = ", lhei,
            "; lmat=\n")
        print(lmat)
    }
    op <- par(no.readonly = TRUE)
    on.exit(par(op))  # restore graphics state whatever happens
    layout(lmat, widths = lwid, heights = lhei, respect = FALSE)
    ## --- row annotation bars ---------------------------------------------
    ## Colours are mapped to integer codes so image() can render them with a
    ## discrete palette.
    ## NOTE(review): a one-column side-colour matrix is dropped to a vector by
    ## `[rowInd, ]` here -- confirm callers always pass >= 2 columns.
    if (!missing(RowSideColors)) {
        par(mar = c(margins[1], 0, 0, 0.5))
        rsc = RowSideColors[rowInd, ]
        rsc.colors = matrix()
        rsc.names = names(table(rsc))
        rsc.i = 1
        for (rsc.name in rsc.names) {
            rsc.colors[rsc.i] = rsc.name
            rsc[rsc == rsc.name] = rsc.i
            rsc.i = rsc.i + 1
        }
        rsc = matrix(as.numeric(rsc), nrow = dim(rsc)[1])
        image(t(rsc), col = as.vector(rsc.colors), axes = FALSE)
        if (length(colnames(RowSideColors)) > 0) {
            axis(1, 0:(dim(rsc)[2] - 1)/(dim(rsc)[2] - 1), colnames(RowSideColors),
                las = 2, tick = FALSE)
        }
    }
    ## --- column annotation bars ------------------------------------------
    if (!missing(ColSideColors)) {
        par(mar = c(0.5, 0, 0, margins[2]))
        csc = ColSideColors[colInd, ]
        csc.colors = matrix()
        csc.names = names(table(csc))
        csc.i = 1
        for (csc.name in csc.names) {
            csc.colors[csc.i] = csc.name
            csc[csc == csc.name] = csc.i
            csc.i = csc.i + 1
        }
        csc = matrix(as.numeric(csc), nrow = dim(csc)[1])
        image(csc, col = as.vector(csc.colors), axes = FALSE)
        if (length(colnames(ColSideColors)) > 0) {
            axis(2, 0:(dim(csc)[2] - 1)/(dim(csc)[2] - 1), colnames(ColSideColors),
                las = 2, tick = FALSE)
        }
    }
    ## --- the heatmap itself ----------------------------------------------
    par(mar = c(margins[1], 0, 0, margins[2]))
    if (!symm || scale != "none") {
        x <- t(x)
    }
    if (revC) {
        iy <- nr:1
        ddr <- rev(ddr)
        x <- x[, iy]
    }
    else iy <- 1:nr
    image(1:nc, 1:nr, x, xlim = 0.5 + c(0, nc), ylim = 0.5 +
        c(0, nr), axes = FALSE, xlab = "", ylab = "", ...)
    axis(1, 1:nc, labels = labCol, las = 2, line = -0.5, tick = 0,
        cex.axis = cexCol)
    if (!is.null(xlab))
        mtext(xlab, side = 1, line = margins[1] - 1.25)
    axis(4, iy, labels = labRow, las = 2, line = -0.5, tick = 0,
        cex.axis = cexRow)
    if (!is.null(ylab))
        mtext(ylab, side = 4, line = margins[2] - 1.25)
    if (!missing(add.expr))
        eval(substitute(add.expr))
    ## --- dendrogram panels and title -------------------------------------
    par(mar = c(margins[1], 0, 0, 0))
    if (doRdend)
        plot(ddr, horiz = TRUE, axes = FALSE, yaxs = "i", leaflab = "none")
    else frame()
    par(mar = c(0, 0, if (!is.null(main)) 1 else 0, margins[2]))
    if (doCdend)
        plot(ddc, axes = FALSE, xaxs = "i", leaflab = "none")
    else if (!is.null(main))
        frame()
    if (!is.null(main))
        title(main, cex.main = 1.5 * op[["cex.main"]])
    invisible(list(rowInd = rowInd, colInd = colInd, Rowv = if (keep.dendro &&
        doRdend) ddr, Colv = if (keep.dendro && doCdend) ddc))
}
| /heatmap.plus/R/heatmap.plus.R | no_license | ingted/R-Examples | R | false | false | 6,747 | r | "heatmap.plus" <-
function (x, Rowv = NULL, Colv = if (symm) "Rowv" else NULL,
distfun = dist, hclustfun = hclust, reorderfun = function(d,
w) reorder(d, w), add.expr, symm = FALSE, revC = identical(Colv,
"Rowv"), scale = c("row", "column", "none"), na.rm = TRUE,
margins = c(5, 5), ColSideColors, RowSideColors, cexRow = 0.2 +
1/log10(nr), cexCol = 0.2 + 1/log10(nc), labRow = NULL,
labCol = NULL, main = NULL, xlab = NULL, ylab = NULL, keep.dendro = FALSE,
verbose = getOption("verbose"), ...)
{
scale <- if (symm && missing(scale))
"none"
else match.arg(scale)
if (length(di <- dim(x)) != 2 || !is.numeric(x))
stop("'x' must be a numeric matrix")
nr <- di[1]
nc <- di[2]
if (nr <= 1 || nc <= 1)
stop("'x' must have at least 2 rows and 2 columns")
if (!is.numeric(margins) || length(margins) != 2)
stop("'margins' must be a numeric vector of length 2")
doRdend <- !identical(Rowv, NA)
doCdend <- !identical(Colv, NA)
if (is.null(Rowv))
Rowv <- rowMeans(x, na.rm = na.rm)
if (is.null(Colv))
Colv <- colMeans(x, na.rm = na.rm)
if (doRdend) {
if (inherits(Rowv, "dendrogram"))
ddr <- Rowv
else {
hcr <- hclustfun(distfun(x))
ddr <- as.dendrogram(hcr)
if (!is.logical(Rowv) || Rowv)
ddr <- reorderfun(ddr, Rowv)
}
if (nr != length(rowInd <- order.dendrogram(ddr)))
stop("row dendrogram ordering gave index of wrong length")
}
else rowInd <- 1:nr
if (doCdend) {
if (inherits(Colv, "dendrogram"))
ddc <- Colv
else if (identical(Colv, "Rowv")) {
if (nr != nc)
stop("Colv = \"Rowv\" but nrow(x) != ncol(x)")
ddc <- ddr
}
else {
hcc <- hclustfun(distfun(if (symm)
x
else t(x)))
ddc <- as.dendrogram(hcc)
if (!is.logical(Colv) || Colv)
ddc <- reorderfun(ddc, Colv)
}
if (nc != length(colInd <- order.dendrogram(ddc)))
stop("column dendrogram ordering gave index of wrong length")
}
else colInd <- 1:nc
x <- x[rowInd, colInd]
labRow <- if (is.null(labRow))
if (is.null(rownames(x)))
(1:nr)[rowInd]
else rownames(x)
else labRow[rowInd]
labCol <- if (is.null(labCol))
if (is.null(colnames(x)))
(1:nc)[colInd]
else colnames(x)
else labCol[colInd]
if (scale == "row") {
x <- sweep(x, 1, rowMeans(x, na.rm = na.rm))
sx <- apply(x, 1, sd, na.rm = na.rm)
x <- sweep(x, 1, sx, "/")
}
else if (scale == "column") {
x <- sweep(x, 2, colMeans(x, na.rm = na.rm))
sx <- apply(x, 2, sd, na.rm = na.rm)
x <- sweep(x, 2, sx, "/")
}
lmat <- rbind(c(NA, 3), 2:1)
lwid <- c(if (doRdend) 1 else 0.05, 4)
lhei <- c((if (doCdend) 1 else 0.05) + if (!is.null(main)) 0.2 else 0,
4)
if (!missing(ColSideColors)) {
if (!is.matrix(ColSideColors))
stop("'ColSideColors' must be a matrix")
if (!is.character(ColSideColors) || dim(ColSideColors)[1] != nc)
stop("'ColSideColors' dim()[2] must be of length ncol(x)")
lmat <- rbind(lmat[1, ] + 1, c(NA, 1), lmat[2, ] + 1)
lhei <- c(lhei[1], 0.2, lhei[2])
}
if (!missing(RowSideColors)) {
if (!is.matrix(RowSideColors))
stop("'RowSideColors' must be a matrix")
if (!is.character(RowSideColors) || dim(RowSideColors)[1] != nr)
stop("'RowSideColors' must be a character vector of length nrow(x)")
lmat <- cbind(lmat[, 1] + 1, c(rep(NA, nrow(lmat) - 1),
1), lmat[, 2] + 1)
lwid <- c(lwid[1], 0.2, lwid[2])
}
lmat[is.na(lmat)] <- 0
if (verbose) {
cat("layout: widths = ", lwid, ", heights = ", lhei,
"; lmat=\n")
print(lmat)
}
op <- par(no.readonly = TRUE)
on.exit(par(op))
layout(lmat, widths = lwid, heights = lhei, respect = FALSE)
if (!missing(RowSideColors)) {
par(mar = c(margins[1], 0, 0, 0.5))
rsc=RowSideColors[rowInd,];
rsc.colors=matrix();
rsc.names=names(table(rsc));
rsc.i=1;
for(rsc.name in rsc.names){
rsc.colors[rsc.i]=rsc.name;
rsc[rsc==rsc.name]=rsc.i;
rsc.i=rsc.i+1;
}
rsc=matrix(as.numeric(rsc), nrow=dim(rsc)[1]);
image(t(rsc), col = as.vector(rsc.colors), axes = FALSE)
if (length(colnames(RowSideColors))>0) {
axis(1, 0:(dim(rsc)[2]-1) / (dim(rsc)[2]-1), colnames(RowSideColors), las=2, tick=FALSE);
}
}
if (!missing(ColSideColors)) {
par(mar = c(0.5, 0, 0, margins[2]))
csc=ColSideColors[colInd,];
csc.colors=matrix();
csc.names=names(table(csc));
csc.i=1;
for(csc.name in csc.names){
csc.colors[csc.i]=csc.name;
csc[csc==csc.name]=csc.i;
csc.i=csc.i+1;
}
csc=matrix(as.numeric(csc), nrow=dim(csc)[1]);
image(csc, col = as.vector(csc.colors), axes = FALSE)
if (length(colnames(ColSideColors))>0) {
axis(2, 0:(dim(csc)[2]-1) / (dim(csc)[2]-1), colnames(ColSideColors), las=2, tick=FALSE);
}
}
par(mar = c(margins[1], 0, 0, margins[2]))
if (!symm || scale != "none") {
x <- t(x)
}
if (revC) {
iy <- nr:1
ddr <- rev(ddr)
x <- x[, iy]
}
else iy <- 1:nr
image(1:nc, 1:nr, x, xlim = 0.5 + c(0, nc), ylim = 0.5 +
c(0, nr), axes = FALSE, xlab = "", ylab = "", ...)
axis(1, 1:nc, labels = labCol, las = 2, line = -0.5, tick = 0,
cex.axis = cexCol)
if (!is.null(xlab))
mtext(xlab, side = 1, line = margins[1] - 1.25)
axis(4, iy, labels = labRow, las = 2, line = -0.5, tick = 0,
cex.axis = cexRow)
if (!is.null(ylab))
mtext(ylab, side = 4, line = margins[2] - 1.25)
if (!missing(add.expr))
eval(substitute(add.expr))
par(mar = c(margins[1], 0, 0, 0))
if (doRdend)
plot(ddr, horiz = TRUE, axes = FALSE, yaxs = "i", leaflab = "none")
else frame()
par(mar = c(0, 0, if (!is.null(main)) 1 else 0, margins[2]))
if (doCdend)
plot(ddc, axes = FALSE, xaxs = "i", leaflab = "none")
else if (!is.null(main))
frame()
if (!is.null(main))
title(main, cex.main = 1.5 * op[["cex.main"]])
invisible(list(rowInd = rowInd, colInd = colInd, Rowv = if (keep.dendro &&
doRdend) ddr, Colv = if (keep.dendro && doCdend) ddc))
}
|
\name{rate.caq}
\alias{rate.caq}
\docType{data}
\title{
CAQ Rating
}
\description{
This is a dataset of CAQ ratings of participants' personality completed by research assistants using likert type ratings.
}
\usage{data(rate.caq)}
\format{
A data frame with 64 observations on the following 100 variables.
\describe{
\item{\code{CAQ001}}{Critical, skeptical, not easily impressed}
\item{\code{CAQ002}}{A genuinely dependable and responsible person}
\item{\code{CAQ003}}{Has a wide range of interests}
\item{\code{CAQ004}}{Talkative}
\item{\code{CAQ005}}{Behaves in a giving way toward others}
\item{\code{CAQ006}}{Fastidious, perfectionistic}
\item{\code{CAQ007}}{Favors conservative values}
\item{\code{CAQ008}}{Appears to have a high degree of intellectual capacity}
\item{\code{CAQ009}}{Uncomfortable with uncertainty and complexity}
\item{\code{CAQ010}}{Anxiety and tension find outlet in bodily symptoms}
\item{\code{CAQ011}}{Protective of those close to him or her}
\item{\code{CAQ012}}{Tends to be self-defensive}
\item{\code{CAQ013}}{Thin-skinned; sensitive to criticism or interpersonal slight}
\item{\code{CAQ014}}{Genuinely submissive; accepts domination comfortably}
\item{\code{CAQ015}}{Skilled in social techniques of imaginative play, pretending, and humor}
\item{\code{CAQ016}}{Introspective and concerned with self as an object}
\item{\code{CAQ017}}{Sympathetic and considerate}
\item{\code{CAQ018}}{Initiates humor}
\item{\code{CAQ019}}{Seeks reassurance from others}
\item{\code{CAQ020}}{Has a rapid personal tempo; behaves and acts quickly}
\item{\code{CAQ021}}{Arouses nurturant feelings in others}
\item{\code{CAQ022}}{Feels a lack of personal meaning in life}
\item{\code{CAQ023}}{Extrapunitive; tends to transfer or project blame}
\item{\code{CAQ024}}{Prides self on being objective,rational}
\item{\code{CAQ025}}{Tends toward over-control of needs and impulses}
\item{\code{CAQ026}}{Productive; gets things done}
\item{\code{CAQ027}}{Shows condescending behavior in relations with others}
\item{\code{CAQ028}}{Tends to arouse liking and acceptance }
\item{\code{CAQ029}}{Turned to for advice and reassurance}
\item{\code{CAQ030}}{Gives up and withdraws where possible in the face of frustration and adversity}
\item{\code{CAQ031}}{Regards self as physically attractive}
\item{\code{CAQ032}}{Aware of the impression made on others}
\item{\code{CAQ033}}{Calm, relaxed in manner}
\item{\code{CAQ034}}{Over-reactive to minor frustrations, irritable}
\item{\code{CAQ035}}{Has warmth; has the capacity for close relationships; compassionate}
\item{\code{CAQ036}}{Subtly negativistic; tends to undermine and obstruct }
\item{\code{CAQ037}}{Guileful and deceitful, manipulative, opportunistic}
\item{\code{CAQ038}}{Has hostility toward others}
\item{\code{CAQ039}}{Thinks and associates ideas in unusual ways; has unconventional thought processes}
\item{\code{CAQ040}}{Vulnerable to real or fancied threat, generally fearful}
\item{\code{CAQ041}}{Moralistic}
\item{\code{CAQ042}}{Reluctant to commit to any definite course of action; tends to delay or avoid action}
\item{\code{CAQ043}}{Facially and/or gesturally expressive}
\item{\code{CAQ044}}{Evaluates the motivation of others in interpreting situations}
\item{\code{CAQ045}}{Has a brittle ego-defense system; does not cope well under stress or strainr}
\item{\code{CAQ046}}{Engages in personal fantasy and daydreams}
\item{\code{CAQ047}}{Has a readiness to feel guilt}
\item{\code{CAQ048}}{Keeps people at a distance; avoids close interpersonal relationships}
\item{\code{CAQ049}}{Basically distrustful of people in general}
\item{\code{CAQ050}}{Unpredictable and changeable in behavior and attitudes}
\item{\code{CAQ051}}{Genuinely values intellectual and cognitive matters}
\item{\code{CAQ052}}{Behaves in an assertive fashion}
\item{\code{CAQ053}}{Unable to delay gratification}
\item{\code{CAQ054}}{Emphasizes being with others; gregarious}
\item{\code{CAQ055}}{Self-defeating}
\item{\code{CAQ056}}{Responds to humor}
\item{\code{CAQ057}}{Interesting, arresting person}
\item{\code{CAQ058}}{Enjoys sensuous experiences (touch, taste, smell, physical contact)}
\item{\code{CAQ059}}{Concerned with own body and adequacy of physiological functioning}
\item{\code{CAQ060}}{Has insight into own motives and behavior}
\item{\code{CAQ061}}{Creates and exploits dependency in people}
\item{\code{CAQ062}}{Tends to be rebellious and non-conforming}
\item{\code{CAQ063}}{Judges self and other in conventional terms}
\item{\code{CAQ064}}{Socially perceptive of a wide range of interpersonal cues}
\item{\code{CAQ065}}{Pushes and tries to stretch limits}
\item{\code{CAQ066}}{Enjoys esthetic impressions; is esthetically reactive}
\item{\code{CAQ067}}{Self-indulgent}
\item{\code{CAQ068}}{Basically anxious}
\item{\code{CAQ069}}{Sensitive to anything that can be construed as a demand}
\item{\code{CAQ070}}{Behaves in an ethically consistent manner}
\item{\code{CAQ071}}{Has high aspiration level for self}
\item{\code{CAQ072}}{Concerned with own adequacy as a person}
\item{\code{CAQ073}}{Tends to perceive many different contexts in sexual terms}
\item{\code{CAQ074}}{Subjectively unaware of self-concern; feels satisfied with self}
\item{\code{CAQ075}}{Has a clear-cut, internally consistent personality}
\item{\code{CAQ076}}{Projects feelings and motivations onto others}
\item{\code{CAQ077}}{Appears straightforward, forthright, candid in dealing with others}
\item{\code{CAQ078}}{Feels cheated and victimized by life; self-pitying}
\item{\code{CAQ079}}{Ruminates and has persistent, preoccupying thoughts}
\item{\code{CAQ080}}{Interested in members of the opposite sex}
\item{\code{CAQ081}}{Physically attractive; good-looking}
\item{\code{CAQ082}}{Has fluctuating moods}
\item{\code{CAQ083}}{Able to see to the heart of important problems}
\item{\code{CAQ084}}{Cheerful}
\item{\code{CAQ085}}{Emphasizes communication through action and non-verbal behavior}
\item{\code{CAQ086}}{Repressive and dissociative tendencies; denies unpleasant thoughts and conflicts}
\item{\code{CAQ087}}{Interprets basically simple and clear-cut situations in complicated and particularizing ways}
\item{\code{CAQ088}}{Personally charming}
\item{\code{CAQ089}}{Compares self to others}
\item{\code{CAQ090}}{Concerned with philosophical problems}
\item{\code{CAQ091}}{Power-oriented; values power in self and others}
\item{\code{CAQ092}}{Has social poise and presence; appears socially at ease}
\item{\code{CAQ093}}{Behaves in gender-appropriate masculine or feminine style and manner}
\item{\code{CAQ094}}{Expresses hostile feelings directly}
\item{\code{CAQ095}}{Tends to offer advice}
\item{\code{CAQ096}}{Values own independence and autonomy}
\item{\code{CAQ097}}{Emotionally bland; has flattened affect}
\item{\code{CAQ098}}{Verbally fluent; can express ideas well}
\item{\code{CAQ099}}{Self-dramatizing; histrionic}
\item{\code{CAQ100}}{Does not vary roles; relates to everyone in the same way}
}
}
\source{
\url{http://psy2.fau.edu/~shermanr/index.html}
}
\references{
Serfass, D. G., & Sherman, R. A. (2013). A methodological note on ordered q-sort ratings. Journal of Research in Personality, 47(12), 853-858
}
\examples{
data(rate.caq)
head(rate.caq)
}
\keyword{datasets}
| /man/rate.caq.Rd | no_license | Justin8428/multicon | R | false | false | 7,634 | rd | \name{rate.caq}
\alias{rate.caq}
\docType{data}
\title{
CAQ Rating
}
\description{
This is a dataset of CAQ ratings of participants' personality completed by research assistants using likert type ratings.
}
\usage{data(rate.caq)}
\format{
A data frame with 64 observations on the following 100 variables.
\describe{
\item{\code{CAQ001}}{Critical, skeptical, not easily impressed}
\item{\code{CAQ002}}{A genuinely dependable and responsible person}
\item{\code{CAQ003}}{Has a wide range of interests}
\item{\code{CAQ004}}{Talkative}
\item{\code{CAQ005}}{Behaves in a giving way toward others}
\item{\code{CAQ006}}{Fastidious, perfectionistic}
\item{\code{CAQ007}}{Favors conservative values}
\item{\code{CAQ008}}{Appears to have a high degree of intellectual capacity}
\item{\code{CAQ009}}{Uncomfortable with uncertainty and complexity}
\item{\code{CAQ010}}{Anxiety and tension find outlet in bodily symptoms}
\item{\code{CAQ011}}{Protective of those close to him or her}
\item{\code{CAQ012}}{Tends to be self-defensive}
\item{\code{CAQ013}}{Thin-skinned; sensitive to criticism or interpersonal slight}
\item{\code{CAQ014}}{Genuinely submissive; accepts domination comfortably}
\item{\code{CAQ015}}{Skilled in social techniques of imaginative play, pretending, and humor}
\item{\code{CAQ016}}{Introspective and concerned with self as an object}
\item{\code{CAQ017}}{Sympathetic and considerate}
\item{\code{CAQ018}}{Initiates humor}
\item{\code{CAQ019}}{Seeks reassurance from others}
\item{\code{CAQ020}}{Has a rapid personal tempo; behaves and acts quickly}
\item{\code{CAQ021}}{Arouses nurturant feelings in others}
\item{\code{CAQ022}}{Feels a lack of personal meaning in life}
\item{\code{CAQ023}}{Extrapunitive; tends to transfer or project blame}
\item{\code{CAQ024}}{Prides self on being objective,rational}
\item{\code{CAQ025}}{Tends toward over-control of needs and impulses}
\item{\code{CAQ026}}{Productive; gets things done}
\item{\code{CAQ027}}{Shows condescending behavior in relations with others}
\item{\code{CAQ028}}{Tends to arouse liking and acceptance }
\item{\code{CAQ029}}{Turned to for advice and reassurance}
\item{\code{CAQ030}}{Gives up and withdraws where possible in the face of frustration and adversity}
\item{\code{CAQ031}}{Regards self as physically attractive}
\item{\code{CAQ032}}{Aware of the impression made on others}
\item{\code{CAQ033}}{Calm, relaxed in manner}
\item{\code{CAQ034}}{Over-reactive to minor frustrations, irritable}
\item{\code{CAQ035}}{Has warmth; has the capacity for close relationships; compassionate}
\item{\code{CAQ036}}{Subtly negativistic; tends to undermine and obstruct }
\item{\code{CAQ037}}{Guileful and deceitful, manipulative, opportunistic}
\item{\code{CAQ038}}{Has hostility toward others}
\item{\code{CAQ039}}{Thinks and associates ideas in unusual ways; has unconventional thought processes}
\item{\code{CAQ040}}{Vulnerable to real or fancied threat, generally fearful}
\item{\code{CAQ041}}{Moralistic}
\item{\code{CAQ042}}{Reluctant to commit to any definite course of action; tends to delay or avoid action}
\item{\code{CAQ043}}{Facially and/or gesturally expressive}
\item{\code{CAQ044}}{Evaluates the motivation of others in interpreting situations}
\item{\code{CAQ045}}{Has a brittle ego-defense system; does not cope well under stress or strainr}
\item{\code{CAQ046}}{Engages in personal fantasy and daydreams}
\item{\code{CAQ047}}{Has a readiness to feel guilt}
\item{\code{CAQ048}}{Keeps people at a distance; avoids close interpersonal relationships}
\item{\code{CAQ049}}{Basically distrustful of people in general}
\item{\code{CAQ050}}{Unpredictable and changeable in behavior and attitudes}
\item{\code{CAQ051}}{Genuinely values intellectual and cognitive matters}
\item{\code{CAQ052}}{Behaves in an assertive fashion}
\item{\code{CAQ053}}{Unable to delay gratification}
\item{\code{CAQ054}}{Emphasizes being with others; gregarious}
\item{\code{CAQ055}}{Self-defeating}
\item{\code{CAQ056}}{Responds to humor}
\item{\code{CAQ057}}{Interesting, arresting person}
\item{\code{CAQ058}}{Enjoys sensuous experiences (touch, taste, smell, physical contact)}
\item{\code{CAQ059}}{Concerned with own body and adequacy of physiological functioning}
\item{\code{CAQ060}}{Has insight into own motives and behavior}
\item{\code{CAQ061}}{Creates and exploits dependency in people}
\item{\code{CAQ062}}{Tends to be rebellious and non-conforming}
\item{\code{CAQ063}}{Judges self and other in conventional terms}
\item{\code{CAQ064}}{Socially perceptive of a wide range of interpersonal cues}
\item{\code{CAQ065}}{Pushes and tries to stretch limits}
\item{\code{CAQ066}}{Enjoys esthetic impressions; is esthetically reactive}
\item{\code{CAQ067}}{Self-indulgent}
\item{\code{CAQ068}}{Basically anxious}
\item{\code{CAQ069}}{Sensitive to anything that can be construed as a demand}
\item{\code{CAQ070}}{Behaves in an ethically consistent manner}
\item{\code{CAQ071}}{Has high aspiration level for self}
\item{\code{CAQ072}}{Concerned with own adequacy as a person}
\item{\code{CAQ073}}{Tends to perceive many different contexts in sexual terms}
\item{\code{CAQ074}}{Subjectively unaware of self-concern; feels satisfied with self}
\item{\code{CAQ075}}{Has a clear-cut, internally consistent personality}
\item{\code{CAQ076}}{Projects feelings and motivations onto others}
\item{\code{CAQ077}}{Appears straightforward, forthright, candid in dealing with others}
\item{\code{CAQ078}}{Feels cheated and victimized by life; self-pitying}
\item{\code{CAQ079}}{Ruminates and has persistent, preoccupying thoughts}
\item{\code{CAQ080}}{Interested in members of the opposite sex}
\item{\code{CAQ081}}{Physically attractive; good-looking}
\item{\code{CAQ082}}{Has fluctuating moods}
\item{\code{CAQ083}}{Able to see to the heart of important problems}
\item{\code{CAQ084}}{Cheerful}
\item{\code{CAQ085}}{Emphasizes communication through action and non-verbal behavior}
\item{\code{CAQ086}}{Repressive and dissociative tendencies; denies unpleasant thoughts and conflicts}
\item{\code{CAQ087}}{Interprets basically simple and clear-cut situations in complicated and particularizing ways}
\item{\code{CAQ088}}{Personally charming}
\item{\code{CAQ089}}{Compares self to others}
\item{\code{CAQ090}}{Concerned with philosophical problems}
\item{\code{CAQ091}}{Power-oriented; values power in self and others}
\item{\code{CAQ092}}{Has social poise and presence; appears socially at ease}
\item{\code{CAQ093}}{Behaves in gender-appropriate masculine or feminine style and manner}
\item{\code{CAQ094}}{Expresses hostile feelings directly}
\item{\code{CAQ095}}{Tends to offer advice}
\item{\code{CAQ096}}{Values own independence and autonomy}
\item{\code{CAQ097}}{Emotionally bland; has flattened affect}
\item{\code{CAQ098}}{Verbally fluent; can express ideas well}
\item{\code{CAQ099}}{Self-dramatizing; histrionic}
\item{\code{CAQ100}}{Does not vary roles; relates to everyone in the same way}
}
}
\source{
\url{http://psy2.fau.edu/~shermanr/index.html}
}
\references{
Serfass, D. G., & Sherman, R. A. (2013). A methodological note on ordered q-sort ratings. Journal of Research in Personality, 47(12), 853-858
}
\examples{
data(rate.caq)
head(rate.caq)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formula-tools.R
\name{as_string_formula}
\alias{as_string_formula}
\title{Converts formulas to strings}
\usage{
as_string_formula(formula)
}
\arguments{
\item{formula}{A model formula that may use standard fixed
effects, random effects using \link{lme4} syntax (see \code{\link[=re]{re()}}), and random walks
defined using the \code{\link[=rw]{rw()}} helper function.}
}
\value{
A character string of the supplied formula
}
\description{
Converts formulas to strings
}
\examples{
epinowcast:::as_string_formula(~ 1 + age_group)
}
\seealso{
Functions used to help convert formulas into model designs
\code{\link{construct_re}()},
\code{\link{construct_rw}()},
\code{\link{enw_formula}()},
\code{\link{enw_manual_formula}()},
\code{\link{parse_formula}()},
\code{\link{remove_rw_terms}()},
\code{\link{re}()},
\code{\link{rw_terms}()},
\code{\link{rw}()},
\code{\link{split_formula_to_terms}()}
}
\concept{formulatools}
| /man/as_string_formula.Rd | permissive | epinowcast/epinowcast | R | false | true | 996 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formula-tools.R
\name{as_string_formula}
\alias{as_string_formula}
\title{Converts formulas to strings}
\usage{
as_string_formula(formula)
}
\arguments{
\item{formula}{A model formula that may use standard fixed
effects, random effects using \link{lme4} syntax (see \code{\link[=re]{re()}}), and random walks
defined using the \code{\link[=rw]{rw()}} helper function.}
}
\value{
A character string of the supplied formula
}
\description{
Converts formulas to strings
}
\examples{
epinowcast:::as_string_formula(~ 1 + age_group)
}
\seealso{
Functions used to help convert formulas into model designs
\code{\link{construct_re}()},
\code{\link{construct_rw}()},
\code{\link{enw_formula}()},
\code{\link{enw_manual_formula}()},
\code{\link{parse_formula}()},
\code{\link{remove_rw_terms}()},
\code{\link{re}()},
\code{\link{rw_terms}()},
\code{\link{rw}()},
\code{\link{split_formula_to_terms}()}
}
\concept{formulatools}
|
# set directory to your repository
setwd(dir = '~/stop-frisk-analysis/')
# load data
load('data/cleanedData.Rdata')
require(plyr)
require(ggplot2)
require(ggthemes)
# Distributions of physical features by race
img =
qplot(hourStop, data = df, geom = 'bar', fill = race, binwidth = 1) +
theme_economist() +
xlim(0,23) +
xlab('Hour in the day') +
ylab('Number of stops')
ggsave(file="hourStop.png", plot=img, width=6, height=3, units = 'in')
img =
qplot(weekStop, data = df, geom = 'bar', fill = race, binwidth = 1) +
theme_economist() +
xlim(1,52) +
xlab('Week in the year')+
ylab('Number of stops')
ggsave(file="weekStop.png", plot=img, width=6, height=3, units = 'in')
img =
qplot(dayStop, data = df, geom = 'bar', fill = race, binwidth = 1) +
theme_economist() +
xlim(1,31) +
xlab('Day in the month')+
ylab('Number of stops')
ggsave(file="dayStop.png", plot=img, width=6, height=3, units = 'in')
weekSet<-ddply(.data = df, .variables = c('weekStop', 'CMDESC2'),
summarize,
numStops = NROW(CMDESC)
)
img =
ggplot(weekSet, aes(x = as.numeric(weekStop), y= numStops, group = CMDESC2)) +
geom_point() + xlab('Week in the year') + ylab('Number of stops') +
geom_line(aes(group = CMDESC2, color = CMDESC2)) +
theme_economist() + theme(legend.position = 'right')
ggsave(file="week_desc.png", plot=img, width=6, height=3, units = 'in')
hourSet<-ddply(.data = df, .variables = c('hourStop', 'CMDESC2'),
summarize,
numStops = NROW(CMDESC)
)
img = ggplot(hourSet, aes(x = as.numeric(hourStop), y= numStops, group = CMDESC2)) +
geom_point() + xlab('Hour of the day') + ylab('Number of stops') +
geom_line(aes(group = CMDESC2, color = CMDESC2)) +
theme_economist() + theme(legend.position = 'right')
ggsave(file="hour_desc.png", plot=img, width=6, height=3, units = 'in')
dtest <- ddply(.data = df,
.variables = c('race','weekStop'),
summarize,
numStops = NROW(CMDESC)
)
ggplot(dtest, aes(x = weekStop, y = numStops, group = race)) + geom_line(aes(color = race))
| /code/old/timeAnalysisAgg.R | no_license | bchnge/stop-frisk-analysis | R | false | false | 2,137 | r | # set directory to your repository
setwd(dir = '~/stop-frisk-analysis/')
# load data
load('data/cleanedData.Rdata')
require(plyr)
require(ggplot2)
require(ggthemes)
# Distributions of physical features by race
img =
qplot(hourStop, data = df, geom = 'bar', fill = race, binwidth = 1) +
theme_economist() +
xlim(0,23) +
xlab('Hour in the day') +
ylab('Number of stops')
ggsave(file="hourStop.png", plot=img, width=6, height=3, units = 'in')
img =
qplot(weekStop, data = df, geom = 'bar', fill = race, binwidth = 1) +
theme_economist() +
xlim(1,52) +
xlab('Week in the year')+
ylab('Number of stops')
ggsave(file="weekStop.png", plot=img, width=6, height=3, units = 'in')
img =
qplot(dayStop, data = df, geom = 'bar', fill = race, binwidth = 1) +
theme_economist() +
xlim(1,31) +
xlab('Day in the month')+
ylab('Number of stops')
ggsave(file="dayStop.png", plot=img, width=6, height=3, units = 'in')
weekSet<-ddply(.data = df, .variables = c('weekStop', 'CMDESC2'),
summarize,
numStops = NROW(CMDESC)
)
img =
ggplot(weekSet, aes(x = as.numeric(weekStop), y= numStops, group = CMDESC2)) +
geom_point() + xlab('Week in the year') + ylab('Number of stops') +
geom_line(aes(group = CMDESC2, color = CMDESC2)) +
theme_economist() + theme(legend.position = 'right')
ggsave(file="week_desc.png", plot=img, width=6, height=3, units = 'in')
hourSet<-ddply(.data = df, .variables = c('hourStop', 'CMDESC2'),
summarize,
numStops = NROW(CMDESC)
)
img = ggplot(hourSet, aes(x = as.numeric(hourStop), y= numStops, group = CMDESC2)) +
geom_point() + xlab('Hour of the day') + ylab('Number of stops') +
geom_line(aes(group = CMDESC2, color = CMDESC2)) +
theme_economist() + theme(legend.position = 'right')
ggsave(file="hour_desc.png", plot=img, width=6, height=3, units = 'in')
dtest <- ddply(.data = df,
.variables = c('race','weekStop'),
summarize,
numStops = NROW(CMDESC)
)
ggplot(dtest, aes(x = weekStop, y = numStops, group = race)) + geom_line(aes(color = race))
|
makeCacheMatrix <- function(x = matrix()) {
inversa <- NULL
set <- function(y) {
x <<- y
inversa <<- NULL
}
get <- function() {x}
setInversa <- function(inversa1) {inversa <<- inversa1}
getInversa <- function() {inversa}
list(set = set, get = get,
setInversa = setInversa,
getInversa = getInversa)
}
cacheSolve <- function(x, ...) {
inversa <- x$getInversa()
if(!is.null(inversa)) {
message("metodo cache para matrix inversa")
return(inversa)
}
data <- x$get()
inversa <- solve(data, ...)
x$setInversa(inversa)
inversa
}
| /cachematrix.R | no_license | johanftg/ProgrammingAssignment2 | R | false | false | 582 | r |
makeCacheMatrix <- function(x = matrix()) {
inversa <- NULL
set <- function(y) {
x <<- y
inversa <<- NULL
}
get <- function() {x}
setInversa <- function(inversa1) {inversa <<- inversa1}
getInversa <- function() {inversa}
list(set = set, get = get,
setInversa = setInversa,
getInversa = getInversa)
}
cacheSolve <- function(x, ...) {
inversa <- x$getInversa()
if(!is.null(inversa)) {
message("metodo cache para matrix inversa")
return(inversa)
}
data <- x$get()
inversa <- solve(data, ...)
x$setInversa(inversa)
inversa
}
|
library(tidyr)
library(ggplot2)
library(reshape2)
library(ggrepel)
library(ggpubr)
#--------------- Variables ---------------#
args <- commandArgs(TRUE)
#--------------- Graphing averages ---------------#
#commentblock
# S-v > :s/^/#/
# S-v > :s/^#// to remove
#df <- read.table("2020_bDD-top5pc1.csv", header=TRUE, sep=",")
#dfn <- melt(df, value.name = "PC", variable.name = "DAI")
#
#p <- ggplot(dfn, aes(x=DAI, y=PC, group=Code))+
# geom_line(aes(color=Code))+
# labs(title="Top 5 Bermudagrass Hybrids (Digital)", x="Days after irrigation (DAI)", y="Percent Coverage (%)")
#p
#
#png("2020_bDD-T5PC-avg.png", height=720, units="px")
#p + coord_cartesian(ylim=c(0.00,1.25))+
# scale_y_continuous(breaks=seq(0.00,1.25,0.20))+
# geom_vline(xintercept="D65", linetype="dashed", color="blue")
#dev.off()
#--------------- Graphing t5 vs cc ---------------#
df1 <- read.table("2020_bDrydown-exD1.csv", header=TRUE, sep=",")
df1$No. <- as.factor(df1$No.)
loads <- c("UCRC180040", "UCRC180557", "UCRC180576", "UCRC180038", "UCRC180229",
"Bandera", "Celebration", "Santa-Ana", "TifTuf", "Tifway-II",
"UCR17-8", "UCRTP6-3")
bhy <- c("UCRC180040", "UCRC180557", "UCRC180576", "UCRC180038", "UCRC180229", "UCR17-8", "UCRTP6-3")
#bhy <- c("UCRC180040", "UCRC180557", "UCRC180576", "UCRC180038", "UCRC180229")#,"UCR17-8", "UCRTP6-3")
cmc <- c("Bandera", "Celebration", "Santa-Ana", "TifTuf", "Tifway-II")#"UCR17-8", "UCRTP6-3")
#cmc <- c("Bandera", "Celebration", "Santa-Ana", "TifTuf", "Tifway-II", "UCR17-8", "UCRTP6-3")
df2 <- df1[df1$Code %in% loads, ]
df2a <- df2[df2$Code %in% bhy, ]
df2b <- df2[df2$Code %in% cmc, ]
rect <- data.frame(xmin=68, xmax=81, ymin=-Inf, ymax=Inf)
# 2(accession), 21(LC), 22(PC), 28(nPC), 29(DAI), 32(FLC), 33(FnPC)
# solid,15 longdash,18
p1 <- ggplot()+
stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="solid", size=1.5)+#, alpha=0.25)+ #avg on FLC
#stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point")+#, alpha=0.25)+
#stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
#stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
#geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
#
stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="longdash", size=0.75, alpha=0.500)+ #avg on FLC
#stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point", alpha=0.500)+
#stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
#stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
#
labs(title="Bermuda Hybrids vs. Cultivars", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
#
theme(
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_rect(fill="transparent", color=NA),
plot.background = element_rect(fill="transparent", color=NA),
#plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
#
axis.text = element_text(size=12),
axis.title = element_text(size=14),
axis.line = element_line(size=0.5, color="grey20"),
plot.title = element_text(size=16),
#
#legend.position = "bottom",
legend.position = "none",
legend.text = element_text(color="grey20", size=10),
legend.background = element_rect(linetype="solid", size=0.5, color="grey20"),
) +
coord_cartesian(ylim=c(0.00,130))+
scale_y_continuous(breaks=seq(0.00,125,25))+
geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Panel: mean leaf coverage (LC) per accession across the drydown, with the
# cultivar group (df2b, bold longdash) emphasized over the hybrid group
# (df2a, thin solid). NOTE(review): element_* `size` was renamed `linewidth`
# in ggplot2 >= 3.4; kept as `size` for compatibility with the version in use.
p2 <- ggplot()+
  stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="solid", size=0.75, alpha=0.500)+ #avg on FLC
  #stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point", alpha=0.500)+
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
  #
  stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="longdash", size=1.5)+#, alpha=0.25)+ #avg on FLC
  #stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point")+#, alpha=0.25)+
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #
  labs(title="Cultivars vs. Bermuda Hybrids", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
  #
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill="transparent", color=NA),
    plot.background = element_rect(fill="transparent", color=NA),
    #plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
    #
    axis.text = element_text(size=12),
    axis.title = element_text(size=14),
    axis.line = element_line(size=0.5, color="grey20"),
    plot.title = element_text(size=16),
    #
    legend.position = "none",
    legend.text = element_text(color="grey20", size=10),
    # Fix: removed the trailing comma that followed this argument; an empty
    # trailing argument makes theme() fail with "argument is empty".
    legend.background = element_rect(linetype="solid", size=0.5, color="grey20")
  )+
  coord_cartesian(ylim=c(0.00,130))+
  scale_y_continuous(breaks=seq(0.00,125,25))+
  # Shaded DAI 68-81 window, drawn last so it overlays the mean lines.
  geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
            color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Panel: mean nPC2 (digital coverage) per accession, emphasizing the hybrid
# group (df2a, bold solid) over cultivars (df2b, thin longdash).
# NOTE(review): the commented-out SE-bar layers still reference y=LC, not
# nPC2 -- update them before re-enabling.
q1 <- ggplot()+
  stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="solid", size=1.5)+#, alpha=0.25)+ #avg on FLC
  #stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point")+#, alpha=0.25)+
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
  #
  stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="longdash", size=0.75, alpha=0.500)+ #avg on FLC
  #stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point", alpha=0.500)+
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #
  labs(title="Bermuda Hybrids vs. Cultivars (DIA)", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
  #
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill="transparent", color=NA),
    plot.background = element_rect(fill="transparent", color=NA),
    #plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
    #
    axis.text = element_text(size=12),
    axis.title = element_text(size=14),
    axis.line = element_line(size=0.5, color="grey20"),
    plot.title = element_text(size=16),
    #
    legend.position = "none",
    legend.text = element_text(color="grey20", size=10),
    # Fix: removed the trailing comma that followed this argument; an empty
    # trailing argument makes theme() fail with "argument is empty".
    legend.background = element_rect(linetype="solid", size=0.5, color="grey20")
  )+
  coord_cartesian(ylim=c(0.00,130))+
  scale_y_continuous(breaks=seq(0.00,125,25))+
  # Shaded DAI 68-81 window, drawn last so it overlays the mean lines.
  geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
            color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Panel: mean nPC2 (digital coverage) per accession, emphasizing cultivars
# (df2b, bold longdash) over hybrids (df2a, thin solid).
q2 <- ggplot()+
  stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="solid", size=0.75, alpha=0.500)+ #avg on FLC
  #stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point", alpha=0.500)+
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
  #
  stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="longdash", size=1.5)+#, alpha=0.25)+ #avg on FLC
  #stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point")+#, alpha=0.25)+
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #
  labs(title="Cultivars vs. Bermuda Hybrids (DIA)", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
  #
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill="transparent", color=NA),
    plot.background = element_rect(fill="transparent", color=NA),
    #plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
    #
    axis.text = element_text(size=12),
    axis.title = element_text(size=14),
    axis.line = element_line(size=0.5, color="grey20"),
    plot.title = element_text(size=16),
    #
    legend.position = "none",
    legend.text = element_text(color="grey20", size=10),
    # Fix: removed the trailing comma that followed this argument; an empty
    # trailing argument makes theme() fail with "argument is empty".
    legend.background = element_rect(linetype="solid", size=0.5, color="grey20")
  )+
  coord_cartesian(ylim=c(0.00,130))+
  scale_y_continuous(breaks=seq(0.00,125,25))+
  # Shaded DAI 68-81 window, drawn last so it overlays the mean lines.
  geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
            color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Write the 2x2 composite figure (panels A-D) to a 300 dpi PNG.
png("2020_bFigs.png", height=9.5, width=9.75, units="in", res=300)
#svg("2020_bFigs.svg", height=9.5, width=9.75)
# NOTE(review): common.legend=TRUE harvests a legend from the panels, but each
# panel sets legend.position="none"; confirm the shared legend renders as
# intended (ggarrange's legend= argument may override the per-plot setting).
ggarrange(p1, p2, q1, q2,
labels=c("A","B","C","D"),
common.legend=TRUE, legend="bottom", font.label=list(size=18),
ncol=2, nrow=2)
dev.off()
| /ggplot/ggplot_uniform5.R | no_license | 0cb/warbling-wRen | R | false | false | 11,520 | r | library(tidyr)
library(ggplot2)
library(reshape2)
library(ggrepel)
library(ggpubr)
#--------------- Variables ---------------#
args <- commandArgs(TRUE)  # trailing CLI arguments; NOTE(review): never read below -- confirm this is intentional
#--------------- Graphing averages ---------------#
#commentblock
# S-v > :s/^/#/
# S-v > :s/^#// to remove
#df <- read.table("2020_bDD-top5pc1.csv", header=TRUE, sep=",")
#dfn <- melt(df, value.name = "PC", variable.name = "DAI")
#
#p <- ggplot(dfn, aes(x=DAI, y=PC, group=Code))+
# geom_line(aes(color=Code))+
# labs(title="Top 5 Bermudagrass Hybrids (Digital)", x="Days after irrigation (DAI)", y="Percent Coverage (%)")
#p
#
#png("2020_bDD-T5PC-avg.png", height=720, units="px")
#p + coord_cartesian(ylim=c(0.00,1.25))+
# scale_y_continuous(breaks=seq(0.00,1.25,0.20))+
# geom_vline(xintercept="D65", linetype="dashed", color="blue")
#dev.off()
#--------------- Graphing t5 vs cc ---------------#
# Drydown observations, one row per plot/date. Columns used below: Code
# (accession), DAI (days after irrigation), LC and nPC2 (coverage measures;
# see the column key comment at the end of this section).
df1 <- read.table("2020_bDrydown-exD1.csv", header=TRUE, sep=",")
df1$No. <- as.factor(df1$No.)  # plot number is an identifier, not a quantity
# Accessions to plot: bermuda hybrids (bhy) vs commercial cultivars (cmc);
# commented variants move UCR17-8/UCRTP6-3 between the two groups.
loads <- c("UCRC180040", "UCRC180557", "UCRC180576", "UCRC180038", "UCRC180229",
"Bandera", "Celebration", "Santa-Ana", "TifTuf", "Tifway-II",
"UCR17-8", "UCRTP6-3")
bhy <- c("UCRC180040", "UCRC180557", "UCRC180576", "UCRC180038", "UCRC180229", "UCR17-8", "UCRTP6-3")
#bhy <- c("UCRC180040", "UCRC180557", "UCRC180576", "UCRC180038", "UCRC180229")#,"UCR17-8", "UCRTP6-3")
cmc <- c("Bandera", "Celebration", "Santa-Ana", "TifTuf", "Tifway-II")#"UCR17-8", "UCRTP6-3")
#cmc <- c("Bandera", "Celebration", "Santa-Ana", "TifTuf", "Tifway-II", "UCR17-8", "UCRTP6-3")
df2 <- df1[df1$Code %in% loads, ]  # keep only the accessions of interest
df2a <- df2[df2$Code %in% bhy, ]   # hybrid subset
df2b <- df2[df2$Code %in% cmc, ]   # cultivar subset
# Shaded x-window (DAI 68-81) drawn on every panel; spans the full y-range.
rect <- data.frame(xmin=68, xmax=81, ymin=-Inf, ymax=Inf)
# 2(accession), 21(LC), 22(PC), 28(nPC), 29(DAI), 32(FLC), 33(FnPC)
# solid,15 longdash,18
# Panel: mean leaf coverage (LC) per accession across the drydown, with the
# hybrid group (df2a, bold solid) emphasized over cultivars (df2b, thin
# longdash). NOTE(review): element_* `size` was renamed `linewidth` in
# ggplot2 >= 3.4; kept as `size` for compatibility with the version in use.
p1 <- ggplot()+
  stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="solid", size=1.5)+#, alpha=0.25)+ #avg on FLC
  #stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point")+#, alpha=0.25)+
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
  #
  stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="longdash", size=0.75, alpha=0.500)+ #avg on FLC
  #stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point", alpha=0.500)+
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #
  labs(title="Bermuda Hybrids vs. Cultivars", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
  #
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill="transparent", color=NA),
    plot.background = element_rect(fill="transparent", color=NA),
    #plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
    #
    axis.text = element_text(size=12),
    axis.title = element_text(size=14),
    axis.line = element_line(size=0.5, color="grey20"),
    plot.title = element_text(size=16),
    #
    #legend.position = "bottom",
    legend.position = "none",
    legend.text = element_text(color="grey20", size=10),
    # Fix: removed the trailing comma that followed this argument; an empty
    # trailing argument makes theme() fail with "argument is empty".
    legend.background = element_rect(linetype="solid", size=0.5, color="grey20")
  ) +
  coord_cartesian(ylim=c(0.00,130))+
  scale_y_continuous(breaks=seq(0.00,125,25))+
  # Shaded DAI 68-81 window, drawn last so it overlays the mean lines.
  geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
            color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Panel: mean leaf coverage (LC) per accession across the drydown, with the
# cultivar group (df2b, bold longdash) emphasized over the hybrid group
# (df2a, thin solid).
p2 <- ggplot()+
  stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="solid", size=0.75, alpha=0.500)+ #avg on FLC
  #stat_summary(data=df2a, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point", alpha=0.500)+
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
  #
  stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="line", linetype="longdash", size=1.5)+#, alpha=0.25)+ #avg on FLC
  #stat_summary(data=df2b, fun=mean, aes(x=DAI, y=LC, group=Code, color=Code), geom="point")+#, alpha=0.25)+
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #
  labs(title="Cultivars vs. Bermuda Hybrids", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
  #
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill="transparent", color=NA),
    plot.background = element_rect(fill="transparent", color=NA),
    #plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
    #
    axis.text = element_text(size=12),
    axis.title = element_text(size=14),
    axis.line = element_line(size=0.5, color="grey20"),
    plot.title = element_text(size=16),
    #
    legend.position = "none",
    legend.text = element_text(color="grey20", size=10),
    # Fix: removed the trailing comma that followed this argument; an empty
    # trailing argument makes theme() fail with "argument is empty".
    legend.background = element_rect(linetype="solid", size=0.5, color="grey20")
  )+
  coord_cartesian(ylim=c(0.00,130))+
  scale_y_continuous(breaks=seq(0.00,125,25))+
  # Shaded DAI 68-81 window, drawn last so it overlays the mean lines.
  geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
            color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Panel: mean nPC2 (digital coverage) per accession, emphasizing the hybrid
# group (df2a, bold solid) over cultivars (df2b, thin longdash).
# NOTE(review): the commented-out SE-bar layers still reference y=LC, not
# nPC2 -- update them before re-enabling.
q1 <- ggplot()+
  stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="solid", size=1.5)+#, alpha=0.25)+ #avg on FLC
  #stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point")+#, alpha=0.25)+
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
  #
  stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="longdash", size=0.75, alpha=0.500)+ #avg on FLC
  #stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point", alpha=0.500)+
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #
  labs(title="Bermuda Hybrids vs. Cultivars (DIA)", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
  #
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill="transparent", color=NA),
    plot.background = element_rect(fill="transparent", color=NA),
    #plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
    #
    axis.text = element_text(size=12),
    axis.title = element_text(size=14),
    axis.line = element_line(size=0.5, color="grey20"),
    plot.title = element_text(size=16),
    #
    legend.position = "none",
    legend.text = element_text(color="grey20", size=10),
    # Fix: removed the trailing comma that followed this argument; an empty
    # trailing argument makes theme() fail with "argument is empty".
    legend.background = element_rect(linetype="solid", size=0.5, color="grey20")
  )+
  coord_cartesian(ylim=c(0.00,130))+
  scale_y_continuous(breaks=seq(0.00,125,25))+
  # Shaded DAI 68-81 window, drawn last so it overlays the mean lines.
  geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
            color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Panel: mean nPC2 (digital coverage) per accession, emphasizing cultivars
# (df2b, bold longdash) over hybrids (df2a, thin solid).
q2 <- ggplot()+
  stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="solid", size=0.75, alpha=0.500)+ #avg on FLC
  #stat_summary(data=df2a, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point", alpha=0.500)+
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar")+ #SE bars
  #stat_summary(data=df2a, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", position=position_dodge(width=0, height=4), size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #geom_text_repel(data=df2a, aes(x=DAI, y=LC, label=ifelse(DAI==118, as.character(Code),'')))+
  #
  stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="line", linetype="longdash", size=1.5)+#, alpha=0.25)+ #avg on FLC
  #stat_summary(data=df2b, fun=mean, aes(x=DAI, y=nPC2, group=Code, color=Code), geom="point")+#, alpha=0.25)+
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, group=Code, color=Code), geom="errorbar", alpha=0.15)+ #SE bars
  #stat_summary(data=df2b, fun.data=mean_se, aes(x=DAI, y=LC, color=Code, label=ifelse(DAI==118, as.character(Code),'')), geom="text", size=8, hjust=1, vjust=1, show.legend=FALSE)+ #SE bars
  #
  labs(title="Cultivars vs. Bermuda Hybrids (DIA)", x="Days after irrigation (DAI)", y="Percent coverage (%)")+
  #
  theme(
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_rect(fill="transparent", color=NA),
    plot.background = element_rect(fill="transparent", color=NA),
    #plot.margin = unit(c(5.5,5.5,2,5.5), unit="point"),
    #
    axis.text = element_text(size=12),
    axis.title = element_text(size=14),
    axis.line = element_line(size=0.5, color="grey20"),
    plot.title = element_text(size=16),
    #
    legend.position = "none",
    legend.text = element_text(color="grey20", size=10),
    # Fix: removed the trailing comma that followed this argument; an empty
    # trailing argument makes theme() fail with "argument is empty".
    legend.background = element_rect(linetype="solid", size=0.5, color="grey20")
  )+
  coord_cartesian(ylim=c(0.00,130))+
  scale_y_continuous(breaks=seq(0.00,125,25))+
  # Shaded DAI 68-81 window, drawn last so it overlays the mean lines.
  geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax),
            color="skyblue1", fill="skyblue1", alpha=0.3, inherit.aes=FALSE)
# Assemble panels A-D in a 2x2 grid and write to a 300 dpi PNG.
png("2020_bFigs.png", height=9.5, width=9.75, units="in", res=300)
#svg("2020_bFigs.svg", height=9.5, width=9.75)
# NOTE(review): common.legend=TRUE harvests a legend from the panels, but each
# panel sets legend.position="none"; confirm the shared bottom legend renders
# as intended (ggarrange's legend= argument may override the per-plot setting).
ggarrange(p1, p2, q1, q2,
labels=c("A","B","C","D"),
common.legend=TRUE, legend="bottom", font.label=list(size=18),
ncol=2, nrow=2)
dev.off()
|
#' @title Find density modes with HMM-VB
#'
#' @description This function finds the density modes with HMM-VB. First, for each data point it finds an optimal state sequence
#' using Viterbi algorithm. Next, it uses Modal Baum-Welch algorithm (MBW) to find the modes
#' of distinct Viterbi state sequences. Data points associated with the same modes form clusters.
#' @param data A numeric vector, matrix, or data frame of observations.
#' Categorical values are not allowed. If a matrix or data frame, rows
#' correspond to observations and columns correspond to variables.
#' @param model An object of class 'HMMVB' that contains trained HMM-VB obtained
#' by the call to function \code{hmmvbTrain}.
#' @param nthread An integer specifying the number of threads used in clustering.
#' @param bicObj An object of class 'HMMVBBIC' which stores results of model selection.
#' If provided, argument \code{model} is ignored.
#' @return An object of class 'HMMVBclust'.
#' @seealso \code{\link{HMMVB-class}}, \code{\link{HMMVBclust-class}}, \code{\link{hmmvbTrain}}
#' @examples
#' # find modes using trained HMM-VB
#' Vb <- vb(1, dim=4, numst=2)
#' set.seed(12345)
#' hmmvb <- hmmvbTrain(iris[,1:4], VbStructure=Vb)
#' modes <- hmmvbFindModes(iris[,1:4], model=hmmvb)
#' show(modes)
#'
#' \donttest{
#' # find modes using HMMVBBIC object obtained in model selection
#' Vb <- vb(1, dim=4, numst=1)
#' set.seed(12345)
#' modelBIC <- hmmvbBIC(iris[,1:4], VbStructure=Vb)
#' modes <- hmmvbFindModes(iris[,1:4], bicObj=modelBIC)
#' show(modes)}
hmmvbFindModes <- function(data, model=NULL, nthread=1, bicObj=NULL){
  data <- as.matrix(data)
  nthread <- as.integer(nthread)
  # as.integer() yields NA for non-numeric input; catch it here so the caller
  # sees this message rather than "missing value where TRUE/FALSE needed".
  if ((length(nthread) != 1) || is.na(nthread) || nthread < 1)
    stop('nthread should be a positive integer >= 1!\n')
  if (!is.null(bicObj) && (!isS4(bicObj) || !is(bicObj, "HMMVBBIC")))
    stop('If provided, bicObj should be an instance of class HMMVBBIC!\n')
  if (!is.null(bicObj)){
    # bicObj takes precedence: use the BIC-optimal model stored in it.
    res <- rcpp_findModes(t(data), getOptHMMVB(bicObj), nthread)
  } else{
    if (is.null(model) || !isS4(model) || !is(model, "HMMVB"))
      stop('If argument bicObj is not provided, model argument should be given and it must be an instance of class HMMVB!\n')
    res <- rcpp_findModes(t(data), model, nthread)
  }
  res@data <- data               # attach the input data to the result object
  res@clsid <- res@clsid + 1     # shift 0-based cluster ids to 1-based for R
  return(res)
}
| /fuzzedpackages/HDclust/R/hmmvbFindModes.R | no_license | akhikolla/testpackages | R | false | false | 2,359 | r | #' @title Find density modes with HMM-VB
#'
#' @description Finds the density modes of a dataset under a trained HMM-VB.
#' For every observation the optimal (Viterbi) state sequence is computed
#' first; the Modal Baum-Welch (MBW) algorithm then locates the mode of each
#' distinct Viterbi sequence, and observations sharing a mode form a cluster.
#' @param data A numeric vector, matrix, or data frame of observations.
#' Categorical values are not allowed. If a matrix or data frame, rows
#' correspond to observations and columns correspond to variables.
#' @param model An object of class 'HMMVB' holding a trained HMM-VB, as
#' returned by \code{hmmvbTrain}.
#' @param nthread An integer specifying the number of threads used in clustering.
#' @param bicObj An object of class 'HMMVBBIC' storing model selection results.
#' If provided, argument \code{model} is ignored.
#' @return An object of class 'HMMVBclust'.
#' @seealso \code{\link{HMMVB-class}}, \code{\link{HMMVBclust-class}}, \code{\link{hmmvbTrain}}
#' @examples
#' # find modes using trained HMM-VB
#' Vb <- vb(1, dim=4, numst=2)
#' set.seed(12345)
#' hmmvb <- hmmvbTrain(iris[,1:4], VbStructure=Vb)
#' modes <- hmmvbFindModes(iris[,1:4], model=hmmvb)
#' show(modes)
#'
#' \donttest{
#' # find modes using HMMVBBIC object obtained in model selection
#' Vb <- vb(1, dim=4, numst=1)
#' set.seed(12345)
#' modelBIC <- hmmvbBIC(iris[,1:4], VbStructure=Vb)
#' modes <- hmmvbClust(iris[,1:4], bicObj=modelBIC)
#' show(modes)}
hmmvbFindModes <- function(data, model=NULL, nthread=1, bicObj=NULL){
  data <- as.matrix(data)
  nthread <- as.integer(nthread)
  if ((length(nthread) != 1) || nthread < 1)
    stop('nthread should be a positive integer >= 1!\n')
  if (!is.null(bicObj) && (!isS4(bicObj) || !is(bicObj, "HMMVBBIC")))
    stop('If provided, bicObj should be an instance of class HMMVBBIC!\n')
  # Resolve which trained model to hand to the C++ backend: a supplied
  # bicObj wins; otherwise a valid 'HMMVB' model must have been given.
  if (is.null(bicObj)) {
    if (is.null(model) || !isS4(model) || !is(model, "HMMVB"))
      stop('If argument bicObj is not provided, model argument should be given and it must be an instance of class HMMVB!\n')
    hmm <- model
  } else {
    hmm <- getOptHMMVB(bicObj)
  }
  res <- rcpp_findModes(t(data), hmm, nthread)
  res@data <- data           # keep the input data on the result object
  res@clsid <- res@clsid + 1 # convert 0-based cluster ids to 1-based
  res
}
|
# Start with clean environment
# NOTE(review): rm(list=ls()) wipes the caller's global environment when this
# script is sourced interactively and is generally discouraged; confirm it is
# needed before keeping it.
rm(list=ls())
gc()
# Flux sampling files
# Tab-separated flux-sampling results for the two growth conditions.
lb_samples <- '~/Desktop/repos/Jenior_RIPTiDe_2019/data/flux_samples/media_conditions/LB_aerobic.flux_samples.tsv'
invivo_samples <- '~/Desktop/repos/Jenior_RIPTiDe_2019/data/flux_samples/media_conditions/invivo.flux_samples.tsv'
# Read in data
lb_samples <- read.delim(lb_samples, sep='\t', header=TRUE)
invivo_samples <- read.delim(invivo_samples, sep='\t', header=TRUE)
# Subset to exchange reactions
# Exchange reaction columns are identified by the 'EX_' prefix in their names.
lb_exchanges <- lb_samples[, grep('EX_', colnames(lb_samples))]
invivo_exchanges <- invivo_samples[, grep('EX_', colnames(invivo_samples))]
rm(lb_samples, invivo_samples)  # free the full sampling tables
# Format row names
# NOTE(review): the $X <- NULL lines look like no-ops -- any 'X' index column
# was already dropped by the 'EX_' grep above; confirm and remove if so.
# Also, 1:nrow(...) misbehaves for zero-row input; seq_len(nrow(...)) is safer.
lb_exchanges$X <- NULL
lb_names <- paste('lb_samples_', 1:nrow(lb_exchanges), sep='')
rownames(lb_exchanges) <- lb_names
invivo_exchanges$X <- NULL
invivo_names <- paste('invivo_', 1:nrow(invivo_exchanges), sep='')
rownames(invivo_exchanges) <- invivo_names
# Consider only net-imported metabolites
# A negative column mean marks an exchange that is, on average, an import
# across the flux samples.
lb_exchanges <- lb_exchanges[, which(as.vector(colMeans(lb_exchanges)) < 0)]
invivo_exchanges <- invivo_exchanges[, which(as.vector(colMeans(invivo_exchanges)) < 0)]
# Collect differences in exchanges across models
# Keep only substrates imported in one condition but not the other.
lb_only <- setdiff(colnames(lb_exchanges), colnames(invivo_exchanges))
invivo_only <- setdiff(colnames(invivo_exchanges), colnames(lb_exchanges))
lb_exchanges <- lb_exchanges[, lb_only]
invivo_exchanges <- invivo_exchanges[, invivo_only]
rm(lb_only, invivo_only)
# Calculate absolute flux through exchanges
# log2(|flux| + 1): uptake magnitude on a log scale; the +1 keeps zeros at 0.
lb_exchanges <- log2(abs(lb_exchanges) + 1)
invivo_exchanges <- log2(abs(invivo_exchanges) + 1)
# Subsetting to those >1
# Drop exchanges whose median transformed flux is small.
lb_exch_med <- apply(lb_exchanges, 2, median)
lb_exchanges <- lb_exchanges[, which(lb_exch_med > 1)]
invivo_exch_med <- apply(invivo_exchanges, 2, median)
invivo_exchanges <- invivo_exchanges[, which(invivo_exch_med > 1)]
# Calculate IQRs for exchange fluxes
lb_exch_q25 <- apply(lb_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.25)))
lb_exch_med <- apply(lb_exchanges, 2, median)  # recomputed on the filtered columns
lb_exch_q75 <- apply(lb_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.75)))
invivo_exch_q25 <- apply(invivo_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.25)))
invivo_exch_med <- apply(invivo_exchanges, 2, median)
invivo_exch_q75 <- apply(invivo_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.75)))
# Combined vectors are ordered LB first, then in vivo (matches bar_cols below).
exch_q25 <- c(lb_exch_q25, invivo_exch_q25)
exch_median <- c(lb_exch_med, invivo_exch_med)
exch_q75 <- c(lb_exch_q75, invivo_exch_q75)
# Collect name variables
# NOTE(review): exchange_rxns is ordered in vivo-first, the opposite of the
# LB-first flux vectors above, but it is never used below -- confirm it can be
# removed. exchange_cpds is hand-ordered for the plot; verify these metabolite
# names still match the columns that survive the filters.
exchange_rxns <- c(colnames(invivo_exchanges), colnames(lb_exchanges))
exchange_cpds <- c(rev(c("L-Asparagine", "D-Glucose 6-phosphate", "L-Methionine S-oxide", "Nitrite", "Thymidine")),
rev(c("Deoxyuridine", "L-methionine", "Nitrate", "L-valine")))
# Generate figure
png(filename='~/Desktop/repos/Jenior_RIPTiDe_2019/results/figures/figure_5C.png',
units='in', width=5, height=6, res=300)
par(mar=c(3.5,9,1,1.5), las=1, mgp=c(1.9,0.7,0), lwd=2, xaxs='i', yaxs='i')
# Empty canvas sized to hold one horizontal bar per metabolite.
plot(0, type='n', xaxt='n', yaxt='n', xlim=c(0,10), ylim=c(0,length(exchange_cpds)+0.5),
xlab=expression(paste('Inverse Exchange Flux (log'['2'],')')), ylab='', cex.lab=1.1)
# Custom x-axis: labels are approximate back-transformed magnitudes
# (2^5 - 1 = 31 ~ 30, 2^10 - 1 = 1023 ~ 1000).
axis(1, at=c(0,5,10), labels=c('0','30','1000'), lwd=2)
minors <- c(1.5,2.5,3.3,3.9,4.3,4.6,4.8)  # hand-placed pseudo-log minor ticks
axis(1, at=minors, labels=rep('', length(minors)), lwd=1, tck=-0.03)
axis(1, at=minors+5, labels=rep('', length(minors)), lwd=1, tck=-0.03)
axis(2, at=c(0.4:length(exchange_cpds)+0.4), labels=rev(exchange_cpds), tck=0, cex.axis=0.9)
# One fill per condition: LB bars first (bottom), then in vivo bars.
bar_cols <- c(rep('#ffa05d', ncol(lb_exchanges)), rep('firebrick', ncol(invivo_exchanges)))
barplot(exch_median, col=bar_cols, width=0.5, space=1, horiz=TRUE, add=TRUE, xaxt='n', yaxt='n')
segments(x0=exch_q25, x1=exch_q75, y0=c(0.38:length(exchange_cpds)+0.38))  # IQR whiskers
abline(h=4.25)  # divider between the two condition groups
text(x=c(8.5,9), y=c(9.2, 3.95), labels=c(as.expression(bquote(italic('in vivo'))),'LB'), cex=1.1)
par(xpd=TRUE)  # allow the rotated side label to draw in the margin
text(x=10.5, y=4.5, 'Context-specific Growth Substrates', srt=270)
dev.off()
| /code/R/figure_5C.R | permissive | shuyima1/Jenior_RIPTiDe_2020 | R | false | false | 3,973 | r |
# Start with clean environment
# NOTE(review): rm(list=ls()) clears the global environment of whoever sources
# this script -- a discouraged pattern; confirm before keeping.
rm(list=ls())
gc()
# Flux sampling files
# Flux-sampling results (TSV) for the LB-aerobic and in vivo conditions.
lb_samples <- '~/Desktop/repos/Jenior_RIPTiDe_2019/data/flux_samples/media_conditions/LB_aerobic.flux_samples.tsv'
invivo_samples <- '~/Desktop/repos/Jenior_RIPTiDe_2019/data/flux_samples/media_conditions/invivo.flux_samples.tsv'
# Read in data
lb_samples <- read.delim(lb_samples, sep='\t', header=TRUE)
invivo_samples <- read.delim(invivo_samples, sep='\t', header=TRUE)
# Subset to exchange reactions
# Exchange reactions carry the 'EX_' prefix in their column names.
lb_exchanges <- lb_samples[, grep('EX_', colnames(lb_samples))]
invivo_exchanges <- invivo_samples[, grep('EX_', colnames(invivo_samples))]
rm(lb_samples, invivo_samples)
# Format row names
# NOTE(review): $X <- NULL appears redundant after the 'EX_' grep (an 'X'
# index column would not survive it); 1:nrow(...) would also misbehave on
# zero-row input -- seq_len(nrow(...)) is the safe form.
lb_exchanges$X <- NULL
lb_names <- paste('lb_samples_', 1:nrow(lb_exchanges), sep='')
rownames(lb_exchanges) <- lb_names
invivo_exchanges$X <- NULL
invivo_names <- paste('invivo_', 1:nrow(invivo_exchanges), sep='')
rownames(invivo_exchanges) <- invivo_names
# Consider only net-imported metabolites
# Columns with a negative mean flux are, on average, imports across samples.
lb_exchanges <- lb_exchanges[, which(as.vector(colMeans(lb_exchanges)) < 0)]
invivo_exchanges <- invivo_exchanges[, which(as.vector(colMeans(invivo_exchanges)) < 0)]
# Collect differences in exchanges across models
# Restrict each table to substrates unique to its condition.
lb_only <- setdiff(colnames(lb_exchanges), colnames(invivo_exchanges))
invivo_only <- setdiff(colnames(invivo_exchanges), colnames(lb_exchanges))
lb_exchanges <- lb_exchanges[, lb_only]
invivo_exchanges <- invivo_exchanges[, invivo_only]
rm(lb_only, invivo_only)
# Calculate absolute flux through exchanges
# log2(|flux| + 1) keeps zeros at 0 while compressing large magnitudes.
lb_exchanges <- log2(abs(lb_exchanges) + 1)
invivo_exchanges <- log2(abs(invivo_exchanges) + 1)
# Subsetting to those >1
lb_exch_med <- apply(lb_exchanges, 2, median)
lb_exchanges <- lb_exchanges[, which(lb_exch_med > 1)]
invivo_exch_med <- apply(invivo_exchanges, 2, median)
invivo_exchanges <- invivo_exchanges[, which(invivo_exch_med > 1)]
# Calculate IQRs for exchange fluxes
lb_exch_q25 <- apply(lb_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.25)))
lb_exch_med <- apply(lb_exchanges, 2, median)  # medians of the filtered columns
lb_exch_q75 <- apply(lb_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.75)))
invivo_exch_q25 <- apply(invivo_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.25)))
invivo_exch_med <- apply(invivo_exchanges, 2, median)
invivo_exch_q75 <- apply(invivo_exchanges, 2, function(x) as.numeric(quantile(x, probs=0.75)))
# LB values come first in the combined vectors, then in vivo.
exch_q25 <- c(lb_exch_q25, invivo_exch_q25)
exch_median <- c(lb_exch_med, invivo_exch_med)
exch_q75 <- c(lb_exch_q75, invivo_exch_q75)
# Collect name variables
# NOTE(review): exchange_rxns (in vivo-first, opposite ordering) is unused
# below; exchange_cpds is hand-ordered -- verify against surviving columns.
exchange_rxns <- c(colnames(invivo_exchanges), colnames(lb_exchanges))
exchange_cpds <- c(rev(c("L-Asparagine", "D-Glucose 6-phosphate", "L-Methionine S-oxide", "Nitrite", "Thymidine")),
rev(c("Deoxyuridine", "L-methionine", "Nitrate", "L-valine")))
# Generate figure
png(filename='~/Desktop/repos/Jenior_RIPTiDe_2019/results/figures/figure_5C.png',
units='in', width=5, height=6, res=300)
par(mar=c(3.5,9,1,1.5), las=1, mgp=c(1.9,0.7,0), lwd=2, xaxs='i', yaxs='i')
# Blank canvas; one horizontal bar per metabolite will be overlaid.
plot(0, type='n', xaxt='n', yaxt='n', xlim=c(0,10), ylim=c(0,length(exchange_cpds)+0.5),
xlab=expression(paste('Inverse Exchange Flux (log'['2'],')')), ylab='', cex.lab=1.1)
# x-axis labels are approximate back-transformed magnitudes (2^x - 1).
axis(1, at=c(0,5,10), labels=c('0','30','1000'), lwd=2)
minors <- c(1.5,2.5,3.3,3.9,4.3,4.6,4.8)  # hand-placed pseudo-log minor ticks
axis(1, at=minors, labels=rep('', length(minors)), lwd=1, tck=-0.03)
axis(1, at=minors+5, labels=rep('', length(minors)), lwd=1, tck=-0.03)
axis(2, at=c(0.4:length(exchange_cpds)+0.4), labels=rev(exchange_cpds), tck=0, cex.axis=0.9)
# LB bars (bottom group) get one fill; in vivo bars the other.
bar_cols <- c(rep('#ffa05d', ncol(lb_exchanges)), rep('firebrick', ncol(invivo_exchanges)))
barplot(exch_median, col=bar_cols, width=0.5, space=1, horiz=TRUE, add=TRUE, xaxt='n', yaxt='n')
segments(x0=exch_q25, x1=exch_q75, y0=c(0.38:length(exchange_cpds)+0.38))  # IQR whiskers
abline(h=4.25)  # separator between the condition groups
text(x=c(8.5,9), y=c(9.2, 3.95), labels=c(as.expression(bquote(italic('in vivo'))),'LB'), cex=1.1)
par(xpd=TRUE)  # permit drawing the rotated label in the margin
text(x=10.5, y=4.5, 'Context-specific Growth Substrates', srt=270)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/problem.R
\name{problem}
\alias{problem}
\alias{problem,Raster,Raster-method}
\alias{problem,Spatial,Raster-method}
\alias{problem,data.frame,data.frame-method}
\alias{problem,numeric,data.frame-method}
\alias{problem,data.frame,character-method}
\alias{problem,Spatial,character-method}
\alias{problem,Raster,ZonesRaster-method}
\alias{problem,Spatial,ZonesRaster-method}
\alias{problem,Spatial,ZonesCharacter-method}
\alias{problem,data.frame,ZonesCharacter-method}
\alias{problem,matrix,data.frame-method}
\alias{problem,sf,Raster-method}
\alias{problem,sf,ZonesCharacter-method}
\alias{problem,sf,character-method}
\alias{problem,sf,ZonesRaster-method}
\title{Conservation planning problem}
\usage{
problem(x, features, ...)
\S4method{problem}{Raster,Raster}(x, features, run_checks, ...)
\S4method{problem}{Raster,ZonesRaster}(x, features, run_checks, ...)
\S4method{problem}{Spatial,Raster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{Spatial,ZonesRaster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{Spatial,character}(x, features, cost_column, ...)
\S4method{problem}{Spatial,ZonesCharacter}(x, features, cost_column, ...)
\S4method{problem}{data.frame,character}(x, features, cost_column, ...)
\S4method{problem}{data.frame,ZonesCharacter}(x, features, cost_column, ...)
\S4method{problem}{data.frame,data.frame}(x, features, rij, cost_column, zones, ...)
\S4method{problem}{numeric,data.frame}(x, features, rij_matrix, ...)
\S4method{problem}{matrix,data.frame}(x, features, rij_matrix, ...)
\S4method{problem}{sf,Raster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{sf,ZonesRaster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{sf,character}(x, features, cost_column, ...)
\S4method{problem}{sf,ZonesCharacter}(x, features, cost_column, ...)
}
\arguments{
\item{x}{\code{\linkS4class{Raster}},
\code{\link[sf:sf]{sf::st_sf()}},
\code{\linkS4class{SpatialPolygonsDataFrame}},
\code{\linkS4class{SpatialLinesDataFrame}},
\code{\linkS4class{SpatialPointsDataFrame}},
\code{\link[=data.frame]{data.frame()}} object,
\code{\link[=numeric]{numeric()}} vector, or
\code{\link[=matrix]{matrix()}} specifying the planning units to use in the reserve
design exercise and their corresponding cost. It may be desirable to
exclude some planning units from the analysis, for example those outside
the study area. To exclude planning units, set the cost for those raster
cells to \code{NA}, or use the \code{add_locked_out_constraint} function.}
\item{features}{The feature data can be specified in a variety of ways.
The specific formats that can be used depend on the cost data format (i.e.
argument to \code{x}) and whether the problem should have a single zone or
multiple zones. If the problem should have a single zone, then the feature
data can be specified following:
\itemize{
\item \code{\link[raster:Raster-classes]{x = RasterLayer-class}}, or
\code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}:
\code{\link[raster:Raster-classes]{y = Raster-class}}
object showing the distribution of conservation features. Missing
values (i.e. \code{NA} values) can be used to indicate the absence of
a feature in a particular cell instead of explicitly setting these
cells to zero. Note that this argument type for \code{features} can
only be used to specify data for problems involving a single zone.
\item \code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}, or
\code{x = data.frame}:
\code{y = character} vector
with column names that correspond to the abundance or occurrence of
different features in each planning unit. Note that this argument
type can only be used to create problems involving a single zone.
\item \code{x = data.frame}, or
\code{x = numeric} vector, or
\code{x = matrix}:
\code{y = data.frame} object
containing the names of the features. Note that if this
type of argument is supplied to \code{features} then the argument
\code{rij} or \code{rij_matrix} must also be supplied. This type of
argument should follow the conventions used by \emph{Marxan}, wherein
each row corresponds to a different feature. It must also contain the
following columns:
\describe{
\item{id}{\code{integer} unique identifier for each feature.
These identifiers are used in the argument to \code{rij}.}
\item{name}{\code{character} name for each feature.}
\item{prop}{\code{numeric} relative target for each feature
(optional).}
\item{amount}{\code{numeric} absolute target for each
feature (optional).}
}
}
If the problem should have multiple zones, then the feature
data can be specified following:
\itemize{
\item \code{\link[raster:Raster-classes]{x = RasterStack-class}}, or
\code{\link[raster:Raster-classes]{x = RasterBrick-class}}, or
\code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}:
\code{\link[=zones]{y = ZonesRaster}}:
object showing the distribution of conservation features in multiple
zones. As above, missing values (i.e. \code{NA} values) can be used to
indicate the absence of a feature in a particular cell instead of
explicitly setting these cells to zero.
\item \code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}, or
\code{x = data.frame}:
\code{\link[=zones]{y = ZonesCharacter}}
object with column names that correspond to the abundance or
occurrence of different features in each planning unit in different
zones.
}}
\item{...}{not used.}
\item{run_checks}{\code{logical} flag indicating whether checks should be
run to ensure the integrity of the input data. These checks are run by
default; however, for large datasets they may increase run time. If it is
taking a prohibitively long time to create the prioritization problem, it
is suggested to try setting \code{run_checks} to \code{FALSE}.}
\item{cost_column}{\code{character} name or \code{integer} indicating the
column(s) with the cost data. This argument must be supplied when the
argument to \code{x} is a \code{\linkS4class{Spatial}} or
\code{data.frame} object. This argument should contain the name of each
column containing cost data for each management zone when creating
problems with multiple zones. To create a problem with a single zone, then
set the argument to \code{cost_column} as a single column name.}
\item{rij}{\code{data.frame} containing information on the amount of
each feature in each planning unit assuming each management zone. Similar
to \code{data.frame} arguments for \code{features}, the \code{data.frame}
objects must follow the conventions used by \emph{Marxan}. Note that the
\code{"zone"} column is not needed for problems involving a single
management zone. Specifically, the argument should contain the following
columns:
\describe{
\item{pu}{\code{integer} planning unit identifier.}
\item{species}{\code{integer} feature identifier.}
\item{zone}{\code{integer} zone identifier (optional for
problems involving a single zone).}
\item{amount}{\code{numeric} amount of the feature in the
planning unit.}
}}
\item{zones}{\code{data.frame} containing information on the zones. This
argument is only used when argument to \code{x} and \code{y} are
both \code{data.frame} objects and the problem being built contains
multiple zones. Following conventions used in \code{MarZone}, this
argument should contain the following columns:
\describe{
\item{id}{\code{integer} zone identifier.}
\item{name}{\code{character} zone name.}
}}
\item{rij_matrix}{\code{list} of \code{matrix} or
\code{\linkS4class{dgCMatrix}}
objects specifying the amount of each feature (rows) within each planning
unit (columns) for each zone. The \code{list} elements denote
different zones, matrix rows denote features, and matrix columns denote
planning units. For convenience, the argument to
\code{rij_matrix} can be a single \code{matrix} or
\code{\linkS4class{dgCMatrix}} when specifying a problem with a
single management zone. This argument is only used when the argument
to \code{x} is a \code{numeric} or \code{matrix} object.}
}
\value{
\code{\linkS4class{ConservationProblem}} object containing
data for a prioritization.
}
\description{
Create a systematic conservation planning problem. This function is used to
specify the basic data used in a spatial prioritization problem: the
spatial distribution of the planning units and their costs, as well as
the features (e.g. species, ecosystems) that need to be conserved. After
constructing this \code{ConservationProblem-class} object, it can be
customized to meet specific goals using \link{objectives},
\link{targets}, \link{constraints}, and
\link{penalties}. After building the problem, the
\code{\link[=solve]{solve()}} function can be used to identify solutions.
\strong{Note that problems require an objective, and failing to specify
an objective will throw an error when attempting to solve it.}
}
\details{
A systematic conservation planning exercise leverages data to help inform
conservation decision making. To help ensure that the
data -- and resulting prioritizations -- are relevant to the over-arching
goals of the exercise, you should decide on the management action
(or set of actions) that need to be considered in the exercise.
For example, these actions could include establishing protected areas,
selecting land for conservation easements, restoring habitat,
planting trees for carbon sequestration, eradicating invasive
species, or some combination of the previous actions.
If the exercise involves multiple different actions, they can
be incorporated by using multiple zones
(see the Management Zones vignette for details). After deciding
on the management action(s), you can compile the following data.
First, you will need to create a set of planning units
(i.e. discrete spatial areas) to inform decision making.
Planning units are often created by subdividing a study region
into a set of square or hexagonal cells. They can also be created using
administrative boundaries (e.g. provinces), land management boundaries
(e.g. property boundaries derived from cadastral data), or
ecological boundaries (e.g. based on ecosystem classification data).
The size (i.e. spatial grain) of the planning units is often determined
based on a compromise between the scale needed to inform decision making, the
spatial accuracy (resolution) of available datasets, and
the computational resources available for generating prioritizations
(e.g. RAM and number of CPUs on your computer).
Second, you will need data to quantify the cost of implementing
each management action within each planning unit.
Critically, the cost data should reflect the management action(s)
considered in the exercise.
For example, costs are often specified using data that reflect economic
expenditure (e.g. land acquisition cost),
socioeconomic conditions (e.g. human population density),
opportunity costs of foregone commercial activities
(e.g. logging or agriculture), or
opportunity costs of foregone recreational activities
(e.g. recreational fishing).
In some cases -- depending on the management action(s) considered --
it can make sense to use a constant cost value
(e.g. all planning units are assigned a cost value equal to one)
or use a cost value based on spatial extent
(e.g. each planning unit is assigned a cost value based on its total area).
Also, in most cases, you want to avoid negative cost values.
This is because a negative value means that a place is \emph{desirable}
for implementing a management action, and such places will almost
always be selected for prioritization even if they provide no benefit.
Third, you will need data to quantify the benefits of implementing
management actions within planning units.
To achieve this, you will need to select a set of conservation features
that relate to the over-arching goals of the exercise.
For example, conservation features often include
species (e.g. Clouded Leopard), habitats (e.g. mangroves or
cloud forest), or ecosystems.
The benefit that each feature derives from a planning unit
can take a variety of forms, but is typically occupancy (i.e.
presence or absence), area of occurrence within each planning unit
(e.g. based on species' geographic range data), or
a measure of habitat suitability (e.g. estimated using a statistical model).
After compiling these data, you have the minimal data needed to generate
a prioritization.
A systematic conservation planning exercise involves prioritizing a set of
management actions to be implemented within certain planning units.
Critically, this prioritization should ideally optimize the trade-off
between benefits and costs.
To accomplish this, the \pkg{prioritizr} package uses input data
to formulate optimization problems (see Optimization section for details).
Broadly speaking, the goal of an optimization problem is to minimize
(or maximize) an objective function over a set of
decision variables, subject to a series of constraints.
Here, an objective function specifies the metric for evaluating
conservation plans. The decision variables are what we control, and usually
there is one binary variable for each planning unit to specify whether that
unit is selected or not (but other approaches are available, see
\link{decisions}). The constraints can be thought of as rules that must be
followed. For example, constraints can be used to ensure a prioritization
must stay within a certain budget. These constraints can also leverage
additional data to help ensure that prioritizations meet the over-arching
goals of the exercise. For example, to account for existing conservation
efforts, you could obtain data delineating the extent of existing protected
areas and use constraints to lock in planning units that are covered by them
(see \link{add_locked_in_constraints}).
}
\section{Optimization}{
The \pkg{prioritizr} package uses exact algorithms to solve reserve design
problems (see \link{solvers} for details).
To achieve this, it internally formulates mathematical optimization problems
using mixed integer linear programming (MILP). The general form of
such problems can be expressed in matrix notation using
the following equation.
\deqn{\mathit{Minimize} \space \mathbf{c}^{\mathbf{T}}\mathbf{x} \space
\mathit{subject \space to} \space
\mathbf{Ax} \geq, =, \mathit{or} \leq \mathbf{b}}{Minimize (c^T)*x subject to Ax \ge, =,
or \le b}
Here, \eqn{x} is a vector of decision variables, \eqn{c} and \eqn{b} are
vectors of known coefficients, and \eqn{A} is the constraint
matrix. The final term specifies a series of structural
constraints where relational operators for the constraint can be either
\eqn{\ge}, \eqn{=}, or \eqn{\le} the coefficients. For example, in the
minimum set cover problem, \eqn{c} would be a vector of costs for each
planning unit, \eqn{b} a vector of targets for each conservation feature,
the relational operator would be \eqn{\ge} for all features, and \eqn{A}
would be the representation matrix with \eqn{A_{ij}=r_{ij}}{Aij = rij}, the
representation level of feature \eqn{i} in planning unit \eqn{j}.
If you wish to see exactly how a conservation planning problem is
formulated as mixed integer linear programming problem, you can use
the \code{\link[=write_problem]{write_problem()}} function to save the optimization problem
to a plain-text file on your computer and then view it using a standard
text editor (e.g. Notepad).
Please note that this function internally computes the amount of each
feature in each planning unit when this data is not supplied (using the
\link{rij_matrix} function). As a consequence, it can take a while to
initialize large-scale conservation planning problems that involve
millions of planning units.
}
\examples{
# load data
data(sim_pu_raster, sim_pu_polygons, sim_pu_lines, sim_pu_points,
sim_pu_sf, sim_features)
# create problem using raster planning unit data
p1 <- problem(sim_pu_raster, sim_features) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
\dontrun{
# create problem using polygon (Spatial) planning unit data
p2 <- problem(sim_pu_polygons, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# create problem using line (Spatial) planning unit data
p3 <- problem(sim_pu_lines, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# create problem using point (Spatial) planning unit data
p4 <- problem(sim_pu_points, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# create problem using polygon (sf) planning unit data
p5 <- problem(sim_pu_sf, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# since geo-processing can be slow for large spatial vector datasets
# (e.g. polygons, lines, points), it can be worthwhile to pre-process the
# planning unit data so that it contains columns indicating the amount of
# each feature inside each planning unit
# (i.e. each column corresponds to a different feature)
# calculate the amount of each species within each planning unit
# (i.e. SpatialPolygonsDataFrame object)
pre_proc_data <- rij_matrix(sim_pu_polygons, sim_features)
# add extra columns to the polygon (Spatial) planning unit data
# to indicate the amount of each species within each planning unit
pre_proc_data <- as.data.frame(t(as.matrix(pre_proc_data)))
names(pre_proc_data) <- names(sim_features)
sim_pu_polygons@data <- cbind(sim_pu_polygons@data, pre_proc_data)
# create problem using the polygon (Spatial) planning unit data
# with the pre-processed columns
p6 <- problem(sim_pu_polygons, features = names(pre_proc_data), "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# this strategy of pre-processing columns can be used for sf objects too
pre_proc_data2 <- rij_matrix(sim_pu_sf, sim_features)
pre_proc_data2 <- as.data.frame(t(as.matrix(pre_proc_data2)))
names(pre_proc_data2) <- names(sim_features)
sim_pu_sf <- cbind(sim_pu_sf, pre_proc_data2)
# create problem using the polygon (sf) planning unit data
# with pre-processed columns
p7 <- problem(sim_pu_sf, features = names(pre_proc_data2), "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# in addition to spatially explicit data, pre-processed aspatial data
# can also be used to create a problem
# (e.g. data created using external spreadsheet software)
costs <- sim_pu_polygons$cost
features <- data.frame(id = seq_len(nlayers(sim_features)),
name = names(sim_features))
rij_mat <- rij_matrix(sim_pu_polygons, sim_features)
p8 <- problem(costs, features, rij_matrix = rij_mat) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# solve problems
s1 <- solve(p1)
s2 <- solve(p2)
s3 <- solve(p3)
s4 <- solve(p4)
s5 <- solve(p5)
s6 <- solve(p6)
s7 <- solve(p7)
s8 <- solve(p8)
# plot solutions for problems associated with spatial data
par(mfrow = c(3, 2), mar = c(0, 0, 4.1, 0))
plot(s1, main = "raster data", axes = FALSE, box = FALSE, legend = FALSE)
plot(s2, main = "polygon data")
plot(s2[s2$solution_1 > 0.5, ], col = "darkgreen", add = TRUE)
plot(s3, main = "line data")
lines(s3[s3$solution_1 > 0.5, ], col = "darkgreen", lwd = 2)
plot(s4, main = "point data", pch = 19)
points(s4[s4$solution_1 > 0.5, ], col = "darkgreen", cex = 2, pch = 19)
# note that as_Spatial() is for convenience to plot all solutions together
plot(as_Spatial(s5), main = "sf (polygon) data", pch = 19)
plot(as_Spatial(s5[s5$solution_1 > 0.5, ]), col = "darkgreen", add = TRUE)
plot(s6, main = "preprocessed data (polygon data)", pch = 19)
plot(s6[s6$solution_1 > 0.5, ], col = "darkgreen", add = TRUE)
# show solutions for problems associated with aspatial data
str(s8)
}
# create some problems with multiple zones
# first, create a matrix containing the targets for multi-zone problems
# here each row corresponds to a different feature, each
# column corresponds to a different zone, and values correspond
# to the total (absolute) amount of a given feature that needs to be secured
# in a given zone
targets <- matrix(rpois(15, 1),
nrow = number_of_features(sim_features_zones),
ncol = number_of_zones(sim_features_zones),
dimnames = list(feature_names(sim_features_zones),
zone_names(sim_features_zones)))
# print targets
print(targets)
# create a multi-zone problem with raster data
p8 <- problem(sim_pu_zones_stack, sim_features_zones) \%>\%
add_min_set_objective() \%>\%
add_absolute_targets(targets) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
\dontrun{
# solve problem
s8 <- solve(p8)
# plot solution
# here, each layer/panel corresponds to a different zone and pixel values
# indicate if a given planning unit has been allocated to a given zone
par(mfrow = c(1, 1))
plot(s8, main = c("zone 1", "zone 2", "zone 3"), axes = FALSE, box = FALSE)
# alternatively, the category_layer function can be used to create
# a new raster object containing the zone ids for each planning unit
# in the solution (note this only works for problems with binary decisions)
par(mfrow = c(1, 1))
plot(category_layer(s8), axes = FALSE, box = FALSE)
# create a multi-zone problem with polygon data
p9 <- problem(sim_pu_zones_polygons, sim_features_zones,
cost_column = c("cost_1", "cost_2", "cost_3")) \%>\%
add_min_set_objective() \%>\%
add_absolute_targets(targets) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# solve problem
s9 <- solve(p9)
# create column containing the zone id for which each planning unit was
# allocated to in the solution
s9$solution <- category_vector(s9@data[, c("solution_1_zone_1",
"solution_1_zone_2",
"solution_1_zone_3")])
s9$solution <- factor(s9$solution)
# plot solution
spplot(s9, zcol = "solution", main = "solution", axes = FALSE, box = FALSE)
# create a multi-zone problem with polygon planning unit data
# and where fields (columns) in the attribute table correspond
# to feature abundances
# first fields need to be added to the planning unit data
# which indicate the amount of each feature in each zone
# to do this, the fields will be populated with random counts
sim_pu_zones_polygons$spp1_z1 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp2_z1 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp3_z1 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp1_z2 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp2_z2 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp3_z2 <- rpois(nrow(sim_pu_zones_polygons), 1)
# create problem with polygon planning unit data and use field names
# to indicate feature data
# additionally, to make this example slightly more interesting,
# the problem will have proportion-type decisions such that
# a proportion of each planning unit can be allocated to each of the
# two management zones
p10 <- problem(sim_pu_zones_polygons,
zones(c("spp1_z1", "spp2_z1", "spp3_z1"),
c("spp1_z2", "spp2_z2", "spp3_z2"),
zone_names = c("z1", "z2")),
cost_column = c("cost_1", "cost_2")) \%>\%
add_min_set_objective() \%>\%
add_absolute_targets(targets[1:3, 1:2]) \%>\%
add_proportion_decisions() \%>\%
add_default_solver(verbose = FALSE)
# solve problem
s10 <- solve(p10)
# plot solution
spplot(s10, zcol = c("solution_1_z1", "solution_1_z2"), main = "solution",
axes = FALSE, box = FALSE)
}
}
\seealso{
See \code{\link[=solve]{solve()}} for details on solving a problem to generate solutions.
Also, see \link{objectives}, \link{penalties}, \link{targets}, \link{constraints},
\link{decisions}, \link{portfolios}, \link{solvers} for information on customizing problems.
Additionally, see \link{summaries} and \link{importance} for information on
evaluating solutions.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/problem.R
\name{problem}
\alias{problem}
\alias{problem,Raster,Raster-method}
\alias{problem,Spatial,Raster-method}
\alias{problem,data.frame,data.frame-method}
\alias{problem,numeric,data.frame-method}
\alias{problem,data.frame,character-method}
\alias{problem,Spatial,character-method}
\alias{problem,Raster,ZonesRaster-method}
\alias{problem,Spatial,ZonesRaster-method}
\alias{problem,Spatial,ZonesCharacter-method}
\alias{problem,data.frame,ZonesCharacter-method}
\alias{problem,matrix,data.frame-method}
\alias{problem,sf,Raster-method}
\alias{problem,sf,ZonesCharacter-method}
\alias{problem,sf,character-method}
\alias{problem,sf,ZonesRaster-method}
\title{Conservation planning problem}
\usage{
problem(x, features, ...)
\S4method{problem}{Raster,Raster}(x, features, run_checks, ...)
\S4method{problem}{Raster,ZonesRaster}(x, features, run_checks, ...)
\S4method{problem}{Spatial,Raster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{Spatial,ZonesRaster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{Spatial,character}(x, features, cost_column, ...)
\S4method{problem}{Spatial,ZonesCharacter}(x, features, cost_column, ...)
\S4method{problem}{data.frame,character}(x, features, cost_column, ...)
\S4method{problem}{data.frame,ZonesCharacter}(x, features, cost_column, ...)
\S4method{problem}{data.frame,data.frame}(x, features, rij, cost_column, zones, ...)
\S4method{problem}{numeric,data.frame}(x, features, rij_matrix, ...)
\S4method{problem}{matrix,data.frame}(x, features, rij_matrix, ...)
\S4method{problem}{sf,Raster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{sf,ZonesRaster}(x, features, cost_column, run_checks, ...)
\S4method{problem}{sf,character}(x, features, cost_column, ...)
\S4method{problem}{sf,ZonesCharacter}(x, features, cost_column, ...)
}
\arguments{
\item{x}{\code{\linkS4class{Raster}},
\code{\link[sf:sf]{sf::st_sf()}},
\code{\linkS4class{SpatialPolygonsDataFrame}},
\code{\linkS4class{SpatialLinesDataFrame}},
\code{\linkS4class{SpatialPointsDataFrame}},
\code{\link[=data.frame]{data.frame()}} object,
\code{\link[=numeric]{numeric()}} vector, or
\code{\link[=matrix]{matrix()}} specifying the planning units to use in the reserve
design exercise and their corresponding cost. It may be desirable to
exclude some planning units from the analysis, for example those outside
the study area. To exclude planning units, set the cost for those raster
cells to \code{NA}, or use the \code{add_locked_out_constraint} function.}
\item{features}{The feature data can be specified in a variety of ways.
The specific formats that can be used depend on the cost data format (i.e.
argument to \code{x}) and whether the problem should have a single zone or
multiple zones. If the problem should have a single zone, then the feature
data can be specified following:
\itemize{
\item \code{\link[raster:Raster-classes]{x = RasterLayer-class}}, or
\code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}:
\code{\link[raster:Raster-classes]{y = Raster-class}}
object showing the distribution of conservation features. Missing
values (i.e. \code{NA} values) can be used to indicate the absence of
a feature in a particular cell instead of explicitly setting these
cells to zero. Note that this argument type for \code{features} can
only be used to specify data for problems involving a single zone.
\item \code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}, or
\code{x = data.frame}:
\code{y = character} vector
with column names that correspond to the abundance or occurrence of
different features in each planning unit. Note that this argument
type can only be used to create problems involving a single zone.
\item \code{x = data.frame}, or
\code{x = numeric} vector, or
\code{x = matrix}:
\code{y = data.frame} object
containing the names of the features. Note that if this
type of argument is supplied to \code{features} then the argument
\code{rij} or \code{rij_matrix} must also be supplied. This type of
argument should follow the conventions used by \emph{Marxan}, wherein
each row corresponds to a different feature. It must also contain the
following columns:
\describe{
\item{id}{\code{integer} unique identifier for each feature.
These identifiers are used in the argument to \code{rij}.}
\item{name}{\code{character} name for each feature.}
\item{prop}{\code{numeric} relative target for each feature
(optional).}
\item{amount}{\code{numeric} absolute target for each
feature (optional).}
}
}
If the problem should have multiple zones, then the feature
data can be specified following:
\itemize{
\item \code{\link[raster:Raster-classes]{x = RasterStack-class}}, or
\code{\link[raster:Raster-classes]{x = RasterBrick-class}}, or
\code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}:
\code{\link[=zones]{y = ZonesRaster}}:
object showing the distribution of conservation features in multiple
zones. As above, missing values (i.e. \code{NA} values) can be used to
indicate the absence of a feature in a particular cell instead of
explicitly setting these cells to zero.
\item \code{\link[sp:Spatial-class]{x = Spatial-class}}, or
\code{\link[sf:sf]{x = sf::st_sf()}}, or
\code{x = data.frame}:
\code{\link[=zones]{y = ZonesCharacter}}
object with column names that correspond to the abundance or
occurrence of different features in each planning unit in different
zones.
}}
\item{...}{not used.}
\item{run_checks}{\code{logical} flag indicating whether checks should be
run to ensure the integrity of the input data. These checks are run by
default; however, for large datasets they may increase run time. If it is
taking a prohibitively long time to create the prioritization problem, it
is suggested to try setting \code{run_checks} to \code{FALSE}.}
\item{cost_column}{\code{character} name or \code{integer} indicating the
column(s) with the cost data. This argument must be supplied when the
argument to \code{x} is a \code{\linkS4class{Spatial}} or
\code{data.frame} object. This argument should contain the name of each
column containing cost data for each management zone when creating
problems with multiple zones. To create a problem with a single zone, then
set the argument to \code{cost_column} as a single column name.}
\item{rij}{\code{data.frame} containing information on the amount of
each feature in each planning unit assuming each management zone. Similar
to \code{data.frame} arguments for \code{features}, the \code{data.frame}
objects must follow the conventions used by \emph{Marxan}. Note that the
\code{"zone"} column is not needed for problems involving a single
management zone. Specifically, the argument should contain the following
columns:
\describe{
\item{pu}{\code{integer} planning unit identifier.}
\item{species}{\code{integer} feature identifier.}
\item{zone}{\code{integer} zone identifier (optional for
problems involving a single zone).}
\item{amount}{\code{numeric} amount of the feature in the
planning unit.}
}}
\item{zones}{\code{data.frame} containing information on the zones. This
argument is only used when argument to \code{x} and \code{y} are
both \code{data.frame} objects and the problem being built contains
multiple zones. Following conventions used in \code{MarZone}, this
argument should contain the following columns:
\describe{
\item{id}{\code{integer} zone identifier.}
\item{name}{\code{character} zone name.}
}}
\item{rij_matrix}{\code{list} of \code{matrix} or
\code{\linkS4class{dgCMatrix}}
objects specifying the amount of each feature (rows) within each planning
unit (columns) for each zone. The \code{list} elements denote
different zones, matrix rows denote features, and matrix columns denote
planning units. For convenience, the argument to
\code{rij_matrix} can be a single \code{matrix} or
\code{\linkS4class{dgCMatrix}} when specifying a problem with a
single management zone. This argument is only used when the argument
to \code{x} is a \code{numeric} or \code{matrix} object.}
}
\value{
\code{\linkS4class{ConservationProblem}} object containing
data for a prioritization.
}
\description{
Create a systematic conservation planning problem. This function is used to
specify the basic data used in a spatial prioritization problem: the
spatial distribution of the planning units and their costs, as well as
the features (e.g. species, ecosystems) that need to be conserved. After
constructing this \code{ConservationProblem-class} object, it can be
customized to meet specific goals using \link{objectives},
\link{targets}, \link{constraints}, and
\link{penalties}. After building the problem, the
\code{\link[=solve]{solve()}} function can be used to identify solutions.
\strong{Note that problems require an objective, and failing to specify
an objective will throw an error when attempting to solve it.}
}
\details{
A systematic conservation planning exercise leverages data to help inform
conservation decision making. To help ensure that the
data -- and resulting prioritizations -- are relevant to the over-arching
goals of the exercise, you should decide on the management action
(or set of actions) that need to be considered in the exercise.
For example, these actions could include establishing protected areas,
selecting land for conservation easements, restoring habitat,
planting trees for carbon sequestration, eradicating invasive
species, or some combination of the previous actions.
If the exercise involves multiple different actions, they can
be incorporated by using multiple zones
(see the Management Zones vignette for details). After deciding
on the management action(s), you can compile the following data.
First, you will need to create a set of planning units
(i.e. discrete spatial areas) to inform decision making.
Planning units are often created by subdividing a study region
into a set of square or hexagonal cells. They can also be created using
administrative boundaries (e.g. provinces), land management boundaries
(e.g. property boundaries derived from cadastral data), or
ecological boundaries (e.g. based on ecosystem classification data).
The size (i.e. spatial grain) of the planning units is often determined
based on a compromise between the scale needed to inform decision making, the
spatial accuracy (resolution) of available datasets, and
the computational resources available for generating prioritizations
(e.g. RAM and number of CPUs on your computer).
Second, you will need data to quantify the cost of implementing
each management action within each planning unit.
Critically, the cost data should reflect the management action(s)
considered in the exercise.
For example, costs are often specified using data that reflect economic
expenditure (e.g. land acquisition cost),
socioeconomic conditions (e.g. human population density),
opportunity costs of foregone commercial activities
(e.g. logging or agriculture), or
opportunity costs of foregone recreational activities
(e.g. recreational fishing).
In some cases -- depending on the management action(s) considered --
it can make sense to use a constant cost value
(e.g. all planning units are assigned a cost value equal to one)
or use a cost value based on spatial extent
(e.g. each planning unit is assigned a cost value based on its total area).
Also, in most cases, you want to avoid negative cost values.
This is because a negative value means that a place is \emph{desirable}
for implementing a management action, and such places will almost
always be selected for prioritization even if they provide no benefit.
Third, you will need data to quantify the benefits of implementing
management actions within planning units.
To achieve this, you will need to select a set of conservation features
that relate to the over-arching goals of the exercise.
For example, conservation features often include
species (e.g. Clouded Leopard), habitats (e.g. mangroves or
cloud forest), or ecosystems.
The benefit that each feature derives from a planning unit
can take a variety of forms, but is typically occupancy (i.e.
presence or absence), area of occurrence within each planning unit
(e.g. based on species' geographic range data), or
a measure of habitat suitability (e.g. estimated using a statistical model).
After compiling these data, you have the minimal data needed to generate
a prioritization.
A systematic conservation planning exercise involves prioritizing a set of
management actions to be implemented within certain planning units.
Critically, this prioritization should ideally optimize the trade-off
between benefits and costs.
To accomplish this, the \pkg{prioritizr} package uses input data
to formulate optimization problems (see Optimization section for details).
Broadly speaking, the goal of an optimization problem is to minimize
(or maximize) an objective function over a set of
decision variables, subject to a series of constraints.
Here, an objective function specifies the metric for evaluating
conservation plans. The decision variables are what we control, and usually
there is one binary variable for each planning unit to specify whether that
unit is selected or not (but other approaches are available, see
\link{decisions}). The constraints can be thought of as rules that must be
followed. For example, constraints can be used to ensure a prioritization
must stay within a certain budget. These constraints can also leverage
additional data to help ensure that prioritizations meet the over-arching
goals of the exercise. For example, to account for existing conservation
efforts, you could obtain data delineating the extent of existing protected
areas and use constraints to lock in planning units that are covered by them
(see \link{add_locked_in_constraints}).
}
\section{Optimization}{
The \pkg{prioritizr} package uses exact algorithms to solve reserve design
problems (see \link{solvers} for details).
To achieve this, it internally formulates mathematical optimization problems
using mixed integer linear programming (MILP). The general form of
such problems can be expressed in matrix notation using
the following equation.
\deqn{\mathit{Minimize} \space \mathbf{c}^{\mathbf{T}}\mathbf{x} \space
\mathit{subject \space to} \space
\mathbf{Ax} \geq, =, \mathit{or} \leq \mathbf{b}}{Minimize (c^T)*x subject to Ax \ge, =,
or \le b}
Here, \eqn{x} is a vector of decision variables, \eqn{c} and \eqn{b} are
vectors of known coefficients, and \eqn{A} is the constraint
matrix. The final term specifies a series of structural
constraints where relational operators for the constraint can be either
\eqn{\ge}, \eqn{=}, or \eqn{\le} the coefficients. For example, in the
minimum set cover problem, \eqn{c} would be a vector of costs for each
planning unit, \eqn{b} a vector of targets for each conservation feature,
the relational operator would be \eqn{\ge} for all features, and \eqn{A}
would be the representation matrix with \eqn{A_{ij}=r_{ij}}{Aij = rij}, the
representation level of feature \eqn{i} in planning unit \eqn{j}.
If you wish to see exactly how a conservation planning problem is
formulated as mixed integer linear programming problem, you can use
the \code{\link[=write_problem]{write_problem()}} function to save the optimization problem
to a plain-text file on your computer and then view it using a standard
text editor (e.g. Notepad).
Please note that this function internally computes the amount of each
feature in each planning unit when this data is not supplied (using the
\link{rij_matrix} function). As a consequence, it can take a while to
initialize large-scale conservation planning problems that involve
millions of planning units.
}
\examples{
# load data
data(sim_pu_raster, sim_pu_polygons, sim_pu_lines, sim_pu_points,
sim_pu_sf, sim_features)
# create problem using raster planning unit data
p1 <- problem(sim_pu_raster, sim_features) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
\dontrun{
# create problem using polygon (Spatial) planning unit data
p2 <- problem(sim_pu_polygons, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# create problem using line (Spatial) planning unit data
p3 <- problem(sim_pu_lines, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# create problem using point (Spatial) planning unit data
p4 <- problem(sim_pu_points, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# create problem using polygon (sf) planning unit data
p5 <- problem(sim_pu_sf, sim_features, "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# since geo-processing can be slow for large spatial vector datasets
# (e.g. polygons, lines, points), it can be worthwhile to pre-process the
# planning unit data so that it contains columns indicating the amount of
# each feature inside each planning unit
# (i.e. each column corresponds to a different feature)
# calculate the amount of each species within each planning unit
# (i.e. SpatialPolygonsDataFrame object)
pre_proc_data <- rij_matrix(sim_pu_polygons, sim_features)
# add extra columns to the polygon (Spatial) planning unit data
# to indicate the amount of each species within each planning unit
pre_proc_data <- as.data.frame(t(as.matrix(pre_proc_data)))
names(pre_proc_data) <- names(sim_features)
sim_pu_polygons@data <- cbind(sim_pu_polygons@data, pre_proc_data)
# create problem using the polygon (Spatial) planning unit data
# with the pre-processed columns
p6 <- problem(sim_pu_polygons, features = names(pre_proc_data), "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# this strategy of pre-processing columns can be used for sf objects too
pre_proc_data2 <- rij_matrix(sim_pu_sf, sim_features)
pre_proc_data2 <- as.data.frame(t(as.matrix(pre_proc_data2)))
names(pre_proc_data2) <- names(sim_features)
sim_pu_sf <- cbind(sim_pu_sf, pre_proc_data2)
# create problem using the polygon (sf) planning unit data
# with pre-processed columns
p7 <- problem(sim_pu_sf, features = names(pre_proc_data2), "cost") \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# in addition to spatially explicit data, pre-processed aspatial data
# can also be used to create a problem
# (e.g. data created using external spreadsheet software)
costs <- sim_pu_polygons$cost
features <- data.frame(id = seq_len(nlayers(sim_features)),
name = names(sim_features))
rij_mat <- rij_matrix(sim_pu_polygons, sim_features)
p8 <- problem(costs, features, rij_matrix = rij_mat) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.2) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# solve problems
s1 <- solve(p1)
s2 <- solve(p2)
s3 <- solve(p3)
s4 <- solve(p4)
s5 <- solve(p5)
s6 <- solve(p6)
s7 <- solve(p7)
s8 <- solve(p8)
# plot solutions for problems associated with spatial data
par(mfrow = c(3, 2), mar = c(0, 0, 4.1, 0))
plot(s1, main = "raster data", axes = FALSE, box = FALSE, legend = FALSE)
plot(s2, main = "polygon data")
plot(s2[s2$solution_1 > 0.5, ], col = "darkgreen", add = TRUE)
plot(s3, main = "line data")
lines(s3[s3$solution_1 > 0.5, ], col = "darkgreen", lwd = 2)
plot(s4, main = "point data", pch = 19)
points(s4[s4$solution_1 > 0.5, ], col = "darkgreen", cex = 2, pch = 19)
# note that as_Spatial() is for convenience to plot all solutions together
plot(as_Spatial(s5), main = "sf (polygon) data", pch = 19)
plot(as_Spatial(s5[s5$solution_1 > 0.5, ]), col = "darkgreen", add = TRUE)
plot(s6, main = "preprocessed data (polygon data)", pch = 19)
plot(s6[s6$solution_1 > 0.5, ], col = "darkgreen", add = TRUE)
# show solutions for problems associated with aspatial data
str(s8)
}
# create some problems with multiple zones
# first, create a matrix containing the targets for multi-zone problems
# here each row corresponds to a different feature, each
# column corresponds to a different zone, and values correspond
# to the total (absolute) amount of a given feature that needs to be secured
# in a given zone
targets <- matrix(rpois(15, 1),
nrow = number_of_features(sim_features_zones),
ncol = number_of_zones(sim_features_zones),
dimnames = list(feature_names(sim_features_zones),
zone_names(sim_features_zones)))
# print targets
print(targets)
# create a multi-zone problem with raster data
p8 <- problem(sim_pu_zones_stack, sim_features_zones) \%>\%
add_min_set_objective() \%>\%
add_absolute_targets(targets) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
\dontrun{
# solve problem
s8 <- solve(p8)
# plot solution
# here, each layer/panel corresponds to a different zone and pixel values
# indicate if a given planning unit has been allocated to a given zone
par(mfrow = c(1, 1))
plot(s8, main = c("zone 1", "zone 2", "zone 3"), axes = FALSE, box = FALSE)
# alternatively, the category_layer function can be used to create
# a new raster object containing the zone ids for each planning unit
# in the solution (note this only works for problems with binary decisions)
par(mfrow = c(1, 1))
plot(category_layer(s8), axes = FALSE, box = FALSE)
# create a multi-zone problem with polygon data
p9 <- problem(sim_pu_zones_polygons, sim_features_zones,
cost_column = c("cost_1", "cost_2", "cost_3")) \%>\%
add_min_set_objective() \%>\%
add_absolute_targets(targets) \%>\%
add_binary_decisions() \%>\%
add_default_solver(verbose = FALSE)
# solve problem
s9 <- solve(p9)
# create column containing the zone id for which each planning unit was
# allocated to in the solution
s9$solution <- category_vector(s9@data[, c("solution_1_zone_1",
"solution_1_zone_2",
"solution_1_zone_3")])
s9$solution <- factor(s9$solution)
# plot solution
spplot(s9, zcol = "solution", main = "solution", axes = FALSE, box = FALSE)
# create a multi-zone problem with polygon planning unit data
# and where fields (columns) in the attribute table correspond
# to feature abundances
# first fields need to be added to the planning unit data
# which indicate the amount of each feature in each zone
# to do this, the fields will be populated with random counts
sim_pu_zones_polygons$spp1_z1 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp2_z1 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp3_z1 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp1_z2 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp2_z2 <- rpois(nrow(sim_pu_zones_polygons), 1)
sim_pu_zones_polygons$spp3_z2 <- rpois(nrow(sim_pu_zones_polygons), 1)
# create problem with polygon planning unit data and use field names
# to indicate feature data
# additionally, to make this example slightly more interesting,
# the problem will have proportion-type decisions such that
# a proportion of each planning unit can be allocated to each of the
# two management zones
p10 <- problem(sim_pu_zones_polygons,
zones(c("spp1_z1", "spp2_z1", "spp3_z1"),
c("spp1_z2", "spp2_z2", "spp3_z2"),
zone_names = c("z1", "z2")),
cost_column = c("cost_1", "cost_2")) \%>\%
add_min_set_objective() \%>\%
add_absolute_targets(targets[1:3, 1:2]) \%>\%
add_proportion_decisions() \%>\%
add_default_solver(verbose = FALSE)
# solve problem
s10 <- solve(p10)
# plot solution
spplot(s10, zcol = c("solution_1_z1", "solution_1_z2"), main = "solution",
axes = FALSE, box = FALSE)
}
}
\seealso{
See \code{\link[=solve]{solve()}} for details on solving a problem to generate solutions.
Also, see \link{objectives}, \link{penalties}, \link{targets}, \link{constraints},
\link{decisions}, \link{portfolios}, \link{solvers} for information on customizing problems.
Additionally, see \link{summaries} and \link{importance} for information on
evaluating solutions.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response_matrices.R
\name{response}
\alias{response}
\title{compute PDX response}
\usage{
response(object, model.id = NULL, batch = NULL,
res.measure = c("mRECIST", "slope", "AUC", "angle", "abc", "TGI",
"lmm"), treatment.only = FALSE, max.time = NULL,
impute.value = TRUE, min.time = 10, concurrent.time = TRUE,
vol.normal = FALSE, log.volume = FALSE, verbose = TRUE)
}
\arguments{
\item{object}{Xeva object.}
\item{model.id}{\code{model.id} for which the drug response is to be computed.}
\item{batch}{\code{batch.id} or experiment design for which the drug response is to be computed.}
\item{res.measure}{Drug response measure. See \code{Details} below}
\item{treatment.only}{Default \code{FALSE}. If \code{TRUE}, give data for non-zero dose periods only (if dose data are available).}
\item{max.time}{Maximum time for data.}
\item{impute.value}{Default \code{TRUE}. If \code{TRUE}, impute the missing values.}
\item{min.time}{Default \strong{10} days. Used for \emph{mRECIST} computation.}
\item{concurrent.time}{Default \code{TRUE}. If \code{TRUE}, cut the batch data such that control and treatment will end at same time point.}
\item{vol.normal}{If TRUE it will normalize the volume. Default \code{FALSE}.}
\item{log.volume}{If TRUE log of the volume will be used for response calculation. Default \code{FALSE}}
\item{verbose}{Default \code{TRUE} will print information.}
}
\value{
Returns model or batch drug response object.
}
\description{
\code{response} Computes the drug response of an individual PDX model or batch.
}
\details{
At present the following response measures are implemented
\itemize{
\item mRECIST Computes mRECIST for individual PDX models
\item slope Computes slope of the fitted individual PDX curves
\item AUC Computes area under a PDX curve for individual PDX models
\item angle Computes angle between treatment and control PDX curves
\item abc Computes area between the treatment and control PDX curves
\item TGI Computes tumor growth inhibition using treatment and control PDX curves
\item lmm Computes linear mixed model (lmm) statistics for a PDX batch
}
}
\examples{
data(brca)
response(brca, model.id="X.1004.BG98", res.measure="mRECIST")
response(brca, batch="X-6047.paclitaxel", res.measure="angle")
ed <- list(batch.name="myBatch", treatment=c("X.6047.LJ16","X.6047.LJ16.trab"),
control=c("X.6047.uned"))
response(brca, batch=ed, res.measure="angle")
}
| /man/response.Rd | no_license | Feigeliudan01/Xeva | R | false | true | 2,510 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response_matrices.R
\name{response}
\alias{response}
\title{compute PDX response}
\usage{
response(object, model.id = NULL, batch = NULL,
res.measure = c("mRECIST", "slope", "AUC", "angle", "abc", "TGI",
"lmm"), treatment.only = FALSE, max.time = NULL,
impute.value = TRUE, min.time = 10, concurrent.time = TRUE,
vol.normal = FALSE, log.volume = FALSE, verbose = TRUE)
}
\arguments{
\item{object}{Xeva object.}
\item{model.id}{\code{model.id} for which the drug response is to be computed.}
\item{batch}{\code{batch.id} or experiment design for which the drug response is to be computed.}
\item{res.measure}{Drug response measure. See \code{Details} below}
\item{treatment.only}{Default \code{FALSE}. If \code{TRUE}, give data for non-zero dose periods only (if dose data are available).}
\item{max.time}{Maximum time for data.}
\item{impute.value}{Default \code{TRUE}. If \code{TRUE}, impute the missing values.}
\item{min.time}{Default \strong{10} days. Used for \emph{mRECIST} computation.}
\item{concurrent.time}{Default \code{TRUE}. If \code{TRUE}, cut the batch data such that control and treatment will end at same time point.}
\item{vol.normal}{If TRUE it will normalize the volume. Default \code{FALSE}.}
\item{log.volume}{If TRUE log of the volume will be used for response calculation. Default \code{FALSE}}
\item{verbose}{Default \code{TRUE} will print information.}
}
\value{
Returns model or batch drug response object.
}
\description{
\code{response} Computes the drug response of an individual PDX model or batch.
}
\details{
At present the following response measures are implemented
\itemize{
\item mRECIST Computes mRECIST for individual PDX models
\item slope Computes slope of the fitted individual PDX curves
\item AUC Computes area under a PDX curve for individual PDX models
\item angle Computes angle between treatment and control PDX curves
\item abc Computes area between the treatment and control PDX curves
\item TGI Computes tumor growth inhibition using treatment and control PDX curves
\item lmm Computes linear mixed model (lmm) statistics for a PDX batch
}
}
\examples{
data(brca)
response(brca, model.id="X.1004.BG98", res.measure="mRECIST")
response(brca, batch="X-6047.paclitaxel", res.measure="angle")
ed <- list(batch.name="myBatch", treatment=c("X.6047.LJ16","X.6047.LJ16.trab"),
control=c("X.6047.uned"))
response(brca, batch=ed, res.measure="angle")
}
|
## ragtop -- convertibles pricing in R
##
## Copyright (C) 2016 Brian Boonstra <ragtop@boonstra.org>
##
## This file is part of the ragtop package for GNU R.
## It is made available under the terms of the GNU General Public
## License, version 2, or at your option, any later version,
## incorporated herein by reference.
##
## This program is distributed in the hope that it will be
## useful, but WITHOUT ANY WARRANTY; without even the implied
## warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the GNU General Public License for more
## details.
##
## You should have received a copy of the GNU General Public License
## along with ragtop. If not, see <http://www.gnu.org/licenses/>.
library(futile.logger)
#' Create a variance cumulation function from a volatility term structure
#'
#' Given a volatility term structure, create a corresponding variance
#' cumulation function. The function assumes piecewise constant
#' forward volatility, with the final such forward volatility
#' extending to infinity.
#'
#' @param vols_df A data.frame with numeric columns \code{time} (in
#' increasing order) and \code{volatility} (not decreasing so quickly
#' as to give negative forward variance)
#' @return A function taking two time arguments, which returns the cumulated
#' variance from the second to the first
#' @examples
#' vc = variance_cumulation_from_vols(
#' data.frame(time=c(0.1,2,3),
#' volatility=c(0.2,0.5,1.2)))
#' vc(1.5, 0)
#'
#' @export variance_cumulation_from_vols
variance_cumulation_from_vols = function(vols_df)
{
  N = nrow(vols_df)
  ## Total variance accumulated from time 0 to each quoted time, with a
  ## leading zero entry standing for time 0 itself.
  cumulated_variances = c(0, vols_df$volatility^2 * vols_df$time)
  if (any(cumulated_variances<0)) {
    stop("Nonsensical variance")
  }
  ## Variance accumulated between successive quoted times; a piecewise
  ## constant forward volatility model requires these to be nonnegative.
  fwd_variances = diff(cumulated_variances)
  if (any(fwd_variances<0)) {
    stop("Nonsensical negative forward variance")
  }
  augmented_t = c(0, vols_df$time)
  time_diffs = diff(augmented_t)
  max_t = max(vols_df$time)
  ## Forward volatility applying after each quoted time; the final one is
  ## extended flat beyond max_t.  (A previously assigned but unused local
  ## `last_vol` has been removed.)
  vols_df$fwd_vols = sqrt(fwd_variances/time_diffs)
  ## Cumulated variance from time 0 to time x.
  cumul_var_0 = function(x) {
    if (x==0) {
      cmvar = 0
    } else if (x>=max_t) {
      ## Beyond the last quoted time: extend the final forward vol flat.
      cmvar = cumulated_variances[[N+1]] + vols_df$fwd_vols[[N]]^2 * (x-max_t)
      flog.debug("Found %s was beyond max_t %s, N=%s, using time diff %s from last anchor time max_t=%s applied to fwd vol %s and prev var %s",
                 x, max_t, N, (x-max_t), max_t, vols_df$fwd_vols[[N]], cumulated_variances[[N]],
                 name='ragtop.term_structures.variance_cumulation_from_vols')
    } else {
      k = findInterval(x, augmented_t) # Will not be larger than N
      dt = x - augmented_t[[k]]
      if (dt<0) {
        stop("Negative time interval after call to findInterval() in variance_cumulation_from_vols()")
      }
      cmvar = cumulated_variances[[k]] + vols_df$fwd_vols[[k]]^2 * dt
      flog.debug("Found k=%s, using time diff %s from prev anchor time %s applied to fwd vol %s and prev var %s",
                 k, dt, augmented_t[[k]], vols_df$fwd_vols[[k]], cumulated_variances[[k]],
                 name='ragtop.term_structures.variance_cumulation_from_vols')
    }
    cmvar
  }
  ## Cumulated variance between times t and T (t defaults to 0), with a
  ## sanity check that strictly positive intervals carry positive variance.
  cumul_var = function(T,t=0) {
    cv = cumul_var_0(T) - cumul_var_0(t)
    if ((T>t) && (cv<=0)) {
      stop("Nonsensical cumulative variance ", cv, " from t=", t, " to T=", T)
    }
    cv
  }
  cumul_var
}
#' Create a discount factor function from a yield curve
#'
#' Use a piecewise constant approximation to the given spot curve to
#' generate a function capable of returning corresponding discount factors
#'
#' @param yield_curve A data.frame with numeric columns \code{time} (in
#' increasing order) and \code{rate} (in natural units)
#' @return A function taking two time arguments, which returns the discount factor from the second to the first
#' @examples
#' disct_fcn = ragtop::spot_to_df_fcn(
#' data.frame(time=c(1, 5, 10, 15),
#' rate=c(0.01, 0.02, 0.03, 0.05)))
#' print(disct_fcn(1, 0.5))
#' @export spot_to_df_fcn
spot_to_df_fcn = function(yield_curve) {
  ## Discount factor to each quoted time implied by the spot rates.
  yield_curve$dfs = exp(-yield_curve$time * yield_curve$rate)
  n_df = length(yield_curve$dfs)
  df_next = yield_curve$dfs[2:n_df]
  df_prev = yield_curve$dfs[1:(n_df - 1)]
  ## Piecewise constant forward rates implied between successive quotes.
  fwd_rates = -log(df_next / df_prev) / diff(yield_curve$time)
  ## Each quoted time carries the forward rate applying after it; the
  ## final forward rate extends flat beyond the end of the quoted curve.
  yield_curve$fwd_rate = c(fwd_rates, fwd_rates[[length(fwd_rates)]])
  ## Discount factor from time 0 to time x under the piecewise model.
  df_to = function(x) {
    k = findInterval(x, yield_curve$time)
    if (k > 0) {
      elapsed = x - yield_curve$time[[k]]
      yield_curve$dfs[[k]] * exp(-yield_curve$fwd_rate[[k]] * elapsed)
    } else {
      ## Before the first quoted time: apply the first spot rate directly.
      exp(-yield_curve$rate[[1]] * x)
    }
  }
  ## Discount factor from t to T (t defaults to 0).
  function(T, t = 0, ...) {
    df_to(T) / df_to(t)
  }
}
#' Get a US Treasury curve discount factor function
#'
#' @param on_date Date for which to query Quandl for the curve
#' @return A function taking two time arguments, which returns the discount factor from the second to the first
#' @export Quandl_df_fcn_UST_raw
Quandl_df_fcn_UST_raw = function(on_date) {
  ## Guard on the Quandl package, which performs the actual query.  (The
  ## original code tested for R.cache here, but R.cache is only needed by
  ## the memoizing wrapper Quandl_df_fcn_UST; the error message below
  ## already names Quandl as the required package.)
  if (requireNamespace("Quandl", quietly = TRUE)) {
    yield_curve_elems = Quandl::Quandl("USTREASURY/YIELD", start_date=on_date, end_date=on_date)
    yield_curve_elems$Date = NULL
    yc_rates = as.numeric(yield_curve_elems)/100 # Values are reported as percent
    ## Tenors reported by USTREASURY/YIELD (1mo, 3mo, 6mo, 1-30yr), plus an
    ## artificial zero-rate point at time 0
    yield_curve = data.frame(time=c(0, 30/360, 90/360, 1/2, 1,2,3,5,7,10,20,30), rate=c(0,yc_rates))
    df_frame = spot_to_df_fcn(yield_curve)
  } else {
    flog.error('Quandl package not available for treasury curve queries')
    ## NOTE(review): the documented return value is a discount factor
    ## function; this empty data.frame fallback is kept for backward
    ## compatibility but callers should treat it as a failure signal.
    df_frame = data.frame()
  }
  df_frame
}
#' Get a US Treasury curve discount factor function
#'
#' This is a caching wrapper for \code{\link{Quandl_df_fcn_UST_raw}}
#'
#' @param ... Arguments passed to \code{\link{Quandl_df_fcn_UST_raw}}
#' @param envir Environment passed to \code{\link{Quandl_df_fcn_UST_raw}}
#' @return A function taking two time arguments, which returns the discount factor from the second to the first
#' @export Quandl_df_fcn_UST
Quandl_df_fcn_UST = function(...,envir=parent.frame()) {
  ## Plain pass-through fallback used when R.cache is not installed.
  ## NOTE(review): the envir argument is accepted but never used here.
  Quandl_df_fcn_UST_raw(...)
}
## When R.cache is installed, rebind Quandl_df_fcn_UST to a memoized
## version of the raw query so repeated requests for the same date are
## served from cache rather than re-querying Quandl.
if (is.element('R.cache', utils::installed.packages()[,1])) {
  Quandl_df_fcn_UST = R.cache::addMemoization(Quandl_df_fcn_UST_raw)
}
| /R/term_structures.R | no_license | whjieee/ragtop | R | false | false | 6,260 | r | ## ragtop -- convertibles pricing in R
##
## Copyright (C) 2016 Brian Boonstra <ragtop@boonstra.org>
##
## This file is part of the ragtop package for GNU R.
## It is made available under the terms of the GNU General Public
## License, version 2, or at your option, any later version,
## incorporated herein by reference.
##
## This program is distributed in the hope that it will be
## useful, but WITHOUT ANY WARRANTY; without even the implied
## warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the GNU General Public License for more
## details.
##
## You should have received a copy of the GNU General Public License
## along with ragtop. If not, see <http://www.gnu.org/licenses/>.
library(futile.logger)
#' Create a variance cumulation function from a volatility term structure
#'
#' Given a volatility term structure, create a corresponding variance
#' cumulation function. The function assumes piecewise constant
#' forward volatility, with the final such forward volatility
#' extending to infinity.
#'
#' @param vols_df A data.frame with numeric columns \code{time} (in
#' increasing order) and \code{volatility} (not decreasing so quickly
#' as to give negative forward variance)
#' @return A function taking two time arguments, which returns the cumulated
#' variance from the second to the first
#' @examples
#' vc = variance_cumulation_from_vols(
#' data.frame(time=c(0.1,2,3),
#' volatility=c(0.2,0.5,1.2)))
#' vc(1.5, 0)
#'
#' @export variance_cumulation_from_vols
variance_cumulation_from_vols = function(vols_df)
{
  N = nrow(vols_df)
  ## Total variance accumulated from time 0 to each quoted time, with a
  ## leading zero entry standing for time 0 itself.
  cumulated_variances = c(0, vols_df$volatility^2 * vols_df$time)
  if (any(cumulated_variances<0)) {
    stop("Nonsensical variance")
  }
  ## Variance accumulated between successive quoted times; a piecewise
  ## constant forward volatility model requires these to be nonnegative.
  fwd_variances = diff(cumulated_variances)
  if (any(fwd_variances<0)) {
    stop("Nonsensical negative forward variance")
  }
  augmented_t = c(0, vols_df$time)
  time_diffs = diff(augmented_t)
  max_t = max(vols_df$time)
  ## Forward volatility applying after each quoted time; the final one is
  ## extended flat beyond max_t.  (A previously assigned but unused local
  ## `last_vol` has been removed.)
  vols_df$fwd_vols = sqrt(fwd_variances/time_diffs)
  ## Cumulated variance from time 0 to time x.
  cumul_var_0 = function(x) {
    if (x==0) {
      cmvar = 0
    } else if (x>=max_t) {
      ## Beyond the last quoted time: extend the final forward vol flat.
      cmvar = cumulated_variances[[N+1]] + vols_df$fwd_vols[[N]]^2 * (x-max_t)
      flog.debug("Found %s was beyond max_t %s, N=%s, using time diff %s from last anchor time max_t=%s applied to fwd vol %s and prev var %s",
                 x, max_t, N, (x-max_t), max_t, vols_df$fwd_vols[[N]], cumulated_variances[[N]],
                 name='ragtop.term_structures.variance_cumulation_from_vols')
    } else {
      k = findInterval(x, augmented_t) # Will not be larger than N
      dt = x - augmented_t[[k]]
      if (dt<0) {
        stop("Negative time interval after call to findInterval() in variance_cumulation_from_vols()")
      }
      cmvar = cumulated_variances[[k]] + vols_df$fwd_vols[[k]]^2 * dt
      flog.debug("Found k=%s, using time diff %s from prev anchor time %s applied to fwd vol %s and prev var %s",
                 k, dt, augmented_t[[k]], vols_df$fwd_vols[[k]], cumulated_variances[[k]],
                 name='ragtop.term_structures.variance_cumulation_from_vols')
    }
    cmvar
  }
  ## Cumulated variance between times t and T (t defaults to 0), with a
  ## sanity check that strictly positive intervals carry positive variance.
  cumul_var = function(T,t=0) {
    cv = cumul_var_0(T) - cumul_var_0(t)
    if ((T>t) && (cv<=0)) {
      stop("Nonsensical cumulative variance ", cv, " from t=", t, " to T=", T)
    }
    cv
  }
  cumul_var
}
#' Create a discount factor function from a yield curve
#'
#' Use a piecewise constant approximation to the given spot curve to
#' generate a function capable of returning corresponding discount factors
#'
#' @param yield_curve A data.frame with numeric columns \code{time} (in
#' increasing order) and \code{rate} (in natural units)
#' @return A function taking two time arguments, which returns the discount factor from the second to the first
#' @examples
#' disct_fcn = ragtop::spot_to_df_fcn(
#' data.frame(time=c(1, 5, 10, 15),
#' rate=c(0.01, 0.02, 0.03, 0.05)))
#' print(disct_fcn(1, 0.5))
#' @export spot_to_df_fcn
spot_to_df_fcn = function(yield_curve) {
  ## Discount factor to each quoted time implied by the spot rates.
  yield_curve$dfs = exp(-yield_curve$time * yield_curve$rate)
  n_df = length(yield_curve$dfs)
  df_next = yield_curve$dfs[2:n_df]
  df_prev = yield_curve$dfs[1:(n_df - 1)]
  ## Piecewise constant forward rates implied between successive quotes.
  fwd_rates = -log(df_next / df_prev) / diff(yield_curve$time)
  ## Each quoted time carries the forward rate applying after it; the
  ## final forward rate extends flat beyond the end of the quoted curve.
  yield_curve$fwd_rate = c(fwd_rates, fwd_rates[[length(fwd_rates)]])
  ## Discount factor from time 0 to time x under the piecewise model.
  df_to = function(x) {
    k = findInterval(x, yield_curve$time)
    if (k > 0) {
      elapsed = x - yield_curve$time[[k]]
      yield_curve$dfs[[k]] * exp(-yield_curve$fwd_rate[[k]] * elapsed)
    } else {
      ## Before the first quoted time: apply the first spot rate directly.
      exp(-yield_curve$rate[[1]] * x)
    }
  }
  ## Discount factor from t to T (t defaults to 0).
  function(T, t = 0, ...) {
    df_to(T) / df_to(t)
  }
}
#' Get a US Treasury curve discount factor function
#'
#' @param on_date Date for which to query Quandl for the curve
#' @return A function taking two time arguments, which returns the discount factor from the second to the first
#' @export Quandl_df_fcn_UST_raw
Quandl_df_fcn_UST_raw = function(on_date) {
  ## Guard on the Quandl package, which performs the actual query.  (The
  ## original code tested for R.cache here, but R.cache is only needed by
  ## the memoizing wrapper Quandl_df_fcn_UST; the error message below
  ## already names Quandl as the required package.)
  if (requireNamespace("Quandl", quietly = TRUE)) {
    yield_curve_elems = Quandl::Quandl("USTREASURY/YIELD", start_date=on_date, end_date=on_date)
    yield_curve_elems$Date = NULL
    yc_rates = as.numeric(yield_curve_elems)/100 # Values are reported as percent
    ## Tenors reported by USTREASURY/YIELD (1mo, 3mo, 6mo, 1-30yr), plus an
    ## artificial zero-rate point at time 0
    yield_curve = data.frame(time=c(0, 30/360, 90/360, 1/2, 1,2,3,5,7,10,20,30), rate=c(0,yc_rates))
    df_frame = spot_to_df_fcn(yield_curve)
  } else {
    flog.error('Quandl package not available for treasury curve queries')
    ## NOTE(review): the documented return value is a discount factor
    ## function; this empty data.frame fallback is kept for backward
    ## compatibility but callers should treat it as a failure signal.
    df_frame = data.frame()
  }
  df_frame
}
#' Get a US Treasury curve discount factor function
#'
#' This is a caching wrapper for \code{\link{Quandl_df_fcn_UST_raw}}
#'
#' @param ... Arguments passed to \code{\link{Quandl_df_fcn_UST_raw}}
#' @param envir Environment passed to \code{\link{Quandl_df_fcn_UST_raw}}
#' @return A function taking two time arguments, which returns the discount factor from the second to the first
#' @export Quandl_df_fcn_UST
Quandl_df_fcn_UST = function(...,envir=parent.frame()) {
  ## Plain pass-through fallback used when R.cache is not installed.
  ## NOTE(review): the envir argument is accepted but never used here.
  Quandl_df_fcn_UST_raw(...)
}
## When R.cache is installed, rebind Quandl_df_fcn_UST to a memoized
## version of the raw query so repeated requests for the same date are
## served from cache rather than re-querying Quandl.
if (is.element('R.cache', utils::installed.packages()[,1])) {
  Quandl_df_fcn_UST = R.cache::addMemoization(Quandl_df_fcn_UST_raw)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utlis.R
\name{runTsne}
\alias{runTsne}
\title{runTsne}
\usage{
runTsne(my.dist, dims = 2, theta = 0, initial_dims = 50,
max_iter = 2000, perplexity = 10, seed = 11, is_distance = TRUE,
...)
}
\arguments{
\item{my.dist}{A distance object typically produced with pearsonsDist.}
\item{dims}{Argument to Rtsne. Numeric indicating the output dimensions.}
\item{theta}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{initial_dims}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{max_iter}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{perplexity}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{seed}{The desired seed to set before running.}
\item{is_distance}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{...}{Additional arguments to pass on}
}
\value{
A matrix containing the mean value for each gene for each
classification group.
}
\description{
Calculates the x and y coordinates of the mean of each classified group.
}
\details{
This method is typically only used in conjunction with plotting. It
calculates the 2 dimensional location of the mean of each classified group
in the supplied unsupervised dimensionality reduction (t-SNE) data
representation.
}
\examples{
my.dist <- pearsonsDist(getData(CIMseqSinglets_test, "counts.cpm"), 1:2000)
tsne <- runTsne(my.dist, max_iter = 10)
}
\author{
Jason T. Serviss
}
\keyword{runTsne}
| /man/runTsne.Rd | no_license | jasonserviss/CIMseq | R | false | true | 1,623 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utlis.R
\name{runTsne}
\alias{runTsne}
\title{runTsne}
\usage{
runTsne(my.dist, dims = 2, theta = 0, initial_dims = 50,
max_iter = 2000, perplexity = 10, seed = 11, is_distance = TRUE,
...)
}
\arguments{
\item{my.dist}{A distance object typically produced with pearsonsDist.}
\item{dims}{Argument to Rtsne. Numeric indicating the output dimensions.}
\item{theta}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{initial_dims}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{max_iter}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{perplexity}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{seed}{The desired seed to set before running.}
\item{is_distance}{Argument to
[Rtsne](https://cran.r-project.org/web/packages/Rtsne/index.html).}
\item{...}{Additional arguments to pass on}
}
\value{
A matrix containing the mean value for each gene for each
classification group.
}
\description{
Calculates the x and y coordinates of the mean of each classified group.
}
\details{
This method is typically only used in conjunction with plotting. It
calculates the 2 dimensional location of the mean of each classified group
in the supplied unsupervised dimensionality reduction (t-SNE) data
representation.
}
\examples{
my.dist <- pearsonsDist(getData(CIMseqSinglets_test, "counts.cpm"), 1:2000)
tsne <- runTsne(my.dist, max_iter = 10)
}
\author{
Jason T. Serviss
}
\keyword{runTsne}
|
## Plot 1
## Histogram of Global Active Power
## ----------------------------------------------------------------------
## Prepare data
## ----------------------------------------------------------------------
## Read electric power consumption data; "?" marks missing observations
power_data <- read.table("../household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         na.strings = "?", stringsAsFactors = FALSE)
## Keep only observations dated 2007-02-01 or 2007-02-02
in_window <- (power_data$Date == "1/2/2007") | (power_data$Date == "2/2/2007")
power_data <- power_data[in_window, ]
## Parse the character date and date+time fields into native classes
power_data$Daten <- as.Date(power_data$Date, "%d/%m/%Y")
power_data$Timen <- strptime(paste(power_data$Date, power_data$Time),
                             "%d/%m/%Y %T")
## ----------------------------------------------------------------------
## Draw the histogram of global active power and save it to plot1.png
## ----------------------------------------------------------------------
png(file = "plot1.png", width = 480, height = 480)
hist(power_data$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency")
dev.off()
## Histogram of Global Active Power
## ----------------------------------------------------------------------
## Prepare data
## ----------------------------------------------------------------------
## Read Electric Power Consumption data "household_power_consumption.txt"
myd <- read.table("../household_power_consumption.txt", header=TRUE, sep=";",
na.strings="?", stringsAsFactors=FALSE)
## restrict subset to dates 2007-02-01 to 2007-02-02
sub <- (myd$Date=="1/2/2007") | (myd$Date=="2/2/2007")
myd <- myd[sub,]
## convert character date and times to date/time format
myd$Daten = as.Date(myd$Date, "%d/%m/%Y")
## convert date+time to date/time format
myd$Timen = strptime(paste(myd$Date,myd$Time), "%d/%m/%Y %T")
## ----------------------------------------------------------------------
## Create plot and save to png file
## Histogram of global active power
## ----------------------------------------------------------------------
png(file="plot1.png",
width=480,
height=480)
hist(myd$Global_active_power,
col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab="Frequency")
dev.off() |
## uk10k data as our reference data set
## THIS IS FOR REFERENCE ONLY !!!! NOT RUN IN THIS LOCATION
## process PBC
## first convert to annotSnpStats
library(annotSnpStats)
library(data.table)
library(magrittr)
library(optparse)
## NOTE(review): TEST and DEFAULT_TARGET_DIRNAME are defined but never used
## in the visible script — possibly leftovers from an earlier version
TEST <- TRUE
DEFAULT_TARGET_DIRNAME <- 'split'
## command line interface: -f / --file names the annotSnpStats RData file
## to process (defaults to empty string)
option_list = list(
make_option(c("-f", "--file"), type="character",default='',
help="File to process", metavar="character")
)
opt_parser = OptionParser(option_list=option_list);
args = parse_args(opt_parser)
print(args)
## UK10K reference sites (filtered at MAF 0.005 per the filename); normalise
## chromosome coding to numeric (X -> 23) and sort by chromosome/position
uk10 <- readRDS("/home/ob219/rds/hpc-work/DATA/UK10K/UK10K_0.005_MAF.RDS")
setnames(uk10,'CHROM','CHR')
uk10[CHR=='X',CHR:='23']
uk10[,CHR:=as.numeric(CHR)]
uk10 <- uk10[order(CHR,POS),]
## slim reference table used for merging below:
## position (CHR/BP), alleles (REF/ALT) and ALT allele frequency
uk10m <- uk10[,.(CHR,BP=POS,uk10_A1=REF,uk10_A2=ALT,uk10_A2_AF=AF)]
## Align the allele coding of genotype object `x` to reference table `y`.
## Adapted from annotSnpStats internals so that `y` can be a plain data.table
## rather than a second annotSnpStats object.
##
##   x       : annotSnpStats genotype object; x@snps carries the allele columns
##   y       : data.table with columns a1, a2, raf (reference allele freq),
##             row-matched to the SNP columns of x
##   mafdiff : ambiguous SNPs whose MAF is within `mafdiff` of 0.5 cannot be
##             resolved by allele-frequency comparison
##   do.plot : if TRUE, plot RAF(x) against y$raf after switching as a
##             sanity check (points should fall on the identity line)
##
## Returns: x with allele coding switched to match y; genotypes at
## impossible/unresolvable SNPs are zeroed out (raw 00 == missing).
switcheroo <- function(x,y,mafdiff=0.1,do.plot=FALSE){
## asw() is not exported from annotSnpStats, so fetch it from the namespace
asw <- getFromNamespace("asw", "annotSnpStats")
## "A/G"-style allele strings for each SNP in x and y
x.alleles <- apply(x@snps[,alleles(x)],1,paste,collapse="/")
y.alleles <- apply(y[,.(a1,a2)],1,paste,collapse="/")
names(y.alleles)<-names(x.alleles)
message("These are the allele codes as currently defined, before any switching:")
print(tt <- as.matrix(table(x.alleles, y.alleles)))
## genotype classes: g.class labels each x/y allele pair (e.g. nochange,
## rev, comp, revcomp, ambig, impossible)
sw.class <- g.class(x.alleles,y.alleles)
any.comp <- any(sw.class %in% c("comp","revcomp"))
any.ambig <- any(sw.class=="ambig")
## sw[i] TRUE => SNP i needs its allele order flipped
sw <- sw.class %in% c("rev","revcomp")
if(length(wh <- which(sw.class=="impossible"))) {
message(length(wh)," pairwise impossible allele labels found. These genotypes will be set to missing.")
sw[wh] <- NA
}
## redundant with the assignment in the block just above; kept as-is
sw[sw.class=="impossible"] <- NA
if(any.comp & any.ambig) { # there are reverse complements in the distinguishable cases
## strand flips exist among resolvable SNPs, so ambiguous (A/T, C/G) SNPs
## must be decided by comparing reference allele frequencies
ind <- which(sw.class=="ambig")
message(length(ind)," SNPs have alleles not completely resolvable without strand information, confirming guess by checking allele freqs.")
x.cs <- col.summary(x[,ind])
rdiff <- x.cs[,"RAF"] - y[ind,]$raf
## switch when the flipped RAF (1 - RAF) is the closer match to y's raf
sw2 <- ifelse(abs(x.cs[,"RAF"] - y[ind,]$raf) < abs(1 - x.cs[,"RAF"] - y[ind,]$raf), FALSE, TRUE)
too.close <- abs(x.cs[,"MAF"]-0.5)<mafdiff
# if(any(too.close)) {
#   can.match <- sw.class %in% c("comp","nochange","rev","revcomp")
#   xsw <- switch.alleles(x[,-ind],which(sw[-ind]))
#   ysw <- y[,-ind]
#   ## step through too.close SNPs checking signed correlation
#   message("using signed correlation for ",sum(too.close)," SNPs too close to 50% MAF")
#   ldx <- ld(xsw,x[,ind[too.close],drop=FALSE], stats="R")
#   ldy <- ld(ysw,y[,ind[too.close],drop=FALSE], stats="R")
#   ldx[abs(ldx)<0.04] <- NA ## drop uncorrelated - have no information
#   ldy[abs(ldy)<0.04] <- NA ## drop uncorrelated - have no information
#   cor.sw <- sapply(1:ncol(ldx), function(j) cor(ldx[,j], ldy[,j], use="pair"))
#   cor.sw[ abs(cor.sw)<0.8 ] <- NA # NA unless correlation is pretty strong
#   sw2[too.close] <- cor.sw < 0
#   too.close <- too.close[is.na(cor.sw)]
# }
## NOTE(review): the message below counts NAs in sw2, but sw2 is only set
## to NA for too-close SNPs inside the commented-out block above — confirm
## whether `too.close` masking was intentionally disabled
message(sum(is.na(sw2))," SNPs not resolvable (MAF too close to 0.5).")
sw[ind] <- sw2
}
if(!any.comp & any.ambig) { # there are no reverse complements in distinguishable cases
## no strand flips observed elsewhere -> assume a fixed strand and resolve
## ambiguous SNPs by a simple allele-reversal comparison
ind <- which(sw.class=="ambig")
message(length(ind)," SNPs have alleles not completely resolvable without strand information,\nbut there is no evidence of strand switches amongst SNPs which are resolvable.\nAssuming fixed strand.")
ind <- which(sw.class=="ambig")
sw2 <- x.alleles[ind]==g.rev(y.alleles[ind])
sw[ind] <- sw2
}
## do switch
## zero out genotypes at SNPs that could not be resolved (raw 00 == missing)
if(any(is.na(sw))){
x@.Data[,is.na(sw)] <- as.raw("00")
}
if(length(wh <- which(sw))) {
## flip the genotype coding for switched SNPs and copy y's allele labels in
xsw <- asw(x@.Data,wh)
x@.Data=xsw
##x <- switch.alleles(x, wh)
x@snps[wh, alleles(x) ] <- y[wh, .(a1,a2)]
}
if(do.plot) {
## sanity check: after switching, RAFs should lie on the identity line
x.cs <- col.summary(x)
plot(x.cs[,"RAF"], y$raf,main="RAF after switching",xlab="x",ylab="y",pch="+")
abline(0,1,col="red",lty=1)
}
return(x)
}
## Process one JIA annotSnpStats RData file: align its allele coding to the
## UK10K reference (via switcheroo), save the aligned genotype object plus
## per-SNP and per-sample summary tables, and return the summary filename.
##
##   f       : path to an RData file containing an annotSnpStats object `G`
##   out.dir : output directory for the aligned data and the summary RDS
##
## Relies on the global `uk10m` reference table built above.
processJIA <- function(f,out.dir='/home/ob219/rds/rds-cew54-wallace-share/Data/GWAS/JIA-2017-data/as_basis'){
## load() pulls object `G` into this function's environment; the outer ()
## just echoes the loaded object name(s)
(load(f))
## add chromosome to snps
snps <- snps(G) %>% data.table
## posid records each SNP's original column index in G, so the genotype
## matrix can be subset to match the merged SNP table below
snps[,posid:=1:.N]
snps <- merge(snps,uk10m,by.x=c('chromosome','position'),by.y=c('CHR','BP'))
snps[,pid:=paste(chromosome,position,sep=':')]
## if there are duplicates this indicate non-binary alleles which we remove
dup.pid <- snps[duplicated(pid),]$pid
if(length(dup.pid)>0)
snps <- snps[!pid %in% dup.pid,]
## subset genotypes to the retained SNPs and align alleles to UK10K
Y <- copy(G)
Y <- Y[,snps$posid]
y<-snps[,.(a1=uk10_A1,a2=uk10_A2,raf=uk10_A2_AF)]
Y<-switcheroo(Y,y,do.plot=TRUE)
## "annotsnpstats-<x>.RData" -> "chr<x>.RData" in the output directory
fname <- file.path(out.dir,gsub("annotsnpstats[-](.*)","chr\\1",basename(f)))
save(Y,file=fname)
## per-sample and per-SNP summaries of the aligned data
samples <- cbind(samples(Y) %>% data.table,row.summary(Y) %>% data.table)
snps <- cbind(snps(Y) %>% data.table,col.summary(Y) %>% data.table)
## NOTE(review): this merge keys on 'CHR'/'position' whereas the merge above
## used 'chromosome'/'position' — confirm the column names of snps(Y) here
snps <- merge(snps,uk10m,by.x=c('CHR','position'),by.y=c('CHR','BP'))
## summary file: "summ_chr<x>.RDS"
fname <- file.path(out.dir,sprintf("summ_%s",gsub("annotsnpstats[-](.*)","chr\\1",basename(f))))
fname <- gsub("RData","RDS",fname)
saveRDS(list(snps=snps,samples=samples),file=fname)
fname
}
# code to do allele switch without requiring y to be annot snp stats
## x is annot snp stats
## y is DT of a1 a2 af_wrt2 - that matches snps in X
## shameless rip off of Chris' code on annotSnpStats
## main entry point: process the single file passed on the command line (-f)
out <- processJIA(args$file)
## utility block, never executed: generates one "Rscript ... -f <file>"
## command per input RData file so the jobs can be submitted via qsub
if(FALSE){
DATA.DIR <- '/home/ob219/rds/rds-cew54-wallace-share/Data/GWAS/JIA-2017-data'
gt.files <- list.files(path=DATA.DIR,pattern="*.RData",full.names=TRUE)
RSCRIPT <- '/home/ob219/git/as_basis/R/Individual_projection/switch_alleleles_JIA_q.R'
cmds <- lapply(gt.files,function(f){
sprintf("Rscript %s -f %s",RSCRIPT,f)
}) %>% do.call('c',.)
write(cmds,file="~/tmp/qsub/jia_align.txt")
## run using sh script
}
#summ.files<-lapply(gt.files,processJIA)
| /GWAS/JIA/individual_level/switch_alleles_JIA_q.R | permissive | ollyburren/basis_paper | R | false | false | 5,744 | r | ## uk10k data as our reference data set
## THIS IS FOR REFERENCE ONLY !!!! NOT RUN IN THIS LOCATION
## process PBC
## first convert to annotSnpStats
library(annotSnpStats)
library(data.table)
library(magrittr)
library(optparse)
TEST <- TRUE
DEFAULT_TARGET_DIRNAME <- 'split'
option_list = list(
make_option(c("-f", "--file"), type="character",default='',
help="File to process", metavar="character")
)
opt_parser = OptionParser(option_list=option_list);
args = parse_args(opt_parser)
print(args)
uk10 <- readRDS("/home/ob219/rds/hpc-work/DATA/UK10K/UK10K_0.005_MAF.RDS")
setnames(uk10,'CHROM','CHR')
uk10[CHR=='X',CHR:='23']
uk10[,CHR:=as.numeric(CHR)]
uk10 <- uk10[order(CHR,POS),]
uk10m <- uk10[,.(CHR,BP=POS,uk10_A1=REF,uk10_A2=ALT,uk10_A2_AF=AF)]
switcheroo <- function(x,y,mafdiff=0.1,do.plot=FALSE){
asw <- getFromNamespace("asw", "annotSnpStats")
x.alleles <- apply(x@snps[,alleles(x)],1,paste,collapse="/")
y.alleles <- apply(y[,.(a1,a2)],1,paste,collapse="/")
names(y.alleles)<-names(x.alleles)
message("These are the allele codes as currently defined, before any switching:")
print(tt <- as.matrix(table(x.alleles, y.alleles)))
## genotype classes
sw.class <- g.class(x.alleles,y.alleles)
any.comp <- any(sw.class %in% c("comp","revcomp"))
any.ambig <- any(sw.class=="ambig")
sw <- sw.class %in% c("rev","revcomp")
if(length(wh <- which(sw.class=="impossible"))) {
message(length(wh)," pairwise impossible allele labels found. These genotypes will be set to missing.")
sw[wh] <- NA
}
sw[sw.class=="impossible"] <- NA
if(any.comp & any.ambig) { # there are reverse complements in the distinguishable cases
ind <- which(sw.class=="ambig")
message(length(ind)," SNPs have alleles not completely resolvable without strand information, confirming guess by checking allele freqs.")
x.cs <- col.summary(x[,ind])
rdiff <- x.cs[,"RAF"] - y[ind,]$raf
sw2 <- ifelse(abs(x.cs[,"RAF"] - y[ind,]$raf) < abs(1 - x.cs[,"RAF"] - y[ind,]$raf), FALSE, TRUE)
too.close <- abs(x.cs[,"MAF"]-0.5)<mafdiff
# if(any(too.close)) {
# can.match <- sw.class %in% c("comp","nochange","rev","revcomp")
# xsw <- switch.alleles(x[,-ind],which(sw[-ind]))
# ysw <- y[,-ind]
# ## step through too.close SNPs checking signed correlation
# message("using signed correlation for ",sum(too.close)," SNPs too close to 50% MAF")
# ldx <- ld(xsw,x[,ind[too.close],drop=FALSE], stats="R")
# ldy <- ld(ysw,y[,ind[too.close],drop=FALSE], stats="R")
# ldx[abs(ldx)<0.04] <- NA ## drop uncorrelated - have no information
# ldy[abs(ldy)<0.04] <- NA ## drop uncorrelated - have no information
# cor.sw <- sapply(1:ncol(ldx), function(j) cor(ldx[,j], ldy[,j], use="pair"))
# cor.sw[ abs(cor.sw)<0.8 ] <- NA # NA unless correlation is pretty strong
# sw2[too.close] <- cor.sw < 0
# too.close <- too.close[is.na(cor.sw)]
# }
message(sum(is.na(sw2))," SNPs not resolvable (MAF too close to 0.5).")
sw[ind] <- sw2
}
if(!any.comp & any.ambig) { # there are no reverse complements in distinguishable cases
ind <- which(sw.class=="ambig")
message(length(ind)," SNPs have alleles not completely resolvable without strand information,\nbut there is no evidence of strand switches amongst SNPs which are resolvable.\nAssuming fixed strand.")
ind <- which(sw.class=="ambig")
sw2 <- x.alleles[ind]==g.rev(y.alleles[ind])
sw[ind] <- sw2
}
## do switch
if(any(is.na(sw))){
x@.Data[,is.na(sw)] <- as.raw("00")
}
if(length(wh <- which(sw))) {
xsw <- asw(x@.Data,wh)
x@.Data=xsw
##x <- switch.alleles(x, wh)
x@snps[wh, alleles(x) ] <- y[wh, .(a1,a2)]
}
if(do.plot) {
x.cs <- col.summary(x)
plot(x.cs[,"RAF"], y$raf,main="RAF after switching",xlab="x",ylab="y",pch="+")
abline(0,1,col="red",lty=1)
}
return(x)
}
processJIA <- function(f,out.dir='/home/ob219/rds/rds-cew54-wallace-share/Data/GWAS/JIA-2017-data/as_basis'){
(load(f))
## add chromosome to snps
snps <- snps(G) %>% data.table
snps[,posid:=1:.N]
snps <- merge(snps,uk10m,by.x=c('chromosome','position'),by.y=c('CHR','BP'))
snps[,pid:=paste(chromosome,position,sep=':')]
## if there are duplicates this indicate non-binary alleles which we remove
dup.pid <- snps[duplicated(pid),]$pid
if(length(dup.pid)>0)
snps <- snps[!pid %in% dup.pid,]
Y <- copy(G)
Y <- Y[,snps$posid]
y<-snps[,.(a1=uk10_A1,a2=uk10_A2,raf=uk10_A2_AF)]
Y<-switcheroo(Y,y,do.plot=TRUE)
fname <- file.path(out.dir,gsub("annotsnpstats[-](.*)","chr\\1",basename(f)))
save(Y,file=fname)
samples <- cbind(samples(Y) %>% data.table,row.summary(Y) %>% data.table)
snps <- cbind(snps(Y) %>% data.table,col.summary(Y) %>% data.table)
snps <- merge(snps,uk10m,by.x=c('CHR','position'),by.y=c('CHR','BP'))
fname <- file.path(out.dir,sprintf("summ_%s",gsub("annotsnpstats[-](.*)","chr\\1",basename(f))))
fname <- gsub("RData","RDS",fname)
saveRDS(list(snps=snps,samples=samples),file=fname)
fname
}
# code to do allele switch without requiring y to be annot snp stats
## x is annot snp stats
## y is DT of a1 a2 af_wrt2 - that matches snps in X
## shameless rip off of Chris' code on annotSnpStats
out <- processJIA(args$file)
if(FALSE){
DATA.DIR <- '/home/ob219/rds/rds-cew54-wallace-share/Data/GWAS/JIA-2017-data'
gt.files <- list.files(path=DATA.DIR,pattern="*.RData",full.names=TRUE)
RSCRIPT <- '/home/ob219/git/as_basis/R/Individual_projection/switch_alleleles_JIA_q.R'
cmds <- lapply(gt.files,function(f){
sprintf("Rscript %s -f %s",RSCRIPT,f)
}) %>% do.call('c',.)
write(cmds,file="~/tmp/qsub/jia_align.txt")
## run using sh script
}
#summ.files<-lapply(gt.files,processJIA)
|
######################
## HEAT MAP FIGURE ##
######################
# ** It is recommended to use Microsoft Open R (v3.5.1) for improved performance without compromising compatibility **
## SCRIPT PREPARATION
# clear space and allocate memory
# (memory.limit is Windows-only; size is in MB, i.e. ~56 GB here)
gc()
memory.limit(size = 56000)
script.start <- Sys.time() # start script timestamp
# first make sure checkpoint is installed locally
# this is the only package that is ok to not use a 'checkpointed' (i.e. archived version of a package)
# checkpoint does not archive itself and it should not create dependency issues
if (!require("checkpoint")){
install.packages("checkpoint")
library(checkpoint, quietly = T)
}
# load all other dependant packages from the local repo
# (point .libPaths at the checkpoint snapshot so the library() calls below
# resolve the archived 2019-01-01 package versions)
.libPaths(paste0(getwd(),"/.checkpoint/2019-01-01/lib/x86_64-w64-mingw32/3.5.1"))
#lib.path <- paste0(getwd(),"/.checkpoint/2019-01-01/lib/x86_64-w64-mingw32/3.5.1") # lib.loc = lib.path,
library(XML)
library(sp)
library(sf)
library(raster)
library(rgdal)
library(rgeos)
library(maptools)
library(gdalUtils)
library(doParallel)
library(data.table)
library(here)
# archive/update snapshot of packages at checkpoint date
checkpoint("2019-01-01", # Sys.Date() - 1 this calls the MRAN snapshot from yesterday
R.version = "3.5.1", # will only work if using the same version of R
checkpointLocation = here(), # calls here package
verbose = F)
# Write a data.frame/data.table to a .csv plus the matching QGIS .csvt
# sidecar file. QGIS reads the .csvt (a single row of quoted type names,
# comma-separated) to assign column types when importing the .csv as a layer.
#
# Args:
#   df:       data.frame (or data.table) to export.
#   filename: output path; ".csv" is appended if not already present.
#
# Side effects: writes <filename>.csv and <filename>.csvt.
write.qgis.csv <- function(df, filename){
  # Take only the first class of each column: columns such as POSIXct carry
  # multiple classes, so sapply(df, class) would return a ragged list and
  # break the type mapping below.
  col.class <- vapply(df, function(x) class(x)[1], character(1))
  # Map R column classes onto the QGIS .csvt type vocabulary; everything
  # that is not numeric/integer is exported as String.
  csvt <- ifelse(col.class == "numeric", "Real",
          ifelse(col.class == "integer", "Integer", "String"))
  # Append ".csv" unless the path already ends with it (the original scalar
  # ifelse/substr check is replaced by a plain conditional).
  if (!endsWith(filename, ".csv")) {
    filename <- paste0(filename, ".csv")
  }
  write.csv(df, file = filename, row.names = FALSE)
  # The sidecar is the same path with a trailing "t" (file.csv -> file.csvt);
  # writing a list produces one quoted row of type names.
  write.table(as.list(csvt), file = paste0(filename, "t"), sep = ",",
              row.names = FALSE, col.names = FALSE)
}
# import data
blkgrp <- shapefile(here("data/shapefiles/boundaries/phx-blkgrp-geom.shp")) # census blockgroup shapefile (clipped to Maricopa UZA)
osm.block.min <- readRDS(here("data/outputs/osm-blockgroup-dissolved-min.rds"))
osm.block.max <- readRDS(here("data/outputs/osm-blockgroup-dissolved-max.rds"))
uza.buffer <- readRDS(here("data/outputs/temp/uza-buffer.rds")) # Maricopa UZA buffered ~1mi
# rasterize polygon roadway min/max data and point parking data
# note: "For polygons, values are transferred if the polygon covers the center of a raster cell."
########
start.time <- Sys.time() # start script timestamp
# create empty raster at desired extent (use uza buffer to ensure everything captured)
# (res is in the CRS's units; the inline comments give the approx metric cell
# size and observed run time per resolution — only the coarsest is active)
#r <- raster(ext = extent(uza.buffer), crs = crs(uza.buffer), res = 32.8084) # create raster = ~10 x 10 m, 50 hrs est. run time with this time
#r <- raster(ext = extent(uza.buffer), crs = crs(uza.buffer), res = 1640.42) # create raster = ~500 x 500 m, ~ 40 mins
r <- raster(ext = extent(uza.buffer), crs = crs(uza.buffer), res = 3280.84) # create raster = ~1000 x 1000 m, ~ 5 mins
# calculate the number of cores (leave one free for the OS)
my.cores <- parallel::detectCores() - 1 # store computers cores
# number of polygons features in SPDF
features.min <- 1:nrow(osm.block.min[,])
features.max <- 1:nrow(osm.block.max[,])
# split features in n parts (one chunk of polygon indices per worker)
n <- my.cores
parts.min <- split(features.min, cut(features.min, n))
parts.max <- split(features.max, cut(features.max, n))
# initiate cluster after loading all the necessary object to R environment
cl <- makeCluster(my.cores)
registerDoParallel(cl) # register parallel backend
# propagate the checkpoint library path to every worker so they load the
# same archived package versions as the master process
clusterCall(cl, function(x) .libPaths(x), .libPaths())
print(cl)
# rasterize parts of min/max road network area and save in parallel;
# getCover = T stores the fraction of each cell covered by polygons
system.time(foreach(i = 1:n, .packages = c("raster", "here")) %dopar% {
rasterize(osm.block.min[parts.min[[i]],], r, getCover = T, background = 0,
filename = here(paste0("data/outputs/temp/road-min-part-", i, ".tif")),
overwrite = T)})
system.time(foreach(i = 1:n, .packages = c("raster", "here")) %dopar% {
rasterize(osm.block.max[parts.max[[i]],], r, getCover = T, background = 0,
filename = here(paste0("data/outputs/temp/road-max-part-", i, ".tif")),
overwrite = T)})
# stop cluster
stopCluster(cl)
# create list of raster parts from file (workers wrote them to disk above)
r.road.min <- lapply(1:n, function (i) raster(here(paste0("data/outputs/temp/road-min-part-", i, ".tif"))))
r.road.max <- lapply(1:n, function (i) raster(here(paste0("data/outputs/temp/road-max-part-", i, ".tif"))))
# merge all raster parts; mosaic() sums overlapping cells via fun = sum
r.road.min$fun <- sum
r.road.max$fun <- sum
r.road.min.m <- do.call(mosaic, r.road.min)
r.road.max.m <- do.call(mosaic, r.road.max)
# plot merged raster
plot(r.road.min.m)
plot(r.road.max.m)
# export to check how accurate in QGIS
writeRaster(r.road.min.m, here("data/outputs/temp/osm-min-area-raster.tif"), format = "GTiff", overwrite = T)
writeRaster(r.road.max.m, here("data/outputs/temp/osm-max-area-raster.tif"), format = "GTiff", overwrite = T)
# save R objects
saveRDS(r.road.min.m, here("data/outputs/temp/osm-min-area-raster.rds"))
saveRDS(r.road.max.m, here("data/outputs/temp/osm-max-area-raster.rds"))
# save image (full workspace snapshot for restart/debugging)
save.image(here("data/outputs/temp/7-veh-pave-heat-map.RData"))
# completion banner with host name and elapsed run time
paste0("R model run complete on ", Sys.info()[4]," at ", Sys.time(),
". Model run length: ", round(difftime(Sys.time(), start.time, units = "mins"),0)," mins.")
############
# import parking data
parking <- fread(here("data/parking/phx-parking-blkgrp.csv")) # phoenix total parking data (aggregated to blockgroup id)
parking.par <- readRDS(here("data/parking/phoenix-parking-parcel.rds")) # phoenix off-street parking data (raw by parcel)
parking.pts <- shapefile(here("data/parking/phx-parcel-points.shp")) # points of center of APN of parcels
# simplify prop type var: recode PROPTYPE into a compact com/res flag
# (chained data.table in-place updates)
parking.par <- parking.par[PROPTYPE == "Non-residential Off-street Spaces", type := "com"][PROPTYPE == "Residential Off-street Spaces", type := "res"]
# agg spaces to APN and keep type (first of uniques)
parking.par.m <- parking.par[, .(spaces = sum(spaces, na.rm = T), type = first(type)), by = "APN"]
# make parking points unique (drop duplicate APN point records)
parking.pts.u <- parking.pts[which(!duplicated(parking.pts$APN)), ]
# merge parking spaces totals to points by APN
parking.merged <- merge(parking.pts.u, parking.par.m, by = "APN")
saveRDS(parking.merged, here("data/parking/phoenix-parking-parcel-points.rds"))
# export parking parcel data to merge in qgis
#write.qgis.csv(parking.par.m, here("data/parking/parcel-off"))
# rasterize parking and min/max road area
# (disabled: rasterizing the parking points needed ~42 Gb of memory)
#r.park <- rasterize(parking.merged[parking.merged$spaces,], r, field = "spaces") 42 Gb
# import most recent pavement model run data
# define folder for data retrieval (automatically take most recent folder with "run_metadata" in it)
folder <- as.data.table(file.info(list.dirs(here("data/outputs/"), recursive = F)),
keep.rownames = T)[grep("run_metadata", rn),][order(ctime)][.N, rn]
all.model.runs <- readRDS(paste0(folder, "/stats_all_model_runs.rds")) # meta data by run
all.surface.data <- readRDS(paste0(folder, "/all_pave_surface_data.rds")) # surface temporal data by run
# ROAD AREA BY BLOCKGROUP
# min road area by blockgroup
# (gArea with byid = T returns one polygon area per feature, in the squared
# units of the layer's CRS)
blkgrp$min.road.area.sqf <- gArea(osm.block.min, byid = T)
blkgrp$tot.area.sqf <- gArea(blkgrp, byid = T)
blkgrp$min.road.pct <- blkgrp$min.road.area.sqf / blkgrp$tot.area.sqf
# road area by raster cell (diff way)
# quick interactive summaries; results are printed, not stored
mean(blkgrp$min.road.pct)
max(blkgrp$min.road.pct)
# max road area by blockgroup
blkgrp$max.road.area.sqf <- gArea(osm.block.max, byid = T)
blkgrp$max.road.pct <- blkgrp$max.road.area.sqf / blkgrp$tot.area.sqf
# min/mean/max total percent of area covered by roads
# (region-wide shares; again printed for inspection only)
sum(blkgrp$min.road.area.sqf) / sum(blkgrp$tot.area.sqf)
(sum(blkgrp$min.road.area.sqf) + sum(blkgrp$max.road.area.sqf)) / 2 / sum(blkgrp$tot.area.sqf)
sum(blkgrp$max.road.area.sqf) / sum(blkgrp$tot.area.sqf)
# Calculate total and fractional area of each fclass grouping.
# OSM fclass values bucketed into highway / major / minor / collector tiers.
hwy <- c("motorway")
maj <- c("primary", "trunk")
# NOTE: `min` shadows base::min for the remainder of this script
min <- c("secondary", "tertiary")
# BUG FIX: this assignment previously reused `maj`, silently clobbering the
# major-road classes above; per the avg_col_* variables computed below, the
# residential/service tier is the collector ("col") grouping.
col <- c("residential", "service", "unclassified")
# * 0.092903 m2 per ft2
# split the average (of min and max) road area into fixed 10/20/30/40%
# shares per tier and convert ft2 -> m2
# NOTE(review): these hard-coded ratios look like placeholders — see the
# "**TEMPORARY**" note further below about deriving shares from OSM fclass
blkgrp$avg_hwy_area <- 0.1 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
blkgrp$avg_maj_area <- 0.2 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
blkgrp$avg_min_area <- 0.3 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
blkgrp$avg_col_area <- 0.4 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
#blkgrp$avg_hwy_frac <- blkgrp$avg_hwy_area / blkgrp$tot.area.sqf
#blkgrp$avg_maj_frac <- blkgrp$avg_maj_area / blkgrp$tot.area.sqf
#blkgrp$avg_min_frac <- blkgrp$avg_min_area / blkgrp$tot.area.sqf
#blkgrp$avg_col_frac <- blkgrp$avg_col_area / blkgrp$tot.area.sqf
# PARKING AREA BY BLOCKGROUP
# merge parking data to blkgrp shapefile
blkgrp <- merge(blkgrp, parking[, .(fid, res.off, com.off)], by = "fid", duplicateGeoms = T) # ignore on-street spaces for area calcs because part of roadway
# assumed upper and lower area allocated per space
# (sq ft per space: commercial 250-331, residential 200-500)
blkgrp$min.park.area.sqf <- (blkgrp$com.off * 250) + (blkgrp$res.off * 200)
blkgrp$max.park.area.sqf <- (blkgrp$com.off * 331) + (blkgrp$res.off * 500)
# min/mean/max total percent of area covered by parking (printed only)
sum(blkgrp$min.park.area.sqf, na.rm = T) / sum(blkgrp$tot.area.sqf, na.rm = T)
(sum(blkgrp$min.park.area.sqf, na.rm = T) + sum(blkgrp$max.park.area.sqf, na.rm = T)) / 2 / sum(blkgrp$tot.area.sqf, na.rm = T)
sum(blkgrp$max.park.area.sqf, na.rm = T) / sum(blkgrp$tot.area.sqf, na.rm = T)
# SUMMARIZE MODEL SURFACE DATA
# calc flux vars (W/m2): incoming/reflected solar and the net surface flux
all.surface.data[, inc.sol := ((1 - albedo) * SVF * solar)]
all.surface.data[, ref.sol := albedo * SVF * solar]
all.surface.data[, net.flux := -inc.sol + q.rad + q.cnv]
# calculate total energy by average day in season by batch in MJ / m2 (1 MJ == 1E6 J = 1E6 W * s)
# delta.t is the model time step in seconds, used to integrate flux -> energy
delta.t <- 30
surface.data.a <- all.surface.data[, .(out.heat = delta.t * sum(q.rad + q.cnv) / 1E6,
inc.sol = delta.t * sum(inc.sol) / 1E6,
net.heat = delta.t * sum(net.flux) / 1E6,
ref.heat = delta.t * sum(ref.sol) / 1E6),
by = c("batch.name", "season")]
# bare ground is the undeveloped baseline; heat "added" by a pavement type
# is its outgoing heat minus bare ground's, per season
ground.heat <- surface.data.a[batch.name == "Bare Ground / Desert Soil", .(season,out.heat)]
setnames(ground.heat, "out.heat", "out.ground.heat")
surface.data.a <- merge(surface.data.a, ground.heat, by = "season")
surface.data.a[, added.heat := out.heat - out.ground.heat] # MJ / m2
# define roadway ratios to apply **TEMPORARY** WILL BE DEFINED LATER BY % AREA OF OSM BY FCLASS
surface.data.a <- merge(surface.data.a, data.table(ratio = c(0.30, 0.00, 0.00, 0.70),
batch.name = unique(surface.data.a[, batch.name])),
by = "batch.name")
# temp avg day factor for all pave types (ratio-weighted mean added heat,
# summed over batches; MJ / m2 per average day)
avg.day.heat <- sum(surface.data.a[, .(day.heat = mean(added.heat * ratio)), by = "batch.name"][ ,day.heat])
# average daily added heat (over undeveloped bare ground)
# in GJ / day / blkgrp (area in m2 * MJ/m2 / 1000 = GJ)
blkgrp$avg_hwy_day_heat <- blkgrp$avg_hwy_area * avg.day.heat / 1000
blkgrp$avg_maj_day_heat <- blkgrp$avg_maj_area * avg.day.heat / 1000
blkgrp$avg_min_day_heat <- blkgrp$avg_min_area * avg.day.heat / 1000
blkgrp$avg_col_day_heat <- blkgrp$avg_col_area * avg.day.heat / 1000
blkgrp$avg_day_heat_total <- blkgrp$avg_hwy_day_heat + blkgrp$avg_maj_day_heat + blkgrp$avg_min_day_heat + blkgrp$avg_col_day_heat
# normalize by blockgroup area in hectares (sq ft * 9.2903E-6 = ha)
blkgrp$hectares <- blkgrp$tot.area.sqf * 9.2903E-6
blkgrp$avg_day_heat_GJha <- blkgrp$avg_day_heat_total / blkgrp$hectares
# output data: working shapefile for the heat map figure
shapefile(blkgrp, here("data/outputs/osm-blkgrp-heat-working"), overwrite = T)
| /R/old code/7-osm-road-area-raster.R | no_license | cghoehne/transport-uhi-phx | R | false | false | 11,917 | r | ######################
## HEAT MAP FIGURE ##
######################
# ** It is recommended to use Microsoft Open R (v3.5.1) for improved performance without compromsing compatibility **
## SCRIPT PREPERATION
# clear space and allocate memory
gc()
memory.limit(size = 56000)
script.start <- Sys.time() # start script timestamp
# first make sure checkpoint is installed locally
# this is the only package that is ok to not use a 'checkpointed' (i.e. archived version of a package)
# checkpoint does not archive itself and it should not create dependency issues
if (!require("checkpoint")){
install.packages("checkpoint")
library(checkpoint, quietly = T)
}
# load all other dependant packages from the local repo
.libPaths(paste0(getwd(),"/.checkpoint/2019-01-01/lib/x86_64-w64-mingw32/3.5.1"))
#lib.path <- paste0(getwd(),"/.checkpoint/2019-01-01/lib/x86_64-w64-mingw32/3.5.1") # lib.loc = lib.path,
library(XML)
library(sp)
library(sf)
library(raster)
library(rgdal)
library(rgeos)
library(maptools)
library(gdalUtils)
library(doParallel)
library(data.table)
library(here)
# archive/update snapshot of packages at checkpoint date
checkpoint("2019-01-01", # Sys.Date() - 1 this calls the MRAN snapshot from yestersday
R.version = "3.5.1", # will only work if using the same version of R
checkpointLocation = here(), # calls here package
verbose = F)
# Write a data.frame/data.table to a .csv plus the matching QGIS .csvt
# sidecar file. QGIS reads the .csvt (a single row of quoted type names,
# comma-separated) to assign column types when importing the .csv as a layer.
#
# Args:
#   df:       data.frame (or data.table) to export.
#   filename: output path; ".csv" is appended if not already present.
#
# Side effects: writes <filename>.csv and <filename>.csvt.
write.qgis.csv <- function(df, filename){
  # Take only the first class of each column: columns such as POSIXct carry
  # multiple classes, so sapply(df, class) would return a ragged list and
  # break the type mapping below.
  col.class <- vapply(df, function(x) class(x)[1], character(1))
  # Map R column classes onto the QGIS .csvt type vocabulary; everything
  # that is not numeric/integer is exported as String.
  csvt <- ifelse(col.class == "numeric", "Real",
          ifelse(col.class == "integer", "Integer", "String"))
  # Append ".csv" unless the path already ends with it (the original scalar
  # ifelse/substr check is replaced by a plain conditional).
  if (!endsWith(filename, ".csv")) {
    filename <- paste0(filename, ".csv")
  }
  write.csv(df, file = filename, row.names = FALSE)
  # The sidecar is the same path with a trailing "t" (file.csv -> file.csvt);
  # writing a list produces one quoted row of type names.
  write.table(as.list(csvt), file = paste0(filename, "t"), sep = ",",
              row.names = FALSE, col.names = FALSE)
}
# import data
blkgrp <- shapefile(here("data/shapefiles/boundaries/phx-blkgrp-geom.shp")) # census blockgroup shapfile (clipped to Maricopa UZA)
osm.block.min <- readRDS(here("data/outputs/osm-blockgroup-dissolved-min.rds"))
osm.block.max <- readRDS(here("data/outputs/osm-blockgroup-dissolved-max.rds"))
uza.buffer <- readRDS(here("data/outputs/temp/uza-buffer.rds")) # Maricopa UZA buffered ~1mi
# rasterize polygon roadway min/max data and point parking data
# note: "For polygons, values are transferred if the polygon covers the center of a raster cell."
########
start.time <- Sys.time() # start script timestamp
# create empty raster at desired extent (use uza buffer to ensure everything captured)
#r <- raster(ext = extent(uza.buffer), crs = crs(uza.buffer), res = 32.8084) # create raster = ~10 x 10 m, 50 hrs est. run time with this time
#r <- raster(ext = extent(uza.buffer), crs = crs(uza.buffer), res = 1640.42) # create raster = ~500 x 500 m, ~ 40 mins
r <- raster(ext = extent(uza.buffer), crs = crs(uza.buffer), res = 3280.84) # create raster = ~1000 x 1000 m, ~ 5 mins
# calculate the number of cores
my.cores <- parallel::detectCores() - 1 # store computers cores
# number of polygons features in SPDF
features.min <- 1:nrow(osm.block.min[,])
features.max <- 1:nrow(osm.block.max[,])
# split features in n parts
n <- my.cores
parts.min <- split(features.min, cut(features.min, n))
parts.max <- split(features.max, cut(features.max, n))
# initiate cluster after loading all the necessary object to R environment
cl <- makeCluster(my.cores)
registerDoParallel(cl) # register parallel backend
clusterCall(cl, function(x) .libPaths(x), .libPaths())
print(cl)
# rasterize parts of min/max road network area and save in parellel
system.time(foreach(i = 1:n, .packages = c("raster", "here")) %dopar% {
rasterize(osm.block.min[parts.min[[i]],], r, getCover = T, background = 0,
filename = here(paste0("data/outputs/temp/road-min-part-", i, ".tif")),
overwrite = T)})
system.time(foreach(i = 1:n, .packages = c("raster", "here")) %dopar% {
rasterize(osm.block.max[parts.max[[i]],], r, getCover = T, background = 0,
filename = here(paste0("data/outputs/temp/road-max-part-", i, ".tif")),
overwrite = T)})
# stop cluster
stopCluster(cl)
# create list of raster parts from file
r.road.min <- lapply(1:n, function (i) raster(here(paste0("data/outputs/temp/road-min-part-", i, ".tif"))))
r.road.max <- lapply(1:n, function (i) raster(here(paste0("data/outputs/temp/road-max-part-", i, ".tif"))))
# merge all raster parts
r.road.min$fun <- sum
r.road.max$fun <- sum
r.road.min.m <- do.call(mosaic, r.road.min)
r.road.max.m <- do.call(mosaic, r.road.max)
# plot merged raster
plot(r.road.min.m)
plot(r.road.max.m)
# export to check how accurate in QGIS
writeRaster(r.road.min.m, here("data/outputs/temp/osm-min-area-raster.tif"), format = "GTiff", overwrite = T)
writeRaster(r.road.max.m, here("data/outputs/temp/osm-max-area-raster.tif"), format = "GTiff", overwrite = T)
# save R objects
saveRDS(r.road.min.m, here("data/outputs/temp/osm-min-area-raster.rds"))
saveRDS(r.road.max.m, here("data/outputs/temp/osm-max-area-raster.rds"))
# save image
save.image(here("data/outputs/temp/7-veh-pave-heat-map.RData"))
paste0("R model run complete on ", Sys.info()[4]," at ", Sys.time(),
". Model run length: ", round(difftime(Sys.time(), start.time, units = "mins"),0)," mins.")
############
# import parking data
parking <- fread(here("data/parking/phx-parking-blkgrp.csv")) # phoenix total parking data (aggregated to blockgroup id)
parking.par <- readRDS(here("data/parking/phoenix-parking-parcel.rds")) # phoenix off-street parking data (raw by parcel)
parking.pts <- shapefile(here("data/parking/phx-parcel-points.shp")) # points of center of APN of parcels
# simplify prop type var
parking.par <- parking.par[PROPTYPE == "Non-residential Off-street Spaces", type := "com"][PROPTYPE == "Residential Off-street Spaces", type := "res"]
# agg spaces to APN and keep type (first of uniques)
parking.par.m <- parking.par[, .(spaces = sum(spaces, na.rm = T), type = first(type)), by = "APN"]
# make parking points unique
parking.pts.u <- parking.pts[which(!duplicated(parking.pts$APN)), ]
# merge parking spaces totals to points by APN
parking.merged <- merge(parking.pts.u, parking.par.m, by = "APN")
saveRDS(parking.merged, here("data/parking/phoenix-parking-parcel-points.rds"))
# export parking parcel data to merge in qgis
#write.qgis.csv(parking.par.m, here("data/parking/parcel-off"))
# rasterize parking and min/max road area
#r.park <- rasterize(parking.merged[parking.merged$spaces,], r, field = "spaces") 42 Gb
# import most recent pavement model run data
# define folder for data reterival (automaticlly take most recent folder with "run_metadata" in it)
folder <- as.data.table(file.info(list.dirs(here("data/outputs/"), recursive = F)),
keep.rownames = T)[grep("run_metadata", rn),][order(ctime)][.N, rn]
all.model.runs <- readRDS(paste0(folder, "/stats_all_model_runs.rds")) # meta data by run
all.surface.data <- readRDS(paste0(folder, "/all_pave_surface_data.rds")) # surface temporal data by run
# ROAD AREA BY BLOCKGROUP
# min road area by blockgroup
blkgrp$min.road.area.sqf <- gArea(osm.block.min, byid = T)
blkgrp$tot.area.sqf <- gArea(blkgrp, byid = T)
blkgrp$min.road.pct <- blkgrp$min.road.area.sqf / blkgrp$tot.area.sqf
# road area by raster cell (diff way)
mean(blkgrp$min.road.pct)
max(blkgrp$min.road.pct)
# max road area by blockgroup
blkgrp$max.road.area.sqf <- gArea(osm.block.max, byid = T)
blkgrp$max.road.pct <- blkgrp$max.road.area.sqf / blkgrp$tot.area.sqf
# min/mean/max total percent of area covered by roads
sum(blkgrp$min.road.area.sqf) / sum(blkgrp$tot.area.sqf)
(sum(blkgrp$min.road.area.sqf) + sum(blkgrp$max.road.area.sqf)) / 2 / sum(blkgrp$tot.area.sqf)
sum(blkgrp$max.road.area.sqf) / sum(blkgrp$tot.area.sqf)
# Calculate total and fractional area of each fclass grouping.
# OSM fclass values bucketed into highway / major / minor / collector tiers.
hwy <- c("motorway")
maj <- c("primary", "trunk")
# NOTE: `min` shadows base::min for the remainder of this script
min <- c("secondary", "tertiary")
# BUG FIX: this assignment previously reused `maj`, silently clobbering the
# major-road classes above; per the avg_col_* variables computed below, the
# residential/service tier is the collector ("col") grouping.
col <- c("residential", "service", "unclassified")
# * 0.092903 m2 per ft2
blkgrp$avg_hwy_area <- 0.1 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
blkgrp$avg_maj_area <- 0.2 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
blkgrp$avg_min_area <- 0.3 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
blkgrp$avg_col_area <- 0.4 * (blkgrp$min.road.area.sqf + blkgrp$max.road.area.sqf) / 2 * 0.092903
#blkgrp$avg_hwy_frac <- blkgrp$avg_hwy_area / blkgrp$tot.area.sqf
#blkgrp$avg_maj_frac <- blkgrp$avg_maj_area / blkgrp$tot.area.sqf
#blkgrp$avg_min_frac <- blkgrp$avg_min_area / blkgrp$tot.area.sqf
#blkgrp$avg_col_frac <- blkgrp$avg_col_area / blkgrp$tot.area.sqf
# PARKING AREA BY BLOCKGROUP
# merge parking data to blkgrp shapefile
blkgrp <- merge(blkgrp, parking[, .(fid, res.off, com.off)], by = "fid", duplicateGeoms = T) # ignore on-street spaces for area calcs because part of roadway
# assumed upper and lower area allocated per space
blkgrp$min.park.area.sqf <- (blkgrp$com.off * 250) + (blkgrp$res.off * 200)
blkgrp$max.park.area.sqf <- (blkgrp$com.off * 331) + (blkgrp$res.off * 500)
# min/mean/max total percent of area covered by roads
sum(blkgrp$min.park.area.sqf, na.rm = T) / sum(blkgrp$tot.area.sqf, na.rm = T)
(sum(blkgrp$min.park.area.sqf, na.rm = T) + sum(blkgrp$max.park.area.sqf, na.rm = T)) / 2 / sum(blkgrp$tot.area.sqf, na.rm = T)
sum(blkgrp$max.park.area.sqf, na.rm = T) / sum(blkgrp$tot.area.sqf, na.rm = T)
# SUMMARIZE MODEL SURFACE DATA
# calc flux vars (W/m2)
all.surface.data[, inc.sol := ((1 - albedo) * SVF * solar)]
all.surface.data[, ref.sol := albedo * SVF * solar]
all.surface.data[, net.flux := -inc.sol + q.rad + q.cnv]
# calculate total energy by average day in season by batch in MJ / m2 (1 MJ == 1E6 J = 1E6 W * s)
delta.t <- 30
surface.data.a <- all.surface.data[, .(out.heat = delta.t * sum(q.rad + q.cnv) / 1E6,
inc.sol = delta.t * sum(inc.sol) / 1E6,
net.heat = delta.t * sum(net.flux) / 1E6,
ref.heat = delta.t * sum(ref.sol) / 1E6),
by = c("batch.name", "season")]
ground.heat <- surface.data.a[batch.name == "Bare Ground / Desert Soil", .(season,out.heat)]
setnames(ground.heat, "out.heat", "out.ground.heat")
surface.data.a <- merge(surface.data.a, ground.heat, by = "season")
surface.data.a[, added.heat := out.heat - out.ground.heat] # MJ / m2
# define roadway ratios to apply **TEMPORARY** WILL BE DEFINED LATER BY % AREA OF OSM BY FCLASS
surface.data.a <- merge(surface.data.a, data.table(ratio = c(0.30, 0.00, 0.00, 0.70),
batch.name = unique(surface.data.a[, batch.name])),
by = "batch.name")
# temp avg day factor for all pave types
avg.day.heat <- sum(surface.data.a[, .(day.heat = mean(added.heat * ratio)), by = "batch.name"][ ,day.heat])
# average daily added heat (over undevelopedbare ground )
# in GJ / day / blkgrp
blkgrp$avg_hwy_day_heat <- blkgrp$avg_hwy_area * avg.day.heat / 1000
blkgrp$avg_maj_day_heat <- blkgrp$avg_maj_area * avg.day.heat / 1000
blkgrp$avg_min_day_heat <- blkgrp$avg_min_area * avg.day.heat / 1000
blkgrp$avg_col_day_heat <- blkgrp$avg_col_area * avg.day.heat / 1000
blkgrp$avg_day_heat_total <- blkgrp$avg_hwy_day_heat + blkgrp$avg_maj_day_heat + blkgrp$avg_min_day_heat + blkgrp$avg_col_day_heat
blkgrp$hectares <- blkgrp$tot.area.sqf * 9.2903E-6
blkgrp$avg_day_heat_GJha <- blkgrp$avg_day_heat_total / blkgrp$hectares
# output data
shapefile(blkgrp, here("data/outputs/osm-blkgrp-heat-working"), overwrite = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locatate_header_groups.R
\name{hook}
\alias{hook}
\title{Supply an expression to identify which header groups are hooked.}
\usage{
hook(...)
}
\arguments{
\item{...}{expression applied to the header data that identifies which header groups are hooked}
}
\description{
This function is used with the \code{.hook_if} or \code{.hook_if_rev}
arguments in the \code{locate_groups} function.
This expression must evaluate to a single value for each header group.
For example, see below that any(fmt_alignment_indent == 0) is used rather than
fmt_alignment_indent == 0.
It passes an expression to \code{dplyr::summarise} that identifies which header groups are hooked
\itemize{
\item for example switched from N to NNW.
See the \code{locate_groups} documentation for more information and an example.
}
}
\examples{
\dontrun{
library(tidyverse)
# Read in tidyxl data frame
xl_df <-
locatr_example("worked-examples.xlsx") \%>\%
xlsx_cells_fmt(sheets = "pivot-hierarchy") \%>\%
append_fmt(fmt_alignment_indent)
# Add location annotations
xl_df <-
xl_df \%>\%
locate_data(data_type == "numeric") \%>\%
locate_groups(
direction = "W",
.groupings = groupings(fmt_alignment_indent),
.hook_if = hook_if(any(fmt_alignment_indent == 0))
) \%>\%
locate(direction = "N", name = student)
# Use `migrate` to reshape the data frame such that each data cell has its own row and each
# header variable has its own column.
xl_df \%>\% migrate()
}
}
| /man/hook.Rd | permissive | jimsforks/locatr | R | false | true | 1,521 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/locatate_header_groups.R
\name{hook}
\alias{hook}
\title{Supply an expression to identify which header groups are hooked.}
\usage{
hook(...)
}
\arguments{
\item{...}{expression applied to a identifies which header groups are hooked}
}
\description{
This function is used with the \code{.hook_if} or \code{.hook_if_rev}
arguments in the \code{locate_groups} function.
This expression must evaluate to a single value for each header group.
For example, see below that any(fmt_alignment_indent == 0) is used rather that
fmt_alignment_indent == 0.
It passes an expression to \code{dplyr::summarise} that identifies which header groups are hooked
\itemize{
\item for example swiched from N to NNW.
See the the \code{locate_groups} documentation for more information and an example.
}
}
\examples{
\dontrun{
library(tidyverse)
# Read in tidyxl data frame
xl_df <-
locatr_example("worked-examples.xlsx") \%>\%
xlsx_cells_fmt(sheets = "pivot-hierarchy") \%>\%
append_fmt(fmt_alignment_indent)
# Add location annotations
xl_df <-
xl_df \%>\%
locate_data(data_type == "numeric") \%>\%
locate_groups(
direction = "W",
.groupings = groupings(fmt_alignment_indent),
.hook_if = hook_if(any(fmt_alignment_indent == 0))
) \%>\%
locate(direction = "N", name = student)
# Use `migrate` to reshape the data frame such that each data cells has its own row and each
header variable has its own column.
xl_df \%>\% migrate()
}
}
|
ErrorCrit_KGE2 <- function(InputsCrit, OutputsModel, warnings = TRUE, verbose = TRUE) {
  ## Modified Kling-Gupta Efficiency KGE' (Kling et al., 2012):
  ##   KGE' = 1 - sqrt((r - 1)^2 + (gamma - 1)^2 + (beta - 1)^2)
  ## with r     = Pearson correlation(sim, obs),
  ##      gamma = cv(sim)/cv(obs)   (variability ratio),
  ##      beta  = mean(sim)/mean(obs) (bias ratio),
  ## computed on the transformed series selected by InputsCrit$transfo
  ## ("", "sqrt", "log", "inv" or "sort").
  ##
  ## Args:
  ##   InputsCrit   : 'InputsCrit' object (fields used: Qobs, BoolCrit, transfo, epsilon)
  ##   OutputsModel : 'OutputsModel' object (fields used: Qsim; time-step class e.g. "daily")
  ##   warnings     : emit data-quality warnings (zeroes, short series)?
  ##   verbose      : print the criterion and sub-criterion values?
  ## Returns: list(CritValue, CritName, SubCritValues, SubCritNames,
  ##               CritBestValue, Multiplier, Ind_notcomputed),
  ##          or list(CritValue = NA) when fewer than 2 usable time steps remain.

  ##Arguments_check________________________________
  ## fix: removed the unreachable return(NULL) statements that followed stop()
  if (!inherits(InputsCrit, "InputsCrit")) {
    stop("InputsCrit must be of class 'InputsCrit' \n")
  }
  if (!inherits(OutputsModel, "OutputsModel")) {
    stop("OutputsModel must be of class 'OutputsModel' \n")
  }

  ##Initialisation_________________________________
  CritName <- NA
  if (InputsCrit$transfo == "") {
    CritName <- "KGE'[Q]"
  } else if (InputsCrit$transfo == "sqrt") {
    CritName <- "KGE'[sqrt(Q)]"
  } else if (InputsCrit$transfo == "log") {
    CritName <- "KGE'[log(Q)]"
  } else if (InputsCrit$transfo == "inv") {
    CritName <- "KGE'[1/Q]"
  } else if (InputsCrit$transfo == "sort") {
    CritName <- "KGE'[sort(Q)]"
  }
  CritValue <- NA
  CritBestValue <- +1
  Multiplier <- -1 ### must be equal to -1 or +1 only

  ##Data_preparation_______________________________
  ## mask out time steps excluded by BoolCrit before any transformation
  VarObs <- InputsCrit$Qobs
  VarObs[!InputsCrit$BoolCrit] <- NA
  VarSim <- OutputsModel$Qsim
  VarSim[!InputsCrit$BoolCrit] <- NA

  ##Data_transformation
  ## fix: zero-value warnings are now gated by the `warnings` argument (was `verbose`)
  if (InputsCrit$transfo %in% c("log", "inv") & is.null(InputsCrit$epsilon) & warnings) {
    if (any(VarObs %in% 0)) {
      warning("zeroes detected in Qobs: the corresponding time-steps will be excluded from the criteria computation if the epsilon argument of 'CreateInputsCrit' = NULL")
    }
    if (any(VarSim %in% 0)) {
      warning("zeroes detected in Qsim: the corresponding time-steps will be excluded from the criteria computation if the epsilon argument of 'CreateInputsCrit' = NULL")
    }
  }
  if ("epsilon" %in% names(InputsCrit) & !is.null(InputsCrit$epsilon)) {
    ## shift both series to keep log/inv transformations defined at zero flows
    VarObs <- VarObs + InputsCrit$epsilon
    VarSim <- VarSim + InputsCrit$epsilon
  }
  if (InputsCrit$transfo == "sqrt") {
    VarObs <- sqrt(VarObs)
    VarSim <- sqrt(VarSim)
  }
  if (InputsCrit$transfo == "log") {
    VarObs <- log(VarObs)
    VarSim <- log(VarSim)
    VarSim[VarSim < -1e100] <- NA   # guard against log(0) = -Inf
  }
  if (InputsCrit$transfo == "inv") {
    VarObs <- 1 / VarObs
    VarSim <- 1 / VarSim
    VarSim[abs(VarSim) > 1e+100] <- NA   # guard against 1/0 = Inf
  }
  if (InputsCrit$transfo == "sort") {
    ## compare flow-duration curves: sort both series, NAs last
    VarSim[is.na(VarObs)] <- NA
    VarSim <- sort(VarSim, na.last = TRUE)
    VarObs <- sort(VarObs, na.last = TRUE)
    InputsCrit$BoolCrit <- sort(InputsCrit$BoolCrit, decreasing = TRUE)
  }

  ##TS_ignore
  ## time steps excluded from the computation (non-finite or masked out)
  TS_ignore <- !is.finite(VarObs) | !is.finite(VarSim) | !InputsCrit$BoolCrit
  Ind_TS_ignore <- which(TS_ignore)
  if (length(Ind_TS_ignore) == 0) {
    Ind_TS_ignore <- NULL
  }
  ## fix: merged the duplicated == 0 and == 1 early-return branches; at least
  ## 2 points are needed for the standard deviation computation
  if (sum(!TS_ignore) <= 1) {
    return(list(CritValue = NA))
  }
  ## minimum number of usable time steps below which a short-series warning is issued
  ## fix: WarningTS now has a default, so it is defined even for an OutputsModel
  ## whose time-step class is not hourly/daily/monthly/yearly (was a runtime error)
  WarningTS <- 365
  if (inherits(OutputsModel, "monthly")) {
    WarningTS <- 12
  }
  if (inherits(OutputsModel, "yearly")) {
    WarningTS <- 3
  }
  if (sum(!TS_ignore) < WarningTS & warnings) {
    warning("\t criterion computed on less than ", WarningTS, " time-steps")
  }

  ##Other_variables_preparation
  meanVarObs <- mean(VarObs[!TS_ignore])
  meanVarSim <- mean(VarSim[!TS_ignore])
  iCrit <- 0
  SubCritPrint  <- NULL
  SubCritNames  <- NULL
  SubCritValues <- NULL

  ##SubErrorCrit_____KGE_rPearson__________________
  iCrit <- iCrit + 1
  SubCritPrint[iCrit]  <- paste0(CritName, " cor(sim, obs, \"pearson\") =")
  SubCritValues[iCrit] <- NA
  SubCritNames[iCrit]  <- "r"
  Numer <- sum((VarObs[!TS_ignore] - meanVarObs) * (VarSim[!TS_ignore] - meanVarSim))
  Deno1 <- sqrt(sum((VarObs[!TS_ignore] - meanVarObs)^2))
  Deno2 <- sqrt(sum((VarSim[!TS_ignore] - meanVarSim)^2))
  if (Numer == 0) {
    ## both series constant -> perfect agreement; one constant -> no correlation
    if (Deno1 == 0 & Deno2 == 0) {
      Crit <- 1
    } else {
      Crit <- 0
    }
  } else {
    Crit <- Numer / (Deno1 * Deno2)
  }
  if (is.numeric(Crit) & is.finite(Crit)) {
    SubCritValues[iCrit] <- Crit
  }

  ##SubErrorCrit_____KGE_gamma______________________
  iCrit <- iCrit + 1
  SubCritPrint[iCrit]  <- paste0(CritName, " cv(sim)/cv(obs) =")
  SubCritValues[iCrit] <- NA
  SubCritNames[iCrit]  <- "gamma"
  ## 99999 is a sentinel for an undefined coefficient of variation (zero mean)
  if (meanVarSim == 0) {
    if (sd(VarSim[!TS_ignore]) == 0) {
      CVsim <- 1
    } else {
      CVsim <- 99999
    }
  } else {
    CVsim <- sd(VarSim[!TS_ignore]) / meanVarSim
  }
  if (meanVarObs == 0) {
    if (sd(VarObs[!TS_ignore]) == 0) {
      CVobs <- 1
    } else {
      CVobs <- 99999
    }
  } else {
    CVobs <- sd(VarObs[!TS_ignore]) / meanVarObs
  }
  if (CVsim == 0 & CVobs == 0) {
    Crit <- 1
  } else {
    Crit <- CVsim / CVobs
  }
  if (is.numeric(Crit) & is.finite(Crit)) {
    SubCritValues[iCrit] <- Crit
  }

  ##SubErrorCrit_____KGE_beta______________________
  iCrit <- iCrit + 1
  SubCritPrint[iCrit]  <- paste0(CritName, " mean(sim)/mean(obs) =")
  SubCritValues[iCrit] <- NA
  SubCritNames[iCrit]  <- "beta"
  if (meanVarSim == 0 & meanVarObs == 0) {
    Crit <- 1
  } else {
    Crit <- meanVarSim / meanVarObs
  }
  if (is.numeric(Crit) & is.finite(Crit)) {
    SubCritValues[iCrit] <- Crit
  }

  ##ErrorCrit______________________________________
  ## KGE' is only defined when all three components could be computed
  if (sum(is.na(SubCritValues)) == 0) {
    CritValue <- (1 - sqrt((SubCritValues[1] - 1)^2 + (SubCritValues[2] - 1)^2 + (SubCritValues[3] - 1)^2))
  }

  ##Verbose______________________________________
  if (verbose) {
    message("Crit. ", CritName, " = ", sprintf("%.4f", CritValue))
    message(paste("\tSubCrit.", SubCritPrint, sprintf("%.4f", SubCritValues), "\n", sep = " "))
  }

  ##Output_________________________________________
  OutputsCrit <- list(CritValue = CritValue,
                      CritName = CritName,
                      SubCritValues = SubCritValues,
                      SubCritNames = SubCritNames,
                      CritBestValue = CritBestValue,
                      Multiplier = Multiplier,
                      Ind_notcomputed = Ind_TS_ignore
  )
  return(OutputsCrit)
}
| /R/ErrorCrit_KGE2.R | no_license | kongdd/airGR | R | false | false | 6,965 | r | ErrorCrit_KGE2 <- function(InputsCrit, OutputsModel, warnings = TRUE, verbose = TRUE) {
##Arguments_check________________________________
if (!inherits(InputsCrit, "InputsCrit")) {
stop("InputsCrit must be of class 'InputsCrit' \n")
return(NULL)
}
if (!inherits(OutputsModel, "OutputsModel")) {
stop("OutputsModel must be of class 'OutputsModel' \n")
return(NULL)
}
##Initialisation_________________________________
CritName <- NA
if (InputsCrit$transfo == "") {
CritName <- "KGE'[Q]"
}
if (InputsCrit$transfo == "sqrt") {
CritName <- "KGE'[sqrt(Q)]"
}
if (InputsCrit$transfo == "log") {
CritName <- "KGE'[log(Q)]"
}
if (InputsCrit$transfo == "inv") {
CritName <- "KGE'[1/Q]"
}
if (InputsCrit$transfo == "sort") {
CritName <- "KGE'[sort(Q)]"
}
CritValue <- NA
CritBestValue <- +1
Multiplier <- -1
### must be equal to -1 or +1 only
##Data_preparation_______________________________
VarObs <- InputsCrit$Qobs
VarObs[!InputsCrit$BoolCrit] <- NA
VarSim <- OutputsModel$Qsim
VarSim[!InputsCrit$BoolCrit] <- NA
##Data_transformation
if (InputsCrit$transfo %in% c("log", "inv") & is.null(InputsCrit$epsilon) & verbose) {
if (any(VarObs %in% 0)) {
warning("zeroes detected in Qobs: the corresponding time-steps will be exclude from the criteria computation if the epsilon agrument of 'CreateInputsCrit' = NULL")
}
if (any(VarSim %in% 0)) {
warning("zeroes detected in Qsim: the corresponding time-steps will be exclude from the criteria computation if the epsilon agrument of 'CreateInputsCrit' = NULL")
}
}
if ("epsilon" %in% names(InputsCrit) & !is.null(InputsCrit$epsilon)) {
VarObs <- VarObs + InputsCrit$epsilon
VarSim <- VarSim + InputsCrit$epsilon
}
if (InputsCrit$transfo == "sqrt") {
VarObs <- sqrt(VarObs)
VarSim <- sqrt(VarSim)
}
if (InputsCrit$transfo == "log") {
VarObs <- log(VarObs)
VarSim <- log(VarSim)
VarSim[VarSim < -1e100] <- NA
}
if (InputsCrit$transfo == "inv") {
VarObs <- 1 / VarObs
VarSim <- 1 / VarSim
VarSim[abs(VarSim) > 1e+100] <- NA
}
if (InputsCrit$transfo == "sort") {
VarSim[is.na(VarObs)] <- NA
VarSim <- sort(VarSim, na.last = TRUE)
VarObs <- sort(VarObs, na.last = TRUE)
InputsCrit$BoolCrit <- sort(InputsCrit$BoolCrit, decreasing = TRUE)
}
##TS_ignore
TS_ignore <- !is.finite(VarObs) | !is.finite(VarSim) | !InputsCrit$BoolCrit
Ind_TS_ignore <- which(TS_ignore)
if (length(Ind_TS_ignore) == 0) {
Ind_TS_ignore <- NULL
}
if (sum(!TS_ignore) == 0) {
OutputsCrit <- list(NA)
names(OutputsCrit) <- c("CritValue")
return(OutputsCrit)
}
if (sum(!TS_ignore) == 1) {
OutputsCrit <- list(NA)
names(OutputsCrit) <- c("CritValue")
return(OutputsCrit)
} ### to avoid a problem in standard deviation computation
if (inherits(OutputsModel, "hourly")) {
WarningTS <- 365
}
if (inherits(OutputsModel, "daily")) {
WarningTS <- 365
}
if (inherits(OutputsModel, "monthly")) {
WarningTS <- 12
}
if (inherits(OutputsModel, "yearly")) {
WarningTS <- 3
}
if (sum(!TS_ignore) < WarningTS & warnings) {
warning("\t criterion computed on less than ", WarningTS, " time-steps")
}
##Other_variables_preparation
meanVarObs <- mean(VarObs[!TS_ignore])
meanVarSim <- mean(VarSim[!TS_ignore])
iCrit <- 0
SubCritPrint <- NULL
SubCritNames <- NULL
SubCritValues <- NULL
##SubErrorCrit_____KGE_rPearson__________________
iCrit <- iCrit + 1
SubCritPrint[iCrit] <- paste(CritName, " cor(sim, obs, \"pearson\") =", sep = "")
SubCritValues[iCrit] <- NA
SubCritNames[iCrit] <- "r"
Numer <- sum((VarObs[!TS_ignore] - meanVarObs) * (VarSim[!TS_ignore] - meanVarSim))
Deno1 <- sqrt(sum((VarObs[!TS_ignore] - meanVarObs)^2))
Deno2 <- sqrt(sum((VarSim[!TS_ignore] - meanVarSim)^2))
if (Numer == 0) {
if (Deno1 == 0 & Deno2 == 0) {
Crit <- 1
} else {
Crit <- 0
}
} else {
Crit <- Numer / (Deno1 * Deno2)
}
if (is.numeric(Crit) & is.finite(Crit)) {
SubCritValues[iCrit] <- Crit
}
##SubErrorCrit_____KGE_gamma______________________
iCrit <- iCrit + 1
SubCritPrint[iCrit] <- paste(CritName, " cv(sim)/cv(obs) =", sep = "")
SubCritValues[iCrit] <- NA
SubCritNames[iCrit] <- "gamma"
if (meanVarSim == 0) {
if (sd(VarSim[!TS_ignore]) == 0) {
CVsim <- 1
} else {
CVsim <- 99999
}
} else {
CVsim <- sd(VarSim[!TS_ignore]) / meanVarSim
}
if (meanVarObs == 0) {
if (sd(VarObs[!TS_ignore]) == 0) {
CVobs <- 1
} else {
CVobs <- 99999
}
} else {
CVobs <- sd(VarObs[!TS_ignore]) / meanVarObs
}
if (CVsim == 0 &
CVobs == 0) {
Crit <- 1
} else {
Crit <- CVsim / CVobs
}
if (is.numeric(Crit) & is.finite(Crit)) {
SubCritValues[iCrit] <- Crit
}
##SubErrorCrit_____KGE_beta______________________
iCrit <- iCrit + 1
SubCritPrint[iCrit] <- paste(CritName, " mean(sim)/mean(obs) =", sep = "")
SubCritValues[iCrit] <- NA
SubCritNames[iCrit] <- "beta"
if (meanVarSim == 0 & meanVarObs == 0) {
Crit <- 1
} else {
Crit <- meanVarSim / meanVarObs
}
if (is.numeric(Crit) & is.finite(Crit)) {
SubCritValues[iCrit] <- Crit
}
##ErrorCrit______________________________________
if (sum(is.na(SubCritValues)) == 0) {
CritValue <- (1 - sqrt((SubCritValues[1] - 1)^2 + (SubCritValues[2] - 1)^2 + (SubCritValues[3] - 1)^2))
}
##Verbose______________________________________
if (verbose) {
message("Crit. ", CritName, " = ", sprintf("%.4f", CritValue))
message(paste("\tSubCrit.", SubCritPrint, sprintf("%.4f", SubCritValues), "\n", sep = " "))
}
##Output_________________________________________
OutputsCrit <- list(CritValue = CritValue,
CritName = CritName,
SubCritValues = SubCritValues,
SubCritNames = SubCritNames,
CritBestValue = CritBestValue,
Multiplier = Multiplier,
Ind_notcomputed = Ind_TS_ignore
)
return(OutputsCrit)
}
|
## Cache-enabled matrix inverse (Coursera ProgrammingAssignment2).
## makeCacheMatrix() wraps a matrix in a list of closures that share a cached
## inverse through their enclosing environment; cacheSolve() (below) computes
## the inverse once and reuses the cached value on later calls.

## Create a special "matrix": a list of getter/setter closures over the matrix
## `x` and its cached inverse `s` (NULL means "not computed yet").
makeCacheMatrix <- function(x = matrix(), ...) {
  x <- matrix(x, ...)     # coerce input; dims etc. forwarded via ...
  s <- NULL               # cached inverse
  set <- function(y, ...) {
    x <<- y
    s <<- NULL            # fix: was `s <- NULL` (local assignment), which left a
  }                       # stale cached inverse after the matrix was replaced
  get <- function() x
  setsolve <- function(solve) s <<- solve
  getsolve <- function() s
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## Return the inverse of the special "matrix" `x` (built by makeCacheMatrix).
## Reuses the cached inverse when available; otherwise computes it with
## solve() (extra arguments forwarded) and stores it back in the cache.
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (is.null(cached)) {
    ## cache miss: compute the inverse and remember it for next time
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setsolve(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | hrobot3/ProgrammingAssignment2 | R | false | false | 738 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix(), ...) {
x <- matrix(x, ...)
s <- NULL
set <- function(y, ...) {
x <<- y
s <- NULL
}
get <- function() x
setsolve <- function(solve) s <<- solve
getsolve <- function() s
list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
s <- x$getsolve()
if(!is.null(s)){
message("getting cached data")
return(s)
}
data <- x$get()
s <- solve(data, ...)
x$setsolve(s)
s
}
|
# Household Survey data analysis - BUSINESS
# (header previously said AGRICULTURE; this script analyses household business activity)
# Harmonic Biosphere Company Limited
# June, 2020

# Set working directory --------------------------------------------------------
# NOTE(review): setwd() in scripts is fragile; prefer project-relative paths (e.g. here::here())
setwd("~")
dir.create("RAP", showWarnings = FALSE)
setwd("~/RAP")

# Load/install required packages/libraries ---------------------------------------
library(tidyverse)
library(scales)
library(ggrepel)
options(scipen = 999)   # suppress scientific notation in printed output

df<-read.csv("Household_Survey.csv",check.names = TRUE,stringsAsFactors = FALSE)

# Renaming variables (clean up check.names artefacts from the CSV export)
# NOTE(review): HM7_G is taken from column HM6_G.1 and HM8_G from HM_G --
# confirm these mappings against the questionnaire; they look like export quirks.
df<-df %>%
  rename(Respondent_Name = Respondent_.Name,
         HoH_N = HoH_.N, HM1_N = HM1_.N, HM7_G = HM6_G.1, HM8_G = HM_G)

# BUSINESS ---------------
# c) Percentage of other economic activities [small business activities etc. in each affected village by the project

# Recode "-" placeholders to NA, then keep only rows with a Business answer.
# view() opens the data in the viewer and invisibly returns it, so the pipe result is kept.
df_Business <- df %>%
  select(Village, Business, Other_sources) %>%
  mutate(Business = ifelse(Business == "-", NA, Business),
         Other_sources = ifelse(Other_sources == "-", NA,Other_sources)) %>%
  filter(!is.na(Business)) %>%
  view()

# Business: per-village Yes/No counts, widened to columns, then percentages,
# then re-lengthened so each village has one row per Yes/No percentage.
# NOTE(review): rowSums(.[2:3]) assumes No/Yes land in columns 2-3 after
# pivot_wider -- fragile if response levels change.
Busines <- df_Business %>%
  group_by(Village, Business) %>%
  summarise(Busines_Activ = n()) %>%
  ungroup()%>%
  pivot_wider(names_from = "Business", values_from = "Busines_Activ")%>%
  replace(is.na(.),0)%>%
  mutate(Total = rowSums(.[2:3]))%>%
  mutate(No_Perc = round(No/Total*100, digits = 2),
         Yes_Perc = round(Yes/Total*100, digits = 2))%>%
  pivot_longer(cols = 5:6, names_to = "Business", values_to = "Percentages") %>%
  mutate(Business = ifelse(Business == "Yes_Perc", "Yes", "No")) %>%
  view()
# Kisewe pie chart plot
# Large (>90%) slice labels are placed centred; small slices are rotated 90
# degrees and nudged so their labels fit inside the narrow wedge.
Busines %>%
  filter(Village == "Kisewe") %>%
  ggplot(aes(x = "", y = Percentages, fill = Business))+
  geom_bar(stat = "identity")+
  geom_text(aes(label = ifelse(Percentages > 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), vjust = -6, size = 5)+
  geom_text(aes(label = ifelse(Percentages < 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), angle = 90, hjust = -0.5, size = 5)+
  coord_polar("y", start = 0)+
  labs(title = "Household Involved in Business - Kisewe Village", caption = "HBCL RAP 2020")+
  theme_void()+
  theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8))+
  theme(plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25))+
  theme(legend.title = element_text(face = "bold", size = 10))

# Makanga pie chart plot
# Same chart as above but with a single label layer (all slices labelled the same way).
Busines %>%
  filter(Village == "Makanga") %>%
  ggplot(aes(x = "", y = Percentages, fill = Business))+
  geom_bar(stat = "identity")+
  geom_text(aes(label = str_c(Percentages, "%")), position = position_stack(vjust = 0.5), vjust = -2, size = 5)+
  #geom_text(aes(label = ifelse(Percentages < 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), angle = 90, hjust = -0.5, size = 5)+
  coord_polar("y", start = 0)+
  labs(title = "Household Involved in Business - Makanga Village", caption = "HBCL RAP 2020")+
  theme_void()+
  theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8))+
  theme(plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25))+
  theme(legend.title = element_text(face = "bold", size = 10))

# Mdindo pie chart plot (same layout as Makanga)
Busines %>%
  filter(Village == "Mdindo") %>%
  ggplot(aes(x = "", y = Percentages, fill = Business))+
  geom_bar(stat = "identity")+
  geom_text(aes(label = str_c(Percentages, "%")), position = position_stack(vjust = 0.5), vjust = -2, size = 5)+
  #geom_text(aes(label = ifelse(Percentages < 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), angle = 90, hjust = -0.5, size = 5)+
  coord_polar("y", start = 0)+
  labs(title = "Household Involved in Business - Mdindo Village", caption = "HBCL RAP 2020")+
  theme_void()+
  theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8))+
  theme(plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25))+
  theme(legend.title = element_text(face = "bold", size = 10))

# Nawenge pie chart plot
# Zero-percentage slices are dropped so no empty wedge is labelled.
# NOTE(review): these four nearly identical chart blocks could be factored into
# one helper function parameterised by village and label placement.
Busines %>%
  filter(Village == "Nawenge") %>%
  filter(Percentages > 0) %>%
  ggplot(aes(x = "", y = Percentages, fill = Business))+
  geom_bar(stat = "identity")+
  geom_text(aes(label = str_c(Percentages, "%")), position = position_stack(vjust = 0.5), vjust = -8, size = 5)+
  #geom_text(aes(label = ifelse(Percentages < 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), angle = 90, hjust = -0.5, size = 5)+
  coord_polar("y", start = 0)+
  labs(title = "Household Involved in Business - Nawenge Village", caption = "HBCL RAP 2020")+
  theme_void()+
  theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8))+
  theme(plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25))+
  theme(legend.title = element_text(face = "bold", size = 10))

# Other income sources: same count/percentage table as `Busines`, built from
# the Other_sources column (same column-position caveat as above).
other_Busines <- df_Business %>%
  group_by(Village, Other_sources) %>%
  summarise(Other_Activ = n()) %>%
  ungroup()%>%
  pivot_wider(names_from = "Other_sources", values_from = "Other_Activ")%>%
  replace(is.na(.),0)%>%
  mutate(Total = rowSums(.[2:3]))%>%
  mutate(No_Perc = round(No/Total*100, digits = 2),
         Yes_Perc = round(Yes/Total*100, digits = 2))%>%
  view()
| /Household Data Analysis/R Scripts/Household_Business.R | no_license | MalesaMT/Household-Survey | R | false | false | 5,681 | r | # Household Survey data analysis - AGRICULTURE
# Harmonic Biosphere Company Limited
# June, 2020
# Set working directory --------------------------------------------------------
setwd("~")
dir.create("RAP", showWarnings = FALSE)
setwd("~/RAP")
# Load/install required packageslibralies ---------------------------------------
library(tidyverse)
library(scales)
library(ggrepel)
options(scipen = 999)
df<-read.csv("Household_Survey.csv",check.names = TRUE,stringsAsFactors = FALSE)
# Renaming varibales
df<-df %>%
rename(Respondent_Name = Respondent_.Name,
HoH_N = HoH_.N, HM1_N = HM1_.N, HM7_G = HM6_G.1, HM8_G = HM_G)
# BUSINESS ---------------
# c) Percentage of other economic activities [small business activities etc. in each affected village by the project
df_Business <- df %>%
select(Village, Business, Other_sources) %>%
mutate(Business = ifelse(Business == "-", NA, Business),
Other_sources = ifelse(Other_sources == "-", NA,Other_sources)) %>%
filter(!is.na(Business)) %>%
view()
# Business
Busines <- df_Business %>%
group_by(Village, Business) %>%
summarise(Busines_Activ = n()) %>%
ungroup()%>%
pivot_wider(names_from = "Business", values_from = "Busines_Activ")%>%
replace(is.na(.),0)%>%
mutate(Total = rowSums(.[2:3]))%>%
mutate(No_Perc = round(No/Total*100, digits = 2),
Yes_Perc = round(Yes/Total*100, digits = 2))%>%
pivot_longer(cols = 5:6, names_to = "Business", values_to = "Percentages") %>%
mutate(Business = ifelse(Business == "Yes_Perc", "Yes", "No")) %>%
view()
# Kisewe pie chart plot
Busines %>%
filter(Village == "Kisewe") %>%
ggplot(aes(x = "", y = Percentages, fill = Business))+
geom_bar(stat = "identity")+
geom_text(aes(label = ifelse(Percentages > 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), vjust = -6, size = 5)+
geom_text(aes(label = ifelse(Percentages < 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), angle = 90, hjust = -0.5, size = 5)+
coord_polar("y", start = 0)+
labs(title = "Household Involved in Business - Kisewe Village", caption = "HBCL RAP 2020")+
theme_void()+
theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8))+
theme(plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25))+
theme(legend.title = element_text(face = "bold", size = 10))
# Makanga pie chart plot
Busines %>%
filter(Village == "Makanga") %>%
ggplot(aes(x = "", y = Percentages, fill = Business))+
geom_bar(stat = "identity")+
geom_text(aes(label = str_c(Percentages, "%")), position = position_stack(vjust = 0.5), vjust = -2, size = 5)+
#geom_text(aes(label = ifelse(Percentages < 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), angle = 90, hjust = -0.5, size = 5)+
coord_polar("y", start = 0)+
labs(title = "Household Involved in Business - Makanga Village", caption = "HBCL RAP 2020")+
theme_void()+
theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8))+
theme(plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25))+
theme(legend.title = element_text(face = "bold", size = 10))
# Mdindo pie chart plot
Busines %>%
filter(Village == "Mdindo") %>%
ggplot(aes(x = "", y = Percentages, fill = Business))+
geom_bar(stat = "identity")+
geom_text(aes(label = str_c(Percentages, "%")), position = position_stack(vjust = 0.5), vjust = -2, size = 5)+
#geom_text(aes(label = ifelse(Percentages < 90, str_c(Percentages, "%"), "")), position = position_stack(vjust = 0.5), angle = 90, hjust = -0.5, size = 5)+
coord_polar("y", start = 0)+
labs(title = "Household Involved in Business - Mdindo Village", caption = "HBCL RAP 2020")+
theme_void()+
theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8))+
theme(plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25))+
theme(legend.title = element_text(face = "bold", size = 10))
# Pie chart of household business involvement for Nawenge village. Zero-percent
# categories are dropped beforehand so empty slices do not clutter the legend
# labels; the text nudge (vjust = -8) also differs from the other villages.
Busines %>%
  filter(Village == "Nawenge") %>%
  filter(Percentages > 0) %>%
  ggplot(aes(x = "", y = Percentages, fill = Business)) +
  geom_col() +
  geom_text(aes(label = str_c(Percentages, "%")),
            position = position_stack(vjust = 0.5), vjust = -8, size = 5) +
  coord_polar("y", start = 0) +
  labs(title = "Household Involved in Business - Nawenge Village",
       caption = "HBCL RAP 2020") +
  theme_void() +
  theme(plot.title = element_text(hjust = 0.5, size = 15, face = "bold", vjust = -8),
        plot.caption = element_text(hjust = 0.5, size = 5, vjust = 25),
        legend.title = element_text(face = "bold", size = 10))
# other Busines
# Cross-tabulate households by village and whether they report other income
# sources, then convert the counts to within-village percentages.
other_Busines <- df_Business %>%
  group_by(Village, Other_sources) %>%
  summarise(Other_Activ = n()) %>%
  ungroup()%>%
  # one row per village, one count column per Other_sources level
  pivot_wider(names_from = "Other_sources", values_from = "Other_Activ")%>%
  replace(is.na(.),0)%>%
  # NOTE(review): columns are addressed by position here -- this assumes the
  # two count columns land in positions 2:3 (i.e. Other_sources has exactly
  # the levels "No" and "Yes"); verify if the survey coding ever changes.
  mutate(Total = rowSums(.[2:3]))%>%
  mutate(No_Perc = round(No/Total*100, digits = 2),
         Yes_Perc = round(Yes/Total*100, digits = 2))%>%
  # view() opens the result in the data viewer and returns it invisibly, so
  # the assignment above still receives the data frame
  view()
|
# plot3: energy sub-metering readings over 2007-02-01/02 from the UCI
# household power consumption data set, drawn as three overlaid line series.
library(readr)
hpcDF <- read_delim("Ryan/Education/Coursera_Data_Science_Specialization/04_ExploratoryAnalysis/2018-August/household_power_consumption.txt", delim = ";", escape_double = FALSE, trim_ws = TRUE)
# Fix: the original called as.data.frame(hpcDF) without assigning the result,
# which is a no-op; keep the intended conversion by storing it.
hpcDF <- as.data.frame(hpcDF)
hpcDF$Date <- as.Date(hpcDF$Date, "%d/%m/%Y")
# Restrict to the two analysis days required by the assignment
hpcDF <- subset(hpcDF, Date == '2007-02-01' | Date == '2007-02-02')
hpcDF$Time <- format(strptime(hpcDF$Time, "%H:%M:%S"),"%H:%M:%S")
# Combined timestamp used as the x axis
hpcDF$DateTime <- as.POSIXct(paste(hpcDF$Date, hpcDF$Time), format="%Y-%m-%d %H:%M:%S")
plot(x=hpcDF$DateTime,
     y=hpcDF$Sub_metering_1,
     type="l",
     col="black",
     xlab="",
     ylab="Energy sub metering")
lines(hpcDF$DateTime, hpcDF$Sub_metering_2, col="red")
lines(hpcDF$DateTime, hpcDF$Sub_metering_3, col="blue")
legend("topright",
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col = c("black", "red", "blue"),
       lty=1, cex = 0.5)
# Copy the screen device to a 480x480 PNG; closed by the dev.off() that follows
dev.copy(png, file = "plot3.png", width=480, height=480)
dev.off() | /plot3.R | no_license | thatsawinner/ExData_Plotting1 | R | false | false | 957 | r | library(readr)
hpcDF <- read_delim("Ryan/Education/Coursera_Data_Science_Specialization/04_ExploratoryAnalysis/2018-August/household_power_consumption.txt", ";", escape_double = FALSE, trim_ws = TRUE)
as.data.frame(hpcDF)
hpcDF$Date <- as.Date(hpcDF$Date, "%d/%m/%Y")
hpcDF <- subset(hpcDF, Date == '2007-02-01' | Date == '2007-02-02')
hpcDF$Time <- format(strptime(hpcDF$Time, "%H:%M:%S"),"%H:%M:%S")
hpcDF$DateTime <- as.POSIXct(paste(hpcDF$Date, hpcDF$Time), format="%Y-%m-%d %H:%M:%S")
plot(x=hpcDF$DateTime,
y=hpcDF$Sub_metering_1,
type="l",
col="black",
xlab="",
ylab="Energy sub metering")
lines(hpcDF$DateTime, hpcDF$Sub_metering_2, col="red")
lines(hpcDF$DateTime, hpcDF$Sub_metering_3, col="blue")
legend("topright",
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col = c("black", "red", "blue"),
lty=1, cex = 0.5)
dev.copy(png, file = "plot3.png", width=480, height=480)
dev.off() |
data<-read.table("Course project 1.txt",sep=";",header=TRUE)
> data$Date<-strptime(data$Date,"%d/%m/%Y")
> par(mar=c(4,4,2,2))
> par(mfrow=c(2,2))
> plot(data$Global_active_power,type="l",xlab="",ylab="Global Active Power")
> names(data)
[1] "Date" "Time" "Global_active_power"
[4] "Global_reactive_power" "Voltage" "Global_intensity"
[7] "Sub_metering_1" "Sub_metering_2" "Sub_metering_3"
> plot(data$Voltage,type="l",xlab="datetime",ylab="Voltage")
> plot(data$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering")
> lines(data$Sub_metering_2,col="red")
> lines(data$Sub_metering_3,col="blue")
> legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),pch=-1,lty=1,col=c("black","red","blue"))
> plot(data$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
> dev.copy(png,file="plot4.png",height=480,width=480)
png
3
> dev.off()
windows
2
>
| /plot 4.R | no_license | yiyezhiqiulily/ExData_Plotting1 | R | false | false | 987 | r | data<-read.table("Course project 1.txt",sep=";",header=TRUE)
> data$Date<-strptime(data$Date,"%d/%m/%Y")
> par(mar=c(4,4,2,2))
> par(mfrow=c(2,2))
> plot(data$Global_active_power,type="l",xlab="",ylab="Global Active Power")
> names(data)
[1] "Date" "Time" "Global_active_power"
[4] "Global_reactive_power" "Voltage" "Global_intensity"
[7] "Sub_metering_1" "Sub_metering_2" "Sub_metering_3"
> plot(data$Voltage,type="l",xlab="datetime",ylab="Voltage")
> plot(data$Sub_metering_1,type="l",xlab="",ylab="Energy sub metering")
> lines(data$Sub_metering_2,col="red")
> lines(data$Sub_metering_3,col="blue")
> legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),pch=-1,lty=1,col=c("black","red","blue"))
> plot(data$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
> dev.copy(png,file="plot4.png",height=480,width=480)
png
3
> dev.off()
windows
2
>
|
model_evapotranspiration <- function (isWindVpDefined = 1,
evapoTranspirationPriestlyTaylor = 449.367,
evapoTranspirationPenman = 830.958){
#'- Name: EvapoTranspiration -Version: 1.0, -Time step: 1
#'- Description:
#' * Title: Evapotranspiration Model
#' * Author: Pierre Martre
#' * Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
#' Evapotranspiration and canopy and soil temperature calculations
#' * Institution: INRA Montpellier
#' * Abstract: According to the availability of wind and/or vapor pressure daily data, the
#' SiriusQuality2 model calculates the evapotranspiration rate using the Penman (if wind
#' and vapor pressure data are available) (Penman 1948) or the Priestly-Taylor
#' (Priestley and Taylor 1972) method
#'- inputs:
#' * name: isWindVpDefined
#' ** description : if wind and vapour pressure are defined
#' ** parametercategory : constant
#' ** datatype : INT
#' ** default : 1
#' ** min : 0
#' ** max : 1
#' ** unit :
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : parameter
#' * name: evapoTranspirationPriestlyTaylor
#' ** description : evapoTranspiration of Priestly Taylor
#' ** variablecategory : rate
#' ** default : 449.367
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 10000
#' ** unit : mm
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : variable
#' * name: evapoTranspirationPenman
#' ** description : evapoTranspiration of Penman
#' ** datatype : DOUBLE
#' ** variablecategory : rate
#' ** default : 830.958
#' ** min : 0
#' ** max : 10000
#' ** unit : mm
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : variable
#'- outputs:
#' * name: evapoTranspiration
#' ** description : evapoTranspiration
#' ** variablecategory : rate
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 10000
#' ** unit : mm
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
if (isWindVpDefined == 1)
{
evapoTranspiration <- evapoTranspirationPenman
}
else
{
evapoTranspiration <- evapoTranspirationPriestlyTaylor
}
return (list('evapoTranspiration' = evapoTranspiration))
} | /src/pycrop2ml_ui/packages/SQ_Energy_Balance/src/r/Evapotranspiration.r | permissive | AgriculturalModelExchangeInitiative/Pycrop2ml_ui | R | false | false | 3,412 | r | model_evapotranspiration <- function (isWindVpDefined = 1,
evapoTranspirationPriestlyTaylor = 449.367,
evapoTranspirationPenman = 830.958){
#'- Name: EvapoTranspiration -Version: 1.0, -Time step: 1
#'- Description:
#' * Title: Evapotranspiration Model
#' * Author: Pierre Martre
#' * Reference: Modelling energy balance in the wheat crop model SiriusQuality2:
#' Evapotranspiration and canopy and soil temperature calculations
#' * Institution: INRA Montpellier
#' * Abstract: According to the availability of wind and/or vapor pressure daily data, the
#' SiriusQuality2 model calculates the evapotranspiration rate using the Penman (if wind
#' and vapor pressure data are available) (Penman 1948) or the Priestly-Taylor
#' (Priestley and Taylor 1972) method
#'- inputs:
#' * name: isWindVpDefined
#' ** description : if wind and vapour pressure are defined
#' ** parametercategory : constant
#' ** datatype : INT
#' ** default : 1
#' ** min : 0
#' ** max : 1
#' ** unit :
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : parameter
#' * name: evapoTranspirationPriestlyTaylor
#' ** description : evapoTranspiration of Priestly Taylor
#' ** variablecategory : rate
#' ** default : 449.367
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 10000
#' ** unit : mm
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : variable
#' * name: evapoTranspirationPenman
#' ** description : evapoTranspiration of Penman
#' ** datatype : DOUBLE
#' ** variablecategory : rate
#' ** default : 830.958
#' ** min : 0
#' ** max : 10000
#' ** unit : mm
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
#' ** inputtype : variable
#'- outputs:
#' * name: evapoTranspiration
#' ** description : evapoTranspiration
#' ** variablecategory : rate
#' ** datatype : DOUBLE
#' ** min : 0
#' ** max : 10000
#' ** unit : mm
#' ** uri : http://www1.clermont.inra.fr/siriusquality/?page_id=547
if (isWindVpDefined == 1)
{
evapoTranspiration <- evapoTranspirationPenman
}
else
{
evapoTranspiration <- evapoTranspirationPriestlyTaylor
}
return (list('evapoTranspiration' = evapoTranspiration))
} |
library(lmomco)
### Name: par2qua2
### Title: Equivalent Quantile Function of Two Distributions
### Aliases: par2qua2
### Keywords: quantile mixture function distribution (mixture) mixed
###   distribution
### ** Examples
# Extracted help-page example for par2qua2(): blend the quantile functions of
# two distributions fitted to the same L-moments (left tail / right tail).
lmr <- lmoms(rnorm(20)); left <- parnor(lmr); right <- pargev(lmr)
mixed.median <- par2qua2(0.5, left, right)
# Bigger example--using Kappa fit to whole sample for the right tail and
# Normal fit to whole sample for the left tail
D <- c(123, 523, 345, 356, 2134, 345, 2365, 235, 12, 235, 61, 432, 843)
lmr <- lmoms(D); KAP <- parkap(lmr); NOR <- parnor(lmr); PP <- pp(D)
# Empirical points (plotting positions vs sorted data) with the Kappa,
# Normal, and mixed quantile functions overlaid for visual comparison.
plot( PP, sort(D), ylim=c(-500, 2300))
lines(PP, par2qua( PP, KAP), col=2)
lines(PP, par2qua( PP, NOR), col=3)
lines(PP, par2qua2(PP, NOR, KAP), col=4)
| /data/genthat_extracted_code/lmomco/examples/par2qua2.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 765 | r | library(lmomco)
### Name: par2qua2
### Title: Equivalent Quantile Function of Two Distributions
### Aliases: par2qua2
### Keywords: quantile mixture function distribution (mixture) mixed
### distribution
### ** Examples
lmr <- lmoms(rnorm(20)); left <- parnor(lmr); right <- pargev(lmr)
mixed.median <- par2qua2(0.5, left, right)
# Bigger example--using Kappa fit to whole sample for the right tail and
# Normal fit to whole sample for the left tail
D <- c(123, 523, 345, 356, 2134, 345, 2365, 235, 12, 235, 61, 432, 843)
lmr <- lmoms(D); KAP <- parkap(lmr); NOR <- parnor(lmr); PP <- pp(D)
plot( PP, sort(D), ylim=c(-500, 2300))
lines(PP, par2qua( PP, KAP), col=2)
lines(PP, par2qua( PP, NOR), col=3)
lines(PP, par2qua2(PP, NOR, KAP), col=4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipelines.R
\name{pipeline_version_objects_list}
\alias{pipeline_version_objects_list}
\title{List pipeline objects}
\usage{
pipeline_version_objects_list(pipeline.name, version,
preload_content = TRUE, ...)
}
\arguments{
\item{pipeline.name}{character}
\item{version}{character}
\item{preload_content}{(optional) Whether the API response should be preloaded. When TRUE the JSON response string is parsed to an R object. When FALSE, unprocessed API response object is returned. - Default = TRUE}
\item{...}{UBIOPS_PROJECT (system environment variable) UbiOps project name
UBIOPS_API_TOKEN (system environment variable) Token to connect to UbiOps API
UBIOPS_API_URL (optional - system environment variable) UbiOps API url - Default = "https://api.ubiops.com/v2.1"
UBIOPS_TIMEOUT (optional - system environment variable) Maximum request timeout to connect to UbiOps API - Default = NA
UBIOPS_DEFAULT_HEADERS (optional - system environment variable) Default headers to pass to UbiOps API, formatted like "header1:value1,header2:value2" - Default = ""}
}
\value{
Response from the API
A list of details of the pipeline objects in the pipeline version
- `id`: Unique identifier for the pipeline object (UUID)
- `name`: Name of the pipeline object
- `reference_name`: Name of the object it references
- `version`: Version name of reference object
}
\description{
List all pipeline objects in a pipeline version
}
\examples{
\dontrun{
# Use environment variables
Sys.setenv("UBIOPS_PROJECT" = "YOUR PROJECT NAME")
Sys.setenv("UBIOPS_API_TOKEN" = "YOUR API TOKEN")
result <- ubiops::pipeline_version_objects_list(
pipeline.name, version
)
# Or provide directly
result <- ubiops::pipeline_version_objects_list(
pipeline.name, version,
UBIOPS_PROJECT = "YOUR PROJECT NAME", UBIOPS_API_TOKEN = "YOUR API TOKEN"
)
print(result)
# The default API url is https://api.ubiops.com/v2.1
# Want to use a different API url?
# Provide `UBIOPS_API_URL`, either directly or as environment variable.
}
}
| /man/pipeline_version_objects_list.Rd | permissive | stjordanis/client-library-r | R | false | true | 2,084 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipelines.R
\name{pipeline_version_objects_list}
\alias{pipeline_version_objects_list}
\title{List pipeline objects}
\usage{
pipeline_version_objects_list(pipeline.name, version,
preload_content = TRUE, ...)
}
\arguments{
\item{pipeline.name}{character}
\item{version}{character}
\item{preload_content}{(optional) Whether the API response should be preloaded. When TRUE the JSON response string is parsed to an R object. When FALSE, unprocessed API response object is returned. - Default = TRUE}
\item{...}{UBIOPS_PROJECT (system environment variable) UbiOps project name
UBIOPS_API_TOKEN (system environment variable) Token to connect to UbiOps API
UBIOPS_API_URL (optional - system environment variable) UbiOps API url - Default = "https://api.ubiops.com/v2.1"
UBIOPS_TIMEOUT (optional - system environment variable) Maximum request timeout to connect to UbiOps API - Default = NA
UBIOPS_DEFAULT_HEADERS (optional - system environment variable) Default headers to pass to UbiOps API, formatted like "header1:value1,header2:value2" - Default = ""}
}
\value{
Response from the API
A list of details of the pipeline objects in the pipeline version
- `id`: Unique identifier for the pipeline object (UUID)
- `name`: Name of the pipeline object
- `reference_name`: Name of the object it references
- `version`: Version name of reference object
}
\description{
List all pipeline objects in a pipeline version
}
\examples{
\dontrun{
# Use environment variables
Sys.setenv("UBIOPS_PROJECT" = "YOUR PROJECT NAME")
Sys.setenv("UBIOPS_API_TOKEN" = "YOUR API TOKEN")
result <- ubiops::pipeline_version_objects_list(
pipeline.name, version
)
# Or provide directly
result <- ubiops::pipeline_version_objects_list(
pipeline.name, version,
UBIOPS_PROJECT = "YOUR PROJECT NAME", UBIOPS_API_TOKEN = "YOUR API TOKEN"
)
print(result)
# The default API url is https://api.ubiops.com/v2.1
# Want to use a different API url?
# Provide `UBIOPS_API_URL`, either directly or as environment variable.
}
}
|
# Challenge 13 - "All your Base8^2"
# ????
cody <- "Q29uZ3JhdHVsYXRpb25zISBZb3UgaGF2ZSBzb2x2ZWQgQ2hhbGxlbmdlIDEzLiBZb3UgYXJlIG5vdyByZXdhcmRlZCAzMiBwb2ludHMu"
# Your Code Here:
# base64decode() returns a raw vector; str_flatten() coerces each raw byte to
# its two-character hex representation and pastes them into one long hex string.
s <- base64enc::base64decode(cody)%>%
  str_flatten()
#Janky code I found online that converts hexadecimal into ascii
# Split the hex string into byte pairs, parse each pair as base 16, and turn
# the resulting raw bytes back into an ASCII string.
h <- sapply(seq(1, nchar(s), by=2), function(x) substr(s, x, x+1))
rawToChar(as.raw(strtoi(h, 16L)))
# Answer:
#"Congratulations! You have solved Challenge 13. You are now rewarded 32 points." | /challenges/challenge_13.R | no_license | codemasta14/dss_challenge | R | false | false | 492 | r | # Challenge 13 - "All your Base8^2"
# ????
cody <- "Q29uZ3JhdHVsYXRpb25zISBZb3UgaGF2ZSBzb2x2ZWQgQ2hhbGxlbmdlIDEzLiBZb3UgYXJlIG5vdyByZXdhcmRlZCAzMiBwb2ludHMu"
# Your Code Here:
s <- base64enc::base64decode(cody)%>%
str_flatten()
#Janky code I found online that converts hexadecimal into ascii
h <- sapply(seq(1, nchar(s), by=2), function(x) substr(s, x, x+1))
rawToChar(as.raw(strtoi(h, 16L)))
# Answer:
#"Congratulations! You have solved Challenge 13. You are now rewarded 32 points." |
library(stringr)
library(reticulate)
Sys.setenv(RETICULATE_PYTHON = "/home/zy/tools/anaconda3/bin/python3")
source('/home/zy/my_git/bioinformatics/scRef/try/scRef.R')
num.cpu <- 10
# reference (science advance)
exp_ref_mat=read.table('/home/yjingjing/project/hfz2020adjust/pla_adjust/4th_combNA_SA_scRef/SA.villi.csv',
header=T,row.names=1,sep='\t')
exp_ref_mat.origin <- exp_ref_mat
# HCA cell name
names(exp_ref_mat.origin) <- c("Villous trophoblast cell", "Syncytiotrophoblast cell",
"Extravillous trophoblast", "Hofbauer cell",
"Erythroid cell", "Fibroblast cell", "Fibroblast cell",
"Fibroblast cell", "Vascular endothelial cell")
find.markers <- function(exp_ref_mat) {
###### regard MCA as reference of DEG
file.MCA <- '/home/disk/scRef/HumanAtlas_SingleCell_Han2020/combinedHCA/HCA_combined_by_cell.txt'
df.MCA <- read.table(file.MCA, header=T, row.names=1, sep='\t', check.name=F)
df.MCA[is.na(df.MCA)] <- 0
df.MCA <- df.MCA[rowSums(df.MCA) != 0,]
library(DESeq2)
coldata.MCA <- DataFrame(row.names = names(df.MCA))
obj.DESeq.MCA <- DESeqDataSetFromMatrix(countData = df.MCA, colData = coldata.MCA,
design = ~ 1)
fpm.MCA <- fpm(obj.DESeq.MCA, robust = T)
# overlap genes
fpm.MCA=fpm.MCA[order(rownames(fpm.MCA)),]
exp_ref_mat=exp_ref_mat[order(rownames(exp_ref_mat)),]
gene_MCA=rownames(fpm.MCA)
gene_ref=rownames(exp_ref_mat)
gene_over= gene_MCA[which(gene_MCA %in% gene_ref)]
fpm.MCA=fpm.MCA[which(gene_MCA %in% gene_over),]
exp_ref_mat=exp_ref_mat[which(gene_ref %in% gene_over),]
print('Number of overlapped genes:')
print(nrow(exp_ref_mat))
cell.MCA <- dimnames(fpm.MCA)[[2]]
cell.ref <- names(exp_ref_mat)
cell.overlap <- intersect(cell.MCA, cell.ref)
# combat
library(sva)
mtx.in <- cbind(fpm.MCA, exp_ref_mat)
names(mtx.in) <- c(paste0('MCA.', cell.MCA), paste0('Ref.', cell.ref))
batch <- c(rep(1, dim(fpm.MCA)[2]), rep(2, dim(exp_ref_mat)[2]))
cov.cell <- c(cell.MCA, names(exp_ref_mat))
mod <- model.matrix(~ as.factor(cov.cell))
# mod <- model.matrix(~ 1)
mtx.combat <- ComBat(mtx.in, batch, mod, par.prior = T)
mtx.MCA <- mtx.combat[,paste0('MCA.', cell.MCA)]
mtx.ref <- mtx.combat[,paste0('Ref.', cell.ref)]
dimnames(mtx.ref)[[2]] <- cell.ref
cells <- cell.ref
cutoff.fc <- 1
cutoff.pval <- 0.05
list.cell.genes <- list()
for (cell in cells) {
vec.cell <- mtx.ref[, cell]
exp.top10 <- quantile(vec.cell, 0.9)
genes.high <- gene_over[vec.cell > exp.top10]
mtx.in <- cbind(mtx.MCA, vec.cell)
bool.cell <- as.factor(c(rep('1', dim(mtx.MCA)[2]), '2'))
res.limma <- .getDEgeneF(mtx.in, bool.cell)
# df.diff <- res.limma[
# ((res.limma$logFC > cutoff.fc) & (res.limma$adj.P.Val < cutoff.pval)),]
genes.diff <- row.names(res.limma)[1:100]
list.cell.genes[[cell]] <- genes.diff
}
out <- list()
out[['list.cell.genes']] <- list.cell.genes
out[['exp_ref_mat']] <- mtx.ref
return(out)
}
out.markers <- find.markers(exp_ref_mat.origin)
list.cell.genes <- out.markers[['list.cell.genes']]
genes.ref <- dimnames(out.markers[['exp_ref_mat']])[[1]]
######################### unlabeled data
file.data <- '/home/yjingjing/project/hfz2020adjust/pla_adjust/4th_combNA_SA_scRef/6701.deci_pla'
file.label.unlabeled <- '/home/yjingjing/project/hfz2020adjust/pla_adjust/4th_combNA_SA_scRef/E-MTAB-6701.processed.2'
file.data <- file.data
data.unlabeled <- read.delim(file.data, row.names=1)
colnames <- names(data.unlabeled)
list.colnames <- strsplit(colnames, '.', fixed = T)
colnames <- c()
for (i in 1:length(list.colnames)) {
sub_colname <- list.colnames[[i]]
colnames <- c(colnames, paste0(strsplit(sub_colname[1], '_')[[1]][1:2], collapse = '.'))
}
names(data.unlabeled) <- colnames
# read label file
file.label.unlabeled <- file.label.unlabeled
label.unlabeled <- read.delim(file.label.unlabeled, row.names=1)
row.names(label.unlabeled) <- str_replace_all(row.names(label.unlabeled), '_', '.')
# filter data
use.cols <- row.names(label.unlabeled)[label.unlabeled$location == 'Placenta']
data.filter <- data.unlabeled[,use.cols]
label.filter <- data.frame(label.unlabeled[use.cols, 'annotation'], row.names = use.cols)
# label.filter <- data.frame(label.unlabeled[use.cols,], row.names = use.cols)
# get overlap genes
exp_sc_mat = data.filter
exp_ref_mat <- exp_ref_mat.origin[genes.ref,]
exp_sc_mat=exp_sc_mat[order(rownames(exp_sc_mat)),]
exp_ref_mat=exp_ref_mat[order(rownames(exp_ref_mat)),]
gene_sc=rownames(exp_sc_mat)
gene_ref=rownames(exp_ref_mat)
gene_over= gene_sc[which(gene_sc %in% gene_ref)]
exp_sc_mat=exp_sc_mat[which(gene_sc %in% gene_over),]
exp_ref_mat=exp_ref_mat[which(gene_ref %in% gene_over),]
print('Number of overlapped genes:')
print(nrow(exp_sc_mat))
######################### run scRef
result.scref <- SCREF(exp_sc_mat, exp_ref_mat, CPU = num.cpu)
tag <- result.scref$tag2
# confirm label
exp_sc_mat <- exp_sc_mat[gene_over,]
ori.tag = label.filter[names(exp_sc_mat), 1]
scRef.tag = tag[,2]
method.test <- 'wilcox'
# method.test <- 't.test'
# method.test <- 'oneway_test'
meta.tag <- comfirm.label(exp_sc_mat, ori.tag, scRef.tag, method.test)
# evaluation
# import python package: sklearn.metrics
use_condaenv("/home/zy/tools/anaconda3")
# py_config()
metrics <- import('sklearn.metrics')
# uniform tags
scRef.tag[scRef.tag == "Astrocyte"] <- "astrocytes_ependymal"
scRef.tag[scRef.tag == "Newly Formed Oligodendrocyte"] <- "oligodendrocytes"
scRef.tag[scRef.tag == "Myelinating oligodendrocyte"] <- "oligodendrocytes"
scRef.tag[scRef.tag == "Endothelial cell"] <- "endothelial-mural"
scRef.tag[scRef.tag == "Neuron"] <- "neurons"
scRef.tag[scRef.tag == "Microglia"] <- "microglia"
cell.delete <- "astrocytes_ependymal"
meta.tag$scRef.tag <- scRef.tag
# Sweep candidate q-value cutoffs and score each one by weighted F1 against
# the relabelled ground truth; cells whose q-value exceeds the cutoff are
# reassigned to 'unknown' before scoring.
vec.cutoff <- c(seq(0.005, 0.1, 0.005), seq(0.15, 0.95, 0.05))
df.metrics <- data.frame(cutoff = vec.cutoff, weighted.f1 = rep(0, length(vec.cutoff)))
for (i in 1:length(vec.cutoff)) {
    cutoff <- vec.cutoff[i]
    true.tag <- meta.tag$ori.tag
    # the held-out cell type is treated as 'unknown' in the ground truth
    true.tag[true.tag == cell.delete] <- 'unknown'
    our.tag <- meta.tag$scRef.tag
    our.tag[meta.tag$qvalue > cutoff] <- 'unknown'
    sub.weighted.f1 <- metrics$f1_score(true.tag, our.tag, average = 'weighted')
    df.metrics[i, 'weighted.f1'] <- sub.weighted.f1
}
# best cutoff
best.cutoff <- df.metrics$cutoff[df.metrics$weighted.f1 == max(df.metrics$weighted.f1)][1]
# NOTE(review): the data-driven optimum computed above is immediately
# overwritten by a hard-coded 0.01 -- confirm this override is intentional.
best.cutoff <- 0.01
new.tag <- meta.tag$scRef.tag
new.tag[meta.tag$qvalue > best.cutoff] <- 'unknown'
meta.tag$new.tag <- new.tag
print(best.cutoff)
# no scRef plus
true.tag <- meta.tag$ori.tag
true.tag[true.tag == cell.delete] <- 'unknown'
meta.tag$ori.tag <- true.tag
metrics$f1_score(true.tag, scRef.tag, average = 'weighted')
metrics$f1_score(true.tag, new.tag, average = 'weighted')
###
fb=meta.tag[meta.tag$ori.tag %in% c('fFB1', 'fFB2'),]
### plot
library(Seurat)
# data preparing
seurat.unlabeled <- CreateSeuratObject(counts = data.filter, project = "pbmc3k", min.cells = 3, min.features = 200)
seurat.unlabeled <- NormalizeData(seurat.unlabeled, normalization.method = "LogNormalize", scale.factor = 10000)
seurat.unlabeled <- FindVariableFeatures(seurat.unlabeled, selection.method = "vst", nfeatures = 2000)
all.genes <- rownames(seurat.unlabeled)
seurat.unlabeled <- ScaleData(seurat.unlabeled, features = all.genes)
# add label
seurat.unlabeled@meta.data$original.label <- ori.tag
seurat.unlabeled@meta.data$scRef.tag <- scRef.tag
seurat.unlabeled@meta.data$new.tag <- new.tag
# PCA
seurat.unlabeled <- RunPCA(seurat.unlabeled, features = VariableFeatures(object = seurat.unlabeled), verbose = F)
# UMAP
seurat.unlabeled <- RunUMAP(seurat.unlabeled, dims = 1:15, n.neighbors = 30)
# figure1: ture label
DimPlot(seurat.unlabeled, reduction = "umap", label = T, group.by = 'original.label')
# figure2: scRef label
DimPlot(seurat.unlabeled, reduction = "umap", label = T, group.by = 'scRef.tag')
# figure3: scRef plus label
DimPlot(seurat.unlabeled, reduction = "umap", label = T, group.by = 'new.tag')
| /scRef/try/redhouse.R | no_license | Drizzle-Zhang/bioinformatics | R | false | false | 8,298 | r | library(stringr)
library(reticulate)
Sys.setenv(RETICULATE_PYTHON = "/home/zy/tools/anaconda3/bin/python3")
source('/home/zy/my_git/bioinformatics/scRef/try/scRef.R')
num.cpu <- 10
# reference (science advance)
exp_ref_mat=read.table('/home/yjingjing/project/hfz2020adjust/pla_adjust/4th_combNA_SA_scRef/SA.villi.csv',
header=T,row.names=1,sep='\t')
exp_ref_mat.origin <- exp_ref_mat
# HCA cell name
names(exp_ref_mat.origin) <- c("Villous trophoblast cell", "Syncytiotrophoblast cell",
"Extravillous trophoblast", "Hofbauer cell",
"Erythroid cell", "Fibroblast cell", "Fibroblast cell",
"Fibroblast cell", "Vascular endothelial cell")
find.markers <- function(exp_ref_mat) {
###### regard MCA as reference of DEG
file.MCA <- '/home/disk/scRef/HumanAtlas_SingleCell_Han2020/combinedHCA/HCA_combined_by_cell.txt'
df.MCA <- read.table(file.MCA, header=T, row.names=1, sep='\t', check.name=F)
df.MCA[is.na(df.MCA)] <- 0
df.MCA <- df.MCA[rowSums(df.MCA) != 0,]
library(DESeq2)
coldata.MCA <- DataFrame(row.names = names(df.MCA))
obj.DESeq.MCA <- DESeqDataSetFromMatrix(countData = df.MCA, colData = coldata.MCA,
design = ~ 1)
fpm.MCA <- fpm(obj.DESeq.MCA, robust = T)
# overlap genes
fpm.MCA=fpm.MCA[order(rownames(fpm.MCA)),]
exp_ref_mat=exp_ref_mat[order(rownames(exp_ref_mat)),]
gene_MCA=rownames(fpm.MCA)
gene_ref=rownames(exp_ref_mat)
gene_over= gene_MCA[which(gene_MCA %in% gene_ref)]
fpm.MCA=fpm.MCA[which(gene_MCA %in% gene_over),]
exp_ref_mat=exp_ref_mat[which(gene_ref %in% gene_over),]
print('Number of overlapped genes:')
print(nrow(exp_ref_mat))
cell.MCA <- dimnames(fpm.MCA)[[2]]
cell.ref <- names(exp_ref_mat)
cell.overlap <- intersect(cell.MCA, cell.ref)
# combat
library(sva)
mtx.in <- cbind(fpm.MCA, exp_ref_mat)
names(mtx.in) <- c(paste0('MCA.', cell.MCA), paste0('Ref.', cell.ref))
batch <- c(rep(1, dim(fpm.MCA)[2]), rep(2, dim(exp_ref_mat)[2]))
cov.cell <- c(cell.MCA, names(exp_ref_mat))
mod <- model.matrix(~ as.factor(cov.cell))
# mod <- model.matrix(~ 1)
mtx.combat <- ComBat(mtx.in, batch, mod, par.prior = T)
mtx.MCA <- mtx.combat[,paste0('MCA.', cell.MCA)]
mtx.ref <- mtx.combat[,paste0('Ref.', cell.ref)]
dimnames(mtx.ref)[[2]] <- cell.ref
cells <- cell.ref
cutoff.fc <- 1
cutoff.pval <- 0.05
list.cell.genes <- list()
for (cell in cells) {
vec.cell <- mtx.ref[, cell]
exp.top10 <- quantile(vec.cell, 0.9)
genes.high <- gene_over[vec.cell > exp.top10]
mtx.in <- cbind(mtx.MCA, vec.cell)
bool.cell <- as.factor(c(rep('1', dim(mtx.MCA)[2]), '2'))
res.limma <- .getDEgeneF(mtx.in, bool.cell)
# df.diff <- res.limma[
# ((res.limma$logFC > cutoff.fc) & (res.limma$adj.P.Val < cutoff.pval)),]
genes.diff <- row.names(res.limma)[1:100]
list.cell.genes[[cell]] <- genes.diff
}
out <- list()
out[['list.cell.genes']] <- list.cell.genes
out[['exp_ref_mat']] <- mtx.ref
return(out)
}
out.markers <- find.markers(exp_ref_mat.origin)
list.cell.genes <- out.markers[['list.cell.genes']]
genes.ref <- dimnames(out.markers[['exp_ref_mat']])[[1]]
######################### unlabeled data
file.data <- '/home/yjingjing/project/hfz2020adjust/pla_adjust/4th_combNA_SA_scRef/6701.deci_pla'
file.label.unlabeled <- '/home/yjingjing/project/hfz2020adjust/pla_adjust/4th_combNA_SA_scRef/E-MTAB-6701.processed.2'
file.data <- file.data
data.unlabeled <- read.delim(file.data, row.names=1)
colnames <- names(data.unlabeled)
list.colnames <- strsplit(colnames, '.', fixed = T)
colnames <- c()
for (i in 1:length(list.colnames)) {
sub_colname <- list.colnames[[i]]
colnames <- c(colnames, paste0(strsplit(sub_colname[1], '_')[[1]][1:2], collapse = '.'))
}
names(data.unlabeled) <- colnames
# read label file
file.label.unlabeled <- file.label.unlabeled
label.unlabeled <- read.delim(file.label.unlabeled, row.names=1)
row.names(label.unlabeled) <- str_replace_all(row.names(label.unlabeled), '_', '.')
# filter data
use.cols <- row.names(label.unlabeled)[label.unlabeled$location == 'Placenta']
data.filter <- data.unlabeled[,use.cols]
label.filter <- data.frame(label.unlabeled[use.cols, 'annotation'], row.names = use.cols)
# label.filter <- data.frame(label.unlabeled[use.cols,], row.names = use.cols)
# get overlap genes
exp_sc_mat = data.filter
exp_ref_mat <- exp_ref_mat.origin[genes.ref,]
exp_sc_mat=exp_sc_mat[order(rownames(exp_sc_mat)),]
exp_ref_mat=exp_ref_mat[order(rownames(exp_ref_mat)),]
gene_sc=rownames(exp_sc_mat)
gene_ref=rownames(exp_ref_mat)
gene_over= gene_sc[which(gene_sc %in% gene_ref)]
exp_sc_mat=exp_sc_mat[which(gene_sc %in% gene_over),]
exp_ref_mat=exp_ref_mat[which(gene_ref %in% gene_over),]
print('Number of overlapped genes:')
print(nrow(exp_sc_mat))
######################### run scRef
result.scref <- SCREF(exp_sc_mat, exp_ref_mat, CPU = num.cpu)
tag <- result.scref$tag2
# confirm label
exp_sc_mat <- exp_sc_mat[gene_over,]
ori.tag = label.filter[names(exp_sc_mat), 1]
scRef.tag = tag[,2]
method.test <- 'wilcox'
# method.test <- 't.test'
# method.test <- 'oneway_test'
meta.tag <- comfirm.label(exp_sc_mat, ori.tag, scRef.tag, method.test)
# evaluation
# import python package: sklearn.metrics
use_condaenv("/home/zy/tools/anaconda3")
# py_config()
metrics <- import('sklearn.metrics')
# uniform tags
scRef.tag[scRef.tag == "Astrocyte"] <- "astrocytes_ependymal"
scRef.tag[scRef.tag == "Newly Formed Oligodendrocyte"] <- "oligodendrocytes"
scRef.tag[scRef.tag == "Myelinating oligodendrocyte"] <- "oligodendrocytes"
scRef.tag[scRef.tag == "Endothelial cell"] <- "endothelial-mural"
scRef.tag[scRef.tag == "Neuron"] <- "neurons"
scRef.tag[scRef.tag == "Microglia"] <- "microglia"
cell.delete <- "astrocytes_ependymal"
meta.tag$scRef.tag <- scRef.tag
vec.cutoff <- c(seq(0.005, 0.1, 0.005), seq(0.15, 0.95, 0.05))
df.metrics <- data.frame(cutoff = vec.cutoff, weighted.f1 = rep(0, length(vec.cutoff)))
for (i in 1:length(vec.cutoff)) {
cutoff <- vec.cutoff[i]
true.tag <- meta.tag$ori.tag
true.tag[true.tag == cell.delete] <- 'unknown'
our.tag <- meta.tag$scRef.tag
our.tag[meta.tag$qvalue > cutoff] <- 'unknown'
sub.weighted.f1 <- metrics$f1_score(true.tag, our.tag, average = 'weighted')
df.metrics[i, 'weighted.f1'] <- sub.weighted.f1
}
# best cutoff
best.cutoff <- df.metrics$cutoff[df.metrics$weighted.f1 == max(df.metrics$weighted.f1)][1]
best.cutoff <- 0.01
new.tag <- meta.tag$scRef.tag
new.tag[meta.tag$qvalue > best.cutoff] <- 'unknown'
meta.tag$new.tag <- new.tag
print(best.cutoff)
# no scRef plus
true.tag <- meta.tag$ori.tag
true.tag[true.tag == cell.delete] <- 'unknown'
meta.tag$ori.tag <- true.tag
metrics$f1_score(true.tag, scRef.tag, average = 'weighted')
metrics$f1_score(true.tag, new.tag, average = 'weighted')
###
fb=meta.tag[meta.tag$ori.tag %in% c('fFB1', 'fFB2'),]
### plot
library(Seurat)
# data preparing
seurat.unlabeled <- CreateSeuratObject(counts = data.filter, project = "pbmc3k", min.cells = 3, min.features = 200)
seurat.unlabeled <- NormalizeData(seurat.unlabeled, normalization.method = "LogNormalize", scale.factor = 10000)
seurat.unlabeled <- FindVariableFeatures(seurat.unlabeled, selection.method = "vst", nfeatures = 2000)
all.genes <- rownames(seurat.unlabeled)
seurat.unlabeled <- ScaleData(seurat.unlabeled, features = all.genes)
# add label
seurat.unlabeled@meta.data$original.label <- ori.tag
seurat.unlabeled@meta.data$scRef.tag <- scRef.tag
seurat.unlabeled@meta.data$new.tag <- new.tag
# PCA
seurat.unlabeled <- RunPCA(seurat.unlabeled, features = VariableFeatures(object = seurat.unlabeled), verbose = F)
# UMAP
seurat.unlabeled <- RunUMAP(seurat.unlabeled, dims = 1:15, n.neighbors = 30)
# figure1: ture label
DimPlot(seurat.unlabeled, reduction = "umap", label = T, group.by = 'original.label')
# figure2: scRef label
DimPlot(seurat.unlabeled, reduction = "umap", label = T, group.by = 'scRef.tag')
# figure3: scRef plus label
DimPlot(seurat.unlabeled, reduction = "umap", label = T, group.by = 'new.tag')
|
#' Returns elements of vector \eqn{x} that are not contained within vector \eqn{y}.
#'
#' @author Will Haese-Hill
#' @param x vector with values to be matched.
#' @param y vector with values to match against vector \eqn{x}.
#' @return A vector of all values contained in vector \eqn{x} that could not be located in vector \eqn{y}.
#' @seealso \code{\link{\%in\%}} which this function is virtually opposite to.
#' @examples
#' union_jack = c("red","white","blue")
#' rainbow = c("red","orange","yellow","green","blue","indigo","violet")
#' union_jack %a/b% rainbow # gives "white".
#' # i.e. the only colour in the Union Jack that is not contained in the rainbow is "white".
#' @usage x \%a/b\% y
#' @rdname ASlashB
#' @export
##################################################################################################
#### %a/b% ####
##################################################################################################
# Simple function to find which elements of x are NOT contained in y (opposite of %in%)
#
# Example usage:
#
#
# union_jack = c("red","white","blue")
# rainbow = c("red","orange","yellow","green","blue","indigo","violet")
#
# union_jack %a/b% rainbow # gives "white".
# rainbow %a/b% union_jack
#
# * Therefore, the only colour in the Union Jack that is not contained in the rainbow is "white".
#
##################################################################################################
"%a/b%" <- function(x, y){
x[!x %in% y]
}
##############################################################
################ END OF %a/b% function #######################
##############################################################
| /R/ASlashB.R | no_license | jgrevel/BAST1-R-Library | R | false | false | 1,825 | r | #' Returns elements of vector \eqn{x} that are not contained within vector \eqn{y}.
#'
#' @author Will Haese-Hill
#' @param x vector with values to be matched.
#' @param y vector with values to match against vector \eqn{x}.
#' @return A vector of all values contained in vector \eqn{x} that could not be located in vector \eqn{y}.
#' @seealso \code{\link{\%in\%}} which this function is virtually opposite to.
#' @examples
#' union_jack = c("red","white","blue")
#' rainbow = c("red","orange","yellow","green","blue","indigo","violet")
#' union_jack %a/b% rainbow # gives "white".
#' # i.e. the only colour in the Union Jack that is not contained in the rainbow is "white".
#' @usage x \%a/b\% y
#' @rdname ASlashB
#' @export
##################################################################################################
#### %a/b% ####
##################################################################################################
# Simple function to find which elements of x are NOT contained in y (opposite of %in%)
#
# Example usage:
#
#
# union_jack = c("red","white","blue")
# rainbow = c("red","orange","yellow","green","blue","indigo","violet")
#
# union_jack %a/b% rainbow # gives "white".
# rainbow %a/b% union_jack
#
# * Therefore, the only colour in the Union Jack that is not contained in the rainbow is "white".
#
##################################################################################################
"%a/b%" <- function(x, y){
x[!x %in% y]
}
##############################################################
################ END OF %a/b% function #######################
##############################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor2.R
\name{cor2}
\alias{cor2}
\title{Correlation Analysis}
\usage{
cor2(data, x1, x2, method = "spearman")
}
\arguments{
\item{data}{dataframe}
\item{x1}{x1}
\item{x2}{x2}
\item{method}{correlation method; defaults to "spearman"}
}
\description{
Correlation Analysis
}
| /man/cor2.Rd | no_license | cran/fastStat | R | false | true | 335 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor2.R
\name{cor2}
\alias{cor2}
\title{Correlation Analysis}
\usage{
cor2(data, x1, x2, method = "spearman")
}
\arguments{
\item{data}{dataframe}
\item{x1}{x1}
\item{x2}{x2}
\item{method}{correlation method; defaults to "spearman"}
}
\description{
Correlation Analysis
}
|
## plot4: write plot4.png, a 2x2 panel of household power readings
## (global active power, voltage, the three sub-metering series, and
## global reactive power, each against datetime).
## Bug fix: the frame returned by loadPlotData() is `data`; the original
## referenced an undefined object `subset`, so every plot call failed.
plot4 <- function() {
  ## Load the loadPlotData helper file
  if (!exists("loadPlotData", mode = "function")) source("loadPlotData.r")

  ## Load the data - this function is defined in loadPlotData
  data <- loadPlotData()

  ## Open the png file; guarantee the device is closed even if a plot errors.
  png("plot4.png", width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)
  par(mfrow = c(2, 2))

  ## Plot 1: global active power over time
  plot(data$combinedTime, data$Global_active_power, type = "l",
       ylab = "Global Active Power", xlab = "")

  ## Plot 2: voltage over time
  plot(data$combinedTime, data$Voltage, type = "l",
       ylab = "Voltage", xlab = "datetime")

  ## Plot 3: the three sub-metering series on one panel
  plot(data$combinedTime, data$Sub_metering_1, col = "black", type = "l",
       xlab = "", ylab = "Energy sub metering")
  lines(data$combinedTime, data$Sub_metering_2, col = "red", type = "l")
  lines(data$combinedTime, data$Sub_metering_3, col = "blue", type = "l")
  legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         bty = "n")

  ## Plot 4: global reactive power over time
  plot(data$combinedTime, data$Global_reactive_power, type = "l",
       ylab = "Global_reactive_power", xlab = "datetime")

  ## Restore the 1x1 layout before the device is closed by on.exit().
  par(mfrow = c(1, 1))
}
| /plot4.R | no_license | PeppeMiller/ExData_Plotting1 | R | false | false | 1,588 | r | plot4 <- function(){
## Interior of plot4() (opened on the preceding line). Bug fix: the frame
## returned by loadPlotData() is `data`; the original referenced an
## undefined object `subset`, so every plot call failed.
  ## Load the loadPlotData helper file
  if (!exists("loadPlotData", mode = "function")) source("loadPlotData.r")

  ## Load the data - this function is defined in loadPlotData
  data <- loadPlotData()

  ## Open the png file; guarantee the device is closed even if a plot errors.
  png("plot4.png", width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)
  par(mfrow = c(2, 2))

  ## Plot 1: global active power over time
  plot(data$combinedTime, data$Global_active_power, type = "l",
       ylab = "Global Active Power", xlab = "")

  ## Plot 2: voltage over time
  plot(data$combinedTime, data$Voltage, type = "l",
       ylab = "Voltage", xlab = "datetime")

  ## Plot 3: the three sub-metering series on one panel
  plot(data$combinedTime, data$Sub_metering_1, col = "black", type = "l",
       xlab = "", ylab = "Energy sub metering")
  lines(data$combinedTime, data$Sub_metering_2, col = "red", type = "l")
  lines(data$combinedTime, data$Sub_metering_3, col = "blue", type = "l")
  legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         bty = "n")

  ## Plot 4: global reactive power over time
  plot(data$combinedTime, data$Global_reactive_power, type = "l",
       ylab = "Global_reactive_power", xlab = "datetime")

  ## Restore the 1x1 layout before the device is closed by on.exit().
  par(mfrow = c(1, 1))
}
|
## Laura E Timmerman
## September 12, 2015
## The purpose of this code is to convert .XPT files (a SAS format) downloaded
## directly from the NHANES website to a more useable format.

library(Hmisc)

## Get the list of files to convert.
## NOTE(review): setwd() in a script is fragile -- kept for compatibility.
data_file_path = '~/GA_data_science/Project/Data/XPT_files/'
setwd(data_file_path)
## Only pick up .XPT files (the original listed every file in the folder).
xpt_files = list.files(path = ".", pattern = "\\.XPT$", ignore.case = TRUE)

## Go through the list, read and write each file.
for (file in xpt_files) {
  temp_data = sasxport.get(file)
  ## Build the output name by replacing the .XPT suffix. The original
  ## strsplit(file, ".XPT") treated "." as a regex wildcard, which can
  ## mangle unusual file names; sub() with an anchored pattern is exact.
  new_file = sub("\\.XPT$", ".CSV", file, ignore.case = TRUE)
  ## Bug fix: the original wrote an undefined object `mydata`; the frame
  ## read from the XPT file is `temp_data`.
  write.table(temp_data, file = new_file, col.names = TRUE, row.names = FALSE, sep = ",")
} | /Project/Code/convert_xpt_to_csv.R | no_license | lauratim/GA_data_science | R | false | false | 608 | r | ## Laura E Timmerman
## September 12, 2015
## The purpose of this code is to convert .XPT files (a SAS format) downloaded
## directly from the NHANES website to a more useable format.
library(Hmisc)

## Get the list of files to convert.
## NOTE(review): setwd() in a script is fragile -- kept for compatibility.
data_file_path = '~/GA_data_science/Project/Data/XPT_files/'
setwd(data_file_path)
## Only pick up .XPT files (the original listed every file in the folder).
xpt_files = list.files(path = ".", pattern = "\\.XPT$", ignore.case = TRUE)

## Go through the list, read and write each file.
for (file in xpt_files) {
  temp_data = sasxport.get(file)
  ## Anchored suffix replacement (original strsplit used "." as a regex
  ## wildcard).
  new_file = sub("\\.XPT$", ".CSV", file, ignore.case = TRUE)
  ## Bug fix: the original wrote an undefined object `mydata`; the frame
  ## read from the XPT file is `temp_data`.
  write.table(temp_data, file = new_file, col.names = TRUE, row.names = FALSE, sep = ",")
} |
\name{wrapText}
\alias{wrapText}
\title{Wrap, Mark and Indent a Long Text String}
\description{
Wrap a long text string to a desired width, mark it with
prefixes, and indent the lines following the first.
}
\usage{
wrapText(string, width=50, prefix=c("> ","+ "), exdent=3)
}
\arguments{
\item{string}{ a text string.}
\item{width}{ a positive integer giving the target column for wrapping lines in the output.}
\item{prefix}{ character strings to be used as prefixes for the first and subsequent lines.}
\item{exdent}{ a non-negative integer specifying the indentation of subsequent lines.}
}
\details{
Using \code{base::strwrap}, this function splits a long text string
into a target width, indents lines following the first, and adds
a prefix string \code{prefix[1]} to the first line and
\code{prefix[2]} to all indented lines.
}
\value{
A text string with inserted prefixes and carriage returns.
This output string can be sent to display functions
like \code{cat} and \code{text}.
}
\author{
Rowan Haigh, Pacific Biological Station, Nanaimo BC
}
\seealso{
\code{\link{revStr}}, \code{\link{showError}}
}
\examples{
local(envir=.PBStoolEnv,expr={
pbsfun=function(){
txt=wrapText(paste("USA state names: ",
paste(state.name,collapse=", "),sep=""),width=72,exdent=5)
showMessage(txt,as.is=TRUE,adj=0,col="blue",cex=.9,x=.05)
cat(txt,"\n"); invisible() }
pbsfun()
})
}
\keyword{character}
| /PBStools/man/wrapText.Rd | no_license | jfontestad/pbs-tools | R | false | false | 1,478 | rd | \name{wrapText}
\alias{wrapText}
\title{Wrap, Mark and Indent a Long Text String}
\description{
Wrap a long text string to a desired width, mark it with
prefixes, and indent the lines following the first.
}
\usage{
wrapText(string, width=50, prefix=c("> ","+ "), exdent=3)
}
\arguments{
\item{string}{ a text string.}
\item{width}{ a positive integer giving the target column for wrapping lines in the output.}
\item{prefix}{ character strings to be used as prefixes for the first and subsequent lines.}
\item{exdent}{ a non-negative integer specifying the indentation of subsequent lines.}
}
\details{
Using \code{base::strwrap}, this function splits a long text string
into a target width, indents lines following the first, and adds
a prefix string \code{prefix[1]} to the first line and
\code{prefix[2]} to all indented lines.
}
\value{
A text string with inserted prefixes and carriage returns.
This output string can be sent to display functions
like \code{cat} and \code{text}.
}
\author{
Rowan Haigh, Pacific Biological Station, Nanaimo BC
}
\seealso{
\code{\link{revStr}}, \code{\link{showError}}
}
\examples{
local(envir=.PBStoolEnv,expr={
pbsfun=function(){
txt=wrapText(paste("USA state names: ",
paste(state.name,collapse=", "),sep=""),width=72,exdent=5)
showMessage(txt,as.is=TRUE,adj=0,col="blue",cex=.9,x=.05)
cat(txt,"\n"); invisible() }
pbsfun()
})
}
\keyword{character}
|
\name{faoswsTrade-package}
\alias{faoswsTrade-package}
\alias{faoswsTrade}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab faoswsTrade\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-01-15\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
% ~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~
% ~~ the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /faoswsTrade/man/faoswsTrade-package.Rd | no_license | vannem/sws_trade | R | false | false | 1,055 | rd | \name{faoswsTrade-package}
\alias{faoswsTrade-package}
\alias{faoswsTrade}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab faoswsTrade\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2015-01-15\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~
~~ the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_pert}
\alias{tfd_pert}
\title{Modified PERT distribution for modeling expert predictions.}
\usage{
tfd_pert(
low,
peak,
high,
temperature = 4,
validate_args = FALSE,
allow_nan_stats = FALSE,
name = "Pert"
)
}
\arguments{
\item{low}{lower bound}
\item{peak}{most frequent value}
\item{high}{upper bound}
\item{temperature}{controls the shape of the distribution}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default FALSE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
The PERT distribution is a loc-scale family of Beta distributions
fit onto a real interval between \code{low} and \code{high} values set by the user,
along with a \code{peak} to indicate the expert's most frequent prediction,
and \code{temperature} to control how sharp the peak is.
}
\details{
The distribution is similar to a \href{https://en.wikipedia.org/wiki/Triangular_distribution}{Triangular distribution}
(i.e. \code{tfd.Triangular}) but with a smooth peak.
Mathematical Details
In terms of a Beta distribution, PERT can be expressed as
\if{html}{\out{<div class="sourceCode">}}\preformatted{PERT ~ loc + scale * Beta(concentration1, concentration0)
}\if{html}{\out{</div>}}
where
\if{html}{\out{<div class="sourceCode">}}\preformatted{loc = low
scale = high - low
concentration1 = 1 + temperature * (peak - low)/(high - low)
concentration0 = 1 + temperature * (high - peak)/(high - low)
temperature > 0
}\if{html}{\out{</div>}}
The support is \verb{[low, high]}. The \code{peak} must fit in that interval:
\verb{low < peak < high}. The \code{temperature} is a positive parameter that
controls the shape of the distribution. Higher values yield a sharper peak.
The standard PERT distribution is obtained when \code{temperature = 4}.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
}
| /man/tfd_pert.Rd | no_license | cran/tfprobability | R | false | true | 2,500 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_pert}
\alias{tfd_pert}
\title{Modified PERT distribution for modeling expert predictions.}
\usage{
tfd_pert(
low,
peak,
high,
temperature = 4,
validate_args = FALSE,
allow_nan_stats = FALSE,
name = "Pert"
)
}
\arguments{
\item{low}{lower bound}
\item{peak}{most frequent value}
\item{high}{upper bound}
\item{temperature}{controls the shape of the distribution}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default FALSE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
The PERT distribution is a loc-scale family of Beta distributions
fit onto a real interval between \code{low} and \code{high} values set by the user,
along with a \code{peak} to indicate the expert's most frequent prediction,
and \code{temperature} to control how sharp the peak is.
}
\details{
The distribution is similar to a \href{https://en.wikipedia.org/wiki/Triangular_distribution}{Triangular distribution}
(i.e. \code{tfd.Triangular}) but with a smooth peak.
Mathematical Details
In terms of a Beta distribution, PERT can be expressed as
\if{html}{\out{<div class="sourceCode">}}\preformatted{PERT ~ loc + scale * Beta(concentration1, concentration0)
}\if{html}{\out{</div>}}
where
\if{html}{\out{<div class="sourceCode">}}\preformatted{loc = low
scale = high - low
concentration1 = 1 + temperature * (peak - low)/(high - low)
concentration0 = 1 + temperature * (high - peak)/(high - low)
temperature > 0
}\if{html}{\out{</div>}}
The support is \verb{[low, high]}. The \code{peak} must fit in that interval:
\verb{low < peak < high}. The \code{temperature} is a positive parameter that
controls the shape of the distribution. Higher values yield a sharper peak.
The standard PERT distribution is obtained when \code{temperature = 4}.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
}
|
#' Numeric Filter
#'
#'
#'
#' @import R6
#' @import shiny
#' @importFrom uuid UUIDgenerate
#'
#' @examples
#' appclass <- FilterNumeric$new(
#' min = 0,
#' max = 1,
#' name="myfilter",
#' subfilters = 3
#' )
#'
#'
#' # UI
#' # Here we only have to call the UI methods.
#' ui <- fluidPage(
#'
#' appclass$get_ui(),
#' tags$hr(),
#' verbatimTextOutput("exampleoutput")
#'
#' )
#'
#' # And here we just have to call the server methods.
#' server <- function(input, output, session) {
#'
#' output$exampleoutput <- renderText({
#' x <- appclass$get_server(session)
#'
#' paste(unlist(
#' lapply(x(), function(y) {
#' paste(unlist(y()))
#' })
#' ), collapse = ' - ')
#' })
#' }
#' if (interactive()) {
#' shinyApp(ui, server)
#' }
#' @export
FilterNumeric <- R6Class(
"FilterNumeric",
inherit = GenericFilter,
public = list(
#' @description
#' Constructor for a numeric filter
#' @param active (`logical`) Whether the
#' filter should be active per-se
#' @param name (`character`) Name of
#' the Filter. normally the column name
#' of the column the filter is applied to
#' @param min (`numeric`) minimum value of the filter
#' @param max (`numeric`) maximum value of the filter
#' @param subfilters (`numeric`) how many OR combined filters are allowed
#' for the numeric filter
#' @md
initialize = function(
active = FALSE,
name = NULL,
min,
max,
subfilters = 3) {
# Fail fast on invalid constructor arguments.
stopifnot(is.logical(active))
stopifnot(!is.null(name))
stopifnot(is.numeric(min) && !is.na(min))
stopifnot(is.numeric(max) && !is.na(max))
stopifnot(is.numeric(subfilters) && !is.na(subfilters))
# A fresh UUID namespaces this instance's Shiny inputs so several
# FilterNumeric objects can coexist in one app without id clashes.
private$id_input <- uuid::UUIDgenerate()
private$isactive <- active
private$name_val <- name
private$min_val <- min
private$max_val <- max
private$subfilters <- subfilters
# Pre-build one NumericLimiter per allowed OR-branch, named
# "filter1".."filterN" via private$filtername().
private$subfilter_objects <- setNames(
lapply(1:subfilters, function(i) {
return(NumericLimiter$new(min = min, max = max))
}),
private$filtername(1:subfilters)
)
},
# Build the filter's UI: a slider choosing how many sub-filters are
# active, plus one conditionally shown NumericLimiter panel per
# sub-filter. `ns` is the caller's namespacing function; the instance
# UUID is appended so all inputs below are unique to this object.
get_ui = function(ns = NS(NULL)) {
ns <- shiny::NS(ns(private$id_input))
shiny::fluidRow(
column(12,
sliderInput(
inputId = ns("nr_subfilters"),
label = "Number of OR combined filters",
min = 1,
max = private$subfilters,
step = 1,
value = 1
)
),
# Sub-filter i is only rendered while the slider value is >= i; the
# JS condition is resolved inside this module's namespace via `ns`.
do.call(tagList, lapply(1:private$subfilters, function(i) {
conditionalPanel(
condition = paste0("input.nr_subfilters >= ", i),
private$subfilter_objects[[private$filtername(i)]]$get_ui(ns = ns),
ns = ns
)
}))
)
},
# Run the module server under this instance's UUID as the module id;
# returns the reactive produced by private$server.
get_server = function(session) {
callModule(private$server, id = private$id_input, session = session)
}
),
private = list(
isactive = FALSE,
name_val = NULL,
min_val = NA,
max_val = NA,
subfilters = 3,
# Per-instance UUID used as the Shiny module id / input namespace.
id_input = NULL,
# Named list of NumericLimiter objects, built in initialize().
subfilter_objects = NULL,
# Map sub-filter index(es) to the names used in subfilter_objects.
filtername = function(id) {
paste0("filter", id)
},
# Module server: reactively collects the server handles of the currently
# active sub-filters (as chosen by the nr_subfilters slider).
server = function(input, output, session) {
return(reactive({
lapply(1:input$nr_subfilters, function(i) {
private$subfilter_objects[[private$filtername(i)]]$get_server(session)
})
}))
}
)
)
| /R/FilterNumeric.R | no_license | zappingseb/shinyjsonconfig | R | false | false | 3,462 | r | #' Numeric Filter
#'
#'
#'
#' @import R6
#' @import shiny
#' @importFrom uuid UUIDgenerate
#'
#' @examples
#' appclass <- FilterNumeric$new(
#' min = 0,
#' max = 1,
#' name="myfilter",
#' subfilters = 3
#' )
#'
#'
#' # UI
#' # Here we only have to call the UI methods.
#' ui <- fluidPage(
#'
#' appclass$get_ui(),
#' tags$hr(),
#' verbatimTextOutput("exampleoutput")
#'
#' )
#'
#' # And here we just have to call the server methods.
#' server <- function(input, output, session) {
#'
#' output$exampleoutput <- renderText({
#' x <- appclass$get_server(session)
#'
#' paste(unlist(
#' lapply(x(), function(y) {
#' paste(unlist(y()))
#' })
#' ), collapse = ' - ')
#' })
#' }
#' if (interactive()) {
#' shinyApp(ui, server)
#' }
#' @export
FilterNumeric <- R6Class(
"FilterNumeric",
inherit = GenericFilter,
public = list(
#' @description
#' Constructor for a numeric filter
#' @param active (`logical`) Whether the
#' filter should be active per-se
#' @param name (`character`) Name of
#' the Filter. normally the column name
#' of the column the filter is applied to
#' @param min (`numeric`) minimum value of the filter
#' @param max (`numeric`) maximum value of the filter
#' @param subfilters (`numeric`) how many OR combined filters are allowed
#' for the numeric filter
#' @md
initialize = function(
active = FALSE,
name = NULL,
min,
max,
subfilters = 3) {
# Fail fast on invalid constructor arguments.
stopifnot(is.logical(active))
stopifnot(!is.null(name))
stopifnot(is.numeric(min) && !is.na(min))
stopifnot(is.numeric(max) && !is.na(max))
stopifnot(is.numeric(subfilters) && !is.na(subfilters))
# A fresh UUID namespaces this instance's Shiny inputs so several
# FilterNumeric objects can coexist in one app without id clashes.
private$id_input <- uuid::UUIDgenerate()
private$isactive <- active
private$name_val <- name
private$min_val <- min
private$max_val <- max
private$subfilters <- subfilters
# Pre-build one NumericLimiter per allowed OR-branch, named
# "filter1".."filterN" via private$filtername().
private$subfilter_objects <- setNames(
lapply(1:subfilters, function(i) {
return(NumericLimiter$new(min = min, max = max))
}),
private$filtername(1:subfilters)
)
},
# Build the filter's UI: a slider choosing how many sub-filters are
# active, plus one conditionally shown NumericLimiter panel per
# sub-filter, all namespaced under this instance's UUID.
get_ui = function(ns = NS(NULL)) {
ns <- shiny::NS(ns(private$id_input))
shiny::fluidRow(
column(12,
sliderInput(
inputId = ns("nr_subfilters"),
label = "Number of OR combined filters",
min = 1,
max = private$subfilters,
step = 1,
value = 1
)
),
# Sub-filter i is only rendered while the slider value is >= i.
do.call(tagList, lapply(1:private$subfilters, function(i) {
conditionalPanel(
condition = paste0("input.nr_subfilters >= ", i),
private$subfilter_objects[[private$filtername(i)]]$get_ui(ns = ns),
ns = ns
)
}))
)
},
# Run the module server under this instance's UUID as the module id.
get_server = function(session) {
callModule(private$server, id = private$id_input, session = session)
}
),
private = list(
isactive = FALSE,
name_val = NULL,
min_val = NA,
max_val = NA,
subfilters = 3,
# Per-instance UUID used as the Shiny module id / input namespace.
id_input = NULL,
# Named list of NumericLimiter objects, built in initialize().
subfilter_objects = NULL,
# Map sub-filter index(es) to the names used in subfilter_objects.
filtername = function(id) {
paste0("filter", id)
},
# Module server: reactively collects the server handles of the currently
# active sub-filters (as chosen by the nr_subfilters slider).
server = function(input, output, session) {
return(reactive({
lapply(1:input$nr_subfilters, function(i) {
private$subfilter_objects[[private$filtername(i)]]$get_server(session)
})
}))
}
)
)
|
\name{draw_lm_basic}
\alias{draw_lm_basic}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Draw samples from the basic LM model}
\description{Function that draws samples from the basic LM model with specific parameters.}
\usage{
draw_lm_basic(piv, Pi, Psi, n)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{piv }{vector of initial probabilities of the latent Markov chain}
\item{Pi }{set of transition probabilities matrices (k x k x TT)}
\item{Psi }{array of conditional response probabilities (mb x k x r)}
\item{n }{sample size}
}
\value{
\item{Y }{matrix of response configurations unit by unit}
\item{S }{matrix of distinct response configurations}
\item{yv }{corresponding vector of frequencies}
}
\author{Francesco Bartolucci, Silvia Pandolfi, University of Perugia (IT), http://www.stat.unipg.it/bartolucci}
\examples{
# draw a sample for 1000 units and only one response variable
n = 1000
TT = 6
k = 2
r = 1 #number of response variables
mb = 3 #maximum number of response categories
piv = c(0.7,0.3)
Pi = matrix(c(0.9,0.1,0.1,0.9),k,k)
Pi = array(Pi,c(k,k,TT))
Pi[,,1] = 0
Psi = matrix(c(0.7,0.2,0.1,0.5,0.4,0.1),mb,k)
Psi = array(Psi,c(mb,k,r))
out = draw_lm_basic(piv, Pi, Psi, n=1000)
} | /man/draw_lm_basic.Rd | no_license | jmorten/LMest | R | false | false | 1,273 | rd | \name{draw_lm_basic}
\alias{draw_lm_basic}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Draw samples from the basic LM model}
\description{Function that draws samples from the basic LM model with specific parameters.}
\usage{
draw_lm_basic(piv, Pi, Psi, n)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{piv }{vector of initial probabilities of the latent Markov chain}
\item{Pi }{set of transition probabilities matrices (k x k x TT)}
\item{Psi }{array of conditional response probabilities (mb x k x r)}
\item{n }{sample size}
}
\value{
\item{Y }{matrix of response configurations unit by unit}
\item{S }{matrix of distinct response configurations}
\item{yv }{corresponding vector of frequencies}
}
\author{Francesco Bartolucci, Silvia Pandolfi, University of Perugia (IT), http://www.stat.unipg.it/bartolucci}
\examples{
# draw a sample for 1000 units and only one response variable
n = 1000
TT = 6
k = 2
r = 1 #number of response variables
mb = 3 #maximum number of response categories
piv = c(0.7,0.3)
Pi = matrix(c(0.9,0.1,0.1,0.9),k,k)
Pi = array(Pi,c(k,k,TT))
Pi[,,1] = 0
Psi = matrix(c(0.7,0.2,0.1,0.5,0.4,0.1),mb,k)
Psi = array(Psi,c(mb,k,r))
out = draw_lm_basic(piv, Pi, Psi, n=1000)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accrate.R
\name{flux.age.ghost}
\alias{flux.age.ghost}
\title{Plot flux rates for proxies.}
\usage{
flux.age.ghost(
proxy = 1,
age.lim = c(),
yr.lim = age.lim,
age.res = 200,
yr.res = age.res,
set = get("info"),
flux = c(),
plot.range = TRUE,
prob = 0.8,
range.col = grey(0.5),
range.lty = 2,
plot.mean = TRUE,
mean.col = "red",
mean.lty = 2,
plot.median = TRUE,
median.col = "blue",
median.lty = 2,
flux.lim = c(),
flux.lab = expression("flux (g cm"^-1 * " yr"^-1 * ")"),
upper = 0.95,
rgb.scale = c(0, 0, 0),
rgb.res = 100,
dark = set$dark,
cutoff = 0.001,
BCAD = set$BCAD,
age.lab = c(),
yr.lab = age.lab,
rotate.axes = FALSE,
rev.flux = FALSE,
rev.age = FALSE,
rev.yr = rev.age
)
}
\arguments{
\item{proxy}{Which proxy to use (counting from the column number in the .csv file after the depths column).}
\item{age.lim}{Minimum and maximum calendar age ranges, calculated automatically by default (\code{age.lim=c()}).}
\item{yr.lim}{Deprecated - use age.lim instead}
\item{age.res}{Resolution or amount of greyscale pixels to cover the age scale of the plot. Default \code{age.res=200}.}
\item{yr.res}{Deprecated - use age.res instead}
\item{set}{Detailed information of the current run, stored within this session's memory as variable info.}
\item{flux}{Define a flux variable within the R session (consisting of depths and their proxy concentrations in two columns) and provide the name of this variable, e.g.:
\code{flux.age.ghost(flux=flux1)}. If left empty (\code{flux=c()}), a flux file is expected (see \code{proxy}).}
\item{plot.range}{Plot curves that indicate a probability range, at resolution of yr.res.}
\item{prob}{Probability range, defaults to \code{prob=0.8} (10 \% at each side).}
\item{range.col}{Red seems nice.}
\item{range.lty}{Line type of the confidence ranges.}
\item{plot.mean}{Plot the mean fluxes.}
\item{mean.col}{Red seems nice.}
\item{mean.lty}{Line type of the means.}
\item{plot.median}{Plot the median fluxes.}
\item{median.col}{Blue seems nice.}
\item{median.lty}{Line type of the medians.}
\item{flux.lim}{Limits of the flux axes.}
\item{flux.lab}{Axis labels. Defaults to \code{flux.lab="flux"}.}
\item{upper}{Maximum flux rates to plot. Defaults to the upper 95\%; \code{upper=0.95}.}
\item{rgb.scale}{The function to produce a coloured representation of all age-models. Needs 3 values for the intensity of red, green and blue. Defaults to grey-scales: \code{rgb.scale=c(0,0,0)}, but could also be, say, scales of red (\code{rgb.scale=c(1,0,0)}).}
\item{rgb.res}{Resolution of the colour spectrum depicting the age-depth model. Default \code{rgb.res=100}.}
\item{dark}{The darkest grey value is \code{dark=1} by default; lower values will result in lighter grey but \code{values >1} are not allowed.}
\item{cutoff}{Point below which colours will no longer be printed. Default \code{cutoff=0.001}.}
\item{BCAD}{The calendar scale of graphs and age output-files is in \code{cal BP} by default, but can be changed to BC/AD using \code{BCAD=TRUE}.}
\item{age.lab}{The labels for the calendar axis (default \code{age.lab="cal BP"} or \code{"BC/AD"} if \code{BCAD=TRUE}).}
\item{yr.lab}{Deprecated - use age.lab instead}
\item{rotate.axes}{The default of plotting calendar year on the horizontal axis and fluxes on the vertical one can be changed with \code{rotate.axes=TRUE}.}
\item{rev.flux}{The flux axis can be reversed with \code{rev.flux=TRUE}.}
\item{rev.age}{The direction of the age axis can be reversed using \code{rev.age=TRUE}.}
\item{rev.yr}{Deprecated - use rev.age instead}
}
\value{
A plot of flux rates.
}
\description{
Plot grey-scale representation of estimated flux rates for proxies against calendar age.
}
\details{
To plot flux rates (e.g. pollen grains/cm2/yr) as greyscales,
provide a plain text file with headers and the data in columns separated by commas, ending in '_flux.csv'
and saved in your core's folder. The first column should contain the depths, and the next columns should contain
the proxy concentration values (leaving missing values empty). Then type for example \code{flux.age.ghost(1)} to plot the
flux values for the first proxy in the .csv file. Instead of using a _flux.csv file, a flux variable can also be defined
within the R session (consisting of depths and their proxy concentrations in two columns). Then provide the name of this variable, e.g.: \code{flux.age.ghost(flux=flux1)}.
See Bacon_runs/MSB2K/MSB2K_flux.csv for an example.
}
\examples{
\dontrun{
Bacon(run=FALSE, coredir=tempfile())
agedepth(yr.res=50)
flux.age.ghost(1)
}
}
\author{
Maarten Blaauw, J. Andres Christen
}
| /man/flux.age.ghost.Rd | no_license | Maarten14C/rbacon | R | false | true | 4,746 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/accrate.R
\name{flux.age.ghost}
\alias{flux.age.ghost}
\title{Plot flux rates for proxies.}
\usage{
flux.age.ghost(
proxy = 1,
age.lim = c(),
yr.lim = age.lim,
age.res = 200,
yr.res = age.res,
set = get("info"),
flux = c(),
plot.range = TRUE,
prob = 0.8,
range.col = grey(0.5),
range.lty = 2,
plot.mean = TRUE,
mean.col = "red",
mean.lty = 2,
plot.median = TRUE,
median.col = "blue",
median.lty = 2,
flux.lim = c(),
flux.lab = expression("flux (g cm"^-1 * " yr"^-1 * ")"),
upper = 0.95,
rgb.scale = c(0, 0, 0),
rgb.res = 100,
dark = set$dark,
cutoff = 0.001,
BCAD = set$BCAD,
age.lab = c(),
yr.lab = age.lab,
rotate.axes = FALSE,
rev.flux = FALSE,
rev.age = FALSE,
rev.yr = rev.age
)
}
\arguments{
\item{proxy}{Which proxy to use (counting from the column number in the .csv file after the depths column).}
\item{age.lim}{Minimum and maximum calendar age ranges, calculated automatically by default (\code{age.lim=c()}).}
\item{yr.lim}{Deprecated - use age.lim instead}
\item{age.res}{Resolution or amount of greyscale pixels to cover the age scale of the plot. Default \code{age.res=200}.}
\item{yr.res}{Deprecated - use age.res instead}
\item{set}{Detailed information of the current run, stored within this session's memory as variable info.}
\item{flux}{Define a flux variable within the R session (consisting of depths and their proxy concentrations in two columns) and provide the name of this variable, e.g.:
\code{flux.age.ghost(flux=flux1)}. If left empty (\code{flux=c()}), a flux file is expected (see \code{proxy}).}
\item{plot.range}{Plot curves that indicate a probability range, at resolution of yr.res.}
\item{prob}{Probability range, defaults to \code{prob=0.8} (10 \% at each side).}
\item{range.col}{Colour of the curves indicating the probability range. Defaults to grey, \code{range.col=grey(0.5)}.}
\item{range.lty}{Line type of the confidence ranges.}
\item{plot.mean}{Plot the mean fluxes.}
\item{mean.col}{Red seems nice.}
\item{mean.lty}{Line type of the means.}
\item{plot.median}{Plot the median fluxes.}
\item{median.col}{Blue seems nice.}
\item{median.lty}{Line type of the medians.}
\item{flux.lim}{Limits of the flux axes.}
\item{flux.lab}{Axis labels. Defaults to \code{flux.lab="flux"}.}
\item{upper}{Maximum flux rates to plot. Defaults to the upper 95\%; \code{upper=0.95}.}
\item{rgb.scale}{The function to produce a coloured representation of all age-models. Needs 3 values for the intensity of red, green and blue. Defaults to grey-scales: \code{rgb.scale=c(0,0,0)}, but could also be, say, scales of red (\code{rgb.scale=c(1,0,0)}).}
\item{rgb.res}{Resolution of the colour spectrum depicting the age-depth model. Default \code{rgb.res=100}.}
\item{dark}{The darkest grey value is \code{dark=1} by default; lower values will result in lighter grey but \code{values >1} are not allowed.}
\item{cutoff}{Point below which colours will no longer be printed. Default \code{cutoff=0.001}.}
\item{BCAD}{The calendar scale of graphs and age output-files is in \code{cal BP} by default, but can be changed to BC/AD using \code{BCAD=TRUE}.}
\item{age.lab}{The labels for the calendar axis (default \code{age.lab="cal BP"} or \code{"BC/AD"} if \code{BCAD=TRUE}).}
\item{yr.lab}{Deprecated - use age.lab instead}
\item{rotate.axes}{The default of plotting calendar year on the horizontal axis and fluxes on the vertical one can be changed with \code{rotate.axes=TRUE}.}
\item{rev.flux}{The flux axis can be reversed with \code{rev.flux=TRUE}.}
\item{rev.age}{The direction of the age axis can be reversed using \code{rev.age=TRUE}.}
\item{rev.yr}{Deprecated - use rev.age instead}
}
\value{
A plot of flux rates.
}
\description{
Plot grey-scale representation of estimated flux rates for proxies against calendar age.
}
\details{
To plot flux rates (e.g. pollen grains/cm2/yr) as greyscales,
provide a plain text file with headers and the data in columns separated by commas, ending in '_flux.csv'
and saved in your core's folder. The first column should contain the depths, and the next columns should contain
the proxy concentration values (leaving missing values empty). Then type for example \code{flux.age.ghost(1)} to plot the
flux values for the first proxy in the .csv file. Instead of using a _flux.csv file, a flux variable can also be defined
within the R session (consisting of depths and their proxy concentrations in two columns). Then provide the name of this variable, e.g.: \code{flux.age.ghost(flux=flux1)}.
See Bacon_runs/MSB2K/MSB2K_flux.csv for an example.
}
\examples{
\dontrun{
Bacon(run=FALSE, coredir=tempfile())
agedepth(yr.res=50)
flux.age.ghost(1)
}
}
\author{
Maarten Blaauw, J. Andres Christen
}
|
library(rdss) # for helper functions
library(rstanarm)

# Research design declaration (DeclareDesign pipeline):
# - model: population of N = 100 units with a uniformly sampled integer age in 0-80
# - inquiry: the population mean age
# - sampling: complete random sample of n = 3 units
# - estimator: Bayesian intercept-only regression (rstanarm::stan_glm) with a
#   log link and a normal(50, 5) prior on the intercept; the posterior summary
#   is exponentiated back to the age scale via tidy_stan and answers "mean_age".
# NOTE(review): with a log link, the normal(50, 5) prior is on the linear
# (log-scale) predictor — confirm this prior scale is intended.
declaration_9.3 <-
  declare_model(N = 100, age = sample(0:80, size = N, replace = TRUE)) +
  declare_inquiry(mean_age = mean(age)) +
  declare_sampling(S = complete_rs(N = N, n = 3)) +
  declare_estimator(
    age ~ 1,
    .method = stan_glm,
    family = gaussian(link = "log"),
    prior_intercept = normal(50, 5),
    .summary = ~tidy_stan(., exponentiate = TRUE),
    inquiry = "mean_age"
  )
| /scripts_declarations/declaration_9.3.R | no_license | DeclareDesign/book | R | false | false | 454 | r | library(rdss) # for helper functions
library(rstanarm)

# Research design declaration (DeclareDesign pipeline):
# population of 100 units with uniform integer ages 0-80; inquiry is the mean
# age; a complete random sample of 3 units is drawn; the estimator is an
# intercept-only Bayesian regression (stan_glm, gaussian family, log link,
# normal(50, 5) prior on the intercept), summarised with tidy_stan and
# exponentiated back to the age scale to answer the "mean_age" inquiry.
declaration_9.3 <-
  declare_model(N = 100, age = sample(0:80, size = N, replace = TRUE)) +
  declare_inquiry(mean_age = mean(age)) +
  declare_sampling(S = complete_rs(N = N, n = 3)) +
  declare_estimator(
    age ~ 1,
    .method = stan_glm,
    family = gaussian(link = "log"),
    prior_intercept = normal(50, 5),
    .summary = ~tidy_stan(., exponentiate = TRUE),
    inquiry = "mean_age"
  )
|
# Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
# variable, which of these four sources have seen decreases in emissions from 1999-2008
# for Baltimore City? Which have seen increases in emissions from 1999-2008?
# Read in the NEI data
NEI <- readRDS("summarySCC_PM25.rds")
# Set a colourblind-friendly palette
# NOTE(review): cbPalette is defined but never used below — ggplot uses its
# default colours; confirm whether scale_colour_manual(values = cbPalette)
# was intended.
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442",
               "#0072B2", "#D55E00", "#CC79A7")
# Get the subset of data for Baltimore City
# (fips "24510" identifies Baltimore City in the NEI data)
baltimoreData <- subset(NEI, fips == "24510")
# Use ddply to summarise the emissions by year and type
library(plyr)
# Sum the Emissions column within each (year, type) group.
baltimoreGroups <- ddply(baltimoreData, c("year","type"),
                         function(baltimoreData)sum(baltimoreData$Emissions))
names(baltimoreGroups) <-c("Year","Type", "Emissions")
# Change Year to be a factor so that it plots properly
baltimoreGroups$Year <- factor(baltimoreGroups$Year)
# Use ggplot to plot out the data
library(ggplot2)
# Generate the plot
# Open the PNG device; dev.off() (below) writes the file.
png(file="plot3.png", width = 480, height = 480)
# One line per source type across the survey years.
p <- ggplot(baltimoreGroups, aes(x=Year, y=Emissions, group=Type))
p + geom_line(aes(color=Type)) +
    ggtitle("Emissions by Source in Baltimore City") # Set title
dev.off() | /plot3.R | no_license | JennyInNZ/ExData_Project2 | R | false | false | 1,234 | r | # Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
# variable, which of these four sources have seen decreases in emissions from 1999-2008
# for Baltimore City? Which have seen increases in emissions from 1999-2008?
# Read in the NEI data
NEI <- readRDS("summarySCC_PM25.rds")
# Set a colourblind-friendly palette
# NOTE(review): cbPalette is never used below; ggplot falls back to its
# default colour scale — confirm whether a manual scale was intended.
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442",
               "#0072B2", "#D55E00", "#CC79A7")
# Get the subset of data for Baltimore City
# (fips "24510" identifies Baltimore City in the NEI data)
baltimoreData <- subset(NEI, fips == "24510")
# Use ddply to summarise the emissions by year and type
library(plyr)
# Sum of the Emissions column within each (year, type) group.
baltimoreGroups <- ddply(baltimoreData, c("year","type"),
                         function(baltimoreData)sum(baltimoreData$Emissions))
names(baltimoreGroups) <-c("Year","Type", "Emissions")
# Change Year to be a factor so that it plots properly
baltimoreGroups$Year <- factor(baltimoreGroups$Year)
# Use ggplot to plot out the data
library(ggplot2)
# Generate the plot
png(file="plot3.png", width = 480, height = 480)
# One line per source type across the survey years.
p <- ggplot(baltimoreGroups, aes(x=Year, y=Emissions, group=Type))
p + geom_line(aes(color=Type)) +
    ggtitle("Emissions by Source in Baltimore City") # Set title
dev.off()
# Unroot a Newick phylogenetic tree with the ape package.
# Reads the tree for locus 5421_0; the unrooted version is written back out
# (by the write.tree call that follows) for downstream codeml analysis.
library(ape)
testtree <- read.tree("5421_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5421_0_unrooted.txt") | /codeml_files/newick_trees_processed/5421_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("5421_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5421_0_unrooted.txt") |
#' Distribution
#'
#' Distribution Chart.
#'
#' @param x A \code{\link{list}}, where each vector contains data to be plotted. If the data is not numeric
#' it is coerced to be numeric.
#' Input data may be a matrix or a vector, containing the height of the columns
#' to be plotted, with the name/rownames used as the column names of the chart. Numeric and date labels
#' will be parsed automatically.
#' @param weights An optional \code{\link{list}}, where each element is a vector containing weights corresponding to
#' the values of \code{x}, or, a vector where the weights is assumed applicable for each element in \code{x}.
#' @param vertical Display the densities vertically.
#' @param show.mean Displays the mean of the data.
#' @param show.median Displays the median of the data.
#' @param show.quartiles Displays the quartiles of the data.
#' @param show.range Displays the range of the data.
#' @param show.density Show the left or top (if \code{vertical} is FALSE) of the violin plot.
#' @param show.mirror.density Show the right or bottom (if \code{vertical} is FALSE) of the violin plot.
#' @param show.values Produces a rug plot of individual values.
#' @param density.type Plot the density as a \code{"Density"} plot, \code{"Histogram"} plot, or \code{"Box"} plot. With box plots, the
#' 'whiskers' are drawn at the the most outlying point within 1.5 IQR (inter-quaritle range) below the first quarter and 1.5 IQR above the third quartile.
#' @param bw The smoothing bandwidth to be used when creating a Density,
#' Bean, or Violin plot. This defaults to \code{"nrd0"}, whereas \code{"SJ"} may often be superior (see \code{\link{density}}).
#' The default is to \code{"nrd0"} as \code{"SJ"} fails with trivial categorical cases.
#' @param adjust A scaling factor for the bandwidth when creating a Density, Bean, or Violin plot. E.g., a value of 0.5 sets the bandwidth to have of that computed using \code{bw}.
#' @param kernel The kernel used when when creating a Density, Bean, or Violin plot. One of "gaussian" (the default), "epanechnikov", "rectangular", "triangular", "biweight", "cosine", "optcosine".
#' @param n The number of equally-sapced points at which the density is to be estimated when creating a Density, Bean, or Violin plot. If greater than 512, it is rounded to a power of 2 (see \code{link{density}}).
#' @param from The left-most point of the grid used when creating a Density, Bean, or Violin plot.
#' @param to The right-most point of the grid used when creating a Density, Bean, or Violin plot.
#' @param cut By default, the values of \code{from} and \code{to} are \code{cut} bandwidths beyond the extremes of the data.
#' @param automatic.lower.density When \code{TRUE}, which is the default, \code{from} is set to the lowest value in the data.
#' @param histogram.cumulative Plots the cumulative histogram, if \code{histogram} is set to TRUE.
#' @param histogram.counts Displays the counts in tooltips of a histogram, rather than the proportions.
#' @param maximum.bins The maximum number of bins of the histogram. If \code{NULL}, this is generated automatically.
#' @param box.points How outliers are displayed boxplots. \code{"All"} plots all the points. \code{"Suspected outliers"} plots points
#' between 1.5 and 3 IQR from the 1st and 3rd quartile with un-filled circles. \code{"Outliers"} does not plot points between 1.5 and 3 IQR from the 1st and 3rd quartiles.
#' @param mean.color Defaults to "white"
#' @param median.color Defaults to "black"
#' @param quartile.color Defaults to "black",
#' @param range.color Defaults to "black"
#' @param values.color Defaults to "Green"
#' @param density.color Defaults to "Green"
#' @param title Character; chart title.
#' @param title.font.family Character; title font family. Can be "Arial Black",
#' "Arial", "Comic Sans MS", "Courier New", "Georgia", "Impact",
#' "Lucida Console", "Lucida Sans Unicode", "Marlett", "Symbol", "Tahoma",
#' "Times New Roman", "Trebuchet MS", "Verdana", "Webdings"
#' @param title.font.color Title font color as a named color in character
#' format (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param title.font.size Title font size; default = 10.
#' @param subtitle Character
#' @param subtitle.font.color subtitle font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param subtitle.font.family Character; subtitle font family
#' @param subtitle.font.size Integer; subtitle font size
#' @param footer Character
#' @param footer.font.color footer font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param footer.font.family Character; footer font family
#' @param footer.font.size Integer; footer font size
#' @param footer.wrap Logical; whether the footer text should be wrapped.
#' @param footer.wrap.nchar Number of characters (approximately) in each line of the footer when \code{footer.wordwrap} \code{TRUE}.
#' @param grid.show Logical; whether to show grid lines.
#' @param background.fill.color Background color in character format
#' (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param background.fill.opacity Background opacity as an alpha value
#' (0 to 1).
#' @param charting.area.fill.color Charting area background color as
#' a named color in character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param charting.area.fill.opacity Charting area background
#' opacity as an alpha value (0 to 1).
#' @param margin.top Margin between plot area and the top of the
#' graphic in pixels
#' @param margin.bottom Margin between plot area and the bottom of the
#' graphic in pixels
#' @param margin.left Margin between plot area and the left of the
#' graphic in pixels
#' @param margin.right Margin between plot area and the right of the
#' graphic in pixels
#' @param values.title Character, y-axis title; defaults to chart input values;
#' to turn off set to "FALSE".
#' @param values.title.font.color y-axis title font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0,
#' max = 255)).
#' @param values.title.font.family Character; y-axis title font family
#' @param values.title.font.size y-axis title font size
#' @param values.line.width y-axis line in pixels, 0 = no line
#' @param values.line.color y-axis line color as a named color in character format
#' (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.tick.mark.length Length of tick marks in pixels.
#' @param values.bounds.minimum Minimum of range for plotting;
#' NULL = no manual range set. Must be less than values.bounds.maximum
#' @param values.bounds.maximum Maximum of range for
#' plotting; NULL = no manual range set. Must be greater than values.bounds.minimum
#' @param values.tick.distance The distance between the ticks. Requires that \code{values.bounds.minimum} and \code{values.bounds.maximum} have been set.
#' @param values.zero.line.width Width in pixels of zero line; 0 = no zero line
#' shown
#' @param values.zero.line.color Color of horizontal zero line as a named
#' color in character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.grid.width Width of y-grid lines in pixels; 0 = no line
#' @param values.grid.color Color of y-grid lines as a named color in character
#' format (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.tick.show Whether to display the y-axis tick labels
#' @param values.tick.suffix y-axis tick label suffix
#' @param values.tick.prefix y-axis tick label prefix
#' @param values.tick.format d3 formatting string applied to the tick labels.
#' See https://github.com/mbostock/d3/wiki/Formatting#numbers
#' @param values.hovertext.format d3 formatting string applied to the hover text.
#' https://github.com/mbostock/d3/wiki/Formatting#numbers or
#' @param values.tick.angle y-axis tick label angle in degrees.
#' 90 = vertical; 0 = horizontal
#' @param values.tick.font.color y-axis tick label font color as a named color
#' in character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.tick.font.family Character; y-axis tick label font family
#' @param values.tick.font.size y-axis tick label font size
#' @param categories.tick.font.color X-axis tick label font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param categories.tick.font.family Character; x-axis tick label font family
#' @param categories.tick.font.size x-axis tick label font size
#' @param categories.tick.label.wrap Logical; whether to wrap long labels on the x-axis.
#' @param categories.tick.label.wrap.nchar Integer; number of characters in each line when \code{categories.tick.label.wrap} is \code{TRUE}.
#' @param modebar.show Logical; whether to show the zoom menu buttons or not.
#' @param global.font.family Character; font family for all occurrences of any
#' font attribute for the chart unless specified individually.
#' @param global.font.color Global font color as a named color in character format
#' (e.g. "black") or an rgb value (e.g. #' rgb(0, 0, 0, maxColorValue = 255)).
#' @param hovertext.font.family Font family of hover text.
#' @param hovertext.font.size Font size of hover text.
#' @param hover.on Only used for boxplot. If "all", then all the hovertext (for median,
#' quartiles, range) will be shown simultaneously. If "points", then only the hovertext
#' for the element near the cursor will be shown.
#' @param tooltip.show Logical; whether to show a tooltip on hover.
#' @return A \code{plotly} chart.
#' @examples
#' Distribution(rnorm(100))
#' Distribution(list(rnorm(100), rexp(100)))
#' @importFrom grDevices rgb
#' @importFrom plotly plot_ly config toRGB add_trace add_text layout hide_colorbar
#' @importFrom stats loess loess.control lm predict sd
#' @importFrom flipTransformations AsNumeric
#' @export
Distribution <- function(x,
                         weights = NULL,
                         vertical = TRUE,
                         show.mean = TRUE,
                         show.median = TRUE,
                         show.quartiles = TRUE,
                         show.range = TRUE,
                         show.density = TRUE,
                         show.mirror.density = TRUE,
                         show.values = FALSE,
                         density.type = "Density",
                         bw = "nrd0",
                         adjust = 1,
                         kernel = c("gaussian", "epanechnikov", "rectangular", "triangular", "biweight", "cosine", "optcosine"),
                         n = 512,
                         from = NULL,
                         to = NULL,
                         cut = 3,
                         automatic.lower.density = TRUE,
                         histogram.cumulative = FALSE,
                         histogram.counts = FALSE,
                         maximum.bins = NULL,
                         box.points = "Suspected outliers",
                         mean.color = "White",
                         median.color = "Black",
                         quartile.color = "Black",
                         range.color = "Black",
                         values.color = "#008000",
                         density.color = "#008000",
                         global.font.family = "Arial",
                         global.font.color = rgb(44, 44, 44, maxColorValue = 255),
                         title = "",
                         title.font.family = global.font.family,
                         title.font.color = global.font.color,
                         title.font.size = 16,
                         subtitle = "",
                         subtitle.font.family = global.font.family,
                         subtitle.font.color = global.font.color,
                         subtitle.font.size = 12,
                         footer = "",
                         footer.font.family = global.font.family,
                         footer.font.color = global.font.color,
                         footer.font.size = 8,
                         footer.wrap = TRUE,
                         footer.wrap.nchar = 100,
                         background.fill.color = "transparent",
                         background.fill.opacity = 1,
                         charting.area.fill.color = background.fill.color,
                         charting.area.fill.opacity = 0,
                         margin.top = NULL,
                         margin.bottom = NULL,
                         margin.left = NULL,
                         margin.right = NULL,
                         grid.show = FALSE,
                         values.title = "",
                         values.title.font.color = global.font.color,
                         values.title.font.family = global.font.family,
                         values.title.font.size = 12,
                         values.line.width = 0,
                         values.line.color = rgb(0, 0, 0, maxColorValue = 255),
                         values.tick.mark.length = 5,
                         values.bounds.minimum = NULL,
                         values.bounds.maximum = NULL,
                         values.tick.distance = NULL,
                         values.zero.line.width = 0,
                         values.zero.line.color = rgb(44, 44, 44, maxColorValue = 255),
                         values.grid.width = 1 * grid.show,
                         values.grid.color = rgb(225, 225, 225, maxColorValue = 255),
                         values.tick.show = TRUE,
                         values.tick.suffix = "",
                         values.tick.prefix = "",
                         values.tick.format = "",
                         values.hovertext.format = "",
                         values.tick.angle = NULL,
                         values.tick.font.color = global.font.color,
                         values.tick.font.family = global.font.family,
                         values.tick.font.size = 10,
                         categories.tick.font.color = global.font.color,
                         categories.tick.font.family = global.font.family,
                         categories.tick.font.size = 10,
                         categories.tick.label.wrap = TRUE,
                         categories.tick.label.wrap.nchar = 21,
                         hovertext.font.family = global.font.family,
                         hovertext.font.size = 11,
                         hover.on = c("all", "points")[1],
                         tooltip.show = TRUE,
                         modebar.show = FALSE)
{
    # Coerce the input into a list of vectors, one element per category.
    # Extracting and wrapping labels
    ErrorIfNotEnoughData(x, require.tidy = FALSE)
    if (length(x) == 1 && is.list(x) && NCOL(x[[1]]) > 1)
        x <- x[[1]]
    if (is.matrix(x))
        x <- as.data.frame(x)
    else if (!is.list(x))
    {
        if (is.array(x) && length(dim(x)) == 1)
        {
            x <- list(x)
            names(x) <- attributes(x[[1]])$name
        }
        else if (NCOL(x) == 1)
            x <- list(x)
    }
    if (!is.list(x))
        stop("Input data should be a list of numeric vectors or a matrix.")
    # Checking for categories with no data.
    all.missing <- sapply(x, function(x) all(is.na(x)))
    if (any(all.missing))
    {
        warning("The following categories contain only missing values: ",
                paste(names(all.missing)[all.missing], sep = ","))
        x <- x[!all.missing]
    }
    # Adding in a title based on name if only 1 statistic.
    if (length(x) == 1 && values.title == "")
    {
        table.name <- attributes(x[[1]])$label
        if (is.null(table.name))
            table.name <- attributes(x[[1]])$name
        if(!is.null(table.name))
            values.title <- table.name
    }
    # Extracting labels
    labels <- names(x)
    if (length(labels) == 1)
        labels = ""
    else
        labels <- autoFormatLongLabels(labels, categories.tick.label.wrap, categories.tick.label.wrap.nchar)
    x <- AsNumeric(x, FALSE)
    # Warnings for chart types that cannot deal with weights.
    if (!is.null(weights))
    {
        if (density.type == "Box")
        {
            warning("Weights are ignored in box plots.")
        }
        else if (density.type == "Histogram")
        {
            if (sd(weights) != 0)
                warning("Weights are ignored in histograms. To create a weighted histogram, either (1), ",
                        "create a Histogram Chart in Q or Displayr from the menus or R by code, or (2) ",
                        "manually create the categories and create a column chart.")
        }
    }
    # Checking inputs.
    if (density.type != "Density" && show.mirror.density)
    {
        warning("Mirror densities are only shown with 'density.type' set to 'Density'.")
        show.mirror.density = FALSE
    }
    if (density.type == "Box")
    {
        # For box plots the rug is redundant: show the individual points on the
        # box trace itself instead.
        if (show.values)
        {
            show.values <- FALSE
            box.points <- "All"
        }
        if (any(show.mean || show.range || show.median || show.quartiles))
            warning("Means, medians, quartiles, and values, will often cause problems when added to a box plot (as the box plot already shows this information).")
    }
    # Titles and footers
    # A zero font size collapses the axis-title space when there is no title.
    if (sum(nchar(values.title), na.rm = TRUE) == 0)
        values.title.font.size = 0
    title.font = list(family = title.font.family, size = title.font.size, color = title.font.color)
    subtitle.font = list(family = subtitle.font.family, size = subtitle.font.size, color = subtitle.font.color)
    footer.font = list(family = footer.font.family, size = footer.font.size, color = footer.font.color)
    values.title.font = list(family = values.title.font.family, size = values.title.font.size, color = values.title.font.color)
    values.tick.font = list(family = values.tick.font.family, size = values.tick.font.size, color = values.tick.font.color)
    categories.tick.font = list(family = categories.tick.font.family, size = categories.tick.font.size, color = categories.tick.font.color)
    footer <- autoFormatLongLabels(footer, footer.wrap, footer.wrap.nchar, truncate = FALSE)
    # Work out margin spacing
    # labels.nline = maximum number of wrapped lines in any category label.
    labels.nline <- max(sapply(gregexpr("<br>", labels), function(x){sum(x > -1)}), na.rm = TRUE) + 1
    if (vertical)
        margins <- list(t = 20, b = 40 + categories.tick.font.size * labels.nline, r = 60,
                        l = 60 + values.title.font.size, pad = 0)
    else
        margins <- list(t = 20, b = 30 + values.tick.font.size + values.title.font.size,
                        r = 60, l = 80, pad = 0)
    margins <- setMarginsForText(margins, title, subtitle, footer, title.font.size,
                                 10, 10)
    margins <- setCustomMargins(margins, margin.top, margin.bottom, margin.left,
                                margin.right, 0)
    if (!values.tick.show)
        margins$autoexpand <- FALSE
    ## Initiate plotly object
    p <- plot_ly()
    p <- config(p, displayModeBar = modebar.show)
    p$sizingPolicy$browser$padding <- 0
    n.variables <- length(x)
    # Recycle the density colors so there is one per category.
    if (length(density.color) < n.variables)
        density.color <- rep(density.color, n.variables)
    if (length(density.color) != n.variables)
        warning("The number of colors provided for shading the densities is not consistent with the number of variables.")
    # working out the range of the data
    rng <- range(unlist(x), na.rm = TRUE)
    if (is.null(maximum.bins) || is.na(maximum.bins))
        maximum.bins <- min(length(unique(unlist(x))), 50)
    # NOTE(review): min(diff(...)) is Inf (with a warning) when the data has a
    # single unique value — confirm an upstream guard exists for that case.
    bin.offset <- min(diff(sort(unique(unlist(x)))))/2
    if (density.type == "Histogram")
        rng <- rng + c(-1, 1) * bin.offset
    bin.size = (rng[2] - rng[1])/maximum.bins
    bins <- list(start = rng[1], end = rng[2],
                 size = if (bin.size < 0.5) bin.size else NULL) # avoiding bug for small ranges
    # Creating the violin plot
    # One pass per category: draw the density/histogram/box, then overlay the
    # requested summary statistics on the same (sub-)axes.
    for (v in 1:n.variables)
    {
        category.axis <- axisName(vertical, v, 1)
        value.axis <- axisName(vertical, v, 2)
        category.axis.2 <- axisName(vertical, v, 1, TRUE)
        value.axis.2 <- axisName(vertical, v, 2, TRUE)
        values <- x[[v]]
        wgt <- if (is.null(weights)) rep(1, length(values)) else
            (if (is.list(weights)) weights[[v]] else weights)
        if (length(wgt) != length(values))
            stop("The data and the weights do not have the same number of observations.")
        # Removing missing values
        not.missing <- !is.na(values)
        values <- values[not.missing]
        wgt <- wgt[not.missing]
        wgt <- prop.table(wgt) # Rebasing the weight (Required by the density function)
        from <- if (automatic.lower.density) rng[1] else from
        p <- addDensities(p, values, wgt, labels[v], vertical, show.density, show.mirror.density, density.type, histogram.cumulative, histogram.counts, bins, maximum.bins, box.points, category.axis, value.axis, density.color, values.color, bw, adjust, kernel, n, from, to, cut, hover.on)
        p <- addSummaryStatistics(p, values, wgt, vertical, show.mean, show.median, show.quartiles, show.range, show.values,
                                  mean.color, median.color, quartile.color, range.color, values.color,
                                  category.axis, axisName(vertical, v, 1, TRUE), value.axis, value.axis.2)
    }
    # Finalizing the layout
    # Format axis labels
    values.range <- setValRange(values.bounds.minimum, values.bounds.maximum, rng)
    values.tick <- setTicks(values.range$min, values.range$max, values.tick.distance, FALSE)
    # NOTE(review): 'values' here is whatever remained from the final loop
    # iteration and the result is marked '#ignored' — confirm this call is
    # still needed.
    axisFormat <- formatLabels(values, "Area", categories.tick.label.wrap, categories.tick.label.wrap.nchar, "", values.tick.format) #ignored
    #axisFormat <- NULL
    if (is.null(values.bounds.minimum))
        values.bounds.minimum <- rng[1]
    if (is.null(values.bounds.maximum))
        values.bounds.maximum <- rng[2]
    values.axis <- setAxis(values.title, "left", axisFormat, values.title.font, values.line.color, values.line.width, values.grid.width, values.grid.color,
                           values.tick, values.tick.font, values.tick.angle, values.tick.mark.length, values.tick.distance,
                           values.tick.format, values.tick.prefix, values.tick.suffix, values.tick.show,
                           FALSE, values.zero.line.width, values.zero.line.color,
                           values.hovertext.format)
    hover.mode <- if (tooltip.show) "'closest'" else "FALSE"
    # The layout() call is assembled as text because the number and names of the
    # category axes vary with n.variables (violinCategoriesAxes emits them).
    # NOTE(review): 'showlegend = FALSE' is emitted twice in the built string;
    # harmless (plotly keeps the last), but one could be removed.
    txt <- paste0("p <- layout(p,
                  autosize = TRUE,
                  font = list(size = 11),
                  hovermode = ", hover.mode, ",",
                  "showlegend = FALSE,
                  showlegend = FALSE,",
                  violinCategoriesAxes(vertical, n.variables, gsub("'", "\\\\'", labels)), "
                  ", if (vertical) "y" else "x", "axis = values.axis,
                  margin = margins,
                  annotations = list(setSubtitle(subtitle, subtitle.font, margins),
                  setTitle(title, title.font, margins),
                  setFooter(footer, footer.font, margins)),
                  hoverlabel = list(namelength = -1,
                  font = list(size = hovertext.font.size, family = hovertext.font.family)),
                  plot_bgcolor = toRGB(charting.area.fill.color, alpha = charting.area.fill.opacity),
                  paper_bgcolor = toRGB(background.fill.color, alpha = background.fill.opacity))")
    eval(parse(text = txt))
    # Wrap the plotly widget in the standard chart container used by callers.
    result <- list(htmlwidget = p)
    class(result) <- "StandardChart"
    result
}
# Returns the plotly axis id for one category panel.
#
# Each category (variable) gets its own pair of axes; the shared bare axis
# ("x" when vertical, "y" otherwise) is used for the first variable and for
# the value direction, while per-variable axes are numbered "x3"/"y3",
# "x5"/"y5", ... (or even numbers for secondary category axes).
axisName <- function(vertical, n.variables, axis.number, secondary.category = FALSE)
{
    base.letter <- if (vertical) "x" else "y"
    on.shared.axis <- (n.variables == 1 && !secondary.category) ||
        (vertical && axis.number == 2) ||
        (!vertical && axis.number == 1)
    if (on.shared.axis)
        return(base.letter)
    suffix.offset <- if (secondary.category) 0 else 1
    paste0(base.letter, n.variables * 2 - suffix.offset)
}
# Adds the distributional display (box plot, histogram, or kernel density /
# violin halves) for one variable to the plotly object 'p'.
#
# The kernel density estimate is always computed and stored in
# attr(p, "values.density"), because addSummaryStatistics later reads it to
# scale the median marker — even when no density trace is drawn.
#
# Fixes relative to the previous version:
# - a user-supplied 'to' (upper density limit) was mistakenly assigned to
#   d.args$from, overwriting the lower limit; it is now assigned to 'to'.
# - the 'n' argument (number of density evaluation points) was accepted but
#   never forwarded to stats::density(); it is now passed through.
#
# Returns 'p' with the new trace(s) added.
addDensities <- function(p,
                         values,
                         weights,
                         label,
                         vertical,
                         show.density,
                         show.mirror.density,
                         density.type,
                         histogram.cumulative,
                         histogram.counts,
                         bins,
                         maximum.bins,
                         box.points,
                         category.axis,
                         value.axis,
                         density.color,
                         values.color,
                         # Density parameters; see stats::density
                         bw, adjust, kernel, n, from, to, cut,
                         hover.on)
{
    # Computing the density. Also used in plotting other graphical elements.
    d.args <- list(x = values, na.rm = TRUE, bw = bw, adjust = adjust, kernel = kernel,
                   n = n, cut = cut, weights = weights)
    if (!is.null(from))
        d.args$from <- from
    if (!is.null(to))
        d.args$to <- to
    values.density <- do.call(density, d.args)
    attr(p, "values.density") <- values.density
    if (!show.density && !show.mirror.density)
        return(p)
    if (density.type == "Box")
    {
        p <- add_trace(p,
                  boxpoints = switch(box.points, "Outliers" = "outliers", "All" = "all", "Suspected outliers" = "suspectedoutliers"),
                  x = if (vertical) NULL else values,
                  y = if (vertical) values else NULL,
                  fillcolor = rgb(t(col2rgb(density.color[1])), alpha = 128, maxColorValue = 255),
                  marker = list(color = values.color),
                  name = label,
                  line = list(color = density.color),
                  hoverinfo = if (hover.on == "points") "skip" else if (vertical) "y" else "x",
                  type = "box",
                  xaxis = category.axis,
                  yaxis = value.axis)
        if (hover.on == "points")
        {
            # Unlike violin plots, box plots do not accept weights
            # For consistency with plotly we use type = 5 (midpoints)
            # this differs from violin plot quantiles (type = 6; i/n+1)
            five.num <- quantile(values, type = 5)
            names(five.num) <- c("Minimum:", "Lower quartile:", "Median:", "Upper quartile:", "Maximum:")
            five.pos <- rep(0, length(five.num))
            # Invisible markers carrying per-point hovertext for the five-number summary.
            p <- add_trace(p,
                      x = if (vertical) five.pos else five.num,
                      y = if (vertical) five.num else five.pos,
                      type = "scatter",
                      mode = "markers", cliponaxis = FALSE,
                      marker = list(color = "transparent"),
                      hoverlabel = list(bgcolor = density.color[1]),
                      hoverinfo = "text",
                      text = paste(names(five.num), round(five.num, 2)))
        }
    } else if (density.type == "Histogram")
    {
        p <- add_trace(p,
                  xbins = if (!vertical) bins else NULL,
                  ybins = if (vertical) bins else NULL,
                  nbinsx = maximum.bins,
                  nbinsy = maximum.bins,
                  x = if (vertical) NULL else values,
                  y = if (vertical) values else NULL ,
                  marker = list(color = density.color[1]),
                  histnorm = if(histogram.counts) "" else "probability",
                  hoverinfo = if (vertical) "x" else "y",
                  cumulative = list(enabled = histogram.cumulative),
                  name = label,
                  type = "histogram",
                  xaxis = category.axis,
                  yaxis = value.axis)
        return(p)
    } else # Density
        # Draw the density, and optionally its mirror image, to form a violin.
        for (x.product in c(if (show.density) 1 else NULL, if (show.mirror.density) -1 else NULL))
            p <- add_trace(p,
                      y = if (vertical) values.density$x else x.product * values.density$y,
                      x = if (vertical) x.product * values.density$y else values.density$x,
                      fill = if (vertical) "tozerox" else "tozeroy",
                      fillcolor = density.color[1],
                      hoverinfo = "none",
                      line = list(shape = "spline", width = 0),
                      mode = "lines",
                      name = label,
                      type = "scatter",
                      xaxis = category.axis,
                      yaxis = value.axis)
    p
}
# Replicate a single weights vector once per element of 'x', yielding a
# parallel list of per-group weight vectors.
createWeights <- function(x, weights)
{
    n.groups <- length(x)
    lapply(seq_len(n.groups), function(i) weights)
}
#' @importFrom stats density weighted.mean quantile
#' @importFrom Hmisc wtd.quantile
# Overlays the requested summary statistics (rug of values, range, quartiles,
# median, mean) on top of the density/violin traces already in 'p'.
# The median marker width is scaled from the density stored in
# attr(p, "values.density") by addDensities, so addDensities must have been
# called first. Returns 'p' with the extra traces added.
addSummaryStatistics <- function(p, values, weights, vertical, show.mean, show.median, show.quartiles, show.range, show.values,
                                 mean.color, median.color, quartile.color, range.color, values.color,
                                 category.axis, category.axis.2, value.axis, value.axis.2)
{
    # Rug plot of values
    # Plotted on the secondary axes so the tick marks sit beside the violin.
    if (show.values)
    {
        v2 <- values
        v1 <- rep("rugplot", length(values))
        p <- add_trace(p,
                       x = if (vertical) v1 else v2,
                       y = if (vertical) v2 else v1,
                       hoverinfo = "text",
                       marker = list(color = values.color, symbol = if (vertical) "line-ew-open" else "line-ns-open"),
                       mode = "markers",
                       name = "",
                       showlegend = FALSE,
                       text = round(values, 2),
                       type = "scatter",
                       xaxis = category.axis.2,
                       yaxis = value.axis.2)
    }
    ### Violin plot
    if (show.median || show.quartiles || show.range)
    {
        # Quantiles are computed in the same way as SPSS
        # This corresponds to type = 6 (default is type = 7); see Rdocs for stats::quantile
        five.num <- wtd.quantile(values, weights = weights, type = "i/(n+1)", normwt = TRUE)
        names(five.num) <- c("Minimum:", "Lower quartile:", "Median:", "Upper quartile:", "Maximum:")
    }
    mn <- if(show.mean) c("Mean:" = weighted.mean(values, w = weights)) else NULL
    # Function for adding components of boxplot to plot
    # line = NULL means a marker (point) is drawn instead of a line segment.
    .addBox <- function(p, y, x, name, line = NULL, marker = NULL)
    {
        p <- add_trace(p,
                 x = x,
                 y = y,
                 line = line,
                 marker = marker,
                 name = name,
                 hoverinfo = paste0("name+", if (vertical) "y" else "x"),
                 mode = if (is.null(line)) "markers" else "lines",
                 type = "scatter", cliponaxis = FALSE,
                 xaxis = category.axis,
                 yaxis = value.axis
        )
    }
    # Adding box plot components
    # Thin line from minimum to maximum.
    if (show.range)
    {
        v1 <- c(0, 0)
        v2 <- five.num[c(1, 5)]
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Range", line = list(width = 1.5, color = range.color))
    }
    # Thick bar from the lower to the upper quartile.
    if (show.quartiles)
    {
        v1 <- c(0, 0)
        v2 <- five.num[c(2, 4)]
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Quartiles", line = list(width = 8, color = quartile.color))
    }
    # Short crossbar at the median, sized relative to the widest density value.
    if (show.median)
    {
        half.mean.width = 0.2 * max(abs(range(attr(p, "values.density")$y)))
        v1 <- c(-half.mean.width, half.mean.width)
        v2 <- rep(five.num[3], 2)
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Median", line = list(width = 4, color = median.color))
    }
    # Square marker at the weighted mean.
    if (show.mean)
    {
        v1 <- 0
        v2 <- mn
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Mean", marker = list(color = mean.color, symbol = "square"))
    }
    p
}
# Builds the plotly axis settings for the i-th violin's category axis, or
# NULL when i exceeds the number of variables. The base domain reserves room
# for the rug strip and/or mirror density, then is squeezed into the i-th of
# n.variables equal slots along the categories direction.
violinCategoryAxis <- function(i, label, n.variables, vertical, show.values, show.density, show.mirror.density, family,
                               size, color, values.hovertext.format)
{
    if (i > n.variables)
        return(NULL)
    base.domain <- if (!show.mirror.density) {
        c(if (show.values) .12 else 0, .95)
    } else if (!show.density) {
        c(0, .9)
    } else {
        c(0, 1)
    }
    slot.offset <- (i - 1) / n.variables
    axis <- list(autorange = TRUE,
                 domain = base.domain / n.variables + slot.offset,
                 hoverformat = values.hovertext.format,
                 showgrid = FALSE,
                 showticklabels = FALSE,
                 ticks = "",
                 title = label,
                 titlefont = list(family = family, size = size, color = color),
                 type = "linear",
                 zeroline = FALSE)
    axis
}
# Builds the plotly axis settings for the i-th variable's rug-plot category
# axis, or NULL when the rug is not shown or i exceeds the variable count.
# The rug occupies a thin strip of the i-th variable's slot, positioned
# relative to the density/mirror-density curves.
rugCategoryAxis <- function(i, n.variables, vertical, show.density, show.mirror.density, show.values)
{
    if (i > n.variables || !show.values)
        return(NULL)
    # Half-width of the rug strip, as a fraction of the whole plot.
    offset <- max(10, n.variables + 2) / 2 / 100
    if (show.density && show.mirror.density)
        domain <- c(.5 - offset, .5 + offset)  # rug sits between the two densities
    else if (show.density)
        domain <- c(0, 0.1)                    # rug below/left of the density
    else if (show.mirror.density)
        domain <- c(.9, 1)                     # rug above/right of the mirror density
    else
        domain <- c(0, 1)                      # no density shown: use the full slot
                                               # (previously 'domain' was undefined here, causing an error)
    list(autorange = TRUE,  # was listed twice in the original list; once is enough
         domain = domain / n.variables + (i - 1) / n.variables,
         #hoverformat = values.hovertext.format, does not work with type = "category"
         range = c(-1, 1),
         showgrid = FALSE,
         showticklabels = FALSE,
         title = "",
         type = "category")
}
# Generates R code (as a string, later eval'd by Distribution) that declares
# one category axis per violin plus one per rug plot. The first variable uses
# xaxis/xaxis2; subsequent variables use xaxis3/xaxis4, xaxis5/xaxis6, and so
# on. For horizontal charts every "xaxis" is swapped to "yaxis".
violinCategoriesAxes <- function(vertical, n.variables, labels)
{
    shared.args <- "n.variables, vertical, show.values, show.density, show.mirror.density, categories.tick.font.family, categories.tick.font.size, categories.tick.font.color, values.hovertext.format"
    violinAxis <- function(axis.name, index, label)
        paste0(axis.name, " = violinCategoryAxis(", index, ", '", label, "',", shared.args, "), ", collapse = "")
    rugAxis <- function(axis.name, index)
        paste0(axis.name, " = rugCategoryAxis(", index, ", n.variables, vertical, show.density, show.mirror.density, show.values), ", collapse = "")
    axes <- paste0(violinAxis("xaxis", 1, labels[1]), rugAxis("xaxis2", 1))
    if (n.variables > 1)
    {
        even.axes <- seq(4, n.variables * 2, 2)
        axes <- paste0(axes,
                       violinAxis(paste0("xaxis", even.axes - 1), 2:n.variables, labels[-1]),
                       rugAxis(paste0("xaxis", even.axes), 2:n.variables))
    }
    if (!vertical)
        axes <- gsub("xaxis", "yaxis", axes)
    axes
}
# Merges the default arguments of 'chart.function' with the overrides in
# 'arguments', drops any that were already supplied in 'call' (and any that
# are NULL), then rewrites the call to target Distribution and returns the
# resulting argument list (without the function itself).
distributionArgs <- function(call, chart.function, arguments)
{
    defaults <- modifyList(as.list(args(chart.function)), arguments)
    # "" is the unnamed body element returned by as.list(args(...)).
    keep <- setdiff(names(defaults), c("", names(call)))
    extra <- Filter(Negate(is.null), defaults[keep])
    call[[1]] <- Distribution
    as.list(modify_call(call, extra)[-1])
}
#' Distribution
#'
#' Distribution Chart.
#'
#' @param x A \code{\link{list}}, where each vector contains data to be plotted. If the data is not numeric
#' it is coerced to be numeric.
#' Input data may be a matrix or a vector, containing the height of the columns
#' to be plotted, with the name/rownames used as the column names of the chart. Numeric and date labels
#' will be parsed automatically.
#' @param weights An optional \code{\link{list}}, where each element is a vector containing weights corresponding to
#' the values of \code{x}, or, a vector where the weights is assumed applicable for each element in \code{x}.
#' @param vertical Display the densities vertically.
#' @param show.mean Displays the mean of the data.
#' @param show.median Displays the median of the data.
#' @param show.quartiles Displays the quartiles of the data.
#' @param show.range Displays the range of the data.
#' @param show.density Show the left or top (if \code{vertical} is FALSE) of the violin plot.
#' @param show.mirror.density Show the right or bottom (if \code{vertical} is FALSE) of the violin plot.
#' @param show.values Produces a rug plot of individual values.
#' @param density.type Plot the density as a \code{"Density"} plot, \code{"Histogram"} plot, or \code{"Box"} plot. With box plots, the
#' 'whiskers' are drawn at the most outlying point within 1.5 IQR (inter-quartile range) below the first quartile and 1.5 IQR above the third quartile.
#' @param bw The smoothing bandwidth to be used when creating a Density,
#' Bean, or Violin plot. This defaults to \code{"nrd0"}, whereas \code{"SJ"} may often be superior (see \code{\link{density}}).
#' The default is to \code{"nrd0"} as \code{"SJ"} fails with trivial categorical cases.
#' @param adjust A scaling factor for the bandwidth when creating a Density, Bean, or Violin plot. E.g., a value of 0.5 sets the bandwidth to half of that computed using \code{bw}.
#' @param kernel The kernel used when when creating a Density, Bean, or Violin plot. One of "gaussian" (the default), "epanechnikov", "rectangular", "triangular", "biweight", "cosine", "optcosine".
#' @param n The number of equally-spaced points at which the density is to be estimated when creating a Density, Bean, or Violin plot. If greater than 512, it is rounded to a power of 2 (see \code{\link{density}}).
#' @param from The left-most point of the grid used when creating a Density, Bean, or Violin plot.
#' @param to The right-most point of the grid used when creating a Density, Bean, or Violin plot.
#' @param cut By default, the values of \code{from} and \code{to} are \code{cut} bandwidths beyond the extremes of the data.
#' @param automatic.lower.density When \code{TRUE}, which is the default, \code{from} is set to the lowest value in the data.
#' @param histogram.cumulative Plots the cumulative histogram, if \code{histogram} is set to TRUE.
#' @param histogram.counts Displays the counts in tooltips of a histogram, rather than the proportions.
#' @param maximum.bins The maximum number of bins of the histogram. If \code{NULL}, this is generated automatically.
#' @param box.points How outliers are displayed boxplots. \code{"All"} plots all the points. \code{"Suspected outliers"} plots points
#' between 1.5 and 3 IQR from the 1st and 3rd quartile with un-filled circles. \code{"Outliers"} does not plot points between 1.5 and 3 IQR from the 1st and 3rd quartiles.
#' @param mean.color Defaults to "white"
#' @param median.color Defaults to "black"
#' @param quartile.color Defaults to "black",
#' @param range.color Defaults to "black"
#' @param values.color Defaults to "Green"
#' @param density.color Defaults to "Green"
#' @param title Character; chart title.
#' @param title.font.family Character; title font family. Can be "Arial Black",
#' "Arial", "Comic Sans MS", "Courier New", "Georgia", "Impact",
#' "Lucida Console", "Lucida Sans Unicode", "Marlett", "Symbol", "Tahoma",
#' "Times New Roman", "Trebuchet MS", "Verdana", "Webdings"
#' @param title.font.color Title font color as a named color in character
#' format (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param title.font.size Title font size; default = 10.
#' @param subtitle Character
#' @param subtitle.font.color subtitle font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param subtitle.font.family Character; subtitle font family
#' @param subtitle.font.size Integer; subtitle font size
#' @param footer Character
#' @param footer.font.color footer font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param footer.font.family Character; footer font family
#' @param footer.font.size Integer; footer font size
#' @param footer.wrap Logical; whether the footer text should be wrapped.
#' @param footer.wrap.nchar Number of characters (approximately) in each line of the footer when \code{footer.wordwrap} \code{TRUE}.
#' @param grid.show Logical; whether to show grid lines.
#' @param background.fill.color Background color in character format
#' (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param background.fill.opacity Background opacity as an alpha value
#' (0 to 1).
#' @param charting.area.fill.color Charting area background color as
#' a named color in character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param charting.area.fill.opacity Charting area background
#' opacity as an alpha value (0 to 1).
#' @param margin.top Margin between plot area and the top of the
#' graphic in pixels
#' @param margin.bottom Margin between plot area and the bottom of the
#' graphic in pixels
#' @param margin.left Margin between plot area and the left of the
#' graphic in pixels
#' @param margin.right Margin between plot area and the right of the
#' graphic in pixels
#' @param values.title Character, y-axis title; defaults to chart input values;
#' to turn off set to "FALSE".
#' @param values.title.font.color y-axis title font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0,
#' max = 255)).
#' @param values.title.font.family Character; y-axis title font family
#' @param values.title.font.size y-axis title font size
#' @param values.line.width y-axis line in pixels, 0 = no line
#' @param values.line.color y-axis line color as a named color in character format
#' (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.tick.mark.length Length of tick marks in pixels.
#' @param values.bounds.minimum Minimum of range for plotting;
#' NULL = no manual range set. Must be less than values.bounds.maximum
#' @param values.bounds.maximum Maximum of range for
#' plotting; NULL = no manual range set. Must be greater than values.bounds.minimum
#' @param values.tick.distance The distance between the ticks. Requires that \code{values.bounds.minimum} and \code{values.bounds.maximum} have been set.
#' @param values.zero.line.width Width in pixels of zero line; 0 = no zero line
#' shown
#' @param values.zero.line.color Color of horizontal zero line as a named
#' color in character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.grid.width Width of y-grid lines in pixels; 0 = no line
#' @param values.grid.color Color of y-grid lines as a named color in character
#' format (e.g. "black") or an rgb value (e.g. rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.tick.show Whether to display the y-axis tick labels
#' @param values.tick.suffix y-axis tick label suffix
#' @param values.tick.prefix y-axis tick label prefix
#' @param values.tick.format d3 formatting string applied to the tick labels.
#' See https://github.com/mbostock/d3/wiki/Formatting#numbers
#' @param values.hovertext.format d3 formatting string applied to the hover text.
#' https://github.com/mbostock/d3/wiki/Formatting#numbers or
#' @param values.tick.angle y-axis tick label angle in degrees.
#' 90 = vertical; 0 = horizontal
#' @param values.tick.font.color y-axis tick label font color as a named color
#' in character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param values.tick.font.family Character; y-axis tick label font family
#' @param values.tick.font.size y-axis tick label font size
#' @param categories.tick.font.color X-axis tick label font color as a named color in
#' character format (e.g. "black") or an rgb value (e.g.
#' rgb(0, 0, 0, maxColorValue = 255)).
#' @param categories.tick.font.family Character; x-axis tick label font family
#' @param categories.tick.font.size x-axis tick label font size
#' @param categories.tick.label.wrap Logical; whether to wrap long labels on the x-axis.
#' @param categories.tick.label.wrap.nchar Integer; number of characters in each line when \code{categories.tick.label.wrap} is \code{TRUE}.
#' @param modebar.show Logical; whether to show the zoom menu buttons or not.
#' @param global.font.family Character; font family for all occurrences of any
#' font attribute for the chart unless specified individually.
#' @param global.font.color Global font color as a named color in character format
#' (e.g. "black") or an rgb value (e.g. #' rgb(0, 0, 0, maxColorValue = 255)).
#' @param hovertext.font.family Font family of hover text.
#' @param hovertext.font.size Font size of hover text.
#' @param hover.on Only used for boxplot. If "all", then all the hovertext (for median,
#' quartiles, range) will be shown simultaneously. If "points", then only the hovertext
#' for the element near the cursor will be shown.
#' @param tooltip.show Logical; whether to show a tooltip on hover.
#' @return A \code{plotly} chart.
#' @examples
#' Distribution(rnorm(100))
#' Distribution(list(rnorm(100), rexp(100)))
#' @importFrom grDevices rgb
#' @importFrom plotly plot_ly config toRGB add_trace add_text layout hide_colorbar
#' @importFrom stats loess loess.control lm predict sd
#' @importFrom flipTransformations AsNumeric
#' @export
Distribution <- function(x,
                         weights = NULL,
                         vertical = TRUE,
                         show.mean = TRUE,
                         show.median = TRUE,
                         show.quartiles = TRUE,
                         show.range = TRUE,
                         show.density = TRUE,
                         show.mirror.density = TRUE,
                         show.values = FALSE,
                         density.type = "Density",
                         bw = "nrd0",
                         adjust = 1,
                         kernel = c("gaussian", "epanechnikov", "rectangular", "triangular", "biweight", "cosine", "optcosine"),
                         n = 512,
                         from = NULL,
                         to = NULL,
                         cut = 3,
                         automatic.lower.density = TRUE,
                         histogram.cumulative = FALSE,
                         histogram.counts = FALSE,
                         maximum.bins = NULL,
                         box.points = "Suspected outliers",
                         mean.color = "White",
                         median.color = "Black",
                         quartile.color = "Black",
                         range.color = "Black",
                         values.color = "#008000",
                         density.color = "#008000",
                         global.font.family = "Arial",
                         global.font.color = rgb(44, 44, 44, maxColorValue = 255),
                         title = "",
                         title.font.family = global.font.family,
                         title.font.color = global.font.color,
                         title.font.size = 16,
                         subtitle = "",
                         subtitle.font.family = global.font.family,
                         subtitle.font.color = global.font.color,
                         subtitle.font.size = 12,
                         footer = "",
                         footer.font.family = global.font.family,
                         footer.font.color = global.font.color,
                         footer.font.size = 8,
                         footer.wrap = TRUE,
                         footer.wrap.nchar = 100,
                         background.fill.color = "transparent",
                         background.fill.opacity = 1,
                         charting.area.fill.color = background.fill.color,
                         charting.area.fill.opacity = 0,
                         margin.top = NULL,
                         margin.bottom = NULL,
                         margin.left = NULL,
                         margin.right = NULL,
                         grid.show = FALSE,
                         values.title = "",
                         values.title.font.color = global.font.color,
                         values.title.font.family = global.font.family,
                         values.title.font.size = 12,
                         values.line.width = 0,
                         values.line.color = rgb(0, 0, 0, maxColorValue = 255),
                         values.tick.mark.length = 5,
                         values.bounds.minimum = NULL,
                         values.bounds.maximum = NULL,
                         values.tick.distance = NULL,
                         values.zero.line.width = 0,
                         values.zero.line.color = rgb(44, 44, 44, maxColorValue = 255),
                         values.grid.width = 1 * grid.show,
                         values.grid.color = rgb(225, 225, 225, maxColorValue = 255),
                         values.tick.show = TRUE,
                         values.tick.suffix = "",
                         values.tick.prefix = "",
                         values.tick.format = "",
                         values.hovertext.format = "",
                         values.tick.angle = NULL,
                         values.tick.font.color = global.font.color,
                         values.tick.font.family = global.font.family,
                         values.tick.font.size = 10,
                         categories.tick.font.color = global.font.color,
                         categories.tick.font.family = global.font.family,
                         categories.tick.font.size = 10,
                         categories.tick.label.wrap = TRUE,
                         categories.tick.label.wrap.nchar = 21,
                         hovertext.font.family = global.font.family,
                         hovertext.font.size = 11,
                         hover.on = c("all", "points")[1],
                         tooltip.show = TRUE,
                         modebar.show = FALSE)
{
    # Extracting and wrapping labels
    ErrorIfNotEnoughData(x, require.tidy = FALSE)
    # Coerce the various accepted input shapes (list-of-one-table, matrix,
    # 1-d array, single vector) into a list of vectors, one per variable.
    if (length(x) == 1 && is.list(x) && NCOL(x[[1]]) > 1)
        x <- x[[1]]
    if (is.matrix(x))
        x <- as.data.frame(x)
    else if (!is.list(x))
    {
        if (is.array(x) && length(dim(x)) == 1)
        {
            x <- list(x)
            names(x) <- attributes(x[[1]])$name
        }
        else if (NCOL(x) == 1)
            x <- list(x)
    }
    if (!is.list(x))
        stop("Input data should be a list of numeric vectors or a matrix.")
    # Checking for categories with no data.
    all.missing <- sapply(x, function(x) all(is.na(x)))
    if (any(all.missing))
    {
        # NOTE(review): 'sep' has no effect on a single vector argument;
        # 'collapse = ","' was probably intended here.
        warning("The following categories contain only missing values: ",
                paste(names(all.missing)[all.missing], sep = ","))
        x <- x[!all.missing]
    }
    # Adding in a title based on name if only 1 statistic.
    if (length(x) == 1 && values.title == "")
    {
        table.name <- attributes(x[[1]])$label
        if (is.null(table.name))
            table.name <- attributes(x[[1]])$name
        if(!is.null(table.name))
            values.title <- table.name
    }
    # Extracting labels
    labels <- names(x)
    if (length(labels) == 1)
        labels = ""
    else
        labels <- autoFormatLongLabels(labels, categories.tick.label.wrap, categories.tick.label.wrap.nchar)
    x <- AsNumeric(x, FALSE)
    # Warnings for chart types that cannot deal with weights.
    if (!is.null(weights))
    {
        if (density.type == "Box")
        {
            warning("Weights are ignored in box plots.")
        }
        else if (density.type == "Histogram")
        {
            if (sd(weights) != 0)
                warning("Weights are ignored in histograms. To create a weighted histogram, either (1), ",
                        "create a Histogram Chart in Q or Displayr from the menus or R by code, or (2) ",
                        "manually create the categories and create a column chart.")
        }
    }
    # Checking inputs.
    if (density.type != "Density" && show.mirror.density)
    {
        warning("Mirror densities are only shown with 'density.type' set to 'Density'.")
        show.mirror.density = FALSE
    }
    if (density.type == "Box")
    {
        if (show.values)
        {
            # Box plots show individual values via plotly's own points option
            # instead of a separate rug trace.
            show.values <- FALSE
            box.points <- "All"
        }
        if (any(show.mean || show.range || show.median || show.quartiles))
            warning("Means, medians, quartiles, and values, will often cause problems when added to a box plot (as the box plot already shows this information).")
    }
    # Titles and footers
    if (sum(nchar(values.title), na.rm = TRUE) == 0)
        values.title.font.size = 0
    title.font = list(family = title.font.family, size = title.font.size, color = title.font.color)
    subtitle.font = list(family = subtitle.font.family, size = subtitle.font.size, color = subtitle.font.color)
    footer.font = list(family = footer.font.family, size = footer.font.size, color = footer.font.color)
    values.title.font = list(family = values.title.font.family, size = values.title.font.size, color = values.title.font.color)
    values.tick.font = list(family = values.tick.font.family, size = values.tick.font.size, color = values.tick.font.color)
    categories.tick.font = list(family = categories.tick.font.family, size = categories.tick.font.size, color = categories.tick.font.color)
    footer <- autoFormatLongLabels(footer, footer.wrap, footer.wrap.nchar, truncate = FALSE)
    # Work out margin spacing; labels.nline counts wrapped label lines ("<br>" breaks).
    labels.nline <- max(sapply(gregexpr("<br>", labels), function(x){sum(x > -1)}), na.rm = TRUE) + 1
    if (vertical)
        margins <- list(t = 20, b = 40 + categories.tick.font.size * labels.nline, r = 60,
                        l = 60 + values.title.font.size, pad = 0)
    else
        margins <- list(t = 20, b = 30 + values.tick.font.size + values.title.font.size,
                        r = 60, l = 80, pad = 0)
    margins <- setMarginsForText(margins, title, subtitle, footer, title.font.size,
                                 10, 10)
    margins <- setCustomMargins(margins, margin.top, margin.bottom, margin.left,
                        margin.right, 0)
    if (!values.tick.show)
        margins$autoexpand <- FALSE
    ## Initiate plotly object
    p <- plot_ly()
    p <- config(p, displayModeBar = modebar.show)
    p$sizingPolicy$browser$padding <- 0
    n.variables <- length(x)
    if (length(density.color) < n.variables)
        density.color <- rep(density.color, n.variables)
    if (length(density.color) != n.variables)
        warning("The number of colors provided for shading the densities is not consistent with the number of variables.")
    # working out the range of the data
    rng <- range(unlist(x), na.rm = TRUE)
    if (is.null(maximum.bins) || is.na(maximum.bins))
        maximum.bins <- min(length(unique(unlist(x))), 50)
    # Half the smallest gap between distinct values; used to pad histogram bins.
    bin.offset <- min(diff(sort(unique(unlist(x)))))/2
    if (density.type == "Histogram")
        rng <- rng + c(-1, 1) * bin.offset
    bin.size = (rng[2] - rng[1])/maximum.bins
    bins <- list(start = rng[1], end = rng[2],
                 size = if (bin.size < 0.5) bin.size else NULL) # avoiding bug for small ranges
    # Creating the violin plot: one density trace plus summary-statistic
    # overlays per variable, each on its own pair of category axes.
    # NOTE(review): assumes n.variables >= 1 (seq_len would be safer).
    for (v in 1:n.variables)
    {
        category.axis <- axisName(vertical, v, 1)
        value.axis <- axisName(vertical, v, 2)
        category.axis.2 <- axisName(vertical, v, 1, TRUE)
        value.axis.2 <- axisName(vertical, v, 2, TRUE)
        values <- x[[v]]
        wgt <- if (is.null(weights)) rep(1, length(values)) else
            (if (is.list(weights)) weights[[v]] else weights)
        if (length(wgt) != length(values))
            stop("The data and the weights do not have the same number of observations.")
        # Removing missing values
        not.missing <- !is.na(values)
        values <- values[not.missing]
        wgt <- wgt[not.missing]
        wgt <- prop.table(wgt) # Rebasing the weight (Required by the density function)
        from <- if (automatic.lower.density) rng[1] else from
        p <- addDensities(p, values, wgt, labels[v], vertical, show.density, show.mirror.density, density.type, histogram.cumulative, histogram.counts, bins, maximum.bins, box.points, category.axis, value.axis, density.color, values.color, bw, adjust, kernel, n, from, to, cut, hover.on)
        p <- addSummaryStatistics(p, values, wgt, vertical, show.mean, show.median, show.quartiles, show.range, show.values,
                                  mean.color, median.color, quartile.color, range.color, values.color,
                                  category.axis, axisName(vertical, v, 1, TRUE), value.axis, value.axis.2)
    }
    # Finalizing the layout
    # Format axis labels
    values.range <- setValRange(values.bounds.minimum, values.bounds.maximum, rng)
    values.tick <- setTicks(values.range$min, values.range$max, values.tick.distance, FALSE)
    axisFormat <- formatLabels(values, "Area", categories.tick.label.wrap, categories.tick.label.wrap.nchar, "", values.tick.format) #ignored
    #axisFormat <- NULL
    if (is.null(values.bounds.minimum))
        values.bounds.minimum <- rng[1]
    if (is.null(values.bounds.maximum))
        values.bounds.maximum <- rng[2]
    values.axis <- setAxis(values.title, "left", axisFormat, values.title.font, values.line.color, values.line.width, values.grid.width, values.grid.color,
                           values.tick, values.tick.font, values.tick.angle, values.tick.mark.length, values.tick.distance,
                           values.tick.format, values.tick.prefix, values.tick.suffix, values.tick.show,
                           FALSE, values.zero.line.width, values.zero.line.color,
                           values.hovertext.format)
    hover.mode <- if (tooltip.show) "'closest'" else "FALSE"
    # The number of axes depends on the data, so the layout() call is
    # assembled as text (axes from violinCategoriesAxes) and then eval'd.
    txt <- paste0("p <- layout(p,
                  autosize = TRUE,
                  font = list(size = 11),
                  hovermode = ", hover.mode, ",",
                  "showlegend = FALSE,
                  showlegend = FALSE,",
                  violinCategoriesAxes(vertical, n.variables, gsub("'", "\\\\'", labels)), "
                  ", if (vertical) "y" else "x", "axis = values.axis,
                  margin = margins,
                  annotations = list(setSubtitle(subtitle, subtitle.font, margins),
                                     setTitle(title, title.font, margins),
                                     setFooter(footer, footer.font, margins)),
                  hoverlabel = list(namelength = -1,
                                    font = list(size = hovertext.font.size, family = hovertext.font.family)),
                  plot_bgcolor = toRGB(charting.area.fill.color, alpha = charting.area.fill.opacity),
                  paper_bgcolor = toRGB(background.fill.color, alpha = background.fill.opacity))")
    eval(parse(text = txt))
    result <- list(htmlwidget = p)
    class(result) <- "StandardChart"
    result
}
# Returns the plotly axis name ("x", "y", "x3", "y4", ...) for a variable.
# axis.number 1 is the category axis and 2 is the value axis; the secondary
# category axis (used by the rug plot) gets the even-numbered name.
# NOTE(review): despite its name, 'n.variables' is the index of the current
# variable (Distribution passes the loop variable v), not the total count.
axisName <- function(vertical, n.variables, axis.number, secondary.category = FALSE)
{
    # The first variable's primary axes, and whichever orientation serves as
    # the shared value/category axis, use the unnumbered axis name.
    if (n.variables == 1 && !secondary.category || vertical && axis.number == 2 || !vertical && axis.number == 1)
        return(if (vertical) "x" else "y")
    paste0(if (vertical) "x" else "y", n.variables * 2 - if (secondary.category) 0 else 1)
}
# Adds the main distribution trace(s) for one variable -- a box plot, a
# histogram, or a (possibly mirrored) kernel density -- to the plotly object
# 'p'. The kernel density is always computed and attached as
# attr(p, "values.density") because addSummaryStatistics uses it to size the
# median bar. Returns the modified plotly object.
addDensities <- function(p,
                         values,
                         weights,
                         label,
                         vertical,
                         show.density,
                         show.mirror.density,
                         density.type,
                         histogram.cumulative,
                         histogram.counts,
                         bins,
                         maximum.bins,
                         box.points,
                         category.axis,
                         value.axis,
                         density.color,
                         values.color,
                         # Density parameters
                         bw, adjust, kernel, n, from, to, cut,
                         hover.on)
{
    # Computing the density. Also used in plotting other graphical elements.
    # Bug fix: 'n' was documented but never forwarded to density(); it is now.
    d.args <- list(x = values, na.rm = TRUE, bw = bw, adjust = adjust, kernel = kernel,
                   n = n, cut = cut, weights = weights)
    if (!is.null(from))
        d.args$from <- from
    if (!is.null(to))
        d.args$to <- to  # Bug fix: previously assigned to d.args$from, clobbering 'from' and ignoring 'to'.
    values.density <- do.call(density, d.args)
    attr(p, "values.density") <- values.density
    if (!show.density && !show.mirror.density)
        return(p)
    if (density.type == "Box")
    {
        p <- add_trace(p,
                       boxpoints = switch(box.points, "Outliers" = "outliers", "All" = "all", "Suspected outliers" = "suspectedoutliers"),
                       x = if (vertical) NULL else values,
                       y = if (vertical) values else NULL,
                       fillcolor = rgb(t(col2rgb(density.color[1])), alpha = 128, maxColorValue = 255),
                       marker = list(color = values.color),
                       name = label,
                       line = list(color = density.color),
                       hoverinfo = if (hover.on == "points") "skip" else if (vertical) "y" else "x",
                       type = "box",
                       xaxis = category.axis,
                       yaxis = value.axis)
        if (hover.on == "points")
        {
            # Unlike violin plots, box plots do not accept weights
            # For consistency with plotly we use type = 5 (midpoints)
            # this differs from violin plot quantiles (type = 6; i/n+1)
            five.num <- quantile(values, type = 5)
            names(five.num) <- c("Minimum:", "Lower quartile:", "Median:", "Upper quartile:", "Maximum:")
            five.pos <- rep(0, length(five.num))
            # Invisible markers carrying the hovertext for each quantile.
            p <- add_trace(p,
                           x = if (vertical) five.pos else five.num,
                           y = if (vertical) five.num else five.pos,
                           type = "scatter",
                           mode = "markers", cliponaxis = FALSE,
                           marker = list(color = "transparent"),
                           hoverlabel = list(bgcolor = density.color[1]),
                           hoverinfo = "text",
                           text = paste(names(five.num), round(five.num, 2)))
        }
    } else if (density.type == "Histogram")
    {
        p <- add_trace(p,
                       xbins = if (!vertical) bins else NULL,
                       ybins = if (vertical) bins else NULL,
                       nbinsx = maximum.bins,
                       nbinsy = maximum.bins,
                       x = if (vertical) NULL else values,
                       y = if (vertical) values else NULL,
                       marker = list(color = density.color[1]),
                       histnorm = if (histogram.counts) "" else "probability",
                       hoverinfo = if (vertical) "x" else "y",
                       cumulative = list(enabled = histogram.cumulative),
                       name = label,
                       type = "histogram",
                       xaxis = category.axis,
                       yaxis = value.axis)
        return(p)
    } else # Density: the curve itself (x.product = 1) and/or its mirror (-1).
        for (x.product in c(if (show.density) 1 else NULL, if (show.mirror.density) -1 else NULL))
            p <- add_trace(p,
                           y = if (vertical) values.density$x else x.product * values.density$y,
                           x = if (vertical) x.product * values.density$y else values.density$x,
                           fill = if (vertical) "tozerox" else "tozeroy",
                           fillcolor = density.color[1],
                           hoverinfo = "none",
                           line = list(shape = "spline", width = 0),
                           mode = "lines",
                           name = label,
                           type = "scatter",
                           xaxis = category.axis,
                           yaxis = value.axis)
    p
}
# Expands a single weights vector into a list with one entry per element of
# 'x', so that every group in 'x' shares the same weights vector.
createWeights <- function(x, weights)
{
    lapply(seq_along(x), function(i) weights)
}
#' @importFrom stats density weighted.mean quantile
#' @importFrom Hmisc wtd.quantile
# Overlays summary statistics for one variable on the plotly object 'p':
# an optional rug of the raw values, plus range, quartile, median and mean
# markers drawn box-plot style at the centre of the violin.
# Requires attr(p, "values.density") (set by addDensities) to scale the
# width of the median bar. Returns the modified plotly object.
addSummaryStatistics <- function(p, values, weights, vertical, show.mean, show.median, show.quartiles, show.range, show.values,
                                 mean.color, median.color, quartile.color, range.color, values.color,
                                 category.axis, category.axis.2, value.axis, value.axis.2)
{
    # Rug plot of values
    if (show.values)
    {
        v2 <- values
        # A constant category value places every tick on a single line of the
        # secondary (rug) category axis.
        v1 <- rep("rugplot", length(values))
        p <- add_trace(p,
                       x = if (vertical) v1 else v2,
                       y = if (vertical) v2 else v1,
                       hoverinfo = "text",
                       marker = list(color = values.color, symbol = if (vertical) "line-ew-open" else "line-ns-open"),
                       mode = "markers",
                       name = "",
                       showlegend = FALSE,
                       text = round(values, 2),
                       type = "scatter",
                       xaxis = category.axis.2,
                       yaxis = value.axis.2)
    }
    ### Violin plot
    if (show.median || show.quartiles || show.range)
    {
        # Quantiles are computed in the same way as SPSS
        # This corresponds to type = 6 (default is type = 7); see Rdocs for stats::quantile
        five.num <- wtd.quantile(values, weights = weights, type = "i/(n+1)", normwt = TRUE)
        names(five.num) <- c("Minimum:", "Lower quartile:", "Median:", "Upper quartile:", "Maximum:")
    }
    mn <- if(show.mean) c("Mean:" = weighted.mean(values, w = weights)) else NULL
    # Function for adding components of boxplot to plot
    .addBox <- function(p, y, x, name, line = NULL, marker = NULL)
    {
        p <- add_trace(p,
                       x = x,
                       y = y,
                       line = line,
                       marker = marker,
                       name = name,
                       hoverinfo = paste0("name+", if (vertical) "y" else "x"),
                       mode = if (is.null(line)) "markers" else "lines",
                       type = "scatter", cliponaxis = FALSE,
                       xaxis = category.axis,
                       yaxis = value.axis
        )
    }
    # Adding box plot components; position 0 is the centre of the category axis.
    if (show.range)
    {
        v1 <- c(0, 0)
        v2 <- five.num[c(1, 5)]
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Range", line = list(width = 1.5, color = range.color))
    }
    if (show.quartiles)
    {
        v1 <- c(0, 0)
        v2 <- five.num[c(2, 4)]
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Quartiles", line = list(width = 8, color = quartile.color))
    }
    if (show.median)
    {
        # Median bar width: 20% of the density curve's maximum (absolute) height.
        half.mean.width = 0.2 * max(abs(range(attr(p, "values.density")$y)))
        v1 <- c(-half.mean.width, half.mean.width)
        v2 <- rep(five.num[3], 2)
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Median", line = list(width = 4, color = median.color))
    }
    if (show.mean)
    {
        v1 <- 0
        v2 <- mn
        p <- .addBox(p, x = if (vertical) v1 else v2, y = if (vertical) v2 else v1, "Mean", marker = list(color = mean.color, symbol = "square"))
    }
    p
}
# Builds the plotly axis settings for the i-th violin's category axis, or
# NULL when i exceeds the number of variables. The base domain reserves room
# for the rug strip and/or mirror density, then is squeezed into the i-th of
# n.variables equal slots along the categories direction.
violinCategoryAxis <- function(i, label, n.variables, vertical, show.values, show.density, show.mirror.density, family,
                               size, color, values.hovertext.format)
{
    if (i > n.variables)
        return(NULL)
    base.domain <- if (!show.mirror.density) {
        c(if (show.values) .12 else 0, .95)
    } else if (!show.density) {
        c(0, .9)
    } else {
        c(0, 1)
    }
    slot.offset <- (i - 1) / n.variables
    axis <- list(autorange = TRUE,
                 domain = base.domain / n.variables + slot.offset,
                 hoverformat = values.hovertext.format,
                 showgrid = FALSE,
                 showticklabels = FALSE,
                 ticks = "",
                 title = label,
                 titlefont = list(family = family, size = size, color = color),
                 type = "linear",
                 zeroline = FALSE)
    axis
}
# Builds the plotly axis settings for the i-th variable's rug-plot category
# axis, or NULL when the rug is not shown or i exceeds the variable count.
# The rug occupies a thin strip of the i-th variable's slot, positioned
# relative to the density/mirror-density curves.
rugCategoryAxis <- function(i, n.variables, vertical, show.density, show.mirror.density, show.values)
{
    if (i > n.variables || !show.values)
        return(NULL)
    # Half-width of the rug strip, as a fraction of the whole plot.
    offset <- max(10, n.variables + 2) / 2 / 100
    if (show.density && show.mirror.density)
        domain <- c(.5 - offset, .5 + offset)  # rug sits between the two densities
    else if (show.density)
        domain <- c(0, 0.1)                    # rug below/left of the density
    else if (show.mirror.density)
        domain <- c(.9, 1)                     # rug above/right of the mirror density
    else
        domain <- c(0, 1)                      # no density shown: use the full slot
                                               # (previously 'domain' was undefined here, causing an error)
    list(autorange = TRUE,  # was listed twice in the original list; once is enough
         domain = domain / n.variables + (i - 1) / n.variables,
         #hoverformat = values.hovertext.format, does not work with type = "category"
         range = c(-1, 1),
         showgrid = FALSE,
         showticklabels = FALSE,
         title = "",
         type = "category")
}
## Assemble the plotly layout axis definitions for all variables as a single
## string of R code (parsed/evaluated by the caller). Each variable owns a
## pair of axes: an odd-numbered slot for its violin and the following
## even-numbered slot for its rug, the first pair being "xaxis"/"xaxis2".
## For horizontal plots every "xaxis" token is rewritten to "yaxis".
violinCategoriesAxes <- function(vertical, n.variables, labels)
{
    shared.args <- "n.variables, vertical, show.values, show.density, show.mirror.density, categories.tick.font.family, categories.tick.font.size, categories.tick.font.color, values.hovertext.format"
    ## First variable uses the unnumbered "xaxis" slot plus "xaxis2"
    axes <- paste0("xaxis = violinCategoryAxis(1, '", labels[1], "',", shared.args,
        "), xaxis2 = rugCategoryAxis(1, n.variables, vertical, show.density, show.mirror.density, show.values), ")
    if (n.variables > 1)
    {
        rug.ids <- 2 * (2:n.variables)  # even axis numbers reserved for rugs
        violin.part <- paste0("xaxis", rug.ids - 1, " = violinCategoryAxis(", 2:n.variables, ", '", labels[-1], "',", shared.args, "), ", collapse = "")
        rug.part <- paste0("xaxis", rug.ids, " = rugCategoryAxis(", 2:n.variables, ", n.variables, vertical, show.density, show.mirror.density, show.values), ", collapse = "")
        axes <- paste0(axes, violin.part, rug.part)
    }
    if (!vertical)
        axes <- gsub("xaxis", "yaxis", axes, fixed = TRUE)
    axes
}
## Merge the declared defaults of `chart.function` with the overrides in
## `arguments`, drop any argument the user's call already supplies (or that
## is NULL), rewrite the call to target Distribution(), and return the
## rewritten call's arguments as a list.
##
## call           the captured user call (a language object)
## chart.function the chart function whose formals provide the defaults
## arguments      a named list of overrides for those defaults
distributionArgs <- function(call, chart.function, arguments)
{
    ## as.list(args(...)) ends with an unnamed slot (the stub body); the
    ## "" filter below removes it.
    args <- modifyList(as.list(args(chart.function)), arguments)
    nms <- names(args)
    nms <- nms[nms != ""]
    nms <- nms[!nms %in% names(call)]  # keep only arguments the call lacks
    args <- args[nms]
    ## vapply, not sapply: with zero remaining arguments sapply() returns
    ## list(), and `args[!list()]` is then an error; vapply gives logical(0).
    args <- args[!vapply(args, is.null, logical(1))]
    call[[1]] <- Distribution  # retarget the call at Distribution()
    call <- modify_call(call, args)
    as.list(call[-1])
}
|
context("Test_Matrix.Overlap")
test_that("General Test with 1 species and two genes", {
data(Seq.DF4) ## the first object of the list is the Species-by-gene matrix
# Run the function.
res = Matrix.Overlap(input = Seq.DF4[[1]], gene.Sel = c("co1", "16srrna"))
expect_equal(dim(res[[1]])[1], 2)
expect_equal(dim(res[[2]])[1], 2)
expect_equal(res[[2]][1,1], 100)
})
SpbyGeneMat=rbind(as.matrix(Seq.DF4[[1]]), c("Titi_titi",0, 2, 1), c("Toto_toto", 0, 0, 4))
row.names(SpbyGeneMat)=SpbyGeneMat[,1]
SpbyGeneMat=as.data.frame(SpbyGeneMat)
test_that("General Test with 3 species and 3 genes", {
res = Matrix.Overlap(input = SpbyGeneMat, gene.Sel = c("co1", "16srrna", "12srrna"))
expect_equal(dim(res[[1]])[1], 3)
expect_equal(dim(res[[2]])[1], 3)
expect_equal(sum(diag(res[[2]])), 300)
expect_equal(sum(diag(res[[1]])), 6)
})
| /tests/testthat/test-Test_Matrix.Overlap.R | no_license | dvdeme/regPhylo | R | false | false | 846 | r | context("Test_Matrix.Overlap")
test_that("General Test with 1 species and two genes", {
data(Seq.DF4) ## the first object of the list is the Species-by-gene matrix
# Run the function.
res = Matrix.Overlap(input = Seq.DF4[[1]], gene.Sel = c("co1", "16srrna"))
expect_equal(dim(res[[1]])[1], 2)
expect_equal(dim(res[[2]])[1], 2)
expect_equal(res[[2]][1,1], 100)
})
SpbyGeneMat=rbind(as.matrix(Seq.DF4[[1]]), c("Titi_titi",0, 2, 1), c("Toto_toto", 0, 0, 4))
row.names(SpbyGeneMat)=SpbyGeneMat[,1]
SpbyGeneMat=as.data.frame(SpbyGeneMat)
test_that("General Test with 3 species and 3 genes", {
res = Matrix.Overlap(input = SpbyGeneMat, gene.Sel = c("co1", "16srrna", "12srrna"))
expect_equal(dim(res[[1]])[1], 3)
expect_equal(dim(res[[2]])[1], 3)
expect_equal(sum(diag(res[[2]])), 300)
expect_equal(sum(diag(res[[1]])), 6)
})
|
library(testthat)
library(questionr)
context("Tables and cross-tables functions")
# Example datasets shipped with questionr. fecondite is loaded but not used
# by the tests below.
data(hdv2003)
data(fecondite)
# Default freq(): counts, raw % (NA row included) and valid % (NA excluded,
# reported as NA on the NA row).
test_that("Simple freq is correct", {
tab <- freq(hdv2003$qualif)
v <- as.numeric(summary(hdv2003$qualif))
val <- as.numeric(table(hdv2003$qualif))
expect_equal(names(tab), c("n", "%", "val%"))
expect_equal(rownames(tab), c(levels(hdv2003$qualif), "NA"))
expect_equal(tab$n, v)
expect_equal(tab$`%`, round(v / sum(v) * 100, 1))
expect_equal(tab$`val%`, c(round(val / sum(val) * 100, 1), NA))
})
# Ascending sort with the NA row kept in sorted position (na.last = FALSE),
# cumulative %, a Total row, and no valid% column (valid = FALSE).
test_that("freq with sort, digits, cum, valid and total is correct", {
tab <- freq(hdv2003$qualif, digits = 2, cum = TRUE, total = TRUE, valid = FALSE, sort = "inc", na.last = FALSE)
v <- sort(summary(hdv2003$qualif))
vnum <- as.numeric(v)
expect_equal(names(tab), c("n", "%", "%cum"))
# summary() labels the missing count "NA's"; freq() uses plain "NA".
expect_equal(rownames(tab), gsub("NA's", "NA", c(names(v), "Total")))
expect_equal(tab$n, c(vnum, sum(vnum)))
expect_equal(tab$`%`, c(round(vnum / sum(vnum) * 100, 2), 100))
expect_equal(tab$`%cum`, c(round(cumsum(vnum) / sum(vnum) * 100, 2), 100))
})
# Same configuration but na.last = TRUE: the NA row must move to the end,
# just before the Total row.
test_that("freq with sort, digits, cum, valid, total and na.last is correct", {
tab <- freq(hdv2003$qualif, digits = 2, cum = TRUE, total = TRUE, valid = FALSE, sort = "inc", na.last = TRUE)
v <- sort(summary(hdv2003$qualif))
# Rebuild the expected order: non-NA counts first, then the "NA's" count.
v <- c(v[names(v) != "NA's"], v[names(v) == "NA's"])
vnum <- as.numeric(v)
expect_equal(names(tab), c("n", "%", "%cum"))
expect_equal(rownames(tab), gsub("NA's", "NA", c(names(v), "Total")))
expect_equal(tab$n, c(vnum, sum(vnum)))
expect_equal(tab$`%`, c(round(vnum / sum(vnum) * 100, 2), 100))
expect_equal(tab$`%cum`, c(round(cumsum(vnum) / sum(vnum) * 100, 2), 100))
})
# exclude removes NA and the listed levels from both the rows and the
# percentage base.
test_that("freq with exclude is correct", {
tab <- freq(hdv2003$qualif, exclude = c(NA, "Cadre", "Autre"))
v <- hdv2003$qualif[!(hdv2003$qualif %in% c(NA, "Cadre", "Autre"))]
vtab <- as.numeric(table(v)[!(names(table(v)) %in% c(NA, "Cadre", "Autre"))])
expect_equal(names(tab), c("n", "%"))
expect_equal(rownames(tab), setdiff(levels(hdv2003$qualif), c("NA", "Cadre", "Autre")))
expect_equal(tab$n, vtab)
expect_equal(tab$`%`, round(vtab / sum(vtab) * 100, 1))
})
# Column percentages: cprop() with an "All" margin column and an "n" row of
# column totals.
test_that("cprop results are correct" , {
tab <- table(hdv2003$qualif, hdv2003$clso, exclude = NULL)
# Drop all-zero columns (empty levels) before computing expected proportions.
etab <- tab[,apply(tab, 2, sum)>0]
ctab <- cprop(tab, n = TRUE)
# Margin labels are run through gettext, so compare against the same call.
expect_equal(colnames(ctab), c(levels(hdv2003$clso), gettext("All", domain="R-questionr")))
expect_equal(rownames(ctab), c(levels(hdv2003$qualif), NA, gettext("Total", domain="R-questionr"), "n"))
m <- prop.table(etab, 2) * 100
expect_equal(ctab[1:nrow(m), 1:ncol(m)], m)
margin <- margin.table(etab, 1)
margin <- as.numeric(round(margin / sum(margin) * 100, 2))
expect_equal(unname(ctab[1:length(margin), gettext("All", domain="R-questionr")]), margin)
n <- apply(etab, 2, sum)
expect_equal(ctab["n",][1:length(n)], n)
})
# Row percentages: mirror image of the cprop test, with a "Total" margin
# column and an "n" column of row totals.
test_that("lprop results are correct" , {
tab <- table(hdv2003$qualif, hdv2003$clso, exclude = NULL)
etab <- tab[,apply(tab, 2, sum)>0]
ltab <- lprop(tab, n = TRUE)
expect_equal(colnames(ltab), c(levels(hdv2003$clso), gettext("Total", domain="R-questionr"), "n"))
expect_equal(rownames(ltab), c(levels(hdv2003$qualif), NA, gettext("All", domain="R-questionr")))
m <- prop.table(etab, 1) * 100
expect_equal(ltab[1:nrow(m), 1:ncol(m)], m)
margin <- margin.table(etab, 2)
margin <- as.numeric(round(margin / sum(margin) * 100, 2))
expect_equal(unname(ltab[gettext("All", domain="R-questionr"), 1:length(margin)]), margin)
n <- apply(etab, 1, sum)
expect_equal(ltab[,"n"][1:length(n)], n)
})
| /tests/testthat/test_tables.R | no_license | gdutz/questionr | R | false | false | 3,608 | r | library(testthat)
library(questionr)
context("Tables and cross-tables functions")
# Example datasets shipped with questionr. fecondite is loaded but not used
# by the tests below.
data(hdv2003)
data(fecondite)
# Default freq(): counts, raw % (NA row included) and valid % (NA excluded,
# reported as NA on the NA row).
test_that("Simple freq is correct", {
tab <- freq(hdv2003$qualif)
v <- as.numeric(summary(hdv2003$qualif))
val <- as.numeric(table(hdv2003$qualif))
expect_equal(names(tab), c("n", "%", "val%"))
expect_equal(rownames(tab), c(levels(hdv2003$qualif), "NA"))
expect_equal(tab$n, v)
expect_equal(tab$`%`, round(v / sum(v) * 100, 1))
expect_equal(tab$`val%`, c(round(val / sum(val) * 100, 1), NA))
})
# Ascending sort with the NA row kept in sorted position (na.last = FALSE),
# cumulative %, a Total row, and no valid% column (valid = FALSE).
test_that("freq with sort, digits, cum, valid and total is correct", {
tab <- freq(hdv2003$qualif, digits = 2, cum = TRUE, total = TRUE, valid = FALSE, sort = "inc", na.last = FALSE)
v <- sort(summary(hdv2003$qualif))
vnum <- as.numeric(v)
expect_equal(names(tab), c("n", "%", "%cum"))
# summary() labels the missing count "NA's"; freq() uses plain "NA".
expect_equal(rownames(tab), gsub("NA's", "NA", c(names(v), "Total")))
expect_equal(tab$n, c(vnum, sum(vnum)))
expect_equal(tab$`%`, c(round(vnum / sum(vnum) * 100, 2), 100))
expect_equal(tab$`%cum`, c(round(cumsum(vnum) / sum(vnum) * 100, 2), 100))
})
# Same configuration but na.last = TRUE: the NA row must move to the end,
# just before the Total row.
test_that("freq with sort, digits, cum, valid, total and na.last is correct", {
tab <- freq(hdv2003$qualif, digits = 2, cum = TRUE, total = TRUE, valid = FALSE, sort = "inc", na.last = TRUE)
v <- sort(summary(hdv2003$qualif))
# Rebuild the expected order: non-NA counts first, then the "NA's" count.
v <- c(v[names(v) != "NA's"], v[names(v) == "NA's"])
vnum <- as.numeric(v)
expect_equal(names(tab), c("n", "%", "%cum"))
expect_equal(rownames(tab), gsub("NA's", "NA", c(names(v), "Total")))
expect_equal(tab$n, c(vnum, sum(vnum)))
expect_equal(tab$`%`, c(round(vnum / sum(vnum) * 100, 2), 100))
expect_equal(tab$`%cum`, c(round(cumsum(vnum) / sum(vnum) * 100, 2), 100))
})
# exclude removes NA and the listed levels from both the rows and the
# percentage base.
test_that("freq with exclude is correct", {
tab <- freq(hdv2003$qualif, exclude = c(NA, "Cadre", "Autre"))
v <- hdv2003$qualif[!(hdv2003$qualif %in% c(NA, "Cadre", "Autre"))]
vtab <- as.numeric(table(v)[!(names(table(v)) %in% c(NA, "Cadre", "Autre"))])
expect_equal(names(tab), c("n", "%"))
expect_equal(rownames(tab), setdiff(levels(hdv2003$qualif), c("NA", "Cadre", "Autre")))
expect_equal(tab$n, vtab)
expect_equal(tab$`%`, round(vtab / sum(vtab) * 100, 1))
})
# Column percentages: cprop() with an "All" margin column and an "n" row of
# column totals.
test_that("cprop results are correct" , {
tab <- table(hdv2003$qualif, hdv2003$clso, exclude = NULL)
# Drop all-zero columns (empty levels) before computing expected proportions.
etab <- tab[,apply(tab, 2, sum)>0]
ctab <- cprop(tab, n = TRUE)
# Margin labels are run through gettext, so compare against the same call.
expect_equal(colnames(ctab), c(levels(hdv2003$clso), gettext("All", domain="R-questionr")))
expect_equal(rownames(ctab), c(levels(hdv2003$qualif), NA, gettext("Total", domain="R-questionr"), "n"))
m <- prop.table(etab, 2) * 100
expect_equal(ctab[1:nrow(m), 1:ncol(m)], m)
margin <- margin.table(etab, 1)
margin <- as.numeric(round(margin / sum(margin) * 100, 2))
expect_equal(unname(ctab[1:length(margin), gettext("All", domain="R-questionr")]), margin)
n <- apply(etab, 2, sum)
expect_equal(ctab["n",][1:length(n)], n)
})
# Row percentages: mirror image of the cprop test, with a "Total" margin
# column and an "n" column of row totals.
test_that("lprop results are correct" , {
tab <- table(hdv2003$qualif, hdv2003$clso, exclude = NULL)
etab <- tab[,apply(tab, 2, sum)>0]
ltab <- lprop(tab, n = TRUE)
expect_equal(colnames(ltab), c(levels(hdv2003$clso), gettext("Total", domain="R-questionr"), "n"))
expect_equal(rownames(ltab), c(levels(hdv2003$qualif), NA, gettext("All", domain="R-questionr")))
m <- prop.table(etab, 1) * 100
expect_equal(ltab[1:nrow(m), 1:ncol(m)], m)
margin <- margin.table(etab, 2)
margin <- as.numeric(round(margin / sum(margin) * 100, 2))
expect_equal(unname(ltab[gettext("All", domain="R-questionr"), 1:length(margin)]), margin)
n <- apply(etab, 1, sum)
expect_equal(ltab[,"n"][1:length(n)], n)
})
|
# Yige Wu @WashU May 2020
## plot cell type on integration UMAP
# set up libraries and output directory -----------------------------------
## set working directory
# dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
dir_base = "~/Library/CloudStorage/Box-Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA"
setwd(dir_base)
packages = c(
"rstudioapi",
"plyr",
"dplyr",
"stringr",
"reshape2",
"data.table",
"ggplot2"
)
## attach each required package; character.only is needed because the
## package name is held in a loop variable
for (pkg_name_tmp in packages) {
library(package = pkg_name_tmp, character.only = T)
}
source("./ccRCC_snRNA_analysis/functions.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input DEGs
## NOTE(review): the four reads below each overwrite results_df, so only the
## res.3 table survives; presumably one line is run at a time interactively
## for the desired clustering resolution -- confirm before scripting this.
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.0.5.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.1.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.2.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.3.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
# make matrix data -----------------------------------------------------------------
## keep significant markers with non-negative pct.1 - pct.2, then count DEGs
## per cluster
count_df <- as.data.frame(results_df) %>%
# filter(p_val_adj < 0.05) %>%
filter(p_val_adj < 0.05) %>%
mutate(diff_pct = (pct.1 - pct.2)) %>%
filter(diff_pct >= 0) %>%
group_by(cluster) %>%
summarise(number_degs = n())
## NOTE(review): plotdata_df is never created in this script and count_df is
## never used afterwards -- the step building plotdata_df (with x_plot/y_plot
## cluster pairs) appears to be missing; verify against the original analysis.
plotdata_df$x_plot <- factor(plotdata_df$x_plot)
plotdata_df$y_plot <- factor(plotdata_df$y_plot)
# plot --------------------------------------------------------------------
p <- ggplot(data = plotdata_df, mapping = aes(x = x_plot, y = y_plot))
p <- p + geom_tile(mapping = aes(fill = number_degs))
p <- p + geom_text(mapping = aes(label = number_degs))
p <- p + scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 50, limit = c(0, 400), space = "Lab",
name="Number of\nDEGs")
p <- p + xlab("Cluster #1") + ylab("Cluster #2")
p <- p + theme_minimal()
p
| /findmarkers/30ccRCC_tumorcellreclustered/summarize_30ccRCC_tumorcellreclustered_findallmarkers_byres.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 2,813 | r | # Yige Wu @WashU May 2020
## plot cell type on integration UMAP
# set up libraries and output directory -----------------------------------
## set working directory
# dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
dir_base = "~/Library/CloudStorage/Box-Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA"
setwd(dir_base)
packages = c(
"rstudioapi",
"plyr",
"dplyr",
"stringr",
"reshape2",
"data.table",
"ggplot2"
)
## attach each required package; character.only is needed because the
## package name is held in a loop variable
for (pkg_name_tmp in packages) {
library(package = pkg_name_tmp, character.only = T)
}
source("./ccRCC_snRNA_analysis/functions.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input DEGs
## NOTE(review): the four reads below each overwrite results_df, so only the
## res.3 table survives; presumably one line is run at a time interactively
## for the desired clustering resolution -- confirm before scripting this.
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.0.5.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.1.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.2.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
results_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/30ccRCC_tumorcellreclustered/findallmarker_30ccRCC_tumorcellreclustered_byres/res.3.tumorcellsreclustered.markers.logfcthreshold.0.25.minpct.0.1.mindiffpct.0.tsv")
# make matrix data -----------------------------------------------------------------
## keep significant markers with non-negative pct.1 - pct.2, then count DEGs
## per cluster
count_df <- as.data.frame(results_df) %>%
# filter(p_val_adj < 0.05) %>%
filter(p_val_adj < 0.05) %>%
mutate(diff_pct = (pct.1 - pct.2)) %>%
filter(diff_pct >= 0) %>%
group_by(cluster) %>%
summarise(number_degs = n())
## NOTE(review): plotdata_df is never created in this script and count_df is
## never used afterwards -- the step building plotdata_df (with x_plot/y_plot
## cluster pairs) appears to be missing; verify against the original analysis.
plotdata_df$x_plot <- factor(plotdata_df$x_plot)
plotdata_df$y_plot <- factor(plotdata_df$y_plot)
# plot --------------------------------------------------------------------
p <- ggplot(data = plotdata_df, mapping = aes(x = x_plot, y = y_plot))
p <- p + geom_tile(mapping = aes(fill = number_degs))
p <- p + geom_text(mapping = aes(label = number_degs))
p <- p + scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 50, limit = c(0, 400), space = "Lab",
name="Number of\nDEGs")
p <- p + xlab("Cluster #1") + ylab("Cluster #2")
p <- p + theme_minimal()
p
|
library(shades)
### Name: addmix
### Title: Colour mixtures
### Aliases: addmix submix %.)% %_/%
### ** Examples
# Additive mixing: blend red into each of the base colours
addmix(c("red","green","blue"), "red")
# Subtractive mixing: blend cyan into each of the base colours
submix(c("cyan","magenta","yellow"), "cyan")
| /data/genthat_extracted_code/shades/examples/mixtures.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 204 | r | library(shades)
### Name: addmix
### Title: Colour mixtures
### Aliases: addmix submix %.)% %_/%
### ** Examples
# Additive mixing: blend red into each of the base colours
addmix(c("red","green","blue"), "red")
# Subtractive mixing: blend cyan into each of the base colours
submix(c("cyan","magenta","yellow"), "cyan")
|
\name{fix.test.asis}
\docType{data}
\alias{fix.test.asis}
\title{ Test Data for Function ltdl.fix.df }
\description{
A set of test data to demonstrate how negative values are changed to half their positive value. Optionally numeric coded values representing missing data and/or zero values may be replaced by \code{NA}s.
The \code{.csv} file was read without deleting ID, the row (observation) identifier in the first column, from the header record, and with \code{as.is} set to \code{as.is = c(1)}. Therefore the character row ID is saved as a character variable, not as a factor variable. If ID had been deleted from the header record the row ID would have been stored as dimnames(fix.test)[[1]].
}
\usage{fix.test.asis}
\format{ A data frame containing 15 rows and 5 columns (1 character, 2 factors, and 2 numeric). }
\seealso{ \code{\link{fix.test}} }
\keyword{ datasets }
| /man/fix.test.asis.Rd | no_license | cran/rgr | R | false | false | 905 | rd | \name{fix.test.asis}
\docType{data}
\alias{fix.test.asis}
\title{ Test Data for Function ltdl.fix.df }
\description{
A set of test data to demonstrate how negative values are changed to half their positive value. Optionally numeric coded values representing missing data and/or zero values may be replaced by \code{NA}s.
The \code{.csv} file was read without deleting ID, the row (observation) identifier in the first column, from the header record, and with \code{as.is} set to \code{as.is = c(1)}. Therefore the character row ID is saved as a character variable, not as a factor variable. If ID had been deleted from the header record the row ID would have been stored as dimnames(fix.test)[[1]].
}
\usage{fix.test.asis}
\format{ A data frame containing 15 rows and 5 columns (1 character, 2 factors, and 2 numeric). }
\seealso{ \code{\link{fix.test}} }
\keyword{ datasets }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{enhancements_list}
\alias{enhancements_list}
\title{List Enhancements}
\usage{
enhancements_list(type = NULL, author = NULL, status = NULL,
archived = NULL, limit = NULL, page_num = NULL, order = NULL,
order_dir = NULL)
}
\arguments{
\item{type}{string optional. If specified, return objects of these types.}
\item{author}{string optional. If specified, return objects from this author. Must use user IDs. A comma separated list of IDs is also accepted to return objects from multiple authors.}
\item{status}{string optional. If specified, returns objects with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.}
\item{archived}{string optional. The archival status of the requested object(s).}
\item{limit}{integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50.}
\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.}
\item{order}{string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at.}
\item{order_dir}{string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.}
}
\value{
An array containing the following fields:
\item{id}{integer, The ID for the enhancement.}
\item{name}{string, The name of the enhancement.}
\item{type}{string, The type of the enhancement (e.g CASS-NCOA)}
\item{createdAt}{string, The time this enhancement was created.}
\item{updatedAt}{string, The time the enhancement was last updated.}
\item{author}{object, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{state}{string, The status of the enhancement's last run}
\item{archived}{string, The archival status of the requested object(s).}
}
\description{
List Enhancements
}
| /man/enhancements_list.Rd | no_license | JosiahParry/civis-r | R | false | true | 2,204 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{enhancements_list}
\alias{enhancements_list}
\title{List Enhancements}
\usage{
enhancements_list(type = NULL, author = NULL, status = NULL,
archived = NULL, limit = NULL, page_num = NULL, order = NULL,
order_dir = NULL)
}
\arguments{
\item{type}{string optional. If specified, return objects of these types.}
\item{author}{string optional. If specified, return objects from this author. Must use user IDs. A comma separated list of IDs is also accepted to return objects from multiple authors.}
\item{status}{string optional. If specified, returns objects with one of these statuses. It accepts a comma-separated list, possible values are 'running', 'failed', 'succeeded', 'idle', 'scheduled'.}
\item{archived}{string optional. The archival status of the requested object(s).}
\item{limit}{integer optional. Number of results to return. Defaults to 20. Maximum allowed is 50.}
\item{page_num}{integer optional. Page number of the results to return. Defaults to the first page, 1.}
\item{order}{string optional. The field on which to order the result set. Defaults to updated_at. Must be one of: updated_at, name, created_at, last_run.updated_at.}
\item{order_dir}{string optional. Direction in which to sort, either asc (ascending) or desc (descending) defaulting to desc.}
}
\value{
An array containing the following fields:
\item{id}{integer, The ID for the enhancement.}
\item{name}{string, The name of the enhancement.}
\item{type}{string, The type of the enhancement (e.g CASS-NCOA)}
\item{createdAt}{string, The time this enhancement was created.}
\item{updatedAt}{string, The time the enhancement was last updated.}
\item{author}{object, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{state}{string, The status of the enhancement's last run}
\item{archived}{string, The archival status of the requested object(s).}
}
\description{
List Enhancements
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.