content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Tests for error handling in nc::capture_first_vec / nc::capture_all_str.
library(nc)
library(testthat)
context("errors")
# test_engines() comes from nc's installed helper script; presumably it
# re-runs each test under every available regex engine -- TODO confirm.
source(system.file("test_engines.R", package="nc", mustWork=TRUE), local=TRUE)
# Two-element subject vector shared by most tests below.
foo.bar <- c("foo", "bar")
# A pattern with no capture group at all must be rejected.
test_engines("no capture groups is an error", {
  expect_error({
    capture_first_vec(foo.bar, "o")
  }, "must have at least one named argument", fixed=TRUE)
})
# Named character vectors are not accepted as patterns; the error message
# suggests equivalent list() code, which should evaluate to this object.
named.chr.vec <- c("v\"name\\"="[a\"o]+")
expected.obj <- as.list(named.chr.vec)
# Evaluate expr, which is expected to signal an error whose message embeds
# R code after the phrase "did you mean ". Extract that suggested code,
# evaluate it, and check the result is identical to expected.obj.
expect_error_with_code <- function(expr, expected.obj){
  err.msg <- tryCatch(expr, error = function(e) e$message)
  suggested.code <- capture_first_vec(
    err.msg,
    "did you mean ",
    code = ".*")$code
  result.obj <- eval(parse(text = suggested.code))
  expect_identical(result.obj, expected.obj)
}
# The suggested list() code in the error message must evaluate to the
# equivalent named-list pattern, with or without an enclosing group name.
test_engines("named character vector is an error with group name", {
  expect_error_with_code({
    capture_first_vec(foo.bar, gname=named.chr.vec)
  }, expected.obj)
})
test_engines("named character vector is an error without group name", {
  expect_error_with_code({
    capture_first_vec(foo.bar, named.chr.vec)
  }, expected.obj)
})
# A named pattern *list* (unlike a named character vector) is legal.
named.pat.list <- as.list(named.chr.vec)
exp.vec <- c("oo", "a")
test_engines("named pattern list in named arg makes two groups", {
  (result.df <- capture_first_vec(foo.bar, gname=named.pat.list))
  expect_identical(names(result.df), c("gname", names(named.pat.list)))
  expect_identical(result.df$gname, exp.vec)
  expect_identical(result.df[[names(named.pat.list)]], exp.vec)
})
test_engines("named pattern list in un-named arg makes one group", {
  (result.df <- capture_first_vec(foo.bar, named.pat.list))
  expect_identical(names(result.df), names(named.pat.list))
  expect_identical(result.df[[names(named.pat.list)]], exp.vec)
})
# Every capture group must be named, not just some of them.
test_engines("any capture group without a name is an error", {
  expect_error({
    capture_first_vec(foo.bar, "(o)(?P<name>o)")
  }, "must have at least one named argument", fixed=TRUE)
})
test_engines("NA pattern is an error", {
  expect_error({
    capture_first_vec(foo.bar, NA_character_)
  }, "patterns must not be missing/NA", fixed=TRUE)
})
test_engines("factor pattern is an error", {
  expect_error({
    capture_first_vec(foo.bar, factor("(?P<regex>foo)"))
  }, "arguments must be character")
})
test_engines("multiple patterns is an error", {
  expect_error({
    capture_first_vec(foo.bar, c("(?P<name>.)", "(?P<name>.)"))
  }, "patterns must be character vectors of length 1")
})
test_engines("subject of length 0 is an error", {
  expect_error({
    capture_first_vec(character(), "(?P<name>.)")
  }, "subject has class=character and length=0 but should be a character vector with length>0")
})
# capture_all_str: NA and non-matching subjects just contribute no rows.
test_engines("capture all works with only one 'name' group", {
  subject <- c(missing=NA, nomatch="", match="foobar")
  result.dt <- capture_all_str(subject, name="foo")
  expect_equal(dim(result.dt), c(1, 1))
  expect_identical(result.dt$name, "foo")
})
# Type-conversion functions must take one argument and return an atomic
# vector with one element per subject; each violation is a distinct error.
test_engines("informative error when converter fun has zero args", {
  expect_error({
    capture_first_vec(
      "chr2:300-400",
      chrom="chr", function()y)
  }, "atomic vector",
  fixed=TRUE)
})
test_engines("informative error when converter returns wrong length", {
  expect_error({
    capture_first_vec(
      c("chr2:300-400", "chr2:300-400"),
      chrom="chr", function(x)"foo")
  }, "type conversion function for group 1(chrom) returned vector of length 1 but expected length 2", fixed=TRUE)
})
test_engines("informative error when converter returns non-atomic", {
  expect_error({
    capture_first_vec(
      c("chr2:300-400", "chr2:300-400"),
      chrom="chr", function(x)list(foo=200))
  }, "type conversion function for group 1(chrom) must return atomic vector", fixed=TRUE)
})
# nomatch.error=TRUE turns NA or non-matching subjects into errors that
# identify the offending subject by position.
test_engines("error for name group, missing subject, nomatch.error=TRUE", {
  expect_error({
    capture_first_vec(
      c("chr1:20-40", NA, "chr2:300-400"),
      name="[^:]+",
      ":",
      chromStart="[0-9]+",
      nomatch.error=TRUE)
  }, "subject 2 did not match regex below")
})
test_engines("error for name group, no match, nomatch.error=TRUE", {
  expect_error({
    capture_first_vec(
      c("chr1:20-40", "foobar", "chr2:300-400"),
      name="[^:]+",
      ":",
      chromStart="[0-9]+",
      nomatch.error=TRUE)
  }, "subject 2 did not match regex below")
})
# The subject must be the first, un-named argument.
test_that("informative error when subject is named", {
  expect_error({
    nc::capture_all_str(chrom="chr.*?", ":", chromStart="[0-9,]+")
  }, "first argument is named chrom but must NOT be named; please include the subject to match as the first argument, with no name")
  expect_error({
    nc::capture_first_vec(chrom="chr.*?", ":", chromStart="[0-9,]+")
  }, "first argument is named chrom but must NOT be named; please include the subject to match as the first argument, with no name")
})
| /tests/testthat/test-CRAN-errors.R | no_license | TimTaylor/nc | R | false | false | 4,720 | r | library(nc)
# Duplicate copy of nc's error-handling test suite (identical content
# appears earlier in this dump; the leading library(nc) line of this copy
# is fused into the preceding metadata row).
library(testthat)
context("errors")
# test_engines() comes from nc's installed helper script; presumably it
# re-runs each test under every available regex engine -- TODO confirm.
source(system.file("test_engines.R", package="nc", mustWork=TRUE), local=TRUE)
# Two-element subject vector shared by most tests below.
foo.bar <- c("foo", "bar")
# A pattern with no capture group at all must be rejected.
test_engines("no capture groups is an error", {
  expect_error({
    capture_first_vec(foo.bar, "o")
  }, "must have at least one named argument", fixed=TRUE)
})
# Named character vectors are not accepted as patterns; the error message
# suggests equivalent list() code, which should evaluate to this object.
named.chr.vec <- c("v\"name\\"="[a\"o]+")
expected.obj <- as.list(named.chr.vec)
# Evaluate expr (expected to error), extract the R code suggested after
# "did you mean " in the error message, run it, and compare the result to
# expected.obj.
expect_error_with_code <- function(expr, expected.obj){
  msg <- tryCatch({
    expr
  }, error=function(e){
    e$message
  })
  code <- capture_first_vec(
    msg,
    "did you mean ",
    code=".*")$code
  computed.obj <- eval(parse(text=code))
  expect_identical(computed.obj, expected.obj)
}
test_engines("named character vector is an error with group name", {
  expect_error_with_code({
    capture_first_vec(foo.bar, gname=named.chr.vec)
  }, expected.obj)
})
test_engines("named character vector is an error without group name", {
  expect_error_with_code({
    capture_first_vec(foo.bar, named.chr.vec)
  }, expected.obj)
})
# A named pattern *list* (unlike a named character vector) is legal.
named.pat.list <- as.list(named.chr.vec)
exp.vec <- c("oo", "a")
test_engines("named pattern list in named arg makes two groups", {
  (result.df <- capture_first_vec(foo.bar, gname=named.pat.list))
  expect_identical(names(result.df), c("gname", names(named.pat.list)))
  expect_identical(result.df$gname, exp.vec)
  expect_identical(result.df[[names(named.pat.list)]], exp.vec)
})
test_engines("named pattern list in un-named arg makes one group", {
  (result.df <- capture_first_vec(foo.bar, named.pat.list))
  expect_identical(names(result.df), names(named.pat.list))
  expect_identical(result.df[[names(named.pat.list)]], exp.vec)
})
# Every capture group must be named, not just some of them.
test_engines("any capture group without a name is an error", {
  expect_error({
    capture_first_vec(foo.bar, "(o)(?P<name>o)")
  }, "must have at least one named argument", fixed=TRUE)
})
test_engines("NA pattern is an error", {
  expect_error({
    capture_first_vec(foo.bar, NA_character_)
  }, "patterns must not be missing/NA", fixed=TRUE)
})
test_engines("factor pattern is an error", {
  expect_error({
    capture_first_vec(foo.bar, factor("(?P<regex>foo)"))
  }, "arguments must be character")
})
test_engines("multiple patterns is an error", {
  expect_error({
    capture_first_vec(foo.bar, c("(?P<name>.)", "(?P<name>.)"))
  }, "patterns must be character vectors of length 1")
})
test_engines("subject of length 0 is an error", {
  expect_error({
    capture_first_vec(character(), "(?P<name>.)")
  }, "subject has class=character and length=0 but should be a character vector with length>0")
})
# capture_all_str: NA and non-matching subjects just contribute no rows.
test_engines("capture all works with only one 'name' group", {
  subject <- c(missing=NA, nomatch="", match="foobar")
  result.dt <- capture_all_str(subject, name="foo")
  expect_equal(dim(result.dt), c(1, 1))
  expect_identical(result.dt$name, "foo")
})
# Type-conversion functions must take one argument and return an atomic
# vector with one element per subject; each violation is a distinct error.
test_engines("informative error when converter fun has zero args", {
  expect_error({
    capture_first_vec(
      "chr2:300-400",
      chrom="chr", function()y)
  }, "atomic vector",
  fixed=TRUE)
})
test_engines("informative error when converter returns wrong length", {
  expect_error({
    capture_first_vec(
      c("chr2:300-400", "chr2:300-400"),
      chrom="chr", function(x)"foo")
  }, "type conversion function for group 1(chrom) returned vector of length 1 but expected length 2", fixed=TRUE)
})
test_engines("informative error when converter returns non-atomic", {
  expect_error({
    capture_first_vec(
      c("chr2:300-400", "chr2:300-400"),
      chrom="chr", function(x)list(foo=200))
  }, "type conversion function for group 1(chrom) must return atomic vector", fixed=TRUE)
})
# nomatch.error=TRUE turns NA or non-matching subjects into errors that
# identify the offending subject by position.
test_engines("error for name group, missing subject, nomatch.error=TRUE", {
  expect_error({
    capture_first_vec(
      c("chr1:20-40", NA, "chr2:300-400"),
      name="[^:]+",
      ":",
      chromStart="[0-9]+",
      nomatch.error=TRUE)
  }, "subject 2 did not match regex below")
})
test_engines("error for name group, no match, nomatch.error=TRUE", {
  expect_error({
    capture_first_vec(
      c("chr1:20-40", "foobar", "chr2:300-400"),
      name="[^:]+",
      ":",
      chromStart="[0-9]+",
      nomatch.error=TRUE)
  }, "subject 2 did not match regex below")
})
# The subject must be the first, un-named argument.
test_that("informative error when subject is named", {
  expect_error({
    nc::capture_all_str(chrom="chr.*?", ":", chromStart="[0-9,]+")
  }, "first argument is named chrom but must NOT be named; please include the subject to match as the first argument, with no name")
  expect_error({
    nc::capture_first_vec(chrom="chr.*?", ":", chromStart="[0-9,]+")
  }, "first argument is named chrom but must NOT be named; please include the subject to match as the first argument, with no name")
})
|
# read_blacklist_genotypes ---------------------------------------------------------------
#' @name read_blacklist_genotypes
#' @title read blacklist of genotypes
#' @description Read a blacklist object or file.
#'
#'
#' Used internally in \href{https://github.com/thierrygosselin/radiator}{radiator}
#' and might be of interest for users.
#'
#' @param blacklist.genotypes (path or object)
#' The blacklist is an object in your
#' global environment or a file in the working directory (e.g. "blacklist.geno.tsv").
#' The dataframe contains at least these 2 columns: \code{MARKERS, INDIVIDUALS}.
#' Additional columns are allowed: \code{CHROM, LOCUS, POS}.
#'
#' Useful to erase genotypes with bad QC, e.g. genotype with more than 2 alleles
#' in diploid likely
#' sequencing errors or genotypes with poor genotype likelihood or coverage.
#'
#' Columns are cleaned of separators that interfere with some packages or codes, detailed in
#' \code{\link{clean_markers_names}} and \code{\link{clean_ind_names}}
#' Default \code{blacklist.genotypes = NULL}.
#'
#' @inheritParams radiator_common_arguments
#' @return A data frame blacklist with unique rows and cleaned names, or
#' \code{NULL} when no blacklist was supplied.
#' @examples
#' \dontrun{
#' bl <- radiator::read_blacklist_genotypes(
#'     blacklist.genotypes = "blacklist.geno.iguana.tsv")
#' }
#' @section Life cycle:
#'
#' This function arguments will be subject to changes. Currently the function uses
#' erase.genotypes, but using the \code{dots-dots-dots ...} arguments allows to
#' pass \code{erase.genotypes and masked.genotypes}. These arguments do exactly
#' the same thing and only one can be used.
#' @export
#' @rdname read_blacklist_genotypes
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com}
read_blacklist_genotypes <- function(
    blacklist.genotypes = NULL,
    verbose = FALSE,
    ...
) {
  # dotslist -------------------------------------------------------------------
  # erase.genotypes and masked.genotypes are accepted through ... as aliases
  # of blacklist.genotypes; anything else in ... is an error.
  dotslist <- rlang::dots_list(..., .homonyms = "error", .check_assign = TRUE)
  want <- c("erase.genotypes", "masked.genotypes")
  unknowned_param <- setdiff(names(dotslist), want)
  if (length(unknowned_param) > 0) {
    # BUG FIX: the joined parameter names were previously passed as the second
    # (class) argument of rlang::abort, so they never appeared in the message.
    rlang::abort(paste0(
      "Unknown \"...\" parameters: ",
      stringi::stri_join(unknowned_param, collapse = " ")))
  }
  radiator.dots <- dotslist[names(dotslist) %in% want]
  erase.genotypes <- radiator.dots[["erase.genotypes"]]
  masked.genotypes <- radiator.dots[["masked.genotypes"]]
  # At most one of the 3 alias arguments may be supplied.
  # BUG FIX: the check previously used && across all three, so it only fired
  # when ALL of them were non-NULL; supplying any two was silently accepted.
  n.supplied <- sum(
    !is.null(erase.genotypes),
    !is.null(masked.genotypes),
    !is.null(blacklist.genotypes))
  if (n.supplied > 1) {
    rlang::abort("Only one of: erase.genotypes, masked.genotypes or blacklist.genotypes should be used")
  }
  if (is.null(blacklist.genotypes)) {
    if (!is.null(erase.genotypes)) blacklist.genotypes <- erase.genotypes
    if (!is.null(masked.genotypes)) blacklist.genotypes <- masked.genotypes
  }
  if (!is.null(blacklist.genotypes)) {
    # Import blacklist: a bare vector is treated as a file path; read it from
    # disk and coerce every column to character.
    if (is.vector(blacklist.genotypes)) {
      blacklist.genotypes <- suppressMessages(
        readr::read_tsv(blacklist.genotypes, col_names = TRUE) %>%
          dplyr::mutate_all(.tbl = ., .funs = as.character))
    }
    # Drop duplicated rows, warning the user when any were present.
    nrow.before <- nrow(blacklist.genotypes)
    blacklist.genotypes <- dplyr::distinct(blacklist.genotypes)
    nrow.after <- nrow(blacklist.genotypes)
    duplicate.blacklist.genotypes <- nrow.before - nrow.after
    if (duplicate.blacklist.genotypes > 0) {
      if (verbose) message("Duplicated rows in blacklist genotypes: ", duplicate.blacklist.genotypes)
      if (verbose) message(" Creating unique blacklist")
      if (verbose) message(" Warning: downstream results might be impacted by this, check how you made your blacklist")
    }
    nrow.before <- duplicate.blacklist.genotypes <- nrow.after <- NULL
    # cleaning names of markers: every column except INDIVIDUALS holds marker
    # metadata and goes through clean_markers_names(); INDIVIDUALS goes
    # through clean_ind_names().
    need.cleaning <- purrr::discard(
      .x = colnames(blacklist.genotypes),
      .p = colnames(blacklist.genotypes) %in% "INDIVIDUALS")
    blacklist.genotypes <- dplyr::mutate_at(
      .tbl = blacklist.genotypes,
      .vars = need.cleaning,
      .funs = clean_markers_names)
    blacklist.genotypes <- dplyr::mutate_at(
      .tbl = blacklist.genotypes,
      .vars = "INDIVIDUALS",
      .funs = clean_ind_names)
  }
  # NOTE: the redundant `else { blacklist.genotypes <- NULL }` was removed;
  # when no blacklist was supplied the value is already NULL here.
  return(blacklist.genotypes)
}#End read_blacklist_genotypes
# filter_blacklist_genotypes ---------------------------------------------------------------
#' @name filter_blacklist_genotypes
#' @title Filter dataset with blacklist of genotypes
#' @description Filter dataset with blacklist of genotypes.
#'
#' This function allows to blacklist/erase/mask genotypes.
#'
#' Used internally in \href{https://github.com/thierrygosselin/radiator}{radiator}
#' and might be of interest for users.
#'
#' @param data (4 options) A file or object generated by radiator:
#' \itemize{
#' \item tidy data
#' \item Genomic Data Structure (GDS)
#' }
#'
#' \emph{How to get GDS and tidy data ?}
#' Look into \code{\link{tidy_genomic_data}},
#' \code{\link{write_seqarray}} or
#' \code{\link{tidy_vcf}}.
#'
#'
#' @inheritParams radiator_common_arguments
#' @inheritParams read_blacklist_genotypes
#' @return The dataset with blacklisted genotypes erased (tidy input), or
#' unchanged when no usable blacklist was supplied.
#' @examples
#' \dontrun{
#' data <- radiator::filter_blacklist_genotypes(
#'     data = data, blacklist.genotypes = "blacklist.geno.tsv"
#' )
#' }
#' @section Life cycle:
#'
#' This function arguments will be subject to changes. Currently the function uses
#' erase.genotypes, but using the \code{dots-dots-dots ...} arguments allows to
#' pass \code{erase.genotypes and masked.genotypes}. These arguments do exactly
#' the same thing and only one can be used.
#' @export
#' @rdname filter_blacklist_genotypes
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com}
filter_blacklist_genotypes <- function(
    data,
    blacklist.genotypes = NULL,
    verbose = TRUE,
    ...
) {
  # Checking for missing and/or default arguments ------------------------------
  if (missing(data)) rlang::abort("Input file missing")
  file.date <- format(Sys.time(), "%Y%m%d@%H%M")# Date and time
  # dotslist -------------------------------------------------------------------
  dotslist <- rlang::dots_list(..., .homonyms = "error", .check_assign = TRUE)
  want <- c("path.folder", "parameters", "erase.genotypes", "masked.genotypes")
  unknowned_param <- setdiff(names(dotslist), want)
  if (length(unknowned_param) > 0) {
    # BUG FIX: the joined parameter names were previously passed as the second
    # (class) argument of rlang::abort, so they never appeared in the message.
    rlang::abort(paste0(
      "Unknown \"...\" parameters: ",
      stringi::stri_join(unknowned_param, collapse = " ")))
  }
  radiator.dots <- dotslist[names(dotslist) %in% want]
  parameters <- radiator.dots[["parameters"]]
  erase.genotypes <- radiator.dots[["erase.genotypes"]]
  masked.genotypes <- radiator.dots[["masked.genotypes"]]
  path.folder <- radiator.dots[["path.folder"]]
  path.folder <- generate_folder(f = path.folder, file.date = file.date, verbose = verbose)
  # erase.genotypes / masked.genotypes are aliases of blacklist.genotypes:
  # at most one of the three may be supplied.
  # BUG FIX: the check previously used && across all three, so it only fired
  # when ALL of them were non-NULL. A default of NULL was also added to
  # blacklist.genotypes (backward compatible) so the aliases are usable
  # without explicitly passing blacklist.genotypes = NULL.
  n.supplied <- sum(
    !is.null(erase.genotypes),
    !is.null(masked.genotypes),
    !is.null(blacklist.genotypes))
  if (n.supplied > 1) {
    rlang::abort("Only one of: erase.genotypes, masked.genotypes or blacklist.genotypes should be used")
  }
  if (is.null(blacklist.genotypes)) {
    if (!is.null(erase.genotypes)) blacklist.genotypes <- erase.genotypes
    if (!is.null(masked.genotypes)) blacklist.genotypes <- masked.genotypes
  }
  # read the blacklist (object or file path; NULL passes through unchanged)
  blacklist.genotypes <- radiator::read_blacklist_genotypes(
    blacklist.genotypes = blacklist.genotypes,
    verbose = verbose)
  if (!is.null(blacklist.genotypes)) {
    # Import data ---------------------------------------------------------------
    data.type <- radiator::detect_genomic_format(data)
    if (!data.type %in% c("tbl_df", "fst.file", "SeqVarGDSClass", "gds.file")) {
      rlang::abort("Input not supported for this function: read function documentation")
    }
    if (data.type %in% c("SeqVarGDSClass", "gds.file")) {
      # SeqVarTools (Bioconductor) is an optional dependency for GDS input.
      if (!requireNamespace("SeqVarTools", quietly = TRUE)) {
        rlang::abort('Please install SeqVarTools for this option:\n
                     install.packages("BiocManager")
                     BiocManager::install("SeqVarTools")')
      }
      if (data.type == "gds.file") {
        data <- radiator::read_rad(data, verbose = verbose)
        data.type <- "SeqVarGDSClass"
      }
    } else {
      # tidy/fst input: a path is imported into a tidy data frame.
      if (is.vector(data)) data <- radiator::tidy_wide(data = data, import.metadata = TRUE)
      data.type <- "tbl_df"
    }
    # Filter parameter file: generate and initiate -----------------------------
    # TODO: modify the function to accomodate erasing genotypes...
    # filters.parameters <- update_parameters(
    #   generate = TRUE,
    #   initiate = TRUE,
    #   update = FALSE,
    #   parameter.obj = parameters,
    #   data = data,
    #   path.folder = path.folder,
    #   file.date = file.date,
    #   verbose = verbose)
    # checks -------------------------------------------------------------------
    # Keep only blacklist rows whose INDIVIDUALS and MARKERS are present in
    # the dataset (they may already have been blacklisted elsewhere).
    if (verbose) message("Checking matching individuals and markers between blacklist and data")
    if (data.type == "tbl_df") {
      id <- unique(data$INDIVIDUALS)
      wl <- unique(data$MARKERS)
    } else {
      radiator.gds <- gdsfmt::index.gdsn(
        node = data, path = "radiator", silent = TRUE)
      id <- gdsfmt::index.gdsn(
        node = radiator.gds, path = "individuals", silent = TRUE)
      if (!is.null(id)) {
        id <- gdsfmt::read.gdsn(id) %$% INDIVIDUALS
      } else {
        id <- SeqArray::seqGetData(gdsfile = data, var.name = "sample.id")
      }
      wl <- radiator::extract_markers_metadata(gds = data) %$% MARKERS
    }
    blacklist.genotypes %<>% dplyr::filter(INDIVIDUALS %in% id) %>%
      dplyr::filter(MARKERS %in% wl)
    n.markers.bl <- nrow(blacklist.genotypes)
    if (n.markers.bl > 0) {
      if (verbose) message("Blacklisted genotypes: ", n.markers.bl)
      if (data.type == "tbl_df") {
        # Expand the blacklist to full rows of data, then erase every genotype
        # column present (GT, GT_VCF, GT_VCF_NUC, GT_BIN).
        blacklist.genotypes <- dplyr::semi_join(
          data,
          blacklist.genotypes,
          by = intersect(colnames(data), colnames(blacklist.genotypes)))
        if (tibble::has_name(blacklist.genotypes, "GT")) {
          blacklist.genotypes %<>% dplyr::mutate(GT = rep("000000", n()))
        }
        if (tibble::has_name(blacklist.genotypes, "GT_VCF")) {
          blacklist.genotypes %<>% dplyr::mutate(GT_VCF = rep("./.", n()))
        }
        if (tibble::has_name(blacklist.genotypes, "GT_VCF_NUC")) {
          blacklist.genotypes %<>% dplyr::mutate(GT_VCF_NUC = rep("./.", n()))
        }
        if (tibble::has_name(blacklist.genotypes, "GT_BIN")) {
          blacklist.genotypes %<>% dplyr::mutate(GT_BIN = rep(as.numeric(NA_character_), n()))
        }
        # Swap the erased rows back into the dataset.
        data %<>% dplyr::anti_join(
          blacklist.genotypes,
          by = intersect(colnames(data), colnames(blacklist.genotypes)))
        data %<>% dplyr::bind_rows(blacklist.genotypes)
        # TODO and optimize...
        # required because REF/ALT might change after deleting genotypes...
        # BUG FIX: explicit assignment; `data %<>% f(data = data)$input` relied
        # on ambiguous magrittr insertion into a `$`-wrapped call.
        data <- radiator::calibrate_alleles(data = data)$input
      }
      # GDS
      # BUG FIX: this branch was previously nested inside the tbl_df branch
      # above and was therefore unreachable.
      if (data.type == "SeqVarGDSClass") {
        message("Under construction...")
      } # End GDS
    } else {
      # BUG FIX: this message was previously the else of the tbl_df test, so
      # it fired for any GDS input instead of for an empty blacklist.
      message("There are no genotype left in the blacklist")
    }
    # Filter parameter file: update --------------------------------------------
    # filters.parameters <- update_parameters(
    #   generate = FALSE,
    #   initiate = FALSE,
    #   update = TRUE,
    #   parameter.obj = filters.parameters,
    #   data = data,
    #   filter.name = "whitelist markers",
    #   param.name = "whitelist.markers",
    #   values = n.markers.w,
    #   path.folder = path.folder,
    #   file.date = file.date,
    #   verbose = verbose)
    # if (verbose) {
    #   message("Number of individuals / strata / chrom / locus / SNP:")
    #   message("    Before: ", filters.parameters$filters.parameters$BEFORE)
    #   message("    Blacklisted: ", filters.parameters$filters.parameters$BLACKLIST)
    #   message("    After: ", filters.parameters$filters.parameters$AFTER)
    # }
  }# End !is.null
  return(data)
}#End filter_blacklist_genotypes
# The section that was in tidy_genomic_data
# if (is.null(blacklist.genotype)) { # no Whitelist
# if (verbose) message("Erasing genotype: no")
# } else {
# if (verbose) message("Erasing genotype: yes")
# want <- c("MARKERS", "CHROM", "LOCUS", "POS", "INDIVIDUALS")
# if (is.vector(blacklist.genotype)) {
# suppressWarnings(suppressMessages(
# blacklist.genotype <- readr::read_tsv(blacklist.genotype, col_names = TRUE)))
# }
# suppressWarnings(suppressMessages(
# blacklist.genotype <- blacklist.genotype %>%
# dplyr::mutate_at(.tbl = ., .vars = "INDIVIDUALS",
# .funs = clean_ind_names) %>%
# dplyr::select(dplyr::one_of(want)) %>%
# dplyr::mutate_all(.tbl = ., .funs = as.character, exclude = NA)))
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
#
# if (data.type == "haplo.file") {
# blacklist.genotype <- dplyr::select(.data = blacklist.genotype, INDIVIDUALS, LOCUS)
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
# }
#
# # control check to keep only individuals in the strata.df
# blacklist.genotype <- suppressWarnings(
# blacklist.genotype %>%
# dplyr::filter(INDIVIDUALS %in% strata.df$INDIVIDUALS)
# )
#
# # control check to keep only whitelisted markers from the blacklist of genotypes
# if (!is.null(whitelist.markers)) {
# if (verbose) message("Control check to keep only whitelisted markers present in the blacklist of genotypes to erase.")
# # updating the whitelist of markers to have all columns that id markers
# if (data.type == "vcf.file") {
# whitelist.markers.ind <- input %>% dplyr::distinct(CHROM, LOCUS, POS, INDIVIDUALS)
# } else {
# whitelist.markers.ind <- input %>% dplyr::distinct(LOCUS, INDIVIDUALS)
# }
#
# # updating the blacklist.genotype
# blacklist.genotype <- suppressWarnings(
# dplyr::semi_join(whitelist.markers.ind, blacklist.genotype,
# by = columns.names.blacklist.genotype))
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
# }
#
# # Update column names
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
#
# blacklisted.gen.number <- nrow(blacklist.genotype)
# if (blacklisted.gen.number > 0) {
# message(" Number of genotype(s) to erase: ", blacklisted.gen.number)
# input.erase <- dplyr::semi_join(
# input, blacklist.genotype, by = columns.names.blacklist.genotype) %>%
# dplyr::mutate(GT = rep("000000", n()))
# input <- dplyr::anti_join(
# input, blacklist.genotype, by = columns.names.blacklist.genotype)
# if (rlang::has_name(input.erase, "GT_VCF")) {
# input.erase <- dplyr::mutate(input.erase, GT_VCF = rep("./.", n()))
# }
#
# if (rlang::has_name(input.erase, "GT_VCF_NUC")) {
# input.erase <- dplyr::mutate(input.erase, GT_VCF_NUC = rep("./.", n()))
# }
#
# if (rlang::has_name(input.erase, "GT_BIN")) {
# input.erase <- dplyr::mutate(input.erase, GT_BIN = rep(as.numeric(NA_character_), n()))
# }
# input <- dplyr::bind_rows(input, input.erase)
# } else {
# message("There are no genotype left in the blacklist: input file left intact")
# }
#
# # required because REF/ALT might change after deleting genotypes...
# input <- radiator::calibrate_alleles(data = input)$input
# } # End erase genotypes
| /R/filter_blacklist_genotypes.R | no_license | italo-granato/radiator | R | false | false | 15,624 | r | # read_blacklist_genotypes ---------------------------------------------------------------
#' @name read_blacklist_genotypes
#' @title read blacklist of genotypes
#' @description Read a blacklist object or file.
#'
#'
#' Used internally in \href{https://github.com/thierrygosselin/radiator}{radiator}
#' and might be of interest for users.
#'
#' @param blacklist.genotypes (path or object)
#' The blacklist is an object in your
#' global environment or a file in the working directory (e.g. "blacklist.geno.tsv").
#' The dataframe contains at least these 2 columns: \code{MARKERS, INDIVIDUALS}.
#' Additional columns are allowed: \code{CHROM, LOCUS, POS}.
#'
#' Useful to erase genotypes with bad QC, e.g. genotype with more than 2 alleles
#' in diploid likely
#' sequencing errors or genotypes with poor genotype likelihood or coverage.
#'
#' Columns are cleaned of separators that interfere with some packages or codes, detailed in
#' \code{\link{clean_markers_names}} and \code{\link{clean_ind_names}}
#' Default \code{blacklist.genotypes = NULL}.
#'
#' @inheritParams radiator_common_arguments
#' @return A data frame blacklist with unique rows and cleaned names, or
#' \code{NULL} when no blacklist was supplied.
#' @examples
#' \dontrun{
#' bl <- radiator::read_blacklist_genotypes(
#'     blacklist.genotypes = "blacklist.geno.iguana.tsv")
#' }
#' @section Life cycle:
#'
#' This function arguments will be subject to changes. Currently the function uses
#' erase.genotypes, but using the \code{dots-dots-dots ...} arguments allows to
#' pass \code{erase.genotypes and masked.genotypes}. These arguments do exactly
#' the same thing and only one can be used.
#' @export
#' @rdname read_blacklist_genotypes
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com}
read_blacklist_genotypes <- function(
    blacklist.genotypes = NULL,
    verbose = FALSE,
    ...
) {
  # dotslist -------------------------------------------------------------------
  # erase.genotypes and masked.genotypes are accepted through ... as aliases
  # of blacklist.genotypes; anything else in ... is an error.
  dotslist <- rlang::dots_list(..., .homonyms = "error", .check_assign = TRUE)
  want <- c("erase.genotypes", "masked.genotypes")
  unknowned_param <- setdiff(names(dotslist), want)
  if (length(unknowned_param) > 0) {
    # BUG FIX: the joined parameter names were previously passed as the second
    # (class) argument of rlang::abort, so they never appeared in the message.
    rlang::abort(paste0(
      "Unknown \"...\" parameters: ",
      stringi::stri_join(unknowned_param, collapse = " ")))
  }
  radiator.dots <- dotslist[names(dotslist) %in% want]
  erase.genotypes <- radiator.dots[["erase.genotypes"]]
  masked.genotypes <- radiator.dots[["masked.genotypes"]]
  # At most one of the 3 alias arguments may be supplied.
  # BUG FIX: the check previously used && across all three, so it only fired
  # when ALL of them were non-NULL; supplying any two was silently accepted.
  n.supplied <- sum(
    !is.null(erase.genotypes),
    !is.null(masked.genotypes),
    !is.null(blacklist.genotypes))
  if (n.supplied > 1) {
    rlang::abort("Only one of: erase.genotypes, masked.genotypes or blacklist.genotypes should be used")
  }
  if (is.null(blacklist.genotypes)) {
    if (!is.null(erase.genotypes)) blacklist.genotypes <- erase.genotypes
    if (!is.null(masked.genotypes)) blacklist.genotypes <- masked.genotypes
  }
  if (!is.null(blacklist.genotypes)) {
    # Import blacklist: a bare vector is treated as a file path; read it from
    # disk and coerce every column to character.
    if (is.vector(blacklist.genotypes)) {
      blacklist.genotypes <- suppressMessages(
        readr::read_tsv(blacklist.genotypes, col_names = TRUE) %>%
          dplyr::mutate_all(.tbl = ., .funs = as.character))
    }
    # Drop duplicated rows, warning the user when any were present.
    nrow.before <- nrow(blacklist.genotypes)
    blacklist.genotypes <- dplyr::distinct(blacklist.genotypes)
    nrow.after <- nrow(blacklist.genotypes)
    duplicate.blacklist.genotypes <- nrow.before - nrow.after
    if (duplicate.blacklist.genotypes > 0) {
      if (verbose) message("Duplicated rows in blacklist genotypes: ", duplicate.blacklist.genotypes)
      if (verbose) message(" Creating unique blacklist")
      if (verbose) message(" Warning: downstream results might be impacted by this, check how you made your blacklist")
    }
    nrow.before <- duplicate.blacklist.genotypes <- nrow.after <- NULL
    # cleaning names of markers: every column except INDIVIDUALS holds marker
    # metadata and goes through clean_markers_names(); INDIVIDUALS goes
    # through clean_ind_names().
    need.cleaning <- purrr::discard(
      .x = colnames(blacklist.genotypes),
      .p = colnames(blacklist.genotypes) %in% "INDIVIDUALS")
    blacklist.genotypes <- dplyr::mutate_at(
      .tbl = blacklist.genotypes,
      .vars = need.cleaning,
      .funs = clean_markers_names)
    blacklist.genotypes <- dplyr::mutate_at(
      .tbl = blacklist.genotypes,
      .vars = "INDIVIDUALS",
      .funs = clean_ind_names)
  }
  # NOTE: the redundant `else { blacklist.genotypes <- NULL }` was removed;
  # when no blacklist was supplied the value is already NULL here.
  return(blacklist.genotypes)
}#End read_blacklist_genotypes
# filter_blacklist_genotypes ---------------------------------------------------------------
#' @name filter_blacklist_genotypes
#' @title Filter dataset with blacklist of genotypes
#' @description Filter dataset with blacklist of genotypes.
#'
#' This function allows to blacklist/erase/mask genotypes.
#'
#' Used internally in \href{https://github.com/thierrygosselin/radiator}{radiator}
#' and might be of interest for users.
#'
#' @param data (4 options) A file or object generated by radiator:
#' \itemize{
#' \item tidy data
#' \item Genomic Data Structure (GDS)
#' }
#'
#' \emph{How to get GDS and tidy data ?}
#' Look into \code{\link{tidy_genomic_data}},
#' \code{\link{write_seqarray}} or
#' \code{\link{tidy_vcf}}.
#'
#'
#' @inheritParams radiator_common_arguments
#' @inheritParams read_blacklist_genotypes
#' @examples
#' \dontrun{
#' data <- radiator::filter_blacklist_genotypes(
#' data = data, blacklist.geno = "blacklist.geno.tsv"
#' )
#' }
#' @section Life cycle:
#'
#' This function arguments will be subject to changes. Currently the function uses
#' erase.genotypes, but using the \code{dots-dots-dots ...} arguments allows to
#' pass \code{erase.genotypes and masked.genotypes}. These arguments do exactly
#' the same thing and only one can be used.
#' @export
#' @rdname filter_blacklist_genotypes
#' @author Thierry Gosselin \email{thierrygosselin@@icloud.com}
filter_blacklist_genotypes <- function(
data,
blacklist.genotypes,
verbose = TRUE,
...
) {
# Checking for missing and/or default arguments ------------------------------
if (missing(data)) rlang::abort("Input file missing")
file.date <- format(Sys.time(), "%Y%m%d@%H%M")# Date and time
# dotslist -------------------------------------------------------------------
dotslist <- rlang::dots_list(..., .homonyms = "error", .check_assign = TRUE)
want <- c("path.folder", "parameters", "erase.genotypes", "masked.genotypes")
unknowned_param <- setdiff(names(dotslist), want)
if (length(unknowned_param) > 0) {
rlang::abort("Unknowned \"...\" parameters ",
stringi::stri_join(unknowned_param, collapse = " "))
}
radiator.dots <- dotslist[names(dotslist) %in% want]
parameters <- radiator.dots[["parameters"]]
erase.genotypes <- radiator.dots[["erase.genotypes"]]
masked.genotypes <- radiator.dots[["masked.genotypes"]]
path.folder <- radiator.dots[["path.folder"]]
path.folder <- generate_folder(f = path.folder, file.date = file.date, verbose = verbose)
if (!is.null(erase.genotypes) && !is.null(masked.genotypes) && !is.null(blacklist.genotypes)) {
rlang::abort("Only one of: erase.genotypes, masked.genotypes or blacklist.genotypes should be used")
}
if (!is.null(erase.genotypes) && is.null(blacklist.genotypes)) {
blacklist.genotypes <- erase.genotypes
}
if (!is.null(masked.genotypes) && is.null(blacklist.genotypes)) {
blacklist.genotypes <- masked.genotypes
}
# read whitelist
blacklist.genotypes <- radiator::read_blacklist_genotypes(
blacklist.genotypes = blacklist.genotypes,
verbose = verbose)
if (!is.null(blacklist.genotypes)) {
# Import data ---------------------------------------------------------------
data.type <- radiator::detect_genomic_format(data)
if (!data.type %in% c("tbl_df", "fst.file", "SeqVarGDSClass", "gds.file")) {
rlang::abort("Input not supported for this function: read function documentation")
}
if (data.type %in% c("SeqVarGDSClass", "gds.file")) {
if (!"SeqVarTools" %in% utils::installed.packages()[,"Package"]) {
rlang::abort('Please install SeqVarTools for this option:\n
install.packages("BiocManager")
BiocManager::install("SeqVarTools")')
}
if (data.type == "gds.file") {
data <- radiator::read_rad(data, verbose = verbose)
data.type <- "SeqVarGDSClass"
}
} else {
if (is.vector(data)) data <- radiator::tidy_wide(data = data, import.metadata = TRUE)
data.type <- "tbl_df"
}
# Filter parameter file: generate and initiate -----------------------------
# TODO: modify the function to accomodate erasing genotypes...
# filters.parameters <- update_parameters(
# generate = TRUE,
# initiate = TRUE,
# update = FALSE,
# parameter.obj = parameters,
# data = data,
# path.folder = path.folder,
# file.date = file.date,
# verbose = verbose)
# checks -------------------------------------------------------------------
# check to keep only individuals present in the dataset and not already blacklisted
# check that markers bl are in dataset (not already blacklisted elsewhere...)
if (verbose) message("Checking matching individuals and markers between blacklist and data")
if (data.type == "tbl_df") {
id <- unique(data$INDIVIDUALS)
wl <- unique(data$MARKERS)
} else {
radiator.gds <- gdsfmt::index.gdsn(
node = data, path = "radiator", silent = TRUE)
id <- gdsfmt::index.gdsn(
node = radiator.gds, path = "individuals", silent = TRUE)
if (!is.null(id)) {
id <- gdsfmt::read.gdsn(id) %$% INDIVIDUALS
} else {
id <- SeqArray::seqGetData(gdsfile = data, var.name = "sample.id")
}
wl <- radiator::extract_markers_metadata(gds = data) %$% MARKERS
}
blacklist.genotypes %<>% dplyr::filter(INDIVIDUALS %in% id) %>%
dplyr::filter(MARKERS %in% wl)
n.markers.bl <- nrow(blacklist.genotypes)
if (n.markers.bl > 0) {
if (verbose) message("Blacklisted genotypes: ", n.markers.bl)
if (data.type == "tbl_df") {
blacklist.genotypes <- dplyr::semi_join(
data,
blacklist.genotypes,
by = intersect(colnames(data), colnames(blacklist.genotypes)))
if (tibble::has_name(blacklist.genotypes, "GT")) {
blacklist.genotypes %<>% dplyr::mutate(GT = rep("000000", n()))
}
if (tibble::has_name(blacklist.genotypes, "GT_VCF")) {
blacklist.genotypes %<>% dplyr::mutate(GT_VCF = rep("./.", n()))
}
if (tibble::has_name(blacklist.genotypes, "GT_VCF_NUC")) {
blacklist.genotypes %<>% dplyr::mutate(GT_VCF_NUC = rep("./.", n()))
}
if (tibble::has_name(blacklist.genotypes, "GT_BIN")) {
blacklist.genotypes %<>% dplyr::mutate(GT_BIN = rep(as.numeric(NA_character_), n()))
}
data %<>% dplyr::anti_join(
blacklist.genotypes,
by = intersect(colnames(data), colnames(blacklist.genotypes)))
data %<>% dplyr::bind_rows(blacklist.genotypes)
# TODO and optimize...
# required because REF/ALT might change after deleting genotypes...
data %<>% radiator::calibrate_alleles(data = data)$input
# GDS
if (data.type == "SeqVarGDSClass") {
message("Under construction...")
} # End GDS
} else {
message("There are no genotype left in the blacklist")
}
# Filter parameter file: update --------------------------------------------
# filters.parameters <- update_parameters(
# generate = FALSE,
# initiate = FALSE,
# update = TRUE,
# parameter.obj = filters.parameters,
# data = data,
# filter.name = "whitelist markers",
# param.name = "whitelist.markers",
# values = n.markers.w,
# path.folder = path.folder,
# file.date = file.date,
# verbose = verbose)
# if (verbose) {
# message("Number of individuals / strata / chrom / locus / SNP:")
# message(" Before: ", filters.parameters$filters.parameters$BEFORE)
# message(" Blacklisted: ", filters.parameters$filters.parameters$BLACKLIST)
# message(" After: ", filters.parameters$filters.parameters$AFTER)
# }
}# End !is.null
}
return(data)
}#End filter_blacklist_genotypes
# The section that was in tidy_genomic_data
# if (is.null(blacklist.genotype)) { # no Whitelist
# if (verbose) message("Erasing genotype: no")
# } else {
# if (verbose) message("Erasing genotype: yes")
# want <- c("MARKERS", "CHROM", "LOCUS", "POS", "INDIVIDUALS")
# if (is.vector(blacklist.genotype)) {
# suppressWarnings(suppressMessages(
# blacklist.genotype <- readr::read_tsv(blacklist.genotype, col_names = TRUE)))
# }
# suppressWarnings(suppressMessages(
# blacklist.genotype <- blacklist.genotype %>%
# dplyr::mutate_at(.tbl = ., .vars = "INDIVIDUALS",
# .funs = clean_ind_names) %>%
# dplyr::select(dplyr::one_of(want)) %>%
# dplyr::mutate_all(.tbl = ., .funs = as.character, exclude = NA)))
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
#
# if (data.type == "haplo.file") {
# blacklist.genotype <- dplyr::select(.data = blacklist.genotype, INDIVIDUALS, LOCUS)
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
# }
#
# # control check to keep only individuals in the strata.df
# blacklist.genotype <- suppressWarnings(
# blacklist.genotype %>%
# dplyr::filter(INDIVIDUALS %in% strata.df$INDIVIDUALS)
# )
#
# # control check to keep only whitelisted markers from the blacklist of genotypes
# if (!is.null(whitelist.markers)) {
# if (verbose) message("Control check to keep only whitelisted markers present in the blacklist of genotypes to erase.")
# # updating the whitelist of markers to have all columns that id markers
# if (data.type == "vcf.file") {
# whitelist.markers.ind <- input %>% dplyr::distinct(CHROM, LOCUS, POS, INDIVIDUALS)
# } else {
# whitelist.markers.ind <- input %>% dplyr::distinct(LOCUS, INDIVIDUALS)
# }
#
# # updating the blacklist.genotype
# blacklist.genotype <- suppressWarnings(
# dplyr::semi_join(whitelist.markers.ind, blacklist.genotype,
# by = columns.names.blacklist.genotype))
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
# }
#
# # Update column names
# columns.names.blacklist.genotype <- colnames(blacklist.genotype)
#
# blacklisted.gen.number <- nrow(blacklist.genotype)
# if (blacklisted.gen.number > 0) {
# message(" Number of genotype(s) to erase: ", blacklisted.gen.number)
# input.erase <- dplyr::semi_join(
# input, blacklist.genotype, by = columns.names.blacklist.genotype) %>%
# dplyr::mutate(GT = rep("000000", n()))
# input <- dplyr::anti_join(
# input, blacklist.genotype, by = columns.names.blacklist.genotype)
# if (rlang::has_name(input.erase, "GT_VCF")) {
# input.erase <- dplyr::mutate(input.erase, GT_VCF = rep("./.", n()))
# }
#
# if (rlang::has_name(input.erase, "GT_VCF_NUC")) {
# input.erase <- dplyr::mutate(input.erase, GT_VCF_NUC = rep("./.", n()))
# }
#
# if (rlang::has_name(input.erase, "GT_BIN")) {
# input.erase <- dplyr::mutate(input.erase, GT_BIN = rep(as.numeric(NA_character_), n()))
# }
# input <- dplyr::bind_rows(input, input.erase)
# } else {
# message("There are no genotype left in the blacklist: input file left intact")
# }
#
# # required because REF/ALT might change after deleting genotypes...
# input <- radiator::calibrate_alleles(data = input)$input
# } # End erase genotypes
|
# global.R ####
# Coursera Data Science Capstone Project (https://www.coursera.org/course/dsscapstone)
# Shiny script for loading data into global environment
# 2016-02-05

# Libraries and options ####
library(shiny)
library(tm)
library(quanteda)
library(stringr)
library(wordcloud)
library(RColorBrewer)

# set directory
# NOTE(review): hard-coded, machine-specific path; this breaks on any other
# machine and inside a deployed Shiny app.  Prefer paths relative to the app
# directory instead of setwd() -- confirm before changing, since the readRDS()
# calls below use paths relative to this directory.
setwd("~/Documents/DS_final/shiny")

# Load data ####
# Read the pre-computed n-gram tables saved as .rds files.
unigrams<-readRDS("../rds/unigram.rds")
# For unigrams the whole n-gram serves as both the "sentence" part and the
# prediction.  NOTE(review): "senctence" looks like a typo for "sentence",
# but the column name is presumably relied on by functions.R -- confirm
# against that file before renaming.
unigrams$senctence<- unigrams$ngram
unigrams$prediction<- unigrams$ngram
bigrams<-readRDS("../rds/bigram.rds")
trigrams<-readRDS("../rds/trigram.rds")
quatgrams<-readRDS("../rds/quatgram.rds")

# load functions
source("functions.R") | /shiny/global.R | no_license | CristelVeefkind/DSCapstone | R | false | false | 673 | r | # global.R ####
# Coursera Data Science Capstone Project (https://www.coursera.org/course/dsscapstone)
# Shiny script for loading data into global environment
# 2016-02-05

# Libraries and options ####
library(shiny)
library(tm)
library(quanteda)
library(stringr)
library(wordcloud)
library(RColorBrewer)

# set directory
# NOTE(review): hard-coded, machine-specific path; breaks on any other
# machine or deployment target.  The readRDS() calls below depend on it,
# so confirm before removing.
setwd("~/Documents/DS_final/shiny")

# Load data ####
# Read the pre-computed n-gram tables saved as .rds files.
unigrams<-readRDS("../rds/unigram.rds")
# For unigrams the whole n-gram serves as both the "sentence" and the
# prediction.  NOTE(review): "senctence" appears to be a typo for
# "sentence"; functions.R presumably uses this exact column name --
# verify before fixing.
unigrams$senctence<- unigrams$ngram
unigrams$prediction<- unigrams$ngram
bigrams<-readRDS("../rds/bigram.rds")
trigrams<-readRDS("../rds/trigram.rds")
quatgrams<-readRDS("../rds/quatgram.rds")

# load functions
source("functions.R") |
addhead <-
function(part, Npart=2, Massarr=0, Time=0, z=0, FlagSfr=0, FlagFeedback=0,
         FlagCooling=0, BoxSize=0, OmegaM=0, OmegaL=0, h=1, FlagAge=0,
         FlagMetals=0, NallHW=0, flag_entr_ics=0){
  # Attach a snapshot header to a particle table (field names match the
  # Gadget snapshot header layout -- confirm against the file writer).
  #
  # part:    table of particle data, one row per particle.
  # Npart:   particle-type slot (1-6) the particles in `part` belong to.
  # Massarr, NallHW: scalars placed in slot `Npart` of the corresponding
  #          length-6 header vectors; all other slots stay 0.
  # All remaining arguments are copied into the header unchanged.
  #
  # Returns a list with elements `part` (the input, untouched) and `head`
  # (the header list).
  #
  # Fixes vs. the original: the original assigned to a local variable
  # literally named `return` (`return=list(...)`) instead of returning the
  # value explicitly, and listed FlagMetals twice in the header (copy-paste
  # slip); both are corrected here.

  # Number of particles (rows of `part`).
  N <- nrow(part)

  # Helper: a length-6 vector that is zero except for `value` in slot Npart.
  slot_vec <- function(value) {
    v <- rep(0, 6)
    v[Npart] <- value
    v
  }
  NpartVec <- slot_vec(N)
  MassarrVec <- slot_vec(Massarr)
  NallVec <- slot_vec(N)
  NallHWVec <- slot_vec(NallHW)

  list(part = part,
       head = list(Npart = NpartVec, Massarr = MassarrVec, Time = Time, z = z,
                   FlagSfr = FlagSfr, FlagFeedback = FlagFeedback,
                   Nall = NallVec, FlagCooling = FlagCooling, NumFiles = 1,
                   BoxSize = BoxSize, OmegaM = OmegaM, OmegaL = OmegaL, h = h,
                   FlagAge = FlagAge, FlagMetals = FlagMetals,
                   NallHW = NallHWVec, flag_entr_ics = flag_entr_ics))
}
| /snapshot/R/addhead.R | no_license | ingted/R-Examples | R | false | false | 717 | r | addhead <-
function(part, Npart=2, Massarr=0, Time=0, z=0, FlagSfr=0, FlagFeedback=0, FlagCooling=0, BoxSize=0, OmegaM=0, OmegaL=0, h=1, FlagAge=0, FlagMetals=0, NallHW=0, flag_entr_ics=0){
  # Attach a snapshot header to a particle table (field names match the
  # Gadget snapshot header layout -- confirm against the file writer).
  # `Npart` selects which of the 6 particle-type slots the particles in
  # `part` occupy; Massarr and NallHW are placed in the same slot of their
  # length-6 vectors.  Returns list(part, head).

  # N = number of particles (rows of `part`).
  N=length(part[,1])
  # Length-6 slot vectors: zero everywhere except slot `Npart`.
  NpartVec=rep(0,6)
  NpartVec[Npart]=N
  MassarrVec=rep(0,6)
  MassarrVec[Npart]=Massarr
  NallVec=rep(0,6)
  NallVec[Npart]=N
  NallHWVec=rep(0,6)
  NallHWVec[Npart]=NallHW
  # NOTE(review): this assigns to a local variable literally named `return`
  # rather than calling return(); the assignment is the last expression, so
  # the list is still returned (invisibly).  Also FlagMetals appears twice
  # in the head list -- almost certainly a copy-paste slip.
  return=list(part=part,head=list(Npart=NpartVec, Massarr=MassarrVec, Time=Time, z=z, FlagSfr=FlagSfr, FlagFeedback=FlagFeedback, Nall=NallVec, FlagCooling=FlagCooling, NumFiles=1, BoxSize=BoxSize, OmegaM=OmegaM, OmegaL=OmegaL, h=h, FlagAge=FlagAge, FlagMetals=FlagMetals, FlagMetals=FlagMetals, NallHW=NallHWVec, flag_entr_ics=flag_entr_ics))
}
|
# -------------------------------------------------------------------------------
# This file is part of rangerts.
#
# rangerts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# -------------------------------------------------------------------------------
#' Tree information in human readable format
#'
#' Extract tree information of a \code{rangerts} object.
#'
#' Node and variable ID's are 0-indexed, i.e., node 0 is the root node.
#' If the formula interface is used in the \code{rangerts} call, the variable ID's are usually different to the original data used to grow the tree.
#' Refer to the variable name instead to be sure.
#'
#' Splitting at unordered factors (nominal variables) depends on the option \code{respect.unordered.factors} in the \code{rangerts} call.
#' For the "ignore" and "order" approaches, all values smaller or equal the \code{splitval} value go to the left and all values larger go to the right, as usual.
#' However, with "order" the values correspond to the order in \code{object$forest$covariate.levels} instead of the original order (usually alphabetical).
#' In the "partition" mode, the \code{splitval} values for unordered factor are comma separated lists of values, representing the factor levels (in the original order) going to the right.
#'
#' @param object \code{rangerts} object.
#' @param tree Number of the tree of interest.
#' @return A data.frame with the columns
#' \tabular{ll}{
#' \code{nodeID} \tab The nodeID, 0-indexed. \cr
#' \code{leftChild} \tab ID of the left child node, 0-indexed. \cr
#' \code{rightChild} \tab ID of the right child node, 0-indexed. \cr
#' \code{splitvarID} \tab ID of the splitting variable, 0-indexed. Caution, the variable order changes if the formula interface is used. \cr
#' \code{splitvarName} \tab Name of the splitting variable. \cr
#' \code{splitval} \tab The splitting value. For numeric or ordinal variables, all values smaller or equal go to the left, larger values to the right. For unordered factor variables see above. \cr
#' \code{terminal} \tab Logical, TRUE for terminal nodes. \cr
#' \code{prediction} \tab One column with the predicted class (factor) for classification and the predicted numerical value for regression. One probability per class for probability estimation in several columns. Nothing for survival, refer to \code{object$forest$chf} for the CHF node predictions. \cr
#' }
#' @examples
#' rf <- rangerts(Species ~ ., data = iris)
#' treeInfo(rf, 1)
#' @seealso \code{\link{rangerts}}
#' @author Marvin N. Wright
#' @export
treeInfo <- function(object, tree = 1) {
  # Extract one tree of a rangerts forest as a human-readable data.frame
  # (one row per node).  See the roxygen block above for column semantics.
  # The input must be a rangerts object whose forest was saved.
  if (!inherits(object, "rangerts")) {
    stop("Error: Invalid class of input object.")
  }
  forest <- object$forest
  if (is.null(forest)) {
    stop("Error: No saved forest in rangerts object. Please set write.forest to TRUE when calling rangerts.")
  }
  # The forest must carry every component needed to reconstruct the tree.
  if (is.null(forest$num.trees) ||
      is.null(forest$child.nodeIDs) || is.null(forest$split.varIDs) ||
      is.null(forest$split.values) || is.null(forest$independent.variable.names) ||
      is.null(forest$treetype)) {
    stop("Error: Invalid forest object.")
  }
  # Survival forests additionally need the CHF and the death times.
  if (forest$treetype == "Survival" && (is.null(forest$chf) || is.null(forest$unique.death.times))) {
    stop("Error: Invalid forest object.")
  }
  # child.nodeIDs must hold one element per tree, each a pair of
  # left/right child-ID vectors; older versions used a different layout.
  if (length(forest$child.nodeIDs) != forest$num.trees || length(forest$child.nodeIDs[[1]]) != 2) {
    stop("Error: Invalid forest object. Is the forest grown in rangerts version <0.3.9? Try with the same version the forest was grown.")
  }
  # Forests grown before the x/y interface carry dependent.varID; convert.
  if (!is.null(forest$dependent.varID)) {
    forest <- convert.pre.xy(forest, trees = tree)
  }
  if (tree > forest$num.trees) {
    stop("Error: Requesting tree ", tree, ", but forest has only ", forest$num.trees, " trees.")
  }
  # One row per node; node and variable IDs are 0-indexed.
  # splitvarName is a placeholder here and is filled in below.
  result <- data.frame(nodeID = 0:(length(forest$split.values[[tree]]) - 1),
                       leftChild = forest$child.nodeIDs[[tree]][[1]],
                       rightChild = forest$child.nodeIDs[[tree]][[2]],
                       splitvarID = forest$split.varIDs[[tree]],
                       splitvarName = "X",
                       splitval = forest$split.values[[tree]],
                       terminal = FALSE)
  # Child ID 0 means "no child"; recode to NA, then mark nodes without a
  # left child as terminal and blank out their split information.
  result$leftChild[result$leftChild == 0] <- NA
  result$rightChild[result$rightChild == 0] <- NA
  result$terminal[is.na(result$leftChild)] <- TRUE
  result$splitvarID[result$terminal] <- NA
  result$splitvarName[result$terminal] <- NA
  result$splitval[result$terminal] <- NA
  # Map 0-indexed splitvarID to variable names (NA IDs stay NA).
  result$splitvarName <- forest$independent.variable.names[result$splitvarID + 1]
  ## Unordered splitting
  # For unordered factors the split value encodes, as a bit mask, which
  # factor levels go to the right child.
  idx.unordered <- !result$terminal & !forest$is.ordered[result$splitvarID + 1]
  if (any(idx.unordered)) {
    # intToBits() only covers 32-bit integers, so at most 31 levels.
    if (any(result$splitval[idx.unordered] > (2^31 - 1))) {
      warning("Unordered splitting levels can only be shown for up to 31 levels.")
      result$splitval[idx.unordered] <- NA
    } else {
      # Decode each bit mask into a comma-separated list of level indices.
      result$splitval[idx.unordered] <- sapply(result$splitval[idx.unordered], function(x) {
        paste(which(as.logical(intToBits(x))), collapse = ",")
      })
    }
  }
  ## Prediction
  # Terminal-node predictions are stored in split.values for classification
  # and regression trees; probability trees keep per-class counts separately.
  if (forest$treetype == "Classification") {
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
    # If factor levels were recorded, map numeric class values back to labels.
    if (!is.null(forest$levels)) {
      result$prediction <- factor(result$prediction, levels = forest$class.values, labels = forest$levels)
    }
  } else if (forest$treetype == "Regression") {
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
  } else if (forest$treetype == "Probability estimation") {
    # One pred.<level> column per class; rows for non-terminal nodes stay NA.
    predictions <- matrix(nrow = nrow(result), ncol = length(forest$levels))
    predictions[result$terminal, ] <- do.call(rbind, forest$terminal.class.counts[[tree]])
    colnames(predictions) <- paste0("pred.", forest$levels)
    result <- data.frame(result, predictions)
  } else if (forest$treetype == "Survival") {
    # No prediction for survival (CHF too large?)
  } else {
    stop("Error: Unknown tree type.")
  }
  result
}
| /R/treeInfo.R | no_license | hyanworkspace/rangerts | R | false | false | 6,221 | r | # -------------------------------------------------------------------------------
# This file is part of rangerts.
#
# rangerts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# -------------------------------------------------------------------------------
#' Tree information in human readable format
#'
#' Extract tree information of a \code{rangerts} object.
#'
#' Node and variable ID's are 0-indexed, i.e., node 0 is the root node.
#' If the formula interface is used in the \code{rangerts} call, the variable ID's are usually different to the original data used to grow the tree.
#' Refer to the variable name instead to be sure.
#'
#' Splitting at unordered factors (nominal variables) depends on the option \code{respect.unordered.factors} in the \code{rangerts} call.
#' For the "ignore" and "order" approaches, all values smaller or equal the \code{splitval} value go to the left and all values larger go to the right, as usual.
#' However, with "order" the values correspond to the order in \code{object$forest$covariate.levels} instead of the original order (usually alphabetical).
#' In the "partition" mode, the \code{splitval} values for unordered factor are comma separated lists of values, representing the factor levels (in the original order) going to the right.
#'
#' @param object \code{rangerts} object.
#' @param tree Number of the tree of interest.
#' @return A data.frame with the columns
#' \tabular{ll}{
#' \code{nodeID} \tab The nodeID, 0-indexed. \cr
#' \code{leftChild} \tab ID of the left child node, 0-indexed. \cr
#' \code{rightChild} \tab ID of the right child node, 0-indexed. \cr
#' \code{splitvarID} \tab ID of the splitting variable, 0-indexed. Caution, the variable order changes if the formula interface is used. \cr
#' \code{splitvarName} \tab Name of the splitting variable. \cr
#' \code{splitval} \tab The splitting value. For numeric or ordinal variables, all values smaller or equal go to the left, larger values to the right. For unordered factor variables see above. \cr
#' \code{terminal} \tab Logical, TRUE for terminal nodes. \cr
#' \code{prediction} \tab One column with the predicted class (factor) for classification and the predicted numerical value for regression. One probability per class for probability estimation in several columns. Nothing for survival, refer to \code{object$forest$chf} for the CHF node predictions. \cr
#' }
#' @examples
#' rf <- rangerts(Species ~ ., data = iris)
#' treeInfo(rf, 1)
#' @seealso \code{\link{rangerts}}
#' @author Marvin N. Wright
#' @export
treeInfo <- function(object, tree = 1) {
  # Extract one tree of a rangerts forest as a human-readable data.frame
  # (one row per node).  See the roxygen block above for column semantics.
  # The input must be a rangerts object whose forest was saved.
  if (!inherits(object, "rangerts")) {
    stop("Error: Invalid class of input object.")
  }
  forest <- object$forest
  if (is.null(forest)) {
    stop("Error: No saved forest in rangerts object. Please set write.forest to TRUE when calling rangerts.")
  }
  # The forest must carry every component needed to reconstruct the tree.
  if (is.null(forest$num.trees) ||
      is.null(forest$child.nodeIDs) || is.null(forest$split.varIDs) ||
      is.null(forest$split.values) || is.null(forest$independent.variable.names) ||
      is.null(forest$treetype)) {
    stop("Error: Invalid forest object.")
  }
  # Survival forests additionally need the CHF and the death times.
  if (forest$treetype == "Survival" && (is.null(forest$chf) || is.null(forest$unique.death.times))) {
    stop("Error: Invalid forest object.")
  }
  # child.nodeIDs must hold one element per tree, each a pair of
  # left/right child-ID vectors; older versions used a different layout.
  if (length(forest$child.nodeIDs) != forest$num.trees || length(forest$child.nodeIDs[[1]]) != 2) {
    stop("Error: Invalid forest object. Is the forest grown in rangerts version <0.3.9? Try with the same version the forest was grown.")
  }
  # Forests grown before the x/y interface carry dependent.varID; convert.
  if (!is.null(forest$dependent.varID)) {
    forest <- convert.pre.xy(forest, trees = tree)
  }
  if (tree > forest$num.trees) {
    stop("Error: Requesting tree ", tree, ", but forest has only ", forest$num.trees, " trees.")
  }
  # One row per node; node and variable IDs are 0-indexed.
  # splitvarName is a placeholder here and is filled in below.
  result <- data.frame(nodeID = 0:(length(forest$split.values[[tree]]) - 1),
                       leftChild = forest$child.nodeIDs[[tree]][[1]],
                       rightChild = forest$child.nodeIDs[[tree]][[2]],
                       splitvarID = forest$split.varIDs[[tree]],
                       splitvarName = "X",
                       splitval = forest$split.values[[tree]],
                       terminal = FALSE)
  # Child ID 0 means "no child"; recode to NA, then mark nodes without a
  # left child as terminal and blank out their split information.
  result$leftChild[result$leftChild == 0] <- NA
  result$rightChild[result$rightChild == 0] <- NA
  result$terminal[is.na(result$leftChild)] <- TRUE
  result$splitvarID[result$terminal] <- NA
  result$splitvarName[result$terminal] <- NA
  result$splitval[result$terminal] <- NA
  # Map 0-indexed splitvarID to variable names (NA IDs stay NA).
  result$splitvarName <- forest$independent.variable.names[result$splitvarID + 1]
  ## Unordered splitting
  # For unordered factors the split value encodes, as a bit mask, which
  # factor levels go to the right child.
  idx.unordered <- !result$terminal & !forest$is.ordered[result$splitvarID + 1]
  if (any(idx.unordered)) {
    # intToBits() only covers 32-bit integers, so at most 31 levels.
    if (any(result$splitval[idx.unordered] > (2^31 - 1))) {
      warning("Unordered splitting levels can only be shown for up to 31 levels.")
      result$splitval[idx.unordered] <- NA
    } else {
      # Decode each bit mask into a comma-separated list of level indices.
      result$splitval[idx.unordered] <- sapply(result$splitval[idx.unordered], function(x) {
        paste(which(as.logical(intToBits(x))), collapse = ",")
      })
    }
  }
  ## Prediction
  # Terminal-node predictions are stored in split.values for classification
  # and regression trees; probability trees keep per-class counts separately.
  if (forest$treetype == "Classification") {
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
    # If factor levels were recorded, map numeric class values back to labels.
    if (!is.null(forest$levels)) {
      result$prediction <- factor(result$prediction, levels = forest$class.values, labels = forest$levels)
    }
  } else if (forest$treetype == "Regression") {
    result$prediction <- forest$split.values[[tree]]
    result$prediction[!result$terminal] <- NA
  } else if (forest$treetype == "Probability estimation") {
    # One pred.<level> column per class; rows for non-terminal nodes stay NA.
    predictions <- matrix(nrow = nrow(result), ncol = length(forest$levels))
    predictions[result$terminal, ] <- do.call(rbind, forest$terminal.class.counts[[tree]])
    colnames(predictions) <- paste0("pred.", forest$levels)
    result <- data.frame(result, predictions)
  } else if (forest$treetype == "Survival") {
    # No prediction for survival (CHF too large?)
  } else {
    stop("Error: Unknown tree type.")
  }
  result
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamodb_operations.R
\name{dynamodb_get_item}
\alias{dynamodb_get_item}
\title{The GetItem operation returns a set of attributes for the item with the
given primary key}
\usage{
dynamodb_get_item(TableName, Key, AttributesToGet, ConsistentRead,
ReturnConsumedCapacity, ProjectionExpression, ExpressionAttributeNames)
}
\arguments{
\item{TableName}{[required] The name of the table containing the requested item.}
\item{Key}{[required] A map of attribute names to \code{AttributeValue} objects, representing the
primary key of the item to retrieve.
For the primary key, you must provide all of the attributes. For
example, with a simple primary key, you only need to provide a value for
the partition key. For a composite primary key, you must provide values
for both the partition key and the sort key.}
\item{AttributesToGet}{This is a legacy parameter. Use \code{ProjectionExpression} instead. For more
information, see
\href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html}{AttributesToGet}
in the \emph{Amazon DynamoDB Developer Guide}.}
\item{ConsistentRead}{Determines the read consistency model: If set to \code{true}, then the
operation uses strongly consistent reads; otherwise, the operation uses
eventually consistent reads.}
\item{ReturnConsumedCapacity}{Determines the level of detail about provisioned
throughput consumption that is returned in the response.}
\item{ProjectionExpression}{A string that identifies one or more attributes to retrieve from the
table. These attributes can include scalars, sets, or elements of a JSON
document. The attributes in the expression must be separated by commas.
If no attribute names are specified, then all attributes are returned.
If any of the requested attributes are not found, they do not appear in
the result.
For more information, see \href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html}{Specifying Item Attributes}
in the \emph{Amazon DynamoDB Developer Guide}.}
\item{ExpressionAttributeNames}{One or more substitution tokens for attribute names in an expression.
The following are some use cases for using \code{ExpressionAttributeNames}:
\itemize{
\item To access an attribute whose name conflicts with a DynamoDB reserved
word.
\item To create a placeholder for repeating occurrences of an attribute
name in an expression.
\item To prevent special characters in an attribute name from being
misinterpreted in an expression.
}
Use the \strong{\\#} character in an expression to dereference an attribute
name. For example, consider the following attribute name:
\itemize{
\item \code{Percentile}
}
The name of this attribute conflicts with a reserved word, so it cannot
be used directly in an expression. (For the complete list of reserved
words, see \href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html}{Reserved Words}
in the \emph{Amazon DynamoDB Developer Guide}). To work around this, you
could specify the following for \code{ExpressionAttributeNames}:
\itemize{
\item \verb{\\\{"#P":"Percentile"\\\}}
}
You could then use this substitution in an expression, as in this
example:
\itemize{
\item \verb{#P = :val}
}
Tokens that begin with the \strong{:} character are \emph{expression attribute
values}, which are placeholders for the actual value at runtime.
For more information on expression attribute names, see \href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html}{Specifying Item Attributes}
in the \emph{Amazon DynamoDB Developer Guide}.}
}
\description{
The \code{GetItem} operation returns a set of attributes for the item with
the given primary key. If there is no matching item, \code{GetItem} does not
return any data and there will be no \code{Item} element in the response.
}
\details{
\code{GetItem} provides an eventually consistent read by default. If your
application requires a strongly consistent read, set \code{ConsistentRead} to
\code{true}. Although a strongly consistent read might take more time than an
eventually consistent read, it always returns the last updated value.
}
\section{Request syntax}{
\preformatted{svc$get_item(
TableName = "string",
Key = list(
list(
S = "string",
N = "string",
B = raw,
SS = list(
"string"
),
NS = list(
"string"
),
BS = list(
raw
),
M = list(
list()
),
L = list(
list()
),
NULL = TRUE|FALSE,
BOOL = TRUE|FALSE
)
),
AttributesToGet = list(
"string"
),
ConsistentRead = TRUE|FALSE,
ReturnConsumedCapacity = "INDEXES"|"TOTAL"|"NONE",
ProjectionExpression = "string",
ExpressionAttributeNames = list(
"string"
)
)
}
}
\examples{
\dontrun{
# This example retrieves an item from the Music table. The table has a
# partition key and a sort key (Artist and SongTitle), so you must specify
# both of these attributes.
svc$get_item(
Key = list(
Artist = list(
S = "Acme Band"
),
SongTitle = list(
S = "Happy Day"
)
),
TableName = "Music"
)
}
}
\keyword{internal}
| /cran/paws.database/man/dynamodb_get_item.Rd | permissive | johnnytommy/paws | R | false | true | 5,200 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamodb_operations.R
\name{dynamodb_get_item}
\alias{dynamodb_get_item}
\title{The GetItem operation returns a set of attributes for the item with the
given primary key}
\usage{
dynamodb_get_item(TableName, Key, AttributesToGet, ConsistentRead,
ReturnConsumedCapacity, ProjectionExpression, ExpressionAttributeNames)
}
\arguments{
\item{TableName}{[required] The name of the table containing the requested item.}
\item{Key}{[required] A map of attribute names to \code{AttributeValue} objects, representing the
primary key of the item to retrieve.
For the primary key, you must provide all of the attributes. For
example, with a simple primary key, you only need to provide a value for
the partition key. For a composite primary key, you must provide values
for both the partition key and the sort key.}
\item{AttributesToGet}{This is a legacy parameter. Use \code{ProjectionExpression} instead. For more
information, see
\href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html}{AttributesToGet}
in the \emph{Amazon DynamoDB Developer Guide}.}
\item{ConsistentRead}{Determines the read consistency model: If set to \code{true}, then the
operation uses strongly consistent reads; otherwise, the operation uses
eventually consistent reads.}
\item{ReturnConsumedCapacity}{Determines the level of detail about provisioned
throughput consumption that is returned in the response.}
\item{ProjectionExpression}{A string that identifies one or more attributes to retrieve from the
table. These attributes can include scalars, sets, or elements of a JSON
document. The attributes in the expression must be separated by commas.
If no attribute names are specified, then all attributes are returned.
If any of the requested attributes are not found, they do not appear in
the result.
For more information, see \href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html}{Specifying Item Attributes}
in the \emph{Amazon DynamoDB Developer Guide}.}
\item{ExpressionAttributeNames}{One or more substitution tokens for attribute names in an expression.
The following are some use cases for using \code{ExpressionAttributeNames}:
\itemize{
\item To access an attribute whose name conflicts with a DynamoDB reserved
word.
\item To create a placeholder for repeating occurrences of an attribute
name in an expression.
\item To prevent special characters in an attribute name from being
misinterpreted in an expression.
}
Use the \strong{\\#} character in an expression to dereference an attribute
name. For example, consider the following attribute name:
\itemize{
\item \code{Percentile}
}
The name of this attribute conflicts with a reserved word, so it cannot
be used directly in an expression. (For the complete list of reserved
words, see \href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html}{Reserved Words}
in the \emph{Amazon DynamoDB Developer Guide}). To work around this, you
could specify the following for \code{ExpressionAttributeNames}:
\itemize{
\item \verb{\\\{"#P":"Percentile"\\\}}
}
You could then use this substitution in an expression, as in this
example:
\itemize{
\item \verb{#P = :val}
}
Tokens that begin with the \strong{:} character are \emph{expression attribute
values}, which are placeholders for the actual value at runtime.
For more information on expression attribute names, see \href{https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html}{Specifying Item Attributes}
in the \emph{Amazon DynamoDB Developer Guide}.}
}
\description{
The \code{GetItem} operation returns a set of attributes for the item with
the given primary key. If there is no matching item, \code{GetItem} does not
return any data and there will be no \code{Item} element in the response.
}
\details{
\code{GetItem} provides an eventually consistent read by default. If your
application requires a strongly consistent read, set \code{ConsistentRead} to
\code{true}. Although a strongly consistent read might take more time than an
eventually consistent read, it always returns the last updated value.
}
\section{Request syntax}{
\preformatted{svc$get_item(
TableName = "string",
Key = list(
list(
S = "string",
N = "string",
B = raw,
SS = list(
"string"
),
NS = list(
"string"
),
BS = list(
raw
),
M = list(
list()
),
L = list(
list()
),
NULL = TRUE|FALSE,
BOOL = TRUE|FALSE
)
),
AttributesToGet = list(
"string"
),
ConsistentRead = TRUE|FALSE,
ReturnConsumedCapacity = "INDEXES"|"TOTAL"|"NONE",
ProjectionExpression = "string",
ExpressionAttributeNames = list(
"string"
)
)
}
}
\examples{
\dontrun{
# This example retrieves an item from the Music table. The table has a
# partition key and a sort key (Artist and SongTitle), so you must specify
# both of these attributes.
svc$get_item(
Key = list(
Artist = list(
S = "Acme Band"
),
SongTitle = list(
S = "Happy Day"
)
),
TableName = "Music"
)
}
}
\keyword{internal}
|
################################################################################
## Description: GAM mean/SE map at binned_survey_age
## Description: or delta SE, the difference between SE at binned_survey_age and SE at 12 months
## Sub-national SIAs: not included in prediction
## Spatial scale: single country
## MeanOrSE: "Mean", "SE"
## Args:
##   Country: country name, matched against the global `codes$Country` lookup table
##   binned_survey_age: age in months; selects the "Mean_<age>m" / "SE_<age>m" column
##   MeanOrSE: "Mean" plots inverse-logit coverage; "SE" plots the standard error
##   deltaSEfrom12: for SE maps (age != 12), plot SE(age) - SE(12m) instead of raw SE
##   GreenMagentaColorScale: for Mean maps, magenta-to-green palette (TRUE) or RdYlGn (FALSE)
## Side effects: draws the finished map on the current graphics device via grid.draw()
## NOTE(review): depends on objects `g_df` and `grid_tmp_lt5y` brought into scope by
## load() below, and on the global `codes` data frame -- confirm these exist at call time
getSmoothedMap_noSubSIA_Single <- function(Country, binned_survey_age, MeanOrSE="Mean", deltaSEfrom12=TRUE, GreenMagentaColorScale=TRUE) {
  # Get the rest
  # Row index of this country in the `codes` lookup table
  i <- as.numeric(rownames(codes))[which(Country==codes$Country)]
  # Placeholders
  DHS_code <- codes$DHS_code[i]
  DHS_name <- codes$DHS_name[i]
  ADM_code <- codes$ADM_code[i]
  # Load the Della output
  # (presumably provides `g_df` and `grid_tmp_lt5y` -- TODO confirm .RData contents)
  load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_", DHS_code,"_6-60m.RData"))
  # Get border lines to add
  adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
  # Plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0(MeanOrSE, "_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # For a map of means:
  if(MeanOrSE=="Mean"){
    # Legend name
    legend_name <- substitute(paste("Mean at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
    if(GreenMagentaColorScale==TRUE) {
      # Fill = inverse-logit (plogis) of the GAM mean; contour lines every 5 points of coverage
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=c('#8e0152','#c51b7d','#de77ae','#f1b6da','#fde0ef','#e6f5d0','#b8e186','#7fbc41','#4d9221','#276419')) +
        # NOTE(review): z= is not a geom_polygon aesthetic; group= was probably intended -- confirm rendering
        geom_polygon(data=adm0, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    else {
      # Same layers as above but with an RdYlGn fill palette
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=brewer.pal(10, "RdYlGn")) +
        geom_polygon(data=adm0, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
  }
  # For a map of SEs:
  else if(MeanOrSE=="SE") {
    # Raw SE map: either we ARE at the 12m reference age, or deltas were not requested
    if(binned_survey_age==12 | deltaSEfrom12==FALSE) {
      # Legend name
      legend_name <- substitute(paste("SE at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=g_df[,which_data])) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=grid_tmp_lt5y[,which_data]), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0.05,0.9),
                             breaks=seq(0.1,0.85,by=0.15),
                             colours=rev(brewer.pal(10, "Spectral"))) +
        geom_polygon(data=adm0, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    # Delta-SE map: SE(binned_survey_age) - SE(12m), diverging scale centered at 0
    else if (deltaSEfrom12==TRUE) {
      # Legend name
      legend_name <- bquote(paste(Delta, "SE at ", .(binned_survey_age), "m"))
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=g_df[,which_data]-g_df[,"SE_12m"])) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=grid_tmp_lt5y[,which_data]-grid_tmp_lt5y[,"SE_12m"]), size=0.1, breaks=seq(-0.05,0.3,by=0.01), colour="gray50") +
        scale_fill_gradient2(name=legend_name,
                             limits=c(-0.06, 0.3),
                             breaks=seq(-0.05, 0.3, by=0.05)) +
        geom_polygon(data=adm0, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
  }
  # Add water in that country, if applicable
  # (gIntersection returns NULL when the country contains no mapped water bodies)
  water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
  if(!is.null(water_in_country)) {
    # Plot water
    tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  }
  # Settings
  # Strip all axes/grid so only the map itself is shown
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark
  # NOTE(review): hard-coded grob path is fragile across ggplot2 versions -- verify index [[8]]
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
## Description: GAM mean/SE map at binned_survey_age
## Description: or delta SE, the difference between SE at binned_survey_age and SE at 12 months
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## MeanOrSE: "Mean", "SE"
## Note: this code is also used to make a movie
## Args:
##   binned_survey_age: age in months; selects the "Mean_<age>m" / "SE_<age>m" column
##   MeanOrSE: "Mean" plots inverse-logit coverage; "SE" plots the standard error
##   deltaSEfrom12: for SE maps (age != 12), plot SE(age) - SE(12m) instead of raw SE
##   GreenMagentaColorScale: for Mean maps, magenta-to-green palette (TRUE) or RdYlGn (FALSE)
##   movie: if TRUE, add a progress ticker bar under the map for movie frames
## Side effects: draws the finished map on the current graphics device via grid.draw()
## FIX(review): the movie branch previously referenced `LAB` without defining it, so it
## only worked if a global `LAB` existed; it is now built locally (as in
## getSmoothedMap_noSubSIA_ColdSpots) from the ticker positions and binned_survey_age.
## NOTE(review): depends on objects `g_df` and `grid_tmp_lt5y` brought into scope by
## load() below, and on the global `codes` data frame -- confirm these exist at call time
getSmoothedMap_noSubSIA <- function(binned_survey_age, MeanOrSE="Mean", deltaSEfrom12=TRUE, GreenMagentaColorScale=TRUE, movie=FALSE) {
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0(MeanOrSE, "_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Get the outline of all the countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  for(i in 2:nrow(codes)) {
    ## Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
  }
  # Aggregate data across all countries
  g_df_all <- NULL
  for(i in 1:nrow(codes)) {
    # Load the Della output
    # (presumably provides `g_df` and `grid_tmp_lt5y` -- TODO confirm .RData contents)
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Aggregate the cells using rbind; prefix polygon groups with the country index
    # so groups from different countries cannot collide
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    # Create a new data frame for contour (kept per-country; contours must not
    # be drawn across country borders)
    assign(paste0("contour_", i), grid_tmp_lt5y)
  }
  # For a map of means:
  if(MeanOrSE=="Mean"){
    # Legend name
    legend_name <- substitute(paste("Mean at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
    if(GreenMagentaColorScale==TRUE) {
      # Fill = inverse-logit (plogis) of the GAM mean
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=c('#8e0152','#c51b7d','#de77ae','#f1b6da','#fde0ef','#e6f5d0','#b8e186','#7fbc41','#4d9221','#276419')) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    else {
      # Same layers with an RdYlGn fill palette
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=brewer.pal(10, "RdYlGn")) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    # Add the contour lines (one layer per country)
    for(i in 1:nrow(codes)) {
      data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
      names(data) <- c("long", "lat", "z")
      tmp <- tmp +
        geom_contour(data=data, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
    }
  }
  # For a map of SEs:
  else if(MeanOrSE=="SE") {
    # Raw SE map: either we ARE at the 12m reference age, or deltas were not requested
    if(binned_survey_age==12 | deltaSEfrom12==FALSE) {
      # Legend name
      legend_name <- substitute(paste("SE at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=g_df_all[,which_data])) +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0.05,0.9),
                             breaks=seq(0.1,0.85,by=0.15),
                             colours=rev(brewer.pal(10, "Spectral"))) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
      # Add the contour lines
      for(i in 1:nrow(codes)) {
        data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
        names(data) <- c("long", "lat", "z")
        tmp <- tmp +
          geom_contour(data=data, mapping=aes(x=long, y=lat, z=z), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
      }
    }
    # Delta-SE map: SE(binned_survey_age) - SE(12m), diverging scale centered at 0
    else if (deltaSEfrom12==TRUE) {
      # Legend name
      legend_name <- bquote(paste(Delta, "SE at ", .(binned_survey_age), "m"))
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=g_df_all[,which_data]-g_df_all[,"SE_12m"])) +
        scale_fill_gradient2(name=legend_name,
                             limits=c(-0.06, 0.3),
                             breaks=seq(-0.05, 0.3, by=0.05)) +
        guides(fill=guide_colorbar(barwidth=2, barheight=10))
      # Add the contour lines (contour the delta, not the raw SE)
      for(i in 1:nrow(codes)) {
        data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
        names(data) <- c("long", "lat", "z")
        data_base <- get(paste0("contour_", i))[,c("long", "lat", "SE_12m")]
        names(data_base) <- c("long", "lat", "z")
        data$z <- data$z - data_base$z
        tmp <- tmp +
          geom_contour(data=data, mapping=aes(x=long, y=lat, z=z), size=0.1, breaks=seq(-0.05,0.3,by=0.01), colour="gray50")
      }
    }
  }
  # Add the adm0 boundaries
  # NOTE(review): z= is not a geom_polygon aesthetic; group= was probably intended -- confirm
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Get capitals (lat, long pairs)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Ticker marks for the movie progress bar: 60 positions along x = [10, 30], one
  # per month; the "filled" segment spans months 1..binned_survey_age
  # (FIX: previously `LAB` was undefined in this function and came from the global
  # environment; built locally here, mirroring getSmoothedMap_noSubSIA_ColdSpots)
  WHERE <- seq(10, 30, length=60)
  LAB <- data.frame(xmin=WHERE[1:binned_survey_age], xmax=WHERE[1:binned_survey_age])
  # If you're not plotting the movie
  if(movie==FALSE) {
    # Plot with capitals
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank())
  }
  # If you are plotting the movie
  if(movie==TRUE) {
    # Plot with capitals and ticker at the bottom
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank()) +
      # Horizontal bar
      geom_segment(aes(x=9.9, xend=30.1, y=-30, yend=-30), size=1.3) +
      # X-axis ticks
      geom_segment(aes(x=9.96, xend=9.96, y=-30.5, yend=-29.91), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[12], xend=seq(10,30,length=60)[12], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[24], xend=seq(10,30,length=60)[24], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[36], xend=seq(10,30,length=60)[36], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[48], xend=seq(10,30,length=60)[48], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=30, xend=30, y=-30.5, yend=-30), size=1.3) +
      # X-axis labels:seq(10,30,length=60)[c(1,12,24,36,48,60)]
      geom_text(aes(label="01m", x=10, y=-31)) +
      geom_text(aes(label="12m", x=seq(10,30,length=60)[12], y=-31)) +
      geom_text(aes(label="24m", x=seq(10,30,length=60)[24], y=-31)) +
      geom_text(aes(label="36m", x=seq(10,30,length=60)[36], y=-31)) +
      geom_text(aes(label="48m", x=seq(10,30,length=60)[48], y=-31)) +
      geom_text(aes(label="60m", x=30, y=-31)) +
      # Color bar
      geom_segment(data=LAB,aes(x=xmin,xend=xmax,y=-29.9,yend=-29),size=1.5,col="seagreen")
  }
  # Change legend tick marks to dark
  # NOTE(review): hard-coded grob path is fragile across ggplot2 versions -- verify index [[8]]
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: coldspots map
## Description: grid cells with < cutoff_percent mean coverage at binned_survey_age
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## Note: this code is also used to make a movie
## Args:
##   binned_survey_age: age in months; selects the "Mean_<age>m" column (default 12)
##   cutoff_percent: coverage threshold below which a cell is a coldspot (default 0.8)
##   movie: if TRUE, add a progress ticker bar under the map for movie frames
## Side effects: prints the finished ggplot on the current graphics device
## NOTE(review): depends on objects `g_df` (with columns group, long, lat, dummy, and
## the Mean_* columns) brought into scope by load() below, and on the global `codes`
## data frame -- confirm these exist at call time
getSmoothedMap_noSubSIA_ColdSpots <- function(binned_survey_age=12, cutoff_percent=0.8, movie=FALSE) {
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for(i in 1:nrow(codes)) {
    # Load the Della output
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Aggregate the cells using rbind; prefix groups with the country index so
    # polygon groups from different countries cannot collide
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Find and fill the coldspots, aka cells with less than cutoff_percent coverage
  # (means are stored on the logit scale; plogis converts back to [0,1] coverage)
  g_df_all$coverage_at_cutoff_age <- plogis(g_df_all[,which_data])
  tmp <- tmp + geom_polygon(data=g_df_all[which(g_df_all$coverage_at_cutoff_age < cutoff_percent),], mapping=aes(x=long, y=lat, group=group_final, fill=factor(dummy))) + scale_fill_manual(name="Coldspots", values="gray75", labels=paste0("< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m")) + theme(legend.position=c(0.2,0.2), legend.background=element_rect(fill=NA, size=0.1, linetype="solid", colour="black"))
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes)) {
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    # Stitch together
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # Add the adm1 boundaries
  # NOTE(review): z= is not a geom_polygon aesthetic; group= was probably intended -- confirm
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, z=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Get capitals (lat, long pairs)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Ticker marks: 60 positions along x = [10, 30], one per month; the "filled"
  # segment of the movie progress bar spans months 1..binned_survey_age
  WHERE <- seq(10, 30, length=60)
  LAB <- data.frame(xmin=WHERE[1:binned_survey_age], xmax=WHERE[1:binned_survey_age])
  # If you're not plotting the movie
  if(movie==FALSE) {
    # Plot with capitals
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank())
  }
  # If you are plotting the movie
  if(movie==TRUE) {
    # Plot with capitals and ticker at the bottom
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank()) +
      # Horizontal bar
      geom_segment(aes(x=9.9, xend=30.1, y=-30, yend=-30), size=1.3) +
      # X-axis ticks
      geom_segment(aes(x=9.96, xend=9.96, y=-30.5, yend=-29.91), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[12], xend=seq(10,30,length=60)[12], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[24], xend=seq(10,30,length=60)[24], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[36], xend=seq(10,30,length=60)[36], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[48], xend=seq(10,30,length=60)[48], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=30, xend=30, y=-30.5, yend=-30), size=1.3) +
      # X-axis labels:seq(10,30,length=60)[c(1,12,24,36,48,60)]
      geom_text(aes(label="01m", x=10, y=-31)) +
      geom_text(aes(label="12m", x=seq(10,30,length=60)[12], y=-31)) +
      geom_text(aes(label="24m", x=seq(10,30,length=60)[24], y=-31)) +
      geom_text(aes(label="36m", x=seq(10,30,length=60)[36], y=-31)) +
      geom_text(aes(label="48m", x=seq(10,30,length=60)[48], y=-31)) +
      geom_text(aes(label="60m", x=30, y=-31)) +
      # Color bar
      geom_segment(data=LAB,aes(x=xmin,xend=xmax,y=-29.9,yend=-29),size=1.5,col="seagreen")
  }
  print(tmp)
}
## Description: coldspots significance map
## Description: the proportion of the CDF at binned_survey_age that is < cutoff_percent
## Description: essentially, how confident are we that a cell is a coldspot?
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## Args:
##   binned_survey_age: age in months; selects the "Mean_<age>m" / "SE_<age>m" columns
##   cutoff_percent: coverage threshold defining a coldspot (default 0.8)
## Side effects: draws the finished map on the current graphics device via grid.draw()
## NOTE(review): depends on objects `g_df` and `grid_tmp_lt5y` brought into scope by
## load() below, and on the global `codes` data frame -- confirm these exist at call time
getSmoothedMap_noSubSIA_Coldspots_Significance <- function(binned_survey_age=12, cutoff_percent=0.8) {
  # For plot
  tmp <- ggplot()
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for(i in 1:nrow(codes)) {
    # Load the Della output
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Aggregate the cells using rbind; prefix groups with the country index so
    # polygon groups from different countries cannot collide
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    # Create a new data frame for contour (kept per-country so contours do not
    # bleed across country borders)
    assign(paste0("contour_", i), grid_tmp_lt5y)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Figure out the proportion of the distribution of the mean that's below cutoff_percent at binned_survey_age (on logit scale), at each cell
  mean_data <- paste0("Mean_", binned_survey_age, "m")
  se_data <- paste0("SE_", binned_survey_age, "m")
  # P(logit(coverage) < logit(cutoff)) assuming a Normal on the logit scale
  g_df_all$prop_below_cutoff_percent_at_cutoff_age <- pnorm(qlogis(cutoff_percent), mean=g_df_all[, mean_data], sd=g_df_all[, se_data])
  # Plot the significance map
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=prop_below_cutoff_percent_at_cutoff_age)) +
    scale_fill_gradientn(name=paste0("Proportion of\ndistribution of mean\n< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m"),
                         colours=colorRampPalette(rev(brewer.pal(9, "RdYlBu")))(50),
                         limits=c(0,1)) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Get an outline of where (mean) coldspots are, and add to plot
  # NOTE(review): contour break is hard-coded at 0.8 rather than cutoff_percent -- confirm intended
  for(i in 1:nrow(codes)) {
    data <- get(paste0("contour_", i))[,c("long", "lat", mean_data)]
    names(data) <- c("long", "lat", "z")
    tmp <- tmp +
      geom_contour(data=data, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=0.8, colour="black")
  }
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes)) {
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    # Stitch together
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # Add the adm1 boundaries
  # NOTE(review): z= is not a geom_polygon aesthetic; group= was probably intended -- confirm
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, z=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Get capitals (lat, long pairs)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Ticker marks (LAB is computed but unused in this function -- kept for parity
  # with the movie-capable variants)
  WHERE <- seq(10, 30, length=60)
  LAB <- data.frame(xmin=WHERE[1:binned_survey_age], xmax=WHERE[1:binned_survey_age])
  # Plot with capitals
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark
  # NOTE(review): hard-coded grob path is fragile across ggplot2 versions -- verify index [[8]]
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: map of the prevalence of grid cells as coldspots, between lower_age and upper_age
## Description: grid cells with < cutoff_percent mean coverage, at ages between lower_age and upper_age
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## Args:
##   lower_age, upper_age: inclusive age range in months; selects the "Mean_<age>m" columns
##   cutoff_percent: coverage threshold below which a cell counts as a coldspot
## Side effects: draws the finished map on the current graphics device via grid.draw()
## NOTE(review): depends on object `g_df` brought into scope by load() below and on the
## global `codes` data frame -- confirm these exist at call time
getSmoothedMap_noSubSIA_Coldspots_Prevalence <- function(lower_age, upper_age, cutoff_percent) {
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  # (one "Mean_<age>m" column per month in [lower_age, upper_age])
  which_data <- paste0("Mean_", lower_age:upper_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for(i in 1:nrow(codes)) {
    # Load the Della output
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Aggregate the cells using rbind; prefix groups with the country index so
    # polygon groups from different countries cannot collide
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Calculate the proportion of months that each grid cell spends as a coldspot
  # (means are on the logit scale; plogis converts back to [0,1] coverage)
  logit_coverage_at_included_ages <- as.matrix(g_df_all[,which_data])
  coverage_at_included_ages <- plogis(logit_coverage_at_included_ages)
  indicator_for_coldspot <- ifelse(coverage_at_included_ages < cutoff_percent, 1, 0)
  total_months <- ncol(indicator_for_coldspot)
  prevalence_coldspot <- rowSums(indicator_for_coldspot)/total_months
  g_df_all$coverage_at_cutoff_age <- prevalence_coldspot
  # Legend name
  legend_name <- paste("Coldspot prevalence\n", lower_age, "-", upper_age, "m\n", "All cells", sep="")
  # Plot (na.value green makes cells with missing prevalence stand out)
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=coverage_at_cutoff_age)) + scale_fill_gradient(name=legend_name, limits=c(0,1), low="white", high="red", na.value="green") + guides(fill=guide_colorbar(barwidth=2, barheight=10))
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes)) {
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    # Stitch together
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # Add the adm1 boundaries
  # NOTE(review): z= is not a geom_polygon aesthetic; group= was probably intended -- confirm
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, z=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Get capitals (lat, long pairs)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark
  # NOTE(review): hard-coded grob path is fragile across ggplot2 versions -- verify index [[8]]
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
## Description: same as above, filtering population size of under 5 year olds (cutoff_popsize)
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## Args:
##   lower_age, upper_age: inclusive age range in months; selects the "Mean_<age>m" columns
##   cutoff_percent: coverage threshold below which a cell counts as a coldspot
##   cutoff_popsize: cells with under-5 population below this are zeroed out of the map
## Side effects: draws the finished map on the current graphics device via grid.draw()
## NOTE(review): depends on objects (`g_df`, `g`, `grid_tmp_lt5y`) brought into scope by
## the two load() calls below and on the global `codes` data frame -- confirm at call time
getSmoothedMap_noSubSIA_Coldspots_Prevalence_Pop <- function(lower_age, upper_age, cutoff_percent, cutoff_popsize) {
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  # (one "Mean_<age>m" column per month in [lower_age, upper_age])
  which_data <- paste0("Mean_", lower_age:upper_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for(i in 1:nrow(codes)) {
    # Load the Della output
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Aggregate the cells using rbind; prefix groups with the country index so
    # polygon groups from different countries cannot collide
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Calculate the proportion of months that each grid cell spends as a coldspot
  # (means are on the logit scale; plogis converts back to [0,1] coverage)
  logit_coverage_at_included_ages <- as.matrix(g_df_all[,which_data])
  coverage_at_included_ages <- plogis(logit_coverage_at_included_ages)
  indicator_for_coldspot <- ifelse(coverage_at_included_ages < cutoff_percent, 1, 0)
  total_months <- ncol(indicator_for_coldspot)
  prevalence_coldspot <- rowSums(indicator_for_coldspot)/total_months
  g_df_all$coverage_at_cutoff_age <- prevalence_coldspot
  # Get the number of people at each grid cell
  g_df_all_2 <- NULL
  for(i in 1:nrow(codes)) {
    # Load the Della output
    # (presumably provides `g_df`, `g`, and `grid_tmp_lt5y` -- TODO confirm .RData contents)
    load(paste0("./Results/GAMtoPeople_Della/GAMtoPeople_",codes$DHS_code[i],"_6-60m.RData"))
    # Align population rows to polygon rows by the shared `dummy` cell id
    rowname <- match(g_df$dummy, g$dummy)
    g_df$lt5y <- grid_tmp_lt5y[rowname, "lt5y"]
    # Aggregate the cells using rbind
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all_2 <- rbind(g_df_all_2, g_df)
  }
  # Let lt5y_multiplier be 0 if < cutoff_popsize, and 1 if > cutoff_popsize
  # NOTE(review): assumes g_df_all and g_df_all_2 rows are in identical order -- confirm
  g_df_all$lt5y_mult <- ifelse(g_df_all_2$lt5y < cutoff_popsize, 0, 1)
  # Legend name
  legend_name <- paste("Coldspot\nprevalence\n", lower_age, "-", upper_age, "m\n", "Min: ", cutoff_popsize, sep="")
  # Plot (multiplier zeroes out low-population cells; na.value green flags missing cells)
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=coverage_at_cutoff_age * lt5y_mult)) + scale_fill_gradient(name=legend_name, limits=c(0,1), low="white", high="red", na.value="green") + guides(fill=guide_colorbar(barwidth=2, barheight=10))
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes)) {
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    # Stitch together
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # Add the adm1 boundaries
  # NOTE(review): z= is not a geom_polygon aesthetic; group= was probably intended -- confirm
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, z=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Get capitals (lat, long pairs)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals
  tmp <- tmp + geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark
  # NOTE(review): hard-coded grob path is fragile across ggplot2 versions -- verify index [[8]]
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: map of the number of people between [0, 60] months of age
## Description: assuming that monthly age bins are uniformly distributed
## Spatial scale: all countries
## Side effects: draws the finished map on the current graphics device via grid.draw()
## NOTE(review): depends on objects (`g_df`, `g`, `grid_tmp_lt5y`) brought into scope by
## load() below and on the global `codes` data frame -- confirm these exist at call time
getSmoothedMap_WorldPop <- function() {
  # For plot
  tmp <- ggplot()
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  for(i in 1:nrow(codes)) {
    # Load the Della output
    load(paste0("./Results/GAMtoPeople_Della/GAMtoPeople_",codes$DHS_code[i],"_6-60m.RData"))
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    # Calculate the number of people between [0, 60] months of age
    # (align population rows to polygon rows by the shared `dummy` cell id)
    rowname <- match(g_df$dummy, g$dummy)
    unvacc <- grid_tmp_lt5y[rowname, "lt5y"] #+ 1 # since we show on log scale
    g_df$unvacc <- unvacc
    # Legend name
    legend_name <- paste("Population size\n", "0-60m", sep="")
    # Plot
    # (log-transformed fill scale; the same scale_fill_gradientn is re-added each
    # iteration, so only the last layer's scale definition takes effect)
    tmp <- tmp +
      geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=unvacc)) +
      scale_fill_gradientn(name=legend_name,
                           colours=colorRampPalette(rev(brewer.pal(11, "RdBu")))(50),
                           trans="log",
                           na.value="royalblue4", # aka those with 0 people
                           limits=c(1,250000),
                           breaks=c(1,10,100,1000,10000,100000)) +
      # NOTE(review): z= is not a geom_polygon aesthetic; group= was probably intended -- confirm
      geom_polygon(data=adm0, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25) +
      guides(fill=guide_colorbar(barwidth=2, barheight=20))
  }
  # Get the outline of all the countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  for(i in 2:nrow(codes)) {
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
  }
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Get capitals (lat, long pairs; `cap` is built but not plotted in this function)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Settings
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark
  # NOTE(review): hard-coded grob path is fragile across ggplot2 versions -- verify index [[8]]
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
## Description: map of the number of unvaccinated people between [lower_age, upper_age] months of age
## Description: assuming that monthly age bins are uniformly distributed
## Description: the minimum lower_age here is 6 months (i.e., eligibility for the analysis)
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_Unvaccinateds <- function(lower_age, upper_age) {
  ## Map of the estimated number of UNVACCINATED people aged
  ## [lower_age, upper_age] months per grid cell, across all countries in
  ## `codes` (global), assuming monthly age bins are uniformly distributed
  ## (hence the /60 scaling below). Minimum supported lower_age is 6 months.
  ## Sub-national SIAs are NOT included in the prediction. Relies on the
  ## per-country objects `g`, `g_df` and `grid_tmp_lt5y` created by load()
  ## below. Returns nothing; draws the finished map.
  tmp <- ggplot()
  # Water bodies; clipped to the study region further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  for(i in seq_len(nrow(codes))) {
    # Load the Della output (creates g, g_df, grid_tmp_lt5y in this scope)
    load(paste0("./Results/GAMtoPeople_Della/GAMtoPeople_",codes$DHS_code[i],"_6-60m.RData"))
    # Country border to overlay
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    # Unvaccinated count in [lower_age, upper_age]: sum the per-month
    # unvaccinated probabilities and scale by population / 60 monthly bins
    rowname <- match(g_df$dummy, g$dummy)
    unvacc <- (rowSums(grid_tmp_lt5y[rowname, paste0("p_unvacc_", lower_age:upper_age, "m")]) * (grid_tmp_lt5y[rowname, "lt5y"]/60)) #+ 1 # since we show on log scale
    g_df$unvacc <- unvacc
    # Legend name
    legend_name <- paste("Unvaccinateds\n", lower_age, "-", upper_age, "m", sep="")
    tmp <- tmp +
      geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=unvacc)) +
      scale_fill_gradientn(name=legend_name,
                           colours=colorRampPalette(rev(brewer.pal(9, "RdYlBu")))(50),
                           trans="log",
                           na.value="royalblue4", # aka those with 0 people
                           limits=c(1,50000),
                           breaks=c(1,10,100,1000,10000)) +
      # BUGFIX: geom_polygon has no `z` aesthetic (it was silently ignored);
      # `group` is required so multi-ring borders draw as separate paths.
      geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
      guides(fill=guide_colorbar(barwidth=2, barheight=20))
  }
  # Outline of all the countries, stitched into one spatial object
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  for(i in seq_len(nrow(codes))[-1]) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
  }
  # Clip water bodies to the study region and draw them on top
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long).
  # NOTE(review): `cap` is assembled but never plotted in this function
  # (sibling functions add geom_point(data=cap, ...)) -- confirm intent.
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Strip all axes/background for a clean map panel
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Fragile grob surgery: recolour the legend tick marks dark gray
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: GAM mean map at binned_survey_age, with effect of sub-national SIAs
## Description: with and without contour lines
## Sub-national SIAs: included in prediction
## Spatial scale: single country
getSmoothedMap_WithSubSIA_Mean_Single_NoContour <- function(Country, binned_survey_age) {
  ## GAM mean coverage map at `binned_survey_age` months for one country,
  ## WITH the sub-national SIA effect included in the prediction; no contour
  ## lines. Relies on `codes` (global) and on `g_df` / `grid_tmp_lt5y`
  ## created by load() below. Returns nothing; draws the finished map.
  # Row index of the requested country in the lookup table
  i <- as.numeric(rownames(codes))[which(Country==codes$Country)]
  DHS_code <- codes$DHS_code[i]
  ADM_code <- codes$ADM_code[i]
  # Load the Della output (creates g_df, grid_tmp_lt5y in this scope)
  load(paste0("./Results/GAMtoMeanAndSE_Della_WithSubnationals/GAMtoMeanAndSE_WithSubnationals_", DHS_code,"_6-60m.RData"))
  # Country border to overlay
  adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
  tmp <- ggplot()
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water bodies; clipped to this country further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\nSub-national SIAs", sep="")
  tmp <- tmp +
    geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0,1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    # BUGFIX: `z` is not a geom_polygon aesthetic; `group` is required so
    # multi-ring borders draw as separate paths.
    geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Water in that country, if applicable
  water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
  if(!is.null(water_in_country)) {
    tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  }
  # Strip all axes/background for a clean map panel
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Fragile grob surgery: recolour the legend tick marks dark gray
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
getSmoothedMap_WithSubSIA_Mean_Single <- function(Country, binned_survey_age) {
  ## Same as getSmoothedMap_WithSubSIA_Mean_Single_NoContour but WITH
  ## contour lines drawn from the prediction grid. Relies on `codes`
  ## (global) and on `g_df` / `grid_tmp_lt5y` created by load() below.
  ## Returns nothing; draws the finished map.
  # Row index of the requested country in the lookup table
  i <- as.numeric(rownames(codes))[which(Country==codes$Country)]
  DHS_code <- codes$DHS_code[i]
  ADM_code <- codes$ADM_code[i]
  # Load the Della output (creates g_df, grid_tmp_lt5y in this scope)
  load(paste0("./Results/GAMtoMeanAndSE_Della_WithSubnationals/GAMtoMeanAndSE_WithSubnationals_", DHS_code,"_6-60m.RData"))
  # Country border to overlay
  adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
  tmp <- ggplot()
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water bodies; clipped to this country further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\nSub-national SIAs", sep="")
  tmp <- tmp +
    geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
    # Contour lines on the probability scale, every 5 percentage points
    geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0,1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    # BUGFIX: `z` is not a geom_polygon aesthetic; `group` is required so
    # multi-ring borders draw as separate paths.
    geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Water in that country, if applicable
  water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
  if(!is.null(water_in_country)) {
    tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  }
  # Strip all axes/background for a clean map panel
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Fragile grob surgery: recolour the legend tick marks dark gray
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: GAM mean map at binned_survey_age, with jittering or DHS weights
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_noSubSIA_Mean_Jitter <- function(binned_survey_age) {
  ## GAM mean coverage map at `binned_survey_age` months across all
  ## countries in `codes` (global), from the jittered-locations sensitivity
  ## analysis. Sub-national SIAs are NOT in the prediction. Relies on the
  ## per-country objects `g_df` and `grid_tmp_lt5y` created by load() below.
  ## Returns nothing; draws the finished map with contours and capitals.
  tmp <- ggplot()
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water bodies; clipped to the study region further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Outline of all the countries, stitched into one spatial object
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  for(i in seq_len(nrow(codes))[-1]) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
  }
  # Stack the per-country cell data; keep each country's prediction grid in
  # a list for the contour layers (instead of assign()/get() gymnastics)
  g_df_all <- NULL
  contour_list <- vector("list", nrow(codes))
  for(i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df, grid_tmp_lt5y in this scope)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Jitter.RData"))
    # Make polygon group ids unique across countries
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    contour_list[[i]] <- grid_tmp_lt5y
  }
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\n", "Jittered locations", sep="")
  tmp <- tmp +
    geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0,1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Contour lines (probability scale, every 5 percentage points) per country
  for(i in seq_len(nrow(codes))) {
    contour_df <- contour_list[[i]][,c("long", "lat", which_data)]
    names(contour_df) <- c("long", "lat", "z")
    tmp <- tmp +
      geom_contour(data=contour_df, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
  }
  # BUGFIX: `z` is not a geom_polygon aesthetic; `group` is required so
  # multi-ring borders draw as separate paths.
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Clip water bodies to the study region and draw them on top
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Capitals on top; strip all axes/background for a clean map panel
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Fragile grob surgery: recolour the legend tick marks dark gray
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
getSmoothedMap_noSubSIA_Mean_Weights <- function(binned_survey_age) {
  ## GAM mean coverage map at `binned_survey_age` months across all
  ## countries in `codes` (global), from the DHS-weights sensitivity
  ## analysis. Sub-national SIAs are NOT in the prediction. Relies on the
  ## per-country objects `g_df` and `grid_tmp_lt5y` created by load() below.
  ## Returns nothing; draws the finished map with contours and capitals.
  tmp <- ggplot()
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water bodies; clipped to the study region further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Outline of all the countries, stitched into one spatial object
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  for(i in seq_len(nrow(codes))[-1]) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
  }
  # Stack the per-country cell data; keep each country's prediction grid in
  # a list for the contour layers (instead of assign()/get() gymnastics)
  g_df_all <- NULL
  contour_list <- vector("list", nrow(codes))
  for(i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df, grid_tmp_lt5y in this scope)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Weights.RData"))
    # Make polygon group ids unique across countries
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    contour_list[[i]] <- grid_tmp_lt5y
  }
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\n", "With DHS weights", sep="")
  tmp <- tmp +
    geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0,1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Contour lines (probability scale, every 5 percentage points) per country
  for(i in seq_len(nrow(codes))) {
    contour_df <- contour_list[[i]][,c("long", "lat", which_data)]
    names(contour_df) <- c("long", "lat", "z")
    tmp <- tmp +
      geom_contour(data=contour_df, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
  }
  # BUGFIX: `z` is not a geom_polygon aesthetic; `group` is required so
  # multi-ring borders draw as separate paths.
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Clip water bodies to the study region and draw them on top
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Capitals on top; strip all axes/background for a clean map panel
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Fragile grob surgery: recolour the legend tick marks dark gray
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: coldspots map at binned_survey_age, with jittering or DHS weights
## Description: grid cells with < cutoff_percent mean coverage at binned_survey_age
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_noSubSIA_ColdSpots_Jitter <- function(binned_survey_age=12, cutoff_percent=0.8) {
  ## Coldspots map from the jittered-locations analysis: grid cells whose
  ## mean coverage at `binned_survey_age` months is below `cutoff_percent`,
  ## across all countries in `codes` (global). Sub-national SIAs are NOT in
  ## the prediction. Relies on the per-country object `g_df` created by
  ## load() below. Returns nothing; prints the finished map.
  tmp <- ggplot()
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water bodies; clipped to the study region further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Stack the per-country cell data
  g_df_all <- NULL
  for(i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df in this scope)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Jitter.RData"))
    # Make polygon group ids unique across countries
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Blank (unfilled) grid cells as the base layer
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Fill only the coldspots, i.e. cells below cutoff_percent coverage
  g_df_all$coverage_at_cutoff_age <- plogis(g_df_all[,which_data])
  tmp <- tmp +
    geom_polygon(data=g_df_all[which(g_df_all$coverage_at_cutoff_age < cutoff_percent),],
                 mapping=aes(x=long, y=lat, group=group_final, fill=factor(dummy))) +
    scale_fill_manual(name="Coldspots (Jittered locations)",
                      values="gray75",
                      labels=paste0("< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m")) +
    theme(legend.position=c(0.2,0.2),
          legend.background=element_rect(fill=NA, size=0.1, linetype="solid", colour="black"))
  # Outlines of all the countries (adm0 and adm1), stitched together
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in seq_len(nrow(codes))[-1]) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # BUGFIX (both boundary layers): `z` is not a geom_polygon aesthetic;
  # `group` is required so multi-ring borders draw as separate paths.
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, cex=0.1)
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Clip water bodies to the study region and draw them on top
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Capitals on top; strip all axes/background for a clean map panel
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  print(tmp)
}
getSmoothedMap_noSubSIA_ColdSpots_Weights <- function(binned_survey_age=12, cutoff_percent=0.8) {
  ## Coldspots map from the DHS-weights analysis: grid cells whose mean
  ## coverage at `binned_survey_age` months is below `cutoff_percent`,
  ## across all countries in `codes` (global). Sub-national SIAs are NOT in
  ## the prediction. Relies on the per-country object `g_df` created by
  ## load() below. Returns nothing; prints the finished map.
  tmp <- ggplot()
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water bodies; clipped to the study region further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Stack the per-country cell data
  g_df_all <- NULL
  for(i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df in this scope)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Weights.RData"))
    # Make polygon group ids unique across countries
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Blank (unfilled) grid cells as the base layer
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Fill only the coldspots, i.e. cells below cutoff_percent coverage
  g_df_all$coverage_at_cutoff_age <- plogis(g_df_all[,which_data])
  tmp <- tmp +
    geom_polygon(data=g_df_all[which(g_df_all$coverage_at_cutoff_age < cutoff_percent),],
                 mapping=aes(x=long, y=lat, group=group_final, fill=factor(dummy))) +
    scale_fill_manual(name="Coldspots (With DHS weights)",
                      values="gray75",
                      labels=paste0("< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m")) +
    theme(legend.position=c(0.2,0.2),
          legend.background=element_rect(fill=NA, size=0.1, linetype="solid", colour="black"))
  # Outlines of all the countries (adm0 and adm1), stitched together
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in seq_len(nrow(codes))[-1]) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # BUGFIX (both boundary layers): `z` is not a geom_polygon aesthetic;
  # `group` is required so multi-ring borders draw as separate paths.
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, cex=0.1)
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Clip water bodies to the study region and draw them on top
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Capitals on top; strip all axes/background for a clean map panel
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  print(tmp)
}
################################################################################
## Description: GAM mean map at binned_survey_age, incorporating urbanicity
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## Map_types: "Urban", "Rural", "Urban-Rural"
getSmoothedMap_noSubSIA_Mean_UrbanRural <- function(binned_survey_age, map_type) {
  ## GAM mean coverage map at `binned_survey_age` months incorporating
  ## urbanicity, across all countries in `codes` (global).
  ## map_type: "Urban", "Rural", or "Urban-Rural" (difference map).
  ## Sub-national SIAs are NOT in the prediction. Relies on the per-country
  ## objects `g_df` and `grid_tmp_lt5y` created by load() below.
  ## Returns nothing; draws the finished map.
  # Fail fast on an unknown map type (previously an invalid value silently
  # produced a map with no fill layer at all).
  map_type <- match.arg(map_type, c("Urban", "Rural", "Urban-Rural"))
  tmp <- ggplot()
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water bodies; clipped to the study region further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Outline of all the countries, stitched into one spatial object
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  for(i in seq_len(nrow(codes))[-1]) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
  }
  # Helper: load every country's results for one stratum ("Urban"/"Rural")
  # and return the stacked cell data plus the per-country prediction grids.
  load_stratum <- function(stratum) {
    g_df_all <- NULL
    contour_list <- vector("list", nrow(codes))
    for(i in seq_len(nrow(codes))) {
      # Load the Della output (creates g_df, grid_tmp_lt5y in this scope)
      load(paste0("./Results/GAMtoMeanAndSE_Della_urbanrural/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_",stratum,".RData"))
      # Make polygon group ids unique across countries
      g_df$country <- i
      g_df$group_final <- paste0(g_df$country, "_", g_df$group)
      g_df_all <- rbind(g_df_all, g_df)
      contour_list[[i]] <- grid_tmp_lt5y
    }
    list(cells=g_df_all, contours=contour_list)
  }
  if(map_type %in% c("Urban", "Rural")) {
    ## Single-stratum map (the Urban and Rural branches were identical
    ## except for the file suffix and legend label)
    stratum <- load_stratum(map_type)
    g_df_all <- stratum$cells
    legend_name <- paste("Mean at ", binned_survey_age, "m\n", map_type, sep="")
    tmp <- tmp +
      geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
      scale_fill_gradientn(name=legend_name,
                           limits=c(0,1),
                           breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                           values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                           colours=brewer.pal(10, "RdYlGn")) +
      guides(fill=guide_colorbar(barwidth=2, barheight=20))
    # Contour lines (probability scale, every 5 percentage points)
    for(i in seq_len(nrow(codes))) {
      contour_df <- stratum$contours[[i]][,c("long", "lat", which_data)]
      names(contour_df) <- c("long", "lat", "z")
      tmp <- tmp +
        geom_contour(data=contour_df, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
    }
  } else {
    ## Urban-Rural difference map (no contour lines)
    urban <- load_stratum("Urban")$cells
    rural <- load_stratum("Rural")$cells
    # NOTE(review): the difference is taken on the logit (link) scale,
    # unlike the single-stratum maps which apply plogis() first; the
    # c(-0.1, 0.6) limits look like a probability-scale difference may
    # have been intended -- confirm before changing. Behavior preserved.
    g_df_all <- data.frame(long=urban$long, lat=urban$lat, group_final=urban$group_final, to_plot=urban[,which_data]-rural[,which_data])
    legend_name <- paste("Mean at ", binned_survey_age, "m\n", "Urban-Rural", sep="")
    tmp <- tmp +
      geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=to_plot)) +
      scale_fill_gradientn(name=legend_name,
                           limits=c(-0.1,0.6),
                           breaks=seq(-0.05, 0.55, by=0.1),
                           colours=brewer.pal(9, "YlGnBu")) +
      guides(fill=guide_colorbar(barwidth=2, barheight=20))
  }
  # BUGFIX: `z` is not a geom_polygon aesthetic; `group` is required so
  # multi-ring borders draw as separate paths.
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Clip water bodies to the study region and draw them on top
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Capitals on top; strip all axes/background for a clean map panel
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Fragile grob surgery: recolour the legend tick marks dark gray
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: difference in GAM mean, between model with and without the sub-national SIA covariate
## Spatial scale: single country
getSmoothedMap_noSubSIA_InModel_Minus_noSubSIA_Mean_Single <- function(Country, binned_survey_age) {
  ## Difference in GAM mean coverage (probability scale) at
  ## `binned_survey_age` months for one country, between the model WITHOUT
  ## the sub-national SIA covariate and the base model. Relies on `codes`
  ## (global) and on `g_df` / `grid_tmp_lt5y` created by the two load()
  ## calls below. Returns nothing; draws the finished map.
  # Row index of the requested country in the lookup table
  i <- as.numeric(rownames(codes))[which(Country==codes$Country)]
  DHS_code <- codes$DHS_code[i]
  ADM_code <- codes$ADM_code[i]
  # Column holding the posterior mean (logit scale) at this age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Base model ("noSubSIA"): keep its probability-scale means as the offset
  load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_", DHS_code,"_6-60m.RData"))
  g_df_offset <- plogis(g_df[,which_data])
  grid_tmp_lt5y_offset <- plogis(grid_tmp_lt5y[,which_data])
  # Comparison model ("noSubSIA_InModel"): overwrites g_df / grid_tmp_lt5y
  load(paste0("./Results/GAMtoMeanAndSE_Della_NoSubnationals/GAMtoMeanAndSE_", DHS_code,"_6-60m_NoSubnationals.RData"))
  # Country border to overlay
  adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
  tmp <- ggplot()
  # Water bodies; clipped to this country further down
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Legend name (Delta rendered via plotmath)
  legend_name <- bquote(paste(Delta, "Mean at ", .(binned_survey_age), "m"))
  tmp <- tmp +
    geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data])-g_df_offset)) +
    geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])-grid_tmp_lt5y_offset), size=0.1, breaks=seq(-0.05,0.05,by=0.005), colour="gray50") +
    scale_fill_gradient2(name=legend_name,
                         limits=c(-0.02, 0.02),
                         breaks=seq(-0.02, 0.02, by=0.01)) +
    # BUGFIX: `z` is not a geom_polygon aesthetic; `group` is required so
    # multi-ring borders draw as separate paths.
    geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Water in that country, if applicable
  water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
  if(!is.null(water_in_country)) {
    tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  }
  # Strip all axes/background for a clean map panel
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Fragile grob surgery: recolour the legend tick marks dark gray
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: maps of DHS data points
MainFigure_DHS <- function() {
  # Main-figure map of DHS cluster locations, coloured by cluster sample size,
  # for every country in the global `codes_noRB` table (Rwanda/Burundi shaded).
  # Returns the ggplot object.
  # Accumulate cluster coordinates and per-cluster counts across countries
  dat_DHS <- NULL
  for(i in seq_len(nrow(codes_noRB))) {
    # Loads dataClust and dataInd (from getDataByInd_Cluster) for this country
    load(paste0("./Data/Manipulated_data/", as.character(codes_noRB$DHS_code[i]), ".RData"))
    # Clusters with 0 sampled individuals are excluded from the plot
    empty_clusters <- subset(unique(dataClust$cluster_id), !(unique(dataClust$cluster_id) %in% unique(dataInd$cluster_id)))
    long <- dataClust$long[!(dataClust$cluster_id %in% (empty_clusters))]
    lat <- dataClust$lat[!(dataClust$cluster_id %in% (empty_clusters))]
    inds <- c(table(dataInd$cluster_id))
    # Append this country's data
    dat_DHS$long <- c(dat_DHS$long, long)
    dat_DHS$lat <- c(dat_DHS$lat, lat)
    dat_DHS$inds <- c(dat_DHS$inds, inds)
    dat_DHS$country <- c(dat_DHS$country, rep(as.character(codes_noRB$Country[i]), times=length(long)))
  }
  # Convert to data.frame and drop incomplete rows
  dat_DHS <- as.data.frame(dat_DHS)
  dat_DHS <- dat_DHS[complete.cases(dat_DHS),]
  # Drop the spurious (0,0) GPS point.
  # BUG FIX: the original dat_DHS[-which(dat_DHS$long<1),] drops ALL rows when
  # nothing matches, because x[-integer(0), ] selects zero rows; logical
  # subsetting is safe (NAs were already removed above).
  dat_DHS <- dat_DHS[dat_DHS$long >= 1, ]
  dat_DHS$inds <- as.numeric(dat_DHS$inds)
  # Stitch together the adm0/adm1 outlines of all countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[1], "_adm/", codes_noRB$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[1], "_adm/", codes_noRB$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes_noRB)) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[i], "_adm/", codes_noRB$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[i], "_adm/", codes_noRB$ADM_code[i], "_adm1.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # Burundi and Rwanda are shown shaded (no points at this scale)
  adm0_B <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[1], "_adm/", codes_RB$ADM_code[1], "_adm0.shp"))
  adm0_R <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[2], "_adm/", codes_RB$ADM_code[2], "_adm0.shp"))
  adm0_RB <- rbind(adm0_B, adm0_R, makeUniqueIDs=TRUE)
  # Build the plot
  tmp <- ggplot()
  # BUG FIX (all three border layers): use the `group` aesthetic (was z=group,
  # which geom_polygon does not document) and `size` (was cex, which
  # geom_polygon ignores)
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray75", fill=NA, size=0.1)
  tmp <- tmp + geom_polygon(data=adm0_RB, aes(x=long, y=lat, group=group), colour="gray90", fill="gray80", size=0.1)
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.1)
  # Water bodies, clipped to the full region
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.1, fill="azure", alpha=1)
  # Bin cluster sizes into three classes and colour points by bin.
  # NOTE(review): clusters with >59 individuals fall outside the cut() breaks
  # and get an NA colour -- confirm no such clusters exist.
  binned <- cut(dat_DHS$inds, c(0,19,39,59))
  cols <- brewer.pal(3,"Set1")
  names(cols) <- levels(binned)
  levels(binned) <- cols
  dat_DHS$colour <- binned
  tmp <- tmp + geom_point(data=dat_DHS, mapping=aes(x=long, y=lat, colour=colour), size=0.05) + scale_colour_identity("Individuals", labels=names(cols), guide="legend")
  # Strip axes/background and add a scale bar
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  tmp <- tmp + scaleBar(lon=15, lat=-24, distanceLon=400, distanceLat=20, distanceLegend=80, dist.unit="km", orientation=FALSE, legend.size=3)
  tmp
}
SubFigure_DHS <- function() {
  # Sub-figure map of DHS cluster locations for Rwanda and Burundi only
  # (countries in the global `codes_RB` table). Returns the ggplot object.
  # Accumulate cluster coordinates and per-cluster counts across countries
  dat_DHS <- NULL
  for(i in seq_len(nrow(codes_RB))) {
    # Loads dataClust and dataInd (from getDataByInd_Cluster) for this country
    load(paste0("./Data/Manipulated_data/", as.character(codes_RB$DHS_code[i]), ".RData"))
    # Clusters with 0 sampled individuals are excluded from the plot
    empty_clusters <- subset(unique(dataClust$cluster_id), !(unique(dataClust$cluster_id) %in% unique(dataInd$cluster_id)))
    long <- dataClust$long[!(dataClust$cluster_id %in% (empty_clusters))]
    lat <- dataClust$lat[!(dataClust$cluster_id %in% (empty_clusters))]
    inds <- c(table(dataInd$cluster_id))
    # Append this country's data
    dat_DHS$long <- c(dat_DHS$long, long)
    dat_DHS$lat <- c(dat_DHS$lat, lat)
    dat_DHS$inds <- c(dat_DHS$inds, inds)
    dat_DHS$country <- c(dat_DHS$country, rep(as.character(codes_RB$Country[i]), times=length(long)))
  }
  # Convert to data.frame and drop incomplete rows
  dat_DHS <- as.data.frame(dat_DHS)
  dat_DHS <- dat_DHS[complete.cases(dat_DHS),]
  dat_DHS$inds <- as.numeric(dat_DHS$inds)
  # Stitch together the adm0/adm1 outlines of both countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[1], "_adm/", codes_RB$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[1], "_adm/", codes_RB$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes_RB)) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[i], "_adm/", codes_RB$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[i], "_adm/", codes_RB$ADM_code[i], "_adm1.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # Build the plot
  tmp <- ggplot()
  # BUG FIX (both border layers): use the `group` aesthetic (was z=group) and
  # `size` (was cex, which geom_polygon ignores)
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray75", fill=NA, size=0.1)
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.1)
  # Water bodies, clipped to the region
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.1, fill="azure", alpha=1)
  # Bin cluster sizes into three classes and colour points by bin.
  # NOTE(review): clusters with >59 individuals fall outside the cut() breaks
  # and get an NA colour -- confirm no such clusters exist.
  binned <- cut(dat_DHS$inds, c(0,19,39,59))
  cols <- brewer.pal(3,"Set1")
  names(cols) <- levels(binned)
  levels(binned) <- cols
  dat_DHS$colour <- binned
  tmp <- tmp + geom_point(data=dat_DHS, mapping=aes(x=long, y=lat, colour=colour), size=0.05) + scale_colour_identity("Individuals", labels=names(cols), guide="legend")
  # Strip axes/background
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  tmp
}
################################################################################
## Description: GAM mean/SE map at binned_survey_age
## Description: or delta SE, the difference between SE at binned_survey_age and SE at 12 months
## Sub-national SIAs: not included in prediction
## Spatial scale: single country
## MeanOrSE: "Mean", "SE"
getSmoothedMap_noSubSIA_Single <- function(Country, binned_survey_age, MeanOrSE="Mean", deltaSEfrom12=TRUE, GreenMagentaColorScale=TRUE) {
  # Map of the GAM mean or SE surface at binned_survey_age for one country
  # (sub-national SIAs NOT included in the prediction). For MeanOrSE="SE" with
  # deltaSEfrom12=TRUE (and age != 12), plots SE minus the SE at 12 months.
  # GreenMagentaColorScale toggles the PiYG-style palette vs RdYlGn.
  # Side effect: draws the finished map on the current device via grid.draw().
  # Row index of this country in the global `codes` table
  i <- as.numeric(rownames(codes))[which(Country==codes$Country)]
  # Country-specific identifiers
  DHS_code <- codes$DHS_code[i]
  DHS_name <- codes$DHS_name[i]
  ADM_code <- codes$ADM_code[i]
  # Loads g_df (grid-cell polygons) and grid_tmp_lt5y (contour grid)
  load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_", DHS_code,"_6-60m.RData"))
  # National border to overlay
  adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
  # Start the plot
  tmp <- ggplot()
  # Column holding the statistic at the requested age
  which_data <- paste0(MeanOrSE, "_", binned_survey_age, "m")
  # Water-bodies layer
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # For a map of means:
  if(MeanOrSE=="Mean"){
    legend_name <- substitute(paste("Mean at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
    if(GreenMagentaColorScale==TRUE) {
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=c('#8e0152','#c51b7d','#de77ae','#f1b6da','#fde0ef','#e6f5d0','#b8e186','#7fbc41','#4d9221','#276419')) +
        # BUG FIX: `group` aesthetic (was z=group) and `size` (was cex, which
        # geom_polygon ignores); same fix in the three overlays below
        geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    else {
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=brewer.pal(10, "RdYlGn")) +
        geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
  }
  # For a map of SEs:
  else if(MeanOrSE=="SE") {
    # BUG FIX: scalar condition -- use short-circuit || rather than elementwise |
    if(binned_survey_age==12 || deltaSEfrom12==FALSE) {
      legend_name <- substitute(paste("SE at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=g_df[,which_data])) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=grid_tmp_lt5y[,which_data]), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0.05,0.9),
                             breaks=seq(0.1,0.85,by=0.15),
                             colours=rev(brewer.pal(10, "Spectral"))) +
        geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    else if (deltaSEfrom12==TRUE) {
      # Difference in SE relative to the 12-month surface
      legend_name <- bquote(paste(Delta, "SE at ", .(binned_survey_age), "m"))
      tmp <- tmp +
        geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=g_df[,which_data]-g_df[,"SE_12m"])) +
        geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=grid_tmp_lt5y[,which_data]-grid_tmp_lt5y[,"SE_12m"]), size=0.1, breaks=seq(-0.05,0.3,by=0.01), colour="gray50") +
        scale_fill_gradient2(name=legend_name,
                             limits=c(-0.06, 0.3),
                             breaks=seq(-0.05, 0.3, by=0.05)) +
        geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.25) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
  }
  # Clip water bodies to the country outline; NULL when there is no overlap
  water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
  if(!is.null(water_in_country)) {
    tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  }
  # Strip all axes/background decoration
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Darken the legend tick marks; grob index is fragile across ggplot2 versions
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
## Description: GAM mean/SE map at binned_survey_age
## Description: or delta SE, the difference between SE at binned_survey_age and SE at 12 months
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## MeanOrSE: "Mean", "SE"
## Note: this code is also used to make a movie
getSmoothedMap_noSubSIA <- function(binned_survey_age, MeanOrSE="Mean", deltaSEfrom12=TRUE, GreenMagentaColorScale=TRUE, movie=FALSE) {
  # Multi-country map of the GAM mean or SE surface at binned_survey_age
  # (sub-national SIAs NOT included in the prediction). With movie=TRUE a
  # progress "ticker" is drawn below the map so frames can be stitched into a
  # movie. Side effect: draws the finished map via grid.draw().
  tmp <- ggplot()
  # Column holding the statistic at the requested age
  which_data <- paste0(MeanOrSE, "_", binned_survey_age, "m")
  # Water-bodies layer
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Stitch together the adm0 outlines of every country in the global `codes`
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  for(i in 2:nrow(codes)) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
  }
  # Aggregate grid-cell polygons across all countries; keep each country's
  # contour grid in its own variable (contour_1, contour_2, ...)
  g_df_all <- NULL
  for(i in seq_len(nrow(codes))) {
    # Loads g_df and grid_tmp_lt5y for this country
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Make polygon group ids unique across countries
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    assign(paste0("contour_", i), grid_tmp_lt5y)
  }
  # For a map of means:
  if(MeanOrSE=="Mean"){
    legend_name <- substitute(paste("Mean at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
    if(GreenMagentaColorScale==TRUE) {
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=c('#8e0152','#c51b7d','#de77ae','#f1b6da','#fde0ef','#e6f5d0','#b8e186','#7fbc41','#4d9221','#276419')) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    else {
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0,1),
                             breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                             colours=brewer.pal(10, "RdYlGn")) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
    }
    # Per-country contour lines (probability scale)
    for(i in seq_len(nrow(codes))) {
      data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
      names(data) <- c("long", "lat", "z")
      tmp <- tmp +
        geom_contour(data=data, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
    }
  }
  # For a map of SEs:
  else if(MeanOrSE=="SE") {
    # BUG FIX: scalar condition -- use short-circuit || rather than elementwise |
    if(binned_survey_age==12 || deltaSEfrom12==FALSE) {
      legend_name <- substitute(paste("SE at ", binned_survey_age, "m"), list(binned_survey_age=binned_survey_age))
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=g_df_all[,which_data])) +
        scale_fill_gradientn(name=legend_name,
                             limits=c(0.05,0.9),
                             breaks=seq(0.1,0.85,by=0.15),
                             colours=rev(brewer.pal(10, "Spectral"))) +
        guides(fill=guide_colorbar(barwidth=2, barheight=20))
      # Per-country contour lines
      for(i in seq_len(nrow(codes))) {
        data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
        names(data) <- c("long", "lat", "z")
        tmp <- tmp +
          geom_contour(data=data, mapping=aes(x=long, y=lat, z=z), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
      }
    }
    else if (deltaSEfrom12==TRUE) {
      # Difference in SE relative to the 12-month surface
      legend_name <- bquote(paste(Delta, "SE at ", .(binned_survey_age), "m"))
      tmp <- tmp +
        geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=g_df_all[,which_data]-g_df_all[,"SE_12m"])) +
        scale_fill_gradient2(name=legend_name,
                             limits=c(-0.06, 0.3),
                             breaks=seq(-0.05, 0.3, by=0.05)) +
        guides(fill=guide_colorbar(barwidth=2, barheight=10))
      # Per-country contour lines of the SE difference
      for(i in seq_len(nrow(codes))) {
        data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
        names(data) <- c("long", "lat", "z")
        data_base <- get(paste0("contour_", i))[,c("long", "lat", "SE_12m")]
        names(data_base) <- c("long", "lat", "z")
        data$z <- data$z - data_base$z
        tmp <- tmp +
          geom_contour(data=data, mapping=aes(x=long, y=lat, z=z), size=0.1, breaks=seq(-0.05,0.3,by=0.01), colour="gray50")
      }
    }
  }
  # Add the adm0 boundaries
  # BUG FIX: `group` aesthetic (was z=group) and `size` (was cex, ignored)
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.25)
  # Water bodies clipped to the full region
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # If you're not plotting the movie
  if(movie==FALSE) {
    # Plot with capitals
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank())
  }
  # If you are plotting the movie
  if(movie==TRUE) {
    # BUG FIX: the ticker progress bar below uses LAB, which was never defined
    # in this function (it silently relied on a global). Define it locally,
    # mirroring getSmoothedMap_noSubSIA_ColdSpots.
    WHERE <- seq(10, 30, length=60)
    LAB <- data.frame(xmin=WHERE[1:binned_survey_age], xmax=WHERE[1:binned_survey_age])
    # Plot with capitals and ticker at the bottom
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank()) +
      # Horizontal bar
      geom_segment(aes(x=9.9, xend=30.1, y=-30, yend=-30), size=1.3) +
      # X-axis ticks
      geom_segment(aes(x=9.96, xend=9.96, y=-30.5, yend=-29.91), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[12], xend=seq(10,30,length=60)[12], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[24], xend=seq(10,30,length=60)[24], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[36], xend=seq(10,30,length=60)[36], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[48], xend=seq(10,30,length=60)[48], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=30, xend=30, y=-30.5, yend=-30), size=1.3) +
      # X-axis labels: seq(10,30,length=60)[c(1,12,24,36,48,60)]
      geom_text(aes(label="01m", x=10, y=-31)) +
      geom_text(aes(label="12m", x=seq(10,30,length=60)[12], y=-31)) +
      geom_text(aes(label="24m", x=seq(10,30,length=60)[24], y=-31)) +
      geom_text(aes(label="36m", x=seq(10,30,length=60)[36], y=-31)) +
      geom_text(aes(label="48m", x=seq(10,30,length=60)[48], y=-31)) +
      geom_text(aes(label="60m", x=30, y=-31)) +
      # Color bar (progress up to the current age)
      geom_segment(data=LAB,aes(x=xmin,xend=xmax,y=-29.9,yend=-29),size=1.5,col="seagreen")
  }
  # Darken the legend tick marks; grob index is fragile across ggplot2 versions
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: coldspots map
## Description: grid cells with < cutoff_percent mean coverage at binned_survey_age
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## Note: this code is also used to make a movie
getSmoothedMap_noSubSIA_ColdSpots <- function(binned_survey_age=12, cutoff_percent=0.8, movie=FALSE) {
  # Multi-country coldspots map: shades grid cells whose GAM mean coverage at
  # binned_survey_age is below cutoff_percent (sub-national SIAs NOT included
  # in the prediction). movie=TRUE adds a progress ticker for movie frames.
  # Side effect: prints the ggplot object.
  tmp <- ggplot()
  # Column holding the mean (logit scale) at the requested age
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Water-bodies layer
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate grid-cell polygons across all countries in the global `codes`
  g_df_all <- NULL
  for(i in seq_len(nrow(codes))) {
    # Loads g_df (and grid_tmp_lt5y) for this country
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Make polygon group ids unique across countries
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Shade the coldspots: cells with mean coverage below cutoff_percent.
  # NOTE(review): `dummy` is evaluated inside aes() -- presumably a constant
  # column of g_df loaded from the .RData; confirm it exists.
  g_df_all$coverage_at_cutoff_age <- plogis(g_df_all[,which_data])
  tmp <- tmp + geom_polygon(data=g_df_all[which(g_df_all$coverage_at_cutoff_age < cutoff_percent),], mapping=aes(x=long, y=lat, group=group_final, fill=factor(dummy))) + scale_fill_manual(name="Coldspots", values="gray75", labels=paste0("< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m")) + theme(legend.position=c(0.2,0.2), legend.background=element_rect(fill=NA, size=0.1, linetype="solid", colour="black"))
  # Stitch together the adm0/adm1 outlines of all countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes)) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # BUG FIX (both border layers): `group` aesthetic (was z=group) and `size`
  # (was cex, which geom_polygon ignores)
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, size=0.1)
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.25)
  # Water bodies clipped to the full region
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Ticker geometry for the movie progress bar (used only when movie=TRUE)
  WHERE <- seq(10, 30, length=60)
  LAB <- data.frame(xmin=WHERE[1:binned_survey_age], xmax=WHERE[1:binned_survey_age])
  # If you're not plotting the movie
  if(movie==FALSE) {
    # Plot with capitals
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank())
  }
  # If you are plotting the movie
  if(movie==TRUE) {
    # Plot with capitals and ticker at the bottom
    tmp <- tmp +
      geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
      coord_fixed() +
      theme(axis.line=element_blank(),
            axis.text.x=element_blank(),
            axis.text.y=element_blank(),
            axis.ticks=element_blank(),
            axis.title.x=element_blank(),
            axis.title.y=element_blank(),
            panel.background=element_blank(),
            panel.border=element_blank(),
            panel.grid.major=element_blank(),
            panel.grid.minor=element_blank(),
            plot.background=element_blank()) +
      # Horizontal bar
      geom_segment(aes(x=9.9, xend=30.1, y=-30, yend=-30), size=1.3) +
      # X-axis ticks
      geom_segment(aes(x=9.96, xend=9.96, y=-30.5, yend=-29.91), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[12], xend=seq(10,30,length=60)[12], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[24], xend=seq(10,30,length=60)[24], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[36], xend=seq(10,30,length=60)[36], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=seq(10,30,length=60)[48], xend=seq(10,30,length=60)[48], y=-30.5, yend=-30), size=1.3) +
      geom_segment(aes(x=30, xend=30, y=-30.5, yend=-30), size=1.3) +
      # X-axis labels: seq(10,30,length=60)[c(1,12,24,36,48,60)]
      geom_text(aes(label="01m", x=10, y=-31)) +
      geom_text(aes(label="12m", x=seq(10,30,length=60)[12], y=-31)) +
      geom_text(aes(label="24m", x=seq(10,30,length=60)[24], y=-31)) +
      geom_text(aes(label="36m", x=seq(10,30,length=60)[36], y=-31)) +
      geom_text(aes(label="48m", x=seq(10,30,length=60)[48], y=-31)) +
      geom_text(aes(label="60m", x=30, y=-31)) +
      # Color bar (progress up to the current age)
      geom_segment(data=LAB,aes(x=xmin,xend=xmax,y=-29.9,yend=-29),size=1.5,col="seagreen")
  }
  print(tmp)
}
## Description: coldspots significance map
## Description: the proportion of the CDF at binned_survey_age that is < cutoff_percent
## Description: essentially, how confident are we that a cell is a coldspot?
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_noSubSIA_Coldspots_Significance <- function(binned_survey_age=12, cutoff_percent=0.8) {
  # Coldspot "significance" map: for each grid cell, the proportion of the
  # (normal, logit-scale) distribution of the GAM mean at binned_survey_age
  # that lies below cutoff_percent -- i.e. how confident we are that the cell
  # is a coldspot. Sub-national SIAs NOT included in the prediction.
  # Side effect: draws the finished map on the current device via grid.draw().
  tmp <- ggplot()
  # Water-bodies layer
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate grid-cell polygons across all countries in the global `codes`
  g_df_all <- NULL
  for(i in seq_len(nrow(codes))) {
    # Loads g_df and grid_tmp_lt5y for this country
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m.RData"))
    # Make polygon group ids unique across countries
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    # Keep each country's contour grid (contour_1, contour_2, ...)
    assign(paste0("contour_", i), grid_tmp_lt5y)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # P(mean < cutoff) per cell, using the GAM mean and SE on the logit scale
  mean_data <- paste0("Mean_", binned_survey_age, "m")
  se_data <- paste0("SE_", binned_survey_age, "m")
  g_df_all$prop_below_cutoff_percent_at_cutoff_age <- pnorm(qlogis(cutoff_percent), mean=g_df_all[, mean_data], sd=g_df_all[, se_data])
  # Fill cells by that probability
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=prop_below_cutoff_percent_at_cutoff_age)) +
    scale_fill_gradientn(name=paste0("Proportion of\ndistribution of mean\n< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m"),
                         colours=colorRampPalette(rev(brewer.pal(9, "RdYlBu")))(50),
                         limits=c(0,1)) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Outline the (mean-based) coldspots with a contour at 0.8 coverage
  for(i in seq_len(nrow(codes))) {
    data <- get(paste0("contour_", i))[,c("long", "lat", mean_data)]
    names(data) <- c("long", "lat", "z")
    tmp <- tmp +
      geom_contour(data=data, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=0.8, colour="black")
  }
  # Stitch together the adm0/adm1 outlines of all countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  for(i in 2:nrow(codes)) {
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
    adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
  }
  # BUG FIX (both border layers): `group` aesthetic (was z=group) and `size`
  # (was cex, which geom_polygon ignores)
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, size=0.1)
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, size=0.25)
  # Water bodies clipped to the full region
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates (lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # (The movie-ticker WHERE/LAB variables from the sibling functions were
  # computed here but never used, so they have been dropped.)
  # Plot with capitals
  tmp <- tmp +
    geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Darken the legend tick marks; grob index is fragile across ggplot2 versions
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: map of the prevalence of grid cells as coldspots, between lower_age and upper_age
## Description: grid cells with < cutoff_percent mean coverage, at ages between lower_age and upper_age
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_noSubSIA_Coldspots_Prevalence <- function(lower_age, upper_age, cutoff_percent) {
  ## Map (all countries) of "coldspot prevalence": the proportion of monthly
  ## ages in [lower_age, upper_age] at which each grid cell's mean coverage
  ## falls below cutoff_percent.
  ##
  ## Args:
  ##   lower_age, upper_age: age range in months; selects the Mean_<age>m
  ##     columns (logit scale) of the loaded grid data.
  ##   cutoff_percent: coverage threshold (proportion, e.g. 0.8).
  ##
  ## Depends on the global `codes` table and on objects (`g_df`) created by
  ## load() below. Draws the figure via grid.draw() as a side effect.
  # For plot
  tmp <- ggplot()
  # Which columns are we plotting? One Mean_<age>m column per month of age
  which_data <- paste0("Mean_", lower_age:upper_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df in this environment)
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_", codes$DHS_code[i], "_6-60m.RData"))
    # Make polygon groups unique across countries before stacking
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Proportion of ages each grid cell spends as a coldspot: back-transform the
  # logit-scale means, flag cells below the cutoff, then average the flags.
  logit_coverage_at_included_ages <- as.matrix(g_df_all[, which_data])
  coverage_at_included_ages <- plogis(logit_coverage_at_included_ages)
  indicator_for_coldspot <- ifelse(coverage_at_included_ages < cutoff_percent, 1, 0)
  total_months <- ncol(indicator_for_coldspot)
  prevalence_coldspot <- rowSums(indicator_for_coldspot) / total_months
  g_df_all$coverage_at_cutoff_age <- prevalence_coldspot
  # Legend name
  legend_name <- paste("Coldspot prevalence\n", lower_age, "-", upper_age, "m\n", "All cells", sep="")
  # Fill cells by coldspot prevalence
  tmp <- tmp +
    geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=coverage_at_cutoff_age)) +
    scale_fill_gradient(name=legend_name, limits=c(0, 1), low="white", high="red", na.value="green") +
    guides(fill=guide_colorbar(barwidth=2, barheight=10))
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      # Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
      # Stitch together
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
      adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
    }
  }
  # Add the adm1 boundaries. FIX: map `group` (not the non-existent `z`
  # aesthetic) so the fortified shapefile rings are not joined into one path.
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals; strip all axes and backgrounds for a clean map
  tmp <- tmp +
    geom_point(data=cap, aes(long, lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version;
  # verify if the legend ticks stop rendering gray after an upgrade.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
## Description: same as above, filtering population size of under 5 year olds (cutoff_popsize)
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_noSubSIA_Coldspots_Prevalence_Pop <- function(lower_age, upper_age, cutoff_percent, cutoff_popsize) {
  ## Same as getSmoothedMap_noSubSIA_Coldspots_Prevalence, but cells whose
  ## under-5 population is below cutoff_popsize are zeroed out of the fill.
  ##
  ## Args:
  ##   lower_age, upper_age: age range in months (Mean_<age>m columns).
  ##   cutoff_percent: coverage threshold defining a coldspot.
  ##   cutoff_popsize: minimum under-5 population for a cell to be shown.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`, `g`,
  ## `grid_tmp_lt5y`) created by load(). Draws the figure via grid.draw().
  # For plot
  tmp <- ggplot()
  # Which columns are we plotting?
  which_data <- paste0("Mean_", lower_age:upper_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df in this environment)
    load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_", codes$DHS_code[i], "_6-60m.RData"))
    # Make polygon groups unique across countries before stacking
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Proportion of ages each grid cell spends as a coldspot
  logit_coverage_at_included_ages <- as.matrix(g_df_all[, which_data])
  coverage_at_included_ages <- plogis(logit_coverage_at_included_ages)
  indicator_for_coldspot <- ifelse(coverage_at_included_ages < cutoff_percent, 1, 0)
  total_months <- ncol(indicator_for_coldspot)
  prevalence_coldspot <- rowSums(indicator_for_coldspot) / total_months
  g_df_all$coverage_at_cutoff_age <- prevalence_coldspot
  # Get the number of people at each grid cell
  g_df_all_2 <- NULL
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df, g, grid_tmp_lt5y)
    load(paste0("./Results/GAMtoPeople_Della/GAMtoPeople_", codes$DHS_code[i], "_6-60m.RData"))
    # Attach the under-5 population of each cell via the shared dummy id
    rowname <- match(g_df$dummy, g$dummy)
    g_df$lt5y <- grid_tmp_lt5y[rowname, "lt5y"]
    # Make polygon groups unique across countries before stacking
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all_2 <- rbind(g_df_all_2, g_df)
  }
  # Multiplier is 0 below cutoff_popsize and 1 at/above it.
  # NOTE(review): this assumes g_df_all and g_df_all_2 are row-aligned
  # (same cells, same order, from the two Della outputs) -- verify.
  g_df_all$lt5y_mult <- ifelse(g_df_all_2$lt5y < cutoff_popsize, 0, 1)
  # Legend name
  legend_name <- paste("Coldspot\nprevalence\n", lower_age, "-", upper_age, "m\n", "Min: ", cutoff_popsize, sep="")
  # Fill cells by (prevalence x population filter)
  tmp <- tmp +
    geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=coverage_at_cutoff_age * lt5y_mult)) +
    scale_fill_gradient(name=legend_name, limits=c(0, 1), low="white", high="red", na.value="green") +
    guides(fill=guide_colorbar(barwidth=2, barheight=10))
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      # Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
      # Stitch together
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
      adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
    }
  }
  # Add the adm1 boundaries. FIX: map `group` (not the non-existent `z`
  # aesthetic) so polygon rings are grouped correctly.
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals; strip all axes and backgrounds for a clean map
  tmp <- tmp +
    geom_point(data=cap, aes(long, lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: map of the number of people between [0, 60] months of age
## Description: assuming that monthly age bins are uniformly distributed
## Spatial scale: all countries
getSmoothedMap_WorldPop <- function() {
  ## Map (all countries) of the under-5 population size per grid cell,
  ## shown on a log colour scale.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`, `g`,
  ## `grid_tmp_lt5y`) created by load(). Draws the figure via grid.draw().
  # For plot
  tmp <- ggplot()
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Legend name (loop-invariant, so computed once)
  legend_name <- paste("Population size\n", "0-60m", sep="")
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df, g, grid_tmp_lt5y)
    load(paste0("./Results/GAMtoPeople_Della/GAMtoPeople_", codes$DHS_code[i], "_6-60m.RData"))
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    # Attach the under-5 population of each cell via the shared dummy id
    rowname <- match(g_df$dummy, g$dummy)
    unvacc <- grid_tmp_lt5y[rowname, "lt5y"] #+ 1 # since we show on log scale
    g_df$unvacc <- unvacc
    # Plot this country's cells and border. FIX: map `group` (not the
    # non-existent `z` aesthetic) in the border layer.
    tmp <- tmp +
      geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=unvacc)) +
      scale_fill_gradientn(name=legend_name,
                           colours=colorRampPalette(rev(brewer.pal(11, "RdBu")))(50),
                           trans="log",
                           na.value="royalblue4", # aka those with 0 people
                           limits=c(1, 250000),
                           breaks=c(1, 10, 100, 1000, 10000, 100000)) +
      geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
      guides(fill=guide_colorbar(barwidth=2, barheight=20))
  }
  # Get the outline of all the countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      # Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    }
  }
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long) (kept for consistency
  # with the other map functions; not plotted here in the original)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Settings: strip all axes and backgrounds for a clean map
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
## Description: map of the number of unvaccinated people between [lower_age, upper_age] months of age
## Description: assuming that monthly age bins are uniformly distributed
## Description: the minimum lower_age here is 6 months (i.e., eligibility for the analysis)
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_Unvaccinateds <- function(lower_age, upper_age) {
  ## Map (all countries) of the estimated number of unvaccinated people
  ## between lower_age and upper_age months, shown on a log colour scale.
  ## Assumes monthly age bins are uniformly distributed over 60 months
  ## (hence the lt5y/60 factor below).
  ##
  ## Args:
  ##   lower_age, upper_age: age range in months; minimum lower_age is 6
  ##     (analysis eligibility).
  ##
  ## Depends on the global `codes` table and on objects (`g_df`, `g`,
  ## `grid_tmp_lt5y`) created by load(). Draws the figure via grid.draw().
  # For plot
  tmp <- ggplot()
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Legend name (loop-invariant, so computed once)
  legend_name <- paste("Unvaccinateds\n", lower_age, "-", upper_age, "m", sep="")
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df, g, grid_tmp_lt5y)
    load(paste0("./Results/GAMtoPeople_Della/GAMtoPeople_", codes$DHS_code[i], "_6-60m.RData"))
    # Get border lines to add
    adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
    # Unvaccinated count = sum of per-age unvaccinated probabilities times
    # the per-month population (total under-5 population / 60 months)
    rowname <- match(g_df$dummy, g$dummy)
    unvacc <- (rowSums(grid_tmp_lt5y[rowname, paste0("p_unvacc_", lower_age:upper_age, "m")]) * (grid_tmp_lt5y[rowname, "lt5y"]/60)) #+ 1 # since we show on log scale
    g_df$unvacc <- unvacc
    # Plot this country's cells and border. FIX: map `group` (not the
    # non-existent `z` aesthetic) in the border layer.
    tmp <- tmp +
      geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=unvacc)) +
      scale_fill_gradientn(name=legend_name,
                           colours=colorRampPalette(rev(brewer.pal(9, "RdYlBu")))(50),
                           trans="log",
                           na.value="royalblue4", # aka those with 0 people
                           limits=c(1, 50000),
                           breaks=c(1, 10, 100, 1000, 10000)) +
      geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
      guides(fill=guide_colorbar(barwidth=2, barheight=20))
  }
  # Get the outline of all the countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      # Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    }
  }
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long) (kept for consistency
  # with the other map functions; not plotted here in the original)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Settings: strip all axes and backgrounds for a clean map
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: GAM mean map at binned_survey_age, with effect of sub-national SIAs
## Description: with and without contour lines
## Sub-national SIAs: included in prediction
## Spatial scale: single country
getSmoothedMap_WithSubSIA_Mean_Single_NoContour <- function(Country, binned_survey_age) {
  ## Single-country GAM mean coverage map at binned_survey_age, with
  ## sub-national SIAs included in the prediction; no contour lines.
  ##
  ## Args:
  ##   Country: country name matched against codes$Country.
  ##   binned_survey_age: age in months; selects the Mean_<age>m column.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`) created by
  ## load(). Draws the figure via grid.draw().
  # Index of the requested country in the codes table.
  # NOTE(review): assumes rownames(codes) are the numeric row positions.
  i <- as.numeric(rownames(codes))[which(Country == codes$Country)]
  # Placeholders
  DHS_code <- codes$DHS_code[i]
  DHS_name <- codes$DHS_name[i]
  ADM_code <- codes$ADM_code[i]
  # Load the Della output (creates g_df in this environment)
  load(paste0("./Results/GAMtoMeanAndSE_Della_WithSubnationals/GAMtoMeanAndSE_WithSubnationals_", DHS_code, "_6-60m.RData"))
  # Get border lines to add
  adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
  # Plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\nSub-national SIAs", sep="")
  # Fill cells with the back-transformed (plogis) mean coverage.
  # FIX: map `group` (not the non-existent `z` aesthetic) in the border layer.
  tmp <- tmp +
    geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0, 1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Add water in that country, if applicable (gIntersection returns NULL
  # when the country contains none of the mapped water bodies)
  water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
  if (!is.null(water_in_country)) {
    # Plot water
    tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  }
  # Settings: strip all axes and backgrounds for a clean map
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
getSmoothedMap_WithSubSIA_Mean_Single <- function(Country, binned_survey_age) {
  ## Single-country GAM mean coverage map at binned_survey_age, with
  ## sub-national SIAs included in the prediction and contour lines added
  ## (otherwise identical to getSmoothedMap_WithSubSIA_Mean_Single_NoContour).
  ##
  ## Args:
  ##   Country: country name matched against codes$Country.
  ##   binned_survey_age: age in months; selects the Mean_<age>m column.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`,
  ## `grid_tmp_lt5y`) created by load(). Draws the figure via grid.draw().
  # Index of the requested country in the codes table.
  # NOTE(review): assumes rownames(codes) are the numeric row positions.
  i <- as.numeric(rownames(codes))[which(Country == codes$Country)]
  # Placeholders
  DHS_code <- codes$DHS_code[i]
  DHS_name <- codes$DHS_name[i]
  ADM_code <- codes$ADM_code[i]
  # Load the Della output (creates g_df and grid_tmp_lt5y)
  load(paste0("./Results/GAMtoMeanAndSE_Della_WithSubnationals/GAMtoMeanAndSE_WithSubnationals_", DHS_code, "_6-60m.RData"))
  # Get border lines to add
  adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
  # Plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\nSub-national SIAs", sep="")
  # Fill cells with the back-transformed (plogis) mean coverage and overlay
  # 5%-interval contours. FIX: map `group` (not the non-existent `z`
  # aesthetic) in the border layer.
  tmp <- tmp +
    geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data]))) +
    geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50") +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0, 1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    geom_polygon(data=adm0, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Add water in that country, if applicable (gIntersection returns NULL
  # when the country contains none of the mapped water bodies)
  water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
  if (!is.null(water_in_country)) {
    # Plot water
    tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  }
  # Settings: strip all axes and backgrounds for a clean map
  tmp <- tmp + coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: GAM mean map at binned_survey_age, with jittering or DHS weights
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_noSubSIA_Mean_Jitter <- function(binned_survey_age) {
  ## Map (all countries) of GAM mean coverage at binned_survey_age, from the
  ## sensitivity analysis with jittered cluster locations.
  ##
  ## Args:
  ##   binned_survey_age: age in months; selects the Mean_<age>m column.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`,
  ## `grid_tmp_lt5y`) created by load(). Draws the figure via grid.draw().
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Get the outline of all the countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      ## Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    }
  }
  # Aggregate data across all countries; keep each country's contour grid
  # in a preallocated list (replaces the assign()/get() dynamic names)
  g_df_all <- NULL
  contour_data <- vector("list", nrow(codes))
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df and grid_tmp_lt5y)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_", codes$DHS_code[i], "_6-60m_Jitter.RData"))
    # Make polygon groups unique across countries before stacking
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    # Keep this country's grid for the contour layers below
    contour_data[[i]] <- grid_tmp_lt5y
  }
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\n", "Jittered locations", sep="")
  # Fill cells with the back-transformed (plogis) mean coverage
  tmp <- tmp +
    geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0, 1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Add the contour lines, one layer per country
  for (i in seq_len(nrow(codes))) {
    contour_df <- contour_data[[i]][, c("long", "lat", which_data)]
    names(contour_df) <- c("long", "lat", "z")
    tmp <- tmp +
      geom_contour(data=contour_df, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
  }
  # Add the adm0 boundaries. FIX: map `group` (not the non-existent `z`
  # aesthetic) so polygon rings are grouped correctly.
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals; strip all axes and backgrounds for a clean map
  tmp <- tmp +
    geom_point(data=cap, aes(long, lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
getSmoothedMap_noSubSIA_Mean_Weights <- function(binned_survey_age) {
  ## Map (all countries) of GAM mean coverage at binned_survey_age, from the
  ## sensitivity analysis using DHS sampling weights.
  ##
  ## Args:
  ##   binned_survey_age: age in months; selects the Mean_<age>m column.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`,
  ## `grid_tmp_lt5y`) created by load(). Draws the figure via grid.draw().
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Get the outline of all the countries
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      ## Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
    }
  }
  # Aggregate data across all countries; keep each country's contour grid
  # in a preallocated list (replaces the assign()/get() dynamic names)
  g_df_all <- NULL
  contour_data <- vector("list", nrow(codes))
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df and grid_tmp_lt5y)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_", codes$DHS_code[i], "_6-60m_Weights.RData"))
    # Make polygon groups unique across countries before stacking
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
    # Keep this country's grid for the contour layers below
    contour_data[[i]] <- grid_tmp_lt5y
  }
  # Legend name
  legend_name <- paste("Mean at ", binned_survey_age, "m\n", "With DHS weights", sep="")
  # Fill cells with the back-transformed (plogis) mean coverage
  tmp <- tmp +
    geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
    scale_fill_gradientn(name=legend_name,
                         limits=c(0, 1),
                         breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
                         colours=brewer.pal(10, "RdYlGn")) +
    guides(fill=guide_colorbar(barwidth=2, barheight=20))
  # Add the contour lines, one layer per country
  for (i in seq_len(nrow(codes))) {
    contour_df <- contour_data[[i]][, c("long", "lat", which_data)]
    names(contour_df) <- c("long", "lat", "z")
    tmp <- tmp +
      geom_contour(data=contour_df, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
  }
  # Add the adm0 boundaries. FIX: map `group` (not the non-existent `z`
  # aesthetic) so polygon rings are grouped correctly.
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals; strip all axes and backgrounds for a clean map
  tmp <- tmp +
    geom_point(data=cap, aes(long, lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  # Change legend tick marks to dark.
  # NOTE(review): hard-coded grob indices depend on the ggplot2 version.
  g <- ggplotGrob(tmp)
  g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
  grid.draw(g)
}
################################################################################
## Description: coldspots map at binned_survey_age, with jittering or DHS weights
## Description: grid cells with < cutoff_percent mean coverage at binned_survey_age
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
getSmoothedMap_noSubSIA_ColdSpots_Jitter <- function(binned_survey_age=12, cutoff_percent=0.8) {
  ## Map (all countries) of coldspot grid cells (mean coverage below
  ## cutoff_percent at binned_survey_age), from the sensitivity analysis
  ## with jittered cluster locations.
  ##
  ## Args:
  ##   binned_survey_age: age in months; selects the Mean_<age>m column.
  ##   cutoff_percent: coverage threshold defining a coldspot.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`) created by
  ## load(). Prints the figure as a side effect.
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df in this environment)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_", codes$DHS_code[i], "_6-60m_Jitter.RData"))
    # Make polygon groups unique across countries before stacking
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Find and fill the coldspots, aka cells with less than cutoff_percent coverage.
  # NOTE(review): scale_fill_manual with a single value assumes factor(dummy)
  # has one level in the subset -- verify against the loaded data.
  g_df_all$coverage_at_cutoff_age <- plogis(g_df_all[,which_data])
  tmp <- tmp + geom_polygon(data=g_df_all[which(g_df_all$coverage_at_cutoff_age < cutoff_percent),], mapping=aes(x=long, y=lat, group=group_final, fill=factor(dummy))) + scale_fill_manual(name="Coldspots (Jittered locations)", values="gray75", labels=paste0("< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m")) + theme(legend.position=c(0.2,0.2), legend.background=element_rect(fill=NA, size=0.1, linetype="solid", colour="black"))
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      # Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
      # Stitch together
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
      adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
    }
  }
  # Add the adm1 boundaries. FIX: map `group` (not the non-existent `z`
  # aesthetic) so polygon rings are grouped correctly.
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals; strip all axes and backgrounds for a clean map
  tmp <- tmp +
    geom_point(data=cap, aes(long, lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  print(tmp)
}
getSmoothedMap_noSubSIA_ColdSpots_Weights <- function(binned_survey_age=12, cutoff_percent=0.8) {
  ## Map (all countries) of coldspot grid cells (mean coverage below
  ## cutoff_percent at binned_survey_age), from the sensitivity analysis
  ## using DHS sampling weights.
  ##
  ## Args:
  ##   binned_survey_age: age in months; selects the Mean_<age>m column.
  ##   cutoff_percent: coverage threshold defining a coldspot.
  ##
  ## Depends on the global `codes` table and on objects (`g_df`) created by
  ## load(). Prints the figure as a side effect.
  # For plot
  tmp <- ggplot()
  # Which column are we plotting?
  which_data <- paste0("Mean_", binned_survey_age, "m")
  # Get water bodies
  water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
  # Aggregate data across all countries
  g_df_all <- NULL
  for (i in seq_len(nrow(codes))) {
    # Load the Della output (creates g_df in this environment)
    load(paste0("./Results/GAMtoMeanAndSE_Della_JitterAndWeights/GAMtoMeanAndSE_", codes$DHS_code[i], "_6-60m_Weights.RData"))
    # Make polygon groups unique across countries before stacking
    g_df$country <- i
    g_df$group_final <- paste0(g_df$country, "_", g_df$group)
    g_df_all <- rbind(g_df_all, g_df)
  }
  # Plot the blank grid cells
  tmp <- tmp + geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final), fill=NA)
  # Find and fill the coldspots, aka cells with less than cutoff_percent coverage.
  # NOTE(review): scale_fill_manual with a single value assumes factor(dummy)
  # has one level in the subset -- verify against the loaded data.
  g_df_all$coverage_at_cutoff_age <- plogis(g_df_all[,which_data])
  tmp <- tmp + geom_polygon(data=g_df_all[which(g_df_all$coverage_at_cutoff_age < cutoff_percent),], mapping=aes(x=long, y=lat, group=group_final, fill=factor(dummy))) + scale_fill_manual(name="Coldspots (With DHS weights)", values="gray75", labels=paste0("< ", cutoff_percent*100, "% at ", sprintf("%02d", binned_survey_age), "m")) + theme(legend.position=c(0.2,0.2), legend.background=element_rect(fill=NA, size=0.1, linetype="solid", colour="black"))
  # Get the outline of all the countries (adm0 and adm1)
  adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
  adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm1.shp"))
  if (nrow(codes) > 1) {
    for (i in 2:nrow(codes)) {
      # Get border lines to add
      adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
      adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm1.shp"))
      # Stitch together
      adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
      adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
    }
  }
  # Add the adm1 boundaries. FIX: map `group` (not the non-existent `z`
  # aesthetic) so polygon rings are grouped correctly.
  tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, group=group), colour="gray60", fill=NA, cex=0.1)
  # Add the adm0 boundaries
  tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, group=group), colour="gray40", fill=NA, cex=0.25)
  # Add water in all the countries
  water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
  # Plot water
  tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
  # Capital-city coordinates, stored as c(lat, long)
  cg.cap <- c(-4.316667, 15.316667)
  ug.cap <- c(0.313611, 32.581111)
  ke.cap <- c(-1.283333, 36.816667)
  rw.cap <- c(-1.943889, 30.059444)
  by.cap <- c(-3.383333, 29.366667)
  tz.cap <- c(-6.173056, 35.741944)
  za.cap <- c(-15.416667, 28.283333)
  mi.cap <- c(-13.98972222, 33.78861111)
  mo.cap <- c(-25.966667, 32.583333)
  zi.cap <- c(-17.863889, 31.029722)
  cap <- rbind(cg.cap, ug.cap, ke.cap, rw.cap, by.cap, tz.cap, za.cap, mi.cap, mo.cap, zi.cap)
  cap <- data.frame(cap)
  colnames(cap) <- c("lat", "long")
  # Plot with capitals; strip all axes and backgrounds for a clean map
  tmp <- tmp +
    geom_point(data=cap, aes(long, lat), cex=1.2, fill="plum", col="gray30", pch=21) +
    coord_fixed() +
    theme(axis.line=element_blank(),
          axis.text.x=element_blank(),
          axis.text.y=element_blank(),
          axis.ticks=element_blank(),
          axis.title.x=element_blank(),
          axis.title.y=element_blank(),
          panel.background=element_blank(),
          panel.border=element_blank(),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          plot.background=element_blank())
  print(tmp)
}
################################################################################
## Description: GAM mean map at binned_survey_age, incorporating urbanicity
## Sub-national SIAs: not included in prediction
## Spatial scale: all countries
## Map_types: "Urban", "Rural", "Urban-Rural"
# Continental map of the GAM mean coverage at `binned_survey_age` months,
# incorporating urbanicity. `map_type` selects one of three panels:
#   "Urban"       -- predicted mean among urban clusters,
#   "Rural"       -- predicted mean among rural clusters,
#   "Urban-Rural" -- cell-wise difference of the two (on the logit scale,
#                    as stored in the "Mean_<age>m" columns).
# Draws the final grid object (grid.draw) as a side effect.
#
# NOTE(review): depends on names not defined here -- `codes`,
# `readShapeSpatial()` (maptools), ggplot2/RColorBrewer/grid, and the
# `g_df` / `grid_tmp_lt5y` objects created by the load()'ed .RData files.
getSmoothedMap_noSubSIA_Mean_UrbanRural <- function(binned_survey_age, map_type) {
# For plot
tmp <- ggplot()
# Which column are we plotting?
which_data <- paste0("Mean_", binned_survey_age, "m")
# Get water bodies
water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
# Get the outline of all the countries
adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[1], "_adm/", codes$ADM_code[1], "_adm0.shp"))
for(i in 2:nrow(codes)) {
## Get border lines to add
adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes$ADM_code[i], "_adm/", codes$ADM_code[i], "_adm0.shp"))
adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
}
## For a map of Urban:
if(map_type=="Urban") {
# Aggregate data across all countries
g_df_all <- NULL
for(i in 1:nrow(codes)) {
# Load the Della output (creates `g_df` and `grid_tmp_lt5y`)
load(paste0("./Results/GAMtoMeanAndSE_Della_urbanrural/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Urban.RData"))
# Aggregate the cells using rbind
g_df$country <- i
g_df$group_final <- paste0(g_df$country, "_", g_df$group)
g_df_all <- rbind(g_df_all, g_df)
# Create a new data frame for contour
# NOTE(review): assign()/get() with per-country names is fragile; a list
# indexed by i would be clearer (no behavior change made here).
assign(paste0("contour_", i), grid_tmp_lt5y)
}
# Legend name
legend_name <- paste("Mean at ", binned_survey_age, "m\n", "Urban", sep="")
tmp <- tmp +
geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
scale_fill_gradientn(name=legend_name,
limits=c(0,1),
breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
colours=brewer.pal(10, "RdYlGn")) +
guides(fill=guide_colorbar(barwidth=2, barheight=20))
# Add the contour lines (one layer per country)
for(i in 1:nrow(codes)) {
data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
names(data) <- c("long", "lat", "z")
tmp <- tmp +
geom_contour(data=data, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
}
}
## For a map of Rural: (mirrors the Urban branch with the _Rural outputs)
if(map_type=="Rural") {
# Aggregate data across all countries
g_df_all <- NULL
for(i in 1:nrow(codes)) {
# Load the Della output
load(paste0("./Results/GAMtoMeanAndSE_Della_urbanrural/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Rural.RData"))
# Aggregate the cells using rbind
g_df$country <- i
g_df$group_final <- paste0(g_df$country, "_", g_df$group)
g_df_all <- rbind(g_df_all, g_df)
# Create a new data frame for contour
assign(paste0("contour_", i), grid_tmp_lt5y)
}
# Legend name
legend_name <- paste("Mean at ", binned_survey_age, "m\n", "Rural", sep="")
tmp <- tmp +
geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=plogis(g_df_all[,which_data]))) +
scale_fill_gradientn(name=legend_name,
limits=c(0,1),
breaks=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
values=c(0, 0.5, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1),
colours=brewer.pal(10, "RdYlGn")) +
guides(fill=guide_colorbar(barwidth=2, barheight=20))
# Add the contour lines
for(i in 1:nrow(codes)) {
data <- get(paste0("contour_", i))[,c("long", "lat", which_data)]
names(data) <- c("long", "lat", "z")
tmp <- tmp +
geom_contour(data=data, mapping=aes(x=long, y=lat, z=plogis(z)), size=0.1, breaks=seq(0,1,by=0.05), colour="gray50")
}
}
## For a map of Urban-Rural:
if(map_type=="Urban-Rural") {
# Urban - Aggregate data across all countries
g_df_all <- NULL
for(i in 1:nrow(codes)) {
# Load the Della output
load(paste0("./Results/GAMtoMeanAndSE_Della_urbanrural/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Urban.RData"))
# Aggregate the cells using rbind
g_df$country <- i
g_df$group_final <- paste0(g_df$country, "_", g_df$group)
g_df_all <- rbind(g_df_all, g_df)
# Create a new data frame for contour
assign(paste0("contour_urban_", i), grid_tmp_lt5y)
}
g_df_all_urban <- g_df_all
# Rural - Aggregate data across all countries
g_df_all <- NULL
for(i in 1:nrow(codes)) {
# Load the Della output
load(paste0("./Results/GAMtoMeanAndSE_Della_urbanrural/GAMtoMeanAndSE_",codes$DHS_code[i],"_6-60m_Rural.RData"))
# Aggregate the cells using rbind
g_df$country <- i
g_df$group_final <- paste0(g_df$country, "_", g_df$group)
g_df_all <- rbind(g_df_all, g_df)
# Create a new data frame for contour
assign(paste0("contour_rural_", i), grid_tmp_lt5y)
}
g_df_all_rural <- g_df_all
# Combine U and R
# NOTE(review): assumes urban and rural frames are row-aligned (same cells
# in the same order), and the difference is taken on the stored (logit)
# scale, not after plogis -- confirm both are intended.
g_df_all <- data.frame(long=g_df_all_urban$long, lat=g_df_all_urban$lat, group_final=g_df_all_urban$group_final, to_plot=g_df_all_urban[,which_data]-g_df_all_rural[,which_data])
# Legend name
legend_name <- paste("Mean at ", binned_survey_age, "m\n", "Urban-Rural", sep="")
tmp <- tmp +
geom_polygon(data=g_df_all, mapping=aes(x=long, y=lat, group=group_final, fill=to_plot)) +
scale_fill_gradientn(name=legend_name,
limits=c(-0.1,0.6),
breaks=seq(-0.05, 0.55, by=0.1),
colours=brewer.pal(9, "YlGnBu")) +
guides(fill=guide_colorbar(barwidth=2, barheight=20))
}
# Add the adm0 boundaries
tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25)
# Add water in all the countries
water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
# Plot water
tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
# Get capitals -- each pair is c(latitude, longitude).
cg.cap <- c(-4.316667, 15.316667)
ug.cap <- c(0.313611, 32.581111)
ke.cap <- c(-1.283333, 36.816667)
rw.cap <- c(-1.943889, 30.059444)
by.cap <- c(-3.383333, 29.366667)
tz.cap <- c(-6.173056, 35.741944)
za.cap <- c(-15.416667, 28.283333)
mi.cap <- c(-13.98972222, 33.78861111)
mo.cap <- c(-25.966667, 32.583333)
zi.cap <- c(-17.863889, 31.029722)
cap <- rbind(cg.cap,ug.cap,ke.cap,rw.cap,by.cap,tz.cap,za.cap,mi.cap,mo.cap,zi.cap)
cap <- data.frame(cap)
colnames(cap) <- c("lat", "long")
# Plot with capitals
tmp <- tmp +
geom_point(data=cap, aes(long,lat), cex=1.2, fill="plum", col="gray30", pch=21) +
coord_fixed() +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
# Change legend tick marks to dark
# NOTE(review): indexing into the grob tree by fixed positions
# (g$grobs[[8]]...) is fragile across ggplot2 versions -- verify after
# any ggplot2 upgrade.
g <- ggplotGrob(tmp)
g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
grid.draw(g)
}
################################################################################
## Description: difference in GAM mean, between model with and without the sub-national SIA covariate
## Spatial scale: single country
# Single-country map of the difference in GAM mean coverage at
# `binned_survey_age` months between the model WITH the sub-national-SIA
# covariate ("NoSubnationals" output) and the model WITHOUT it. Draws the
# final grid object (grid.draw) as a side effect.
#
# NOTE(review): depends on `codes`, `readShapeSpatial()` (maptools),
# ggplot2/grid, and on `g_df` / `grid_tmp_lt5y` created by the load()'ed
# .RData files. The SECOND load() below deliberately overwrites both objects.
getSmoothedMap_noSubSIA_InModel_Minus_noSubSIA_Mean_Single <- function(Country, binned_survey_age) {
# Get the rest
i <- as.numeric(rownames(codes))[which(Country==codes$Country)]
# Placeholders
DHS_code <- codes$DHS_code[i]
DHS_name <- codes$DHS_name[i]
ADM_code <- codes$ADM_code[i]
# Which column are we plotting?
which_data <- paste0("Mean_", binned_survey_age, "m")
# Load the Della output for "noSubSIA" and capture its (probability-scale)
# predictions before the next load() replaces g_df / grid_tmp_lt5y.
load(paste0("./Results/GAMtoMeanAndSE_Della/GAMtoMeanAndSE_", DHS_code,"_6-60m.RData"))
g_df_offset <- plogis(g_df[,which_data])
grid_tmp_lt5y_offset <- plogis(grid_tmp_lt5y[,which_data])
# Load the Della output for "noSubSIA_InModel" (overwrites g_df, grid_tmp_lt5y)
load(paste0("./Results/GAMtoMeanAndSE_Della_NoSubnationals/GAMtoMeanAndSE_", DHS_code,"_6-60m_NoSubnationals.RData"))
# Get border lines to add
adm0 <- readShapeSpatial(paste0("./Data/ADM/", ADM_code, "_adm/", ADM_code, "_adm0.shp"))
# Plot
tmp <- ggplot()
# Get water bodies
water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
# Legend name (Greek Delta rendered via bquote)
legend_name <- bquote(paste(Delta, "Mean at ", .(binned_survey_age), "m"))
# Fill/contour show new-model minus old-model coverage; assumes both model
# outputs share the same cell order.
tmp <- tmp +
geom_polygon(data=g_df, mapping=aes(x=long, y=lat, group=group, fill=plogis(g_df[,which_data])-g_df_offset)) +
geom_contour(data=grid_tmp_lt5y, mapping=aes(x=long, y=lat, z=plogis(grid_tmp_lt5y[,which_data])-grid_tmp_lt5y_offset), size=0.1, breaks=seq(-0.05,0.05,by=0.005), colour="gray50") +
scale_fill_gradient2(name=legend_name,
limits=c(-0.02, 0.02),
breaks=seq(-0.02, 0.02, by=0.01)) +
geom_polygon(data=adm0, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.25) +
guides(fill=guide_colorbar(barwidth=2, barheight=20))
# Add water in that country, if applicable
water_in_country <- rgeos::gIntersection(water, adm0, drop_lower_td=TRUE)
if(!is.null(water_in_country)) {
# Plot water
tmp <- tmp + geom_polygon(data=water_in_country, aes(x=long, y=lat, group=group), colour="gray40", size=0.2, fill="azure", alpha=1)
}
# Settings
tmp <- tmp + coord_fixed() +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
# Change legend tick marks to dark
# NOTE(review): fixed grob-tree indices are fragile across ggplot2 versions.
g <- ggplotGrob(tmp)
g$grobs[[8]][[1]][[1]][[1]][[5]]$gp$col <- "gray50"
grid.draw(g)
}
################################################################################
## Description: maps of DHS data points
# Main figure: map of DHS cluster locations (excluding Rwanda/Burundi, which
# are shaded as a separate region), with points colored by the binned number
# of sampled individuals per cluster. Returns the ggplot object.
#
# NOTE(review): depends on `codes_noRB`, `codes_RB`, `readShapeSpatial()`
# (maptools), `scaleBar()`, ggplot2/RColorBrewer, and on `dataClust` /
# `dataInd` created by the load()'ed per-country .RData files.
MainFigure_DHS <- function() {
# Get the long/lat coordinates of points in each country
dat_DHS <- NULL
for(i in 1:nrow(codes_noRB)) {
# Call in the country-specific data from getDataByInd_Cluster
load(paste0("./Data/Manipulated_data/", as.character(codes_noRB$DHS_code[i]), ".RData"))
# If some clusters have 0 people, remove them from the plot
empty_clusters <- subset(unique(dataClust$cluster_id), !(unique(dataClust$cluster_id) %in% unique(dataInd$cluster_id)))
# Save the subsetted data
long <- dataClust$long[!(dataClust$cluster_id %in% (empty_clusters))]
lat <- dataClust$lat[!(dataClust$cluster_id %in% (empty_clusters))]
# Per-cluster individual counts.
# NOTE(review): assumes table() order matches the retained dataClust row
# order so long/lat/inds align -- TODO confirm.
inds <- c(table(dataInd$cluster_id))
# Save the data (dat_DHS grows as a list of parallel vectors)
dat_DHS$long <- c(dat_DHS$long, long)
dat_DHS$lat <- c(dat_DHS$lat, lat)
dat_DHS$inds <- c(dat_DHS$inds, inds)
dat_DHS$country <- c(dat_DHS$country, rep(as.character(codes_noRB$Country[i]), times=length(long)))
}
# Convert to data.frame
dat_DHS <- as.data.frame(dat_DHS)
dat_DHS <- dat_DHS[complete.cases(dat_DHS),]
# Remove the (0,0) point manually
# NOTE(review): `-which(cond)` drops ALL rows if no row satisfies cond
# (negative-empty-index footgun); safe only while a long<1 point exists.
dat_DHS <- dat_DHS[-which(dat_DHS$long<1),]
dat_DHS$inds <- as.numeric(dat_DHS$inds)
# Get the outline of all the countries (adm0 and adm1)
adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[1], "_adm/", codes_noRB$ADM_code[1], "_adm0.shp"))
adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[1], "_adm/", codes_noRB$ADM_code[1], "_adm1.shp"))
for(i in 2:nrow(codes_noRB)) {
# Get border lines to add
adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[i], "_adm/", codes_noRB$ADM_code[i], "_adm0.shp"))
adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes_noRB$ADM_code[i], "_adm/", codes_noRB$ADM_code[i], "_adm1.shp"))
# Stitch together
adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
}
# Color in Burundi and Rwanda
adm0_B <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[1], "_adm/", codes_RB$ADM_code[1], "_adm0.shp"))
adm0_R <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[2], "_adm/", codes_RB$ADM_code[2], "_adm0.shp"))
adm0_RB <- rbind(adm0_B, adm0_R, makeUniqueIDs=TRUE)
# For plot
tmp <- ggplot()
# Add the adm1 boundaries
tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, z=group), colour="gray75", fill=NA, cex=0.1)
# Color in Burundi and Rwanda
tmp <- tmp + geom_polygon(data=adm0_RB, aes(x=long, y=lat, z=group), colour="gray90", fill="gray80", cex=0.1)
# Add the adm0 boundaries
tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.1)
# Get water bodies
water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
# Add water in all the countries
water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
# Plot water
tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.1, fill="azure", alpha=1)
# Plot the (binned) DHS data points; bins are (0,19], (19,39], (39,59]
# individuals, and factor levels are replaced by literal colors so
# scale_colour_identity can map them directly.
binned <- cut(dat_DHS$inds, c(0,19,39,59))
cols <- brewer.pal(3,"Set1")
names(cols) <- levels(binned)
levels(binned) <- cols
dat_DHS$colour <- binned
tmp <- tmp + geom_point(data=dat_DHS, mapping=aes(x=long, y=lat, colour=colour), size=0.05) + scale_colour_identity("Individuals", labels=names(cols), guide="legend")
# Plot with scalebar
tmp <- tmp + coord_fixed() +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
tmp <- tmp + scaleBar(lon=15, lat=-24, distanceLon=400, distanceLat=20, distanceLegend=80, dist.unit="km", orientation=FALSE, legend.size=3)
tmp
}
# Sub-figure companion to MainFigure_DHS: the same DHS-cluster map restricted
# to Rwanda and Burundi (`codes_RB`), without the scale bar. Returns the
# ggplot object.
#
# NOTE(review): depends on `codes_RB`, `readShapeSpatial()` (maptools),
# ggplot2/RColorBrewer, and on `dataClust` / `dataInd` created by the
# load()'ed per-country .RData files.
SubFigure_DHS <- function() {
# Get the long/lat coordinates of points in each country
dat_DHS <- NULL
for(i in 1:nrow(codes_RB)) {
# Call in the country-specific data from getDataByInd_Cluster
load(paste0("./Data/Manipulated_data/", as.character(codes_RB$DHS_code[i]), ".RData"))
# If some clusters have 0 people, remove them from the plot
empty_clusters <- subset(unique(dataClust$cluster_id), !(unique(dataClust$cluster_id) %in% unique(dataInd$cluster_id)))
# Save the subsetted data
long <- dataClust$long[!(dataClust$cluster_id %in% (empty_clusters))]
lat <- dataClust$lat[!(dataClust$cluster_id %in% (empty_clusters))]
# NOTE(review): assumes table() order matches the retained dataClust row
# order so long/lat/inds align -- TODO confirm.
inds <- c(table(dataInd$cluster_id))
# Save the data (dat_DHS grows as a list of parallel vectors)
dat_DHS$long <- c(dat_DHS$long, long)
dat_DHS$lat <- c(dat_DHS$lat, lat)
dat_DHS$inds <- c(dat_DHS$inds, inds)
dat_DHS$country <- c(dat_DHS$country, rep(as.character(codes_RB$Country[i]), times=length(long)))
}
# Convert to data.frame
dat_DHS <- as.data.frame(dat_DHS)
dat_DHS <- dat_DHS[complete.cases(dat_DHS),]
dat_DHS$inds <- as.numeric(dat_DHS$inds)
# Get the outline of all the countries (adm0 and adm1)
adm0_all <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[1], "_adm/", codes_RB$ADM_code[1], "_adm0.shp"))
adm1_all <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[1], "_adm/", codes_RB$ADM_code[1], "_adm1.shp"))
for(i in 2:nrow(codes_RB)) {
# Get border lines to add
adm0 <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[i], "_adm/", codes_RB$ADM_code[i], "_adm0.shp"))
adm1 <- readShapeSpatial(paste0("./Data/ADM/", codes_RB$ADM_code[i], "_adm/", codes_RB$ADM_code[i], "_adm1.shp"))
# Stitch together
adm0_all <- rbind(adm0_all, adm0, makeUniqueIDs=TRUE)
adm1_all <- rbind(adm1_all, adm1, makeUniqueIDs=TRUE)
}
# For plot
tmp <- ggplot()
# Add the adm1 boundaries
tmp <- tmp + geom_polygon(data=adm1_all, aes(x=long, y=lat, z=group), colour="gray75", fill=NA, cex=0.1)
# Add the adm0 boundaries
tmp <- tmp + geom_polygon(data=adm0_all, aes(x=long, y=lat, z=group), colour="gray40", fill=NA, cex=0.1)
# Get water bodies
water <- readShapeSpatial("./Data/waterbodies_africa/waterbodies_africa.shp")
# Add water in all the countries
water_in_region <- rgeos::gIntersection(water, adm0_all, drop_lower_td=TRUE)
# Plot water
tmp <- tmp + geom_polygon(data=water_in_region, aes(x=long, y=lat, group=group), colour="gray40", size=0.1, fill="azure", alpha=1)
# Plot the (binned) DHS data points; factor levels are replaced by literal
# colors for scale_colour_identity.
binned <- cut(dat_DHS$inds, c(0,19,39,59))
cols <- brewer.pal(3,"Set1")
names(cols) <- levels(binned)
levels(binned) <- cols
dat_DHS$colour <- binned
tmp <- tmp + geom_point(data=dat_DHS, mapping=aes(x=long, y=lat, colour=colour), size=0.05) + scale_colour_identity("Individuals", labels=names(cols), guide="legend")
# Plot
tmp <- tmp + coord_fixed() +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank())
tmp
}
################################################################################ |
#' Get or set the default wbstats language
#'
#' With no arguments, returns the current default language: the value of the
#' "wbstats.lang" option if set, otherwise the package default
#' (`wb_api_parameters$default_lang`). With `lang`, stores the value in the
#' "wbstats.lang" option (emitting an informative message) and returns it.
#'
#' @param lang Optional language code (e.g. "en"). If missing, the current
#'   default is returned without modification.
#' @return The default language code.
#' @noRd
wb_default_lang <- function(lang) {
  if (missing(lang)) {
    # Idiomatic single-option lookup (was `options()$wbstats.lang`, which
    # materializes the entire options list just to read one entry).
    env_lang <- getOption("wbstats.lang")
    if (!is.null(env_lang)) {
      default_lang <- env_lang
    } else {
      # No user override set: fall back to the package-level default.
      default_lang <- wb_api_parameters$default_lang
    }
  } else {
    # here is where you would set the environ var
    # do we check against available defaults?
    options(wbstats.lang = lang)
    message(paste("Setting default wbstats language to", lang,
                  "\nTo change this run wb_default_lang(lang = value).",
                  "The default value is 'en' (english)"))
    # The option was just set above, so re-reading it yields `lang`.
    default_lang <- wb_default_lang()
  }
  default_lang
}
| /R/update_cache.R | no_license | cran/wbstats | R | false | false | 626 | r | #' @noRd
# Get or set the default language used by wbstats API requests.
#
# If `lang` is missing, returns the language stored in the "wbstats.lang"
# option, falling back to the package default
# (`wb_api_parameters$default_lang`, defined elsewhere in the package).
# If `lang` is supplied, it is stored in the "wbstats.lang" option (with an
# informative message) and returned.
wb_default_lang <- function(lang) {
if (missing(lang)) {
# NOTE(review): options()$wbstats.lang materializes the full options list;
# getOption("wbstats.lang") would be the idiomatic lookup.
env_lang <- options()$wbstats.lang
if (!is.null(env_lang)) default_lang <- env_lang
else default_lang <- wb_api_parameters$default_lang
}
else {
# here is where you would set the environ var
# do we check against available defaults?
options(wbstats.lang = lang)
message(paste("Setting default wbstats language to", lang,
"\nTo change this run wb_default_lang(lang = value).",
"The default value is 'en' (english)"))
# Re-reads the option just set, so this recursive call returns `lang`.
default_lang <- wb_default_lang()
}
default_lang
}
#' @title Converts data in table form to tidy_data
#'
#' @description Convert data in table format to tidy_data to use in the indicators of
#' the tidy_data.
#' @param path Character vector containing one or more path names.
#' @param dataset Electoral results by party. It can be a \code{data.frame} or a \code{list}.
#' @param file.name Name of the data file with extension.
#' @param nSheets Number of countries (number of sheets).
#' 'Country' is a generic unit of analysis (province, department, etc.)
#' @param election.name Name of the variable that contains elections.
#' @param unit.name Name of the variable that contains the unit.
#' 'unit' is a generic unit of analysis (province, department, etc.)
#' @param M.name Name of the variable that contains the district magnitude
#' (M+1). It is for the calculation of endogenous and exogenous
#' electoral volatility (Torcal and Lago, 2015).
#' @param votes_nac.name Value used in the unit column to mark rows holding
#'     national-level results. Those rows are split off and joined back to the
#'     unit-level rows as a \code{votes_nac} column. By default it is \code{NULL}.
#' @param allSheet By default it is \code{FALSE}. Load all the sheets that are in the
#' files selected in \code{file.name}. This argument takes precedence over \code{nSheets}.
#' @param seats By default it is \code{FALSE}. If it is \code{TRUE}, it indicates that, in addition
#' to electoral data per party, there is allocation data per seat. In this case,
#' one column must be loaded for each party with the electoral result and another
#' with the number of seats it obtained. The structure must be:
#' party_1, party_2, ..., party_n, seats_party_1, seats_party_2, ..., seats_party_n.
#'
#'
#' @return data.frame
#'
#' @author Nicolas Schmidt \email{nschmidt@cienciassociales.edu.uy}
#'
#' @examples
#' votes <- list(data.frame(country = rep("ARG", 3),
#' year = c(1995, 2000, 2005),
#' party_A = c(40,10,20),
#' party_B = c(35,20,40),
#' party_C = c(25,70,40)),
#' data.frame(country = rep("URY", 4),
#' year = c(1995, 2000, 2005, 2010),
#' party_A = c(30,30,20,20),
#' party_B = c(30,50,40, 30),
#' party_C = c(30,10,30, 25),
#' party_D = c(10,10,10,25)),
#' data.frame(country = rep("BRA", 2),
#' year = c(1993, 1998),
#' party_A = c(30, 55),
#' party_B = c(70, 45)))
#'
#' votes <- convert_esaps(dataset = votes, unit.name = "country", election.name = "year")
#'
#'
#' votes2 <- data.frame(year = c(2000, 2005),
#' country = "URY",
#' votes_party1 = c(20, 30),
#' votes_party2 = c(30, 35),
#' votes_party3 = c(40, 25),
#' votes_party4 = c(10, 10),
#' seats_party1 = c(25, 35),
#' seats_party2 = c(20, 30),
#' seats_party3 = c(40, 30),
#' seats_party4 = c(15, 5))
#'
#' votes <- convert_esaps(dataset = votes2, unit.name = "country", election.name = "year",
#' seats = TRUE)
#'
#'
#' \dontrun{
#' v1 <- convert_esaps(path = getwd(),
#' file.name = c("electionBRA.xlsx", "electionARG.xlsx"),
#' election.name = "elec",
#' unit.name = "district",
#' allSheet = TRUE)
#'
#' v2 <-convert_esaps(path = getwd(),
#' file.name = c("ARG.ods", "URY.ods", "BRA.ods"),
#' nSheets = c(2, 3, 1),
#' election.name = "elec",
#' unit.name = "province")
#'
#' v3 <- convert_esaps(path = here::here(),
#' file.name = list.files(pattern = "*.xlsx"),
#' election.name = "year",
#' unit.name = "country",
#' M.name = "magnitude",
#' seats = TRUE,
#' allSheet = TRUE)
#' }
#'
#' @export
convert_esaps <- function(path = NULL,
                        dataset = NULL,
                        file.name = NULL,
                        nSheets = 1,
                        election.name,
                        unit.name,
                        M.name = NULL,
                        votes_nac.name = NULL,
                        seats = FALSE,
                        allSheet = FALSE){
    # Reads election results (from .xlsx/.ods files or from memory),
    # standardizes the key columns, and returns a long-format data.frame:
    # election, unit, [M,] party, votes [, seats | votes_nac].
    if (is.null(path) && is.null(dataset)){
        stop("any of the arguments 'path' or 'dataset' must not be NULL.", call. = FALSE)
    }
    # ---- 1. Read raw sheets into `dat`, a list of data.frames ----------------
    if (!is.null(path)) {
        # All files must share one extension: either all .xlsx or all .ods.
        extention <- c("xlsx", "ods")
        extention <- extention[c(all(grepl("xlsx$", file.name) == TRUE), all(grepl("ods$", file.name) == TRUE))]
        if (length(extention) != 1L){
            stop("It only accepts extension 'xlsx' or 'ods'.", call. = FALSE)
        }
        # Recycle the first nSheets value if one value per file was not given.
        if (length(file.name) != length(nSheets)){
            nSheets <- rep(nSheets[1], length(file.name))
        }
        old_setwd <- getwd()
        setwd(path)
        # FIX: restore the caller's working directory even if reading a file
        # fails; previously an error here left the session in `path`.
        on.exit(setwd(old_setwd), add = TRUE)
        if (extention == "ods") {
            if (isTRUE(allSheet)) {
                # allSheet overrides nSheets: read every sheet of every file.
                nSheets <- vector("numeric", length = length(file.name))
                for (i in seq_along(file.name)) {
                    nSheets[i] <- length(readODS::ods_sheets(file.name[i]))
                }
            }
            dat <- list()
            init <- 0L
            for (i in seq_along(file.name)) {
                for (j in seq_len(nSheets[i])) {
                    dat[[j + init]] <- as.data.frame(readODS::read_ods(file.name[i], sheet = j))
                }
                init <- length(dat)
            }
        }
        if (extention == "xlsx") {
            if (isTRUE(allSheet)) {
                nSheets <- vector("numeric", length = length(file.name))
                for (i in seq_along(file.name)) {
                    nSheets[i] <- length(readxl::excel_sheets(file.name[i]))
                }
            }
            dat <- list()
            init <- 0L
            for (i in seq_along(file.name)) {
                for (j in seq_len(nSheets[i])) {
                    dat[[j + init]] <- as.data.frame(readxl::read_excel(file.name[i], sheet = j))
                }
                init <- length(dat)
            }
        }
    }
    if (!is.null(dataset)) {
        if (is.matrix(dataset)) {stop("'dataset' must be a list or a data.frame.", call. = FALSE)}
        if (is.list(dataset) && !is.data.frame(dataset)) {dat <- dataset}
        if (is.data.frame(dataset)) {dat <- list(dataset)}
    }
    # ---- 2. Drop empty sheets ------------------------------------------------
    if (0L %in% sapply(dat, nrow)) {
        zero <- sapply(dat, nrow) %in% 0L
        if (length(zero) == sum(zero)) {
            # Every sheet is empty: nothing to convert.
            stop("The datasets have 0 rows.", call. = FALSE)
        } else {
            dat <- dat[!zero]
        }
    }
    # ---- 3. Standardize key columns to "election" and "unit" -----------------
    if (sum(sapply(dat, function(x) c(unit.name, election.name) %in% names(x))) == 0) {
        stop("'election.name' and 'unit.name' must be the same on all sheets.", call. = FALSE)
    }
    vector1 <- numeric()
    vector2 <- numeric()
    for (i in seq_along(dat)) {
        vector1[i] <- which(colnames(dat[[i]]) == election.name)
        colnames(dat[[i]])[vector1[i]] <- "election"
        vector2[i] <- which(colnames(dat[[i]]) == unit.name)
        colnames(dat[[i]])[vector2[i]] <- "unit"
    }
    # Reorder every sheet as (election, unit, <party columns...>).
    dat2 <- list()
    dat3 <- list()
    out <- list()
    for (i in seq_along(dat)) {
        dat2[[i]] <- dat[[i]][ c(vector1[i], vector2[i])]
        dat3[[i]] <- dat[[i]][-c(vector1[i], vector2[i])]
        out[[i]] <- cbind(dat2[[i]], dat3[[i]])
        out[[i]]$unit <- as.character(out[[i]]$unit)
    }
    # ---- 4. Optional district-magnitude column -------------------------------
    if (!is.null(M.name)) {
        if (!all(sapply(out, function(x) M.name %in% names(x)))) {stop("'M.name' must be the same on all sheets.", call. = FALSE)}
        m <- numeric()
        data2 <- list()
        for (i in seq_along(out)) {
            # Move the magnitude column into third position, renamed "M".
            m[i] <- which(names(out[[i]]) == M.name)
            data2[[i]] <- out[[i]][, m[i]]
            out[[i]] <- out[[i]][, -m[i]]
            out[[i]] <- cbind(out[[i]][1:2], M = data2[[i]], out[[i]][3:ncol(out[[i]])])
        }
    }
    # ---- 5. National-votes variant: split off and rejoin national rows -------
    if (!is.null(votes_nac.name)) {
        if (isTRUE(seats) || !is.null(M.name)) {stop("if 'votes_nac.name' are different from NULL, 'M.name' or 'seats' must have the default values.", call. = FALSE)}
        # Rows whose unit equals votes_nac.name carry the national totals.
        out0 <- lapply(out, function(x){tidyr::gather(x[which(x[,2]==votes_nac.name),-2], "party", "votes_nac", -"election")})
        out <- lapply(out, function(x){tidyr::gather(x[-which(x[,2]==votes_nac.name),], "party", "votes", -"election", -"unit")})
        output <- list()
        for (i in seq_along(out)) {output[[i]] <- dplyr::inner_join(out[[i]], out0[[i]], by = c("election", "party"))}
        pull <- do.call(rbind, output)
        rownames(pull) <- NULL
        return(pull)
    }
    # ---- 6. Reshape to long (tidy) format ------------------------------------
    variables <- c(election.name, unit.name, M.name) ## shouldn't votes_nac.name go here too? (translated original note)
    var <- length(variables)
    if (var > 2) {variables <- c("election", "unit", "M")} else {variables <- c("election", "unit")}
    if (isTRUE(seats)) {
        larS <- length(out)
        # A valid seats layout has an even number of party columns
        # (votes_1..votes_k, seats_1..seats_k); drop sheets that do not.
        c_seats <- as.logical((sapply(out, ncol) - var) %% 2)
        if (length(which(c_seats == TRUE)) > 0) {out <- out[-c(which(c_seats == TRUE))]}
        if (length(out) != larS && length(out) != 0) {warning("The database was deleted: ", paste(which(c_seats == TRUE), collapse = ", "), " ...must have the same number of columns of parties and seats", call. = FALSE)}
        if (length(out) == 0L) {stop("The structure of the data is not correct.", call. = FALSE)}
        # First half of the party columns holds votes, second half seats.
        out3 <- lapply(out, function(x){x[, c(1:2, (((ncol(x)-var)/2)+(var+1)):ncol(x))]})
        out4 <- lapply(out, function(x){x[, -c((((ncol(x)-var)/2) + (var+1)):ncol(x))]})
        out3 <- lapply(out3, function(x){tidyr::gather(x, "party", "seats", -"election", -"unit")})
        out4 <- lapply(out4, function(x){tidyr::gather(x, "party", "votes", -variables)})
        for (i in seq_along(out4)) {out4[[i]] <- cbind(out4[[i]], seats = out3[[i]]$seats)}
        pull <- do.call(rbind, out4)
    } else {
        out4 <- lapply(out, function(x){tidyr::gather(x, "party", "votes", -variables)})
        pull <- do.call(rbind.data.frame, out4)
    }
    # ---- 7. Order rows for readability ---------------------------------------
    if (length(unique(as.character(pull$unit))) > 1) {
        pull <- pull[order(pull$unit), ]
    } else {
        pull <- pull[order(pull$election), ]
    }
    rownames(pull) <- NULL
    return(pull)
}
| /R/convert_esaps.R | no_license | Nicolas-Schmidt/esaps | R | false | false | 11,261 | r | #' @title Converts data in table form to tidy_data
#'
#' @description Convert data in table format to tidy_data to use in the indicators of
#' the tidy_data.
#' @param path Character vector containing one or more path names.
#' @param dataset Electoral results by party. It can be a \code{data.frame} or a \code{list}.
#' @param file.name Name of the data file with extension.
#' @param nSheets Number of countries (number of sheets).
#' 'Country' is a generic unit of analysis (province, department, etc.)
#' @param election.name Name of the variable that contains elections.
#' @param unit.name Name of the variable that contains the unit.
#' 'unit' is a generic unit of analysis (province, department, etc.)
#' @param M.name Name of the variable that contains the district magnitude
#' (M+1). It is for the calculation of endogenous and exogenous
#' electoral volatility (Torcal and Lago, 2015).
#' @param votes_nac.name Value used in the unit column to mark rows holding
#'     national-level results. Those rows are split off and joined back to the
#'     unit-level rows as a \code{votes_nac} column. By default it is \code{NULL}.
#' @param allSheet By default it is \code{FALSE}. Load all the sheets that are in the
#' files selected in \code{file.name}. This argument takes precedence over \code{nSheets}.
#' @param seats By default it is \code{FALSE}. If it is \code{TRUE}, it indicates that, in addition
#' to electoral data per party, there is allocation data per seat. In this case,
#' one column must be loaded for each party with the electoral result and another
#' with the number of seats it obtained. The structure must be:
#' party_1, party_2, ..., party_n, seats_party_1, seats_party_2, ..., seats_party_n.
#'
#'
#' @return data.frame
#'
#' @author Nicolas Schmidt \email{nschmidt@cienciassociales.edu.uy}
#'
#' @examples
#' votes <- list(data.frame(country = rep("ARG", 3),
#' year = c(1995, 2000, 2005),
#' party_A = c(40,10,20),
#' party_B = c(35,20,40),
#' party_C = c(25,70,40)),
#' data.frame(country = rep("URY", 4),
#' year = c(1995, 2000, 2005, 2010),
#' party_A = c(30,30,20,20),
#' party_B = c(30,50,40, 30),
#' party_C = c(30,10,30, 25),
#' party_D = c(10,10,10,25)),
#' data.frame(country = rep("BRA", 2),
#' year = c(1993, 1998),
#' party_A = c(30, 55),
#' party_B = c(70, 45)))
#'
#' votes <- convert_esaps(dataset = votes, unit.name = "country", election.name = "year")
#'
#'
#' votes2 <- data.frame(year = c(2000, 2005),
#' country = "URY",
#' votes_party1 = c(20, 30),
#' votes_party2 = c(30, 35),
#' votes_party3 = c(40, 25),
#' votes_party4 = c(10, 10),
#' seats_party1 = c(25, 35),
#' seats_party2 = c(20, 30),
#' seats_party3 = c(40, 30),
#' seats_party4 = c(15, 5))
#'
#' votes <- convert_esaps(dataset = votes2, unit.name = "country", election.name = "year",
#' seats = TRUE)
#'
#'
#' \dontrun{
#' v1 <- convert_esaps(path = getwd(),
#' file.name = c("electionBRA.xlsx", "electionARG.xlsx"),
#' election.name = "elec",
#' unit.name = "district",
#' allSheet = TRUE)
#'
#' v2 <-convert_esaps(path = getwd(),
#' file.name = c("ARG.ods", "URY.ods", "BRA.ods"),
#' nSheets = c(2, 3, 1),
#' election.name = "elec",
#' unit.name = "province")
#'
#' v3 <- convert_esaps(path = here::here(),
#' file.name = list.files(pattern = "*.xlsx"),
#' election.name = "year",
#' unit.name = "country",
#' M.name = "magnitude",
#' seats = TRUE,
#' allSheet = TRUE)
#' }
#'
#' @export
# Converts election results (read from .xlsx/.ods files or supplied in
# memory) into a long-format data.frame: election, unit, [M,] party,
# votes [, seats | votes_nac]. See the roxygen block above for argument
# details.
convert_esaps<-function(path = NULL,
dataset = NULL,
file.name = NULL,
nSheets = 1,
election.name,
unit.name,
M.name = NULL,
votes_nac.name = NULL,
seats = FALSE,
allSheet = FALSE){
if (is.null(path) && is.null(dataset)){
stop("any of the arguments 'path' or 'dataset' must not be NULL.", call. = FALSE)
}
# Step 1: read raw sheets into `dat`, a list of data.frames.
if (!is.null(path)) {
# All files must share one extension: either all .xlsx or all .ods.
extention <- c("xlsx", "ods")
extention <- extention[c(all(grepl("xlsx$", file.name)==TRUE), all(grepl("ods$", file.name)==TRUE))]
if (length(extention) != 1L){
stop("It only accepts extension 'xlsx' or 'ods'.", call. = FALSE)
}
# Recycle the first nSheets value if one value per file was not given.
if(length(file.name) != length(nSheets)){
nSheets <- rep(nSheets[1], length(file.name))
}
# NOTE(review): setwd() is not protected by on.exit(); an error while
# reading leaves the caller's working directory changed to `path`.
old_setwd <- getwd()
setwd(path)
if (extention == "ods") {
if(isTRUE(allSheet)){
# allSheet overrides nSheets: read every sheet of every file.
nSheets <- vector("numeric", length = length(file.name))
for(i in 1:length(file.name)){
nSheets[i] <- length(readODS::ods_sheets(file.name[i]))
}
}
dat <- list()
init <- 0L
for(i in 1:length(file.name)){
for (j in 1:nSheets[i]) {
dat[[j+init]] <- as.data.frame(readODS::read_ods(file.name[i], sheet = j))
}
init <- length(dat)
}
}
if (extention == "xlsx") {
if(isTRUE(allSheet)){
nSheets <- vector("numeric", length = length(file.name))
for(i in 1:length(file.name)){
nSheets[i] <- length(readxl::excel_sheets(file.name[i]))
}
}
dat <- list()
init <- 0L
for(i in 1:length(file.name)){
for (j in 1:nSheets[i]) {
dat[[j+init]] <- as.data.frame(readxl::read_excel(file.name[i], sheet = j))
}
init <- length(dat)
}
}
setwd(old_setwd)
}
if (!is.null(dataset)) {
if (is.matrix(dataset)) {stop("'dataset' must be a list or a data.frame.", call. = FALSE)}
if (is.list(dataset) && !is.data.frame(dataset)) {dat <- dataset}
if (is.data.frame(dataset)) {dat <- list(dataset)}
}
# Step 2: drop empty sheets; abort only if every sheet is empty.
if (0L %in% sapply(dat, nrow)) {
zero <- sapply(dat, nrow) %in% 0L
if(length(zero)==sum(zero)){
stop("The datasets have 0 rows.", call. = FALSE)
}else{
dat <- dat[!zero]
}
}
# Step 3: standardize the key columns to "election" and "unit".
if(sum(sapply(dat, function(x) c(unit.name, election.name) %in% names(x))) == 0){
stop("'election.name' and 'unit.name' must be the same on all sheets.", call. = FALSE)
}
vector1 <- numeric()
vector2 <- numeric()
for (i in 1:length(dat)) {
vector1[i] <- which(colnames(dat[[i]]) == election.name)
colnames(dat[[i]])[vector1[i]] <- "election"
vector2[i] <- which(colnames(dat[[i]]) == unit.name)
colnames(dat[[i]])[vector2[i]] <- "unit"
}
# Reorder every sheet as (election, unit, <party columns...>).
dat2 <- list()
dat3 <- list()
out <- list()
for (i in 1:length(dat)) {
dat2[[i]] <- dat[[i]][ c(vector1[i], vector2[i])]
dat3[[i]] <- dat[[i]][-c(vector1[i], vector2[i])]
out[[i]] <- cbind(dat2[[i]], dat3[[i]])
out[[i]]$unit <- as.character(out[[i]]$unit)
}
# Step 4: optional district-magnitude column, moved to position 3 as "M".
if (!is.null(M.name)) {
if(!all(sapply(out, function(x) M.name %in% names(x)))){stop("'M.name' must be the same on all sheets.", call. = FALSE)}
m <- numeric()
data2 <- list()
for (i in 1:length(out)) {
m[i] <- which(names(out[[i]]) == M.name)
data2[[i]] <- out[[i]][,m[i]]
out[[i]] <- out[[i]][,-m[i]]
out[[i]] <- cbind(out[[i]][1:2], M = data2[[i]], out[[i]][3:ncol(out[[i]])])
}
}
# Step 5: national-votes variant -- rows whose unit equals votes_nac.name
# carry national totals; split them off and join back as "votes_nac".
if (!is.null(votes_nac.name)) {
if(isTRUE(seats) || !is.null(M.name)){stop("if 'votes_nac.name' are different from NULL, 'M.name' or 'seats' must have the default values.", call. = FALSE)}
out0<- lapply(out, function(x){tidyr::gather(x[which(x[,2]==votes_nac.name),-2], "party", "votes_nac", -"election")})
out <- lapply(out, function(x){tidyr::gather(x[-which(x[,2]==votes_nac.name),], "party", "votes", -"election", -"unit")})
output <- list()
for(i in 1:length(out)){output[[i]] <- dplyr::inner_join(out[[i]],out0[[i]] , by=c("election", "party"))}
pull <- do.call(rbind, output)
rownames(pull) <- NULL
return(pull)
}
# Step 6: reshape to long (tidy) format.
variables <- c(election.name, unit.name, M.name) ## aca no va votes_nac_name??? [Spanish: "shouldn't votes_nac.name go here too?"]
var <- length(variables)
if(var > 2){variables <- c("election", "unit", "M")}else{variables <- c("election", "unit")}
if(isTRUE(seats)){
larS <- length(out)
# A valid seats layout has an even number of party columns
# (votes_1..votes_k, seats_1..seats_k); sheets that do not are dropped.
c_seats <- as.logical((sapply(out, ncol)-var) %% 2)
if(length(which(c_seats==TRUE)) > 0){out <- out[-c(which(c_seats==TRUE))]}
if(length(out)!=larS && length(out) != 0){warning("The database was deleted: ", paste(which(c_seats==TRUE), collapse = ", "), " ...must have the same number of columns of parties and seats", call. = FALSE)}
if(length(out)==0L){stop("The structure of the data is not correct.", call. = FALSE)}
# First half of the party columns holds votes, second half seats.
out3 <- lapply(out, function(x){x[,c(1:2,(((ncol(x)-var)/2)+(var+1)):ncol(x))]})
out4 <- lapply(out, function(x){x[,-c((((ncol(x)-var)/2) + (var+1)): ncol(x))]})
out3 <- lapply(out3, function(x){tidyr::gather(x, "party", "seats", -"election", -"unit")})
out4 <- lapply(out4, function(x){tidyr::gather(x, "party", "votes", -variables)})
for(i in 1:length(out4)){out4[[i]]<-cbind(out4[[i]], seats = out3[[i]]$seats)}
pull <- do.call(rbind, out4)
}else{
out4 <- lapply(out, function(x){tidyr::gather(x, "party", "votes", -variables)})
pull <- do.call(rbind.data.frame, out4)
}
# Step 7: order rows by unit (or by election if there is only one unit).
if(length(unique(as.character(pull$unit)))>1){
pull <- pull[order(pull$unit), ]
}else{
pull <- pull[order(pull$election), ]
}
rownames(pull) <- NULL
return(pull)
}
|
# Saved console history: one-sample z-test for the proportion of
# automatic-transmission cars (am == 0) in mtcars against p0 = 0.4.
# Erroneous intermediate attempts are preserved verbatim and flagged inline.
data(mtcars)
table(mtcars$am)
trans <- as.data.frame(table(mtcars$am))
trans
# Sample proportion of the first transmission level (am == 0).
SamProp <- trans[1,2]/sum(trans$Freq)
PopProp <- 0.4
n <- sum(trans$Freq)
# NOTE(review): wrong formula -- sqrt() covers only the numerator, so this
# divides by n instead of sqrt(n); the corrected version appears below.
z <- (SamProp - PopProp) / (sqrt(PopProp*(1-PopProp))/n)
z
# Hand-computed check with the literal counts (19 of 32).
((19/32)-0.4)/sqrt((0.4*(1-0.4))/32)
SamProp
19/32
# Correct z statistic: (p_hat - p0) / sqrt(p0 * (1 - p0) / n).
z <- (SamProp - PopProp) / sqrt((PopProp*(1-PopProp))/n)
z
# NOTE(review): 0.5 here is a typo for 0.05; corrected on the next line.
qnorm(1-(0.5/2))
qnorm(1-(0.05/2))
# NOTE(review): FALSE is matched positionally to the `mean` argument, not
# `lower.tail`; the corrected call with lower.tail=FALSE appears below.
pvalue <- 2 * pnorm(z,FALSE)
pvalue
z
pnorm(z)
# NOTE(review): unquoted `two.sided` is an undefined symbol and errors;
# the quoted form on the next line is the working call.
prop.test(trans[1,2],sum(trans$Freq),0.4,two.sided,0.95,TRUE)
prop.test(trans[1,2],sum(trans$Freq),0.4,"two.sided",0.95,TRUE)
# Two-sided p-value from the upper tail of the standard normal.
pvalue <- 2 * pnorm(z,lower.tail=FALSE)
pvalue
1-pnorm(z)
2 * (1-pnorm(z))
# Same proportion test without Yates' continuity correction.
prop.test(trans[1,2],sum(trans$Freq),0.4,"two.sided",0.95,FALSE)
savehistory("E:/kamagyana/Computing/DARET/Submissions/Asst9.2.r")
| /Asst9.2.r | no_license | vasanthi72/DARET_Assignment9.2 | R | false | false | 734 | r | data(mtcars)
table(mtcars$am)
trans <- as.data.frame(table(mtcars$am))
trans
SamProp <- trans[1,2]/sum(trans$Freq)
PopProp <- 0.4
n <- sum(trans$Freq)
z <- (SamProp - PopProp) / (sqrt(PopProp*(1-PopProp))/n)
z
((19/32)-0.4)/sqrt((0.4*(1-0.4))/32)
SamProp
19/32
z <- (SamProp - PopProp) / sqrt((PopProp*(1-PopProp))/n)
z
qnorm(1-(0.5/2))
qnorm(1-(0.05/2))
pvalue <- 2 * pnorm(z,FALSE)
pvalue
z
pnorm(z)
prop.test(trans[1,2],sum(trans$Freq),0.4,two.sided,0.95,TRUE)
prop.test(trans[1,2],sum(trans$Freq),0.4,"two.sided",0.95,TRUE)
pvalue <- 2 * pnorm(z,lower.tail=FALSE)
pvalue
1-pnorm(z)
2 * (1-pnorm(z))
prop.test(trans[1,2],sum(trans$Freq),0.4,"two.sided",0.95,FALSE)
savehistory("E:/kamagyana/Computing/DARET/Submissions/Asst9.2.r")
|
#'@title histogram of data
#'
#'@description Get a histogram, showing the mean and median of your data
#'@author Oscar Ramirez Alan (\email{osoramirez@@gmail.com}).
#'@param x a numeric vector to plot
#'@keywords plothist
#'@return invisibly \code{NULL}; called for its side effects (the plot and
#'  a short textual summary printed with \code{cat})
#'@export
#'@import "graphics","stats"
#'@examples
#'x<-rnorm(25,2,3)
#'plothist(x)
#'
#'
plothist <- function(x) {
  # Snapshot graphical parameters so the caller's device state is restored.
  opar <- par(no.readonly = TRUE)
  on.exit(par(opar))
  par(mfrow = c(1, 1))
  xlab <- deparse(substitute(x))  # label the plot with the caller's expression
  hist(x, main = paste("Histogram of", xlab), col = "#EBEBEB", xlab = "", cex = .9)
  # Vertical reference lines: solid red = mean, dashed blue = median.
  abline(v = mean(x), lwd = 3, col = "red")
  abline(v = median(x), col = "#3F689E", lwd = 2.5, lty = 2)
  legend(x = "topright", pch = 1, title = "Legend", box.lty = 0, cex = .7,
         bg = "transparent",
         c("Mean", "Median"),
         col = c("red", "#3F689E"),
         lwd = c(2, 2), lty = c(1, 2))
  # Rounded summary statistics; NA values are ignored for the moments.
  # (The unused `Max` computation from the original was dropped.)
  Stdev <- round(sd(x, na.rm = TRUE), 3)
  Mean <- round(mean(x, na.rm = TRUE), 3)
  Median <- round(median(x, na.rm = TRUE), 3)
  n <- length(x)
  cat(paste("This option performs a histogram that show "))
  cat(paste("your data has mean of ", Mean, " and ", Stdev, " of standard deviation. "))
  cat(paste("Your median is ", Median, " in ", n, " samples data. "))
  cat(paste("Check all your statistical data, using function: resume2data."))
}
| /R/plothist.R | no_license | osoramirez/resumeRdesc | R | false | false | 1,413 | r | #'@title histogram of data
#'
#'@description Get a histogram, showing the mean and median of your data
#'@author Oscar Ramirez Alan (\email{osoramirez@@gmail.com}).
#'@param x is a numeric value, could be a a vector or data.frame
#'@export plothist
#'@keywords plothist
#'@return a summary
#'@export plothist
#'@import "graphics","stats"
#'@examples
#'x<-rnorm(25,2,3)
#'plothist(x)
#'
#'
plothist <- function(x) {
opar <- par(no.readonly = TRUE)
on.exit(par(opar))
par(mfrow=c(1,1))
xlab <- deparse(substitute(x)) # get the expression passed as y
hist(x, main=paste("Histogram of", xlab), col = "#EBEBEB", xlab = "", cex=.9)
abline(v = mean(x), lwd = 3, col="red")
abline(v = median(x), col = "#3F689E", lwd = 2.5,lty=2)
legend(x = "topright", pch = 1, title = "Legend",box.lty=0,cex=.7,bg="transparent",
c("Mean", "Median"),
col = c( "red", "#3F689E"),
lwd = c(2, 2), lty=c(1,2))
Max <- round(max(x), 3)
Stdev <- round(sd(x, na.rm = TRUE), 3)
Mean <- round(mean(x, na.rm = TRUE), 3)
Median <- round(median(x, na.rm = TRUE), 3)
n <- length(x)
cat(paste("This option performs a histogram that show "))
{
cat(paste("your data has mean of ", Mean, " and ", Stdev, " of standar desviation. "))
}
{
cat(paste("Your median is ", Median, " in ", n, " samples data. "))
}
cat(paste("Check all your statistical data, using function: resume2data."))
}
|
# airontime_sql_db.R
library(DBI)
library(odbc)
library(dplyr)
library(data.table)
library(lubridate)
library(ggplot2)
library(dbplot)
library(tidypredict)
source('config_aot.R')
# # Get `nycflights13` data tables ----
# # install.packages('nycflights13')
airlines13 <- nycflights13::airlines
airports13 <- nycflights13::airports
flights13 <- nycflights13::flights
planes13 <- nycflights13::planes
weather13 <- nycflights13::weather
connect_to_db <- TRUE
# Connect to Azure SQL server ----
sort(unique(odbc::odbcListDrivers()[[1]]))
if (connect_to_db) {
if ("ODBC Driver 17 for SQL Server" %in%
sort(unique(odbc::odbcListDrivers()[[1]]))) {
con <- DBI::dbConnect(odbc::odbc(),
.connection_string = con_string_aot_sql17)
}
else if ("ODBC Driver 13 for SQL Server" %in%
sort(unique(odbc::odbcListDrivers()[[1]]))) {
con <- DBI::dbConnect(odbc::odbc(),
.connection_string = con_string_aot_sql13)
}
}
DBI::dbListTables(con)[1:10]
DBI::dbListFields(con, 'aot_data')
# Single file name retrieval by year
# aot_1988_files <- list.files('~/Downloads/AirOnTimeCSV/',
# pattern = '^airOT1988.*.csv$')
# Batch file name retrieval by year
aot_files <- lapply(as.character(1988:2012),
FUN = function(x) {
list.files('~/Downloads/AirOnTimeCSV/',
pattern = paste0('^airOT', x, '.*.csv$'))
})
aot_files
start_year <- 1988
aot_files[[1988-start_year+1]]
aot_files[[2012-start_year+1]]
# READ DATA BY YEAR, PUSH TO SQL SERVER ----
#' Read and row-bind all monthly AirOnTime CSV files for one year.
#'
#' Relies on the global `aot_files` list (one character vector of monthly
#' file names per year, built above) being in scope.
#'
#' @param year Integer year whose monthly CSVs should be loaded.
#' @param start_year First year covered by `aot_files`; used as the index
#'   offset into that list (previously hard-coded inside the body).
#' @param data_dir Directory containing the AirOnTime CSV files.
#' @return A single data.table with all rows for `year`.
aot_data_by_year <- function(year, start_year = 1988,
                             data_dir = "~/Downloads/AirOnTimeCSV/") {
  # Map the requested year onto its position in the aot_files list.
  year_files <- aot_files[[year - start_year + 1]]
  rbindlist(
    lapply(paste0(data_dir, year_files),
           FUN = fread,
           sep = ",")
  )
}
# Report an object's in-memory size in megabytes.
print_size <- function(x) print(object.size(x), units = "Mb")
# # _ 20XX ----
# aot_data_20XX <- aot_data_by_year(20XX)
# str(aot_data_20XX)
# print_size(aot_data_20XX)
# _ 2012 ----
aot_data_2012 <- aot_data_by_year(2012)
str(aot_data_2012)
print_size(aot_data_2012)
# Write aot_data_2012 to 'aot' SQL database as 'aot_2012' table
DBI::dbWriteTable(con, name = 'aot_2012', value = aot_data_2012)
rm(aot_data_2012)
# _ 2011 ----
aot_data_2011 <- aot_data_by_year(2011)
str(aot_data_2011)
print_size(aot_data_2011)
# Append aot_data_2011 to 'aot_2012' SQL table (written above)
DBI::dbWriteTable(con, name = 'aot_2012', value = aot_data_2011, append = TRUE)
rm(aot_data_2011)
# Rename the table from 'aot_2012' to 'aot_data'.
# sp_rename is a data-definition statement with no result set, so
# dbExecute() is the correct DBI verb; dbGetQuery() is intended for
# queries that return rows.
DBI::dbExecute(con, 'EXEC sp_rename aot_2012, aot_data')
DBI::dbListTables(con)[1:10]
# _ 2010 ----
aot_data_2010 <- aot_data_by_year(2010)
str(aot_data_2010)
print_size(aot_data_2010)
# Append aot_data_2011 to 'aot_data' SQL table (written above)
DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2010, append = TRUE)
# _ 2009 ----
aot_data_2009 <- aot_data_by_year(2009)
str(aot_data_2009)
print_size(aot_data_2009)
# DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2009, append = TRUE)
# _ 2008 ----
aot_data_2008 <- aot_data_by_year(2008)
str(aot_data_2008)
print_size(aot_data_2008)
# DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2008, append = TRUE)
# _ 2007 ----
aot_data_2007 <- aot_data_by_year(2007)
str(aot_data_2007)
print_size(aot_data_2007)
# DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2007, append = TRUE)
# ...
# ...
# ...
# _ 1990 ----
aot_data_1990 <- aot_data_by_year(1990)
str(aot_data_1990)
print_size(aot_data_1990)
# DBI::dbWriteTable(con, name = 'aot_1990', value = aot_data_1990)
# _ 1989 ----
aot_data_1989 <- aot_data_by_year(1989)
str(aot_data_1989)
print_size(aot_data_1989)
# DBI::dbWriteTable(con, name = 'aot_1989', value = aot_data_1989)
# _ 1988 ----
aot_data_1988 <- aot_data_by_year(1988)
str(aot_data_1988)
print_size(aot_data_1988)
# DBI::dbWriteTable(con, name = 'aot_1988', value = aot_data_1988)
# dplyr summaries
aot_data_db <- tbl(con, 'aot_data')
aot_data_db %>%
summarize(n = n())
dbGetQuery(con, 'SELECT COUNT(*) FROM aot_data;')
year_carr_n <- aot_data_db %>%
group_by(YEAR, UNIQUE_CARRIER) %>%
summarize(n = n()) %>%
collect()
targ_airlines <- c('AA', 'DL', 'UA', 'US', 'WN')
year_carr_n %>%
left_join(airlines13, by = c('UNIQUE_CARRIER' = 'carrier')) %>%
filter(UNIQUE_CARRIER %in% targ_airlines) %>%
ggplot(aes(x = YEAR, y = n, col = name)) +
geom_line()
y_m_d_carr_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, UNIQUE_CARRIER) %>%
summarize(n = n()) %>%
collect()
ymd_carr_n <- y_m_d_carr_n %>%
ungroup() %>%
mutate(date = as.Date(paste0(YEAR, '-', MONTH, '-', DAY_OF_MONTH))) %>%
select(UNIQUE_CARRIER, date, n)
ymd_carr_n %>%
left_join(airlines13, by = c('UNIQUE_CARRIER' = 'carrier')) %>%
filter(UNIQUE_CARRIER %in% targ_airlines) %>%
ggplot(aes(x = date, y = n, col = name)) +
geom_line()
# Y_M_D_ORIG
y_m_d_orig_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, ORIGIN) %>%
summarize(n = n()) %>%
collect()
# YMD_ORIG
ymd_orig_n <- y_m_d_orig_n %>%
ungroup() %>%
mutate(date = as.Date(paste0(YEAR, '-', MONTH, '-', DAY_OF_MONTH))) %>%
select(ORIGIN, date, n)
targ_airports <- ymd_orig_n %>%
group_by(ORIGIN) %>%
summarize(sum_n = sum(n)) %>%
arrange(desc(sum_n)) %>%
head(5) %>%
pull(ORIGIN)
ymd_orig_n %>%
filter(ORIGIN %in% targ_airports) %>%
left_join(airports13, by = c('ORIGIN' = 'faa')) %>%
ggplot(aes(x = date, y = n, col = name)) +
geom_line()
# YMD_W_ORIG
ymd_w_orig_n <- y_m_d_orig_n %>%
ungroup() %>%
mutate(date = as.Date(paste0(YEAR, '-', MONTH, '-', DAY_OF_MONTH)),
cum_week = (interval(min(date), date) %/% weeks(1)) + 1) %>%
select(ORIGIN, cum_week, n) %>%
group_by(ORIGIN, cum_week) %>%
summarize(cum_week_n = sum(n))
max_week <- max(ymd_w_orig_n$cum_week)
ymd_w_orig_n %>%
filter(ORIGIN %in% targ_airports) %>%
left_join(airports13, by = c('ORIGIN' = 'faa')) %>%
ggplot(aes(x = cum_week, y = cum_week_n, col = name)) +
geom_line() +
scale_x_continuous(breaks = seq(1, max_week, by = 52),
limits = c(1,max_week-1))
y_m_d_dest_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, DEST) %>%
summarize(n = n()) %>%
collect()
y_m_d_orig_dest_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, ORIGIN, DEST) %>%
summarize(n = n()) %>%
collect()
# # TEST ROW BINDS ----
#
# # Bind 1988-1990
# aot_data_1988__1990 <- rbind(aot_data_1988, aot_data_1989, aot_data_1990)
# class(aot_data_1988__1990)
# str(aot_data_1988__1990)
# print(object.size(aot_data_1988__1990), units = 'Gb')
# rm(aot_data_1988__1990)
# rm(aot_data_1988); rm(aot_data_1989); rm(aot_data_1990)
#
# # Bind 1988, 2011, 2012
# aot_data_1988_2011_2012 <- rbind(aot_data_1988, aot_data_2011, aot_data_2012)
# str(aot_data_1988_2011_2012)
# print_size(aot_data_1988_2011_2012)
#
# # Bind 2009-2012
# aot_data_2009__2012 <- rbind(# aot_data_2009,
# aot_data_2010,
# aot_data_2011,
# aot_data_2012)
# dbGetQuery(con, 'DROP TABLE aot_1988')
# Disconnect the database connection
dbDisconnect(con)
| /airontime_sql_db.R | no_license | ldnicolasmay/r_db_sandbox | R | false | false | 7,301 | r | # airontime_sql_db.R
library(DBI)
library(odbc)
library(dplyr)
library(data.table)
library(lubridate)
library(ggplot2)
library(dbplot)
library(tidypredict)
source('config_aot.R')
# # Get `nycflights13` data tables ----
# # install.packages('nycflights13')
airlines13 <- nycflights13::airlines
airports13 <- nycflights13::airports
flights13 <- nycflights13::flights
planes13 <- nycflights13::planes
weather13 <- nycflights13::weather
connect_to_db <- TRUE
# Connect to Azure SQL server ----
sort(unique(odbc::odbcListDrivers()[[1]]))
if (connect_to_db) {
if ("ODBC Driver 17 for SQL Server" %in%
sort(unique(odbc::odbcListDrivers()[[1]]))) {
con <- DBI::dbConnect(odbc::odbc(),
.connection_string = con_string_aot_sql17)
}
else if ("ODBC Driver 13 for SQL Server" %in%
sort(unique(odbc::odbcListDrivers()[[1]]))) {
con <- DBI::dbConnect(odbc::odbc(),
.connection_string = con_string_aot_sql13)
}
}
DBI::dbListTables(con)[1:10]
DBI::dbListFields(con, 'aot_data')
# Single file name retrieval by year
# aot_1988_files <- list.files('~/Downloads/AirOnTimeCSV/',
# pattern = '^airOT1988.*.csv$')
# Batch file name retrieval by year
aot_files <- lapply(as.character(1988:2012),
FUN = function(x) {
list.files('~/Downloads/AirOnTimeCSV/',
pattern = paste0('^airOT', x, '.*.csv$'))
})
aot_files
start_year <- 1988
aot_files[[1988-start_year+1]]
aot_files[[2012-start_year+1]]
# READ DATA BY YEAR, PUSH TO SQL SERVER ----
aot_data_by_year <- function(year) {
start_year = 1988
rbindlist(
lapply(paste0('~/Downloads/AirOnTimeCSV/',
aot_files[[year-start_year+1]]),
FUN = fread,
sep = ',')
)
}
print_size <- function(x) {
print(object.size(x), units = 'Mb')
}
# # _ 20XX ----
# aot_data_20XX <- aot_data_by_year(20XX)
# str(aot_data_20XX)
# print_size(aot_data_20XX)
# _ 2012 ----
aot_data_2012 <- aot_data_by_year(2012)
str(aot_data_2012)
print_size(aot_data_2012)
# Write aot_data_2012 to 'aot' SQL database as 'aot_2012' table
DBI::dbWriteTable(con, name = 'aot_2012', value = aot_data_2012)
rm(aot_data_2012)
# _ 2011 ----
aot_data_2011 <- aot_data_by_year(2011)
str(aot_data_2011)
print_size(aot_data_2011)
# Append aot_data_2011 to 'aot_2012' SQL table (written above)
DBI::dbWriteTable(con, name = 'aot_2012', value = aot_data_2011, append = TRUE)
rm(aot_data_2011)
# Rename the table from 'aot_2012' to 'aot_data'
DBI::dbGetQuery(con, 'EXEC sp_rename aot_2012, aot_data')
DBI::dbListTables(con)[1:10]
# _ 2010 ----
aot_data_2010 <- aot_data_by_year(2010)
str(aot_data_2010)
print_size(aot_data_2010)
# Append aot_data_2011 to 'aot_data' SQL table (written above)
DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2010, append = TRUE)
# _ 2009 ----
aot_data_2009 <- aot_data_by_year(2009)
str(aot_data_2009)
print_size(aot_data_2009)
# DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2009, append = TRUE)
# _ 2008 ----
aot_data_2008 <- aot_data_by_year(2008)
str(aot_data_2008)
print_size(aot_data_2008)
# DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2008, append = TRUE)
# _ 2007 ----
aot_data_2007 <- aot_data_by_year(2007)
str(aot_data_2007)
print_size(aot_data_2007)
# DBI::dbWriteTable(con, name = 'aot_data', value = aot_data_2007, append = TRUE)
# ...
# ...
# ...
# _ 1990 ----
aot_data_1990 <- aot_data_by_year(1990)
str(aot_data_1990)
print_size(aot_data_1990)
# DBI::dbWriteTable(con, name = 'aot_1990', value = aot_data_1990)
# _ 1989 ----
aot_data_1989 <- aot_data_by_year(1989)
str(aot_data_1989)
print_size(aot_data_1989)
# DBI::dbWriteTable(con, name = 'aot_1989', value = aot_data_1989)
# _ 1988 ----
aot_data_1988 <- aot_data_by_year(1988)
str(aot_data_1988)
print_size(aot_data_1988)
# DBI::dbWriteTable(con, name = 'aot_1988', value = aot_data_1988)
# dplyr summaries
aot_data_db <- tbl(con, 'aot_data')
aot_data_db %>%
summarize(n = n())
dbGetQuery(con, 'SELECT COUNT(*) FROM aot_data;')
year_carr_n <- aot_data_db %>%
group_by(YEAR, UNIQUE_CARRIER) %>%
summarize(n = n()) %>%
collect()
targ_airlines <- c('AA', 'DL', 'UA', 'US', 'WN')
year_carr_n %>%
left_join(airlines13, by = c('UNIQUE_CARRIER' = 'carrier')) %>%
filter(UNIQUE_CARRIER %in% targ_airlines) %>%
ggplot(aes(x = YEAR, y = n, col = name)) +
geom_line()
y_m_d_carr_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, UNIQUE_CARRIER) %>%
summarize(n = n()) %>%
collect()
ymd_carr_n <- y_m_d_carr_n %>%
ungroup() %>%
mutate(date = as.Date(paste0(YEAR, '-', MONTH, '-', DAY_OF_MONTH))) %>%
select(UNIQUE_CARRIER, date, n)
ymd_carr_n %>%
left_join(airlines13, by = c('UNIQUE_CARRIER' = 'carrier')) %>%
filter(UNIQUE_CARRIER %in% targ_airlines) %>%
ggplot(aes(x = date, y = n, col = name)) +
geom_line()
# Y_M_D_ORIG
y_m_d_orig_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, ORIGIN) %>%
summarize(n = n()) %>%
collect()
# YMD_ORIG
ymd_orig_n <- y_m_d_orig_n %>%
ungroup() %>%
mutate(date = as.Date(paste0(YEAR, '-', MONTH, '-', DAY_OF_MONTH))) %>%
select(ORIGIN, date, n)
targ_airports <- ymd_orig_n %>%
group_by(ORIGIN) %>%
summarize(sum_n = sum(n)) %>%
arrange(desc(sum_n)) %>%
head(5) %>%
pull(ORIGIN)
ymd_orig_n %>%
filter(ORIGIN %in% targ_airports) %>%
left_join(airports13, by = c('ORIGIN' = 'faa')) %>%
ggplot(aes(x = date, y = n, col = name)) +
geom_line()
# YMD_W_ORIG
ymd_w_orig_n <- y_m_d_orig_n %>%
ungroup() %>%
mutate(date = as.Date(paste0(YEAR, '-', MONTH, '-', DAY_OF_MONTH)),
cum_week = (interval(min(date), date) %/% weeks(1)) + 1) %>%
select(ORIGIN, cum_week, n) %>%
group_by(ORIGIN, cum_week) %>%
summarize(cum_week_n = sum(n))
max_week <- max(ymd_w_orig_n$cum_week)
ymd_w_orig_n %>%
filter(ORIGIN %in% targ_airports) %>%
left_join(airports13, by = c('ORIGIN' = 'faa')) %>%
ggplot(aes(x = cum_week, y = cum_week_n, col = name)) +
geom_line() +
scale_x_continuous(breaks = seq(1, max_week, by = 52),
limits = c(1,max_week-1))
y_m_d_dest_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, DEST) %>%
summarize(n = n()) %>%
collect()
y_m_d_orig_dest_n <- aot_data_db %>%
group_by(YEAR, MONTH, DAY_OF_MONTH, ORIGIN, DEST) %>%
summarize(n = n()) %>%
collect()
# # TEST ROW BINDS ----
#
# # Bind 1988-1990
# aot_data_1988__1990 <- rbind(aot_data_1988, aot_data_1989, aot_data_1990)
# class(aot_data_1988__1990)
# str(aot_data_1988__1990)
# print(object.size(aot_data_1988__1990), units = 'Gb')
# rm(aot_data_1988__1990)
# rm(aot_data_1988); rm(aot_data_1989); rm(aot_data_1990)
#
# # Bind 1988, 2011, 2012
# aot_data_1988_2011_2012 <- rbind(aot_data_1988, aot_data_2011, aot_data_2012)
# str(aot_data_1988_2011_2012)
# print_size(aot_data_1988_2011_2012)
#
# # Bind 2009-2012
# aot_data_2009__2012 <- rbind(# aot_data_2009,
# aot_data_2010,
# aot_data_2011,
# aot_data_2012)
# dbGetQuery(con, 'DROP TABLE aot_1988')
# Disconnect the database connection
dbDisconnect(con)
|
# Read data: a pre-extracted two-day subset of the UCI household power
# consumption file (semicolon-separated, minute-level readings).
household_power_consumption_subset <- read.csv("household_power_consumption_subset.txt", sep=";")
#Convert the date and time strings to POSIX format
formated_d=strptime(paste(household_power_consumption_subset$Date,household_power_consumption_subset$Time),format="%d/%m/%Y %H:%M:%S")
# Abbreviated weekday name per observation; used below to place x-axis
# labels at the day boundaries (start, day change, end).
dw=format(formated_d,"%a")
#Plot the figure: a 2x2 panel of time-series plots written to plot4.png
png(filename = "plot4.png",width = 480, height = 480)
par(mfrow=c(2,2))
##plot1 -- Global active power over time
# plot(type="n") sets up empty axes; lines() then draws the series.
plot(household_power_consumption_subset$Global_active_power,type="n",xaxt='n', xlab="", ylab="Global Active Power")
lines(household_power_consumption_subset$Global_active_power,xlim=c(1,length(dw)+1))
# Custom x-axis: tick at index 1, at the first row of the second weekday,
# and one past the end (formated_d[2880]+60 = one minute after the last
# observation, i.e. the following day's label).
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
##plot2 -- Voltage over time
plot(household_power_consumption_subset$Voltage,type="n",xaxt='n', xlab="datetime", ylab="Voltage")
lines(household_power_consumption_subset$Voltage,xlim=c(1,length(dw)+1))
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
##plot3 -- the three energy sub-metering series overlaid with a legend
plot(household_power_consumption_subset$Sub_metering_1,type="n",xaxt='n', xlab="", ylab="Energy sub metering")
lines(household_power_consumption_subset$Sub_metering_1,xlim=c(1,length(dw)+1),col="black")
lines(household_power_consumption_subset$Sub_metering_2,xlim=c(1,length(dw)+1),col="red")
lines(household_power_consumption_subset$Sub_metering_3,xlim=c(1,length(dw)+1),col="blue")
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
legend(x="topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),col=c("black","red","blue"),bty="n")
##plot4 -- Global reactive power over time
plot(household_power_consumption_subset$Global_reactive_power,type="n",xaxt='n', xlab="datetime", ylab="Global_reactive_power")
lines(household_power_consumption_subset$Global_reactive_power,xlim=c(1,length(dw)+1))
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
# Close the PNG device, flushing the file to disk.
dev.off()
household_power_consumption_subset <- read.csv("household_power_consumption_subset.txt", sep=";")
#Convert the date and time strings to POSIX format
formated_d=strptime(paste(household_power_consumption_subset$Date,household_power_consumption_subset$Time),format="%d/%m/%Y %H:%M:%S")
dw=format(formated_d,"%a")
#Plot the figure
png(filename = "plot4.png",width = 480, height = 480)
par(mfrow=c(2,2))
##plot1
plot(household_power_consumption_subset$Global_active_power,type="n",xaxt='n', xlab="", ylab="Global Active Power")
lines(household_power_consumption_subset$Global_active_power,xlim=c(1,length(dw)+1))
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
##plot2
plot(household_power_consumption_subset$Voltage,type="n",xaxt='n', xlab="datetime", ylab="Voltage")
lines(household_power_consumption_subset$Voltage,xlim=c(1,length(dw)+1))
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
##plot3
plot(household_power_consumption_subset$Sub_metering_1,type="n",xaxt='n', xlab="", ylab="Energy sub metering")
lines(household_power_consumption_subset$Sub_metering_1,xlim=c(1,length(dw)+1),col="black")
lines(household_power_consumption_subset$Sub_metering_2,xlim=c(1,length(dw)+1),col="red")
lines(household_power_consumption_subset$Sub_metering_3,xlim=c(1,length(dw)+1),col="blue")
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
legend(x="topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),col=c("black","red","blue"),bty="n")
##plot4
plot(household_power_consumption_subset$Global_reactive_power,type="n",xaxt='n', xlab="datetime", ylab="Global_reactive_power")
lines(household_power_consumption_subset$Global_reactive_power,xlim=c(1,length(dw)+1))
axis(1,at=c(1,grep(unique(dw)[2],dw)[1],length(dw)+1),labels=c(unique(dw),format(formated_d[2880]+60,"%a")))
dev.off() |
library(chipmine)
## this script:
## 1) use diffbind regions and generate profile plot around peak
rm(list = ls())
outDir <- here::here("analysis", "02_ChIPseq_analysis", "03_diffBind")
if(!dir.exists(outDir)){
dir.create(path = outDir)
}
analysisName <- "creE_diffbind"
outPrefix <- paste(outDir, "/", analysisName, sep = "")
compare <- c("CREEHA_CONTROL", "CREEHA_10MMAA")
##################################################################################
file_diffbindInfo <- here::here("analysis", "02_ChIPseq_analysis", "03_diffBind", "sampleInfo.txt")
file_exptInfo <- here::here("data", "referenceData/sampleInfo.txt")
file_diffbindRes <- here::here("analysis", "02_ChIPseq_analysis", "03_diffBind",
"creE_diffbind.annotation.filtered.tab")
TF_dataPath <- here::here("data", "TF_data")
sampleInfo <- suppressMessages(readr::read_tsv(file = file_diffbindInfo))
## get the sample details
exptData <- get_sample_information(exptInfoFile = file_exptInfo,
dataPath = TF_dataPath,
profileMatrixSuffix = "normalizedmatrix")
exptDataList <- purrr::transpose(exptData) %>%
purrr::set_names(nm = purrr::map(., "sampleId"))
grp1 <- compare[1]
grp2 <- compare[2]
grp1Index <- which(exptData$groupId == grp1)
grp2Index <- which(exptData$groupId == grp2)
grp1Samples <- exptData$sampleId[grp1Index]
grp2Samples <- exptData$sampleId[grp2Index]
grp1SpecificOcc = paste(grp1, ":specific", sep = "")
grp2SpecificOcc = paste(grp2, ":specific", sep = "")
grp1EnrichedCategory <- paste(grp1, ":enriched", sep = "")
grp2EnrichedCategory <- paste(grp2, ":enriched", sep = "")
bestGrp1Id <- exptData$sampleId[exptData$groupId == grp1 & exptData$bestRep == 1]
bestGrp2Id <- exptData$sampleId[exptData$groupId == grp2 & exptData$bestRep == 1]
##################################################################################
# diffDba <- DiffBind::dba.load(file = paste(analysisName, ".dba", sep = ""), dir = outDir, pre = "")
diffbindRes <- suppressMessages(readr::read_tsv(file = file_diffbindRes)) %>%
dplyr::select(seqnames, start, end, name, Fold, FDR, diffBind, peakOccupancy,
categoryDiffbind, pvalGood.all) %>%
dplyr::filter(pvalGood.all != 0) %>%
dplyr::distinct(name, .keep_all = TRUE) %>%
dplyr::mutate(
cluster = dplyr::case_when(
peakOccupancy == "common" & categoryDiffbind != "common" ~
paste(peakOccupancy, categoryDiffbind, sep = "\n"),
TRUE ~ categoryDiffbind
)
)
diffGr <- GenomicRanges::makeGRangesFromDataFrame(diffbindRes, keep.extra.columns = T)
##################################################################################
## get the average summit position
peakList <- GenomicRanges::GRangesList(
lapply(X = exptData$peakFile[c(grp1Index, grp2Index)],
FUN = rtracklayer::import, format = "narrowPeak")
)
names(peakList) <- exptData$sampleId[c(grp1Index, grp2Index)]
# pgr <- peakList$CREEHA_CONTROL4
## find overlap of each peak GR with diffGr.
## if multiple peaks overlap with a diffGr, select strongest
ovPk <- endoapply(
X = peakList,
FUN = function(pgr){
ovlp <- findOverlaps(query = diffGr, subject = pgr)
opgr <- pgr[ovlp@to]
mcols(opgr)$diffGrId <- ovlp@from
opgr <- as.data.frame(opgr) %>%
dplyr::group_by(diffGrId) %>%
dplyr::arrange(desc(pValue)) %>%
dplyr::slice(1L) %>%
GenomicRanges::makeGRangesFromDataFrame(keep.extra.columns = TRUE)
return(opgr)
})
combinedPeaks <- unlist(ovPk)
summitPos <- GenomicRanges::resize(
x = GenomicRanges::shift(x = combinedPeaks, shift = combinedPeaks$peak),
width = 1, fix = "start"
)
avgSummit <- as.data.frame(x = summitPos, row.names = NULL) %>%
dplyr::group_by(diffGrId) %>%
dplyr::summarise(meanSummit = round(mean(start)),
summitSd = round(sd(start))
)
diffGr$avgSummit[avgSummit$diffGrId] <- avgSummit$meanSummit
diffGr$summitSd[avgSummit$diffGrId] <- avgSummit$summitSd
##################################################################################
## draw summit position variation for each peak region
as.data.frame(diffGr) %>%
dplyr::select(name, peakOccupancy, categoryDiffbind, avgSummit, summitSd) %>%
dplyr::filter(peakOccupancy == "common") %>%
ggplot(mapping = aes(x = summitSd)) +
geom_histogram(binwidth = 5) +
scale_x_continuous(breaks = seq(0, 500, by = 50)) +
labs(title = "peak summit standard deviation for common peaks") +
theme_bw() +
theme(
axis.text = element_text(size = 14),
axis.title = element_text(size = 14, face = "bold")
)
summitPos$sampleId <- names(summitPos)
summitSummaryDf <- as.data.frame(summitPos, row.names = NULL) %>%
dplyr::select(name, start, diffGrId, sampleId) %>%
tidyr::pivot_wider(id_cols = diffGrId, names_from = sampleId, values_from = start) %>%
dplyr::mutate(AA_CTRL4 = abs(CREEHA_10MMAA4 - CREEHA_CONTROL4),
AA_CTRL5 = abs(CREEHA_10MMAA5 - CREEHA_CONTROL5)) %>%
dplyr::mutate(AA_CTRL_avg = (AA_CTRL4 + AA_CTRL5) / 2)
summitSummaryDf$peakOccupancy <- diffGr$peakOccupancy[summitSummaryDf$diffGrId]
summitSummaryDf$categoryDiffbind <- diffGr$categoryDiffbind[summitSummaryDf$diffGrId]
summitSummaryDf$name <- diffGr$name[summitSummaryDf$diffGrId]
summitSummaryDf <- dplyr::select(
summitSummaryDf, diffGrId, name, peakOccupancy, categoryDiffbind,
starts_with("AA_CTRL"))
dplyr::filter_at(.tbl = summitSummaryDf,
.vars = vars(starts_with("AA_CTRL")),
.vars_predicate = any_vars(. > 50 & . < 100)) %>%
dplyr::arrange(desc(AA_CTRL_avg)) %>%
dplyr::filter(peakOccupancy == "common")
dplyr::filter(summitSummaryDf, peakOccupancy == "common") %>%
ggplot() +
geom_histogram(mapping = aes(x = AA_CTRL4), fill = "red", binwidth = 5, alpha = 0.4) +
geom_histogram(mapping = aes(x = AA_CTRL5), fill = "blue", binwidth = 5, alpha = 0.4) +
scale_x_continuous(breaks = seq(0, 1000, by = 100)) +
labs(title = "peak summit difference: common peaks",
x = "summit position difference (bp)") +
theme_bw() +
theme(
axis.text = element_text(size = 14),
plot.title = element_text(size = 18, face = "bold"),
axis.title = element_text(size = 14, face = "bold")
)
##################################################################################
# peakCenterGr <- GenomicRanges::resize(x = diffGr, fix = "center", width = 1, use.names = T)
peakCenterGr <- GRanges(seqnames = seqnames(diffGr),
ranges = IRanges(start = diffGr$avgSummit, width = 1))
mcols(peakCenterGr) <- mcols(diffGr)
which(table(diffGr$name) > 1)
peakDiffAn <- dplyr::select(diffbindRes, name, Fold, FDR, diffBind, peakOccupancy,
categoryDiffbind, pvalGood.all, cluster) %>%
dplyr::mutate(
pvalGroup = if_else(pvalGood.all == 0, "weak", "strong"),
rankMetric = (-log10(FDR) * sign(Fold))) %>%
dplyr::rename(geneId = name) %>%
as.data.frame()
peakDiffAn$diffBind <- factor(x = peakDiffAn$diffBind, levels = c("down", "noDiff", "up"))
peakDiffAn$peakOccupancy <- factor(
x = peakDiffAn$peakOccupancy,
levels = c(grp1SpecificOcc, "common", grp2SpecificOcc))
peakDiffAn$cluster <- factor(
x = peakDiffAn$cluster,
levels = c(grp1SpecificOcc, paste("common", grp1EnrichedCategory, sep = "\n"),
"common",
paste("common", grp2EnrichedCategory, sep = "\n"), grp2SpecificOcc))
# ## for the first time, generate profile matrices.
# ## next time these matrices can be directly imported
# for(i in 1:nrow(exptData)){
# mat <- bigwig_profile_matrix(
# bwFile = exptData$bwFile[i],
# regions = peakCenterGr,
# signalName = exptData$sampleId[i],
# storeLocal = TRUE, localPath = exptData$matFile[i],
# extend = c(1000, 1000), targetName = "peak center")
# }
exptData_tf <- exptData[c(grp1Index, grp2Index), ] %>%
dplyr::mutate(
matFile = stringr::str_replace(
string = matFile, pattern = "normalizedmatrix", replacement = "FE_matrix")
)
# ## FE track profile matrix for TF samples
#
# for(i in 1:nrow(exptData_tf)){
#
# mat <- bigwig_profile_matrix(
# bwFile = exptData_tf$FE_bwFile[i],
# regions = peakCenterGr,
# signalName = exptData_tf$sampleId[i],
# storeLocal = TRUE, localPath = exptData_tf$matFile[i],
# extend = c(1000, 1000), targetName = "peak center")
# }
matList <- import_profiles(exptInfo = exptData, geneList = diffGr$name,
source = "normalizedmatrix",
targetType = "point", targetName = "peak center",
up = 100, target = 0, down = 100)
## tf colors
tfMeanProfile <- NULL
if(length(exptData$sampleId) == 1){
tfMeanProfile <- matList[[exptData$sampleId[1]]]
} else{
tfMeanProfile <- getSignalsFromList(lt = matList[exptData$sampleId])
}
quantile(tfMeanProfile, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
# tfMeanColor <- colorRamp2(quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T), c("white", "red"))
tfColorList <- sapply(
X = exptData$sampleId,
FUN = function(x){
return(colorRamp2(breaks = quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T),
colors = unlist(strsplit(x = exptDataList[[x]]$color, split = ",")))
)
}
)
# ylimList <- sapply(X = exptData$sampleId, FUN = function(x) c(0, 80), simplify = FALSE)
ylimList <- list(
CREEHA_CONTROL4 = c(0, 75), CREEHA_CONTROL5 = c(0, 55), WT_CONTROL5 = c(0, 70),
CREEHA_10MMAA4 = c(0, 115), CREEHA_10MMAA5 = c(0, 115), WT_10MMAA5 = c(0, 70)
)
profilePlots <- multi_profile_plots(
exptInfo = exptData, genesToPlot = diffGr$name,
profileColors = tfColorList,
clusters = dplyr::select(peakDiffAn, geneId, cluster),
showAnnotation = FALSE,
clustOrd = levels(peakDiffAn$cluster),
targetType = "point",
targetName = "summit",
matBins = c(100, 0, 100, 10), matSource = "normalizedmatrix",
column_title_gp = gpar(fontsize = 12),
ylimFraction = ylimList
)
# pdf(file = paste(outPrefix, "_profiles.pdf", sep = ""), width = 18, height = 13)
png(file = paste(outPrefix, "_profiles.png", sep = ""), width = 3500, height = 2500, res = 250)
ht <- draw(
profilePlots$heatmapList,
main_heatmap = exptData$profileName[1],
# split = dplyr::select(peakDiffAn, pvalGroup, diffBind),
column_title = "creE peaks diffbind comparison",
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
htList2 <- profilePlots$profileHeatmaps$CREEHA_CONTROL4$heatmap +
profilePlots$profileHeatmaps$CREEHA_10MMAA4$heatmap +
profilePlots$profileHeatmaps$WT_CONTROL5$heatmap +
profilePlots$profileHeatmaps$WT_10MMAA5$heatmap
# pdf(file = paste(outPrefix, "_profiles.pdf", sep = ""), width = 18, height = 13)
png(file = paste(outPrefix, ".profiles.ungrouped.png", sep = ""), width = 3000, height = 3000, res = 250)
ht <- draw(
htList2,
# main_heatmap = exptData$profileName[1],
split = rep(1, nrow(tfMeanProfile)),
column_title = "creE peaks diffbind comparison",
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
##################################################################################
## FE track profile plots
matList <- import_profiles(exptInfo = exptData_tf, geneList = diffGr$name,
source = "normalizedmatrix",
targetType = "point", targetName = "peak center",
up = 100, target = 0, down = 100)
## tf colors
tfMeanProfile <- NULL
if(length(exptData_tf$sampleId) == 1){
tfMeanProfile <- matList[[exptData_tf$sampleId[1]]]
} else{
tfMeanProfile <- getSignalsFromList(lt = matList[exptData_tf$sampleId])
}
quantile(tfMeanProfile, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
# tfMeanColor <- colorRamp2(quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T), c("white", "red"))
tfColorList <- sapply(
X = exptData_tf$sampleId,
FUN = function(x){
return(colorRamp2(breaks = quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T),
colors = unlist(strsplit(x = exptDataList[[x]]$color, split = ",")))
)
}
)
# ylimList <- sapply(X = exptData_tf$sampleId, FUN = function(x) c(0, 80), simplify = FALSE)
ylimList <- list(
CREEHA_CONTROL4 = c(0, 5), CREEHA_CONTROL5 = c(0, 5), WT_CONTROL5 = c(0, 5),
CREEHA_10MMAA4 = c(0, 5), CREEHA_10MMAA5 = c(0, 5), WT_10MMAA5 = c(0, 5)
)
profilePlots <- multi_profile_plots(
exptInfo = exptData_tf, genesToPlot = diffGr$name,
profileColors = tfColorList,
showAnnotation = FALSE,
targetType = "point",
targetName = "summit",
matBins = c(100, 0, 100, 10), matSource = "normalizedmatrix",
column_title_gp = gpar(fontsize = 12),
ylimFraction = ylimList
)
rowOrd <- order(peakDiffAn$rankMetric)
pdf(file = paste(outPrefix, ".FE_profiles.pdf", sep = ""), width = 12, height = 10)
# png(file = paste(outPrefix, ".FE_profiles.png", sep = ""), width = 2500, height = 2500, res = 250)
ht <- draw(
profilePlots$heatmapList,
main_heatmap = exptData_tf$profileName[1],
row_order = rowOrd,
column_title = "rglT peaks: Fold Enrichment over untagged",
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
##################################################################################
## AA/CONTROL ratio tracks
## TF1 scalled matrix
## Column-wise z-scale the profile matrix of the best replicate of group 1 so
## the two samples become comparable before taking their difference.
tf1ScalledMat <- scale(x = matList[[bestGrp1Id]], center = TRUE, scale = TRUE)
## inspect the value distribution (printed interactively; result not stored)
quantile(tf1ScalledMat, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
tf1ScalledColor <- colorRamp2(
  breaks = quantile(tf1ScalledMat, c(0.50, 0.99), na.rm = T),
  colors = unlist(stringr::str_split(exptDataList[[bestGrp1Id]]$color, pattern = ","))
)
## TF2 scalled matrix (same scaling for the best replicate of group 2)
tf2ScalledMat <- scale(x = matList[[bestGrp2Id]], center = TRUE, scale = TRUE)
quantile(tf2ScalledMat, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
tf2ScalledColor <- colorRamp2(
  breaks = quantile(tf2ScalledMat, c(0.50, 0.99), na.rm = T),
  colors = unlist(stringr::str_split(exptDataList[[bestGrp2Id]]$color, pattern = ","))
)
## Difference between TF2 and TF1 scalled matrix
## (positive values = more signal in the group-2 / AA condition)
scalledTfDiffMat <- tf2ScalledMat - tf1ScalledMat
## attributes presumably read downstream by profile_heatmap() for labelling
## (signalName/targetName) -- TODO confirm in chipmine
attr(scalledTfDiffMat, "signal_name") <- "fold_change"
attr(scalledTfDiffMat, "target_name") <- "summit"
plot(density(scalledTfDiffMat))
quantile(scalledTfDiffMat, c(0, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.02, 0.05, seq(0.1, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
## diverging purple-green palette, fixed symmetric breaks centered on 0
scalledTfDiffColor <- colorRamp2(breaks = c(-3, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 3),
                                 colors = RColorBrewer::brewer.pal(n = 11, name = "PRGn"))
## Scalled TF diff profile
scalledTfDiffProf <- profile_heatmap(
profileMat = scalledTfDiffMat,
showAnnotation = FALSE,
signalName = "fold_change",
profileColor = scalledTfDiffColor,
column_title_gp = gpar(fontsize = 12),
ylimFraction = c(-2.3, 1.7)
)
rowOrd <- order(peakDiffAn$rankMetric, decreasing = TRUE)
htListDiff <- scalledTfDiffProf$heatmap +
profilePlots$profileHeatmaps[[bestGrp2Id]]$heatmap +
profilePlots$profileHeatmaps[[bestGrp1Id]]$heatmap
pdf(file = paste(outPrefix, ".diff_profiles.pdf", sep = ""), width = 10, height = 12)
# png(file = paste(outPrefix, ".profiles.ungrouped.png", sep = ""), width = 3000, height = 3000, res = 250)
ht <- draw(
htListDiff,
main_heatmap = "fold_change",
# row_order = rowOrd,
column_title = paste("rglT peaks ", bestGrp2Id, "/", bestGrp1Id, " signal ratio", sep = ""),
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
| /scripts/12_peak_profile_plot.R | no_license | foreverst8/RglT_analysis | R | false | false | 16,208 | r | library(chipmine)
## this script:
## 1) use diffbind regions and generate profile plot around peak
rm(list = ls())
outDir <- here::here("analysis", "02_ChIPseq_analysis", "03_diffBind")
if(!dir.exists(outDir)){
dir.create(path = outDir)
}
analysisName <- "creE_diffbind"
outPrefix <- paste(outDir, "/", analysisName, sep = "")
compare <- c("CREEHA_CONTROL", "CREEHA_10MMAA")
##################################################################################
file_diffbindInfo <- here::here("analysis", "02_ChIPseq_analysis", "03_diffBind", "sampleInfo.txt")
file_exptInfo <- here::here("data", "referenceData/sampleInfo.txt")
file_diffbindRes <- here::here("analysis", "02_ChIPseq_analysis", "03_diffBind",
"creE_diffbind.annotation.filtered.tab")
TF_dataPath <- here::here("data", "TF_data")
sampleInfo <- suppressMessages(readr::read_tsv(file = file_diffbindInfo))
## get the sample details
exptData <- get_sample_information(exptInfoFile = file_exptInfo,
dataPath = TF_dataPath,
profileMatrixSuffix = "normalizedmatrix")
exptDataList <- purrr::transpose(exptData) %>%
purrr::set_names(nm = purrr::map(., "sampleId"))
grp1 <- compare[1]
grp2 <- compare[2]
grp1Index <- which(exptData$groupId == grp1)
grp2Index <- which(exptData$groupId == grp2)
grp1Samples <- exptData$sampleId[grp1Index]
grp2Samples <- exptData$sampleId[grp2Index]
grp1SpecificOcc = paste(grp1, ":specific", sep = "")
grp2SpecificOcc = paste(grp2, ":specific", sep = "")
grp1EnrichedCategory <- paste(grp1, ":enriched", sep = "")
grp2EnrichedCategory <- paste(grp2, ":enriched", sep = "")
bestGrp1Id <- exptData$sampleId[exptData$groupId == grp1 & exptData$bestRep == 1]
bestGrp2Id <- exptData$sampleId[exptData$groupId == grp2 & exptData$bestRep == 1]
##################################################################################
# diffDba <- DiffBind::dba.load(file = paste(analysisName, ".dba", sep = ""), dir = outDir, pre = "")
diffbindRes <- suppressMessages(readr::read_tsv(file = file_diffbindRes)) %>%
dplyr::select(seqnames, start, end, name, Fold, FDR, diffBind, peakOccupancy,
categoryDiffbind, pvalGood.all) %>%
dplyr::filter(pvalGood.all != 0) %>%
dplyr::distinct(name, .keep_all = TRUE) %>%
dplyr::mutate(
cluster = dplyr::case_when(
peakOccupancy == "common" & categoryDiffbind != "common" ~
paste(peakOccupancy, categoryDiffbind, sep = "\n"),
TRUE ~ categoryDiffbind
)
)
diffGr <- GenomicRanges::makeGRangesFromDataFrame(diffbindRes, keep.extra.columns = T)
##################################################################################
## get the average summit position
peakList <- GenomicRanges::GRangesList(
lapply(X = exptData$peakFile[c(grp1Index, grp2Index)],
FUN = rtracklayer::import, format = "narrowPeak")
)
names(peakList) <- exptData$sampleId[c(grp1Index, grp2Index)]
# pgr <- peakList$CREEHA_CONTROL4
## find overlap of each peak GR with diffGr.
## if multiple peaks overlap with a diffGr, select strongest.
## findOverlaps() pairs diffbind regions (query) with the sample's narrowPeaks
## (subject); the diffGr row index is carried along as mcols()$diffGrId so the
## per-region summit can later be written back onto diffGr. "Strongest" here
## means the largest pValue column (narrowPeak stores -log10-scaled p-values,
## so larger = more significant -- consistent with rtracklayer's import).
ovPk <- endoapply(
  X = peakList,
  FUN = function(pgr){
    ovlp <- findOverlaps(query = diffGr, subject = pgr)
    opgr <- pgr[ovlp@to]
    mcols(opgr)$diffGrId <- ovlp@from
    # keep one peak per diffbind region: sort by significance, take the top
    opgr <- as.data.frame(opgr) %>%
      dplyr::group_by(diffGrId) %>%
      dplyr::arrange(desc(pValue)) %>%
      dplyr::slice(1L) %>%
      GenomicRanges::makeGRangesFromDataFrame(keep.extra.columns = TRUE)
    return(opgr)
  })
combinedPeaks <- unlist(ovPk)
## Convert each selected peak to its 1 bp summit position: shift the range
## start by the narrowPeak 'peak' column (summit offset from start), then
## resize to width 1 anchored at the shifted start.
summitPos <- GenomicRanges::resize(
  x = GenomicRanges::shift(x = combinedPeaks, shift = combinedPeaks$peak),
  width = 1, fix = "start"
)
## Average the summit positions across samples for each diffbind region,
## keeping the SD as a measure of summit agreement between samples.
avgSummit <- as.data.frame(x = summitPos, row.names = NULL) %>%
  dplyr::group_by(diffGrId) %>%
  dplyr::summarise(meanSummit = round(mean(start)),
                   summitSd = round(sd(start))
  )
## write per-region mean summit and SD back onto diffGr, indexed by diffGrId;
## regions with no overlapping peak are left NA
diffGr$avgSummit[avgSummit$diffGrId] <- avgSummit$meanSummit
diffGr$summitSd[avgSummit$diffGrId] <- avgSummit$summitSd
##################################################################################
## draw summit position variation for each peak region
as.data.frame(diffGr) %>%
dplyr::select(name, peakOccupancy, categoryDiffbind, avgSummit, summitSd) %>%
dplyr::filter(peakOccupancy == "common") %>%
ggplot(mapping = aes(x = summitSd)) +
geom_histogram(binwidth = 5) +
scale_x_continuous(breaks = seq(0, 500, by = 50)) +
labs(title = "peak summit standard deviation for common peaks") +
theme_bw() +
theme(
axis.text = element_text(size = 14),
axis.title = element_text(size = 14, face = "bold")
)
summitPos$sampleId <- names(summitPos)
summitSummaryDf <- as.data.frame(summitPos, row.names = NULL) %>%
dplyr::select(name, start, diffGrId, sampleId) %>%
tidyr::pivot_wider(id_cols = diffGrId, names_from = sampleId, values_from = start) %>%
dplyr::mutate(AA_CTRL4 = abs(CREEHA_10MMAA4 - CREEHA_CONTROL4),
AA_CTRL5 = abs(CREEHA_10MMAA5 - CREEHA_CONTROL5)) %>%
dplyr::mutate(AA_CTRL_avg = (AA_CTRL4 + AA_CTRL5) / 2)
summitSummaryDf$peakOccupancy <- diffGr$peakOccupancy[summitSummaryDf$diffGrId]
summitSummaryDf$categoryDiffbind <- diffGr$categoryDiffbind[summitSummaryDf$diffGrId]
summitSummaryDf$name <- diffGr$name[summitSummaryDf$diffGrId]
summitSummaryDf <- dplyr::select(
summitSummaryDf, diffGrId, name, peakOccupancy, categoryDiffbind,
starts_with("AA_CTRL"))
dplyr::filter_at(.tbl = summitSummaryDf,
.vars = vars(starts_with("AA_CTRL")),
.vars_predicate = any_vars(. > 50 & . < 100)) %>%
dplyr::arrange(desc(AA_CTRL_avg)) %>%
dplyr::filter(peakOccupancy == "common")
dplyr::filter(summitSummaryDf, peakOccupancy == "common") %>%
ggplot() +
geom_histogram(mapping = aes(x = AA_CTRL4), fill = "red", binwidth = 5, alpha = 0.4) +
geom_histogram(mapping = aes(x = AA_CTRL5), fill = "blue", binwidth = 5, alpha = 0.4) +
scale_x_continuous(breaks = seq(0, 1000, by = 100)) +
labs(title = "peak summit difference: common peaks",
x = "summit position difference (bp)") +
theme_bw() +
theme(
axis.text = element_text(size = 14),
plot.title = element_text(size = 18, face = "bold"),
axis.title = element_text(size = 14, face = "bold")
)
##################################################################################
# peakCenterGr <- GenomicRanges::resize(x = diffGr, fix = "center", width = 1, use.names = T)
## 1 bp anchor per region at the cross-sample average summit computed above,
## used as the profile-matrix center instead of the geometric region center
peakCenterGr <- GRanges(seqnames = seqnames(diffGr),
                        ranges = IRanges(start = diffGr$avgSummit, width = 1))
mcols(peakCenterGr) <- mcols(diffGr)
## sanity check for duplicated region names (printed, not stored)
which(table(diffGr$name) > 1)
## Per-peak annotation table used for heatmap clustering/row ordering.
## rankMetric = signed significance: -log10(FDR) carrying the sign of the
## fold change, so up- and down-regulated peaks rank in opposite directions.
peakDiffAn <- dplyr::select(diffbindRes, name, Fold, FDR, diffBind, peakOccupancy,
                            categoryDiffbind, pvalGood.all, cluster) %>%
  dplyr::mutate(
    pvalGroup = if_else(pvalGood.all == 0, "weak", "strong"),
    rankMetric = (-log10(FDR) * sign(Fold))) %>%
  dplyr::rename(geneId = name) %>%
  as.data.frame()
## fixed factor level orders so heatmap row groups appear in a consistent
## order: group1-specific -> common (group1 enriched) -> common ->
## common (group2 enriched) -> group2-specific
peakDiffAn$diffBind <- factor(x = peakDiffAn$diffBind, levels = c("down", "noDiff", "up"))
peakDiffAn$peakOccupancy <- factor(
  x = peakDiffAn$peakOccupancy,
  levels = c(grp1SpecificOcc, "common", grp2SpecificOcc))
peakDiffAn$cluster <- factor(
  x = peakDiffAn$cluster,
  levels = c(grp1SpecificOcc, paste("common", grp1EnrichedCategory, sep = "\n"),
             "common",
             paste("common", grp2EnrichedCategory, sep = "\n"), grp2SpecificOcc))
# ## for the first time, generate profile matrices.
# ## next time these matrices can be directly imported
# for(i in 1:nrow(exptData)){
# mat <- bigwig_profile_matrix(
# bwFile = exptData$bwFile[i],
# regions = peakCenterGr,
# signalName = exptData$sampleId[i],
# storeLocal = TRUE, localPath = exptData$matFile[i],
# extend = c(1000, 1000), targetName = "peak center")
# }
exptData_tf <- exptData[c(grp1Index, grp2Index), ] %>%
dplyr::mutate(
matFile = stringr::str_replace(
string = matFile, pattern = "normalizedmatrix", replacement = "FE_matrix")
)
# ## FE track profile matrix for TF samples
#
# for(i in 1:nrow(exptData_tf)){
#
# mat <- bigwig_profile_matrix(
# bwFile = exptData_tf$FE_bwFile[i],
# regions = peakCenterGr,
# signalName = exptData_tf$sampleId[i],
# storeLocal = TRUE, localPath = exptData_tf$matFile[i],
# extend = c(1000, 1000), targetName = "peak center")
# }
matList <- import_profiles(exptInfo = exptData, geneList = diffGr$name,
source = "normalizedmatrix",
targetType = "point", targetName = "peak center",
up = 100, target = 0, down = 100)
## tf colors
tfMeanProfile <- NULL
if(length(exptData$sampleId) == 1){
tfMeanProfile <- matList[[exptData$sampleId[1]]]
} else{
tfMeanProfile <- getSignalsFromList(lt = matList[exptData$sampleId])
}
quantile(tfMeanProfile, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
# tfMeanColor <- colorRamp2(quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T), c("white", "red"))
tfColorList <- sapply(
X = exptData$sampleId,
FUN = function(x){
return(colorRamp2(breaks = quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T),
colors = unlist(strsplit(x = exptDataList[[x]]$color, split = ",")))
)
}
)
# ylimList <- sapply(X = exptData$sampleId, FUN = function(x) c(0, 80), simplify = FALSE)
ylimList <- list(
CREEHA_CONTROL4 = c(0, 75), CREEHA_CONTROL5 = c(0, 55), WT_CONTROL5 = c(0, 70),
CREEHA_10MMAA4 = c(0, 115), CREEHA_10MMAA5 = c(0, 115), WT_10MMAA5 = c(0, 70)
)
profilePlots <- multi_profile_plots(
exptInfo = exptData, genesToPlot = diffGr$name,
profileColors = tfColorList,
clusters = dplyr::select(peakDiffAn, geneId, cluster),
showAnnotation = FALSE,
clustOrd = levels(peakDiffAn$cluster),
targetType = "point",
targetName = "summit",
matBins = c(100, 0, 100, 10), matSource = "normalizedmatrix",
column_title_gp = gpar(fontsize = 12),
ylimFraction = ylimList
)
# pdf(file = paste(outPrefix, "_profiles.pdf", sep = ""), width = 18, height = 13)
png(file = paste(outPrefix, "_profiles.png", sep = ""), width = 3500, height = 2500, res = 250)
ht <- draw(
profilePlots$heatmapList,
main_heatmap = exptData$profileName[1],
# split = dplyr::select(peakDiffAn, pvalGroup, diffBind),
column_title = "creE peaks diffbind comparison",
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
htList2 <- profilePlots$profileHeatmaps$CREEHA_CONTROL4$heatmap +
profilePlots$profileHeatmaps$CREEHA_10MMAA4$heatmap +
profilePlots$profileHeatmaps$WT_CONTROL5$heatmap +
profilePlots$profileHeatmaps$WT_10MMAA5$heatmap
# pdf(file = paste(outPrefix, "_profiles.pdf", sep = ""), width = 18, height = 13)
png(file = paste(outPrefix, ".profiles.ungrouped.png", sep = ""), width = 3000, height = 3000, res = 250)
ht <- draw(
htList2,
# main_heatmap = exptData$profileName[1],
split = rep(1, nrow(tfMeanProfile)),
column_title = "creE peaks diffbind comparison",
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
##################################################################################
## FE track profile plots
matList <- import_profiles(exptInfo = exptData_tf, geneList = diffGr$name,
source = "normalizedmatrix",
targetType = "point", targetName = "peak center",
up = 100, target = 0, down = 100)
## tf colors
tfMeanProfile <- NULL
if(length(exptData_tf$sampleId) == 1){
tfMeanProfile <- matList[[exptData_tf$sampleId[1]]]
} else{
tfMeanProfile <- getSignalsFromList(lt = matList[exptData_tf$sampleId])
}
quantile(tfMeanProfile, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
# tfMeanColor <- colorRamp2(quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T), c("white", "red"))
tfColorList <- sapply(
X = exptData_tf$sampleId,
FUN = function(x){
return(colorRamp2(breaks = quantile(tfMeanProfile, c(0.50, 0.995), na.rm = T),
colors = unlist(strsplit(x = exptDataList[[x]]$color, split = ",")))
)
}
)
# ylimList <- sapply(X = exptData_tf$sampleId, FUN = function(x) c(0, 80), simplify = FALSE)
ylimList <- list(
CREEHA_CONTROL4 = c(0, 5), CREEHA_CONTROL5 = c(0, 5), WT_CONTROL5 = c(0, 5),
CREEHA_10MMAA4 = c(0, 5), CREEHA_10MMAA5 = c(0, 5), WT_10MMAA5 = c(0, 5)
)
profilePlots <- multi_profile_plots(
exptInfo = exptData_tf, genesToPlot = diffGr$name,
profileColors = tfColorList,
showAnnotation = FALSE,
targetType = "point",
targetName = "summit",
matBins = c(100, 0, 100, 10), matSource = "normalizedmatrix",
column_title_gp = gpar(fontsize = 12),
ylimFraction = ylimList
)
rowOrd <- order(peakDiffAn$rankMetric)
pdf(file = paste(outPrefix, ".FE_profiles.pdf", sep = ""), width = 12, height = 10)
# png(file = paste(outPrefix, ".FE_profiles.png", sep = ""), width = 2500, height = 2500, res = 250)
ht <- draw(
profilePlots$heatmapList,
main_heatmap = exptData_tf$profileName[1],
row_order = rowOrd,
column_title = "rglT peaks: Fold Enrichment over untagged",
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
##################################################################################
## AA/CONTROL ratio tracks
## TF1 scalled matrix
tf1ScalledMat <- scale(x = matList[[bestGrp1Id]], center = TRUE, scale = TRUE)
quantile(tf1ScalledMat, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
tf1ScalledColor <- colorRamp2(
breaks = quantile(tf1ScalledMat, c(0.50, 0.99), na.rm = T),
colors = unlist(stringr::str_split(exptDataList[[bestGrp1Id]]$color, pattern = ","))
)
## TF2 scalled matrix
tf2ScalledMat <- scale(x = matList[[bestGrp2Id]], center = TRUE, scale = TRUE)
quantile(tf2ScalledMat, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
tf2ScalledColor <- colorRamp2(
breaks = quantile(tf2ScalledMat, c(0.50, 0.99), na.rm = T),
colors = unlist(stringr::str_split(exptDataList[[bestGrp2Id]]$color, pattern = ","))
)
## Difference between TF2 and TF1 scalled matrix
scalledTfDiffMat <- tf2ScalledMat - tf1ScalledMat
attr(scalledTfDiffMat, "signal_name") <- "fold_change"
attr(scalledTfDiffMat, "target_name") <- "summit"
plot(density(scalledTfDiffMat))
quantile(scalledTfDiffMat, c(0, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.02, 0.05, seq(0.1, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
scalledTfDiffColor <- colorRamp2(breaks = c(-3, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 3),
colors = RColorBrewer::brewer.pal(n = 11, name = "PRGn"))
## Scalled TF diff profile
scalledTfDiffProf <- profile_heatmap(
profileMat = scalledTfDiffMat,
showAnnotation = FALSE,
signalName = "fold_change",
profileColor = scalledTfDiffColor,
column_title_gp = gpar(fontsize = 12),
ylimFraction = c(-2.3, 1.7)
)
rowOrd <- order(peakDiffAn$rankMetric, decreasing = TRUE)
htListDiff <- scalledTfDiffProf$heatmap +
profilePlots$profileHeatmaps[[bestGrp2Id]]$heatmap +
profilePlots$profileHeatmaps[[bestGrp1Id]]$heatmap
pdf(file = paste(outPrefix, ".diff_profiles.pdf", sep = ""), width = 10, height = 12)
# png(file = paste(outPrefix, ".profiles.ungrouped.png", sep = ""), width = 3000, height = 3000, res = 250)
ht <- draw(
htListDiff,
main_heatmap = "fold_change",
# row_order = rowOrd,
column_title = paste("rglT peaks ", bestGrp2Id, "/", bestGrp1Id, " signal ratio", sep = ""),
column_title_gp = gpar(fontsize = 14, fontface = "bold"),
row_sub_title_side = "left",
heatmap_legend_side = "bottom",
gap = unit(7, "mm"),
padding = unit(rep(0.5, times = 4), "cm")
)
dev.off()
|
#
# Estimate genome size by counting unique kmers in a kmer-frequency histogram.
#
# df         -> data.frame with Frequency and Count columns (e.g. a .histo file)
# start_freq -> frequency to start kmer counting from; auto-detected from the
#               first valley of the Count curve when NULL
# end_freq   -> frequency to end kmer counting at:
#               either a +ve number > start_freq OR a -ve number indicating how
#               far back from the maximum frequency to stop counting
# show_error -> TRUE : plot all data and shade the discounted regions in red
#               FALSE: plot only the counted region
# num_peaks  -> number of peaks to mark on the plot; when 2, the second peak
#               (by frequency) is used as the coverage peak
#
# Returns list(graph = plotly figure, size = genome size estimate,
#              total_kmers, error = total_kmers - size).
#
# NOTE(review): plot_ly/add_trace/layout come from plotly, which is never
# loaded here -- assumes the caller has library(plotly) attached; confirm.
peak_count_kmer <- function(df, start_freq = NULL, end_freq = NULL, show_error = TRUE, num_peaks = 1) {
    library(quantmod)  # findPeaks()

    # initial plot limits from the full histogram
    max_count = max(df$Count)
    max_freq = max(df$Frequency)

    # determine start and end of the counted frequency window
    if (is.null(start_freq)) {
        start_freq = calc_start_freq(df)
    } else if (start_freq < 1) {
        start_freq = 1
    }
    if (is.null(end_freq) || end_freq > max_freq) {
        end_freq = max_freq
    } else if (end_freq < 0) {
        end_freq = max_freq + end_freq  # negative = offset back from the maximum
    }

    # rows within the counted frequency range
    rows = df[df$Frequency >= start_freq & df$Frequency <= end_freq,]

    # plot limits recalculated from the counted region only
    if (!show_error) {
        max_count = max(rows$Count)
        # BUG FIX: was max(rows$Count); the x-axis limit must come from Frequency
        max_freq = max(rows$Frequency)
    }

    # plotly version: shade the ignored regions with red rectangles
    plot_data = rows  # plot only counted region
    rectangles = NULL
    if (show_error) {
        plot_data = df
        rectangles = list(
            # error rectangle, low frequency end
            list(type = "rect",
                 fillcolor = "red", line = list(color = "red"), opacity = 0.3,
                 x0 = 0, x1 = start_freq, xref = "Frequency",
                 y0 = 0, y1 = max_count, yref = "Count"
            ),
            # error rectangle, high frequency end
            list(type = "rect",
                 fillcolor = "red", line = list(color = "red"), opacity = 0.3,
                 x0 = end_freq, x1 = max_freq, xref = "Frequency",
                 y0 = 0, y1 = max_count, yref = "Count"
            )
        )
    }

    # findPeaks() reports the index just after each peak, hence the -1
    peak_rows = findPeaks(plot_data$Count)-1
    if (num_peaks > length(peak_rows)) {
        num_peaks = length(peak_rows)
    }

    # keep the num_peaks tallest peaks (Frequency and Count of each)
    Peaks = plot_data[peak_rows,]
    Peaks = Peaks[order(-Peaks$Count),]
    Peaks = Peaks[1:num_peaks,]
    if (num_peaks == 2) {
        # with two peaks the lower-frequency one is taken to be the error /
        # heterozygous peak; use the second (higher-frequency) one as coverage
        Peaks = Peaks[order(Peaks$Frequency),]
        peak_freq = Peaks$Frequency[2]  # take second peak
    } else {
        peak_freq = Peaks$Frequency[1]  # take tallest peak
    }

    # dashed vertical marker line template, one instance appended per peak
    line <- list(
        type = "line",
        line = list(color = "orange", dash = "dash"),
        xref = "Frequency",
        yref = "Count"
    )
    lines <- list()
    for (i in rownames(Peaks)) {
        peak = Peaks[i,]
        line[c("x0", "x1")] <- peak$Frequency
        line[["y0"]] <- 0
        line[["y1"]] <- peak$Count
        lines <- c(lines, list(line))
    }

    # combine all shapes (rectangles may be NULL when show_error = FALSE)
    shapes = append(rectangles, lines)

    # create plot
    Frequency = plot_data$Frequency
    Count = plot_data$Count
    p = plot_ly(plot_data, x= ~Frequency, y= ~Count,
                name = "Count", type="scatter", mode="lines")
    p = add_trace(p, x= ~Peaks$Frequency, y = ~Peaks$Count,
                  name = "Peaks", mode = "markers")
    p = layout(p, title = "Count VS Frequency", showlegend = FALSE, shapes = shapes)
    p$elementId = NULL #TODO temp approach to suppress warning

    # genome size by simple unique kmer counting over the non-error rows:
    # total counted kmers / coverage (peak frequency)
    size = sum(as.numeric(rows$Frequency * rows$Count)) / peak_freq
    # NOTE(review): total_kmers sums only Frequency, not Frequency * Count --
    # it looks like it should mirror the numerator above; also as.integer()
    # yields NA beyond .Machine$integer.max. Verify before relying on
    # total_kmers / error.
    total_kmers = as.integer(sum(as.numeric(df$Frequency)))
    error = total_kmers - size
    return (list("graph" = p, "size" = size, "total_kmers" = total_kmers, "error" = error))
}
# Auto-detect the low-frequency cutoff for kmer counting: the Frequency at the
# first valley of the Count curve (the dip between the low-frequency error
# peak and the main coverage peak). Returns 0 when no valley is found.
calc_start_freq <- function(df) {
  library(quantmod)  # findValleys()
  # findValleys() reports the index just after each valley, hence the -1;
  # indexing with [1] yields NA when no valley exists
  first_valley <- (findValleys(df$Count) - 1)[1]
  cutoff <- df$Frequency[first_valley]
  if (is.na(cutoff)) {
    cutoff <- 0
  }
  return(cutoff)
}
# Testing
# df = read.table("small.histo")
# names(df) = c("Frequency", "Count")
# r <- peak_count_kmer(df, start_freq = NULL, end_freq = 100, show_error = FALSE, num_peaks = 1)
# r$graph
| /peakCountKmer.R | no_license | slimsuite/GenomeR | R | false | false | 4,613 | r | #
#
# Estimate genome size by counting unique kmers in a kmer-frequency histogram.
#
# df         -> data.frame with Frequency and Count columns (e.g. a .histo file)
# start_freq -> frequency to start kmer counting from; auto-detected from the
#               first valley of the Count curve when NULL
# end_freq   -> frequency to end kmer counting at:
#               either a +ve number > start_freq OR a -ve number indicating how
#               far back from the maximum frequency to stop counting
# show_error -> TRUE : plot all data and shade the discounted regions in red
#               FALSE: plot only the counted region
# num_peaks  -> number of peaks to mark on the plot; when 2, the second peak
#               (by frequency) is used as the coverage peak
#
# Returns list(graph = plotly figure, size = genome size estimate,
#              total_kmers, error = total_kmers - size).
#
# NOTE(review): plot_ly/add_trace/layout come from plotly, which is never
# loaded here -- assumes the caller has library(plotly) attached; confirm.
peak_count_kmer <- function(df, start_freq = NULL, end_freq = NULL, show_error = TRUE, num_peaks = 1) {
    library(quantmod)  # findPeaks()

    # initial plot limits from the full histogram
    max_count = max(df$Count)
    max_freq = max(df$Frequency)

    # determine start and end of the counted frequency window
    if (is.null(start_freq)) {
        start_freq = calc_start_freq(df)
    } else if (start_freq < 1) {
        start_freq = 1
    }
    if (is.null(end_freq) || end_freq > max_freq) {
        end_freq = max_freq
    } else if (end_freq < 0) {
        end_freq = max_freq + end_freq  # negative = offset back from the maximum
    }

    # rows within the counted frequency range
    rows = df[df$Frequency >= start_freq & df$Frequency <= end_freq,]

    # plot limits recalculated from the counted region only
    if (!show_error) {
        max_count = max(rows$Count)
        # BUG FIX: was max(rows$Count); the x-axis limit must come from Frequency
        max_freq = max(rows$Frequency)
    }

    # plotly version: shade the ignored regions with red rectangles
    plot_data = rows  # plot only counted region
    rectangles = NULL
    if (show_error) {
        plot_data = df
        rectangles = list(
            # error rectangle, low frequency end
            list(type = "rect",
                 fillcolor = "red", line = list(color = "red"), opacity = 0.3,
                 x0 = 0, x1 = start_freq, xref = "Frequency",
                 y0 = 0, y1 = max_count, yref = "Count"
            ),
            # error rectangle, high frequency end
            list(type = "rect",
                 fillcolor = "red", line = list(color = "red"), opacity = 0.3,
                 x0 = end_freq, x1 = max_freq, xref = "Frequency",
                 y0 = 0, y1 = max_count, yref = "Count"
            )
        )
    }

    # findPeaks() reports the index just after each peak, hence the -1
    peak_rows = findPeaks(plot_data$Count)-1
    if (num_peaks > length(peak_rows)) {
        num_peaks = length(peak_rows)
    }

    # keep the num_peaks tallest peaks (Frequency and Count of each)
    Peaks = plot_data[peak_rows,]
    Peaks = Peaks[order(-Peaks$Count),]
    Peaks = Peaks[1:num_peaks,]
    if (num_peaks == 2) {
        # with two peaks the lower-frequency one is taken to be the error /
        # heterozygous peak; use the second (higher-frequency) one as coverage
        Peaks = Peaks[order(Peaks$Frequency),]
        peak_freq = Peaks$Frequency[2]  # take second peak
    } else {
        peak_freq = Peaks$Frequency[1]  # take tallest peak
    }

    # dashed vertical marker line template, one instance appended per peak
    line <- list(
        type = "line",
        line = list(color = "orange", dash = "dash"),
        xref = "Frequency",
        yref = "Count"
    )
    lines <- list()
    for (i in rownames(Peaks)) {
        peak = Peaks[i,]
        line[c("x0", "x1")] <- peak$Frequency
        line[["y0"]] <- 0
        line[["y1"]] <- peak$Count
        lines <- c(lines, list(line))
    }

    # combine all shapes (rectangles may be NULL when show_error = FALSE)
    shapes = append(rectangles, lines)

    # create plot
    Frequency = plot_data$Frequency
    Count = plot_data$Count
    p = plot_ly(plot_data, x= ~Frequency, y= ~Count,
                name = "Count", type="scatter", mode="lines")
    p = add_trace(p, x= ~Peaks$Frequency, y = ~Peaks$Count,
                  name = "Peaks", mode = "markers")
    p = layout(p, title = "Count VS Frequency", showlegend = FALSE, shapes = shapes)
    p$elementId = NULL #TODO temp approach to suppress warning

    # genome size by simple unique kmer counting over the non-error rows:
    # total counted kmers / coverage (peak frequency)
    size = sum(as.numeric(rows$Frequency * rows$Count)) / peak_freq
    # NOTE(review): total_kmers sums only Frequency, not Frequency * Count --
    # it looks like it should mirror the numerator above; also as.integer()
    # yields NA beyond .Machine$integer.max. Verify before relying on
    # total_kmers / error.
    total_kmers = as.integer(sum(as.numeric(df$Frequency)))
    error = total_kmers - size
    return (list("graph" = p, "size" = size, "total_kmers" = total_kmers, "error" = error))
}
# Auto-detect the low-frequency cutoff for kmer counting: the Frequency at the
# first valley of the Count curve (the dip between the low-frequency error
# peak and the main coverage peak). Returns 0 when no valley is found.
calc_start_freq <- function(df) {
  library(quantmod)  # findValleys()
  # findValleys() reports the index just after each valley, hence the -1;
  # indexing with [1] yields NA when no valley exists
  first_valley <- (findValleys(df$Count) - 1)[1]
  cutoff <- df$Frequency[first_valley]
  if (is.na(cutoff)) {
    cutoff <- 0
  }
  return(cutoff)
}
# Testing
# df = read.table("small.histo")
# names(df) = c("Frequency", "Count")
# r <- peak_count_kmer(df, start_freq = NULL, end_freq = 100, show_error = FALSE, num_peaks = 1)
# r$graph
|
## read_genomic_data -- reads genomic data in the specified zoom parameters...
##
setClass("genomic_data_model",#"restricted_boltzman_machine",
representation(
n_zooms="integer",
window_sizes="integer",
half_nWindows="integer"
),
)
## Convenience constructor for the genomic_data_model S4 class: builds the
## object from paired vectors of window sizes and half window counts (one
## entry per zoom level; both vectors must have equal length).
genomic_data_model <- function(window_sizes, half_nWindows) {
  stopifnot(NROW(window_sizes) == NROW(half_nWindows))
  n_levels <- as.integer(NROW(window_sizes))
  new(
    "genomic_data_model",
    n_zooms = n_levels,
    window_sizes = as.integer(window_sizes),
    half_nWindows = as.integer(half_nWindows)
  )
}
#' Reads genomic data from the specified position...
#'
#' @param bed A data.frame of genomic regions in bed format (chrom, start, end).
#' @param bigwig_plus Path to bigwig file representing GRO-seq/ PRO-seq reads on the plus strand.
#' @param bigwig_minus Path to bigwig file representing GRO-seq/ PRO-seq reads on the minus strand.
#' @param as_matrix If true, returns a matrix object.
#' @param ncores The number of cores.
#' @param scale.method Default is logistic, but if set to linear it will return read counts normalized by total read count
#' @return Returns a list() object, where each element in the list is the zoom data
#' centered on a
## Read multi-zoom genomic data centered on each bed region from a pair
## of strand-specific bigWig files, querying the regions in parallel
## batches through the C entry point "get_genomic_data_R" (package dREG).
## Returns one row per region as a matrix, or a flattened vector when
## as_matrix is FALSE.
read_genomic_data <- function(gdm, bed, file_bigwig_plus, file_bigwig_minus, as_matrix= TRUE, scale.method=c("logistic", "linear"), batch_size=50000, ncores=1) {
stopifnot(NROW(gdm@window_sizes) == NROW(gdm@half_nWindows))
## Zoom specification handed to the C code: window sizes and half
## window counts, one entry per zoom level.
zoom<- list(as.integer(gdm@window_sizes), as.integer(gdm@half_nWindows))
#batch_size = 50000;
n_elem = NROW(bed)
n_batches = floor( n_elem/batch_size )
## When there are fewer batches than cores, shrink the batch size so
## every core receives work.
if(n_batches < ncores)
{
batch_size = ceiling( n_elem/ncores );
n_batches = floor( n_elem/batch_size);
}
## Batch boundaries over the region indices; unique() guards against a
## duplicated terminal boundary when batch_size divides n_elem exactly.
interval <- unique(c( seq( 1, n_elem+1, by = batch_size ), n_elem+1))
if(missing(scale.method)){ scale.method <- "logistic" };
## Total reads across both strands, used to normalize counts under the
## "linear" scaling method.
total.read.count<- sum(abs(get_reads_from_bigwig(file_bigwig_plus, file_bigwig_minus)));
## Sort regions by chrom/start/end before querying (presumably required
## by the C query -- TODO confirm); skip the copy when already sorted.
bed.ord <- order(bed[,1], bed[,2], bed[,3]);
if ( all( bed.ord == c(1:NROW(bed)) ) )
bed.sorted <- bed
else
bed.sorted <- bed[ bed.ord, ];
datList <- list();
## Process the batches ncores at a time with mclapply.
for(i in 1:ceiling( (length(interval)-1)/ncores ))
{
start_batch <- (i-1)*ncores+1;
stop_batch <- i*ncores;
if(stop_batch>(length(interval)-1)) stop_batch <- length(interval)-1;
datList[start_batch:stop_batch] <- mclapply(start_batch:stop_batch, function(x) {
batch_indx<- c( interval[x]:(interval[x+1]-1) )
# The output from C/C++ is changed to matrix(n_windows, n_sample)) since 6/20/2016
# The original result was a list, which needs to rbind() to matrix.
## Windows are centered on the midpoint of each region; the logical
## flag selects logistic scaling inside the C code.
if(scale.method=="logistic"){
dat <- .Call("get_genomic_data_R",
as.character( bed.sorted[ batch_indx,1 ] ),
as.integer( floor((bed.sorted[ batch_indx,3 ] + bed.sorted[ batch_indx,2 ])/2) ),
as.character( file_bigwig_plus ),
as.character( file_bigwig_minus ),
zoom,
as.logical(TRUE),
PACKAGE= "dREG")
}
else{
dat <- .Call("get_genomic_data_R",
as.character( bed.sorted[ batch_indx,1 ] ),
as.integer( floor((bed.sorted[ batch_indx,3 ] + bed.sorted[ batch_indx,2 ])/2) ),
as.character( file_bigwig_plus ),
as.character( file_bigwig_minus ),
zoom,
as.logical(FALSE),
PACKAGE= "dREG")
## "linear" scaling: divide raw counts by the total read count.
if( !is.null(dat) )
##dat<-lapply(dat, "/", total.read.count);
dat <- dat/total.read.count;
}
if( is.null(dat))
stop("Failed to Call C/C++ functions.\n");
# cat(x, NROW(dat), NCOL(dat), "\n");
return( as.data.frame(t(dat) ) );
}, mc.cores=ncores);
}
## Combine the per-batch data frames into one matrix; data.table's
## rbindlist is used when available because it is faster.
if( length(datList)==1)
dat <- datList[[1]]
else
{
if(requireNamespace("data.table"))
dat <- as.matrix( data.table::rbindlist(datList) )
else
dat <- as.matrix( do.call(rbind, datList) );
}
rm(datList);
## Restore the caller's original region order.
if ( !all( bed.ord == c(1:NROW(bed)) ) )
## all(bed.sorted[order(bed.ord),] == bed)
dat <- dat [ order(bed.ord), ];
if( !as_matrix )
dat <- c(t(dat));
return(dat);
}
# Estimate total read counts from a pair of strand-specific bigWig files.
#
# @file_bigwig_plus bigWig filename (plus strand)
# @file_bigwig_minus bigWig filename (minus strand)
#
# @return vector of length two: estimated reads in the plus and minus files.
## Estimate the total read count in each of a pair of strand-specific
## bigWig files.  The count is approximated as mean coverage times the
## number of covered bases; the earlier per-chromosome query approach
## was too slow and hit errors in the bigWig library on unmapped
## sections, so it was abandoned.
get_reads_from_bigwig <- function(file_bigwig_plus, file_bigwig_minus)
{
  bw_plus  <- load.bigWig(file_bigwig_plus)
  bw_minus <- load.bigWig(file_bigwig_minus)

  reads_plus  <- round(bw_plus$mean  * bw_plus$basesCovered)
  reads_minus <- round(bw_minus$mean * bw_minus$basesCovered)

  ## Unloading is best-effort; a failure here must not discard the
  ## counts already computed.
  try(unload.bigWig(bw_plus))
  try(unload.bigWig(bw_minus))

  c(reads_plus, reads_minus)
}
# improved list of objects
# author: Dirk Eddelbuettel
# reference: http://stackoverflow.com/questions/1358003/tricks-to-manage-the-available-memory-in-an-r-session
# Summarize the objects visible in a search-path position (pos) or in an
# explicit environment (envir): first class, mode-derived type, size in
# bytes and pretty-printed, and dimensions (length for plain vectors).
#
# pos        search-path position, used when envir is NULL
# envir      environment to inspect; overrides pos/pattern when non-NULL
# pattern    optional regexp filtering names (only used when envir is NULL)
# order.by   optional column name ("Type", "Size", ...) to sort by
# decreasing sort direction when order.by is given
# head, n    when head is TRUE, keep only the first n rows
# Returns a data.frame with columns Type, Size, PrettySize, Rows, Columns.
.ls.objects <- function (pos = 1, envir=NULL, pattern, order.by,
                        decreasing=FALSE, head=FALSE, n=5) {
    # Apply fn to each named object, substituting `missing` when fn fails.
    napply <- function(names, fn, missing=NA) sapply(names, function(x){
        ret <- suppressWarnings( try(fn( if(is.null(envir)) get(x, pos = pos) else get(x, envir=envir) ), TRUE) );
        # inherits() is the robust test here: class(ret) can have length
        # > 1, which makes `class(ret) == "try-error"` unreliable in if()
        # (and an error in R >= 4.2).
        if (inherits(ret, "try-error")) return(missing);
        ret;
    });
    if(is.null(envir))
      names <- ls( pos = pos, pattern = pattern)
    else
      names <- ls( envir = envir )
    obj.class <- napply(names, function(x) as.character(class(x))[1], "NA")
    obj.mode <- napply(names, mode)
    obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
    obj.prettysize <- napply(names, function(x) {
                           capture.output(format(utils::object.size(x), units = "auto")) } )
    obj.size <- napply(names, object.size )
    obj.dim <- t(napply(names, function(x)
                        as.numeric(dim(x))[1:2], c(NA,NA) ) );
    # Plain vectors have no dim(); report their length in the Rows column.
    vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
    obj.dim[vec, 1] <- napply(names, length)[vec]
    out <- data.frame(obj.type, obj.size, obj.prettysize, obj.dim)
    names(out) <- c("Type", "Size", "PrettySize", "Rows", "Columns")
    if (!missing(order.by))
        out <- out[order(out[[order.by]], decreasing=decreasing), ]
    if (head)
        out <- head(out, n)
    out
}
# shorthand
# Shorthand wrapper: show the n largest objects, biggest first.
lsos <- function(..., n=10) {
  .ls.objects(..., order.by = "Size", decreasing = TRUE, head = TRUE, n = n)
}
| /R_packages/dREG/R/read_genomic_data.R | no_license | juhaa/dREG_analysis | R | false | false | 6,827 | r | ## read_genomic_data -- reads genomic data in the specified zoom parameters...
##
## S4 class holding the multi-zoom window configuration used by
## read_genomic_data(): the number of zoom levels, the window size at
## each zoom, and the number of windows on each side of the center.
setClass("genomic_data_model",#"restricted_boltzman_machine",
representation(
n_zooms="integer",
window_sizes="integer",
half_nWindows="integer"
),
)
## Constructor for genomic_data_model: checks that the two zoom vectors
## have equal length and coerces every slot to integer.
genomic_data_model <- function(window_sizes, half_nWindows) {
stopifnot(NROW(window_sizes) == NROW(half_nWindows))
new("genomic_data_model", n_zooms= as.integer(NROW(window_sizes)), window_sizes= as.integer(window_sizes), half_nWindows= as.integer(half_nWindows))
}
#' Reads genomic data from the specified positions...
#'
#' @param gdm A genomic_data_model object giving the zoom window sizes and half window counts.
#' @param bed A data.frame of genomic regions in bed format (chrom, start, end).
#' @param file_bigwig_plus Path to bigwig file representing GRO-seq/ PRO-seq reads on the plus strand.
#' @param file_bigwig_minus Path to bigwig file representing GRO-seq/ PRO-seq reads on the minus strand.
#' @param as_matrix If true, returns a matrix object.
#' @param scale.method Default is logistic, but if set to linear it will return read counts normalized by total read count
#' @param batch_size The number of regions queried per batch.
#' @param ncores The number of cores.
#' @return The zoom data centered on each region: a matrix with one row
#' per region when as_matrix is TRUE, otherwise a flattened vector.
## Read multi-zoom genomic data centered on each bed region from a pair
## of strand-specific bigWig files, querying the regions in parallel
## batches through the C entry point "get_genomic_data_R" (package dREG).
## Returns one row per region as a matrix, or a flattened vector when
## as_matrix is FALSE.
read_genomic_data <- function(gdm, bed, file_bigwig_plus, file_bigwig_minus, as_matrix= TRUE, scale.method=c("logistic", "linear"), batch_size=50000, ncores=1) {
stopifnot(NROW(gdm@window_sizes) == NROW(gdm@half_nWindows))
## Zoom specification handed to the C code: window sizes and half
## window counts, one entry per zoom level.
zoom<- list(as.integer(gdm@window_sizes), as.integer(gdm@half_nWindows))
#batch_size = 50000;
n_elem = NROW(bed)
n_batches = floor( n_elem/batch_size )
## When there are fewer batches than cores, shrink the batch size so
## every core receives work.
if(n_batches < ncores)
{
batch_size = ceiling( n_elem/ncores );
n_batches = floor( n_elem/batch_size);
}
## Batch boundaries over the region indices; unique() guards against a
## duplicated terminal boundary when batch_size divides n_elem exactly.
interval <- unique(c( seq( 1, n_elem+1, by = batch_size ), n_elem+1))
if(missing(scale.method)){ scale.method <- "logistic" };
## Total reads across both strands, used to normalize counts under the
## "linear" scaling method.
total.read.count<- sum(abs(get_reads_from_bigwig(file_bigwig_plus, file_bigwig_minus)));
## Sort regions by chrom/start/end before querying (presumably required
## by the C query -- TODO confirm); skip the copy when already sorted.
bed.ord <- order(bed[,1], bed[,2], bed[,3]);
if ( all( bed.ord == c(1:NROW(bed)) ) )
bed.sorted <- bed
else
bed.sorted <- bed[ bed.ord, ];
datList <- list();
## Process the batches ncores at a time with mclapply.
for(i in 1:ceiling( (length(interval)-1)/ncores ))
{
start_batch <- (i-1)*ncores+1;
stop_batch <- i*ncores;
if(stop_batch>(length(interval)-1)) stop_batch <- length(interval)-1;
datList[start_batch:stop_batch] <- mclapply(start_batch:stop_batch, function(x) {
batch_indx<- c( interval[x]:(interval[x+1]-1) )
# The output from C/C++ is changed to matrix(n_windows, n_sample)) since 6/20/2016
# The original result was a list, which needs to rbind() to matrix.
## Windows are centered on the midpoint of each region; the logical
## flag selects logistic scaling inside the C code.
if(scale.method=="logistic"){
dat <- .Call("get_genomic_data_R",
as.character( bed.sorted[ batch_indx,1 ] ),
as.integer( floor((bed.sorted[ batch_indx,3 ] + bed.sorted[ batch_indx,2 ])/2) ),
as.character( file_bigwig_plus ),
as.character( file_bigwig_minus ),
zoom,
as.logical(TRUE),
PACKAGE= "dREG")
}
else{
dat <- .Call("get_genomic_data_R",
as.character( bed.sorted[ batch_indx,1 ] ),
as.integer( floor((bed.sorted[ batch_indx,3 ] + bed.sorted[ batch_indx,2 ])/2) ),
as.character( file_bigwig_plus ),
as.character( file_bigwig_minus ),
zoom,
as.logical(FALSE),
PACKAGE= "dREG")
## "linear" scaling: divide raw counts by the total read count.
if( !is.null(dat) )
##dat<-lapply(dat, "/", total.read.count);
dat <- dat/total.read.count;
}
if( is.null(dat))
stop("Failed to Call C/C++ functions.\n");
# cat(x, NROW(dat), NCOL(dat), "\n");
return( as.data.frame(t(dat) ) );
}, mc.cores=ncores);
}
## Combine the per-batch data frames into one matrix; data.table's
## rbindlist is used when available because it is faster.
if( length(datList)==1)
dat <- datList[[1]]
else
{
if(requireNamespace("data.table"))
dat <- as.matrix( data.table::rbindlist(datList) )
else
dat <- as.matrix( do.call(rbind, datList) );
}
rm(datList);
## Restore the caller's original region order.
if ( !all( bed.ord == c(1:NROW(bed)) ) )
## all(bed.sorted[order(bed.ord),] == bed)
dat <- dat [ order(bed.ord), ];
if( !as_matrix )
dat <- c(t(dat));
return(dat);
}
# Estimate total read counts from a pair of strand-specific bigWig files.
#
# @file_bigwig_plus bigWig filename (plus strand)
# @file_bigwig_minus bigWig filename (minus strand)
#
# @return vector of length two: estimated reads in the plus and minus files.
# Estimate the total read count in each bigWig file as mean coverage
# times bases covered.  The per-chromosome query approach below was
# abandoned for the reasons noted.
get_reads_from_bigwig <- function(file_bigwig_plus, file_bigwig_minus)
{
bw.plus <- load.bigWig(file_bigwig_plus)
bw.minus <- load.bigWig(file_bigwig_minus)
## 1) It takes long time
## 2) The offset is too big for some unmapped section, it will cause errors in library bigWig
#offset_dist <- 250;
#df.bed.plus<-data.frame(bw.plus$chroms, offset_dist, bw.plus$chromSizes, names=".", scores=".",strands="+")
#df.bed.minus<-data.frame(bw.minus$chroms, offset_dist, bw.minus$chromSizes, names=".", scores=".", strands="-")
#r.plus <- sum(abs(bed6.region.bpQuery.bigWig( bw.plus, bw.minus, df.bed.plus)));
#r.minus <- sum(abs(bed6.region.bpQuery.bigWig( bw.plus, bw.minus, df.bed.minus)));
r.plus <- round(bw.plus$mean * bw.plus$basesCovered );
r.minus <- round(bw.minus$mean * bw.minus$basesCovered );
# Unloading is best-effort; a failure must not discard the counts.
try( unload.bigWig( bw.plus ) );
try( unload.bigWig( bw.minus ) );
return(c(r.plus,r.minus));
}
# improved list of objects
# author: Dirk Eddelbuettel
# reference: http://stackoverflow.com/questions/1358003/tricks-to-manage-the-available-memory-in-an-r-session
# Summarize the objects visible in a search-path position (pos) or in an
# explicit environment (envir): first class, mode-derived type, size in
# bytes and pretty-printed, and dimensions (length for plain vectors).
#
# pos        search-path position, used when envir is NULL
# envir      environment to inspect; overrides pos/pattern when non-NULL
# pattern    optional regexp filtering names (only used when envir is NULL)
# order.by   optional column name ("Type", "Size", ...) to sort by
# decreasing sort direction when order.by is given
# head, n    when head is TRUE, keep only the first n rows
# Returns a data.frame with columns Type, Size, PrettySize, Rows, Columns.
.ls.objects <- function (pos = 1, envir=NULL, pattern, order.by,
                        decreasing=FALSE, head=FALSE, n=5) {
    # Apply fn to each named object, substituting `missing` when fn fails.
    napply <- function(names, fn, missing=NA) sapply(names, function(x){
        ret <- suppressWarnings( try(fn( if(is.null(envir)) get(x, pos = pos) else get(x, envir=envir) ), TRUE) );
        # inherits() is the robust test here: class(ret) can have length
        # > 1, which makes `class(ret) == "try-error"` unreliable in if()
        # (and an error in R >= 4.2).
        if (inherits(ret, "try-error")) return(missing);
        ret;
    });
    if(is.null(envir))
      names <- ls( pos = pos, pattern = pattern)
    else
      names <- ls( envir = envir )
    obj.class <- napply(names, function(x) as.character(class(x))[1], "NA")
    obj.mode <- napply(names, mode)
    obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
    obj.prettysize <- napply(names, function(x) {
                           capture.output(format(utils::object.size(x), units = "auto")) } )
    obj.size <- napply(names, object.size )
    obj.dim <- t(napply(names, function(x)
                        as.numeric(dim(x))[1:2], c(NA,NA) ) );
    # Plain vectors have no dim(); report their length in the Rows column.
    vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
    obj.dim[vec, 1] <- napply(names, length)[vec]
    out <- data.frame(obj.type, obj.size, obj.prettysize, obj.dim)
    names(out) <- c("Type", "Size", "PrettySize", "Rows", "Columns")
    if (!missing(order.by))
        out <- out[order(out[[order.by]], decreasing=decreasing), ]
    if (head)
        out <- head(out, n)
    out
}
# shorthand
# Shorthand wrapper: show the n largest objects, biggest first.
lsos <- function(..., n=10) {
.ls.objects(..., order.by="Size", decreasing=TRUE, head=TRUE, n=n)
}
|
\name{nwfscSurveyCode-package}
\alias{nwfscSurveyCode-package}
\alias{nwfscSurveyCode}
\docType{package}
\title{
Functions used to analyze NWFSC survey data and create input for SS3
}
\description{
More about what it does (maybe more than one line)
}
\details{
\tabular{ll}{
Package: \tab nwfscSurveyCode\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-01-10\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Allan C Hicks <allan.hicks_at_noaa.gov>
}
\references{
~~ Literature or other references for background information ~~
}
% ~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
%~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
}
| /man/nwfscSurveyCode-package.Rd | no_license | melissahaltuch-NOAA/nwfscSurvey | R | false | false | 912 | rd | \name{nwfscSurveyCode-package}
\alias{nwfscSurveyCode-package}
\alias{nwfscSurveyCode}
\docType{package}
\title{
Functions used to analyze NWFSC survey data and create input for SS3
}
\description{
More about what it does (maybe more than one line)
}
\details{
\tabular{ll}{
Package: \tab nwfscSurveyCode\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-01-10\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Allan C Hicks <allan.hicks_at_noaa.gov>
}
\references{
~~ Literature or other references for background information ~~
}
% ~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
%~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
}
|
## Functions for creating and using inverted matrices which caching
## Creates cacheable matrix
## Create a matrix wrapper that can cache its inverse.
##
## original: the matrix to wrap (defaults to an empty 1x1 matrix).
## Returns a list of accessor functions closing over `original` and the
## cached inverse `inverted`:
##   set(y)           replace the matrix (invalidates the cache)
##   get()            return the current matrix
##   set_inverted(x)  store a computed inverse in the cache
##   get_inverted()   return the cached inverse, or NULL if not cached
makeCacheMatrix <- function(original = matrix()) {
  inverted <- NULL

  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    original <<- y
    inverted <<- NULL
  }
  get <- function() {
    original
  }
  set_inverted <- function(solve) {
    inverted <<- solve
  }
  get_inverted <- function() {
    inverted
  }

  list(set = set,
       get = get,
       set_inverted = set_inverted,
       get_inverted = get_inverted)
}
## Inverses and caches matrix returned by makeCacheMatrix()
## unless inversed matrix is already cached, then uses cached value
## returns inversed matrix
## Return the inverse of a cacheable matrix created by makeCacheMatrix().
## The inverse is computed (and cached) on the first call; later calls
## return the cached value without recomputing.
##
## cacheable  list of accessors produced by makeCacheMatrix()
## ...        additional arguments forwarded to solve() (e.g. tol)
## Returns the inverted matrix.
cacheSolve <- function(cacheable, ...) {
  inverted <- cacheable$get_inverted()
  # not cached?
  if (is.null(inverted)) {
    print("cache miss")
    # invert and cache the matrix
    original <- cacheable$get()
    # Forward ... to solve(); previously the extra arguments declared in
    # the signature were silently dropped.
    inverted <- solve(original, ...)
    cacheable$set_inverted(inverted)
  } else {
    print("cache hit")
  }
  # return inverted
  inverted
}
## Smoke test: the first cacheSolve() call should compute the inverse
## ('cache miss'); the second should return the cached one ('cache hit').
test <- function() {
  cm <- makeCacheMatrix(matrix(1:4, nrow = 2, ncol = 2))
  cacheSolve(cm)  # prints 'cache miss'
  cacheSolve(cm)  # prints 'cache hit'
}
#test()
| /cachematrix.R | no_license | dlg99/ProgrammingAssignment2 | R | false | false | 1,255 | r | ## Functions for creating and using inverted matrices which caching
## Creates cacheable matrix
## Create a matrix wrapper that can cache its inverse.  Returns a list
## of accessors closing over `original` and the cached `inverted` value.
makeCacheMatrix <- function(original = matrix()) {
inverted <- NULL
# Replacing the matrix invalidates any previously cached inverse.
set <- function(y) {
original <<- y
inverted <<- NULL
}
# getter and setter for inv. matrix value
get <- function() original
# Inverting the matrix uses the built-in solve() function in R
set_inverted <- function(solve) inverted <<- solve
get_inverted <- function() inverted
list(
set = set,
get = get,
set_inverted = set_inverted,
get_inverted = get_inverted)
}
## Inverses and caches matrix returned by makeCacheMatrix()
## unless inversed matrix is already cached, then uses cached value
## returns inversed matrix
## Return the inverse of a cacheable matrix created by makeCacheMatrix().
## The inverse is computed (and cached) on the first call; later calls
## return the cached value without recomputing.
##
## cacheable  list of accessors produced by makeCacheMatrix()
## ...        additional arguments forwarded to solve() (e.g. tol)
## Returns the inverted matrix.
cacheSolve <- function(cacheable, ...) {
  inverted <- cacheable$get_inverted()
  # not cached?
  if (is.null(inverted)) {
    print("cache miss")
    # invert and cache the matrix
    original <- cacheable$get()
    # Forward ... to solve(); previously the extra arguments declared in
    # the signature were silently dropped.
    inverted <- solve(original, ...)
    cacheable$set_inverted(inverted)
  } else {
    print("cache hit")
  }
  # return inverted
  inverted
}
## Smoke test: first call computes ('cache miss'), second hits the cache.
test <- function() {
m <- makeCacheMatrix(matrix(1:4, nrow = 2, ncol = 2))
cacheSolve(m) # prints 'cache miss'
cacheSolve(m) # prints 'cache hit'
}
#test()
|
## run_analysis.R -- build a tidy summary of the UCI HAR dataset:
## download/unzip the raw data, merge the train/test splits, keep only
## mean/std measurements, label activities, clean variable names, and
## write the per-subject per-activity means to tidy_data.txt.

file_name <- "raw_dataset.zip"

# Download and unzip the given dataset.  The URL must contain no
# trailing whitespace (the original string had a stray space inside the
# quotes, which breaks the request).
if (!file.exists(file_name)) {
  URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(URL, file_name, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(file_name)
}

# Load the measurement data and use the feature names as column names.
train_data <- read.table("UCI HAR Dataset/train/X_train.txt", stringsAsFactors = FALSE)
test_data <- read.table("UCI HAR Dataset/test/X_test.txt", stringsAsFactors = FALSE)
all_data <- rbind(train_data, test_data)
features <- read.table("UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
colnames(all_data) <- features[, 2]

# Activity labels (y files) for both splits.
train_activity <- read.table("UCI HAR Dataset/train/y_train.txt", stringsAsFactors = FALSE)
test_activity <- read.table("UCI HAR Dataset/test/y_test.txt", stringsAsFactors = FALSE)
all_activity <- rbind(train_activity, test_activity)
colnames(all_activity) <- "activity"

# Subject identifiers for both splits.
train_subject <- read.table("UCI HAR Dataset/train/subject_train.txt", stringsAsFactors = FALSE)
test_subject <- read.table("UCI HAR Dataset/test/subject_test.txt", stringsAsFactors = FALSE)
all_subject <- rbind(train_subject, test_subject)
colnames(all_subject) <- "subject"

# Merge all dataset and create a complete raw dataset ( Step : 1 )
raw_data <- cbind(all_activity, all_subject, raw_data_cols <- all_data)

# Extracts only the measurements on the mean and standard deviation for each measurement ( Step : 2 )
extracted_data_index <- grep("activity|subject|mean\\(\\)|std\\(\\)", colnames(raw_data))
extracted_raw_data <- raw_data[, extracted_data_index]

# Descriptive activity names to name the activities in the data set ( Step : 3 )
activity_names <- read.table("UCI HAR Dataset/activity_labels.txt", stringsAsFactors = FALSE, col.names = c("ID", "Label"))
extracted_raw_data$activity <- factor(extracted_raw_data$activity, levels = activity_names[, 1], labels = activity_names[, 2])

# Appropriately labels the data set with descriptive variable names ( Step : 4 )
names(extracted_raw_data) <- gsub("^t", "time", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("^f", "frequency", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("Acc", "Accelerometer", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("Gyro", "Gyroscope", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("Mag", "Magnitude", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("BodyBody", "Body", names(extracted_raw_data))
tidy_data_full <- extracted_raw_data

# Independent tidy data set with the average of each variable for each activity and each subject ( Step : 5 )
library(dplyr)
# across()/summarise() replaces the long-deprecated summarise_each(funs(mean)).
tidy_data <- tidy_data_full %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean), .groups = "drop")
write.table(tidy_data, "tidy_data.txt", row.names = FALSE, quote = FALSE)
| /run_analysis.R | no_license | ggarpanchal/Coursera_Data_Cleaning_Project | R | false | false | 2,831 | r | file_name <- "raw_dataset.zip"
# Download and unzip the given dataset.  The URL must contain no
# trailing whitespace (the original string had a stray space inside the
# quotes, which breaks the request).
if (!file.exists(file_name)) {
  URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(URL, file_name, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(file_name)
}

# Load the measurement data and use the feature names as column names.
train_data <- read.table("UCI HAR Dataset/train/X_train.txt", stringsAsFactors = FALSE)
test_data <- read.table("UCI HAR Dataset/test/X_test.txt", stringsAsFactors = FALSE)
all_data <- rbind(train_data, test_data)
features <- read.table("UCI HAR Dataset/features.txt", stringsAsFactors = FALSE)
colnames(all_data) <- features[, 2]

# Activity labels (y files) for both splits.
train_activity <- read.table("UCI HAR Dataset/train/y_train.txt", stringsAsFactors = FALSE)
test_activity <- read.table("UCI HAR Dataset/test/y_test.txt", stringsAsFactors = FALSE)
all_activity <- rbind(train_activity, test_activity)
colnames(all_activity) <- "activity"

# Subject identifiers for both splits.
train_subject <- read.table("UCI HAR Dataset/train/subject_train.txt", stringsAsFactors = FALSE)
test_subject <- read.table("UCI HAR Dataset/test/subject_test.txt", stringsAsFactors = FALSE)
all_subject <- rbind(train_subject, test_subject)
colnames(all_subject) <- "subject"

# Merge all dataset and create a complete raw dataset ( Step : 1 )
raw_data <- cbind(all_activity, all_subject, all_data)

# Extracts only the measurements on the mean and standard deviation for each measurement ( Step : 2 )
extracted_data_index <- grep("activity|subject|mean\\(\\)|std\\(\\)", colnames(raw_data))
extracted_raw_data <- raw_data[, extracted_data_index]

# Descriptive activity names to name the activities in the data set ( Step : 3 )
activity_names <- read.table("UCI HAR Dataset/activity_labels.txt", stringsAsFactors = FALSE, col.names = c("ID", "Label"))
extracted_raw_data$activity <- factor(extracted_raw_data$activity, levels = activity_names[, 1], labels = activity_names[, 2])

# Appropriately labels the data set with descriptive variable names ( Step : 4 )
names(extracted_raw_data) <- gsub("^t", "time", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("^f", "frequency", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("Acc", "Accelerometer", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("Gyro", "Gyroscope", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("Mag", "Magnitude", names(extracted_raw_data))
names(extracted_raw_data) <- gsub("BodyBody", "Body", names(extracted_raw_data))
tidy_data_full <- extracted_raw_data

# Independent tidy data set with the average of each variable for each activity and each subject ( Step : 5 )
library(dplyr)
# across()/summarise() replaces the long-deprecated summarise_each(funs(mean)).
tidy_data <- tidy_data_full %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean), .groups = "drop")
write.table(tidy_data, "tidy_data.txt", row.names = FALSE, quote = FALSE)
|
##################################
# Author: Gina Nichols (vnichols@iastate.edu)
# Created: June 5 2020
#
# Purpose: make manuscript figs
#
# Notes:
# Last modified: 7/13/2020 (change UB/UG to UD/UM)
#
####################################
library(readr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(stringr)
library(patchwork)
library(PFIweeds2020)
# constant themes ---------------------------------------------------------
# Shared ggplot2 theme fragments reused by the figure code: legend
# pinned to the top-left inside the plot area, with a black border.
mylegendtheme <- theme(legend.position = c(0.1, 0.9),
legend.justification = c(0,1),
legend.background = element_rect(color = "black"))
# Enlarged axis/legend/strip text for manuscript-ready figures.
myaxistexttheme <- theme(axis.text = element_text(size = rel(1.2)),
legend.text = element_text(size = rel(1.3)),
axis.title = element_text(size = rel(1.3)),
strip.text = element_text(size = rel(1.3)))
# Project color palette (hex codes).
p_green <- "#619B44"
p_blue <- "#46B2B5"
p_pink <- "#DC1A64"
p_orange <- "#FFA726"
p_yellow <- "#FFC000"
p_gray <- "#E7E6E6"
# Preview the green swatch (interactive side effect only).
scales::show_col(p_green)
# weed list ---------------------------------------------------------------
# Build the manuscript weed-species table: total seeds per weed code
# across all experimental units, percent of the grand total (values
# below 0.1% rendered as "<0.10"), joined to the species list for
# scientific/common names, with several common names normalized, then
# written to CSV.
dat_table <-
pfi_ghobsraw %>%
pfifun_sum_weedbyeu() %>%
group_by(weed) %>%
summarise(tot_seeds = sum(seeds)) %>%
# Percent of total seeds, formatted as a "%" string.
mutate(sum = sum(tot_seeds),
pct = round(tot_seeds/sum*100, 2),
pct2 = ifelse(pct < 0.1, "<0.10", pct),
pct2 = paste0(pct2, "%")) %>%
arrange(-pct) %>%
rename(code = weed) %>%
left_join(pfi_weedsplist) %>%
# Combine photo_path and functional_grp into one description column.
unite(photo_path, functional_grp, col = "desc", sep = " ") %>%
select(code, scientific_name, common_name, desc, pct2) %>%
# Normalize capitalization and a handful of common-name variants.
mutate(scientific_name = str_to_sentence(scientific_name),
common_name = str_to_sentence(common_name),
#scientific_name = ifelse(scientific_name == "Setaria", "Setaria genus", scientific_name),
common_name = ifelse(common_name == "Water hemp", "Waterhemp", common_name),
common_name = ifelse(common_name == "Marestail", "Horseweed", common_name),
common_name = ifelse(common_name == "Nightshade", "Eastern black nightshade", common_name),
common_name = ifelse(common_name == "Rye (cereal)", "Cereal rye", common_name),
desc = ifelse(desc == "NA NA", NA, desc))
dat_table
write_csv(dat_table, "02_make-figs/mf_weed-list-arranged.csv")
# weed list, by trial ---------------------------------------------------------------
# Same weed summary but broken out by trial (field x system treatment):
# after summarise() the data remain grouped by site_sys, so the percent
# is computed within each trial; the result is pivoted to one column
# per trial and written to CSV.
dat_table_trial <-
pfi_ghobsraw %>%
pfifun_sum_weedbyeu() %>%
unite(site_name, field, sys_trt, col = "site_sys") %>%
group_by(site_sys, weed) %>%
summarise(tot_seeds = sum(seeds)) %>%
mutate(sum = sum(tot_seeds),
pct = round(tot_seeds/sum*100, 2),
pct2 = ifelse(pct < 0.1, "<0.10", pct),
pct2 = paste0(pct2, "%")) %>%
arrange(-pct) %>%
select(site_sys, weed, pct2) %>%
pivot_wider(names_from = site_sys, values_from = pct2)
dat_table_trial
write_csv(dat_table_trial, "02_make-figs/mf_weed-list-arranged-by-trial.csv")
| /02_make-figs/code_tbl-weed-list.R | no_license | vanichols/PFIweeds2020_analysis | R | false | false | 3,001 | r | ##################################
# Author: Gina Nichols (vnichols@iastate.edu)
# Created: June 5 2020
#
# Purpose: make manuscript figs
#
# Notes:
# Last modified: 7/13/2020 (change UB/UG to UD/UM)
#
####################################
library(readr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(stringr)
library(patchwork)
library(PFIweeds2020)
# constant themes ---------------------------------------------------------
# Shared ggplot2 theme fragments reused by the figure code: legend
# pinned to the top-left inside the plot area, with a black border.
mylegendtheme <- theme(legend.position = c(0.1, 0.9),
legend.justification = c(0,1),
legend.background = element_rect(color = "black"))
# Enlarged axis/legend/strip text for manuscript-ready figures.
myaxistexttheme <- theme(axis.text = element_text(size = rel(1.2)),
legend.text = element_text(size = rel(1.3)),
axis.title = element_text(size = rel(1.3)),
strip.text = element_text(size = rel(1.3)))
# Project color palette (hex codes).
p_green <- "#619B44"
p_blue <- "#46B2B5"
p_pink <- "#DC1A64"
p_orange <- "#FFA726"
p_yellow <- "#FFC000"
p_gray <- "#E7E6E6"
# Preview the green swatch (interactive side effect only).
scales::show_col(p_green)
# weed list ---------------------------------------------------------------
# Build the manuscript weed-species table: total seeds per weed code
# across all experimental units, percent of the grand total (values
# below 0.1% rendered as "<0.10"), joined to the species list for
# scientific/common names, with several common names normalized, then
# written to CSV.
dat_table <-
pfi_ghobsraw %>%
pfifun_sum_weedbyeu() %>%
group_by(weed) %>%
summarise(tot_seeds = sum(seeds)) %>%
# Percent of total seeds, formatted as a "%" string.
mutate(sum = sum(tot_seeds),
pct = round(tot_seeds/sum*100, 2),
pct2 = ifelse(pct < 0.1, "<0.10", pct),
pct2 = paste0(pct2, "%")) %>%
arrange(-pct) %>%
rename(code = weed) %>%
left_join(pfi_weedsplist) %>%
# Combine photo_path and functional_grp into one description column.
unite(photo_path, functional_grp, col = "desc", sep = " ") %>%
select(code, scientific_name, common_name, desc, pct2) %>%
# Normalize capitalization and a handful of common-name variants.
mutate(scientific_name = str_to_sentence(scientific_name),
common_name = str_to_sentence(common_name),
#scientific_name = ifelse(scientific_name == "Setaria", "Setaria genus", scientific_name),
common_name = ifelse(common_name == "Water hemp", "Waterhemp", common_name),
common_name = ifelse(common_name == "Marestail", "Horseweed", common_name),
common_name = ifelse(common_name == "Nightshade", "Eastern black nightshade", common_name),
common_name = ifelse(common_name == "Rye (cereal)", "Cereal rye", common_name),
desc = ifelse(desc == "NA NA", NA, desc))
dat_table
write_csv(dat_table, "02_make-figs/mf_weed-list-arranged.csv")
# weed list, by trial ---------------------------------------------------------------
# Same weed summary but broken out by trial (field x system treatment):
# after summarise() the data remain grouped by site_sys, so the percent
# is computed within each trial; the result is pivoted to one column
# per trial and written to CSV.
dat_table_trial <-
pfi_ghobsraw %>%
pfifun_sum_weedbyeu() %>%
unite(site_name, field, sys_trt, col = "site_sys") %>%
group_by(site_sys, weed) %>%
summarise(tot_seeds = sum(seeds)) %>%
mutate(sum = sum(tot_seeds),
pct = round(tot_seeds/sum*100, 2),
pct2 = ifelse(pct < 0.1, "<0.10", pct),
pct2 = paste0(pct2, "%")) %>%
arrange(-pct) %>%
select(site_sys, weed, pct2) %>%
pivot_wider(names_from = site_sys, values_from = pct2)
dat_table_trial
write_csv(dat_table_trial, "02_make-figs/mf_weed-list-arranged-by-trial.csv")
|
#' @include CalculateTravelDemand.R
NULL
#===================
#CalculateTravelDemandFuture.R
#===================
#This module calculates average daily vehicle miles traveled for households. It also
#calculates average DVMT, daily consumption of fuel (in gallons), and average daily
#CO2-equivalent greenhouse gas emissions for all vehicles.
# library(visioneval)
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
## Current implementation
### The current version implements the models used in the RPAT (GreenSTEP)
### ecosystem.
## Future Development
## Use estimation data set to create models
# #Load Dvmt assignment models
# load("data/DvmtLmModels_ls.rda")
#
# #Load PHEV/HEV model data
# load("data/PhevModelData_ls.rda")
#
# #Load default values for Travel Demand module
# load("./data/TravelDemandDefaults_ls.rda")
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
CalculateTravelDemandFutureSpecifications <- list(
#Level of geography module is applied at
RunBy = "Region",
#Specify new tables to be created by Inp if any
#Specify new tables to be created by Set if any
#Specify input data
#Specify data to be loaded from data store
Get = items(
item(
NAME = "Bzone",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME = "UrbanIncome",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "currency",
UNITS = "USD.2000",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "UrbanIncome",
TABLE = "Bzone",
GROUP = "BaseYear",
TYPE = "currency",
UNITS = "USD.2000",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"HhId",
"HhPlaceTypes"),
TABLE = "Household",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "NA",
ISELEMENTOF = ""
),
item(
NAME = "HhSize",
TABLE = "Household",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "Income",
TABLE = "Household",
GROUP = "Year",
TYPE = "currency",
UNITS = "USD.2000",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = items(
"Age0to14",
"Age15to19",
"Age20to29",
"Age30to54",
"Age55to64",
"Age65Plus"
),
TABLE = "Household",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "VehiclesFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "vehicles",
UNITS = "VEH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
# Vehicle variables
item(
NAME = items("HhIdFuture",
"VehIdFuture"),
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "NA",
ISELEMENTOF = ""
),
item(
NAME = "AgeFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "time",
UNITS = "YR",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "MileageFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "TypeFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
SIZE = 7,
PROHIBIT = "NA",
ISELEMENTOF = c("Auto", "LtTrk")
),
item(
NAME = "DvmtPropFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = items(
"FwyLaneMiPCFuture",
"TranRevMiPCFuture"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "BaseCostPerMile",
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "USD/MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "DvmtBudgetProp",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "AnnVmtInflator",
TABLE = "Model",
GROUP = "Global",
TYPE = "integer",
UNITS = "DAYS",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"FuelCost",
"GasTax"
),
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "USD/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "KwhCost",
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "USD/KWH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "Fuel",
TABLE = "Fuel",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
SIZE = 12,
ISELEMENTOF = c("ULSD", "Biodiesel", "RFG", "CARBOB", "Ethanol", "Cng", "Electricity")
),
item(
NAME = "Intensity",
TABLE = "Fuel",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "VehType",
TABLE = "FuelProp",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
SIZE = 7,
ISELEMENTOF = c("Auto", "LtTruck", "Bus", "Truck")
),
item(
NAME = item(
"PropDiesel",
"PropCng",
"PropGas"
),
TABLE = "FuelProp",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "VehType",
TABLE = "FuelComp",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
SIZE = 7,
ISELEMENTOF = c("Auto", "LtTruck", "Bus", "Truck")
),
item(
NAME = item(
"GasPropEth",
"DieselPropBio"
),
TABLE = "FuelComp",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "BaseLtVehDvmt",
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/DAY",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "BaseFwyArtProp",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c('NA', '< 0', '> 1'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "TruckVmtGrowthMultiplier",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "Type",
TABLE = "Vmt",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
ISELEMENTOF = c("BusVmt","TruckVmt"),
SIZE = 8
),
item(
NAME = "PropVmt",
TABLE = "Vmt",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"Fwy",
"Art",
"Other"
),
TABLE = "Vmt",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "ModelYear",
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "character",
UNITS = "YR",
PROHIBIT = c("NA"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPhevRange",
"LtTruckPhevRange"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "distance",
UNITS = "MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPropPhev",
"LtTruckPropPhev"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoMpg",
"LtTruckMpg"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoMpkwh",
"LtTruckMpkwh"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/KWH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "ModelYear",
TABLE = "HevPropMpgYr",
GROUP = "Global",
TYPE = "character",
UNITS = "YR",
PROHIBIT = c("NA"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPropHev",
"LtTruckPropHev"
),
TABLE = "HevPropMpgYr",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoHevMpg",
"LtTruckHevMpg"
),
TABLE = "HevPropMpgYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "ModelYear",
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "character",
UNITS = "YR",
PROHIBIT = c("NA"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoRange",
"LtTruckRange"
),
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "distance",
UNITS = "MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPropEv",
"LtTruckPropEv"
),
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoMpkwh",
"LtTruckMpkwh"
),
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/KWH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
)
),
#Specify data to saved in the data store
Set = items(
# Marea variables
item(
NAME = "TruckDvmtFuture",
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by trucks"
),
# Bzone variables
item(
NAME = "DvmtFuture",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled"
),
item(
NAME = "EvDvmtFuture",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by electric vehicles"
),
item(
NAME = "HcDvmtFuture",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by ICE vehicles"
),
# Household variables
item(
NAME = "DvmtFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled"
),
item(
NAME = "FuelGallonsFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "GAL/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily fuel consumption in gallons"
),
item(
NAME = "FuelCo2eFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "mass",
UNITS = "GM",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
# fixed typo: "gass" -> "gas"
DESCRIPTION = "Average daily Co2 equivalent greenhouse gas emissions"
),
item(
NAME = "ElecKwhFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "KWH/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
# fixed copy-paste error: this field is electricity in KWH/DAY, not fuel gallons
DESCRIPTION = "Average daily electricity consumption in kilowatt-hours"
),
item(
NAME = "ElecCo2eFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "mass",
UNITS = "GM",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily Co2 equivalent greenhouse gas emissions by
consumption of electricity"
),
item(
NAME = "DailyParkingCostFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "USD/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily parking cost"
),
item(
NAME = "FutureCostPerMileFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "USD/MI",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Total fuel cost per mile"
),
# Vehicle variables
item(
NAME = "DvmtFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled"
),
item(
NAME = "EvDvmtFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by electric vehicles"
),
item(
NAME = "HcDvmtFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by ICE vehicles"
),
item(
NAME = "MpKwhFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/KWH",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Power efficiency of electric vehicles"
),
item(
NAME = "PowertrainFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
NAVALUE = -1,
PROHIBIT = c("NA"),
ISELEMENTOF = c("Ice", "Hev", "Phev", "Ev"),
SIZE = 4,
DESCRIPTION = "Power train of vehicles"
)
)
)
#Save the data specifications list
#---------------------------------
#' Specifications list for CalculateTravelDemandFuture module
#'
#' A list containing specifications for the CalculateTravelDemandFuture module.
#'
#' @format A list containing 3 components:
#' \describe{
#' \item{RunBy}{the level of geography that the module is run at}
#' \item{Get}{module inputs to be read from the datastore}
#' \item{Set}{module outputs to be written to the datastore}
#' }
#' @source CalculateTravelDemandFuture.R script.
"CalculateTravelDemandFutureSpecifications"
# Persist the specifications list as a package dataset so the visioneval
# framework can load it when the module is run.
visioneval::savePackageDataset(CalculateTravelDemandFutureSpecifications, overwrite = TRUE)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
# This function calculates various attributes of daily travel for the
# households and the vehicles.
#Main module function calculates various attributes of travel demand
#------------------------------------------------------------
#' Calculate various attributes of travel demands for each household
#' and vehicle using future data
#'
#' \code{CalculateTravelDemandFuture} calculate various attributes of travel
#' demands for each household and vehicle using future data
#'
#' This function calculates dvmt by placetypes, households, and vehicles.
#' It also calculates fuel gallons consumed, total fuel cost, and Co2 equivalent
#' gas emission for each household using future data.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name CalculateTravelDemandFuture
#' @import visioneval
#' @export
CalculateTravelDemandFuture <- function(L) {
  #Set up
  #------
  # This module reuses CalculateTravelDemand(): the 'Future' suffix is
  # stripped from the input names so they match that module's Get
  # specifications, and is restored on the outputs so they match this
  # module's Set specifications.

  # Append `suffix` to the names of all non-list elements of a (possibly
  # nested) named list, recursing into nested lists. Returns NULL for
  # non-list input.
  AddSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) {
      return(NULL)
    }
    if (length(x) == 0) {
      return(x)
    }
    # Separate plain elements from nested lists
    isElementList <- vapply(x, is.list, logical(1))
    noList <- x[!isElementList]
    if (length(names(noList)) > 0) {
      names(noList) <- paste0(names(noList), suffix)
    }
    # Recurse into nested lists
    yesList <- lapply(x[isElementList], AddSuffixFuture, suffix = suffix)
    unlist(list(noList, yesList), recursive = FALSE)
  }

  # Remove a trailing `suffix` from the names of all non-list elements of a
  # (possibly nested) named list, recursing into nested lists. Returns NULL
  # for non-list input.
  RemoveSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) {
      return(NULL)
    }
    if (length(x) == 0) {
      return(x)
    }
    isElementList <- vapply(x, is.list, logical(1))
    noList <- x[!isElementList]
    if (length(names(noList)) > 0) {
      # BUG FIX: anchor the pattern so only a *trailing* suffix is removed.
      # The previous gsub(suffix, "", ...) stripped every occurrence, turning
      # names such as "FutureCostPerMileFuture" into "CostPerMile" instead of
      # "FutureCostPerMile".
      names(noList) <- sub(paste0(suffix, "$"), "", names(noList))
    }
    yesList <- lapply(x[isElementList], RemoveSuffixFuture, suffix = suffix)
    unlist(list(noList, yesList), recursive = FALSE)
  }

  # Strip the 'Future' suffix from the input dataset names
  L <- RemoveSuffixFuture(L)
  #Return the results
  #------------------
  # Delegate the travel demand calculations to the base module
  Out_ls <- CalculateTravelDemand(L)
  # Restore the 'Future' suffix on all output names and return
  AddSuffixFuture(Out_ls)
}
#================================
#Code to aid development and test
#================================
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateTravelDemandFuture",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = FALSE
# )
# L <- TestDat_$L
# R <- CalculateTravelDemandFuture(L)
#Test code to check everything including running the module and checking whether
#the outputs are consistent with the 'Set' specifications
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateTravelDemandFuture",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = TRUE
# )
| /sources/modules/VEHouseholdTravel/R/CalculateTravelDemandFuture.R | permissive | VisionEval/VisionEval-Dev | R | false | false | 20,933 | r | #' @include CalculateTravelDemand.R
NULL
#===================
#CalculateTravelDemandFuture.R
#===================
#This module calculates average daily vehicle miles traveled for households. It also
#calculates average DVMT, daily consumption of fuel (in gallons), and average daily
#Co2 equivalent greenhouse emissions for all vehicles.
# library(visioneval)
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
## Current implementation
### The current version implements the models used in the RPAT (GreenSTEP)
### ecosystem.
## Future Development
## Use estimation data set to create models
# #Load Dvmt assignment models
# load("data/DvmtLmModels_ls.rda")
#
# #Load PHEV/HEV model data
# load("data/PhevModelData_ls.rda")
#
# #Load default values for Travel Demand module
# load("./data/TravelDemandDefaults_ls.rda")
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
CalculateTravelDemandFutureSpecifications <- list(
#Level of geography module is applied at
RunBy = "Region",
#Specify new tables to be created by Inp if any
#Specify new tables to be created by Set if any
#Specify input data
#Specify data to be loaded from data store
Get = items(
item(
NAME = "Bzone",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME = "UrbanIncome",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "currency",
UNITS = "USD.2000",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "UrbanIncome",
TABLE = "Bzone",
GROUP = "BaseYear",
TYPE = "currency",
UNITS = "USD.2000",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"HhId",
"HhPlaceTypes"),
TABLE = "Household",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "NA",
ISELEMENTOF = ""
),
item(
NAME = "HhSize",
TABLE = "Household",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "Income",
TABLE = "Household",
GROUP = "Year",
TYPE = "currency",
UNITS = "USD.2000",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = items(
"Age0to14",
"Age15to19",
"Age20to29",
"Age30to54",
"Age55to64",
"Age65Plus"
),
TABLE = "Household",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "VehiclesFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "vehicles",
UNITS = "VEH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
# Vehicle variables
item(
NAME = items("HhIdFuture",
"VehIdFuture"),
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "NA",
ISELEMENTOF = ""
),
item(
NAME = "AgeFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "time",
UNITS = "YR",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "MileageFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "TypeFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
SIZE = 7,
PROHIBIT = "NA",
ISELEMENTOF = c("Auto", "LtTrk")
),
item(
NAME = "DvmtPropFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = items(
"FwyLaneMiPCFuture",
"TranRevMiPCFuture"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "BaseCostPerMile",
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "USD/MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "DvmtBudgetProp",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "AnnVmtInflator",
TABLE = "Model",
GROUP = "Global",
TYPE = "integer",
UNITS = "DAYS",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"FuelCost",
"GasTax"
),
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "USD/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "KwhCost",
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "USD/KWH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "Fuel",
TABLE = "Fuel",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
SIZE = 12,
ISELEMENTOF = c("ULSD", "Biodiesel", "RFG", "CARBOB", "Ethanol", "Cng", "Electricity")
),
item(
NAME = "Intensity",
TABLE = "Fuel",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "VehType",
TABLE = "FuelProp",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
SIZE = 7,
ISELEMENTOF = c("Auto", "LtTruck", "Bus", "Truck")
),
item(
NAME = item(
"PropDiesel",
"PropCng",
"PropGas"
),
TABLE = "FuelProp",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "VehType",
TABLE = "FuelComp",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
SIZE = 7,
ISELEMENTOF = c("Auto", "LtTruck", "Bus", "Truck")
),
item(
NAME = item(
"GasPropEth",
"DieselPropBio"
),
TABLE = "FuelComp",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "BaseLtVehDvmt",
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/DAY",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "BaseFwyArtProp",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c('NA', '< 0', '> 1'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "TruckVmtGrowthMultiplier",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "Type",
TABLE = "Vmt",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
ISELEMENTOF = c("BusVmt","TruckVmt"),
SIZE = 8
),
item(
NAME = "PropVmt",
TABLE = "Vmt",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"Fwy",
"Art",
"Other"
),
TABLE = "Vmt",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "ModelYear",
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "character",
UNITS = "YR",
PROHIBIT = c("NA"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPhevRange",
"LtTruckPhevRange"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "distance",
UNITS = "MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPropPhev",
"LtTruckPropPhev"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoMpg",
"LtTruckMpg"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoMpkwh",
"LtTruckMpkwh"
),
TABLE = "PhevRangePropYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/KWH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "ModelYear",
TABLE = "HevPropMpgYr",
GROUP = "Global",
TYPE = "character",
UNITS = "YR",
PROHIBIT = c("NA"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPropHev",
"LtTruckPropHev"
),
TABLE = "HevPropMpgYr",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoHevMpg",
"LtTruckHevMpg"
),
TABLE = "HevPropMpgYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "ModelYear",
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "character",
UNITS = "YR",
PROHIBIT = c("NA"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoRange",
"LtTruckRange"
),
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "distance",
UNITS = "MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoPropEv",
"LtTruckPropEv"
),
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"AutoMpkwh",
"LtTruckMpkwh"
),
TABLE = "EvRangePropYr",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/KWH",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
)
),
#Specify data to saved in the data store
Set = items(
# Marea variables
item(
NAME = "TruckDvmtFuture",
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by trucks"
),
# Bzone variables
item(
NAME = "DvmtFuture",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled"
),
item(
NAME = "EvDvmtFuture",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by electric vehicles"
),
item(
NAME = "HcDvmtFuture",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by ICE vehicles"
),
# Household variables
item(
NAME = "DvmtFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled"
),
item(
NAME = "FuelGallonsFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "GAL/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily fuel consumption in gallons"
),
item(
NAME = "FuelCo2eFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "mass",
UNITS = "GM",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
# fixed typo: "gass" -> "gas"
DESCRIPTION = "Average daily Co2 equivalent greenhouse gas emissions"
),
item(
NAME = "ElecKwhFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "KWH/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
# fixed copy-paste error: this field is electricity in KWH/DAY, not fuel gallons
DESCRIPTION = "Average daily electricity consumption in kilowatt-hours"
),
item(
NAME = "ElecCo2eFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "mass",
UNITS = "GM",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily Co2 equivalent greenhouse gas emissions by
consumption of electricity"
),
item(
NAME = "DailyParkingCostFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "USD/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily parking cost"
),
item(
NAME = "FutureCostPerMileFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "compound",
UNITS = "USD/MI",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Total fuel cost per mile"
),
# Vehicle variables
item(
NAME = "DvmtFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled"
),
item(
NAME = "EvDvmtFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by electric vehicles"
),
item(
NAME = "HcDvmtFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Average daily vehicle miles traveled by ICE vehicles"
),
item(
NAME = "MpKwhFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/KWH",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Power efficiency of electric vehicles"
),
item(
NAME = "PowertrainFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
NAVALUE = -1,
PROHIBIT = c("NA"),
ISELEMENTOF = c("Ice", "Hev", "Phev", "Ev"),
SIZE = 4,
DESCRIPTION = "Power train of vehicles"
)
)
)
#Save the data specifications list
#---------------------------------
#' Specifications list for CalculateTravelDemandFuture module
#'
#' A list containing specifications for the CalculateTravelDemandFuture module.
#'
#' @format A list containing 3 components:
#' \describe{
#' \item{RunBy}{the level of geography that the module is run at}
#' \item{Get}{module inputs to be read from the datastore}
#' \item{Set}{module outputs to be written to the datastore}
#' }
#' @source CalculateTravelDemandFuture.R script.
"CalculateTravelDemandFutureSpecifications"
# Persist the specifications list as a package dataset so the visioneval
# framework can load it when the module is run.
visioneval::savePackageDataset(CalculateTravelDemandFutureSpecifications, overwrite = TRUE)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
# This function calculates various attributes of daily travel for the
# households and the vehicles.
#Main module function calculates various attributes of travel demand
#------------------------------------------------------------
#' Calculate various attributes of travel demands for each household
#' and vehicle using future data
#'
#' \code{CalculateTravelDemandFuture} calculate various attributes of travel
#' demands for each household and vehicle using future data
#'
#' This function calculates dvmt by placetypes, households, and vehicles.
#' It also calculates fuel gallons consumed, total fuel cost, and Co2 equivalent
#' gas emission for each household using future data.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name CalculateTravelDemandFuture
#' @import visioneval
#' @export
CalculateTravelDemandFuture <- function(L) {
  #Set up
  #------
  # This module reuses CalculateTravelDemand(): the 'Future' suffix is
  # stripped from the input names so they match that module's Get
  # specifications, and is restored on the outputs so they match this
  # module's Set specifications.

  # Append `suffix` to the names of all non-list elements of a (possibly
  # nested) named list, recursing into nested lists. Returns NULL for
  # non-list input.
  AddSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) {
      return(NULL)
    }
    if (length(x) == 0) {
      return(x)
    }
    # Separate plain elements from nested lists
    isElementList <- vapply(x, is.list, logical(1))
    noList <- x[!isElementList]
    if (length(names(noList)) > 0) {
      names(noList) <- paste0(names(noList), suffix)
    }
    # Recurse into nested lists
    yesList <- lapply(x[isElementList], AddSuffixFuture, suffix = suffix)
    unlist(list(noList, yesList), recursive = FALSE)
  }

  # Remove a trailing `suffix` from the names of all non-list elements of a
  # (possibly nested) named list, recursing into nested lists. Returns NULL
  # for non-list input.
  RemoveSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) {
      return(NULL)
    }
    if (length(x) == 0) {
      return(x)
    }
    isElementList <- vapply(x, is.list, logical(1))
    noList <- x[!isElementList]
    if (length(names(noList)) > 0) {
      # BUG FIX: anchor the pattern so only a *trailing* suffix is removed.
      # The previous gsub(suffix, "", ...) stripped every occurrence, turning
      # names such as "FutureCostPerMileFuture" into "CostPerMile" instead of
      # "FutureCostPerMile".
      names(noList) <- sub(paste0(suffix, "$"), "", names(noList))
    }
    yesList <- lapply(x[isElementList], RemoveSuffixFuture, suffix = suffix)
    unlist(list(noList, yesList), recursive = FALSE)
  }

  # Strip the 'Future' suffix from the input dataset names
  L <- RemoveSuffixFuture(L)
  #Return the results
  #------------------
  # Delegate the travel demand calculations to the base module
  Out_ls <- CalculateTravelDemand(L)
  # Restore the 'Future' suffix on all output names and return
  AddSuffixFuture(Out_ls)
}
#================================
#Code to aid development and test
#================================
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateTravelDemandFuture",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = FALSE
# )
# L <- TestDat_$L
# R <- CalculateTravelDemandFuture(L)
#Test code to check everything including running the module and checking whether
#the outputs are consistent with the 'Set' specifications
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateTravelDemandFuture",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = TRUE
# )
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_parameters.R
\name{assign_parameters.subcatchments}
\alias{assign_parameters.subcatchments}
\title{conversion helper}
\usage{
\method{assign_parameters}{subcatchments}(
x,
infiltration = NULL,
subcatchment = NULL,
subcatchment_typologies,
conduit_material = NULL,
junction_parameters = NULL
)
}
\description{
conversion helper
}
\keyword{internal}
| /man/assign_parameters.subcatchments.Rd | no_license | dleutnant/swmmr | R | false | true | 445 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assign_parameters.R
\name{assign_parameters.subcatchments}
\alias{assign_parameters.subcatchments}
\title{conversion helper}
\usage{
\method{assign_parameters}{subcatchments}(
x,
infiltration = NULL,
subcatchment = NULL,
subcatchment_typologies,
conduit_material = NULL,
junction_parameters = NULL
)
}
\description{
conversion helper
}
\keyword{internal}
|
#Setup
rm(list=ls())
# J:/H: drive roots differ between the Linux cluster and Windows
if (Sys.info()["sysname"] == "Linux") {
  j_root <- "/home/j/"
  h_root <- "/homes/USERNAME/"
} else {
  j_root <- "J:"
  h_root <- "H:"
}
##Packages
library(data.table)
library(openxlsx)
##Set objects for each age split
region_pattern <- TRUE ##if true will use regional pattern for Dismod, if false uses US age pattern
location_pattern_id <- 102 ##location id of the country whose pattern you want to use if not using region pattern
bundle_id <- 157
request_num <- "60776"
acause <- "mental_drug_amphet"
gbd_id <- 1978 ##put me_id that you are age_splitting for
output_file <- "results_age_split_5thJune" ##name your output file
# NOTE(review): the original wrote "FILEPATH\age_table.R"; R parses the single
# backslash as the escape "\a" (bell character), producing a broken path.  A
# forward slash works on every platform.
age_table_code <- paste0(j_root, "FILEPATH/age_table.R") ##wherever you are keeping the age_table code
note_variable <- "note_modeler"
##Other objects
# columns that must be blanked on split rows before re-upload (the epi
# uploader recomputes uncertainty from cases / sample size)
blank_vars <- c("lower", "upper", "effective_sample_size", "uncertainty_type", "uncertainty_type_value", "seq", "standard_error", "cases")
draws <- paste0("draw_", 0:999)
age <- c(2:20, 30:32, 235) ##epi ages
##Set directories
central_function <- paste0(j_root, "FILEPATH")
uploads <- paste0(j_root, "FILEPATH")
downloads <- paste0(j_root, "FILEPATH")
##Get central functions
source(paste0(central_function, "get_draws.R"))
source(paste0(central_function, "get_population.R"))
source(paste0(central_function, "get_location_metadata.R"))
source(age_table_code)
##Load data
all_age <- as.data.table(read.xlsx(paste0(downloads, "request_", request_num, ".xlsx"), sheet = 1)) ##get all age data
epi_order <- names(all_age)
##make sure all necessary columns exist
# NOTE(review): epi_order and vars are both names(all_age), so diff is always
# empty and this check can never fire; epi_order was presumably meant to come
# from a fixed template of required epi-uploader columns -- confirm.
vars <- names(all_age)
diff <- setdiff(epi_order, vars)
if (!length(diff) == 0) {
  all_age[, (diff) := ""]
}
##Get and format data points to split
all_age <- all_age[measure %in% c("prevalence", "incidence"),]
all_age <- all_age[!group_review==0 | is.na(group_review),] ##don't age split excluded data
all_age <- all_age[is_outlier==0,] ##don't age split outliered data
all_age <- all_age[(age_end-age_start)>20,] ##only split rows spanning a wide (>20y) age range
all_age <- all_age[!mean ==0, ] ##don't split points with zero prevalence
## map sex labels to GBD sex_ids (1 = Male, 2 = Female, 3 = Both)
all_age[, sex_id := sex]
all_age[sex_id=="Both", sex_id :=3]
all_age[sex_id=="Female", sex_id := 2]
all_age[sex_id=="Male", sex_id :=1]
all_age[, sex_id := as.integer(sex_id)]
all_age[measure == "prevalence", measure_id := 5]
all_age[measure == "incidence", measure_id := 6]
all_age[, year_id := year_start]
##Calculate cases and sample size if missing
all_age[measure == "prevalence" & is.na(sample_size) & !is.na(effective_sample_size), sample_size := effective_sample_size]
# NOTE(review): the binomial back-calculation is n = p(1-p)/se^2.  The
# original wrote mean(1-mean), which calls the mean() *function* on the
# vector (1 - mean) and fills every row with the same wrong scalar; it must
# be the product mean*(1-mean), matching the formula used for the dismod
# pattern later in this script.
all_age[measure == "prevalence" & is.na(sample_size) & is.na(effective_sample_size), sample_size := mean*(1-mean)/standard_error^2]
all_age[measure == "incidence" & is.na(sample_size) & !is.na(effective_sample_size), sample_size := effective_sample_size]
all_age[measure == "incidence" & is.na(sample_size) & is.na(effective_sample_size), sample_size := mean/standard_error^2] ## Poisson: n = rate/se^2
all_age[, cases := sample_size * mean] #Use "weighted cases"
all_age <- all_age[!cases==0,] ##don't want to split points with zero cases
## Round age groups to the nearest 5-y boundary
all_age_round <- copy(all_age)
# NOTE(review): the original wrote all_age[, age_start := ...] here, which
# modified all_age by reference and discarded the copy above; operate on the
# copy instead (downstream results are unchanged, all_age stays intact).
all_age_round[, age_start := age_start - age_start %%5]
all_age_round[, age_end := age_end - age_end %%5 + 4]
all_age_round <- all_age_round[age_end > 99, age_end := 99]
## Expand for age
all_age_round[, n.age:=(age_end+1 - age_start)/5] ## number of 5-y groups the row spans
all_age_round[, age_start_floor:=age_start]
all_age_round[, drop := cases/n.age] ##drop the data points if cases/n.age is less than 1
all_age_round <- all_age_round[!drop<1,]
all_age_parents <- copy(all_age_round) ##keep copy of parents to attach on later
# NOTE(review): the original used %>% here, but neither data.table nor
# openxlsx exports it (magrittr is never loaded); build the table directly.
expanded <- data.table(seq = rep(all_age_round$seq, all_age_round$n.age))
split <- merge(expanded, all_age_round, by="seq", all=T)
split[,age.rep:= 1:.N - 1, by =.(seq)]
split[,age_start:= age_start+age.rep*5]
split[, age_end := age_start + 4 ]
##Get super region information and merge on
regions <- get_location_metadata(location_set_id = 22)
regions <- regions[, .(location_id, region_id)]
split <- merge(split, regions, by = "location_id")
regions <- unique(split$region_id) ##get super regions for dismod results
## get age group ids (`ages` comes from the sourced age_table code)
all_age_total <- merge(split, ages, by = c("age_start", "age_end"), all.x = T)
## create age_group_id == 1 for 0-4 age group
all_age_total[age_start == 0 & age_end == 4, age_group_id := 1]
all_age_total <- all_age_total[age_group_id %in% age | age_group_id ==1] ##don't keep where age group id isn't estimated for cause
##get locations and years for population info later
pop_locs <- unique(all_age_total$location_id)
pop_years <- unique(all_age_total$year_id)
######GET AND FORMAT AGE PATTERN DATA###############################################################
# Pull the modelled (Dismod) age pattern that will be imposed on the wide
# age-range data points: one pattern per region, or a single location's.
if (region_pattern == T) {
  locations <- regions
} else {
  locations <- location_pattern_id
}
age_pattern <- as.data.table(get_draws(gbd_id_field = "modelable_entity_id", gbd_id = gbd_id,
                                       measure_ids = c(5, 6), location_ids = locations, source = "epi",
                                       status = "best", sex_ids = c(1,2),
                                       age_group_ids = age, year_ids = 2016)) ##imposing age pattern
us_population <- as.data.table(get_population(location_id = locations, year_id = 2016, sex_id = c(1, 2),
                                              age_group_id = age))
us_population <- us_population[, .(age_group_id, sex_id, population, location_id)]
# collapse the 1000 draws to a point estimate (row mean) and SE (row sd)
age_pattern[, se_dismod := apply(.SD, 1, sd), .SDcols = draws]
age_pattern[, rate_dis := rowMeans(.SD), .SDcols = draws]
age_pattern[, (draws) := NULL]
age_pattern <- age_pattern[ ,.(sex_id, measure_id, age_group_id, location_id, se_dismod, rate_dis)]
##Create age group id 1 (collapse all age groups by summing population weighted rates)
age_1 <- copy(age_pattern)
age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5), ]
# the pooled 0-4 group reuses the age-group-5 standard error
se <- copy(age_1)
se <- se[age_group_id==5, .(measure_id, sex_id, se_dismod, location_id)]
age_1 <- merge(age_1, us_population, by = c("age_group_id", "sex_id", "location_id"))
age_1[, total_pop := sum(population), by = c("sex_id", "measure_id", "location_id")]
age_1[, frac_pop := population / total_pop]
age_1[, weight_rate := rate_dis * frac_pop]
age_1[, rate_dis := sum(weight_rate), by = c("sex_id", "measure_id", "location_id")]
age_1 <- unique(age_1, by = c("sex_id", "measure_id", "location_id"))
age_1 <- age_1[, .(age_group_id, sex_id, measure_id, location_id, rate_dis)]
age_1 <- merge(age_1, se, by = c("sex_id", "measure_id", "location_id"))
age_1[, age_group_id := 1]
age_pattern <- age_pattern[!age_group_id %in% c(2,3,4,5)]
age_pattern <- rbind(age_pattern, age_1)
##Get cases and sample size
# back-calculate effective cases / sample size from rate and SE
# (binomial for prevalence, Poisson for incidence)
age_pattern[measure_id == 5, sample_size_us := rate_dis * (1-rate_dis)/se_dismod^2]
age_pattern[measure_id == 6, sample_size_us := rate_dis/se_dismod^2]
age_pattern[, cases_us := sample_size_us * rate_dis]
age_pattern[is.nan(sample_size_us), sample_size_us := 0] ##if all draws are 0 can't calculate cases and sample size b/c se = 0, but should both be 0
age_pattern[is.nan(cases_us), cases_us := 0]
##Get sex_id 3
# both-sex pattern: pool male + female cases and sample sizes
sex_3 <- copy(age_pattern)
sex_3[, cases_us := sum(cases_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, sample_size_us := sum(sample_size_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, rate_dis := cases_us/sample_size_us]
sex_3[measure_id == 5, se_dismod := sqrt(rate_dis*(1-rate_dis)/sample_size_us)] ##back calculate cases and sample size
sex_3[measure_id == 6, se_dismod := sqrt(cases_us)/sample_size_us]
sex_3[is.nan(rate_dis), rate_dis := 0] ##if sample_size is 0 can't calculate rate and standard error, but should both be 0
sex_3[is.nan(se_dismod), se_dismod := 0]
sex_3 <- unique(sex_3, by = c("age_group_id", "measure_id", "location_id"))
sex_3[, sex_id := 3]
age_pattern <- rbind(age_pattern, sex_3)
age_pattern[, region_id := location_id]
age_pattern <- age_pattern[ ,.(age_group_id, sex_id, measure_id, cases_us, sample_size_us, rate_dis, se_dismod, region_id)]
######################################################################################################
##merge on age pattern info
# attach the dismod pattern to every expanded 5-y row (by region when using
# the regional pattern, otherwise the single location's pattern)
if (region_pattern == T) {
  age_pattern1 <- copy(age_pattern)
  all_age_total <- merge(all_age_total, age_pattern1, by = c("sex_id", "age_group_id", "measure_id", "region_id"))
} else {
  age_pattern1 <- copy(age_pattern)
  all_age_total <- merge(all_age_total, age_pattern1, by = c("sex_id", "age_group_id", "measure_id"))
}
##get population info and merge on
populations <- as.data.table(get_population(location_id = pop_locs, year_id = pop_years,
                                            sex_id = c(1, 2, 3), age_group_id = age))
populations[, process_version_map_id := NULL]
age_1 <- copy(populations) ##create age group id 1 by collapsing lower age groups
age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5)]
age_1[, population := sum(population), by = c("location_id", "year_id", "sex_id")]
age_1 <- unique(age_1, by = c("location_id", "year_id", "sex_id"))
age_1[, age_group_id := 1]
populations <- populations[!age_group_id %in% c(2, 3, 4, 5)]
populations <- rbind(populations, age_1) ##add age group id 1 back on
all_age_total <- merge(all_age_total, populations, by = c("location_id", "sex_id", "year_id", "age_group_id"))
#####CALCULATE AGE SPLIT POINTS#######################################################################
##Create new split data points: distribute each parent row's sample size
##across its 5-y child rows by population share, then rescale the dismod
##rate so the children aggregate back to the observed all-age mean.
all_age_total[, total_pop := sum(population), by = "seq"]
all_age_total[, sample_size := (population / total_pop) * sample_size]
all_age_total[, cases_dis := sample_size * rate_dis]
all_age_total[, total_cases_dis := sum(cases_dis), by = "seq"]
all_age_total[, total_sample_size := sum(sample_size), by = "seq"]
all_age_total[, all_age_rate := total_cases_dis/total_sample_size]
all_age_total[, ratio := mean / all_age_rate]
all_age_total[, mean := ratio * rate_dis]
######################################################################################################
##Epi uploader formatting
all_age_total <- all_age_total[, (blank_vars) := ""] ##these columns need to be blank
all_age_total[!is.na(specificity), specificity := paste0(specificity, ", age-split child")]
all_age_total[is.na(specificity), specificity := paste0(specificity, ",age-split child")]
all_age_total[, group_review := 1]
all_age_total <- all_age_total[,c(epi_order), with=F]
setcolorder(all_age_total, epi_order)
##Add to originals with group review 0
all_age_parents <- all_age_parents[,c(epi_order), with=F]
setcolorder(all_age_parents, epi_order)
invisible(all_age_parents[, group_review := 0])
invisible(all_age_parents[!is.na(specificity), specificity := paste0(specificity, ", age-split parent")])
invisible(all_age_parents[is.na(specificity), specificity := paste0(specificity, "age-split parent")])
total <- rbind(all_age_parents, all_age_total)
setnames(total, note_variable, "note_modeler_info")
if (region_pattern ==T) {
  total[group_review==1, note_modeler_info := paste0(note_modeler_info, "| Age split using the region age pattern.")]
  total[group_review==0, note_modeler_info := paste0(note_modeler_info, "| GR 0. Age split using the region age pattern in separate rows.")]
} else {
  # NOTE(review): the note column was renamed to note_modeler_info above, so
  # the original reference to note_modeler errored whenever this branch ran.
  total[, note_modeler_info := paste0(note_modeler_info, "| age split using the age pattern from location id ", location_pattern_id)]
}
total[, specificity := gsub("NA", "", specificity)] ## strip the literal "NA" that paste0 prepended for originally-missing values
total[, note_modeler_info := gsub("NA", "", note_modeler_info)]
setnames(total, "note_modeler_info", note_variable)
total[group_review==0, input_type := "group_review"]
total[is.na(group), group := nid]
total[, unit_value_as_published := 1]
write.xlsx(total, paste0("FILEPATH", ".xlsx"), sheetName = "extraction")
| /nonfatal_code/mental_unipolar_mdd/age_sex_splitting/age_split_byregion_cleaned.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 11,719 | r |
#Setup
rm(list=ls())
# J:/H: drive roots differ between the Linux cluster and Windows
if (Sys.info()["sysname"] == "Linux") {
  j_root <- "/home/j/"
  h_root <- "/homes/USERNAME/"
} else {
  j_root <- "J:"
  h_root <- "H:"
}
##Packages
library(data.table)
library(openxlsx)
##Set objects for each age split
region_pattern <- TRUE ##if true will use regional pattern for Dismod, if false uses US age pattern
location_pattern_id <- 102 ##location id of the country whose pattern you want to use if not using region pattern
bundle_id <- 157
request_num <- "60776"
acause <- "mental_drug_amphet"
gbd_id <- 1978 ##put me_id that you are age_splitting for
output_file <- "results_age_split_5thJune" ##name your output file
# NOTE(review): the original wrote "FILEPATH\age_table.R"; R parses the single
# backslash as the escape "\a" (bell character), producing a broken path.  A
# forward slash works on every platform.
age_table_code <- paste0(j_root, "FILEPATH/age_table.R") ##wherever you are keeping the age_table code
note_variable <- "note_modeler"
##Other objects
# columns that must be blanked on split rows before re-upload (the epi
# uploader recomputes uncertainty from cases / sample size)
blank_vars <- c("lower", "upper", "effective_sample_size", "uncertainty_type", "uncertainty_type_value", "seq", "standard_error", "cases")
draws <- paste0("draw_", 0:999)
age <- c(2:20, 30:32, 235) ##epi ages
##Set directories
central_function <- paste0(j_root, "FILEPATH")
uploads <- paste0(j_root, "FILEPATH")
downloads <- paste0(j_root, "FILEPATH")
##Get central functions
source(paste0(central_function, "get_draws.R"))
source(paste0(central_function, "get_population.R"))
source(paste0(central_function, "get_location_metadata.R"))
source(age_table_code)
##Load data
all_age <- as.data.table(read.xlsx(paste0(downloads, "request_", request_num, ".xlsx"), sheet = 1)) ##get all age data
epi_order <- names(all_age)
##make sure all necessary columns exist
# NOTE(review): epi_order and vars are both names(all_age), so diff is always
# empty and this check can never fire; epi_order was presumably meant to come
# from a fixed template of required epi-uploader columns -- confirm.
vars <- names(all_age)
diff <- setdiff(epi_order, vars)
if (!length(diff) == 0) {
  all_age[, (diff) := ""]
}
##Get and format data points to split
all_age <- all_age[measure %in% c("prevalence", "incidence"),]
all_age <- all_age[!group_review==0 | is.na(group_review),] ##don't age split excluded data
all_age <- all_age[is_outlier==0,] ##don't age split outliered data
all_age <- all_age[(age_end-age_start)>20,] ##only split rows spanning a wide (>20y) age range
all_age <- all_age[!mean ==0, ] ##don't split points with zero prevalence
## map sex labels to GBD sex_ids (1 = Male, 2 = Female, 3 = Both)
all_age[, sex_id := sex]
all_age[sex_id=="Both", sex_id :=3]
all_age[sex_id=="Female", sex_id := 2]
all_age[sex_id=="Male", sex_id :=1]
all_age[, sex_id := as.integer(sex_id)]
all_age[measure == "prevalence", measure_id := 5]
all_age[measure == "incidence", measure_id := 6]
all_age[, year_id := year_start]
##Calculate cases and sample size if missing
all_age[measure == "prevalence" & is.na(sample_size) & !is.na(effective_sample_size), sample_size := effective_sample_size]
# NOTE(review): the binomial back-calculation is n = p(1-p)/se^2.  The
# original wrote mean(1-mean), which calls the mean() *function* on the
# vector (1 - mean) and fills every row with the same wrong scalar; it must
# be the product mean*(1-mean), matching the formula used for the dismod
# pattern later in this script.
all_age[measure == "prevalence" & is.na(sample_size) & is.na(effective_sample_size), sample_size := mean*(1-mean)/standard_error^2]
all_age[measure == "incidence" & is.na(sample_size) & !is.na(effective_sample_size), sample_size := effective_sample_size]
all_age[measure == "incidence" & is.na(sample_size) & is.na(effective_sample_size), sample_size := mean/standard_error^2] ## Poisson: n = rate/se^2
all_age[, cases := sample_size * mean] #Use "weighted cases"
all_age <- all_age[!cases==0,] ##don't want to split points with zero cases
## Round age groups to the nearest 5-y boundary
all_age_round <- copy(all_age)
# NOTE(review): the original wrote all_age[, age_start := ...] here, which
# modified all_age by reference and discarded the copy above; operate on the
# copy instead (downstream results are unchanged, all_age stays intact).
all_age_round[, age_start := age_start - age_start %%5]
all_age_round[, age_end := age_end - age_end %%5 + 4]
all_age_round <- all_age_round[age_end > 99, age_end := 99]
## Expand for age
all_age_round[, n.age:=(age_end+1 - age_start)/5] ## number of 5-y groups the row spans
all_age_round[, age_start_floor:=age_start]
all_age_round[, drop := cases/n.age] ##drop the data points if cases/n.age is less than 1
all_age_round <- all_age_round[!drop<1,]
all_age_parents <- copy(all_age_round) ##keep copy of parents to attach on later
# NOTE(review): the original used %>% here, but neither data.table nor
# openxlsx exports it (magrittr is never loaded); build the table directly.
expanded <- data.table(seq = rep(all_age_round$seq, all_age_round$n.age))
split <- merge(expanded, all_age_round, by="seq", all=T)
split[,age.rep:= 1:.N - 1, by =.(seq)]
split[,age_start:= age_start+age.rep*5]
split[, age_end := age_start + 4 ]
##Get super region information and merge on
regions <- get_location_metadata(location_set_id = 22)
regions <- regions[, .(location_id, region_id)]
split <- merge(split, regions, by = "location_id")
regions <- unique(split$region_id) ##get super regions for dismod results
## get age group ids (`ages` comes from the sourced age_table code)
all_age_total <- merge(split, ages, by = c("age_start", "age_end"), all.x = T)
## create age_group_id == 1 for 0-4 age group
all_age_total[age_start == 0 & age_end == 4, age_group_id := 1]
all_age_total <- all_age_total[age_group_id %in% age | age_group_id ==1] ##don't keep where age group id isn't estimated for cause
##get locations and years for population info later
pop_locs <- unique(all_age_total$location_id)
pop_years <- unique(all_age_total$year_id)
######GET AND FORMAT AGE PATTERN DATA###############################################################
# Pull the modelled (Dismod) age pattern that will be imposed on the wide
# age-range data points: one pattern per region, or a single location's.
if (region_pattern == T) {
  locations <- regions
} else {
  locations <- location_pattern_id
}
age_pattern <- as.data.table(get_draws(gbd_id_field = "modelable_entity_id", gbd_id = gbd_id,
                                       measure_ids = c(5, 6), location_ids = locations, source = "epi",
                                       status = "best", sex_ids = c(1,2),
                                       age_group_ids = age, year_ids = 2016)) ##imposing age pattern
us_population <- as.data.table(get_population(location_id = locations, year_id = 2016, sex_id = c(1, 2),
                                              age_group_id = age))
us_population <- us_population[, .(age_group_id, sex_id, population, location_id)]
# collapse the 1000 draws to a point estimate (row mean) and SE (row sd)
age_pattern[, se_dismod := apply(.SD, 1, sd), .SDcols = draws]
age_pattern[, rate_dis := rowMeans(.SD), .SDcols = draws]
age_pattern[, (draws) := NULL]
age_pattern <- age_pattern[ ,.(sex_id, measure_id, age_group_id, location_id, se_dismod, rate_dis)]
##Create age group id 1 (collapse all age groups by summing population weighted rates)
age_1 <- copy(age_pattern)
age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5), ]
# the pooled 0-4 group reuses the age-group-5 standard error
se <- copy(age_1)
se <- se[age_group_id==5, .(measure_id, sex_id, se_dismod, location_id)]
age_1 <- merge(age_1, us_population, by = c("age_group_id", "sex_id", "location_id"))
age_1[, total_pop := sum(population), by = c("sex_id", "measure_id", "location_id")]
age_1[, frac_pop := population / total_pop]
age_1[, weight_rate := rate_dis * frac_pop]
age_1[, rate_dis := sum(weight_rate), by = c("sex_id", "measure_id", "location_id")]
age_1 <- unique(age_1, by = c("sex_id", "measure_id", "location_id"))
age_1 <- age_1[, .(age_group_id, sex_id, measure_id, location_id, rate_dis)]
age_1 <- merge(age_1, se, by = c("sex_id", "measure_id", "location_id"))
age_1[, age_group_id := 1]
age_pattern <- age_pattern[!age_group_id %in% c(2,3,4,5)]
age_pattern <- rbind(age_pattern, age_1)
##Get cases and sample size
# back-calculate effective cases / sample size from rate and SE
# (binomial for prevalence, Poisson for incidence)
age_pattern[measure_id == 5, sample_size_us := rate_dis * (1-rate_dis)/se_dismod^2]
age_pattern[measure_id == 6, sample_size_us := rate_dis/se_dismod^2]
age_pattern[, cases_us := sample_size_us * rate_dis]
age_pattern[is.nan(sample_size_us), sample_size_us := 0] ##if all draws are 0 can't calculate cases and sample size b/c se = 0, but should both be 0
age_pattern[is.nan(cases_us), cases_us := 0]
##Get sex_id 3
# both-sex pattern: pool male + female cases and sample sizes
sex_3 <- copy(age_pattern)
sex_3[, cases_us := sum(cases_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, sample_size_us := sum(sample_size_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, rate_dis := cases_us/sample_size_us]
sex_3[measure_id == 5, se_dismod := sqrt(rate_dis*(1-rate_dis)/sample_size_us)] ##back calculate cases and sample size
sex_3[measure_id == 6, se_dismod := sqrt(cases_us)/sample_size_us]
sex_3[is.nan(rate_dis), rate_dis := 0] ##if sample_size is 0 can't calculate rate and standard error, but should both be 0
sex_3[is.nan(se_dismod), se_dismod := 0]
sex_3 <- unique(sex_3, by = c("age_group_id", "measure_id", "location_id"))
sex_3[, sex_id := 3]
age_pattern <- rbind(age_pattern, sex_3)
age_pattern[, region_id := location_id]
age_pattern <- age_pattern[ ,.(age_group_id, sex_id, measure_id, cases_us, sample_size_us, rate_dis, se_dismod, region_id)]
######################################################################################################
##merge on age pattern info
# attach the dismod pattern to every expanded 5-y row (by region when using
# the regional pattern, otherwise the single location's pattern)
if (region_pattern == T) {
  age_pattern1 <- copy(age_pattern)
  all_age_total <- merge(all_age_total, age_pattern1, by = c("sex_id", "age_group_id", "measure_id", "region_id"))
} else {
  age_pattern1 <- copy(age_pattern)
  all_age_total <- merge(all_age_total, age_pattern1, by = c("sex_id", "age_group_id", "measure_id"))
}
##get population info and merge on
populations <- as.data.table(get_population(location_id = pop_locs, year_id = pop_years,
                                            sex_id = c(1, 2, 3), age_group_id = age))
populations[, process_version_map_id := NULL]
age_1 <- copy(populations) ##create age group id 1 by collapsing lower age groups
age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5)]
age_1[, population := sum(population), by = c("location_id", "year_id", "sex_id")]
age_1 <- unique(age_1, by = c("location_id", "year_id", "sex_id"))
age_1[, age_group_id := 1]
populations <- populations[!age_group_id %in% c(2, 3, 4, 5)]
populations <- rbind(populations, age_1) ##add age group id 1 back on
all_age_total <- merge(all_age_total, populations, by = c("location_id", "sex_id", "year_id", "age_group_id"))
#####CALCULATE AGE SPLIT POINTS#######################################################################
##Create new split data points: distribute each parent row's sample size
##across its 5-y child rows by population share, then rescale the dismod
##rate so the children aggregate back to the observed all-age mean.
all_age_total[, total_pop := sum(population), by = "seq"]
all_age_total[, sample_size := (population / total_pop) * sample_size]
all_age_total[, cases_dis := sample_size * rate_dis]
all_age_total[, total_cases_dis := sum(cases_dis), by = "seq"]
all_age_total[, total_sample_size := sum(sample_size), by = "seq"]
all_age_total[, all_age_rate := total_cases_dis/total_sample_size]
all_age_total[, ratio := mean / all_age_rate]
all_age_total[, mean := ratio * rate_dis]
######################################################################################################
##Epi uploader formatting
all_age_total <- all_age_total[, (blank_vars) := ""] ##these columns need to be blank
all_age_total[!is.na(specificity), specificity := paste0(specificity, ", age-split child")]
all_age_total[is.na(specificity), specificity := paste0(specificity, ",age-split child")]
all_age_total[, group_review := 1]
all_age_total <- all_age_total[,c(epi_order), with=F]
setcolorder(all_age_total, epi_order)
##Add to originals with group review 0
all_age_parents <- all_age_parents[,c(epi_order), with=F]
setcolorder(all_age_parents, epi_order)
invisible(all_age_parents[, group_review := 0])
invisible(all_age_parents[!is.na(specificity), specificity := paste0(specificity, ", age-split parent")])
invisible(all_age_parents[is.na(specificity), specificity := paste0(specificity, "age-split parent")])
total <- rbind(all_age_parents, all_age_total)
setnames(total, note_variable, "note_modeler_info")
if (region_pattern ==T) {
  total[group_review==1, note_modeler_info := paste0(note_modeler_info, "| Age split using the region age pattern.")]
  total[group_review==0, note_modeler_info := paste0(note_modeler_info, "| GR 0. Age split using the region age pattern in separate rows.")]
} else {
  # NOTE(review): the note column was renamed to note_modeler_info above, so
  # the original reference to note_modeler errored whenever this branch ran.
  total[, note_modeler_info := paste0(note_modeler_info, "| age split using the age pattern from location id ", location_pattern_id)]
}
total[, specificity := gsub("NA", "", specificity)] ## strip the literal "NA" that paste0 prepended for originally-missing values
total[, note_modeler_info := gsub("NA", "", note_modeler_info)]
setnames(total, "note_modeler_info", note_variable)
total[group_review==0, input_type := "group_review"]
total[is.na(group), group := nid]
total[, unit_value_as_published := 1]
write.xlsx(total, paste0("FILEPATH", ".xlsx"), sheetName = "extraction")
|
# test KKT
context(strwrap("KKT checks"))
set.seed(1234)
# simulate a structured (population-stratified) dataset: 500 samples,
# 100 design SNPs, 10k kinship SNPs, 1d geography, 5% causal
draw <- gen_structured_model(n = 500,
                             p_design = 100,
                             p_kinship = 1e4,
                             geography = "1d",
                             percent_causal = 0.05,
                             percent_overlap = "100",
                             k = 5, s = 0.5, Fst = 0.1, nPC = 10,
                             b0 = 0, eta = 0.1, sigma2 = 1)
# fit the penalised linear mixed model on the training split
fit <- ggmix(x = draw[["xtrain"]],
             y = draw[["ytrain"]],
             kinship = draw[["kin_train"]],
             estimation = "full", epsilon = 1e-7, verbose = 0)
# NOTE(review): the original description ("Check predict and coef methods
# with multiple s values") was copy-pasted from another test file and does
# not describe this test, which verifies the KKT optimality conditions.
test_that("KKT conditions hold at the GIC-selected lambda", {
  gicfit <- gic(fit)
  coef_res <- coef(gicfit, type = "all")
  et <- coef_res["eta", ]
  sigs <- coef_res["sigma2", ]
  bet <- coef_res[-which(rownames(coef_res) %in% c("eta", "sigma2")), ]
  # kkt_check returns the (sub)gradient of the penalised objective; every
  # coordinate except the intercept should be near zero at the optimum
  kkt <- kkt_check(eta = et, sigma2 = sigs, beta = bet,
                   eigenvalues = fit$ggmix_object$D, x = fit$ggmix_object$x,
                   y = fit$ggmix_object$y, nt = length(fit$ggmix_object$y),
                   lambda = gicfit$lambda.min, tol.kkt = 1e-2)
  expect_true(all(abs(kkt)[-1] < 0.02))
})
# refit with a user-supplied (deliberately unsorted) lambda sequence
fit <- ggmix(x = draw[["xtrain"]],
             y = draw[["ytrain"]],
             kinship = draw[["kin_train"]],
             estimation = "full", epsilon = 1e-10,
             verbose = 0, lambda = c(0.22150, 0.025, 0.01, 0.33))
# NOTE(review): description rewritten -- the original ("Check predict and
# coef methods ...") was copy-pasted and does not describe this KKT check.
test_that("KKT conditions hold at the GIC-selected user-defined lambda", {
  gicfit <- gic(fit)
  coef_res <- coef(gicfit, type = "all")
  et <- coef_res["eta", ]
  sigs <- coef_res["sigma2", ]
  bet <- coef_res[-which(rownames(coef_res) %in% c("eta", "sigma2")), ]
  kkt <- kkt_check(eta = et, sigma2 = sigs, beta = bet,
                   eigenvalues = fit$ggmix_object$D, x = fit$ggmix_object$x,
                   y = fit$ggmix_object$y, nt = length(fit$ggmix_object$y),
                   lambda = gicfit$lambda.min, tol.kkt = 1e-2)
  expect_true(all(abs(kkt)[-1] < 0.02))
})
| /tests/testthat/test-KKT.R | permissive | areisett/ggmix | R | false | false | 2,303 | r | # test KKT
context(strwrap("KKT checks"))
set.seed(1234)
# simulate a structured (population-stratified) dataset: 500 samples,
# 100 design SNPs, 10k kinship SNPs, 1d geography, 5% causal
draw <- gen_structured_model(n = 500,
                             p_design = 100,
                             p_kinship = 1e4,
                             geography = "1d",
                             percent_causal = 0.05,
                             percent_overlap = "100",
                             k = 5, s = 0.5, Fst = 0.1, nPC = 10,
                             b0 = 0, eta = 0.1, sigma2 = 1)
# fit the penalised linear mixed model on the training split
fit <- ggmix(x = draw[["xtrain"]],
             y = draw[["ytrain"]],
             kinship = draw[["kin_train"]],
             estimation = "full", epsilon = 1e-7, verbose = 0)
# NOTE(review): the original description ("Check predict and coef methods
# with multiple s values") was copy-pasted from another test file and does
# not describe this test, which verifies the KKT optimality conditions.
test_that("KKT conditions hold at the GIC-selected lambda", {
  gicfit <- gic(fit)
  coef_res <- coef(gicfit, type = "all")
  et <- coef_res["eta", ]
  sigs <- coef_res["sigma2", ]
  bet <- coef_res[-which(rownames(coef_res) %in% c("eta", "sigma2")), ]
  # kkt_check returns the (sub)gradient of the penalised objective; every
  # coordinate except the intercept should be near zero at the optimum
  kkt <- kkt_check(eta = et, sigma2 = sigs, beta = bet,
                   eigenvalues = fit$ggmix_object$D, x = fit$ggmix_object$x,
                   y = fit$ggmix_object$y, nt = length(fit$ggmix_object$y),
                   lambda = gicfit$lambda.min, tol.kkt = 1e-2)
  expect_true(all(abs(kkt)[-1] < 0.02))
})
# refit with a user-supplied (deliberately unsorted) lambda sequence
fit <- ggmix(x = draw[["xtrain"]],
             y = draw[["ytrain"]],
             kinship = draw[["kin_train"]],
             estimation = "full", epsilon = 1e-10,
             verbose = 0, lambda = c(0.22150, 0.025, 0.01, 0.33))
# NOTE(review): description rewritten -- the original ("Check predict and
# coef methods ...") was copy-pasted and does not describe this KKT check.
test_that("KKT conditions hold at the GIC-selected user-defined lambda", {
  gicfit <- gic(fit)
  coef_res <- coef(gicfit, type = "all")
  et <- coef_res["eta", ]
  sigs <- coef_res["sigma2", ]
  bet <- coef_res[-which(rownames(coef_res) %in% c("eta", "sigma2")), ]
  kkt <- kkt_check(eta = et, sigma2 = sigs, beta = bet,
                   eigenvalues = fit$ggmix_object$D, x = fit$ggmix_object$x,
                   y = fit$ggmix_object$y, nt = length(fit$ggmix_object$y),
                   lambda = gicfit$lambda.min, tol.kkt = 1e-2)
  expect_true(all(abs(kkt)[-1] < 0.02))
})
|
library(smerc)
## Adjacency matrix
## Build the rook-neighbour adjacency matrix for a 6x6 grid of regions
## (numbered row-major, so region i touches i+1 and i+6), plus extra links
## attached to region 26 further below.
library(igraph)
ex_admatrix <- matrix(0, nrow = 36, ncol = 36)
set1 <- c(1,2:5,7,13,19,25,8:11,14:17,20:23,26:29) # cells with both an i+1 and an i+6 neighbour
set2 <- c(31,32:35) # top row (minus corner): i+1 neighbour only
set3 <- c(6,12,18,24,30) # east edge: i+6 neighbour only
set4 <- c(19,21,31,33) # extra neighbours to attach to region 26
# NOTE(review): in the original these three loops were accidentally nested,
# re-running the set2/set3 loops once per element of set1.  The final matrix
# was the same, but the loops are independent, so write them that way.
for (i in set1) {
  ex_admatrix[i, i + 1] <- 1
  ex_admatrix[i, i + 6] <- 1
}
for (i in set2) {
  ex_admatrix[i, i + 1] <- 1
}
for (i in set3) {
  ex_admatrix[i, i + 6] <- 1
}
# symmetrise: the loops above only fill the upper triangle (replaces the
# original column-by-column copy loop; result is identical)
ex_admatrix <- pmax(ex_admatrix, t(ex_admatrix))
for (i in set4) # add 4 more neighbors to 26
{
  ex_admatrix[26, i] <- 1
  ex_admatrix[i, 26] <- 1
}
## generate population
#set.seed(123)
# ex_pop <- rpois(36,1000000) generate the population, then kept it as the same as every time we run the program.
# NOTE(review): ex_pop must exist before this line; the generating call above
# is commented out, so a fresh session errored here.  Generate it only when
# missing so a previously saved population is still reused.
if (!exists("ex_pop")) ex_pop <- rpois(36, 1000000)
ex_pop_kept <- ex_pop
## generate observed cases for each region: two true clusters at rate 0.005,
## a middle band at 0.002 and background regions at 0.001 (the four sets
## below partition all 36 regions)
ex_cases_1a <- numeric(36)
region <- 1:36
cluster_1a <- c(5,10,11,17)
for (i in cluster_1a) { ex_cases_1a[i] <- rpois(1, 0.005*ex_pop_kept[i]) }
cluster_2a <- c(21,19,26,31,33) ## change 21 to 32
for (i in cluster_2a) { ex_cases_1a[i] <- rpois(1, 0.005*ex_pop_kept[i]) }
regions_0.2a <- c(4,6,12,16,24,25,23,24,27,15,14)
for (i in regions_0.2a) { ex_cases_1a[i] <- rpois(1, 0.002*ex_pop_kept[i]) }
regions_0.1a <- c(3,2,1,7,8,9,13,18,20,34,35,36,30,32,28,29,22)
for (j in regions_0.1a) { ex_cases_1a[j] <- rpois(1, 0.001*ex_pop_kept[j]) }
ex_matrix_1 <- data.frame(cbind(ex_cases_1a, ex_pop, region))
## tests for new example
## create distances between 36 regions using x-y coordinates (layout mirrors
## the nydf example data)
x <- rep(1:6, 6)        # x coordinate (column within each row)
y <- rep(1:6, each = 6) # y coordinate (row), replaces the original append loop
cen <- data.frame(cbind(x, y, region)) # region with its x-y coordinates
library(autoimage) ## get images of the density by colors
autoimage(1:6, 1:6, matrix(ex_matrix_1$ex_cases_1a, nrow = 6))
## Nearest neighbors
library(FNN)
# Pairwise Euclidean distance matrix between region centroids; dist()
# replaces the original O(n^2) double loop and yields the same values.
dis <- as.data.frame(as.matrix(dist(cbind(x, y))))
## Circular test
coor <- cbind(x, y) # x-y coordinate of each region, distance from 1 to 6.
# NOTE(review): ex_matrix_1's columns are ex_cases_1a, ex_pop and region; the
# original passed ex_matrix_1$ex_pop_kept, which is NULL, so the population
# argument was silently dropped.  Use the real column name.
test1_exa <- scan.test(coor, floor(ex_matrix_1$ex_cases_1a),
                       ex_matrix_1$ex_pop, nsim = 99,
                       alpha = 0.01, lonlat = FALSE)
## ULS test
test2_exa <- uls.test(coor, floor(ex_matrix_1$ex_cases_1a),
                      ex_matrix_1$ex_pop, w = ex_admatrix,
                      alpha = 0.01, lonlat = FALSE,
                      nsim = 99, ubpop = 0.3)
## Flexible test
test3_exa <- flex.test(coor, floor(ex_matrix_1$ex_cases_1a),
                       w = ex_admatrix, k = 9,
                       ex_matrix_1$ex_pop, nsim = 99,
                       alpha = 0.01, lonlat = FALSE)
## making up 6x6 grid
## making a graph using igraph
# NOTE(review): graph.adjacency matches its mode argument case-sensitively,
# so the original "Undirected" errored; it must be lower case.
g <- graph.adjacency(ex_admatrix, mode = "undirected")
plot.igraph(g, vertex.color = "yellow")
## collect the region ids of every detected cluster from each test
cluster_test1a <- c()
for (i in seq_along(test1_exa$clusters))
{
  cluster_test1a <- append(cluster_test1a, test1_exa$clusters[[i]]$locids)
}
cluster_test2a <- c()
for (i in seq_along(test2_exa$clusters))
{
  cluster_test2a <- append(cluster_test2a, test2_exa$clusters[[i]]$locids)
}
cluster_test3a <- c()
for (i in seq_along(test3_exa$clusters))
{
  cluster_test3a <- append(cluster_test3a, test3_exa$clusters[[i]]$locids)
}
plot.igraph(g, vertex.color = "yellow")
## plot clusters
c1a <- cluster_test1a
# NOTE(review): the original looped over undefined `c1`; the vector is c1a.
for (i in c1a)
{V(g)[i]$color <- "red"}
plot(g)
# plot not in igraph
## Circular Method
# demo: colour the regions a growing circular window would pick up, by radius
plot(y ~ x, type = "n", data = cen,main="A Demo of The Circular Method")
textcol_demo = rep("black",36)
# for each radius scanned, the window includes all the regions within that radius
# with radius of 1, the window includes 11,12,17,5
textcol_demo[11] = "green" # radius = 1 (green = window centroid)
textcol_demo[12] = "blue"
textcol_demo[17] = "blue"
textcol_demo[5] = "blue"
# for radius of 1.41, the window gains 10,16,18
textcol_demo[10] = "blue"# radius = 1.41
textcol_demo[16] = "red"
textcol_demo[18] = "red"
textcol_demo[4] = "red"
textcol_demo[6] = "red"
text(y ~ x, lab = 1:36, data = cen, col = textcol_demo)
# highlight the clusters actually detected by the circular scan test
plot(y ~ x, type = "n", data = cen,main="Example 2: Clusters of The Circular Method")
textcol = rep("black", 36)
textcol[cluster_test1a] = "blue"
text(y ~ x, lab = 1:36, data = cen, col = textcol)
## new plots from French
library(sp)
library(spdep)
library(autoimage)
library(data.table)
library(maptools)
library(fields)
# data.frame containing all grid locations IN TERMS OF POSITION
# NOTE(review): this reuses the name `g`, clobbering the igraph object built
# earlier; the graph is not plotted again afterwards, but renaming one of
# the two would be safer.
g = expand.grid(x = 1:6, y = 1:6)
# Build the closed unit-square polygon (half a cell in each direction around
# the centre x = c(x, y)) for one grid point, NA-terminated so the rows of
# many polygons can be concatenated into a single plotting path.
create_polys = function(x){
  offs_x = c(-0.5, -0.5, 0.5, 0.5, -0.5)
  offs_y = c(-0.5, 0.5, 0.5, -0.5, -0.5)
  data.frame(x = c(x[1] + offs_x, NA), y = c(x[2] + offs_y, NA))
}
# create polygon for grid locations (one NA-terminated square per region)
polys = apply(g, 1, create_polys)
polys_df = data.table::rbindlist(polys)
polys_list = list(x = polys_df$x, y = polys_df$y) # convert data frame to list format
# number of regions
nloc = length(polys)
# build SpatialPolygons, one polygon per region, ids 1..36
sppoly <- map2SpatialPolygons(polys_list, IDs = seq_len(nloc))
# create SpatialPolygonsDataFrame
# replace rpois(36, 1000000) with the populations you generated previously
polydf = SpatialPolygonsDataFrame(sppoly, data = data.frame(pop = ex_pop_kept))
plot(polydf)
# plot polygons
plot(sppoly)
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_test1a] = "blue"
#mycol[c(5, 6, 10, 11, 12, 17)] = "orange"
plot(sppoly, col = mycol, main="Clusters of The Circular Scanning Test")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
## ULS Method
g <- sum(ex_cases_1a)/sum(ex_pop)
ratio_a <- c()
for ( i in 1:36)
{
ratio_a[i] <- ex_cases_1a[i]/ex_pop_kept[i]
}
ratio_a_list <- c()
ratio_a_list <- append(ratio_a_list, which(ratio_a >= g))
plot(y ~ x, type = "n", data = cen,main="A Demo of The ULS Method")
textcol1_demo = rep("black", 36)
textcol1_demo[ratio_a_list] = "orange"
text(y ~ x, lab = 1:36, data = cen, col = textcol1_demo)
plot(y ~ x, type = "n", data = cen,main="Example 2: Clusters of The ULS Method")
textcol1 = rep("black", 36)
textcol1[cluster_test2a] = "red"
text(y ~ x, lab = 1:36, data = cen, col = textcol1)
# new plots from French
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_test2a] = "yellow"
plot(sppoly, col = mycol, main="Clusters of The ULS Scanning Test")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
## Flexible Method
## Each time, the window picks the one region nearest to the original centroid
plot(y ~ x, type = "n", data = cen,main="A Demo of The Flexible Method,k=7")
textcol2_demo = rep("black", 36)
textcol2_demo[11] = "green"
textcol2_demo[12] = "blue"
textcol2_demo[17] = "blue"
textcol2_demo[5] = "blue"
textcol2_demo[10] = "blue"
textcol2_demo[16] = "red"
textcol2_demo[18] = "red"
# in this scenario, k = 7
text(y ~ x, lab = 1:36, data = cen, col = textcol2_demo)
## plot the clusters
plot(y ~ x, type = "n", data = cen,main="Example 2: Clusters of The Flexible Method,k=9")
textcol2 = rep("black", 36)
textcol2[cluster_test3a] = "green"
text(y ~ x, lab = 1:36, data = cen, col = textcol2)
## plot the true clusters
plot(y ~ x, type = "n", data = cen,main="Example 2: True Clusters")
textcol2 = rep("black", 36)
textcol2[clusters_1a] = "green"
textcol2[cluster_2a] = "green"
text(y ~ x, lab = 1:36, data = cen, col = textcol2)
# new plots from French
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_test3a] = "red"
plot(sppoly, col = mycol, main="Clusters of The Flexible Scanning Test, k =9")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
# true cluster plot
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_1a] = "green"
mycol[cluster_2a] = "green"
plot(sppoly, col = mycol, main="True Clusters")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
| /Ex_6x6_French_a.R | no_license | duong87vn/Master-Project | R | false | false | 8,300 | r | library(smerc)
## Adjacency matrix
## making an adjacency matrix
library(igraph)
ex_admatrix <- matrix(rep(c(0),36*36),nrow = 36,ncol = 36)
set1 <- c(1,2:5,7,13,19,25,8:11,14:17,20:23,26:29) # set take i+1 and i+6
set2 <- c(31,32:35) # set take i+1
set3 <- c(6,12,18,24,30) # set take i+6
set4 <- c(19,21,31,33)
for ( i in set1)
{
ex_admatrix[i,i+1] <- 1
ex_admatrix[i,i+6] <- 1
for ( i in set2)
{
ex_admatrix[i,i+1] <- 1
for ( i in set3)
{
ex_admatrix[i,i+6] <- 1
}
}}
for ( i in 1:36){ ex_admatrix[,i] <- ex_admatrix[i,]}
for ( i in set4) # add 4 more neighbors to 26
{
ex_admatrix[26,i] <- 1
ex_admatrix[i,26] <- 1
}
## generate population
#set.seed(123)
# ex_pop <- rpois(36,1000000) generate the population, then kept it as the same as every time we run the program.
ex_pop_kept <- ex_pop
## generate observed cases for each regions
ex_cases_1a <- c()
region <- c(1:36)
cluster_1a <- c(5,10,11,17)
for ( i in cluster_1a) { ex_cases_1a[i] <- rpois(1,0.005*ex_pop_kept[i])}
cluster_2a <- c(21,19,26,31,33) ## change 21 to 32
for ( i in cluster_2a) { ex_cases_1a[i] <- rpois(1,0.005*ex_pop_kept[i])}
regions_0.2a <- c(4,6,12,16,24,25,23,24,27,15,14)
for ( i in regions_0.2a) { ex_cases_1a[i] <- rpois(1,0.002*ex_pop_kept[i])}
regions_0.1a <- c(3,2,1,7,8,9,13,18,20,34,35,36,30,32,28,29,22)
for ( j in regions_0.1a) { ex_cases_1a[j] <- rpois(1,0.001*ex_pop_kept[j])}
ex_matrix_1 <- data.frame(cbind(ex_cases_1a,ex_pop,region))
## tests for new example
## create distances between 36 regions using x-y coordinates ( look at nydf data
x <- rep((1:6),6) # x coordinate
y <- c() # y coordinate
for ( i in 1:6) {y <- append(y,rep((i),6))}
cen <- data.frame(cbind(x,y,region)) # region with its x-y coordinates
library(autoimage) ## get images of the density by colors
autoimage(1:6, 1:6, matrix(ex_matrix_1$ex_cases_1a, nrow = 6))
## Nearest neigbors
library(FNN)
# distance matrix
dis <- data.frame() ## distance matrix
for ( i in 1:36)
{
for ( j in 1:36)
{
dis[i,i] <- 0
dis[i,j] <- sqrt((cen[i,]$y - cen[j,]$y)^2 + (cen[i,]$x - cen[j,]$x)^2)
}
}
## Circular test
coor <- cbind(x,y) # x-y coordinate of each region, distance from 1 to 6.
test1_exa <- scan.test(coor,floor(ex_matrix_1$ex_cases_1a),
ex_matrix_1$ex_pop_kept, nsim = 99,
alpha = 0.01, lonlat = FALSE)
## ULS test
test2_exa = uls.test(coor,floor(ex_matrix_1$ex_cases_1a),
ex_matrix_1$ex_pop_kept, w = ex_admatrix,
alpha = 0.01, lonlat =FALSE,
nsim = 99, ubpop = 0.3)
## Flexible test
test3_exa = flex.test(coor,floor(ex_matrix_1$ex_cases_1a),
w = ex_admatrix, k = 9,
ex_matrix_1$ex_pop_kept, nsim = 99,
alpha = 0.01, lonlat = FALSE)
## making up 6x6 grid
## making a graph using igraph
g <- graph.adjacency(ex_admatrix,mode = "Undirected")
plot.igraph(g,vertex.color="yellow")
## clusters of test 1
cluster_test1a <- c()
for ( i in 1:length(test1_exa$clusters))
{
cluster_test1a <- append(cluster_test1a,test1_exa$clusters[[i]]$locids)
}
cluster_test2a <- c()
for ( i in 1:length(test2_exa$clusters))
{
cluster_test2a <-append(cluster_test2a, test2_exa$clusters[[i]]$locids)
}
cluster_test3a <- c()
for ( i in 1:length(test3_exa$clusters))
{
cluster_test3a <-append(cluster_test3a, test3_exa$clusters[[i]]$locids)
}
plot.igraph(g,vertex.color="yellow")
## plot clusters
c1a <- cluster_test1a
for ( i in c1)
{V(g)[i]$color <- "red"}
plot(g)
# plot not in igraph
## Circular Method
plot(y ~ x, type = "n", data = cen,main="A Demo of The Circular Method")
textcol_demo = rep("black",36)
# for each radius scanned, the window includes all the regions within that radius
# with radius of 1, the window includes 11,12,17,5
textcol_demo[11] = "green" # radius = 1
textcol_demo[12] = "blue"
textcol_demo[17] = "blue"
textcol_demo[5] = "blue"
# for radius of 1.41, the window gains 10,16,18
textcol_demo[10] = "blue"# radius = 1.41
textcol_demo[16] = "red"
textcol_demo[18] = "red"
textcol_demo[4] = "red"
textcol_demo[6] = "red"
text(y ~ x, lab = 1:36, data = cen, col = textcol_demo)
plot(y ~ x, type = "n", data = cen,main="Example 2: Clusters of The Circular Method")
textcol = rep("black", 36)
textcol[cluster_test1a] = "blue"
text(y ~ x, lab = 1:36, data = cen, col = textcol)
## new plots from French
library(sp)
library(spdep)
library(autoimage)
library(data.table)
library(maptools)
library(fields)
# data.frame containing all grid locations IN TERMS OF POSITION
g = expand.grid(x = 1:6, y = 1:6)
# a function to create a polygon for each grid point
# Return a closed unit-square polygon around grid centre `x` (= c(x, y)),
# with a trailing NA vertex so concatenated polygons remain separated.
create_polys = function(x){
dx = c(-0.5, -0.5, 0.5, 0.5, -0.5)
dy = c(-0.5, 0.5, 0.5, -0.5, -0.5)
data.frame(x = c(x[1] + dx, NA), y = c(x[2] + dy, NA))
}
# create polygon for grid locations
polys = apply(g, 1, create_polys)
polys_df = data.table::rbindlist(polys)
polys_list = list(x = polys_df$x, y = polys_df$y) # convert data frame to list format
# number of regions
nloc = length(polys)
# color certain polygons
sppoly <- map2SpatialPolygons(polys_list, IDs = seq_len(nloc))
# create SpatialPolygonsDataFrame
# replace rpois(36, 1000000) with the populations you generated previously
polydf = SpatialPolygonsDataFrame(sppoly, data = data.frame(pop = ex_pop_kept))
plot(polydf)
# plot polygons
plot(sppoly)
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_test1a] = "blue"
#mycol[c(5, 6, 10, 11, 12, 17)] = "orange"
plot(sppoly, col = mycol, main="Clusters of The Circular Scanning Test")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
## ULS Method
g <- sum(ex_cases_1a)/sum(ex_pop)
ratio_a <- c()
for ( i in 1:36)
{
ratio_a[i] <- ex_cases_1a[i]/ex_pop_kept[i]
}
ratio_a_list <- c()
ratio_a_list <- append(ratio_a_list, which(ratio_a >= g))
plot(y ~ x, type = "n", data = cen,main="A Demo of The ULS Method")
textcol1_demo = rep("black", 36)
textcol1_demo[ratio_a_list] = "orange"
text(y ~ x, lab = 1:36, data = cen, col = textcol1_demo)
plot(y ~ x, type = "n", data = cen,main="Example 2: Clusters of The ULS Method")
textcol1 = rep("black", 36)
textcol1[cluster_test2a] = "red"
text(y ~ x, lab = 1:36, data = cen, col = textcol1)
# new plots from French
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_test2a] = "yellow"
plot(sppoly, col = mycol, main="Clusters of The ULS Scanning Test")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
## Flexible Method
## For each time, the window pick one region that is the nearest to the original centroid
plot(y ~ x, type = "n", data = cen,main="A Demo of The Flexible Method,k=7")
textcol2_demo = rep("black", 36)
textcol2_demo[11] = "green"
textcol2_demo[12] = "blue"
textcol2_demo[17] = "blue"
textcol2_demo[5] = "blue"
textcol2_demo[10] = "blue"
textcol2_demo[16] = "red"
textcol2_demo[18] = "red"
# in this scenario, k = 7
text(y ~ x, lab = 1:36, data = cen, col = textcol2_demo)
## plot the clusters
plot(y ~ x, type = "n", data = cen,main="Example 2: Clusters of The Flexible Method,k=9")
textcol2 = rep("black", 36)
textcol2[cluster_test3a] = "green"
text(y ~ x, lab = 1:36, data = cen, col = textcol2)
## plot the true clusters
plot(y ~ x, type = "n", data = cen,main="Example 2: True Clusters")
textcol2 = rep("black", 36)
textcol2[clusters_1a] = "green"
textcol2[cluster_2a] = "green"
text(y ~ x, lab = 1:36, data = cen, col = textcol2)
# new plots from French
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_test3a] = "red"
plot(sppoly, col = mycol, main="Clusters of The Flexible Scanning Test, k =9")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
# true cluster plot
# color certain polygons
mycol = rep("white", nloc)
# change color of true clusters to a different color
mycol[cluster_1a] = "green"
mycol[cluster_2a] = "green"
plot(sppoly, col = mycol, main="True Clusters")
text(coordinates(sppoly), labels=row.names(sppoly)) # name polygons
|
# Choose the elastic-net mixing parameter (alpha) for cv.glmnet by
# cross-validated Matthews correlation coefficient (MCC).
#
# FeatMat : numeric feature matrix (rows = samples, columns = features).
# ObsVec  : binary outcome vector, one entry per row of FeatMat.
#
# For each candidate alpha in {0, 0.25, 0.5, 0.75, 1} the data are split
# 10 times into train/test parts; a cross-validated binomial glmnet model
# is fit on the training part and its test-set MCC recorded.  The alpha
# with the highest median MCC is returned, ties broken uniformly at random.
#
# Relies on caret::createFolds, glmnet::cv.glmnet and an mcc() function
# being available in the calling environment.
GlMnet_Optimize <- function(FeatMat,ObsVec){
  Alpha_Vec <- seq(0,1,0.25)
  MCCmedian_Vec <- c()
  for(alpha in Alpha_Vec){
    MCC_Vec <- c()
    for(BSiter in 1:10){
      # Fresh random 3-fold split each iteration; only the first fold's
      # train/test partition is actually used.
      FoldNum <- 3
      folds <- createFolds(y=ObsVec, k = FoldNum, list = TRUE, returnTrain = TRUE)
      Folditer <- 1
      InputInd <- folds[[Folditer]]
      ObservVec <- ObsVec[-InputInd]
      # matrix() wrappers keep single-row subsets from dropping to vectors
      TrainFeat <- matrix(FeatMat[InputInd,], ncol = ncol(FeatMat))
      TrainObs <- ObsVec[InputInd]
      TestFeat <- matrix(FeatMat[-InputInd,], ncol = ncol(FeatMat))
      GlMnet_Model <- cv.glmnet(x = TrainFeat, y = TrainObs,
                                alpha=alpha, nfolds = 5,family = "binomial")
      PredVec <- predict(GlMnet_Model, TestFeat, type="class",s="lambda.min")
      # MCC is undefined when the model predicts fewer classes than were
      # observed; record NA so the median below can skip this iteration
      if(length(unique(PredVec)) < length(unique(ObservVec))){
        MCC_Vec <- c(MCC_Vec, NA)
      }else{
        MCC_Vec <- c(MCC_Vec, mcc(factor(PredVec), factor(ObservVec), nperm = 2)$estimate)
      }
    }
    MCCmedian_Vec <- c(MCCmedian_Vec, median(na.omit(MCC_Vec)))
  }
  print(MCCmedian_Vec)
  # Indices of all alphas tied for the best median MCC; computed once
  # instead of evaluating the same which(...) expression twice
  best_idx <- which(MCCmedian_Vec == max(na.omit(MCCmedian_Vec)))
  Opt_alpha <- Alpha_Vec[best_idx[sample.int(length(best_idx), 1)]]
  #########
  return(Opt_alpha)
}
| /R/GLMnet_Optimize.R | no_license | bhklab/MODELS | R | false | false | 1,277 | r | GlMnet_Optimize <- function(FeatMat,ObsVec){
# Candidate elastic-net mixing parameters to evaluate
Alpha_Vec <- seq(0,1,0.25)
MCCmedian_Vec <- c()
for(alpha in Alpha_Vec){
MCC_Vec <- c()
# 10 random resampling iterations per alpha
for(BSiter in 1:10){
FoldNum <- 3
folds <- createFolds(y=ObsVec, k = FoldNum, list = TRUE, returnTrain = TRUE)
# only the first of the 3 folds is used as the train/test split
Folditer <- 1
InputInd <- folds[[Folditer]]
ObservVec <- ObsVec[-InputInd]
# matrix() keeps single-row subsets from dropping to plain vectors
TrainFeat <- matrix(FeatMat[InputInd,], ncol = ncol(FeatMat))
TrainObs <- ObsVec[InputInd]
TestFeat <- matrix(FeatMat[-InputInd,], ncol = ncol(FeatMat))
GlMnet_Model <- cv.glmnet(x = TrainFeat, y = TrainObs,
alpha=alpha, nfolds = 5,family = "binomial")
PredVec <- predict(GlMnet_Model, TestFeat, type="class",s="lambda.min")
# MCC is undefined when the model predicts fewer classes than observed;
# record NA so median(na.omit(...)) below skips this iteration
if(length(unique(PredVec)) < length(unique(ObservVec))){
MCC_Vec <- c(MCC_Vec, NA)
}else{
MCC_Vec <- c(MCC_Vec, mcc(factor(PredVec), factor(ObservVec), nperm = 2)$estimate)
}
}
MCCmedian_Vec <- c(MCCmedian_Vec, median(na.omit(MCC_Vec)))
}
print(MCCmedian_Vec)
# Pick the alpha with the highest median MCC; ties broken at random.
# NOTE(review): the which(...) expression is evaluated twice here.
Opt_alpha <- Alpha_Vec[which(MCCmedian_Vec == max(na.omit(MCCmedian_Vec)))[
sample.int(length(which(MCCmedian_Vec == max(na.omit(MCCmedian_Vec)))),1)]]
#########
return(Opt_alpha)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-airac.R
\name{cfmu_airac_epoch_number}
\alias{cfmu_airac_epoch_number}
\title{The CFMU epoch number for AIRAC numbering scheme}
\usage{
cfmu_airac_epoch_number()
}
\value{
the epoch number for CFMU AIRAC numbering scheme
}
\description{
The CFMU epoch number for AIRAC numbering scheme
}
\seealso{
Other airac:
\code{\link{airac_epoch}()},
\code{\link{airac_interval}()},
\code{\link{airac_year_epoch}()},
\code{\link{airac}()},
\code{\link{cfmu_airac_epoch}()},
\code{\link{cfmu_airac_interval}()},
\code{\link{cfmu_airac}()}
}
\concept{airac}
\keyword{internal}
| /man/cfmu_airac_epoch_number.Rd | no_license | euctrl-pru/trrrj | R | false | true | 649 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-airac.R
\name{cfmu_airac_epoch_number}
\alias{cfmu_airac_epoch_number}
\title{The CFMU epoch number for AIRAC numbering scheme}
\usage{
cfmu_airac_epoch_number()
}
\value{
the epoch number for CFMU AIRAC numbering scheme
}
\description{
The CFMU epoch number for AIRAC numbering scheme
}
\seealso{
Other airac:
\code{\link{airac_epoch}()},
\code{\link{airac_interval}()},
\code{\link{airac_year_epoch}()},
\code{\link{airac}()},
\code{\link{cfmu_airac_epoch}()},
\code{\link{cfmu_airac_interval}()},
\code{\link{cfmu_airac}()}
}
\concept{airac}
\keyword{internal}
|
# getCovered.R -- keep only genes expressed above 1 TPM in an RSEM
# quantification and write their BED locations to <direct>/covered.bed.
#
# Usage: Rscript getCovered.R <gene BED file> <RSEM .genes.results file> <output dir>
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 3) {
  stop("usage: getCovered.R <geneFile> <RSEM_Output> <direct>", call. = FALSE)
}
geneFile <- args[1]
RSEM_Output <- args[2]
direct <- args[3]
print(geneFile)
print(RSEM_Output)
print(direct)
print("Get covered genes from RSEM output")
#RSEM_Output=paste(direct,"coverage","all_genes.bed.genes.results",sep="/")
#print(RSEM_Output)
# RSEM output: one row per gene, indexed by gene_id
rsem <- read.table(RSEM_Output, header = TRUE)
rownames(rsem) <- rsem[, "gene_id"]
# "covered" = expressed above 1 TPM
rsem <- rsem[rsem$TPM > 1, ]
covered_genes <- as.character(rownames(rsem))
print("Extract covered genes location")
gene_location <- read.table(geneFile)
# BED column 4 is assumed to hold the gene identifier -- TODO confirm
rownames(gene_location) <- gene_location[, 4]
gene_location <- gene_location[covered_genes, ]
print("Save covered genes")
savefile <- file.path(direct, "covered.bed")
print(savefile)
write.table(gene_location, savefile, sep = "\t",
            col.names = FALSE, row.names = FALSE, quote = FALSE)
| /AnalysingMethods/getCovered.R | no_license | aina91/FivePrime | R | false | false | 752 | r |
# getCovered.R -- keep only genes with RSEM TPM > 1 and write their BED
# locations to <direct>/covered.bed.
# args: 1 = gene BED file, 2 = RSEM .genes.results file, 3 = output directory
args = commandArgs(trailingOnly=TRUE)
geneFile=args[1]
RSEM_Output=args[2]
direct=args[3]
print(geneFile)
print(RSEM_Output)
print(direct)
print("Get covered genes from RSEM output")
#RSEM_Output=paste(direct,"coverage","all_genes.bed.genes.results",sep="/")
#print(RSEM_Output)
# RSEM output: one row per gene, indexed by gene_id
rsem=read.table(RSEM_Output,header=T)
rownames(rsem)=rsem[,"gene_id"]
# "covered" = expressed above 1 TPM
rsem=rsem[rsem$TPM > 1,]
covered_genes=as.character(rownames(rsem))
print("Extract covered genes location")
gene_location=read.table(geneFile)
# BED column 4 is assumed to hold the gene identifier -- TODO confirm
rownames(gene_location)=gene_location[,4]
gene_location=gene_location[covered_genes,]
print("Save covered genes")
savefile=paste(direct,"covered.bed",sep="/")
print(savefile)
write.table(gene_location,savefile,sep="\t",col.names=F,row.names=F,quote=F)
|
#' Get an aggregate value from the outdegree of nodes
#'
#' @description
#'
#' Get a single, aggregate value from the outdegree values for all nodes in a
#' graph, or, a subset of graph nodes.
#'
#' @inheritParams render_graph
#' @param agg The aggregation function to use for summarizing outdegree values
#' from graph nodes. The following aggregation functions can be used: `sum`,
#' `min`, `max`, `mean`, or `median`.
#' @param conditions An option to use filtering conditions for the nodes to
#' consider.
#'
#' @return A vector with an aggregate outdegree value.
#'
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 20,
#' m = 35,
#' set_seed = 23) %>%
#' set_node_attrs(
#' node_attr = value,
#' values = rnorm(
#' n = count_nodes(.),
#' mean = 5,
#' sd = 1) %>% round(1))
#'
#' # Get the mean outdegree value from all
#' # nodes in the graph
#' graph %>%
#' get_agg_degree_out(
#' agg = "mean")
#'
#' # Other aggregation functions can be used
#' # (`min`, `max`, `median`, `sum`); let's
#' # get the median in this example
#' graph %>%
#' get_agg_degree_out(
#' agg = "median")
#'
#' # The aggregation of outdegree can occur
#' # for a subset of the graph nodes and this
#' # is made possible by specifying `conditions`
#' # for the nodes
#' graph %>%
#' get_agg_degree_out(
#' agg = "mean",
#' conditions = value < 5.0)
#'
#' @import rlang
#' @export
get_agg_degree_out <- function(
    graph,
    agg,
    conditions = NULL
) {
  # Get the name of the function (used in error reporting)
  fcn_name <- get_calling_fcn()
  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }
  # Capture provided conditions as a quosure for tidy evaluation
  conditions <- rlang::enquo(conditions)
  # If filtering conditions are provided then
  # pass in those conditions and filter the ndf
  if (!is.null(
    rlang::enquo(conditions) %>%
    rlang::get_expr())) {
    # Extract the node data frame from the graph
    ndf <- get_node_df(graph)
    # Apply filtering conditions to the ndf
    ndf <- dplyr::filter(.data = ndf, !!conditions)
    # Get a vector of node ID values
    node_ids <-
      ndf %>%
      dplyr::pull(id)
  }
  # Get a data frame with outdegree values for
  # all nodes in the graph
  outdegree_df <- get_degree_out(graph)
  # `inherits = FALSE` restricts the lookup to this function's own frame;
  # a plain exists("node_ids") also searches enclosing environments, so a
  # `node_ids` object in the user's workspace could trigger filtering even
  # when no `conditions` were supplied
  if (exists("node_ids", inherits = FALSE)) {
    outdegree_df <-
      outdegree_df %>%
      dplyr::filter(id %in% node_ids)
  }
  # Verify that the value provided for `agg`
  # is one of the accepted aggregation types
  if (!(agg %in% c("sum", "min", "max", "mean", "median"))) {
    emit_error(
      fcn_name = fcn_name,
      reasons = c(
        "The specified aggregation method is not valid",
        "allowed choices are: `min`, `max`, `mean`, `median`, or `sum`"))
  }
  # Get the aggregate value of total degree based
  # on the aggregate function provided
  fun <- match.fun(agg)
  # Collapse the (optionally filtered) outdegree column to a single
  # numeric value using the chosen aggregation function
  outdegree_agg <-
    outdegree_df %>%
    dplyr::group_by() %>%
    dplyr::summarize(fun(outdegree, na.rm = TRUE)) %>%
    dplyr::ungroup() %>%
    purrr::flatten_dbl()
  outdegree_agg
}
| /R/get_agg_degree_out.R | permissive | rich-iannone/DiagrammeR | R | false | false | 3,246 | r | #' Get an aggregate value from the outdegree of nodes
#'
#' @description
#'
#' Get a single, aggregate value from the outdegree values for all nodes in a
#' graph, or, a subset of graph nodes.
#'
#' @inheritParams render_graph
#' @param agg The aggregation function to use for summarizing outdegree values
#' from graph nodes. The following aggregation functions can be used: `sum`,
#' `min`, `max`, `mean`, or `median`.
#' @param conditions An option to use filtering conditions for the nodes to
#' consider.
#'
#' @return A vector with an aggregate outdegree value.
#'
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 20,
#' m = 35,
#' set_seed = 23) %>%
#' set_node_attrs(
#' node_attr = value,
#' values = rnorm(
#' n = count_nodes(.),
#' mean = 5,
#' sd = 1) %>% round(1))
#'
#' # Get the mean outdegree value from all
#' # nodes in the graph
#' graph %>%
#' get_agg_degree_out(
#' agg = "mean")
#'
#' # Other aggregation functions can be used
#' # (`min`, `max`, `median`, `sum`); let's
#' # get the median in this example
#' graph %>%
#' get_agg_degree_out(
#' agg = "median")
#'
#' # The aggregation of outdegree can occur
#' # for a subset of the graph nodes and this
#' # is made possible by specifying `conditions`
#' # for the nodes
#' graph %>%
#' get_agg_degree_out(
#' agg = "mean",
#' conditions = value < 5.0)
#'
#' @import rlang
#' @export
get_agg_degree_out <- function(
    graph,
    agg,
    conditions = NULL
) {
  # Get the name of the function
  fcn_name <- get_calling_fcn()
  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    emit_error(
      fcn_name = fcn_name,
      reasons = "The graph object is not valid")
  }
  # Capture provided conditions
  conditions <- rlang::enquo(conditions)
  # If filtering conditions are provided then
  # pass in those conditions and filter the ndf
  if (!is.null(
    rlang::enquo(conditions) %>%
    rlang::get_expr())) {
    # Extract the node data frame from the graph
    ndf <- get_node_df(graph)
    # Apply filtering conditions to the ndf
    ndf <- dplyr::filter(.data = ndf, !!conditions)
    # Get a vector of node ID values
    node_ids <-
      ndf %>%
      dplyr::pull(id)
  }
  # Get a data frame with outdegree values for
  # all nodes in the graph
  outdegree_df <- get_degree_out(graph)
  # NOTE(review): exists("node_ids") also searches enclosing environments;
  # a `node_ids` object in the caller's workspace could trigger this branch
  # even when no `conditions` were given -- consider
  # exists("node_ids", inherits = FALSE)
  if (exists("node_ids")) {
    outdegree_df <-
      outdegree_df %>%
      dplyr::filter(id %in% node_ids)
  }
  # Verify that the value provided for `agg`
  # is one of the accepted aggregation types
  if (!(agg %in% c("sum", "min", "max", "mean", "median"))) {
    emit_error(
      fcn_name = fcn_name,
      reasons = c(
        "The specified aggregation method is not valid",
        "allowed choices are: `min`, `max`, `mean`, `median`, or `sum`"))
  }
  # Get the aggregate value of total degree based
  # on the aggregate function provided
  fun <- match.fun(agg)
  # Collapse the (optionally filtered) outdegree column to a single
  # numeric value using the chosen aggregation function
  outdegree_agg <-
    outdegree_df %>%
    dplyr::group_by() %>%
    dplyr::summarize(fun(outdegree, na.rm = TRUE)) %>%
    dplyr::ungroup() %>%
    purrr::flatten_dbl()
  outdegree_agg
}
|
library(io)
library(dplyr)
library(ggplot2)
library(ggsci)
# Rotate a vector left by `n` positions, wrapping the first `n` elements
# to the end.  Negative `n` rotates right.  `n` is reduced modulo
# length(x), so rotations with |n| >= length(x) now wrap correctly
# instead of silently returning `x` unchanged (tail(x, -n) is empty in
# that case).  Results for all in-range n, including negatives, are
# identical to the previous behaviour.
shift <- function(x, n=1) {
  if (length(x) == 0) {
    return(x)
  }
  n <- n %% length(x)
  if (n == 0) {
    x
  } else {
    c(tail(x, -n), head(x, n))
  }
}
clones <- c("P", "RC1", "RC2", "RC3", "RC4", "RC5", "RC6", "RC7");
clone.cols <- shift(pal_d3()(length(clones)), -1);
names(clone.cols) <- clones;
ref.target <- "18S";
ref.sample <- "SUM149-P";
# alpha level for confidence interval
alpha <- 0.2;
z <- qnorm(1 - alpha/2)
# input data
x1 <- qread("data/qrtpcr_2021-01-20.csv");
x2 <- qread("data/qrtpcr_2021-01-25.csv");
out.fname <- filename("qrtpcr", tag="sum149");
# normalize experiments toward the last experiment
m1 <- x1 %>% select(target, sample, cq) %>%
group_by(target, sample) %>%
summarize(cq=mean(cq));
m2 <- x2 %>% select(target, sample, cq) %>%
group_by(target, sample) %>%
summarize(cq=mean(cq));
mc <- rbind(m1, m2) %>% group_by(target, sample) %>%
summarize(dcq = cq - last(cq)) %>%
filter(dcq > 0)
mct <- mc %>% group_by(target) %>% summarize(dcq_mean = mean(dcq), dcq_sd = sd(dcq));
#mcs <- mc %>% group_by(sample) %>% summarize(dcq_mean = mean(dcq), dcq_sd = sd(dcq));
summary(mc$dcq)
# substract background difference from experiment 1
x1b <- left_join(x1, mct) %>% mutate(cq = cq - dcq_mean) %>%
select(target, sample, cq);
# combined experiments
xcb <- rbind(x1b, select(x2, target, sample, cq)) %>%
group_by(target, sample) %>%
filter(!is.na(cq))
# apply reference target and sample normalization by the delta-delta-cq method
# compute reference target mean csq
xcrt <- filter(xcb, target == ref.target) %>%
summarize(cq_ref = mean(cq)) %>% ungroup();
# normalize against reference target
xcn <- left_join(xcb, select(xcrt, -target), by="sample") %>%
mutate(dcq = cq - cq_ref);
xcrs <- filter(xcn, sample == ref.sample) %>%
summarize(dcq_ref = mean(dcq)) %>% ungroup();
xcnn <- left_join(xcn, select(xcrs, -sample), by="target") %>%
mutate(ddcq = dcq - dcq_ref);
qwrite(xcnn, insert(out.fname, tag="ddcq", ext="tsv"));
s0 <- summarize(xcnn, ddcq_mean=mean(ddcq), ddcq_se=sd(ddcq)) %>%
mutate(
y = 2^(-ddcq_mean),
ymin = 2^(-ddcq_mean - z*ddcq_se),
ymax = 2^(-ddcq_mean + z*ddcq_se)
);
qwrite(s0, insert(out.fname, tag="summary", ext="tsv"));
# compare all samples against the ref sample
targets <- unique(xcnn$target);
names(targets) <- targets;
# test results
hs <- lapply(
targets,
function(tt) {
d <- filter(xcnn, target == tt);
with(d, t.test(cq[sample != ref.sample], cq[sample == ref.sample]))
}
);
# p values
ps <- unlist(lapply(hs, function(h) h$p.value));
# summary differential expression data
s <- filter(s0, target != ref.target) %>%
mutate(sample = sub("SUM149-", "", sample)) %>%
mutate(target = factor(target,
levels=names(ps),
labels=sprintf("%s (p = %s)", names(ps),
# format number individually to avoid all numbers being shown in
# scientific notation
unlist(lapply(ps, format, digits=2)))
));
# plot differential expression
# Axis-break helper for ggplot: every integer from floor(lower) to
# ceiling(upper) of the given limits.
integer_breaks <- function(lim) {
  lo <- floor(lim[1])
  hi <- ceiling(lim[2])
  seq(lo, hi)
}
# Axis-limit helper for ggplot: widen the range outward to whole numbers.
integer_limits <- function(lim) {
  lower <- floor(lim[1])
  upper <- ceiling(lim[2])
  c(lower, upper)
}
qdraw(
ggplot(s, aes(
x=reorder(sample, desc(sample)), fill=sample,
y=y, ymin=ymin, ymax=ymax
)) +
geom_hline(aes(yintercept=1), colour="grey80") +
geom_col() + geom_errorbar(width=0.5) +
theme_bw() + coord_flip() +
facet_wrap(~ target, scale="free_x") +
xlab("") + ylab("relative expression by qRT-PCR") +
scale_fill_manual(values=clone.cols) +
scale_y_continuous(breaks=integer_breaks, limits=integer_limits) +
guides(fill = FALSE) +
theme(
panel.grid = element_blank(),
strip.background = element_blank()
)
,
file = insert(out.fname, ext="pdf"),
width = 5, height = 2.5
)
| /qrtpcr/analyze.R | no_license | djhshih/analysis-parpir-sum149-rna-seq | R | false | false | 3,711 | r | library(io)
library(dplyr)
library(ggplot2)
library(ggsci)
# Rotate `x` left by `n` positions, moving the first `n` elements to the
# end of the vector; n = 0 returns the input untouched.
shift <- function(x, n=1) {
  if (n == 0) {
    return(x)
  }
  c(tail(x, -n), head(x, n))
}
clones <- c("P", "RC1", "RC2", "RC3", "RC4", "RC5", "RC6", "RC7");
clone.cols <- shift(pal_d3()(length(clones)), -1);
names(clone.cols) <- clones;
ref.target <- "18S";
ref.sample <- "SUM149-P";
# alpha level for confidence interval
alpha <- 0.2;
z <- qnorm(1 - alpha/2)
# input data
x1 <- qread("data/qrtpcr_2021-01-20.csv");
x2 <- qread("data/qrtpcr_2021-01-25.csv");
out.fname <- filename("qrtpcr", tag="sum149");
# normalize experiments toward the last experiment
m1 <- x1 %>% select(target, sample, cq) %>%
group_by(target, sample) %>%
summarize(cq=mean(cq));
m2 <- x2 %>% select(target, sample, cq) %>%
group_by(target, sample) %>%
summarize(cq=mean(cq));
mc <- rbind(m1, m2) %>% group_by(target, sample) %>%
summarize(dcq = cq - last(cq)) %>%
filter(dcq > 0)
mct <- mc %>% group_by(target) %>% summarize(dcq_mean = mean(dcq), dcq_sd = sd(dcq));
#mcs <- mc %>% group_by(sample) %>% summarize(dcq_mean = mean(dcq), dcq_sd = sd(dcq));
summary(mc$dcq)
# substract background difference from experiment 1
x1b <- left_join(x1, mct) %>% mutate(cq = cq - dcq_mean) %>%
select(target, sample, cq);
# combined experiments
xcb <- rbind(x1b, select(x2, target, sample, cq)) %>%
group_by(target, sample) %>%
filter(!is.na(cq))
# apply reference target and sample normalization by the delta-delta-cq method
# compute reference target mean csq
xcrt <- filter(xcb, target == ref.target) %>%
summarize(cq_ref = mean(cq)) %>% ungroup();
# normalize against reference target
xcn <- left_join(xcb, select(xcrt, -target), by="sample") %>%
mutate(dcq = cq - cq_ref);
xcrs <- filter(xcn, sample == ref.sample) %>%
summarize(dcq_ref = mean(dcq)) %>% ungroup();
xcnn <- left_join(xcn, select(xcrs, -sample), by="target") %>%
mutate(ddcq = dcq - dcq_ref);
qwrite(xcnn, insert(out.fname, tag="ddcq", ext="tsv"));
s0 <- summarize(xcnn, ddcq_mean=mean(ddcq), ddcq_se=sd(ddcq)) %>%
mutate(
y = 2^(-ddcq_mean),
ymin = 2^(-ddcq_mean - z*ddcq_se),
ymax = 2^(-ddcq_mean + z*ddcq_se)
);
qwrite(s0, insert(out.fname, tag="summary", ext="tsv"));
# compare all samples against the ref sample
targets <- unique(xcnn$target);
names(targets) <- targets;
# test results
hs <- lapply(
targets,
function(tt) {
d <- filter(xcnn, target == tt);
with(d, t.test(cq[sample != ref.sample], cq[sample == ref.sample]))
}
);
# p values
ps <- unlist(lapply(hs, function(h) h$p.value));
# summary differential expression data
s <- filter(s0, target != ref.target) %>%
mutate(sample = sub("SUM149-", "", sample)) %>%
mutate(target = factor(target,
levels=names(ps),
labels=sprintf("%s (p = %s)", names(ps),
# format number individually to avoid all numbers being shown in
# scientific notation
unlist(lapply(ps, format, digits=2)))
));
# plot differential expression
# ggplot breaks helper: all integers spanning the (widened) limits.
integer_breaks <- function(lim) {
  seq(from = floor(lim[1]), to = ceiling(lim[2]))
}
# ggplot limits helper: round the lower limit down and the upper limit up.
integer_limits <- function(lim) {
  c(floor(lim[1]), ceiling(lim[2]))
}
qdraw(
ggplot(s, aes(
x=reorder(sample, desc(sample)), fill=sample,
y=y, ymin=ymin, ymax=ymax
)) +
geom_hline(aes(yintercept=1), colour="grey80") +
geom_col() + geom_errorbar(width=0.5) +
theme_bw() + coord_flip() +
facet_wrap(~ target, scale="free_x") +
xlab("") + ylab("relative expression by qRT-PCR") +
scale_fill_manual(values=clone.cols) +
scale_y_continuous(breaks=integer_breaks, limits=integer_limits) +
guides(fill = FALSE) +
theme(
panel.grid = element_blank(),
strip.background = element_blank()
)
,
file = insert(out.fname, ext="pdf"),
width = 5, height = 2.5
)
|
# Latvia vaccine data
library(here)
source(here("Automation/00_Functions_automation.R"))
library(lubridate)
library(dplyr)
library(tidyverse)
# assigning Drive credentials in the case the script is verified manually
if (!"email" %in% ls()){
email <- "jessica_d.1994@yahoo.de"
}
# info country and N drive address
ctr <- "Latvia_vaccine" # it's a placeholder
dir_n <- "N:/COVerAGE-DB/Automation/Hydra/"
# Drive credentials
drive_auth(email = Sys.getenv("email"))
gs4_auth(email = Sys.getenv("email"))
#Read in data
## MK: 10.11.2022: since these data are not updated since 07.2022, I deprecate this file (and the relevant dataset)
## and shift to ECDC data instead.
m_url_pre22 <- "https://data.gov.lv/dati/eng/dataset/covid19-vakcinacijas/resource/51725018-49f3-40d1-9280-2b13219e026f"
links_pre22 <- scraplinks(m_url_pre22) %>%
filter(str_detect(url, "adp_covid19_vakc")) %>%
select(url)
links2_pre22 = head(links_pre22, 1)
url_pre22 <-
links2_pre22 %>%
select(url) %>%
dplyr::pull()
data_source_pre22 <- paste0(dir_n, "Data_sources/", ctr, "/Latvia_data_pre22", ".xlsx")
download.file(url_pre22, data_source_pre22, mode = "wb")
In_vaccine_pre22 = read_excel(data_source_pre22)
m_url_22 <- "https://data.gov.lv/dati/eng/dataset/covid19-vakcinacijas/resource/9320d913-a4a2-4172-b521-73e58c2cfe83"
links_22 <- scraplinks(m_url_22) %>%
filter(str_detect(url, "adp_covid19_vakc")) %>%
select(url)
links2_22 = head(links_22, 1)
url_22 <-
links2_22 %>%
select(url) %>%
dplyr::pull()
data_source_22 <- paste0(dir_n, "Data_sources/", ctr, "/Latvia_data_22_",today(), ".xlsx")
download.file(url_22, data_source_22, mode = "wb")
In_vaccine_22 = read_excel(data_source_22)
## data source to zip afterwards
data_source <- c(data_source_pre22, data_source_22)
In_vaccine <- bind_rows(In_vaccine_pre22, In_vaccine_22)
#Process data
All_ages <- seq(0,100,by=5)
Out_vaccine= In_vaccine %>%
select(Date= `Vakcinācijas datums`,
Measure= `Vakcinācijas posms`,
Age=`Vakcinētās personas vecums`,
Sex= `Vakcinētās personas dzimums`,
Value= `Vakcinēto personu skaits`) %>%
mutate(Measure = substr(Measure,1,6)) %>%
mutate(Measure= case_when(Measure == "1.pote" ~ "Vaccination1",
Measure == "2.pote"~ "Vaccination2",
Measure == "3.pote"~ "Vaccination3",
Measure == "4.pote" ~ "Vaccination4",
Measure == "1.bals" ~ "Vaccination3",
Measure == "2.bals" ~ "Vaccination4",
Measure == "3.bals" ~ "Vaccination5")) %>%
mutate(Sex = case_when(
is.na(Sex)~ "UNK",
Sex == "V" ~ "m",
Sex == "S" ~ "f",
Sex== "N" ~ "UNK"),
Age = ifelse(Age > 100,100,Age),
Age = Age - Age %% 5) %>%
# aggregate to daily sum
group_by(Date, Age, Measure, Sex) %>%
summarize(Value = sum(Value), .groups="drop") %>%
tidyr::complete(Date, Sex, Age = All_ages, Measure, fill = list(Value = 0)) %>%
#group_by(Date,Sex,Age,Measure)%>%
arrange(Sex,Age, Measure, Date)%>%
group_by(Sex, Age, Measure) %>%
mutate(Value = cumsum(Value)) %>%
ungroup()%>%
mutate(AgeInt = 5L,
Metric = "Count") %>%
mutate(
Date = ymd(Date),
Date = ddmmyyyy(Date),
Code = paste0("LV"),
Country = "Latvia",
Region = "All",
Age = as.character(Age)) %>%
select(Country, Region, Code, Date, Sex,
Age, AgeInt, Metric, Measure, Value) %>%
sort_input_data()
## MK: Quality check for the merge and after-wrangling
# Out_vaccine %>%
# mutate(Date = dmy(Date)) %>%
# ggplot(aes(x = Date, y = Value)) +
# geom_point() +
# facet_wrap(~ Measure)
#save output data
write_rds(Out_vaccine, paste0(dir_n, ctr, ".rds"))
log_update(pp = ctr, N = nrow(Out_vaccine))
#zip input data file
zipname <- paste0(dir_n,
"Data_sources/",
ctr,
"/",
ctr,
"_data_",
today(),
".zip")
zip::zipr(zipname,
data_source,
recurse = TRUE,
compression_level = 9,
include_directories = TRUE)
file.remove(data_source)
| /Automation/00_hydra/deprecated/Latvia_vaccine.R | permissive | timriffe/covid_age | R | false | false | 4,364 | r | # Latvia vaccine data
library(here)
source(here("Automation/00_Functions_automation.R"))
library(lubridate)
library(dplyr)
library(tidyverse)
# Assign Drive credentials when the script is verified/run manually
# (automated runs are expected to provide `email` in the environment).
if (!("email" %in% ls())) {
  email <- "jessica_d.1994@yahoo.de"
}
# Country label (placeholder name) and N-drive working directory
ctr <- "Latvia_vaccine"
dir_n <- "N:/COVerAGE-DB/Automation/Hydra/"
# Authenticate Google Drive / Sheets for logging.
# NOTE(review): authentication reads Sys.getenv("email"), not the `email`
# variable assigned above -- confirm which one scheduled runs rely on.
drive_auth(email = Sys.getenv("email"))
gs4_auth(email = Sys.getenv("email"))
# Read in data
## MK: 10.11.2022: since these data are not updated since 07.2022, this file
## (and the relevant dataset) is deprecated; we shift to ECDC data instead.
## Resolve the "adp_covid19_vakc" spreadsheet link on a data.gov.lv resource
## page and download it to `dest_file` (binary mode). Returns `dest_file`
## invisibly. Factored out because the pre-2022 and 2022+ sections below
## were duplicated line for line.
get_latvia_xlsx <- function(resource_url, dest_file) {
  xlsx_url <- scraplinks(resource_url) %>%
    filter(str_detect(url, "adp_covid19_vakc")) %>%
    select(url) %>%
    head(1) %>%
    dplyr::pull()
  download.file(xlsx_url, dest_file, mode = "wb")
  invisible(dest_file)
}
## Pre-2022 archive (static snapshot, no date stamp needed).
m_url_pre22 <- "https://data.gov.lv/dati/eng/dataset/covid19-vakcinacijas/resource/51725018-49f3-40d1-9280-2b13219e026f"
data_source_pre22 <- paste0(dir_n, "Data_sources/", ctr, "/Latvia_data_pre22", ".xlsx")
get_latvia_xlsx(m_url_pre22, data_source_pre22)
In_vaccine_pre22 <- read_excel(data_source_pre22)
## 2022 onwards (file is refreshed, so stamp the local copy with today's date).
m_url_22 <- "https://data.gov.lv/dati/eng/dataset/covid19-vakcinacijas/resource/9320d913-a4a2-4172-b521-73e58c2cfe83"
data_source_22 <- paste0(dir_n, "Data_sources/", ctr, "/Latvia_data_22_", today(), ".xlsx")
get_latvia_xlsx(m_url_22, data_source_22)
In_vaccine_22 <- read_excel(data_source_22)
## Keep both downloaded source files together for archiving afterwards,
## and stack the two raw sheets into a single table for processing.
data_source <- c(data_source_pre22, data_source_22)
In_vaccine <- bind_rows(In_vaccine_pre22, In_vaccine_22)
## Process data: harmonise the Latvian columns to the COVerAGE-DB layout.
All_ages <- seq(0, 100, by = 5)
Out_vaccine <- In_vaccine %>%
  select(Date = `Vakcinācijas datums`,
         Measure = `Vakcinācijas posms`,
         Age = `Vakcinētās personas vecums`,
         Sex = `Vakcinētās personas dzimums`,
         Value = `Vakcinēto personu skaits`) %>%
  ## Map dose labels ("1.pote", "1.bals", ...) to Vaccination1..Vaccination5;
  ## sequential definitions inside one mutate() see the updated Measure.
  mutate(Measure = substr(Measure, 1, 6),
         Measure = case_when(Measure == "1.pote" ~ "Vaccination1",
                             Measure == "2.pote" ~ "Vaccination2",
                             Measure == "3.pote" ~ "Vaccination3",
                             Measure == "4.pote" ~ "Vaccination4",
                             Measure == "1.bals" ~ "Vaccination3",
                             Measure == "2.bals" ~ "Vaccination4",
                             Measure == "3.bals" ~ "Vaccination5")) %>%
  ## Recode sex, cap age at 100 and snap to 5-year bins.
  mutate(Sex = case_when(is.na(Sex) ~ "UNK",
                         Sex == "V" ~ "m",
                         Sex == "S" ~ "f",
                         Sex == "N" ~ "UNK"),
         Age = ifelse(Age > 100, 100, Age),
         Age = Age - Age %% 5) %>%
  ## Aggregate to daily sums per cell.
  group_by(Date, Age, Measure, Sex) %>%
  summarize(Value = sum(Value), .groups = "drop") %>%
  ## Fill the full Date x Sex x Age x Measure grid with zeros so the
  ## cumulative sums below are defined for every series.
  tidyr::complete(Date, Sex, Age = All_ages, Measure, fill = list(Value = 0)) %>%
  arrange(Sex, Age, Measure, Date) %>%
  group_by(Sex, Age, Measure) %>%
  mutate(Value = cumsum(Value)) %>%
  ungroup() %>%
  mutate(AgeInt = 5L,
         Metric = "Count",
         Date = ddmmyyyy(ymd(Date)),
         Code = "LV",
         Country = "Latvia",
         Region = "All",
         Age = as.character(Age)) %>%
  select(Country, Region, Code, Date, Sex,
         Age, AgeInt, Metric, Measure, Value) %>%
  sort_input_data()
## MK: quality check for the merge and after-wrangling (kept for reference)
# Out_vaccine %>%
#   mutate(Date = dmy(Date)) %>%
#   ggplot(aes(x = Date, y = Value)) +
#   geom_point() +
#   facet_wrap(~ Measure)
## Save the harmonised output and log the run.
write_rds(Out_vaccine, paste0(dir_n, ctr, ".rds"))
log_update(pp = ctr, N = nrow(Out_vaccine))
## Archive the raw input files into a dated zip, then remove the originals.
zipname <- paste0(dir_n, "Data_sources/", ctr, "/", ctr, "_data_", today(), ".zip")
zip::zipr(zipname,
          data_source,
          recurse = TRUE,
          compression_level = 9,
          include_directories = TRUE)
file.remove(data_source)
|
#' Rank-based hypothesis tests for HPD matrices
#'
#' \code{pdRankTests} performs a number of generalized rank-based hypothesis tests in the metric space of HPD matrices equipped
#' with the affine-invariant Riemannian metric or Log-Euclidean metric for samples of HPD matrices or samples of sequences
#' (curves) of HPD matrices as described in Chapter 4 of \insertCite{C18}{pdSpecEst}.
#'
#' For samples of \eqn{(d,d)}-dimensional HPD matrices with pooled sample size \eqn{S}, the argument
#' \code{data} is a \eqn{(d,d,S)}-dimensional array of \eqn{(d,d)}-dimensional HPD matrices, where the individual samples are
#' combined along the third array dimension. For samples of sequences of \eqn{(d,d)}-dimensional HPD matrices with pooled sample
#' size \eqn{S}, the argument \code{data} is a \eqn{(d,d,n,S)}-dimensional array of length \eqn{n} sequences
#' of \eqn{(d,d)}-dimensional HPD matrices, where the individual samples are combined along the fourth array dimension. The argument
#' \code{sample_sizes} specifies the sizes of the individual samples so that \code{sum(sample_sizes)} is equal to \code{S}. \cr
#' The available generalized rank-based testing procedures (specified by the argument \code{test}) are:
#' \describe{
#' \item{\code{"rank.sum"}}{Intrinsic Wilcoxon rank-sum test to test for homogeneity of distributions of two independent
#' samples of HPD matrices or samples of sequences of HPD matrices. The usual univariate ranks are replaced by data depth
#' induced ranks obtained with \code{\link{pdDepth}}.}
#' \item{\code{"krusk.wall"}}{Intrinsic Kruskal-Wallis test to test for homogeneity of distributions of more than two independent
#' samples of HPD matrices or samples of sequences of HPD matrices. The usual univariate ranks are replaced by data depth
#' induced ranks obtained with \code{\link{pdDepth}}.}
#' \item{\code{"signed.rank"}}{Intrinsic signed-rank test to test for homogeneity of distributions of independent paired or matched samples
#' of HPD matrices. The intrinsic signed-rank test is \emph{not} based on data depth induced ranks, but on a specific difference score in the Riemannian
#' manifold of HPD matrices equipped with either the affine-invariant Riemannian or Log-Euclidean metric.}
#' \item{\code{"bartels"}}{Intrinsic Bartels-von Neumann test to test for randomness (i.e., exchangeability) within a single independent sample of
#' HPD matrices or a sample of sequences of HPD matrices. The usual univariate ranks are replaced by data depth induced
#' ranks obtained with \code{\link{pdDepth}}.}
#' }
#' The function computes the generalized rank-based test statistics in the \emph{complete} metric space of HPD matrices equipped with one of the following metrics:
#' (i) the Riemannian metric (default) as detailed in e.g., \insertCite{B09}{pdSpecEst}[Chapter 6] or \insertCite{PFA05}{pdSpecEst}; or (ii) the Log-Euclidean metric,
#' the Euclidean inner product between matrix logarithms. The default Riemannian metric is invariant under congruence transformation by any invertible matrix,
#' whereas the Log-Euclidean metric is only invariant under congruence transformation by unitary matrices, see \insertCite{C18}{pdSpecEst}[Chapter 4] for more details.
#'
#' @note The intrinsic signed-rank test also provides a valid test for equivalence of spectral matrices of two multivariate stationary time
#' series based on the HPD periodogram matrices obtained via \code{\link{pdPgram}}, see \insertCite{C18}{pdSpecEst}[Chapter 4] for the details.
#'
#' @note The function does not check for positive definiteness of the input matrices, and may fail
#' if matrices are close to being singular.
#'
#' @note The data depth computations under the Riemannian metric are more involved than under the Log-Euclidean
#' metric, and may therefore result in (significantly) higher computation times.
#'
#' @param data either a \eqn{(d,d,S)}-dimensional array corresponding to an array of pooled individual samples of \eqn{(d,d)}-dimensional
#' HPD matrices, or a \eqn{(d,d,n,S)}-dimensional array corresponding to an array of pooled individual samples of length \eqn{n} sequences
#' of \eqn{(d,d)}-dimensional HPD matrices.
#' @param sample_sizes a numeric vector specifying the individual sample sizes in the pooled sample \code{data}, such that \code{sum(sample_sizes)} is
#' equal to \code{S}. Not required for tests \code{"signed-rank"} and \code{"bartels"}, as the sample sizes are automatically determined from the input array
#' \code{data}.
#' @param test rank-based hypothesis testing procedure, one of \code{"rank.sum"}, \code{"krusk.wall"}, \code{"signed.rank"}, \code{"bartels"} explained
#' in the Details section below.
#' @param depth data depth measure used in the rank-based tests, one of \code{"gdd"}, \code{"zonoid"}, or \code{"spatial"} corresponding to the
#' geodesic distance depth, intrinsic zonoid depth and intrinsic spatial depth respectively. Defaults to \code{"gdd"}. Not required for test
#' \code{"signed.rank"}. See the documentation of the function \code{\link{pdDepth}} for additional details about the different depth measures.
#' @param metric the metric that the space of HPD matrices is equipped with, either \code{"Riemannian"} or \code{"logEuclidean"}. Defaults to
#' \code{"Riemannian"}.
#'
#' @return The function returns a list with five components:
#' \item{test }{name of the rank-based test}
#' \item{p.value }{p-value of the test}
#' \item{statistic }{computed test statistic}
#' \item{null.distr }{distribution of the test statistic under the null hypothesis}
#' \item{depth.values }{computed data depth values (if available)}
#'
#' @examples
#' ## null hypothesis is true
#' data <- replicate(100, Expm(diag(2), H.coeff(rnorm(4), inverse = TRUE)))
#' pdRankTests(data, sample_sizes = c(50, 50), test = "rank.sum") ## homogeneity 2 samples
#' pdRankTests(data, sample_sizes = rep(25, 4), test = "krusk.wall") ## homogeneity 4 samples
#' pdRankTests(data, test = "bartels") ## randomness
#'
#' ## null hypothesis is false
#' data1 <- array(c(data, replicate(50, Expm(diag(2), H.coeff(0.5 * rnorm(4), inverse = TRUE)))),
#' dim = c(2,2,150))
#' pdRankTests(data1, sample_sizes = c(100, 50), test = "rank.sum")
#' pdRankTests(data1, sample_sizes = rep(50, 3), test = "krusk.wall")
#' pdRankTests(data1, test = "bartels")
#'
#' \dontrun{
#' ## signed-rank test for equivalence of spectra of multivariate time series
#' ## ARMA(1,1) process: Example 11.4.1 in (Brockwell and Davis, 1991)
#' Phi <- array(c(0.7, 0, 0, 0.6, rep(0, 4)), dim = c(2, 2, 2))
#' Theta <- array(c(0.5, -0.7, 0.6, 0.8, rep(0, 4)), dim = c(2, 2, 2))
#' Sigma <- matrix(c(1, 0.71, 0.71, 2), nrow = 2)
#' pgram <- function(Sigma) pdPgram(rARMA(2^8, 2, Phi, Theta, Sigma)$X)$P
#'
#' ## null is true
#' pdRankTests(array(c(pgram(Sigma), pgram(Sigma)), dim = c(2,2,2^8)), test = "signed.rank")
#' ## null is false
#' pdRankTests(array(c(pgram(Sigma), pgram(0.5 * Sigma)), dim = c(2,2,2^8)), test = "signed.rank")
#' }
#' @seealso \code{\link{pdDepth}}, \code{\link{pdPgram}}
#'
#' @references
#' \insertAllCited{}
#'
#' @export
pdRankTests <- function(data, sample_sizes, test = c("rank.sum", "krusk.wall", "signed.rank", "bartels"),
                        depth = c("gdd", "zonoid", "spatial"), metric = c("Riemannian", "logEuclidean")) {
  ## Generalized rank-based hypothesis tests for samples of HPD matrices (or
  ## samples of curves of HPD matrices); see the roxygen block above for the
  ## full contract. Returns a list with components: test, p.value, statistic,
  ## null.distr and (for the depth-based tests) depth.values.
  if (missing(depth)) {
    depth <- "gdd"
  }
  ddim <- dim(data)
  if (missing(sample_sizes)) {
    sample_sizes <- NA
  }
  metric <- match.arg(metric, c("Riemannian", "logEuclidean"))
  test <- match.arg(test, c("rank.sum", "krusk.wall", "signed.rank", "bartels"))
  depth <- match.arg(depth, c("gdd", "zonoid", "spatial"))
  ## FIX: corrected typo ("lenghts" -> "lengths") and the argument name in the
  ## user-facing message ('samples' does not exist; the argument is 'data').
  err.message <- "Incorrect input lengths for arguments: 'data' and/or 'sample_sizes',
                  consult the function documentation for the requested inputs."
  n <- sample_sizes
  ## With exactly two samples, Kruskal-Wallis reduces to the rank-sum test.
  if ((test == "krusk.wall") & (length(n) == 2)) {
    warning("Argument 'test' changed to 'rank.sum' to test for homogeneity of
            distributions of two independent samples of HPD matrices.")
    test <- "rank.sum"
  }
  ## Intrinsic rank-sum test (two independent samples); univariate ranks are
  ## replaced by data-depth-induced ranks of the pooled sample.
  if (test == "rank.sum") {
    if (!isTRUE((((length(ddim) == 3) & (ddim[3] == sum(n))) | ((length(ddim) == 4) &
                  (ddim[4] == sum(n)))) & (ddim[1] == ddim[2]) & (length(n) == 2))) {
      stop(err.message)
    }
    dd <- pdDepth(X = data, method = depth, metric = metric)
    ## Standardized Wilcoxon rank-sum statistic of the first sample's ranks.
    T1 <- (sum(rank(dd, ties.method = "random")[seq_len(n[1])]) - n[1] * (sum(n) + 1)/2) /
      sqrt(n[1] * n[2] * (sum(n) + 1)/12)
    output <- list(test = "Intrinsic Wilcoxon rank-sum", p.value = 2 * stats::pnorm(abs(T1), lower.tail = FALSE), statistic = T1,
                   null.distr = "Standard normal distribution", depth.values = dd)
  }
  ## Intrinsic Kruskal-Wallis test (more than two independent samples)
  if (test == "krusk.wall") {
    N <- sum(n)
    if (!isTRUE((((length(ddim) == 3) & (ddim[3] == N)) | ((length(ddim) == 4) &
                  (ddim[4] == N))) & (ddim[1] == ddim[2]) & (length(n) > 2))) {
      stop(err.message)
    }
    dd <- pdDepth(X = data, method = depth, metric = metric)
    ## Mean depth-induced rank per sample.
    R_bar <- unname(unlist(lapply(split(rank(dd, ties.method = "random"),
                                        f = rep(seq_along(n), times = n)), mean)))
    T2 <- 12/(N * (N + 1)) * sum(n * (R_bar - (N + 1)/2)^2)
    ## FIX: use the namespaced stats::pchisq consistently (the second call
    ## was unqualified, unlike every other distribution call in this file).
    output <- list(test = "Intrinsic Kruskal-Wallis", p.value = min(stats::pchisq(T2, df = 2, lower.tail = TRUE),
                   stats::pchisq(T2, df = 2, lower.tail = FALSE)), statistic = T2,
                   null.distr = "Chi-squared distribution (df = 2)", depth.values = dd)
  }
  ## Intrinsic signed-rank test (paired samples stacked as [first; second]
  ## along the third array dimension).
  if (test == "signed.rank") {
    if (!isTRUE((length(ddim) == 3) & (ddim[1] == ddim[2]) & (ddim[3] %% 2 == 0))) {
      stop(err.message)
    }
    n <- ddim[3]/2
    d <- ddim[1]
    ## Per-pair difference score (renamed from `diff`, which shadowed
    ## base::diff); vapply pins the numeric(1) return type.
    if (metric == "Riemannian") {
      ## Trace of the Riemannian logarithm of the paired quotient.
      ast <- function(A, B) t(Conj(A)) %*% B %*% A
      dscore <- vapply(seq_len(n), function(i) Re(sum(diag(Logm(diag(d), ast(iSqrt(data[, , n + i]), data[, , i]))))), numeric(1))
    } else {
      ## Log-Euclidean score: trace difference of matrix logarithms.
      dscore <- vapply(seq_len(n), function(i) Re(sum(diag(Logm(diag(d), data[, , n + i]) - Logm(diag(d), data[, , i])))), numeric(1))
    }
    T3 <- stats::wilcox.test(x = dscore, y = rep(0, n), paired = TRUE, correct = TRUE)
    output <- list(test = "Intrinsic Wilcoxon signed-rank", p.value = T3$p.value, statistic = T3$statistic, null.distr = T3$method)
  }
  ## Intrinsic Bartels-von Neumann test for randomness (single sample)
  if (test == "bartels") {
    if (!isTRUE(((length(ddim) == 3) | ((length(ddim) == 4))) & (ddim[1] == ddim[2]))) {
      stop(err.message)
    }
    n <- utils::tail(ddim, 1)
    dd <- pdDepth(X = data, method = depth, metric = metric)
    ## Von Neumann ratio of successive depth-induced rank differences.
    T4 <- sum(diff(rank(dd, ties.method = "random"))^2)/(n * (n^2 - 1)/12)
    sigma <- sqrt(4 * (n - 2) * (5 * n^2 - 2 * n - 9)/(5 * n * (n + 1) * (n - 1)^2))
    ## FIX: namespaced stats::pnorm for consistency with the rank-sum branch.
    output <- list(test = "Intrinsic Bartels-von Neumann", p.value = 2 * stats::pnorm(abs((T4 - 2)/sigma), lower.tail = FALSE),
                   statistic = (T4 - 2)/sigma, null.distr = "Standard normal distribution",
                   depth.values = dd)
  }
  return(output)
}
| /R/ranktests.R | no_license | cran/pdSpecEst | R | false | false | 11,181 | r | #' Rank-based hypothesis tests for HPD matrices
#'
#' \code{pdRankTests} performs a number of generalized rank-based hypothesis tests in the metric space of HPD matrices equipped
#' with the affine-invariant Riemannian metric or Log-Euclidean metric for samples of HPD matrices or samples of sequences
#' (curves) of HPD matrices as described in Chapter 4 of \insertCite{C18}{pdSpecEst}.
#'
#' For samples of \eqn{(d,d)}-dimensional HPD matrices with pooled sample size \eqn{S}, the argument
#' \code{data} is a \eqn{(d,d,S)}-dimensional array of \eqn{(d,d)}-dimensional HPD matrices, where the individual samples are
#' combined along the third array dimension. For samples of sequences of \eqn{(d,d)}-dimensional HPD matrices with pooled sample
#' size \eqn{S}, the argument \code{data} is a \eqn{(d,d,n,S)}-dimensional array of length \eqn{n} sequences
#' of \eqn{(d,d)}-dimensional HPD matrices, where the individual samples are combined along the fourth array dimension. The argument
#' \code{sample_sizes} specifies the sizes of the individual samples so that \code{sum(sample_sizes)} is equal to \code{S}. \cr
#' The available generalized rank-based testing procedures (specified by the argument \code{test}) are:
#' \describe{
#' \item{\code{"rank.sum"}}{Intrinsic Wilcoxon rank-sum test to test for homogeneity of distributions of two independent
#' samples of HPD matrices or samples of sequences of HPD matrices. The usual univariate ranks are replaced by data depth
#' induced ranks obtained with \code{\link{pdDepth}}.}
#' \item{\code{"krusk.wall"}}{Intrinsic Kruskal-Wallis test to test for homogeneity of distributions of more than two independent
#' samples of HPD matrices or samples of sequences of HPD matrices. The usual univariate ranks are replaced by data depth
#' induced ranks obtained with \code{\link{pdDepth}}.}
#' \item{\code{"signed.rank"}}{Intrinsic signed-rank test to test for homogeneity of distributions of independent paired or matched samples
#' of HPD matrices. The intrinsic signed-rank test is \emph{not} based on data depth induced ranks, but on a specific difference score in the Riemannian
#' manifold of HPD matrices equipped with either the affine-invariant Riemannian or Log-Euclidean metric.}
#' \item{\code{"bartels"}}{Intrinsic Bartels-von Neumann test to test for randomness (i.e., exchangeability) within a single independent sample of
#' HPD matrices or a sample of sequences of HPD matrices. The usual univariate ranks are replaced by data depth induced
#' ranks obtained with \code{\link{pdDepth}}.}
#' }
#' The function computes the generalized rank-based test statistics in the \emph{complete} metric space of HPD matrices equipped with one of the following metrics:
#' (i) the Riemannian metric (default) as detailed in e.g., \insertCite{B09}{pdSpecEst}[Chapter 6] or \insertCite{PFA05}{pdSpecEst}; or (ii) the Log-Euclidean metric,
#' the Euclidean inner product between matrix logarithms. The default Riemannian metric is invariant under congruence transformation by any invertible matrix,
#' whereas the Log-Euclidean metric is only invariant under congruence transformation by unitary matrices, see \insertCite{C18}{pdSpecEst}[Chapter 4] for more details.
#'
#' @note The intrinsic signed-rank test also provides a valid test for equivalence of spectral matrices of two multivariate stationary time
#' series based on the HPD periodogram matrices obtained via \code{\link{pdPgram}}, see \insertCite{C18}{pdSpecEst}[Chapter 4] for the details.
#'
#' @note The function does not check for positive definiteness of the input matrices, and may fail
#' if matrices are close to being singular.
#'
#' @note The data depth computations under the Riemannian metric are more involved than under the Log-Euclidean
#' metric, and may therefore result in (significantly) higher computation times.
#'
#' @param data either a \eqn{(d,d,S)}-dimensional array corresponding to an array of pooled individual samples of \eqn{(d,d)}-dimensional
#' HPD matrices, or a \eqn{(d,d,n,S)}-dimensional array corresponding to an array of pooled individual samples of length \eqn{n} sequences
#' of \eqn{(d,d)}-dimensional HPD matrices.
#' @param sample_sizes a numeric vector specifying the individual sample sizes in the pooled sample \code{data}, such that \code{sum(sample_sizes)} is
#' equal to \code{S}. Not required for tests \code{"signed-rank"} and \code{"bartels"}, as the sample sizes are automatically determined from the input array
#' \code{data}.
#' @param test rank-based hypothesis testing procedure, one of \code{"rank.sum"}, \code{"krusk.wall"}, \code{"signed.rank"}, \code{"bartels"} explained
#' in the Details section below.
#' @param depth data depth measure used in the rank-based tests, one of \code{"gdd"}, \code{"zonoid"}, or \code{"spatial"} corresponding to the
#' geodesic distance depth, intrinsic zonoid depth and intrinsic spatial depth respectively. Defaults to \code{"gdd"}. Not required for test
#' \code{"signed.rank"}. See the documentation of the function \code{\link{pdDepth}} for additional details about the different depth measures.
#' @param metric the metric that the space of HPD matrices is equipped with, either \code{"Riemannian"} or \code{"logEuclidean"}. Defaults to
#' \code{"Riemannian"}.
#'
#' @return The function returns a list with five components:
#' \item{test }{name of the rank-based test}
#' \item{p.value }{p-value of the test}
#' \item{statistic }{computed test statistic}
#' \item{null.distr }{distribution of the test statistic under the null hypothesis}
#' \item{depth.values }{computed data depth values (if available)}
#'
#' @examples
#' ## null hypothesis is true
#' data <- replicate(100, Expm(diag(2), H.coeff(rnorm(4), inverse = TRUE)))
#' pdRankTests(data, sample_sizes = c(50, 50), test = "rank.sum") ## homogeneity 2 samples
#' pdRankTests(data, sample_sizes = rep(25, 4), test = "krusk.wall") ## homogeneity 4 samples
#' pdRankTests(data, test = "bartels") ## randomness
#'
#' ## null hypothesis is false
#' data1 <- array(c(data, replicate(50, Expm(diag(2), H.coeff(0.5 * rnorm(4), inverse = TRUE)))),
#' dim = c(2,2,150))
#' pdRankTests(data1, sample_sizes = c(100, 50), test = "rank.sum")
#' pdRankTests(data1, sample_sizes = rep(50, 3), test = "krusk.wall")
#' pdRankTests(data1, test = "bartels")
#'
#' \dontrun{
#' ## signed-rank test for equivalence of spectra of multivariate time series
#' ## ARMA(1,1) process: Example 11.4.1 in (Brockwell and Davis, 1991)
#' Phi <- array(c(0.7, 0, 0, 0.6, rep(0, 4)), dim = c(2, 2, 2))
#' Theta <- array(c(0.5, -0.7, 0.6, 0.8, rep(0, 4)), dim = c(2, 2, 2))
#' Sigma <- matrix(c(1, 0.71, 0.71, 2), nrow = 2)
#' pgram <- function(Sigma) pdPgram(rARMA(2^8, 2, Phi, Theta, Sigma)$X)$P
#'
#' ## null is true
#' pdRankTests(array(c(pgram(Sigma), pgram(Sigma)), dim = c(2,2,2^8)), test = "signed.rank")
#' ## null is false
#' pdRankTests(array(c(pgram(Sigma), pgram(0.5 * Sigma)), dim = c(2,2,2^8)), test = "signed.rank")
#' }
#' @seealso \code{\link{pdDepth}}, \code{\link{pdPgram}}
#'
#' @references
#' \insertAllCited{}
#'
#' @export
pdRankTests <- function(data, sample_sizes, test = c("rank.sum", "krusk.wall", "signed.rank", "bartels"),
                        depth = c("gdd", "zonoid", "spatial"), metric = c("Riemannian", "logEuclidean")) {
  ## Generalized rank-based hypothesis tests for samples of HPD matrices (or
  ## samples of curves of HPD matrices); see the roxygen block above for the
  ## full contract. Returns a list with components: test, p.value, statistic,
  ## null.distr and (for the depth-based tests) depth.values.
  if (missing(depth)) {
    depth <- "gdd"
  }
  ddim <- dim(data)
  if (missing(sample_sizes)) {
    sample_sizes <- NA
  }
  metric <- match.arg(metric, c("Riemannian", "logEuclidean"))
  test <- match.arg(test, c("rank.sum", "krusk.wall", "signed.rank", "bartels"))
  depth <- match.arg(depth, c("gdd", "zonoid", "spatial"))
  ## FIX: corrected typo ("lenghts" -> "lengths") and the argument name in the
  ## user-facing message ('samples' does not exist; the argument is 'data').
  err.message <- "Incorrect input lengths for arguments: 'data' and/or 'sample_sizes',
                  consult the function documentation for the requested inputs."
  n <- sample_sizes
  ## With exactly two samples, Kruskal-Wallis reduces to the rank-sum test.
  if ((test == "krusk.wall") & (length(n) == 2)) {
    warning("Argument 'test' changed to 'rank.sum' to test for homogeneity of
            distributions of two independent samples of HPD matrices.")
    test <- "rank.sum"
  }
  ## Intrinsic rank-sum test (two independent samples); univariate ranks are
  ## replaced by data-depth-induced ranks of the pooled sample.
  if (test == "rank.sum") {
    if (!isTRUE((((length(ddim) == 3) & (ddim[3] == sum(n))) | ((length(ddim) == 4) &
                  (ddim[4] == sum(n)))) & (ddim[1] == ddim[2]) & (length(n) == 2))) {
      stop(err.message)
    }
    dd <- pdDepth(X = data, method = depth, metric = metric)
    ## Standardized Wilcoxon rank-sum statistic of the first sample's ranks.
    T1 <- (sum(rank(dd, ties.method = "random")[seq_len(n[1])]) - n[1] * (sum(n) + 1)/2) /
      sqrt(n[1] * n[2] * (sum(n) + 1)/12)
    output <- list(test = "Intrinsic Wilcoxon rank-sum", p.value = 2 * stats::pnorm(abs(T1), lower.tail = FALSE), statistic = T1,
                   null.distr = "Standard normal distribution", depth.values = dd)
  }
  ## Intrinsic Kruskal-Wallis test (more than two independent samples)
  if (test == "krusk.wall") {
    N <- sum(n)
    if (!isTRUE((((length(ddim) == 3) & (ddim[3] == N)) | ((length(ddim) == 4) &
                  (ddim[4] == N))) & (ddim[1] == ddim[2]) & (length(n) > 2))) {
      stop(err.message)
    }
    dd <- pdDepth(X = data, method = depth, metric = metric)
    ## Mean depth-induced rank per sample.
    R_bar <- unname(unlist(lapply(split(rank(dd, ties.method = "random"),
                                        f = rep(seq_along(n), times = n)), mean)))
    T2 <- 12/(N * (N + 1)) * sum(n * (R_bar - (N + 1)/2)^2)
    ## FIX: use the namespaced stats::pchisq consistently (the second call
    ## was unqualified, unlike every other distribution call in this file).
    output <- list(test = "Intrinsic Kruskal-Wallis", p.value = min(stats::pchisq(T2, df = 2, lower.tail = TRUE),
                   stats::pchisq(T2, df = 2, lower.tail = FALSE)), statistic = T2,
                   null.distr = "Chi-squared distribution (df = 2)", depth.values = dd)
  }
  ## Intrinsic signed-rank test (paired samples stacked as [first; second]
  ## along the third array dimension).
  if (test == "signed.rank") {
    if (!isTRUE((length(ddim) == 3) & (ddim[1] == ddim[2]) & (ddim[3] %% 2 == 0))) {
      stop(err.message)
    }
    n <- ddim[3]/2
    d <- ddim[1]
    ## Per-pair difference score (renamed from `diff`, which shadowed
    ## base::diff); vapply pins the numeric(1) return type.
    if (metric == "Riemannian") {
      ## Trace of the Riemannian logarithm of the paired quotient.
      ast <- function(A, B) t(Conj(A)) %*% B %*% A
      dscore <- vapply(seq_len(n), function(i) Re(sum(diag(Logm(diag(d), ast(iSqrt(data[, , n + i]), data[, , i]))))), numeric(1))
    } else {
      ## Log-Euclidean score: trace difference of matrix logarithms.
      dscore <- vapply(seq_len(n), function(i) Re(sum(diag(Logm(diag(d), data[, , n + i]) - Logm(diag(d), data[, , i])))), numeric(1))
    }
    T3 <- stats::wilcox.test(x = dscore, y = rep(0, n), paired = TRUE, correct = TRUE)
    output <- list(test = "Intrinsic Wilcoxon signed-rank", p.value = T3$p.value, statistic = T3$statistic, null.distr = T3$method)
  }
  ## Intrinsic Bartels-von Neumann test for randomness (single sample)
  if (test == "bartels") {
    if (!isTRUE(((length(ddim) == 3) | ((length(ddim) == 4))) & (ddim[1] == ddim[2]))) {
      stop(err.message)
    }
    n <- utils::tail(ddim, 1)
    dd <- pdDepth(X = data, method = depth, metric = metric)
    ## Von Neumann ratio of successive depth-induced rank differences.
    T4 <- sum(diff(rank(dd, ties.method = "random"))^2)/(n * (n^2 - 1)/12)
    sigma <- sqrt(4 * (n - 2) * (5 * n^2 - 2 * n - 9)/(5 * n * (n + 1) * (n - 1)^2))
    ## FIX: namespaced stats::pnorm for consistency with the rank-sum branch.
    output <- list(test = "Intrinsic Bartels-von Neumann", p.value = 2 * stats::pnorm(abs((T4 - 2)/sigma), lower.tail = FALSE),
                   statistic = (T4 - 2)/sigma, null.distr = "Standard normal distribution",
                   depth.values = dd)
  }
  return(output)
}
|
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 2.11320414100646e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_omitMatrix/AFL_cpp_omitMatrix/cpp_omitMatrix_valgrind_files/1615846386-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,090 | r | testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 2.11320414100646e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 45
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 45
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query15_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 32
c no.of clauses 45
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 45
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query15_1344n.qdimacs 32 45 E1 [] 0 8 24 45 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query15_1344n/exquery_query15_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 694 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 45
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 45
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query15_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 32
c no.of clauses 45
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 45
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query15_1344n.qdimacs 32 45 E1 [] 0 8 24 45 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eFrame.R
\name{eFrameR}
\alias{eFrameR}
\title{eFrameR}
\usage{
eFrameR(y, type, siteCovs = NULL)
}
\arguments{
\item{y}{An MxJ matrix of the observed removal data, where M is the
number of sites and J is the maximum number of removal (primary)
periods per site. Each primary period can consist of k secondary
periods but this is not used here.}
\item{type}{sampling method. Currently supports "removal" for removal
sampling and "double" for double observer sampling}
\item{siteCovs}{A \code{data.frame} of covariates that vary at the
site level. This should have M rows and one column per covariate}
}
\value{
an \code{eFrameR} object holding the response data and
covariates required for removal models
}
\description{
\code{eFrameR} creates an eFrameR data object for use with removal models.
}
\examples{
rem<- san_nic_rem$rem
emf<- eFrameR(rem, type="removal")
summary(emf)
}
| /man/eFrameR.Rd | no_license | scrogster/eradicate | R | false | true | 969 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eFrame.R
\name{eFrameR}
\alias{eFrameR}
\title{eFrameR}
\usage{
eFrameR(y, type, siteCovs = NULL)
}
\arguments{
\item{y}{An MxJ matrix of the observed removal data, where M is the
number of sites and J is the maximum number of removal (primary)
periods per site. Each primary period can consist of k secondary
periods but this is not used here.}
\item{type}{sampling method. Currently supports "removal" for removal
sampling and "double" for double observer sampling}
\item{siteCovs}{A \code{data.frame} of covariates that vary at the
site level. This should have M rows and one column per covariate}
}
\value{
an \code{eFrameR} object holding the response data and
covariates required for removal models
}
\description{
\code{eFrameR} creates an eFrameR data object for use with removal models.
}
\examples{
rem<- san_nic_rem$rem
emf<- eFrameR(rem, type="removal")
summary(emf)
}
|
corr <- function(directory, threshold = 0) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all
  ## variables) required to compute the correlation between
  ## nitrate and sulfate; the default is 0
  ## Returns a numeric vector of correlations (unrounded), one per
  ## qualifying file, in the order the files are listed.

  ## BUG FIX: the original listed files from the hard-coded folder
  ## "specdata", silently ignoring the 'directory' argument.
  fileNames <- list.files(directory, pattern = "*.csv", full.names = TRUE)

  ## Iterating over the (possibly empty) file vector directly also fixes the
  ## original 1:numFiles loop, which ran with i in c(1, 0) for an empty dir.
  result <- numeric(0)
  for (fileName in fileNames) {
    data <- read.csv(fileName)
    ## Count complete (sulfate, nitrate) pairs from the file itself instead
    ## of re-reading it through a separate complete() helper.
    nobs <- sum(complete.cases(data$sulfate, data$nitrate))
    if (nobs > threshold) {
      result <- c(result, cor(data$sulfate, data$nitrate, use = "complete.obs"))
    }
  }
  result
}
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'threshold' is a numeric vector of length 1 indicating the
## number of completely observed observations (on all
## variables) required to compute the correlation between
## nitrate and sulfate; the default is 0
## Return a numeric vector of correlations
## NOTE: Do not round the result!
# Get all files in the directory
fileNames <- list.files("specdata", pattern = "*.csv", full.names = TRUE)
numFiles <- length(fileNames)
# Calculate the correlations
result <- c()
for (i in 1:numFiles) {
if ((complete(directory, i)$nobs) > threshold) {
data <- read.csv(fileNames[i])
sulfate <- data$sulfate
nitrate <- data$nitrate
result <- c(result, cor(sulfate, nitrate, use = "complete.obs"))
}
}
result
} |
#' A Gadget for testing gsub regular expressions.
#'
#' Supply a regex pattern, a string where matches
#' are sought, and a replacement. This gadget allows
#' interactive regex testing. When the correct regex is found,
#' clicking "Done" will return the current regex.
#'
#' @param pattern The regular expression to match.
#'
#' @param x The character string where matches are sought.
#'
#' @param replace The replacement text to substitute for the
#'   matching pattern.
#'
#' @examples
#'
#' regexTest(pattern = 'night', replace = "day", x = "We can turn day into night with this Gadget")
#'
#' @import miniUI
#' @import shiny
#' @export
regexTest = function(pattern="night",
                     x = "We can turn day into night with this Gadget",
                     replace = "day") {
  # UI: one text input each for the target text, the pattern, and the
  # replacement, with the live gsub() result rendered underneath.
  ui = miniPage(
    gadgetTitleBar("Basic gsub tester"),
    miniContentPanel(
      textInput("text","Text:", x),
      textInput('pattern','Pattern to replace:', pattern),
      textInput("replacement","Text to substitute", replace),
      textOutput("out")
    )
  )
  # Server: re-run gsub() whenever any of the three inputs changes.
  server = function(input, output, session) {
    output$out = renderText( gsub(pattern = input$pattern,
                                  replace = input$replacement,
                                  x = input$text) )
    # "Done" returns whatever pattern is currently in the pattern box.
    observeEvent(input$done, {
      returnValue <- input$pattern
      stopApp(returnValue)
    })
  }
  runGadget(ui, server)
}
| /R/regexTest.R | no_license | mosaddek-hossain/BiocGadgets | R | false | false | 1,415 | r | #' A Gadget for testing gsub regular expressions.
#'
#' Supply a regex pattern, a string where matches
#' are sought, and a replacement. This gadget allows
#' interactive regex testing. When the correct regex is found,
#' clicking "Done" will return the current regex.
#'
#' @param pattern The regular expression to match.
#'
#' @param x The character string where matches are sought.
#'
#' @param replace The replacement text to substitute for the
#'   matching pattern.
#'
#' @examples
#'
#' regexTest(pattern = 'night', replace = "day", x = "We can turn day into night with this Gadget")
#'
#' @import miniUI
#' @import shiny
#' @export
regexTest = function(pattern="night",
                     x = "We can turn day into night with this Gadget",
                     replace = "day") {
  # UI: one text input each for the target text, the pattern, and the
  # replacement, with the live gsub() result rendered underneath.
  ui = miniPage(
    gadgetTitleBar("Basic gsub tester"),
    miniContentPanel(
      textInput("text","Text:", x),
      textInput('pattern','Pattern to replace:', pattern),
      textInput("replacement","Text to substitute", replace),
      textOutput("out")
    )
  )
  # Server: re-run gsub() whenever any of the three inputs changes.
  server = function(input, output, session) {
    output$out = renderText( gsub(pattern = input$pattern,
                                  replace = input$replacement,
                                  x = input$text) )
    # "Done" returns whatever pattern is currently in the pattern box.
    observeEvent(input$done, {
      returnValue <- input$pattern
      stopApp(returnValue)
    })
  }
  runGadget(ui, server)
}
|
#' An azimuth/zenith coordinate system
#'
#' This coordinate system imagines that your data will be plotted on the bottom of a
#' sphere, oriented such that 0 azimuth is at the positive x-axis, and 0 zenith is at the
#' top of the bowl. Increasing azimuth is counterclockwise, and zenith angles that are more
#' negative move to the center of the plot. Positive zenith angles are not relevant in this
#' coordinate system, and are squished to zero with a warning. A more approachable
#' coordinate system is \link{coord_bearing_plunge}.
#'
#' @param projection Use stereographic to look at the bottom of the sphere as if you are
#'   situated at the top of the sphere (probably what you want); use orthographic to look
#'   at the bottom of the sphere as if you were very far away from it.
#'
#' @export
#'
#' @examples
#' library(ggplot2)
#'
#' df <- data.frame(
#'   azimuth = seq(0, 4*pi, length.out = 40),
#'   zenith = seq(0, -pi / 2, length.out = 40)
#' )
#'
#' ggplot(df, aes(azimuth, zenith)) +
#'   geom_path(col = "red") +
#'   geom_point(col = "blue") +
#'   coord_azimuth_zenith() +
#'   scale_x_continuous(
#'     breaks = radians(seq(0, 330, by = 30)),
#'     labels = degrees
#'   )
#'
#'
coord_azimuth_zenith <- function(projection = c("stereographic", "orthographic")) {
  # match.arg() validates the choice and defaults to the first option
  projection <- match.arg(projection)
  ggplot2::ggproto(NULL, CoordAzimuthZenith, projection = projection)
}
#' A bearing/dip coordinate system
#'
#' Plots data on the bottom of a sphere, viewed from the inside, with a
#' bearing of 0 due north and a plunge of 0 horizontal. Larger plunge
#' values move points toward the centre of the plot; larger bearing values
#' move points clockwise from north.
#'
#' @inheritParams coord_azimuth_zenith
#' @export
#'
#' @examples
#' library(ggplot2)
#'
#' df <- data.frame(
#'   bearing = seq(0, 4*180, length.out = 40),
#'   plunge = seq(0, 90, length.out = 40)
#' )
#'
#' ggplot(df, aes(bearing, plunge)) +
#'   geom_path(col = "red") +
#'   geom_point(col = "blue") +
#'   coord_bearing_plunge() +
#'   scale_x_continuous(breaks = seq(0, 330, by = 30))
#'
coord_bearing_plunge <- function(projection = c("stereographic", "orthographic")) {
  projection <- match.arg(projection)
  # Same engine as coord_azimuth_zenith(), but with transformations that
  # map degree-based bearing/plunge input onto azimuth/zenith radians.
  ggplot2::ggproto(
    NULL, CoordAzimuthZenith,
    trans_azimuth = bearing_trans,
    trans_zenith = plunge_trans,
    projection = projection
  )
}
#' Pretty breaks for coord_bearing_plunge
#'
#' @param breaks,minor_breaks Where break lines and labels are drawn
#' @param labels Labels or label function that produces labels from breaks
#' @param ... Passed to \link[ggplot2]{scale_x_continuous}.
#'
#' @export
#'
scale_x_bearing <- function(
  breaks = seq(0, 330, by = 30),
  minor_breaks = seq(0, 350, by = 10), ...) {
  # Leave out-of-bounds values untouched: the coordinate system handles
  # values outside 0..360 itself.
  keep_oob <- function(x, ...) x
  ggplot2::scale_x_continuous(
    limits = c(0, 360),
    breaks = breaks,
    minor_breaks = minor_breaks,
    oob = keep_oob,
    ...
  )
}
#' @rdname scale_x_bearing
#' @export
scale_x_compass <- function(breaks = c(0, 90, 180, 270), labels = c("N", "E", "S", "W"),
                            minor_breaks = seq(0, 330, by = 30), ...) {
  # Leave out-of-bounds values untouched: the coordinate system handles
  # values outside 0..360 itself.
  keep_oob <- function(x, ...) x
  ggplot2::scale_x_continuous(
    limits = c(0, 360),
    breaks = breaks,
    labels = labels,
    minor_breaks = minor_breaks,
    oob = keep_oob,
    ...
  )
}
#' Pretty breaks for coord_azimuth_zenith
#'
#' @inheritParams scale_x_bearing
#'
#' @export
#'
scale_x_azimuth <- function(
  breaks = seq(0, 7*pi/4, by = pi/4),
  labels = pi_labels, ...) {
  # Leave out-of-bounds values untouched: the coordinate system handles
  # values outside 0..2*pi itself.
  keep_oob <- function(x, ...) x
  ggplot2::scale_x_continuous(
    limits = c(0, 2*pi),
    breaks = breaks,
    labels = labels,
    oob = keep_oob,
    ...
  )
}
#' @rdname scale_x_azimuth
#' @export
pi_labels <- function(breaks, ...) {
  # Express each break as a multiple of pi (e.g. "0.5 pi"); extra
  # arguments are forwarded to format().
  multiples <- format(breaks / pi, ...)
  out <- paste(multiples, "pi")
  # Special cases: 0 is shown bare, and a leading "1" (or "1.0...") is
  # dropped so that pi itself reads "pi" rather than "1 pi".
  out[breaks == 0] <- "0"
  at_pi <- breaks == pi
  out[at_pi] <- gsub("^1(\\.0+)?\\s*", "", out[at_pi])
  out
}
# Coordinate transforms used by coord_bearing_plunge(): both convert
# degree-based field measurements into the radian azimuth/zenith convention
# used by CoordAzimuthZenith. radians()/degrees() are package helpers --
# assumed to be degree<->radian converters (TODO confirm in utils).
# Bearing: degrees clockwise from north -> radians counterclockwise from +x.
bearing_trans <- scales::trans_new(
  "bearing",
  transform = function(x) radians(90 - x),
  inverse = function(x) 90 - degrees(x)
)
# Plunge: degrees below horizontal (positive down) -> negative zenith radians.
plunge_trans <- scales::trans_new(
  "dip",
  transform = function(x) radians(-x),
  inverse = function(x) -degrees(x)
)
#' @rdname coord_azimuth_zenith
#' @export
CoordAzimuthZenith <- ggplot2::ggproto(
  "CoordAzimuthZenith", ggplot2::Coord,
  # Fixed 1:1 aspect so the projected unit circle renders as a circle.
  aspect = function(ranges) 1,
  # the transformation ensures range 0..1,
  # however with clipping, points on the unit circle aren't guaranteed
  # to not be
  clip = "off",
  # Computes everything the panel needs: azimuth breaks in radians plus the
  # one label per screen edge used to reserve axis space.
  setup_panel_params = function(self, scale_x, scale_y, params = list()) {
    # the scale breaks are meaningful here, but they must get transformed to
    # azimuth/zenith space
    azimuth_params <- list(
      azimuth.range = scale_x$get_limits(),
      azimuth.labels = scale_x$get_labels(),
      azimuth.major = as.numeric(self$trans_azimuth$transform(scale_x$get_breaks())),
      azimuth.major_source = as.numeric(scale_x$get_breaks()),
      azimuth.minor = as.numeric(self$trans_azimuth$transform(scale_x$get_breaks_minor())),
      azimuth.minor_source = as.numeric(scale_x$get_breaks_minor())
    )
    # the dummy text is the text that decides how much room is left for the axes
    # even though it will not appear
    if(length(azimuth_params$azimuth.major) > 0) {
      cos_breaks <- cos(azimuth_params$azimuth.major)
      sin_breaks <- sin(azimuth_params$azimuth.major)
      labels <- azimuth_params$azimuth.labels
      # For each screen edge, pick the break whose label reaches farthest in
      # that direction; [1] breaks ties deterministically.
      most_left <- which((cos_breaks < 0) & (cos_breaks == min(cos_breaks)))[1]
      most_right <- which((cos_breaks > 0) & (cos_breaks == max(cos_breaks)))[1]
      most_bottom <- which((sin_breaks < 0) & (sin_breaks == min(sin_breaks)))[1]
      most_top <- which((sin_breaks > 0) & (sin_breaks == max(sin_breaks)))[1]
      axis_params <- list(
        axis.text_left = labels[most_left],
        axis.text_right = labels[most_right],
        axis.text_bottom = labels[most_bottom],
        axis.text_top = labels[most_top]
      )
    } else {
      axis_params <- list(
        axis.text_left = NA,
        axis.text_right = NA,
        axis.text_bottom = NA,
        axis.text_top = NA
      )
    }
    # combine the two lists
    c(azimuth_params, axis_params)
  },
  labels = function(panel_params) {
    panel_params
  },
  render_axis_h = function(self, panel_params, theme) {
    # render an axis that takes up the right amount of space to render the labels at the extremes
    list(
      top = sized_axis_grob(theme, panel_params$axis.text_top, "h"),
      bottom = sized_axis_grob(theme, panel_params$axis.text_bottom, "h")
    )
  },
  render_axis_v = function(self, panel_params, theme) {
    # render an axis that takes up the right amount of space to render the labels at the extremes
    list(
      left = sized_axis_grob(theme, panel_params$axis.text_left, "v"),
      right = sized_axis_grob(theme, panel_params$axis.text_right, "v")
    )
  },
  # All decoration is drawn in render_bg(); the foreground stays empty.
  render_fg = function(panel_params, theme) {
    ggplot2::zeroGrob()
  },
  render_bg = function(self, panel_params, theme) {
    # panel border makes more sense in the backround here with the graticule
    # render as grobs so that we can steal their gpars for our modified
    # circle grobs, since the panel border isn't a rectangle
    background <- element_render(theme, "panel.background")
    border <- element_render(theme, "panel.border")
    grid::grobTree(
      # panel background
      if(!is.null(background) && !inherits(background, "zeroGrob")) {
        grid::circleGrob(
          x = 0.5, y = 0.5, r = 0.5,
          gp = background$gp,
          default.units = "npc"
        )
      },
      # graticule
      guide_great_circles(panel_params$azimuth.major, theme, self$project_xyz, "panel.grid"),
      guide_great_circles(panel_params$azimuth.minor, theme, self$project_xyz, "panel.grid.minor"),
      # panel border
      if(!is.null(border) && !inherits(border, "zeroGrob")) {
        grid::circleGrob(
          x = 0.5, y = 0.5, r = 0.5,
          gp = border$gp,
          default.units = "npc"
        )
      },
      # ticks and axis labels
      guide_azimuth(panel_params$azimuth.major, panel_params$azimuth.labels, theme, self$project_xyz)
    )
  },
  backtransform_range = function(self, panel_params) {
    self$range(panel_params)
  },
  range = function(self, panel_params) {
    # summarise_layout() expects that the x and y ranges here
    # match the setting from self$theta and self$r
    # setNames(
    #   list(panel_params$theta.range, panel_params$r.range),
    #   c(self$theta, self$r)
    # )
    # fallback until this gets implemented
    warning("Range calc not implemented", call. = FALSE)
    list(x = panel_params$x.range, y = panel_params$y.range)
  },
  # this allows subclasses to rotate/make negative/unit convert from the data
  # for example, north-based degrees that run clockwise
  trans_azimuth = scales::identity_trans(),
  trans_zenith = scales::identity_trans(),
  project_xyz = function(self, data) {
    # this allows different projections, like stereographic, orthographic
    # output is assumed to be ranged on the unit circle
    # input is an xyz unit vector after adjustment
    if(self$projection == "stereographic") {
      result <- do.call(stereo_project, data[c("x", "y", "z")])
    } else if(self$projection == "orthographic") {
      # NOTE(review): columns are named x_proj/y_proj, so result$x below only
      # resolves via data.frame partial matching -- works, but fragile;
      # confirm stereo_project()'s column names before renaming anything.
      result <- data.frame(x_proj = data$x, y_proj = data$y)
    } else {
      stop("Unrecognized projection in coord_azimuth_zenith: ", self$projection, call. = FALSE)
    }
    # scale from -1..1 to 0..1
    data.frame(x = result$x / 2 + 0.5, y = result$y / 2 + 0.5)
  },
  transform = function(self, data, range) {
    # the range is ignored here, because it doesn't make sense not to include
    # 0..2pi, -pi/2..0 in this coordinate system
    data$azimuth <- self$trans_azimuth$transform(data$x)
    data$zenith <- self$trans_zenith$transform(data$y)
    # NOTE(review): any() yields NA here when zenith contains NA and no
    # positive values, which would make if() error; consider na.rm = TRUE.
    if(any(data$zenith > 0)) {
      warning("Zenith values > 0; setting to NA", call. = FALSE)
      data$zenith[data$zenith > 0] <- NA
    }
    # trans has range 0..1 for x and y
    trans <- self$project_xyz(unit_vector(data$azimuth, data$zenith))
    data$x <- trans$x
    data$y <- trans$y
    data
  },
  # this coordinate system doesn't make sense with scales that are not
  # x: 0..2pi and y: -pi/2..0
  # should they get modified here?
  modify_scales = function(scales_x, scales_y) {
    invisible()
  }
)
# Draws the azimuth guide directly on the panel: one tick segment and one
# label per major break, placed just outside the projected circle. (The real
# h/v axes are invisible spacers -- see sized_axis_grob().)
guide_azimuth <- function(breaks, labels, theme, project) {
  if(length(breaks) == 0) return(ggplot2::zeroGrob())
  # get necessary information from theme + elements
  tick_gp <- element_render(theme, "axis.ticks.x")$gp
  text_el <- ggplot2::calc_element("axis.text.x", theme)
  text_gp <- grid::gpar(
    fontsize = text_el$size,
    fontfamily = text_el$family,
    fontface = text_el$face,
    col = text_el$colour
  )
  # calculate positions of axis ticks such that they have constant length
  tick_length <- theme$axis.ticks.length
  # Tick bases sit at the projected zenith = 0 ring; each tick then extends
  # radially outward by tick_length (a grid unit, so length is constant).
  tick_location_start <- project(unit_vector(azimuth = breaks, zenith = 0))
  tl_x0 <- grid::unit(tick_location_start$x, "npc")
  tl_y0 <- grid::unit(tick_location_start$y, "npc")
  tl_x1 = tl_x0 + tick_length * cos(breaks)
  tl_y1 = tl_y0 + tick_length * sin(breaks)
  # calculate positions and adjustments of text (respecting margin)
  margin <- max(text_el$margin)
  txt_x = tl_x1 + margin * cos(breaks)
  txt_y = tl_y1 + margin * sin(breaks)
  # Justify labels away from the circle: left-aligned on the right half,
  # right-aligned on the left half, centred when numerically on a tangent.
  txt_hjust <- ifelse(cos(breaks) > 0, 0, 1)
  txt_hjust[abs(cos(breaks)) < 1e-5] <- 0.5
  txt_vjust <- ifelse(sin(breaks) > 0, 0, 1)
  txt_vjust[abs(sin(breaks)) < 1e-5] <- 0.5
  grid::grobTree(
    grid::segmentsGrob(
      x0 = tl_x0, y0 = tl_y0,
      x1 = tl_x1, y1 = tl_y1,
      gp = tick_gp
    ),
    grid::textGrob(
      label = labels,
      x = txt_x, y = txt_y,
      hjust = txt_hjust, vjust = txt_vjust,
      gp = text_gp
    )
  )
}
# Draws one graticule line per unique azimuth break, rendered with the
# requested theme element (e.g. "panel.grid" or "panel.grid.minor").
guide_great_circles <- function(breaks, theme, project, element_name) {
  circle_grobs <- lapply(unique(breaks), function(azimuth) {
    # 10 sample points along the arc between zenith -pi/2 and the rim at 0
    arc <- unit_vector(azimuth = azimuth, zenith = seq(-pi / 2, 0, length.out = 10))
    projected <- project(arc)
    element_render(theme, element_name, x = projected$x, y = projected$y)
  })
  do.call(grid::grobTree, circle_grobs)
}
# Invisible rectangle sized to hold an axis label (plus room for the tick
# and the text margin); used so the layout reserves space for labels that
# are actually drawn on the panel itself by guide_azimuth().
sized_axis_grob <- function(theme, text, orientation = c("v", "h")) {
  # No label means no space is needed.
  if ((length(text) == 0) || is.na(text)) {
    return(ggplot2::zeroGrob())
  }
  orientation <- match.arg(orientation)
  axis_text <- ggplot2::calc_element("axis.text.x", theme)
  label_grob <- element_render(theme, "axis.text.x", label = text)
  # Extra room taken up by the tick mark plus the text margin (grid units).
  extra <- theme$axis.ticks.length + max(axis_text$margin)
  if (orientation == "h") {
    rect_width <- grid::grobWidth(label_grob)
    rect_height <- grid::grobHeight(label_grob) + extra
  } else {
    rect_width <- grid::grobWidth(label_grob) + extra
    rect_height <- grid::grobHeight(label_grob)
  }
  # Fully transparent: only the size matters.
  grid::rectGrob(
    width = rect_width,
    height = rect_height,
    gp = grid::gpar(fill = NA, col = "#00000000")
  )
}
element_render <- function(theme, element, ..., name = NULL) {
  # Resolve the themed element (computing inheritance) and render it as a
  # named grob; missing elements render as an empty grob with a message.
  resolved <- ggplot2::calc_element(element, theme)
  if (is.null(resolved)) {
    message("Theme element ", element, " missing")
    return(ggplot2::zeroGrob())
  }
  ggname(paste(element, name, sep = "."), ggplot2::element_grob(resolved, ...))
}
# Give a grob a unique, human-readable name (mirrors ggplot2's internal
# ggname() helper); the name appears in grid's grob listings.
ggname <- function (prefix, grob) {
  grob$name <- grid::grobName(grob, prefix)
  grob
}
| /R/coords.R | no_license | paleolimbot/ggstereo | R | false | false | 13,521 | r |
#' An azimuth/zenith coordinate system
#'
#' This coordinate system imagines that your data will be plotted on the bottom of a
#' sphere, oriented such that 0 azimuth is at the positive x-axis, and 0 zenith is at the
#' top of the bowl. Increasing azimuth is counterclockwise, and zenith angles that are more
#' negative move to the center of the plot. Positive zenith angles are not relevant in this
#' coordinate system, and are squished to zero with a warning. A more approachable
#' coordinate system is \link{coord_bearing_plunge}.
#'
#' @param projection Use stereographic to look at the bottom of the sphere as if you are
#'   situated at the top of the sphere (probably what you want); use orthographic to look
#'   at the bottom of the sphere as if you were very far away from it.
#'
#' @export
#'
#' @examples
#' library(ggplot2)
#'
#' df <- data.frame(
#'   azimuth = seq(0, 4*pi, length.out = 40),
#'   zenith = seq(0, -pi / 2, length.out = 40)
#' )
#'
#' ggplot(df, aes(azimuth, zenith)) +
#'   geom_path(col = "red") +
#'   geom_point(col = "blue") +
#'   coord_azimuth_zenith() +
#'   scale_x_continuous(
#'     breaks = radians(seq(0, 330, by = 30)),
#'     labels = degrees
#'   )
#'
#'
coord_azimuth_zenith <- function(projection = c("stereographic", "orthographic")) {
  # match.arg() validates the choice and defaults to the first option
  projection <- match.arg(projection)
  ggplot2::ggproto(NULL, CoordAzimuthZenith, projection = projection)
}
#' A bearing/dip coordinate system
#'
#' Plots data on the bottom of a sphere, viewed from the inside, with a
#' bearing of 0 due north and a plunge of 0 horizontal. Larger plunge
#' values move points toward the centre of the plot; larger bearing values
#' move points clockwise from north.
#'
#' @inheritParams coord_azimuth_zenith
#' @export
#'
#' @examples
#' library(ggplot2)
#'
#' df <- data.frame(
#'   bearing = seq(0, 4*180, length.out = 40),
#'   plunge = seq(0, 90, length.out = 40)
#' )
#'
#' ggplot(df, aes(bearing, plunge)) +
#'   geom_path(col = "red") +
#'   geom_point(col = "blue") +
#'   coord_bearing_plunge() +
#'   scale_x_continuous(breaks = seq(0, 330, by = 30))
#'
coord_bearing_plunge <- function(projection = c("stereographic", "orthographic")) {
  projection <- match.arg(projection)
  # Same engine as coord_azimuth_zenith(), but with transformations that
  # map degree-based bearing/plunge input onto azimuth/zenith radians.
  ggplot2::ggproto(
    NULL, CoordAzimuthZenith,
    trans_azimuth = bearing_trans,
    trans_zenith = plunge_trans,
    projection = projection
  )
}
#' Pretty breaks for coord_bearing_plunge
#'
#' @param breaks,minor_breaks Where break lines and labels are drawn
#' @param labels Labels or label function that produces labels from breaks
#' @param ... Passed to \link[ggplot2]{scale_x_continuous}.
#'
#' @export
#'
scale_x_bearing <- function(
  breaks = seq(0, 330, by = 30),
  minor_breaks = seq(0, 350, by = 10), ...) {
  # Leave out-of-bounds values untouched: the coordinate system handles
  # values outside 0..360 itself.
  keep_oob <- function(x, ...) x
  ggplot2::scale_x_continuous(
    limits = c(0, 360),
    breaks = breaks,
    minor_breaks = minor_breaks,
    oob = keep_oob,
    ...
  )
}
#' @rdname scale_x_bearing
#' @export
scale_x_compass <- function(breaks = c(0, 90, 180, 270), labels = c("N", "E", "S", "W"),
                            minor_breaks = seq(0, 330, by = 30), ...) {
  # Leave out-of-bounds values untouched: the coordinate system handles
  # values outside 0..360 itself.
  keep_oob <- function(x, ...) x
  ggplot2::scale_x_continuous(
    limits = c(0, 360),
    breaks = breaks,
    labels = labels,
    minor_breaks = minor_breaks,
    oob = keep_oob,
    ...
  )
}
#' Pretty breaks for coord_azimuth_zenith
#'
#' @inheritParams scale_x_bearing
#'
#' @export
#'
scale_x_azimuth <- function(
  breaks = seq(0, 7*pi/4, by = pi/4),
  labels = pi_labels, ...) {
  # Leave out-of-bounds values untouched: the coordinate system handles
  # values outside 0..2*pi itself.
  keep_oob <- function(x, ...) x
  ggplot2::scale_x_continuous(
    limits = c(0, 2*pi),
    breaks = breaks,
    labels = labels,
    oob = keep_oob,
    ...
  )
}
#' @rdname scale_x_azimuth
#' @export
pi_labels <- function(breaks, ...) {
  # Express each break as a multiple of pi (e.g. "0.5 pi"); extra
  # arguments are forwarded to format().
  multiples <- format(breaks / pi, ...)
  out <- paste(multiples, "pi")
  # Special cases: 0 is shown bare, and a leading "1" (or "1.0...") is
  # dropped so that pi itself reads "pi" rather than "1 pi".
  out[breaks == 0] <- "0"
  at_pi <- breaks == pi
  out[at_pi] <- gsub("^1(\\.0+)?\\s*", "", out[at_pi])
  out
}
# Coordinate transforms used by coord_bearing_plunge(): both convert
# degree-based field measurements into the radian azimuth/zenith convention
# used by CoordAzimuthZenith. radians()/degrees() are package helpers --
# assumed to be degree<->radian converters (TODO confirm in utils).
# Bearing: degrees clockwise from north -> radians counterclockwise from +x.
bearing_trans <- scales::trans_new(
  "bearing",
  transform = function(x) radians(90 - x),
  inverse = function(x) 90 - degrees(x)
)
# Plunge: degrees below horizontal (positive down) -> negative zenith radians.
plunge_trans <- scales::trans_new(
  "dip",
  transform = function(x) radians(-x),
  inverse = function(x) -degrees(x)
)
#' @rdname coord_azimuth_zenith
#' @export
CoordAzimuthZenith <- ggplot2::ggproto(
  "CoordAzimuthZenith", ggplot2::Coord,
  # Fixed 1:1 aspect so the projected unit circle renders as a circle.
  aspect = function(ranges) 1,
  # the transformation ensures range 0..1,
  # however with clipping, points on the unit circle aren't guaranteed
  # to not be
  clip = "off",
  # Computes everything the panel needs: azimuth breaks in radians plus the
  # one label per screen edge used to reserve axis space.
  setup_panel_params = function(self, scale_x, scale_y, params = list()) {
    # the scale breaks are meaningful here, but they must get transformed to
    # azimuth/zenith space
    azimuth_params <- list(
      azimuth.range = scale_x$get_limits(),
      azimuth.labels = scale_x$get_labels(),
      azimuth.major = as.numeric(self$trans_azimuth$transform(scale_x$get_breaks())),
      azimuth.major_source = as.numeric(scale_x$get_breaks()),
      azimuth.minor = as.numeric(self$trans_azimuth$transform(scale_x$get_breaks_minor())),
      azimuth.minor_source = as.numeric(scale_x$get_breaks_minor())
    )
    # the dummy text is the text that decides how much room is left for the axes
    # even though it will not appear
    if(length(azimuth_params$azimuth.major) > 0) {
      cos_breaks <- cos(azimuth_params$azimuth.major)
      sin_breaks <- sin(azimuth_params$azimuth.major)
      labels <- azimuth_params$azimuth.labels
      # For each screen edge, pick the break whose label reaches farthest in
      # that direction; [1] breaks ties deterministically.
      most_left <- which((cos_breaks < 0) & (cos_breaks == min(cos_breaks)))[1]
      most_right <- which((cos_breaks > 0) & (cos_breaks == max(cos_breaks)))[1]
      most_bottom <- which((sin_breaks < 0) & (sin_breaks == min(sin_breaks)))[1]
      most_top <- which((sin_breaks > 0) & (sin_breaks == max(sin_breaks)))[1]
      axis_params <- list(
        axis.text_left = labels[most_left],
        axis.text_right = labels[most_right],
        axis.text_bottom = labels[most_bottom],
        axis.text_top = labels[most_top]
      )
    } else {
      axis_params <- list(
        axis.text_left = NA,
        axis.text_right = NA,
        axis.text_bottom = NA,
        axis.text_top = NA
      )
    }
    # combine the two lists
    c(azimuth_params, axis_params)
  },
  labels = function(panel_params) {
    panel_params
  },
  render_axis_h = function(self, panel_params, theme) {
    # render an axis that takes up the right amount of space to render the labels at the extremes
    list(
      top = sized_axis_grob(theme, panel_params$axis.text_top, "h"),
      bottom = sized_axis_grob(theme, panel_params$axis.text_bottom, "h")
    )
  },
  render_axis_v = function(self, panel_params, theme) {
    # render an axis that takes up the right amount of space to render the labels at the extremes
    list(
      left = sized_axis_grob(theme, panel_params$axis.text_left, "v"),
      right = sized_axis_grob(theme, panel_params$axis.text_right, "v")
    )
  },
  # All decoration is drawn in render_bg(); the foreground stays empty.
  render_fg = function(panel_params, theme) {
    ggplot2::zeroGrob()
  },
  render_bg = function(self, panel_params, theme) {
    # panel border makes more sense in the backround here with the graticule
    # render as grobs so that we can steal their gpars for our modified
    # circle grobs, since the panel border isn't a rectangle
    background <- element_render(theme, "panel.background")
    border <- element_render(theme, "panel.border")
    grid::grobTree(
      # panel background
      if(!is.null(background) && !inherits(background, "zeroGrob")) {
        grid::circleGrob(
          x = 0.5, y = 0.5, r = 0.5,
          gp = background$gp,
          default.units = "npc"
        )
      },
      # graticule
      guide_great_circles(panel_params$azimuth.major, theme, self$project_xyz, "panel.grid"),
      guide_great_circles(panel_params$azimuth.minor, theme, self$project_xyz, "panel.grid.minor"),
      # panel border
      if(!is.null(border) && !inherits(border, "zeroGrob")) {
        grid::circleGrob(
          x = 0.5, y = 0.5, r = 0.5,
          gp = border$gp,
          default.units = "npc"
        )
      },
      # ticks and axis labels
      guide_azimuth(panel_params$azimuth.major, panel_params$azimuth.labels, theme, self$project_xyz)
    )
  },
  backtransform_range = function(self, panel_params) {
    self$range(panel_params)
  },
  range = function(self, panel_params) {
    # summarise_layout() expects that the x and y ranges here
    # match the setting from self$theta and self$r
    # setNames(
    #   list(panel_params$theta.range, panel_params$r.range),
    #   c(self$theta, self$r)
    # )
    # fallback until this gets implemented
    warning("Range calc not implemented", call. = FALSE)
    list(x = panel_params$x.range, y = panel_params$y.range)
  },
  # this allows subclasses to rotate/make negative/unit convert from the data
  # for example, north-based degrees that run clockwise
  trans_azimuth = scales::identity_trans(),
  trans_zenith = scales::identity_trans(),
  project_xyz = function(self, data) {
    # this allows different projections, like stereographic, orthographic
    # output is assumed to be ranged on the unit circle
    # input is an xyz unit vector after adjustment
    if(self$projection == "stereographic") {
      result <- do.call(stereo_project, data[c("x", "y", "z")])
    } else if(self$projection == "orthographic") {
      # NOTE(review): columns are named x_proj/y_proj, so result$x below only
      # resolves via data.frame partial matching -- works, but fragile;
      # confirm stereo_project()'s column names before renaming anything.
      result <- data.frame(x_proj = data$x, y_proj = data$y)
    } else {
      stop("Unrecognized projection in coord_azimuth_zenith: ", self$projection, call. = FALSE)
    }
    # scale from -1..1 to 0..1
    data.frame(x = result$x / 2 + 0.5, y = result$y / 2 + 0.5)
  },
  transform = function(self, data, range) {
    # the range is ignored here, because it doesn't make sense not to include
    # 0..2pi, -pi/2..0 in this coordinate system
    data$azimuth <- self$trans_azimuth$transform(data$x)
    data$zenith <- self$trans_zenith$transform(data$y)
    # NOTE(review): any() yields NA here when zenith contains NA and no
    # positive values, which would make if() error; consider na.rm = TRUE.
    if(any(data$zenith > 0)) {
      warning("Zenith values > 0; setting to NA", call. = FALSE)
      data$zenith[data$zenith > 0] <- NA
    }
    # trans has range 0..1 for x and y
    trans <- self$project_xyz(unit_vector(data$azimuth, data$zenith))
    data$x <- trans$x
    data$y <- trans$y
    data
  },
  # this coordinate system doesn't make sense with scales that are not
  # x: 0..2pi and y: -pi/2..0
  # should they get modified here?
  modify_scales = function(scales_x, scales_y) {
    invisible()
  }
)
# Draws the azimuth guide directly on the panel: one tick segment and one
# label per major break, placed just outside the projected circle. (The real
# h/v axes are invisible spacers -- see sized_axis_grob().)
guide_azimuth <- function(breaks, labels, theme, project) {
  if(length(breaks) == 0) return(ggplot2::zeroGrob())
  # get necessary information from theme + elements
  tick_gp <- element_render(theme, "axis.ticks.x")$gp
  text_el <- ggplot2::calc_element("axis.text.x", theme)
  text_gp <- grid::gpar(
    fontsize = text_el$size,
    fontfamily = text_el$family,
    fontface = text_el$face,
    col = text_el$colour
  )
  # calculate positions of axis ticks such that they have constant length
  tick_length <- theme$axis.ticks.length
  # Tick bases sit at the projected zenith = 0 ring; each tick then extends
  # radially outward by tick_length (a grid unit, so length is constant).
  tick_location_start <- project(unit_vector(azimuth = breaks, zenith = 0))
  tl_x0 <- grid::unit(tick_location_start$x, "npc")
  tl_y0 <- grid::unit(tick_location_start$y, "npc")
  tl_x1 = tl_x0 + tick_length * cos(breaks)
  tl_y1 = tl_y0 + tick_length * sin(breaks)
  # calculate positions and adjustments of text (respecting margin)
  margin <- max(text_el$margin)
  txt_x = tl_x1 + margin * cos(breaks)
  txt_y = tl_y1 + margin * sin(breaks)
  # Justify labels away from the circle: left-aligned on the right half,
  # right-aligned on the left half, centred when numerically on a tangent.
  txt_hjust <- ifelse(cos(breaks) > 0, 0, 1)
  txt_hjust[abs(cos(breaks)) < 1e-5] <- 0.5
  txt_vjust <- ifelse(sin(breaks) > 0, 0, 1)
  txt_vjust[abs(sin(breaks)) < 1e-5] <- 0.5
  grid::grobTree(
    grid::segmentsGrob(
      x0 = tl_x0, y0 = tl_y0,
      x1 = tl_x1, y1 = tl_y1,
      gp = tick_gp
    ),
    grid::textGrob(
      label = labels,
      x = txt_x, y = txt_y,
      hjust = txt_hjust, vjust = txt_vjust,
      gp = text_gp
    )
  )
}
# Draws one graticule line per unique azimuth break, rendered with the
# requested theme element (e.g. "panel.grid" or "panel.grid.minor").
guide_great_circles <- function(breaks, theme, project, element_name) {
  circle_grobs <- lapply(unique(breaks), function(azimuth) {
    # 10 sample points along the arc between zenith -pi/2 and the rim at 0
    arc <- unit_vector(azimuth = azimuth, zenith = seq(-pi / 2, 0, length.out = 10))
    projected <- project(arc)
    element_render(theme, element_name, x = projected$x, y = projected$y)
  })
  do.call(grid::grobTree, circle_grobs)
}
# Invisible rectangle sized to hold an axis label (plus room for the tick
# and the text margin); used so the layout reserves space for labels that
# are actually drawn on the panel itself by guide_azimuth().
sized_axis_grob <- function(theme, text, orientation = c("v", "h")) {
  # No label means no space is needed.
  if ((length(text) == 0) || is.na(text)) {
    return(ggplot2::zeroGrob())
  }
  orientation <- match.arg(orientation)
  axis_text <- ggplot2::calc_element("axis.text.x", theme)
  label_grob <- element_render(theme, "axis.text.x", label = text)
  # Extra room taken up by the tick mark plus the text margin (grid units).
  extra <- theme$axis.ticks.length + max(axis_text$margin)
  if (orientation == "h") {
    rect_width <- grid::grobWidth(label_grob)
    rect_height <- grid::grobHeight(label_grob) + extra
  } else {
    rect_width <- grid::grobWidth(label_grob) + extra
    rect_height <- grid::grobHeight(label_grob)
  }
  # Fully transparent: only the size matters.
  grid::rectGrob(
    width = rect_width,
    height = rect_height,
    gp = grid::gpar(fill = NA, col = "#00000000")
  )
}
element_render <- function(theme, element, ..., name = NULL) {
  # Resolve the themed element (computing inheritance) and render it as a
  # named grob; missing elements render as an empty grob with a message.
  resolved <- ggplot2::calc_element(element, theme)
  if (is.null(resolved)) {
    message("Theme element ", element, " missing")
    return(ggplot2::zeroGrob())
  }
  ggname(paste(element, name, sep = "."), ggplot2::element_grob(resolved, ...))
}
# Give a grob a unique, human-readable name (mirrors ggplot2's internal
# ggname() helper); the name appears in grid's grob listings.
ggname <- function (prefix, grob) {
  grob$name <- grid::grobName(grob, prefix)
  grob
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topoplot.R
\name{topoplot}
\alias{topoplot}
\alias{topoplot.data.frame}
\alias{topoplot.eeg_data}
\title{Topographical Plotting Function for EEG}
\usage{
topoplot(data, ...)
\method{topoplot}{data.frame}(data, time_lim = NULL, limits = NULL,
chanLocs = NULL, method = "Biharmonic", r = NULL, grid_res = 67,
palette = "RdBu", interp_limit = "skirt", contour = TRUE,
chan_marker = "point", quantity = "amplitude", montage = NULL,
colourmap, ...)
\method{topoplot}{eeg_data}(data, time_lim = NULL, limits = NULL,
chanLocs = NULL, method = "Biharmonic", r = NULL, grid_res = 67,
palette = "RdBu", interp_limit = "skirt", contour = TRUE,
chan_marker = "point", quantity = "amplitude", montage = NULL, ...)
}
\arguments{
\item{data}{An EEG dataset. Must have columns x, y, and amplitude at present.
x and y are (Cartesian) electrode co-ordinates), amplitude is amplitude.}
\item{...}{Various arguments passed to specific functions}
\item{time_lim}{Timepoint(s) to plot. Can be one time or a range to average
over. If none is supplied, the function will average across all timepoints
in the supplied data.}
\item{limits}{Limits of the fill scale - should be given as a numeric vector
with two values specifying the start and end points, e.g. limits = c(-2, 2).
Anything else will be ignored. Defaults to the range of the data.}
\item{chanLocs}{Not yet implemented.}
\item{method}{Interpolation method. "Biharmonic" or "gam". "Biharmonic"
implements the same method used in Matlab's EEGLAB. "gam" fits a
Generalized Additive Model with k = 40 knots. Defaults to biharmonic spline
interpolation.}
\item{r}{Radius of cartoon head_shape; if not given, defaults to 1.1 * the
maximum y electrode location.}
\item{grid_res}{Resolution of the interpolated grid. Higher = smoother but
slower.}
\item{palette}{Defaults to RdBu if none supplied. Can be any from
RColorBrewer or viridis. If an unsupported palette is specified, switches
to Greens.}
\item{interp_limit}{"skirt" or "head". Defaults to "skirt". "skirt"
interpolates just past the farthest electrode and does not respect the
boundary of the head_shape. "head" interpolates up to the radius of the
plotted head.}
\item{contour}{Plot contour lines on topography (defaults to TRUE)}
\item{chan_marker}{Set marker for electrode locations. "point" = point,
"name" = electrode name, "none" = no marker. Defaults to "point".}
\item{quantity}{Allows plotting of arbitrary quantitative column. Defaults to
amplitude. Can be any column name. E.g. "p.value", "t-statistic".}
\item{montage}{Name of an existing montage set. Defaults to NULL; (currently
only 'biosemi64alpha' available other than default 10/20 system)}
\item{colourmap}{Deprecated, use palette instead.}
}
\description{
Allows topographical plotting of functional data. Output is a ggplot2 object.
The functions works for both standard data frames and objects of class
\code{eeg_data}.
Both \code{eeg_epochs} and \code{eeg_data} objects are supported.
}
\section{Methods (by class)}{
\itemize{
\item \code{data.frame}: Topographical plotting of data.frames and other non
eeg_data objects.
\item \code{eeg_data}: Topographical plotting of \code{eeg_data} objects.
}}
\section{Notes on usage of Generalized Additive Models for interpolation}{
The
function fits a GAM using the gam function from mgcv. Specifically, it fits
a spline using the model formula gam(z ~ s(x, y, bs = "ts", k = 40)). Using
GAMs for smooths is very much experimental. The surface is produced from
the predictions of the GAM model fitted to the supplied data. Values at
each electrode do not necessarily match actual values in the data:
high-frequency variation will tend to be smoothed out. Thus, the method
should be used with caution.
}
\author{
Matt Craddock, \email{matt@mattcraddock.com}
}
| /man/topoplot.Rd | permissive | mantour/eegUtils | R | false | true | 3,890 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topoplot.R
\name{topoplot}
\alias{topoplot}
\alias{topoplot.data.frame}
\alias{topoplot.eeg_data}
\title{Topographical Plotting Function for EEG}
\usage{
topoplot(data, ...)
\method{topoplot}{data.frame}(data, time_lim = NULL, limits = NULL,
chanLocs = NULL, method = "Biharmonic", r = NULL, grid_res = 67,
palette = "RdBu", interp_limit = "skirt", contour = TRUE,
chan_marker = "point", quantity = "amplitude", montage = NULL,
colourmap, ...)
\method{topoplot}{eeg_data}(data, time_lim = NULL, limits = NULL,
chanLocs = NULL, method = "Biharmonic", r = NULL, grid_res = 67,
palette = "RdBu", interp_limit = "skirt", contour = TRUE,
chan_marker = "point", quantity = "amplitude", montage = NULL, ...)
}
\arguments{
\item{data}{An EEG dataset. Must have columns x, y, and amplitude at present.
x and y are (Cartesian) electrode co-ordinates), amplitude is amplitude.}
\item{...}{Various arguments passed to specific functions}
\item{time_lim}{Timepoint(s) to plot. Can be one time or a range to average
over. If none is supplied, the function will average across all timepoints
in the supplied data.}
\item{limits}{Limits of the fill scale - should be given as a character vector
with two values specifying the start and endpoints e.g. limits = c(-2, 2).
Will ignore anything else. Defaults to the range of the data.}
\item{chanLocs}{Not yet implemented.}
\item{method}{Interpolation method. "Biharmonic" or "gam". "Biharmonic"
implements the same method used in Matlab's EEGLAB. "gam" fits a
Generalized Additive Model with k = 40 knots. Defaults to biharmonic spline
interpolation.}
\item{r}{Radius of cartoon head_shape; if not given, defaults to 1.1 * the
maximum y electrode location.}
\item{grid_res}{Resolution of the interpolated grid. Higher = smoother but
slower.}
\item{palette}{Defaults to RdBu if none supplied. Can be any from
RColorBrewer or viridis. If an unsupported palette is specified, switches
to Greens.}
\item{interp_limit}{"skirt" or "head". Defaults to "skirt". "skirt"
interpolates just past the farthest electrode and does not respect the
boundary of the head_shape. "head" interpolates up to the radius of the
plotted head.}
\item{contour}{Plot contour lines on topography (defaults to TRUE)}
\item{chan_marker}{Set marker for electrode locations. "point" = point,
"name" = electrode name, "none" = no marker. Defaults to "point".}
\item{quantity}{Allows plotting of arbitrary quantitative column. Defaults to
amplitude. Can be any column name. E.g. "p.value", "t-statistic".}
\item{montage}{Name of an existing montage set. Defaults to NULL; (currently
only 'biosemi64alpha' available other than default 10/20 system)}
\item{colourmap}{Deprecated, use palette instead.}
}
\description{
Allows topographical plotting of functional data. Output is a ggplot2 object.
The function works for both standard data frames and objects of class
\code{eeg_data}.
Both \code{eeg_epochs} and \code{eeg_data} objects are supported.
}
\section{Methods (by class)}{
\itemize{
\item \code{data.frame}: Topographical plotting of data.frames and other non
eeg_data objects.
\item \code{eeg_data}: Topographical plotting of \code{eeg_data} objects.
}}
\section{Notes on usage of Generalized Additive Models for interpolation}{
The
function fits a GAM using the gam function from mgcv. Specifically, it fits
a spline using the model formula gam(z ~ s(x, y, bs = "ts", k = 40)). Using
GAMs for smooths is very much experimental. The surface is produced from
the predictions of the GAM model fitted to the supplied data. Values at
each electrode do not necessarily match actual values in the data:
high-frequency variation will tend to be smoothed out. Thus, the method
should be used with caution.
}
\author{
Matt Craddock, \email{matt@mattcraddock.com}
}
|
#' Change type of pdqr-function
#'
#' Modify [type][meta_type()] of pdqr-function using method of choice.
#'
#' @param f A pdqr-function.
#' @param type A desired type of output. Should be one of "discrete" or
#' "continuous". If `NULL` (default), it is chosen as an opposite of `f`'s
#' type.
#' @param method Retyping method. Should be one of "value", "piecelin", "dirac".
#'
#' @details If type of `f` is equal to input `type` then `f` is returned.
#'
#' Method "value" uses renormalized columns of `f`'s "x_tbl" metadata as values
#' for output's "x_tbl" metadata. In other words, it preserves ratios between
#' values of d-function at certain "x" points. Its main advantages are that this
#' method can work well with any pdqr type and that two consecutive conversions
#' return the same function. Conversion algorithm is as follows:
#' - Retyping from "continuous" to `type` "discrete" is done by creating
#' pdqr-function of corresponding class with the following "x_tbl" metadata: "x"
#' column is the same as in `f`; "prob" column is equal to `f`'s "y" column
#' after renormalization (so that their sum is 1).
#' - Retyping from "discrete" to `type` "continuous" is done in the same
#' fashion: "x" column is the same; "y" column is equal to `f`'s "prob" column
#' after renormalization (so that total integral of piecewise-linear density is
#' equal to 1).
#'
#' Method "piecelin" should be used mostly for converting from "continuous" to
#' "discrete" type. It uses the fact that 'pdqr' densities are piecewise-linear
#' (linear in intervals between values of "x" column of ["x_tbl"
#' metadata][meta_x_tbl()]) on their [support][meta_support()]:
#' - Retyping from "continuous" to `type` "discrete" is done by computing "x"
#' values as centers of interval masses with probabilities equal to interval
#' total probabilities.
#' - Retyping from "discrete" to `type` "continuous" is made approximately by
#' trying to compute "x" grid, for which "x" values of input distribution are
#' going to be centers of mass. Algorithm is approximate and might result in
#' big errors in case of a small number of "x" values or if they are not
#' "suitable" for this kind of transformation.
#'
#' Method "dirac" is used mostly for converting from "discrete" to "continuous"
#' type (for example, in `form_mix()` in case different types of input
#' pdqr-functions). It works in the following way:
#' - Retyping from "continuous" to `type` "discrete" works only if "x_tbl"
#' metadata represents a mixture of dirac-like distributions. In that case it is
#' transformed to have "x" values from centers of those dirac-like distributions
#' with corresponding probabilities.
#' - Retyping from "discrete" to `type` "continuous" works by transforming each
#' "x" value from "x_tbl" metadata into dirac-like distribution with total
#' probability taken from corresponding value of "prob" column. Output
#' essentially represents a mixture of dirac-like distributions.
#'
#' @return A pdqr-function with type equal to input `type`.
#'
#' @seealso [form_regrid()] for changing grid (rows of "x_tbl" metadata) of
#' pdqr-function.
#'
#' [form_resupport()] for changing support of pdqr-function.
#'
#' @family form functions
#'
#' @examples
#' my_con <- new_d(data.frame(x = 1:5, y = c(1, 2, 3, 2, 1) / 9), "continuous")
#' meta_x_tbl(my_con)
#'
#' # By default, conversion is done to the opposite type
#' my_dis <- form_retype(my_con)
#' meta_x_tbl(my_dis)
#'
#' # Default retyping (with method "value") is accurate when doing consecutive
#' # retyping
#' my_con_2 <- form_retype(my_dis, "continuous")
#' meta_x_tbl(my_con_2)
#'
#' # Method "dirac"
#' my_dirac <- form_retype(my_dis, "continuous", method = "dirac")
#' meta_x_tbl(my_dirac)
#'
#' # Method "piecelin"
#' ## From "continuous" to "discrete" (preferred direction)
#' my_dis_piece <- form_retype(my_con, "discrete", method = "piecelin")
#' meta_x_tbl(my_dis_piece)
#' ## Conversion from "discrete" to "continuous" is very approximate
#' my_con_piece <- form_retype(my_dis_piece, "continuous", method = "piecelin")
#' meta_x_tbl(my_con_piece)
#'
#' plot(my_con, main = 'Approximate nature of method "piecelin"')
#' lines(my_con_piece, col = "blue")
#' @export
# Convert pdqr-function `f` to the requested `type` ("discrete" or
# "continuous") using the given retyping `method`. A `NULL` `type` means
# "the opposite of `f`'s current type". Returns a pdqr-function of the
# requested type; an already-matching `f` is returned unchanged downstream.
form_retype <- function(f, type = NULL, method = "value") {
  assert_pdqr_fun(f)
  assert_pdqr_type(type, allow_null = TRUE)
  assert_method(method, methods_retype)
  # Speed optimization (skips possibly expensive assertions)
  disable_asserting_locally()

  if (is.null(type)) {
    # Default target type is the opposite of the current one
    type <- if (meta_type(f) == "discrete") "continuous" else "discrete"
  }

  # `type` has already been validated, so only two branches are reachable
  if (type == "discrete") {
    retype_dis(f, method)
  } else {
    retype_con(f, method)
  }
}
# Supported retyping methods; `form_retype()` validates its `method`
# argument against this vector via `assert_method()`.
methods_retype <- c("piecelin", "dirac", "value")
# Convert `f` to "discrete" type with the chosen `method`.
# A function that is already "discrete" is returned as is.
retype_dis <- function(f, method) {
  if (meta_type(f) == "discrete") {
    return(f)
  }

  retype_fun <- switch(
    method,
    value = retype_dis_value,
    piecelin = retype_dis_piecelin,
    dirac = retype_dis_dirac
  )

  retype_fun(f)
}
# "value" retyping to "discrete": keep the "x" column untouched and reuse the
# density column "y" as (unnormalized) probabilities.
retype_dis_value <- function(f) {
  f_x_tbl <- meta_x_tbl(f)
  # `new_*()` constructors renormalize the "prob" column to sum to 1
  res_x_tbl <- data.frame(x = f_x_tbl[["x"]], prob = f_x_tbl[["y"]])

  new_pdqr_by_ref(f)(res_x_tbl, type = "discrete")
}
# "piecelin" retyping to "discrete": each interval between consecutive "x"
# values of the piecewise-linear density contributes one output "x" (its
# center of mass) with probability equal to the interval's total probability.
retype_dis_piecelin <- function(f) {
  x_tbl <- meta_x_tbl(f)
  n <- nrow(x_tbl)
  # Lagged/leading views of interval endpoints and their density values
  x_lag <- x_tbl[["x"]][-n]
  x_lead <- x_tbl[["x"]][-1]
  y_lag <- x_tbl[["y"]][-n]
  y_lead <- x_tbl[["y"]][-1]
  y_sum <- y_lag + y_lead
  # Output `x` values are computed as intervals' centers of mass
  x_mass <- (x_lag * (y_lag + y_sum) + x_lead * (y_lead + y_sum)) / (3 * y_sum)
  ## If interval has zero probability (division by zero gives a non-finite
  ## center) then its centre is set to the middle of the interval
  x_mass_bad <- !is.finite(x_mass)
  x_mass[x_mass_bad] <- (x_lag[x_mass_bad] + x_lead[x_mass_bad]) / 2
  # Output probabilities are computed as probabilities (mass) of intervals
  prob <- diff(x_tbl[["cumprob"]])
  # Creating pdqr-function
  pdqr_fun <- new_pdqr_by_ref(f)
  pdqr_fun(data.frame(x = x_mass, prob = prob), "discrete")
}
# "dirac" retyping to "discrete": assumes `f`'s "x_tbl" represents a mixture
# of dirac-like spikes and collapses each spike (delimited by zero-density
# rows) into a single "x" value with the spike's total probability.
retype_dis_dirac <- function(f) {
  x_tbl <- meta_x_tbl(f)
  # Ensure presence of zero densities at the edges
  x_tbl <- ground_x_tbl(x_tbl)
  # One output "discrete" value is computed as mean of two consecutive "x"s with
  # zero density. Each "x" corresponds to only one "discrete" value counting
  # from left: x_1 and x_2 are used to compute first "discrete" value; x_3 and
  # x_4 - second, and so on. Probability of "discrete" value is computed as
  # difference in cumulative probabilities between corresponding right and left
  # "x" values.
  y_is_zero <- is_zero(x_tbl[["y"]])
  n_y_zero <- sum(y_is_zero)
  # Group labels pairing up consecutive zero-density rows: 1, 1, 2, 2, ...
  dis_groups <- rep(seq_len(n_y_zero), each = 2, length.out = n_y_zero)
  new_x <- tapply(x_tbl[["x"]][y_is_zero], dis_groups, mean)
  # Remove dimnames
  new_x <- as.vector(new_x)
  new_prob <- tapply(
    x_tbl[["cumprob"]][y_is_zero],
    dis_groups,
    function(cumprob) {
      # This custom function is used instead of `diff()` if, for some reason,
      # there are odd number of zero density rows in `x_tbl`. In that case
      # output probability is zero.
      cumprob[length(cumprob)] - cumprob[1]
    })
  # Remove dimnames
  new_prob <- as.vector(new_prob)
  new_pdqr_by_ref(f)(data.frame(x = new_x, prob = new_prob), "discrete")
}
# Convert `f` to "continuous" type with the chosen `method`.
# A function that is already "continuous" is returned as is.
retype_con <- function(f, method) {
  if (meta_type(f) == "continuous") {
    return(f)
  }

  retype_fun <- switch(
    method,
    value = retype_con_value,
    piecelin = retype_con_piecelin,
    dirac = retype_con_dirac
  )

  retype_fun(f)
}
# "value" retyping to "continuous": keep the "x" column untouched and reuse
# the "prob" column as (unnormalized) density values.
retype_con_value <- function(f) {
  f_x_tbl <- meta_x_tbl(f)
  # `new_*()` constructors renormalize the "y" column so the density
  # integrates to 1
  res_x_tbl <- data.frame(x = f_x_tbl[["x"]], y = f_x_tbl[["prob"]])

  new_pdqr_by_ref(f)(res_x_tbl, type = "continuous")
}
# "piecelin" retyping to "continuous": approximately reconstructs an `x` grid
# for which the input "x" values would be interval centers of mass, then
# derives the piecewise-linear density from cumulative probabilities.
# Errors when fewer than 4 unique "x" values are available.
retype_con_piecelin <- function(f) {
  # Note that `f` has already passed `assert_pdqr_fun()` which means that "x"
  # column in "x_tbl" metadata is sorted and has no duplicate values
  x_tbl <- meta_x_tbl(f)
  n <- nrow(x_tbl)
  if (n < 4) {
    stop_collapse(
      'For conversion to "continuous" type `form_retype()` needs at least 4 ',
      'unique "x" values.'
    )
  }
  x <- x_tbl[["x"]]
  prob <- x_tbl[["prob"]]
  # Values of `x` grid (except first and last elements) of "continuous" output
  # is approximated as convex combination of nearest "centers of mass":
  # `x_mid = (1-alpha)*x_mass_left + alpha*x_mass_right`. Here `alpha` is
  # approximated based on two separate assumptions:
  # *Assumption 1*: if locally `y_left` (`y` value in "true" `x` to the left of
  # `x_mid`) = `y_mid` = `y_right` (by analogy with `y_left`), then
  # `alpha = prob_left / (prob_left + prob_right)`.
  # *Assumption 2*: if locally "true" values of `x` are equidistant then `alpha`
  # lie inside [1/3; 2/3] interval.
  # Final approximation is formed by combining these conclusions
  prob_sum <- prob[1:(n - 1)] + prob[2:n]
  alpha <- pmin(pmax(prob[1:(n - 1)] / prob_sum, 1 / 3), 2 / 3)
  # Zero `prob_sum` yields non-finite `alpha`; fall back to the midpoint
  alpha[!is.finite(alpha)] <- 0.5
  x_grid <- numeric(n + 1)
  x_grid[2:n] <- (1 - alpha) * x[1:(n - 1)] + alpha * x[2:n]
  # First and last `x` are approximated so that first and last `x` triplets are
  # equidistant
  x_grid[1] <- x_grid[2] - (x_grid[3] - x_grid[2])
  x_grid[n + 1] <- x_grid[n] + (x_grid[n] - x_grid[n - 1])
  # Output `y` grid is approximated in 'pdqr' fashion
  p_grid <- c(0, cumsum(prob))
  y <- y_from_p_grid(x_grid, p_grid)
  # Creating pdqr-function
  pdqr_fun <- new_pdqr_by_ref(f)
  pdqr_fun(data.frame(x = x_grid, y = y), "continuous")
}
# "dirac" retyping to "continuous": replace every "x" value with a narrow
# triangular spike of half-width `h` (shrunk where neighboring "x" values are
# closer than `2 * h`) carrying that value's probability mass.
retype_con_dirac <- function(f, h = 1e-8) {
  f_x_tbl <- meta_x_tbl(f)
  x <- f_x_tbl[["x"]]

  # Spike radius: `h`, capped by half the distance to the nearest neighbor
  half_gap <- diff(x) / 2
  radius <- pmin(h, c(h, half_gap), c(half_gap, h))

  zeros <- numeric(length(x))
  out_x <- c(x - radius, x, x + radius)
  out_y <- c(zeros, f_x_tbl[["prob"]] / radius, zeros)

  new_pdqr_by_ref(f)(data.frame(x = out_x, y = out_y), "continuous")
}
| /R/form_retype.R | permissive | echasnovski/pdqr | R | false | false | 10,030 | r | #' Change type of pdqr-function
#'
#' Modify [type][meta_type()] of pdqr-function using method of choice.
#'
#' @param f A pdqr-function.
#' @param type A desired type of output. Should be one of "discrete" or
#' "continuous". If `NULL` (default), it is chosen as an opposite of `f`'s
#' type.
#' @param method Retyping method. Should be one of "value", "piecelin", "dirac".
#'
#' @details If type of `f` is equal to input `type` then `f` is returned.
#'
#' Method "value" uses renormalized columns of `f`'s "x_tbl" metadata as values
#' for output's "x_tbl" metadata. In other words, it preserves ratios between
#' values of d-function at certain "x" points. Its main advantages are that this
#' method can work well with any pdqr type and that two consecutive conversions
#' return the same function. Conversion algorithm is as follows:
#' - Retyping from "continuous" to `type` "discrete" is done by creating
#' pdqr-function of corresponding class with the following "x_tbl" metadata: "x"
#' column is the same as in `f`; "prob" column is equal to `f`'s "y" column
#' after renormalization (so that their sum is 1).
#' - Retyping from "discrete" to `type` "continuous" is done in the same
#' fashion: "x" column is the same; "y" column is equal to `f`'s "prob" column
#' after renormalization (so that total integral of piecewise-linear density is
#' equal to 1).
#'
#' Method "piecelin" should be used mostly for converting from "continuous" to
#' "discrete" type. It uses the fact that 'pdqr' densities are piecewise-linear
#' (linear in intervals between values of "x" column of ["x_tbl"
#' metadata][meta_x_tbl()]) on their [support][meta_support()]:
#' - Retyping from "continuous" to `type` "discrete" is done by computing "x"
#' values as centers of interval masses with probabilities equal to interval
#' total probabilities.
#' - Retyping from "discrete" to `type` "continuous" is made approximately by
#' trying to compute "x" grid, for which "x" values of input distribution are
#' going to be centers of mass. Algorithm is approximate and might result in
#' big errors in case of a small number of "x" values or if they are not
#' "suitable" for this kind of transformation.
#'
#' Method "dirac" is used mostly for converting from "discrete" to "continuous"
#' type (for example, in `form_mix()` in case different types of input
#' pdqr-functions). It works in the following way:
#' - Retyping from "continuous" to `type` "discrete" works only if "x_tbl"
#' metadata represents a mixture of dirac-like distributions. In that case it is
#' transformed to have "x" values from centers of those dirac-like distributions
#' with corresponding probabilities.
#' - Retyping from "discrete" to `type` "continuous" works by transforming each
#' "x" value from "x_tbl" metadata into dirac-like distribution with total
#' probability taken from corresponding value of "prob" column. Output
#' essentially represents a mixture of dirac-like distributions.
#'
#' @return A pdqr-function with type equal to input `type`.
#'
#' @seealso [form_regrid()] for changing grid (rows of "x_tbl" metadata) of
#' pdqr-function.
#'
#' [form_resupport()] for changing support of pdqr-function.
#'
#' @family form functions
#'
#' @examples
#' my_con <- new_d(data.frame(x = 1:5, y = c(1, 2, 3, 2, 1) / 9), "continuous")
#' meta_x_tbl(my_con)
#'
#' # By default, conversion is done to the opposite type
#' my_dis <- form_retype(my_con)
#' meta_x_tbl(my_dis)
#'
#' # Default retyping (with method "value") is accurate when doing consecutive
#' # retyping
#' my_con_2 <- form_retype(my_dis, "continuous")
#' meta_x_tbl(my_con_2)
#'
#' # Method "dirac"
#' my_dirac <- form_retype(my_dis, "continuous", method = "dirac")
#' meta_x_tbl(my_dirac)
#'
#' # Method "piecelin"
#' ## From "continuous" to "discrete" (preferred direction)
#' my_dis_piece <- form_retype(my_con, "discrete", method = "piecelin")
#' meta_x_tbl(my_dis_piece)
#' ## Conversion from "discrete" to "continuous" is very approximate
#' my_con_piece <- form_retype(my_dis_piece, "continuous", method = "piecelin")
#' meta_x_tbl(my_con_piece)
#'
#' plot(my_con, main = 'Approximate nature of method "piecelin"')
#' lines(my_con_piece, col = "blue")
#' @export
form_retype <- function(f, type = NULL, method = "value") {
  assert_pdqr_fun(f)
  assert_pdqr_type(type, allow_null = TRUE)
  assert_method(method, methods_retype)
  # Speed optimization (skips possibly expensive assertions)
  disable_asserting_locally()
  # `NULL` type means "the opposite of `f`'s current type"
  if (is.null(type)) {
    type <- switch(
      meta_type(f),
      discrete = "continuous",
      continuous = "discrete"
    )
  }
  # Dispatch on validated target type
  switch(
    type,
    discrete = retype_dis(f, method),
    continuous = retype_con(f, method)
  )
}
# Supported retyping methods, validated against by `form_retype()`
methods_retype <- c("piecelin", "dirac", "value")
# Convert `f` to "discrete" type; an already-"discrete" `f` is returned as is.
retype_dis <- function(f, method) {
  if (meta_type(f) == "discrete") {
    return(f)
  }
  switch(
    method,
    value = retype_dis_value(f),
    piecelin = retype_dis_piecelin(f),
    dirac = retype_dis_dirac(f)
  )
}
# "value" retyping to "discrete": keep "x", reuse density "y" as probabilities.
retype_dis_value <- function(f) {
  x_tbl <- meta_x_tbl(f)
  # Renormalization of "prob" column will be done inside `new_*()` function
  new_x_tbl <- data.frame(x = x_tbl[["x"]], prob = x_tbl[["y"]])
  new_pdqr_by_ref(f)(new_x_tbl, type = "discrete")
}
# "piecelin" retyping to "discrete": one output "x" per density interval (its
# center of mass), with probability equal to the interval's total probability.
retype_dis_piecelin <- function(f) {
  x_tbl <- meta_x_tbl(f)
  n <- nrow(x_tbl)
  x_lag <- x_tbl[["x"]][-n]
  x_lead <- x_tbl[["x"]][-1]
  y_lag <- x_tbl[["y"]][-n]
  y_lead <- x_tbl[["y"]][-1]
  y_sum <- y_lag + y_lead
  # Output `x` values are computed as intervals' centers of mass
  x_mass <- (x_lag * (y_lag + y_sum) + x_lead * (y_lead + y_sum)) / (3 * y_sum)
  ## If interval has zero probability then its centre is set to the middle
  x_mass_bad <- !is.finite(x_mass)
  x_mass[x_mass_bad] <- (x_lag[x_mass_bad] + x_lead[x_mass_bad]) / 2
  # Output probabilities are computed as probabilities (mass) of intervals
  prob <- diff(x_tbl[["cumprob"]])
  # Creating pdqr-function
  pdqr_fun <- new_pdqr_by_ref(f)
  pdqr_fun(data.frame(x = x_mass, prob = prob), "discrete")
}
# "dirac" retyping to "discrete": collapse each dirac-like spike (delimited by
# zero-density rows) into one "x" value carrying the spike's probability.
retype_dis_dirac <- function(f) {
  x_tbl <- meta_x_tbl(f)
  # Ensure presence of zero densities
  x_tbl <- ground_x_tbl(x_tbl)
  # One output "discrete" value is computed as mean of two consecutive "x"s with
  # zero density. Each "x" corresponds to only one "discrete" value counting
  # from left: x_1 and x_2 are used to compute first "discrete" value; x_3 and
  # x_4 - second, and so on. Probability of "discrete" value is computed as
  # difference in cumulative probabilities between corresponding right and left
  # "x" values.
  y_is_zero <- is_zero(x_tbl[["y"]])
  n_y_zero <- sum(y_is_zero)
  dis_groups <- rep(seq_len(n_y_zero), each = 2, length.out = n_y_zero)
  new_x <- tapply(x_tbl[["x"]][y_is_zero], dis_groups, mean)
  # Remove dimnames
  new_x <- as.vector(new_x)
  new_prob <- tapply(
    x_tbl[["cumprob"]][y_is_zero],
    dis_groups,
    function(cumprob) {
      # This custom function is used instead of `diff()` if, for some reason,
      # there are odd number of zero density rows in `x_tbl`. In that case
      # output probability is zero.
      cumprob[length(cumprob)] - cumprob[1]
    })
  # Remove dimnames
  new_prob <- as.vector(new_prob)
  new_pdqr_by_ref(f)(data.frame(x = new_x, prob = new_prob), "discrete")
}
# Convert `f` to "continuous" type; an already-"continuous" `f` is returned
# as is.
retype_con <- function(f, method) {
  if (meta_type(f) == "continuous") {
    return(f)
  }
  switch(
    method,
    value = retype_con_value(f),
    piecelin = retype_con_piecelin(f),
    dirac = retype_con_dirac(f)
  )
}
# "value" retyping to "continuous": keep "x", reuse "prob" as density values.
retype_con_value <- function(f) {
  x_tbl <- meta_x_tbl(f)
  # Renormalization of "y" column will be done inside `new_*()` function
  new_x_tbl <- data.frame(x = x_tbl[["x"]], y = x_tbl[["prob"]])
  new_pdqr_by_ref(f)(new_x_tbl, type = "continuous")
}
# "piecelin" retyping to "continuous": approximately reconstructs an `x` grid
# for which input "x" values would be interval centers of mass. Errors when
# fewer than 4 unique "x" values are available.
retype_con_piecelin <- function(f) {
  # Note that `f` has already passed `assert_pdqr_fun()` which means that "x"
  # column in "x_tbl" metadata is sorted and has no duplicate values
  x_tbl <- meta_x_tbl(f)
  n <- nrow(x_tbl)
  if (n < 4) {
    stop_collapse(
      'For conversion to "continuous" type `form_retype()` needs at least 4 ',
      'unique "x" values.'
    )
  }
  x <- x_tbl[["x"]]
  prob <- x_tbl[["prob"]]
  # Values of `x` grid (except first and last elements) of "continuous" output
  # is approximated as convex combination of nearest "centers of mass":
  # `x_mid = (1-alpha)*x_mass_left + alpha*x_mass_right`. Here `alpha` is
  # approximated based on two separate assumptions:
  # *Assumption 1*: if locally `y_left` (`y` value in "true" `x` to the left of
  # `x_mid`) = `y_mid` = `y_right` (by analogy with `y_left`), then
  # `alpha = prob_left / (prob_left + prob_right)`.
  # *Assumption 2*: if locally "true" values of `x` are equidistant then `alpha`
  # lie inside [1/3; 2/3] interval.
  # Final approximation is formed by combining these conclusions
  prob_sum <- prob[1:(n - 1)] + prob[2:n]
  alpha <- pmin(pmax(prob[1:(n - 1)] / prob_sum, 1 / 3), 2 / 3)
  # Non-finite `alpha` (zero `prob_sum`) falls back to the midpoint
  alpha[!is.finite(alpha)] <- 0.5
  x_grid <- numeric(n + 1)
  x_grid[2:n] <- (1 - alpha) * x[1:(n - 1)] + alpha * x[2:n]
  # First and last `x` are approximated so that first and last `x` triplets are
  # equidistant
  x_grid[1] <- x_grid[2] - (x_grid[3] - x_grid[2])
  x_grid[n + 1] <- x_grid[n] + (x_grid[n] - x_grid[n - 1])
  # Output `y` grid is approximated in 'pdqr' fashion
  p_grid <- c(0, cumsum(prob))
  y <- y_from_p_grid(x_grid, p_grid)
  # Creating pdqr-function
  pdqr_fun <- new_pdqr_by_ref(f)
  pdqr_fun(data.frame(x = x_grid, y = y), "continuous")
}
# "dirac" retyping to "continuous": replace each "x" value with a narrow
# triangular spike of half-width `h` (shrunk where neighbors are closer than
# `2 * h`) carrying that value's probability mass.
retype_con_dirac <- function(f, h = 1e-8) {
  x_tbl <- meta_x_tbl(f)
  x <- x_tbl[["x"]]
  half_diff_x <- diff(x) / 2
  # Vector of "dirac" radius values
  left_h_vec <- pmin(h, c(h, half_diff_x))
  right_h_vec <- pmin(h, c(half_diff_x, h))
  h_vec <- pmin(left_h_vec, right_h_vec)
  y_zero <- rep(0, length(x))
  new_x <- c(x - h_vec, x, x + h_vec)
  new_y <- c(y_zero, x_tbl[["prob"]] / h_vec, y_zero)
  new_pdqr_by_ref(f)(data.frame(x = new_x, y = new_y), "continuous")
}
|
# Exercise 5: large data sets: Baby Name Popularity Over Time
# Read in the female baby names data file found in the `data` folder into a
# variable called `names`. Remember to NOT treat the strings as factors!
# NOTE(review): `names` shadows base::names() in the global environment;
# kept as-is because later code in this script refers to it by this name.
names <- read.csv("data/female_names.csv", stringsAsFactors = FALSE)
# Create a data frame `names_2013` that contains only the rows for the year 2013
names_2013 <- data.frame(names[names$year == "2013", ])
# What was the most popular female name in 2013?
# Returns every name tied for the maximum proportion in 2013.
names_2013[names_2013$prop == max(names_2013$prop), "name"]
# Write a function `most_popular_in_year` that takes in a year as a value and
# returns the most popular name in that year
#
# `data` defaults to the global `names` data frame (backward compatible) but
# can now be supplied explicitly, removing the hidden global dependency.
# Returns every name tied for the maximum proportion in `year2`.
most_popular_in_a_year <- function(year2, data = names) {
  # Subsetting a data frame already yields a data frame; no wrapper needed
  names_in_year <- data[data$year == year2, ]
  names_in_year[names_in_year$prop == max(names_in_year$prop), "name"]
}
# What was the most popular female name in 1994?
# Uses the global `names` data frame loaded above.
most_popular_in_a_year(1994)
# Write a function `number_in_million` that takes in a name and a year, and
# returns statistically how many babies out of 1 million born that year have
# that name.
# Hint: get the popularity percentage, and take that percentage out of 1 million.
# Number of babies per one million births in `year2` that have name `name2`.
# `data` defaults to the global `names` data frame (backward compatible) but
# can now be supplied explicitly, removing the hidden global dependency.
number_in_million <- function(name2, year2, data = names) {
  name_prop <- data[data$name == name2 & data$year == year2, "prop"]
  # `prop` is a fraction of all births; scale to births per one million
  name_prop * 1000000
}
# How many babies out of 1 million had the name 'Laura' in 1995?
# Uses the global `names` data frame loaded above.
number_in_million("Laura", 1995)
# How many babies out of 1 million had your name in the year you were born?
## Consider: what does this tell you about how easy it is to identify you with
## just your name and birth year?
| /exercise-5/exercise.R | permissive | tleung22/ch9-data-frames | R | false | false | 1,630 | r | # Exercise 5: large data sets: Baby Name Popularity Over Time
# Read in the female baby names data file found in the `data` folder into a
# variable called `names`. Remember to NOT treat the strings as factors!
names <- read.csv("data/female_names.csv", stringsAsFactors = FALSE)
# Create a data frame `names_2013` that contains only the rows for the year 2013
names_2013 <- data.frame(names[names$year == "2013", ])
# What was the most popular female name in 2013?
names_2013[names_2013$prop == max(names_2013$prop), "name"]
# Write a function `most_popular_in_year` that takes in a year as a value and
# returns the most popular name in that year
most_popular_in_a_year <- function(year2) {
names_in_year <- data.frame(names[names$year == year2, ])
most_pop_name <- names_in_year[names_in_year$prop == max(names_in_year$prop), "name"]
most_pop_name
}
# What was the most popular female name in 1994?
most_popular_in_a_year(1994)
# Write a function `number_in_million` that takes in a name and a year, and
# returns statistically how many babies out of 1 million born that year have
# that name.
# Hint: get the popularity percentage, and take that percentage out of 1 million.
number_in_million <- function(name2, year2) {
name_prop <- names[names$name == name2 & names$year == year2, "prop"]
name_prop * 1000000
}
# How many babies out of 1 million had the name 'Laura' in 1995?
number_in_million("Laura", 1995)
# How many babies out of 1 million had your name in the year you were born?
## Consider: what does this tell you about how easy it is to identify you with
## just your name and birth year?
|
# Getting-and-Cleaning-Data course project: download the UCI HAR dataset,
# merge train/test sets, keep mean/std features, label activities, and write
# a tidy summary with per-subject, per-activity averages.
library(plyr)
# Download the file from the internet into the working directory.
# FIX: method="internal" cannot fetch https URLs (and is Windows-only);
# "auto" lets R pick a working method on any platform.
internetFile <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(internetFile, destfile = "data.zip", method = "auto")
unzip(zipfile = "data.zip")
# Set the path to the extracted dataset
filePath <- "UCI HAR Dataset"
# 1. Merge the training and the test sets to create one data set.
# Read in all necessary data from files
activityTest <- read.table(file.path(filePath, "test", "Y_test.txt"), header = FALSE)
activityTrain <- read.table(file.path(filePath, "train", "Y_train.txt"), header = FALSE)
featuresTest <- read.table(file.path(filePath, "test", "X_test.txt"), header = FALSE)
featuresTrain <- read.table(file.path(filePath, "train", "X_train.txt"), header = FALSE)
subjectTrain <- read.table(file.path(filePath, "train", "subject_train.txt"), header = FALSE)
subjectTest <- read.table(file.path(filePath, "test", "subject_test.txt"), header = FALSE)
# FIX: spell out `header` instead of relying on partial matching (`head=`)
featuresNames <- read.table(file.path(filePath, "features.txt"), header = FALSE)
# rbind test and train together into one set per component
activity <- rbind(activityTrain, activityTest)
names(activity) <- c("activity")
features <- rbind(featuresTrain, featuresTest)
names(features) <- featuresNames$V2
subject <- rbind(subjectTrain, subjectTest)
names(subject) <- c("subject")
data <- cbind(activity, subject, features)
# 2. Extract only the measurements on the mean and standard deviation.
# Use grep to get any feature names that contain mean() or std()
subFeaturesNames <- featuresNames$V2[grep("mean\\(\\)|std\\(\\)", featuresNames$V2)]
selectedNames <- c(as.character(subFeaturesNames), "subject", "activity")
data <- subset(data, select = selectedNames)
# 3. Use descriptive activity names from activity_labels.txt
descriptiveName <- read.table(file.path(filePath, "activity_labels.txt"), header = FALSE)
colnames(descriptiveName) <- "activity"
data <- merge(data, descriptiveName, by = "activity", all.x = TRUE)
# Drop the numeric activity column and rename the merged label column
data$activity <- NULL
names(data)[ncol(data)] <- "activity"
# 4. Appropriately label the data set with descriptive variable names.
# Use gsub to replace abbreviated matches with full-length words
names(data) <- gsub("^t", "Time", names(data))
names(data) <- gsub("^f", "Frequency", names(data))
names(data) <- gsub("Acc", "Accelerometer", names(data))
names(data) <- gsub("Gyro", "Gyroscope", names(data))
names(data) <- gsub("Mag", "Magnitude", names(data))
names(data) <- gsub("BodyBody", "Body", names(data))
# 5. Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
data2 <- aggregate(. ~ subject + activity, data, mean)
data2 <- data2[order(data2$subject, data2$activity), ]
write.table(data2, file = "tidydata.txt", row.name = FALSE)
| /run_analysis.R | no_license | chungkster/Getting-and-Cleaning-Data | R | false | false | 3,099 | r | library(plyr);
#Download the file from the internet into the working directory
# NOTE(review): method="internal" cannot fetch https URLs and is
# Windows-only; "auto" would be more portable — confirm before changing.
internetFile <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(internetFile,destfile="data.zip",method="internal")
unzip(zipfile="data.zip")
#Set the path
filePath <- "UCI HAR Dataset"
#1. INSTRUCTIONS: Merges the training and the test sets to create one data set.
#Read in all necessary Data from files
activityTest <- read.table(file.path(filePath , "test" , "Y_test.txt" ), header = FALSE)
activityTrain <- read.table(file.path(filePath, "train", "Y_train.txt"), header = FALSE)
featuresTest <- read.table(file.path(filePath, "test" , "X_test.txt" ), header = FALSE)
featuresTrain <- read.table(file.path(filePath, "train", "X_train.txt"), header = FALSE)
subjectTrain <- read.table(file.path(filePath, "train", "subject_train.txt"), header = FALSE)
subjectTest <- read.table(file.path(filePath, "test" , "subject_test.txt"), header = FALSE)
# NOTE(review): `head=FALSE` relies on partial argument matching; spell out
# `header = FALSE` for clarity.
featuresNames <- read.table(file.path(filePath, "features.txt"), head=FALSE)
#Get Data from files and rbind test and train together into one set
activity <- rbind(activityTrain, activityTest)
names(activity) <- c("activity")
features <- rbind(featuresTrain, featuresTest)
names(features) <- featuresNames$V2
subject <- rbind(subjectTrain, subjectTest)
names(subject) <-c("subject")
data <- cbind(activity, subject, features)
#2. INSTRUCTIONS: Extracts only the measurements on the mean and standard deviation for each measurement.
#Use grep to get any feature names that contain mean() or std()
subFeaturesNames <- featuresNames$V2[grep("mean\\(\\)|std\\(\\)", featuresNames$V2)]
selectedNames <- c(as.character(subFeaturesNames), "subject", "activity" )
data <- subset(data,select=selectedNames)
#3. INSTRUCTIONS: Uses descriptive activity names to name the activities in the data set
#Get the descriptions from activity_labels.txt
descriptiveName <- read.table(file.path(filePath, "activity_labels.txt"),header = FALSE)
colnames(descriptiveName) <- "activity"
data = merge(data,descriptiveName,by='activity',all.x=TRUE);
#drop column activity and rename the new column to activity
data$activity <- NULL
names(data)[ncol(data)] <- "activity"
#4. INSTRUCTIONS: Appropriately labels the data set with descriptive variable names.
#Use gsub to replace abbreviated matches with full length words
names(data) <- gsub("^t", "Time", names(data))
names(data) <- gsub("^f", "Frequency", names(data))
names(data) <- gsub("Acc", "Accelerometer", names(data))
names(data) <- gsub("Gyro", "Gyroscope", names(data))
names(data) <- gsub("Mag", "Magnitude", names(data))
names(data) <- gsub("BodyBody", "Body", names(data))
#5. INSTRUCTIONS: From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#get the mean for each subject for each of the six possible activities
data2 <- aggregate(. ~subject + activity, data, mean)
data2 <- data2[order(data2$subject,data2$activity),]
write.table(data2, file = "tidydata.txt",row.name=FALSE)
|
ch31=read.csv("D:/r-SEM/data/ch31.csv")
fit<-lm(formula=y~x1+x2, data=ch31)
summary(fit) | /data/ch3-1.R | no_license | Finfra/r-sem | R | false | false | 92 | r | ch31=read.csv("D:/r-SEM/data/ch31.csv")
fit<-lm(formula=y~x1+x2, data=ch31)
summary(fit) |
# R script to find differentially expressed genes using the Wilcoxon
# rank-sum test (stats::wilcox.test); limma/edgeR are attached but only
# base/stats functions are actually used below.
library("limma")
library("edgeR")
# Expression matrix: column 1 = gene id, columns 2:42 = 41 sample columns.
d<-read.csv("CPM_filtered_outlier.csv",header = T)
RPM <- d[,2:42]
rownames(RPM)<-d[,1]
# Keep genes expressed (> 0) in at least 9 of the 41 samples.
keep <- rowSums(RPM>0)>=9
RPM <- RPM[keep,]
# Sample layout: first 30 columns HCC tumours, last 11 normals.
# NOTE(review): this ordering is assumed from the input file, not checked -- confirm.
group <- c(rep("HCC",30),rep("Normal",11))
RPM_HCC <- RPM[,1:30]
RPM_Normal <- RPM[,31:41]
# Per-gene mean expression within each group.
RPM_HCC_avg <- apply(RPM_HCC,1,mean)
RPM_Normal_avg <- apply(RPM_Normal,1,mean)
RPM_Avg <- cbind(RPM_HCC_avg,RPM_Normal_avg)
write.csv(RPM_Avg,file = "RPM_average.csv",row.names = T)
# Wilcoxon rank-sum p-value for one gene (one row of RPM):
# HCC columns (1:30) vs normal columns (31:41).
myFun <- function(x){
x = as.vector(x)
v1 = x[1:30]
v2 = x[31:41]
out <- wilcox.test(v1,v2)
out <- out$p.value
}
p_value <- apply(RPM,1,myFun)
# Log2 fold changes; presumably derived offline from RPM_average.csv --
# this file is read but never produced by this script, verify upstream step.
d2<-read.csv("RPM_average_log2FC.csv",header = T)
Result <- d2[,2:4]
Result<- cbind(Result,p_value)
# Benjamini-Hochberg multiple-testing correction on the p-value column.
FDR <- p.adjust(Result[,4],method = "BH")
Result <- cbind(Result,FDR)
write.csv(Result,file = "wilcoxon_result.csv",row.names = T)
# Significant genes at FDR < 0.05, flagged up/down by the sign of log2FC.
FDR_0.05 = Result[Result$FDR<0.05,]
FDR_0.05$up_down = ifelse(as.numeric(FDR_0.05$log2FoldChange) > 0,"up","down")
write.csv(FDR_0.05,file = "wilcoxon_FDR_0.05.csv",row.names = T)
# Differentially expressed genes: |log2FC| >= 1 among the FDR-significant set.
up_gene <- FDR_0.05[as.numeric(FDR_0.05$log2FoldChange) >= 1,]
down_gene <- FDR_0.05[as.numeric(FDR_0.05$log2FoldChange) <= -1,]
dim(up_gene)
dim(down_gene)
write.csv(up_gene,file = "wilcoxon_up.csv",row.names = T)
write.csv(down_gene,file = "wilcoxon_down.csv",row.names = T)
| /upload/3.diff_exp/16-Siqi-RPM_Wilcoxon_Differentail_expression_genes.sh | no_license | YuminTHU/training | R | false | false | 1,361 | sh | # R script to find differentially expressed genes by R package wilcoxon.
library("limma")
library("edgeR")
d<-read.csv("CPM_filtered_outlier.csv",header = T)
RPM <- d[,2:42]
rownames(RPM)<-d[,1]
keep <- rowSums(RPM>0)>=9
RPM <- RPM[keep,]
group <- c(rep("HCC",30),rep("Normal",11))
RPM_HCC <- RPM[,1:30]
RPM_Normal <- RPM[,31:41]
RPM_HCC_avg <- apply(RPM_HCC,1,mean)
RPM_Normal_avg <- apply(RPM_Normal,1,mean)
RPM_Avg <- cbind(RPM_HCC_avg,RPM_Normal_avg)
write.csv(RPM_Avg,file = "RPM_average.csv",row.names = T)
myFun <- function(x){
x = as.vector(x)
v1 = x[1:30]
v2 = x[31:41]
out <- wilcox.test(v1,v2)
out <- out$p.value
}
p_value <- apply(RPM,1,myFun)
d2<-read.csv("RPM_average_log2FC.csv",header = T)
Result <- d2[,2:4]
Result<- cbind(Result,p_value)
FDR <- p.adjust(Result[,4],method = "BH")
Result <- cbind(Result,FDR)
write.csv(Result,file = "wilcoxon_result.csv",row.names = T)
FDR_0.05 = Result[Result$FDR<0.05,]
FDR_0.05$up_down = ifelse(as.numeric(FDR_0.05$log2FoldChange) > 0,"up","down")
write.csv(FDR_0.05,file = "wilcoxon_FDR_0.05.csv",row.names = T)
up_gene <- FDR_0.05[as.numeric(FDR_0.05$log2FoldChange) >= 1,]
down_gene <- FDR_0.05[as.numeric(FDR_0.05$log2FoldChange) <= -1,]
dim(up_gene)
dim(down_gene)
write.csv(up_gene,file = "wilcoxon_up.csv",row.names = T)
write.csv(down_gene,file = "wilcoxon_down.csv",row.names = T)
|
# filtrado intermedio
filterNum <- function (x, min = NULL, max = NULL){
x1 <- x[!is.na(x)]
if (!is.null(min))
x1 <- x1 [x1 >= min ]
if (!is.null(max))
x1 <- x1 [x1 <= max ]
return (as.numeric(x1))
} | /Datasources/src/lib/filter_num.R | no_license | esterniclos/tfm_market_analysis_with_clustering | R | false | false | 233 | r | # filtrado intermedio
# Drop NA values from a vector and optionally restrict it to [min, max].
# Values outside the bounds are removed (not clamped); the survivors are
# coerced with as.numeric() before being returned.
filterNum <- function(x, min = NULL, max = NULL) {
  kept <- x[!is.na(x)]
  if (!is.null(min)) {
    kept <- kept[kept >= min]
  }
  if (!is.null(max)) {
    kept <- kept[kept <= max]
  }
  as.numeric(kept)
}
ui <- navbarPage(title="UK Covid Data Visualised",
tabPanel(title="Maps",
fluidRow(
column(3,
wellPanel(
h4("Configurations"),
radioButtons("dataRadioType",
h5("Data"),
choices = list("Daily Cases"=1,
"Cumulative Deaths"=2),
selected=1),
sliderInput("dateSlider",
h5("Date"),
min=min_date,
max=max_date,
value=max_date,
ticks=FALSE,
timeFormat="%Y-%m-%d")
),
),
column(9,
leafletOutput("ukMapPlot")
)
),
),
tabPanel(title="Comparisons",
fluidRow(
column(3,
wellPanel(
h4("Configurations"),
radioButtons("authorityDataRadioType",
h5("Authorities Scatter Options"),
choices = list("Cumulative Cases"=1,
"Cumulative Deaths"=2),
selected=1),
hr(),
helpText("This displays daily cases only"),
selectInput("authoritySelect",
h5("Authorities"),
choices=authoritiesList,
selected=authoritiesList[1]),
hr(),
radioButtons("regionDataRadioType",
h5("Region Options"),
choices = list("Cumulative Cases"=1,
"Cumulative Deaths"=2),
selected=1),
selectInput("regionSelect",
h5("Regions"),
choices = list("England",
"Scotland",
"Wales",
"Northern Ireland"),
selected="England")
)
),
column(9,
fluidRow(
plotOutput("authorityScatterPlot")
),
fluidRow(
plotOutput("authorityLinePlot")
),
fluidRow(
plotOutput("regionPlot")
)
)
)
),
tabPanel(title="About",
fluidRow(
column(12,
h3("Visualising Covid Data Using R - By Milovan Gveric"),
h4("My first data science project where I aimed to use data
that was currently relevant and visualise it in a few interesting
ways. The source code for the project can be seen ",
tags$a(href="https://github.com/Unknown807/", "here"))
)
)
),
tags$head(includeCSS("styles\\styles.css"))
) | /ui.R | no_license | Unknown807/Interactive-UK-Covid-Data | R | false | false | 3,485 | r |
# Top-level Shiny UI: a navbar with three tabs (Maps, Comparisons, About).
# NOTE(review): min_date, max_date and authoritiesList must already be
# defined elsewhere in the app (e.g. global.R) before this runs -- confirm.
ui <- navbarPage(title="UK Covid Data Visualised",
                 # Tab 1: leaflet map with metric and date controls.
                 tabPanel(title="Maps",
                          fluidRow(
                            column(3,
                                   wellPanel(
                                     h4("Configurations"),
                                     # Which metric to draw on the map.
                                     radioButtons("dataRadioType",
                                                  h5("Data"),
                                                  choices = list("Daily Cases"=1,
                                                                 "Cumulative Deaths"=2),
                                                  selected=1),
                                     # Date shown on the map; defaults to the latest date.
                                     sliderInput("dateSlider",
                                                 h5("Date"),
                                                 min=min_date,
                                                 max=max_date,
                                                 value=max_date,
                                                 ticks=FALSE,
                                                 timeFormat="%Y-%m-%d")
                                   ),
                          ),
                          column(9,
                                 leafletOutput("ukMapPlot")
                          )
                          ),
                 ),
                 # Tab 2: per-authority and per-region comparison plots.
                 tabPanel(title="Comparisons",
                          fluidRow(
                            column(3,
                                   wellPanel(
                                     h4("Configurations"),
                                     radioButtons("authorityDataRadioType",
                                                  h5("Authorities Scatter Options"),
                                                  choices = list("Cumulative Cases"=1,
                                                                 "Cumulative Deaths"=2),
                                                  selected=1),
                                     hr(),
                                     helpText("This displays daily cases only"),
                                     selectInput("authoritySelect",
                                                 h5("Authorities"),
                                                 choices=authoritiesList,
                                                 selected=authoritiesList[1]),
                                     hr(),
                                     radioButtons("regionDataRadioType",
                                                  h5("Region Options"),
                                                  choices = list("Cumulative Cases"=1,
                                                                 "Cumulative Deaths"=2),
                                                  selected=1),
                                     selectInput("regionSelect",
                                                 h5("Regions"),
                                                 choices = list("England",
                                                                "Scotland",
                                                                "Wales",
                                                                "Northern Ireland"),
                                                 selected="England")
                                   )
                            ),
                            column(9,
                                   # Three stacked plots rendered by the server.
                                   fluidRow(
                                     plotOutput("authorityScatterPlot")
                                   ),
                                   fluidRow(
                                     plotOutput("authorityLinePlot")
                                   ),
                                   fluidRow(
                                     plotOutput("regionPlot")
                                   )
                            )
                          )
                 ),
                 # Tab 3: static project description with a link to the source.
                 tabPanel(title="About",
                          fluidRow(
                            column(12,
                                   h3("Visualising Covid Data Using R - By Milovan Gveric"),
                                   h4("My first data science project where I aimed to use data
                     that was currently relevant and visualise it in a few interesting
                     ways. The source code for the project can be seen ",
                                      tags$a(href="https://github.com/Unknown807/", "here"))
                            )
                          )
                 ),
                 # App-wide stylesheet.
                 tags$head(includeCSS("styles\\styles.css"))
) |
# Simple calculator demo: basic arithmetic, powers/roots and trigonometry
# (trig arguments are taken in degrees). Each function is exercised once.

sum_num <- function(x, y) {
  x + y
}
sum_num(5, 10)
sub_num <- function(x, y) {
  x - y
}
sub_num(5, 2)
mult_num <- function(x, y) {
  x * y
}
mult_num(25, 3)
div_num <- function(x, y) {
  x / y
}
div_num(100, 2)
exp_num <- function(x, y) {
  x ^ y
}
exp_num(4, 3)
sqrt_num <- function(x) {
  x ^ (1 / 2)
}
sqrt_num(36)
cube_num <- function(x) {
  x ^ 3
}
cube_num(2)
pow_num <- function(x) {
  x ^ 2
}
pow_num(5)
# Sine of an angle given in degrees.
sine <- function(x) {
  sin(x * pi / 180)
}
sine(15)
# Cosine of an angle given in degrees.
# BUG FIX: the argument was previously ignored -- the body was hard-coded
# to cos(120*pi/180), so cosine(20) returned cos(120 degrees).
cosine <- function(x) {
  cos(x * pi / 180)
}
cosine(20) | /CA05/calculatorR.R | no_license | jollyroger78/programming_big_data_rp | R | false | false | 587 | r | sum_num <- function(x,y){
x + y
}
sum_num(5, 10)
# One-line arithmetic helpers, each demonstrated with a sample call.
sub_num <- function(x, y) x - y
sub_num(5, 2)
mult_num <- function(x, y) x * y
mult_num(25, 3)
div_num <- function(x, y) x / y
div_num(100, 2)
exp_num <- function(x, y) x ^ y
exp_num(4, 3)
sqrt_num <- function(x) x ^ 0.5
sqrt_num(36)
cube_num <- function(x) x ^ 3
cube_num(2)
pow_num <- function(x) x ^ 2
pow_num(5)
# Sine of an angle expressed in degrees.
sine <- function(x) sin(x * pi / 180)
sine(15)
# Cosine of an angle given in degrees.
# BUG FIX: the body previously ignored x and always returned cos(120*pi/180).
cosine <- function(x) {
  cos(x * pi / 180)
}
cosine(20) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cellalign.R
\name{gene_test_plot}
\alias{gene_test_plot}
\title{Plot Gene expression over pseudotime on reference and query trajectories from cellAlign}
\usage{
gene_test_plot(
expGlobalRef,
expGlobalQuery,
trajRef,
trajQuery,
winSz = 0.1,
numPts = 200
)
}
\arguments{
\item{numPts}{}
}
\value{
}
\description{
Plot Gene expression over pseudotime on reference and query trajectories from cellAlign
}
| /man/gene_test_plot.Rd | permissive | mitsingh/seuratTools | R | false | true | 492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cellalign.R
\name{gene_test_plot}
\alias{gene_test_plot}
\title{Plot Gene expression over pseudotime on reference and query trajectories from cellAlign}
\usage{
gene_test_plot(
expGlobalRef,
expGlobalQuery,
trajRef,
trajQuery,
winSz = 0.1,
numPts = 200
)
}
\arguments{
\item{numPts}{}
}
\value{
}
\description{
Plot Gene expression over pseudotime on reference and query trajectories from cellAlign
}
|
library(distrMod)
### Name: FunSymmList-class
### Title: List of Symmetries for a List of Functions
### Aliases: FunSymmList-class
### Keywords: classes
### ** Examples
new("FunSymmList", list(NonSymmetric(), EvenSymmetric(SymmCenter = 1),
OddSymmetric(SymmCenter = 2)))
| /data/genthat_extracted_code/distrMod/examples/FunSymmList-class.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 303 | r | library(distrMod)
### Name: FunSymmList-class
### Title: List of Symmetries for a List of Functions
### Aliases: FunSymmList-class
### Keywords: classes
### ** Examples
new("FunSymmList", list(NonSymmetric(), EvenSymmetric(SymmCenter = 1),
OddSymmetric(SymmCenter = 2)))
|
# Author: Robert J. Hijmans
# Date : December 2011
# Version 1.0
# Licence GPL v3
setMethod("select", signature(x="SpatRaster"),
function(x, ...) {
e <- draw(...)
int <- intersect(e, ext(x))
if (is.null(int)) {
x <- NULL
} else {
x <- crop(x, e)
}
x
}
)
#setMethod("select", signature(x="SpatVector"),
# function(x, use="rec", draw=TRUE, col="cyan", size=2, ...) {
# use <- substr(tolower(use), 1, 3)
# stopifnot(use %in% c("rec", "pol"))
# if (use == "rec") {
# e <- as(drawExtent(), "SpatialPolygons")
# } else {
# e <- draw("pol")
# }
# intersect(x, e)
# }
#)
| /terra/R/select.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 639 | r | # Author: Robert J. Hijmans
# Date : December 2011
# Version 1.0
# Licence GPL v3
setMethod("select", signature(x="SpatRaster"),
function(x, ...) {
e <- draw(...)
int <- intersect(e, ext(x))
if (is.null(int)) {
x <- NULL
} else {
x <- crop(x, e)
}
x
}
)
#setMethod("select", signature(x="SpatVector"),
# function(x, use="rec", draw=TRUE, col="cyan", size=2, ...) {
# use <- substr(tolower(use), 1, 3)
# stopifnot(use %in% c("rec", "pol"))
# if (use == "rec") {
# e <- as(drawExtent(), "SpatialPolygons")
# } else {
# e <- draw("pol")
# }
# intersect(x, e)
# }
#)
|
# Genotypes QC
# plot 1: histogram of genotype-prediction classes across all samples.
# NOTE(review): `tabp` must already exist in the session (a per-sample
# genotype table with at least pred_hard and GT columns) -- confirm upstream.
library(ggplot2)
library(dplyr)
#theme_set(theme_classic())
#tab2 = tab
tab=tabp
# Drop rows without a hard genotype prediction.
tab=tab[!(tab$pred_hard=='./.'),]
# Classify each call; the a/b/c/z prefixes force the x-axis plotting order.
tab$simple = 'ccomplex'
tab[tab$GT %in% c('0|0', '1|0', '0|1', '1|1'),]$simple = 'asimple'
tab[tab$GT %in% c('noreads'),]$simple = 'zeroreads'
# (duplicate of the previous assignment; harmless)
tab[tab$GT %in% c('noreads'),]$simple = 'zeroreads'
# Low-confidence calls: complex by default, simple when a plain GT + suffix.
tab[endsWith(tab$GT, '_lowconf'),]$simple = 'complex_lowconf'
tab[tab$GT %in% c('0|0_lowconf', '1|0_lowconf', '0|1_lowconf', '1|1_lowconf'),]$simple = 'bsimple_lowconf'
tab$verdict = tab$simple
# Histogram on a Continuous (Numeric) Variable
#tab$GT = tab$GTs
g <- ggplot(tab, aes(verdict))# + scale_fill_brewer(palette = "Spectral")
g + geom_histogram(aes(fill=GT),
                   col="black",
                   size=.1,
                   stat='count') + # change number of bins
  labs(title="Genotype predictions for 323 inversions in many samples")
#### mendel ####
#' Build the lookup of Mendelian-consistent child genotypes.
#'
#' Keys are "parent1 parent2" genotype pairs (genotypes normalised to
#' 0|0 / 0|1 / 1|1); values are the child genotypes compatible with that pair.
#' @return child_expect, a named list describing expected child genotypes given the parents.
#' @author Wolfram Hoeps
make_child_expect_vector <- function(){
  # Enumerate all nine ordered parent combinations explicitly.
  list(
    '0|0 0|0' = c('0|0'),
    '0|0 0|1' = c('0|0', '0|1'),
    '0|0 1|1' = c('0|1'),
    '0|1 0|0' = c('0|0', '0|1'),
    '0|1 0|1' = c('0|0', '0|1', '1|1'),
    '0|1 1|1' = c('0|1', '1|1'),
    '1|1 0|0' = c('0|1'),
    '1|1 0|1' = c('0|1', '1|1'),
    '1|1 1|1' = c('1|1')
  )
}
ce = make_child_expect_vector()
#' Check whether a child genotype is Mendelian-consistent with its parents.
#'
#' Genotypes are truncated to their first three characters (so "_lowconf"
#' suffixes are tolerated) and 1|0 is folded into 0|1 before lookup in `ce`.
#' Any genotype outside 0|0 / 0|1 / 1|1 after normalisation yields FALSE.
test_mendel <- function(ce, gt_parent1, gt_parent2, gt_child){
  # Normalise one genotype string: keep the first 3 chars, fold 1|0 into 0|1.
  normalise <- function(gt) gsub('1\\|0', '0\\|1', substr(gt, 1, 3))
  p1 <- normalise(gt_parent1)
  p2 <- normalise(gt_parent2)
  child <- normalise(gt_child)
  valid_gts <- c('0|0', '0|1', '1|1')
  if (p1 %in% valid_gts && p2 %in% valid_gts && child %in% valid_gts) {
    # Valid trio: consult the expectation table for this parent pair.
    return(child %in% ce[[paste(p1, p2)]])
  }
  FALSE
}
#c2 = callmatrix
callmatrix = c2
callmatrix[callmatrix=='./.']='0|0'
#callmatrix = c2[c2$verdict=='pass',]
callmatrix$mendel1 = 'UNK'
callmatrix$mendel2 = 'UNK'
callmatrix$mendel3 = 'UNK'
callmatrix$mendel4 = 'UNK'
for (row in 1:nrow(callmatrix)){
#callmatrix[row,]$mendel =
callmatrix[row,]$mendel1 = as.logical(test_mendel(ce, callmatrix[row,]$NA19238, callmatrix[row,]$NA19239,callmatrix[row,]$NA19240 ))
callmatrix[row,]$mendel2 = as.logical(test_mendel(ce, callmatrix[row,]$HG00512, callmatrix[row,]$HG00513,callmatrix[row,]$HG00514 ))
callmatrix[row,]$mendel3 = as.logical(test_mendel(ce, callmatrix[row,]$HG00731, callmatrix[row,]$HG00732,callmatrix[row,]$HG00733 ))
callmatrix[row,]$mendel4 = as.logical(test_mendel(ce, callmatrix[row,]$GM19650, callmatrix[row,]$HG00864,callmatrix[row,]$HG03371 ))
}
ctest = callmatrix
ct2 = ctest[,c('chr','verdict','HG00731','HG00732','HG00733','HG00512','HG00513','HG00514','NA19238','NA19239','NA19240','mendel1','mendel2','mendel3')]
ct3 = ctest[,c('HG00731','HG00732','HG00733','HG00512','HG00513','HG00514','NA19238','NA19239','NA19240')]
acceptable = c('0|0', '1|0', '0|1', '1|1', '0|0_lowconf', '1|0_lowconf', '0|1_lowconf', '1|1_lowconf')
ct4 = ct3[ct3 %in% acceptable,]
#ct3 = ct2[]
#callmatrix[callmatrix$mendel]
#callmatrix$mendelall = callmatrix$mendel1 && callmatrix$mendel2 && callmatrix$mendel3
# 'FALSE' %in% callmatrix[,c('mendel1','mendel2','mendel3')]
#
mendelall = callmatrix %>% mutate(mm = as.logical(mendel1) * as.logical(mendel2) * as.logical(mendel3))
#mendelall = callmatrix %>% mutate(mm = as.numeric(mendel1) + 1)
# test_mendel(ce, p1, p2, c)
# # First trio
# Histogram on a Continuous (Numeric) Variable
g <- ggplot(mendelall, aes()) + scale_fill_brewer(palette = "Spectral")
g + geom_histogram(aes(fill=as.logical(mm), x=mm),
col="black",
size=.1,
stat='count') + # change number of bins
labs(title="Genotype predictions for 255 inversions in 31 samples")
ctest = callmatrix[,c('HG00512','HG00513','HG00514','HG00731','HG00732','HG00733','NA19238','NA19239','NA19240','mendel1','mendel2','mendel3')]
#### compare to david's calls
david_list = '~/PhD/projects/huminvs/mosaicatcher/bed_factory/revision/david_n35_323/nonred_inversions_n35_genotypes.csv'
dgt = read.table(david_list, header=T, sep=',')
#sth else
dq = '~/Desktop/desktop_5th_oct/davidquick.txt'
d = read.table(dq, header=T)
aa = as.data.frame.matrix(table(d$notes, d$phase.pass))
aa$pct = aa[['TRUE']] / (aa[['FALSE']] + aa[['TRUE']])
aa$names = row.names(aa)
aa = aa[order(aa$pct),]
colnames(aa) = c('fail','pass','pct','names')
aa$good = c('bad','nomap','bad','bad','good','bad','good','good','good','bad','bad','good')
dodgewidth <- position_dodge(width=0.9)
ggplot(data=aa, aes(x=names, y=pct), ylim=c(0,1)) + geom_bar(stat='identity', aes(fill=good)) +
geom_text(aes(label = fail+pass, x = names, y = pct), position = position_dodge(width = 0.8), vjust = -0.6)
library(pheatmap)
pheatmap(aa, cluster_rows = F)
aa$pct = aa$TRUE
aa3 = aa[,c('names','pct')]
# Histogram on a Continuous (Numeric) Variable
aa2 = melt(aa)
g <- ggplot(aa3, aes(names)) + scale_fill_brewer(palette = "Spectral")
g + geom_histogram(aes(fill=pct),
col="black",
size=.1,
stat='count') + # change number of bins
labs(title="Genotype predictions for 255 inversions in 31 samples")
library(ggplot2)
g <- ggplot(d, aes(log10(width)))
g + geom_density(aes(fill=factor(phase.pass)), alpha=0.8) +
labs(title="phasing pass vs width",
subtitle="City Mileage Grouped by Number of cylinders",
caption="Source: mpg",
x="Inv width",
fill="phase pass")
g <- ggplot(d, aes(log10(width)))
g + geom_density(aes(fill=factor(phase.pass)), alpha=0.8) +
labs(title="phasing pass vs width",
subtitle="City Mileage Grouped by Number of cylinders",
caption="Source: mpg",
x="Inv width",
fill="phase pass")
g <- ggplot(tab, aes(log10(width)))
g + geom_density(aes(fill=factor(verdict)), alpha=0.8) +
labs(title="phasing pass vs width",
subtitle="City Mileage Grouped by Number of cylinders",
caption="Source: mpg",
x="Inv width",
fill="phase pass")
g <- ggplot(tab, aes(verdict, log10(width)))
g + geom_boxplot()# +
geom_dotplot(binaxis='y',
stackdir='center',
dotsize = .5,
fill="red") +
theme(axis.text.x = element_text(angle=65, vjust=0.6)) +
labs(title="Box plot + Dot plot",
subtitle="City Mileage vs Class: Each dot represents 1 row in source data",
caption="Source: mpg",
x="Class of Vehicle",
y="City Mileage")
# variant allele frequency
cm = callmatrix
library(dplyr)
# vaf figure. ###########################################3
cm = callmatrix
#cm = callmatrixpass
cm$nhom = rowSums2(cm=='1|1')
cm$nhet =rowSums2(cm=='1|0') + rowSums2(cm=='0|1')
cm$nref = rowSums2(cm=='0|0')
cm$ninv = (2*rowSums2(cm=='1|1')) + rowSums2(cm=='1|0') + rowSums2(cm=='0|1') +
(2*rowSums2(cm=='1|1_lowconf')) + rowSums2(cm=='1|0_lowconf') + rowSums2(cm=='0|1_lowconf')
cm$n = 2*((rowSums2(cm=='1|1')) + rowSums2(cm=='1|0') + rowSums2(cm=='0|1') +
(rowSums2(cm=='1|1_lowconf')) + rowSums2(cm=='1|0_lowconf') + rowSums2(cm=='0|1_lowconf') +
(rowSums2(cm=='0|0_lowconf')) + (rowSums2(cm=='0|0')))
cm$ninv = cm$ninv +
cm$af = cm$ninv / cm$n
cm2 = cm#[cm$n > 50,]
ggplot(data=cm2) + geom_histogram(aes(x=ninv)) # + scale_x_log10()
as.data.frame.matrix(table(cm$ninv))
aaa = as.data.frame(as.matrix(table(cm$ninv)))
aaa$n = as.numeric(row.names(aaa))
ggplot(data=aaa[aaa$n > 0,], aes(x=n, y=V1)) + geom_point() + geom_line()
ggplot(data=aaa[aaa$n > 0,], aes(x=n, y=V1)) + geom_point() + geom_line() + xlim(1,100) + scale_x_log10() +
labs(title('Variant allele frequency'), x='Variant allele count', y='Inversion sites') + scale_y_log10()
ggplot(data=cm) + geom_histogram(aes(x=af))
######################################################3
#figure1
tab3 = tab#[tab$verdict == 'pass',]
companion_unique_samples = c('HG00514','HG00733','NA19240','HG02018','HG01573','GM19036')
# filter, filter
tab3a = left_join(tab3, cm[,c('chrom','start','end','simpleverdict')])
tab3b = tab3a[tab3a$simpleverdict %in% c('PASS','lowmap', 'AlwaysComplex'),]
tab3c = tab3b[!(tab3b$sample %in% companion_unique_samples),]
tab3 = tab3c
#tab3$GT = tab3$GTs
tab3$simpler = 'ccomplex'
tab3[tab3$GT %in% c('1|1', '1|1_lowconf'),]$simpler = 'aHOM'
tab3[tab3$GT %in% c('1|0', '1|0_lowconf','0|1', '0|1_lowconf'),]$simpler = 'bHET'
tab3[tab3$GT %in% c('0|0', '0|0_lowconf'),]$simpler = 'REF'
tab3[tab3$GT %in% c('noreads'),]$simpler = 'zeroreads'
tab3[tab3$ID %in% cm_detail_hom$ID,]$simpler = 'allMISO'
#tab3[(tab3$ID %in% cm_detail_hom$ID) & (tab3$sample == 'GM20509'),]$simpler = 'aaMISO'
#tab[endsWith(tab$GT, '_lowconf'),]$simpler = 'complex_lowconf'
#tab[tab$GT %in% c('0|0_lowconf', '1|0_lowconf', '0|1_lowconf', '1|1_lowconf'),]$simpler = 'simple_lowconf'
tab3 = tab3[tab3$simpler %in% c('aHOM','bHET','ccomplex', 'aaMISO','zeroreads'),]
#tab3[tab3$simpler %in% c('aHOM','bHET','ccomplex'),]$simpler = 'Hom/Het/Complex'
data = as.data.frame.matrix(table(tab3$sample, tab3$simpler))
dat = melt(as.matrix(data))
library(ggbeeswarm)
g <- ggplot(data=dat, aes(x=X2, y=value)) + geom_boxplot(aes(color=X2)) +
# Plot first the fat dots
geom_beeswarm(data=dat,
size=2,
cex=1.5) +
ylim(c(1,200)) + labs(x='Event class', y='Events per genome') +
theme(axis.text.x = element_text(face="bold",
size=14),
axis.text.y = element_text(face="bold",
size=14))
g
# inverted sequence per genome.
t4 = tab3 %>% group_by(sample,simpler) %>% mutate(nbases = sum(len)) %>% filter(row_number() == 1)
t5 = t4[,c('sample','simpler','nbases')]
t6 = t5[t5$simpler %in% c('aHOM','bHET'),]
t6[t6$simpler == 'aHOM',]$nbases = 2 * t6[t6$simpler == 'aHOM',]$nbases
t7 = t6 %>% group_by(sample) %>% mutate(value = sum(nbases)) %>% filter(row_number() == 1)
t7$x = 'sample'
t7 = as.data.frame(t7)
data = as.data.frame.matrix(table(t7$x, t7$value))
dat = melt(as.matrix(data))
t7$value = t7$value/1000
g <- ggplot(data=t7, aes(x=x, y=value)) +
geom_boxplot(aes(color=x)) +
geom_point(aes(x=x,y=value)) +
ylim(c(1,40)) +
labs(x='', y='Inverted bases per diploid genome [Mb]') +
theme(axis.text.x = element_text(face="bold",
size=14),
axis.text.y = element_text(face="bold",
size=14))
g
# accidental HW
library(HardyWeinberg)
CX = cm[,c('nref','nhet','nhom')]
HWTernaryPlot(CX,100,region=1,hwcurve=TRUE,vbounds=FALSE,vertex.cex=2)
dat$anc = 'UNK'
dat[dat$X1=='GM12329',]$anc = 'EUR'
dat[dat$X1=='GM18534',]$anc = 'CHS'
dat[dat$X1=='GM18939',]$anc = 'CHS'
dat[dat$X1=='GM19036',]$anc = 'AFR'
dat[dat$X1=='GM19650',]$anc = 'AMR'
dat[dat$X1=='GM19983',]$anc = 'AFR'
dat[dat$X1=='GM20509',]$anc = 'EUR'
dat[dat$X1=='GM20847',]$anc = 'SAS'
dat[dat$X1=='HG00096',]$anc = 'EUR'
dat[dat$X1=='HG00171',]$anc = 'EUR'
dat[dat$X1=='HG00512',]$anc = 'EAS'
dat[dat$X1=='HG00513',]$anc = 'EAS'
dat[dat$X1=='HG00514',]$anc = 'EAS'
dat[dat$X1=='HG00731',]$anc = 'AMR'
dat[dat$X1=='HG00732',]$anc = 'AMR'
dat[dat$X1=='HG00733',]$anc = 'AMR'
dat[dat$X1=='HG00864',]$anc = 'EAS'
dat[dat$X1=='HG01114',]$anc = 'AMR'
dat[dat$X1=='HG01505',]$anc = 'EUR'
dat[dat$X1=='HG01573',]$anc = 'AMR'
dat[dat$X1=='HG01596',]$anc = 'EAS'
dat[dat$X1=='HG02011',]$anc = 'AFR'
dat[dat$X1=='HG02018',]$anc = 'EAS'
dat[dat$X1=='HG02492',]$anc = 'SAS'
dat[dat$X1=='HG02587',]$anc = 'AFR'
dat[dat$X1=='HG02818',]$anc = 'AFR'
dat[dat$X1=='HG03009',]$anc = 'SAS'
dat[dat$X1=='HG03065',]$anc = 'AFR'
dat[dat$X1=='HG03371',]$anc = 'AFR'
dat[dat$X1=='HG03683',]$anc = 'SAS'
dat[dat$X1=='HG03732',]$anc = 'SAS'
dat[dat$X1=='NA19238',]$anc = 'AFR'
dat[dat$X1=='NA19239',]$anc = 'AFR'
dat[dat$X1=='NA19240',]$anc = 'AFR'
#donut
donut = as.data.frame(as.matrix(table(tab3$simpler)))
donut$Prediction = row.names(donut)
donut = donut[c('REF','HET','HOM','complex','zeroreads'),]
donut$Prediction = c('aREF','bHET','cHOM','dcomplex','zeroreads')
row.names(donut) = c('aREF','bHET','cHOM','dcomplex','zeroreads')
ypos1 = head(c(0,cumsum(rev(donut$V1))),-1)
ypos2 = cumsum(rev(donut$V1))
ypos = (ypos2 + ypos1)/2
ggplot(donut, aes(x = 2, y = V1, fill = Prediction)) +
geom_bar(stat = "identity", color = "white") +
coord_polar(theta = "y", start = 0)+
geom_text(aes(y = ypos, label = rev(V1)), color = "white")+
theme_void()+
xlim(0.5, 2.5)
# length plot
tab1 = tab[tab$sample=='HG00733',]
bins = 40
g = ggplot(data=tab1, aes(len)) + geom_histogram(aes(y=..density..), bins=bins, color = "black", fill = "grey") + scale_x_log10() + theme_minimal() + geom_density(aes(x=len), size=1)
g
g = ggplot(data=tab1, aes(len)) + geom_histogram(bins=bins, color = "black", fill = "grey") + xlim(c(0,1000)) + scale_x_log10() + geom_density(aes(x=len), size=1)
g
tab4 = tab3
tab4[tab4$simpler == 'REF',]$simpler = 'eREF'
tab4[tab4$simpler == 'HET',]$simpler = 'dHET'
tab4[tab4$simpler == 'HOM',]$simpler = 'cHOM'
tab4[tab4$simpler == 'complex',]$simpler = 'bcomplex'
tab4[tab4$simpler == 'zeroreads',]$simpler = 'anoreads'
g = ggplot(data=tab4[tab4$sample=='HG00733',], aes(len)) + geom_histogram(bins=bins, aes(fill=simpler)) + scale_x_log10() + theme_minimal() #+ geom_density(aes(x=len), size=1)
g
ybreaks = seq(0,20,5)
## On primary axis
g + scale_y_continuous("Counts", breaks = round(ybreaks / (bw * n_obs),3), labels = ybreaks)
## Or on secondary axis
g + scale_y_continuous("Density", sec.axis = sec_axis(
trans = ~ . * bw * n_obs, name = "Counts", breaks = ybreaks))
ggplot(data=tab1) + geom_histogram(aes(x=len), bins=50) + scale_x_log10()
ggplot(data=tab1) + geom_histogram(aes(x=len), bins=60) + scale_x_log10()
| /workflow/scripts/arbigent/genotypes_qc.R | permissive | friendsofstrandseq/mosaicatcher-pipeline | R | false | false | 14,552 | r | # Genotypes QC
# plot 1
library(ggplot2)
library(dplyr)
#theme_set(theme_classic())
#tab2 = tab
tab=tabp
tab=tab[!(tab$pred_hard=='./.'),]
tab$simple = 'ccomplex'
tab[tab$GT %in% c('0|0', '1|0', '0|1', '1|1'),]$simple = 'asimple'
tab[tab$GT %in% c('noreads'),]$simple = 'zeroreads'
tab[tab$GT %in% c('noreads'),]$simple = 'zeroreads'
tab[endsWith(tab$GT, '_lowconf'),]$simple = 'complex_lowconf'
tab[tab$GT %in% c('0|0_lowconf', '1|0_lowconf', '0|1_lowconf', '1|1_lowconf'),]$simple = 'bsimple_lowconf'
tab$verdict = tab$simple
# Histogram on a Continuous (Numeric) Variable
#tab$GT = tab$GTs
g <- ggplot(tab, aes(verdict))# + scale_fill_brewer(palette = "Spectral")
g + geom_histogram(aes(fill=GT),
col="black",
size=.1,
stat='count') + # change number of bins
labs(title="Genotype predictions for 323 inversions in many samples")
#### mendel ####
#' Make a vector to check inheritance plaisibilities
#' @return child_expect, a vector describing excpected child genotypes given the parents.
#' @author Wolfram Hoeps
make_child_expect_vector <- function(){
# ok who cares let's do it the hard way.
#parents = paste(p1, p2)
child_expect <- vector(mode="list")
child_expect[['0|0 0|0']] = c('0|0')
child_expect[['0|0 0|1']] = c('0|0','0|1')
child_expect[['0|0 1|1']] = c('0|1')
child_expect[['0|1 0|0']] = c('0|0','0|1')
child_expect[['0|1 0|1']] = c('0|0','0|1','1|1')
child_expect[['0|1 1|1']] = c('0|1','1|1')
child_expect[['1|1 0|0']] = c('0|1')
child_expect[['1|1 0|1']] = c('0|1','1|1')
child_expect[['1|1 1|1']] = c('1|1')
return (child_expect)
}
ce = make_child_expect_vector()
#' Test a gt for validity
#'
test_mendel <- function(ce, gt_parent1, gt_parent2, gt_child){
gt_parent1 = substr(gt_parent1,1,3)
gt_parent2 = substr(gt_parent2,1,3)
gt_child = substr(gt_child, 1,3)
gt_parent1 = gsub('1\\|0','0\\|1', gt_parent1)
gt_parent2 = gsub('1\\|0','0\\|1', gt_parent2)
gt_child = gsub('1\\|0','0\\|1', gt_child)
valid_gts = c('0|0','0|1','1|1')
c1 = gt_parent1 %in% valid_gts
c2 = gt_parent2 %in% valid_gts
c3 = gt_child %in% valid_gts
#print(c1)
#print(c2)
#print(c3)
#return(gt_parent2)
if (c1){
if (c2){
if (c3){
valid = gt_child %in% ce[[paste(gt_parent1, gt_parent2)]]
#print(valid)
return(valid)
}
}
}
return(F)
}
#c2 = callmatrix
callmatrix = c2
callmatrix[callmatrix=='./.']='0|0'
#callmatrix = c2[c2$verdict=='pass',]
callmatrix$mendel1 = 'UNK'
callmatrix$mendel2 = 'UNK'
callmatrix$mendel3 = 'UNK'
callmatrix$mendel4 = 'UNK'
for (row in 1:nrow(callmatrix)){
#callmatrix[row,]$mendel =
callmatrix[row,]$mendel1 = as.logical(test_mendel(ce, callmatrix[row,]$NA19238, callmatrix[row,]$NA19239,callmatrix[row,]$NA19240 ))
callmatrix[row,]$mendel2 = as.logical(test_mendel(ce, callmatrix[row,]$HG00512, callmatrix[row,]$HG00513,callmatrix[row,]$HG00514 ))
callmatrix[row,]$mendel3 = as.logical(test_mendel(ce, callmatrix[row,]$HG00731, callmatrix[row,]$HG00732,callmatrix[row,]$HG00733 ))
callmatrix[row,]$mendel4 = as.logical(test_mendel(ce, callmatrix[row,]$GM19650, callmatrix[row,]$HG00864,callmatrix[row,]$HG03371 ))
}
ctest = callmatrix
ct2 = ctest[,c('chr','verdict','HG00731','HG00732','HG00733','HG00512','HG00513','HG00514','NA19238','NA19239','NA19240','mendel1','mendel2','mendel3')]
ct3 = ctest[,c('HG00731','HG00732','HG00733','HG00512','HG00513','HG00514','NA19238','NA19239','NA19240')]
acceptable = c('0|0', '1|0', '0|1', '1|1', '0|0_lowconf', '1|0_lowconf', '0|1_lowconf', '1|1_lowconf')
ct4 = ct3[ct3 %in% acceptable,]
#ct3 = ct2[]
#callmatrix[callmatrix$mendel]
#callmatrix$mendelall = callmatrix$mendel1 && callmatrix$mendel2 && callmatrix$mendel3
# 'FALSE' %in% callmatrix[,c('mendel1','mendel2','mendel3')]
#
mendelall = callmatrix %>% mutate(mm = as.logical(mendel1) * as.logical(mendel2) * as.logical(mendel3))
#mendelall = callmatrix %>% mutate(mm = as.numeric(mendel1) + 1)
# test_mendel(ce, p1, p2, c)
# # First trio
# Histogram on a Continuous (Numeric) Variable
g <- ggplot(mendelall, aes()) + scale_fill_brewer(palette = "Spectral")
g + geom_histogram(aes(fill=as.logical(mm), x=mm),
col="black",
size=.1,
stat='count') + # change number of bins
labs(title="Genotype predictions for 255 inversions in 31 samples")
ctest = callmatrix[,c('HG00512','HG00513','HG00514','HG00731','HG00732','HG00733','NA19238','NA19239','NA19240','mendel1','mendel2','mendel3')]
#### compare to david's calls
david_list = '~/PhD/projects/huminvs/mosaicatcher/bed_factory/revision/david_n35_323/nonred_inversions_n35_genotypes.csv'
dgt = read.table(david_list, header=T, sep=',')
#sth else
dq = '~/Desktop/desktop_5th_oct/davidquick.txt'
d = read.table(dq, header=T)
aa = as.data.frame.matrix(table(d$notes, d$phase.pass))
aa$pct = aa[['TRUE']] / (aa[['FALSE']] + aa[['TRUE']])
aa$names = row.names(aa)
aa = aa[order(aa$pct),]
colnames(aa) = c('fail','pass','pct','names')
aa$good = c('bad','nomap','bad','bad','good','bad','good','good','good','bad','bad','good')
dodgewidth <- position_dodge(width=0.9)
ggplot(data=aa, aes(x=names, y=pct), ylim=c(0,1)) + geom_bar(stat='identity', aes(fill=good)) +
geom_text(aes(label = fail+pass, x = names, y = pct), position = position_dodge(width = 0.8), vjust = -0.6)
library(pheatmap)
pheatmap(aa, cluster_rows = F)
aa$pct = aa$TRUE
aa3 = aa[,c('names','pct')]
# Histogram on a Continuous (Numeric) Variable
aa2 = melt(aa)
g <- ggplot(aa3, aes(names)) + scale_fill_brewer(palette = "Spectral")
g + geom_histogram(aes(fill=pct),
col="black",
size=.1,
stat='count') + # change number of bins
labs(title="Genotype predictions for 255 inversions in 31 samples")
library(ggplot2)
g <- ggplot(d, aes(log10(width)))
g + geom_density(aes(fill=factor(phase.pass)), alpha=0.8) +
labs(title="phasing pass vs width",
subtitle="City Mileage Grouped by Number of cylinders",
caption="Source: mpg",
x="Inv width",
fill="phase pass")
g <- ggplot(d, aes(log10(width)))
g + geom_density(aes(fill=factor(phase.pass)), alpha=0.8) +
labs(title="phasing pass vs width",
subtitle="City Mileage Grouped by Number of cylinders",
caption="Source: mpg",
x="Inv width",
fill="phase pass")
g <- ggplot(tab, aes(log10(width)))
g + geom_density(aes(fill=factor(verdict)), alpha=0.8) +
labs(title="phasing pass vs width",
subtitle="City Mileage Grouped by Number of cylinders",
caption="Source: mpg",
x="Inv width",
fill="phase pass")
g <- ggplot(tab, aes(verdict, log10(width)))
g + geom_boxplot()# +
geom_dotplot(binaxis='y',
stackdir='center',
dotsize = .5,
fill="red") +
theme(axis.text.x = element_text(angle=65, vjust=0.6)) +
labs(title="Box plot + Dot plot",
subtitle="City Mileage vs Class: Each dot represents 1 row in source data",
caption="Source: mpg",
x="Class of Vehicle",
y="City Mileage")
# variant allele frequency
cm = callmatrix
library(dplyr)
# vaf figure. ###########################################3
cm = callmatrix
#cm = callmatrixpass
# Genotype counts per inversion site.  NOTE(review): rowSums2() is applied to
# the logical matrix of genotype-string comparisons; assumes matrixStats-style
# semantics -- confirm which rowSums2 is attached upstream.
cm$nhom = rowSums2(cm=='1|1')
cm$nhet =rowSums2(cm=='1|0') + rowSums2(cm=='0|1')
cm$nref = rowSums2(cm=='0|0')
# Inverted-allele count: homozygotes contribute 2 alleles, heterozygotes 1;
# low-confidence calls are counted the same as confident ones.
cm$ninv = (2*rowSums2(cm=='1|1')) + rowSums2(cm=='1|0') + rowSums2(cm=='0|1') +
  (2*rowSums2(cm=='1|1_lowconf')) + rowSums2(cm=='1|0_lowconf') + rowSums2(cm=='0|1_lowconf')
# Total allele count: 2 per genotyped sample (all genotype classes).
cm$n = 2*((rowSums2(cm=='1|1')) + rowSums2(cm=='1|0') + rowSums2(cm=='0|1') +
  (rowSums2(cm=='1|1_lowconf')) + rowSums2(cm=='1|0_lowconf') + rowSums2(cm=='0|1_lowconf') +
  (rowSums2(cm=='0|0_lowconf')) + (rowSums2(cm=='0|0')))
# Fix: removed a dangling "cm$ninv = cm$ninv +" leftover line that made the
# next assignment unparseable (R continued the expression, yielding the
# invalid assignment target `cm$ninv + cm$af = ...`).
cm$af = cm$ninv / cm$n
cm2 = cm#[cm$n > 50,]
ggplot(data=cm2) + geom_histogram(aes(x=ninv)) # + scale_x_log10()
as.data.frame.matrix(table(cm$ninv))
aaa = as.data.frame(as.matrix(table(cm$ninv)))
aaa$n = as.numeric(row.names(aaa))
ggplot(data=aaa[aaa$n > 0,], aes(x=n, y=V1)) + geom_point() + geom_line()
# Fix: labs(title('...')) called base graphics title() and passed its result
# as an unnamed argument; the title must be given as labs(title = '...').
ggplot(data=aaa[aaa$n > 0,], aes(x=n, y=V1)) + geom_point() + geom_line() + xlim(1,100) + scale_x_log10() +
  labs(title='Variant allele frequency', x='Variant allele count', y='Inversion sites') + scale_y_log10()
ggplot(data=cm) + geom_histogram(aes(x=af))
######################################################3
#figure1
# Build tab3: per-sample calls restricted to PASS/lowmap/AlwaysComplex sites,
# excluding samples unique to the companion study, then collapse genotypes
# into simple classes.  The letter prefixes (aHOM < bHET < ccomplex ...)
# force the factor/plotting order.
tab3 = tab#[tab$verdict == 'pass',]
companion_unique_samples = c('HG00514','HG00733','NA19240','HG02018','HG01573','GM19036')
# filter, filter
tab3a = left_join(tab3, cm[,c('chrom','start','end','simpleverdict')])
tab3b = tab3a[tab3a$simpleverdict %in% c('PASS','lowmap', 'AlwaysComplex'),]
tab3c = tab3b[!(tab3b$sample %in% companion_unique_samples),]
tab3 = tab3c
#tab3$GT = tab3$GTs
# Default class is complex; specific genotypes override it below.
tab3$simpler = 'ccomplex'
tab3[tab3$GT %in% c('1|1', '1|1_lowconf'),]$simpler = 'aHOM'
tab3[tab3$GT %in% c('1|0', '1|0_lowconf','0|1', '0|1_lowconf'),]$simpler = 'bHET'
tab3[tab3$GT %in% c('0|0', '0|0_lowconf'),]$simpler = 'REF'
tab3[tab3$GT %in% c('noreads'),]$simpler = 'zeroreads'
# IDs flagged in the detail table override the genotype-derived class.
tab3[tab3$ID %in% cm_detail_hom$ID,]$simpler = 'allMISO'
#tab3[(tab3$ID %in% cm_detail_hom$ID) & (tab3$sample == 'GM20509'),]$simpler = 'aaMISO'
#tab[endsWith(tab$GT, '_lowconf'),]$simpler = 'complex_lowconf'
#tab[tab$GT %in% c('0|0_lowconf', '1|0_lowconf', '0|1_lowconf', '1|1_lowconf'),]$simpler = 'simple_lowconf'
# Drop REF/allMISO rows from the per-genome event counts.
tab3 = tab3[tab3$simpler %in% c('aHOM','bHET','ccomplex', 'aaMISO','zeroreads'),]
#tab3[tab3$simpler %in% c('aHOM','bHET','ccomplex'),]$simpler = 'Hom/Het/Complex'
# NOTE(review): `data` and `dat` shadow base R functions -- works, but rename
# if this graduates out of scratch code.  ylim(c(1,200)) silently drops any
# sample/class counts outside that range from the boxplot statistics.
data = as.data.frame.matrix(table(tab3$sample, tab3$simpler))
dat = melt(as.matrix(data))
library(ggbeeswarm)
g <- ggplot(data=dat, aes(x=X2, y=value)) + geom_boxplot(aes(color=X2)) +
# Plot first the fat dots
geom_beeswarm(data=dat,
size=2,
cex=1.5) +
ylim(c(1,200)) + labs(x='Event class', y='Events per genome') +
theme(axis.text.x = element_text(face="bold",
size=14),
axis.text.y = element_text(face="bold",
size=14))
g
# inverted sequence per genome.
# mutate + filter(row_number() == 1) keeps one row per (sample, simpler) with
# the group total in nbases; HOM events count both haplotypes (x2).
t4 = tab3 %>% group_by(sample,simpler) %>% mutate(nbases = sum(len)) %>% filter(row_number() == 1)
t5 = t4[,c('sample','simpler','nbases')]
t6 = t5[t5$simpler %in% c('aHOM','bHET'),]
t6[t6$simpler == 'aHOM',]$nbases = 2 * t6[t6$simpler == 'aHOM',]$nbases
t7 = t6 %>% group_by(sample) %>% mutate(value = sum(nbases)) %>% filter(row_number() == 1)
t7$x = 'sample'
t7 = as.data.frame(t7)
data = as.data.frame.matrix(table(t7$x, t7$value))
dat = melt(as.matrix(data))
# NOTE(review): dividing by 1000 gives kb if `len` is in bases, yet the axis
# label says [Mb] -- confirm the unit of `len` upstream.
t7$value = t7$value/1000
g <- ggplot(data=t7, aes(x=x, y=value)) +
geom_boxplot(aes(color=x)) +
geom_point(aes(x=x,y=value)) +
ylim(c(1,40)) +
labs(x='', y='Inverted bases per diploid genome [Mb]') +
theme(axis.text.x = element_text(face="bold",
size=14),
axis.text.y = element_text(face="bold",
size=14))
g
# accidental HW
# Hardy-Weinberg ternary plot over per-site genotype counts from the VAF block.
library(HardyWeinberg)
CX = cm[,c('nref','nhet','nhom')]
HWTernaryPlot(CX,100,region=1,hwcurve=TRUE,vbounds=FALSE,vertex.cex=2)
# Annotate each sample (dat$X1) with its super-population ancestry.
# Idiom fix: the original repeated 35 near-identical subset-assignments; a
# single named lookup vector is easier to audit and extend.  Samples missing
# from the table keep the 'UNK' default, exactly as before.
anc_lookup <- c(
  GM12329 = 'EUR', GM18534 = 'CHS', GM18939 = 'CHS', GM19036 = 'AFR',
  GM19650 = 'AMR', GM19983 = 'AFR', GM20509 = 'EUR', GM20847 = 'SAS',
  HG00096 = 'EUR', HG00171 = 'EUR', HG00512 = 'EAS', HG00513 = 'EAS',
  HG00514 = 'EAS', HG00731 = 'AMR', HG00732 = 'AMR', HG00733 = 'AMR',
  HG00864 = 'EAS', HG01114 = 'AMR', HG01505 = 'EUR', HG01573 = 'AMR',
  HG01596 = 'EAS', HG02011 = 'AFR', HG02018 = 'EAS', HG02492 = 'SAS',
  HG02587 = 'AFR', HG02818 = 'AFR', HG03009 = 'SAS', HG03065 = 'AFR',
  HG03371 = 'AFR', HG03683 = 'SAS', HG03732 = 'SAS', NA19238 = 'AFR',
  NA19239 = 'AFR', NA19240 = 'AFR'
)
dat$anc <- 'UNK'
anc_hit <- anc_lookup[as.character(dat$X1)]
dat$anc[!is.na(anc_hit)] <- anc_hit[!is.na(anc_hit)]
#donut
# Donut/pie chart of prediction-class counts.
# NOTE(review): the row selection below uses labels 'REF','HET','HOM','complex'
# but tab3$simpler was assigned 'aHOM','bHET','ccomplex',... upstream -- these
# lookups would return NA rows unless simpler was relabelled in between.
# Verify against the actual tab3 contents before trusting this figure.
donut = as.data.frame(as.matrix(table(tab3$simpler)))
donut$Prediction = row.names(donut)
donut = donut[c('REF','HET','HOM','complex','zeroreads'),]
donut$Prediction = c('aREF','bHET','cHOM','dcomplex','zeroreads')
row.names(donut) = c('aREF','bHET','cHOM','dcomplex','zeroreads')
# Mid-segment y positions for the count labels (segments stack in reverse).
ypos1 = head(c(0,cumsum(rev(donut$V1))),-1)
ypos2 = cumsum(rev(donut$V1))
ypos = (ypos2 + ypos1)/2
ggplot(donut, aes(x = 2, y = V1, fill = Prediction)) +
geom_bar(stat = "identity", color = "white") +
coord_polar(theta = "y", start = 0)+
geom_text(aes(y = ypos, label = rev(V1)), color = "white")+
theme_void()+
xlim(0.5, 2.5)
# length plot
# Length distribution for a single sample, with and without a density overlay.
tab1 = tab[tab$sample=='HG00733',]
bins = 40
g = ggplot(data=tab1, aes(len)) + geom_histogram(aes(y=..density..), bins=bins, color = "black", fill = "grey") + scale_x_log10() + theme_minimal() + geom_density(aes(x=len), size=1)
g
g = ggplot(data=tab1, aes(len)) + geom_histogram(bins=bins, color = "black", fill = "grey") + xlim(c(0,1000)) + scale_x_log10() + geom_density(aes(x=len), size=1)
g
# NOTE(review): same label mismatch as the donut above -- tab3$simpler never
# holds 'REF'/'HET'/'HOM'/'complex', so these relabels may match zero rows
# (which errors for data-frame subset-assignment).  Confirm intended labels.
tab4 = tab3
tab4[tab4$simpler == 'REF',]$simpler = 'eREF'
tab4[tab4$simpler == 'HET',]$simpler = 'dHET'
tab4[tab4$simpler == 'HOM',]$simpler = 'cHOM'
tab4[tab4$simpler == 'complex',]$simpler = 'bcomplex'
tab4[tab4$simpler == 'zeroreads',]$simpler = 'anoreads'
g = ggplot(data=tab4[tab4$sample=='HG00733',], aes(len)) + geom_histogram(bins=bins, aes(fill=simpler)) + scale_x_log10() + theme_minimal() #+ geom_density(aes(x=len), size=1)
g
# NOTE(review): `bw` and `n_obs` are not defined in this chunk -- the two
# scale_y_continuous() experiments below will fail unless they are set
# elsewhere (bandwidth and observation count for the density rescale).
ybreaks = seq(0,20,5)
## On primary axis
g + scale_y_continuous("Counts", breaks = round(ybreaks / (bw * n_obs),3), labels = ybreaks)
## Or on secondary axis
g + scale_y_continuous("Density", sec.axis = sec_axis(
trans = ~ . * bw * n_obs, name = "Counts", breaks = ybreaks))
ggplot(data=tab1) + geom_histogram(aes(x=len), bins=50) + scale_x_log10()
ggplot(data=tab1) + geom_histogram(aes(x=len), bins=60) + scale_x_log10()
|
context("Testing Single Sample Endpoints")

# Every endpoint test follows the same pattern: skip when offline, fetch the
# live endpoint, and compare the JSON payload against a stored fixture under
# api/tests/.  The twelve copy-pasted test_that() blocks are refactored into
# one data-driven loop so a new endpoint only needs a new table entry.
expect_endpoint_matches <- function(endpoint) {
  httptest::skip_if_disconnected()
  url <- build_url(paste0('/tests/', endpoint, '/'))
  json_file <- paste0('api/tests/', endpoint, '.json')
  response <- httr::content(httr::GET(url), as = "text")
  httptest::expect_json_equivalent(jsonlite::fromJSON(response),
                                   jsonlite::fromJSON(json_file))
}

# test description -> endpoint name (used in both the URL and fixture path).
single_sample_endpoints <- c(
  "We can get the info for a single sample." = "test_sample",
  "We can get the sequencing quality for a single sample." = "test_quality",
  "We can get the assembly stats for a single sample." = "test_assembly",
  "We can get the assembled contigs for a single sample." = "test_contig",
  "We can get the predicted genes for a single sample." = "test_gene",
  "We can get the predicted InDels for a single sample." = "test_indel",
  "We can get the predicted SNPs for a single sample." = "test_snp",
  "We can get the predicted MLST for a single sample." = "test_mlst",
  "We can get the SCCmec Primer hits for a single sample." = "test_sccmec_primer",
  "We can get the predicted SCCmec type (Primer based) for a single sample." = "test_sccmec_primer_predict",
  "We can get the SCCmec subtype hits for a single sample." = "test_sccmec_subtype",
  "We can get the predicted SCCmec subtype (Primer based) for a single sample." = "test_sccmec_subtype_predict"
)

for (test_desc in names(single_sample_endpoints)) {
  endpoint <- single_sample_endpoints[[test_desc]]
  test_that(test_desc, expect_endpoint_matches(endpoint))
}
| /staphopia/tests/testthat/test_single_sample_endpoints.R | no_license | staphopia/staphopia-r | R | false | false | 4,408 | r | context("Testing Single Sample Endpoints")
# Every endpoint test follows the same pattern: skip when offline, fetch the
# live endpoint, and compare the JSON payload against a stored fixture under
# api/tests/.  The twelve copy-pasted test_that() blocks are refactored into
# one data-driven loop so a new endpoint only needs a new table entry.
expect_endpoint_matches <- function(endpoint) {
  httptest::skip_if_disconnected()
  url <- build_url(paste0('/tests/', endpoint, '/'))
  json_file <- paste0('api/tests/', endpoint, '.json')
  response <- httr::content(httr::GET(url), as = "text")
  httptest::expect_json_equivalent(jsonlite::fromJSON(response),
                                   jsonlite::fromJSON(json_file))
}

# test description -> endpoint name (used in both the URL and fixture path).
single_sample_endpoints <- c(
  "We can get the info for a single sample." = "test_sample",
  "We can get the sequencing quality for a single sample." = "test_quality",
  "We can get the assembly stats for a single sample." = "test_assembly",
  "We can get the assembled contigs for a single sample." = "test_contig",
  "We can get the predicted genes for a single sample." = "test_gene",
  "We can get the predicted InDels for a single sample." = "test_indel",
  "We can get the predicted SNPs for a single sample." = "test_snp",
  "We can get the predicted MLST for a single sample." = "test_mlst",
  "We can get the SCCmec Primer hits for a single sample." = "test_sccmec_primer",
  "We can get the predicted SCCmec type (Primer based) for a single sample." = "test_sccmec_primer_predict",
  "We can get the SCCmec subtype hits for a single sample." = "test_sccmec_subtype",
  "We can get the predicted SCCmec subtype (Primer based) for a single sample." = "test_sccmec_subtype_predict"
)

for (test_desc in names(single_sample_endpoints)) {
  endpoint <- single_sample_endpoints[[test_desc]]
  test_that(test_desc, expect_endpoint_matches(endpoint))
}
|
# ui.R for an interactive data-exploration Shiny app: upload a delimited
# file, then browse it as a summary, sortable table, ggvis heatmap, and
# distribution plots.  The sidebar content is swapped per active tab via
# input.tabcond (1 = file chooser, 2 = data table, 3 = heatmap,
# 4 = distribution, 5 = documentation).  `output.fileUploaded` is a flag
# computed server-side once a file has been read.
library(shiny)
library(ggvis)
shinyUI(pageWithSidebar(
# Blank header panel -- the app intentionally has no title bar.
div(),
sidebarPanel(
###########################
## Sidebar for file chooser
###########################
conditionalPanel(
condition = "input.tabcond == 1",
strong("Choose a file to visualize from your",
"local drives by clicking on the button.",
"If you need a file, try:"),
br(),
code("data(iris)",style = "color:blue"),
br(),
code("write.csv(iris, file='iris.csv', row.names=FALSE)",style = "color:blue"),
tags$hr(),
# fileInput('datfile', '')
fileInput('datfile', 'Choose a file to upload.',
accept = c(
'text/csv',
'text/comma-separated-values',
'text/tab-separated-values',
'text/plain',
'.csv',
'.tsv'
)
),
tags$hr(),
strong("Choose whether or not your data has a header row."),
checkboxInput('header', 'Header', TRUE),
tags$hr(),
radioButtons('sep', 'Choose the delimiter.',
c(Comma=',',
Semicolon=';',
Tab='\t'),
','),
tags$hr(),
radioButtons('quote', 'Choose the types of quotes.',
c(None='',
'Double Quote'='"',
'Single Quote'="'"),
'"')
),
###############################
## Sidebar for data table
###############################
# Shared by the table and heatmap tabs (both need column selection).
conditionalPanel(
condition = "(input.tabcond == 2 || input.tabcond == 3) && output.fileUploaded",
strong("Choose the columns to include in the table."),
tags$hr(),
uiOutput("choose_columns")
) ,
###############################
## Sidebar for heatmap
###############################
conditionalPanel(
condition = "input.tabcond == 3 && output.fileUploaded",
# htmlOutput("choose_columns")
tags$hr(),
strong("Do you want to dummy code (one-hot encode)?"),
checkboxInput("dummybox", label = "Yes", value = FALSE),
tags$hr(),
strong("Do you want to randomize a sample?"),
checkboxInput("randbox", label = "Yes", value = FALSE),
numericInput('headobs', 'Sample size (randomized):', 75)
),
###############################
## Sidebar for distribution plots
###############################
conditionalPanel(
condition = "input.tabcond == 4 && output.fileUploaded",
strong("Choose the column to plot."),
htmlOutput("pick_a_column"),
tags$hr(),
numericInput('distobs', 'Sample size (randomized):', 1000),
tags$hr(),
sliderInput("binsize", "Histogram bin size", .1, 20, value=1)
)
),
mainPanel(
tabsetPanel(
id = "tabcond",
###############################
## File chooser
###############################
tabPanel("Choose File",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file.")),
conditionalPanel(
condition = "output.fileUploaded",
h2("Summary of Dataset:"),
br(),
verbatimTextOutput("summary")),
value=1),
###############################
## Table
###############################
tabPanel("Data Table",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file on the File Chooser tab.")),
conditionalPanel(
condition = "output.fileUploaded",
br(),
dataTableOutput(outputId="table")),
value=2) ,
###############################
## Heatmap
###############################
tabPanel("Heatmap",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file on the File Chooser tab.")),
conditionalPanel(
condition = "output.fileUploaded",
br(),
strong("Note: adjust size of plot (lower right corner) to fix axes if not lining up..."),
br(),
ggvisOutput("heatplot")),
value=3) ,
###############################
## Data distribution plots
###############################
tabPanel("Distribution",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file on the File Chooser tab.")),
conditionalPanel(
condition = "output.fileUploaded",
br(),
strong("Raw data plots. Note: color represents standard deviations from mean."),
br(),
ggvisOutput("rawplot"),
br(),
tags$hr(),
br(),
strong("Distribution of the data points."),
ggvisOutput("histplot")),
value=4),
###############################
## Documentation
###############################
tabPanel("Documentation",
## Display text if file uploaded
h3("Overview"),
p("This app provides a simple interactive data exploration. Choose a dataset saved to disc and select different tabs at the top of the page to view the data in various ways. All figures use R's ggvis, so you can automatically rescale the size (triangle in lower right corner of plot), and save the plots (cog in upper right corner of plot)."),
tags$hr(),
strong("Steps to run:"),
br(),
p("1) Click on 'Choose File' tab, and click on 'Choose File' button on left side panel. Choose a dataset to upload, selecting the correct parameters in the side panel, e.g., whether it has a header row, comma/tab/semicolon seperated, and what type of quotes are used in the file."),
br(),
p("2) Click on the other tabs (Data Table, Heatmap, and Distribution) to view the data both as a sortable table (Data Table) and in different plot types."),
br(),
br(),
tags$hr(),
h3("Notes on Usage"),
strong("Data Table"),
p("This is a simple table of the data, the provides quick sorts and searches of the data."),
br(),
br(),
strong("Heatmap"),
p("Viewing the data as a heatmap is a quick way to visually inspect correlations between the data. The sidebar panel allows you to randomly select a subset of columns, dummy code character/factor/class variables (what computer science majors call one-hot encoding), and randomly select a subset of observations so the figure is readable. Data are scaled from 0-1 to compare among columns."),
br(),
p("Note 1: The figure is an R ggvis plot, so you can hover over the figure to see the value."),
p("Note 2: The axes do not line up correctly for some likely logical reason, but you can drag the small re-size triangle in the lower right of the plot to fix the axes one it re-draws."),
br(),
br(),
strong("Distribution"),
p("The distribution tab provides two plots for any given variable you want to visualize, just select the variable in the side panel to view that data. Note that you can also choose a subset of observations that are randomized. Seeing all data at once (setting the sample high) will show in the original order."),
br(),
p("(a) The raw data (value on the y-axis, observation # on the x-axis) to visualize the range of the data and any potential outliers. The color of the points represents the standard deviations from the mean, where some use 3 or 4 standard deviations as a rough gauge of data outliers."),
br(),
p("(b) A histogram of the data, with an adjustable slider bar in the side-panel to change the bin size."),
br(),
br(),
tags$hr(),
h3("Download Source Files"),
p("The server.r and ui.r files to run the shiny app can be downloaded from:"),
br(),
p("https://github.com/wtcooper/DataProductsCourse/blob/master/RDataViz_Shiny"),
value=5)
)
)
)) | /RDataViz_Shiny/ui.R | no_license | wtcooper/DataProductsCourse | R | false | false | 10,571 | r | library(shiny)
library(ggvis)
shinyUI(pageWithSidebar(
div(),
sidebarPanel(
###########################
## Sidebar for file chooser
###########################
conditionalPanel(
condition = "input.tabcond == 1",
strong("Choose a file to visualize from your",
"local drives by clicking on the button.",
"If you need a file, try:"),
br(),
code("data(iris)",style = "color:blue"),
br(),
code("write.csv(iris, file='iris.csv', row.names=FALSE)",style = "color:blue"),
tags$hr(),
# fileInput('datfile', '')
fileInput('datfile', 'Choose a file to upload.',
accept = c(
'text/csv',
'text/comma-separated-values',
'text/tab-separated-values',
'text/plain',
'.csv',
'.tsv'
)
),
tags$hr(),
strong("Choose whether or not your data has a header row."),
checkboxInput('header', 'Header', TRUE),
tags$hr(),
radioButtons('sep', 'Choose the delimiter.',
c(Comma=',',
Semicolon=';',
Tab='\t'),
','),
tags$hr(),
radioButtons('quote', 'Choose the types of quotes.',
c(None='',
'Double Quote'='"',
'Single Quote'="'"),
'"')
),
###############################
## Sidebar for data table
###############################
conditionalPanel(
condition = "(input.tabcond == 2 || input.tabcond == 3) && output.fileUploaded",
strong("Choose the columns to include in the table."),
tags$hr(),
uiOutput("choose_columns")
) ,
###############################
## Sidebar for heatmap
###############################
conditionalPanel(
condition = "input.tabcond == 3 && output.fileUploaded",
# htmlOutput("choose_columns")
tags$hr(),
strong("Do you want to dummy code (one-hot encode)?"),
checkboxInput("dummybox", label = "Yes", value = FALSE),
tags$hr(),
strong("Do you want to randomize a sample?"),
checkboxInput("randbox", label = "Yes", value = FALSE),
numericInput('headobs', 'Sample size (randomized):', 75)
),
###############################
## Sidebar for distribution plots
###############################
conditionalPanel(
condition = "input.tabcond == 4 && output.fileUploaded",
strong("Choose the column to plot."),
htmlOutput("pick_a_column"),
tags$hr(),
numericInput('distobs', 'Sample size (randomized):', 1000),
tags$hr(),
sliderInput("binsize", "Histogram bin size", .1, 20, value=1)
)
),
mainPanel(
tabsetPanel(
id = "tabcond",
###############################
## File chooser
###############################
tabPanel("Choose File",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file.")),
conditionalPanel(
condition = "output.fileUploaded",
h2("Summary of Dataset:"),
br(),
verbatimTextOutput("summary")),
value=1),
###############################
## Table
###############################
tabPanel("Data Table",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file on the File Chooser tab.")),
conditionalPanel(
condition = "output.fileUploaded",
br(),
dataTableOutput(outputId="table")),
value=2) ,
###############################
## Heatmap
###############################
tabPanel("Heatmap",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file on the File Chooser tab.")),
conditionalPanel(
condition = "output.fileUploaded",
br(),
strong("Note: adjust size of plot (lower right corner) to fix axes if not lining up..."),
br(),
ggvisOutput("heatplot")),
value=3) ,
###############################
## Data distribution plots
###############################
tabPanel("Distribution",
## Display text if file uploaded
conditionalPanel(
condition = "!output.fileUploaded",
br(),
h2("Upload a file on the File Chooser tab.")),
conditionalPanel(
condition = "output.fileUploaded",
br(),
strong("Raw data plots. Note: color represents standard deviations from mean."),
br(),
ggvisOutput("rawplot"),
br(),
tags$hr(),
br(),
strong("Distribution of the data points."),
ggvisOutput("histplot")),
value=4),
###############################
## Documentation
###############################
tabPanel("Documentation",
## Display text if file uploaded
h3("Overview"),
p("This app provides a simple interactive data exploration. Choose a dataset saved to disc and select different tabs at the top of the page to view the data in various ways. All figures use R's ggvis, so you can automatically rescale the size (triangle in lower right corner of plot), and save the plots (cog in upper right corner of plot)."),
tags$hr(),
strong("Steps to run:"),
br(),
p("1) Click on 'Choose File' tab, and click on 'Choose File' button on left side panel. Choose a dataset to upload, selecting the correct parameters in the side panel, e.g., whether it has a header row, comma/tab/semicolon seperated, and what type of quotes are used in the file."),
br(),
p("2) Click on the other tabs (Data Table, Heatmap, and Distribution) to view the data both as a sortable table (Data Table) and in different plot types."),
br(),
br(),
tags$hr(),
h3("Notes on Usage"),
strong("Data Table"),
p("This is a simple table of the data, the provides quick sorts and searches of the data."),
br(),
br(),
strong("Heatmap"),
p("Viewing the data as a heatmap is a quick way to visually inspect correlations between the data. The sidebar panel allows you to randomly select a subset of columns, dummy code character/factor/class variables (what computer science majors call one-hot encoding), and randomly select a subset of observations so the figure is readable. Data are scaled from 0-1 to compare among columns."),
br(),
p("Note 1: The figure is an R ggvis plot, so you can hover over the figure to see the value."),
p("Note 2: The axes do not line up correctly for some likely logical reason, but you can drag the small re-size triangle in the lower right of the plot to fix the axes one it re-draws."),
br(),
br(),
strong("Distribution"),
p("The distribution tab provides two plots for any given variable you want to visualize, just select the variable in the side panel to view that data. Note that you can also choose a subset of observations that are randomized. Seeing all data at once (setting the sample high) will show in the original order."),
br(),
p("(a) The raw data (value on the y-axis, observation # on the x-axis) to visualize the range of the data and any potential outliers. The color of the points represents the standard deviations from the mean, where some use 3 or 4 standard deviations as a rough gauge of data outliers."),
br(),
p("(b) A histogram of the data, with an adjustable slider bar in the side-panel to change the bin size."),
br(),
br(),
tags$hr(),
h3("Download Source Files"),
p("The server.r and ui.r files to run the shiny app can be downloaded from:"),
br(),
p("https://github.com/wtcooper/DataProductsCourse/blob/master/RDataViz_Shiny"),
value=5)
)
)
)) |
Draw.boxplots <- function(Obj)
{
  # Draw a 2x2 panel of boxplots comparing Efron's locfdr ("Efron") against
  # the proposed method ("Proposed"): p0 estimates, RMSE of the local FDR
  # estimates, false positive rate, and sensitivity.
  # Obj: data frame with columns p0hat.{SP,EF}, RMSE.{SP,EF} and
  #      TP/TN/FP/FN.{SP,EF}, as produced by SimModel2.
  Sensitivity.SP <- Obj$TP.SP/(Obj$TP.SP + Obj$FN.SP)
  Sensitivity.EF <- Obj$TP.EF/(Obj$TP.EF + Obj$FN.EF)
  Specificity.SP <- Obj$TN.SP/(Obj$TN.SP + Obj$FP.SP)
  Specificity.EF <- Obj$TN.EF/(Obj$TN.EF + Obj$FP.EF)
  p0hat <- with(Obj, cbind(p0hat.EF, p0hat.SP))
  RMSE <- with(Obj, cbind(RMSE.EF, RMSE.SP))
  Sensitivity <- cbind(Sensitivity.EF, Sensitivity.SP)
  FPR <- cbind(1 - Specificity.EF, 1 - Specificity.SP)
  colnames(p0hat) <- colnames(RMSE) <- colnames(Sensitivity) <- colnames(FPR) <- c("Efron", "Proposed")
  # Fix: restore the caller's graphical parameters on exit instead of leaving
  # mfrow = c(2, 2) set globally after the function returns.
  old.par <- par(mfrow = c(2, 2))
  on.exit(par(old.par), add = TRUE)
  boxplot(p0hat, main = "Estimates of p0")
  boxplot(RMSE, main = "RMSE of local FDR estimates")
  boxplot(FPR, main = "False Positive Rate = (1 - Specificity)", ylim = c(0, 0.05))
  boxplot(Sensitivity, main = "Sensitivity", ylim = c(0, 1))
  invisible(NULL)
}
SimModel2 <- function(M, n, p0, alpha, beta)
# [Univariate] Null: Normal, Nonnull: Gamma
# Simulate M replicates of n z-scores -- a Binomial(n, p0) number of nulls
# from N(0, 1), the rest from Gamma(shape = alpha, scale = beta) -- and score
# two local-FDR estimators against the true local FDR:
#   .SP columns: the proposed sp.mix.1D estimator (defined elsewhere),
#   .EF columns: Efron's locfdr.
# Returns a data frame with one row per replicate.
{
  library(locfdr)
  # Summarise one estimator on one replicate.  Calls with estimated local FDR
  # <= 0.2 are declared non-null; RMSE is restricted to the informative band
  # 0.01 <= true localFDR <= 0.5 (same rule as the original code).
  # Returns c(p0hat, RMSE, TP, TN, FP, FN) in result-column order.
  score.method <- function(localfdr.hat, p0hat, localFDR, n0, n1) {
    Nhat <- as.integer(localfdr.hat <= 0.2)
    # Fix: seq_len() indexing handles the boundary replicates n0 == 0 and
    # n0 == n correctly; the original Nhat[-(1:n0)] mis-indexes when n0 == 0
    # because 1:0 yields c(1, 0).
    TP <- sum(Nhat[n0 + seq_len(n1)] == 1)
    TN <- sum(Nhat[seq_len(n0)] == 0)
    RMSE <- sqrt(mean((localFDR >= 0.01)*(localFDR <= 0.5)*(localfdr.hat - localFDR)^2))
    c(p0hat, RMSE, TP, TN, n0 - TN, n1 - TP)
  }
  # Preallocate one row per replicate (columns 1:6 = .SP, 7:12 = .EF).
  na.col <- rep(NA_real_, M)
  result <- data.frame(p0hat.SP = na.col, RMSE.SP = na.col,
                       TP.SP = na.col, TN.SP = na.col,
                       FP.SP = na.col, FN.SP = na.col,
                       p0hat.EF = na.col, RMSE.EF = na.col,
                       TP.EF = na.col, TN.EF = na.col,
                       FP.EF = na.col, FN.EF = na.col)
  for ( r in seq_len(M) ) {
    cat(r, "/", M, "\n")
    n0 <- rbinom(1, n, p0)
    n1 <- n - n0
    # Nulls first, then non-nulls; score.method() relies on this ordering.
    z <- c(rnorm(n0), rgamma(n1, shape = alpha, rate = (1/beta)))
    tmp <- p0*dnorm(z)
    f <- tmp + (1-p0)*dgamma(z, shape = alpha, rate = (1/beta))
    localFDR <- tmp/f  # true local FDR under the simulation model
    res <- sp.mix.1D(z, doplot = TRUE)
    result[r, 1:6] <- score.method(res$localfdr, res$p.0, localFDR, n0, n1)
    res <- locfdr(z)
    result[r, 7:12] <- score.method(res$fdr, res$fp0[3, 3], localFDR, n0, n1)
  }
  return(result)
}
#source(file = '../SpMix.R')
# The three simulation runs below (null proportion p0 = 0.95/0.90/0.80 with
# Gamma(shape 12, scale 0.25) non-nulls) are commented out; their results
# (Res.4/5/6) are restored from the saved workspace instead.
#Res.4 <- SimModel2(M = 500, p0 = 0.95, n = 1000, alpha = 12, beta = .25)
#Res.5 <- SimModel2(M = 500, p0 = 0.90, n = 1000, alpha = 12, beta = .25)
#Res.6 <- SimModel2(M = 500, p0 = 0.80, n = 1000, alpha = 12, beta = .25)
#save.image(file = "SimUniGamma.RData")
load(file = "SimUniGamma.RData")
# Draw the 2x2 comparison boxplots for each run; uncomment the png()/dev.off()
# pairs to write the figures to disk instead of the active device.
#png(file = "UniGammap95.png", height = 600, width = 900)
Draw.boxplots(Res.4)
#dev.off()
#png(file = "UniGammap90.png", height = 600, width = 900)
Draw.boxplots(Res.5)
#dev.off()
#png(file = "UniGammap80.png", height = 600, width = 900)
Draw.boxplots(Res.6)
#dev.off()
| /simulations/SimUni_Gamma.R | no_license | seokohj/SPMix-LocalFDR | R | false | false | 3,295 | r | Draw.boxplots <- function(Obj)
{
  ## Draw a 2x2 panel of boxplots comparing Efron's locfdr ("Efron") against
  ## the proposed semiparametric estimator ("Proposed") using the columns
  ## produced by SimModel2: p0 estimates, RMSE of local-FDR estimates,
  ## false positive rate (1 - specificity), and sensitivity.
  ##
  ## Args:
  ##   Obj - data.frame returned by SimModel2 (TP/TN/FP/FN and estimate columns)
  Sensitivity.SP <- Obj$TP.SP/(Obj$TP.SP + Obj$FN.SP)
  Sensitivity.EF <- Obj$TP.EF/(Obj$TP.EF + Obj$FN.EF)
  Specificity.SP <- Obj$TN.SP/(Obj$TN.SP + Obj$FP.SP)
  Specificity.EF <- Obj$TN.EF/(Obj$TN.EF + Obj$FP.EF)
  p0hat <- with(Obj, cbind(p0hat.EF, p0hat.SP))
  RMSE <- with(Obj, cbind(RMSE.EF, RMSE.SP))
  Sensitivity <- cbind(Sensitivity.EF, Sensitivity.SP)
  FPR <- cbind(1-Specificity.EF, 1-Specificity.SP)
  colnames(p0hat) <- colnames(RMSE) <- colnames(Sensitivity) <- colnames(FPR) <- c("Efron", "Proposed")
  par(mfrow = c(2, 2))   # 2x2 plotting grid
  boxplot(p0hat, main = "Estimates of p0")
  boxplot(RMSE, main = "RMSE of local FDR estimates")
  boxplot(FPR, main = "False Positive Rate = (1 - Specificity)", ylim = c(0, 0.05))
  boxplot(Sensitivity, main = "Sensitivity", ylim = c(0, 1))
}
SimModel2 <- function(M, n, p0, alpha, beta)
# [Univariate] Null: Normal, Nonnull: Gamma
{
  ## Monte Carlo comparison of two local-FDR estimators on a univariate
  ## two-group model: null z ~ N(0, 1), non-null z ~ Gamma(alpha, rate = 1/beta).
  ##
  ## Args:
  ##   M     - number of Monte Carlo replications
  ##   n     - number of z-values per replication
  ##   p0    - prior probability of the null component
  ##   alpha - Gamma shape parameter of the non-null component
  ##   beta  - reciprocal rate of the Gamma (passed as rate = 1/beta)
  ##
  ## Returns a data.frame with one row per replication holding, for both the
  ## semiparametric fit (".SP", via sp.mix.1D) and locfdr (".EF"): the p0
  ## estimate, the RMSE of local-FDR estimates, and TP/TN/FP/FN counts under
  ## the "declare non-null when localfdr <= 0.2" rule.
  ##
  ## NOTE(review): relies on sp.mix.1D() being defined elsewhere (SpMix.R).
  library(locfdr)
  result <- data.frame(p0hat.SP = rep(NA, M),
                       RMSE.SP = rep(NA, M),
                       TP.SP = rep(NA, M),
                       TN.SP = rep(NA, M),
                       FP.SP = rep(NA, M),
                       FN.SP = rep(NA, M),
                       p0hat.EF = rep(NA, M),
                       RMSE.EF = rep(NA, M),
                       TP.EF = rep(NA, M),
                       TN.EF = rep(NA, M),
                       FP.EF = rep(NA, M),
                       FN.EF = rep(NA, M))
  for ( r in 1:M ) {
    cat(r, "/", M, "\n")
    ## draw the number of null cases; the first n0 entries of z are null
    n0 <- rbinom(1, n, p0)
    n1 <- n - n0
    z0 <- rnorm(n0)
    z1 <- rgamma(n1, shape = alpha, rate = (1/beta))
    z <- c(z0, z1)
    ## true local FDR under the data-generating mixture
    tmp <- p0*dnorm(z)
    f <- tmp + (1-p0)*dgamma(z, shape = alpha, rate = (1/beta))
    localFDR <- tmp/f
    ## --- semiparametric mixture estimator ---
    res <- sp.mix.1D(z, doplot = TRUE)
    p0hat <- res$p.0
    Nhat <- as.integer(res$localfdr <= 0.2)   # 1 = declared non-null
    TP <- sum(Nhat[-(1:n0)] == 1)
    TN <- sum(Nhat[1:n0] == 0)
    FP <- n0 - TN
    FN <- n1 - TP
    ## RMSE restricted to the range 0.01 <= true localFDR <= 0.5
    RMSE <- sqrt(mean((localFDR >= 0.01)*(localFDR <= 0.5)*(res$localfdr - localFDR)^2))
    result$p0hat.SP[r] <- p0hat
    result$RMSE.SP[r] <- RMSE
    result$TP.SP[r] <- TP
    result$TN.SP[r] <- TN
    result$FP.SP[r] <- FP
    result$FN.SP[r] <- FN
    ## --- Efron's locfdr estimator on the same z sample ---
    res <- locfdr(z)
    p0hat <- res$fp0[3, 3]   # p0 estimate taken from locfdr's fp0 table
    Nhat <- as.integer(res$fdr <= 0.2)
    TP <- sum(Nhat[-(1:n0)] == 1)
    TN <- sum(Nhat[1:n0] == 0)
    FP <- n0 - TN
    FN <- n1 - TP
    RMSE <- sqrt(mean((localFDR >= 0.01)*(localFDR <= 0.5)*(res$fdr - localFDR)^2))
    result$p0hat.EF[r] <- p0hat
    result$RMSE.EF[r] <- RMSE
    result$TP.EF[r] <- TP
    result$TN.EF[r] <- TN
    result$FP.EF[r] <- FP
    result$FN.EF[r] <- FN
  }
  return(result)
}
#source(file = '../SpMix.R')
#Res.4 <- SimModel2(M = 500, p0 = 0.95, n = 1000, alpha = 12, beta = .25)
#Res.5 <- SimModel2(M = 500, p0 = 0.90, n = 1000, alpha = 12, beta = .25)
#Res.6 <- SimModel2(M = 500, p0 = 0.80, n = 1000, alpha = 12, beta = .25)
#save.image(file = "SimUniGamma.RData")
load(file = "SimUniGamma.RData")
#png(file = "UniGammap95.png", height = 600, width = 900)
Draw.boxplots(Res.4)
#dev.off()
#png(file = "UniGammap90.png", height = 600, width = 900)
Draw.boxplots(Res.5)
#dev.off()
#png(file = "UniGammap80.png", height = 600, width = 900)
Draw.boxplots(Res.6)
#dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_content.R
\name{read_content}
\alias{read_content}
\title{Compute nucleotide content per position. Wrapper function around seqTools.}
\usage{
read_content(fseq, output_file = NA)
}
\arguments{
\item{fseq}{a seqTools::fastqq object}
\item{output_file}{File to write results in CSV format to. Will not write to file if NA. Default NA.}
}
\value{
Data frame of nucleotide sequence content per position
}
\description{
Compute nucleotide content per position. Wrapper function around seqTools.
}
\examples{
infile <- system.file("extdata", "10^5_reads_test.fq.gz", package = "qckitfastq")
fseq <- seqTools::fastqq(infile,k=6)
read_content(fseq)
}
| /man/read_content.Rd | no_license | compbiocore/qckitfastq | R | false | true | 727 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_content.R
\name{read_content}
\alias{read_content}
\title{Compute nucleotide content per position. Wrapper function around seqTools.}
\usage{
read_content(fseq, output_file = NA)
}
\arguments{
\item{fseq}{a seqTools::fastqq object}
\item{output_file}{File to write results in CSV format to. Will not write to file if NA. Default NA.}
}
\value{
Data frame of nucleotide sequence content per position
}
\description{
Compute nucleotide content per position. Wrapper function around seqTools.
}
\examples{
infile <- system.file("extdata", "10^5_reads_test.fq.gz", package = "qckitfastq")
fseq <- seqTools::fastqq(infile,k=6)
read_content(fseq)
}
|
#' @include AllClass.R
{}
#' @include AllGeneric.R
{}
#' @include common.R
{}
#' @include SparseBrainVector.R
{}
.BrainVectorFromMatrix <- function(data, space) {
  ## Build a DenseBrainVector from a 2D matrix whose orientation is unknown:
  ## the 4th (time) dimension may lie along the rows or along the columns.
  ##
  ## Args:
  ##   data  - matrix, either nvols x nelements or nelements x nvols
  ##   space - 4-dimensional space object describing the target geometry
  ## Returns a DenseBrainVector; errors if neither orientation matches.
  nvols <- dim(space)[4]
  nelements <- prod(dim(space)[1:3])
  if ((dim(data)[1] == nvols) && (dim(data)[2] == nelements)) {
    ## fourth dimension is on the rows: transpose to voxels x volumes
    DenseBrainVector(t(data), space)
  } else if ((dim(data)[2] == nvols) && (dim(data)[1] == nelements)) {
    ## fourth dimension is on the columns
    DenseBrainVector(data, space=space)
  } else {
    ## collapse the dim vector so stop() receives one message string;
    ## paste() on the length-2 dim vector previously yielded a garbled,
    ## duplicated error message
    stop(paste("illegal matrix dimension", paste(dim(data), collapse=" x ")))
  }
}
#' makeVector
#'
#' Construct a \code{\linkS4class{BrainVector}} instance, using default (dense) implementation
#' @param data a four-dimensional \code{array}
#' @param refdata an instance of class \code{\linkS4class{BrainVector}} or \code{\linkS4class{BrainVolume}} containing the reference space for the new vector.
#' @param label a \code{character} string
#' @param source an instance of class \code{\linkS4class{BrainSource}}
#' @return \code{\linkS4class{DenseBrainVector}} instance
#' @export makeVector
makeVector <- function(data, refdata, source=NULL, label="") {
  ## Construct a DenseBrainVector from a 4D array, taking the reference
  ## geometry from `refdata`.
  ##
  ## Fix: the original `stopifnot(length(dim(refdata)) == 4)` contradicted the
  ## documented 3D (BrainVolume) case and made the ndim == 3 branch below
  ## unreachable; accept 3- or 4-dimensional reference data.
  stopifnot(length(dim(refdata)) %in% c(3, 4))
  ## reduce the reference space to 3 dimensions before re-extending it with
  ## the 4th dimension taken from `data`
  rspace <- if (ndim(space(refdata)) == 4) {
    dropDim(space(refdata))
  } else if (ndim(space(refdata)) == 3) {
    space(refdata)
  } else {
    stop("refdata must have 3 or 4 dimensions")
  }
  DenseBrainVector(data, addDim(rspace, dim(data)[4]), source, label)
}
#' BrainVector
#'
#' constructor function for virtual class \code{\linkS4class{BrainVector}}
#'
#' @param data the image data which can be a \code{matrix}, a 4d \code{array}, or a list of \code{BrainVolumes}.
#' If the latter, the geometric space of the data \code{BrainSpace} will be inferred from the constituent volumes,
#' which must all be identical.
#' @param space a \code{\linkS4class{BrainSpace}} object. Does not need to be included if \code{data} argument is a list of \code{BrainVolumes}
#' @param mask an optional \code{array} of type \code{logical}
#' @param source an optional \code{\linkS4class{BrainSource}} object
#' @param label a label of type \code{character}
#' @return a concrete instance of \code{\linkS4class{BrainVector}} class.
#' If \code{mask} is provided then \code{\linkS4class{SparseBrainVector}}, otherwise \code{\linkS4class{DenseBrainVector}}
#' @export BrainVector
#' @rdname BrainVector-class
BrainVector <- function(data, space=NULL, mask=NULL, source=NULL, label="") {
  ## Dispatch to the dense or sparse concrete constructor. When `data` is a
  ## list of volumes, the space is inferred from the first volume and the
  ## volumes are flattened into a voxels x volumes matrix.
  if (is.list(data)) {
    space <- addDim(space(data[[1]]), length(data))
    data <- do.call(cbind, lapply(data, function(vol) as.vector(vol)))
  }
  if (length(data) != prod(dim(space))) {
    stop("dimensions of data argument do not match dimensions of space argument")
  }
  if (is.null(mask)) {
    DenseBrainVector(data, space, source, label)
  } else {
    SparseBrainVector(data, space, mask, source, label)
  }
}
#' DenseBrainVector
#'
#' constructor function for class \code{\linkS4class{DenseBrainVector}}
#'
#' @param data a 4-dimensional \code{array} or a 2-dimension \code{matrix} that is either nvoxels by ntime-points or ntime-points by nvoxels
#' @param space a \code{\linkS4class{BrainSpace}} object
#' @param source an optional \code{\linkS4class{BrainSource}} object
#' @param label a label of type \code{character}
#' @return \code{\linkS4class{DenseBrainVector}} instance
#' @export DenseBrainVector
#' @rdname DenseBrainVector-class
DenseBrainVector <- function(data, space, source=NULL, label="") {
  ## Construct a DenseBrainVector from a 4D array, or from a matrix that is
  ## either voxels x timepoints or timepoints x voxels (auto-detected).
  ##
  ## Args:
  ##   data   - 4D array, or 2D matrix in either orientation
  ##   space  - BrainSpace (3D spaces are extended with a 4th dimension)
  ##   source - optional BrainSource; a default is synthesized when NULL
  ##   label  - character label stored in the meta info
  if (is.matrix(data)) {
    splen <- prod(dim(space)[1:3])
    data <- if (ncol(data) == splen) {
      t(data)          # timepoints x voxels -> transpose to voxels x timepoints
    } else if (nrow(data) == splen) {
      data
    } else {
      ## previously this fell through to NULL and failed later with an
      ## unhelpful "dims" error; fail fast with a clear message instead
      stop("number of rows or columns of data matrix does not match the number of voxels in space")
    }
    if (length(dim(space)) == 3) {
      ## add 4th dim to space arg
      space <- addDim(space, ncol(data))
    }
    dim(data) <- dim(space)
  }
  if (is.null(source)) {
    ## synthesize minimal meta info so every vector carries a source
    meta <- BrainMetaInfo(dim(data), spacing(space), origin(space), "FLOAT", label)
    source <- new("BrainSource", metaInfo=meta)
  }
  new("DenseBrainVector", .Data=data, source=source, space=space)
}
#' loadData
#' @return an instance of class \code{\linkS4class{BrainVector}}
#' @param mmap use memory-mapped file
#' @importFrom RNifti readNifti
#' @rdname loadData-methods
setMethod(f="loadData", signature=c("BrainVectorSource"),
          def=function(x, mmap=FALSE) {
            ## Realize the 4D image described by the source's meta info,
            ## either via a memory map or by reading with RNifti, keeping
            ## only the volumes selected by x@indices.
            meta <- x@metaInfo
            #if (mmap && (.Platform$endian != meta@endian)) {
            #  message("cannot create memory mapped file when image endianness does not equal OS endianess")
            #  mmap <- FALSE
            #}
            if (mmap && neuroim:::.isExtension(meta@dataFile, ".gz")) {
              ## memory mapping requires an uncompressed file on disk
              warning("cannot memory map to a gzipped file. ")
              mmap <- FALSE
            }
            stopifnot(length(meta@Dim) == 4)
            nels <- prod(meta@Dim[1:4])
            ind <- x@indices
            if (mmap) {
              mappedData <- .makeMMap(meta)
              arr <- array(mappedData, c(meta@Dim[1:4]))
            } else {
              ## use RNifti
              arr <- RNifti::readNifti(meta@dataFile)
              #### old R-level File IO
              #reader <- dataReader(meta, 0)
              #arr <- array(readElements(reader, nels), c(meta@Dim[1:4]))
              #close(reader)
            }
            ## bit of a hack to deal with scale factors
            if (.hasSlot(meta, "slope")) {
              if (meta@slope != 0) {
                arr <- arr* meta@slope
              }
            }
            ## space covers only the selected volumes
            bspace <- BrainSpace(c(meta@Dim[1:3], length(ind)),meta@spacing, meta@origin, meta@spatialAxes, trans(meta))
            DenseBrainVector(arr[,,,ind,drop=FALSE], bspace, x)
          })
#' BrainVectorSource
#'
#' Construct a \code{\linkS4class{BrainVectorSource}} object
#'
#' @param fileName name of the 4-dimensional image file
#' @param indices the subset of integer volume indices to load -- if \code{NULL} then all volumes will be loaded
#' @param mask image volume indicating the subset of voxels that will be loaded. If provided, function returns \code{\linkS4class{SparseBrainVectorSource}}
#' @return a instance deriving from \code{\linkS4class{BrainVectorSource}}
#'
#' @details If a \code{mask} is supplied then it should be a \code{\linkS4class{LogicalBrainVolume}} or \code{\linkS4class{BrainVolume}} instance. If the latter, then the mask will be defined by nonzero elements of the volume.
#'
#' @rdname BrainVectorSource
#' @importFrom assertthat assert_that
#' @export
BrainVectorSource <- function(fileName, indices=NULL, mask=NULL) {
  assert_that(is.character(fileName))
  assert_that(file.exists(fileName))
  metaInfo <- readHeader(fileName)
  ## validate indices only when more than one volume is requested: a bare
  ## indices = 1 must stay legal for 3D images, which are promoted to a
  ## single-volume 4D image below
  if (!is.null(indices) && max(indices) > 1) {
    assert_that(length(dim(metaInfo)) == 4)
    assert_that(max(indices) <= dim(metaInfo)[4])
    assert_that(min(indices) > 0)
  }
  if (length(metaInfo@Dim) == 2) {
    stop(paste("cannot create BrainVector with only two dimensions: ", paste(metaInfo@Dim, collapse=" ")))
  }
  if ( length(metaInfo@Dim) == 3) {
    ## promote a 3D image to a one-volume 4D image
    indices <- 1
    metaInfo@Dim <- c(metaInfo@Dim,1)
  } else if (length(metaInfo@Dim) == 4 && is.null(indices)) {
    ## default: load every volume
    indices=seq(1, metaInfo@Dim[4])
  }
  if (is.null(mask)) {
    new("BrainVectorSource", metaInfo=metaInfo, indices=as.integer(indices))
  } else {
    ## a mask selects a spatial subset -> sparse representation
    SparseBrainVectorSource(metaInfo, as.integer(indices), mask)
  }
}
#' Get length of \code{BrainVector}. This is the number of volumes in the volume vector (e.g. the 4th image dimension)
#'
#' @export
#' @rdname length-methods
setMethod("length", signature=c("BrainVector"),
          def=function(x) {
            ## the length of a volume vector is the extent of its 4th dimension
            dims <- dim(x)
            dims[4]
          })
#' loadVolumeList
#'
#' load a list of image volumes and return a \code{\linkS4class{BrainVector}} instance
#'
#' @param fileNames a list of files to load
#' @param mask an optional mask indicating subset of voxels to load
#' @return an instance of class \code{\linkS4class{BrainVector}}
#' @export loadVolumeList
loadVolumeList <- function(fileNames, mask=NULL) {
  stopifnot(all(sapply(fileNames, file.exists)))
  metaInfo <- lapply(fileNames, readHeader)
  ## all volumes must share an identical 3D geometry
  dims <- do.call(rbind, lapply(metaInfo, dim))
  if (!all(sapply(1:nrow(dims), function(i) all.equal(dims[1,], dims[i,])))) {
    stop("list of volumes must all have same dimensions")
  }
  if (!all(apply(dims, 1, length) == 3)) {
    stop("all volumes in list must have dim = 3")
  }
  nvols <- length(fileNames)
  ## one single-volume source per file, realized eagerly below
  sourceList <- lapply(fileNames, function(fname) {
    BrainVolumeSource(fname, 1)
  })
  vols <- lapply(sourceList, loadData)
  if (is.null(mask)) {
    ## dense result: flatten volumes into a voxels x volumes matrix
    mat <- do.call(cbind, vols)
    dspace <- addDim(space(vols[[1]]), length(vols))
    DenseBrainVector(mat, dspace, label=sapply(metaInfo, function(m) m@label))
  } else {
    mat <- do.call(cbind, vols)
    dspace <- addDim(space(vols[[1]]), length(vols))
    if (is.vector(mask)) {
      ## mask supplied as index vector, convert to logical
      M <- array(logical(prod(dim(dspace)[1:3])), dim(dspace)[1:3])
      M[mask] <- TRUE
      mask <- M
    } else {
      ## NOTE(review): as.logical() on an array drops its dim attribute --
      ## confirm SparseBrainVector accepts a dimensionless logical mask here
      mask <- as.logical(mask)
    }
    SparseBrainVector(mat[mask,], dspace, mask=mask, label=sapply(metaInfo, function(m) m@label))
  }
}
## coercions to plain arrays: dense vectors expose .Data directly; the
## generic case materializes via full 4D extraction
setAs("DenseBrainVector", "array", function(from) from@.Data)
setAs("BrainVector", "array", function(from) from[,,,])
#' show a \code{BrainVectorSource}
#' @param object the object
#' @export
setMethod(f="show",
          signature=signature(object="BrainVectorSource"),
          def=function(object) {
            ## print class, selected volume indices, and underlying meta info
            cat("an instance of class", class(object), "\n\n")
            cat(" indices: ", object@indices, "\n\n")
            cat(" metaInfo: \n")
            show(object@metaInfo)
            cat("\n\n")
          })
#' show a \code{BrainVector}
#' @param object the object
#' @export
setMethod(f="show", signature=signature("BrainVector"),
          def=function(object) {
            ## summary printout of geometry: dims, spacing, origin, axes, transform
            sp <- space(object)
            cat(class(object), "\n")
            cat(" Type :", class(object), "\n")
            cat(" Dimension :", dim(object), "\n")
            cat(" Spacing :", paste(paste(sp@spacing[1:(length(sp@spacing)-1)], " X ", collapse=" "),
                                    sp@spacing[length(sp@spacing)], "\n"))
            cat(" Origin :", paste(paste(sp@origin[1:(length(sp@origin)-1)], " X ", collapse=" "),
                                   sp@origin[length(sp@origin)], "\n"))
            cat(" Axes :", paste(sp@axes@i@axis, sp@axes@j@axis,sp@axes@k@axis), "\n")
            cat(" Coordinate Transform :", zapsmall(sp@trans), "\n")
          }
)
#' @rdname eachVolume-methods
#' @export
setMethod(f="eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="missing", mask="missing"),
          def=function(x, FUN, withIndex, mask, ...) {
            ## apply FUN to each full 3D volume in turn
            ## NOTE(review): this exact signature is registered twice in this
            ## file; the later registration below replaces this one.
            lapply(1:(dim(x)[4]), function(tt) FUN(x[,,,tt], ...))
          })
#' @rdname eachVolume-methods
#' @export
setMethod(f="eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="missing", mask="BrainVolume"),
          def=function(x, FUN, withIndex, mask, ...) {
            ## apply FUN to the masked voxels (mask > 0) of each volume
            mask.idx <- which(mask > 0)
            lapply(1:(dim(x)[4]), function(tt) {
              vals <- x[,,,tt]
              FUN(vals[mask.idx], ...)
            })
          })
#' @rdname eachVolume-methods
#' @export
setMethod(f="eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="missing", mask="missing"),
          def=function(x, FUN, withIndex, mask, ...) {
            ## duplicate of the first registration above (same signature);
            ## this definition is the one that takes effect
            lapply(1:(dim(x)[4]), function(tt) {
              vals <- x[,,,tt]
              FUN(vals, ...)
            })
          })
#' @rdname eachVolume-methods
#' @export
setMethod(f="eachVolume", signature=signature(x="BrainBucket", FUN="function", withIndex="missing",mask="missing"),
          def=function(x, FUN, withIndex, ...) {
            ## BrainBucket stores volumes as list elements, hence x[[tt]]
            lapply(1:(dim(x)[4]), function(tt) FUN(x[[tt]], ...))
          })
#' @rdname eachVolume-methods
#' @export
setMethod("eachVolume", signature=signature(x="BrainBucket", FUN="function", withIndex="logical"),
          def=function(x, FUN, withIndex, ...) {
            ## withIndex=TRUE passes the volume index as FUN's second argument
            lapply(1:(dim(x)[4]), function(tt) {
              vol <- x[[tt]]
              if (withIndex) FUN(vol,tt,...) else FUN(vol,...)
            })
          })
#' @rdname eachVolume-methods
#' @export
setMethod("eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="logical"),
          def=function(x, FUN, withIndex, ...) {
            ## withIndex=TRUE passes the volume index as FUN's second argument
            lapply(1:(dim(x)[4]), function(tt) {
              vol <- x[,,,tt]
              if (withIndex) FUN(vol,tt,...) else FUN(vol,...)
            })
          })
#' @rdname subVector-methods
#' @export
setMethod(f="subVector", signature=signature(x="DenseBrainVector", i="numeric"),
          def=function(x, i) {
            ## select a subset of volumes while preserving the 3D geometry
            assertthat::assert_that(max(i) <= dim(x)[4])
            sp <- space(x)
            outdim <- c(dim(x)[1:3], length(i))
            outspace <- BrainSpace(outdim, spacing=spacing(sp), origin=origin(sp),
                                   axes(sp), trans(sp))
            DenseBrainVector(x[,,,i], outspace)
          })
#' extractor
#' @rdname BrainVector-methods
#' @param i the volume index
#' @export
setMethod(f="[[", signature=signature(x="BrainVector", i="numeric"),
          def = function(x, i) {
            ## extract a single 3D volume, rebuilding a 3D space from the 4D one
            xs <- space(x)
            dat <- x[,,,i]
            newdim <- dim(x)[1:3]
            bspace <- BrainSpace(newdim, spacing=spacing(xs), origin=origin(xs), axes(xs), trans(xs))
            DenseBrainVolume(dat, bspace)
          })
#' @rdname takeVolume-methods
#' @param merge concatenate extracted volumes
#' @export
setMethod(f="takeVolume", signature=signature(x="BrainVector", i="numeric"),
          def=function(x, i, merge=FALSE) {
            ## TODO this is VERY slow
            ## TODO should be renamed "volSlice"
            xs <- space(x)
            ## all extracted volumes share the same 3D space
            bspace <- BrainSpace(dim(x)[1:3], spacing=spacing(xs), origin=origin(xs), axes(xs), trans(xs))
            makevol <- function(i) {
              BrainVolume(x[,,,i], bspace)
            }
            res <- lapply(i, makevol)
            if (length(res) > 1 && merge) {
              ## merge=TRUE collapses multiple volumes back into one object
              res <- do.call("concat", res)
            }
            if (length(res) == 1) {
              ## TODO should be consistent, e.g. always return list
              res[[1]]
            } else {
              res
            }
          })
#' @rdname eachSeries-methods
# @importFrom purrr array_branch map
#' @export
setMethod(f="eachSeries", signature=signature(x="DenseBrainVector", FUN="function", withIndex="missing"),
          def=function(x, FUN, withIndex=FALSE, ...) {
            #stop()
            #map(array_branch(x, 1:3), FUN)
            ## NOTE(review): callGeneric is invoked with withIndex bound to its
            ## default (FALSE), i.e. a logical rather than "missing" -- confirm
            ## an eachSeries method exists for that signature, otherwise this
            ## dispatch will fail instead of reaching the BrainVector method.
            callGeneric(x,FUN, withIndex,...)
          })
#' @rdname eachSeries-methods
#' @export
setMethod(f="eachSeries", signature=signature(x="BrainVector", FUN="function", withIndex="missing"),
          def=function(x, FUN, withIndex=FALSE, ...) {
            ## iterate voxel-wise over all time series, x varying fastest
            NX <- dim(x)[1]
            NY <- dim(x)[2]
            NZ <- dim(x)[3]
            ret <- vector("list", prod(NX, NY, NZ))  # preallocated result list
            index <- 1
            for (i in 1:NZ) {
              for (j in 1:NY) {
                for (k in 1:NX) {
                  ret[[index]] <- FUN(x[k,j,i,])
                  index <- index+1
                }
              }
            }
            ret
          })
#' loadVector
#'
#' load an image volume from a file
#'
#' @param fileName the name of the file to load
#' @param indices the indices of the sub-volumes to load (e.g. if the file is 4-dimensional)
#' @param mask a mask defining the spatial elements to load
#' @param mmap memory mapping if possible
#' @return an \code{\linkS4class{BrainVector}} object
#' @export
loadVector <- function(fileName, indices=NULL, mask=NULL, mmap=FALSE) {
  ## build a source description, then realize the data from disk
  loadData(BrainVectorSource(fileName, indices, mask), mmap)
}
#' @rdname concat-methods
#' @export
setMethod(f="concat", signature=signature(x="BrainVector", y="BrainVolume"),
          def=function(x,y, ...) {
            ## append a single volume to a 4D vector along the 4th dimension
            .concat4D(x,y,...)
          })
#' @rdname concat-methods
#' @export
setMethod(f="concat", signature=signature(x="BrainVolume", y="BrainVector"),
          def=function(x,y, ...) {
            ## prepend a single volume to a 4D vector along the 4th dimension
            .concat4D(x,y,...)
          })
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="logical", scale="logical"),
          def=function(x, center, scale) {
            ## scale() works column-wise, so transpose to time x voxels so that
            ## each voxel's time series is centered/scaled individually
            M <- as.matrix(x)
            Ms <- scale(t(M), center, scale)
            BrainVector(Ms, space(x))
          })
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="missing", scale="logical"),
          def=function(x, center, scale) {
            ## default: center = TRUE
            callGeneric(x, TRUE, scale)
          })
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="missing", scale="missing"),
          def=function(x, center, scale) {
            ## default: center = TRUE, scale = TRUE
            callGeneric(x, TRUE, TRUE)
          })
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="logical", scale="missing"),
          def=function(x, center, scale) {
            ## default: scale = TRUE
            callGeneric(x, center, TRUE)
          })
#' @export
#' @rdname splitScale-methods
#' @importFrom abind abind
setMethod(f="splitScale", signature=signature(x = "DenseBrainVector", f="factor", center="missing", scale="missing"),
          def=function(x, f) {
            ## defaults: center = TRUE, scale = TRUE
            callGeneric(x, f, TRUE, TRUE)
          })
#' @export
#' @rdname splitScale-methods
#' @importFrom abind abind
setMethod(f="splitScale", signature=signature(x = "DenseBrainVector", f="factor", center="logical", scale="missing"),
          def=function(x, f, center) {
            ## default: scale = TRUE
            callGeneric(x, f, center, TRUE)
          })
#' @export
#' @rdname splitScale-methods
#' @importFrom abind abind
setMethod(f="splitScale", signature=signature(x = "DenseBrainVector", f="factor", center="logical", scale="logical"),
          def=function(x, f, center, scale) {
            ## delegate to the matrix method on time x voxels, then rewrap
            m <- callGeneric(t(as.matrix(x)), f, center, scale)
            BrainVector(m, space(x))
          })
#' @rdname concat-methods
#' @export
setMethod(f="concat", signature=signature(x="BrainVector", y="BrainVector"),
          def=function(x,y,...) {
            ## concatenate two 4D vectors along the 4th dimension
            .concat4D(x,y,...)
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="matrix"),
          def=function(x,i) {
            ## i is an nvox x 3 matrix of (x, y, z) voxel coordinates; returns
            ## a time x nvox matrix with one column per coordinate row
            assertthat::assert_that(ncol(i) == 3)
            # old, slower method
            #apply(i, 1, function(i) x[i[1], i[2], i[3],])
            d4 <- dim(x)[4]
            ## replicate each coordinate row d4 times and append the time
            ## index, so one 4-column matrix-index extraction fetches all data
            expanded <- i[rep(1:nrow(i), each=d4),]
            expanded <- cbind(expanded, 1:d4)
            vec <- x[expanded]
            matrix(vec, d4, nrow(i))
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="matrix"),
          def=function(x,i) {
            ## extract the series at each coordinate row, wrap as an ROIVector
            ROIVector(space(x), coords=i, data=series(x, i))
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="ROIVolume"),
          def=function(x,i) {
            ## delegate to the matrix method using the ROI's voxel coordinates
            grid <- coords(i)
            callGeneric(x, grid)
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="ROIVolume"),
          def=function(x,i) {
            ## extract the series, then wrap with coordinates
            ## NOTE(review): series() returns a plain matrix here; confirm
            ## coords()/values() have methods for that return type
            rvol <- series(x, i)
            ROIVector(space(x), coords=coords(rvol), data=as.matrix(values(rvol)))
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="LogicalBrainVolume"),
          def=function(x,i) {
            ## extract series at every TRUE voxel of the mask volume
            assertthat::assert_that(all.equal(dim(x)[1:3], dim(i)[1:3]))
            idx <- which(i == TRUE)
            assertthat::assert_that(length(idx) > 0)
            grid <- indexToGrid(i, idx)
            callGeneric(x, grid)
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="LogicalBrainVolume"),
          def=function(x,i) {
            ## Extract series for all TRUE voxels of the mask and wrap them as
            ## an ROIVector.
            mat <- as.matrix(series(x, i))
            ## fix: `idx` was previously used without being defined and the
            ## arguments to indexToGrid were swapped; mirror the working
            ## pattern used by the `series` LogicalBrainVolume method above
            idx <- which(i == TRUE)
            ROIVector(space(x), coords=indexToGrid(i, idx), data=mat)
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="numeric"),
          def=function(x, i, j, k) {
            ## With only `i`: treat i as 1D linear indices into the 3D grid.
            ## With i, j, k: direct grid coordinates.
            if (missing(j) && missing(k)) {
              vdim <- dim(x)[1:3]
              mat <- arrayInd(i, vdim)
              apply(mat, 1, function(i) x[i[1], i[2], i[3],])
            } else {
              x[i,j,k,]
            }
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="numeric"),
          def=function(x, i, j, k) {
            ## Build an ROI from either 1D linear indices (`i` alone) or
            ## parallel (i, j, k) grid coordinates, delegating to the matrix
            ## method. The result is now returned visibly: previously the
            ## trailing `mat <- ...` assignment made the value invisible.
            if (missing(j) && missing(k)) {
              vdim <- dim(x)[1:3]
              vox <- arrayInd(i, vdim)
              callGeneric(x, vox)
            } else if (missing(i) || missing(j) || missing(k)) {
              stop("series_roi: must provide either 1D 'i' or 3D ('i', 'k', 'j') vector indices")
            } else {
              vox <- cbind(i,j,k)
              callGeneric(x, as.matrix(vox))
            }
          })
#' @describeIn seriesIter get a series iterator for a \code{\linkS4class{BrainVector}} instance
#' @export
setMethod(f="seriesIter", signature=signature(x="BrainVector"),
          def=function(x) {
            ## closure-based iterator over all voxel time series, following the
            ## nextElem/hasNext protocol of the "iter" class family
            len <- prod(dim(x)[1:3])
            vdim <- dim(x)[1:3]
            i <- 1
            nextEl <- function() {
              if (i <= len) {
                vox <- .indexToGrid(i, vdim)
                i <<- i + 1            # advance shared iterator state
                x[vox[1], vox[2], vox[3],]
              } else {
                stop("StopIteration")  # iterator-protocol termination signal
              }
            }
            hasNx <- function() {
              i <= len
            }
            obj <- list(nextElem = nextEl, hasNext=hasNx)
            class(obj) <- c("seriesIter", "abstractiter", "iter")
            obj
          })
#' @export
setAs(from="DenseBrainVector", to="matrix",
      function(from) {
        ## flatten the three spatial dimensions into rows; time -> columns
        data <- from@.Data
        dm <- dim(data)
        d123 <- prod(dm[1:3])
        d4 <- dm[4]
        dim(data) <- c(d123,d4)
        return(data)
      })
#' convert a \code{BrainVector} to \code{list} of volumes.
#'
#' @rdname as.list-methods
#' @param x the object
#' @export
setMethod(f="as.list", signature=signature(x = "BrainVector"), def=function(x) {
  ## one BrainVolume per time point; lapply replaces the original pattern of
  ## growing a list element-by-element inside a for loop
  lapply(seq_len(dim(x)[4]), function(i) takeVolume(x, i))
})
#' convert a \code{DenseBrainVector} to a matrix
#'
#' @rdname as.matrix-methods
#' @param x the object
#' @export
setMethod(f="as.matrix", signature=signature(x = "DenseBrainVector"), def=function(x) {
  ## delegates to the setAs coercion above: voxels x timepoints matrix
  as(x, "matrix")
})
#' @rdname as.sparse-methods
#' @export
setMethod(f="as.sparse", signature=signature(x="DenseBrainVector", mask="LogicalBrainVolume"),
          def=function(x, mask) {
            ## Restrict a dense vector to the voxels selected by `mask`.
            ## The mask must match x's spatial dimensions and spacing.
            assert_that(all(dim(x)[1:3] == dim(mask)))
            assert_that(all(spacing(space(x)) == spacing(space(mask))))
            dat <- as.matrix(x)[mask == TRUE,]
            ## return the result directly: the previous `bvec <- ...` made the
            ## method's value invisible and left `bvec`/`vdim` unused
            SparseBrainVector(dat, space(x), mask)
          })
#' @rdname as.sparse-methods
setMethod(f="as.sparse", signature=signature(x="DenseBrainVector", mask="numeric"),
          def=function(x, mask) {
            ## Build a logical mask volume from the supplied voxel indices and
            ## restrict the dense vector to those voxels.
            vdim <- dim(x)[1:3]
            m <- array(FALSE, vdim)    # logical array (was a numeric 0/1 array)
            m[mask] <- TRUE
            logivol <- LogicalBrainVolume(m, dropDim(space(x)))
            dat <- as.matrix(x)[mask,]
            ## return directly (the previous assignment made the result invisible)
            SparseBrainVector(dat, space(x), logivol)
          })
#' @export
#' @rdname writeVector-methods
setMethod(f="writeVector",signature=signature(x="BrainVector", fileName="character", format="missing", dataType="missing"),
          def=function(x, fileName) {
            ## default output format is NIfTI-1
            write.nifti.vector(x, fileName)
          })
#' @export
#' @rdname writeVector-methods
setMethod(f="writeVector",signature=signature(x="BrainVector", fileName="character", format="character", dataType="missing"),
          def=function(x, fileName, format) {
            ## only NIfTI-1 output (under several aliases) is supported
            if (toupper(format) == "NIFTI" || toupper(format) == "NIFTI1" || toupper(format) == "NIFTI-1") {
              callGeneric(x, fileName)
            } else {
              stop(paste("sorry, cannot write format: ", format))
            }
          })
#' @export writeVector
#' @rdname writeVector-methods
#' @aliases writeVector,BrainVector,character,missing,character,ANY-method
setMethod(f="writeVector",signature=signature(x="BrainVector", fileName="character", format="missing", dataType="character"),
          def=function(x, fileName, dataType) {
            ## explicit on-disk storage type (e.g. "FLOAT"), NIfTI-1 output
            write.nifti.vector(x, fileName, dataType)
          })
| /R/BrainVector.R | no_license | bbuchsbaum/neuroim | R | false | false | 23,858 | r | #' @include AllClass.R
{}
#' @include AllGeneric.R
{}
#' @include common.R
{}
#' @include SparseBrainVector.R
{}
.BrainVectorFromMatrix <- function(data, space) {
  ## Build a DenseBrainVector from a 2D matrix whose orientation is unknown:
  ## the 4th (time) dimension may lie along the rows or along the columns.
  ##
  ## Args:
  ##   data  - matrix, either nvols x nelements or nelements x nvols
  ##   space - 4-dimensional space object describing the target geometry
  ## Returns a DenseBrainVector; errors if neither orientation matches.
  nvols <- dim(space)[4]
  nelements <- prod(dim(space)[1:3])
  if ((dim(data)[1] == nvols) && (dim(data)[2] == nelements)) {
    ## fourth dimension is on the rows: transpose to voxels x volumes
    DenseBrainVector(t(data), space)
  } else if ((dim(data)[2] == nvols) && (dim(data)[1] == nelements)) {
    ## fourth dimension is on the columns
    DenseBrainVector(data, space=space)
  } else {
    ## collapse the dim vector so stop() receives one message string;
    ## paste() on the length-2 dim vector previously yielded a garbled,
    ## duplicated error message
    stop(paste("illegal matrix dimension", paste(dim(data), collapse=" x ")))
  }
}
#' makeVector
#'
#' Construct a \code{\linkS4class{BrainVector}} instance, using default (dense) implementation
#' @param data a four-dimensional \code{array}
#' @param refdata an instance of class \code{\linkS4class{BrainVector}} or \code{\linkS4class{BrainVolume}} containing the reference space for the new vector.
#' @param label a \code{character} string
#' @param source an instance of class \code{\linkS4class{BrainSource}}
#' @return \code{\linkS4class{DenseBrainVector}} instance
#' @export makeVector
makeVector <- function(data, refdata, source=NULL, label="") {
  ## Construct a DenseBrainVector from a 4D array, taking the reference
  ## geometry from `refdata`.
  ##
  ## Fix: the original `stopifnot(length(dim(refdata)) == 4)` contradicted the
  ## documented 3D (BrainVolume) case and made the ndim == 3 branch below
  ## unreachable; accept 3- or 4-dimensional reference data.
  stopifnot(length(dim(refdata)) %in% c(3, 4))
  ## reduce the reference space to 3 dimensions before re-extending it with
  ## the 4th dimension taken from `data`
  rspace <- if (ndim(space(refdata)) == 4) {
    dropDim(space(refdata))
  } else if (ndim(space(refdata)) == 3) {
    space(refdata)
  } else {
    stop("refdata must have 3 or 4 dimensions")
  }
  DenseBrainVector(data, addDim(rspace, dim(data)[4]), source, label)
}
#' BrainVector
#'
#' constructor function for virtual class \code{\linkS4class{BrainVector}}
#'
#' @param data the image data which can be a \code{matrix}, a 4d \code{array}, or a list of \code{BrainVolumes}.
#' If the latter, the geometric space of the data \code{BrainSpace} will be inferred from the constituent volumes,
#' which must all be identical.
#' @param space a \code{\linkS4class{BrainSpace}} object. Does not ned to be included if \code{data} argument is a list of \code{BrainVolumes}
#' @param mask an optional \code{array} of type \code{logical}
#' @param source an optional \code{\linkS4class{BrainSource}} object
#' @param label a label of type \code{character}
#' @return a concrete instance of \code{\linkS4class{BrainVector}} class.
#' If \code{mask} is provided then \code{\linkS4class{SparseBrainVector}}, otherwise \code{\linkS4class{DenseBrainVector}}
#' @export BrainVector
#' @rdname BrainVector-class
BrainVector <- function(data, space=NULL, mask=NULL, source=NULL, label="") {
  ## Dispatch to the dense or sparse concrete constructor. When `data` is a
  ## list of volumes, the space is inferred from the first volume and the
  ## volumes are flattened into a voxels x volumes matrix.
  if (is.list(data)) {
    space <- addDim(space(data[[1]]), length(data))
    data <- do.call(cbind, lapply(data, function(vol) as.vector(vol)))
  }
  if (length(data) != prod(dim(space))) {
    stop("dimensions of data argument do not match dimensions of space argument")
  }
  if (is.null(mask)) {
    DenseBrainVector(data, space, source, label)
  } else {
    SparseBrainVector(data, space, mask, source, label)
  }
}
#' DenseBrainVector
#'
#' constructor function for class \code{\linkS4class{DenseBrainVector}}
#'
#' @param data a 4-dimensional \code{array} or a 2-dimension \code{matrix} that is either nvoxels by ntime-points or ntime-points by nvoxels
#' @param space a \code{\linkS4class{BrainSpace}} object
#' @param source an optional \code{\linkS4class{BrainSource}} object
#' @param label a label of type \code{character}
#' @return \code{\linkS4class{DenseBrainVector}} instance
#' @export DenseBrainVector
#' @rdname DenseBrainVector-class
DenseBrainVector <- function(data, space, source=NULL, label="") {
  ## Construct a DenseBrainVector from a 4D array, or from a matrix that is
  ## either voxels x timepoints or timepoints x voxels (auto-detected).
  ##
  ## Args:
  ##   data   - 4D array, or 2D matrix in either orientation
  ##   space  - BrainSpace (3D spaces are extended with a 4th dimension)
  ##   source - optional BrainSource; a default is synthesized when NULL
  ##   label  - character label stored in the meta info
  if (is.matrix(data)) {
    splen <- prod(dim(space)[1:3])
    data <- if (ncol(data) == splen) {
      t(data)          # timepoints x voxels -> transpose to voxels x timepoints
    } else if (nrow(data) == splen) {
      data
    } else {
      ## previously this fell through to NULL and failed later with an
      ## unhelpful "dims" error; fail fast with a clear message instead
      stop("number of rows or columns of data matrix does not match the number of voxels in space")
    }
    if (length(dim(space)) == 3) {
      ## add 4th dim to space arg
      space <- addDim(space, ncol(data))
    }
    dim(data) <- dim(space)
  }
  if (is.null(source)) {
    ## synthesize minimal meta info so every vector carries a source
    meta <- BrainMetaInfo(dim(data), spacing(space), origin(space), "FLOAT", label)
    source <- new("BrainSource", metaInfo=meta)
  }
  new("DenseBrainVector", .Data=data, source=source, space=space)
}
#' loadData
#' @return an instance of class \code{\linkS4class{BrainVector}}
#' @param mmap use memory-mapped file
#' @importFrom RNifti readNifti
#' @rdname loadData-methods
setMethod(f="loadData", signature=c("BrainVectorSource"),
          def=function(x, mmap=FALSE) {
            ## Realize the 4D image described by the source's meta info,
            ## either via a memory map or by reading with RNifti, keeping
            ## only the volumes selected by x@indices.
            meta <- x@metaInfo
            #if (mmap && (.Platform$endian != meta@endian)) {
            #  message("cannot create memory mapped file when image endianness does not equal OS endianess")
            #  mmap <- FALSE
            #}
            if (mmap && neuroim:::.isExtension(meta@dataFile, ".gz")) {
              ## memory mapping requires an uncompressed file on disk
              warning("cannot memory map to a gzipped file. ")
              mmap <- FALSE
            }
            stopifnot(length(meta@Dim) == 4)
            nels <- prod(meta@Dim[1:4])
            ind <- x@indices
            if (mmap) {
              mappedData <- .makeMMap(meta)
              arr <- array(mappedData, c(meta@Dim[1:4]))
            } else {
              ## use RNifti
              arr <- RNifti::readNifti(meta@dataFile)
              #### old R-level File IO
              #reader <- dataReader(meta, 0)
              #arr <- array(readElements(reader, nels), c(meta@Dim[1:4]))
              #close(reader)
            }
            ## bit of a hack to deal with scale factors
            if (.hasSlot(meta, "slope")) {
              if (meta@slope != 0) {
                arr <- arr* meta@slope
              }
            }
            ## space covers only the selected volumes
            bspace <- BrainSpace(c(meta@Dim[1:3], length(ind)),meta@spacing, meta@origin, meta@spatialAxes, trans(meta))
            DenseBrainVector(arr[,,,ind,drop=FALSE], bspace, x)
          })
#' BrainVectorSource
#'
#' Construct a \code{\linkS4class{BrainVectorSource}} object
#'
#' @param fileName name of the 4-dimensional image file
#' @param indices the subset of integer volume indices to load -- if \code{NULL} then all volumes will be loaded
#' @param mask image volume indicating the subset of voxels that will be loaded. If provided, function returns \code{\linkS4class{SparseBrainVectorSource}}
#' @return a instance deriving from \code{\linkS4class{BrainVectorSource}}
#'
#' @details If a \code{mask} is supplied then it should be a \code{\linkS4class{LogicalBrainVolume}} or \code{\linkS4class{BrainVolume}} instance. If the latter, then the mask will be defined by nonzero elements of the volume.
#'
#' @rdname BrainVectorSource
#' @importFrom assertthat assert_that
#' @export
BrainVectorSource <- function(fileName, indices=NULL, mask=NULL) {
  assert_that(is.character(fileName))
  assert_that(file.exists(fileName))
  metaInfo <- readHeader(fileName)
  ## NOTE(review): bounds checks only run when max(indices) > 1, presumably
  ## to allow `indices = 1` against a 3D file that is promoted to 4D below --
  ## confirm that skipping validation for a single index is intentional
  if (!is.null(indices) && max(indices) > 1) {
    assert_that(length(dim(metaInfo)) == 4)
    assert_that(max(indices) <= dim(metaInfo)[4])
    assert_that(min(indices) > 0)
  }
  if (length(metaInfo@Dim) == 2) {
    stop(paste("cannot create BrainVector with only two dimensions: ", paste(metaInfo@Dim, collapse=" ")))
  }
  ## promote a 3D image to a single-volume 4D source
  if ( length(metaInfo@Dim) == 3) {
    indices <- 1
    metaInfo@Dim <- c(metaInfo@Dim,1)
  } else if (length(metaInfo@Dim) == 4 && is.null(indices)) {
    ## default: load every volume
    indices=seq(1, metaInfo@Dim[4])
  }
  if (is.null(mask)) {
    new("BrainVectorSource", metaInfo=metaInfo, indices=as.integer(indices))
  } else {
    ## a mask restricts loading to a voxel subset
    SparseBrainVectorSource(metaInfo, as.integer(indices), mask)
  }
}
#' Get length of \code{BrainVector}. This is the number of volumes in the volume vector (e.g. the 4th image dimension)
#'
#' @export
#' @rdname length-methods
setMethod("length", signature=c("BrainVector"),
          def=function(x) {
            ## the 4th array dimension holds the volumes / time points
            dim(x)[4]
          })
#' loadVolumeList
#'
#' load a list of image volumes and return a \code{\linkS4class{BrainVector}} instance
#'
#' @param fileNames a list of files to load; all volumes must be 3D and share the same dimensions
#' @param mask an optional mask indicating subset of voxels to load; may be an
#'   index vector (converted to a logical array) or an object coercible via
#'   \code{as.logical}
#' @return an instance of class \code{\linkS4class{BrainVector}}
#' @export loadVolumeList
loadVolumeList <- function(fileNames, mask=NULL) {
  stopifnot(all(sapply(fileNames, file.exists)))
  metaInfo <- lapply(fileNames, readHeader)
  dims <- do.call(rbind, lapply(metaInfo, dim))
  ## bug fix: all.equal() returns a character vector (not FALSE) on mismatch,
  ## so the previous all(sapply(...)) raised an "invalid 'type'" error instead
  ## of reaching the intended stop(); isTRUE() reduces it to a single logical
  if (!all(sapply(seq_len(nrow(dims)), function(i) isTRUE(all.equal(dims[1,], dims[i,]))))) {
    stop("list of volumes must all have same dimensions")
  }
  if (!all(apply(dims, 1, length) == 3)) {
    stop("all volumes in list must have dim = 3")
  }
  sourceList <- lapply(fileNames, function(fname) {
    BrainVolumeSource(fname, 1)
  })
  vols <- lapply(sourceList, loadData)
  if (is.null(mask)) {
    ## stack the volumes as columns and extend the space by a 4th dimension
    mat <- do.call(cbind, vols)
    dspace <- addDim(space(vols[[1]]), length(vols))
    DenseBrainVector(mat, dspace, label=sapply(metaInfo, function(m) m@label))
  } else {
    mat <- do.call(cbind, vols)
    dspace <- addDim(space(vols[[1]]), length(vols))
    if (is.vector(mask)) {
      ## mask supplied as index vector, convert to logical
      M <- array(logical(prod(dim(dspace)[1:3])), dim(dspace)[1:3])
      M[mask] <- TRUE
      mask <- M
    } else {
      mask <- as.logical(mask)
    }
    ## keep only the masked rows (voxels)
    SparseBrainVector(mat[mask,], dspace, mask=mask, label=sapply(metaInfo, function(m) m@label))
  }
}
## coercion helpers: unwrap the raw 4D array of a DenseBrainVector directly,
## or materialize any BrainVector by indexing out all of its elements
setAs("DenseBrainVector", "array", function(from) from@.Data)
setAs("BrainVector", "array", function(from) from[,,,])
#' show a \code{BrainVectorSource}
#'
#' Prints the class, the selected volume indices, and the header metadata.
#'
#' @param object the object
#' @export
setMethod(f="show",
          signature=signature(object="BrainVectorSource"),
          def=function(object) {
            cat("an instance of class", class(object), "\n\n")
            cat(" indices: ", object@indices, "\n\n")
            cat(" metaInfo: \n")
            show(object@metaInfo)
            cat("\n\n")
          })
#' show a \code{BrainVector}
#'
#' Prints the class, dimensions, spacing, origin, axes and affine transform.
#'
#' @param object the object
#' @export
setMethod(f="show", signature=signature("BrainVector"),
          def=function(object) {
            sp <- space(object)
            cat(class(object), "\n")
            cat("  Type           :", class(object), "\n")
            cat("  Dimension      :", dim(object), "\n")
            ## format spacing/origin as "a  X  b  X  ... z"
            cat("  Spacing        :", paste(paste(sp@spacing[1:(length(sp@spacing)-1)], " X ", collapse=" "),
                                            sp@spacing[length(sp@spacing)], "\n"))
            cat("  Origin         :", paste(paste(sp@origin[1:(length(sp@origin)-1)], " X ", collapse=" "),
                                            sp@origin[length(sp@origin)], "\n"))
            cat("  Axes           :", paste(sp@axes@i@axis, sp@axes@j@axis,sp@axes@k@axis), "\n")
            cat("  Coordinate Transform :", zapsmall(sp@trans), "\n")
          }
)
#' @rdname eachVolume-methods
#' @export
setMethod(f="eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="missing", mask="missing"),
          def=function(x, FUN, withIndex, mask, ...) {
            ## apply FUN to each 3D volume along the 4th dimension
            nvol <- dim(x)[4]
            lapply(seq_len(nvol), function(vol.idx) FUN(x[,,,vol.idx], ...))
          })
#' @rdname eachVolume-methods
#' @export
setMethod(f="eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="missing", mask="BrainVolume"),
          def=function(x, FUN, withIndex, mask, ...) {
            ## restrict each volume to the voxels where mask is positive
            keep <- which(mask > 0)
            lapply(seq_len(dim(x)[4]), function(vol.idx) {
              vol.vals <- x[,,,vol.idx]
              FUN(vol.vals[keep], ...)
            })
          })
#' @rdname eachVolume-methods
#' @export
## NOTE(review): this signature (BrainVector, function, missing, missing) is
## identical to the eachVolume method registered earlier in this file; this
## second setMethod() call silently replaces the earlier (behaviorally
## equivalent) definition -- consider removing one of the two
setMethod(f="eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="missing", mask="missing"),
          def=function(x, FUN, withIndex, mask, ...) {
            ## apply FUN to each 3D volume along the 4th dimension
            lapply(1:(dim(x)[4]), function(tt) {
              vals <- x[,,,tt]
              FUN(vals, ...)
            })
          })
#' @rdname eachVolume-methods
#' @export
setMethod(f="eachVolume", signature=signature(x="BrainBucket", FUN="function", withIndex="missing",mask="missing"),
          def=function(x, FUN, withIndex, ...) {
            ## BrainBucket volumes are extracted with [[ rather than 4D indexing
            lapply(1:(dim(x)[4]), function(tt) FUN(x[[tt]], ...))
          })
#' @rdname eachVolume-methods
#' @export
setMethod("eachVolume", signature=signature(x="BrainBucket", FUN="function", withIndex="logical"),
          def=function(x, FUN, withIndex, ...) {
            lapply(1:(dim(x)[4]), function(tt) {
              vol <- x[[tt]]
              ## optionally pass the volume index as FUN's second argument
              if (withIndex) FUN(vol,tt,...) else FUN(vol,...)
            })
          })
#' @rdname eachVolume-methods
#' @export
setMethod("eachVolume", signature=signature(x="BrainVector", FUN="function", withIndex="logical"),
          def=function(x, FUN, withIndex, ...) {
            ## apply FUN to each volume, optionally passing the volume index
            lapply(seq_len(dim(x)[4]), function(vol.idx) {
              cur.vol <- x[,,,vol.idx]
              if (withIndex) {
                FUN(cur.vol, vol.idx, ...)
              } else {
                FUN(cur.vol, ...)
              }
            })
          })
#' @rdname subVector-methods
#' @export
setMethod(f="subVector", signature=signature(x="DenseBrainVector", i="numeric"),
          def=function(x, i) {
            ## extract volumes `i` into a new, shorter DenseBrainVector
            assertthat::assert_that(max(i) <= dim(x)[4])
            src.space <- space(x)
            sub.dim <- c(dim(x)[1:3], length(i))
            sub.space <- BrainSpace(sub.dim, spacing=spacing(src.space), origin=origin(src.space), axes(src.space), trans(src.space))
            DenseBrainVector(x[,,,i], sub.space)
          })
#' extractor
#'
#' Pull out a single volume of a \code{BrainVector} as a
#' \code{DenseBrainVolume}.
#'
#' @rdname BrainVector-methods
#' @param i the volume index
#' @export
setMethod(f="[[", signature=signature(x="BrainVector", i="numeric"),
          def = function(x, i) {
            ## the extracted volume inherits the 3D spatial geometry of x
            src.space <- space(x)
            vol.space <- BrainSpace(dim(x)[1:3], spacing=spacing(src.space), origin=origin(src.space), axes(src.space), trans(src.space))
            DenseBrainVolume(x[,,,i], vol.space)
          })
#' @rdname takeVolume-methods
#' @param merge concatenate extracted volumes
#' @export
setMethod(f="takeVolume", signature=signature(x="BrainVector", i="numeric"),
          def=function(x, i, merge=FALSE) {
            ## TODO this is VERY slow
            ## TODO should be renamed "volSlice"
            xs <- space(x)
            ## all extracted volumes share the 3D geometry of x
            bspace <- BrainSpace(dim(x)[1:3], spacing=spacing(xs), origin=origin(xs), axes(xs), trans(xs))
            makevol <- function(i) {
              BrainVolume(x[,,,i], bspace)
            }
            res <- lapply(i, makevol)
            ## optionally fold the list of volumes back into one object
            if (length(res) > 1 && merge) {
              res <- do.call("concat", res)
            }
            ## a single result is unwrapped from the list
            if (length(res) == 1) {
              ## TODO should be consistent, e.g. always return list
              res[[1]]
            } else {
              res
            }
          })
#' @rdname eachSeries-methods
# @importFrom purrr array_branch map
#' @export
setMethod(f="eachSeries", signature=signature(x="DenseBrainVector", FUN="function", withIndex="missing"),
          def=function(x, FUN, withIndex=FALSE, ...) {
            #stop()
            #map(array_branch(x, 1:3), FUN)
            ## NOTE(review): callGeneric() re-dispatches eachSeries; presumably
            ## this is intended to reach the BrainVector method below, but if
            ## dispatch selects this DenseBrainVector method again it would
            ## recurse indefinitely -- confirm dispatch behavior
            callGeneric(x,FUN, withIndex,...)
          })
#' @rdname eachSeries-methods
#' @export
setMethod(f="eachSeries", signature=signature(x="BrainVector", FUN="function", withIndex="missing"),
          def=function(x, FUN, withIndex=FALSE, ...) {
            ## visit every voxel time-series in x-fastest order
            ## (first dim varies fastest, then second, then third)
            nx <- dim(x)[1]
            ny <- dim(x)[2]
            nz <- dim(x)[3]
            out <- vector("list", nx * ny * nz)
            pos <- 0
            for (zi in seq_len(nz)) {
              for (yi in seq_len(ny)) {
                for (xi in seq_len(nx)) {
                  pos <- pos + 1
                  out[[pos]] <- FUN(x[xi,yi,zi,])
                }
              }
            }
            out
          })
#' loadVector
#'
#' load an image volume from a file
#'
#' @param fileName the name of the file to load
#' @param indices the indices of the sub-volumes to load (e.g. if the file is 4-dimensional)
#' @param mask a mask defining the spatial elements to load
#' @param mmap memory mapping if possible
#' @return an \code{\linkS4class{BrainVector}} object
#' @export
loadVector <- function(fileName, indices=NULL, mask=NULL, mmap=FALSE) {
  ## describe the data via a source object, then realize it
  vec.source <- BrainVectorSource(fileName, indices, mask)
  loadData(vec.source, mmap)
}
#' @rdname concat-methods
#' @export
setMethod(f="concat", signature=signature(x="BrainVector", y="BrainVolume"),
          def=function(x,y, ...) {
            ## append a single volume to a 4D vector
            .concat4D(x,y,...)
          })
#' @rdname concat-methods
#' @export
setMethod(f="concat", signature=signature(x="BrainVolume", y="BrainVector"),
          def=function(x,y, ...) {
            ## prepend a single volume to a 4D vector
            .concat4D(x,y,...)
          })
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="logical", scale="logical"),
          def=function(x, center, scale) {
            M <- as.matrix(x)
            ## transpose so each column is one voxel's time-series, which is
            ## what base scale() centers/scales column-wise
            Ms <- scale(t(M), center, scale)
            BrainVector(Ms, space(x))
          })
## the following scaleSeries methods fill in missing arguments with TRUE and
## re-dispatch to the (logical, logical) method above
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="missing", scale="logical"),
          def=function(x, center, scale) {
            callGeneric(x, TRUE, scale)
          })
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="missing", scale="missing"),
          def=function(x, center, scale) {
            callGeneric(x, TRUE, TRUE)
          })
#' @rdname scaleSeries-methods
#' @export
setMethod(f="scaleSeries", signature=signature(x="BrainVector", center="logical", scale="missing"),
          def=function(x, center, scale) {
            callGeneric(x, center, TRUE)
          })
#' @export
#' @rdname splitScale-methods
#' @importFrom abind abind
setMethod(f="splitScale", signature=signature(x = "DenseBrainVector", f="factor", center="missing", scale="missing"),
          def=function(x, f) {
            ## default: center and scale within each factor level
            callGeneric(x, f, TRUE, TRUE)
          })
#' @export
#' @rdname splitScale-methods
#' @importFrom abind abind
setMethod(f="splitScale", signature=signature(x = "DenseBrainVector", f="factor", center="logical", scale="missing"),
          def=function(x, f, center) {
            callGeneric(x, f, center, TRUE)
          })
#' @export
#' @rdname splitScale-methods
#' @importFrom abind abind
setMethod(f="splitScale", signature=signature(x = "DenseBrainVector", f="factor", center="logical", scale="logical"),
          def=function(x, f, center, scale) {
            ## delegate to the matrix method on the transposed data, then wrap
            ## the scaled matrix back up in the original space
            m <- callGeneric(t(as.matrix(x)), f, center, scale)
            BrainVector(m, space(x))
          })
#' @rdname concat-methods
#' @export
setMethod(f="concat", signature=signature(x="BrainVector", y="BrainVector"),
          def=function(x,y,...) {
            ## concatenate two 4D vectors along the 4th dimension
            .concat4D(x,y,...)
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="matrix"),
          def=function(x,i) {
            ## i: nvox x 3 matrix of voxel coordinates; returns a
            ## ntimepoints x nvox matrix of time-series
            assertthat::assert_that(ncol(i) == 3)
            # old, slower method
            #apply(i, 1, function(i) x[i[1], i[2], i[3],])
            d4 <- dim(x)[4]
            ## replicate each coordinate row d4 times and append the time
            ## index, so all values come out of one vectorized matrix-index
            expanded <- i[rep(1:nrow(i), each=d4),]
            expanded <- cbind(expanded, 1:d4)
            vec <- x[expanded]
            matrix(vec, d4, nrow(i))
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="matrix"),
          def=function(x,i) {
            ## wrap the extracted series matrix with its voxel coordinates
            mat <- series(x, i)
            ROIVector(space(x), coords=i, data=mat)
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="ROIVolume"),
          def=function(x,i) {
            ## re-dispatch on the ROI's coordinate matrix
            grid <- coords(i)
            callGeneric(x, grid)
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="ROIVolume"),
          def=function(x,i) {
            ## NOTE(review): series(x, i) returns a plain matrix here, so
            ## coords(rvol)/values(rvol) applied to it look suspect -- verify
            ## this method against a real ROIVolume input
            rvol <- series(x, i)
            ROIVector(space(x), coords=coords(rvol), data=as.matrix(values(rvol)))
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="LogicalBrainVolume"),
          def=function(x,i) {
            ## the logical mask must share the spatial grid of x
            assertthat::assert_that(all.equal(dim(x)[1:3], dim(i)[1:3]))
            idx <- which(i == TRUE)
            assertthat::assert_that(length(idx) > 0)
            ## convert linear mask indices to (x, y, z) coordinates
            grid <- indexToGrid(i, idx)
            callGeneric(x, grid)
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="LogicalBrainVolume"),
          def=function(x,i) {
            ## extract the time-series matrix for all TRUE voxels, then pair
            ## it with the corresponding voxel coordinates in an ROIVector
            mat <- as.matrix(series(x, i))
            idx <- which(i == TRUE)
            ## bug fix: previously called indexToGrid(which(i == TRUE), idx)
            ## where `idx` was undefined in this scope and the arguments were
            ## reversed; mirror the series() method: indexToGrid(volume, idx)
            ROIVector(space(x), coords=indexToGrid(i, idx), data=as.matrix(mat))
          })
#' @rdname series-methods
#' @export
setMethod("series", signature(x="BrainVector", i="numeric"),
          def=function(x, i, j, k) {
            ## i alone: treat as 1D linear voxel indices;
            ## i, j, k together: parallel 3D coordinates
            if (missing(j) && missing(k)) {
              vdim <- dim(x)[1:3]
              mat <- arrayInd(i, vdim)
              ## one column of output per requested voxel
              apply(mat, 1, function(i) x[i[1], i[2], i[3],])
            } else {
              x[i,j,k,]
            }
          })
#' @rdname series-methods
#' @export
setMethod("series_roi", signature(x="BrainVector", i="numeric"),
          def=function(x, i, j, k) {
            ## build an ROIVector from either 1D linear indices (i alone) or
            ## parallel 3D voxel coordinates (i, j, k)
            roi <- if (missing(j) && missing(k)) {
              vdim <- dim(x)[1:3]
              vox <- arrayInd(i, vdim)
              callGeneric(x, vox)
            } else if (missing(i) || missing(j) || missing(k)) {
              ## bug fix: message previously listed the coordinates in the
              ## wrong order as ('i', 'k', 'j')
              stop("series_roi: must provide either 1D 'i' or 3D ('i', 'j', 'k') vector indices")
            } else {
              vox <- cbind(i,j,k)
              callGeneric(x, as.matrix(vox))
            }
            ## return visibly; the old trailing `mat <- if (...)` assignment
            ## made the return value invisible
            roi
          })
#' @describeIn seriesIter get a series iterator for a \code{\linkS4class{BrainVector}} instance
#' @export
setMethod(f="seriesIter", signature=signature(x="BrainVector"),
          def=function(x) {
            ## closure-based iterator over all voxel time-series; the mutable
            ## counter `i` lives in this enclosing environment
            len <- prod(dim(x)[1:3])
            vdim <- dim(x)[1:3]
            i <- 1
            nextEl <- function() {
              if (i <= len) {
                ## convert the linear counter to a 3D voxel coordinate
                vox <- .indexToGrid(i, vdim)
                i <<- i + 1
                x[vox[1], vox[2], vox[3],]
              } else {
                ## standard iterator-protocol termination signal
                stop("StopIteration")
              }
            }
            hasNx <- function() {
              i <= len
            }
            obj <- list(nextElem = nextEl, hasNext=hasNx)
            ## class tags make this compatible with the iterators package
            class(obj) <- c("seriesIter", "abstractiter", "iter")
            obj
          })
#' @export
setAs(from="DenseBrainVector", to="matrix",
      function(from) {
        ## flatten the 4D array to an (nx*ny*nz) x ntimepoints matrix
        flat <- from@.Data
        vdim <- dim(flat)
        dim(flat) <- c(prod(vdim[1:3]), vdim[4])
        flat
      })
#' convert a \code{BrainVector} to \code{list} of volumes.
#'
#' @rdname as.list-methods
#' @param x the object
#' @export
setMethod(f="as.list", signature=signature(x = "BrainVector"), def=function(x) {
  ## one extracted volume per 4th-dimension index
  lapply(seq_len(dim(x)[4]), function(vol.idx) takeVolume(x, vol.idx))
})
#' convert a \code{DenseBrainVector} to a matrix
#'
#' @rdname as.matrix-methods
#' @param x the object
#' @export
setMethod(f="as.matrix", signature=signature(x = "DenseBrainVector"), def=function(x) {
  ## delegate to the setAs("DenseBrainVector", "matrix") coercion
  as(x, "matrix")
})
#' @rdname as.sparse-methods
#' @export
setMethod(f="as.sparse", signature=signature(x="DenseBrainVector", mask="LogicalBrainVolume"),
          def=function(x, mask) {
            ## the mask must match the spatial grid and spacing of x
            assert_that(all(dim(x)[1:3] == dim(mask)))
            assert_that(all(spacing(space(x)) == spacing(space(mask))))
            ## keep only the rows (voxels) inside the mask
            dat <- as.matrix(x)[mask == TRUE,]
            ## return directly; the old trailing `bvec <- ...` assignment
            ## returned the result invisibly (an unused `vdim` was removed)
            SparseBrainVector(dat, space(x), mask)
          })
#' @rdname as.sparse-methods
setMethod(f="as.sparse", signature=signature(x="DenseBrainVector", mask="numeric"),
          def=function(x, mask) {
            ## build a logical mask volume from the supplied voxel indices
            vdim <- dim(x)[1:3]
            m <- array(0, vdim)
            m[mask] <- TRUE
            logivol <- LogicalBrainVolume(m, dropDim(space(x)))
            dat <- as.matrix(x)[mask,]
            ## return directly; the old trailing `bvec <- ...` assignment
            ## returned the result invisibly
            SparseBrainVector(dat, space(x), logivol)
          })
#' @export
#' @rdname writeVector-methods
setMethod(f="writeVector",signature=signature(x="BrainVector", fileName="character", format="missing", dataType="missing"),
          def=function(x, fileName) {
            ## default output format is NIfTI
            write.nifti.vector(x, fileName)
          })
#' @export
#' @rdname writeVector-methods
setMethod(f="writeVector",signature=signature(x="BrainVector", fileName="character", format="character", dataType="missing"),
          def=function(x, fileName, format) {
            ## accept the common spellings of the NIfTI-1 format name
            fmt <- toupper(format)
            if (fmt %in% c("NIFTI", "NIFTI1", "NIFTI-1")) {
              callGeneric(x, fileName)
            } else {
              stop(paste("sorry, cannot write format: ", format))
            }
          })
#' @export writeVector
#' @rdname writeVector-methods
#' @aliases writeVector,BrainVector,character,missing,character,ANY-method
setMethod(f="writeVector",signature=signature(x="BrainVector", fileName="character", format="missing", dataType="character"),
          def=function(x, fileName, dataType) {
            ## write as NIfTI with an explicit storage data type
            write.nifti.vector(x, fileName, dataType)
          })
|
#!/usr/bin/Rscript
# Example of how to wait for user input with scan().
# Note: this only pauses in an interactive R session; it does not work when
# run through Rscript (meaning not in Rscript, only in R).
cat("please press a key...\n")
invisible(scan(file = "", what = "", nmax = 1))
| /src/examples/console/scan.R | no_license | veltzer/demos-r | R | false | false | 220 | r | #!/usr/bin/Rscript
# This is an example of how to wait for user input
# This does not work in a script (meaning not in Rscript, only in R)
cat("please press a key...\n")
invisible(scan(file = "", what = "", nmax = 1))
|
| pc = 0xc002 | a = 0x05 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x05 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc007 | a = 0x05 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0515] = 0x05 |
| pc = 0xc009 | a = 0x03 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00c | a = 0x08 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0515] = 0x05 |
| pc = 0xc00e | a = 0xbf | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc011 | a = 0xbf | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0516] = 0xbf |
| pc = 0xc014 | a = 0x7e | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 01110101 | MEM[0x0516] = 0xbf |
| /res/adc_absx_2.r | permissive | JSpuri/EmuParadise | R | false | false | 788 | r | | pc = 0xc002 | a = 0x05 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc004 | a = 0x05 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc007 | a = 0x05 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0515] = 0x05 |
| pc = 0xc009 | a = 0x03 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 |
| pc = 0xc00c | a = 0x08 | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110100 | MEM[0x0515] = 0x05 |
| pc = 0xc00e | a = 0xbf | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc011 | a = 0xbf | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0516] = 0xbf |
| pc = 0xc014 | a = 0x7e | x = 0x02 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 01110101 | MEM[0x0516] = 0xbf |
|
## ---------------------------------------------------------------------------
## Interactive smoke-test script for the cobalt package: exercises bal.tab()
## against the lalonde data conditioned by many different packages.
## Section 1: setup, MatchIt, twang, CBPS, Matching.
## ---------------------------------------------------------------------------
#sourcing
# for (i in dir("R/")) source(paste0("R/", i))
# library(ggplot2)
# library(crayon)
# stop("Done sourcing.", call. = FALSE)
devtools::load_all(".") #Cmd + Shift + L
#Tests things quickly
#library("cobalt")
data("lalonde", package = "cobalt")
covs <- subset(lalonde, select = -c(re78, treat))
## lalonde_ / covs_ have the "race" factor split into dummy columns
lalonde_ <- splitfactor(lalonde, "race", drop.first = F)
covs_ <- subset(lalonde_, select = -c(re78, treat))
#No Adjustment
bal.tab(covs, treat = lalonde$treat, m.threshold = .1, v.threshold = 2, imbalanced.only = T)
bal.tab(f.build("treat",covs), data = lalonde, ks.threshold = .1)
#MatchIt: matching w/ PS
library("MatchIt")
m1 <- matchit(treat ~ log(age)*married + educ + race + nodegree + re74 + re75,
              data = lalonde, replace = T, ratio = 2,
              discard = "both", reestimate = TRUE)
bal.tab(m1, int = T, v.threshold = 2, imbalanced.only = F)
#MatchIt: matching w/ distance
m1.1 <- matchit(f.build("treat", covs), data = lalonde, distance = runif(nrow(lalonde)))
bal.tab(m1.1, data = lalonde) #Not needed for MatchIt 4.0.0
#Matching with complicated formula
library(rms)
m1.2 <- matchit(treat ~ log(age)*married + poly(educ,2) + rcs(age,3) + race + factor(nodegree) + re74 + re75,
                data = lalonde, replace = T, ratio = 2)
bal.tab(m1.2, addl = ~ rcs(educ,3) + poly(age,2))
#MatchIt: matching w/Mahalanobis
m2 <- matchit(f.build("treat", covs), data = lalonde, distance = "mahalanobis")
bal.tab(m2, data = lalonde, int = T, v.threshold = 2, addl = "race")
#MatchIt: subclassification
m3 <- matchit(f.build("treat", covs), data = lalonde, method = "subclass")
bal.tab(m3, int = F, v.threshold = 2, disp.subclass = T, ks.threshold = .1)
#Matchit: full matching
m4 <- matchit(f.build("treat", covs), data = lalonde, method = "full", distance = "probit", estimand = "ATE")
bal.tab(m4, int = T, ks.threshold = .05)
bal.tab(m4, pairwise = FALSE, un = T)
#Matchit: genetic matching, using matching weights
m5 <- matchit(f.build("treat", covs), data = lalonde, method = "genetic", replace = F,
              ratio = 1, pop.size = 50)
bal.tab(m5, method = "m", estimand = "ATT")
#twang
library("twang")
ps.out <- ps(f.build("treat", covs), data = lalonde,
             stop.method = c("ks.max", "es.max"),
             estimand = "ATT", verbose = FALSE, n.trees = 2000, bag.fraction = .6)
bal.tab(ps.out, thresholds = c(k=.05, m = .05), stats = "k")
## sampling weights passed through to ps() and bal.tab()
sampw <- sample(c(1.25, 0.75), nrow(covs), TRUE, c(.5, .5))
ps.out.s <- ps(f.build("treat", covs), data = lalonde,
               stop.method = c("ks.max"),
               estimand = "ATT", verbose = FALSE,
               sampw = sampw, n.trees = 1000)
bal.tab(ps.out.s, un = T)
bal.tab(covs, lalonde$treat, weights = get.w(ps.out.s), s.weights = sampw,
        un = T, distance = ps.out.s$ps)
#CBPS: binary
library("CBPS")
cbps.out <- CBPS(treat ~ log(age)*married + poly(educ,2) + rcs(age,3) + race + factor(nodegree) + re74 + re75, data = lalonde, ATT = F)
bal.tab(cbps.out, disp.bal.tab = T)
cbps.out.e <- CBPS(f.build("treat", covs), data = lalonde, method = "exact", ATT = F)
bal.tab(cbps.out, weights = list("exact" = (cbps.out.e)),
        stats = c("m", "v", "k"))
#Matching
library("Matching")
p.score <- glm(f.build("treat", covs),
               data = lalonde, family = "binomial")$fitted.values
Match.out <- Match(Tr = lalonde$treat, X = p.score, estimand = "ATE",
                   M = 1, replace = T, CommonSupport = F)
bal.tab(Match.out, formula = f.build("treat", covs), data = lalonde)
gen <- GenMatch(Tr = lalonde$treat, X = covs_,
                M = 1, replace = F, pop.size = 10,
                print.level = 0, ties = F)
Gen.Match.out <- Match(Tr=lalonde$treat, X=covs_, Weight.matrix=gen,
                       M = 2, replace = F, ties = F)
bal.tab(Gen.Match.out, formula = f.build("treat", covs), data = lalonde,
        addl = data.frame(age2 = covs$age^2))
gen2 <- GenMatch(Tr = lalonde$treat, X = covs_,
                 M = 1, replace = F, pop.size = 1000,
                 print.level = 0, ties = F, BalanceMatrix = cbind(covs_, age2 = covs_$age^2))
Gen.Match.out2 <- Match(Tr=lalonde$treat, X=covs_, Weight.matrix=gen2,
                        M = 1, replace = F, ties = F)
bal.tab(Gen.Match.out2, formula = f.build("treat", covs), data = lalonde,
        addl = data.frame(age2 = covs$age^2))
Matchby.out <- Matchby(Tr = lalonde$treat, X = p.score, by = lalonde$married)
bal.tab(Matchby.out, formula = f.build("treat", covs), data = lalonde,
        addl = ~I(age^2) + married*race)
## Section 2: optmatch, WeightIt, manual IPTW / subclassification, ebal,
## continuous treatments, clustering, and multiple imputation (part 1).
#optmatch
library("optmatch")
ls <- lalonde[sample(1:614, 614, F),]
p.score <- glm(treat ~ age + educ + race +
                 married + nodegree + re74 + re75,
               data = ls, family = binomial)$fitted.values
pm <- pairmatch(treat ~ qlogis(p.score), data = ls)
## Using formula and data (treatment not needed)
bal.tab(pm, ~ age + educ + race +
          married + nodegree + re74 + re75, data = ls,
        distance = p.score)
#WeightIt
library(WeightIt)
W <- weightit(treat ~ log(age)*married + poly(educ,2) + rms::rcs(age,3) + race + factor(nodegree) + re74 + re75, data = lalonde,
              method = "ps", estimand = "ATT")
bal.tab(W)
W.cont <- weightit(f.build("re75", covs[-7]), data = lalonde,
                   method = "ebal")
bal.tab(W.cont, stats = c("c", "ks"), disp = "means", un = T)
W.mult <- weightit(f.build("race", covs[-3]), data = lalonde,
                   method = "ps", estimand = "ATE",
                   focal = "black")
bal.tab(W.mult, disp = c("m", "sds"), which.treat = .all)
#Data frame/formula: weighting
glm1 <- glm(treat ~ age + educ + race, data = lalonde,
            family = "binomial")
lalonde$distance <- glm1$fitted.values
## manual inverse-probability-of-treatment weights
lalonde$iptw.weights <- ifelse(lalonde$treat==1,
                               1/lalonde$distance,
                               1/(1-lalonde$distance))
bal.tab(f.build("treat", covs), data = lalonde,
        weights = "iptw.weights", method = "weighting",
        addl = data.frame(age2 = covs$age^2),
        distance = "distance")
#Data frame/formula: subclassification
lalonde$subclass <- findInterval(lalonde$distance,
                                 quantile(lalonde$distance, (0:6)/6), all.inside = T)
bal.tab(covs, treat = lalonde$treat, subclass = lalonde$subclass,
        disp.subclass = TRUE, addl = ~I(age^2),
        m.threshold = .1, v.threshold = 2)
bal.tab(covs, treat = lalonde$re75, subclass = lalonde$subclass,
        disp.subclass = TRUE, addl = ~I(age^2))
#Entropy balancing
library("ebal")
e.out <- ebalance(lalonde$treat, cbind(covs_[,-3], age_2 = covs_$age^2, re74_2 = covs_$re74^2/1000,
                                       re75_2 = covs_$re75^2/1000, educ_2 = covs_$educ^2))
bal.tab(e.out, treat = lalonde$treat, covs = covs, disp.ks = T, disp.v.ratio = T)
e.out.trim <- ebalance.trim(e.out)
bal.tab(e.out.trim, treat = lalonde$treat, covs = covs, disp.ks = T, disp.v.ratio = T)
bal.tab(covs, lalonde$treat, weights = list(e = e.out,
                                            e.trim = e.out.trim),
        disp.ks = T, disp.v.ratio = T)
#Continuous treatment (CBPS)
cbps.out2 <- CBPS(f.build("re78", covs), data = lalonde, method = "exact")
bal.tab(cbps.out2, stats = c("c", "sp"))
cbps.out2.e <- CBPS(f.build("re78", covs), data = do.call(data.frame, lapply(lalonde, rank)), method = "exact")
bal.tab(cbps.out2, weights = list(CBPS.sp = cbps.out2.e),
        stats = c("c", "sp"))
#Clustering with MatchIt
lalonde$school <- sample(LETTERS[1:4], nrow(lalonde), replace = T)
m5 <- matchit(f.build("treat", covs), data = lalonde, exact = "school")
bal.tab(m5, cluster = lalonde$school, disp.v.ratio = T, cluster.summary = T)
bal.tab(m5, cluster = lalonde$school, disp.ks = T, abs = T, cluster.fun = c("max"))
#Clustering w/ continuous treatment
bal.tab(cbps.out2, cluster = lalonde$school, un = T, cluster.fun = "mean")
#Multiple imputation
data("lalonde_mis", package = "cobalt")
covs_mis <- subset(lalonde_mis, select = -c(re78, treat))
lalonde_mis_ <- splitfactor(lalonde_mis, "race")
covs_mis_ <- subset(lalonde_mis_, select = -c(re78, treat))
library("mice")
imp <- mice(lalonde_mis, m = 3)
imp.data <- complete(imp, "long", include = FALSE)
imp.data <- imp.data[with(imp.data, order(.imp, .id)),]
## per-imputation propensity scores and matching weights
ps <- match.weight <- rep(0, nrow(imp.data))
for (i in unique(imp.data$.imp)) {
  in.imp <- imp.data$.imp == i
  ps[in.imp] <- glm(f.build("treat", covs_mis), data = imp.data[in.imp,],
                    family = "binomial")$fitted.values
  m.out <- matchit(treat ~ age, data = imp.data[in.imp,], distance = ps[in.imp])
  match.weight[in.imp] <- m.out$weights
}
imp.data <- cbind(imp.data, ps = ps, match.weight = match.weight)
bal.tab(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
        method = "matching", imp = ".imp", distance = "ps", which.imp = .all,
        disp.ks = T)
bal.tab(f.build("treat", covs_mis), data = imp, weights = match.weight,
        method = "matching", distance = ps, which.imp = NULL,
        cluster = "race", disp.v = T, abs = T, which.cluster = "black")
#With MatchThem
library(MatchThem)
mt.out <- matchthem(f.build("treat", covs_mis), datasets = imp,
approach = "within")
bal.tab(mt.out)
wt.out <- weightthem(f.build("race", covs_mis[-3]), datasets = imp,
approach = "within", method = "optweight")
bal.tab(wt.out)
#With continuous treatment
imp.data <- complete(imp, "long", include = FALSE)
imp.data <- imp.data[with(imp.data, order(.imp, .id)),]
w <- rep(0, nrow(imp.data))
for (i in unique(imp.data$.imp)) {
in.imp <- imp.data$.imp == i
w[in.imp] <- get.w(CBPS(f.build("re78", covs_mis), data = imp.data[in.imp,],
method = "exact"))
}
imp.data <- cbind(imp.data, cbps.w = w)
bal.tab(f.build("re78", covs_mis), data = imp, weights = w,
which.imp = .all, un = T, thresholds = .1)
bal.tab(f.build("re78", covs_mis), data = imp.data, weights = "cbps.w",
method = "w", imp = ".imp", which.imp = NULL, cluster = "race")
#Missingness indicators
data("lalonde_mis", package = "cobalt")
covs_mis <- subset(lalonde_mis, select = -c(re78, treat))
lalonde_mis_ <- splitfactor(lalonde_mis, "race")
covs_mis_ <- subset(lalonde_mis_, select = -c(re78, treat))
library(twang)
ps.out <- ps(f.build("treat", covs_mis), data = lalonde_mis,
stop.method = c("es.max"),
estimand = "ATE", verbose = FALSE, n.trees = 1000)
bal.tab(ps.out, int = T)
#love.plot
v <- data.frame(old = c("age", "educ", "race_black", "race_hispan",
"race_white", "married", "nodegree", "re74", "re75", "distance"),
new = c("Age", "Years of Education", "Black",
"Hispanic", "White", "Married", "No Degree Earned",
"Earnings 1974", "Earnings 1975", "Propensity Score"))
plot(bal.tab(m1), threshold = .1, var.names = v)
love.plot(m1, stat = c("m", "v"), stars = "std", drop.distance = T)
love.plot(bal.tab(m1, cluster = lalonde$school), stat = "ks", agg.fun = "range", which.cluster = NA)
love.plot(cbps.out2.e, drop.distance = F, line = T, abs = T,
var.order = "u")
love.plot(bal.tab(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", distance = "ps"),
agg.fun = "range")
love.plot(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", distance = "ps",
cluster = "race", which.cluster = 1:3,
agg.fun = "range", stat = c("m", "ks"))
#bal.plot
bal.plot(m1, "age", which = "both")
bal.plot(m1, "race")
bal.plot(cbps.out, "age", mirror = TRUE, type = "h", bins = 20, colors = c("white", "black"))
bal.plot(cbps.out, "race", which = "both")
bal.plot(cbps.out2, "age", which = "both")
bal.plot(cbps.out2, "race", which = "u")
bal.plot(m3, "age", which.sub = 2)
bal.plot(m3, "race", which.sub = 1:3, which = "both")
bal.plot(m5, "age", cluster = lalonde$school, which.cluster = c("A", "B"), which = "both")
bal.plot(m5, "race", cluster = lalonde$school, which.cluster = .none)
bal.plot(cbps.out2, "age", cluster = lalonde$school, which.cluster = c("A", "B"), which = "both")
bal.plot(cbps.out2, "race", cluster = lalonde$school)
bal.plot(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", var.name = "age", which = "b")
bal.plot(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", cluster = "race", which.imp = 1,
var.name = "age", which = "b")
## Section 4: other conditioning packages (ATE, CMatching, designmatch,
## optweight) and multi-category treatments.
#Other packages
#ATE
library("ATE")
ate.att <- ATE(Y = lalonde_$re78, lalonde_$treat, covs_, ATT = T)
ate.att$weights.q[lalonde$treat == 1] <- 1
bal.tab(covs, lalonde$treat, weights = ate.att$weights.q, method = "w", estimand = "att", disp.v.ratio = T)
ate.ate <- ATE(Y = rep(0, nrow(lalonde)), lalonde_$treat, covs_, ATT = F, theta = 1)
ate.ate$weights <- ate.ate$weights.q + ate.ate$weights.p
bal.tab(covs, lalonde$treat, weights = ate.ate$weights, method = "w", estimand = "ate", disp.v.ratio = T)
#CMatching
library("CMatching")
lalonde$school <- sample(1:2, nrow(lalonde), replace = T)
ls <- lalonde[order(lalonde$school),]
p.score <- glm(treat ~ age + educ + race + married + nodegree + re74 + re75 + factor(school) - 1,
               data = ls, family = "binomial")$fitted.values
MW <- MatchPW(Tr = ls$treat, X = p.score,
              M = 1, replace = T, Group = ls$school,
              caliper = 2, estimand = "ATT")
bal.tab(MW, formula = f.build("treat", covs), data = ls, cluster = "school")
#designmatch
library(designmatch)
## designmatch expects treated units first
ls <- lalonde[order(lalonde$treat, decreasing = T),]
cs <- covs[order(lalonde$treat, decreasing = T),]
dmout <- bmatch(ls$treat,
                dist_mat = NULL,
                subset_weight = 1,
                total_groups = 100,
                mom = list(covs = as.matrix(cs[-(3:5)]),
                           tols = absstddif(as.matrix(cs[-(3:5)]), ls$treat, .0001)),
                # ks = list(covs = as.matrix(covs_[-c(3:7)]),
                #           n_grid = 7,
                #           tols = rep(.05, 4)),
                n_controls = 1,
                solver = list(name = "glpk", approximate = 1)
                # , total_groups = 185
                , fine = list(covs = cs[c(3,4,5)])
)
bal.tab(dmout, covs = cs, treat = ls$treat)
bal.tab(dmout, formula = treat ~ covs, data = ls)
#optweight (default method)
library(optweight)
W <- optweight(f.build("treat", covs), data = lalonde,
               estimand = "ATT")
bal.tab(W)
W.cont <- optweight(f.build("re75", covs[-7]), data = lalonde)
bal.tab(W.cont)
W.mult <- optweight(f.build("race", covs[-3]), data = lalonde,
                    estimand = "ATE",
                    focal = "black")
bal.tab(W.mult)
#Multinomial
## synthetic 3-level treatment derived from the binary one
lalonde$treat3 <- factor(ifelse(lalonde$treat == 1, "A", sample(c("B", "C"), nrow(lalonde), T)))
bal.tab(f.build("treat3", covs), data = lalonde, focal = 1, which.treat = 1:3, m.threshold = .1)
mnps3.out <- mnps(f.build("treat3", covs), data = lalonde,
                  stop.method = c("es.mean", "es.max"),
                  estimand = "ATE", verbose = FALSE,
                  n.trees = 200)
bal.tab(mnps3.out, which.treat = 1:3)
bal.plot(mnps3.out, var.name = "age")
mnps3.att <- mnps(f.build("treat3", covs), data = lalonde,
                  stop.method = c("ks.max"),
                  estimand = "ATT", verbose = FALSE,
                  treatATT = "B")
bal.tab(mnps3.att, which.treat = .all)
bal.plot(mnps3.att, var.name = "age")
cbps.out3 <- CBPS(f.build("treat3", covs), data = lalonde)
bal.tab(cbps.out3)
bal.plot(cbps.out3, var.name = "age")
bal.plot(f.build("treat3", covs), data = lalonde, var.name = "age",
         weights = list((cbps.out3),
                        (mnps3.out)),
         method = "w", which = "both")
bal.tab(f.build("treat3", covs), data = lalonde,
        weights = data.frame(cbps = get.w(cbps.out3),
                             gbm = get.w(mnps3.out)),
        method = "w")
ate3.out <- ATE(rep(0, nrow(lalonde)), as.numeric(lalonde$treat3)-1,
                covs_, ATT = TRUE)
ate3.out$weights <- apply(ate3.out$weights.mat, 2, sum)
bal.plot(f.build("treat3", covs), data = lalonde, var.name = "age",
         weights = data.frame(ate = ate3.out$weights),
         method = "w", which = "both")
bal.tab(f.build("treat3", covs), data = lalonde,
        weights = data.frame(ate = ate3.out$weights),
        method = "w")
#MSMs
## marginal structural models: longitudinal treatments over three time points
library("twang")
data(iptwExWide, package = "twang")
bal.tab(list(iptwExWide[c("use0", "gender", "age")], iptwExWide[c("use0", "gender", "age", "use1", "tx1")]), treat.list = iptwExWide[c("tx1", "tx2")])
bal.tab(list(tx1 ~ use0 + gender + age,
             tx2 ~ use1 + use0 + tx1 + gender + age,
             tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
        data = iptwExWide)
iptw.Ex <- iptw(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
timeInvariant ~ gender + age,
data = iptwExWide,
cumulative = FALSE,
priorTreatment = FALSE,
verbose = FALSE,
stop.method = "es.max",
n.trees = 2000)
bal.tab(iptw.Ex)
data("iptwExLong")
iptw.l <- iptw(tx ~ gender + age + use, data = iptwExLong$covariates,
timeIndicators = iptwExLong$covariates$time, ID = iptwExLong$covariates$ID,
n.trees = 200, stop.method = "es.max",
verbose = FALSE)
bal.tab(iptw.l)
library("WeightIt")
Wmsm <- weightitMSM(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
data = iptwExWide,
method = "ps")
bal.tab(Wmsm)
library("CBPS")
data(iptwExLong, package = "twang")
cbps.msm <- CBMSM(tx ~ age + use,
data = iptwExLong$covariates,
id = iptwExLong$covariates$ID,
time = iptwExLong$covariates$time,
time.vary = T, msm.variance = "full")
bal.tab(cbps.msm)
W.msm <- weightitMSM(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
data = iptwExWide, stabilize = F,
verbose = FALSE,
method = "ps")
bal.tab(W.msm)
#Target checking
library(WeightIt)
w1 <- weightit(lalonde$treat ~ covs, estimand = "ATE")
target.bal.tab(w1, which.treat = NULL)
w2 <- weightit(lalonde$race ~ covs[-3], estimand = "ATE", method = "cbps")
target.bal.tab(w2, which.treat = NULL)
w3 <- weightit(lalonde$re78 ~ covs, estimand = "ATE", method = "cbps", over = FALSE)
target.bal.tab(w3, which.treat = NULL)
target.bal.tab(w3, disp.means = T, int = 2, imbalanced.only = T, v.threshold = .5)
w4 <- weightitMSM(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
data = iptwExWide,
verbose = FALSE,
method = "ps")
target.bal.tab(w4, which.treat = NULL)
# Scoping check: bal.tab() invoked through do.call(quote = TRUE) from inside
# two levels of anonymous-function scope must still resolve `d` (a local copy
# of lalonde) when evaluating its `data` argument. Exercises cobalt's
# non-standard evaluation; the balance tables themselves are discarded.
a <- function() {
d <- lalonde
lapply(1:3, function(i) {
lapply(1, function(x) {
# quote = TRUE stops do.call() from re-building the call and losing the
# environment in which `d` is visible
b <- do.call(bal.tab, list(f.build("treat",covs), data = d, cluster = "race", which.cluster = NULL), quote = T)
})
})
}
| /do_not_include/tests.R | no_license | ngreifer/cobalt | R | false | false | 19,957 | r | #sourcing
# for (i in dir("R/")) source(paste0("R/", i))
# library(ggplot2)
# library(crayon)
# stop("Done sourcing.", call. = FALSE)
devtools::load_all(".") #Cmd + Shift + L
#Tests things quickly
#library("cobalt")
data("lalonde", package = "cobalt")
covs <- subset(lalonde, select = -c(re78, treat))
lalonde_ <- splitfactor(lalonde, "race", drop.first = F)
covs_ <- subset(lalonde_, select = -c(re78, treat))
#No Adjustment
bal.tab(covs, treat = lalonde$treat, m.threshold = .1, v.threshold = 2, imbalanced.only = T)
bal.tab(f.build("treat",covs), data = lalonde, ks.threshold = .1)
#MatchIt: matching w/ PS
library("MatchIt")
m1 <- matchit(treat ~ log(age)*married + educ + race + nodegree + re74 + re75,
data = lalonde, replace = T, ratio = 2,
discard = "both", reestimate = TRUE)
bal.tab(m1, int = T, v.threshold = 2, imbalanced.only = F)
#MatchIt: matching w/ distance
m1.1 <- matchit(f.build("treat", covs), data = lalonde, distance = runif(nrow(lalonde)))
bal.tab(m1.1, data = lalonde) #Not needed for MatchIt 4.0.0
#Matching with complicated formula
library(rms)
m1.2 <- matchit(treat ~ log(age)*married + poly(educ,2) + rcs(age,3) + race + factor(nodegree) + re74 + re75,
data = lalonde, replace = T, ratio = 2)
bal.tab(m1.2, addl = ~ rcs(educ,3) + poly(age,2))
#MatchIt: matching w/Mahalanobis
m2 <- matchit(f.build("treat", covs), data = lalonde, distance = "mahalanobis")
bal.tab(m2, data = lalonde, int = T, v.threshold = 2, addl = "race")
#MatchIt: subclassification
m3 <- matchit(f.build("treat", covs), data = lalonde, method = "subclass")
bal.tab(m3, int = F, v.threshold = 2, disp.subclass = T, ks.threshold = .1)
#Matchit: full matching
m4 <- matchit(f.build("treat", covs), data = lalonde, method = "full", distance = "probit", estimand = "ATE")
bal.tab(m4, int = T, ks.threshold = .05)
bal.tab(m4, pairwise = FALSE, un = T)
#Matchit: genetic matching, using matching weights
m5 <- matchit(f.build("treat", covs), data = lalonde, method = "genetic", replace = F,
ratio = 1, pop.size = 50)
bal.tab(m5, method = "m", estimand = "ATT")
#twang
library("twang")
ps.out <- ps(f.build("treat", covs), data = lalonde,
stop.method = c("ks.max", "es.max"),
estimand = "ATT", verbose = FALSE, n.trees = 2000, bag.fraction = .6)
bal.tab(ps.out, thresholds = c(k=.05, m = .05), stats = "k")
sampw <- sample(c(1.25, 0.75), nrow(covs), TRUE, c(.5, .5))
ps.out.s <- ps(f.build("treat", covs), data = lalonde,
stop.method = c("ks.max"),
estimand = "ATT", verbose = FALSE,
sampw = sampw, n.trees = 1000)
bal.tab(ps.out.s, un = T)
bal.tab(covs, lalonde$treat, weights = get.w(ps.out.s), s.weights = sampw,
un = T, distance = ps.out.s$ps)
#CBPS: binary
library("CBPS")
cbps.out <- CBPS(treat ~ log(age)*married + poly(educ,2) + rcs(age,3) + race + factor(nodegree) + re74 + re75, data = lalonde, ATT = F)
bal.tab(cbps.out, disp.bal.tab = T)
cbps.out.e <- CBPS(f.build("treat", covs), data = lalonde, method = "exact", ATT = F)
bal.tab(cbps.out, weights = list("exact" = (cbps.out.e)),
stats = c("m", "v", "k"))
#Matching
library("Matching")
p.score <- glm(f.build("treat", covs),
data = lalonde, family = "binomial")$fitted.values
Match.out <- Match(Tr = lalonde$treat, X = p.score, estimand = "ATE",
M = 1, replace = T, CommonSupport = F)
bal.tab(Match.out, formula = f.build("treat", covs), data = lalonde)
gen <- GenMatch(Tr = lalonde$treat, X = covs_,
M = 1, replace = F, pop.size = 10,
print.level = 0, ties = F)
Gen.Match.out <- Match(Tr=lalonde$treat, X=covs_, Weight.matrix=gen,
M = 2, replace = F, ties = F)
bal.tab(Gen.Match.out, formula = f.build("treat", covs), data = lalonde,
addl = data.frame(age2 = covs$age^2))
gen2 <- GenMatch(Tr = lalonde$treat, X = covs_,
M = 1, replace = F, pop.size = 1000,
print.level = 0, ties = F, BalanceMatrix = cbind(covs_, age2 = covs_$age^2))
Gen.Match.out2 <- Match(Tr=lalonde$treat, X=covs_, Weight.matrix=gen2,
M = 1, replace = F, ties = F)
bal.tab(Gen.Match.out2, formula = f.build("treat", covs), data = lalonde,
addl = data.frame(age2 = covs$age^2))
Matchby.out <- Matchby(Tr = lalonde$treat, X = p.score, by = lalonde$married)
bal.tab(Matchby.out, formula = f.build("treat", covs), data = lalonde,
addl = ~I(age^2) + married*race)
#optmatch
library("optmatch")
ls <- lalonde[sample(1:614, 614, F),]
p.score <- glm(treat ~ age + educ + race +
married + nodegree + re74 + re75,
data = ls, family = binomial)$fitted.values
pm <- pairmatch(treat ~ qlogis(p.score), data = ls)
## Using formula and data (treatment not needed)
bal.tab(pm, ~ age + educ + race +
married + nodegree + re74 + re75, data = ls,
distance = p.score)
#WeightIt
library(WeightIt)
W <- weightit(treat ~ log(age)*married + poly(educ,2) + rms::rcs(age,3) + race + factor(nodegree) + re74 + re75, data = lalonde,
method = "ps", estimand = "ATT")
bal.tab(W)
W.cont <- weightit(f.build("re75", covs[-7]), data = lalonde,
method = "ebal")
bal.tab(W.cont, stats = c("c", "ks"), disp = "means", un = T)
W.mult <- weightit(f.build("race", covs[-3]), data = lalonde,
method = "ps", estimand = "ATE",
focal = "black")
bal.tab(W.mult, disp = c("m", "sds"), which.treat = .all)
#Data frame/formula: weighting
glm1 <- glm(treat ~ age + educ + race, data = lalonde,
family = "binomial")
lalonde$distance <- glm1$fitted.values
lalonde$iptw.weights <- ifelse(lalonde$treat==1,
1/lalonde$distance,
1/(1-lalonde$distance))
bal.tab(f.build("treat", covs), data = lalonde,
weights = "iptw.weights", method = "weighting",
addl = data.frame(age2 = covs$age^2),
distance = "distance")
#Data frame/formula: subclassification
lalonde$subclass <- findInterval(lalonde$distance,
quantile(lalonde$distance, (0:6)/6), all.inside = T)
bal.tab(covs, treat = lalonde$treat, subclass = lalonde$subclass,
disp.subclass = TRUE, addl = ~I(age^2),
m.threshold = .1, v.threshold = 2)
bal.tab(covs, treat = lalonde$re75, subclass = lalonde$subclass,
disp.subclass = TRUE, addl = ~I(age^2))
#Entropy balancing
library("ebal")
e.out <- ebalance(lalonde$treat, cbind(covs_[,-3], age_2 = covs_$age^2, re74_2 = covs_$re74^2/1000,
re75_2 = covs_$re75^2/1000, educ_2 = covs_$educ^2))
bal.tab(e.out, treat = lalonde$treat, covs = covs, disp.ks = T, disp.v.ratio = T)
e.out.trim <- ebalance.trim(e.out)
bal.tab(e.out.trim, treat = lalonde$treat, covs = covs, disp.ks = T, disp.v.ratio = T)
bal.tab(covs, lalonde$treat, weights = list(e = e.out,
e.trim = e.out.trim),
disp.ks = T, disp.v.ratio = T)
#Continuous treatment (CBPS)
cbps.out2 <- CBPS(f.build("re78", covs), data = lalonde, method = "exact")
bal.tab(cbps.out2, stats = c("c", "sp"))
cbps.out2.e <- CBPS(f.build("re78", covs), data = do.call(data.frame, lapply(lalonde, rank)), method = "exact")
bal.tab(cbps.out2, weights = list(CBPS.sp = cbps.out2.e),
stats = c("c", "sp"))
#Clustering with MatchIt
lalonde$school <- sample(LETTERS[1:4], nrow(lalonde), replace = T)
m5 <- matchit(f.build("treat", covs), data = lalonde, exact = "school")
bal.tab(m5, cluster = lalonde$school, disp.v.ratio = T, cluster.summary = T)
bal.tab(m5, cluster = lalonde$school, disp.ks = T, abs = T, cluster.fun = c("max"))
#Clustering w/ continuous treatment
bal.tab(cbps.out2, cluster = lalonde$school, un = T, cluster.fun = "mean")
#Multiple imputation
data("lalonde_mis", package = "cobalt")
covs_mis <- subset(lalonde_mis, select = -c(re78, treat))
lalonde_mis_ <- splitfactor(lalonde_mis, "race")
covs_mis_ <- subset(lalonde_mis_, select = -c(re78, treat))
library("mice")
imp <- mice(lalonde_mis, m = 3)
imp.data <- complete(imp, "long", include = FALSE)
imp.data <- imp.data[with(imp.data, order(.imp, .id)),]
ps <- match.weight <- rep(0, nrow(imp.data))
for (i in unique(imp.data$.imp)) {
in.imp <- imp.data$.imp == i
ps[in.imp] <- glm(f.build("treat", covs_mis), data = imp.data[in.imp,],
family = "binomial")$fitted.values
m.out <- matchit(treat ~ age, data = imp.data[in.imp,], distance = ps[in.imp])
match.weight[in.imp] <- m.out$weights
}
imp.data <- cbind(imp.data, ps = ps, match.weight = match.weight)
bal.tab(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", distance = "ps", which.imp = .all,
disp.ks = T)
bal.tab(f.build("treat", covs_mis), data = imp, weights = match.weight,
method = "matching", distance = ps, which.imp = NULL,
cluster = "race", disp.v = T, abs = T, which.cluster = "black")
#With WeightIt
library(WeightIt)
W.imp <- weightit(f.build("treat", covs_mis), data = imp.data, estimand = "ATT",
by = ".imp", method = "optweight", moments = 2)
bal.tab(W.imp, imp = ".imp", disp.v.ratio = TRUE, abs = F)
#With MatchThem
library(MatchThem)
mt.out <- matchthem(f.build("treat", covs_mis), datasets = imp,
approach = "within")
bal.tab(mt.out)
wt.out <- weightthem(f.build("race", covs_mis[-3]), datasets = imp,
approach = "within", method = "optweight")
bal.tab(wt.out)
#With continuous treatment
imp.data <- complete(imp, "long", include = FALSE)
imp.data <- imp.data[with(imp.data, order(.imp, .id)),]
w <- rep(0, nrow(imp.data))
for (i in unique(imp.data$.imp)) {
in.imp <- imp.data$.imp == i
w[in.imp] <- get.w(CBPS(f.build("re78", covs_mis), data = imp.data[in.imp,],
method = "exact"))
}
imp.data <- cbind(imp.data, cbps.w = w)
bal.tab(f.build("re78", covs_mis), data = imp, weights = w,
which.imp = .all, un = T, thresholds = .1)
bal.tab(f.build("re78", covs_mis), data = imp.data, weights = "cbps.w",
method = "w", imp = ".imp", which.imp = NULL, cluster = "race")
#Missingness indicators
data("lalonde_mis", package = "cobalt")
covs_mis <- subset(lalonde_mis, select = -c(re78, treat))
lalonde_mis_ <- splitfactor(lalonde_mis, "race")
covs_mis_ <- subset(lalonde_mis_, select = -c(re78, treat))
library(twang)
ps.out <- ps(f.build("treat", covs_mis), data = lalonde_mis,
stop.method = c("es.max"),
estimand = "ATE", verbose = FALSE, n.trees = 1000)
bal.tab(ps.out, int = T)
#love.plot
v <- data.frame(old = c("age", "educ", "race_black", "race_hispan",
"race_white", "married", "nodegree", "re74", "re75", "distance"),
new = c("Age", "Years of Education", "Black",
"Hispanic", "White", "Married", "No Degree Earned",
"Earnings 1974", "Earnings 1975", "Propensity Score"))
plot(bal.tab(m1), threshold = .1, var.names = v)
love.plot(m1, stat = c("m", "v"), stars = "std", drop.distance = T)
love.plot(bal.tab(m1, cluster = lalonde$school), stat = "ks", agg.fun = "range", which.cluster = NA)
love.plot(cbps.out2.e, drop.distance = F, line = T, abs = T,
var.order = "u")
love.plot(bal.tab(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", distance = "ps"),
agg.fun = "range")
love.plot(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", distance = "ps",
cluster = "race", which.cluster = 1:3,
agg.fun = "range", stat = c("m", "ks"))
#bal.plot
bal.plot(m1, "age", which = "both")
bal.plot(m1, "race")
bal.plot(cbps.out, "age", mirror = TRUE, type = "h", bins = 20, colors = c("white", "black"))
bal.plot(cbps.out, "race", which = "both")
bal.plot(cbps.out2, "age", which = "both")
bal.plot(cbps.out2, "race", which = "u")
bal.plot(m3, "age", which.sub = 2)
bal.plot(m3, "race", which.sub = 1:3, which = "both")
bal.plot(m5, "age", cluster = lalonde$school, which.cluster = c("A", "B"), which = "both")
bal.plot(m5, "race", cluster = lalonde$school, which.cluster = .none)
bal.plot(cbps.out2, "age", cluster = lalonde$school, which.cluster = c("A", "B"), which = "both")
bal.plot(cbps.out2, "race", cluster = lalonde$school)
bal.plot(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", var.name = "age", which = "b")
bal.plot(f.build("treat", covs_mis), data = imp.data, weights = "match.weight",
method = "matching", imp = ".imp", cluster = "race", which.imp = 1,
var.name = "age", which = "b")
#Other packages
#ATE
library("ATE")
ate.att <- ATE(Y = lalonde_$re78, lalonde_$treat, covs_, ATT = T)
ate.att$weights.q[lalonde$treat == 1] <- 1
bal.tab(covs, lalonde$treat, weights = ate.att$weights.q, method = "w", estimand = "att", disp.v.ratio = T)
ate.ate <- ATE(Y = rep(0, nrow(lalonde)), lalonde_$treat, covs_, ATT = F, theta = 1)
ate.ate$weights <- ate.ate$weights.q + ate.ate$weights.p
bal.tab(covs, lalonde$treat, weights = ate.ate$weights, method = "w", estimand = "ate", disp.v.ratio = T)
#CMatching
library("CMatching")
lalonde$school <- sample(1:2, nrow(lalonde), replace = T)
ls <- lalonde[order(lalonde$school),]
p.score <- glm(treat ~ age + educ + race + married + nodegree + re74 + re75 + factor(school) - 1,
data = ls, family = "binomial")$fitted.values
MW <- MatchPW(Tr = ls$treat, X = p.score,
M = 1, replace = T, Group = ls$school,
caliper = 2, estimand = "ATT")
bal.tab(MW, formula = f.build("treat", covs), data = ls, cluster = "school")
#designmatch
library(designmatch)
ls <- lalonde[order(lalonde$treat, decreasing = T),]
cs <- covs[order(lalonde$treat, decreasing = T),]
dmout <- bmatch(ls$treat,
dist_mat = NULL,
subset_weight = 1,
total_groups = 100,
mom = list(covs = as.matrix(cs[-(3:5)]),
tols = absstddif(as.matrix(cs[-(3:5)]), ls$treat, .0001)),
# ks = list(covs = as.matrix(covs_[-c(3:7)]),
# n_grid = 7,
# tols = rep(.05, 4)),
n_controls = 1,
solver = list(name = "glpk", approximate = 1)
# , total_groups = 185
, fine = list(covs = cs[c(3,4,5)])
)
bal.tab(dmout, covs = cs, treat = ls$treat)
bal.tab(dmout, formula = treat ~ covs, data = ls)
#optweight (default method)
library(optweight)
W <- optweight(f.build("treat", covs), data = lalonde,
estimand = "ATT")
bal.tab(W)
W.cont <- optweight(f.build("re75", covs[-7]), data = lalonde)
bal.tab(W.cont)
W.mult <- optweight(f.build("race", covs[-3]), data = lalonde,
estimand = "ATE",
focal = "black")
bal.tab(W.mult)
#Multinomial
lalonde$treat3 <- factor(ifelse(lalonde$treat == 1, "A", sample(c("B", "C"), nrow(lalonde), T)))
bal.tab(f.build("treat3", covs), data = lalonde, focal = 1, which.treat = 1:3, m.threshold = .1)
mnps3.out <- mnps(f.build("treat3", covs), data = lalonde,
stop.method = c("es.mean", "es.max"),
estimand = "ATE", verbose = FALSE,
n.trees = 200)
bal.tab(mnps3.out, which.treat = 1:3)
bal.plot(mnps3.out, var.name = "age")
mnps3.att <- mnps(f.build("treat3", covs), data = lalonde,
stop.method = c("ks.max"),
estimand = "ATT", verbose = FALSE,
treatATT = "B")
bal.tab(mnps3.att, which.treat = .all)
bal.plot(mnps3.att, var.name = "age")
cbps.out3 <- CBPS(f.build("treat3", covs), data = lalonde)
bal.tab(cbps.out3)
bal.plot(cbps.out3, var.name = "age")
bal.plot(f.build("treat3", covs), data = lalonde, var.name = "age",
weights = list((cbps.out3),
(mnps3.out)),
method = "w", which = "both")
bal.tab(f.build("treat3", covs), data = lalonde,
weights = data.frame(cbps = get.w(cbps.out3),
gbm = get.w(mnps3.out)),
method = "w")
ate3.out <- ATE(rep(0, nrow(lalonde)), as.numeric(lalonde$treat3)-1,
covs_, ATT = TRUE)
ate3.out$weights <- apply(ate3.out$weights.mat, 2, sum)
bal.plot(f.build("treat3", covs), data = lalonde, var.name = "age",
weights = data.frame(ate = ate3.out$weights),
method = "w", which = "both")
bal.tab(f.build("treat3", covs), data = lalonde,
weights = data.frame(ate = ate3.out$weights),
method = "w")
#MSMs
library("twang")
data(iptwExWide, package = "twang")
bal.tab(list(iptwExWide[c("use0", "gender", "age")], iptwExWide[c("use0", "gender", "age", "use1", "tx1")]), treat.list = iptwExWide[c("tx1", "tx2")])
bal.tab(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
data = iptwExWide)
iptw.Ex <- iptw(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
timeInvariant ~ gender + age,
data = iptwExWide,
cumulative = FALSE,
priorTreatment = FALSE,
verbose = FALSE,
stop.method = "es.max",
n.trees = 2000)
bal.tab(iptw.Ex)
data("iptwExLong")
iptw.l <- iptw(tx ~ gender + age + use, data = iptwExLong$covariates,
timeIndicators = iptwExLong$covariates$time, ID = iptwExLong$covariates$ID,
n.trees = 200, stop.method = "es.max",
verbose = FALSE)
bal.tab(iptw.l)
library("WeightIt")
Wmsm <- weightitMSM(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
data = iptwExWide,
method = "ps")
bal.tab(Wmsm)
library("CBPS")
data(iptwExLong, package = "twang")
cbps.msm <- CBMSM(tx ~ age + use,
data = iptwExLong$covariates,
id = iptwExLong$covariates$ID,
time = iptwExLong$covariates$time,
time.vary = T, msm.variance = "full")
bal.tab(cbps.msm)
W.msm <- weightitMSM(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
data = iptwExWide, stabilize = F,
verbose = FALSE,
method = "ps")
bal.tab(W.msm)
#Target checking
library(WeightIt)
w1 <- weightit(lalonde$treat ~ covs, estimand = "ATE")
target.bal.tab(w1, which.treat = NULL)
w2 <- weightit(lalonde$race ~ covs[-3], estimand = "ATE", method = "cbps")
target.bal.tab(w2, which.treat = NULL)
w3 <- weightit(lalonde$re78 ~ covs, estimand = "ATE", method = "cbps", over = FALSE)
target.bal.tab(w3, which.treat = NULL)
target.bal.tab(w3, disp.means = T, int = 2, imbalanced.only = T, v.threshold = .5)
w4 <- weightitMSM(list(tx1 ~ use0 + gender + age,
tx2 ~ use1 + use0 + tx1 + gender + age,
tx3 ~ use2 + use1 + use0 + tx2 + tx1 + gender + age),
data = iptwExWide,
verbose = FALSE,
method = "ps")
target.bal.tab(w4, which.treat = NULL)
# Scoping check: bal.tab() invoked through do.call(quote = TRUE) from inside
# two levels of anonymous-function scope must still resolve `d` (a local copy
# of lalonde) when evaluating its `data` argument. Exercises cobalt's
# non-standard evaluation; the balance tables themselves are discarded.
a <- function() {
d <- lalonde
lapply(1:3, function(i) {
lapply(1, function(x) {
# quote = TRUE stops do.call() from re-building the call and losing the
# environment in which `d` is visible
b <- do.call(bal.tab, list(f.build("treat",covs), data = d, cluster = "race", which.cluster = NULL), quote = T)
})
})
}
|
# path to the file
dataFile <- "./data/household_power_consumption.txt"
# load data from the file
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# take only two days data
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# for global active power
globalActivePower <- as.numeric(subSetData$Global_active_power)
# for sub_metering_1
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
# for sub_metering_2
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
# for sub_metering_3
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
# store output in png format
png("plot3.png", width=480, height=480)
# Generate plot
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# for legends in top right corner
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# close the connection
dev.off() | /plot3.R | no_license | bradd123/ExData_Plotting1 | R | false | false | 1,141 | r | # path to the file
# Relative path to the UCI household power consumption data file;
# assumes the working directory contains a ./data subfolder.
dataFile <- "./data/household_power_consumption.txt"
# Load the raw data; fields are semicolon-separated with "." as the
# decimal mark, and strings are kept as character (not factor).
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two days of interest (1-2 Feb 2007); Date is stored
# as d/m/Y text, so match on the literal strings.
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine Date + Time text into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Coerce the measurement columns (read as character) to numeric.
# Global active power (computed but not plotted in this figure).
globalActivePower <- as.numeric(subSetData$Global_active_power)
# Energy sub-metering no. 1 (kitchen)
subMetering1 <- as.numeric(subSetData$Sub_metering_1)
# Energy sub-metering no. 2 (laundry room)
subMetering2 <- as.numeric(subSetData$Sub_metering_2)
# Energy sub-metering no. 3 (water heater / air conditioner)
subMetering3 <- as.numeric(subSetData$Sub_metering_3)
# Open a 480x480 PNG device; everything drawn below goes to plot3.png.
png("plot3.png", width=480, height=480)
# Draw sub-metering 1 as a black line, then overlay 2 (red) and 3 (blue)
# on the same axes with lines().
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
# Legend in the top-right corner matching the three line colors.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
# close the connection
dev.off() |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/modeling.R
\name{DR.Okunieff}
\alias{DR.Okunieff}
\title{Function that calculates TCP according Okunieff model}
\usage{
DR.Okunieff(doses, TD50 = 45, gamma50 = 1.5, a = 1)
}
\arguments{
\item{doses}{Either a \code{dvhmatrix} class object or a vector with nominal doses}
\item{TD50}{The value of dose that gives the 50\% of probability of outcome}
\item{gamma50}{The slope of dose/response curve at 50\% of probability}
\item{a}{Value for parallel-serial correlation in radiobiological response}
}
\value{
A vector with TCP values calculated according to the Okunieff model.
}
\description{
This function calculates the Tumor Control Probability according to the
Okunieff model.
}
\details{
This model is the equivalent of the \emph{logistic} generalized linear model where the covariates and their coefficients
have been reported as function of \eqn{TD_{50}} and \eqn{\gamma_{50}}. The original Okunieff formula is the following:
\deqn{TCP=\frac{e^{\frac{D-TD_{50}}{k}}}{1+e^{\frac{D-TD_{50}}{k}}}}
where \eqn{k=\gamma_{50}/(4*TD_{50})} and so giving the final model as direct function of \eqn{TD_{50}} and \eqn{\gamma_{50}}:
\deqn{TCP=\frac{1}{1+e^{4\gamma_{50}(1-\frac{D}{TD_{50}})}}}
In the model equation \eqn{D} can be either the nominal dose or the \eqn{EUD} as calculated by \code{\link{DVH.eud}} function.
}
\references{
Okunieff P, Morgan D, Niemierko A, Suit HD. \emph{Radiation dose-response of human tumors}. Int J Radiat Oncol Biol Phys. 1995 Jul 15;32(4):1227-37. PubMed PMID: 7607946.
}
| /man/DR.Okunieff.Rd | no_license | cat2tom/moddicom | R | false | false | 1,588 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/modeling.R
\name{DR.Okunieff}
\alias{DR.Okunieff}
\title{Function that calculates TCP according Okunieff model}
\usage{
DR.Okunieff(doses, TD50 = 45, gamma50 = 1.5, a = 1)
}
\arguments{
\item{doses}{Either a \code{dvhmatrix} class object or a vector with nominal doses}
\item{TD50}{The value of dose that gives the 50\% of probability of outcome}
\item{gamma50}{The slope of dose/response curve at 50\% of probability}
\item{a}{Value for parallel-serial correlation in radiobiological response}
}
\value{
A vector with TCP values calculated according to the Okunieff model.
}
\description{
This function calculates the Tumor Control Probability according to the
Okunieff model.
}
\details{
This model is the equivalent of the \emph{logistic} generalized linear model where the covariates and their coefficients
have been reported as function of \eqn{TD_{50}} and \eqn{\gamma_{50}}. The original Okunieff formula is the following:
\deqn{TCP=\frac{e^{\frac{D-TD_{50}}{k}}}{1+e^{\frac{D-TD_{50}}{k}}}}
where \eqn{k=\gamma_{50}/(4*TD_{50})} and so giving the final model as direct function of \eqn{TD_{50}} and \eqn{\gamma_{50}}:
\deqn{TCP=\frac{1}{1+e^{4\gamma_{50}(1-\frac{D}{TD_{50}})}}}
In the model equation \eqn{D} can be either the nominal dose or the \eqn{EUD} as calculated by \code{\link{DVH.eud}} function.
}
\references{
Okunieff P, Morgan D, Niemierko A, Suit HD. \emph{Radiation dose-response of human tumors}. Int J Radiat Oncol Biol Phys. 1995 Jul 15;32(4):1227-37. PubMed PMID: 7607946.
}
|
/r4ds_ch5.R | no_license | yan9914/R4DS | R | false | false | 4,322 | r | ||
# Define required packages and install new packages
dm.required.packages <-
c("lubridate",
"dplyr",
"ggplot2",
"scales",
"rpart", "rpart.plot", "party", "RColorBrewer")
# Attach every package named in `x`, installing any that are missing first.
# Relies on require() returning FALSE (rather than erroring like library())
# when a package cannot be loaded, which drives the install-on-demand branch.
# Side effects: may call install.packages() and attaches packages to the
# search path. Returns nothing useful (last require() result, invisibly).
package.install.func <- function(x) {
for (i in x) {
# require returns TRUE invisibly if it was able to load package
if (!require(i , character.only = TRUE)) {
# If package was not able to be loaded then re-install
install.packages(i , dependencies = TRUE)
# Load package after installing
require(i , character.only = TRUE)
}
}
}
package.install.func(dm.required.packages)
# Set working directory and import raw data
setwd("~/Desktop/kdd-cricket")
# Source
source("PREPROCESSING.R")
##### Summarising Data - Data Exploration #####
cricket <- read.csv("3-transformed-data/cric_transformed_odi.csv")
str(cricket)
# set factor variables and level orders
cricket$day_night <- as.factor(cricket$day_night)
cricket$rain <- as.factor(cricket$rain)
cricket$duckworth_lewis <- as.factor(cricket$duckworth_lewis)
cricket$series <-
factor(raw.data.sub$series, c("low", "medium", "high"))
library(lubridate)
cricket$date <- ymd(as.character(cricket$date))
# see what data looks like
dim(cricket)
names(cricket)
str(cricket)
summary(cricket)
# avoid printing whole data frame
class(cricket)
library(dplyr)
cricket <- tbl_df(cricket)
class(cricket)
cricket
# review and observations on the data set
head(cricket)
tail(cricket)
str(cricket)
# check the summary of the data
summary(cricket)
# men vs women odi matches
boxplot(first_innings_total ~ gender,
data = cricket,
names = c("Female", "Male"))
# Proportion of tosses won in home country
# Proportion of tosses won when playing away from home
homet <-
with(cricket[cricket$team_a_venue == "home", ], xtabs(~ team_a_venue + toss_decision))
homet / rowSums(homet) # 53% bat, 47% bowl
binom.test(1195, 2258, p = .5)
awayt <-
with(cricket[cricket$team_a_venue == "away", ], xtabs(~ team_a_venue + toss_decision))
awayt / rowSums(awayt) # 55% bat, 45% bowl
# Proportion of toss decision when played home and D/N match
homet.dn <-
with(cricket[cricket$team_a_venue == "home" &
cricket$day_night == 1, ], xtabs(~ team_a_venue + toss_decision))
homet.dn / rowSums(homet.dn) # 72% bat, 28% bowl
awayt.dn <-
with(cricket[cricket$team_a_venue == "away" &
cricket$day_night == 0, ], xtabs(~ team_a_venue + toss_decision))
awayt.dn / rowSums(awayt.dn) # 47% bat, 53% bowl
# Proportion of toss decision when played home and rain
homet.r <-
with(cricket[cricket$team_a_venue == "home" &
cricket$rain == 1, ], xtabs(~ team_a_venue + toss_decision))
homet.r / rowSums(homet.r) # 43% bat, 57% bowl
awayt.r <-
with(cricket[cricket$team_a_venue == "away" &
cricket$rain == 0, ], xtabs(~ team_a_venue + toss_decision))
awayt.r / rowSums(awayt.r) # 55% bat, 45% bowl
# Proportion of toss decision when played home, day night and rain
homet.dn.r <-
with(cricket[cricket$team_a_venue == "home" &
cricket$day_night == 1 &
cricket$rain == 1, ], xtabs(~ team_a_venue + toss_decision))
homet.dn.r / rowSums(homet.dn.r) # 52% bat, 48% bowl
awayt.dn.r <-
with(cricket[cricket$team_a_venue == "away" &
cricket$day_night == 0 &
cricket$rain == 0, ], xtabs(~ team_a_venue + toss_decision))
awayt.dn.r / rowSums(awayt.dn.r) # 47% bat, 53% bowl
# Win toss win game
# 0/1 indicators for which side won the toss and which side won the game.
cricket$teama_win_toss <-
1 * (cricket$team_a == cricket$toss_winner)
cricket$teamb_win_toss <-
1 * (cricket$team_b == cricket$toss_winner)
cricket$teama_win_game <- 1 * (cricket$team_a == cricket$winner)
cricket$teamb_win_game <- 1 * (cricket$team_b == cricket$winner)
# TRUE when the side that won the toss also won the match.
# BUG FIX: the second disjunct previously tested teama_win_toss against
# teamb_win_game ("A won the toss but B won the game"), which made the whole
# expression collapse to "team A won the toss" whenever either side won.
# The toss winner wins the game exactly when the SAME side's toss and game
# indicators are both 1. Outer parentheses kept so the result auto-prints,
# as in the original.
(cricket$win_toss_win_game <-
((cricket$teama_win_toss == 1 &
cricket$teama_win_game == 1) |
(cricket$teamb_win_toss == 1 & cricket$teamb_win_game == 1)
))
library(dplyr)
cricket.toss <-
select(.data = cricket, team_a, team_b, winner, toss_winner, win_toss_win_game)
# proportion of matches won
prop.table(table(cricket.toss$win_toss_win_game)) # 49% do not win, 51% wins
# proportion of matches won by gender
prop.table(table(cricket$gender, cricket.toss$win_toss_win_game), 1)
# proportion of matches won by countries
prop <-
prop.table(table(cricket.toss$winner,
cricket.toss$win_toss_win_game),
1) * 100
prop
barplot(
t(as.matrix(prop)),
width = 2,
beside = T,
space = c(0, 2),
ylim = c(0, 60),
main = "Win vs Lost % on winning the toss",
ylab = "% won",
col = c("black", "white")
)
legend(
"bottom",
legend = c("lost", "won"),
fill = c("black", "white"),
ncol = 2,
cex = 0.75
)
# Home match better probablity of winniing
library(dplyr)
sel_cricket <-
select(.data = cricket, team_a, team_b, winner, toss_winner, team_a_venue)
# sel_cricket$team_a <- as.character(sel_cricket$team_a)
# sel_cricket$team_b <- as.character(sel_cricket$team_b)
# sel_cricket$winner <- as.character(sel_cricket$winner)
# sel_cricket$toss_winner <- as.character(sel_cricket$toss_winner)
# sel_cricket$team_a_venue <- as.character(sel_cricket$team_a_venue)
str(sel_cricket)
homewin <-
(
sel_cricket %>%
filter(team_a_venue == "home") %>%
group_by(team_a, team_b) %>%
summarise(won = sum(team_a == winner), total = length(winner)) %>%
mutate(percent = (won / total) * 100) %>% arrange(team_a, team_b)
)
awaywin <-
(
sel_cricket %>%
filter(team_a_venue == "away") %>%
group_by(team_a, team_b) %>%
summarise(won = sum(team_a == winner), total = length(winner)) %>%
mutate(percent = (won / total) * 100) %>% arrange(team_a, team_b)
)
splitted.data.home <- split(homewin, homewin$team_a)
splitted.data.away <- split(awaywin, awaywin$team_a)
fill <- c("#4f90c1", "#d3c8c8")
par(mfrow = c(1, 1))
library(ggplot2)
library(scales)
# For each team, draw a stacked bar chart of home vs away win percentage
# against every opponent. Uses the pre-split summaries splitted.data.home /
# splitted.data.away and the `fill` color vector defined above.
# NOTE(review): [1:9] drops the last factor level of team_a (alphabetically
# last team, likely Zimbabwe) — confirm whether that exclusion is intended.
for (country in levels(cricket$team_a)[1:9]) {
# One-element lists holding this team's home and away summary tables.
chome <- splitted.data.home[country]
coutside <- splitted.data.away[country]
title <- paste(country, "vs Rest")
# Long-format rows for the "Home" bars.
dat1 <-
data.frame(
type = "Home",
percent = chome[[1]]$percent,
opponent = chome[[1]]$team_b,
total = chome[[1]]$total
)
# Long-format rows for the "Away" bars.
dat2 <-
data.frame(
type = "Away",
percent = coutside[[1]]$percent,
opponent = coutside[[1]]$team_b,
total = coutside[[1]]$total
)
dat <- rbind(dat1, dat2)
# Stacked bars: win percent by opponent, colored by home/away.
p4 <-
ggplot() + theme_bw() + geom_bar(aes(y = percent, x = opponent, fill = type),
data = dat,
stat = "identity")
# Tilt opponent names so they do not overlap.
p4 <-
p4 + theme(axis.text.x = element_text(angle = 70, hjust = 1))
# Percentage labels centered within each stacked segment.
p4 <-
p4 + geom_text(
data = dat,
aes(
x = opponent,
y = percent,
label = paste0(round(percent,
2), "%")
),
size = 3,
position = position_stack(vjust = 0.5)
)
p4 <-
p4 + theme(
legend.position = "bottom",
legend.direction = "horizontal",
legend.title = element_blank()
)
# dollar_format with a "%" suffix and empty prefix formats the y axis
# ticks as percentages.
p4 <-
p4 + ggtitle(title) + labs(y = "Win percent") + scale_y_continuous(labels = dollar_format(suffix = "%",
prefix = ""))
p4 <- p4 + scale_fill_manual(values = fill)
# Explicit print() needed: ggplot objects do not render inside a for loop.
print(p4)
}
# Percentage of Team vs Opponent MEAN HOME win
mean(homewin$percent[homewin$team_a == "Australia"]) # 73.89%
mean(homewin$percent[homewin$team_a == "Bangladesh"]) # 13.64%
mean(homewin$percent[homewin$team_a == "England"]) # 64.71%
mean(homewin$percent[homewin$team_a == "India"]) # 63.13%
mean(homewin$percent[homewin$team_a == "New Zealand"]) # 66.94%
mean(homewin$percent[homewin$team_a == "Pakistan"]) # 62.65%
mean(homewin$percent[homewin$team_a == "South Africa"]) # 75.02%
mean(homewin$percent[homewin$team_a == "Sri Lanka"]) # 69.14%
mean(homewin$percent[homewin$team_a == "West Indies"]) # 66.16%
mean(homewin$percent[homewin$team_a == "Zimbabwe"]) # 21.33 %
# Percentage of Team vs Opponent MEAN AWAY win
mean(awaywin$percent[awaywin$team_a == "Australia"]) # 78.62%
mean(awaywin$percent[awaywin$team_a == "Bangladesh"]) # 14.48%
mean(awaywin$percent[awaywin$team_a == "England"]) # 54.93%
mean(awaywin$percent[awaywin$team_a == "India"]) # 55.55%
mean(awaywin$percent[awaywin$team_a == "New Zealand"]) # 55.69%
mean(awaywin$percent[awaywin$team_a == "Pakistan"]) # 56.40%
mean(awaywin$percent[awaywin$team_a == "South Africa"]) # 63.29%
mean(awaywin$percent[awaywin$team_a == "Sri Lanka"]) # 55.07%
mean(awaywin$percent[awaywin$team_a == "West Indies"]) # 68.75%
mean(awaywin$percent[awaywin$team_a == "Zimbabwe"]) # NaN
# 300 or more runs, more wins
library(dplyr)
fil_bat_decision <- filter(.data = cricket, first_innings_total >= 300) # 227 matches w/ 300+ runs
fil_bat_decision <- select(.data = fil_bat_decision, team_a, team_b, toss_winner:first_innings_total, winner)
bat_won <- nrow(fil_bat_decision[fil_bat_decision$toss_decision == "bat"
& fil_bat_decision$toss_winner == fil_bat_decision$winner ,]) # 108
field_won <- nrow(fil_bat_decision[fil_bat_decision$toss_decision == "field"
& fil_bat_decision$toss_winner != fil_bat_decision$winner ,]) # 93
(bat_won + field_won)/nrow(fil_bat_decision) # 88.54% won
###### Data Mining - Decision Trees (rpart) #####
library(dplyr)
# convert logical value to numeric
cricket$win_toss_win_game <- as.numeric(cricket$win_toss_win_game)
# select data
df <- select(.data = cricket, team_a:gender, series:team_a_venue,toss_winner:duckworth_lewis, win_toss_win_game)
df <- arrange(.data = df, team_a, first_innings_total)
df$win_toss_win_game <- factor(df$win_toss_win_game, levels = c(0, 1), labels = c("lost", "won"))
# verify
levels(df$win_toss_win_game)
sapply(df$win_toss_win_game, class)
str(df)
# write to csv file
write.csv(x = df,
file = "4-data-mining/win_toss_win_game.csv",
row.names = FALSE)
# extract training data
set.seed(1234)
train <- sample(nrow(df), 0.7 * nrow(df))
df.train <- df[train, ]
# write to csv file
write.csv(x = df.train,
file = "4-data-mining/win_toss_win_game_train.csv",
row.names = FALSE)
# extract validation data
df.validate <- df[-train, ]
# write to csv file
write.csv(x = df.validate,
file = "4-data-mining/win_toss_win_game_validate.csv",
row.names = FALSE)
# Inspect
# Categorical variables are examined using table()
table(df.train$win_toss_win_game)
table(df.validate$win_toss_win_game)
# apply decision tree technique
library(rpart)
set.seed(1234)
dtree <-
rpart(
win_toss_win_game ~ .,
data = df.train,
method = "class",
parms = list(split = "information")
)
dtree
summary(dtree)
# Examine cp table to choose a final tree size
# CP table contains prediction error for various tree sizes
# CP is used to penalise larger trees
# nsplit = number of branch splits i.e. n + 1 terminal nodes
# rel error = error rate
# xerror = cross-validated error based on 10-fold cross validation
# xstd = standard error of the cross validation
dtree$cptable
# Plot complexity parameter (CP)
plotcp(dtree)
# Prune the tree
# The tree is too large
# Prune least important splits
## smallest tree -> range between xerror + xstd and xerror - xstd
dtree.pruned <- prune(dtree, cp = 0.019)
plotcp(dtree.pruned)
library(rpart.plot)
prp(
dtree.pruned,
type = 2,
extra = 104,
fallen.leaves = T,
main = "Decision Tree"
)
# library(RGtk2)
# library(rattle)
# fancyRpartPlot(dtree)
# Classify each observation against validation sample data
dtree.pred <- predict(dtree, df.validate, type = "class")
# Create a cross tabulation of the actual status against the predicted status
dtree.perf <- table(df.validate$win_toss_win_game, dtree.pred, dnn = c("Actual", "Predicted"))
dtree.perf
library(party)
fit.ctree <- ctree(win_toss_win_game ~ ., data = df.train)
plot(fit.ctree, main = "Conditional Inference Tree")
ctree.pred <- predict(fit.ctree, df.validate, type = "response")
ctree.perf <- table(df.validate$win_toss_win_game, ctree.pred, dnn = c("Actual", "Predicted"))
ctree.perf
| /CRICKET.R | no_license | shubhampachori12110095/kdd-cricket | R | false | false | 12,268 | r | # Define required packages and install new packages
dm.required.packages <-
c("lubridate",
"dplyr",
"ggplot2",
"scales",
"rpart", "rpart.plot", "party", "RColorBrewer")
# Attach every package named in `x`, installing any that cannot be loaded.
# `x` is a character vector of package names; called for its side effects.
package.install.func <- function(x) {
  for (pkg in x) {
    # require() returns FALSE (invisibly) when the package is unavailable.
    loaded <- require(pkg, character.only = TRUE)
    if (!loaded) {
      # Missing package: install it (with dependencies), then attach it.
      install.packages(pkg, dependencies = TRUE)
      require(pkg, character.only = TRUE)
    }
  }
}
package.install.func(dm.required.packages)
# Set working directory and import raw data
setwd("~/Desktop/kdd-cricket")
# Source
source("PREPROCESSING.R")
##### Summarising Data - Data Exploration #####
cricket <- read.csv("3-transformed-data/cric_transformed_odi.csv")
str(cricket)
# set factor variables and level orders
cricket$day_night <- as.factor(cricket$day_night)
cricket$rain <- as.factor(cricket$rain)
cricket$duckworth_lewis <- as.factor(cricket$duckworth_lewis)
cricket$series <-
factor(raw.data.sub$series, c("low", "medium", "high"))
library(lubridate)
cricket$date <- ymd(as.character(cricket$date))
# see what data looks like
dim(cricket)
names(cricket)
str(cricket)
summary(cricket)
# avoid printing whole data frame
class(cricket)
library(dplyr)
cricket <- tbl_df(cricket)
class(cricket)
cricket
# review and observations on the data set
head(cricket)
tail(cricket)
str(cricket)
# check the summary of the data
summary(cricket)
# men vs women odi matches
boxplot(first_innings_total ~ gender,
data = cricket,
names = c("Female", "Male"))
# Proportion of tosses won in home country
# Proportion of tosses won when playing away from home
homet <-
with(cricket[cricket$team_a_venue == "home", ], xtabs(~ team_a_venue + toss_decision))
homet / rowSums(homet) # 53% bat, 47% bowl
binom.test(1195, 2258, p = .5)
awayt <-
with(cricket[cricket$team_a_venue == "away", ], xtabs(~ team_a_venue + toss_decision))
awayt / rowSums(awayt) # 55% bat, 45% bowl
# Proportion of toss decision when played home and D/N match
homet.dn <-
with(cricket[cricket$team_a_venue == "home" &
cricket$day_night == 1, ], xtabs(~ team_a_venue + toss_decision))
homet.dn / rowSums(homet.dn) # 72% bat, 28% bowl
awayt.dn <-
with(cricket[cricket$team_a_venue == "away" &
cricket$day_night == 0, ], xtabs(~ team_a_venue + toss_decision))
awayt.dn / rowSums(awayt.dn) # 47% bat, 53% bowl
# Proportion of toss decision when played home and rain
homet.r <-
with(cricket[cricket$team_a_venue == "home" &
cricket$rain == 1, ], xtabs(~ team_a_venue + toss_decision))
homet.r / rowSums(homet.r) # 43% bat, 57% bowl
awayt.r <-
with(cricket[cricket$team_a_venue == "away" &
cricket$rain == 0, ], xtabs(~ team_a_venue + toss_decision))
awayt.r / rowSums(awayt.r) # 55% bat, 45% bowl
# Proportion of toss decision when played home, day night and rain
homet.dn.r <-
with(cricket[cricket$team_a_venue == "home" &
cricket$day_night == 1 &
cricket$rain == 1, ], xtabs(~ team_a_venue + toss_decision))
homet.dn.r / rowSums(homet.dn.r) # 52% bat, 48% bowl
awayt.dn.r <-
with(cricket[cricket$team_a_venue == "away" &
cricket$day_night == 0 &
cricket$rain == 0, ], xtabs(~ team_a_venue + toss_decision))
awayt.dn.r / rowSums(awayt.dn.r) # 47% bat, 53% bowl
# Win toss win game
cricket$teama_win_toss <-
1 * (cricket$team_a == cricket$toss_winner)
cricket$teamb_win_toss <-
1 * (cricket$team_b == cricket$toss_winner)
cricket$teama_win_game <- 1 * (cricket$team_a == cricket$winner)
cricket$teamb_win_game <- 1 * (cricket$team_b == cricket$winner)
(cricket$win_toss_win_game <-
((cricket$teama_win_toss &
cricket$teama_win_game == 1) |
(cricket$teama_win_toss & cricket$teamb_win_game == 1)
))
library(dplyr)
cricket.toss <-
select(.data = cricket, team_a, team_b, winner, toss_winner, win_toss_win_game)
# proportion of matches won
prop.table(table(cricket.toss$win_toss_win_game)) # 49% do not win, 51% wins
# proportion of matches won by gender
prop.table(table(cricket$gender, cricket.toss$win_toss_win_game), 1)
# proportion of matches won by countries
prop <-
prop.table(table(cricket.toss$winner,
cricket.toss$win_toss_win_game),
1) * 100
prop
barplot(
t(as.matrix(prop)),
width = 2,
beside = T,
space = c(0, 2),
ylim = c(0, 60),
main = "Win vs Lost % on winning the toss",
ylab = "% won",
col = c("black", "white")
)
legend(
"bottom",
legend = c("lost", "won"),
fill = c("black", "white"),
ncol = 2,
cex = 0.75
)
# Home match gives a better probability of winning
library(dplyr)
sel_cricket <-
select(.data = cricket, team_a, team_b, winner, toss_winner, team_a_venue)
# sel_cricket$team_a <- as.character(sel_cricket$team_a)
# sel_cricket$team_b <- as.character(sel_cricket$team_b)
# sel_cricket$winner <- as.character(sel_cricket$winner)
# sel_cricket$toss_winner <- as.character(sel_cricket$toss_winner)
# sel_cricket$team_a_venue <- as.character(sel_cricket$team_a_venue)
str(sel_cricket)
homewin <-
(
sel_cricket %>%
filter(team_a_venue == "home") %>%
group_by(team_a, team_b) %>%
summarise(won = sum(team_a == winner), total = length(winner)) %>%
mutate(percent = (won / total) * 100) %>% arrange(team_a, team_b)
)
awaywin <-
(
sel_cricket %>%
filter(team_a_venue == "away") %>%
group_by(team_a, team_b) %>%
summarise(won = sum(team_a == winner), total = length(winner)) %>%
mutate(percent = (won / total) * 100) %>% arrange(team_a, team_b)
)
splitted.data.home <- split(homewin, homewin$team_a)
splitted.data.away <- split(awaywin, awaywin$team_a)
fill <- c("#4f90c1", "#d3c8c8")
par(mfrow = c(1, 1))
library(ggplot2)
library(scales)
for (country in levels(cricket$team_a)[1:9]) {
chome <- splitted.data.home[country]
coutside <- splitted.data.away[country]
title <- paste(country, "vs Rest")
dat1 <-
data.frame(
type = "Home",
percent = chome[[1]]$percent,
opponent = chome[[1]]$team_b,
total = chome[[1]]$total
)
dat2 <-
data.frame(
type = "Away",
percent = coutside[[1]]$percent,
opponent = coutside[[1]]$team_b,
total = coutside[[1]]$total
)
dat <- rbind(dat1, dat2)
p4 <-
ggplot() + theme_bw() + geom_bar(aes(y = percent, x = opponent, fill = type),
data = dat,
stat = "identity")
p4 <-
p4 + theme(axis.text.x = element_text(angle = 70, hjust = 1))
p4 <-
p4 + geom_text(
data = dat,
aes(
x = opponent,
y = percent,
label = paste0(round(percent,
2), "%")
),
size = 3,
position = position_stack(vjust = 0.5)
)
p4 <-
p4 + theme(
legend.position = "bottom",
legend.direction = "horizontal",
legend.title = element_blank()
)
p4 <-
p4 + ggtitle(title) + labs(y = "Win percent") + scale_y_continuous(labels = dollar_format(suffix = "%",
prefix = ""))
p4 <- p4 + scale_fill_manual(values = fill)
print(p4)
}
# Percentage of Team vs Opponent MEAN HOME win
mean(homewin$percent[homewin$team_a == "Australia"]) # 73.89%
mean(homewin$percent[homewin$team_a == "Bangladesh"]) # 13.64%
mean(homewin$percent[homewin$team_a == "England"]) # 64.71%
mean(homewin$percent[homewin$team_a == "India"]) # 63.13%
mean(homewin$percent[homewin$team_a == "New Zealand"]) # 66.94%
mean(homewin$percent[homewin$team_a == "Pakistan"]) # 62.65%
mean(homewin$percent[homewin$team_a == "South Africa"]) # 75.02%
mean(homewin$percent[homewin$team_a == "Sri Lanka"]) # 69.14%
mean(homewin$percent[homewin$team_a == "West Indies"]) # 66.16%
mean(homewin$percent[homewin$team_a == "Zimbabwe"]) # 21.33 %
# Percentage of Team vs Opponent MEAN AWAY win
mean(awaywin$percent[awaywin$team_a == "Australia"]) # 78.62%
mean(awaywin$percent[awaywin$team_a == "Bangladesh"]) # 14.48%
mean(awaywin$percent[awaywin$team_a == "England"]) # 54.93%
mean(awaywin$percent[awaywin$team_a == "India"]) # 55.55%
mean(awaywin$percent[awaywin$team_a == "New Zealand"]) # 55.69%
mean(awaywin$percent[awaywin$team_a == "Pakistan"]) # 56.40%
mean(awaywin$percent[awaywin$team_a == "South Africa"]) # 63.29%
mean(awaywin$percent[awaywin$team_a == "Sri Lanka"]) # 55.07%
mean(awaywin$percent[awaywin$team_a == "West Indies"]) # 68.75%
mean(awaywin$percent[awaywin$team_a == "Zimbabwe"]) # NaN
# 300 or more runs, more wins
library(dplyr)
fil_bat_decision <- filter(.data = cricket, first_innings_total >= 300) # 227 matches w/ 300+ runs
fil_bat_decision <- select(.data = fil_bat_decision, team_a, team_b, toss_winner:first_innings_total, winner)
bat_won <- nrow(fil_bat_decision[fil_bat_decision$toss_decision == "bat"
& fil_bat_decision$toss_winner == fil_bat_decision$winner ,]) # 108
field_won <- nrow(fil_bat_decision[fil_bat_decision$toss_decision == "field"
& fil_bat_decision$toss_winner != fil_bat_decision$winner ,]) # 93
(bat_won + field_won)/nrow(fil_bat_decision) # 88.54% won
###### Data Mining - Decision Trees (rpart) #####
library(dplyr)
# convert logical value to numeric
cricket$win_toss_win_game <- as.numeric(cricket$win_toss_win_game)
# select data
df <- select(.data = cricket, team_a:gender, series:team_a_venue,toss_winner:duckworth_lewis, win_toss_win_game)
df <- arrange(.data = df, team_a, first_innings_total)
df$win_toss_win_game <- factor(df$win_toss_win_game, levels = c(0, 1), labels = c("lost", "won"))
# verify
levels(df$win_toss_win_game)
sapply(df$win_toss_win_game, class)
str(df)
# write to csv file
write.csv(x = df,
file = "4-data-mining/win_toss_win_game.csv",
row.names = FALSE)
# extract training data
set.seed(1234)
train <- sample(nrow(df), 0.7 * nrow(df))
df.train <- df[train, ]
# write to csv file
write.csv(x = df.train,
file = "4-data-mining/win_toss_win_game_train.csv",
row.names = FALSE)
# extract validation data
df.validate <- df[-train, ]
# write to csv file
write.csv(x = df.validate,
file = "4-data-mining/win_toss_win_game_validate.csv",
row.names = FALSE)
# Inspect
# Categorical variables are examined using table()
table(df.train$win_toss_win_game)
table(df.validate$win_toss_win_game)
# apply decision tree technique
library(rpart)
set.seed(1234)
dtree <-
rpart(
win_toss_win_game ~ .,
data = df.train,
method = "class",
parms = list(split = "information")
)
dtree
summary(dtree)
# Examine cp table to choose a final tree size
# CP table contains prediction error for various tree sizes
# CP is used to penalise larger trees
# nsplit = number of branch splits i.e. n + 1 terminal nodes
# rel error = error rate
# xerror = cross-validated error based on 10-fold cross validation
# xstd = standard error of the cross validation
dtree$cptable
# Plot complexity paramenter (CP)
plotcp(dtree)
# Prune the tree
# The tree is too large
# Prune least important splits
## smallest tree -> range between xerror + xstd and xerror - xstd
dtree.pruned <- prune(dtree, cp = 0.019)
plotcp(dtree.pruned)
library(rpart.plot)
prp(
dtree.pruned,
type = 2,
extra = 104,
fallen.leaves = T,
main = "Decision Tree"
)
# library(RGtk2)
# library(rattle)
# fancyRpartPlot(dtree)
# Classify each observation against validation sample data
dtree.pred <- predict(dtree, df.validate, type = "class")
# Create a cross tabulation of the actual status against the predicted status
dtree.perf <- table(df.validate$win_toss_win_game, dtree.pred, dnn = c("Actual", "Predicted"))
dtree.perf
library(party)
fit.ctree <- ctree(win_toss_win_game ~ ., data = df.train)
plot(fit.ctree, main = "Conditional Inference Tree")
ctree.pred <- predict(fit.ctree, df.validate, type = "response")
ctree.perf <- table(df.validate$win_toss_win_game, ctree.pred, dnn = c("Actual", "Predicted"))
ctree.perf
|
# plot4.R: build a 2x2 panel of household power-consumption plots for
# 2007-02-01 .. 2007-02-02 and write it to plot4.png.
# Raw data is ';'-separated; '?' marks missing readings.
dane <- read.csv("household_power_consumption.txt",sep = ";", na.strings="?")
# Parse the day/month/year date, then fuse date and time into one
# "YYYY-MM-DD HH:MM:SS" character column and drop the redundant Time column.
dane$Date <- as.Date(dane$Date, format="%d/%m/%Y")
dane$Date<-paste(dane$Date, dane$Time)
dane$Time<-NULL
# Keep only the two target days; as.Date() on the fused string ignores the
# time-of-day part.
sub.dane = subset(dane, as.Date(Date)>= '2007-02-01'& as.Date(Date)<='2007-02-02')
# Convert the fused strings to POSIXct so plot() gets a real datetime axis.
sub.dane$Date<-as.POSIXct(strptime(sub.dane$Date, "%Y-%m-%d %H:%M:%S"))
Sys.setlocale("LC_TIME", "C") ##this ensures that weekdays will be in English
# Render all four panels into a single 480x480 PNG device.
png(filename="plot4.png", width=480, height=480)
par(mfrow=c(2,2))
# Panel 1: global active power over time.
plot(sub.dane$Date,sub.dane$Global_active_power, type='l', xlab="", ylab="Global Active Power (kilowatts)")
# Panel 2: voltage over time.
plot(sub.dane$Date,sub.dane$Voltage, type='l', xlab="datetime", ylab="Voltage")
# Panel 3: the three sub-metering series overlaid on one set of axes.
plot(sub.dane$Date, sub.dane$Sub_metering_1, type='l', xlab="", ylab="Energy sub metering")
lines(sub.dane$Date, sub.dane$Sub_metering_2, col="red")
lines(sub.dane$Date, sub.dane$Sub_metering_3, col="blue")
# Panel 4: global reactive power over time. (dev.off() follows below.)
plot(sub.dane$Date,sub.dane$Global_reactive_power, type='l', xlab="datetime", ylab="Global_reactive_power")
dev.off() | /plot4.R | no_license | cornerek/ExData_Plotting1 | R | false | false | 996 | r | dane <- read.csv("household_power_consumption.txt",sep = ";", na.strings="?")
dane$Date <- as.Date(dane$Date, format="%d/%m/%Y")
dane$Date<-paste(dane$Date, dane$Time)
dane$Time<-NULL
sub.dane = subset(dane, as.Date(Date)>= '2007-02-01'& as.Date(Date)<='2007-02-02')
sub.dane$Date<-as.POSIXct(strptime(sub.dane$Date, "%Y-%m-%d %H:%M:%S"))
Sys.setlocale("LC_TIME", "C") ##this ensures that weekdays will be in English
png(filename="plot4.png", width=480, height=480)
par(mfrow=c(2,2))
plot(sub.dane$Date,sub.dane$Global_active_power, type='l', xlab="", ylab="Global Active Power (kilowatts)")
plot(sub.dane$Date,sub.dane$Voltage, type='l', xlab="datetime", ylab="Voltage")
plot(sub.dane$Date, sub.dane$Sub_metering_1, type='l', xlab="", ylab="Energy sub metering")
lines(sub.dane$Date, sub.dane$Sub_metering_2, col="red")
lines(sub.dane$Date, sub.dane$Sub_metering_3, col="blue")
plot(sub.dane$Date,sub.dane$Global_reactive_power, type='l', xlab="datetime", ylab="Global_reactive_power")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_wind.R
\name{plot_wind}
\alias{plot_wind}
\alias{plot_wind.swmpr}
\title{Create a wind rose}
\usage{
plot_wind(swmpr_in, ...)
\method{plot_wind}{swmpr}(swmpr_in, years = NULL, angle = 45,
width = 1.5, breaks = 5, paddle = FALSE, grid.line = 10,
max.freq = 30, cols = "GnBu", annotate = FALSE, main = NULL,
type = "default", between = list(x = 1, y = 1),
par.settings = NULL, strip = NULL, ...)
}
\arguments{
\item{swmpr_in}{input swmpr object}
\item{...}{arguments passed to or from other methods}
\item{years}{numeric of years to plot, defaults to most recent}
\item{angle}{numeric for the number of degrees occupied by each spoke}
\item{width}{numeric for width of paddles if \code{paddle = TRUE}}
\item{breaks}{numeric for the number of break points in the wind speed}
\item{paddle}{logical for paddles at the ends of the spokes}
\item{grid.line}{numeric for grid line interval to use}
\item{max.freq}{numeric for the scaling used to set the maximum value of the radial limits (like zoom)}
\item{cols}{chr string for colors to use for plotting, can be any palette R recognizes or a collection of colors as a vector}
\item{annotate}{logical indicating if text is shown on the bottom of the plot for the percentage of observations as 'calm' and mean values}
\item{main}{chr string for plot title, defaults to station name and year plotted}
\item{type}{chr string for temporal divisions of the plot, defaults to whole year. See details.}
\item{between}{list for lattice plot options, defines spacing between plots}
\item{par.settings}{list for optional plot formatting passed to \code{\link[lattice]{lattice.options}}}
\item{strip}{list for optional strip formatting passed to \code{\link[lattice]{strip.custom}}}
}
\value{
A wind rose plot
}
\description{
Create a wind rose from met data
}
\details{
This function is a convenience wrapper to \code{\link[openair]{windRose}}. Most of the arguments are taken directly from this function.
The \code{type} argument can be used for temporal divisions of the plot. Options include the entire year (\code{type = "default"}), seasons (\code{type = "season"}), months (\code{type = "month"}), or weekdays (\code{type = "weekday"}). Combinations are also possible (see \code{\link[openair]{windRose}}).
}
\examples{
plot_wind(apaebmet)
}
\author{
Kimberly Cressman, Marcus Beck
}
\concept{analyze}
| /man/plot_wind.Rd | no_license | swmpkim/SWMPr | R | false | true | 2,452 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_wind.R
\name{plot_wind}
\alias{plot_wind}
\alias{plot_wind.swmpr}
\title{Create a wind rose}
\usage{
plot_wind(swmpr_in, ...)
\method{plot_wind}{swmpr}(swmpr_in, years = NULL, angle = 45,
width = 1.5, breaks = 5, paddle = FALSE, grid.line = 10,
max.freq = 30, cols = "GnBu", annotate = FALSE, main = NULL,
type = "default", between = list(x = 1, y = 1),
par.settings = NULL, strip = NULL, ...)
}
\arguments{
\item{swmpr_in}{input swmpr object}
\item{...}{arguments passed to or from other methods}
\item{years}{numeric of years to plot, defaults to most recent}
\item{angle}{numeric for the number of degrees occupied by each spoke}
\item{width}{numeric for width of paddles if \code{paddle = TRUE}}
\item{breaks}{numeric for the number of break points in the wind speed}
\item{paddle}{logical for paddles at the ends of the spokes}
\item{grid.line}{numeric for grid line interval to use}
\item{max.freq}{numeric for the scaling used to set the maximum value of the radial limits (like zoom)}
\item{cols}{chr string for colors to use for plotting, can be any palette R recognizes or a collection of colors as a vector}
\item{annotate}{logical indicating if text is shown on the bottom of the plot for the percentage of observations as 'calm' and mean values}
\item{main}{chr string for plot title, defaults to station name and year plotted}
\item{type}{chr string for temporal divisions of the plot, defaults to whole year. See details.}
\item{between}{list for lattice plot options, defines spacing between plots}
\item{par.settings}{list for optional plot formatting passed to \code{\link[lattice]{lattice.options}}}
\item{strip}{list for optional strip formatting passed to \code{\link[lattice]{strip.custom}}}
}
\value{
A wind rose plot
}
\description{
Create a wind rose from met data
}
\details{
This function is a convenience wrapper to \code{\link[openair]{windRose}}. Most of the arguments are taken directly from this function.
The \code{type} argument can be used for temporal divisions of the plot. Options include the entire year (\code{type = "default"}), seasons (\code{type = "season"}), months (\code{type = "month"}), or weekdays (\code{type = "weekday"}). Combinations are also possible (see \code{\link[openair]{windRose}}).
}
\examples{
plot_wind(apaebmet)
}
\author{
Kimberly Cressman, Marcus Beck
}
\concept{analyze}
|
# figure22.R: proportion of patients carrying recurrent TERT promoter
# mutations, split by Rb-pathway status, plotted as a dodged bar chart.
setwd("~/Desktop/tmp/MorrisonLab/Computational/06102019-TERT/")
library(ggplot2)
# Fix: fread() (data.table) and filter()/%>% (dplyr) were used below but
# never loaded, so the script failed at the first fread()/filter() call.
library(data.table)
library(dplyr)
TERT = read.table("TERT_mut.tsv", sep="\t")
# Keep a subset of columns, then de-duplicate rows. Column meanings
# (V2 = patient ID, V10/V11 = positions, V16/V17 = ref/alt alleles) are
# inferred from the filters below — confirm against the TSV header.
TERT = TERT[,c(1,2,7,8,9,10,11,13,14,16,17,29)]
TERT = unique(TERT)
# Patient ID lists for Rb-pathway-altered and non-altered cohorts.
rb_patient = fread("~/Desktop/rblist.csv")
not_rb_patient = fread("~/Desktop/notrblist.csv")
TERT_rb <- TERT %>% filter(V2 %in% rb_patient$x)
TERT_notrb <- TERT %>% filter(V2 %in% not_rb_patient$x)
# Restrict to three recurrent variants: 1295250 G>A, 1295228 G>A, and the
# 1295242-1295243 event (presumably the known TERT promoter hotspots —
# confirm coordinates against the reference used upstream).
TERT_rb1 <- TERT_rb %>% filter((V10 == "1295250" & V16 == "G" & V17 == "A") | (V10 == "1295228" & V16 =="G" & V17 == "A") | (V10 == "1295242" & V11 == "1295243"))
length(unique(TERT_rb1$V2))  # count of distinct Rb-altered patients hit
TERT_notrb1 <- TERT_notrb %>% filter((V10 == "1295250" & V16 == "G" & V17 == "A") | (V10 == "1295228" & V16 =="G" & V17 == "A") | (V10 == "1295242" & V11 == "1295243"))
length(unique(TERT_notrb1$V2))  # count of distinct non-altered patients hit
# Pre-computed proportions for the figure (column names, including the
# misspelled "Porportion", must match the CSV header exactly).
dat <- fread("~/Desktop/yr5/summ/bioinformatics_util/data/fig5e.csv")
ggplot(dat, aes(x = Type, y = `Porportion of patients`, fill = `Rb-pathway status`)) +
  geom_bar(stat = "identity", position = "dodge") + theme_bw() +
  theme(axis.title = element_text(face = "bold"), legend.title = element_text(face = "bold"),
        axis.text.x = element_text(angle = 30, hjust = 1)) + xlab("Mutation")
ggsave("~/Desktop/yr5/summ/bioinformatics_util/fig22.pdf", width = 3.5, height = 3.5)
| /plot_script/figure22.R | no_license | mkmwong/bioinformatics_util | R | false | false | 1,269 | r | setwd("~/Desktop/tmp/MorrisonLab/Computational/06102019-TERT/")
library(ggplot2)
TERT = read.table("TERT_mut.tsv", sep="\t")
TERT = TERT[,c(1,2,7,8,9,10,11,13,14,16,17,29)]
TERT = unique(TERT)
rb_patient = fread("~/Desktop/rblist.csv")
not_rb_patient = fread("~/Desktop/notrblist.csv")
TERT_rb <- TERT %>% filter(V2 %in% rb_patient$x)
TERT_notrb <- TERT %>% filter(V2 %in% not_rb_patient$x)
TERT_rb1 <- TERT_rb %>% filter((V10 == "1295250" & V16 == "G" & V17 == "A") | (V10 == "1295228" & V16 =="G" & V17 == "A") | (V10 == "1295242" & V11 == "1295243"))
length(unique(TERT_rb1$V2))
TERT_notrb1 <- TERT_notrb %>% filter((V10 == "1295250" & V16 == "G" & V17 == "A") | (V10 == "1295228" & V16 =="G" & V17 == "A") | (V10 == "1295242" & V11 == "1295243"))
length(unique(TERT_notrb1$V2))
dat <- fread("~/Desktop/yr5/summ/bioinformatics_util/data/fig5e.csv")
ggplot(dat, aes(x = Type, y = `Porportion of patients`, fill = `Rb-pathway status`)) +
geom_bar(stat = "identity", position = "dodge") + theme_bw() +
theme(axis.title = element_text(face = "bold"), legend.title = element_text(face = "bold"),
axis.text.x = element_text(angle = 30, hjust = 1)) + xlab("Mutation")
ggsave("~/Desktop/yr5/summ/bioinformatics_util/fig22.pdf", width = 3.5, height = 3.5)
|
## Fit a simple linear regression of variable_1 on variable_2 and return the
## fitted model's summary (coefficients, R-squared, residual diagnostics).
linear_regression <- function(variable_1, variable_2) {
  fitted_model <- lm(variable_1 ~ variable_2)
  model_summary <- summary(fitted_model)
  return(model_summary)
}
Ex:
> linear_regression(Data$Estimated_Age, Data$SVL) | /brock/R/project_two_function.R | no_license | BiologicalDataAnalysis2019/2020 | R | false | false | 287 | r | ## Run a linear regression analysis of two chosen variables in a dataset.
## Fit a simple linear model predicting variable_1 from variable_2.
##
## Args:
##   variable_1: numeric response vector.
##   variable_2: numeric predictor vector, same length as variable_1.
## Returns:
##   A summary.lm object with coefficients, R-squared and residual stats.
linear_regression <- function(variable_1, variable_2) {
  model_fit <- lm(variable_1 ~ variable_2)
  # Return the summary directly: the original stored it in a local named
  # `summary`, which shadowed base::summary(), and then return()-ed it.
  summary(model_fit)
}
Ex:
> linear_regression(Data$Estimated_Age, Data$SVL) |
library(cancereffectsizeR)
library(scales)
library(stringr)
library(dplyr)
library(ggplot2)
library(cowplot)
# Format numbers as plotmath scientific-notation labels (e.g. "1 %*% 10^5"),
# returning the literal string "0" for zero entries. Relies on
# scales::label_scientific() for the base "1e+05"-style formatting.
scientific <- function(x) {
  sci_text <- label_scientific()(x)
  # Turn "1e+05" into "1 %*% 10^5": rewrite the exponent marker, then strip
  # the plus sign.
  plotmath_text <- gsub("[+]", "", gsub("e", " %*% 10^", sci_text))
  ifelse(x == 0, "0", parse(text = plotmath_text))
}
#Load in your file
# Load the cancereffectsizeR analysis object and pull per-variant selection
# results; selection.1 holds the three-stage model's selection intensities.
threestage_final <- load_cesa("threestage_final.rds")
threestage_results <- snv_results(threestage_final)
threestage_results <- threestage_results$selection.1
# Make variant names display-friendly (underscore -> space) and keep only
# amino-acid-changing variants.
threestage_results$variant_name <- str_replace(threestage_results$variant_name, "_", " ")
aac <- threestage_results$variant_type == "aac"
threestage_results <- threestage_results[aac,]
# Column selection by position: presumably variant_name, gene, the three
# stage-specific si columns plus CIs and maf frequencies — confirm against
# the cancereffectsizeR output layout for this version.
threestage_results <- threestage_results[,c(1,3:5,7:12,19,37,39,41)]
#Separating the data in to early/late/met
# Each stage-specific table keeps variants observed at least once in that
# stage, is sorted by descending selection intensity, renames the stage's si
# column to the common name "si", and gets a `progression` label.
# NOTE(review): `order(-si_1)` uses a bare column name, i.e. data.table
# non-standard evaluation — this assumes the results are a data.table.
# NOTE(review): the loop variable name `all` shadows base::all() here.
threestage_results_early <- threestage_results[,c(1:2,5:6,11,12)]
all <-threestage_results_early$maf_freq_in_Early >=1
threestage_results_early <- threestage_results_early[all,]
threestage_results_early <- threestage_results_early[order(-si_1),]
colnames(threestage_results_early)[2] <- "si"
threestage_results_early$progression <- rep("Early", length(threestage_results_early$variant_name))
threestage_results_late <- threestage_results[,c(1,3,7:8,11,13)]
all <-threestage_results_late$maf_freq_in_Late >=1
threestage_results_late <- threestage_results_late[all,]
threestage_results_late <- threestage_results_late[order(-si_2),]
colnames(threestage_results_late)[2] <- "si"
threestage_results_late$progression <- rep("Late", length(threestage_results_late$variant_name))
threestage_results_met <- threestage_results[,c(1,4,9:10,11,14)]
all <-threestage_results_met$maf_freq_in_Metastasis >=1
threestage_results_met <- threestage_results_met[all,]
threestage_results_met <- threestage_results_met[order(-si_3),]
colnames(threestage_results_met)[2] <- "si"
threestage_results_met$progression <- rep("Metastasis", length(threestage_results_met$variant_name))
#Extracting recurrent variants
# "Recurrent" = observed more than once in the stage's MAF.
recurrent <- threestage_results_early$maf_freq_in_Early > 1
threestage_results_early_recur <- threestage_results_early[recurrent,]
recurrent <- threestage_results_late$maf_freq_in_Late > 1
threestage_results_late_recur <- threestage_results_late[recurrent,]
recurrent <- threestage_results_met$maf_freq_in_Metastasis > 1
threestage_results_met_recur <- threestage_results_met[recurrent,]
##########################################
#Summary function
# Per-gene summary of selection intensities, restricted to genes carrying
# more than one selected (si > 1) variant. Returns one row per such gene:
# cumulative/mean/median/sd/max si, the distinct-variant count, and the
# columns of the gene's single highest-si variant, sorted by cumulative si
# then by variant count.
summary_gene <- function(data) {
  selected <- data %>%
    arrange(desc(si)) %>%
    filter(si > 1)
  # Aggregate si statistics per gene; keep only multi-variant genes.
  gene_stats <- selected %>%
    group_by(gene) %>%
    summarise(cum_si = sum(si),
              mean_si = mean(si),
              median_si = median(si),
              sd = sd(si),
              max_si = max(si),
              n_variant = n_distinct(variant_name)) %>%
    filter(n_variant > 1)
  # `selected` is sorted by descending si, so row 1 of each gene group is
  # that gene's top-scoring variant.
  best_variant <- selected %>%
    group_by(gene) %>%
    filter(row_number() == 1)
  # Drop the top variant's si column (column 3) before merging so it does
  # not duplicate the per-gene statistics.
  combined <- merge(gene_stats, best_variant[, -3], by.x = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
  combined
}
#Used to find genes that have at least one recurrent variants
# Per-gene summary of selection intensities over all selected (si > 1)
# variants. Unlike summary_gene(), genes with a single variant are kept.
# Returns one row per gene: cumulative/mean/median/sd/max si, the distinct
# variant count, and the columns of the gene's highest-si variant, sorted by
# cumulative si then variant count.
summary_gene_recur <- function(data) {
  data_clean <- data %>%
    arrange(desc(si)) %>%
    filter(si > 1)
  # Aggregate si statistics for every gene. The original filter(n_variant > 0)
  # was a no-op (every grouped gene has >= 1 distinct variant) and its stale
  # "change sum to mean and sd" comment was misleading; both are removed.
  info1 <- data_clean %>% group_by(gene) %>%
    summarise(cum_si = sum(si),
              mean_si = mean(si),
              median_si = median(si),
              sd = sd(si),
              max_si = max(si),
              n_variant = n_distinct(variant_name))
  # data_clean is sorted by descending si, so row 1 of each gene group is
  # the gene's top-scoring variant; drop its si column (column 3) before
  # merging with the per-gene statistics.
  top_variant <- data_clean %>%
    group_by(gene) %>% filter(row_number() == 1)
  merge_info <- merge(info1, top_variant[, -3], by.x = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
  return(merge_info)
}
###############################################################################################
#Gene level SI
################################################################################################
# Per-stage (variant_name, gene, si) tables over ALL variants seen in each stage
early_data <- data.frame(variant_name = threestage_results_early$variant_name,
gene = threestage_results_early$gene,
si = threestage_results_early$si)
late_data <- data.frame(variant_name = threestage_results_late$variant_name,
gene = threestage_results_late$gene,
si = threestage_results_late$si)
met_data <- data.frame(variant_name = threestage_results_met$variant_name,
gene = threestage_results_met$gene,
si = threestage_results_met$si)
# Gene summaries restricted to genes with >1 distinct variant (summary_gene)
early_info <- summary_gene(early_data)
late_info <- summary_gene(late_data)
met_info <- summary_gene(met_data)
# Same tables built from recurrent variants only (MAF count > 1 per stage)
early_data_recur <- data.frame(variant_name = threestage_results_early_recur$variant_name,
gene = threestage_results_early_recur$gene,
si = threestage_results_early_recur$si)
late_data_recur <- data.frame(variant_name = threestage_results_late_recur$variant_name,
gene = threestage_results_late_recur$gene,
si = threestage_results_late_recur$si)
met_data_recur <- data.frame(variant_name = threestage_results_met_recur$variant_name,
gene = threestage_results_met_recur$gene,
si = threestage_results_met_recur$si)
early_info_recur <- summary_gene_recur(early_data_recur)
late_info_recur <- summary_gene_recur(late_data_recur)
met_info_recur <- summary_gene_recur(met_data_recur)
#Filtering out all genes that have NO recurrent variants,
#aka filtering in genes that have at least ONE recurrent variant
early_info <- early_info[which(early_info$gene %in% early_info_recur$gene),]
late_info <- late_info[which(late_info$gene %in% late_info_recur$gene),]
met_info <- met_info[which(met_info$gene %in% met_info_recur$gene),]
# Replace NA entries of a vector with `fill` (default 0).
# Direct subassignment (rather than ifelse()) preserves the vector's
# attributes and is faster; results are identical for plain vectors.
fill_na <- function(x, fill = 0) {
  x[is.na(x)] <- fill
  x
}
# Outer-join early/late gene summaries into a primary-tumor table; genes
# absent in one stage get NA columns from the join, filled with 0 so the
# combined counts/means below are defined.
prim_info <- merge(early_info, late_info, by = "gene", all = T,
suffixes = c(".e", ".l")) %>%
mutate_at(c("cum_si.e", "cum_si.l",
"mean_si.e", "mean_si.l",
"sd.e", "sd.l",
"n_variant.e", "n_variant.l"), fill_na) %>%
mutate(n_variant_prim = n_variant.e + n_variant.l,
mean_si_prim = (cum_si.e + cum_si.l) / n_variant_prim) %>%
arrange(desc(n_variant_prim))
# Tag metastasis columns with ".m", restoring the join key name afterwards
colnames(met_info) <- paste(colnames(met_info), ".m", sep = "")
colnames(met_info)[1] <- "gene"
# Full three-stage outer join with 0-filled gaps plus overall totals/means
stage_merge <- merge(prim_info, met_info, by = "gene", all = T) %>%
mutate_at(c("cum_si.e", "cum_si.l", "cum_si.m",
"mean_si.e", "mean_si.l", "mean_si.m",
"sd.e", "sd.l", "sd.m",
"n_variant.e", "n_variant.l", "n_variant.m"), fill_na) %>%
mutate(n_variant_total = n_variant.e + n_variant.l + n_variant.m,
mean_si_total = (cum_si.e + cum_si.l + cum_si.m) / n_variant_total) %>%
arrange(desc(n_variant_total))
########################################################################################
# Early
# Top 10 genes by mean early-stage SI
stage_merge_early_ordered <- stage_merge[order(-stage_merge$mean_si.e),]
selected_early_genes <- stage_merge_early_ordered$gene[1:10]
#select all variants within gene list
# (columns after select(1,2,7,6,5): variant_name, si, progression, maf, gene)
early_list.e <- threestage_results_early %>%
filter(gene %in% selected_early_genes) %>%
select(1,2,7,6,5)
early_list.l <- threestage_results_late %>%
filter(gene %in% selected_early_genes) %>%
select(1,2,7,6,5)
early_list.m <- threestage_results_met %>%
filter(gene %in% selected_early_genes) %>%
select(1,2,7,6,5)
# Unify the stage-specific MAF column name before stacking the three stages
colnames(early_list.e)[4] <- "maf_freq"
colnames(early_list.l)[4] <- "maf_freq"
colnames(early_list.m)[4] <- "maf_freq"
early_list <- rbind(early_list.e, early_list.l)
early_list <- rbind(early_list, early_list.m)
#set order of genes for plot
early_list$gene <- early_list$gene %>%
factor(levels = selected_early_genes)
# Jittered/dodged SI per gene, colored by stage; grey verticals separate
# gene columns; y axis labels use the plotmath `scientific` formatter
early_jitter <- ggplot(early_list, aes(x=gene, y=si, color=progression)) +
geom_point(position = position_jitterdodge(jitter.width = 0.1))+
xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
panel.border = element_blank(),
legend.position = c(0.95, 0.85))+
scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
geom_vline(xintercept=seq(1.5, length(unique(early_list$gene))-0.5, 1),
lwd=.5, colour="lightgrey") +
scale_y_continuous(labels=scientific, limits = c(-0.8e4, 1e5))
early_jitter
########################################################################################
# Late
# Top 10 genes by mean late-stage SI
stage_merge_late_ordered <- stage_merge[order(-stage_merge$mean_si.l),]
selected_late_genes <- stage_merge_late_ordered$gene[1:10]
#select all variants within gene list
late_list.e <- threestage_results_early %>%
filter(gene %in% selected_late_genes) %>%
select(1,2,7,6,5)
late_list.l <- threestage_results_late %>%
filter(gene %in% selected_late_genes) %>%
select(1,2,7,6,5)
late_list.m <- threestage_results_met %>%
filter(gene %in% selected_late_genes) %>%
select(1,2,7,6,5)
colnames(late_list.e)[4] <- "maf_freq"
colnames(late_list.l)[4] <- "maf_freq"
colnames(late_list.m)[4] <- "maf_freq"
late_list <- rbind(late_list.e, late_list.l)
late_list <- rbind(late_list, late_list.m)
#set order of genes for plot
late_list$gene <- late_list$gene %>%
factor(levels = selected_late_genes)
#Dummy points
# Placeholder rows with near-zero si (0.001) for specific gene/stage
# combinations; presumably so every gene shows all stage groups in the
# dodged plot — TODO confirm intent
dummy_OR4N4.e <- list("OR4N4 Variant.e", as.double(0.001), "Early", "1", "OR4N4")
dummy_CHRNA6.e <- list("CHRNA6 Variant.e", as.double(0.001), "Early", "1", "CHRNA6")
late_list <- late_list %>%
rbind(dummy_OR4N4.e) %>%
rbind(dummy_CHRNA6.e)
library(ggplot2)
late_jitter<- ggplot(late_list, aes(x=gene, y=si, color=progression)) +
geom_point(position = position_jitterdodge(jitter.width = 0.1))+
xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
panel.border = element_blank(),
legend.position = c(0.95, 0.85))+
scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
geom_vline(xintercept=seq(1.5, length(unique(late_list$gene))-0.5, 1),
lwd=.5, colour="lightgrey") +
scale_y_continuous(labels=scientific, limits = c(-.24e4, 3e4), breaks = c(0, 1e4, 1.5e4, 2e4, 3e4))
late_jitter
########################################################################################
# Metastasis
# Top 10 genes by mean metastasis SI
stage_merge_met_ordered <- stage_merge[order(-stage_merge$mean_si.m),]
selected_met_genes <- stage_merge_met_ordered$gene[1:10]
#select all variants within gene list
met_list.e <- threestage_results_early %>%
filter(gene %in% selected_met_genes) %>%
select(1,2,7,6,5)
met_list.l <- threestage_results_late %>%
filter(gene %in% selected_met_genes) %>%
select(1,2,7,6,5)
met_list.m <- threestage_results_met %>%
filter(gene %in% selected_met_genes) %>%
select(1,2,7,6,5)
colnames(met_list.e)[4] <- "maf_freq"
colnames(met_list.l)[4] <- "maf_freq"
colnames(met_list.m)[4] <- "maf_freq"
met_list <- rbind(met_list.e, met_list.l)
met_list <- rbind(met_list, met_list.m)
#set order of genes for plot
met_list$gene <- met_list$gene %>%
factor(levels = selected_met_genes)
#Dummy points
# Placeholder rows with near-zero si (0.001) for specific gene/stage
# combinations; presumably so every gene shows all stage groups in the
# dodged plot — TODO confirm intent
dummy_ORC3.e <- list("ORC3 Variant.e", as.double(0.001), "Early", "1", "ORC3")
dummy_ORC3.l <- list("ORC3 Variant.l", as.double(0.001), "Late", "1", "ORC3")
dummy_ZNF780B.l <- list("ZNF780B Variant.l", as.double(0.001), "Late", "1", "ZNF780B")
dummy_DIMT1.e <- list("DIMT1 Variant.e", as.double(0.001), "Early", "1", "DIMT1")
dummy_DIMT1.l <- list("DIMT1 Variant.l", as.double(0.001), "Late", "1", "DIMT1")
dummy_KRTAP13_3.l <- list("KRTAP13-3 Variant.l", as.double(0.001), "Late", "1", "KRTAP13-3")
dummy_ZNF714.e <- list("ZNF714 Variant.e", as.double(0.001), "Early", "1", "ZNF714")
dummy_GRB7.e <- list("GRB7 Variant.e", as.double(0.001), "Early", "1", "GRB7")
dummy_APCS.e <- list("APCS Variant.e", as.double(0.001), "Early", "1", "APCS")
met_list <- met_list %>%
rbind(dummy_ORC3.e) %>%
rbind(dummy_ORC3.l) %>%
rbind(dummy_ZNF780B.l)%>%
rbind(dummy_DIMT1.e)%>%
rbind(dummy_DIMT1.l)%>%
rbind(dummy_KRTAP13_3.l)%>%
rbind(dummy_ZNF714.e)%>%
rbind(dummy_GRB7.e)%>%
rbind(dummy_APCS.e)
library(ggplot2)
met_jitter <- ggplot(met_list, aes(x=gene, y=si, color=progression)) +
geom_point(position = position_jitterdodge(jitter.width = 0.1))+
xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
panel.border = element_blank(),
legend.position = c(0.95, 0.85))+
scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
geom_vline(xintercept=seq(1.5, length(unique(met_list$gene))-0.5, 1),
lwd=.5, colour="lightgrey") +
scale_y_continuous(labels=scientific, limits = c(-0.9e4, 1.051e5))
met_jitter
##############################################################################
#Wilcoxon test
# Collect, for one progression-stage label, the SI values of every gene in
# `df` as a named list (one element per gene, in order of first appearance).
# Replaces nine hand-written grow-in-a-loop blocks; output names and element
# order match the originals (names came from unique(df$gene)).
collect_si <- function(df, stage) {
  genes <- as.character(unique(df$gene))
  out <- lapply(genes, function(g) {
    df %>%
      filter(gene == g) %>%
      filter(progression == stage) %>%
      pull(si)
  })
  names(out) <- genes
  out
}
# Per-gene SI vectors for each top-10 gene set, split by progression stage
early.e <- collect_si(early_list, "Early")
early.l <- collect_si(early_list, "Late")
early.m <- collect_si(early_list, "Metastasis")
late.e <- collect_si(late_list, "Early")
late.l <- collect_si(late_list, "Late")
late.m <- collect_si(late_list, "Metastasis")
met.e <- collect_si(met_list, "Early")
met.l <- collect_si(met_list, "Late")
met.m <- collect_si(met_list, "Metastasis")
# Pairwise Wilcoxon rank-sum p-values between progression stages for each
# top-10 gene set, one row per gene. `a` and `b` are equal-length lists of
# per-gene SI vectors. seq_along() generalizes the original hard-coded 1:10
# and vapply() replaces growing a vector inside a loop.
pairwise_wilcox_p <- function(a, b) {
  vapply(seq_along(a),
         function(i) wilcox.test(a[[i]], b[[i]])$p.value,
         numeric(1))
}
#Tests for Lower-risk
early.wilcox <- data.frame(early_late = pairwise_wilcox_p(early.e, early.l),
                           early_met = pairwise_wilcox_p(early.e, early.m),
                           late_met = pairwise_wilcox_p(early.l, early.m))
row.names(early.wilcox) <- unique(early_list$gene)
#Tests for Higher-risk
late.wilcox <- data.frame(early_late = pairwise_wilcox_p(late.e, late.l),
                          early_met = pairwise_wilcox_p(late.e, late.m),
                          late_met = pairwise_wilcox_p(late.l, late.m))
row.names(late.wilcox) <- unique(late_list$gene)
#Tests for Metastasis
met.wilcox <- data.frame(early_late = pairwise_wilcox_p(met.e, met.l),
                         early_met = pairwise_wilcox_p(met.e, met.m),
                         late_met = pairwise_wilcox_p(met.l, met.m))
row.names(met.wilcox) <- unique(met_list$gene)
# write.table(early.wilcox, file = "wilcox_early.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
# write.table(late.wilcox, file = "wilcox_late.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
# write.table(met.wilcox, file = "wilcox_met.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
###########################################################################
#Extracting data for Bayesian approach
#Extracting SI + 95% CI for each variant in each of the featured top 10 genes
# For one stage's results table: keep variants of the selected genes and
# standardize column names (after select(1,2,3,4,7,6,5), columns 3/4/6 are
# the 95% CI bounds and the stage-specific MAF frequency). Replaces nine
# hand-written copies of the same select/rename sequence.
prep_ci <- function(res, genes) {
  out <- res %>%
    filter(gene %in% genes) %>%
    select(1, 2, 3, 4, 7, 6, 5)
  colnames(out)[c(3, 4, 6)] <- c("ci_low_95", "ci_high_95", "maf_freq")
  out
}
#Early
early_list <- rbind(prep_ci(threestage_results_early, selected_early_genes),
                    prep_ci(threestage_results_late, selected_early_genes),
                    prep_ci(threestage_results_met, selected_early_genes))
#Late
late_list <- rbind(prep_ci(threestage_results_early, selected_late_genes),
                   prep_ci(threestage_results_late, selected_late_genes),
                   prep_ci(threestage_results_met, selected_late_genes))
#Metastasis
met_list <- rbind(prep_ci(threestage_results_early, selected_met_genes),
                  prep_ci(threestage_results_late, selected_met_genes),
                  prep_ci(threestage_results_met, selected_met_genes))
############################################################################
# Export per-stage variant tables (tab-separated, unquoted, no row names)
write.table(early_list, file = "lower-risk_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(late_list, file = "higher-risk_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(met_list, file = "metastatic_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
# NOTE(review): the intermediate *_list.e/.l/.m globals are no longer
# created here; nothing later in the script reads them.
##############################################################################
# Variants
# Top 10 recurrent variants per stage (recur tables were sorted by
# descending SI when built)
early_top10 <- threestage_results_early_recur[1:10,]
late_top10 <- threestage_results_late_recur[1:10,]
met_top10 <- threestage_results_met_recur[1:10,]
# Single-column jitter strip of top-10 variant SIs per stage; fixed jitter
# seed (5) for reproducibility, y limits matched to the gene-level panels.
# Manual hex colors presumably match the default ggplot2 three-hue palette
# used in the gene panels — confirm.
unique_jitter_early_all <- ggplot(data = early_top10, aes(x = 1, y = si, color=progression)) +
geom_jitter(position=position_jitter(0.0, seed = 5))+
theme(axis.ticks.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
legend.position="none")+
xlab("Top 10 variants") + ylab("") +
scale_color_manual(values = "#F8766D") +
scale_x_continuous(limits = c(0.975, 1.25)) +
scale_y_continuous(labels=scientific, limits = c(-0.8e4, 1e5))
unique_jitter_late_all<- ggplot(data = late_top10, aes(x = 1, y = si, color=progression)) +
geom_jitter(position=position_jitter(0.0, seed = 5))+
theme(axis.ticks.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
legend.position="none")+
xlab("Top 10 variants") + ylab("") +
scale_color_manual(values = "#00BA38") +
scale_x_continuous(limits = c(0.975, 1.25)) +
scale_y_continuous(labels=scientific, limits = c(-.24e4, 3e4), breaks = c(0, 1e4, 1.5e4, 2e4, 3.0e4))
unique_jitter_met_all<- ggplot(data = met_top10, aes(x = 1, y = si, color=progression)) +
geom_jitter(position=position_jitter(0.0, seed = 5))+
theme(axis.ticks.x = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
legend.position="none")+
xlab("Top 10 variants") + ylab("") +
scale_color_manual(values = "#619CFF") +
scale_x_continuous(limits = c(0.975, 1.25)) +
scale_y_continuous(labels=scientific, limits = c(-0.9e4, 1.051e5))
#########################################
# Build a bold, left-aligned title strip for stacking above a plot row
# with plot_grid. Replaces three identical ggdraw() blocks.
make_stage_title <- function(label) {
  ggdraw() +
    draw_label(
      label,
      fontface = 'bold',
      x = 0,
      hjust = 0
    ) +
    theme(
      # add margin on the left of the drawing canvas,
      # so title is aligned with left edge of first plot
      plot.margin = margin(0, 0, 0, 49)
    )
}
early_title <- make_stage_title("Lower-risk")
late_title <- make_stage_title("Higher-risk")
met_title <- make_stage_title("Metastases")
#######################################
#Combining all the graphs + titles
# For each stage: gene panel (A/C/E) beside its variant strip (B/D/F),
# then stacked under the stage's title strip; the three stages are finally
# stacked vertically.
early_combined <- plot_grid(early_jitter, unique_jitter_early_all,
align = "h", axis = "t", nrow = 1, ncol = 2, scale = 1, rel_widths = c(5,1),
labels = c("A", "B"), label_size = 10)
early_combined_title <- plot_grid(early_title, early_combined,
align = "h", axis = "t", nrow = 2, ncol = 1, scale = 1, rel_heights = c(0.1,1))
late_combined <- plot_grid(late_jitter, unique_jitter_late_all,
align = "h", axis = "t", nrow = 1, ncol = 2, scale = 1, rel_widths = c(5,1),
labels = c("C", "D"), label_size = 10)
late_combined_title <- plot_grid(late_title, late_combined,
align = "h", axis = "t", nrow = 2, ncol = 1, scale = 1, rel_heights = c(0.1,1))
met_combined <- plot_grid(met_jitter, unique_jitter_met_all,
align = "h", axis = "t", nrow = 1, ncol = 2, scale = 1, rel_widths = c(5,1),
labels = c("E", "F"), label_size = 10)
met_combined_title <- plot_grid(met_title, met_combined,
align = "h", axis = "t", nrow = 2, ncol = 1, scale = 1, rel_heights = c(0.1,1))
bar_box_combined <- plot_grid(early_combined_title, late_combined_title, met_combined_title,
align = "h", axis = "t", nrow = 3, ncol = 1, scale = 1)
bar_box_combined
# ggsave() without an explicit plot argument saves the last plot displayed
ggsave("PRAD_figures/bar_jitter_test.png", width = 12.5, height = 12.5)
library(cancereffectsizeR)
library(scales)
library(stringr)
library(dplyr)
library(ggplot2)
library(cowplot)
# Axis-label formatter: 0 is rendered as "0"; any other value is converted
# from scales::label_scientific() output (e.g. "1e+04") into a plotmath
# expression ("1 %*% 10^4") so ggplot draws true scientific notation.
# NOTE(review): this relies on ifelse() handling the expression object that
# parse() returns for a vector of break labels — confirm for multi-valued x.
scientific <- function(x){
ifelse(x==0, "0", parse(text=gsub("[+]", "", gsub("e", " %*% 10^", label_scientific()(x)))))
}
#Load in your file
# Load the saved cancereffectsizeR analysis and pull per-variant selection
# results
threestage_final <- load_cesa("threestage_final.rds")
threestage_results <- snv_results(threestage_final)
threestage_results <- threestage_results$selection.1
# "GENE_A123B" -> "GENE A123B" for display
threestage_results$variant_name <- str_replace(threestage_results$variant_name, "_", " ")
# Keep amino-acid-change variants only
aac <- threestage_results$variant_type == "aac"
threestage_results <- threestage_results[aac,]
# Keep name/SI/CI/gene/MAF columns by position (depends on the column
# layout produced by snv_results — TODO confirm indices against the output)
threestage_results <- threestage_results[,c(1,3:5,7:12,19,37,39,41)]
#Separating the data in to early/late/met
# Per-stage tables: variants observed at least once in that stage, sorted
# by descending SI, with a `progression` label column appended.
# NOTE(review): order(-si_1) etc. are data.table-style non-standard
# evaluation — assumes the results table is a data.table; confirm.
# (the local `all` variable shadows base::all within this script)
threestage_results_early <- threestage_results[,c(1:2,5:6,11,12)]
all <-threestage_results_early$maf_freq_in_Early >=1
threestage_results_early <- threestage_results_early[all,]
threestage_results_early <- threestage_results_early[order(-si_1),]
colnames(threestage_results_early)[2] <- "si"
threestage_results_early$progression <- rep("Early", length(threestage_results_early$variant_name))
threestage_results_late <- threestage_results[,c(1,3,7:8,11,13)]
all <-threestage_results_late$maf_freq_in_Late >=1
threestage_results_late <- threestage_results_late[all,]
threestage_results_late <- threestage_results_late[order(-si_2),]
colnames(threestage_results_late)[2] <- "si"
threestage_results_late$progression <- rep("Late", length(threestage_results_late$variant_name))
threestage_results_met <- threestage_results[,c(1,4,9:10,11,14)]
all <-threestage_results_met$maf_freq_in_Metastasis >=1
threestage_results_met <- threestage_results_met[all,]
threestage_results_met <- threestage_results_met[order(-si_3),]
colnames(threestage_results_met)[2] <- "si"
threestage_results_met$progression <- rep("Metastasis", length(threestage_results_met$variant_name))
#Extracting recurrent variants
# Recurrent = MAF count > 1 in that stage
recurrent <- threestage_results_early$maf_freq_in_Early > 1
threestage_results_early_recur <- threestage_results_early[recurrent,]
recurrent <- threestage_results_late$maf_freq_in_Late > 1
threestage_results_late_recur <- threestage_results_late[recurrent,]
recurrent <- threestage_results_met$maf_freq_in_Metastasis > 1
threestage_results_met_recur <- threestage_results_met[recurrent,]
##########################################
#Summary function
# Gene-level summary over positively selected variants (si > 1), restricted
# to genes with more than one distinct variant. Returns per-gene SI
# statistics joined with each gene's top (highest-SI) variant, sorted by
# cumulative SI then variant count.
summary_gene <- function(data) {
  data_clean <- data %>%
    arrange(desc(si)) %>%
    filter(si > 1)
  # Summarise information of gene with multiple variants
  info1 <- data_clean %>% group_by(gene) %>%
    summarise(cum_si = sum(si),
              mean_si = mean(si),
              median_si = median(si),
              sd = sd(si),
              max_si = max(si),
              n_variant = n_distinct(variant_name)) %>%
    filter(n_variant > 1)
  # Highest-SI variant per gene (data_clean is sorted by descending si)
  top_variant <- data_clean %>%
    group_by(gene) %>% filter(row_number() == 1)
  # Join explicitly on "gene". The original `by.x = "gene"` (with no by.y)
  # only worked because "gene" happened to be the sole shared column name;
  # column 3 (si) is dropped from top_variant before the join.
  merge_info <- merge(info1, top_variant[, -3], by = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
  return(merge_info)
}
#Used to find genes that have at least one recurrent variants
# Gene-level summary over positively selected variants (si > 1), keeping
# every gene with at least one such variant (the `n_variant > 0` filter is
# a no-op kept for symmetry with summary_gene, which requires > 1).
# Returns per-gene SI statistics joined with each gene's top (highest-SI)
# variant, sorted by cumulative SI then variant count.
summary_gene_recur <- function(data) {
  data_clean <- data %>%
    arrange(desc(si)) %>%
    filter(si > 1)
  # Summarise information of gene with multiple variants
  info1 <- data_clean %>% group_by(gene) %>%
    summarise(cum_si = sum(si), # change sum to mean and sd
              mean_si = mean(si),
              median_si = median(si),
              sd = sd(si),
              max_si = max(si),
              n_variant = n_distinct(variant_name)) %>%
    filter(n_variant > 0)
  # Highest-SI variant per gene (data_clean is sorted by descending si)
  top_variant <- data_clean %>%
    group_by(gene) %>% filter(row_number() == 1)
  # Join explicitly on "gene". The original `by.x = "gene"` (with no by.y)
  # only worked because "gene" happened to be the sole shared column name;
  # column 3 (si) is dropped from top_variant before the join.
  merge_info <- merge(info1, top_variant[, -3], by = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
  return(merge_info)
}
###############################################################################################
#Gene level SI
################################################################################################
# Per-stage (variant_name, gene, si) tables over ALL variants seen in each stage
early_data <- data.frame(variant_name = threestage_results_early$variant_name,
gene = threestage_results_early$gene,
si = threestage_results_early$si)
late_data <- data.frame(variant_name = threestage_results_late$variant_name,
gene = threestage_results_late$gene,
si = threestage_results_late$si)
met_data <- data.frame(variant_name = threestage_results_met$variant_name,
gene = threestage_results_met$gene,
si = threestage_results_met$si)
# Gene summaries restricted to genes with >1 distinct variant (summary_gene)
early_info <- summary_gene(early_data)
late_info <- summary_gene(late_data)
met_info <- summary_gene(met_data)
# Same tables built from recurrent variants only (MAF count > 1 per stage)
early_data_recur <- data.frame(variant_name = threestage_results_early_recur$variant_name,
gene = threestage_results_early_recur$gene,
si = threestage_results_early_recur$si)
late_data_recur <- data.frame(variant_name = threestage_results_late_recur$variant_name,
gene = threestage_results_late_recur$gene,
si = threestage_results_late_recur$si)
met_data_recur <- data.frame(variant_name = threestage_results_met_recur$variant_name,
gene = threestage_results_met_recur$gene,
si = threestage_results_met_recur$si)
early_info_recur <- summary_gene_recur(early_data_recur)
late_info_recur <- summary_gene_recur(late_data_recur)
met_info_recur <- summary_gene_recur(met_data_recur)
#Filtering out all genes that have NO recurrent variants,
#aka filtering in genes that have at least ONE recurrent variant
early_info <- early_info[which(early_info$gene %in% early_info_recur$gene),]
late_info <- late_info[which(late_info$gene %in% late_info_recur$gene),]
met_info <- met_info[which(met_info$gene %in% met_info_recur$gene),]
# Replace NA entries of a vector with `fill` (default 0).
# Direct subassignment (rather than ifelse()) preserves the vector's
# attributes and is faster; results are identical for plain vectors.
fill_na <- function(x, fill = 0) {
  x[is.na(x)] <- fill
  x
}
# Outer-join early/late gene summaries into a primary-tumor table; genes
# absent in one stage get NA columns from the join, filled with 0 so the
# combined counts/means below are defined.
prim_info <- merge(early_info, late_info, by = "gene", all = T,
suffixes = c(".e", ".l")) %>%
mutate_at(c("cum_si.e", "cum_si.l",
"mean_si.e", "mean_si.l",
"sd.e", "sd.l",
"n_variant.e", "n_variant.l"), fill_na) %>%
mutate(n_variant_prim = n_variant.e + n_variant.l,
mean_si_prim = (cum_si.e + cum_si.l) / n_variant_prim) %>%
arrange(desc(n_variant_prim))
# Tag metastasis columns with ".m", restoring the join key name afterwards
colnames(met_info) <- paste(colnames(met_info), ".m", sep = "")
colnames(met_info)[1] <- "gene"
# Full three-stage outer join with 0-filled gaps plus overall totals/means
stage_merge <- merge(prim_info, met_info, by = "gene", all = T) %>%
mutate_at(c("cum_si.e", "cum_si.l", "cum_si.m",
"mean_si.e", "mean_si.l", "mean_si.m",
"sd.e", "sd.l", "sd.m",
"n_variant.e", "n_variant.l", "n_variant.m"), fill_na) %>%
mutate(n_variant_total = n_variant.e + n_variant.l + n_variant.m,
mean_si_total = (cum_si.e + cum_si.l + cum_si.m) / n_variant_total) %>%
arrange(desc(n_variant_total))
########################################################################################
# Early
# Top 10 genes by mean early-stage SI
stage_merge_early_ordered <- stage_merge[order(-stage_merge$mean_si.e),]
selected_early_genes <- stage_merge_early_ordered$gene[1:10]
#select all variants within gene list
# (columns after select(1,2,7,6,5): variant_name, si, progression, maf, gene)
early_list.e <- threestage_results_early %>%
filter(gene %in% selected_early_genes) %>%
select(1,2,7,6,5)
early_list.l <- threestage_results_late %>%
filter(gene %in% selected_early_genes) %>%
select(1,2,7,6,5)
early_list.m <- threestage_results_met %>%
filter(gene %in% selected_early_genes) %>%
select(1,2,7,6,5)
# Unify the stage-specific MAF column name before stacking the three stages
colnames(early_list.e)[4] <- "maf_freq"
colnames(early_list.l)[4] <- "maf_freq"
colnames(early_list.m)[4] <- "maf_freq"
early_list <- rbind(early_list.e, early_list.l)
early_list <- rbind(early_list, early_list.m)
#set order of genes for plot
early_list$gene <- early_list$gene %>%
factor(levels = selected_early_genes)
# Jittered/dodged SI per gene, colored by stage; grey verticals separate
# gene columns; y axis labels use the plotmath `scientific` formatter
early_jitter <- ggplot(early_list, aes(x=gene, y=si, color=progression)) +
geom_point(position = position_jitterdodge(jitter.width = 0.1))+
xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
panel.border = element_blank(),
legend.position = c(0.95, 0.85))+
scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
geom_vline(xintercept=seq(1.5, length(unique(early_list$gene))-0.5, 1),
lwd=.5, colour="lightgrey") +
scale_y_continuous(labels=scientific, limits = c(-0.8e4, 1e5))
early_jitter
########################################################################################
# Late
# Top 10 genes by mean late-stage SI
stage_merge_late_ordered <- stage_merge[order(-stage_merge$mean_si.l),]
selected_late_genes <- stage_merge_late_ordered$gene[1:10]
#select all variants within gene list
late_list.e <- threestage_results_early %>%
filter(gene %in% selected_late_genes) %>%
select(1,2,7,6,5)
late_list.l <- threestage_results_late %>%
filter(gene %in% selected_late_genes) %>%
select(1,2,7,6,5)
late_list.m <- threestage_results_met %>%
filter(gene %in% selected_late_genes) %>%
select(1,2,7,6,5)
colnames(late_list.e)[4] <- "maf_freq"
colnames(late_list.l)[4] <- "maf_freq"
colnames(late_list.m)[4] <- "maf_freq"
late_list <- rbind(late_list.e, late_list.l)
late_list <- rbind(late_list, late_list.m)
#set order of genes for plot
late_list$gene <- late_list$gene %>%
factor(levels = selected_late_genes)
#Dummy points
# Placeholder rows with near-zero si (0.001) for specific gene/stage
# combinations; presumably so every gene shows all stage groups in the
# dodged plot — TODO confirm intent
dummy_OR4N4.e <- list("OR4N4 Variant.e", as.double(0.001), "Early", "1", "OR4N4")
dummy_CHRNA6.e <- list("CHRNA6 Variant.e", as.double(0.001), "Early", "1", "CHRNA6")
late_list <- late_list %>%
rbind(dummy_OR4N4.e) %>%
rbind(dummy_CHRNA6.e)
library(ggplot2)
late_jitter<- ggplot(late_list, aes(x=gene, y=si, color=progression)) +
geom_point(position = position_jitterdodge(jitter.width = 0.1))+
xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
panel.border = element_blank(),
legend.position = c(0.95, 0.85))+
scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
geom_vline(xintercept=seq(1.5, length(unique(late_list$gene))-0.5, 1),
lwd=.5, colour="lightgrey") +
scale_y_continuous(labels=scientific, limits = c(-.24e4, 3e4), breaks = c(0, 1e4, 1.5e4, 2e4, 3e4))
late_jitter
########################################################################################
# Metastasis
# Top 10 genes by mean metastasis SI
stage_merge_met_ordered <- stage_merge[order(-stage_merge$mean_si.m),]
selected_met_genes <- stage_merge_met_ordered$gene[1:10]
#select all variants within gene list
met_list.e <- threestage_results_early %>%
filter(gene %in% selected_met_genes) %>%
select(1,2,7,6,5)
met_list.l <- threestage_results_late %>%
filter(gene %in% selected_met_genes) %>%
select(1,2,7,6,5)
met_list.m <- threestage_results_met %>%
filter(gene %in% selected_met_genes) %>%
select(1,2,7,6,5)
colnames(met_list.e)[4] <- "maf_freq"
colnames(met_list.l)[4] <- "maf_freq"
colnames(met_list.m)[4] <- "maf_freq"
met_list <- rbind(met_list.e, met_list.l)
met_list <- rbind(met_list, met_list.m)
#set order of genes for plot
met_list$gene <- met_list$gene %>%
factor(levels = selected_met_genes)
#Dummy points
# Placeholder rows with near-zero si (0.001) for specific gene/stage
# combinations; presumably so every gene shows all stage groups in the
# dodged plot — TODO confirm intent
dummy_ORC3.e <- list("ORC3 Variant.e", as.double(0.001), "Early", "1", "ORC3")
dummy_ORC3.l <- list("ORC3 Variant.l", as.double(0.001), "Late", "1", "ORC3")
dummy_ZNF780B.l <- list("ZNF780B Variant.l", as.double(0.001), "Late", "1", "ZNF780B")
dummy_DIMT1.e <- list("DIMT1 Variant.e", as.double(0.001), "Early", "1", "DIMT1")
dummy_DIMT1.l <- list("DIMT1 Variant.l", as.double(0.001), "Late", "1", "DIMT1")
dummy_KRTAP13_3.l <- list("KRTAP13-3 Variant.l", as.double(0.001), "Late", "1", "KRTAP13-3")
dummy_ZNF714.e <- list("ZNF714 Variant.e", as.double(0.001), "Early", "1", "ZNF714")
dummy_GRB7.e <- list("GRB7 Variant.e", as.double(0.001), "Early", "1", "GRB7")
dummy_APCS.e <- list("APCS Variant.e", as.double(0.001), "Early", "1", "APCS")
met_list <- met_list %>%
rbind(dummy_ORC3.e) %>%
rbind(dummy_ORC3.l) %>%
rbind(dummy_ZNF780B.l)%>%
rbind(dummy_DIMT1.e)%>%
rbind(dummy_DIMT1.l)%>%
rbind(dummy_KRTAP13_3.l)%>%
rbind(dummy_ZNF714.e)%>%
rbind(dummy_GRB7.e)%>%
rbind(dummy_APCS.e)
library(ggplot2)
met_jitter <- ggplot(met_list, aes(x=gene, y=si, color=progression)) +
geom_point(position = position_jitterdodge(jitter.width = 0.1))+
xlab("Gene or protein") + ylab("Scaled selection coefficient") + theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(color = "black"),
panel.border = element_blank(),
legend.position = c(0.95, 0.85))+
scale_color_discrete(name = "Stage", labels = c("Lower-risk", "Higher-risk", "Metastasis")) +
geom_vline(xintercept=seq(1.5, length(unique(met_list$gene))-0.5, 1),
lwd=.5, colour="lightgrey") +
scale_y_continuous(labels=scientific, limits = c(-0.9e4, 1.051e5))
met_jitter
##############################################################################
#Wilcoxon test
# Collect, for one progression-stage label, the SI values of every gene in
# `df` as a named list (one element per gene, in order of first appearance).
# Replaces nine hand-written grow-in-a-loop blocks; output names and element
# order match the originals (names came from unique(df$gene)).
collect_si <- function(df, stage) {
  genes <- as.character(unique(df$gene))
  out <- lapply(genes, function(g) {
    df %>%
      filter(gene == g) %>%
      filter(progression == stage) %>%
      pull(si)
  })
  names(out) <- genes
  out
}
# Per-gene SI vectors for each top-10 gene set, split by progression stage
early.e <- collect_si(early_list, "Early")
early.l <- collect_si(early_list, "Late")
early.m <- collect_si(early_list, "Metastasis")
late.e <- collect_si(late_list, "Early")
late.l <- collect_si(late_list, "Late")
late.m <- collect_si(late_list, "Metastasis")
met.e <- collect_si(met_list, "Early")
met.l <- collect_si(met_list, "Late")
met.m <- collect_si(met_list, "Metastasis")
#Tests for Lower-risk
# Two-sided Wilcoxon rank-sum p-value for each gene, comparing the si
# values of two stages. `a` and `b` are parallel per-gene lists of si
# vectors (same genes, same order). seq_along() replaces the original
# hard-coded 1:10, so the tests follow the actual number of genes.
pairwise_wilcox_p <- function(a, b) {
  vapply(seq_along(a), function(i) wilcox.test(a[[i]], b[[i]])$p.value, numeric(1))
}
wilcox_early.e_l <- pairwise_wilcox_p(early.e, early.l)
wilcox_early.e_m <- pairwise_wilcox_p(early.e, early.m)
wilcox_early.l_m <- pairwise_wilcox_p(early.l, early.m)
early.wilcox <- data.frame(early_late = wilcox_early.e_l,
                           early_met = wilcox_early.e_m,
                           late_met = wilcox_early.l_m)
row.names(early.wilcox) <- unique(early_list$gene)
#Tests for Higher-risk
wilcox_late.e_l <- pairwise_wilcox_p(late.e, late.l)
wilcox_late.e_m <- pairwise_wilcox_p(late.e, late.m)
wilcox_late.l_m <- pairwise_wilcox_p(late.l, late.m)
late.wilcox <- data.frame(early_late = wilcox_late.e_l,
                          early_met = wilcox_late.e_m,
                          late_met = wilcox_late.l_m)
row.names(late.wilcox) <- unique(late_list$gene)
#Tests for Metastasis
wilcox_met.e_l <- pairwise_wilcox_p(met.e, met.l)
wilcox_met.e_m <- pairwise_wilcox_p(met.e, met.m)
wilcox_met.l_m <- pairwise_wilcox_p(met.l, met.m)
met.wilcox <- data.frame(early_late = wilcox_met.e_l,
                         early_met = wilcox_met.e_m,
                         late_met = wilcox_met.l_m)
row.names(met.wilcox) <- unique(met_list$gene)
# write.table(early.wilcox, file = "wilcox_early.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
# write.table(late.wilcox, file = "wilcox_late.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
# write.table(met.wilcox, file = "wilcox_met.txt", sep = "\t",
#             row.names = TRUE, col.names = TRUE, quote = FALSE)
###########################################################################
#Extracting data for Bayesian approach
#Extracting SI + 95% CI for each variant in each of the featured top 10 genes
# Rename columns 3/4/6 of a reordered results table to the standard
# ci_low_95 / ci_high_95 / maf_freq labels (nine identical triples of
# colnames<- calls collapsed into one helper).
relabel_ci_cols <- function(df) {
  colnames(df)[c(3, 4, 6)] <- c("ci_low_95", "ci_high_95", "maf_freq")
  df
}
# Subset one stage's results to the selected genes, reorder the columns so
# the 95% CI bounds sit in positions 3/4 and MAF frequency in position 6,
# then apply the standard column names.
select_gene_variants <- function(results, genes) {
  results %>%
    filter(gene %in% genes) %>%
    select(1, 2, 3, 4, 7, 6, 5) %>%
    relabel_ci_cols()
}
# NOTE(review): early_list / late_list / met_list are redefined here with a
# different column layout than the plotting section earlier in the script.
#Early
early_list.e <- select_gene_variants(threestage_results_early, selected_early_genes)
early_list.l <- select_gene_variants(threestage_results_late, selected_early_genes)
early_list.m <- select_gene_variants(threestage_results_met, selected_early_genes)
early_list <- rbind(early_list.e, early_list.l, early_list.m)
#Late
late_list.e <- select_gene_variants(threestage_results_early, selected_late_genes)
late_list.l <- select_gene_variants(threestage_results_late, selected_late_genes)
late_list.m <- select_gene_variants(threestage_results_met, selected_late_genes)
late_list <- rbind(late_list.e, late_list.l, late_list.m)
#Metastasis
met_list.e <- select_gene_variants(threestage_results_early, selected_met_genes)
met_list.l <- select_gene_variants(threestage_results_late, selected_met_genes)
met_list.m <- select_gene_variants(threestage_results_met, selected_met_genes)
met_list <- rbind(met_list.e, met_list.l, met_list.m)
############################################################################
# Export one tab-separated table of variants per disease stage.
write.table(early_list, file = "lower-risk_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(late_list, file = "higher-risk_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
write.table(met_list, file = "metastatic_gene_variants.txt", sep = "\t",
            row.names = FALSE, col.names = TRUE, quote = FALSE)
##############################################################################
# Variants
# Top-10 recurrent variants per stage.
early_top10 <- threestage_results_early_recur[1:10,]
late_top10 <- threestage_results_late_recur[1:10,]
met_top10 <- threestage_results_met_recur[1:10,]
# Single-column jitter panel of the top-10 variants' scaled selection
# coefficients for one stage. The three per-stage panels below differ only
# in data, point color and y-axis scaling, so build them from one helper.
#
# df          - data frame with `si` and `progression` columns
# point_color - single color for all points (the stage color)
# y_limits    - length-2 y-axis limits
# y_breaks    - optional explicit y-axis breaks; waiver() keeps ggplot's
#               default break computation
make_unique_jitter <- function(df, point_color, y_limits, y_breaks = waiver()) {
  ggplot(data = df, aes(x = 1, y = si, color = progression)) +
    geom_jitter(position = position_jitter(0.0, seed = 5)) +
    theme(axis.ticks.x = element_blank(),
          panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          panel.background = element_blank(), axis.line = element_line(color = "black"),
          legend.position = "none") +
    xlab("Top 10 variants") + ylab("") +
    scale_color_manual(values = point_color) +
    scale_x_continuous(limits = c(0.975, 1.25)) +
    scale_y_continuous(labels = scientific, limits = y_limits, breaks = y_breaks)
}
unique_jitter_early_all <- make_unique_jitter(early_top10, "#F8766D", c(-0.8e4, 1e5))
unique_jitter_late_all <- make_unique_jitter(
  late_top10, "#00BA38", c(-.24e4, 3e4),
  y_breaks = c(0, 1e4, 1.5e4, 2e4, 3.0e4))
unique_jitter_met_all <- make_unique_jitter(met_top10, "#619CFF", c(-0.9e4, 1.051e5))
#########################################
# Bold, left-aligned stage heading drawn with cowplot. The three headings
# were identical except for their text, so build them from one helper.
make_stage_title <- function(label) {
  ggdraw() +
    draw_label(
      label,
      fontface = 'bold',
      x = 0,
      hjust = 0
    ) +
    theme(
      # add margin on the left of the drawing canvas,
      # so title is aligned with left edge of first plot
      plot.margin = margin(0, 0, 0, 49)
    )
}
early_title <- make_stage_title("Lower-risk")
late_title <- make_stage_title("Higher-risk")
met_title <- make_stage_title("Metastases")
#######################################
#Combining all the graphs + titles
# One stage row: the wide per-gene jitter panel next to the narrow
# top-10-variant panel, with panel labels (e.g. c("A", "B")).
combine_stage <- function(main_plot, variant_plot, labels) {
  plot_grid(main_plot, variant_plot,
            align = "h", axis = "t", nrow = 1, ncol = 2, scale = 1,
            rel_widths = c(5, 1), labels = labels, label_size = 10)
}
# Stack a stage title above its combined row.
add_stage_title <- function(title_plot, combined_plot) {
  plot_grid(title_plot, combined_plot,
            align = "h", axis = "t", nrow = 2, ncol = 1, scale = 1,
            rel_heights = c(0.1, 1))
}
early_combined <- combine_stage(early_jitter, unique_jitter_early_all, c("A", "B"))
early_combined_title <- add_stage_title(early_title, early_combined)
late_combined <- combine_stage(late_jitter, unique_jitter_late_all, c("C", "D"))
late_combined_title <- add_stage_title(late_title, late_combined)
met_combined <- combine_stage(met_jitter, unique_jitter_met_all, c("E", "F"))
met_combined_title <- add_stage_title(met_title, met_combined)
# Full three-stage figure, then save it.
bar_box_combined <- plot_grid(early_combined_title, late_combined_title, met_combined_title,
                              align = "h", axis = "t", nrow = 3, ncol = 1, scale = 1)
bar_box_combined
ggsave("PRAD_figures/bar_jitter_test.png", width = 12.5, height = 12.5)
|
context("map_layers")
test_that("layerId includes all layers", {
  # Canonical list of layer types the package is expected to support.
  layers <- c(
    "animated_arc"
    , "animated_line"
    , "arc"
    , "bitmap"
    , "column"
    , "geojson"
    , "greatcircle"
    , "grid"
    , "h3"
    , "heatmap"
    , "hexagon"
    , "line"
    , "mesh"
    , "path"
    , "pointcloud"
    , "polygon"
    , "scatterplot"
    , "screengrid"
    , "terrain"
    , "text"
    , "tile3d"
    , "title"
    , "trips"
  )
  # expect_equal( layers, mapdeck:::mapdeck_layers() )
  # layerId() should accept every known layer type and return a result
  # named after it. (The original repeated this pair of lines verbatim at
  # the end of the test; the duplicate has been removed.)
  res <- sapply(layers, function(x) { mapdeck:::layerId(layer_id = "test", layer = x) })
  expect_equal(layers, names(res))
  # Every map_layer_*.R source file should correspond to a declared layer;
  # parameter_checks and sf are shared infrastructure, not layers.
  f <- list.files(path = system.file("./R", package = "mapdeck"))
  # Anchored pattern: the original "map_layer_*" regex means "map_layer"
  # followed by zero or more underscores, anywhere in the name.
  f <- f[grepl("^map_layer_", f)]
  f <- gsub("map_layer_", "", f)
  f <- gsub("\\.R", "", f)
  f <- sort(f)
  f <- setdiff(f, c("parameter_checks", "sf"))
  expect_equal(f, sort(layers))
})
# test_that("layer_ids required", {
#
# df <- data.frame(
# origin = 1
# , destination = 2
# , lat_from = 1
# , lon_from = 1
# , lat_to = 2
# , lon_to = 2
# , polyline = 'a'
# )
# m <- mapdeck(token = 'abc')
# layer_error <- 'argument "layer_id" is missing, with no default'
#
# expect_error(
# add_arc(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'))
# , layer_error
# )
# expect_error(
# add_geojson(m, data = df)
# , layer_error
# )
# expect_error(
# add_grid(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_line(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'))
# , layer_error
# )
# expect_error(
# add_path(m, data = df, polyline = 'polyline')
# , layer_error
# )
# expect_error(
# add_pointcloud(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_polygon(m, data = df, polyline = 'polyline')
# , layer_error
# )
# expect_error(
# add_scatterplot(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_screengrid(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_text(m, data = df, text = 'polylne', polyline = 'polyline')
# , layer_error
# )
# })
test_that("layers are plotted", {
  df <- data.frame(
    origin = 1
    , destination = 2
    , lat_from = 1
    , lon_from = 1
    , lat_to = 2
    , lon_to = 2
    , polyline = 'a'
  )
  m <- mapdeck(token = 'abc')
  layer_id <- 'layer'
  # Every add_* call should return the map widget itself; the ten identical
  # expect_true lines are collapsed into one local expectation helper.
  expect_mapdeck <- function(layer) {
    expect_true(all(attr(layer, 'class') == c("mapdeck", "htmlwidget")))
  }
  expect_mapdeck(add_arc(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'), layer_id = layer_id))
  # NOTE(review): `geojson` is not defined in this test; presumably it is an
  # example dataset exported by mapdeck -- confirm it is exported.
  expect_mapdeck(add_geojson(m, data = geojson, layer_id = layer_id))
  expect_mapdeck(add_grid(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_line(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'), layer_id = layer_id))
  expect_mapdeck(add_path(m, data = df, polyline = 'polyline', layer_id = layer_id))
  expect_mapdeck(add_pointcloud(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_polygon(m, data = df, polyline = 'polyline', layer_id = layer_id))
  expect_mapdeck(add_scatterplot(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_screengrid(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_text(m, data = df, text = 'polyline', polyline = 'polyline', layer_id = layer_id))
})
| /tests/testthat/test-map_layers.R | no_license | SymbolixAU/mapdeck | R | false | false | 4,222 | r | context("map_layers")
test_that("layerId includes all layers", {
  # Canonical list of layer types the package is expected to support.
  layers <- c(
    "animated_arc"
    , "animated_line"
    , "arc"
    , "bitmap"
    , "column"
    , "geojson"
    , "greatcircle"
    , "grid"
    , "h3"
    , "heatmap"
    , "hexagon"
    , "line"
    , "mesh"
    , "path"
    , "pointcloud"
    , "polygon"
    , "scatterplot"
    , "screengrid"
    , "terrain"
    , "text"
    , "tile3d"
    , "title"
    , "trips"
  )
  # expect_equal( layers, mapdeck:::mapdeck_layers() )
  # layerId() should accept every known layer type and return a result
  # named after it. (The original repeated this pair of lines verbatim at
  # the end of the test; the duplicate has been removed.)
  res <- sapply(layers, function(x) { mapdeck:::layerId(layer_id = "test", layer = x) })
  expect_equal(layers, names(res))
  # Every map_layer_*.R source file should correspond to a declared layer;
  # parameter_checks and sf are shared infrastructure, not layers.
  f <- list.files(path = system.file("./R", package = "mapdeck"))
  # Anchored pattern: the original "map_layer_*" regex means "map_layer"
  # followed by zero or more underscores, anywhere in the name.
  f <- f[grepl("^map_layer_", f)]
  f <- gsub("map_layer_", "", f)
  f <- gsub("\\.R", "", f)
  f <- sort(f)
  f <- setdiff(f, c("parameter_checks", "sf"))
  expect_equal(f, sort(layers))
})
# test_that("layer_ids required", {
#
# df <- data.frame(
# origin = 1
# , destination = 2
# , lat_from = 1
# , lon_from = 1
# , lat_to = 2
# , lon_to = 2
# , polyline = 'a'
# )
# m <- mapdeck(token = 'abc')
# layer_error <- 'argument "layer_id" is missing, with no default'
#
# expect_error(
# add_arc(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'))
# , layer_error
# )
# expect_error(
# add_geojson(m, data = df)
# , layer_error
# )
# expect_error(
# add_grid(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_line(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'))
# , layer_error
# )
# expect_error(
# add_path(m, data = df, polyline = 'polyline')
# , layer_error
# )
# expect_error(
# add_pointcloud(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_polygon(m, data = df, polyline = 'polyline')
# , layer_error
# )
# expect_error(
# add_scatterplot(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_screengrid(m, data = df, lon = 'lon_from', lat = 'lat_from')
# , layer_error
# )
# expect_error(
# add_text(m, data = df, text = 'polylne', polyline = 'polyline')
# , layer_error
# )
# })
test_that("layers are plotted", {
  df <- data.frame(
    origin = 1
    , destination = 2
    , lat_from = 1
    , lon_from = 1
    , lat_to = 2
    , lon_to = 2
    , polyline = 'a'
  )
  m <- mapdeck(token = 'abc')
  layer_id <- 'layer'
  # Every add_* call should return the map widget itself; the ten identical
  # expect_true lines are collapsed into one local expectation helper.
  expect_mapdeck <- function(layer) {
    expect_true(all(attr(layer, 'class') == c("mapdeck", "htmlwidget")))
  }
  expect_mapdeck(add_arc(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'), layer_id = layer_id))
  # NOTE(review): `geojson` is not defined in this test; presumably it is an
  # example dataset exported by mapdeck -- confirm it is exported.
  expect_mapdeck(add_geojson(m, data = geojson, layer_id = layer_id))
  expect_mapdeck(add_grid(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_line(m, data = df, origin = c('lon_from', 'lat_from'), destination = c('lon_to', 'lat_to'), layer_id = layer_id))
  expect_mapdeck(add_path(m, data = df, polyline = 'polyline', layer_id = layer_id))
  expect_mapdeck(add_pointcloud(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_polygon(m, data = df, polyline = 'polyline', layer_id = layer_id))
  expect_mapdeck(add_scatterplot(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_screengrid(m, data = df, lon = 'lon_from', lat = 'lat_from', layer_id = layer_id))
  expect_mapdeck(add_text(m, data = df, text = 'polyline', polyline = 'polyline', layer_id = layer_id))
})
|
#####
# PACKAGES
#####
# NOTE(review): rgdal was retired from CRAN in 2023; sf::st_read is the
# maintained replacement for readOGR -- consider migrating.
library(rgdal)
library(readr)
library(readxl)
library(ggplot2)
library(dplyr)
#####
# DIRECTORIES
#####
#wd <- getwd() # Be in the mortality_map/code directory -- change only if needed
#####
# LOAD CHEN'S DATA
#####
# Mortality / life-expectancy table, one row per country.
df <- read_excel('../data/dataofcountries.xls')
#####
# LOAD IN THE WORLD SHAPEFILE
#####
# SpatialPolygonsDataFrame of country borders; attributes live in world@data.
world <- readOGR('../data/countries_shape_file/', 'world_borders')
#####
# EXPLORE THE SHAPEFILE
#####
# Examine the format
head(world@data)
# Look at distribution of one variable
hist(world@data$POP_CNTRY, breaks = 30, col = 'grey', border = 'white',
xlab = 'Population', main = 'Distribution of countries\' population')
# Make a simple map
plot(world, col = 'grey', border = 'white')
# Make a new variable in world: population density
# this will be each countries' population divided by its area
world@data$POP_DEN <- world@data$POP_CNTRY / world@data$AREA
# Examine the distribution of population densities (this time using ggplot instead of base graphics)
ggplot(data = world@data, aes(x = POP_DEN)) +
geom_density(fill = 'blue', alpha = 0.5) +
theme_bw() +
xlab('People per square kilometer') +
ggtitle('Distribution of population densities') +
scale_x_log10() # put on log 10 scale
# Make a choropleth in which each country is shaded by the square root of its population density
# Each country indexes into a red->yellow ramp by its (integer) sqrt density.
world@data$POP_DEN_SQRT <- ceiling(sqrt(world@data$POP_DEN))
colors <- colorRampPalette(c('red', 'yellow'))(max(world@data$POP_DEN_SQRT)) %>%
adjustcolor(alpha.f = 0.9)
plot(world, col = colors, border = NA, main = 'Population density')
legend('bottomleft',
fill = colors[c(1, length(colors)/2, length(colors))],
legend = rev(round(c(1, length(colors)/2, length(colors)) ^ 2, digits = -2)),
title = 'Inhabitants per square kilometer',
bty = 'n',
cex = 0.5,
border = NA)
# Fuzzy-match each element of `x` to its closest element of `y` by
# generalized Levenshtein (edit) distance, as computed by adist().
#
# The defaults lazily reference the globals `world` and `df`, so the
# function can also be called with explicit character vectors.
#
# Returns a list with two components parallel to `x`:
#   name     - for each x[i], the element of y with the smallest edit
#              distance (ties broken by the first such element)
#   distance - the corresponding minimal edit distance (0 = exact match)
fuzzy_match <- function(x = world@data$CNTRY_NAME,
                        y = df$Country){
  # Create matrix of "string distance" between names:
  # rows = elements of x, columns = candidate matches in y.
  distances <- adist(x = x, y = y)
  # which.min() already returns a single (first) index per row, so the
  # original possibs[1] wrapper was redundant.
  best_matches <- apply(distances, 1, which.min)
  # Return the actual names of the best matches
  names <- y[best_matches]
  distance <- apply(distances, 1, min)
  return_object <- list('name' = names, 'distance' = distance)
  return(return_object)
}
# Now, use our algorithm to populate a new column in "world"
# with the names from "df" (so that we can correctly merge)
results <- fuzzy_match()
world@data$Country <- results$name
# We can also populate a column which shows us the "string distance"
# (ie, how closely matched the terms were)
# high numbers are bad, low numbers are good. 0 is a perfect match
world@data$accuracy <- results$distance
# Now let's take another look at our world shapefile
head(world@data)
# And examine the accuracy results
hist(world@data[!duplicated(world@data$FIPS_CNTRY),]$accuracy, breaks = 30,
xlab = 'Character transformation',
main = 'Accuracy (0 = perfect)',
col = 'grey',
border = 'white')
# So, our algorithm was pretty good, but there are still a few countries with
# pretty bad results. For example, let's just look at those
# countries who took more than 10 character transformations
world@data[which(world@data$accuracy >= 10 & !duplicated(world@data$FIPS_CNTRY)),
c('CNTRY_NAME', 'Country')]
# Clearly, the above isn't good!
# However, the algorithm did pretty good on most
world@data[which(world@data$accuracy <=2 & !duplicated(world@data$FIPS_CNTRY)),
c('CNTRY_NAME', 'Country')][1:10,]
# For now, we'll use the algorithm. If you want a perfect match,
# you'll need to go through your spreadsheet (dataofcountries.xls)
# and rename "Country" so that each name is one of the
# elements of the "correct_countries" vector
correct_countries <- unique(sort(world@data$CNTRY_NAME))
# (I've printed the entire vector at the end of this document)
# and then re-run the code
#####
# MERGE
#####
# Anyway, now that MOST of the data is matched, we'll merge
# NOTE(review): writing left_join output back into @data relies on the join
# preserving row order AND row count; duplicate Country values in df would
# add rows and corrupt the polygon/attribute alignment -- confirm df has one
# row per country.
world@data <- left_join(x = world@data, y = df)
# Now we can see that we've brought the data in from df into world@data
head(world@data)
# Since the algorithm wasn't able to match ALL the countries,
# we're only going to keep those perfect accuracy
world_accurate <- world[which(world@data$accuracy == 0),]
# Let's see which countries we were able to keep
plot(world)
plot(world_accurate, col = 'red', add = TRUE)
legend('left',
fill = c('white', 'red'),
legend = c('Inaccurate match', 'Accurate match'),
cex = 0.7)
#####
# PLOT
#####
# Let's define a simple function for plotting a choropleth
# Draw a world choropleth for one mortality / life-expectancy variable.
#
# shape      - SpatialPolygonsDataFrame holding the variable (colored layer)
# background - SpatialPolygonsDataFrame drawn in grey underneath (countries
#              with no data remain grey)
# colors     - endpoints of the color ramp; reversed for mortality so that
#              "bad" values get the same end of the ramp in both map types
# variable   - one of the four MR*/LE* columns merged into shape@data
choro <- function(shape = world_accurate,
                  background = world,
                  colors = c('red', 'yellow', 'blue'),
                  variable = 'MR2000_100000population'){
  variables <- c('MR2000_100000population', 'MR2012_100000population',
                 'LE2000_year', 'LE2012_year')
  if(!variable %in% variables){
    stop(paste0('variable must be one of ', paste0(variables, collapse = ', ')))
  }
  plot(background, col = 'grey', border = NA)
  # Title: the variable name encodes metric (MR/LE) and year (chars 3-6).
  # Scalar condition, so plain if/else instead of ifelse(); also fixes the
  # 'Morality' -> 'Mortality' typo in the displayed title.
  is_mortality <- substr(variable, 1, 2) == 'MR'
  tit <- paste0(
    if (is_mortality) 'Mortality' else 'Life expectancy',
    ' in ',
    substr(variable, 3, 6)
  )
  # One ramp color per (ceiling-rounded) value of the variable; each country
  # indexes into the ramp by its own value (NA values get NA = not drawn).
  var <- ceiling(shape@data[, variable])
  cols <- colorRampPalette(colors)(max(var, na.rm = TRUE))
  # (reverse color scheme if mortality)
  if(is_mortality){
    cols <- rev(cols)
  }
  var_cols <- cols[var]
  # Plot
  plot(shape, col = var_cols, add = TRUE, border = NA)
  title(main = tit)
  # Legend at the quartiles of the color scale
  legend_vec <- seq_along(cols)
  legend_quant <- round(as.numeric(quantile(legend_vec, na.rm = TRUE)))
  legend('left',
         fill = c(cols[legend_quant], 'grey'),
         legend = c(legend_quant, 'No data'),
         cex = 0.7,
         border = NA)
}
# Now let's plot all of our variables
# (matched countries only; unmatched countries stay grey)
variables <- c('MR2000_100000population', 'MR2012_100000population',
'LE2000_year', 'LE2012_year')
choro(variable = variables[1])
choro(variable = variables[2])
choro(variable = variables[3])
choro(variable = variables[4])
# Alternatively, we could plot all the countries (even those we know weren't correctly matched)
choro(variable = variables[1], shape = world)
choro(variable = variables[2], shape = world)
choro(variable = variables[3], shape = world)
choro(variable = variables[4], shape = world)
#####
# NEXT STEPS
#####
# In order to get the matches perfect,
# go through the spreadsheet with your data
# and rename all the countries to one of the following:
print(correct_countries)
| /chen/mortality_map/code/make_maps.R | no_license | joebrew/uf | R | false | false | 7,225 | r | #####
# PACKAGES
#####
# NOTE(review): rgdal was retired from CRAN in 2023; sf::st_read is the
# maintained replacement for readOGR -- consider migrating.
library(rgdal)
library(readr)
library(readxl)
library(ggplot2)
library(dplyr)
#####
# DIRECTORIES
#####
#wd <- getwd() # Be in the mortality_map/code directory -- change only if needed
#####
# LOAD CHEN'S DATA
#####
# Mortality / life-expectancy table, one row per country.
df <- read_excel('../data/dataofcountries.xls')
#####
# LOAD IN THE WORLD SHAPEFILE
#####
# SpatialPolygonsDataFrame of country borders; attributes live in world@data.
world <- readOGR('../data/countries_shape_file/', 'world_borders')
#####
# EXPLORE THE SHAPEFILE
#####
# Examine the format
head(world@data)
# Look at distribution of one variable
hist(world@data$POP_CNTRY, breaks = 30, col = 'grey', border = 'white',
xlab = 'Population', main = 'Distribution of countries\' population')
# Make a simple map
plot(world, col = 'grey', border = 'white')
# Make a new variable in world: population density
# this will be each countries' population divided by its area
world@data$POP_DEN <- world@data$POP_CNTRY / world@data$AREA
# Examine the distribution of population densities (this time using ggplot instead of base graphics)
ggplot(data = world@data, aes(x = POP_DEN)) +
geom_density(fill = 'blue', alpha = 0.5) +
theme_bw() +
xlab('People per square kilometer') +
ggtitle('Distribution of population densities') +
scale_x_log10() # put on log 10 scale
# Make a choropleth in which each country is shaded by the square root of its population density
# Each country indexes into a red->yellow ramp by its (integer) sqrt density.
world@data$POP_DEN_SQRT <- ceiling(sqrt(world@data$POP_DEN))
colors <- colorRampPalette(c('red', 'yellow'))(max(world@data$POP_DEN_SQRT)) %>%
adjustcolor(alpha.f = 0.9)
plot(world, col = colors, border = NA, main = 'Population density')
legend('bottomleft',
fill = colors[c(1, length(colors)/2, length(colors))],
legend = rev(round(c(1, length(colors)/2, length(colors)) ^ 2, digits = -2)),
title = 'Inhabitants per square kilometer',
bty = 'n',
cex = 0.5,
border = NA)
# Fuzzy-match each element of `x` to its closest element of `y` by
# generalized Levenshtein (edit) distance, as computed by adist().
#
# The defaults lazily reference the globals `world` and `df`, so the
# function can also be called with explicit character vectors.
#
# Returns a list with two components parallel to `x`:
#   name     - for each x[i], the element of y with the smallest edit
#              distance (ties broken by the first such element)
#   distance - the corresponding minimal edit distance (0 = exact match)
fuzzy_match <- function(x = world@data$CNTRY_NAME,
                        y = df$Country){
  # Create matrix of "string distance" between names:
  # rows = elements of x, columns = candidate matches in y.
  distances <- adist(x = x, y = y)
  # which.min() already returns a single (first) index per row, so the
  # original possibs[1] wrapper was redundant.
  best_matches <- apply(distances, 1, which.min)
  # Return the actual names of the best matches
  names <- y[best_matches]
  distance <- apply(distances, 1, min)
  return_object <- list('name' = names, 'distance' = distance)
  return(return_object)
}
# Now, use our algorithm to populate a new column in "world"
# with the names from "df" (so that we can correctly merge)
results <- fuzzy_match()
world@data$Country <- results$name
# We can also populate a column which shows us the "string distance"
# (ie, how closely matched the terms were)
# high numbers are bad, low numbers are good. 0 is a perfect match
world@data$accuracy <- results$distance
# Now let's take another look at our world shapefile
head(world@data)
# And examine the accuracy results
hist(world@data[!duplicated(world@data$FIPS_CNTRY),]$accuracy, breaks = 30,
xlab = 'Character transformation',
main = 'Accuracy (0 = perfect)',
col = 'grey',
border = 'white')
# So, our algorithm was pretty good, but there are still a few countries with
# pretty bad results. For example, let's just look at those
# countries who took more than 10 character transformations
world@data[which(world@data$accuracy >= 10 & !duplicated(world@data$FIPS_CNTRY)),
c('CNTRY_NAME', 'Country')]
# Clearly, the above isn't good!
# However, the algorithm did pretty good on most
world@data[which(world@data$accuracy <=2 & !duplicated(world@data$FIPS_CNTRY)),
c('CNTRY_NAME', 'Country')][1:10,]
# For now, we'll use the algorithm. If you want a perfect match,
# you'll need to go through your spreadsheet (dataofcountries.xls)
# and rename "Country" so that each name is one of the
# elements of the "correct_countries" vector
correct_countries <- unique(sort(world@data$CNTRY_NAME))
# (I've printed the entire vector at the end of this document)
# and then re-run the code
#####
# MERGE
#####
# Anyway, now that MOST of the data is matched, we'll merge
# NOTE(review): writing left_join output back into @data relies on the join
# preserving row order AND row count; duplicate Country values in df would
# add rows and corrupt the polygon/attribute alignment -- confirm df has one
# row per country.
world@data <- left_join(x = world@data, y = df)
# Now we can see that we've brought the data in from df into world@data
head(world@data)
# Since the algorithm wasn't able to match ALL the countries,
# we're only going to keep those perfect accuracy
world_accurate <- world[which(world@data$accuracy == 0),]
# Let's see which countries we were able to keep
plot(world)
plot(world_accurate, col = 'red', add = TRUE)
legend('left',
fill = c('white', 'red'),
legend = c('Inaccurate match', 'Accurate match'),
cex = 0.7)
#####
# PLOT
#####
# Let's define a simple function for plotting a choropleth
# Draw a world choropleth for one mortality / life-expectancy variable.
#
# shape      - SpatialPolygonsDataFrame holding the variable (colored layer)
# background - SpatialPolygonsDataFrame drawn in grey underneath (countries
#              with no data remain grey)
# colors     - endpoints of the color ramp; reversed for mortality so that
#              "bad" values get the same end of the ramp in both map types
# variable   - one of the four MR*/LE* columns merged into shape@data
choro <- function(shape = world_accurate,
                  background = world,
                  colors = c('red', 'yellow', 'blue'),
                  variable = 'MR2000_100000population'){
  variables <- c('MR2000_100000population', 'MR2012_100000population',
                 'LE2000_year', 'LE2012_year')
  if(!variable %in% variables){
    stop(paste0('variable must be one of ', paste0(variables, collapse = ', ')))
  }
  plot(background, col = 'grey', border = NA)
  # Title: the variable name encodes metric (MR/LE) and year (chars 3-6).
  # Scalar condition, so plain if/else instead of ifelse(); also fixes the
  # 'Morality' -> 'Mortality' typo in the displayed title.
  is_mortality <- substr(variable, 1, 2) == 'MR'
  tit <- paste0(
    if (is_mortality) 'Mortality' else 'Life expectancy',
    ' in ',
    substr(variable, 3, 6)
  )
  # One ramp color per (ceiling-rounded) value of the variable; each country
  # indexes into the ramp by its own value (NA values get NA = not drawn).
  var <- ceiling(shape@data[, variable])
  cols <- colorRampPalette(colors)(max(var, na.rm = TRUE))
  # (reverse color scheme if mortality)
  if(is_mortality){
    cols <- rev(cols)
  }
  var_cols <- cols[var]
  # Plot
  plot(shape, col = var_cols, add = TRUE, border = NA)
  title(main = tit)
  # Legend at the quartiles of the color scale
  legend_vec <- seq_along(cols)
  legend_quant <- round(as.numeric(quantile(legend_vec, na.rm = TRUE)))
  legend('left',
         fill = c(cols[legend_quant], 'grey'),
         legend = c(legend_quant, 'No data'),
         cex = 0.7,
         border = NA)
}
# Now let's plot all of our variables
# (the four mappable columns accepted by choro())
variables <- c('MR2000_100000population', 'MR2012_100000population',
'LE2000_year', 'LE2012_year')
# One choropleth per variable, using only the accurately-matched countries
# (choro()'s default shape).
choro(variable = variables[1])
choro(variable = variables[2])
choro(variable = variables[3])
choro(variable = variables[4])
# Alternatively, we could plot all the countries (even those we know weren't correctly matched)
choro(variable = variables[1], shape = world)
choro(variable = variables[2], shape = world)
choro(variable = variables[3], shape = world)
choro(variable = variables[4], shape = world)
#####
# NEXT STEPS
#####
# In order to get the matches perfect,
# go through the spreadsheet with your data
# and rename all the countries to one of the following:
print(correct_countries)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{scripts_post_containers_clone}
\alias{scripts_post_containers_clone}
\title{Clone this Container Script}
\usage{
scripts_post_containers_clone(
id,
clone_schedule = NULL,
clone_triggers = NULL,
clone_notifications = NULL
)
}
\arguments{
\item{id}{integer required. The ID for the script.}
\item{clone_schedule}{boolean optional. If true, also copy the schedule to the new script.}
\item{clone_triggers}{boolean optional. If true, also copy the triggers to the new script.}
\item{clone_notifications}{boolean optional. If true, also copy the notifications to the new script.}
}
\value{
A list containing the following elements:
\item{id}{integer, The ID for the script.}
\item{name}{string, The name of the container.}
\item{type}{string, The type of the script (e.g Container)}
\item{createdAt}{string, The time this script was created.}
\item{updatedAt}{string, The time the script was last updated.}
\item{author}{list, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{state}{string, The status of the script's last run.}
\item{finishedAt}{string, The time that the script's last run finished.}
\item{category}{string, The category of the script.}
\item{projects}{array, An array containing the following fields:
\itemize{
\item id integer, The ID for the project.
\item name string, The name of the project.
}}
\item{parentId}{integer, The ID of the parent job that will trigger this script}
\item{userContext}{string, "runner" or "author", who to execute the script as when run as a template.}
\item{params}{array, An array containing the following fields:
\itemize{
\item name string, The variable's name as used within your code.
\item label string, The label to present to users when asking them for the value.
\item description string, A short sentence or fragment describing this parameter to the end user.
\item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom
\item required boolean, Whether this param is required.
\item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param.
\item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type.
\item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}`
}}
\item{arguments}{list, Parameter-value pairs to use when running this script. Only settable if this script has defined parameters.}
\item{isTemplate}{boolean, Whether other scripts use this one as a template.}
\item{templateDependentsCount}{integer, How many other scripts use this one as a template.}
\item{publishedAsTemplateId}{integer, The ID of the template that this script is backing.}
\item{fromTemplateId}{integer, The ID of the template script.}
\item{templateScriptName}{string, The name of the template script.}
\item{links}{list, A list containing the following elements:
\itemize{
\item details string, The details link to get more information about the script.
\item runs string, The runs link to get the run information list for this script.
}}
\item{schedule}{list, A list containing the following elements:
\itemize{
\item scheduled boolean, If the item is scheduled.
\item scheduledDays array, Day based on numeric value starting at 0 for Sunday.
\item scheduledHours array, Hours of the day it is scheduled on.
\item scheduledMinutes array, Minutes of the day it is scheduled on.
\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour.
}}
\item{notifications}{list, A list containing the following elements:
\itemize{
\item urls array, URLs to receive a POST request at job completion
\item successEmailSubject string, Custom subject line for success e-mail.
\item successEmailBody string, Custom body text for success e-mail, written in Markdown.
\item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully.
\item successEmailFromName string, Name from which success emails are sent; defaults to "Civis."
\item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job.
\item failureEmailAddresses array, Addresses to notify by e-mail when the job fails.
\item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes.
\item successOn boolean, If success email notifications are on.
\item failureOn boolean, If failure email notifications are on.
}}
\item{runningAs}{list, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{requiredResources}{list, A list containing the following elements:
\itemize{
\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1024 shares.
\item memory integer, The amount of RAM to allocate for the container (in MiB).
\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported.
}}
\item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.}
\item{repoRef}{string, The tag or branch of the github repo to clone into the container.}
\item{remoteHostCredentialId}{integer, The id of the database credentials to pass into the environment of the container.}
\item{gitCredentialId}{integer, The id of the git credential to be used when checking out the specified git repo. If not supplied, the first git credential you've submitted will be used. Unnecessary if no git repo is specified or the git repo is public.}
\item{dockerCommand}{string, The command to run on the container. Will be run via sh as: ["sh", "-c", dockerCommand]. }
\item{dockerImageName}{string, The name of the docker image to pull from DockerHub.}
\item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.}
\item{instanceType}{string, The EC2 instance type to deploy to. Only available for jobs running on kubernetes.}
\item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.}
\item{lastRun}{list, A list containing the following elements:
\itemize{
\item id integer,
\item state string,
\item createdAt string, The time that the run was queued.
\item startedAt string, The time that the run started.
\item finishedAt string, The time that the run completed.
\item error string, The error message for this run, if present.
}}
\item{timeZone}{string, The time zone of this script.}
\item{hidden}{boolean, The hidden status of the item.}
\item{archived}{string, The archival status of the requested item(s).}
\item{targetProjectId}{integer, Target project to which script outputs will be added.}
}
\description{
Clone this Container Script
}
| /man/scripts_post_containers_clone.Rd | no_license | elsander/civis-r | R | false | true | 7,911 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{scripts_post_containers_clone}
\alias{scripts_post_containers_clone}
\title{Clone this Container Script}
\usage{
scripts_post_containers_clone(
id,
clone_schedule = NULL,
clone_triggers = NULL,
clone_notifications = NULL
)
}
\arguments{
\item{id}{integer required. The ID for the script.}
\item{clone_schedule}{boolean optional. If true, also copy the schedule to the new script.}
\item{clone_triggers}{boolean optional. If true, also copy the triggers to the new script.}
\item{clone_notifications}{boolean optional. If true, also copy the notifications to the new script.}
}
\value{
A list containing the following elements:
\item{id}{integer, The ID for the script.}
\item{name}{string, The name of the container.}
\item{type}{string, The type of the script (e.g Container)}
\item{createdAt}{string, The time this script was created.}
\item{updatedAt}{string, The time the script was last updated.}
\item{author}{list, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{state}{string, The status of the script's last run.}
\item{finishedAt}{string, The time that the script's last run finished.}
\item{category}{string, The category of the script.}
\item{projects}{array, An array containing the following fields:
\itemize{
\item id integer, The ID for the project.
\item name string, The name of the project.
}}
\item{parentId}{integer, The ID of the parent job that will trigger this script}
\item{userContext}{string, "runner" or "author", who to execute the script as when run as a template.}
\item{params}{array, An array containing the following fields:
\itemize{
\item name string, The variable's name as used within your code.
\item label string, The label to present to users when asking them for the value.
\item description string, A short sentence or fragment describing this parameter to the end user.
\item type string, The type of parameter. Valid options: string, multi_line_string, integer, float, bool, file, table, database, credential_aws, credential_redshift, or credential_custom
\item required boolean, Whether this param is required.
\item value string, The value you would like to set this param to. Setting this value makes this parameter a fixed param.
\item default string, If an argument for this parameter is not defined, it will use this default value. Use true, True, t, y, yes, or 1 for true bool's or false, False, f, n, no, or 0 for false bool's. Cannot be used for parameters that are required or a credential type.
\item allowedValues array, The possible values this parameter can take, effectively making this an enumerable parameter. Allowed values is an array of hashes of the following format: `{label: 'Import', 'value': 'import'}`
}}
\item{arguments}{list, Parameter-value pairs to use when running this script. Only settable if this script has defined parameters.}
\item{isTemplate}{boolean, Whether other scripts use this one as a template.}
\item{templateDependentsCount}{integer, How many other scripts use this one as a template.}
\item{publishedAsTemplateId}{integer, The ID of the template that this script is backing.}
\item{fromTemplateId}{integer, The ID of the template script.}
\item{templateScriptName}{string, The name of the template script.}
\item{links}{list, A list containing the following elements:
\itemize{
\item details string, The details link to get more information about the script.
\item runs string, The runs link to get the run information list for this script.
}}
\item{schedule}{list, A list containing the following elements:
\itemize{
\item scheduled boolean, If the item is scheduled.
\item scheduledDays array, Day based on numeric value starting at 0 for Sunday.
\item scheduledHours array, Hours of the day it is scheduled on.
\item scheduledMinutes array, Minutes of the day it is scheduled on.
\item scheduledRunsPerHour integer, Alternative to scheduled minutes, number of times to run per hour.
}}
\item{notifications}{list, A list containing the following elements:
\itemize{
\item urls array, URLs to receive a POST request at job completion
\item successEmailSubject string, Custom subject line for success e-mail.
\item successEmailBody string, Custom body text for success e-mail, written in Markdown.
\item successEmailAddresses array, Addresses to notify by e-mail when the job completes successfully.
\item successEmailFromName string, Name from which success emails are sent; defaults to "Civis."
\item successEmailReplyTo string, Address for replies to success emails; defaults to the author of the job.
\item failureEmailAddresses array, Addresses to notify by e-mail when the job fails.
\item stallWarningMinutes integer, Stall warning emails will be sent after this amount of minutes.
\item successOn boolean, If success email notifications are on.
\item failureOn boolean, If failure email notifications are on.
}}
\item{runningAs}{list, A list containing the following elements:
\itemize{
\item id integer, The ID of this user.
\item name string, This user's name.
\item username string, This user's username.
\item initials string, This user's initials.
\item online boolean, Whether this user is online.
}}
\item{requiredResources}{list, A list containing the following elements:
\itemize{
\item cpu integer, The number of CPU shares to allocate for the container. Each core has 1024 shares.
\item memory integer, The amount of RAM to allocate for the container (in MiB).
\item diskSpace number, The amount of disk space, in GB, to allocate for the container. This space will be used to hold the git repo configured for the container and anything your container writes to /tmp or /data. Fractional values (e.g. 0.25) are supported.
}}
\item{repoHttpUri}{string, The location of a github repo to clone into the container, e.g. github.com/my-user/my-repo.git.}
\item{repoRef}{string, The tag or branch of the github repo to clone into the container.}
\item{remoteHostCredentialId}{integer, The id of the database credentials to pass into the environment of the container.}
\item{gitCredentialId}{integer, The id of the git credential to be used when checking out the specified git repo. If not supplied, the first git credential you've submitted will be used. Unnecessary if no git repo is specified or the git repo is public.}
\item{dockerCommand}{string, The command to run on the container. Will be run via sh as: ["sh", "-c", dockerCommand]. }
\item{dockerImageName}{string, The name of the docker image to pull from DockerHub.}
\item{dockerImageTag}{string, The tag of the docker image to pull from DockerHub.}
\item{instanceType}{string, The EC2 instance type to deploy to. Only available for jobs running on kubernetes.}
\item{cancelTimeout}{integer, The amount of time (in seconds) to wait before forcibly terminating the script. When the script is cancelled, it is first sent a TERM signal. If the script is still running after the timeout, it is sent a KILL signal. Defaults to 0.}
\item{lastRun}{list, A list containing the following elements:
\itemize{
\item id integer,
\item state string,
\item createdAt string, The time that the run was queued.
\item startedAt string, The time that the run started.
\item finishedAt string, The time that the run completed.
\item error string, The error message for this run, if present.
}}
\item{timeZone}{string, The time zone of this script.}
\item{hidden}{boolean, The hidden status of the item.}
\item{archived}{string, The archival status of the requested item(s).}
\item{targetProjectId}{integer, Target project to which script outputs will be added.}
}
\description{
Clone this Container Script
}
|
# Load raster handling and raster plotting packages.
library(raster)
library(RColorBrewer)
library(lattice)
library(rasterVis)
# Read a multi-band MODIS image as a RasterStack (one layer per band).
r = stack("../Data files/modis.tif")
# Cell-wise minimum across all layers of the stack.
min_div = min(r,na.rm=TRUE)
# Minimum of the first layer only.
min_val = min(r[[1]],na.rm = TRUE)
# Cell-wise range across layers -- presumably a two-layer (min/max)
# raster object, so numero_layers should be 2; TODO confirm via ?nlayers
range_ndvi = range(r,na.rm = TRUE)
numero_layers = nlayers(range_ndvi)
levelplot(stack(r[[1]]), par.settings = RdBuTheme, contour = FALSE) | /map_algebra.R | no_license | geosconsulting/geospatial_analysis | R | false | false | 314 | r | library(raster)
library(RColorBrewer)
library(lattice)
library(rasterVis)
r = stack("../Data files/modis.tif")
min_div = min(r,na.rm=TRUE)
min_val = min(r[[1]],na.rm = TRUE)
range_ndvi = range(r,na.rm = TRUE)
numero_layers = nlayers(range_ndvi)
levelplot(stack(r[[1]]), par.settings = RdBuTheme, contour = FALSE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggTrace.R
\name{ggTrace}
\alias{ggTrace}
\title{ggTrace}
\usage{
ggTrace(output, pars, title = "")
}
\arguments{
\item{output}{BayesTraits MCMC output - output from btmcmc function.}
\item{pars}{The parameter for which you want to see a trace plot.}
}
\description{
Make a trace plot for an MCMC parameter using ggplot2.
}
| /man/ggTrace.Rd | no_license | hferg/btrtools | R | false | true | 407 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggTrace.R
\name{ggTrace}
\alias{ggTrace}
\title{ggTrace}
\usage{
ggTrace(output, pars, title = "")
}
\arguments{
\item{output}{BayesTraits MCMC output - output from btmcmc function.}
\item{pars}{The paramater you want to see an autocorrelation plot for.}
}
\description{
Make a trace plot for an MCMC parameter using ggplot2.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sexRatioTest.R
\name{print.sexRatioTest}
\alias{print.sexRatioTest}
\title{\code{print()} helper function for \code{sexRatioTest()} function}
\usage{
\method{print}{sexRatioTest}(x, ...)
}
\arguments{
\item{x}{Output resulting from applying the \code{sexRatioTest()} function}
\item{...}{Additional \code{print()} parameters}
}
\value{
Printed output of \code{sexRatioTest()} function
}
\description{
\code{print()} helper function for \code{sexRatioTest()} function
}
\examples{
# Use \code{sexRatioTest()} on household roster data from a survey in Tanzania
# (as.ex01) and census data of Tanzania extracted from Wolfram|Alpha knowledge
# engine (as.ex02)
svy <- as.ex01
ref <- as.ex02
censusM <- sum(ref$Males)
censusF <- sum(ref$Females)
srt <- sexRatioTest(svy$sex, codes = c(1, 2), pop = c(censusM, censusF))
print(srt)
}
| /man/print.sexRatioTest.Rd | no_license | SaeedR1987/nipnTK | R | false | true | 906 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sexRatioTest.R
\name{print.sexRatioTest}
\alias{print.sexRatioTest}
\title{\code{print()} helper function for \code{sexRatioTest()} function}
\usage{
\method{print}{sexRatioTest}(x, ...)
}
\arguments{
\item{x}{Output resulting from applying the \code{sexRatioTest()} function}
\item{...}{Additional \code{print()} parameters}
}
\value{
Printed output of \code{sexRatioTest()} function
}
\description{
\code{print()} helper function for \code{sexRatioTest()} function
}
\examples{
# Use \code{sexRatioTest()} on household roster data from a survey in Tanzania
# (as.ex01) and census data of Tanzania extracted from Wolfram|Alpha knowledge
# engine (as.ex02)
svy <- as.ex01
ref <- as.ex02
censusM <- sum(ref$Males)
censusF <- sum(ref$Females)
srt <- sexRatioTest(svy$sex, codes = c(1, 2), pop = c(censusM, censusF))
print(srt)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fdo.R
\docType{data}
\name{fdo_leagueCodes}
\alias{fdo_leagueCodes}
\title{fdo_leagueCodes}
\format{A data frame with 3 variables: \describe{
\item{\code{leagueCode}}{The League Code as a string e.g. "PL" represents
Premier League} \item{\code{country}}{The country in which that league
takes place e.g. "England"} \item{\code{league}}{The name of the league
e.g. "Premier League", "1.Bundesliga"} }
For further details, see
\url{http://api.football-data.org/docs/v1/index.html#league_codes}}
\usage{
fdo_leagueCodes
}
\description{
Table of leagueCodes to be used with \code{\link{fdo_listFixtures}}
}
\keyword{datasets}
| /man/fdo_leagueCodes.Rd | permissive | biglongnow/footballR | R | false | true | 736 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fdo.R
\docType{data}
\name{fdo_leagueCodes}
\alias{fdo_leagueCodes}
\title{fdo_leagueCodes}
\format{A data frame with 3 variables: \describe{
\item{\code{leagueCode}}{The League Code as a string e.g. "PL" represents
Premier League} \item{\code{country}}{The country in which that league
takes place e.g. "England"} \item{\code{league}}{The name of the league
e.g. "Premier League", "1.Bundesliga"} }
For further details, see
\url{http://api.football-data.org/docs/v1/index.html#league_codes}}
\usage{
fdo_leagueCodes
}
\description{
Table of leagueCodes to be used with \code{\link{fdo_listFixtures}}
}
\keyword{datasets}
|
#' ATE and ATT functions
#'
#' Computes ATE and ATT from data and nuisance parameters as in DoubleML p.35 (Chernozhukov et al., 2017)
#' Last edited: 11/01/2018
#'
#' @param y vector of the dependent variable
#' @param D vector of treatment assignment
#' @param g1 estimate of E(Y_1 \vert X)
#' @param g0 estimate of E(Y_0 \vert X)
#' @param m propensity score estimate P(D=1 \vert X)
#' @param selec indicator of common support
#'
#' All vectors above must have the same length.
#'
#' @author Jeremy LHour
ATE <- function(Y,D,g1,g0,m,CS){
if(missing(CS)){
n = length(Y)
CS = rep(TRUE,n)
}
gamma = (g1[CS]-g0[CS]) + D[CS]*(Y[CS]-g1[CS])/m[CS] - (1-D[CS])*(Y[CS]-g0[CS])/(1-m[CS])
return(mean(gamma))
}
# SE.ATE: standard error of the doubly-robust ATE estimator
# (standard deviation of the efficient score divided by sqrt(n)).
# Args mirror ATE(); CS is a logical common-support indicator.
# Bug fix: the default for CS called rep(TRUE, n) before n was defined,
# so SE.ATE errored (or silently picked up a global n) when CS was omitted.
SE.ATE <- function(Y,D,g1,g0,m,CS){
  if(missing(CS)) CS <- rep(TRUE, length(Y))
  n <- length(Y[CS])
  # Efficient influence function for the ATE on the common-support subsample.
  gamma <- (g1[CS]-g0[CS]) + D[CS]*(Y[CS]-g1[CS])/m[CS] - (1-D[CS])*(Y[CS]-g0[CS])/(1-m[CS])
  return(sd(gamma)/sqrt(n))
}
ATT <- function(Y,D,g0,m,CS){
if(missing(CS)) CS = rep(TRUE,length(Y))
pi = mean(D[CS])
gamma = D[CS]*(Y[CS]-g0[CS])/pi - (1-D[CS])*m[CS]*(Y[CS]-g0[CS])/(pi*(1-m[CS]))
return(mean(gamma))
}
SE.ATT <- function(Y,D,g0,m,CS){
if(missing(CS)) CS = rep(TRUE,length(Y))
n = length(Y[CS])
pi = mean(D[CS])
gamma = D[CS]*(Y[CS]-g0[CS])/pi - (1-D[CS])*m[CS]*(Y[CS]-g0[CS])/(pi*(1-m[CS]))
return(list(SE = sd(gamma)/sqrt(n),
gamma = gamma))
} | /functions/ATE.R | no_license | zhangc927/hdmetrics | R | false | false | 1,402 | r | #' ATE and ATT functions
#'
#' Computes ATE and ATT from data and nuisance parameters as in DoubleML p.35 (Chernozhukov et al., 2017)
#' Last edited: 11/01/2018
#'
#' @param Y vector of the dependent variable
#' @param D vector of treatment assignment
#' @param g1 estimate of E(Y_1 \vert X)
#' @param g0 estimate of E(Y_0 \vert X)
#' @param m propensity score estimate P(D=1 \vert X)
#' @param CS logical indicator of common support (defaults to all TRUE)
#'
#' All vectors above must have the same length.
#'
#' @author Jeremy LHour
# ATE: doubly-robust (AIPW) estimator of the average treatment effect,
# averaged over the common-support subsample indicated by CS.
ATE <- function(Y,D,g1,g0,m,CS){
  if (missing(CS)) {
    # No common-support indicator supplied: use the full sample.
    CS <- rep(TRUE, length(Y))
  }
  y <- Y[CS]
  d <- D[CS]
  mu1 <- g1[CS]
  mu0 <- g0[CS]
  ps <- m[CS]
  # Efficient score: outcome-model contrast plus IPW residual corrections.
  score <- (mu1 - mu0) + d * (y - mu1) / ps - (1 - d) * (y - mu0) / (1 - ps)
  mean(score)
}
# SE.ATE: standard error of the doubly-robust ATE estimator
# (standard deviation of the efficient score divided by sqrt(n)).
# Args mirror ATE(); CS is a logical common-support indicator.
# Bug fix: the default for CS called rep(TRUE, n) before n was defined,
# so SE.ATE errored (or silently picked up a global n) when CS was omitted.
SE.ATE <- function(Y,D,g1,g0,m,CS){
  if(missing(CS)) CS <- rep(TRUE, length(Y))
  n <- length(Y[CS])
  # Efficient influence function for the ATE on the common-support subsample.
  gamma <- (g1[CS]-g0[CS]) + D[CS]*(Y[CS]-g1[CS])/m[CS] - (1-D[CS])*(Y[CS]-g0[CS])/(1-m[CS])
  return(sd(gamma)/sqrt(n))
}
# ATT: doubly-robust estimator of the average treatment effect on the
# treated, averaged over the common-support subsample indicated by CS.
ATT <- function(Y,D,g0,m,CS){
  if (missing(CS)) {
    CS <- rep(TRUE, length(Y))
  }
  d <- D[CS]
  resid0 <- Y[CS] - g0[CS]   # residual against the untreated outcome model
  ps <- m[CS]
  treated_share <- mean(d)   # P(D = 1) on the retained subsample
  # Treated residuals minus propensity-reweighted control residuals.
  score <- d * resid0 / treated_share -
    (1 - d) * ps * resid0 / (treated_share * (1 - ps))
  mean(score)
}
# SE.ATT: standard error of the ATT estimator, plus the per-observation
# influence values. Returns list(SE = sd(score)/sqrt(n), gamma = score).
SE.ATT <- function(Y,D,g0,m,CS){
  if (missing(CS)) {
    CS <- rep(TRUE, length(Y))
  }
  d <- D[CS]
  resid0 <- Y[CS] - g0[CS]
  ps <- m[CS]
  treated_share <- mean(d)
  # Same influence function as in ATT().
  score <- d * resid0 / treated_share -
    (1 - d) * ps * resid0 / (treated_share * (1 - ps))
  list(SE = sd(score) / sqrt(length(score)),
       gamma = score)
}
# Compare personality trait scores (columns O, C, E, A, N -- presumably
# Big Five scores; TODO confirm against the database schema) across three
# reputation groups (high / mid / low) pulled from a database.
# connectTodb.R is expected to create the DB connection object `con`.
rm(list=ls())
source(file="connectTodb.R")
query="select * from high_repu"
high<-dbGetQuery(con,query)
query="select * from mid_repu"
mid<-dbGetQuery(con,query)
query="select * from low_repu"
low<-dbGetQuery(con,query)
rm(con,query)
# One boxplot per trait, side by side, grouped by reputation level.
par(mfrow=c(1,5))
level_contrib<-rbind(cbind(high,type="H"),cbind(mid,type="M"),cbind(low,type="L"))
boxplot(O~type,data=level_contrib,notch=T,outline=F,xlab="O")
boxplot(C~type,data=level_contrib,notch=T,outline=F,xlab="C")
boxplot(E~type,data=level_contrib,notch=T,outline=F,xlab="E")
boxplot(A~type,data=level_contrib,notch=T,outline=F,xlab="A")
boxplot(N~type,data=level_contrib,notch=T,outline=F,xlab="N")
dev.off()
#ANOVA for the level of contribution HIGH-MID-LOW
# One-way ANOVA per trait, with effect size.
# NOTE(review): etaSquared() is not defined here and no package providing
# it is loaded -- presumably lsr::etaSquared; confirm the sourced setup.
aov.out<-aov(O~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(C~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(E~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(A~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(N~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
#anova for the level H-M, H-L, L-M
# Pairwise two-group datasets for follow-up comparisons.
high_mid<-rbind(cbind(high,type="1"),cbind(mid,type="2"))
high_low<-rbind(cbind(high,type="1"),cbind(low,type="2"))
low_mid<-rbind(cbind(low,type="1"),cbind(mid,type="2"))
# Tukey HSD shown only for trait N on the low-vs-mid pair.
aov.out<-aov(N~type,data=low_mid)
tukey.test <- TukeyHSD(aov.out)
tukey.test
| /replications/Rastogi_2016/R/level_of_contribution.R | permissive | micheledinanni/Psychometric-tools-benchmark | R | false | false | 1,388 | r | rm(list=ls())
# Load DB connection helper; assumed to create the connection object `con`
# used below -- TODO confirm against connectTodb.R.
source(file="connectTodb.R")
# Pull the three reputation-level tables (high / mid / low contributors).
query="select * from high_repu"
high<-dbGetQuery(con,query)
query="select * from mid_repu"
mid<-dbGetQuery(con,query)
query="select * from low_repu"
low<-dbGetQuery(con,query)
rm(con,query)
# One row of five boxplots, one per personality trait column (O, C, E, A, N).
par(mfrow=c(1,5))
# Stack the three groups, tagging each row with its contribution level.
level_contrib<-rbind(cbind(high,type="H"),cbind(mid,type="M"),cbind(low,type="L"))
boxplot(O~type,data=level_contrib,notch=T,outline=F,xlab="O")
boxplot(C~type,data=level_contrib,notch=T,outline=F,xlab="C")
boxplot(E~type,data=level_contrib,notch=T,outline=F,xlab="E")
boxplot(A~type,data=level_contrib,notch=T,outline=F,xlab="A")
boxplot(N~type,data=level_contrib,notch=T,outline=F,xlab="N")
dev.off()
#ANOVA for the level of contribution HIGH-MID-LOW
# One-way ANOVA per trait across the three contribution levels.
# etaSquared() reports effect size -- presumably from the lsr package,
# loaded elsewhere; TODO confirm.
aov.out<-aov(O~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(C~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(E~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(A~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
aov.out<-aov(N~type,data=level_contrib)
summary(aov.out)
etaSquared(aov.out)
#anova for the level H-M, H-L, L-M
# Pairwise two-group datasets; only the low-vs-mid contrast on trait N is
# actually analysed below (Tukey's HSD on the fitted ANOVA).
high_mid<-rbind(cbind(high,type="1"),cbind(mid,type="2"))
high_low<-rbind(cbind(high,type="1"),cbind(low,type="2"))
low_mid<-rbind(cbind(low,type="1"),cbind(mid,type="2"))
aov.out<-aov(N~type,data=low_mid)
tukey.test <- TukeyHSD(aov.out)
tukey.test
|
## Objective: Optimize performance of matrix inversion by caching the result
## Execution example:
## m <- makeCacheMatrix( matrix( rnorm(16) , nrow=4 , ncol=4) ) // cache original matrix
## cacheSolve( m ) // return inverse matrix
## cacheSolve( m ) // second time, return the same inverted matrix with the message "Getting cached inverse matrix !"
## makeCacheMatrix contains:
## - subFunctions to get and set original matrix
## - subFunctions to get and set inverted matrix
makeCacheMatrix <- function(x = matrix()) {
mInverted <- NULL
set <- function( y ) {
x <<- y
mInverted <<- NULL
}
get <- function( ) x
setInverted <- function( z ) {
mInverted <<- z
}
getInverted <- function( ) mInverted
list(set = set, get = get , setInverted = setInverted , getInverted = getInverted )
}
## cacheSolve retrieves
## - cached Inverted matrix if exist
## - or compute inverse matrix and store in cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverted()
if(!is.null(m)) {
m <- x$getInverted()
message("Getting cached inverse matrix !" )
return(m)
}
data <- x$get()
m <- solve(data)
x$setInverted(m)
m
} | /cachematrix.R | no_license | bertranddol/ProgrammingAssignment2 | R | false | false | 1,326 | r | ## Objective: Optimize performance for Inverse Vector by caching result
## Execution example:
## m <- makeCacheMatrix( matrix( rnorm(16) , nrow=4 , ncol=4) ) // cache original matrix
## cacheSolve( m ) // return inverse matrix
## cacheSolve( m ) // second time, return same inverted matrix with the message "getting cached inverted vector..."
## makeCaheMatrix contains:
## - subFunctions to get and set original matrix
## - subFunctions to get and set inverted matrix
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a matrix together with a cache slot for its inverse.
  ## Returns a list of four accessors; the cached inverse is invalidated
  ## whenever the underlying matrix is replaced via set().
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setInverted <- function(z) cached_inverse <<- z
  getInverted <- function() cached_inverse
  list(
    set = set,
    get = get,
    setInverted = setInverted,
    getInverted = getInverted
  )
}
## cacheSolve retrieves
## - cached Inverted matrix if exist
## - or compute inverse matrix and store in cache
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of the matrix held in 'x'
        ## (an object built by makeCacheMatrix). Serves the cached inverse
        ## when available; otherwise computes it with solve(), stores it in
        ## the cache, and returns it. Extra arguments are forwarded to
        ## solve() (the original accepted '...' but silently dropped it).
  m <- x$getInverted()
  if(!is.null(m)) {
    # Cache hit: 'm' already holds the inverse. (The original re-fetched
    # it here with a second, redundant getInverted() call -- removed.)
    message("Getting cached inverse matrix !" )
    return(m)
  }
  # Cache miss: compute, store, and return the inverse.
  data <- x$get()
  m <- solve(data, ...)
  x$setInverted(m)
  m
}
#' @title Sample Correlation Coefficient (r)
#' @name r
#' @description It estimates the Pearson's coefficient of correlation (r) for a continuous predicted-observed dataset.
#' @param data (Optional) argument to call an existing data frame containing the data.
#' @param obs Vector with observed values (numeric).
#' @param pred Vector with predicted values (numeric).
#' @param tidy Logical operator (TRUE/FALSE) to decide the type of return. TRUE
#' returns a data.frame, FALSE returns a list; Default : FALSE.
#' @param na.rm Logic argument to remove rows with missing values
#' (NA). Default is na.rm = TRUE.
#' @return an object of class `numeric` within a `list` (if tidy = FALSE) or within a
#' `data frame` (if tidy = TRUE).
#' @details The r coefficient measures the strength of linear relationship between two variables.
#' It only accounts for precision, but it is not sensitive to lack of prediction accuracy.
#' It is a normalized, dimensionless coefficient, that ranges between -1 to 1. It is expected that
#' predicted and observed values will show 0 < r < 1.
#' It is also known as the Pearson Product Moment Correlation, among other names.
#' For the formula and more details, see [online-documentation](https://adriancorrendo.github.io/metrica/articles/available_metrics_regression.html)
#' @references
#' Kirch (2008)
#' Pearson’s Correlation Coefficient.
#' _In: Kirch W. (eds) Encyclopedia of Public Health. Springer, Dordrecht._
#' \doi{https://doi.org/10.1007/978-1-4020-5614-7_2569}
#' @examples
#' \donttest{
#' set.seed(1)
#' X <- rnorm(n = 100, mean = 0, sd = 10)
#' Y <- X + rnorm(n=100, mean = 0, sd = 3)
#' r(obs = X, pred = Y)
#' }
#' @rdname r
#' @importFrom stats cor
#' @importFrom rlang eval_tidy quo
#' @export
r <- function(data=NULL,
obs,
pred,
tidy = FALSE,
na.rm = TRUE){
r <- rlang::eval_tidy(
data = data,
rlang::quo(
(sum(({{obs}}-mean({{obs}}))*({{pred}}-mean({{pred}}))))/
(sqrt(sum(({{obs}}-mean({{obs}}))^2))*sqrt(sum(({{pred}}-mean({{pred}}))^2)))
)
)
if (tidy==TRUE){ return(as.data.frame(r)) }
if (tidy==FALSE){ return(list("r"=r)) }
} | /R/reg_r.R | permissive | adriancorrendo/metrica | R | false | false | 2,185 | r | #' @title Sample Correlation Coefficient (r)
#' @name r
#' @description It estimates the Pearson's coefficient of correlation (r) for a continuous predicted-observed dataset.
#' @param data (Optional) argument to call an existing data frame containing the data.
#' @param obs Vector with observed values (numeric).
#' @param pred Vector with predicted values (numeric).
#' @param tidy Logical operator (TRUE/FALSE) to decide the type of return. TRUE
#' returns a data.frame, FALSE returns a list; Default : FALSE.
#' @param na.rm Logic argument to remove rows with missing values
#' (NA). Default is na.rm = TRUE.
#' @return an object of class `numeric` within a `list` (if tidy = FALSE) or within a
#' `data frame` (if tidy = TRUE).
#' @details The r coefficient measures the strength of linear relationship between two variables.
#' It only accounts for precision, but it is not sensitive to lack of prediction accuracy.
#' It is a normalized, dimensionless coefficient, that ranges between -1 to 1. It is expected that
#' predicted and observed values will show 0 < r < 1.
#' It is also known as the Pearson Product Moment Correlation, among other names.
#' For the formula and more details, see [online-documentation](https://adriancorrendo.github.io/metrica/articles/available_metrics_regression.html)
#' @references
#' Kirch (2008)
#' Pearson’s Correlation Coefficient.
#' _In: Kirch W. (eds) Encyclopedia of Public Health. Springer, Dordrecht._
#' \doi{https://doi.org/10.1007/978-1-4020-5614-7_2569}
#' @examples
#' \donttest{
#' set.seed(1)
#' X <- rnorm(n = 100, mean = 0, sd = 10)
#' Y <- X + rnorm(n=100, mean = 0, sd = 3)
#' r(obs = X, pred = Y)
#' }
#' @rdname r
#' @importFrom stats cor
#' @importFrom rlang eval_tidy quo
#' @export
r <- function(data=NULL,
              obs,
              pred,
              tidy = FALSE,
              na.rm = TRUE){
  r <- rlang::eval_tidy(
    data = data,
    rlang::quo({
      # Materialize the (possibly data-masked) columns as plain vectors.
      o <- {{obs}}
      p <- {{pred}}
      # Honor the documented na.rm contract: the argument was previously
      # accepted but ignored, so a single NA made the whole result NA.
      # Rows with NA in either vector are dropped (pairwise-complete).
      if (na.rm) {
        keep <- !(is.na(o) | is.na(p))
        o <- o[keep]
        p <- p[keep]
      }
      # Pearson r: covariance over the product of standard deviations,
      # written out as sums of squared deviations.
      (sum((o - mean(o)) * (p - mean(p)))) /
        (sqrt(sum((o - mean(o))^2)) * sqrt(sum((p - mean(p))^2)))
    })
  )
  if (tidy==TRUE){ return(as.data.frame(r)) }
  if (tidy==FALSE){ return(list("r"=r)) }
}
require("biomaRt")
ensembl <- useEnsembl(biomart = "genes", dataset = "hsapiens_gene_ensembl")
res <- getBM(c("ensembl_gene_id", "ensembl_transcript_id", "5_utr_start", "5_utr_end", "3_utr_start", "3_utr_end"), mart=ensembl, filter="with_hgnc", values=TRUE)
res <- res[!(is.na(res[,3]) & is.na(res[,4]) & is.na(res[,5]) & is.na(res[,6])),]
res$utr_5_len <- res[,4]-res[,3]
res$utr_3_len <- res[,6]-res[,5]
write.table(res, "UTRs_ensembl_hsap_7_July2020.txt", col.names=TRUE, row.names=FALSE)
| /download_UTRs_Ensembl.R | no_license | yingstat/snRNAseq_Pipeline | R | false | false | 501 | r | require("biomaRt")
# Connect to the Ensembl human gene annotation mart (biomaRt).
ensembl <- useEnsembl(biomart = "genes", dataset = "hsapiens_gene_ensembl")
# One row per transcript / UTR segment with 5' and 3' UTR genomic
# coordinates, restricted to genes carrying an HGNC symbol.
res <- getBM(c("ensembl_gene_id", "ensembl_transcript_id", "5_utr_start", "5_utr_end", "3_utr_start", "3_utr_end"), mart=ensembl, filter="with_hgnc", values=TRUE)
# Keep rows that have at least one annotated UTR coordinate.
res <- res[!(is.na(res[,3]) & is.na(res[,4]) & is.na(res[,5]) & is.na(res[,6])),]
# Ensembl genomic coordinates are 1-based and inclusive, so a span's length
# is end - start + 1 (the original omitted the +1, undercounting by 1 bp).
# NOTE(review): UTRs spanning several exons appear as several rows, so these
# are per-row lengths, not per-transcript totals -- confirm downstream usage.
res$utr_5_len <- res[,4]-res[,3]+1
res$utr_3_len <- res[,6]-res[,5]+1
write.table(res, "UTRs_ensembl_hsap_7_July2020.txt", col.names=TRUE, row.names=FALSE)
|
# calculate_irl_setup.R
# Bryan Holman || v0.1 || 20170501
# Given the GEFS ensemble data just downloaded, this R script calculates the
# setup according to the procedure outlined in Colvin et al. (2016).
# v0.1 -> Calculates wind run for each GEFS ensemble member using the
# interpolated data only. No bias correction or ensemble calibration performed
# as of yet.
# libraries ---------------------------------------------------------------
library(rmarkdown) # rendering index.Rmd at the end
library(riem) # access to ASOS data through iowa state
library(lubridate) # awesome date handling
library(xts) # also awesome date handling
library(WindVerification) # wind data handling
# functions ---------------------------------------------------------------
# calculates the 12-hour wind run given a series of u and v forecasts, returns
# the u and v wind run components
getWindRun <- function(times, us, vs, type = 'model') {
# create a dataframe given input variables
df <- data.frame(time = times, u = us, v = vs)
# we can't calculate the 12 hour wind run until we have 12 hours of data!
# So don't start calculating until this starting point
windRun.start <- df$time[1] + hours(9)
# if we are doing KMLB obs
if (type == 'asos') windRun.start <- df$time[1] + hours(11)
df.length <- length(df$time)
# by default, all wind runs are NAs
windRun.u <- rep(NA, df.length)
windRun.v <- rep(NA, df.length)
# loop through each line and calculate the wind runs
for (i in 1:df.length) {
# go to next line if we haven't reach windRun.start yet!
if (df$time[i] < windRun.start) next
# if we have reach windRun.start, then calculate wind run
windRun.uv <- colMeans(df[df$time <= df$time[i] &
df$time > df$time[i] - hours(12),
c('u', 'v')], na.rm = TRUE)
# save current wind run to vectors
windRun.u[i] <- windRun.uv[1]
windRun.v[i] <- windRun.uv[2]
}
# return windruns as a list
return(list(u = windRun.u, v = windRun.v))
}
# convert from degrees to radians
deg2rad <- function(degrees) {
return((degrees * pi) / 180)
}
# calculate the setup
getSetup <- function(times, us, vs, type = 'model') {
# calculate 12 hour wind run
windRun <- getWindRun(times, us, vs, type)
# get wind speed and wind direction for these wind runs
windRun.spd.dir <- mapply(getwspdwdir, windRun$u, windRun$v)
# calculate u1 and u2, the irl-oriented wind components for the northern
# and southern irl, respectively
u1 <- unlist(windRun.spd.dir[1,]) *
cos(sapply(unlist(windRun.spd.dir[2,]) + 10, deg2rad))
u2 <- unlist(windRun.spd.dir[1,]) *
cos(sapply(unlist(windRun.spd.dir[2,]) + 26, deg2rad))
# now calculate the lagoon relative wind
u.r <- -1 * (u1 * 28 + u2 * 70) / (28 + 70)
# u.r <- (u1 * 28 + u2 * 70) / (28 + 70)
# return the setup given u.r
return(1.637 * sign(u.r) * abs(u.r)^1.5 - .109)
}
# global variables --------------------------------------------------------
# path for /data
data.path <- paste(getwd(), '/data', sep = '')
# ensemble members
ens.mems <- c('gec00', 'gep01', 'gep02', 'gep03', 'gep04', 'gep05', 'gep06',
'gep07', 'gep08', 'gep09', 'gep10', 'gep11', 'gep12', 'gep13',
'gep14', 'gep15', 'gep16', 'gep17', 'gep18', 'gep19', 'gep20')
# # most recent data (yesterday)
# date <- format(Sys.Date() - days(1), '%Y%m%d')
# data --------------------------------------------------------------------
# open df.all, we will need a few days of gefs forecasts
df.all <- read.csv(paste(data.path, '/gefs_all.csv', sep = ''),
header = TRUE, stringsAsFactors = FALSE)
df.all$runtime <- as.POSIXct(df.all$runtime, tz = 'UTC')
df.all$validtime <- as.POSIXct(df.all$validtime, tz = 'UTC')
# we will only calculate setup for the last 4 gefs runs
gefs.runs <- tail(unique(df.all$runtime), 4)
# get data frames for only these four runs, and neglect kmlb.u & kmlb.v columns
df.gefs.1 <- df.all[df.all$runtime == gefs.runs[1],-c(46, 47)]
df.gefs.2 <- df.all[df.all$runtime == gefs.runs[2],-c(46, 47)]
df.gefs.3 <- df.all[df.all$runtime == gefs.runs[3],-c(46, 47)]
df.gefs.recent <- df.all[df.all$runtime == gefs.runs[4],-c(46, 47)]
# clear up some memory
rm(df.all)
# data frames to store setup information
gefs.setup.1 <- data.frame(validtime = df.gefs.1$validtime)
gefs.setup.2 <- data.frame(validtime = df.gefs.2$validtime)
gefs.setup.3 <- data.frame(validtime = df.gefs.3$validtime)
gefs.setup.recent <- data.frame(validtime = df.gefs.recent$validtime)
# loop through all the ensemble members and calculate the wind runs
for (ens.mem in ens.mems) {
# calculate setup for each gefs run and add it to the respective setup
# data frame
gefs.setup.1[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.1$validtime,
df.gefs.1[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.1[[paste(ens.mem, 'v', sep = '.')]])
gefs.setup.2[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.2$validtime,
df.gefs.2[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.2[[paste(ens.mem, 'v', sep = '.')]])
gefs.setup.3[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.3$validtime,
df.gefs.3[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.3[[paste(ens.mem, 'v', sep = '.')]])
gefs.setup.recent[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.recent$validtime,
df.gefs.recent[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.recent[[paste(ens.mem, 'v', sep = '.')]])
}
# clear up some memory
rm(df.gefs.1, df.gefs.2, df.gefs.3, df.gefs.recent)
# get hourly KMLB ASOS data for the last few days to plot
df.kmlb <- riem_measures(station = "MLB",
date_start = format(Sys.Date() - days(4), '%Y-%m-%d'),
date_end = format(Sys.Date() + days(1), '%Y-%m-%d'))
# Only keep the hourly updates, which happen to be the only observations with
# MSLP
df.kmlb <- df.kmlb[!is.na(df.kmlb$mslp),]
# round valid times to nearest quarter hour, n is in seconds
df.kmlb$roundvalid <- as.POSIXct(align.time(df.kmlb$valid, n=60*15))
df.kmlb$roundvalid <- as.POSIXct(format(df.kmlb$roundvalid,
'%Y-%m-%d %H:%M:%S'), tz = 'UTC')
# convert wind speed from knots to m/s
df.kmlb$wspd <- convertunits(df.kmlb$sknt, inunits = 'knots', outunits = 'm/s')
# get u and v
uv <- mapply(getuv, df.kmlb$wspd, df.kmlb$drct)
df.kmlb$u <- uv[1,]
df.kmlb$v <- uv[2,]
# calculate KMLB ASOS setup
kmlb.setup <- getSetup(df.kmlb$roundvalid, df.kmlb$u, df.kmlb$v,
type = 'asos')
# create a dataframe with just this information
asos.setup <- data.frame(roundvalid = df.kmlb$roundvalid, setup = kmlb.setup)
save(gefs.setup.1, gefs.setup.2, gefs.setup.3, gefs.setup.recent, asos.setup,
file = 'data/setup.RData')
rmarkdown::render('R/index.Rmd', output_dir = '~/Dropbox/IRLSetup/docs/')
# save df.run to disk
# write.csv(df.run, file = paste(data.path, '/', 'gefs_', date, '.csv', sep = ''),
# row.names = FALSE)
| /R/calculate_irl_setup.R | permissive | FL-ASGS/IRLSetup | R | false | false | 7,415 | r | # calculate_irl_setup.R
# Bryan Holman || v0.1 || 20170501
# Given the GEFS ensemble data just downloaded, this R script calculates the
# setup according to the procedure outlined in Colvin et al. (2016).
# v0.1 -> Calculates wind run for each GEFS ensemble member using the
# interpolated data only. No bias correction or ensemble calibration performed
# as of yet.
# libraries ---------------------------------------------------------------
library(rmarkdown) # rendering index.Rmd at the end
library(riem) # access to ASOS data through iowa state
library(lubridate) # awesome date handling
library(xts) # also awesome date handling
library(WindVerification) # wind data handling
# functions ---------------------------------------------------------------
# calculates the 12-hour wind run given a series of u and v forecasts, returns
# the u and v wind run components
getWindRun <- function(times, us, vs, type = 'model') {
    # Trailing 12-hour mean wind ("wind run") at each time step.
    #
    # Args:
    #   times: vector of POSIXct timestamps; windows are selected by time,
    #          not by index, so irregular spacing is tolerated.
    #   us, vs: u and v wind components aligned with `times`.
    #   type: 'model' (GEFS forecasts) or 'asos' (KMLB observations); only
    #         changes how soon the first window is considered usable.
    # Returns: list(u =, v =) of trailing 12-h mean components, NA before
    #          the first full window.
    # create a dataframe given input variables
    df <- data.frame(time = times, u = us, v = vs)
    # we can't calculate the 12 hour wind run until we have 12 hours of data!
    # So don't start calculating until this starting point
    windRun.start <- df$time[1] + hours(9)
    # if we are doing KMLB obs
    # NOTE(review): 9 h (model) vs 11 h (ASOS) warm-up -- presumably reflects
    # the two sources' different time resolutions; confirm with data cadence.
    if (type == 'asos') windRun.start <- df$time[1] + hours(11)
    df.length <- length(df$time)
    # by default, all wind runs are NAs
    windRun.u <- rep(NA, df.length)
    windRun.v <- rep(NA, df.length)
    # loop through each line and calculate the wind runs
    for (i in 1:df.length) {
        # go to next line if we haven't reach windRun.start yet!
        if (df$time[i] < windRun.start) next
        # if we have reach windRun.start, then calculate wind run:
        # mean u/v over the half-open window (t - 12h, t]; NAs are skipped.
        windRun.uv <- colMeans(df[df$time <= df$time[i] &
                                      df$time > df$time[i] - hours(12),
                                  c('u', 'v')], na.rm = TRUE)
        # save current wind run to vectors
        windRun.u[i] <- windRun.uv[1]
        windRun.v[i] <- windRun.uv[2]
    }
    # return windruns as a list
    return(list(u = windRun.u, v = windRun.v))
}
# Convert an angle (scalar or vector) from degrees to radians.
# (The previous comment stated the conversion backwards.)
deg2rad <- function(degrees) {
    degrees * (pi / 180)
}
# calculate the setup
getSetup <- function(times, us, vs, type = 'model') {
    # Estimate Indian River Lagoon water-level setup from wind history,
    # following Colvin et al. (2016). Returns one setup value per time step
    # (NA wherever the 12-h wind run is not yet defined).
    # calculate 12 hour wind run
    windRun <- getWindRun(times, us, vs, type)
    # get wind speed and wind direction for these wind runs
    # getwspdwdir() comes from the WindVerification package; row 1 appears
    # to be speed and row 2 direction -- TODO confirm its return layout.
    windRun.spd.dir <- mapply(getwspdwdir, windRun$u, windRun$v)
    # calculate u1 and u2, the irl-oriented wind components for the northern
    # and southern irl, respectively (axis rotations of 10 and 26 degrees).
    u1 <- unlist(windRun.spd.dir[1,]) *
        cos(sapply(unlist(windRun.spd.dir[2,]) + 10, deg2rad))
    u2 <- unlist(windRun.spd.dir[1,]) *
        cos(sapply(unlist(windRun.spd.dir[2,]) + 26, deg2rad))
    # now calculate the lagoon relative wind: weighted mean of the two
    # segments (weights 28 and 70 -- presumably segment lengths per Colvin
    # et al. 2016; confirm), with the sign flipped.
    u.r <- -1 * (u1 * 28 + u2 * 70) / (28 + 70)
    # u.r <- (u1 * 28 + u2 * 70) / (28 + 70)
    # return the setup given u.r
    # Empirical power-law fit: setup = 1.637 * sign(u.r) * |u.r|^1.5 - 0.109
    return(1.637 * sign(u.r) * abs(u.r)^1.5 - .109)
}
# global variables --------------------------------------------------------
# path for /data
data.path <- paste(getwd(), '/data', sep = '')
# ensemble members
ens.mems <- c('gec00', 'gep01', 'gep02', 'gep03', 'gep04', 'gep05', 'gep06',
'gep07', 'gep08', 'gep09', 'gep10', 'gep11', 'gep12', 'gep13',
'gep14', 'gep15', 'gep16', 'gep17', 'gep18', 'gep19', 'gep20')
# # most recent data (yesterday)
# date <- format(Sys.Date() - days(1), '%Y%m%d')
# data --------------------------------------------------------------------
# open df.all, we will need a few days of gefs forecasts
df.all <- read.csv(paste(data.path, '/gefs_all.csv', sep = ''),
header = TRUE, stringsAsFactors = FALSE)
df.all$runtime <- as.POSIXct(df.all$runtime, tz = 'UTC')
df.all$validtime <- as.POSIXct(df.all$validtime, tz = 'UTC')
# we will only calculate setup for the last 4 gefs runs
gefs.runs <- tail(unique(df.all$runtime), 4)
# get data frames for only these four runs, and neglect kmlb.u & kmlb.v columns
df.gefs.1 <- df.all[df.all$runtime == gefs.runs[1],-c(46, 47)]
df.gefs.2 <- df.all[df.all$runtime == gefs.runs[2],-c(46, 47)]
df.gefs.3 <- df.all[df.all$runtime == gefs.runs[3],-c(46, 47)]
df.gefs.recent <- df.all[df.all$runtime == gefs.runs[4],-c(46, 47)]
# clear up some memory
rm(df.all)
# data frames to store setup information
gefs.setup.1 <- data.frame(validtime = df.gefs.1$validtime)
gefs.setup.2 <- data.frame(validtime = df.gefs.2$validtime)
gefs.setup.3 <- data.frame(validtime = df.gefs.3$validtime)
gefs.setup.recent <- data.frame(validtime = df.gefs.recent$validtime)
# loop through all the ensemble members and calculate the wind runs
for (ens.mem in ens.mems) {
# calculate setup for each gefs run and add it to the respective setup
# data frame
gefs.setup.1[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.1$validtime,
df.gefs.1[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.1[[paste(ens.mem, 'v', sep = '.')]])
gefs.setup.2[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.2$validtime,
df.gefs.2[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.2[[paste(ens.mem, 'v', sep = '.')]])
gefs.setup.3[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.3$validtime,
df.gefs.3[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.3[[paste(ens.mem, 'v', sep = '.')]])
gefs.setup.recent[[paste(ens.mem, 'raw', sep = '.')]] <-
getSetup(times = df.gefs.recent$validtime,
df.gefs.recent[[paste(ens.mem, 'u', sep = '.')]],
df.gefs.recent[[paste(ens.mem, 'v', sep = '.')]])
}
# clear up some memory
rm(df.gefs.1, df.gefs.2, df.gefs.3, df.gefs.recent)
# get hourly KMLB ASOS data for the last few days to plot
df.kmlb <- riem_measures(station = "MLB",
date_start = format(Sys.Date() - days(4), '%Y-%m-%d'),
date_end = format(Sys.Date() + days(1), '%Y-%m-%d'))
# Only keep the hourly updates, which happen to be the only observations with
# MSLP
df.kmlb <- df.kmlb[!is.na(df.kmlb$mslp),]
# round valid times to nearest quarter hour, n is in seconds
df.kmlb$roundvalid <- as.POSIXct(align.time(df.kmlb$valid, n=60*15))
df.kmlb$roundvalid <- as.POSIXct(format(df.kmlb$roundvalid,
'%Y-%m-%d %H:%M:%S'), tz = 'UTC')
# convert wind speed from knots to m/s
df.kmlb$wspd <- convertunits(df.kmlb$sknt, inunits = 'knots', outunits = 'm/s')
# get u and v
uv <- mapply(getuv, df.kmlb$wspd, df.kmlb$drct)
df.kmlb$u <- uv[1,]
df.kmlb$v <- uv[2,]
# calculate KMLB ASOS setup
kmlb.setup <- getSetup(df.kmlb$roundvalid, df.kmlb$u, df.kmlb$v,
type = 'asos')
# create a dataframe with just this information
asos.setup <- data.frame(roundvalid = df.kmlb$roundvalid, setup = kmlb.setup)
save(gefs.setup.1, gefs.setup.2, gefs.setup.3, gefs.setup.recent, asos.setup,
file = 'data/setup.RData')
rmarkdown::render('R/index.Rmd', output_dir = '~/Dropbox/IRLSetup/docs/')
# save df.run to disk
# write.csv(df.run, file = paste(data.path, '/', 'gefs_', date, '.csv', sep = ''),
# row.names = FALSE)
|
/R/encodeOntargetGeno.R | no_license | lje00006/DeepCpf1 | R | false | false | 2,796 | r | ||
#Matrices -- building matrices from scratch and from existing vectors
# 3x4 matrix filled with zeros (the scalar recycles to fill every cell)
matrix(0, 3, 4)
a <- 1:12
print(a)
# reshape the 12-element vector into 3 rows x 4 columns (filled column-wise)
matrix(a, 3, 4)
plank <- 1:8
# assigning dim() converts the vector in place into a 2x4 matrix
dim(plank) <- c(2, 4)
print(plank)
# a 5x5 matrix of ones
matrix(1, 5, 5)
#Matrix Access -- indexing is always [row, column]
print(plank)
plank[2, 3]
plank[1,4]
# element assignment uses the same [row, column] syntax
plank[1, 4] <- 0
# omitting an index selects a whole row or column
plank[2,]
plank[, 4]
# ranges select sub-matrices
plank[, 2:4]
#Matrix Plotting -- visualizing a matrix as terrain
elevation <- matrix(1, 10, 10)
# dig a "hole" at row 4, column 6
elevation[4, 6] <- 0
contour(elevation)
persp(elevation)
# expand < 1 flattens the vertical scale of the 3-D surface
persp(elevation, expand=0.2)
# volcano is R's built-in elevation dataset
contour(volcano)
persp(volcano, expand=0.2)
image(volcano)
| /3.R | no_license | Evonne0623/Try-R | R | false | false | 426 | r | #Matrices
matrix(0, 3, 4)
a <- 1:12
print(a)
matrix(a, 3, 4)
plank <- 1:8
dim(plank) <- c(2, 4)
print(plank)
matrix(1, 5, 5)
#Matrix Access
print(plank)
plank[2, 3]
plank[1,4]
plank[1, 4] <- 0
plank[2,]
plank[, 4]
plank[, 2:4]
#Matrix Plotting
elevation <- matrix(1, 10, 10)
elevation[4, 6] <- 0
contour(elevation)
persp(elevation)
persp(elevation, expand=0.2)
contour(volcano)
persp(volcano, expand=0.2)
image(volcano)
|
#' Find and hide dataset names in a package that match a class.
#'
#' @param package String; Package name to search datasets in.
#' @param .class Class of data to find or hide.
#'
#' @examples
#' show_data_of_class("fgeo.tool", "tbl")
#' hide_data_of_class("fgeo.tool", "data.frame")
#'
#' @family functions for developers
#' @name find_data_of_class
NULL
#' Factory to filter dataset names in a package matching some class.
#' @noRd
string_datasets_of_class <- function(.f = purrr::keep) {
function(package, .class) {
dts <- string_datasets(package)
cls <- dts %>%
lapply(get) %>%
purrr::set_names(dts) %>%
purrr::map(class)
out <- cls %>% .f(~any(grepl(.class, .x)))
unlist(out)
}
}
#' @export
#' @rdname find_data_of_class
show_data_of_class <- string_datasets_of_class(.f = purrr::keep)
#' @rdname find_data_of_class
#' @export
hide_data_of_class <- string_datasets_of_class(.f = purrr::discard)
#' String datasets in a package.
#' @noRd
string_datasets <- function(package) {
dinfo <- utils::data(package = package)
dinfo[["results"]][, "Item"]
}
| /R/find_data_of_class.R | no_license | forestgeo/fgeo.misc | R | false | false | 1,104 | r | #' Find and hide dataset names in a package that match a class.
#'
#' @param package String; Package name to search datasets in.
#' @param .class Class of data to find or hide.
#'
#' @examples
#' show_data_of_class("fgeo.tool", "tbl")
#' hide_data_of_class("fgeo.tool", "data.frame")
#'
#' @family functions for developers
#' @name find_data_of_class
NULL
#' Factory to filter dataset names in a package matching some class.
#' @noRd
string_datasets_of_class <- function(.f = purrr::keep) {
  # Factory: build a function(package, .class) that lists the datasets in
  # `package` whose class matches `.class`. The filter `.f` decides whether
  # matching entries are kept (purrr::keep) or dropped (purrr::discard).
  function(package, .class) {
    nms <- string_datasets(package)
    objs <- purrr::set_names(lapply(nms, get), nms)
    classes <- purrr::map(objs, class)
    matched <- .f(classes, ~ any(grepl(.class, .x)))
    unlist(matched)
  }
}
#' @export
#' @rdname find_data_of_class
show_data_of_class <- string_datasets_of_class(.f = purrr::keep)
#' @rdname find_data_of_class
#' @export
hide_data_of_class <- string_datasets_of_class(.f = purrr::discard)
#' String datasets in a package.
#' @noRd
string_datasets <- function(package) {
  # Names of the datasets shipped with `package`: the "Item" column of the
  # results matrix returned by utils::data().
  results <- utils::data(package = package)[["results"]]
  results[, "Item"]
}
|
###########10X_5cl
process10x_rmDupGenes <- function(genebycellmat){
  # Clean a gene-by-cell count matrix:
  #   1. drop genes (rows) whose total count is zero,
  #   2. collapse duplicated gene names, keeping the row with the largest
  #      total count (ties go to the first occurrence, as which.max does),
  #   3. drop mitochondrial genes (names starting with "MT-"),
  #   4. round counts to integers.
  # Returns the cleaned matrix with unique rownames.
  tb <- genebycellmat
  # drop = FALSE keeps the matrix class even when a single row survives
  # (the original subsetting silently collapsed 1-row results to a vector).
  tb <- tb[rowSums(tb) > 0, , drop = FALSE]
  gn <- rownames(tb)
  rs <- rowSums(tb)
  # For each unique gene name, the row index carrying the maximal total.
  kid <- vapply(unique(gn), function(sid) {
    tmp <- which(gn == sid)
    if (length(tmp) == 1L) tmp else tmp[which.max(rs[tmp])]
  }, integer(1L))
  tb <- tb[kid, , drop = FALSE]
  rownames(tb) <- gn[kid]
  # remove mitochondrial genes
  tb <- tb[!grepl("^MT-", rownames(tb)), , drop = FALSE]
  round(tb)
}
##########
bk = read.csv('/home/wucheng/imputation/DEG/GSE86337_reverse.stranded.unfiltered.count.matrix.txt', as.is=T, sep='\t')
tb = as.matrix(read.csv('/home/wucheng/imputation/DEG/GSE86337_anno.txt',sep='\t'))
suppressMessages(library(org.Hs.eg.db))
genename <- select(org.Hs.eg.db, key=as.character(bk$Entrez.Gene.IDs),columns=c("SYMBOL"),keytype="ENTREZID")$SYMBOL
bk = bk[,-1]
bk = as.matrix(bk)
rownames(bk) = genename
bk <- bk[!is.na(row.names(bk)),]
mat = process10x_rmDupGenes(bk)
colnames(mat) = tb[match(sapply(colnames(mat), function(i) paste0(strsplit(i,'_')[[1]][1:2],collapse='_')), tb[,'Sample.name']), 'characteristics.cell.line']
colnames(mat) = paste0(colnames(mat),'_', 1:ncol(mat))
saveRDS(mat,'/home/wucheng/imputation/DEG/GSE86337_processed_count.rds')
#####bulk deg
cnt <- readRDS("/home/wucheng/imputation/DEG/GSE86337_processed_count.rds")
ct <- c("HCC827","HCC827","H2228","H2228","H838","H838","A549","A549","H1975","H1975")
for (i in unique(ct)){colnames(cnt)[ct == i] <- paste0(i,'_',1:sum(ct==i))
}
for (n1 in 1:(length(unique(ct))-1)){
i = unique(ct)[n1]
for (n2 in ((n1+1):length(unique(ct)))){
j = unique(ct)[n2]
print(paste0(i,'_',j))
expr = cnt[, ct %in% c(i,j)]
expr = expr[rowSums(expr>0)>0,]
sct <- sub('_.*','',colnames(expr))
library(limma)
des <- cbind(1,ifelse(sct==i,1,0))
fit <- eBayes(lmFit(voom(expr,des),design=des))
res <- topTable(fit,coef=2,number=nrow(expr))
# ind <-intersect(c(which(res[,1]>=2),which(res[,1]<=(-2))),which(res[,'adj.P.Val']<=0.05))
# res <- res[res[,'adj.P.Val']<0.05,]
ind <-intersect(c(which(res[,1]>=2),which(res[,1]<=(-2))),which(res[,'adj.P.Val']<=0.05))
res <- res[ind,]
gs <- rownames(res)
saveRDS(gs,paste0('/home/wucheng/imputation/deg/10x_5cl/bulk2/',i,'_',j,'_diffgene.rds'))
}
}
#######sc deg
pbmc.data = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
pbmc <- CreateSeuratObject(counts = as.data.frame(pbmc.data))
pbmc
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
metadata <-as.data.frame(as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=",")))
pbmc@active.ident <-metadata[,29]
ct <-c("HCC827","H2228","H838","A549","H1975")
for (n1 in 1:(length(ct)-1)){
i = unique(ct)[n1]
for (n2 in ((n1+1):length(ct))){
j = unique(ct)[n2]
print(paste0(i,'_',j))
cluster.markers <- FindMarkers(pbmc, ident.1 = i, ident.2 = j, min.pct = 0,logfc.threshold=0,test.use = "wilcox")
saveRDS(cluster.markers,paste0('/home/wucheng/imputation/deg/10x_5cl/Method/Raw/wilcox/',i,'_',j,'_diffgene.rds'))
clu.markers <- FindMarkers(pbmc, ident.1 = i, ident.2 = j, min.pct = 0,logfc.threshold=0,test.use = "MAST")
saveRDS(clu.markers,paste0('/home/wucheng/imputation/deg/10x_5cl/Method/Raw/MAST/',i,'_',j,'_diffgene.rds'))
}}
#########overlap
##############bulk
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[1]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk/', sub('.rds','',f),'.rds'))
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps.rds')
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk/', sub('.rds','',f),'.rds'))
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps.rds')
######bulk1
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[1]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk2/', sub('.rds','',f),'.rds'))
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps2.rds')
##
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk2/', sub('.rds','',f),'.rds'))
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps2.rds')
#######plot
library(reshape2)
library(ggplot2)
library(ggrepel)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o2 <- rowMeans(ove)
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=o2,Wilcox=o1,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_mast_compare.pdf',width=4,height=4)
ggplot(pd,aes(x=MAST,y=Wilcox,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')
dev.off()
##
## Same Wilcoxon-vs-MAST comparison, but against the bulk2 gold standard.
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o2 <- rowMeans(ove)
int <- intersect(names(o1),names(o2))
## NOTE(review): o2/o1 are used positionally while `mtd` comes from the name
## intersection -- alignment is correct only if both share the same order.
pd <- data.frame(MAST=o2,Wilcox=o1,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_mast_compare2.pdf',width=4,height=4)
ggplot(pd,aes(x=MAST,y=Wilcox,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')
dev.off()
#################################################
## Null-DE experiment: split A549 cells into two random groups of varying
## sizes and run DE between them -- any hit is a false positive.
## `readloom` is a project helper (defined elsewhere) -- TODO confirm.
pbmc.data = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
metadata <-as.data.frame(as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=",")))
## Column 29 of the metadata appears to hold the cell-line label -- verify.
imp <-pbmc.data[,which(metadata[29]=="A549")]
## All (n1, n2) group-size combinations with n1 <= n2.
df = expand.grid(c(10,50,100,500),c(10,50,100,500))
colnames(df) = c('n1','n2')
df = df[df[,'n1']<=df[,'n2'],]
for (i in 1:nrow(df)){
print(i)
cn1 = df[i,'n1']
cn2 = df[i,'n2']
## Same seed each iteration so cell sampling is reproducible per size pair.
set.seed(12345)
id = sample(1:ncol(imp), cn1+cn2)
expr = imp[,id]
pbmc <- CreateSeuratObject(counts = as.data.frame(expr))
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
## Arbitrary two-group labels; column 4 of meta.data is V1 -- TODO confirm.
pbmc@meta.data$V1 <-c(rep("cn1",cn1),rep("cn2",cn2))
pbmc@active.ident <-as.data.frame(as.matrix(pbmc@meta.data))[,4]
cluster.markers <- FindMarkers(pbmc, ident.1 = "cn1", ident.2 = "cn2", min.pct = 0.1,logfc.threshold=0,test.use = "wilcox")
cluster1.markers <- FindMarkers(pbmc, ident.1 = "cn1", ident.2 = "cn2", min.pct = 0.1,logfc.threshold=0,test.use = "MAST")
saveRDS(cluster.markers, paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/Raw/wilcox/',cn1,'_',cn2,'.rds'))
saveRDS(cluster1.markers, paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/Raw/MAST/',cn1,'_',cn2,'.rds'))
}
######
## Count false-positive DEGs (p<=0.01 and |avg_logFC|>=0.25) per method and
## per group-size pair for the Wilcoxon null-DE results, then save the
## per-method means.
source('/home/wucheng/imputation/DEG/function.R')
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/')
df <- sapply(allmtd, function(mtd){
rdir = paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/', mtd,'/wilcox/')
af = list.files(rdir)
sapply(af, function(f){
res <- readRDS(paste0(rdir,f))
up <-intersect(which(res$p_val<=0.01),which(res$avg_logFC >=0.25))
down <-intersect(which(res$p_val<=0.01),which(res$avg_logFC <(-0.25)))
sum(length(up),length(down))
})
})
## sapply returns a list when methods have differing file sets; normalise to
## a matrix either way.
if (is.list(df)){
df = df[sapply(df,length)>0]
pd = as.matrix(do.call(cbind, df))
} else {
pd = df
}
## Normalise rownames across the filename variants produced upstream.
if (grepl('_0_0.rds.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds.rds','',rownames(pd))
} else if (grepl('_0_0.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds','',rownames(pd))
} else {
rownames(pd) = sub('.rds','',rownames(pd))
}
library(reshape2)
pd = melt(pd)
colnames(pd) = c('data','method','Num')
## mtdorder is computed but unused here -- kept for parity with other runs.
mtdorder = names(sort(tapply(pd[,'Num'],list(pd[,'method']), mean), decreasing = T))
stat = tapply(pd[,'Num'],list(pd[,'method']), mean)
saveRDS(stat,paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds'))
#######
## Same false-positive count as the Wilcoxon section above, for the MAST
## null-DE results.
source('/home/wucheng/imputation/DEG/function.R')
library(RColorBrewer)
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/')
df <- sapply(allmtd, function(mtd){
rdir = paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/', mtd,'/MAST/')
af = list.files(rdir)
sapply(af, function(f){
res <- readRDS(paste0(rdir,f))
up <-intersect(which(res$p_val<=0.01),which(res$avg_logFC >=0.25))
down <-intersect(which(res$p_val<=0.01),which(res$avg_logFC <(-0.25)))
sum(length(up),length(down))
})
})
## Normalise list/matrix return of sapply (methods may differ in file sets).
if (is.list(df)){
df = df[sapply(df,length)>0]
pd = as.matrix(do.call(cbind, df))
} else {
pd = df
}
if (grepl('_0_0.rds.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds.rds','',rownames(pd))
} else if (grepl('_0_0.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds','',rownames(pd))
} else {
rownames(pd) = sub('.rds','',rownames(pd))
}
library(reshape2)
pd = melt(pd)
colnames(pd) = c('data','method','Num')
mtdorder = names(sort(tapply(pd[,'Num'],list(pd[,'method']), mean), decreasing = T))
stat = tapply(pd[,'Num'],list(pd[,'method']), mean)
saveRDS(stat,paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds'))
##############
## Scatter the per-method null-DE false-positive counts: MAST vs Wilcoxon.
source('/home/wucheng/imputation/DEG/function.R')
library(reshape2)
library(ggplot2)
library(ggrepel)
mast_fp <- readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds')
wilcox_fp <- readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds')
shared_mtd <- intersect(names(mast_fp), names(wilcox_fp))
fp_df <- data.frame(MAST = mast_fp, Wilcoxon = wilcox_fp, mtd = shared_mtd)
x_floor <- 0
y_floor <- 0
pdf('/home/wucheng/imputation/deg/10x_5cl/NULLDE/wilcox_mast_compare.pdf', width = 4, height = 4)
ggplot(fp_df, aes(x = MAST, y = Wilcoxon, label = mtd, color = mtd)) +
  geom_point() +
  geom_text_repel() +
  theme_bw() +
  theme(legend.position = 'none') +
  xlim(c(x_floor, max((fp_df$MAST) + 2))) +
  ylim(c(y_floor, max(fp_df$Wilcoxon) + 2))
dev.off()
###############
## Four scatter plots of (1 - bulk overlap) vs null-DE false-positive count
## per method: MAST and Wilcoxon, against both bulk and bulk2 standards.
## NOTE(review): the data.frame columns are named MAST/Wilcoxon regardless of
## the test actually plotted -- only the axis labels are meaningful.
library(ggplot2)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/mast_compare.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_compare.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
###
## Same two plots using the bulk2 (stricter) gold standard.
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/mast_compare2.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_compare2.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
############
#############bulk 1.5
## Bulk-overlap stratified by raw fold change (Wilcoxon results): per method
## and pair, compute the overlap separately for genes in the top and bottom
## deciles of |logFC| measured on the Raw (unimputed) data.
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/'))
#raw = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
#metadata <-as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=","))
#ct <-matrix(metadata[,29])[,1]
f=allf[1]
ove <- sapply(allmtd, function(mtd){
print(mtd)
t(sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))
## Rank by adjusted p-value here (the unstratified runs rank by p_val).
res = res[order(res[,'p_val_adj']),]
ct1 <- sub('_.*','',f)
ct2 <- sub('.*_','',sub('_diffgene.rds','',f))
## Fold changes are always taken from the Raw method; column 2 is
## presumably avg_logFC -- TODO confirm against FindMarkers output.
raw_res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',"Raw",'/wilcox/',f))
fc <- abs(as.matrix(raw_res)[,2])
highid = names(which(fc>quantile(fc,0.9)))## express in more cells
lowid = names(which(fc<quantile(fc,0.1)))## less
#highid = names(which(rowMeans(tmpmat>0)>quantile(rowMeans(tmpmat>0),0.9)))## express in more cells
#lowid = names(which(rowMeans(tmpmat>0)<quantile(rowMeans(tmpmat>0),0.1)))## less
#bfex <- list.files('/home/wucheng/imputation/deg/10x_5cl/bulk/')
#bf <- c(paste0(ct1,'_',ct2,'_diffgene.rds'),paste0(ct2,'_',ct1,'_diffgene.rds'))
#bf <- intersect(bfex,bf)
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk1/',f))
tmpres <- res[rownames(res) %in% highid,]
## NOTE(review): tmp1 has no na.rm while tmp2 does; when i exceeds
## nrow(tmpres) the out-of-range rows get "NA."-style names, which are
## simply not %in% gs, so both means are still defined.
tmp1 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}))
tmpres <- res[rownames(res) %in% lowid,]
tmp2 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}),na.rm=T)
c(tmp1,tmp2)
} else {
return(c(NA,NA))
}
}))
}) ## first 10 high, last 10 low
## Each method contributed a 10x2 matrix that sapply flattened column-major:
## rows 1:10 are the high-FC overlaps, rows 11:20 the low-FC overlaps.
ove_high = t(ove[1:10,])
ove_low = t(ove[11:20,])
colnames(ove_high) <- colnames(ove_low) <- sub('.rds','', allf)
rdir = '/home/wucheng/imputation/deg/10x_5cl/high_low1.5/wilcox/'
dir.create(rdir,showWarnings = F, recursive = T)
saveRDS(ove_high, paste0(rdir,'bulk_sc_diffgene_overlaps_moreExprGene.rds'))
saveRDS(ove_low, paste0(rdir,'bulk_sc_diffgene_overlaps_lessExprGene.rds'))
###
## Fold-change-stratified bulk overlap, MAST variant (mirrors the Wilcoxon
## section above; see its comments for details).
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/'))
#raw = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
#metadata <-as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=","))
#ct <-matrix(metadata[,29])[,1]
f=allf[1]
ove <- sapply(allmtd, function(mtd){
print(mtd)
t(sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))
res = res[order(res[,'p_val_adj']),]
ct1 <- sub('_.*','',f)
ct2 <- sub('.*_','',sub('_diffgene.rds','',f))
## |logFC| deciles always come from the Raw method's results.
raw_res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',"Raw",'/MAST/',f))
fc <- abs(as.matrix(raw_res)[,2])
highid = names(which(fc>quantile(fc,0.9)))## express in more cells
lowid = names(which(fc<quantile(fc,0.1)))## less
#highid = names(which(rowMeans(tmpmat>0)>quantile(rowMeans(tmpmat>0),0.9)))## express in more cells
#lowid = names(which(rowMeans(tmpmat>0)<quantile(rowMeans(tmpmat>0),0.1)))## less
#bfex <- list.files('/home/wucheng/imputation/deg/10x_5cl/bulk/')
#bf <- c(paste0(ct1,'_',ct2,'_diffgene.rds'),paste0(ct2,'_',ct1,'_diffgene.rds'))
#bf <- intersect(bfex,bf)
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk1/',f))
tmpres <- res[rownames(res) %in% highid,]
## NOTE(review): na.rm asymmetry between tmp1 and tmp2, as in the
## Wilcoxon variant.
tmp1 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}))
tmpres <- res[rownames(res) %in% lowid,]
tmp2 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}),na.rm=T)
c(tmp1,tmp2)
} else {
return(c(NA,NA))
}
}))
}) ## first 10 high, last 10 low
ove_high = t(ove[1:10,])
ove_low = t(ove[11:20,])
colnames(ove_high) <- colnames(ove_low) <- sub('.rds','', allf)
rdir = '/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/'
dir.create(rdir,showWarnings = F, recursive = T)
saveRDS(ove_high, paste0(rdir,'bulk_sc_diffgene_overlaps_moreExprGene.rds'))
saveRDS(ove_low, paste0(rdir,'bulk_sc_diffgene_overlaps_lessExprGene.rds'))
##########
###########boxplot
## Boxplots of the fold-change-stratified MAST overlaps per method.
## Relies on reshape2/ggplot2 being attached earlier in the script.
ove_high <-readRDS("/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/bulk_sc_diffgene_overlaps_moreExprGene.rds")
## Printed range used to pick the hard-coded ylim below.
c(min(ove_high),max(ove_high))
df <-melt(ove_high)
levels <-c("Raw","DISC","scImpute","VIPER","MAGIC","DCA","deepImpute","scScope","scVI")
p <- ggplot(df, aes(x=factor(Var1,levels=levels), y=value, fill=factor(Var1,levels=levels))) + geom_boxplot()
p <-p+ ylim(0.30,0.75)+theme_classic()+theme(axis.text.x = element_text(size = 12,angle = 45, hjust = 1, vjust = 1, face = "bold"))+theme(axis.text.y = element_text(size = 12, hjust = 1, vjust = 1, face = "bold"))
pdf(file="/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/high_boxplot.pdf",width=6,height=4)
p+theme(legend.title=element_blank())
dev.off()
ove_low <-readRDS("/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/bulk_sc_diffgene_overlaps_lessExprGene.rds")
c(min(ove_low),max(ove_low))
df <-melt(ove_low)
levels <-c("Raw","DISC","scImpute","VIPER","MAGIC","DCA","deepImpute","scScope","scVI")
p <- ggplot(df, aes(x=factor(Var1,levels=levels), y=value, fill=factor(Var1,levels=levels))) + geom_boxplot()
p <-p+ ylim(0,0.16)+theme_classic()+theme(axis.text.x = element_text(size = 12,angle = 45, hjust = 1, vjust = 1, face = "bold"))+theme(axis.text.y = element_text(size = 12, hjust = 1, vjust = 1, face = "bold"))
pdf(file="/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/low_boxplot.pdf",width=6,height=4)
p+theme(legend.title=element_blank())
dev.off()
####
## Recompute bulk limma/voom DEG tables over a grid of |logFC| cutoffs
## (always requiring adj.P.Val <= 0.05), writing one directory per cutoff:
## .../bulk_cutoff/bulk<cutoff>/<line1>_<line2>_diffgene.rds
cnt <- readRDS("/home/wucheng/imputation/DEG/GSE86337_processed_count.rds")
ct <- c("HCC827","HCC827","H2228","H2228","H838","H838","A549","A549","H1975","H1975")
## Make column names unique per cell line: <line>_1, <line>_2, ...
for (i in unique(ct)){
  colnames(cnt)[ct == i] <- paste0(i,'_',seq_len(sum(ct==i)))
}
library(limma)  # hoisted: was re-attached on every inner-loop iteration
cutoff <-c(0.0,1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2.0)
uct <- unique(ct)  # hoisted: unique() was recomputed in every loop test
for(cut in seq_along(cutoff)){        # seq_along, not 1:length()
  ## All unordered cell-line pairs (n1 < n2).
  for (n1 in seq_len(length(uct)-1)){
    i = uct[n1]
    for (n2 in ((n1+1):length(uct))){
      j = uct[n2]
      print(paste0(i,'_',j))
      ## Keep only the two lines' replicates and genes expressed somewhere.
      expr = cnt[, ct %in% c(i,j)]
      expr = expr[rowSums(expr>0)>0,]
      sct <- sub('_.*','',colnames(expr))
      ## Intercept + indicator for line i; coef 2 is the i-vs-j effect.
      des <- cbind(1,ifelse(sct==i,1,0))
      fit <- eBayes(lmFit(voom(expr,des),design=des))
      res <- topTable(fit,coef=2,number=nrow(expr))
      ## Significant at this cutoff: |logFC| >= cutoff and adj.P <= 0.05.
      ind <-intersect(c(which(res[,1]>=(cutoff[cut])),which(res[,1]<=(-cutoff[cut]))),which(res[,'adj.P.Val']<=0.05))
      res <- res[ind,]
      rdir = paste0('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/','bulk',cutoff[cut])
      dir.create(rdir, showWarnings = FALSE, recursive = TRUE)
      ## Reuse rdir instead of re-pasting the whole path (same file as before).
      saveRDS(res, paste0(rdir,'/',i,'_',j,'_diffgene.rds'))
    }
  }
}
## Count the DEGs stored per bulk1 file, then per (cutoff directory, pair)
## combination; only the cutoff table is persisted.
allmtd <- list.files('/home/wucheng/imputation/deg/10x_5cl/bulk1/')
ove <- sapply(allmtd, function(fname) {
  length(readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk1/', fname)))
})
allmtd <- list.files('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/')
allf <- list.files('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/bulk0')
ove <- sapply(allmtd, function(cutdir) {
  print(cutdir)
  sapply(allf, function(pairfile) {
    print(pairfile)
    deg <- readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/', cutdir, '/', pairfile))
    nrow(deg)
  })
})
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/cutoff.rds')
###########
library(reshape2)
## Stack the ten per-fold-change-bin overlap matrices (ove_high1..ove_high10,
## built elsewhere -- TODO confirm) into one long data frame, tagging each
## with its bin label "top<k>". Replaces ten copy-pasted melt blocks; the
## result of do.call(rbind, ...) is identical to the original
## rbind(high1, ..., high10).
high <- do.call(rbind, lapply(1:10, function(k) {
  m <- melt(get(paste0("ove_high", k)))
  m$V1 <- paste0("top", k)
  m
}))
saveRDS(high, "/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/high.rds")
## NOTE(review): the Windows path below suggests the plotting half was run on
## a different machine from the half above.
high <-readRDS("C:/Users/ADMIN/Desktop/imputation/DEG/high.rds")
## Keep only the Raw and DISC methods, Raw rows first.
index <-which(high[,1]=="Raw")
index1 <-which(high[,1]=="DISC")
high <-high[c(index,index1),]
levels <-c("top1","top2","top3","top4","top5","top6","top7","top8","top9","top10")
dodge <- position_dodge(width = 0.4)
## brewer.pal requires RColorBrewer, attached earlier in the script.
ggplot(high, aes(x=factor(V1,levels=levels), y=value,fill = Var1)) + stat_boxplot(geom="errorbar",width=0.15,position = dodge)+geom_boxplot(width=0.4)+
ylim(0,1)+theme(legend.title=element_blank())+theme(axis.text.x = element_text(size = 12,angle = 45, hjust = 1, vjust = 1, face = "bold"))+
labs(x="foldchange interval", y = "overlap")+theme_classic()+ scale_fill_manual(values = brewer.pal(3,"Set1")[c(1,2)])
## Line plots of overlap vs bulk-logFC cutoff for the high/low strata,
## MAST then Wilcoxon. Run on a Windows machine (note the C:/ paths).
mast <-readRDS("C:/Users/ADMIN/Desktop/imputation/DEG/MAST_bulk_sc_diffgene_overlaps_morelessExprGene.rds")
library(reshape2)
library(ggplot2)
df <-melt(mast)
p <-ggplot(df, aes(x=factor(Var1), y=value, colour=Var2,group=Var2)) + geom_line(size=1)+geom_point(size=2)
## "precent" is a typo for "percent" in the original axis label, preserved
## here because it is a runtime string.
p<- p + labs( x="cutoff", y = "precent")+theme_classic()
p+ ylim(0,0.85)+theme(legend.title=element_blank())
p
mast <-readRDS("C:/Users/ADMIN/Desktop/imputation/DEG/wilcox_bulk_sc_diffgene_overlaps_morelessExprGene.rds")
library(reshape2)
library(ggplot2)
df <-melt(mast)
p <-ggplot(df, aes(x=factor(Var1), y=value, colour=Var2,group=Var2)) + geom_line(size=1)+geom_point(size=2)
p<- p + labs( x="cutoff", y = "precent")+theme_classic()
p+ ylim(0,0.85)+theme(legend.title=element_blank())
p
## Source: /reproducibility/Down-stream Analysis Improvement/raw_scripts/DEG_10x_5cl.r
## (repo iyhaoo/DISC, permissive license)
###########10X_5cl
process10x_rmDupGenes <- function(genebycellmat){
  ## Clean a gene-by-cell count matrix:
  ##  1. drop genes with zero total counts,
  ##  2. collapse duplicated gene symbols, keeping the row with the largest
  ##     total count,
  ##  3. drop mitochondrial genes (symbols starting with "MT-"),
  ##  4. round counts to integers.
  ##
  ## @param genebycellmat numeric matrix; rows = genes (rownames = symbols),
  ##   columns = cells.
  ## @return cleaned, integer-valued matrix with unique, non-MT rownames.
  ##
  ## Fixes vs the original: drop = FALSE keeps matrix shape when a filter
  ## leaves a single row (the original collapsed to a vector and crashed);
  ## vapply replaces sapply for a type-stable index vector; the result is
  ## returned visibly instead of via a trailing assignment.
  tb <- genebycellmat
  tb <- tb[rowSums(tb) > 0, , drop = FALSE]
  gn <- rownames(tb)
  rs <- rowSums(tb)
  ## For each unique symbol, the index of its highest-total-count row.
  kid <- vapply(unique(gn), function(sid) {
    idx <- which(gn == sid)
    if (length(idx) == 1L) idx else idx[which.max(rs[idx])]
  }, integer(1))
  tb <- tb[kid, , drop = FALSE]
  rownames(tb) <- gn[kid]
  tb <- tb[!grepl('^MT-', rownames(tb)), , drop = FALSE]
  round(tb)
}
##########
## Build the processed bulk count matrix: map Entrez IDs to symbols, clean
## duplicates/MT genes via process10x_rmDupGenes, and name columns by cell
## line. Requires Bioconductor's org.Hs.eg.db.
bk = read.csv('/home/wucheng/imputation/DEG/GSE86337_reverse.stranded.unfiltered.count.matrix.txt', as.is=T, sep='\t')
tb = as.matrix(read.csv('/home/wucheng/imputation/DEG/GSE86337_anno.txt',sep='\t'))
suppressMessages(library(org.Hs.eg.db))
## NOTE(review): `key=` relies on partial matching of AnnotationDbi's `keys=`
## argument -- works, but should be spelled out.
genename <- select(org.Hs.eg.db, key=as.character(bk$Entrez.Gene.IDs),columns=c("SYMBOL"),keytype="ENTREZID")$SYMBOL
## Drop the Entrez ID column and re-key rows by gene symbol.
bk = bk[,-1]
bk = as.matrix(bk)
rownames(bk) = genename
bk <- bk[!is.na(row.names(bk)),]
mat = process10x_rmDupGenes(bk)
## Map GEO sample names to cell-line labels via the annotation table, then
## de-duplicate with a running suffix.
colnames(mat) = tb[match(sapply(colnames(mat), function(i) paste0(strsplit(i,'_')[[1]][1:2],collapse='_')), tb[,'Sample.name']), 'characteristics.cell.line']
colnames(mat) = paste0(colnames(mat),'_', 1:ncol(mat))
saveRDS(mat,'/home/wucheng/imputation/DEG/GSE86337_processed_count.rds')
#####bulk deg
## Bulk gold-standard DEGs per cell-line pair via limma/voom:
## |logFC| >= 2 and adj.P.Val <= 0.05; only the gene names are saved (bulk2).
cnt <- readRDS("/home/wucheng/imputation/DEG/GSE86337_processed_count.rds")
ct <- c("HCC827","HCC827","H2228","H2228","H838","H838","A549","A549","H1975","H1975")
## Make column names unique per line: <line>_1, <line>_2.
for (i in unique(ct)){colnames(cnt)[ct == i] <- paste0(i,'_',1:sum(ct==i))
}
## All unordered cell-line pairs.
for (n1 in 1:(length(unique(ct))-1)){
i = unique(ct)[n1]
for (n2 in ((n1+1):length(unique(ct)))){
j = unique(ct)[n2]
print(paste0(i,'_',j))
expr = cnt[, ct %in% c(i,j)]
expr = expr[rowSums(expr>0)>0,]
sct <- sub('_.*','',colnames(expr))
## NOTE(review): library() inside a double loop re-attaches limma each
## iteration; harmless but wasteful.
library(limma)
## Intercept + indicator for line i; coef 2 tests i vs j.
des <- cbind(1,ifelse(sct==i,1,0))
fit <- eBayes(lmFit(voom(expr,des),design=des))
res <- topTable(fit,coef=2,number=nrow(expr))
# ind <-intersect(c(which(res[,1]>=2),which(res[,1]<=(-2))),which(res[,'adj.P.Val']<=0.05))
# res <- res[res[,'adj.P.Val']<0.05,]
ind <-intersect(c(which(res[,1]>=2),which(res[,1]<=(-2))),which(res[,'adj.P.Val']<=0.05))
res <- res[ind,]
gs <- rownames(res)
saveRDS(gs,paste0('/home/wucheng/imputation/deg/10x_5cl/bulk2/',i,'_',j,'_diffgene.rds'))
}
}
#######sc deg
## Single-cell DEGs on the Raw (unimputed) data for every cell-line pair,
## with both Wilcoxon and MAST tests. `readloom` is a project helper.
pbmc.data = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
pbmc <- CreateSeuratObject(counts = as.data.frame(pbmc.data))
pbmc
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
metadata <-as.data.frame(as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=",")))
## Column 29 appears to be the cell-line assignment -- TODO confirm.
pbmc@active.ident <-metadata[,29]
ct <-c("HCC827","H2228","H838","A549","H1975")
for (n1 in 1:(length(ct)-1)){
i = unique(ct)[n1]
for (n2 in ((n1+1):length(ct))){
j = unique(ct)[n2]
print(paste0(i,'_',j))
## min.pct = 0 / logfc.threshold = 0 so every gene gets a p-value.
cluster.markers <- FindMarkers(pbmc, ident.1 = i, ident.2 = j, min.pct = 0,logfc.threshold=0,test.use = "wilcox")
saveRDS(cluster.markers,paste0('/home/wucheng/imputation/deg/10x_5cl/Method/Raw/wilcox/',i,'_',j,'_diffgene.rds'))
clu.markers <- FindMarkers(pbmc, ident.1 = i, ident.2 = j, min.pct = 0,logfc.threshold=0,test.use = "MAST")
saveRDS(clu.markers,paste0('/home/wucheng/imputation/deg/10x_5cl/Method/Raw/MAST/',i,'_',j,'_diffgene.rds'))
}}
#########overlap
##############bulk
## Mean top-k overlap of each method's single-cell DEGs (ranked by p_val)
## with the bulk gold standard, for Wilcoxon then MAST.
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[1]
## Pair-file list from one method's folder; missing files give NA below.
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk/', sub('.rds','',f),'.rds'))
## Mean precision over the top 10,20,...,1000 ranked genes.
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps.rds')
## MAST variant of the same computation.
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk/', sub('.rds','',f),'.rds'))
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps.rds')
######bulk1
## Same overlap computation against the bulk2 gold standard (section header
## above says "bulk1" but the paths read bulk2/ -- NOTE(review)).
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[1]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk2/', sub('.rds','',f),'.rds'))
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps2.rds')
##
## MAST variant.
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/'))
ove <- sapply(allmtd, function(mtd){
sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))
res = res[order(res[,'p_val']),]
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk2/', sub('.rds','',f),'.rds'))
tmp <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(res)[1:i] %in% gs) ## discuss
}))
} else {
return(NA)
}
})
})
ove = t(ove)
colnames(ove) = sub('.rds','', colnames(ove))
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps2.rds')
#######plot
## Scatter plots of per-method mean overlap, Wilcoxon vs MAST, for both gold
## standards (duplicate of the plotting section earlier in this file).
library(reshape2)
library(ggplot2)
library(ggrepel)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o2 <- rowMeans(ove)
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=o2,Wilcox=o1,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_mast_compare.pdf',width=4,height=4)
ggplot(pd,aes(x=MAST,y=Wilcox,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')
dev.off()
##
## bulk2 variant.
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o2 <- rowMeans(ove)
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=o2,Wilcox=o1,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_mast_compare2.pdf',width=4,height=4)
ggplot(pd,aes(x=MAST,y=Wilcox,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')
dev.off()
#################################################
## Null-DE experiment (duplicate of the earlier section): random splits of
## A549 cells, DE between the halves = false positives by construction.
pbmc.data = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
metadata <-as.data.frame(as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=",")))
imp <-pbmc.data[,which(metadata[29]=="A549")]
df = expand.grid(c(10,50,100,500),c(10,50,100,500))
colnames(df) = c('n1','n2')
df = df[df[,'n1']<=df[,'n2'],]
for (i in 1:nrow(df)){
print(i)
cn1 = df[i,'n1']
cn2 = df[i,'n2']
## Fixed seed per iteration for reproducible cell sampling.
set.seed(12345)
id = sample(1:ncol(imp), cn1+cn2)
expr = imp[,id]
pbmc <- CreateSeuratObject(counts = as.data.frame(expr))
pbmc <- NormalizeData(pbmc, normalization.method = "LogNormalize", scale.factor = 10000)
pbmc@meta.data$V1 <-c(rep("cn1",cn1),rep("cn2",cn2))
pbmc@active.ident <-as.data.frame(as.matrix(pbmc@meta.data))[,4]
cluster.markers <- FindMarkers(pbmc, ident.1 = "cn1", ident.2 = "cn2", min.pct = 0.1,logfc.threshold=0,test.use = "wilcox")
cluster1.markers <- FindMarkers(pbmc, ident.1 = "cn1", ident.2 = "cn2", min.pct = 0.1,logfc.threshold=0,test.use = "MAST")
saveRDS(cluster.markers, paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/Raw/wilcox/',cn1,'_',cn2,'.rds'))
saveRDS(cluster1.markers, paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/Raw/MAST/',cn1,'_',cn2,'.rds'))
}
######
## Per-method null-DE false-positive counts, Wilcoxon (duplicate section).
source('/home/wucheng/imputation/DEG/function.R')
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/')
df <- sapply(allmtd, function(mtd){
rdir = paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/', mtd,'/wilcox/')
af = list.files(rdir)
sapply(af, function(f){
res <- readRDS(paste0(rdir,f))
## False positives: p <= 0.01 and |avg_logFC| >= 0.25 (either direction).
up <-intersect(which(res$p_val<=0.01),which(res$avg_logFC >=0.25))
down <-intersect(which(res$p_val<=0.01),which(res$avg_logFC <(-0.25)))
sum(length(up),length(down))
})
})
## sapply may return a list when methods differ in file sets.
if (is.list(df)){
df = df[sapply(df,length)>0]
pd = as.matrix(do.call(cbind, df))
} else {
pd = df
}
if (grepl('_0_0.rds.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds.rds','',rownames(pd))
} else if (grepl('_0_0.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds','',rownames(pd))
} else {
rownames(pd) = sub('.rds','',rownames(pd))
}
library(reshape2)
pd = melt(pd)
colnames(pd) = c('data','method','Num')
mtdorder = names(sort(tapply(pd[,'Num'],list(pd[,'method']), mean), decreasing = T))
stat = tapply(pd[,'Num'],list(pd[,'method']), mean)
saveRDS(stat,paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds'))
#######
## Per-method null-DE false-positive counts, MAST (duplicate section).
source('/home/wucheng/imputation/DEG/function.R')
library(RColorBrewer)
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/')
df <- sapply(allmtd, function(mtd){
rdir = paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/Method/', mtd,'/MAST/')
af = list.files(rdir)
sapply(af, function(f){
res <- readRDS(paste0(rdir,f))
up <-intersect(which(res$p_val<=0.01),which(res$avg_logFC >=0.25))
down <-intersect(which(res$p_val<=0.01),which(res$avg_logFC <(-0.25)))
sum(length(up),length(down))
})
})
if (is.list(df)){
df = df[sapply(df,length)>0]
pd = as.matrix(do.call(cbind, df))
} else {
pd = df
}
if (grepl('_0_0.rds.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds.rds','',rownames(pd))
} else if (grepl('_0_0.rds',rownames(pd)[1])){
rownames(pd) = sub('_0_0.rds','',rownames(pd))
} else {
rownames(pd) = sub('.rds','',rownames(pd))
}
library(reshape2)
pd = melt(pd)
colnames(pd) = c('data','method','Num')
mtdorder = names(sort(tapply(pd[,'Num'],list(pd[,'method']), mean), decreasing = T))
stat = tapply(pd[,'Num'],list(pd[,'method']), mean)
saveRDS(stat,paste0('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds'))
##############
## Scatter of per-method null-DE false positives, MAST vs Wilcoxon
## (duplicate of the earlier plotting section).
source('/home/wucheng/imputation/DEG/function.R')
library(reshape2)
library(ggplot2)
library(ggrepel)
o1 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds')
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=o1,Wilcoxon=o2,mtd=int)
xmin <- (0)
ymin <- (0)
pdf('/home/wucheng/imputation/deg/10x_5cl/NULLDE/wilcox_mast_compare.pdf',width=4,height=4)
ggplot(pd,aes(x=MAST,y=Wilcoxon,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlim(c(xmin,max((pd$MAST)+2))) + ylim(c(ymin,max(pd$Wilcoxon)+2))
dev.off()
###############
## (1 - overlap) vs false-positive count per method, four plots
## (duplicate of the earlier section; column names MAST/Wilcoxon are reused
## regardless of the test plotted -- only axis labels are meaningful).
library(ggplot2)
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/mast_compare.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_compare.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
###
## bulk2 variants of the same two plots.
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/MAST_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_MAST.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/mast_compare2.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
ove = readRDS('/home/wucheng/imputation/deg/10x_5cl/wilcox_bulk_sc_overlaps2.rds')
ove = ove[rowMeans(is.na(ove))<1, ]
o1 <- rowMeans(ove)
o2 = readRDS('/home/wucheng/imputation/deg/10x_5cl/NULLDE/10x_5cl_wilcox.rds')
int <- intersect(names(o1),names(o2))
pd <- data.frame(MAST=(1-o1),Wilcoxon=o2,mtd=int)
pdf('/home/wucheng/imputation/deg/10x_5cl/wilcox_compare2.pdf',width=4,height=4)
ggplot(pd,aes(x=Wilcoxon,y=MAST,label=mtd,color=mtd)) + geom_point() + geom_text_repel() + theme_bw() + theme(legend.position = 'none')+
xlab("diff_number") + ylab("1-overlap")
dev.off()
############
#############bulk 1.5
## Fold-change-stratified bulk overlap, Wilcoxon (duplicate of the earlier
## "bulk 1.5" section; see its comments for details).
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/'))
#raw = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
#metadata <-as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=","))
#ct <-matrix(metadata[,29])[,1]
f=allf[1]
ove <- sapply(allmtd, function(mtd){
print(mtd)
t(sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/wilcox/',f))
res = res[order(res[,'p_val_adj']),]
ct1 <- sub('_.*','',f)
ct2 <- sub('.*_','',sub('_diffgene.rds','',f))
## |logFC| deciles are always computed on the Raw method's results.
raw_res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',"Raw",'/wilcox/',f))
fc <- abs(as.matrix(raw_res)[,2])
highid = names(which(fc>quantile(fc,0.9)))## express in more cells
lowid = names(which(fc<quantile(fc,0.1)))## less
#highid = names(which(rowMeans(tmpmat>0)>quantile(rowMeans(tmpmat>0),0.9)))## express in more cells
#lowid = names(which(rowMeans(tmpmat>0)<quantile(rowMeans(tmpmat>0),0.1)))## less
#bfex <- list.files('/home/wucheng/imputation/deg/10x_5cl/bulk/')
#bf <- c(paste0(ct1,'_',ct2,'_diffgene.rds'),paste0(ct2,'_',ct1,'_diffgene.rds'))
#bf <- intersect(bfex,bf)
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk1/',f))
tmpres <- res[rownames(res) %in% highid,]
tmp1 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}))
tmpres <- res[rownames(res) %in% lowid,]
tmp2 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}),na.rm=T)
c(tmp1,tmp2)
} else {
return(c(NA,NA))
}
}))
}) ## first 10 high, last 10 low
## sapply flattened each method's 10x2 matrix column-major: rows 1:10 = high,
## rows 11:20 = low.
ove_high = t(ove[1:10,])
ove_low = t(ove[11:20,])
colnames(ove_high) <- colnames(ove_low) <- sub('.rds','', allf)
rdir = '/home/wucheng/imputation/deg/10x_5cl/high_low1.5/wilcox/'
dir.create(rdir,showWarnings = F, recursive = T)
saveRDS(ove_high, paste0(rdir,'bulk_sc_diffgene_overlaps_moreExprGene.rds'))
saveRDS(ove_low, paste0(rdir,'bulk_sc_diffgene_overlaps_lessExprGene.rds'))
###
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/Method/')
mtd = allmtd[5]
allf = list.files(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/'))
#raw = readloom("/home/yuanhao/github_repositories/DISC/reproducibility/data/10X_5CL/imputation/raw_mc_10_mce_1.loom")
#metadata <-as.matrix(read.table("/home/wucheng/imputation/DEG/sc_10x_5cl.metadata.csv",header=T,row.names=1,sep=","))
#ct <-matrix(metadata[,29])[,1]
f=allf[1]
ove <- sapply(allmtd, function(mtd){
print(mtd)
t(sapply(allf,function(f) {
print(f)
if (file.exists(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',mtd,'/MAST/',f))
res = res[order(res[,'p_val_adj']),]
ct1 <- sub('_.*','',f)
ct2 <- sub('.*_','',sub('_diffgene.rds','',f))
raw_res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/Method/',"Raw",'/MAST/',f))
fc <- abs(as.matrix(raw_res)[,2])
highid = names(which(fc>quantile(fc,0.9)))## express in more cells
lowid = names(which(fc<quantile(fc,0.1)))## less
#highid = names(which(rowMeans(tmpmat>0)>quantile(rowMeans(tmpmat>0),0.9)))## express in more cells
#lowid = names(which(rowMeans(tmpmat>0)<quantile(rowMeans(tmpmat>0),0.1)))## less
#bfex <- list.files('/home/wucheng/imputation/deg/10x_5cl/bulk/')
#bf <- c(paste0(ct1,'_',ct2,'_diffgene.rds'),paste0(ct2,'_',ct1,'_diffgene.rds'))
#bf <- intersect(bfex,bf)
gs = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk1/',f))
tmpres <- res[rownames(res) %in% highid,]
tmp1 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}))
tmpres <- res[rownames(res) %in% lowid,]
tmp2 <- mean(sapply(c(1:100)*10,function(i) {
mean(rownames(tmpres[1:i,]) %in% gs) ## discuss
}),na.rm=T)
c(tmp1,tmp2)
} else {
return(c(NA,NA))
}
}))
}) ## first 10 high, last 10 low
ove_high = t(ove[1:10,])
ove_low = t(ove[11:20,])
colnames(ove_high) <- colnames(ove_low) <- sub('.rds','', allf)
rdir = '/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/'
dir.create(rdir,showWarnings = F, recursive = T)
saveRDS(ove_high, paste0(rdir,'bulk_sc_diffgene_overlaps_moreExprGene.rds'))
saveRDS(ove_low, paste0(rdir,'bulk_sc_diffgene_overlaps_lessExprGene.rds'))
##########
###########boxplot
ove_high <-readRDS("/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/bulk_sc_diffgene_overlaps_moreExprGene.rds")
c(min(ove_high),max(ove_high))
df <-melt(ove_high)
levels <-c("Raw","DISC","scImpute","VIPER","MAGIC","DCA","deepImpute","scScope","scVI")
p <- ggplot(df, aes(x=factor(Var1,levels=levels), y=value, fill=factor(Var1,levels=levels))) + geom_boxplot()
p <-p+ ylim(0.30,0.75)+theme_classic()+theme(axis.text.x = element_text(size = 12,angle = 45, hjust = 1, vjust = 1, face = "bold"))+theme(axis.text.y = element_text(size = 12, hjust = 1, vjust = 1, face = "bold"))
pdf(file="/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/high_boxplot.pdf",width=6,height=4)
p+theme(legend.title=element_blank())
dev.off()
ove_low <-readRDS("/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/bulk_sc_diffgene_overlaps_lessExprGene.rds")
c(min(ove_low),max(ove_low))
df <-melt(ove_low)
levels <-c("Raw","DISC","scImpute","VIPER","MAGIC","DCA","deepImpute","scScope","scVI")
p <- ggplot(df, aes(x=factor(Var1,levels=levels), y=value, fill=factor(Var1,levels=levels))) + geom_boxplot()
p <-p+ ylim(0,0.16)+theme_classic()+theme(axis.text.x = element_text(size = 12,angle = 45, hjust = 1, vjust = 1, face = "bold"))+theme(axis.text.y = element_text(size = 12, hjust = 1, vjust = 1, face = "bold"))
pdf(file="/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/low_boxplot.pdf",width=6,height=4)
p+theme(legend.title=element_blank())
dev.off()
####
cnt <- readRDS("/home/wucheng/imputation/DEG/GSE86337_processed_count.rds")
ct <- c("HCC827","HCC827","H2228","H2228","H838","H838","A549","A549","H1975","H1975")
for (i in unique(ct)){colnames(cnt)[ct == i] <- paste0(i,'_',1:sum(ct==i))
}
cutoff <-c(0.0,1.0,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2.0)
for(cut in 1:length(cutoff)){
for (n1 in 1:(length(unique(ct))-1)){
i = unique(ct)[n1]
for (n2 in ((n1+1):length(unique(ct)))){
j = unique(ct)[n2]
print(paste0(i,'_',j))
expr = cnt[, ct %in% c(i,j)]
expr = expr[rowSums(expr>0)>0,]
sct <- sub('_.*','',colnames(expr))
library(limma)
des <- cbind(1,ifelse(sct==i,1,0))
fit <- eBayes(lmFit(voom(expr,des),design=des))
res <- topTable(fit,coef=2,number=nrow(expr))
# ind <-intersect(c(which(res[,1]>=2),which(res[,1]<=(-2))),which(res[,'adj.P.Val']<=0.05))
# res <- res[res[,'adj.P.Val']<0.05,]
ind <-intersect(c(which(res[,1]>=(cutoff[cut])),which(res[,1]<=(-cutoff[cut]))),which(res[,'adj.P.Val']<=0.05))
res <- res[ind,]
rdir = paste0('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/','bulk',cutoff[cut])
dir.create(rdir,showWarnings = F, recursive = T)
saveRDS(res,paste0('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/','bulk',cutoff[cut],'/',i,'_',j,'_diffgene.rds'))
}
}
}
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/bulk1/')
ove <- sapply(allmtd, function(mtd){
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk1/',mtd))
mtd <-length(res)
})
allmtd = list.files('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/')
allf <-list.files('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/bulk0')
ove <- sapply(allmtd, function(mtd){
print(mtd)
sapply(allf,function(f) {
print(f)
res = readRDS(paste0('/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/',mtd,'/',f))
tmp <- nrow(res)
})
})
saveRDS(ove, '/home/wucheng/imputation/deg/10x_5cl/bulk_cutoff/cutoff.rds')
###########
library(reshape2)
high1 <-melt(ove_high1)
high1$V1 <-"top1"
high2 <-melt(ove_high2)
high2$V1 <-"top2"
high3 <-melt(ove_high3)
high3$V1 <-"top3"
high4 <-melt(ove_high4)
high4$V1 <-"top4"
high5 <-melt(ove_high5)
high5$V1 <-"top5"
high6 <-melt(ove_high6)
high6$V1 <-"top6"
high7 <-melt(ove_high7)
high7$V1 <-"top7"
high8 <-melt(ove_high8)
high8$V1 <-"top8"
high9 <-melt(ove_high9)
high9$V1 <-"top9"
high10 <-melt(ove_high10)
high10$V1 <-"top10"
high <-rbind(high1,high2,high3,high4,high5,high6,high7,high8,high9,high10)
saveRDS(high, "/home/wucheng/imputation/deg/10x_5cl/high_low1.5/MAST/high.rds")
high <-readRDS("C:/Users/ADMIN/Desktop/imputation/DEG/high.rds")
index <-which(high[,1]=="Raw")
index1 <-which(high[,1]=="DISC")
high <-high[c(index,index1),]
levels <-c("top1","top2","top3","top4","top5","top6","top7","top8","top9","top10")
dodge <- position_dodge(width = 0.4)
ggplot(high, aes(x=factor(V1,levels=levels), y=value,fill = Var1)) + stat_boxplot(geom="errorbar",width=0.15,position = dodge)+geom_boxplot(width=0.4)+
ylim(0,1)+theme(legend.title=element_blank())+theme(axis.text.x = element_text(size = 12,angle = 45, hjust = 1, vjust = 1, face = "bold"))+
labs(x="foldchange interval", y = "overlap")+theme_classic()+ scale_fill_manual(values = brewer.pal(3,"Set1")[c(1,2)])
mast <-readRDS("C:/Users/ADMIN/Desktop/imputation/DEG/MAST_bulk_sc_diffgene_overlaps_morelessExprGene.rds")
library(reshape2)
library(ggplot2)
df <-melt(mast)
p <-ggplot(df, aes(x=factor(Var1), y=value, colour=Var2,group=Var2)) + geom_line(size=1)+geom_point(size=2)
p<- p + labs( x="cutoff", y = "precent")+theme_classic()
p+ ylim(0,0.85)+theme(legend.title=element_blank())
p
mast <-readRDS("C:/Users/ADMIN/Desktop/imputation/DEG/wilcox_bulk_sc_diffgene_overlaps_morelessExprGene.rds")
library(reshape2)
library(ggplot2)
df <-melt(mast)
p <-ggplot(df, aes(x=factor(Var1), y=value, colour=Var2,group=Var2)) + geom_line(size=1)+geom_point(size=2)
p<- p + labs( x="cutoff", y = "precent")+theme_classic()
p+ ylim(0,0.85)+theme(legend.title=element_blank())
p
|
#' @rdname website
#' @title Bucket Website configuration
#' @description Get/Put/Delete the website configuration for a bucket.
#'
#' @template bucket
#' @param request_body A character string containing an XML request body, as defined in the specification in the \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html}{API Documentation}.
#' @template dots
#'
#' @return For \code{put_website} and \code{get_website}, a list containing the website configuration, if one has been set.
#' For \code{delete_website}: \code{TRUE} if successful, \code{FALSE} otherwise.
#' An \code{aws_error} object may be returned if the request failed.
#'
#' @references
#' \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html}{API Documentation: PUT website}
#' \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETwebsite.html}{API Documentation: GET website}
#' \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html}{API Documentation: DELETE website}
#' @export
delete_website <- function(bucket, ...){
r <- s3HTTP(verb = "DELETE",
bucket = bucket,
query = list(website = ""),
parse_response = FALSE,
...)
return(r)
}
#' @rdname website
#' @export
put_website <- function(bucket, request_body, ...){
r <- s3HTTP(verb = "PUT",
bucket = bucket,
query = list(website = ""),
request_body = request_body,
...)
structure(r, class = "s3_bucket")
}
#' @rdname website
#' @export
get_website <- function(bucket, ...){
r <- s3HTTP(verb = "GET",
bucket = bucket,
query = list(website = ""),
...)
return(r)
}
| /R/website.R | no_license | dgruenew/aws.s3 | R | false | false | 1,831 | r | #' @rdname website
#' @title Bucket Website configuration
#' @description Get/Put/Delete the website configuration for a bucket.
#'
#' @template bucket
#' @param request_body A character string containing an XML request body, as defined in the specification in the \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html}{API Documentation}.
#' @template dots
#'
#' @return For \code{put_website} and \code{get_website}, a list containing the website configuration, if one has been set.
#' For \code{delete_website}: \code{TRUE} if successful, \code{FALSE} otherwise.
#' An \code{aws_error} object may be returned if the request failed.
#'
#' @references
#' \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html}{API Documentation: PUT website}
#' \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETwebsite.html}{API Documentation: GET website}
#' \href{http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html}{API Documentation: DELETE website}
#' @export
delete_website <- function(bucket, ...){
r <- s3HTTP(verb = "DELETE",
bucket = bucket,
query = list(website = ""),
parse_response = FALSE,
...)
return(r)
}
#' @rdname website
#' @export
put_website <- function(bucket, request_body, ...){
r <- s3HTTP(verb = "PUT",
bucket = bucket,
query = list(website = ""),
request_body = request_body,
...)
structure(r, class = "s3_bucket")
}
#' @rdname website
#' @export
get_website <- function(bucket, ...){
r <- s3HTTP(verb = "GET",
bucket = bucket,
query = list(website = ""),
...)
return(r)
}
|
# Script preprocesses file containing epistatic interactions in TGEN cohort.
library(gProfileR)
# Read the file
epi <- read.table(file = "~/absb/data/epistasis/TGEN_epistasis.tsv", header = T)
# Create the folder where current results will be written
resdir <- "~/absb/results/epistasis/"
dir.create(file.path(resdir),showWarnings = FALSE, recursive = TRUE)
# Data size
dim(epi)
# Create the folder where current results will be written
resdir <- "~/absb/results/epistasis/"
dir.create(file.path(resdir),showWarnings = FALSE, recursive = TRUE)
# Set created directory as working dirrectory
setwd(resdir)
# Find LRG genes
epi_a <- epi[grep("LRG_", epi$ENSG_A), ]
epi_b <- epi[grep("LRG_", epi$ENSG_B), ]
# Number of LRG genes among interactors A
dim(epi_a)
# Number of LRG genes among interactors B
dim(epi_b)#
rows_cut <- row.names(epi_b)
# epi_b contains part of epi df that contains LRG genes
# Convert LRG to ENSG
B <- as.character(epi_b$ENSG_B)
LRG2ensg_tgen <- gconvert(B)
LRG2ensg_tgen <- LRG2ensg_tgen[, c(2,4)]
colnames(LRG2ensg_tgen) <- c(".id", "Target")
# Save to the file in RData and txt formats
#save(LRG2ensg_tgen, file = "LRG2ensg_tgen.RData")
#write.table(LRG2ensg_tgen, file = "LRG2ensg_tgen.txt", quote = F,sep = "\t",row.names = F)
# Remove duplicates
LRG2ensg_tgen <- LRG2ensg_tgen[!duplicated(LRG2ensg_tgen), ]
dim(LRG2ensg_tgen)
# Merge with epi_b
colnames(epi_b)
dim(merge(epi_b,LRG2ensg_tgen, by.x = "ENSG_B", by.y = ".id", all = F))
epi_tgen_lrg <- merge(epi_b,LRG2ensg_tgen, by.x = "ENSG_B", by.y = ".id", all = F)
# Select only ensg1 ensg2 and score
epi_tgen_lrg_fin <- epi_tgen_lrg[,c(2,8,3)]
# Print head of the data
head(epi_tgen_lrg_fin)
# Bind columns with interaction_type, data_source.
epi_tgen_lrg_fin <- cbind(epi_tgen_lrg_fin, interaction_type = "epistasis")
epi_tgen_lrg_fin <- cbind(epi_tgen_lrg_fin, data_source = "TGEN")
colnames(epi_tgen_lrg_fin) <- c("ensg1","ensg2","score","interaction_type","data_source")
# Save data
#save(epi_tgen_lrg_fin, file = "epi_tgen_lrg.RData")
#write.table(epi_tgen_lrg_fin,file = "epi_tgen_lrg_fin.txt",sep = "\t", quote = F, row.names = F)
# Combine with the main data frame
epi_cut <- epi[!row.names(epi)%in%rows_cut,]
dim(epi_cut)
dim(epi)
# Take only ensg1 ensg2 and score
epi_cut_fin <- epi_cut[,c(1,2,3)]
epi_cut_fin <- cbind(epi_cut_fin,interaction_type = "epistasis")
epi_cut_fin <- cbind(epi_cut_fin, data_source = "TGEN")
colnames(epi_cut_fin) <- c("ensg1","ensg2","score","interaction_type","data_source")
epi_tgen <- epi_cut_fin
# Convert gene ids and ensg id to tha latest Ensembl version
epi_tgen_ensg12ensg <- gconvert(epi_tgen$ensg1)
epi_tgen_ensg12ensg <- epi_tgen_ensg12ensg[, c(2,4)]
colnames(epi_tgen_ensg12ensg) <- c(".id", "Target")
dim(epi_tgen_ensg12ensg)
# Remove duplicates
epi_tgen_ensg12ensg <- epi_tgen_ensg12ensg[!duplicated(epi_tgen_ensg12ensg), ]
dim(epi_tgen_ensg12ensg)
# Convert second column of interactors
epi_tgen_ensg22ensg <- gconvert(epi_tgen$ensg2)
epi_tgen_ensg22ensg <- epi_tgen_ensg22ensg[, c(2,4)]
colnames(epi_tgen_ensg22ensg) <- c(".id", "Target")
dim(epi_tgen_ensg22ensg)
# Remove duplicates
epi_tgen_ensg22ensg <- epi_tgen_ensg22ensg[!duplicated(epi_tgen_ensg22ensg), ]
dim(epi_tgen_ensg22ensg)
# Merge by ensg1
epi_tgen_2ensg <- merge(epi_tgen,epi_tgen_ensg12ensg, by.x = "ensg1", by.y = ".id", all = F)
dim(epi_tgen_2ensg)
# Merge by ensg2
epi_tgen_2ensg <- merge(epi_tgen_2ensg,epi_tgen_ensg22ensg, by.x = "ensg2", by.y = ".id", all = F)
# Size of the dataset with old ENSG IDs(ver 74) converted to ver 90
dim(epi_tgen_2ensg)
# Size of the dataset with old ENSG IDs(ver74)
dim(epi_tgen)
# Find differences between ENSG IDs in Ensembl ver 90 and Ensembl ver 74
# All ENSG IDs in ver 90
ensg_tgen_ver90<-unique(c(as.character(epi_tgen_2ensg$ensg1),as.character(epi_tgen_2ensg$ensg2)))
length(ensg_tgen_ver90)
# All ENSG IDs in ver 74
ensg_tgen_ver74<-unique(c(as.character(epi_tgen$ensg1),as.character(epi_tgen$ensg2)))
length(ensg_tgen_ver74)
# ENSG IDs present in ver 74 and not present in ver 90
ensg_90_vs_74_tgen<-ensg_tgen_ver74[!ensg_tgen_ver74%in%ensg_tgen_ver90]
length(ensg_90_vs_74_tgen)#
# Save to file
#write.table(ensg_90_vs_74_tgen, file = "ensg_90_vs_74_tgen.txt", quote = F, row.names = F, sep = "\t")
epi_tgen<-epi_tgen_2ensg[,c(6,7,3,4,5)]
colnames(epi_tgen) <- c("ensg1","ensg2","score","interaction_type","data_source")
epi_tgen <- epi_tgen[!duplicated(epi_tgen), ]
dim(epi_tgen)
# Write to files
#save(epi_tgen, file = "epi_tgen.RData")
#write.table(epi_tgen,file = "epi_tgen.txt",sep = "\t",quote = F, row.names = F)
# Combine dataframes of LRG genes interactions and the rest
epi_tgen_int <- rbind(epi_tgen,epi_tgen_lrg_fin)
# Remove the duplicated undirrescted edges with the same score.
# For example ENSG1-ENSG2 0.5 and ENSG2-ENSG1 0.5
# Convert factors to characters
df2string<-function(df){
i <- sapply(df, is.factor)
df[i] <- lapply(df[i], as.character)
df[,3]<-as.numeric(df[,3])
return (df)}
epi_tgen_int <- df2string(epi_tgen_int)
str(epi_tgen_int)
dim(epi_tgen_int)
epi_tgen_int <- epi_tgen_int[!duplicated(data.frame(t(apply(epi_tgen_int[1:2], 1, sort)), epi_tgen_int[,c(3,5)])),]
# New size
dim(epi_tgen_int)
#Save the part of the integrated dataset related to TGEN cohort
save(epi_tgen_int, file = "epi_tgen_int.RData")
write.table(epi_tgen_int,file = "epi_tgen_int.txt",sep = "\t",quote = F, row.names = F)
| /scripts/epistasis/epi_tgen.R | no_license | Mozihua/AgedBrainSYSBIO | R | false | false | 5,436 | r | # Script preprocesses file containing epistatic interactions in TGEN cohort.
library(gProfileR)
# Read the file
epi <- read.table(file = "~/absb/data/epistasis/TGEN_epistasis.tsv", header = T)
# Create the folder where current results will be written
resdir <- "~/absb/results/epistasis/"
dir.create(file.path(resdir),showWarnings = FALSE, recursive = TRUE)
# Data size
dim(epi)
# Create the folder where current results will be written
resdir <- "~/absb/results/epistasis/"
dir.create(file.path(resdir),showWarnings = FALSE, recursive = TRUE)
# Set created directory as working dirrectory
setwd(resdir)
# Find LRG genes
epi_a <- epi[grep("LRG_", epi$ENSG_A), ]
epi_b <- epi[grep("LRG_", epi$ENSG_B), ]
# Number of LRG genes among interactors A
dim(epi_a)
# Number of LRG genes among interactors B
dim(epi_b)#
rows_cut <- row.names(epi_b)
# epi_b contains part of epi df that contains LRG genes
# Convert LRG to ENSG
B <- as.character(epi_b$ENSG_B)
LRG2ensg_tgen <- gconvert(B)
LRG2ensg_tgen <- LRG2ensg_tgen[, c(2,4)]
colnames(LRG2ensg_tgen) <- c(".id", "Target")
# Save to the file in RData and txt formats
#save(LRG2ensg_tgen, file = "LRG2ensg_tgen.RData")
#write.table(LRG2ensg_tgen, file = "LRG2ensg_tgen.txt", quote = F,sep = "\t",row.names = F)
# Remove duplicates
LRG2ensg_tgen <- LRG2ensg_tgen[!duplicated(LRG2ensg_tgen), ]
dim(LRG2ensg_tgen)
# Merge with epi_b
colnames(epi_b)
dim(merge(epi_b,LRG2ensg_tgen, by.x = "ENSG_B", by.y = ".id", all = F))
epi_tgen_lrg <- merge(epi_b,LRG2ensg_tgen, by.x = "ENSG_B", by.y = ".id", all = F)
# Select only ensg1 ensg2 and score
epi_tgen_lrg_fin <- epi_tgen_lrg[,c(2,8,3)]
# Print head of the data
head(epi_tgen_lrg_fin)
# Bind columns with interaction_type, data_source.
epi_tgen_lrg_fin <- cbind(epi_tgen_lrg_fin, interaction_type = "epistasis")
epi_tgen_lrg_fin <- cbind(epi_tgen_lrg_fin, data_source = "TGEN")
colnames(epi_tgen_lrg_fin) <- c("ensg1","ensg2","score","interaction_type","data_source")
# Save data
#save(epi_tgen_lrg_fin, file = "epi_tgen_lrg.RData")
#write.table(epi_tgen_lrg_fin,file = "epi_tgen_lrg_fin.txt",sep = "\t", quote = F, row.names = F)
# Combine with the main data frame
epi_cut <- epi[!row.names(epi)%in%rows_cut,]
dim(epi_cut)
dim(epi)
# Take only ensg1 ensg2 and score
epi_cut_fin <- epi_cut[,c(1,2,3)]
epi_cut_fin <- cbind(epi_cut_fin,interaction_type = "epistasis")
epi_cut_fin <- cbind(epi_cut_fin, data_source = "TGEN")
colnames(epi_cut_fin) <- c("ensg1","ensg2","score","interaction_type","data_source")
epi_tgen <- epi_cut_fin
# Convert gene ids and ensg id to tha latest Ensembl version
epi_tgen_ensg12ensg <- gconvert(epi_tgen$ensg1)
epi_tgen_ensg12ensg <- epi_tgen_ensg12ensg[, c(2,4)]
colnames(epi_tgen_ensg12ensg) <- c(".id", "Target")
dim(epi_tgen_ensg12ensg)
# Remove duplicates
epi_tgen_ensg12ensg <- epi_tgen_ensg12ensg[!duplicated(epi_tgen_ensg12ensg), ]
dim(epi_tgen_ensg12ensg)
# Convert second column of interactors
epi_tgen_ensg22ensg <- gconvert(epi_tgen$ensg2)
epi_tgen_ensg22ensg <- epi_tgen_ensg22ensg[, c(2,4)]
colnames(epi_tgen_ensg22ensg) <- c(".id", "Target")
dim(epi_tgen_ensg22ensg)
# Remove duplicates
epi_tgen_ensg22ensg <- epi_tgen_ensg22ensg[!duplicated(epi_tgen_ensg22ensg), ]
dim(epi_tgen_ensg22ensg)
# Merge by ensg1
epi_tgen_2ensg <- merge(epi_tgen,epi_tgen_ensg12ensg, by.x = "ensg1", by.y = ".id", all = F)
dim(epi_tgen_2ensg)
# Merge by ensg2
epi_tgen_2ensg <- merge(epi_tgen_2ensg,epi_tgen_ensg22ensg, by.x = "ensg2", by.y = ".id", all = F)
# Size of the dataset with old ENSG IDs(ver 74) converted to ver 90
dim(epi_tgen_2ensg)
# Size of the dataset with old ENSG IDs(ver74)
dim(epi_tgen)
# Find differences between ENSG IDs in Ensembl ver 90 and Ensembl ver 74
# All ENSG IDs in ver 90
ensg_tgen_ver90<-unique(c(as.character(epi_tgen_2ensg$ensg1),as.character(epi_tgen_2ensg$ensg2)))
length(ensg_tgen_ver90)
# All ENSG IDs in ver 74
ensg_tgen_ver74<-unique(c(as.character(epi_tgen$ensg1),as.character(epi_tgen$ensg2)))
length(ensg_tgen_ver74)
# ENSG IDs present in ver 74 and not present in ver 90
ensg_90_vs_74_tgen<-ensg_tgen_ver74[!ensg_tgen_ver74%in%ensg_tgen_ver90]
length(ensg_90_vs_74_tgen)#
# Save to file
#write.table(ensg_90_vs_74_tgen, file = "ensg_90_vs_74_tgen.txt", quote = F, row.names = F, sep = "\t")
epi_tgen<-epi_tgen_2ensg[,c(6,7,3,4,5)]
colnames(epi_tgen) <- c("ensg1","ensg2","score","interaction_type","data_source")
epi_tgen <- epi_tgen[!duplicated(epi_tgen), ]
dim(epi_tgen)
# Write to files
#save(epi_tgen, file = "epi_tgen.RData")
#write.table(epi_tgen,file = "epi_tgen.txt",sep = "\t",quote = F, row.names = F)
# Combine dataframes of LRG genes interactions and the rest
epi_tgen_int <- rbind(epi_tgen,epi_tgen_lrg_fin)
# Remove the duplicated undirrescted edges with the same score.
# For example ENSG1-ENSG2 0.5 and ENSG2-ENSG1 0.5
# Convert factors to characters
df2string<-function(df){
i <- sapply(df, is.factor)
df[i] <- lapply(df[i], as.character)
df[,3]<-as.numeric(df[,3])
return (df)}
epi_tgen_int <- df2string(epi_tgen_int)
str(epi_tgen_int)
dim(epi_tgen_int)
epi_tgen_int <- epi_tgen_int[!duplicated(data.frame(t(apply(epi_tgen_int[1:2], 1, sort)), epi_tgen_int[,c(3,5)])),]
# New size
dim(epi_tgen_int)
#Save the part of the integrated dataset related to TGEN cohort
save(epi_tgen_int, file = "epi_tgen_int.RData")
write.table(epi_tgen_int,file = "epi_tgen_int.txt",sep = "\t",quote = F, row.names = F)
|
rm(list=ls())
library(knitr)
library(plotrix)
# Set up the experiment values
set.seed(1230)
par(xpd=NA)
lambda <- 0.2
n <- 40
# Generate the random variables for these n values
par(mfrow = c(2,2))
for (no_sim in c(10, 100, 1000, 10000)){
meanValue <- NULL
meanSD <- NULL
for (i in 1:no_sim){
values <- rexp(n, lambda)
means <- mean(values)
sds <- sd(values)
meanValue <- c(meanValue, means)
meanSD <- c(meanSD, sds)
}
myhist <- hist(meanValue, freq = TRUE, xlim = c(2, 8), xlab = "Values",
main = paste(no_sim, "simulations"),col="light yellow")
}
# no_sim = 10,000 the histogram of probability density
par(mfrow = c(1,1))
myhist <- hist(meanValue, freq = FALSE, xlim = c(2, 8), xlab = "Values", ylim = c(0, .55),
breaks = 25, col="light yellow",
main = paste("Probability Density Function for", no_sim, "Simulations"))
# Total mean and sd of the aggregated samples
avg <- mean(meanValue)
s <- sd(meanValue)
# Average value from the data set
abline(v = avg , col = "red", lwd = 3, lty = 2)
# Expected value of an exponential distribution
abline(v = 5, col = "purple", lwd = 3, lty = 9)
# Theoretical normal distribution for the data set
x <- seq(min(meanValue), max(meanValue), length = 100)
y <- dnorm(x, mean = avg, sd = s)
curve(dnorm(x, mean = avg, sd = s),
col = "gray", lwd = 3, lty = 3, add = TRUE)
legend('topright', c("Expected value", "Actual mean", "Normal distrubution"),
lty=1, col=c('purple', 'red', "grey"), bty='n', cex=.75)
sd(meanValue)
qqnorm(meanValue, col = "purple")
qqline(meanValue)
# For no_sim <- 100
no_sim <- 100
meanValue<- NULL; meanSD <- NULL
for (i in 1:no_sim){
values <- rexp(n, lambda)
means <- mean(values)
sds <- sd(values)
meanValue <- c(meanValue, means)
meanSD <- c(meanSD, sds)
}
# 95% confidence interval for each simulation
upper <- meanValue + 1.96 * (meanSD/sqrt(n))
lower <- meanValue - 1.96 * (meanSD/sqrt(n))
sum(lower < 5 & 5 < upper)/no_sim * 100
index <- c(1:no_sim)
plot(index, upper, ylim = c(0, 10), type = "n", xlab = "Index", ylab = "Mean",
main = "Plot of confidence interval coverage for 100 simulations",col="purple")
segments(index, upper, index, lower, col = "purple", lwd = 3)
text(-8, 5, expression(paste("", mu, "")), cex = 1.5)
ablineclip(h=5, x1 = -2.5, lty = 2, col="red") | /StatisticsProject/simulation.R | no_license | yywxenia/datasciencecoursera | R | false | false | 2,371 | r |
rm(list=ls())
library(knitr)
library(plotrix)
# Set up the experiment values
set.seed(1230)
par(xpd=NA)
lambda <- 0.2
n <- 40
# Generate the random variables for these n values
par(mfrow = c(2,2))
for (no_sim in c(10, 100, 1000, 10000)){
meanValue <- NULL
meanSD <- NULL
for (i in 1:no_sim){
values <- rexp(n, lambda)
means <- mean(values)
sds <- sd(values)
meanValue <- c(meanValue, means)
meanSD <- c(meanSD, sds)
}
myhist <- hist(meanValue, freq = TRUE, xlim = c(2, 8), xlab = "Values",
main = paste(no_sim, "simulations"),col="light yellow")
}
# no_sim = 10,000 the histogram of probability density
par(mfrow = c(1,1))
myhist <- hist(meanValue, freq = FALSE, xlim = c(2, 8), xlab = "Values", ylim = c(0, .55),
breaks = 25, col="light yellow",
main = paste("Probability Density Function for", no_sim, "Simulations"))
# Total mean and sd of the aggregated samples
avg <- mean(meanValue)
s <- sd(meanValue)
# Average value from the data set
abline(v = avg , col = "red", lwd = 3, lty = 2)
# Expected value of an exponential distribution
abline(v = 5, col = "purple", lwd = 3, lty = 9)
# Theoretical normal distribution for the data set
x <- seq(min(meanValue), max(meanValue), length = 100)
y <- dnorm(x, mean = avg, sd = s)
curve(dnorm(x, mean = avg, sd = s),
col = "gray", lwd = 3, lty = 3, add = TRUE)
legend('topright', c("Expected value", "Actual mean", "Normal distrubution"),
lty=1, col=c('purple', 'red', "grey"), bty='n', cex=.75)
sd(meanValue)
qqnorm(meanValue, col = "purple")
qqline(meanValue)
# For no_sim <- 100
no_sim <- 100
meanValue<- NULL; meanSD <- NULL
for (i in 1:no_sim){
values <- rexp(n, lambda)
means <- mean(values)
sds <- sd(values)
meanValue <- c(meanValue, means)
meanSD <- c(meanSD, sds)
}
# 95% confidence interval for each simulation
upper <- meanValue + 1.96 * (meanSD/sqrt(n))
lower <- meanValue - 1.96 * (meanSD/sqrt(n))
sum(lower < 5 & 5 < upper)/no_sim * 100
index <- c(1:no_sim)
plot(index, upper, ylim = c(0, 10), type = "n", xlab = "Index", ylab = "Mean",
main = "Plot of confidence interval coverage for 100 simulations",col="purple")
segments(index, upper, index, lower, col = "purple", lwd = 3)
text(-8, 5, expression(paste("", mu, "")), cex = 1.5)
ablineclip(h=5, x1 = -2.5, lty = 2, col="red") |
my.approx.match <- function(mSetObj=NA, q, lipid){
mSetObj <- .get.mSet(mSetObj);
if(anal.type %in% c("msetora", "msetssp", "msetqea") & lipid){
cmpd.db <- .get.my.lib("lipid_compound_db.qs");
}else if(anal.type == "utils"){
cmpd.db <- .get.my.lib("master_compound_db.qs");
}else{
cmpd.db <- .get.my.lib("compound_db.qs");
}
if(anal.type %in% c("msetora", "msetssp", "msetqea") & lipid){
syn.db <- .get.my.lib("lipid_syn_nms.qs")
}else if(anal.type == "utils"){
syn.db <- .get.my.lib("master_syn_nms.qs")
}else{
syn.db <- .get.my.lib("syn_nms.qs")
}
if(!lipid){
# only for none lipids
nonLipidInx <- cmpd.db$lipid == 0;
com.nms <- cmpd.db$name[nonLipidInx];
syns.vec <- syn.db$syns.vec[nonLipidInx];
syns.list <- syn.db$syns.list[nonLipidInx];
matched.dist <- NULL;
q.length <- nchar(q);
s <- c(0, 0.1, 0.2);
# init withno hits, then see if any hits
mSetObj$dataSet$candidates <- NULL;
for (j in s) {
new.q <- q;
if(q.length > 32){ # note: agrep fail for exact match when length over 32 characters
new.q<-substr(q, 1, 32);
}
matched <- FALSE;
matched.inx <- agrep(new.q, syns.vec, ignore.case=T, max.distance=j, useBytes=T);
if(length(matched.inx) > 0) {
# record all the candidates,
# don't use cbind, since all will be converted to character mode
# for data.frame specify "stringsAsFactors" to prevent convert value col into factor
candidates <- data.frame(index=vector(mode = "numeric", length=length(matched.inx)),
value=vector(mode = "character", length=length(matched.inx)),
score=vector(mode = "numeric", length=length(matched.inx)),
stringsAsFactors = FALSE);
for(n in 1:length(matched.inx)){
nm.vec<-syns.list[[matched.inx[n]]];
# try approximate match, note: in some cases, split into element will break match using whole string
hit3.inx <- agrep(q,nm.vec,ignore.case=T, max.distance=j, useBytes=T);
if(length(hit3.inx)>0){
hit3.nm <- vector(mode = "character", length=length(hit3.inx));
hit3.score <- vector(mode = "numeric", length=length(hit3.inx));
for(k in 1:length(hit3.inx)){
idx <- hit3.inx[k];
hit3.nm[k] <- nm.vec[idx];
hit3.score[k] <- j + abs(nchar(nm.vec[idx])-nchar(q))/(10*nchar(q));
}
# now get the best match, the rule is that the first two character should matches
# first check if first two character are digits or characters, otherwise will cause error
matches2 <- c();
if(length(grep("^[1-9a-z]{2}", q, ignore.case=T))>0){
matches2 <- grep(paste("^", substr(q, 1, 2), sep=""), hit3.nm);
}else if (length(grep("^[1-9a-z]", q, ignore.case=T))>0){
matches2 <- grep(paste("^", substr(q, 1, 1), sep=""), hit3.nm);
}
if(length(matches2)>0){
hit3.score[matches2] <- hit3.score[matches2] - 0.05;
}
best.inx<-which(hit3.score==min(hit3.score))[1];
candidates[n,1]<-matched.inx[n];
# candidates[n,2]<-hit3.nm[best.inx]; # show matched syn names
candidates[n,2]<-com.nms[matched.inx[n]] # show common names
candidates[n,3]<-hit3.score[best.inx];
}
}
rm.inx <- is.na(candidates[,2]) | candidates[,2]=="NA" | candidates[,2]=="";
mSetObj$dataSet$candidates<-candidates[!rm.inx, ];
mSetObj$dataSet$candidates<-candidates[order(candidates[,3], decreasing=F), , drop=F];
if(nrow(candidates) > 10){
mSetObj$dataSet$candidates<-candidates[1:10,];
}
return(.set.mSet(mSetObj));
}
}
}else{
mSetObj$dataSet$candidates <- NULL;
new.q <- CleanLipidNames(q)
syns.vec <- syn.db$syns.vec;
com.nms <- cmpd.db$name
matched.inx <- agrep(new.q, syns.vec, ignore.case=T, max.distance=0, useBytes=T);
if(length(matched.inx) == 0){
matched.inx <- agrep(new.q, syns.vec, ignore.case=T, max.distance=0.1, useBytes=T);
}
if(length(matched.inx) > 0){
candidates <- data.frame(index=vector(mode = "numeric", length=length(matched.inx)),
value=vector(mode = "character", length=length(matched.inx)),
score=vector(mode = "numeric", length=length(matched.inx)),
stringsAsFactors = FALSE);
for(n in seq_along(matched.inx)){
candidates[n,1] <- matched.inx[n];
candidates[n,2] <- com.nms[matched.inx[n]] # show common names
candidates[n,3] <- min(as.numeric(adist(new.q, unlist(strsplit(syns.vec[matched.inx[1]], "; ")) )))
}
candidates <- candidates[order(candidates[,3]),]
if(nrow(candidates) > 10){
matched.inx <- candidates[1:10, ]
candidates <- candidates[1:10, ]
}
mSetObj$dataSet$candidates <- candidates
}
}
return(.set.mSet(mSetObj));
}
| /R/util_approx.R | permissive | xia-lab/MetaboAnalystR | R | false | false | 5,319 | r | my.approx.match <- function(mSetObj=NA, q, lipid){
# Body of my.approx.match(): look up approximate name matches for query `q`
# against the synonym library and store the best candidates (at most 10) in
# mSetObj$dataSet$candidates as (index, value, score) rows; lower score = better.
mSetObj <- .get.mSet(mSetObj);
# Pick the compound DB matching the current analysis type / lipid flag.
if(anal.type %in% c("msetora", "msetssp", "msetqea") & lipid){
  cmpd.db <- .get.my.lib("lipid_compound_db.qs");
}else if(anal.type == "utils"){
  cmpd.db <- .get.my.lib("master_compound_db.qs");
}else{
  cmpd.db <- .get.my.lib("compound_db.qs");
}
# Matching synonym library for the same selection.
if(anal.type %in% c("msetora", "msetssp", "msetqea") & lipid){
  syn.db <- .get.my.lib("lipid_syn_nms.qs")
}else if(anal.type == "utils"){
  syn.db <- .get.my.lib("master_syn_nms.qs")
}else{
  syn.db <- .get.my.lib("syn_nms.qs")
}
if(!lipid){
  # only for non-lipids
  nonLipidInx <- cmpd.db$lipid == 0;
  com.nms <- cmpd.db$name[nonLipidInx];
  syns.vec <- syn.db$syns.vec[nonLipidInx];
  syns.list <- syn.db$syns.list[nonLipidInx];
  q.length <- nchar(q);
  # Try increasingly permissive agrep distances until something matches.
  s <- c(0, 0.1, 0.2);
  # init with no hits, then see if there are any hits
  mSetObj$dataSet$candidates <- NULL;
  for (j in s) {
    new.q <- q;
    if(q.length > 32){ # note: agrep fails for exact match when length is over 32 characters
      new.q <- substr(q, 1, 32);
    }
    matched.inx <- agrep(new.q, syns.vec, ignore.case=TRUE, max.distance=j, useBytes=TRUE);
    if(length(matched.inx) > 0) {
      # record all the candidates;
      # don't use cbind, since everything would be converted to character mode;
      # "stringsAsFactors" prevents converting the value column into a factor
      candidates <- data.frame(index=vector(mode = "numeric", length=length(matched.inx)),
                               value=vector(mode = "character", length=length(matched.inx)),
                               score=vector(mode = "numeric", length=length(matched.inx)),
                               stringsAsFactors = FALSE);
      for(n in seq_along(matched.inx)){
        nm.vec <- syns.list[[matched.inx[n]]];
        # try approximate match; note: in some cases splitting into elements can break a whole-string match
        hit3.inx <- agrep(q, nm.vec, ignore.case=TRUE, max.distance=j, useBytes=TRUE);
        if(length(hit3.inx)>0){
          hit3.nm <- vector(mode = "character", length=length(hit3.inx));
          hit3.score <- vector(mode = "numeric", length=length(hit3.inx));
          for(k in seq_along(hit3.inx)){
            idx <- hit3.inx[k];
            hit3.nm[k] <- nm.vec[idx];
            # penalty grows with the relative length difference between hit and query
            hit3.score[k] <- j + abs(nchar(nm.vec[idx])-nchar(q))/(10*nchar(q));
          }
          # now get the best match; the rule is that the first two characters should match;
          # first check whether the first two characters are digits or letters,
          # otherwise building the prefix regex would cause an error
          matches2 <- c();
          if(length(grep("^[1-9a-z]{2}", q, ignore.case=TRUE))>0){
            matches2 <- grep(paste("^", substr(q, 1, 2), sep=""), hit3.nm);
          }else if (length(grep("^[1-9a-z]", q, ignore.case=TRUE))>0){
            matches2 <- grep(paste("^", substr(q, 1, 1), sep=""), hit3.nm);
          }
          if(length(matches2)>0){
            # small bonus for sharing the leading characters with the query
            hit3.score[matches2] <- hit3.score[matches2] - 0.05;
          }
          best.inx <- which(hit3.score==min(hit3.score))[1];
          candidates[n,1] <- matched.inx[n];
          # candidates[n,2]<-hit3.nm[best.inx]; # show matched syn names
          candidates[n,2] <- com.nms[matched.inx[n]] # show common names
          candidates[n,3] <- hit3.score[best.inx];
        }
      }
      # Drop rows without a usable common name FIRST, then order by score.
      # (The original overwrote the filtered table with the unfiltered one,
      # silently discarding the filtering step, and then took the top 10 from
      # the unordered table.)
      rm.inx <- is.na(candidates[,2]) | candidates[,2]=="NA" | candidates[,2]=="";
      candidates <- candidates[!rm.inx, , drop=FALSE];
      candidates <- candidates[order(candidates[,3], decreasing=FALSE), , drop=FALSE];
      if(nrow(candidates) > 10){
        candidates <- candidates[1:10, , drop=FALSE];
      }
      mSetObj$dataSet$candidates <- candidates;
      return(.set.mSet(mSetObj));
    }
  }
}else{
  mSetObj$dataSet$candidates <- NULL;
  new.q <- CleanLipidNames(q)
  syns.vec <- syn.db$syns.vec;
  com.nms <- cmpd.db$name
  # Exact-ish match first; fall back to a fuzzier distance if nothing is found.
  matched.inx <- agrep(new.q, syns.vec, ignore.case=TRUE, max.distance=0, useBytes=TRUE);
  if(length(matched.inx) == 0){
    matched.inx <- agrep(new.q, syns.vec, ignore.case=TRUE, max.distance=0.1, useBytes=TRUE);
  }
  if(length(matched.inx) > 0){
    candidates <- data.frame(index=vector(mode = "numeric", length=length(matched.inx)),
                             value=vector(mode = "character", length=length(matched.inx)),
                             score=vector(mode = "numeric", length=length(matched.inx)),
                             stringsAsFactors = FALSE);
    for(n in seq_along(matched.inx)){
      candidates[n,1] <- matched.inx[n];
      candidates[n,2] <- com.nms[matched.inx[n]] # show common names
      # Score = smallest edit distance between the query and ANY synonym of THIS
      # hit. BUG FIX: the original indexed matched.inx[1] here, so every row
      # received the first hit's score and the ordering below was meaningless.
      candidates[n,3] <- min(as.numeric(adist(new.q, unlist(strsplit(syns.vec[matched.inx[n]], "; ")) )))
    }
    candidates <- candidates[order(candidates[,3]), , drop=FALSE]
    if(nrow(candidates) > 10){
      # keep the 10 closest matches (dead reassignment of matched.inx removed)
      candidates <- candidates[1:10, , drop=FALSE]
    }
    mSetObj$dataSet$candidates <- candidates
  }
}
return(.set.mSet(mSetObj));
}
|
path <- "F://Coursera/Johns Hopkins Data Science Specialization//4. Exploratory Data Analysis//Project//exdata-data-household_power_consumption"
setwd(path)
full.data <- read.table("household_power_consumption.txt", na.strings="?"
, sep=";", header=TRUE)
data <- full.data[(full.data$Date=="1/2/2007") | (full.data$Date=="2/2/2007"),,drop=FALSE]
rm(full.data)
data$Date <- strptime(data$Date,"%d/%m/%Y")
hist(data$Global_active_power, col="red", xlab="Global Active Power (Kilowatts)",
main="Global Active Power")
dev.copy(png,file="plot1.png", width=480, height=480)
dev.off() | /plot1.R | no_license | taha-islam/exdata-data-household_power_consumption | R | false | false | 607 | r | path <- "F://Coursera/Johns Hopkins Data Science Specialization//4. Exploratory Data Analysis//Project//exdata-data-household_power_consumption"
# Point the working directory at the data set (path defined above).
setwd(path)
# Read the power-consumption file; '?' encodes missing values.
full.data <- read.table("household_power_consumption.txt", na.strings="?"
, sep=";", header=TRUE)
# Keep only 1 Feb 2007 and 2 Feb 2007 (dates are still d/m/Y strings here).
data <- full.data[(full.data$Date=="1/2/2007") | (full.data$Date=="2/2/2007"),,drop=FALSE]
# Free the full table; only the two-day subset is needed from here on.
rm(full.data)
# NOTE(review): strptime() returns POSIXlt, which is awkward as a data.frame
# column; as.Date() would be the safer choice. Left unchanged here.
data$Date <- strptime(data$Date,"%d/%m/%Y")
# Histogram of global active power for the two selected days.
hist(data$Global_active_power, col="red", xlab="Global Active Power (Kilowatts)",
main="Global Active Power")
# Copy the screen plot to a 480x480 PNG (closed by dev.off() on the next line).
dev.copy(png,file="plot1.png", width=480, height=480)
dev.off() |
message("03_define_grid.R")
grid <- tribble(
~algo, ~hyper,
# glmnet
# LASSO
"glmnet", list(alpha = 1, lambda = 10^seq(10, -2, length = 100)),
# # RIDGE
"glmnet", list(alpha = 0, lambda = 10^seq(10, -2, length = 100)),
# kknn
"kknn", list(k = seq(from = 1, by = 4, length.out = 20)),
# e1071
"naiveBayes", list(laplace = 1:5),
# libsvm
"svm", list(cost = c(0.01, 0.1, 0.5, 1:3), gamma = 0:3,
kernel = c("linear", "polynomial", "radial", "sigmoid")),
# nnet
"nnet", list(size = seq(1,13,2), decay = 10^-(seq(4,1)), MaxNWts = 100000),
# "plsdaCaret", list(ncomp = 1:5),
# rpart
"rpart", list(cp = c(0.001, 0.005, 0.01, 0.05, 0.1)),
# C50
"C50", list(winnow = c(winnow_false = FALSE, winnow_true = TRUE),
CF = seq(0,0.35,0.05),
rules = c(rules_false = FALSE, rules_true = TRUE)),
# ranger
"ranger", list(mtry = c(1,2,4,8), min.node.size = c(1, seq(5,25,5)), num.trees = c(250, 500, 1000)),
# "h2o.randomForest", list(mtries = seq(4, min(100, ncol(dl$data)-1), 16)),
# xgboost
"xgboost", list(eta = c(0.01, 0.05, 0.1, 0.2), max_depth = 1:3, gamma = 0, colsample_bytree = seq(0.2, 1, 0.2),
min_child_weight = c(0.5, 1, 2), subsample = seq(0.5, 1, 0.25), nrounds = seq(50, 250, 100))
) %>%
mutate(subset = list(c(#"original", "evo", "sumstats",
"original+evo"#, "original+sumstats", "all"
))) %>%
mutate(fs = list(c("none", "cfs"))) %>%
unnest(subset, .drop = FALSE) %>%
unnest(fs, .drop = FALSE) %>%
mutate(id = str_pad(row_number(), width = 3, pad = "0")) %>%
# mutate(fold = list(1:10)) %>%
# unnest(fold, .drop = FALSE) %>%
select(id, everything())
grid
# getParamSet("classif.nnet")
message("==========")
| /R/03_define_grid.R | permissive | unmnn/evoxploit_scirep | R | false | false | 1,784 | r | message("03_define_grid.R")
# Hyper-parameter search grid: one row per (algorithm, hyper-parameter list).
# Each row is then crossed with the feature-subset and feature-selection
# options and given a zero-padded id. Commented-out entries are disabled runs.
grid <- tribble(
  ~algo, ~hyper,
  # glmnet
  # LASSO
  "glmnet", list(alpha = 1, lambda = 10^seq(10, -2, length = 100)),
  # # RIDGE
  "glmnet", list(alpha = 0, lambda = 10^seq(10, -2, length = 100)),
  # kknn
  "kknn", list(k = seq(from = 1, by = 4, length.out = 20)),
  # e1071
  "naiveBayes", list(laplace = 1:5),
  # libsvm
  "svm", list(cost = c(0.01, 0.1, 0.5, 1:3), gamma = 0:3,
              kernel = c("linear", "polynomial", "radial", "sigmoid")),
  # nnet
  "nnet", list(size = seq(1,13,2), decay = 10^-(seq(4,1)), MaxNWts = 100000),
  # "plsdaCaret", list(ncomp = 1:5),
  # rpart
  "rpart", list(cp = c(0.001, 0.005, 0.01, 0.05, 0.1)),
  # C50
  "C50", list(winnow = c(winnow_false = FALSE, winnow_true = TRUE),
              CF = seq(0,0.35,0.05),
              rules = c(rules_false = FALSE, rules_true = TRUE)),
  # ranger
  "ranger", list(mtry = c(1,2,4,8), min.node.size = c(1, seq(5,25,5)), num.trees = c(250, 500, 1000)),
  # "h2o.randomForest", list(mtries = seq(4, min(100, ncol(dl$data)-1), 16)),
  # xgboost
  "xgboost", list(eta = c(0.01, 0.05, 0.1, 0.2), max_depth = 1:3, gamma = 0, colsample_bytree = seq(0.2, 1, 0.2),
                  min_child_weight = c(0.5, 1, 2), subsample = seq(0.5, 1, 0.25), nrounds = seq(50, 250, 100))
) %>%
  mutate(subset = list(c(#"original", "evo", "sumstats",
    "original+evo"#, "original+sumstats", "all"
  ))) %>%
  mutate(fs = list(c("none", "cfs"))) %>%
  # NOTE(review): `unnest(col, .drop = FALSE)` is pre-tidyr-1.0 syntax; on
  # tidyr >= 1.0 this triggers deprecation warnings (use `cols =`). Left as-is.
  unnest(subset, .drop = FALSE) %>%
  unnest(fs, .drop = FALSE) %>%
  mutate(id = str_pad(row_number(), width = 3, pad = "0")) %>%
  # mutate(fold = list(1:10)) %>%
  # unnest(fold, .drop = FALSE) %>%
  select(id, everything())
grid
# getParamSet("classif.nnet")
message("==========")
|
## validate output of sw_1_calculate_summaries.R
##==## load and validate summaries: treatment dose_uM CMAX comparison over cell lines/ replicates
files_paths <- dir('generated/results/processed summaries', )
##==## basic quality control plotting of all data | /sw_2_validate_sw_1_summaries.R | no_license | Hardervidertsie/DILItimepoint | R | false | false | 264 | r |
## validate output of sw_1_calculate_summaries.R
##==## load and validate summaries: treatment dose_uM CMAX comparison over cell lines/ replicates
files_paths <- dir('generated/results/processed summaries', )
##==## basic quality control plotting of all data |
# Simulation scenario 14 "BC": competing-events multinomial simulation.
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace when the
# script is sourced; discouraged, but left as-is to reproduce the original run.
rm(list = ls())
library(Daniel)   # presumably provides CatIndex() and CalcOR() used below -- confirm
library(dplyr)
library(nnet)     # multinom()
CalcCImultinom <- function(fit)
{
  # Summarize the fitted multinom model once and pull out the two matrices we
  # need: point estimates and their standard errors (one row per outcome level).
  fit.sum <- summary(fit)
  est <- fit.sum$coefficients
  se <- fit.sum$standard.errors
  # 95% Wald confidence interval (estimate +/- 1.96 * SE) for the slope
  # (column 2) of each of the two outcome rows.
  half.width1 <- 1.96 * se[1, 2]
  half.width2 <- 1.96 * se[2, 2]
  ci.1 <- c(est[1, 2] - half.width1, est[1, 2] + half.width1)
  ci.2 <- c(est[2, 2] - half.width2, est[2, 2] + half.width2)
  # Two-row matrix: row "ci.1" / "ci.2" = (lower, upper) for each outcome.
  rbind(ci.1, ci.2)
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "BC"                    # scenario label used in the output file name
beta0 <- c(-6, -5)              # intercepts for subtypes 1 and 2
betaE <- c(log(2.5), log(1.5))  # exposure effects (log odds ratios)
betaU <- c(log(4), log(2.5))    # confounder effects (log odds ratios)
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
# Per-simulation holders for subtype counts, estimands, and CIs.
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calculate probabilities for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
# Multinomial-logit probabilities pr(Y = k | E, U), baseline category 0.
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate both potential subtype outcomes (under control and treatment) #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
# Observed outcome follows the randomized exposure X.
Y <- (1-X)*Yctrl + X*Ytrt
# NOTE(review): this assumes all three categories occur in every simulation;
# otherwise table(Y) would be shorter than 3 columns -- confirm for rare setups.
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
# Proportion never developing each subtype under either exposure level.
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters (risk differences within the relevant strata)
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
# Multinomial fit on the observed data; Wald CIs for the exposure coefficient.
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
# Odds ratios treating each subtype separately (the other subtype excluded).
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
# 2x2 table cell counts (by outcome x exposure) fed to CalcOR().
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
# "sace" versions: restrict to units that never develop the OTHER subtype
# under either exposure level.
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
# Logistic approximations of the per-subtype exposure ORs, without and with U.
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
# Persist the whole workspace for downstream summarisation.
save.image(paste0("CMPEn50krareScen14",patt,".RData"))
| /Simulations/Scripts/R/Rare/Scenario 14/CMPEn50KrareScen14BC.R | no_license | yadevi/CausalMPE | R | false | false | 4,220 | r | rm(list = ls())
# 'Daniel' presumably provides CatIndex() and CalcOR() used below -- confirm.
library(Daniel)
library(dplyr)
library(nnet)   # multinom()
CalcCImultinom <- function(fit)
{
  # Summarize the fitted multinom model once and pull out the two matrices we
  # need: point estimates and their standard errors (one row per outcome level).
  fit.sum <- summary(fit)
  est <- fit.sum$coefficients
  se <- fit.sum$standard.errors
  # 95% Wald confidence interval (estimate +/- 1.96 * SE) for the slope
  # (column 2) of each of the two outcome rows.
  half.width1 <- 1.96 * se[1, 2]
  half.width2 <- 1.96 * se[2, 2]
  ci.1 <- c(est[1, 2] - half.width1, est[1, 2] + half.width1)
  ci.2 <- c(est[2, 2] - half.width2, est[2, 2] + half.width2)
  # Two-row matrix: row "ci.1" / "ci.2" = (lower, upper) for each outcome.
  rbind(ci.1, ci.2)
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "BC"                    # scenario label used in the output file name
beta0 <- c(-6, -5)              # intercepts for subtypes 1 and 2
betaE <- c(log(2.5), log(1.5))  # exposure effects (log odds ratios)
betaU <- c(log(4), log(2.5))    # confounder effects (log odds ratios)
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
# Per-simulation holders for subtype counts, estimands, and CIs.
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calculate probabilities for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
# Multinomial-logit probabilities pr(Y = k | E, U), baseline category 0.
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate both potential subtype outcomes (under control and treatment) #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
# Observed outcome follows the randomized exposure X.
Y <- (1-X)*Yctrl + X*Ytrt
# NOTE(review): this assumes all three categories occur in every simulation;
# otherwise table(Y) would be shorter than 3 columns -- confirm for rare setups.
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
# Proportion never developing each subtype under either exposure level.
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters (risk differences within the relevant strata)
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
# Multinomial fit on the observed data; Wald CIs for the exposure coefficient.
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
# Odds ratios treating each subtype separately (the other subtype excluded).
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
# 2x2 table cell counts (by outcome x exposure) fed to CalcOR().
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
# "sace" versions: restrict to units that never develop the OTHER subtype
# under either exposure level.
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
# Logistic approximations of the per-subtype exposure ORs, without and with U.
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
# Persist the whole workspace for downstream summarisation.
save.image(paste0("CMPEn50krareScen14",patt,".RData"))
|
# tidyverse: dplyr/ggplot2 pipelines; gtools: mixedsort() for natural ordering.
library(tidyverse)
library(gtools)
# load data ####
# daf_fst.RData presumably provides `dvar` (variants) and `daf` (Fst table) -- confirm.
load('data/daf_fst.RData')
#### QC of variant data ####
# does quality depend on read depth?
dvar %>%
  filter(qual < 999) %>%
  ggplot(aes(raw_depth, qual)) + geom_point(alpha=0.05) + scale_x_log10()
# qual ~ read depth plot
dvar %>%
  filter(qual < 999) %>%
  mutate(dpsum=ref_f + ref_r + alt_r + alt_f) %>%
  ggplot(aes(dpsum, qual)) + geom_point(alpha=0.1) + scale_x_log10()
# histogram of var qualities
dvar %>%
  filter(qual < 999) %>%
  ggplot(aes(qual)) + geom_histogram()
# 'gene' sizes in zebra finch
dvar %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len < 5e4) %>%
  ggplot(aes(zf_len)) + geom_histogram()
# gene sizes per chromosome
dvar %>%
  group_by(chrom, contig_name) %>%
  summarize(zf_max=max(zf_max), zf_min=min(zf_min)) %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len < 5e4) %>%
  ggplot(aes(chrom, zf_len)) + geom_boxplot() + coord_flip()
# max chrom sizes
dvar %>%
  group_by(chrom) %>%
  summarize(chrom_len=max(zf_max)) %>%
  ggplot(aes(chrom, chrom_len)) + geom_bar(stat="identity") + coord_flip()
# mvz annotations
dvar %>%
  ggplot(aes(mvz_seqpart)) +
  geom_histogram()
# filter out too wide contig targets
dvar %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len < 5e4) %>%
  ggplot(aes(zf_len)) +
  geom_histogram()
# check whether 'chromosomes' are covered with variants
dfai <- read.delim("data-genome/lp2.fasta.fai",
                   header=F,
                   col.names=c("chrom", "len", "start", "x", "y"))
# get shared levels for final order (mixedsort gives natural "chr2 < chr10" ordering)
chrom_levs <- unique(c(levels(dvar$chrom), levels(dfai$chrom))) %>% mixedsort %>% rev
# chromosome lengths as bars with variant positions overlaid as red points
data.frame() %>%
  ggplot(aes(chrom)) +
  geom_bar(aes(y=len), stat="identity", data=dfai) +
  geom_point(aes(y=pos), colour="red", data=dvar) +
  coord_flip() +
  scale_x_discrete(limits=chrom_levs)
ggsave(file="results/var-coverage.png", width=200, height=290, units="mm")
#### QC of Fst values ####
# check the distribution of fst in badly mapped contigs
# this is not possible with current 'pre-filtered' approach
#daf %>%
#  ggplot(aes(fst, fill=zf_max - zf_min < 5e5)) +
#  geom_density(alpha=0.6, colour=NA)
daf %>%
  ggplot(aes(zf_max - zf_min < 5e5, fst)) +
  geom_boxplot() +
  coord_flip()
# maybe a bit lower fst values for the badly mapped exons
# check if the fst outside the mapped exons differ
# The data frame is already supplied by the pipe; the original passed `daf`
# again as ggplot()'s first positional argument, which lands in `mapping`
# and makes the call error out ("mapping must be created by aes()").
daf %>%
  ggplot(aes(fst, fill=is.na(zf_pos))) +
  geom_density(alpha=0.6, colour=NA)
# no apparent difference
# count not exactly mapped variants
daf %>%
  group_by(is.na(zf_pos)) %>%
  summarise(n())
# checking the zebra finch annotation, only 6 genes is longer than 500k (ensGenes)
daf %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len > 1e5) %>%
  # magrittr `.` pronoun: extract the column from the piped data frame
  .[,"contig_name"] %>%
  as.character %>%
  unique %>%
  length
# ->
# 1426 contigs are filtered out as 'too long' at 5e4
# 1005 at 1e5
# this filters out 16k additional variants
daf %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len > 5e4) %>%
  summarize(nvars=n())
| /data-qa.R | no_license | matiasgoco/islands-paper | R | false | false | 3,011 | r | library(tidyverse)
library(gtools)
# load data ####
# daf_fst.RData presumably provides `dvar` (variants) and `daf` (Fst table) -- confirm.
load('data/daf_fst.RData')
#### QC of variant data ####
# does quality depend on read depth?
dvar %>%
  filter(qual < 999) %>%
  ggplot(aes(raw_depth, qual)) + geom_point(alpha=0.05) + scale_x_log10()
# qual ~ read depth plot
dvar %>%
  filter(qual < 999) %>%
  mutate(dpsum=ref_f + ref_r + alt_r + alt_f) %>%
  ggplot(aes(dpsum, qual)) + geom_point(alpha=0.1) + scale_x_log10()
# histogram of var qualities
dvar %>%
  filter(qual < 999) %>%
  ggplot(aes(qual)) + geom_histogram()
# 'gene' sizes in zebra finch
dvar %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len < 5e4) %>%
  ggplot(aes(zf_len)) + geom_histogram()
# gene sizes per chromosome
dvar %>%
  group_by(chrom, contig_name) %>%
  summarize(zf_max=max(zf_max), zf_min=min(zf_min)) %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len < 5e4) %>%
  ggplot(aes(chrom, zf_len)) + geom_boxplot() + coord_flip()
# max chrom sizes
dvar %>%
  group_by(chrom) %>%
  summarize(chrom_len=max(zf_max)) %>%
  ggplot(aes(chrom, chrom_len)) + geom_bar(stat="identity") + coord_flip()
# mvz annotations
dvar %>%
  ggplot(aes(mvz_seqpart)) +
  geom_histogram()
# filter out too wide contig targets
dvar %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len < 5e4) %>%
  ggplot(aes(zf_len)) +
  geom_histogram()
# check whether 'chromosomes' are covered with variants
dfai <- read.delim("data-genome/lp2.fasta.fai",
                   header=F,
                   col.names=c("chrom", "len", "start", "x", "y"))
# get shared levels for final order (mixedsort gives natural "chr2 < chr10" ordering)
chrom_levs <- unique(c(levels(dvar$chrom), levels(dfai$chrom))) %>% mixedsort %>% rev
# chromosome lengths as bars with variant positions overlaid as red points
data.frame() %>%
  ggplot(aes(chrom)) +
  geom_bar(aes(y=len), stat="identity", data=dfai) +
  geom_point(aes(y=pos), colour="red", data=dvar) +
  coord_flip() +
  scale_x_discrete(limits=chrom_levs)
ggsave(file="results/var-coverage.png", width=200, height=290, units="mm")
#### QC of Fst values ####
# check the distribution of fst in badly mapped contigs
# this is not possible with current 'pre-filtered' approach
#daf %>%
#  ggplot(aes(fst, fill=zf_max - zf_min < 5e5)) +
#  geom_density(alpha=0.6, colour=NA)
daf %>%
  ggplot(aes(zf_max - zf_min < 5e5, fst)) +
  geom_boxplot() +
  coord_flip()
# maybe a bit lower fst values for the badly mapped exons
# check if the fst outside the mapped exons differ
# The data frame is already supplied by the pipe; the original passed `daf`
# again as ggplot()'s first positional argument, which lands in `mapping`
# and makes the call error out ("mapping must be created by aes()").
daf %>%
  ggplot(aes(fst, fill=is.na(zf_pos))) +
  geom_density(alpha=0.6, colour=NA)
# no apparent difference
# count not exactly mapped variants
daf %>%
  group_by(is.na(zf_pos)) %>%
  summarise(n())
# checking the zebra finch annotation, only 6 genes is longer than 500k (ensGenes)
daf %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len > 1e5) %>%
  # magrittr `.` pronoun: extract the column from the piped data frame
  .[,"contig_name"] %>%
  as.character %>%
  unique %>%
  length
# ->
# 1426 contigs are filtered out as 'too long' at 5e4
# 1005 at 1e5
# this filters out 16k additional variants
daf %>%
  mutate(zf_len=zf_max - zf_min) %>%
  filter(zf_len > 5e4) %>%
  summarize(nvars=n())
|
## 1. removed entries in linneaus.csv data that had no species
#IB: Data coming from??? show read.data or refer to the file where it was loaded
#clean_linneaus=linneaus[c(1:294, 296:367, 369:474, 476:495, 497:538),]
# ## 2. grouped factors into categories
# ## create new column by using micro-habitat data
# clean_linneaus$Habitat_groups=(clean_linneaus$`Micro-habitat`)
# ## set data to as factor
# clean_linneaus$Habitat_groups=as.factor(clean_linneaus$Habitat_groups)
# ## re group factors in new column
# levels(clean_linneaus$Habitat_groups)<-list(
# Rupes=c("1 Rupes Lassbyenses", "10 Rupes Gottsundenses", "3 Rupes"),
# Silva=c("2 Pascua Sylvatica", "12 Silva Regia", "5 Silva Regia",
# "1 Sylva Deserta", "4 Saltus Rickombergensis", "5 Nemus Regium",
# "6 Lucus Norrbyensis"),
# Palus=c("3 Palus Norrbyensis", "2 Palus Jumkilensis"),
# Pratum=c("4 Pratum Norrbyense", "7 Pratum Gottsundense",
# "11 Praedium Wardsattra", "1 Pratum Regium",
# "2 Pratumbarbyense", "3 Prata Ekensia & Fallensia",
# "3 Pratum Saltpetterángen", "4 Prata",
# "8 Praedium Kiattinge", "9 Pratum Nyflense",
# "10 Praedium Husby", "5 Prataflogstadensia & Hagensia"),
# Agros=c("2 Agri Upsalieness", "2 Agri Waxalenses", "3 Versurae Agrorum"),
# Fluvius=c("2 Ripae", "2 Ripa fluvii UPSALIENSIS", "3 Fluvius",
# "6 Rivulus", "6 Fons Sandwiksensis","5 Uliginosa Amni",
# "2 Piscina"),
# Urbis=c("1Tecta Upsaliensia", "3 Plateae Urbis Upsaliensis",
# "1 Via Upsaliensis", "1 Tecta Upsaliensia", "4 Campus Polonicus",
# "Tegelhagen", "1 SEPTA"),
# Rusticus=c("9 Praecipitia Gottsundensia", "6 Lucus Norrbyensis", "7 Haga",
# "1 Luthagen", "5 Tórnby", "4 Falla", "5 Lucus Nantunensis",
# "9 Liljeholmen", "7 Ultuna", "5 Wittulsbergensis",
# "4 Tumuli Upsaliensis", "4 Templo Danemarkensi",
# "7 Templi Borfensis", "3 Lupuleta", "8 Bovilia Pastoris",
# "1 Tegelhagen")
# )
#
# ## saved to data folder
# write.csv(clean_linneaus, file="clean_linneaus.csv")
## 3. checked and changed species names
#IB: Data coming from??? show read.data or refer to the file where it was loaded
# clean_linne_sp <- read.csv("data/clean_linne_sp.csv", h = T)
# ## manually change species names using gsub
# clean_linne_sp$species<-gsub("Arbutus Uva urfi", "Arctostaphylos uva-ursi", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Cerefus Padus", "Prunus padus", clean_linne_sp$species)
# ## perhaps remove this species as it is a fungus
# clean_linne_sp$species<-gsub("Elvela Mitra", "Gyromitra infula", clean_linne_sp$species)
# ##
# clean_linne_sp$species<-gsub("Ophrys Nidus", "Neottia nidus-avis", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Ranuculus Ficaria:", "Ficaria verna", clean_linne_sp$species)
# ## perhaps remove this species as it is a lichen
# clean_linne_sp$species<-gsub("Lichen nivalis:", "Lepraria nivalis", clean_linne_sp$species)
# ##
# clean_linne_sp$species<-gsub("Ribes Vua crispa", "Ribes uva-crispa", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("TuffiIago Farfara", "Tussilago farfara", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Convolvulus Polygonum", "Polygonum convolvulus", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Crataeus Aira", "Sorbus aria", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Buphtalmo tinctorio", "Cota tinctoria", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Comarum palustre", "Potentilla palustris", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Ethusa Cynapium", "Aethusa cynapium", clean_linne_sp$species)
# ## perhaps remove this species as it is a liverwort
# clean_linne_sp$species<-gsub("Jungermannia pusilla", "Blasia pusilla", clean_linne_sp$species)
# ##
# clean_linne_sp$species<-gsub("Ophrys Monorchis", "Herminium monorchis", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Pedicularis SceptCarolin", "Pedicularis sceptrum-carolinum", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Scabiosam arvensem", "Knautia arvensis", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Chenopodium Bonus henricus", "Blitum bonus-henricus", clean_linne_sp$species)
# ## saved to data folder
# write.csv(clean_linne_sp, file="clean_linne_sp.csv")
## checked and changed species names clean_trait_sp
#IB: Data coming from??? show read.data or refer to the file where it was loaded
#
# clean_trait_sp$species<-gsub("Capsella bursa -pastoris", "Capsella bursa-pastoris", clean_trait_sp$species)
# ## possibly remove these as they have no positive species ID
# clean_trait_sp$species<-gsub("Arabidopsis sp.", "Arabidopsis", clean_trait_sp$species)
# clean_trait_sp$species<-gsub("Myosotis spp.", "Myosotis", clean_trait_sp$species)
# ## saved to data folder
# write.csv(clean_trait_sp, file="clean_trait_sp.csv")
## checked and changed species names clean_trails_sp
#IB: Data coming from??? show read.data or refer to the file where it was loaded
# clean_trails_sp$species<-gsub("Capsella bursa -pastoris", "Capsella bursa-pastoris", clean_trails_sp$species)
# ## possibly remove these as they have no positive species ID
# clean_trails_sp$species<-gsub("Arabidopsis spp.", "Arabidopsis", clean_trails_sp$species)
# clean_trails_sp$species<-gsub("Myosotis spp.", "Myosotis", clean_trails_sp$species)
## saved to data folder
#write.csv(clean_trails_sp, file="clean_trails_sp.csv")
| /joe_edits.R | no_license | ibartomeus/linnaeus2.0 | R | false | false | 5,539 | r | ## 1. removed entries in linneaus.csv data that had no species
#IB: Data coming from??? show read.data or refer to the file where it was loaded
#clean_linneaus=linneaus[c(1:294, 296:367, 369:474, 476:495, 497:538),]
# ## 2. grouped factors into categories
# ## create new column by using micro-habitat data
# clean_linneaus$Habitat_groups=(clean_linneaus$`Micro-habitat`)
# ## set data to as factor
# clean_linneaus$Habitat_groups=as.factor(clean_linneaus$Habitat_groups)
# ## re group factors in new column
# levels(clean_linneaus$Habitat_groups)<-list(
# Rupes=c("1 Rupes Lassbyenses", "10 Rupes Gottsundenses", "3 Rupes"),
# Silva=c("2 Pascua Sylvatica", "12 Silva Regia", "5 Silva Regia",
# "1 Sylva Deserta", "4 Saltus Rickombergensis", "5 Nemus Regium",
# "6 Lucus Norrbyensis"),
# Palus=c("3 Palus Norrbyensis", "2 Palus Jumkilensis"),
# Pratum=c("4 Pratum Norrbyense", "7 Pratum Gottsundense",
# "11 Praedium Wardsattra", "1 Pratum Regium",
# "2 Pratumbarbyense", "3 Prata Ekensia & Fallensia",
# "3 Pratum Saltpetterángen", "4 Prata",
# "8 Praedium Kiattinge", "9 Pratum Nyflense",
# "10 Praedium Husby", "5 Prataflogstadensia & Hagensia"),
# Agros=c("2 Agri Upsalieness", "2 Agri Waxalenses", "3 Versurae Agrorum"),
# Fluvius=c("2 Ripae", "2 Ripa fluvii UPSALIENSIS", "3 Fluvius",
# "6 Rivulus", "6 Fons Sandwiksensis","5 Uliginosa Amni",
# "2 Piscina"),
# Urbis=c("1Tecta Upsaliensia", "3 Plateae Urbis Upsaliensis",
# "1 Via Upsaliensis", "1 Tecta Upsaliensia", "4 Campus Polonicus",
# "Tegelhagen", "1 SEPTA"),
# Rusticus=c("9 Praecipitia Gottsundensia", "6 Lucus Norrbyensis", "7 Haga",
# "1 Luthagen", "5 Tórnby", "4 Falla", "5 Lucus Nantunensis",
# "9 Liljeholmen", "7 Ultuna", "5 Wittulsbergensis",
# "4 Tumuli Upsaliensis", "4 Templo Danemarkensi",
# "7 Templi Borfensis", "3 Lupuleta", "8 Bovilia Pastoris",
# "1 Tegelhagen")
# )
#
# ## saved to data folder
# write.csv(clean_linneaus, file="clean_linneaus.csv")
## 3. checked and changed species names
#IB: Data coming from??? show read.data or refer to the file where it was loaded
# clean_linne_sp <- read.csv("data/clean_linne_sp.csv", h = T)
# ## manually change species names using gsub
# clean_linne_sp$species<-gsub("Arbutus Uva urfi", "Arctostaphylos uva-ursi", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Cerefus Padus", "Prunus padus", clean_linne_sp$species)
# ## perhaps remove this species as it is a fungus
# clean_linne_sp$species<-gsub("Elvela Mitra", "Gyromitra infula", clean_linne_sp$species)
# ##
# clean_linne_sp$species<-gsub("Ophrys Nidus", "Neottia nidus-avis", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Ranuculus Ficaria:", "Ficaria verna", clean_linne_sp$species)
# ## perhaps remove this species as it is a lichen
# clean_linne_sp$species<-gsub("Lichen nivalis:", "Lepraria nivalis", clean_linne_sp$species)
# ##
# clean_linne_sp$species<-gsub("Ribes Vua crispa", "Ribes uva-crispa", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("TuffiIago Farfara", "Tussilago farfara", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Convolvulus Polygonum", "Polygonum convolvulus", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Crataeus Aira", "Sorbus aria", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Buphtalmo tinctorio", "Cota tinctoria", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Comarum palustre", "Potentilla palustris", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Ethusa Cynapium", "Aethusa cynapium", clean_linne_sp$species)
# ## perhaps remove this species as it is a liverwort
# clean_linne_sp$species<-gsub("Jungermannia pusilla", "Blasia pusilla", clean_linne_sp$species)
# ##
# clean_linne_sp$species<-gsub("Ophrys Monorchis", "Herminium monorchis", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Pedicularis SceptCarolin", "Pedicularis sceptrum-carolinum", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Scabiosam arvensem", "Knautia arvensis", clean_linne_sp$species)
# clean_linne_sp$species<-gsub("Chenopodium Bonus henricus", "Blitum bonus-henricus", clean_linne_sp$species)
# ## saved to data folder
# write.csv(clean_linne_sp, file="clean_linne_sp.csv")
## checked and changed species names clean_trait_sp
#IB: Data coming from??? show read.data or refer to the file where it was loaded
#
# clean_trait_sp$species<-gsub("Capsella bursa -pastoris", "Capsella bursa-pastoris", clean_trait_sp$species)
# ## possibly remove these as they have no positive species ID
# clean_trait_sp$species<-gsub("Arabidopsis sp.", "Arabidopsis", clean_trait_sp$species)
# clean_trait_sp$species<-gsub("Myosotis spp.", "Myosotis", clean_trait_sp$species)
# ## saved to data folder
# write.csv(clean_trait_sp, file="clean_trait_sp.csv")
## checked and changed species names clean_trails_sp
#IB: Data coming from??? show read.data or refer to the file where it was loaded
# clean_trails_sp$species<-gsub("Capsella bursa -pastoris", "Capsella bursa-pastoris", clean_trails_sp$species)
# ## possibly remove these as they have no positive species ID
# clean_trails_sp$species<-gsub("Arabidopsis spp.", "Arabidopsis", clean_trails_sp$species)
# clean_trails_sp$species<-gsub("Myosotis spp.", "Myosotis", clean_trails_sp$species)
## saved to data folder
#write.csv(clean_trails_sp, file="clean_trails_sp.csv")
|
## Hijack a function
## see: http://stackoverflow.com/a/25366322/1000343
hijack <- function(FUN, ...) {
  ## Return a copy of FUN whose default values for the arguments named in
  ## `...` are replaced by the supplied values. The function body is
  ## untouched; only the formals change.
  ## (Technique from http://stackoverflow.com/a/25366322/1000343)
  .FUN <- FUN
  new_defaults <- list(...)
  for (arg_name in names(new_defaults)) {
    formals(.FUN)[[arg_name]] <- new_defaults[[arg_name]]
  }
  .FUN
}
## Ellipsis-receiving function changing
elli <- function(fun, myargs, ...) {
  ## Override the default arguments of `fun` using `myargs` (a named list)
  ## or, if `myargs` is missing, the named arguments captured in `...`.
  ## Entries that do not match a formal argument of `fun` are silently
  ## dropped; if nothing matches, `fun` is returned unchanged.
  if (missing(myargs)) myargs <- list(...)
  matches <- names(myargs) %in% names(formals(fun))
  if (!any(matches)) {
    return(fun)
  }
  .FUN <- fun
  keep <- myargs[matches]
  for (arg_name in names(keep)) {
    formals(.FUN)[[arg_name]] <- keep[[arg_name]]
  }
  .FUN
}
binder <- function(x, ignore.case = FALSE, left = "\\b", right = left) {
  ## Wrap each pattern in `x` with boundary anchors (`left`/`right`),
  ## group the results, and collapse them into a single alternation
  ## regex via qdapRegex.
  if (ignore.case){
    ## Make the first letter of each term case-insensitive, e.g. "x" -> "[xX]".
    x <- ignore_case(x)
  }
  qdapRegex::pastex(qdapRegex::group(qdapRegex::bind(x, left = left, right = right)))
}
binder2 <- function(x, ignore.case = FALSE, left = "\\b", right = left) {
  ## Like binder() but wraps the whole alternation in one extra capturing group.
  ## Fix: forward `ignore.case` to binder() instead of hard-coding FALSE, so
  ## binder2(x, ignore.case = TRUE) actually performs case-insensitive matching.
  paste0("(", binder(x, ignore.case = ignore.case, left = left, right = right), ")")
}
## Ignore case in regex (i.e., convert `x` to [xX]`
ignore_case <- function(terms, left = "\\b", right = left) {
  ## Make the leading letter of each term case-insensitive by rewriting it
  ## as a character class, e.g. "cat" -> "[cC]at". Terms that do not start
  ## with a letter are returned unchanged.
  ## NOTE(review): `left`/`right` are accepted for interface compatibility
  ## but are not used in this function -- confirm whether they were meant
  ## to be applied here.
  first_chars <- substring(terms, 1, 1)
  mapply(function(term, ch) {
    sub("^[a-zA-Z]", paste0("[", tolower(ch), toupper(ch), "]"), term)
  }, terms, first_chars, USE.NAMES = FALSE)
}
## Rename termco object's data frame's grouping variable
termco_group_name_replace <- function(x, nms) {
  ## Rename the first (grouping-variable) column of the "raw", "prop" and
  ## "rnp" data frames stored inside a termco object `x` to `nms`.
  ## All other elements of `x` are left untouched.
  for (element in c("raw", "prop", "rnp")) {
    names(x[[element]])[1] <- nms
  }
  x
}
## Capitalize Plot Titles
Caps <- function (x, all = FALSE) {
  ## Capitalize the first letter of `x`. With `all = TRUE`, split `x` on
  ## spaces first so that every word is capitalized, then rejoin with spaces.
  if (all) {
    x <- strsplit(x, " ")[[1]]
  }
  capitalized <- paste0(toupper(substring(x, 1, 1)), substring(x, 2))
  paste(capitalized, collapse = " ")
}
| /R/utils.R | no_license | trinker/discon | R | false | false | 1,873 | r | ## Hijack a function
## see: http://stackoverflow.com/a/25366322/1000343
hijack <- function(FUN, ...) {
  ## Return a copy of FUN whose default values for the arguments named in
  ## `...` are replaced by the supplied values. The function body is
  ## untouched; only the formals change.
  ## (Technique from http://stackoverflow.com/a/25366322/1000343)
  .FUN <- FUN
  new_defaults <- list(...)
  for (arg_name in names(new_defaults)) {
    formals(.FUN)[[arg_name]] <- new_defaults[[arg_name]]
  }
  .FUN
}
## Ellipsis-receiving function changing
elli <- function(fun, myargs, ...) {
  ## Override the default arguments of `fun` using `myargs` (a named list)
  ## or, if `myargs` is missing, the named arguments captured in `...`.
  ## Entries that do not match a formal argument of `fun` are silently
  ## dropped; if nothing matches, `fun` is returned unchanged.
  if (missing(myargs)) myargs <- list(...)
  matches <- names(myargs) %in% names(formals(fun))
  if (!any(matches)) {
    return(fun)
  }
  .FUN <- fun
  keep <- myargs[matches]
  for (arg_name in names(keep)) {
    formals(.FUN)[[arg_name]] <- keep[[arg_name]]
  }
  .FUN
}
binder <- function(x, ignore.case = FALSE, left = "\\b", right = left) {
  ## Wrap each pattern in `x` with boundary anchors (`left`/`right`),
  ## group the results, and collapse them into a single alternation
  ## regex via qdapRegex.
  if (ignore.case){
    ## Make the first letter of each term case-insensitive, e.g. "x" -> "[xX]".
    x <- ignore_case(x)
  }
  qdapRegex::pastex(qdapRegex::group(qdapRegex::bind(x, left = left, right = right)))
}
binder2 <- function(x, ignore.case = FALSE, left = "\\b", right = left) {
  ## Like binder() but wraps the whole alternation in one extra capturing group.
  ## Fix: forward `ignore.case` to binder() instead of hard-coding FALSE, so
  ## binder2(x, ignore.case = TRUE) actually performs case-insensitive matching.
  paste0("(", binder(x, ignore.case = ignore.case, left = left, right = right), ")")
}
## Ignore case in regex (i.e., convert `x` to [xX]`
ignore_case <- function(terms, left = "\\b", right = left) {
  ## Make the leading letter of each term case-insensitive by rewriting it
  ## as a character class, e.g. "cat" -> "[cC]at". Terms that do not start
  ## with a letter are returned unchanged.
  ## NOTE(review): `left`/`right` are accepted for interface compatibility
  ## but are not used in this function -- confirm whether they were meant
  ## to be applied here.
  first_chars <- substring(terms, 1, 1)
  mapply(function(term, ch) {
    sub("^[a-zA-Z]", paste0("[", tolower(ch), toupper(ch), "]"), term)
  }, terms, first_chars, USE.NAMES = FALSE)
}
## Rename termco object's data frame's grouping variable
termco_group_name_replace <- function(x, nms) {
  ## Rename the first (grouping-variable) column of the "raw", "prop" and
  ## "rnp" data frames stored inside a termco object `x` to `nms`.
  ## All other elements of `x` are left untouched.
  for (element in c("raw", "prop", "rnp")) {
    names(x[[element]])[1] <- nms
  }
  x
}
## Capitalize Plot Titles
Caps <- function (x, all = FALSE) {
  ## Capitalize the first letter of `x`. With `all = TRUE`, split `x` on
  ## spaces first so that every word is capitalized, then rejoin with spaces.
  if (all) {
    x <- strsplit(x, " ")[[1]]
  }
  capitalized <- paste0(toupper(substring(x, 1, 1)), substring(x, 2))
  paste(capitalized, collapse = " ")
}
|
## Exploratory plot: histogram of Global Active Power for 2007-02-01 and
## 2007-02-02, written to plot1.png (480x480).
data <- read.table("household_power_consumption.txt", header=T, sep = ";", stringsAsFactors = F, dec = ".")
# Parse the Date column (stored as day/month/year) into Date objects.
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Keep only the two days of interest.
subdata <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02",]
png("plot1.png", width=480, height=480)
# Global_active_power is coerced to numeric here -- presumably it was read
# as character because the file encodes missing values non-numerically
# (TODO confirm against the source file).
hist(as.numeric(subdata$Global_active_power), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | takanyaotomo/ExData_Plotting1 | R | false | false | 407 | r | data <- read.table("household_power_consumption.txt", header=T, sep = ";", stringsAsFactors = F, dec = ".")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
subdata <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02",]
png("plot1.png", width=480, height=480)
hist(as.numeric(subdata$Global_active_power), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
#' Creates a plotly manhattan plot
#'
#' Creates an interactive manhattan plot with multiple annotation options
#'
#' @param x Can be an object of class \code{manhattanr} produced by the
#' \code{\link{manhattanr}} function or a \code{data.frame} which must contain
#' at least the following three columns: \itemize{ \item{the chromosome
#' number} \item{genomic base-pair position} \item{a numeric quantity to plot
#' such as a p-value or zscore} }
#' @param col A character vector indicating the colors of each chromosome. If
#' the number of colors specified is less than the number of unique
#' chromosomes, then the elements will be recycled. Can be
#' \href{http://www.rapidtables.com/web/color/RGB_Color.htm}{Hex Codes} as
#' well.
#' @param point_size A \code{numeric} indicating the size of the points on the
#' plot. Default is 5
#' @param labelChr A character vector equal to the number of chromosomes
#' specifying the chromosome labels (e.g., \code{c(1:22, "X", "Y", "MT")}).
#' Default is \code{NULL}, meaning that the actual chromosome numbers will be
#' used.
#' @param suggestiveline Where to draw a "suggestive" line. Default is
#' \code{-log10(1e-5)}. Set to \code{FALSE} to disable.
#' @param suggestiveline_color color of "suggestive" line. Only used if
#' \code{suggestiveline} is not set to \code{FALSE}. Default is \code{"blue"}.
#' @param suggestiveline_width Width of \code{suggestiveline}. Default is 1.
#' @param genomewideline Where to draw a "genome-wide sigificant" line. Default
#' \code{-log10(5e-8)}. Set to \code{FALSE} to disable.
#' @param genomewideline_color color of "genome-wide sigificant" line. Only used
#' if \code{genomewideline} is not set to \code{FALSE}. Default is
#' \code{"red"}.
#' @param genomewideline_width Width of \code{genomewideline}. Default is 1.
#' @param highlight A character vector of SNPs in your dataset to highlight.
#' These SNPs should all be in your dataset. Default is \code{NULL} which
#' means that nothing is highlighted.
#' @param highlight_color Color used to highlight points. Only used if
#' \code{highlight} argument has been specified
#' @param showlegend Should a legend be shown. Default is \code{FALSE}.
#' @param showgrid Should gridlines be shown. Default is \code{FALSE}.
#' @param xlab X-axis label. Default is \code{NULL} which means that the label
#' is automatically determined by the \code{\link{manhattanr}} function.
#' Specify here to overwrite the default.
#' @param ylab Y-axis label. Default is \code{"-log10(p)"}.
#' @param title Title of the plot. Default is \code{"Manhattan Plot"}
#' @param ... other parameters passed to \code{\link{manhattanr}}
#' @inheritParams manhattanr
#' @note This package is inspired by the
#' \href{https://github.com/stephenturner/qqman}{\code{qqman}} package by
#' \href{http://www.gettinggeneticsdone.com/}{Stephen Turner}. Much of the
#' plot format and pre-processing is the same. This package provides
#' additional annotation options and builds on the \code{\link{plotly}}
#' \code{d3.js} engine. These plots can be included in Shiny apps, Rmarkdown
#' documents or embeded in websites using simple HTML code.
#' @return An interactive manhattan plot.
#' @seealso \code{\link{manhattanr}}, \code{\link{HapMap}},
#' \code{\link{significantSNP}}, \code{\link[qqman]{manhattan}},
#' \url{https://github.com/stephenturner/qqman},
#' \href{https://github.com/nstrayer/D3ManhattanPlots}{D3ManhattanPlots}
#' @aliases manhattanly.default manhattanly.manhattanr
#' @importFrom magrittr '%<>%'
#' @import plotly
#' @export
#' @examples
#' \dontrun{
#' library(manhattanly)
#' manhattanly(HapMap)
#'
#' # highlight SNPs of interest
#' # 'signigicantSNP' is a character vector of SNPs included in this package
#' manhattanly(HapMap, snp = "SNP", highlight = significantSNP)
#' }
## S3 generic: dispatches on class(x) -- data frames go through
## manhattanly.default (which pre-processes via manhattanr()), while
## pre-built "manhattanr" objects go straight to manhattanly.manhattanr.
## See the roxygen block above for the full parameter documentation.
manhattanly <- function(x,
                        # col = colorRampPalette(RColorBrewer::brewer.pal(n = 9, name = "Set1"))(nchr),
                        # col = RColorBrewer::brewer.pal(n = 9, name = "Greys"),
                        ...,
                        col = c("#969696", "#252525"),
                        point_size = 5,
                        labelChr = NULL,
                        suggestiveline = -log10(1e-5),
                        suggestiveline_color = "blue",
                        suggestiveline_width = 1,
                        genomewideline = -log10(5e-8),
                        genomewideline_color = "red",
                        genomewideline_width = 1,
                        highlight = NULL,
                        highlight_color = "#00FF00",
                        showlegend = FALSE,
                        showgrid = FALSE,
                        xlab = NULL,
                        ylab = "-log10(p)",
                        title = "Manhattan Plot") {
  UseMethod("manhattanly")
}
#' @export
manhattanly.default <- function(x,
                                ...,
                                col = c("#969696", "#252525"),
                                point_size = 5,
                                labelChr = NULL,
                                suggestiveline = -log10(1e-5),
                                suggestiveline_color = "blue",
                                suggestiveline_width = 1,
                                genomewideline = -log10(5e-8),
                                genomewideline_color = "red",
                                genomewideline_width = 1,
                                highlight = NULL,
                                highlight_color = "#00FF00",
                                showlegend = FALSE,
                                showgrid = FALSE,
                                xlab = NULL,
                                ylab = "-log10(p)",
                                title = "Manhattan Plot") {

  ## Default method: pre-process `x` (a data frame of results) into a
  ## "manhattanr" object -- extra arguments in `...` are forwarded to
  ## manhattanr() -- then delegate all plotting to manhattanly.manhattanr.
  mh <- manhattanr(x, ...)

  ## (Fix) dropped the unused local `nchr <- mh$nchr`; the manhattanr
  ## method reads nchr from the object itself.
  manhattanly.manhattanr(mh,
                         col = col,
                         labelChr = labelChr,
                         point_size = point_size,
                         suggestiveline = suggestiveline,
                         suggestiveline_color = suggestiveline_color,
                         suggestiveline_width = suggestiveline_width,
                         genomewideline = genomewideline,
                         genomewideline_color = genomewideline_color,
                         genomewideline_width = genomewideline_width,
                         highlight = highlight,
                         highlight_color = highlight_color,
                         showlegend = showlegend,
                         showgrid = showgrid,
                         xlab = xlab,
                         ylab = ylab,
                         title = title)
}
#' @export
manhattanly.manhattanr <- function(x,
                                   ...,
                                   col = c("#969696", "#252525"),
                                   point_size = 5,
                                   labelChr = NULL,
                                   suggestiveline = -log10(1e-5),
                                   suggestiveline_color = "blue",
                                   suggestiveline_width = 1,
                                   genomewideline = -log10(5e-8),
                                   genomewideline_color = "red",
                                   genomewideline_width = 1,
                                   highlight = NULL,
                                   highlight_color = "#00FF00",
                                   showlegend = FALSE,
                                   showgrid = FALSE,
                                   xlab = NULL,
                                   ylab = "-log10(p)",
                                   title = "Manhattan Plot") {

  ## Workhorse method: builds the interactive plotly manhattan plot from a
  ## pre-processed "manhattanr" object. Broad structure:
  ##   1. unpack the manhattanr fields,
  ##   2. set up axes (single- vs multi-chromosome),
  ##   3. add the scatter traces (with hover text if annotation columns exist),
  ##   4. draw the suggestive/genome-wide reference lines,
  ##   5. optionally overlay highlighted SNPs.

  # x <- manhattanr(gwasResults)
  # x <- manhattanr(kk, annotation1 = "ZSCORE", annotation2 = "EFFECTSIZE")
  # x <- manhattanr(kk, annotation1 = "ZSCORE")
  # x <- manhattanr(kk, annotation1 = "ZSCORE", annotation2 = "EFFECTSIZE")
  # x <- manhattanr(HapMap, snp = "SNP", gene = "GENE")
  #
  # x$data %>% head
  # str(x$data)
  # labelChr <- NULL
  # col <- colorRampPalette(rev(RColorBrewer::brewer.pal(n = 7, name ="Set1")))(22)
  # showgrid <- TRUE
  # labelChr = NULL
  # point_size = 5
  # suggestiveline = -log10(1e-5)
  # genomewideline = -log10(5e-8)
  # suggestiveline_color = "blue"
  # genomewideline_color = "red"
  # suggestiveline_width = genomewideline_width = 1;
  # highlight_color = "#00FF00"
  # highlight = c(significantSNP, x$data$SNP[1:20])
  # showlegend = TRUE
  # showgrid = TRUE
  # ylab = "-log10(p)"
  # xlab = NULL
  # title = "Manhattan Plot"
  # col = c("#969696", "#252525")

  #########

  ## NOTE(review): calling library() inside a function modifies the search
  ## path as a side effect; package code should rely on the NAMESPACE
  ## (@import plotly) or plotly:: prefixes instead. The %>% / group_by()
  ## calls below also depend on magrittr/dplyr being attached.
  library(plotly)

  ## Unpack the fields computed by manhattanr().
  d <- x$data
  pName <- x$pName
  snpName <- x$snpName
  geneName <- x$geneName
  annotation1Name <- x$annotation1Name
  annotation2Name <- x$annotation2Name
  labs <- x$labs
  xlabel <- x$xlabel
  ticks <- x$ticks
  nchr <- x$nchr
  group_var <- x$group

  if (!is.null(highlight) & is.na(snpName)) stop("You're trying to highlight snps, but havent provided a snp column")

  # Initialize plot
  ## x-axis limits padded by 3% of the maximum position on each side.
  ## NOTE(review): xmin is derived from max(d$pos), not min(d$pos) --
  ## presumably intentional since positions start near 0, but confirm.
  xmax = ceiling(max(d$pos) * 1.03)
  xmin = floor(max(d$pos) * -0.03)

  # If manually specifying chromosome labels, ensure a character vector
  # and number of labels matches number chrs.
  if (!is.null(labelChr)) {
    if (is.character(labelChr)) {
      if (length(labelChr)==length(labs)) {
        labs <- labelChr
      } else {
        warning("You're trying to specify chromosome labels but the number of labels != number of chromosomes.")
      }
    } else {
      warning("If you're trying to specify chromosome labels, labelChr must be a character vector")
    }
  }

  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  # Initalize plotly
  #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  p <- plotly::plot_ly()

  # Add an axis.
  if (nchr == 1) {
    #If single chromosome, ticks and labels automatic.
    p %<>% plotly::layout(p,
                          title = title,
                          xaxis = list(
                            title = if(!is.null(xlab)) xlab else xlabel,
                            # title = "ll",
                            showgrid = showgrid,
                            range = c(xmin, xmax)
                          ),
                          yaxis = list(
                            title = ylab)#,
                          #range = c(0,ceiling(max(d$logp)))
                          #)
    )
  } else {
    # if multiple chrs, use the ticks and labels you created above.
    p %<>% plotly::layout(p,
                          title = title,
                          xaxis = list(
                            title = if(!is.null(xlab)) xlab else "Chromosome",
                            # title = "ll",
                            showgrid = showgrid,
                            range = c(xmin, xmax),
                            autotick = FALSE,
                            tickmode = "array",
                            tickvals = ticks,
                            ticktext = labs,
                            ticks = "outside"
                          ),
                          yaxis = list(
                            title = ylab)#,
                          #range = c(0,ceiling(max(d$logp)))
                          #)
    )
  }

  # Create a vector of alternating colors
  ## NOTE(review): this overwrites the user-supplied `col` argument with a
  ## Set1 Brewer palette, `brewer.pal` is called without the RColorBrewer::
  ## prefix, and the resulting `col` is never referenced below (every trace
  ## passes colors = "Set1" directly) -- looks like dead code left over from
  ## a refactor; confirm before removing.
  col <- brewer.pal(n = length(unique(d$group)), name = "Set1")

  # Add points to the plot
  if (nchr==1) {

    # paste(if (!is.na(snpName)) paste0(snpName,": ",d[[snpName]],"<br>"),
    #       if (!is.na(geneName)) paste0(geneName,": ",d[[geneName]],"<br>"),
    #       if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d[[annotation1Name]],"<br>")
    #       if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d[[annotation2Name]],"<br>")

    ## Hover text: one "<br>"-separated entry per available annotation column
    ## (missing columns contribute nothing because the `if` yields NULL).
    TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ",d[[snpName]]),
                  if (!is.na(geneName)) paste0(geneName,": ",d[[geneName]]),
                  if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d[[annotation1Name]]),
                  if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d[[annotation2Name]]), sep = "<br>")

    ## No annotation columns at all -> plain markers without hover text.
    if (is.na(snpName) && is.na(geneName) && is.na(annotation1Name) && is.na(annotation2Name)) {

      p %<>% plotly::add_trace(data = d, x = d$pos, y = d$logp, color = ~group, colors = "Set1",
                               type = "scatter",
                               mode = "markers",
                               # text = TEXT,
                               showlegend = showlegend,
                               marker = list(size = point_size)
                               #name = paste0("chr", unique(d$CHR))
      )
    } else {

      p %<>% plotly::add_trace(data = d, x = d$pos, y = d$logp, color = ~group, colors = "Set1",
                               type = "scatter",
                               mode = "markers",
                               text = TEXT,
                               showlegend = showlegend,
                               marker = list(size = point_size)
                               #name = paste0("chr", unique(d$CHR))
      )
    }

  } else {

    # for(i in unique(d$index)) {
    #
    # tmp <- d[d$index == unique(d$index)[i], ]
    #
    TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ", d[[snpName]]),
                  if (!is.na(geneName)) paste0(geneName,": ", d[[geneName]]),
                  if (!is.na(annotation1Name)) paste0(annotation1Name,": ", d[[annotation1Name]]),
                  if (!is.na(annotation2Name)) paste0(annotation2Name,": ", d[[annotation2Name]]),
                  sep = "<br>")

    # # get chromosome name for labeling
    # chromo <- unique(tmp[which(tmp$index==i),"CHR"])

    ## Multi-chromosome case: rebuild the plot from scratch with one trace
    ## per group (chromosome), discarding the `p` initialized above.
    if (is.na(snpName) && is.na(geneName) && is.na(annotation1Name) && is.na(annotation2Name)) {

      p <- d %>%
        group_by(group) %>%
        plot_ly(x = ~pos , color = ~group, colors = "Set1") %>%
        add_trace(y = ~logp,
                  showlegend = showlegend,
                  type = "scatter",
                  mode = "markers",
                  marker = list(size = point_size)) %>%
        plotly::layout(title = title,
                       xaxis = list(
                         title = if(!is.null(xlab)) xlab else "Chromosome",
                         # title = "ll",
                         showgrid = showgrid,
                         range = c(xmin, xmax),
                         autotick = FALSE,
                         tickmode = "array",
                         tickvals = ticks,
                         ticktext = labs,
                         ticks = "outside"
                       ),
                       yaxis = list(
                         title = ylab))

      ## Old original code
      # p %<>% plotly::add_trace(data =tmp, x = tmp$pos, y = tmp$logp, type = "scatter", color = ~group, colors = "Set1",
      #                          type = "scatter",
      #                          mode = "markers",
      #                          showlegend = showlegend,
      #                          marker = list(size = point_size),
      #                          legendgroup=~group)

    } else {

      p <- d %>%
        group_by(group) %>%
        plot_ly(x = ~pos , color = ~group, colors = "Set1") %>%
        add_trace(y = ~logp,
                  showlegend = showlegend,
                  type = "scatter",
                  mode = "markers",
                  text = TEXT,
                  marker = list(size = point_size)) %>%
        plotly::layout(title = title,
                       xaxis = list(
                         title = if(!is.null(xlab)) xlab else "Chromosome",
                         # title = "ll",
                         showgrid = showgrid,
                         range = c(xmin, xmax),
                         autotick = FALSE,
                         tickmode = "array",
                         tickvals = ticks,
                         ticktext = labs,
                         ticks = "outside"
                       ),
                       yaxis = list(
                         title = ylab))

      # p %<>% plotly::add_trace(data =tmp, x = tmp$pos, y = tmp$logp, type = "scatter", color = ~group, colors = "Set1",
      #                          type = "scatter",
      #                          mode = "markers",
      #                          showlegend = showlegend,
      #                          text = TEXT,
      #                          marker = list(size = point_size),
      #                          legendgroup=~group)
    }

    #}
  }

  ## Horizontal reference lines. `suggestiveline`/`genomewideline` are either
  ## FALSE (disabled) or a numeric y-value; a non-zero numeric is truthy in
  ## the scalar `&` tests below, so each branch handles one enable/disable
  ## combination.
  if (suggestiveline & genomewideline) {p %<>% plotly::layout(p,
                                                              shapes = list(
                                                                list(type = "line",
                                                                     fillcolor = suggestiveline_color,
                                                                     line = list(color = suggestiveline_color,
                                                                                 width = suggestiveline_width),
                                                                     x0 = xmin, x1 = xmax, xref = "x",
                                                                     y0 = suggestiveline, y1 = suggestiveline, yref = "y"),
                                                                list(type = "line",
                                                                     fillcolor = genomewideline_color,
                                                                     line = list(color = genomewideline_color,
                                                                                 width = genomewideline_width),
                                                                     x0 = xmin, x1 = xmax, xref = "x",
                                                                     y0 = genomewideline, y1 = genomewideline, yref = "y")
                                                              ))}

  if (suggestiveline & !(genomewideline)) {p %<>% plotly::layout(p,
                                                                 shapes = list(
                                                                   list(type = "line",
                                                                        fillcolor = suggestiveline_color,
                                                                        line = list(color = suggestiveline_color,
                                                                                    width = suggestiveline_width),
                                                                        x0 = xmin, x1 = xmax, xref = "x",
                                                                        y0 = suggestiveline, y1 = suggestiveline, yref = "y")
                                                                 ))}

  if (!(suggestiveline) & genomewideline) {p %<>% plotly::layout(p,
                                                                 shapes = list(
                                                                   list(type = "line",
                                                                        fillcolor = genomewideline_color,
                                                                        line = list(color = genomewideline_color,
                                                                                    width = genomewideline_width),
                                                                        x0 = xmin, x1 = xmax, xref = "x",
                                                                        y0 = genomewideline, y1 = genomewideline, yref = "y")
                                                                 ))}

  # Highlight snps from a character vector
  if (!is.na(snpName)) {
    if (!is.null(highlight)) {
      if (any(!(highlight %in% d[[snpName]]))) warning("You're trying to highlight SNPs that don't exist in your results.")

      d.highlight <- d[which(d[[snpName]] %in% highlight), ]

      # Add points to the plot
      if (nchr==1) {

        TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ",d.highlight[[snpName]]),
                      if (!is.na(geneName)) paste0(geneName,": ",d.highlight[[geneName]]),
                      if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d.highlight[[annotation1Name]]),
                      if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d.highlight[[annotation2Name]]), sep = "<br>")

        ## NOTE(review): this trace plots d$pos/d$logp (the FULL data) while
        ## the hover text comes from d.highlight -- looks like it should use
        ## d.highlight$pos / d.highlight$logp; confirm before changing.
        p %<>% plotly::add_trace(x = d$pos, y = d$logp,
                                 type = "scatter",
                                 mode = "markers",
                                 text = TEXT,
                                 showlegend = showlegend,
                                 marker = list(color = highlight_color,
                                               size = point_size),
                                 name = "of interest")
      } else {

        # icol <- 1
        ## One highlight trace per chromosome index present in the subset.
        for(i in unique(d.highlight$index)) {

          tmp <- d.highlight[d.highlight$index == i, ]

          TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ", tmp[[snpName]]),
                        if (!is.na(geneName)) paste0(geneName,": ", tmp[[geneName]]),
                        if (!is.na(annotation1Name)) paste0(annotation1Name,": ", tmp[[annotation1Name]]),
                        if (!is.na(annotation2Name)) paste0(annotation2Name,": ", tmp[[annotation2Name]]),
                        sep = "<br>")

          # get chromosome name for labeling
          chromo <- unique(tmp[which(tmp$index==i),"CHR"])
          p %<>% plotly::add_trace(x = tmp$pos,
                                   y = tmp$logp,
                                   type = "scatter",
                                   mode = "markers",
                                   text = TEXT,
                                   showlegend = showlegend,
                                   marker = list(color = highlight_color,
                                                 size = point_size),
                                   name = "of interest")
          # icol = icol + 1
        }
      }

      # p %<>% plotly::add_trace(x = d.highlight$pos,
      #                          y = d.highlight$logp,
      #                          type = "scatter",
      #                          mode = "markers",
      #                          #evaluate = TRUE,
      #                          text = d.highlight[[snpName]],
      #                          showlegend = showlegend,
      #                          marker = list(color = highlight_color,
      #                                        size = point_size),
      #                          name = "of interest")
    }
  }

  p
}
# jj <- manhattan_plotly(gwasResults, genomewideline = FALSE)
#
# jj
# str(jj)
# topHits = subset(d, P <= annotatePval)
# p %>% layout(annotations = list(x = topHits$pos[10],
# y = -log10(topHits$P[10]),
# text = topHits$SNP[10],
# showarrow = T))
## Negation of %in%: TRUE for elements of `x` NOT found in `table`.
"%ni%" <- function(x, table) !(x %in% table)
| /R/manhattanly.R | no_license | FloWuenne/manhattanlyCRISPR | R | false | false | 23,730 | r | #' Creates a plotly manhattan plot
#'
#' Creates an interactive manhattan plot with multiple annotation options
#'
#' @param x Can be an object of class \code{manhattanr} produced by the
#' \code{\link{manhattanr}} function or a \code{data.frame} which must contain
#' at least the following three columns: \itemize{ \item{the chromosome
#' number} \item{genomic base-pair position} \item{a numeric quantity to plot
#' such as a p-value or zscore} }
#' @param col A character vector indicating the colors of each chromosome. If
#' the number of colors specified is less than the number of unique
#' chromosomes, then the elements will be recycled. Can be
#' \href{http://www.rapidtables.com/web/color/RGB_Color.htm}{Hex Codes} as
#' well.
#' @param point_size A \code{numeric} indicating the size of the points on the
#' plot. Default is 5
#' @param labelChr A character vector equal to the number of chromosomes
#' specifying the chromosome labels (e.g., \code{c(1:22, "X", "Y", "MT")}).
#' Default is \code{NULL}, meaning that the actual chromosome numbers will be
#' used.
#' @param suggestiveline Where to draw a "suggestive" line. Default is
#' \code{-log10(1e-5)}. Set to \code{FALSE} to disable.
#' @param suggestiveline_color color of "suggestive" line. Only used if
#' \code{suggestiveline} is not set to \code{FALSE}. Default is \code{"blue"}.
#' @param suggestiveline_width Width of \code{suggestiveline}. Default is 1.
#' @param genomewideline Where to draw a "genome-wide sigificant" line. Default
#' \code{-log10(5e-8)}. Set to \code{FALSE} to disable.
#' @param genomewideline_color color of "genome-wide sigificant" line. Only used
#' if \code{genomewideline} is not set to \code{FALSE}. Default is
#' \code{"red"}.
#' @param genomewideline_width Width of \code{genomewideline}. Default is 1.
#' @param highlight A character vector of SNPs in your dataset to highlight.
#' These SNPs should all be in your dataset. Default is \code{NULL} which
#' means that nothing is highlighted.
#' @param highlight_color Color used to highlight points. Only used if
#' \code{highlight} argument has been specified
#' @param showlegend Should a legend be shown. Default is \code{FALSE}.
#' @param showgrid Should gridlines be shown. Default is \code{FALSE}.
#' @param xlab X-axis label. Default is \code{NULL} which means that the label
#' is automatically determined by the \code{\link{manhattanr}} function.
#' Specify here to overwrite the default.
#' @param ylab Y-axis label. Default is \code{"-log10(p)"}.
#' @param title Title of the plot. Default is \code{"Manhattan Plot"}
#' @param ... other parameters passed to \code{\link{manhattanr}}
#' @inheritParams manhattanr
#' @note This package is inspired by the
#' \href{https://github.com/stephenturner/qqman}{\code{qqman}} package by
#' \href{http://www.gettinggeneticsdone.com/}{Stephen Turner}. Much of the
#' plot format and pre-processing is the same. This package provides
#' additional annotation options and builds on the \code{\link{plotly}}
#' \code{d3.js} engine. These plots can be included in Shiny apps, Rmarkdown
#' documents or embeded in websites using simple HTML code.
#' @return An interactive manhattan plot.
#' @seealso \code{\link{manhattanr}}, \code{\link{HapMap}},
#' \code{\link{significantSNP}}, \code{\link[qqman]{manhattan}},
#' \url{https://github.com/stephenturner/qqman},
#' \href{https://github.com/nstrayer/D3ManhattanPlots}{D3ManhattanPlots}
#' @aliases manhattanly.default manhattanly.manhattanr
#' @importFrom magrittr '%<>%'
#' @import plotly
#' @export
#' @examples
#' \dontrun{
#' library(manhattanly)
#' manhattanly(HapMap)
#'
#' # highlight SNPs of interest
#' # 'signigicantSNP' is a character vector of SNPs included in this package
#' manhattanly(HapMap, snp = "SNP", highlight = significantSNP)
#' }
## S3 generic: dispatches on class(x) -- data frames go through
## manhattanly.default (which pre-processes via manhattanr()), while
## pre-built "manhattanr" objects go straight to manhattanly.manhattanr.
## See the roxygen block above for the full parameter documentation.
manhattanly <- function(x,
                        # col = colorRampPalette(RColorBrewer::brewer.pal(n = 9, name = "Set1"))(nchr),
                        # col = RColorBrewer::brewer.pal(n = 9, name = "Greys"),
                        ...,
                        col = c("#969696", "#252525"),
                        point_size = 5,
                        labelChr = NULL,
                        suggestiveline = -log10(1e-5),
                        suggestiveline_color = "blue",
                        suggestiveline_width = 1,
                        genomewideline = -log10(5e-8),
                        genomewideline_color = "red",
                        genomewideline_width = 1,
                        highlight = NULL,
                        highlight_color = "#00FF00",
                        showlegend = FALSE,
                        showgrid = FALSE,
                        xlab = NULL,
                        ylab = "-log10(p)",
                        title = "Manhattan Plot") {
  UseMethod("manhattanly")
}
#' @export
manhattanly.default <- function(x,
                                ...,
                                col = c("#969696", "#252525"),
                                point_size = 5,
                                labelChr = NULL,
                                suggestiveline = -log10(1e-5),
                                suggestiveline_color = "blue",
                                suggestiveline_width = 1,
                                genomewideline = -log10(5e-8),
                                genomewideline_color = "red",
                                genomewideline_width = 1,
                                highlight = NULL,
                                highlight_color = "#00FF00",
                                showlegend = FALSE,
                                showgrid = FALSE,
                                xlab = NULL,
                                ylab = "-log10(p)",
                                title = "Manhattan Plot") {

  ## Default method: pre-process `x` (a data frame of results) into a
  ## "manhattanr" object -- extra arguments in `...` are forwarded to
  ## manhattanr() -- then delegate all plotting to manhattanly.manhattanr.
  mh <- manhattanr(x, ...)

  ## (Fix) dropped the unused local `nchr <- mh$nchr`; the manhattanr
  ## method reads nchr from the object itself.
  manhattanly.manhattanr(mh,
                         col = col,
                         labelChr = labelChr,
                         point_size = point_size,
                         suggestiveline = suggestiveline,
                         suggestiveline_color = suggestiveline_color,
                         suggestiveline_width = suggestiveline_width,
                         genomewideline = genomewideline,
                         genomewideline_color = genomewideline_color,
                         genomewideline_width = genomewideline_width,
                         highlight = highlight,
                         highlight_color = highlight_color,
                         showlegend = showlegend,
                         showgrid = showgrid,
                         xlab = xlab,
                         ylab = ylab,
                         title = title)
}
#' @export
manhattanly.manhattanr <- function(x,
...,
col = c("#969696", "#252525"),
point_size = 5,
labelChr = NULL,
suggestiveline = -log10(1e-5),
suggestiveline_color = "blue",
suggestiveline_width = 1,
genomewideline = -log10(5e-8),
genomewideline_color = "red",
genomewideline_width = 1,
highlight = NULL,
highlight_color = "#00FF00",
showlegend = FALSE,
showgrid = FALSE,
xlab = NULL,
ylab = "-log10(p)",
title = "Manhattan Plot") {
# x <- manhattanr(gwasResults)
# x <- manhattanr(kk, annotation1 = "ZSCORE", annotation2 = "EFFECTSIZE")
# x <- manhattanr(kk, annotation1 = "ZSCORE")
# x <- manhattanr(kk, annotation1 = "ZSCORE", annotation2 = "EFFECTSIZE")
# x <- manhattanr(HapMap, snp = "SNP", gene = "GENE")
#
# x$data %>% head
# str(x$data)
# labelChr <- NULL
# col <- colorRampPalette(rev(RColorBrewer::brewer.pal(n = 7, name ="Set1")))(22)
# showgrid <- TRUE
# labelChr = NULL
# point_size = 5
# suggestiveline = -log10(1e-5)
# genomewideline = -log10(5e-8)
# suggestiveline_color = "blue"
# genomewideline_color = "red"
# suggestiveline_width = genomewideline_width = 1;
# highlight_color = "#00FF00"
# highlight = c(significantSNP, x$data$SNP[1:20])
# showlegend = TRUE
# showgrid = TRUE
# ylab = "-log10(p)"
# xlab = NULL
# title = "Manhattan Plot"
# col = c("#969696", "#252525")
#########
library(plotly)
d <- x$data
pName <- x$pName
snpName <- x$snpName
geneName <- x$geneName
annotation1Name <- x$annotation1Name
annotation2Name <- x$annotation2Name
labs <- x$labs
xlabel <- x$xlabel
ticks <- x$ticks
nchr <- x$nchr
group_var <- x$group
if (!is.null(highlight) & is.na(snpName)) stop("You're trying to highlight snps, but havent provided a snp column")
# Initialize plot
xmax = ceiling(max(d$pos) * 1.03)
xmin = floor(max(d$pos) * -0.03)
# If manually specifying chromosome labels, ensure a character vector
# and number of labels matches number chrs.
if (!is.null(labelChr)) {
if (is.character(labelChr)) {
if (length(labelChr)==length(labs)) {
labs <- labelChr
} else {
warning("You're trying to specify chromosome labels but the number of labels != number of chromosomes.")
}
} else {
warning("If you're trying to specify chromosome labels, labelChr must be a character vector")
}
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Initalize plotly
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
p <- plotly::plot_ly()
# Add an axis.
if (nchr == 1) {
#If single chromosome, ticks and labels automatic.
p %<>% plotly::layout(p,
title = title,
xaxis = list(
title = if(!is.null(xlab)) xlab else xlabel,
# title = "ll",
showgrid = showgrid,
range = c(xmin, xmax)
),
yaxis = list(
title = ylab)#,
#range = c(0,ceiling(max(d$logp)))
#)
)
} else {
# if multiple chrs, use the ticks and labels you created above.
p %<>% plotly::layout(p,
title = title,
xaxis = list(
title = if(!is.null(xlab)) xlab else "Chromosome",
# title = "ll",
showgrid = showgrid,
range = c(xmin, xmax),
autotick = FALSE,
tickmode = "array",
tickvals = ticks,
ticktext = labs,
ticks = "outside"
),
yaxis = list(
title = ylab)#,
#range = c(0,ceiling(max(d$logp)))
#)
)
}
# Create a vector of alternatiting colors
col <- brewer.pal(n = length(unique(d$group)), name = "Set1")
# Add points to the plot
if (nchr==1) {
# paste(if (!is.na(snpName)) paste0(snpName,": ",d[[snpName]],"<br>"),
# if (!is.na(geneName)) paste0(geneName,": ",d[[geneName]],"<br>"),
# if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d[[annotation1Name]],"<br>")
# if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d[[annotation2Name]],"<br>")
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ",d[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ",d[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d[[annotation2Name]]), sep = "<br>")
if (is.na(snpName) && is.na(geneName) && is.na(annotation1Name) && is.na(annotation2Name)) {
p %<>% plotly::add_trace(data = d, x = d$pos, y = d$logp, color = ~group, colors = "Set1",
type = "scatter",
mode = "markers",
# text = TEXT,
showlegend = showlegend,
marker = list(size = point_size)
#name = paste0("chr", unique(d$CHR))
)
} else {
p %<>% plotly::add_trace(data = d, x = d$pos, y = d$logp, color = ~group, colors = "Set1",
type = "scatter",
mode = "markers",
text = TEXT,
showlegend = showlegend,
marker = list(size = point_size)
#name = paste0("chr", unique(d$CHR))
)
}
} else {
# for(i in unique(d$index)) {
#
# tmp <- d[d$index == unique(d$index)[i], ]
#
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ", d[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ", d[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ", d[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ", d[[annotation2Name]]),
sep = "<br>")
# # get chromosome name for labeling
# chromo <- unique(tmp[which(tmp$index==i),"CHR"])
if (is.na(snpName) && is.na(geneName) && is.na(annotation1Name) && is.na(annotation2Name)) {
p <- d %>%
group_by(group) %>%
plot_ly(x = ~pos , color = ~group, colors = "Set1") %>%
add_trace(y = ~logp,
showlegend = showlegend,
type = "scatter",
mode = "markers",
marker = list(size = point_size)) %>%
plotly::layout(title = title,
xaxis = list(
title = if(!is.null(xlab)) xlab else "Chromosome",
# title = "ll",
showgrid = showgrid,
range = c(xmin, xmax),
autotick = FALSE,
tickmode = "array",
tickvals = ticks,
ticktext = labs,
ticks = "outside"
),
yaxis = list(
title = ylab))
## Old original code
# p %<>% plotly::add_trace(data =tmp, x = tmp$pos, y = tmp$logp, type = "scatter", color = ~group, colors = "Set1",
# type = "scatter",
# mode = "markers",
# showlegend = showlegend,
# marker = list(size = point_size),
# legendgroup=~group)
} else {
p <- d %>%
group_by(group) %>%
plot_ly(x = ~pos , color = ~group, colors = "Set1") %>%
add_trace(y = ~logp,
showlegend = showlegend,
type = "scatter",
mode = "markers",
text = TEXT,
marker = list(size = point_size)) %>%
plotly::layout(title = title,
xaxis = list(
title = if(!is.null(xlab)) xlab else "Chromosome",
# title = "ll",s
showgrid = showgrid,
range = c(xmin, xmax),
autotick = FALSE,
tickmode = "array",
tickvals = ticks,
ticktext = labs,
ticks = "outside"
),
yaxis = list(
title = ylab))
# p %<>% plotly::add_trace(data =tmp, x = tmp$pos, y = tmp$logp, type = "scatter", color = ~group, colors = "Set1",
# type = "scatter",
# mode = "markers",
# showlegend = showlegend,
# text = TEXT,
# marker = list(size = point_size),
# legendgroup=~group)
}
#}
}
if (suggestiveline & genomewideline) {p %<>% plotly::layout(p,
shapes = list(
list(type = "line",
fillcolor = suggestiveline_color,
line = list(color = suggestiveline_color,
width = suggestiveline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = suggestiveline, y1 = suggestiveline, yref = "y"),
list(type = "line",
fillcolor = genomewideline_color,
line = list(color = genomewideline_color,
width = genomewideline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = genomewideline, y1 = genomewideline, yref = "y")
))}
if (suggestiveline & !(genomewideline)) {p %<>% plotly::layout(p,
shapes = list(
list(type = "line",
fillcolor = suggestiveline_color,
line = list(color = suggestiveline_color,
width = suggestiveline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = suggestiveline, y1 = suggestiveline, yref = "y")
))}
if (!(suggestiveline) & genomewideline) {p %<>% plotly::layout(p,
shapes = list(
list(type = "line",
fillcolor = genomewideline_color,
line = list(color = genomewideline_color,
width = genomewideline_width),
x0 = xmin, x1 = xmax, xref = "x",
y0 = genomewideline, y1 = genomewideline, yref = "y")
))}
# Highlight snps from a character vector
if (!is.na(snpName)) {
if (!is.null(highlight)) {
if (any(!(highlight %in% d[[snpName]]))) warning("You're trying to highlight SNPs that don't exist in your results.")
d.highlight <- d[which(d[[snpName]] %in% highlight), ]
# Add points to the plot
if (nchr==1) {
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ",d.highlight[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ",d.highlight[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ",d.highlight[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ",d.highlight[[annotation2Name]]), sep = "<br>")
p %<>% plotly::add_trace(x = d$pos, y = d$logp,
type = "scatter",
mode = "markers",
text = TEXT,
showlegend = showlegend,
marker = list(color = highlight_color,
size = point_size),
name = "of interest")
} else {
# icol <- 1
for(i in unique(d.highlight$index)) {
tmp <- d.highlight[d.highlight$index == i, ]
TEXT <- paste(if (!is.na(snpName)) paste0(snpName,": ", tmp[[snpName]]),
if (!is.na(geneName)) paste0(geneName,": ", tmp[[geneName]]),
if (!is.na(annotation1Name)) paste0(annotation1Name,": ", tmp[[annotation1Name]]),
if (!is.na(annotation2Name)) paste0(annotation2Name,": ", tmp[[annotation2Name]]),
sep = "<br>")
# get chromosome name for labeling
chromo <- unique(tmp[which(tmp$index==i),"CHR"])
p %<>% plotly::add_trace(x = tmp$pos,
y = tmp$logp,
type = "scatter",
mode = "markers",
text = TEXT,
showlegend = showlegend,
marker = list(color = highlight_color,
size = point_size),
name = "of interest")
# icol = icol + 1
}
}
# p %<>% plotly::add_trace(x = d.highlight$pos,
# y = d.highlight$logp,
# type = "scatter",
# mode = "markers",
# #evaluate = TRUE,
# text = d.highlight[[snpName]],
# showlegend = showlegend,
# marker = list(color = highlight_color,
# size = point_size),
# name = "of interest")
}
}
p
}
# jj <- manhattan_plotly(gwasResults, genomewideline = FALSE)
#
# jj
# str(jj)
# topHits = subset(d, P <= annotatePval)
# p %>% layout(annotations = list(x = topHits$pos[10],
# y = -log10(topHits$P[10]),
# text = topHits$SNP[10],
# showarrow = T))
"%ni%" <- Negate("%in%")
|
#' Parse a free-text opening-hours string into a one-row tibble.
#'
#' @param day_hour A single character string such as
#'   "Mon - Thurs: 9:00am - 8:00pm, Friday: 9:00am - 4:00pm", or NA.
#'   Chunks are separated by newlines, semicolons or commas; each chunk is
#'   "day(s): hours" where the day part may be a single day, a
#'   comma-separated list, or a "start - end" range (ranges may wrap past
#'   Sunday). Chunks starting with a digit (e.g. "1st Saturday ...") are
#'   collected verbatim into the `additional_hours` column.
#' @param senior If TRUE, the seven day columns are prefixed "sp_"
#'   (senior-program hours).
#' @return A one-row tibble with columns mon..sun (or sp_mon..sp_sun) plus
#'   `additional_hours`. Hours are upper-cased; "0" marks a closed day.
#'   Unparsable input is replicated across all seven day columns.
process_hours <- function(day_hour, senior = FALSE) {
  # Canonical day names (index 1 = Monday) and the output column names.
  days_long <- c("Monday", "Tuesday", "Wednesday", "Thursday",
                 "Friday", "Saturday", "Sunday")
  days_col <- c("mon", "tues", "wed", "thr", "fri", "sat", "sun")
  if (senior) {
    days_col <- str_c("sp_", days_col)
  }
  out_names <- c(days_col, "additional_hours")

  # Wrap a length-8 character vector as the one-row tibble we return.
  as_row <- function(x) {
    names(x) <- out_names
    as_tibble_row(x)
  }

  # Resolve a (possibly plural or abbreviated) day name to 1..7; NA if
  # unrecognised. pmatch() lets "Mon", "Tues", "Thurs", ... resolve too.
  day_index <- function(nm) {
    nm <- str_remove(str_trim(nm), "s$")
    i <- match(nm, days_long)
    if (is.na(i)) {
      i <- pmatch(nm, days_long)
    }
    i
  }

  # Append `value` to slot `i` of `out`, comma-separating repeat entries.
  add_slot <- function(out, i, value) {
    out[i] <- if (out[i] == "0") value else str_c(out[i], value, sep = ", ")
    out
  }

  # Missing input: closed all week, no additional hours.
  if (is.na(day_hour)) {
    return(as_row(rep("0", 8)))
  }

  # Parse inside try(): any unexpected format drops through to the
  # "unparsable" fallback below instead of aborting the whole pipeline.
  parsed <- try({
    out <- rep("0", 8)
    for (chunk in unlist(str_split(day_hour, "[\\n;,]+"))) {
      chunk <- str_trim(chunk)
      # Split "day(s): hours" at the first colon followed by whitespace,
      # so times such as "9:00am" (and "Saturday : ..." spacing) survive.
      parts <- unlist(str_split(chunk, "\\s*:\\s+", n = 2))
      d <- str_trim(parts[1])
      t <- str_trim(parts[2])
      # Chunks starting with a digit describe extra openings on odd days
      # ("1st Saturday ..."): keep the whole chunk verbatim in slot 8.
      if (str_starts(d, "\\d+")) {
        out <- add_slot(out, 8, chunk)
        next
      }
      if (str_detect(d, "-")) {
        # Day range, e.g. "Mon - Thurs"; may wrap past Sunday.
        bounds <- unlist(str_split(d, "\\s*-\\s*", n = 2))
        start_i <- day_index(bounds[1])
        end_i <- day_index(bounds[2])
        if (is.na(start_i) || is.na(end_i)) {
          stop("unrecognised day range: ", d)
        }
        if (end_i < start_i) {
          end_i <- end_i + 7  # wrap-around, e.g. "Friday - Monday"
        }
        for (i in seq(start_i, end_i)) {
          out <- add_slot(out, (i - 1) %% 7 + 1, t)
        }
      } else if (str_detect(d, ", ")) {
        # Non-contiguous list, e.g. "Monday, Wednesday, Friday".
        for (single_d in unlist(str_split(d, ",\\s*"))) {
          i <- day_index(single_d)
          if (is.na(i)) {
            stop("unrecognised day: ", single_d)
          }
          out <- add_slot(out, i, t)
        }
      } else {
        # Single day.
        i <- day_index(d)
        if (is.na(i)) {
          stop("unrecognised day: ", d)
        }
        out <- add_slot(out, i, t)
      }
    }
    as_row(replace_na(str_to_upper(out), ""))
  }, silent = TRUE)
  if (!inherits(parsed, "try-error")) {
    return(parsed)
  }
  # Unparsable text: replicate it across every day column.
  as_row(c(rep(day_hour, 7), "0"))
}
# --- Ad-hoc debugging / REPL scratch for process_hours() -----------------
# NOTE(review): these lines were evidently run interactively;
# `comm_resource_data` is assumed to be loaded elsewhere (a data frame
# with a `days_hours` character column) -- TODO confirm against the
# data-loading script.
comm_resource_data %>% tail(10)
comm_resource_data$days_hours %>% head(10)
# Parse the first few raw opening-hours strings into one-row tibbles and
# row-bind them.
comm_resource_data$days_hours %>% head(10) %>%
map_dfr(process_hours)
# NOTE(review): `day_hour` is only assigned at the bottom of this scratch
# block, so this call relies on a value left over from a previous
# interactive run.
str_split(day_hour, "[\\n,;]")
# Sample inputs used to exercise the parser manually.
dt <- "Mon - Thurs: 9:00am - 8:00pm"
dt <- "Monday, Wednesday, Friday: 9:00am - 8:00pm"
process_hours(dt)
# Manual replay of the first parsing steps inside process_hours():
# trim, then split into the day part (d) and the hours part (t).
dt <- str_trim(dt)
d <- str_split(dt, ': ') %>% unlist() %>% .[1]
t <- str_split(dt, ': ') %>% unlist() %>% .[2]
day_hour <- "Mon - Thurs: 9:00am - 8:00pm, Friday: 9:00am - 4:00pm, Saturday : 10:00am - 2:00pm"
| /megamap/data_extraction_and_formatting/debugging_process_hours.R | permissive | codeforsanjose/bac-resources | R | false | false | 3,644 | r |
#' Parse a free-text opening-hours string into a one-row tibble.
#'
#' @param day_hour A single character string such as
#'   "Mon - Thurs: 9:00am - 8:00pm, Friday: 9:00am - 4:00pm", or NA.
#'   Chunks are separated by newlines, semicolons or commas; each chunk is
#'   "day(s): hours" where the day part may be a single day, a
#'   comma-separated list, or a "start - end" range (ranges may wrap past
#'   Sunday). Chunks starting with a digit (e.g. "1st Saturday ...") are
#'   collected verbatim into the `additional_hours` column.
#' @param senior If TRUE, the seven day columns are prefixed "sp_"
#'   (senior-program hours).
#' @return A one-row tibble with columns mon..sun (or sp_mon..sp_sun) plus
#'   `additional_hours`. Hours are upper-cased; "0" marks a closed day.
#'   Unparsable input is replicated across all seven day columns.
process_hours <- function(day_hour, senior = FALSE) {
  # Canonical day names (index 1 = Monday) and the output column names.
  days_long <- c("Monday", "Tuesday", "Wednesday", "Thursday",
                 "Friday", "Saturday", "Sunday")
  days_col <- c("mon", "tues", "wed", "thr", "fri", "sat", "sun")
  if (senior) {
    days_col <- str_c("sp_", days_col)
  }
  out_names <- c(days_col, "additional_hours")

  # Wrap a length-8 character vector as the one-row tibble we return.
  as_row <- function(x) {
    names(x) <- out_names
    as_tibble_row(x)
  }

  # Resolve a (possibly plural or abbreviated) day name to 1..7; NA if
  # unrecognised. pmatch() lets "Mon", "Tues", "Thurs", ... resolve too.
  day_index <- function(nm) {
    nm <- str_remove(str_trim(nm), "s$")
    i <- match(nm, days_long)
    if (is.na(i)) {
      i <- pmatch(nm, days_long)
    }
    i
  }

  # Append `value` to slot `i` of `out`, comma-separating repeat entries.
  add_slot <- function(out, i, value) {
    out[i] <- if (out[i] == "0") value else str_c(out[i], value, sep = ", ")
    out
  }

  # Missing input: closed all week, no additional hours.
  if (is.na(day_hour)) {
    return(as_row(rep("0", 8)))
  }

  # Parse inside try(): any unexpected format drops through to the
  # "unparsable" fallback below instead of aborting the whole pipeline.
  parsed <- try({
    out <- rep("0", 8)
    for (chunk in unlist(str_split(day_hour, "[\\n;,]+"))) {
      chunk <- str_trim(chunk)
      # Split "day(s): hours" at the first colon followed by whitespace,
      # so times such as "9:00am" (and "Saturday : ..." spacing) survive.
      parts <- unlist(str_split(chunk, "\\s*:\\s+", n = 2))
      d <- str_trim(parts[1])
      t <- str_trim(parts[2])
      # Chunks starting with a digit describe extra openings on odd days
      # ("1st Saturday ..."): keep the whole chunk verbatim in slot 8.
      if (str_starts(d, "\\d+")) {
        out <- add_slot(out, 8, chunk)
        next
      }
      if (str_detect(d, "-")) {
        # Day range, e.g. "Mon - Thurs"; may wrap past Sunday.
        bounds <- unlist(str_split(d, "\\s*-\\s*", n = 2))
        start_i <- day_index(bounds[1])
        end_i <- day_index(bounds[2])
        if (is.na(start_i) || is.na(end_i)) {
          stop("unrecognised day range: ", d)
        }
        if (end_i < start_i) {
          end_i <- end_i + 7  # wrap-around, e.g. "Friday - Monday"
        }
        for (i in seq(start_i, end_i)) {
          out <- add_slot(out, (i - 1) %% 7 + 1, t)
        }
      } else if (str_detect(d, ", ")) {
        # Non-contiguous list, e.g. "Monday, Wednesday, Friday".
        for (single_d in unlist(str_split(d, ",\\s*"))) {
          i <- day_index(single_d)
          if (is.na(i)) {
            stop("unrecognised day: ", single_d)
          }
          out <- add_slot(out, i, t)
        }
      } else {
        # Single day.
        i <- day_index(d)
        if (is.na(i)) {
          stop("unrecognised day: ", d)
        }
        out <- add_slot(out, i, t)
      }
    }
    as_row(replace_na(str_to_upper(out), ""))
  }, silent = TRUE)
  if (!inherits(parsed, "try-error")) {
    return(parsed)
  }
  # Unparsable text: replicate it across every day column.
  as_row(c(rep(day_hour, 7), "0"))
}
# --- Ad-hoc debugging / REPL scratch for process_hours() -----------------
# NOTE(review): these lines were evidently run interactively;
# `comm_resource_data` is assumed to be loaded elsewhere (a data frame
# with a `days_hours` character column) -- TODO confirm against the
# data-loading script.
comm_resource_data %>% tail(10)
comm_resource_data$days_hours %>% head(10)
# Parse the first few raw opening-hours strings into one-row tibbles and
# row-bind them.
comm_resource_data$days_hours %>% head(10) %>%
map_dfr(process_hours)
# NOTE(review): `day_hour` is only assigned at the bottom of this scratch
# block, so this call relies on a value left over from a previous
# interactive run.
str_split(day_hour, "[\\n,;]")
# Sample inputs used to exercise the parser manually.
dt <- "Mon - Thurs: 9:00am - 8:00pm"
dt <- "Monday, Wednesday, Friday: 9:00am - 8:00pm"
process_hours(dt)
# Manual replay of the first parsing steps inside process_hours():
# trim, then split into the day part (d) and the hours part (t).
dt <- str_trim(dt)
d <- str_split(dt, ': ') %>% unlist() %>% .[1]
t <- str_split(dt, ': ') %>% unlist() %>% .[2]
day_hour <- "Mon - Thurs: 9:00am - 8:00pm, Friday: 9:00am - 4:00pm, Saturday : 10:00am - 2:00pm"
|
# plot6: compare motor-vehicle PM2.5 emissions in Baltimore City vs
# Los Angeles County over time, written to plot6.png.
library(ggplot2)

# ---- Get data -----------------------------------------------------------
# Download the zipped data set once; skip download / extraction when the
# files already exist in the working directory.
wdfiles <- list.files(".")
if (!"data.zip" %in% wdfiles) {
  message("Downloading files.")
  # mode = "wb" keeps the zip intact on Windows; the default download
  # method is portable (no external curl binary required).
  download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
    "data.zip", mode = "wb"
  )
}
if (!all(c("Source_Classification_Code.rds", "summarySCC_PM25.rds") %in% wdfiles)) {
  message("Extracting files.")
  unzip("data.zip", overwrite = TRUE)
}

# ---- Read data ----------------------------------------------------------
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# ---- Process data -------------------------------------------------------
# Q6: compare emissions from motor vehicle sources in Baltimore City
# (fips == "24510") with Los Angeles County (fips == "06037"). Which city
# has seen greater changes over time in motor vehicle emissions?
#
# Motor-vehicle source codes: everything in the "Onroad" data category,
# plus any short name mentioning motorcycles.
motorVSCC <- SCC$SCC[SCC$Data.Category == "Onroad" |
                       grepl("motorcycle", SCC$Short.Name, ignore.case = TRUE)]

# Keep only motor-vehicle records for the two counties of interest.
states <- NEI[NEI$SCC %in% motorVSCC & (NEI$fips == "24510" | NEI$fips == "06037"), ]

# Replace the numeric FIPS codes with readable city names for facet labels.
states$fips[states$fips == "24510"] <- "Baltimore"
states$fips[states$fips == "06037"] <- "Los Angeles"

# ---- Plot ---------------------------------------------------------------
# print() is required so the plot renders even when this script is
# source()d (top-level autoprinting does not apply there).
png("plot6.png")
print(
  ggplot(states, aes(factor(year), log10(Emissions))) +
    geom_jitter(color = "grey", alpha = 0.5) +
    geom_boxplot(notch = TRUE, alpha = 0.5) +
    facet_grid(. ~ fips) +
    labs(x = "Year") +
    labs(title = "Motor Vehicle Emissions,\nBaltimore vs Los Angeles")
)
dev.off()
| /plot6.R | no_license | axelgafu/ExploratoryDataAnalysis-Project2 | R | false | false | 1,525 | r | require(ggplot2)
# ---- Get data -----------------------------------------------------------
# Download the zipped data set once; skip download / extraction when the
# files already exist in the working directory.
wdfiles <- list.files(".")
if (!"data.zip" %in% wdfiles) {
  message("Downloading files.")
  # mode = "wb" keeps the zip intact on Windows; the default download
  # method is portable (no external curl binary required).
  download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
    "data.zip", mode = "wb"
  )
}
if (!all(c("Source_Classification_Code.rds", "summarySCC_PM25.rds") %in% wdfiles)) {
  message("Extracting files.")
  unzip("data.zip", overwrite = TRUE)
}

# ---- Read data ----------------------------------------------------------
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# ---- Process data -------------------------------------------------------
# Q6: compare emissions from motor vehicle sources in Baltimore City
# (fips == "24510") with Los Angeles County (fips == "06037"). Which city
# has seen greater changes over time in motor vehicle emissions?
#
# Motor-vehicle source codes: everything in the "Onroad" data category,
# plus any short name mentioning motorcycles.
motorVSCC <- SCC$SCC[SCC$Data.Category == "Onroad" |
                       grepl("motorcycle", SCC$Short.Name, ignore.case = TRUE)]

# Keep only motor-vehicle records for the two counties of interest.
states <- NEI[NEI$SCC %in% motorVSCC & (NEI$fips == "24510" | NEI$fips == "06037"), ]

# Replace the numeric FIPS codes with readable city names for facet labels.
states$fips[states$fips == "24510"] <- "Baltimore"
states$fips[states$fips == "06037"] <- "Los Angeles"

# ---- Plot ---------------------------------------------------------------
# print() is required so the plot renders even when this script is
# source()d (top-level autoprinting does not apply there).
png("plot6.png")
print(
  ggplot(states, aes(factor(year), log10(Emissions))) +
    geom_jitter(color = "grey", alpha = 0.5) +
    geom_boxplot(notch = TRUE, alpha = 0.5) +
    facet_grid(. ~ fips) +
    labs(x = "Year") +
    labs(title = "Motor Vehicle Emissions,\nBaltimore vs Los Angeles")
)
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.