content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#' Check ... Used
#'
#' @description
#' Checks that `...` is used, via
#'
#' `length(list(...)) != 0L`
#'
#' @inheritParams params
#' @return
#' The `chk_` function throws an informative error if the test fails.
#'
#' The `vld_` function returns a flag indicating whether the test was met.
#'
#' @family chk_ellipsis
#' @export
#'
#' @examples
#'
#' # chk_used
#' fun <- function(x, ...) {
#'   chk_used(...)
#'   x
#' }
#' try(fun(1))
#' fun(1, 2)
chk_used <- function(...) {
  # Abort with an informative message when nothing was passed through `...`;
  # otherwise return invisibly, exactly like the other chk_ functions.
  if (!vld_used(...)) {
    abort_chk("`...` must be used")
  }
  invisible()
}
#' @describeIn chk_used Validate ... Used
#'
#' @export
#'
#' @examples
#'
#' # vld_used
#' fun <- function(x, ...) {
#'   vld_used(...)
#' }
#' fun(1)
#' fun(1, 2)
vld_used <- function(...) {
  # TRUE when at least one argument was supplied through `...`.
  length(list(...)) > 0L
}
|
/R/chk-used.R
|
permissive
|
krlmlr/chk
|
R
| false
| false
| 777
|
r
|
#' Check ... Used
#'
#' @description
#' Checks if is ... used using
#'
#' `length(list(...)) != 0L`
#'
#' @inheritParams params
#' @return
#' The `chk_` function throws an informative error if the test fails.
#'
#' The `vld_` function returns a flag indicating whether the test was met.
#'
#' @family chk_ellipsis
#' @export
#'
#' @examples
#'
#' # chk_used
#' fun <- function(x, ...) {
#' chk_used(...)
#' x
#' }
#' try(fun(1))
#' fun(1, 2)
chk_used <- function(...) {
if (vld_used(...)) {
return(invisible())
}
abort_chk("`...` must be used")
}
#' @describeIn chk_used Validate ... Used
#'
#' @export
#'
#' @examples
#'
#' # vld_used
#' fun <- function(x, ...) {
#' vld_used(...)
#' }
#' fun(1)
#' fun(1, 2)
vld_used <- function(...) length(list(...)) != 0L
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/http-browse.r
\name{BROWSE}
\alias{BROWSE}
\title{Open specified url in browser.}
\usage{
BROWSE(url = NULL, config = list(), ..., handle = NULL)
}
\arguments{
\item{url}{the url of the page to retrieve}
\item{config}{All configuration options are ignored because the request
is handled by the browser, not \pkg{RCurl}.}
\item{...}{Further named parameters, such as \code{query}, \code{path}, etc,
passed on to \code{\link{modify_url}}. Unnamed parameters will be combined
with \code{\link{config}}.}
\item{handle}{The handle to use with this request. If not
supplied, will be retrieved and reused from the \code{\link{handle_pool}}
based on the scheme, hostname and port of the url. By default \pkg{httr}
requests to the same scheme/host/port combo. This substantially reduces
connection time, and ensures that cookies are maintained over multiple
requests to the same host. See \code{\link{handle_pool}} for more
details.}
}
\description{
(This isn't really an HTTP verb, but it seems to follow the same format.)
}
\details{
Only works in interactive sessions.
}
\examples{
BROWSE("http://google.com")
BROWSE("http://had.co.nz")
}
\seealso{
Other http methods: \code{\link{DELETE}};
\code{\link{GET}}; \code{\link{HEAD}}; \code{\link{VERB}}
}
|
/Data Science/Miscellaenous/Misc - httr info/man/BROWSE.Rd
|
no_license
|
Mike-Kuklinski/Coursera
|
R
| false
| false
| 1,348
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/http-browse.r
\name{BROWSE}
\alias{BROWSE}
\title{Open specified url in browser.}
\usage{
BROWSE(url = NULL, config = list(), ..., handle = NULL)
}
\arguments{
\item{url}{the url of the page to retrieve}
\item{config}{All configuration options are ignored because the request
is handled by the browser, not \pkg{RCurl}.}
\item{...}{Further named parameters, such as \code{query}, \code{path}, etc,
passed on to \code{\link{modify_url}}. Unnamed parameters will be combined
with \code{\link{config}}.}
\item{handle}{The handle to use with this request. If not
supplied, will be retrieved and reused from the \code{\link{handle_pool}}
based on the scheme, hostname and port of the url. By default \pkg{httr}
requests to the same scheme/host/port combo. This substantially reduces
connection time, and ensures that cookies are maintained over multiple
requests to the same host. See \code{\link{handle_pool}} for more
details.}
}
\description{
(This isn't really an HTTP verb, but it seems to follow the same format.)
}
\details{
Only works in interactive sessions.
}
\examples{
BROWSE("http://google.com")
BROWSE("http://had.co.nz")
}
\seealso{
Other http methods: \code{\link{DELETE}};
\code{\link{GET}}; \code{\link{HEAD}}; \code{\link{VERB}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multiread.r
\name{multiread}
\alias{multiread}
\title{Multiread}
\usage{
multiread(path = ".", extension = "txt", recursive = FALSE,
ignore.case = FALSE, prune.empty = TRUE, pathnames = TRUE)
}
\arguments{
\item{path}{The base file path to search.}
\item{extension}{An extension or the "*" wildcard (for everything). For example,
to read in files ending \code{.txt}, you could specify
\code{extension="txt"}. For the purposes of this function,
each of \code{*.txt}, \code{*txt}, \code{.txt}, and \code{txt}
are treated the same.}
\item{recursive}{Logical; should the search include all subdirectories?}
\item{ignore.case}{Logical; should case be ignored in the extension? For example, if
\code{TRUE}, then \code{.r} and \code{.R} files are treated the
same.}
\item{prune.empty}{Logical; should empty files be removed from the returned list?}
\item{pathnames}{Logical; should the full path be included in the names of the
returned list.}
}
\value{
A named list of strings, where the names are the file names.
}
\description{
Read in a collection of text files.
}
\details{
The \code{extension} argument is not a general regular
expression pattern, but a simplified pattern. For example,
the pattern \code{*.txt} is really equivalent to
\code{*[.]txt$} as a regular expression. If you need more
complicated patterns, you should directly use the \code{dir()}
function.
}
\examples{
\dontrun{
path <- system.file(package="ngram")
### Read all files in the base path
multiread(path, extension="*")
### Read all .r/.R files recursively (warning: lots of text)
multiread(path, extension="r", recursive=TRUE, ignore.case=TRUE)
}
}
\keyword{Utility}
|
/man/multiread.Rd
|
permissive
|
josesaribeiro/ngram
|
R
| false
| true
| 1,737
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multiread.r
\name{multiread}
\alias{multiread}
\title{Multiread}
\usage{
multiread(path = ".", extension = "txt", recursive = FALSE,
ignore.case = FALSE, prune.empty = TRUE, pathnames = TRUE)
}
\arguments{
\item{path}{The base file path to search.}
\item{extension}{An extension or the "*" wildcard (for everything). For example,
to read in files ending \code{.txt}, you could specify
\code{extension="txt"}. For the purposes of this function,
each of \code{*.txt}, \code{*txt}, \code{.txt}, and \code{txt}
are treated the same.}
\item{recursive}{Logical; should the search include all subdirectories?}
\item{ignore.case}{Logical; should case be ignored in the extension? For example, if
\code{TRUE}, then \code{.r} and \code{.R} files are treated the
same.}
\item{prune.empty}{Logical; should empty files be removed from the returned list?}
\item{pathnames}{Logical; should the full path be included in the names of the
returned list.}
}
\value{
A named list of strings, where the names are the file names.
}
\description{
Read in a collection of text files.
}
\details{
The \code{extension} argument is not a general regular
expression pattern, but a simplified pattern. For example,
the pattern \code{*.txt} is really equivalent to
\code{*[.]txt$} as a regular expression. If you need more
complicated patterns, you should directly use the \code{dir()}
function.
}
\examples{
\dontrun{
path <- system.file(package="ngram")
### Read all files in the base path
multiread(path, extension="*")
### Read all .r/.R files recursively (warning: lots of text)
multiread(path, extension="r", recursive=TRUE, ignore.case=TRUE)
}
}
\keyword{Utility}
|
# Export per-sample ethnicity (ancestry) results as a list suitable for JSON
# serialisation.
#
# Args:
#   uniqueID:  sample identifier (used for the genotype lookup and in errors).
#   moduleDir: directory with the pre-computed ethnicity reference data
#              (SNP weights, reference PCA, population descriptions).
#   outputDir: directory holding this sample's cached output data.
#   gtool:     genotype-extraction tool handle, forwarded to get_genotypes().
#
# Returns a list with:
#   guessed_super_pop: closest super population, or NA when the call is
#                      ambiguous or too many SNPs are missing.
#   PCA_coordinates:   the sample's position on PC1-PC3.
#   SNP_count:         human-readable QC summary string.
export_function <- function (uniqueID, moduleDir, outputDir, gtool) {
  if (!file.exists(outputDir)) {
    # Fixed grammar in the error message ("a output data" -> "output data").
    stop(paste("Did not find output data with this id", uniqueID))
  }
  output <- list()
  # Get input constants.
  # NOTE(review): load() is expected to define `ethnicity_snps` and
  # `pca_data` in this frame -- confirm against the .rdata files.
  load(paste0(moduleDir, "/ethnicity/2017-04-03_ethnicity_snps.rdata"))
  load(paste0(moduleDir, "/ethnicity/2017-04-03_ethnicity_pca.rdata"))
  ethnicity_desc <- read.table(paste0(moduleDir, "/ethnicity/2017-04-03_ethnicity_descriptions.txt"), sep = "\t", header = TRUE, stringsAsFactors = FALSE, row.names = 1)
  # Get this sample's genotypes for the pre-selected ancestry SNPs.
  genotypes <- get_genotypes(uniqueID = uniqueID, request = ethnicity_snps, gtool = gtool, destinationDir = outputDir, namingLabel = "cached.ethnicity")
  ethnicity_snps[, "genotype"] <- genotypes[rownames(ethnicity_snps), "genotype"]
  # Count alternate alleles per SNP from the "A/B"-style genotype string.
  get_alt_count <- function(x){sum(strsplit(x["genotype"], "/")[[1]] %in% x["alt"])}
  ethnicity_snps[, "alt_count"] <- apply(ethnicity_snps, 1, get_alt_count)
  # QC: how many of the reference SNPs were actually observed?
  found <- sum(!is.na(ethnicity_snps[, "genotype"]))
  QC_conclusion <- paste("Based on", found, "of", nrow(ethnicity_snps), "pre-computed ethnicity SNPs.")
  if(found < 1500){
    QC_conclusion <- paste(QC_conclusion, "This is a low number and may be a serious problem for the ancestry call.")
  }
  # Project this person onto the reference PCA (centre/scale each SNP, then
  # apply the stored rotation) for the first five components.
  you <- data.frame(pop = "YOU", super_pop = "YOU", gender = NA, stringsAsFactors = FALSE)
  for(pc in 1:5){
    val <- sum(((ethnicity_snps[, "alt_count"] - ethnicity_snps[, "center"])/ethnicity_snps[, "scale"]) * ethnicity_snps[, paste0("rot_PC", pc)])
    you[, paste0("pos_PC", pc)] <- val
  }
  pca <- rbind(pca_data, you)
  # (An unused local colour palette built from ethnicity_desc was removed;
  # it was never referenced after being assigned.)
  # Attach the long descriptor of each population.
  pca[, "pop_long"] <- ethnicity_desc[pca[, "pop"], "PopulationDescription"]
  # Closest super-population by Euclidean distance on PC1-PC3 (low tech,
  # but adequate for clear-cut cases).
  y <- which(pca[, "pop"] %in% "YOU")
  pca[, "distance"] <- sqrt((pca[, "pos_PC1"] - pca[y, "pos_PC1"])^2 + (pca[, "pos_PC2"] - pca[y, "pos_PC2"])^2 + (pca[, "pos_PC3"] - pca[y, "pos_PC3"])^2)
  pca <- pca[order(pca[, "distance"]), ]
  # Rows 2:6 are the five nearest reference samples (row 1 is "YOU" itself).
  guessed_super_pop <- unique(pca[2:6, "super_pop"])
  if (length(guessed_super_pop)!= 1) guessed_super_pop <- NA # ambiguous among the 5 nearest - don't guess
  if (found < 1500) guessed_super_pop <- NA # also don't guess if too many SNPs were missing
  output[["guessed_super_pop"]] <- guessed_super_pop
  # Add the sample's coordinates for the JSON output.
  output[["PCA_coordinates"]] <- list()
  output[["PCA_coordinates"]][["PC1"]] <- pca[pca[, "pop"] %in% "YOU", "pos_PC1"]
  output[["PCA_coordinates"]][["PC2"]] <- pca[pca[, "pop"] %in% "YOU", "pos_PC2"]
  output[["PCA_coordinates"]][["PC3"]] <- pca[pca[, "pop"] %in% "YOU", "pos_PC3"]
  output[["SNP_count"]] <- QC_conclusion
  return(output)
}
|
/imputation2/imputeTraits/ethnicity/export_script.R
|
permissive
|
trvinh/genomes-io-prj
|
R
| false
| false
| 3,158
|
r
|
export_function <- function (uniqueID, moduleDir, outputDir, gtool) {
if (!file.exists(outputDir)) {
stop(paste("Did not find a output data with this id", uniqueID))
}
output <- list()
# get input constants
load(paste0(moduleDir, "/ethnicity/2017-04-03_ethnicity_snps.rdata"))
load(paste0(moduleDir, "/ethnicity/2017-04-03_ethnicity_pca.rdata"))
ethnicity_desc <- read.table(paste0(moduleDir, "/ethnicity/2017-04-03_ethnicity_descriptions.txt"), sep = "\t", header = TRUE, stringsAsFactors = FALSE, row.names = 1)
# get genotypes
genotypes <- get_genotypes(uniqueID = uniqueID, request = ethnicity_snps, gtool = gtool, destinationDir = outputDir, namingLabel = "cached.ethnicity")
ethnicity_snps[, "genotype"] <- genotypes[rownames(ethnicity_snps), "genotype"]
get_alt_count <- function(x){sum(strsplit(x["genotype"], "/")[[1]] %in% x["alt"])}
ethnicity_snps[, "alt_count"] <- apply(ethnicity_snps, 1, get_alt_count)
# get missing SNP counts
found <- sum(!is.na(ethnicity_snps[, "genotype"]))
QC_conclusion <- paste("Based on", found, "of", nrow(ethnicity_snps), "pre-computed ethnicity SNPs.")
if(found < 1500){
QC_conclusion <- paste(QC_conclusion, "This is a low number and may be a serious problem for the ancestry call.")
}
# quick-calculate the PCA metrics for this person
you <- data.frame(pop = "YOU", super_pop = "YOU", gender = NA, stringsAsFactors = FALSE)
for(pc in 1:5){
val <- sum(((ethnicity_snps[, "alt_count"] - ethnicity_snps[, "center"])/ethnicity_snps[, "scale"]) * ethnicity_snps[, paste0("rot_PC", pc)])
you[, paste0("pos_PC", pc)] <- val
}
pca <- rbind(pca_data, you)
# pick some colours for each super population (first dilute their alpha a little)
colours <- ethnicity_desc[, "Col"]
names(colours) <- ethnicity_desc[, "PopulationDescription"]
# also get the long descriptor of each populations
pca[, "pop_long"] <- ethnicity_desc[pca[, "pop"], "PopulationDescription"]
# calculate closest superpopulation (just use geometric distance. A little low tech but should be ok for sure cases)
y <- which(pca[, "pop"] %in% "YOU")
pca[, "distance"] <- sqrt((pca[, "pos_PC1"] - pca[y, "pos_PC1"])^2 + (pca[, "pos_PC2"] - pca[y, "pos_PC2"])^2 + (pca[, "pos_PC3"] - pca[y, "pos_PC3"])^2)
pca <- pca[order(pca[, "distance"]), ]
guessed_super_pop <- unique(pca[2:6, "super_pop"])
if (length(guessed_super_pop)!= 1) guessed_super_pop <- NA # if there's more than one superpop among closest 5 - then we don't want to guess
if (found < 1500) guessed_super_pop <- NA # also don't guess if too many SNPs were missing
output[["guessed_super_pop"]] <- guessed_super_pop
# adding in some coordinates to the JSON
output[["PCA_coordinates"]] <- list()
output[["PCA_coordinates"]][["PC1"]] <- pca[pca[, "pop"] %in% "YOU", "pos_PC1"]
output[["PCA_coordinates"]][["PC2"]] <- pca[pca[, "pop"] %in% "YOU", "pos_PC2"]
output[["PCA_coordinates"]][["PC3"]] <- pca[pca[, "pop"] %in% "YOU", "pos_PC3"]
output[["SNP_count"]] <- QC_conclusion
return(output)
}
|
\name{plan_neb_LF}
\docType{data}
\alias{plan_neb_LF}
\title{Planetary nebula luminosity function}
\description{
This dataset contains the visual magnitudes of 531 planetary nebulae in five nearby galaxies: M 31 (Andromeda), M 81, NGC 3379, NGC 4494 and NGC 4382 (Ciardullo et al. 2002 and references therein). If the distribution of planetary nebula luminosities (its `luminosity function') is universal, then the offsets between the distributions can be used to estimate galaxy distances. However, the samples are truncated at different levels for each galaxy. The dataset is discussed at \url{http://astrostatistics.psu.edu/datasets/plan_neb.html} (with references) and can be used to exercise statistical methods of density estimation and regression in the presence of truncation.
}
\usage{plan_neb_LF}
\format{A table containing 531 rows and 2 columns with header row}
\source{Ciardullo et al. (2002)}
\references{
Ciardullo, R., Feldmeier, J. J., Jacoby, G. H., Kuzio de Naray, R., Laychak, M. B. and Durrell, P. R. (2002) Planetary nebulae as standard candles. XII. Connecting the Population I and Population II distance scales, \emph{Astrophysical Journal}, 577, 31-50
}
\keyword{datasets}
|
/man/plan_neb_LF.Rd
|
no_license
|
cran/astrodatR
|
R
| false
| false
| 1,201
|
rd
|
\name{plan_neb_LF}
\docType{data}
\alias{plan_neb_LF}
\title{Planetary nebula luminosity function}
\description{
This dataset contains the visual magnitudes of 531 planetary nebulae in five nearby galaxies: M 31 (Andromeda), M 81, NGC 3379, NGC 4494 and NGC 4382 (Ciardullo et al. 2002 and references therein). If the distribution of planetary nebula luminosities (its `luminosity function') is universal, then the offsets between the distributions can be used to estimate galaxy distances. However, the samples are truncated at different levels for each galaxy. The dataset is discussed at \url{http://astrostatistics.psu.edu/datasets/plan_neb.html} (with references) and can be used to exercise statistical methods of density estimation and regression in the presence of truncation.
}
\usage{plan_neb_LF}
\format{A table containing 531 rows and 2 columns with header row}
\source{Ciardullo et al. (2002)}
\references{
Ciardullo, R., Feldmeier, J. J., Jacoby, G. H., Kuzio de Naray, R., Laychak, M. B. and Durrell, P. R. (2002) Planetary nebulae as standard candles. XII. Connecting the Population I and Population II distance scales, \emph{Astrophysical Journal}, 577, 31-50
}
\keyword{datasets}
|
#' Coerce to a cor_network object
#' @description Functions to coerce a object to cor_network if possible.
#' @param x any \code{R} object.
#' @param directed logical value, whether or not to create a directed graph.
#' @param simplify logical value (defaults to TRUE) indicating whether to
#' delete nodes without edge connections.
#' @param weight NULL (default) or name of column in edges which will be renamed
#' to "weight".
#' @param r.thres a numeric value.
#' @param r.absolute logical value (defaults to TRUE).
#' @param p.thres a numeric value.
#' @param ... extra params passing to \code{\link[ggcor]{cor_network}}.
#' @return a cor_network object.
#' @importFrom dplyr filter rename %>%
#' @importFrom tibble tibble
#' @importFrom rlang sym !!
#' @rdname as_cor_network
#' @examples
#' ll <- correlate(mtcars)
#' as_cor_network(ll)
#' @author Houyun Huang, Lei Zhou, Jian Chen, Taiyun Wei
#' @export
as_cor_network <- function(x, ...) {
# S3 generic: dispatches to an as_cor_network.* method based on class(x).
UseMethod("as_cor_network")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network cor_tbl
as_cor_network.cor_tbl <- function(x,
                                   directed = FALSE,
                                   simplify = TRUE,
                                   weight = NULL,
                                   r.thres = 0.6,
                                   r.absolute = TRUE,
                                   p.thres = 0.05,
                                   ...)
{
  # Select the edges that pass the correlation / significance thresholds.
  if(is_general_cor_tbl(x)) {
    # General cor_tbl: only a p-value filter is applicable.
    edges <- if("p.value" %in% names(x) && is.finite(p.thres)) {
      dplyr::filter(x, p.value < p.thres)
    } else x
  } else {
    edges <- if(is.finite(r.thres)) {
      if("p.value" %in% names(x) && is.finite(p.thres)) {
        if(r.absolute) {
          dplyr::filter(x, abs(r) > r.thres, p.value < p.thres)
        } else {
          dplyr::filter(x, r > r.thres, p.value < p.thres)
        }
      } else {
        if(r.absolute) {
          dplyr::filter(x, abs(r) > r.thres)
        } else {
          dplyr::filter(x, r > r.thres)
        }
      }
    } else {
      # Non-finite r.thres disables the correlation filter.
      if("p.value" %in% names(x) && is.finite(p.thres)) {
        dplyr::filter(x, p.value < p.thres)
      } else {
        x
      }
    }
  }
  # Standardise the endpoint columns to "from"/"to".
  edges <- rename_cor_network_edge(edges, ".row.names", ".col.names")
  nodes <- if(simplify) {
    # Keep only nodes that participate in at least one retained edge.
    tibble::tibble(name = unique(c(edges$from, edges$to)))
  } else {
    tibble::tibble(name = unique(c(x$.col.names, x$.row.names)))
  }
  if(!is.null(weight)) {
    if(!weight %in% names(edges)) {
      # Bug fix: the error message previously misspelled "edges" as "egdes".
      stop("don't find ", weight, " in edges table.", call. = FALSE)
    }
    weight <- rlang::sym(weight)
    edges <- dplyr::rename(edges, weight = !!weight)
  }
  structure(.Data = list(nodes = nodes, edges = edges),
            directed = directed, class = "cor_network")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network mantel_tbl
as_cor_network.mantel_tbl <- function(x, directed = FALSE, ...) {
# Convert to a cor_tbl first, then reuse the cor_tbl method.
as_cor_network(as_cor_tbl(x), directed = directed, ...)
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network pro_tbl
as_cor_network.pro_tbl <- function(x, directed = FALSE, ...) {
# Convert to a cor_tbl first, then reuse the cor_tbl method.
as_cor_network(as_cor_tbl(x), directed = directed, ...)
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network matrix
as_cor_network.matrix <- function(x, directed = FALSE, ...) {
# Delegate to cor_network(), requesting its list ("cor_network") output.
cor_network(corr = x, directed = directed, ..., val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network data.frame
as_cor_network.data.frame <- function(x, directed = FALSE, ...) {
# Same delegation as the matrix method; cor_network() handles coercion.
cor_network(corr = x, directed = directed, ..., val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network correlate
as_cor_network.correlate <- function(x, directed = FALSE, ...) {
# A correlate object carries both the correlation and p-value matrices.
cor_network(corr = x$r, p.value = x$p.value, directed = directed, ...,
val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network rcorr
as_cor_network.rcorr <- function(x, directed = FALSE, ...)
{
# Hmisc::rcorr leaves the diagonal of the p-value matrix as NA; zero it so
# self-correlations are never dropped by the p-value filter.
# NOTE(review): assumes x$P is non-NULL -- confirm for 1-column input.
p.value <- x$P
diag(p.value) <- 0
cor_network(corr = x$r, p.value = p.value, directed = directed, ...,
val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network corr.test
as_cor_network.corr.test <- function(x, directed = FALSE, ...)
{
# psych::corr.test stores p-values in x$p (lowercase), unlike rcorr.
cor_network(corr = x$r, p.value = x$p, directed = directed, ...,
val.type = "list")
}
#' @importFrom tibble as_tibble
#' @importFrom igraph as_data_frame is.directed
#' @rdname as_cor_network
#' @export
#' @method as_cor_network igraph
as_cor_network.igraph <- function(x, ...)
{
# Pull the vertex and edge tables out of the igraph object and preserve
# its directedness attribute.
nodes <- tibble::as_tibble(igraph::as_data_frame(x, "vertices"))
edges <- tibble::as_tibble(igraph::as_data_frame(x, "edges"))
structure(.Data = list(nodes = nodes, edges = edges),
directed = igraph::is.directed(x), class = "cor_network")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network tbl_graph
as_cor_network.tbl_graph <- function(x, ...)
{
# A tbl_graph is an igraph underneath; route through the igraph method.
as_cor_network(igraph::as.igraph(x), ...)
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network default
as_cor_network.default <- function(x, ...) {
# Fallback: no conversion implemented for this class.
stop(class(x)[1], " hasn't been realized yet.", call. = FALSE)
}
#' Rename the endpoint columns of an edge table to "from"/"to" and move
#' them to the front.
#'
#' @param x a data frame of edges.
#' @param from,to names of the columns holding the edge endpoints.
#' @return `x` with the endpoint columns renamed and placed first.
#' @noRd
rename_cor_network_edge <- function(x, from, to)
{
  stopifnot(is.data.frame(x))
  name <- names(x)
  # Bug fix: the previous code assigned c("from", "to") positionally to
  # whichever columns matched, silently swapping the labels when `to`
  # preceded `from` (and mis-recycling when only one matched). Rename each
  # column explicitly instead.
  name[name == from] <- "from"
  name[name == to] <- "to"
  names(x) <- name
  new <- c(c("from", "to"), setdiff(name, c("from", "to")))
  x[new]
}
|
/R/as-cor-network.R
|
no_license
|
Bmdistef/ggcor
|
R
| false
| false
| 5,432
|
r
|
#' Coerce to a cor_network object
#' @description Functions to coerce a object to cor_network if possible.
#' @param x any \code{R} object.
#' @param directed logical value, whether or not to create a directed graph.
#' @param simplify logical value (defaults to TRUE) indicating whether to
#' delete nodes without edge connections.
#' @param weight NULL (default) or name of column in edges which will be renamed
#' to "weight".
#' @param r.thres a numeric value.
#' @param r.absolute logical value (defaults to TRUE).
#' @param p.thres a numeric value.
#' @param ... extra params passing to \code{\link[ggcor]{cor_network}}.
#' @return a cor_network object.
#' @importFrom dplyr filter rename %>%
#' @importFrom tibble tibble
#' @importFrom rlang sym !!
#' @rdname as_cor_network
#' @examples
#' ll <- correlate(mtcars)
#' as_cor_network(ll)
#' @author Houyun Huang, Lei Zhou, Jian Chen, Taiyun Wei
#' @export
as_cor_network <- function(x, ...) {
UseMethod("as_cor_network")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network cor_tbl
as_cor_network.cor_tbl <- function(x,
directed = FALSE,
simplify = TRUE,
weight = NULL,
r.thres = 0.6,
r.absolute = TRUE,
p.thres = 0.05,
...)
{
if(is_general_cor_tbl(x)) {
edges <- if("p.value" %in% names(x) && is.finite(p.thres)) {
dplyr::filter(x, p.value < p.thres)
} else x
} else {
edges <- if(is.finite(r.thres)) {
if("p.value" %in% names(x) && is.finite(p.thres)) {
if(r.absolute) {
dplyr::filter(x, abs(r) > r.thres, p.value < p.thres)
} else {
dplyr::filter(x, r > r.thres, p.value < p.thres)
}
} else {
if(r.absolute) {
dplyr::filter(x, abs(r) > r.thres)
} else {
dplyr::filter(x, r > r.thres)
}
}
} else {
if("p.value" %in% names(x) && is.finite(p.thres)) {
dplyr::filter(x, p.value < p.thres)
} else {
x
}
}
}
# rename
edges <- rename_cor_network_edge(edges, ".row.names", ".col.names")
nodes <- if(simplify) {
tibble::tibble(name = unique(c(edges$from, edges$to)))
} else {
tibble::tibble(name = unique(c(x$.col.names, x$.row.names)))
}
if(!is.null(weight)) {
if(!weight %in% names(edges)) {
stop("don't find ", weight, " in egdes table.", call. = FALSE)
}
weight <- rlang::sym(weight)
edges <- dplyr::rename(edges, weight = !!weight)
}
structure(.Data = list(nodes = nodes, edges = edges),
directed = directed, class = "cor_network")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network mantel_tbl
as_cor_network.mantel_tbl <- function(x, directed = FALSE, ...) {
as_cor_network(as_cor_tbl(x), directed = directed, ...)
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network pro_tbl
as_cor_network.pro_tbl <- function(x, directed = FALSE, ...) {
as_cor_network(as_cor_tbl(x), directed = directed, ...)
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network matrix
as_cor_network.matrix <- function(x, directed = FALSE, ...) {
cor_network(corr = x, directed = directed, ..., val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network data.frame
as_cor_network.data.frame <- function(x, directed = FALSE, ...) {
cor_network(corr = x, directed = directed, ..., val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network correlate
as_cor_network.correlate <- function(x, directed = FALSE, ...) {
cor_network(corr = x$r, p.value = x$p.value, directed = directed, ...,
val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network rcorr
as_cor_network.rcorr <- function(x, directed = FALSE, ...)
{
p.value <- x$P
diag(p.value) <- 0
cor_network(corr = x$r, p.value = p.value, directed = directed, ...,
val.type = "list")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network corr.test
as_cor_network.corr.test <- function(x, directed = FALSE, ...)
{
cor_network(corr = x$r, p.value = x$p, directed = directed, ...,
val.type = "list")
}
#' @importFrom tibble as_tibble
#' @importFrom igraph as_data_frame is.directed
#' @rdname as_cor_network
#' @export
#' @method as_cor_network igraph
as_cor_network.igraph <- function(x, ...)
{
nodes <- tibble::as_tibble(igraph::as_data_frame(x, "vertices"))
edges <- tibble::as_tibble(igraph::as_data_frame(x, "edges"))
structure(.Data = list(nodes = nodes, edges = edges),
directed = igraph::is.directed(x), class = "cor_network")
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network tbl_graph
as_cor_network.tbl_graph <- function(x, ...)
{
as_cor_network(igraph::as.igraph(x), ...)
}
#' @rdname as_cor_network
#' @export
#' @method as_cor_network default
as_cor_network.default <- function(x, ...) {
stop(class(x)[1], " hasn't been realized yet.", call. = FALSE)
}
#' @noRd
rename_cor_network_edge <- function(x, from, to)
{
stopifnot(is.data.frame(x))
name <- names(x)
name[name %in% c(from, to)] <- c("from", "to")
names(x) <- name
new <- c(c("from", "to"), setdiff(name, c("from", "to")))
x[new]
}
|
## This R script completes unvariate analyses in the imputed and sample data frame
## Specify libraries
library(pacman)
library(tidyverse)
library(Hmisc)
library(here)
library(arrow)
library(purrr)
library(broom)
library(data.table)
library(forcats)
library(rstatix)
library(janitor)
library(lubridate)
library(skimr)
library(ggplot2)
library(mice)
BMI_trajectories <- read_csv (here::here ("output/data", "CC_imputation_DF_for_impute.csv"))
BMI_imp_long <- read_csv (here::here ("output/data", "CC_imputation_dataframe.csv"))
BMI_trajectories <- BMI_trajectories %>%
dplyr::filter(diabetes_t2 == TRUE)
BMI_imp_long <- BMI_imp_long %>%
dplyr::filter(diabetes_t2 == TRUE)
BMI_trajectories$imd <- factor(BMI_trajectories$imd,
levels = c('1','2','3','4','5'))
BMI_imp_long$imd <- factor(BMI_imp_long$imd,
levels = c('1','2','3','4','5'))
BMI_trajectories$eth_16_corrected <- factor(BMI_trajectories$eth_16_corrected,
levels = c(
"White_British",
"White_Irish",
"Other_White",
"Indian",
"Pakistani",
"Bangladeshi",
"Other_Asian",
"Caribbean",
"African",
"Other_Black",
"Chinese",
"White_Asian",
"White_Black_Carib",
"White_Black_African",
"Other_Mixed",
"Other",
"Missing"))
BMI_trajectories<- BMI_trajectories %>%
dplyr::mutate(eth_collapsed = case_when(
eth_16_corrected == "White_British" ~ "white",
eth_16_corrected == "White_Irish" ~ "white",
eth_16_corrected == "Other_White" ~ "white",
eth_16_corrected == "White_Black_Carib" ~ "mixed",
eth_16_corrected == "White_Black_African" ~ "mixed",
eth_16_corrected == "White_Asian" ~ "mixed",
eth_16_corrected == "Other_Mixed" ~ "mixed",
eth_16_corrected == "Indian" ~ "south_asian",
eth_16_corrected == "Pakistani" ~ "south_asian",
eth_16_corrected == "Bangladeshi" ~ "south_asian",
eth_16_corrected == "Other_Asian" ~ "chinese_other",
eth_16_corrected == "Chinese" ~ "chinese_other",
eth_16_corrected == "Caribbean" ~ "black",
eth_16_corrected == "African" ~ "black",
eth_16_corrected == "Other_Black" ~ "black",
eth_16_corrected == "Other" ~ "chinese_other"
))
BMI_imp_long$eth_16_corrected <- factor(BMI_imp_long$eth_16_corrected,
levels = c(
"White_British",
"White_Irish",
"Other_White",
"Indian",
"Pakistani",
"Bangladeshi",
"Other_Asian",
"Caribbean",
"African",
"Other_Black",
"Chinese",
"White_Asian",
"White_Black_Carib",
"White_Black_African",
"Other_Mixed",
"Other",
"Missing"))
BMI_imp_long <- BMI_imp_long %>%
dplyr::mutate(eth_collapsed = case_when(
eth_16_corrected == "White_British" ~ "white",
eth_16_corrected == "White_Irish" ~ "white",
eth_16_corrected == "Other_White" ~ "white",
eth_16_corrected == "White_Black_Carib" ~ "mixed",
eth_16_corrected == "White_Black_African" ~ "mixed",
eth_16_corrected == "White_Asian" ~ "mixed",
eth_16_corrected == "Other_Mixed" ~ "mixed",
eth_16_corrected == "Indian" ~ "south_asian",
eth_16_corrected == "Pakistani" ~ "south_asian",
eth_16_corrected == "Bangladeshi" ~ "south_asian",
eth_16_corrected == "Other_Asian" ~ "chinese_other",
eth_16_corrected == "Chinese" ~ "chinese_other",
eth_16_corrected == "Caribbean" ~ "black",
eth_16_corrected == "African" ~ "black",
eth_16_corrected == "Other_Black" ~ "black",
eth_16_corrected == "Other" ~ "chinese_other"
))
BMI_trajectories$eth_collapsed <- factor(BMI_trajectories$eth_collapsed,
levels = c('white','black','south_asian','chinese_other','mixed'))
BMI_imp_long$eth_collapsed <- factor(BMI_imp_long$eth_collapsed,
levels = c('white','black','south_asian','chinese_other','mixed'))
BMI_trajectories <- BMI_trajectories %>%
dplyr::mutate(rapid_change = case_when(
bmi_change_cat == "over 0.5" ~ 1,
bmi_change_cat != "over 0.5" ~ 0
))
BMI_trajectories %>%
tabyl(bmi_change_cat, rapid_change)
BMI_imp_long <- BMI_imp_long %>%
dplyr::mutate(rapid_change = case_when(
bmi_change_cat == "over 0.5" ~ 1,
bmi_change_cat != "over 0.5" ~ 0
))
BMI_imp_long %>%
tabyl(bmi_change_cat, rapid_change)
BMI_imp_long_mids<-as.mids(BMI_imp_long)
### IMPUTED MODELS COMPARED TO NON IMPUTED
# Base adjustment-set model for age (the other "univariable" models below add
# one exposure on top of this same adjustment set).
# Fix: `age_group_2` appeared twice in both formulas; R deduplicates model
# terms so the fits were unchanged, but the duplicate was misleading.
age <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed,
           data = BMI_trajectories, family = "binomial") %>%
  broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
  dplyr::mutate(variable = 'sample_age')
print(1)
# Same model fitted across the multiply-imputed datasets, then pooled.
imp_age <- glm.mids(rapid_change ~ age_group_2 + sex + imd + eth_collapsed,
                    data = BMI_imp_long_mids, family = binomial)
imp_age <- summary(pool(imp_age), conf.int = TRUE, exponentiate = TRUE) %>%
  dplyr::mutate(variable = 'imp_age')
print(2)
##
hypertension <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + hypertension, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_hypertension')
imp_hypertension <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + hypertension, data = BMI_imp_long_mids, family = binomial)
imp_hypertension <- summary(pool(imp_hypertension), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_hypertension')
##
diabetes_t1 <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + diabetes_t1, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_diabetes_t1')
imp_diabetes_t1 <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + diabetes_t1, data = BMI_imp_long_mids, family = binomial)
imp_diabetes_t1 <- summary(pool(imp_diabetes_t1), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_diabetes_t1')
##
diabetes_t2 <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + diabetes_t2, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_diabetes_t2')
imp_diabetes_t2 <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + diabetes_t2, data = BMI_imp_long_mids, family = binomial)
imp_diabetes_t2 <- summary(pool(imp_diabetes_t2), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_diabetes_t2')
##
chronic_cardiac <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + chronic_cardiac, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_chronic_cardiac')
imp_chronic_cardiac <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + chronic_cardiac, data = BMI_imp_long_mids, family = binomial)
imp_chronic_cardiac <- summary(pool(imp_chronic_cardiac), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_chronic_cardiac')
##
learning_disability <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + learning_disability, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_learning_disability')
imp_learning_disability <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + learning_disability, data = BMI_imp_long_mids, family = binomial)
imp_learning_disability <- summary(pool(imp_learning_disability), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_learning_disability')
##
psychosis_schiz_bipolar <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + psychosis_schiz_bipolar, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_psychosis_schiz_bipolar')
imp_psychosis_schiz_bipolar <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + psychosis_schiz_bipolar, data = BMI_imp_long_mids, family = binomial)
imp_psychosis_schiz_bipolar <- summary(pool(imp_psychosis_schiz_bipolar), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_psychosis_schiz_bipolar')
##
depression <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + depression, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_depression')
imp_depression <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + depression, data = BMI_imp_long_mids, family = binomial)
imp_depression <- summary(pool(imp_depression), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_depression')
##
COPD <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + COPD, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_COPD')
imp_COPD <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + COPD, data = BMI_imp_long_mids, family = binomial)
imp_COPD <- summary(pool(imp_COPD), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_COPD')
##
asthma <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + asthma, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_asthma')
imp_asthma <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + asthma, data = BMI_imp_long_mids, family = binomial)
imp_asthma <- summary(pool(imp_asthma), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_asthma')
##
dementia <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + dementia, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_dementia')
imp_dementia <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + dementia, data = BMI_imp_long_mids, family = binomial)
imp_dementia <- summary(pool(imp_dementia), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_dementia')
##
stroke_and_TIA <- glm(rapid_change ~ age_group_2 + sex + imd + eth_collapsed + stroke_and_TIA, data=BMI_trajectories, family = "binomial") %>%
broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'sample_stroke_and_TIA')
imp_stroke_and_TIA <- glm.mids((rapid_change) ~ age_group_2 + sex + imd + eth_collapsed + stroke_and_TIA, data = BMI_imp_long_mids, family = binomial)
imp_stroke_and_TIA <- summary(pool(imp_stroke_and_TIA), conf.int = TRUE, exponentiate = TRUE) %>%
dplyr::mutate(variable = 'imp_stroke_and_TIA')
print(3)
univariate_models <- age %>%
bind_rows(hypertension,
diabetes_t1,
diabetes_t2,
chronic_cardiac,
learning_disability,
psychosis_schiz_bipolar,
depression,
COPD,
asthma,
dementia,
stroke_and_TIA)
univariate_imputed <- imp_age %>%
bind_rows(imp_hypertension,
imp_diabetes_t1,
imp_diabetes_t2,
imp_chronic_cardiac,
imp_learning_disability,
imp_psychosis_schiz_bipolar,
imp_depression,
imp_COPD,
imp_asthma,
imp_dementia,
imp_stroke_and_TIA)
print(4)
write.csv (univariate_models, here::here ("output/data", "CC_imputation_sample_multivariate_t2d.csv"))
write.csv (univariate_imputed, here::here ("output/data", "CC_imputation_multivariate_t2d.csv"))
|
/analysis/ethnicity_check/MICE/eth_corrected_MICE_multivariate_t2d.R
|
permissive
|
opensafely/BMI-and-Metabolic-Markers
|
R
| false
| false
| 13,171
|
r
|
## This R script runs the comparison analyses in the imputed and the
## complete-case (sample) data frames, restricted to type 2 diabetes patients.

## Libraries ----
library(pacman)
library(tidyverse)
library(Hmisc)
library(here)
library(arrow)
library(purrr)
library(broom)
library(data.table)
library(forcats)
library(rstatix)
library(janitor)
library(lubridate)
library(skimr)
library(ggplot2)
library(mice)

## Load the complete-case frame and the long-format imputation frame, keeping
## only rows with type 2 diabetes.
BMI_trajectories <- read_csv(here::here("output/data", "CC_imputation_DF_for_impute.csv")) %>%
  dplyr::filter(diabetes_t2 == TRUE)
BMI_imp_long <- read_csv(here::here("output/data", "CC_imputation_dataframe.csv")) %>%
  dplyr::filter(diabetes_t2 == TRUE)

## Index of Multiple Deprivation as an ordered 1-5 factor in both frames.
imd_levels <- c('1', '2', '3', '4', '5')
BMI_trajectories$imd <- factor(BMI_trajectories$imd, levels = imd_levels)
BMI_imp_long$imd <- factor(BMI_imp_long$imd, levels = imd_levels)
## Ethnicity handling ----
## The 16-category corrected ethnicity is (1) given a fixed level order and
## (2) collapsed to 5 broad groups. The original code applied two identical
## copy-pasted blocks to the two data frames; this is now one helper applied
## to both. "Missing" matches no collapse rule and becomes NA, as before.

eth_16_levels <- c(
  "White_British",
  "White_Irish",
  "Other_White",
  "Indian",
  "Pakistani",
  "Bangladeshi",
  "Other_Asian",
  "Caribbean",
  "African",
  "Other_Black",
  "Chinese",
  "White_Asian",
  "White_Black_Carib",
  "White_Black_African",
  "Other_Mixed",
  "Other",
  "Missing")

# Apply the 16-level factor and the 16 -> 5 category collapse to one frame.
# Returns the frame with eth_16_corrected releveled and eth_collapsed added
# as a factor ordered white/black/south_asian/chinese_other/mixed.
collapse_ethnicity <- function(df) {
  df$eth_16_corrected <- factor(df$eth_16_corrected, levels = eth_16_levels)
  df <- df %>%
    dplyr::mutate(eth_collapsed = case_when(
      eth_16_corrected %in% c("White_British", "White_Irish",
                              "Other_White") ~ "white",
      eth_16_corrected %in% c("White_Black_Carib", "White_Black_African",
                              "White_Asian", "Other_Mixed") ~ "mixed",
      eth_16_corrected %in% c("Indian", "Pakistani",
                              "Bangladeshi") ~ "south_asian",
      eth_16_corrected %in% c("Other_Asian", "Chinese",
                              "Other") ~ "chinese_other",
      eth_16_corrected %in% c("Caribbean", "African",
                              "Other_Black") ~ "black"
    ))
  df$eth_collapsed <- factor(df$eth_collapsed,
                             levels = c('white', 'black', 'south_asian',
                                        'chinese_other', 'mixed'))
  df
}

BMI_trajectories <- collapse_ethnicity(BMI_trajectories)
BMI_imp_long <- collapse_ethnicity(BMI_imp_long)
# Flag "rapid" BMI change (category "over 0.5") as a 0/1 outcome in both the
# complete-case and the long imputation data frames. Rows with a missing
# bmi_change_cat stay NA, matching the original case_when behaviour.
BMI_trajectories <- BMI_trajectories %>%
  dplyr::mutate(rapid_change = dplyr::if_else(bmi_change_cat == "over 0.5", 1, 0))

# Sanity check: cross-tabulate the flag against the source category.
BMI_trajectories %>%
  tabyl(bmi_change_cat, rapid_change)

BMI_imp_long <- BMI_imp_long %>%
  dplyr::mutate(rapid_change = dplyr::if_else(bmi_change_cat == "over 0.5", 1, 0))

BMI_imp_long %>%
  tabyl(bmi_change_cat, rapid_change)

# Convert the long-format imputations back to a mice 'mids' object so the
# per-imputation models can be pooled downstream.
BMI_imp_long_mids <- as.mids(BMI_imp_long)
### IMPUTED MODELS COMPARED TO NON IMPUTED
### For each covariate of interest, fit a logistic model of rapid_change
### (adjusted for age band, sex, IMD and collapsed ethnicity) in:
###   (a) the complete-case sample (BMI_trajectories), tidied with broom, and
###   (b) the imputed data (BMI_imp_long_mids), pooled with Rubin's rules.
###
### NOTE(review): the original "age" formula listed `age_group_2` twice; R
### de-duplicates formula terms, so dropping the repeat does not change results.
### NOTE(review): `diabetes_t2` is constant after the upstream
### filter(diabetes_t2 == TRUE), so its coefficient is aliased (NA) in the
### diabetes_t2 model. Kept for output-shape compatibility — confirm intended.

base_terms <- "age_group_2 + sex + imd + eth_collapsed"
covariates <- c("age", "hypertension", "diabetes_t1", "diabetes_t2",
                "chronic_cardiac", "learning_disability",
                "psychosis_schiz_bipolar", "depression", "COPD",
                "asthma", "dementia", "stroke_and_TIA")

# Fit the sample and imputed models for one covariate.
# "age" adds no extra term (age band is already in the base adjustment set).
# Returns a list of two tidy data frames tagged 'sample_<cov>' / 'imp_<cov>'.
fit_one <- function(cov) {
  rhs <- if (cov == "age") base_terms else paste(base_terms, cov, sep = " + ")
  fml <- stats::as.formula(paste("rapid_change ~", rhs))
  sample_fit <- glm(fml, data = BMI_trajectories, family = "binomial") %>%
    broom::tidy(conf.int = TRUE, exponentiate = TRUE) %>%
    dplyr::mutate(variable = paste0("sample_", cov))
  imp_fit <- glm.mids(fml, data = BMI_imp_long_mids, family = binomial)
  imp_fit <- summary(pool(imp_fit), conf.int = TRUE, exponentiate = TRUE) %>%
    dplyr::mutate(variable = paste0("imp_", cov))
  list(sample = sample_fit, imputed = imp_fit)
}

fits <- lapply(covariates, fit_one)

# Stack results in the same covariate order as the original copy-pasted code.
univariate_models <- dplyr::bind_rows(lapply(fits, `[[`, "sample"))
univariate_imputed <- dplyr::bind_rows(lapply(fits, `[[`, "imputed"))

write.csv(univariate_models, here::here("output/data", "CC_imputation_sample_multivariate_t2d.csv"))
write.csv(univariate_imputed, here::here("output/data", "CC_imputation_multivariate_t2d.csv"))
|
library(here)

# clustermq job defaults for the HPC scheduler: "medium" partition, one log
# file per array task (%a = task id), and a 120-hour wall-clock limit.
cmq_defaults <- list(
  ptn = "medium",
  log_file = "Rout/log%a.log",
  time_amt = "120:00:00"
)
options(clustermq.defaults = cmq_defaults)

# Render the main simulation entry point (rmarkdown::render spins .R scripts).
rmarkdown::render(here('R/11_main-sim.R'))
|
/R/12_render-simulation-11.R
|
no_license
|
denisagniel/longitudinal-surrogates
|
R
| false
| false
| 224
|
r
|
library(here)

# clustermq job defaults for the HPC scheduler: "medium" partition, one log
# file per array task (%a = task id), and a 120-hour wall-clock limit.
cmq_defaults <- list(
  ptn = "medium",
  log_file = "Rout/log%a.log",
  time_amt = "120:00:00"
)
options(clustermq.defaults = cmq_defaults)

# Render the main simulation entry point (rmarkdown::render spins .R scripts).
rmarkdown::render(here('R/11_main-sim.R'))
|
## Fits Poisson count models to Stats19 (GB road-collision) casualty data,
## comparing a structured interaction model against a "saturated" model.
#install.packages("tidyr")
#install.packages("dplyr")
library(readstata13)
library(dplyr)
library(tidyr)
# Reading in the data (the Stata source is commented out; an .rda snapshot of
# the same prepared data is loaded instead).
#ss19<-read.dta13("C:/Users/jw745/Dropbox/z_ITHIMfiles/Stats19/1b_DataCreated/Stats19_05-15_ready_v3.dta")
load("Stats19.rda")
ss19<-tbl_df(ss19)
# Keep only collisions with another vehicle, then drop rows with any NA.
# NOTE(review): tbl_df() is deprecated in modern dplyr — as_tibble() is the
# current equivalent; confirm the pinned dplyr version before changing.
ss19<-filter(ss19, strike_mode!='No other vehicle')
ss19<-ss19[complete.cases(ss19),]
# Adding age bands. cut(..., right = FALSE) gives left-closed intervals [a, b);
# striker bands break at 16 and 25 (presumably licensing-related — confirm).
ss19$cas_age_band= cut(ss19$cas_age, breaks = c(0, 20,40,60,80,110), right = FALSE)
ss19$strike_age_band= cut(ss19$strike_age, breaks = c(0, 16,25,60,80,110), right = FALSE)
table(ss19$cas_age_band, ss19$strike_age_band)
# Poisson model
library(tidyverse)
## ss19 <- sample_frac(ss19, 0.01) # if we want a 1% random sample
### Variables available in ss19:
# [1] "accident_index" "year" "roadtype"
# [4] "cas_severity" "cas_mode" "cas_male"
# [7] "strike_mode" "strike_male" "strike_age_band"
###
# Aggregate to one count per covariate combination, then zero-fill every
# unobserved combination with complete(). droplevels() first so the fill only
# expands over levels actually present after the filters above.
ssg <- group_by(ss19, cas_mode, cas_male, cas_severity, strike_mode, cas_age_band, strike_age_band, roadtype, strike_male) %>%
  # select (ss19, cas_mode, cas_male, cas_severity, strike_mode, cas_age_band, strike_age_band, roadtype, strike_male) %>%
  summarise(count=n()) %>%
  droplevels() %>%
  as.data.frame() %>% # remove "grouped" class, which breaks filling with zeroes
  complete(cas_mode, cas_male, cas_severity, strike_mode, strike_age_band, cas_age_band, strike_male, roadtype, fill=list(count=0))
# New version modelled per road type (specification with "nov" dropped).
# NOTE(review): only A-roads are analysed below; other roadtype strata would
# need a re-run with a different filter value.
ssg<-filter(ssg,roadtype=="A")
# Structured model: selected interactions among casualty (cas_*) and striker
# (strike_*) age band, sex, severity and mode.
fit<-
  glm(count ~(cas_age_band*cas_severity + cas_male*cas_severity*cas_age_band + strike_male*strike_age_band + strike_mode*cas_severity*strike_age_band*cas_male*strike_male+cas_mode*cas_severity*cas_age_band*strike_age_band*cas_male*strike_mode),
      data=ssg, family=poisson)
#this is Excel ithim 1 structure
#fit_ITHIM1 <-
#  glm(count ~ (cas_mode *cas_severity* roadtype* strike_mode),
#      data=ssg, family=poisson)
## Add predicted count with standard error, both on the response (count) scale.
pred <- predict(fit, se.fit=TRUE, type="response")
ssg <- mutate(ssg, count_fit=pred[["fit"]], count_se=pred[["se.fit"]])
as.data.frame(ssg)[1:10,]
##I have not gone beyond here -jw
## Predicted counts likely biased and SEs underestimated because we are not including all relevant predictors
## So now fit a "saturated" model including all variables and their full interactions
# NOTE(review): this formula only crosses sex/mode/severity — it omits the age
# bands and roadtype, so it is not fully saturated as the comment suggests.
fitsat <- glm(count ~ cas_male*cas_mode*cas_severity*strike_mode, data=ssg, family=poisson)
pred <- predict(fitsat, se.fit=TRUE, type="response")
ssg <- mutate(ssg, count_fit_sat=pred[["fit"]], count_se_sat=pred[["se.fit"]])
options(scipen=1e+07) # turn off scientific notation for small numbers
as.data.frame(ssg)[1:10,]
## fitted counts match observed counts more closely, and standard errors are bigger.
## Model comparison (lower AIC: saturated model is more efficient despite having many more parameters (df))
AIC(fit, fitsat)
|
/Poisson model stats19.R
|
no_license
|
ITHIM/InjuryModel
|
R
| false
| false
| 2,896
|
r
|
## Fits Poisson count models to Stats19 (GB road-collision) casualty data,
## comparing a structured interaction model against a "saturated" model.
#install.packages("tidyr")
#install.packages("dplyr")
library(readstata13)
library(dplyr)
library(tidyr)
# Reading in the data (the Stata source is commented out; an .rda snapshot of
# the same prepared data is loaded instead).
#ss19<-read.dta13("C:/Users/jw745/Dropbox/z_ITHIMfiles/Stats19/1b_DataCreated/Stats19_05-15_ready_v3.dta")
load("Stats19.rda")
ss19<-tbl_df(ss19)
# Keep only collisions with another vehicle, then drop rows with any NA.
# NOTE(review): tbl_df() is deprecated in modern dplyr — as_tibble() is the
# current equivalent; confirm the pinned dplyr version before changing.
ss19<-filter(ss19, strike_mode!='No other vehicle')
ss19<-ss19[complete.cases(ss19),]
# Adding age bands. cut(..., right = FALSE) gives left-closed intervals [a, b);
# striker bands break at 16 and 25 (presumably licensing-related — confirm).
ss19$cas_age_band= cut(ss19$cas_age, breaks = c(0, 20,40,60,80,110), right = FALSE)
ss19$strike_age_band= cut(ss19$strike_age, breaks = c(0, 16,25,60,80,110), right = FALSE)
table(ss19$cas_age_band, ss19$strike_age_band)
# Poisson model
library(tidyverse)
## ss19 <- sample_frac(ss19, 0.01) # if we want a 1% random sample
### Variables available in ss19:
# [1] "accident_index" "year" "roadtype"
# [4] "cas_severity" "cas_mode" "cas_male"
# [7] "strike_mode" "strike_male" "strike_age_band"
###
# Aggregate to one count per covariate combination, then zero-fill every
# unobserved combination with complete(). droplevels() first so the fill only
# expands over levels actually present after the filters above.
ssg <- group_by(ss19, cas_mode, cas_male, cas_severity, strike_mode, cas_age_band, strike_age_band, roadtype, strike_male) %>%
  # select (ss19, cas_mode, cas_male, cas_severity, strike_mode, cas_age_band, strike_age_band, roadtype, strike_male) %>%
  summarise(count=n()) %>%
  droplevels() %>%
  as.data.frame() %>% # remove "grouped" class, which breaks filling with zeroes
  complete(cas_mode, cas_male, cas_severity, strike_mode, strike_age_band, cas_age_band, strike_male, roadtype, fill=list(count=0))
# New version modelled per road type (specification with "nov" dropped).
# NOTE(review): only A-roads are analysed below; other roadtype strata would
# need a re-run with a different filter value.
ssg<-filter(ssg,roadtype=="A")
# Structured model: selected interactions among casualty (cas_*) and striker
# (strike_*) age band, sex, severity and mode.
fit<-
  glm(count ~(cas_age_band*cas_severity + cas_male*cas_severity*cas_age_band + strike_male*strike_age_band + strike_mode*cas_severity*strike_age_band*cas_male*strike_male+cas_mode*cas_severity*cas_age_band*strike_age_band*cas_male*strike_mode),
      data=ssg, family=poisson)
#this is Excel ithim 1 structure
#fit_ITHIM1 <-
#  glm(count ~ (cas_mode *cas_severity* roadtype* strike_mode),
#      data=ssg, family=poisson)
## Add predicted count with standard error, both on the response (count) scale.
pred <- predict(fit, se.fit=TRUE, type="response")
ssg <- mutate(ssg, count_fit=pred[["fit"]], count_se=pred[["se.fit"]])
as.data.frame(ssg)[1:10,]
##I have not gone beyond here -jw
## Predicted counts likely biased and SEs underestimated because we are not including all relevant predictors
## So now fit a "saturated" model including all variables and their full interactions
# NOTE(review): this formula only crosses sex/mode/severity — it omits the age
# bands and roadtype, so it is not fully saturated as the comment suggests.
fitsat <- glm(count ~ cas_male*cas_mode*cas_severity*strike_mode, data=ssg, family=poisson)
pred <- predict(fitsat, se.fit=TRUE, type="response")
ssg <- mutate(ssg, count_fit_sat=pred[["fit"]], count_se_sat=pred[["se.fit"]])
options(scipen=1e+07) # turn off scientific notation for small numbers
as.data.frame(ssg)[1:10,]
## fitted counts match observed counts more closely, and standard errors are bigger.
## Model comparison (lower AIC: saturated model is more efficient despite having many more parameters (df))
AIC(fit, fitsat)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{combineDE}
\alias{combineDE}
\title{Combine Multiple Comparison Results from Differential Expression (DE) or Differential Activity (DA) Analysis}
\usage{
combineDE(DE_list, DE_name = NULL, transfer_tab = NULL,
main_id = NULL, method = "Stouffer", twosided = TRUE,
signed = TRUE)
}
\arguments{
\item{DE_list}{list, each element in the list is one DE/DA comparison need to be combined.}
\item{DE_name}{a vector of characters, the DE/DA comparison names.
If not NULL, it must match the names of DE_list in correct order.
If NULL, names of the DE_list will be used.
Default is NULL.}
\item{transfer_tab}{data.frame, the ID conversion table. Users can call \code{get_IDtransfer} to get this table.
The purpose is to correctly map IDs for \code{DE_list}. The column names must match \code{DE_name}.
If NULL, ID column of each DE comparison will be considered as the same type.
Default is NULL.}
\item{main_id}{character, a name of the element in \code{DE_list}. The ID column of that comparison will be used as the ID of the final combination.
If NULL, the first element name from \code{DE_list} will be used. Default is NULL.}
\item{method}{character, users can choose between "Stouffer" and "Fisher". Default is "Stouffer".}
\item{twosided}{logical, if TRUE, a two-tailed test will be performed.
If FALSE, a one-tailed test will be performed, and P value falls within the range of 0 to 0.5. Default is TRUE.}
\item{signed}{logical, if TRUE, give a sign to the P value, indicating the direction of testing.
Default is TRUE.}
}
\value{
Returns a list containing the combined DE/DA analysis. Each single comparison result before combination is wrapped inside
(possibly with some IDs filtered out, due to the combination). A data frame named "combine" inside the list is the combined analysis.
Rows are genes/drivers, columns are combined statistics (e.g. "logFC", "AveExpr", "t", "P.Value" etc.).
}
\description{
\code{combineDE} combines multiple comparisons of DE or DA analysis.
Can combine DE with DE, DA with DA and also DE with DA if a proper transfer table is prepared.
}
\details{
For example, there are 4 subgroups in the phenotype, G1, G2, G3 and G4. One DE analysis was performed on G1 vs. G2, and another DE was performed on G1 vs. G3.
If the user is interested in the DE analysis of G1 vs. (G2 and G3), they can call this function to combine the two comparison results above together.
The combined P values will be taken care by \code{combinePvalVector}.
}
\examples{
analysis.par <- list()
analysis.par$out.dir.DATA <- system.file('demo1','driver/DATA/',package = "NetBID2")
NetBID.loadRData(analysis.par=analysis.par,step='ms-tab')
phe_info <- Biobase::pData(analysis.par$cal.eset)
each_subtype <- 'G4'
G0 <- rownames(phe_info)[which(phe_info$`subgroup`!=each_subtype)] # get sample list for G0
G1 <- rownames(phe_info)[which(phe_info$`subgroup`==each_subtype)] # get sample list for G1
DE_gene_limma <- getDE.limma.2G(eset=analysis.par$cal.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
DA_driver_limma <- getDE.limma.2G(eset=analysis.par$merge.ac.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
DE_list <- list(DE=DE_gene_limma,DA=DA_driver_limma)
g1 <- gsub('(.*)_.*','\\\\1',DE_list$DA$ID)
transfer_tab <- data.frame(DE=g1,DA=DE_list$DA$ID,stringsAsFactors = FALSE)
res1 <- combineDE(DE_list,transfer_tab=transfer_tab,main_id='DA')
\dontrun{
each_subtype <- 'G4'
G0 <- rownames(phe_info)[which(phe_info$`subgroup`!=each_subtype)] # get sample list for G0
G1 <- rownames(phe_info)[which(phe_info$`subgroup`==each_subtype)] # get sample list for G1
DE_gene_limma_G4 <- getDE.limma.2G(eset=analysis.par$cal.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
each_subtype <- 'SHH'
G0 <- rownames(phe_info)[which(phe_info$`subgroup`!=each_subtype)] # get sample list for G0
G1 <- rownames(phe_info)[which(phe_info$`subgroup`==each_subtype)] # get sample list for G1
DE_gene_limma_SHH <- getDE.limma.2G(eset=analysis.par$cal.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
DE_list <- list(G4=DE_gene_limma_G4,SHH=DE_gene_limma_SHH)
res2 <- combineDE(DE_list,transfer_tab=NULL)
}
}
|
/man/combineDE.Rd
|
permissive
|
WenboSheng/NetBID
|
R
| false
| true
| 4,653
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipeline_functions.R
\name{combineDE}
\alias{combineDE}
\title{Combine Multiple Comparison Results from Differential Expression (DE) or Differential Activity (DA) Analysis}
\usage{
combineDE(DE_list, DE_name = NULL, transfer_tab = NULL,
main_id = NULL, method = "Stouffer", twosided = TRUE,
signed = TRUE)
}
\arguments{
\item{DE_list}{list, each element in the list is one DE/DA comparison need to be combined.}
\item{DE_name}{a vector of characters, the DE/DA comparison names.
If not NULL, it must match the names of DE_list in correct order.
If NULL, names of the DE_list will be used.
Default is NULL.}
\item{transfer_tab}{data.frame, the ID conversion table. Users can call \code{get_IDtransfer} to get this table.
The purpose is to correctly mapping ID for \code{DE_list}. The column names must match \code{DE_name}.
If NULL, ID column of each DE comparison will be considered as the same type.
Default is NULL.}
\item{main_id}{character, a name of the element in \code{DE_list}. The ID column of that comparison will be used as the ID of the final combination.
If NULL, the first element name from \code{DE_list} will be used. Default is NULL.}
\item{method}{character, users can choose between "Stouffer" and "Fisher". Default is "Stouffer".}
\item{twosided}{logical, if TRUE, a two-tailed test will be performed.
If FALSE, a one-tailed test will be performed, and P value falls within the range of 0 to 0.5. Default is TRUE.}
\item{signed}{logical, if TRUE, give a sign to the P value, which indicating the direction of testing.
Default is TRUE.}
}
\value{
Return a list containing the combined DE/DA analysis. Each single comparison result before combination is wrapped inside
(possibly with some IDs filtered out due to the combination). A data frame named "combine" inside the list is the combined analysis.
Rows are genes/drivers, columns are combined statistics (e.g. "logFC", "AveExpr", "t", "P.Value" etc.).
}
\description{
\code{combineDE} combines multiple comparisons of DE or DA analysis.
Can combine DE with DE, DA with DA and also DE with DA if proper transfer table prepared.
}
\details{
For example, there are 4 subgroups in the phenotype, G1, G2, G3 and G4. One DE analysis was performed on G1 vs. G2, and another DE was performed on G1 vs. G3.
If the user is interested in the DE analysis of G1 vs. (G2 and G3), they can call this function to combine the two comparison results above together.
The combined P values will be taken care by \code{combinePvalVector}.
}
\examples{
analysis.par <- list()
analysis.par$out.dir.DATA <- system.file('demo1','driver/DATA/',package = "NetBID2")
NetBID.loadRData(analysis.par=analysis.par,step='ms-tab')
phe_info <- Biobase::pData(analysis.par$cal.eset)
each_subtype <- 'G4'
G0 <- rownames(phe_info)[which(phe_info$`subgroup`!=each_subtype)] # get sample list for G0
G1 <- rownames(phe_info)[which(phe_info$`subgroup`==each_subtype)] # get sample list for G1
DE_gene_limma <- getDE.limma.2G(eset=analysis.par$cal.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
DA_driver_limma <- getDE.limma.2G(eset=analysis.par$merge.ac.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
DE_list <- list(DE=DE_gene_limma,DA=DA_driver_limma)
g1 <- gsub('(.*)_.*','\\\\1',DE_list$DA$ID)
transfer_tab <- data.frame(DE=g1,DA=DE_list$DA$ID,stringsAsFactors = FALSE)
res1 <- combineDE(DE_list,transfer_tab=transfer_tab,main_id='DA')
\dontrun{
each_subtype <- 'G4'
G0 <- rownames(phe_info)[which(phe_info$`subgroup`!=each_subtype)] # get sample list for G0
G1 <- rownames(phe_info)[which(phe_info$`subgroup`==each_subtype)] # get sample list for G1
DE_gene_limma_G4 <- getDE.limma.2G(eset=analysis.par$cal.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
each_subtype <- 'SHH'
G0 <- rownames(phe_info)[which(phe_info$`subgroup`!=each_subtype)] # get sample list for G0
G1 <- rownames(phe_info)[which(phe_info$`subgroup`==each_subtype)] # get sample list for G1
DE_gene_limma_SHH <- getDE.limma.2G(eset=analysis.par$cal.eset,
G1=G1,G0=G0,
G1_name=each_subtype,
G0_name='other')
DE_list <- list(G4=DE_gene_limma_G4,SHH=DE_gene_limma_SHH)
res2 <- combineDE(DE_list,transfer_tab=NULL)
}
}
|
## Put comments here that give an overall description of what your
## functions do
##This function get a square matrix in input, and return its determinant.
##For square-matrix more than 2x2, it recursive call itself to calculate
##detrminant ad inverted matrix
##The output is a list with detrminant and inverted matrix, in this order
## Compute the determinant and the inverse of a square matrix via
## cofactor (Laplace) expansion along the first row.
##
## Args:
##   myMatrix: a square numeric matrix (at least 1x1).
##
## Returns:
##   A list with two elements, in this order:
##     [[1]] the determinant (a scalar)
##     [[2]] the inverse matrix (adjugate / determinant)
##
## Fix: the original returned a bare scalar determinant for 2x2 input but
## list(determinant, inverse) for larger matrices, so callers indexing
## result[[2]] (e.g. cacheSolve) failed on 2x2 matrices. Both base cases
## now return the same list shape; the recursion still works because
## invertMatrix(z)[[1]] extracts the minor's determinant either way.
invertMatrix <- function(myMatrix)
{
  x <- myMatrix
  n <- nrow(x)
  # Base case 1x1: determinant is the element itself.
  if (n == 1)
  {
    return(list(x[1, 1], matrix(1 / x[1, 1], nrow = 1, ncol = 1)))
  }
  # Base case 2x2: closed-form determinant ad - bc and inverse.
  if (n == 2)
  {
    det2 <- (x[1, 1] * x[2, 2]) - (x[1, 2] * x[2, 1])
    inv2 <- matrix(c(x[2, 2], -x[2, 1], -x[1, 2], x[1, 1]),
                   nrow = 2, ncol = 2) / det2
    return(list(det2, inv2))
  }
  cofactorsMatrix <- x
  determinant <- 0
  for (myRow in seq_len(n))
  {
    # Drop the current row; y holds the candidate minor rows.
    y <- matrix(x[row(x) != myRow], nrow = n - 1, ncol = n)
    for (myColumn in seq_len(n))
    {
      # Drop the current column as well: z is the minor of (myRow, myColumn).
      z <- matrix(y[col(y) != myColumn], nrow = n - 1, ncol = n - 1)
      cofactorSign <- (-1)^(myRow + myColumn)
      # Recursive determinant of the minor.
      complement <- invertMatrix(z)[[1]]
      cofactor <- cofactorSign * complement
      cofactorsMatrix[myRow, myColumn] <- cofactor
      if (myRow == 1)
      {
        # Laplace expansion along the first row only.
        determinant <- determinant + (x[myRow, myColumn] * cofactor)
      }
    }
  }
  # Inverse = adjugate (transposed cofactor matrix) divided by determinant.
  inverse <- t(cofactorsMatrix) * (1 / determinant)
  return(list(determinant, inverse))
}
## Write a short comment describing this function
## Create a cache-enabled wrapper around a matrix.
##
## Returns a list of four accessor functions:
##   set(y)         replace the stored matrix and clear the cached result
##   get()          return the stored matrix
##   setmatrix(v)   store a computed result (e.g. the inverse) in the cache
##   getmatrix()    return the cached result, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      # Replacing the data invalidates any previously cached result.
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setmatrix = function(invertMatrix) cached <<- invertMatrix,
    getmatrix = function() cached
  )
}
## Write a short comment describing this function
## Return the inverse of the matrix wrapped by a makeCacheMatrix object,
## computing it (via invertMatrix) only on the first call and serving the
## cached inverse afterwards.
##
## Args:
##   x:   a cache object created by makeCacheMatrix().
##   ...: further arguments forwarded to invertMatrix().
##
## Returns:
##   The inverse matrix of x$get().
##
## Fix: the original cached the full list(determinant, inverse) returned by
## invertMatrix() but returned only the inverse on the first call, so every
## cached call returned a different type (the whole list). Now only the
## inverse is cached, making the return value consistent across calls.
cacheSolve <- function(x, ...) {
  inv <- x$getmatrix()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # invertMatrix() returns list(determinant, inverse); keep the inverse only.
  inv <- invertMatrix(x$get(), ...)[[2]]
  x$setmatrix(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
Konko1974/ProgrammingAssignment2-1
|
R
| false
| false
| 2,343
|
r
|
## Put comments here that give an overall description of what your
## functions do
##This function get a square matrix in input, and return its determinant.
##For square-matrix more than 2x2, it recursive call itself to calculate
##detrminant ad inverted matrix
##The output is a list with detrminant and inverted matrix, in this order
# Compute the determinant and inverse of a square matrix by recursive
# cofactor (Laplace) expansion along the first row.
# Returns list(determinant, inverse) for matrices larger than 2x2.
# NOTE(review): for a 2x2 input the function returns a bare scalar
# determinant (no inverse, no list), so the return type is inconsistent
# with larger inputs -- callers indexing result[[2]] will fail on 2x2 data.
invertMatrix<-function(myMatrix)
{
x<-myMatrix
cofactorsMatrix<-myMatrix
cofactorsTransposedMatrix<-myMatrix
myRow<-1
determinant<-0
base<--1  # base of the (-1)^(row+col) cofactor sign
exponent<-0
sign<-0
complement<-0
cofactor<-0
if (nrow(x) == 2)
{
# Base case: 2x2 determinant ad - bc, returned as a plain scalar.
return((x[1,1]*x[2,2]) - (x[1,2]*x[2,1]))
}
else
{
while (myRow<=nrow(x))
{
# Drop the current row to build the minors for this row.
y<-matrix(x[row(x)!=myRow],nrow=nrow(x)-1,ncol=ncol(x))
myColumn<-1
while (myColumn<=ncol(x))
{
element<-x[myRow,myColumn]
# Drop the current column too: z is the minor of element (myRow, myColumn).
z<-matrix(y[col(y)!=myColumn],nrow=nrow(y),ncol=ncol(y)-1)
exponent<-myRow+myColumn
sign<-(base^exponent)
# Recursive determinant of the minor; [[1]] also works on the bare
# scalar returned by the 2x2 base case.
complement<-invertMatrix(z)[[1]]
cofactor<-sign * complement
cofactorsMatrix[myRow,myColumn]<-cofactor
if(myRow==1)
{
# Laplace expansion along the first row only.
determinant<-determinant + (element * cofactor)
}
myColumn<-myColumn+1
}
myRow<-myRow+1
}
}
# Manual column-by-column transpose of the cofactor matrix (equivalent to t()).
i<-2
vect<-c(cofactorsMatrix[,1])
while(i<=ncol(cofactorsMatrix))
{
vect<-c(vect,cofactorsMatrix[,i])
i<-i+1
}
cofactorsTransposedMatrix<-matrix(vect,nrow=ncol(cofactorsMatrix),ncol=nrow(cofactorsMatrix),byrow=TRUE)
# Inverse = adjugate (transposed cofactor matrix) / determinant.
invertMatrix<-cofactorsTransposedMatrix * (1/determinant)
result<-list(determinant,invertMatrix)
return(result)
}
## Write a short comment describing this function
## Create a cache-enabled wrapper around a matrix.
## Returns a list of accessor closures sharing the enclosing environment:
##   set(y): replace the matrix and clear the cached value
##   get(): return the matrix
##   setmatrix(v): store a computed value (e.g. the inverse) in the cache
##   getmatrix(): return the cached value, or NULL if none
makeCacheMatrix <- function(x = matrix()) {
m <- NULL  # cached result; NULL until setmatrix() is called
set <- function(y)
{
x <<- y
m <<- NULL  # new data invalidates the cache
}
get <- function() x
setmatrix <- function(invertMatrix) m <<- invertMatrix
getmatrix <- function() m
list(set = set
, get = get
, setmatrix = setmatrix
, getmatrix = getmatrix
)
}
## Write a short comment describing this function
## Return the inverse of the matrix wrapped by a makeCacheMatrix object,
## computing it (via invertMatrix) only on the first call and serving the
## cached inverse afterwards.
##
## Args:
##   x:   a cache object created by makeCacheMatrix().
##   ...: further arguments forwarded to invertMatrix().
##
## Returns:
##   The inverse matrix of x$get().
##
## Fix: the original cached the full list(determinant, inverse) returned by
## invertMatrix() but returned only the inverse on the first call, so every
## cached call returned a different type (the whole list). Now only the
## inverse is cached, making the return value consistent across calls.
cacheSolve <- function(x, ...) {
  inv <- x$getmatrix()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # invertMatrix() returns list(determinant, inverse); keep the inverse only.
  inv <- invertMatrix(x$get(), ...)[[2]]
  x$setmatrix(inv)
  inv
}
|
## Cartoon example: decompose scale-dependent diversity change along an
## elevation gradient into three components -- SAD shape ('sad'), total
## abundance ('N') and spatial aggregation ('agg') -- using the mobr package.
library(mobr)
library(purrr)
library(dplyr)
library(ggplot2)
# Input table: columns 2:4 are used as the community (species) matrix,
# columns 1 and 5:7 as plot attributes incl. 'x'/'y' coordinates and a
# 'cat' column labelling the scenario each plot belongs to.
dat = read.csv('./data/cartoon_data.csv')
scenarios = c('sad', 'N', 'agg')
mob_in_list = list()
mob_in_list$sad = make_mob_in(dat[ , 2:4], dat[ , c(1, 5:7)], c('x', 'y'))
mob_in_list$N = mob_in_list$sad
mob_in_list$agg = mob_in_list$sad
# Keep only the plots belonging to each scenario.
mob_in_list$sad = subset(mob_in_list$sad, cat == "sad")
mob_in_list$N = subset(mob_in_list$N, cat == "N")
mob_in_list$agg = subset(mob_in_list$agg, cat == "agg")
# Run the mobr delta-stats decomposition along 'elev' for each scenario.
# NOTE(review): n_perm = 20 is very low -- presumably demo-only; increase
# for a real analysis.
mob = purrr::map(mob_in_list, function(x)
    get_delta_stats(x, 'elev', n_perm = 20))
plot(mob$sad, eff_sub_effort = F, eff_disp_pts = F, eff_disp_smooth = T)
plot(mob$N, eff_sub_effort = F, eff_disp_pts = F, eff_disp_smooth = T)
plot(mob$agg, eff_sub_effort = F, eff_disp_pts = F, eff_disp_smooth = T)
# For group 3, rescale plot-based effort and draw richness (S) vs effort,
# one line per rarefaction test.
# NOTE(review): the 5.5 factor appears to put plot-level effort on the same
# axis as individual-level effort -- confirm against the data.
mob$sad$S_df %>% subset(group == 3) %>%
    mutate(effort = ifelse(sample == "plot",
                           effort * 5.5, effort)) %>%
    group_by(test) %>%
    ggplot(aes(effort, S)) +
      geom_line(aes(group = test, color = test, lty=test))
mob$N$S_df %>% subset(group == 3) %>%
    mutate(effort = ifelse(sample == "plot",
                           effort * 5.5, effort)) %>%
    group_by(test) %>%
    ggplot(aes(effort, S)) +
      geom_line(aes(group = test, color = test, lty=test))
mob$agg$S_df %>% subset(group == 3) %>%
    mutate(effort = ifelse(sample == "plot",
                           effort * 5.5, effort)) %>%
    group_by(test) %>%
    ggplot(aes(effort, S)) +
      geom_line(aes(group = test, color = test, lty=test))
|
/scripts/cartoon_analysis.R
|
permissive
|
l5d1l5/elev_gradient
|
R
| false
| false
| 1,507
|
r
|
## Cartoon example: decompose scale-dependent diversity change along an
## elevation gradient into three components -- SAD shape ('sad'), total
## abundance ('N') and spatial aggregation ('agg') -- using the mobr package.
library(mobr)
library(purrr)
library(dplyr)
library(ggplot2)
# Input table: columns 2:4 are used as the community (species) matrix,
# columns 1 and 5:7 as plot attributes incl. 'x'/'y' coordinates and a
# 'cat' column labelling the scenario each plot belongs to.
dat = read.csv('./data/cartoon_data.csv')
scenarios = c('sad', 'N', 'agg')
mob_in_list = list()
mob_in_list$sad = make_mob_in(dat[ , 2:4], dat[ , c(1, 5:7)], c('x', 'y'))
mob_in_list$N = mob_in_list$sad
mob_in_list$agg = mob_in_list$sad
# Keep only the plots belonging to each scenario.
mob_in_list$sad = subset(mob_in_list$sad, cat == "sad")
mob_in_list$N = subset(mob_in_list$N, cat == "N")
mob_in_list$agg = subset(mob_in_list$agg, cat == "agg")
# Run the mobr delta-stats decomposition along 'elev' for each scenario.
# NOTE(review): n_perm = 20 is very low -- presumably demo-only; increase
# for a real analysis.
mob = purrr::map(mob_in_list, function(x)
    get_delta_stats(x, 'elev', n_perm = 20))
plot(mob$sad, eff_sub_effort = F, eff_disp_pts = F, eff_disp_smooth = T)
plot(mob$N, eff_sub_effort = F, eff_disp_pts = F, eff_disp_smooth = T)
plot(mob$agg, eff_sub_effort = F, eff_disp_pts = F, eff_disp_smooth = T)
# For group 3, rescale plot-based effort and draw richness (S) vs effort,
# one line per rarefaction test.
# NOTE(review): the 5.5 factor appears to put plot-level effort on the same
# axis as individual-level effort -- confirm against the data.
mob$sad$S_df %>% subset(group == 3) %>%
    mutate(effort = ifelse(sample == "plot",
                           effort * 5.5, effort)) %>%
    group_by(test) %>%
    ggplot(aes(effort, S)) +
      geom_line(aes(group = test, color = test, lty=test))
mob$N$S_df %>% subset(group == 3) %>%
    mutate(effort = ifelse(sample == "plot",
                           effort * 5.5, effort)) %>%
    group_by(test) %>%
    ggplot(aes(effort, S)) +
      geom_line(aes(group = test, color = test, lty=test))
mob$agg$S_df %>% subset(group == 3) %>%
    mutate(effort = ifelse(sample == "plot",
                           effort * 5.5, effort)) %>%
    group_by(test) %>%
    ggplot(aes(effort, S)) +
      geom_line(aes(group = test, color = test, lty=test))
|
####---------------------------------
#### EvolutionaryHistoryClusterRun
####---------------------------------
# This script allows running one the cluster the different scenario (non-competitive vs. competitive) of brain size evolution in frugivorous primates.
# This is the example script for low frugivory and low folivory threshold i.e.:
# a=1
# frugivoryThreshold=frugivoryThresholdVector[a]
# b=1
# folivoryThreshold=folivoryThresholdVector[b]
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###Set working directory (cluster path) and create the output folders.
# NOTE(review): setwd() and rm(list=ls()) in a script are discouraged; the
# workspace wipe below is immediately followed by restoring an .RData image.
setwd("/users/biodiv/bperez/data/others/Benji/Evolutionary_history_3/")
dir.create(file.path("Sample_size"), showWarnings = FALSE)
dir.create(file.path("extdata"), showWarnings = FALSE)
dir.create(file.path("OutputEvolModel"), showWarnings = FALSE)
dir.create(file.path("Dataplot"), showWarnings = FALSE)
#Import environment: clear the workspace, then restore the saved image that
#provides summaryData, matrixRangingSensitivity, thresholdPresenceRange, etc.
rm(list=ls())
load("geography_traits_biogeobears.RData")
#load("../BioGeoBEARS/geography_traits_biogeobears.RData")
#Libraries
# require(devtools)
# install_version("phytools", version = "0.6.99", repos = "http://cran.us.r-project.org", lib="/users/biodiv/bperez/packages/")
# Phylogenetics packages (some loaded from a user-local library path).
library(caper)
library(ape)
library(phytools) #, lib.loc = "/users/biodiv/bperez/packages/")
library(geiger)
# library(MCMCglmm, lib.loc = "/users/biodiv/bperez/packages/")
library(ellipsis, lib.loc = "/users/biodiv/bperez/packages/")
library(RPANDA, lib.loc = "/users/biodiv/bperez/packages/")
library(BioGeoBEARS)
library(optimx)
library(svMisc, lib.loc = "/users/biodiv/bperez/packages/")
#Parallelizing
library(parallel)
##--------
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Sensitivity-analysis grids: dietary thresholds (% of diet) and the
# geographic co-occurrence thresholds (fraction of shared range).
frugivoryThresholdVector <- seq(from=20, to=40, by=20)
folivoryThresholdVector <- seq(from=40, to=60, by=20)
geographicThresholdVector <- c(10,30)/100
randomSampling=10     # number of random data resamplings (parallel jobs, index d)
numberSimulations=10  # stochastic character maps / BSMs per fit
numberTrees=1
# Total number of model fits across the whole sensitivity grid.
totModels=randomSampling*numberSimulations*numberTrees*length(frugivoryThresholdVector)*length(folivoryThresholdVector)*length(geographicThresholdVector)
progression=0
##--------
# Set the frugivory and folivory threshold to consider:
# (this script instance is hard-wired to a=1, b=2)
a=1
frugivoryThreshold=frugivoryThresholdVector[a]
b=2
folivoryThreshold=folivoryThresholdVector[b]
##------------
## Initialise output files:
##------------
#Data files: one pre-allocated summary frame per brain measure.
summaryBrainFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryEQFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryNeocortexFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryHippocampusFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryCerebellumFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryStriatumFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryMOBFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryOpticTractFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
#Sample size files: per-combination sample-size records.
repetition=length(frugivoryThresholdVector)*length(folivoryThresholdVector)*length(geographicThresholdVector)*randomSampling
checkSampleFruit <- rep(NA, times=repetition)
checkSampleLeaf <- rep(NA, times=repetition)
checkSampleRange <- rep(NA, times=repetition)
checkSampleBrain <- rep(NA, times=repetition)
checkSampleEQ <- rep(NA, times=repetition)
checkSampleNeocortex <- rep(NA, times=repetition)
checkSampleHippocampus <- rep(NA, times=repetition)
checkSampleCerebellum <- rep(NA, times=repetition)
checkSampleStriatum <- rep(NA, times=repetition)
checkSampleMOB <- rep(NA, times=repetition)
# NOTE(review): checkSampleRange is initialised twice (also above) --
# likely a copy-paste leftover; harmless but redundant.
checkSampleRange <- rep(NA, times=repetition)
### Run the different evolutionary models ###
for(c in 1:length(geographicThresholdVector)){
## Adding the co-occurrence code for the current geographic threshold c:
## look up each species' range code in the sensitivity matrix column
## matching geographicThresholdVector[c].
summaryData$geographicCode <- matrixRangingSensitivity[match(summaryData$SpeciesForPhylogeny,matrixRangingSensitivity$SpeciesForPhylogeny),which(thresholdPresenceRange==geographicThresholdVector[c])]
# Load the biogeographic stochastic maps (BSM_output) for this threshold.
load(paste("BioGeoBEARS/BSM_output_file", c, ".Rdata", sep=""))
# load(paste("../BioGeoBEARS/BSM_output_file", c, ".Rdata", sep=""))
# Snapshot of summaryData restored at the start of each parallel job.
summaryData_init <- summaryData
parrallel_run <- function(d){
# Seed the RNG with the job index d for reproducible resampling.
# NOTE(review): the repeated set.seed() calls and rm(.Random.seed) are
# redundant -- a single base::set.seed(d) should suffice; rm() here targets
# the local frame, not .GlobalEnv where .Random.seed lives -- confirm intent.
base::set.seed(d)
base::set.seed(d)
rm(.Random.seed)
base::set.seed(d)
base::set.seed(d)
# Fit six trait-evolution models -- BM, OU, EB (geiger) and three
# biogeography-aware competition models MC, DDexp, DDlin (RPANDA,
# fit_t_comp_subgroup) -- to one continuous trait within one dietary
# subgroup, for each of N stochastic character maps / BSMs, and return a
# data frame with per-map parameter estimates, AICc differences and Akaike
# weights (one row per map, NA row if any model returned NA AICc).
runComparisonModelsCompetition <- function(
#######################
## Run the phylogenetic comparison between random evolution (BM, OU) or competitive scenario with MC= exclusion from competitive
# Taxa = mutual exclusion, or DDlin and DDexp, which are positive or negative density dependance to taxa of same "group" (here feeding group)
# of the evolutionary rate, either linearily (lin), or exponentially (exp)
######################
simmap, #The simmap object for grouping
numberMaps, #Number of simmap and BSM
tree, #The phylo object of the tree
data, #Data including: species column name as inTree. This column should be name "SpeciesForPhylogeny". a "Guild" column has to specify the grouping variable.
#Other variables can be associated, namely the trait and subgroup variable
subgroup, #Character string indicating the group of the "Guild" column that is of interest for the analysis
trait, #Column name of variable of interest. String
ana_events_tables,
clado_events_tables
){
#isolate data from subgroup of interest
group.map<-simmap
data.grouped<-data[which(data$Guild==subgroup),]
mass<-data.grouped[,which(colnames(data.grouped)=="SpeciesForPhylogeny")]
names(mass)<-data.grouped[,which(colnames(data.grouped)=="SpeciesForPhylogeny")]
# Prune the tree down to the species present in the subgroup data.
nc<-geiger::name.check(tree,mass)
data.grouped.tree<-drop.tip(tree,nc$tree_not_data)
subdata<-data.grouped[which(data.grouped$SpeciesForPhylogeny%in%data.grouped.tree$tip.label),]
#set up analyses
N=numberMaps #number of stochastic maps to analyze
# NOTE(review): assigning to T masks the TRUE shorthand inside this
# function; a safer local name (e.g. trait_col) would avoid surprises.
T=trait #which trait to analyze
res.mat<-matrix(nrow=N,ncol=53)
colnames(res.mat)<-c("subgroup","trait","N","BM.lnL","BM.sig2","BM.z0","BM.AICc","BM.conv",
"OU.lnL","OU.sig2","OU.alpha","OU.z0","OU.AICc","OU.conv",
"EB.lnL","EB.sig2","EB.alpha","EB.z0","EB.AICc","EB.conv",
"MCgeo.dietmap","MCgeo.lnL","MCgeo.sig2","MCgeo.S","MCgeo.z0","MCgeo.AICc","MCgeo.conv",
"DDlingeo.dietmap","DDlingeo.lnL","DDlingeo.sig2","DDlingeo.b","DDlingeo.z0","DDlingeo.AICc","DDlingeo.conv",
"DDexpgeo.dietmap","DDexpgeo.lnL","DDexpgeo.sig2","DDexpgeo.r","DDexpgeo.z0","DDexpgeo.AICc","DDexpgeo.conv",
"BM.delaic","OU.delaic","EB.delaic","MCgeo.delaic","DDlingeo.delaic","DDexpgeo.delaic",
"BM.wi","OU.wi","EB.wi","MCgeo.wi","DDlingeo.wi","DDexpgeo.wi")
#fit BM, OU, EB, MC, DDexp, and DDlin models to subgroup trait data
#write.table("worked", "/OutputEvolModel/workedinitFunction.txt", row.names=FALSE, col.names=TRUE, sep="\t")
for(i in 1:N){
# Restrict map i to the tips actually present in the tree.
group.map2 <-drop.tip.simmap(group.map[[i]],
group.map[[i]]$tip.label[which(!group.map[[i]]$tip.label%in%tree$tip.label)])
j=which(colnames(subdata)==T)
M<-subdata[,j]
subtree<-data.grouped.tree
names(M)<-subdata$SpeciesForPhylogeny
# Drop missing trait values. NOTE(review): the comparison is against the
# string 'NA' -- subset() happens to exclude genuine NA values too
# (NA conditions are dropped), but !is.na(M) would state the intent.
M<-subset(M,M!='NA')
nc<-geiger::name.check(subtree,M)
if(is.list(nc)){
subtree<-drop.tip(subtree,nc$tree_not_data)
}
# Null (non-competitive) models from geiger.
o2<-geiger::fitContinuous(subtree,M,model="BM", ncores=1)
BM.log_lik<-o2$opt$lnL
BM.sig2<-o2$opt$sigsq
BM.z0<-o2$opt$z0
BM.aicc<-o2$opt$aicc
BM.conv<-as.numeric(tail(o2$res[,length(o2$res[1,])],n=1))
o3<-geiger::fitContinuous(subtree,M,model="OU", ncores=1)
OU.log_lik<-o3$opt$lnL
OU.sig2<-o3$opt$sigsq
OU.alpha<-o3$opt$alpha
OU.z0<-o3$opt$z0
OU.aicc<-o3$opt$aicc
OU.conv<-as.numeric(tail(o3$res[,length(o3$res[1,])],n=1))
o32<-geiger::fitContinuous(subtree,M,model="EB", ncores=1)
EB.log_lik<-o32$opt$lnL
EB.sig2<-o32$opt$sigsq
EB.alpha<-o32$opt$a
EB.z0<-o32$opt$z0
EB.aicc<-o32$opt$aicc
EB.conv<-as.numeric(tail(o32$res[,length(o32$res[1,])],n=1))
# Competition models: matching competition (MC) ...
o4<-fit_t_comp_subgroup(full.phylo=tree,
ana.events=ana_events_tables[[i]],
clado.events=clado_events_tables[[i]],
stratified=FALSE,map=group.map2,data=M,
trim.class=subgroup,model="MC",par=NULL,method="Nelder-Mead",bounds=NULL)
MCgeo.lnL<-o4$LH
MCgeo.sig2<-o4$sig2
MCgeo.S<-o4$S
MCgeo.z0<-o4$z0
MCgeo.aicc<-o4$aicc
MCgeo.conv<-o4$convergence
# ... exponential density dependence (DDexp) ...
o5<-fit_t_comp_subgroup(full.phylo=tree,
ana.events=ana_events_tables[[i]],
clado.events=clado_events_tables[[i]],
stratified=FALSE,map=group.map2,data=M,trim.class=subgroup,
model="DDexp",par=NULL,method="Nelder-Mead",bounds=NULL)
DDexpgeo.lnL<-o5$LH
DDexpgeo.sig2<-o5$sig2
DDexpgeo.r<-o5$r
DDexpgeo.z0<-o5$z0
DDexpgeo.aicc<-o5$aicc
DDexpgeo.conv<-o5$convergence
# ... and linear density dependence (DDlin).
o6<-fit_t_comp_subgroup(full.phylo=tree,
ana.events=ana_events_tables[[i]],
clado.events=clado_events_tables[[i]],
stratified=FALSE,map=group.map2,data=M,trim.class=subgroup,
model="DDlin",par=NULL,method="Nelder-Mead",bounds=NULL)
DDlingeo.lnL<-o6$LH
DDlingeo.sig2<-o6$sig2
DDlingeo.b<-o6$b
DDlingeo.z0<-o6$z0
DDlingeo.aicc<-o6$aicc
DDlingeo.conv<-o6$convergence
# Model comparison only if all six fits produced a valid AICc.
if(length(which(is.na(c(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc))))==0){
# Delta AICc relative to the best (lowest-AICc) model.
BM.delaic<-BM.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
OU.delaic<-OU.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
EB.delaic<-EB.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
MCgeo.delaic<-MCgeo.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
DDlingeo.delaic<-DDlingeo.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
DDexpgeo.delaic<-DDexpgeo.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
# Akaike weights: exp(-0.5*delta) normalised over the six models.
all=sum(exp(-0.5*BM.delaic),exp(-0.5*OU.delaic),exp(-0.5*EB.delaic),exp(-0.5*MCgeo.delaic),exp(-0.5*DDlingeo.delaic),exp(-0.5*DDexpgeo.delaic))
BM.wi<-exp(-0.5*BM.delaic)/all
OU.wi<-exp(-0.5*OU.delaic)/all
EB.wi<-exp(-0.5*EB.delaic)/all
MCgeo.wi<-exp(-0.5*MCgeo.delaic)/all
DDlingeo.wi<-exp(-0.5*DDlingeo.delaic)/all
DDexpgeo.wi<-exp(-0.5*DDexpgeo.delaic)/all
# dietmap columns are placeholders (always NA).
MCgeo.iter <- NA
DDlingeo.iter <- NA
DDexpgeo.iter <- NA
# Assemble the 53-element result row matching colnames(res.mat).
int<-c(subgroup,names(subdata)[j],length(subtree$tip.label),BM.log_lik,BM.sig2,BM.z0,BM.aicc,BM.conv,
OU.log_lik,OU.sig2,OU.alpha,OU.z0,OU.aicc,OU.conv,
EB.log_lik,EB.sig2,EB.alpha,EB.z0,EB.aicc,EB.conv,
MCgeo.iter,MCgeo.lnL,MCgeo.sig2,MCgeo.S,MCgeo.z0,MCgeo.aicc,MCgeo.conv,
DDlingeo.iter,DDlingeo.lnL,DDlingeo.sig2,DDlingeo.b,DDlingeo.z0,DDlingeo.aicc,DDlingeo.conv,
DDexpgeo.iter,DDexpgeo.lnL,DDexpgeo.sig2,DDexpgeo.r,DDexpgeo.z0,DDexpgeo.aicc,DDexpgeo.conv,
BM.delaic,OU.delaic,EB.delaic,MCgeo.delaic,DDlingeo.delaic,DDexpgeo.delaic,
BM.wi,OU.wi,EB.wi,MCgeo.wi,DDlingeo.wi,DDexpgeo.wi)
res.mat[i,]<-int
}
else{
# At least one fit failed: record an all-NA row for this map.
res.mat[i,] <- rep(NA, times=ncol(res.mat))
}
}
res.mat <- as.data.frame(res.mat)
resm <- data.frame(res.mat)
return(resm)
}
##### To run with different d values (one parallel job per resampling index d)
summaryData <- summaryData_init
#Source functions & packages
#Capitalize the first letter of a string (helper; apparently unused here).
firstup <- function(x) {
substr(x, 1, 1) <- toupper(substr(x, 1, 1))
x
}
#SaveDataGeneral: reload the resampled data set for this (a, b, c, d)
#combination as character columns, then convert the numeric ones back.
summaryData <- read.table(paste("Dataplot/Dataplot", a, "_", b, "_", c, "_", d,".txt", sep=""), header=TRUE, sep="\t", colClasses = "character")
for (col in c(4:34, 38:47)){summaryData[,col] <- as.numeric(summaryData[,col])}
# Record per-combination sample sizes to Sample_size/ for later QC.
write.table(length(summaryData$DietaryGuild[summaryData$DietaryGuild=="Fruit"]),
paste("Sample_size/checkSampleFruit", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(length(summaryData$DietaryGuild[summaryData$DietaryGuild=="Leaf"]),
paste("Sample_size/checkSampleLeaf", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(length(summaryData$geographicCode[!is.na(summaryData$geographicCode)]),
paste("Sample_size/checkSampleRange", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleBrain", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Bodymass)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleEQ", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Neocortex)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleNeocortex", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Hippocampus)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleHippocampus", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Cerebellum)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleCerebellum", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Striatum)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleStriatum", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$MOB)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleMOB", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
##--------
# Evolutionary history of diet
##--------
# Named vector species -> dietary guild, restricted to species with a
# known guild and a geographic range code.
vectorDiet <- summaryData$DietaryGuild
names(vectorDiet) <- summaryData$SpeciesForPhylogeny
vectorDiet <- vectorDiet[vectorDiet!="Other"&!is.na(summaryData$geographicCode)]
#Load and prune the tree to the species with diet information.
phylo <- read.tree("Tree/Tree_biogeobears.nex")
# phylo <- read.tree("../Raw_data/Tree/Tree_biogeobears.nex")
phylo <- drop.tip(phylo,
phylo$tip.label[
which(phylo$tip.label
%nin%names(vectorDiet))])
#simmapdiet1 <- make.simmap(tree=phylo, vectorDiet, model="ARD", pi="estimated", nsim=numberSimulations)#inequal and not symmetrical rate of transition from folivory to frugivory etc...
# Reuse precomputed stochastic diet maps; export the first map's Q matrix.
load(file=paste("Simmap/Output_simmap_transition", a, "_", b, "_", c, "_", d, ".Rdata", sep=""))
write.table(as.vector(simmapdiet1[[1]]$Q[,1]), paste("OutputEvolModel/Output_simmap_transition", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
# Evolutionary history of traits (~brain size) with and without competition
#following Drury et al.'s approach to see whether one trait (here one brain area size) phylogenetical history is better described if considering competition
#https://academic.oup.com/sysbio/article/65/4/700/1753588
#https://journals.plos.org/plosbiology/article?rev=1&id=10.1371/journal.pbio.2003563#pbio.2003563.ref028
##--------
#Create variables of interest (relative brain size and EQ).
summaryData$ratioBrain <- summaryData$Brain*1.036*(10**-3)/summaryData$Bodymass #Following decasien for multiplication by 1.036
summaryData$EQ <- summaryData$Brain*1.036*(10**-3)/(0.085*summaryData$Bodymass**0.775) #Following decasien, according to #Jerison, H. J. Evolution of the Brain and Intelligence (Academic, 1973).
#Make it have a symmetrical (and if possible Gaussian) distribution, since that seems to be a prerequisite of the analysis.
hist(summaryData$ratioBrain)
hist(log(summaryData$ratioBrain))
hist(summaryData$EQ)
summaryData$EQ.log <- log(summaryData$EQ)
hist(summaryData$Brain)
summaryData$Brain.log <- log(summaryData$Brain)
hist(summaryData$Brain.log)
#Reload the tree so it is the same as the one used for BioGeoBEARS.
phylo <- read.tree("Tree/Tree_biogeobears.nex")
colnames(summaryData)[colnames(summaryData)=="DietaryGuild"] <- "Guild"
# Brain: fit the six evolutionary models to (log) raw brain size for the
# frugivore subgroup, skipping the fit if its output file already exists.
# NOTE(review): this "Raw" variant overwrites ratioBrain (computed above as
# brain/body mass) with the uncorrected Brain value -- confirm intentional.
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_BrainRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("Brain")
summaryData$ratioBrain <- summaryData$Brain
hist(summaryData$ratioBrain )
summaryData$ratioBrain.log <- log(summaryData$ratioBrain)
resultBrainFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$Brain.log)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioBrain.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultBrainFrugivory, paste("OutputEvolModel/Output_evolutionary_history_BrainRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
# Neocortex: same model-comparison pattern as the Brain block above,
# applied to (log) raw neocortex size; skipped if output already exists.
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_NeocortexRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("Neocortex")
summaryData$ratioNeocortex <- summaryData$Neocortex
hist(summaryData$ratioNeocortex )
summaryData$ratioNeocortex.log <- log(summaryData$ratioNeocortex)
resultNeocortexFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioNeocortex)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioNeocortex.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultNeocortexFrugivory, paste("OutputEvolModel/Output_evolutionary_history_NeocortexRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#Hippocampus: same model-comparison pattern, (log) raw hippocampus size.
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_HippocampusRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("ratioHippocampus")
summaryData$ratioHippocampus <- summaryData$Hippocampus
hist(summaryData$ratioHippocampus )
summaryData$ratioHippocampus.log <- log(summaryData$ratioHippocampus)
resultHippocampusFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioHippocampus)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioHippocampus.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultHippocampusFrugivory, paste("OutputEvolModel/Output_evolutionary_history_HippocampusRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#Cerebellum: same model-comparison pattern, (log) raw cerebellum size.
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_CerebellumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("ratioCerebellum")
summaryData$ratioCerebellum <- summaryData$Cerebellum
hist(summaryData$ratioCerebellum )
summaryData$ratioCerebellum.log <- log(summaryData$ratioCerebellum)
resultCerebellumFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioCerebellum)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioCerebellum.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultCerebellumFrugivory, paste("OutputEvolModel/Output_evolutionary_history_CerebellumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#Striatum: same model-comparison pattern, (log) raw striatum size.
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_StriatumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("ratioStriatum")
summaryData$ratioStriatum <- summaryData$Striatum
hist(summaryData$ratioStriatum)
summaryData$ratioStriatum.log <- log(summaryData$ratioStriatum)
resultStriatumFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioStriatum)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioStriatum.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultStriatumFrugivory, paste("OutputEvolModel/Output_evolutionary_history_StriatumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#MOB (main olfactory bulb): same model-comparison pattern, (log) raw size.
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_MOBRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("MOB")
summaryData$ratioMOB <- summaryData$MOB
hist(summaryData$ratioMOB)
summaryData$ratioMOB.log <- log(summaryData$ratioMOB)
hist(summaryData$ratioMOB.log)
resultMOBFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioMOB)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioMOB.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultMOBFrugivory, paste("OutputEvolModel/Output_evolutionary_history_MOBRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
}
# Run the resampling jobs in parallel (10 workers, one job per d).
# NOTE(review): prefer TRUE over the reassignable shorthand T for
# mc.preschedule; the commented loop below is the serial fallback.
parallel::mclapply(1:randomSampling, parrallel_run, mc.cores=10, mc.preschedule = T)
# for (d in 1:randomSampling){
#   parrallel_run(d)
# }
}
## END ANALYSIS
##----------------------
##--
## Saving environment so the run can be inspected/resumed later.
save.image("geography_trait_models1.RData")
|
/Scripts/Analysis1_competitionsignal/EvolutionaryHistoryClusterRun_a1b2_raw.R
|
no_license
|
benjaminrobira/Meta_analysis_cognition_primates
|
R
| false
| false
| 24,588
|
r
|
####---------------------------------
#### EvolutionaryHistoryClusterRun
####---------------------------------
# This script allows running one the cluster the different scenario (non-competitive vs. competitive) of brain size evolution in frugivorous primates.
# This is the example script for low frugivory and low folivory threshold i.e.:
# a=1
# frugivoryThreshold=frugivoryThresholdVector[a]
# b=1
# folivoryThreshold=folivoryThresholdVector[b]
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###Set working directory
setwd("/users/biodiv/bperez/data/others/Benji/Evolutionary_history_3/")
dir.create(file.path("Sample_size"), showWarnings = FALSE)
dir.create(file.path("extdata"), showWarnings = FALSE)
dir.create(file.path("OutputEvolModel"), showWarnings = FALSE)
dir.create(file.path("Dataplot"), showWarnings = FALSE)
#Import environment
rm(list=ls())
load("geography_traits_biogeobears.RData")
#load("../BioGeoBEARS/geography_traits_biogeobears.RData")
#Libraries
# require(devtools)
# install_version("phytools", version = "0.6.99", repos = "http://cran.us.r-project.org", lib="/users/biodiv/bperez/packages/")
# Phylogenetics
library(caper)
library(ape)
library(phytools) #, lib.loc = "/users/biodiv/bperez/packages/")
library(geiger)
# library(MCMCglmm, lib.loc = "/users/biodiv/bperez/packages/")
library(ellipsis, lib.loc = "/users/biodiv/bperez/packages/")
library(RPANDA, lib.loc = "/users/biodiv/bperez/packages/")
library(BioGeoBEARS)
library(optimx)
library(svMisc, lib.loc = "/users/biodiv/bperez/packages/")
#Parallelizing
library(parallel)
##--------
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
frugivoryThresholdVector <- seq(from=20, to=40, by=20)
folivoryThresholdVector <- seq(from=40, to=60, by=20)
geographicThresholdVector <- c(10,30)/100
randomSampling=10
numberSimulations=10
numberTrees=1
totModels=randomSampling*numberSimulations*numberTrees*length(frugivoryThresholdVector)*length(folivoryThresholdVector)*length(geographicThresholdVector)
progression=0
##--------
# Set the frugivory and folivory threshold to consider:
a=1
frugivoryThreshold=frugivoryThresholdVector[a]
b=2
folivoryThreshold=folivoryThresholdVector[b]
##------------
## Initialise output files:
##------------
#Data files
summaryBrainFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryEQFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryNeocortexFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryHippocampusFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryCerebellumFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryStriatumFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryMOBFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
summaryOpticTractFrugivory <- as.data.frame(matrix(NA, ncol=6, nrow=totModels))
#Sample size files:
repetition=length(frugivoryThresholdVector)*length(folivoryThresholdVector)*length(geographicThresholdVector)*randomSampling
checkSampleFruit <- rep(NA, times=repetition)
checkSampleLeaf <- rep(NA, times=repetition)
checkSampleRange <- rep(NA, times=repetition)
checkSampleBrain <- rep(NA, times=repetition)
checkSampleEQ <- rep(NA, times=repetition)
checkSampleNeocortex <- rep(NA, times=repetition)
checkSampleHippocampus <- rep(NA, times=repetition)
checkSampleCerebellum <- rep(NA, times=repetition)
checkSampleStriatum <- rep(NA, times=repetition)
checkSampleMOB <- rep(NA, times=repetition)
checkSampleRange <- rep(NA, times=repetition)
### Run the different evolutionary models ###
for(c in 1:length(geographicThresholdVector)){
## Adding the co-occurence
summaryData$geographicCode <- matrixRangingSensitivity[match(summaryData$SpeciesForPhylogeny,matrixRangingSensitivity$SpeciesForPhylogeny),which(thresholdPresenceRange==geographicThresholdVector[c])]
load(paste("BioGeoBEARS/BSM_output_file", c, ".Rdata", sep=""))
# load(paste("../BioGeoBEARS/BSM_output_file", c, ".Rdata", sep=""))
summaryData_init <- summaryData
parrallel_run <- function(d){
base::set.seed(d)
base::set.seed(d)
rm(.Random.seed)
base::set.seed(d)
base::set.seed(d)
runComparisonModelsCompetition <- function(
  #######################
  ## Compare trait-evolution models for one feeding guild across a set of
  ## stochastic diet maps: non-interactive models (BM, OU, EB, fitted with
  ## geiger) versus biogeography-informed competition models fitted with
  ## fit_t_comp_subgroup:
  ##   MC    = matching competition between co-occurring guild members,
  ##   DDlin = linear density-dependence of the evolutionary rate,
  ##   DDexp = exponential density-dependence of the evolutionary rate.
  ## Returns a data.frame with one row per map holding parameter estimates,
  ## log-likelihoods, AICc, convergence codes, delta-AICc and Akaike weights.
  ######################
  simmap, # list of simmap objects describing the "Guild" history (one per map)
  numberMaps, # number of stochastic maps / BSM replicates to iterate over
  tree, # phylo object for the full species set
  data, # data.frame with a "SpeciesForPhylogeny" column matching tip labels,
  # a "Guild" column for grouping, and the trait column named by `trait`
  subgroup, # character: the level of "Guild" to analyse (e.g. "Fruit")
  trait, # character: column name of the trait of interest
  ana_events_tables, # BioGeoBEARS BSM anagenetic event tables (one per map)
  clado_events_tables # BioGeoBEARS BSM cladogenetic event tables (one per map)
){
  # Isolate the rows belonging to the focal guild and prune the tree to
  # the guild's species.
  group.map<-simmap
  data.grouped<-data[which(data$Guild==subgroup),]
  # `mass` is just a named species vector used to diff data vs. tree tips.
  mass<-data.grouped[,which(colnames(data.grouped)=="SpeciesForPhylogeny")]
  names(mass)<-data.grouped[,which(colnames(data.grouped)=="SpeciesForPhylogeny")]
  nc<-geiger::name.check(tree,mass)
  data.grouped.tree<-drop.tip(tree,nc$tree_not_data)
  subdata<-data.grouped[which(data.grouped$SpeciesForPhylogeny%in%data.grouped.tree$tip.label),]
  # Set up the analyses. NOTE(review): `T` shadows the TRUE alias here;
  # left unchanged to keep the code byte-identical.
  N=numberMaps #number of stochastic maps to analyze
  T=trait #which trait to analyze
  # One result row per map; 53 columns = 3 id cols + per-model estimates +
  # delta-AICc and Akaike weights for the six models.
  res.mat<-matrix(nrow=N,ncol=53)
  colnames(res.mat)<-c("subgroup","trait","N","BM.lnL","BM.sig2","BM.z0","BM.AICc","BM.conv",
    "OU.lnL","OU.sig2","OU.alpha","OU.z0","OU.AICc","OU.conv",
    "EB.lnL","EB.sig2","EB.alpha","EB.z0","EB.AICc","EB.conv",
    "MCgeo.dietmap","MCgeo.lnL","MCgeo.sig2","MCgeo.S","MCgeo.z0","MCgeo.AICc","MCgeo.conv",
    "DDlingeo.dietmap","DDlingeo.lnL","DDlingeo.sig2","DDlingeo.b","DDlingeo.z0","DDlingeo.AICc","DDlingeo.conv",
    "DDexpgeo.dietmap","DDexpgeo.lnL","DDexpgeo.sig2","DDexpgeo.r","DDexpgeo.z0","DDexpgeo.AICc","DDexpgeo.conv",
    "BM.delaic","OU.delaic","EB.delaic","MCgeo.delaic","DDlingeo.delaic","DDexpgeo.delaic",
    "BM.wi","OU.wi","EB.wi","MCgeo.wi","DDlingeo.wi","DDexpgeo.wi")
  # Fit BM, OU, EB, MC, DDexp, and DDlin models to the subgroup trait data,
  # once per stochastic map.
  #write.table("worked", "/OutputEvolModel/workedinitFunction.txt", row.names=FALSE, col.names=TRUE, sep="\t")
  for(i in 1:N){
    # Restrict map i to tips present in the working tree.
    group.map2 <-drop.tip.simmap(group.map[[i]],
      group.map[[i]]$tip.label[which(!group.map[[i]]$tip.label%in%tree$tip.label)])
    # Extract the trait as a named vector and drop missing values.
    j=which(colnames(subdata)==T)
    M<-subdata[,j]
    subtree<-data.grouped.tree
    names(M)<-subdata$SpeciesForPhylogeny
    # NOTE(review): subset(M, M != 'NA') drops NA values (the comparison is
    # NA, which subset excludes) but would also drop a literal "NA" string.
    M<-subset(M,M!='NA')
    nc<-geiger::name.check(subtree,M)
    # name.check returns "OK" (non-list) when tree and data already match.
    if(is.list(nc)){
      subtree<-drop.tip(subtree,nc$tree_not_data)
    }
    # --- Brownian motion ---
    o2<-geiger::fitContinuous(subtree,M,model="BM", ncores=1)
    BM.log_lik<-o2$opt$lnL
    BM.sig2<-o2$opt$sigsq
    BM.z0<-o2$opt$z0
    BM.aicc<-o2$opt$aicc
    # Last column of the optimiser trace holds the convergence code.
    BM.conv<-as.numeric(tail(o2$res[,length(o2$res[1,])],n=1))
    # --- Ornstein-Uhlenbeck ---
    o3<-geiger::fitContinuous(subtree,M,model="OU", ncores=1)
    OU.log_lik<-o3$opt$lnL
    OU.sig2<-o3$opt$sigsq
    OU.alpha<-o3$opt$alpha
    OU.z0<-o3$opt$z0
    OU.aicc<-o3$opt$aicc
    OU.conv<-as.numeric(tail(o3$res[,length(o3$res[1,])],n=1))
    # --- Early burst ---
    o32<-geiger::fitContinuous(subtree,M,model="EB", ncores=1)
    EB.log_lik<-o32$opt$lnL
    EB.sig2<-o32$opt$sigsq
    EB.alpha<-o32$opt$a
    EB.z0<-o32$opt$z0
    EB.aicc<-o32$opt$aicc
    EB.conv<-as.numeric(tail(o32$res[,length(o32$res[1,])],n=1))
    # --- Matching competition with biogeography (presumably RPANDA's
    # fit_t_comp_subgroup; confirm against loaded packages) ---
    o4<-fit_t_comp_subgroup(full.phylo=tree,
      ana.events=ana_events_tables[[i]],
      clado.events=clado_events_tables[[i]],
      stratified=FALSE,map=group.map2,data=M,
      trim.class=subgroup,model="MC",par=NULL,method="Nelder-Mead",bounds=NULL)
    MCgeo.lnL<-o4$LH
    MCgeo.sig2<-o4$sig2
    MCgeo.S<-o4$S
    MCgeo.z0<-o4$z0
    MCgeo.aicc<-o4$aicc
    MCgeo.conv<-o4$convergence
    # --- Exponential density-dependence ---
    o5<-fit_t_comp_subgroup(full.phylo=tree,
      ana.events=ana_events_tables[[i]],
      clado.events=clado_events_tables[[i]],
      stratified=FALSE,map=group.map2,data=M,trim.class=subgroup,
      model="DDexp",par=NULL,method="Nelder-Mead",bounds=NULL)
    DDexpgeo.lnL<-o5$LH
    DDexpgeo.sig2<-o5$sig2
    DDexpgeo.r<-o5$r
    DDexpgeo.z0<-o5$z0
    DDexpgeo.aicc<-o5$aicc
    DDexpgeo.conv<-o5$convergence
    # --- Linear density-dependence ---
    o6<-fit_t_comp_subgroup(full.phylo=tree,
      ana.events=ana_events_tables[[i]],
      clado.events=clado_events_tables[[i]],
      stratified=FALSE,map=group.map2,data=M,trim.class=subgroup,
      model="DDlin",par=NULL,method="Nelder-Mead",bounds=NULL)
    DDlingeo.lnL<-o6$LH
    DDlingeo.sig2<-o6$sig2
    DDlingeo.b<-o6$b
    DDlingeo.z0<-o6$z0
    DDlingeo.aicc<-o6$aicc
    DDlingeo.conv<-o6$convergence
    # Model comparison: only computed when all six fits returned an AICc;
    # otherwise the whole row is set to NA below.
    if(length(which(is.na(c(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc))))==0){
      BM.delaic<-BM.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
      OU.delaic<-OU.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
      EB.delaic<-EB.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
      MCgeo.delaic<-MCgeo.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
      DDlingeo.delaic<-DDlingeo.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
      DDexpgeo.delaic<-DDexpgeo.aicc-min(BM.aicc,OU.aicc,EB.aicc,MCgeo.aicc,DDlingeo.aicc,DDexpgeo.aicc)
      # Akaike weights: exp(-0.5 * delta) normalised over the six models.
      all=sum(exp(-0.5*BM.delaic),exp(-0.5*OU.delaic),exp(-0.5*EB.delaic),exp(-0.5*MCgeo.delaic),exp(-0.5*DDlingeo.delaic),exp(-0.5*DDexpgeo.delaic))
      BM.wi<-exp(-0.5*BM.delaic)/all
      OU.wi<-exp(-0.5*OU.delaic)/all
      EB.wi<-exp(-0.5*EB.delaic)/all
      MCgeo.wi<-exp(-0.5*MCgeo.delaic)/all
      DDlingeo.wi<-exp(-0.5*DDlingeo.delaic)/all
      DDexpgeo.wi<-exp(-0.5*DDexpgeo.delaic)/all
      # Map identifiers are not tracked per model; placeholders kept as NA.
      MCgeo.iter <- NA
      DDlingeo.iter <- NA
      DDexpgeo.iter <- NA
      # Assemble the 53-element result row in column order.
      int<-c(subgroup,names(subdata)[j],length(subtree$tip.label),BM.log_lik,BM.sig2,BM.z0,BM.aicc,BM.conv,
        OU.log_lik,OU.sig2,OU.alpha,OU.z0,OU.aicc,OU.conv,
        EB.log_lik,EB.sig2,EB.alpha,EB.z0,EB.aicc,EB.conv,
        MCgeo.iter,MCgeo.lnL,MCgeo.sig2,MCgeo.S,MCgeo.z0,MCgeo.aicc,MCgeo.conv,
        DDlingeo.iter,DDlingeo.lnL,DDlingeo.sig2,DDlingeo.b,DDlingeo.z0,DDlingeo.aicc,DDlingeo.conv,
        DDexpgeo.iter,DDexpgeo.lnL,DDexpgeo.sig2,DDexpgeo.r,DDexpgeo.z0,DDexpgeo.aicc,DDexpgeo.conv,
        BM.delaic,OU.delaic,EB.delaic,MCgeo.delaic,DDlingeo.delaic,DDexpgeo.delaic,
        BM.wi,OU.wi,EB.wi,MCgeo.wi,DDlingeo.wi,DDexpgeo.wi)
      res.mat[i,]<-int
    }
    else{
      res.mat[i,] <- rep(NA, times=ncol(res.mat))
    }
  }
  # Note: because `int` mixes characters and numerics, all columns end up
  # as character in the returned data.frame; callers re-coerce as needed.
  res.mat <- as.data.frame(res.mat)
  resm <- data.frame(res.mat)
  return(resm)
}
##### To run with different d values
summaryData <- summaryData_init
#Source functions & packages
#Capitalize first letter
# Capitalize the first letter of each element of a character vector,
# leaving the rest of each string (and any NA elements) untouched.
firstup <- function(x) {
  leading <- substr(x, 1, 1)
  substr(x, 1, 1) <- toupper(leading)
  x
}
#SaveDataGeneral
summaryData <- read.table(paste("Dataplot/Dataplot", a, "_", b, "_", c, "_", d,".txt", sep=""), header=TRUE, sep="\t", colClasses = "character")
for (col in c(4:34, 38:47)){summaryData[,col] <- as.numeric(summaryData[,col])}
write.table(length(summaryData$DietaryGuild[summaryData$DietaryGuild=="Fruit"]),
paste("Sample_size/checkSampleFruit", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(length(summaryData$DietaryGuild[summaryData$DietaryGuild=="Leaf"]),
paste("Sample_size/checkSampleLeaf", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(length(summaryData$geographicCode[!is.na(summaryData$geographicCode)]),
paste("Sample_size/checkSampleRange", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleBrain", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Bodymass)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleEQ", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Neocortex)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleNeocortex", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Hippocampus)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleHippocampus", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Cerebellum)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleCerebellum", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$Striatum)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleStriatum", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
write.table(nrow(summaryData[!is.na(summaryData$Brain)&!is.na(summaryData$MOB)&summaryData$DietaryGuild=="Fruit",]),
paste("Sample_size/checkSampleMOB", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
##--------
# Evolutionary history of diet
##--------
vectorDiet <- summaryData$DietaryGuild
names(vectorDiet) <- summaryData$SpeciesForPhylogeny
vectorDiet <- vectorDiet[vectorDiet!="Other"&!is.na(summaryData$geographicCode)]
#Load and save tree corresponding to species with diet
phylo <- read.tree("Tree/Tree_biogeobears.nex")
# phylo <- read.tree("../Raw_data/Tree/Tree_biogeobears.nex")
phylo <- drop.tip(phylo,
phylo$tip.label[
which(phylo$tip.label
%nin%names(vectorDiet))])
#simmapdiet1 <- make.simmap(tree=phylo, vectorDiet, model="ARD", pi="estimated", nsim=numberSimulations)#inequal and not symmetrical rate of transition from folivory to frugivory etc...
load(file=paste("Simmap/Output_simmap_transition", a, "_", b, "_", c, "_", d, ".Rdata", sep=""))
write.table(as.vector(simmapdiet1[[1]]$Q[,1]), paste("OutputEvolModel/Output_simmap_transition", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
# Evolutionary history of traits (~brain size) with and without competition
#following Drury et al.'s approach to see whether one trait (here one brain area size) phylogenetical history is better described if considering competition
#https://academic.oup.com/sysbio/article/65/4/700/1753588
#https://journals.plos.org/plosbiology/article?rev=1&id=10.1371/journal.pbio.2003563#pbio.2003563.ref028
##--------
#Create variable of rinterest
summaryData$ratioBrain <- summaryData$Brain*1.036*(10**-3)/summaryData$Bodymass #Following decasien for multiplication by 1.036
summaryData$EQ <- summaryData$Brain*1.036*(10**-3)/(0.085*summaryData$Bodymass**0.775) #Following decasien, according to #Jerison, H. J. Evolution of the Brain and Intelligence (Academic, 1973).
#Make it having symmetrical (and if possible gaussian distribution, since it seems to be a prerequisite of the analysis)
hist(summaryData$ratioBrain)
hist(log(summaryData$ratioBrain))
hist(summaryData$EQ)
summaryData$EQ.log <- log(summaryData$EQ)
hist(summaryData$Brain)
summaryData$Brain.log <- log(summaryData$Brain)
hist(summaryData$Brain.log)
#Reload tree to have same than used for biogeobears
phylo <- read.tree("Tree/Tree_biogeobears.nex")
colnames(summaryData)[colnames(summaryData)=="DietaryGuild"] <- "Guild"
# Brain
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_BrainRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("Brain")
summaryData$ratioBrain <- summaryData$Brain
hist(summaryData$ratioBrain )
summaryData$ratioBrain.log <- log(summaryData$ratioBrain)
resultBrainFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$Brain.log)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioBrain.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultBrainFrugivory, paste("OutputEvolModel/Output_evolutionary_history_BrainRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_NeocortexRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("Neocortex")
summaryData$ratioNeocortex <- summaryData$Neocortex
hist(summaryData$ratioNeocortex )
summaryData$ratioNeocortex.log <- log(summaryData$ratioNeocortex)
resultNeocortexFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioNeocortex)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioNeocortex.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultNeocortexFrugivory, paste("OutputEvolModel/Output_evolutionary_history_NeocortexRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#Hippocampus
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_HippocampusRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("ratioHippocampus")
summaryData$ratioHippocampus <- summaryData$Hippocampus
hist(summaryData$ratioHippocampus )
summaryData$ratioHippocampus.log <- log(summaryData$ratioHippocampus)
resultHippocampusFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioHippocampus)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioHippocampus.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultHippocampusFrugivory, paste("OutputEvolModel/Output_evolutionary_history_HippocampusRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#Cerebellum
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_CerebellumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("ratioCerebellum")
summaryData$ratioCerebellum <- summaryData$Cerebellum
hist(summaryData$ratioCerebellum )
summaryData$ratioCerebellum.log <- log(summaryData$ratioCerebellum)
resultCerebellumFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioCerebellum)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioCerebellum.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultCerebellumFrugivory, paste("OutputEvolModel/Output_evolutionary_history_CerebellumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#Striatum
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_StriatumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("ratioStriatum")
summaryData$ratioStriatum <- summaryData$Striatum
hist(summaryData$ratioStriatum)
summaryData$ratioStriatum.log <- log(summaryData$ratioStriatum)
resultStriatumFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioStriatum)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioStriatum.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultStriatumFrugivory, paste("OutputEvolModel/Output_evolutionary_history_StriatumRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
#MOB
if (!file.exists(paste("OutputEvolModel/Output_evolutionary_history_MOBRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""))){
print("MOB")
summaryData$ratioMOB <- summaryData$MOB
hist(summaryData$ratioMOB)
summaryData$ratioMOB.log <- log(summaryData$ratioMOB)
hist(summaryData$ratioMOB.log)
resultMOBFrugivory <- runComparisonModelsCompetition(
simmap=simmapdiet1,
data=summaryData[!is.na(summaryData$ratioMOB)&!is.na(summaryData$geographicCode)&summaryData$SpeciesForPhylogeny%in%phylo$tip.label,],
subgroup="Fruit",
numberMaps=numberSimulations,
trait="ratioMOB.log",
tree=phylo,
ana_events_tables=BSM_output$RES_ana_events_tables,
clado_events_tables=BSM_output$RES_clado_events_tables
)
write.table(resultMOBFrugivory, paste("OutputEvolModel/Output_evolutionary_history_MOBRaw", a, "_", b, "_", c, "_", d, ".txt", sep=""), row.names=FALSE, col.names=TRUE, sep="\t")
}
}
# Run parallel
parallel::mclapply(1:randomSampling, parrallel_run, mc.cores=10, mc.preschedule = T)
# for (d in 1:randomSampling){
# parrallel_run(d)
# }
}
## END ANALYSIS
##----------------------
##--
## Saving environment
save.image("geography_trait_models1.RData")
|
# Auto-generated fuzzing harness (AFL + valgrind test case) for the internal
# C++ routine mgss:::MVP_normalfactor_rcpp. The 9x8 matrix `A` is filled with
# extreme/denormal doubles and `x` is empty: this input probes edge cases in
# the compiled code, it is not a meaningful computation.
testlist <- list(A = structure(c(3.33925822834345e-294, 5.14291267281886e+25, 1.39067024653022e-309, 3.33925822911813e-294, 5.14291266320765e+25, 1.44589382990762e-303, 7.86273043163713e-236, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 9:8), left = 0L, right = 0L, x = numeric(0))
# Call the unexported Rcpp entry point with the crafted argument list.
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
# Dump the structure of the return value; crash/UB detection is the point.
str(result)
|
/mgss/inst/testfiles/MVP_normalfactor_rcpp/AFL_MVP_normalfactor_rcpp/MVP_normalfactor_rcpp_valgrind_files/1615949745-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 518
|
r
|
# Auto-generated fuzzing harness (AFL + valgrind test case) for the internal
# C++ routine mgss:::MVP_normalfactor_rcpp. The 9x8 matrix `A` is filled with
# extreme/denormal doubles and `x` is empty: this input probes edge cases in
# the compiled code, it is not a meaningful computation.
testlist <- list(A = structure(c(3.33925822834345e-294, 5.14291267281886e+25, 1.39067024653022e-309, 3.33925822911813e-294, 5.14291266320765e+25, 1.44589382990762e-303, 7.86273043163713e-236, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = 9:8), left = 0L, right = 0L, x = numeric(0))
# Call the unexported Rcpp entry point with the crafted argument list.
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
# Dump the structure of the return value; crash/UB detection is the point.
str(result)
|
########## R script: MichInctAddMod ##########
# Fits a t-response (Student-t error) regression additive model to the
# "wife work" data (Ecdat::Workinghours) using the VGAM package, then
# reports the estimated t degrees of freedom and plots the smooth fits.
# Last changed: 19 JUN 2017
# Load required packages:
library(VGAM) ; library(Ecdat)
# Set-up data: response is other household income (in $1000s / 10),
# plus demographic predictors derived from Workinghours.
data(Workinghours)
MichInc <- data.frame(husbandManager = as.numeric(as.character(
  Workinghours$occupation) == "mp"))
MichInc <- transform(MichInc,
  otherIncome = Workinghours$income/10,
  nonWhite = Workinghours$nonwhite,
  homeOwned = Workinghours$owned,
  unemployRate = Workinghours$unemp,
  wifeAge = Workinghours$age,
  wifeEducationYears = Workinghours$education,
  numChildren = with(Workinghours, child5 +
    child13 + child17))
# Obtain vgam() fit with "family = studentt3" (location/scale/df all modelled):
fit <- vgam(otherIncome ~ s(wifeAge,df = 10)+ s(unemployRate,df = 4)
  + s(wifeEducationYears,df = 4)
  + s(numChildren,df = 4) + nonWhite
  + homeOwned + husbandManager,
  family = studentt3,data = MichInc)
# Extract the estimates of the scale and degrees of freedom parameters
# by inverting the link functions applied by studentt3:
sigmaHat <- loge(coef(fit)[2], inverse = TRUE)
nuHat <- loglog(coef(fit)[3], inverse = TRUE)
cat("\n The estimated degrees of freedom for the t-distribution is:",
  signif(nuHat,4),"\n\n")
# Plot the additive fits:
par(mfrow = c(3,3),mai = c(0.7,0.4,0.05,0.04))
# FIX(review): removed a stray double comma after `noxmean = TRUE` that
# passed an empty (missing) argument into plotvgam's `...` and caused an
# "argument is missing, with no default" error when the plot arguments
# were forwarded.
plotvgam(fit,se = TRUE,noxmean = TRUE,bty = "l",lcol = c("darkgreen"),
  scol = "green3",rcol = "dodgerblue",llwd = 2,slwd = 2,ylab = "",
  cex.axis = 1.5,cex.lab = 1.8,scale = 40)
############ End of MichInctAddMod ############
|
/demo/MichInctAddMod.R
|
no_license
|
cran/HRW
|
R
| false
| false
| 1,841
|
r
|
########## R script: MichInctAddMod ##########
# Fits a t-response (Student-t error) regression additive model to the
# "wife work" data (Ecdat::Workinghours) using the VGAM package, then
# reports the estimated t degrees of freedom and plots the smooth fits.
# Last changed: 19 JUN 2017
# Load required packages:
library(VGAM) ; library(Ecdat)
# Set-up data: response is other household income (in $1000s / 10),
# plus demographic predictors derived from Workinghours.
data(Workinghours)
MichInc <- data.frame(husbandManager = as.numeric(as.character(
  Workinghours$occupation) == "mp"))
MichInc <- transform(MichInc,
  otherIncome = Workinghours$income/10,
  nonWhite = Workinghours$nonwhite,
  homeOwned = Workinghours$owned,
  unemployRate = Workinghours$unemp,
  wifeAge = Workinghours$age,
  wifeEducationYears = Workinghours$education,
  numChildren = with(Workinghours, child5 +
    child13 + child17))
# Obtain vgam() fit with "family = studentt3" (location/scale/df all modelled):
fit <- vgam(otherIncome ~ s(wifeAge,df = 10)+ s(unemployRate,df = 4)
  + s(wifeEducationYears,df = 4)
  + s(numChildren,df = 4) + nonWhite
  + homeOwned + husbandManager,
  family = studentt3,data = MichInc)
# Extract the estimates of the scale and degrees of freedom parameters
# by inverting the link functions applied by studentt3:
sigmaHat <- loge(coef(fit)[2], inverse = TRUE)
nuHat <- loglog(coef(fit)[3], inverse = TRUE)
cat("\n The estimated degrees of freedom for the t-distribution is:",
  signif(nuHat,4),"\n\n")
# Plot the additive fits:
par(mfrow = c(3,3),mai = c(0.7,0.4,0.05,0.04))
# FIX(review): removed a stray double comma after `noxmean = TRUE` that
# passed an empty (missing) argument into plotvgam's `...` and caused an
# "argument is missing, with no default" error when the plot arguments
# were forwarded.
plotvgam(fit,se = TRUE,noxmean = TRUE,bty = "l",lcol = c("darkgreen"),
  scol = "green3",rcol = "dodgerblue",llwd = 2,slwd = 2,ylab = "",
  cex.axis = 1.5,cex.lab = 1.8,scale = 40)
############ End of MichInctAddMod ############
|
# State-space surplus-production (Schaefer) model fitted with JAGS via runjags.
# P[t] is depletion (biomass / K), C is catch, I is an abundance index with
# catchability q; lognormal process (sigma2) and observation (tau2) error.
library(runjags)
# NOTE: the model text below is passed verbatim to JAGS; its internal
# comments use JAGS syntax, not R.
model <- "model
{
#time step [1] conditions (note: T for truncation)
Pmed[1] <-0
P[1]~dlnorm(Pmed[1], isigma2)T(0.05,1.6)
#time steps of model
for( t in 2 : N )
{
Pmed[t] <- log(max(P[t - 1] + (r * P[t - 1]) * (1 - P[t - 1]) - C[t - 1] / K, 0.001) )
#the first part is the deterministic part, which is just the population model
P[t] ~ dlnorm(Pmed[t],isigma2)T(0.05,1.5)
#this second part is where the process error comes in to the population. in dlnorm, the mean is in log space
}
# Likelihood
for( t in 1 : N )
{
Imed[t] <- log((q * K) * P[t])
I[t] ~ dlnorm(Imed[t],itau2)
#posterior predictions (hint, the parameterization of dlnorm is not the same as in R)
index[t]<-(q*K*P[t])
I.new[t]~dlnorm(log(index[t]), itau2)
}
#priors
r ~ dlnorm( -1.38, 3.845)T(0.01,1.2)
isigma2 ~ dgamma(3.785,0.0102)
itau2 ~ dgamma(1.709,0.00861)
iq ~ dgamma(0.001,0.001)T( 0.5,100)
K ~ dlnorm(5.0429,3.7603)T(10,1000)
sigma2 <- 1/isigma2
tau2 <- 1/itau2
q <- 1/iq
#additional parameters and preditions
MSP <- r*K/4
EMSP <- r/(2*q)
P1990 <- P[N] + r*P[N]*(1-P[N]) - C[N]/K
B1990 <- P
}"
# Observed catch and abundance-index series (N = 23 years).
C<-c(15.9,25.7,28.5,23.7,25.0,33.3,28.2,19.7,17.5,19.3,21.6,23.1,22.5,22.5,23.6,29.1,14.4,13.2,28.4,34.6,37.5,25.9,25.3)
I<-c(61.89,78.98,55.59,44.61,56.89,38.27,33.84,36.13,41.95,36.63,36.33,38.82,34.32,37.64,34.01,32.16,26.88,36.61,30.07,30.75,23.36,22.36,21.91)
N<-23
data <- list(C=C,I=I,N=N)
set.seed(1901)
# Initial values: two chains with different starting points for r, K, q,
# the precisions, and the depletion trajectory P.
inits1 <- list(r=0.8, K=200, iq=0.5, isigma2=100, itau2=100, P=c(0.99,0.98,0.96,0.94,0.92,0.90,0.88,0.86,0.84,0.82,0.80,0.78,0.76,0.74,0.72,0.70,0.68,0.66,0.64,0.62,0.60,0.58,0.56))
inits2 <- list(r=0.5, K=300, iq=0.8, isigma2=200, itau2=200, P=c(0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99))
inits <- list(inits1,inits2)
# Sample 10000 draws per chain (thinned by 5) for r, K, q, the depletion
# series P, and posterior-predictive index values I.new.
results <- run.jags(model=model, monitor=c("r","K","q","P","I.new"),
  data=data, n.chains=2, method="rjags", inits=inits,
  plots=T,sample=10000,thin=5)
#results <- run.jags(model=model, monitor=c("r","K","q","P","I.new"),
# data=data, n.chains=2, method="rjags", inits=inits,
# plots=T,monitor.deviance=T,sample=10000,thin=5)
print(results)
# FIX(review): the next call referenced an undefined object `b` and the
# ggmcmc package is never loaded, so the script aborted here. Disabled
# until the intended ggs() input is confirmed (likely results$mcmc).
# ggmcmc(ggs(b), file="testjags2_mcmc.pdf")
# Pool the two chains into one matrix of posterior draws.
mcmc <- rbind(results$mcmc[[1]],results$mcmc[[2]])
# Marginal posterior histograms for r, K and q.
par(mfrow=c(2,2))
hist(mcmc[,1],xlab="r",main="")
hist(mcmc[,2],xlab="K",main="")
hist(mcmc[,3],xlab="q",main="")
# Posterior quantiles (5/25/50/75/95%) of the depletion trajectory;
# P draws start at column 4 of the pooled matrix.
ResSum <- matrix(0,ncol=23,nrow=5)
for (Iyr in seq_len(23)) {
  ResSum[,Iyr] <- quantile(mcmc[,Iyr+3],probs=c(0.05,0.25,0.5,0.75,0.95))
}
xx <- seq_len(23)
plot(xx,ResSum[3,],xlab="Year",ylab="Depletion",type='l',lwd=3,ylim=c(0,1.3))
xx2 <- c(xx,rev(xx))
polygon(xx2,c(ResSum[1,],rev(ResSum[5,])),col="gray50")
polygon(xx2,c(ResSum[2,],rev(ResSum[4,])),col="gray95")
lines(xx,ResSum[3,],lwd=3,lty=1)
# Posterior predictive interval for the abundance index (I.new draws start
# at column 27 of the pooled matrix).
par(mfrow=c(2,2))
plot(xx,I,pch=16,ylim=c(0,100))
ResSum <- matrix(0,ncol=23,nrow=5)
for (Iyr in seq_len(23)) {
  ResSum[,Iyr] <- quantile(mcmc[,26+Iyr],probs=c(0.05,0.25,0.5,0.75,0.95))
}
lines(xx,ResSum[3,],lwd=3,lty=1)
lines(xx,ResSum[1,],lwd=1,lty=2)
lines(xx,ResSum[5,],lwd=1,lty=2)
# quartz()
# Standard runjags diagnostic plots written to PDF.
plot(results, layout = runjags.getOption("plot.layout"),
  new.windows = runjags.getOption("new.windows"), file = "testjags2.pdf")
|
/FISH 558 Lectures/Lecture1 - 2 JAGS/Lecture1B.R
|
no_license
|
DanOvando/FISH-558
|
R
| false
| false
| 3,429
|
r
|
# State-space surplus-production (Schaefer) model fitted with JAGS via runjags.
# P[t] is depletion (biomass / K), C is catch, I is an abundance index with
# catchability q; lognormal process (sigma2) and observation (tau2) error.
library(runjags)
# NOTE: the model text below is passed verbatim to JAGS; its internal
# comments use JAGS syntax, not R.
model <- "model
{
#time step [1] conditions (note: T for truncation)
Pmed[1] <-0
P[1]~dlnorm(Pmed[1], isigma2)T(0.05,1.6)
#time steps of model
for( t in 2 : N )
{
Pmed[t] <- log(max(P[t - 1] + (r * P[t - 1]) * (1 - P[t - 1]) - C[t - 1] / K, 0.001) )
#the first part is the deterministic part, which is just the population model
P[t] ~ dlnorm(Pmed[t],isigma2)T(0.05,1.5)
#this second part is where the process error comes in to the population. in dlnorm, the mean is in log space
}
# Likelihood
for( t in 1 : N )
{
Imed[t] <- log((q * K) * P[t])
I[t] ~ dlnorm(Imed[t],itau2)
#posterior predictions (hint, the parameterization of dlnorm is not the same as in R)
index[t]<-(q*K*P[t])
I.new[t]~dlnorm(log(index[t]), itau2)
}
#priors
r ~ dlnorm( -1.38, 3.845)T(0.01,1.2)
isigma2 ~ dgamma(3.785,0.0102)
itau2 ~ dgamma(1.709,0.00861)
iq ~ dgamma(0.001,0.001)T( 0.5,100)
K ~ dlnorm(5.0429,3.7603)T(10,1000)
sigma2 <- 1/isigma2
tau2 <- 1/itau2
q <- 1/iq
#additional parameters and preditions
MSP <- r*K/4
EMSP <- r/(2*q)
P1990 <- P[N] + r*P[N]*(1-P[N]) - C[N]/K
B1990 <- P
}"
# Observed catch and abundance-index series (N = 23 years).
C<-c(15.9,25.7,28.5,23.7,25.0,33.3,28.2,19.7,17.5,19.3,21.6,23.1,22.5,22.5,23.6,29.1,14.4,13.2,28.4,34.6,37.5,25.9,25.3)
I<-c(61.89,78.98,55.59,44.61,56.89,38.27,33.84,36.13,41.95,36.63,36.33,38.82,34.32,37.64,34.01,32.16,26.88,36.61,30.07,30.75,23.36,22.36,21.91)
N<-23
data <- list(C=C,I=I,N=N)
set.seed(1901)
# Initial values: two chains with different starting points for r, K, q,
# the precisions, and the depletion trajectory P.
inits1 <- list(r=0.8, K=200, iq=0.5, isigma2=100, itau2=100, P=c(0.99,0.98,0.96,0.94,0.92,0.90,0.88,0.86,0.84,0.82,0.80,0.78,0.76,0.74,0.72,0.70,0.68,0.66,0.64,0.62,0.60,0.58,0.56))
inits2 <- list(r=0.5, K=300, iq=0.8, isigma2=200, itau2=200, P=c(0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99))
inits <- list(inits1,inits2)
# Sample 10000 draws per chain (thinned by 5) for r, K, q, the depletion
# series P, and posterior-predictive index values I.new.
results <- run.jags(model=model, monitor=c("r","K","q","P","I.new"),
  data=data, n.chains=2, method="rjags", inits=inits,
  plots=T,sample=10000,thin=5)
#results <- run.jags(model=model, monitor=c("r","K","q","P","I.new"),
# data=data, n.chains=2, method="rjags", inits=inits,
# plots=T,monitor.deviance=T,sample=10000,thin=5)
print(results)
# FIX(review): the next call referenced an undefined object `b` and the
# ggmcmc package is never loaded, so the script aborted here. Disabled
# until the intended ggs() input is confirmed (likely results$mcmc).
# ggmcmc(ggs(b), file="testjags2_mcmc.pdf")
# Pool the two chains into one matrix of posterior draws.
mcmc <- rbind(results$mcmc[[1]],results$mcmc[[2]])
# Marginal posterior histograms for r, K and q.
par(mfrow=c(2,2))
hist(mcmc[,1],xlab="r",main="")
hist(mcmc[,2],xlab="K",main="")
hist(mcmc[,3],xlab="q",main="")
# Posterior quantiles (5/25/50/75/95%) of the depletion trajectory;
# P draws start at column 4 of the pooled matrix.
ResSum <- matrix(0,ncol=23,nrow=5)
for (Iyr in seq_len(23)) {
  ResSum[,Iyr] <- quantile(mcmc[,Iyr+3],probs=c(0.05,0.25,0.5,0.75,0.95))
}
xx <- seq_len(23)
plot(xx,ResSum[3,],xlab="Year",ylab="Depletion",type='l',lwd=3,ylim=c(0,1.3))
xx2 <- c(xx,rev(xx))
polygon(xx2,c(ResSum[1,],rev(ResSum[5,])),col="gray50")
polygon(xx2,c(ResSum[2,],rev(ResSum[4,])),col="gray95")
lines(xx,ResSum[3,],lwd=3,lty=1)
# Posterior predictive interval for the abundance index (I.new draws start
# at column 27 of the pooled matrix).
par(mfrow=c(2,2))
plot(xx,I,pch=16,ylim=c(0,100))
ResSum <- matrix(0,ncol=23,nrow=5)
for (Iyr in seq_len(23)) {
  ResSum[,Iyr] <- quantile(mcmc[,26+Iyr],probs=c(0.05,0.25,0.5,0.75,0.95))
}
lines(xx,ResSum[3,],lwd=3,lty=1)
lines(xx,ResSum[1,],lwd=1,lty=2)
lines(xx,ResSum[5,],lwd=1,lty=2)
# quartz()
# Standard runjags diagnostic plots written to PDF.
plot(results, layout = runjags.getOption("plot.layout"),
  new.windows = runjags.getOption("new.windows"), file = "testjags2.pdf")
|
## Numerically stable evaluation of x^p / y^q.  The exponents are paired
## through the ratio (x/y), raised to the smaller of p and q, so that
## neither x^p nor y^q is ever formed on its own.
compute_fraction <- function(x, p, y, q) {
    ratio <- x / y
    if (p < q) {
        ratio^p / y^(q - p)
    } else {
        ratio^q * x^(p - q)
    }
}
## Sum of the observations flagged as belonging to the (treatment) group,
## i.e. those with g > 0.  By default every observation is included.
v <- function(x, g = rep.int(1, length(x))) {
    in_group <- g > 0
    sum(x[in_group])
}
## Sum and sum of squares of x, returned as a length-2 vector.
uu <- function(x) {
    c(sum(x), sum(x * x))
}
## Sum of squares of x.
u <- function(x) {
    sum(x * x)
}
## Critical value of the one-sided one-sample t-test expressed on the scale
## of the sum of observations, given the sum of squares u, sample size n and
## significance level alpha.
cv <- function(u, n, alpha) {
    tq <- qt(alpha, df = n - 1, lower.tail = FALSE)
    tq * sqrt(n * u / (n - 1 + tq^2))
}
## Conditional critical value: overall critical value based on the total sum
## of squares u1 + u2, shifted by the first-stage sum of observations v1.
ccv <- function(u2, u1, v1, n, alpha) {
    cv(u1 + u2, n, alpha) - v1
}
## Two-sample analogue of cv(); uu_1 and uu_2 are the pooled sum and sum of
## squares, m and n the per-group sample sizes.
cv_twosample <- function(uu_1, uu_2, m, n, alpha) {
    tq <- qt(alpha, df = m + n - 2, lower.tail = FALSE)
    scaled <- tq * sqrt(m * n * ((m + n) * uu_2 - uu_1^2) / (m + n - 2 + tq^2))
    (scaled + m * uu_1) / (m + n)
}
## Two-sample conditional critical value from pooled first- and second-stage
## summary statistics, shifted by the first-stage treatment sum v1.
ccv_twosample <- function(uu1_1, uu1_2, uu2_1, uu2_2, v1, m, n, alpha) {
    cv_twosample(uu1_1 + uu2_1, uu1_2 + uu2_2, m, n, alpha) - v1
}
## Conditional error rate of the preplanned one-sample t-test given the
## first-stage data (u1, v1) and a second-stage sum of squares u2.
ccer <- function(u2,u1,v1,n,n1,alpha){
    n2 <- n-n1
    .ccv <- ccv(u2,u1,v1,n,alpha)
    ## If the conditional critical value lies outside the attainable range
    ## (.ccv^2 >= n2*u2) the CER is degenerate (0 or 1, depending on the
    ## sign of .ccv); otherwise evaluate the corresponding t-tail.
    ifelse(.ccv^2 >= n2*u2,.ccv>0,pt(sqrt(n2-1)*.ccv/sqrt(abs((n2*u2) - (.ccv^2))),n2-1,lower.tail=FALSE))
}
## Maps a critical value x onto the t-scale for the two-sample conditional
## error rate (helper for ccer_twosample below).
kmn <- function(x,uu_1,uu_2,m,n){
    ## ! not clear whether they mean (x*(m+n)/(m*n)) or (m+n)/(m*n*x)
    (sqrt(m+n-2) * sqrt(m*n/(m+n)) * ((m+n)/(m*n)*x - uu_1/n))/
    sqrt(abs(uu_2-uu_1^2/(m+n) - (m*n)/(m+n) * ((m+n)/(m*n)*x - uu_1/n)^2))
}
## Two-sample conditional error rate given first-stage summary statistics
## (uu1_*, v1) and second-stage summary statistics (uu2_*).
ccer_twosample <- function(uu2_1,uu2_2,uu1_1,uu1_2,v1,m,n,m1,n1,alpha){
    n2 <- n-n1
    m2 <- m-m1
    .ccv <- ccv_twosample(uu1_1,uu1_2,uu2_1,uu2_2,v1,m,n,alpha)
    ## Degenerate CER (0/1) when the centred critical value lies outside the
    ## attainable range; otherwise evaluate the t-tail via kmn().
    ifelse((.ccv-m2/(m2+n2)*uu2_1)^2 > abs((uu2_2-uu2_1^2/(m2+n2))*m2*n2/(m2+n2)),
           (.ccv-m2/(m2+n2)*uu2_1) > 0,
           pt(kmn(.ccv,uu2_1,uu2_2,m2,n2),m2+n2-2,lower.tail=FALSE))
}
## slightly faster and numerically ok
## Conditional density of the preplanned-stage sum of squares u given the
## extended-stage total tu (n and tn are the corresponding degrees of
## freedom).  Algebraically the same as dutu_chisq below, but evaluated via
## compute_fraction() for numerical stability.
dutu_cf <- function(u,tu,n,tn){
    gamma(tn/2)/(gamma(n/2)*gamma((tn-n)/2)) * compute_fraction(u,n/2-1,tu,tn/2-1)*(tu-u)^((tn-n)/2-1)
    #(u^(n/2-1)*(tu-u)^((tn-n)/2-1))/tu^(tn/2-1)
}
## Normalising constant of the density above (a ratio of gamma functions).
dutu_const <- function(n,tn){
    gamma(tn/2)/(gamma(n/2)*gamma((tn-n)/2))
}
## Variable (unnormalised) part of the density.  With mpfr = TRUE the
## kernel is evaluated in 300-bit multiple precision (requires Rmpfr).
dutu_var <- function(u,tu,n,tn,mpfr){
    if(mpfr){
        u <- mpfr(u,300)
        tu <- mpfr(tu,300)
        (u^(n/2-1)*(tu-u)^((tn-n)/2-1))/tu^(tn/2-1)
    } else {
        return(compute_fraction(u,n/2-1,tu,tn/2-1)*(tu-u)^((tn-n)/2-1))
    }
}
## Full density evaluated in 500-bit multiple precision (requires Rmpfr).
dutu_mpfr <- function(u,tu,n,tn){
    u <- mpfr(u,500)
    tu <- mpfr(tu,500)
    gamma(tn/2)/(gamma(n/2)*gamma((tn-n)/2)) * (u^(n/2-1)*(tu-u)^((tn-n)/2-1))/tu^(tn/2-1)
}
## numerically unstable
## Reference implementation via chi-square densities (checking only).
dutu_chisq <- function(u,tu,n,tn){
    dchisq(tu-u,tn-n)*dchisq(u,n)/dchisq(tu,tn)
}
## Dispatcher: multiple-precision (Rmpfr) or stable double-precision form.
dutu <- function(u,tu,n,tn,mpfr){
    if(mpfr){
        return(dutu_mpfr(u,tu,n,tn))
    } else {
        return(dutu_cf(u,tu,n,tn))
    }
}
## Joint conditional density of the second-stage (sum, sum of squares) pair
## given the extended-stage totals.  Evaluated in 10000-bit multiple
## precision (Rmpfr) because the naive double-precision form over/underflows
## for realistic sample sizes; the result is collapsed back to double.
duutuu <- function(uu_1,uu_2,tuu_1,tuu_2,m,n,tm,tn){
    uu_1 <- mpfr(uu_1,10000)
    uu_2 <- mpfr(uu_2,10000)
    tuu_1 <- mpfr(tuu_1,10000)
    tuu_2 <- mpfr(tuu_2,10000)
    g <- m+n
    tg <- tm+tn
    ## (sqrt(tg)*gamma((tg-1)/2)*(uu_2-uu_1^2/g)^((g-3)/2)*(tuu_2-uu_2-(tuu_1-tuu_1)^2/(tg-g))^((tg-g-3)/2))/
    ## (sqrt(pi)*sqrt(g)*gamma((tg-1)/2)*sqrt(tg-g)*gamma((tg-g-1)/2)*(tuu_2-tuu_1^2/tg)^((tg-3)/2))
    as.numeric({ sqrt(tg)*gamma((tg-1)/2) / (sqrt(pi*g * (tg - g))* gamma((g-1)/2) *gamma((tg-g-1)/2)) } *
               compute_fraction((tuu_2-uu_2-(tuu_1-uu_1)^2/(tg-g)) * (uu_2-uu_1^2/g),(tg-g-3)/2,tuu_2-tuu_1^2/tg,(tg-3)/4) *
               compute_fraction((uu_2-uu_1^2/g),(2*g-tg)/2,tuu_2-tuu_1^2/tg,(tg-3)/4))
}
## original <- function(){
## { sqrt(tg) * gamma((tg-1)/2) *
## (uu_2-uu_1^2/g)^((g-3)/2) * (tuu_2 - uu_2 - (tuu_1-uu_1)^2/(tg-g))^((tg-g-3)/2) } /
## ## note that instead of repeating gamma((tg-1)/2) as in the appendix of Timmesfeld et al. (2007) we
## ## use gamma((g-1)/2) in the denominator
## { sqrt(pi*g*(tg-g)) * gamma((g-1)/2) * gamma((tg-g-1)/2) *
## (tuu_2-tuu_2^2/tg)^((tg-3)/2) }
## }
## new <- function(){
## { sqrt(tg) * gamma((tg-1)/2) / (sqrt(pi*g*(tg-g)) * gamma((g-1)/2) * gamma((tg-g-1)/2)) } *
## {(uu_2-uu_1^2/g)^((g-3)/2) * (tuu_2 - uu_2 - (tuu_1-uu_1)^2/(tg-g))^((tg-g-3)/2) } /
## ## note that instead of repeating gamma((tg-1)/2) as in the appendix of Timmesfeld et al. (2007) we
## ## use gamma((g-1)/2) in the denominator
## { (tuu_2-tuu_1^2/tg)^((tg-3)/2) }
## }
## prec <- function(){
## uu_1 <- mpfr(uu_1,2000)
## uu_2 <- mpfr(uu_2,2000)
## tuu_1 <- mpfr(tuu_1,2000)
## tuu_2 <- mpfr(tuu_2,2000)
## { sqrt(tg) * gamma((tg-1)/2) *
## (uu_2-uu_1^2/g)^((g-3)/2) * (tuu_2 - uu_2 - (tuu_1-uu_1)^2/(tg-g))^((tg-g-3)/2) } /
## ## note that instead of repeating gamma((tg-1)/2) as in the appendix of Timmesfeld et al. (2007) we
## ## use gamma((g-1)/2) in the denominator
## { sqrt(pi*g*(tg-g)) * gamma((g-1)/2) * gamma((tg-g-1)/2) *
## (tuu_2-tuu_2^2/tg)^((tg-3)/2) }
## }
##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##' @title Conditional level of extended two sample t-test
##' @template timmesfeld
##' @param tuu2 Two dimensional vector containing the sum and the sum of squares of the extended second stage observations
##' @param uu1 Two dimensional vector containing the sum and the sum of squares of the first stage observations
##' @param v1 Sum of first stage treatment group observations
##' @param tm2 Extended second stage sample size treatment group
##' @param tn2 Extended second stage sample size control group
##' @param m Preplanned total sample size treatment group
##' @param n Preplanned total sample size control group
##' @param m1 First stage sample size treatment group
##' @param n1 First stage sample size control group
##' @param alpha Significance level
##' @author Florian Klinglmueller
##' @return Conditional level of an extended two sample t-test
##' @export
##' @import pracma
clev_twosample <- function(tuu2,uu1,v1,tm2,tn2,m,n,m1,n1,alpha=0.025){
    m2 <- m-m1
    n2 <- n-n1
    ## Integrand: two-sample conditional error rate weighted by the joint
    ## conditional density of the second-stage summary statistics.
    f <- function(x,y) {
        ccer_twosample(y,x,uu1[1],uu1[2],v1,m,n,m1,n1,alpha) * duutuu(y,x,tuu2[1],tuu2[2],m2,n2,tm2,tn2)
    }
    ## Integrate over the attainable region |sum| <= sqrt(n2' * sum of
    ## squares) using pracma::integral2.
    integral2(f,xmin=0,xmax=tuu2[2],
              ymin=function(x) -sqrt((m2+n2)*x),
              ymax = function(x) sqrt((m2+n2)*x))
}
## m <- n <- 75
## m1 <- n1 <- 20
## v1 <- 112
## uu1 <- c(102,8955.72)
## tm2 <- tn2 <- 76
## tuu2 <- c(421.8,31107)
## clev_twosample(tuu2,uu1,v1,tm2,tn2,m,n,m1,n1)
##' Computes the conditional level of the adaptive one-sided one-sample t-test for the null hypothesis mu=0 against the alternative mu > 0, given first stage sum of observations, sum of squares, and adapted secon stage sum of squares
##' @title Condition level function adaptive t-test
##' @template timmesfeld
##' @return Conditional level for the adapted test
##' @export
##' @import Rmpfr
clev <- function(tu2,u1,v1,tn2,n,n1,alpha=0.025,mpfr=FALSE){
    ## Integrand: conditional error rate of the preplanned test times the
    ## conditional density of the second-stage sum of squares given the
    ## adapted total tu2.
    f <- function(u2) ccer(u2,u1,v1,n,n1,alpha) * dutu_var(u2,tu2,n-n1,tn2,mpfr)
    # tol = ifelse(mpfr,.5*.Machine$double.eps^0.25,.Machine$double.eps^0.25)
    if(mpfr){
        ## high-precision Romberg integration (Rmpfr::integrateR)
        expectation <- try(integrateR(f,lower=0,upper=tu2,rel.tol=1e-20)$value)
    } else {
        expectation <- try(integrate(f,lower=0,upper=tu2)$value)
    }
    ## BUGFIX: test the condition class with inherits() -- `class(x) == ...`
    ## breaks when an object carries more than one class.  Fail with an
    ## informative error instead of calling browser(), which is a no-op in
    ## non-interactive sessions and only deferred the failure downstream.
    if(inherits(expectation, "try-error")){
        stop("clev: numerical integration failed: ",
             attr(expectation, "condition")$message, call. = FALSE)
    }
    ## The normalising constant was factored out of the integrand.
    dutu_const(n-n1,tn2) * expectation
}
##' Computes the conditional critical value of the adaptive one-sided one-sample t-test for the null hypothesis mu=0 against the alternative mu > 0, given first stage sum of observations, sum of squares, and adapted second stage sum of squares
##' @title Conditional critical value adaptive t-test
##' @template timmesfeld
##' @return Conditional critical value for the adapted test
##' @author Florian Klinglmueller
##' @export
tccv <- function(tu2,u1,v1,tn2,n,n1,alpha=0.025){
    ## BUGFIX: forward the caller-supplied significance level.  Previously
    ## `alpha=0.025` was hard-coded in the call, so the `alpha` argument of
    ## tccv() was silently ignored.
    qt(clev(tu2,u1,v1,tn2,n,n1,alpha=alpha),tn2-1,lower.tail=FALSE)
}
|
/R/timmesfeld.R
|
no_license
|
livioivil/resamplingMCP
|
R
| false
| false
| 7,810
|
r
|
## Numerically stable evaluation of x^p / y^q: the exponents are paired
## through the ratio (x/y) so neither power over/underflows on its own.
compute_fraction <- function(x,p,y,q){
    if(q>p) (x/y)^p /y^(q-p) else (x/y)^q *x^(p-q)
}
## Sum of the observations with g > 0 (treatment-group sum); by default all
## observations are included.
v <- function(x,g=rep.int(1,length(x))){
    sum(x[g>0])
}
## Sum and sum of squares of x, as a length-2 vector.
uu <- function(x){
    c(sum(x),sum(x*x))
}
## Sum of squares of x.
u <- function(x){
    sum(x*x)
}
## Critical value of the one-sided one-sample t-test on the scale of the sum
## of observations, given sum of squares u and sample size n.
cv <- function(u,n,alpha){
    cv <- qt(alpha,n-1,lower.tail=FALSE)
    cv*sqrt(n*u/(n-1+cv^2))
}
## Conditional critical value: overall critical value from the total sum of
## squares u1 + u2, shifted by the first-stage sum of observations v1.
ccv <- function(u2,u1,v1,n,alpha){
    cv(u1+u2,n,alpha)-v1
}
## Two-sample analogue of cv(); uu_1/uu_2 are the pooled sum and sum of
## squares, m and n the per-group sample sizes.
cv_twosample <- function(uu_1,uu_2,m,n,alpha){
    cv <- qt(alpha,n+m-2,lower.tail=FALSE)
    (cv* sqrt(m*n*((m+n)*uu_2-uu_1^2)/(m+n-2+cv^2))+m*uu_1)/(m+n)
}
## Two-sample conditional critical value from pooled first- and second-stage
## statistics, shifted by the first-stage treatment sum v1.
ccv_twosample <- function(uu1_1,uu1_2,uu2_1,uu2_2,v1,m,n,alpha){
    cv_twosample(uu1_1+uu2_1,uu1_2+uu2_2,m,n,alpha)-v1
}
## Conditional error rate of the preplanned one-sample t-test given the
## first-stage data (u1, v1) and a second-stage sum of squares u2.
ccer <- function(u2,u1,v1,n,n1,alpha){
    n2 <- n-n1
    .ccv <- ccv(u2,u1,v1,n,alpha)
    ## If the conditional critical value lies outside the attainable range
    ## (.ccv^2 >= n2*u2) the CER is degenerate (0 or 1, depending on the
    ## sign of .ccv); otherwise evaluate the corresponding t-tail.
    ifelse(.ccv^2 >= n2*u2,.ccv>0,pt(sqrt(n2-1)*.ccv/sqrt(abs((n2*u2) - (.ccv^2))),n2-1,lower.tail=FALSE))
}
## Maps a critical value x onto the t-scale for the two-sample conditional
## error rate (helper for ccer_twosample below).
kmn <- function(x,uu_1,uu_2,m,n){
    ## ! not clear whether they mean (x*(m+n)/(m*n)) or (m+n)/(m*n*x)
    (sqrt(m+n-2) * sqrt(m*n/(m+n)) * ((m+n)/(m*n)*x - uu_1/n))/
    sqrt(abs(uu_2-uu_1^2/(m+n) - (m*n)/(m+n) * ((m+n)/(m*n)*x - uu_1/n)^2))
}
## Two-sample conditional error rate given first-stage summary statistics
## (uu1_*, v1) and second-stage summary statistics (uu2_*).
ccer_twosample <- function(uu2_1,uu2_2,uu1_1,uu1_2,v1,m,n,m1,n1,alpha){
    n2 <- n-n1
    m2 <- m-m1
    .ccv <- ccv_twosample(uu1_1,uu1_2,uu2_1,uu2_2,v1,m,n,alpha)
    ## Degenerate CER (0/1) when the centred critical value lies outside the
    ## attainable range; otherwise evaluate the t-tail via kmn().
    ifelse((.ccv-m2/(m2+n2)*uu2_1)^2 > abs((uu2_2-uu2_1^2/(m2+n2))*m2*n2/(m2+n2)),
           (.ccv-m2/(m2+n2)*uu2_1) > 0,
           pt(kmn(.ccv,uu2_1,uu2_2,m2,n2),m2+n2-2,lower.tail=FALSE))
}
## slightly faster and numerically ok
## Conditional density of the preplanned-stage sum of squares u given the
## extended-stage total tu (n and tn are the corresponding degrees of
## freedom).  Algebraically the same as dutu_chisq below, but evaluated via
## compute_fraction() for numerical stability.
dutu_cf <- function(u,tu,n,tn){
    gamma(tn/2)/(gamma(n/2)*gamma((tn-n)/2)) * compute_fraction(u,n/2-1,tu,tn/2-1)*(tu-u)^((tn-n)/2-1)
    #(u^(n/2-1)*(tu-u)^((tn-n)/2-1))/tu^(tn/2-1)
}
## Normalising constant of the density above (a ratio of gamma functions).
dutu_const <- function(n,tn){
    gamma(tn/2)/(gamma(n/2)*gamma((tn-n)/2))
}
## Variable (unnormalised) part of the density.  With mpfr = TRUE the
## kernel is evaluated in 300-bit multiple precision (requires Rmpfr).
dutu_var <- function(u,tu,n,tn,mpfr){
    if(mpfr){
        u <- mpfr(u,300)
        tu <- mpfr(tu,300)
        (u^(n/2-1)*(tu-u)^((tn-n)/2-1))/tu^(tn/2-1)
    } else {
        return(compute_fraction(u,n/2-1,tu,tn/2-1)*(tu-u)^((tn-n)/2-1))
    }
}
## Full density evaluated in 500-bit multiple precision (requires Rmpfr).
dutu_mpfr <- function(u,tu,n,tn){
    u <- mpfr(u,500)
    tu <- mpfr(tu,500)
    gamma(tn/2)/(gamma(n/2)*gamma((tn-n)/2)) * (u^(n/2-1)*(tu-u)^((tn-n)/2-1))/tu^(tn/2-1)
}
## numerically unstable
## Reference implementation via chi-square densities (checking only).
dutu_chisq <- function(u,tu,n,tn){
    dchisq(tu-u,tn-n)*dchisq(u,n)/dchisq(tu,tn)
}
## Dispatcher: multiple-precision (Rmpfr) or stable double-precision form.
dutu <- function(u,tu,n,tn,mpfr){
    if(mpfr){
        return(dutu_mpfr(u,tu,n,tn))
    } else {
        return(dutu_cf(u,tu,n,tn))
    }
}
## Joint conditional density of the second-stage (sum, sum of squares) pair
## given the extended-stage totals.  Evaluated in 10000-bit multiple
## precision (Rmpfr) because the naive double-precision form over/underflows
## for realistic sample sizes; the result is collapsed back to double.
duutuu <- function(uu_1,uu_2,tuu_1,tuu_2,m,n,tm,tn){
    uu_1 <- mpfr(uu_1,10000)
    uu_2 <- mpfr(uu_2,10000)
    tuu_1 <- mpfr(tuu_1,10000)
    tuu_2 <- mpfr(tuu_2,10000)
    g <- m+n
    tg <- tm+tn
    ## (sqrt(tg)*gamma((tg-1)/2)*(uu_2-uu_1^2/g)^((g-3)/2)*(tuu_2-uu_2-(tuu_1-tuu_1)^2/(tg-g))^((tg-g-3)/2))/
    ## (sqrt(pi)*sqrt(g)*gamma((tg-1)/2)*sqrt(tg-g)*gamma((tg-g-1)/2)*(tuu_2-tuu_1^2/tg)^((tg-3)/2))
    as.numeric({ sqrt(tg)*gamma((tg-1)/2) / (sqrt(pi*g * (tg - g))* gamma((g-1)/2) *gamma((tg-g-1)/2)) } *
               compute_fraction((tuu_2-uu_2-(tuu_1-uu_1)^2/(tg-g)) * (uu_2-uu_1^2/g),(tg-g-3)/2,tuu_2-tuu_1^2/tg,(tg-3)/4) *
               compute_fraction((uu_2-uu_1^2/g),(2*g-tg)/2,tuu_2-tuu_1^2/tg,(tg-3)/4))
}
## original <- function(){
## { sqrt(tg) * gamma((tg-1)/2) *
## (uu_2-uu_1^2/g)^((g-3)/2) * (tuu_2 - uu_2 - (tuu_1-uu_1)^2/(tg-g))^((tg-g-3)/2) } /
## ## note that instead of repeating gamma((tg-1)/2) as in the appendix of Timmesfeld et al. (2007) we
## ## use gamma((g-1)/2) in the denominator
## { sqrt(pi*g*(tg-g)) * gamma((g-1)/2) * gamma((tg-g-1)/2) *
## (tuu_2-tuu_2^2/tg)^((tg-3)/2) }
## }
## new <- function(){
## { sqrt(tg) * gamma((tg-1)/2) / (sqrt(pi*g*(tg-g)) * gamma((g-1)/2) * gamma((tg-g-1)/2)) } *
## {(uu_2-uu_1^2/g)^((g-3)/2) * (tuu_2 - uu_2 - (tuu_1-uu_1)^2/(tg-g))^((tg-g-3)/2) } /
## ## note that instead of repeating gamma((tg-1)/2) as in the appendix of Timmesfeld et al. (2007) we
## ## use gamma((g-1)/2) in the denominator
## { (tuu_2-tuu_1^2/tg)^((tg-3)/2) }
## }
## prec <- function(){
## uu_1 <- mpfr(uu_1,2000)
## uu_2 <- mpfr(uu_2,2000)
## tuu_1 <- mpfr(tuu_1,2000)
## tuu_2 <- mpfr(tuu_2,2000)
## { sqrt(tg) * gamma((tg-1)/2) *
## (uu_2-uu_1^2/g)^((g-3)/2) * (tuu_2 - uu_2 - (tuu_1-uu_1)^2/(tg-g))^((tg-g-3)/2) } /
## ## note that instead of repeating gamma((tg-1)/2) as in the appendix of Timmesfeld et al. (2007) we
## ## use gamma((g-1)/2) in the denominator
## { sqrt(pi*g*(tg-g)) * gamma((g-1)/2) * gamma((tg-g-1)/2) *
## (tuu_2-tuu_2^2/tg)^((tg-3)/2) }
## }
##' .. content for \description{} (no empty lines) ..
##'
##' .. content for \details{} ..
##' @title Conditional level of extended two sample t-test
##' @template timmesfeld
##' @param tuu2 Two dimensional vector containing the sum and the sum of squares of the extended second stage observations
##' @param uu1 Two dimensional vector containing the sum and the sum of squares of the first stage observations
##' @param v1 Sum of first stage treatment group observations
##' @param tm2 Extended second stage sample size treatment group
##' @param tn2 Extended second stage sample size control group
##' @param m Preplanned total sample size treatment group
##' @param n Preplanned total sample size control group
##' @param m1 First stage sample size treatment group
##' @param n1 First stage sample size control group
##' @param alpha Significance level
##' @author Florian Klinglmueller
##' @return Conditional level of an extended two sample t-test
##' @export
##' @import pracma
clev_twosample <- function(tuu2,uu1,v1,tm2,tn2,m,n,m1,n1,alpha=0.025){
    m2 <- m-m1
    n2 <- n-n1
    ## Integrand: two-sample conditional error rate weighted by the joint
    ## conditional density of the second-stage summary statistics.
    f <- function(x,y) {
        ccer_twosample(y,x,uu1[1],uu1[2],v1,m,n,m1,n1,alpha) * duutuu(y,x,tuu2[1],tuu2[2],m2,n2,tm2,tn2)
    }
    ## Integrate over the attainable region |sum| <= sqrt(n2' * sum of
    ## squares) using pracma::integral2.
    integral2(f,xmin=0,xmax=tuu2[2],
              ymin=function(x) -sqrt((m2+n2)*x),
              ymax = function(x) sqrt((m2+n2)*x))
}
## m <- n <- 75
## m1 <- n1 <- 20
## v1 <- 112
## uu1 <- c(102,8955.72)
## tm2 <- tn2 <- 76
## tuu2 <- c(421.8,31107)
## clev_twosample(tuu2,uu1,v1,tm2,tn2,m,n,m1,n1)
##' Computes the conditional level of the adaptive one-sided one-sample t-test for the null hypothesis mu=0 against the alternative mu > 0, given first stage sum of observations, sum of squares, and adapted secon stage sum of squares
##' @title Condition level function adaptive t-test
##' @template timmesfeld
##' @return Conditional level for the adapted test
##' @export
##' @import Rmpfr
clev <- function(tu2,u1,v1,tn2,n,n1,alpha=0.025,mpfr=FALSE){
    ## Integrand: conditional error rate of the preplanned test times the
    ## conditional density of the second-stage sum of squares given the
    ## adapted total tu2.
    f <- function(u2) ccer(u2,u1,v1,n,n1,alpha) * dutu_var(u2,tu2,n-n1,tn2,mpfr)
    # tol = ifelse(mpfr,.5*.Machine$double.eps^0.25,.Machine$double.eps^0.25)
    if(mpfr){
        ## high-precision Romberg integration (Rmpfr::integrateR)
        expectation <- try(integrateR(f,lower=0,upper=tu2,rel.tol=1e-20)$value)
    } else {
        expectation <- try(integrate(f,lower=0,upper=tu2)$value)
    }
    ## BUGFIX: test the condition class with inherits() -- `class(x) == ...`
    ## breaks when an object carries more than one class.  Fail with an
    ## informative error instead of calling browser(), which is a no-op in
    ## non-interactive sessions and only deferred the failure downstream.
    if(inherits(expectation, "try-error")){
        stop("clev: numerical integration failed: ",
             attr(expectation, "condition")$message, call. = FALSE)
    }
    ## The normalising constant was factored out of the integrand.
    dutu_const(n-n1,tn2) * expectation
}
##' Computes the conditional critical value of the adaptive one-sided one-sample t-test for the null hypothesis mu=0 against the alternative mu > 0, given first stage sum of observations, sum of squares, and adapted second stage sum of squares
##' @title Conditional critical value adaptive t-test
##' @template timmesfeld
##' @return Conditional critical value for the adapted test
##' @author Florian Klinglmueller
##' @export
tccv <- function(tu2,u1,v1,tn2,n,n1,alpha=0.025){
    ## BUGFIX: forward the caller-supplied significance level.  Previously
    ## `alpha=0.025` was hard-coded in the call, so the `alpha` argument of
    ## tccv() was silently ignored.
    qt(clev(tu2,u1,v1,tn2,n,n1,alpha=alpha),tn2-1,lower.tail=FALSE)
}
|
# 2016-08-16
# Figures for paper: include hogenesch, liver kidney WTKO, nuclear proteomics, 4c-seq
# Jake Yeung
#
# Other links to scripts not shown here:
# /home/yeung/projects/tissue-specificity/scripts/pca/pca_adjusted_microarray.label_variance.for_paper.R
# /home/yeung/projects/tissue-specificity/scripts/fourier/total_variance.noise_floor.hogenesch_and_liverWTKO.R
# /home/yeung/projects/tissue-specificity/scripts/three_way_contingency_tables/downstream.three_way_contingency.R
## ---- Setup: libraries, helper sources and precomputed R objects ---------
## NOTE(review): rm(list = ls()) and the absolute setwd() make this script
## machine-specific and unsafe to source from other code; kept as-is for
## reproduction of the paper.
rm(list=ls())
start <- Sys.time()
library(ggplot2)
library(PMA)
# detach("package:dplyr", unload=TRUE) # sometimes necessary to solve some strange issues with PMA and dplyr
library(dplyr)
library(parallel)
setwd("/home/yeung/projects/tissue-specificity")
source("scripts/functions/PlotGeneAcrossTissues.R")
source("scripts/functions/PlotFunctions.R")
source("scripts/functions/NcondsFunctions.R")
source("scripts/functions/SvdFunctions.R")
source("scripts/functions/LoadActivitiesLong.R")
source("scripts/functions/LiverKidneyFunctions.R")
source("scripts/functions/PlotActivitiesFunctions.R")
# source("scripts/functions/AnalyzeGeneEnrichment.R")
source("scripts/functions/FourierFunctions.R")
source("scripts/functions/GetTFs.R")
source("scripts/functions/LdaFunctions.R")
source("scripts/functions/HandleMotifNames.R")
source("scripts/functions/RemoveP2Name.R")
source("scripts/functions/GetTopMotifs.R")
source("scripts/functions/NcondsAnalysisFunctions.R")
source("scripts/functions/ModelStrToModel.R")
source("scripts/functions/ProteomicsFunctions.R")
source("scripts/functions/CosSineFunctions.R")
# Inits -------------------------------------------------------------------
remove.kidney.outliers <- TRUE
remove.wfat <- TRUE
plot.i <- 1
tissue.order <- c('Liver','BFAT','Kidney','Lung','Adr','Mus','Heart','Aorta','Hypo','Cere','BS')
plot.dir <- "/home/yeung/projects/tissue-specificity/plots/primetime_plots_full_paper"
dir.create(plot.dir, showWarnings = FALSE)
tfs <- GetTFs(get.mat.only = TRUE)
jmeth <- "g=1001"
## Load precomputed expression data and model-selection fits.
## NOTE(review): `v=T` relies on the reassignable shorthand T; prefer
## verbose=TRUE when refactoring.
load("Robjs/liver_kidney_atger_nestle/dat.freq.bugfixed.Robj", v=T) # LivKid
load("Robjs/liver_kidney_atger_nestle/fits.long.multimethod.filtbest.staggeredtimepts.bugfixed.annotated.Robj", v=T)
fits.long.filt <- subset(fits.long.filt, method == jmeth)
load("Robjs/liver_kidney_atger_nestle/dat.long.liverkidneyWTKO.bugfixed.Robj", v=T); dat.wtko <- dat.long; rm(dat.long)
load("Robjs/liver_kidney_atger_nestle/fits.bytiss.bugfixed.Robj", v=T)
load("Robjs/dat.long.fixed_rik_genes.Robj", v=T) # hogenesch
# model selection with g1000
load("Robjs/nconds_g1000_11_tissues/fits_long.11_tiss_3_max.g1000.bestmodel.filteramp.0.15.Robj", v=T) # use this, looks same as fits.best should be OK?
load("Robjs/dat.complex.fixed_rik_genes.Robj", v=T)
if (remove.wfat){
  dat.complex <- subset(dat.complex, tissue != "WFAT")
  dat.long <- subset(dat.long, tissue != "WFAT")
}
## Annotate each model string with its parameter count and number of
## rhythmic tissue groups.
fits.long.filt$n.params <- sapply(fits.long.filt$model, function(m) return(length(strsplit(as.character(m), ";")[[1]])))
fits.long.filt$n.rhyth <- sapply(fits.long.filt$model, GetNrhythFromModel)
# Remove kidney outliers (optional)
if (remove.kidney.outliers){
  # Kidney_SV129 genes contain some weird outliers, remove them
  outliers <- c("Cox8b", "Ucp1", "Cidea", "Flg", "Nr4a2")
  fits.long.filt <- subset(fits.long.filt, !gene %in% outliers)
  dat.wtko <- subset(dat.wtko, !gene %in% outliers)
}
dat.wtko <- StaggeredTimepointsLivKid(dat.wtko)
dat.wtko.collapsed <- CollapseTissueGeno(dat.wtko)
# load proteomics
prot.long <- LoadProteomicsData()
#
load("Robjs/fits.best.max_3.collapsed_models.amp_cutoff_0.15.phase_sd_maxdiff_avg.Robj", v=T)
# Do tables for paper -----------------------------------------------------
maindir <- "/home/yeung/projects/tissue-specificity/tables"
# write microarray-RNASeq merged table
outf <- "1_Hogenesch-Microarray-RNASeq-Merged.txt"
write.table(dat.long, file = file.path(maindir, outf), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
# write RNA-Seq WT-KO table
outf2 <- "2_Liver-Kidney-WTKO-RNASeq.txt"
write.table(subset(dat.wtko, !is.na(gene)), file = file.path(maindir, outf2), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
# write fits for Hogenesch
outf3 <- "3_Hogenesch-Model-Selection.txt"
fits.best.txtout <- fits.best; fits.best.txtout$param.list <- NULL
## Flatten each per-gene parameter list into a single "name=value;..."
## string so the table serialises cleanly; sapply() returns the value of
## `outstr` (the last evaluated expression) for each element.
fits.best.txtout$param.list <- sapply(fits.best$param.list, function(l){
  # Make a long string with Paramname = Value
  outstr <- paste(names(l), as.character(l), sep = "=", collapse = ";")
})
## Rename the column named `old` to `new` (both given as strings),
## preserving column order.
##
## Replaces the deprecated `dplyr::rename_()`/`.dots` idiom (the underscore
## verbs were removed from modern dplyr) with a base-R rename.  Like
## rename_(), it errors when `old` is not a column of `dat`.
RenameCnameStr <- function(dat, old, new){
  if (!old %in% names(dat)) {
    stop("RenameCnameStr: column not found: ", old, call. = FALSE)
  }
  names(dat)[names(dat) == old] <- new
  dat
}
## Rename columns to long self-describing headers for the published
## supplementary tables, then drop the raw weights before writing.
fits.best.txtout <- RenameCnameStr(fits.best.txtout, "model", "Model Name (distinct rhythms are separated by semicolon, while shared rhythms separated by comma)")
fits.best.txtout <- RenameCnameStr(fits.best.txtout, "weight", "Posterior Probability")
fits.best.txtout <- RenameCnameStr(fits.best.txtout, "param.list", "Parameter List (Each name=value pair separated by semicolon. Tissue names denote intercept estimates for RNA-Seq or microarray. Phase-amp denotes rhythmic parameters for tissues. Shared tissue rhythms separated by comma.)")
fits.best.txtout$weight.raw <- NULL
write.table(fits.best.txtout, file = file.path(maindir, outf3), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
# write fits for WT KO
outf4 <- "4_LivKidWTKO-Model-Selection.txt"
fits.long.filt.txtout <- fits.long.filt; fits.long.filt.txtout$param.list <- NULL
## Same flattening of the parameter list as for the Hogenesch fits above.
fits.long.filt.txtout$param.list <- sapply(fits.long.filt$param.list, function(l){
  outstr <- paste(names(l), as.character(l), sep = "=", collapse = ";")
})
fits.long.filt.txtout <- RenameCnameStr(fits.long.filt.txtout, "model", "Model Name (distinct rhythms are separated by semicolon, while shared rhythms separated by comma)")
fits.long.filt.txtout <- RenameCnameStr(fits.long.filt.txtout, "weight", "Posterior Probability")
fits.long.filt.txtout <- RenameCnameStr(fits.long.filt.txtout, "param.list", "Parameter List (Each name=value pair separated by semicolon). Tissue names denote intercept estimates for RNA-Seq or microarray. Phase-amp denotes rhythmic parameters for tissues. Shared tissue rhythms separated by comma.)")
fits.long.filt.txtout$weight.raw <- NULL
write.table(fits.long.filt.txtout, file = file.path(maindir, outf4), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
|
/scripts/primetime_figures/make_tables_for_paper.R
|
no_license
|
jakeyeung/Yeung_et_al_2018_TissueSpecificity
|
R
| false
| false
| 6,419
|
r
|
# 2016-08-16
# Figures for paper: include hogenesch, liver kidney WTKO, nuclear proteomics, 4c-seq
# Jake Yeung
#
# Other links to scripts not shown here:
# /home/yeung/projects/tissue-specificity/scripts/pca/pca_adjusted_microarray.label_variance.for_paper.R
# /home/yeung/projects/tissue-specificity/scripts/fourier/total_variance.noise_floor.hogenesch_and_liverWTKO.R
# /home/yeung/projects/tissue-specificity/scripts/three_way_contingency_tables/downstream.three_way_contingency.R
## ---- Setup: libraries, helper sources and precomputed R objects ---------
## NOTE(review): rm(list = ls()) and the absolute setwd() make this script
## machine-specific and unsafe to source from other code; kept as-is for
## reproduction of the paper.
rm(list=ls())
start <- Sys.time()
library(ggplot2)
library(PMA)
# detach("package:dplyr", unload=TRUE) # sometimes necessary to solve some strange issues with PMA and dplyr
library(dplyr)
library(parallel)
setwd("/home/yeung/projects/tissue-specificity")
source("scripts/functions/PlotGeneAcrossTissues.R")
source("scripts/functions/PlotFunctions.R")
source("scripts/functions/NcondsFunctions.R")
source("scripts/functions/SvdFunctions.R")
source("scripts/functions/LoadActivitiesLong.R")
source("scripts/functions/LiverKidneyFunctions.R")
source("scripts/functions/PlotActivitiesFunctions.R")
# source("scripts/functions/AnalyzeGeneEnrichment.R")
source("scripts/functions/FourierFunctions.R")
source("scripts/functions/GetTFs.R")
source("scripts/functions/LdaFunctions.R")
source("scripts/functions/HandleMotifNames.R")
source("scripts/functions/RemoveP2Name.R")
source("scripts/functions/GetTopMotifs.R")
source("scripts/functions/NcondsAnalysisFunctions.R")
source("scripts/functions/ModelStrToModel.R")
source("scripts/functions/ProteomicsFunctions.R")
source("scripts/functions/CosSineFunctions.R")
# Inits -------------------------------------------------------------------
remove.kidney.outliers <- TRUE
remove.wfat <- TRUE
plot.i <- 1
tissue.order <- c('Liver','BFAT','Kidney','Lung','Adr','Mus','Heart','Aorta','Hypo','Cere','BS')
plot.dir <- "/home/yeung/projects/tissue-specificity/plots/primetime_plots_full_paper"
dir.create(plot.dir, showWarnings = FALSE)
tfs <- GetTFs(get.mat.only = TRUE)
jmeth <- "g=1001"
## Load precomputed expression data and model-selection fits.
## NOTE(review): `v=T` relies on the reassignable shorthand T; prefer
## verbose=TRUE when refactoring.
load("Robjs/liver_kidney_atger_nestle/dat.freq.bugfixed.Robj", v=T) # LivKid
load("Robjs/liver_kidney_atger_nestle/fits.long.multimethod.filtbest.staggeredtimepts.bugfixed.annotated.Robj", v=T)
fits.long.filt <- subset(fits.long.filt, method == jmeth)
load("Robjs/liver_kidney_atger_nestle/dat.long.liverkidneyWTKO.bugfixed.Robj", v=T); dat.wtko <- dat.long; rm(dat.long)
load("Robjs/liver_kidney_atger_nestle/fits.bytiss.bugfixed.Robj", v=T)
load("Robjs/dat.long.fixed_rik_genes.Robj", v=T) # hogenesch
# model selection with g1000
load("Robjs/nconds_g1000_11_tissues/fits_long.11_tiss_3_max.g1000.bestmodel.filteramp.0.15.Robj", v=T) # use this, looks same as fits.best should be OK?
load("Robjs/dat.complex.fixed_rik_genes.Robj", v=T)
if (remove.wfat){
  dat.complex <- subset(dat.complex, tissue != "WFAT")
  dat.long <- subset(dat.long, tissue != "WFAT")
}
## Annotate each model string with its parameter count and number of
## rhythmic tissue groups.
fits.long.filt$n.params <- sapply(fits.long.filt$model, function(m) return(length(strsplit(as.character(m), ";")[[1]])))
fits.long.filt$n.rhyth <- sapply(fits.long.filt$model, GetNrhythFromModel)
# Remove kidney outliers (optional)
if (remove.kidney.outliers){
  # Kidney_SV129 genes contain some weird outliers, remove them
  outliers <- c("Cox8b", "Ucp1", "Cidea", "Flg", "Nr4a2")
  fits.long.filt <- subset(fits.long.filt, !gene %in% outliers)
  dat.wtko <- subset(dat.wtko, !gene %in% outliers)
}
dat.wtko <- StaggeredTimepointsLivKid(dat.wtko)
dat.wtko.collapsed <- CollapseTissueGeno(dat.wtko)
# load proteomics
prot.long <- LoadProteomicsData()
#
load("Robjs/fits.best.max_3.collapsed_models.amp_cutoff_0.15.phase_sd_maxdiff_avg.Robj", v=T)
# Do tables for paper -----------------------------------------------------
maindir <- "/home/yeung/projects/tissue-specificity/tables"
# write microarray-RNASeq merged table
outf <- "1_Hogenesch-Microarray-RNASeq-Merged.txt"
write.table(dat.long, file = file.path(maindir, outf), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
# write RNA-Seq WT-KO table
outf2 <- "2_Liver-Kidney-WTKO-RNASeq.txt"
write.table(subset(dat.wtko, !is.na(gene)), file = file.path(maindir, outf2), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
# write fits for Hogenesch
outf3 <- "3_Hogenesch-Model-Selection.txt"
fits.best.txtout <- fits.best; fits.best.txtout$param.list <- NULL
## Flatten each per-gene parameter list into a single "name=value;..."
## string so the table serialises cleanly; sapply() returns the value of
## `outstr` (the last evaluated expression) for each element.
fits.best.txtout$param.list <- sapply(fits.best$param.list, function(l){
  # Make a long string with Paramname = Value
  outstr <- paste(names(l), as.character(l), sep = "=", collapse = ";")
})
## Rename the column named `old` to `new` (both given as strings),
## preserving column order.
##
## Replaces the deprecated `dplyr::rename_()`/`.dots` idiom (the underscore
## verbs were removed from modern dplyr) with a base-R rename.  Like
## rename_(), it errors when `old` is not a column of `dat`.
RenameCnameStr <- function(dat, old, new){
  if (!old %in% names(dat)) {
    stop("RenameCnameStr: column not found: ", old, call. = FALSE)
  }
  names(dat)[names(dat) == old] <- new
  dat
}
## Rename columns to long self-describing headers for the published
## supplementary tables, then drop the raw weights before writing.
fits.best.txtout <- RenameCnameStr(fits.best.txtout, "model", "Model Name (distinct rhythms are separated by semicolon, while shared rhythms separated by comma)")
fits.best.txtout <- RenameCnameStr(fits.best.txtout, "weight", "Posterior Probability")
fits.best.txtout <- RenameCnameStr(fits.best.txtout, "param.list", "Parameter List (Each name=value pair separated by semicolon. Tissue names denote intercept estimates for RNA-Seq or microarray. Phase-amp denotes rhythmic parameters for tissues. Shared tissue rhythms separated by comma.)")
fits.best.txtout$weight.raw <- NULL
write.table(fits.best.txtout, file = file.path(maindir, outf3), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
# write fits for WT KO
outf4 <- "4_LivKidWTKO-Model-Selection.txt"
fits.long.filt.txtout <- fits.long.filt; fits.long.filt.txtout$param.list <- NULL
## Same flattening of the parameter list as for the Hogenesch fits above.
fits.long.filt.txtout$param.list <- sapply(fits.long.filt$param.list, function(l){
  outstr <- paste(names(l), as.character(l), sep = "=", collapse = ";")
})
fits.long.filt.txtout <- RenameCnameStr(fits.long.filt.txtout, "model", "Model Name (distinct rhythms are separated by semicolon, while shared rhythms separated by comma)")
fits.long.filt.txtout <- RenameCnameStr(fits.long.filt.txtout, "weight", "Posterior Probability")
fits.long.filt.txtout <- RenameCnameStr(fits.long.filt.txtout, "param.list", "Parameter List (Each name=value pair separated by semicolon). Tissue names denote intercept estimates for RNA-Seq or microarray. Phase-amp denotes rhythmic parameters for tissues. Shared tissue rhythms separated by comma.)")
fits.long.filt.txtout$weight.raw <- NULL
write.table(fits.long.filt.txtout, file = file.path(maindir, outf4), quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
|
# find primary eQTLs:
# for each eGene/emiR (gene that has at least 1 eSNP below the eigenmt-bh corrected p-value)
# find SNP with lowest p-value
library(here)
library(readr)
library(dplyr)
library(magrittr)
#library(DESeq2)
#library(GenomicRanges)
library(mikelaffr)
date.prefix <- format(Sys.time(), "%Y%m%d")
## NOTE(review): the formatted date above is immediately overwritten, pinning
## the run to a fixed results directory for reproducibility.
date.prefix <- "20200120"
# association results directory name
results.name <- "20200120_mirQTLor"
# OUTPUT FILES ########################################################################################################
# output directory
output.dir <- paste0(here("results/conditional_eqtls/"), results.name, "/primary/")
dir.create(output.dir, recursive = TRUE, showWarnings = FALSE)
# eQTL output file
eqtl.df.rds <- paste0(output.dir, results.name, "_primary_eQTLs_dataFrame.rds")
# primary eSNPs to get genotypes using plink
esnps.txt <- paste0(output.dir, results.name, "_primary_eSNPs.txt")
# emiR output file
#emir.df.rds <- paste0(output.dir, results.name, "_emiRs_dataFrame.rds")
# INPUT FILES ##########################################################################################################
# eigenMT-BH nominal p-value
nominal.p.value.txt <- here("results/eigenmt/20200120_mirQTLor/compiled/20200120_mirQTLor_eigenMT-BH_nomPvalue.txt")
# Summarized association results for every variant within 1MB of each expressed miRNA
summarized.results.dataframe.rds <- here("results/emmax/association_results/20200120_mirQTLor/compiled/20200120_mirQTLor_variants_dataFrame.rds")
# vsd
#vsd.rds <- here("results/emmax/phenotype_files/20200120_mirQTLor_VST_miRNA_expression_residual/20200120_mirQTLor_VST_miRNA_expression_and_residual_rse.rds")
# GLOBALS ##############################################################################################################
# CHROMS <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12",
#             "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX")
# Seqinfo object
#SEQINFO <- Seqinfo(genome = "hg38")[CHROMS]
# Import Summarized Data ###############################################################################################
# summarized results at each variant
df.results <- as_tibble(read_rds(summarized.results.dataframe.rds))
# nominal p-value from eigenMT-BH proceedure
nominal.p.value <- as.numeric(read_lines(nominal.p.value.txt))
# Find Primary eQTLs ###################################################################################################
# find emiRs (eGenes)
## NOTE(review): these pipelines end in right-assignment (`-> emirs`),
## which is easy to miss; prefer left-assignment when refactoring.
df.results %>%
    group_by(UniName) %>%
    summarise(min.pval = min(P)) %>%
    filter(min.pval <= nominal.p.value) %>%
    pull(UniName) -> emirs
# find primary eSNPs: lowest P at each emiR, also lowest base position in event of 2+ variants with minimum p-value
df.results %>%
    filter(UniName %in% emirs) %>%
    group_by(UniName) %>%
    filter(P == min(P)) %>%
    filter(BP.hg38 == min(BP.hg38)) -> df.primary.eqtls
print(paste("Number of emiRs:", length(emirs)))
print(paste("Number of primary eSNPs:", sum(!duplicated(df.primary.eqtls$SNP))))
print(paste("Number of primary eQTLs:", nrow(df.primary.eqtls)))
# Export Data ##########################################################################################################
write_rds(df.primary.eqtls, eqtl.df.rds)
write_lines(unique(df.primary.eqtls$SNP), esnps.txt)
|
/src/conditionally_independent_eqtls/01.1_find_primary_eqtls.R
|
no_license
|
mikelaff/mirna-eqtl-manuscript
|
R
| false
| false
| 3,409
|
r
|
# find primary eQTLs:
# for each eGene/emiR (gene that has at least 1 eSNP below the eigenmt-bh corrected p-value)
# find SNP with lowest p-value
library(here)
library(readr)
library(dplyr)
library(magrittr)
#library(DESeq2)
#library(GenomicRanges)
library(mikelaffr)
# NOTE(review): the dynamic date is immediately overridden by a hard-coded
# analysis date so output paths stay stable across reruns
date.prefix <- format(Sys.time(), "%Y%m%d")
date.prefix <- "20200120"
# association results directory name
results.name <- "20200120_mirQTLor"
# OUTPUT FILES ########################################################################################################
# output directory (created if missing)
output.dir <- paste0(here("results/conditional_eqtls/"), results.name, "/primary/")
dir.create(output.dir, recursive = TRUE, showWarnings = FALSE)
# eQTL output file
eqtl.df.rds <- paste0(output.dir, results.name, "_primary_eQTLs_dataFrame.rds")
# primary eSNPs to get genotypes using plink
esnps.txt <- paste0(output.dir, results.name, "_primary_eSNPs.txt")
# emiR output file
#emir.df.rds <- paste0(output.dir, results.name, "_emiRs_dataFrame.rds")
# INPUT FILES ##########################################################################################################
# eigenMT-BH nominal p-value
nominal.p.value.txt <- here("results/eigenmt/20200120_mirQTLor/compiled/20200120_mirQTLor_eigenMT-BH_nomPvalue.txt")
# Summarized association results for every variant within 1MB of each expressed miRNA
summarized.results.dataframe.rds <- here("results/emmax/association_results/20200120_mirQTLor/compiled/20200120_mirQTLor_variants_dataFrame.rds")
# vsd
#vsd.rds <- here("results/emmax/phenotype_files/20200120_mirQTLor_VST_miRNA_expression_residual/20200120_mirQTLor_VST_miRNA_expression_and_residual_rse.rds")
# GLOBALS ##############################################################################################################
# CHROMS <- c("chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12",
#             "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX")
# Seqinfo object
#SEQINFO <- Seqinfo(genome = "hg38")[CHROMS]
# Import Summarized Data ###############################################################################################
# summarized association results at each variant
df.results <- as_tibble(read_rds(summarized.results.dataframe.rds))
# nominal p-value threshold from the eigenMT-BH procedure
nominal.p.value <- as.numeric(read_lines(nominal.p.value.txt))
# Find Primary eQTLs ###################################################################################################
# find emiRs (eGenes): miRNAs whose best (minimum) association p-value passes the nominal threshold
df.results %>%
    group_by(UniName) %>%
    summarise(min.pval = min(P)) %>%
    filter(min.pval <= nominal.p.value) %>%
    pull(UniName) -> emirs
# find primary eSNPs: lowest P at each emiR, also lowest base position in event of 2+ variants with minimum p-value
# (the second filter() breaks p-value ties deterministically by genomic position)
df.results %>%
    filter(UniName %in% emirs) %>%
    group_by(UniName) %>%
    filter(P == min(P)) %>%
    filter(BP.hg38 == min(BP.hg38)) -> df.primary.eqtls
print(paste("Number of emiRs:", length(emirs)))
print(paste("Number of primary eSNPs:", sum(!duplicated(df.primary.eqtls$SNP))))
print(paste("Number of primary eQTLs:", nrow(df.primary.eqtls)))
# Export Data ##########################################################################################################
# save the primary eQTL table and write the unique eSNP IDs for genotype extraction with plink
write_rds(df.primary.eqtls, eqtl.df.rds)
write_lines(unique(df.primary.eqtls$SNP), esnps.txt)
|
# Estimate per-variable importance via repeated cross-validation.
#
# For each of `runs` repetitions the rows of `newdata` are assigned to
# `xval` folds; per fold, a single-variable split is fit on each column of
# the training rows (splitt(), defined elsewhere in the package) and its
# deviance is evaluated out-of-bag through the compiled "Dev_oob" routine.
#
# Args:
#   newdata: data frame of predictor columns.
#   y:       response vector aligned with the rows of newdata.
#   runs:    number of cross-validation repetitions (0 allowed; yields medians of zeros).
#
# Returns a list with per-variable medians over all run/fold combinations:
#   dev.tr (training deviance) and dev.test (out-of-bag deviance).
importance <- function(newdata,y,runs){
    xval <- 4  # fixed number of cross-validation folds
    if(is.data.frame(newdata))
        newdata <- data.frame(newdata)  # normalize data-frame subclasses
    n <- nrow(newdata)
    # preallocate one row per (run, fold) combination, one column per variable
    CVL <- array(0,dim=c(xval*runs,ncol(newdata)))
    CV <- array(0,dim=c(xval*runs,ncol(newdata)))
    xgr <- seq_len(xval)
    v <- 1
    # seq_len() avoids the 1:0 trap of the original 1:runs when runs == 0
    for(k in seq_len(runs)){
        # randomly assign every row to one of the xval folds
        id <- sample(rep(xgr, length.out = n), n)
        for(j in xgr) {
            test <- id == j
            train <- !test
            # drop = FALSE keeps a one-column data frame from collapsing to a
            # vector, which would make lapply() iterate over elements instead
            # of columns
            S <- lapply(newdata[train, , drop = FALSE],splitt,y[train])
            # out-of-bag deviance of each per-variable split, computed in C
            CV[v,] <- .Call("Dev_oob",S,newdata[test, , drop = FALSE],as.numeric(y[test]),PACKAGE="TWIX")
            # training deviance of each per-variable split
            CVL[v,] <- unlist(lapply(S,function(x)(x$dev[1])))
            v <- v+1
        }
    }
    list(dev.tr=apply(CVL,2,median), dev.test=apply(CV,2,median))
}
|
/R/importance.R
|
no_license
|
cran/TWIX
|
R
| false
| false
| 719
|
r
|
# Estimate per-variable importance via repeated cross-validation.
#
# For each of `runs` repetitions the rows of `newdata` are assigned to
# `xval` folds; per fold, a single-variable split is fit on each column of
# the training rows (splitt(), defined elsewhere in the package) and its
# deviance is evaluated out-of-bag through the compiled "Dev_oob" routine.
#
# Args:
#   newdata: data frame of predictor columns.
#   y:       response vector aligned with the rows of newdata.
#   runs:    number of cross-validation repetitions (0 allowed; yields medians of zeros).
#
# Returns a list with per-variable medians over all run/fold combinations:
#   dev.tr (training deviance) and dev.test (out-of-bag deviance).
importance <- function(newdata,y,runs){
    xval <- 4  # fixed number of cross-validation folds
    if(is.data.frame(newdata))
        newdata <- data.frame(newdata)  # normalize data-frame subclasses
    n <- nrow(newdata)
    # preallocate one row per (run, fold) combination, one column per variable
    CVL <- array(0,dim=c(xval*runs,ncol(newdata)))
    CV <- array(0,dim=c(xval*runs,ncol(newdata)))
    xgr <- seq_len(xval)
    v <- 1
    # seq_len() avoids the 1:0 trap of the original 1:runs when runs == 0
    for(k in seq_len(runs)){
        # randomly assign every row to one of the xval folds
        id <- sample(rep(xgr, length.out = n), n)
        for(j in xgr) {
            test <- id == j
            train <- !test
            # drop = FALSE keeps a one-column data frame from collapsing to a
            # vector, which would make lapply() iterate over elements instead
            # of columns
            S <- lapply(newdata[train, , drop = FALSE],splitt,y[train])
            # out-of-bag deviance of each per-variable split, computed in C
            CV[v,] <- .Call("Dev_oob",S,newdata[test, , drop = FALSE],as.numeric(y[test]),PACKAGE="TWIX")
            # training deviance of each per-variable split
            CVL[v,] <- unlist(lapply(S,function(x)(x$dev[1])))
            v <- v+1
        }
    }
    list(dev.tr=apply(CVL,2,median), dev.test=apply(CV,2,median))
}
|
# Auto-extracted example code for wikisourcer::wikisource_book().
library(wikisourcer)
### Name: wikisource_book
### Title: Download a book from Wikisource
### Aliases: wikisource_book
### ** Examples
## Not run: 
##D 
##D # download Voltaire's "Candide"
##D wikisource_book("https://en.wikisource.org/wiki/Candide")
##D 
##D # download "Candide" in French and Spanish
##D library(purrr)
##D 
##D fr <- "https://fr.wikisource.org/wiki/Candide,_ou_l%E2%80%99Optimisme/Garnier_1877"
##D es <- "https://es.wikisource.org/wiki/C%C3%A1ndido,_o_el_optimismo"
##D books <- map_df(c(fr, es), wikisource_book)
## End(Not run)
|
/data/genthat_extracted_code/wikisourcer/examples/wikisource_book.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 560
|
r
|
# Auto-extracted example code for wikisourcer::wikisource_book().
library(wikisourcer)
### Name: wikisource_book
### Title: Download a book from Wikisource
### Aliases: wikisource_book
### ** Examples
## Not run: 
##D 
##D # download Voltaire's "Candide"
##D wikisource_book("https://en.wikisource.org/wiki/Candide")
##D 
##D # download "Candide" in French and Spanish
##D library(purrr)
##D 
##D fr <- "https://fr.wikisource.org/wiki/Candide,_ou_l%E2%80%99Optimisme/Garnier_1877"
##D es <- "https://es.wikisource.org/wiki/C%C3%A1ndido,_o_el_optimismo"
##D books <- map_df(c(fr, es), wikisource_book)
## End(Not run)
|
## May wish to set working directory and/or datadir
# setwd(...) # Working directory containing scripts
# DataDir=... # Where data is stored permanently
library(utils)
library(sva) # ComBat() batch correction
library(limma)
library(lme4) # original lmer
library(lmerTest) # used to get P values using lmer+anova
library(nlme) # used for lme
# Include scripts (model-fitting functions and helpers such as select_data(),
# N.sign(), Log.print(), plus the data objects data.all.* and labels.all)
source('reply_models.R')
source('reply_subroutines.R')
source('reply_data.R')
### Data selection
#NAME='All'; models.fast=F; # All methods included
NAME='Some'; models.fast=T; # Only some of the methods included
#plot(data.all.avg,data.all.sd,cex=0.1)
# subsample 1000 rows -- select_data() is defined in the sourced scripts,
# TODO confirm its exact semantics against reply_data.R
data <- select_data(1000) # sample=N to subsample
#data <- select_data(sd.min=.5) # select subset av expression avg or sd
#data <- select_data(subset=which(data.all.sd<.5),subset.tags="Slt0.5")
cat("Data size:",dim(data))
# Get labels for selected data, aligned to the columns of the expression matrix
labels <- labels.all
labels <- labels[match(colnames(data),labels$SLOT),]
# chip id = SLOT id with its last two characters removed
labels$chip <- factor(substr(as.character(labels$SLOT),1,nchar(as.character(labels$SLOT))-2))
# restrict the analysis to the two treatment groups of interest
labels$include <- labels$CLASS %in% c('GA DP','GA Q')
# Disabled toggle: set to TRUE to keep only batches containing both groups
if (F) { # only include batches that contain samples from both groups
  commonbatches <- intersect(labels$BATCH[labels$CLASS=='GA DP'],labels$BATCH[labels$CLASS=='GA Q'])
  labels$include <- labels$include & labels$BATCH %in% commonbatches
  attr(data,'log')$tags <- c(attr(data,'log')$tags,'commonbatches')
}
# Randomise data
#data <- permute_dp_q_samples(data,labels,rows=T,seed=1004)
Log.print(data)
### Continue normalisation and averaging
# ComBat batch correction with treatment class as a model covariate
combat_data <- ComBat(data,batch = labels$BATCH,mod = model.matrix(~labels$CLASS))
# average technical replicates (columns sharing the same chip id)
combat_data_averaged <- avearrays(combat_data,ID = labels$chip)
# one label row per chip; columns 2,4,5,6 of labels -- TODO confirm which
# columns these are against reply_data.R
labels_averaged <- unique(labels[,c(2,4,5,6)]);
data_averaged <- avearrays(data,ID = labels$chip)
## Final data sets for analysis
# replicate-level data restricted to the two groups
dp_vs_q <- list();
dp_vs_q$mat <- data[,labels$include]
dp_vs_q$labels <- labels[labels$include,]
# chip-averaged, ComBat-corrected data
dp_vs_q_averaged <- list();
dp_vs_q_averaged$mat <- combat_data_averaged[,labels_averaged$include]
dp_vs_q_averaged$labels <- labels_averaged[labels_averaged$include,]
# chip-averaged data without ComBat correction
dp_vs_q_nocombat_averaged <- list();
dp_vs_q_nocombat_averaged$mat <- data_averaged[,labels_averaged$include]
dp_vs_q_nocombat_averaged$labels <- labels_averaged[labels_averaged$include,]
# Original models (per-probe fits; ranova()/lmem() come from reply_models.R)
ranova_orig_out <- apply(dp_vs_q$mat,1,ranova,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='original')
ranova_equiv_out <- apply(dp_vs_q$mat,1,ranova,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='equivalent')
lme4_orig_out <- apply(dp_vs_q$mat,1,lmem,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='original')
# Adjusted/corrected models
ranova_out <- apply(dp_vs_q$mat,1,ranova,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='correct') # type=2 is correct model
lme4_out <- apply(dp_vs_q$mat,1,lmem,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='correct') # type=1 is correct model
nlme_out <- apply(dp_vs_q$mat,1,lmem,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='nlme') # type=2 is nlme model
limma_out <- limma_dupcorr(dp_vs_q$mat,lab = dp_vs_q$labels)
# 1-way ANOVA
limma_noblocking_out <- limma_noblocking(inmat = dp_vs_q_averaged$mat,lab = dp_vs_q_averaged$labels)
oneanova_out <- apply(dp_vs_q_averaged$mat,1,oneanova,lab=dp_vs_q_averaged$labels$CLASS)
# 2-way ANOVA
limma_blocking_out <- limma_blocking(inmat = dp_vs_q_nocombat_averaged$mat,lab = dp_vs_q_nocombat_averaged$labels)
twowayanova_blocking_out <- apply(dp_vs_q_nocombat_averaged$mat,1,twowayanova_blocking,batch=dp_vs_q_nocombat_averaged$labels$BATCH,lab=dp_vs_q_nocombat_averaged$labels$CLASS)
# 2-way ANOVA after ComBat
limma_combat_blocking_out <- limma_blocking(inmat = dp_vs_q_averaged$mat,lab = dp_vs_q_averaged$labels)
twowayanova_combat_blocking_out <- apply(dp_vs_q_averaged$mat,1,twowayanova_blocking,batch=dp_vs_q_averaged$labels$BATCH,lab=dp_vs_q_averaged$labels$CLASS)
# sample counts per class, and per batch x class
table(factor(dp_vs_q_averaged$labels$CLASS))
table(factor(dp_vs_q_averaged$labels$BATCH),factor(dp_vs_q_averaged$labels$CLASS))
# Summary table: N.sign() counts per method (rows) and model variant
# (columns); NA marks method/variant combinations that are not applicable.
result_summary <- data.frame(
  "Original, faulty, models with replicates and blocking"=
    c(N.sign(limma_out),N.sign(ranova_orig_out),N.sign(lme4_orig_out),NA),
  "Actual model, equivalent to original model"=
    c(NA,N.sign(ranova_equiv_out),NA,NA),
  "Corrected model with replicates and CHIP blocking"=
    c(NA,N.sign(ranova_out),N.sign(lme4_out),N.sign(nlme_out)),
  "One-way: averaged replicates, ComBat with treatments as covariates"=
    c(N.sign(limma_noblocking_out),N.sign(oneanova_out),NA,NA),
  "Two-way: averaged replicates, CHIP blocking"=
    c(N.sign(limma_blocking_out),N.sign(twowayanova_blocking_out),NA,NA),
  "Two-way: averaged replicates, ComBat, CHIP blocking"=
    c(N.sign(limma_combat_blocking_out),N.sign(twowayanova_combat_blocking_out),NA,NA),
  row.names = c('limma','anova','lme4','nlme')
)
print(t(result_summary))
cat('Consensus correlation:',attr(limma_out,'consensus'))
### Similarity between methods
# collect the per-probe results of every method (presumably p-values, given
# the N.sign() counts above) into one data frame
P=data.frame(limma.dupcor=c(limma_out),ranova.orig=ranova_orig_out,ranova.actual=ranova_equiv_out,lme4.orig=lme4_orig_out,
             ranova=ranova_out,lme4=lme4_out,nlme=nlme_out,
             limma.oneway=c(limma_noblocking_out),anova.oneway=oneanova_out,
             limma.twoway=c(limma_blocking_out),anova.twoway=twowayanova_blocking_out,
             limma.combat=c(limma_combat_blocking_out),anova.combat=twowayanova_combat_blocking_out)
# drop methods that produced only NA (e.g. ones skipped when models.fast=T)
P.nonmissing <- apply(!is.na(P),2,sum)>0
P.red <- P[,P.nonmissing]
# pairwise distances between methods on the -log p scale
# (dist2() comes from the loaded/sourced helpers -- TODO confirm)
P.dist=dist2(-log(1e-10+P.red))
-log(P.dist)/log(10)*10
# NOTE(review): the random subsample below is immediately overwritten by the
# sd-based selection on the next line
plot.sample=sample(nrow(P.red),min(nrow(P.red),1000))
plot.sample=which(apply(data,1,sd)>.5)
pairs((P.red[plot.sample,]),diag.panel=panel.hist,cex=.1)
if (FALSE) { # coloured subgroups
  plot.grp=apply(data,1,mean)>7
  pairs((P.red[plot.sample,]),diag.panel=panel.hist,bg=c('red','green')[factor(plot.grp[plot.sample])],cex=.1,pch=21)
}
### Save results
#store.results(get.tag.file(data,name=NAME))
|
/response_to_towfic/reply.R
|
no_license
|
ramit29/batch-adjust-warning-figures
|
R
| false
| false
| 6,097
|
r
|
## May wish to set working directory and/or datadir
# setwd(...) # Working directory containing scripts
# DataDir=... # Where data is stored permanently
library(utils)
library(sva) # ComBat() batch correction
library(limma)
library(lme4) # original lmer
library(lmerTest) # used to get P values using lmer+anova
library(nlme) # used for lme
# Include scripts (model-fitting functions and helpers such as select_data(),
# N.sign(), Log.print(), plus the data objects data.all.* and labels.all)
source('reply_models.R')
source('reply_subroutines.R')
source('reply_data.R')
### Data selection
#NAME='All'; models.fast=F; # All methods included
NAME='Some'; models.fast=T; # Only some of the methods included
#plot(data.all.avg,data.all.sd,cex=0.1)
# subsample 1000 rows -- select_data() is defined in the sourced scripts,
# TODO confirm its exact semantics against reply_data.R
data <- select_data(1000) # sample=N to subsample
#data <- select_data(sd.min=.5) # select subset av expression avg or sd
#data <- select_data(subset=which(data.all.sd<.5),subset.tags="Slt0.5")
cat("Data size:",dim(data))
# Get labels for selected data, aligned to the columns of the expression matrix
labels <- labels.all
labels <- labels[match(colnames(data),labels$SLOT),]
# chip id = SLOT id with its last two characters removed
labels$chip <- factor(substr(as.character(labels$SLOT),1,nchar(as.character(labels$SLOT))-2))
# restrict the analysis to the two treatment groups of interest
labels$include <- labels$CLASS %in% c('GA DP','GA Q')
# Disabled toggle: set to TRUE to keep only batches containing both groups
if (F) { # only include batches that contain samples from both groups
  commonbatches <- intersect(labels$BATCH[labels$CLASS=='GA DP'],labels$BATCH[labels$CLASS=='GA Q'])
  labels$include <- labels$include & labels$BATCH %in% commonbatches
  attr(data,'log')$tags <- c(attr(data,'log')$tags,'commonbatches')
}
# Randomise data
#data <- permute_dp_q_samples(data,labels,rows=T,seed=1004)
Log.print(data)
### Continue normalisation and averaging
# ComBat batch correction with treatment class as a model covariate
combat_data <- ComBat(data,batch = labels$BATCH,mod = model.matrix(~labels$CLASS))
# average technical replicates (columns sharing the same chip id)
combat_data_averaged <- avearrays(combat_data,ID = labels$chip)
# one label row per chip; columns 2,4,5,6 of labels -- TODO confirm which
# columns these are against reply_data.R
labels_averaged <- unique(labels[,c(2,4,5,6)]);
data_averaged <- avearrays(data,ID = labels$chip)
## Final data sets for analysis
# replicate-level data restricted to the two groups
dp_vs_q <- list();
dp_vs_q$mat <- data[,labels$include]
dp_vs_q$labels <- labels[labels$include,]
# chip-averaged, ComBat-corrected data
dp_vs_q_averaged <- list();
dp_vs_q_averaged$mat <- combat_data_averaged[,labels_averaged$include]
dp_vs_q_averaged$labels <- labels_averaged[labels_averaged$include,]
# chip-averaged data without ComBat correction
dp_vs_q_nocombat_averaged <- list();
dp_vs_q_nocombat_averaged$mat <- data_averaged[,labels_averaged$include]
dp_vs_q_nocombat_averaged$labels <- labels_averaged[labels_averaged$include,]
# Original models (per-probe fits; ranova()/lmem() come from reply_models.R)
ranova_orig_out <- apply(dp_vs_q$mat,1,ranova,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='original')
ranova_equiv_out <- apply(dp_vs_q$mat,1,ranova,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='equivalent')
lme4_orig_out <- apply(dp_vs_q$mat,1,lmem,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='original')
# Adjusted/corrected models
ranova_out <- apply(dp_vs_q$mat,1,ranova,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='correct') # type=2 is correct model
lme4_out <- apply(dp_vs_q$mat,1,lmem,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='correct') # type=1 is correct model
nlme_out <- apply(dp_vs_q$mat,1,lmem,lab=dp_vs_q$labels$CLASS,batch=dp_vs_q$labels$BATCH,sampid=dp_vs_q$labels$chip,type='nlme') # type=2 is nlme model
limma_out <- limma_dupcorr(dp_vs_q$mat,lab = dp_vs_q$labels)
# 1-way ANOVA
limma_noblocking_out <- limma_noblocking(inmat = dp_vs_q_averaged$mat,lab = dp_vs_q_averaged$labels)
oneanova_out <- apply(dp_vs_q_averaged$mat,1,oneanova,lab=dp_vs_q_averaged$labels$CLASS)
# 2-way ANOVA
limma_blocking_out <- limma_blocking(inmat = dp_vs_q_nocombat_averaged$mat,lab = dp_vs_q_nocombat_averaged$labels)
twowayanova_blocking_out <- apply(dp_vs_q_nocombat_averaged$mat,1,twowayanova_blocking,batch=dp_vs_q_nocombat_averaged$labels$BATCH,lab=dp_vs_q_nocombat_averaged$labels$CLASS)
# 2-way ANOVA after ComBat
limma_combat_blocking_out <- limma_blocking(inmat = dp_vs_q_averaged$mat,lab = dp_vs_q_averaged$labels)
twowayanova_combat_blocking_out <- apply(dp_vs_q_averaged$mat,1,twowayanova_blocking,batch=dp_vs_q_averaged$labels$BATCH,lab=dp_vs_q_averaged$labels$CLASS)
# sample counts per class, and per batch x class
table(factor(dp_vs_q_averaged$labels$CLASS))
table(factor(dp_vs_q_averaged$labels$BATCH),factor(dp_vs_q_averaged$labels$CLASS))
# Summary table: N.sign() counts per method (rows) and model variant
# (columns); NA marks method/variant combinations that are not applicable.
result_summary <- data.frame(
  "Original, faulty, models with replicates and blocking"=
    c(N.sign(limma_out),N.sign(ranova_orig_out),N.sign(lme4_orig_out),NA),
  "Actual model, equivalent to original model"=
    c(NA,N.sign(ranova_equiv_out),NA,NA),
  "Corrected model with replicates and CHIP blocking"=
    c(NA,N.sign(ranova_out),N.sign(lme4_out),N.sign(nlme_out)),
  "One-way: averaged replicates, ComBat with treatments as covariates"=
    c(N.sign(limma_noblocking_out),N.sign(oneanova_out),NA,NA),
  "Two-way: averaged replicates, CHIP blocking"=
    c(N.sign(limma_blocking_out),N.sign(twowayanova_blocking_out),NA,NA),
  "Two-way: averaged replicates, ComBat, CHIP blocking"=
    c(N.sign(limma_combat_blocking_out),N.sign(twowayanova_combat_blocking_out),NA,NA),
  row.names = c('limma','anova','lme4','nlme')
)
print(t(result_summary))
cat('Consensus correlation:',attr(limma_out,'consensus'))
### Similarity between methods
# collect the per-probe results of every method (presumably p-values, given
# the N.sign() counts above) into one data frame
P=data.frame(limma.dupcor=c(limma_out),ranova.orig=ranova_orig_out,ranova.actual=ranova_equiv_out,lme4.orig=lme4_orig_out,
             ranova=ranova_out,lme4=lme4_out,nlme=nlme_out,
             limma.oneway=c(limma_noblocking_out),anova.oneway=oneanova_out,
             limma.twoway=c(limma_blocking_out),anova.twoway=twowayanova_blocking_out,
             limma.combat=c(limma_combat_blocking_out),anova.combat=twowayanova_combat_blocking_out)
# drop methods that produced only NA (e.g. ones skipped when models.fast=T)
P.nonmissing <- apply(!is.na(P),2,sum)>0
P.red <- P[,P.nonmissing]
# pairwise distances between methods on the -log p scale
# (dist2() comes from the loaded/sourced helpers -- TODO confirm)
P.dist=dist2(-log(1e-10+P.red))
-log(P.dist)/log(10)*10
# NOTE(review): the random subsample below is immediately overwritten by the
# sd-based selection on the next line
plot.sample=sample(nrow(P.red),min(nrow(P.red),1000))
plot.sample=which(apply(data,1,sd)>.5)
pairs((P.red[plot.sample,]),diag.panel=panel.hist,cex=.1)
if (FALSE) { # coloured subgroups
  plot.grp=apply(data,1,mean)>7
  pairs((P.red[plot.sample,]),diag.panel=panel.hist,bg=c('red','green')[factor(plot.grp[plot.sample])],cex=.1,pch=21)
}
### Save results
#store.results(get.tag.file(data,name=NAME))
|
# Script for plot 1: histogram of household Global Active Power for
# 2007-02-01 and 2007-02-02, written to plot1.png.
# Reading data: semicolon-separated file, "?" marks missing values.
data <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
# Keep only the two target days (Date is stored as d/m/Y text).
required_data1 <- data[ which( data$Date == "2/2/2007") , ]
required_data2 <- data[ which( data$Date == "1/2/2007") , ]
final_data <- rbind(required_data1, required_data2)
# Build a proper datetime from the Date and Time columns.
# (Fixed: the original passed a 2-column data frame straight to strptime()
# and used "%y" although the year has 4 digits, yielding NA timestamps.)
final_data$DateTime <- strptime(paste(final_data$Date, final_data$Time),
                                "%d/%m/%Y %H:%M:%S")
# Render the histogram to a PNG (default 480x480 device).
png("plot1.png")
hist(final_data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power(kilowatts)", col = "Red")
dev.off()
|
/plot1.R
|
no_license
|
mohitarora06/ExData_Plotting1
|
R
| false
| false
| 523
|
r
|
# Script for plot 1: histogram of household Global Active Power for
# 2007-02-01 and 2007-02-02, written to plot1.png.
# Reading data: semicolon-separated file, "?" marks missing values.
data <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
# Keep only the two target days (Date is stored as d/m/Y text).
required_data1 <- data[ which( data$Date == "2/2/2007") , ]
required_data2 <- data[ which( data$Date == "1/2/2007") , ]
final_data <- rbind(required_data1, required_data2)
# Build a proper datetime from the Date and Time columns.
# (Fixed: the original passed a 2-column data frame straight to strptime()
# and used "%y" although the year has 4 digits, yielding NA timestamps.)
final_data$DateTime <- strptime(paste(final_data$Date, final_data$Time),
                                "%d/%m/%Y %H:%M:%S")
# Render the histogram to a PNG (default 480x480 device).
png("plot1.png")
hist(final_data$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power(kilowatts)", col = "Red")
dev.off()
|
# Unit tests for lambdaR's shorthand lambda notation ("x: expr").
# NOTE(review): "x: x == \"F\"" is not a base-R lambda; it parses as
# (x:x) == "F" and is presumably captured unevaluated by a_find()/Find_()
# -- confirm against the lambdaR package internals.
context("Test for a_find()")
test_that("find F letter", {
  expect_equal(a_find(LETTERS, x: x == "F"), "F")
})
context("Test for Find_()")
test_that("find F letter", {
  expect_equal(Find_(LETTERS, x: x == "F"), "F")
})
|
/tests/testthat/test-a_find.R
|
permissive
|
parthasen/lambdaR
|
R
| false
| false
| 223
|
r
|
# Unit tests for lambdaR's shorthand lambda notation ("x: expr").
# NOTE(review): "x: x == \"F\"" is not a base-R lambda; it parses as
# (x:x) == "F" and is presumably captured unevaluated by a_find()/Find_()
# -- confirm against the lambdaR package internals.
context("Test for a_find()")
test_that("find F letter", {
  expect_equal(a_find(LETTERS, x: x == "F"), "F")
})
context("Test for Find_()")
test_that("find F letter", {
  expect_equal(Find_(LETTERS, x: x == "F"), "F")
})
|
# Divide a Dataframe into Cross Validation Subsets
#
# Args:
#   df:        data frame to split.
#   test_perc: fraction of rows (0..1) assigned to the test subset.
#   seed:      RNG seed for a reproducible split (default 123).
#
# Returns the rows of `df` (train rows first, then test rows) with an added
# character column `subset` ("train"/"test").
#
# Fixed relative to the original:
#   * no runtime install.packages()/library() side effects -- base R
#     suffices for adding a column and row-binding,
#   * test_perc == 0 now keeps all rows as "train" (df[-integer(0), ]
#     previously selected zero rows, emptying the result),
#   * the result is returned visibly (the last expression used to be an
#     assignment, which returns invisibly).
trn_tst <- function(df, test_perc, seed = 123) {
  set.seed(seed)
  n_test <- floor(nrow(df) * test_perc)  # whole number of test rows
  test_idx <- sort(sample(nrow(df), n_test))
  if (length(test_idx) > 0) {
    train <- df[-test_idx, , drop = FALSE]
    test <- df[test_idx, , drop = FALSE]
  } else {
    train <- df
    test <- df[0, , drop = FALSE]  # zero-row test set
  }
  train$subset <- rep("train", nrow(train))
  test$subset <- rep("test", nrow(test))
  rbind(train, test)
}
|
/train_test_split_func.R
|
no_license
|
OlivierNDO/stat_funcs_r
|
R
| false
| false
| 405
|
r
|
# Divide a Dataframe into Cross Validation Subsets
#
# Args:
#   df:        data frame to split.
#   test_perc: fraction of rows (0..1) assigned to the test subset.
#   seed:      RNG seed for a reproducible split (default 123).
#
# Returns the rows of `df` (train rows first, then test rows) with an added
# character column `subset` ("train"/"test").
#
# Fixed relative to the original:
#   * no runtime install.packages()/library() side effects -- base R
#     suffices for adding a column and row-binding,
#   * test_perc == 0 now keeps all rows as "train" (df[-integer(0), ]
#     previously selected zero rows, emptying the result),
#   * the result is returned visibly (the last expression used to be an
#     assignment, which returns invisibly).
trn_tst <- function(df, test_perc, seed = 123) {
  set.seed(seed)
  n_test <- floor(nrow(df) * test_perc)  # whole number of test rows
  test_idx <- sort(sample(nrow(df), n_test))
  if (length(test_idx) > 0) {
    train <- df[-test_idx, , drop = FALSE]
    test <- df[test_idx, , drop = FALSE]
  } else {
    train <- df
    test <- df[0, , drop = FALSE]  # zero-row test set
  }
  train$subset <- rep("train", nrow(train))
  test$subset <- rep("test", nrow(test))
  rbind(train, test)
}
|
#' Report model diagnostic statistics based on the PEST .res file.
#' This function requires a PEST .res file to be located in the PATH directory.
#'
#' @param PATH this is the full file path to the .res file. Falls back to
#'   the current working directory when NA.
#' @return A one-row data frame of weighted-residual diagnostics: mean,
#'   mean absolute value, standard deviation, sum of squares, RMSE,
#'   min/max, observation count, observed-value range, and range-scaled
#'   versions of the summary statistics.
#' @export
pstmodcal <- function(PATH){
    if(is.na(PATH)){
        PATH <- getwd()
    }
    # Weighted residuals (and their squares) from the PEST residuals file.
    res <- MFtools::readpstres(PATH) %>%
           dplyr::mutate(SQUARE_RES = `Weight*Residual`^2)
    # Observed-value range used to scale the summary statistics.
    max_obs <- max(res$Measured, na.rm = TRUE)
    min_obs <- min(res$Measured, na.rm = TRUE)
    range_val <- max_obs - min_obs
    # na.rm = TRUE throughout: the original omitted it for SSQ and RMSE
    # only, so a single missing residual turned just those two statistics
    # into NA, inconsistently with the other summaries.
    STATS <- res %>% dplyr::summarise(MEAN_RES = mean(`Weight*Residual`, na.rm = TRUE),
                                      MEAN_ABS_RES = mean(abs(`Weight*Residual`), na.rm = TRUE),
                                      ST_DEV = sd(`Weight*Residual`, na.rm = TRUE),
                                      SSQ = sum(SQUARE_RES, na.rm = TRUE),
                                      RMSE = mean(SQUARE_RES, na.rm = TRUE)^0.5,
                                      MIN = min(`Weight*Residual`, na.rm = TRUE),
                                      MAX = max(`Weight*Residual`, na.rm = TRUE),
                                      nOBS = n(),
                                      RANGE = range_val,
                                      SCALED_ST_DEV = ST_DEV / range_val,
                                      SCALED_MEAN_ABS_RES = MEAN_ABS_RES / range_val,
                                      SCALED_RMSE = RMSE / range_val,
                                      SCALED_MEAN_RES = MEAN_RES / range_val)
    return(STATS)
}
|
/R/pstmodcal.r
|
permissive
|
dpphat/MFtools
|
R
| false
| false
| 1,553
|
r
|
#' Report model diagnostic statistics based on the PEST .res file.
#' This function requires a PEST .res file to be located in the PATH directory.
#'
#' @param PATH this is the full file path to the .res file. Falls back to
#'   the current working directory when NA.
#' @return A one-row data frame of weighted-residual diagnostics: mean,
#'   mean absolute value, standard deviation, sum of squares, RMSE,
#'   min/max, observation count, observed-value range, and range-scaled
#'   versions of the summary statistics.
#' @export
pstmodcal <- function(PATH){
    if(is.na(PATH)){
        PATH <- getwd()
    }
    # Weighted residuals (and their squares) from the PEST residuals file.
    res <- MFtools::readpstres(PATH) %>%
           dplyr::mutate(SQUARE_RES = `Weight*Residual`^2)
    # Observed-value range used to scale the summary statistics.
    max_obs <- max(res$Measured, na.rm = TRUE)
    min_obs <- min(res$Measured, na.rm = TRUE)
    range_val <- max_obs - min_obs
    # na.rm = TRUE throughout: the original omitted it for SSQ and RMSE
    # only, so a single missing residual turned just those two statistics
    # into NA, inconsistently with the other summaries.
    STATS <- res %>% dplyr::summarise(MEAN_RES = mean(`Weight*Residual`, na.rm = TRUE),
                                      MEAN_ABS_RES = mean(abs(`Weight*Residual`), na.rm = TRUE),
                                      ST_DEV = sd(`Weight*Residual`, na.rm = TRUE),
                                      SSQ = sum(SQUARE_RES, na.rm = TRUE),
                                      RMSE = mean(SQUARE_RES, na.rm = TRUE)^0.5,
                                      MIN = min(`Weight*Residual`, na.rm = TRUE),
                                      MAX = max(`Weight*Residual`, na.rm = TRUE),
                                      nOBS = n(),
                                      RANGE = range_val,
                                      SCALED_ST_DEV = ST_DEV / range_val,
                                      SCALED_MEAN_ABS_RES = MEAN_ABS_RES / range_val,
                                      SCALED_RMSE = RMSE / range_val,
                                      SCALED_MEAN_RES = MEAN_RES / range_val)
    return(STATS)
}
|
# Copyright (C) 2011 Jelmer Ypma. All Rights Reserved.
# This code is published under the GPL.
#
# File: demo.R
# Author: Jelmer Ypma
# Date: 31 March 2011
#
# This code is based on the example from www.sparse-grids.de
# with permission from the authors.
#
# Compares the integration error of sparse-grid quadrature against plain
# Monte Carlo with the same number of nodes, for a separable Gaussian
# integrand over the unit hypercube.
library('SparseGrid')
# set seed of random number generator
set.seed( 3141 )
dimension <- 10 # dimensions
maxk <- 4 # max. accuracy level (pol. exactness will be 2k-1)
# integrand: some function that evaluates g(x): (R times D)->(R times 1)
# (row-wise product of independent N(0, sigma^2) densities, one per column)
func <- function( x, sigma=2 ) {
    return( apply( exp(-.5*(x/sigma)^2)/sqrt(2*pi*sigma^2), 1, prod ) )
}
# calculate "correct" result of integration between 0 and 1:
# P(0 < Z < 1)^dimension for Z ~ N(0, sd=2), by independence across dimensions
trueval <- (pnorm(1, sd=2) - pnorm(0, sd=2))^dimension
# create matrix to hold results
res <- matrix( NA, nrow=maxk-1, ncol=5 )
colnames( res ) <- c("D", "k", "nodes", "SG error", "Sim. error")
rownames( res ) <- rep( "", maxk-1 )
# loop over different accuracy levels
for ( k in 2:maxk ) {
    # sparse grids integration ('KPU': nested rule for unweighted
    # integration over the unit hypercube)
    tmp.grid <- createSparseGrid('KPU', dimension, k)
    x <- tmp.grid$nodes
    w <- tmp.grid$weights
    g <- func( x )
    SGappr <- sum(w * g)
    # relative error of the sparse-grid approximation
    SGerror <- sqrt((SGappr - trueval)^2) / trueval
    # simulation with the same number of nodes, 1000 simulation repetitions
    numnodes <- length(w)
    sim <- rep(0, 1000)
    for (r in 1:1000) {
        x <- matrix( runif( numnodes * dimension ), nrow=numnodes, ncol=dimension )
        g <- func( x )
        sim[ r ] <- mean( g ) # is sum(w * g) where weights are 1/numnodes
    }
    # relative root-mean-square error across the simulation repetitions
    Simerror = sqrt(mean((sim-trueval)^2)) / trueval
    # save results in row of matrix res
    res[k-1,] <- c(dimension, k, numnodes, SGerror, Simerror)
}
res
|
/inst/tests/demo.R
|
no_license
|
jyypma/SparseGrid
|
R
| false
| false
| 1,782
|
r
|
# Copyright (C) 2011 Jelmer Ypma. All Rights Reserved.
# This code is published under the GPL.
#
# File: demo.R
# Author: Jelmer Ypma
# Date: 31 March 2011
#
# This code is based on the example from www.sparse-grids.de
# with permission from the authors.
#
# Compares the integration error of sparse-grid quadrature against plain
# Monte Carlo with the same number of nodes, for a separable Gaussian
# integrand over the unit hypercube.
library('SparseGrid')
# set seed of random number generator
set.seed( 3141 )
dimension <- 10 # dimensions
maxk <- 4 # max. accuracy level (pol. exactness will be 2k-1)
# integrand: some function that evaluates g(x): (R times D)->(R times 1)
# (row-wise product of independent N(0, sigma^2) densities, one per column)
func <- function( x, sigma=2 ) {
    return( apply( exp(-.5*(x/sigma)^2)/sqrt(2*pi*sigma^2), 1, prod ) )
}
# calculate "correct" result of integration between 0 and 1:
# P(0 < Z < 1)^dimension for Z ~ N(0, sd=2), by independence across dimensions
trueval <- (pnorm(1, sd=2) - pnorm(0, sd=2))^dimension
# create matrix to hold results
res <- matrix( NA, nrow=maxk-1, ncol=5 )
colnames( res ) <- c("D", "k", "nodes", "SG error", "Sim. error")
rownames( res ) <- rep( "", maxk-1 )
# loop over different accuracy levels
for ( k in 2:maxk ) {
    # sparse grids integration ('KPU': nested rule for unweighted
    # integration over the unit hypercube)
    tmp.grid <- createSparseGrid('KPU', dimension, k)
    x <- tmp.grid$nodes
    w <- tmp.grid$weights
    g <- func( x )
    SGappr <- sum(w * g)
    # relative error of the sparse-grid approximation
    SGerror <- sqrt((SGappr - trueval)^2) / trueval
    # simulation with the same number of nodes, 1000 simulation repetitions
    numnodes <- length(w)
    sim <- rep(0, 1000)
    for (r in 1:1000) {
        x <- matrix( runif( numnodes * dimension ), nrow=numnodes, ncol=dimension )
        g <- func( x )
        sim[ r ] <- mean( g ) # is sum(w * g) where weights are 1/numnodes
    }
    # relative root-mean-square error across the simulation repetitions
    Simerror = sqrt(mean((sim-trueval)^2)) / trueval
    # save results in row of matrix res
    res[k-1,] <- c(dimension, k, numnodes, SGerror, Simerror)
}
res
|
# Package test runner: executes all testthat tests shipped with msmod.
library(testthat)
library(msmod)
test_check("msmod")
|
/tests/testthat.R
|
no_license
|
wkmor1/msmod
|
R
| false
| false
| 54
|
r
|
# Package test runner: executes all testthat tests shipped with msmod.
library(testthat)
library(msmod)
test_check("msmod")
|
# ui.R
# author: Iga Korneta
# version: 1.0
# created: June 1st, 2015
# UN General Assembly Voting Networks
#
# Shiny UI: title, sidebar with app metadata, a main tabset (network graph,
# quantitative output, four help tabs), and a bottom row of input widgets
# (restype/abstvote/cutoff/small/color/quantproperties) read by server.R.
#
# Fixed relative to the original: "Eric Voeten" -> "Erik Voeten" in the
# Help - Data text (consistent with the sidebar link) and the "Algieria"
# misspelling in the Help - Visualisation text.
library(shiny)
shinyUI(fluidPage(
  titlePanel("UN General Assembly Voting Networks (sessions 58-67 cumulative)"),
  fluidRow(
    # Left column: static information about the app.
    column(3,
           wellPanel(
             p(strong("Visualise the UN General Assembly Voting Networks.")),
             br(),
             p(strong("Author: "), a(href="mailto:iga.korneta@gmail.com", "Iga Korneta")),
             p(strong("Date: "), "July/August 2015"),
             p(strong("Code: "), a(href="http://github.com/ikorneta/unscapp", "Github")),
             p(strong("Data source:"), a(href="https://dataverse.harvard.edu/dataset.xhtml?persistentId=hdl:1902.1/12379", "Erik Voeten Dataverse")),
             p("See the", strong("Help"), "tabs for the explanation of what this visualisation is about, how I selected the data, and what the various options mean.")
           )),
    # Right column: output panels and help pages.
    column(9,
           tabsetPanel(
             tabPanel("Output",
                      column(8,
                             p(strong("Voting Networks Graph")),
                             plotOutput("unscgPlot"),
                             br()
                      ),
                      column(4,
                             p(strong("Network Properties")),
                             br(),
                             textOutput("unscgQuant0"),
                             verbatimTextOutput("unscgQuant1"),
                             verbatimTextOutput("unscgQuant2")
                      )
             ),
             tabPanel("Help - Data",
                      column(12,
                             p(strong("Data")),
                             p("The way countries vote in the UN General Assembly can tell us a bit about the shape and strength of alliances between countries."),
                             p("The data visualised here are the results of non-unanimous UN General Assembly votes from 2004-2012 (inclusive), i.e. sessions 58-67. They come from the", a(href="https://dataverse.harvard.edu/dataset.xhtml?persistentId=hdl:1902.1/12379", "Erik Voeten Dataverse"), ". The thematic classification is described in ", a(href="http://ssrn.com/abstract=2111149", "Voeten, E. (2012)"), ". I chose to include only resolution subtypes with the largest resolving power - human rights, colonialism and economy-related."),
                             p("The networks are constructed as follows:"),
                             p("- for each agreement in votes, +1 point is given; for each vote in which one country had a Yes/No vote and the other abstained, either +0.5 or +0.7 points (this is the option ", strong("Abstention vote weight"), ") are given; absences not taken into calculation;"),
                             p("- edges in the quantiles below the cutoff (e.g. the lowest 95%) are dropped (this is the option ", strong("Edge quantile cutoff"), "). Basically, the larger this value, the more edges are dropped."),
                             p("Countries are identified by their three-letter abbreviations. See the last tab.")
                      )),
             tabPanel("Help - Visualisation",
                      column(12,
                             p(strong("Visualisation options")),
                             p("You can hide lone nodes with the option ", strong("Show zero-degree nodes"), "."),
                             p("You can ", strong("colour"), " the countries by ", a(href="http://www.un.org/depts/DGACM/RegionalGroups.shtml", "UN Regional Groups"), ", by whether they are in the ", a(href="https://en.wikipedia.org/wiki/Permanent_members_of_the_United_Nations_Security_Council", "P5"), ", ", a(href="https://en.wikipedia.org/wiki/G4_nations", "G4"),", in the ", a(href="https://en.wikipedia.org/wiki/Uniting_for_Consensus", "United for Consensus core")," or in", a(href="http://www.centerforunreform.org/?q=node/541", "the ACT group"), ", or by membership in the various regional unions (Arab League, ASEAN, African Union, CARICOM, CIS, EU or UNASUR). Guyana and Suriname are members of UNASUR and CARICOM; coloured CARICOM. Algeria, Comoros, Djibouti, Egypt, Libya, Mauritania, Somalia, Sudan and Tunisia are members of the Arab League and the African Union; coloured Arab League. Costa Rica is part to UfC and of ACT; coloured ACT."),
                             p("UN Regional Groups colours: green: African; red: Asia-Pacific; orange: Eastern European; purple: Latin American; dark blue: Western European and Other States."),
                             p("Various interest groups colours: red: P5; orange: G4; brown: United for Consensus; blue: ACT."),
                             p("Economic unions colours: black: Arab League; red: ASEAN; green: African Union; pink: CARICOM; orange: CIS; blue: EU; purple: UNASUR."),
                             br(),
                             p("A fun thing to do is to slide the cutoff from the very left to the very right. Remember to switch on/off the zero-degree nodes!")
                      )
             ),
             tabPanel("Help - Quantitative properties",
                      column(12,
                             p(strong("Quantitative properties")),
                             p("Nodes with the ", strong("highest degree"), " have the most connections."),
                             p("Nodes with the ", strong("highest betweenness"), " are the most crucial to connecting different communities."),
                             p("Distinct ", strong("communities"), " are tightly-connected subnetworks of the main network."),
                             p("The ", strong("assortativity"), " coefficient is positive if similar vertices (based on some external property, in this case belonging to the same UN Regional Group or the same regional union) tend to connect to each other, and negative otherwise.")
                      )
             ),
             tabPanel("Help - Country codes",
                      column(3,
                             p(strong("Country codes")),
                             tableOutput("ccodes1")),
                      column(3,
                             p(br()),
                             tableOutput("ccodes2")),
                      column(3,
                             p(br()),
                             tableOutput("ccodes3")),
                      column(3,
                             p(br()),
                             tableOutput("ccodes4"))
             )
    ))
  ),
  # Bottom row: the input controls driving the network rendering.
  fluidRow(
    column(12,
      p(h4(strong('Pick the options:'))),
      column(3,
             radioButtons("restype", "Resolution type", choices=c("All", "Human Rights (sess. 58-66)", "Colonialism (sess. 58-66)", "Economy (sess. 58-66)"), selected="All")
      ),
      column(3,
             radioButtons("abstvote", "Abstention vote weight", choices=c("0.5", "0.7"), selected="0.5", inline=TRUE),
             br(),
             sliderInput("cutoff", "Edge quantile cutoff", value=0.9, min=0.5, max=0.99, step=0.01)
      ),
      column(3,
             radioButtons("small", "Show zero-degree nodes", choices=c("Yes", "No"), selected="Yes", inline=TRUE),
             br(),
             radioButtons("color", "Colour countries", choices=c("UN Regional Groups", "P5/G4/UfC(core)/ACT", "AU/EU/ASEAN/CIS/UNASUR/CARICOM/AL"), selected="UN Regional Groups")
      ),
      column(3,
             radioButtons("quantproperties", "Quantitative properties", choices=c("Nodes with highest degree", "Nodes with highest betweenness", "Communities", "Assortativity"), selected="Nodes with highest degree")
      )
    )
  )
))
|
/ui.R
|
no_license
|
ikorneta/unscapp
|
R
| false
| false
| 7,905
|
r
|
# ui.R
# author: Iga Korneta
# version: 1.0
# created: June 1st, 2015
# UN General Assembly Voting Networks
library(shiny)
shinyUI(fluidPage(
titlePanel("UN General Assembly Voting Networks (sessions 58-67 cumulative)"),
fluidRow(
column(3,
wellPanel(
p(strong("Visualise the UN General Assembly Voting Networks.")),
br(),
p(strong("Author: "), a(href="mailto:iga.korneta@gmail.com", "Iga Korneta")),
p(strong("Date: "), "July/August 2015"),
p(strong("Code: "), a(href="http://github.com/ikorneta/unscapp", "Github")),
p(strong("Data source:"), a(href="https://dataverse.harvard.edu/dataset.xhtml?persistentId=hdl:1902.1/12379", "Erik Voeten Dataverse")),
p("See the", strong("Help"), "tabs for the explanation of what this visualisation is about, how I selected the data, and what the various options mean.")
)),
column(9,
tabsetPanel(
tabPanel("Output",
column(8,
p(strong("Voting Networks Graph")),
plotOutput("unscgPlot"),
br()
),
column(4,
p(strong("Network Properties")),
br(),
textOutput("unscgQuant0"),
verbatimTextOutput("unscgQuant1"),
verbatimTextOutput("unscgQuant2")
)
),
tabPanel("Help - Data",
column(12,
p(strong("Data")),
p("The way countries vote in the UN General Assembly can tell us a bit about the shape and strength of alliances between countries."),
p("The data visualised here are the results of non-unanimous UN General Assembly votes from 2004-2012 (inclusive), i.e. sessions 58-67. They come from the", a(href="https://dataverse.harvard.edu/dataset.xhtml?persistentId=hdl:1902.1/12379", "Eric Voeten Dataverse"), ". The thematic classification is described in ", a(href="http://ssrn.com/abstract=2111149", "Voeten, E. (2012)"), ". I chose to include only resolution subtypes with the largest resolving power - human rights, colonialism and economy-related."),
p("The networks are constructed as follows:"),
p("- for each agreement in votes, +1 point is given; for each vote in which one country had a Yes/No vote and the other abstained, either +0.5 or +0.7 points (this is the option ", strong("Abstention vote weight"), ") are given; absences not taken into calculation;"),
p("- edges in the quantiles below the cutoff (e.g. the lowest 95%) are dropped (this is the option ", strong("Edge quantile cutoff"), "). Basically, the larger this value, the more edges are dropped."),
p("Countries are identified by their three-letter abbreviations. See the last tab.")
)),
tabPanel("Help - Visualisation",
column(12,
p(strong("Visualisation options")),
p("You can hide lone nodes with the option ", strong("Show zero-degree nodes"), "."),
p("You can ", strong("colour"), " the countries by ", a(href="http://www.un.org/depts/DGACM/RegionalGroups.shtml", "UN Regional Groups"), ", by whether they are in the ", a(href="https://en.wikipedia.org/wiki/Permanent_members_of_the_United_Nations_Security_Council", "P5"), ", ", a(href="https://en.wikipedia.org/wiki/G4_nations", "G4"),", in the ", a(href="https://en.wikipedia.org/wiki/Uniting_for_Consensus", "United for Consensus core")," or in", a(href="http://www.centerforunreform.org/?q=node/541", "the ACT group"), ", or by membership in the various regional unions (Arab League, ASEAN, African Union, CARICOM, CIS, EU or UNASUR). Guyana and Suriname are members of UNASUR and CARICOM; coloured CARICOM. Algieria, Comoros, Djibouti, Egypt, Libya, Mauritania, Somalia, Sudan and Tunisia are members of the Arab League and the African Union; coloured Arab League. Costa Rica is part to UfC and of ACT; coloured ACT."),
p("UN Regional Groups colours: green: African; red: Asia-Pacific; orange: Eastern European; purple: Latin American; dark blue: Western European and Other States."),
p("Various interest groups colours: red: P5; orange: G4; brown: United for Consensus; blue: ACT."),
p("Economic unions colours: black: Arab League; red: ASEAN; green: African Union; pink: CARICOM; orange: CIS; blue: EU; purple: UNASUR."),
br(),
p("A fun thing to do is to slide the cutoff from the very left to the very right. Remember to switch on/off the zero-degree nodes!")
)
),
tabPanel("Help - Quantitative properties",
column(12,
p(strong("Quantitative properties")),
p("Nodes with the ", strong("highest degree"), " have the most connections."),
p("Nodes with the ", strong("highest betweenness"), " are the most crucial to connecting different communities."),
p("Distinct ", strong("communities"), " are tightly-connected subnetworks of the main network."),
p("The ", strong("assortativity"), " coefficient is positive if similar vertices (based on some external property, in this case belonging to the same UN Regional Group or the same regional union) tend to connect to each other, and negative otherwise.")
)
),
tabPanel("Help - Country codes",
column(3,
p(strong("Country codes")),
tableOutput("ccodes1")),
column(3,
p(br()),
tableOutput("ccodes2")),
column(3,
p(br()),
tableOutput("ccodes3")),
column(3,
p(br()),
tableOutput("ccodes4"))
)
))
),
fluidRow(
column(12,
p(h4(strong('Pick the options:'))),
column(3,
radioButtons("restype", "Resolution type", choices=c("All", "Human Rights (sess. 58-66)", "Colonialism (sess. 58-66)", "Economy (sess. 58-66)"), selected="All")
),
column(3,
radioButtons("abstvote", "Abstention vote weight", choices=c("0.5", "0.7"), selected="0.5", inline=TRUE),
br(),
sliderInput("cutoff", "Edge quantile cutoff", value=0.9, min=0.5, max=0.99, step=0.01)
),
column(3,
radioButtons("small", "Show zero-degree nodes", choices=c("Yes", "No"), selected="Yes", inline=TRUE),
br(),
radioButtons("color", "Colour countries", choices=c("UN Regional Groups", "P5/G4/UfC(core)/ACT", "AU/EU/ASEAN/CIS/UNASUR/CARICOM/AL"), selected="UN Regional Groups")
),
column(3,
radioButtons("quantproperties", "Quantitative properties", choices=c("Nodes with highest degree", "Nodes with highest betweenness", "Communities", "Assortativity"), selected="Nodes with highest degree")
)
)
)
))
|
# This program is based largely on the methodology in:
# Khitatrakun, Surachai, Gordon B T Mermin, and Norton Francis. “Incorporating State Analysis into the
# Tax Policy Center’s Microsimulation Model: Documentation and Methodology.” Working Paper, March 2016.
# https://www.taxpolicycenter.org/sites/default/files/alfresco/publication-pdfs/2000697-Incorporating-State-Analysis-into-the-TPCs-Microsimulation-Model.pdf.
source(here::here("include", "libraries.r"))
library(mvtnorm)
devtools::session_info()
(.packages()) %>% sort
# define needed functions ----
get_delta <- function(w, beta, x) {
  # For each household j, delta_j = log( w_j / sum_s exp((beta x')[s, j]) ):
  # the constant that makes the modelled state weights sum to that
  # household's total weight w_j.
  #
  # w    - length-h vector of household total weights
  # beta - s x k matrix of state coefficients
  # x    - h x k matrix of household characteristics
  # Returns a length-h vector of household constants.
  state_terms <- exp(tcrossprod(beta, x))  # s x h matrix of exp(beta %*% t(x))
  log(w / colSums(state_terms))
}
get_weights <- function(beta, delta, x) {
  # Household-by-state weight matrix: exp of the linear predictor
  # beta %*% t(x) (s x h) with each household's constant delta added,
  # transposed by apply() so rows index households and columns index states.
  #
  # beta  - s x k matrix of state coefficients
  # delta - length-h vector of household constants
  # x     - h x k matrix of household characteristics
  # Returns an h x s matrix of weights.
  exp(apply(beta %*% t(x), 1, `+`, delta))
}
# create the poisson problem ----
# xscale <- 10 and step_scale <- 10 works well with this
h <- 10 # number households
k <- 3 # number characteristics
s <- 5 # number states
# for the poisson version we need beta and delta
# beta is an s x k matrix of coefficients
beta <- matrix(c(1, 2, 3,
4, 5, 6,
7, 8, 9,
10, 11, 12,
13, 14, 15), ncol=3, byrow = TRUE) / 100
beta
# delta is an h-length vector of individual constants in the formula for state weights
delta <- 1:h / 100
# create multivariate normal x for the 3-characteristics case ----
# x is an h x k matrix of characteristics
sigma <- matrix(c(1, .8, .6,
.8, 1, .4,
.6, .4, 1), ncol=3, byrow = TRUE)
sigma
set.seed(1234)
x <- rmvnorm(n=h, mean=c(10, 20, 30), sigma)
# get household-state weights if poisson model ----
hsweights <- get_weights(beta, delta, x) # poisson model
# end poisson ----
# Alternatively, create a large problem ----
# xscale <- 100 and step_scale <- nrow(x) works well with this
h <- 40e3 # number households
k <- 30 # number of characteristics
s <- 50 # number of states
#.. get household-state weights randomly IF not poisson ----
set.seed(1234)
hsweights <- matrix(runif(n=h * s, min=4, max=8000), nrow=h, ncol=s)
#.. end random ----
# alternative way to get x for large or small problem ----
xbar <- seq(10, 100, length.out=k)
xsd <- xbar / 10
set.seed(1234)
x <- matrix(rnorm(n=h * k, mean=xbar, sd=xsd), nrow=h, ncol=k)
# end alternative way to get x ----
# look at x and its correlations ----
x
cor(x)
# get total household weights and state weights ----
hsweights
hweights <- rowSums(hsweights) # sum of weights for each individual
hweights
sweights <- colSums(hsweights) # sum of weights for each state
sweights
# done with data setup ----
# get problem ready to solve ----
xsave <- x # save initial x values in case we scale them
# scale and go ----
# need to get a rule of thumb for this
# xscale <- 1
(xscale <- sum(xsave) / 1000)
x <- xsave / xscale
targets <- t(hsweights) %*% x
targets
# now search for the beta coefficients and deltas that are consistent with these data and that meet the targets
# we know the data and we know the aggregate weights, but not the state weights -- e prefix will mean estimated
# define xprime-x and its inverse before entering the loop as it will not change within the loop
xpx <- t(x) %*% x
# inverse of matrix that has been multiplied by a non-zero scalar equals inverse of the scalar multiplied by inverse of the matrix,
# so solve xpx once at the start
(invxpx <- solve(xpx)) # this needs to be invertible
# to get the inverse of the jacobian we will multiply by the inverse of the scalar
# define initial values before entering loop ----
ibeta <- matrix(0, nrow=s, ncol=k) # tpc uses 0 as beta starting point
idelta <- get_delta(hweights, ibeta, x) # tpc uses initial delta based on initial beta
ihsweights <- get_weights(ibeta, idelta, x)
ihsweights
# look at distance from targets using initial values
idist <- targets - t(ihsweights) %*% x
idist
isse <- sum(idist^2)
isse
# before start, set values to use when entering loop for first time ----
ebeta <- ibeta
edelta <- idelta
# note that step_scale has a MAJOR impact on iterations
(step_scale <- nrow(x))
a <- proc.time()
for(i in 1:500){
ehsweights <- get_weights(ebeta, edelta, x) # estimated weights for each household for each state
ehweights <- rowSums(ehsweights) # estimated total weights for each individual
esweights <- colSums(ehsweights) # estimated total weights for each state
etargets <- t(ehsweights) %*% x
dist <- targets - etargets
sse <- sum(dist^2) # sum of squared errors
sseu <- sse * (xscale ^2) # what the unscaled sse would be
if(sseu < 1e-6 | (sse < 1e-10 & sseu < 1e-2)) {
print(sprintf("DONE at iteration %i: scaled sse: %.5e, unscaled sse: %.5e", i, sse, sseu))
break
}
if(i<=10 | i %% 5 ==0) {
print(sprintf("iteration %i: scaled sse: %.5e, unscaled sse: %.5e", i, sse, sseu))
}
# get the step
# step <- matrix(nrow=s, ncol=k)
# for(i in 1:s) step[i, ] <- t((1 /esweights[i]) * invxpx %*% dist[i, ]) * step_scale
step <- (1 / esweights) * dist %*% invxpx * step_scale
ebeta <- ebeta + step
edelta <- get_delta(ehweights, ebeta, x)
}
b <- proc.time()
b - a
sse; sseu
dist
quantile(dist)
# calculate final weights
fbeta <- ebeta
fdelta <- edelta
fhsweights <- get_weights(fbeta, fdelta, x)
(fhweights <- rowSums(fhsweights))
(fsweights <- colSums(fhsweights))
fbeta %>% round(3)
beta %>% round(3)
ibeta %>% round(3)
fdelta %>% round(3)
delta %>% round(3)
idelta %>% round(3)
fhsweights %>% round(3)
hsweights %>% round(3)
ihsweights %>% round(3)
(fhsweights - hsweights) %>% round(6)
fhsweights / hsweights
fhsweights / ihsweights
quantile((fhsweights - hsweights) / hsweights * 100 - 100)
fhweights
hweights
(fhweights / hweights * 100 - 100) %>% round(3)
fsweights
sweights
(fsweights / sweights * 100 - 100) %>% round(3)
t1 <- targets * xscale
t2 <- (t(fhsweights) %*% x) * xscale
t1 %>% round(3)
t2 %>% round(3)
(t2 - t1) %>% round(3)
(t2 / t1 * 100 - 100) %>% round(4)
# regression of initial household weights hweights on x variables
mod <- glm(round(hweights) ~ x, family="poisson")
summary(mod)
summary(glm(round(hsweights[, 5]) ~ x, family="poisson")) # 4, 5 are poisson
# poisson regressions with final estimated weights
mod2 <- glm(round(fhweights) ~ x, family="poisson")
summary(mod2)
# now pick a state and estimate that model
fhweights_s <- fhsweights[, 3]
mod3 <- glm(round(fhweights_s) ~ x, family="poisson")
summary(mod3)
|
/misc/tpc.r
|
no_license
|
donboyd5/50_state_taxdata
|
R
| false
| false
| 6,560
|
r
|
# This program is based largely on the methodology in:
# Khitatrakun, Surachai, Gordon B T Mermin, and Norton Francis. “Incorporating State Analysis into the
# Tax Policy Center’s Microsimulation Model: Documentation and Methodology.” Working Paper, March 2016.
# https://www.taxpolicycenter.org/sites/default/files/alfresco/publication-pdfs/2000697-Incorporating-State-Analysis-into-the-TPCs-Microsimulation-Model.pdf.
source(here::here("include", "libraries.r"))
library(mvtnorm)
devtools::session_info()
(.packages()) %>% sort
# define needed functions ----
get_delta <- function(w, beta, x) {
  # For each household j, delta_j = log( w_j / sum_s exp((beta x')[s, j]) ):
  # the constant that makes the modelled state weights sum to that
  # household's total weight w_j.
  #
  # w    - length-h vector of household total weights
  # beta - s x k matrix of state coefficients
  # x    - h x k matrix of household characteristics
  # Returns a length-h vector of household constants.
  state_terms <- exp(tcrossprod(beta, x))  # s x h matrix of exp(beta %*% t(x))
  log(w / colSums(state_terms))
}
get_weights <- function(beta, delta, x) {
  # Household-by-state weight matrix: exp of the linear predictor
  # beta %*% t(x) (s x h) with each household's constant delta added,
  # transposed by apply() so rows index households and columns index states.
  #
  # beta  - s x k matrix of state coefficients
  # delta - length-h vector of household constants
  # x     - h x k matrix of household characteristics
  # Returns an h x s matrix of weights.
  exp(apply(beta %*% t(x), 1, `+`, delta))
}
# create the poisson problem ----
# xscale <- 10 and step_scale <- 10 works well with this
h <- 10 # number households
k <- 3 # number characteristics
s <- 5 # number states
# for the poisson version we need beta and delta
# beta is an s x k matrix of coefficients
beta <- matrix(c(1, 2, 3,
4, 5, 6,
7, 8, 9,
10, 11, 12,
13, 14, 15), ncol=3, byrow = TRUE) / 100
beta
# delta is an h-length vector of individual constants in the formula for state weights
delta <- 1:h / 100
# create multivariate normal x for the 3-characteristics case ----
# x is an h x k matrix of characteristics
sigma <- matrix(c(1, .8, .6,
.8, 1, .4,
.6, .4, 1), ncol=3, byrow = TRUE)
sigma
set.seed(1234)
x <- rmvnorm(n=h, mean=c(10, 20, 30), sigma)
# get household-state weights if poisson model ----
hsweights <- get_weights(beta, delta, x) # poisson model
# end poisson ----
# Alternatively, create a large problem ----
# xscale <- 100 and step_scale <- nrow(x) works well with this
h <- 40e3 # number households
k <- 30 # number of characteristics
s <- 50 # number of states
#.. get household-state weights randomly IF not poisson ----
set.seed(1234)
hsweights <- matrix(runif(n=h * s, min=4, max=8000), nrow=h, ncol=s)
#.. end random ----
# alternative way to get x for large or small problem ----
xbar <- seq(10, 100, length.out=k)
xsd <- xbar / 10
set.seed(1234)
x <- matrix(rnorm(n=h * k, mean=xbar, sd=xsd), nrow=h, ncol=k)
# end alternative way to get x ----
# look at x and its correlations ----
x
cor(x)
# get total household weights and state weights ----
hsweights
hweights <- rowSums(hsweights) # sum of weights for each individual
hweights
sweights <- colSums(hsweights) # sum of weights for each state
sweights
# done with data setup ----
# get problem ready to solve ----
xsave <- x # save initial x values in case we scale them
# scale and go ----
# need to get a rule of thumb for this
# xscale <- 1
(xscale <- sum(xsave) / 1000)
x <- xsave / xscale
targets <- t(hsweights) %*% x
targets
# now search for the beta coefficients and deltas that are consistent with these data and that meet the targets
# we know the data and we know the aggregate weights, but not the state weights -- e prefix will mean estimated
# define xprime-x and its inverse before entering the loop as it will not change within the loop
xpx <- t(x) %*% x
# inverse of matrix that has been multiplied by a non-zero scalar equals inverse of the scalar multiplied by inverse of the matrix,
# so solve xpx once at the start
(invxpx <- solve(xpx)) # this needs to be invertible
# to get the inverse of the jacobian we will multiply by the inverse of the scalar
# define initial values before entering loop ----
ibeta <- matrix(0, nrow=s, ncol=k) # tpc uses 0 as beta starting point
idelta <- get_delta(hweights, ibeta, x) # tpc uses initial delta based on initial beta
ihsweights <- get_weights(ibeta, idelta, x)
ihsweights
# look at distance from targets using initial values
idist <- targets - t(ihsweights) %*% x
idist
isse <- sum(idist^2)
isse
# before start, set values to use when entering loop for first time ----
ebeta <- ibeta
edelta <- idelta
# note that step_scale has a MAJOR impact on iterations
(step_scale <- nrow(x))
a <- proc.time()
# TPC-style fitting loop: alternate a gradient-like update of the state
# coefficients (ebeta) with an exact recomputation of the household constants
# (edelta) until the implied state targets match the true targets.
# Relies on globals set above: ebeta, edelta, x, targets, xscale, invxpx,
# step_scale.
for(i in 1:500){
  ehsweights <- get_weights(ebeta, edelta, x) # estimated weights for each household for each state
  ehweights <- rowSums(ehsweights) # estimated total weights for each individual
  esweights <- colSums(ehsweights) # estimated total weights for each state
  etargets <- t(ehsweights) %*% x # state targets implied by current estimates
  dist <- targets - etargets # remaining shortfall from the true targets
  sse <- sum(dist^2) # sum of squared errors
  sseu <- sse * (xscale ^2) # what the unscaled sse would be
  # converged when the unscaled error is tiny, or the scaled error is tiny
  # and the unscaled error is at least small
  if(sseu < 1e-6 | (sse < 1e-10 & sseu < 1e-2)) {
    print(sprintf("DONE at iteration %i: scaled sse: %.5e, unscaled sse: %.5e", i, sse, sseu))
    break
  }
  # progress report: every iteration for the first 10, then every 5th
  if(i<=10 | i %% 5 ==0) {
    print(sprintf("iteration %i: scaled sse: %.5e, unscaled sse: %.5e", i, sse, sseu))
  }
  # get the step
  # step <- matrix(nrow=s, ncol=k)
  # for(i in 1:s) step[i, ] <- t((1 /esweights[i]) * invxpx %*% dist[i, ]) * step_scale
  # vectorized form of the commented-out loop above: each state's step is its
  # target shortfall mapped through (X'X)^-1, scaled by 1/state-weight
  step <- (1 / esweights) * dist %*% invxpx * step_scale
  ebeta <- ebeta + step
  edelta <- get_delta(ehweights, ebeta, x) # re-solve deltas exactly for the new betas
}
b <- proc.time()
b - a
sse; sseu
dist
quantile(dist)
# calculate final weights
fbeta <- ebeta
fdelta <- edelta
fhsweights <- get_weights(fbeta, fdelta, x)
(fhweights <- rowSums(fhsweights))
(fsweights <- colSums(fhsweights))
fbeta %>% round(3)
beta %>% round(3)
ibeta %>% round(3)
fdelta %>% round(3)
delta %>% round(3)
idelta %>% round(3)
fhsweights %>% round(3)
hsweights %>% round(3)
ihsweights %>% round(3)
(fhsweights - hsweights) %>% round(6)
fhsweights / hsweights
fhsweights / ihsweights
quantile((fhsweights - hsweights) / hsweights * 100 - 100)
fhweights
hweights
(fhweights / hweights * 100 - 100) %>% round(3)
fsweights
sweights
(fsweights / sweights * 100 - 100) %>% round(3)
t1 <- targets * xscale
t2 <- (t(fhsweights) %*% x) * xscale
t1 %>% round(3)
t2 %>% round(3)
(t2 - t1) %>% round(3)
(t2 / t1 * 100 - 100) %>% round(4)
# regression of initial household weights hweights on x variables
mod <- glm(round(hweights) ~ x, family="poisson")
summary(mod)
summary(glm(round(hsweights[, 5]) ~ x, family="poisson")) # 4, 5 are poisson
# poisson regressions with final estimated weights
mod2 <- glm(round(fhweights) ~ x, family="poisson")
summary(mod2)
# now pick a state and estimate that model
fhweights_s <- fhsweights[, 3]
mod3 <- glm(round(fhweights_s) ~ x, family="poisson")
summary(mod3)
|
/* Prefix header flag: build against the Carbon API on Mac OS. */
#define TARGET_API_MAC_CARBON 1
|
/MacPrefix.h
|
no_license
|
fruitsamples/MovieGWorlds
|
R
| false
| false
| 31
|
h
|
/* Prefix header flag: build against the Carbon API on Mac OS. */
#define TARGET_API_MAC_CARBON 1
|
#plot 2 - Global Active Power (kilowatts)
# Reads the household power consumption data, keeps only 1-2 Feb 2007,
# and saves a line chart of global active power over time as plot2.png.
dataFile <- "./household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# keep the two target days (dates are stored as d/m/Y strings)
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# combine the Date and Time columns into a single POSIXlt timestamp
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# non-numeric entries in the raw file (if any) coerce to NA here — verify
# against the source data; this dataset uses "?" for missing readings
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
ppriyajs/ExData_Plotting1
|
R
| false
| false
| 557
|
r
|
#plot 2 - Global Active Power (kilowatts)
# Reads the household power consumption data, keeps only 1-2 Feb 2007,
# and saves a line chart of global active power over time as plot2.png.
dataFile <- "./household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# keep the two target days (dates are stored as d/m/Y strings)
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# combine the Date and Time columns into a single POSIXlt timestamp
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# non-numeric entries in the raw file (if any) coerce to NA here — verify
# against the source data; this dataset uses "?" for missing readings
globalActivePower <- as.numeric(subSetData$Global_active_power)
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
# This script produces the heatmaps used to show sharing of expression
# outlier status across tissues
#
# Jonah Einson
# jeinson@nygenome.org
library(ggplot2)
library(ggdendro)
library(tidyverse)
library(magrittr)
library(cowplot)
library(RColorBrewer)
# Attach color information
gtex_key <- read_tsv("processed_input_data/gtex_colors.txt")
attach(gtex_key)
names(tissue_abbrv) <- tissue_id
color_hex <- str_c("#", color_hex)
names(color_hex) <- tissue_abbrv
# Load the results files
# _OA are the tables that have Only Available cross tissue gene comparisons
input_path = "processed_input_data/figureS21/"
ASE_sharing_OA <- read.csv(paste0(input_path, "figS21_ASE_sharing_heatmaap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
TE_sharing_OA <- read.csv(paste0(input_path, "figS21_TE_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
AS_sharing_OA <- read.csv(paste0(input_path, "figS21_AS_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
# _NA files include NAs as a non-shared outlier status
ASE_sharing_NA <- read.csv(paste0(input_path, "figS21_ASE_NA_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
TE_sharing_NA <- read.csv(paste0(input_path, "figS21_TE_NA_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
AS_sharing_NA <- read.csv(paste0(input_path, "figS21_AS_NA_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
# Remove rows and columns with missing data
remove_missing <- function(table){
  # Drop tissues with no data: a row that is entirely NA is removed, and the
  # same tissue is removed as a column so the sharing table stays square.
  #
  # table - square sharing matrix/data.frame (rownames == colnames == tissues)
  # Returns the table restricted to tissues that have at least one value.
  #
  # FIX: compare the per-row NA count to ncol(), not nrow(); rowSums counts
  # across columns, so the original nrow() comparison only happened to work
  # because these tables are square.
  keep <- names(which(rowSums(is.na(table)) != ncol(table)))
  table[keep, keep]
}
ASE_sharing_OA %<>% remove_missing
TE_sharing_OA %<>% remove_missing
AS_sharing_OA %<>% remove_missing
ASE_sharing_NA %<>% remove_missing
TE_sharing_NA %<>% remove_missing
AS_sharing_NA %<>% remove_missing
# theme information
gtex_v8_figure_theme <- function() {
  # Shared ggplot2 theme for GTEx v8 figures: small text sizes, no grid
  # lines or panel background, and black axis lines.
  theme(
    plot.title = element_text(face = "plain", size = 8),
    text = element_text(size = 8),
    axis.text = element_text(size = 7),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_blank(),
    axis.line = element_line(colour = "black"),
    legend.text = element_text(size = 7),
    legend.title = element_text(size = 8)
  )
}
hmcol <- colorRampPalette(c("white","#d62222"))(100)
# Make the clustering order based on the ASE_sharing_OA
dat <- ASE_sharing_OA
dat[dat == 1] <- NA
dat[dat > .2] <- .2
distances_ASE_OA <- dist(dat, method = "canberra")
clustering_ASE_OA <- hclust(distances_ASE_OA)
row_order_ASE_OA <- colnames(dat)[clustering_ASE_OA$order]
# Draw one tissue-by-tissue sharing heatmap. All panels use the same tissue
# ordering, taken from the clustering computed once on ASE_sharing_OA (globals
# distances_ASE_OA / clustering_ASE_OA / row_order_ASE_OA). Axis tick dots are
# coloured per tissue via the global color_hex lookup.
#
# dat            - square sharing matrix (rownames == colnames == tissues)
# exclude_legend - if TRUE, drop the fill legend from the returned plot
# extract_legend - if TRUE, return only the legend grob (via cowplot)
auto_heatmap <- function(dat, exclude_legend = F, extract_legend = F){
  dat[dat == 1] <- NA  # self-comparisons (stored as 1) become black NA tiles
  dat[dat > .2] <- .2  # cap values so the colour scale is not washed out
  distances <- distances_ASE_OA
  clustering <- clustering_ASE_OA
  # Order the rows and columns according to the hierarchical clustering
  row_order <- row_order_ASE_OA
  # reshape the wide matrix into long (Test, Discovery, val) form for ggplot
  dat_df <- dat %>%
    rownames_to_column("Test") %>%
    gather("Discovery", "val", -"Test")
  dat_df$Test <- factor(dat_df$Test, levels = rev(row_order), ordered = T)
  dat_df$Discovery <- factor(dat_df$Discovery, levels = row_order, ordered = T)
  axis_colors <- color_hex[row_order]
  heatmap_plt <-
    ggplot(dat_df, aes(y = Test, x = Discovery, fill = val)) +
    geom_tile(color = "grey") +
    scale_fill_gradient("Sharing \nfraction",
                        low = "white", high = "#d62222",
                        na.value = "black", limits = c(0, .201)) +
    scale_x_discrete(labels = rep("•", ncol(dat))) +
    scale_y_discrete(labels = rep("•", nrow(dat))) +
    gtex_v8_figure_theme() +
    theme(axis.text.x = element_text(color = axis_colors, size = 15, vjust = 1),
          axis.text.y = element_text(color = rev(axis_colors), size = 15),
          plot.margin = unit(c(.75 ,.25, .25 ,.25), "cm"))
  if(exclude_legend){
    heatmap_plt <- heatmap_plt + theme(legend.position = "none")
  }
  if(extract_legend){
    return(cowplot::get_legend(heatmap_plt))
  }
  # Otherwise return the full heatmap plot
  return(heatmap_plt)
}
############## Plot the Main Heatmaps #######################
# The only available heatmaps
top_margin <- unit(c(.5, .1, .1, .1), units = "cm")
OA_heatmaps <-
plot_grid(
auto_heatmap(TE_sharing_OA, exclude_legend = T) + theme(plot.margin = top_margin) + xlab(""),
auto_heatmap(ASE_sharing_OA, exclude_legend = T) + theme(plot.margin = top_margin) + xlab("") + ylab(""),
auto_heatmap(AS_sharing_OA, exclude_legend = T) + theme(plot.margin = top_margin) + xlab("") + ylab(""),
nrow = 1, labels = c("eOutliers", "aseOutliers", "sOutliers"),
label_x = .5, label_size = 8, hjust = c(0, 0, .25), vjust = 1.5
)
# The include NA heamaps
bottom_margin <- unit(c(.1, .1, .1, .1), units = "cm")
NA_heatmaps <-
plot_grid(
auto_heatmap(TE_sharing_NA, exclude_legend = T) + theme(plot.margin = bottom_margin)+ xlab(""),
auto_heatmap(ASE_sharing_NA, exclude_legend = T) + theme(plot.margin = bottom_margin) + ylab(""),
auto_heatmap(AS_sharing_NA, exclude_legend = T) + theme(plot.margin = bottom_margin) + ylab("") + xlab(""),
nrow = 1
)
# Top half of the plot #
top_half <- plot_grid(OA_heatmaps, NA_heatmaps, nrow = 2, align = "v", labels = "A")
legend <- auto_heatmap(ASE_sharing_OA, extract_legend = T)
# Add a quick label plot
plot_guide <-
ggplot() +
annotate("text", x = 0, y = 1, angle = 90, label = "Yes") +
annotate("text", x = 0, y = -1, angle = 90, label = "No") +
annotate("text", x = -1, y = 0, angle = 90, size = 4, label = "Limit to co-available genes") +
ylim(-2, 2) + xlim(-2, 1) +
theme_void()
# plot_guide
top_half <- plot_grid(plot_guide, top_half, legend, rel_widths = c(.05, 1, .1), nrow = 1)
############### Panel B: NA Decrease Plot ######################
# How much does the percentage decrease by counting NA genes as non-shared?
mean_perc <- function(x){
  # Median sharing fraction across tissue pairs, excluding the diagonal
  # (self-comparisons are stored as 1) and missing pairs.
  # NOTE: despite the name, this computes a median, not a mean; the name is
  # kept for compatibility with existing callers.
  # FIX: use TRUE instead of the reassignable shorthand T, and drop the
  # redundant intermediate copy.
  median(x[x < 1], na.rm = TRUE)
}
median_boot_ci <- function(x, R = 999){ # percentile bootstrap CI for the median
  # 90% percentile bootstrap confidence interval for the median of x, after
  # dropping diagonal entries (stored as 1) and NAs.
  #
  # x - numeric values (or a sharing matrix; logical subsetting flattens it)
  # R - number of bootstrap resamples
  # Returns c(lower, upper): the 5th and 95th percentile bootstrap medians.
  x <- x[x < 1]
  x <- x[!is.na(x)]
  low <- floor(R * .05)
  high <- ceiling(R * .95)
  # FIX: replace the 1:length(out) loop with vapply/seq_len (safe for R = 0
  # and idiomatic); the R sequential sample() calls keep the same RNG stream.
  boot_medians <- vapply(
    seq_len(R),
    function(i) median(sample(x, length(x), replace = TRUE)),
    numeric(1)
  )
  sort(boot_medians)[c(low, high)]
}
NA_decrease_tbl <- tibble(
perc = c(
mean_perc(TE_sharing_OA),
mean_perc(TE_sharing_NA),
mean_perc(ASE_sharing_OA),
mean_perc(ASE_sharing_NA),
mean_perc(AS_sharing_OA),
mean_perc(AS_sharing_NA)
),
method = factor(c("eOutliers", "eOutliers", "aseOutliers", "aseOutliers", "sOutliers", "sOutliers"), levels = c("eOutliers", "aseOutliers", "sOutliers")),
incl = factor(c("No", "Yes", "No", "Yes", "No", "Yes"), levels = c("No", "Yes"), ordered = T)
)
# This information from this part will go in the main text of the paper!
NA_decrease_tbl %>% filter(incl == "No")
intervals <- do.call(
"rbind",
list(
median_boot_ci(TE_sharing_OA),
median_boot_ci(TE_sharing_NA),
median_boot_ci(ASE_sharing_OA),
median_boot_ci(ASE_sharing_NA),
median_boot_ci(AS_sharing_OA),
median_boot_ci(AS_sharing_NA))
)
intervals <- as.data.frame(intervals)
colnames(intervals) <- c("lower", "upper")
NA_decrease_tbl %<>% cbind(intervals)
NA_decrease_plt <-
ggplot(NA_decrease_tbl, aes(method, perc, fill = incl)) +
geom_bar(stat = "identity", position = position_dodge()) +
# scale_x_discrete(breaks = c("ASE", "AS", "TE"),
# labels = c("ASE", "Splicing", "Expression")
# )+
geom_errorbar(aes(ymin = lower, ymax = upper), width = .2,
position = position_dodge(.9)) +
theme(plot.margin = unit(c(0,0,0,.5), units = "cm")) +
scale_fill_manual(values = c("#d8b365", "#5ab4ac")) +
ylab("Median Percent Sharing \n Across All Tissues") +
gtex_v8_figure_theme()
#NA_decrease_plt
######################## Panel C: Condensed Heatmap ###################
# Make an aggregated sharing percentage plot using BH corected ANEVA-DOT p values
# Make a barplot with median across columns on top and median across rows on bottom
ASE_sharing_BH <- read.csv(paste0(input_path, "figS21c_ASE_BH_input_data.txt"), sep = "\t", row.names = 1)
tmp <- ASE_sharing_BH
tmp[tmp == 1] <- NA
to_remove = which(colSums(is.na(tmp)) == nrow(tmp))
tmp = tmp[-to_remove, -to_remove]
vec_to_df <- function(vector){
  # Convert a named vector into a two-column tibble: the element names in
  # `names` and the corresponding values in `values`.
  tibble(names = names(vector), values = vector)
}
# discovery tissue
# How much, on average, do outliers in X replicate in another tissue?
discovery_median <- vec_to_df(apply(tmp, 2, median, na.rm = T))
discovery_range <- apply(tmp, 2, range, na.rm = T)
discovery_median$lower <- discovery_range[1,]
discovery_median$upper <- discovery_range[2,]
discovery_median %<>% arrange(desc(values))
discovery_median$names <- factor(discovery_median$names, levels = discovery_median$names)
fill_cols <- color_hex[levels(discovery_median$names)]
discovery_plt <-
ggplot(discovery_median, aes(x = names, y = values)) +
geom_bar(stat = "identity", fill = fill_cols) +
scale_fill_manual(color_hex[levels(discovery_median$names)]) +
geom_errorbar(aes(ymax = upper, ymin = lower), width = 0.1) +
ylab("Median % replication \n of outliers in \n test tissue") +
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
plot.margin = unit(c(.5, .5, 0. ,.5), units = "cm")) +
gtex_v8_figure_theme()
# Test tissue
# (the row medians for the ASE heatmap from panel A)
test_median <- vec_to_df(apply(tmp, 1, median, na.rm = T))
test_range <- apply(tmp, 1, range, na.rm = T)
test_median$lower <- test_range[1,]
test_median$upper <- test_range[2,]
test_median$names <- factor(test_median$names, levels = discovery_median$names)
fill_cols <- color_hex[levels(test_median$names)]
names(tissue_site_detail) <- tissue_abbrv
test_plt <-
ggplot(test_median, aes(x = names, y = values)) +
geom_bar(stat = "identity", fill = fill_cols) +
scale_fill_manual(color_hex[levels(test_median$names)]) +
geom_errorbar(aes(ymax = upper, ymin = lower), width = 0.1) +
theme(axis.title.x = element_blank(),
axis.text.x = element_text(angle = 270, hjust = 0),
plot.margin = unit(c(0,.5,.5,.5), units = "cm")) +
scale_x_discrete(labels = tissue_site_detail[levels(test_median$names)]) +
#scale_x_discrete(labels = levels(test_median$names)) +
ylab("Median % replication \n of outlier status \n from discovery tissues") +
scale_y_reverse() +
gtex_v8_figure_theme()
#test_plt
#### Generate the Color Key (Panel D) ####
gtex_key_plt <-
gtex_key %>%
mutate(tiss_order = 1:nrow(.)) %>%
mutate(tissue_site_detail = factor(tissue_site_detail, levels = tissue_site_detail))
gtex_key_plt$color_hex <- color_hex
color_key <-
ggplot(gtex_key_plt, aes(0, tiss_order)) +
geom_point(size = 2.5, color = color_hex) +
geom_text(label = tissue_site_detail, aes(x = .02, hjust = 0), size = 2.5) +
scale_y_reverse() +
xlim(-.025, .35) +
gtex_v8_figure_theme() +
theme_void()
#color_key
#### Bottom Half ####
condensed_heatmap <- plot_grid(discovery_plt, test_plt, nrow = 2, rel_heights = c(1.3,2), align = "v", axis = "l")
bottom_left <- plot_grid(NA_decrease_plt, condensed_heatmap, nrow = 2, align = "none", rel_heights = c(1, 3), labels = c("B", "C"))
bottom_half <- plot_grid(bottom_left, color_key, nrow = 1, rel_widths = c(1, .4), labels = c(NA, "D"))
#### Assemble the whole thing!
whole_fig <- plot_grid(top_half, bottom_half, nrow = 2, rel_heights = c(1, 1.35))
png("generated_figures/Fig_S21.png", width = 8.5, height = 11, units = "in", res = 300)
whole_fig
dev.off()
|
/plot_figureS21.R
|
no_license
|
BennyStrobes/gtex_v8_rare_variant_figure_generation
|
R
| false
| false
| 11,695
|
r
|
# This script produces the heatmaps used to show sharing of expression
# outlier status across tissues
#
# Jonah Einson
# jeinson@nygenome.org
library(ggplot2)
library(ggdendro)
library(tidyverse)
library(magrittr)
library(cowplot)
library(RColorBrewer)
# Attach color information
gtex_key <- read_tsv("processed_input_data/gtex_colors.txt")
attach(gtex_key)
names(tissue_abbrv) <- tissue_id
color_hex <- str_c("#", color_hex)
names(color_hex) <- tissue_abbrv
# Load the results files
# _OA are the tables that have Only Available cross tissue gene comparisons
input_path = "processed_input_data/figureS21/"
ASE_sharing_OA <- read.csv(paste0(input_path, "figS21_ASE_sharing_heatmaap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
TE_sharing_OA <- read.csv(paste0(input_path, "figS21_TE_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
AS_sharing_OA <- read.csv(paste0(input_path, "figS21_AS_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
# _NA files include NAs as a non-shared outlier status
ASE_sharing_NA <- read.csv(paste0(input_path, "figS21_ASE_NA_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
TE_sharing_NA <- read.csv(paste0(input_path, "figS21_TE_NA_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
AS_sharing_NA <- read.csv(paste0(input_path, "figS21_AS_NA_sharing_heatmap_input_data.txt"), row.names = 1, check.names = F, sep = "\t")
# Remove rows and columns with missing data
# Drop rows/columns that contain only missing values.
#
# The sharing tables are square (tissue x tissue) with matching row and
# column names; a tissue with no data shows up as an all-NA row. We keep
# only the rows that have at least one non-missing entry, then subset
# both dimensions by the surviving names so the table stays square.
#
# @param table A square data frame with identical row and column names.
# @return `table` restricted to rows/columns with at least one
#   non-missing value.
remove_missing <- function(table){
  # Compare against ncol(): a row is fully missing when all of its
  # ncol(table) entries are NA. (The previous comparison against nrow()
  # was only correct because these tables happen to be square.)
  to.keep <- names(which(rowSums(is.na(table)) != ncol(table)))
  table[to.keep, to.keep]
}
ASE_sharing_OA %<>% remove_missing
TE_sharing_OA %<>% remove_missing
AS_sharing_OA %<>% remove_missing
ASE_sharing_NA %<>% remove_missing
TE_sharing_NA %<>% remove_missing
AS_sharing_NA %<>% remove_missing
# theme information
# Shared ggplot2 theme applied to every panel of the figure: small plain
# fonts, no grid lines, blank panel background, black axis lines, and
# slightly smaller legend text than legend titles.
gtex_v8_figure_theme <- function() {
  theme(
    plot.title = element_text(face = "plain", size = 8),
    text = element_text(size = 8),
    axis.text = element_text(size = 7),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.background = element_blank(),
    axis.line = element_line(colour = "black"),
    legend.text = element_text(size = 7),
    legend.title = element_text(size = 8)
  )
}
hmcol <- colorRampPalette(c("white","#d62222"))(100)
# Make the clustering order based on the ASE_sharing_OA
dat <- ASE_sharing_OA
dat[dat == 1] <- NA
dat[dat > .2] <- .2
distances_ASE_OA <- dist(dat, method = "canberra")
clustering_ASE_OA <- hclust(distances_ASE_OA)
row_order_ASE_OA <- colnames(dat)[clustering_ASE_OA$order]
# Draw one tissue-by-tissue sharing heatmap.
#
# All three heatmaps reuse the row/column ordering derived from the ASE
# "only available" table (row_order_ASE_OA, computed above), so the
# panels are directly comparable. Diagonal cells (value == 1, a tissue
# compared with itself) are blanked to NA and rendered black; values are
# capped at 0.2 so one colour scale fits every panel.
#
# @param dat Square sharing table (rownames == colnames == tissue ids).
# @param exclude_legend Drop the fill legend from the returned plot?
# @param extract_legend If TRUE, return only the legend grob instead of
#   the plot (used once below to build a shared legend).
# @return A ggplot object, or a legend grob when extract_legend = TRUE.
auto_heatmap <- function(dat, exclude_legend = F, extract_legend = F){
  dat[dat == 1] <- NA   # blank the self-comparison diagonal
  dat[dat > .2] <- .2   # cap at 0.2 so all panels share one scale
  # NOTE(review): these two locals are assigned but never used below;
  # only row_order_ASE_OA actually drives the layout.
  distances <- distances_ASE_OA
  clustering <- clustering_ASE_OA
  # Order the rows and columns according to the hierarchical clustering
  row_order <- row_order_ASE_OA
  # Long format: one row per (Test tissue, Discovery tissue) cell.
  dat_df <- dat %>%
    rownames_to_column("Test") %>%
    gather("Discovery", "val", -"Test")
  dat_df$Test <- factor(dat_df$Test, levels = rev(row_order), ordered = T)
  dat_df$Discovery <- factor(dat_df$Discovery, levels = row_order, ordered = T)
  # Colour the "dot" axis labels with each tissue's GTEx colour.
  axis_colors <- color_hex[row_order]
  heatmap_plt <-
    ggplot(dat_df, aes(y = Test, x = Discovery, fill = val)) +
    geom_tile(color = "grey") +
    scale_fill_gradient("Sharing \nfraction",
                        low = "white", high = "#d62222",
                        na.value = "black", limits = c(0, .201)) +
    scale_x_discrete(labels = rep("•", ncol(dat))) +
    scale_y_discrete(labels = rep("•", nrow(dat))) +
    gtex_v8_figure_theme() +
    theme(axis.text.x = element_text(color = axis_colors, size = 15, vjust = 1),
          axis.text.y = element_text(color = rev(axis_colors), size = 15),
          plot.margin = unit(c(.75 ,.25, .25 ,.25), "cm"))
  if(exclude_legend){
    heatmap_plt <- heatmap_plt + theme(legend.position = "none")
  }
  if(extract_legend){
    return(cowplot::get_legend(heatmap_plt))
  }
  # Combine together
  return(heatmap_plt)
}
############## Plot the Main Heatmaps #######################
# The only available heatmaps
top_margin <- unit(c(.5, .1, .1, .1), units = "cm")
OA_heatmaps <-
plot_grid(
auto_heatmap(TE_sharing_OA, exclude_legend = T) + theme(plot.margin = top_margin) + xlab(""),
auto_heatmap(ASE_sharing_OA, exclude_legend = T) + theme(plot.margin = top_margin) + xlab("") + ylab(""),
auto_heatmap(AS_sharing_OA, exclude_legend = T) + theme(plot.margin = top_margin) + xlab("") + ylab(""),
nrow = 1, labels = c("eOutliers", "aseOutliers", "sOutliers"),
label_x = .5, label_size = 8, hjust = c(0, 0, .25), vjust = 1.5
)
# The include NA heamaps
bottom_margin <- unit(c(.1, .1, .1, .1), units = "cm")
NA_heatmaps <-
plot_grid(
auto_heatmap(TE_sharing_NA, exclude_legend = T) + theme(plot.margin = bottom_margin)+ xlab(""),
auto_heatmap(ASE_sharing_NA, exclude_legend = T) + theme(plot.margin = bottom_margin) + ylab(""),
auto_heatmap(AS_sharing_NA, exclude_legend = T) + theme(plot.margin = bottom_margin) + ylab("") + xlab(""),
nrow = 1
)
# Top half of the plot #
top_half <- plot_grid(OA_heatmaps, NA_heatmaps, nrow = 2, align = "v", labels = "A")
legend <- auto_heatmap(ASE_sharing_OA, extract_legend = T)
# Add a quick label plot
plot_guide <-
ggplot() +
annotate("text", x = 0, y = 1, angle = 90, label = "Yes") +
annotate("text", x = 0, y = -1, angle = 90, label = "No") +
annotate("text", x = -1, y = 0, angle = 90, size = 4, label = "Limit to co-available genes") +
ylim(-2, 2) + xlim(-2, 1) +
theme_void()
# plot_guide
top_half <- plot_grid(plot_guide, top_half, legend, rel_widths = c(.05, 1, .1), nrow = 1)
############### Panel B: NA Decrease Plot ######################
# How much does the percentage decrease by counting NA genes as non-shared?
# Median sharing fraction across all cells of a sharing table, after
# excluding values equal to 1 (a tissue compared with itself). A data
# frame input is flattened to a plain numeric vector by the `< 1`
# subsetting, and NAs are dropped by median().
# NOTE(review): despite the name this computes a median, not a mean;
# the name is kept so existing call sites keep working.
mean_perc <- function(x){
  vals <- x[x < 1]
  median(vals, na.rm = TRUE)
}
# Percentile bootstrap interval for the median of the sharing values.
#
# Self-comparisons (value == 1) and NAs are removed; the median of each
# of `R` bootstrap resamples is collected, sorted, and the values at the
# 5% and 95% percentile positions are returned.
#
# NOTE(review): floor(R * .05) / ceiling(R * .95) are the 5th and 95th
# percentile positions, i.e. a ~90% interval, not 95% -- confirm this
# matches the intended error bars. Output depends on the RNG state; no
# seed is set here, so repeated runs give slightly different bounds.
#
# @param x Sharing table (data frame) or numeric vector.
# @param R Number of bootstrap resamples.
# @return Length-2 numeric vector: c(lower, upper) bound.
median_boot_ci <- function(x, R = 999){ # A bootstrap function that actually works!
  # For a data frame input the `< 1` comparison flattens the table into
  # a plain numeric vector; then drop the missing cells.
  x <- x[x < 1]
  x <- x[!is.na(x)]
  # Index positions of the lower/upper percentiles in the sorted medians.
  low <- floor(R * .05)
  high <- ceiling(R * .95)
  out <- rep(0, R)
  for(i in 1:length(out)){
    out[i] <- median(sample(x, length(x), replace = T))
  }
  out <- sort(out)
  out[c(low, high)]
}
NA_decrease_tbl <- tibble(
perc = c(
mean_perc(TE_sharing_OA),
mean_perc(TE_sharing_NA),
mean_perc(ASE_sharing_OA),
mean_perc(ASE_sharing_NA),
mean_perc(AS_sharing_OA),
mean_perc(AS_sharing_NA)
),
method = factor(c("eOutliers", "eOutliers", "aseOutliers", "aseOutliers", "sOutliers", "sOutliers"), levels = c("eOutliers", "aseOutliers", "sOutliers")),
incl = factor(c("No", "Yes", "No", "Yes", "No", "Yes"), levels = c("No", "Yes"), ordered = T)
)
# This information from this part will go in the main text of the paper!
NA_decrease_tbl %>% filter(incl == "No")
intervals <- do.call(
"rbind",
list(
median_boot_ci(TE_sharing_OA),
median_boot_ci(TE_sharing_NA),
median_boot_ci(ASE_sharing_OA),
median_boot_ci(ASE_sharing_NA),
median_boot_ci(AS_sharing_OA),
median_boot_ci(AS_sharing_NA))
)
intervals <- as.data.frame(intervals)
colnames(intervals) <- c("lower", "upper")
NA_decrease_tbl %<>% cbind(intervals)
NA_decrease_plt <-
ggplot(NA_decrease_tbl, aes(method, perc, fill = incl)) +
geom_bar(stat = "identity", position = position_dodge()) +
# scale_x_discrete(breaks = c("ASE", "AS", "TE"),
# labels = c("ASE", "Splicing", "Expression")
# )+
geom_errorbar(aes(ymin = lower, ymax = upper), width = .2,
position = position_dodge(.9)) +
theme(plot.margin = unit(c(0,0,0,.5), units = "cm")) +
scale_fill_manual(values = c("#d8b365", "#5ab4ac")) +
ylab("Median Percent Sharing \n Across All Tissues") +
gtex_v8_figure_theme()
#NA_decrease_plt
######################## Panel C: Condensed Heatmap ###################
# Make an aggregated sharing percentage plot using BH corected ANEVA-DOT p values
# Make a barplot with median across columns on top and median across rows on bottom
ASE_sharing_BH <- read.csv(paste0(input_path, "figS21c_ASE_BH_input_data.txt"), sep = "\t", row.names = 1)
tmp <- ASE_sharing_BH
tmp[tmp == 1] <- NA
to_remove = which(colSums(is.na(tmp)) == nrow(tmp))
tmp = tmp[-to_remove, -to_remove]
# Turn a named vector into a two-column tibble: `names` holds the
# element names, `values` the elements themselves.
vec_to_df <- function(vector){
  tibble(names = names(vector), values = vector)
}
# discovery tissue
# How much, on average, do outliers in X replicate in another tissue?
discovery_median <- vec_to_df(apply(tmp, 2, median, na.rm = T))
discovery_range <- apply(tmp, 2, range, na.rm = T)
discovery_median$lower <- discovery_range[1,]
discovery_median$upper <- discovery_range[2,]
discovery_median %<>% arrange(desc(values))
discovery_median$names <- factor(discovery_median$names, levels = discovery_median$names)
fill_cols <- color_hex[levels(discovery_median$names)]
discovery_plt <-
ggplot(discovery_median, aes(x = names, y = values)) +
geom_bar(stat = "identity", fill = fill_cols) +
scale_fill_manual(color_hex[levels(discovery_median$names)]) +
geom_errorbar(aes(ymax = upper, ymin = lower), width = 0.1) +
ylab("Median % replication \n of outliers in \n test tissue") +
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
plot.margin = unit(c(.5, .5, 0. ,.5), units = "cm")) +
gtex_v8_figure_theme()
# Test tissue
# (the row medians for the ASE heatmap from panel A)
test_median <- vec_to_df(apply(tmp, 1, median, na.rm = T))
test_range <- apply(tmp, 1, range, na.rm = T)
test_median$lower <- test_range[1,]
test_median$upper <- test_range[2,]
test_median$names <- factor(test_median$names, levels = discovery_median$names)
fill_cols <- color_hex[levels(test_median$names)]
names(tissue_site_detail) <- tissue_abbrv
test_plt <-
ggplot(test_median, aes(x = names, y = values)) +
geom_bar(stat = "identity", fill = fill_cols) +
scale_fill_manual(color_hex[levels(test_median$names)]) +
geom_errorbar(aes(ymax = upper, ymin = lower), width = 0.1) +
theme(axis.title.x = element_blank(),
axis.text.x = element_text(angle = 270, hjust = 0),
plot.margin = unit(c(0,.5,.5,.5), units = "cm")) +
scale_x_discrete(labels = tissue_site_detail[levels(test_median$names)]) +
#scale_x_discrete(labels = levels(test_median$names)) +
ylab("Median % replication \n of outlier status \n from discovery tissues") +
scale_y_reverse() +
gtex_v8_figure_theme()
#test_plt
#### Generate the Color Key (Panel D) ####
gtex_key_plt <-
gtex_key %>%
mutate(tiss_order = 1:nrow(.)) %>%
mutate(tissue_site_detail = factor(tissue_site_detail, levels = tissue_site_detail))
gtex_key_plt$color_hex <- color_hex
color_key <-
ggplot(gtex_key_plt, aes(0, tiss_order)) +
geom_point(size = 2.5, color = color_hex) +
geom_text(label = tissue_site_detail, aes(x = .02, hjust = 0), size = 2.5) +
scale_y_reverse() +
xlim(-.025, .35) +
gtex_v8_figure_theme() +
theme_void()
#color_key
#### Bottom Half ####
condensed_heatmap <- plot_grid(discovery_plt, test_plt, nrow = 2, rel_heights = c(1.3,2), align = "v", axis = "l")
bottom_left <- plot_grid(NA_decrease_plt, condensed_heatmap, nrow = 2, align = "none", rel_heights = c(1, 3), labels = c("B", "C"))
bottom_half <- plot_grid(bottom_left, color_key, nrow = 1, rel_widths = c(1, .4), labels = c(NA, "D"))
#### Assemble the whole thing!
whole_fig <- plot_grid(top_half, bottom_half, nrow = 2, rel_heights = c(1, 1.35))
png("generated_figures/Fig_S21.png", width = 8.5, height = 11, units = "in", res = 300)
whole_fig
dev.off()
|
x=as.POSIXct("2015-12-25 11:35:34")
y=as.POSIXct("2015-12-25 11:35:34")
x
y
unclass(x)
unclass(y)
x=as.Date("2015-12-25")
class(x)
unclass(x)
install.packages("lubridate")
library(lubridate)
mytimepoint<-ymd_hm("1993-12-09 12:12",tz="Europe/Prague")
mytimepoint
mytimepoint2<-ymd_hm("1995-02-14 12:12",tz="Europe/Prague")
minute(mytimepoint)
month(mytimepoint)
day(mytimepoint)
year(mytimepoint)
olson_time_zones()
myinterval=interval(mytimepoint,mytimepoint2)
class(myinterval)
myinterval
minutes(7)
minutes(2.5)
dminutes(2.5)
leap_year(1999:2004)
#[1] FALSE TRUE FALSE FALSE FALSE TRUE
ymd(20061010)+dyears(2)
#[1] "2008-10-09"
x=ymd_hm(tz="CET","2016-12-09 12:13")
y=ymd_hm(tz="CET","2017-05-07 17:04")
minute(x)=14
x
with_tz(x,tz="Europe/London")
x
y-x
x<-rnorm(1000)
install.packages("tseries")
library(tseries)
adf.test(x)
# Augmented Dickey-Fuller Test
#
# # data: x
# Dickey-Fuller = -9.6946, Lag order = 9, p-value = 0.01
# alternative hypothesis: stationary
?adf.test
plot(nottem)
?decompose
plot(decompose(nottem))
adf.test(nottem)
# Augmented Dickey-Fuller Test
#
# data: nottem
# Dickey-Fuller = -12.998, Lag order = 6, p-value = 0.01
# alternative hypothesis: stationary
y=diffinv(x)
?diffinv
s<-1:10
d<-diff(s)
d
diffinv(s)
plot(y)
?lynx
length(lynx)
plot(lynx )
head(lynx)
#[1] 269 321 585 871 1475 2821
head(lynx[-1])
# [1] 321 585 871 1475 2821 3928
head(lynx[-114])
# [1] 269 321 585 871 1475 2821
library(lmtest)
dwtest(lynx[-114]~lynx[-1])
# Durbin-Watson test
#
# data: lynx[-114] ~ lynx[-1]
# DW = 1.1296, p-value = 1.148e-06
# alternative hypothesis: true autocorrelation is greater than 0
length(nottem)
dwtest(nottem[-240]~nottem[-1])
# Durbin-Watson test
#
# data: nottem[-240] ~ nottem[-1]
# DW = 1.0093, p-value = 5.097e-15
# alternative hypothesis: true autocorrelation is greater than 0
x=rnorm(700)
dwtest(x[-700]~x[-1])
# Durbin-Watson test
#
# data: x[-700] ~ x[-1]
# DW = 1.9991, p-value = 0.4958
# alternative hypothesis: true autocorrelation is greater than 0
mydata=runif(n=50,min=10,max=45)
mytimeseries=ts(data=mydata,start=1959,frequency = 4)
class(mytimeseries)
plot(mytimeseries)
head(EuStockMarkets)
class(EuStockMarkets)
# [1] "mts" "ts" "matrix"
x=cumsum(rnorm(n=450))
y=ts(x,start=c(1949,11),frequency = 12)
plot(y)
library(lattice)
xyplot.ts(y)
mynottem=decompose(nottem,"additive")
nottemadjusted=nottem-mynottem$seasonal
plot(nottemadjusted)
install.packages("forecast")
library(forecast)
|
/timeSeries0.R
|
no_license
|
seansio1995/R-tutorials
|
R
| false
| false
| 2,511
|
r
|
x=as.POSIXct("2015-12-25 11:35:34")
y=as.POSIXct("2015-12-25 11:35:34")
x
y
unclass(x)
unclass(y)
x=as.Date("2015-12-25")
class(x)
unclass(x)
install.packages("lubridate")
library(lubridate)
mytimepoint<-ymd_hm("1993-12-09 12:12",tz="Europe/Prague")
mytimepoint
mytimepoint2<-ymd_hm("1995-02-14 12:12",tz="Europe/Prague")
minute(mytimepoint)
month(mytimepoint)
day(mytimepoint)
year(mytimepoint)
olson_time_zones()
myinterval=interval(mytimepoint,mytimepoint2)
class(myinterval)
myinterval
minutes(7)
minutes(2.5)
dminutes(2.5)
leap_year(1999:2004)
#[1] FALSE TRUE FALSE FALSE FALSE TRUE
ymd(20061010)+dyears(2)
#[1] "2008-10-09"
x=ymd_hm(tz="CET","2016-12-09 12:13")
y=ymd_hm(tz="CET","2017-05-07 17:04")
minute(x)=14
x
with_tz(x,tz="Europe/London")
x
y-x
x<-rnorm(1000)
install.packages("tseries")
library(tseries)
adf.test(x)
# Augmented Dickey-Fuller Test
#
# # data: x
# Dickey-Fuller = -9.6946, Lag order = 9, p-value = 0.01
# alternative hypothesis: stationary
?adf.test
plot(nottem)
?decompose
plot(decompose(nottem))
adf.test(nottem)
# Augmented Dickey-Fuller Test
#
# data: nottem
# Dickey-Fuller = -12.998, Lag order = 6, p-value = 0.01
# alternative hypothesis: stationary
y=diffinv(x)
?diffinv
s<-1:10
d<-diff(s)
d
diffinv(s)
plot(y)
?lynx
length(lynx)
plot(lynx )
head(lynx)
#[1] 269 321 585 871 1475 2821
head(lynx[-1])
# [1] 321 585 871 1475 2821 3928
head(lynx[-114])
# [1] 269 321 585 871 1475 2821
library(lmtest)
dwtest(lynx[-114]~lynx[-1])
# Durbin-Watson test
#
# data: lynx[-114] ~ lynx[-1]
# DW = 1.1296, p-value = 1.148e-06
# alternative hypothesis: true autocorrelation is greater than 0
length(nottem)
dwtest(nottem[-240]~nottem[-1])
# Durbin-Watson test
#
# data: nottem[-240] ~ nottem[-1]
# DW = 1.0093, p-value = 5.097e-15
# alternative hypothesis: true autocorrelation is greater than 0
x=rnorm(700)
dwtest(x[-700]~x[-1])
# Durbin-Watson test
#
# data: x[-700] ~ x[-1]
# DW = 1.9991, p-value = 0.4958
# alternative hypothesis: true autocorrelation is greater than 0
mydata=runif(n=50,min=10,max=45)
mytimeseries=ts(data=mydata,start=1959,frequency = 4)
class(mytimeseries)
plot(mytimeseries)
head(EuStockMarkets)
class(EuStockMarkets)
# [1] "mts" "ts" "matrix"
x=cumsum(rnorm(n=450))
y=ts(x,start=c(1949,11),frequency = 12)
plot(y)
library(lattice)
xyplot.ts(y)
mynottem=decompose(nottem,"additive")
nottemadjusted=nottem-mynottem$seasonal
plot(nottemadjusted)
install.packages("forecast")
library(forecast)
|
#---------------------------------------------------------------------
# 1
#---------------------------------------------------------------------
# 1.a
1:20
# 1.b
20:1
# 1.c
c(1:20, 19:1)
# 1.d
tmp <- c(4, 6, 3)
# 1.e
rep(c(4, 6, 3), 10)
# 1.f
rep(c(4, 6, 3), length = 31)
# 1.g
rep(c(4, 6, 3), times = c(10, 20, 30))
#---------------------------------------------------------------------
# 2
#---------------------------------------------------------------------
x <- seq(from = 3, to = 6, by =0.1)
exp(x) * cos(x)
#---------------------------------------------------------------------
# 3
#---------------------------------------------------------------------
# 3.a
x <- seq(3, 36, by = 3)
y <- seq(1, 34, by = 3)
0.1 ^ x * 0.2 ^ y
# 3.b
2 ^ (1:25) / (1:25)
#---------------------------------------------------------------------
# 4
#---------------------------------------------------------------------
#4.a
i <- 10:100
sum((i ^ 3) + (4 * i ^ 2))
#4.b
i <- 1:25
sum(((2 ^ i) / i) + ((3 ^ i) / (i ^ 2)))
#---------------------------------------------------------------------
# 5
#---------------------------------------------------------------------
#5.a
x <- 1:30
paste("label", x, sep = " ")
#5.b
x <- 1:30
paste("fn", x, sep = "")
#---------------------------------------------------------------------
# 6
#---------------------------------------------------------------------
set.seed(50)
xVec <- sample(0:999, 250, replace = T)
yVec <- sample(0:999, 250, replace = T)
#6.a
i <- 2:length(xVec)
yVec[i] - xVec[i - 1]
#6.b
|
/R-Exercises/R-Exercises/Ex1.R
|
no_license
|
KrishnaGMohan/Projects-R
|
R
| false
| false
| 1,551
|
r
|
#---------------------------------------------------------------------
# 1
#---------------------------------------------------------------------
# 1.a
1:20
# 1.b
20:1
# 1.c
c(1:20, 19:1)
# 1.d
tmp <- c(4, 6, 3)
# 1.e
rep(c(4, 6, 3), 10)
# 1.f
rep(c(4, 6, 3), length = 31)
# 1.g
rep(c(4, 6, 3), times = c(10, 20, 30))
#---------------------------------------------------------------------
# 2
#---------------------------------------------------------------------
x <- seq(from = 3, to = 6, by =0.1)
exp(x) * cos(x)
#---------------------------------------------------------------------
# 3
#---------------------------------------------------------------------
# 3.a
x <- seq(3, 36, by = 3)
y <- seq(1, 34, by = 3)
0.1 ^ x * 0.2 ^ y
# 3.b
2 ^ (1:25) / (1:25)
#---------------------------------------------------------------------
# 4
#---------------------------------------------------------------------
#4.a
i <- 10:100
sum((i ^ 3) + (4 * i ^ 2))
#4.b
i <- 1:25
sum(((2 ^ i) / i) + ((3 ^ i) / (i ^ 2)))
#---------------------------------------------------------------------
# 5
#---------------------------------------------------------------------
#5.a
x <- 1:30
paste("label", x, sep = " ")
#5.b
x <- 1:30
paste("fn", x, sep = "")
#---------------------------------------------------------------------
# 6
#---------------------------------------------------------------------
set.seed(50)
xVec <- sample(0:999, 250, replace = T)
yVec <- sample(0:999, 250, replace = T)
#6.a
i <- 2:length(xVec)
yVec[i] - xVec[i - 1]
#6.b
|
# ________________________________________________________________________________
# Run script ----
# ________________________________________________________________________________
# Packages, paths and functions
source("./R/Set_up.R")
# ________________________________________________________________________________
# preprocessing raw data ----
# 03_preprocess_new_freshecol_taxa.R creates an .rds file which is
# then used in the harmonization scripts
# ________________________________________________________________________________
# combine raw freshecol exports
# and change trait names
source("./R/Preprocessing_raw/01_preprocess_new_freshecol_2020.R")
# taxa cleaning and formation
source("./R/Preprocessing_raw/02_preprocess_new_freshecol_taxa.R")
# retrieving taxonomic information where missing and manual correction of some
# taxonomic entries
source("./R/Preprocessing_raw/03_preprocess_new_freshecol_taxa.R")
# ________________________________________________________________________________
# Harmonization scripts ----
# an intermediate .rds file is created at the end of each script
# ________________________________________________________________________________
|
/R/Run.R
|
no_license
|
KunzstLD/Invertebrate_traits
|
R
| false
| false
| 1,197
|
r
|
# ________________________________________________________________________________
# Run script ----
# ________________________________________________________________________________
# Packages, paths and functions
source("./R/Set_up.R")
# ________________________________________________________________________________
# preprocessing raw data ----
# 03_preprocess_new_freshecol_taxa.R creates an .rds file which is
# then used in the harmonization scripts
# ________________________________________________________________________________
# combine raw freshecol exports
# and change trait names
source("./R/Preprocessing_raw/01_preprocess_new_freshecol_2020.R")
# taxa cleaning and formation
source("./R/Preprocessing_raw/02_preprocess_new_freshecol_taxa.R")
# retrieving taxonomic information where missing and manual correction of some
# taxonomic entries
source("./R/Preprocessing_raw/03_preprocess_new_freshecol_taxa.R")
# ________________________________________________________________________________
# Harmonization scripts ----
# an intermediate .rds file is created at the end of each script
# ________________________________________________________________________________
|
\docType{data}
\name{DropOutRates}
\alias{DropOutRates}
\title{The dropout probabilities}
\format{One probability for a given age and socio economic status per row}
\usage{
data(DropOutRates)
}
\description{
The data was obtained from the 'child outcomes data.xlsx'
file in the worksheet 'Schooling Data'. Cells B5:B16 and
H5:H16 were used. Note that this was calculated in that
sheet from other source data. See A20:B41
}
\details{
Column names were chosen to match the parameter names used
in the modgen model.
}
\keyword{datasets}
|
/man/DropOutRates.Rd
|
no_license
|
philliplab/mochModelData
|
R
| false
| false
| 535
|
rd
|
\docType{data}
\name{DropOutRates}
\alias{DropOutRates}
\title{The dropout probabilities}
\format{One probability for a given age and socio economic status per row}
\usage{
data(DropOutRates)
}
\description{
The data was obtained from the 'child outcomes data.xlsx'
file in the worksheet 'Schooling Data'. Cells B5:B16 and
H5:H16 were used. Note that this was calculated in that
sheet from other source data. See A20:B41
}
\details{
Column names were chosen to match the parameter names used
in the modgen model.
}
\keyword{datasets}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(bslib)
# Define UI for application that draws a histogram
ui <- fluidPage(
theme = bslib::bs_theme(bootswatch = "flatly"),
# Application title
titlePanel("Survival"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("chance_operation",
"Proportion Survive Operation:",
min = 0,
max = 1,
value = 0.9),
sliderInput("chance_icu",
"Proportion Survive ICU:",
min = 0,
max = 1,
value = 0.899),
sliderInput("chance_hospital",
"Proportion Survive Hospital:",
min = 0,
max = 1,
value = 0.884),
sliderInput("chance_home",
"Proportion Get Home:",
min = 0,
max = 1,
value = 0.9),
sliderInput("chance_one",
"Proportion Alive One Year:",
min = 0,
max = 1,
value = 0.870),
sliderInput("chance_four",
"Proportion Alive Four Years:",
min = 0,
max = 1,
value = 0.726)
),
# Show a plot of the generated distribution
mainPanel(
h2("Chance Of Survival"),
plotOutput("distPlot"),
em(a("Source Code Found Here", href="https://github.com/callumgwtaylor/surgical_critical_ethics/tree/main/survival"))
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
output$distPlot <- renderPlot({
library(tidyverse)
library(ggalluvial)
patients <- 1:10000
patients_shuffled <- sample(patients)
patients <- tibble(
patient_id = patients_shuffled
)
survived <- function(df_patient, survival_chance = 0.9){
list_patient_id <- df_patient$patient_id
length_patient <- length(list_patient_id)
list_of_patients <- sample(list_patient_id)
patient_survive <- list_of_patients[1:round(survival_chance * length_patient)]
patient_survive <- tibble(
patient_id = patient_survive,
survived = TRUE
)
if(survival_chance < 1){
patient_died <- list_of_patients[round(survival_chance * length_patient + 1):length_patient]
patient_died <- tibble(
patient_id = patient_died,
survived = FALSE
)
patients <- bind_rows(patient_survive, patient_died)
survived_results <- left_join(df_patient, patients)
rm(df_patient, patient_survive, patient_died, patients)
} else{
survived_results <- left_join(df_patient, patient_survive)
rm(df_patient, patient_survive)
}
rm(list_of_patients, list_patient_id, length_patient)
survived_results
}
selective_survived <- function(df_patient, filter_column, new_name, survival_chance = 0.9){
df_patient_true <- df_patient %>%
filter({{filter_column}} == TRUE)
list_patient_id <- df_patient_true$patient_id
length_patient <- length(list_patient_id)
list_of_patients <- sample(list_patient_id)
patient_survive <- list_of_patients[1:round(survival_chance * length_patient)]
patient_survive <- tibble(
patient_id = patient_survive,
survived = TRUE
)
if(survival_chance < 1){
patient_died <- list_of_patients[round(survival_chance * length_patient + 1):length_patient]
patient_died <- tibble(
patient_id = patient_died,
survived = FALSE
)
patients <- bind_rows(patient_survive, patient_died)
survived_results <- left_join(df_patient, patients) %>%
mutate(survived = replace_na(survived, FALSE)) %>%
rename({{new_name}} := survived)
} else {
survived_results <- left_join(df_patient, patient_survive) %>%
mutate(survived = replace_na(survived, FALSE)) %>%
rename({{new_name}} := survived)
}
survived_results
}
patients_percentage <- patients %>%
mutate(operated = TRUE) %>%
selective_survived(filter_column = operated,
new_name = survived_operation,
survival_chance = input$chance_operation) %>%
selective_survived(filter_column = survived_operation,
new_name = survived_icu,
survival_chance = input$chance_icu) %>%
selective_survived(filter_column = survived_icu,
new_name = survived_hospital,
survival_chance = as.numeric(input$chance_hospital)) %>%
selective_survived(filter_column = survived_hospital,
new_name = got_home,
survival_chance = as.numeric(input$chance_home)) %>%
selective_survived(filter_column = got_home,
new_name = one_year,
survival_chance = as.numeric(input$chance_one)) %>%
selective_survived(filter_column = one_year,
new_name = four_year,
survival_chance = as.numeric(input$chance_four)) %>%
selective_survived(filter_column = four_year,
new_name = eventually,
survival_chance = 0) %>%
filter(eventually == FALSE)
rm(patients, patients_shuffled)
patients_percentage <- patients_percentage %>%
pivot_longer(cols = c(operated, survived_operation, survived_icu, survived_hospital, got_home, one_year, four_year, eventually),
names_to = "event",
values_to = "survived") %>%
mutate(event = factor(event,
levels = c(
"operated",
"survived_operation",
"survived_icu",
"survived_hospital",
"got_home",
"one_year",
"four_year",
"eventually"
))) %>%
mutate(survived = factor(survived,
levels = c(
FALSE,
TRUE))) %>%
select(-patient_id) %>%
group_by(event, survived) %>%
summarise(count = n()) %>%
mutate(percentage = (count/10000)*100) %>%
mutate(percentage = round(percentage * (as.numeric(survived)-1), digits = 1))
patients_percentage[patients_percentage == 0.00] <- NA
patients_percentage <- patients_percentage %>%
mutate(truefalse = (((as.numeric(survived)-1)*-1)+1)*10000) %>%
mutate(percentage = stringr::str_c(percentage, "%", sep = ""))
survived_position <- patients_percentage %>%
filter(survived == TRUE)
survived_position <- min(survived_position$count)/2
died_position <- patients_percentage %>%
filter(survived == FALSE)
died_position <- 10000 - min(died_position$count)/2
positioning_labels <- tibble(
position = c(survived_position,died_position),
survived = c(TRUE, FALSE)
) %>%
mutate(survived = factor(survived,
levels = c(FALSE,
TRUE)))
patients_percentage <- left_join(patients_percentage, positioning_labels)
rm(positioning_labels, died_position, survived_position)
cols <- c("TRUE" = "#FC766A",
"FALSE" = "#5B84B1FF")
patients_percentage <- patients_percentage %>%
mutate(event = recode_factor(event,
operated = "Operation",
survived_operation = "Surviving\nOperation",
survived_icu = "Surviving\nICU",
survived_hospital = "Surviving\nHospital",
got_home = "Get\nHome",
one_year = "Surviving\nOne\nYear",
four_year = "Surviving\nFour\nYears",
eventually = "Chance\nOf\nDying\nEventually"))
plot <- ggplot(patients_percentage) +
geom_bar(aes(fill = survived,
x = event,
y = count),
position = "stack",
stat = "identity") +
geom_text(aes(x = event,
y = position,
label = count)) +
geom_text(aes(x = event,
y = position,
label = percentage),
nudge_y = -1000) +
theme_minimal() +
scale_fill_manual(values = cols) +
theme(legend.position = "bottom") +
labs(x = "")
rm(patients_percentage)
plot
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/survival/app.R
|
no_license
|
callumgwtaylor/surgical_critical_ethics
|
R
| false
| false
| 10,835
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(bslib)
# Define UI for application that draws a histogram
ui <- fluidPage(
theme = bslib::bs_theme(bootswatch = "flatly"),
# Application title
titlePanel("Survival"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("chance_operation",
"Proportion Survive Operation:",
min = 0,
max = 1,
value = 0.9),
sliderInput("chance_icu",
"Proportion Survive ICU:",
min = 0,
max = 1,
value = 0.899),
sliderInput("chance_hospital",
"Proportion Survive Hospital:",
min = 0,
max = 1,
value = 0.884),
sliderInput("chance_home",
"Proportion Get Home:",
min = 0,
max = 1,
value = 0.9),
sliderInput("chance_one",
"Proportion Alive One Year:",
min = 0,
max = 1,
value = 0.870),
sliderInput("chance_four",
"Proportion Alive Four Years:",
min = 0,
max = 1,
value = 0.726)
),
# Show a plot of the generated distribution
mainPanel(
h2("Chance Of Survival"),
plotOutput("distPlot"),
em(a("Source Code Found Here", href="https://github.com/callumgwtaylor/surgical_critical_ethics/tree/main/survival"))
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
output$distPlot <- renderPlot({
library(tidyverse)
library(ggalluvial)
patients <- 1:10000
patients_shuffled <- sample(patients)
patients <- tibble(
patient_id = patients_shuffled
)
    # Randomly split a cohort into survivors and non-survivors.
    #
    # @param df_patient A data frame with a `patient_id` column.
    # @param survival_chance Proportion of patients that survive (0-1].
    # @return `df_patient` with a logical `survived` column joined on.
    survived <- function(df_patient, survival_chance = 0.9){
      list_patient_id <- df_patient$patient_id
      length_patient <- length(list_patient_id)
      # Shuffle so the surviving subset is a uniform random draw.
      list_of_patients <- sample(list_patient_id)
      patient_survive <- list_of_patients[1:round(survival_chance * length_patient)]
      patient_survive <- tibble(
        patient_id = patient_survive,
        survived = TRUE
      )
      if(survival_chance < 1){
        # NOTE(review): round(p * n + 1) is not always round(p * n) + 1
        # (R rounds half to even), so the survivor and non-survivor sets
        # may skip or overlap by one patient when p * n is a
        # half-integer -- confirm and align the two indices.
        patient_died <- list_of_patients[round(survival_chance * length_patient + 1):length_patient]
        patient_died <- tibble(
          patient_id = patient_died,
          survived = FALSE
        )
        patients <- bind_rows(patient_survive, patient_died)
        survived_results <- left_join(df_patient, patients)
        rm(df_patient, patient_survive, patient_died, patients)
      } else{
        # survival_chance >= 1: everyone survives, no FALSE rows needed.
        survived_results <- left_join(df_patient, patient_survive)
        rm(df_patient, patient_survive)
      }
      rm(list_of_patients, list_patient_id, length_patient)
      survived_results
    }
selective_survived <- function(df_patient, filter_column, new_name, survival_chance = 0.9){
df_patient_true <- df_patient %>%
filter({{filter_column}} == TRUE)
list_patient_id <- df_patient_true$patient_id
length_patient <- length(list_patient_id)
list_of_patients <- sample(list_patient_id)
patient_survive <- list_of_patients[1:round(survival_chance * length_patient)]
patient_survive <- tibble(
patient_id = patient_survive,
survived = TRUE
)
if(survival_chance < 1){
patient_died <- list_of_patients[round(survival_chance * length_patient + 1):length_patient]
patient_died <- tibble(
patient_id = patient_died,
survived = FALSE
)
patients <- bind_rows(patient_survive, patient_died)
survived_results <- left_join(df_patient, patients) %>%
mutate(survived = replace_na(survived, FALSE)) %>%
rename({{new_name}} := survived)
} else {
survived_results <- left_join(df_patient, patient_survive) %>%
mutate(survived = replace_na(survived, FALSE)) %>%
rename({{new_name}} := survived)
}
survived_results
}
patients_percentage <- patients %>%
mutate(operated = TRUE) %>%
selective_survived(filter_column = operated,
new_name = survived_operation,
survival_chance = input$chance_operation) %>%
selective_survived(filter_column = survived_operation,
new_name = survived_icu,
survival_chance = input$chance_icu) %>%
selective_survived(filter_column = survived_icu,
new_name = survived_hospital,
survival_chance = as.numeric(input$chance_hospital)) %>%
selective_survived(filter_column = survived_hospital,
new_name = got_home,
survival_chance = as.numeric(input$chance_home)) %>%
selective_survived(filter_column = got_home,
new_name = one_year,
survival_chance = as.numeric(input$chance_one)) %>%
selective_survived(filter_column = one_year,
new_name = four_year,
survival_chance = as.numeric(input$chance_four)) %>%
selective_survived(filter_column = four_year,
new_name = eventually,
survival_chance = 0) %>%
filter(eventually == FALSE)
rm(patients, patients_shuffled)
patients_percentage <- patients_percentage %>%
pivot_longer(cols = c(operated, survived_operation, survived_icu, survived_hospital, got_home, one_year, four_year, eventually),
names_to = "event",
values_to = "survived") %>%
mutate(event = factor(event,
levels = c(
"operated",
"survived_operation",
"survived_icu",
"survived_hospital",
"got_home",
"one_year",
"four_year",
"eventually"
))) %>%
mutate(survived = factor(survived,
levels = c(
FALSE,
TRUE))) %>%
select(-patient_id) %>%
group_by(event, survived) %>%
summarise(count = n()) %>%
mutate(percentage = (count/10000)*100) %>%
mutate(percentage = round(percentage * (as.numeric(survived)-1), digits = 1))
patients_percentage[patients_percentage == 0.00] <- NA
patients_percentage <- patients_percentage %>%
mutate(truefalse = (((as.numeric(survived)-1)*-1)+1)*10000) %>%
mutate(percentage = stringr::str_c(percentage, "%", sep = ""))
survived_position <- patients_percentage %>%
filter(survived == TRUE)
survived_position <- min(survived_position$count)/2
died_position <- patients_percentage %>%
filter(survived == FALSE)
died_position <- 10000 - min(died_position$count)/2
positioning_labels <- tibble(
position = c(survived_position,died_position),
survived = c(TRUE, FALSE)
) %>%
mutate(survived = factor(survived,
levels = c(FALSE,
TRUE)))
patients_percentage <- left_join(patients_percentage, positioning_labels)
rm(positioning_labels, died_position, survived_position)
cols <- c("TRUE" = "#FC766A",
"FALSE" = "#5B84B1FF")
patients_percentage <- patients_percentage %>%
mutate(event = recode_factor(event,
operated = "Operation",
survived_operation = "Surviving\nOperation",
survived_icu = "Surviving\nICU",
survived_hospital = "Surviving\nHospital",
got_home = "Get\nHome",
one_year = "Surviving\nOne\nYear",
four_year = "Surviving\nFour\nYears",
eventually = "Chance\nOf\nDying\nEventually"))
plot <- ggplot(patients_percentage) +
geom_bar(aes(fill = survived,
x = event,
y = count),
position = "stack",
stat = "identity") +
geom_text(aes(x = event,
y = position,
label = count)) +
geom_text(aes(x = event,
y = position,
label = percentage),
nudge_y = -1000) +
theme_minimal() +
scale_fill_manual(values = cols) +
theme(legend.position = "bottom") +
labs(x = "")
rm(patients_percentage)
plot
})
}
# Run the application: bind the UI and server defined above into one app.
shinyApp(ui = ui, server = server)
|
#' @title
#' San Francisco Themes
#'
#' @description
#' An Accessible Set of Themes and Color Palettes Optimized for Light/Dark
#' Appearances, and Different Screen Sizes
#'
#' @md
#' @details
#' sfthemes is a pair of [`ggplot2`](https://ggplot2.tidyverse.org) themes,
#' `theme_sf_light()`, `theme_sf_dark()`, and a collection of color scales based
#' on [Apple's Human Interface Guidelines](https://developer.apple.com/design/human-interface-guidelines/).
#' Themes are inspired by the occasional appearances of charts used by Apple within different
#' contexts, e.g., Apple Health app, Screen Time on iOS and macOS. And colors are
#' adapted from iOS, macOS, and watchOS system colors. Each color scale has a
#' light and dark variant as well that complements the light/dark themes,
#' `scale_colour_ios_light()` and `scale_colour_ios_dark()`, respectively. In
#' addition, sfthemes uses [dynamic type sizing](https://developer.apple.com/design/human-interface-guidelines/ios/visual-design/typography/)
#' in order to provide a consistent and elegant overall resizing and re-scaling of
#' plot elements. Read more [here](articles/typography.html).
#'
#' When combined, sfthemes offers a unique opportunity to generate two versions of
#' your plot with two *similar* yet *individually* optimized color palettes for
#' light and dark themes.
#'
#' @md
#' @name sfthemes
#' @docType package
#' @author Amir Masoud Abdol (i@amirmasoudabdol.name)
#' @keywords internal
#' @import ggplot2 scales extrafont grDevices grid
"_PACKAGE"
|
/R/sfthemes-package.R
|
no_license
|
thechrelsres/sfthemes
|
R
| false
| false
| 1,532
|
r
|
#' @title
#' San Francisco Themes
#'
#' @description
#' An Accessible Set of Themes and Color Palettes Optimized for Light/Dark
#' Appearances, and Different Screen Sizes
#'
#' @md
#' @details
#' sfthemes is a pair of [`ggplot2`](https://ggplot2.tidyverse.org) themes,
#' `theme_sf_light()`, `theme_sf_dark()`, and a collection of color scales based
#' on [Apple's Human Interface Guidelines](https://developer.apple.com/design/human-interface-guidelines/).
#' Themes are inspired by the occasional appearances of charts used by Apple within different
#' contexts, e.g., Apple Health app, Screen Time on iOS and macOS. And colors are
#' adapted from iOS, macOS, and watchOS system colors. Each color scale has a
#' light and dark variant as well that complements the light/dark themes,
#' `scale_colour_ios_light()` and `scale_colour_ios_dark()`, respectively. In
#' addition, sfthemes uses [dynamic type sizing](https://developer.apple.com/design/human-interface-guidelines/ios/visual-design/typography/)
#' in order to provide a consistent and elegant overall resizing and re-scaling of
#' plot elements. Read more [here](articles/typography.html).
#'
#' When combined, sfthemes offers a unique opportunity to generate two versions of
#' your plot with two *similar* yet *individually* optimized color palettes for
#' light and dark themes.
#'
#' @md
#' @name sfthemes
#' @docType package
#' @author Amir Masoud Abdol (i@amirmasoudabdol.name)
#' @keywords internal
#' @import ggplot2 scales extrafont grDevices grid
"_PACKAGE"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvnormal_semi_conjugate.R
\name{Mvnormal2Create}
\alias{Mvnormal2Create}
\title{Create a multivariate normal mixing distribution with semi conjugate prior}
\usage{
Mvnormal2Create(priorParameters)
}
\arguments{
\item{priorParameters}{The prior parameters for the Multivariate Normal.}
}
\description{
Create a multivariate normal mixing distribution with semi conjugate prior
}
|
/man/Mvnormal2Create.Rd
|
no_license
|
cran/dirichletprocess
|
R
| false
| true
| 456
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mvnormal_semi_conjugate.R
\name{Mvnormal2Create}
\alias{Mvnormal2Create}
\title{Create a multivariate normal mixing distribution with semi conjugate prior}
\usage{
Mvnormal2Create(priorParameters)
}
\arguments{
\item{priorParameters}{The prior parameters for the Multivariate Normal.}
}
\description{
Create a multivariate normal mixing distribution with semi conjugate prior
}
|
requireNamespace("dplyr")
#' Crosswalk two `sf` geoms, using area.
#'
#' @param x_geom `sf` polygons to crosswalk from.
#' @param y_geom `sf` polygons to crosswalk to.
#' @param x_id A vector of ids associated with `x_geom`. If `NULL`, ids are `1:N`.
#' @param y_id A vector of ids associated with `y_geom`. If `NULL`, ids are `1:N`.
#' @param allow_unmatched_weights How to handle cases where geoms don't perfectly overlap.
#' @param verbose Print informative messages.
#' @param tol A tolerance used if `allow_unmatched_weights` is "error".
#'
#' @return A dataframe with columns `x.id`, `y.id`, `area`, `from_x_to_y`, `from_y_to_x`.
#' If x_id or y_id are NULL, the id columns are integers refering to indices of the geoms.
#' `area` is the area of the intersection. `from_x_to_y` is the area in
#' the intersection divided by the area of `x_geom` as a whole. It should be
#' used to proportionally allocate values from `x_geom` to `y_geom`s.
#'
#' `allow_unmatched_weights` handles unmatched geoms that are not 1:1. If "error",
#' then an error is raised whenever the total area for a given geom in the final crosswalk
#' divided by the original area of the geom is not within `tol` of 1.
#'
#' @examples
#' \dontrun{
#'
#' data("divs_2019")
#' data("divs_201911")
#' cw <- crosswalk_geoms_area(
#' divs_2019$geometry,
#' divs_201911$geometry,
#' x_id=divs_2019$warddiv,
#' y_id=divs_201911$warddiv
#' )
#' }
#' @export
crosswalk_geoms_area <- function(
  x_geom,
  y_geom,
  x_id=NULL,
  y_id=NULL,
  allow_unmatched_weights=c("error", "allow"),
  verbose=TRUE,
  tol=1e-4
){
  allow_unmatched_weights <- match.arg(allow_unmatched_weights)
  # Default ids are the geoms' positions.
  if(is.null(x_id)) x_id <- seq_along(x_geom)
  if(is.null(y_id)) y_id <- seq_along(y_geom)
  if(verbose) message("Intersecting ", length(x_geom), " x geoms with ", length(y_geom), " y geoms.")
  res <- sf::st_intersection(
    sf::st_sf(data.frame(geom=x_geom, id.x=x_id)),
    sf::st_sf(data.frame(geom=y_geom, id.y=y_id))
  )
  res$area <- sf::st_area(res)
  # Drop degenerate (zero-area) intersections, e.g. shared borders.
  res <- res[as.numeric(res$area) > 0, ]
  # st_drop_geometry() is robust to the geometry column's actual name; the
  # previous `select(-geometry)` assumed the column was named "geometry"
  # even though it was created as `geom` above.
  res <- sf::st_drop_geometry(res)
  if(allow_unmatched_weights == "error"){
    validate_loss(x_geom, x_id, res, tol, id.x)
    validate_loss(y_geom, y_id, res, tol, id.y)
  }
  # dplyr calls are namespaced: this file only does requireNamespace("dplyr"),
  # which loads but does not attach the package, so unqualified verbs (and
  # `%>%`) are unresolved unless dplyr happens to be attached elsewhere.
  res <- dplyr::group_by(res, id.x)
  res <- dplyr::mutate(res, from_x_to_y = as.numeric(area / sum(area)))
  res <- dplyr::group_by(res, id.y)
  res <- dplyr::mutate(res, from_y_to_x = as.numeric(area / sum(area)))
  # Return an ungrouped frame so callers don't inherit a stray grouping.
  res <- dplyr::ungroup(res)
  return(res)
}
# Check that each original geom's area is (within `tol`, relative) fully
# accounted for in the crosswalk `res`; stop with the offending indices
# otherwise. `res_id` is the unquoted id column of `res` (id.x or id.y).
validate_loss <- function(geom, id, res, tol, res_id){
  res_id <- rlang::enquo(res_id)
  orig_area <- data.frame(id=id, area=sf::st_area(geom))
  # Total crosswalked area per original geom. Namespaced because the file
  # only does requireNamespace("dplyr"), which does not attach the package.
  new_area <- dplyr::summarise(dplyr::group_by(res, !!res_id), area=sum(area))
  lost_area <- dplyr::left_join(orig_area, new_area, by=c(id=rlang::as_name(res_id)))
  lost_area <- dplyr::mutate(lost_area, diff = area.x - area.y)
  # A geom is invalid when its relative area change exceeds `tol`, or when
  # it does not appear in the crosswalk at all (diff is NA).
  invalid <- abs(lost_area$diff) / lost_area$area.x > units::as_units(tol, "1")
  invalid <- invalid | is.na(invalid)
  if(any(invalid)){
    stop(
      sprintf(
        "Area was lost. geoms %s changed more than tolerance.",
        paste(which(invalid), collapse=", ")
      )
    )
  }
}
|
/R/crosswalk_area.R
|
permissive
|
jtannen/jaywalkr
|
R
| false
| false
| 2,981
|
r
|
requireNamespace("dplyr")
#' Crosswalk two `sf` geoms, using area.
#'
#' @param x_geom `sf` polygons to crosswalk from.
#' @param y_geom `sf` polygons to crosswalk to.
#' @param x_id A vector of ids associated with `x_geom`. If `NULL`, ids are `1:N`.
#' @param y_id A vector of ids associated with `y_geom`. If `NULL`, ids are `1:N`.
#' @param allow_unmatched_weights How to handle cases where geoms don't perfectly overlap.
#' @param verbose Print informative messages.
#' @param tol A tolerance used if `allow_unmatched_weights` is "error".
#'
#' @return A dataframe with columns `x.id`, `y.id`, `area`, `from_x_to_y`, `from_y_to_x`.
#' If x_id or y_id are NULL, the id columns are integers refering to indices of the geoms.
#' `area` is the area of the intersection. `from_x_to_y` is the area in
#' the intersection divided by the area of `x_geom` as a whole. It should be
#' used to proportionally allocate values from `x_geom` to `y_geom`s.
#'
#' `allow_unmatched_weights` handles unmatched geoms that are not 1:1. If "error",
#' then an error is raised whenever the total area for a given geom in the final crosswalk
#' divided by the original area of the geom is not within `tol` of 1.
#'
#' @examples
#' \dontrun{
#'
#' data("divs_2019")
#' data("divs_201911")
#' cw <- crosswalk_geoms_area(
#' divs_2019$geometry,
#' divs_201911$geometry,
#' x_id=divs_2019$warddiv,
#' y_id=divs_201911$warddiv
#' )
#' }
#' @export
crosswalk_geoms_area <- function(
  x_geom,
  y_geom,
  x_id=NULL,
  y_id=NULL,
  allow_unmatched_weights=c("error", "allow"),
  verbose=TRUE,
  tol=1e-4
){
  allow_unmatched_weights <- match.arg(allow_unmatched_weights)
  # Default ids are the geoms' positions.
  if(is.null(x_id)) x_id <- seq_along(x_geom)
  if(is.null(y_id)) y_id <- seq_along(y_geom)
  if(verbose) message("Intersecting ", length(x_geom), " x geoms with ", length(y_geom), " y geoms.")
  res <- sf::st_intersection(
    sf::st_sf(data.frame(geom=x_geom, id.x=x_id)),
    sf::st_sf(data.frame(geom=y_geom, id.y=y_id))
  )
  res$area <- sf::st_area(res)
  # Drop degenerate (zero-area) intersections, e.g. shared borders.
  res <- res[as.numeric(res$area) > 0, ]
  # st_drop_geometry() is robust to the geometry column's actual name; the
  # previous `select(-geometry)` assumed the column was named "geometry"
  # even though it was created as `geom` above.
  res <- sf::st_drop_geometry(res)
  if(allow_unmatched_weights == "error"){
    validate_loss(x_geom, x_id, res, tol, id.x)
    validate_loss(y_geom, y_id, res, tol, id.y)
  }
  # dplyr calls are namespaced: this file only does requireNamespace("dplyr"),
  # which loads but does not attach the package, so unqualified verbs (and
  # `%>%`) are unresolved unless dplyr happens to be attached elsewhere.
  res <- dplyr::group_by(res, id.x)
  res <- dplyr::mutate(res, from_x_to_y = as.numeric(area / sum(area)))
  res <- dplyr::group_by(res, id.y)
  res <- dplyr::mutate(res, from_y_to_x = as.numeric(area / sum(area)))
  # Return an ungrouped frame so callers don't inherit a stray grouping.
  res <- dplyr::ungroup(res)
  return(res)
}
# Check that each original geom's area is (within `tol`, relative) fully
# accounted for in the crosswalk `res`; stop with the offending indices
# otherwise. `res_id` is the unquoted id column of `res` (id.x or id.y).
validate_loss <- function(geom, id, res, tol, res_id){
  res_id <- rlang::enquo(res_id)
  orig_area <- data.frame(id=id, area=sf::st_area(geom))
  # Total crosswalked area per original geom. Namespaced because the file
  # only does requireNamespace("dplyr"), which does not attach the package.
  new_area <- dplyr::summarise(dplyr::group_by(res, !!res_id), area=sum(area))
  lost_area <- dplyr::left_join(orig_area, new_area, by=c(id=rlang::as_name(res_id)))
  lost_area <- dplyr::mutate(lost_area, diff = area.x - area.y)
  # A geom is invalid when its relative area change exceeds `tol`, or when
  # it does not appear in the crosswalk at all (diff is NA).
  invalid <- abs(lost_area$diff) / lost_area$area.x > units::as_units(tol, "1")
  invalid <- invalid | is.na(invalid)
  if(any(invalid)){
    stop(
      sprintf(
        "Area was lost. geoms %s changed more than tolerance.",
        paste(which(invalid), collapse=", ")
      )
    )
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample.prop.test.R
\name{sample.prop.test}
\alias{sample.prop.test}
\title{Sample size for comparison of proportion}
\usage{
sample.prop.test(p1, p2, d = NA, sig.level = 0.05, power = 0.8, oneside = F)
}
\arguments{
\item{d}{The expected difference in proportion. If not entered d will be calculated as (p2-p1)}
\item{oneside}{Enter T if the test is onesided.}
\item{p1&p2}{estimate of proportion of first and second group respectively}
\item{sig.level&power}{Significance level and power of the test respectively.}
}
\value{
Returns the minimum sample size required per group
}
\description{
Sample size for comparison of proportion
}
\author{
Nithin Nayak
}
|
/MSTAT/man/sample.prop.test.Rd
|
no_license
|
nayaknithin/simplecodes
|
R
| false
| true
| 741
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample.prop.test.R
\name{sample.prop.test}
\alias{sample.prop.test}
\title{Sample size for comparison of proportion}
\usage{
sample.prop.test(p1, p2, d = NA, sig.level = 0.05, power = 0.8, oneside = F)
}
\arguments{
\item{d}{The expected difference in proportion. If not entered d will be calculated as (p2-p1)}
\item{oneside}{Enter T if the test is onesided.}
\item{p1&p2}{estimate of proportion of first and second group respectively}
\item{sig.level&power}{Significance level and power of the test respectively.}
}
\value{
Returns the minimum sample size required per group
}
\description{
Sample size for comparison of proportion
}
\author{
Nithin Nayak
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{welfare}
\alias{welfare}
\title{Issue Framing and Support for Welfare Reform}
\format{A data frame with 213 rows and 15 columns: \describe{
\item{gender1}{Gender. 0: Female; 1: Male.}
\item{educ1}{Level of education.
1: the municipal primary and lower secondary school before ninth form;
2: the municipal primary and lower secondary school after ninth or tenth form;
3: Basic schooling; 4: Vocational education;
5: Higher preparatory examination course student
6: Upper secondary school student; 7: Higher commercial examination student
8: Higher technical examination student; 9: Short-term further education;
10: Medium-term further education; 11: Long-term further education;
12: Foreign education; 13: Else.}
\item{polint1}{Political interest, measured on a 0-4 scale.}
\item{ideo1}{Ideological self-placement on a 1-8 scale. A larger value denotes a more
right-wing position.}
\item{know1}{Political knowledge. 1: low; 2: medium; 3: high.}
\item{value1}{Extremity of political values. 0: moderate. 1: extreme.}
\item{ttt}{Treatment assignment. Whether the respondent read a newspaper article that highlighted
the positive effect of welfare reform on job creation (1) versus one emphasizing its negative
effect on the poor (0).}
\item{W1}{The degree to which the respondent attributes welfare recipiency to internal factors,
measured on a 0-1 scale.}
\item{W2}{The degree to which the respondent attributes welfare recipiency to external factors,
measured on a 0-1 scale.}
\item{M1}{How important the respondent thinks that there should always be an incentive for
people to take a job instead of receiving welfare benefits, measured on a 0-1 scale.}
\item{M2}{How important the respondent thinks that nobody should live in poverty, measured on
a 0-1 scale.}
\item{M3}{How important the respondent thinks that government expenditures on welfare
benefits should not be too expensive, measured on a 0-1 scale.}
\item{M4}{How important the respondent thinks that no defrauder should receive welfare benefits,
measured on a 0-1 scale.}
\item{M5}{How important the respondent thinks that the unemployed should have benefit rates
making it possible to maintain a decent standard of living conditions, measured on a 0-1 scale.}
\item{Y}{Support for the proposed welfare reform, measured on a seven-point scale.}
}}
\usage{
welfare
}
\description{
A dataset of 213 Danish students containing variables on gender, education, political interest,
ideology, political knowledge, extremity of political values, treatment assignment (job/poor frame),
beliefs about why some people receive welfare benefits, perceived importance of different
considerations related to welfare policy, and support for a proposed welfare reform (Slothuus 2008; Imai and
Yamamoto 2013).
}
\references{
Slothuus, Rune. 2008. "More than Weighting Cognitive Importance: A Dual-process Model of Issue
Framing Effects." Political Psychology 29(1):1-28.
Imai, Kosuke and Teppei Yamamoto. 2013. "Identification and Sensitivity Analysis for Multiple
Causal Mechanisms: Revisiting Evidence from Framing Experiments." Political Analysis 21(2):141-171.
}
\keyword{datasets}
|
/man/welfare.Rd
|
no_license
|
xiangzhou09/paths
|
R
| false
| true
| 3,354
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{welfare}
\alias{welfare}
\title{Issue Framing and Support for Welfare Reform}
\format{A data frame with 213 rows and 15 columns: \describe{
\item{gender1}{Gender. 0: Female; 1: Male.}
\item{educ1}{Level of education.
1: the municipal primary and lower secondary school before ninth form;
2: the municipal primary and lower secondary school after ninth or tenth form;
3: Basic schooling; 4: Vocational education;
5: Higher preparatory examination course student
6: Upper secondary school student; 7: Higher commercial examination student
8: Higher technical examination student; 9: Short-term further education;
10: Medium-term further education; 11: Long-term further education;
12: Foreign education; 13: Else.}
\item{polint1}{Political interest, measured on a 0-4 scale.}
\item{ideo1}{Ideological self-placement on a 1-8 scale. A larger value denotes a more
right-wing position.}
\item{know1}{Political knowledge. 1: low; 2: medium; 3: high.}
\item{value1}{Extremity of political values. 0: moderate. 1: extreme.}
\item{ttt}{Treatment assignment. Whether the respondent read a newspaper article that highlighted
the positive effect of welfare reform on job creation (1) versus one emphasizing its negative
effect on the poor (0).}
\item{W1}{The degree to which the respondent attributes welfare recipiency to internal factors,
measured on a 0-1 scale.}
\item{W2}{The degree to which the respondent attributes welfare recipiency to external factors,
measured on a 0-1 scale.}
\item{M1}{How important the respondent thinks that there should always be an incentive for
people to take a job instead of receiving welfare benefits, measured on a 0-1 scale.}
\item{M2}{How important the respondent thinks that nobody should live in poverty, measured on
a 0-1 scale.}
\item{M3}{How important the respondent thinks that government expenditures on welfare
benefits should not be too expensive, measured on a 0-1 scale.}
\item{M4}{How important the respondent thinks that no defrauder should receive welfare benefits,
measured on a 0-1 scale.}
\item{M5}{How important the respondent thinks that the unemployed should have benefit rates
making it possible to maintain a decent standard of living conditions, measured on a 0-1 scale.}
\item{Y}{Support for the proposed welfare reform, measured on a seven-point scale.}
}}
\usage{
welfare
}
\description{
A dataset of 213 Danish students containing variables on gender, education, political interest,
ideology, political knowledge, extremity of political values, treatment assignment (job/poor frame),
beliefs about why some people receive welfare benefits, perceived importance of different
considerations related to welfare policy, and support for a proposed welfare reform (Slothuus 2008; Imai and
Yamamoto 2013).
}
\references{
Slothuus, Rune. 2008. "More than Weighting Cognitive Importance: A Dual-process Model of Issue
Framing Effects." Political Psychology 29(1):1-28.
Imai, Kosuke and Teppei Yamamoto. 2013. "Identification and Sensitivity Analysis for Multiple
Causal Mechanisms: Revisiting Evidence from Framing Experiments." Political Analysis 21(2):141-171.
}
\keyword{datasets}
|
#__________________________
# Download required packages
#__________________________
# Install any missing package, then make sure it is actually attached.
# The previous `if (!require(pkg)) install.packages(pkg)` pattern installs a
# missing package but never loads it afterwards, so the rest of the script
# would fail on a fresh machine.
for (pkg in c("tidyverse", "caret", "data.table", "e1071", "randomForest")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, repos = "http://cran.us.r-project.org")
    library(pkg, character.only = TRUE)
  }
}
#_____________________________________________________________
# Part 1: Downloading data, preprocessing and Data exploration
#_____________________________________________________________
# Download the UCI "Adult" census income data if not already cached locally.
if(!file.exists("adult.data")){
  download.file("http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data","adult.data")
}
income_data <- read.csv("adult.data",header = FALSE)
# The raw file has no header row, so set column names manually.
colnames(income_data) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
                           "occupation","relationship","race","sex","capital_gain","capital_loss",
                           "hours_per_week","native_country","income")
# Explore data
# To get the dimension of data
dim(income_data)
# To check the structure of data
str(income_data)
# To get summary of data
summary(income_data)
# To explore the levels in each column
sapply(income_data, levels)
# Missing values are coded as " ?" (note the leading space) in this file.
# Re-read treating them as NA, then drop incomplete rows.
income_data <- read.csv("adult.data",na.strings = c(" ?"),header = FALSE)
income_data <- na.omit(income_data)
# Set column names again (re-reading discarded them).
colnames(income_data) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
                           "occupation","relationship","race","sex","capital_gain","capital_loss",
                           "hours_per_week","native_country","income")
# To get the dimension of the cleaned data
dim(income_data)
# Create train and validation sets, with the validation set holding 10%.
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(income_data$income,p = 0.1,
                                  times = 1,list = FALSE)
trainset <- income_data[-test_index, ]
validation <- income_data[test_index,]
# To get dimension of train and validation data
dim(trainset)
dim(validation)
#___________________________________________
# Part 2: Visualization
#___________________________________________
# 1. age
# Summary of age
summary(trainset$age)
# Bucket ages into decade-wide groups for plotting.
trainset <- trainset %>%
  mutate(
    age_group =case_when(
      age > 10 & age <= 20 ~ "17-20",
      age > 20 & age <= 30 ~ "21-30",
      age > 30 & age <= 40 ~ "31-40",
      age > 40 & age <= 50 ~ "41-50",
      age > 50 & age <= 60 ~ "51-60",
      age > 60 & age <= 70 ~ "61-70",
      age > 70 & age <= 80 ~ "71-80",
      age > 80 & age <= 90 ~ "81-90"
    ))
# This plot displays age distribution
trainset %>% ggplot(aes(age)) +
  geom_histogram(binwidth = 5, col = "black", fill = "grey") +
  scale_x_continuous(breaks = seq(0, 95, 5)) +
  scale_y_continuous(breaks = seq(0, 5000, 500)) +
  ggtitle("Age distribution")
# This plot displays age_group vs income
trainset %>% ggplot(aes(x = age_group,fill = income)) +
  geom_bar(width = 0.3, col = "black") +
  scale_y_continuous(breaks = seq(0,9000,1000)) +
  ggtitle("Age - Income distribution")
# 2. workclass
# Summary of workclass
summary(trainset$workclass)
# This plot displays workclass distribution
trainset %>% ggplot(aes(workclass)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Workclass distribution")
# This plot displays workclass vs income
trainset %>% ggplot(aes(workclass,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Workclass - Income distribution")
# 3. Final weight
# Summary of fnlwgt
summary(trainset$fnlwgt)
# Boxplot of fnlwgt; `factor(0)` provides a single dummy x category.
trainset %>% ggplot(aes(factor(0),fnlwgt)) +
  geom_boxplot() + ggtitle("Final Weight distribution")
# Boxplot of fnlwgt by income, with the group mean overlaid as a point.
trainset %>% ggplot(aes(income,fnlwgt)) +
  geom_boxplot() +
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank()) +
  # `fun.y` was deprecated in ggplot2 3.3.0; `fun` is the current argument.
  stat_summary(fun = mean, geom = "point") +
  ggtitle("Final Weight - Income distribution")
# 4. Education
# Summary of education
summary(trainset$education)
# This plot displays education distribution
trainset %>% ggplot(aes(education)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Education distribution")
# This plot displays education vs income
trainset %>% ggplot(aes(education,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Education - Income distribution")
# 5. Education Number
# Summary of education_num
summary(trainset$education_num)
# Tabulate the mapping between education labels and education_num codes.
trainset %>% select(education,education_num) %>%
  group_by(education_num) %>% count(education_num,education)
# 6. Marital status
# Summary of marital_status
summary(trainset$marital_status)
# This plot displays marital_status distribution
trainset %>% ggplot(aes(marital_status)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Marital Status distribution")
# This plot displays marital_status vs income
trainset %>% ggplot(aes(marital_status,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Marital Status- Income distribution")
# 7. Occupation
# Summary of occupation
summary(trainset$occupation)
# This plot displays occupation distribution
trainset %>% ggplot(aes(occupation)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Occupation distribution")
# This plot displays occupation vs income
trainset %>% ggplot(aes(occupation,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Occupation - Income distribution")
# 8. Relationship
# Summary of relationship
summary(trainset$relationship)
# This plot displays relationship distribution
trainset %>% ggplot(aes(relationship)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Relationship distribution")
# This plot displays relationship vs income
trainset %>% ggplot(aes(relationship,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Relationship - Income distribution")
# 9. Race
# Summary of race
summary(trainset$race)
# This plot displays race distribution
trainset %>% ggplot(aes(race)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Race distribution")
# This plot displays race vs income distribution
trainset %>% ggplot(aes(race,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Race - Income distribution")
# 10. Sex
# Summary of sex
summary(trainset$sex)
# This plot displays sex distribution
trainset %>% ggplot(aes(sex)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Sex distribution")
# This plot displays sex vs income
trainset %>% ggplot(aes(sex,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Sex - Income distribution")
# This plot displays education vs sex
trainset %>% ggplot(aes(education,fill = sex)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Education - sex distribution")
# 11. Capital_gain
# Summary of capital_gain
summary(trainset$capital_gain)
# This plot displays capital gain distribution
trainset %>% ggplot(aes(capital_gain)) +
  geom_histogram(col = "black", fill = "grey") +
  ggtitle("Capital-gain distribution")
# Boxplot of capital gain; `factor(0)` provides a single dummy x category.
trainset %>% ggplot(aes(factor(0),capital_gain)) +
  geom_boxplot() +
  ggtitle("Capital-gain distribution")
# 12. Capital_loss
# Summary of capital_loss
summary(trainset$capital_loss)
# This plot displays capital loss distribution
trainset %>% ggplot(aes(capital_loss)) +
  geom_histogram(col = "black", fill = "grey") +
  ggtitle("Capital-loss distribution")
# Boxplot of capital loss; `factor(0)` provides a single dummy x category.
trainset %>% ggplot(aes(factor(0),capital_loss)) +
  geom_boxplot() +
  ggtitle("Capital-loss distribution")
# 13. Hours per week
# Summary of hours_per_week
summary(trainset$hours_per_week)
# This plot displays hours_per_week distribution
trainset %>% ggplot(aes(hours_per_week)) +
  geom_histogram(binwidth = 5, col = "black", fill = "grey") +
  scale_x_continuous(breaks = seq(1, 99, 5)) +
  scale_y_continuous(breaks = seq(0, 15000, 1000)) +
  ggtitle("Hours-per-week distribution")
# This plot displays hours_per_week vs income
trainset %>% ggplot(aes(x = hours_per_week,fill = income)) +
  geom_bar(width = 0.5, col = "black") +
  scale_y_continuous(breaks = seq(0,15000,1000)) +
  scale_x_continuous(breaks = seq(0,100,5)) +
  ggtitle("Hours-per-week - Income distribution")
# 14. Native country
# Summary of native_country
summary(trainset$native_country)
# This plot displays native country distribution
trainset %>% ggplot(aes(native_country)) +
  geom_bar(col = "black", fill = "grey") +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Native-country distribution")
# This plot displays native country vs income
trainset %>% ggplot(aes(native_country,fill = income)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle=90,hjust = 1)) +
  ggtitle("Native-country - Income distribution")
# 15. Income (the outcome variable)
# Summary of income
summary(trainset$income)
# To calculate proportions for each factor level
percentage <- 100 * prop.table(table(trainset$income))
# Display counts and percentages side by side in table format
cbind(freq = table(trainset$income), percentage = percentage)
#___________________________________________
# Part 3: Data modeling
#___________________________________________
# 1. Logistic Regression
set.seed(1, sample.kind="Rounding")
# Fit data using caret package, method - glm, family - binomial
train_lr <- train(income ~ age + workclass + education + occupation +
relationship+ hours_per_week + native_country +
race + sex + marital_status + capital_gain + capital_loss,
data=trainset,
method = "glm",
family="binomial")
# Predict income using the above fitted model
pred_lr <- predict(train_lr,validation)
# Save results of Confusion Matrix
lr_acc <- confusionMatrix(pred_lr,validation$income)
# Add results to a table
results <- tibble(Method="Logistic Regression",
Accuracy_Train = lr_acc$overall["Accuracy"],
F1_Train = lr_acc$byClass[7])
results %>% knitr::kable()
# 2. Support Vector Classifier
set.seed(3, sample.kind="Rounding")
## Fit data using svm from base package
svc <- svm(income ~ age + workclass + education + capital_gain +
occupation + relationship + race + sex + capital_loss +
hours_per_week + native_country + marital_status,
data=trainset)
# Predict income using the above fitted model
pred_svc <- predict(svc,validation)
# Save results of Confusion Matrix
cm_svc <- confusionMatrix(pred_svc,validation$income)
# Add results to the results table
results <- bind_rows(results, tibble(Method="Support Vector Classifier",
Accuracy_Train = cm_svc$overall["Accuracy"],
F1_Train = cm_svc$byClass[7]))
results %>% knitr::kable()
# 3. Random Forest Classifier
set.seed(4, sample.kind="Rounding")
## Fit data using rf from base package
raf <- randomForest(income ~ age + workclass + education + capital_gain +
occupation + relationship + race + sex + capital_loss +
hours_per_week + native_country + marital_status,
data = trainset)
# Predict income using the above fitted model
pred_raf <- predict(raf ,validation)
# Save results of Confusion Matrix
cm_raf <- confusionMatrix(pred_raf,validation$income)
# Add results to the results table
results <- bind_rows(results, tibble(Method="Random Forest Classifier",
Accuracy_Train = cm_raf$overall["Accuracy"],
F1_Train = cm_raf$byClass[7]))
results %>% knitr::kable()
# 4. Gradient Boosting Classifier
set.seed(6, sample.kind="Rounding")
# Fit data using caret package, method - gbm
gbc <- train(income ~ age + workclass + education + capital_gain +
occupation + relationship + race + sex + capital_loss +
hours_per_week + native_country + marital_status,
data=trainset,
method = "gbm")
# Predict income using the above fitted model
pred_gbc <- predict(gbc,validation)
# Save results of Confusion Matrix
cm_gbc <- confusionMatrix(pred_gbc,validation$income)
# Add results to the results table
results <- bind_rows(results,tibble(Method="Gradient Boosting Classifier",
Accuracy_Train = cm_gbc$overall["Accuracy"],
F1_Train = cm_gbc$byClass[7]))
results %>% knitr::kable()
#_______________________________________________
# Part 4: Results / Check the models on test set
#_______________________________________________
# Downloading and Reading test data
if(!file.exists("adult.test")){
download.file("http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test","adult.test")
}
income_test <- read.csv("adult.test",header = FALSE,skip=1)
# Set column names
colnames(income_test) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
"occupation","relationship","race","sex","capital_gain","capital_loss",
"hours_per_week","native_country","income")
# explore test data
# To check the structure of test data
str(income_test)
# To get the dimension of test data
dim(income_test)
# To get summary of test data
summary(income_test)
# To explore the levels in each column of test data
sapply(income_test, levels)
# Convert " ?" data to NA and the remove rows with NA
income_test <- read.csv("adult.test",na.strings = c(" ?"),header = FALSE,skip=1)
income_test <- na.omit(income_test)
# Set column names again
colnames(income_test) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
"occupation","relationship","race","sex","capital_gain","capital_loss",
"hours_per_week","native_country","income")
# Normalise the income labels: adult.test writes them with a trailing
# period (" <=50K.", " >50K."), so rename the two levels (sorted order)
# to match the training labels.
levels(income_test$income)[1] <- " <=50K"
levels(income_test$income)[2] <- " >50K"
# Align native_country levels with the training set BY VALUE, not by
# position. The previous `levels(x) <- ...` assignment relabels levels
# positionally, which silently maps countries onto the wrong label
# whenever the test set is missing a level that the training set has.
# factor() matches existing values by name and only adds missing levels.
income_test$native_country <- factor(income_test$native_country,
                                     levels = levels(trainset$native_country))
# 1. Logistic Regression
# Use test data to predict income using the above fitted logistic regression model
pred_lrtest <- predict(train_lr,income_test)
# Save results of Confusion Matrix
lr_test <- confusionMatrix(pred_lrtest,income_test$income)
# Add results to the table
test_results <- tibble(Accuracy_Test = lr_test$overall["Accuracy"],
F1_Test = lr_test$byClass[7])
test_results %>% knitr::kable()
# 2. Support Vector Classifier
set.seed(3, sample.kind="Rounding")
# Use test data to predict income using the above fitted svm model
pred_svctest <- predict(svc,income_test)
# Save results of Confusion Matrix
svc_test <- confusionMatrix(pred_svctest,income_test$income)
# Add results to the table
test_results <- bind_rows(test_results, tibble(
Accuracy_Test = svc_test$overall["Accuracy"],
F1_Test = svc_test$byClass[7]))
test_results %>% knitr::kable()
# 3. Random Forest Classifier
set.seed(4, sample.kind="Rounding")
# Use test data to predict income using the above fitted random forest model
pred_raftest <- predict(raf,income_test)
# Save results of Confusion Matrix
raf_test <- confusionMatrix(pred_raftest,income_test$income)
# Add results to the table
test_results <- bind_rows(test_results, tibble(
Accuracy_Test = raf_test$overall["Accuracy"],
F1_Test = raf_test$byClass[7]))
test_results %>% knitr::kable()
# 4. Gradient Boosting Classifier
set.seed(6, sample.kind="Rounding")
# Use test data to predict income using the above fitted gradient boosting model
pred_gbctest <- predict(gbc,income_test)
# Save results of Confusion Matrix
gbc_test <- confusionMatrix(pred_gbctest,income_test$income)
# Add results to the table
test_results <- bind_rows(test_results, tibble(
Accuracy_Test = gbc_test$overall["Accuracy"],
F1_Test= gbc_test$byClass[7]))
test_results %>% knitr::kable()
# Add test results to train results table
results <- bind_cols(results,test_results)
results %>% knitr::kable()
|
/cyop_r.r
|
no_license
|
swalinim/cyop
|
R
| false
| false
| 17,436
|
r
|
#__________________________
#Download required packages
#__________________________
# Install any missing dependencies and make sure they are attached.
# The original pattern `if(!require(p)) install.packages(p)` installs a
# missing package but never loads it in the same session, so the first
# run on a fresh machine would fail at the first use of that package.
if (!require(tidyverse)) {
  install.packages("tidyverse", repos = "http://cran.us.r-project.org")
  library(tidyverse)
}
if (!require(caret)) {
  install.packages("caret", repos = "http://cran.us.r-project.org")
  library(caret)
}
if (!require(data.table)) {
  install.packages("data.table", repos = "http://cran.us.r-project.org")
  library(data.table)
}
if (!require(e1071)) {
  install.packages("e1071", repos = "http://cran.us.r-project.org")
  library(e1071)
}
if (!require(randomForest)) {
  install.packages("randomForest", repos = "http://cran.us.r-project.org")
  library(randomForest)
}
#_____________________________________________________________
# Part 1: Downloading data, preprocessing and Data exploration
#_____________________________________________________________
# Downloading and Reading data
if(!file.exists("adult.data")){
download.file("http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data","adult.data")
}
income_data <- read.csv("adult.data",header = FALSE)
# Set column names
colnames(income_data) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
"occupation","relationship","race","sex","capital_gain","capital_loss",
"hours_per_week","native_country","income")
# Explore data
# To get the dimension of data
dim(income_data)
# To check the structure of data
str(income_data)
# To get summary of data
summary(income_data)
# To explore the levels in each column
sapply(income_data, levels)
# Convert " ?" data to NA and the remove rows with NA
income_data <- read.csv("adult.data",na.strings = c(" ?"),header = FALSE)
income_data <- na.omit(income_data)
# Set column names again
colnames(income_data) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
"occupation","relationship","race","sex","capital_gain","capital_loss",
"hours_per_week","native_country","income")
# To get the dimension of new data
dim(income_data)
# Create Train and validation set with validation set having 10% data.
# createDataPartition stratifies on the outcome, so both splits keep
# roughly the same <=50K / >50K class proportions.
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(income_data$income,p = 0.1,
                                  times = 1,list = FALSE)
# 90% of rows for training, the held-out 10% for model selection.
trainset <- income_data[-test_index, ]
validation <- income_data[test_index,]
# To get dimension of train and validation data
dim(trainset)
dim(validation)
#___________________________________________
# Part 2: Visualization
#___________________________________________
# 1. age
# Summary of age
summary(trainset$age)
# Group age
trainset <- trainset %>%
mutate(
age_group =case_when(
age > 10 & age <= 20 ~ "17-20",
age > 20 & age <= 30 ~ "21-30",
age > 30 & age <= 40 ~ "31-40",
age > 40 & age <= 50 ~ "41-50",
age > 50 & age <= 60 ~ "51-60",
age > 60 & age <= 70 ~ "61-70",
age > 70 & age <= 80 ~ "71-80",
age > 80 & age <= 90 ~ "81-90"
))
# This plot displays age distribution
trainset %>% ggplot(aes(age)) +
geom_histogram(binwidth = 5, col = "black", fill = "grey") +
scale_x_continuous(breaks = seq(0, 95, 5)) +
scale_y_continuous(breaks = seq(0, 5000, 500)) +
ggtitle("Age distribution")
# This plot displays age_group vs income
trainset %>% ggplot(aes(x = age_group,fill = income)) +
geom_bar(width = 0.3, col = "black") +
scale_y_continuous(breaks = seq(0,9000,1000)) +
ggtitle("Age - Income distribution")
# 2. workclass
# Summary of workclass
summary(trainset$workclass)
# This plot displays workclass distribution
trainset %>% ggplot(aes(workclass)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Workclass distribution")
# This plot displays workclass vs income
trainset %>% ggplot(aes(workclass,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Workclass - Income distribution")
# 3. Final weight
# Summary of fnlwgt (the census sampling weight for each row)
summary(trainset$fnlwgt)
# This plot displays boxplot distribution of fnlwgt
trainset %>% ggplot(aes(factor(0),fnlwgt)) +
  geom_boxplot() + ggtitle("Final Weight distribution")
# This plot displays boxplot distribution of fnlwgt vs income,
# with the per-class mean overlaid as a point
trainset %>% ggplot(aes(income,fnlwgt)) +
  geom_boxplot() +
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank()) +
  # `fun.y` was deprecated in ggplot2 3.3.0; `fun` is the replacement
  stat_summary(fun = mean, geom = "point") +
  ggtitle("Final Weight - Income distribution")
# 4.Education
# Summary of education
summary(trainset$education)
# This plot displays education distribution
trainset %>% ggplot(aes(education)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Education distribution")
# This plot displays education vs income
trainset %>% ggplot(aes(education,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Education - Income distribution")
# 5. Eduation Number
# Summary of education_num
summary(trainset$education_num)
# Group education and education_num by education
trainset %>% select(education,education_num) %>%
group_by(education_num) %>% count(education_num,education)
# 6. Marital status
#Summary of marital_status
summary(trainset$marital_status)
# This plot displays marital_status distribution
trainset %>% ggplot(aes(marital_status)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Marital Status distribution")
# This plot displays marital_status vs income
trainset %>% ggplot(aes(marital_status,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Marital Status- Income distribution")
# 7. Occupation
# Summary of occupation
summary(trainset$occupation)
# This plot displays occupation distribution
trainset %>% ggplot(aes(occupation)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Occupation distribution")
# This plot displays occupation vs income
trainset %>% ggplot(aes(occupation,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Occupation - Income distribution")
# 8. Relationship
# Summary of relationship
summary(trainset$relationship)
# This plot displays relationship distribution
trainset %>% ggplot(aes(relationship)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Relationship distribution")
# This plot displays relationship vs income
trainset %>% ggplot(aes(relationship,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Relationship - Income distribution")
# 9. Race
# Summary of race
summary(trainset$race)
# This plot displays race distribution
trainset %>% ggplot(aes(race)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Race distribution")
# This plot displays race vs income distribution
trainset %>% ggplot(aes(race,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Race - Income distribution")
# 10. Sex
# Summary of sex
summary(trainset$sex)
# This plot displays sex distribution
trainset %>% ggplot(aes(sex)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Sex distribution")
# This plot displays sex vs income
trainset %>% ggplot(aes(sex,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Sex - Income distribution")
# This plot displays education vs sex
trainset %>% ggplot(aes(education,fill = sex)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Education - sex distribution")
# 11. Capital_gain
# Summary of capital_gain
summary(trainset$capital_gain)
# This plot displays capital gain distribution
trainset %>% ggplot(aes(capital_gain)) +
geom_histogram(col = "black", fill = "grey") +
ggtitle("Capital-gain distribution")
# This plot displays boxplot of capital gain distribution
trainset %>% ggplot(aes(factor(0),capital_gain)) +
geom_boxplot() +
ggtitle("Capital-gain distribution")
# 12. Capital_loss
# Summary of capital_loss
summary(trainset$capital_loss)
# This plot displays capital loss distribution
trainset %>% ggplot(aes(capital_loss)) +
geom_histogram(col = "black", fill = "grey") +
ggtitle("Capital-loss distribution")
# This plot displays boxplot of capital loss distribution
trainset %>% ggplot(aes(factor(0),capital_loss)) +
geom_boxplot() +
ggtitle("Capital-loss distribution")
# 13. Hours per week
# Summary of hour_per_week
summary(trainset$hours_per_week)
# This plot displays hours_per_week distribution
trainset %>% ggplot(aes(hours_per_week)) +
geom_histogram(binwidth = 5, col = "black", fill = "grey") +
scale_x_continuous(breaks = seq(1, 99, 5)) +
scale_y_continuous(breaks = seq(0, 15000, 1000)) +
ggtitle("Hours-per-week distribution")
# This plot displays hours_per_week vs income
trainset %>% ggplot(aes(x = hours_per_week,fill = income)) +
geom_bar(width = 0.5, col = "black") +
scale_y_continuous(breaks = seq(0,15000,1000)) +
scale_x_continuous(breaks = seq(0,100,5)) +
ggtitle("Hours-per-week - Income distribution")
# 14. Native country
# Summary of native_country
summary(trainset$native_country)
# This plot displays native country distribution
trainset %>% ggplot(aes(native_country)) +
geom_bar(col = "black", fill = "grey") +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Native-country distribution")
# This plot displays native country vs income
trainset %>% ggplot(aes(native_country,fill = income)) +
geom_bar() +
theme(axis.text.x = element_text(angle=90,hjust = 1)) +
ggtitle("Native-country - Income distribution")
# 15. Income
# Summary of income
summary(trainset$income)
# To calculate proportions for each factor level
percentage <- 100 * prop.table(table(trainset$income))
# Display in table format
cbind(freq = table(trainset$income), percentage = percentage)
#___________________________________________
# Part 3: Data modeling
#___________________________________________
# 1. Logistic Regression
# Fix the RNG state so caret's internal resampling is reproducible.
set.seed(1, sample.kind="Rounding")
# Fit data using caret package, method - glm, family - binomial
# (a binomial glm is logistic regression; fnlwgt and education_num are
# not included in the predictor set)
train_lr <- train(income ~ age + workclass + education + occupation +
                    relationship+ hours_per_week + native_country +
                    race + sex + marital_status + capital_gain + capital_loss,
                  data=trainset,
                  method = "glm",
                  family="binomial")
# Predict income classes on the held-out validation set
pred_lr <- predict(train_lr,validation)
# Save results of Confusion Matrix
lr_acc <- confusionMatrix(pred_lr,validation$income)
# Add results to a table; byClass[7] is the F1 score in
# caret's confusionMatrix output
results <- tibble(Method="Logistic Regression",
                  Accuracy_Train = lr_acc$overall["Accuracy"],
                  F1_Train = lr_acc$byClass[7])
results %>% knitr::kable()
# 2. Support Vector Classifier
set.seed(3, sample.kind="Rounding")
## Fit data using svm from base package
svc <- svm(income ~ age + workclass + education + capital_gain +
occupation + relationship + race + sex + capital_loss +
hours_per_week + native_country + marital_status,
data=trainset)
# Predict income using the above fitted model
pred_svc <- predict(svc,validation)
# Save results of Confusion Matrix
cm_svc <- confusionMatrix(pred_svc,validation$income)
# Add results to the results table
results <- bind_rows(results, tibble(Method="Support Vector Classifier",
Accuracy_Train = cm_svc$overall["Accuracy"],
F1_Train = cm_svc$byClass[7]))
results %>% knitr::kable()
# 3. Random Forest Classifier
set.seed(4, sample.kind="Rounding")
## Fit data using rf from base package
raf <- randomForest(income ~ age + workclass + education + capital_gain +
occupation + relationship + race + sex + capital_loss +
hours_per_week + native_country + marital_status,
data = trainset)
# Predict income using the above fitted model
pred_raf <- predict(raf ,validation)
# Save results of Confusion Matrix
cm_raf <- confusionMatrix(pred_raf,validation$income)
# Add results to the results table
results <- bind_rows(results, tibble(Method="Random Forest Classifier",
Accuracy_Train = cm_raf$overall["Accuracy"],
F1_Train = cm_raf$byClass[7]))
results %>% knitr::kable()
# 4. Gradient Boosting Classifier
set.seed(6, sample.kind="Rounding")
# Fit data using caret package, method - gbm
gbc <- train(income ~ age + workclass + education + capital_gain +
occupation + relationship + race + sex + capital_loss +
hours_per_week + native_country + marital_status,
data=trainset,
method = "gbm")
# Predict income using the above fitted model
pred_gbc <- predict(gbc,validation)
# Save results of Confusion Matrix
cm_gbc <- confusionMatrix(pred_gbc,validation$income)
# Add results to the results table
results <- bind_rows(results,tibble(Method="Gradient Boosting Classifier",
Accuracy_Train = cm_gbc$overall["Accuracy"],
F1_Train = cm_gbc$byClass[7]))
results %>% knitr::kable()
#_______________________________________________
# Part 4: Results / Check the models on test set
#_______________________________________________
# Downloading and Reading test data
if(!file.exists("adult.test")){
download.file("http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test","adult.test")
}
income_test <- read.csv("adult.test",header = FALSE,skip=1)
# Set column names
colnames(income_test) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
"occupation","relationship","race","sex","capital_gain","capital_loss",
"hours_per_week","native_country","income")
# explore test data
# To check the structure of test data
str(income_test)
# To get the dimension of test data
dim(income_test)
# To get summary of test data
summary(income_test)
# To explore the levels in each column of test data
sapply(income_test, levels)
# Convert " ?" data to NA and the remove rows with NA
income_test <- read.csv("adult.test",na.strings = c(" ?"),header = FALSE,skip=1)
income_test <- na.omit(income_test)
# Set column names again
colnames(income_test) <- c("age","workclass","fnlwgt","education","education_num","marital_status",
"occupation","relationship","race","sex","capital_gain","capital_loss",
"hours_per_week","native_country","income")
# Normalise the income labels: adult.test writes them with a trailing
# period (" <=50K.", " >50K."), so rename the two levels (sorted order)
# to match the training labels.
levels(income_test$income)[1] <- " <=50K"
levels(income_test$income)[2] <- " >50K"
# Align native_country levels with the training set BY VALUE, not by
# position. The previous `levels(x) <- ...` assignment relabels levels
# positionally, which silently maps countries onto the wrong label
# whenever the test set is missing a level that the training set has.
# factor() matches existing values by name and only adds missing levels.
income_test$native_country <- factor(income_test$native_country,
                                     levels = levels(trainset$native_country))
# 1. Logistic Regression
# Use test data to predict income using the above fitted logistic regression model
pred_lrtest <- predict(train_lr,income_test)
# Save results of Confusion Matrix
lr_test <- confusionMatrix(pred_lrtest,income_test$income)
# Add results to the table
test_results <- tibble(Accuracy_Test = lr_test$overall["Accuracy"],
F1_Test = lr_test$byClass[7])
test_results %>% knitr::kable()
# 2. Support Vector Classifier
set.seed(3, sample.kind="Rounding")
# Use test data to predict income using the above fitted svm model
pred_svctest <- predict(svc,income_test)
# Save results of Confusion Matrix
svc_test <- confusionMatrix(pred_svctest,income_test$income)
# Add results to the table
test_results <- bind_rows(test_results, tibble(
Accuracy_Test = svc_test$overall["Accuracy"],
F1_Test = svc_test$byClass[7]))
test_results %>% knitr::kable()
# 3. Random Forest Classifier
set.seed(4, sample.kind="Rounding")
# Use test data to predict income using the above fitted random forest model
pred_raftest <- predict(raf,income_test)
# Save results of Confusion Matrix
raf_test <- confusionMatrix(pred_raftest,income_test$income)
# Add results to the table
test_results <- bind_rows(test_results, tibble(
Accuracy_Test = raf_test$overall["Accuracy"],
F1_Test = raf_test$byClass[7]))
test_results %>% knitr::kable()
# 4. Gradient Boosting Classifier
set.seed(6, sample.kind="Rounding")
# Use test data to predict income using the above fitted gradient boosting model
pred_gbctest <- predict(gbc,income_test)
# Save results of Confusion Matrix
gbc_test <- confusionMatrix(pred_gbctest,income_test$income)
# Add results to the table
test_results <- bind_rows(test_results, tibble(
Accuracy_Test = gbc_test$overall["Accuracy"],
F1_Test= gbc_test$byClass[7]))
test_results %>% knitr::kable()
# Add test results to train results table
results <- bind_cols(results,test_results)
results %>% knitr::kable()
|
# 153-Specification of food original groups.R
#
# Copyright © 2019: Arin Shahbazian
# Licence: GPL-3
#
# For each survey year this script sums each household's expenditure on
# the "original" food items and derives the residual ("other") food
# expenditure, saving the result as Y<year>FoodExpData.rda.
# NOTE(review): rm(list=ls()) wipes the caller's workspace; acceptable
# for a standalone pipeline step, but do not source this interactively.
rm(list=ls())
# Record start time to report total wall-clock duration at the end.
starttime <- proc.time()
library(yaml)
# Project-wide settings (paths, year range) come from Settings.yaml.
Settings <- yaml.load_file("Settings.yaml")
library(data.table)
library(readxl)
cat("\n\n================ FoodGroups =====================================\n")
for(year in (Settings$startyear:Settings$endyear)){
  cat(paste0("\n------------------------------\nYear:",year,"\n"))
  # Load BigFData (per-item food records) and TotalFoodExpData
  # (per-household totals) -- presumably produced by earlier pipeline
  # steps; verify against scripts 1xx in this directory.
  load( file = paste0(Settings$HEISProcessedPath,"Y",year,"BigFData.rda"))
  load(file = paste0(Settings$HEISProcessedPath,"Y",year,"TotalFoodExp.rda"))
  BigFData[,OriginalFoodExpenditure:=Expenditure]
  # Aggregate original-food expenditure per household.
  NfoodExp<-BigFData[,.(HHID,OriginalFoodExpenditure)]
  NfoodExp <- NfoodExp[,lapply(.SD,sum),by=HHID]
  # Left join onto the totals; households with no original-food rows
  # come through as NA ...
  FoodExpData<-merge(TotalFoodExpData,NfoodExp,all.x = TRUE)
  # ... and NA is treated as zero expenditure.
  FoodExpData[is.na(FoodExpData)] <- 0
  # Residual: total food spending minus original-food spending.
  FoodExpData[,FoodOtherExpenditure:=FoodExpenditure-OriginalFoodExpenditure]
  save(FoodExpData, file = paste0(Settings$HEISProcessedPath,"Y",year,"FoodExpData.rda"))
}
cat("\n\n==============Finish==============\nIt took ")
endtime <- proc.time()
cat((endtime-starttime)[3],"seconds.")
|
/R/153-Specification of food original groups.R
|
no_license
|
sadraheydari/IRHEIS
|
R
| false
| false
| 1,147
|
r
|
# 153-Specification of food original groups.R
#
# Copyright © 2019: Arin Shahbazian
# Licence: GPL-3
#
# For each survey year this script sums each household's expenditure on
# the "original" food items and derives the residual ("other") food
# expenditure, saving the result as Y<year>FoodExpData.rda.
# NOTE(review): rm(list=ls()) wipes the caller's workspace; acceptable
# for a standalone pipeline step, but do not source this interactively.
rm(list=ls())
# Record start time to report total wall-clock duration at the end.
starttime <- proc.time()
library(yaml)
# Project-wide settings (paths, year range) come from Settings.yaml.
Settings <- yaml.load_file("Settings.yaml")
library(data.table)
library(readxl)
cat("\n\n================ FoodGroups =====================================\n")
for(year in (Settings$startyear:Settings$endyear)){
  cat(paste0("\n------------------------------\nYear:",year,"\n"))
  # Load BigFData (per-item food records) and TotalFoodExpData
  # (per-household totals) -- presumably produced by earlier pipeline
  # steps; verify against scripts 1xx in this directory.
  load( file = paste0(Settings$HEISProcessedPath,"Y",year,"BigFData.rda"))
  load(file = paste0(Settings$HEISProcessedPath,"Y",year,"TotalFoodExp.rda"))
  BigFData[,OriginalFoodExpenditure:=Expenditure]
  # Aggregate original-food expenditure per household.
  NfoodExp<-BigFData[,.(HHID,OriginalFoodExpenditure)]
  NfoodExp <- NfoodExp[,lapply(.SD,sum),by=HHID]
  # Left join onto the totals; households with no original-food rows
  # come through as NA ...
  FoodExpData<-merge(TotalFoodExpData,NfoodExp,all.x = TRUE)
  # ... and NA is treated as zero expenditure.
  FoodExpData[is.na(FoodExpData)] <- 0
  # Residual: total food spending minus original-food spending.
  FoodExpData[,FoodOtherExpenditure:=FoodExpenditure-OriginalFoodExpenditure]
  save(FoodExpData, file = paste0(Settings$HEISProcessedPath,"Y",year,"FoodExpData.rda"))
}
cat("\n\n==============Finish==============\nIt took ")
endtime <- proc.time()
cat((endtime-starttime)[3],"seconds.")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uniprot.R
\name{fetchFromUniProt}
\alias{fetchFromUniProt}
\title{Fetch annotations from UniProt}
\usage{
fetchFromUniProt(
unis,
columns = c("genes", "protein names"),
col.names = NULL,
batchsize = 400,
verbose = FALSE
)
}
\arguments{
\item{unis}{A character vector with UniProt identifiers}
\item{columns}{Data columns requested (see \code{\link{allowedUniProtColumns}})}
\item{col.names}{How to name data columns in the returned data frame}
\item{batchsize}{Size of batch of proteins in a single query}
\item{verbose}{Logical; if \code{TRUE}, query progress will be displayed}
}
\value{
A data frame with protein annotations.
}
\description{
Fetch annotations from UniProt
}
\details{
For a given list of UniProt identifiers this function will bring back
annotations from UniProt servers. What information is downloaded is
controlled by the \code{columns} parameter. By default it fetches gene names
and protein name/description. The full list of available columns is in a
vector \code{allowedUniProtColumns}.
The column names in the returned data frame are the same as in \code{columns}
parameter, unless alternative names are provided in parameter
\code{col.names}. The \code{id} column is added by default.
}
\examples{
library(proteusLabelFree)
data(proteusLabelFree)
# Extract UniProt identifiers from protein IDs
unis <- sapply(as.character(prodat$proteins), function(prot) {
s <- unlist(strsplit(prot, "|", fixed=TRUE))
s[2]
})
# Fetch first 100 annotations (for a quick example)
anno <- fetchFromUniProt(unis[1:100])
}
|
/man/fetchFromUniProt.Rd
|
permissive
|
bartongroup/Proteus
|
R
| false
| true
| 1,620
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uniprot.R
\name{fetchFromUniProt}
\alias{fetchFromUniProt}
\title{Fetch annotations from UniProt}
\usage{
fetchFromUniProt(
unis,
columns = c("genes", "protein names"),
col.names = NULL,
batchsize = 400,
verbose = FALSE
)
}
\arguments{
\item{unis}{A character vector with UniProt identifiers}
\item{columns}{Data columns requested (see \code{\link{allowedUniProtColumns}})}
\item{col.names}{How to name data columns in the returned data frame}
\item{batchsize}{Size of batch of proteins in a single query}
\item{verbose}{Logical; if \code{TRUE}, query progress will be displayed}
}
\value{
A data frame with protein annotations.
}
\description{
Fetch annotations from UniProt
}
\details{
For a given list of UniProt identifiers this function will bring back
annotations from UniProt servers. What information is downloaded is
controlled by the \code{columns} parameter. By default it fetches gene names
and protein name/description. The full list of available columns is in a
vector \code{allowedUniProtColumns}.
The column names in the returned data frame are the same as in \code{columns}
parameter, unless alternative names are provided in parameter
\code{col.names}. The \code{id} column is added by default.
}
\examples{
library(proteusLabelFree)
data(proteusLabelFree)
# Extract UniProt identifiers from protein IDs
unis <- sapply(as.character(prodat$proteins), function(prot) {
s <- unlist(strsplit(prot, "|", fixed=TRUE))
s[2]
})
# Fetch first 100 annotations (for a quick example)
anno <- fetchFromUniProt(unis[1:100])
}
|
library(spotifyr)
library(dplyr)
library(ggplot2)
library(jsonlite)
library(lubridate)
library(fmsb)
library(tidyverse)
# Sys.setenv(SPOTIFY_CLIENT_ID = '')
# Sys.setenv(SPOTIFY_CLIENT_SECRET = '')
# access_token <- get_spotify_access_token()
# choices for dropdown input
# Names of the 20 most frequently played artists, by play count.
top_artists <- df %>%
  mutate(artistName = as.character(artistName)) %>%
  count(artistName) %>%
  arrange(desc(n)) %>%
  head(20) %>%
  pull(artistName)
# Play counts per (artist, track) for those top artists, keeping only
# tracks that were played more than twice.
top_songs_of_top_artists <- df %>%
  mutate(artistName = as.character(artistName)) %>%
  filter(artistName %in% top_artists) %>%
  group_by(artistName, trackName) %>%
  summarise(n = n()) %>%
  arrange(artistName, desc(n)) %>%
  filter(n > 2)
# Up to five of the most-played tracks of the single top artist.
picked_artist_songs <- top_songs_of_top_artists %>%
  filter(artistName == top_artists[1]) %>%
  head(5)
# Bar chart of play counts, tracks ordered by decreasing count.
ggplot(picked_artist_songs, aes(x = reorder(trackName, -n), y = n)) +
  geom_col()
|
/projects/project3/krzyzinski_wojciechowski_zolkowski/eda/top_artist_songs.R
|
no_license
|
iketutg/2021Z-DataVisualizationTechniques
|
R
| false
| false
| 859
|
r
|
library(spotifyr)
library(dplyr)
library(ggplot2)
library(jsonlite)
library(lubridate)
library(fmsb)
library(tidyverse)
# Sys.setenv(SPOTIFY_CLIENT_ID = '')
# Sys.setenv(SPOTIFY_CLIENT_SECRET = '')
# access_token <- get_spotify_access_token()
# choices for dropdown input
# Names of the 20 most frequently played artists, by play count.
top_artists <- df %>%
  mutate(artistName = as.character(artistName)) %>%
  count(artistName) %>%
  arrange(desc(n)) %>%
  head(20) %>%
  pull(artistName)
# Play counts per (artist, track) for those top artists, keeping only
# tracks that were played more than twice.
top_songs_of_top_artists <- df %>%
  mutate(artistName = as.character(artistName)) %>%
  filter(artistName %in% top_artists) %>%
  group_by(artistName, trackName) %>%
  summarise(n = n()) %>%
  arrange(artistName, desc(n)) %>%
  filter(n > 2)
# Up to five of the most-played tracks of the single top artist.
picked_artist_songs <- top_songs_of_top_artists %>%
  filter(artistName == top_artists[1]) %>%
  head(5)
# Bar chart of play counts, tracks ordered by decreasing count.
ggplot(picked_artist_songs, aes(x = reorder(trackName, -n), y = n)) +
  geom_col()
|
library(jsonlite)
library(dplyr)
library(magrittr)
library(tm)
library(topicmodels)
set.seed(42)
# Load data: parsed State-of-the-Union speeches; `content` holds the text.
sotu <- fromJSON(txt = "C://Users/bdaet/Desktop/myProjects/big_data_bootcamp_2016-master/sotu_parsed.json")
#adding extra stopwords (common words in the English language that won't provide us with much information)
stopwords_extra <- c(stopwords(kind = 'en'), 'will', 'thank', 'can')
#creating a Document Term Matrix (one row per speech, one column per term)
# NOTE(review): `minDocFreq`/`maxDocFreq` are not standard
# DocumentTermMatrix control options (tm expects
# `bounds = list(global = c(lo, hi))`), so these two entries may be
# silently ignored -- TODO confirm against the tm version in use.
dtm <- DocumentTermMatrix(x = Corpus(VectorSource(sotu$content)),
                          control = list(tokenize = words, #tokenizing text
                                         tolower = TRUE, #making text lower case
                                         stopwords = stopwords_extra, #applying added stopwords
                                         stemming = 'english', #converting words to their stems
                                         removePunctuation = TRUE, #removing punctuation
                                         removeNumbers = TRUE, #removing numbers
                                         minDocFreq = length(sotu$content) * 0.05,
                                         maxDocFreq = length(sotu$content) * 0.80,
                                         weighting = weightTf))
#creating a 5-topic LDA (latent Dirichlet allocation) model via variational EM
lda <- LDA(x = dtm, k = 5, method = 'VEM')
terms(x = lda, k = 10)
topics(x = lda, k = 5)
# DF: Terms -- top-25 terms per topic with their rank and beta weight.
n_top_terms <- 25
term_extract <- terms(lda, k = n_top_terms)
# Preallocate result vectors: growing vectors with c() inside a nested
# loop is O(n^2) because each append copies the whole vector. Rows are
# built in topic-major order, matching the original layout.
n_rows <- lda@k * n_top_terms
v_topics <- integer(n_rows)
v_terms <- character(n_rows)
v_ranks <- integer(n_rows)
v_betas <- numeric(n_rows)
i <- 0L
for (topic in seq_len(lda@k)) {
  for (term in seq_len(n_top_terms)) {
    i <- i + 1L
    v_topics[i] <- topic
    v_terms[i] <- term_extract[term, topic]
    v_ranks[i] <- term
    # beta weight of this term in this topic (column looked up by name)
    v_betas[i] <- lda@beta[topic, which(lda@terms == term_extract[term, topic])]
  }
}
# data_frame() is deprecated in dplyr; tibble() is the supported replacement.
term_rankings <- tibble(topic = v_topics,
                        term = v_terms,
                        rank = v_ranks,
                        term_beta = v_betas)
rm(v_topics, v_terms, v_ranks, v_betas, i, n_rows, n_top_terms)
# DF: Document Distributions -- per-document gamma for every topic.
# lda@gamma is a documents x topics matrix, so the (topic, doc, gamma)
# triples can be laid out vectorised in topic-major order -- no need to
# grow vectors with c() inside nested loops. as.vector() flattens
# column-major, i.e. documents vary fastest within each topic, which
# matches the original loop order exactly.
n_docs <- length(lda@documents)
v_topics <- rep(seq_len(lda@k), each = n_docs)
v_docs <- rep(lda@documents, times = lda@k)
v_gammas <- as.vector(lda@gamma)
# data_frame() is deprecated in dplyr; tibble() is the supported replacement.
doc_distributions <- tibble(topic = v_topics,
                            doc = v_docs,
                            doc_gamma = v_gammas)
rm(v_topics, v_docs, v_gammas, n_docs)
|
/StateofUnion.R
|
permissive
|
bryandaetz/big_data_bootcamp_2016
|
R
| false
| false
| 2,552
|
r
|
library(jsonlite)
library(dplyr)
library(magrittr)
library(tm)
library(topicmodels)
set.seed(42)
# Load data: parsed State-of-the-Union speeches; `content` holds the text.
sotu <- fromJSON(txt = "C://Users/bdaet/Desktop/myProjects/big_data_bootcamp_2016-master/sotu_parsed.json")
#adding extra stopwords (common words in the English language that won't provide us with much information)
stopwords_extra <- c(stopwords(kind = 'en'), 'will', 'thank', 'can')
#creating a Document Term Matrix (one row per speech, one column per term)
# NOTE(review): `minDocFreq`/`maxDocFreq` are not standard
# DocumentTermMatrix control options (tm expects
# `bounds = list(global = c(lo, hi))`), so these two entries may be
# silently ignored -- TODO confirm against the tm version in use.
dtm <- DocumentTermMatrix(x = Corpus(VectorSource(sotu$content)),
                          control = list(tokenize = words, #tokenizing text
                                         tolower = TRUE, #making text lower case
                                         stopwords = stopwords_extra, #applying added stopwords
                                         stemming = 'english', #converting words to their stems
                                         removePunctuation = TRUE, #removing punctuation
                                         removeNumbers = TRUE, #removing numbers
                                         minDocFreq = length(sotu$content) * 0.05,
                                         maxDocFreq = length(sotu$content) * 0.80,
                                         weighting = weightTf))
#creating a 5-topic LDA (latent Dirichlet allocation) model via variational EM
lda <- LDA(x = dtm, k = 5, method = 'VEM')
terms(x = lda, k = 10)
topics(x = lda, k = 5)
# DF: Terms
term_extract <- terms(lda, k = 25)
v_topics <- c(); v_terms <- c(); v_ranks <- c(); v_betas <- c()
for (topic in 1:lda@k) {
for (term in 1:25) {
v_topics <- c(v_topics, topic)
v_terms <- c(v_terms, term_extract[term, topic])
v_ranks <- c(v_ranks, term)
v_betas <- c(v_betas, lda@beta[topic, which(lda@terms == term_extract[term, topic])])
}
}
term_rankings <- data_frame(topic = v_topics,
term = v_terms,
rank = v_ranks,
term_beta = v_betas)
rm(v_topics, v_terms, v_ranks, v_betas)
# DF: Document Distributions
v_topics <- c(); v_docs <- c(); v_gammas <-c()
for (topic in 1:lda@k) {
for (doc in 1:length(lda@documents)) {
v_topics <- c(v_topics, topic)
v_docs <- c(v_docs, lda@documents[doc])
v_gammas <- c(v_gammas, lda@gamma[doc, topic])
}
}
doc_distributions <- data_frame(topic = v_topics,
doc = v_docs,
doc_gamma = v_gammas)
rm(v_topics, v_docs, v_gammas)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input_f.R
\name{add_tte}
\alias{add_tte}
\title{Define events and the initial event time}
\usage{
add_tte(.data = NULL, trt, evts, other_inp = NULL, input)
}
\arguments{
\item{.data}{Existing data for initial event times}
\item{trt}{The intervention for which the events and initial event times are defined}
\item{evts}{A vector of the names of the events}
\item{other_inp}{A vector of other input variables that should be saved during the simulation}
\item{input}{The definition of initial event times for the events listed in the evts argument}
}
\value{
A list of initial events and event times
}
\description{
Define events and the initial event time
}
\details{
Events need to be separately defined for each intervention.
For each event that is defined in this list, the user needs to add a reaction to the event using the \code{add_reactevt()} function which will determine what calculations will happen at an event.
}
\examples{
add_tte(trt="int",evts = c("start","ttot","idfs","os"),
input={
start <- 0
idfs <- draw_tte(1,'lnorm',coef1=2, coef2=0.5)
ttot <- min(draw_tte(1,'lnorm',coef1=1, coef2=4),idfs)
os <- draw_tte(1,'lnorm',coef1=0.8, coef2=0.2)
})
}
|
/Rpackages/descem/man/add_tte.Rd
|
permissive
|
Diarmuid78/Global-HTA-Evidence-Open
|
R
| false
| true
| 1,249
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input_f.R
\name{add_tte}
\alias{add_tte}
\title{Define events and the initial event time}
\usage{
add_tte(.data = NULL, trt, evts, other_inp = NULL, input)
}
\arguments{
\item{.data}{Existing data for initial event times}
\item{trt}{The intervention for which the events and initial event times are defined}
\item{evts}{A vector of the names of the events}
\item{other_inp}{A vector of other input variables that should be saved during the simulation}
\item{input}{The definition of initial event times for the events listed in the evts argument}
}
\value{
A list of initial events and event times
}
\description{
Define events and the initial event time
}
\details{
Events need to be separately defined for each intervention.
For each event that is defined in this list, the user needs to add a reaction to the event using the \code{add_reactevt()} function which will determine what calculations will happen at an event.
}
\examples{
add_tte(trt="int",evts = c("start","ttot","idfs","os"),
input={
start <- 0
idfs <- draw_tte(1,'lnorm',coef1=2, coef2=0.5)
ttot <- min(draw_tte(1,'lnorm',coef1=1, coef2=4),idfs)
os <- draw_tte(1,'lnorm',coef1=0.8, coef2=0.2)
})
}
|
# Title : Read and display data
# Created by: Josef Ondrej (www.josefondrej.com)
# Created on: 21.01.21
# Demonstrates base-graphics plotting (scatter, histogram, barplot) and a
# split/apply group-by on the Iris data set.
library(scales)
library(graphics)
data = read.csv("./data/Iris.csv")
print(head(data))
# Setting Margins
graphics::par(mar = c(5, 5, 5, 5))
# Scatterplot
# axes are suppressed (xaxt/yaxt = "n") and drawn manually below with
# finer tick control
graphics::plot(data[["SepalLengthCm"]], data[["SepalWidthCm"]],
        col = scales::alpha("deepskyblue", 0.7), type = "p",
        pch = 19, cex = 2.2,
        xlab = "Sepal Length [cm]", ylab = "Sepal Width [cm]",
        xaxt = "n", yaxt = "n")
graphics::axis(1, at = seq(1, 10, 0.5), cex.axis = 1.0, font = 1, tck = .01)
graphics::axis(2, at = seq(1, 5, 0.5), cex.axis = 1.0, font = 1, tck = .01)
# Histogram
# NOTE: `alpha()` is used unqualified here; it resolves to scales::alpha()
# because library(scales) is attached above
graphics::hist(data[["SepalLengthCm"]], freq = FALSE,
        xlab = "Sepal Length [cm]", ylab = "Density",
        col = alpha("salmon", 0.7), border = FALSE,
        breaks = 15, xaxt = "n", yaxt = "n", main = "", bty = "o")
graphics::axis(1, at = seq(1, 10, 0.5), cex.axis = 1.0, font = 1, tck = .01)
graphics::axis(2, at = seq(-1, 2, 0.05), cex.axis = 1.0, font = 1, tck = .01)
# Barplot
graphics::barplot(data[["SepalLengthCm"]][1:20],
          border = FALSE, col = alpha("limegreen", 0.7),
          names = 1:20, yaxt = "n",
          xlab = "ID", ylab = "Height")
graphics::axis(2, at = seq(0, 5, 0.5), cex.axis = 1.0, font = 1, tck = .01)
# Group-By
# split by species, then map a summary function over the pieces
split_data = base::split(data, data[["Species"]])
# mean sepal length of one species' data frame
mean_sepal_length = function(df) {
 return(mean(df[["SepalLengthCm"]]))
}
purrr::map(split_data, mean_sepal_length)
# Join
# NOTE: the original line called `base::merge(x, y, by, by.x, by.y, sort = TRUE)`
# with `x` and `y` undefined, which stops the script with
# "object 'x' not found". Kept as a commented template; fill in the data
# frames and key columns before use, e.g.:
# merged <- base::merge(x, y, by = "common_key", sort = TRUE)
|
/read_and_display_data.R
|
no_license
|
josefondrej/advanced-r
|
R
| false
| false
| 1,642
|
r
|
# Title : Read and display data
# Created by: Josef Ondrej (www.josefondrej.com)
# Created on: 21.01.21
# Demonstrates base-graphics plotting (scatter, histogram, barplot) and a
# split/apply group-by on the Iris data set.
library(scales)
library(graphics)
data = read.csv("./data/Iris.csv")
print(head(data))
# Setting Margins
graphics::par(mar = c(5, 5, 5, 5))
# Scatterplot
# axes are suppressed (xaxt/yaxt = "n") and drawn manually below with
# finer tick control
graphics::plot(data[["SepalLengthCm"]], data[["SepalWidthCm"]],
        col = scales::alpha("deepskyblue", 0.7), type = "p",
        pch = 19, cex = 2.2,
        xlab = "Sepal Length [cm]", ylab = "Sepal Width [cm]",
        xaxt = "n", yaxt = "n")
graphics::axis(1, at = seq(1, 10, 0.5), cex.axis = 1.0, font = 1, tck = .01)
graphics::axis(2, at = seq(1, 5, 0.5), cex.axis = 1.0, font = 1, tck = .01)
# Histogram
# NOTE: `alpha()` is used unqualified here; it resolves to scales::alpha()
# because library(scales) is attached above
graphics::hist(data[["SepalLengthCm"]], freq = FALSE,
        xlab = "Sepal Length [cm]", ylab = "Density",
        col = alpha("salmon", 0.7), border = FALSE,
        breaks = 15, xaxt = "n", yaxt = "n", main = "", bty = "o")
graphics::axis(1, at = seq(1, 10, 0.5), cex.axis = 1.0, font = 1, tck = .01)
graphics::axis(2, at = seq(-1, 2, 0.05), cex.axis = 1.0, font = 1, tck = .01)
# Barplot
graphics::barplot(data[["SepalLengthCm"]][1:20],
          border = FALSE, col = alpha("limegreen", 0.7),
          names = 1:20, yaxt = "n",
          xlab = "ID", ylab = "Height")
graphics::axis(2, at = seq(0, 5, 0.5), cex.axis = 1.0, font = 1, tck = .01)
# Group-By
# split by species, then map a summary function over the pieces
split_data = base::split(data, data[["Species"]])
# mean sepal length of one species' data frame
mean_sepal_length = function(df) {
 return(mean(df[["SepalLengthCm"]]))
}
purrr::map(split_data, mean_sepal_length)
# Join
# NOTE: the original line called `base::merge(x, y, by, by.x, by.y, sort = TRUE)`
# with `x` and `y` undefined, which stops the script with
# "object 'x' not found". Kept as a commented template; fill in the data
# frames and key columns before use, e.g.:
# merged <- base::merge(x, y, by = "common_key", sort = TRUE)
|
## Construct a caching wrapper around a matrix.
##
## Returns a list of four accessor functions that share the matrix `x` and
## its lazily-computed inverse through the enclosing environment:
##   set(y)        -- replace the matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setInverse(v) -- store a computed inverse
##   getInverse()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # a new matrix invalidates any cached inverse
    },
    get = function() x,
    setInverse = function(solve) inv <<- solve,
    getInverse = function() inv
  )
}
## Return the inverse of the matrix held in a makeCacheMatrix() wrapper `x`.
##
## If the wrapper already caches an inverse it is returned immediately
## (with a "getting cached data" message); otherwise the inverse is computed
## with solve(), stored back into the wrapper, and returned. Extra arguments
## in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
NickLauerman/ProgrammingAssignment2
|
R
| false
| false
| 1,153
|
r
|
## Construct a caching wrapper around a matrix.
##
## Returns a list of four accessor functions that share the matrix `x` and
## its lazily-computed inverse through the enclosing environment:
##   set(y)        -- replace the matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setInverse(v) -- store a computed inverse
##   getInverse()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # a new matrix invalidates any cached inverse
    },
    get = function() x,
    setInverse = function(solve) inv <<- solve,
    getInverse = function() inv
  )
}
## Return the inverse of the matrix held in a makeCacheMatrix() wrapper `x`.
##
## If the wrapper already caches an inverse it is returned immediately
## (with a "getting cached data" message); otherwise the inverse is computed
## with solve(), stored back into the wrapper, and returned. Extra arguments
## in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
# Example usage for the `plastics` dataset from the fma package (monthly
# sales of a plastic product): raw series, season-on-season comparison, and
# an STL decomposition with a strictly periodic seasonal component.
library(fma)
### Name: plastics
### Title: Sales of plastic product
### Aliases: plastics
### Keywords: datasets
### ** Examples
plot(plastics) # raw monthly time series
seasonplot(plastics) # overlay each year to compare seasonal pattern
plot(stl(plastics,"periodic")) # trend / seasonal / remainder panels
|
/data/genthat_extracted_code/fma/examples/plastics.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 202
|
r
|
# Example usage for the `plastics` dataset from the fma package (monthly
# sales of a plastic product): raw series, season-on-season comparison, and
# an STL decomposition with a strictly periodic seasonal component.
library(fma)
### Name: plastics
### Title: Sales of plastic product
### Aliases: plastics
### Keywords: datasets
### ** Examples
plot(plastics) # raw monthly time series
seasonplot(plastics) # overlay each year to compare seasonal pattern
plot(stl(plastics,"periodic")) # trend / seasonal / remainder panels
|
## Extract the incident id and legal-subdivision (LSD) columns from the
## upstream incident report and write them out, along with the distinct
## LSD values.
incident_csv <- './87695-IncidentReport_2017-02-21_233917.XLSX - UpstreamIncident.csv'
incidents <- read.csv(incident_csv, header = TRUE, stringsAsFactors = FALSE)
# keep only columns 1 (incident id) and 9 (Land.Dls.Id)
spills <- incidents[, c(1, 9)]
write.csv(spills, './data/spills_lsd.csv',
          row.names = FALSE,
          quote = FALSE)
# NOTE(review): 'unqique_lsd.csv' looks like a typo for 'unique_lsd.csv';
# kept byte-for-byte so downstream consumers of the existing file still work.
write.csv(unique(spills$Land.Dls.Id), './unqique_lsd.csv', row.names = FALSE, quote = FALSE)
|
/readcsv.R
|
no_license
|
andrewjdyck/sask-spills-shiny
|
R
| false
| false
| 360
|
r
|
## Extract the incident id and legal-subdivision (LSD) columns from the
## upstream incident report and write them out, along with the distinct
## LSD values.
incident_csv <- './87695-IncidentReport_2017-02-21_233917.XLSX - UpstreamIncident.csv'
incidents <- read.csv(incident_csv, header = TRUE, stringsAsFactors = FALSE)
# keep only columns 1 (incident id) and 9 (Land.Dls.Id)
spills <- incidents[, c(1, 9)]
write.csv(spills, './data/spills_lsd.csv',
          row.names = FALSE,
          quote = FALSE)
# NOTE(review): 'unqique_lsd.csv' looks like a typo for 'unique_lsd.csv';
# kept byte-for-byte so downstream consumers of the existing file still work.
write.csv(unique(spills$Land.Dls.Id), './unqique_lsd.csv', row.names = FALSE, quote = FALSE)
|
##Read in data##
# Combine thrombosis time-course data from studies 25 and 26. KO_Group labels
# Cre-negative mice "control", otherwise the tamoxifen dose.
data_25=read.csv("Study25_thrombosis.csv")
data_25$KO_Group=ifelse(data_25$Cre=="-", "control", as.character(data_25$TM_dose))
data_26=read.csv("Study26_thrombosis.csv")
data_26$KO_Group=ifelse(data_26$Cre=="-", "control", as.character(data_26$TM_dose))
# all=TRUE keeps every row of both studies (full outer join on shared columns)
data_thrombosis1=merge(data_25, data_26, all=TRUE)
##Dropping dead mouse 325-3##
data_thrombosis1.1=data_thrombosis1[!(data_thrombosis1$Mouse_ID =="325-3"),]
library(reshape)
# Melt to long form: all non-id columns (the per-time-point measurements)
# become (variable, value) pairs, one row per mouse x time point.
data_thrombosis2=melt(data_thrombosis1.1,id=c("Mouse_ID","Sex","DOB","Cre", "TM_dose","Treatment","Exp_Date",
  "KO_Group","Renal_medulla_Ptgs1", "Renal_medulla_Ptgs2", "Lung_Ptgs1", "Lung_Ptgs2", "Aorta_Ptgs1", "Aorta_Ptgs2","Celecoxib_trough","PGEM_basal","PGDM_basal","PGIM_basal","TxM_basal", "PGEM_NSAID","PGDM_NSAID","PGIM_NSAID","TxM_NSAID"))
##Convert time variable to numeric##
# factor level index minus 1, so the first time point becomes 0 -- presumably
# a time offset in msec; confirm against the raw column names.
data_thrombosis2$variable=as.numeric (data_thrombosis2$variable)-1
# Urinary metabolite fold changes: on-NSAID level relative to baseline
data_thrombosis2$PGIM_foldchange=data_thrombosis2$PGIM_NSAID/data_thrombosis2$PGIM_basal
data_thrombosis2$TxM_foldchange=data_thrombosis2$TxM_NSAID/data_thrombosis2$TxM_basal
data_thrombosis2$PGEM_foldchange=data_thrombosis2$PGEM_NSAID/data_thrombosis2$PGEM_basal
data_thrombosis2$PGDM_foldchange=data_thrombosis2$PGDM_NSAID/data_thrombosis2$PGDM_basal
# Group = genotype x treatment combination, used for plot colouring below
data_thrombosis2$Group=with(data_thrombosis2, interaction(Cre,Treatment))
library(ggplot2)
# Shared minimal "publication" theme applied to all plots
pub_specs=theme(panel.background = element_blank(), panel.grid.major = element_blank(),panel.grid.minor=element_blank(), axis.line.x=element_line(color="black"), axis.line.y=element_line(color="black"),
        axis.title.x=element_text(size=12), axis.title.y=element_text(size=12),title=element_text(size=14))
# Exploratory views of the aggregation traces, coloured several ways
ggplot(data_thrombosis2, aes(variable,value,color=Mouse_ID))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation (FITC Fluorescence Intensity)")
ggplot(data_thrombosis2, aes(PGIM_NSAID,value,color=Mouse_ID))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(PGIM_foldchange,value,color=Mouse_ID))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Cre))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Treatment))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs +scale_y_log10()
# NOTE(review): tapply() returns one value per Mouse_ID (in group order), not
# one per row; assigning it into a column of the long data frame relies on
# silent recycling, so values will not line up with their rows. These two
# columns appear to be superseded by the per-mouse TMAX/VMAX summaries
# below -- verify before using them.
data_thrombosis2$vmax=tapply(data_thrombosis2$value,data_thrombosis2$Mouse_ID, FUN=max)
data_thrombosis2$tmax=tapply(data_thrombosis2$value,data_thrombosis2$Mouse_ID, FUN=which.max)
library(MESS)
library(dplyr)
# Per-mouse curve summaries: total AUC, AUC before/after the peak
# (aggregation vs disaggregation phase), time index of peak, and peak height.
by_mouse=group_by(data_thrombosis2,Mouse_ID)
AUC=summarize(by_mouse,auc=auc(variable,value))
agg.AUC=summarize(by_mouse,agg.auc=auc(variable,value, to=tmax))
dis.AUC=summarize(by_mouse,dis.auc=auc(variable,value, from=tmax))
TMAX=summarize(by_mouse, tmax=which.max(value))
VMAX=summarize(by_mouse, vmax=max(value))
# Assemble the per-mouse summary table: join the curve summaries together,
# then back onto the wide study metadata.
data_1=merge(AUC, agg.AUC, all=TRUE)
data_2=merge(dis.AUC, TMAX, all=TRUE)
data_3=merge(data_1,VMAX,all=TRUE)
data_4=merge(data_3,data_2,all=TRUE)
data_thrombosis3=merge(data_4,data_thrombosis1.1, all=TRUE)
# Recompute the metabolite fold changes on the per-mouse table
data_thrombosis3$PGIM_foldchange=data_thrombosis3$PGIM_NSAID/data_thrombosis3$PGIM_basal
data_thrombosis3$TxM_foldchange=data_thrombosis3$TxM_NSAID/data_thrombosis3$TxM_basal
data_thrombosis3$PGEM_foldchange=data_thrombosis3$PGEM_NSAID/data_thrombosis3$PGEM_basal
data_thrombosis3$PGDM_foldchange=data_thrombosis3$PGDM_NSAID/data_thrombosis3$PGDM_basal
data_thrombosis3$Group=with(data_thrombosis3, interaction(Cre,Treatment))
# Tertile bins of Ptgs2 expression per tissue, cut at the sample quantile
# thirds; include.lowest ensures the minimum value lands in the first bin.
data_thrombosis3$Kidney_tertile= with(data_thrombosis3, cut (Renal_medulla_Ptgs2, breaks=quantile(Renal_medulla_Ptgs2, probs=seq(0,1,by=1/3), na.rm=TRUE),include.lowest=TRUE))
data_thrombosis3$Lung_tertile= with(data_thrombosis3, cut (Lung_Ptgs2, breaks=quantile(Lung_Ptgs2, probs=seq(0,1,by=1/3), na.rm=TRUE),include.lowest=TRUE))
data_thrombosis3$Aorta_tertile= with(data_thrombosis3, cut (Aorta_Ptgs2, breaks=quantile(Aorta_Ptgs2, probs=seq(0,1,by=1/3), na.rm=TRUE),include.lowest=TRUE))
data_thrombosis3$KO_Group=ifelse(data_thrombosis3$Cre=="-", "control", as.character(data_thrombosis3$TM_dose))
# Exploratory plot battery on the per-mouse summaries: each curve summary
# (vmax, tmax, auc, agg.auc, dis.auc) against group, each urinary metabolite
# (absolute and fold-change), and celecoxib trough concentration.
ggplot(data_thrombosis3, aes(Group, vmax))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, tmax))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, auc))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, agg.auc))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, dis.auc))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Group))+geom_point()+pub_specs
pdf("Study25_graphs.pdf")
ggplot(data_thrombosis2, aes(variable,value,color=Mouse_ID))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation (FITC Fluorescence Intensity)")
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Group, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="AUC")
ggplot(data_thrombosis3, aes(Group, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Group, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Group, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Group, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Group, PGIM_foldchange, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="PGIM (fold change from baseline)")
ggplot(data_thrombosis3, aes(Group, Renal_medulla_Ptgs2, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Renal Medulla Ptgs2")
ggplot(data_thrombosis3, aes(Group, PGIM_NSAID, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="PGIM (ng/mg creatinine)")
ggplot(data_thrombosis3, aes(Kidney_tertile, auc, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Kidney_tertile, agg.auc, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, dis.auc, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Kidney_tertile, agg.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, dis.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Lung_tertile, auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Lung_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Lung_tertile, agg.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Lung_tertile, dis.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Aorta_tertile, auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Aorta_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Aorta_tertile, agg.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Aorta_tertile, dis.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+coord_fixed(ratio=5/10e5)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")+coord_fixed(ratio=1/100)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+coord_fixed(ratio=5/10e5)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")+coord_fixed(ratio=1/100)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+coord_fixed(ratio=5/10e5)+scale_x_log10()
# Fix: the original read `coord_fixed(ratio=ratio=1/100)`, which is an R
# syntax error (a named argument cannot itself contain a named assignment)
# and prevents the entire script from parsing. The intended aspect ratio,
# matching the parallel Kidney/Lung tertile plots above, is 1/100.
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")+coord_fixed(ratio=1/100)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="AUC")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="AUC")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="AUC")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+scale_x_log10()+scale_y_log10()
## Tail of an exploratory PDF report (the pdf() device was opened before this
## section).  Scatter plots of celecoxib exposure vs. urinary PGIM, then each
## thrombosis endpoint (auc, vmax, tmax, aggregation/disaggregation AUC)
## against Ptgs2 expression in renal medulla, lung, and aorta, and finally
## vmax against urinary prostanoid metabolites and Ptgs1 expression.
ggplot(data_thrombosis3, aes(Celecoxib_trough, PGIM_foldchange, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="PGIM (fold change from baseline)")+scale_x_log10()
## Endpoints vs. renal medulla Ptgs2 (axes clipped via ylim for comparability).
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Disaggregation Phase (AUC)")+ylim(0,3e08)
## Endpoints vs. lung Ptgs2.
ggplot(data_thrombosis3, aes(Lung_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Lung_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Lung_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Lung_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Disaggregation Phase (AUC)")+ylim(0,3e08)
## Endpoints vs. aorta Ptgs2.
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Disaggregation Phase (AUC)")+ylim(0,3e08)
## vmax vs. urinary prostanoid metabolites (fold change, then absolute
## creatinine-normalized values), colored by KO group or treatment group.
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=KO_Group))+geom_point(size=3)+pub_specs+ labs(title="PGIM",x="PGIM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(TxM_foldchange, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="TxM",x="TxM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGEM_foldchange, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGEM",x="PGEM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGDM_foldchange, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGDM",x="PGDM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGIM",x="PGIM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(TxM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="TxM",x="TxM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGEM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGEM",x="PGEM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGDM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGDM",x="PGDM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
## vmax vs. constitutive Ptgs1 expression in the three tissues.
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs1, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs1", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Lung_Ptgs1, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs1", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs1, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs1", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
## Close the PDF device for this report.
dev.off()
## Publication-style figure set written to DC_Thrombosis_graphs.pdf:
## raw aggregation traces, celecoxib/PGIM vs. vmax scatter plots, Ptgs2
## expression (continuous and tertiled) vs. vmax, and group-level boxplots of
## every endpoint.  Device is closed by the dev.off() at the end of this block.
pdf("DC_Thrombosis_graphs.pdf")
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=1/1000)+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=KO_Group))+geom_point(size=3)+pub_specs + labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+scale_x_log10()+ guides(colour=guide_legend("KO Group"))+theme(legend.position="bottom",legend.box="horizontal")
## Urinary PGIM (fold change, then absolute) vs. vmax, colored two ways.
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(fold change relative to baseline)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=KO_Group))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(fold change relative to baseline)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("KO Group"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(ng/mg creatinine)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=KO_Group))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(ng/mg creatinine)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("KO Group"))+theme(legend.position="bottom",legend.box="horizontal")
## Tissue Ptgs2 expression vs. vmax, continuous then by tertile (points and
## boxplots).
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Renal Medulla",x="Renal Medulla Ptgs2 Expression", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Lung",x="Lung Ptgs2 Expression", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Aorta",x="Aorta Ptgs2 Expression", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Renal Medulla",x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Lung_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Lung",x="Lung Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Aorta_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Aorta",x="Aorta Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs + labs(title="Renal Medulla",x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Lung_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs + labs(title="Lung",x="Lung Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Aorta_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs + labs(title="Aorta",x="Aorta Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
## Boxplots of each endpoint / covariate by KO group, filled by treatment.
ggplot(data_thrombosis3, aes(KO_Group, vmax))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, vmax))+geom_boxplot()+pub_specs + labs(x="KO Group", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(KO_Group, auc))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="AUC")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, agg.auc))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Aggregation Phase (AUC)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, dis.auc))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Disaggregation Phase (AUC)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Celecoxib_trough))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Celecoxib Plasma Concentration (micromolar)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, PGIM_foldchange))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Urinary PGIM\n(fold-change from baseline)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, PGIM_NSAID))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Urinary PGIM\n(ng/mg creatinine)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Renal_medulla_Ptgs2))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Renal Medulla Ptgs2")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Aorta_Ptgs2))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Aorta Ptgs2")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Lung_Ptgs2))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Lung Ptgs2")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
dev.off()
|
/Thrombosis_analysis.R
|
no_license
|
kntheken/r_scripts
|
R
| false
| false
| 32,075
|
r
|
## Read in data: two study CSVs are merged, a dead mouse is dropped, and the
## wide per-timepoint columns are melted to long format for trace plotting.
data_25=read.csv("Study25_thrombosis.csv")
## KO_Group: Cre-negative animals are controls; Cre-positive are labeled by
## their tamoxifen dose.
data_25$KO_Group=ifelse(data_25$Cre=="-", "control", as.character(data_25$TM_dose))
data_26=read.csv("Study26_thrombosis.csv")
data_26$KO_Group=ifelse(data_26$Cre=="-", "control", as.character(data_26$TM_dose))
## all=TRUE keeps rows/columns unique to either study (full outer merge).
data_thrombosis1=merge(data_25, data_26, all=TRUE)
##Dropping dead mouse 325-3##
data_thrombosis1.1=data_thrombosis1[!(data_thrombosis1$Mouse_ID =="325-3"),]
library(reshape)
## Melt to long format: every non-id column becomes a (variable, value) pair;
## here the non-id columns are presumably the per-timepoint fluorescence
## readings — TODO confirm against the CSV headers.
data_thrombosis2=melt(data_thrombosis1.1,id=c("Mouse_ID","Sex","DOB","Cre", "TM_dose","Treatment","Exp_Date",
"KO_Group","Renal_medulla_Ptgs1", "Renal_medulla_Ptgs2", "Lung_Ptgs1", "Lung_Ptgs2", "Aorta_Ptgs1", "Aorta_Ptgs2","Celecoxib_trough","PGEM_basal","PGDM_basal","PGIM_basal","TxM_basal", "PGEM_NSAID","PGDM_NSAID","PGIM_NSAID","TxM_NSAID"))
##Convert time variable to numeric##
## melt() yields a factor; its integer codes minus one serve as the time axis.
data_thrombosis2$variable=as.numeric (data_thrombosis2$variable)-1
## Urinary prostanoid metabolites as fold change from each animal's baseline.
data_thrombosis2$PGIM_foldchange=data_thrombosis2$PGIM_NSAID/data_thrombosis2$PGIM_basal
data_thrombosis2$TxM_foldchange=data_thrombosis2$TxM_NSAID/data_thrombosis2$TxM_basal
data_thrombosis2$PGEM_foldchange=data_thrombosis2$PGEM_NSAID/data_thrombosis2$PGEM_basal
data_thrombosis2$PGDM_foldchange=data_thrombosis2$PGDM_NSAID/data_thrombosis2$PGDM_basal
## Combined genotype-by-treatment factor (e.g. "-.Celecoxib").
data_thrombosis2$Group=with(data_thrombosis2, interaction(Cre,Treatment))
library(ggplot2)
# Shared publication theme applied to every plot in this script: blank panel
# background and grid, black x/y axis lines, 12 pt axis titles, 14 pt titles.
pub_specs <- theme(
  panel.background = element_blank(),
  panel.grid.major = element_blank(),
  panel.grid.minor = element_blank(),
  axis.line.x = element_line(color = "black"),
  axis.line.y = element_line(color = "black"),
  axis.title.x = element_text(size = 12),
  axis.title.y = element_text(size = 12),
  title = element_text(size = 14)
)
## Interactive exploratory plots of the raw aggregation traces (rendered to
## the current graphics device, not a file): colored per mouse, then by
## genotype, treatment, and combined group; final plot on a log y scale.
ggplot(data_thrombosis2, aes(variable,value,color=Mouse_ID))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation (FITC Fluorescence Intensity)")
ggplot(data_thrombosis2, aes(PGIM_NSAID,value,color=Mouse_ID))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(PGIM_foldchange,value,color=Mouse_ID))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Cre))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Treatment))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs +scale_y_log10()
## Per-mouse maximal thrombus size (vmax) and the time index at which it
## occurs (tmax), broadcast onto every row of the long-format data so that
## downstream group-wise code (e.g. summarize(..., to=tmax)) can read them.
##
## BUG FIX: the original used tapply(), which returns ONE value per mouse
## (ordered by factor level) — shorter than the data frame.  Because melt()
## gives every mouse the same number of rows, the short vector was silently
## recycled, misaligning each mouse's statistic with its own rows.  ave()
## computes the same per-group statistic but returns a vector aligned
## row-by-row with the input, which is what a data-frame column requires.
data_thrombosis2$vmax <- ave(data_thrombosis2$value, data_thrombosis2$Mouse_ID,
                             FUN = max)
data_thrombosis2$tmax <- ave(data_thrombosis2$value, data_thrombosis2$Mouse_ID,
                             FUN = which.max)
## Per-mouse summary endpoints: total AUC of the trace, AUC of the
## aggregation phase (up to tmax) and disaggregation phase (after tmax),
## time of maximum, and maximal thrombus size.  Results are merged back with
## the per-animal wide data into data_thrombosis3.
library(MESS)
library(dplyr)
by_mouse=group_by(data_thrombosis2,Mouse_ID)
AUC=summarize(by_mouse,auc=auc(variable,value))
## NOTE(review): `to=tmax`/`from=tmax` pass the per-row tmax column within each
## group to MESS::auc, which expects a scalar cutoff — within a mouse the
## column is constant, but confirm auc() tolerates a vector here.  Also note
## tmax is a 1-based index while `variable` starts at 0 — verify intent.
agg.AUC=summarize(by_mouse,agg.auc=auc(variable,value, to=tmax))
dis.AUC=summarize(by_mouse,dis.auc=auc(variable,value, from=tmax))
TMAX=summarize(by_mouse, tmax=which.max(value))
VMAX=summarize(by_mouse, vmax=max(value))
## Pairwise full-outer merges on Mouse_ID to assemble one row per animal.
data_1=merge(AUC, agg.AUC, all=TRUE)
data_2=merge(dis.AUC, TMAX, all=TRUE)
data_3=merge(data_1,VMAX,all=TRUE)
data_4=merge(data_3,data_2,all=TRUE)
data_thrombosis3=merge(data_4,data_thrombosis1.1, all=TRUE)
## Re-derive the urinary-metabolite fold changes and group factor on the
## per-animal frame (same definitions as for data_thrombosis2 above).
data_thrombosis3$PGIM_foldchange=data_thrombosis3$PGIM_NSAID/data_thrombosis3$PGIM_basal
data_thrombosis3$TxM_foldchange=data_thrombosis3$TxM_NSAID/data_thrombosis3$TxM_basal
data_thrombosis3$PGEM_foldchange=data_thrombosis3$PGEM_NSAID/data_thrombosis3$PGEM_basal
data_thrombosis3$PGDM_foldchange=data_thrombosis3$PGDM_NSAID/data_thrombosis3$PGDM_basal
data_thrombosis3$Group=with(data_thrombosis3, interaction(Cre,Treatment))
## Tissue Ptgs2 expression binned into tertiles (NA-safe quantile breaks;
## include.lowest keeps the minimum in the first bin).
data_thrombosis3$Kidney_tertile= with(data_thrombosis3, cut (Renal_medulla_Ptgs2, breaks=quantile(Renal_medulla_Ptgs2, probs=seq(0,1,by=1/3), na.rm=TRUE),include.lowest=TRUE))
data_thrombosis3$Lung_tertile= with(data_thrombosis3, cut (Lung_Ptgs2, breaks=quantile(Lung_Ptgs2, probs=seq(0,1,by=1/3), na.rm=TRUE),include.lowest=TRUE))
data_thrombosis3$Aorta_tertile= with(data_thrombosis3, cut (Aorta_Ptgs2, breaks=quantile(Aorta_Ptgs2, probs=seq(0,1,by=1/3), na.rm=TRUE),include.lowest=TRUE))
data_thrombosis3$KO_Group=ifelse(data_thrombosis3$Cre=="-", "control", as.character(data_thrombosis3$TM_dose))
## Interactive exploratory plots of the per-mouse endpoints: by group, then
## against each urinary prostanoid metabolite (absolute and fold change) and
## against celecoxib trough concentration.
ggplot(data_thrombosis3, aes(Group, vmax))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, tmax))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, auc))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, agg.auc))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Group, dis.auc))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGIM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(TxM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGEM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_NSAID, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_NSAID, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_NSAID, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_foldchange, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_foldchange, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(PGDM_foldchange, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Group))+geom_point()+pub_specs
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Group))+geom_point()+pub_specs
## Figure set written to Study25_graphs.pdf: raw traces, group-level endpoint
## plots, endpoints by Ptgs2 tertile (points and boxplots), and endpoints vs.
## celecoxib trough concentration.  (The matching dev.off() occurs after the
## remaining plots in the next section.)
pdf("Study25_graphs.pdf")
ggplot(data_thrombosis2, aes(variable,value,color=Mouse_ID))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation (FITC Fluorescence Intensity)")
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation (FITC Fluorescence Intensity)")
## Per-group summary endpoints.
ggplot(data_thrombosis3, aes(Group, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="AUC")
ggplot(data_thrombosis3, aes(Group, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Group, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Group, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Group, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Group, PGIM_foldchange, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="PGIM (fold change from baseline)")
ggplot(data_thrombosis3, aes(Group, Renal_medulla_Ptgs2, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="Renal Medulla Ptgs2")
ggplot(data_thrombosis3, aes(Group, PGIM_NSAID, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Group", y="PGIM (ng/mg creatinine)")
## Endpoints by Ptgs2 tertile — points for the kidney, boxplots for all three
## tissues.  coord_fixed() pins the panel aspect ratio for consistent sizing.
ggplot(data_thrombosis3, aes(Kidney_tertile, auc, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Kidney_tertile, agg.auc, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, dis.auc, color=Treatment))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Kidney_tertile, agg.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Kidney_tertile, dis.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Renal Medulla Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Lung_tertile, auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Lung_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Lung_tertile, agg.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Lung_tertile, dis.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Lung Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Aorta_tertile, auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="AUC")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Aorta_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=20/10e6)
ggplot(data_thrombosis3, aes(Aorta_tertile, agg.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
ggplot(data_thrombosis3, aes(Aorta_tertile, dis.auc, color=Treatment))+geom_boxplot()+pub_specs+ labs(x="Aorta Ptgs2 Expression Tertile", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)
## Endpoints vs. celecoxib trough, colored by group and then by tissue
## tertile (log-scaled x for the tertile versions).
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+coord_fixed(ratio=5/10e5)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")+coord_fixed(ratio=1/100)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Kidney_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+coord_fixed(ratio=5/10e5)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")+coord_fixed(ratio=1/100)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Lung_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+coord_fixed(ratio=5/10e5)+scale_x_log10()
## FIX: the original line read coord_fixed(ratio=ratio=1/100) — a duplicated
## "ratio=" argument name, which is a parse error and aborts the entire file
## when it is source()d.  The intended call (matching the identical
## Kidney_tertile and Lung_tertile plots above) is coord_fixed(ratio=1/100).
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")+coord_fixed(ratio=1/100)+scale_x_log10()
## Study25_graphs.pdf, continued: remaining celecoxib-by-tertile plots,
## endpoints vs. tissue Ptgs2 expression (unclipped then axis-clipped), and
## vmax vs. urinary prostanoid metabolites.  The pdf device opened earlier is
## closed after this section (beyond the end of this excerpt).
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Aorta_tertile))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+coord_fixed(ratio=5/10e8)+scale_x_log10()
## Endpoints vs. tissue Ptgs2 expression (full axis ranges).
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="AUC")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="AUC")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Disaggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="AUC")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Aggregation Phase (AUC)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Disaggregation Phase (AUC)")
## Same relationships with clipped y axes (ylim) for readability.
ggplot(data_thrombosis3, aes(Celecoxib_trough, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Celecoxib_trough, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Celecoxib_trough, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Celecoxib_trough, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="Disaggregation Phase (AUC)")+scale_x_log10()+scale_y_log10()
ggplot(data_thrombosis3, aes(Celecoxib_trough, PGIM_foldchange, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Celecoxib Plasma Concentration (micromolar)", y="PGIM (fold change from baseline)")+scale_x_log10()
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs2", y="Disaggregation Phase (AUC)")+ylim(0,3e08)
ggplot(data_thrombosis3, aes(Lung_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Lung_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Lung_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Lung_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs2", y="Disaggregation Phase (AUC)")+ylim(0,3e08)
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="AUC")+ylim(0,4e08)
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")+ylim(0,750000)
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, tmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Time to Maximal Thrombus (msec)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, agg.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Aggregation Phase (AUC)")+ylim(0,1e08)
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, dis.auc, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs2", y="Disaggregation Phase (AUC)")+ylim(0,3e08)
## vmax vs. urinary prostanoid metabolites (fold change, then absolute).
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=KO_Group))+geom_point(size=3)+pub_specs+ labs(title="PGIM",x="PGIM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(TxM_foldchange, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="TxM",x="TxM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGEM_foldchange, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGEM",x="PGEM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGDM_foldchange, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGDM",x="PGDM (fold-change from baseline)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGIM",x="PGIM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(TxM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="TxM",x="TxM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGEM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGEM",x="PGEM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(PGDM_NSAID, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(title="PGDM",x="PGDM (ng/mg creatinine)", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs1, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Renal Medulla Ptgs1", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Lung_Ptgs1, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Lung Ptgs1", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(Aorta_Ptgs1, vmax, color=Group))+geom_point(size=3)+pub_specs+ labs(x="Aorta Ptgs1", y="Maximal Thrombus Size (FITC Fluorescence Intensity)")
dev.off()
pdf("DC_Thrombosis_graphs.pdf")
ggplot(data_thrombosis2, aes(variable,value,color=Group))+geom_point()+pub_specs + labs(x="Time (msec)", y="Platelet Aggregation\n(FITC Fluorescence Intensity)")+coord_fixed(ratio=1/1000)+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Celecoxib_trough, vmax, color=KO_Group))+geom_point(size=3)+pub_specs + labs(x="Celecoxib Plasma Concentration (micromolar)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+scale_x_log10()+ guides(colour=guide_legend("KO Group"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(fold change relative to baseline)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(PGIM_foldchange, vmax, color=KO_Group))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(fold change relative to baseline)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("KO Group"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(ng/mg creatinine)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(PGIM_NSAID, vmax, color=KO_Group))+geom_point(size=3)+pub_specs + labs(x="Urinary PGIM\n(ng/mg creatinine)", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("KO Group"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Renal_medulla_Ptgs2, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Renal Medulla",x="Renal Medulla Ptgs2 Expression", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Lung_Ptgs2, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Lung",x="Lung Ptgs2 Expression", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Aorta_Ptgs2, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Aorta",x="Aorta Ptgs2 Expression", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Renal Medulla",x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Lung_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Lung",x="Lung Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Aorta_tertile, vmax, color=Treatment))+geom_point(size=3)+pub_specs + labs(title="Aorta",x="Aorta Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Kidney_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs + labs(title="Renal Medulla",x="Renal Medulla Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Lung_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs + labs(title="Lung",x="Lung Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(Aorta_tertile, vmax, color=Treatment))+geom_boxplot()+pub_specs + labs(title="Aorta",x="Aorta Ptgs2 Expression Tertile", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")
ggplot(data_thrombosis3, aes(KO_Group, vmax))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, vmax))+geom_boxplot()+pub_specs + labs(x="KO Group", y="Maximal Thrombus Size\n(FITC Fluorescence Intensity)")
ggplot(data_thrombosis3, aes(KO_Group, auc))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="AUC")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, agg.auc))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Aggregation Phase (AUC)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, dis.auc))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Disaggregation Phase (AUC)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Celecoxib_trough))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Celecoxib Plasma Concentration (micromolar)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, PGIM_foldchange))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Urinary PGIM\n(fold-change from baseline)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, PGIM_NSAID))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Urinary PGIM\n(ng/mg creatinine)")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Renal_medulla_Ptgs2))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Renal Medulla Ptgs2")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Aorta_Ptgs2))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Aorta Ptgs2")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
ggplot(data_thrombosis3, aes(KO_Group, Lung_Ptgs2))+geom_boxplot(aes(fill=Treatment))+pub_specs + labs(x="KO Group", y="Lung Ptgs2")+ guides(colour=guide_legend("Treatment"))+theme(legend.position="bottom",legend.box="horizontal")+scale_fill_brewer(palette="Spectral")
dev.off()
|
#' Varying model based on formula (6)
#'
#' Sweeps out the (i, j)-specific coefficient heterogeneity using the
#' within-pair moment matrices \code{Aij} and returns the OLS estimate of the
#' common slope on the transformed data.
#'
#' @param mx a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @param my a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @export
#' @return numeric value of OLS beta
#' @references László Balázsi, László Mátyás (2014): The Estimation of Varying Coefficients Multi-dimensional Panel Data Models. Formula (6).
varyingModel1 <- function(mx, my) {
    if (nrow(mx) != nrow(my))
        stop('incompatible matrices provided')
    if (!identical(mx[, 1:3], my[, 1:3]))
        stop('incompatible matrices provided')
    l <- levels(mx[, 1])
    N  <- length(l)                     # number of cross-sectional units
    nT <- length(unique(mx[, 3]))       # number of time periods (was `T`, which shadowed TRUE)
    K  <- ncol(mx) - 3                  # number of regressors
    ## sort data by i, j, t -- apply the SAME permutation to both tables.
    ## Bugfix: previously only mx was reordered, so my[w, 4] was still read in
    ## the original row order and x/y observations were misaligned whenever
    ## the input was not already sorted.
    ord <- order(mx[, 1], mx[, 2], mx[, 3])
    mx <- mx[ord, ]
    my <- my[ord, ]
    ## Aij: per-(i, j) regressor cross-product, summed over time
    Aij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor levels of t
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * t(mx[w, 3+(1:K)])
            }))
        })
    }) ## N x N
    Cxx <- Reduce('+', lapply(unlist(Aij, recursive = FALSE), solve))
    ## transformed regressors
    MX <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                N^2 %*% t(mx[w, 3+(1:K)]) %*% solve(Aij[[i]][[j]]) %*% solve(Cxx)
            })})})) ## TODO check order
    ## Bij: per-(i, j) regressor/response cross-product, summed over time
    Bij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * my[w, 4]
            }))
        })
    })
    Cxy <- sum(mapply(function(x, y) solve(x) %*% y, unlist(Aij, recursive = FALSE), unlist(Bij, recursive = FALSE)))
    ## transformed response
    MY <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                my[w, 4] - t(mx[w, 3+(1:K)]) %*% ((solve(Aij[[i]][[j]]) %*% Bij[[i]][[j]]) - (solve(Aij[[i]][[j]]) %*% solve(Cxx) %*% Cxy))
            })})})) ## TODO check order
    ## OLS on the transformed variables
    solve(t(MX) %*% as.matrix(MX)) %*% t(MX) %*% as.matrix(MY)
}
#' Varying model based on formula (12)
#'
#' Transforms regressors, response and instrument with the (i, j)-specific
#' moment matrices, as in \code{varyingModel1} but with an additional
#' instrument table \code{mz}.
#'
#' @param mx a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @param my a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @param mz a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value} (instrument)
#' @export
#' @return the transformed instrument vector \code{MZ} (invisibly).
#'   NOTE: the final OLS/IV step of formula (12) is not implemented yet --
#'   this matches the original behaviour of ending on the \code{MZ} assignment.
#' @references László Balázsi, László Mátyás (2014): The Estimation of Varying Coefficients Multi-dimensional Panel Data Models. Formula (12).
varyingModel3 <- function(mx, my, mz) {
    ## scalar conditions: use short-circuit || rather than elementwise |
    if (nrow(mx) != nrow(my) || nrow(mx) != nrow(mz) || nrow(my) != nrow(mz))
        stop('incompatible matrices provided')
    if (!identical(mx[, 1:3], my[, 1:3]) || !identical(mx[, 1:3], mz[, 1:3]) || !identical(my[, 1:3], mz[, 1:3]))
        stop('incompatible matrices provided')
    l <- levels(mx[, 1])
    N  <- length(l)                     # number of cross-sectional units
    nT <- length(unique(mx[, 3]))       # number of time periods (was `T`, which shadowed TRUE)
    K  <- ncol(mx) - 3                  # number of regressors
    ## sort data by i, j, t -- apply the SAME permutation to all three tables.
    ## Bugfix: previously only mx was reordered, so my[w, 4] and mz[w, 4] were
    ## misaligned with mx whenever the input was not already sorted.
    ord <- order(mx[, 1], mx[, 2], mx[, 3])
    mx <- mx[ord, ]
    my <- my[ord, ]
    mz <- mz[ord, ]
    ## Aij: per-(i, j) regressor cross-product, summed over time
    Aij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor levels of t
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * t(mx[w, 3+(1:K)])
            }))
        })
    }) ## N x N
    Cxx <- Reduce('+', lapply(unlist(Aij, recursive = FALSE), solve))
    ## transformed regressors
    MX <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                N^2 %*% t(mx[w, 3+(1:K)]) %*% solve(Aij[[i]][[j]]) %*% solve(Cxx)
            })})})) ## TODO check order
    ## Bij: per-(i, j) regressor/response cross-product, summed over time
    Bij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * my[w, 4]
            }))
        })
    })
    Cxy <- sum(mapply(function(x, y) solve(x) %*% y, unlist(Aij, recursive = FALSE), unlist(Bij, recursive = FALSE)))
    ## transformed response
    MY <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                my[w, 4] - t(mx[w, 3+(1:K)]) %*% ((solve(Aij[[i]][[j]]) %*% Bij[[i]][[j]]) - (solve(Aij[[i]][[j]]) %*% solve(Cxx) %*% Cxy))
            })})})) ## TODO check order
    ## Dij: per-(i, j) regressor/instrument cross-product, summed over time
    Dij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * t(mz[w, 4])
            }))
        })
    })
    Cxz <- sum(mapply(function(x, y) solve(x) %*% y, unlist(Aij, recursive = FALSE), unlist(Dij, recursive = FALSE)))
    ## transformed instrument
    MZ <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                t(mz[w, 4]) - t(mx[w, 3+(1:K)]) %*% ((solve(Aij[[i]][[j]]) %*% Dij[[i]][[j]]) - (solve(Aij[[i]][[j]]) %*% solve(Cxx) %*% Cxz))
            })})})) ## TODO check order
    ## TODO: final OLS/IV estimator of formula (12) using MX, MY and MZ is
    ## still missing; returning MZ invisibly preserves the current behaviour.
    invisible(MZ)
}
|
/R/varying.R
|
no_license
|
GlafiraM/within
|
R
| false
| false
| 5,805
|
r
|
#' Varying model based on formula (6)
#'
#' Sweeps out the (i, j)-specific coefficient heterogeneity using the
#' within-pair moment matrices \code{Aij} and returns the OLS estimate of the
#' common slope on the transformed data.
#'
#' @param mx a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @param my a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @export
#' @return numeric value of OLS beta
#' @references László Balázsi, László Mátyás (2014): The Estimation of Varying Coefficients Multi-dimensional Panel Data Models. Formula (6).
varyingModel1 <- function(mx, my) {
    if (nrow(mx) != nrow(my))
        stop('incompatible matrices provided')
    if (!identical(mx[, 1:3], my[, 1:3]))
        stop('incompatible matrices provided')
    l <- levels(mx[, 1])
    N  <- length(l)                     # number of cross-sectional units
    nT <- length(unique(mx[, 3]))       # number of time periods (was `T`, which shadowed TRUE)
    K  <- ncol(mx) - 3                  # number of regressors
    ## sort data by i, j, t -- apply the SAME permutation to both tables.
    ## Bugfix: previously only mx was reordered, so my[w, 4] was still read in
    ## the original row order and x/y observations were misaligned whenever
    ## the input was not already sorted.
    ord <- order(mx[, 1], mx[, 2], mx[, 3])
    mx <- mx[ord, ]
    my <- my[ord, ]
    ## Aij: per-(i, j) regressor cross-product, summed over time
    Aij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor levels of t
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * t(mx[w, 3+(1:K)])
            }))
        })
    }) ## N x N
    Cxx <- Reduce('+', lapply(unlist(Aij, recursive = FALSE), solve))
    ## transformed regressors
    MX <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                N^2 %*% t(mx[w, 3+(1:K)]) %*% solve(Aij[[i]][[j]]) %*% solve(Cxx)
            })})})) ## TODO check order
    ## Bij: per-(i, j) regressor/response cross-product, summed over time
    Bij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * my[w, 4]
            }))
        })
    })
    Cxy <- sum(mapply(function(x, y) solve(x) %*% y, unlist(Aij, recursive = FALSE), unlist(Bij, recursive = FALSE)))
    ## transformed response
    MY <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                my[w, 4] - t(mx[w, 3+(1:K)]) %*% ((solve(Aij[[i]][[j]]) %*% Bij[[i]][[j]]) - (solve(Aij[[i]][[j]]) %*% solve(Cxx) %*% Cxy))
            })})})) ## TODO check order
    ## OLS on the transformed variables
    solve(t(MX) %*% as.matrix(MX)) %*% t(MX) %*% as.matrix(MY)
}
#' Varying model based on formula (12)
#'
#' Transforms regressors, response and instrument with the (i, j)-specific
#' moment matrices, as in \code{varyingModel1} but with an additional
#' instrument table \code{mz}.
#'
#' @param mx a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @param my a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value}
#' @param mz a \code{matrix} holding 4 columns for \code{i}, \code{j}, \code{t} and \code{value} (instrument)
#' @export
#' @return the transformed instrument vector \code{MZ} (invisibly).
#'   NOTE: the final OLS/IV step of formula (12) is not implemented yet --
#'   this matches the original behaviour of ending on the \code{MZ} assignment.
#' @references László Balázsi, László Mátyás (2014): The Estimation of Varying Coefficients Multi-dimensional Panel Data Models. Formula (12).
varyingModel3 <- function(mx, my, mz) {
    ## scalar conditions: use short-circuit || rather than elementwise |
    if (nrow(mx) != nrow(my) || nrow(mx) != nrow(mz) || nrow(my) != nrow(mz))
        stop('incompatible matrices provided')
    if (!identical(mx[, 1:3], my[, 1:3]) || !identical(mx[, 1:3], mz[, 1:3]) || !identical(my[, 1:3], mz[, 1:3]))
        stop('incompatible matrices provided')
    l <- levels(mx[, 1])
    N  <- length(l)                     # number of cross-sectional units
    nT <- length(unique(mx[, 3]))       # number of time periods (was `T`, which shadowed TRUE)
    K  <- ncol(mx) - 3                  # number of regressors
    ## sort data by i, j, t -- apply the SAME permutation to all three tables.
    ## Bugfix: previously only mx was reordered, so my[w, 4] and mz[w, 4] were
    ## misaligned with mx whenever the input was not already sorted.
    ord <- order(mx[, 1], mx[, 2], mx[, 3])
    mx <- mx[ord, ]
    my <- my[ord, ]
    mz <- mz[ord, ]
    ## Aij: per-(i, j) regressor cross-product, summed over time
    Aij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor levels of t
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * t(mx[w, 3+(1:K)])
            }))
        })
    }) ## N x N
    Cxx <- Reduce('+', lapply(unlist(Aij, recursive = FALSE), solve))
    ## transformed regressors
    MX <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                N^2 %*% t(mx[w, 3+(1:K)]) %*% solve(Aij[[i]][[j]]) %*% solve(Cxx)
            })})})) ## TODO check order
    ## Bij: per-(i, j) regressor/response cross-product, summed over time
    Bij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * my[w, 4]
            }))
        })
    })
    Cxy <- sum(mapply(function(x, y) solve(x) %*% y, unlist(Aij, recursive = FALSE), unlist(Bij, recursive = FALSE)))
    ## transformed response
    MY <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                my[w, 4] - t(mx[w, 3+(1:K)]) %*% ((solve(Aij[[i]][[j]]) %*% Bij[[i]][[j]]) - (solve(Aij[[i]][[j]]) %*% solve(Cxx) %*% Cxy))
            })})})) ## TODO check order
    ## Dij: per-(i, j) regressor/instrument cross-product, summed over time
    Dij <- lapply(structure(l, .Names = l), function(i) {
        lapply(structure(l, .Names = l), function(j) {
            sum(sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                mx[w, 3+(1:K)] * t(mz[w, 4])
            }))
        })
    })
    Cxz <- sum(mapply(function(x, y) solve(x) %*% y, unlist(Aij, recursive = FALSE), unlist(Dij, recursive = FALSE)))
    ## transformed instrument
    MZ <- as.vector(sapply(l, function(i) {
        sapply(l, function(j) {
            sapply(seq_len(nT), function(t) { ## TODO seq_len(nT) => factor
                w <- which(mx[, 1] == i & mx[, 2] == j & mx[, 3] == t)
                t(mz[w, 4]) - t(mx[w, 3+(1:K)]) %*% ((solve(Aij[[i]][[j]]) %*% Dij[[i]][[j]]) - (solve(Aij[[i]][[j]]) %*% solve(Cxx) %*% Cxz))
            })})})) ## TODO check order
    ## TODO: final OLS/IV estimator of formula (12) using MX, MY and MZ is
    ## still missing; returning MZ invisibly preserves the current behaviour.
    invisible(MZ)
}
|
library(MASS)
library(data.table)
## Read a whitespace-separated numeric file into a matrix.
##   file: path of the file to read
##   nint: number of intervals (rows) expected in the file
## Returns a numeric matrix with `nint` rows, filled row by row.
read.eec <- function(file, nint) {
  values <- scan(file)
  matrix(values, nrow = nint, byrow = TRUE)
}
## Apply a filter matrix to one compressed-spectrum file and write the
## filtered, signed-log-compressed result to the destination directory.
##   proposedFilters: matrix of filter weights, one column per filter
##   file:            file name (same name in origin and destination)
##   originPath/destinationPath: directory paths, both must end with "/"
##   nint:            number of rows (time intervals) in the input file
FileProcessing <- function(proposedFilters, file, originPath, destinationPath, nint) {
  ## project the (de-logged) spectrum onto the filters
  filtered <- exp(read.eec(paste(originPath, file, sep = ""), nint)) %*% proposedFilters
  magnitude <- abs(filtered)
  above.one <- (magnitude > 1)
  ## signed log compression: values with |x| <= 1 map to 0
  compressed <- sign(filtered) * log(magnitude * above.one + 1 * (!above.one))
  out.con <- gzfile(paste(destinationPath, file, sep = ""))
  write.table(compressed, out.con, sep = " ", col.names = FALSE, row.names = FALSE)
}
########
## Learn one discriminative spectral filter per frequency band for each
## channel of one subject (power iteration on the difference between mean
## preictal and mean interictal spectra), then apply the resulting filter
## matrix to every segment file of that channel.
##   indiv:           subject identifier used in the segment file names
##   channels:        integer vector of channel numbers to process
##   bandsFQ:         list of frequency-index ranges, one per band
##   epsilon:         convergence tolerance of the power iteration
##   max.iter:        iteration cap; non-convergence is logged to a warning file
##   originPath:      directory holding the compressed FFT segment files
##   destinationPath: output directory (created when missing)
## NOTE(review): nint/nfreq are obtained by shelling out to `zcat ... | wc`,
## which assumes gzip-compressed inputs and a Unix shell -- confirm portability.
BlockProcessing <- function(indiv,channels,
                            bandsFQ=list(9:327,328:655,656:983,984:2457,2458:5734,5735:16384),
                            epsilon=1e-4,
                            max.iter=40,
                            originPath= "TMP/FFT_60s_30s_COMPRESS",
                            destinationPath="TMP/Filtros_Pot_60s_global"){
  destinationPath <- paste(destinationPath,"/",sep="")
  if(!file.exists(destinationPath)){
    dir.create(destinationPath)
  }
  ## dimensions of one segment file: rows (time intervals) and values per row
  filesPre <- dir(originPath,pattern=paste(indiv,"_preictal",sep=""))
  nint <- as.integer(fread(paste("zcat ",originPath,"/",filesPre[1]," | wc -l",sep="")))
  nfreq <- as.integer(fread(paste("zcat ",originPath,"/",filesPre[1]," | wc -w",sep="")))/nint
  ng<-length(bandsFQ)
  for(k in channels){
    ## all preictal / interictal / test segment files for channel k
    filesPre <- dir(originPath,pattern=paste(indiv,"_preictal_segment_.....channel_",sprintf("%02d",k),sep=""))
    filesInter <- dir(originPath,pattern=paste(indiv,"_interictal_segment_.....channel_",sprintf("%02d",k),sep=""))
    testFiles <- dir(originPath,pattern=paste(indiv,"_test_segment_.....channel_",sprintf("%02d",k),sep=""))
    allFiles<-c(filesPre,filesInter,testFiles)
    ## skip channels whose outputs already exist (allows resuming a run)
    if(!all(file.exists(paste(destinationPath,allFiles,sep="")))){
      ## initialize every band with a uniform unit-norm filter
      proposedFilters <- list()
      for(g in bandsFQ){
        NumberBands<-length(g)
        proposedFilters[[length(proposedFilters)+1]] <- matrix(1/sqrt(NumberBands),nrow=NumberBands,ncol=1)
      } #close for initialization proposed Filters
      error <- 1
      iter<-0
      ## power iteration: project the data on the current filters, re-estimate
      ## the filters, and repeat until the largest update is below epsilon
      while(error>epsilon){
        iter<-iter +1
        if(iter>max.iter){
          ## no convergence: log the last error vector and give up on this channel
          write.matrix(errorvector,file=paste(destinationPath,"Warning_No_Convergencia_",indiv,"_channel_",k ,".txt",sep=""))
          break()
        } #closeif condition met to break
        vanew <- list()
        vbnew <- list()
        errorvector <- numeric(ng)
        for(ig in 1:ng){
          vanew[[ig]] <- matrix(0,nrow=1,ncol=length(bandsFQ[[ig]]))
          vbnew[[ig]] <- matrix(0,nrow=1,ncol=length(bandsFQ[[ig]]))
        }
        ## accumulate the filter-weighted spectra over the preictal files
        for(f in filesPre){
          fileName <- paste(originPath,"/",f,sep="")
          data.files <- exp(read.eec(fileName,nint))
          for(ig in 1:ng){
            g <- bandsFQ[[ig]]
            wa<-c(data.files[,g]%*%proposedFilters[[ig]])
            vanew[[ig]]<-vanew[[ig]]+ apply(diag(wa)%*%(data.files[,g]),2,sum)
          }
        } #close multiplication by filesPre
        ## ... and over the interictal files
        for(f in filesInter){
          fileName <- paste(originPath,"/",f,sep="")
          data.files <- exp(read.eec(fileName,nint))
          for(ig in 1:ng){
            g <- bandsFQ[[ig]]
            wb<-c(data.files[,g]%*%proposedFilters[[ig]])
            vbnew[[ig]] <- vbnew[[ig]]+ apply(diag(wb)%*%(data.files[,g]),2,sum)
          }
        } #close multiplication by filesInter
        ## new iterate: normalized difference of the per-class means
        for(ig in 1:ng){
          vnew <- t((1/length(filesPre))*vanew[[ig]]-(1/length(filesInter))*vbnew[[ig]])
          #Taking into account the sign to normalize
          vnew <- sign(sum(proposedFilters[[ig]]*vnew))*vnew/sqrt(sum(vnew^2))
          errorvector[ig] <- sqrt(sum((proposedFilters[[ig]]-vnew)^2))
          proposedFilters[[ig]] <- vnew
        } # close for calculation new interate
        error <- max(errorvector)
        cat(errorvector," \n")
      }#close loop power method
      ## assemble the full nfreq x ng filter matrix, zero-padding each band
      ## filter out to the complete frequency axis
      filterMatrix <- matrix(nrow=nfreq,ncol=0)
      for(ig in 1:ng){
        g<-bandsFQ[[ig]]
        filterMatrix <- cbind(filterMatrix,rbind(as.matrix(rep(0,g[1]-1)),proposedFilters[[ig]],as.matrix(rep(0,nfreq-g[length(g)]))))
      } # close for with filter matrix
      write.matrix(filterMatrix,file=paste(destinationPath,indiv,"_channel_",k ,"_DS.txt",sep=""))
      ## apply the learned filters to every segment (train and test alike)
      for( f in allFiles){
        FileProcessing(filterMatrix,f,paste(originPath,"/",sep=""),destinationPath,nint)
      }
    } # close if checking if files are now calculated
  } # close loop for channel
} # close function
## Driver: run BlockProcessing for every subject listed in $SUBJECTS.
subjects <- c(unlist(strsplit(Sys.getenv("SUBJECTS"), " ")))
sources <- Sys.getenv("FFT_COMPRESS_PATH")
for (indiv in subjects) {
  ## Patients use different frequency-index bands than the other subjects.
  if (indiv %in% c("Patient_1", "Patient_2")) {
    bandsFQ <- list(83:3354, 3355:6709, 6710:10065, 10066:25164, 25165:58719, 58720:262144)
  } else {
    bandsFQ <- list(9:327, 328:655, 656:983, 984:2457, 2458:5734, 5735:16384)
  }
  ## Count the channels by probing channel numbers until no file matches.
  ## (locals renamed so base::match and base::list are no longer shadowed)
  n.channels <- 0
  repeat {
    candidate <- n.channels + 1
    file.pattern <- sprintf("^%s_(.*)ictal(.*)channel_%02d.csv.gz$", indiv, candidate)
    channel.files <- list.files(path = sources, pattern = file.pattern, full.names = TRUE)
    if (length(channel.files) == 0) break
    n.channels <- candidate
  }
  ## seq_len() yields an empty vector when no channel files exist, whereas
  ## the previous 1:(i - 1) would have produced the bogus vector c(1, 0)
  channels <- seq_len(n.channels)
  BlockProcessing(indiv, channels,
                  bandsFQ = bandsFQ,
                  epsilon = 1e-4,
                  max.iter = 40,
                  originPath = sources,
                  destinationPath = Sys.getenv("DS_PATH"))
}
####### EOF
|
/scripts/PREPROCESS/filter_DS.R
|
permissive
|
fjmalmaraz/supervised-filters
|
R
| false
| false
| 5,989
|
r
|
library(MASS)
library(data.table)
## Read a whitespace-separated numeric file into a matrix.
##   file: path of the file to read
##   nint: number of intervals (rows) expected in the file
## Returns a numeric matrix with `nint` rows, filled row by row.
read.eec <- function(file, nint) {
  values <- scan(file)
  matrix(values, nrow = nint, byrow = TRUE)
}
## Apply a filter matrix to one compressed-spectrum file and write the
## filtered, signed-log-compressed result to the destination directory.
##   proposedFilters: matrix of filter weights, one column per filter
##   file:            file name (same name in origin and destination)
##   originPath/destinationPath: directory paths, both must end with "/"
##   nint:            number of rows (time intervals) in the input file
FileProcessing <- function(proposedFilters, file, originPath, destinationPath, nint) {
  ## project the (de-logged) spectrum onto the filters
  filtered <- exp(read.eec(paste(originPath, file, sep = ""), nint)) %*% proposedFilters
  magnitude <- abs(filtered)
  above.one <- (magnitude > 1)
  ## signed log compression: values with |x| <= 1 map to 0
  compressed <- sign(filtered) * log(magnitude * above.one + 1 * (!above.one))
  out.con <- gzfile(paste(destinationPath, file, sep = ""))
  write.table(compressed, out.con, sep = " ", col.names = FALSE, row.names = FALSE)
}
########
## Learn one discriminative spectral filter per frequency band for each
## channel of one subject (power iteration on the difference between mean
## preictal and mean interictal spectra), then apply the resulting filter
## matrix to every segment file of that channel.
##   indiv:           subject identifier used in the segment file names
##   channels:        integer vector of channel numbers to process
##   bandsFQ:         list of frequency-index ranges, one per band
##   epsilon:         convergence tolerance of the power iteration
##   max.iter:        iteration cap; non-convergence is logged to a warning file
##   originPath:      directory holding the compressed FFT segment files
##   destinationPath: output directory (created when missing)
## NOTE(review): nint/nfreq are obtained by shelling out to `zcat ... | wc`,
## which assumes gzip-compressed inputs and a Unix shell -- confirm portability.
BlockProcessing <- function(indiv,channels,
                            bandsFQ=list(9:327,328:655,656:983,984:2457,2458:5734,5735:16384),
                            epsilon=1e-4,
                            max.iter=40,
                            originPath= "TMP/FFT_60s_30s_COMPRESS",
                            destinationPath="TMP/Filtros_Pot_60s_global"){
  destinationPath <- paste(destinationPath,"/",sep="")
  if(!file.exists(destinationPath)){
    dir.create(destinationPath)
  }
  ## dimensions of one segment file: rows (time intervals) and values per row
  filesPre <- dir(originPath,pattern=paste(indiv,"_preictal",sep=""))
  nint <- as.integer(fread(paste("zcat ",originPath,"/",filesPre[1]," | wc -l",sep="")))
  nfreq <- as.integer(fread(paste("zcat ",originPath,"/",filesPre[1]," | wc -w",sep="")))/nint
  ng<-length(bandsFQ)
  for(k in channels){
    ## all preictal / interictal / test segment files for channel k
    filesPre <- dir(originPath,pattern=paste(indiv,"_preictal_segment_.....channel_",sprintf("%02d",k),sep=""))
    filesInter <- dir(originPath,pattern=paste(indiv,"_interictal_segment_.....channel_",sprintf("%02d",k),sep=""))
    testFiles <- dir(originPath,pattern=paste(indiv,"_test_segment_.....channel_",sprintf("%02d",k),sep=""))
    allFiles<-c(filesPre,filesInter,testFiles)
    ## skip channels whose outputs already exist (allows resuming a run)
    if(!all(file.exists(paste(destinationPath,allFiles,sep="")))){
      ## initialize every band with a uniform unit-norm filter
      proposedFilters <- list()
      for(g in bandsFQ){
        NumberBands<-length(g)
        proposedFilters[[length(proposedFilters)+1]] <- matrix(1/sqrt(NumberBands),nrow=NumberBands,ncol=1)
      } #close for initialization proposed Filters
      error <- 1
      iter<-0
      ## power iteration: project the data on the current filters, re-estimate
      ## the filters, and repeat until the largest update is below epsilon
      while(error>epsilon){
        iter<-iter +1
        if(iter>max.iter){
          ## no convergence: log the last error vector and give up on this channel
          write.matrix(errorvector,file=paste(destinationPath,"Warning_No_Convergencia_",indiv,"_channel_",k ,".txt",sep=""))
          break()
        } #closeif condition met to break
        vanew <- list()
        vbnew <- list()
        errorvector <- numeric(ng)
        for(ig in 1:ng){
          vanew[[ig]] <- matrix(0,nrow=1,ncol=length(bandsFQ[[ig]]))
          vbnew[[ig]] <- matrix(0,nrow=1,ncol=length(bandsFQ[[ig]]))
        }
        ## accumulate the filter-weighted spectra over the preictal files
        for(f in filesPre){
          fileName <- paste(originPath,"/",f,sep="")
          data.files <- exp(read.eec(fileName,nint))
          for(ig in 1:ng){
            g <- bandsFQ[[ig]]
            wa<-c(data.files[,g]%*%proposedFilters[[ig]])
            vanew[[ig]]<-vanew[[ig]]+ apply(diag(wa)%*%(data.files[,g]),2,sum)
          }
        } #close multiplication by filesPre
        ## ... and over the interictal files
        for(f in filesInter){
          fileName <- paste(originPath,"/",f,sep="")
          data.files <- exp(read.eec(fileName,nint))
          for(ig in 1:ng){
            g <- bandsFQ[[ig]]
            wb<-c(data.files[,g]%*%proposedFilters[[ig]])
            vbnew[[ig]] <- vbnew[[ig]]+ apply(diag(wb)%*%(data.files[,g]),2,sum)
          }
        } #close multiplication by filesInter
        ## new iterate: normalized difference of the per-class means
        for(ig in 1:ng){
          vnew <- t((1/length(filesPre))*vanew[[ig]]-(1/length(filesInter))*vbnew[[ig]])
          #Taking into account the sign to normalize
          vnew <- sign(sum(proposedFilters[[ig]]*vnew))*vnew/sqrt(sum(vnew^2))
          errorvector[ig] <- sqrt(sum((proposedFilters[[ig]]-vnew)^2))
          proposedFilters[[ig]] <- vnew
        } # close for calculation new interate
        error <- max(errorvector)
        cat(errorvector," \n")
      }#close loop power method
      ## assemble the full nfreq x ng filter matrix, zero-padding each band
      ## filter out to the complete frequency axis
      filterMatrix <- matrix(nrow=nfreq,ncol=0)
      for(ig in 1:ng){
        g<-bandsFQ[[ig]]
        filterMatrix <- cbind(filterMatrix,rbind(as.matrix(rep(0,g[1]-1)),proposedFilters[[ig]],as.matrix(rep(0,nfreq-g[length(g)]))))
      } # close for with filter matrix
      write.matrix(filterMatrix,file=paste(destinationPath,indiv,"_channel_",k ,"_DS.txt",sep=""))
      ## apply the learned filters to every segment (train and test alike)
      for( f in allFiles){
        FileProcessing(filterMatrix,f,paste(originPath,"/",sep=""),destinationPath,nint)
      }
    } # close if checking if files are now calculated
  } # close loop for channel
} # close function
## Driver: run BlockProcessing for every subject listed in $SUBJECTS.
subjects <- c(unlist(strsplit(Sys.getenv("SUBJECTS"), " ")))
sources <- Sys.getenv("FFT_COMPRESS_PATH")
for (indiv in subjects) {
  ## Patients use different frequency-index bands than the other subjects.
  if (indiv %in% c("Patient_1", "Patient_2")) {
    bandsFQ <- list(83:3354, 3355:6709, 6710:10065, 10066:25164, 25165:58719, 58720:262144)
  } else {
    bandsFQ <- list(9:327, 328:655, 656:983, 984:2457, 2458:5734, 5735:16384)
  }
  ## Count the channels by probing channel numbers until no file matches.
  ## (locals renamed so base::match and base::list are no longer shadowed)
  n.channels <- 0
  repeat {
    candidate <- n.channels + 1
    file.pattern <- sprintf("^%s_(.*)ictal(.*)channel_%02d.csv.gz$", indiv, candidate)
    channel.files <- list.files(path = sources, pattern = file.pattern, full.names = TRUE)
    if (length(channel.files) == 0) break
    n.channels <- candidate
  }
  ## seq_len() yields an empty vector when no channel files exist, whereas
  ## the previous 1:(i - 1) would have produced the bogus vector c(1, 0)
  channels <- seq_len(n.channels)
  BlockProcessing(indiv, channels,
                  bandsFQ = bandsFQ,
                  epsilon = 1e-4,
                  max.iter = 40,
                  originPath = sources,
                  destinationPath = Sys.getenv("DS_PATH"))
}
####### EOF
|
#Bar plots of the deltaH for each metric with the thresholds indicated as "likely altered" "likely unaltered"
#Current to future comparison
library(tidyverse)
#set output directories
## NOTE(review): output/input locations are hard-coded absolute user paths --
## consider parameterizing before running on another machine.
#current condition time period WY 1995-2016 (calibration time period)
#out.dir <- "C:/Users/KristineT/SCCWRP/Santa Margarita River Climate Change Analyses - FlowEcology/FlowData/FFM/DeltaHBarplots/"
#current condition time period WY 2002-2020
out.dir <- "C:/Users/KristineT/SCCWRP/Santa Margarita River Climate Change Analyses - FlowEcology/FlowData/FFM/Current_2002_2020/DeltaHBarplots/Current_Mid_Late_Century/current_to_future_comparison/"
dir.create(out.dir)
#read in to summarize the number of altered metric per scenario per CSCI and ASCI
results <- read.csv("C:/Users/KristineT/SCCWRP/Santa Margarita River Climate Change Analyses - FlowEcology/FlowData/FFM/Current_2002_2020/FFM_DeltaH_all_scenarios_current_future_alt_mid_late_century.csv")
#add columns with GCM and Scenario type
#find indices of current scenarios
ind.current <- grep("Current", results$Scenario)
#add scenario type column, set all to future, change current to current
results$ScenarioType <- "Late-Century"
results$ScenarioType[ind.current] <- "Current"
#mid-century
ind.midcentury <- grep("Mid-Century", results$Scenario)
results$ScenarioType[ind.midcentury] <- "Mid-Century"
#find indices of no cwrma, set CWRMA
ind.nocwrma <- grep("No CWRMA", results$Scenario)
#add scenario type column, set all to CWRMA, change no CWRMA to no CWRMA
results$CWRMA <- "CWRMA"
results$CWRMA[ind.nocwrma] <- "No CWRMA"
#set the GCM column, string split by : and take second element
results$GCM <- sapply(strsplit( results$Scenario, ": " ), "[", 2 )
#create a column header col
results$colheader <- paste0(results$ScenarioType, "_", results$CWRMA)
#create a new column with scenario type and GCM for bar labels
results$bar.labels <- paste0(results$GCM, "\n",results$CWRMA)
results$bar.labels1 <- paste0(results$ScenarioType, "\n",results$CWRMA)
#set levels for bar labels current, mid, late
results$bar.labels1 <- factor(results$bar.labels1, levels=c("Mid-Century\nCWRMA", "Late-Century\nCWRMA", "Mid-Century\nNo CWRMA","Late-Century\nNo CWRMA"))
#create new column with GCM wet/cool etc.
results$facet.gcm.name <- results$GCM
results$facet.gcm.name[results$facet.gcm.name == "HadGEM2-ES365"] <- "HadGEM2-ES365 (Drier/Warmer)"
results$facet.gcm.name[results$facet.gcm.name == "CNRM-CM5"] <- "CNRM-CM5 (Wetter/Cooler)"
results$facet.gcm.name[results$facet.gcm.name == "MIROC5"] <- "MIROC5 (Other)"
#will not use bio thresholds since comparison to ref
#read in deltaH thresholds for CSCI and ASCI
#bio.thresholds <- read.csv("C:/Users/KristineT/SCCWRP/SOC WQIP - Flow Ecology Study - General/Tier2_analysis/08_all_delta_thresholds.csv")
#scaled bio thresholds 0 to 1
bio.thresholds <- read.csv("C:/Users/KristineT/SCCWRP/SOC WQIP - Flow Ecology Study - General/Tier2_analysis/08_all_delta_thresholds_scaled.csv")
#Bar plot of median Delta H for each metric for each site
sites <- unique(results$Site)
metrics <- unique(results$metric)
#change to only important metrics
metrics <- unique(bio.thresholds$metric)
## NOTE(review): metric.names is paired with metrics purely by position --
## this assumes unique(bio.thresholds$metric) returns exactly these five
## metrics in this order; verify against the thresholds CSV.
metric.names <- c("Dry Season Baseflow Magnitude (cfs)", "Magnitude of Largest Annual Storm (cfs)", "Wet Season Baseflow Magnitude (cfs)", "Spring Recession Duration (days)", "Wet Season Baseflow Duration (days)")
for(i in 1:length(sites)){
#subset to site i
results.sub <- results %>%
filter(Site == sites[i])
#make plots for each metric
for(j in 1:length(metrics)){
results.sub.metric <- results.sub %>%
filter(metric == metrics[j])
#y-axis label
ylab.name <- paste0("Change in ", metric.names[j])
# #find the thresholds for CSCI and/or ASCI for metric j
# bio.thresholds.sub <- bio.thresholds %>%
# filter(metric == metrics[j])
#make faceted bar plots by ScenarioType (Current, Future) and GCM, color = CRWMA and no CWRMA
barplot <- ggplot(results.sub.metric, aes(x=bar.labels1, y=p50, fill=CWRMA)) +
facet_wrap(~facet.gcm.name, ncol=3) +
geom_bar(stat="identity") +
labs(title = paste0(sites[i]), subtitle=metric.names[j],
color = "Legend") + ylab(ylab.name) + xlab("") +
theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
theme_bw() + theme(legend.position="bottom") +
theme(axis.text.x = element_text(angle = 45, hjust=1)) +
geom_hline(yintercept = 0, color="black")
#if Q99, add in thresholds for CSCI and ASCI
if(metrics[j] == "Q99"){
#make faceted bar plots by ScenarioType (Current, Future) and GCM, color = CRWMA and no CWRMA
barplot <- ggplot(results.sub.metric, aes(x=bar.labels1, y=p50, fill=CWRMA)) +
facet_wrap(~facet.gcm.name, ncol=3) +
geom_bar(stat="identity") +
labs(title = paste0(sites[i]), subtitle=metric.names[j],
color = "Legend") + ylab(ylab.name) + xlab("") +
theme(panel.border = element_blank(), panel.grid.major = element_blank(),
panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
theme_bw() + theme(legend.position="bottom") +
theme(axis.text.x = element_text(angle = 45, hjust=1)) +
geom_hline(yintercept = 0)
}
print(barplot)
#save
file.name2 <- paste0(out.dir, sites[i], "_", metrics[j], "_DeltaH_barplots.jpg")
ggsave(barplot, filename=file.name2, dpi=300, height=5, width=10)
}
}
|
/SMR_FFM_DeltaH_BarPlots_currentto_future_alt.R
|
no_license
|
kristaniguchi/SMR_FlowEcology
|
R
| false
| false
| 5,646
|
r
|
# Bar plots of deltaH for each functional flow metric (FFM), with thresholds
# indicated as "likely altered" / "likely unaltered".
# Current-to-future (mid/late century) comparison, Santa Margarita River.
library(tidyverse)

# Output directory.
# Current condition time period WY 1995-2016 (calibration time period):
#out.dir <- "C:/Users/KristineT/SCCWRP/Santa Margarita River Climate Change Analyses - FlowEcology/FlowData/FFM/DeltaHBarplots/"
# Current condition time period WY 2002-2020:
out.dir <- "C:/Users/KristineT/SCCWRP/Santa Margarita River Climate Change Analyses - FlowEcology/FlowData/FFM/Current_2002_2020/DeltaHBarplots/Current_Mid_Late_Century/current_to_future_comparison/"
dir.create(out.dir)

# Read summarized deltaH results for all scenarios (current, mid-, late-century).
results <- read.csv("C:/Users/KristineT/SCCWRP/Santa Margarita River Climate Change Analyses - FlowEcology/FlowData/FFM/Current_2002_2020/FFM_DeltaH_all_scenarios_current_future_alt_mid_late_century.csv")

# ---- Derive scenario descriptors from the Scenario string ----
# Scenario type: default to Late-Century, then override Current / Mid-Century rows.
ind.current <- grep("Current", results$Scenario)
results$ScenarioType <- "Late-Century"
results$ScenarioType[ind.current] <- "Current"
ind.midcentury <- grep("Mid-Century", results$Scenario)
results$ScenarioType[ind.midcentury] <- "Mid-Century"

# CWRMA flag: default to CWRMA, override the "No CWRMA" rows.
ind.nocwrma <- grep("No CWRMA", results$Scenario)
results$CWRMA <- "CWRMA"
results$CWRMA[ind.nocwrma] <- "No CWRMA"

# GCM name: second element when splitting Scenario on ": ".
results$GCM <- sapply(strsplit(results$Scenario, ": "), "[", 2)

# Composite labels used for column headers and bar x-axis labels.
results$colheader <- paste0(results$ScenarioType, "_", results$CWRMA)
results$bar.labels <- paste0(results$GCM, "\n", results$CWRMA)
results$bar.labels1 <- paste0(results$ScenarioType, "\n", results$CWRMA)
# Bar order: CWRMA scenarios first, then No CWRMA; mid- before late-century.
results$bar.labels1 <- factor(results$bar.labels1, levels=c("Mid-Century\nCWRMA", "Late-Century\nCWRMA", "Mid-Century\nNo CWRMA","Late-Century\nNo CWRMA"))

# Facet titles carrying each GCM's wet/cool characterization.
results$facet.gcm.name <- results$GCM
results$facet.gcm.name[results$facet.gcm.name == "HadGEM2-ES365"] <- "HadGEM2-ES365 (Drier/Warmer)"
results$facet.gcm.name[results$facet.gcm.name == "CNRM-CM5"] <- "CNRM-CM5 (Wetter/Cooler)"
results$facet.gcm.name[results$facet.gcm.name == "MIROC5"] <- "MIROC5 (Other)"

# Bio thresholds are only used below to pick the subset of important metrics
# (comparison is to reference, so the thresholds themselves are not plotted).
#bio.thresholds <- read.csv("C:/Users/KristineT/SCCWRP/SOC WQIP - Flow Ecology Study - General/Tier2_analysis/08_all_delta_thresholds.csv")
# Scaled bio thresholds (0 to 1):
bio.thresholds <- read.csv("C:/Users/KristineT/SCCWRP/SOC WQIP - Flow Ecology Study - General/Tier2_analysis/08_all_delta_thresholds_scaled.csv")

# Bar plot of median deltaH (p50) for each metric at each site.
sites <- unique(results$Site)
# Restrict to the important metrics listed in the thresholds table.
metrics <- unique(bio.thresholds$metric)
# Pretty axis names, paired with `metrics` BY POSITION.
# NOTE(review): this assumes unique(bio.thresholds$metric) always comes back in
# this exact order -- confirm, or switch to a named lookup keyed on metric code.
metric.names <- c("Dry Season Baseflow Magnitude (cfs)", "Magnitude of Largest Annual Storm (cfs)", "Wet Season Baseflow Magnitude (cfs)", "Spring Recession Duration (days)", "Wet Season Baseflow Duration (days)")

for(i in seq_along(sites)){
  # Subset to site i.
  results.sub <- results %>%
    filter(Site == sites[i])
  # Make one plot per metric.
  for(j in seq_along(metrics)){
    results.sub.metric <- results.sub %>%
      filter(metric == metrics[j])
    # y-axis label.
    ylab.name <- paste0("Change in ", metric.names[j])
    # Faceted bar plot: facets by GCM, bars by scenario type, fill by CWRMA.
    # BUG FIX: the original rebuilt an identical plot inside an
    # `if(metrics[j] == "Q99")` branch -- the promised CSCI/ASCI threshold
    # lines were never added and geom_hline's default colour is already black,
    # so the duplicated block was dead code and has been removed.
    # TODO: add the Q99 CSCI/ASCI threshold lines if still desired.
    barplot <- ggplot(results.sub.metric, aes(x=bar.labels1, y=p50, fill=CWRMA)) +
      facet_wrap(~facet.gcm.name, ncol=3) +
      geom_bar(stat="identity") +
      labs(title = paste0(sites[i]), subtitle=metric.names[j],
           color = "Legend") + ylab(ylab.name) + xlab("") +
      theme(panel.border = element_blank(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
      theme_bw() + theme(legend.position="bottom") +
      theme(axis.text.x = element_text(angle = 45, hjust=1)) +
      geom_hline(yintercept = 0, color="black")
    print(barplot)
    # Save one jpg per site/metric combination.
    file.name2 <- paste0(out.dir, sites[i], "_", metrics[j], "_DeltaH_barplots.jpg")
    ggsave(barplot, filename=file.name2, dpi=300, height=5, width=10)
  }
}
|
### function that will find the "lhs" symbol in the pipeline, given that this function is called from with a pipeline
## https://github.com/tidyverse/magrittr/issues/115#issuecomment-173894787
the_lhs <- function() {
  # Walk the call stack and collect the parent environment of every frame.
  parents <- lapply(sys.frames(), parent.env)
  # Frames created by magrittr's `%>%` have the pipe's enclosing environment
  # as their parent; flag those frames.
  is_magrittr_env <-
    vapply(parents, identical, logical(1), y = environment(`%>%`))
  if (any(is_magrittr_env)) {
    # Take the innermost (most recent) magrittr frame and deparse the `lhs`
    # binding it holds, i.e. the expression that was piped into the caller.
    # When not called from inside a pipeline, the `if` is skipped and the
    # function returns NULL invisibly.
    deparse(get("lhs", sys.frames()[[max(which(is_magrittr_env))]]))
  }
}
# https://stackoverflow.com/questions/26159495/align-multiple-ggplot-graphs-with-and-without-legends
# Align a list of ggplots so their panels share identical column widths, even
# when some plots have legends and others do not. Returns a list of gtables
# (draw them e.g. with gridExtra::grid.arrange).
AlignPlots <- function(pltlist = NULL) {
  # Extract the legend width from a plot's grob tree.
  # NOTE(review): the grob indices ([[8]], widths[[4]]) are layout-dependent
  # and may break under other ggplot2 themes/versions -- confirm before reuse.
  .LegendWidth <- function(x) x$grobs[[8]]$grobs[[1]]$widths[[4]]
  plots.grobs <- lapply(pltlist, ggplotGrob)
  # Element-wise maximum of column widths across all plots.
  max.widths <- do.call(grid::unit.pmax, lapply(plots.grobs, "[[", "widths"))
  legends.widths <- lapply(plots.grobs, .LegendWidth)
  max.legends.width <- base::suppressWarnings(do.call(max, legends.widths))
  # Give every grob the same column widths so panels line up.
  plots.grobs.eq.widths <- lapply(plots.grobs, function(x) {
    x$widths <- max.widths
    x
  })
  # Pad each legend with an empty column so all legends occupy the same width.
  plots.grobs.eq.widths.aligned <- lapply(plots.grobs.eq.widths, function(x) {
    if (is.gtable(x$grobs[[8]])) {
      # BUG FIX: the original called LegendWidth(x), which is undefined; the
      # local helper is named .LegendWidth().
      x$grobs[[8]] <- gtable_add_cols(x$grobs[[8]], unit(abs(diff(c(.LegendWidth(x), max.legends.width))),"mm"))
    }
    x
  })
  plots.grobs.eq.widths.aligned
}
# Translate a legend position keyword ("right", "bottom", "top", "left",
# "none") or a numeric c(x, y) pair into plotly-style legend options.
# Returns list(leg_opts = <list or NULL>, showlegend = <logical>).
legendopts <- function(
  legend_position = "right",
  legend_orientation = NULL
){
  # Use `default` unless the caller supplied an explicit orientation.
  pick_orientation <- function(default) {
    ifelse(is.null(legend_orientation), default, legend_orientation)
  }

  show <- TRUE

  if (is.character(legend_position)) {
    if (legend_position == "bottom") {
      opts <- list(xanchor = "center",
                   x = 0.5,
                   y = -0.2,
                   orientation = pick_orientation("h"))
    } else if (legend_position == "right") {
      opts <- list(yanchor = "center",
                   x = 1.2,
                   y = 0.5,
                   orientation = pick_orientation("v"))
    } else if (legend_position == "top") {
      opts <- list(xanchor = "center",
                   x = 0.5,
                   y = 1.2,
                   orientation = pick_orientation("h"))
    } else if (legend_position == "left") {
      opts <- list(yanchor = "center",
                   x = -1.0,
                   y = 0.5,
                   orientation = pick_orientation("v"))
    } else if (legend_position == "none") {
      # Legend suppressed entirely.
      show <- FALSE
      opts <- NULL
    }
  } else {
    # Numeric position: interpret as explicit (x, y) coordinates.
    opts <- list(x = legend_position[1],
                 y = legend_position[2])
  }

  list(leg_opts = opts, showlegend = show)
}
|
/R/utilities.R
|
permissive
|
SHAESEN2/visR-survival
|
R
| false
| false
| 2,872
|
r
|
### function that will find the "lhs" symbol in the pipeline, given that this function is called from with a pipeline
## https://github.com/tidyverse/magrittr/issues/115#issuecomment-173894787
the_lhs <- function() {
  # Walk the call stack and collect the parent environment of every frame.
  parents <- lapply(sys.frames(), parent.env)
  # Frames created by magrittr's `%>%` have the pipe's enclosing environment
  # as their parent; flag those frames.
  is_magrittr_env <-
    vapply(parents, identical, logical(1), y = environment(`%>%`))
  if (any(is_magrittr_env)) {
    # Take the innermost (most recent) magrittr frame and deparse the `lhs`
    # binding it holds, i.e. the expression that was piped into the caller.
    # When not called from inside a pipeline, the `if` is skipped and the
    # function returns NULL invisibly.
    deparse(get("lhs", sys.frames()[[max(which(is_magrittr_env))]]))
  }
}
# https://stackoverflow.com/questions/26159495/align-multiple-ggplot-graphs-with-and-without-legends
# Align a list of ggplots so their panels share identical column widths, even
# when some plots have legends and others do not. Returns a list of gtables
# (draw them e.g. with gridExtra::grid.arrange).
AlignPlots <- function(pltlist = NULL) {
  # Extract the legend width from a plot's grob tree.
  # NOTE(review): the grob indices ([[8]], widths[[4]]) are layout-dependent
  # and may break under other ggplot2 themes/versions -- confirm before reuse.
  .LegendWidth <- function(x) x$grobs[[8]]$grobs[[1]]$widths[[4]]
  plots.grobs <- lapply(pltlist, ggplotGrob)
  # Element-wise maximum of column widths across all plots.
  max.widths <- do.call(grid::unit.pmax, lapply(plots.grobs, "[[", "widths"))
  legends.widths <- lapply(plots.grobs, .LegendWidth)
  max.legends.width <- base::suppressWarnings(do.call(max, legends.widths))
  # Give every grob the same column widths so panels line up.
  plots.grobs.eq.widths <- lapply(plots.grobs, function(x) {
    x$widths <- max.widths
    x
  })
  # Pad each legend with an empty column so all legends occupy the same width.
  plots.grobs.eq.widths.aligned <- lapply(plots.grobs.eq.widths, function(x) {
    if (is.gtable(x$grobs[[8]])) {
      # BUG FIX: the original called LegendWidth(x), which is undefined; the
      # local helper is named .LegendWidth().
      x$grobs[[8]] <- gtable_add_cols(x$grobs[[8]], unit(abs(diff(c(.LegendWidth(x), max.legends.width))),"mm"))
    }
    x
  })
  plots.grobs.eq.widths.aligned
}
# Translate a legend position keyword ("right", "bottom", "top", "left",
# "none") or a numeric c(x, y) pair into plotly-style legend options.
# Returns list(leg_opts = <list or NULL>, showlegend = <logical>).
legendopts <- function(
  legend_position = "right",
  legend_orientation = NULL
){
  # Use `default` unless the caller supplied an explicit orientation.
  pick_orientation <- function(default) {
    ifelse(is.null(legend_orientation), default, legend_orientation)
  }

  show <- TRUE

  if (is.character(legend_position)) {
    if (legend_position == "bottom") {
      opts <- list(xanchor = "center",
                   x = 0.5,
                   y = -0.2,
                   orientation = pick_orientation("h"))
    } else if (legend_position == "right") {
      opts <- list(yanchor = "center",
                   x = 1.2,
                   y = 0.5,
                   orientation = pick_orientation("v"))
    } else if (legend_position == "top") {
      opts <- list(xanchor = "center",
                   x = 0.5,
                   y = 1.2,
                   orientation = pick_orientation("h"))
    } else if (legend_position == "left") {
      opts <- list(yanchor = "center",
                   x = -1.0,
                   y = 0.5,
                   orientation = pick_orientation("v"))
    } else if (legend_position == "none") {
      # Legend suppressed entirely.
      show <- FALSE
      opts <- NULL
    }
  } else {
    # Numeric position: interpret as explicit (x, y) coordinates.
    opts <- list(x = legend_position[1],
                 y = legend_position[2])
  }

  list(leg_opts = opts, showlegend = show)
}
|
# Dash for R example: three buttons; a callback reports which button was
# clicked most recently by comparing each button's n_clicks_timestamp.
library(dashCoreComponents)
library(dash)
app <- Dash$new()
# Layout: three buttons (timestamps initialised to 0, i.e. "never clicked")
# plus an empty container div that the callback fills in.
app$layout(
  htmlDiv(
    list(
      htmlButton('Button 1', id='btn-1', n_clicks_timestamp=0),
      htmlButton('Button 2', id='btn-2', n_clicks_timestamp=0),
      htmlButton('Button 3', id='btn-3', n_clicks_timestamp=0),
      htmlDiv(id='container-button-timestamp')
    )
  )
)
# Fires whenever any button's timestamp changes; the button with the strictly
# largest timestamp was clicked last.  When no timestamp is strictly largest
# (e.g. all are still 0 at startup), the fallback message is shown.
app$callback(
  output(id = 'container-button-timestamp', property = 'children'),
  params = list(input(id = 'btn-1', property = 'n_clicks_timestamp'),
                input(id = 'btn-2', property = 'n_clicks_timestamp'),
                input(id = 'btn-3', property ='n_clicks_timestamp')),
  function(btn1, btn2, btn3){
    if((btn1) > (btn2) & (btn1) > (btn3)){
      msg = 'Button 1 was most recently clicked'
    } else if((btn2) > (btn1) & (btn2) > (btn3)){
      msg = 'Button 2 was most recently clicked'
    } else if((btn3) > (btn1) & (btn3) > (btn2)){
      msg = 'Button 3 was most recently clicked'
    }
    else{
      msg = 'None of the buttons have been clicked yet'
    }
    # Render each raw timestamp (scientific = FALSE keeps the full epoch-ms
    # integer readable) followed by the message.
    return(htmlDiv(list(
      htmlDiv(sprintf('btn1: %s', format(btn1, scientific = FALSE))),
      htmlDiv(sprintf('btn2: %s', format(btn2, scientific = FALSE))),
      htmlDiv(sprintf('btn3: %s', format(btn3, scientific = FALSE))),
      htmlDiv(msg))
    ))
  }
)
app$run_server()
|
/dash_docs/chapters/dash_core_components/Button/examples/buttontimestamp.R
|
permissive
|
plotly/dash-docs
|
R
| false
| false
| 1,525
|
r
|
# Dash for R example: three buttons; a callback reports which button was
# clicked most recently by comparing each button's n_clicks_timestamp.
library(dashCoreComponents)
library(dash)
app <- Dash$new()
# Layout: three buttons (timestamps initialised to 0, i.e. "never clicked")
# plus an empty container div that the callback fills in.
app$layout(
  htmlDiv(
    list(
      htmlButton('Button 1', id='btn-1', n_clicks_timestamp=0),
      htmlButton('Button 2', id='btn-2', n_clicks_timestamp=0),
      htmlButton('Button 3', id='btn-3', n_clicks_timestamp=0),
      htmlDiv(id='container-button-timestamp')
    )
  )
)
# Fires whenever any button's timestamp changes; the button with the strictly
# largest timestamp was clicked last.  When no timestamp is strictly largest
# (e.g. all are still 0 at startup), the fallback message is shown.
app$callback(
  output(id = 'container-button-timestamp', property = 'children'),
  params = list(input(id = 'btn-1', property = 'n_clicks_timestamp'),
                input(id = 'btn-2', property = 'n_clicks_timestamp'),
                input(id = 'btn-3', property ='n_clicks_timestamp')),
  function(btn1, btn2, btn3){
    if((btn1) > (btn2) & (btn1) > (btn3)){
      msg = 'Button 1 was most recently clicked'
    } else if((btn2) > (btn1) & (btn2) > (btn3)){
      msg = 'Button 2 was most recently clicked'
    } else if((btn3) > (btn1) & (btn3) > (btn2)){
      msg = 'Button 3 was most recently clicked'
    }
    else{
      msg = 'None of the buttons have been clicked yet'
    }
    # Render each raw timestamp (scientific = FALSE keeps the full epoch-ms
    # integer readable) followed by the message.
    return(htmlDiv(list(
      htmlDiv(sprintf('btn1: %s', format(btn1, scientific = FALSE))),
      htmlDiv(sprintf('btn2: %s', format(btn2, scientific = FALSE))),
      htmlDiv(sprintf('btn3: %s', format(btn3, scientific = FALSE))),
      htmlDiv(msg))
    ))
  }
)
app$run_server()
|
# {targets} pipeline targets registering the raw input files.
# format = "file" makes targets hash the file contents, so any downstream
# target depending on these paths is rebuilt when a file changes on disk.
list(
  tar_target(file_mm, "data/in/modelling_matrix.feather", format = "file"),
  tar_target(file_mm_bin, "data/in/lasso_prep.feather", format = "file"),
  tar_target(file_form, "data/in/glm_coefs.feather", format = "file"),
  tar_target(file_cv, "data/in/indeces_annual_splits.feather", format = "file"),
  tar_target(file_cat_lookup, "data/in/category_lookup.feather", format = "file"),
  tar_target(file_feat_lookup, "data/in/feature_lookup.csv", format = "file")
)
|
/_targets_r/targets/files.R
|
no_license
|
LenaNoel/quality_risk_assesment_clinical_trials
|
R
| false
| false
| 471
|
r
|
# {targets} pipeline targets registering the raw input files.
# format = "file" makes targets hash the file contents, so any downstream
# target depending on these paths is rebuilt when a file changes on disk.
list(
  tar_target(file_mm, "data/in/modelling_matrix.feather", format = "file"),
  tar_target(file_mm_bin, "data/in/lasso_prep.feather", format = "file"),
  tar_target(file_form, "data/in/glm_coefs.feather", format = "file"),
  tar_target(file_cv, "data/in/indeces_annual_splits.feather", format = "file"),
  tar_target(file_cat_lookup, "data/in/category_lookup.feather", format = "file"),
  tar_target(file_feat_lookup, "data/in/feature_lookup.csv", format = "file")
)
|
\name{readBins}
\alias{readBins}
\title{
Import bin-level ChIP-seq data
}
\description{
Import and preprocess
all or a subset of bin-level ChIP-seq data, including ChIP data, matched control data,
mappability score, GC content score, and sequence ambiguity score.
}
\usage{
readBins( type = c("chip", "input"), fileName = NULL,
dataType = "unique", rounding = 100, parallel=FALSE, nCore=8 )
}
\arguments{
\item{type}{
Character vector indicating data types to be imported.
This vector can contain \code{"chip"} (ChIP data), \code{"input"} (matched control data),
\code{"M"} (mappability score), \code{"GC"} (GC content score), and \code{"N"} (sequence ambiguity score).
Currently, \code{readBins} permits only the following combinations:
\code{c("chip", "input")}, \code{c("chip", "input", "N")},
\code{c("chip", "input", "M", "GC", "N")}, and \code{c("chip", "M", "GC", "N")}.
Default is \code{c("chip", "input")}.
}
\item{fileName}{
Character vector of file names, each of which matches each element of \code{type}.
\code{type} and \code{fileName} should have the same length and
corresponding elements in two vectors should appear in the same order.
}
\item{dataType}{
How were the reads processed? Possible values are
either \code{"unique"} (only uniquely aligned reads were retained)
or \code{"multi"} (reads aligned to multiple locations were also retained).
}
\item{rounding}{
How are mappability score and GC content score rounded?
Default is 100 and this indicates rounding of mappability score and GC content score
to the nearest hundredth.
}
\item{parallel}{Utilize multiple CPUs for parallel computing using the \code{"parallel"} package?
Possible values are \code{TRUE} (use multiple CPUs)
or \code{FALSE} (do not use multiple CPUs).
Default is \code{FALSE} (do not use multiple CPUs).
}
\item{nCore}{Number of CPUs when parallel computing is utilized.
}
}
\details{
Bin-level ChIP and matched control data can be generated
from the aligned read files for your samples using the method \code{constructBins}.
In \code{mosaics} package companion website, \url{http://www.stat.wisc.edu/~keles/Software/mosaics/},
we provide preprocessed mappability score, GC content score,
and sequence ambiguity score files for diverse reference genomes.
Please check the website and the vignette for further details.
The imported data type constrains the analysis that can be implemented.
If \code{type=c("chip", "input")} or \code{c("chip", "input", "N")},
only two-sample analysis without using mappability and GC content is allowed.
For \code{type=c("chip", "input", "M", "GC", "N")},
user can do the one- or two-sample analysis.
If \code{type=c("chip", "M", "GC", "N")}, only one-sample analysis is permitted.
See help page of \code{mosaicsFit}.
When the data contains multiple chromosomes,
parallel computing can be utilized for faster preprocessing
if \code{parallel=TRUE} and \code{parallel} package is loaded.
\code{nCore} determines number of CPUs used for parallel computing.
}
\value{
Construct \code{BinData} class object.
}
\references{
Kuan, PF, D Chung, G Pan, JA Thomson, R Stewart, and S Keles (2011),
"A Statistical Framework for the Analysis of ChIP-Seq Data",
\emph{Journal of the American Statistical Association}, Vol. 106, pp. 891-903.
Chung, D, Zhang Q, and Keles S (2014), "MOSAiCS-HMM: A model-based approach for detecting regions of histone modifications from ChIP-seq data", Datta S and Nettleton D (eds.), \emph{Statistical Analysis of Next Generation Sequencing Data}, Springer.
}
\author{ Dongjun Chung, Pei Fen Kuan, Rene Welch, Sunduz Keles }
\seealso{
\code{\link{constructBins}}, \code{\link{mosaicsFit}}, \code{\linkS4class{BinData}}.
}
\examples{
\dontrun{
library(mosaicsExample)
constructBins( infile=system.file( file.path("extdata","wgEncodeSydhTfbsGm12878Stat1StdAlnRep1_chr22_sorted.bam"), package="mosaicsExample"),
fileFormat="bam", outfileLoc="~/",
PET=FALSE, fragLen=200, binSize=200, capping=0 )
constructBins( infile=system.file( file.path("extdata","wgEncodeSydhTfbsGm12878InputStdAlnRep1_chr22_sorted.bam"), package="mosaicsExample"),
fileFormat="bam", outfileLoc="~/",
PET=FALSE, fragLen=200, binSize=200, capping=0 )
binTFBS <- readBins( type=c("chip","input"),
fileName=c( "~/wgEncodeSydhTfbsGm12878Stat1StdAlnRep1_chr22_sorted.bam_fragL200_bin200.txt",
"~/wgEncodeSydhTfbsGm12878InputStdAlnRep1_chr22_sorted.bam_fragL200_bin200.txt" ) )
}
}
\keyword{models}
\keyword{methods}
|
/man/readBins.Rd
|
no_license
|
keleslab/mosaics
|
R
| false
| false
| 4,671
|
rd
|
\name{readBins}
\alias{readBins}
\title{
Import bin-level ChIP-seq data
}
\description{
Import and preprocess
all or a subset of bin-level ChIP-seq data, including ChIP data, matched control data,
mappability score, GC content score, and sequence ambiguity score.
}
\usage{
readBins( type = c("chip", "input"), fileName = NULL,
dataType = "unique", rounding = 100, parallel=FALSE, nCore=8 )
}
\arguments{
\item{type}{
Character vector indicating data types to be imported.
This vector can contain \code{"chip"} (ChIP data), \code{"input"} (matched control data),
\code{"M"} (mappability score), \code{"GC"} (GC content score), and \code{"N"} (sequence ambiguity score).
Currently, \code{readBins} permits only the following combinations:
\code{c("chip", "input")}, \code{c("chip", "input", "N")},
\code{c("chip", "input", "M", "GC", "N")}, and \code{c("chip", "M", "GC", "N")}.
Default is \code{c("chip", "input")}.
}
\item{fileName}{
Character vector of file names, each of which matches each element of \code{type}.
\code{type} and \code{fileName} should have the same length and
corresponding elements in two vectors should appear in the same order.
}
\item{dataType}{
How were the reads processed? Possible values are
either \code{"unique"} (only uniquely aligned reads were retained)
or \code{"multi"} (reads aligned to multiple locations were also retained).
}
\item{rounding}{
How are mappability score and GC content score rounded?
Default is 100 and this indicates rounding of mappability score and GC content score
to the nearest hundredth.
}
\item{parallel}{Utilize multiple CPUs for parallel computing using the \code{"parallel"} package?
Possible values are \code{TRUE} (use multiple CPUs)
or \code{FALSE} (do not use multiple CPUs).
Default is \code{FALSE} (do not use multiple CPUs).
}
\item{nCore}{Number of CPUs when parallel computing is utilized.
}
}
\details{
Bin-level ChIP and matched control data can be generated
from the aligned read files for your samples using the method \code{constructBins}.
In \code{mosaics} package companion website, \url{http://www.stat.wisc.edu/~keles/Software/mosaics/},
we provide preprocessed mappability score, GC content score,
and sequence ambiguity score files for diverse reference genomes.
Please check the website and the vignette for further details.
The imported data type constrains the analysis that can be implemented.
If \code{type=c("chip", "input")} or \code{c("chip", "input", "N")},
only two-sample analysis without using mappability and GC content is allowed.
For \code{type=c("chip", "input", "M", "GC", "N")},
user can do the one- or two-sample analysis.
If \code{type=c("chip", "M", "GC", "N")}, only one-sample analysis is permitted.
See help page of \code{mosaicsFit}.
When the data contains multiple chromosomes,
parallel computing can be utilized for faster preprocessing
if \code{parallel=TRUE} and \code{parallel} package is loaded.
\code{nCore} determines number of CPUs used for parallel computing.
}
\value{
Construct \code{BinData} class object.
}
\references{
Kuan, PF, D Chung, G Pan, JA Thomson, R Stewart, and S Keles (2011),
"A Statistical Framework for the Analysis of ChIP-Seq Data",
\emph{Journal of the American Statistical Association}, Vol. 106, pp. 891-903.
Chung, D, Zhang Q, and Keles S (2014), "MOSAiCS-HMM: A model-based approach for detecting regions of histone modifications from ChIP-seq data", Datta S and Nettleton D (eds.), \emph{Statistical Analysis of Next Generation Sequencing Data}, Springer.
}
\author{ Dongjun Chung, Pei Fen Kuan, Rene Welch, Sunduz Keles }
\seealso{
\code{\link{constructBins}}, \code{\link{mosaicsFit}}, \code{\linkS4class{BinData}}.
}
\examples{
\dontrun{
library(mosaicsExample)
constructBins( infile=system.file( file.path("extdata","wgEncodeSydhTfbsGm12878Stat1StdAlnRep1_chr22_sorted.bam"), package="mosaicsExample"),
fileFormat="bam", outfileLoc="~/",
PET=FALSE, fragLen=200, binSize=200, capping=0 )
constructBins( infile=system.file( file.path("extdata","wgEncodeSydhTfbsGm12878InputStdAlnRep1_chr22_sorted.bam"), package="mosaicsExample"),
fileFormat="bam", outfileLoc="~/",
PET=FALSE, fragLen=200, binSize=200, capping=0 )
binTFBS <- readBins( type=c("chip","input"),
fileName=c( "~/wgEncodeSydhTfbsGm12878Stat1StdAlnRep1_chr22_sorted.bam_fragL200_bin200.txt",
"~/wgEncodeSydhTfbsGm12878InputStdAlnRep1_chr22_sorted.bam_fragL200_bin200.txt" ) )
}
}
\keyword{models}
\keyword{methods}
|
args = commandArgs(trailingOnly=T)
args = paste(args, collapse = "")
args = unlist(strsplit(args, ";"))
arguments.list = "
seurat.addr.arg = args[1]
feature_genes.arg = args[2]
cell.types.arg = args[3]
save.to.dir.arg = args[4]
ident.set.arg = args[5]
type.to.colours.arg = args[6]
"
expected_arguments = unlist(strsplit(arguments.list, "\n"))
expected_arguments = expected_arguments[!(expected_arguments == "")]
if(length(args) != length(expected_arguments)){
error.msg = sprintf('This pipeline requires %s parameters', as.character(length(expected_arguments)))
expected_arguments = paste(unlist(lapply(strsplit(expected_arguments, ".arg"), "[", 1)), collapse = "\n")
stop(sprintf('This pipeline requires %s parameters: '))
}
eval(parse(text = arguments.list))
for(n in 1:length(expected_arguments)){
argument = expected_arguments[n]
argument = gsub(pattern=" ", replacement="", x=argument)
argument.name = unlist(strsplit(argument, "="))[1]
variable.name = gsub(pattern=".arg", replacement="", argument.name)
argument.content = eval(parse(text = argument.name))
eval(parse(text = argument.content))
if (!exists(variable.name)){
stop(sprintf("Argument %s not passed. Stopping ... ", variable.name))
}
}
library(Seurat)
library(plyr)
library(dplyr)
library(reshape)
library(ggplot2)
library(RColorBrewer)
source("../../tools/bunddle_utils.R")
seurat.addr = file.path("../../data", seurat.addr)
cell.types = file.path('../../resources', cell.types)
feature_genes = file.path('../../resources', feature_genes)
type.to.colours = file.path("../../resources", type.to.colours)
cell.types.file = file(cell.types, "r")
cell.types = readLines(cell.types.file)
close(cell.types.file)
feature_genes.file = file(feature_genes, "r")
feature_genes = readLines(feature_genes.file)
close(feature_genes.file)
################################################################################################################
# a plotting function for indexed legend
# Build a standalone indexed legend as a ggplot object: one coloured symbol
# per label, numbered 1..n, laid out top-to-bottom in `ncols` columns, with
# the label text printed beside its index. Returns the ggplot object.
# NOTE(review): `left.limit` is accepted but unused -- kept for interface
# compatibility with existing callers.
plot.indexed.legend = function(label.vector, color.vector, ncols = 2, left.limit = 3.4, symbol.size = 8, text.size = 10, padH = 1, padV = 1, padRight = 0){
  if (length(label.vector) != length(color.vector)){
    stop("number of labels is different from number colors\nAdvice: learn to count!")
  }
  # BUG FIX: the original tested `length(ncol)` -- `ncol` is the base R
  # function (always length 1), so the guard could never fire as intended.
  # Compare the requested column count against the number of labels instead.
  if (ncols > length(label.vector)){
    stop("You cannot have more columns than labels\nSolution: Learn to count")
  }
  indices.vector = 1:length(label.vector)
  label.no = length(label.vector)
  nrows = ceiling(label.no / ncols)
  # Grid coordinates: fill columns top-to-bottom, left-to-right.
  legend.frame = data.frame(X = rep(0, label.no), Y = rep(0, label.no), CS = color.vector, Txt = label.vector)
  legend.frame$X = rep(1:ncols, each=nrows)[1:nrow(legend.frame)]
  legend.frame$Y = rep(nrows:1, times = ncols)[1:nrow(legend.frame)]
  Xrange = range(legend.frame$X)
  Yrange = range(legend.frame$Y)
  plot.obj = ggplot(data = legend.frame, aes(x = X, y = Y))
  plot.obj = plot.obj + geom_point(size = symbol.size, colour = color.vector)
  # Pad the axes so symbols and text are not clipped at the panel edge.
  plot.obj = plot.obj + scale_x_continuous(limits = c(Xrange[1] - padRight, Xrange[2] + padH))
  plot.obj = plot.obj + scale_y_continuous(limits = c(Yrange[1] - padV, Yrange[2] + padV))
  plot.obj = plot.obj + theme_void()
  # Index number inside each symbol, label text just to its right.
  plot.obj = plot.obj + annotate("text", x=legend.frame$X, y = legend.frame$Y, label = indices.vector, size = text.size)
  plot.obj = plot.obj + annotate("text", x=legend.frame$X+.1, y = legend.frame$Y, label=legend.frame$Txt, hjust = 0, size = text.size)
  return(plot.obj)
}
# plotting function for dimensionaly-reduced data to label population by a round indexed label
# Scatter plot of dimensionally-reduced data (e.g. tSNE/UMAP coordinates),
# coloured by population label, with each population optionally marked by a
# round indexed symbol placed at its median coordinates.
# Returns the ggplot object.
dr.plot = function(point.labels, dr1, dr2, dr1.name, dr2.name, no.legend = F, plt.lb.sz = 5, txt.lb.size = 3, pt.size = .2, random_state = 2, use.cols = NULL, use.labels = NULL, limits = NULL, annotate.plot = T){
  # "Cell Labels" becomes column Cell.Labels via data.frame name checking.
  df.dr = data.frame("Cell Labels" = point.labels, DR1 = dr1, DR2 = dr2)
  # Label order: sorted unique labels unless the caller supplies an order.
  if(is.null(use.labels)){
    p.labels = sort(unique(as.vector(point.labels)))
  }
  else{
    p.labels = use.labels
  }
  df.dr$Cell.Labels = factor(df.dr$Cell.Labels, levels=p.labels)
  # Median DR1/DR2 per label (columns 2:3 of df.dr) -- used to position the
  # indexed annotation symbols.
  p.labels.medians = aggregate(df.dr[, 2:3], list(df.dr$Cell.Labels), median)
  # Prefix each label with its index so the legend reads "1 <label>", etc.
  df.dr$Cell.Labels = mapvalues(x = df.dr$Cell.Labels, from = p.labels, to = paste(1:length(p.labels), p.labels, sep = " "))
  # Colours: caller-supplied, or a reproducible random sample from "Paired".
  if(is.null(use.cols)){
    set.seed(random_state)
    plt.colours = sample(colorRampPalette(brewer.pal(12, "Paired"))(length(p.labels)))
  }else{
    plt.colours = use.cols
  }
  index.map = 1:length(p.labels)
  plot.obj = ggplot(data = df.dr, aes(x = DR1, y = DR2, color = Cell.Labels))
  plot.obj = plot.obj + geom_point(size = pt.size)
  plot.obj = plot.obj + scale_color_manual(values=plt.colours)
  # Optional round indexed markers at each population's median position.
  if(annotate.plot){
    plot.obj = plot.obj + geom_point(data=p.labels.medians,aes(x = DR1, y = DR2), colour = "gray", size = plt.lb.sz, fill = plt.colours, alpha = .5, pch = 21)
    plot.obj = plot.obj + annotate("text", x=p.labels.medians$DR1, y = p.labels.medians$DR2, label = index.map, size = txt.lb.size)
  }
  if (no.legend){
    plot.obj = plot.obj + theme(legend.position="none")
  }else{
    # Enlarge legend symbols so small plot points stay readable in the key.
    plot.obj = plot.obj + guides(color = guide_legend(override.aes = list(size=5)))
  }
  plot.obj = plot.obj + xlab(dr1.name) + ylab(dr2.name)
  # Optional fixed axis limits: c(xmin, xmax, ymin, ymax).
  if(!is.null(limits)){
    X0 = limits[1]; X1 = limits[2]; Y0 = limits[3]; Y1 = limits[4];
    plot.obj = plot.obj + scale_x_continuous(limits = c(X0, X1))
    plot.obj = plot.obj + scale_y_continuous(limits = c(Y0, Y1))
  }
  return(plot.obj)
}
################################################################################################################
# Main workflow: loads the Seurat object, restricts it to the requested cell
# types (singlets only), trains a random-forest classifier via an external
# Python script on the feature genes, and plots the confusion matrix.
# load seurat object
print("loading data ...")
seurat.obj = readRDS(seurat.addr)
print("Loaded data")
# set the clustering identity
seurat.obj = SetAllIdent(object=seurat.obj, id = ident.set)
# subset data on cell types
seurat.obj = SubsetData(object=seurat.obj, ident.use=cell.types)
# select on singlets
seurat.obj = SetAllIdent(object=seurat.obj, id = "doublets")
seurat.obj = SubsetData(object=seurat.obj, ident.use=c("Singlet"))
seurat.obj = SetAllIdent(object=seurat.obj, id = ident.set)
# normalize data (log-normalize with a scale factor of 10000)
print("Normalizing data ...")
seurat.obj = NormalizeData(object = seurat.obj, normalization.method = "LogNormalize", scale.factor = 10000)
# check that all genes are in the dataset; missing genes are only reported
print("check that genes are in the dataset")
if(!(all(feature_genes %in% rownames(seurat.obj@data)))){
not.found = feature_genes[!(feature_genes %in% rownames(seurat.obj@data))]
print(not.found)
}
# check for duplicates among the feature genes and de-duplicate them
print("check for duplicates")
if(length(feature_genes) != length(unique(feature_genes))){
duplicates = names(table(feature_genes)[table(feature_genes) > 1])
duplicates = paste(duplicates, collapse = ", ")
print(sprintf("Duplicates found: %s", duplicates))
print("This will not affect the workflow, but be aware the heat map will have a smaller genes than expected.")
feature_genes = unique(feature_genes)
}
# create folder for saving the results
print("creating folders")
dir.create(save.to.dir)
# create folder to save working material; wiped first if it already exists
material_folder = file.path(save.to.dir, "material")
unlink(material_folder, recursive=T, force=T)
dir.create(material_folder)
# subsetting seurat object so that we do not get a 'problem too large' error
# (keep at most 1000 cells per identity class)
seurat.obj = SetAllIdent(seurat.obj, id="cell.labels")
seurat.obj = SubsetData(seurat.obj, max.cells.per.ident = 1000)
# write the cluster labels to disk; colours come from the optional mapping
# file, otherwise they are sampled from the "Paired" brewer palette
if (!is.na(type.to.colours)){
type.to.colour = read.csv(type.to.colours)
filter.key = type.to.colour$CellTypes %in% as.vector(unique(seurat.obj@ident))
cell.labels = as.vector(type.to.colour$CellTypes[filter.key])
cell.colours = as.vector(type.to.colour$Colours[filter.key])
}else{
cell.labels = sort(as.vector(unique(seurat.obj@ident)))
cell.colours = sample(colorRampPalette(brewer.pal(12, "Paired"))(length(cell.labels)))
}
labels = data.frame(Labels = as.vector(seurat.obj@ident))
labels$Colours = mapvalues(x=labels$Labels, from=cell.labels, to=cell.colours)
write.csv(labels, file.path(material_folder, "labels.csv"), row.names = F)
# write the feature data to disk
print("writing data.csv")
# NOTE(review): 'matrix' shadows base::matrix as a data object; matrix(...)
# calls below still resolve to the base function (R skips non-function
# bindings when looking up a call), but a different name would be clearer.
matrix <- as.matrix(seurat.obj@data)
# NOTE(review): 'feature_matrix' is computed but never used -- data.csv is
# written from the full expression matrix. Confirm whether the subset
# restricted to feature_genes was meant to be written instead.
feature_matrix <- subset(matrix, rownames(matrix) %in% feature_genes)
x.data <- as.data.frame(t(matrix))
write.csv(x.data, file.path(material_folder, "data.csv"), row.names = T)
# run the random forest classifier and get the confusion matrix
# NOTE(review): 'python.addr' is not defined anywhere in this script --
# presumably it is provided by the sourced tools file; verify before running.
print("running random forest classifier")
command = paste(python.addr, sprintf("random_forest_classifier.py %s", save.to.dir), sep = " ")
system(command, wait = T)
print("plot the confusion matrix")
cnf_matrix = read.csv(file.path(material_folder, "confusion_matrix.csv"), check.names = F)
# drop the first column (assumed to hold row labels -- verify against the
# csv written by the Python script)
cnf_matrix = cnf_matrix[, -c(1)]
confusion = expand.grid(Actual = colnames(cnf_matrix), Predicted = colnames(cnf_matrix))
# normalise each column to frequencies; all-zero columns are divided by 1.0
# to avoid division by zero
cnf_matrix_colSums = colSums(cnf_matrix)
cnf_matrix_colSums[cnf_matrix_colSums == 0] = 1.0
cnf_matrix_colSums_matrix = matrix(ncol = length(cnf_matrix_colSums), nrow = length(cnf_matrix_colSums))
cnf_matrix_colSums_matrix[] = cnf_matrix_colSums
cnf_matrix = cnf_matrix / t(cnf_matrix_colSums_matrix)
confusion$Frequency = rapply(cnf_matrix, c)
confusion$Actual = factor(as.vector(confusion$Actual), levels = cell.labels)
confusion$Predicted = factor(as.vector(confusion$Predicted), levels = rev(cell.labels))
confusion.plot = ggplot(data = confusion, aes(x = Actual, y = Predicted)) + geom_tile(aes(fill = Frequency)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + scale_fill_gradient(low = "lightblue", high = "darkred")
pdf(file.path(save.to.dir, "confusion_matrix.pdf"), width = 14, height = 14)
print(confusion.plot)
dev.off()
print("Ended beautifully.")
|
/pipelines/12_gene_discriminatory_power_analysis/gene_discriminatory_power_analysis.R
|
no_license
|
dezember/FCA_liver
|
R
| false
| false
| 9,720
|
r
|
# ---- command-line argument parsing ------------------------------------------
# Arguments arrive as ";"-separated R assignment expressions, e.g.
# "seurat.addr='data.RDS';feature_genes='genes.txt';...". Each one is first
# bound to a *.arg variable and then eval'd to create the final variable
# (seurat.addr, feature_genes, ...).
args = commandArgs(trailingOnly = T)
args = paste(args, collapse = "")
args = unlist(strsplit(args, ";"))
arguments.list = "
seurat.addr.arg = args[1]
feature_genes.arg = args[2]
cell.types.arg = args[3]
save.to.dir.arg = args[4]
ident.set.arg = args[5]
type.to.colours.arg = args[6]
"
expected_arguments = unlist(strsplit(arguments.list, "\n"))
expected_arguments = expected_arguments[!(expected_arguments == "")]
if(length(args) != length(expected_arguments)){
  # Bug fix: the original called sprintf('... %s ...') with no value to
  # substitute, which itself raises "too few arguments", and the prepared
  # message (error.msg / the list of parameter names) was never shown.
  parameter.names = paste(unlist(lapply(strsplit(expected_arguments, ".arg", fixed = TRUE), "[", 1)), collapse = "\n")
  stop(sprintf("This pipeline requires %s parameters:\n%s", as.character(length(expected_arguments)), parameter.names))
}
# bind args[1]..args[6] to the *.arg variables
eval(parse(text = arguments.list))
for(n in seq_along(expected_arguments)){
  argument = expected_arguments[n]
  argument = gsub(pattern = " ", replacement = "", x = argument)
  argument.name = unlist(strsplit(argument, "="))[1]
  variable.name = gsub(pattern = ".arg", replacement = "", argument.name, fixed = TRUE)
  # each argument's content is itself an R assignment expression; evaluating
  # it creates the target variable (e.g. seurat.addr)
  argument.content = eval(parse(text = argument.name))
  eval(parse(text = argument.content))
  if (!exists(variable.name)){
    stop(sprintf("Argument %s not passed. Stopping ... ", variable.name))
  }
}
library(Seurat)
library(plyr)
library(dplyr)
library(reshape)
library(ggplot2)
library(RColorBrewer)
# shared helper functions from the project tools directory
source("../../tools/bunddle_utils.R")
# resolve user-supplied file names against the project data/resources folders
seurat.addr = file.path("../../data", seurat.addr)
cell.types = file.path('../../resources', cell.types)
feature_genes = file.path('../../resources', feature_genes)
type.to.colours = file.path("../../resources", type.to.colours)
# read the requested cell types, one per line
cell.types.file = file(cell.types, "r")
cell.types = readLines(cell.types.file)
close(cell.types.file)
# read the feature gene names, one per line
feature_genes.file = file(feature_genes, "r")
feature_genes = readLines(feature_genes.file)
close(feature_genes.file)
################################################################################################################
# a plotting function for indexed legend
# Builds a stand-alone ggplot "legend" panel: each label is drawn as a
# coloured disc carrying its numeric index, with the label text to its right,
# laid out column-major over 'ncols' columns. Returns the ggplot object.
#
# label.vector            character vector of labels
# color.vector            colours, same length as label.vector
# ncols                   number of legend columns
# symbol.size / text.size point and text sizes
# padH / padV / padRight  padding added around the legend area
plot.indexed.legend = function(label.vector, color.vector, ncols = 2, left.limit = 3.4, symbol.size = 8, text.size = 10, padH = 1, padV = 1, padRight = 0){
  if (length(label.vector) != length(color.vector)){
    stop("number of labels is different from number colors\nAdvice: learn to count!")
  }
  # Bug fix: the original tested length(ncol) -- the base *function* ncol,
  # whose length is 1 -- instead of the 'ncols' argument, so this guard
  # effectively never fired.
  if (ncols > length(label.vector)){
    stop("You cannot have more columns than labels\nSolution: Learn to count")
  }
  indices.vector = 1:length(label.vector)
  label.no = length(label.vector)
  nrows = ceiling(label.no / ncols)
  legend.frame = data.frame(X = rep(0, label.no), Y = rep(0, label.no), CS = color.vector, Txt = label.vector)
  # column-major layout: fill each column top-to-bottom
  legend.frame$X = rep(1:ncols, each = nrows)[1:nrow(legend.frame)]
  legend.frame$Y = rep(nrows:1, times = ncols)[1:nrow(legend.frame)]
  Xrange = range(legend.frame$X)
  Yrange = range(legend.frame$Y)
  plot.obj = ggplot(data = legend.frame, aes(x = X, y = Y))
  plot.obj = plot.obj + geom_point(size = symbol.size, colour = color.vector)
  plot.obj = plot.obj + scale_x_continuous(limits = c(Xrange[1] - padRight, Xrange[2] + padH))
  plot.obj = plot.obj + scale_y_continuous(limits = c(Yrange[1] - padV, Yrange[2] + padV))
  plot.obj = plot.obj + theme_void()
  # index number on the disc, then the label text slightly to the right
  plot.obj = plot.obj + annotate("text", x = legend.frame$X, y = legend.frame$Y, label = indices.vector, size = text.size)
  plot.obj = plot.obj + annotate("text", x = legend.frame$X + .1, y = legend.frame$Y, label = legend.frame$Txt, hjust = 0, size = text.size)
  return(plot.obj)
}
# plotting function for dimensionaly-reduced data to label population by a round indexed label
# dr.plot draws a 2D scatter of cells in a dimensionality-reduced space
# coloured by population, optionally marking each population's median
# position with an indexed disc. Returns the assembled ggplot object.
#
# point.labels        population label per cell
# dr1, dr2            the two reduced coordinates (same length as point.labels)
# dr1.name, dr2.name  axis titles
# no.legend           suppress the colour legend when TRUE
# plt.lb.sz / txt.lb.size / pt.size  sizes of label discs, index text, points
# random_state        seed used when colours are sampled
# use.cols / use.labels  optional fixed colours / fixed label ordering
# limits              optional c(xmin, xmax, ymin, ymax) axis limits
# annotate.plot       add the indexed median discs when TRUE
dr.plot = function(point.labels, dr1, dr2, dr1.name, dr2.name, no.legend = F, plt.lb.sz = 5, txt.lb.size = 3, pt.size = .2, random_state = 2, use.cols = NULL, use.labels = NULL, limits = NULL, annotate.plot = T){
df.dr = data.frame("Cell Labels" = point.labels, DR1 = dr1, DR2 = dr2)
if(is.null(use.labels)){
p.labels = sort(unique(as.vector(point.labels)))
}
else{
p.labels = use.labels
}
df.dr$Cell.Labels = factor(df.dr$Cell.Labels, levels=p.labels)
# median DR1/DR2 per population, used to anchor the index discs
p.labels.medians = aggregate(df.dr[, 2:3], list(df.dr$Cell.Labels), median)
# prefix each label with its index so the legend reads "1 <label>", ...
df.dr$Cell.Labels = mapvalues(x = df.dr$Cell.Labels, from = p.labels, to = paste(1:length(p.labels), p.labels, sep = " "))
if(is.null(use.cols)){
set.seed(random_state)
plt.colours = sample(colorRampPalette(brewer.pal(12, "Paired"))(length(p.labels)))
}else{
plt.colours = use.cols
}
index.map = 1:length(p.labels)
plot.obj = ggplot(data = df.dr, aes(x = DR1, y = DR2, color = Cell.Labels))
plot.obj = plot.obj + geom_point(size = pt.size)
plot.obj = plot.obj + scale_color_manual(values=plt.colours)
if(annotate.plot){
# semi-transparent disc plus index number at each population median
plot.obj = plot.obj + geom_point(data=p.labels.medians,aes(x = DR1, y = DR2), colour = "gray", size = plt.lb.sz, fill = plt.colours, alpha = .5, pch = 21)
plot.obj = plot.obj + annotate("text", x=p.labels.medians$DR1, y = p.labels.medians$DR2, label = index.map, size = txt.lb.size)
}
if (no.legend){
plot.obj = plot.obj + theme(legend.position="none")
}else{
plot.obj = plot.obj + guides(color = guide_legend(override.aes = list(size=5)))
}
plot.obj = plot.obj + xlab(dr1.name) + ylab(dr2.name)
if(!is.null(limits)){
X0 = limits[1]; X1 = limits[2]; Y0 = limits[3]; Y1 = limits[4];
plot.obj = plot.obj + scale_x_continuous(limits = c(X0, X1))
plot.obj = plot.obj + scale_y_continuous(limits = c(Y0, Y1))
}
return(plot.obj)
}
################################################################################################################
# Main workflow: loads the Seurat object, restricts it to the requested cell
# types (singlets only), trains a random-forest classifier via an external
# Python script on the feature genes, and plots the confusion matrix.
# load seurat object
print("loading data ...")
seurat.obj = readRDS(seurat.addr)
print("Loaded data")
# set the clustering identity
seurat.obj = SetAllIdent(object=seurat.obj, id = ident.set)
# subset data on cell types
seurat.obj = SubsetData(object=seurat.obj, ident.use=cell.types)
# select on singlets
seurat.obj = SetAllIdent(object=seurat.obj, id = "doublets")
seurat.obj = SubsetData(object=seurat.obj, ident.use=c("Singlet"))
seurat.obj = SetAllIdent(object=seurat.obj, id = ident.set)
# normalize data (log-normalize with a scale factor of 10000)
print("Normalizing data ...")
seurat.obj = NormalizeData(object = seurat.obj, normalization.method = "LogNormalize", scale.factor = 10000)
# check that all genes are in the dataset; missing genes are only reported
print("check that genes are in the dataset")
if(!(all(feature_genes %in% rownames(seurat.obj@data)))){
not.found = feature_genes[!(feature_genes %in% rownames(seurat.obj@data))]
print(not.found)
}
# check for duplicates among the feature genes and de-duplicate them
print("check for duplicates")
if(length(feature_genes) != length(unique(feature_genes))){
duplicates = names(table(feature_genes)[table(feature_genes) > 1])
duplicates = paste(duplicates, collapse = ", ")
print(sprintf("Duplicates found: %s", duplicates))
print("This will not affect the workflow, but be aware the heat map will have a smaller genes than expected.")
feature_genes = unique(feature_genes)
}
# create folder for saving the results
print("creating folders")
dir.create(save.to.dir)
# create folder to save working material; wiped first if it already exists
material_folder = file.path(save.to.dir, "material")
unlink(material_folder, recursive=T, force=T)
dir.create(material_folder)
# subsetting seurat object so that we do not get a 'problem too large' error
# (keep at most 1000 cells per identity class)
seurat.obj = SetAllIdent(seurat.obj, id="cell.labels")
seurat.obj = SubsetData(seurat.obj, max.cells.per.ident = 1000)
# write the cluster labels to disk; colours come from the optional mapping
# file, otherwise they are sampled from the "Paired" brewer palette
if (!is.na(type.to.colours)){
type.to.colour = read.csv(type.to.colours)
filter.key = type.to.colour$CellTypes %in% as.vector(unique(seurat.obj@ident))
cell.labels = as.vector(type.to.colour$CellTypes[filter.key])
cell.colours = as.vector(type.to.colour$Colours[filter.key])
}else{
cell.labels = sort(as.vector(unique(seurat.obj@ident)))
cell.colours = sample(colorRampPalette(brewer.pal(12, "Paired"))(length(cell.labels)))
}
labels = data.frame(Labels = as.vector(seurat.obj@ident))
labels$Colours = mapvalues(x=labels$Labels, from=cell.labels, to=cell.colours)
write.csv(labels, file.path(material_folder, "labels.csv"), row.names = F)
# write the feature data to disk
print("writing data.csv")
# NOTE(review): 'matrix' shadows base::matrix as a data object; matrix(...)
# calls below still resolve to the base function (R skips non-function
# bindings when looking up a call), but a different name would be clearer.
matrix <- as.matrix(seurat.obj@data)
# NOTE(review): 'feature_matrix' is computed but never used -- data.csv is
# written from the full expression matrix. Confirm whether the subset
# restricted to feature_genes was meant to be written instead.
feature_matrix <- subset(matrix, rownames(matrix) %in% feature_genes)
x.data <- as.data.frame(t(matrix))
write.csv(x.data, file.path(material_folder, "data.csv"), row.names = T)
# run the random forest classifier and get the confusion matrix
# NOTE(review): 'python.addr' is not defined anywhere in this script --
# presumably it is provided by the sourced tools file; verify before running.
print("running random forest classifier")
command = paste(python.addr, sprintf("random_forest_classifier.py %s", save.to.dir), sep = " ")
system(command, wait = T)
print("plot the confusion matrix")
cnf_matrix = read.csv(file.path(material_folder, "confusion_matrix.csv"), check.names = F)
# drop the first column (assumed to hold row labels -- verify against the
# csv written by the Python script)
cnf_matrix = cnf_matrix[, -c(1)]
confusion = expand.grid(Actual = colnames(cnf_matrix), Predicted = colnames(cnf_matrix))
# normalise each column to frequencies; all-zero columns are divided by 1.0
# to avoid division by zero
cnf_matrix_colSums = colSums(cnf_matrix)
cnf_matrix_colSums[cnf_matrix_colSums == 0] = 1.0
cnf_matrix_colSums_matrix = matrix(ncol = length(cnf_matrix_colSums), nrow = length(cnf_matrix_colSums))
cnf_matrix_colSums_matrix[] = cnf_matrix_colSums
cnf_matrix = cnf_matrix / t(cnf_matrix_colSums_matrix)
confusion$Frequency = rapply(cnf_matrix, c)
confusion$Actual = factor(as.vector(confusion$Actual), levels = cell.labels)
confusion$Predicted = factor(as.vector(confusion$Predicted), levels = rev(cell.labels))
confusion.plot = ggplot(data = confusion, aes(x = Actual, y = Predicted)) + geom_tile(aes(fill = Frequency)) + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + scale_fill_gradient(low = "lightblue", high = "darkred")
pdf(file.path(save.to.dir, "confusion_matrix.pdf"), width = 14, height = 14)
print(confusion.plot)
dev.off()
print("Ended beautifully.")
|
library(spatstat)
### Name: CDF
### Title: Cumulative Distribution Function From Kernel Density Estimate
### Aliases: CDF CDF.density
### Keywords: nonparametric univar
### ** Examples
# kernel density estimate of 10 uniform(0, 1) draws
b <- density(runif(10))
# convert the density estimate into a cumulative distribution function
f <- CDF(b)
# evaluate the estimated CDF at 0.5, then plot it
f(0.5)
plot(f)
|
/data/genthat_extracted_code/spatstat/examples/CDF.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 255
|
r
|
library(spatstat)
### Name: CDF
### Title: Cumulative Distribution Function From Kernel Density Estimate
### Aliases: CDF CDF.density
### Keywords: nonparametric univar
### ** Examples
# kernel density estimate of 10 uniform(0, 1) draws
b <- density(runif(10))
# convert the density estimate into a cumulative distribution function
f <- CDF(b)
# evaluate the estimated CDF at 0.5, then plot it
f(0.5)
plot(f)
|
# Server codes
library(shiny)
# Server logic: builds a one-row data frame from the user's inputs, fits a
# linear model of medical charges on the insurance data and, when the user
# presses 'predict', predicts the charge for the entered profile.
shinyServer(function(input, output) {
  # Reactive one-row data frame describing the person entered in the UI.
  test <- reactive({
    age <- as.integer(input$age)
    sex <- as.character(input$sex)
    bmi <- as.integer(input$bmi)
    children <- as.integer(input$children)
    smoker <- as.character(input$smoke)
    region <- as.character(input$region)
    age2 <- as.integer(input$age ^ 2)                    # quadratic age term
    bmi30 <- as.integer(ifelse(input$bmi >= 30 , 1, 0))  # obesity indicator
    # (the original also built an unused 'fest' matrix via cbind; removed)
    test <-
      cbind.data.frame(age, sex, bmi, children, smoker, region, age2, bmi30)
    test <- as.data.frame(test)
  })
  # Training data with the derived age^2 and bmi>=30 columns added.
  # NOTE(review): absolute path -- this breaks on any other machine;
  # consider a path relative to the app directory.
  data <- reactive({
    bata <-
      read.csv("/home/diwash/Projects/Medical_Expense_Predictor/data/insurance.csv")
    bata$age2 <- bata$age ^ 2
    bata$bmi30 <- ifelse(bata$bmi >= 30 , 1 , 0)
    data <- bata
  })
  # Fit the model and predict only when the 'predict' button is pressed.
  pred <- eventReactive(input$predict, {
    model <-
      lm(charges ~ age + age2 + children + bmi + sex + bmi30 * smoker + region ,
         data = data())
    predict(model, test())
  })
  output$insurance <- renderText(pred())   # predicted charge
  output$table <- renderTable(test())      # echo of the entered profile
})
|
/medic_predict/server.R
|
no_license
|
diwashrestha/Medical_Expense_Predictor
|
R
| false
| false
| 1,184
|
r
|
# Server codes
library(shiny)
# Server logic: builds a one-row data frame from the user's inputs, fits a
# linear model of medical charges on the insurance data and, when the user
# presses 'predict', predicts the charge for the entered profile.
shinyServer(function(input, output) {
  # Reactive one-row data frame describing the person entered in the UI.
  test <- reactive({
    age <- as.integer(input$age)
    sex <- as.character(input$sex)
    bmi <- as.integer(input$bmi)
    children <- as.integer(input$children)
    smoker <- as.character(input$smoke)
    region <- as.character(input$region)
    age2 <- as.integer(input$age ^ 2)                    # quadratic age term
    bmi30 <- as.integer(ifelse(input$bmi >= 30 , 1, 0))  # obesity indicator
    # (the original also built an unused 'fest' matrix via cbind; removed)
    test <-
      cbind.data.frame(age, sex, bmi, children, smoker, region, age2, bmi30)
    test <- as.data.frame(test)
  })
  # Training data with the derived age^2 and bmi>=30 columns added.
  # NOTE(review): absolute path -- this breaks on any other machine;
  # consider a path relative to the app directory.
  data <- reactive({
    bata <-
      read.csv("/home/diwash/Projects/Medical_Expense_Predictor/data/insurance.csv")
    bata$age2 <- bata$age ^ 2
    bata$bmi30 <- ifelse(bata$bmi >= 30 , 1 , 0)
    data <- bata
  })
  # Fit the model and predict only when the 'predict' button is pressed.
  pred <- eventReactive(input$predict, {
    model <-
      lm(charges ~ age + age2 + children + bmi + sex + bmi30 * smoker + region ,
         data = data())
    predict(model, test())
  })
  output$insurance <- renderText(pred())   # predicted charge
  output$table <- renderTable(test())      # echo of the entered profile
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.xmlWrite.R
\name{xmlFromList}
\alias{xmlFromList}
\title{Create an XML Node from a list}
\usage{
xmlFromList(node, sublist)
}
\arguments{
\item{node}{(\code{XMLNode}) A Node created by XML package}
\item{sublist}{(\code{list}) Any list}
}
\value{
node (\code{XMLNode}) A node where the list is attached to the first XML Node
}
\description{
This function appends a list as an XML object to an item. The function allows
setting attributes of XML items by using the "attributes" list name, therefore
it can never write tags with the name "attributes"
}
\examples{
root <- XML::newXMLNode("root")
li <- list(a = list(aa = 1, ab=2),
b=list(ba = 1,
bb=list(x=4,
attributes=c(value=3)),
bb= 2,
bc =3))
xmlFromList(root,li)
# The result is an XML Node like this
#<root>
# <a>
# <aa>1</aa>
# <ab>2</ab>
# </a>
# <b>
# <ba>1</ba>
# <bb value="3">
# <x>4</x>
# </bb>
# <bb>2</bb>
# <bc>3</bc>
# </b>
#</root>
}
\author{
Sebastian Wolf \email{sebastian.wolf.sw1@roche.com}
}
|
/man/xmlFromList.Rd
|
no_license
|
marchandpatrick/RTest
|
R
| false
| true
| 1,206
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.xmlWrite.R
\name{xmlFromList}
\alias{xmlFromList}
\title{Create an XML Node from a list}
\usage{
xmlFromList(node, sublist)
}
\arguments{
\item{node}{(\code{XMLNode}) A Node created by XML package}
\item{sublist}{(\code{list}) Any list}
}
\value{
node (\code{XMLNode}) A node where the list is attached to the first XML Node
}
\description{
This function appends a list as an XML object to an item. The function allows
setting attributes of XML items by using the "attributes" list name, therefore
it can never write tags with the name "attributes"
}
\examples{
root <- XML::newXMLNode("root")
li <- list(a = list(aa = 1, ab=2),
b=list(ba = 1,
bb=list(x=4,
attributes=c(value=3)),
bb= 2,
bc =3))
xmlFromList(root,li)
# The result is an XML Node like this
#<root>
# <a>
# <aa>1</aa>
# <ab>2</ab>
# </a>
# <b>
# <ba>1</ba>
# <bb value="3">
# <x>4</x>
# </bb>
# <bb>2</bb>
# <bc>3</bc>
# </b>
#</root>
}
\author{
Sebastian Wolf \email{sebastian.wolf.sw1@roche.com}
}
|
\name{TruncatedNormal-package}
\alias{TruncatedNormal-package}
\alias{TruncatedNormal}
\docType{package}
\title{
Truncated Normal Distribution Toolbox
}
\description{
The routines include:
\itemize{
\item generator of \bold{independent and identically distributed} random vectors from the truncated univariate and multivariate distributions;
\item (Quasi-) Monte Carlo estimator and a \bold{deterministic upper bound} of the cumulative distribution function of the multivariate normal and Student distributions;
\item algorithm for the accurate computation of the quantile function of the normal distribution in the extremes of its tails.
}
}
\details{
\describe{
\item{\code{\link{mvNcdf}(l,u,Sig,n)}}{uses a Monte Carlo sample of size \eqn{n} to estimate the cumulative
distribution function, Pr\eqn{( l < X < u)}, of the \eqn{d}-dimensional
multivariate normal with zero-mean and covariance \eqn{\Sigma}, that is,
\eqn{X} has \eqn{N(0,\Sigma)} distribution;}
\item{\code{\link{mvNqmc}(l,u,Sig,n)}}{ provides a Quasi Monte Carlo algorithm for medium dimensions
(say, \eqn{d<50}), in addition to the faster Monte Carlo algorithm in \code{\link{mvNcdf}};}
\item{\code{\link{mvrandn}(l,u,Sig,n)}}{
simulates \eqn{n} \bold{identically and independently distributed} random vectors \eqn{X} from \eqn{N(0,\Sigma)}, conditional on \eqn{l<X<u};}
\item{\code{\link{norminvp}(p,l,u)}}{ computes the quantile function at \eqn{0\le p\le 1} of the univariate \eqn{N(0,1)} distribution
truncated to \eqn{[l,u]}, and with high precision in the tails;}
\item{\code{\link{trandn}(l,u)}}{ is a fast random number generator from the univariate \eqn{N(0,1)}
distribution truncated to \eqn{[l,u]}.}
\item{\code{\link{mvTcdf}(l,u,Sig,nu,n)}}{ which uses a Monte Carlo sample of size \eqn{n} to estimate the cumulative distribution function, Pr\eqn{(l < X < u)}, of the \eqn{d}-dimensional multivariate Student with zero-mean and covariance \eqn{\Sigma} and degrees of freedom \eqn{\nu}, that is, \eqn{X \sim t_\nu(0, \Sigma)}{X ~ t[\nu](0, \Sigma)}.}
\item{\code{\link{mvTqmc}(l,u,Sig,nu,n)}}{ provides a Quasi Monte Carlo algorithm for medium dimensions
(say, \eqn{d < 50}), in addition to the faster Monte Carlo algorithm in \code{\link{mvTcdf}}.}
\item{\code{\link{mvrandt}(l,u,Sig,nu,n)}}{ simulates \eqn{n} random vectors \eqn{X \sim t_\nu(0, \Sigma)}{X ~ t[\nu](0, \Sigma)}, conditional on \eqn{l < X <u}.}
\item{\code{\link{tregress}(l,u,Sig,df,n)}}{ simulates \eqn{n} pairs, \eqn{Z, R}, so that \eqn{\sqrt{\nu}Z/R \sim t_\nu(0, \Sigma)}{\sqrt{\nu}Z/R ~ t[\nu](0, \Sigma)}, conditional on \eqn{l < X <u}.}
}
}
\author{
Z. I. Botev, email: \email{botev@unsw.edu.au} and web page: \url{http://web.maths.unsw.edu.au/~zdravkobotev/}
}
\references{
\itemize{
\item Z. I. Botev (2017), \emph{The Normal Law Under Linear Restrictions:
Simulation and Estimation via Minimax Tilting}, Journal of the Royal
Statistical Society, Series B, \bold{79} (1), pp. 1--24.
\item Z. I. Botev and P. L'Ecuyer (2015), \emph{Efficient Estimation
and Simulation of the Truncated Multivariate Student-t Distribution}, Proceedings of the 2015 Winter Simulation Conference,
Huntington Beach, CA, USA
\item Gibson G. J., Glasbey C. A., Elston D. A. (1994),
\emph{Monte Carlo evaluation of multivariate normal integrals and sensitivity to variate ordering},
In: Advances in Numerical Methods and Applications, pages 120--126
}
}
\seealso{ Matlab toolbox:
\url{http://web.maths.unsw.edu.au/~zdravkobotev/}
}
|
/man/TruncatedNormal-package.Rd
|
no_license
|
danmackinlay/TruncatedNormal
|
R
| false
| false
| 3,493
|
rd
|
\name{TruncatedNormal-package}
\alias{TruncatedNormal-package}
\alias{TruncatedNormal}
\docType{package}
\title{
Truncated Normal Distribution Toolbox
}
\description{
The routines include:
\itemize{
\item generator of \bold{independent and identically distributed} random vectors from the truncated univariate and multivariate distributions;
\item (Quasi-) Monte Carlo estimator and a \bold{deterministic upper bound} of the cumulative distribution function of the multivariate normal and Student distributions;
\item algorithm for the accurate computation of the quantile function of the normal distribution in the extremes of its tails.
}
}
\details{
\describe{
\item{\code{\link{mvNcdf}(l,u,Sig,n)}}{uses a Monte Carlo sample of size \eqn{n} to estimate the cumulative
distribution function, Pr\eqn{( l < X < u)}, of the \eqn{d}-dimensional
multivariate normal with zero-mean and covariance \eqn{\Sigma}, that is,
\eqn{X} has \eqn{N(0,\Sigma)} distribution;}
\item{\code{\link{mvNqmc}(l,u,Sig,n)}}{ provides a Quasi Monte Carlo algorithm for medium dimensions
(say, \eqn{d<50}), in addition to the faster Monte Carlo algorithm in \code{\link{mvNcdf}};}
\item{\code{\link{mvrandn}(l,u,Sig,n)}}{
simulates \eqn{n} \bold{identically and independently distributed} random vectors \eqn{X} from \eqn{N(0,\Sigma)}, conditional on \eqn{l<X<u};}
\item{\code{\link{norminvp}(p,l,u)}}{ computes the quantile function at \eqn{0\le p\le 1} of the univariate \eqn{N(0,1)} distribution
truncated to \eqn{[l,u]}, and with high precision in the tails;}
\item{\code{\link{trandn}(l,u)}}{ is a fast random number generator from the univariate \eqn{N(0,1)}
distribution truncated to \eqn{[l,u]}.}
\item{\code{\link{mvTcdf}(l,u,Sig,nu,n)}}{ which uses a Monte Carlo sample of size \eqn{n} to estimate the cumulative distribution function, Pr\eqn{(l < X < u)}, of the \eqn{d}-dimensional multivariate Student with zero-mean and covariance \eqn{\Sigma} and degrees of freedom \eqn{\nu}, that is, \eqn{X \sim t_\nu(0, \Sigma)}{X ~ t[\nu](0, \Sigma)}.}
\item{\code{\link{mvTqmc}(l,u,Sig,nu,n)}}{ provides a Quasi Monte Carlo algorithm for medium dimensions
(say, \eqn{d < 50}), in addition to the faster Monte Carlo algorithm in \code{\link{mvTcdf}}.}
\item{\code{\link{mvrandt}(l,u,Sig,nu,n)}}{ simulates \eqn{n} random vectors \eqn{X \sim t_\nu(0, \Sigma)}{X ~ t[\nu](0, \Sigma)}, conditional on \eqn{l < X <u}.}
\item{\code{\link{tregress}(l,u,Sig,df,n)}}{ simulates \eqn{n} pairs, \eqn{Z, R}, so that \eqn{\sqrt{\nu}Z/R \sim t_\nu(0, \Sigma)}{\sqrt{\nu}Z/R ~ t[\nu](0, \Sigma)}, conditional on \eqn{l < X <u}.}
}
}
\author{
Z. I. Botev, email: \email{botev@unsw.edu.au} and web page: \url{http://web.maths.unsw.edu.au/~zdravkobotev/}
}
\references{
\itemize{
\item Z. I. Botev (2017), \emph{The Normal Law Under Linear Restrictions:
Simulation and Estimation via Minimax Tilting}, Journal of the Royal
Statistical Society, Series B, \bold{79} (1), pp. 1--24.
\item Z. I. Botev and P. L'Ecuyer (2015), \emph{Efficient Estimation
and Simulation of the Truncated Multivariate Student-t Distribution}, Proceedings of the 2015 Winter Simulation Conference,
Huntington Beach, CA, USA
\item Gibson G. J., Glasbey C. A., Elston D. A. (1994),
\emph{Monte Carlo evaluation of multivariate normal integrals and sensitivity to variate ordering},
In: Advances in Numerical Methods and Applications, pages 120--126
}
}
\seealso{ Matlab toolbox:
\url{http://web.maths.unsw.edu.au/~zdravkobotev/}
}
|
## The two functions below compute and cache the inverse of a matrix 'x'.
## If the inverse was already computed for the same matrix, the cached
## result is returned and no further calculation takes place.
##
## "makeCacheMatrix" builds a list object that:
## - initializes a cache variable holding the inverted matrix
## - exposes get() to retrieve the original matrix
## - exposes setIM() to store the inverse of x in the cache
## - exposes getIM() to retrieve the cached inverse
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  get <- function() {
    x
  }
  setIM <- function(IM) {
    cached_inverse <<- IM
  }
  getIM <- function() {
    cached_inverse
  }
  list(get = get, setIM = setIM, getIM = getIM)
}
## "cacheSolve" first checks whether the inverse of the matrix has already
## been computed and cached. If so, the cached value is simply returned;
## otherwise the inverse is computed with solve(), cached, and returned.
cacheSolve <- function(x) {
  cached <- x$getIM()
  if (is.null(cached)) {
    message("Calculating inverse matrix...")
    inverse <- solve(x$get())
    x$setIM(inverse)
    inverse
  } else {
    message("Getting cached data ...")
    cached
  }
}
|
/cachematrix.R
|
no_license
|
MichaelChoudhury/ProgrammingAssignment2
|
R
| false
| false
| 1,300
|
r
|
## The two functions below compute and cache the inverse of a matrix 'x'.
## If the inverse was already computed for the same matrix, the cached
## result is returned and no further calculation takes place.
##
## "makeCacheMatrix" builds a list object that:
## - initializes a cache variable holding the inverted matrix
## - exposes get() to retrieve the original matrix
## - exposes setIM() to store the inverse of x in the cache
## - exposes getIM() to retrieve the cached inverse
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  get <- function() {
    x
  }
  setIM <- function(IM) {
    cached_inverse <<- IM
  }
  getIM <- function() {
    cached_inverse
  }
  list(get = get, setIM = setIM, getIM = getIM)
}
## "cacheSolve" first checks whether the inverse of the matrix has already
## been computed and cached. If so, the cached value is simply returned;
## otherwise the inverse is computed with solve(), cached, and returned.
cacheSolve <- function(x) {
  cached <- x$getIM()
  if (is.null(cached)) {
    message("Calculating inverse matrix...")
    inverse <- solve(x$get())
    x$setIM(inverse)
    inverse
  } else {
    message("Getting cached data ...")
    cached
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/port-perf.R
\name{portvar_calc}
\alias{portvar_calc}
\title{Portfolio Variance}
\usage{
portvar_calc(Sigma, weights)
}
\arguments{
\item{Sigma}{a pxp covariance matrix of returns.}
\item{weights}{a numeric vector, the portfolio weights.}
}
\value{
a double, the portfolio variance
}
\description{
Calculates the in-sample variance of a portfolio
}
\examples{
data(sp500_rets)
Sigma <- var(sp500_rets[,-1])
p <- dim(Sigma)[2]
weights <- rep(1/p, p)
portvar <- portvar_calc(Sigma, weights)
}
|
/man/portvar_calc.Rd
|
no_license
|
antshi/auxPort
|
R
| false
| true
| 569
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/port-perf.R
\name{portvar_calc}
\alias{portvar_calc}
\title{Portfolio Variance}
\usage{
portvar_calc(Sigma, weights)
}
\arguments{
\item{Sigma}{a pxp covariance matrix of returns.}
\item{weights}{a numeric vector, the portfolio weights.}
}
\value{
a double, the portfolio variance
}
\description{
Calculates the in-sample variance of a portfolio
}
\examples{
data(sp500_rets)
Sigma <- var(sp500_rets[,-1])
p <- dim(Sigma)[2]
weights <- rep(1/p, p)
portvar <- portvar_calc(Sigma, weights)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LinearDetect-package.R
\name{BIC}
\alias{BIC}
\title{BIC and HBIC function}
\usage{
BIC(residual, phi, gamma.val = 1, method = "MLR")
}
\arguments{
\item{residual}{residual matrix}
\item{phi}{estimated coefficient matrix of the model}
\item{gamma.val}{hyperparameter for HBIC, if HBIC == TRUE.}
\item{method}{method name for the model: MLR: Multiple Linear Regression; VAR: Vector autoregression;}
}
\value{
A list object, which contains the followings
\describe{
\item{BIC}{BIC value}
\item{HBIC}{HBIC value}
}
}
\description{
BIC and HBIC function
}
|
/man/BIC.Rd
|
no_license
|
cran/LinearDetect
|
R
| false
| true
| 639
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LinearDetect-package.R
\name{BIC}
\alias{BIC}
\title{BIC and HBIC function}
\usage{
BIC(residual, phi, gamma.val = 1, method = "MLR")
}
\arguments{
\item{residual}{residual matrix}
\item{phi}{estimated coefficient matrix of the model}
\item{gamma.val}{hyperparameter for HBIC, if HBIC == TRUE.}
\item{method}{method name for the model: MLR: Multiple Linear Regression; VAR: Vector autoregression;}
}
\value{
A list object, which contains the followings
\describe{
\item{BIC}{BIC value}
\item{HBIC}{HBIC value}
}
}
\description{
BIC and HBIC function
}
|
# Parse equality/inequality constraints for a lavaan parameter table.
# Builds the ceq (equality) and cin (inequality) constraint functions, their
# Jacobians evaluated at 'theta', classifies each constraint as linear or
# nonlinear, and -- for linear equality constraints -- precomputes the null
# space used to reparameterize the problem. Returns a list with all of these
# pieces plus shortcut flags.
#
# partable    lavaan parameter table (list with $lhs, $free, optionally
#             $est/$start)
# constraints optional constraint syntax as a single character string
# theta       free-parameter values at which Jacobians are evaluated; when
#             NULL, taken from partable$est, then partable$start, else zeros
# debug       passed through to the constraint-building helpers
lav_constraints_parse <- function(partable = NULL, constraints = NULL,
theta = NULL,
debug = FALSE) {
# just in case we do not have a $free column in partable
if(is.null(partable$free)) {
partable$free <- seq_len(length(partable$lhs))
}
# from the partable: free parameters
if(!is.null(theta)) {
# nothing to do
} else if(!is.null(partable$est)) {
theta <- partable$est[ partable$free > 0L ]
} else if(!is.null(partable$start)) {
theta <- partable$start[ partable$free > 0L ]
} else {
theta <- rep(0, length(partable$lhs))
}
# number of free parameters
npar <- length(theta)
# parse the constraints string (if any) into a lhs/op/rhs list
if(is.null(constraints)) {
LIST <- NULL
} else if(!is.character(constraints)) {
stop("lavaan ERROR: constraints should be a string")
} else {
FLAT <- lavParseModelString( constraints )
CON <- attr(FLAT, "constraints")
LIST <- list()
if(length(CON) > 0L) {
lhs = unlist(lapply(CON, "[[", "lhs"))
op = unlist(lapply(CON, "[[", "op"))
rhs = unlist(lapply(CON, "[[", "rhs"))
LIST$lhs <- c(LIST$lhs, lhs)
LIST$op <- c(LIST$op, op)
LIST$rhs <- c(LIST$rhs, rhs)
} else {
stop("lavaan ERROR: no constraints found in constraints argument")
}
}
# variable definitions (":=" style)
def.function <- lav_partable_constraints_def(partable, con = LIST,
debug = debug)
# construct ceq/ciq functions
ceq.function <- lav_partable_constraints_ceq(partable, con = LIST,
debug = debug)
# linear or nonlinear?
ceq.linear.idx <- lav_constraints_linear_idx(func = ceq.function,
npar = npar)
ceq.nonlinear.idx <- lav_constraints_nonlinear_idx(func = ceq.function,
npar = npar)
# inequalities
cin.function <- lav_partable_constraints_ciq(partable, con = LIST,
debug = debug)
# linear or nonlinear?
cin.linear.idx <- lav_constraints_linear_idx(func = cin.function,
npar = npar)
cin.nonlinear.idx <- lav_constraints_nonlinear_idx(func = cin.function,
npar = npar)
# Jacobians at theta; fall back to the simple (finite-difference) variant
# when the complex-step version fails
if(!is.null(body(ceq.function))) {
ceq.JAC <- try(lav_func_jacobian_complex(func = ceq.function,
x = theta), silent=TRUE)
if(inherits(ceq.JAC, "try-error")) { # eg. pnorm()
ceq.JAC <- lav_func_jacobian_simple(func = ceq.function, x = theta)
}
# constants: the constraint value at theta = 0 gives (minus) the rhs
# do we have a non-zero 'rhs' elements? FIXME!!! is this reliable??
ceq.rhs <- -1 * ceq.function( numeric(npar) )
# evaluate constraints
ceq.theta <- ceq.function(theta)
} else {
ceq.JAC <- matrix(0, nrow = 0L, ncol = npar)
ceq.rhs <- numeric(0L)
ceq.theta <- numeric(0L)
}
if(!is.null(body(cin.function))) {
cin.JAC <- try(lav_func_jacobian_complex(func = cin.function,
x = theta), silent=TRUE)
if(inherits(cin.JAC, "try-error")) { # eg. pnorm()
cin.JAC <- lav_func_jacobian_simple(func = cin.function, x = theta)
}
# constants
# do we have a non-zero 'rhs' elements? FIXME!!! is this reliable??
cin.rhs <- -1 * cin.function( numeric(npar) )
# evaluate constraints
cin.theta <- cin.function(theta)
} else {
cin.JAC <- matrix(0, nrow = 0L, ncol = npar)
cin.rhs <- numeric(0L)
cin.theta <- numeric(0L)
}
# shortcut flags
# NOTE(review): ceq.only.flag is computed here but not included in the OUT
# list below -- confirm whether that is intentional.
ceq.linear.flag <- length(ceq.linear.idx) > 0L
ceq.nonlinear.flag <- length(ceq.nonlinear.idx) > 0L
ceq.flag <- ceq.linear.flag || ceq.nonlinear.flag
cin.linear.flag <- length(cin.linear.idx) > 0L
cin.nonlinear.flag <- length(cin.nonlinear.idx) > 0L
cin.flag <- cin.linear.flag || cin.nonlinear.flag
ceq.only.flag <- ceq.flag && !cin.flag
cin.only.flag <- cin.flag && !ceq.flag
ceq.linear.only.flag <- ( ceq.linear.flag &&
!ceq.nonlinear.flag &&
!cin.flag )
# additional info if ceq.linear.flag
if(ceq.linear.flag) {
## NEW: 18 nov 2014: handle general *linear* constraints
##
## see Nocedal & Wright (2006) 15.3
## - from x to x.red:
## x.red <- MASS::ginv(Q2) %*% (x - Q1 %*% solve(t(R)) %*% b)
## or
## x.red <- as.numeric((x - b %*% qr.coef(QR,diag(npar))) %*% Q2)
##
## - from x.red to x
## x <- as.numeric(Q1 %*% solve(t(R)) %*% b + Q2 %*% x.red)
## or
## x <- as.numeric(b %*% qr.coef(QR, diag(npar))) +
## as.numeric(Q2 %*% x.red)
##
## we write eq.constraints.K = Q2
## eq.constraints.k0 = b %*% qr.coef(QR, diag(npar)))
# compute range+null space of the jacobian (JAC) of the constraint
# matrix
#JAC <- lav_func_jacobian_complex(func = ceq.function,
# x = lavpartable$start[lavpartable$free > 0L]
QR <- qr(t(ceq.JAC))
ranK <- QR$rank
Q <- qr.Q(QR, complete = TRUE)
# Q1 <- Q[,1:ranK, drop = FALSE] # range space
# Q2 <- Q[,-seq_len(ranK), drop = FALSE] # null space
# R <- qr.R(QR)
ceq.JAC.NULL <- Q[,-seq_len(ranK), drop = FALSE]
if(all(ceq.rhs == 0)) {
ceq.rhs.NULL <- numeric(npar)
} else {
tmp <- qr.coef(QR, diag(npar))
NA.idx <- which(is.na(rowSums(tmp))) # catch NAs
if(length(NA.idx) > 0L) {
tmp[NA.idx,] <- 0
}
ceq.rhs.NULL <- as.numeric(ceq.rhs %*% tmp)
}
} else {
ceq.JAC.NULL <- matrix(0,0L,0L)
ceq.rhs.NULL <- numeric(0L)
}
# dummy jacobian 'function' (placeholders; not computed here)
ceq.jacobian <- function() NULL
cin.jacobian <- function() NULL
OUT <- list(def.function = def.function,
ceq.function = ceq.function,
ceq.JAC = ceq.JAC,
ceq.jacobian = ceq.jacobian,
ceq.rhs = ceq.rhs,
ceq.theta = ceq.theta,
ceq.linear.idx = ceq.linear.idx,
ceq.nonlinear.idx = ceq.nonlinear.idx,
ceq.linear.flag = ceq.linear.flag,
ceq.nonlinear.flag = ceq.nonlinear.flag,
ceq.flag = ceq.flag,
ceq.linear.only.flag = ceq.linear.only.flag,
ceq.JAC.NULL = ceq.JAC.NULL,
ceq.rhs.NULL = ceq.rhs.NULL,
cin.function = cin.function,
cin.JAC = cin.JAC,
cin.jacobian = cin.jacobian,
cin.rhs = cin.rhs,
cin.theta = cin.theta,
cin.linear.idx = cin.linear.idx,
cin.nonlinear.idx = cin.nonlinear.idx,
cin.linear.flag = cin.linear.flag,
cin.nonlinear.flag = cin.nonlinear.flag,
cin.flag = cin.flag,
cin.only.flag = cin.only.flag)
OUT
}
# Identify which constraint rows are linear in the free parameters.
# A constraint is linear iff its Jacobian is constant, so we evaluate
# the Jacobian at two independent random points and keep the rows where
# both evaluations coincide.
lav_constraints_linear_idx <- function(func = NULL, npar = NULL) {
    if (is.null(func) || is.null(body(func))) {
        return(integer(0L))
    }
    # Jacobians at two random evaluation points
    jac1 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
    jac2 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
    # rows whose Jacobians agree exactly are (numerically) linear
    is.linear <- rowSums((jac1 - jac2) != 0) == 0L
    which(is.linear)
}
# Identify which constraint rows are NONlinear in the free parameters:
# the complement of lav_constraints_linear_idx(), using the same
# two-random-points Jacobian comparison.
lav_constraints_nonlinear_idx <- function(func = NULL, npar = NULL) {
    if (is.null(func) || is.null(body(func))) {
        return(integer(0L))
    }
    # Jacobians at two random evaluation points
    jac1 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
    jac2 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
    # rows whose Jacobians differ anywhere are nonlinear
    is.linear <- rowSums((jac1 - jac2) != 0) == 0L
    which(!is.linear)
}
# FIXME: is there a more elegant/robust way to do this??
# Check whether ALL (in)equality constraints of a model are linear: the
# stacked (ceq | cin) Jacobian is evaluated at two random points; for
# linear constraints both evaluations must coincide (up to tolerance).
lav_constraints_check_linear <- function(model) {
    # evaluate the stacked transposed Jacobian at one random point
    eval_jacobian <- function() {
        A.ceq <- A.cin <- matrix(0, model@nx.free, 0)
        if (!is.null(body(model@ceq.function))) {
            A.ceq <- t(lav_func_jacobian_complex(func = model@ceq.function,
                                                 x = rnorm(model@nx.free)))
        }
        if (!is.null(body(model@cin.function))) {
            A.cin <- t(lav_func_jacobian_complex(func = model@cin.function,
                                                 x = rnorm(model@nx.free)))
        }
        cbind(A.ceq, A.cin)
    }
    A0 <- eval_jacobian()  # random point 1
    A1 <- eval_jacobian()  # random point 2
    # all.equal() returns TRUE or a character vector of differences
    isTRUE(all.equal(A0, A1))
}
# check if the equality constraints are 'simple' (a == b)
lav_constraints_check_simple <- function(lavmodel = NULL) {
    JAC <- lavmodel@ceq.JAC
    # a 'simple' row encodes a - b == 0: zero right-hand side, exactly
    # two nonzero Jacobian entries, both equal to +1 or -1, and no
    # nonlinear equality constraints anywhere in the model
    rhs.zero    <- all(lavmodel@ceq.rhs == 0)
    two.nonzero <- all(rowSums(JAC != 0) == 2)
    plus.minus  <- all(rowSums(JAC == 1 | JAC == -1) == 2)
    all.linear  <- length(lavmodel@ceq.nonlinear.idx) == 0
    # TRUE or FALSE
    rhs.zero && two.nonzero && plus.minus && all.linear
}
# Build the indicator matrix K that maps the reduced (free) parameters
# to the full parameter vector, given simple equality constraints of the
# form a - b == 0 encoded row-wise in R (one +1 and one -1 per row).
#
# lavmodel : optional lavaan model object; if supplied, R is taken from
#            its ceq.JAC slot
# R        : constraint matrix (rows = constraints, cols = parameters);
#            required when lavmodel is NULL
#
# Returns an npar x (npar - nconstraints) matrix K.
lav_constraints_R2K <- function(lavmodel = NULL, R = NULL) {
    # constraint matrix
    if (!is.null(lavmodel)) {
        R <- lavmodel@ceq.JAC
    }
    stopifnot(!is.null(R))
    npar.full <- NCOL(R)
    K <- diag(npar.full)
    # seq_len() also handles a zero-row R (1:NROW(R) would yield c(1, 0))
    for (i in seq_len(NROW(R))) {
        # row i encodes par[idx1] - par[idx2] == 0: let the column of
        # idx1 also generate par[idx2]
        idx1 <- which(R[i, ] == 1)
        idx2 <- which(R[i, ] == -1)
        K[idx2, idx1] <- 1
    }
    # remove the columns of the duplicated ('-1') parameters; guard
    # against an empty index (K[, -integer(0)] would drop ALL columns),
    # and keep the matrix class even if a single column remains
    neg.idx <- which(colSums(R) < 0)
    if (length(neg.idx) > 0L) {
        K <- K[, -neg.idx, drop = FALSE]
    }
    K
}
|
/lavaan/R/lav_constraints.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 10,585
|
r
|
# Parse the (in)equality constraints of a lavaan model into functions,
# Jacobians and bookkeeping flags.
#
# partable    : lavaan parameter table (list-style; uses $lhs and,
#               when available, $free, $est, $start)
# constraints : optional extra constraint syntax, as a single character
#               string (anything else is an error)
# theta       : optional free-parameter values at which the Jacobians
#               are evaluated; defaults to $est, then $start, then zeros
# debug       : passed on to the lav_partable_constraints_* builders
#
# Returns a list holding, for the equality (ceq) and inequality (cin)
# constraints: the constraint function, its Jacobian at theta, the
# right-hand side, the evaluated constraints, linear/nonlinear row
# indices, shortcut flags, and (for linear equality constraints) the
# null-space basis of the Jacobian used for reparameterization.
lav_constraints_parse <- function(partable = NULL, constraints = NULL,
theta = NULL,
debug = FALSE) {
# just in case we do not have a $free column in partable
if(is.null(partable$free)) {
partable$free <- seq_len(length(partable$lhs))
}
# from the partable: free parameters
if(!is.null(theta)) {
# nothing to do
} else if(!is.null(partable$est)) {
theta <- partable$est[ partable$free > 0L ]
} else if(!is.null(partable$start)) {
theta <- partable$start[ partable$free > 0L ]
} else {
# no values available: evaluate at the origin
# NOTE(review): length(partable$lhs) is the number of table rows, not
# the number of free parameters -- confirm this is intended
theta <- rep(0, length(partable$lhs))
}
# number of free parameters
npar <- length(theta)
# parse the constraints
if(is.null(constraints)) {
LIST <- NULL
} else if(!is.character(constraints)) {
stop("lavaan ERROR: constraints should be a string")
} else {
# run the model-syntax parser and pull out its constraint entries
FLAT <- lavParseModelString( constraints )
CON <- attr(FLAT, "constraints")
LIST <- list()
if(length(CON) > 0L) {
lhs = unlist(lapply(CON, "[[", "lhs"))
op = unlist(lapply(CON, "[[", "op"))
rhs = unlist(lapply(CON, "[[", "rhs"))
LIST$lhs <- c(LIST$lhs, lhs)
LIST$op <- c(LIST$op, op)
LIST$rhs <- c(LIST$rhs, rhs)
} else {
stop("lavaan ERROR: no constraints found in constraints argument")
}
}
# variable definitions
def.function <- lav_partable_constraints_def(partable, con = LIST,
debug = debug)
# construct ceq/ciq functions
ceq.function <- lav_partable_constraints_ceq(partable, con = LIST,
debug = debug)
# linear or nonlinear?
ceq.linear.idx <- lav_constraints_linear_idx(func = ceq.function,
npar = npar)
ceq.nonlinear.idx <- lav_constraints_nonlinear_idx(func = ceq.function,
npar = npar)
# inequalities
cin.function <- lav_partable_constraints_ciq(partable, con = LIST,
debug = debug)
# linear or nonlinear?
cin.linear.idx <- lav_constraints_linear_idx(func = cin.function,
npar = npar)
cin.nonlinear.idx <- lav_constraints_nonlinear_idx(func = cin.function,
npar = npar)
# Jacobians
# complex-step differentiation is tried first; it fails for functions
# that cannot take complex input, in which case we fall back to the
# simple (finite-difference) version
if(!is.null(body(ceq.function))) {
ceq.JAC <- try(lav_func_jacobian_complex(func = ceq.function,
x = theta), silent=TRUE)
if(inherits(ceq.JAC, "try-error")) { # eg. pnorm()
ceq.JAC <- lav_func_jacobian_simple(func = ceq.function, x = theta)
}
# constants
# do we have a non-zero 'rhs' elements? FIXME!!! is this reliable??
# (evaluating at zero yields -rhs only when the constraint is affine)
ceq.rhs <- -1 * ceq.function( numeric(npar) )
# evaluate constraints
ceq.theta <- ceq.function(theta)
} else {
# no equality constraints: zero-row placeholders
ceq.JAC <- matrix(0, nrow = 0L, ncol = npar)
ceq.rhs <- numeric(0L)
ceq.theta <- numeric(0L)
}
if(!is.null(body(cin.function))) {
cin.JAC <- try(lav_func_jacobian_complex(func = cin.function,
x = theta), silent=TRUE)
if(inherits(cin.JAC, "try-error")) { # eg. pnorm()
cin.JAC <- lav_func_jacobian_simple(func = cin.function, x = theta)
}
# constants
# do we have a non-zero 'rhs' elements? FIXME!!! is this reliable??
cin.rhs <- -1 * cin.function( numeric(npar) )
# evaluate constraints
cin.theta <- cin.function(theta)
} else {
# no inequality constraints: zero-row placeholders
cin.JAC <- matrix(0, nrow = 0L, ncol = npar)
cin.rhs <- numeric(0L)
cin.theta <- numeric(0L)
}
# shortcut flags
ceq.linear.flag <- length(ceq.linear.idx) > 0L
ceq.nonlinear.flag <- length(ceq.nonlinear.idx) > 0L
ceq.flag <- ceq.linear.flag || ceq.nonlinear.flag
cin.linear.flag <- length(cin.linear.idx) > 0L
cin.nonlinear.flag <- length(cin.nonlinear.idx) > 0L
cin.flag <- cin.linear.flag || cin.nonlinear.flag
ceq.only.flag <- ceq.flag && !cin.flag
cin.only.flag <- cin.flag && !ceq.flag
ceq.linear.only.flag <- ( ceq.linear.flag &&
!ceq.nonlinear.flag &&
!cin.flag )
# additional info if ceq.linear.flag
if(ceq.linear.flag) {
## NEW: 18 nov 2014: handle general *linear* constraints
##
## see Nocedal & Wright (2006) 15.3
## - from x to x.red:
## x.red <- MASS::ginv(Q2) %*% (x - Q1 %*% solve(t(R)) %*% b)
## or
## x.red <- as.numeric((x - b %*% qr.coef(QR,diag(npar))) %*% Q2)
##
## - from x.red to x
## x <- as.numeric(Q1 %*% solve(t(R)) %*% b + Q2 %*% x.red)
## or
## x <- as.numeric(b %*% qr.coef(QR, diag(npar))) +
## as.numeric(Q2 %*% x.red)
##
## we write eq.constraints.K = Q2
## eq.constraints.k0 = b %*% qr.coef(QR, diag(npar)))
# compute range+null space of the jacobian (JAC) of the constraint
# matrix
#JAC <- lav_func_jacobian_complex(func = ceq.function,
# x = lavpartable$start[lavpartable$free > 0L]
QR <- qr(t(ceq.JAC))
ranK <- QR$rank
Q <- qr.Q(QR, complete = TRUE)
# Q1 <- Q[,1:ranK, drop = FALSE] # range space
# Q2 <- Q[,-seq_len(ranK), drop = FALSE] # null space
# R <- qr.R(QR)
# the trailing (npar - rank) columns of Q span the null space
ceq.JAC.NULL <- Q[,-seq_len(ranK), drop = FALSE]
if(all(ceq.rhs == 0)) {
ceq.rhs.NULL <- numeric(npar)
} else {
tmp <- qr.coef(QR, diag(npar))
NA.idx <- which(is.na(rowSums(tmp))) # catch NAs
if(length(NA.idx) > 0L) {
tmp[NA.idx,] <- 0
}
ceq.rhs.NULL <- as.numeric(ceq.rhs %*% tmp)
}
} else {
# no linear equality constraints: empty placeholders
ceq.JAC.NULL <- matrix(0,0L,0L)
ceq.rhs.NULL <- numeric(0L)
}
# dummy jacobian 'function'
ceq.jacobian <- function() NULL
cin.jacobian <- function() NULL
# collect everything in one result list
OUT <- list(def.function = def.function,
ceq.function = ceq.function,
ceq.JAC = ceq.JAC,
ceq.jacobian = ceq.jacobian,
ceq.rhs = ceq.rhs,
ceq.theta = ceq.theta,
ceq.linear.idx = ceq.linear.idx,
ceq.nonlinear.idx = ceq.nonlinear.idx,
ceq.linear.flag = ceq.linear.flag,
ceq.nonlinear.flag = ceq.nonlinear.flag,
ceq.flag = ceq.flag,
ceq.linear.only.flag = ceq.linear.only.flag,
ceq.JAC.NULL = ceq.JAC.NULL,
ceq.rhs.NULL = ceq.rhs.NULL,
cin.function = cin.function,
cin.JAC = cin.JAC,
cin.jacobian = cin.jacobian,
cin.rhs = cin.rhs,
cin.theta = cin.theta,
cin.linear.idx = cin.linear.idx,
cin.nonlinear.idx = cin.nonlinear.idx,
cin.linear.flag = cin.linear.flag,
cin.nonlinear.flag = cin.nonlinear.flag,
cin.flag = cin.flag,
cin.only.flag = cin.only.flag)
OUT
}
# Return the row indices of the constraints in 'func' that are linear in
# the npar free parameters: the Jacobian of a linear constraint is
# constant, so two evaluations at random points must coincide.
lav_constraints_linear_idx <- function(func = NULL, npar = NULL) {
# absent/empty constraint function: no linear constraints
if(is.null(func) || is.null(body(func))) return(integer(0L))
# seed 1: rnorm
A0 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
# seed 2: rnorm
A1 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
A0minA1 <- A0 - A1
# rows where both Jacobians agree exactly are (numerically) linear
linear <- apply(A0minA1, 1, function(x) all(x == 0))
which(linear)
}
# Return the row indices of the constraints in 'func' that are NONlinear
# in the npar free parameters (complement of lav_constraints_linear_idx).
lav_constraints_nonlinear_idx <- function(func = NULL, npar = NULL) {
# absent/empty constraint function: no nonlinear constraints
if(is.null(func) || is.null(body(func))) return(integer(0L))
# seed 1: rnorm
A0 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
# seed 2: rnorm
A1 <- lav_func_jacobian_complex(func = func, x = rnorm(npar))
A0minA1 <- A0 - A1
# rows whose Jacobians differ at the two points are nonlinear
linear <- apply(A0minA1, 1, function(x) all(x == 0))
which(!linear)
}
# FIXME: is there a more elegant/robust way to do this??
# Check whether ALL (in)equality constraints of 'model' are linear: the
# stacked (ceq | cin) Jacobian is evaluated at two random points and the
# two results are compared with all.equal().
lav_constraints_check_linear <- function(model) {
# seed 1: rnorm
A.ceq <- A.cin <- matrix(0, model@nx.free, 0)
if(!is.null(body(model@ceq.function)))
A.ceq <- t(lav_func_jacobian_complex(func=model@ceq.function, x=rnorm(model@nx.free)))
if(!is.null(body(model@cin.function)))
A.cin <- t(lav_func_jacobian_complex(func=model@cin.function, x=rnorm(model@nx.free)))
A0 <- cbind(A.ceq, A.cin)
# seed 2: rnorm
A.ceq <- A.cin <- matrix(0, model@nx.free, 0)
if(!is.null(body(model@ceq.function)))
A.ceq <- t(lav_func_jacobian_complex(func=model@ceq.function, x=rnorm(model@nx.free)))
if(!is.null(body(model@cin.function)))
A.cin <- t(lav_func_jacobian_complex(func=model@cin.function, x=rnorm(model@nx.free)))
A1 <- cbind(A.ceq, A.cin)
# all.equal() returns TRUE, or a character vector describing differences
A0minA1 <- all.equal(A0, A1)
if(is.logical(A0minA1) && A0minA1 == TRUE)
return(TRUE)
else
return(FALSE)
}
# check if the equality constraints are 'simple' (a == b)
# A 'simple' row encodes a - b == 0: zero right-hand side, exactly two
# nonzero Jacobian entries (both +1 or -1), and no nonlinear equality
# constraints anywhere in the model.
lav_constraints_check_simple <- function(lavmodel = NULL) {
ones <- (lavmodel@ceq.JAC == 1 | lavmodel@ceq.JAC == -1)
simple <- all(lavmodel@ceq.rhs == 0) &&
all(apply(lavmodel@ceq.JAC != 0, 1, sum) == 2) &&
all(apply(ones, 1, sum) == 2) &&
length(lavmodel@ceq.nonlinear.idx) == 0
# TRUE or FALSE
simple
}
# Build the indicator matrix K that maps the reduced (free) parameters
# to the full parameter vector, given simple equality constraints of the
# form a - b == 0 encoded row-wise in R (one +1 and one -1 per row).
#
# lavmodel : optional lavaan model object; if supplied, R is taken from
#            its ceq.JAC slot
# R        : constraint matrix (rows = constraints, cols = parameters);
#            required when lavmodel is NULL
#
# Returns an npar x (npar - nconstraints) matrix K.
lav_constraints_R2K <- function(lavmodel = NULL, R = NULL) {
    # constraint matrix
    if (!is.null(lavmodel)) {
        R <- lavmodel@ceq.JAC
    }
    stopifnot(!is.null(R))
    npar.full <- NCOL(R)
    K <- diag(npar.full)
    # seq_len() also handles a zero-row R (1:NROW(R) would yield c(1, 0))
    for (i in seq_len(NROW(R))) {
        # row i encodes par[idx1] - par[idx2] == 0: let the column of
        # idx1 also generate par[idx2]
        idx1 <- which(R[i, ] == 1)
        idx2 <- which(R[i, ] == -1)
        K[idx2, idx1] <- 1
    }
    # remove the columns of the duplicated ('-1') parameters; guard
    # against an empty index (K[, -integer(0)] would drop ALL columns),
    # and keep the matrix class even if a single column remains
    neg.idx <- which(colSums(R) < 0)
    if (length(neg.idx) > 0L) {
        K <- K[, -neg.idx, drop = FALSE]
    }
    K
}
|
library(shiny)
# UI for a calories-burnt calculator: collects sex, age, weight, heart
# rate and workout duration, and displays the server's computed value
# through the "Calories" text output.
shinyUI(pageWithSidebar(
    headerPanel("Calories Burnt Calculation"),
    sidebarPanel(
        h4('Calories Burnt'),  # typo fix: was 'Calroes Burnt'
        selectInput('Sex', label='Select your Sex', c("Male","Female")),
        numericInput('Age', 'Your Age', 40),
        numericInput('Weight', 'Weight in Lbs', 175),
        numericInput('HeartRate', 'Heart Rate in BPM', 125),
        numericInput('Wtime', 'Workout Time in Mins', 60)
    ),
    mainPanel(
        h4('Your Calories Burnt is - '),
        textOutput("Calories")
    )
))
|
/ui.R
|
no_license
|
vinkaran/DataProductsAssignment
|
R
| false
| false
| 492
|
r
|
library(shiny)
# UI for a calories-burnt calculator: collects sex, age, weight, heart
# rate and workout duration, and displays the server's computed value
# through the "Calories" text output.
shinyUI(pageWithSidebar(
    headerPanel("Calories Burnt Calculation"),
    sidebarPanel(
        h4('Calories Burnt'),  # typo fix: was 'Calroes Burnt'
        selectInput('Sex', label='Select your Sex', c("Male","Female")),
        numericInput('Age', 'Your Age', 40),
        numericInput('Weight', 'Weight in Lbs', 175),
        numericInput('HeartRate', 'Heart Rate in BPM', 125),
        numericInput('Wtime', 'Workout Time in Mins', 60)
    ),
    mainPanel(
        h4('Your Calories Burnt is - '),
        textOutput("Calories")
    )
))
library(shiny)
library(manipulate)
# Plot the mean-centered built-in 'cars' data (dist vs. speed) and add a
# line through the origin with user-chosen slope s.
myPlot <- function(s) {
    dist.centered <- cars$dist - mean(cars$dist)
    speed.centered <- cars$speed - mean(cars$speed)
    plot(dist.centered, speed.centered)
    abline(0, s)
}
|
/dataproducts/q1.R
|
no_license
|
bwv988/datasciencecoursera
|
R
| false
| false
| 145
|
r
|
library(shiny)
library(manipulate)
# Plot the mean-centered built-in 'cars' data (dist vs. speed) and add a
# line through the origin with user-chosen slope s.
myPlot <- function(s) {
plot(cars$dist - mean(cars$dist), cars$speed - mean(cars$speed))
abline(0, s)
}
|
library(rugarch)
### Name: GARCHfilter-class
### Title: class: GARCH Filter Class
### Aliases: GARCHfilter-class
### Keywords: classes
### ** Examples
# display the formal (S4) class definition of 'GARCHfilter'
showClass("GARCHfilter")
|
/data/genthat_extracted_code/rugarch/examples/GARCHfilter-class.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 183
|
r
|
library(rugarch)
### Name: GARCHfilter-class
### Title: class: GARCH Filter Class
### Aliases: GARCHfilter-class
### Keywords: classes
### ** Examples
# display the formal (S4) class definition of 'GARCHfilter'
showClass("GARCHfilter")
|
##################################################################################
# #
# eLinguistics.net -> Long range relationship analysis - Focus on NOSTRATIC #
# #
# Vincent Beaufils, 14.12.2017 #
# #
##################################################################################
library(sqldf)
# NOTE(review): hard-coded absolute path; breaks on any other machine
setwd("C:/Sprachgenetik/Share/Materials")
#setwd("c:/Forschung/")
# pairwise language comparisons; the queries below use the columns
# Group1, Group2, Distance and NbrWords
data <- read.csv("PairwiseComparisons.csv",header=T,stringsAsFactors=FALSE, sep=";")
#########################################################################################################################
# CALL ALL MACRO FAMILIES IN A LOOP -> OUTPUT IN CSV #
######################################################
# substrings matched (SQL LIKE) against Group1/Group2 per macro family
MacroFamilies <- c("_IE_","_AA_","Uralic","Turkic","Mongolic","Tungusic","Dravidian","Kartvelian","Caucasian","Niger_Congo","Bantoo","Sino_Tibetan","Austroasiatic","_MP_","Tai_Kadai","Eskimo","Korean","Japan","Ainu","Burushaski","Basque","Summerian","Elamite")
# SQL 'IN' list of groups forming the Nostratic macro-hypothesis
Nostratic <- c("('00_IE_PIE','01_IE_Tocharian','02_IE_Indo_Aryan','03_IE_Iranian','04_IE_Baltic','05_IE_Slavic','06_IE_Germanic','07_IE_Celtic','08_IE_Romance','09_IE_Greek','10_IE_Albanian','11_IE_Armenian','12_IE_Anatolian','20_AA_Semitic','21_AA_Berber','22_AA_Egyptian','23_AA_Chadic','24_AA_Cushitic','25_AA_Omotic','30_AL_Mongolic','31_AL_Turkic','32_TU_Tungusic','33_UR_Uralic','51_KA_Kartvelian','60_DR_Dravidian','IE_Creole')")
DistanceLimit <- 76 # Pairwise Distance limit
# Data Frame to store output
MonitorClusters <- data.frame(Macro_Family=character(),Sample_Size=double(),SelfSmallerThan78=double(),NostraticSmallerThan78=double(),NotinNostraticSmallerThan78=double(),DummiesSmallerThan78=double())
# Progress bar over the macro families iterated below.
# Fix: the original used length(Clades), but 'Clades' is never defined
# in this script and would raise an error at runtime; the loop iterates
# over MacroFamilies.
pb <- txtProgressBar(min = 0, max = length(MacroFamilies), style = 3)
i <- 0
# Loop to produce results
# For each macro family, compute the share of cross-family pairwise
# comparisons whose Distance falls below DistanceLimit, separately for
# within-family pairs, Nostratic partners, non-Nostratic partners and
# the 'Dummy' control groups. NbrWords > 14 filters out sparse pairs.
for(MacroFamily in MacroFamilies) {
i <- i+1
setTxtProgressBar(pb,i)
sampleSize <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareDUMMIES_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND NbrWords >14"))
CompareDUMMIES_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareIN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareIN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Distance < ",DistanceLimit," AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
# one row per macro family: sample size and the four below-limit ratios
newrow <- data.frame(MacroFamily,nrow(sampleSize),nrow(CompareSELF_BELOW_LIMIT)/nrow(CompareSELF_ALL),nrow(CompareIN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareIN_NOSTRATIC_ALL),nrow(CompareNOT_IN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareNOT_IN_NOSTRATIC_ALL),nrow(CompareDUMMIES_BELOW_LIMIT)/nrow(CompareDUMMIES_ALL))
MonitorClusters <- rbind(MonitorClusters,newrow)
}
colnames(MonitorClusters) <- c("Macro family","Sample size","Performance self", "Performance Nostratic", "Performance NON Nostratic", "Performance Dummies")
write.csv2(file="LongRangeAnalysis.csv", MonitorClusters)
#########################################################################################################################
# CALL A SINGLE COMPARISON #
############################
# Same queries as the loop body, run once for a single (here: control)
# family, with the results printed rather than accumulated.
MacroFamily <- "Dummy"
Nostratic <- c("('00_IE_PIE','01_IE_Tocharian','02_IE_Indo_Aryan','03_IE_Iranian','04_IE_Baltic','05_IE_Slavic','06_IE_Germanic','07_IE_Celtic','08_IE_Romance','09_IE_Greek','10_IE_Albanian','11_IE_Armenian','12_IE_Anatolian','20_AA_Semitic','21_AA_Berber','22_AA_Egyptian','23_AA_Chadic','24_AA_Cushitic','25_AA_Omotic','30_AL_Mongolic','31_AL_Turkic','32_TU_Tungusic','33_UR_Uralic','51_KA_Kartvelian','60_DR_Dravidian','IE_Creole')")
DistanceLimit <- 76 # Pairwise Distance limit
sampleSize <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareDUMMIES_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND NbrWords >14"))
CompareDUMMIES_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareIN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareIN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Distance < ",DistanceLimit," AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
print(paste("Sample size: ",nrow(sampleSize)))
print(paste("Self < limit: ",nrow(CompareSELF_BELOW_LIMIT)/nrow(CompareSELF_ALL)))
print(paste("Dummies < limit: ",nrow(CompareDUMMIES_BELOW_LIMIT)/nrow(CompareDUMMIES_ALL)))
print(paste("Nostratic < limit: ",nrow(CompareIN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareIN_NOSTRATIC_ALL)))
print(paste("Not in Nostratic < limit: ",nrow(CompareNOT_IN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareNOT_IN_NOSTRATIC_ALL)))
print(paste(CompareIN_NOSTRATIC_BELOW_LIMIT$L1,"-",CompareIN_NOSTRATIC_BELOW_LIMIT$L2,"-",CompareIN_NOSTRATIC_BELOW_LIMIT$Distance))
|
/MacroFamilies.R
|
no_license
|
eLinguist/Materials
|
R
| false
| false
| 7,808
|
r
|
##################################################################################
# #
# eLinguistics.net -> Long range relationship analysis - Focus on NOSTRATIC #
# #
# Vincent Beaufils, 14.12.2017 #
# #
##################################################################################
library(sqldf)
# NOTE(review): hard-coded absolute path; breaks on any other machine
setwd("C:/Sprachgenetik/Share/Materials")
#setwd("c:/Forschung/")
# pairwise language comparisons; the queries below use the columns
# Group1, Group2, Distance and NbrWords
data <- read.csv("PairwiseComparisons.csv",header=T,stringsAsFactors=FALSE, sep=";")
#########################################################################################################################
# CALL ALL MACRO FAMILIES IN A LOOP -> OUTPUT IN CSV #
######################################################
# substrings matched (SQL LIKE) against Group1/Group2 per macro family
MacroFamilies <- c("_IE_","_AA_","Uralic","Turkic","Mongolic","Tungusic","Dravidian","Kartvelian","Caucasian","Niger_Congo","Bantoo","Sino_Tibetan","Austroasiatic","_MP_","Tai_Kadai","Eskimo","Korean","Japan","Ainu","Burushaski","Basque","Summerian","Elamite")
# SQL 'IN' list of groups forming the Nostratic macro-hypothesis
Nostratic <- c("('00_IE_PIE','01_IE_Tocharian','02_IE_Indo_Aryan','03_IE_Iranian','04_IE_Baltic','05_IE_Slavic','06_IE_Germanic','07_IE_Celtic','08_IE_Romance','09_IE_Greek','10_IE_Albanian','11_IE_Armenian','12_IE_Anatolian','20_AA_Semitic','21_AA_Berber','22_AA_Egyptian','23_AA_Chadic','24_AA_Cushitic','25_AA_Omotic','30_AL_Mongolic','31_AL_Turkic','32_TU_Tungusic','33_UR_Uralic','51_KA_Kartvelian','60_DR_Dravidian','IE_Creole')")
DistanceLimit <- 76 # Pairwise Distance limit
# Data Frame to store output
MonitorClusters <- data.frame(Macro_Family=character(),Sample_Size=double(),SelfSmallerThan78=double(),NostraticSmallerThan78=double(),NotinNostraticSmallerThan78=double(),DummiesSmallerThan78=double())
# Progress bar over the macro families iterated below.
# Fix: the original used length(Clades), but 'Clades' is never defined
# in this script and would raise an error at runtime; the loop iterates
# over MacroFamilies.
pb <- txtProgressBar(min = 0, max = length(MacroFamilies), style = 3)
i <- 0
# Loop to produce results
# For each macro family, compute the share of cross-family pairwise
# comparisons whose Distance falls below DistanceLimit, separately for
# within-family pairs, Nostratic partners, non-Nostratic partners and
# the 'Dummy' control groups. NbrWords > 14 filters out sparse pairs.
for(MacroFamily in MacroFamilies) {
i <- i+1
setTxtProgressBar(pb,i)
sampleSize <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareDUMMIES_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND NbrWords >14"))
CompareDUMMIES_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareIN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareIN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Distance < ",DistanceLimit," AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
# one row per macro family: sample size and the four below-limit ratios
newrow <- data.frame(MacroFamily,nrow(sampleSize),nrow(CompareSELF_BELOW_LIMIT)/nrow(CompareSELF_ALL),nrow(CompareIN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareIN_NOSTRATIC_ALL),nrow(CompareNOT_IN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareNOT_IN_NOSTRATIC_ALL),nrow(CompareDUMMIES_BELOW_LIMIT)/nrow(CompareDUMMIES_ALL))
MonitorClusters <- rbind(MonitorClusters,newrow)
}
colnames(MonitorClusters) <- c("Macro family","Sample size","Performance self", "Performance Nostratic", "Performance NON Nostratic", "Performance Dummies")
write.csv2(file="LongRangeAnalysis.csv", MonitorClusters)
#########################################################################################################################
# CALL A SINGLE COMPARISON #
############################
# Same queries as the loop body, run once for a single (here: control)
# family, with the results printed rather than accumulated.
MacroFamily <- "Dummy"
Nostratic <- c("('00_IE_PIE','01_IE_Tocharian','02_IE_Indo_Aryan','03_IE_Iranian','04_IE_Baltic','05_IE_Slavic','06_IE_Germanic','07_IE_Celtic','08_IE_Romance','09_IE_Greek','10_IE_Albanian','11_IE_Armenian','12_IE_Anatolian','20_AA_Semitic','21_AA_Berber','22_AA_Egyptian','23_AA_Chadic','24_AA_Cushitic','25_AA_Omotic','30_AL_Mongolic','31_AL_Turkic','32_TU_Tungusic','33_UR_Uralic','51_KA_Kartvelian','60_DR_Dravidian','IE_Creole')")
DistanceLimit <- 76 # Pairwise Distance limit
sampleSize <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND NbrWords >14"))
CompareSELF_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", MacroFamily, "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareDUMMIES_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND NbrWords >14"))
CompareDUMMIES_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords >14"))
CompareIN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareIN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE 'IE_Creole' AND Group2 NOT LIKE '%", "Dummy", "%' AND Distance < ",DistanceLimit," AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_ALL <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
CompareNOT_IN_NOSTRATIC_BELOW_LIMIT <- sqldf(paste0("SELECT * FROM data WHERE Group1 LIKE '%", MacroFamily, "%' AND Group2 NOT IN ",Nostratic," AND Group2 NOT LIKE '%",MacroFamily,"%' AND Distance < ",DistanceLimit," AND Group2 NOT LIKE '%", "Dummy", "%' AND NbrWords > 14"))
print(paste("Sample size: ",nrow(sampleSize)))
print(paste("Self < limit: ",nrow(CompareSELF_BELOW_LIMIT)/nrow(CompareSELF_ALL)))
print(paste("Dummies < limit: ",nrow(CompareDUMMIES_BELOW_LIMIT)/nrow(CompareDUMMIES_ALL)))
print(paste("Nostratic < limit: ",nrow(CompareIN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareIN_NOSTRATIC_ALL)))
print(paste("Not in Nostratic < limit: ",nrow(CompareNOT_IN_NOSTRATIC_BELOW_LIMIT)/nrow(CompareNOT_IN_NOSTRATIC_ALL)))
print(paste(CompareIN_NOSTRATIC_BELOW_LIMIT$L1,"-",CompareIN_NOSTRATIC_BELOW_LIMIT$L2,"-",CompareIN_NOSTRATIC_BELOW_LIMIT$Distance))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_nani.R
\name{as_nani}
\alias{as_nani}
\title{as_nani}
\usage{
as_nani(x, input, multiplier = NULL)
}
\arguments{
\item{x}{data to transform into the `nani` format.}
\item{input}{the type of input.}
\item{multiplier}{scaling factor (e.g. to calculate hectares instead of km^2^).}
}
\description{
Returns a tibble that fits the `nani` package format.
}
\details{
Inputs can be calculated in several ways depending on the available datasets.
The `nani` package implements widely used calculation methods, but covering the entire set of possible calculation methods is unrealistic.
The function `as_nani` allows users to transform their data into a format suitable for the `nani` package. For instance,
data on animal excretion can be calculated with a different method and then transformed into a format suitable for the `nani` package.
Several options are provided; use the function `as_nani_options` for details.
}
\seealso{
as_nani_options
}
|
/man/as_nani.Rd
|
no_license
|
shekharsg/nani
|
R
| false
| true
| 1,020
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_nani.R
\name{as_nani}
\alias{as_nani}
\title{as_nani}
\usage{
as_nani(x, input, multiplier = NULL)
}
\arguments{
\item{x}{data to transform into the `nani` format.}
\item{input}{the type of input.}
\item{multiplier}{scaling factor (e.g. to calculate hectares instead of km^2^).}
}
\description{
Returns a tibble that fits the `nani` package format.
}
\details{
Inputs can be calculated in several ways depending on the available datasets.
The `nani` package implements widely used calculation methods, but covering the entire set of possible calculation methods is unrealistic.
The function `as_nani` allows users to transform their data into a format suitable for the `nani` package. For instance,
data on animal excretion can be calculated with a different method and then transformed into a format suitable for the `nani` package.
Several options are provided; use the function `as_nani_options` for details.
}
\seealso{
as_nani_options
}
|
#compare normally distributed with uniform points
# type 1: 100 points uniform on [-100, 100]^2;
# type 2: 100 bivariate-normal points (sd = 20) centered at the origin
x<-cbind(1,runif(100,-100,100), runif(100,-100,100))
x<-rbind(x, cbind(2,rnorm(100,0,20), rnorm(100,0,20)))
colnames(x) <- c("type","x","y")
# Pairwise relation function for get.tau(): given two data rows whose
# first element is the point type, return 3 when the first point is not
# type 2, 1 when both points are type 2, and 2 otherwise.
fun <- function(a, b) {
    if (a[1] != 2) {
        return(3)
    } else if (b[1] == 2) {
        return(1)
    } else {
        return(2)
    }
}
# distance bands: [0,10), [10,20), ..., [90,100), with band midpoints
r.max<-seq(10,100,10)
r.min<-seq(0,90,10)
r.mid <- (r.max+r.min)/2
# tau statistic per band, plus a bootstrap interquartile (25-75%)
# interval from 50 bootstrap iterations
tau<-get.tau(x,fun,r=r.max,r.low=r.min)
tau.boot<-get.tau.bootstrap(x,fun,r=r.max,r.low=r.min,boot.iter=50)
tau.ci<-apply(tau.boot,2,quantile,probs=c(0.25,0.75))
# plot tau on a log scale with a reference line at 1 and the interval
plot(r.mid, tau ,ylim=c(min(tau.ci),max(tau.ci)), type="l", log="y")
lines(c(0,100),c(1,1), lty=3, col="grey")
lines(r.mid, tau.ci[1,] , lty=2)
lines(r.mid, tau.ci[2,] , lty=2)
|
/example/get_tau_bootstrap.R
|
permissive
|
HopkinsIDD/spatialtau
|
R
| false
| false
| 695
|
r
|
# Demonstration of get.tau / get.tau.bootstrap on synthetic point data.
#compare normally distributed with uniform points
# Type 1: 100 points uniform on [-100, 100]^2; type 2: 100 points N(0, 20^2).
x<-cbind(1,runif(100,-100,100), runif(100,-100,100))
x<-rbind(x, cbind(2,rnorm(100,0,20), rnorm(100,0,20)))
colnames(x) <- c("type","x","y")
# Pair-relation function passed to get.tau: classifies each ordered pair of
# rows by their `type` column (element 1 of each row vector).
fun<-function(a,b) {
if(a[1]!=2) return(3)
if (b[1]==2) return(1)
return(2)
}
# Concentric distance bands of width 10 covering [0, 100].
r.max<-seq(10,100,10)
r.min<-seq(0,90,10)
r.mid <- (r.max+r.min)/2
# Point estimate of tau per band, plus 50 bootstrap replicates.
tau<-get.tau(x,fun,r=r.max,r.low=r.min)
tau.boot<-get.tau.bootstrap(x,fun,r=r.max,r.low=r.min,boot.iter=50)
# 25th/75th percentile envelope across bootstrap replicates, per band.
tau.ci<-apply(tau.boot,2,quantile,probs=c(0.25,0.75))
# Tau vs band midpoint on a log y-axis; dotted grey line marks tau = 1
# (no clustering); dashed lines trace the bootstrap quartile envelope.
plot(r.mid, tau ,ylim=c(min(tau.ci),max(tau.ci)), type="l", log="y")
lines(c(0,100),c(1,1), lty=3, col="grey")
lines(r.mid, tau.ci[1,] , lty=2)
lines(r.mid, tau.ci[2,] , lty=2)
|
## CO2 d13C data from Picarro
## 03092018
setwd("C:/Users/Chris Wilson/Desktop/BRU Respiration Logs/Picarro_Logs/03092018")
library(lubridate); library(ggplot2); library(dplyr)
CO2_log <- read.table("03092018_p1c1.dat",header = T)
CO2_log$time <- hms(CO2_log$TIME)
CO2_log$t <- time_length(CO2_log$time) - min(time_length(CO2_log$time))
CO2_log$CO2_smooth <- smooth.spline(CO2_log$t, CO2_log$X12CO2)[[2]]
ggplot(CO2_log, aes(x = t)) + ylab("CO2 (ppm)") +
theme_bw() + geom_point(aes(y = CO2_smooth)) +
geom_point(aes(x = t, y = -50*Delta_Raw_iCO2), color = "red") +
scale_y_continuous(limits = c(0,1000))
ggplot(CO2_log, aes(x = t, y = Delta_Raw_iCO2)) + geom_point() +
geom_line(color = "black",size = 0.5) + scale_y_continuous(limits = c(-20,5)) +
ylab("d13C") + theme_bw() + scale_x_continuous(breaks = seq(0,1900,100));
ggplot(CO2_log, aes(x = t, y = Delta_30s_iCO2)) + geom_point() +
geom_line(color = "black",size = 0.5) + scale_y_continuous(limits = c(-20,5)) +
ylab("d13C") + theme_bw() + scale_x_continuous(breaks = seq(0,1900,100));
# Evaluating derivative criterion for identifying plateaus
CO2_deriv <- diff(CO2_log$CO2_smooth)/diff(CO2_log$t)
plot(CO2_log$t[-1],CO2_deriv)
deriv_df <- data.frame(x = CO2_log$t[-1], y = CO2_deriv)
ggplot(deriv_df, aes(x=x,y=y)) + geom_point() + scale_y_continuous(limits = c(-1,1))
## Suggests that tolerances should be |dCO2/dt| around 0.25
CO2_log$CO2_deriv <- c(0,CO2_deriv);
CO2_log_plateaus <- CO2_log %>% filter(CO2_deriv < 0.5 & CO2_deriv > - 0.5, X12CO2 > 200)
ggplot(CO2_log_plateaus, aes(x = t)) + ylab("CO2 (ppm)") +
theme_bw() + geom_point(aes(y = CO2_smooth)) +
geom_point(aes(x = t, y = -50*Delta_Raw_iCO2), color = "red") +
scale_y_continuous(limits = c(0,1000))
## BADOOM!
# Now need to separate syringes
syringe <- rep(0, length(CO2_log_plateaus$t));
syringe[1] <- 1
for(i in 2:length(syringe)){
if(CO2_log_plateaus$t[i] <= CO2_log_plateaus$t[i-1]+30){
syringe[i] <- syringe[i-1];
} else{
syringe[i] <- syringe[i-1] + 1;
}
if(syringe[i]>4) warning('Syringe[i] > 4!')
}
# BADOOM!
CO2_log_plateaus$syringe <- syringe
keeling_data <- CO2_log_plateaus %>% group_by(syringe) %>% summarize(mCO2 = mean(X12CO2,na.rm = T),
md13C = mean(Delta_Raw_iCO2,na.rm = T)) %>%
mutate(inv_CO2 = 1/mCO2);
plot(keeling_data$inv_CO2, keeling_data$md13C)
collar_Reco_d13C <- lm(md13C ~ inv_CO2, keeling_data);
summary(collar_Reco_d13C)
# Intervals 125-250, 375-480, 625-710, 875-950
library(dplyr)
CO2_log %>% filter(t > 800 & t < 900) %>% summarise(meand13C = mean(Delta_Raw_iCO2),
meanCO2 = mean(X12CO2))
### E.g. manual extraction of Rhizoma peanut
peanut_delta_p2 <- c(-8.935, -8.2126, -7.22, -7.114);
peanut_CO2_p2 <- c(354,396,469,492);
peanut_CO2_p2_inv <- 1/peanut_CO2_p2;
peanut_source_delta_p2 <- lm(peanut_delta_p2 ~ peanut_CO2_p2_inv)
summary(peanut_source_delta_p2); # Intercept is -2.25 [0.2475]
# Background delta
# Back-calculate the background (ambient) isotopic delta from an observed
# sample, given a source signature and a reference concentration.
#
# Args:
#   delta_a: observed d13C of the air sample.
#   c_a:     CO2 concentration of the air sample (ppm).
#   delta_s: d13C of the source (default -2.25, from the Keeling fit above).
#   c_b:     background CO2 concentration in ppm (default 354).
# Returns: the inferred background d13C (vectorized over the inputs).
delta_back <- function(delta_a, c_a, delta_s = -2.25, c_b = 354) {
  excess <- delta_a - delta_s
  excess * (c_a / c_b) + delta_s
}
delta_back(peanut_delta_p2, peanut_CO2_p2); # -8.9
peanut_delta_p1 <- c(-2.286, -1.10, -0.21, 0.27);
peanut_CO2_p1 <- c(423,442,496,519);
peanut_CO2_p1_inv <- 1/peanut_CO2_p2;
peanut_source_delta_p1 <- lm(peanut_delta_p1 ~ peanut_CO2_p1_inv)
summary(peanut_source_delta_p1); # Intercept is 6.41 [0.65]
delta_back(peanut_delta_p1, peanut_CO2_p1, 6.41, 423); # -1.54
# Field Logs
setwd("C:/Users/Chris Wilson/Desktop/BRU Respiration Logs/Field_Logs/03092018")
setwd("C:/Users/Chris Wilson/Desktop/BRU Respiration Logs/Field_Logs/03152018")
list.files()
field_CO2 <- read.table("p5_a1.txt", header = TRUE)
str(field_CO2);
field_CO2$sec <- seq(1,1530,1);
?apply
filenames2 <- list.files(pattern = "*.txt")
flux_df_list <- lapply(filenames2, read.table, header = T)
# OK, I have a list, but it is unnamed.
names(flux_df_list) <- substr(filenames2,1,5)
flux_est <- rep(0,length(names(flux_df_list)));
flux_se <- rep(0,length(flux_est));
# For each field log: smooth the CO2 trace, keep near-ambient readings,
# split them into individual measurements by time gaps, fit per-measure
# linear CO2-vs-time slopes, and store a pooled slope +/- SE per file.
# NOTE(review): the original loop (a) reused `i` for the inner measurement
# loop, clobbering the file index so flux_est/flux_se were written to the
# wrong slot, and (b) referenced an undefined `flux_model`. Both fixed below.
for (i in seq_along(flux_est)) {
  field_CO2 <- flux_df_list[[i]]
  field_CO2$sec <- seq_len(length(field_CO2$Time.H.M.S.))
  field_CO2$CO2_smooth <- smooth.spline(field_CO2$sec, field_CO2$CO2.ppm.)[[2]]
  plot_trace <- ggplot(field_CO2, aes(x = sec, y = CO2_smooth)) + geom_line()
  print(plot_trace)
  ambient <- 400
  # Keep readings within +/- 15 ppm of ambient, after an initial burn-in.
  flux_df <- field_CO2 %>% filter(CO2.ppm. > ambient - 15, CO2.ppm. < ambient + 15,
                                  sec > 100) # will need to figure out better time filter
  flux_deriv <- diff(flux_df$CO2.ppm.) / diff(flux_df$sec)
  plot(flux_df$sec[-1], flux_deriv)
  flux_df$flux_deriv <- c(0, flux_deriv)
  flux_df2 <- flux_df %>% filter(flux_deriv > -2.5, flux_deriv < 2.5)
  # Label contiguous runs of retained points; a gap > 20 s starts a new
  # measurement.
  measure <- rep(0, length(flux_df2$flux_deriv))
  measure[1] <- 1
  if (length(measure) >= 2) {
    for (j in 2:length(measure)) {  # was `i`: shadowed the file index
      if (flux_df2$sec[j] <= flux_df2$sec[j - 1] + 20) {
        measure[j] <- measure[j - 1]
      } else {
        measure[j] <- measure[j - 1] + 1
      }
      if (measure[j] > 3) warning("measure[j] > 3!", j)
    }
  }
  # BADOOM!
  flux_df2$measure <- measure
  library(broom); library(dplyr); library(ggplot2)
  ggplot(flux_df2, aes(x = sec, y = flux_deriv, color = measure)) + geom_point()
  # summarizing differentials
  fluxes_df <- flux_df2 %>% group_by(measure) %>% summarize(m_flux = mean(flux_deriv, na.rm = T),
                                                            se_flux = sd(flux_deriv, na.rm = T))
  # fitting lines
  fluxes_lm <- flux_df2 %>% group_by(measure) %>% do(fitSlope = lm(CO2.ppm. ~ sec, data = .))
  lm_coefs <- tidy(fluxes_lm, fitSlope)
  print(lm_coefs)
  plot_fit <- ggplot() + geom_point(data = flux_df2, aes(x = sec, y = CO2.ppm., color = measure)) +
    geom_abline(aes(intercept = lm_coefs$estimate[1], slope = lm_coefs$estimate[2]), color = "red") +
    geom_abline(aes(intercept = lm_coefs$estimate[3], slope = lm_coefs$estimate[4]), color = "red") +
    geom_abline(aes(intercept = lm_coefs$estimate[5], slope = lm_coefs$estimate[6]), color = "red")
  print(plot_fit)
  # Pooled slope across all retained plateau points for this file.
  # NOTE(review): `flux_model` was never defined in the original; a pooled
  # regression matching the per-measure fits above is fitted here -- confirm
  # this is the intended summary.
  flux_model <- lm(CO2.ppm. ~ sec, data = flux_df2)
  flux_est[i] <- coef(summary(flux_model))[2, 1]
  flux_se[i] <- coef(summary(flux_model))[2, 2]
}
### Playing around with smoothers
spl <- smooth.spline(CO2_log$t[-893], y=CO2_log$Delta_Raw_iCO2[-893])
pred <- predict(spl)
plot (CO2_log$t, CO2_log$Delta_Raw_iCO2, log="xy")
lines(pred, col=2)
ycs.prime <- diff(CO2_log$X12CO2[-893])/diff(CO2_log$t[-893])
pred.prime <- predict(spl, deriv=1)
C_pore <- 450
C_atm <- rep(0,3*10^2)
C_atm[1] <- 350
k1 <- 0.03
k2 <- 1
for(i in 2:length(C_atm)){
C_atm[i] <- C_atm[i-1] + k1*(C_pore - C_atm[i-1]) + k2;
}
plot(C_atm)
time <- seq(1,length(C_atm),1)
which(C_atm > 450)
c_diff <- diff(C_atm)/diff(time)
plot(c_diff)
y <- rnorm(5,0,1)
param_proposal <- seq(-5,5,0.1)
log_like <- rep(0,length(param_proposal))
for(i in 1:length(param_proposal)){
log_like[i] <- -sum(log(dnorm(y,param_proposal[i],1)))
}
plot(param_proposal,exp(-log_like))
sum(exp(-log_like))
|
/Processing File.R
|
no_license
|
chwilson/BRU_Respiration_DataProcessing
|
R
| false
| false
| 7,026
|
r
|
## CO2 d13C data from Picarro
## 03092018
setwd("C:/Users/Chris Wilson/Desktop/BRU Respiration Logs/Picarro_Logs/03092018")
library(lubridate); library(ggplot2); library(dplyr)
CO2_log <- read.table("03092018_p1c1.dat",header = T)
CO2_log$time <- hms(CO2_log$TIME)
CO2_log$t <- time_length(CO2_log$time) - min(time_length(CO2_log$time))
CO2_log$CO2_smooth <- smooth.spline(CO2_log$t, CO2_log$X12CO2)[[2]]
ggplot(CO2_log, aes(x = t)) + ylab("CO2 (ppm)") +
theme_bw() + geom_point(aes(y = CO2_smooth)) +
geom_point(aes(x = t, y = -50*Delta_Raw_iCO2), color = "red") +
scale_y_continuous(limits = c(0,1000))
ggplot(CO2_log, aes(x = t, y = Delta_Raw_iCO2)) + geom_point() +
geom_line(color = "black",size = 0.5) + scale_y_continuous(limits = c(-20,5)) +
ylab("d13C") + theme_bw() + scale_x_continuous(breaks = seq(0,1900,100));
ggplot(CO2_log, aes(x = t, y = Delta_30s_iCO2)) + geom_point() +
geom_line(color = "black",size = 0.5) + scale_y_continuous(limits = c(-20,5)) +
ylab("d13C") + theme_bw() + scale_x_continuous(breaks = seq(0,1900,100));
# Evaluating derivative criterion for identifying plateaus
CO2_deriv <- diff(CO2_log$CO2_smooth)/diff(CO2_log$t)
plot(CO2_log$t[-1],CO2_deriv)
deriv_df <- data.frame(x = CO2_log$t[-1], y = CO2_deriv)
ggplot(deriv_df, aes(x=x,y=y)) + geom_point() + scale_y_continuous(limits = c(-1,1))
## Suggests that tolerances should be |dCO2/dt| around 0.25
CO2_log$CO2_deriv <- c(0,CO2_deriv);
CO2_log_plateaus <- CO2_log %>% filter(CO2_deriv < 0.5 & CO2_deriv > - 0.5, X12CO2 > 200)
ggplot(CO2_log_plateaus, aes(x = t)) + ylab("CO2 (ppm)") +
theme_bw() + geom_point(aes(y = CO2_smooth)) +
geom_point(aes(x = t, y = -50*Delta_Raw_iCO2), color = "red") +
scale_y_continuous(limits = c(0,1000))
## BADOOM!
# Now need to separate syringes
syringe <- rep(0, length(CO2_log_plateaus$t));
syringe[1] <- 1
for(i in 2:length(syringe)){
if(CO2_log_plateaus$t[i] <= CO2_log_plateaus$t[i-1]+30){
syringe[i] <- syringe[i-1];
} else{
syringe[i] <- syringe[i-1] + 1;
}
if(syringe[i]>4) warning('Syringe[i] > 4!')
}
# BADOOM!
CO2_log_plateaus$syringe <- syringe
keeling_data <- CO2_log_plateaus %>% group_by(syringe) %>% summarize(mCO2 = mean(X12CO2,na.rm = T),
md13C = mean(Delta_Raw_iCO2,na.rm = T)) %>%
mutate(inv_CO2 = 1/mCO2);
plot(keeling_data$inv_CO2, keeling_data$md13C)
collar_Reco_d13C <- lm(md13C ~ inv_CO2, keeling_data);
summary(collar_Reco_d13C)
# Intervals 125-250, 375-480, 625-710, 875-950
library(dplyr)
CO2_log %>% filter(t > 800 & t < 900) %>% summarise(meand13C = mean(Delta_Raw_iCO2),
meanCO2 = mean(X12CO2))
### E.g. manual extraction of Rhizoma peanut
peanut_delta_p2 <- c(-8.935, -8.2126, -7.22, -7.114);
peanut_CO2_p2 <- c(354,396,469,492);
peanut_CO2_p2_inv <- 1/peanut_CO2_p2;
peanut_source_delta_p2 <- lm(peanut_delta_p2 ~ peanut_CO2_p2_inv)
summary(peanut_source_delta_p2); # Intercept is -2.25 [0.2475]
# Background delta
# Back-calculate the background (ambient) isotopic delta from an observed
# sample, given a source signature and a reference concentration.
#
# Args:
#   delta_a: observed d13C of the air sample.
#   c_a:     CO2 concentration of the air sample (ppm).
#   delta_s: d13C of the source (default -2.25, from the Keeling fit above).
#   c_b:     background CO2 concentration in ppm (default 354).
# Returns: the inferred background d13C (vectorized over the inputs).
delta_back <- function(delta_a, c_a, delta_s = -2.25, c_b = 354) {
  excess <- delta_a - delta_s
  excess * (c_a / c_b) + delta_s
}
delta_back(peanut_delta_p2, peanut_CO2_p2); # -8.9
peanut_delta_p1 <- c(-2.286, -1.10, -0.21, 0.27);
peanut_CO2_p1 <- c(423,442,496,519);
peanut_CO2_p1_inv <- 1/peanut_CO2_p2;
peanut_source_delta_p1 <- lm(peanut_delta_p1 ~ peanut_CO2_p1_inv)
summary(peanut_source_delta_p1); # Intercept is 6.41 [0.65]
delta_back(peanut_delta_p1, peanut_CO2_p1, 6.41, 423); # -1.54
# Field Logs
setwd("C:/Users/Chris Wilson/Desktop/BRU Respiration Logs/Field_Logs/03092018")
setwd("C:/Users/Chris Wilson/Desktop/BRU Respiration Logs/Field_Logs/03152018")
list.files()
field_CO2 <- read.table("p5_a1.txt", header = TRUE)
str(field_CO2);
field_CO2$sec <- seq(1,1530,1);
?apply
filenames2 <- list.files(pattern = "*.txt")
flux_df_list <- lapply(filenames2, read.table, header = T)
# OK, I have a list, but it is unnamed.
names(flux_df_list) <- substr(filenames2,1,5)
flux_est <- rep(0,length(names(flux_df_list)));
flux_se <- rep(0,length(flux_est));
# For each field log: smooth the CO2 trace, keep near-ambient readings,
# split them into individual measurements by time gaps, fit per-measure
# linear CO2-vs-time slopes, and store a pooled slope +/- SE per file.
# NOTE(review): the original loop (a) reused `i` for the inner measurement
# loop, clobbering the file index so flux_est/flux_se were written to the
# wrong slot, and (b) referenced an undefined `flux_model`. Both fixed below.
for (i in seq_along(flux_est)) {
  field_CO2 <- flux_df_list[[i]]
  field_CO2$sec <- seq_len(length(field_CO2$Time.H.M.S.))
  field_CO2$CO2_smooth <- smooth.spline(field_CO2$sec, field_CO2$CO2.ppm.)[[2]]
  plot_trace <- ggplot(field_CO2, aes(x = sec, y = CO2_smooth)) + geom_line()
  print(plot_trace)
  ambient <- 400
  # Keep readings within +/- 15 ppm of ambient, after an initial burn-in.
  flux_df <- field_CO2 %>% filter(CO2.ppm. > ambient - 15, CO2.ppm. < ambient + 15,
                                  sec > 100) # will need to figure out better time filter
  flux_deriv <- diff(flux_df$CO2.ppm.) / diff(flux_df$sec)
  plot(flux_df$sec[-1], flux_deriv)
  flux_df$flux_deriv <- c(0, flux_deriv)
  flux_df2 <- flux_df %>% filter(flux_deriv > -2.5, flux_deriv < 2.5)
  # Label contiguous runs of retained points; a gap > 20 s starts a new
  # measurement.
  measure <- rep(0, length(flux_df2$flux_deriv))
  measure[1] <- 1
  if (length(measure) >= 2) {
    for (j in 2:length(measure)) {  # was `i`: shadowed the file index
      if (flux_df2$sec[j] <= flux_df2$sec[j - 1] + 20) {
        measure[j] <- measure[j - 1]
      } else {
        measure[j] <- measure[j - 1] + 1
      }
      if (measure[j] > 3) warning("measure[j] > 3!", j)
    }
  }
  # BADOOM!
  flux_df2$measure <- measure
  library(broom); library(dplyr); library(ggplot2)
  ggplot(flux_df2, aes(x = sec, y = flux_deriv, color = measure)) + geom_point()
  # summarizing differentials
  fluxes_df <- flux_df2 %>% group_by(measure) %>% summarize(m_flux = mean(flux_deriv, na.rm = T),
                                                            se_flux = sd(flux_deriv, na.rm = T))
  # fitting lines
  fluxes_lm <- flux_df2 %>% group_by(measure) %>% do(fitSlope = lm(CO2.ppm. ~ sec, data = .))
  lm_coefs <- tidy(fluxes_lm, fitSlope)
  print(lm_coefs)
  plot_fit <- ggplot() + geom_point(data = flux_df2, aes(x = sec, y = CO2.ppm., color = measure)) +
    geom_abline(aes(intercept = lm_coefs$estimate[1], slope = lm_coefs$estimate[2]), color = "red") +
    geom_abline(aes(intercept = lm_coefs$estimate[3], slope = lm_coefs$estimate[4]), color = "red") +
    geom_abline(aes(intercept = lm_coefs$estimate[5], slope = lm_coefs$estimate[6]), color = "red")
  print(plot_fit)
  # Pooled slope across all retained plateau points for this file.
  # NOTE(review): `flux_model` was never defined in the original; a pooled
  # regression matching the per-measure fits above is fitted here -- confirm
  # this is the intended summary.
  flux_model <- lm(CO2.ppm. ~ sec, data = flux_df2)
  flux_est[i] <- coef(summary(flux_model))[2, 1]
  flux_se[i] <- coef(summary(flux_model))[2, 2]
}
### Playing around with smoothers
spl <- smooth.spline(CO2_log$t[-893], y=CO2_log$Delta_Raw_iCO2[-893])
pred <- predict(spl)
plot (CO2_log$t, CO2_log$Delta_Raw_iCO2, log="xy")
lines(pred, col=2)
ycs.prime <- diff(CO2_log$X12CO2[-893])/diff(CO2_log$t[-893])
pred.prime <- predict(spl, deriv=1)
C_pore <- 450
C_atm <- rep(0,3*10^2)
C_atm[1] <- 350
k1 <- 0.03
k2 <- 1
for(i in 2:length(C_atm)){
C_atm[i] <- C_atm[i-1] + k1*(C_pore - C_atm[i-1]) + k2;
}
plot(C_atm)
time <- seq(1,length(C_atm),1)
which(C_atm > 450)
c_diff <- diff(C_atm)/diff(time)
plot(c_diff)
y <- rnorm(5,0,1)
param_proposal <- seq(-5,5,0.1)
log_like <- rep(0,length(param_proposal))
for(i in 1:length(param_proposal)){
log_like[i] <- -sum(log(dnorm(y,param_proposal[i],1)))
}
plot(param_proposal,exp(-log_like))
sum(exp(-log_like))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/execution.R
\name{executeDqChecks}
\alias{executeDqChecks}
\title{Execute DQ checks}
\usage{
executeDqChecks(
connectionDetails,
cdmDatabaseSchema,
resultsDatabaseSchema,
vocabDatabaseSchema = cdmDatabaseSchema,
cdmSourceName,
numThreads = 1,
sqlOnly = FALSE,
outputFolder = "output",
verboseMode = FALSE,
writeToTable = TRUE,
writeTableName = "dqdashboard_results",
checkLevels = c("TABLE", "FIELD", "CONCEPT"),
checkNames = c(),
cohortDefinitionId = c(),
cohortDatabaseSchema = resultsDatabaseSchema,
tablesToExclude = c(),
cdmVersion = "5.3.1",
tableCheckThresholdLoc = "default",
fieldCheckThresholdLoc = "default",
conceptCheckThresholdLoc = "default"
)
}
\arguments{
\item{connectionDetails}{A connectionDetails object for connecting to the CDM database}
\item{cdmDatabaseSchema}{The fully qualified database name of the CDM schema}
\item{resultsDatabaseSchema}{The fully qualified database name of the results schema}
\item{vocabDatabaseSchema}{The fully qualified database name of the vocabulary schema (default is to set it as the cdmDatabaseSchema)}
\item{cdmSourceName}{The name of the CDM data source}
\item{numThreads}{The number of concurrent threads to use to execute the queries}
\item{sqlOnly}{Should the SQLs be executed (FALSE) or just returned (TRUE)?}
\item{outputFolder}{The folder to output logs and SQL files to}
\item{verboseMode}{Boolean to determine if the console will show all execution steps. Default = FALSE}
\item{writeToTable}{Boolean to indicate if the check results will be written to the dqdashboard_results table
in the resultsDatabaseSchema. Default is TRUE.}
\item{checkLevels}{Choose which DQ check levels to execute. Default is all 3 (TABLE, FIELD, CONCEPT)}
\item{checkNames}{(OPTIONAL) Choose which check names to execute. Names can be found in inst/csv/OMOP_CDM_v[cdmVersion]_Check_Desciptions.csv}
\item{cohortDefinitionId}{The cohort definition id for the cohort you wish to run the DQD on. The package assumes a standard OHDSI cohort table called 'Cohort'
with the fields cohort_definition_id and subject_id.}
\item{cohortDatabaseSchema}{The schema where the cohort table is located.}
\item{tablesToExclude}{(OPTIONAL) Choose which CDM tables to exclude from the execution.}
\item{cdmVersion}{The CDM version to target for the data source. By default, 5.3.1 is used.}
\item{tableCheckThresholdLoc}{The location of the threshold file for evaluating the table checks. If not specified the default thresholds will be applied.}
\item{fieldCheckThresholdLoc}{The location of the threshold file for evaluating the field checks. If not specified the default thresholds will be applied.}
\item{conceptCheckThresholdLoc}{The location of the threshold file for evaluating the concept checks. If not specified the default thresholds will be applied.}
}
\value{
If sqlOnly = FALSE, a list object of results
}
\description{
Execute DQ checks
}
|
/man/executeDqChecks.Rd
|
permissive
|
MaximMoinat/DataQualityDashboard
|
R
| false
| true
| 3,015
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/execution.R
\name{executeDqChecks}
\alias{executeDqChecks}
\title{Execute DQ checks}
\usage{
executeDqChecks(
connectionDetails,
cdmDatabaseSchema,
resultsDatabaseSchema,
vocabDatabaseSchema = cdmDatabaseSchema,
cdmSourceName,
numThreads = 1,
sqlOnly = FALSE,
outputFolder = "output",
verboseMode = FALSE,
writeToTable = TRUE,
writeTableName = "dqdashboard_results",
checkLevels = c("TABLE", "FIELD", "CONCEPT"),
checkNames = c(),
cohortDefinitionId = c(),
cohortDatabaseSchema = resultsDatabaseSchema,
tablesToExclude = c(),
cdmVersion = "5.3.1",
tableCheckThresholdLoc = "default",
fieldCheckThresholdLoc = "default",
conceptCheckThresholdLoc = "default"
)
}
\arguments{
\item{connectionDetails}{A connectionDetails object for connecting to the CDM database}
\item{cdmDatabaseSchema}{The fully qualified database name of the CDM schema}
\item{resultsDatabaseSchema}{The fully qualified database name of the results schema}
\item{vocabDatabaseSchema}{The fully qualified database name of the vocabulary schema (default is to set it as the cdmDatabaseSchema)}
\item{cdmSourceName}{The name of the CDM data source}
\item{numThreads}{The number of concurrent threads to use to execute the queries}
\item{sqlOnly}{Should the SQLs be executed (FALSE) or just returned (TRUE)?}
\item{outputFolder}{The folder to output logs and SQL files to}
\item{verboseMode}{Boolean to determine if the console will show all execution steps. Default = FALSE}
\item{writeToTable}{Boolean to indicate if the check results will be written to the dqdashboard_results table
in the resultsDatabaseSchema. Default is TRUE.}
\item{checkLevels}{Choose which DQ check levels to execute. Default is all 3 (TABLE, FIELD, CONCEPT)}
\item{checkNames}{(OPTIONAL) Choose which check names to execute. Names can be found in inst/csv/OMOP_CDM_v[cdmVersion]_Check_Desciptions.csv}
\item{cohortDefinitionId}{The cohort definition id for the cohort you wish to run the DQD on. The package assumes a standard OHDSI cohort table called 'Cohort'
with the fields cohort_definition_id and subject_id.}
\item{cohortDatabaseSchema}{The schema where the cohort table is located.}
\item{tablesToExclude}{(OPTIONAL) Choose which CDM tables to exclude from the execution.}
\item{cdmVersion}{The CDM version to target for the data source. By default, 5.3.1 is used.}
\item{tableCheckThresholdLoc}{The location of the threshold file for evaluating the table checks. If not specified the default thresholds will be applied.}
\item{fieldCheckThresholdLoc}{The location of the threshold file for evaluating the field checks. If not specified the default thresholds will be applied.}
\item{conceptCheckThresholdLoc}{The location of the threshold file for evaluating the concept checks. If not specified the default thresholds will be applied.}
}
\value{
If sqlOnly = FALSE, a list object of results
}
\description{
Execute DQ checks
}
|
# Verify that get_test_landscape() returns a well-formed landscape:
# a data.frame of nItems food items with coordinates inside the arena.
test_that("Landscape export function", {
  nItems <- 1000L
  landsize <- 50.0
  nClusters <- 50L
  clusterSpread <- 1.0
  regen_time <- 100L
  land <- get_test_landscape(
    nItems = nItems,
    landsize = landsize,
    nClusters = nClusters,
    clusterSpread = clusterSpread,
    regen_time = regen_time
  )
  expect_s3_class(
    land, "data.frame"
  )
  expect_identical(
    colnames(land), c("x", "y", "tAvail")
  )
  expect_equal(
    nrow(land), nItems
  )
  # Coordinates must lie inside [0, landsize].
  expect_gte(
    min(land$x), 0.0
  )
  # BUG FIX: the upper-bound checks originally used min(), which is
  # trivially true whenever the lower-bound check passes; the maximum is
  # what must not exceed the arena size.
  expect_lte(
    max(land$x), landsize
  )
  expect_gte(
    min(land$y), 0.0
  )
  expect_lte(
    max(land$y), landsize
  )
})
|
/tests/testthat/test-export_landscape.R
|
permissive
|
pratikunterwegs/pathomove
|
R
| false
| false
| 640
|
r
|
# Verify that get_test_landscape() returns a well-formed landscape:
# a data.frame of nItems food items with coordinates inside the arena.
test_that("Landscape export function", {
  nItems <- 1000L
  landsize <- 50.0
  nClusters <- 50L
  clusterSpread <- 1.0
  regen_time <- 100L
  land <- get_test_landscape(
    nItems = nItems,
    landsize = landsize,
    nClusters = nClusters,
    clusterSpread = clusterSpread,
    regen_time = regen_time
  )
  expect_s3_class(
    land, "data.frame"
  )
  expect_identical(
    colnames(land), c("x", "y", "tAvail")
  )
  expect_equal(
    nrow(land), nItems
  )
  # Coordinates must lie inside [0, landsize].
  expect_gte(
    min(land$x), 0.0
  )
  # BUG FIX: the upper-bound checks originally used min(), which is
  # trivially true whenever the lower-bound check passes; the maximum is
  # what must not exceed the arena size.
  expect_lte(
    max(land$x), landsize
  )
  expect_gte(
    min(land$y), 0.0
  )
  expect_lte(
    max(land$y), landsize
  )
})
|
# Visualizing Twitter text data about Apple using word clouds.
# Pipeline: read tweets.csv -> build a tm corpus -> clean it (case,
# punctuation, stopwords) -> document-term matrix -> render word clouds.
# NOTE(review): rm(list=ls()) and inline install.packages() calls are
# interactive-session habits; consider removing them in reusable code.
rm(list=ls())
tweets = read.csv("tweets.csv", stringsAsFactors = FALSE)
str(tweets)
summary(tweets)
head(tweets)
# 1) Create a corpus using the Tweet variable
library(tm)
library(SnowballC)
corpus = Corpus(VectorSource(tweets$Tweet))
str(corpus)
summary(corpus)
corpus
corpus[[1]]$content
# 2) Convert the corpus to lowercase
corpus = tm_map(corpus, tolower)
corpus[[1]]
# Re-wrap documents after tolower so downstream tm transforms still work.
corpus = tm_map(corpus, PlainTextDocument)
# 3) Remove punctuation from the corpus
corpus = tm_map(corpus, removePunctuation)
corpus[[1]]$content
# 4) Remove all English-language stopwords
stopwords("english")[1:10]
length(stopwords("english"))
# "apple" is removed along with the stopwords -- presumably because it
# appears in nearly every tweet of this dataset and would dominate clouds.
corpus = tm_map(corpus, removeWords, c("apple", stopwords("english")))
corpus[[1]]$content
# 5) Build a document-term matrix out of the corpus
# matrix of word counts with rows - documents (tweets); columns - words in tweets (terms)
dtm = DocumentTermMatrix(corpus)
dtm
# 6) Convert the document-term matrix to a data frame called allTweets
allTweets = as.data.frame(as.matrix(dtm))
str(allTweets)
head(allTweets)
summary(allTweets)
ncol(allTweets)
# Building a word cloud
install.packages("wordcloud")
library(wordcloud)
?wordcloud
# a vector of the words in our dataset
colnames(allTweets)
# the frequency of each word across all tweets
colSums(allTweets)
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(2, 0.25))
# random.order = FALSE places the most frequent words in the center.
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(2, 0.25), random.order = FALSE)
# Cloud restricted to tweets with average sentiment <= -1.
negativeTweets = subset(allTweets, tweets$Avg <= -1)
wordcloud(colnames(negativeTweets), colSums(negativeTweets), scale = c(4, 0.5))
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(4, 0.5), max.words = 150, rot.per=0.5 ,random.order = FALSE, random.color=TRUE)
# Color palettes for the clouds via RColorBrewer.
install.packages("RColorBrewer")
library("RColorBrewer")
display.brewer.all()
?brewer.pal
brewer.pal(7,"Accent")
brewer.pal(7,"Set2")
brewer.pal(7,"YlOrRd")
# Keep only the darker blues (indices 5:9, equivalently dropping 1:4)
# so the lightest words remain legible on a white background.
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(2, 0.25), colors=brewer.pal(9,"Blues")[c(5:9)])
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(3, 0.5), colors=brewer.pal(9,"Blues")[c(-1:-4)])
|
/word_cloud.R
|
no_license
|
natalya-patrikeeva/analytics-in-R
|
R
| false
| false
| 2,168
|
r
|
# Visualizing Twitter text data about Apple using word clouds.
# Pipeline: read tweets.csv -> build a tm corpus -> clean it (case,
# punctuation, stopwords) -> document-term matrix -> render word clouds.
# NOTE(review): rm(list=ls()) and inline install.packages() calls are
# interactive-session habits; consider removing them in reusable code.
rm(list=ls())
tweets = read.csv("tweets.csv", stringsAsFactors = FALSE)
str(tweets)
summary(tweets)
head(tweets)
# 1) Create a corpus using the Tweet variable
library(tm)
library(SnowballC)
corpus = Corpus(VectorSource(tweets$Tweet))
str(corpus)
summary(corpus)
corpus
corpus[[1]]$content
# 2) Convert the corpus to lowercase
corpus = tm_map(corpus, tolower)
corpus[[1]]
# Re-wrap documents after tolower so downstream tm transforms still work.
corpus = tm_map(corpus, PlainTextDocument)
# 3) Remove punctuation from the corpus
corpus = tm_map(corpus, removePunctuation)
corpus[[1]]$content
# 4) Remove all English-language stopwords
stopwords("english")[1:10]
length(stopwords("english"))
# "apple" is removed along with the stopwords -- presumably because it
# appears in nearly every tweet of this dataset and would dominate clouds.
corpus = tm_map(corpus, removeWords, c("apple", stopwords("english")))
corpus[[1]]$content
# 5) Build a document-term matrix out of the corpus
# matrix of word counts with rows - documents (tweets); columns - words in tweets (terms)
dtm = DocumentTermMatrix(corpus)
dtm
# 6) Convert the document-term matrix to a data frame called allTweets
allTweets = as.data.frame(as.matrix(dtm))
str(allTweets)
head(allTweets)
summary(allTweets)
ncol(allTweets)
# Building a word cloud
install.packages("wordcloud")
library(wordcloud)
?wordcloud
# a vector of the words in our dataset
colnames(allTweets)
# the frequency of each word across all tweets
colSums(allTweets)
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(2, 0.25))
# random.order = FALSE places the most frequent words in the center.
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(2, 0.25), random.order = FALSE)
# Cloud restricted to tweets with average sentiment <= -1.
negativeTweets = subset(allTweets, tweets$Avg <= -1)
wordcloud(colnames(negativeTweets), colSums(negativeTweets), scale = c(4, 0.5))
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(4, 0.5), max.words = 150, rot.per=0.5 ,random.order = FALSE, random.color=TRUE)
# Color palettes for the clouds via RColorBrewer.
install.packages("RColorBrewer")
library("RColorBrewer")
display.brewer.all()
?brewer.pal
brewer.pal(7,"Accent")
brewer.pal(7,"Set2")
brewer.pal(7,"YlOrRd")
# Keep only the darker blues (indices 5:9, equivalently dropping 1:4)
# so the lightest words remain legible on a white background.
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(2, 0.25), colors=brewer.pal(9,"Blues")[c(5:9)])
wordcloud(colnames(allTweets), colSums(allTweets), scale = c(3, 0.5), colors=brewer.pal(9,"Blues")[c(-1:-4)])
|
\name{reparam_LapDem_output}
\alias{reparam_LapDem_output}
\title{Re-parameterize LaplacesDemon MCMC output}
\usage{
reparam_LapDem_output(Fit, MyData,
transfun = transform_with_logistic)
}
\arguments{
\item{Fit}{The \code{\link[LaplacesDemon]{LaplacesDemon}}
output object.}
\item{MyData}{The
\code{\link[LaplacesDemon]{LaplacesDemon}} input data.}
\item{transfun}{The function to use for the
transformation.}
}
\value{
\code{Fit} The transformed MCMC output.
}
\description{
\code{\link[LaplacesDemon]{LaplacesDemon}} likes to run
its MCMC sampling on a simple number line. Thus, the
likelihood function etc. should transform the numbers
into the range desired, e.g. 0-1.
}
\details{
This function transforms the
\code{\link[LaplacesDemon]{LaplacesDemon}} output
}
\note{
Go BEARS!
}
\examples{
test=1
}
\author{
Nicholas J. Matzke \email{matzke@berkeley.edu}
}
\references{
\url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
LaplacesDemon_Tutorial
Matzke_2012_IBS
}
\seealso{
\code{\link[LaplacesDemon]{LaplacesDemon}}
}
|
/man/reparam_LapDem_output.Rd
|
no_license
|
pedroreys/BioGeoBEARS
|
R
| false
| false
| 1,108
|
rd
|
\name{reparam_LapDem_output}
\alias{reparam_LapDem_output}
\title{Re-parameterize LaplacesDemon MCMC output}
\usage{
reparam_LapDem_output(Fit, MyData,
transfun = transform_with_logistic)
}
\arguments{
\item{Fit}{The \code{\link[LaplacesDemon]{LaplacesDemon}}
output object.}
\item{MyData}{The
\code{\link[LaplacesDemon]{LaplacesDemon}} input data.}
\item{transfun}{The function to use for the
transformation.}
}
\value{
\code{Fit} The transformed MCMC output.
}
\description{
\code{\link[LaplacesDemon]{LaplacesDemon}} likes to run
its MCMC sampling on a simple number line. Thus, the
likelihood function etc. should transform the numbers
into the range desired, e.g. 0-1.
}
\details{
This function transforms the
\code{\link[LaplacesDemon]{LaplacesDemon}} output
}
\note{
Go BEARS!
}
\examples{
test=1
}
\author{
Nicholas J. Matzke \email{matzke@berkeley.edu}
}
\references{
\url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
LaplacesDemon_Tutorial
Matzke_2012_IBS
}
\seealso{
\code{\link[LaplacesDemon]{LaplacesDemon}}
}
|
# R script for plot4
# Renders a 2x2 panel of household power-consumption plots for the two
# target days (2007-02-01 and 2007-02-02) into plot4.png.
png(filename="plot4.png", width=480, height=480)
# "?" marks missing values in the raw semicolon-separated file.
data <- read.table("household_power_consumption.txt",
sep=";", header=TRUE, na.strings="?")
par(mfrow=c(2, 2))
# Subset to the two days; rows are minute-resolution, so day boundaries
# fall at multiples of 24*60 observations (used for the custom x-axes).
with(subset(data, Date == "1/2/2007" | Date == "2/2/2007"),
{
# plot1
# Top-left: global active power over the two-day window.
plot(seq_along(Time), Global_active_power, type="l", xaxt="n",
xlab="", ylab="Global Active Power")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
# plot2
# Top-right: voltage over time.
plot(seq_along(Time), Voltage, type="l", xaxt="n",
xlab="datetime", ylab="Voltage")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
# plot3
# Bottom-left: the three sub-metering channels overlaid, with legend.
plot(seq_along(Time), Sub_metering_1, type="l", xaxt="n",
xlab="", ylab="Energy sub metering")
lines(seq_along(Time), Sub_metering_2, col="red")
lines(seq_along(Time), Sub_metering_3, col="blue")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
legend("topright", col=c("black", "red", "blue"), lty=1,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# plot4
# Bottom-right: global reactive power over time.
plot(seq_along(Time), Global_reactive_power, type="l", xaxt="n",
xlab="datetime", ylab="Global_reactive_power")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
})
# Close the PNG device to flush the file to disk.
dev.off()
|
/plot4.R
|
no_license
|
cinnemonman/ExData_Plotting1
|
R
| false
| false
| 1,253
|
r
|
# R script for plot4
# Renders a 2x2 panel of household power-consumption plots for the two
# target days (2007-02-01 and 2007-02-02) into plot4.png.
png(filename="plot4.png", width=480, height=480)
# "?" marks missing values in the raw semicolon-separated file.
data <- read.table("household_power_consumption.txt",
sep=";", header=TRUE, na.strings="?")
par(mfrow=c(2, 2))
# Subset to the two days; rows are minute-resolution, so day boundaries
# fall at multiples of 24*60 observations (used for the custom x-axes).
with(subset(data, Date == "1/2/2007" | Date == "2/2/2007"),
{
# plot1
# Top-left: global active power over the two-day window.
plot(seq_along(Time), Global_active_power, type="l", xaxt="n",
xlab="", ylab="Global Active Power")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
# plot2
# Top-right: voltage over time.
plot(seq_along(Time), Voltage, type="l", xaxt="n",
xlab="datetime", ylab="Voltage")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
# plot3
# Bottom-left: the three sub-metering channels overlaid, with legend.
plot(seq_along(Time), Sub_metering_1, type="l", xaxt="n",
xlab="", ylab="Energy sub metering")
lines(seq_along(Time), Sub_metering_2, col="red")
lines(seq_along(Time), Sub_metering_3, col="blue")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
legend("topright", col=c("black", "red", "blue"), lty=1,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# plot4
# Bottom-right: global reactive power over time.
plot(seq_along(Time), Global_reactive_power, type="l", xaxt="n",
xlab="datetime", ylab="Global_reactive_power")
axis(1, at=c(0, 24*60, 2*24*60), labels=c("Thu", "Fri", "Sat"))
})
# Close the PNG device to flush the file to disk.
dev.off()
|
# Closure factory: wraps a matrix together with a cache slot for its
# inverse. Returns a list of accessors; replacing the matrix via set()
# invalidates the cached inverse.
function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(value) {
    x <<- value
    cached_inverse <<- NULL  # new matrix => cached inverse is stale
  }
  fetch_matrix <- function() x
  store_inverse <- function(inverse) cached_inverse <<- inverse
  fetch_inverse <- function() cached_inverse
  list(
    set = replace_matrix,
    get = fetch_matrix,
    setInverse = store_inverse,
    getInverse = fetch_inverse
  )
}
|
/cachematrix.R
|
no_license
|
TSS-DC/ProgrammingAssignment2
|
R
| false
| false
| 323
|
r
|
function (x = matrix())
{
    # Cache-enabled matrix wrapper (the classic makeCacheMatrix pattern):
    # holds a matrix `x` and memoises its inverse in `inv`.
    inv <- NULL
    set <- function(y) {
        # Replacing the stored matrix invalidates the cached inverse.
        x <<- y
        inv <<- NULL
    }
    get <- function() x
    # `solveMatrix` is expected to be the inverse computed by the caller
    # (typically via solve()); this function only stores it.
    setInverse <- function(solveMatrix) inv <<- solveMatrix
    getInverse <- function() inv
    list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_feature_count_vectorizer.R
\name{ft_count_vectorizer}
\alias{ft_count_vectorizer}
\alias{ml_vocabulary}
\title{Feature Transformation -- CountVectorizer (Estimator)}
\usage{
ft_count_vectorizer(x, input_col = NULL, output_col = NULL,
binary = FALSE, min_df = 1, min_tf = 1, vocab_size = 2^18,
uid = random_string("count_vectorizer_"), ...)
ml_vocabulary(model)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_col}{The name of the input column.}
\item{output_col}{The name of the output column.}
\item{binary}{Binary toggle to control the output vector values.
If \code{TRUE}, all nonzero counts (after \code{min_tf} filter applied)
are set to 1. This is useful for discrete probabilistic models that
model binary events rather than integer counts. Default: \code{FALSE}}
\item{min_df}{Specifies the minimum number of different documents a
term must appear in to be included in the vocabulary. If this is an
integer greater than or equal to 1, this specifies the number of
documents the term must appear in; if this is a double in [0,1), then
this specifies the fraction of documents. Default: 1.}
\item{min_tf}{Filter to ignore rare words in a document. For each
document, terms with frequency/count less than the given threshold
are ignored. If this is an integer greater than or equal to 1, then
this specifies a count (of times the term must appear in the document);
if this is a double in [0,1), then this specifies a fraction (out of
the document's token count). Default: 1.}
\item{vocab_size}{Build a vocabulary that only considers the top
\code{vocab_size} terms ordered by term frequency across the corpus.
Default: \code{2^18}.}
\item{uid}{A character string used to uniquely identify the feature transformer.}
\item{...}{Optional arguments; currently unused.}
\item{model}{A \code{ml_count_vectorizer_model}.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns a \code{ml_transformer},
a \code{ml_estimator}, or one of their subclasses. The object contains a pointer to
a Spark \code{Transformer} or \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the transformer or estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a transformer is constructed then
immediately applied to the input \code{tbl_spark}, returning a \code{tbl_spark}
}
\code{ml_vocabulary()} returns a vector of vocabulary built.
}
\description{
Extracts a vocabulary from document collections.
}
\details{
In the case where \code{x} is a \code{tbl_spark}, the estimator fits against \code{x}
to obtain a transformer, which is then immediately used to transform \code{x}, returning a \code{tbl_spark}.
}
\seealso{
See \url{http://spark.apache.org/docs/latest/ml-features.html} for
more information on the set of transformations available for DataFrame
columns in Spark.
Other feature transformers: \code{\link{ft_binarizer}},
\code{\link{ft_bucketizer}},
\code{\link{ft_chisq_selector}}, \code{\link{ft_dct}},
\code{\link{ft_elementwise_product}},
\code{\link{ft_feature_hasher}},
\code{\link{ft_hashing_tf}}, \code{\link{ft_idf}},
\code{\link{ft_imputer}},
\code{\link{ft_index_to_string}},
\code{\link{ft_interaction}}, \code{\link{ft_lsh}},
\code{\link{ft_max_abs_scaler}},
\code{\link{ft_min_max_scaler}}, \code{\link{ft_ngram}},
\code{\link{ft_normalizer}},
\code{\link{ft_one_hot_encoder}}, \code{\link{ft_pca}},
\code{\link{ft_polynomial_expansion}},
\code{\link{ft_quantile_discretizer}},
\code{\link{ft_r_formula}},
\code{\link{ft_regex_tokenizer}},
\code{\link{ft_sql_transformer}},
\code{\link{ft_standard_scaler}},
\code{\link{ft_stop_words_remover}},
\code{\link{ft_string_indexer}},
\code{\link{ft_tokenizer}},
\code{\link{ft_vector_assembler}},
\code{\link{ft_vector_indexer}},
\code{\link{ft_vector_slicer}}, \code{\link{ft_word2vec}}
}
\concept{feature transformers}
|
/man/ft_count_vectorizer.Rd
|
permissive
|
hhg12345/sparklyr
|
R
| false
| true
| 4,293
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_feature_count_vectorizer.R
\name{ft_count_vectorizer}
\alias{ft_count_vectorizer}
\alias{ml_vocabulary}
\title{Feature Transformation -- CountVectorizer (Estimator)}
\usage{
ft_count_vectorizer(x, input_col = NULL, output_col = NULL,
binary = FALSE, min_df = 1, min_tf = 1, vocab_size = 2^18,
uid = random_string("count_vectorizer_"), ...)
ml_vocabulary(model)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{input_col}{The name of the input column.}
\item{output_col}{The name of the output column.}
\item{binary}{Binary toggle to control the output vector values.
If \code{TRUE}, all nonzero counts (after \code{min_tf} filter applied)
are set to 1. This is useful for discrete probabilistic models that
model binary events rather than integer counts. Default: \code{FALSE}}
\item{min_df}{Specifies the minimum number of different documents a
term must appear in to be included in the vocabulary. If this is an
integer greater than or equal to 1, this specifies the number of
documents the term must appear in; if this is a double in [0,1), then
this specifies the fraction of documents. Default: 1.}
\item{min_tf}{Filter to ignore rare words in a document. For each
document, terms with frequency/count less than the given threshold
are ignored. If this is an integer greater than or equal to 1, then
this specifies a count (of times the term must appear in the document);
if this is a double in [0,1), then this specifies a fraction (out of
the document's token count). Default: 1.}
\item{vocab_size}{Build a vocabulary that only considers the top
\code{vocab_size} terms ordered by term frequency across the corpus.
Default: \code{2^18}.}
\item{uid}{A character string used to uniquely identify the feature transformer.}
\item{...}{Optional arguments; currently unused.}
\item{model}{A \code{ml_count_vectorizer_model}.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns a \code{ml_transformer},
a \code{ml_estimator}, or one of their subclasses. The object contains a pointer to
a Spark \code{Transformer} or \code{Estimator} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the transformer or estimator appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a transformer is constructed then
immediately applied to the input \code{tbl_spark}, returning a \code{tbl_spark}
}
\code{ml_vocabulary()} returns a vector of vocabulary built.
}
\description{
Extracts a vocabulary from document collections.
}
\details{
In the case where \code{x} is a \code{tbl_spark}, the estimator fits against \code{x}
to obtain a transformer, which is then immediately used to transform \code{x}, returning a \code{tbl_spark}.
}
\seealso{
See \url{http://spark.apache.org/docs/latest/ml-features.html} for
more information on the set of transformations available for DataFrame
columns in Spark.
Other feature transformers: \code{\link{ft_binarizer}},
\code{\link{ft_bucketizer}},
\code{\link{ft_chisq_selector}}, \code{\link{ft_dct}},
\code{\link{ft_elementwise_product}},
\code{\link{ft_feature_hasher}},
\code{\link{ft_hashing_tf}}, \code{\link{ft_idf}},
\code{\link{ft_imputer}},
\code{\link{ft_index_to_string}},
\code{\link{ft_interaction}}, \code{\link{ft_lsh}},
\code{\link{ft_max_abs_scaler}},
\code{\link{ft_min_max_scaler}}, \code{\link{ft_ngram}},
\code{\link{ft_normalizer}},
\code{\link{ft_one_hot_encoder}}, \code{\link{ft_pca}},
\code{\link{ft_polynomial_expansion}},
\code{\link{ft_quantile_discretizer}},
\code{\link{ft_r_formula}},
\code{\link{ft_regex_tokenizer}},
\code{\link{ft_sql_transformer}},
\code{\link{ft_standard_scaler}},
\code{\link{ft_stop_words_remover}},
\code{\link{ft_string_indexer}},
\code{\link{ft_tokenizer}},
\code{\link{ft_vector_assembler}},
\code{\link{ft_vector_indexer}},
\code{\link{ft_vector_slicer}}, \code{\link{ft_word2vec}}
}
\concept{feature transformers}
|
#CUMULATIVE WEIBULL DISTRIBUTION with 3 parameters
model <- list(
  name = c("Cumulative Weibull 3 par."),
  # Display formula. Fixed: the original `d(1 - exp(...))` parsed as a
  # *call* to a function d(); the multiplication form matches `exp` below.
  formula = expression(S == d*(1 - exp(-c*A^z)) ),
  exp = expression(d*(1 - exp(-c*A^z)) ),
  shape = "sigmoid",
  # The fitted asymptote is simply the parameter d.
  asymp = function(pars) pars["d"],
  parLim = c("Rplus", "Rplus", "Rplus"),
  custStart = function(data) c(10, .01, max(data$S)),
  # Crude starting values (d, c, z) obtained by linearising the model:
  # log(-log((d - S)/d)) = log(c) + z*log(A).
  init = function(data) {
    data = data[data$S != 0, ]
    # d: asymptote guess, 25% above the largest observed richness
    d = max(data$S) + max(data$S)/4
    # Linearised response; infinite values (S == d) are dropped via NA
    Z = log(-log((d - data$S)/d)) # = log(c) + z*log(A)
    Z[Z == Inf] = NA
    dat = data.frame("A" = log(data$A), "S" = Z)
    # c: intercept of the linearised fit, back-transformed
    c = exp(stats::lm(S ~ A, dat)$coefficients[[1]])
    # z: slope of the linearised fit
    z = stats::lm(S ~ A, dat)$coefficients[[2]]
    c(d, c, z)
  }
)
|
/inst/non_lin_models/Mod_weibull3.R
|
no_license
|
Bhanditz/sars
|
R
| false
| false
| 748
|
r
|
#CUMULATIVE WEIBULL DISTRIBUTION with 3 parameters
# Model descriptor consumed by the sars fitting machinery: display formula,
# evaluable expression, qualitative shape, asymptote accessor, parameter
# constraints, and two starting-value strategies (custStart / init).
model <- list(
  name=c("Cumulative Weibull 3 par."),
  # NOTE(review): `d(1 - exp(...))` parses as a call to a function d();
  # compare `exp=` below, which uses the intended product d*(1 - ...).
  formula=expression(S == d(1 - exp(-c*A^z)) ),
  exp=expression(d*(1 - exp(-c*A^z)) ),
  shape="sigmoid",
  # The asymptote of the fitted curve is the parameter d itself.
  asymp=function(pars)pars["d"],
  parLim = c("Rplus","Rplus","Rplus"),
  custStart=function(data)c(10,.01,max(data$S)),
  # Starting values (d, c, z) from a linearised fit:
  # log(-log((d - S)/d)) = log(c) + z*log(A).
  init=function(data){
    data = data[data$S!=0,]
    # d (asymptote) guess: 25% above the largest observed richness.
    # NOTE(review): the comment said "c calculation" but this computes d.
    d=max(data$S)+max(data$S)/4
    #z calculation
    Z=log(-log((d-data$S)/d)) # = log z + flogX
    Z[][Z == Inf]=NA
    # NOTE(review): this assignment is dead code — overwritten two lines
    # below by the regression intercept.
    c=exp(min(Z))
    dat=data.frame("A"=log(data$A),"S"=Z)
    # c: back-transformed intercept of the linearised regression
    c=exp(stats::lm(S~A,dat)$coefficients[[1]])
    # z: slope of the linearised regression
    z=stats::lm(S~A,dat)$coefficients[[2]]
    c(d,c,z)
  }
)
|
library(ggplot2)
library(ggmap)
library(maps)
library(mapdata)
library(mapproj)
# dplyr provides %>%, filter(), group_by(), summarise(), mutate(),
# dense_rank() and select(); the original script used all of these
# without attaching the package, so it errored unless dplyr happened
# to be loaded already.
library(dplyr)

load('output/base_data.Rda')

# Crosswalk two-letter state codes to lower-case state names ("region"),
# matching the region names used by map_data("state").
state_xwalk <- data.frame(cbind(state.abb, state.name))
colnames(state_xwalk) <- c('state_code', 'region')
state_xwalk$region <- tolower(state_xwalk$region)
base_df <- merge(base_df, state_xwalk, by = 'state_code')

# Rank opioids by total prescriptions (nrx) within each state and keep
# only the top-ranked drug per state.
rank_df <- base_df %>% filter(!gennme %in% 'other') %>%
  group_by(region, gennme) %>%
  summarise(nrx = sum(nrx)) %>%
  mutate(rank = dense_rank(desc(nrx)))
rank_df <- rank_df %>% select(region, rank, gennme) %>%
  filter(rank == 1)

# Collapse every drug other than the two dominant ones into "Other".
# grepl() replaces the data.table %like% operator, which the original
# used without attaching data.table.
is_vicodin  <- grepl('Hydrocodone-Acetaminophen', rank_df$gennme)
is_percocet <- grepl('Oxycodone-Acetaminophen', rank_df$gennme)
rank_df$gennme[!is_vicodin & !is_percocet] <- 'Other'
rank_df$gennme <- as.factor(rank_df$gennme)
rank_df$gennme <- factor(rank_df$gennme, levels = c("Hydrocodone-Acetaminophen (Vicodin)",
                                                    "Oxycodone-Acetaminophen (Percocet)",
                                                    "Other"))
map <- map_data("state")

# Blank theme: no axes/grid/background, legend below, centred title.
theme_opts <- list(theme(panel.grid.minor = element_blank(),
                         panel.grid.major = element_blank(),
                         panel.background = element_blank(),
                         plot.background = element_blank(),
                         panel.border = element_blank(),
                         axis.line = element_blank(),
                         axis.text.x = element_blank(),
                         axis.text.y = element_blank(),
                         axis.ticks = element_blank(),
                         axis.title.x = element_blank(),
                         axis.title.y = element_blank(),
                         legend.position="bottom",
                         plot.title = element_text(size=14, hjust = 0.5)))

# Choropleth of the top-ranked opioid per state with white state borders.
rank_map_plot <- ggplot(data = rank_df) +
  geom_map(data = rank_df, map = map, aes(map_id = region, fill = gennme), alpha = 0.5) +
  coord_equal() +
  theme_opts +
  theme(legend.title = element_blank()) +
  scale_fill_manual(values = c('#3386FF', '#DC4A4A', '#D6D6D6')) +
  geom_polygon(data = map, aes(x=long, y = lat, group = group), fill = NA, colour="white", size=0.25) +
  guides(fill = guide_legend(ncol = 1)) +
  labs(title = 'Top Ranked Opioid in Each State')
|
/opioids_SDUD/rank_maps.R
|
no_license
|
davidby332/project_opioids
|
R
| false
| false
| 2,320
|
r
|
library(ggplot2)
library(ggmap)
library(maps)
library(mapdata)
library(mapproj)
load('output/base_data.Rda')
state_xwalk <- data.frame(cbind(state.abb, state.name))
colnames(state_xwalk) <- c('state_code', 'region')
state_xwalk$region <-tolower(state_xwalk$region)
base_df <- merge(base_df, state_xwalk, by = 'state_code')
rank_df <- base_df %>% filter(!gennme %in% 'other') %>%
group_by(region, gennme) %>%
summarise(nrx = sum(nrx)) %>%
mutate(rank = dense_rank(desc(nrx)))
rank_df <- rank_df %>% select(region, rank, gennme) %>%
filter(rank == 1)
rank_df$gennme[!rank_df$gennme %like% 'Hydrocodone-Acetaminophen' & !rank_df$gennme %like% 'Oxycodone-Acetaminophen'] <- 'Other'
rank_df$gennme <- as.factor(rank_df$gennme)
rank_df$gennme <- factor(rank_df$gennme, levels = c("Hydrocodone-Acetaminophen (Vicodin)",
"Oxycodone-Acetaminophen (Percocet)",
"Other"))
map <- map_data("state")
theme_opts <- list(theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.position="bottom",
plot.title = element_text(size=14, hjust = 0.5)))
rank_map_plot <- ggplot(data = rank_df) +
geom_map(data = rank_df, map = map, aes(map_id = region, fill = gennme), alpha = 0.5) +
coord_equal() +
theme_opts +
theme(legend.title = element_blank()) +
scale_fill_manual(values = c('#3386FF', '#DC4A4A', '#D6D6D6')) +
geom_polygon(data = map, aes(x=long, y = lat, group = group), fill = NA, colour="white", size=0.25) +
guides(fill = guide_legend(ncol = 1)) +
labs(title = 'Top Ranked Opioid in Each State')
|
library(circular)
### Name: intersect.modal.region
### Title: Intersection between model region and a given interval.
### Aliases: intersect.modal.region intersect.modal.region.default
###   intersect.modal.region.circular

### ** Examples

# 100 von Mises draws centred at pi with concentration 10, then measure
# how much of the kernel-estimated modal region (bandwidth 50) falls in
# the interval [pi, pi + pi/12] union [pi - pi/12, pi] around the mode.
x <- rvonmises(100, circular(pi), 10)
res <- intersect.modal.region(x, breaks=circular(matrix(c(pi,pi+pi/12,
pi-pi/12, pi), ncol=2, byrow=TRUE)), bw=50)
# Total probability mass of the intersection.
res$tot

# Same idea with the mode at 0: an interval far from the mode should
# capture (close to) none of the modal region...
x <- rvonmises(100, circular(0), 10)
res <- intersect.modal.region(x, breaks=circular(matrix(c(pi,pi+pi/12),
ncol=2)), bw=50)
res$tot

# ...while a wide interval wrapping around 0 should capture most of it.
res <- intersect.modal.region(x, breaks=circular(matrix(c(pi/12,
2*pi-pi/12), ncol=2, byrow=TRUE)), bw=50)
res$tot
|
/data/genthat_extracted_code/circular/examples/intersect.modal.region.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 690
|
r
|
library(circular)
### Name: intersect.modal.region
### Title: Intersection between model region and a given interval.
### Aliases: intersect.modal.region intersect.modal.region.default
### intersect.modal.region.circular
### ** Examples
x <- rvonmises(100, circular(pi), 10)
res <- intersect.modal.region(x, breaks=circular(matrix(c(pi,pi+pi/12,
pi-pi/12, pi), ncol=2, byrow=TRUE)), bw=50)
res$tot
x <- rvonmises(100, circular(0), 10)
res <- intersect.modal.region(x, breaks=circular(matrix(c(pi,pi+pi/12),
ncol=2)), bw=50)
res$tot
res <- intersect.modal.region(x, breaks=circular(matrix(c(pi/12,
2*pi-pi/12), ncol=2, byrow=TRUE)), bw=50)
res$tot
|
# Read in the NEI/SCC data sets for the exploratory-data plotting
# assignment (plot5.png).
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Treat the categorical columns as factors.
NEI$fips <- as.factor(NEI$fips)
NEI$SCC <- as.factor(NEI$SCC)
NEI$Pollutant <- as.factor(NEI$Pollutant)
NEI$year <- as.factor(NEI$year)

#==================================Question 5===================================
# How have emissions from motor vehicle sources changed from 1999–2008 in
# Baltimore City?
# I assumed that On-road sources would cover motor vehicles.
# Subset once (ON-ROAD sources in Baltimore, fips 24510), then total the
# emissions per year.
baltimore.onroad <- NEI[NEI$type == "ON-ROAD" & NEI$fips == 24510, ]
totals.vehicle <- tapply(baltimore.onroad$Emissions, baltimore.onroad$year, sum)

png(filename="plot5.png")
barplot(totals.vehicle, main="Total PM2.5 Emission From Motor Vehicle Sources in Baltimore", xlab="Year",
        ylab="Tons of PM2.5", col="steelblue")
dev.off()
|
/plot5.R
|
no_license
|
Sarahfogel/ExploratoryDataAnal-CP2
|
R
| false
| false
| 825
|
r
|
#Read in the data
# (SCC is loaded for completeness but not used by this particular plot.)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Treat the categorical columns as factors.
NEI$fips<-as.factor(NEI$fips)
NEI$SCC<-as.factor(NEI$SCC)
NEI$Pollutant<-as.factor(NEI$Pollutant)
NEI$year<-as.factor(NEI$year)
#==================================Question 5===================================
#How have emissions from motor vehicle sources changed from 1999–2008 in
#Baltimore City?
#I assumed that On-road sources would cover motor vehicles
# Sum emissions per year over ON-ROAD sources in Baltimore (fips 24510);
# the factor/numeric comparison works because 24510 is coerced to "24510".
totals.vehicle<-tapply(NEI$Emissions[NEI$type == "ON-ROAD" & NEI$fips == 24510],
NEI$year[NEI$type == "ON-ROAD" & NEI$fips == 24510], sum)
png(filename="plot5.png")
barplot(totals.vehicle, main="Total PM2.5 Emission From Motor Vehicle Sources in Baltimore", xlab="Year",
ylab="Tons of PM2.5", col="steelblue")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_LSIR.R
\name{do.lsir}
\alias{do.lsir}
\title{Localized Sliced Inverse Regression}
\usage{
do.lsir(
X,
response,
ndim = 2,
h = max(2, round(nrow(X)/5)),
preprocess = c("center", "scale", "cscale", "decorrelate", "whiten"),
ycenter = FALSE,
numk = max(2, round(nrow(X)/10)),
tau = 1
)
}
\arguments{
\item{X}{an \eqn{(n\times p)} matrix or data frame whose rows are observations
and columns represent independent variables.}
\item{response}{a length-\eqn{n} vector of response variable.}
\item{ndim}{an integer-valued target dimension.}
\item{h}{the number of slices to divide the range of response vector.}
\item{preprocess}{an additional option for preprocessing the data.
Default is "center". See also \code{\link{aux.preprocess}} for more details.}
\item{ycenter}{a logical; \code{TRUE} to center the response variable, \code{FALSE} otherwise.}
\item{numk}{size of determining neighborhood via \eqn{k}-nearest neighbor selection.}
\item{tau}{regularization parameter for adjusting rank-deficient scatter matrix.}
}
\value{
a named list containing
\describe{
\item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
\item{trfinfo}{a list containing information for out-of-sample prediction.}
\item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
}
}
\description{
Localized SIR (LSIR) is an extension of the celebrated SIR method. As its name suggests,
the \emph{locality} concept is brought in that for each slice, only local data points
are considered in order to discover intrinsic structure of the data.
}
\examples{
\donttest{
## generate swiss roll with auxiliary dimensions
## it follows reference example from LSIR paper.
n = 123
theta = runif(n)
h = runif(n)
t = (1+2*theta)*(3*pi/2)
X = array(0,c(n,10))
X[,1] = t*cos(t)
X[,2] = 21*h
X[,3] = t*sin(t)
X[,4:10] = matrix(runif(7*n), nrow=n)
## corresponding response vector
y = sin(5*pi*theta)+(runif(n)*sqrt(0.1))
## try different number of neighborhoods
out1 = do.lsir(X, y, numk=5)
out2 = do.lsir(X, y, numk=10)
out3 = do.lsir(X, y, numk=25)
## visualize
opar <- par(no.readonly=TRUE)
par(mfrow=c(1,3))
plot(out1$Y, main="LSIR::nbd size=5")
plot(out2$Y, main="LSIR::nbd size=10")
plot(out3$Y, main="LSIR::nbd size=25")
par(opar)
}
}
\references{
\insertRef{wu_localized_2010}{Rdimtools}
}
\seealso{
\code{\link{do.sir}}
}
\author{
Kisung You
}
|
/man/linear_LSIR.Rd
|
no_license
|
dungcv/Rdimtools
|
R
| false
| true
| 2,474
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_LSIR.R
\name{do.lsir}
\alias{do.lsir}
\title{Localized Sliced Inverse Regression}
\usage{
do.lsir(
X,
response,
ndim = 2,
h = max(2, round(nrow(X)/5)),
preprocess = c("center", "scale", "cscale", "decorrelate", "whiten"),
ycenter = FALSE,
numk = max(2, round(nrow(X)/10)),
tau = 1
)
}
\arguments{
\item{X}{an \eqn{(n\times p)} matrix or data frame whose rows are observations
and columns represent independent variables.}
\item{response}{a length-\eqn{n} vector of response variable.}
\item{ndim}{an integer-valued target dimension.}
\item{h}{the number of slices to divide the range of response vector.}
\item{preprocess}{an additional option for preprocessing the data.
Default is "center". See also \code{\link{aux.preprocess}} for more details.}
\item{ycenter}{a logical; \code{TRUE} to center the response variable, \code{FALSE} otherwise.}
\item{numk}{size of determining neighborhood via \eqn{k}-nearest neighbor selection.}
\item{tau}{regularization parameter for adjusting rank-deficient scatter matrix.}
}
\value{
a named list containing
\describe{
\item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
\item{trfinfo}{a list containing information for out-of-sample prediction.}
\item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
}
}
\description{
Localized SIR (LSIR) is an extension of the celebrated SIR method. As its name suggests,
the \emph{locality} concept is brought in that for each slice, only local data points
are considered in order to discover intrinsic structure of the data.
}
\examples{
\donttest{
## generate swiss roll with auxiliary dimensions
## it follows reference example from LSIR paper.
n = 123
theta = runif(n)
h = runif(n)
t = (1+2*theta)*(3*pi/2)
X = array(0,c(n,10))
X[,1] = t*cos(t)
X[,2] = 21*h
X[,3] = t*sin(t)
X[,4:10] = matrix(runif(7*n), nrow=n)
## corresponding response vector
y = sin(5*pi*theta)+(runif(n)*sqrt(0.1))
## try different number of neighborhoods
out1 = do.lsir(X, y, numk=5)
out2 = do.lsir(X, y, numk=10)
out3 = do.lsir(X, y, numk=25)
## visualize
opar <- par(no.readonly=TRUE)
par(mfrow=c(1,3))
plot(out1$Y, main="LSIR::nbd size=5")
plot(out2$Y, main="LSIR::nbd size=10")
plot(out3$Y, main="LSIR::nbd size=25")
par(opar)
}
}
\references{
\insertRef{wu_localized_2010}{Rdimtools}
}
\seealso{
\code{\link{do.sir}}
}
\author{
Kisung You
}
|
# R code for input modeling via frequentist model average
# Xi Jiang and Barry L Nelson
# Last update 12/17/2022
# The following distributions are supported: 'normal', 'lognormal', 'beta', 'exponential',
# 'gamma', 'weibull', 'inverse gaussian', 'logistic', 'loglogistic', 'student t', 'uniform',
# 'cauchy', 'pareto', 'rayleigh', 'ED'.
## Example:
## data<-rlnorm(500,meanlog=0,sdlog=0.25)
## Fset<-c('gamma','weibull','normal','ED')
## type<-'P' #by default type<-'Q'
## J<-5 #by default J<-10
## myfit<-fmafit(data,Fset,J,type)
## n<-10000
## sim_data<-rfma(n,myfit)
#' @import stats
#' @import utils
package_install <- function(packages) {
  # Install each package in `packages` that is not already installed.
  # packages = character vector of package names required by this package
  # seq_along() (rather than 1:length()) makes an empty vector a no-op
  # instead of iterating i = 1, 0 and indexing with NA.
  for (i in seq_along(packages)) {
    package.need <- packages[i]
    # find.package(quiet=TRUE) returns character(0) when the package is
    # absent; only then do we install it.
    if (length(find.package(package.need, quiet = TRUE)) == 0) {
      install.packages(packages[i])
    }
  }
}
# The following packages will be installed if needed
# NOTE(review): installing packages as a side effect of sourcing this file
# is unusual for package code; consider declaring these in DESCRIPTION.
packages<-c("fitdistrplus","actuar","EnvStats","extraDistr","MASS","quadprog")
package_install(packages)
MLE <- function(dist, data) {
  # Estimate the MLE parameters of a candidate distribution from data.
  # dist = candidate distribution name (one of the supported set; see SET)
  # data = numeric vector used for estimation
  # Returns a list with the MLE parameter vector (MLE_theta), the
  # maximised log-likelihood (ll), and AIC/BIC of the fit. For 'ED' (the
  # empirical distribution) there is no parametric fit, so the 'NA'
  # string placeholders of the original interface are preserved.
  if (dist == 'normal') {
    fit.norm <- fitdistrplus::fitdist(data, 'norm', method = 'mle')
    theta = fit.norm$estimate # c(mean, sd)
    ll = fit.norm$loglik
    AIC = fit.norm$aic
    BIC = fit.norm$bic
  } else if (dist == 'lognormal') {
    fit.lnorm <- fitdistrplus::fitdist(data, 'lnorm', method = 'mle')
    theta = fit.lnorm$estimate # c(meanlog, sdlog)
    ll = fit.lnorm$loglik
    AIC = fit.lnorm$aic
    BIC = fit.lnorm$bic
  } else if (dist == 'beta') {
    # The beta distribution lives on (0,1): rescale the data to [0,1],
    # then nudge the endpoints inward by machine epsilon so the
    # likelihood is finite at the extremes.
    eps <- .Machine$double.eps # smallest x such that 1 + x != 1
    data <- (data - min(data))/(max(data) - min(data)) # scale data to [0,1]
    data <- eps + (1 - 2*eps)*(data) # scale again to (0,1)
    fit.beta <- fitdistrplus::fitdist(data, 'beta', method = 'mle')
    theta = fit.beta$estimate # c(shape1, shape2)
    ll = fit.beta$loglik
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'exponential') {
    fit.exp <- MASS::fitdistr(data, 'exponential')
    theta = fit.exp$estimate # rate
    ll = fit.exp$loglik
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'gamma') {
    fit.gamma <- fitdistrplus::fitdist(data, 'gamma', method = 'mle')
    theta = fit.gamma$estimate # c(shape, rate)
    ll = fit.gamma$loglik
    AIC = fit.gamma$aic
    BIC = fit.gamma$bic
  } else if (dist == 'weibull') {
    fit.weibull <- fitdistrplus::fitdist(data, 'weibull', method = 'mle')
    theta = fit.weibull$estimate # c(shape, scale)
    ll = fit.weibull$loglik
    AIC = fit.weibull$aic
    BIC = fit.weibull$bic
  } else if (dist == 'inverse gaussian') {
    # Method-of-moments starting values for (mean, shape).
    MLE_invgauss <- fitdistrplus::fitdist(data, "invgauss", start = list(mean = mean(data), shape = length(data)/sum(1/data - 1/mean(data))))
    theta = MLE_invgauss$estimate # c(mean, shape)
    # Fixed: fitdist objects expose $loglik (the original read $logLik,
    # which is NULL, making AIC/BIC length-0).
    ll = MLE_invgauss$loglik
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'logistic') {
    fit.logis <- MASS::fitdistr(data, 'logistic')
    theta = fit.logis$estimate # c(location, scale)
    ll = fit.logis$loglik
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'loglogistic') {
    MLE_loglogis <- fitdistrplus::fitdist(data, "llogis")
    theta = MLE_loglogis$estimate # c(shape, scale)
    # Fixed: $loglik, not $logLik (see inverse gaussian branch).
    ll = MLE_loglogis$loglik
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'student t') {
    fit.t <- fitdistrplus::fitdist(data, "t", method = "mle", start = list(df = 1))
    theta = fit.t$estimate # df
    ll = fit.t$loglik
    AIC = fit.t$aic
    # Fixed: the original assigned BIT (a typo), so BIC was never set
    # in this branch and the returned list referenced an undefined name.
    BIC = fit.t$bic
  } else if (dist == 'uniform') {
    fit.uniform <- fitdistrplus::fitdist(data, 'unif', method = 'mle')
    theta = fit.uniform$estimate # c(min, max)
    ll = fit.uniform$loglik
    AIC = fit.uniform$aic
    BIC = fit.uniform$bic
  } else if (dist == 'cauchy') {
    fit.cauchy <- MASS::fitdistr(data, 'cauchy')
    theta = fit.cauchy$estimate # c(location, scale)
    ll = fit.cauchy$loglik
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'pareto') {
    fit.pareto <- EnvStats::epareto(data, method = "mle")
    theta = fit.pareto$parameters # c(location, shape)
    ll = sum(log(EnvStats::dpareto(data, location = theta[1], shape = theta[2])))
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'rayleigh') {
    # Closed-form MLE for the Rayleigh scale parameter.
    theta = sqrt(sum(data^2)/2/length(data)) # scale
    ll = sum(log(extraDistr::drayleigh(data, sigma = theta)))
    AIC = 2*length(theta) - 2*ll
    BIC = log(length(data))*length(theta) - 2*ll
  } else if (dist == 'ED') {
    # Empirical distribution: no parameters to estimate.
    theta = 'NA'
    ll = 'NA'
    AIC = 'NA'
    BIC = 'NA'
  } else {
    stop("MLE: Not a supported distribution")
  }
  list(MLE_theta = theta, ll = ll, AIC = AIC, BIC = BIC)
}
CDF <- function(dist, x, theta) {
  # Cumulative distribution function F_X(x) = P(X <= x) of a named
  # candidate distribution.
  # dist  = distribution name (one of the supported set)
  # x     = value(s) within the support of the distribution
  # theta = parameter vector, ordered as fitted by MLE()
  if (dist == 'normal') {
    pnorm(x, mean = theta[1], sd = theta[2])
  } else if (dist == 'lognormal') {
    plnorm(x, meanlog = theta[1], sdlog = theta[2])
  } else if (dist == 'beta') {
    pbeta(x, shape1 = theta[1], shape2 = theta[2])
  } else if (dist == 'exponential') {
    pexp(x, rate = theta)
  } else if (dist == 'gamma') {
    pgamma(x, shape = theta[1], rate = theta[2])
  } else if (dist == 'weibull') {
    pweibull(x, shape = theta[1], scale = theta[2])
  } else if (dist == 'inverse gaussian') {
    actuar::pinvgauss(x, mean = theta[1], shape = theta[2])
  } else if (dist == 'student t') {
    pt(x, df = theta)
  } else if (dist == 'uniform') {
    punif(x, min = theta[1], max = theta[2])
  } else if (dist == 'cauchy') {
    pcauchy(x, location = theta[1], scale = theta[2])
  } else if (dist == 'pareto') {
    EnvStats::ppareto(x, location = theta[1], shape = theta[2])
  } else if (dist == 'rayleigh') {
    extraDistr::prayleigh(x, sigma = theta)
  } else if (dist == 'logistic') {
    plogis(x, location = theta[1], scale = theta[2])
  } else if (dist == 'loglogistic') {
    actuar::pllogis(x, shape = theta[1], scale = theta[2])
  } else {
    stop("CDF: Not a supported distribution")
  }
}
IT <- function(dist, U, theta) {
  # Inverse-transform sampling: map uniform(0,1) draws through the
  # quantile function of a named candidate distribution.
  # dist  = distribution name (one of the supported set)
  # U     = vector of uniform(0,1) values
  # theta = parameter vector, ordered as fitted by MLE()
  # Returns X = F^{-1}(U), variates from the requested distribution.
  if (dist == 'normal') {
    qnorm(U, mean = theta[1], sd = theta[2])
  } else if (dist == 'lognormal') {
    qlnorm(U, meanlog = theta[1], sdlog = theta[2])
  } else if (dist == 'beta') {
    qbeta(U, shape1 = theta[1], shape2 = theta[2])
  } else if (dist == 'exponential') {
    qexp(U, rate = theta)
  } else if (dist == 'gamma') {
    qgamma(U, shape = theta[1], rate = theta[2])
  } else if (dist == 'weibull') {
    qweibull(U, shape = theta[1], scale = theta[2])
  } else if (dist == 'inverse gaussian') {
    actuar::qinvgauss(U, mean = theta[1], shape = theta[2])
  } else if (dist == 'student t') {
    qt(U, df = theta)
  } else if (dist == 'uniform') {
    qunif(U, min = theta[1], max = theta[2])
  } else if (dist == 'cauchy') {
    qcauchy(U, location = theta[1], scale = theta[2])
  } else if (dist == 'pareto') {
    EnvStats::qpareto(U, location = theta[1], shape = theta[2])
  } else if (dist == 'rayleigh') {
    extraDistr::qrayleigh(U, sigma = theta)
  } else if (dist == 'logistic') {
    qlogis(U, location = theta[1], scale = theta[2])
  } else if (dist == 'loglogistic') {
    actuar::qllogis(U, shape = theta[1], scale = theta[2])
  } else {
    stop("IT: Not a supported distribution")
  }
}
lappend <- function (lst, ...) {
  # Append the given elements to a list.
  # lst = original list
  # ... = elements to append, each becoming one new list entry
  # Returns the extended list.
  c(lst, list(...))
}
D_matrix<-function(data,J,n,Fset){
  # Compute the D_matrix for QP that returns the optimal weight vector
  # for probability average fitting
  # data = vector of data
  # J = number of groups for cross validation
  # n = size of the vector of data (assumed divisible by J — TODO confirm)
  # Fset = set of candidate distributions
  # returns matrix D for quadratic term in the objective function of QP
  #
  # J-fold cross-validation: fold rep1 is held out (dat2) while the other
  # J-1 folds (dat1) are used to fit each candidate by MLE. For each
  # held-out point, c_vector holds the CDF discrepancies F_hat(x) - ECDF(x)
  # per candidate; the outer products accumulate into D.
  xsim_matrix <- matrix(data, nrow=J, byrow=T)
  D_matrix<-matrix(0,length(Fset),length(Fset))
  for (rep1 in 1:J){
    # Training data = all folds except rep1; test data = fold rep1.
    dat1<-as.vector(t(xsim_matrix[-rep1,]))
    dat2<-xsim_matrix[rep1,]
    # Empirical CDF of the held-out fold (the reference in the criterion).
    ED_CV2<-ecdf(dat2)
    # Fit every parametric candidate on the training folds; 'ED' instead
    # gets the empirical CDF of the training data.
    MLE_theta<-list()
    for (rep2 in 1:length(Fset)){
      if (Fset[rep2]!='ED'){
        MLE_theta<-lappend(MLE_theta,MLE(Fset[rep2],dat1)$MLE_theta)
      } else if (Fset[rep2]=='ED'){
        ED_CV1<-ecdf(dat1)
      }
    }
    for (rep3 in 1:(n/J)){
      c_vector<-vector('numeric')
      for (rep4 in 1:length(Fset)){
        if (Fset[rep4]!='ED' & Fset[rep4]!='beta'){
          c_vector<-append(c_vector, CDF(Fset[rep4],dat2[rep3],unlist(MLE_theta[rep4]))-ED_CV2(dat2[rep3]))
        } else if (Fset[rep4]=='beta'){#normalize the data for beta distribution
          # Rescale the test point to (0,1) with the same transform used
          # when fitting beta in MLE() (based on the full data's range).
          eps<-.Machine$double.eps
          data_pt<-(dat2[rep3]-min(data))/(max(data)-min(data))
          data_pt<-eps+(1-2*eps)*(data_pt)
          c_vector<-append(c_vector, CDF(Fset[rep4],data_pt,unlist(MLE_theta[rep4]))-ED_CV2(dat2[rep3]))
        } else if (Fset[rep4]=='ED'){
          c_vector<-append(c_vector, ED_CV1(dat2[rep3])-ED_CV2(dat2[rep3]))
        }
      }
      # Accumulate the outer product of the discrepancy vector.
      D_matrix<-D_matrix+c_vector%*%t(c_vector)
    }
  }
  return(D_mat=D_matrix)
}
DG_matrix<-function(data,J,n,Fset){
  # Compute the D_matrix for QP which returns the optimal weight vector
  # for quantile average fitting
  # data = vector of data
  # J = number of groups for cross validation
  # n = size of the vector of data (assumed divisible by J — TODO confirm)
  # Fset = set of candidate distributions
  # returns matrix D for quadratic term in the objective function of QP
  #
  # Quantile analogue of D_matrix(): for each held-out fold, c_vector
  # holds the discrepancies between each candidate's quantile at
  # probability rep3/(n/J + 1) and the corresponding order statistic of
  # the held-out data.
  xsim_matrix <- matrix(data, nrow=J, byrow=T)
  D_matrix<-matrix(0,length(Fset),length(Fset))
  for (rep1 in 1:J){
    # Training data = all folds except rep1; test data = fold rep1.
    dat1<-as.vector(t(xsim_matrix[-rep1,]))
    dat2<-xsim_matrix[rep1,]
    # Fit every parametric candidate on the training folds; for 'ED' the
    # training data is sorted so QT() can take empirical quantiles.
    MLE_theta<-list()
    for (rep2 in 1:length(Fset)){
      if (Fset[rep2]!='ED'){
        MLE_theta<-lappend(MLE_theta,MLE(Fset[rep2],dat1)$MLE_theta)
      } else if (Fset[rep2]=='ED'){
        dat1<-sort(dat1)
      }
    }
    for (rep3 in 1:(n/J)){
      c_vector<-vector('numeric')
      for (rep4 in 1:length(Fset)){
        if (Fset[rep4]!='ED' & Fset[rep4]!='beta'){
          c_vector<-append(c_vector, IT(Fset[rep4],rep3/(n/J+1),unlist(MLE_theta[rep4]))-(sort(dat2))[rep3])
        } else if (Fset[rep4]=='beta'){#denormalize the data for beta distribution
          # Map the beta quantile back from (0,1) to the original data
          # scale (inverse of the transform used when fitting in MLE()).
          eps<-.Machine$double.eps
          data_pt<-(IT(Fset[rep4],rep3/(n/J+1),unlist(MLE_theta[rep4]))-eps)/(1-2*eps)*(max(data)-min(data))+min(data)
          c_vector<-append(c_vector, data_pt-(sort(dat2))[rep3])
        } else if (Fset[rep4]=='ED'){
          c_vector<-append(c_vector, QT(dat1,rep3/(n/J+1))-sort(dat2)[rep3])
        }
      }
      # Accumulate the outer product of the discrepancy vector.
      D_matrix<-D_matrix+c_vector%*%t(c_vector)
    }
  }
  return(D_mat=D_matrix)
}
Qua_opt <- function(D_mat, Fset) {
  # Quadratic program that finds the optimal model-averaging weights:
  # minimise w' D w subject to sum(w) = 1 and w >= 0.
  # D_mat = matrix D from D_matrix()/DG_matrix() (quadratic term of QP)
  # Fset  = set of candidate distributions (kept for interface parity)
  # Returns the weight vector minimising the J-fold CV criterion.
  k <- nrow(D_mat)
  # Constraint matrix, column-wise as solve.QP() expects: one equality
  # column (weights sum to 1) followed by k non-negativity columns.
  constraint_mat <- cbind(matrix(rep(1, k), ncol = 1), diag(k))
  constraint_rhs <- c(1, rep(0, k))
  weights <- quadprog::solve.QP(D_mat, rep(0, k), constraint_mat,
                                constraint_rhs, meq = 1)$solution
  # Zero out numerically negligible weights, then renormalise so the
  # remaining weights sum exactly to 1.
  weights[weights <= .Machine$double.eps] <- 0
  weights <- weights/sum(weights)
  return(weights)
}
QT<-function(X,probs,type='non LI'){
  # Empirical quantile of sorted data without linear interpolation.
  # X     = sorted samples of data
  # probs = a probability in (0, 1]
  # type  = 'non LI' (supported) or 'LI' (not implemented)
  # returns the sample quantile X[ceiling(length(X) * probs)]
  n <- length(X)
  if(type == 'LI') {
    # The linear-interpolation branch was never implemented; the original
    # fell through and failed with "object 'result' not found". Fail loudly.
    stop("QT: type 'LI' (linear interpolation) is not implemented")
  } else if (type=='non LI'){
    nppm <- n * probs
    lo <- floor(nppm)
    # index = ceiling(n * probs)
    if (nppm==lo){
      index<-lo
    } else {
      index<-lo+1
    }
    # guard probs == 0: indexing with 0 would silently return an empty vector
    index <- max(index, 1)
    result <- X[index]
  } else {
    stop("QT: unknown type '", type, "'")
  }
  return(result)
}
EmpCDF<-function(X,data){
  # THIS FUNCTION NOT CURRENTLY USED
  # Empirical CDF of a sorted sample.
  # X    = sorted samples of data
  # data = evaluation point (one of the data points from X)
  # returns the fraction of observations <= data
  n <- length(X)
  steps <- seq_len(n + 1)
  # right-continuous step function over the sorted sample
  cdf_fun <- stepfun(X, steps, f = 0)
  (cdf_fun(data) - 1) / n
}
# Complete set of supported distributions
SET = c('normal', 'lognormal', 'beta', 'exponential', 'gamma', 'weibull',
'inverse gaussian', 'logistic', 'loglogistic', 'student t',
'uniform', 'cauchy', 'pareto', 'rayleigh', 'ED')
Inputcheck<-function(X,Fset,J,type){
  # Validate the inputs of fmafit(); stops with an informative message
  # on the first problem found.
  # X    = samples of data for fitting (must be numeric)
  # Fset = candidate distributions (each must appear in the global SET)
  # J    = number of CV folds (integer, J >= 2, and length(X) >= 2*J)
  # type = 'P' (probability) or 'Q' (quantile) model averaging
  if (!is.numeric(X)){
    stop("X: data is not numeric")
  }
  if (!all(Fset %in% SET)){
    stop("Fset: Fset includes distributions that are not supported")
  }
  # guard clauses replace the original nested if/else; same outcomes
  if ((J-floor(J))>=.Machine$double.eps){
    stop("J: not an integer")
  }
  if (J<2) {
    stop("J: J >= 2 required ")
  }
  if (length(X)<2*J){
    stop("X: length of X >= 2*J required")
  }
  if (type!='P' && type!='Q'){
    stop("type: not a valid fitting type")
  }
}
#' @export
fmafit<-function(X,Fset,J=10,type='P'){
  # Fit a frequentist model-average (FMA) distribution to data.
  # X = samples of data for fitting
  # Fset = the list of candidate distributions
  # J = the number of folds for cross-validation
  # type = P (probability) or Q (quantile) model averaging
  # returns a list with weight w, MLE_list, Fset, and the sorted data
  Inputcheck(X,Fset,J,type)
  n=length(X)
  # NOTE(review): set.seed(1) makes the shuffle reproducible but clobbers the
  # caller's RNG state as a side effect -- consider saving/restoring the seed
  set.seed(1)
  data = sample(X) # scramble in case sorted
  # truncate to a multiple of J so the CV folds have equal size
  n <- floor(n/J)*J
  data = data[1:n]
  MLE_theta<-list()
  # MLE parameters of every candidate, fitted on the full truncated sample
  for (i in 1:length(Fset)){
    MLE_theta<-lappend(MLE_theta,MLE(Fset[i],data)$MLE_theta)
  }
  if (type=='P'){
    # probability averaging: CV discrepancy measured on the CDF scale
    D_mat<-D_matrix(data,J,n,Fset)
    weight <- Qua_opt(D_mat,Fset)
  } else if (type=='Q'){
    # quantile averaging: CV discrepancy measured on the quantile scale
    DG_mat<-DG_matrix(data,J,n,Fset)
    weight <- Qua_opt(DG_mat,Fset)
  } else {
    stop("fmafit: Unsupported type")
  }
  list(w=weight,MLE_list=MLE_theta,Fset=Fset,data=sort(data))
}
#' @export
rfma<-function(n,myfit){
  # Generate n random samples from the fitted model-average distribution.
  # n     = the number of samples to generate
  # myfit = result of fmafit(): list with
  #         w        - weight vector over the candidate distributions
  #         Fset     - candidate distribution names
  #         MLE_list - MLE parameters for each member of Fset
  #         data     - sorted fitting data (needed for 'ED')
  # returns a numeric vector of n samples
  w<-myfit$w
  Fset<-myfit$Fset
  MLE_list<-myfit$MLE_list
  data<-myfit$data
  len<-length(Fset)
  xsim <- numeric(n)           # preallocate instead of growing with append()
  if (len > 1) {
    # Build the component-selection step function once: it is loop-invariant
    # (the original rebuilt it on every iteration).
    x_step<-cumsum(w)
    x_step[len]<-1             # guard against rounding so the cdf reaches 1
    y_step<-1:(len+1)
    f.U<-stepfun(x_step,y_step,right=FALSE)
  }
  for (i in seq_len(n)){       # seq_len is safe for n == 0 (1:n is not)
    # The first uniform selects the mixture component; it is drawn even when
    # len == 1 to keep the random-number stream identical to the original.
    U<-runif(1)
    U_1<-runif(1)
    if (len==1){
      if (Fset!='ED'){
        x <- IT(Fset,U_1,unname(MLE_list))
      } else {
        x<-unname(QT(data,U_1))
      }
    } else {
      k<-f.U(U)                # index of the selected component
      if (Fset[k]!= 'ED'){
        x <- IT(Fset[k],U_1,unlist(MLE_list[k]))
      } else {
        x<-unname(QT(data,U_1))
      }
    }
    xsim[i] <- x
  }
  return(xsim)
}
|
/R/fma12172022.R
|
no_license
|
cran/FMAdist
|
R
| false
| false
| 15,231
|
r
|
# R code for input modeling via frequentist model average
# Xi Jiang and Barry L Nelson
# Last update 12/17/2022
# The following distributions are supported: 'normal', 'lognormal', 'beta', 'exponential',
# 'gamma', 'weibull', 'inverse gaussian', 'logistic', 'loglogistic', 'student t', 'uniform',
# 'cauchy', 'pareto', 'rayleigh', 'ED'.
## Example:
## data<-rlnorm(500,meanlog=0,sdlog=0.25)
## Fset<-c('gamma','weibull','normal','ED')
## type<-'P' #by default type<-'Q'
## J<-5 #by default J<-10
## myfit<-fmafit(data,Fset,J,type)
## n<-10000
## sim_data<-rfma(n,myfit)
#' @import stats
#' @import utils
package_install<-function(packages){
  # Install each required package only if it is not already installed.
  # packages = character vector of package names this code depends on
  # (find.package() locates installed packages, so nothing is reinstalled)
  for (i in seq_along(packages)){
    # seq_along is safe for an empty vector; the original 1:length(packages)
    # would iterate over c(1, 0) and index out of bounds
    package.need<-packages[i]
    if(length(find.package(package.need,quiet=TRUE))==0){
      install.packages(package.need)
    }
  }
}
# The following packages will be installed if needed
packages<-c("fitdistrplus","actuar","EnvStats","extraDistr","MASS","quadprog")
package_install(packages)
MLE<-function(dist,data){
  # Maximum-likelihood estimation for one candidate distribution.
  # dist = candidate distribution (one of SET)
  # data = vector of data for estimating the MLE
  # returns list(MLE_theta, ll = max log-likelihood, AIC, BIC);
  # for 'ED' (empirical distribution) there are no parameters: all fields 'NA'
  if (dist=='normal'){
    fit.norm<-fitdistrplus::fitdist(data,'norm', method='mle')
    theta=fit.norm$estimate #c(mean,sd)
    ll=fit.norm$loglik
    AIC=fit.norm$aic
    BIC=fit.norm$bic
  } else if (dist=='lognormal'){
    fit.lnorm<-fitdistrplus::fitdist(data,'lnorm',method='mle')
    theta=fit.lnorm$estimate #c(meanlog,sdlog)
    ll=fit.lnorm$loglik
    AIC=fit.lnorm$aic
    BIC=fit.lnorm$bic
  } else if (dist=='beta'){
    eps<-.Machine$double.eps #the smallest positive floating-point number x such that 1 + x != 1
    # beta support is (0,1): rescale data to [0,1] and then nudge the
    # endpoints into the open interval before fitting
    data<-(data-min(data))/(max(data)-min(data)) # scale data to [0,1]
    data<-eps+(1-2*eps)*(data) # scale again to (0,1)
    fit.beta<-fitdistrplus::fitdist(data,'beta',method='mle')
    theta=fit.beta$estimate #c(shape1,shape2)
    ll=fit.beta$loglik
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='exponential'){
    fit.exp <- MASS::fitdistr(data,'exponential')
    theta=fit.exp$estimate #rate
    ll=fit.exp$loglik
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='gamma'){
    fit.gamma<-fitdistrplus::fitdist(data,'gamma',method='mle')
    theta=fit.gamma$estimate #c(shape,rate)
    ll=fit.gamma$loglik
    AIC=fit.gamma$aic
    BIC=fit.gamma$bic
  } else if (dist=='weibull'){
    fit.weibull <- fitdistrplus::fitdist(data,'weibull',method='mle')
    theta=fit.weibull$estimate #c(shape,scale)
    ll=fit.weibull$loglik
    AIC=fit.weibull$aic
    BIC=fit.weibull$bic
  } else if (dist=='inverse gaussian'){
    MLE_invgauss<-fitdistrplus::fitdist(data, "invgauss", start = list(mean = mean(data), shape = length(data)/sum(1/data-1/mean(data))))
    theta=MLE_invgauss$estimate #c(mean,shape)
    # BUG FIX: the fitdist component is 'loglik'; '$logLik' returned NULL,
    # which made AIC/BIC numeric(0)
    ll=MLE_invgauss$loglik
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='logistic'){
    fit.logis <- MASS::fitdistr(data,'logistic')
    theta=fit.logis$estimate #c(location,scale)
    ll=fit.logis$loglik
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='loglogistic'){
    MLE_loglogis<-fitdistrplus::fitdist(data, "llogis")
    theta=MLE_loglogis$estimate #c(shape,scale)
    # BUG FIX: '$logLik' -> '$loglik' (see inverse gaussian branch)
    ll=MLE_loglogis$loglik
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='student t'){
    fit.t<-fitdistrplus::fitdist(data, "t", method = "mle", start = list(df=1))
    theta=fit.t$estimate #df
    ll=fit.t$loglik
    AIC=fit.t$aic
    # BUG FIX: this was 'BIT=fit.t$bic', leaving BIC unassigned so the
    # returned list captured the stats::BIC function instead of a number
    BIC=fit.t$bic
  } else if (dist=='uniform'){
    fit.uniform<-fitdistrplus::fitdist(data,'unif',method='mle')
    theta=fit.uniform$estimate #c(min,max)
    ll=fit.uniform$loglik
    AIC=fit.uniform$aic
    BIC=fit.uniform$bic
  } else if (dist=='cauchy'){
    fit.cauchy <- MASS::fitdistr(data,'cauchy')
    theta=fit.cauchy$estimate #c(location,scale)
    ll=fit.cauchy$loglik
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='pareto'){
    fit.pareto <- EnvStats::epareto(data,method="mle")
    theta=fit.pareto$parameters #c(location,shape)
    # epareto does not report the log-likelihood; evaluate it directly
    ll=sum(log(EnvStats::dpareto(data, location = theta[1], shape = theta[2])))
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='rayleigh'){
    # closed-form MLE of the Rayleigh scale parameter
    theta=sqrt(sum(data^2)/2/length(data)) #scale
    ll=sum(log(extraDistr::drayleigh(data, sigma=theta)))
    AIC=2*length(theta)-2*ll
    BIC=log(length(data))*length(theta)-2*ll
  } else if (dist=='ED'){
    theta='NA'
    ll='NA'
    AIC='NA'
    BIC='NA'
  } else{ stop("MLE: Not a supported distribution")}
  list(MLE_theta=theta, ll=ll, AIC=AIC, BIC=BIC)
}
CDF<-function(dist,x,theta){
  # CDF of a supported distribution: F_X(x) = P(X <= x).
  # dist = distribution name (any member of SET except 'ED')
  # x = a value within the support of the distribution
  # theta = the parameter vector of the distribution
  #         (per-distribution order shown in the matching branch)
  # returns F_X(x)
  switch(dist,
         'normal'=pnorm(x,mean=theta[1],sd=theta[2]),
         'lognormal'=plnorm(x,meanlog=theta[1],sdlog=theta[2]),
         'beta'=pbeta(x,shape1=theta[1],shape2=theta[2]),
         'exponential'=pexp(x,rate=theta),
         'gamma'=pgamma(x,shape=theta[1],rate=theta[2]),
         'weibull'=pweibull(x,shape=theta[1],scale=theta[2]),
         'inverse gaussian'=actuar::pinvgauss(x,mean=theta[1],shape=theta[2]),
         'student t'=pt(x,df=theta),
         'uniform'=punif(x,min=theta[1],max=theta[2]),
         'cauchy'=pcauchy(x,location=theta[1],scale=theta[2]),
         'pareto'=EnvStats::ppareto(x, location = theta[1], shape = theta[2]),
         'rayleigh'=extraDistr::prayleigh(x, sigma = theta),
         'logistic'=plogis(x, location = theta[1], scale = theta[2]),
         'loglogistic'=actuar::pllogis(x, shape = theta[1], scale = theta[2]),
         # unmatched dist falls through to this default and stops
         stop("CDF: Not a supported distribution")
  )
}
IT<-function(dist,U,theta){
  # Inverse-transform sampling: map uniforms through the quantile function.
  # dist = distribution name (any member of SET except 'ED')
  # U = vector of RV ~ unif[0,1]
  # theta = the parameter vector of the distribution
  #         (per-distribution order shown in the matching branch)
  # returns X = F^{-1}(U), variates with the specified distribution
  switch(dist,
         'normal'=qnorm(U,mean=theta[1],sd=theta[2]),
         'lognormal'=qlnorm(U,meanlog=theta[1],sdlog=theta[2]),
         'beta'=qbeta(U,shape1=theta[1],shape2=theta[2]),
         'exponential'=qexp(U,rate=theta),
         'gamma'=qgamma(U,shape=theta[1],rate=theta[2]),
         'weibull'=qweibull(U,shape=theta[1],scale=theta[2]),
         'inverse gaussian'=actuar::qinvgauss(U,mean=theta[1],shape=theta[2]),
         'student t'=qt(U,df=theta),
         'uniform'=qunif(U,min=theta[1],max=theta[2]),
         'cauchy'=qcauchy(U,location=theta[1],scale=theta[2]),
         'pareto'=EnvStats::qpareto(U, location = theta[1], shape = theta[2]),
         'rayleigh'=extraDistr::qrayleigh(U, sigma = theta),
         'logistic'=qlogis(U, location = theta[1], scale = theta[2]),
         'loglogistic'=actuar::qllogis(U, shape = theta[1], scale = theta[2]),
         # unmatched dist falls through to this default and stops
         stop("IT: Not a supported distribution")
  )
}
lappend <- function (lst, ...){
  # Append any number of elements to a list.
  # lst = the original list
  # ... = elements to append; each becomes one new list entry
  # returns the extended list
  c(lst, list(...))
}
D_matrix<-function(data,J,n,Fset){
  # Compute the D_matrix for QP that returns the optimal weight vector
  # for probability average fitting
  # data = vector of data
  # J = number of groups for cross validation
  # n = size of the vector of data
  # Fset = set of candidate distributions
  # returns matrix D for quadratic term in the objective function of QP
  xsim_matrix <- matrix(data, nrow=J, byrow=T)      # one CV fold per row
  D_matrix<-matrix(0,length(Fset),length(Fset))
  for (rep1 in 1:J){
    # dat1 = training folds (all but rep1), dat2 = held-out fold rep1
    dat1<-as.vector(t(xsim_matrix[-rep1,]))
    dat2<-xsim_matrix[rep1,]
    ED_CV2<-ecdf(dat2)        # empirical CDF of the held-out fold
    MLE_theta<-list()
    for (rep2 in 1:length(Fset)){
      if (Fset[rep2]!='ED'){
        MLE_theta<-lappend(MLE_theta,MLE(Fset[rep2],dat1)$MLE_theta)
      } else if (Fset[rep2]=='ED'){
        ED_CV1<-ecdf(dat1)    # empirical CDF fitted on the training folds
      }
    }
    for (rep3 in 1:(n/J)){
      # c_vector[k] = model-k CDF minus held-out empirical CDF at dat2[rep3]
      c_vector<-vector('numeric')
      for (rep4 in 1:length(Fset)){
        if (Fset[rep4]!='ED' & Fset[rep4]!='beta'){
          c_vector<-append(c_vector, CDF(Fset[rep4],dat2[rep3],unlist(MLE_theta[rep4]))-ED_CV2(dat2[rep3]))
        } else if (Fset[rep4]=='beta'){#normalize the data for beta distribution
          # beta was fitted on data rescaled to (0,1); evaluate its CDF at
          # the correspondingly rescaled point
          eps<-.Machine$double.eps
          data_pt<-(dat2[rep3]-min(data))/(max(data)-min(data))
          data_pt<-eps+(1-2*eps)*(data_pt)
          c_vector<-append(c_vector, CDF(Fset[rep4],data_pt,unlist(MLE_theta[rep4]))-ED_CV2(dat2[rep3]))
        } else if (Fset[rep4]=='ED'){
          c_vector<-append(c_vector, ED_CV1(dat2[rep3])-ED_CV2(dat2[rep3]))
        }
      }
      D_matrix<-D_matrix+c_vector%*%t(c_vector)     # accumulate outer product
    }
  }
  return(D_mat=D_matrix)
}
DG_matrix<-function(data,J,n,Fset){
  # Compute the D_matrix for QP which returns the optimal weight vector
  # for quantile average fitting
  # data = vector of data
  # J = number of groups for cross validation
  # n = size of the vector of data
  # Fset = set of candidate distributions
  # returns matrix D for quadratic term in the objective function of QP
  xsim_matrix <- matrix(data, nrow=J, byrow=T)      # one CV fold per row
  D_matrix<-matrix(0,length(Fset),length(Fset))
  for (rep1 in 1:J){
    # dat1 = training folds (all but rep1), dat2 = held-out fold rep1
    dat1<-as.vector(t(xsim_matrix[-rep1,]))
    dat2<-xsim_matrix[rep1,]
    MLE_theta<-list()
    for (rep2 in 1:length(Fset)){
      if (Fset[rep2]!='ED'){
        MLE_theta<-lappend(MLE_theta,MLE(Fset[rep2],dat1)$MLE_theta)
      } else if (Fset[rep2]=='ED'){
        dat1<-sort(dat1)   # QT() (empirical quantile) requires sorted data
      }
    }
    for (rep3 in 1:(n/J)){
      # c_vector[k] = model-k quantile minus the held-out order statistic
      c_vector<-vector('numeric')
      for (rep4 in 1:length(Fset)){
        if (Fset[rep4]!='ED' & Fset[rep4]!='beta'){
          c_vector<-append(c_vector, IT(Fset[rep4],rep3/(n/J+1),unlist(MLE_theta[rep4]))-(sort(dat2))[rep3])
        } else if (Fset[rep4]=='beta'){#denormalize the data for beta distribution
          # beta was fitted on data rescaled to (0,1); map its quantile back
          # to the original scale before comparing with the raw order statistic
          eps<-.Machine$double.eps
          data_pt<-(IT(Fset[rep4],rep3/(n/J+1),unlist(MLE_theta[rep4]))-eps)/(1-2*eps)*(max(data)-min(data))+min(data)
          c_vector<-append(c_vector, data_pt-(sort(dat2))[rep3])
        } else if (Fset[rep4]=='ED'){
          c_vector<-append(c_vector, QT(dat1,rep3/(n/J+1))-sort(dat2)[rep3])
        }
      }
      D_matrix<-D_matrix+c_vector%*%t(c_vector)     # accumulate outer product
    }
  }
  return(D_mat=D_matrix)
}
Qua_opt<-function(D_mat,Fset){
  # Solve the QP  min w' D w  s.t.  sum(w) = 1, w >= 0  for the
  # model-averaging weights minimizing the J-fold CV criterion.
  # D_mat = matrix D of the quadratic term of the QP objective
  # Fset  = set of candidate distributions (kept for interface compatibility)
  # returns the optimal weight vector
  k <- nrow(D_mat)
  zero_linear <- rep(0, k)                 # no linear term in the objective
  eq_con <- matrix(rep(1, k), ncol = 1)    # equality: weights sum to one
  con_mat <- cbind(eq_con, diag(k))        # plus inequality: w >= 0
  rhs <- c(1, rep(0, k))
  sol <- quadprog::solve.QP(D_mat, zero_linear, con_mat, rhs, meq = 1)$solution
  sol[sol <= .Machine$double.eps] <- 0     # clip numerical noise to exact zero
  sol <- sol / sum(sol)                    # renormalize after clipping
  return(sol)
}
QT<-function(X,probs,type='non LI'){
  # Empirical quantile of sorted data without linear interpolation.
  # X     = sorted samples of data
  # probs = a probability in (0, 1]
  # type  = 'non LI' (supported) or 'LI' (not implemented)
  # returns the sample quantile X[ceiling(length(X) * probs)]
  n <- length(X)
  if(type == 'LI') {
    # The linear-interpolation branch was never implemented; the original
    # fell through and failed with "object 'result' not found". Fail loudly.
    stop("QT: type 'LI' (linear interpolation) is not implemented")
  } else if (type=='non LI'){
    nppm <- n * probs
    lo <- floor(nppm)
    # index = ceiling(n * probs)
    if (nppm==lo){
      index<-lo
    } else {
      index<-lo+1
    }
    # guard probs == 0: indexing with 0 would silently return an empty vector
    index <- max(index, 1)
    result <- X[index]
  } else {
    stop("QT: unknown type '", type, "'")
  }
  return(result)
}
EmpCDF<-function(X,data){
  # THIS FUNCTION NOT CURRENTLY USED
  # Empirical CDF of a sorted sample.
  # X    = sorted samples of data
  # data = evaluation point (one of the data points from X)
  # returns the fraction of observations <= data
  n <- length(X)
  steps <- seq_len(n + 1)
  # right-continuous step function over the sorted sample
  cdf_fun <- stepfun(X, steps, f = 0)
  (cdf_fun(data) - 1) / n
}
# Complete set of supported distributions
SET = c('normal', 'lognormal', 'beta', 'exponential', 'gamma', 'weibull',
'inverse gaussian', 'logistic', 'loglogistic', 'student t',
'uniform', 'cauchy', 'pareto', 'rayleigh', 'ED')
Inputcheck<-function(X,Fset,J,type){
  # Validate the inputs of fmafit(); stops with an informative message
  # on the first problem found.
  # X    = samples of data for fitting (must be numeric)
  # Fset = candidate distributions (each must appear in the global SET)
  # J    = number of CV folds (integer, J >= 2, and length(X) >= 2*J)
  # type = 'P' (probability) or 'Q' (quantile) model averaging
  if (!is.numeric(X)){
    stop("X: data is not numeric")
  }
  if (!all(Fset %in% SET)){
    stop("Fset: Fset includes distributions that are not supported")
  }
  # guard clauses replace the original nested if/else; same outcomes
  if ((J-floor(J))>=.Machine$double.eps){
    stop("J: not an integer")
  }
  if (J<2) {
    stop("J: J >= 2 required ")
  }
  if (length(X)<2*J){
    stop("X: length of X >= 2*J required")
  }
  if (type!='P' && type!='Q'){
    stop("type: not a valid fitting type")
  }
}
#' @export
fmafit<-function(X,Fset,J=10,type='P'){
  # Fit a frequentist model-average (FMA) distribution to data.
  # X = samples of data for fitting
  # Fset = the list of candidate distributions
  # J = the number of folds for cross-validation
  # type = P (probability) or Q (quantile) model averaging
  # returns a list with weight w, MLE_list, Fset, and the sorted data
  Inputcheck(X,Fset,J,type)
  n=length(X)
  # NOTE(review): set.seed(1) makes the shuffle reproducible but clobbers the
  # caller's RNG state as a side effect -- consider saving/restoring the seed
  set.seed(1)
  data = sample(X) # scramble in case sorted
  # truncate to a multiple of J so the CV folds have equal size
  n <- floor(n/J)*J
  data = data[1:n]
  MLE_theta<-list()
  # MLE parameters of every candidate, fitted on the full truncated sample
  for (i in 1:length(Fset)){
    MLE_theta<-lappend(MLE_theta,MLE(Fset[i],data)$MLE_theta)
  }
  if (type=='P'){
    # probability averaging: CV discrepancy measured on the CDF scale
    D_mat<-D_matrix(data,J,n,Fset)
    weight <- Qua_opt(D_mat,Fset)
  } else if (type=='Q'){
    # quantile averaging: CV discrepancy measured on the quantile scale
    DG_mat<-DG_matrix(data,J,n,Fset)
    weight <- Qua_opt(DG_mat,Fset)
  } else {
    stop("fmafit: Unsupported type")
  }
  list(w=weight,MLE_list=MLE_theta,Fset=Fset,data=sort(data))
}
#' @export
rfma<-function(n,myfit){
  # Generate n random samples from the fitted model-average distribution.
  # n     = the number of samples to generate
  # myfit = result of fmafit(): list with
  #         w        - weight vector over the candidate distributions
  #         Fset     - candidate distribution names
  #         MLE_list - MLE parameters for each member of Fset
  #         data     - sorted fitting data (needed for 'ED')
  # returns a numeric vector of n samples
  w<-myfit$w
  Fset<-myfit$Fset
  MLE_list<-myfit$MLE_list
  data<-myfit$data
  len<-length(Fset)
  xsim <- numeric(n)           # preallocate instead of growing with append()
  if (len > 1) {
    # Build the component-selection step function once: it is loop-invariant
    # (the original rebuilt it on every iteration).
    x_step<-cumsum(w)
    x_step[len]<-1             # guard against rounding so the cdf reaches 1
    y_step<-1:(len+1)
    f.U<-stepfun(x_step,y_step,right=FALSE)
  }
  for (i in seq_len(n)){       # seq_len is safe for n == 0 (1:n is not)
    # The first uniform selects the mixture component; it is drawn even when
    # len == 1 to keep the random-number stream identical to the original.
    U<-runif(1)
    U_1<-runif(1)
    if (len==1){
      if (Fset!='ED'){
        x <- IT(Fset,U_1,unname(MLE_list))
      } else {
        x<-unname(QT(data,U_1))
      }
    } else {
      k<-f.U(U)                # index of the selected component
      if (Fset[k]!= 'ED'){
        x <- IT(Fset[k],U_1,unlist(MLE_list[k]))
      } else {
        x<-unname(QT(data,U_1))
      }
    }
    xsim[i] <- x
  }
  return(xsim)
}
|
# some helper functions
# parse.graph
# ===========
# construct a formula for log-linear model from an undirected graph
.parse.graph <- function(amat)
{
  # Build the log-linear model formula implied by an undirected graph:
  # one interaction term per clique.
  # amat = adjacency matrix of an undirected graph
  if (any(skeleton(amat) != amat))
    stop("You should input an undirected graph!")
  cliq <- .lcd.cliques
  clique_list <- cliq(amat)
  # each clique becomes a ":"-joined interaction term
  term_strings <- sapply(clique_list, function(cl) paste(cl, collapse = ":"))
  as.formula(paste("Freq ~", paste(term_strings, collapse = "+")))
}
#
# fit: original fit
# amat: orginal graph structure
# freq.tb: frequency table as data
# edge: a number of where to add edge
# no intended to be called by user!
#
.update.fit <- function(fit, amat, edge, freq.tb)
{
  # Refit the log-linear model after adding one edge to the graph and
  # compare it with the current fit. Not intended to be called by the user!
  # fit     = current fit: list with $deviance and $df
  # amat    = current undirected graph (adjacency matrix)
  # edge    = index into upper.tri(amat) of the edge to add
  # freq.tb = frequency table as data (@table slot holds the counts)
  # returns c(new deviance, new df, |deviance change|, |df change|,
  #           p-value of the chi-square deviance-difference test)
  amat[upper.tri(amat)][edge] <- 1
  amat <- skeleton(amat)
  form <- .parse.graph(amat)
  # keep only the deviance and df components of the loglm fit
  newfit <- loglm(form, data = as.data.frame(freq.tb@table))[c(3,8)]
  res <- c(fit$deviance - newfit$deviance,
           fit$df - newfit$df)
  c(newfit$deviance, newfit$df, abs(res[1]), abs(res[2]),
    1 - pchisq(abs(res[1]), abs(res[2])))
}
#
# amat: original graph
# fit: original log-linear model
# freq.tb: frequency table as data
# p.value: thresholding p-value for picking edge
#
.try.addedge <- function(amat, fit, freq.tb, p.value)
{
  # Try every edge currently absent from the graph; add the single edge
  # with the smallest deviance-test p-value if that p-value is below
  # the threshold.
  # amat    = original graph (undirected adjacency matrix)
  # fit     = original log-linear model: list with $deviance and $df
  # freq.tb = frequency table as data
  # p.value = thresholding p-value for picking an edge
  # returns list(amat, change = whether an edge was added, fit)
  change <- FALSE
  if(any(skeleton(amat) != amat))
    stop("You should input an undirected graph!")
  candlist <- which(!amat[upper.tri(amat)] == 1)   # candidate (absent) edges
  if (length(candlist) > 0) {
    # one row of fit statistics per candidate edge (see .update.fit)
    res <- t(sapply(candlist, function(x)
                    .update.fit(fit, amat, x, freq.tb)))
    p.val <- res[,5]
    p.val.min <- min(p.val)
    idx <- which(p.val == p.val.min)[1]            # first of any ties
    if(p.val.min < p.value){
      change <- TRUE
      amat[upper.tri(amat)][candlist[idx]] <- 1
      amat <- skeleton(amat)
      fit <- list(df = res[idx,2], deviance = res[idx,1])
    }
  }
  return(list(amat = amat,
              change = change,
              fit = fit))
}
#
# if data is sparse, do forward selection
# else do simultaneous testing (or backward selection? may be slow...)
#
naive.getug.multinom <- function(freq.tb, p.value, method = "mkb")
{
  # Learn an undirected graph from a multinomial frequency table.
  # freq.tb = frequency table as data (@table: counts, last column "Freq")
  # p.value = significance threshold for the CI / deviance tests
  # method  = "mkb"    Markov-blanket (grow-shrink) search,
  #           "simple" pairwise CI test of every pair given the rest,
  #           "fwd"    forward edge selection via log-linear deviance tests
  # returns the adjacency matrix of the learned undirected graph
  mm <- switch(method,
               mkb = 1,
               simple = 2,
               fwd = 3,
               0)
  if (mm == 0) stop("Invalid method!")
  vnames <- colnames(freq.tb@table)
  vnames <- vnames[-length(vnames)]   # drop the trailing "Freq" column
  p <- length(vnames)
  amat <- matrix(0, p, p)             # start from the empty graph
  rownames(amat) <- colnames(amat) <- vnames
  if (mm == 1)
    return(.naive.getug.mkb(freq.tb, p.value))
  if (mm == 2) {
    # simultaneous testing: each pair (i, j) given all remaining variables
    for(i in 1:(p-1)) for(j in (i+1):p)
      amat[i,j] <- multinom.ci.test(freq.tb,
                                    vnames[i],
                                    vnames[j],
                                    vnames[-c(i,j)])$p.value < p.value + 0
    return(skeleton(amat))
  }
  if (mm == 3){
    # forward selection: keep adding the best edge while it stays significant
    initform <- .parse.graph(amat)
    fit <- loglm(initform, data = as.data.frame(freq.tb@table))[c(3,8)]
    while (TRUE) {
      res <- .try.addedge(amat, fit, freq.tb, p.value)
      if (res$change) {
        amat <- res$amat
        fit <- res$fit
      } else {
        break
      }
    }
  }
  amat
}
#
# alternative method: markov blanket selection
# ============================================
# a grow-shrink markov blanket selection procedure
# last modified: @@ Mon, Apr 28, 2008, 13:35 @@
#
.learn.mkvblkt <- function(freq.tb, var, curr = c(), p.value)
{
  # Grow-shrink estimation of the Markov blanket of `var`.
  # freq.tb = frequency table as data
  # var     = name of the target variable
  # curr    = variables forced into the blanket (never removed)
  # p.value = significance threshold of the CI tests
  # returns the estimated Markov blanket (character vector)
  forbid <- curr                      # protected members: may not be shrunk
  vnames <- colnames(freq.tb@table)
  vnames <- vnames[-length(vnames)]   # drop the trailing "Freq" column
  rest <- setdiff(vnames, c(curr,var))
  continue <- TRUE
  while (continue) { # grow the Markov blanket first
    # add the most significantly dependent remaining variable, if any
    # NOTE(review): if `rest` becomes empty, min(p.val) is min of an empty
    # set (Inf with a warning) -- presumably unreachable here; verify
    p.val <- sapply(rest, function(x)
                    multinom.ci.test(freq.tb, var, x, curr)$p.value)
    p.val.min <- min(p.val)
    idx <- which(p.val == p.val.min)[1]
    if(p.val.min < p.value) {
      curr <- c(curr, rest[idx])
      rest <- rest[-idx]
    } else {
      continue <- FALSE
    }
  }
  continue <- TRUE
  # restrict the table to the candidate blanket plus target before shrinking
  freq.tb <- compress.freq.tb(freq.tb, c(curr,var))
  delcand <- setdiff(curr, forbid) # only those added later is allowed to be deleted
  if (length(delcand) == 0)
    continue <- FALSE
  while (continue) { # shrink the Markov blanket
    # drop the least significant removable member while it is non-significant
    p.val <- sapply(delcand, function(x)
                    multinom.ci.test(freq.tb, var, x, setdiff(curr,x))$p.value)
    ## this step could be speeded up significantly!!!
    p.val.max <- max(p.val)
    idx <- which(p.val == p.val.max)[1]
    if(p.val.max > p.value) {
      curr <- setdiff(curr, delcand[idx])
      delcand <- delcand[-idx]
    } else {
      continue <- FALSE
    }
  }
  curr
}
.naive.getug.mkb <- function(freq.tb, p.value)
{
  # Learn an undirected graph by estimating the Markov blanket of each
  # variable (in random order) and connecting it to its blanket members.
  # freq.tb = frequency table as data
  # p.value = significance threshold passed on to .learn.mkvblkt
  # returns the adjacency matrix, rows/columns in the original name order
  vnames <- colnames(freq.tb@table)
  vnames <- vnames[-length(vnames)]   # drop the trailing "Freq" column
  p <- length(vnames)
  amat <- matrix(0, p, p)
  vset <- vnames[sample(p)]           # random visiting order of the variables
  rownames(amat) <- colnames(amat) <- vset
  for(i in 1:p) {
    # edges already attached to vset[i] seed (and protect) the blanket search
    curr <- vset[amat[vset[i],] == 1]
    res <- .learn.mkvblkt(freq.tb, vset[i], curr, p.value)
    amat[vset[i], res] <- 1
    amat[res, vset[i]] <- 1           # keep the matrix symmetric
  }
  amat[vnames, vnames]                # reorder back to the input name order
}
|
/R/naive.getug.multinom.R
|
no_license
|
cran/lcd
|
R
| false
| false
| 5,468
|
r
|
# some helper functions
# parse.graph
# ===========
# construct a formula for log-linear model from an undirected graph
.parse.graph <- function(amat)
{
  # Build the log-linear model formula implied by an undirected graph:
  # one interaction term per clique.
  # amat = adjacency matrix of an undirected graph
  if (any(skeleton(amat) != amat))
    stop("You should input an undirected graph!")
  cliq <- .lcd.cliques
  clique_list <- cliq(amat)
  # each clique becomes a ":"-joined interaction term
  term_strings <- sapply(clique_list, function(cl) paste(cl, collapse = ":"))
  as.formula(paste("Freq ~", paste(term_strings, collapse = "+")))
}
#
# fit: original fit
# amat: orginal graph structure
# freq.tb: frequency table as data
# edge: a number of where to add edge
# no intended to be called by user!
#
.update.fit <- function(fit, amat, edge, freq.tb)
{
  # Refit the log-linear model after adding one edge to the graph and
  # compare it with the current fit. Not intended to be called by the user!
  # fit     = current fit: list with $deviance and $df
  # amat    = current undirected graph (adjacency matrix)
  # edge    = index into upper.tri(amat) of the edge to add
  # freq.tb = frequency table as data (@table slot holds the counts)
  # returns c(new deviance, new df, |deviance change|, |df change|,
  #           p-value of the chi-square deviance-difference test)
  amat[upper.tri(amat)][edge] <- 1
  amat <- skeleton(amat)
  form <- .parse.graph(amat)
  # keep only the deviance and df components of the loglm fit
  newfit <- loglm(form, data = as.data.frame(freq.tb@table))[c(3,8)]
  res <- c(fit$deviance - newfit$deviance,
           fit$df - newfit$df)
  c(newfit$deviance, newfit$df, abs(res[1]), abs(res[2]),
    1 - pchisq(abs(res[1]), abs(res[2])))
}
#
# amat: original graph
# fit: original log-linear model
# freq.tb: frequency table as data
# p.value: thresholding p-value for picking edge
#
.try.addedge <- function(amat, fit, freq.tb, p.value)
{
  # Try every edge currently absent from the graph; add the single edge
  # with the smallest deviance-test p-value if that p-value is below
  # the threshold.
  # amat    = original graph (undirected adjacency matrix)
  # fit     = original log-linear model: list with $deviance and $df
  # freq.tb = frequency table as data
  # p.value = thresholding p-value for picking an edge
  # returns list(amat, change = whether an edge was added, fit)
  change <- FALSE
  if(any(skeleton(amat) != amat))
    stop("You should input an undirected graph!")
  candlist <- which(!amat[upper.tri(amat)] == 1)   # candidate (absent) edges
  if (length(candlist) > 0) {
    # one row of fit statistics per candidate edge (see .update.fit)
    res <- t(sapply(candlist, function(x)
                    .update.fit(fit, amat, x, freq.tb)))
    p.val <- res[,5]
    p.val.min <- min(p.val)
    idx <- which(p.val == p.val.min)[1]            # first of any ties
    if(p.val.min < p.value){
      change <- TRUE
      amat[upper.tri(amat)][candlist[idx]] <- 1
      amat <- skeleton(amat)
      fit <- list(df = res[idx,2], deviance = res[idx,1])
    }
  }
  return(list(amat = amat,
              change = change,
              fit = fit))
}
#
# if data is sparse, do forward selection
# else do simultaneous testing (or backward selection? may be slow...)
#
naive.getug.multinom <- function(freq.tb, p.value, method = "mkb")
{
  # Learn an undirected graph from a multinomial frequency table.
  # freq.tb = frequency table as data (@table: counts, last column "Freq")
  # p.value = significance threshold for the CI / deviance tests
  # method  = "mkb"    Markov-blanket (grow-shrink) search,
  #           "simple" pairwise CI test of every pair given the rest,
  #           "fwd"    forward edge selection via log-linear deviance tests
  # returns the adjacency matrix of the learned undirected graph
  mm <- switch(method,
               mkb = 1,
               simple = 2,
               fwd = 3,
               0)
  if (mm == 0) stop("Invalid method!")
  vnames <- colnames(freq.tb@table)
  vnames <- vnames[-length(vnames)]   # drop the trailing "Freq" column
  p <- length(vnames)
  amat <- matrix(0, p, p)             # start from the empty graph
  rownames(amat) <- colnames(amat) <- vnames
  if (mm == 1)
    return(.naive.getug.mkb(freq.tb, p.value))
  if (mm == 2) {
    # simultaneous testing: each pair (i, j) given all remaining variables
    for(i in 1:(p-1)) for(j in (i+1):p)
      amat[i,j] <- multinom.ci.test(freq.tb,
                                    vnames[i],
                                    vnames[j],
                                    vnames[-c(i,j)])$p.value < p.value + 0
    return(skeleton(amat))
  }
  if (mm == 3){
    # forward selection: keep adding the best edge while it stays significant
    initform <- .parse.graph(amat)
    fit <- loglm(initform, data = as.data.frame(freq.tb@table))[c(3,8)]
    while (TRUE) {
      res <- .try.addedge(amat, fit, freq.tb, p.value)
      if (res$change) {
        amat <- res$amat
        fit <- res$fit
      } else {
        break
      }
    }
  }
  amat
}
#
# alternative method: markov blanket selection
# ============================================
# a grow-shrink markov blanket selection procedure
# last modified: @@ Mon, Apr 28, 2008, 13:35 @@
#
.learn.mkvblkt <- function(freq.tb, var, curr = c(), p.value)
{
  # Grow-shrink estimation of the Markov blanket of `var`.
  # freq.tb = frequency table as data
  # var     = name of the target variable
  # curr    = variables forced into the blanket (never removed)
  # p.value = significance threshold of the CI tests
  # returns the estimated Markov blanket (character vector)
  forbid <- curr                      # protected members: may not be shrunk
  vnames <- colnames(freq.tb@table)
  vnames <- vnames[-length(vnames)]   # drop the trailing "Freq" column
  rest <- setdiff(vnames, c(curr,var))
  continue <- TRUE
  while (continue) { # grow the Markov blanket first
    # add the most significantly dependent remaining variable, if any
    # NOTE(review): if `rest` becomes empty, min(p.val) is min of an empty
    # set (Inf with a warning) -- presumably unreachable here; verify
    p.val <- sapply(rest, function(x)
                    multinom.ci.test(freq.tb, var, x, curr)$p.value)
    p.val.min <- min(p.val)
    idx <- which(p.val == p.val.min)[1]
    if(p.val.min < p.value) {
      curr <- c(curr, rest[idx])
      rest <- rest[-idx]
    } else {
      continue <- FALSE
    }
  }
  continue <- TRUE
  # restrict the table to the candidate blanket plus target before shrinking
  freq.tb <- compress.freq.tb(freq.tb, c(curr,var))
  delcand <- setdiff(curr, forbid) # only those added later is allowed to be deleted
  if (length(delcand) == 0)
    continue <- FALSE
  while (continue) { # shrink the Markov blanket
    # drop the least significant removable member while it is non-significant
    p.val <- sapply(delcand, function(x)
                    multinom.ci.test(freq.tb, var, x, setdiff(curr,x))$p.value)
    ## this step could be speeded up significantly!!!
    p.val.max <- max(p.val)
    idx <- which(p.val == p.val.max)[1]
    if(p.val.max > p.value) {
      curr <- setdiff(curr, delcand[idx])
      delcand <- delcand[-idx]
    } else {
      continue <- FALSE
    }
  }
  curr
}
.naive.getug.mkb <- function(freq.tb, p.value)
{
  # Learn an undirected graph by estimating the Markov blanket of each
  # variable (in random order) and connecting it to its blanket members.
  # freq.tb = frequency table as data
  # p.value = significance threshold passed on to .learn.mkvblkt
  # returns the adjacency matrix, rows/columns in the original name order
  vnames <- colnames(freq.tb@table)
  vnames <- vnames[-length(vnames)]   # drop the trailing "Freq" column
  p <- length(vnames)
  amat <- matrix(0, p, p)
  vset <- vnames[sample(p)]           # random visiting order of the variables
  rownames(amat) <- colnames(amat) <- vset
  for(i in 1:p) {
    # edges already attached to vset[i] seed (and protect) the blanket search
    curr <- vset[amat[vset[i],] == 1]
    res <- .learn.mkvblkt(freq.tb, vset[i], curr, p.value)
    amat[vset[i], res] <- 1
    amat[res, vset[i]] <- 1           # keep the matrix symmetric
  }
  amat[vnames, vnames]                # reorder back to the input name order
}
|
testdf <- function(variable, max.augmentations) {
  # Run a sequence of ADF tests (0..max.augmentations lag augmentations,
  # with a constant) on `variable`, reporting for each lag order the ADF
  # statistic, its p-value, and a Breusch-Godfrey test (order 1) on the
  # ADF regression residuals. Also prints a ggplot of the series.
  # variable          = a numeric series (xts/zoo or plain vector)
  # max.augmentations = highest number of lag augmentations to try
  # returns a data frame with one row per augmentation order
  require(fUnitRoots)
  require(lmtest)
  variable <- coredata(variable)
  variable <- as.numeric(variable)
  variable <- variable[!is.na(variable)]
  # Build one result row per augmentation order, then bind them all at once.
  # This replaces the original dummy first row + rbind-in-a-loop pattern.
  rows <- lapply(0:max.augmentations, function(augmentations) {
    df.test <- adfTest(variable, lags = augmentations, type = "c")
    adf_stat <- as.numeric(df.test@test$statistic)
    p_adf <- as.numeric(df.test@test$p.value)
    resids <- df.test@test$lm$residuals
    # BUG FIX: the original stored this result in a variable named `bgtest`,
    # shadowing lmtest::bgtest inside the loop
    bg_res <- bgtest(resids ~ 1, order = 1)
    data.frame(augmentations = augmentations,
               adf = adf_stat,
               p_adf = p_adf,
               bgodfrey = unname(bg_res$statistic),
               p_bg = bg_res$p.value)
  })
  results_adf <- do.call(rbind, rows)
  row.names(results_adf) <- NULL
  # for the basic graphics (kept for reference, disabled):
  if (FALSE) plot(variable, type = "l", col = "darkblue", lwd = 1, main = "Plot of the examined variable")
  # for ggplot2 graphics:
  p <-
    tibble(y = variable) %>%
    mutate(obs = row_number()) %>%
    ggplot(aes(obs, y)) +
    geom_line(col = "royalblue3") +
    theme_bw() +
    labs(title = "Plot of the examined variable")
  p %>% print()
  return(results_adf)
}
|
/functions/testdf.R
|
no_license
|
rafrys/time-series-final-project
|
R
| false
| false
| 1,487
|
r
|
testdf <- function(variable, max.augmentations) {
  # Run a sequence of ADF tests (0..max.augmentations lag augmentations,
  # with a constant) on `variable`, reporting for each lag order the ADF
  # statistic, its p-value, and a Breusch-Godfrey test (order 1) on the
  # ADF regression residuals. Also prints a ggplot of the series.
  # variable          = a numeric series (xts/zoo or plain vector)
  # max.augmentations = highest number of lag augmentations to try
  # returns a data frame with one row per augmentation order
  require(fUnitRoots)
  require(lmtest)
  variable <- coredata(variable)
  variable <- as.numeric(variable)
  variable <- variable[!is.na(variable)]
  # Build one result row per augmentation order, then bind them all at once.
  # This replaces the original dummy first row + rbind-in-a-loop pattern.
  rows <- lapply(0:max.augmentations, function(augmentations) {
    df.test <- adfTest(variable, lags = augmentations, type = "c")
    adf_stat <- as.numeric(df.test@test$statistic)
    p_adf <- as.numeric(df.test@test$p.value)
    resids <- df.test@test$lm$residuals
    # BUG FIX: the original stored this result in a variable named `bgtest`,
    # shadowing lmtest::bgtest inside the loop
    bg_res <- bgtest(resids ~ 1, order = 1)
    data.frame(augmentations = augmentations,
               adf = adf_stat,
               p_adf = p_adf,
               bgodfrey = unname(bg_res$statistic),
               p_bg = bg_res$p.value)
  })
  results_adf <- do.call(rbind, rows)
  row.names(results_adf) <- NULL
  # for the basic graphics (kept for reference, disabled):
  if (FALSE) plot(variable, type = "l", col = "darkblue", lwd = 1, main = "Plot of the examined variable")
  # for ggplot2 graphics:
  p <-
    tibble(y = variable) %>%
    mutate(obs = row_number()) %>%
    ggplot(aes(obs, y)) +
    geom_line(col = "royalblue3") +
    theme_bw() +
    labs(title = "Plot of the examined variable")
  p %>% print()
  return(results_adf)
}
|
source("helpers.R");
logisticRegressionBIC <- function(X, y, candidate_models, psi = 1) {
  # BIC-based model averaging over candidate logistic regressions.
  # X                = design matrix (n x p)
  # y                = binary response vector
  # candidate_models = p x k 0/1 matrix; column j flags the variables of model j
  # psi              = penalty multiplier on the complexity term C_k
  # returns list(weight_vector, soil_importance) where soil_importance is
  # the weighted inclusion measure of each variable
  k <- ncol(candidate_models)
  p <- ncol(X)
  I_k <- rep(0, k)
  w_k_numerator <- C_k <- numeric(k)
  s_k <- apply(candidate_models, 2, sum)   # model sizes
  for (i in seq_len(k)) {
    indices <- as.vector(which(candidate_models[, i] != 0))
    if (length(indices) == 0) {
      # empty model: intercept only
      fit_logReg_k <- glm(y ~ 1, family = "binomial")
    } else {
      Xs_k <- X[, indices]
      reg_data <- as.data.frame(cbind(y, Xs_k))
      colnames(reg_data)[1] <- "y"
      fit_logReg_k <- glm(y ~ ., data = reg_data, family = "binomial")
    }
    I_k[i] <- BIC(fit_logReg_k)
    C_k[i] <- calculateCk(s_k[i], p)       # complexity penalty (helpers.R)
    if (is.infinite(I_k[i])) {
      # BUG FIX: the original assigned rep(0, times = k) -- a length-k
      # vector -- to the single element w_k_numerator[i]; a scalar is intended
      w_k_numerator[i] <- 0
    } else {
      w_k_numerator[i] <- exp(-I_k[i] / 2 - psi * C_k[i])
    }
  }
  w_k_numerator[is.nan(w_k_numerator)] <- 0
  weight_vector <- w_k_numerator / sum(w_k_numerator)
  soil_importance <- weight_vector %*% t(candidate_models)
  return(
    list(
      weight_vector = round(weight_vector, 6),
      soil_importance = soil_importance
    )
  )
}
|
/project-thesis/BIC-logistic-regression.R
|
no_license
|
schweryjonas/master-thesis
|
R
| false
| false
| 1,161
|
r
|
source("helpers.R");
logisticRegressionBIC <- function(X, y, candidate_models, psi = 1) {
  # BIC-based model averaging over candidate logistic regressions.
  # X                = design matrix (n x p)
  # y                = binary response vector
  # candidate_models = p x k 0/1 matrix; column j flags the variables of model j
  # psi              = penalty multiplier on the complexity term C_k
  # returns list(weight_vector, soil_importance) where soil_importance is
  # the weighted inclusion measure of each variable
  k <- ncol(candidate_models)
  p <- ncol(X)
  I_k <- rep(0, k)
  w_k_numerator <- C_k <- numeric(k)
  s_k <- apply(candidate_models, 2, sum)   # model sizes
  for (i in seq_len(k)) {
    indices <- as.vector(which(candidate_models[, i] != 0))
    if (length(indices) == 0) {
      # empty model: intercept only
      fit_logReg_k <- glm(y ~ 1, family = "binomial")
    } else {
      Xs_k <- X[, indices]
      reg_data <- as.data.frame(cbind(y, Xs_k))
      colnames(reg_data)[1] <- "y"
      fit_logReg_k <- glm(y ~ ., data = reg_data, family = "binomial")
    }
    I_k[i] <- BIC(fit_logReg_k)
    C_k[i] <- calculateCk(s_k[i], p)       # complexity penalty (helpers.R)
    if (is.infinite(I_k[i])) {
      # BUG FIX: the original assigned rep(0, times = k) -- a length-k
      # vector -- to the single element w_k_numerator[i]; a scalar is intended
      w_k_numerator[i] <- 0
    } else {
      w_k_numerator[i] <- exp(-I_k[i] / 2 - psi * C_k[i])
    }
  }
  w_k_numerator[is.nan(w_k_numerator)] <- 0
  weight_vector <- w_k_numerator / sum(w_k_numerator)
  soil_importance <- weight_vector %*% t(candidate_models)
  return(
    list(
      weight_vector = round(weight_vector, 6),
      soil_importance = soil_importance
    )
  )
}
|
source("rei2.R")                     # provides makefeature()
set.seed(123)                        # reproducible train/test split
# bank marketing data; fields are semicolon separated
bank <- read.csv("bank-full.csv", sep=";")
sapply(bank, class)                  # quick look at the column types
bank.processed <- makefeature(bank)  # feature engineering from rei2.R
N <- nrow(bank)
# 70/30 train/test split on row indices
inds.tr <- sample(seq(N), as.integer(0.7 * N))
bank.train <- bank.processed[inds.tr,]
bank.test <- bank.processed[-inds.tr,]
|
/20151201/rei3.R
|
no_license
|
shengbo-medley/MiscForStudy
|
R
| false
| false
| 270
|
r
|
source("rei2.R")                     # provides makefeature()
set.seed(123)                        # reproducible train/test split
# bank marketing data; fields are semicolon separated
bank <- read.csv("bank-full.csv", sep=";")
sapply(bank, class)                  # quick look at the column types
bank.processed <- makefeature(bank)  # feature engineering from rei2.R
N <- nrow(bank)
# 70/30 train/test split on row indices
inds.tr <- sample(seq(N), as.integer(0.7 * N))
bank.train <- bank.processed[inds.tr,]
bank.test <- bank.processed[-inds.tr,]
|
extractFile <- function(file, min.word.length,max.word.length,apply.stoplist) {
  # Tokenize a text file into a lowercase word vector, keeping words with
  # min.word.length < nchar < max.word.length, and optionally removing
  # stopwords read from jockersStopList.txt.
  # file            = path of the text file to read
  # min.word.length = keep words strictly longer than this
  # max.word.length = keep words strictly shorter than this
  # apply.stoplist  = the exact string "apply stoplist" enables stopword removal
  # returns a character vector of words
  file.content <- scan(file, what = "character", sep = "\n")
  file.content <- paste(file.content, collapse = " ")
  file.content <- tolower(file.content)
  word.vector <- unlist(strsplit(file.content, "\\W"))
  word.vector <- word.vector[word.vector != ""]               # drop empty tokens
  word.vector <- word.vector[nchar(word.vector) > min.word.length]
  word.vector <- word.vector[nchar(word.vector) < max.word.length]
  if (apply.stoplist == "apply stoplist") {
    english.stoplist <- scan("jockersStopList.txt", what = "character")
    # each scanned token may carry a trailing comma: keep the part before ","
    # (vapply replaces the type-unstable sapply of the original)
    english.stoplist <- vapply(strsplit(english.stoplist, ","),
                               function(x) x[[1]], character(1))
    # vectorized set filter replaces the original per-word lapply + unlist
    # (same result, one pass instead of one call per word)
    return(word.vector[!(word.vector %in% english.stoplist)])
  }
  else {
    return(word.vector)
  }
}
|
/extractfile.R
|
no_license
|
Anupam02/text_analysis_r
|
R
| false
| false
| 1,386
|
r
|
# Read a text file and return its lower-cased word vector, filtered by word
# length and (optionally) by the stoplist in "jockersStopList.txt".
extractFile <- function(file, min.word.length,max.word.length,apply.stoplist) {
  # Read all lines, collapse them into one string, lower-case everything.
  file.content <- scan(file,what="character",sep="\n")
  file.content <- paste(file.content,collapse=" ")
  file.content <- tolower(file.content)
  # Split on non-word characters; drop empty tokens left by the split.
  file.content.words <- strsplit(file.content,"\\W")
  file.content.word.vector <- unlist(file.content.words)
  not.blanks <- which(file.content.word.vector!="")
  file.content.word.vector <- file.content.word.vector[not.blanks]
  # Keep words strictly longer than min.word.length ...
  min.word.filter <- nchar(file.content.word.vector) > min.word.length
  file.content.word.vector <- file.content.word.vector[min.word.filter]
  # ... and strictly shorter than max.word.length (bounds excluded).
  max.word.filter <- nchar(file.content.word.vector) < max.word.length
  file.content.word.vector <- file.content.word.vector[max.word.filter]
  if (apply.stoplist=="apply stoplist") {
    # Stoplist is read from the working directory; entries may carry
    # comma-separated extras -- only the first field of each is kept.
    english.stoplist <- scan("jockersStopList.txt",
                          what = "character")
    aa <- strsplit(english.stoplist, ",")
    # english.stoplist<-unique(english.stoplist[,1])
    english.stoplist <- sapply(X = aa, FUN = function(x) {
      x[[1]]
    })
    length(english.stoplist)  # no-op: the value is discarded
    # Keep the words of word.vector that are NOT in the stoplist.
    mywhich <- function(word.vector, stoplist) {
      word.vector[!(word.vector %in% stoplist)]
    }
    # NOTE(review): lapply applies mywhich to each single word, scanning the
    # stoplist once per word; one vectorized %in% over the whole vector would
    # give the same result far more cheaply.
    words.without.stops <- lapply(file.content.word.vector, mywhich, english.stoplist)
    return (unlist(words.without.stops))
  }
  else {
    return (file.content.word.vector)
  }
}
|
## This function comes from the package "POT" . The gpd function
## corresponds to the gpdmle function. So, I'm very gratefull to Mathieu Ribatet.
#'@useDynLib noniid.pm
## Maximum-likelihood fit of a Generalized Pareto Distribution (GPD) to the
## exceedances of `x` over `threshold`.
##
## Arguments:
##   x            numeric data vector.
##   threshold    scalar or vector of thresholds (recycled to length(x)).
##   start        optional named list of starting values for "scale"/"shape".
##   ...          fixed parameter values ("scale"/"shape") and/or further
##                arguments passed on to optim().
##   std.err.type "observed", "expected" or "none" -- how (whether) standard
##                errors are computed.
##   corr         if TRUE, also return the parameter correlation matrix.
##   method       optimization method for optim().
##   warn.inf     warn when the negative log-likelihood is infinite at the
##                starting values.
## Value: a list with fitted parameters, standard errors, covariance matrix,
## deviance, convergence info and the exceedance data.
gpd <- function(x, threshold, start, ...,
                std.err.type = "observed", corr = FALSE,
                method = "BFGS", warn.inf = TRUE){
  if (all(c("observed", "expected", "none") != std.err.type))
    stop("``std.err.type'' must be one of 'observed', 'expected' or 'none'")
  # Negative log-likelihood, delegated to the compiled routine "gpdlik"
  # (loaded via the package's dynamic library; see @useDynLib above).
  nlpot <- function(scale, shape) {
    -.C("gpdlik", exceed, nat, threshold, scale,
        shape, dns = double(1))$dns
  }
  nn <- length(x)
  threshold <- rep(threshold, length.out = nn)
  # Keep only observations strictly above their threshold.
  high <- (x > threshold) & !is.na(x)
  threshold <- as.double(threshold[high])
  exceed <- as.double(x[high])
  nat <- length(exceed)
  if(!nat) stop("no data above threshold")
  # Proportion of observations above the threshold.
  pat <- nat/nn
  param <- c("scale", "shape")
  if(missing(start)) {
    # Default start: shape 0 and a moment-style guess for the scale.
    start <- list(scale = 0, shape = 0)
    start$scale <- mean(exceed) - min(threshold)
    # Parameters fixed via ... are removed from the optimization.
    start <- start[!(param %in% names(list(...)))]
  }
  if(!is.list(start))
    stop("`start' must be a named list")
  if(!length(start))
    stop("there are no parameters left to maximize over")
  nm <- names(start)
  l <- length(nm)
  # Reorder nlpot's formals so the free parameters come first, in the order
  # given by `start`, letting optim() supply them as a single vector.
  f <- formals(nlpot)
  names(f) <- param
  m <- match(nm, param)
  if(any(is.na(m)))
    stop("`start' specifies unknown arguments")
  formals(nlpot) <- c(f[m], f[-m])
  nllh <- function(p, ...) nlpot(p, ...)
  if(l > 1)
    # With more than one free parameter, unpack the vector p into the
    # individual nlpot arguments.
    body(nllh) <- parse(text = paste("nlpot(", paste("p[",1:l,
                                                     "]", collapse = ", "), ", ...)"))
  fixed.param <- list(...)[names(list(...)) %in% param]
  if(any(!(param %in% c(nm,names(fixed.param)))))
    stop("unspecified parameters")
  start.arg <- c(list(p = unlist(start)), fixed.param)
  # 1e6 appears to be the sentinel for an infinite/invalid log-likelihood
  # returned by the C routine -- TODO confirm against gpdlik's source.
  if( warn.inf && do.call("nllh", start.arg) == 1e6 )
    warning("negative log-likelihood is infinite at starting values")
  opt <- optim(start, nllh, hessian = TRUE, ..., method = method)
  if ((opt$convergence != 0) || (opt$value == 1e6)) {
    warning("optimization may not have succeeded")
    if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
  }
  else opt$convergence <- "successful"
  if (std.err.type != "none"){
    tol <- .Machine$double.eps^0.5
    if(std.err.type == "observed") {
      # Observed information: invert the numerically computed Hessian.
      var.cov <- qr(opt$hessian, tol = tol)
      if(var.cov$rank != ncol(var.cov$qr)){
        warning("observed information matrix is singular; passing std.err.type to ``expected''")
        obs.fish <- FALSE
        # NOTE(review): bare `return` is just the function object, not a
        # call -- it does NOT exit here, and `obs.fish` is never used again.
        # The warning text suggests the intent was std.err.type <- "expected".
        return
      }
      if (std.err.type == "observed"){
        # NOTE(review): this condition is always TRUE inside the enclosing
        # std.err.type == "observed" branch.
        var.cov <- try(solve(var.cov, tol = tol), silent = TRUE)
        if(!is.matrix(var.cov)){
          warning("observed information matrix is singular; passing std.err.type to ''none''")
          std.err.type <- "expected"
          # NOTE(review): bare `return` again -- no early exit happens; the
          # code falls through to the "expected" branch below (which may be
          # the intended fallback, despite the warning saying "none").
          return
        }
        else{
          std.err <- diag(var.cov)
          if(any(std.err <= 0)){
            warning("observed information matrix is singular; passing std.err.type to ``expected''")
            std.err.type <- "expected"
            # NOTE(review): bare `return` -- execution falls through to the
            # "expected" fallback below.
            return
          }
          std.err <- sqrt(std.err)
          if(corr) {
            # Convert the covariance matrix into a correlation matrix.
            .mat <- diag(1/std.err, nrow = length(std.err))
            corr.mat <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
            diag(corr.mat) <- rep(1, length(std.err))
          }
          else {
            corr.mat <- NULL
          }
        }
      }
    }
    if (std.err.type == "expected"){
      # Expected (Fisher) information, using closed-form GPD expressions.
      shape <- opt$par[2]
      scale <- opt$par[1]
      a22 <- 2/((1+shape)*(1+2*shape))
      a12 <- 1/(scale*(1+shape)*(1+2*shape))
      a11 <- 1/((scale^2)*(1+2*shape))
      ##Expected Matix of Information of Fisher
      expFisher <- nat * matrix(c(a11,a12,a12,a22),nrow=2)
      expFisher <- qr(expFisher, tol = tol)
      var.cov <- solve(expFisher, tol = tol)
      std.err <- sqrt(diag(var.cov))
      if(corr) {
        .mat <- diag(1/std.err, nrow = length(std.err))
        corr.mat <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
        diag(corr.mat) <- rep(1, length(std.err))
      }
      else
        corr.mat <- NULL
    }
    colnames(var.cov) <- nm
    rownames(var.cov) <- nm
    names(std.err) <- nm
  }
  else{
    std.err <- std.err.type <- corr.mat <- NULL
    var.cov <- NULL
  }
  param <- c(opt$par, unlist(fixed.param))
  scale <- param["scale"]
  # Store a single scalar threshold unless thresholds actually vary.
  var.thresh <- !all(threshold == threshold[1])
  if (!var.thresh)
    threshold <- threshold[1]
  list(fitted.values = opt$par, std.err = std.err, std.err.type = std.err.type,
       var.cov = var.cov, fixed = unlist(fixed.param), param = param,
       deviance = 2*opt$value, corr = corr.mat, convergence = opt$convergence,
       counts = opt$counts, message = opt$message, threshold = threshold,
       nat = nat, pat = pat, data = x, exceed = exceed, scale = scale,
       var.thresh = var.thresh, est = "MLE", logLik = -opt$value,
       opt.value = opt$value, hessian = opt$hessian)
}
|
/sandbox/pulkit/R/gpdmle.R
|
no_license
|
braverock/PerformanceAnalytics
|
R
| false
| false
| 5,102
|
r
|
## This function comes from the package "POT" . The gpd function
## corresponds to the gpdmle function. So, I'm very gratefull to Mathieu Ribatet.
#'@useDynLib noniid.pm
## Maximum-likelihood fit of a Generalized Pareto Distribution (GPD) to the
## exceedances of `x` over `threshold`. Returns a list with fitted
## parameters, standard errors, covariance matrix, deviance and the data.
## std.err.type selects observed/expected Fisher information (or none).
gpd <- function(x, threshold, start, ...,
                std.err.type = "observed", corr = FALSE,
                method = "BFGS", warn.inf = TRUE){
  if (all(c("observed", "expected", "none") != std.err.type))
    stop("``std.err.type'' must be one of 'observed', 'expected' or 'none'")
  # Negative log-likelihood via the compiled routine "gpdlik".
  nlpot <- function(scale, shape) {
    -.C("gpdlik", exceed, nat, threshold, scale,
        shape, dns = double(1))$dns
  }
  nn <- length(x)
  threshold <- rep(threshold, length.out = nn)
  # Keep only observations strictly above their threshold.
  high <- (x > threshold) & !is.na(x)
  threshold <- as.double(threshold[high])
  exceed <- as.double(x[high])
  nat <- length(exceed)
  if(!nat) stop("no data above threshold")
  pat <- nat/nn  # proportion of observations above the threshold
  param <- c("scale", "shape")
  if(missing(start)) {
    # Default start: shape 0, moment-style guess for scale; parameters
    # fixed via ... are dropped from the optimization.
    start <- list(scale = 0, shape = 0)
    start$scale <- mean(exceed) - min(threshold)
    start <- start[!(param %in% names(list(...)))]
  }
  if(!is.list(start))
    stop("`start' must be a named list")
  if(!length(start))
    stop("there are no parameters left to maximize over")
  nm <- names(start)
  l <- length(nm)
  # Reorder nlpot's formals so free parameters come first, in `start` order,
  # letting optim() supply them as one vector.
  f <- formals(nlpot)
  names(f) <- param
  m <- match(nm, param)
  if(any(is.na(m)))
    stop("`start' specifies unknown arguments")
  formals(nlpot) <- c(f[m], f[-m])
  nllh <- function(p, ...) nlpot(p, ...)
  if(l > 1)
    # Unpack the parameter vector p into individual nlpot arguments.
    body(nllh) <- parse(text = paste("nlpot(", paste("p[",1:l,
                                                     "]", collapse = ", "), ", ...)"))
  fixed.param <- list(...)[names(list(...)) %in% param]
  if(any(!(param %in% c(nm,names(fixed.param)))))
    stop("unspecified parameters")
  start.arg <- c(list(p = unlist(start)), fixed.param)
  # 1e6 appears to be the sentinel for an invalid log-likelihood -- TODO
  # confirm against the gpdlik C source.
  if( warn.inf && do.call("nllh", start.arg) == 1e6 )
    warning("negative log-likelihood is infinite at starting values")
  opt <- optim(start, nllh, hessian = TRUE, ..., method = method)
  if ((opt$convergence != 0) || (opt$value == 1e6)) {
    warning("optimization may not have succeeded")
    if(opt$convergence == 1) opt$convergence <- "iteration limit reached"
  }
  else opt$convergence <- "successful"
  if (std.err.type != "none"){
    tol <- .Machine$double.eps^0.5
    if(std.err.type == "observed") {
      # Observed information: invert the numerically computed Hessian.
      var.cov <- qr(opt$hessian, tol = tol)
      if(var.cov$rank != ncol(var.cov$qr)){
        warning("observed information matrix is singular; passing std.err.type to ``expected''")
        obs.fish <- FALSE
        # NOTE(review): bare `return` is the function object, not a call --
        # it does NOT exit; `obs.fish` is never used. Likely intended:
        # std.err.type <- "expected".
        return
      }
      if (std.err.type == "observed"){
        # NOTE(review): always TRUE inside this branch.
        var.cov <- try(solve(var.cov, tol = tol), silent = TRUE)
        if(!is.matrix(var.cov)){
          warning("observed information matrix is singular; passing std.err.type to ''none''")
          std.err.type <- "expected"
          # NOTE(review): bare `return` -- falls through to the "expected"
          # branch below (warning text says "none", assignment says otherwise).
          return
        }
        else{
          std.err <- diag(var.cov)
          if(any(std.err <= 0)){
            warning("observed information matrix is singular; passing std.err.type to ``expected''")
            std.err.type <- "expected"
            # NOTE(review): bare `return` -- falls through to the fallback.
            return
          }
          std.err <- sqrt(std.err)
          if(corr) {
            # Covariance matrix -> correlation matrix.
            .mat <- diag(1/std.err, nrow = length(std.err))
            corr.mat <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
            diag(corr.mat) <- rep(1, length(std.err))
          }
          else {
            corr.mat <- NULL
          }
        }
      }
    }
    if (std.err.type == "expected"){
      # Expected (Fisher) information from closed-form GPD expressions.
      shape <- opt$par[2]
      scale <- opt$par[1]
      a22 <- 2/((1+shape)*(1+2*shape))
      a12 <- 1/(scale*(1+shape)*(1+2*shape))
      a11 <- 1/((scale^2)*(1+2*shape))
      ##Expected Matix of Information of Fisher
      expFisher <- nat * matrix(c(a11,a12,a12,a22),nrow=2)
      expFisher <- qr(expFisher, tol = tol)
      var.cov <- solve(expFisher, tol = tol)
      std.err <- sqrt(diag(var.cov))
      if(corr) {
        .mat <- diag(1/std.err, nrow = length(std.err))
        corr.mat <- structure(.mat %*% var.cov %*% .mat, dimnames = list(nm,nm))
        diag(corr.mat) <- rep(1, length(std.err))
      }
      else
        corr.mat <- NULL
    }
    colnames(var.cov) <- nm
    rownames(var.cov) <- nm
    names(std.err) <- nm
  }
  else{
    std.err <- std.err.type <- corr.mat <- NULL
    var.cov <- NULL
  }
  param <- c(opt$par, unlist(fixed.param))
  scale <- param["scale"]
  # Store a single scalar threshold unless thresholds actually vary.
  var.thresh <- !all(threshold == threshold[1])
  if (!var.thresh)
    threshold <- threshold[1]
  list(fitted.values = opt$par, std.err = std.err, std.err.type = std.err.type,
       var.cov = var.cov, fixed = unlist(fixed.param), param = param,
       deviance = 2*opt$value, corr = corr.mat, convergence = opt$convergence,
       counts = opt$counts, message = opt$message, threshold = threshold,
       nat = nat, pat = pat, data = x, exceed = exceed, scale = scale,
       var.thresh = var.thresh, est = "MLE", logLik = -opt$value,
       opt.value = opt$value, hessian = opt$hessian)
}
|
#' Build simulated plant-bird interaction networks.
#'
#' A link between a plant and a bird is considered possible when the fruit
#' diameter is smaller than the bird's gape. From the possible links,
#' `length(links) * dens` links are drawn (weighted by the product of the
#' species' relative abundances) for each of `runs` replicate networks.
#'
#' @param plantas data frame with columns `species`, `fruit`, `strata` and
#'   `abundance`.
#' @param aves data frame with columns `specie`, `gape`, `strata` and
#'   `abundance`.
#' @param dens target network density (fraction of all cells that are links).
#' @param runs number of replicate networks to generate.
#' @return (invisibly) a list of `runs` binary matrices, birds in rows and
#'   plants in columns, with lower-cased species names as dimnames.
make.networks <- function(plantas, aves, dens = 0.2, runs = 1000) {
  networks <- vector("list", runs)
  # Morphological criterion: fruit strictly smaller than gape.
  links.morpho <- outer(plantas$fruit, aves$gape, `<`)
  # Stratum overlap between each plant/bird pair. Currently computed but not
  # combined into `links` (see the disabled line below); kept for future use.
  links.strata <- matrix(FALSE, nrow = length(plantas$strata), ncol = length(aves$strata))
  for (i in seq_along(plantas$strata)) {
    for (j in seq_along(aves$strata)) {
      links.strata[i, j] <- any(unlist(strsplit(plantas$strata[i], " and |, ")) %in% unlist(strsplit(aves$strata[j], " and |, ")))
    }
  }
  # links <- ifelse(links.morpho & links.strata, 1, 0)  # both criteria
  links <- links.morpho
  # Drop species with no possible links at all.
  colsums <- colSums(links)
  rowsums <- rowSums(links)
  nomes.plantas <- tolower(unique(plantas$species)[rowsums > 0])
  nomes.aves <- tolower(unique(aves$specie)[colsums > 0])
  links <- links[rowsums > 0, colsums > 0]
  N.plantas <- nrow(links)
  N.aves <- ncol(links)
  # Minimum number of interactions a species should retain.
  min.links <- 1
  # Joint relative abundance: the sampling weight of each candidate link.
  # Subsetting uses the PRE-subset rowsums/colsums because mat.prob is still
  # full-size at this point.
  mat.prob <- outer(plantas$abundance, aves$abundance, function(x, y) x/sum(plantas$abundance) * y/sum(aves$abundance))
  mat.prob <- mat.prob[rowsums > 0, colsums > 0]
  colsums <- colSums(links)
  rowsums <- rowSums(links)
  # Species already at (or below) the minimum degree get their link weights
  # boosted so their links tend to survive the thinning below.
  c.keep <- colsums <= min.links & colsums != 0
  r.keep <- rowsums <= min.links & rowsums != 0
  mat.prob[r.keep, c.keep] <- mat.prob[r.keep, c.keep] + links[r.keep, c.keep]
  for (i in seq_len(runs)) {
    final.mat <- matrix(0, ncol = N.aves, nrow = N.plantas)
    # Weighted sampling without replacement down to the target density.
    links.keep <- sample(which(links == 1),
                         length(links) * dens,
                         prob = mat.prob[links == 1])
    final.mat[links.keep] <- 1
    final.mat <- t(final.mat)  # birds in rows, plants in columns
    colnames(final.mat) <- nomes.plantas
    rownames(final.mat) <- nomes.aves
    networks[[i]] <- final.mat
  }
  invisible(networks)
}
|
/make_networks.R
|
no_license
|
gustavobio/networks-r
|
R
| false
| false
| 3,377
|
r
|
## Build `runs` simulated plant-bird interaction networks. Links are possible
## when the fruit is smaller than the bird's gape; from those, links are
## sampled (weighted by relative abundances) down to the target density.
make.networks <- function(plantas, aves, dens = 0.2, runs = 1000) {
  networks <- vector("list", runs)
  ## Here we assume birds consume fruits "at random" provided the fruit is
  ## smaller than the gape. Since only species means are available, it could
  ## be worth incorporating some variation (at least for the fruits).
  ## Which links are possible given the species' morphology?
  links.morpho <- outer(plantas$fruit, aves$gape, `<`)
  ## Stratum overlap (computed below but currently not combined into `links`).
  ## links.strata <- outer(plantas$strata, aves$strata, function(x, y) {
  ## any(unlist(strsplit(x, " and |, ")), unlist(strsplit(y, " and |, ")))
  ## })
  links.strata <- matrix(F, nrow = length(plantas$strata), ncol = length(aves$strata))
  for (i in 1:length(plantas$strata)) {
    for (j in 1:length(aves$strata)) {
      links.strata[i, j] <- any(unlist(strsplit(plantas$strata[i], " and |, ")) %in% unlist(strsplit(aves$strata[j], " and |, ")))
    }
  }
  ## Both criteria (disabled; morphology only is used).
  ## links <- ifelse(links.morpho & links.strata, 1, 0)
  links <- links.morpho
  ## Drop species with no possible links.
  colsums <- colSums(links)
  rowsums <- rowSums(links)
  nomes.plantas <- tolower(unique(plantas$species)[rowsums > 0])
  nomes.aves <- tolower(unique(aves$specie)[colsums > 0])
  links <- links[rowsums > 0, colsums > 0]
  N.plantas <- nrow(links)
  N.aves <- ncol(links)
  ## Checking the density.
  dens.simul <- sum(links)/(N.aves * N.plantas)
  ## Since the density is most likely still higher than desired, links are
  ## removed based on their probability given the abundances until the target
  ## density is reached. No attempt is made to force a lognormal degree
  ## distribution per level, because interaction frequencies are ignored.
  ## Minimum number of interactions per species:
  min.links <- 1
  ## Relative abundances * relative abundances.
  mat.prob <- outer(plantas$abundance, aves$abundance, function(x, y) x/sum(plantas$abundance) * y/sum(aves$abundance))
  mat.prob <- mat.prob[rowsums > 0, colsums > 0]
  colsums <- colSums(links)
  rowsums <- rowSums(links)
  c.keep <- colsums <= min.links & colsums != 0
  r.keep <- rowsums <= min.links & rowsums != 0
  c.remove.n <- sum(colsums[c.keep])
  r.remove.n <- sum(rowsums[r.keep])
  ## How many links are left to remove? (superseded by the sampling below)
  # links.out <- sum(links) - length(links) * dens
  # prob <- quantile(mat.prob, links.out)
  # final.mat <- ifelse(mat.prob * links >= prob, 1, 0)
  # Boost the weights of nearly isolated species so their links tend to stay.
  mat.prob[r.keep, c.keep] <- mat.prob[r.keep, c.keep] + links[r.keep, c.keep]
  for (i in seq_len(runs)) {
    final.mat <- matrix(0, ncol = N.aves, nrow = N.plantas)
    # Weighted sampling (without replacement) down to the target density.
    links.keep <- sample(which(links == 1),
                         length(links) * dens,
                         prob = mat.prob[links == 1])
    final.mat[links.keep] <- 1
    (dens.simul <- sum(final.mat)/(N.aves * N.plantas))
    final.mat <- t(final.mat)  # birds in rows, plants in columns
    colnames(final.mat) <- nomes.plantas
    rownames(final.mat) <- nomes.aves
    # visweb(t(mat.bin))
    # plotweb(t(mat.bin))
    # plotweb(final.mat, text.rot = 90)
    # networklevel(final.mat, index = "connectance")
    networks[[i]] <- final.mat
  }
  invisible(networks)
}
|
# Example script extracted from vegan's help page for goodness.cca().
library(vegan)
### Name: goodness.cca
### Title: Diagnostic Tools for [Constrained] Ordination (CCA, RDA, DCA,
### CA, PCA)
### Aliases: goodness goodness.cca inertcomp spenvcor intersetcor vif.cca
### alias.cca
### Keywords: multivariate
### ** Examples
data(dune)
data(dune.env)
# Partial CCA: A1 and Management constrained, Moisture partialled out.
mod <- cca(dune ~ A1 + Management + Condition(Moisture), data=dune.env)
goodness(mod, addprevious = TRUE)
goodness(mod, addprevious = TRUE, summ = TRUE)
# Inertia components
inertcomp(mod, prop = TRUE)
inertcomp(mod)
# vif.cca
vif.cca(mod)
# Aliased constraints: a model with ALL environment variables has
# linearly dependent constraints.
mod <- cca(dune ~ ., dune.env)
mod
vif.cca(mod)
alias(mod)
with(dune.env, table(Management, Manure))
# The standard correlations (not recommended)
spenvcor(mod)
intersetcor(mod)
|
/data/genthat_extracted_code/vegan/examples/goodness.cca.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 732
|
r
|
# Example script extracted from vegan's help page for goodness.cca().
library(vegan)
### Name: goodness.cca
### Title: Diagnostic Tools for [Constrained] Ordination (CCA, RDA, DCA,
### CA, PCA)
### Aliases: goodness goodness.cca inertcomp spenvcor intersetcor vif.cca
### alias.cca
### Keywords: multivariate
### ** Examples
data(dune)
data(dune.env)
# Partial CCA: A1 and Management constrained, Moisture partialled out.
mod <- cca(dune ~ A1 + Management + Condition(Moisture), data=dune.env)
goodness(mod, addprevious = TRUE)
goodness(mod, addprevious = TRUE, summ = TRUE)
# Inertia components
inertcomp(mod, prop = TRUE)
inertcomp(mod)
# vif.cca
vif.cca(mod)
# Aliased constraints: a model with ALL environment variables has
# linearly dependent constraints.
mod <- cca(dune ~ ., dune.env)
mod
vif.cca(mod)
alias(mod)
with(dune.env, table(Management, Manure))
# The standard correlations (not recommended)
spenvcor(mod)
intersetcor(mod)
|
#' Elastic Net SearcheR
#'
#' Search a grid of values of alpha and lambda for minimum mean CV error.
#'
#' @inheritParams glmnet::cv.glmnet
#' @inheritParams glmnet::glmnet
#' @param alphas a sequence of alpha values
#'
#' @return an `ensr` object: a list of `cv.glmnet` fits, one per alpha.
#' @export
ensr <- function(x, y, alphas = seq(0.00, 1.00, length = 10), nlambda = 100L, standardize = TRUE, nfolds = 10L, foldid, ...) {
  # Build a single set of folds shared by every alpha so the CV errors are
  # comparable across the grid.
  if (missing(foldid)) {
    foldid <- rep(seq(nfolds), length.out = nrow(x))
  }
  cl <- as.list(match.call())
  cl[[1]] <- quote(glmnet::cv.glmnet)
  cl$alphas <- NULL  # drop the ensr-only argument before forwarding
  # BUG FIX: match.call() only records arguments the caller actually passed,
  # so the foldid generated above used to be silently discarded. Assign it
  # explicitly so all fits really do share one set of folds.
  cl$foldid <- foldid
  # Lambda grid tailored per alpha, from the largest useful lambda downward.
  lmax <- lambda_max(y, x, alphas, standardize = standardize)
  lgrid <- lambda_alpha_grid(lmax, alphas, nlambda = nlambda)
  l_and_a <- split(lgrid$lgrid, lgrid$lgrid$a)
  # One cv.glmnet fit per alpha value, each with its own lambda sequence.
  models <- lapply(l_and_a,
                   function(la) {
                     cl$alpha <- la$a[1]
                     cl$lambda <- la$l
                     eval(as.call(cl))
                   })
  names(models) <- NULL
  class(models) <- "ensr"
  models
}
#' @export
print.ensr <- function(x, ...) {
  # Brief textual summary: how many cv.glmnet fits the object holds.
  cat("A ensr object with", length(x), "cv.glmnet objects.\n")
  utils::str(x, max.level = 0L)
}
#' @export
summary.ensr <- function(object, ...) {
  # Stack the per-alpha CV paths into one long table: one row per
  # (alpha, lambda) pair with its mean CV error and non-zero count.
  out <-
    data.table::rbindlist(
      lapply(seq_along(object),
             function(idx) {
               # alpha is recovered from the recorded glmnet call of each fit.
               data.table::data.table(l_index = idx,
                                      lambda = object[[idx]]$lambda,
                                      cvm = object[[idx]]$cvm,
                                      nzero = object[[idx]]$nzero,
                                      alpha = as.list(object[[idx]]$glmnet.fit$call)$alpha)
             })
    )
  class(out) <- c("ensr_summary", class(out))
  out
}
#' @export
print.ensr_summary <- function(x, ...) {
  # Fall through to the underlying data.table/data.frame print method.
  NextMethod("print")
}
#' @export
plot.ensr_summary <- function(x, type = c(1), ...) {
  # type 1: CV error surface over the (alpha, lambda) grid, with the overall
  # minimum marked by a red cross.
  if (1 %in% type) {
    sout <- data.table::copy(x)
    # z appears to rescale cvm (center = "min", scale = "sd") so that
    # log10(z) measures distance from the best model -- semantics of the
    # project-local standardize() should be confirmed.
    data.table::set(sout, j = "z", value = standardize(sout$cvm, stats = list(center = "min", scale = "sd")))
    imin <- which.min(sout$cvm)
    g1 <-
      ggplot2::ggplot(sout) +
      ggplot2::aes_string(x = "alpha", y = "lambda", z = "log10(z)", color = "log10(z)") +
      ggplot2::geom_point() +
      ggplot2::geom_contour() +
      ggplot2::scale_y_log10() +
      ggplot2::geom_point(data = sout[imin, ], cex = 2, pch = 4, color = "red") +
      ggplot2::scale_color_gradient2(low = "#1b7837", high = "#762183") +
      ggplot2::xlab(expression(alpha)) +
      ggplot2::ylab(expression(lambda))
  }
  # type 2: the lowest CV error attained at each count of non-zero
  # coefficients.
  if (2 %in% type) {
    x2 <- data.table::copy(x)
    x2 <- data.table::rbindlist(lapply(unique(x2$nzero), function(i) {
      x3 <- subset(x2, x2$nzero == i)
      subset(x3, x3$cvm == min(x3$cvm))
    }))
    g2 <-
      ggplot2::ggplot(x2) +
      ggplot2::aes_string(x = "nzero", y = "cvm") +
      ggplot2::geom_line() +
      ggplot2::geom_point()
  }
  # Arrange/emit the requested plot(s).
  if (all( c(1, 2) %in% type)) {
    gridExtra::grid.arrange(g1, g2, nrow = 1)
  } else if (1 %in% type) {
    g1
  } else if (2 %in% type) {
    g2
  } else {
    stop("Unknown plot type.")
  }
}
#' @export
plot.ensr <- function(x, type = c(1), ...) {
  # Plotting an ensr object plots its summary.
  graphics::plot(summary(x), type = type, ...)
}
#' Predict Methods for ensr objects
#'
#' Using either the \code{lambda.min} or \code{lambda.1se}, find the preferable
#' model from the \code{ensr} object and return a prediction.
#'
#' The \code{glmnet::predict} argument \code{s} is ignored if specified and
#' attempted to be passed via \code{...}. The value of \code{s} that is passed
#' to \code{glmnet::predict} is determined by the value of \code{lambda.min} or
#' \code{lambda.1se} found from a call to \code{\link{preferable}}.
#'
#' @inheritParams glmnet::predict.elnet
#' @param object a \code{ensr} object
#' @param ... other arguments passed along to \code{predict}
#' @name predict
#' @export
predict.ensr <- function(object, ...) {
  # Pick the preferable fit, then rebuild this call as a predict() call on
  # that glmnet fit, pinning `s` to the lambda the selection chose.
  pm <- preferable(object)
  cl <- as.list(match.call())
  cl[[1]] <- quote(predict)
  cl$object <- pm
  cl$s <- pm$cv_row$lambda
  eval(as.call(cl))
}
#' @rdname predict
#' @export
coef.ensr <- function(object, ...) {
  # Delegate to predict() with type = "coefficients", forwarding the
  # caller's arguments unchanged.
  call_parts <- as.list(match.call())
  call_parts[[1L]] <- quote(predict)
  call_parts[["type"]] <- "coefficients"
  eval(as.call(call_parts))
}
#' Preferable Elastic Net Model
#'
#' Find the preferable Elastic Net Model from an ensr object. The preferable
#' model is defined as the model with the lowest mean cross validation error and
#' largest alpha value.
#'
#' @param object an ensr object
#' @param ... not currently used.
#' @export
preferable <- function(object, ...) {
  UseMethod("preferable")
}
#' @export
preferable.ensr <- function(object, ...) {
  sm <- summary(object)
  # Keep the row(s) with the lowest mean CV error ...
  sm <- sm[sm[["cvm"]] == min(sm[["cvm"]]), ]
  # ... breaking ties in favour of the largest alpha.
  if (nrow(sm) > 1L) {
    sm <- sm[sm[['alpha']] == max(sm[['alpha']])]
  }
  model_idx <- sm$l_index
  out <- object[[model_idx]]$glmnet.fit
  # Attach the winning summary row (plus the selecting call) so downstream
  # methods know which lambda was chosen.
  out$cv_row <- sm
  attr(out$cv_row, "call") <- match.call()
  class(out) <- c("ensr_pref", class(out))
  out
}
|
/R/ensr.R
|
no_license
|
cran/ensr
|
R
| false
| false
| 5,052
|
r
|
#' Elastic Net SearcheR
#'
#' Search a grid of values of alpha and lambda for minimum mean CV error
#'
#' @inheritParams glmnet::cv.glmnet
#' @inheritParams glmnet::glmnet
#' @param alphas a sequence of alpha values
#'
#' @export
ensr <- function(x, y, alphas = seq(0.00, 1.00, length = 10), nlambda = 100L, standardize = TRUE, nfolds = 10L, foldid, ...) {
  # build a single set of folds
  if (missing(foldid)) {
    foldid <- rep(seq(nfolds), length.out = nrow(x))
  }
  # NOTE(review): the foldid generated above is never inserted into the call
  # below -- match.call() only records arguments actually supplied -- so the
  # "single set of folds" is discarded when the caller omits foldid. Likely
  # intended: cl$foldid <- foldid.
  cl <- as.list(match.call())
  cl[[1]] <- quote(glmnet::cv.glmnet)  # re-target the call at cv.glmnet
  cl$alphas <- NULL  # drop the ensr-only argument before forwarding
  # Lambda grid tailored per alpha, from the largest useful lambda downward.
  lmax <- lambda_max(y, x, alphas, standardize = standardize)
  lgrid <- lambda_alpha_grid(lmax, alphas, nlambda = nlambda)
  l_and_a <- split(lgrid$lgrid, lgrid$lgrid$a)
  # One cv.glmnet fit per alpha value, each with its own lambda sequence.
  models <- lapply(l_and_a,
                   function(la) {
                     cl$alpha <- la$a[1]
                     cl$lambda <- la$l
                     eval(as.call(cl))
                   })
  names(models) <- NULL
  class(models) <- "ensr"
  models
}
#' @export
print.ensr <- function(x, ...) {
  # Brief textual summary: how many cv.glmnet fits the object holds.
  cat("A ensr object with", length(x), "cv.glmnet objects.\n")
  utils::str(x, max.level = 0L)
}
#' @export
summary.ensr <- function(object, ...) {
  # Stack the per-alpha CV paths into one long table: one row per
  # (alpha, lambda) pair with its mean CV error and non-zero count.
  out <-
    data.table::rbindlist(
      lapply(seq_along(object),
             function(idx) {
               # alpha is recovered from the recorded glmnet call of each fit.
               data.table::data.table(l_index = idx,
                                      lambda = object[[idx]]$lambda,
                                      cvm = object[[idx]]$cvm,
                                      nzero = object[[idx]]$nzero,
                                      alpha = as.list(object[[idx]]$glmnet.fit$call)$alpha)
             })
    )
  class(out) <- c("ensr_summary", class(out))
  out
}
#' @export
print.ensr_summary <- function(x, ...) {
  # Fall through to the underlying data.table/data.frame print method.
  NextMethod("print")
}
#' @export
plot.ensr_summary <- function(x, type = c(1), ...) {
  # type 1: CV error surface over the (alpha, lambda) grid, with the overall
  # minimum marked by a red cross.
  if (1 %in% type) {
    sout <- data.table::copy(x)
    # z appears to rescale cvm (center = "min", scale = "sd") so that
    # log10(z) measures distance from the best model -- semantics of the
    # project-local standardize() should be confirmed.
    data.table::set(sout, j = "z", value = standardize(sout$cvm, stats = list(center = "min", scale = "sd")))
    imin <- which.min(sout$cvm)
    g1 <-
      ggplot2::ggplot(sout) +
      ggplot2::aes_string(x = "alpha", y = "lambda", z = "log10(z)", color = "log10(z)") +
      ggplot2::geom_point() +
      ggplot2::geom_contour() +
      ggplot2::scale_y_log10() +
      ggplot2::geom_point(data = sout[imin, ], cex = 2, pch = 4, color = "red") +
      ggplot2::scale_color_gradient2(low = "#1b7837", high = "#762183") +
      ggplot2::xlab(expression(alpha)) +
      ggplot2::ylab(expression(lambda))
  }
  # type 2: the lowest CV error attained at each count of non-zero
  # coefficients.
  if (2 %in% type) {
    x2 <- data.table::copy(x)
    x2 <- data.table::rbindlist(lapply(unique(x2$nzero), function(i) {
      x3 <- subset(x2, x2$nzero == i)
      subset(x3, x3$cvm == min(x3$cvm))
    }))
    g2 <-
      ggplot2::ggplot(x2) +
      ggplot2::aes_string(x = "nzero", y = "cvm") +
      ggplot2::geom_line() +
      ggplot2::geom_point()
  }
  # Arrange/emit the requested plot(s).
  if (all( c(1, 2) %in% type)) {
    gridExtra::grid.arrange(g1, g2, nrow = 1)
  } else if (1 %in% type) {
    g1
  } else if (2 %in% type) {
    g2
  } else {
    stop("Unknown plot type.")
  }
}
#' @export
plot.ensr <- function(x, type = c(1), ...) {
  # Plotting an ensr object plots its summary.
  graphics::plot(summary(x), type = type, ...)
}
#' Predict Methods for ensr objects
#'
#' Using either the \code{lambda.min} or \code{lambda.1se}, find the preferable
#' model from the \code{ensr} object and return a prediction.
#'
#' The \code{glmnet::predict} argument \code{s} is ignored if specified and
#' attempted to be passed via \code{...}. The value of \code{s} that is passed
#' to \code{glmnet::predict} is determined by the value of \code{lambda.min} or
#' \code{lambda.1se} found from a call to \code{\link{preferable}}.
#'
#' @inheritParams glmnet::predict.elnet
#' @param object a \code{ensr} object
#' @param ... other arguments passed along to \code{predict}
#' @name predict
#' @export
predict.ensr <- function(object, ...) {
  # Pick the preferable fit, then rebuild this call as a predict() call on
  # that glmnet fit, pinning `s` to the lambda the selection chose.
  pm <- preferable(object)
  cl <- as.list(match.call())
  cl[[1]] <- quote(predict)
  cl$object <- pm
  cl$s <- pm$cv_row$lambda
  eval(as.call(cl))
}
#' @rdname predict
#' @export
coef.ensr <- function(object, ...) {
  # Coefficients are just predict() with type = "coefficients".
  cl <- as.list(match.call())
  cl[[1]] <- quote(predict)
  cl$type = "coefficients"
  eval(as.call(cl))
}
#' Preferable Elastic Net Model
#'
#' Find the preferable Elastic Net Model from an ensr object. The preferable
#' model is defined as the model with the lowest mean cross validation error and
#' largest alpha value.
#'
#' @param object an ensr object
#' @param ... not currently used.
#' @export
preferable <- function(object, ...) {
  UseMethod("preferable")
}
#' @export
preferable.ensr <- function(object, ...) {
  sm <- summary(object)
  # Keep the row(s) with the lowest mean CV error ...
  sm <- sm[sm[["cvm"]] == min(sm[["cvm"]]), ]
  # ... breaking ties in favour of the largest alpha.
  if (nrow(sm) > 1L) {
    sm <- sm[sm[['alpha']] == max(sm[['alpha']])]
  }
  model_idx <- sm$l_index
  out <- object[[model_idx]]$glmnet.fit
  # Attach the winning summary row (plus the selecting call) so downstream
  # methods know which lambda was chosen.
  out$cv_row <- sm
  attr(out$cv_row, "call") <- match.call()
  class(out) <- c("ensr_pref", class(out))
  out
}
|
# Example script extracted from the equate package help page for equate().
library(equate)
### Name: equate
### Title: Observed Score Linking and Equating
### Aliases: equate equate.list equate.freqtab equate.default
### summary.equate summary.equate.list
### Keywords: methods
### ** Examples
# See vignette("equatevignette") and Albano (2016) for a
# description of methods and additional examples
# Random groups equating for (1) identity, (2) mean,
# (3) linear, (4) equipercentile with loglinear
# smoothing, and (5) a composite of mean and identity
rx <- as.freqtab(ACTmath[, 1:2])
ry <- as.freqtab(ACTmath[, c(1, 3)])
set.seed(2007)  # reproducible bootstrap standard errors
req1 <- equate(rx, ry, type = "i", boot = TRUE, reps = 5)
req2 <- equate(rx, ry, type = "m", boot = TRUE, reps = 5)
req3 <- equate(rx, ry, type = "l", boot = TRUE, reps = 5)
req4 <- equate(rx, ry, type = "e", boot = TRUE, reps = 5,
smooth = "loglin", degree = 3)
req5 <- composite(list(req1, req2), wc = .5, symmetric = TRUE)
# Compare equating functions
plot(req1, req2, req3, req4, req5[[1]], addident = FALSE)
# Compare boostrap standard errors
# Bootstrapping isn't supported for composite equating
plot(req1, req2, req3, req4, addident = FALSE, out = "se",
legendplace = "topleft")
# Nonequivalent groups design for (1) Tucker linear,
# (2) frequency estimation , and (3) Braun/Holland linear
nx <- freqtab(KBneat$x, scales = list(0:36, 0:12))
ny <- freqtab(KBneat$y, scales = list(0:36, 0:12))
neq1 <- equate(nx, ny, type = "linear", method = "tuck", ws = 1)
neq2 <- equate(nx, ny, type = "equip", method = "freq", ws = 1)
neq3 <- equate(nx, ny, type = "linear", method = "braun", ws = 1)
# Compare equated scores
round(cbind(xscale = 0:36, tucker = neq1$conc$yx,
fe = neq2$conc$yx, braun = neq3$conc$yx), 2)
# Multiple linkings using PISA reading booklet 6
# clusters 3a, 5, 6, and 7
r3 <- freqtab(PISA$totals$b6$r3a, scales = 0:15)
r5 <- freqtab(PISA$totals$b6$r5, scales = 0:15)
r6 <- freqtab(PISA$totals$b6$r6, scales = 0:15)
r7 <- freqtab(PISA$totals$b6$r7, scales = 0:14)
eqargs <- list(r3r5 = list(type = "linear", x = "r3", y = "r5",
name = "Linear Linking PISA r3 to r5"),
r5r6 = list(type = "linear", x = "r5", y = "r6",
name = "Linear Linking PISA r5 to r6"),
r6r7 = list(type = "linear", x = "r6", y = "r7",
name = "Linear Linking PISA r5 to r7"))
req <- equate(list(r3 = r3, r5 = r5, r6 = r6, r7 = r7), eqargs)
# Put PISA r3 on the scale of r7 using the linking chain
# Compare to a direct linking of r3 to r7
equate(equate(req$r3r5$conc$yx, req$r5r6), req$r6r7)
equate(r3, r7, "linear")$conc$yx
# Linking PISA cluster r3a to r5 with multiple anchors
m367 <- freqtab(PISA$totals$b6[1:198, c("r3a", "r6", "r7")],
scales = list(0:15, 0:16, 0:14))
m567 <- freqtab(PISA$totals$b6[199:396, c("r5", "r6", "r7")],
scales = list(0:15, 0:16, 0:14))
meq1 <- equate(m367, m567, type = "mean", method = "nom")
meq2 <- equate(m367, m567, type = "mean", method = "tuck")
meq3 <- equate(m367, m567, type = "lin", method = "tuck")
meq4 <- equate(m367, m567, type = "equip", method = "freq",
smooth = "log", show = FALSE)
# NOTE(review): meq repeats the meq1 call with identical arguments.
meq <- equate(m367, m567, type = "mean", method = "nom")
plot(meq1, meq2, meq3, meq4, meq, req[[1]])
|
/data/genthat_extracted_code/equate/examples/equate.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 3,136
|
r
|
# Example script extracted from the equate package help page for equate().
library(equate)
### Name: equate
### Title: Observed Score Linking and Equating
### Aliases: equate equate.list equate.freqtab equate.default
### summary.equate summary.equate.list
### Keywords: methods
### ** Examples
# See vignette("equatevignette") and Albano (2016) for a
# description of methods and additional examples
# Random groups equating for (1) identity, (2) mean,
# (3) linear, (4) equipercentile with loglinear
# smoothing, and (5) a composite of mean and identity
rx <- as.freqtab(ACTmath[, 1:2])
ry <- as.freqtab(ACTmath[, c(1, 3)])
set.seed(2007)  # reproducible bootstrap standard errors
req1 <- equate(rx, ry, type = "i", boot = TRUE, reps = 5)
req2 <- equate(rx, ry, type = "m", boot = TRUE, reps = 5)
req3 <- equate(rx, ry, type = "l", boot = TRUE, reps = 5)
req4 <- equate(rx, ry, type = "e", boot = TRUE, reps = 5,
smooth = "loglin", degree = 3)
req5 <- composite(list(req1, req2), wc = .5, symmetric = TRUE)
# Compare equating functions
plot(req1, req2, req3, req4, req5[[1]], addident = FALSE)
# Compare boostrap standard errors
# Bootstrapping isn't supported for composite equating
plot(req1, req2, req3, req4, addident = FALSE, out = "se",
legendplace = "topleft")
# Nonequivalent groups design for (1) Tucker linear,
# (2) frequency estimation , and (3) Braun/Holland linear
nx <- freqtab(KBneat$x, scales = list(0:36, 0:12))
ny <- freqtab(KBneat$y, scales = list(0:36, 0:12))
neq1 <- equate(nx, ny, type = "linear", method = "tuck", ws = 1)
neq2 <- equate(nx, ny, type = "equip", method = "freq", ws = 1)
neq3 <- equate(nx, ny, type = "linear", method = "braun", ws = 1)
# Compare equated scores
round(cbind(xscale = 0:36, tucker = neq1$conc$yx,
fe = neq2$conc$yx, braun = neq3$conc$yx), 2)
# Multiple linkings using PISA reading booklet 6
# clusters 3a, 5, 6, and 7
r3 <- freqtab(PISA$totals$b6$r3a, scales = 0:15)
r5 <- freqtab(PISA$totals$b6$r5, scales = 0:15)
r6 <- freqtab(PISA$totals$b6$r6, scales = 0:15)
r7 <- freqtab(PISA$totals$b6$r7, scales = 0:14)
eqargs <- list(r3r5 = list(type = "linear", x = "r3", y = "r5",
name = "Linear Linking PISA r3 to r5"),
r5r6 = list(type = "linear", x = "r5", y = "r6",
name = "Linear Linking PISA r5 to r6"),
r6r7 = list(type = "linear", x = "r6", y = "r7",
name = "Linear Linking PISA r5 to r7"))
req <- equate(list(r3 = r3, r5 = r5, r6 = r6, r7 = r7), eqargs)
# Put PISA r3 on the scale of r7 using the linking chain
# Compare to a direct linking of r3 to r7
equate(equate(req$r3r5$conc$yx, req$r5r6), req$r6r7)
equate(r3, r7, "linear")$conc$yx
# Linking PISA cluster r3a to r5 with multiple anchors
m367 <- freqtab(PISA$totals$b6[1:198, c("r3a", "r6", "r7")],
scales = list(0:15, 0:16, 0:14))
m567 <- freqtab(PISA$totals$b6[199:396, c("r5", "r6", "r7")],
scales = list(0:15, 0:16, 0:14))
meq1 <- equate(m367, m567, type = "mean", method = "nom")
meq2 <- equate(m367, m567, type = "mean", method = "tuck")
meq3 <- equate(m367, m567, type = "lin", method = "tuck")
meq4 <- equate(m367, m567, type = "equip", method = "freq",
smooth = "log", show = FALSE)
# NOTE(review): meq repeats the meq1 call with identical arguments.
meq <- equate(m367, m567, type = "mean", method = "nom")
plot(meq1, meq2, meq3, meq4, meq, req[[1]])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.